diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index ce4f8a1a5b8d..7ab0812e7cff 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -29,8 +29,8 @@ RUN apt-get install -y curl wget gnupg python3 python-is-python3 python3-pip git build-essential tmux vim RUN python -m pip install \ - pip==24.0.0 \ - setuptools==69.5.1 \ + pip==24.1.2 \ + setuptools==70.3.0 \ poetry==1.7.1 USER $USERNAME diff --git a/.editorconfig b/.editorconfig index 321808ebaecf..103fe51237c8 100644 --- a/.editorconfig +++ b/.editorconfig @@ -16,6 +16,14 @@ profile = black indent_style = space indent_size = 2 +[*.md] +indent_style = space +indent_size = 2 + [*.yml] indent_style = space indent_size = 2 + +[*.toml] +indent_style = space +indent_size = 4 diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 0b8702a8360c..7e0910c449e9 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -7,7 +7,10 @@ README.md @jafermarq @tanertopal @danieljanes # Flower Baselines -/baselines @jafermarq @tanertopal @danieljanes +/baselines @jafermarq @danieljanes + +# Flower Benchmarks +/benchmarks @jafermarq @danieljanes # Flower Datasets /datasets @jafermarq @tanertopal @danieljanes @@ -15,6 +18,9 @@ README.md @jafermarq @tanertopal @danieljanes # Flower Examples /examples @jafermarq @tanertopal @danieljanes +# Flower Templates +/src/py/flwr/cli/new/templates @jafermarq @tanertopal @danieljanes + # Changelog /doc/source/ref-changelog.md @jafermarq @tanertopal @danieljanes @@ -24,3 +30,9 @@ README.md @jafermarq @tanertopal @danieljanes # GitHub Actions and Workflows /.github/workflows @Robert-Steiner @tanertopal @danieljanes /.github/actions @Robert-Steiner @tanertopal @danieljanes + +# Docker-related files +/.devcontainer @Robert-Steiner @Moep90 @tanertopal @danieljanes +**/Dockerfile @Robert-Steiner @Moep90 @tanertopal @danieljanes +**/*.Dockerfile @Robert-Steiner @Moep90 @tanertopal @danieljanes +/src/docker @Robert-Steiner @Moep90 @tanertopal @danieljanes diff 
--git a/.github/actions/bootstrap/action.yml b/.github/actions/bootstrap/action.yml index bee90beffa7d..a8a98acdf304 100644 --- a/.github/actions/bootstrap/action.yml +++ b/.github/actions/bootstrap/action.yml @@ -3,13 +3,13 @@ description: "Bootstrap Python environment (install and configure Python version inputs: python-version: description: "Version range or exact version of Python or PyPy to use, using SemVer's version range syntax." - default: 3.8 + default: 3.9 pip-version: description: "Version of pip to be installed using pip" - default: 24.0.0 + default: 24.1.2 setuptools-version: description: "Version of setuptools to be installed using pip" - default: 69.5.1 + default: 70.3.0 poetry-version: description: "Version of poetry to be installed using pip" default: 1.7.1 diff --git a/.github/workflows/_docker-build.yml b/.github/workflows/_docker-build.yml index 608158fc0a5a..b5c27c9b4834 100644 --- a/.github/workflows/_docker-build.yml +++ b/.github/workflows/_docker-build.yml @@ -36,7 +36,7 @@ permissions: jobs: build: name: Build image - runs-on: ubuntu-22.04 + runs-on: ${{ matrix.platform.runner-os }} timeout-minutes: 180 outputs: build-id: ${{ steps.build-id.outputs.id }} @@ -44,10 +44,8 @@ jobs: fail-fast: true matrix: platform: [ - # build-push action and qemu use different platform names - # therefore we create a map - { name: "amd64", qemu: "", docker: "linux/amd64" }, - { name: "arm64", qemu: "arm64", docker: "linux/arm64" }, + { name: "amd64", docker: "linux/amd64", runner-os: "ubuntu-22.04" }, + { name: "arm64", docker: "linux/arm64", runner-os: "ubuntu-4-core-arm64" }, ] steps: - name: Create build id @@ -60,14 +58,24 @@ jobs: hash = hashlib.sha256('''${{ inputs.namespace-repository }} ${{ inputs.file-dir }} ${{ inputs.build-args }}'''.encode()) + # Adds two spaces to the line breaks to ensure proper indentation + # when passing the multi-line string to the wretry.action. 
+ # Without it, the multi-line string is passed like this: + # + # build-args: | + # ARG1= + # ARG2= + # ARG3= + # + # This causes the Docker action to interpret ARG2 and ARG3 as keys instead + # of values ​​of the multi-line string. + build_args = '''${{ inputs.build-args }}'''.replace("\n", "\n ") + with open(os.environ['GITHUB_OUTPUT'], 'a') as fh: print(f"id={hash.hexdigest()}", file=fh) - - - name: Set up QEMU - if: matrix.platform.qemu != '' - uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0 - with: - platforms: ${{ matrix.platform.qemu }} + print("build-args<> "$GITHUB_OUTPUT" echo "setuptools-version=${{ steps.bootstrap.outputs.setuptools-version }}" >> "$GITHUB_OUTPUT" + echo "flwr-version-ref=git+${{ github.server_url }}/${{ github.repository }}.git@${{ github.sha }}" >> "$GITHUB_OUTPUT" - - id: matrix - run: | - python dev/build-docker-image-matrix.py --flwr-version ${{ github.event.inputs.flwr-version }} > matrix.json - echo "matrix=$(cat matrix.json)" >> $GITHUB_OUTPUT - - build-base-images: + build-docker-base-images: name: Build base images + if: github.repository == 'adap/flower' uses: ./.github/workflows/_docker-build.yml needs: parameters - strategy: - fail-fast: false - matrix: ${{ fromJson(needs.parameters.outputs.matrix).base }} with: - namespace-repository: ${{ matrix.images.namespace_repository }} - file-dir: ${{ matrix.images.file_dir }} + namespace-repository: flwr/base + file-dir: src/docker/base/ubuntu build-args: | - PYTHON_VERSION=${{ matrix.images.python_version }} PIP_VERSION=${{ needs.parameters.outputs.pip-version }} SETUPTOOLS_VERSION=${{ needs.parameters.outputs.setuptools-version }} - DISTRO=${{ matrix.images.distro.name }} - DISTRO_VERSION=${{ matrix.images.distro.version }} - FLWR_VERSION=${{ matrix.images.flwr_version }} - tags: ${{ matrix.images.tag }} + FLWR_VERSION_REF=${{ needs.parameters.outputs.flwr-version-ref }} + tags: unstable secrets: dockerhub-user: ${{ secrets.DOCKERHUB_USERNAME }} 
dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }} - build-binary-images: + build-docker-binary-images: name: Build binary images + if: github.repository == 'adap/flower' uses: ./.github/workflows/_docker-build.yml - needs: [parameters, build-base-images] + needs: build-docker-base-images strategy: fail-fast: false - matrix: ${{ fromJson(needs.parameters.outputs.matrix).binary }} + matrix: + images: [ + { repository: "flwr/superlink", file_dir: "src/docker/superlink" }, + { repository: "flwr/supernode", file_dir: "src/docker/supernode" }, + { repository: "flwr/serverapp", file_dir: "src/docker/serverapp" }, + { repository: "flwr/superexec", file_dir: "src/docker/superexec" }, + { repository: "flwr/clientapp", file_dir: "src/docker/clientapp" } + ] with: - namespace-repository: ${{ matrix.images.namespace_repository }} + namespace-repository: ${{ matrix.images.repository }} file-dir: ${{ matrix.images.file_dir }} - build-args: BASE_IMAGE=${{ matrix.images.base_image }} - tags: ${{ matrix.images.tags }} + build-args: BASE_IMAGE=unstable + tags: unstable secrets: dockerhub-user: ${{ secrets.DOCKERHUB_USERNAME }} dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/docker-readme.yml b/.github/workflows/docker-readme.yml new file mode 100644 index 000000000000..29dd787d638e --- /dev/null +++ b/.github/workflows/docker-readme.yml @@ -0,0 +1,51 @@ +name: Update Docker READMEs + +on: + push: + branches: + - 'main' + paths: + - 'src/docker/**/README.md' + +jobs: + collect: + if: ${{ github.repository == 'adap/flower' }} + name: Collect Docker READMEs + runs-on: ubuntu-22.04 + timeout-minutes: 10 + outputs: + readme_files: ${{ steps.filter.outputs.readme_files }} + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 + id: filter + with: + list-files: "json" + filters: | + readme: + - 'src/docker/**/README.md' + + update: + if: ${{ 
needs.collect.outputs.readme_files != '' && toJson(fromJson(needs.collect.outputs.readme_files)) != '[]' }} + name: Update Docker READMEs + runs-on: ubuntu-22.04 + timeout-minutes: 10 + needs: collect + strategy: + matrix: + readme_path: ${{ fromJSON(needs.collect.outputs.readme_files) }} + + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - id: repository + run: echo "name=$(basename $(dirname ${{ matrix.readme_path }}))" >> "$GITHUB_OUTPUT" + + - name: Docker Hub Description + uses: peter-evans/dockerhub-description@e98e4d1628a5f3be2be7c231e50981aee98723ae # v4.0.0 + with: + repository: flwr/${{ steps.repository.outputs.name }} + readme-filepath: ${{ matrix.readme_path }} + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index d66362de9d32..012f584561ac 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -51,6 +51,64 @@ jobs: short_sha: ${{ steps.upload.outputs.SHORT_SHA }} dir: ${{ steps.upload.outputs.DIR }} + superexec: + runs-on: ubuntu-22.04 + timeout-minutes: 10 + needs: wheel + strategy: + fail-fast: false + matrix: + python-version: ["3.9", "3.10", "3.11"] + directory: [e2e-bare-auth] + connection: [secure, insecure] + engine: [deployment-engine, simulation-engine] + authentication: [no-auth, client-auth] + exclude: + - connection: insecure + authentication: client-auth + name: | + SuperExec / + Python ${{ matrix.python-version }} / + ${{ matrix.connection }} / + ${{ matrix.authentication }} / + ${{ matrix.engine }} + defaults: + run: + working-directory: e2e/${{ matrix.directory }} + steps: + - uses: actions/checkout@v4 + - name: Bootstrap + uses: ./.github/actions/bootstrap + with: + python-version: ${{ matrix.python-version }} + poetry-skip: 'true' + - name: Install Flower from repo + if: ${{ github.repository != 'adap/flower' || github.event.pull_request.head.repo.fork || github.actor 
== 'dependabot[bot]' }} + working-directory: ./ + run: | + if [[ "${{ matrix.engine }}" == "simulation-engine" ]]; then + python -m pip install ".[simulation]" + else + python -m pip install . + fi + - name: Download and install Flower wheel from artifact store + if: ${{ github.repository == 'adap/flower' && !github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' }} + run: | + # Define base URL for wheel file + WHEEL_URL="https://${{ env.ARTIFACT_BUCKET }}/py/${{ needs.wheel.outputs.dir }}/${{ needs.wheel.outputs.short_sha }}/${{ needs.wheel.outputs.whl_path }}" + if [[ "${{ matrix.engine }}" == "simulation-engine" ]]; then + python -m pip install "flwr[simulation] @ ${WHEEL_URL}" + else + python -m pip install "${WHEEL_URL}" + fi + - name: > + Run SuperExec test / + ${{ matrix.connection }} / + ${{ matrix.authentication }} / + ${{ matrix.engine }} + working-directory: e2e/${{ matrix.directory }} + run: ./../test_superexec.sh "${{ matrix.connection }}" "${{ matrix.authentication}}" "${{ matrix.engine }}" + frameworks: runs-on: ubuntu-22.04 timeout-minutes: 10 @@ -60,57 +118,63 @@ jobs: strategy: matrix: include: - - directory: bare + - directory: e2e-bare + e2e: e2e_bare - - directory: bare-https + - directory: e2e-bare-https + e2e: e2e_bare_https - - directory: bare-client-auth + - directory: e2e-bare-auth + e2e: e2e_bare_auth - - directory: jax + - directory: e2e-jax + e2e: e2e_jax - - directory: pytorch + - directory: e2e-pytorch + e2e: e2e_pytorch dataset: | from torchvision.datasets import CIFAR10 CIFAR10('./data', download=True) - - directory: tensorflow + - directory: e2e-tensorflow + e2e: e2e_tensorflow dataset: | import tensorflow as tf tf.keras.datasets.cifar10.load_data() - - directory: tabnet - dataset: | - import tensorflow_datasets as tfds - tfds.load(name='iris', split=tfds.Split.TRAIN) - - - directory: opacus + - directory: e2e-opacus + e2e: e2e_opacus dataset: | from torchvision.datasets import CIFAR10 CIFAR10('./data', 
download=True) - - directory: pytorch-lightning + - directory: e2e-pytorch-lightning + e2e: e2e_pytorch_lightning dataset: | from torchvision.datasets import MNIST MNIST('./data', download=True) - - directory: scikit-learn + - directory: e2e-scikit-learn + e2e: e2e_scikit_learn dataset: | import openml openml.datasets.get_dataset(554) - - directory: fastai + - directory: e2e-fastai + e2e: e2e_fastai dataset: | from fastai.vision.all import untar_data, URLs untar_data(URLs.MNIST) - - directory: pandas + - directory: e2e-pandas + e2e: e2e_pandas dataset: | from pathlib import Path from sklearn.datasets import load_iris Path('data').mkdir(exist_ok=True) load_iris(as_frame=True)['data'].to_csv('./data/client.csv') - name: Framework / ${{matrix.directory}} + name: Framework / ${{ matrix.directory }} defaults: run: @@ -121,7 +185,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v5 with: - python-version: 3.8 + python-version: 3.9 - name: Install build tools run: | python -m pip install -U pip==23.3.1 @@ -144,26 +208,30 @@ jobs: if: ${{ matrix.dataset }} run: python -c "${{ matrix.dataset }}" - name: Run edge client test - if: ${{ matrix.directory != 'bare-client-auth' }} - run: ./../test.sh "${{ matrix.directory }}" + if: ${{ matrix.directory != 'e2e-bare-auth' }} + working-directory: e2e/${{ matrix.directory }}/${{ matrix.e2e }} + run: ./../../test_legacy.sh "${{ matrix.directory }}" - name: Run virtual client test - if: ${{ matrix.directory != 'bare-client-auth' }} + if: ${{ matrix.directory != 'e2e-bare-auth' }} run: python simulation.py - name: Run simulation engine test - if: ${{ matrix.directory == 'pytorch' || matrix.directory == 'tensorflow'}} + if: ${{ matrix.directory == 'e2e-pytorch' || matrix.directory == 'e2e-tensorflow'}} run: python simulation_next.py - name: Run driver test - if: ${{ matrix.directory != 'bare-client-auth' }} - run: ./../test_driver.sh "${{ matrix.directory }}" + if: ${{ matrix.directory != 'e2e-bare-auth' }} + run: 
./../test_superlink.sh "${{ matrix.directory }}" - name: Run driver test with REST - if: ${{ matrix.directory == 'bare' }} - run: ./../test_driver.sh bare rest + if: ${{ matrix.directory == 'e2e-bare' }} + run: ./../test_superlink.sh bare rest - name: Run driver test with SQLite database - if: ${{ matrix.directory == 'bare' }} - run: ./../test_driver.sh bare sqlite + if: ${{ matrix.directory == 'e2e-bare' }} + run: ./../test_superlink.sh bare sqlite - name: Run driver test with client authentication - if: ${{ matrix.directory == 'bare-client-auth' }} - run: ./../test_driver.sh bare client-auth + if: ${{ matrix.directory == 'e2e-bare-auth' }} + run: ./../test_superlink.sh "${{ matrix.directory }}" client-auth + - name: Run reconnection test with SQLite database + if: ${{ matrix.directory == 'e2e-bare' }} + run: ./../test_reconnection.sh sqlite - name: Cache save Python location id: cache-save-python uses: actions/cache/save@v4 @@ -208,3 +276,74 @@ jobs: - name: Test strategies run: | python test.py "${{ matrix.strat }}" + + templates: + runs-on: ubuntu-22.04 + timeout-minutes: 10 + needs: wheel + strategy: + matrix: + framework: ["numpy", "pytorch", "tensorflow", "jax", "sklearn"] + + name: Template / ${{ matrix.framework }} + + steps: + - uses: actions/checkout@v4 + - name: Bootstrap + uses: ./.github/actions/bootstrap + - name: Install Flower from repo + if: ${{ github.repository != 'adap/flower' || github.event.pull_request.head.repo.fork || github.actor == 'dependabot[bot]' }} + run: | + python -m pip install . 
+ - name: Install Flower wheel from artifact store + if: ${{ github.repository == 'adap/flower' && !github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' }} + run: | + python -m pip install https://${{ env.ARTIFACT_BUCKET }}/py/${{ needs.wheel.outputs.dir }}/${{ needs.wheel.outputs.short_sha }}/${{ needs.wheel.outputs.whl_path }} + - name: Create project and install it + run: | + flwr new tmp-${{ matrix.framework }} --framework ${{ matrix.framework }} --username gh_ci + cd tmp-${{ matrix.framework }} + pip install . + - name: Run project + run: | + cd tmp-${{ matrix.framework }} + flwr run --run-config num-server-rounds=1 2>&1 | tee flwr_output.log + if grep -q "ERROR" flwr_output.log; then + exit 1 + fi + + build_and_install: + runs-on: ubuntu-22.04 + timeout-minutes: 10 + needs: wheel + strategy: + matrix: + framework: ["numpy"] + python-version: ["3.9", "3.10", "3.11"] + + name: | + Build & Install / + Python ${{ matrix.python-version }} / + ${{ matrix.framework }} + + steps: + - uses: actions/checkout@v4 + - name: Bootstrap + uses: ./.github/actions/bootstrap + with: + python-version: ${{ matrix.python-version }} + poetry-skip: 'true' + - name: Install Flower from repo + if: ${{ github.repository != 'adap/flower' || github.event.pull_request.head.repo.fork || github.actor == 'dependabot[bot]' }} + run: | + python -m pip install . 
+ - name: Install Flower wheel from artifact store + if: ${{ github.repository == 'adap/flower' && !github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' }} + run: | + python -m pip install https://${{ env.ARTIFACT_BUCKET }}/py/${{ needs.wheel.outputs.dir }}/${{ needs.wheel.outputs.short_sha }}/${{ needs.wheel.outputs.whl_path }} + - name: Create project, build, and install it + run: | + flwr new tmp-${{ matrix.framework }} --framework ${{ matrix.framework }} --username gh_ci + cd tmp-${{ matrix.framework }} + flwr build + flwr install *.fab diff --git a/.github/workflows/framework-release.yml b/.github/workflows/framework-release.yml index a941b47d58fc..e608329872de 100644 --- a/.github/workflows/framework-release.yml +++ b/.github/workflows/framework-release.yml @@ -16,6 +16,8 @@ jobs: if: ${{ github.repository == 'adap/flower' }} name: Publish release runs-on: ubuntu-22.04 + outputs: + flwr-version: ${{ steps.publish.outputs.flwr-version }} steps: - name: Checkout code uses: actions/checkout@v4 @@ -26,10 +28,12 @@ jobs: uses: ./.github/actions/bootstrap - name: Get artifacts and publish + id: publish env: GITHUB_REF: ${{ github.ref }} run: | TAG_NAME=$(echo "${GITHUB_REF_NAME}" | cut -c2-) + echo "flwr-version=$TAG_NAME" >> "$GITHUB_OUTPUT" wheel_name="flwr-${TAG_NAME}-py3-none-any.whl" tar_name="flwr-${TAG_NAME}.tar.gz" @@ -43,3 +47,69 @@ jobs: curl $tar_url --output dist/$tar_name python -m poetry publish -u __token__ -p ${{ secrets.PYPI_TOKEN_RELEASE_FLWR }} + + parameters: + if: ${{ github.repository == 'adap/flower' }} + name: Collect docker build parameters + runs-on: ubuntu-22.04 + timeout-minutes: 10 + needs: publish + outputs: + pip-version: ${{ steps.versions.outputs.pip-version }} + setuptools-version: ${{ steps.versions.outputs.setuptools-version }} + matrix: ${{ steps.matrix.outputs.matrix }} + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - uses: ./.github/actions/bootstrap + id: 
bootstrap + + - id: versions + run: | + echo "pip-version=${{ steps.bootstrap.outputs.pip-version }}" >> "$GITHUB_OUTPUT" + echo "setuptools-version=${{ steps.bootstrap.outputs.setuptools-version }}" >> "$GITHUB_OUTPUT" + + - id: matrix + run: | + python dev/build-docker-image-matrix.py --flwr-version "${{ needs.publish.outputs.flwr-version }}" > matrix.json + echo "matrix=$(cat matrix.json)" >> $GITHUB_OUTPUT + + build-base-images: + if: ${{ github.repository == 'adap/flower' }} + name: Build base images + uses: ./.github/workflows/_docker-build.yml + needs: parameters + strategy: + fail-fast: false + matrix: ${{ fromJson(needs.parameters.outputs.matrix).base }} + with: + namespace-repository: ${{ matrix.images.namespace_repository }} + file-dir: ${{ matrix.images.file_dir }} + build-args: | + PYTHON_VERSION=${{ matrix.images.python_version }} + PIP_VERSION=${{ needs.parameters.outputs.pip-version }} + SETUPTOOLS_VERSION=${{ needs.parameters.outputs.setuptools-version }} + DISTRO=${{ matrix.images.distro.name }} + DISTRO_VERSION=${{ matrix.images.distro.version }} + FLWR_VERSION=${{ matrix.images.flwr_version }} + tags: ${{ matrix.images.tag }} + secrets: + dockerhub-user: ${{ secrets.DOCKERHUB_USERNAME }} + dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }} + + build-binary-images: + if: ${{ github.repository == 'adap/flower' }} + name: Build binary images + uses: ./.github/workflows/_docker-build.yml + needs: [parameters, build-base-images] + strategy: + fail-fast: false + matrix: ${{ fromJson(needs.parameters.outputs.matrix).binary }} + with: + namespace-repository: ${{ matrix.images.namespace_repository }} + file-dir: ${{ matrix.images.file_dir }} + build-args: BASE_IMAGE=${{ matrix.images.base_image }} + tags: ${{ matrix.images.tags }} + secrets: + dockerhub-user: ${{ secrets.DOCKERHUB_USERNAME }} + dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/framework.yml b/.github/workflows/framework.yml index 784f04750c5e..a8ff69204b58 
100644 --- a/.github/workflows/framework.yml +++ b/.github/workflows/framework.yml @@ -23,14 +23,16 @@ jobs: # Latest version which comes cached in the host image can be found here: # https://github.com/actions/runner-images/blob/main/images/linux/Ubuntu2204-Readme.md#python # In case of a mismatch, the job has to download Python to install it. - # Note: Due to a bug in actions/setup-python we have to put 3.10 in - # qoutes as it will otherwise will assume 3.1 - python: [3.8, 3.9, '3.10', '3.11'] + # Note: Due to a bug in actions/setup-python, we have to put "3.10" in + # quotes as it will otherwise assume "3.1" + python: ['3.9', '3.10', '3.11'] name: Python ${{ matrix.python }} steps: - uses: actions/checkout@v4 + with: + fetch-depth: 0 - name: Bootstrap uses: ./.github/actions/bootstrap with: diff --git a/.github/workflows/release-nightly.yml b/.github/workflows/release-nightly.yml index 939a9581871c..fcefff300cb7 100644 --- a/.github/workflows/release-nightly.yml +++ b/.github/workflows/release-nightly.yml @@ -3,7 +3,6 @@ name: Release nightly on: schedule: - cron: "0 23 * * *" - - cron: "30 23 * * *" env: FLWR_TELEMETRY_ENABLED: 0 @@ -25,15 +24,11 @@ jobs: id: bootstrap uses: ./.github/actions/bootstrap - name: Release nightly - if: github.event.schedule == '0 23 * * *' + id: release env: PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }} - run: ./dev/publish-nightly.sh - - name: Read nightly version and name - if: github.event.schedule == '30 23 * * *' - id: release run: | - RESULT=$(./dev/publish-nightly.sh --skip-publish) + RESULT=$(./dev/publish-nightly.sh) if [ "$RESULT" == "There were no commits in the last 24 hours." 
]; then echo "skip=true" >> $GITHUB_OUTPUT fi @@ -45,7 +40,7 @@ jobs: build-docker-base-images: name: Build nightly base images - if: github.repository == 'adap/flower' && needs.release-nightly.outputs.skip != 'true' && github.event.schedule == '30 23 * * *' + if: github.repository == 'adap/flower' && needs.release-nightly.outputs.skip != 'true' uses: ./.github/workflows/_docker-build.yml needs: release-nightly with: @@ -65,7 +60,7 @@ jobs: build-docker-binary-images: name: Build nightly binary images - if: github.repository == 'adap/flower' && needs.release-nightly.outputs.skip != 'true' && github.event.schedule == '30 23 * * *' + if: github.repository == 'adap/flower' && needs.release-nightly.outputs.skip != 'true' uses: ./.github/workflows/_docker-build.yml needs: [release-nightly, build-docker-base-images] strategy: @@ -74,7 +69,9 @@ jobs: images: [ { repository: "flwr/superlink", file_dir: "src/docker/superlink" }, { repository: "flwr/supernode", file_dir: "src/docker/supernode" }, - { repository: "flwr/serverapp", file_dir: "src/docker/serverapp" } + { repository: "flwr/serverapp", file_dir: "src/docker/serverapp" }, + { repository: "flwr/superexec", file_dir: "src/docker/superexec" }, + { repository: "flwr/clientapp", file_dir: "src/docker/clientapp" } ] with: namespace-repository: ${{ matrix.images.repository }} diff --git a/.github/workflows/update_translations.yml b/.github/workflows/update_translations.yml new file mode 100644 index 000000000000..9419f4aaef25 --- /dev/null +++ b/.github/workflows/update_translations.yml @@ -0,0 +1,79 @@ +name: Translations + +on: + schedule: + - cron: '0 0 * * *' # Runs every day at midnight + workflow_dispatch: # Allows to manually trigger the workflow + +jobs: + update-and-pr: + runs-on: ubuntu-22.04 + permissions: + contents: write + pull-requests: write + env: + branch-name: auto-update-trans-text + name: Update text + steps: + - uses: actions/checkout@v4 + + - name: Bootstrap + uses: ./.github/actions/bootstrap + 
with: + python-version: '3.10' + + - name: Install dependencies + run: | + python -m poetry install + pip install sphinx==7.3.7 + + - name: Install pandoc + uses: nikeee/setup-pandoc@v1 + + - name: Update text and translations for all locales + run: | + cd doc + make update-text + for langDir in locales/*; do + if [ -d "$langDir" ]; then + lang=$(basename $langDir) + echo "Updating language $lang" + make update-lang lang=$lang + fi + done + + - name: Commit changes + run: | + git config --local user.email "41898282+github-actions[bot]@users.noreply.github.com" + git config --local user.name "github-actions[bot]" + git add doc/locales + git commit -m "Update text and language files" + continue-on-error: true + + - name: Calculate diff # Even without doc changes the update-lang command will generate 228 additions and 60 deletions, so we only want to open a PR when there is more + id: calculate_diff + run: | + additions=$(git diff --numstat HEAD^1 | awk '{s+=$1} END {print s}') + deletions=$(git diff --numstat HEAD^1 | awk '{s+=$2} END {print s}') + echo "Additions: $additions" + echo "Deletions: $deletions" + echo "additions=$additions" >> $GITHUB_OUTPUT + echo "deletions=$deletions" >> $GITHUB_OUTPUT + + - name: Push changes + if: steps.calculate_diff.outputs.additions > 228 && steps.calculate_diff.outputs.deletions > 60 + uses: ad-m/github-push-action@master + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + branch: '${{ env.branch-name }}' + + - name: Create Pull Request + if: steps.calculate_diff.outputs.additions > 228 && steps.calculate_diff.outputs.deletions > 60 + uses: peter-evans/create-pull-request@v6 + with: + token: ${{ secrets.GITHUB_TOKEN }} + branch: '${{ env.branch-name }}' + delete-branch: true + title: 'docs(framework:skip) Update source texts for translations (automated)' + body: 'This PR is auto-generated to update text and language files.' 
+ draft: false diff --git a/README.md b/README.md index a010abfcb2f5..7aa73fe609bb 100644 --- a/README.md +++ b/README.md @@ -18,6 +18,7 @@ [![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg)](https://github.com/adap/flower/blob/main/CONTRIBUTING.md) ![Build](https://github.com/adap/flower/actions/workflows/framework.yml/badge.svg) [![Downloads](https://static.pepy.tech/badge/flwr)](https://pepy.tech/project/flwr) +[![Docker Hub](https://img.shields.io/badge/Docker%20Hub-flwr-blue)](https://hub.docker.com/u/flwr) [![Slack](https://img.shields.io/badge/Chat-Slack-red)](https://flower.ai/join-slack) Flower (`flwr`) is a framework for building federated learning systems. The @@ -101,6 +102,7 @@ Flower Baselines is a collection of community-contributed projects that reproduc - [FedNova](https://github.com/adap/flower/tree/main/baselines/fednova) - [HeteroFL](https://github.com/adap/flower/tree/main/baselines/heterofl) - [FedAvgM](https://github.com/adap/flower/tree/main/baselines/fedavgm) +- [FedRep](https://github.com/adap/flower/tree/main/baselines/fedrep) - [FedStar](https://github.com/adap/flower/tree/main/baselines/fedstar) - [FedWav2vec2](https://github.com/adap/flower/tree/main/baselines/fedwav2vec2) - [FjORD](https://github.com/adap/flower/tree/main/baselines/fjord) @@ -143,23 +145,23 @@ Other [examples](https://github.com/adap/flower/tree/main/examples): - [PyTorch: From Centralized to Federated](https://github.com/adap/flower/tree/main/examples/pytorch-from-centralized-to-federated) - [Vertical FL](https://github.com/adap/flower/tree/main/examples/vertical-fl) - [Federated Finetuning of OpenAI's Whisper](https://github.com/adap/flower/tree/main/examples/whisper-federated-finetuning) -- [Federated Finetuning of Large Language Model](https://github.com/adap/flower/tree/main/examples/llm-flowertune) -- [Federated Finetuning of a Vision Transformer](https://github.com/adap/flower/tree/main/examples/vit-finetune) +- [Federated Finetuning of 
Large Language Model](https://github.com/adap/flower/tree/main/examples/flowertune-llm) +- [Federated Finetuning of a Vision Transformer](https://github.com/adap/flower/tree/main/examples/flowertune-vit) - [Advanced Flower with TensorFlow/Keras](https://github.com/adap/flower/tree/main/examples/advanced-tensorflow) - [Advanced Flower with PyTorch](https://github.com/adap/flower/tree/main/examples/advanced-pytorch) -- Single-Machine Simulation of Federated Learning Systems ([PyTorch](https://github.com/adap/flower/tree/main/examples/simulation-pytorch)) ([Tensorflow](https://github.com/adap/flower/tree/main/examples/simulation-tensorflow)) - [Comprehensive Flower+XGBoost](https://github.com/adap/flower/tree/main/examples/xgboost-comprehensive) - [Flower through Docker Compose and with Grafana dashboard](https://github.com/adap/flower/tree/main/examples/flower-via-docker-compose) - [Flower with KaplanMeierFitter from the lifelines library](https://github.com/adap/flower/tree/main/examples/federated-kaplan-meier-fitter) - [Sample Level Privacy with Opacus](https://github.com/adap/flower/tree/main/examples/opacus) - [Sample Level Privacy with TensorFlow-Privacy](https://github.com/adap/flower/tree/main/examples/tensorflow-privacy) +- [Flower with a Tabular Dataset](https://github.com/adap/flower/tree/main/examples/fl-tabular) ## Community Flower is built by a wonderful community of researchers and engineers. [Join Slack](https://flower.ai/join-slack) to meet them, [contributions](#contributing-to-flower) are welcome. - + ## Citation diff --git a/baselines/README.md b/baselines/README.md index 3a84df02d8de..75bcccb68b2a 100644 --- a/baselines/README.md +++ b/baselines/README.md @@ -1,10 +1,9 @@ # Flower Baselines +> [!NOTE] > We are changing the way we structure the Flower baselines. While we complete the transition to the new format, you can still find the existing baselines in the `flwr_baselines` directory. 
Currently, you can make use of baselines for [FedAvg](https://github.com/adap/flower/tree/main/baselines/flwr_baselines/flwr_baselines/publications/fedavg_mnist), [FedOpt](https://github.com/adap/flower/tree/main/baselines/flwr_baselines/flwr_baselines/publications/adaptive_federated_optimization), and [LEAF-FEMNIST](https://github.com/adap/flower/tree/main/baselines/flwr_baselines/flwr_baselines/publications/leaf/femnist). -> The documentation below has been updated to reflect the new way of using Flower baselines. - ## Structure @@ -15,17 +14,15 @@ baselines// ├── README.md ├── pyproject.toml └── - ├── *.py # several .py files including main.py and __init__.py - └── conf - └── *.yaml # one or more Hydra config files + └── *.py # several .py files ``` -Please note that some baselines might include additional files (e.g. a `requirements.txt`) or a hierarchy of `.yaml` files for [Hydra](https://hydra.cc/). ## Running the baselines -Each baseline is self-contained in its own directory. Furthermore, each baseline defines its own Python environment using [Poetry](https://python-poetry.org/docs/) via a `pyproject.toml` file and [`pyenv`](https://github.com/pyenv/pyenv). If you haven't setup `Poetry` and `pyenv` already on your machine, please take a look at the [Documentation](https://flower.ai/docs/baselines/how-to-use-baselines.html#setting-up-your-machine) for a guide on how to do so. +> [!NOTE] +> We are in the process of migrating all baselines to use `flwr run`. Those baselines that remain using the previous system (i.e. using [Poetry](https://python-poetry.org/), [Hydra](https://hydra.cc/) and [start_simulation](https://flower.ai/docs/framework/ref-api/flwr.simulation.start_simulation.html)) might require you to first setup `Poetry` and `pyenv` already on your machine, please take a look at the [Documentation](https://flower.ai/docs/baselines/how-to-use-baselines.html#setting-up-your-machine) for a guide on how to do so. 
-Assuming `pyenv` and `Poetry` are already installed on your system. Running a baseline can be done by: +Each baseline is self-contained in its own directory. To run a baseline: 1. Cloning the flower repository @@ -34,11 +31,7 @@ Assuming `pyenv` and `Poetry` are already installed on your system. Running a ba ``` 2. Navigate inside the directory of the baseline you'd like to run. -3. Follow the `[Environment Setup]` instructions in the `README.md`. In most cases this will require you to just do: - - ```bash - poetry install - ``` +3. Follow the `[Environment Setup]` instructions in the `README.md`. 4. Run the baseline as indicated in the `[Running the Experiments]` section in the `README.md` or in the `[Expected Results]` section to reproduce the experiments in the paper. @@ -46,17 +39,22 @@ Assuming `pyenv` and `Poetry` are already installed on your system. Running a ba Do you have a new federated learning paper and want to add a new baseline to Flower? Or do you want to add an experiment to an existing baseline paper? Great, we really appreciate your contribution !! +> [!TIP] +> A more verbose version of these steps can be found in the [Flower Baselines documentation](https://flower.ai/docs/baselines/how-to-contribute-baselines.html). + The steps to follow are: +1. Create a new Python 3.10 environment and install Flower (`pip install flwr`) 1. Fork the Flower repo and clone it into your machine. -2. Navigate to the `baselines/` directory, choose a single-word (and **lowercase**) name for your baseline, and from there run: +2. Navigate to the `baselines/` directory, from there and with your environment activated, run: ```bash - # This will create a new directory with the same structure as `baseline_template`. - ./dev/create-baseline.sh + # Choose option "Flower Baseline" when prompted + flwr new ``` -3. Then, go inside your baseline directory and continue with the steps detailed in `EXTENDED_README.md` and `README.md`. -4. 
Once your code is ready and you have checked that following the instructions in your `README.md` the Python environment can be created correctly and that running the code following your instructions can reproduce the experiments in the paper, you just need to create a Pull Request (PR). Then, the process to merge your baseline into the Flower repo will begin! +3. Then, go inside your baseline directory and continue with the steps detailed in the `README.md`. +4. Once your code is ready, check that you have completed all the sections in the `README.md` and that, if a new environment is created, your baseline still runs (i.e. play the role of a person running the baseline you want to contribute). +5. Create a Pull Request (PR). Then, the process to merge your baseline into the Flower repo will begin! Further resources: diff --git a/baselines/baseline_template/EXTENDED_README.md b/baselines/baseline_template/EXTENDED_README.md deleted file mode 100644 index 9c8f5bc72fa9..000000000000 --- a/baselines/baseline_template/EXTENDED_README.md +++ /dev/null @@ -1,123 +0,0 @@ - -# Extended Readme - -> The baselines are expected to run in a machine running Ubuntu 22.04 - -While `README.md` should include information about the baseline you implement and how to run it, this _extended_ readme provides info on what's the expected directory structure for a new baseline and more generally the instructions to follow before your baseline can be merged into the Flower repository. Please follow closely these instructions. It is likely that you have already completed steps 1-2. - -1. Fork the Flower repository and clone it. -2. Navigate to the `baselines/` directory and from there run: - ```bash - # This will create a new directory with the same structure as this `baseline_template` directory. - ./dev/create-baseline.sh - ``` -3. All your code and configs should go into a sub-directory with the same name as the name of your baseline. 
- * The sub-directory contains a series of Python scripts that you can edit. Please stick to these files and consult with us if you need additional ones. - * There is also a basic config structure in `/conf` ready be parsed by [Hydra](https://hydra.cc/) when executing your `main.py`. -4. Therefore, the directory structure in your baseline should look like: - ```bash - baselines/ - ├── README.md # describes your baseline and everything needed to use it - ├── EXTENDED_README.md # to remove before creating your PR - ├── pyproject.toml # details your Python environment - └── - ├── *.py # several .py files including main.py and __init__.py - └── conf - └── *.yaml # one or more Hydra config files - - ``` -> :warning: Make sure the variable `name` in `pyproject.toml` is set to the name of the sub-directory containing all your code. - -5. Add your dependencies to the `pyproject.toml` (see below a few examples on how to do it). Read more about Poetry below in this `EXTENDED_README.md`. -6. Regularly check that your coding style and the documentation you add follow good coding practices. To test whether your code meets the requirements, please run the following: - ```bash - # After activating your environment and from your baseline's directory - cd .. # to go to the top-level directory of all baselines - ./dev/test-baseline.sh - ./dev/test-baseline-structure.sh - ``` - Both `test-baseline.sh` and `test-baseline-structure.sh` will also be automatically run when you create a PR, and both tests need to pass for the baseline to be merged. - To automatically solve some formatting issues and apply easy fixes, please run the formatting script: - ```bash - # After activating your environment and from your baseline's directory - cd .. # to go to the top-level directory of all baselines - ./dev/format-baseline.sh - ``` -7. 
Ensure that the Python environment for your baseline can be created without errors by simply running `poetry install` and that this is properly described later when you complete the `Environment Setup` section in `README.md`. This is specially important if your environment requires additional steps after doing `poetry install`. -8. Ensure that your baseline runs with default arguments by running `poetry run python -m .main`. Then, describe this and other forms of running your code in the `Running the Experiments` section in `README.md`. -9. Once your code is ready and you have checked: - * that following the instructions in your `README.md` the Python environment can be created correctly - - * that running the code following your instructions can reproduce the experiments in the paper - - , then you just need to create a Pull Request (PR) to kickstart the process of merging your baseline into the Flower repository. - -> Once you are happy to merge your baseline contribution, please delete this `EXTENDED_README.md` file. - - -## About Poetry - -We use Poetry to manage the Python environment for each individual baseline. You can follow the instructions [here](https://python-poetry.org/docs/) to install Poetry in your machine. - - -### Specifying a Python Version (optional) -By default, Poetry will use the Python version in your system. In some settings, you might want to specify a particular version of Python to use inside your Poetry environment. You can do so with [`pyenv`](https://github.com/pyenv/pyenv). Check the documentation for the different ways of installing `pyenv`, but one easy way is using the [automatic installer](https://github.com/pyenv/pyenv-installer): -```bash -curl https://pyenv.run | bash # then, don't forget links to your .bashrc/.zshrc -``` - -You can then install any Python version with `pyenv install ` (e.g. `pyenv install 3.9.17`). 
Then, in order to use that version for your baseline, you'd do the following: - -```bash -# cd to your baseline directory (i.e. where the `pyproject.toml` is) -pyenv local - -# set that version for poetry -poetry env use - -# then you can install your Poetry environment (see the next setp) -``` - -### Installing Your Environment -With the Poetry tool already installed, you can create an environment for this baseline with commands: -```bash -# run this from the same directory as the `pyproject.toml` file is -poetry install -``` - -This will create a basic Python environment with just Flower and additional packages, including those needed for simulation. Next, you should add the dependencies for your code. It is **critical** that you fix the version of the packages you use using a `=` not a `=^`. You can do so via [`poetry add`](https://python-poetry.org/docs/cli/#add). Below are some examples: - -```bash -# For instance, if you want to install tqdm -poetry add tqdm==4.65.0 - -# If you already have a requirements.txt, you can add all those packages (but ensure you have fixed the version) in one go as follows: -poetry add $( cat requirements.txt ) -``` -With each `poetry add` command, the `pyproject.toml` gets automatically updated so you don't need to keep that `requirements.txt` as part of this baseline. - - -More critically however, is adding your ML framework of choice to the list of dependencies. For some frameworks you might be able to do so with the `poetry add` command. Check [the Poetry documentation](https://python-poetry.org/docs/cli/#add) for how to add packages in various ways. 
For instance, let's say you want to use PyTorch: - -```bash -# with plain `pip` you'd run a command such as: -pip install torch==1.13.1+cu117 torchvision==0.14.1+cu117 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu117 - -# to add the same 3 dependencies to your Poetry environment you'd need to add the URL to the wheel that the above pip command auto-resolves for you. -# You can find those wheels in `https://download.pytorch.org/whl/cu117`. Copy the link and paste it after the `poetry add` command. -# For instance to add `torch==1.13.1+cu117` and a x86 Linux system with Python3.8 you'd: -poetry add https://download.pytorch.org/whl/cu117/torch-1.13.1%2Bcu117-cp38-cp38-linux_x86_64.whl -# you'll need to repeat this for both `torchvision` and `torchaudio` -``` -The above is just an example of how you can add these dependencies. Please refer to the Poetry documentation to extra reference. - -If all attempts fail, you can still install packages via standard `pip`. You'd first need to source/activate your Poetry environment. -```bash -# first ensure you have created your environment -# and installed the base packages provided in the template -poetry install - -# then activate it -poetry shell -``` -Now you are inside your environment (pretty much as when you use `virtualenv` or `conda`) so you can install further packages with `pip`. Please note that, unlike with `poetry add`, these extra requirements won't be captured by `pyproject.toml`. Therefore, please ensure that you provide all instructions needed to: (1) create the base environment with Poetry and (2) install any additional dependencies via `pip` when you complete your `README.md`. 
\ No newline at end of file diff --git a/baselines/baseline_template/README.md b/baselines/baseline_template/README.md deleted file mode 100644 index ee6e1e96976f..000000000000 --- a/baselines/baseline_template/README.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: title of the paper -url: URL to the paper page (not the pdf) -labels: [label1, label2] # please add between 4 and 10 single-word (maybe two-words) labels (e.g. system heterogeneity, image classification, asynchronous, weight sharing, cross-silo). Do not use "" -dataset: [dataset1, dataset2] # list of datasets you include in your baseline. Do not use "" ---- - -# :warning: *_Title of your baseline_* - -> Note: If you use this baseline in your work, please remember to cite the original authors of the paper as well as the Flower paper. - -> :warning: This is the template to follow when creating a new Flower Baseline. Please follow the instructions in `EXTENDED_README.md` - -> :warning: Please follow the instructions carefully. You can see the [FedProx-MNIST baseline](https://github.com/adap/flower/tree/main/baselines/fedprox) as an example of a baseline that followed this guide. - -> :warning: Please complete the metadata section at the very top of this README. This generates a table at the top of the file that will facilitate indexing baselines. - -**Paper:** :warning: *_add the URL of the paper page (not to the .pdf). For instance if you link a paper on ArXiv, add here the URL to the abstract page (e.g. https://arxiv.org/abs/1512.03385). If your paper is in from a journal or conference proceedings, please follow the same logic._* - -**Authors:** :warning: *_list authors of the paper_* - -**Abstract:** :warning: *_add here the abstract of the paper you are implementing_* - - -## About this baseline - -**What’s implemented:** :warning: *_Concisely describe what experiment(s) in the publication can be replicated by running the code. Please only use a few sentences. 
Start with: “The code in this directory …”_* - -**Datasets:** :warning: *_List the datasets you used (if you used a medium to large dataset, >10GB please also include the sizes of the dataset)._* - -**Hardware Setup:** :warning: *_Give some details about the hardware (e.g. a server with 8x V100 32GB and 256GB of RAM) you used to run the experiments for this baseline. Someone out there might not have access to the same resources you have so, could list the absolute minimum hardware needed to run the experiment in a reasonable amount of time ? (e.g. minimum is 1x 16GB GPU otherwise a client model can’t be trained with a sufficiently large batch size). Could you test this works too?_* - -**Contributors:** :warning: *_let the world know who contributed to this baseline. This could be either your name, your name and affiliation at the time, or your GitHub profile name if you prefer. If multiple contributors signed up for this baseline, please list yourself and your colleagues_* - - -## Experimental Setup - -**Task:** :warning: *_what’s the primary task that is being federated? (e.g. image classification, next-word prediction). If you have experiments for several, please list them_* - -**Model:** :warning: *_provide details about the model you used in your experiments (if more than use a list). If your model is small, describing it as a table would be :100:. Some FL methods do not use an off-the-shelve model (e.g. ResNet18) instead they create your own. If this is your case, please provide a summary here and give pointers to where in the paper (e.g. Appendix B.4) is detailed._* - -**Dataset:** :warning: *_Earlier you listed already the datasets that your baseline uses. Now you should include a breakdown of the details about each of them. Please include information about: how the dataset is partitioned (e.g. 
LDA with alpha 0.1 as default and all clients have the same number of training examples; or each client gets assigned a different number of samples following a power-law distribution with each client only instances of 2 classes)? if your dataset is naturally partitioned just state “naturally partitioned”; how many partitions there are (i.e. how many clients)? Please include this an all information relevant about the dataset and its partitioning into a table._* - -**Training Hyperparameters:** :warning: *_Include a table with all the main hyperparameters in your baseline. Please show them with their default value._* - - -## Environment Setup - -:warning: _The Python environment for all baselines should follow these guidelines in the `EXTENDED_README`. Specify the steps to create and activate your environment. If there are any external system-wide requirements, please include instructions for them too. These instructions should be comprehensive enough so anyone can run them (if non standard, describe them step-by-step)._ - - -## Running the Experiments - -:warning: _Provide instructions on the steps to follow to run all the experiments._ -```bash -# The main experiment implemented in your baseline using default hyperparameters (that should be setup in the Hydra configs) should run (including dataset download and necessary partitioning) by executing the command: - -poetry run python -m .main # where is the name of this directory and that of the only sub-directory in this directory (i.e. where all your source code is) - -# If you are using a dataset that requires a complicated download (i.e. not using one natively supported by TF/PyTorch) + preprocessing logic, you might want to tell people to run one script first that will do all that. Please ensure the download + preprocessing can be configured to suit (at least!) a different download directory (and use as default the current directory). 
The expected command to run to do this is: - -poetry run python -m .dataset_preparation - -# It is expected that you baseline supports more than one dataset and different FL settings (e.g. different number of clients, dataset partitioning methods, etc). Please provide a list of commands showing how these experiments are run. Include also a short explanation of what each one does. Here it is expected you'll be using the Hydra syntax to override the default config. - -poetry run python -m .main -. -. -. -poetry run python -m .main -``` - - -## Expected Results - -:warning: _Your baseline implementation should replicate several of the experiments in the original paper. Please include here the exact command(s) needed to run each of those experiments followed by a figure (e.g. a line plot) or table showing the results you obtained when you ran the code. Below is an example of how you can present this. Please add command followed by results for all your experiments._ - -```bash -# it is likely that for one experiment you need to sweep over different hyperparameters. You are encouraged to use Hydra's multirun functionality for this. This is an example of how you could achieve this for some typical FL hyperparameteres - -poetry run python -m .main --multirun num_client_per_round=5,10,50 dataset=femnist,cifar10 -# the above command will run a total of 6 individual experiments (because 3client_configs x 2datasets = 6 -- you can think of it as a grid). - -[Now show a figure/table displaying the results of the above command] - -# add more commands + plots for additional experiments. -``` diff --git a/baselines/baseline_template/baseline_template/client.py b/baselines/baseline_template/baseline_template/client.py deleted file mode 100644 index d2e2206111f3..000000000000 --- a/baselines/baseline_template/baseline_template/client.py +++ /dev/null @@ -1,5 +0,0 @@ -"""Define your client class and a function to construct such clients. 
- -Please overwrite `flwr.client.NumPyClient` or `flwr.client.Client` and create a function -to instantiate your client. -""" diff --git a/baselines/baseline_template/baseline_template/conf/base.yaml b/baselines/baseline_template/baseline_template/conf/base.yaml deleted file mode 100644 index 2d65b3b989b2..000000000000 --- a/baselines/baseline_template/baseline_template/conf/base.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -# this is the config that will be loaded as default by main.py -# Please follow the provided structure (this will ensuring all baseline follow -# a similar configuration structure and hence be easy to customise) - -dataset: - # dataset config - -model: - # model config - -strategy: - _target_: # points to your strategy (either custom or exiting in Flower) - # rest of strategy config - -client: - # client config diff --git a/baselines/baseline_template/baseline_template/dataset.py b/baselines/baseline_template/baseline_template/dataset.py deleted file mode 100644 index 5e436abe12fb..000000000000 --- a/baselines/baseline_template/baseline_template/dataset.py +++ /dev/null @@ -1,10 +0,0 @@ -"""Handle basic dataset creation. - -In case of PyTorch it should return dataloaders for your dataset (for both the clients -and the server). If you are using a custom dataset class, this module is the place to -define it. If your dataset requires to be downloaded (and this is not done -automatically -- e.g. as it is the case for many dataset in TorchVision) and -partitioned, please include all those functions and logic in the -`dataset_preparation.py` module. You can use all those functions from functions/methods -defined here of course. 
-""" diff --git a/baselines/baseline_template/baseline_template/dataset_preparation.py b/baselines/baseline_template/baseline_template/dataset_preparation.py deleted file mode 100644 index bd3440b9276b..000000000000 --- a/baselines/baseline_template/baseline_template/dataset_preparation.py +++ /dev/null @@ -1,34 +0,0 @@ -"""Handle the dataset partitioning and (optionally) complex downloads. - -Please add here all the necessary logic to either download, uncompress, pre/post-process -your dataset (or all of the above). If the desired way of running your baseline is to -first download the dataset and partition it and then run the experiments, please -uncomment the lines below and tell us in the README.md (see the "Running the Experiment" -block) that this file should be executed first. -""" -# import hydra -# from hydra.core.hydra_config import HydraConfig -# from hydra.utils import call, instantiate -# from omegaconf import DictConfig, OmegaConf - - -# @hydra.main(config_path="conf", config_name="base", version_base=None) -# def download_and_preprocess(cfg: DictConfig) -> None: -# """Does everything needed to get the dataset. - -# Parameters -# ---------- -# cfg : DictConfig -# An omegaconf object that stores the hydra config. -# """ - -# ## 1. print parsed config -# print(OmegaConf.to_yaml(cfg)) - -# # Please include here all the logic -# # Please use the Hydra config style as much as possible specially -# # for parts that can be customised (e.g. how data is partitioned) - -# if __name__ == "__main__": - -# download_and_preprocess() diff --git a/baselines/baseline_template/baseline_template/main.py b/baselines/baseline_template/baseline_template/main.py deleted file mode 100644 index 25ae1bec6a10..000000000000 --- a/baselines/baseline_template/baseline_template/main.py +++ /dev/null @@ -1,57 +0,0 @@ -"""Create and connect the building blocks for your experiments; start the simulation. 
- -It includes processioning the dataset, instantiate strategy, specify how the global -model is going to be evaluated, etc. At the end, this script saves the results. -""" -# these are the basic packages you'll need here -# feel free to remove some if aren't needed -import hydra -from omegaconf import DictConfig, OmegaConf - - -@hydra.main(config_path="conf", config_name="base", version_base=None) -def main(cfg: DictConfig) -> None: - """Run the baseline. - - Parameters - ---------- - cfg : DictConfig - An omegaconf object that stores the hydra config. - """ - # 1. Print parsed config - print(OmegaConf.to_yaml(cfg)) - - # 2. Prepare your dataset - # here you should call a function in datasets.py that returns whatever is needed to: - # (1) ensure the server can access the dataset used to evaluate your model after - # aggregation - # (2) tell each client what dataset partitions they should use (e.g. a this could - # be a location in the file system, a list of dataloader, a list of ids to extract - # from a dataset, it's up to you) - - # 3. Define your clients - # Define a function that returns another function that will be used during - # simulation to instantiate each individual client - # client_fn = client.() - - # 4. Define your strategy - # pass all relevant argument (including the global dataset used after aggregation, - # if needed by your method.) - # strategy = instantiate(cfg.strategy, ) - - # 5. Start Simulation - # history = fl.simulation.start_simulation() - - # 6. Save your results - # Here you can save the `history` returned by the simulation and include - # also other buffers, statistics, info needed to be saved in order to later - # on generate the plots you provide in the README.md. You can for instance - # access elements that belong to the strategy for example: - # data = strategy.get_my_custom_data() -- assuming you have such method defined. - # Hydra will generate for you a directory each time you run the code. 
You - # can retrieve the path to that directory with this: - # save_path = HydraConfig.get().runtime.output_dir - - -if __name__ == "__main__": - main() diff --git a/baselines/baseline_template/baseline_template/models.py b/baselines/baseline_template/baseline_template/models.py deleted file mode 100644 index 71fa553d1f59..000000000000 --- a/baselines/baseline_template/baseline_template/models.py +++ /dev/null @@ -1,7 +0,0 @@ -"""Define our models, and training and eval functions. - -If your model is 100% off-the-shelf (e.g. directly from torchvision without requiring -modifications) you might be better off instantiating your model directly from the Hydra -config. In this way, swapping your model for another one can be done without changing -the python code at all -""" diff --git a/baselines/baseline_template/baseline_template/server.py b/baselines/baseline_template/baseline_template/server.py deleted file mode 100644 index 2fd7d42cde5a..000000000000 --- a/baselines/baseline_template/baseline_template/server.py +++ /dev/null @@ -1,5 +0,0 @@ -"""Create global evaluation function. - -Optionally, also define a new Server class (please note this is not needed in most -settings). -""" diff --git a/baselines/baseline_template/baseline_template/strategy.py b/baselines/baseline_template/baseline_template/strategy.py deleted file mode 100644 index 17436c401c30..000000000000 --- a/baselines/baseline_template/baseline_template/strategy.py +++ /dev/null @@ -1,5 +0,0 @@ -"""Optionally define a custom strategy. - -Needed only when the strategy is not yet implemented in Flower or because you want to -extend or modify the functionality of an existing strategy. -""" diff --git a/baselines/baseline_template/baseline_template/utils.py b/baselines/baseline_template/baseline_template/utils.py deleted file mode 100644 index 9a831719d623..000000000000 --- a/baselines/baseline_template/baseline_template/utils.py +++ /dev/null @@ -1,6 +0,0 @@ -"""Define any utility function. 
- -They are not directly relevant to the other (more FL specific) python modules. For -example, you may define here things like: loading a model from a checkpoint, saving -results, plotting. -""" diff --git a/baselines/dev/create-baseline.sh b/baselines/dev/create-baseline.sh deleted file mode 100755 index 53cd79c569aa..000000000000 --- a/baselines/dev/create-baseline.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -# This script duplicates the `baseline_template` directory and changes its name -# to the one you specify when running this script. That name is also used to -# rename the subdirectory inside your new baseline directory as well as to set -# the Python package name that Poetry will build - -set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../ - -template="baseline_template" -name=$1 - -# copying directory -echo "Copying '$template' and renaming it to '$name'" -cp -r $template $name - -# renaming sub-directory -echo "Renaming sub-directory as '$name'" -mv $name/$template $name/$name - -# adjusting package name in pyproject.toml -cd $name -if [[ "$OSTYPE" == "darwin"* ]]; then - sed -i '' -e "s//$name/" pyproject.toml -else - sed -i -e "s//$name/" pyproject.toml -fi - -echo "!!! Your directory for your baseline '$name' is ready." 
diff --git a/baselines/doc/source/conf.py b/baselines/doc/source/conf.py index a9525c44ab7b..a2667dbcf006 100644 --- a/baselines/doc/source/conf.py +++ b/baselines/doc/source/conf.py @@ -37,7 +37,7 @@ author = "The Flower Authors" # The full version, including alpha/beta/rc tags -release = "1.8.0" +release = "1.13.0" # -- General configuration --------------------------------------------------- diff --git a/baselines/doc/source/how-to-contribute-baselines.rst b/baselines/doc/source/how-to-contribute-baselines.rst index b568e73f1c11..429ac714c1aa 100644 --- a/baselines/doc/source/how-to-contribute-baselines.rst +++ b/baselines/doc/source/how-to-contribute-baselines.rst @@ -6,16 +6,14 @@ Do you have a new federated learning paper and want to add a new baseline to Flo The goal of Flower Baselines is to reproduce experiments from popular papers to accelerate researchers by enabling faster comparisons to new strategies, datasets, models, and federated pipelines in general. Before you start to work on a new baseline or experiment, please check the `Flower Issues `_ or `Flower Pull Requests `_ to see if someone else is already working on it. Please open a new issue if you are planning to work on a new baseline or experiment with a short description of the corresponding paper and the experiment you want to contribute. +If you are proposing a brand new baseline, please indicate what experiments from the paper you are planning to include. Requirements ------------ -Contributing a new baseline is really easy. You only have to make sure that your federated learning experiments are running with Flower and replicate the results of a paper. Flower baselines need to make use of: +Contributing a new baseline is really easy. You only have to make sure that your federated learning experiments run with Flower, use `Flower Datasets `_, and replicate the results of a paper. +Preferably, the baselines make use of PyTorch, but other ML frameworks are also welcome. 
The baselines are expected to run in a machine with Ubuntu 22.04, but if yours runs also on macOS even better! -* `Poetry `_ to manage the Python environment. -* `Hydra `_ to manage the configuration files for your experiments. - -You can find more information about how to setup Poetry in your machine in the ``EXTENDED_README.md`` that is generated when you prepare your baseline. Add a new Flower Baseline ------------------------- @@ -27,11 +25,18 @@ Let's say you want to contribute the code of your most recent Federated Learning #. **Get the Flower source code on your machine** #. Fork the Flower codebase: go to the `Flower GitHub repo `_ and fork the code (click the *Fork* button in the top-right corner and follow the instructions) #. Clone the (forked) Flower source code: :code:`git clone git@github.com:[your_github_username]/flower.git` - #. Open the code in your favorite editor. -#. **Use the provided script to create your baseline directory** - #. Navigate to the baselines directory and run :code:`./dev/create-baseline.sh fedawesome` - #. A new directory in :code:`baselines/fedawesome` is created. - #. Follow the instructions in :code:`EXTENDED_README.md` and :code:`README.md` in your baseline directory. +#. **Create a new baseline using the template** + #. Create a new Python environment with Python 3.10 (we recommend doing this with `pyenv `_) + #. Install flower with: :code:`pip install flwr`. + #. Navigate to the baselines directory and run: :code:`flwr new fedawesome`. When prompted, choose the option :code:`Flower Baseline`. + #. A new directory in :code:`baselines/fedawesome` is created with the structure needed for a Flower Baseline. + #. Follow the instructions in the :code:`README.md` in your baseline directory. + + .. tip:: + At this point, your baseline contains source code showing how a simple :code:`PyTorch+CIFAR10` project can be built with Flower. 
+ You can run it directly by executing :code:`flwr run .` from inside the directory of your baseline. Update the code with that + needed to implement your baseline. + #. **Open a pull request** #. Stage your changes: :code:`git add .` #. Commit & push: :code:`git commit -m "Create new FedAwesome baseline" ; git push` @@ -49,15 +54,18 @@ Further reading: Usability --------- -Flower is known and loved for its usability. Therefore, make sure that your baseline or experiment can be executed with a single command such as: +Flower is known and loved for its usability. Therefore, make sure that your baseline or experiment can be executed with a single command after installing the baseline project: .. code-block:: bash - poetry run python -m .main - - # or, once sourced into your environment - python -m .main + # Install the baseline project + pip install -e . + + # Run the baseline using default config + flwr run . + + # Run the baseline overriding the config + flwr run . --run-config lr=0.01,num-server-rounds=200 -We provide you with a `template-baseline `_ to use as guidance when contributing your baseline. Having all baselines follow a homogenous structure helps users to tryout many baselines without the overheads of having to understand each individual codebase. Similarly, by using Hydra throughout, users will immediately know how to parameterise your experiments directly from the command line. -We look forward to your contribution! +We look forward to your contribution! \ No newline at end of file diff --git a/baselines/doc/source/how-to-use-baselines.rst b/baselines/doc/source/how-to-use-baselines.rst index 4704a9b6074e..ec65f8f7d5ee 100644 --- a/baselines/doc/source/how-to-use-baselines.rst +++ b/baselines/doc/source/how-to-use-baselines.rst @@ -5,7 +5,6 @@ Use Baselines We are changing the way we structure the Flower baselines. While we complete the transition to the new format, you can still find the existing baselines and use them: `baselines (old) `_. 
Currently, you can make use of baselines for `FedAvg `_, `FedOpt `_, and `LEAF-FEMNIST `_. - The documentation below has been updated to reflect the new way of using Flower baselines. Structure --------- @@ -15,87 +14,116 @@ All baselines are available in the directory `baselines / + ├── LICENSE ├── README.md - ├── pyproject.toml + ├── pyproject.toml # defines dependencies + ├── _static # optionally a directory to save plots └── - ├── *.py # several .py files including main.py and __init__.py - └── conf - └── *.yaml # one or more Hydra config files - -Please note that some baselines might include additional files (e.g. a :code:`requirements.txt`) or a hierarchy of :code:`.yaml` files for `Hydra `_. + └── *.py # several .py files Setting up your machine ----------------------- -.. note:: - Flower baselines are designed to run on Ubuntu 22.04. While a GPU is not required to run the baselines, some of the more computationally demanding ones do benefit from GPU acceleration. +.. tip:: + Flower baselines are designed to run on Ubuntu 22.04 and Python 3.10. While a GPU is not required to run the baselines, some of the more computationally demanding ones do benefit from GPU acceleration. + All baselines are expected to make use of `pyenv `_. -Common to all baselines is `Poetry `_, a tool to manage Python dependencies. Baselines also make use of `Pyenv `_. You'll need to install both on your system before running a baseline. What follows is a step-by-step guide on getting :code:`pyenv` and :code:`Poetry` installed on your system. +.. note:: + We are in the process of migrating all baselines to use `flwr run`. Those that haven't yet been migrated still make use of `Poetry `_, a tool to manage Python dependencies. + Identifying whether the baseline you want to run requires Poetry or not is easy: check if the `Environment Setup` section in the baseline readme mentions Poetry. + Follow the instructions later in this section if you need to setup Poetry in your system. 
-Let's begin by installing :code:`pyenv`. We'll be following the standard procedure. Please refer to the `pyenv docs `_ for alternative ways of installing it. +Let's begin by installing :code:`pyenv`. We'll be following the standard procedure. Please refer to the `pyenv docs `_ for alternative ways of installing it, including for platforms other than Ubuntu. .. code-block:: bash - # first install a few packages needed later for pyenv - sudo apt install build-essential zlib1g-dev libssl-dev libsqlite3-dev \ - libreadline-dev libbz2-dev libffi-dev liblzma-dev + # first install a few packages needed later for pyenv + sudo apt install build-essential zlib1g-dev libssl-dev libsqlite3-dev \ + libreadline-dev libbz2-dev libffi-dev liblzma-dev - # now clone pyenv into your home directory (this is the default way of installing pyenv) - git clone https://github.com/pyenv/pyenv.git ~/.pyenv + # now clone pyenv into your home directory (this is the default way of installing pyenv) + git clone https://github.com/pyenv/pyenv.git ~/.pyenv - # Then add pyenv to your path by adding the below to your .bashrc/.zshrc - export PYENV_ROOT="$HOME/.pyenv" - command -v pyenv >/dev/null || export PATH="$PYENV_ROOT/bin:$PATH" - eval "$(pyenv init -)" + # Then add pyenv to your path by adding the below to your .bashrc/.zshrc + export PYENV_ROOT="$HOME/.pyenv" + command -v pyenv >/dev/null || export PATH="$PYENV_ROOT/bin:$PATH" + eval "$(pyenv init -)" Verify your installation by opening a new terminal and .. code-block:: bash - # check python versions available - pyenv versions - # * system (...) # <-- it should just show one + # check python versions available + pyenv versions + # * system (...) # <-- it should just show one + +Then you can proceed and install any version of Python. Baselines use Python 3.10, so we'll be installing a recent version of it. + +.. 
code-block:: bash + + pyenv install 3.10.14 + # this will take a little while + # once done, you should see that that version is available + pyenv versions + # system + # * 3.10.14 # <-- you just installed this -Then you can proceed and install any version of Python. Most baselines currently use Python 3.10.6, so we'll be installing that one. +Next, let's install the :code:`virtualenv` plugin. Check `the documentation `_ for alternative installation methods. .. code-block:: bash - pyenv install 3.10.6 - # this will take a little while - # once done, you should see that that version is available - pyenv versions - # system - # * 3.10.6 # <-- you just installed this + # Clone `pyenv-virtualenv` + git clone https://github.com/pyenv/pyenv-virtualenv.git $(pyenv root)/plugins/pyenv-virtualenv + + # Restart your shell + exec "$SHELL" + -Now that we have :code:`pyenv` installed, we are ready to install :code:`poetry`. Installing Poetry can be done from a single command: +Using :code:`pyenv` +~~~~~~~~~~~~~~~~~~~ + +Creating a virtual environment can be done as follows: .. code-block:: bash - curl -sSL https://install.python-poetry.org | python3 - + # Create an environment for Python 3.10.14 named test-env + pyenv virtualenv 3.10.14 test-env + + # Then activate it + pyenv activate test-env + + # Deactivate it as follows + pyenv deactivate - # add to path by putting this line at the end of your .zshrc/.bashrc - export PATH="$HOME/.local/bin:$PATH" + +(optional) Setup Poetry +~~~~~~~~~~~~~~~~~~~~~~~ + +Now that we have :code:`pyenv` installed, we are ready to install :code:`poetry`. It can be done from a single command: + +.. 
code-block:: bash + + curl -sSL https://install.python-poetry.org | python3 - + + # add to path by putting this line at the end of your .zshrc/.bashrc + export PATH="$HOME/.local/bin:$PATH" To install Poetry from source, to customise your installation, or to further integrate Poetry with your shell after installation, please check `the Poetry documentation `_. + Using a Flower Baseline ----------------------- -To use Flower Baselines you need first to install :code:`pyenv` and :code:`Poetry`, then: +To use Flower Baselines you need first to install :code:`pyenv` and, depending on the baselines, also :code:`Poetry`, then: 1. Clone the flower repository .. code-block:: bash - git clone https://github.com/adap/flower.git && cd flower + git clone https://github.com/adap/flower.git && cd flower 2. Navigate inside the directory of the baseline you'd like to run -3. Follow the :code:`[Environment Setup]` instructions in the :code:`README.md`. In most cases this will require you to just do: - -.. code-block:: bash - - poetry install - -4. Run the baseline as indicated in the :code:`[Running the Experiments]` section in the :code:`README.md` or in the `[Expected Results]` section to reproduce the experiments in the paper. +3. Follow the :code:`[Environment Setup]` instructions in the :code:`README.md`. +4. Run the baseline as indicated in the :code:`[Running the Experiments]` section in the :code:`README.md` or in the :code:`[Expected Results]` section to reproduce the experiments in the paper. 
diff --git a/baselines/fedrep/.gitignore b/baselines/fedrep/.gitignore new file mode 100644 index 000000000000..eca5b2809311 --- /dev/null +++ b/baselines/fedrep/.gitignore @@ -0,0 +1,5 @@ +# generated files +outputs/ +client_states/ +datasets/ +models/ diff --git a/baselines/baseline_template/LICENSE b/baselines/fedrep/LICENSE similarity index 100% rename from baselines/baseline_template/LICENSE rename to baselines/fedrep/LICENSE diff --git a/baselines/fedrep/README.md b/baselines/fedrep/README.md new file mode 100644 index 000000000000..ece30edf0943 --- /dev/null +++ b/baselines/fedrep/README.md @@ -0,0 +1,126 @@ +--- +title: Exploiting Shared Representations for Personalized Federated Learning +url: http://arxiv.org/abs/2102.07078 +labels: [image classification, label heterogeneity, personalized federated learning] +dataset: [CIFAR-10, CIFAR-100] +--- + +# Exploiting Shared Representations for Personalized Federated Learning + +**Paper:** [arxiv.org/abs/2102.07078](http://arxiv.org/abs/2102.07078) + +**Authors:** Liam Collins, Hamed Hassani, Aryan Mokhtari, Sanjay Shakkottai + +**Abstract:** Deep neural networks have shown the ability to extract universal feature representations from data such as images and text that have been useful for a variety of learning tasks. However, the fruits of representation learning have yet to be fully-realized in federated settings. Although data in federated settings is often non-i.i.d. across clients, the success of centralized deep learning suggests that data often shares a global feature representation, while the statistical heterogeneity across clients or tasks is concentrated in the labels. Based on this intuition, we propose a novel federated learning framework and algorithm for learning a shared data representation across clients and unique local heads for each client. 
Our algorithm harnesses the distributed computational power across clients to perform many local-updates with respect to the low-dimensional local parameters for every update of the representation. We prove that this method obtains linear convergence to the ground-truth representation with near-optimal sample complexity in a linear setting, demonstrating that it can efficiently reduce the problem dimension for each client. This result is of interest beyond federated learning to a broad class of problems in which we aim to learn a shared low-dimensional representation among data distributions, for example in meta-learning and multi-task learning. Further, extensive experimental results show the empirical improvement of our method over alternative personalized federated learning approaches in federated environments with heterogeneous data. + + +## About this baseline + +**What’s implemented:** The code in this directory replicates the experiments in _Exploiting Shared Representations for Personalized Federated Learning_ (Liam Collins et al., 2021), the paper that proposed the `FedRep` model, on the CIFAR-10 and CIFAR-100 datasets. Specifically, it replicates the results for CIFAR-10 (`(100, 2), (100, 5)`) and CIFAR-100 (`(100, 5), (100, 20)`) reported in Table 1 of their paper. + +**Datasets:** CIFAR-10, CIFAR-100 from `Flower Datasets`. + +**Hardware Setup:** WSL2 Ubuntu 22.04 LTS, NVIDIA RTX 3070 Laptop, 32GB RAM, AMD Ryzen 9 5900HX. + +**Contributors:** Jiahao Tan + + +## Experimental Setup + +**Task:** Image Classification + +**Model:** This directory implements 2 models: + +- CNNCifar10 +- CNNCifar100 + +These two models are modified versions of the ones in the [official repo](https://github.com/rahulv0205/fedrep_experiments). Note that the official models contain no BN layers; however, without BN layers, training collapses.
+ +Please see how the models are implemented using the so-called model_manager and model_split classes, since FedRep splits a neural network into head and base layers. These classes are defined in the `models.py` file and are thereafter called when building new models in the directory `/implemented_models`. Please extend and add new models as you wish. + +**Dataset:** CIFAR10, CIFAR-100. CIFAR10/100 will be partitioned based on the number of classes each client receives, e.g. 4 allocated classes could be [1, 3, 5, 9]. + +**Training Hyperparameters:** The hyperparameters can be found in the `conf/base.yaml` file, which is the configuration file for the main script. + +| Description | Default Value | +| --------------------- | ----------------------------------- | +| `num_clients` | `100` | +| `num_rounds` | `100` | +| `num_local_epochs` | `5` | +| `num_rep_epochs` | `1` | +| `enable_finetune` | `False` | +| `num_finetune_epochs` | `5` | +| `use_cuda` | `true` | +| `specified_device` | `null` | +| `client resources` | `{'num_cpus': 2, 'num_gpus': 0.5 }` | +| `learning_rate` | `0.01` | +| `batch_size` | `50` | +| `model_name` | `cnncifar10` | +| `algorithm` | `fedrep` | + + +## Environment Setup + +To construct the Python environment, follow these steps: + +```bash +# Set Python 3.10 +pyenv local 3.10.12 +# Tell poetry to use python 3.10 +poetry env use 3.10.12 + +# Install the base Poetry environment +poetry install + +# Activate the environment +poetry shell +``` + +## Running the Experiments + +``` +python -m fedrep.main # this will run using the default settings in the `conf/base.yaml` +``` + +While the config files contain a large number of settings, the ones below are the main ones you'd likely want to modify.
+```bash +algorithm: fedavg, fedrep # these are currently supported +dataset.name: cifar10, cifar100 +dataset.num_classes: 2, 5, 20 (only for CIFAR-100) +model_name: cnncifar10, cnncifar100 +``` + + +## Expected Results + +### CIFAR-10 (100, 2) + +``` +python -m fedrep.main --config-name cifar10_100_2 algorithm=fedrep +python -m fedrep.main --config-name cifar10_100_2 algorithm=fedavg +``` + + +### CIFAR-10 (100, 5) + +``` +python -m fedrep.main --config-name cifar10_100_5 algorithm=fedrep +python -m fedrep.main --config-name cifar10_100_5 algorithm=fedavg +``` + + +### CIFAR-100 (100, 5) + +``` +python -m fedrep.main --config-name cifar100_100_5 algorithm=fedrep +python -m fedrep.main --config-name cifar100_100_5 algorithm=fedavg +``` + + +### CIFAR-100 (100, 20) + +``` +python -m fedrep.main --config-name cifar100_100_20 algorithm=fedrep +python -m fedrep.main --config-name cifar100_100_20 algorithm=fedavg +``` + \ No newline at end of file diff --git a/baselines/fedrep/_static/cifar100_100_20.png b/baselines/fedrep/_static/cifar100_100_20.png new file mode 100644 index 000000000000..2421f15ac6c6 Binary files /dev/null and b/baselines/fedrep/_static/cifar100_100_20.png differ diff --git a/baselines/fedrep/_static/cifar100_100_5.png b/baselines/fedrep/_static/cifar100_100_5.png new file mode 100644 index 000000000000..17f25eb480c4 Binary files /dev/null and b/baselines/fedrep/_static/cifar100_100_5.png differ diff --git a/baselines/fedrep/_static/cifar10_100_2.png b/baselines/fedrep/_static/cifar10_100_2.png new file mode 100644 index 000000000000..75ee48b2c970 Binary files /dev/null and b/baselines/fedrep/_static/cifar10_100_2.png differ diff --git a/baselines/fedrep/_static/cifar10_100_5.png b/baselines/fedrep/_static/cifar10_100_5.png new file mode 100644 index 000000000000..1d20a953f9c4 Binary files /dev/null and b/baselines/fedrep/_static/cifar10_100_5.png differ diff --git a/baselines/baseline_template/baseline_template/__init__.py 
b/baselines/fedrep/fedrep/__init__.py similarity index 100% rename from baselines/baseline_template/baseline_template/__init__.py rename to baselines/fedrep/fedrep/__init__.py diff --git a/baselines/fedrep/fedrep/base_model.py b/baselines/fedrep/fedrep/base_model.py new file mode 100644 index 000000000000..e6a74c01bf9b --- /dev/null +++ b/baselines/fedrep/fedrep/base_model.py @@ -0,0 +1,324 @@ +"""Abstract class for splitting a model into body and head.""" + +import os +from abc import ABC, abstractmethod +from typing import Any, Dict, List, Optional, OrderedDict, Tuple, Union + +import numpy as np +import torch +import torch.nn as nn +from omegaconf import DictConfig +from torch import Tensor +from torch.utils.data import DataLoader + +from fedrep.constants import ( + DEFAULT_FINETUNE_EPOCHS, + DEFAULT_LOCAL_TRAIN_EPOCHS, + DEFAULT_REPRESENTATION_EPOCHS, +) + + +def get_device( + use_cuda: bool = True, specified_device: Optional[int] = None +) -> torch.device: + """Get the tensor device. + + Args: + use_cuda: Flag indicates whether to use CUDA or not. Defaults to True. + specified_device: Specified cuda device to use. Defaults to None. + + Raises + ------ + ValueError: Specified device not in CUDA_VISIBLE_DEVICES. + + Returns + ------- + The selected or fallbacked device. 
+ """ + device = torch.device("cpu") + if use_cuda and torch.cuda.is_available(): + if specified_device is not None: + cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES") + if cuda_visible_devices is not None: + devices = [int(d) for d in cuda_visible_devices.split(",")] + if specified_device in devices: + device = torch.device(f"cuda:{specified_device}") + else: + raise ValueError( + f"Specified device {specified_device}" + " not in CUDA_VISIBLE_DEVICES" + ) + else: + print("CUDA_VISIBLE_DEVICES not exists, using torch.device('cuda').") + else: + device = torch.device("cuda") + + return device + + +class ModelSplit(ABC, nn.Module): + """Abstract class for splitting a model into body and head.""" + + def __init__(self, model: nn.Module): + """Initialize the attributes of the model split. + + Args: + model: dict containing the vocab sizes of the input attributes. + """ + super().__init__() + + self._body, self._head = self._get_model_parts(model) + + @abstractmethod + def _get_model_parts(self, model: nn.Module) -> Tuple[nn.Module, nn.Module]: + """Return the body and head of the model. + + Args: + model: model to be split into head and body + + Returns + ------- + Tuple where the first element is the body of the model + and the second is the head. + """ + + @property + def body(self) -> nn.Module: + """Return model body.""" + return self._body + + @body.setter + def body(self, state_dict: OrderedDict[str, Tensor]) -> None: + """Set model body. + + Args: + state_dict: dictionary of the state to set the model body to. + """ + self._body.load_state_dict(state_dict, strict=True) + + @property + def head(self) -> nn.Module: + """Return model head.""" + return self._head + + @head.setter + def head(self, state_dict: OrderedDict[str, Tensor]) -> None: + """Set model head. + + Args: + state_dict: dictionary of the state to set the model head to. 
+ """ + self._head.load_state_dict(state_dict, strict=True) + + def get_parameters(self) -> List[np.ndarray]: + """Get model parameters. + + Returns + ------- + Body and head parameters + """ + return [ + val.cpu().numpy() + for val in [ + *self.body.state_dict().values(), + *self.head.state_dict().values(), + ] + ] + + def set_parameters(self, state_dict: Dict[str, Tensor]) -> None: + """Set model parameters. + + Args: + state_dict: dictionary of the state to set the model to. + """ + self.load_state_dict(state_dict, strict=False) + + def enable_head(self) -> None: + """Enable gradient tracking for the head parameters.""" + for param in self._head.parameters(): + param.requires_grad = True + + def enable_body(self) -> None: + """Enable gradient tracking for the body parameters.""" + for param in self._body.parameters(): + param.requires_grad = True + + def disable_head(self) -> None: + """Disable gradient tracking for the head parameters.""" + for param in self._head.parameters(): + param.requires_grad = False + + def disable_body(self) -> None: + """Disable gradient tracking for the body parameters.""" + for param in self._body.parameters(): + param.requires_grad = False + + def forward(self, inputs: Any) -> Any: + """Forward inputs through the body and the head.""" + return self.head(self.body(inputs)) + + +# pylint: disable=R0902, R0913, R0801 +class ModelManager(ABC): + """Manager for models with Body/Head split.""" + + def __init__( + self, + client_id: int, + config: DictConfig, + trainloader: DataLoader, + testloader: DataLoader, + client_save_path: Optional[str], + model_split_class: Any, # ModelSplit + ): + """Initialize the attributes of the model manager. + + Args: + client_id: The id of the client. + config: Dict containing the configurations to be used by the manager. + trainloader: Client train dataloader. + testloader: Client test dataloader. + client_save_path: Path to save the client model head state. 
+ model_split_class: Class to be used to split the model into body and head \ + (concrete implementation of ModelSplit). + """ + super().__init__() + self.config = config + self.client_id = client_id + self.trainloader = trainloader + self.testloader = testloader + self.device = get_device( + use_cuda=getattr(self.config, "use_cuda", True), + specified_device=getattr(self.config, "specified_device", None), + ) + self.client_save_path = client_save_path + self.learning_rate = config.get("learning_rate", 0.01) + self.momentum = config.get("momentum", 0.5) + self._model: ModelSplit = model_split_class(self._create_model()) + + @abstractmethod + def _create_model(self) -> nn.Module: + """Return model to be splitted into head and body.""" + + @property + def model(self) -> ModelSplit: + """Return model.""" + return self._model + + def train(self) -> Dict[str, Union[List[Dict[str, float]], int, float]]: + """Train the model maintained in self.model. + + Returns + ------- + Dict containing the train metrics. 
+ """ + # Load client state (head) if client_save_path is not None and it is not empty + if self.client_save_path is not None and os.path.isfile(self.client_save_path): + self._model.head.load_state_dict(torch.load(self.client_save_path)) + + num_local_epochs = DEFAULT_LOCAL_TRAIN_EPOCHS + if hasattr(self.config, "num_local_epochs"): + num_local_epochs = int(self.config.num_local_epochs) + + num_rep_epochs = DEFAULT_REPRESENTATION_EPOCHS + if hasattr(self.config, "num_rep_epochs"): + num_rep_epochs = int(self.config.num_rep_epochs) + + criterion = torch.nn.CrossEntropyLoss() + weights = [v for k, v in self._model.named_parameters() if "weight" in k] + biases = [v for k, v in self._model.named_parameters() if "bias" in k] + optimizer = torch.optim.SGD( + [ + {"params": weights, "weight_decay": 1e-4}, + {"params": biases, "weight_decay": 0.0}, + ], + lr=self.learning_rate, + momentum=self.momentum, + ) + correct, total = 0, 0 + loss: torch.Tensor = 0.0 + + self._model.train() + for i in range(num_local_epochs + num_rep_epochs): + if i < num_local_epochs: + self._model.disable_body() + self._model.enable_head() + else: + self._model.enable_body() + self._model.disable_head() + for batch in self.trainloader: + images = batch["img"] + labels = batch["label"] + outputs = self._model(images.to(self.device)) + labels = labels.to(self.device) + loss = criterion(outputs, labels) + optimizer.zero_grad() + loss.backward() + optimizer.step() + total += labels.size(0) + correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() + + # Save client state (head) + if self.client_save_path is not None: + torch.save(self._model.head.state_dict(), self.client_save_path) + + return {"loss": loss.item(), "accuracy": correct / total} + + def test(self) -> Dict[str, float]: + """Test the model maintained in self.model. + + Returns + ------- + Dict containing the test metrics. 
+ """ + # Load client state (head) + if self.client_save_path is not None and os.path.isfile(self.client_save_path): + self._model.head.load_state_dict(torch.load(self.client_save_path)) + + num_finetune_epochs = DEFAULT_FINETUNE_EPOCHS + if hasattr(self.config, "num_finetune_epochs"): + num_finetune_epochs = int(self.config.num_finetune_epochs) + + if num_finetune_epochs > 0 and self.config.get("enable_finetune", False): + optimizer = torch.optim.SGD(self._model.parameters(), lr=self.learning_rate) + criterion = torch.nn.CrossEntropyLoss() + self._model.train() + for _ in range(num_finetune_epochs): + for batch in self.trainloader: + images = batch["img"].to(self.device) + labels = batch["label"].to(self.device) + outputs = self._model(images) + loss = criterion(outputs, labels) + optimizer.zero_grad() + loss.backward() + optimizer.step() + + criterion = torch.nn.CrossEntropyLoss() + correct, total, loss = 0, 0, 0.0 + + self._model.eval() + with torch.no_grad(): + for batch in self.testloader: + images = batch["img"].to(self.device) + labels = batch["label"].to(self.device) + outputs = self._model(images) + loss += criterion(outputs, labels).item() + total += labels.size(0) + correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() + + return { + "loss": loss / len(self.testloader.dataset), + "accuracy": correct / total, + } + + def train_dataset_size(self) -> int: + """Return train data set size.""" + return len(self.trainloader.dataset) + + def test_dataset_size(self) -> int: + """Return test data set size.""" + return len(self.testloader.dataset) + + def total_dataset_size(self) -> int: + """Return total data set size.""" + return len(self.trainloader.dataset) + len(self.testloader.dataset) diff --git a/baselines/fedrep/fedrep/client.py b/baselines/fedrep/fedrep/client.py new file mode 100644 index 000000000000..f857fd2cf82a --- /dev/null +++ b/baselines/fedrep/fedrep/client.py @@ -0,0 +1,319 @@ +"""Client implementation - can call FedPep and FedAvg 
clients.""" + +from collections import OrderedDict +from pathlib import Path +from typing import Callable, Dict, List, Tuple, Type, Union + +import numpy as np +import torch +from flwr.client import Client, NumPyClient +from flwr.common import NDArrays, Scalar +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import PathologicalPartitioner +from flwr_datasets.preprocessor import Merger +from omegaconf import DictConfig +from torch.utils.data import DataLoader +from torchvision import transforms + +from fedrep.constants import MEAN, STD, Algorithm +from fedrep.models import CNNCifar10ModelManager, CNNCifar100ModelManager + +PROJECT_DIR = Path(__file__).parent.parent.absolute() + +FEDERATED_DATASET = None + + +class BaseClient(NumPyClient): + """Implementation of Federated Averaging (FedAvg) Client.""" + + # pylint: disable=R0913 + def __init__( + self, + client_id: int, + trainloader: DataLoader, + testloader: DataLoader, + config: DictConfig, + model_manager_class: Union[ + Type[CNNCifar10ModelManager], Type[CNNCifar100ModelManager] + ], + client_state_save_path: str = "", + ): + """Initialize client attributes. + + Args: + client_id: The client ID. + trainloader: Client train data loader. + testloader: Client test data loader. + config: dictionary containing the client configurations. + model_manager_class: class to be used as the model manager. + client_state_save_path: Path for saving model head parameters. + (Just for FedRep). Defaults to "". 
+ """ + super().__init__() + + self.client_id = client_id + self.client_state_save_path = ( + (client_state_save_path + f"/client_{self.client_id}") + if client_state_save_path != "" + else None + ) + self.model_manager = model_manager_class( + client_id=self.client_id, + config=config, + trainloader=trainloader, + testloader=testloader, + client_save_path=self.client_state_save_path, + ) + + def get_parameters(self, config: Dict[str, Scalar]) -> NDArrays: + """Return the current local model parameters.""" + return self.model_manager.model.get_parameters() + + def set_parameters( + self, parameters: List[np.ndarray], evaluate: bool = False + ) -> None: + """Set the local model parameters to the received parameters. + + Args: + parameters: parameters to set the model to. + """ + _ = evaluate + model_keys = [ + k + for k in self.model_manager.model.state_dict().keys() + if k.startswith("_body") or k.startswith("_head") + ] + params_dict = zip(model_keys, parameters) + + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + + self.model_manager.model.set_parameters(state_dict) + + def perform_train(self) -> Dict[str, Union[List[Dict[str, float]], int, float]]: + """Perform local training to the whole model. + + Returns + ------- + Dict with the train metrics. + """ + self.model_manager.model.enable_body() + self.model_manager.model.enable_head() + + return self.model_manager.train() + + def fit( + self, parameters: NDArrays, config: Dict[str, Scalar] + ) -> Tuple[NDArrays, int, Dict[str, Union[bool, bytes, float, int, str]]]: + """Train the provided parameters using the locally held dataset. + + Args: + parameters: The current (global) model parameters. + config: configuration parameters for training sent by the server. + + Returns + ------- + Tuple containing the locally updated model parameters, \ + the number of examples used for training and \ + the training metrics. 
+ """ + self.set_parameters(parameters) + + train_results = self.perform_train() + + # Update train history + print("<------- TRAIN RESULTS -------> :", train_results) + + return self.get_parameters(config), self.model_manager.train_dataset_size(), {} + + def evaluate( + self, parameters: NDArrays, config: Dict[str, Scalar] + ) -> Tuple[float, int, Dict[str, Union[bool, bytes, float, int, str]]]: + """Evaluate the provided global parameters using the locally held dataset. + + Args: + parameters: The current (global) model parameters. + config: configuration parameters for training sent by the server. + + Returns + ------- + Tuple containing the test loss, \ + the number of examples used for evaluation and \ + the evaluation metrics. + """ + self.set_parameters(parameters, evaluate=True) + + # Test the model + test_results = self.model_manager.test() + print("<------- TEST RESULTS -------> :", test_results) + + return ( + test_results.get("loss", 0.0), + self.model_manager.test_dataset_size(), + {k: v for k, v in test_results.items() if not isinstance(v, (dict, list))}, + ) + + +class FedRepClient(BaseClient): + """Implementation of Federated Personalization (FedRep) Client.""" + + def get_parameters(self, config: Dict[str, Scalar]) -> NDArrays: + """Return the current local body parameters.""" + return [ + val.cpu().numpy() + for val in self.model_manager.model.body.state_dict().values() + ] + + def set_parameters(self, parameters: List[np.ndarray], evaluate=False) -> None: + """Set the local body parameters to the received parameters. + + Args: + parameters: parameters to set the body to. + evaluate: whether the client is evaluating or not. 
# pylint: disable=E1101, W0603
def get_client_fn_simulation(
    config: DictConfig, client_state_save_path: str = ""
) -> Callable[[str], Client]:
    """Generate the client function that creates the Flower Clients.

    Parameters
    ----------
    config : DictConfig
        The run configuration (model, dataset and training settings).
    client_state_save_path : str
        The path to save the client state. An empty string disables
        client-state persistence.

    Returns
    -------
    Callable[[str], Client]
        The client function that creates Flower Clients.
    """
    assert config.model_name.lower() in [
        "cnncifar10",
        "cnncifar100",
    ], f"Model {config.model_name} not implemented"

    # - you can define your own data transformation strategy here -
    # These transformations are from the official repo
    train_data_transform = transforms.Compose(
        [
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(MEAN[config.dataset.name], STD[config.dataset.name]),
        ]
    )
    test_data_transform = transforms.Compose(
        [
            transforms.ToTensor(),
            transforms.Normalize(MEAN[config.dataset.name], STD[config.dataset.name]),
        ]
    )

    # CIFAR-100 partitions by (and trains on) the "fine_label" column.
    use_fine_label = config.dataset.name.lower() == "cifar100"

    partitioner = PathologicalPartitioner(
        num_partitions=config.num_clients,
        partition_by="fine_label" if use_fine_label else "label",
        num_classes_per_partition=config.dataset.num_classes,
        class_assignment_mode="random",
        shuffle=True,
        seed=config.dataset.seed,
    )

    # Lazily create the process-wide federated dataset exactly once.
    global FEDERATED_DATASET
    if FEDERATED_DATASET is None:
        FEDERATED_DATASET = FederatedDataset(
            dataset=config.dataset.name.lower(),
            partitioners={"all": partitioner},
            preprocessor=Merger({"all": ("train", "test")}),
        )

    def apply_train_transforms(batch):
        """Apply transforms for train data to the partition from FederatedDataset."""
        batch["img"] = [train_data_transform(img) for img in batch["img"]]
        if use_fine_label:
            batch["label"] = batch["fine_label"]
        return batch

    def apply_test_transforms(batch):
        """Apply transforms for test data to the partition from FederatedDataset."""
        batch["img"] = [test_data_transform(img) for img in batch["img"]]
        if use_fine_label:
            batch["label"] = batch["fine_label"]
        return batch

    # pylint: disable=E1101
    def client_fn(cid: str) -> Client:
        """Create a Flower client representing a single organization."""
        cid_use = int(cid)

        partition = FEDERATED_DATASET.load_partition(cid_use, split="all")

        partition_train_test = partition.train_test_split(
            train_size=config.dataset.fraction, shuffle=True, seed=config.dataset.seed
        )

        trainset = partition_train_test["train"].with_transform(apply_train_transforms)
        testset = partition_train_test["test"].with_transform(apply_test_transforms)

        trainloader = DataLoader(trainset, config.batch_size, shuffle=True)
        testloader = DataLoader(testset, config.batch_size)

        model_manager_class: Union[
            Type[CNNCifar10ModelManager], Type[CNNCifar100ModelManager]
        ]
        if config.model_name.lower() == "cnncifar10":
            model_manager_class = CNNCifar10ModelManager
        elif config.model_name.lower() == "cnncifar100":
            model_manager_class = CNNCifar100ModelManager
        else:
            raise NotImplementedError(
                f"Model {config.model_name} not implemented, check name."
            )

        if config.algorithm.lower() == Algorithm.FEDREP.value:
            return FedRepClient(  # type: ignore[attr-defined]
                client_id=cid_use,
                trainloader=trainloader,
                testloader=testloader,
                config=config,
                model_manager_class=model_manager_class,
                client_state_save_path=client_state_save_path,
            ).to_client()
        return BaseClient(  # type: ignore[attr-defined]
            client_id=cid_use,
            trainloader=trainloader,
            testloader=testloader,
            config=config,
            model_manager_class=model_manager_class,
            client_state_save_path=client_state_save_path,
        ).to_client()

    return client_fn
/dev/null +++ b/baselines/fedrep/fedrep/conf/cifar100_100_20.yaml @@ -0,0 +1,44 @@ +--- +num_clients: 100 # total number of clients +num_local_epochs: 5 # number of local epochs +num_rep_epochs: 1 # number of representation epochs (only for FedRep) +enable_finetune: false +# num_finetune_epochs: 10 +batch_size: 50 +num_rounds: 100 +learning_rate: 0.01 +momentum: 0.5 +algorithm: fedrep +model_name: cnncifar100 + +client_resources: + num_cpus: 2 + num_gpus: 0.5 + +use_cuda: true +specified_device: null + +dataset: + name: cifar100 + num_classes: 20 + seed: 42 + fraction: 0.83 + +model: + _target_: fedrep.implemented_models.cnn_cifar100.CNNCifar100 + +fit_config: + drop_client: false + epochs: ${num_local_epochs} + batch_size: ${batch_size} + +strategy: + _target_: fedrep.strategy.FedRep + fraction_fit: 0.1 + fraction_evaluate: 0.1 + min_fit_clients: 2 + min_evaluate_clients: 2 + min_available_clients: 2 + evaluate_fn: null + on_fit_config_fn: null + on_evaluate_config_fn: null diff --git a/baselines/fedrep/fedrep/conf/cifar100_100_5.yaml b/baselines/fedrep/fedrep/conf/cifar100_100_5.yaml new file mode 100644 index 000000000000..e0add8f03b45 --- /dev/null +++ b/baselines/fedrep/fedrep/conf/cifar100_100_5.yaml @@ -0,0 +1,44 @@ +--- +num_clients: 100 # total number of clients +num_local_epochs: 5 # number of local epochs +num_rep_epochs: 1 # number of representation epochs (only for FedRep) +enable_finetune: false +# num_finetune_epochs: 10 +batch_size: 50 +num_rounds: 100 +learning_rate: 0.01 +momentum: 0.5 +algorithm: fedrep +model_name: cnncifar100 + +client_resources: + num_cpus: 2 + num_gpus: 0.5 + +use_cuda: true +specified_device: null + +dataset: + name: cifar100 + num_classes: 5 + seed: 42 + fraction: 0.83 + +model: + _target_: fedrep.implemented_models.cnn_cifar100.CNNCifar100 + +fit_config: + drop_client: false + epochs: ${num_local_epochs} + batch_size: ${batch_size} + +strategy: + _target_: fedrep.strategy.FedRep + fraction_fit: 0.1 + fraction_evaluate: 0.1 
+ min_fit_clients: 2 + min_evaluate_clients: 2 + min_available_clients: 2 + evaluate_fn: null + on_fit_config_fn: null + on_evaluate_config_fn: null diff --git a/baselines/fedrep/fedrep/conf/cifar10_100_2.yaml b/baselines/fedrep/fedrep/conf/cifar10_100_2.yaml new file mode 100644 index 000000000000..83ee34a298ae --- /dev/null +++ b/baselines/fedrep/fedrep/conf/cifar10_100_2.yaml @@ -0,0 +1,44 @@ +--- +num_clients: 100 # total number of clients +num_local_epochs: 5 # number of local epochs +num_rep_epochs: 1 # number of representation epochs (only for FedRep) +enable_finetune: false +# num_finetune_epochs: 10 +batch_size: 50 +num_rounds: 100 +learning_rate: 0.01 +momentum: 0.5 +algorithm: fedrep +model_name: cnncifar10 + +client_resources: + num_cpus: 2 + num_gpus: 0.5 + +use_cuda: true +specified_device: null + +dataset: + name: cifar10 + num_classes: 2 + seed: 42 + fraction: 0.83 + +model: + _target_: fedrep.implemented_models.cnn_cifar10.CNNCifar10 + +fit_config: + drop_client: false + epochs: ${num_local_epochs} + batch_size: ${batch_size} + +strategy: + _target_: fedrep.strategy.FedRep + fraction_fit: 0.1 + fraction_evaluate: 0.1 + min_fit_clients: 2 + min_evaluate_clients: 2 + min_available_clients: 2 + evaluate_fn: null + on_fit_config_fn: null + on_evaluate_config_fn: null diff --git a/baselines/fedrep/fedrep/conf/cifar10_100_5.yaml b/baselines/fedrep/fedrep/conf/cifar10_100_5.yaml new file mode 100644 index 000000000000..0cbd104406f0 --- /dev/null +++ b/baselines/fedrep/fedrep/conf/cifar10_100_5.yaml @@ -0,0 +1,44 @@ +--- +num_clients: 100 # total number of clients +num_local_epochs: 5 # number of local epochs +num_rep_epochs: 1 # number of representation epochs (only for FedRep) +enable_finetune: false +# num_finetune_epochs: 10 +batch_size: 50 +num_rounds: 100 +learning_rate: 0.01 +momentum: 0.5 +algorithm: fedrep +model_name: cnncifar10 + +client_resources: + num_cpus: 2 + num_gpus: 0.5 + +use_cuda: true +specified_device: null + +dataset: + name: 
cifar10 + num_classes: 5 + seed: 42 + fraction: 0.83 + +model: + _target_: fedrep.implemented_models.cnn_cifar10.CNNCifar10 + +fit_config: + drop_client: false + epochs: ${num_local_epochs} + batch_size: ${batch_size} + +strategy: + _target_: fedrep.strategy.FedRep + fraction_fit: 0.1 + fraction_evaluate: 0.1 + min_fit_clients: 2 + min_evaluate_clients: 2 + min_available_clients: 2 + evaluate_fn: null + on_fit_config_fn: null + on_evaluate_config_fn: null diff --git a/baselines/fedrep/fedrep/constants.py b/baselines/fedrep/fedrep/constants.py new file mode 100644 index 000000000000..27e68f2b786c --- /dev/null +++ b/baselines/fedrep/fedrep/constants.py @@ -0,0 +1,19 @@ +"""Constants used in machine learning pipeline.""" + +from enum import Enum + + +class Algorithm(Enum): + """Algorithm names.""" + + FEDREP = "fedrep" + FEDAVG = "fedavg" + + +DEFAULT_LOCAL_TRAIN_EPOCHS: int = 10 +DEFAULT_FINETUNE_EPOCHS: int = 5 +DEFAULT_REPRESENTATION_EPOCHS: int = 1 + +MEAN = {"cifar10": [0.485, 0.456, 0.406], "cifar100": [0.507, 0.487, 0.441]} + +STD = {"cifar10": [0.229, 0.224, 0.225], "cifar100": [0.267, 0.256, 0.276]} diff --git a/baselines/fedrep/fedrep/dataset.py b/baselines/fedrep/fedrep/dataset.py new file mode 100644 index 000000000000..a616e38ae220 --- /dev/null +++ b/baselines/fedrep/fedrep/dataset.py @@ -0,0 +1 @@ +"""FedRep uses flwr-datasets.""" diff --git a/baselines/fedrep/fedrep/dataset_preparation.py b/baselines/fedrep/fedrep/dataset_preparation.py new file mode 100644 index 000000000000..a616e38ae220 --- /dev/null +++ b/baselines/fedrep/fedrep/dataset_preparation.py @@ -0,0 +1 @@ +"""FedRep uses flwr-datasets.""" diff --git a/baselines/fedrep/fedrep/main.py b/baselines/fedrep/fedrep/main.py new file mode 100644 index 000000000000..223b98aa21fa --- /dev/null +++ b/baselines/fedrep/fedrep/main.py @@ -0,0 +1,123 @@ +"""Create and connect the building blocks for your experiments; start the simulation. 
@hydra.main(config_path="conf", config_name="base", version_base=None)
def main(cfg: DictConfig) -> None:
    """Run the baseline.

    Parameters
    ----------
    cfg : DictConfig
        An omegaconf object that stores the hydra config.
    """
    # Print parsed config
    print(OmegaConf.to_yaml(cfg))

    # Point the strategy config at the class matching the algorithm
    cfg = set_client_strategy(cfg)

    # Create directory to store client states if it does not exist
    # Client state has subdirectories with the name of current time
    client_state_save_path = set_client_state_save_path()

    # Define your clients
    # Get client function
    client_fn = get_client_fn(config=cfg, client_state_save_path=client_state_save_path)

    # get a function that will be used to construct the config that the client's
    # fit() method will receive
    def get_on_fit_config():
        def fit_config_fn(server_round: int):
            # resolve and convert to python dict
            fit_config = OmegaConf.to_container(cfg.fit_config, resolve=True)
            _ = server_round  # same config is sent every round
            return fit_config

        return fit_config_fn

    # get a function that will be used to construct the model
    create_model, split = get_create_model_fn(cfg)

    model = split(create_model())

    def evaluate_metrics_aggregation_fn(
        eval_metrics: List[Tuple[int, Metrics]]
    ) -> Metrics:
        """Aggregate client accuracies weighted by each client's example count."""
        weights, accuracies = [], []
        for num_examples, metric in eval_metrics:
            weights.append(num_examples)
            accuracies.append(metric["accuracy"] * num_examples)
        accuracy = sum(accuracies) / sum(weights)  # type: ignore[arg-type]
        return {"accuracy": accuracy}

    # Define your strategy
    strategy = instantiate(
        cfg.strategy,
        initial_parameters=ndarrays_to_parameters(model.get_parameters()),
        on_fit_config_fn=get_on_fit_config(),
        evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation_fn,
    )

    # Start Simulation
    history = fl.simulation.start_simulation(
        client_fn=client_fn,
        num_clients=cfg.num_clients,
        config=fl.server.ServerConfig(num_rounds=cfg.num_rounds),
        client_resources={
            "num_cpus": cfg.client_resources.num_cpus,
            "num_gpus": cfg.client_resources.num_gpus,
        },
        strategy=strategy,
    )

    # Experiment completed. Now we save the results and
    # generate plots using the `history`
    print("................")
    print(history)

    # Save your results
    save_path = Path(HydraConfig.get().runtime.output_dir)

    # save results as a Python pickle using a file_path
    # the directory created by Hydra for each run
    save_results_as_pickle(history, file_path=save_path)
    # plot results and include them in the readme
    strategy_name = strategy.__class__.__name__
    file_suffix: str = (
        f"_{strategy_name}"
        f"_C={cfg.num_clients}"
        f"_B={cfg.batch_size}"
        f"_E={cfg.num_local_epochs}"
        f"_R={cfg.num_rounds}"
        f"_lr={cfg.learning_rate}"
    )

    plot_metric_from_history(history, save_path, file_suffix)


if __name__ == "__main__":
    main()
# pylint: disable=W0223
class CNNCifar10(nn.Module):
    """CNN model for CIFAR10 dataset.

    Refer to
    https://github.com/rahulv0205/fedrep_experiments/blob/main/models/Nets.py
    """

    def __init__(self):
        """Initialize the model."""
        super().__init__()

        # Note that in the official implementation, the body has no BN layers.
        # However, no BN will definitely lead training to collapse.
        self.body = nn.Sequential(
            nn.Conv2d(3, 64, 5),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(64, 64, 5),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Flatten(),
            nn.Linear(64 * 5 * 5, 120),
            nn.ReLU(),
            nn.Linear(120, 64),
            nn.ReLU(),
        )

        self.head = nn.Sequential(nn.Linear(64, 10))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward pass of the model."""
        x = self.body(x)
        return self.head(x)


class CNNCifar10ModelSplit(ModelSplit):
    """Split CNNCifar10 model into body and head."""

    def _get_model_parts(self, model: CNNCifar10) -> Tuple[nn.Module, nn.Module]:
        return model.body, model.head


# pylint: disable=R0902, R0913, R0801
class CNNCifar10ModelManager(ModelManager):
    """Manager for models with Body/Head split."""

    def __init__(self, **kwargs):
        """Initialize the attributes of the model manager.

        Args:
            client_id: The id of the client.
            config: Dict containing the configurations to be used by the manager.
        """
        super().__init__(model_split_class=CNNCifar10ModelSplit, **kwargs)

    def _create_model(self) -> nn.Module:
        """Return CNNCifar10 model to be split into head and body."""
        return CNNCifar10().to(self.device)


# pylint: disable=W0223
class CNNCifar100(nn.Module):
    """CNN model for CIFAR100 dataset.

    Refer to
    https://github.com/rahulv0205/fedrep_experiments/blob/main/models/Nets.py
    """

    def __init__(self):
        """Initialize the model."""
        super().__init__()

        # Note that in the official implementation, the body has no BN layers.
        # However, no BN will definitely lead training to collapse.
        self.body = nn.Sequential(
            nn.Conv2d(3, 64, 5),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(64, 128, 5),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Flatten(),
            nn.Linear(128 * 5 * 5, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Dropout(0.6),
        )

        self.head = nn.Sequential(nn.Linear(128, 100))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward pass of the model.

        Added for consistency with CNNCifar10: without it, calling the
        module raises via nn.Module's abstract forward.
        """
        x = self.body(x)
        return self.head(x)


class CNNCifar100ModelSplit(ModelSplit):
    """Split CNNCifar100 model into body and head."""

    def _get_model_parts(self, model: CNNCifar100) -> Tuple[nn.Module, nn.Module]:
        return model.body, model.head


# pylint: disable=R0902, R0913, R0801
class CNNCifar100ModelManager(ModelManager):
    """Manager for models with Body/Head split."""

    def __init__(self, **kwargs):
        """Initialize the attributes of the model manager.

        Args:
            client_id: The id of the client.
            config: Dict containing the configurations to be used by the manager.
        """
        super().__init__(model_split_class=CNNCifar100ModelSplit, **kwargs)

    def _create_model(self) -> CNNCifar100:
        """Return CNNCifar100 model to be split into head and body."""
        return CNNCifar100().to(self.device)


class FedRep(FedAvg):
    """FedRep strategy."""

    def __repr__(self) -> str:
        """Compute a string representation of the strategy."""
        rep = f"FedRep(accept_failures={self.accept_failures})"
        return rep
"""Utility functions for FedRep."""

import logging
import os
import pickle
import time
from pathlib import Path
from secrets import token_hex
from typing import Callable, Optional, Type, Union

import matplotlib.pyplot as plt
import numpy as np
from flwr.client import Client
from flwr.server.history import History
from omegaconf import DictConfig

from fedrep.base_model import get_device
from fedrep.client import get_client_fn_simulation
from fedrep.constants import Algorithm
from fedrep.models import (
    CNNCifar10,
    CNNCifar10ModelSplit,
    CNNCifar100,
    CNNCifar100ModelSplit,
)


def set_client_state_save_path() -> str:
    """Create and return a timestamped directory for client states.

    Returns
    -------
    str
        Path of the form ./client_states/<YYYY-MM-DD>/<HH-MM-SS>.
    """
    client_state_save_path = time.strftime("%Y-%m-%d")
    client_state_sub_path = time.strftime("%H-%M-%S")
    client_state_save_path = (
        f"./client_states/{client_state_save_path}/{client_state_sub_path}"
    )
    # exist_ok avoids the check-then-create race between concurrent runs
    os.makedirs(client_state_save_path, exist_ok=True)
    return client_state_save_path


def set_client_strategy(cfg: DictConfig) -> DictConfig:
    """Point cfg.strategy._target_ at the class matching cfg.algorithm."""
    algorithm = cfg.algorithm.lower()
    if algorithm == Algorithm.FEDREP.value:
        cfg.strategy["_target_"] = "fedrep.strategy.FedRep"
    elif algorithm == Algorithm.FEDAVG.value:
        cfg.strategy["_target_"] = "flwr.server.strategy.FedAvg"
    else:
        # Lazy %-style args: the message is only formatted if emitted
        logging.warning(
            "Algorithm %s not implemented. Fallback to FedAvg.", algorithm
        )
    return cfg


def get_client_fn(
    config: DictConfig, client_state_save_path: str = ""
) -> Callable[[str], Client]:
    """Get the client function for the configured algorithm.

    FedRep clients persist per-client head state, so only they receive
    the state-save path.
    """
    algorithm = config.algorithm.lower()
    if algorithm == Algorithm.FEDREP.value:
        return get_client_fn_simulation(
            config=config, client_state_save_path=client_state_save_path
        )
    if algorithm == Algorithm.FEDAVG.value:
        return get_client_fn_simulation(config=config)
    raise NotImplementedError(f"Algorithm {algorithm} not implemented.")


def get_create_model_fn(
    config: DictConfig,
) -> tuple[
    Callable[[], Union[CNNCifar10, CNNCifar100]],
    Union[Type[CNNCifar10ModelSplit], Type[CNNCifar100ModelSplit]],
]:
    """Get the model factory and the matching model-split class.

    Returns
    -------
    tuple
        (create_model, split) where create_model() builds a model
        *instance* on the configured device and split is the
        ModelSplit subclass matching the model.
    """
    device = get_device(
        use_cuda=getattr(config, "use_cuda", True),
        specified_device=getattr(config, "specified_device", None),
    )
    split: Union[Type[CNNCifar10ModelSplit], Type[CNNCifar100ModelSplit]] = (
        CNNCifar10ModelSplit
    )
    if config.model_name.lower() == "cnncifar10":

        def create_model() -> Union[CNNCifar10, CNNCifar100]:
            """Create initial CNNCifar10 model."""
            return CNNCifar10().to(device)

    elif config.model_name.lower() == "cnncifar100":
        split = CNNCifar100ModelSplit

        def create_model() -> Union[CNNCifar10, CNNCifar100]:
            """Create initial CNNCifar100 model."""
            return CNNCifar100().to(device)

    else:
        raise NotImplementedError("Model not implemented, check name. ")
    return create_model, split


def plot_metric_from_history(
    hist: History, save_plot_path: Path, suffix: Optional[str] = ""
) -> None:
    """Plot from Flower server History.

    Parameters
    ----------
    hist : History
        Object containing evaluation for all rounds.
    save_plot_path : Path
        Folder to save the plot to.
    suffix: Optional[str]
        Optional string to add at the end of the filename for the plot.
    """
    metric_type = "distributed"
    metric_dict = (
        hist.metrics_centralized
        if metric_type == "centralized"
        else hist.metrics_distributed
    )
    try:
        _, values = zip(*metric_dict["accuracy"])
    except KeyError:  # If no available metric data
        return

    # extract the decentralized (federated-evaluation) loss
    rounds_loss, values_loss = zip(*hist.losses_distributed)

    _, axs = plt.subplots(nrows=2, ncols=1, sharex="row")
    axs[0].plot(np.asarray(rounds_loss), np.asarray(values_loss))  # type: ignore
    axs[1].plot(np.asarray(rounds_loss), np.asarray(values))  # type: ignore

    axs[0].set_ylabel("Loss")  # type: ignore
    axs[1].set_ylabel("Accuracy")  # type: ignore

    axs[0].grid()  # type: ignore
    axs[1].grid()  # type: ignore
    plt.xlabel("Rounds")

    plt.savefig(Path(save_plot_path) / Path(f"{metric_type}_metrics{suffix}.png"))
    plt.close()


def save_results_as_pickle(
    history: History,
    file_path: Union[str, Path],
    default_filename: Optional[str] = "results.pkl",
) -> None:
    """Save results from simulation to pickle.

    Parameters
    ----------
    history: History
        History returned by start_simulation.
    file_path: Union[str, Path]
        Path to file to create and store both history and extra_results.
        If path is a directory, the default_filename will be used.
        path doesn't exist, it will be created. If file exists, a
        randomly generated suffix will be added to the file name. This
        is done to avoid overwriting results.
    default_filename: Optional[str]
        File used by default if file_path points to a directory instead
        to a file. Default: "results.pkl"
    """
    path = Path(file_path)

    # ensure path exists
    path.mkdir(exist_ok=True, parents=True)

    def _add_random_suffix(path_: Path):
        """Add a random suffix to the file name."""
        print(f"File `{path_}` exists! ")
        suffix = token_hex(4)
        print(f"New results to be saved with suffix: {suffix}")
        return path_.parent / (path_.stem + "_" + suffix + ".pkl")

    def _complete_path_with_default_name(path_: Path):
        """Append the default file name to the path."""
        print("Using default filename")
        if default_filename is None:
            return path_
        return path_ / default_filename

    if path.is_dir():
        path = _complete_path_with_default_name(path)

    if path.is_file():
        path = _add_random_suffix(path)

    print(f"Results will be saved into: {path}")
    data = {"history": history}
    # save results to pickle
    with open(str(path), "wb") as handle:
        pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)
extras = ["simulation"], version = "1.5.0" } -hydra-core = "1.3.2" # don't change this +python = ">=3.10.0, <3.11.0" # don't change this +flwr = { extras = ["simulation"], version = "1.9.0" } +hydra-core = "1.3.2" # don't change this +pandas = "^2.2.2" +matplotlib = "^3.9.0" +tqdm = "^4.66.4" +torch = "^2.2.2" +torchvision = "^0.17.2" +setuptools = "<70" +flwr-datasets = { extras = ["vision"], version = ">=0.3.0" } [tool.poetry.dev-dependencies] isort = "==5.13.2" @@ -52,6 +59,7 @@ pytest = "==6.2.4" pytest-watch = "==4.2.0" ruff = "==0.0.272" types-requests = "==2.27.7" +virtualenv = "==20.21.0" [tool.isort] line_length = 88 @@ -68,9 +76,7 @@ target-version = ["py38", "py39", "py310", "py311"] [tool.pytest.ini_options] minversion = "6.2" addopts = "-qq" -testpaths = [ - "flwr_baselines", -] +testpaths = ["flwr_baselines"] [tool.mypy] ignore_missing_imports = true @@ -78,18 +84,14 @@ strict = false plugins = "numpy.typing.mypy_plugin" [tool.pylint."MESSAGES CONTROL"] -disable = "bad-continuation,duplicate-code,too-few-public-methods,useless-import-alias" good-names = "i,j,k,_,x,y,X,Y" signature-mutators = "hydra.main.main" -[tool.pylint.typecheck] +[tool.pylint."TYPECHECK"] generated-members = "numpy.*, torch.*, tensorflow.*" [[tool.mypy.overrides]] -module = [ - "importlib.metadata.*", - "importlib_metadata.*", -] +module = ["importlib.metadata.*", "importlib_metadata.*"] follow_imports = "skip" follow_imports_for_stubs = true disallow_untyped_calls = false diff --git a/baselines/flanders/.gitignore b/baselines/flanders/.gitignore new file mode 100644 index 000000000000..4187d73689f0 --- /dev/null +++ b/baselines/flanders/.gitignore @@ -0,0 +1,9 @@ +outputs/* +clients_params/* +flanders/datasets_files/* +*.log +flanders/__pycache__ +MNIST +.DS_Store +*/__pycache__ +multirun \ No newline at end of file diff --git a/baselines/flanders/LICENSE b/baselines/flanders/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ 
b/baselines/flanders/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/baselines/flanders/README.md b/baselines/flanders/README.md new file mode 100644 index 000000000000..46d95211d619 --- /dev/null +++ b/baselines/flanders/README.md @@ -0,0 +1,161 @@ +--- +title: Protecting Federated Learning from Extreme Model Poisoning Attacks via Multidimensional Time Series Anomaly Detection +url: https://arxiv.org/abs/2303.16668 +labels: [robustness, model poisoning, anomaly detection, autoregressive model, regression, classification] +dataset: [MNIST, FashionMNIST] +--- + +# FLANDERS: Protecting Federated Learning from Extreme Model Poisoning Attacks via Multidimensional Time Series Anomaly Detection + +> Note: If you use this baseline in your work, please remember to cite the original authors of the paper as well as the Flower paper. 
+ +**Paper:** [arxiv.org/abs/2303.16668](https://arxiv.org/abs/2303.16668) + +**Authors:** Edoardo Gabrielli, Gabriele Tolomei, Dimitri Belli, Vittorio Miori + +**Abstract:** Current defense mechanisms against model poisoning attacks in federated learning (FL) systems have proven effective up to a certain threshold of malicious clients. In this work, we introduce FLANDERS, a novel pre-aggregation filter for FL resilient to large-scale model poisoning attacks, i.e., when malicious clients far exceed legitimate participants. FLANDERS treats the sequence of local models sent by clients in each FL round as a matrix-valued time series. Then, it identifies malicious client updates as outliers in this time series by comparing actual observations with estimates generated by a matrix autoregressive forecasting model maintained by the server. Experiments conducted in several non-iid FL setups show that FLANDERS significantly improves robustness across a wide spectrum of attacks when paired with standard and robust existing aggregation methods. + +## About this baseline + +**What’s implemented:** The code in this directory replicates the results of FLANDERS+\[baseline\] on MNIST and Fashion-MNIST under all attack settings: Gaussian, LIE, OPT, and AGR-MM; with $r=[0.2,0.6,0.8]$ (i.e., the fraction of malicious clients), specifically about tables 1, 3, 10, 11, 15, 17, 19, 20 and Figure 3. + +**Datasets:** MNIST, FMNIST + +**Hardware Setup:** AMD Ryzen 9, 64 GB RAM, and an NVIDIA 4090 GPU with 24 GB VRAM. + +**Estimated time to run:** You can expect to run experiments on the given setup in 2m with *MNIST* and 3m with *Fashion-MNIST*, without attacks. With an Apple M2 Pro, 16 GB RAM, each experiment with 10 clients for MNIST runs in about 24 minutes. Note that experiments with OPT (fang) and AGR-MM (minmax) can be up to 5x slower. 
+ +**Contributors:** Edoardo Gabrielli, Sapienza University of Rome ([GitHub](https://github.com/edogab33), [Scholar](https://scholar.google.com/citations?user=b3bePdYAAAAJ)) + + +## Experimental Setup + +Please check out Appendix F and G of the paper for a comprehensive overview of the hyperparameters setup; however, here's a summary. + +**Task:** Image classification + +**Models:** + +MNIST (multilabel classification, fully connected, feed forward NN): +- Multilayer Perceptron (MLP) +- minimizing multiclass cross-entropy loss using Adam optimizer +- input: 784 +- hidden layer 1: 128 +- hidden layer 2: 256 + +Fashion-MNIST (multilabel classification, fully connected, feed forward NN): +- Multilayer Perceptron (MLP) +- minimizing multiclass cross-entropy loss using Adam optimizer +- input: 784 +- hidden layer 1: 256 +- hidden layer 2: 128 +- hidden layer 3: 64 + +**Dataset:** Every dataset is partitioned into two disjoint sets: 80% for training and 20% for testing. The training set is distributed across all clients (100) by using the Dirichlet distribution with $\alpha=0.5$, simulating a high non-i.i.d. scenario, while the testing set is uniform and held by the server to evaluate the global model. + +| Description | Default Value | +| ----------- | ----- | +| Partitions | 100 | +| Evaluation | centralized | +| Training set | 80% | +| Testing set | 20% | +| Distribution | Dirichlet | +| $\alpha$ | 0.5 | + +**Training Hyperparameters:** + +| Dataset | # of clients | Clients per round | # of rounds | Batch size | Learning rate | Optimizer | Dropout | Alpha | Beta | # of clients to keep | Sampling | +| -- | -- | -- | -- | -- | -- | -- | -- | -- | -- | -- | -- | +| MNIST | 100 | 100 | 50 | 32 | $10^{-3}$ | Adam | 0.2 | 0.0 | 0.0 | $m - b$ | 500 | +| FMNIST | 100 | 100 | 50 | 32 | $10^{-3}$ | Adam | 0.2 | 0.0 | 0.0 | $m - b$ | 500 | + +Where $m$ is the number of clients participating during the n-th round and $b$ is the number of malicious clients. 
The variable $sampling$ identifies how many parameters MAR analyzes. + + +## Environment Setup + +```bash +# Use a version of Python >=3.9 and <3.12.0. +pyenv local 3.10.12 +poetry env use 3.10.12 + +# Install everything from the toml +poetry install + +# Activate the env +poetry shell +``` + + +## Running the Experiments +Ensure that the environment is properly set up, then run: + +```bash +python -m flanders.main +``` + +This executes a single experiment with the default values in `conf/base.yaml`. + +To run custom experiments, you can override the default values like this: + +```bash +python -m flanders.main dataset=mnist server.attack_fn=lie server.num_malicious=1 +``` + +To run multiple custom experiments: + +```bash +python -m flanders.main --multirun dataset=mnist,fmnist server.attack_fn=gaussian,lie,fang,minmax server.num_malicious=0,1,2,3,4,5 +``` + +## Expected Results + +To run all the experiments of the paper (for MNIST and Fashion-MNIST), I've set up a script: + +```bash +sh run.sh +``` + +This code will produce the output in the file `outputs/all_results.csv`. To generate the plots and tables displayed below, you can use the notebook in the `plotting/` directory. + + +### Accuracy over multiple rounds +**(left) MNIST, FLANDERS+FedAvg with 80% of malicious clients (b = 80); (right) Vanilla FedAvg in the same setting:** + +![acc_over_rounds](_static/screenshot-8.png) + +### Precision and Recall of FLANDERS + +**b = 20:** + +![alt text](_static/screenshot-4.png) +--- + +**b = 60:** + +![alt text](_static/screenshot-5.png) +--- +**b = 80:** + +![alt text](_static/screenshot-6.png) + + +### Accuracy w.r.t. 
number of attackers: +**b = 0:** + +![alt text](_static/screenshot.png) + +--- +**b = 20:** + +![alt text](_static/screenshot-1.png) + +--- +**b = 60:** + +![alt text](_static/screenshot-2.png) + +--- +**b = 80:** + +![alt text](_static/screenshot-3.png) diff --git a/baselines/flanders/_static/screenshot-1.png b/baselines/flanders/_static/screenshot-1.png new file mode 100644 index 000000000000..f9c14a7e72f2 Binary files /dev/null and b/baselines/flanders/_static/screenshot-1.png differ diff --git a/baselines/flanders/_static/screenshot-2.png b/baselines/flanders/_static/screenshot-2.png new file mode 100644 index 000000000000..7aacd2ba5778 Binary files /dev/null and b/baselines/flanders/_static/screenshot-2.png differ diff --git a/baselines/flanders/_static/screenshot-3.png b/baselines/flanders/_static/screenshot-3.png new file mode 100644 index 000000000000..978ed4902bf5 Binary files /dev/null and b/baselines/flanders/_static/screenshot-3.png differ diff --git a/baselines/flanders/_static/screenshot-4.png b/baselines/flanders/_static/screenshot-4.png new file mode 100644 index 000000000000..5a24c47ff513 Binary files /dev/null and b/baselines/flanders/_static/screenshot-4.png differ diff --git a/baselines/flanders/_static/screenshot-5.png b/baselines/flanders/_static/screenshot-5.png new file mode 100644 index 000000000000..e0defab01d22 Binary files /dev/null and b/baselines/flanders/_static/screenshot-5.png differ diff --git a/baselines/flanders/_static/screenshot-6.png b/baselines/flanders/_static/screenshot-6.png new file mode 100644 index 000000000000..bfb3120fef7b Binary files /dev/null and b/baselines/flanders/_static/screenshot-6.png differ diff --git a/baselines/flanders/_static/screenshot-8.png b/baselines/flanders/_static/screenshot-8.png new file mode 100644 index 000000000000..cda98c21d034 Binary files /dev/null and b/baselines/flanders/_static/screenshot-8.png differ diff --git a/baselines/flanders/_static/screenshot.png 
b/baselines/flanders/_static/screenshot.png new file mode 100644 index 000000000000..537ebb66c123 Binary files /dev/null and b/baselines/flanders/_static/screenshot.png differ diff --git a/baselines/flanders/flanders/__init__.py b/baselines/flanders/flanders/__init__.py new file mode 100644 index 000000000000..eb3edd489459 --- /dev/null +++ b/baselines/flanders/flanders/__init__.py @@ -0,0 +1 @@ +"""FLANDERS package.""" diff --git a/baselines/flanders/flanders/attacks.py b/baselines/flanders/flanders/attacks.py new file mode 100644 index 000000000000..9b1acd9ad639 --- /dev/null +++ b/baselines/flanders/flanders/attacks.py @@ -0,0 +1,493 @@ +"""Implementation of attacks used in the paper.""" + +import math +from typing import Dict, List, Tuple + +import numpy as np +from flwr.common import FitRes, ndarrays_to_parameters, parameters_to_ndarrays +from flwr.server.client_proxy import ClientProxy +from scipy.stats import norm + + +# pylint: disable=unused-argument +def no_attack( + ordered_results: List[Tuple[ClientProxy, FitRes]], states: Dict[str, bool], **kwargs +): + """No attack.""" + return ordered_results, {} + + +def gaussian_attack(ordered_results, states, **kwargs): + """Apply Gaussian attack on parameters. + + Parameters + ---------- + ordered_results + List of tuples (client_proxy, fit_result) ordered by client id. + states + Dictionary of client ids and their states (True if malicious, False otherwise). + magnitude + Magnitude of the attack. + dataset_name + Name of the dataset. + + Returns + ------- + results + List of tuples (client_proxy, fit_result) ordered by client id. 
+ """ + magnitude = kwargs.get("magnitude", 0.0) + dataset_name = kwargs.get("dataset_name", "no name") + results = ordered_results.copy() + + def perturbate(vect): + return vect + np.random.normal(loc=0, scale=magnitude, size=vect.size) + + for proxy, fitres in ordered_results: + if states[fitres.metrics["cid"]]: + params = parameters_to_ndarrays(fitres.parameters) + if dataset_name == "income": + new_params = [perturbate(layer) for layer in params] + else: + new_params = [] + for par in params: + # if par is an array of one element, it is a scalar + if par.size == 1: + new_params.append(perturbate(par)) + else: + new_params.append(np.apply_along_axis(perturbate, 0, par)) + fitres.parameters = ndarrays_to_parameters(new_params) + results[int(fitres.metrics["cid"])] = (proxy, fitres) + return results, {} + + +# pylint: disable=too-many-locals, unused-argument +def lie_attack( + ordered_results, + states, + omniscent=True, + **kwargs, +): + """Apply Omniscent LIE attack, Baruch et al. (2019) on parameters. + + Parameters + ---------- + ordered_results + List of tuples (client_proxy, fit_result) ordered by client id. + states + Dictionary of client ids and their states (True if malicious, False otherwise). + omniscent + Whether the attacker knows the local models of all clients or not. + + Returns + ------- + results + List of tuples (client_proxy, fit_result) ordered by client id. 
+ """ + results = ordered_results.copy() + params = [parameters_to_ndarrays(fitres.parameters) for _, fitres in results] + grads_mean = [np.mean(layer, axis=0) for layer in zip(*params)] + grads_stdev = [np.std(layer, axis=0) ** 0.5 for layer in zip(*params)] + + if not omniscent: + # if not omniscent, the attacker doesn't know the + # local models of all clients, but only of the corrupted ones + params = [ + params[i] + for i in range(len(params)) + if states[results[i][1].metrics["cid"]] + ] + + num_clients = len(ordered_results) + num_malicious = sum(val is True for val in states.values()) + + # pylint: disable=c-extension-no-member + num_supporters = math.floor((num_clients / 2) + 1) - num_malicious + + z_max = norm.cdf( + (num_clients - num_malicious - num_supporters) / (num_clients - num_malicious) + ) + + for proxy, fitres in ordered_results: + if states[fitres.metrics["cid"]]: + mul_std = [layer * z_max for layer in grads_stdev] + new_params = [grads_mean[i] - mul_std[i] for i in range(len(grads_mean))] + fitres.parameters = ndarrays_to_parameters(new_params) + results[int(fitres.metrics["cid"])] = (proxy, fitres) + return results, {} + + +def fang_attack( + ordered_results, + states, + omniscent=True, + **kwargs, +): + """Apply Local Model Poisoning Attacks. + + (Fang et al. (2020)) + Specifically designed for Krum, but they claim it works for other + aggregation functions as well. + Full-knowledge version (attackers knows the local models of all clients). + + Parameters + ---------- + ordered_results + List of tuples (client_proxy, fit_result) ordered by client id. + states + Dictionary of client ids and their states (True if malicious, False + otherwise). + omniscent + Whether the attacker knows the local models of all clients or not. + num_layers + Number of layers. + w_re + The received global model. + old_lambda + The lambda from the previous round. + threshold + The threshold for lambda. 
+ malicious_selected + Whether the attacker was selected as malicious in the previous round. + + Returns + ------- + results + List of tuples (client_proxy, fit_result) ordered by client id. + """ + num_layers = kwargs.get("num_layers", 2) + w_re = kwargs.get("w_re", None) # the received global model + threshold = kwargs.get("threshold", 1e-5) + + num_clients = len(ordered_results) + num_corrupted = sum(val is True for val in states.values()) + # there can't be an attack with less than 2 malicious clients + # to avoid division by 0 + num_corrupted = max(num_corrupted, 2) + + if not omniscent: + # if not omniscent, the attacker doesn't know the + # local models of all clients, but only of the corrupted ones + ordered_results = [ + ordered_results[i] + for i in range(len(ordered_results)) + if states[ordered_results[i][1].metrics["cid"]] + ] + + # Initialize lambda + benign = [ + (parameters_to_ndarrays(fitres.parameters), fitres.num_examples) + for _, fitres in ordered_results + if states[fitres.metrics["cid"]] is False + ] + all_params = [ + (parameters_to_ndarrays(fitres.parameters), fitres.num_examples) + for _, fitres in ordered_results + ] + # Compute the smallest distance that Krum would choose + _, _, _, distances = _krum(all_params, num_corrupted, 1) + + idx_benign = [int(cid) for cid in states.keys() if states[cid] is False] + + min_dist = np.min(np.array(distances)[idx_benign]) / ( + ((num_clients - 2) * (num_corrupted - 1)) * np.sqrt(num_layers) + ) + + # Compute max distance from w_re + dist_wre = np.zeros((len(benign))) + for i in range(len(benign)): + dist = [benign[i][0][j] - w_re[j] for j in range(num_layers)] + norm_sums = 0 + for k in dist: + norm_sums += np.linalg.norm(k) + dist_wre[i] = norm_sums**2 + max_dist = np.max(dist_wre) / np.sqrt(num_layers) + lamda = min( + min_dist + max_dist, 999 + ) # lambda (capped to 999 to avoid numerical problems in specific settings) + + malicious_selected, corrupted_params = _fang_corrupt_and_select( + 
all_params, w_re, states, num_corrupted, lamda + ) + while lamda > threshold and malicious_selected is False: + lamda = lamda * 0.5 + malicious_selected, corrupted_params = _fang_corrupt_and_select( + all_params, w_re, states, num_corrupted, lamda + ) + + # Set corrupted clients' updates to w_1 + results = [ + ( + ( + proxy, + FitRes( + fitres.status, + parameters=ndarrays_to_parameters(corrupted_params), + num_examples=fitres.num_examples, + metrics=fitres.metrics, + ), + ) + if states[fitres.metrics["cid"]] + else (proxy, fitres) + ) + for proxy, fitres in ordered_results + ] + + return results, {} + + +def minmax_attack( + ordered_results, + states, + omniscent=True, + **kwargs, +): + """Apply Min-Max agnostic attack. + + Full-knowledge, perturbation function chosen according to our experimental + results. + From: + "Manipulating the Byzantine: Optimizing Model Poisoning Attacks and + Defenses for Federated Learning" (Shejwalkar et al., 2021) + + Parameters + ---------- + ordered_results + List of tuples (client_proxy, fit_result) ordered by client id. + states + Dictionary of client ids and their states (True if malicious, False + otherwise). + omniscent + Whether the attacker knows the local models of all clients or not. + threshold + Threshold for lambda. + lambda_init + Initial value for lambda. + + Returns + ------- + results + List of tuples (client_proxy, fit_result) ordered by client id. 
+ """ + dataset_name = kwargs.get("dataset_name", None) + threshold = kwargs.get("threshold", 1e-5) + lambda_init = kwargs.get("lambda", 5.0) + malicious_num = kwargs.get("malicious_num", 0) + + results = ordered_results.copy() + params = [parameters_to_ndarrays(fitres.parameters) for _, fitres in results] + params_avg = [np.mean(param, axis=0) for param in zip(*params)] + + if not omniscent: + # if not omniscent, the attacker doesn't know the + # local models of all clients, but only of the corrupted ones + results = [ + results[i] + for i in range(len(results)) + if states[results[i][1].metrics["cid"]] + ] + + # Decide what perturbation to use according to the + # results presented in the paper. + if dataset_name == "mnist": + # Apply std perturbation + # In the paper authors state that sign function is the best + # but in my experience std perturbation works better + perturbation_vect = [-np.std(layer, axis=0) for layer in zip(*params)] + elif dataset_name == "cifar": + # Apply std perturbation + perturbation_vect = [-np.std(layer, axis=0) for layer in zip(*params)] + else: + # Apply std perturbation + perturbation_vect = [-np.std(layer, axis=0) for layer in zip(*params)] + + # Compute lambda (referred as gamma in the paper) + lambda_succ = lambda_init + 1 + curr_lambda = lambda_init + step = lambda_init * 0.5 + while ( + abs(lambda_succ - curr_lambda) > threshold + and step > threshold + and malicious_num > 0 + ): + # Compute malicious gradients + perturbed_params = [ + curr_lambda * perturbation_vect[i] for i in range(len(perturbation_vect)) + ] + corrupted_params = [ + params_avg[i] + perturbed_params[i] for i in range(len(params_avg)) + ] + + # Set corrupted clients' updates to corrupted_params + params_c = [ + corrupted_params if states[str(i)] else params[i] + for i in range(len(params)) + ] + distance_matrix = _compute_distances(params_c) + + # Remove from matrix distance_matrix all malicious clients in both + # rows and columns + distance_matrix_b = 
np.delete( + distance_matrix, + [ + i + for i in range(len(distance_matrix)) + if states[results[i][1].metrics["cid"]] + ], + axis=0, + ) + distance_matrix_b = np.delete( + distance_matrix_b, + [ + i + for i in range(len(distance_matrix)) + if states[results[i][1].metrics["cid"]] + ], + axis=1, + ) + + # Remove from distance_matrix all benign clients on + # rows and all malicious on columns + distance_matrix_m = np.delete( + distance_matrix, + [ + i + for i in range(len(distance_matrix)) + if not states[results[i][1].metrics["cid"]] + ], + axis=0, + ) + distance_matrix_m = np.delete( + distance_matrix_m, + [ + i + for i in range(len(distance_matrix)) + if states[results[i][1].metrics["cid"]] + ], + axis=1, + ) + + # Take the maximum distance between any benign client and any malicious one + max_dist_m = np.max(distance_matrix_m) + + # Take the maximum distance between any two benign clients + max_dist_b = np.max(distance_matrix_b) + + # Compute lambda (best scaling coefficient) + if max_dist_m < max_dist_b: + # Lambda (gamma in the paper) is good. Save and try to increase it + lambda_succ = curr_lambda + curr_lambda = curr_lambda + step * 0.5 + else: + # Lambda is to big, must be reduced to increse the chances of being selected + curr_lambda = curr_lambda - step * 0.5 + step *= 0.5 + + # Compute the final malicious update + perturbation_vect = [ + lambda_succ * perturbation_vect[i] for i in range(len(perturbation_vect)) + ] + corrupted_params = [ + params_avg[i] + perturbation_vect[i] for i in range(len(params_avg)) + ] + corrupted_params = ndarrays_to_parameters(corrupted_params) + for proxy, fitres in ordered_results: + if states[fitres.metrics["cid"]]: + fitres.parameters = corrupted_params + results[int(fitres.metrics["cid"])] = (proxy, fitres) + return results, {} + + +def _krum(results, num_malicious, to_keep, num_closest=None): + """Get the best parameters vector according to the Krum function. + + Output: the best parameters vector. 
+ """ + weights = [w for w, _ in results] # list of weights + distance_matrix = _compute_distances(weights) # matrix of distances + + if not num_closest: + num_closest = ( + len(weights) - num_malicious - 2 + ) # number of closest points to use + if num_closest <= 0: + num_closest = 1 + elif num_closest > len(weights): + num_closest = len(weights) + + closest_indices = _get_closest_indices( + distance_matrix, num_closest + ) # indices of closest points + + scores = [ + np.sum(distance_matrix[i, closest_indices[i]]) + for i in range(len(distance_matrix)) + ] # scores i->j for each i + + best_index = np.argmin(scores) # index of the best score + best_indices = np.argsort(scores)[::-1][ + len(scores) - to_keep : + ] # indices of best scores (multikrum) + return weights[best_index], best_index, best_indices, scores + + +def _compute_distances(weights): + """Compute distances between vectors. + + Input: weights - list of weights vectors + Output: distances - matrix distance_matrix of squared distances between the vectors + """ + flat_w = np.array([np.concatenate(par, axis=None).ravel() for par in weights]) + distance_matrix = np.zeros((len(weights), len(weights))) + for i, _ in enumerate(flat_w): + for j, _ in enumerate(flat_w): + delta = flat_w[i] - flat_w[j] + dist = np.linalg.norm(delta) + distance_matrix[i, j] = dist**2 + return distance_matrix + + +def _get_closest_indices(distance_matrix, num_closest): + """Get the indices of the closest points. + + Args: + distance_matrix + matrix of distances + num_closest + number of closest points to get for each parameter vector + Output: + closest_indices + list of lists of indices of the closest points for each vector. 
+ """ + closest_indices = [] + for idx, _ in enumerate(distance_matrix): + closest_indices.append( + np.argsort(distance_matrix[idx])[1 : num_closest + 1].tolist() + ) + return closest_indices + + +def _fang_corrupt_params(global_model, lamda): + # Compute sign vector num_supporters + magnitude = [] + for i, _ in enumerate(global_model): + magnitude.append(np.sign(global_model[i]) * lamda) + + corrupted_params = [ + global_model[i] - magnitude[i] for i in range(len(global_model)) + ] # corrupted model + return corrupted_params + + +def _fang_corrupt_and_select(all_models, global_model, states, num_corrupted, lamda): + # Check that krum selects a malicious client + corrupted_params = _fang_corrupt_params(global_model, lamda) + all_models_m = [ + (corrupted_params, num_examples) if states[str(i)] else (model, num_examples) + for i, (model, num_examples) in enumerate(all_models) + ] + _, idx_best_model, _, _ = _krum(all_models_m, num_corrupted, 1) + + # Check if the best model is malicious + malicious_selected = states[str(idx_best_model)] + return malicious_selected, corrupted_params diff --git a/baselines/flanders/flanders/client.py b/baselines/flanders/flanders/client.py new file mode 100644 index 000000000000..57513ccf7291 --- /dev/null +++ b/baselines/flanders/flanders/client.py @@ -0,0 +1,174 @@ +"""Clients implementation for Flanders.""" + +from collections import OrderedDict +from pathlib import Path +from typing import Tuple + +import flwr as fl +import numpy as np +import ray +import torch + +from .dataset import get_dataloader, mnist_transformation +from .models import ( + FMnistNet, + MnistNet, + test_fmnist, + test_mnist, + train_fmnist, + train_mnist, +) + +XY = Tuple[np.ndarray, np.ndarray] + + +def get_params(model): + """Get model weights as a list of NumPy ndarrays.""" + return [val.cpu().numpy() for _, val in model.state_dict().items()] + + +def set_params(model, params): + """Set model weights from a list of NumPy ndarrays.""" + params_dict = 
zip(model.state_dict().keys(), params) + state_dict = OrderedDict({k: torch.from_numpy(np.copy(v)) for k, v in params_dict}) + model.load_state_dict(state_dict, strict=True) + + +class MnistClient(fl.client.NumPyClient): + """Implementation of MNIST image classification using PyTorch.""" + + def __init__(self, cid, fed_dir_data): + """Instantiate a client for the MNIST dataset.""" + self.cid = cid + self.fed_dir = Path(fed_dir_data) + self.properties = {"tensor_type": "numpy.ndarray"} + + # Instantiate model + self.net = MnistNet() + + # Determine device + # self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + if torch.cuda.is_available(): + self.device = torch.device("cuda") + elif torch.backends.mps.is_available(): + self.device = torch.device("mps") + else: + self.device = torch.device("cpu") + + def get_parameters(self, config): + """Get model parameters as a list of NumPy ndarrays.""" + return get_params(self.net) + + def fit(self, parameters, config): + """Set model parameters from a list of NumPy ndarrays.""" + set_params(self.net, parameters) + + # Load data for this client and get trainloader + num_workers = 1 + trainloader = get_dataloader( + self.fed_dir, + self.cid, + is_train=True, + batch_size=config["batch_size"], + workers=num_workers, + transform=mnist_transformation, + ) + + self.net.to(self.device) + train_mnist(self.net, trainloader, epochs=config["epochs"], device=self.device) + + return ( + get_params(self.net), + len(trainloader.dataset), + {"cid": self.cid, "malicious": config["malicious"]}, + ) + + def evaluate(self, parameters, config): + """Evaluate using local test dataset.""" + set_params(self.net, parameters) + + # Load data for this client and get trainloader + num_workers = len(ray.worker.get_resource_ids()["CPU"]) + valloader = get_dataloader( + self.fed_dir, + self.cid, + is_train=False, + batch_size=50, + workers=num_workers, + transform=mnist_transformation, + ) + + self.net.to(self.device) + loss, 
accuracy = test_mnist(self.net, valloader, device=self.device) + + return float(loss), len(valloader.dataset), {"accuracy": float(accuracy)} + + +class FMnistClient(fl.client.NumPyClient): + """Implementation of MNIST image classification using PyTorch.""" + + def __init__(self, cid, fed_dir_data): + """Instantiate a client for the MNIST dataset.""" + self.cid = cid + self.fed_dir = Path(fed_dir_data) + self.properties = {"tensor_type": "numpy.ndarray"} + + # Instantiate model + self.net = FMnistNet() + + # Determine device + # self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + if torch.cuda.is_available(): + self.device = torch.device("cuda") + elif torch.backends.mps.is_available(): + self.device = torch.device("mps") + else: + self.device = torch.device("cpu") + + def get_parameters(self, config): + """Get model parameters as a list of NumPy ndarrays.""" + return get_params(self.net) + + def fit(self, parameters, config): + """Set model parameters from a list of NumPy ndarrays.""" + set_params(self.net, parameters) + + # Load data for this client and get trainloader + num_workers = 1 + trainloader = get_dataloader( + self.fed_dir, + self.cid, + is_train=True, + batch_size=config["batch_size"], + workers=num_workers, + transform=mnist_transformation, + ) + + self.net.to(self.device) + train_fmnist(self.net, trainloader, epochs=config["epochs"], device=self.device) + + return ( + get_params(self.net), + len(trainloader.dataset), + {"cid": self.cid, "malicious": config["malicious"]}, + ) + + def evaluate(self, parameters, config): + """Evaluate using local test dataset.""" + set_params(self.net, parameters) + + # Load data for this client and get trainloader + num_workers = len(ray.worker.get_resource_ids()["CPU"]) + valloader = get_dataloader( + self.fed_dir, + self.cid, + is_train=False, + batch_size=50, + workers=num_workers, + transform=mnist_transformation, + ) + + self.net.to(self.device) + loss, accuracy = test_fmnist(self.net, 
valloader, device=self.device) + + return float(loss), len(valloader.dataset), {"accuracy": float(accuracy)} diff --git a/baselines/flanders/flanders/conf/aggregate_fn/bulyan.yaml b/baselines/flanders/flanders/conf/aggregate_fn/bulyan.yaml new file mode 100644 index 000000000000..1361f158daf1 --- /dev/null +++ b/baselines/flanders/flanders/conf/aggregate_fn/bulyan.yaml @@ -0,0 +1,9 @@ +--- +name: bulyan + +aggregate_fn: + function: flwr.server.strategy.aggregate.aggregate_bulyan + parameters: + aggregation_name: aggregate_krum + aggregation_module_name: flwr.server.strategy.aggregate + to_keep: 0 # if 0, normal Krum is applied \ No newline at end of file diff --git a/baselines/flanders/flanders/conf/aggregate_fn/fedavg.yaml b/baselines/flanders/flanders/conf/aggregate_fn/fedavg.yaml new file mode 100644 index 000000000000..826a4163b2eb --- /dev/null +++ b/baselines/flanders/flanders/conf/aggregate_fn/fedavg.yaml @@ -0,0 +1,6 @@ +--- +name: fedavg + +aggregate_fn: + function: flwr.server.strategy.aggregate.aggregate + parameters: {} \ No newline at end of file diff --git a/baselines/flanders/flanders/conf/aggregate_fn/fedmedian.yaml b/baselines/flanders/flanders/conf/aggregate_fn/fedmedian.yaml new file mode 100644 index 000000000000..7bf0a725ab6f --- /dev/null +++ b/baselines/flanders/flanders/conf/aggregate_fn/fedmedian.yaml @@ -0,0 +1,6 @@ +--- +name: fedmedian + +aggregate_fn: + function: flwr.server.strategy.aggregate.aggregate_median + parameters: {} \ No newline at end of file diff --git a/baselines/flanders/flanders/conf/aggregate_fn/krum.yaml b/baselines/flanders/flanders/conf/aggregate_fn/krum.yaml new file mode 100644 index 000000000000..220b93d92b3e --- /dev/null +++ b/baselines/flanders/flanders/conf/aggregate_fn/krum.yaml @@ -0,0 +1,7 @@ +--- +name: krum + +aggregate_fn: + function: flwr.server.strategy.aggregate.aggregate_krum + parameters: + to_keep: 10 \ No newline at end of file diff --git 
a/baselines/flanders/flanders/conf/aggregate_fn/trimmedmean.yaml b/baselines/flanders/flanders/conf/aggregate_fn/trimmedmean.yaml new file mode 100644 index 000000000000..d2e418fa9738 --- /dev/null +++ b/baselines/flanders/flanders/conf/aggregate_fn/trimmedmean.yaml @@ -0,0 +1,7 @@ +--- +name: trimmedmean + +aggregate_fn: + function: flwr.server.strategy.aggregate.aggregate_trimmed_avg + parameters: + proportiontocut: 0.4 \ No newline at end of file diff --git a/baselines/flanders/flanders/conf/base.yaml b/baselines/flanders/flanders/conf/base.yaml new file mode 100644 index 000000000000..9742d85e2af8 --- /dev/null +++ b/baselines/flanders/flanders/conf/base.yaml @@ -0,0 +1,27 @@ +defaults: + - _self_ + - strategy: fedavg + - aggregate_fn: fedavg + +dataset: mnist + +server: + _target_: flanders.server.EnhancedServer + num_rounds: 100 + pool_size: 100 + warmup_rounds: 2 + sampling: 500 + history_dir: clients_params + magnitude: 10 + threshold: 1e-05 + attack_fn: gaussian + num_malicious: 0 + omniscent: True + noniidness: 0.5 + +server_device: cpu +seed: 33 + +client_resources: + num_cpus: 1 + num_gpus: 0 \ No newline at end of file diff --git a/baselines/flanders/flanders/conf/strategy/bulyan.yaml b/baselines/flanders/flanders/conf/strategy/bulyan.yaml new file mode 100644 index 000000000000..1692d5d4306c --- /dev/null +++ b/baselines/flanders/flanders/conf/strategy/bulyan.yaml @@ -0,0 +1,8 @@ +--- +name: bulyan + +strategy: + _target_: flwr.server.strategy.Bulyan + _recursive_: true + num_malicious_clients: ${server.num_malicious} + to_keep: 0 # Normal Krum is applied \ No newline at end of file diff --git a/baselines/flanders/flanders/conf/strategy/fedavg.yaml b/baselines/flanders/flanders/conf/strategy/fedavg.yaml new file mode 100644 index 000000000000..1be4b0a0cc5b --- /dev/null +++ b/baselines/flanders/flanders/conf/strategy/fedavg.yaml @@ -0,0 +1,5 @@ +--- +name: fedavg + +strategy: + _target_: flwr.server.strategy.FedAvg \ No newline at end of file diff
--git a/baselines/flanders/flanders/conf/strategy/fedmedian.yaml b/baselines/flanders/flanders/conf/strategy/fedmedian.yaml new file mode 100644 index 000000000000..d79293f4ca23 --- /dev/null +++ b/baselines/flanders/flanders/conf/strategy/fedmedian.yaml @@ -0,0 +1,5 @@ +--- +name: fedmedian + +strategy: + _target_: flwr.server.strategy.FedMedian \ No newline at end of file diff --git a/baselines/flanders/flanders/conf/strategy/flanders.yaml b/baselines/flanders/flanders/conf/strategy/flanders.yaml new file mode 100644 index 000000000000..0222708dd836 --- /dev/null +++ b/baselines/flanders/flanders/conf/strategy/flanders.yaml @@ -0,0 +1,10 @@ +--- +name: flanders + +strategy: + _target_: flanders.strategy.Flanders + _recursive_: true + num_clients_to_keep: 3 # number of benign local models to filter-out before the aggregation (atm it's set to be pool_size - num_malicious, hard coded in main.py) + maxiter: 100 # number of iterations done by MAR + alpha: 1 + beta: 1 \ No newline at end of file diff --git a/baselines/flanders/flanders/conf/strategy/krum.yaml b/baselines/flanders/flanders/conf/strategy/krum.yaml new file mode 100644 index 000000000000..bc36d37755fa --- /dev/null +++ b/baselines/flanders/flanders/conf/strategy/krum.yaml @@ -0,0 +1,7 @@ +--- +name: krum + +strategy: + _target_: flwr.server.strategy.Krum + num_clients_to_keep: 3 + num_malicious_clients: ${server.num_malicious} \ No newline at end of file diff --git a/baselines/flanders/flanders/conf/strategy/trimmedmean.yaml b/baselines/flanders/flanders/conf/strategy/trimmedmean.yaml new file mode 100644 index 000000000000..561755f82d35 --- /dev/null +++ b/baselines/flanders/flanders/conf/strategy/trimmedmean.yaml @@ -0,0 +1,6 @@ +--- +name: trimmedmean + +strategy: + _target_: flwr.server.strategy.FedTrimmedAvg + beta: 0.2 \ No newline at end of file diff --git a/baselines/flanders/flanders/dataset.py b/baselines/flanders/flanders/dataset.py new file mode 100644 index 000000000000..2c13e80d75c5 --- 
/dev/null +++ b/baselines/flanders/flanders/dataset.py @@ -0,0 +1,289 @@ +"""Dataset utilities for FL experiments.""" + +# Borrowed from adap/Flower examples + +import shutil +from pathlib import Path +from typing import Any, Callable, Optional, Tuple + +import numpy as np +import torch +from PIL import Image +from torch.utils.data import DataLoader, SubsetRandomSampler +from torchvision import datasets, transforms +from torchvision.datasets import VisionDataset + +from .dataset_preparation import create_lda_partitions + + +class Data(torch.utils.data.Dataset): + """Dataset class.""" + + def __init__(self, X, y): + """Initialize dataset.""" + self.X = torch.from_numpy(X.astype(np.float32)) + self.y = torch.from_numpy(y.astype(np.float32)) + self.len = self.X.shape[0] + + def __getitem__(self, index): + """Return data and label pair.""" + return self.X[index], self.y[index] + + def __len__(self): + """Return size of dataset.""" + return self.len + + +def get_dataset(path_to_data: Path, cid: str, partition: str, transform=None): + """Return TorchVisionFL dataset object.""" + # generate path to cid's data + path_to_data = path_to_data / cid / (partition + ".pt") + + return TorchVisionFL(path_to_data, transform=transform) + + +# pylint: disable=too-many-arguments, too-many-locals +def get_dataloader( + path_to_data: str, + cid: str, + is_train: bool, + batch_size: int, + workers: int, + transform=None, +): + """Generate trainset/valset object and returns appropiate dataloader.""" + partition = "train" if is_train else "val" + dataset = get_dataset(Path(path_to_data), str(cid), partition, transform=transform) + + # we use as number of workers all the cpu cores assigned to this actor + kwargs = {"num_workers": workers, "pin_memory": True, "drop_last": False} + return DataLoader(dataset, batch_size=batch_size, **kwargs) + + +def get_random_id_splits(total: int, val_ratio: float, shuffle: bool = True): + """Random split. 
+ + Split a list of length `total` into two following a (1-val_ratio):val_ratio + partitioning. + + By default the indices are shuffled before creating the split and returning. + """ + if isinstance(total, int): + indices = list(range(total)) + else: + indices = total + + split = int(np.floor(val_ratio * len(indices))) + # print(f"Users left out for validation (ratio={val_ratio}) = {split} ") + if shuffle: + np.random.shuffle(indices) + return indices[split:], indices[:split] + + +# pylint: disable=too-many-arguments, too-many-locals +def do_fl_partitioning( + path_to_dataset, pool_size, alpha, num_classes, val_ratio=0.0, seed=None +): + """Torchvision (e.g. CIFAR-10) datasets using LDA.""" + images, labels = torch.load(path_to_dataset) + idx = np.array(range(len(images))) + dataset = [idx, labels] + partitions, _ = create_lda_partitions( + dataset, + num_partitions=pool_size, + concentration=alpha, + accept_imbalanced=True, + seed=seed, + ) + + # Show label distribution for first partition (purely informative) + partition_zero = partitions[0][1] + hist, _ = np.histogram(partition_zero, bins=list(range(num_classes + 1))) + print( + "Class histogram for 0-th partition" + f"(alpha={alpha}, {num_classes} classes): {hist}" + ) + + # now save partitioned dataset to disk + # first delete dir containing splits (if exists), then create it + splits_dir = path_to_dataset.parent / "federated" + if splits_dir.exists(): + shutil.rmtree(splits_dir) + Path.mkdir(splits_dir, parents=True) + + for idx in range(pool_size): + labels = partitions[idx][1] + image_idx = partitions[idx][0] + imgs = images[image_idx] + + # create dir + Path.mkdir(splits_dir / str(idx)) + + if val_ratio > 0.0: + # split data according to val_ratio + train_idx, val_idx = get_random_id_splits(len(labels), val_ratio) + val_imgs = imgs[val_idx] + val_labels = labels[val_idx] + + with open(splits_dir / str(idx) / "val.pt", "wb") as fil: + torch.save([val_imgs, val_labels], fil) + + # remaining images for 
training + imgs = imgs[train_idx] + labels = labels[train_idx] + + with open(splits_dir / str(idx) / "train.pt", "wb") as fil: + torch.save([imgs, labels], fil) + + return splits_dir + + +def mnist_transformation(img): + """Return TorchVision transformation for MNIST.""" + return transforms.Compose( + [ + transforms.ToTensor(), + transforms.Normalize(mean=(0.5,), std=(0.5,)), + ] + )(img) + + +class TorchVisionFL(VisionDataset): + """TorchVision FL class. + + Use this class by either passing a path to a torch file (.pt) containing (data, + targets) or pass the data, targets directly instead. + + This is just a trimmed down version of torchvision.datasets.MNIST. + """ + + def __init__( + self, + path_to_data=None, + data=None, + targets=None, + transform: Optional[Callable] = None, + ) -> None: + """Initialize dataset.""" + path = path_to_data.parent if path_to_data else None + super().__init__(path, transform=transform) + self.transform = transform + + if path_to_data: + # load data and targets (path_to_data points to an specific .pt file) + self.data, self.targets = torch.load(path_to_data) + else: + self.data = data + self.targets = targets + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """Return a tuple (data, target).""" + img, target = self.data[index], int(self.targets[index]) + + # doing this so that it is consistent with all other datasets + # to return a PIL Image + if not isinstance(img, Image.Image): # if not PIL image + if not isinstance(img, np.ndarray): # if torch tensor + img = img.numpy() + + img = Image.fromarray(img) + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def __len__(self) -> int: + """Return length of dataset.""" + return len(self.data) + + +def get_mnist(path_to_data="flanders/datasets_files/mnist/data"): + """Download MNIST dataset.""" + # download dataset and load train set + train_set = 
datasets.MNIST(root=path_to_data, train=True, download=True) + + # fuse all data splits into a single "training.pt" + data_loc = Path(path_to_data) / "MNIST" + training_data = data_loc / "training.pt" + print("Generating unified MNIST dataset") + torch.save([train_set.data, np.array(train_set.targets)], training_data) + + test_set = datasets.MNIST( + root=path_to_data, train=False, transform=mnist_transformation + ) + + # returns path where training data is and testset + return training_data, test_set + + +def get_fmnist(path_to_data="flanders/datasets_files/fmnist/data"): + """Download FashionMNIST dataset.""" + # download dataset and load train set + train_set = datasets.FashionMNIST(root=path_to_data, train=True, download=True) + + # fuse all data splits into a single "training.pt" + data_loc = Path(path_to_data) / "FashionMNIST" + training_data = data_loc / "training.pt" + print("Generating unified FashionMNIST dataset") + torch.save([train_set.data, np.array(train_set.targets)], training_data) + + test_set = datasets.FashionMNIST( + root=path_to_data, train=False, transform=mnist_transformation + ) + + # returns path where training data is and testset + return training_data, test_set + + +def dataset_partitioner( + dataset: torch.utils.data.Dataset, + batch_size: int, + client_id: int, + number_of_clients: int, + workers: int = 1, +) -> torch.utils.data.DataLoader: + """Make datasets partitions for a specific client_id. + + Parameters + ---------- + dataset: torch.utils.data.Dataset + Dataset to be partitioned into *number_of_clients* subsets. + batch_size: int + Size of mini-batches used by the returned DataLoader. + client_id: int + Unique integer used for selecting a specific partition. + number_of_clients: int + Total number of clients launched during training. + This value dictates the number of partitions to be created. 
+ + Returns + ------- + data_loader: torch.utils.data.DataLoader + DataLoader for specific client_id considering number_of_clients partitions. + """ + # Set the seed so we are sure to generate the same global batches + # indices across all clients + np.random.seed(123) + + # Get the data corresponding to this client + dataset_size = len(dataset) + nb_samples_per_clients = dataset_size // number_of_clients + dataset_indices = list(range(dataset_size)) + np.random.shuffle(dataset_indices) + + # Get starting and ending indices w.r.t CLIENT_ID + start_ind = int(client_id) * nb_samples_per_clients + end_ind = start_ind + nb_samples_per_clients + data_sampler = SubsetRandomSampler(dataset_indices[start_ind:end_ind]) + data_loader = torch.utils.data.DataLoader( + dataset, + batch_size=batch_size, + shuffle=False, + sampler=data_sampler, + num_workers=workers, + ) + return data_loader diff --git a/baselines/flanders/flanders/dataset_preparation.py b/baselines/flanders/flanders/dataset_preparation.py new file mode 100644 index 000000000000..3c1cfbe6a5d2 --- /dev/null +++ b/baselines/flanders/flanders/dataset_preparation.py @@ -0,0 +1,490 @@ +# Copyright 2020 Adap GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+# ============================================================================== +"""Commonly used functions for generating partitioned datasets.""" + +# pylint: disable=invalid-name + +from typing import List, Optional, Tuple, Union + +import numpy as np +from numpy.random import BitGenerator, Generator, SeedSequence + +XY = Tuple[np.ndarray, np.ndarray] +XYList = List[XY] +PartitionedDataset = Tuple[XYList, XYList] + + +def float_to_int(i: float) -> int: + """Return float as int but raise if decimal is dropped.""" + if not i.is_integer(): + raise Exception("Cast would drop decimals") + + return int(i) + + +def sort_by_label(x: np.ndarray, y: np.ndarray) -> XY: + """Sort by label. + + Assuming two labels and four examples the resulting label order would be 1,1,2,2 + """ + idx = np.argsort(y, axis=0).reshape((y.shape[0])) + return (x[idx], y[idx]) + + +def sort_by_label_repeating(x: np.ndarray, y: np.ndarray) -> XY: + """Sort by label in repeating groups. + + Assuming two labels and four examples the resulting label order would be 1,2,1,2. + + Create sorting index which is applied to by label sorted x, y + + .. 
code-block:: python + + # given: + y = [ + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9 + ] + + # use: + idx = [ + 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19 + ] + + # so that y[idx] becomes: + y = [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 + ] + """ + x, y = sort_by_label(x, y) + + num_example = x.shape[0] + num_class = np.unique(y).shape[0] + idx = ( + np.array(range(num_example), np.int64) + .reshape((num_class, num_example // num_class)) + .transpose() + .reshape(num_example) + ) + + return (x[idx], y[idx]) + + +def split_at_fraction(x: np.ndarray, y: np.ndarray, fraction: float) -> Tuple[XY, XY]: + """Split x, y at a certain fraction.""" + splitting_index = float_to_int(x.shape[0] * fraction) + # Take everything BEFORE splitting_index + x_0, y_0 = x[:splitting_index], y[:splitting_index] + # Take everything AFTER splitting_index + x_1, y_1 = x[splitting_index:], y[splitting_index:] + return (x_0, y_0), (x_1, y_1) + + +def shuffle(x: np.ndarray, y: np.ndarray) -> XY: + """Shuffle x and y.""" + idx = np.random.permutation(len(x)) + return x[idx], y[idx] + + +def partition(x: np.ndarray, y: np.ndarray, num_partitions: int) -> List[XY]: + """Return x, y as list of partitions.""" + return list(zip(np.split(x, num_partitions), np.split(y, num_partitions))) + + +def combine_partitions(xy_list_0: XYList, xy_list_1: XYList) -> XYList: + """Combine two lists of ndarray Tuples into one list.""" + return [ + (np.concatenate([x_0, x_1], axis=0), np.concatenate([y_0, y_1], axis=0)) + for (x_0, y_0), (x_1, y_1) in zip(xy_list_0, xy_list_1) + ] + + +def create_partitions( + unpartitioned_dataset: XY, + iid_fraction: float, + num_partitions: int, +) -> XYList: + """Create partitioned version of a training or test set. 
+ + Currently tested and supported are MNIST and FashionMNIST + """ + x, y = unpartitioned_dataset + + x, y = shuffle(x, y) + x, y = sort_by_label_repeating(x, y) + + (x_0, y_0), (x_1, y_1) = split_at_fraction(x, y, fraction=iid_fraction) + + # Shift in second split of dataset the classes into two groups + x_1, y_1 = _shift(x_1, y_1) + + xy_0_partitions = partition(x_0, y_0, num_partitions) + xy_1_partitions = partition(x_1, y_1, num_partitions) + + xy_partitions = combine_partitions(xy_0_partitions, xy_1_partitions) + + # Adjust x and y shape + return [adjust_xy_shape(xy) for xy in xy_partitions] + + +def create_partitioned_dataset( + keras_dataset: Tuple[XY, XY], + iid_fraction: float, + num_partitions: int, +) -> Tuple[PartitionedDataset, XY]: + """Create partitioned version of keras dataset. + + Currently tested and supported are MNIST and FashionMNIST + """ + xy_train, xy_test = keras_dataset + + xy_train_partitions = create_partitions( + unpartitioned_dataset=xy_train, + iid_fraction=iid_fraction, + num_partitions=num_partitions, + ) + + xy_test_partitions = create_partitions( + unpartitioned_dataset=xy_test, + iid_fraction=iid_fraction, + num_partitions=num_partitions, + ) + + return (xy_train_partitions, xy_test_partitions), adjust_xy_shape(xy_test) + + +def log_distribution(xy_partitions: XYList) -> None: + """Print label distribution for list of paritions.""" + distro = [np.unique(y, return_counts=True) for _, y in xy_partitions] + for d in distro: + print(d) + + +def adjust_xy_shape(xy: XY) -> XY: + """Adjust shape of both x and y.""" + x, y = xy + if x.ndim == 3: + x = adjust_x_shape(x) + if y.ndim == 2: + y = adjust_y_shape(y) + return (x, y) + + +def adjust_x_shape(nda: np.ndarray) -> np.ndarray: + """Turn shape (x, y, z) into (x, y, z, 1).""" + nda_adjusted = np.reshape(nda, (nda.shape[0], nda.shape[1], nda.shape[2], 1)) + return nda_adjusted + + +def adjust_y_shape(nda: np.ndarray) -> np.ndarray: + """Turn shape (x, 1) into (x).""" + nda_adjusted = 
np.reshape(nda, (nda.shape[0])) + return nda_adjusted + + +def split_array_at_indices( + x: np.ndarray, split_idx: np.ndarray +) -> List[List[np.ndarray]]: + """Split an array `x` into list of elements using starting indices from `split_idx`. + + This function should be used with `unique_indices` from `np.unique()` after + sorting by label. + + Args: + x (np.ndarray): Original array of dimension (N,a,b,c,...) + split_idx (np.ndarray): 1-D array contaning increasing number of + indices to be used as partitions. Initial value must be zero. Last value + must be less than N. + + Returns + ------- + List[List[np.ndarray]]: List of list of samples. + """ + if split_idx.ndim != 1: + raise ValueError("Variable `split_idx` must be a 1-D numpy array.") + if split_idx.dtype != np.int64: + raise ValueError("Variable `split_idx` must be of type np.int64.") + if split_idx[0] != 0: + raise ValueError("First value of `split_idx` must be 0.") + if split_idx[-1] >= x.shape[0]: + raise ValueError( + """Last value in `split_idx` must be less than + the number of samples in `x`.""" + ) + if not np.all(split_idx[:-1] <= split_idx[1:]): + raise ValueError("Items in `split_idx` must be in increasing order.") + + num_splits: int = len(split_idx) + split_idx = np.append(split_idx, x.shape[0]) + + list_samples_split: List[List[np.ndarray]] = [[] for _ in range(num_splits)] + for j in range(num_splits): + tmp_x = x[split_idx[j] : split_idx[j + 1]] # noqa: E203 + for sample in tmp_x: + list_samples_split[j].append(sample) + + return list_samples_split + + +def exclude_classes_and_normalize( + distribution: np.ndarray, exclude_dims: List[bool], eps: float = 1e-5 +) -> np.ndarray: + """Exclude classes from a distribution. + + This function is particularly useful when sampling without replacement. + Classes for which no sample is available have their probabilities are set to 0. 
+ Classes that had probabilities originally set to 0 are incremented with + `eps` to allow sampling from remaining items. + + Args: + distribution (np.array): Distribution being used. + exclude_dims (List[bool]): Dimensions to be excluded. + eps (float, optional): Small value to be addad to non-excluded dimensions. + Defaults to 1e-5. + + Returns + ------- + np.ndarray: Normalized distributions. + """ + if np.any(distribution < 0) or (not np.isclose(np.sum(distribution), 1.0)): + raise ValueError("distribution must sum to 1 and have only positive values.") + + if distribution.size != len(exclude_dims): + raise ValueError( + """Length of distribution must be equal + to the length `exclude_dims`.""" + ) + if eps < 0: + raise ValueError("""The value of `eps` must be positive and small.""") + + distribution[[not x for x in exclude_dims]] += eps + distribution[exclude_dims] = 0.0 + sum_rows = np.sum(distribution) + np.finfo(float).eps + distribution = distribution / sum_rows + + return distribution + + +def sample_without_replacement( + distribution: np.ndarray, + list_samples: List[List[np.ndarray]], + num_samples: int, + empty_classes: List[bool], +) -> Tuple[XY, List[bool]]: + """Sample from a list without replacement using a given distribution. + + Args: + distribution (np.ndarray): Distribution used for sampling. + list_samples(List[List[np.ndarray]]): List of samples. + num_samples (int): Total number of items to be sampled. + empty_classes (List[bool]): List of booleans indicating which classes are empty. + This is useful to differentiate which classes should still be sampled. + + Returns + ------- + XY: Dataset contaning samples + List[bool]: empty_classes. 
+ """ + if np.sum([len(x) for x in list_samples]) < num_samples: + raise ValueError( + """Number of samples in `list_samples` is less than `num_samples`""" + ) + + # Make sure empty classes are not sampled + # and solves for rare cases where + if not empty_classes: + empty_classes = len(distribution) * [False] + + distribution = exclude_classes_and_normalize( + distribution=distribution, exclude_dims=empty_classes + ) + + data: List[np.ndarray] = [] + target: List[np.ndarray] = [] + + for _ in range(num_samples): + sample_class = np.where(np.random.multinomial(1, distribution) == 1)[0][0] + sample: np.ndarray = list_samples[sample_class].pop() + + data.append(sample) + target.append(sample_class) + + # If last sample of the class was drawn, then set the + # probability density function (PDF) to zero for that class. + if len(list_samples[sample_class]) == 0: + empty_classes[sample_class] = True + # Be careful to distinguish between classes that had zero probability + # and classes that are now empty + distribution = exclude_classes_and_normalize( + distribution=distribution, exclude_dims=empty_classes + ) + data_array: np.ndarray = np.concatenate([data], axis=0) + target_array: np.ndarray = np.array(target, dtype=np.int64) + + return (data_array, target_array), empty_classes + + +def get_partitions_distributions(partitions: XYList) -> Tuple[np.ndarray, List[int]]: + """Evaluate the distribution over classes for a set of partitions. 
+ + Args: + partitions (XYList): Input partitions + + Returns + ------- + np.ndarray: Distributions of size (num_partitions, num_classes) + """ + # Get largest available label + labels = set() + for _, y in partitions: + labels.update(set(y)) + list_labels = sorted(labels) + bin_edges = np.arange(len(list_labels) + 1) + + # Pre-allocate distributions + distributions = np.zeros((len(partitions), len(list_labels)), dtype=np.float32) + for idx, (_, _y) in enumerate(partitions): + hist, _ = np.histogram(_y, bin_edges) + distributions[idx] = hist / hist.sum() + + return distributions, list_labels + + +def create_lda_partitions( + dataset: XY, + dirichlet_dist: Optional[np.ndarray] = None, + num_partitions: int = 100, + concentration: Union[float, np.ndarray, List[float]] = 0.5, + accept_imbalanced: bool = False, + seed: Optional[Union[int, SeedSequence, BitGenerator, Generator]] = None, +) -> Tuple[XYList, np.ndarray]: + r"""Create imbalanced non-iid partitions. + + Create imbalanced non-iid partitions using Latent Dirichlet Allocation (LDA) + without resampling. + + Args: + dataset (XY): Dataset containing samples X and labels Y. + dirichlet_dist (numpy.ndarray, optional): previously generated distribution to + be used. This is useful when applying the same distribution for train and + validation sets. + num_partitions (int, optional): Number of partitions to be created. + Defaults to 100. + concentration (float, np.ndarray, List[float]): Dirichlet Concentration + (:math:`\\alpha`) parameter. Set to float('inf') to get uniform partitions. + An :math:`\\alpha \\to \\Inf` generates uniform distributions over classes. + An :math:`\\alpha \\to 0.0` generates one class per client. Defaults to 0.5. + accept_imbalanced (bool): Whether or not to accept imbalanced output classes. + Default False. + seed (None, int, SeedSequence, BitGenerator, Generator): + A seed to initialize the BitGenerator for generating the Dirichlet + distribution. 
This is defined in Numpy's official documentation as follows: + If None, then fresh, unpredictable entropy will be pulled from the OS. + One may also pass in a SeedSequence instance. + Additionally, when passed a BitGenerator, it will be wrapped by Generator. + If passed a Generator, it will be returned unaltered. + See official Numpy Documentation for further details. + + Returns + ------- + Tuple[XYList, numpy.ndarray]: List of XYList containing partitions + for each dataset and the dirichlet probability density functions. + """ + # pylint: disable=too-many-arguments,too-many-locals + + x, y = dataset + x, y = shuffle(x, y) + x, y = sort_by_label(x, y) + + if (x.shape[0] % num_partitions) and (not accept_imbalanced): + raise ValueError( + """Total number of samples must be a multiple of `num_partitions`. + If imbalanced classes are allowed, set + `accept_imbalanced=True`.""" + ) + + num_samples = num_partitions * [0] + for j in range(x.shape[0]): + num_samples[j % num_partitions] += 1 + + # Get number of classes and verify if they matching with + classes, start_indices = np.unique(y, return_index=True) + + # Make sure that concentration is np.array and + # check if concentration is appropriate + concentration = np.asarray(concentration) + + # Check if concentration is Inf, if so create uniform partitions + partitions: List[XY] = [(_, _) for _ in range(num_partitions)] + if float("inf") in concentration: + partitions = create_partitions( + unpartitioned_dataset=(x, y), + iid_fraction=1.0, + num_partitions=num_partitions, + ) + dirichlet_dist = get_partitions_distributions(partitions)[0] + + return partitions, dirichlet_dist + + if concentration.size == 1: + concentration = np.repeat(concentration, classes.size) + elif concentration.size != classes.size: # Sequence + raise ValueError( + f"The size of the provided concentration ({concentration.size}) ", + f"must be either 1 or equal number of classes {classes.size})", + ) + + # Split into list of list of samples per 
def _shift(x: np.ndarray, y: np.ndarray) -> XY:
    """Shift data.

    Reorder the dataset so that, after a label sort, the first half holds
    labels 0-4 and the second half labels 5-9; each half is then shuffled
    internally before the two halves are concatenated back together.
    """
    x, y = sort_by_label(x, y)

    (first_x, first_y), (second_x, second_y) = split_at_fraction(x, y, fraction=0.5)
    first_x, first_y = shuffle(first_x, first_y)
    second_x, second_y = shuffle(second_x, second_y)
    return (
        np.concatenate([first_x, second_x], axis=0),
        np.concatenate([first_y, second_y], axis=0),
    )
# pylint: disable=too-many-locals, too-many-branches, too-many-statements
@hydra.main(config_path="conf", config_name="base", version_base=None)
def main(cfg: DictConfig) -> None:
    """Run the baseline.

    Parameters
    ----------
    cfg : DictConfig
        An omegaconf object that stores the hydra config.
    """
    # 0. Seed every RNG the experiment touches so runs are reproducible
    seed = cfg.seed
    np.random.seed(seed)
    np.random.set_state(
        np.random.RandomState(seed).get_state()  # pylint: disable=no-member
    )
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)

    # 1. Print parsed config
    print(OmegaConf.to_yaml(cfg))

    # Skip grid points that are not meaningful:
    # - bulyan with more than 20 malicious clients
    # - a non-gaussian attack with zero malicious clients is a no-op,
    #   except "na" which is the explicit no-attack baseline
    if cfg.strategy.name == "bulyan" and cfg.server.num_malicious > 20:
        print(
            "Skipping experiment because strategy is bulyan and num_malicious is > 20"
        )
        return
    if (
        cfg.server.attack_fn != "gaussian"
        and cfg.server.num_malicious == 0
        and cfg.server.attack_fn != "na"
    ):
        print(
            "Skipping experiment because attack_fn is not gaussian and "
            "num_malicious is 0"
        )
        return

    attacks = {
        "na": no_attack,
        "gaussian": gaussian_attack,
        "lie": lie_attack,
        "fang": fang_attack,  # OPT
        "minmax": minmax_attack,  # AGR-MM
    }

    clients = {
        "mnist": (MnistClient, mnist_evaluate),
        "fmnist": (FMnistClient, fmnist_evaluate),
    }

    # Drop stale per-client parameter histories from previous runs
    if os.path.exists(cfg.server.history_dir):
        shutil.rmtree(cfg.server.history_dir)

    dataset_name = cfg.dataset
    attack_fn = cfg.server.attack_fn
    num_malicious = cfg.server.num_malicious

    # 2. Prepare your dataset
    if dataset_name in ["mnist", "fmnist"]:
        if dataset_name == "mnist":
            train_path, _ = get_mnist()
        elif dataset_name == "fmnist":
            train_path, _ = get_fmnist()
        fed_dir = do_fl_partitioning(
            train_path,
            pool_size=cfg.server.pool_size,
            alpha=cfg.server.noniidness,
            num_classes=10,
            val_ratio=0.2,
            seed=seed,
        )
    else:
        raise ValueError("Dataset not supported")

    # 3. Define your clients
    # pylint: disable=no-else-return
    def client_fn(cid: str, dataset_name: str = dataset_name):
        client = clients[dataset_name][0]
        if dataset_name in ["mnist", "fmnist"]:
            return client(cid, fed_dir)
        else:
            raise ValueError("Dataset not supported")

    # 4. Define your strategy. All strategies share the same client-sampling
    # configuration; only the robustness-specific arguments differ per branch.
    common_kwargs = {
        "evaluate_fn": clients[dataset_name][1],
        "on_fit_config_fn": fit_config,
        "fraction_fit": 1,
        "fraction_evaluate": 0,
        "min_fit_clients": cfg.server.pool_size,
        "min_evaluate_clients": 0,
        "min_available_clients": cfg.server.pool_size,
    }
    strategy = None
    if cfg.strategy.name == "flanders":
        # Resolve the dotted aggregation-function path from the config
        function_path = cfg.aggregate_fn.aggregate_fn.function
        module_name, function_name = function_path.rsplit(".", 1)
        module = importlib.import_module(module_name, package=__package__)
        aggregation_fn = getattr(module, function_name)

        strategy = instantiate(
            cfg.strategy.strategy,
            num_clients_to_keep=cfg.server.pool_size - num_malicious,
            aggregate_fn=aggregation_fn,
            aggregate_parameters=cfg.aggregate_fn.aggregate_fn.parameters,
            window=cfg.server.warmup_rounds,
            distance_function=l2_norm,
            maxiter=cfg.strategy.strategy.maxiter,
            alpha=cfg.strategy.strategy.alpha,
            beta=int(cfg.strategy.strategy.beta),
            **common_kwargs,
        )
    elif cfg.strategy.name == "krum":
        strategy = instantiate(
            cfg.strategy.strategy,
            num_clients_to_keep=cfg.strategy.strategy.num_clients_to_keep,
            num_malicious_clients=num_malicious,
            **common_kwargs,
        )
    elif cfg.strategy.name == "fedavg":
        strategy = instantiate(cfg.strategy.strategy, **common_kwargs)
    elif cfg.strategy.name == "bulyan":
        strategy = instantiate(
            cfg.strategy.strategy,
            num_malicious_clients=num_malicious,
            to_keep=cfg.strategy.strategy.to_keep,
            **common_kwargs,
        )
    elif cfg.strategy.name == "trimmedmean":
        strategy = instantiate(
            cfg.strategy.strategy,
            beta=cfg.strategy.strategy.beta,
            **common_kwargs,
        )
    elif cfg.strategy.name == "fedmedian":
        strategy = instantiate(cfg.strategy.strategy, **common_kwargs)
    else:
        raise ValueError("Strategy not supported")

    # 5. Start Simulation
    history = fl.simulation.start_simulation(
        client_fn=client_fn,
        num_clients=cfg.server.pool_size,
        client_resources=cfg.client_resources,
        server=EnhancedServer(
            warmup_rounds=cfg.server.warmup_rounds,
            num_malicious=num_malicious,
            attack_fn=attacks[attack_fn],  # type: ignore
            magnitude=cfg.server.magnitude,
            client_manager=SimpleClientManager(),
            strategy=strategy,
            sampling=cfg.server.sampling,
            history_dir=cfg.server.history_dir,
            dataset_name=dataset_name,
            threshold=cfg.server.threshold,
            omniscent=cfg.server.omniscent,
        ),
        config=fl.server.ServerConfig(num_rounds=cfg.server.num_rounds),
        strategy=strategy,
    )

    save_path = HydraConfig.get().runtime.output_dir

    # Unpack the centralized metric time series recorded in History
    rounds, test_loss = zip(*history.losses_centralized)
    _, test_accuracy = zip(*history.metrics_centralized["accuracy"])
    _, test_auc = zip(*history.metrics_centralized["auc"])
    _, truep = zip(*history.metrics_centralized["TP"])
    _, truen = zip(*history.metrics_centralized["TN"])
    _, falsep = zip(*history.metrics_centralized["FP"])
    _, falsen = zip(*history.metrics_centralized["FN"])

    if not os.path.exists(os.path.join(save_path, "outputs")):
        os.makedirs(os.path.join(save_path, "outputs"))
    # NOTE(review): the second path is relative to the working directory,
    # not save_path — presumably relies on Hydra changing cwd; verify.
    path_to_save = [os.path.join(save_path, "results.csv"), "outputs/all_results.csv"]

    n_rows = len(rounds)
    for file_name in path_to_save:
        data = pd.DataFrame(
            {
                "round": rounds,
                "loss": test_loss,
                "accuracy": test_accuracy,
                "auc": test_auc,
                "TP": truep,
                "TN": truen,
                "FP": falsep,
                "FN": falsen,
                "attack_fn": [attack_fn] * n_rows,
                "dataset_name": [dataset_name] * n_rows,
                "num_malicious": [num_malicious] * n_rows,
                "strategy": [cfg.strategy.name] * n_rows,
                "aggregate_fn": [cfg.aggregate_fn.aggregate_fn.function] * n_rows,
            }
        )
        # Append when the file exists so repeated sweeps accumulate rows
        if os.path.exists(file_name):
            data.to_csv(file_name, mode="a", header=False, index=False)
        else:
            data.to_csv(file_name, index=False, header=True)
# pylint: disable=unused-argument
def fit_config(server_round):
    """Return a configuration with static batch size and (local) epochs.

    The round number is accepted (it is part of the strategy callback
    contract) but the configuration does not vary across rounds.
    """
    return {
        "epochs": 1,  # number of local epochs
        "batch_size": 32,
    }
def train_mnist(model, dataloader, epochs, device):
    """Train the network on the training set.

    Uses Adam (lr=0.001) with cross-entropy loss; inputs are flattened to
    784-dim vectors before the forward pass. Prints progress every 100 steps.
    """
    loss_fn = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    total_steps = len(dataloader)

    for epoch in range(1, epochs + 1):
        for step, (images, labels) in enumerate(dataloader, start=1):
            inputs = images.view(-1, 28 * 28).to(device)
            targets = labels.to(device)

            optimizer.zero_grad()
            loss = loss_fn(model(inputs), targets)
            loss.backward()
            optimizer.step()

            if step % 100 == 0:
                print(
                    f"Epoch [{epoch}/{epochs}], "
                    f"Step [{step}/{total_steps}], "
                    f"Loss: {loss.item():.4f}"
                )
def train_fmnist(model, dataloader, epochs, device):
    """Train the network on the training set.

    Uses Adam (lr=0.003) with summed negative log-likelihood loss (the model
    is expected to emit log-probabilities); inputs are flattened to 784-dim
    vectors before the forward pass. Prints progress every 100 steps.
    """
    loss_fn = nn.NLLLoss(reduction="sum")
    optimizer = torch.optim.Adam(model.parameters(), lr=0.003)
    total_steps = len(dataloader)

    for epoch in range(1, epochs + 1):
        for step, (images, labels) in enumerate(dataloader, start=1):
            inputs = images.view(-1, 28 * 28).to(device)
            targets = labels.to(device)

            optimizer.zero_grad()
            loss = loss_fn(model(inputs), targets)
            loss.backward()
            optimizer.step()

            if step % 100 == 0:
                print(
                    f"Epoch [{epoch}/{epochs}], "
                    f"Step [{step}/{total_steps}], "
                    f"Loss: {loss.item():.4f}"
                )
class EnhancedServer(Server):
    """Server with enhanced functionality.

    Simulates an attacker controlling a fraction of the clients and records
    every client's (optionally sub-sampled) parameters to disk each round so
    that FLANDERS can forecast them.
    """

    # pylint: disable=too-many-arguments,too-many-instance-attributes
    def __init__(
        self,
        num_malicious: int,
        warmup_rounds: int,
        attack_fn: Callable,
        dataset_name: str,
        *args: Any,
        threshold: float = 0.0,
        to_keep: int = 1,
        magnitude: float = 0.0,
        sampling: int = 0,
        history_dir: str = "clients_params",
        omniscent: bool = True,
        **kwargs: Any,
    ) -> None:
        """Create a new EnhancedServer instance.

        Parameters
        ----------
        num_malicious : int
            Number of malicious clients
        warmup_rounds : int
            Number of warmup rounds
        attack_fn : Callable
            Attack function to be used
        dataset_name : str
            Name of the dataset
        threshold : float, optional
            Threshold used by the attacks, by default 0.0
        to_keep : int, optional
            Number of clients to keep (i.e., to classify as "good"), by default 1
        magnitude : float, optional
            Magnitude of the Gaussian attack, by default 0.0
        sampling : int, optional
            Number of parameters to sample, by default 0
        history_dir : str, optional
            Directory where to save the parameters, by default "clients_params"
        omniscent : bool, optional
            Whether to use the omniscent attack, by default True
        """
        super().__init__(*args, **kwargs)
        self.num_malicious = num_malicious
        self.warmup_rounds = warmup_rounds
        self.attack_fn = attack_fn
        self.sampling = sampling
        self.aggregated_parameters: List = []
        self.params_indexes: List = []
        self.history_dir = history_dir
        self.dataset_name = dataset_name
        self.magnitude = magnitude
        self.threshold = threshold
        self.to_keep = to_keep
        self.omniscent = omniscent
        self.malicious_lst: List = []
        self.confusion_matrix = {"TP": 0, "TN": 0, "FP": 0, "FN": 0}
        self.clients_state: Dict[str, bool] = {}
        self.good_clients_idx: List[int] = []
        self.malicious_clients_idx: List[int] = []

    # pylint: disable=too-many-locals
    def fit(self, num_rounds, timeout):
        """Run federated averaging for a number of rounds."""
        history = History()

        # Obtain and evaluate the starting global model
        log(INFO, "Initializing global parameters")
        self.parameters = self._get_initial_parameters(timeout=timeout)
        log(INFO, "Evaluating initial parameters")
        initial_eval = self.strategy.evaluate(0, parameters=self.parameters)

        if initial_eval is not None:
            log(
                INFO,
                "initial parameters (loss, other metrics): %s, %s",
                initial_eval[0],
                initial_eval[1],
            )
            # Seed the detection confusion-matrix metrics with zeros
            initial_eval[1]["TP"] = 0
            initial_eval[1]["TN"] = 0
            initial_eval[1]["FP"] = 0
            initial_eval[1]["FN"] = 0
            history.add_loss_centralized(server_round=0, loss=initial_eval[0])
            history.add_metrics_centralized(server_round=0, metrics=initial_eval[1])

        log(INFO, "FL starting")
        start_time = timeit.default_timer()

        for current_round in range(1, num_rounds + 1):
            # Train model and replace previous global model
            fit_result = self.fit_round(
                server_round=current_round,
                timeout=timeout,
            )
            if fit_result is not None:
                parameters_prime, fit_metrics, _ = fit_result
                if parameters_prime:
                    self.parameters = parameters_prime
                history.add_metrics_distributed_fit(
                    server_round=current_round, metrics=fit_metrics
                )

            # Centralized evaluation via the strategy
            central_eval = self.strategy.evaluate(
                current_round, parameters=self.parameters
            )
            if central_eval is not None:
                loss_cen, metrics_cen = central_eval
                # Track detection quality only once attacks can occur
                if current_round > self.warmup_rounds:
                    self.confusion_matrix = update_confusion_matrix(
                        self.confusion_matrix,
                        self.clients_state,
                        self.malicious_clients_idx,
                        self.good_clients_idx,
                    )

                for key, val in self.confusion_matrix.items():
                    metrics_cen[key] = val

                log(
                    INFO,
                    "fit progress: (%s, %s, %s, %s)",
                    current_round,
                    loss_cen,
                    metrics_cen,
                    timeit.default_timer() - start_time,
                )
                history.add_loss_centralized(server_round=current_round, loss=loss_cen)
                history.add_metrics_centralized(
                    server_round=current_round, metrics=metrics_cen
                )

            # Federated evaluation on a sample of available clients
            fed_eval = self.evaluate_round(server_round=current_round, timeout=timeout)
            if fed_eval is not None:
                loss_fed, evaluate_metrics_fed, _ = fed_eval
                if loss_fed is not None:
                    history.add_loss_distributed(
                        server_round=current_round, loss=loss_fed
                    )
                    history.add_metrics_distributed(
                        server_round=current_round, metrics=evaluate_metrics_fed
                    )

        elapsed = timeit.default_timer() - start_time
        log(INFO, "FL finished in %s", elapsed)
        return history

    # pylint: disable-msg=R0915
    def fit_round(
        self,
        server_round,
        timeout,
    ):
        # pylint: disable-msg=R0912
        """Perform a single round of federated learning."""
        # Ask the strategy which clients to train and with what instructions
        client_instructions = self.strategy.configure_fit(
            server_round=server_round,
            parameters=self.parameters,
            client_manager=self._client_manager,
        )

        if not client_instructions:
            log(INFO, "fit_round %s: no clients selected, cancel", server_round)
            return None
        log(
            DEBUG,
            "fit_round %s: strategy sampled %s clients (out of %s)",
            server_round,
            len(client_instructions),
            self._client_manager.num_available(),
        )

        # Randomly pick which clients act maliciously this round
        # (no attacks during warmup)
        size = 0 if server_round <= self.warmup_rounds else self.num_malicious
        log(INFO, "Selecting %s malicious clients", size)
        self.malicious_lst = np.random.choice(
            [proxy.cid for proxy, _ in client_instructions], size=size, replace=False
        )

        # Record each client's malicious flag and forward it in the fit config
        clients_state = {}
        for proxy, ins in client_instructions:
            is_malicious = proxy.cid in self.malicious_lst
            clients_state[proxy.cid] = is_malicious
            ins.config["malicious"] = is_malicious

        # Keep the state dict sorted by cid for deterministic logging/iteration
        clients_state = dict(sorted(clients_state.items()))
        log(
            DEBUG,
            "fit_round %s: malicious clients selected %s, clients_state %s",
            server_round,
            self.malicious_lst,
            clients_state,
        )

        # Collect `fit` results from all clients participating in this round
        results, failures = fit_clients(
            client_instructions=client_instructions,
            max_workers=self.max_workers,
            timeout=timeout,
        )
        log(
            DEBUG,
            "fit_round %s received %s results and %s failures",
            server_round,
            len(results),
            len(failures),
        )

        # Persist every client's parameters as a time series, re-ordered by cid
        ordered_results = [0] * len(results)
        for proxy, fitres in results:
            params = flatten_params(parameters_to_ndarrays(fitres.parameters))
            if self.sampling > 0:
                # Never sample more parameters than exist
                self.sampling = min(self.sampling, len(params))
                if len(self.params_indexes) == 0:
                    # Fix a random subset of parameter indexes once and reuse it
                    self.params_indexes = np.random.randint(
                        0, len(params), size=self.sampling
                    )

                params = params[self.params_indexes]

            save_params(params, fitres.metrics["cid"], params_dir=self.history_dir)

            # Re-arrange results in the same order as clients' cids impose
            ordered_results[int(fitres.metrics["cid"])] = (proxy, fitres)

        log(INFO, "Clients state: %s", clients_state)

        # First time through: remember an honest client's parameters as the
        # reference model handed to the attack functions
        if self.aggregated_parameters == []:
            for key, val in clients_state.items():
                if val is False:
                    self.aggregated_parameters = parameters_to_ndarrays(
                        ordered_results[int(key)][1].parameters
                    )
                    break

        # Apply attack function: the server simulates an attacker that
        # controls a fraction of the clients
        if self.attack_fn is not None and server_round > self.warmup_rounds:
            log(INFO, "Applying attack function")
            results, _ = self.attack_fn(
                ordered_results,
                clients_state,
                omniscent=self.omniscent,
                magnitude=self.magnitude,
                w_re=self.aggregated_parameters,
                threshold=self.threshold,
                d=len(self.aggregated_parameters),
                dataset_name=self.dataset_name,
                to_keep=self.to_keep,
                malicious_num=self.num_malicious,
                num_layers=len(self.aggregated_parameters),
            )

            # Overwrite the malicious clients' last saved parameters with
            # their post-attack values
            for _, fitres in results:
                if clients_state[fitres.metrics["cid"]]:
                    params = flatten_params(parameters_to_ndarrays(fitres.parameters))
                    if self.sampling > 0:
                        params = params[self.params_indexes]
                    log(
                        INFO,
                        "Saving parameters of client %s with shape %s after the attack",
                        fitres.metrics["cid"],
                        params.shape,
                    )
                    save_params(
                        params,
                        fitres.metrics["cid"],
                        params_dir=self.history_dir,
                        remove_last=True,
                    )
        else:
            results = ordered_results

        # Aggregate training results
        log(INFO, "fit_round - Aggregating training results")
        good_clients_idx = []
        malicious_clients_idx = []
        aggregated_result = self.strategy.aggregate_fit(server_round, results, failures)
        if isinstance(self.strategy, Flanders):
            parameters_aggregated, metrics_aggregated = aggregated_result
            malicious_clients_idx = metrics_aggregated["malicious_clients_idx"]
            good_clients_idx = metrics_aggregated["good_clients_idx"]

            log(INFO, "Malicious clients: %s", malicious_clients_idx)

            log(INFO, "clients_state: %s", clients_state)

            # For clients detected as malicious, replace the last params in
            # their history with the current global model, otherwise the
            # forecasting in the next round won't be reliable (see the paper
            # for more details)
            if server_round > self.warmup_rounds:
                log(INFO, "Saving parameters of clients")
                for idx in malicious_clients_idx:
                    new_params = flatten_params(
                        parameters_to_ndarrays(parameters_aggregated)
                    )
                    if self.sampling > 0:
                        new_params = new_params[self.params_indexes]

                    log(
                        INFO,
                        "Saving parameters of client %s with shape %s",
                        idx,
                        new_params.shape,
                    )
                    save_params(
                        new_params,
                        idx,
                        params_dir=self.history_dir,
                        remove_last=True,
                        rrl=False,
                    )
        else:
            # Aggregate training results
            log(INFO, "fit_round - Aggregating training results")
            parameters_aggregated, metrics_aggregated = aggregated_result

        self.clients_state = clients_state
        self.good_clients_idx = good_clients_idx
        self.malicious_clients_idx = malicious_clients_idx
        return parameters_aggregated, metrics_aggregated, (results, failures)
b/baselines/flanders/flanders/strategy.py new file mode 100644 index 000000000000..36dbc1182653 --- /dev/null +++ b/baselines/flanders/flanders/strategy.py @@ -0,0 +1,375 @@ +"""FLANDERS strategy.""" + +import importlib +import typing +from logging import INFO, WARNING +from typing import Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +from flwr.common import ( + FitIns, + FitRes, + MetricsAggregationFn, + NDArrays, + Parameters, + Scalar, + ndarrays_to_parameters, + parameters_to_ndarrays, +) +from flwr.common.logger import log +from flwr.server.client_manager import ClientManager +from flwr.server.client_proxy import ClientProxy +from flwr.server.strategy.aggregate import aggregate +from flwr.server.strategy.fedavg import FedAvg + +from .utils import load_all_time_series + +WARNING_MIN_AVAILABLE_CLIENTS_TOO_LOW = """ +Setting `min_available_clients` lower than `min_fit_clients` or +`min_evaluate_clients` can cause the server to fail when there are too few clients +connected to the server. `min_available_clients` must be set to a value larger +than or equal to the values of `min_fit_clients` and `min_evaluate_clients`. +""" + + +class Flanders(FedAvg): + """Aggregation function based on MAR. + + Take a look at the paper for more details about the parameters. 
+ """ + + # pylint: disable=too-many-arguments,too-many-instance-attributes, too-many-locals + def __init__( + self, + fraction_fit: float = 1.0, + fraction_evaluate: float = 1.0, + min_fit_clients: int = 2, + min_evaluate_clients: int = 2, + min_available_clients: int = 2, + evaluate_fn: Optional[ + Callable[ + [int, NDArrays, Dict[str, Scalar]], + Optional[Tuple[float, Dict[str, Scalar]]], + ] + ] = None, + on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + accept_failures: bool = True, + initial_parameters: Optional[Parameters] = None, + fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, + evaluate_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, + num_clients_to_keep: int = 1, + aggregate_fn: Callable = aggregate, + aggregate_parameters: Optional[Dict[str, Scalar]] = None, + window: int = 0, + maxiter: int = 100, + alpha: float = 1, + beta: float = 1, + distance_function=None, + ) -> None: + """Initialize FLANDERS. 
+ + Parameters + ---------- + fraction_fit : float, optional + Fraction of clients used during the fit phase, by default 1.0 + fraction_evaluate : float, optional + Fraction of clients used during the evaluate phase, by default 1.0 + min_fit_clients : int, optional + Minimum number of clients used during the fit phase, by default 2 + min_evaluate_clients : int, optional + Minimum number of clients used during the evaluate phase, by + default 2 + min_available_clients : int, optional + Minimum number of clients available for training and evaluation, by + default 2 + evaluate_fn : Optional[Callable[[int, NDArrays, Dict[str, Scalar]], + Optional[Tuple[float, Dict[str, Scalar]]]]], optional + Evaluation function, by default None + on_fit_config_fn : Optional[Callable[[int], Dict[str, Scalar]]], + optional + Function to generate the config fed to the clients during the fit + phase, by default None + on_evaluate_config_fn : Optional[Callable[[int], Dict[str, Scalar]]], + optional + Function to generate the config fed to the clients during the + evaluate phase, by default None + accept_failures : bool, optional + Whether to accept failures from clients, by default True + initial_parameters : Optional[Parameters], optional + Initial model parameters, by default None + fit_metrics_aggregation_fn : Optional[MetricsAggregationFn], optional + Function to aggregate metrics during the fit phase, by default None + evaluate_metrics_aggregation_fn : Optional[MetricsAggregationFn], + optional + Function to aggregate metrics during the evaluate phase, by default + None + num_clients_to_keep : int, optional + Number of clients to keep (i.e., to classify as "good"), by default + 1 + aggregate_fn : Callable[[List[Tuple[NDArrays, int]]], NDArrays], + optional + Function to aggregate the parameters, by default FedAvg + window : int, optional + Sliding window size used as a "training set" of MAR, by default 0 + maxiter : int, optional + Maximum number of iterations of MAR, by default 100 + 
alpha : float, optional + Alpha parameter (regularization), by default 1 + beta : float, optional + Beta parameter (regularization), by default 1 + distance_function : Callable, optional + Distance function used to compute the distance between predicted + params and real ones, by default None + """ + super().__init__( + fraction_fit=fraction_fit, + fraction_evaluate=fraction_evaluate, + min_fit_clients=min_fit_clients, + min_evaluate_clients=min_evaluate_clients, + min_available_clients=min_available_clients, + evaluate_fn=evaluate_fn, + on_fit_config_fn=on_fit_config_fn, + on_evaluate_config_fn=on_evaluate_config_fn, + accept_failures=accept_failures, + initial_parameters=initial_parameters, + fit_metrics_aggregation_fn=fit_metrics_aggregation_fn, + evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation_fn, + ) + self.num_clients_to_keep = num_clients_to_keep + self.window = window + self.maxiter = maxiter + self.alpha = alpha + self.beta = beta + self.params_indexes = None + self.distance_function = distance_function + self.aggregate_fn = aggregate_fn + self.aggregate_parameters = aggregate_parameters + if self.aggregate_parameters is None: + self.aggregate_parameters = {} + + @typing.no_type_check + def configure_fit( + self, server_round: int, parameters: Parameters, client_manager: ClientManager + ) -> List[Tuple[ClientProxy, FitIns]]: + """Configure the next round of training.""" + # Sample clients + sample_size, min_num_clients = self.num_fit_clients( + client_manager.num_available() + ) + + # Custom FitIns object for each client + fit_ins_list = [ + FitIns( + parameters, + ( + {} + if not self.on_fit_config_fn + else self.on_fit_config_fn(server_round) + ), + ) + for _ in range(sample_size) + ] + + clients = client_manager.sample( + num_clients=sample_size, min_num_clients=min_num_clients + ) + + # Return client/config pairs + result = [] + for client, fit in zip(clients, fit_ins_list): + result.append((client, fit)) + return result + + # pylint: 
disable=too-many-locals,too-many-statements + @typing.no_type_check + def aggregate_fit( + self, + server_round: int, + results: List[Tuple[ClientProxy, FitRes]], + failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], + ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + """Apply MAR forecasting to exclude malicious clients from FedAvg. + + Parameters + ---------- + server_round : int + Current server round. + results : List[Tuple[ClientProxy, FitRes]] + List of results from the clients. + failures : List[Union[Tuple[ClientProxy, FitRes], BaseException]] + List of failures from the clients. + + Returns + ------- + parameters_aggregated: Optional[Parameters] + Aggregated parameters. + metrics_aggregated: Dict[str, Scalar] + Aggregated metrics. + malicious_clients_idx: List[int] + List of malicious clients' cids (indexes). + """ + good_clients_idx = [] + malicious_clients_idx = [] + if server_round > 1: + if server_round < self.window: + self.window = server_round + params_tensor = load_all_time_series( + params_dir="clients_params", window=self.window + ) + params_tensor = np.transpose( + params_tensor, (0, 2, 1) + ) # (clients, params, time) + ground_truth = params_tensor[:, :, -1].copy() + pred_step = 1 + log(INFO, "Computing MAR on params_tensor %s", params_tensor.shape) + predicted_matrix = mar( + params_tensor[:, :, :-1], + pred_step, + maxiter=self.maxiter, + alpha=self.alpha, + beta=self.beta, + ) + + log(INFO, "Computing anomaly scores") + anomaly_scores = self.distance_function( + ground_truth, predicted_matrix[:, :, 0] + ) + log(INFO, "Anomaly scores: %s", anomaly_scores) + + log(INFO, "Selecting good clients") + good_clients_idx = sorted( + np.argsort(anomaly_scores)[: self.num_clients_to_keep] + ) # noqa + malicious_clients_idx = sorted( + np.argsort(anomaly_scores)[self.num_clients_to_keep :] + ) # noqa + + avg_anomaly_score_gc = np.mean(anomaly_scores[good_clients_idx]) + log( + INFO, "Average anomaly score for good clients: %s", 
avg_anomaly_score_gc + ) + + avg_anomaly_score_m = np.mean(anomaly_scores[malicious_clients_idx]) + log( + INFO, + "Average anomaly score for malicious clients: %s", + avg_anomaly_score_m, + ) + + results = np.array(results)[good_clients_idx].tolist() + log(INFO, "Good clients: %s", good_clients_idx) + + log(INFO, "Applying aggregate_fn") + # Convert results + weights_results = [ + (parameters_to_ndarrays(fit_res.parameters), fit_res.num_examples) + for _, fit_res in results + ] + + # Check that self.aggregate_fn has num_malicious parameter + if "num_malicious" in self.aggregate_fn.__code__.co_varnames: + # Count the number of malicious clients in + # good_clients_idx by checking FitRes + clients_state = { + str(fit_res.metrics["cid"]): fit_res.metrics["malicious"] + for _, fit_res in results + } + num_malicious = sum([clients_state[str(cid)] for cid in good_clients_idx]) + log( + INFO, + "Number of malicious clients in good_clients_idx after filtering: %s", + num_malicious, + ) + self.aggregate_parameters["num_malicious"] = num_malicious + + if "aggregation_rule" in self.aggregate_fn.__code__.co_varnames: + module = importlib.import_module( + self.aggregate_parameters["aggregation_module_name"] + ) + function_name = self.aggregate_parameters["aggregation_name"] + self.aggregate_parameters["aggregation_rule"] = getattr( + module, function_name + ) + # Remove aggregation_module_name and aggregation_name + # from self.aggregate_parameters + aggregate_parameters = self.aggregate_parameters.copy() + del aggregate_parameters["aggregation_module_name"] + del aggregate_parameters["aggregation_name"] + try: + parameters_aggregated = ndarrays_to_parameters( + self.aggregate_fn(weights_results, **aggregate_parameters) + ) + except ValueError as err: + log(WARNING, "Error in aggregate_fn: %s", err) + parameters_aggregated = ndarrays_to_parameters( + aggregate(weights_results) + ) + else: + parameters_aggregated = ndarrays_to_parameters( + self.aggregate_fn(weights_results, 
**self.aggregate_parameters) + ) + + # Aggregate custom metrics if aggregation fn was provided + metrics_aggregated = {} + if self.fit_metrics_aggregation_fn: + fit_metrics = [(res.num_examples, res.metrics) for _, res in results] + metrics_aggregated = self.fit_metrics_aggregation_fn(fit_metrics) + elif server_round == 1: # Only log this warning once + log(WARNING, "No fit_metrics_aggregation_fn provided") + + # Add good_clients_idx and malicious_clients_idx to metrics_aggregated + metrics_aggregated["good_clients_idx"] = good_clients_idx + metrics_aggregated["malicious_clients_idx"] = malicious_clients_idx + + return parameters_aggregated, metrics_aggregated + + +# pylint: disable=too-many-locals, too-many-arguments, invalid-name +def mar(X, pred_step, alpha=1, beta=1, maxiter=100): + """Forecast the next tensor of params. + + Forecast the next tensor of params by using MAR algorithm. + + Code provided by Xinyu Chen at: + https://towardsdatascience.com/ matrix-autoregressive-model-for-multidimensional- + time-series-forecasting-6a4d7dce5143 + + With some modifications. 
+ """ + m, n, T = X.shape + start = 0 + + A = np.random.randn(m, m) + B = np.random.randn(n, n) + X_norm = (X - np.min(X)) / np.max(X) + + for _ in range(maxiter): + temp0 = B.T @ B + temp1 = np.zeros((m, m)) + temp2 = np.zeros((m, m)) + identity_m = np.identity(m) + + for t in range(start, T): + temp1 += X_norm[:, :, t] @ B @ X_norm[:, :, t - 1].T + temp2 += X_norm[:, :, t - 1] @ temp0 @ X_norm[:, :, t - 1].T + + temp2 += alpha * identity_m + A = temp1 @ np.linalg.inv(temp2) + + temp0 = A.T @ A + temp1 = np.zeros((n, n)) + temp2 = np.zeros((n, n)) + identity_n = np.identity(n) + + for t in range(start, T): + temp1 += X_norm[:, :, t].T @ A @ X_norm[:, :, t - 1] + temp2 += X_norm[:, :, t - 1].T @ temp0 @ X_norm[:, :, t - 1] + + temp2 += beta * identity_n + B = temp1 @ np.linalg.inv(temp2) + + tensor = np.append(X, np.zeros((m, n, pred_step)), axis=2) + for s in range(pred_step): + tensor[:, :, T + s] = A @ tensor[:, :, T + s - 1] @ B.T + return tensor[:, :, -pred_step:] diff --git a/baselines/flanders/flanders/utils.py b/baselines/flanders/flanders/utils.py new file mode 100644 index 000000000000..619e685e51cd --- /dev/null +++ b/baselines/flanders/flanders/utils.py @@ -0,0 +1,182 @@ +"""Collection of help functions needed by the strategies.""" + +import os +from threading import Lock +from typing import Callable, Dict, List, Optional, Tuple + +import numpy as np +import torch +from flwr.common import NDArrays, Parameters, Scalar, parameters_to_ndarrays +from natsort import natsorted +from torch.utils.data import DataLoader +from torchvision import transforms +from torchvision.datasets import MNIST, FashionMNIST + +from .client import set_params +from .models import FMnistNet, MnistNet, test_fmnist, test_mnist + +lock = Lock() + + +def l2_norm(true_matrix, predicted_matrix): + """Compute the l2 norm between two matrices. + + Parameters + ---------- + true_matrix : ndarray + The true matrix. + predicted_matrix : ndarray + The predicted matrix by MAR. 
+ + Returns + ------- + anomaly_scores : ndarray + 1-d array of anomaly scores. + """ + delta = np.subtract(true_matrix, predicted_matrix) + anomaly_scores = np.sum(delta**2, axis=-1) ** (1.0 / 2) + return anomaly_scores + + +def save_params( + parameters, cid, params_dir="clients_params", remove_last=False, rrl=False +): + """Save parameters in a file. + + Args: + - parameters (ndarray): decoded parameters to append at the end of the file + - cid (int): identifier of the client + - remove_last (bool): + if True, remove the last saved parameters and replace with "parameters" + - rrl (bool): + if True, remove the last saved parameters and replace with the ones + saved before this round. + """ + new_params = parameters + # Save parameters in clients_params/cid_params + path_file = f"{params_dir}/{cid}_params.npy" + if os.path.exists(params_dir) is False: + os.mkdir(params_dir) + if os.path.exists(path_file): + # load old parameters + old_params = np.load(path_file, allow_pickle=True) + if remove_last: + old_params = old_params[:-1] + if rrl: + new_params = old_params[-1] + # add new parameters + new_params = np.vstack((old_params, new_params)) + + # save parameters + np.save(path_file, new_params) + + +def load_all_time_series(params_dir="clients_params", window=0): + """Load all time series. + + Load all time series in order to have a tensor of shape (m,T,n) + where: + - T := time; + - m := number of clients; + - n := number of parameters. 
+ """ + files = os.listdir(params_dir) + files = natsorted(files) + data = [] + for file in files: + data.append(np.load(os.path.join(params_dir, file), allow_pickle=True)) + + return np.array(data)[:, -window:, :] + + +def flatten_params(params): + """Transform a list of (layers-)parameters into a single vector of shape (n).""" + return np.concatenate(params, axis=None).ravel() + + +# pylint: disable=unused-argument +def evaluate_aggregated( + evaluate_fn: Optional[ + Callable[[int, NDArrays, Dict[str, Scalar]], Tuple[float, Dict[str, Scalar]]] + ], + server_round: int, + parameters: Parameters, +): + """Evaluate model parameters using an evaluation function.""" + if evaluate_fn is None: + # No evaluation function provided + return None + parameters_ndarrays = parameters_to_ndarrays(parameters) + eval_res = evaluate_fn(server_round, parameters_ndarrays, {}) + if eval_res is None: + return None + loss, metrics = eval_res + + return loss, metrics + + +# pylint: disable=unused-argument +def mnist_evaluate(server_round: int, parameters: NDArrays, config: Dict[str, Scalar]): + """Evaluate MNIST model on the test set.""" + # determine device + if torch.cuda.is_available(): + device = torch.device("cuda") + elif torch.backends.mps.is_available(): + device = torch.device("mps") + else: + device = torch.device("cpu") + + model = MnistNet() + set_params(model, parameters) + model.to(device) + + testset = MNIST("", train=False, download=True, transform=transforms.ToTensor()) + testloader = DataLoader(testset, batch_size=32, shuffle=False, num_workers=1) + loss, accuracy, auc = test_mnist(model, testloader, device=device) + + return loss, {"accuracy": accuracy, "auc": auc} + + +# pylint: disable=unused-argument +def fmnist_evaluate(server_round: int, parameters: NDArrays, config: Dict[str, Scalar]): + """Evaluate MNIST model on the test set.""" + # determine device + if torch.cuda.is_available(): + device = torch.device("cuda") + elif torch.backends.mps.is_available(): + 
device = torch.device("mps") + else: + device = torch.device("cpu") + + model = FMnistNet() + set_params(model, parameters) + model.to(device) + + testset = FashionMNIST( + "", train=False, download=True, transform=transforms.ToTensor() + ) + testloader = DataLoader(testset, batch_size=32, shuffle=False, num_workers=1) + loss, accuracy, auc = test_fmnist(model, testloader, device=device) + + return loss, {"accuracy": accuracy, "auc": auc} + + +def update_confusion_matrix( + confusion_matrix: Dict[str, int], + clients_states: Dict[str, bool], + malicious_clients_idx: List, + good_clients_idx: List, +): + """Update TN, FP, FN, TP of confusion matrix.""" + for client_idx, client_state in clients_states.items(): + if int(client_idx) in malicious_clients_idx: + if client_state: + confusion_matrix["TP"] += 1 + else: + confusion_matrix["FP"] += 1 + elif int(client_idx) in good_clients_idx: + if client_state: + confusion_matrix["FN"] += 1 + else: + confusion_matrix["TN"] += 1 + return confusion_matrix diff --git a/baselines/flanders/plotting/FLANDERS_results.ipynb b/baselines/flanders/plotting/FLANDERS_results.ipynb new file mode 100644 index 000000000000..4f3fdcc9b0d8 --- /dev/null +++ b/baselines/flanders/plotting/FLANDERS_results.ipynb @@ -0,0 +1 @@ +{"cells":[{"cell_type":"markdown","metadata":{"id":"Cg37xeuu7Xy5"},"source":["# Preliminaries"]},{"cell_type":"code","execution_count":92,"metadata":{"id":"J_Dh3sGVyb2w"},"outputs":[],"source":["import pandas as pd\n","from natsort import natsorted\n","import matplotlib.pyplot as plt"]},{"cell_type":"code","execution_count":93,"metadata":{"id":"FjlCyr_B8OdT"},"outputs":[],"source":["results_dir = \"../outputs/\""]},{"cell_type":"markdown","metadata":{"id":"VX2oCpZf7Z7y"},"source":["# Prepare data"]},{"cell_type":"markdown","metadata":{"id":"P_3Z05w0wvNB"},"source":["## Utils"]},{"cell_type":"code","execution_count":94,"metadata":{},"outputs":[],"source":["def divide_results_by_dataset(results_dir, 
file=\"all_results.csv\"):\n"," \"\"\"Divide csv results into multiple files distinguished by dataset and if strategy is FLANDERS or not (e.g., all_results_mnist_flanders and all_results_mnist_no_flanders).\"\"\"\n"," results = pd.read_csv(results_dir + file, float_precision='round_trip')\n"," datasets = natsorted(results[\"dataset_name\"].unique())\n"," for dataset in datasets:\n"," flanders = results[(results[\"dataset_name\"] == dataset) & (results[\"strategy\"] == \"flanders\")]\n"," no_flanders = results[(results[\"dataset_name\"] == dataset) & (results[\"strategy\"] != \"flanders\")]\n"," flanders.to_csv(results_dir + \"all_results_\" + dataset + \"_flanders.csv\", index=False)\n"," no_flanders.to_csv(results_dir + \"all_results_\" + dataset + \"_no_flanders.csv\", index=False)\n"," "]},{"cell_type":"code","execution_count":95,"metadata":{"id":"fZSDCuT497HV"},"outputs":[],"source":["def print_unique_data(results_df):\n"," for col in [\"attack_fn\", \"num_malicious\", \"dataset_name\", \"strategy\", \"aggregate_fn\"]:\n"," print(f\"Unique values in {col}: {results_df[col].unique()}\")"]},{"cell_type":"code","execution_count":96,"metadata":{"id":"8GcIZNuu8q5Y"},"outputs":[],"source":["def translate_cols(df, attack_dict, dataset_dict, strategy_dict, aggregate_dict):\n"," column_names = [\"attack_fn\", \"dataset_name\", \"strategy\", \"aggregate_fn\"]\n"," for idx, d in enumerate([attack_dict, dataset_dict, strategy_dict, aggregate_dict]):\n"," df[column_names[idx]] = df[column_names[idx]].replace(d)\n"," return df"]},{"cell_type":"code","execution_count":97,"metadata":{"id":"oHcF2pl8sdOG"},"outputs":[],"source":["attack_dict = {\n"," \"gaussian\": \"GAUSS\",\n"," \"lie\": \"LIE\",\n"," \"fang\": \"OPT\",\n"," \"minmax\": \"AGR-MM\",\n"," \"adaptive\": \"MAR-ATK\"\n","}\n","\n","dataset_dict = {\n"," \"mnist\": \"MNIST\",\n"," \"fmnist\": \"FMNIST\",\n"," \"cifar\": \"CIFAR-10\",\n"," \"cifar100\": \"CIFAR-100\"\n","}\n","\n","strategy_dict = {\n"," \"flanders\": 
\"FLANDERS\",\n"," \"fedavg\": \"FedAvg\",\n"," \"fedmedian\": \"FedMedian\",\n"," \"trimmedmean\": \"TrimmedMean\",\n"," \"bulyan\": \"Bulyan\",\n"," \"krum\": \"MultiKrum\",\n"," \"fldetector\": \"FLDetector\"\n","}\n","\n","aggregate_dict = {\n"," \"flwr.server.strategy.aggregate.aggregate\": \"FedAvg\",\n"," \"flwr.server.strategy.aggregate.aggregate_median\": \"FedMedian\",\n"," \"flwr.server.strategy.aggregate.aggregate_trimmed_avg\": \"TrimmedMean\",\n"," \"flwr.server.strategy.aggregate.aggregate_bulyan\": \"Bulyan\",\n"," \"flwr.server.strategy.aggregate.aggregate_krum\": \"MultiKrum\"\n","}"]},{"cell_type":"code","execution_count":98,"metadata":{},"outputs":[],"source":["divide_results_by_dataset(results_dir)"]},{"cell_type":"markdown","metadata":{"id":"y0XCCkuhwydB"},"source":["## MNIST"]},{"cell_type":"markdown","metadata":{"id":"NG2-2cpnyjkY"},"source":["### Use this shortcut"]},{"cell_type":"code","execution_count":99,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":443},"executionInfo":{"elapsed":244,"status":"ok","timestamp":1716376729975,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"lY85nEb6yrXu","outputId":"5439a3bc-684f-492f-b615-53e2252cd94c"},"outputs":[{"data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
roundlossaccuracyaucTPTNFPFNattack_fndataset_namenum_maliciousstrategyaggregate_fn
00720.8331210.10450.5034220000GAUSSMNIST0FLANDERSFedAvg
11664.5222520.20890.5631160000GAUSSMNIST0FLANDERSFedAvg
22624.6338260.35600.6447310000GAUSSMNIST0FLANDERSFedAvg
33581.4764720.47730.710941010000GAUSSMNIST0FLANDERSFedAvg
44545.1142050.54300.746970020000GAUSSMNIST0FLANDERSFedAvg
..........................................
356546724.2817250.10250.5004790000AGR-MMMNIST80dncFedAvg
356647724.3501380.10240.5004210000AGR-MMMNIST80dncFedAvg
356748724.5352630.10250.5004790000AGR-MMMNIST80dncFedAvg
356849724.5888810.10280.5005980000AGR-MMMNIST80dncFedAvg
356950724.7838510.10280.5006020000AGR-MMMNIST80dncFedAvg
\n","

7548 rows × 13 columns

\n","
"],"text/plain":[" round loss accuracy auc TP TN FP FN attack_fn \\\n","0 0 720.833121 0.1045 0.503422 0 0 0 0 GAUSS \n","1 1 664.522252 0.2089 0.563116 0 0 0 0 GAUSS \n","2 2 624.633826 0.3560 0.644731 0 0 0 0 GAUSS \n","3 3 581.476472 0.4773 0.710941 0 100 0 0 GAUSS \n","4 4 545.114205 0.5430 0.746970 0 200 0 0 GAUSS \n","... ... ... ... ... .. ... .. .. ... \n","3565 46 724.281725 0.1025 0.500479 0 0 0 0 AGR-MM \n","3566 47 724.350138 0.1024 0.500421 0 0 0 0 AGR-MM \n","3567 48 724.535263 0.1025 0.500479 0 0 0 0 AGR-MM \n","3568 49 724.588881 0.1028 0.500598 0 0 0 0 AGR-MM \n","3569 50 724.783851 0.1028 0.500602 0 0 0 0 AGR-MM \n","\n"," dataset_name num_malicious strategy aggregate_fn \n","0 MNIST 0 FLANDERS FedAvg \n","1 MNIST 0 FLANDERS FedAvg \n","2 MNIST 0 FLANDERS FedAvg \n","3 MNIST 0 FLANDERS FedAvg \n","4 MNIST 0 FLANDERS FedAvg \n","... ... ... ... ... \n","3565 MNIST 80 dnc FedAvg \n","3566 MNIST 80 dnc FedAvg \n","3567 MNIST 80 dnc FedAvg \n","3568 MNIST 80 dnc FedAvg \n","3569 MNIST 80 dnc FedAvg \n","\n","[7548 rows x 13 columns]"]},"execution_count":99,"metadata":{},"output_type":"execute_result"}],"source":["# CSV pre-processing MNIST\n","results_flanders_file = results_dir + \"all_results_mnist_flanders.csv\"\n","results_no_flanders_file = results_dir + \"all_results_mnist_no_flanders.csv\"\n","results_flanders_df = pd.read_csv(results_flanders_file)\n","results_no_flanders_df = pd.read_csv(results_no_flanders_file)\n","results_flanders_df = translate_cols(results_flanders_df, attack_dict ,dataset_dict, strategy_dict, aggregate_dict)\n","results_no_flanders_df = translate_cols(results_no_flanders_df, attack_dict ,dataset_dict, strategy_dict, aggregate_dict)\n","mnist_df = pd.concat([results_flanders_df, results_no_flanders_df])\n","mnist_df"]},{"cell_type":"code","execution_count":100,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":9,"status":"ok","timestamp":1716115669854,"user":{"displayName":"Edoardo 
Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"hg3ysnqiNrms","outputId":"bae8ab71-ce7b-409a-b154-6ad42f9dfc3b"},"outputs":[{"name":"stdout","output_type":"stream","text":["Unique values in attack_fn: ['GAUSS' 'LIE' 'OPT' 'AGR-MM']\n","Unique values in num_malicious: [ 0 20 60 80]\n","Unique values in dataset_name: ['MNIST']\n","Unique values in strategy: ['FLANDERS' 'FedAvg' 'TrimmedMean' 'FedMedian' 'MultiKrum' 'Bulyan' 'dnc']\n","Unique values in aggregate_fn: ['FedAvg' 'TrimmedMean' 'FedMedian' 'MultiKrum' 'Bulyan'\n"," 'flanders.strategies.aggregate.aggregate_dnc']\n"]}],"source":["print_unique_data(mnist_df)"]},{"cell_type":"markdown","metadata":{"id":"dE_uqUeuyl6M"},"source":["### Step-by-step processing"]},{"cell_type":"code","execution_count":101,"metadata":{"id":"R9Cpe8bF8a2z"},"outputs":[],"source":["results_flanders_file = results_dir + \"all_results_mnist_flanders.csv\"\n","results_no_flanders_file = results_dir + \"all_results_mnist_no_flanders.csv\""]},{"cell_type":"code","execution_count":102,"metadata":{"id":"8nPsIraZ7nJK"},"outputs":[],"source":["results_flanders_df = pd.read_csv(results_flanders_file)\n","results_no_flanders_df = pd.read_csv(results_no_flanders_file)"]},{"cell_type":"code","execution_count":103,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":3,"status":"ok","timestamp":1707513800371,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-60},"id":"oC_C6WVMshle","outputId":"20708ffb-24d9-4d94-fc83-6ab93c8d4ed0"},"outputs":[{"name":"stdout","output_type":"stream","text":["Unique values in attack_fn: ['gaussian' 'lie' 'fang' 'minmax']\n","Unique values in num_malicious: [ 0 20 60 80]\n","Unique values in dataset_name: ['mnist']\n","Unique values in strategy: ['flanders']\n","Unique values in aggregate_fn: ['flwr.server.strategy.aggregate.aggregate'\n"," 'flwr.server.strategy.aggregate.aggregate_trimmed_avg'\n"," 
'flwr.server.strategy.aggregate.aggregate_median'\n"," 'flwr.server.strategy.aggregate.aggregate_krum'\n"," 'flwr.server.strategy.aggregate.aggregate_bulyan'\n"," 'flanders.strategies.aggregate.aggregate_dnc']\n"]}],"source":["print_unique_data(results_flanders_df)"]},{"cell_type":"code","execution_count":104,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":423,"status":"ok","timestamp":1707478795736,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-60},"id":"2xSStl9-52cc","outputId":"391a3d7c-c4b5-486c-85a2-97d9a5ddb30d"},"outputs":[{"name":"stdout","output_type":"stream","text":["Unique values in attack_fn: ['gaussian' 'lie' 'fang' 'minmax']\n","Unique values in num_malicious: [ 0 20 60 80]\n","Unique values in dataset_name: ['mnist']\n","Unique values in strategy: ['fedavg' 'trimmedmean' 'fedmedian' 'krum' 'bulyan' 'dnc']\n","Unique values in aggregate_fn: ['flwr.server.strategy.aggregate.aggregate']\n"]}],"source":["print_unique_data(results_no_flanders_df)"]},{"cell_type":"markdown","metadata":{"id":"8dEepZY28raZ"},"source":["Translate strings"]},{"cell_type":"code","execution_count":105,"metadata":{"id":"zNPGc6YJ7E_J"},"outputs":[],"source":["results_flanders_df = translate_cols(results_flanders_df, attack_dict ,dataset_dict, strategy_dict, aggregate_dict)"]},{"cell_type":"code","execution_count":106,"metadata":{"id":"AQaNnF1K7TQc"},"outputs":[],"source":["results_no_flanders_df = translate_cols(results_no_flanders_df, attack_dict ,dataset_dict, strategy_dict, aggregate_dict)"]},{"cell_type":"code","execution_count":107,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":393,"status":"ok","timestamp":1707478246670,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-60},"id":"MXYnAzh8V-9t","outputId":"8f09ab5c-cd3e-4627-9f76-5f474ec09227"},"outputs":[{"name":"stdout","output_type":"stream","text":["Unique values in 
attack_fn: ['GAUSS' 'LIE' 'OPT' 'AGR-MM']\n","Unique values in num_malicious: [ 0 20 60 80]\n","Unique values in dataset_name: ['MNIST']\n","Unique values in strategy: ['FLANDERS']\n","Unique values in aggregate_fn: ['FedAvg' 'TrimmedMean' 'FedMedian' 'MultiKrum' 'Bulyan'\n"," 'flanders.strategies.aggregate.aggregate_dnc']\n"]}],"source":["print_unique_data(results_flanders_df)"]},{"cell_type":"code","execution_count":108,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":8,"status":"ok","timestamp":1707472989224,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-60},"id":"7xmPF5X77VQk","outputId":"f8e8331e-cde6-4413-f795-f0fe1a9cdd19"},"outputs":[{"name":"stdout","output_type":"stream","text":["Unique values in attack_fn: ['GAUSS' 'LIE' 'OPT' 'AGR-MM']\n","Unique values in num_malicious: [ 0 20 60 80]\n","Unique values in dataset_name: ['MNIST']\n","Unique values in strategy: ['FedAvg' 'TrimmedMean' 'FedMedian' 'MultiKrum' 'Bulyan' 'dnc']\n","Unique values in aggregate_fn: ['FedAvg']\n"]}],"source":["print_unique_data(results_no_flanders_df)"]},{"cell_type":"markdown","metadata":{"id":"mjTmoV2M9YTk"},"source":["Concatenate the 2 dataframes, namely FLANDERS+f and baselines:"]},{"cell_type":"code","execution_count":109,"metadata":{"id":"apvpT9Ve8wwv"},"outputs":[],"source":["mnist_df = pd.concat([results_flanders_df, results_no_flanders_df])"]},{"cell_type":"code","execution_count":110,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":424},"executionInfo":{"elapsed":5,"status":"ok","timestamp":1707513807441,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-60},"id":"aZuW73BW9Iu3","outputId":"3f558906-0d55-4ccd-e64f-5b62203ae746"},"outputs":[{"data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
roundlossaccuracyaucTPTNFPFNattack_fndataset_namenum_maliciousstrategyaggregate_fn
00720.8331210.10450.5034220000GAUSSMNIST0FLANDERSFedAvg
11664.5222520.20890.5631160000GAUSSMNIST0FLANDERSFedAvg
22624.6338260.35600.6447310000GAUSSMNIST0FLANDERSFedAvg
33581.4764720.47730.710941010000GAUSSMNIST0FLANDERSFedAvg
44545.1142050.54300.746970020000GAUSSMNIST0FLANDERSFedAvg
..........................................
356546724.2817250.10250.5004790000AGR-MMMNIST80dncFedAvg
356647724.3501380.10240.5004210000AGR-MMMNIST80dncFedAvg
356748724.5352630.10250.5004790000AGR-MMMNIST80dncFedAvg
356849724.5888810.10280.5005980000AGR-MMMNIST80dncFedAvg
356950724.7838510.10280.5006020000AGR-MMMNIST80dncFedAvg
\n","

7548 rows × 13 columns

\n","
"],"text/plain":[" round loss accuracy auc TP TN FP FN attack_fn \\\n","0 0 720.833121 0.1045 0.503422 0 0 0 0 GAUSS \n","1 1 664.522252 0.2089 0.563116 0 0 0 0 GAUSS \n","2 2 624.633826 0.3560 0.644731 0 0 0 0 GAUSS \n","3 3 581.476472 0.4773 0.710941 0 100 0 0 GAUSS \n","4 4 545.114205 0.5430 0.746970 0 200 0 0 GAUSS \n","... ... ... ... ... .. ... .. .. ... \n","3565 46 724.281725 0.1025 0.500479 0 0 0 0 AGR-MM \n","3566 47 724.350138 0.1024 0.500421 0 0 0 0 AGR-MM \n","3567 48 724.535263 0.1025 0.500479 0 0 0 0 AGR-MM \n","3568 49 724.588881 0.1028 0.500598 0 0 0 0 AGR-MM \n","3569 50 724.783851 0.1028 0.500602 0 0 0 0 AGR-MM \n","\n"," dataset_name num_malicious strategy aggregate_fn \n","0 MNIST 0 FLANDERS FedAvg \n","1 MNIST 0 FLANDERS FedAvg \n","2 MNIST 0 FLANDERS FedAvg \n","3 MNIST 0 FLANDERS FedAvg \n","4 MNIST 0 FLANDERS FedAvg \n","... ... ... ... ... \n","3565 MNIST 80 dnc FedAvg \n","3566 MNIST 80 dnc FedAvg \n","3567 MNIST 80 dnc FedAvg \n","3568 MNIST 80 dnc FedAvg \n","3569 MNIST 80 dnc FedAvg \n","\n","[7548 rows x 13 columns]"]},"execution_count":110,"metadata":{},"output_type":"execute_result"}],"source":["mnist_df"]},{"cell_type":"code","execution_count":111,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":3,"status":"ok","timestamp":1707480685917,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-60},"id":"Ub0W-iA69LpR","outputId":"bfbcaf9e-575c-4d02-e9f0-0be7e74accb4"},"outputs":[{"name":"stdout","output_type":"stream","text":["Unique values in attack_fn: ['GAUSS' 'LIE' 'OPT' 'AGR-MM']\n","Unique values in num_malicious: [ 0 20 60 80]\n","Unique values in dataset_name: ['MNIST']\n","Unique values in strategy: ['FLANDERS' 'FedAvg' 'TrimmedMean' 'FedMedian' 'MultiKrum' 'Bulyan' 'dnc']\n","Unique values in aggregate_fn: ['FedAvg' 'TrimmedMean' 'FedMedian' 'MultiKrum' 'Bulyan'\n"," 
'flanders.strategies.aggregate.aggregate_dnc']\n"]}],"source":["print_unique_data(mnist_df)"]},{"cell_type":"markdown","metadata":{"id":"E3TZ_fJuTVuU"},"source":["## Fashion MNIST"]},{"cell_type":"markdown","metadata":{"id":"45GAIKG9Tmyb"},"source":["### Use this shortcut"]},{"cell_type":"code","execution_count":112,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":443},"executionInfo":{"elapsed":327,"status":"ok","timestamp":1716376732776,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"Qju5S7VmTpB_","outputId":"fbe0aed8-164c-4343-9949-e9fa7cc7f0a7"},"outputs":[{"data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
roundlossaccuracyaucTPTNFPFNattack_fndataset_namenum_maliciousstrategyaggregate_fn
0023082.3338130.06310.4795000000GAUSSFMNIST0FLANDERSFedAvg
1121920.1315610.19770.5542780000GAUSSFMNIST0FLANDERSFedAvg
2217859.0960200.42100.6783330000GAUSSFMNIST0FLANDERSFedAvg
3315559.0449260.49200.717778010000GAUSSFMNIST0FLANDERSFedAvg
4414684.1937220.50010.722278020000GAUSSFMNIST0FLANDERSFedAvg
..........................................
35654623279.5649070.10000.5000000000AGR-MMFMNIST80dncFedAvg
35664723290.9804420.10000.5000000000AGR-MMFMNIST80dncFedAvg
35674823302.2510220.10000.5000000000AGR-MMFMNIST80dncFedAvg
35684923312.5125960.10000.5000000000AGR-MMFMNIST80dncFedAvg
35695023326.1161770.10000.5000000000AGR-MMFMNIST80dncFedAvg
\n","

6884 rows × 13 columns

\n","
"],"text/plain":[" round loss accuracy auc TP TN FP FN attack_fn \\\n","0 0 23082.333813 0.0631 0.479500 0 0 0 0 GAUSS \n","1 1 21920.131561 0.1977 0.554278 0 0 0 0 GAUSS \n","2 2 17859.096020 0.4210 0.678333 0 0 0 0 GAUSS \n","3 3 15559.044926 0.4920 0.717778 0 100 0 0 GAUSS \n","4 4 14684.193722 0.5001 0.722278 0 200 0 0 GAUSS \n","... ... ... ... ... .. ... .. .. ... \n","3565 46 23279.564907 0.1000 0.500000 0 0 0 0 AGR-MM \n","3566 47 23290.980442 0.1000 0.500000 0 0 0 0 AGR-MM \n","3567 48 23302.251022 0.1000 0.500000 0 0 0 0 AGR-MM \n","3568 49 23312.512596 0.1000 0.500000 0 0 0 0 AGR-MM \n","3569 50 23326.116177 0.1000 0.500000 0 0 0 0 AGR-MM \n","\n"," dataset_name num_malicious strategy aggregate_fn \n","0 FMNIST 0 FLANDERS FedAvg \n","1 FMNIST 0 FLANDERS FedAvg \n","2 FMNIST 0 FLANDERS FedAvg \n","3 FMNIST 0 FLANDERS FedAvg \n","4 FMNIST 0 FLANDERS FedAvg \n","... ... ... ... ... \n","3565 FMNIST 80 dnc FedAvg \n","3566 FMNIST 80 dnc FedAvg \n","3567 FMNIST 80 dnc FedAvg \n","3568 FMNIST 80 dnc FedAvg \n","3569 FMNIST 80 dnc FedAvg \n","\n","[6884 rows x 13 columns]"]},"execution_count":112,"metadata":{},"output_type":"execute_result"}],"source":["# CSV pre-processing FMNIST\n","results_flanders_file = results_dir + \"all_results_fmnist_flanders.csv\"\n","results_no_flanders_file = results_dir + \"all_results_fmnist_no_flanders.csv\"\n","results_flanders_df = pd.read_csv(results_flanders_file)\n","results_no_flanders_df = pd.read_csv(results_no_flanders_file)\n","results_flanders_df = translate_cols(results_flanders_df, attack_dict ,dataset_dict, strategy_dict, aggregate_dict)\n","results_no_flanders_df = translate_cols(results_no_flanders_df, attack_dict ,dataset_dict, strategy_dict, aggregate_dict)\n","fmnist_df = pd.concat([results_flanders_df, 
results_no_flanders_df])\n","fmnist_df"]},{"cell_type":"code","execution_count":113,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":1,"status":"ok","timestamp":1716047458204,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"08YXqMTZNpN6","outputId":"9de6cdc8-241e-4867-cd53-645434b99ce2"},"outputs":[{"name":"stdout","output_type":"stream","text":["Unique values in attack_fn: ['GAUSS' 'LIE' 'OPT' 'AGR-MM']\n","Unique values in num_malicious: [ 0 20 60 80]\n","Unique values in dataset_name: ['FMNIST']\n","Unique values in strategy: ['FLANDERS' 'FedAvg' 'TrimmedMean' 'FedMedian' 'MultiKrum' 'Bulyan' 'dnc']\n","Unique values in aggregate_fn: ['FedAvg' 'TrimmedMean' 'FedMedian' 'MultiKrum' 'Bulyan']\n"]}],"source":["print_unique_data(fmnist_df)"]},{"cell_type":"markdown","metadata":{"id":"9vVX6wsxT-rc"},"source":["### Step-by-step processing"]},{"cell_type":"code","execution_count":114,"metadata":{"id":"j0ZnLmVnUBT3"},"outputs":[],"source":["results_flanders_file = results_dir + \"all_results_fmnist_flanders.csv\"\n","results_no_flanders_file = results_dir + \"all_results_fmnist_no_flanders.csv\""]},{"cell_type":"code","execution_count":115,"metadata":{"id":"qsYaQiAWUBOw"},"outputs":[],"source":["results_flanders_df = pd.read_csv(results_flanders_file)\n","results_no_flanders_df = pd.read_csv(results_no_flanders_file)"]},{"cell_type":"code","execution_count":116,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":250,"status":"ok","timestamp":1709217712591,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-60},"id":"m1VKWq_jUHyY","outputId":"5bd5b442-4ab4-473d-bc60-b6f5fdc6320d"},"outputs":[{"name":"stdout","output_type":"stream","text":["Unique values in attack_fn: ['gaussian' 'lie' 'fang' 'minmax']\n","Unique values in num_malicious: [ 0 20 60 80]\n","Unique values in dataset_name: ['fmnist']\n","Unique values in 
strategy: ['flanders']\n","Unique values in aggregate_fn: ['flwr.server.strategy.aggregate.aggregate'\n"," 'flwr.server.strategy.aggregate.aggregate_trimmed_avg'\n"," 'flwr.server.strategy.aggregate.aggregate_median'\n"," 'flwr.server.strategy.aggregate.aggregate_krum'\n"," 'flwr.server.strategy.aggregate.aggregate_bulyan']\n"]}],"source":["print_unique_data(results_flanders_df)"]},{"cell_type":"code","execution_count":117,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":4,"status":"ok","timestamp":1709217720407,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-60},"id":"IxPT9D6DUJN3","outputId":"ded1f8b2-baf5-437b-abe3-25bad7a3c4ad"},"outputs":[{"name":"stdout","output_type":"stream","text":["Unique values in attack_fn: ['gaussian' 'lie' 'fang' 'minmax']\n","Unique values in num_malicious: [ 0 20 60 80]\n","Unique values in dataset_name: ['fmnist']\n","Unique values in strategy: ['fedavg' 'trimmedmean' 'fedmedian' 'krum' 'bulyan' 'dnc']\n","Unique values in aggregate_fn: ['flwr.server.strategy.aggregate.aggregate']\n"]}],"source":["print_unique_data(results_no_flanders_df)"]},{"cell_type":"markdown","metadata":{"id":"X8k98LNrUaLp"},"source":["Translate strings"]},{"cell_type":"code","execution_count":118,"metadata":{"id":"zHNwpvZMUaLq"},"outputs":[],"source":["results_flanders_df = translate_cols(results_flanders_df, attack_dict ,dataset_dict, strategy_dict, aggregate_dict)"]},{"cell_type":"code","execution_count":119,"metadata":{"id":"9zMOOjCiUaLr"},"outputs":[],"source":["results_no_flanders_df = translate_cols(results_no_flanders_df, attack_dict ,dataset_dict, strategy_dict, aggregate_dict)"]},{"cell_type":"code","execution_count":120,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":4,"status":"ok","timestamp":1709217802421,"user":{"displayName":"Edoardo 
Gabrielli","userId":"12318890431187689267"},"user_tz":-60},"id":"tygeoDz6UaLr","outputId":"2441a31a-ead0-4adf-8b95-73d75c6b3739"},"outputs":[{"name":"stdout","output_type":"stream","text":["Unique values in attack_fn: ['GAUSS' 'LIE' 'OPT' 'AGR-MM']\n","Unique values in num_malicious: [ 0 20 60 80]\n","Unique values in dataset_name: ['FMNIST']\n","Unique values in strategy: ['FLANDERS']\n","Unique values in aggregate_fn: ['FedAvg' 'TrimmedMean' 'FedMedian' 'MultiKrum' 'Bulyan']\n"]}],"source":["print_unique_data(results_flanders_df)"]},{"cell_type":"code","execution_count":121,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":3,"status":"ok","timestamp":1709217803343,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-60},"id":"b8Xo1VseUaLr","outputId":"1fbb0a64-0695-4a89-eec1-e0a18ba54c40"},"outputs":[{"name":"stdout","output_type":"stream","text":["Unique values in attack_fn: ['GAUSS' 'LIE' 'OPT' 'AGR-MM']\n","Unique values in num_malicious: [ 0 20 60 80]\n","Unique values in dataset_name: ['FMNIST']\n","Unique values in strategy: ['FedAvg' 'TrimmedMean' 'FedMedian' 'MultiKrum' 'Bulyan' 'dnc']\n","Unique values in aggregate_fn: ['FedAvg']\n"]}],"source":["print_unique_data(results_no_flanders_df)"]},{"cell_type":"markdown","metadata":{"id":"zkmqmUTzUaLr"},"source":["Concatenate the 2 dataframes, namely FLANDERS+f and baselines:"]},{"cell_type":"code","execution_count":122,"metadata":{"id":"m-wRVa9eUaLr"},"outputs":[],"source":["fmnist_df = pd.concat([results_flanders_df, results_no_flanders_df])"]},{"cell_type":"code","execution_count":123,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":423},"executionInfo":{"elapsed":4,"status":"ok","timestamp":1709217813677,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-60},"id":"IhpjH5n1UaLs","outputId":"73f9a359-9f74-4e5e-b518-25af272b2207"},"outputs":[{"data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
roundlossaccuracyaucTPTNFPFNattack_fndataset_namenum_maliciousstrategyaggregate_fn
0023082.3338130.06310.4795000000GAUSSFMNIST0FLANDERSFedAvg
1121920.1315610.19770.5542780000GAUSSFMNIST0FLANDERSFedAvg
2217859.0960200.42100.6783330000GAUSSFMNIST0FLANDERSFedAvg
3315559.0449260.49200.717778010000GAUSSFMNIST0FLANDERSFedAvg
4414684.1937220.50010.722278020000GAUSSFMNIST0FLANDERSFedAvg
..........................................
35654623279.5649070.10000.5000000000AGR-MMFMNIST80dncFedAvg
35664723290.9804420.10000.5000000000AGR-MMFMNIST80dncFedAvg
35674823302.2510220.10000.5000000000AGR-MMFMNIST80dncFedAvg
35684923312.5125960.10000.5000000000AGR-MMFMNIST80dncFedAvg
35695023326.1161770.10000.5000000000AGR-MMFMNIST80dncFedAvg
\n","

6884 rows × 13 columns

\n","
"],"text/plain":[" round loss accuracy auc TP TN FP FN attack_fn \\\n","0 0 23082.333813 0.0631 0.479500 0 0 0 0 GAUSS \n","1 1 21920.131561 0.1977 0.554278 0 0 0 0 GAUSS \n","2 2 17859.096020 0.4210 0.678333 0 0 0 0 GAUSS \n","3 3 15559.044926 0.4920 0.717778 0 100 0 0 GAUSS \n","4 4 14684.193722 0.5001 0.722278 0 200 0 0 GAUSS \n","... ... ... ... ... .. ... .. .. ... \n","3565 46 23279.564907 0.1000 0.500000 0 0 0 0 AGR-MM \n","3566 47 23290.980442 0.1000 0.500000 0 0 0 0 AGR-MM \n","3567 48 23302.251022 0.1000 0.500000 0 0 0 0 AGR-MM \n","3568 49 23312.512596 0.1000 0.500000 0 0 0 0 AGR-MM \n","3569 50 23326.116177 0.1000 0.500000 0 0 0 0 AGR-MM \n","\n"," dataset_name num_malicious strategy aggregate_fn \n","0 FMNIST 0 FLANDERS FedAvg \n","1 FMNIST 0 FLANDERS FedAvg \n","2 FMNIST 0 FLANDERS FedAvg \n","3 FMNIST 0 FLANDERS FedAvg \n","4 FMNIST 0 FLANDERS FedAvg \n","... ... ... ... ... \n","3565 FMNIST 80 dnc FedAvg \n","3566 FMNIST 80 dnc FedAvg \n","3567 FMNIST 80 dnc FedAvg \n","3568 FMNIST 80 dnc FedAvg \n","3569 FMNIST 80 dnc FedAvg \n","\n","[6884 rows x 13 columns]"]},"execution_count":123,"metadata":{},"output_type":"execute_result"}],"source":["fmnist_df"]},{"cell_type":"code","execution_count":124,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":6,"status":"ok","timestamp":1709217818750,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-60},"id":"XYwnEFa2UaLs","outputId":"44dcb4de-f892-4555-b5d8-8cfcacd37137"},"outputs":[{"name":"stdout","output_type":"stream","text":["Unique values in attack_fn: ['GAUSS' 'LIE' 'OPT' 'AGR-MM']\n","Unique values in num_malicious: [ 0 20 60 80]\n","Unique values in dataset_name: ['FMNIST']\n","Unique values in strategy: ['FLANDERS' 'FedAvg' 'TrimmedMean' 'FedMedian' 'MultiKrum' 'Bulyan' 'dnc']\n","Unique values in aggregate_fn: ['FedAvg' 'TrimmedMean' 'FedMedian' 'MultiKrum' 
'Bulyan']\n"]}],"source":["print_unique_data(fmnist_df)"]},{"cell_type":"markdown","metadata":{"id":"1TUxrAF6w6cY"},"source":["## Unify datasets"]},{"cell_type":"code","execution_count":125,"metadata":{"id":"R2wOP2Eex7X2"},"outputs":[],"source":["all_datasets_df = pd.concat([mnist_df, fmnist_df])"]},{"cell_type":"code","execution_count":126,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":443},"executionInfo":{"elapsed":428,"status":"ok","timestamp":1716376740426,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"jwDN17ygyFK7","outputId":"c3db739f-98b7-4070-f826-4344553e9ab7"},"outputs":[{"data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
roundlossaccuracyaucTPTNFPFNattack_fndataset_namenum_maliciousstrategyaggregate_fn
00720.8331210.10450.5034220000GAUSSMNIST0FLANDERSFedAvg
11664.5222520.20890.5631160000GAUSSMNIST0FLANDERSFedAvg
22624.6338260.35600.6447310000GAUSSMNIST0FLANDERSFedAvg
33581.4764720.47730.710941010000GAUSSMNIST0FLANDERSFedAvg
44545.1142050.54300.746970020000GAUSSMNIST0FLANDERSFedAvg
..........................................
35654623279.5649070.10000.5000000000AGR-MMFMNIST80dncFedAvg
35664723290.9804420.10000.5000000000AGR-MMFMNIST80dncFedAvg
35674823302.2510220.10000.5000000000AGR-MMFMNIST80dncFedAvg
35684923312.5125960.10000.5000000000AGR-MMFMNIST80dncFedAvg
35695023326.1161770.10000.5000000000AGR-MMFMNIST80dncFedAvg
\n","

14432 rows × 13 columns

\n","
"],"text/plain":[" round loss accuracy auc TP TN FP FN attack_fn \\\n","0 0 720.833121 0.1045 0.503422 0 0 0 0 GAUSS \n","1 1 664.522252 0.2089 0.563116 0 0 0 0 GAUSS \n","2 2 624.633826 0.3560 0.644731 0 0 0 0 GAUSS \n","3 3 581.476472 0.4773 0.710941 0 100 0 0 GAUSS \n","4 4 545.114205 0.5430 0.746970 0 200 0 0 GAUSS \n","... ... ... ... ... .. ... .. .. ... \n","3565 46 23279.564907 0.1000 0.500000 0 0 0 0 AGR-MM \n","3566 47 23290.980442 0.1000 0.500000 0 0 0 0 AGR-MM \n","3567 48 23302.251022 0.1000 0.500000 0 0 0 0 AGR-MM \n","3568 49 23312.512596 0.1000 0.500000 0 0 0 0 AGR-MM \n","3569 50 23326.116177 0.1000 0.500000 0 0 0 0 AGR-MM \n","\n"," dataset_name num_malicious strategy aggregate_fn \n","0 MNIST 0 FLANDERS FedAvg \n","1 MNIST 0 FLANDERS FedAvg \n","2 MNIST 0 FLANDERS FedAvg \n","3 MNIST 0 FLANDERS FedAvg \n","4 MNIST 0 FLANDERS FedAvg \n","... ... ... ... ... \n","3565 FMNIST 80 dnc FedAvg \n","3566 FMNIST 80 dnc FedAvg \n","3567 FMNIST 80 dnc FedAvg \n","3568 FMNIST 80 dnc FedAvg \n","3569 FMNIST 80 dnc FedAvg \n","\n","[14432 rows x 13 columns]"]},"execution_count":126,"metadata":{},"output_type":"execute_result"}],"source":["all_datasets_df"]},{"cell_type":"code","execution_count":127,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":2,"status":"ok","timestamp":1716376741411,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"VWrdDPtPyHHA","outputId":"1ef8b853-63f8-4eac-a785-578684dbf0a6"},"outputs":[{"name":"stdout","output_type":"stream","text":["Unique values in attack_fn: ['GAUSS' 'LIE' 'OPT' 'AGR-MM']\n","Unique values in num_malicious: [ 0 20 60 80]\n","Unique values in dataset_name: ['MNIST' 'FMNIST']\n","Unique values in strategy: ['FLANDERS' 'FedAvg' 'TrimmedMean' 'FedMedian' 'MultiKrum' 'Bulyan' 'dnc']\n","Unique values in aggregate_fn: ['FedAvg' 'TrimmedMean' 'FedMedian' 'MultiKrum' 'Bulyan'\n"," 
'flanders.strategies.aggregate.aggregate_dnc']\n"]}],"source":["print_unique_data(all_datasets_df)"]},{"cell_type":"markdown","metadata":{"id":"OlES57TEn2Ng"},"source":["# Tables\n"]},{"cell_type":"markdown","metadata":{"id":"hcHkTXfGbapg"},"source":["## Accuracy"]},{"cell_type":"markdown","metadata":{"id":"7F1YDs12sZbE"},"source":["### Best with improvment w.r.t. baseline"]},{"cell_type":"code","execution_count":128,"metadata":{"id":"GuQM8bzXnIGx"},"outputs":[],"source":["def accuracy_table(input_df, b):\n"," # Define strategies and attacks\n"," strategies = ['FedAvg', 'FLANDERS + FedAvg', 'FedMedian', 'FLANDERS + FedMedian', 'TrimmedMean', 'FLANDERS + TrimmedMean', 'MultiKrum', 'FLANDERS + MultiKrum', 'Bulyan', 'FLANDERS + Bulyan']\n"," attacks = ['GAUSS', 'LIE', 'OPT', 'AGR-MM']\n"," dataset_names = [\"MNIST\", \"FMNIST\"]\n","\n"," # Create MultiIndex for the columns\n"," columns = pd.MultiIndex.from_product([dataset_names, attacks], names=['Dataset', 'Attack'])\n","\n"," # Create an empty DataFrame with the defined columns and strategies\n"," df = pd.DataFrame(index=strategies, columns=columns)\n","\n"," filtered_df = input_df[(input_df['num_malicious'] == b) & (input_df['round'] >= 3)]\n"," baseline_df = filtered_df[filtered_df['strategy'] != 'FLANDERS']\n"," flanders_df = filtered_df[filtered_df['strategy'] == 'FLANDERS']\n","\n"," # Populate the DataFrame\n"," for strategy in ['FedAvg', 'TrimmedMean', 'FedMedian', 'MultiKrum', 'Bulyan']:\n"," for dataset in dataset_names:\n"," for attack in attacks:\n"," df.loc[strategy, (dataset, attack)] = round(baseline_df[(baseline_df['strategy']==strategy) & (baseline_df['attack_fn']==attack) & (baseline_df['dataset_name']==dataset)]['accuracy'].max(), 2)\n"," df.loc[f\"FLANDERS + {strategy}\", (dataset, attack)] = round(flanders_df[(flanders_df['aggregate_fn']==strategy) & (flanders_df['attack_fn']==attack) & (flanders_df['dataset_name']==dataset)]['accuracy'].max(), 2)\n","\n"," return 
df\n"]},{"cell_type":"code","execution_count":129,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":457},"executionInfo":{"elapsed":1243,"status":"ok","timestamp":1715943873268,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"PruXJg2vA87b","outputId":"98d83851-626b-4877-fd09-8f2f01200c65"},"outputs":[{"data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
DatasetMNISTFMNIST
AttackGAUSSGAUSS
FedAvg0.860.68
FLANDERS + FedAvg0.840.64
FedMedian0.830.71
FLANDERS + FedMedian0.760.73
TrimmedMean0.850.69
FLANDERS + TrimmedMean0.780.7
MultiKrum0.680.66
FLANDERS + MultiKrum0.740.73
Bulyan0.860.62
FLANDERS + Bulyan0.870.65
\n","
"],"text/plain":["Dataset MNIST FMNIST\n","Attack GAUSS GAUSS\n","FedAvg 0.86 0.68\n","FLANDERS + FedAvg 0.84 0.64\n","FedMedian 0.83 0.71\n","FLANDERS + FedMedian 0.76 0.73\n","TrimmedMean 0.85 0.69\n","FLANDERS + TrimmedMean 0.78 0.7\n","MultiKrum 0.68 0.66\n","FLANDERS + MultiKrum 0.74 0.73\n","Bulyan 0.86 0.62\n","FLANDERS + Bulyan 0.87 0.65"]},"execution_count":129,"metadata":{},"output_type":"execute_result"}],"source":["# Table 19\n","acc_0 = accuracy_table(all_datasets_df, 0).dropna(axis=1, how='all')\n","acc_0"]},{"cell_type":"code","execution_count":130,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":457},"executionInfo":{"elapsed":858,"status":"ok","timestamp":1715944158330,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"8bV7hABWbyMS","outputId":"6d24f87d-da15-40b2-8880-3b6dcefda1fd"},"outputs":[{"data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
DatasetMNISTFMNIST
AttackGAUSSLIEOPTAGR-MMGAUSSLIEOPTAGR-MM
FedAvg0.20.170.670.450.250.170.570.11
FLANDERS + FedAvg0.880.870.480.880.660.670.570.64
FedMedian0.80.660.790.590.660.650.670.6
FLANDERS + FedMedian0.850.850.660.830.710.690.630.73
TrimmedMean0.860.520.730.610.690.540.620.58
FLANDERS + TrimmedMean0.810.850.780.830.690.70.630.73
MultiKrum0.780.770.810.820.740.650.70.67
FLANDERS + MultiKrum0.820.860.840.820.730.70.730.71
Bulyan0.820.840.840.830.710.720.690.76
FLANDERS + Bulyan0.90.840.790.850.650.650.660.65
\n","
"],"text/plain":["Dataset MNIST FMNIST \n","Attack GAUSS LIE OPT AGR-MM GAUSS LIE OPT AGR-MM\n","FedAvg 0.2 0.17 0.67 0.45 0.25 0.17 0.57 0.11\n","FLANDERS + FedAvg 0.88 0.87 0.48 0.88 0.66 0.67 0.57 0.64\n","FedMedian 0.8 0.66 0.79 0.59 0.66 0.65 0.67 0.6\n","FLANDERS + FedMedian 0.85 0.85 0.66 0.83 0.71 0.69 0.63 0.73\n","TrimmedMean 0.86 0.52 0.73 0.61 0.69 0.54 0.62 0.58\n","FLANDERS + TrimmedMean 0.81 0.85 0.78 0.83 0.69 0.7 0.63 0.73\n","MultiKrum 0.78 0.77 0.81 0.82 0.74 0.65 0.7 0.67\n","FLANDERS + MultiKrum 0.82 0.86 0.84 0.82 0.73 0.7 0.73 0.71\n","Bulyan 0.82 0.84 0.84 0.83 0.71 0.72 0.69 0.76\n","FLANDERS + Bulyan 0.9 0.84 0.79 0.85 0.65 0.65 0.66 0.65"]},"execution_count":130,"metadata":{},"output_type":"execute_result"}],"source":["# Table 15\n","acc_20 = accuracy_table(all_datasets_df, 20)\n","acc_20"]},{"cell_type":"markdown","metadata":{},"source":["Bulyan is NaN because it cannot work when the number of malicious clients is > 25%"]},{"cell_type":"code","execution_count":131,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":477},"executionInfo":{"elapsed":1126,"status":"ok","timestamp":1716115710006,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"xSvgEwLoPmh3","outputId":"1c65e66e-7374-46f0-a2a5-0964bc40e49a"},"outputs":[{"data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
DatasetMNISTFMNIST
AttackGAUSSLIEOPTAGR-MMGAUSSLIEOPTAGR-MM
FedAvg0.190.150.20.160.280.10.190.1
FLANDERS + FedAvg0.760.880.850.850.690.670.710.67
FedMedian0.80.190.160.290.650.10.10.1
FLANDERS + FedMedian0.80.860.830.860.710.690.710.71
TrimmedMean0.250.20.330.10.330.10.170.1
FLANDERS + TrimmedMean0.780.870.840.830.70.710.730.74
MultiKrum0.790.140.220.150.710.10.120.1
FLANDERS + MultiKrum0.880.880.860.780.720.710.730.69
BulyanNaNNaNNaNNaNNaNNaNNaNNaN
FLANDERS + Bulyan0.890.870.90.850.680.640.60.69
\n","
"],"text/plain":["Dataset MNIST FMNIST \n","Attack GAUSS LIE OPT AGR-MM GAUSS LIE OPT AGR-MM\n","FedAvg 0.19 0.15 0.2 0.16 0.28 0.1 0.19 0.1\n","FLANDERS + FedAvg 0.76 0.88 0.85 0.85 0.69 0.67 0.71 0.67\n","FedMedian 0.8 0.19 0.16 0.29 0.65 0.1 0.1 0.1\n","FLANDERS + FedMedian 0.8 0.86 0.83 0.86 0.71 0.69 0.71 0.71\n","TrimmedMean 0.25 0.2 0.33 0.1 0.33 0.1 0.17 0.1\n","FLANDERS + TrimmedMean 0.78 0.87 0.84 0.83 0.7 0.71 0.73 0.74\n","MultiKrum 0.79 0.14 0.22 0.15 0.71 0.1 0.12 0.1\n","FLANDERS + MultiKrum 0.88 0.88 0.86 0.78 0.72 0.71 0.73 0.69\n","Bulyan NaN NaN NaN NaN NaN NaN NaN NaN\n","FLANDERS + Bulyan 0.89 0.87 0.9 0.85 0.68 0.64 0.6 0.69"]},"execution_count":131,"metadata":{},"output_type":"execute_result"}],"source":["# Table 17\n","acc_60 = accuracy_table(all_datasets_df, 60)\n","acc_60"]},{"cell_type":"code","execution_count":132,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":477},"executionInfo":{"elapsed":1188,"status":"ok","timestamp":1716050662469,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"dM_AMm_jcCye","outputId":"90533ef1-500f-40a3-f565-2c2ca1d68aaa"},"outputs":[{"data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
DatasetMNISTFMNIST
AttackGAUSSLIEOPTAGR-MMGAUSSLIEOPTAGR-MM
FedAvg0.210.160.310.130.240.10.180.1
FLANDERS + FedAvg0.850.860.880.850.690.70.690.66
FedMedian0.340.170.140.090.30.10.140.1
FLANDERS + FedMedian0.870.840.80.80.730.740.720.72
TrimmedMean0.170.150.210.140.210.10.120.1
FLANDERS + TrimmedMean0.810.850.810.820.740.730.70.69
MultiKrum0.820.210.320.110.720.10.150.1
FLANDERS + MultiKrum0.870.830.870.850.680.730.720.7
BulyanNaNNaNNaNNaNNaNNaNNaNNaN
FLANDERS + Bulyan0.840.840.830.80.690.720.690.68
\n","
"],"text/plain":["Dataset MNIST FMNIST \n","Attack GAUSS LIE OPT AGR-MM GAUSS LIE OPT AGR-MM\n","FedAvg 0.21 0.16 0.31 0.13 0.24 0.1 0.18 0.1\n","FLANDERS + FedAvg 0.85 0.86 0.88 0.85 0.69 0.7 0.69 0.66\n","FedMedian 0.34 0.17 0.14 0.09 0.3 0.1 0.14 0.1\n","FLANDERS + FedMedian 0.87 0.84 0.8 0.8 0.73 0.74 0.72 0.72\n","TrimmedMean 0.17 0.15 0.21 0.14 0.21 0.1 0.12 0.1\n","FLANDERS + TrimmedMean 0.81 0.85 0.81 0.82 0.74 0.73 0.7 0.69\n","MultiKrum 0.82 0.21 0.32 0.11 0.72 0.1 0.15 0.1\n","FLANDERS + MultiKrum 0.87 0.83 0.87 0.85 0.68 0.73 0.72 0.7\n","Bulyan NaN NaN NaN NaN NaN NaN NaN NaN\n","FLANDERS + Bulyan 0.84 0.84 0.83 0.8 0.69 0.72 0.69 0.68"]},"execution_count":132,"metadata":{},"output_type":"execute_result"}],"source":["# Table 3\n","acc_80 = accuracy_table(all_datasets_df, 80)\n","acc_80"]},{"cell_type":"markdown","metadata":{"id":"CZX8c37MsgFL"},"source":["### Best w.r.t. number of attackers"]},{"cell_type":"code","execution_count":133,"metadata":{"id":"xgIKM1obsmd2"},"outputs":[],"source":["def accuracy_table_attackers(input_df, aggregate_fn):\n"," # Define strategies and attacks\n"," attacks = ['GAUSS', 'LIE', 'OPT', 'AGR-MM']\n"," dataset_names = [\"MNIST\", \"FMNIST\"]\n"," num_malicious = [0, 20, 60, 80]\n","\n"," # Create MultiIndex for the columns\n"," columns = pd.MultiIndex.from_product([dataset_names, num_malicious], names=['Dataset', '# Malicious'])\n","\n"," #######\n"," #columns = pd.MultiIndex.from_product([['MNIST', 'CIFAR-10'], ['GAUSS', 'LIE', 'OPT', 'AGR-MM'], ['LAST', 'BEST']])\n"," #######\n","\n","\n"," # Create an empty DataFrame with the defined columns and strategies\n"," df = pd.DataFrame(index=attacks, columns=columns)\n","\n"," filtered_df = input_df[(input_df['aggregate_fn'] == aggregate_fn) & (input_df['round'] >= 3)]\n"," baseline_df = filtered_df[filtered_df['strategy'] != 'FLANDERS']\n"," flanders_df = filtered_df[filtered_df['strategy'] == 'FLANDERS']\n","\n"," # Populate the DataFrame\n"," for dataset in 
dataset_names:\n"," for attack in attacks:\n"," for b in num_malicious:\n"," if b == 0:\n"," df.loc[attack, (dataset, b)] = round(flanders_df[(flanders_df['num_malicious']==b) & (flanders_df['attack_fn']=='GAUSS') & (flanders_df['dataset_name']==dataset)]['accuracy'].max(), 2)\n"," else:\n"," df.loc[attack, (dataset, b)] = round(flanders_df[(flanders_df['num_malicious']==b) & (flanders_df['attack_fn']==attack) & (flanders_df['dataset_name']==dataset)]['accuracy'].max(), 2)\n","\n"," return df"]},{"cell_type":"code","execution_count":134,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":206},"executionInfo":{"elapsed":305,"status":"ok","timestamp":1715954054792,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"0n9vhQiQuxk_","outputId":"500dfb31-f525-4289-f337-2c945bf50cd2"},"outputs":[{"data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
DatasetMNISTFMNIST
# Malicious02060800206080
GAUSS0.740.820.880.870.730.730.720.68
LIE0.740.860.880.830.730.70.710.73
OPT0.740.840.860.870.730.730.730.72
AGR-MM0.740.820.780.850.730.710.690.7
\n","
"],"text/plain":["Dataset MNIST FMNIST \n","# Malicious 0 20 60 80 0 20 60 80\n","GAUSS 0.74 0.82 0.88 0.87 0.73 0.73 0.72 0.68\n","LIE 0.74 0.86 0.88 0.83 0.73 0.7 0.71 0.73\n","OPT 0.74 0.84 0.86 0.87 0.73 0.73 0.73 0.72\n","AGR-MM 0.74 0.82 0.78 0.85 0.73 0.71 0.69 0.7"]},"execution_count":134,"metadata":{},"output_type":"execute_result"}],"source":["# Table 20\n","acc_att = accuracy_table_attackers(all_datasets_df, 'MultiKrum')\n","acc_att"]},{"cell_type":"markdown","metadata":{"id":"g6yDorrubUw1"},"source":["## Precision and Recall"]},{"cell_type":"code","execution_count":135,"metadata":{"id":"y0PlDlG0bKek"},"outputs":[],"source":["def pr_table(input_df, b):\n"," strategies = ['FLANDERS']\n"," attacks = ['GAUSS', 'LIE', 'OPT', 'AGR-MM']\n"," dataset_names = [\"MNIST\", \"FMNIST\"]\n","\n"," # Create MultiIndex for the columns\n"," columns = pd.MultiIndex.from_product([strategies, attacks, ['P', 'R']], names=['Strategy', 'Attack', 'P/R'])\n","\n"," # Create an empty DataFrame with the defined columns and strategies\n"," df = pd.DataFrame(index=dataset_names, columns=columns)\n","\n"," filtered_df = input_df[(input_df['num_malicious'] == b) & (input_df['round'] == 50) & (input_df['aggregate_fn']=='FedAvg')]\n"," flanders_df = filtered_df[filtered_df['strategy'] == 'FLANDERS']\n"," strat_dfs = [flanders_df]\n","\n"," # Populate the DataFrame\n"," for dataset in dataset_names:\n"," for attack in attacks:\n"," for idx, strategy in enumerate(strategies):\n"," tp = strat_dfs[idx][(strat_dfs[idx]['attack_fn']==attack) & (strat_dfs[idx]['dataset_name']==dataset)]['TP'].iloc[0]\n"," fp = strat_dfs[idx][(strat_dfs[idx]['attack_fn']==attack) & (strat_dfs[idx]['dataset_name']==dataset)]['FP'].iloc[0]\n"," fn = strat_dfs[idx][(strat_dfs[idx]['attack_fn']==attack) & (strat_dfs[idx]['dataset_name']==dataset)]['FN'].iloc[0]\n"," df.loc[dataset, (strategy, attack, 'P')] = round(tp / (tp+fp), 2)\n"," df.loc[dataset, (strategy, attack, 'R')] = round(tp / (tp+fn), 2)\n","\n"," 
return df"]},{"cell_type":"code","execution_count":136,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":257},"executionInfo":{"elapsed":248,"status":"ok","timestamp":1716367268457,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"rpQyziNRh3dn","outputId":"26e2c0f9-144d-4cad-d13f-1e2351aa2081"},"outputs":[{"data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
StrategyFLANDERS
AttackGAUSSLIEOPTAGR-MM
P/RPRPRPRPR
MNIST1.01.01.01.00.150.151.01.0
FMNIST1.01.01.01.00.160.161.01.0
\n","
"],"text/plain":["Strategy FLANDERS \n","Attack GAUSS LIE OPT AGR-MM \n","P/R P R P R P R P R\n","MNIST 1.0 1.0 1.0 1.0 0.15 0.15 1.0 1.0\n","FMNIST 1.0 1.0 1.0 1.0 0.16 0.16 1.0 1.0"]},"execution_count":136,"metadata":{},"output_type":"execute_result"}],"source":["# Table 1\n","pr_20 = pr_table(all_datasets_df, 20)\n","pr_20"]},{"cell_type":"code","execution_count":137,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":257},"executionInfo":{"elapsed":327,"status":"ok","timestamp":1716367273542,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"ccW0Ups3iMvZ","outputId":"ba945e17-1bbe-414f-9aa0-b03fb0fe107f"},"outputs":[{"data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
StrategyFLANDERS
AttackGAUSSLIEOPTAGR-MM
P/RPRPRPRPR
MNIST1.01.01.01.01.01.01.01.0
FMNIST1.01.01.01.01.01.01.01.0
\n","
"],"text/plain":["Strategy FLANDERS \n","Attack GAUSS LIE OPT AGR-MM \n","P/R P R P R P R P R\n","MNIST 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0\n","FMNIST 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0"]},"execution_count":137,"metadata":{},"output_type":"execute_result"}],"source":["# Table 2\n","pr_60 = pr_table(all_datasets_df, 60)\n","pr_60"]},{"cell_type":"code","execution_count":138,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":257},"executionInfo":{"elapsed":308,"status":"ok","timestamp":1716376750779,"user":{"displayName":"Edoardo Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"05a0Gv5piS2v","outputId":"0cf6555a-4cc8-4aa5-f9b6-c8286afa130a"},"outputs":[{"data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
StrategyFLANDERS
AttackGAUSSLIEOPTAGR-MM
P/RPRPRPRPR
MNIST1.01.01.01.01.01.01.01.0
FMNIST1.01.01.01.01.01.01.01.0
\n","
"],"text/plain":["Strategy FLANDERS \n","Attack GAUSS LIE OPT AGR-MM \n","P/R P R P R P R P R\n","MNIST 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0\n","FMNIST 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0"]},"execution_count":138,"metadata":{},"output_type":"execute_result"}],"source":["# Table 3\n","pr_80 = pr_table(all_datasets_df, 80)\n","pr_80"]},{"cell_type":"markdown","metadata":{"id":"bN7dTn2u0r6K"},"source":["# Plots"]},{"cell_type":"markdown","metadata":{"id":"xZ0wiadBsVUh"},"source":["## Accuracy over rounds"]},{"cell_type":"code","execution_count":139,"metadata":{"id":"LQ_uYJCtjdJS"},"outputs":[],"source":["df_mnist_acc_flanders = all_datasets_df[(all_datasets_df['strategy']=='FLANDERS') & (all_datasets_df['num_malicious']==80) & (all_datasets_df['dataset_name']=='MNIST') & (all_datasets_df['aggregate_fn']=='MultiKrum')]\n","df_mnist_acc_fedavg = all_datasets_df[(all_datasets_df['strategy']=='FedAvg') & (all_datasets_df['num_malicious']==80) & (all_datasets_df['dataset_name']=='MNIST')]\n","df_no_attack = all_datasets_df[(all_datasets_df['strategy']=='FedAvg') & (all_datasets_df['num_malicious']==0) & (all_datasets_df['dataset_name']=='MNIST')]"]},{"cell_type":"code","execution_count":140,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":408},"executionInfo":{"elapsed":651,"status":"ok","timestamp":1714668544212,"user":{"displayName":"Edoardo 
Gabrielli","userId":"12318890431187689267"},"user_tz":-120},"id":"NDkD_Qnd1iT7","outputId":"928cfa90-04a2-4624-9825-adb0d124aaf8"},"outputs":[{"data":{"image/png":"iVBORw0KGgoAAAANSUhEUgAAA68AAAGHCAYAAABf+GSbAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy80BEi2AAAACXBIWXMAAA9hAAAPYQGoP6dpAADc1klEQVR4nOzdd3wURRvA8d9eTe8NElIIvUvvTUABFREsYMWKvXfF3iv2jq8aQFEQKdJFgdB7C4SQhJBKer+67x8bgjEEQggkwPP1c5/kdmdn5y6RyXMz84yiqqqKEEIIIYQQQgjRiOkaugFCCCGEEEIIIcTJSPAqhBBCCCGEEKLRk+BVCCGEEEIIIUSjJ8GrEEIIIYQQQohGT4JXIYQQQgghhBCNngSvQgghhBBCCCEaPQlehRBCCCGEEEI0ehK8CiGEEEIIIYRo9CR4FUIIIYQQQgjR6DXa4HXfvn18/PHH3HLLLXTs2BGDwYCiKLz66qunVe+yZcsYNWoUAQEBuLq60qZNG5599lmKi4vrqeVCCCFEw5I+VAghxPnI0NANqMnnn3/O1KlT67XODz74gEceeQRFURgwYADBwcGsWrWK119/nd9++43Vq1cTEBBQr/cUQgghzjbpQ4UQQpyPGu3Ia4cOHXjssceIiYlh79693HjjjadV39atW3n00UfR6/UsWLCAv//+m19++YWEhAQuvvhi9u3bx+TJk+up9UIIIUTDkT5UCCHE+ajRjrzefvvtVZ7rdKcXZ7/xxhuoqsqkSZMYOXJk5XE3Nze+/fZbmjdvzm+//UZcXBxt2rQ5rXsJIYQQDUn6UCGEEOejRjvyWp+sVisLFiwAYOLEidXOR0RE0K9fPwDmzJlzVtsmhBBCNGbShwohhGgsLojgdf/+/ZSWlgLQvXv345Y5enzr1q1nrV1CCCFEYyd9qBBCiMbiggheExMTAfDx8cHT0/O4ZZo1a1alrBBCCCGkDxVCCNF4NNo1r/WpqKgIAHd39xrLeHh4AFBYWHjCuiwWCxaLpfK50+kkNzcXf39/FEWph9YKIYQ416iqSlFREU2bNj3t9aWNjfShQgghzqRT6UMviOC1Pr3xxhu89NJLDd0MIYQQjVBKSgphYWEN3YxGS/pQIYQQNalNH3pBBK9HpzmVlJTUWOboButeXl4nrOvpp5/mkUceqXxeUFBAeHg4KSkpJ71WCCHE+amwsJBmzZrVOK32XCZ9qBBCiDPpVPrQCyJ4jYyMBCA/P5+ioqLjvjEpKSlVytbEbDZjNpurHffy8pKOVwghLnDn49RX6UOFEEKcDbXpQ8+vhTk1aN26NW5ubgBs2rTpuGWOHu/atetZa5cQQgjR2EkfKoQQorG4IIJXk8nE6NGjAZg+fXq188nJycTGxgIwduzYs9o2IYQQojGTPlQIIURjcV4Fr5988glt2rThpptuqnbuqaeeQlEUpk2bxqJFiyqPl5aWctttt+FwOBg3bhxt2rQ5m00WQgghGgXpQ4UQQjR2jXbN65YtW7jnnnsqnyckJADw5ZdfMn/+/Mrjc+bMoUmTJgBkZ2ezb98+QkJCqtXXtWtX3nvvPR555BFGjRrFoEGDCAoKYtWqVaSnp9O6dWu++OKLM/yqhBBCiDNP+lAhhBDno0YbvBYWFrJ+/fpqxw8fPszhw4crn/97v7iTefjhh+nYsSPvvfceGzZsoKSkhPDwcJ5++mmefvrp8zJLpBBCiAuP9KFCiMZGVVUcDgd2u72hmyLOIqPRiF6vr7f6FFVV1Xqr7QJUWFiIt7c3BQUFkilRCCEuUNIX1I28b0Kc/1RVJT8/nyNHjuBwOBq6OaIB+Pj4
EBISUmM24VPpCxrtyKsQQgghhBDi3JaRkUF+fn7lllgGg+G83FZMVKeqKqWlpWRlZQFULlM5HRK8CiGEEEIIIeqdw+GgoKCAwMBAAgICGro5ogG4uroCkJWVRVBQ0GlPIT6vsg0LIYQQQgghGgebzYaqqri7uzd0U0QDOrpXuM1mO+26JHgVQgghhBBCnDEyTfjCVp8/fwlehRBCCCGEEEI0ehK8CiGEEEIIIYRo9CR4FUIIIYQQQgjR6EnwKoQQQgghhBCnITExkfvuu49WrVrh5uaGm5sb7dq1495772XHjh3HveaJJ55AURSuvfba455fuXIliqLw66+/Hvf8fffdV209qdVqZerUqVx00UV4eXnh4+ND+/btufPOO4mLi6tSdufOnYwfP56IiAhcXFwIDQ1l+PDhfPzxx3V4B84O2SpHCCGEEEIIIepo/vz5XHvttRgMBq6//no6d+6MTqcjLi6O2bNn8/nnn5OYmEhERETlNaqqMmPGDCIjI5k3bx5FRUV4enqedlvGjRvHn3/+yYQJE7jjjjuw2WzExcUxf/58+vbtS5s2bQCIjY1lyJAhhIeHc8cddxASEkJKSgrr1q1j6tSp3H///afdljNBglchhBBCCCGEqIOEhASuu+46IiIiWL58OU2aNKly/q233uKzzz5Dp6s64XXlypUcPnyYFStWcMkllzB79mxuvvnm02rLxo0bmT9/Pq+99hrPPPNMlXOffPIJ+fn5lc9fe+01vL292bhxIz4+PlXKZmVlnVY7ziSZNiyEEEIIIYQQdfD2229TUlLCtGnTqgWuAAaDgQceeIBmzZpVOR4TE0O7du0YMmQIw4YNIyYm5rTbkpCQAEC/fv2qndPr9fj7+1cp2759+2qBK0BQUNBpt+VMkeBVCCGEEEIIIepg/vz5tGjRgl69etX6GovFwm+//caECRMAmDBhAitWrCAjI+O02nJ0WnJMTAx2u/2kZTdv3syuXbtO655nmwSvQgghhBBCiLNGVVVKrfZG81BVtU6vo7CwkLS0NDp06FDtXH5+PtnZ2ZWPsrKyynPz588nPz+f6667DoArr7wSo9HIzJkz6/aGVujduzeDBg3i66+/JiwsjIkTJ/LZZ59x6NChamUfe+wxSktL6dKlC3379uXJJ59kyZIl2Gy202rDmSZrXoUQQgghhBBnTZnNQbspixu6GZX2vHwJbqZTD4sKCwsB8PDwqHZu8ODBbN++vfL5O++8w2OPPQZoI6Pdu3enRYsWAHh6ejJ69GhiYmJ46KGH6vAKNIqisHjxYt59911++uknZsyYwYwZM7j33nu55ppr+PLLLyunCQ8fPpy1a9fyxhtvsHjxYtauXcvbb79NYGAg33zzDVdccUWd23EmycirEEIIIYQQQpyio9mBi4uLq5378ssvWbp0KT/99FOV4/n5+SxcuJBBgwZx4MCByke/fv3YtGkT+/fvP602mc1mnn32Wfbu3UtaWhozZsygd+/e/PLLL9x3331Vyvbo0YPZs2eTl5fHhg0bePrppykqKmL8+PHs2bPntNpxpsjIqxBCCCGEEOKscTXq2fPyJQ3djEquRn2drvP29qZJkybHXTd6dA1sUlJSleOzZs3CYrHw3nvv8d5771W7LiYmhpdeegkAFxcXgCpTjv+ttLS0sszxNGnShOuuu45x48bRvn17fvnlF77//nsMhqohoMlkokePHvTo0YNWrVoxadIkZs2axQsvvFDzi28gErwKIYQQQgghzhpFUeo0TbcxGj16NN988w0bNmygZ8+eJy0fExNDhw4djhsYfvnll0yfPr0yeD2agGnfvn3HrWvfvn1V9o6tidFopFOnTsTHx5OdnU1ISEiNZbt37w5Aenr6SettCDJtWAghhBBCCCHq4IknnsDNzY1bb72VzMzMauf/nQwqJSWFf/75h2uuuYbx48dXe0yaNIkDBw6wfv16QBs57dKlCz/99FOVPVoBNm/ezLp16xg5cmTl
sfj4+OMmZ8rPz2ft2rX4+voSGBgIwF9//XXcRFULFy4EoHXr1qf+ZpwF58dHHkIIIYQQQghxlrVs2ZLp06czYcIEWrduzfXXX0/nzp1RVZXExESmT5+OTqcjLCyM6dOno6pqjcmQRo0ahcFgICYmpnLa8fvvv88ll1xCly5duOWWW2jatCl79+7lq6++okmTJjz99NOV12/fvp2JEycycuRIBgwYgJ+fH6mpqfzvf/8jLS2NDz/8EL1emyJ9//33U1paytixY2nTpg1Wq5XY2Fh+/vlnIiMjmTRp0pl/8+pAUeuaG1oAWpYxb29vCgoK8PLyaujmCCGEaADSF9SNvG9CnN/Ky8tJTEwkKirqhGszzwcJCQm89957LF26lMOHD6MoChEREQwePJjJkyfTuXNnOnXqREFBAcnJyTXWM2TIEPbs2UNqamrl2tT169fz6quvsmbNGoqKiggODmbkyJG8+OKLhIaGVl6blZXFtGnT+PPPP9m/fz9HjhzB09OTiy66iHvuuYdx48ZVll20aBGzZs0iNjaWw4cPY7VaCQ8PZ+TIkTz33HMEBQXV23tzst+DU+kLJHg9TdLxCiGEkL6gbuR9E+L8diEFr6Jm9Rm8yppXIYQQQgghhBCNngSvQgghhBBCCCEaPQlehRBCCCGEEEI0ehK8CiGEEEIIIYRo9CR4FUIIIYQQQgjR6EnwKoQQQgghhBCi0ZPgVQghhBBCCCFEoyfBqxBCCCGEEEKIRk+CVyGEEEIIIYQQjZ6hoRsghBDi7Ci3OXAx6hu6GXXmVJ3kW/LJLssmpyyH7LJscstzK58XWYtoF9COIc2G0Nq3NYqiNHSThRBCCFGPJHgVQojzXEpuKQ/9vI3NyXn4uZsI93Mjwt+NCD83mvm5EeHvjq+bkSPFFrIKLWQWlpNZaCGzqJyswnKKyu14uhjwcjFqX12NeLkY8XI14G42oFcUdIqCooBOUdDptK8ApVYHJRY7JRYHpVY7xRY7pVYHVoeTYE8XQn1dCfN1JdTHlWa+bij6MjZlbiK9JJ3M0kwySzLJKMkgszSTrNIsbE7bCV/rysMr+WzbZzRxb8LgZoMZHDaYcLf2JB8pJdjDiI+LAQ93N1zMJgCKS8soKS0jOMDvjP8chBBCCHF6JHgVQojz2KJd6Tz+6w6Kyu0A5JZYyS2xsi0l/xRrUlH0xSjGPHSmPHTGPBRjLoq+BJwuqA63iofrv76647QGgXqyrsaOwWMfBu9tGDz2oujsJyztY/LB2+SDt8EHd50n7ngQGRSJTudKbNo69uZrwe+MuBnMiJuB6jBjL2mN0xKAwelCuHcgXZpF0qlJUzytTrIO5XLb1Vec4vshhBBCiLNNglchhGgAdocTAIP+zKQesNgdvLEwju9jk1AMhQS3nIfONYkozzaEunTA3dmG8pKmHM61kJxbQkGpjSAvF4I8zQR6GjC6ZWDRJ5Ln2E9meSI55RnYVMspt0NBj4fSDF9Dc4LM0TR1aUWYRyRGnYldOdvYWfAX2c4NOHWlldc4LIE4LSGoNi+cdm/0Dh+CXP3RFzjxshuwq0ZKVQM5qoEy1UCpasBa2Z1dAcpI9O7xGDz2YvDci85QjNFrR2X9qUDqEVhw5Fg7Z8T8zLLrY+r4bgshhLgQff/990yaNImNGzfSvXv3aueTkpKIiorinXfe4bHHHgNg5cqVDBkypMY6Z8yYwXXXXXfG2nyuk+BVCCHOsr3phdz54yaKy+3cP7QlN/SOwGSovyA2OaeE+6ZvZWdqAXr3ffhG/EapWgg22Jm7kZ1sBMDN4EbXyK7c2rsnoR6h7MrZzPas7cTm7KE8r7xavQoKQW5BhHqEag/PUPxc/CixlVBgKaDQWkhuWR65pXkUWgvILj9Csb2YIjWJIlsSh2wroBiMuUa8TF7klOdoFevAZHcjqCQan6I2OC0hFDhdcQsOI7HYRl6pjUMFJ3/dLkYdUQEehPuaCTA3ITpwFC1DPFFd09hdsJWM
kiMcyj9CRnEOeeV5lDoKceqKURQndrup3t5/IYQQ4mQeeOABevToUe14nz59GqA15w4JXoUQoh7ZnDa2Zm5l5eGVrEldg4fRg4ltJ3JJ5CUYdAZWxGVy//StlFgdALw8fw/fxybx+CWtuaxTk9NOMrRwZzpP/rqDIks5Xk2XoXqvxKJCG782PNLtEQ4WHGRD+gY2ZW6i0FrI6tTVrE5dXa0eL5MXnQI70TmwM00N4ViyFQwWV8rLrBTllVGUUkp2SSnJpblMumY0bTtGArB+625mrl5GEBCNSrmhmEJTNkWmbArNRyj3KKDUUUJOeQ4uOhd8C5vR3NGe5ubW+AZ44hXlgZeHG26uLrRtGUmgnw8puWVsSsxiz86DFB9KxR7RnMAQf5p6u9DE25UmPi409XbFx81Yw/sXzkB6H/f9yiuxsPFQGiaDelrvuxBCCHEqBgwYwPjx4xu6GeccCV6FEOI0FVgKWJO6hpWHV7I6dTVF1qIq53es2sHHWz+mpfkyFsSG4XQa6Rvtz8gOIXy04gCHcku5f8ZWvll1kKdHtaV3c/9TbkNGQTnvLdnHrM2HUYw5BLSahUWfBMDENhN5pPsjmPVm+jTtw/Vtrycnv4BV8evYnLWJPYU7ybfl4eMIwqs0EHOhLw9dcyNtoiMBWLdlFz+vXF7tnmZLOV4lxVgsx6YTu5hNuLm6YDIaMBmNeLiH4unRFU93N7w83GjfujkOtzIySjJo69sWV6Mbel3VUWfV4cCalET52liOxO2FvXF0iIujbU7FSK3RiHuP7ngMGYrHkCGYmnqf8vt1lK+7mRFto+p8vRBCCCHOHglehRDnFJvTxvr09Zj1Zlr6tMTHxadB2mF32vk75W9+2f8L69PX41Adled8zb4MCBvAoLBBJBUm8dOen0gtTiW1+Etco91p6zaKj8c8gL+bD+O6hfHNqkS+/PsA29PTmPj9fro1N3BDr5aMbt0Dg+7E/0wXlNn48u8EvluTSLnNicFrOx6hc7BQjpvOnSu9ricstSXf7pvPZRf3Izw0BIB9CYf4e2Ec4EE4fQj/T73FJcemDYcE+tO1Y2s83FzxdDHhG78P19hVKBs3gN2O8cBWCu6+G6+Rl9K5XUs6t2t50vevmWezKs8dBQUUr1xJ4ZKllMTGopaVVb9Ip0Pv54cjO5uS2LWUxK4l87XXMLdsiceQIXgOHYJL586yRY4QQohGr6ioiOzs7GrH/f39pR87AQlehRDnBIfTwYLEBXy+7XMOFx+uPO7v4k8L3xa08Dn2aB/QHqPOeEbakVOWw+z42fyy/xcySjIqj0d7RzOo2SAGNxtMp4BO6HXafqoFpTb+2tCa9JylmPz+QWfKY591FqPmLOCioIvIs+SRXZaNqUUuOlXLshsHPLcBnl/nQphrR4ZE9Gd826FEekegKApl5RYOpWfzw9ok5iUcplSXhM7nMH5eKdhMCTgAn/JgOhwZQpbDSRb7AMg4klsZvAYH+BHZrAl+Pl74eXvi4+2Jl4e79vB0w9PDvfK1RTZrQkhxPvlz5lA4fwGO3NzKc4rRiC0hgbTHHiP7008JuHsyXqNGoRhO3r3Yc3MpWraMoiVLKVm3DuzHsgwrrq64tG6NS7u2mNu0waVtW8wtW6JzccFyMJHiv/6i+K+/KN2yBUt8PJb4eHK++gpTVBS+EybgPfZK9J6etfqZ2vPyUMvLMTZpUqvyQgghTpOqgq305OXOFqMbnOWA8dZbbz3u8fT0dEJCQs5qW84lErwKIRo1p+pkcdJiPtv2GUmFSYA2sulmdCO1OJWc8hxy0nNYn76+8poIrwhe7PMi3UOqZ/6rC1VV2Zm9kxlxM1ictLhyr1Efsw9XtbyKcS3HEe51bOzS6VQpKLORklvKAzO2cjC7BDdTf96/7B7srlv5dte3xOfFsyZtTbV7eRq9cNg8KHHkg76UFMtGfti/kR/2f4DB7oOvJRyHxY1sUy64pKELL8a14lobgAqd7H3p5zYc
7/aeeHq4V07ZjWh2LDiLjgjlwVuvqfH12lJTKdy1i/Lduyn+ZxWWffsqz+sDAvC+7DK8x16JsWlT8n76iZzv/4c1MZG0J54k+9PP8J88Ge/LLwO9Hkd+PrbUNGxpqdjS0rClpmGJi6N082ZwOivrNbdsgefwEXgOH4a5VSsUvf647TM3j8LcPAr/227FkZ9P8apVWjC78m+siYlkvv46WR9+iPcVl+M7cSIurVpVud5ZXk7Zli2UrF1LyZpYyvfuxfuqsTR97bUT/BYIIYSoN7ZSeL1pQ7fimGfSwOR+8nL1aMqUKQwYMKDacT8/2Xf8RCR4FUI0SqqqsvzQcj7d9ikH8g8A4G32ZlL7SUxoMwE3oxultlIS8hM4kH+g8rErexfJhclMWjyJa1tfy0NdH8LD5FGn+yfkJ7D80HKWHVpGXG5c5blWPu3p5jMaV1tXEvZZeGxTKoVlSRSV2ykst1FssaP+K/9PU28Xvrm5B+2aegFNuSTiUpYn/MXB7CSMdlcUixG1VI+tSKWooJxh/XvQPDqCGdvX8fuepaRYdqJzO4TdkM8RQz64w9FVogo6Wvq2pGNAB9r5t6NHSA+ivKuv4VRVFbW0VBtltFhQLRacFguqxYpqtWA/kk357t2VD0dB1fS+itGIx8UX433lGDz6968yshpw99343ngjeTHTyZ02DWtyMulPP03W229r9yit+dN1l/bt8Rw+HM8RIzA3P/W1p3ofH7wvvxzvyy/HUVxCwR9zyZs+HeuBBPJn/kz+zJ9x69EDn/HjsGVlUbp2LaWbt6Baqm77Y886UsMdhBBCiPrXsWNHhg0b1tDNOOdI8CqEaBRUVeVw0WF25+xmV/YuYtNjic+LB8DT6MlN7W/ihrY3VAlE3YxudAzsSMfAjpXHCq2FvL/pfX6L/42f9/3MypSVTOkzhYFhA0/aBqfqZMeRHaw4tILlh5ZzqOhQ5TkdRtxs3SjI7MnmvU3ZDEDSCevrkH+ISH8XhvVqj7uzFPAC4EDSYRbN2F1RqqTadTn74mlnVri7z2AmtO/OvOVrSLPr2Fh4gP0lO0FfTv/wztzSbQCdgtrhYnA57v1Vm43SzZspWr6C4hUrsKWmnvQ9qGQ04tKqFS7t2+PauROew4ah9645MZLew4OAu+7E9/rryZ85g5xvv8ORl3fsfGAAxqZNMYWGYmzaFGNYM9z798cUFlr7Np2E3sMdv4kT8Z0wgdL1G8ibPp2i5csp3biR0o0bq5Q1BAXh3rcv7n374Na7N8agoHprhxBCiJMwummjnY2F0a2hWyBqSYJXIcRpU1X1pMkFHE4HxbZiim3FFFmLKLIWkVeeR1xuHLtzdrM7ZzcFlqqjfe5Gd25oewM3tb8JL5NXrdriZfLixb4vMjJqJC/Gvsjh4sPcu/xeRkaN5KmeT+Hnok3HsTgspBSmkFyYTFJhEgcLDrImdc2xvUcBk86Ej9KOQynR2IvbUeDQphQZdApNPfW0DvGiW/MgwnxdsZSWsOyv1TisFkx2CwO2r6HT/p0AxG9uyb577iMiTFvD4u/rjUGvx8/HCx9vD3y8PPHx8sDXbMT7z/nw4tMk2u249eqF/x13cMOVIyre3+Enfa8dRUWUrFpF0Yq/KP7nH5yFhdULGY3oTCYUsxnFbEZnMqHz8sKlbVtc2rfHpX17zK1aojOd+t6neg93/G+/Hd+JEynfuxe9nx/Gpk3Rmc2nXFddKYqCe+9euPfuhS0jg7yff6Z4+QqMYWG49+mDe7++mKKitPdRVWHzNPAcC66+Z62NQghxQVOUsz5NV5wfJHgVQpwSp+okuTCZHUd2aI/sHcTnxaOiolf0GHQGDIoBvU77XoeOEnsJJbbqI4z/ZdQZae3bmvYB7Wnv354hzYbUOZtwrya9mD1mNp9t+4wf9vzAn4l/sjZtLe3825FcmExacRoq1ff2dDe4M6jZIPo1GUzMChPrE8tRUIk2FOJrzMBLZ8FDsaGzwojInowcFA1A
ZnYuqxcX4l2Yz8WxywjI0zIIqkDLpHjUl58lrzwfn/HjCfD15q1n70VXEYSqqkrxihVkPPca9rR0rSGKQun69ZSuX4+5XVsC7rgDzxEjqq0DVR0Oynfv1rLvrl1L6ZYtYLNVntf7+eExeDCeQ4fg1rMnOnf3GteS1iedmxtu3brVb6VxC2DXbxDSETpdB14nT7BkDAkh6MEHCXrwweonnU5Y8hys+xS2zYBJC0F/ZhJ9CSGEEOL0SfAqxDkoNi2WFYdW0K9pP/qF9sOkP/URstpyOB3szN7J2rS1bM/ezs4jOym0Hmc0Dy2wPZrMqCZmvRlPk2flo4VPC9r7t6d9QHta+bTCWI/Bg6vBlXs73s/gJkN4bfOrxOfFE5sWW3nehBlPpy9uNm/M5R54lgVyTc8x9LqoB7d+v5GdqQXocdLdlElTfQk6RcHL0x0f7wB8vTwJCTq2H6uftycPRPhimfo/1LIy9D4+NHnjdQz+/qS/8CKWvXvJmPICBb/PJeTFFyqTCFkPHybz1dcoXrkSAEPTJoQ8+ywubduS+7//kffLLCx79pL68CMYw8Pxv/VW3Hp0p3TDBi1gXb++2uiqKToaz6FD8BgyFNfOnc5KsHpG5R+ChU/A/j+157t+g+UvQ/RQ6DIRWo8G4/GnTtfIYYc/7oft07Xn7cdK4CqEEKJOvvvuOxYtWlTt+JgxY2q8ZtWqVZSXl1c73qlTJzp16lSv7TufNPrgddasWXz66ads374dq9VKixYtuP7663n44YcxGk/tD42SkhI++ugjfvvtN/bv309ZWRn+/v50796dO++8kyuuuOIMvQoh6s/ipMU89c9T2FU7P+/7GQ+jB0PDh3JJ5CX0adKnXoK/3PJc1qSuYVXqKmLTYqtN5zXrzbT3b0/HgI50CuxEO/92mPVmHKoDu9OO3Wmv/N6hOnAzuFUGq2ci0HY4HKRn5ZCacYTUjCPk5BeSX1BEXkERZeUWhg/oyc+jf2ZR0iJyiwr5a9Eu3GxemJyuKBybgqsAh3OtvPX5GlJyy/BzM/Jov0D6tOyGn4+2lYxOp6t+/+ISsl56ifJ58wBw69mTpu+8jTE4GICoWb+Q+9NPHPnoY8q2bCHxqnH433IzOnd3sr/4UkseZDTiP2kSAZPvQuemrb0Jfvpp/CdPJi9mOnk//ojt0CEyXnyx2v11np649+qFW7gJj+AyTF0GQkQ/8GlWrWy9sFshYwekrIeyPOh6E/j8d6fYeuCwwdpP4e+3tMyUOiN0vRGy9sKhtXBgmfZw8YYO46HL9RDa9eTbHdjK4ddbYd8CUPQw5lPoMqH+298ISB8qhBBn3ueff37c44MHD67xmo8++ui4x1944QUJXk9AUVW1+ry5RuKhhx5i6tSpGAwGhg4dioeHBytWrCA/P5/+/fuzZMkSXF1dT14RkJOTw8CBA9mzZw8eHh707dsXHx8fDhw4wJYtWwB44IEHmDp16im1sbCwEG9vbwoKCvDyqt2aPCHqal7CPJ5b8xzNDzsYlRHMvG5OEjm2wbWnyZOhzYYyqNkg3AxuKCgoSsWj4j8Aq9OKzWHD6rRidVixOW1YHVayy7KJTYtlV/auKlNqvUxe9G3al67BXekU2IlWvq3O2D6qAA6nyoGsYnQK+Lqb8HE1YtDrtOm1xSVkLFqMobAQL1cX8orKSMnMY/OuA9gcCjZVx94m0ZT6+eCnK8dLsdKveweuuexiACxWG8tWbcTD3RV3N1fc3Vy0r64uJORZufOnreSX2ojwd+P7ST2JCjjxmpyy7dtJffwJbIcOgV5P4P334X/HHccd7bSlp5Px2msUL1te5bhb796ETHkec/PmNd7HWVpK/q+/kjPte+zZ2bhddBHuffvg3qcPLi2jUP58BHbOqnqRTwREDoDI/tqjrsFsSTakbNCC1ZQNkLYF7P/6tNjoBoOegN73gqEWH05kx8PBleARDP4twK959ZHT5LUw/2E4sld7HtEPRr8PQW205zkJ
sH2GNt238Ni+v4R2h4GPQatLjx/ElhfCzImQtAr0Zrj6e2gz6lTejeNqjH2B9KFCiIZWXl5OYmIiUVFRuLic4gwZcd442e/BqfQFjTZ4/f333xk7diweHh78/fffdO3aFYDs7GyGDh3Kzp07efTRR3n33XdrVd+DDz7IRx99RLdu3ViyZEmVPZQWLlzImDFjsNvtrF27lt69e9e6ndLxipM5VHiIzNJMugR2Oa1R0Tnxc3gh9gXCM528HgNGiwNjeDOKX32ARexiSdISjpTV33Yfbf3a0j+0PwPCBtAxoCMG3ZmdqJFVWM4/8dn8vf8Iq+KPkF9adfqxSacSVJbHA5t+puORhBPWZVP0fNnxChZE9cXNbKBzmA/dInzpGuGDv7sZp6pWPLQ9WZ0qJOWU8OIfu7HYnXRu5sO3N3cnwOP4SYZsaWkULlxIwYKFWPZqwZWhaRNC330Xt4p/q06kaMUKMl99DdVuJ+jJJ/AaNeqkCa+OUlUVHI5jW9XkHoSfb4TMXdooYufr4Mg+SNsKqqPqxV5hENwOgtpCUDsIbAOBrcFYEcA4nZCXqNWVsevY14JDVOPqC816QVk+pKzTjgW2gdHvaYFy9YZD8hqI/eTY9N9KCng3g4AWWjBblg87f9FOufnDiFeh84TjB6NOJyT+Ddumw94/jgXVwR1gwCPQ7krQVXyQUJINP42D9G1g8oQJMyCq+h57ddHY+gLpQ4UQjYEErwIukOC1Z8+ebNy4kVdffZVnn322yrnVq1czYMAAzGYzmZmZeJ9g+4ajOnbsyK5du/jll1+4+uqrq50fMWIES5cu5f333+fhhx+udTul4xU1OZB3gC93fMnipMWoqHiaPBkeMZxLIy+lZ0hP9Lrar0P8Zd8vvLLuFXyLVD6IMeGWVwY6HTid6Dw8CH3/PdwG9Gdr1lYWJy1m+5HtOFUnqqriRPsKVH416o2YdCaMeiNGnRGT3oRJZ8LN6Eb34O70C+1HkNuxrUNUVcV+5Aj2rCPYs7KOPY5oz50lJXheeim+11yNUssMtU6nyvrEHBbvTOXv/UdIzK2676a7SY/VasWG9j51z9zLo5tn4mMtoVxvZHtAC+w6Papej6uLCVdXMx5uZvyKc/HYvRWAfyK6837HsVgMtc90e3GbID6eeBFupqrBuj03l8JFiyhcsJCyzZuPnTAY8Bo1kpBnnz3hVjL/pTqdoKrVR2hVFXb8DAWHod0YCGhZcyX7l8Ds26G8ANyDtFHEyH7aOUsRHFqvjTAmr4HULdWDWQBFB75RWjB6JA6sxce/V0BraNZTC1jDe2tB5tFsvdtnaomPSitmAXSeAMNfAY9AbervnrkQ+7EWNGo31QJcWylkH4D/TEuv1PVmGPYiuNVyw/biLFj7CWz89tjr8G8B/R+BiL4QczXkxGsB8Q2/QdOLaldvLTS2vkD6UCFEYyDBq4D6DV4b5ZrX1NRUNlbsyTdx4sRq5/v370+zZs1ISUlh4cKFTJhw8rVKtf0fJiAg4NQaKxpEobWQ3+N/p9ReSqRXJBFeEUR4ReB2nH26CiwF7M3dy56cPezO3k1cbhx+Ln7c3P5mhoYPRadUX8N4Ovbl7uPLHV+yNHlp5TEfsw/5lnxmx89mdvxs/F38GRE5glFRo+gU2OmEbfhxz4+8vfFtzFaVt+b74JaXgyk6mmaffUr6s89RumkTKZPvJuiJx+l68810Cz69DK/OkhIs++PJ27cSy759WPbvp3z//uNvufIvpZs2kfv99wQ++ABeo0ejHGdtqKqqrN60i4W7MliSWE6O9d9lVILNTq7p15qBrQLp0syH6XMWYUAlcsVS/NYuBsAa0Zz8m66jZ4CFprp8vOxH0BWlQ1EyFKajluSQ26wjWUtTGZi8iYH6fPbc9QxrrW5sT8mn1OpApwOdoqBXFBSl4nudwtA2QTwyvBUG/bF2WVNSyHzrLYr/WgmOiuBPUXDr0QOv0aPxHDEcg++pb7FyvPcHSzHMe0BL
SASw4hVo0hk6Xg3trwLvij1RnU74521Y+SagQlgPuOYH8Gp6rC6zJ7Qcpj1AC2YzdmrrRSsfe6AsF3L/NZKtN2sjsyEdILhjxdf2NW8joyjaetFWl2hJlDZ/r03n3bdQW4O6dx4UpGhlDS5agqXe92qjrKAFvyXZkHPg2KMsT7s2vNepvakeQTD8Zej3EGz4CtZ9rtU39x60Fc2qNvp84xwIbHVqdZ9DpA8VQghxvmqUI6/z58/n8ssvx8/Pj5ycnOOWueqqq5gzZw6PP/44b7/99knrnDJlCq+88soJpzwFBAQQFxdXq0+hj5JPjc+uQmshP+35iZ/2/ESRraja+SDXICK8Iwj3DKfEVsLunN2kFKXUWF8Lnxbc0fEOLom85KQjoaqqYlftGBTDcad47s3Zy5c7vmT5oWPrGYdHDOeuTnfRwqcFmzM382fSnyxNXlolAZK/iz8tfFvQ3Ls5Ud5RRHlH0dy7OYGugXy36zs+3PIhilPlo6VhBG9JRu/nR+QvP2MKC0O1Wkl/+WUKftWCHe/x42gyZUqNo5+qw4EtNbVy5NRWOXqqjaDa0tKwpdTwful0GAICMAQFVTwCMQQGYggKwllSQs633+I4UjHyFhlF4bhryAhvjtFoZNTQPpRa7czckMLHf2wmPOcwHbIP0qIglTJ3d8rCwgjo3Iaorh0Z2PvYaJg1OZnURx6lfPduAHyvn0hQ50J0W7464c8KoMTagtSVJhy5+eg8PGj61pt4XnzxSa/793uV++OPHJn6EWpZGQAu7dvjddlleI0aWZmMqd4c2adN/83ep03/De8Nh9b9a7RU0dZ9dhwH+xfD/oqshj1uh0veqN1a0/9SVW208sheLWAMbKuNVOpP43PNw5tg/kNaoHyUWwD0vBN63AbuZzG4sRTBpu+0qcolWeDfUgtcz0Aiq8bUF0gfKoRoLGTkVcAFMPKamJgIQHh4zdkrmzVrVqXsyTz55JNs2LCBxYsXExERQb9+/SqTTWzevJl+/frx7bffnrTTtVgsWCzHpjcWnmQ0StSPAksBP+3VgtZimzYd8OgWK4eKDpFcmExueS5ZZVlklWWxMWNjletDPUJp79+edv7taOvXls1Zm5m+dzoH8g/w5Kon+Wz7Z9zW4TYui76sMhFRXnkeO7N3ao8j2tejW8QYdAaMOuOxr4qBrLIsABQULom8hDs73UlL32NTPns26UnPJj15ptczrEtbx5+Jf7IiZQU55TnkpOewPn19lTa7GdwotZcC8M6ujgRv2YZiMhH26SeYwsK0e5lMNHnlFVxatiTzrbcp+PU3rElJhH30ETp3dyz74ynfu4fyvXsp37MHy779qMdJy/5fhsBAzK1aYW7dmryQcJZZPFlYYKYMPaqqpXJSVXAWqyiFDpqWZZLZ/25G7F/DpfvX4JqUiNd7b1EY2JQdXfuSt/0gqf/E0iIjgR/yUzCozqo3TNgIf4Pe25tDnTrh2rEjOk9Psj/+GGdpKXpvb5q8/Dye2f+DLdoILBH9wCtU2+vTs+Lh1RSsJTBnMu7FB4ga5kPqrg6U7TnI4Xu1REqBDz5wbL1oDcr37yf9+ecp375D+1n07EnI889hblnDFF6nA0qOQGEqFKZVPCq+dw/UMuGeKAvuzl/hjwfAVqK9jvHTIKKPNiK553ft/KG1kLxae4A2QnrZB3DR9Sf9edZIUcAzWHvUl7DucMdK2PStNvrafix0uvbYutqzyewJ/R7UAufEVdpIrkvtA6tzlfShQgghzleNMngtKtJG1Nzda87y6eHhAdS+43N3d2fevHk888wzvPfeeyxevLjynL+/P8OGDSM0NPSk9bzxxhu89NJLtbqnOH01Ba2TO09meMTwKtNtCywFHCo8RFJhEsmFybgYXGjn3452fu3wcfGpUm/f0L7c3P5mZuydwY97fyS5MJkpsVP4YvsXdArsdNIR26PbwfybTtFxaeSl3NnpTqJ9oiuPq6oKTmfl2kajzsiAsAEM
CBtAub2cuNw4kgqTOFhwkMSCRJIKkkgpSqkMXN/IHkz4gmUANH3zDdwuqrpOT1EU/G6+GVNUFKmPPErZps0kjLgEZ3k52Ku2EUAxmzGEBGMMPDp6GnRsNDU4CHOLFhSZPfhjexq/bj7Mzj0FgApUD3o7Kwd4zfgdHXRJ7DRG8kire5gRMYir9//FmIOrCTuSRtjiXwH4dxojfXAw7j164NqxA7a0NMq276B8zx4cBQWUrFpFyapVlWXdunen6ZRHMC6/T0sgZHCBsV9oQVFN7lwJP9+AMXUTER1jyQy9hLyl28n5+msKFszHvXcf3Hv1xK1XL4whIcd+VlYr2V9+RfZXX4HNhs7Dg6AnHsdn/Pjq03wtRdra1M3fa1NwndXf60rrPgO/aC2I63S1ll0XtC1nljyrTXEFiBoI477Vpr+CNkrZ43btkZ+iTSfe9Zt2rys/h6Zdar5nQ9IboNdd2qMxMLpCqxEN3YqzRvpQIYQQ56tGGbyeCenp6YwZM4YdO3bw6quvMmHCBIKCgtizZw/PPfccL730Er///jurVq3C09OzxnqefvppHnnkkcrnhYWFlZ9gi/r1Z+KfvLLuFYqs2h9iLX1bMrnTZIZFDDvuGlFvszcdAzvSMbBjrer3MnlxV+e7uLHdjfyy7xe+3/09aSVppJWkVZaJ9IqkU2AnOgR0oFNAJ8I8w7A77dictsqvR7/3c/EjxD2kyj3s2dkcfuBBynfvxr13bzyGDMFjyODKKacuBhe6BHWhS1CXKtdZHVZSilLQb9hB2dvPA2hrSUfVvKWHx8CBRP48k5S779G2bQH0Pj64tGuLuW1bXNq1w6VtO0wR4cfdxqWwzMq6pDx++zORZXuysDq00VEFlRBdCc0MRZhxoABulPJCqzhCDvyMUrGlTkddEn+6PMvi8NuY3uYGnjs8jIs3zGPwoc0UePlj7tqNVsP649GzJ8bQptWmXqtWK+X79lO2cwfl23dgSUrEc+jF+I+8COWXa6E4U0tKNGEmhJ1kXa9XE7hlASx4BGVbDCH+f+J63cVkzDuEPS2dgtmzKZg9GwBjRDjuPXvh0qEDeT/9iCX+gPZ+Dh1KyAtTqk8PztoLG7/RkhT9O7mRojs2+uvVVBsV9gzRps/una+tK135uvYI6wEdxmmB6OGKWQIDHoUhzx7LjPtfPs2g/0PaQ4izTPpQIYQQjUGjDF6PdnwlJSU1liku1v5orO0amZtvvpmNGzfy9ttv8/jjj1ce79GjB/Pnz6dbt25s376dd99994SfCpvNZszm2mcuFaeu2FrM6+tfZ97BeYA20npPl3u4OPziek+uBOBmdOOWDrdwXZvrmH9wPrnluXTw70D7gPZ4m+s+xdCSkEDKnXdhS00FoPjvvyn++294UVs7eTSQdWnXDux2HMXFOIuKcBQW4SwuIuDIETJeeh0cDrzHjMF/8uSqN3A6tYzD/2KOjqb573Mo370bY2gohiZNqgWJFquNuKQMNsZnsuNwPgeyy0gtdlBg1wHHyjZ1VwiwZhGmL8LTpCck0I+wkEAucm4lKu4z9Acq1tJ1ug763gfLX8EQv5jRmV8wOmwLPPA5Nt+x5JVY6expPulWMIrJhGvHDrh27ABHk8zsmQs/XA72MghqDxN/rv16RaMLjPkUQjrB4mfwVpfjOakLpS2epXRXAiXrN1C+eze25EPkJx+CWdoeqXo/P0Kefw7PSy891ma7FeLma1lsj07bBW0NZY/boe1l4BFS81pRSzHELdBGag/+pQWsR4NWF28Y+xW0vrR2r0uIk5A+VAghxPmqUQavkZGRAKTUlDjmX+eOlj2R1NRUli7VMr8eL6ui0Whk/Pjx7Ny5k2XLlsmUpga0NWsrT696mtTiVHSKjjs73cmdne6sXId6JrkYXBjfany91FWyfgOH778fZ2EhxvBwQp5/nvLduyn+6y/KduygfPduynfvJvuTT1CMRlSbrca63Lp3J+SVl48FUg4bzL0P4pfA
+G8hemiV8jo3N9x69AC0KcuqqlZeO+XHFfy8uwhLtf/1tdG+QHcjl3UJZXy3MLwVC4XFJYQE+uPj7YnuyD5Y8Oix4C2gtban59F9Mif+DNti4M+n4PAG+KI/xmEvEtTzzuprPZ1OKM3RRlMdFu256tCmwzod2veH1sPfb2rlW46A8d9paxhPhaJA78kQ1AZm3YIuexse+Xfj0fN2mPwpDtWF0k2bKF2/gbLt2zG3akXgQw8eyx5cXqBNC173ORSlV9SphzajtKA1alDN61j/zewBna/VHkWZFdN/fwWjG4z5BHwjT+11CXEC0ocKIYQ4XzXK4PWiijV9OTk5lZmp/mvTpk0AlRuvn8ihiimUUPOnzEeTTOTm5p5ye8XpszltfLH9C77Z+Q1O1UmoRyhvDHiDi4Lqbx/Gs6Vg7lzSnnsebDZcu3Qh7LNPMfj54TGgPwGT78KenU3x339T9NdflKyJrcxkC1rgqfP0RO/lic7TC1PzKIIefRTd0ezBDhv8eivs/UN7/vNNcNtibSsToKS0jLTMbFLSskg6nE5iShr33Tye4EA/NiTmErOnDAcGFFT8TCrNvAy0DvagczM/erUKoXkTv+ojpOUFsPR5LYBz2sHgCoOegD73Vc1wqyhw0Q1aQPfHfXBwJSx6UmtrYBstUC1Kh6IM7fsTrRH9t16TYcRrp5cBt/lguOMvmH2HNuIZ+zFs/BZ9j9vx7PcgnkOGVC1flKG93k3fgaViTaB7EHS7RXt4n3xtX408g6HPPdpDiDNA+lAhhBDnq0YZvIaFhdGjRw82btzI9OnTj7vBekpKCmazmVEnWAN41L+TSKxfv57hw4dXK7Nu3TqA43by4sxwOB3YnDZSi1N5fs3z7MzWtta4IvoKnu75NB4mjwZu4alRVZXszz4j++NPAPC89FKavvkGuv+kBDcEBOAzbhw+48bhtFiwHzmCzt0dvafnibPg/jtw1Zu0kc/Mndh+uIqfg5/mQLaDgqLiapclpqThNLlyT8xmHCqMaBvI1AndcDWdeGsgnA7Y8j9Y8RqUVmyB02okjHwLfCNqvs6nGdwwR8s2u3QKJK/RHtUo4OavJdNRdNpaT51BG9nU6cFghq43Q7ebT9zO2vKLgtuWQvxSbd1p2laI/UibCtzzDuj7gLbnaexH2npWh1W7LqA19HsAOl5Tt+1ohDjLpA8VQghxvmqUwSvAM888w9ixY3nzzTcZOXJk5afDOTk53HOPNmJx3333VUnLP2fOHJ5++mlCQ0NZvvzYXpvh4eGVHfmDDz7IwoULq0yV+umnn/j555+B42/oLuouuTCZBQcXsOzQMnLLcrE6rdiddqwOK47K/Ss1niZPpvSewqVRZ2btn6OoiNLNmynbtAmduzveY8dWyTR7MuX79mNLPYzOzR2duzs6D+2r3t0djEYyprxAwe+/A+B/x+0EPvxw9Qy1/6Ezmyu3vTlh220WLNNvxC1xMQ7FQMbgDwjtPhq+GY4xJ57BB99kJ7eCYsbPx4vQkEAiw5oQFd6EgAB/rv92I9nFVto28eLDCV1PHrge/BsWP6Nl9wUIaAWXvA4tq//RevwXptMCwuihsOUHLdj2DK7Y0iZEWx/qEQT6Mz8dvApF0bLOthyu7ZW68g1I3wZrPoT1X4K9HCoSUNGsF/R7CFpdWm1tsRCNnfShQgghzkeNNni98soreeCBB/joo4/o3bs3F198Me7u7ixfvpz8/Hz69evHK6+8UuWagoIC9u3bR/lx9rH87rvvGDJkCHv37qVt27b07t2bgIAA9u7dy+7duwG44YYbuP7609gzUQCQU5bDoqRFLDi4oHI09WT6NOnDy/1erpat93Q4S0oo3bKF0vXrK5Pz4Dy2v+iRTz7F8+KL8b3+etx69jhuQiF7bi6F8+eTP3sOlri4k99UrydkyhR8r73mtNputdlJSknjUFomiUnJ9Dr4IZ2cO7GjZ5p6LSGWSEJdfeH6WTi/uZiw0nSmNFuDfuIM
XFzdKutRVZVHZ21nx+ECfN2MfHVjN9xMJ/jfPidBGy2Nm689d/GBwU9Dj9vqFmj6R8PwRrj+TVG0BEmtLoH9i+Cv1yFD29OVViO1jL7hvRu0iUKcDulDhRBCnI8abfAKMHXqVPr168enn35KbGwsNpuN6OhonnrqKR5++GFMptpP4evQoQO7du3igw8+4M8//2Tjxo1YLBZ8fX255JJLuPXWW7nmmtMLOC5kqqqyJHkJcw/MJTYttnJUVa/o6d20N6OjRtPKtxUmvQmjzohJb8KkM2HUGyu/Hrdeux17Ti7OwgIcBRWP/GPfO0tKcJaW4iwrRS0tw1lW8SgpwZqcDI6qo7vatig9sSYlU7pxI0VLllC0ZAnmli3wnTgRr8uvQGc2UbxqFfmzZ1O88u/KvVIVoxFz69Y4y8twlpTiLC7GWVJSGRDrvLwIfe9dPAYMqPX7ZrPbScvMJj0zGx9vT9pEa9NxC4tL+PzHOehUBzfyK53YjR09y0IfIrrdaNq2rJi26xeFbsLP8L/LcE9ZCX+9ACPfrkwiNG1NErO3pKLXKXwysSvN/NyO35DUzbDxOy0brtOmTd3tcZsWuLr51fr1nHMUBVqP1EZXUzeDq68WcAtxHpA+VAghzp7du3fzxhtv8Ndff5GdnY2/vz9DhgzhmWeeoX379pXlvv/+eyZNmlT53Gw2Ex4ezogRI3j++ecJDg4mMjKS5OTkk95z2rRp3HLLLWfi5TRaiqqqakM34lxWWFiIt7c3BQUFtd5y4HyjqirvbnqXH/b8UHmsg38HLou+jEsiLyHANaBO9VoSE0m57XZsaWknL1wDY2gobr164d6rJ269elWZJly+bz95M6ZT8Mc81NJSAHTu7ihmM45/JR1x6dAB77FX4j16NHofnyr1q6qKWhEs67y80J1kCwiL1UbS4XQSklI5eCiV5MMZ2CsC7C7tW3LzeG39mVNV+eCLH7mq9Eeiijag6oyo1/yArk0N69N2/w6zKtaGXvIG9LmH2APZ3PjdBhxOlecva8dt/f+zFs1aqmW93fSttv7zqOih2hThoLYnfC1CiGOkL6gbed+EOL+Vl5dXJo5z+U8OkPPJ7NmzmTBhAn5+ftx2221ERUWRlJTEt99+S05ODjNnzmTs2LHAseD15ZdfJioqivLyclavXs2PP/5IREQEu3btYsmSJZVbmgEsXLiQGTNm8MEHHxAQcOzv6r59+9K8efOz/npP1cl+D06lL2jUI6/i3PDVjq8qA9db2t/CVS2vIsr79JJ2OMvLSX3oYS1w1enQe3mh9/ZG5+ON3tsbvbcPei8vdJ4e6Fzd0Lm6onNzRefmhuLqis7VDWNYGKawmrPCurRuRZMXXyTo0UcpmPM7edOnY01KgpIS9P7+eF9xBd5jr8SlVasa61AUBcXNjXSrwpGsMjqHmapMP3Y6neh0OrCWYi/J4ZPPvkFvK8aVcrwopyfl+BjtBLo5Cc5ZBT99B6U56EqzebQkB2wloDehXPMjyon2AW1/JeS/rE35XfwM2YZg7v3TE4dT5aquodzaL1Irp6qQc0DLorstRsskDNqa1HZXaqOtzXrVbvsXIYQQQogLXEJCAjfeeCPNmzfnn3/+ITAwsPLcgw8+yIABA7jxxhvZsWNHlUBz5MiRdO/eHYDbb78df39/3n//febOnVttW7KMjAxmzJjBlVdeWastzs5nEryK0xKzN4ZPtmnZdZ/s8SQ3tLuhXurNfO01LPv2offzI2rOHIzBQfVS7/HoPT3xu+lGfG+4nrLNm3Farbj37IlirN0azzUHsrnrx80UW+y0DfHksjZehOmLSTyUigE797kvhLj5GIBHj1eBDSioePyXyRPGfaOtzzyZvg9AbiJsnobPgjtYprphdnXiHg/Kq3Zta5r/JMnCJwK636ptceNetxFyIYQQQogL1TvvvENpaSlfffVVlcAVICAggC+//JJBgwbx9ttv88UXX9RYz9ChQ3n//fdJTEw8000+
p0nwKursj4Q/eHPDmwDc0/meegtc83//nfxZv4KiEPruO2c0cP03RafDrUePU7rm962pPDZrO3anNvt+b0YRezOKcMFOC0MJrxm+AvZW3ECPavYCV28UF28we4GLt5YUyd1f2zbmvw/PJmCqYZ1qtRegUHTxG8Tv3kvX8nX4K0Va4lxbtRcKLS/RRlmjL5ZMukIIIYQQdTRv3jwiIyMZUEPOk4EDBxIZGcmCBQtOWE9CQgIA/v7+9d7G84kEr6JOlicv5/k1zwNwQ9sbmNx5cr3Ua4mPJ+OllwEIuPde3Pv2rZd664vVZiP5cAbREaF8vSqRN/7UMhCH6ovoaMzmkN2LRKcPZU49NzKXzuzFppg4dOk0jNFDcDUbcDXpcTHoMOjrN2hMzS/jtu83Epd/P+2MV/PW2PZ0DA+o2D/VqGUL1hnA6Fb7gFgIIYQQop6pqkqZvayhm1HJ1eB63F0nTqagoIC0tDTGjBlzwnKdOnXijz/+oKioqMq12dnZlJeXs2bNGl5++WVcXV257LLLTrkdFxIJXsUpW5u2lsf/eRyn6mRM9Bge7/F4nf6H/y9nSQmHH3oYtawM9759CLi7fgLi02G320k6nEF8YgrxiSkcSs3E7nBibtefn7dkAHB5G296eOlpFdmBFpFheHh6kDzzcVonrMShKtxrvZclcxRgZZW6TXodLkYdob5uPHhxSy5pH1zn93HH4Xxu+98mjhRZCPR04a2br6ZjmPfJLxRCCCGEOMvK7GX0mt6roZtRaf3E9bgZT/2D/aPBqKen5wnLHT1fWFhYeWzYsGFVykRERBATE0NoaM35WoQEr+IUbcvaxoN/PYjNaWNY+DBe7PsiOuX0RxBVVSX9xZewJiRgCAyk6TvvoOj19dDiqtLyy3h2zk4UReGZUW1oEXT8f2ziE1NYtnojiYfSsNmPrRN1qAo71FCSKgLX50a35fYB/8nytvpDWid8B0Bi3zdxpHfDPyWfMpuDMpuDo/m9rQ4nVoeTwvRCJv+0mQEtA3jh8nY1tqkmi3Zl8NDPWym3OWkT4sm3t/Qg1Mf1lOoQQgghhBCn5mhQ+u8R1eM5XpD76aef0qpVKwwGA8HBwbRu3VpL8ilOSIJXUSvZZdnMPTCXb3d9S5m9jD5N+vDWwLcw6OrnVyh/1iwK580DnY7Q99/DcAbm+6/cl8XDP28jr1RbBLoq/gg39AxlRISJrMwjdGwbTfNw7dMum93O/oMpAHh6uNEyqhlNmoTw+eYCkg4XY9QrvHdNF67o3LTqTbb8AMte0L4f8Sot+k7m23+dVlUVi91JeUUgW2Jx8PvWVL765yCr4rO59MNV3Nw3kgeHtcTL5cQJo1RV5etVB3njzzhUFQa1CuSTiRfheZLrhBBCCCEakqvBlfUT1zd0Myq5Gur2ob+3tzdNmjRhx44dJyy3Y8cOQkNDq2wD07Nnz8psw6L2JHgVNXI4HcSmxTI7fjYrU1ZiV+0AdAnswodDPsSkr/0G9ydSvncvma++BkDgQw+dctKkk3E4VT5ctp9P/jqAqkKEpw5XtZy4YhPT1h7m53U2OhuPYDAYKoPX5uGhTBjSnpamLGzpOyk5OIPAHXvpozrYaW5F085DCfd0A4sXmD20G+35A+Y9qH3f/2Hoe3+1tiiKgotRj4tRj0/Fsccuac3V3cN4dcFelu7J5NvViczdlsoTl7RhfLcwdLqqU4kdTpUSq503/4xj+vpDANzYO4IXLm9X7+tohRBCCCHqm6IodZqm2xhddtllfP3116xevZr+/ftXO79q1SqSkpK46667GqB15x8JXkUVdqedjJIM5iXMY86BOaSXpFee6xTYiXEtxzG6+WjMenOd7+EoLsaakIDlwAEsBxIoXLQI1WrFfdBA/G+/rbJcuc3BO4v3EehpZlK/SMyGU59GnFVUzoMztrH2YA4AXfUpfGF7myByKDebyMWDfNWTfNUd40YfSvOicLPlYzi8iZ4lmdUrVKA3O2HHTtgx
FRQ9NOkMTbvA1p9AdULXm+DiF06pnRH+7nx9U3f+2X+El+btJuFICU/8toOP/4rHbNBTZnVQYrVTanVgtTuPNUeB50a349Z+kfWy7lgIIYQQQtTe448/zk8//cRdd93FP//8UyVbcG5uLpMnT8bNzY3HH3+8AVt5/pDg9QKUWZLJR1s/4kjpEYptxdrDqn39b+Y3L5MXl0dfzlUtr6KVb6tTvpfTaqVs82ZK1qyhPG4floQE7Onp1coZmjah6Ztvovxrrv+rC/bw0zptZPHXzYd546qO9Ij0O+k98wuL2ROfyJ9bEvn9kJ4iG7iZ9Lx5RUu6LnqGILsWyLooVpqSS1MlV7uwHNgVq7UHbX3rfrUZO2mBLeQimnceQLeoAEyp6+HQOkheC4WHIW2L9gBoezlc9qEWVdbBwFaBLHpoIP+LTWLqsnhScmvOxOfvbuKNqzoyon1Ine4lhBBCCCFOT8uWLfnf//7H9ddfT8eOHbntttuIiooiKSmJb7/9luzsbGbMmEF0dHRDN/W8IMHrBejLHV/yR8IfNZ5XUOge0p1xLccxLGLYKY+yWg8fpmTVKor/WUXJ+vWopaXVyhgCAzG1iMbcoiXm6Gg8LxmBwde38vyCHemVgauvm5EDWcVc/cVaJvYK58lL2+Dtemxdp1NVSUnLZM/+RHbvT2R/Wh7JDi/22bX6WgV58Nn1XWmx+hGwJ2n7qt78B5g9oSwfyvJITU9jwfo9FOVlUYoLO9XmeEV149KuLRjZPrjqOtKmHaHH7dr3+SlaIHsoFgyuMOwFbWua02DU67h9QHOu6hrG9sP5uBj0uJm0h6tJj5vJgJtJj9mgk9FWIYQQQogGdvXVV9OmTRveeOONyoDV39+fIUOG8Mwzz9ChQ4eGbuJ5Q1HVo7lPRV0UFhbi7e1NQUFBlUXYjVWZvYyhvwyl2FbMAxc9QAufFniYPPAwVjwqvjfqTy3pj6O4mJwvvqBo+QqsiYlVzukDA/DoPwDXLl0wt2yBuXlz9D4+NdZ1KKeU0R+toshi5+7B0dw1sDlv/hnHzI1aAqVATzMvXdGekR1CUBSFz374jS0JmaQ5PEhzuJOvulTWNbKtP+9e1w33DR/B8pe1ab43zoHmg6rd1+lUWbQ7g7xSK8PbBRPk6VKtjBBCHM+51hc0FvK+CXF+Ky8vJzExkaioKFxc5O+qC9XJfg9OpS+QkdcLzLLkZRTbign1COW2jrfVyzY31sOHSZk8GeuBBO2AXo/rRV3wGDAQj4EDMLduXWU68Anrsju5b8YWiix2ukX48sjwVhj1Ot4c14kxXUJ5evYOknJKuSdmC8PaBtOuqRezkt1It0RU1qFToEekHxN7hXNF56Yoe+dpgSvAqHeOG7gC6HQKozo2Oa33QgghhBBCCHFmSPB6gZkdPxuAsS3G1kvgWrp5M4fvux9HXh6GwECCn34K9/790dfxE/S3FsWx43AB3q5GPppwEcaK7Llpmdkc2rOdbpZ9mA2eHHD4sWxvJsv2akmVDDqFvi0CGNkhhOHtggnwqJjqnL4d5lRkd+t5F/S47Xi3FUIIIYQQQjRyErxeQA4VHmJT5iYUFMa0GHPa9RXMnUv6c8+j2myY27Wl2WefYQype/Kgo9vEALx3dWeaeLuwMy6Bf9Zv40DS4cpyw8MUXg3LhLh52NGjtBlJ2/5j8fb5TzKnogyYMQFspRA9FC55vc5tE0IIIYQQQjQsCV4vIL8f+B2AvqF9CXGve5CpOp0c+XAqOV99BYDn8OE0fetNdG51368rNb+Mx2ZtB+C2/lH0ifTi7c9/IvOIlglYpyh0bxnECM99+B34FmVT8rGLNy2FrU9B5ABoPRJajwI3P5h5PRSmgn9LGD8N9PLrLoQQQgghxLlK/pq/QNidduYemAtoU4bryllaStqTT1K0dBkA/nfdReCDD9R6Tevx2BxOHpixlYIyG53CvHny0jYY9Qpuri64mIxc3tpAN+sazPELwWHVLnLxhi7X
a5l94xZCbgIkLNceCx8Dj2AoztQyC0/8GVx96tw+IYQQQgghRMOT4PUCEZsWS1ZZFj5mH4Y0G1KraxxFRdjS0rRHejr29HSKV/6NJT4exWikyWuv4n3FFafVLodT5e1Fe9mcnIdJUXl7bDtMBi0QnjiiO34LbkO3Y/OxC5p21dattr8KTBUjvcNfgex42LcA9v0JKRu0wFVngGt/BH/ZV0sIIYQQQohznQSvF4g58XMAuKz5ZZj0phrLle3cRearr2JJSMBZXHzcMno/P8I++QS3rhedcjtsDie70wpZfzCH1fsz2ZScT5ld262pizGDlIQDtAntCUDAutcgbbO2f2rHcdD9NgjtWr1SRYHAVtqj/8NQfEQbgfWJgIg+p9xGIYQQQgghROMjwesFIKcsh5UpKwEY27LmKcNlu3dz6LbbcBYWVh7T+/pibNIEQ9MmGJs2xdi0KV4jR2IMDq71/W0OJz+sTWblviw2J+dRanVUOW/ASSf3Eu6/pAe9LmqvHdz9O+ycBYoObpkPYd1rfT88AqHzdbUvL4QQQgghhGj0JHi9AMw/OB+7aqeDfwda+baqdl5VVfau3oztwbsxlRaz2y+SXwbfyJBBnRjXpwVBXnXfVLrc5uDemC0sj8uqPGbEgb+ujABdOV3DvRnTtx2d20Zj0Ou1AsVZMP9h7fv+j5xa4CqEEEIIIYQ4L0nwep5TVbVyyvC/R11VVWX74QL+3JXOjn+28OCCD/C2lrDXN4IpfW6n1OnChr+SeffvQwxtE8SEns0Y1CoIvU6p9b2LLXZu+jqWLYeLMBt0PDqiFf1bBLJ3+xZcTP706tqBAF/v/zYY5j0IZbkQ3AEGPVkv74MQQgghhBDi3CbB63luR/YOEgoScNG7MDJqJABxGYXc8cMmUnLLCC/M4K3Vn+NtLSGjSXN0r7zPX50iWBWfzcwNh9iUnMfSPZks3ZNJiJcL13QP4/reEQSfYDRWVVW2xCVx3y+7SC/TYcDJO2Nac0UPLXFSu6aDa27w9pmwbyHojDD2SzDUvD5XCCGEEEIIceGQ4PU8d3TUdXjEcDxNngC8+MduUnLLaFmew5vrv8bNWoKxbVsGfj8Nvbc2Ejq+Wxjju4URn1nEzI0pzN5ymIzCcj5acYAv/jnIdT2acffgaJp4u1bey6mq7N53kLkrN/FLspFC1YwJB7e1N9E3OuDkjS04DH9WjLQOeRpCOtTvmyGEEEIIIYQ4Z9V9c07R6JXaSlmUtAg4NmV4/cEc1h3MJbw0m483fY1bSQHmNm2I/O7bysD131oGe/L8Ze1Y98zFfDzhInqHe2Cz2/lhbTKD3l7Js3N2kppfRuaRXN79IoaPZyxiRrKJQtWMl0kh5rYePHnjpQT4+Zy4saoKc+8DSwGEdoe+D9b32yGEEEIIIcQZ8dlnn6EoCr169aqxTFZWFk899RQdO3bEw8MDFxcXWrRowaRJk1i9enWVst9//z2KolQ+DAYDoaGh3HLLLaSmptaqTS+++CKKoqDT6UhJSal2vrCwEFdXVxRF4b777qs8npSUVHnfV1999bh1X3/99SiKgoeHR63aUl9k5PU8tjR5KSW2Epp5NqN7sJb06OMVBwgszeP99V+jFuRgbtmS8GnfYfD1PWFd5tx4Lt//FpdlzcESEMYfDGZqdndi1jv5ZVMKV3ZuSmEhrLKGUaYaCPNxYfodfQj3d6tdYzd9Cwf/0rbFGfsF6OVXUwghhBBCnBtiYmKIjIxkw4YNHDhwgBYtWlQ5v2HDBkaPHk1RURHXXXcdkydPxmw2k5iYyO+//87333/P33//zcCBA6tc9/LLLxMVFUV5eTnr1q3j+++/Z/Xq1ezatQsXl9olVTWbzcyYMYMnnniiyvHZs2ef8DoXFxdmzJjBc889V+V4SUkJc+fOrfX965OMvJ7HZsdrv5BXtrgSRVHYnJzL6gPZ3LZnAe4FOZiiown/ftqJA9cj++DXW+Gz3rB7NgoqLsUpXFP8I6td
HmKe9zuMVFfzx5ZEFuf5UqYaaBnkwW/39Kt94Jp7EJY8r30/7EUIaHl6L1wIIYQQQoizJDExkdjYWN5//30CAwOJiYmpcj4vL48rr7wSg8HAtm3b+P7777n33nu5/fbbee2119i1axfTp0/H1dW1Wt0jR47khhtu4Pbbb+ebb77hscceIyEhgT/++KPW7Rs1ahQzZsyodnz69OmMHj36hNft2bOH7du3Vzk+d+5crFYrw4cPr3Ub6osEr+eppIIktmRtQafoGBM9BoCPlh8gvDCDganaL2Doe+9i8Pc/fgVH9sNvt8OnvWDXb4AKbS+HO1bA2K8oDuqBgkpHy1Y+Mn3KFtd7edXwLWNCcvjlrj4nTOhURWkuzLkbbKUQOQB63lkPr14IIYQQQoizIyYmBl9fX0aPHs348eOrBa9ffPEF6enpfPjhh7Rp06ba9YqiMGHCBHr06HHSew0YMACAhISEWrdv4sSJbNu2jbi4uMpjGRkZrFixgokTJ9Z4XZ8+fYiKimL69OlVjsfExHDppZfi5+dX6zbUFwlez1NH17r2bdqXYPdgtqfk8/f+I0zcvwxFVfEcPhyX4/zPQ3kBzL4LPusFO2cBKrS5DO5aBdf+hDWoE7NSAnn+yOW8wsNs8L4C1TsMd7WEGwzLmZp/P76/XQMJK7R1rDXJT4E/n4IP2kPKOjB5wphPQSe/kkIIIYQQ4twRExPDVVddhclkYsKECcTHx7Nx48bK8/PmzcPV1ZWrrrrqtO+VlJQEgO9Jlvz928CBAwkLC6sShP788894eHiccOQVYMKECcycORO14u/67OxslixZcsKg90yShYXnqd05uwHoH9ofgI9XxFcZdQ24957jX7jwCdgxU/u+9WgY/CQ06QzAkZw8vv15PplHcgHo3G8YXYc+j6IokPQPbP4e9szV1q4e/Evbp7Xv/dBhHOiNWp1ZcbBmKuz8BZx27ViTznDJ6+AbUf9vhBBCCCGEaFRUVUUtK2voZlRSKpIW1cXmzZuJi4vj448/BqB///6EhYURExNTOZIaFxdH69atMRqNVa4tKirCYrFUPnd1dcXd3b1KmYKCArKzsykvL2f9+vW89NJLmM1mLrvsstq/PkXhuuuuY8aMGbz88svAsYDbbDaf8NqJEyfy+uuvs2bNGvr3788vv/yCi4sLV1xxBYsWLap1G+qLBK/nqX25+wBo7duaXakFLNubxVMnG3VNWFERuCpw01xoPqjyVHJqBl9P/4OS0jI8Pdy4fuwltG4efuza5oO1R14SrPsctvwImbtgzl2w/GXoPglSt8K+BceuiRoI/R+G5kOgjv9gCCGEEEKIc4taVsa+rt0auhmVWm/ZjOJWy1wt/xETE0NwcDBDhgwBtEDx2muv5aeffuK9995Dr9dTWFh43Ky8N954I3Pnzq18fu+99/LJJ59UKTNs2LAqzyMjI/npp58ICws7pXZOnDiRd999l40bN+Lr68vGjRt5/fXXT3pd+/bt6dSpEzNmzKB///5Mnz6dMWPG4FbH9+t0yRzN81CBpYD0knQAWvu1PjbqevgEo67WUpj/sPZ9r7uqBK5FxSV8/sNsSkrLCGsSxON3TawauP6bbySMfAse3gUXTwGPYChMhRWvVgSuCrS9Qls7e/M8iB4qgasQQgghhDjnOBwOZs6cyZAhQ0hMTOTAgQMcOHCAXr16kZmZyfLlywHw9PSkuLi42vUvv/wyS5cuZenSpTXe49NPP2Xp0qX8+uuvjBo1iuzs7CqjpVarlYyMjCoPh8NRrZ6LLrqINm3aMH36dGJiYggJCWHo0KG1ep0TJ05k1qxZHDhwgNjY2AabMgwy8npeOjrqGuoRyuEclcW7M3ly/zIUTjDq+vdb2qipVygMrZoO29PDnUsG9WJ/Ygq3XD0Ks8l08ka4+cGAR6HPfbDjF239rG+kNo1YsgkLIYQQQlywFFdXWm/Z3NDNqKQcJ8tvbaxYsYL09HRmzpzJzJkzq52PiYlh
xIgRtGnThu3bt2Oz2apMHe7UqdNJ79GzZ0+6d9e2vLzyyivp378/EydOZN++fXh4eBAbG1s56ntUYmIikZGR1eqaOHEin3/+OZ6enlx77bXoaplrZsKECTz99NPccccd+Pv7M2LEiFpddyZI8Hoe2pu7F4A2fm34ZIWWYXjQiUZdM3ZCrDZPn9HvgdkTVVWxWK24VHyyM7hPVwb1vqjWv+SVDGboeqP2EEIIIYQQFzxFUeo8TbcxiYmJISgoiE8//bTaudmzZzNnzhy++OILLrvsMtatW8ecOXO45ppr6nw/vV7PG2+8wZAhQ/jkk0946qmn6Ny5c7WR25CQkONeP3HiRKZMmUJ6ejo//vhjre8bHh5Ov379WLlyJXfffTcGQ8OFkHW6s9PpPPUgRpw1R0deA03N+WZXOk/sO8Goq9MB8x4E1QHtxkDrkTicTn5d8BeHUjO475bxuLqYtX9kZHqvEEKcNulDhRDi3FdWVsbs2bO5+uqrGT9+fLXzTZs2ZcaMGfzxxx/cfffdfPzxxzz88MN06dKFVq1aVSmrnmiHjv8YPHgwPXv25MMPP+Shhx7C19e32rrYmkRHR/Phhx9SVlZGz549a31PgFdffZW//vqLa6+99pSuq2916j0jIiJ47bXXyMrKqu/2iHoQl6ft4bQ7yYNmBRkMOlGG4Y3fQOpmMHvBpW9htdmY9vN81m3ZRXpWDgeSDp/NpgshxHlP+lAhhDj3/fHHHxQVFXHFFVcc93zv3r0JDAwkJiYGPz8/5syZg8VioXPnzkyaNInPPvuMr7/+milTptCuXTtAG+Gsjccff5zMzEy+//77U273gw8+yFNPPXXK1w0aNIgXX3yRtm3bnvK19alOwWtqaipTpkwhPDycG2+8kXXr1tV3u0QdWRwWEvMTAVgXZ2bCiUZdCw5rmYABhr2I0yOYH39bxO79iRgNeiZdM5qObaLP8isQQojzm/ShQghx7ouJicHFxYXhw4cf97xOp2P06NEsWrSInJwc+vTpw65du7j//vvZsGEDjz32GPfffz8xMTH07NmTf/75hyeeeKJW977qqquIjo7m3XffPW5ypvOZop7KOHWFjRs38sknn/DLL79gsVhQFIWuXbty3333cd111510v6DzSWFhId7e3hQUFODl5dXQzWFPzh6unX8tZsUDt3W388WK91BQifp9TtXgVVVhxgTY/yc064U66U9mL/qH1Rt3YNDrueuGK2kReWopuIUQ4kJ1Kn2B9KHHNLY+VAhRv8rLy0lMTCQqKgoXF5eGbo5oICf7PTiVvqBOI689evTgf//7H4cPH+b111+nWbNmbN68mVtvvZWwsDCefvppDh06VJeqxWk6ut7VlfATj7ru/UMLXHVGuHwqf63dxuqNO1CA66+6RAJXIYQ4Q6QPFUIIIermtDJG+Pv789RTT5GYmMjvv//OsGHDyM3N5a233iI6OpqxY8dW7m8kzo6jmYZD0rwYWNNa1/ICWFgxLaH/w5R7N+fvdVsBuGLEALq0k61shBDiTJM+VAghhDg19ZLuUFEUrrjiChYvXkxcXBx33nknDoeDP/74gxEjRtC+fXu+/fZbnE5nfdxOnMDRkdf+G46gQ0XtP6j6qOvGb6E4A/xbwIBHcTGbefC2a7h8WH8G9+naAK0WQogLl/ShQgghRO3Ua67+5ORkvvnmG3777TdAS/scHBzM3r17ufPOO+nWrRuHD0v22jPFqTrZl7cPRVXpflB7nwOuPc5eUvv+1Mr3vgeM2rxzPx8vhvbrdtbaKoQQoirpQ4UQQogTq5fgdcmSJVxxxRW0aNGCd955h5KSEm699Va2bdtGWloaS5YsoXfv3mzfvp2HH364Pm4pjiO1KJUSWwnt0gz4lZdQYnQlYFD/qoVKcuDwRgA+iy1mZ1xCA7RUCCHEUdKHCiGEELVjqOuFhYWFTJs2jc8//5z4+HhUVSU0NJS7776bu+66C39//8qyw4YNY+jQoXTp0oUV
K1bUS8NFdUf3dx0c7w5YiIvqRHeTqWqhA8sAlSxDKAn5ULw8lnYtI9Hr9We7uUIIccGSPlQIIYQ4dXUaeb377rsJCwvjkUceYf/+/fTu3ZsZM2aQlJTEM888U6XTrbyRTkf37t3Jz88/pXvNmjWLwYMH4+vri7u7O507d+btt9/GZrPVpekAzJ07lyuuuIKQkBBMJhNBQUH07duXl19+uc51NgZxuXGgqnTeWwpARuc+1QvFLwFgu705Hu6u3D7xCglchRDiLJI+VAhxoanDzpziPFKfP/86jbx++eWXmEwmJk6cyIMPPkj37t1rdd3AgQNPqfEPPfQQU6dOxWAwMHToUDw8PFixYgVPPvkk8+bNY8mSJbi6uta6PqvVyg033MCsWbNwdXWlT58+BAcHk5GRwe7du/noo4+YMmVKretrbOJy44hOB5/8csr0JhzdelUt4LDjjF+KDthLK24aN5IAX+8GaasQQlyopA8VQlwojEYjiqJQUlJySv/eiPNLaak2sGY0Gk+7rjoFr1OmTOHuu+8mODj4lK675ZZbuOWWW2pV9vfff2fq1Kl4eHjw999/07WrlgU3OzuboUOHsnr1ap5//nnefffdWt//jjvuYNasWVx55ZV8/fXXBAQEVJ5zOp1s2LDhlF5PYxOXG8ewfVo2yo3BbWga4lvlvHp4AzpLASW44tV2CC2jmjVEM4UQ4oImfagQ4kKh1+vx9vbmyJEjWCwWvLy8MBgMKIrS0E0TZ4GqqpSWlpKVlYWPj0+9zPZU1EY6jt+zZ082btzIq6++yrPPPlvl3OrVqxkwYABms5nMzEy8vU8+erh8+XKGDRtGhw4d2LJlS71E/qCtW/L29qagoAAvL696qbMucstzGTRzIB994SAkH17vcQO3PXcHg1sHVZbJ+vkhgvZOY4vSmYgH5uEvo65CCFEvGktfcJT0oUKIxkJVVQoKCsjKysLhcDR0c0QD8PHxISQkpMYPLU6lL6jTyGteXh47d+4kOjqa0NDQ45ZJTU0lISGBTp064ePjc0r1p6amsnGjlhF34sSJ1c7379+fZs2akZKSwsKFC5kwYcJJ6/z4448BbRpVfXW6jcm+3H1EZEFIPlj0RjYGt+UlP7cqZVwO/QWArvUlErgKIUQDkT5UCHEhURQFHx8fvL29cTgc2O32hm6SOIuMRmO95tepU/A6depUXnnlFdavX19jx5uens6QIUN4+eWXq33qezJbt24FwM/Pj6ioqOOW6d69OykpKWzduvWkHa/D4WD58uWAtmYoIyODmTNnsm/fPsxmMxdddBHjxo3Dw8PjlNrZmOzL3UeviinDmwNbYTGaCfX919qC/BS8SpJQFR1tR97eQK0UQgghfagQ4kKkKAoGgwGDoc6bnQhRt+B14cKFNG/e/IRJJrp3705UVBTz588/5Y43MTERgPDw8BrLNGvWrErZEzl48CDFxcUArFu3jnvuuafy+VGPP/44M2fOZOjQoafU1sYiLi+Oofu0GeBrmnYi2NMFs+Ffn3JUZBlWwnpi9j61dVZCCCHqj/ShQgghRN3UaaucpKQkWrdufdJybdq0qVXH+F9FRUUAuLu711jm6Ce8hYWFJ60vJyen8vvbbruNbt26sXHjRoqKiti2bRujRo3iyJEjjBkzhvj4+BPWZbFYKCwsrPJoDHL2bqdZNjj1etaHtKOZ37FR13/Wb8Oye4H2pOXwBmqhEEIIkD60MfahQgghzg11Cl6PLqo9GS8vr1Pek+5M+HdOqtDQUBYvXkz37t3x8PCgc+fO/PHHH3To0IHi4mLefPPNE9b1xhtv4O3tXfk4+ul1Qyq3l9N0cwoAR1q3p8TkSrOK9a7JqRnM/3MZStI/WuFWlzRUM4UQQiB9aGPrQ4UQQpw76hS8BgYGEhcXd9Jy+/btw8/P75Tr9/T0BKCkpKTGMkenLNUmO+HR+kDbasBsNlc5r9frueuuuwBYtmzZCet6+umnKSgoqHykpKSc9P5n
2oH8A/SK07K3HWjdG4Bmvm6oqsqcRX8TTSImbOAVCsEdGrKpQghxwZM+tHH1oUIIIc4ddQpee/fuzbZt2/jnn39qLLNq1Sq2bt1K7969T7n+yMhIgBN2akfPHS17svqOpmZu3rz5ccscPZ6enn7CusxmM15eXlUeDS1hTyxRmeDUwbomWnDazM+NLTv3kXw4g466A1rBlsNB9tUSQogGJX1o4+pDhRBCnDvqFLzefffdqKrK+PHjmTt3brXzc+fOZfz48SiKwuTJk0+5/osuugjQ1tnUtN5n06ZNAJUbr5+Ih4dH5fqi7Ozs45Y5evxczJZoWbYSgNw2TdlfruXgCvE0Mm/ZalBVLjIlaQVbypRhIYRoaNKHCiGEEHVTp+B16NCh3HfffWRnZ3PVVVcRHBzMwIEDGThwICEhIVx11VUcOXKEyZMnM2LEiFOuPywsjB49egAwffr0audXr15NSkoKZrOZUaNG1arOq6++Gqh5StPSpUsBbWP3c43fuv0AOAf1Ii2/DICMQ8kUFJXQyqsM1/IM0Juh+aCGbKYQQgikDxVCCCHqqk7BK8BHH33E1KlT8ff358iRI6xevZrVq1eTlZWFv78/H3zwAZ9++mmdG/bMM88A8Oabb7Jly5bK4zk5Odxzzz0A3HfffVWSXsyZM4c2bdpw8cUXV6vvgQcewNfXl4ULF/Lll19WOTdz5kxiYmIqy51LytNSCT1UghMwDhyJ3ali0utIqfi0/dLQikyOkf3BVHPmSSGEEGeP9KFCCCHEqVPUf6cRrAOHw8HmzZtJTk4GtH3lunfvjl6vP8mVJ/fggw/y0UcfYTQaufjii3F3d2f58uXk5+fTr18/li5diqvrsS1hvv/+eyZNmkRERARJSUnV6lu6dClXXHEF5eXltG/fnrZt25KQkFC5ofvzzz/Pyy+/fEptPJo1sqCgoEHW7hz4aiq2979gXzMd/p+v4MZvNhHp78ajFxnZte8gj7vMwJASCyPfhl53nfX2CSHEhaCufYH0oQ3Xh5bby9matZVuwd0w6U1n9d5CCCGOOZW+wHC6N9Pr9fTs2fOMTBWaOnUq/fr149NPPyU2NhabzUZ0dDRPPfUUDz/8MCbTqXU2w4cPZ/v27bz++ussW7aMuXPn4uXlxahRo3jwwQfrND2roRUtWYILkNS1CdY8K6Ala7p8WC8u798R3n5YK9jy3HttQghxvpM+tOFM2z2Nz7Z9xuPdH+em9jc1dHOEEELUwmmPvF7oGvJTY/uRI+wfOBBFhfnvXo7OdBsfrzjA9b3CeW1sR9g9B2bdAv4t4f5NZ7VtQghxIWnoWTjnqoZ83+5ccidr09cyvtV4Xujzwlm9txBCiGPO6shrXFwc+/bto7CwkJri4Jtukk80z4Si5StQVDjQBMJbdGPttlIAzE4Ldrsdw/4lWsFWkmVYCCEaI+lDG87+PC3ZYW5ZbgO3RAghRG3VOXhdt24dd955J7t3766xjKqqKIoiHe8ZUrZ9OwBboxWu8GvNz7nahvS7tm/jg6y9PF6sZX+UKcNCCNG4SB/asLLLsskpzwEgt1yCVyGEOFfUKXjdv38/w4cPp6SkhD59+pCZmUliYiLXXXcd8fHxbNu2DYfDwdixY2X61BlUEr8HgFR/hVbbZzMsO4uL9DrClRK6mzyh5AiYPCG8TwO3VAghxFHShza8+Lz4yu8leBVCiHNHnYLXt956i5KSEj777DMmT57MpEmTSExMrEyVv3v3bm666Sbi4+NZu3ZtvTZYHGNNPIAOUDwduK2Zyj0AxoqTKRVfo4eAQbIoCiFEYyF9aMM7OmUYJHgVQohzSZ32ef3rr7+Ijo5m8uTJxz3fvn175s+fT0JCAq+99tppNVAcnyMnE12JAwDf0Cbkd5zEDPsQ5jr6skvXAbXFMGh5CQx6ooFbKoQQ4t+kD214/w5ei23FWByWBmyNEEKI2qpT8Jqenk6HDh0qnx/dj85qtVYea9KkCYMGDWL27Nmn
2URxPNZVPwOQ7w7R3Saypf3TPG2/gyn2O9jW4XmUG36D63+BkI4N3FIhhBD/Jn1ow4vPi8doV+mQ5ETvUMkrz2voJgkhhKiFOgWvrq6uGAzHZhx7enoCkJmZWaWcl5cXKSkpiPpn3bAQgHRfaOXXmpTcMgDcFBvtWzdvyKYJIYQ4AelDG5bdaedA/gHGrFWZMsPJJZvVyuRNQgghGrc6Ba+hoaEcOnSo8nmLFi0AqqzNUVWVLVu24Ovre5pNFNWU5lJWkawpw0+hlW8r9qVpa3Y8dHbatohoyNYJIYQ4AelDG1ZyYTI2p40uyQoAQQWqbJcjhBDniDoFr7169WLPnj2UlWmjfZdeeikADz/8MAsWLGDnzp3cfffdJCQk0KNHj/prrdDsnUduiTbNLC/QlWC3YLJLnQAM7NICF7O5IVsnhBDiBKQPbVj78/ajd6hEpWv9pqsFGXkVQohzRJ2C11GjRlFeXs78+fMBiI6O5s477yQ9PZ0rrriCLl268NVXX2EymXj11VfrtcEC2DmL8iJtypk+PAxFUUjJ0/4I6tchsgEbJoQQ4mSkD21Y+/P2E5kJRrsKgKtVMg4LIcS5ok5b5Vx11VXYbLYqxz799FNatmzJrFmzyM3NpW3btjzzzDO0b9++XhoqKhSmQdJq9IUhAHhHt0FVVQ7nlgLQzNetIVsnhBDiJKQPbVj78/bTMk2tfO5ihVSZNiyEEOeEOgWvx6PT6XjkkUd45JFH6qtKcTy7ZuOwgMmirdVp0rorq7bGUWSxAxAmwasQQpxzpA89e/bn7eea1GPBq6tVlZFXIYQ4R9Rp2vCtt97KE0/I/qENYucsLMXaZw45ntCyaQf+2Z4AgJdZwdWkb8jWCSGEOAnpQxtOgaWAjJKMKiOvrhaZNiyEEOeKOgWvP/30E4mJifXdFnEy2QcgfRvZJVpCpgxfhXD3SPYcOgJAuJ+MugohRGMnfWjDic+Lx7tEJTj/2DFZ8yqEEOeOOgWvISEhKIpS320RJ7PrVwCO6JoBUBzixaFDWRTYtB9jdJB3gzVNCCFE7Ugf2nD25+2nZcWUYcXFBdCCV8k2LIQQ54Y6Ba/Dhw9nzZo11RJOiDNIVWHnLABKylwB0IU1Zfe+g5SqRgDC/WXkVQghGjvpQxvOv5M1uVVsQ+RihdyyHFRVPdGlQgghGoE6Ba8vvvgiFouFO+64g6Kiovpukzie9G2QcwAMrpCtbYvj3rwlu/YfpKQieJVMw0II0fhJH9pw4vPiaVUx8urery8ABicoNjtFNvlZCCFEY1enbMPTpk3j0ksv5YcffmDBggUMGzaMyMhIXF1dq5VVFIXnn3/+tBt6wdupTRlWW12Cx6+bADCFRFOUWkoZgQCE+VV//4UQQjQu0oc2DKfqJCF3P9Hp2nP3Pn0rz7laIKcsBy+TVwO1TgghRG3UKXh98cUXK9fr5OTk8PPPP1croygKqqpKx1sfnA7Y9RsAZc2G41q+AScQEN6Z0NSDlCbJyKsQQpwrpA9tGClFKQSmleFiA52HB+aWLVDc3FBLSyuTNkV5RzV0M4UQQpxAnYLXKVOmSLKJsyl5DRSlg4s3SaUmFCDPW0e/jj2JjOjEJ2+swKBTaOLt0tAtFUIIcRLShzaMf693de3UCUWnQ+fuhqO0VFv3KhmHhRCi0avzyKs4iyqmDNNuDOnxO2kKFAd7oigKh3JKAWjq44pBX6clzEIIIc4i6UMbxv68/ZXrXV27dAFA7+aOg2xt5LVMglchhGjsJNpp7OwW2DNX+77j1RQf3A+AMzQYu8NBSp6WvKmZrHcVQggharQ/99g2Oa5dOgPa9GEAV4sqI69CCHEOqNPIqziLDiyH8nzwCIGIfjhTngHAGdyUJ1//jEz3CEAv612FEEKIEzicupemedr3rp06AaBzd9eey16vQghxTqhT8Pryyy/XuqwkmzhNB5ZpX9tfiarocEvPB0D1a4Yzz0mBTRs8b+YnwasQQpwLpA89+0ps
JbjvTwVAHxmO3scHqBq8ysirEEI0fqeVbfh4G3r/OwmFZEqsB6UVnwT7NSezJJPAXAcAFtemkFdAqWoE7BK8CiHEOUL60LPv3/u7enTpWnn8aPDqYoUMCV6FEKLRq1Pw+sILLxz3uNPpJDk5mb/++ouUlBRuu+02wsLCTquBFzxLxabpZi8OJGzE3wpOBQ47te1x8q3a6Wa+suZVCCHOBdKHnn1apmHt+6PJmgB0Hlrw6iZrXoUQ4pxQr8HrUWVlZdxxxx0sXryYLVu21KlhooKlUPtq9iRtxyb8gRJ/N9Jz8nGoCrlldgDCZeRVCCHOCdKHnn37c+IYmVY1WRNUHXnNKZM1r0II0didkWzDrq6ufPXVV1gsFqZMmXImbnHhODry6uJFwYE47VCIP1abHavejAq4mfT4uZsaro1CCCHqjfSh9S9/707crOB0MWFu2bLy+L/XvBZaC7E5bA3VRCGEELVwxrbKcXNzo3v37syfP/9M3eLCUH5s5NV+KAUAR1AIACYvXwCa+brJhvdCCHEekT60/qiqin5vAgC69q1R9PrKc/qK4NXNqvWheZa8s99AIYQQtXZG93nV6XRkZWWdyVuc/ypGXi0GF1wz8gHwbt6Wizq0wt03CJA9XoUQ4nwkfWj9SC9JJ/xQOQC+XXtWOXd05NXLpuWRkHWvQgjRuJ2xfV7T0tJYvXo1wcHBZ+oW5z+nA6xa8JpgySUk1wlAi+59uGjwYF6YuwsoIUz2eBWizhwOBzabTBUUNTMajej/NVp3NkgfWn/25+2vzDTsflG3Kud0Hh4AeNj1gIPcMglehRCiMatT8PrPP//UeK6oqIi9e/fy6aefUlhYyE033VTnxl3wrMWV3+4rPkxUxWwmc2QkFruDeTvSAejfIqAhWifEOU1VVTIyMsjPz2/opohzgI+PDyEhIfWyREP60LMr4fAO+lbkYnLt3KnKOd1/pg3nlEvSJiGEaMzqFLwOHjz4pB24qqp0796dV155pU4NExxb76o3cSh5N23s4NQp5Lt5sH53JrklVoK9zAxuHdiw7RTiHHQ0cA0KCsLNTdaNi+NTVZXS0tLK6btNmjQ57TqlDz27CrduBqA8xAeDv3/l8bwSK/8kFdMKcLFqI7MybVgIIRq3OgWvAwcOrLHjNZlMhIaGMmzYMK655hoMhjM2M/n89689XvMO7NEOBXrzxucxbHCGAyau6d4Mg/6MLl0W4rzjcDgqA1f/f/0xK8TxuLpqeQWysrIICgo67SnE0oeeXfo9BwBQ2reucvzLfw6yKPYwnwPmcm1ZjgSvQgjRuNWpV1y5cmU9N0McV8Uer6rZA2tSMgDWoGBKnAZSLSYUBa7p3qwhWyjEOenoGlc3N1kvLmrn6O+KzWY77eBV+tCzp9xeTuBBbc2NX7feVc7FZRRSanABwFCu7ZkuwasQQjRuMmTXmFWMvGa5eOJ9pAQAq08wSQ4vQFvr2sxP/vgWoq5kqrCoLfldOTcl5MXTIk2bEhzYs3+Vc0nZJZQZzQDobXZ0TlWCVyGEaOQkeG3MygsA2Gc206SiPz1i8uCQXQteJ/QMb6iWCSGEEI1e0s5YPMrBZtTh0vrYtGGbw0lKXhllBnPlMVcL5JRJwiYhhGjM6hS8fvLJJ+j1eubNm1djmXnz5qHX6/nyyy/r3LgLXsXI636DQkie9snxNqcH5RjwdTUwrK1soSCEEOca6UPPnsKtm7SvzQNRjMbK44fzyminJvCt+V0cOm0auItVpg0LIURjV6fgde7cuQQGBjJ69Ogay4waNYqAgADmzJlT58Zd8CrWvO7HTnDFNjlbTE0BGN+9GSaDDJwLIURtrVy5EkVR+PXXXxu0HdKHnj3m+MMA2NpEVjmelF3CjfqlDNLvQDVqU8LdKoJXVVXPdjOFEELUUp2in7i4ODp06IBOV/Pler2ejh07snfv3jo37oJXsVVOVn4ZJgc4DXr2umjbNMiUYSHE8Xz//fcoioKLiwupqanV
zg8ePJgOHTqckXs/8cQTKIrCtddee9zzsbGxvPjii8fdW/f111/n999/PyPtamykDz17dIVavgglqOqWconZJUTr0gBQDVrw6mIBi8NCqb307DZSCCFErdUpeD1y5AghISEnLRcSElK5N56oA0sRFgXs2VrnW+QbgFPR0ybARPNAjwZunBCiMbNYLLz55ptn7X6qqjJjxgwiIyOZN28eRUVF1crExsby0ksvXfDBq/ShZ4+uzAKA2cunyvGk7GKiFS14VYzaSKuPwwRAbplMHRZCiMaqTsGrp6cnaWlpJy2XlpYmW1GcDkshCUYjQRXrXQ+atU+O7x7WriFbJYQ4B3Tp0oWvv/66Vv9W14eVK1dy+PBhvvvuO+x2O7Nnzz4r9z0XSR969hjKrAC4ePlVOZ6TlYqPon0wrDNoe7wGqO7auXJJ2iSEEI1VnYLXzp07ExsbS0pKSo1lUlJSiI2NpWPHjnVu3AXPUsQ+k4kmuUeDVz983Ixc0v7kn9gLIS5szzzzDA6Ho1ajr3a7nVdeeYXo6GjMZjORkZE888wzWCyWWt8vJiaGdu3aMWTIEIYNG0ZMTEyV8y+++CKPP/44AFFRUSiKgqIoJCUloSgKJSUl/O9//6s8fssttwCQnJzMPffcQ+vWrXF1dcXf35+rr76apKSkam3Iz8/n4YcfJjIyErPZTFhYGDfddBPZ2dk1tttisXDZZZfh7e1NbGxsrV/v6ZA+9Owxlmn7t7r5+Fc5rsvZX/m9weAAIMCpBa+StEkIIRqvOgWvEydOxGq1ctVVV5GRkVHtfEZGBuPGjcNmszFx4sTTbuQFq7yAeJORJhXJmtI8AhjdPgizJGoSQpxEVFQUN910U61GX2+//XamTJlC165d+eCDDxg0aBBvvPEG1113Xa3uZbFY+O2335gwYQIAEyZMYMWKFVX6h6uuuqry/AcffMCPP/7Ijz/+SGBgID/++CNms5kBAwZUHr/rrrsA2LhxI7GxsVx33XV89NFHTJ48meXLlzN48GBKS4+tTSwuLmbAgAF8/PHHjBgxgqlTpzJ58mTi4uI4fPjwcdtdVlbG5ZdfTmxsLMuWLaNv3761er2nS/rQs0NVVUzlWmDq7h1Qedxqd+JVnFT53Gy0AeDj0LbNkeBVCCEaL0NdLrr55puZNm0aa9asITo6mtGjR9OmTRtAS0SxcOFCSktL6dOnD7feemu9NviCYikiS6+na8XIa5pHIPr9W4DODdsuIc5zFqutxnM6nYLRYKhVWUVRMBnrVrY+PPvss/zwww+89dZbTJ069bhltm/fzv/+9z9uv/12vv76awDuuecegoKCePfdd/nrr78YMmTICe8zf/588vPzK4PdK6+8kjvvvJOZM2fy0EMPAdCpUye6du3KjBkzuPLKK4mMjKy8/oYbbmDy5Mk0b96cG264oUrdo0ePZvz48VWOXX755fTp04fffvuNG2+8EYB33nmHXbt2MXv2bMaOHVtZ9rnnnjtu9tji4mIuu+wydu/ezYoVK+jSpcsJX2N9kj707Cizl+Fq0X72nr7HtpY7lFtKc+XYBzomo40yTHgfXfMqwasQQjRadfpLSa/Xs2DBAiZNmsScOXP49ddfURQtW9/RPxLGjBnDtGnTMBhO74+xWbNm8emnn7J9+3asVistWrTg+uuv5+GHH8b4rz3b6mLhwoWVWxVcfPHFLFu27LTqq3eWQgoMeoLztaclnh60DfWpfK+FEGfGU298VuO5ti0juXPimMrnU979CqvNftyy0RGh3HfLscDrlanTKCktO27ZZk2DeOSOCXVs8fE1b96cG2+8ka+++oqnnnqKJk2aVCuzcOFCAB555JEqxx999FHeffddFixYcNLgNSYmhu7du9OiRQtAW9M5evRoYmJiKoPXunJ1da383mazUVhYSIsWLfDx8WHLli2Vwetvv/1G586dqwSuR/3338yCggJGjBjBwYMHWblyJe3btz+tNp4q6UPPjgJLAa7a
kldcvY9NG07KLqlM1gSgN1QEuHbtvc4pkzWvQgjRWNW5V/Ty8uK3335jx44dLFq0iOTkZADCw8O59NJL6dz59EcHH3roIaZOnYrBYGDo0KF4eHiwYsUKnnzySebNm8eSJUuq/GFzKvLy8rjjjjtQFKXx7ulmKUIp12NwgkWvx8tDoWlwwMmvE0KICs899xw//vgjb7755nFHX5OTk9HpdJWB51EhISH4+PhU/ttek/z8fBYuXMh9993HgQMHKo/369eP3377jf3799OqVas6t7+srIw33niDadOmkZqaWuXf64KCgsrvExISGDduXK3qfOihhygvL2fr1q1nPXA9SvrQM6+wKJuKXEzoPTwrjyfllHCp7tg2UrqKbMPuVm1Jjoy8CiFE43Xac9Q6depEp06d6qMtVfz+++9MnToVDw8P/v77b7p27QpAdnY2Q4cOZfXq1Tz//PO8++67dar//vvvJzMzk8mTJ/P555/XZ9PrT3khZov2aXGGhzdNDSUSvApxFrz59D01ntPpqo7ivfzYnTWW/e+I3/MPTqp12fpydCru0dHX+r7/rFmzsFgsvPfee7z33nvVzsfExPDSSy/VqW7Q/q2eNm0aDz30EH369MHb2xtFUbjuuutwOp11qnPMmDHMnDmTN998kx9++OGE+62eadKHnjlFeVm4A04FdG7HgvTDWTmEKVoSr3ydD7qKNa9HR2kleBVCiMar0Wb+ef311wF46qmnKjtdgICAAD77TJvS98knn1T55L225syZQ0xMDI888gg9e/asnwbXN7sFHBY887U/KPM8fTEoKk0keBXijDObjDU+jP+Zxnmisv9dw3oqZevTc889h91u56233qp2LiIiAqfTSXx8fJXjmZmZ5OfnExERccK6Y2Ji6NChA7Nmzar2GDZsGNOnT68se6IAuaZzv/76KzfffDPvvfce48ePZ/jw4fTv37/aXrHR0dHs2rXrhG096sorr+S7775j+vTp3HvvvbW65lxzwfehQGmBFqBazXqUf31AYc3UMg1bjD5kmsLRVUwbdrFqXyV4FUKIxqtOweuiRYsYOnQoK1asqLHM8uXLGTp0KEuXLj3l+lNTU9m4cSPAcTMt9u/fn2bNmmGxWCrXa9VWdnY2kydPpnXr1rz88sun3LazxlJEuaLgn689zffwQacohAT6nfAyIYT4r+joaG644Qa+/PLLatltR40aBcCHH35Y5fj7778PULmm8XhSUlL4559/uOaaaxg/fny1x6RJkzhw4ADr168HwN1d24rkv4Hn0XPHO67X66tNS/34449xOBxVjo0bN47t27czZ86canUcb1rrTTfdxEcffcQXX3zBk08+WeNrPBOkDz07jgavNteqHwwZ8rTp7TbfFpQbfSqDV2NFZmIJXoUQovGq00f906ZN+3979x0eRbk2cPg3W7K7yW46JEBIQkeRKkWlWLCCIioooEex93b0eET97McuiopiB4+hiAeUpqJYAFGaCDZUIMHQScKWZPvufH/MZiEmgSSEZBOe+7r22s3MOzPPhtE3z7yNVatW0a9fv2rL9O/fn5UrVzJ16lTOOOOMWp1/3bp1AKSmptKuXbsqy/Tt25fCwkLWrVsXXX6hJm688UaKioqYM2cOZrO5VnE1KJ8Tu05HsraGOmXmRFqkp1Rq9RFCiJq4//77+e9//8vvv/9eYZxnz549ueKKK3jjjTew2+2cfPLJrFq1imnTpjFy5MiDTtY0ffp0VFVlxIgRVe4fNmwYBoOBvLw8BgwYwPHHHx+NZcyYMRiNRs477zwSEhI4/vjj+eKLL5g4cSKtW7emXbt2DBgwgHPPPZf//ve/JCUlceyxx/Ldd9/xxRdfkJZWcd3Of/3rX3z44YeMHj2aq666iuOPP56SkhLmzZvHlClTqhxDesstt+B0Orn//vtJSkrivvvuq8uvttakDm0YXoeWhAYtcfu3BUKkegrAAIaMLvh3lKIzat3PDR6t3/A+7z5C
4RB6nb7BYxZCCHFwdcqE1qxZQ69evbDZbNWWsdls9O7dm1WrVtX6/Pn5+YA2cUV12rZtW6FsTcycOZMPP/yQ22+/nYEDB9Y6rgbldeLQ67BGJibteFxX2gzs27gxCSGarI4dO3LZZZcxbdq0Svveeust2rdvz9SpU5k7dy6ZmZlMmDCBhx566KDnzMvLIzs7u9rJhZKTkxk0aBCzZs1i4sSJ9OvXj8cee4wpU6bw6aefEg6Hyc/PJyEhgYkTJ3LdddfxwAMP4PF4uOKKKxgwYACTJk1Cr9eTl5eH1+tl4MCBfPHFF5x11lkVrmW1Wlm2bBkPPfQQc+fOZdq0abRs2ZKhQ4eSlZVV7Xe47777cDgc0QS2IboRSx3aMHxOOwDheFN0218lbjpGZho2ZXYlWLIlOmGT4vGioKCiYvfZSbOkVTqnEEKIxlWn5HXnzp0MGDDgkOXatm3Ljz/+WOvzu1wuYH8Xs6pYrVYAnE5njc65a9cubr75Zjp06BAdC1QXPp8Pn88X/bmm16/9hVzs0+lIdGuVaodunene85gjcy0hRLMxfvx4xo8fX+W+qVOnMnXq1ErbDQYDDz74IA8++GCtrrVhw4ZDlvnqq68q/PzAAw/wwAMPVCrXpUsXvvnmm0rbk5OTeeeddyptLygoqLQtNTWVl19+mZdffrnKWE455ZQquxA//fTTVY4HPlKkDm2AOhQIOLXxvOoBkzXlH7BMjpLembDFHu02rJa5STYls8+3jxJviSSvQggRg+o05jUuLi5aOR5MaWlpo87ieKDrrruOffv28dZbbxEfH1/n8zz55JMkJSVFX+VPr+udz4ldr8cWaXm1ZbQ4MtcRQgjRoKQObYA6FAiVar9jJWF/vFv3Ommn7NR+aNEZ4tPQR7oNh8vKSDVr80rIuFchhIhNdaoVO3XqxLfffovb7a62jNvt5ttvv6V9+/a1Pn95V6qysrJqy5SWlgLaWnmHMm3aNObPn88NN9zAKaecUut4DjRhwgQcDkf0VVhYeFjnq5bPhUPRRZNXRyhEOEbX0hNCCFFzUoc2QB0KhEu176+LtDID2HdtwawECCpGSM5Bl5AW7TYcLisj1ZQCQLGn+IjFJYQQou7q1G34vPPO4+GHH+aWW27h7bffrrTEgaqq3HrrrTgcDs4///xanz83NxfgoJVa+b7ysgdTPvvk6tWrK1W85TNvrl27Nrpv5syZZGZmVnkuk8mEyWSqcl+98jpxqjrigtqPH379PT0GVT+5hxBCiKZB6tAGqEMBNZK8Gw4YWxzeoy2TU2bNJUmnx2hrEe02DJChSwKk5VUIIWJVnZLX2267jTfeeINp06axYcMGrrrqKrp27QrAxo0beeedd1i3bh2ZmZncfvvttT5/7969ASguLiY/P7/K2RLXrFkDUGH9ukMpP6Yqdrs9Ot7K6/XWJtwjw+fA49dmOvTrdWRktTroGolCCCGaBqlDG4bi1uKIsyVFt1kcmwEIp3WK7GuBoldBUUFVaKFqrbSSvAohRGyqU7fh5ORkFi5cSJs2bfjhhx+49dZbOeOMMzjjjDO49dZb+eGHH2jTpg0LFy4kNbX265JmZWVFlxA4cIH7csuXL6ewsBCTyRRdo/BgPvroI1RVrfL17rvvAjB06NDotpo8iT7ifC58keS11GSklYx5FUKIZkHq0Iahc2sTQ8UlJgPg8Ydo4dsKgKWVNgGiOTkDRSHa+pqmapNcSfIqhBCxqc4zQfTs2ZONGzfywgsvcNZZZ9G1a1e6du3KmWeeyQsvvMBvv/1Gr1696hxY+Xp7Tz31FD/88EN0e3FxMTfddBOgrdGXlLT/iercuXPp2rUrQ4cOrfN1Y4bXSdCn/fO4TGbSU5MOcYAQQoimQurQI6983VZzovYAoKC4jA46bbImcyR5tVpteFVjdK3X1JC2dm2xV8a8CiFELKpTt+Fy8fHx3H777VV2ayouLuaNN97gnXfe4aeffqr1uUeOHMltt93GSy+9
xAknnMDQoUNJSEhgyZIl2O12Bg4cyGOPPVbhGIfDwe+//x4zXZYOi89FOJK8lsaZaWmt++yOQgghYo/UoUeOP+TH5AsBEJ+cDkBBURn9IsvkkK51G7ZZjJRgi7a8Joe08bjS8iqEELHpsJLXv1NVlU8//ZS3336bBQsWEAgEDut8kyZNYuDAgUyePJkVK1YQCATo0KED9957L3feeSdxcXH1FHkM8jnBp41xdZss2BIkeRVCiOZM6tD64/Q7sUSWk01I0tZr3bFrO+lKZF3ZSPKaEGfgL9VGvFGb3CkpqP1OSjySvAohRCyql+Q1Pz+fd955h6lTp7Jjx47oIvB9+vTh8ssvP6xzX3zxxVx88cU1Kjt+/HjGjx9fq/PX5ZgG4XNh8EaS17gErAmWQxwghBCiKZI6tP45fU7iI8mr3qotB+TdsVHbZ8okMU4b26rTKTh1idgM2tJB1qA214S0vAohRGyqc/Lq8/n48MMPefvtt1m6dGl0ogZFUbjnnnu4/PLLOfbYY+sz1qNKwOsgzqsAKrbMNqQmH3otPiGEEE2D1KFHltPvxKINeUVnjSSqxX8C4E3qwIE1aqkuCZ1xGwDx/shD46AbT9CDxSAPjoUQIpbUOnldu3Ytb7/9NjNnzsThcKCqKgaDgWHDhrFhwwa2bt3KU089dSRiPao4/KXYPNrn1KwcTM24e5cQQhwtpA5tGE6/k6Roy6u2/E2CawsAuhadK5R1G5KjY14NngBxljj8YT/7vPuwWCV5FUKIWFKj5HXfvn28//77vP3229GJI1RVpWvXrlx11VVcfvnltGzZksGDB7N169YjGvDRwhF0YfNo41xNaWmNHI0QQoi6kjq04TnLSmihzdeEzmrF5Q3QOlgIekhoc0yFst645Ohsw2F3Gampqewq20Wxp5jW1tYNHboQQoiDqNFSOa1ateKOO+5gw4YNJCQkcNVVV/Htt9/y66+/cvfdd9OyZcsjHefRRVWxBz3YPNqTYLehXufVEkIcJfLz87nlllvo3Lkz8fHxxMfHc+yxx3LzzTezYcOGKo+55557UBSFSy65pMr9X3/9NYqi8OGHH1a5/5ZbbkFRlArb/H4/kyZNonfv3iQmJpKcnEy3bt247rrr2LhxY4WyP/30E6NGjSInJwez2UybNm0444wzePnll+vwG4gNUoc2vDJ7UfSzLiGBrcVuOkRmGra0qtgd2xeXEm15DZe5STVrS+vIuFchhIg9NcqK/H4/iqKQlZXFf//7X04++eQjHdfRzV+GXadgc2s//rlH1psTQtTOggULuOSSSzAYDFx66aX07NkTnU7Hxo0bmTNnDq+99hr5+fnk5OREj1FVlRkzZpCbm8v8+fNxuVzYbLbDjuWiiy7ik08+YezYsVx77bUEAgE2btzIggULOOmkk+jatSsAK1as4NRTTyU7O5trr72WzMxMCgsL+f7775k0aRK33nrrYcfSGKQObXheh5Z4BkwGFL2erbtLOFvZo+1Mr9htOGRKRWcsT17LJHkVQogYVqPktXv37vz0009s27aN0047je7du3PVVVdx6aWXkiZdWuufz4Vdp6NDZMxrfFpK48YjhGhSNm/ezJgxY8jJyWHJkiW0atWqwv6nn36aV199FZ2uYuebr7/+mm3btvHll19y1llnMWfOHK644orDimX16tUsWLCA//znP9x3330V9r3yyivY7fboz//5z39ISkpi9erVJCcnVyi7Z8+ew4qjMUkd2vC8Ti3xDFm0+SLs2zaiV1Q8OisWa8WW7rAldX+34dLSaPJa7JUHx0IIEWtq1G14/fr1rFq1iuuuuw6bzcaGDRu48847adOmDZdccgmfffZZdGp/UQ98TpxhHaag9qOtZYvGjUcI0aQ888wzlJWV8e6771ZKXAEMBgO33XYbbdu2rbA9Ly+PY489llNPPZXTTz+dvLy8w45l8+bNAAwcOLDSPr1eXyF527x5M926dauUuAJNumut1KENz+e0
AxCONwEQ2PM7AI6EXPhbt3YS0g7oNlxGmlm7J6XlVQghYk+NkleAvn37MmXKFHbu3Mm7777LwIED8fv9zJ49m2HDhpGTk1Np7JKoI58LT0BrFA/oFNJaJjduPEKIJmXBggV07NiRAQMG1PgYn8/H//73P8aOHQvA2LFj+fLLL9m1a9dhxVLeLTkvL49gMHjIsmvXruXnn38+rGvGIqlDG1bA5dQ+JGgTH8bt26RtT+lUqawuIV26DQshRBNR4+S1nMVi4YorrmDp0qX8/vvv3HPPPWRkZLBt2zaKi7UuNgMHDuSNN97A4XDUe8BHBa8Dn1/7p3GZ40i0WRs5ICGODqqq4vYHY+ZVl9Y4p9PJjh07OO644yrts9vtFBUVRV8ejye6b8GCBdjtdsaMGQPAyJEjMRqNzJw5s+6/UOCEE07g5JNP5s033yQrK4tx48bx6quv8tdff1Uqe/fdd+N2u+nVqxcnnXQS//73v1m8eDGBQOCwYoglUoc2jFCpCwAlkrwmu/MBMGZ0qVQ2zpaO3lDebdhJmiXS8uqR5FUIIWLNYU1j26lTJ5566in+85//sHDhQt566y0++eQTvvvuO77//nvuuOMORowYcdh//Bx1fC78vkjyajLRJkHWmROiIXgCIY598LPGDiPq10fPIj6udv+bdjq1FiertfJDr1NOOYX169dHf3722We5++67Aa1ltG/fvnTs2BEAm83G8OHDycvL44477qjjNwBFUfjss8947rnneP/995kxYwYzZszg5ptv5uKLL+b111+PdhM+44wz+O6773jyySf57LPP+O6773jmmWdo0aIFb731FiNGjKhzHLFI6tAjRy0tA7Q1Xh2eAG1D20AHSdndKpVNiI/HY9DGxoZcLml5FUKIGFbrlteq6PV6RowYwbx58ygsLOQ///kPHTp0wOv1Mnv27Pq4xNHF5yQcaXktjTNjizw5FkKIQymfHbi0tLTSvtdff53PP/+c999/v8J2u93OokWLOPnkk9m0aVP0NXDgQNasWcMff/xxWDGZTCbuv/9+fvvtN3bs2MGMGTM44YQT+OCDD7jlllsqlO3Xrx9z5sxh3759rFq1igkTJuByuRg1ahS//vrrYcURq6QOrX+qW5uu32BNpGCviw7KTgAsrY6pVNZmNuAyaPVs2C1L5QghRCyr9wVEMzMzmTBhAhMmTOCbb77hnXfeqe9LNH9eJ3i15DVgS6Jlusw2LERDsBj1/ProWY0dRpTFqK/1MUlJSbRq1arKcaPlY2ALCgoqbJ89ezY+n4/nn3+e559/vtJxeXl5PPLIIwCYzWaACl2OD+R2u6NlqtKqVSvGjBnDRRddRLdu3fjggw+YOnUqhr+tZx0XF0e/fv3o168fnTt35sorr2T27Nk89NBD1X/5ZkDq0PqhK/MCEJeYxLZtW+ip+Aiix5CSW6mszWzEYbBiwkvY7SHFpNW5Jd4SwmoYnVIvz/mFEELUg3pPXg908skny3p2deFzofdpsyEqSanEW6r/Q1AIUX8URal1N91YNHz4cN566y1WrVpF//79D1k+Ly+P4447rsrE8PXXX2f69OnR5LV8Aqbff/+9ynP9/vvvFdaOrY7RaKRHjx78+eefFBUVkZmZWW3Zvn37ArBz585Dnrc5kTq0boLhIAaPHwBTYgpl238DoNiURYbeWKm8zWxgl8FKJl4IhUnRaV3uQ2oIp89Jsjm5wWIXQghxcPI4MQaFvQ6MnshU/omyBqAQonbuuece4uPjueqqq9i9e3el/QdOBFVYWMjSpUu5+OKLGTVqVKXXlVdeyaZNm1i5ciWgtZz26tWL999/v8IarQBr167l+++/55xzzolu+/PPP6ucnMlut/Pdd9+RkpJCixbacmBfffVVlZNULVq0CIAuXSpPtiPE35X6S7FouSvmpBTYqz1oKbO1r7K8zWyg2JAU/Vnv8WGL07rfS9dhIYSILU2/iaEZcnlLsEV65PlNiY0bjBCiyenU
qRPTp09n7NixdOnShUsvvZSePXuiqir5+flMnz4dnU5HVlYW06dPR1XVaidDGjZsGAaDgby8vGi344kTJ3LWWWfRq1cvxo8fT+vWrfntt9944403aNWqFRMmTIgev379esaNG8c555zD4MGDSU1NZfv27UybNo0dO3bw4osvotdr3aNvvfVW3G43F1xwAV27dsXv97NixQpmzZpFbm4uV1555ZH/5Ykmz+l3YvFpn422JMwF3wCgplVeJgfAZjJSQiKKIYwa1EXXenX5XRR7i2lP1UmvEEKIhifJawxyeO1YI8nrLk/zWSJCCNFwzj//fH766Seef/55Fi9ezDvvvIOiKOTk5DB8+HBuuOEGevbsSY8ePcjOzqZnz55Vnic5OZlBgwYxa9YsJk6ciMFg4NRTT2XZsmU8/vjjvPTSS7hcLjIyMhg3bhwPP/wwLVu2jB4/ZMgQHnvsMT755BMmTpzI3r17sdls9O7dm6effpqLLrooWva5555j9uzZLFq0iDfeeAO/3092djY33XQTDzzwQHRWYiEOxul3RltedQlW0r0FAFhaV56sCcBqNrBPtaEzqISC+9d6LXAWSMurEELEGEleY5Dd78Tm0brO6ZKSDlFaCCGq1qFDB1599dWDltmwYcMhz/PVV19V2jZgwADmz59/yGNbtmzJv//9b/79738fsuzZZ5/N2WeffchyQhyM0+ck3qfVoW6DiRx1OyiQmtO9yvJ6nUKZPhG9USXk3Z+8AhR7itlVtovN9s1ssm9is30zmx2b+cv5F6FwqMG+kxBCxLITW5/I86dUnvDxSJDkNQbZA65ot2FTsiSvQgghRE05A07iI92G9/h89FTsAJgzqx8z7Y1LQWcIAxWT16dXP82Tq548ovEKIURT5wlWvQLBkSDJawyyBz20jdwDlhapjRuMEEII0YQ4fU6yIsmr067NUF2iSyPVXP0cEv64FHRGrbU2VFpK5w6dAQirYQyKgezEbDokd4i+2iW2w2yQlQCEEALArG+4/x9K8hqDHD4PnSJDXZMyWjRuMEIIIUQTcuCYV59rGwAl8Tkc7FFw0JyKzqAlr+GyMi7qfBGdUjqRGJdITmIOxiqW2BFCCNHwJHmNQW63H4gjqFNokSlL5QghhBA15fLYMUceAOs82wHwJHY86DFhSxo6Y6TbsMuFQWegT0afIxqnEEKI2pN1XmNNKIjXo1WgLpORlinWRg5ICCGEaDrcjv0zBFsCuwBQU9sd9Bh9fBJKecuro+jIBSeEEOKwSPIaa/wuAn5tzcNSs4ns1i0PcYAQQgghyvmddgDCcQYSQ3sAiEvLOegxVouZgFHrGhy2Fx/R+IQQQtSdJK+xxusk7NP+WcosCVgT4hs5ICGEEKLp8LvsAITjzaSF9gJga3Hw5DXRbMCrj9OOiyS/QgghYo8kr7HG5wKfAoA33tbIwQghhBBNS8DlBEC1mGnBPgBSWh2827DNbMBtNAEQdjmObIBCCCHqTJLXWONzokRaXr3mhEYORgghhGhaQi4XAGGzCb2i4lcNxKe0OugxVpOBUoNFO66s9IjHKIQQom4keY0xqteJ0au1vNoxNXI0QgghRNOiut0AhI3a/BFFunTQHfzPHZvZiMugDdMJlZYd2QCFEELUmSSvMcbjLibBo30Oxic3aixCCCFEUxJWwyhlkUpUH3kQbDz0xIc2swGHQevtFPZ4jlh8QgghDo8krzHG7t6NNVJvqtbkRo1FCCGEaErKAmWYfdqSN4oupG2zZB7yOJvZyD69tjRd2OM/cgEKIYQ4LJK8xhi7t5hEj1bx6pOSGzcYIUSTNHXqVBRFYc2aNVXuLygoQFEUnnvuuei2r7/+GkVRqn3NnDmzocIXos6cfieWSPKq0wUACCS0PuRxNrOBYkMSAGFf4MgFKIQQ4rAYGjsAUZHdsy/a8mpKSW7UWIQQR5/bbruNfv36Vdp+4oknNkI0QtSO0+ckPtJwqle8AChJbQ55XKLZyF5D
MgBhb+hIhSeEEOIwSfIaYxx+B5naXBPEt0hr3GCEEEedwYMHM2rUqMYOQ4g6cfldWHza5zidNvGSMfXga7yC1vK6R58CgBoCNRhEMcifSEIIEWuk23CMsZfZMUd6LCW2atG4wQghhBBNiNPvxBJpeU3Qa0veJLTIPuRxVrOBncbU6M/hMplxWAghYpE8VowxbpdWYYYU6NTl4IuqCyFEfXO5XBQVFVXanpaWhqIojRCREDWnjXnVPlsN2hiclFaHrkuNeh2lcWkoOhU1rBC2F6FPSjqSoQohhKgDSV5jjMel9RkuM+vp3DKlkaMR4iijqhBwN3YU+xnjoYETxquuuqrK7Tt37iQz89CztgrRmJw+JynlEzYZVVyqhbS0mvVi0pusKEYV1acQKtqOMafDkQxVCCFEHUjyGmMCZdojY7fJSKLF2MjRCHGUCbjhiUPPTNpg7tsBcQkNeskHH3yQwYMHV9qemppaRWkhYovT76R1pNuwzhhmt9KS9gZ9jY61WYzaX0U+CJfsOnJBCiGEqDNJXmNM0B0EoMxsRq+TLnpCiIbVvXt3Tj/99MYOQ4g6ObDbsM6oss/YssbH2sxGQgYdChDet/vIBCiEEOKwSPIaazxhAErjzI0ciBBHIWO81toZK4zxjR2BEE2K03/AUjnGMKXmmnd1t5kNBA16jIQI24uPUIRCCCEOhySvsURV0WnL0lFqsjZuLEIcjRSlwbvpCiHqj9PrwHxAy2sgoebDAGxmA36jUUteHZK8CiFELJKlcmJJ0Icxkry6zcmNGooQQgjR1PhKHdE/bHSGMGpimxofazMZ8RriAAg79x2B6IQQQhwuaXmNIQFPMWavAqh4LTI5ihDi8Lzzzjt8+umnlbaff/751R6zbNkyvF5vpe09evSgR48e9RqfEPXN73QAoCqg6MGYcug1XsvZzAY8BhMAYZfziMQnhBDi8EjyGkPsrh3YIqt0qLb0xg1GCNHkvfbaa1VuP+WUU6o95qWXXqpy+0MPPSTJq4h5wdJI0mkMoygQ3yKnxsfazEZKDRYAwqWuIxGeEEKIwyTJawyxl+7E5tHWpzMkJTduMEKIJmv8+PGMHz/+oGVUVa3w8ymnnFJpmxBNiaqqhEpLAdAbtXs5KbN2La8ugzbmPVwWQ+s9CyGEiJIxrzHEXrYHm0f7HCdrKgohhBA15gl6iPNqy83pjWGKVRsZqSk1Pt5mNmCPJK8ht+eIxCiEEOLwxHzyOnv2bE455RRSUlJISEigZ8+ePPPMMwQCgVqdZ926dTz55JMMHTqUjIwMjEYjKSkpDB48mMmTJ9f6fEeC3bM3mry2bJfVuMEIIYRo8o6mOtTldxEfmWnYYAizk3SSLMYaH28zG7EbbACEPb4jEaIQQojDFNPdhu+44w4mTZqEwWDgtNNOw2q18uWXX/Lvf/+b+fPns3jxYiwWyyHPEwwG6dOnDwBWq5V+/fqRkZHBtm3b+O6771i+fDnvvfcen332GcnJyUf4W1XP4dxLVmR9upY5NZ8hUQghhPi7o60Odfqd0eRVZ1TZZ2iBoig1Pj7RbKBYnwhA2BcAVdWWzxJCCBEzYrbl9aOPPmLSpElYrVZWrlzJZ599xv/+9z/+/PNPunfvzvLly/m///u/Gp/v+OOP54MPPqCoqIgvv/ySGTNmsGzZMtatW0erVq1YtWoV//znP4/gNzo0974iAMIKJKbVvKuTEEIIcaCjsQ51+p1YIsmr3him1JRZq+NtZiNFxiQAwgEFvI76DlEIIcRhitnk9YknngDg3nvvjT7xBUhPT+fVV18F4JVXXsHhOHTlYjAYWLNmDaNHj8ZkMlXY1717d5555hkAZs6c2ahdn8r2aevKeU0KyVbTIUoLIYQQVTsa61Cnz4kl0ntJZ1Txxreq1fE2swFn+YRNAQXcxfUdohBCiMMUk8nr9u3bWb16NQDjxo2rtH/QoEG0bdsWn8/HokWLDvt6vXv3BsDj8VBUVHTY56srr12b
4t9j1pNci3E6QgghRLmjtQ51BVzE+7RZhnVGFTWxdsNvrGYD7vJ1XoM6SV6FEOIQCkvc3D17PZ/+vLPBrhmTyeu6desASE1NpV27dlWW6du3b4Wyh+PPP/8EIC4ujtRGnOU34NKm5veYDLWaZEIIIYQod7TWoRVaXg1hDMlta3W8zWzAU568SsurEEJUa4/Ly8PzfuG057/mw7XbePaz3wmHG2a5vZicsCk/Px+A7Ozq12dr27ZthbJ1papqtMvTueeeW6lLVENSyrTuVt64OAz6mHyuIIQQIsYdrXVoxTGvKuYWObU63mTQEzDHAxAOKqilRch0TUIIsZ/DHeD1pZt599sCPIEQAIM6pnP3WV3Q6Rrm/5gxmby6XC4AEhISqi1jtVoBcDqdh3WtRx55hO+++w6r1cpTTz11yPI+nw+fb/8U+od7/QPpPNr6dD7ToWd/FEIIIapytNahTr+TtuWnNqgkt6xdyyuA0Vr+O1MI79uFvt6iE0KIpsvtD/LutwW8/s1mnJH1tHu2TebfZ3XhpI7pDRpLTCavDeW9997j0UcfRafT8c4779CpU6dDHvPkk0/yyCOPHJF4jJ4wAD5z9X9wCCGEELEg1upQp89JvF/rtuYyxpORXPu61JQQjwooQNi+R5JXIcRRb+kfe/nnB+spKtWeDnbOsHL3mV0449iMWi1HVl9ism+qzaYtEl5WVlZtmdLSUgASExPrdI3Zs2dz1VVXAfDmm28yevToGh03YcIEHA5H9FVYWFin6/9dKBzC5NU++y1J9XJOIYQQR5+jsQ4FcPld0W7DxYZEMhLNtT6HzWIkaNRS1vC+vfUWmxBCNDWqqvLO8nzGv7uKolIf2anxvHhJLz65fQhndstslMQVYrTlNTc3F+CglVr5vvKytTFnzhzGjRtHOBzm9ddfj1bANWEymY7ImB6X34XNo30OJqTV+/mFEEIcHY7GOhQiY14jEzbZTSmYjbVvN7WZDfgMRoyBEGFHST1HKISoKV8wxIdrt/FXsZsEk4EEkwFb5N1qNmA16WmfbiUlIa6xQ22W/MEw//fRz8xao9UVo47P4j8XHIfJ0Pj9UWIyeS2fdr+4uJj8/PwqZ0tcs2YNQIX162rio48+YsyYMYRCIV577TWuvfbaww+4Huzz7cPm1ro7GVq0buRohBBCNFVHYx0KWvIaH2l5dcW3qNM5bCYjPkMcVryEnfvqMTohRE2oqsqin3bx9Kcb+avEfdCyFqOeu8/qwviTctE30GRBNbXL4eXjH7czd912CorLGNQxneE9WnH6MRnYzLG9okhxqY8b3/+BVQUl6BS4b9gxXD2oXaO1tP5dTHYbzsrKol+/fgBMnz690v7ly5dTWFiIyWRi2LBhNT7v/PnzufjiiwkGg7z22mtcf/319Rbz4XJ49mGLdBuOy6jd2nRCCFGVX375hcsuu4w2bdpgMplo3bo1l156Kb/88kuFclOnTkVRlOjLbDbTuXNnbrnlFnbv3g1oLXQHlqnuNXXq1Eb4puJAR2MdCuD0OqLdhj1JdXsIbDtwrVdX/U0mJYQ4tHV/7WPUlO+4efoP/FXipqXNxPiTchnbvy3n9WzNqV1a0D83lWNaJZKZaMYTCPHYgl8ZNWUFm/a4jlhce10+3li6mQlzNvDG0s18tXEP2/a5UdWKS8OU+YL8b+02LntrJSc+tYQnP9nIxl0uvIEwX/y2hztnref4x7/guvfW8PGP2yn1BY9YzHX1204nI175llUFJdhMBt4e349rBrePmcQVYrTlFeC+++7jggsu4KmnnuKcc86JPh0uLi7mpptuAuCWW24hKWn/+NC5c+cyYcIE2rRpw5IlSyqcb9GiRYwaNYpgMMiUKVO47rrrGu7L1IC9bBe2yAMmXQtJXoUQh2fOnDmMHTuW1NRUrr76atq1a0dBQQFvv/02H374ITNnzuSCCy6ocMyjjz5Ku3bt8Hq9LF++nNdee41Fixbx888/8+KLL0bHSYL2
/9QZM2bwwgsvkJ6+f6bBk046qcG+o6je0VaHAvg8LvSRvyX9KbWfaRjAZjZSZtBm/A+XHrk/hoUQ+xWWuHnms9+Zv34HoLWoXjekPdcNaU+CqepUJRxWmbm6kCcW/ca6v+wMm7Sc24Z25PqTO2Csh+Umg6EwX/++l1lrCvly4x5CVaxhmhCnp2NLKx1b2giGwyz+ZXd0+RiAfrkpXNA7i+PaJPLFb3tYsGEHW/aWsfjX3Sz+dTcmg45TurTg1C4tGdK5Ba2TG3e1kcW/7OKOWT/i9ofITYvnrSv60rGlrVFjqkrMJq8jR47ktttu46WXXuKEE05g6NChJCQksGTJEux2OwMHDuSxxx6rcIzD4eD333/H6/VW2L5nzx4uvPBC/H4/WVlZrFixghUrVlR53eeee67CH2INpcS+jYzIWB1Ty4wGv74QovnYvHkz//jHP2jfvj1Lly6lRYv9XShvv/12Bg8ezD/+8Q82bNhA+/bto/vOOecc+vbtC8A111xDWloaEydO5OOPP2bs2LEVrrFr1y5mzJjByJEj6zRuUhxZR1sdGggFoCwycQQqSsvKXaVrwmY24DRE1np1eyAUAH1sd/EToqkKhVVe/OIPXl+6BX8wjKLAqD5Z3HVmFzKTDj7hmk6nMG5ANqd0acH9c3/iq9/38tziP1j40y6eHdWD49rUbfLTzXtLmb1mG//7YRt7XfuX9eqdncyJ7dPYWuJm0+5SthSVUuYPsX6bg/XbHNFy7dITuKB3G0b2akN2Wnx0e4+sZO48vRO/73axYP1OFmzYQUGxm89+2c1nv2g9nDq0SGBI5xYM6dSCAe1TiY9ruDRt+sq/uG/uTwAM7JjG5HF9SI6PzfHEMZu8AkyaNImBAwcyefJkVqxYQSAQoEOHDtx7773ceeedxMXV7Jfqdruj68pt27aNadOmVVv24YcfbpSKd9/2AgDCioqtRWqDX18I0Xw8++yzuN1u3njjjQqJK0B6ejqvv/46J598Ms888wxTpkyp9jynnXYaEydOJD8//0iHLI6Ao6kOdfgd0fGuOqOKuUVunc6jJa/aEjvhoA48+8Dasp6iFEKU8wZC3DZjHYt/1RK3kzqkcf/wY+jWunZJZ+tkC++M78fHP+7gkfm/8NtOJ+dP/pYLerchNy2eFjYT6VZT9D3NGoeqaq29BcVuthaXsbXYTUHk/cBxtmkJcVzYpw2j+7alc0bFFshAKMzWYjeb9rj4Y3cpnkCIM4/NoFfb5Gq72CqKQtfMRLpmJnLXmZ35daeTz3/dzdI/9vJjoZ3Ne8vYvLeMd78tIE6v4/icFI7PSaFX22R6ZSeTbj0yk919tG4793+kJa6XnZDNQ+d1q5fW6yMlppNXgIsvvpiLL764RmXHjx/P+PHjK23Pzc2t1C891rj37ATAb1JIsdZ+en8hhCg3f/58cnNzGTx4cJX7hwwZQm5uLgsXLjzoeTZv3gxAWprMgN5UHS116IEzDStGleS0VnU6j81soNig1cGhgALuYklehahndrefa6atYc3WfcQZdDw7qgcjerau87hKRVEY2bsNgzql89C8X1i4YScfrt1Wp3PpFDi1S0tG923LaV1bEmeoOokz6nWRLsNWzj6ubjF3a51Et9ZJ3HF6ZxyeACs2FbH0zyKW/rGX7XYP320p5rstxdFj2iRb6JWdTO+2yfTJSaH3QRLlmlr8yy7umr0eVYXLT8zhkRHdYmp8a1ViPnk9Wvj2aVPy+82QFqPN9EI0d6qq4gl6Dl2wgVgMllpXIg6Hgx07dnD++ecftFyPHj2YN28eLtf+cX0Oh4OioiK8Xi/ffvstjz76KBaLhXPPPbdO8QvRUJw+JxaflmCHDToyk+o2dsxmNuI2RiZsCkaSVyFEvdlh93DFO6v4c08pNrOBty7vy4D29fOANN1qYvK4PlzSdy/fbymmqNTHXpePolJ/5N1HMDJ21WoykJMWT25aQvQ9Oy2ezhk2Uhth+Z0ki5Fzurfi
nO6tUFWV/KIyvttSzI9/2fmx0M6mvaVst3vYbvewcIPW4HVi+zQev+A4OrSw1umay/8s4pbp6wiFVS7s04aHz4v9xBUkeY0ZYYc2q2HApCM5XsbXCNEYPEEPA6YPaOwwolaOW0m8Mf7QBQ9QnozabAefZKF8v9O5f0bV008/vUKZnJwc8vLyaNNGJpETsc3ldxEfaXn1Gw1kJtate50227DW8hoO6CR5FaIe/b7LxRXvrGKX00tmoplpV/WnS2b9Twg0pHMLhnSuvFxWOKzi8AQIqyqpCXExm6gpikL7Flbat7By6YAcAJzeAD9tc/BjoZbMLvtzL99tKeacF5dxwykduOmUDrVa23rt1hKufW8N/lCYs7tl8sxFPdDF2HJD1ZHkNUaES7U+9gGTniSLJK9CiLopT0oPbFGtSlVJ7uTJk+ncuTMGg4GMjAy6dOmCThe7416EKOf0O6PL5PgMJtLqODbMZjbiMUjLqxD1beWWYq55bw0ub5BOLa1Mu6p/g8+uq9MppDRCq2p9SDQbGdgxnYEdtTkFCkvc/N/HP/P173t5acmfzF+/g8dHHhfdfzA/b3cw/t3VeAIhhnRuwaSxvTDE8BjXv5PkNUYYyrRaN2AyxvQgaSGaM4vBwspxKxs7jCiLofYVe1JSEq1atWLDhg0HLbdhwwbatGlDYmJidFv//v2jsw0L0ZRUSF5NFvR1bEGwmQ37k9eAAmWSvApxKHtcXhas34ndE8Bk0Gkvoz76ubjUz1OfbsQfDNMvN4U3L+8bszPZNhVtU+N5d3w/Fv20i0fm/0J+URmXvrWSkb1a88C5x1Y7udOmPS4uf2cVLm+Q/rmpvH7Z8ZgMNW+xjQWSvMaIOI+2UHGwhrM/CiHqn6Iote6mG4vOPfdc3nzzTZYvX86gQYMq7V+2bBkFBQVcf/31jRCdEPXP6XNGuw37zHXvhlgxedXBzx/C8ePBWrkLohBHs1BYZemfe5m56i+W/LYnOpb0YM7qlsGkMb1r1b1VVE9RFIb3aMXgzuk8/9nvvPf9Vj76cQdf/LaHtqnxmI06zAa99m7UYzbqWbG5iJIyP93bJPHW+L5Y4prev4UkrzFAVVXMnrD22ZLQyNEIIZq6f/3rX7z//vtcf/31LF26tMJswSUlJdxwww3Ex8fzr3/9qxGjFKL+uPyu6IRNAWtync+TaDbijiSvobAR9m6EqcPhinlgy6yPUIVo0rbbPXywupDZawrZ4di/JnSf7GS6tU7CHwzjC4bwBcORVwhfIMygTuncelqnOveKENVLNBt55PzjuOj4LO6b+xM/b3fy205nteXLu20nmpvmMEVJXmNAWaCMhMgEp0p8cqPGIoRo+jp16sS0adO49NJL6d69O1dffTXt2rWjoKCAt99+m6KiImbMmEGHDh0aO1Qh6oXT7yQ10m04ZKv7OrMmgw6/SeuuHzS2hEQdFP0O7w6DK+ZDkkxeJo4+qqry/ZYS3li6ma//2Ev5ylnJ8UYu6N2GMf2yj8jES6J2emQl8/HNg9iwzY7LG8QbCOENhvEGQvgCIbyBMAa9wshebZrs2F+Q5DUm2H12Ej2R/xMkJjdqLEKI5mH06NF07dqVJ598MpqwpqWlceqpp3Lfffdx3HF1WJhOiBjl9JaQFek2TGrdW0gVRUGXoPWACnt8MH4hTBsBJZthaiSBTc6uh4iFiH3hsMqSjXt49etNrPvLHt1+Yvs0xvRvy1ndMqULcIzR6xR6Z6c0dhhHlCSvMcDutWONtLyGElMbNxghRLPRvXt3pk+ffshy48ePZ/z48bU69913383dd99dx8iEqF9Od1F0wiZz2uGNT9VbteRVdZehpuSiXLkQpp0H+wr2t8CmtjvMiGOHqqoEwyr+YJhAKIw/GMYfCpORaJYJJI9SgVCY+et3MOWbzfyxuxSAOIOOi/tmcfWg9rRLlyFuovFI8hoD9pTuJSmSvKrJ9bNQsxBCCHG0cPnsWPxaDyZbavJhncsYSV6V
UAjV70dJzoYrP9ES2OJN+xPY9I6HG3aj+XLjbp759Hfyi8rwh8LRbqAHapNsYcplx9M9K6nhAxSNwukNMPeH7by5bAvb9ml/mFpNBi47IYerBuXS0mZu5AiFkOQ1Juwq2UmryBNjfYtWjRuMEEII0cQ4/a5oy2tyi+TDOpfRao1+DpeVoTOZILE1jF8E742ITOI0DLpdWPUJ9EbocQlkxl7X/K3FZTw6/1eWbNxTbRmDGsIW9OHbaefG5xdw31mdGdolHcJh1GAIwiHUUBgt41W198hLjWTBil4Pig50SvSzolNAibyqoIZCEAqhBoOowSAEg6ihEGogCGpYO4deBzpdxc+qiur3owYC2nvkc9jvh1Bofxw6Pegicej0kVgO/vtSdLpIvErF8uGwFu+Bv5NgCFQVxaBHMRrBYEAxGlEMRu3daKhwnui5y8//999lOPIZUOLi0JlNKGYzOpP2rpjM6Exx2vHlx8D+c0S/hHYN5YDPKJF/F7SW99UF+5i1upCFP+3AG9AmEE23xnHlwHZcdkIOSZamObGPaJ4keY0Bjt3bAFBRMbaQ2QyFEEKI2nAGPcRHktfUFoc33ssab8Kjj8MS8hMuLYXUyHAeWwZcsQDeOx/2/AIrX6v+JCtfh3Oe1pbZqSZZ+7uCojJ2OryU+oKU+YK4Iu+l3iCeMjdpcQptUyxkJZvJSraQYjFqOU84TMjpJFRcTLC4hGBxEaED3tVAgJAKBcVu8kvcnKTCSYpCTnoC2fF6FHcplJailroIu1yoHk/FwD6BLXX6TYqYptMRMhjxKAZ8ip7hOiNn6A3o4uJIizeQZAC+DrDH72d3+YOBQADFYECJj0dnsWiv+Hh08RYUi7bM3IEPEA78jKJoxxqN0XeMkXdFgbCqPQwIh/Z/VsMoegO6hATtOuXvkc8oCuGyMsJuN2F3GeEyd/QzYbXyMZGfFYOesNeH6vNq714vYZ8X1etDDYfQxZlQTCaUuDgUU5z2wCDOpD2AUHT7H0Ac+PCh/CECKmo4DCoHPIzQHnKoofIHM5GHNKEghMKg12kPV/Q6FL2h4s9/f+iiUyIPPnSR/Qc8zFHKH+6gPQgKBFD9Ae1agYD2CgW1339cnPZvEReHLi4u+rMaDO7/dzvgQZAaCGjlzBZ0FjM6iyX6WTGbMaSlYWqgSSAleY0B7r27AfCbwZIk3YaFEEKImgqFQ5SqgWjyak05vG6u5Wu9WkJ+wmVlFXdaW8CVC+GH98Bjr/oEO9bBlq9gwR3w13dw7gsQV/0YwV177Lw6czm//fAbLTx20jxO0rxO0rwOsr0O0j0ObIGKCWVR5FUbGZHX/jghdLADDAZCig6fqhBGQWc0YLXEodPrQa/fn5QroKBQoVU1HNZaYcPhSp+rFUluMBpQ9AbtD2yDHvQG0O1PblDDWqtkKISqaufTxcWhGPf/MR59GfTatUPatUOhMFv3uti+rwwlrGIy6miXbiW1qplXyxOPcBgVdX8yQqTVVK/XWi8PeEdhf2JSnixEP/srJjR/P295QoQSSUD2/z5Vvz+SYGmJlur3V463tsJh9H4fVnxY/76vGALVHKb6/eB2H/zeEUedhEGDyH7rzQa5liSvMcBfsheAoFklQSZsEkIIIWrM5XcBYIn8Pa+3VfpTvFbK13pN9bkqJ68AlhQYeHu1x6t+P+GvXyD8xbOEl/2P8LofCJ90D2FTOmGXi8COHfj/KsRfWIh9cz6mfcWMPayIocxgxm6yVnh5EhIJ2JLZ7VVRUEmxGBnePZNjW9m0Tqqqis5kQmezoU9M1N5ttui7YtD+RJy/fgf/+nA93kCY9ukJvHF5Xzq2PLzfcWP4fksxE+b8RH669m+aaDbg9AYBOPPYDB4e0Y3WyZYqjw2HVVZsLmb6qq38vN3JwI7pXNw3i15tk7XWtwamhsMs/2U7T368njJvkDiDjjijnjijAZNRj8mox2DQs9fhpXBfGb5ACEUFBe1eUFQwhIP0zIhnxDHp
nNwuCbMaRPX5Uf0+0OlR4oyVW+YMBtRgkLDHQ9jtIezRWjrVyM8oygEPEA441qh1O66YzAeiLYPAAa2LushnrSVRDQYPaFWNtLKWaT8D6BLi0cVXbplFUbT4Ii2zqttNqKwM1e1GDQS1LthmE4rJjGI2oYu8Kzo9asCvPSjw+VF9Pu2/ab8PAoHIg5gDungT6S6vEmmAPeDBQ7SFVlf5oYzBEG1l1R6uaF3P1XAIQuFoq6yqhrXrlT+sCasVu+6XP8yJPijSymit24aKXdiNRu37RcbzV9VCrh0TV6FlNvpvHwho//ZeD6rHS9jr1f7tvV6MbRpuGTFJXmNA2F4CgGpSsclSOUIIIUSNufwuDEEVY6QpSGc9vMTKZjbgNmoT01SVvAa2b2ffrA8I7tlDyOUi7HBo3XYjn8Nud6Rk+azHZTD7oSqvZYq8e41mTNltsWVnYchoiTEjA0PLDAwZGRgzWmJo2RLFYomOW/QGw2yzeyi0e9la4mHbPg/b93nYZnezfZ+Hfe797WZxeh3Xn9yem07piCWu9suanNezNe3SE7juvTVsKSrjgsnf8uzoHpzVLbNRErfacnoDPLloIzNW/QVAS5uJx0Yex+BO6by0ZBNvLdvC4l938+2mIu48ozPjT8rFEJlluajUx+w125i5+i+2Fruj5/xr1V/MWPUXHVtaubhvFiN7t2nQyYxWb7Vz7Qc/4w0ogBGCgBe0D8G/lY5Db1LISrGQk5ZAblo82anxDO7UQtZmFU2SJK8xILtMq2jj4sIkWat+6ieEEEKIypw+R7TVFYiu01pX5d2GAUKlpdHtajBIyX/fZ+9LL1UeF1oFxWjUxgKqbvSKB8WgQnImvyR2Y7k3gV3xqbhSM7jk/BO46NTj0NdiWRqLETpZTHSqZo7HMl+Q7XYPOx1eOrW0VtuiWFPHtUli3q2DuOn9H1hVUMIN7/9At9aJXH9yB4YdlxlN9mLNZ7/s4sGPf2a3U+tTPm5ANv8+u2t0AqJ7z+nKyN6tuX/uz6zduo/HF/7G3HXbuXZwez7/bTeLf9lFIBSZxdpkYGTvNgzsmMZnv+zmk593smlPKU8s2sjTn/7OqV1acEHvLLJT40myGEmyGLGZDeh09Zvgb9hm56qpq/EGwpzWtSX3nN0FXyCMNxDCG4y8B0L4g2Fa2EzkpiXQJsUiyx6JZkOS1xhwYkIOAdZjNSgkW6oYdyGEEEKIKjlc26PjXYNxpugsqnVlMxujyWt5y6vn51/Y9eCDeH/9FQDL8cdjPflk9ImJ6JMS0SUmok9MJJxg5Yu/3OR7wB4AhyeAo8zD2UXTGOWeiY4iysKlPBO4nROP78vEc7pWPd7yMCWYDHTOsNE5o/5a1tKtJt6/ZgATP/+DaSsK+GWHk9tmrOPZVAvXDm7P6OPb1qllt755/CE++Xkns1YXsjJf69nWLj2BJy/szgntK88r0jUzkdnXn8isNYU89clGftnh5I5ZP0b392ybzKX9szm3Zyvi47Q/m88+rhWPnN+NBet3MnttIev+svPFb3v44reKszgripb0JlqMpFtNjOzVmjH9szEb6/Z7+mO3iyveWUWpL8gJ7VN59dI+dT6XEE2VJK8xQFeqjdcJmgzEGeTJmBBCCFFTTkdBdJmcsOXwWl1Ba3ndF0leg0VF7H7yKUr++18Ih9ElJtLyX3eTfNFF2pi8A4TDKrfOWMfCn3ZWOueXnMcCXTYvGF/lOF0BX1kfRH/sZEjocdjxNqQ4g457z+nK9UPa8953W5n2XQGFJR4e/PgXXvziT644MZdze7bCqNPtn88pMiurAqRZ4zAZ6j/ZUlWVH/6y8+HaQuav30mpT+s6q9cpXD+kPbcN7XTQJE+nUxjbP5szjs3giUW/sfSPvZzVLZNxA7Lp1rrqCcASzUbGDchm3IBsNu1xMXvNNr75Yy92dwCHJ4AnEEJVwekN4vQG2bbPw4+Fdl75ajPXDWnHpQNySDDV/M/wrcVlXPbWSva5
A/Rsm8xbV/STxFUclSR5jQGBkn0ABE2yjpYQQghRGy7X9v3dhg+zyzBoLa9ugzZ+seill6PbE4cPJ2PCvRjS06s87qlPN7Lwp50Y9QoX9ckiNSGO5HgjyZY4kuKNJFtOwKleROrXt6Ev/B4+uBwG3AhnPAqGptXrKiUhjttP78R1Q9oze20hby7bQmGJhxe++IMXvvij2uOSLEbuH34Mo4/PqvF4WZc3wJ97SgmHVW2uGlUlrKrahL2qyi87nMxeU8jmvfvHJ2enxjP6+CwuPD6LNrXoMp1uNTHx4l41Ll+uY0sbE4Ydw4Rhx0S3+YNhnF4tkXV4Avy83cHr32xhu93DE4s28urXm7l6YDuuGJhLovngf//tdHi49K2V7HH56JJhY9qV/bDWIvEVojmROz8GhPUqBksId0JiY4cihBBCNCnO0t3E+7RxifrDnKwJIhM2GUzRn41ZWWQ+9CDWwYOrPea97wp4Y6m2Guqzo3oysnd1M2+mQe4C+PIx+HaStlbstlUw6l1IyTns2BuaJU7P5SfmMq5/Not+3sXby7awaU/p/hVhUCmfiDUcVnF4Atzz4Qbmr9/BExd0p21qfLXnLvMFmbqigCnfbMbl/fskRFXEYtQzrHsrRvfNon9uar2PNa2tOIOOdKuJdKt2L/XJTmFs/2w+WredV7/eTH5RGc9//gdvLNvC5SfmcEL7NNqmxNMq2Vyhdbqo1Mdlb61k2z4PuWnx/Pea/iTHN62HHULUJ0leY0Bg2PEck/UJK82dGjsUIYQQoklxuvdGuw3HJR7+GM9Es4HVGV05eecGjhl7Aek334zOUn3r3Re/7ubheb8AcPeZnQ+SuEbojVpra/ZJMPd62L4WXh8MI6dA12Fa1ucvA68DfE7t3esEcxIkZYEtE3T13F1U1ZbfIOTXXuEgqAdZkxWgfG1XFAyKwohOZkZ07qadKxSAcCByvgCEAgQDPub9uI3pK//CuWkTd7+4mn+ckM2w7q3RKQoYLZCcjU9vYfrKv5j81SaKSrUm9XSrCatJj05RUBTQKQp6nYKiKKQmGBnRszXDe7SO+dZIo17H6L5tubBPFgs27GDyV5v4Y3cpk7/azOSvNgParzTDZqZtqoWslHh+3eFk894yWieZef+aAQ06q7EQsSi2/ys/SgTcTgBCcTJluRCi/rz66qvcfPPN9O/fn5UrV1ZZZs+ePUycOJGFCxeSn59PMBgkKyuLwYMHc/XVVzNo0KBo2alTp3LllVdGf9br9WRkZHDGGWfwn//8hzY1WOft4Ycf5pFHHkFRFLZu3Urbtm0r7Hc6nWRkZOD1ern55pt55ZVXACgoKKBdu3YAPPbYYzzwwAOVzn3ppZcyffp0EhISKD1glljRvO3z7It2GzYnHX4PJpvZyE8tOnLVWfez6a5zDtq9dX2hnVtnrCOswph+bbn51I41v1CXs+GGZTB7vJbAzhyrrSHrdYIaqv44nQESW0NS28irDehNEPJB0Lc/AQ3692/zl0HAAwF35D3yubxsyF/99eqJAbgQuNDA/r8+V0VeB/Bgo0+4BY+o6ThtrTn2mO5079IRnXKwOUH2wOYfj0DUR4YeOD8OzjtTZf02B99tLmavy0txqR9fKAylQCm4/4JcoEe8gbtO7ULmzi+g8pBqIRqfNQOyBzTIpSR5jQGq1wFAWJJXIUQ9ysvLIzc3l1WrVrFp0yY6dqz4h/WqVasYPnw4LpeLMWPGcMMNN2AymcjPz+ejjz5i6tSpfPPNNwwZMqTCcY8++ijt2rXD6/Xy/fffM3XqVJYvX87PP/+M2VyzVgGTycSMGTO45557KmyfM2fOQY8zm83MmDGjUvJaVlbGxx9/XOPri+ajyJRIqk+b5dVgO/xuw+Wtd6GwitsfqnZSncISN1dPW40nEOLkzi14bORxtV/3NDkbrvwUvngIvn8VPPv279MZtNZWcxLEWcFrB+cOrVXU/pf2imX6uMjLCDqj9jkyyZWK1i3Y7g6g
qoACSYqHREpJxkWyzkVPtkAA2PARbGjE73EE6YDekRegZbVVNaqHgU8bKCgh6qLTmXDp7Aa5lCSvMUD1ai2viknGvAoh6kd+fj4rVqxgzpw5XH/99eTl5fHQQw9F9+/bt4+RI0diMBj48ccf6dq1a4XjH3/8cWbOnImliu6S55xzDn379gXgmmuuIT09naeffpp58+Zx8cUX1yi+YcOGVZm8Tp8+neHDh/O///2v2uPmzJnD+vXr6dmzZ3T7xx9/jN/v5+yzz+bLL7+sUQyieeiecz17Sl4H/kBXD2Ne4+P06HUKobCKyxusMnm1u/2Mf3cVRaV+jm2VyORL+9R9HU1DHJz9JJxwk9YaakrUElajBf6eDIdDULob7IXgKATHNu2lhvYniwbTAYljnHYeY/zf3iOvA8vqDBWTTd0hvo+qai/+9q4o2rkOksgrgBVw2D3cP+cnvvljLwBtLX7u6GfhvOwAcaXbYN9WLUl3F9ftdyuEaBgtujTYpSR5jQE6v7ZUjs4iyasQon7k5eWRkpLC8OHDGTVqVKXkdcqUKezcuZOZM2dWSlxBW9pi7NixNbrW4MGDefrpp9m8eXON4xs3bhyjRo1i48aN0evv2rWLL7/8kg8++KDa5PXEE09k3bp1TJ8+vULympeXx9lnn01qamqNYxDNQ7JyHE5XC7Tk9fBnG1YUBavJgMMTwOUNkJlUsTXfFwxx3X/XRschvltfM78mtz10GZ1e6zKc2BpomC561VKUgyaoNdEm2cLUK/ux6Kdd7HR4uKRfW2yHmHlXCHF0k0VFY4ArsQPr9N3Rp7Vv7FCEEM1EXl4eF154IXFxcYwdO5Y///yT1atXR/fPnz8fi8XChRdeeNjXKigoACAlJaXGxwwZMoSsrCymT58e3TZr1iysVivDhw8/6LFjx45l5syZqKo2w2xRURGLFy9m3LhxtQ9eNHm7nV7ig16gfmYbBm3GYdDW6DxQcWTm11X5JdhMBt69sj8ZidJV/XAoisLwHq24ZnB7SVyFEIckLa8x4JRrnmnsEIQQaAvdqx5PY4cRpVgstR9DB6xdu5aNGzfy8svaGpWDBg0iKyuLvLw8+vXrB8DGjRvp0qULRmPFPxZdLhc+ny/6s8ViIeFva2c6HA6Kiorwer2sXLmSRx55BJPJxLnnnlvz76YojBkzhhkzZvDoo48C+xNuk8l00GPHjRvHE088wbfffsugQYP44IMPMJvNjBgxgk8/lYFhR5vdTi8dg9o9Wx/dhoFIEuXB5Q1Et23c5eTqqWvYbvdgMxt44x996ZIpc1UIIURDkuRVCCEiVI+H3/sc39hhRHX5YS1KfPXrIFYnLy+PjIwMTj31VEBLFC+55BLef/99nn/+efR6PU6nE2sVf+j/4x//4OOPP47+fOCMv+VOP/30Cj/n5uby/vvvk5WVVas4x40bx3PPPcfq1atJSUlh9erVPPHEE4c8rlu3bvTo0YMZM2YwaNAgpk+fzvnnn098HX5Xounb5fDSI6C1vOoS6rfltdSntbx+/utu7pi5jjJ/iNy0eN66oh8dW9bPtYQQQtScdBsWQohmJBQKMXPmTE499VTy8/PZtGkTmzZtYsCAAezevZslS5YAYLPZqlxO5tFHH+Xzzz/n888/r/YakydP5vPPP+fDDz9k2LBhFBUVVWgt9fv97Nq1q8IrFKq89Efv3r3p2rUr06dPJy8vj8zMTE477bQafc9x48Yxe/ZsNm3axIoVK6TL8FFsyj+Op096HAC6ephtGLS1XgFc3iCvfb2Z6/67hjJ/iJM6pPHRzQMlcRVCiEYiLa9CCBGhWCx0+WFtY4cRpVQx0++hfPnll9GJmGbOnFlpf15eHmeeeSZdu3Zl/fr1BAKBCl2He/Tocchr9O/fPzrb8MiRIxk0aBDjxo3j999/x2q1smLFimirb7n8/Hxyc3MrnWvcuHG89tpr2Gw2LrnkEnSHmuE0YuzYsUyYMIFrr72WtLQ0zjzzzBod
J5ofk0GPwevGT32OedX+m3jxiz/Y7dS6JP/jhBwePO/Yus8qLIQQ4rBJ8iqEEBGKotSpm24sycvLo2XLlkyePLnSvjlz5jB37lymTJnCueeey/fff8/cuXNrvLxNVfR6PU8++SSnnnoqr7zyCvfeey89e/as1HKbmZlZ5fHjxo3jwQcfZOfOnfz3v/+t8XWzs7MZOHAgX3/9NTfeeCMGg1RnR7NQmdaLoP7GvGr3026nD71O4eHzjuUfJ+bWy7mFEELUndT2QgjRTHg8HubMmcPo0aMZNWpUpf2tW7dmxowZzJs3jxtvvJGXX36ZO++8k169etG5c+cKZctn8q2JU045hf79+/Piiy9yxx13kJKSUmlcbHU6dOjAiy++iMfjoX///jW+Jmhr0X711VdccskltTpOND/h0jKg/pLXdKvWDT7JYuTVS/swsGN6vZxXCCHE4ZHkVQghmol58+bhcrkYMWJElftPOOEEWrRoQV5eHpdccglz587lvPPOo2fPnowZM4Z+/fphNBopLCxk9uzZgNbCWRP/+te/GD16NFOnTuWGG26oVdy33357rcqXO/nkkzn55JPrdKxoPtRgMDpLuC7h8Nd5Bbh0QDZ6ncK5PVqRk1Y/5xRCCHH4JHkVQohmIi8vD7PZzBlnnFHlfp1Ox/Dhw8nLy6O4uJgTTzyRn3/+mYkTJ7Jw4UJmzZpFOBymTZs2DBo0iDfeeIPBgwfX6NoXXnghHTp04LnnnuPaa69Fr9fX51cTolrhsrLoZ309Ja9pVhM3n9qxXs4lhBCi/ihqbfqGiUqcTidJSUk4HA4SExMbOxwhRA14vV7y8/Np164dZrO5scMRTcCh7hmpC+qmPn5vge3b2TT0dBSTia7rf6zfAIUQQhxxtakLZMo8IYQQQjRZoXoe7yqEECJ2SfIqhBBCiCYrHJ1pWMamCiFEcyfJqxBCCCGarHCplrzqE6TlVQghmjtJXoUQQgjRZJUnr9JtWAghmj9JXoUQQgjRZIUkeRVCiKOGJK9CCCGEaLLCLhnzKoQQRwtJXoUQRy1ZKUzUlNwrsat8wia9tLwKIUSzJ8mrEOKoYzQaAXC73Y0ciWgqyu+V8ntHxI5ot2GZsEkIIZo9Q2MHIIQQDU2v15OcnMyePXsAiI+PR1GURo5KxCJVVXG73ezZs4fk5GT0en1jhyT+JizrvAohxFFDklchxFEpMzMTIJrACnEwycnJ0XtGxJb9sw3LmFchhGjuYj55nT17NpMnT2b9+vX4/X46duzIpZdeyp133lmn7ltr167lqaeeYunSpTgcDlq1asW5557L//3f/9GyZcsj8A2EELFIURRatWpFy5YtCQQCjR2OiGFGo7HJtrgeDXVoi9tvI/mSizHl5jbK9YUQQjQcRY3hWSjuuOMOJk2ahMFg4LTTTsNqtfLll19it9sZNGgQixcvxmKx1Ph8H374IWPHjiUYDNKvXz/atWvHmjVr2LJlCxkZGSxfvpyOHTvWKkan00lSUhIOh4PExMTafkUhhBDNQCzWBVKHCiGEaApqVReoMWru3LkqoFqtVnXt2rXR7Xv37lW7d++uAupdd91V4/Nt375djY+PVwH19ddfj24PBoPqZZddpgJqv3791HA4XKs4HQ6HCqgOh6NWxwkhhGg+Yq0ukDpUCCFEU1GbuiBmZxt+4oknALj33nvp06dPdHt6ejqvvvoqAK+88goOh6NG53vxxRdxu92cfvrpXHfdddHter2e1157jaSkJFavXs3ixYvr8VsIIYQQDU/qUCGEEM1RTCav27dvZ/Xq1QCMGzeu0v5BgwbRtm1bfD4fixYtqtE5586dW+35rFYrI0aMAGDOnDl1DVsIIYRodFKHCiGEaK5iMnldt24dAKmpqbRr167KMn379q1Q9mBcLhebNm2qcNzhnE8IIYSIVVKHCiGEaK5iMnnNz88HIDs7u9oybdu2rVD2YAoKCqKfqztnbc4nhBBCxCqpQ4UQQjRXMblU
jsvlAiAhofo126yRxcidTmeNz3ewc9b0fD6fD5/PF/25fLxQTeIQQgjRPJXXAWoMTOAvdagQQoimpDZ1aEwmr7HsySef5JFHHqm0vfypsxBCiKOXy+UiKSmpscOIWVKHCiGEqE5N6tCYTF5tNhsAZWVl1ZYpLS0FqNG6cOXnKz9nVb+Ump5vwoQJ/POf/4z+HA6HKSkpIS0tDUVRDhlLVZxOJ23btqWwsFDWuRM1IveMqC25Z44sVVVxuVy0bt26sUOROlSIQ5B7RtSW3DNHVm3q0JhMXnNzcwEoLCystkz5vvKyB5OTkxP9/Ndff9G9e/c6n89kMmEymSpsS05OPmQMNZGYmCj/QYhakXtG1JbcM0dOrLS4Sh0qRM3IPSNqS+6ZI6emdWhMTtjUu3dvAIqLi6ud/GHNmjUAFdavq05iYiIdO3ascNzhnE8IIYSIVVKHCiGEaK5iMnnNysqiX79+AEyfPr3S/uXLl1NYWIjJZGLYsGE1OucFF1xQ7flKS0uZP38+ABdeeGFdwxZCCCEandShQgghmquYTF4B7rvvPgCeeuopfvjhh+j24uJibrrpJgBuueWWCk3Mc+fOpWvXrgwdOrTS+e644w7i4+P54osvePPNN6PbQ6EQN910E3a7nX79+nHmmWceqa9ULZPJxEMPPVSpK5UQ1ZF7RtSW3DNHF6lDhaie3DOituSeiR2KGgvz+lfj9ttv56WXXsJoNDJ06FASEhJYsmQJdrudgQMH8vnnn2OxWKLlp06dypVXXklOTk6FdenKzZ49m7FjxxIKhRgwYAC5ubmsXr2aLVu2kJGRwfLly6Ndo4QQQoimTOpQIYQQzU3MtrwCTJo0iVmzZnHiiSeyYsUKFi1aRFZWFk899RRffvllhUq3JkaPHs3KlSu58MIL2bJlC3PnziUUCnHzzTezfv16qXSFEEI0G1KHCiGEaG5iuuVVCCGEEEIIIYSAGG95FUIIIYQQQgghQJLXRjd79mxOOeUUUlJSSEhIoGfPnjzzzDMEAoHGDk00sEAgwJIlS/jXv/5Fv379SE5Oxmg0kpmZyYgRI1i4cOFBj//iiy8YNmwY6enpWCwWunbtyv33309paWkDfQMRC+655x4URUFRFB5//PFqy8n9IpoDqUNFOalDRX2QOrQJUEWjuf3221VANRgM6plnnqleeOGFanJysgqogwYNUt1ud2OHKBrQ559/rgIqoGZmZqrDhw9XL774YvW4446Lbr/uuuvUcDhc6diJEyeqgKooijpkyBB19OjRamZmpgqoXbp0Uffu3dsI30g0tG+//VbV6XSqoigqoD722GNVlpP7RTQHUoeKA0kdKg6X1KFNgySvjWTu3LkqoFqtVnXt2rXR7Xv37lW7d++uAupdd93ViBGKhrZkyRL1oosuUpcuXVpp38yZM1W9Xq8C6rRp0yrs++GHH1RFUVS9Xq8uWrQour2srEwdOnSoCqgXXXTREY9fNK6ysjK1U6dOaps2bdSRI0dWW/HK/SKaA6lDxd9JHSoOh9ShTYckr42kX79+KqA+/vjjlfYtW7ZMBVSTyaTa7fZGiE7EoquvvloF1KFDh1bYPnr0aBVQr7nmmkrHFBQUqDqdTgXU3377raFCFY3gtttuUwF14cKF6hVXXFFtxSv3i2gOpA4VtSV1qDgYqUObDhnz2gi2b9/O6tWrARg3blyl/YMGDaJt27b4fD4WLVrU0OGJGNW7d28ACgsLo9v8fn90HE9V91JOTg4DBw4EYO7cuQ0QpWgMX3/9NS+//DKXX345w4YNq7ac3C+iOZA6VNSF1KGiOlKHNi2SvDaCdevWAZCamkq7du2qLNO3b98KZYX4888/AWjVqlV02x9//IHb7Qb23zN/J/dS81ZaWspVV11FRkYGL7744kHLyv0imgOpQ0VdSB0qqiJ1aNNjaOwAjkb5+fkAZGdnV1umbdu2FcqKo9uuXbuYOnUqABdddFF0e/n9kZycjM1mq/JYuZea
t7vvvpv8/Hzmzp1LSkrKQcvK/SKaA6lDRW1JHSqqI3Vo0yMtr43A5XIBkJCQUG0Zq9UKgNPpbJCYROwKBoNcdtllOBwOunfvzvXXXx/dJ/fS0W3x4sW8/vrrjBkzhpEjRx6yvNwvojmQ+1jUhtShojpShzZNkrwKEeNuuOEGlixZQlpaGh9++CFxcXGNHZKIAQ6Hg6uvvpoWLVrw8ssvN3Y4QggRk6QOFVWROrTpkm7DjaC8q0FZWVm1ZcoXOU5MTGyQmERsuv3223n77bdJSUnh888/p3PnzhX2y7109LrjjjvYtm0bs2bNIj09vUbHyP0imgO5j0VNSR0qqiN1aNMlyWsjyM3NBSrOePd35fvKy4qjz1133cVLL71EcnIyixcvjs6UeKDy+8Nut+NyuaocgyH3UvM0d+5cDAYDr776Kq+++mqFfRs3bgTg7bff5osvviAzM5OZM2fK/SKaBalDRU1IHSoORurQpkuS10ZQ/j/Q4uJi8vPzq5wtcc2aNQD06dOnQWMTseGee+5h4sSJJCUlsXjx4mpntevSpQvx8fG43W7WrFnDqaeeWqmM3EvNVzAY5Jtvvql2f0FBAQUFBeTk5AByv4jmQepQcShSh4qakDq0aZIxr40gKyuLfv36ATB9+vRK+5cvX05hYSEmk+mg602J5unee+/l2WefJSkpic8//zx6r1QlLi6O4cOHA1XfS1u3bmXFihUAXHDBBUcmYNEo7HY7qqpW+briiisAeOyxx1BVlYKCAkDuF9E8SB0qDkbqUFETUoc2YapoFHPnzlUB1Wq1qmvXro1uLyoqUrt3764C6l133dWIEYrGcP/996uAmpycrK5atapGx6xdu1ZVFEXV6/XqJ598Et1eVlamDh06VAXUiy666EiFLGLQFVdcoQLqY489Vmmf3C+iOZA6VFRF6lBRH6QOjW2Kqqpq46TN4vbbb+ell17CaDQydOhQEhISWLJkCXa7nYEDB/L5559jsVgaO0zRQObNm8f5558PaItcd+vWrcpy6enpPPfccxW2vfDCC/zzn/9EURROPvlkWrZsybJly9i5cyddunRh+fLlNZ6QQDR948ePZ9q0aTz22GM88MADlfbL/SKaA6lDxYGkDhX1RerQGNfY2fPRbtasWeqQIUPUxMRE1WKxqMcdd5z61FNPqT6fr7FDEw3s3XffVYFDvnJycqo8/vPPP1fPPvtsNTU1VTWZTGqnTp3UCRMmqE6ns2G/iGh0B3tqXE7uF9EcSB0qykkdKuqL1KGxTVpehRBCCCGEEELEPJmwSQghhBBCCCFEzJPkVQghhBBCCCFEzJPkVQghhBBCCCFEzJPkVQghhBBCCCFEzJPkVQghhBBCCCFEzJPkVQghhBBCCCFEzJPkVQghhBBCCCFEzJPkVQghhBBCCCFEzJPkVYgmLjc3F0VRKrxMJhNZWVmcf/75LFiwoLFDPGLGjx+PoihMnTq1sUMRQgjRBEkdKnWoaFoMjR2AEKJ+DBw4kI4dOwLgcDhYt24d8+bNY968edx5551MnDixkSMUQgghYpPUoUI0DZK8CtFMXHPNNYwfPz76czAY5M477+SVV17hhRdeYOzYsfTr16/xAhRCCCFilNShQjQN0m1YiGbKYDDw7LPPkpiYCMD8+fMbOSIhhBCiaZA6VIjYJMmrEM2Y2WymU6dOAOzevbvCvmAwyJQpUzjppJNISkqKlr3tttvYvn17pXMVFBSgKAq5ubnVXq987FBBQUG127/66ivOPPNMUlJSsFgs9OnTh/fee6/ac5aUlHDHHXeQk5ODyWQiOzubW265hZKSkpr/IoQQQohakjpUiNgjyasQzZzT6QQgIyMjus3n83HOOedw4403sm7dOgYOHMjIkSPx+Xy8/PLL9OrVix9++KHeY3nnnXcYOnQoJSUlnH322fTq1Yt169ZxxRVX8OKLL1Yqv3v3bk444QQmTZqEy+Xi3HPP5fjjjycvL4/+/fuzb9++eo9R
CCGEKCd1qBAxRhVCNGk5OTkqoL777ruV9v3666+qXq9XAXX16tXR7f/+979VQO3QoYOan58f3e73+9Wrr75aBdR27dqpPp8vui8/P18F1JycnEPGcuA5D9xuNBrV+fPnV9j37rvvqoCalJSkut3uCvtGjRqlAurgwYNVu90e3V5cXKwOGDBABar97kIIIcShSB0qdahoWqTlVYhmyOFwsHjxYi688EJCoRAPPPAAffv2BcDr9TJ58mQAXnjhhQpdmIxGIy+99BIZGRnk5+fz4Ycf1mtct956K+eee26FbePHj6dr1644HA7WrFkT3V5YWMicOXNQFIUpU6aQlJQU3ZeamsqUKVPqNTYhhBACpA4VIpZJ8ipEM3HllVdG16hLTk7mrLPO4s8//+T999/nsccei5Zbs2YNpaWlpKamct5551U6T3x8PGPGjAHgq6++qtcYq7oewDHHHANQYZzQ0qVLCYfD9OnTh2OPPbbSMb169aJHjx71Gp8QQoijk9ShQjQNslSOEM3EgWvU7d27l2XLluFyubjxxhvp1KkT/fv3B/ZXbu3atav2XB06dKhQtr5kZ2dXub18Nkev1xvdtm3bNuDgcbZr144NGzbUY4RCCCGORlKHCtE0SPIqRDPx9zXqHA4HF1xwAV999RUXX3wxv/76K/Hx8Uc0hnA4fND9Op109hBCCBF7pA4VommQ/wqEaKaSkpKYNWsWqampbN26lYkTJwLQpk0bAPLz86s9dsuWLRXKAsTFxQHgcrmqPCYQCLBz5856if3Aa/99yYADHWyfEEIIUVdShwoRmyR5FaIZa9GiBQ888AAAzz33HHa7nb59+2K1WikpKWHevHmVjvF4PMycOROAU089tcK54uLiKCkpYc+ePZWO++yzzwgGg/UW+5AhQ1AUhR9++IGNGzdW2r9+/Xrp7iSEEOKIkTpUiNgjyasQzdxNN91EdnY2DoeD559/HrPZzM033wzAXXfdxdatW6NlA4EAt99+O7t27aJdu3aMGjUqus9oNDJkyBAAHnjggQrdm9avX88tt9xSr3FnZ2dzwQUXEA6HufHGG6Nr7QHs27ePm266CVVV6/WaQgghxIGkDhUitkjyKkQzZzKZePjhhwGYNGkSJSUlPPLIIwwdOpRNmzZxzDHHMHz4cMaMGUPHjh158803SUtLY/bs2dFuTuUef/xx4uLiePPNNznmmGMYPXo0J510Ev369eOUU04hJyenXmOfPHkyHTp04Ouvv6Zdu3ZcdNFFXHjhhbRv357du3czYsSIer2eEEIIcSCpQ4WILZK8CnEUuPzyyzn22GNxuVw8++yzmEwmPv30U1599VV69uzJsmXLmDt3LkajkVtvvZX169dz/PHHVzrPgAED+OabbzjzzDPZtWsXCxcuxO12M2nSJN599916jzszM5OVK1dy6623Eh8fz4IFC1i9ejVjxozh+++/JyUlpd6vKYQQQhxI6lAhYoeiSp8BIYQQQgghhBAxTlpehRBCCCGEEELEPElehRBCCCGEEELEPElehRBCCCGEEELEPElehRBCCCGEEELEPElehRBCCCGEEELEPElehRBCCCGEEELEPElehRBCCCGEEELEPElehRBCCCGEEELEPElehRBCCCGEEELEPElehRBCCCGEEELEPElehRBCCCGEEELEPElehRBCCCGEEELEPElehRBCCCGEEELEvP8HHigTh2m+CKAAAAAASUVORK5CYII=","text/plain":["
"]},"metadata":{},"output_type":"display_data"}],"source":["# Figure 3\n","num_plots = 2\n","plt.style.use('default')\n","fig, axs = plt.subplots(1, num_plots, figsize=(11, 4))\n","\n","data = [df_mnist_acc_flanders, df_mnist_acc_fedavg]\n","\n","for i in range(num_plots):\n"," if i == 0:\n"," acc = df_no_attack[df_no_attack[\"attack_fn\"]=='GAUSS']['accuracy'].to_list()\n"," axs[i].plot(acc, label=\"No Attack\", linestyle='--', color='slategray')\n"," for attack in ['GAUSS', 'LIE', 'OPT', 'AGR-MM']:\n"," acc = data[i][data[i]['attack_fn']==attack]['accuracy'].to_list()\n"," x = [i for i in range(len(data))]\n"," axs[i].plot(acc, label=attack)\n"," axs[i].set_ylim((0,1.0))\n"," axs[i].set_xlabel('Round', fontsize=16)\n"," axs[i].set_ylabel('Accuracy', fontsize=16)\n"," axs[i].legend(prop={'size': 12})\n"," axs[i].tick_params(axis='both', which='major', labelsize=16)\n"," axs[i].tick_params(axis='both', which='minor', labelsize=16)\n","\n","plt.show()"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":[]}],"metadata":{"colab":{"authorship_tag":"ABX9TyODCHCYl18UhHkKwq6LlRvG","collapsed_sections":["P_3Z05w0wvNB","dE_uqUeuyl6M","9vVX6wsxT-rc","RctMDJMZyPq2","R9VNz7Cv9RHn","6V4padUiYeac","pZ863s6JJbph","EJDtdXqLJX0H"],"provenance":[]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.9.18"}},"nbformat":4,"nbformat_minor":0} diff --git a/baselines/flanders/pyproject.toml b/baselines/flanders/pyproject.toml new file mode 100644 index 000000000000..416247f9c7bb --- /dev/null +++ b/baselines/flanders/pyproject.toml @@ -0,0 +1,151 @@ +[build-system] +requires = ["poetry-core>=1.4.0"] +build-backend = "poetry.masonry.api" + +[tool.poetry] +name = "flanders" +version = "1.0.0" +description = "FLANDERS" +license = "Apache-2.0" +authors = 
["Edoardo Gabrielli "] +readme = "README.md" +homepage = "https://flower.dev" +repository = "https://github.com/adap/flower" +documentation = "https://flower.dev" +classifiers = [ + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: Apache Software License", + "Operating System :: MacOS :: MacOS X", + "Operating System :: POSIX :: Linux", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: Implementation :: CPython", + "Topic :: Scientific/Engineering", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: Scientific/Engineering :: Mathematics", + "Topic :: Software Development", + "Topic :: Software Development :: Libraries", + "Topic :: Software Development :: Libraries :: Python Modules", + "Typing :: Typed", +] + +[tool.poetry.dependencies] +python = ">=3.10, <3.12.0" +hydra-core = "1.3.2" # don't change this +flwr = {extras = ["simulation"], version = "1.6.0" } +torch = [ + { platform = "darwin", version = "2.1.1" }, + { platform = "linux", url = "https://download.pytorch.org/whl/cu118/torch-2.1.1%2Bcu118-cp310-cp310-linux_x86_64.whl" } + ] +torchvision = [ + { platform = "darwin", version = "0.16.1"}, + { platform = "linux", url = "https://download.pytorch.org/whl/cu118/torchvision-0.16.1%2Bcu118-cp310-cp310-linux_x86_64.whl" } + ] +pandas = "^2.1.3" +scikit-learn = "1.3.2" +ipykernel = "^6.27.1" +natsort = "^8.4.0" +seaborn = "^0.13.0" + +[tool.poetry.dev-dependencies] +isort = "==5.11.5" +black = "==23.1.0" +docformatter = "==1.5.1" +mypy = "==1.4.1" +pylint = "==2.8.2" +flake8 = "==3.9.2" +pytest = "==6.2.4" +pytest-watch = "==4.2.0" +ruff = "==0.0.272" 
+types-requests = "==2.27.7" +virtualenv = "20.21.0" + +[tool.isort] +line_length = 88 +indent = " " +multi_line_output = 3 +include_trailing_comma = true +force_grid_wrap = 0 +use_parentheses = true + +[tool.black] +line-length = 88 +target-version = ["py38", "py39", "py310", "py311"] + +[tool.pytest.ini_options] +minversion = "6.2" +addopts = "-qq" +testpaths = [ + "flwr_baselines", +] + +[tool.mypy] +ignore_missing_imports = true +strict = false +plugins = "numpy.typing.mypy_plugin" + +[tool.pylint."MESSAGES CONTROL"] +disable = "bad-continuation,duplicate-code,too-few-public-methods,useless-import-alias" +good-names = "i,j,k,_,x,y,X,Y" +signature-mutators="hydra.main.main" + +[tool.pylint.typecheck] +generated-members="numpy.*, torch.*, tensorflow.*" + +[[tool.mypy.overrides]] +module = [ + "importlib.metadata.*", + "importlib_metadata.*", +] +follow_imports = "skip" +follow_imports_for_stubs = true +disallow_untyped_calls = false + +[[tool.mypy.overrides]] +module = "torch.*" +follow_imports = "skip" +follow_imports_for_stubs = true + +[tool.docformatter] +wrap-summaries = 88 +wrap-descriptions = 88 + +[tool.ruff] +target-version = "py38" +line-length = 88 +select = ["D", "E", "F", "W", "B", "ISC", "C4"] +fixable = ["D", "E", "F", "W", "B", "ISC", "C4"] +ignore = ["B024", "B027"] +exclude = [ + ".bzr", + ".direnv", + ".eggs", + ".git", + ".hg", + ".mypy_cache", + ".nox", + ".pants.d", + ".pytype", + ".ruff_cache", + ".svn", + ".tox", + ".venv", + "__pypackages__", + "_build", + "buck-out", + "build", + "dist", + "node_modules", + "venv", + "proto", +] + +[tool.ruff.pydocstyle] +convention = "numpy" diff --git a/baselines/flanders/run.sh b/baselines/flanders/run.sh new file mode 100644 index 000000000000..435c358c4ee7 --- /dev/null +++ b/baselines/flanders/run.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +python -m flanders.main --multirun server.num_rounds=50 dataset=mnist strategy=flanders aggregate_fn=fedavg,trimmedmean,fedmedian,krum,bulyan server.pool_size=100 
server.num_malicious=0,20,60,80 server.attack_fn=gaussian,lie,fang,minmax server.warmup_rounds=2 client_resources.num_cpus=0.1 client_resources.num_gpus=0.1 + +python -m flanders.main --multirun server.num_rounds=50 dataset=mnist strategy=fedavg,trimmedmean,fedmedian,krum,bulyan server.pool_size=100 server.num_malicious=0,20,60,80 server.attack_fn=gaussian,lie,fang,minmax server.warmup_rounds=2 client_resources.num_cpus=0.1 client_resources.num_gpus=0.1 + +python -m flanders.main --multirun server.num_rounds=50 dataset=fmnist strategy=flanders aggregate_fn=fedavg,trimmedmean,fedmedian,krum,bulyan server.pool_size=100 server.num_malicious=0,20,60,80 server.attack_fn=gaussian,lie,fang,minmax server.warmup_rounds=2 client_resources.num_cpus=0.1 client_resources.num_gpus=0.1 + +python -m flanders.main --multirun server.num_rounds=50 dataset=fmnist strategy=fedavg,trimmedmean,fedmedian,krum,bulyan server.pool_size=100 server.num_malicious=0,20,60,80 server.attack_fn=gaussian,lie,fang,minmax server.warmup_rounds=2 client_resources.num_cpus=0.1 client_resources.num_gpus=0.1 \ No newline at end of file diff --git a/benchmarks/flowertune-llm/README.md b/benchmarks/flowertune-llm/README.md new file mode 100644 index 000000000000..c3e1b2b7dd53 --- /dev/null +++ b/benchmarks/flowertune-llm/README.md @@ -0,0 +1,68 @@ +[![FlowerTune LLM Leaderboard](_static/flower_llm.png)](https://flower.ai/benchmarks/llm-leaderboard) + +# FlowerTune LLM Leaderboard + +This repository guides you through the process of federated LLM instruction tuning with a +pre-trained [Mistral-7B](https://huggingface.co/mistralai/Mistral-7B-v0.3) model across 4 domains --- general NLP, finance, medical and code. + +Please follow the instructions to run and evaluate the federated LLMs. + +## Create a new project + +As the first step, please register for a Flower account on [flower.ai/login](https://flower.ai/login). +Then, create a new Python environment and install Flower. 
+ +> [!TIP] +> We recommend using `pyenv` with the `virtualenv` plugin to create your environment with Python >= 3.10.0. Other managers, such as Conda, will likely work as well. Check the [documentation](https://flower.ai/docs/framework/how-to-install-flower.html) for alternative ways to install Flower. + +```shell +pip install flwr +``` + +In the new environment, create a new Flower project using the `FlowerTune` template. You will be prompted for a name to give to your app/project, your username, and for your choice of LLM challenge: +```shell +flwr new --framework=FlowerTune +``` + +The `flwr new` command will generate a directory with the following structure: + +```bash + +├── README.md # Instructions +├── pyproject.toml # Environment dependencies and configs +└── + ├── __init__.py + ├── client_app.py # Flower ClientApp build + ├── dataset.py # Dataset and tokenizer build + ├── models.py # Model build + ├── server_app.py # Flower ServerApp build + └── strategy.py # Flower strategy build +``` + +This can serve as the starting point for you to build up your own federated LLM fine-tuning methods. + +> [!IMPORTANT] +> Please note that if you intend to submit your project as an entry to the [LLM Leaderboard](https://flower.ai/benchmarks/llm-leaderboard) modifications to the `[tool.flwr.app.config.static]` section and `options.num-supernodes` under the `[tool.flwr.federations.local-simulation]` section in the `pyproject.toml` are not allowed and will invalidate the submission. + + +## Run FlowerTune LLM challenges + +With a new project directory created, running a baseline challenge can be done by: + +1. Navigate inside the directory that you just created. + + +2. Follow the `Environments setup` section of `README.md` in the project directory to install the project dependencies. + + +3. Run the challenge as indicated in the `Running the challenge` section in the `README.md`. 
+ +## Evaluate fine-tuned LLMs + +Once the LLM fine-tuning has finished, evaluate the performance of your fine-tuned LLM +following the `README.md` in the [`evaluation`](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm/evaluation) directory. + + +> [!NOTE] +> If you have any questions about running FlowerTune LLM challenges or evaluation, please feel free to make posts at our dedicated [FlowerTune Category](https://discuss.flower.ai/c/flowertune-llm-leaderboard/) on [Flower Discuss](https://discuss.flower.ai) forum, +or join our [Slack channel](https://flower.ai/join-slack/) to ask questions in the `#flowertune-llm-leaderboard` channel. diff --git a/benchmarks/flowertune-llm/_static/flower_llm.png b/benchmarks/flowertune-llm/_static/flower_llm.png new file mode 100644 index 000000000000..e9a0ba3bf30e Binary files /dev/null and b/benchmarks/flowertune-llm/_static/flower_llm.png differ diff --git a/benchmarks/flowertune-llm/evaluation/README.md b/benchmarks/flowertune-llm/evaluation/README.md new file mode 100644 index 000000000000..e8ac82d1ccee --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/README.md @@ -0,0 +1,49 @@ +# FlowerTune LLM Evaluation + +This directory provides various evaluation metrics to assess the quality of your fine-tuned LLMs. +If you are participating in the [LLM Leaderboard](https://flower.ai/benchmarks/llm-leaderboard), evaluating your fine-tuned LLM is the final step prior to having your submission added to the [LLM Leaderboard](https://flower.ai/benchmarks/llm-leaderboard#how-to-participate). The evaluation scores generated here will be displayed as the definitive values on the LLM Leaderboard.
+ +## How to run + +Navigate to the directory corresponding to your selected challenge ([`general NLP`](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm/evaluation/general-nlp), [`finance`](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm/evaluation/finance), [`medical`](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm/evaluation/medical), or [`code`](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm/evaluation/code)) and follow the instructions there to execute the evaluation. + +> [!NOTE] +> If you wish to participate in the LLM Leaderboard, you must not modify the evaluation code and should use the exact command provided in the respective directory to run the evaluation. + + +## Baseline results + +The default template generated by `flwr new` (see the [Project Creation Instructions](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm#create-a-new-project)) for each challenge will produce results as follows, which serve as the lower bound on the LLM Leaderboard. + +### General NLP + +| | STEM | SS | Humanities | Avg | +|:-------:|:-----:|:-----:|:----------:|:-----:| +| Acc (%) | 12.37 | 13.49 | 12.60 | 12.82 | + +### Finance + +| | FPB | FIQA | TFNS | Avg | +|:-------:|:-----:|:-----:|:-----:|:-----:| +| Acc (%) | 44.55 | 62.50 | 28.77 | 45.27 | + +### Medical + +| | PubMedQA | MedMCQA | MedQA | Avg | +|:-------:|:--------:|:-------:|:-----:|:-----:| +| Acc (%) | 59.00 | 23.69 | 27.10 | 36.60 | + +### Code + +| | MBPP | HumanEval | MultiPL-E (JS) | MultiPL-E (C++) | Avg | +|:----------:|:-----:|:---------:|:--------------:|:---------------:|:-----:| +| Pass@1 (%) | 31.60 | 23.78 | 28.57 | 25.47 | 27.36 | + +> [!NOTE] +> In the LLM Leaderboard, we rank the submissions based on the **average** value derived from different evaluation datasets for each challenge. 
+ + +## Make submission on FlowerTune LLM Leaderboard + +If your LLM outperforms the listed benchmarks in any challenge, +we encourage you to submit your code and model to the FlowerTune LLM Leaderboard without hesitation (see the [How-to-participate Instructions](https://flower.ai/benchmarks/llm-leaderboard#how-to-participate)). diff --git a/benchmarks/flowertune-llm/evaluation/code/README.md b/benchmarks/flowertune-llm/evaluation/code/README.md new file mode 100644 index 000000000000..fd63ced2f1e2 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/code/README.md @@ -0,0 +1,70 @@ +# Evaluation for Code challenge + +We leverage the code generation evaluation metrics provided by [bigcode-evaluation-harness](https://github.com/bigcode-project/bigcode-evaluation-harness/tree/main) to evaluate our fine-tuned LLMs. +Three datasets have been selected for this evaluation: [MBPP](https://huggingface.co/datasets/google-research-datasets/mbpp) (Python), [HumanEval](https://huggingface.co/datasets/openai/openai_humaneval) (Python), and [MultiPL-E](https://github.com/nuprl/MultiPL-E) (JavaScript, C++). + +> [!WARNING] +> The evaluation process requires ~30 GB of VRAM. On a 40 GB A100 it takes 15–30 minutes to complete, depending on the dataset.
+ +## Environment Setup + +```shell +git clone --depth=1 https://github.com/adap/flower.git && mv flower/benchmarks/flowertune-llm/evaluation/code ./flowertune-eval-code && rm -rf flower && cd flowertune-eval-code +``` + +Create a new Python environment (we recommend Python 3.10), activate it, then install dependencies with: + +```shell +# From a new python environment, run: +pip install -r requirements.txt + +# Log in HuggingFace account +huggingface-cli login +``` + +After that, install `Node.js` and `g++` for the evaluation of JavaScript, C++: + +```shell +# Install nvm (Node Version Manager) +curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash + +# Restart your terminal + +# Download and install Node.js (you may need to restart the terminal) +nvm install 20 + +# Install g++ +sudo apt-get install g++ +``` + +Then, download the `main.py` script from `bigcode-evaluation-harness` repository. + +```shell +git clone https://github.com/bigcode-project/bigcode-evaluation-harness.git && cd bigcode-evaluation-harness && git checkout 0f3e95f0806e78a4f432056cdb1be93604a51d69 && mv main.py ../ && cd .. && rm -rf bigcode-evaluation-harness +``` + + +## Generate model answers & calculate pass@1 score + +> [!NOTE] +> Evaluation needs to be run on MBPP, HumanEval, MultiPL-E (JS) and MultiPL-E (C++). + +```bash +python main.py \ +--model=mistralai/Mistral-7B-v0.3 \ +--peft_model=/path/to/fine-tuned-peft-model-dir/ \ # e.g., ./peft_1 +--max_length_generation=1024 \ # change to 2048 when running mbpp +--batch_size=4 \ +--use_auth_token \ +--allow_code_execution \ +--save_generations \ +--save_references \ +--tasks=humaneval \ # chosen from [mbpp, humaneval, multiple-js, multiple-cpp] +--metric_output_path=./evaluation_results_humaneval.json # change dataset name based on your choice +``` + +The model answers and pass@1 scores will be saved to `generations_{dataset_name}.json` and `evaluation_results_{dataset_name}.json`, respectively. 
+ + +> [!NOTE] +> Please ensure that you provide all **four pass@1 scores** for the evaluation datasets when submitting to the LLM Leaderboard (see the [`Make Submission`](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm/evaluation#make-submission-on-flowertune-llm-leaderboard) section). diff --git a/benchmarks/flowertune-llm/evaluation/code/requirements.txt b/benchmarks/flowertune-llm/evaluation/code/requirements.txt new file mode 100644 index 000000000000..9c9e3f8e27a1 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/code/requirements.txt @@ -0,0 +1,8 @@ +peft==0.6.2 +datasets==2.20.0 +evaluate==0.3.0 +sentencepiece==0.2.0 +protobuf==5.27.1 +bitsandbytes==0.43.1 +hf_transfer==0.1.8 +git+https://github.com/bigcode-project/bigcode-evaluation-harness.git@0f3e95f0806e78a4f432056cdb1be93604a51d69 diff --git a/benchmarks/flowertune-llm/evaluation/finance/README.md b/benchmarks/flowertune-llm/evaluation/finance/README.md new file mode 100644 index 000000000000..b5595433a238 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/finance/README.md @@ -0,0 +1,40 @@ +# Evaluation for Finance challenge + +We build a sentiment classification pipeline on finance-related text to evaluate our fine-tuned LLMs. +Three datasets have been selected for this evaluation: [FPB](https://huggingface.co/datasets/takala/financial_phrasebank), [FIQA](https://huggingface.co/datasets/pauri32/fiqa-2018), and [TFNS](https://huggingface.co/datasets/zeroshot/twitter-financial-news-sentiment). 
+ + +## Environment Setup + +```shell +git clone --depth=1 https://github.com/adap/flower.git && mv flower/benchmarks/flowertune-llm/evaluation/finance ./flowertune-eval-finance && rm -rf flower && cd flowertune-eval-finance +``` + +Create a new Python environment (we recommend Python 3.10), activate it, then install dependencies with: + +```shell +# From a new python environment, run: +pip install -r requirements.txt + +# Log in HuggingFace account +huggingface-cli login +``` + +## Generate model decision & calculate accuracy + +> [!NOTE] +> Please ensure that you use `quantization=4` to run the evaluation if you wish to participate in the LLM Leaderboard. + +```bash +python eval.py \ +--peft-path=/path/to/fine-tuned-peft-model-dir/ \ # e.g., ./peft_1 +--run-name=fl \ # specified name for this run +--batch-size=32 \ +--quantization=4 \ +--datasets=fpb,fiqa,tfns +``` + +The model answers and accuracy values will be saved to `benchmarks/generation_{dataset_name}_{run_name}.jsonl` and `benchmarks/acc_{dataset_name}_{run_name}.txt`, respectively. + +> [!NOTE] +> Please ensure that you provide all **three accuracy values (FPB, FIQA, TFNS)** for three evaluation datasets when submitting to the LLM Leaderboard (see the [`Make Submission`](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm/evaluation#make-submission-on-flowertune-llm-leaderboard) section). 
diff --git a/benchmarks/flowertune-llm/evaluation/finance/benchmarks.py b/benchmarks/flowertune-llm/evaluation/finance/benchmarks.py new file mode 100644 index 000000000000..2b1a174e571f --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/finance/benchmarks.py @@ -0,0 +1,135 @@ +import torch +from sklearn.metrics import accuracy_score +from tqdm import tqdm +from utils import ( + add_instruct, + change_target, + format_example, + generate_label, + load_data, + save_results, +) + + +def infer_fiqa(model, tokenizer, batch_size, run_name): + name = "fiqa" + dataset = load_data("pauri32/fiqa-2018", concat=True) + + # Post process + dataset["output"] = dataset.sentiment_score.apply(generate_label) + dataset["instruction"] = dataset.apply(add_instruct, axis=1) + dataset = dataset[["sentence", "output", "instruction"]] + dataset.columns = ["input", "output", "instruction"] + + dataset[["context", "target"]] = dataset.apply( + format_example, axis=1, result_type="expand" + ) + + # Print example + print(f"\n\nPrompt example:\n{dataset['context'][0]}\n\n") + + # Run inference + dataset, acc = inference(dataset, model, tokenizer, batch_size) + + # Save results and generations + save_results(name, run_name, dataset, acc) + + +def infer_fpb(model, tokenizer, batch_size, run_name): + name = "fpb" + dataset = load_data("takala/financial_phrasebank", "sentences_50agree") + + # Post process + dataset.columns = ["input", "output"] + dic = {0: "negative", 1: "neutral", 2: "positive"} + dataset["output"] = dataset["output"].apply(lambda x: dic[x]) + + dataset["instruction"] = ( + "What is the sentiment of this news? Please choose an answer from {negative/neutral/positive}." 
+ ) + dataset[["context", "target"]] = dataset.apply( + format_example, axis=1, result_type="expand" + ) + + # Print example + print(f"\n\nPrompt example:\n{dataset['context'][0]}\n\n") + + # Run inference + dataset, acc = inference(dataset, model, tokenizer, batch_size) + + # Save results and generations + save_results(name, run_name, dataset, acc) + + +def infer_tfns(model, tokenizer, batch_size, run_name): + name = "tfns" + dataset = load_data( + "zeroshot/twitter-financial-news-sentiment", valid_set="validation" + ) + + # Post process + dic = {0: "negative", 1: "positive", 2: "neutral"} + dataset["label"] = dataset["label"].apply(lambda x: dic[x]) + + dataset["instruction"] = ( + "What is the sentiment of this tweet? Please choose an answer from {negative/neutral/positive}." + ) + + dataset.columns = ["input", "output", "instruction"] + dataset[["context", "target"]] = dataset.apply( + format_example, axis=1, result_type="expand" + ) + + # print example + print(f"\n\nPrompt example:\n{dataset['context'][0]}\n\n") + + # Run inference + dataset, acc = inference(dataset, model, tokenizer, batch_size) + + # Save results and generations + save_results(name, run_name, dataset, acc) + + +def inference(dataset, model, tokenizer, batch_size): + context = dataset["context"].tolist() + + last_batch = dataset.shape[0] % batch_size + total_steps = dataset.shape[0] // batch_size + 1 + print( + f"Total len: {len(context)}. Batch size: {batch_size}. 
Total steps: {total_steps}" + ) + + out_text_list = [] + for i in tqdm(range(total_steps)): + idx_s = i * batch_size + tmp_context = ( + context[idx_s : idx_s + last_batch] + if i == total_steps - 1 + else context[idx_s : idx_s + batch_size] + ) + + if tmp_context: + tokens = tokenizer( + tmp_context, + return_tensors="pt", + padding=True, + max_length=512, + return_token_type_ids=False, + ) + for k in tokens.keys(): + tokens[k] = tokens[k].cuda() + res = model.generate( + **tokens, max_length=512, eos_token_id=tokenizer.eos_token_id + ) + res_sentences = [tokenizer.decode(i, skip_special_tokens=True) for i in res] + out_text = [o.split("Answer: ")[1] for o in res_sentences] + out_text_list += out_text + torch.cuda.empty_cache() + + dataset["out_text"] = out_text_list + dataset["new_target"] = dataset["target"].apply(change_target) + dataset["new_out"] = dataset["out_text"].apply(change_target) + + acc = accuracy_score(dataset["new_target"], dataset["new_out"]) + + return dataset, acc diff --git a/benchmarks/flowertune-llm/evaluation/finance/eval.py b/benchmarks/flowertune-llm/evaluation/finance/eval.py new file mode 100644 index 000000000000..3e85b2fe21af --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/finance/eval.py @@ -0,0 +1,64 @@ +import argparse + +import torch +from peft import PeftModel +from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig + +from benchmarks import infer_fiqa, infer_fpb, infer_tfns + +# Fixed seed +torch.manual_seed(2024) + +parser = argparse.ArgumentParser() +parser.add_argument( + "--base-model-name-path", type=str, default="mistralai/Mistral-7B-v0.3" +) +parser.add_argument("--run-name", type=str, default="fl") +parser.add_argument("--peft-path", type=str, default=None) +parser.add_argument("--datasets", type=str, default="fpb") +parser.add_argument("--batch-size", type=int, default=32) +parser.add_argument("--quantization", type=int, default=4) +args = parser.parse_args() + + +# Load model and 
tokenizer +if args.quantization == 4: + quantization_config = BitsAndBytesConfig(load_in_4bit=True) + torch_dtype = torch.float32 +elif args.quantization == 8: + quantization_config = BitsAndBytesConfig(load_in_8bit=True) + torch_dtype = torch.float16 +else: + raise ValueError( + f"Use 4-bit or 8-bit quantization. You passed: {args.quantization}/" + ) + +model = AutoModelForCausalLM.from_pretrained( + args.base_model_name_path, + quantization_config=quantization_config, + torch_dtype=torch_dtype, +) +if args.peft_path is not None: + model = PeftModel.from_pretrained( + model, args.peft_path, torch_dtype=torch_dtype + ).to("cuda") + +tokenizer = AutoTokenizer.from_pretrained(args.base_model_name_path) + +if not tokenizer.pad_token or tokenizer.pad_token_id == tokenizer.eos_token_id: + tokenizer.add_special_tokens({"pad_token": "[PAD]"}) + model.resize_token_embeddings(len(tokenizer)) + + +# Evaluate +model = model.eval() +with torch.no_grad(): + for dataset in args.datasets.split(","): + if dataset == "fpb": + infer_fpb(model, tokenizer, args.batch_size, args.run_name) + elif dataset == "fiqa": + infer_fiqa(model, tokenizer, args.batch_size, args.run_name) + elif dataset == "tfns": + infer_tfns(model, tokenizer, args.batch_size, args.run_name) + else: + raise ValueError("Undefined Dataset.") diff --git a/benchmarks/flowertune-llm/evaluation/finance/requirements.txt b/benchmarks/flowertune-llm/evaluation/finance/requirements.txt new file mode 100644 index 000000000000..89dcf40b819f --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/finance/requirements.txt @@ -0,0 +1,7 @@ +peft==0.6.2 +scikit-learn==1.5.0 +datasets==2.20.0 +sentencepiece==0.2.0 +protobuf==5.27.1 +bitsandbytes==0.43.1 +hf_transfer==0.1.8 diff --git a/benchmarks/flowertune-llm/evaluation/finance/utils.py b/benchmarks/flowertune-llm/evaluation/finance/utils.py new file mode 100644 index 000000000000..900d1de3e096 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/finance/utils.py @@ -0,0 
+1,70 @@ +import os + +import datasets +from datasets import Dataset + + +def load_data(dataset_path, name=None, concat=False, valid_set=None): + dataset = datasets.load_dataset(dataset_path, name, trust_remote_code=True) + + if concat: + dataset = datasets.concatenate_datasets( + [dataset["train"], dataset["validation"], dataset["test"]] + ) + + if valid_set: + dataset = dataset[valid_set] + else: + dataset = dataset if concat else dataset["train"] + dataset = dataset.train_test_split(0.25, seed=42)["test"] + + dataset = dataset.to_pandas() + + return dataset + + +def format_example(example: dict): + context = f"Instruction: {example['instruction']}\n" + if example.get("input"): + context += f"Input: {example['input']}\n" + context += "Answer: " + target = example["output"] + return {"context": context, "target": target} + + +def generate_label(value): + return "negative" if value < -0.1 else "neutral" if value < 0.1 else "positive" + + +def add_instruct(content): + tag = "tweet" if content.format == "post" else "news" + return f"What is the sentiment of this {tag}? Please choose an answer from {{negative/neutral/positive}}." + + +def change_target(x): + if "positive" in x or "Positive" in x: + return "positive" + elif "negative" in x or "Negative" in x: + return "negative" + else: + return "neutral" + + +def save_results(dataset_name, run_name, dataset, acc): + path = "./benchmarks/" + if not os.path.exists(path): + os.makedirs(path) + + # Save results + results_path = os.path.join(path, f"acc_{dataset_name}_{run_name}.txt") + with open(results_path, "w") as f: + f.write(f"Accuracy: {acc}. ") + print(f"Accuracy: {acc}. 
") + + # Save generations + generation_path = os.path.join(path, f"generation_{dataset_name}_{run_name}.jsonl") + dataset = Dataset.from_pandas(dataset) + dataset = dataset.remove_columns( + ["input", "output", "instruction", "target", "out_text"] + ) + dataset.to_json(generation_path, orient="records") diff --git a/benchmarks/flowertune-llm/evaluation/general-nlp/README.md b/benchmarks/flowertune-llm/evaluation/general-nlp/README.md new file mode 100644 index 000000000000..c3fd71da6ea2 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/general-nlp/README.md @@ -0,0 +1,41 @@ +# Evaluation for General NLP challenge + +We build up a multi-task language understanding pipeline to evaluate our fined-tuned LLMs. +The [MMLU](https://huggingface.co/datasets/lukaemon/mmlu) dataset is used for this evaluation, encompassing three categories: STEM, social sciences (SS), and humanities. + + +## Environment Setup + +```shell +git clone --depth=1 https://github.com/adap/flower.git && mv flower/benchmarks/flowertune-llm/evaluation/general-nlp ./flowertune-eval-general-nlp && rm -rf flower && cd flowertune-eval-general-nlp +``` + +Create a new Python environment (we recommend Python 3.10), activate it, then install dependencies with: + +```shell +# From a new python environment, run: +pip install -r requirements.txt + +# Log in HuggingFace account +huggingface-cli login +``` + +## Generate model decision & calculate accuracy + +> [!NOTE] +> Please ensure that you use `quantization=4` to run the evaluation if you wish to participate in the LLM Leaderboard. 
+ +```bash +python eval.py \ +--peft-path=/path/to/fine-tuned-peft-model-dir/ \ # e.g., ./peft_1 +--run-name=fl \ # specified name for this run +--batch-size=16 \ +--quantization=4 \ +--category=stem,social_sciences,humanities +``` + +The model answers and accuracy values will be saved to `benchmarks/generation_{dataset_name}_{category_name}_{run_name}.jsonl` and `benchmarks/acc_{dataset_name}_{category_name}_{run_name}.txt`, respectively. + + +> [!NOTE] +> Please ensure that you provide all **three accuracy values (STEM, SS, Humanities)** for three evaluation categories when submitting to the LLM Leaderboard (see the [`Make Submission`](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm/evaluation#make-submission-on-flowertune-llm-leaderboard) section). diff --git a/benchmarks/flowertune-llm/evaluation/general-nlp/benchmarks.py b/benchmarks/flowertune-llm/evaluation/general-nlp/benchmarks.py new file mode 100644 index 000000000000..c20522e7ed79 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/general-nlp/benchmarks.py @@ -0,0 +1,201 @@ +import json + +import pandas as pd +from sklearn.metrics import accuracy_score +from torch.utils.data import DataLoader +from tqdm import tqdm +from utils import format_answer, format_example, save_results + +from datasets import Dataset, load_dataset + +INSTRUCTIONS = { + "mmlu": "Answer the following multiple choice question.", +} + +MMLU_CATEGORY = { + "stem": [ + "abstract_algebra", + "anatomy", + "astronomy", + "college_biology", + "college_chemistry", + "college_computer_science", + "college_mathematics", + "college_physics", + "computer_security", + "conceptual_physics", + "electrical_engineering", + "elementary_mathematics", + "high_school_biology", + "high_school_chemistry", + "high_school_computer_science", + "high_school_mathematics", + "high_school_physics", + "high_school_statistics", + "machine_learning", + ], + "social_sciences": [ + "econometrics", + "high_school_geography", + 
"high_school_government_and_politics", + "high_school_macroeconomics", + "high_school_microeconomics", + "high_school_psychology", + "human_sexuality", + "professional_psychology", + "public_relations", + "security_studies", + "sociology", + "us_foreign_policy", + ], + "humanities": [ + "formal_logic", + "high_school_european_history", + "high_school_us_history", + "high_school_world_history", + "international_law", + "jurisprudence", + "logical_fallacies", + "moral_disputes", + "moral_scenarios", + "philosophy", + "prehistory", + "professional_law", + "world_religions", + ], + "other": [ + "business_ethics", + "clinical_knowledge", + "college_medicine", + "global_facts", + "human_aging", + "management", + "marketing", + "medical_genetics", + "miscellaneous", + "nutrition", + "professional_accounting", + "professional_medicine", + "virology", + ], +} + + +def infer_mmlu(model, tokenizer, batch_size, category, run_name): + name = "mmlu" + answer_type = "mcq" + + # Download dataset + dataframes = [] + for subset in MMLU_CATEGORY[category]: + subset_data = load_dataset( + "lukaemon/mmlu", + subset, + split="test", + trust_remote_code=True, + ) + subset_df = pd.DataFrame(subset_data.map(lambda x: {"subset": subset, **x})) + dataframes.append(subset_df) + + dataset_df = pd.concat(dataframes, axis=0) + dataset = Dataset.from_pandas(dataset_df) + if "__index_level_0__" in dataset.column_names: + dataset = dataset.remove_columns("__index_level_0__") + + # Post process + instruction = INSTRUCTIONS[name] + + def post_process(row): + options = [row["A"], row["B"], row["C"], row["D"]] + row["prompt"] = format_example(row["input"], options) + row["gold"] = row["target"] + row["subset"] = row["subset"] + row["prompt"] = f"{instruction}\n{row['prompt']}\nThe answer is:\n" + return row + + dataset = dataset.map(post_process) + + # Generate results + generate_results( + name, run_name, dataset, model, tokenizer, batch_size, answer_type, category + ) + + +def generate_results( + 
name, run_name, dataset, model, tokenizer, batch_size, answer_type, category +): + # Run inference + prediction = inference(dataset, model, tokenizer, batch_size) + + # Calculate accuracy + acc = accuracy_compute(prediction, answer_type) + + # Save results and generations + save_results(name, category, run_name, prediction, acc) + + +def inference(dataset, model, tokenizer, batch_size): + columns_process = ["prompt", "gold"] + if "subset" in dataset.features: + columns_process.append("subset") + dataset_process = pd.DataFrame(dataset, columns=dataset.features)[columns_process] + dataset_process = dataset_process.assign(output="Null") + temperature = 1.0 + + inference_data = json.loads(dataset_process.to_json(orient="records")) + data_loader = DataLoader(inference_data, batch_size=batch_size, shuffle=False) + + batch_counter = 0 + for batch in tqdm(data_loader, total=len(data_loader), position=0, leave=True): + prompts = [ + f"<|im_start|>question\n{prompt}<|im_end|>\n<|im_start|>answer\n" + for prompt in batch["prompt"] + ] + if batch_counter == 0: + print(prompts[0]) + + # Process tokenizer + stop_seq = ["###"] + if tokenizer.eos_token is not None: + stop_seq.append(tokenizer.eos_token) + if tokenizer.pad_token is not None: + stop_seq.append(tokenizer.pad_token) + max_new_tokens = len( + tokenizer(batch["gold"][0], add_special_tokens=False)["input_ids"] + ) + + outputs = [] + for prompt in prompts: + input_ids = tokenizer.encode(prompt, return_tensors="pt").to("cuda") + output_ids = model.generate( + inputs=input_ids, + max_new_tokens=max_new_tokens, + do_sample=False, + top_p=1.0, + temperature=temperature, + pad_token_id=tokenizer.eos_token_id, + ) + output_ids = output_ids[0][len(input_ids[0]) :] + output = tokenizer.decode(output_ids, skip_special_tokens=True) + outputs.append(output) + + for prompt, out in zip(batch["prompt"], outputs): + dataset_process.loc[dataset_process["prompt"] == prompt, "output"] = out + batch_counter += 1 + + return dataset_process + 
+ +def accuracy_compute(dataset, answer_type): + dataset = json.loads(dataset.to_json(orient="records")) + preds, golds = [], [] + for row in dataset: + answer = row["gold"].lower() + output = row["output"].lower() + pred, gold = format_answer(output, answer, answer_type=answer_type) + preds.append(pred) + golds.append(gold) + + accuracy = accuracy_score(preds, golds) + + return accuracy diff --git a/benchmarks/flowertune-llm/evaluation/general-nlp/eval.py b/benchmarks/flowertune-llm/evaluation/general-nlp/eval.py new file mode 100644 index 000000000000..c50928610c44 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/general-nlp/eval.py @@ -0,0 +1,68 @@ +import argparse + +import torch +from peft import PeftModel +from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig + +from benchmarks import MMLU_CATEGORY, infer_mmlu + +# Fixed seed +torch.manual_seed(2024) + +parser = argparse.ArgumentParser() +parser.add_argument( + "--base-model-name-path", type=str, default="mistralai/Mistral-7B-v0.3" +) +parser.add_argument("--run-name", type=str, default="fl") +parser.add_argument("--peft-path", type=str, default=None) +parser.add_argument( + "--datasets", + type=str, + default="mmlu", + help="The dataset to infer on", +) +parser.add_argument( + "--category", + type=str, + default=None, + help="The category for MMLU dataset, chosen from [stem, social_sciences, humanities, other]", +) +parser.add_argument("--batch-size", type=int, default=16) +parser.add_argument("--quantization", type=int, default=4) +args = parser.parse_args() + + +# Load model and tokenizer +if args.quantization == 4: + quantization_config = BitsAndBytesConfig(load_in_4bit=True) + torch_dtype = torch.float32 +elif args.quantization == 8: + quantization_config = BitsAndBytesConfig(load_in_8bit=True) + torch_dtype = torch.float16 +else: + raise ValueError( + f"Use 4-bit or 8-bit quantization. 
You passed: {args.quantization}/" + ) + +model = AutoModelForCausalLM.from_pretrained( + args.base_model_name_path, + quantization_config=quantization_config, + torch_dtype=torch_dtype, +) +if args.peft_path is not None: + model = PeftModel.from_pretrained( + model, args.peft_path, torch_dtype=torch_dtype + ).to("cuda") + +tokenizer = AutoTokenizer.from_pretrained(args.base_model_name_path) + +# Evaluate +for dataset in args.datasets.split(","): + if dataset == "mmlu": + for cate in args.category.split(","): + if cate not in MMLU_CATEGORY.keys(): + raise ValueError("Undefined Category.") + else: + infer_mmlu(model, tokenizer, args.batch_size, cate, args.run_name) + else: + raise ValueError("Undefined Dataset.") diff --git a/benchmarks/flowertune-llm/evaluation/general-nlp/requirements.txt b/benchmarks/flowertune-llm/evaluation/general-nlp/requirements.txt new file mode 100644 index 000000000000..f5c46e869ce2 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/general-nlp/requirements.txt @@ -0,0 +1,8 @@ +peft==0.6.2 +pandas==2.2.2 +scikit-learn==1.5.0 +datasets==2.20.0 +sentencepiece==0.2.0 +protobuf==5.27.1 +bitsandbytes==0.43.1 +hf_transfer==0.1.8 diff --git a/benchmarks/flowertune-llm/evaluation/general-nlp/utils.py b/benchmarks/flowertune-llm/evaluation/general-nlp/utils.py new file mode 100644 index 000000000000..71334ca6c199 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/general-nlp/utils.py @@ -0,0 +1,84 @@ +import os +import re + + +def format_example(question, choices): + if not question.endswith("?") and not question.endswith("."): + question += "?" + options_str = "\n".join([f"{chr(65+i)}. 
{choices[i]}" for i in range(len(choices))]) + prompt = "Question: " + question + "\n\nOptions:\n" + options_str + return prompt + + +def save_results(dataset_name, category, run_name, dataset, acc): + path = "./benchmarks/" + if not os.path.exists(path): + os.makedirs(path) + + # Save results + cate_name = f"_{category}" if category else "" + results_path = os.path.join(path, f"acc_{dataset_name}{cate_name}_{run_name}.txt") + with open(results_path, "w") as f: + f.write(f"Accuracy: {acc}. ") + print(f"Accuracy: {acc}. ") + + # Save generations + generation_path = os.path.join( + path, f"generation_{dataset_name}{cate_name}_{run_name}.jsonl" + ) + dataset.to_json(generation_path, orient="records") + + +def format_answer(output_full, answer, answer_type="mcq"): + output = output_full + default = (output_full, answer) + if "\n##" in output: + try: + output = output.split("\n##")[1].split("\n")[0].strip().lower() + except Exception: + return default + if "###" in answer: + try: + answer = answer.split("answer is:")[1].split("###")[0].strip() + except Exception: + return default + + output = re.sub(r"[^a-zA-Z0-9]", " ", output).strip() + output = re.sub(" +", " ", output) + + if answer_type == "boolean": + output = clean_boolean_answer(output) + elif answer_type == "mcq": + output = clean_mcq_answer(output) + + if output in ["a", "b", "c", "d", "e", "yes", "no"]: + return output, answer + else: + return default + + +def clean_mcq_answer(output): + output = clean_answer(output) + try: + output = output[0] + except Exception: + return output + return output + + +def clean_boolean_answer(output): + if "yesyes" in output: + output = output.replace("yesyes", "yes") + elif "nono" in output: + output = output.replace("nono", "no") + elif "yesno" in output: + output = output.replace("yesno", "yes") + elif "noyes" in output: + output = output.replace("noyes", "no") + output = clean_answer(output) + return output + + +def clean_answer(output): + output_clean = 
output.encode("ascii", "ignore").decode("ascii") + return output_clean diff --git a/benchmarks/flowertune-llm/evaluation/medical/README.md b/benchmarks/flowertune-llm/evaluation/medical/README.md new file mode 100644 index 000000000000..628489ce8de6 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/medical/README.md @@ -0,0 +1,41 @@ +# Evaluation for Medical challenge + +We build up a medical question answering (QA) pipeline to evaluate our fined-tuned LLMs. +Three datasets have been selected for this evaluation: [PubMedQA](https://huggingface.co/datasets/bigbio/pubmed_qa), [MedMCQA](https://huggingface.co/datasets/medmcqa), and [MedQA](https://huggingface.co/datasets/bigbio/med_qa). + + +## Environment Setup + +```shell +git clone --depth=1 https://github.com/adap/flower.git && mv flower/benchmarks/flowertune-llm/evaluation/medical ./flowertune-eval-medical && rm -rf flower && cd flowertune-eval-medical +``` + +Create a new Python environment (we recommend Python 3.10), activate it, then install dependencies with: + +```shell +# From a new python environment, run: +pip install -r requirements.txt + +# Log in HuggingFace account +huggingface-cli login +``` + +## Generate model decision & calculate accuracy + +> [!NOTE] +> Please ensure that you use `quantization=4` to run the evaluation if you wish to participate in the LLM Leaderboard. + +```bash +python eval.py \ +--peft-path=/path/to/fine-tuned-peft-model-dir/ \ # e.g., ./peft_1 +--run-name=fl \ # specified name for this run +--batch-size=16 \ +--quantization=4 \ +--datasets=pubmedqa,medmcqa,medqa +``` + +The model answers and accuracy values will be saved to `benchmarks/generation_{dataset_name}_{run_name}.jsonl` and `benchmarks/acc_{dataset_name}_{run_name}.txt`, respectively. 
+ + +> [!NOTE] +> Please ensure that you provide all **three accuracy values (PubMedQA, MedMCQA, MedQA)** for three evaluation datasets when submitting to the LLM Leaderboard (see the [`Make Submission`](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm/evaluation#make-submission-on-flowertune-llm-leaderboard) section). diff --git a/benchmarks/flowertune-llm/evaluation/medical/benchmarks.py b/benchmarks/flowertune-llm/evaluation/medical/benchmarks.py new file mode 100644 index 000000000000..c72e2a7894da --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/medical/benchmarks.py @@ -0,0 +1,174 @@ +import json + +import pandas as pd +from sklearn.metrics import accuracy_score +from torch.utils.data import DataLoader +from tqdm import tqdm +from utils import format_answer, format_example, save_results + +import datasets + +# The instructions refer to Meditron evaluation: +# https://github.com/epfLLM/meditron/blob/main/evaluation/instructions.json +INSTRUCTIONS = { + "pubmedqa": "As an expert doctor in clinical science and medical knowledge, can you tell me if the following statement is correct? Answer yes, no, or maybe.", + "medqa": "You are a medical doctor taking the US Medical Licensing Examination. You need to demonstrate your understanding of basic and clinical science, medical knowledge, and mechanisms underlying health, disease, patient care, and modes of therapy. Show your ability to apply the knowledge essential for medical practice. For the following multiple-choice question, select one correct answer from A to E. Base your answer on the current and standard practices referenced in medical guidelines.", + "medmcqa": "You are a medical doctor answering realworld medical entrance exam questions. Based on your understanding of basic and clinical science, medical knowledge, and mechanisms underlying health, disease, patient care, and modes of therapy, answer the following multiple-choice question. Select one correct answer from A to D. 
Base your answer on the current and standard practices referenced in medical guidelines.", +} + + +def infer_pubmedqa(model, tokenizer, batch_size, run_name): + name = "pubmedqa" + answer_type = "boolean" + dataset = datasets.load_dataset( + "bigbio/pubmed_qa", + "pubmed_qa_labeled_fold0_source", + split="test", + trust_remote_code=True, + ) + # Post process + instruction = INSTRUCTIONS[name] + + def post_process(row): + context = "\n".join(row["CONTEXTS"]) + row["prompt"] = f"{context}\n{row['QUESTION']}" + row["gold"] = row["final_decision"] + row["long_answer"] = row["LONG_ANSWER"] + row["prompt"] = f"{instruction}\n{row['prompt']}\nThe answer is:\n" + return row + + dataset = dataset.map(post_process) + + # Generate results + generate_results(name, run_name, dataset, model, tokenizer, batch_size, answer_type) + + +def infer_medqa(model, tokenizer, batch_size, run_name): + name = "medqa" + answer_type = "mcq" + dataset = datasets.load_dataset( + "bigbio/med_qa", + "med_qa_en_4options_source", + split="test", + trust_remote_code=True, + ) + + # Post process + instruction = INSTRUCTIONS[name] + + def post_process(row): + choices = [opt["value"] for opt in row["options"]] + row["prompt"] = format_example(row["question"], choices) + for opt in row["options"]: + if opt["value"] == row["answer"]: + row["gold"] = opt["key"] + break + row["prompt"] = f"{instruction}\n{row['prompt']}\nThe answer is:\n" + return row + + dataset = dataset.map(post_process) + + # Generate results + generate_results(name, run_name, dataset, model, tokenizer, batch_size, answer_type) + + +def infer_medmcqa(model, tokenizer, batch_size, run_name): + name = "medmcqa" + answer_type = "mcq" + dataset = datasets.load_dataset( + "medmcqa", split="validation", trust_remote_code=True + ) + + # Post process + instruction = INSTRUCTIONS[name] + + def post_process(row): + options = [row["opa"], row["opb"], row["opc"], row["opd"]] + answer = int(row["cop"]) + row["prompt"] = 
format_example(row["question"], options) + row["gold"] = chr(ord("A") + answer) if answer in [0, 1, 2, 3] else None + row["prompt"] = f"{instruction}\n{row['prompt']}\nThe answer is:\n" + return row + + dataset = dataset.map(post_process) + + # Generate results + generate_results(name, run_name, dataset, model, tokenizer, batch_size, answer_type) + + +def generate_results( + name, run_name, dataset, model, tokenizer, batch_size, answer_type +): + # Run inference + prediction = inference(dataset, model, tokenizer, batch_size) + + # Calculate accuracy + acc = accuracy_compute(prediction, answer_type) + + # Save results and generations + save_results(name, run_name, prediction, acc) + + +def inference(dataset, model, tokenizer, batch_size): + columns_process = ["prompt", "gold"] + dataset_process = pd.DataFrame(dataset, columns=dataset.features)[columns_process] + dataset_process = dataset_process.assign(output="Null") + temperature = 1.0 + + inference_data = json.loads(dataset_process.to_json(orient="records")) + data_loader = DataLoader(inference_data, batch_size=batch_size, shuffle=False) + + batch_counter = 0 + for batch in tqdm(data_loader, total=len(data_loader), position=0, leave=True): + prompts = [ + f"<|im_start|>question\n{prompt}<|im_end|>\n<|im_start|>answer\n" + for prompt in batch["prompt"] + ] + if batch_counter == 0: + print(prompts[0]) + + # Process tokenizer + stop_seq = ["###"] + if tokenizer.eos_token is not None: + stop_seq.append(tokenizer.eos_token) + if tokenizer.pad_token is not None: + stop_seq.append(tokenizer.pad_token) + max_new_tokens = len( + tokenizer(batch["gold"][0], add_special_tokens=False)["input_ids"] + ) + + outputs = [] + for prompt in prompts: + input_ids = tokenizer.encode(prompt, return_tensors="pt").to("cuda") + output_ids = model.generate( + inputs=input_ids, + max_new_tokens=max_new_tokens, + do_sample=False, + top_p=1.0, + temperature=temperature, + pad_token_id=tokenizer.eos_token_id, + ) + output_ids = 
output_ids[0][len(input_ids[0]) :] + output = tokenizer.decode(output_ids, skip_special_tokens=True) + outputs.append(output) + + for prompt, out in zip(batch["prompt"], outputs): + dataset_process.loc[dataset_process["prompt"] == prompt, "output"] = out + batch_counter += 1 + + return dataset_process + + +def accuracy_compute(dataset, answer_type): + dataset = json.loads(dataset.to_json(orient="records")) + preds, golds = [], [] + for row in dataset: + answer = row["gold"].lower() + output = row["output"].lower() + pred, gold = format_answer(output, answer, answer_type=answer_type) + preds.append(pred) + golds.append(gold) + + accuracy = accuracy_score(preds, golds) + + return accuracy diff --git a/benchmarks/flowertune-llm/evaluation/medical/eval.py b/benchmarks/flowertune-llm/evaluation/medical/eval.py new file mode 100644 index 000000000000..7405e1493e4d --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/medical/eval.py @@ -0,0 +1,62 @@ +import argparse + +import torch +from peft import PeftModel +from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig + +from benchmarks import infer_medmcqa, infer_medqa, infer_pubmedqa + +# Fixed seed +torch.manual_seed(2024) + +parser = argparse.ArgumentParser() +parser.add_argument( + "--base-model-name-path", type=str, default="mistralai/Mistral-7B-v0.3" +) +parser.add_argument("--run-name", type=str, default="fl") +parser.add_argument("--peft-path", type=str, default=None) +parser.add_argument( + "--datasets", + type=str, + default="pubmedqa", + help="The dataset to infer on: [pubmedqa, medqa, medmcqa]", +) +parser.add_argument("--batch-size", type=int, default=16) +parser.add_argument("--quantization", type=int, default=4) +args = parser.parse_args() + + +# Load model and tokenizer +if args.quantization == 4: + quantization_config = BitsAndBytesConfig(load_in_4bit=True) + torch_dtype = torch.float32 +elif args.quantization == 8: + quantization_config = BitsAndBytesConfig(load_in_8bit=True) + 
torch_dtype = torch.float16 +else: + raise ValueError( + f"Use 4-bit or 8-bit quantization. You passed: {args.quantization}/" + ) + +model = AutoModelForCausalLM.from_pretrained( + args.base_model_name_path, + quantization_config=quantization_config, + torch_dtype=torch_dtype, +) +if args.peft_path is not None: + model = PeftModel.from_pretrained( + model, args.peft_path, torch_dtype=torch_dtype + ).to("cuda") + +tokenizer = AutoTokenizer.from_pretrained(args.base_model_name_path) + +# Evaluate +for dataset in args.datasets.split(","): + if dataset == "pubmedqa": + infer_pubmedqa(model, tokenizer, args.batch_size, args.run_name) + elif dataset == "medqa": + infer_medqa(model, tokenizer, args.batch_size, args.run_name) + elif dataset == "medmcqa": + infer_medmcqa(model, tokenizer, args.batch_size, args.run_name) + else: + raise ValueError("Undefined Dataset.") diff --git a/benchmarks/flowertune-llm/evaluation/medical/requirements.txt b/benchmarks/flowertune-llm/evaluation/medical/requirements.txt new file mode 100644 index 000000000000..f5c46e869ce2 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/medical/requirements.txt @@ -0,0 +1,8 @@ +peft==0.6.2 +pandas==2.2.2 +scikit-learn==1.5.0 +datasets==2.20.0 +sentencepiece==0.2.0 +protobuf==5.27.1 +bitsandbytes==0.43.1 +hf_transfer==0.1.8 diff --git a/benchmarks/flowertune-llm/evaluation/medical/utils.py b/benchmarks/flowertune-llm/evaluation/medical/utils.py new file mode 100644 index 000000000000..44d0763d39d4 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/medical/utils.py @@ -0,0 +1,81 @@ +import os +import re + + +def format_example(question, choices): + if not question.endswith("?") and not question.endswith("."): + question += "?" + options_str = "\n".join([f"{chr(65+i)}. 
{choices[i]}" for i in range(len(choices))]) + prompt = "Question: " + question + "\n\nOptions:\n" + options_str + return prompt + + +def save_results(dataset_name, run_name, dataset, acc): + path = "./benchmarks/" + if not os.path.exists(path): + os.makedirs(path) + + # Save results + results_path = os.path.join(path, f"acc_{dataset_name}_{run_name}.txt") + with open(results_path, "w") as f: + f.write(f"Accuracy: {acc}. ") + print(f"Accuracy: {acc}. ") + + # Save generations + generation_path = os.path.join(path, f"generation_{dataset_name}_{run_name}.jsonl") + dataset.to_json(generation_path, orient="records") + + +def format_answer(output_full, answer, answer_type="mcq"): + output = output_full + default = (output_full, answer) + if "\n##" in output: + try: + output = output.split("\n##")[1].split("\n")[0].strip().lower() + except Exception: + return default + if "###" in answer: + try: + answer = answer.split("answer is:")[1].split("###")[0].strip() + except Exception: + return default + + output = re.sub(r"[^a-zA-Z0-9]", " ", output).strip() + output = re.sub(" +", " ", output) + + if answer_type == "boolean": + output = clean_boolean_answer(output) + elif answer_type == "mcq": + output = clean_mcq_answer(output) + + if output in ["a", "b", "c", "d", "e", "yes", "no"]: + return output, answer + else: + return default + + +def clean_mcq_answer(output): + output = clean_answer(output) + try: + output = output[0] + except Exception: + return output + return output + + +def clean_boolean_answer(output): + if "yesyes" in output: + output = output.replace("yesyes", "yes") + elif "nono" in output: + output = output.replace("nono", "no") + elif "yesno" in output: + output = output.replace("yesno", "yes") + elif "noyes" in output: + output = output.replace("noyes", "no") + output = clean_answer(output) + return output + + +def clean_answer(output): + output_clean = output.encode("ascii", "ignore").decode("ascii") + return output_clean diff --git a/datasets/README.md 
b/datasets/README.md index cf5caac3e1cd..25db77233558 100644 --- a/datasets/README.md +++ b/datasets/README.md @@ -7,6 +7,21 @@ [![Slack](https://img.shields.io/badge/Chat-Slack-red)](https://flower.ai/join-slack) Flower Datasets (`flwr-datasets`) is a library to quickly and easily create datasets for federated learning, federated evaluation, and federated analytics. It was created by the `Flower Labs` team that also created Flower: A Friendly Federated Learning Framework. + + +> [!TIP] +> For complete documentation that includes API docs, how-to guides and tutorials, please visit the [Flower Datasets Documentation](https://flower.ai/docs/datasets/) and for full FL example see the [Flower Examples page](https://github.com/adap/flower/tree/main/examples). + +## Installation + +For a complete installation guide visit the [Flower Datasets Documentation](https://flower.ai/docs/datasets/) + +```bash +pip install flwr-datasets[vision] +``` + +## Overview + Flower Datasets library supports: * **downloading datasets** - choose the dataset from Hugging Face's `datasets`, * **partitioning datasets** - customize the partitioning scheme, @@ -21,43 +36,30 @@ Thanks to using Hugging Face's `datasets` used under the hood, Flower Datasets i * Jax, * Arrow. 
-Create **custom partitioning schemes** or choose from the **implemented partitioning schemes**: +Create **custom partitioning schemes** or choose from the **implemented [partitioning schemes](https://flower.ai/docs/datasets/ref-api/flwr_datasets.partitioner.html#module-flwr_datasets.partitioner)**: + * Partitioner (the abstract base class) `Partitioner` * IID partitioning `IidPartitioner(num_partitions)` -* Natural ID partitioner `NaturalIdPartitioner` -* Size partitioner (the abstract base class for the partitioners dictating the division based the number of samples) `SizePartitioner` -* Linear partitioner `LinearPartitioner` -* Square partitioner `SquarePartitioner` -* Exponential partitioner `ExponentialPartitioner` -* more to come in future releases. - -# Installation - -## With pip - -Flower Datasets can be installed from PyPi - -```bash -pip install flwr-datasets -``` - -Install with an extension: - -* for image datasets: - -```bash -pip install flwr-datasets[vision] -``` - -* for audio datasets: - -```bash -pip install flwr-datasets[audio] -``` - -If you plan to change the type of the dataset to run the code with your ML framework, make sure to have it installed too. 
- -# Usage +* Dirichlet partitioning `DirichletPartitioner(num_partitions, partition_by, alpha)` +* Distribution partitioning `DistributionPartitioner(distribution_array, num_partitions, num_unique_labels_per_partition, partition_by, preassigned_num_samples_per_label, rescale)` +* InnerDirichlet partitioning `InnerDirichletPartitioner(partition_sizes, partition_by, alpha)` +* Pathological partitioning `PathologicalPartitioner(num_partitions, partition_by, num_classes_per_partition, class_assignment_mode)` +* Natural ID partitioning `NaturalIdPartitioner(partition_by)` +* Size based partitioning (the abstract base class for the partitioners dictating the division based the number of samples) `SizePartitioner` +* Linear partitioning `LinearPartitioner(num_partitions)` +* Square partitioning `SquarePartitioner(num_partitions)` +* Exponential partitioning `ExponentialPartitioner(num_partitions)` +* more to come in the future releases (contributions are welcome). +

+ Comparison of partitioning schemes. +
+ Comparison of Partitioning Schemes on CIFAR10 +

+ +PS: This plot was generated using a library function (see [flwr_datasets.visualization](https://flower.ai/docs/datasets/ref-api/flwr_datasets.visualization.html) package for more). + + +## Usage Flower Datasets exposes the `FederatedDataset` abstraction to represent the dataset needed for federated learning/evaluation/analytics. It has two powerful methods that let you handle the dataset preprocessing: `load_partition(partition_id, split)` and `load_split(split)`. @@ -65,18 +67,20 @@ Here's a basic quickstart example of how to partition the MNIST dataset: ``` from flwr_datasets import FederatedDataset +from flwr_datasets.partitioners import IidPartitioner # The train split of the MNIST dataset will be partitioned into 100 partitions -mnist_fds = FederatedDataset("mnist", partitioners={"train": 100} +partitioner = IidPartitioner(num_partitions=100) +fds = FederatedDataset("ylecun/mnist", partitioners={"train": partitioner}) -mnist_partition_0 = mnist_fds.load_partition(0, "train") +partition = fds.load_partition(0) -centralized_data = mnist_fds.load_split("test") +centralized_data = fds.load_split("test") ``` For more details, please refer to the specific how-to guides or tutorial. They showcase customization and more advanced features. -# Future release +## Future release Here are a few of the things that we will work on in future releases: @@ -85,6 +89,6 @@ Here are a few of the things that we will work on in future releases: * ✅ More out-of-the-box `Partitioner`s. * ✅ Passing `Partitioner`s via `FederatedDataset`'s `partitioners` argument. * ✅ Customization of the dataset splitting before the partitioning. -* Simplification of the dataset transformation to the popular frameworks/types. +* ✅ Simplification of the dataset transformation to the popular frameworks/types. * Creation of the synthetic data, * Support for Vertical FL. 
diff --git a/datasets/dev/build-flwr-datasets-docs.sh b/datasets/dev/build-flwr-datasets-docs.sh index aefa47f147f8..ed41a87a414b 100755 --- a/datasets/dev/build-flwr-datasets-docs.sh +++ b/datasets/dev/build-flwr-datasets-docs.sh @@ -1,4 +1,20 @@ #!/bin/bash + +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + # Generating the docs, rename and move the files such that the meet the convention used in Flower. # Note that it involves two runs of sphinx-build that are necessary. # The first run generates the .rst files (and the html files that are discarded) diff --git a/datasets/dev/format.sh b/datasets/dev/format.sh index 8292e0b3ed79..b7dca9accabf 100755 --- a/datasets/dev/format.sh +++ b/datasets/dev/format.sh @@ -1,11 +1,34 @@ #!/bin/bash + +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + set -e cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../ # Python -echo "Formatting started" +echo "Formatting started: Python" python -m isort flwr_datasets/ python -m black -q flwr_datasets/ python -m docformatter -i -r flwr_datasets/ python -m ruff check --fix flwr_datasets/ -echo "Formatting done" +echo "Formatting done: Python" + +# Notebooks +echo "Formatting started: Notebooks" +python -m black --ipynb -q doc/source/*.ipynb +KEYS="metadata.celltoolbar metadata.language_info metadata.toc metadata.notify_time metadata.varInspector metadata.accelerator metadata.vscode cell.metadata.id cell.metadata.heading_collapsed cell.metadata.hidden cell.metadata.code_folding cell.metadata.tags cell.metadata.init_cell cell.metadata.vscode cell.metadata.pycharm" +python -m nbstripout --keep-output doc/source/*.ipynb --extra-keys "$KEYS" +echo "Formatting done: Notebooks" diff --git a/datasets/dev/publish.sh b/datasets/dev/publish.sh new file mode 100755 index 000000000000..d76ce6de7879 --- /dev/null +++ b/datasets/dev/publish.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +set -e +cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../ + +python -m poetry publish -u __token__ -p ${PYPI_TOKEN} diff --git a/datasets/dev/rm-caches.sh b/datasets/dev/rm-caches.sh new file mode 100755 index 000000000000..8de3aa940d8e --- /dev/null +++ b/datasets/dev/rm-caches.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +set -e +cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../ + +find . 
-type d -name __pycache__ -exec rm -r {} \+ +rm -rf .mypy_cache +rm -rf .pytest_cache +rm -rf .cache +rm -rf doc/build diff --git a/datasets/doc/source/_static/readme/comparison_of_partitioning_schemes.png b/datasets/doc/source/_static/readme/comparison_of_partitioning_schemes.png new file mode 100644 index 000000000000..ed2e323ef649 Binary files /dev/null and b/datasets/doc/source/_static/readme/comparison_of_partitioning_schemes.png differ diff --git a/datasets/doc/source/_static/tutorial-quickstart/choose-hf-dataset.png b/datasets/doc/source/_static/tutorial-quickstart/choose-hf-dataset.png new file mode 100644 index 000000000000..ffce2008e178 Binary files /dev/null and b/datasets/doc/source/_static/tutorial-quickstart/choose-hf-dataset.png differ diff --git a/datasets/doc/source/_static/tutorial-quickstart/copy-dataset-name.png b/datasets/doc/source/_static/tutorial-quickstart/copy-dataset-name.png new file mode 100644 index 000000000000..df6deb7cc997 Binary files /dev/null and b/datasets/doc/source/_static/tutorial-quickstart/copy-dataset-name.png differ diff --git a/datasets/doc/source/_static/tutorial-quickstart/partitioner-flexibility.png b/datasets/doc/source/_static/tutorial-quickstart/partitioner-flexibility.png new file mode 100644 index 000000000000..53148d6360f8 Binary files /dev/null and b/datasets/doc/source/_static/tutorial-quickstart/partitioner-flexibility.png differ diff --git a/datasets/doc/source/conf.py b/datasets/doc/source/conf.py index 755147bc9e1d..dcba63dd221c 100644 --- a/datasets/doc/source/conf.py +++ b/datasets/doc/source/conf.py @@ -38,7 +38,7 @@ author = "The Flower Authors" # The full version, including alpha/beta/rc tags -release = "0.1.0" +release = "0.3.0" # -- General configuration --------------------------------------------------- @@ -110,9 +110,9 @@ def find_test_modules(package_path): # Sphinx redirects, implemented after the doc filename changes. # To prevent 404 errors and redirect to the new pages. 
-# redirects = { -# } - +redirects = { + "how-to-visualize-label-distribution": "tutorial-visualize-label-distribution.html", +} # -- Options for HTML output ------------------------------------------------- @@ -162,7 +162,7 @@ def find_test_modules(package_path): .. raw:: html
- + Open in Colab """ @@ -180,3 +180,7 @@ def find_test_modules(package_path): # -- Options for MyST config ------------------------------------- # Enable this option to link to headers (`#`, `##`, or `###`) myst_heading_anchors = 3 + +# -- Options for sphinx_copybutton ------------------------------------- +copybutton_exclude = '.linenos, .gp, .go' +copybutton_prompt_text = ">>> " diff --git a/datasets/doc/source/how-to-install-flwr-datasets.rst b/datasets/doc/source/how-to-install-flwr-datasets.rst index d2fd7923a817..2068fc11da85 100644 --- a/datasets/doc/source/how-to-install-flwr-datasets.rst +++ b/datasets/doc/source/how-to-install-flwr-datasets.rst @@ -42,5 +42,5 @@ If everything worked, it should print the version of Flower Datasets to the comm .. code-block:: none - 0.0.1 + 0.3.0 diff --git a/datasets/doc/source/how-to-use-with-local-data.rst b/datasets/doc/source/how-to-use-with-local-data.rst index 276f6d6936ee..3a44ed4f6f38 100644 --- a/datasets/doc/source/how-to-use-with-local-data.rst +++ b/datasets/doc/source/how-to-use-with-local-data.rst @@ -37,14 +37,6 @@ CSV data_files = [ "path-to-my-file-1.csv", "path-to-my-file-2.csv", ...] dataset = load_dataset("csv", data_files=data_files) - # Divided Dataset - data_files = { - "train": single_train_file_or_list_of_files, - "test": single_test_file_or_list_of_files, - "can-have-more-splits": ... - } - dataset = load_dataset("csv", data_files=data_files) - partitioner = ChosenPartitioner(...) partitioner.dataset = dataset partition = partitioner.load_partition(partition_id=0) @@ -60,18 +52,10 @@ JSON # Single file data_files = "path-to-my-file.json" - # Multitple Files + # Multiple Files data_files = [ "path-to-my-file-1.json", "path-to-my-file-2.json", ...] dataset = load_dataset("json", data_files=data_files) - # Divided Dataset - data_files = { - "train": single_train_file_or_list_of_files, - "test": single_test_file_or_list_of_files, - "can-have-more-splits": ... 
- } - dataset = load_dataset("json", data_files=data_files) - partitioner = ChosenPartitioner(...) partitioner.dataset = dataset partition = partitioner.load_partition(partition_id=0) @@ -103,7 +87,12 @@ Then, the path you can give is `./mnist`. from flwr_datasets.partitioner import ChosenPartitioner # Directly from a directory - dataset = load_dataset("imagefolder", data_dir="/path/to/folder") + dataset_dict = load_dataset("imagefolder", data_dir="/path/to/folder") + # Note that what we just loaded is a DatasetDict, we need to choose a single split + # and assign it to the partitioner.dataset + # e.g. "train" split but that depends on the structure of your directory + dataset = dataset_dict["train"] + partitioner = ChosenPartitioner(...) partitioner.dataset = dataset partition = partitioner.load_partition(partition_id=0) @@ -134,7 +123,11 @@ Analogously to the image datasets, there are two methods here: from datasets import load_dataset from flwr_datasets.partitioner import ChosenPartitioner - dataset = load_dataset("audiofolder", data_dir="/path/to/folder") + dataset_dict = load_dataset("audiofolder", data_dir="/path/to/folder") + # Note that what we just loaded is a DatasetDict, we need to choose a single split + # and assign it to the partitioner.dataset + # e.g. "train" split but that depends on the structure of your directory + dataset = dataset_dict["train"] partitioner = ChosenPartitioner(...) partitioner.dataset = dataset @@ -230,7 +223,7 @@ Partitioner abstraction is designed to allow for a single dataset assignment. .. 
code-block:: python - partitioner.dataset = your_dataset + partitioner.dataset = your_dataset # (your_dataset must be of type dataset.Dataset) If you need to do the same partitioning on a different dataset, create a new Partitioner for that, e.g.: diff --git a/datasets/doc/source/index.rst b/datasets/doc/source/index.rst index 2144c527f8cd..d6b51fc84ad6 100644 --- a/datasets/doc/source/index.rst +++ b/datasets/doc/source/index.rst @@ -1,12 +1,22 @@ Flower Datasets =============== -Flower Datasets (``flwr-datasets``) is a library to quickly and easily create datasets for federated -learning/analytics/evaluation. It is created by the ``Flower Labs`` team that also created `Flower `_ - a Friendly Federated Learning Framework. +Flower Datasets (``flwr-datasets``) is a library that enables the quick and easy creation of datasets for federated learning/analytics/evaluation. It enables heterogeneity (non-iidness) simulation and division of datasets with the preexisting notion of IDs. The library was created by the ``Flower Labs`` team that also created `Flower `_ : A Friendly Federated Learning Framework. + +Try out an interactive demo to generate code and visualize heterogeneous divisions at the :ref:`bottom of this page`. Flower Datasets Framework ------------------------- +Install +~~~~~~~ + +.. code-block:: bash + + python -m pip install "flwr-datasets[vision]" + +Check out all the details on how to install Flower Datasets in :doc:`how-to-install-flwr-datasets`. + Tutorials ~~~~~~~~~ @@ -17,6 +27,8 @@ A learning-oriented series of tutorials is the best place to start. :caption: Tutorial tutorial-quickstart + tutorial-use-partitioners + tutorial-visualize-label-distribution How-to guides ~~~~~~~~~~~~~ @@ -47,15 +59,29 @@ Information-oriented API reference and other reference material. flwr_datasets +.. 
toctree:: + :maxdepth: 1 + :caption: Reference docs + ref-telemetry Main features ------------- Flower Datasets library supports: -- **downloading datasets** - choose the dataset from Hugging Face's ``dataset`` -- **partitioning datasets** - customize the partitioning scheme -- **creating centralized datasets** - leave parts of the dataset unpartitioned (e.g. for centralized evaluation) +- **Downloading datasets** - choose the dataset from Hugging Face's ``dataset`` (`link `_)(*) +- **Partitioning datasets** - choose one of the implemented partitioning scheme or create your own. +- **Creating centralized datasets** - leave parts of the dataset unpartitioned (e.g. for centralized evaluation) +- **Visualization of the partitioned datasets** - visualize the label distribution of the partitioned dataset (and compare the results on different parameters of the same partitioning schemes, different datasets, different partitioning schemes, or any mix of them) + +.. note:: + + (*) Once the dataset is available on HuggingFace Hub it can be **immediately** used in ``Flower Datasets`` (no approval from the Flower team needed, no custom code needed). + + +.. 
image:: ./_static/readme/comparison_of_partitioning_schemes.png + :align: center + :alt: Comparison of Partitioning Schemes on CIFAR10 Thanks to using Hugging Face's ``datasets`` used under the hood, Flower Datasets integrates with the following popular formats/frameworks: @@ -67,32 +93,47 @@ Thanks to using Hugging Face's ``datasets`` used under the hood, Flower Datasets - Jax - Arrow -Install -------- +Here are a few of the ``Partitioner`` s that are available: (for a full list see `link `_ ) -The simplest install is +* Partitioner (the abstract base class) ``Partitioner`` +* IID partitioning ``IidPartitioner(num_partitions)`` +* Dirichlet partitioning ``DirichletPartitioner(num_partitions, partition_by, alpha)`` +* Distribution partitioning ``DistributionPartitioner(distribution_array, num_partitions, num_unique_labels_per_partition, partition_by, preassigned_num_samples_per_label, rescale)`` +* InnerDirichlet partitioning ``InnerDirichletPartitioner(partition_sizes, partition_by, alpha)`` +* PathologicalPartitioner ``PathologicalPartitioner(num_partitions, partition_by, num_classes_per_partition, class_assignment_mode)`` +* Natural ID partitioner ``NaturalIdPartitioner(partition_by)`` +* Size partitioner (the abstract base class for the partitioners dictating the division based the number of samples) ``SizePartitioner`` +* Linear partitioner ``LinearPartitioner(num_partitions)`` +* Square partitioner ``SquarePartitioner(num_partitions)`` +* Exponential partitioner ``ExponentialPartitioner(num_partitions)`` +* more to come in the future releases (contributions are welcome). -.. code-block:: bash - python -m pip install flwr-datasets +How To Use the library +---------------------- +Learn how to use the ``flwr-datasets`` library from the :doc:`tutorial-quickstart` examples . -If you plan to use the image datasets +Distinguishing Features +----------------------- +What makes Flower Datasets stand out from other libraries? -.. 
code-block:: bash +* Access to the largest online repository of datasets: - python -m pip install flwr-datasets[vision] + * The library functionality is independent of the dataset, so you can use any dataset available on `🤗Hugging Face Datasets `_, which means that others can immediately benefit from the dataset you added. -If you plan to use the audio datasets + * Out-of-the-box reproducibility across different projects. -.. code-block:: bash + * Access to naturally dividable datasets (with some notion of id) and datasets typically used in centralized ML that need partitioning. - python -m pip install flwr-datasets[audio] +* Customizable levels of dataset heterogeneity: -Check out the full details on the download in :doc:`how-to-install-flwr-datasets`. + * Each ``Partitioner`` takes arguments that allow you to customize the partitioning scheme to your needs. -How To Use the library ----------------------- -Learn how to use the ``flwr-datasets`` library from the :doc:`tutorial-quickstart` examples . + * Partitioning can also be applied to the dataset with naturally available division. + +* Flexible and open for extensions API. + + * New custom partitioning schemes (``Partitioner`` subclasses) integrated with the whole ecosystem. Join the Flower Community ------------------------- @@ -104,3 +145,16 @@ The Flower Community is growing quickly - we're a friendly group of researchers, :shadow: Join us on Slack + +.. _demo: +Demo +---- + +.. raw:: html + + + + diff --git a/datasets/doc/source/ref-telemetry.md b/datasets/doc/source/ref-telemetry.md new file mode 100644 index 000000000000..a4fc9b6b0061 --- /dev/null +++ b/datasets/doc/source/ref-telemetry.md @@ -0,0 +1,66 @@ +# Telemetry + +The Flower Datasets open-source project collects **anonymous** usage metrics to make well-informed decisions to improve Flower Datasets. Doing this enables the Flower team to understand how Flower Datasets is used and what challenges users might face. 
+ +**Flower is a friendly framework for collaborative AI and data science.** Staying true to this statement, Flower makes it easy to disable telemetry for users that do not want to share anonymous usage metrics. + +## Principles + +We follow strong principles guarding anonymous usage metrics collection: + +- **Optional:** You will always be able to disable telemetry; read on to learn “[How to opt-out](#how-to-opt-out)”. +- **Anonymous:** The reported usage metrics are anonymous and do not contain any personally identifiable information (PII). See “[Collected metrics](#collected-metrics)” to understand what metrics are being reported. +- **Transparent:** You can easily inspect what anonymous metrics are being reported; see the section “[How to inspect what is being reported](#how-to-inspect-what-is-being-reported)” +- **Open for feedback:** You can always reach out to us if you have feedback; see the section “[How to contact us](#how-to-contact-us)” for details. + +## How to opt-out + +When Flower Datasets starts, it will check for an environment variable called `FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting `FLWR_TELEMETRY_ENABLED=0`. Assuming you are using Flower Datasets in a Flower server or client, simply do so by prepending your command as in: + +```bash +FLWR_TELEMETRY_ENABLED=0 python server.py # or client.py +``` + +Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example, `.bashrc` (or whatever configuration file applies to your environment) to disable Flower Datasets telemetry permanently. + +## Collected metrics + +Flower telemetry collects the following metrics: + +**Flower version.** Understand which versions of Flower Datasets are currently being used. This helps us to decide whether we should invest effort into releasing a patch version for an older version of Flower Datasets or instead use the bandwidth to build new features. 
+ +**Operating system.** Enables us to answer questions such as: *Should we create more guides for Linux, macOS, or Windows?* + +**Python version.** Knowing the Python version helps us, for example, to decide whether we should invest effort into supporting old versions of Python or stop supporting them and start taking advantage of new Python features. + +**Hardware properties.** Understanding the hardware environment that Flower Datasets is being used in helps to decide whether we should, for example, put more effort into supporting low-resource environments. + +**Dataset and Partitioners names.** Knowing what datasets and Partitioners are used enables us to provide more detailed code examples and tutorials and better prioritize work on development and support for them. + +**Cluster.** Flower telemetry assigns a random in-memory cluster ID each time a Flower workload starts. This allows us to understand which device types not only start Flower workloads but also successfully complete them. + +**Source.** Flower telemetry tries to store a random source ID in `~/.flwr/source` the first time a telemetry event is generated. The source ID is important to identify whether an issue is recurring or whether an issue is triggered by multiple clusters running concurrently (which often happens in simulation). For example, if a device runs multiple workloads at the same time, and this results in an issue, then, in order to reproduce the issue, multiple workloads must be started at the same time. + +You may delete the source ID at any time. If you wish for all events logged under a specific source ID to be deleted, you can send a deletion request mentioning the source ID to `telemetry@flower.ai`. All events related to that source ID will then be permanently deleted. + +We will not collect any personally identifiable information. If you think any of the metrics collected could be misused in any way, please [get in touch with us](#how-to-contact-us). 
We will update this page to reflect any changes to the metrics collected and publish changes in the changelog. + +If you think other metrics would be helpful for us to better guide our decisions, please let us know! We will carefully review them; if we are confident that they do not compromise user privacy, we may add them. + +## How to inspect what is being reported + +We wanted to make it very easy for you to inspect what anonymous usage metrics are reported. You can view all the reported telemetry information by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging is disabled by default. You may use logging independently from `FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature without sending any metrics. + +```bash +FLWR_TELEMETRY_LOGGING=1 python server.py # or client.py +``` + +To inspect Flower telemetry without sending any anonymous usage metrics, use both environment variables: + +```bash +FLWR_TELEMETRY_ENABLED=0 FLWR_TELEMETRY_LOGGING=1 python server.py # or client.py +``` + +## How to contact us + +We want to hear from you. If you have any feedback or ideas on how to improve the way we handle anonymous usage metrics, reach out to us via [Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email (`telemetry@flower.ai`). diff --git a/datasets/doc/source/tutorial-quickstart.ipynb b/datasets/doc/source/tutorial-quickstart.ipynb new file mode 100644 index 000000000000..fe71bdf58567 --- /dev/null +++ b/datasets/doc/source/tutorial-quickstart.ipynb @@ -0,0 +1,564 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "79c178bc47ac1b2f", + "metadata": {}, + "source": [ + "# Quickstart\n", + "\n", + "Start with `Flower Datasets` as fast as possible by learning the essentials." 
+ ] + }, + { + "cell_type": "markdown", + "id": "e0f34a29f74b13cb", + "metadata": {}, + "source": [ + "## Install Flower Datasets" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "initial_id", + "metadata": {}, + "outputs": [], + "source": [ + "! pip install -q \"flwr-datasets[vision]\"" + ] + }, + { + "cell_type": "markdown", + "id": "f19a191a", + "metadata": {}, + "source": [ + "If you want to use audio datasets install:\n", + "\n", + "```bash\n", + "! pip install -q \"flwr-datasets[audio]\"\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "499dd2f0d23d871e", + "metadata": {}, + "source": [ + "## Choose the dataset\n", + "\n", + "To choose the dataset, go to Hugging Face [Datasets Hub](https://huggingface.co/datasets) and search for your dataset by name. You will pass that name to the `dataset` parameter of `FederatedDataset`. Note that the name is case-sensitive.\n", + "\n", + "
\n", + " \"Choose\n", + "
" + ] + }, + { + "cell_type": "markdown", + "id": "a9d449e6", + "metadata": {}, + "source": [ + "Note that once the dataset is available on HuggingFace Hub it can be immediately used in `Flower Datasets` (no approval from Flower team is needed, no custom code needed). \n", + "\n", + "Here is how it looks for `CIFAR10` dataset." + ] + }, + { + "cell_type": "markdown", + "id": "b7d66b23efb1a289", + "metadata": {}, + "source": [ + "
\n", + " \"Choose\n", + "
" + ] + }, + { + "cell_type": "markdown", + "id": "e0c146753048fb2a", + "metadata": {}, + "source": [ + "## Partition the dataset\n", + "\n", + "To partition a dataset (in a basic scenario), you need to choose two things:\n", + "1) A dataset (identified by a name),\n", + "2) A partitioning scheme (by selecting one of the supported partitioning schemes, [see all of them here](https://flower.ai/docs/datasets/ref-api/flwr_datasets.partitioner.html), or creating a custom partitioning scheme).\n", + "\n", + "\n", + "\n", + "**1) Dataset choice**\n", + "\n", + "We will pass the name of the dataset to `FederatedDataset(dataset=\"some-name\", other-parameters)`. In this example it will be: `FederatedDataset(dataset=\"uoft-cs/cifar10\", other-parameters)`\n", + "\n", + "**2) Partitioner choice**\n", + "\n", + "We will partition the dataset in an IID manner using `IidPartitioner` ([link to the docs](https://flower.ai/docs/datasets/ref-api/flwr_datasets.partitioner.IidPartitioner.html#flwr_datasets.partitioner.IidPartitioner)). \n", + "Only the train split of the dataset will be processed. 
In general, we do `FederatedDataset(dataset=\"some-name\", partitioners={\"split-name\": partitioning_scheme})`, which for this example looks like:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a759c5b6f25c9dd4", + "metadata": {}, + "outputs": [], + "source": [ + "from flwr_datasets import FederatedDataset\n", + "from flwr_datasets.partitioner import IidPartitioner\n", + "\n", + "fds = FederatedDataset(\n", + " dataset=\"uoft-cs/cifar10\", partitioners={\"train\": IidPartitioner(num_partitions=10)}\n", + ")\n", + "\n", + "# Load the first partition of the \"train\" split\n", + "partition = fds.load_partition(0, \"train\")\n", + "# You can access the whole \"test\" split of the base dataset (it hasn't been partitioned)\n", + "centralized_dataset = fds.load_split(\"test\")" + ] + }, + { + "cell_type": "markdown", + "id": "de75d15c3f5b2383", + "metadata": {}, + "source": [ + "Now we have 10 partitions created from the train split of the CIFAR10 dataset and the test split\n", + "for the centralized evaluation. Later we will convert the type of the dataset from Hugging Face's `Dataset` type to the format required by PyTorch/TensorFlow frameworks." + ] + }, + { + "cell_type": "markdown", + "id": "efa7dbb120505f1f", + "metadata": {}, + "source": [ + "## Investigate the partition" + ] + }, + { + "cell_type": "markdown", + "id": "bf986a1a9f0284cd", + "metadata": {}, + "source": [ + "### Features\n", + "\n", + "Now we will determine the names of the features of your dataset (you can alternatively do that directly on the Hugging Face\n", + "website). The names can vary along different datasets e.g. \"img\" or \"image\", \"label\" or \"labels\". Additionally, if the label column is of [ClassLabel](https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.ClassLabel) type, we will also see the names of labels." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f7ff7cecdda8a931", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'img': Image(mode=None, decode=True, id=None),\n", + " 'label': ClassLabel(names=['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'], id=None)}" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Note this dataset has\n", + "partition.features" + ] + }, + { + "cell_type": "markdown", + "id": "2e69ed05193a098a", + "metadata": {}, + "source": [ + "### Indexing\n", + "\n", + "To see the first sample of the partition, we can index it like a Python list." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2f2097d4c5121a1b", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'img': ,\n", + " 'label': 1}" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "partition[0]" + ] + }, + { + "cell_type": "markdown", + "id": "a10ad2b97c4dd92a", + "metadata": {}, + "source": [ + "Then we can additionally choose the specific column." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "aa7f0e2e29841f54", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "1" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "partition[0][\"label\"]" + ] + }, + { + "cell_type": "markdown", + "id": "3fe1cef9a121dbc5", + "metadata": {}, + "source": [ + "We can also use slicing (take a few samples). 
Let's take the first 3 samples of the first partition:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "779818b365682c60", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'img': [,\n", + " ,\n", + " ],\n", + " 'label': [1, 2, 6]}" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "partition[:3]" + ] + }, + { + "cell_type": "markdown", + "id": "a354aa36fc586438", + "metadata": {}, + "source": [ + "We get a dictionary where the keys are the names of the columns and the values are list of the corresponding values of each row of the dataset. So to take the first 3 labels we can do:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "25fca62a8f2fbe51", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[1, 2, 6]" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "partition[:3][\"label\"]" + ] + }, + { + "cell_type": "markdown", + "id": "4e4790671ffe2142", + "metadata": {}, + "source": [ + "Note that the indexing by column first is also possible but discouraged because the whole column will be loaded into the memory." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7836fe6d65c673b2", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[1, 2, 6]" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "partition[\"label\"][:3]" + ] + }, + { + "cell_type": "markdown", + "id": "c3c46099625437fc", + "metadata": {}, + "source": [ + "You can also select a subset of the dataset and keep the same type (dataset.Dataset) instead of receiving a dictionary of values." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "708abab74de3d5a1", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Dataset({\n", + " features: ['img', 'label'],\n", + " num_rows: 3\n", + "})" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "partition.select([0, 1, 2])" + ] + }, + { + "cell_type": "markdown", + "id": "462f707b4f078a8d", + "metadata": {}, + "source": [ + "And this dataset contains the same samples as we saw before." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "19d2e3cc74d93c4d", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'img': [,\n", + " ,\n", + " ],\n", + " 'label': [1, 2, 6]}" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "partition.select([0, 1, 2])[:]" + ] + }, + { + "cell_type": "markdown", + "id": "b5e683cfaddf92f", + "metadata": {}, + "source": [ + "## Use with PyTorch/NumPy/TensorFlow\n", + "\n", + "For more detailed instructions, go to:\n", + "\n", + "* [how-to-use-with-pytorch](https://flower.ai/docs/datasets/how-to-use-with-pytorch.html)\n", + "\n", + "* [how-to-use-with-numpy](https://flower.ai/docs/datasets/how-to-use-with-numpy.html)\n", + "\n", + "* [how-to-use-with-tensorflow](https://flower.ai/docs/datasets/how-to-use-with-tensorflow.html)" + ] + }, + { + "cell_type": "markdown", + "id": "de14f09f0ee4f6ac", + "metadata": {}, + "source": [ + "### PyTorch\n", + "\n", + "Transform the `Dataset` into the `DataLoader`, use the `PyTorch transforms` (`Compose` and all the others are possible)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a94321ee", + "metadata": {}, + "outputs": [], + "source": [ + "! 
pip install -q torch torchvision" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "544c0e73054f3445", + "metadata": {}, + "outputs": [], + "source": [ + "from torch.utils.data import DataLoader\n", + "from torchvision.transforms import ToTensor\n", + "\n", + "transforms = ToTensor()\n", + "\n", + "\n", + "def apply_transforms(batch):\n", + " # For CIFAR-10 the \"img\" column contains the images we want to apply the transforms to\n", + " batch[\"img\"] = [transforms(img) for img in batch[\"img\"]]\n", + " return batch\n", + "\n", + "\n", + "partition_torch = partition.with_transform(apply_transforms)\n", + "dataloader = DataLoader(partition_torch, batch_size=64)" + ] + }, + { + "cell_type": "markdown", + "id": "b93678a5", + "metadata": {}, + "source": [ + "The `Dataloader` created this way does not return a `Tuple` when iterating over it but a `Dict` with the names of the columns as keys and features as values. Look below for an example." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5edd3ce2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Return type when iterating over dataloader: \n", + "torch.Size([64, 3, 32, 32])\n", + "torch.Size([64])\n" + ] + } + ], + "source": [ + "for batch in dataloader:\n", + " print(f\"Return type when iterating over a dataloader: {type(batch)}\")\n", + " print(batch[\"img\"].shape)\n", + " print(batch[\"label\"].shape)\n", + " break" + ] + }, + { + "cell_type": "markdown", + "id": "71531613", + "metadata": {}, + "source": [ + "### NumPy\n", + "\n", + "NumPy can be used as input to the TensorFlow and scikit-learn models. The transformation is very simple." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6b98b3e1", + "metadata": {}, + "outputs": [], + "source": [ + "partition_np = partition.with_format(\"numpy\")\n", + "X_train, y_train = partition_np[\"img\"], partition_np[\"label\"]" + ] + }, + { + "cell_type": "markdown", + "id": "e4867834", + "metadata": {}, + "source": [ + "### TensorFlow Dataset\n", + "\n", + "Transformation to TensorFlow Dataset is a one-liner." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a69ce677", + "metadata": {}, + "outputs": [], + "source": [ + "! pip install -q tensorflow" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "db86f1aa", + "metadata": {}, + "outputs": [], + "source": [ + "tf_dataset = partition.to_tf_dataset(\n", + " columns=\"img\", label_cols=\"label\", batch_size=64, shuffle=True\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "61fd797c", + "metadata": {}, + "source": [ + "## Final remarks\n", + "\n", + "Congratulations, you now know the basics of Flower Datasets and are ready to perform basic dataset preparation for Federated Learning." + ] + }, + { + "cell_type": "markdown", + "id": "cbdfe1b5", + "metadata": {}, + "source": [ + "## Next \n", + "\n", + "This is the first quickstart tutorial from the Flower Datasets series. 
See other tutorials:\n", + "\n", + "* [Use Partitioners](https://flower.ai/docs/datasets/tutorial-use-partitioners.html)\n", + "\n", + "* [Visualize Label Distribution](https://flower.ai/docs/datasets/tutorial-visualize-label-distribution.html)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "flwr", + "language": "python", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/datasets/doc/source/tutorial-quickstart.rst b/datasets/doc/source/tutorial-quickstart.rst deleted file mode 100644 index e820e116fc61..000000000000 --- a/datasets/doc/source/tutorial-quickstart.rst +++ /dev/null @@ -1,99 +0,0 @@ -Quickstart -========== - -Run Flower Datasets as fast as possible by learning only the essentials. - -Install Federated Datasets --------------------------- -On the command line, run - -.. code-block:: bash - - python -m pip install "flwr-datasets[vision]" - -Install the ML framework ------------------------- -TensorFlow - -.. code-block:: bash - - pip install tensorflow - -PyTorch - -.. code-block:: bash - - pip install torch torchvision - -Choose the dataset ------------------- -Choose the dataset by going to Hugging Face `Datasets Hub `_ and searching for your -dataset by name that you will pass to the `dataset` parameter of `FederatedDataset`. Note that the name is case sensitive. - -Partition the dataset ---------------------- -To iid partition your dataset, choose the split you want to partition and the number of partitions:: - - from flwr_datasets import FederatedDataset - - fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10}) - partition = fds.load_partition(0, "train") - centralized_dataset = fds.load_split("test") - -Now you're ready to go. You have ten partitions created from the train split of the CIFAR10 dataset and the test split -for the centralized evaluation. We will convert the type of the dataset from Hugging Face's `Dataset` type to the one -supported by your framework. 
- -Display the features --------------------- -Determine the names of the features of your dataset (you can alternatively do that directly on the Hugging Face -website). The names can vary along different datasets e.g. "img" or "image", "label" or "labels". You will also see -the names of label categories. Type:: - - partition.features - -In case of CIFAR10, you should see the following output. - -.. code-block:: none - - {'img': Image(decode=True, id=None), - 'label': ClassLabel(names=['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', - 'frog', 'horse', 'ship', 'truck'], id=None)} - -Note that the image is denoted by "img" which is crucial for the next steps (conversion you the ML -framework of your choice). - -Conversion ----------- -For more detailed instructions, go to :doc:`how-to-use-with-pytorch`, :doc:`how-to-use-with-numpy`, or -:doc:`how-to-use-with-tensorflow`. - -PyTorch DataLoader -^^^^^^^^^^^^^^^^^^ -Transform the Dataset into the DataLoader, use the PyTorch transforms (`Compose` and all the others are also -possible):: - - from torch.utils.data import DataLoader - from torchvision.transforms import ToTensor - - transforms = ToTensor() - def apply_transforms(batch): - batch["img"] = [transforms(img) for img in batch["img"]] - return batch - partition_torch = partition.with_transform(apply_transforms) - dataloader = DataLoader(partition_torch, batch_size=64) - -NumPy -^^^^^ -NumPy can be used as input to the TensorFlow and scikit-learn models and it is very straightforward:: - - partition_np = partition.with_format("numpy") - X_train, y_train = partition_np["img"], partition_np["label"] - -TensorFlow Dataset -^^^^^^^^^^^^^^^^^^ -Transformation to TensorFlow Dataset is a one-liner:: - - tf_dataset = partition.to_tf_dataset(columns="img", label_cols="label", batch_size=64, - shuffle=True) - diff --git a/datasets/doc/source/tutorial-use-partitioners.ipynb b/datasets/doc/source/tutorial-use-partitioners.ipynb new file mode 100644 index 
000000000000..30ff55ede91d --- /dev/null +++ b/datasets/doc/source/tutorial-use-partitioners.ipynb @@ -0,0 +1,363 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Use Partitioners\n", + "\n", + "Understand `Partitioner`s interactions with `FederatedDataset`." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Install" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "! pip install -q \"flwr-datasets[vision]\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## What is `Partitioner`?\n", + "\n", + "`Partitioner` is an object responsible for dividing a dataset according to a chosen strategy. There are many `Partitioner`s that you can use (see the full list [here](https://flower.ai/docs/datasets/ref-api/flwr_datasets.partitioner.html)) and all of them inherit from the `Partitioner` object which is an abstract class providing basic structure and methods that need to be implemented for any new `Partitioner` to integrate with the rest of `Flower Datasets` code. The creation of different `Partitioner` differs, but the behavior is the same = they produce the same type of objects." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### `IidPartitioner` Creation\n", + "\n", + "Let's create (instantiate) the most basic partitioner, [IidPartitioner](https://flower.ai/docs/datasets/ref-api/flwr_datasets.partitioner.IidPartitioner.html#flwr_datasets.partitioner.IidPartitioner) and learn how it interacts with `FederatedDataset`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from flwr_datasets.partitioner import IidPartitioner\n", + "\n", + "# Set the partitioner to create 10 partitions\n", + "partitioner = IidPartitioner(num_partitions=10)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Right now the partitioner does not have access to any data therefore it has nothing to partition. `FederatedDataset` is responsible for assigning data to a `partitioner`(s).\n", + "\n", + "What **part** of the data is assigned to partitioner?\n", + "\n", + "In centralized (traditional) ML, there exist a strong concept of the splits of the dataset. Typically you can hear about train/valid/test splits. In FL research, if we don't have an already divided datasets (e.g. by `user_id`), we simulate such division using a centralized dataset. The goal of that operation is to simulate an FL scenario where the data is spread across clients. In Flower Datasets you decide what split of the dataset will be partitioned. You can also resplit the datasets such that you use a more non-custom split, or merge the whole train and test split into a single dataset. That's not a part of this tutorial (if you are curious how to do that see [Divider docs](https://flower.ai/docs/datasets/ref-api/flwr_datasets.preprocessor.Divider.html), [Merger docs](https://flower.ai/docs/datasets/ref-api/flwr_datasets.preprocessor.Merger.html) and `preprocessor` parameter docs of [FederatedDataset](https://flower.ai/docs/datasets/ref-api/flwr_datasets.FederatedDataset.html)).\n", + "\n", + "Let's see how you specify the split for partitioning." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### How do you specify the split to partition?\n", + "\n", + "The specification of the split happens as you specify the `partitioners` argument for `FederatedDataset`. 
It maps the split name (`str`) to the partitioner that will be used for that split of the data. In the example below we're using the `train` split of the `cifar10` dataset to partition.\n", + "\n", + "> If you're unsure why/how we chose the name of the `dataset` and how to customize it, see the [first tutorial](https://flower.ai/docs/datasets/quickstart-tutorial.html)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Dataset({\n", + " features: ['img', 'label'],\n", + " num_rows: 5000\n", + "})" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from flwr_datasets import FederatedDataset\n", + "\n", + "# Create the federated dataset passing the partitioner\n", + "fds = FederatedDataset(dataset=\"uoft-cs/cifar10\", partitioners={\"train\": partitioner})\n", + "\n", + "# Load the first partition\n", + "iid_partition = fds.load_partition(partition_id=0)\n", + "iid_partition" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'img': [,\n", + " ,\n", + " ],\n", + " 'label': [1, 2, 6]}" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Let's take a look at the first three samples\n", + "iid_partition[:3]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Use Different `Partitioners`" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Why would you need to use different `Partitioner`s?**\n", + "\n", + "There are a few ways that the data partitioning is simulated in the literature. `Flower Datasets` lets you work with many different approaches that have been proposed so far.
It enables you to simulate partitions with different properties and different levels of heterogeneity and use those settings to evaluate your Federated Learning algorithms.\n", + "\n", + "\n", + "**How to use different `Partitioner`s?**\n", + "\n", + "To use a different `Partitioner` you just need to create a different object (note it has typically different parameters that you need to specify). Then you pass it as before to the `FederatedDataset`.\n", + "\n", + "
\n", + " \"Partitioner\n", + "
\n", + "See the only changing part in yellow.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Creating non-IID partitions: Use ``PathologicalPartitioner``\n", + "\n", + "Now, we are going to create partitions that have only a subset of labels in each partition by using [PathologicalPartitioner](https://flower.ai/docs/datasets/ref-api/flwr_datasets.partitioner.PathologicalPartitioner.html#flwr_datasets.partitioner.PathologicalPartitioner). In this scenario we have the exact control about the number of unique labels on each partition. The smaller the number is the more heterogenous the division gets. Let's have a look at how it works with `num_classes_per_partition=2`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Dataset({\n", + " features: ['img', 'label'],\n", + " num_rows: 2501\n", + "})" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from flwr_datasets.partitioner import PathologicalPartitioner\n", + "\n", + "# Set the partitioner to create 10 partitions with 2 classes per partition\n", + "# Partition using column \"label\" (a column in the huggingface representation of CIFAR-10)\n", + "pathological_partitioner = PathologicalPartitioner(\n", + " num_partitions=10, partition_by=\"label\", num_classes_per_partition=2\n", + ")\n", + "\n", + "# Create the federated dataset passing the partitioner\n", + "fds = FederatedDataset(\n", + " dataset=\"uoft-cs/cifar10\", partitioners={\"train\": pathological_partitioner}\n", + ")\n", + "\n", + "# Load the first partition\n", + "partition_pathological = fds.load_partition(partition_id=0)\n", + "partition_pathological" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'img': [,\n", + " ,\n", + " ],\n", + " 'label': [0, 0, 7]}" + ] + }, + "execution_count": 
null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Let's take a look at the first three samples\n", + "partition_pathological[:3]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([0, 7])" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import numpy as np\n", + "\n", + "# We can use `np.unique` to get a list of the unique labels that are present\n", + "# in this data partition. As expected, there are just two labels. This means\n", + "# that this partition has only images with numbers 0 and 7.\n", + "np.unique(partition_pathological[\"label\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Creating non-IID partitions: Use ``DirichletPartitioner``\n", + "\n", + "With the [DirichletPartitioner](https://flower.ai/docs/datasets/ref-api/flwr_datasets.partitioner.DirichletPartitioner.html#flwr_datasets.partitioner.DirichletPartitioner), the primary tool for controlling heterogeneity is the `alpha` parameter; the smaller the value gets, the more heterogeneous the federated datasets are. Instead of choosing the exact number of classes on each partition, here we sample the probability distribution from the Dirichlet distribution, which tells how the samples associated with each class will be divided." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Dataset({\n", + " features: ['img', 'label'],\n", + " num_rows: 5433\n", + "})" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from flwr_datasets.partitioner import DirichletPartitioner\n", + "\n", + "# Set the partitioner to create 10 partitions with alpha 0.1 (so fairly non-IID)\n", + "# Partition using column \"label\" (a column in the huggingface representation of CIFAR-10)\n", + "dirichlet_partitioner = DirichletPartitioner(\n", + " num_partitions=10, alpha=0.1, partition_by=\"label\"\n", + ")\n", + "\n", + "# Create the federated dataset passing the partitioner\n", + "fds = FederatedDataset(\n", + " dataset=\"uoft-cs/cifar10\", partitioners={\"train\": dirichlet_partitioner}\n", + ")\n", + "\n", + "# Load the first partition\n", + "partition_from_dirichlet = fds.load_partition(partition_id=0)\n", + "partition_from_dirichlet" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'img': [,\n", + " ,\n", + " ,\n", + " ,\n", + " ],\n", + " 'label': [4, 4, 0, 1, 4]}" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Let's take a look at the first five samples\n", + "partition_from_dirichlet[:5]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Final remarks\n", + "Congratulations, you now know how to use different `Partitioner`s with `FederatedDataset` in Flower Datasets." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Next Steps\n", + "This is the second quickstart tutorial from the Flower Datasets series. 
See next tutorials:\n", + "\n", + "* [Visualize Label Distribution](https://flower.ai/docs/datasets/tutorial-visualize-label-distribution.html)\n", + "\n", + "Previous tutorials:\n", + "\n", + "* [Quickstart Basics](https://flower.ai/docs/datasets/tutorial-quickstart.html)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "flwr", + "language": "python", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/datasets/doc/source/tutorial-visualize-label-distribution.ipynb b/datasets/doc/source/tutorial-visualize-label-distribution.ipynb new file mode 100644 index 000000000000..d37edde78559 --- /dev/null +++ b/datasets/doc/source/tutorial-visualize-label-distribution.ipynb @@ -0,0 +1,1117 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "fb7e89caa9e6d772", + "metadata": {}, + "source": [ + "# Visualize Label Distribution" + ] + }, + { + "cell_type": "markdown", + "id": "67c54a8d7c872547", + "metadata": {}, + "source": [ + "Learn how to visualize and compare partitioned datasets when applying different `Partitioner`s or parameters.\n", + "\n", + "If you partition datasets to simulate heterogeneity through label skew and/or size skew, you can now effortlessly visualize the partitioned dataset using `flwr-datasets`.\n", + "\n", + "All the described visualization functions are compatible with all ``Partitioner`` you can find in\n", + "[flwr_datasets.partitioner](https://flower.ai/docs/datasets/ref-api/flwr_datasets.partitioner.html#module-flwr_datasets.partitioner)\n" + ] + }, + { + "cell_type": "markdown", + "id": "7220467f2c6ba432", + "metadata": {}, + "source": [ + "## Install Flower Datasets" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c46514b679f394ce", + "metadata": {}, + "outputs": [], + "source": [ + "! 
pip install -q \"flwr-datasets[vision]\"" + ] + }, + { + "cell_type": "markdown", + "id": "d7ffd5b6836a5ee0", + "metadata": {}, + "source": [ + "## Plot Label Distribution" + ] + }, + { + "cell_type": "markdown", + "id": "38fbbdfe6b930916", + "metadata": {}, + "source": [ + "### Bar plot" + ] + }, + { + "cell_type": "markdown", + "id": "a5778edf97a7ee04", + "metadata": {}, + "source": [ + "Let's visualize the result of `DirichletPartitioner`.\n", + "We will create a `FederatedDataset` and assign `DirichletPartitioner` to the `train` split:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "42397afaaf50529e", + "metadata": {}, + "outputs": [], + "source": [ + "from flwr_datasets import FederatedDataset\n", + "from flwr_datasets.partitioner import DirichletPartitioner\n", + "from flwr_datasets.visualization import plot_label_distributions\n", + "\n", + "\n", + "fds = FederatedDataset(\n", + " dataset=\"cifar10\",\n", + " partitioners={\n", + " \"train\": DirichletPartitioner(\n", + " num_partitions=10,\n", + " partition_by=\"label\",\n", + " alpha=0.3,\n", + " seed=42,\n", + " min_partition_size=0,\n", + " ),\n", + " },\n", + ")\n", + "\n", + "partitioner = fds.partitioners[\"train\"]" + ] + }, + { + "cell_type": "markdown", + "id": "c4d5855ee8a605d3", + "metadata": {}, + "source": [ + "Once we have the partitioner with the dataset assigned, we are ready to pass it to the plotting function:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f75b48256ed68897", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAuYAAAHHCAYAAADzgZ1dAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy80BEi2AAAACXBIWXMAAA9hAAAPYQGoP6dpAABmH0lEQVR4nO3deVzN2f8H8Ndt31etpJKk7GTJ2oxGtvnaZgZjRhGGKcRYx6CQxr4TxiSGwRjb2BMxTJLImixTw3dQBpVC2/38/vDt83MV03a7n/J6Ph738ehzPudzzvvce+l9T+eej0wQBAFERERERKRSaqoOgIiIiIiImJgTEREREUkCE3MiIiIiIglgYk5EREREJAFMzImIiIiIJICJORERERGRBDAxJyIiIiKSACbmREREREQSwMSciIiIiEgCmJgT0b/y9fWFg4NDieoGBQVBJpMpN6BK4OnpiYYNG1Zomw4ODvD19a3QNktq48aNkMlkSElJUXpfb75fUlJSIJPJsHDhQqX3DVSf9yARvX+YmBP9i8KEpvCho6ODevXqISAgAKmpqUrvvzDJKHzo6enBzc0N3333HTIzMyusn/v37yMoKAgJCQn/Wvf58+cICgpCdHR0hfVfEWQyGQICAlQdhtJFR0crvCe0tbVhZWUFT09PzJ07F48ePaqQfqT6OgPSjo2IqKyYmBOV0KxZs7B582asXLkSbdu2xZo1a+Dh4YHnz59XSv9r1qzB5s2bsXjxYtSvXx8hISHo2rUrBEGokPbv37+P4ODgYhPz9evXIykpSTx+/vw5goODi02KvvvuO7x48aJCYqJ3GzNmDDZv3ox169Zh4sSJMDMzw8yZM+Hq6orjx48r1P3yyy/x4sUL2Nvbl7j9d73O7/Lm+0UZ+B4koupIQ9UBEFUV3bp1g7u7OwBg2LBhMDc3x+LFi7F3714MHDiwXG0/f/4cenp676zzySefoEaNGgCAkSNHol+/fti1axfOnj0LDw+PMvedn58PuVz+zjqampolbk9DQwMaGvyvpTJ06NABn3zyiULZpUuX0KVLF/Tr1w/Xr1+HjY0NAEBdXR3q6upKjSc7Oxv6+vqler8oA9+DRFRVccacqIw+/PBDAEBycrJY9tNPP6FFixbQ1dWFmZkZBgwYgHv37ilcV7h2OT4+Hh07doSenh6+/fbbcvWfm5uLGTNmoEWLFjA2Noa+vj46dOiAEydOKFzz+lrfpUuXwsnJCdra2li9ejVatmwJABgyZIi4RGLjxo0AFNcMp6SkwMLCAgAQHBws1g0KCgJQ/Pre/Px8zJ49W+zPwcEB3377LXJychTqOTg4oGfPnjh9+jRatWoFHR0d1KlTB5s2bSr18/M2e/fuRY8ePWBrawttbW04OTlh9uzZKCgoKLZ+fHw82rZtC11dXTg6OiIsLKxInZycHMycORN169aFtrY27OzsMGnSpCLje1NeXh6Cg4Ph7OwMHR0dmJubo3379oiMjCzz+Jo0aYKlS5ciPT0dK1euFMuLW2N+/vx5eHt7o0aNGuL4hg4dCuDfX2dfX18YGBjgzp076N69OwwNDTFo0CDx3Nu+k7BkyRLY29tDV1cXnTp1wtWrVxXOe3p6wtPTs8h11ek9SET0NpxSICqjO3fuAADMzc0BACEhIZg+fTo+++wzDBs2DI8ePcKKFSvQsWNHXLx4ESYmJuK1jx8/Rrdu3TBgwAB88cUXsLKyKlf/mZmZ+OGHHzBw4EAMHz4cz549w4YNG+Dt7Y1z586hadOmCteGh4fj5cuXGDFiBLS1tdGnTx88e/YMM2bMwIgRI9ChQwcAQNu2bYv0a2FhgTVr1mDUqFHo06cP+vbtCwBo3LjxW2MdNmwYIiIi8Mknn+Cbb75BbGwsQkNDkZiYiN27dyvUvX37Nj755BP4+fnBx8cHP/74I3x9fdGiRQs0aNCg1M/TmzZu3AgDAwOMHz8eBgYGOH78OGbMmIHMzEwsWLBAoe7Tp0/RvXt
3fPbZZxg4cCB27NiBUaNGQUtLS0xg5XI5/vOf/+D06dMYMWIEXF1dceXKFSxZsgQ3b97Enj173hpLUFAQQkNDMWzYMLRq1QqZmZk4f/48Lly4gI8++qjMYyx8/o4ePYqQkJBi66SlpaFLly6wsLDAlClTYGJigpSUFOzatQtAyV7n/Px8eHt7o3379li4cOG//tVn06ZNePbsGfz9/fHy5UssW7YMH374Ia5cuVKqfwNV/T1IRPRWAhG9U3h4uABAOHbsmPDo0SPh3r17wrZt2wRzc3NBV1dX+O9//yukpKQI6urqQkhIiMK1V65cETQ0NBTKO3XqJAAQwsLCStT/zJkzBQBCUlKS8OjRIyE5OVlYu3atoK2tLVhZWQnZ2dlCfn6+kJOTo3Dd06dPBSsrK2Ho0KFiWXJysgBAMDIyEtLS0hTqx8XFCQCE8PDwIjH4+PgI9vb24vGjR48EAMLMmTPfGm+hhIQEAYAwbNgwhXoTJkwQAAjHjx8Xy+zt7QUAwqlTp8SytLQ0QVtbW/jmm2/e+TwJgiAAEPz9/d9Z5/nz50XKvvrqK0FPT094+fKlWFb4Oi1atEgsy8nJEZo2bSpYWloKubm5giAIwubNmwU1NTXh999/V2gzLCxMACCcOXNGYXw+Pj7icZMmTYQePXr867jedOLECQGA8Msvv7y1TpMmTQRTU1PxuPB9nJycLAiCIOzevVsAIMTFxb21jXe9zj4+PgIAYcqUKcWee/39Uvi+K/z3Uig2NlYAIIwbN04s69Spk9CpU6d/bVOq70EiovLgUhaiEvLy8oKFhQXs7OwwYMAAGBgYYPfu3ahZsyZ27doFuVyOzz77DP/884/4sLa2hrOzc5ElJdra2hgyZEip+ndxcYGFhQUcHR3x1VdfoW7dujhw4AD09PSgrq4OLS0tAK9mcJ88eYL8/Hy4u7vjwoULRdrq16+fuBRA2Q4ePAgAGD9+vEL5N998AwA4cOCAQrmbm5s4Yw+8mh11cXHBn3/+WSHx6Orqij8/e/YM//zzDzp06IDnz5/jxo0bCnU1NDTw1VdficdaWlr46quvkJaWhvj4eADAL7/8AldXV9SvX1/htS9cavTma/86ExMTXLt2Dbdu3aqQsb3OwMAAz549e2ffALB//37k5eWVuZ9Ro0aVuG7v3r1Rs2ZN8bhVq1Zo3bq1+B5RFqm9B4mI3oZLWYhKaNWqVahXrx40NDRgZWUFFxcXqKm9+mx769YtCIIAZ2fnYq9988twNWvWFBPpkvr1119hZGQETU1N1KpVC05OTgrnIyIisGjRIty4cUMh0XJ0dCzSVnFlyvLXX39BTU0NdevWVSi3traGiYkJ/vrrL4Xy2rVrF2nD1NQUT58+rZB4rl27hu+++w7Hjx8vst1kRkaGwrGtrS309fUVyurVqwfg1TrnNm3a4NatW0hMTHzrB520tLS3xjJr1iz06tUL9erVQ8OGDdG1a1d8+eWX71ySUVJZWVkwNDR86/lOnTqhX79+CA4OxpIlS+Dp6YnevXvj888/h7a2don60NDQQK1atUocU3H/PurVq4cdO3aUuI2ykNp7kIjobZiYE5VQq1atxF1Z3iSXyyGTyXDo0KFid74wMDBQOH591rakOnbsKO7K8qaffvoJvr6+6N27NyZOnAhLS0uoq6sjNDRUXIte3v7Lq6Q3fHnbziFCBWwLmZ6ejk6dOsHIyAizZs2Ck5MTdHR0cOHCBUyePPlfd6cpjlwuR6NGjbB48eJiz9vZ2b312o4dO+LOnTvYu3cvjh49ih9++AFLlixBWFgYhg0bVupYCuXl5eHmzZvvvEGSTCbDzp07cfbsWfz22284cuQIhg4dikWLFuHs2bNF3rPF0dbWFj+cVhSZTFbsa/22L+eWtu2SUOZ7kIjoXZiYE1UAJycnCIIAR0dHcUa1Mu3cuRN16tTBrl27FJKPmTNnlriN0twpsTR17e3tIZfLcevWLbi6uorlqampSE9PL9W
+2uUVHR2Nx48fY9euXejYsaNY/vrOOq+7f/++uAVgoZs3bwKAuEOIk5MTLl26hM6dO5fpbpNmZmYYMmQIhgwZgqysLHTs2BFBQUHlSsx37tyJFy9ewNvb+1/rtmnTBm3atEFISAi2bt2KQYMGYdu2bRg2bFiF3z2zuCU7N2/eVNjBxdTUtNglI2/OalfV9yAR0btwjTlRBejbty/U1dURHBxcZFZNEAQ8fvxYqf0XzvC93ndsbCxiYmJK3EZh8pmenv6vdQt33yhJ3e7duwMAli5dqlBeOMPco0ePEsdYXsU9T7m5uVi9enWx9fPz87F27VqFumvXroWFhQVatGgBAPjss8/w999/Y/369UWuf/HiBbKzs98az5vvCwMDA9StW/dft1l8l0uXLiEwMBCmpqbw9/d/a72nT58Wea8W7t5T2H9pXueS2LNnD/7++2/x+Ny5c4iNjUW3bt3EMicnJ9y4cUPh7qWXLl3CmTNnFNqqqu9BIqJ34Yw5UQVwcnLCnDlzMHXqVKSkpKB3794wNDREcnIydu/ejREjRmDChAlK679nz57YtWsX+vTpgx49eiA5ORlhYWFwc3NDVlZWicdgYmKCsLAwGBoaQl9fH61bty52Pbquri7c3Nywfft21KtXD2ZmZmjYsGGxSyeaNGkCHx8frFu3TlxKcu7cOURERKB379744IMPyj3+150/fx5z5swpUu7p6Ym2bdvC1NQUPj4+GDNmDGQyGTZv3vzWJQq2traYN28eUlJSUK9ePWzfvh0JCQlYt26d+L2BL7/8Ejt27MDIkSNx4sQJtGvXDgUFBbhx4wZ27NiBI0eOvHUJlJubGzw9PdGiRQuYmZnh/Pnz2LlzJwICAko01t9//x0vX75EQUEBHj9+jDNnzmDfvn0wNjbG7t27YW1t/dZrIyIisHr1avTp0wdOTk549uwZ1q9fDyMjIzGRLc3rXBJ169ZF+/btMWrUKOTk5GDp0qUwNzfHpEmTxDpDhw7F4sWL4e3tDT8/P6SlpSEsLAwNGjRQ+E6AlN+DRERlpqLdYIiqjMJt5t61rVyhX3/9VWjfvr2gr68v6OvrC/Xr1xf8/f2FpKQksU6nTp2EBg0alLj/wq3fHj169NY6crlcmDt3rmBvby9oa2sLzZo1E/bv3//WbesWLFhQbDt79+4V3NzcBA0NDYWtE99sRxAE4Y8//hBatGghaGlpKWxb9+ZWdYIgCHl5eUJwcLDg6OgoaGpqCnZ2dsLUqVMVticUhFdb1RW3feDbttB7E4C3PmbPni0IgiCcOXNGaNOmjaCrqyvY2toKkyZNEo4cOSIAEE6cOKHQZ4MGDYTz588LHh4ego6OjmBvby+sXLmySL+5ubnCvHnzhAYNGgja2tqCqamp0KJFCyE4OFjIyMhQGN/r2yXOmTNHaNWqlWBiYiLo6uoK9evXF0JCQsStGN+mcLvEwoempqZgYWEhdOzYUQgJCSmyFaYgFN0u8cKFC8LAgQOF2rVrC9ra2oKlpaXQs2dP4fz58wrXve119vHxEfT19YuN713vu0WLFgl2dnaCtra20KFDB+HSpUtFrv/pp5+EOnXqCFpaWkLTpk2FI0eOVJn3IBFRecgEgd9mISIiIiJSNa4xJyIiIiKSACbmREREREQSwMSciIiIiEgCmJgTEREREUkAE3MiIiIiIglgYk5EREREJAG8wVAFkcvluH//PgwNDSv8NtZERESkHIIg4NmzZ7C1tYWaGucrSbWYmFeQ+/fvw87OTtVhEBERURncu3cPtWrVUnUY9J5TaWJ+6tQpLFiwAPHx8Xjw4AF2796N3r17i+cFQcDMmTOxfv16pKeno127dlizZg2cnZ3FOk+ePMHo0aPx22+/QU1NDf369cOyZctgYGAg1rl8+TL8/f0RFxcHCwsLjB49WuEW0ADwyy+/YPr06UhJSYGzszPmzZsn3pa6JAwNDQG8+odtZGRUxmeEiIiIKlNmZibs7OzE3+NEqqT
SxDw7OxtNmjTB0KFD0bdv3yLn58+fj+XLlyMiIgKOjo6YPn06vL29cf36dejo6AAABg0ahAcPHiAyMhJ5eXkYMmQIRowYga1btwJ49Q+uS5cu8PLyQlhYGK5cuYKhQ4fCxMQEI0aMAAD88ccfGDhwIEJDQ9GzZ09s3boVvXv3xoULF9CwYcMSjaVw+YqRkRETcyIioiqGy1BJCmSCIAiqDgJ49Q/i9RlzQRBga2uLb775BhMmTAAAZGRkwMrKChs3bsSAAQOQmJgINzc3xMXFwd3dHQBw+PBhdO/eHf/9739ha2uLNWvWYNq0aXj48CG0tLQAAFOmTMGePXtw48YNAED//v2RnZ2N/fv3i/G0adMGTZs2RVhYWIniz8zMhLGxMTIyMpiYExERVRH8/U1SItlvOSQnJ+Phw4fw8vISy4yNjdG6dWvExMQAAGJiYmBiYiIm5QDg5eUFNTU1xMbGinU6duwoJuUA4O3tjaSkJDx9+lSs83o/hXUK+yEiIiIiUjbJfvnz4cOHAAArKyuFcisrK/Hcw4cPYWlpqXBeQ0MDZmZmCnUcHR2LtFF4ztTUFA8fPnxnP8XJyclBTk6OeJyZmVma4RERERERKZDsjLnUhYaGwtjYWHxwRxYiIiIiKg/JJubW1tYAgNTUVIXy1NRU8Zy1tTXS0tIUzufn5+PJkycKdYpr4/U+3lan8Hxxpk6dioyMDPFx79690g6RiIiIiEgk2cTc0dER1tbWiIqKEssyMzMRGxsLDw8PAICHhwfS09MRHx8v1jl+/Djkcjlat24t1jl16hTy8vLEOpGRkXBxcYGpqalY5/V+CusU9lMcbW1tcQcW7sRCREREROWl0sQ8KysLCQkJSEhIAPDqC58JCQm4e/cuZDIZAgMDMWfOHOzbtw9XrlzB4MGDYWtrK+7c4urqiq5du2L48OE4d+4czpw5g4CAAAwYMAC2trYAgM8//xxaWlrw8/PDtWvXsH37dixbtgzjx48X4xg7diwOHz6MRYsW4caNGwgKCsL58+cREBBQ2U8JEREREb2vBBU6ceKEAKDIw8fHRxAEQZDL5cL06dMFKysrQVtbW+jcubOQlJSk0Mbjx4+FgQMHCgYGBoKRkZEwZMgQ4dmzZwp1Ll26JLRv317Q1tYWatasKXz//fdFYtmxY4dQr149QUtLS2jQoIFw4MCBUo0lIyNDACBkZGSU7kkgIiIileHvb5ISyexjXtVxH1QiIqKqh7+/SUoku8aciIiIiOh9wsSciIiIiEgCmJgTEREREUkAE3MiIiIiIglgYk5EREREJAFMzImIiIiIJEBD1QEQEREpW50N/ZTex59+vyq9DyKq3jhjTkREREQkAUzMiYiIiIgkgIk5EREREZEEMDEnIiIiIpIAJuZERERERBLAxJyIiIiISAKYmBMRERERSQATcyIiIiIiCWBiTkREREQkAUzMiYiIiIgkgIk5EREREZEEMDEnIiIiIpIAJuZERERERBLAxJyIiIiISAKYmBMRERERSQATcyIiIiIiCWBiTkREREQkAUzMiYiIiIgkQEPVARARESlb33pmqg6BiOhfccaciIiIiEgCmJgTEREREUkAE3MiIiIiIglgYk5EREREJAFMzImIiIiIJICJORERERGRBDAxJyIiIiKSACbmREREREQSwMSciIiIiEgCmJgTEREREUkAE3MiIiIiIglgYk5EREREJAFMzImIiIiIJICJORERERGRBDAxJyIiIiKSACbmREREREQSwMSciIiIiEgCmJgTEREREUkAE3MiIiIiIglgYk5EREREJAFMzImIiIiIJICJORERERGRBDAxJyIiIiKSAA1VB0BERKRsk9w7qDoEIqJ/xRlzIiIiIiIJYGJORERERCQBXMpCREREVAnkcjlyc3NVHQZVIk1NTairq5e4PhNzIiIiIiXLzc1FcnIy5HK5qkOhSmZiYgJra2vIZLJ/rcvEnIiIiEiJBEHAgwcPoK6uDjs
7O6ipcSXx+0AQBDx//hxpaWkAABsbm3+9hok5ERERkRLl5+fj+fPnsLW1hZ6enqrDoUqkq6sLAEhLS4OlpeW/LmvhRzYiIiIiJSooKAAAaGlpqTgSUoXCD2N5eXn/WpeJOREREVElKMkaY6p+SvO6MzEnIiIiIpIAJuZEREREVCE2btwIExOTcrcjk8mwZ8+ecrdT1TAxJyIiIiKRr68vevfureow3ktMzImIiIiIJEDSiXlBQQGmT58OR0dH6OrqwsnJCbNnz4YgCGIdQRAwY8YM2NjYQFdXF15eXrh165ZCO0+ePMGgQYNgZGQEExMT+Pn5ISsrS6HO5cuX0aFDB+jo6MDOzg7z58+vlDESERERVRWLFy9Go0aNoK+vDzs7O3z99ddFcioA2LNnD5ydnaGjowNvb2/cu3dP4fzevXvRvHlz6OjooE6dOggODkZ+fn6xfebm5iIgIAA2NjbQ0dGBvb09QkNDlTI+VZN0Yj5v3jysWbMGK1euRGJiIubNm4f58+djxYoVYp358+dj+fLlCAsLQ2xsLPT19eHt7Y2XL1+KdQYNGoRr164hMjIS+/fvx6lTpzBixAjxfGZmJrp06QJ7e3vEx8djwYIFCAoKwrp16yp1vERERERSpqamhuXLl+PatWuIiIjA8ePHMWnSJIU6z58/R0hICDZt2oQzZ84gPT0dAwYMEM///vvvGDx4MMaOHYvr169j7dq12LhxI0JCQortc/ny5di3bx927NiBpKQkbNmyBQ4ODsocpspI+gZDf/zxB3r16oUePXoAABwcHPDzzz/j3LlzAF7Nli9duhTfffcdevXqBQDYtGkTrKyssGfPHgwYMACJiYk4fPgw4uLi4O7uDgBYsWIFunfvjoULF8LW1hZbtmxBbm4ufvzxR2hpaaFBgwZISEjA4sWLFRJ4IiIiovdZYGCg+LODgwPmzJmDkSNHYvXq1WJ5Xl4eVq5cidatWwMAIiIi4OrqinPnzqFVq1YIDg7GlClT4OPjAwCoU6cOZs+ejUmTJmHmzJlF+rx79y6cnZ3Rvn17yGQy2NvbK3eQKiTpGfO2bdsiKioKN2/eBABcunQJp0+fRrdu3QAAycnJePjwIby8vMRrjI2N0bp1a8TExAAAYmJiYGJiIiblAODl5QU1NTXExsaKdTp27Kiw8b+3tzeSkpLw9OlTpY+TiIiIqCo4duwYOnfujJo1a8LQ0BBffvklHj9+jOfPn4t1NDQ00LJlS/G4fv36MDExQWJiIoBX+dysWbNgYGAgPoYPH44HDx4otFPI19cXCQkJcHFxwZgxY3D06FHlD1RFJD1jPmXKFGRmZqJ+/fpQV1dHQUEBQkJCMGjQIADAw4cPAQBWVlYK11lZWYnnHj58CEtLS4XzGhoaMDMzU6jj6OhYpI3Cc6ampkViy8nJQU5OjnicmZlZnqESERERSVpKSgp69uyJUaNGISQkBGZmZjh9+jT8/PyQm5sr3uHy32RlZSE4OBh9+/Ytck5HR6dIWfPmzZGcnIxDhw7h2LFj+Oyzz+Dl5YWdO3eWe0xSI+nEfMeOHdiyZQu2bt0qLi8JDAyEra2t+OcPVQkNDUVwcLBKYyAiIiKqLPHx8ZDL5Vi0aBHU1F4tutixY0eRevn5+Th//jxatWoFAEhKSkJ6ejpcXV0BvEq0k5KSULdu3RL3bWRkhP79+6N///745JNP0LVrVzx58gRmZmYVMDLpkHRiPnHiREyZMkX8wkCjRo3w119/ITQ0FD4+PrC2tgYApKamwsbGRrwuNTUVTZs2BQBYW1sjLS1Nod38/Hw8efJEvN7a2hqpqakKdQqPC+u8aerUqRg/frx4nJmZCTs7u3KMloiIiEgaMjIykJCQoFBWo0YN5OXlYcWKFfj4449x5swZhIWFFblWU1MTo0ePxvLly6GhoYGAgAC0adNGTNRnzJiBnj17onbt2vjkk0+gpqaGS5cu4erVq5gzZ06R9hYvXgwbGxs0a9YMamp
q+OWXX2BtbV0hNzKSGkmvMX/+/Ln4iayQuro65HI5AMDR0RHW1taIiooSz2dmZiI2NhYeHh4AAA8PD6SnpyM+Pl6sc/z4ccjlcvFLCR4eHjh16hTy8vLEOpGRkXBxcSl2GQsAaGtrw8jISOFBREREVB1ER0ejWbNmCo/Nmzdj8eLFmDdvHho2bIgtW7YUu22hnp4eJk+ejM8//xzt2rWDgYEBtm/fLp739vbG/v37cfToUbRs2RJt2rTBkiVL3vqlTkNDQ8yfPx/u7u5o2bIlUlJScPDgwSI5YnUgE17fFFxifH19cezYMaxduxYNGjTAxYsXMWLECAwdOhTz5s0D8GpLxe+//x4RERFwdHTE9OnTcfnyZVy/fl1cp9StWzekpqYiLCwMeXl5GDJkCNzd3bF161YArz4Vuri4oEuXLpg8eTKuXr2KoUOHYsmSJSXelSUzMxPGxsbIyMhgkk5EJDFpLzYpvQ9L3cFK74MqXmX8/n758iWSk5Ph6OhY7Bpqqt5K8/pLeinLihUrMH36dHz99ddIS0uDra0tvvrqK8yYMUOsM2nSJGRnZ2PEiBFIT09H+/btcfjwYYWBb9myBQEBAejcuTPU1NTQr18/LF++XDxvbGyMo0ePwt/fHy1atECNGjUwY8YMbpVIVY5sVBul9yGsOav0PoiIiN5Hkp4xr0o4Y05SwMScqHicMae34Yw5KVtpXv/qtziHiIiIiKgKYmJORERERCQBTMyJiIiIiCSAiTkRERERkQQwMSciIiIikgAm5kREREREEsDEnIiIiIhIApiYExEREZFKpaSkQCaTISEhQdWhqJSk7/xJREREVF1Vxk3hXleWG8R5enqiadOmWLp0acUHREVwxpyIiIiIykQQBOTn56s6jGqDiTkRERERFeHr64uTJ09i2bJlkMlkkMlk2LhxI2QyGQ4dOoQWLVpAW1sbp0+fhq+vL3r37q1wfWBgIDw9PcVjuVyO+fPno27dutDW1kbt2rUREhJSbN8FBQUYOnQo6tevj7t37ypxlNLCpSxEREREVMSyZctw8+ZNNGzYELNmzQIAXLt2DQAwZcoULFy4EHXq1IGpqWmJ2ps6dSrWr1+PJUuWoH379njw4AFu3LhRpF5OTg4GDhyIlJQU/P7777CwsKi4QUkcE3MiIiIiKsLY2BhaWlrQ09ODtbU1AIiJ9KxZs/DRRx+VuK1nz55h2bJlWLlyJXx8fAAATk5OaN++vUK9rKws9OjRAzk5OThx4gSMjY0raDRVA5eyEBEREVGpuLu7l6p+YmIicnJy0Llz53fWGzhwILKzs3H06NH3LikHmJgTERERUSnp6+srHKupqUEQBIWyvLw88WddXd0Stdu9e3dcvnwZMTEx5Q+yCmJiTkRERETF0tLSQkFBwb/Ws7CwwIMHDxTKXt+T3NnZGbq6uoiKinpnO6NGjcL333+P//znPzh58mSZYq7KuMaciIiIiIrl4OCA2NhYpKSkwMDAAHK5vNh6H374IRYsWIBNmzbBw8MDP/30E65evYpmzZoBAHR0dDB58mRMmjQJWlpaaNeuHR49eoRr167Bz89Poa3Ro0ejoKAAPXv2xKFDh4qsQ6/OOGNORERERMWaMGEC1NXV4ebmBgsLi7duXejt7Y3p06dj0qRJaNmyJZ49e4bBgwcr1Jk+fTq++eYbzJgxA66urujfvz/S0tKKbS8wMBDBwcHo3r07/vjjjwofl1TJhDcXBFGZZGZmwtjYGBkZGTAyMlJ1OPSeqoy7yJXlznFEqpb2YpPS+7DUHfzvlUhyKuP398uXL5GcnAxHR0fo6OgopQ+SrtK8/pwxJyIiIiKSACbmREREREQSwMSciIiIiEgCmJgTEREREUkAE3MiIiIiIglgYk5EREREJAFMzImIiIiIJICJORERERGRBDAxJyIiIiKSACbmRERERFQqvr6+6N279zvrODg4YOnSpZUST3WhoeoAiIiIiN5HdTb0q9T+/vT7tVL
7i4uLg76+fqX2WdUxMSciIiKiCmdhYaHqEKocLmUhIiIiomLt3LkTjRo1gq6uLszNzeHl5YXs7Gzx/MKFC2FjYwNzc3P4+/sjLy9PPPfmUhaZTIY1a9agW7du0NXVRZ06dbBz587KHI7kMTEnIiIioiIePHiAgQMHYujQoUhMTER0dDT69u0LQRAAACdOnMCdO3dw4sQJREREYOPGjdi4ceM725w+fTr69euHS5cuYdCgQRgwYAASExMrYTRVA5eyEBEREVERDx48QH5+Pvr27Qt7e3sAQKNGjcTzpqamWLlyJdTV1VG/fn306NEDUVFRGD58+Fvb/PTTTzFs2DAAwOzZsxEZGYkVK1Zg9erVyh1MFcEZcyIiIiIqokmTJujcuTMaNWqETz/9FOvXr8fTp0/F8w0aNIC6urp4bGNjg7S0tHe26eHhUeSYM+b/j4k5ERERERWhrq6OyMhIHDp0CG5ublixYgVcXFyQnJwMANDU1FSoL5PJIJfLVRFqtcHEnIiIiIiKJZPJ0K5dOwQHB+PixYvQ0tLC7t27y9ze2bNnixy7urqWN8xqg2vMiYiIiKiI2NhYREVFoUuXLrC0tERsbCwePXoEV1dXXL58uUxt/vLLL3B3d0f79u2xZcsWnDt3Dhs2bKjgyKsuJuZEREREVISRkRFOnTqFpUuXIjMzE/b29li0aBG6deuG7du3l6nN4OBgbNu2DV9//TVsbGzw888/w83NrYIjr7qYmBMRUbVn8aISOtGthD6oWqnsO3GWlqurKw4fPlzsueK2RXx9z3IASElJKVLH1tYWR48erYDoqieuMSciIiIikgAm5kREREREEsClLERERESkdIV3DKW344w5EREREZEEMDEnIiIiIpIAJuZERERERBLAxJyIiIiISAKYmBMRERERSQB3ZVGxCb8PV2r7CzusV2r7RERERFQxOGNOREREREV4enoiMDBQ1WG8VzhjTkRERKQCyv6r+Zv4V3Tp44w5ERERESldbm6uqkOQPCbmRERERFQsuVyOSZMmwczMDNbW1ggKChLP3b17F7169YKBgQGMjIzw2WefITU1VTwfFBSEpk2b4ocffoCjoyN0dHQAADt37kSjRo2gq6sLc3NzeHl5ITs7W7zuhx9+gKurK3R0dFC/fn2sXr260saralzKQkRERETFioiIwPjx4xEbG4uYmBj4+vqiXbt26Ny5s5iUnzx5Evn5+fD390f//v0RHR0tXn/79m38+uuv2LVrF9TV1fHgwQMMHDgQ8+fPR58+ffDs2TP8/vvvEAQBALBlyxbMmDEDK1euRLNmzXDx4kUMHz4c+vr68PHxUdGzUHmYmBMRERFRsRo3boyZM2cCAJydnbFy5UpERUUBAK5cuYLk5GTY2dkBADZt2oQGDRogLi4OLVu2BPBq+cqmTZtgYWEBALhw4QLy8/PRt29f2NvbAwAaNWok9jdz5kwsWrQIffv2BQA4Ojri+vXrWLt27XuRmHMpCxEREREVq3HjxgrHNjY2SEtLQ2JiIuzs7MSkHADc3NxgYmKCxMREscze3l5MygGgSZMm6Ny5Mxo1aoRPP/0U69evx9OnTwEA2dnZuHPnDvz8/GBgYCA+5syZgzt37ih5pNLAGXMiIiIiKpampqbCsUwmg1wuL/H1+vr6Csfq6uqIjIzEH3/8gaNHj2LFihWYNm0aYmNjoaenBwBYv349WrduXeS69wFnzImIiIioVFxdXXHv3j3cu3dPLLt+/TrS09Ph5ub2zmtlMhnatWuH4OBgXLx4EVpaWti9ezesrKxga2uLP//8E3Xr1lV4ODo6KntIksAZcyIiIiIqFS8vLzRq1AiDBg3C0qVLkZ+fj6+//hqdOnWCu7v7W6+LjY1FVFQUunTpAktLS8TGxuLRo0dwdXUFAAQHB2PMmDEwNjZG165dkZOTg/Pnz+Pp06cYP358ZQ1PZZiYExEREVGpyGQy7N27F6NHj0bHjh2hpqaGrl27YsW
KFe+8zsjICKdOncLSpUuRmZkJe3t7LFq0CN26dQMADBs2DHp6eliwYAEmTpwIfX19NGrU6L25A6lMKNyfhsolMzMTxsbGyMjIgJGRUYmvU/Zdv3iXr/eLbFQbpfchrDmr9D6IKprwZJPS+5CZDVZ6H1Txyvr7uzRevnyJ5ORkhb286f1Rmtefa8yJiIiIiCRA8ktZ/v77b0yePBmHDh3C8+fPUbduXYSHh4vrlwRBwMyZM7F+/Xqkp6ejXbt2WLNmDZydncU2njx5gtGjR+O3336Dmpoa+vXrh2XLlsHAwECsc/nyZfj7+yMuLg4WFhYYPXo0Jk2apPTxedU2VXofRERERCR9kp4xf/r0Kdq1awdNTU0cOnQI169fx6JFi2Bq+v/J7Pz587F8+XKEhYUhNjYW+vr68Pb2xsuXL8U6gwYNwrVr1xAZGYn9+/fj1KlTGDFihHg+MzMTXbp0gb29PeLj47FgwQIEBQVh3bp1lTpeIiIiInp/SXrGfN68ebCzs0N4eLhY9vp2OYIgYOnSpfjuu+/Qq1cvAK/uOmVlZYU9e/ZgwIABSExMxOHDhxEXFyfOsq9YsQLdu3fHwoULYWtriy1btiA3Nxc//vgjtLS00KBBAyQkJGDx4sUKCTwRERERkbJIesZ83759cHd3x6effgpLS0s0a9YM69f//5cZk5OT8fDhQ3h5eYllxsbGaN26NWJiYgAAMTExMDExUdi6x8vLC2pqaoiNjRXrdOzYEVpaWmIdb29vJCUliXejelNOTg4yMzMVHkREREREZSXpxPzPP/8U14sfOXIEo0aNwpgxYxAREQEAePjwIQDAyspK4TorKyvx3MOHD2FpaalwXkNDA2ZmZgp1imvj9T7eFBoaCmNjY/Hx+i1piYiIiIhKS9KJuVwuR/PmzTF37lw0a9YMI0aMwPDhwxEWFqbq0DB16lRkZGSIj9fvfEVEREREVFqSXmNuY2NT5Laurq6u+PXXXwEA1tbWAIDU1FTY2NiIdVJTU9G0aVOxTlpamkIb+fn5ePLkiXi9tbU1UlNTFeoUHhfWeZO2tja0tbXLODIiIqLS4X0KiKo/Sc+Yt2vXDklJSQplN2/ehL29PYBXXwS1trZGVFSUeD4zMxOxsbHw8PAAAHh4eCA9PR3x8fFinePHj0Mul6N169ZinVOnTiEvL0+sExkZCRcXF4UdYIiIiIiIlEXSifm4ceNw9uxZzJ07F7dv38bWrVuxbt06+Pv7A3h1O9jAwEDMmTMH+/btw5UrVzB48GDY2tqid+/eAF7NsHft2hXDhw/HuXPncObMGQQEBGDAgAGwtbUFAHz++efQ0tKCn58frl27hu3bt2PZsmUYP368qoZOREREpHKCIGDEiBEwMzODTCZDQkKCqkOq1iS9lKVly5bYvXs3pk6dilmzZsHR0RFLly7FoEGDxDqTJk1CdnY2RowYgfT0dLRv3x6HDx9WuOXpli1bEBAQgM6dO4s3GFq+fLl43tjYGEePHoW/vz9atGiBGjVqYMaMGdwqkYiIiJTm8F/Kv5Hh67razy/1NYcPH8bGjRsRHR2NOnXqoEaNGkqIjApJOjEHgJ49e6Jnz55vPS+TyTBr1izMmjXrrXXMzMywdevWd/bTuHFj/P7772WOk4iIiKi6uXPnDmxsbNC2bdtiz+fm5ipsN03lI/nEnKSPX0giIiKqfnx9fcUtqmUyGezt7eHg4ICGDRtCQ0MDP/30Exo1aoQTJ07g5MmTmDhxIi5dugQzMzP4+Phgzpw50NB4lWo+e/YMI0eOxJ49e2BkZIRJkyZh7969aNq0KZYuXarCUUqLpNeYExEREZFqLFu2DLNmzUKtWrXw4MEDxMXFAQAiIiKgpaWFM2fOICwsDH///Te6d++Oli1b4tKlS1izZg02bNiAOXPmiG2NHz8eZ86cwb59+xAZGYnff/8dFy5cUNXQJIsz5kRERERUhLGxMQwNDaGurq6wfbSzszPmz///9erTpk2
DnZ0dVq5cCZlMhvr16+P+/fuYPHkyZsyYgezsbERERGDr1q3o3LkzACA8PFzchIP+HxNzIiIiIiqxFi1aKBwnJibCw8MDMplMLGvXrh2ysrLw3//+F0+fPkVeXh5atWolnjc2NoaLi0ulxVxVMDEnIqJqT7is/D+ZyzwHK70PIinQ19dXdQjVFhNzomrE0b2mqkMgIqL3TOFd2QVBEGfNz5w5A0NDQ9SqVQumpqbQ1NREXFwcateuDQDIyMjAzZs30bFjR1WGLjlMzFWsuWVDVYdAREREVGZff/01li5ditGjRyMgIABJSUmYOXMmxo8fDzU1NRgaGsLHxwcTJ06EmZkZLC0tMXPmTKipqSksfyEm5ipn8ULJHegquX0iIiJ6r9WsWRMHDx7ExIkT0aRJE5iZmcHPzw/fffedWGfx4sUYOXIkevbsKW6XeO/ePYUbQhITcyIiIiKVKMudOCtbYGAgAgMDxePo6Ohi63Xq1Annzp17azuGhobYsmWLeJydnY3g4GDeZf0NZdrHvE6dOnj8+HGR8vT0dNSpU6fcQRERERFR9XHx4kX8/PPPuHPnDi5cuIBBgwYBAHr16qXiyKSlTDPmKSkpKCgoKFKek5ODv//+u9xBEREREVH1snDhQiQlJUFLSwstWrTA77//jho1aqg6LEkpVWK+b98+8ecjR47A2NhYPC4oKEBUVBQcHBwqLDgiIiIiqvqaNWuG+Ph4VYcheaVKzHv37g0AkMlk8PHxUTinqakJBwcHLFq0qMKCIyIiIiJ6X5QqMZfL5QAAR0dHxMXF8c8PREREREQVpExrzJOTkys6DiIiIiKi91qZt0uMiopCVFQU0tLSxJn0Qj/++GO5AyMiIiIiep+UKTEPDg7GrFmz4O7uDhsbG961iYiIiIionMqUmIeFhWHjxo348ssvKzoeIiIiIqL3UpluMJSbm4u2bdtWdCxEREREJHGenp4KdwOlilOmGfNhw4Zh69atmD59ekXHQ0RERPReSHuxqVL7s9QdXKn9UemVKTF/+fIl1q1bh2PHjqFx48bQ1NRUOL948eIKCY6IiIiI6H1RpqUsly9fRtOmTaGmpoarV6/i4sWL4iMhIaGCQyQiIiIiVcjOzsbgwYNhYGAAGxubIjeSfPr0KQYPHgxTU1Po6emhW7duuHXrlkKd9evXw87ODnp6eujTpw8WL14MExOTShxF1VGmGfMTJ05UdBxEREREJDETJ07EyZMnsXfvXlhaWuLbb7/FhQsX0LRpUwCAr68vbt26hX379sHIyAiTJ09G9+7dcf36dWhqauLMmTMYOXIk5s2bh//85z84duwYl0K/Q5n3MaeKIVy+oNT2ZZ5cT0ZERESll5WVhQ0bNuCnn35C586dAQARERGoVasWAIgJ+ZkzZ8RNQbZs2QI7Ozvs2bMHn376KVasWIFu3bphwoQJAIB69erhjz/+wP79+1UzKIkrU2L+wQcfvHPv8uPHj5c5ICIiIiJSvTt37iA3NxetW7cWy8zMzODi4gIASExMhIaGhsJ5c3NzuLi4IDExEQCQlJSEPn36KLTbqlUrJuZvUabEvPDPF4Xy8vKQkJCAq1evwsfHpyLiIiIiIiJ6r5QpMV+yZEmx5UFBQcjKyipXQERERESkek5OTtDU1ERsbCxq164N4NWXPW/evIlOnTrB1dUV+fn5iI2NFZeyPH78GElJSXBzcwMAuLi4IC4uTqHdN4/p/5VpV5a3+eKLL/Djjz9WZJNEREREpAIGBgbw8/PDxIkTcfz4cVy9ehW+vr5QU3uVPjo7O6NXr14YPnw4Tp8+jUuXLuGLL75AzZo10atXLwDA6NGjcfDgQSxevBi3bt3C2rVrcejQoXcuiX6fVWhiHhMTAx0dnYpskoiIiIhUZMGCBejQoQM+/vhjeHl5oX379mjRooV4Pjw8HC1atEDPnj3h4eEBQRBw8OBB8R437dq1Q1hYGBYvXowmTZrg8OHDGDduHPPFtyjTUpa
+ffsqHAuCgAcPHuD8+fPcAoeIiIioBKrCnTgNDAywefNmbN68WSybOHGi+LOpqSk2bXr3HUyHDx+O4cOHKxzXrVu34oOtBsqUmBsbGyscq6mpwcXFBbNmzUKXLl0qJDAiIiIiqvoWLlyIjz76CPr6+jh06BAiIiKwevVqVYclSWVKzMPDwys6DiIiIiKqhs6dO4f58+fj2bNnqFOnDpYvX45hw4apOixJKtcNhuLj48V9Khs0aIBmzZpVSFBUtRya0lHVIRAREZFE7dixQ9UhVBllSszT0tIwYMAAREdHw8TEBACQnp6ODz74ANu2bYOFhUVFxkhEREREVO2VaVeW0aNH49mzZ7h27RqePHmCJ0+e4OrVq8jMzMSYMWMqOkYiIiIiomqvTDPmhw8fxrFjx+Dq6iqWubm5YdWqVfzyJxERERFRGZRpxlwul4v7U75OU1MTcrm83EEREREREb1vypSYf/jhhxg7dizu378vlv39998YN24cOnfuXGHBERERERG9L8qUmK9cuRKZmZlwcHCAk5MTnJyc4OjoiMzMTKxYsaKiYyQiIiIiqvbKtMbczs4OFy5cwLFjx3Djxg0AgKurK7y8vCo0OCIiIiKSFk9PTzRt2hRLly5VdSjVTqkS8+PHjyMgIABnz56FkZERPvroI3z00UcAgIyMDDRo0ABhYWHo0KGDUoIlIiIiqi6EJ+++lX1Fk5kNrtT+qPRKtZRl6dKlGD58OIyMjIqcMzY2xldffYXFixdXWHBERERE9H7Jzc1VdQgqU6rE/NKlS+jatetbz3fp0gXx8fHlDoqIiIiIVC87OxuDBw+GgYEBbGxssGjRIoXzOTk5mDBhAmrWrAl9fX20bt0a0dHRCnVOnz6NDh06QFdXF3Z2dhgzZgyys7PF8w4ODpg9ezYGDx4MIyMjjBgxojKGJkmlSsxTU1OL3SaxkIaGBh49elTuoIiIiIhI9SZOnIiTJ09i7969OHr0KKKjo3HhwgXxfEBAAGJiYrBt2zZcvnwZn376Kbp27Ypbt24BAO7cuYOuXbuiX79+uHz5MrZv347Tp08jICBAoZ+FCxeiSZMmuHjxIqZPn16pY5SSUq0xr1mzJq5evYq6desWe/7y5cuwsbGpkMCIiIiISHWysrKwYcMG/PTTT+J22BEREahVqxYA4O7duwgPD8fdu3dha2sLAJgwYQIOHz6M8PBwzJ07F6GhoRg0aBACAwMBAM7Ozli+fDk6deqENWvWQEdHB8Crrbi/+eabyh+kxJQqMe/evTumT5+Orl27ik9koRcvXmDmzJno2bNnhQZIRERERJXvzp07yM3NRevWrcUyMzMzuLi4AACuXLmCgoIC1KtXT+G6nJwcmJubA3i1DPry5cvYsmWLeF4QBMjlciQnJ4t3kXd3d1f2cKqEUiXm3333HXbt2oV69eohICBAfGFu3LiBVatWoaCgANOmTVNKoERERO8zR/eaqg6BSEFWVhbU1dURHx8PdXV1hXMGBgZina+++gpjxowpcn3t2rXFn/X19ZUbbBVRqsTcysoKf/zxB0aNGoWpU6dCEAQAgEwmg7e3N1atWgUrKyulBEpERERElcfJyQmampqIjY0Vk+inT5/i5s2b6NSpE5o1a4aCggKkpaW9davs5s2b4/r1629dBk2KSn2DIXt7exw8eBBPnz7F7du3IQgCnJ2dYWpqqoz4iIiIiEgFDAwM4Ofnh4kTJ8Lc3ByWlpaYNm0a1NRe7R1Sr149DBo0CIMHD8aiRYvQrFkzPHr0CFFRUWjcuDF69OiByZMno02bNggICMCwYcOgr6+P69evIzIyEitXrlTxCKWnTHf+BABTU1O0bNmyImMhIiIiem9UhRv+LFiwAFlZWfj4449haGiIb775BhkZGeL58PBwzJkzB9988w3+/vtv1KhRA23atBG/c9i4cWOcPHkS06ZNQ4cOHSAIApycnNC/f39VDUnSypyYExEREVH1ZmBggM2bN2Pz5s1
i2cSJE8WfNTU1ERwcjODg4Le20bJlSxw9evSt51NSUiok1uqgVPuYExERERGRcjAxJyIiIiKSACbmREREREQSwDXmRNVI33pmqg6BiIiIyogz5kREREREEsDEnIiIiIhIApiYExERERFJANeYE5HkyEa1UXofwpqzSu+DiIioNDhjTkREREQkAUzMiYiIiIgkgEtZiIiIiFRAHh1Yqf2peS6t1P6CgoKwZ88eJCQkVGq/VVmVmjH//vvvIZPJEBgYKJa9fPkS/v7+MDc3h4GBAfr164fU1FSF6+7evYsePXpAT08PlpaWmDhxIvLz8xXqREdHo3nz5tDW1kbdunWxcePGShgREREREdErVSYxj4uLw9q1a9G4cWOF8nHjxuG3337DL7/8gpMnT+L+/fvo27eveL6goAA9evRAbm4u/vjjD0RERGDjxo2YMWOGWCc5ORk9evTABx98gISEBAQGBmLYsGE4cuRIpY2PiIiISGrkcjnmz5+PunXrQltbG7Vr10ZISAgAYPLkyahXrx709PRQp04dTJ8+HXl5eQCAjRs3Ijg4GJcuXYJMJoNMJuOkZwlUiaUsWVlZGDRoENavX485c+aI5RkZGdiwYQO2bt2KDz/8EAAQHh4OV1dXnD17Fm3atMHRo0dx/fp1HDt2DFZWVmjatClmz56NyZMnIygoCFpaWggLC4OjoyMWLVoEAHB1dcXp06exZMkSeHt7q2TMRERERKo2depUrF+/HkuWLEH79u3x4MED3LhxAwBgaGiIjRs3wtbWFleuXMHw4cNhaGiISZMmoX///rh69SoOHz6MY8eOAQCMjY1VOZQqoUok5v7+/ujRowe8vLwUEvP4+Hjk5eXBy8tLLKtfvz5q166NmJgYtGnTBjExMWjUqBGsrKzEOt7e3hg1ahSuXbuGZs2aISYmRqGNwjqvL5l5U05ODnJycsTjzMzMChgpEVUndTb0U2r7f/r9qtT2iej99uzZMyxbtgwrV66Ej48PAMDJyQnt27cHAHz33XdiXQcHB0yYMAHbtm3DpEmToKurCwMDA2hoaMDa2lol8VdFkk/Mt23bhgsXLiAuLq7IuYcPH0JLSwsmJiYK5VZWVnj48KFY5/WkvPB84bl31cnMzMSLFy+gq6tbpO/Q0FAEBweXeVxEREREUpaYmIicnBx07ty52PPbt2/H8uXLcefOHWRlZSE/Px9GRkaVHGX1Iuk15vfu3cPYsWOxZcsW6OjoqDocBVOnTkVGRob4uHfvnqpDIiIiIqowxU1MFoqJicGgQYPQvXt37N+/HxcvXsS0adOQm5tbiRFWP5JOzOPj45GWlobmzZtDQ0MDGhoaOHnyJJYvXw4NDQ1YWVkhNzcX6enpCtelpqaKfzaxtrYusktL4fG/1TEyMnrrm1JbWxtGRkYKDyIiIqLqwtnZGbq6uoiKiipy7o8//oC9vT2mTZsGd3d3ODs746+//lKoo6WlhYKCgsoKt1qQ9FKWzp0748qVKwplQ4YMQf369TF58mTY2dlBU1MTUVFR6Nfv1VrOpKQk3L17Fx4eHgAADw8PhISEIC0tDZaWlgCAyMhIGBkZwc3NTaxz8OBBhX4iIyPFNoiIiIjeNzo6Opg8eTImTZoELS0ttGvXDo8ePcK1a9fg7OyMu3fvYtu2bWjZsiUOHDiA3bt3K1zv4OCA5ORkJCQkoFatWjA0NIS2traKRlM1SDoxNzQ0RMOGDRXK9PX1YW5uLpb7+flh/PjxMDMzg5GREUaPHg0PDw+0adMGANClSxe4ubnhyy+/xPz58/Hw4UN899138Pf3F98cI0eOxMqVKzFp0iQMHToUx48fx44dO3DgwIHKHTARERG9Nyr7hj9lMX36dGhoaGDGjBm4f/8+bGxsMHLkSPj5+WHcuHEICAhATk4OevTogenTpyMoKEi8tl+/fti1axc++OADpKenIzw8HL6+viobS1Ug6cS8JJYsWQI1NTX069cPOTk58Pb2xurVq8X
z6urq2L9/P0aNGgUPDw/o6+vDx8cHs2bNEus4OjriwIEDGDduHJYtW4ZatWrhhx9+4FaJRERE9F5TU1PDtGnTMG3atCLn5s+fj/nz5yuUvb6jnba2Nnbu3KnsEKuVKpeYR0dHKxzr6Ohg1apVWLVq1Vuvsbe3L7JU5U2enp64ePFiRYRIRERERFRqkv7yJxERERHR+4KJORERERGRBFS5pSxEVP05utdUdQhERESVjjPmREREREQSwMSciIiIiEgCmJgTEREREUkAE3MiIiIiIgnglz+JSHL61jNTdQhERESVjok5EUmOV21TVYdARPTe8/T0RNOmTbF06dJizzs4OCAwMFDhbp8lERQUhD179iAhIaHcMVY3TMyJiIiIVODFtO6V2p9uyLvvgl5acXFx0NfXr9A233dMzImIqNrLibyp9D50PZXeBZGkWFhYvPN8Xl4eNDU1Kyma6oFf/iQiIiKiYuXn5yMgIADGxsaoUaMGpk+fDkEQALxayvL6MheZTIY1a9bgP//5D/T19RESEgIA+P7772FlZQVDQ0P4+fnh5cuXqhhKlcDEnIiIiIiKFRERAQ0NDZw7dw7Lli3D4sWL8cMPP7y1flBQEPr06YMrV65g6NCh2LFjB4KCgjB37lycP38eNjY2WL16dSWOoGrhUhYiIiXh7jJEVNXZ2dlhyZIlkMlkcHFxwZUrV7BkyRIMHz682Pqff/45hgwZIh4PGDAAfn5+8PPzAwDMmTMHx44d46z5W3DGnIiIiIiK1aZNG8hkMvHYw8MDt27dQkFBQbH13d3dFY4TExPRunVrhTIPD4+KD7Sa4Iw5EZGScNtHInrfcJeW8mFiTkSkJM0tG6o6BCKicomNjVU4Pnv2LJydnaGurl6i611dXREbG4vBgwcrtEHF41IWIiIiIirW3bt3MX78eCQlJeHnn3/GihUrMHbs2BJfP3bsWPz4448IDw/HzZs3MXPmTFy7dk2JEVdtnDEnIiIiUoGKvuGPMgwePBgvXrxAq1atoK6ujrFjx2LEiBElvr5///64c+cOJk2ahJcvX6Jfv34YNWoUjhw5osSoqy4m5kRERERURHR0tPjzmjVripxPSUlROC7c3/xN3377Lb799luFsnnz5pU7vuqIS1mIiIiIiCSAiTkRERERkQQwMSciIiIikgCuMSciyeE2g0RE9D7ijDkRERERkQQwMSciIiIikgAm5kREREREEsA15iqWE3lTqe3reiq1eSIiIiKqIJwxJyIiIiKSACbmRERERPRe2LhxI0xMTN5ZJygoCE2bNhWPfX190bt3b6XGVYhLWYiIiIhU4FbrBpXan3PstUrtD3iVCAcGBiI9Pb3S+y6rCRMmYPTo0Srpm4k5EREREdH/GBgYwMDAQCV9cykLERFVe/899pfSH0TV0eHDh9G+fXuYmJjA3NwcPXv2xJ07dwAA0dHRkMlkCrPhCQkJkMlkSElJQXR0NIYMGYKMjAzIZDLIZDIEBQUBAJ4+fYrBgwfD1NQUenp66NatG27duiW2U7jkZP/+/XBxcYGenh4++eQTPH/+HBEREXBwcICpqSnGjBmDgoIC8bp/a7fQnj174OzsDB0dHXh7e+PevXviuTeXsrxJLpcjNDQUjo6O0NXVRZMmTbBz584yPsOKOGNORKQkFi+U3IGuktsnUgLZqDZK70NYc1bpfbwvsrOzMX78eDRu3BhZWVmYMWMG+vTpg4SEhH+9tm3btli6dClmzJiBpKQkABBnon19fXHr1i3s27cPRkZGmDx5Mrp3747r169DU1MTAPD8+XMsX74c27Ztw7Nnz9C3b1/06dMHJiYmOHjwIP7880/069cP7dq1Q//+/UvVbkhICDZt2gQtLS18/fXXGDBgAM6cOVOi5yQ0NBQ//fQTwsLC4OzsjFOnTuGLL76AhYUFOnXqVNqnWAETcyIiIqo0h6Z0VHUIVAr9+vVTOP7xxx9hYWGB69ev/+u1WlpaMDY2hkwmg7W
1tVhemDifOXMGbdu2BQBs2bIFdnZ22LNnDz799FMAQF5eHtasWQMnJycAwCeffILNmzcjNTUVBgYGcHNzwwcffIATJ06gf//+pWp35cqVaN26NQAgIiICrq6uOHfuHFq1avXOMeXk5GDu3Lk4duwYPDw8AAB16tTB6dOnsXbtWibmRERERKQct27dwowZMxAbG4t//vkHcrkcAHD37l3o6emVqc3ExERoaGiIiTEAmJubw8XFBYmJiWKZnp6emJQDgJWVFRwcHBTWf1tZWSEtLa1U7WpoaKBly5bicf369WFiYoLExMR/Tcxv376N58+f46OPPlIoz83NRbNmzUr6FLwVE3MiIiIiKtbHH38Me3t7rF+/Hra2tpDL5WjYsCFyc3PFBFkQBLF+Xl5ehfVduPSkkEwmK7as8MNCZcjKygIAHDhwADVr1lQ4p62tXe72mZgTERFRpWlu2VDVIVAJPX78GElJSVi/fj06dOgAADh9+rR43sLCAgDw4MEDmJqaAkCRtedaWloKX84EAFdXV+Tn5yM2NlZcclLYl5ubW5njLWm7+fn5OH/+vDg7npSUhPT0dLi6uv5rH25ubtDW1sbdu3fLvWylOEzMiYiIqNLUiL2g/E48Byu/j/eAqakpzM3NsW7dOtjY2ODu3buYMmWKeL5u3bqws7NDUFAQQkJCcPPmTSxatEihDQcHB2RlZSEqKgpNmjSBnp4enJ2d0atXLwwfPhxr166FoaEhpkyZgpo1a6JXr15ljrek7WpqamL06NFYvnw5NDQ0EBAQgDZt2vzrMhYAMDQ0xIQJEzBu3DjI5XK0b98eGRkZOHPmDIyMjODj41Pm+AEm5kREREQqoYob/pSGmpoatm3bhjFjxqBhw4ZwcXHB8uXL4enpCeBVgvvzzz9j1KhRaNy4MVq2bIk5c+aIX7IEXu3MMnLkSPTv3x+PHz/GzJkzERQUhPDwcIwdOxY9e/ZEbm4uOnbsiIMHDxZZqlJaJWlXT08PkydPxueff46///4bHTp0wIYNG0rcx+zZs2FhYYHQ0FD8+eefMDExQfPmzfHtt9+WK3YAkAmvLwyiMsvMzISxsTEyMjJgZGRU4uteTOuuxKgA3ZCDSm0fAA7/NUnpfXS1n6/0PqqDCb8PV3ofCzusV3ofaS82Kb0PS13lz6gJT5Q7DpkZZwVLqjLusKjsJKvOhn7/Xqmc/vT7Vel9KPv3HlC6331l/f1dGi9fvkRycjIcHR2ho6OjlD5Iukrz+vMGQ0REREREEsDEnIiIiIhIApiYExERERFJABNzIiIiIiIJYGJORERERCQBTMyJiIiIiCSAiTkRERERkQQwMSciIiIikgAm5kRERERUaikpKZDJZEhISCh3W76+vujdu3e526nqNFQdABERERFVPXZ2dnjw4AFq1Kih6lCqDSbmRERERCqwVeZSqf19LiRVaHvq6uqwtrZ+63lBEFBQUAANDaabJcWlLERERERUrMOHD6N9+/YwMTGBubk5evbsiTt37gAoupQlOjoaMpkMhw4dQosWLaCtrY3Tp08jKCgITZs2xdq1a2FnZwc9PT189tlnyMjIKFO/r/e9a9cufPDBB9DT00OTJk0QExOj0M7p06fRoUMH6Orqws7ODmPGjEF2dnbFP1EVhB9hiKqRSe4dVB0CERFVI9nZ2Rg/fjwaN26MrKwszJgxA3369HnnuvIpU6Zg4cKFqFOnDkxNTREdHY3bt29jx44d+O2335CZmQk/Pz98/fXX2LJlS6n7VVP7/3nladOmYeHChXB2dsa0adMwcOBA3L59GxoaGrhz5w66du2KOXPm4Mcff8SjR48QEBCAgIAAhIeHV/RTVSGYmBMRERFRsfr166dw/OOPP8LCwgLXr1+HgYFBsdfMmjULH330kULZy5cvsWnTJtSsWRMAsGLFCvTo0QOLFi0qdjnMu/pt2LChWD5hwgT06NEDABAcHIwGDRrg9u3bqF+/PkJDQzFo0CAEBgY
CAJydnbF8+XJ06tQJa9asgY6OTumejErAxJyIiKgK6FvPTNUh0Hvo1q1bmDFjBmJjY/HPP/9ALpcDAO7evQs3N7dir3F3dy9SVrt2bTEpBwAPDw/I5XIkJSUVm5i/q9/XE/PGjRuLP9vY2AAA0tLSUL9+fVy6dAmXL19WmJUXBAFyuRzJyclwdXUtzVNRKZiYExEREVGxPv74Y9jb22P9+vWwtbWFXC5Hw4YNkZub+9Zr9PX1K61fTU1N8WeZTAYAYhKflZWFr776CmPGjCnSfu3atcsdozIwMSciIiKiIh4/foykpCSsX78eHTq8+g7T6dOny9TW3bt3cf/+fdja2gIAzp49CzU1Nbi4FN2ZpqL6bd68Oa5fv466deuWKWZVYGJORERElea/x/5Seh/OIUrv4r1gamoKc3NzrFu3DjY2Nrh79y6mTJlSprZ0dHTg4+ODhQsXIjMzE2PGjMFnn31W7DKWiup38uTJaNOmDQICAjBs2DDo6+vj+vXriIyMxMqVK8s0DmVjYk5UjVi8qIROdCuhDyIiUjk1NTVs27YNY8aMQcOGDeHi4oLly5fD09Oz1G3VrVsXffv2Rffu3fHkyRP07NkTq1evVmq/jRs3xsmTJzFt2jR06NABgiDAyckJ/fv3L3X8lYWJOREREZEKVPQNf5TBy8sL169fVygTBKHYnz09PRWO3zRq1CiMGjWq2HMbN24sVb8ODg5F+jIxMSlS1rJlSxw9evStMUkNbzBERERERCQBkk7MQ0ND0bJlSxgaGsLS0hK9e/dGUpLip8uXL1/C398f5ubmMDAwQL9+/ZCamqpQ5+7du+jRowf09PRgaWmJiRMnIj8/X6FOdHQ0mjdvDm1tbdStW7fIJzciIiIiImWS9FKWkydPwt/fHy1btkR+fj6+/fZbdOnSBdevXxe34hk3bhwOHDiAX375BcbGxggICEDfvn1x5swZAEBBQQF69OgBa2tr/PHHH3jw4AEGDx4MTU1NzJ07FwCQnJyMHj16YOTIkdiyZQuioqIwbNgw2NjYwNvbW2XjJyot4fIFpfch8xys9D6IiKj6CAoKQlBQkKrDqBIknZgfPnxY4Xjjxo2wtLREfHw8OnbsiIyMDGzYsAFbt27Fhx9+CAAIDw+Hq6srzp49izZt2uDo0aO4fv06jh07BisrKzRt2hSzZ8/G5MmTERQUBC0tLYSFhcHR0RGLFi0CALi6uuL06dNYsmQJE3MiIiIiqhSSTszflJGRAQAwM3t197P4+Hjk5eXBy8tLrFO/fn3Url0bMTExaNOmDWJiYtCoUSNYWVmJdby9vTFq1Chcu3YNzZo1Q0xMjEIbhXUKb+FanJycHOTk5IjHmZmZFTFEonLJibyp9D50PZXeBRER0XtJ0mvMXyeXyxEYGIh27dqJt2J9+PAhtLS0YGJiolDXysoKDx8+FOu8npQXni889646mZmZePGi+P3nQkNDYWxsLD7s7OzKPUYiIiKqvt61YwlVX6V53atMYu7v74+rV69i27Ztqg4FADB16lRkZGSIj3v37qk6JCIiIpIgdXV1AHjnbeyp+nr+/DkAQFNT81/rVomlLAEBAdi/fz9OnTqFWrVqieXW1tbIzc1Fenq6wqx5amqqeCcpa2trnDt3TqG9wl1bXq/z5k4uqampMDIygq5u8XdT0dbWhra2drnHRkRERNWbhoYG9PT08OjRI2hqakJNrcrMi1I5CIKA58+fIy0tDSYmJuIHtHeRdGIuCAJGjx6N3bt3Izo6Go6OjgrnW7RoAU1NTURFRaFfv34AgKSkJNy9exceHh4AAA8PD4SEhCAtLQ2WlpYAgMjISBgZGcHNzU2sc/DgQYW2IyMjxTaIiN5XdTb0U3off/r9qvQ+iFRJJpPBxsYGycnJ+Ouvv1QdDlUyExMTcTL430g6Mff398fWrVuxd+9eGBoaimvCjY2NoaurC2NjY/j5+WH8+PEwMzODkZERRo8eDQ8PD7Rp0wYA0KV
LF7i5ueHLL7/E/Pnz8fDhQ3z33Xfw9/cXZ7xHjhyJlStXYtKkSRg6dCiOHz+OHTt24MCBAyobOxEREVUfWlpacHZ25nKW94ympmaJZsoLSToxX7NmDYBXt3h9XXh4OHx9fQEAS5YsgZqaGvr164ecnBx4e3tj9erVYl11dXXs378fo0aNgoeHB/T19eHj44NZs2aJdRwdHXHgwAGMGzcOy5YtQ61atfDDDz9wq0Qieu/1rWem6hCIqg01NTXo6OioOgySMEkn5iX5FquOjg5WrVqFVatWvbWOvb19kaUqb/L09MTFixdLHSMRERERUUXgtw+IiIiIiCSAiTkRERERkQQwMSciIiIikgAm5kREREREEsDEnIiIiIhIApiYExERERFJABNzIiIiIiIJkPQ+5u+D/x5T7q15nUOU2jwRERERVRDOmBMRERERSQATcyIiIiIiCeBSFiq3TuuuKr8TLskhIiKiao4z5kREREREEsAZcyIieiuv2qaqDoGI6L3BGXMiIiIiIgngjDmV2+65d5Tex+dcY05ERETVHBNzIiJ6q+aWDVUdAhHRe4NLWYiIiIiIJICJORERERGRBDAxJyIiIiKSAK4xJyKit7J4UQmd6FZCH0REVQBnzImIiIiIJICJORERERGRBDAxJyIiIiKSACbmREREREQSwC9/EhERUaWJO5ev9D6cld4DkXJwxpyIiIiISAI4Y070P3U29FN6H3/6/ar0PoiIiKhq4ow5EREREZEEMDEnIiIiIpIALmUh+p++9cxUHQIRERG9x5iYE/2PV21TVYdARERE7zEuZSEiIiIikgDOmBNVI/899pfS+3AOUXoXJCHC5QtK70PmOVjpfRARVQWcMSciIiIikgAm5kREREREEsDEnIiIiIhIArjGXMXizuUrtX1npbZORERERBWFiTkRSY7Fi0roRLcS+iCqQJPcO6g6BCJSMibmRNWIsv8CA1TOX2FeLtqm9D50Q7gTCBERSQvXmBMRERERSQBnzImIiKqAGrHK31Me3FOeSKU4Y05EREREJAFMzImIiIiIJIBLWYiIiKqAnMibSu9D11PpXRDRO3DGnIiIiIhIApiYExERERFJABNzIiIiIiIJYGJORERERCQB/PIn0f80t2yo6hCIiIjoPcbEnOh/LF5UQie6ldAHERERVUlMzImI6K22fXBI6X18LixVeh9ERFUB15gTEREREUkAE3MiIiIiIgngUhYiIiURLl9Qavsyz8FKbZ+IiCoXE3MiIiVR9i3Ueft0IqLqhYk5ERFRFfDfY38pvQ/nEKV3QUTvwMSciIioCog7l6/0PpyV3gMRvQu//ElEREREJAFMzImIiIiIJICJORERERGRBHCNORGRkij7y3r8oh4RUfXCxPwNq1atwoIFC/Dw4UM0adIEK1asQKtWrVQdFhERlQO/OElEVQGXsrxm+/btGD9+PGbOnIkLFy6gSZMm8Pb2RlpamqpDIyIiIqJqjjPmr1m8eDGGDx+OIUOGAADCwsJw4MAB/Pjjj5gyZYqKoyOiqkbZs7ScoSUiql6YmP9Pbm4u4uPjMXXqVLFMTU0NXl5eiImJUWFkRO8f3kiFiIjeR0zM/+eff/5BQUEBrKysFMqtrKxw48aNIvVzcnKQk5MjHmdkZAAAMjMzS9XvcxSUIdqSK208ZaHsMQCVM44XczcrvQ/dGb2V2n51eS2yCqrHOPjvu2Q4jpKpDmMApDeOwrqCICgrHKISY2JeRqGhoQgODi5Sbmdnp4Jo3m64sbGqQ6gQ1WUcWFT1x1FtXotqMI7q8lpwHNJRHcYAlG0cz549g3E1GT9VXUzM/6dGjRpQV1dHamqqQnlqaiqsra2L1J86dSrGjx8vHsvlcjx58gTm5uaQyWRKiTEzMxN2dna4d+8ejIyMlNJHZagO46gOYwCqxziqwxgAjkNKqsMYgOoxjsoYgyAIePbsGWxtbZXSPlFpMDH/Hy0tLbRo0QJRUVHo3bs3gFf
JdlRUFAICAorU19bWhra2tkKZiYlJJUQKGBkZVdn/ZF9XHcZRHcYAVI9xVIcxAByHlFSHMQDVYxzKHgNnykkqmJi/Zvz48fDx8YG7uztatWqFpUuXIjs7W9ylhYiIiIhIWZiYv6Z///549OgRZsyYgYcPH6Jp06Y4fPhwkS+EEhERERFVNCbmbwgICCh26YoUaGtrY+bMmUWW0FQ11WEc1WEMQPUYR3UYA8BxSEl1GANQPcZRHcZAVBoygfsDERERERGpnJqqAyAiIiIiIibmRERERESSwMSciIiIiEgCmJgTEREREUkAE/MqYtWqVXBwcICOjg5at26Nc+fOqTqkUjt16hQ+/vhj2NraQiaTYc+ePaoOqdRCQ0PRsmVLGBoawtLSEr1790ZSUpKqwyqVNWvWoHHjxuINOzw8PHDo0CFVh1Vu33//PWQyGQIDA1UdSqkEBQVBJpMpPOrXr6/qsErt77//xhdffAFzc3Po6uqiUaNGOH/+vKrDKhUHB4cir4VMJoO/v7+qQyuxgoICTJ8+HY6OjtDV1YWTkxNmz56NqrjPw7NnzxAYGAh7e3vo6uqibdu2iIuLU3VYRErFxLwK2L59O8aPH4+ZM2fiwoULaNKkCby9vZGWlqbq0EolOzsbTZo0wapVq1QdSpmdPHkS/v7+OHv2LCIjI5GXl4cuXbogOztb1aGVWK1atfD9998jPj4e58+fx4cffohevXrh2rVrqg6tzOLi4rB27Vo0btxY1aGUSYMGDfDgwQPxcfr0aVWHVCpPnz5Fu3btoKmpiUOHDuH69etYtGgRTE1NVR1aqcTFxSm8DpGRkQCATz/9VMWRldy8efOwZs0arFy5EomJiZg3bx7mz5+PFStWqDq0Uhs2bBgiIyOxefNmXLlyBV26dIGXlxf+/vtvVYdGpDwCSV6rVq0Ef39/8bigoECwtbUVQkNDVRhV+QAQdu/ereowyi0tLU0AIJw8eVLVoZSLqamp8MMPP6g6jDJ59uyZ4OzsLERGRgqdOnUSxo4dq+qQSmXmzJlCkyZNVB1GuUyePFlo3769qsOocGPHjhWcnJwEuVyu6lBKrEePHsLQoUMVyvr27SsMGjRIRRGVzfPnzwV1dXVh//79CuXNmzcXpk2bpqKoiJSPM+YSl5ubi/j4eHh5eYllampq8PLyQkxMjAojIwDIyMgAAJiZmak4krIpKCjAtm3bkJ2dDQ8PD1WHUyb+/v7o0aOHwr+RqubWrVuwtbVFnTp1MGjQINy9e1fVIZXKvn374O7ujk8//RSWlpZo1qwZ1q9fr+qwyiU3Nxc//fQThg4dCplMpupwSqxt27aIiorCzZs3AQCXLl3C6dOn0a1bNxVHVjr5+fkoKCiAjo6OQrmurm6V+4sSUWnwzp8S988//6CgoABWVlYK5VZWVrhx44aKoiIAkMvlCAwMRLt27dCwYUNVh1MqV65cgYeHB16+fAkDAwPs3r0bbm5uqg6r1LZt24YLFy5U6XWnrVu3xsaNG+Hi4oIHDx4gODgYHTp0wNWrV2FoaKjq8Erkzz//xJo1azB+/Hh8++23iIuLw5gxY6ClpQUfHx9Vh1cme/bsQXp6Onx9fVUdSqlMmTIFmZmZqF+/PtTV1VFQUICQkBAMGjRI1aGViqGhITw8PDB79my4urrCysoKP//8M2JiYlC3bl1Vh0ekNEzMicrI398fV69erZKzNy4uLkhISEBGRgZ27twJHx8fnDx5skol5/fu3cPYsWMRGRlZZFatKnl9JrNx48Zo3bo17O3tsWPHDvj5+akwspKTy+Vwd3fH3LlzAQDNmjXD1atXERYWVmUT8w0bNqBbt26wtbVVdSilsmPHDmzZsgVbt25FgwYNkJCQgMDAQNja2la512Lz5s0YOnQoatasCXV1dTRv3hwDBw5EfHy8qkMjUhom5hJXo0YNqKurIzU1VaE8NTUV1tbWKoqKAgICsH//fpw6dQq1atVSdTilpqWlJc46tWj
RAnFxcVi2bBnWrl2r4shKLj4+HmlpaWjevLlYVlBQgFOnTmHlypXIycmBurq6CiMsGxMTE9SrVw+3b99WdSglZmNjU+RDnaurK3799VcVRVQ+f/31F44dO4Zdu3apOpRSmzhxIqZMmYIBAwYAABo1aoS//voLoaGhVS4xd3JywsmTJ5GdnY3MzEzY2Nigf//+qFOnjqpDI1IarjGXOC0tLbRo0QJRUVFimVwuR1RUVJVdE1yVCYKAgIAA7N69G8ePH4ejo6OqQ6oQcrkcOTk5qg6jVDp37owrV64gISFBfLi7u2PQoEFISEiokkk5AGRlZeHOnTuwsbFRdSgl1q5duyLbht68eRP29vYqiqh8wsPDYWlpiR49eqg6lFJ7/vw51NQUf7Wrq6tDLperKKLy09fXh42NDZ4+fYojR46gV69eqg6JSGk4Y14FjB8/Hj4+PnB3d0erVq2wdOlSZGdnY8iQIaoOrVSysrIUZgGTk5ORkJAAMzMz1K5dW4WRlZy/vz+2bt2KvXv3wtDQEA8fPgQAGBsbQ1dXV8XRlczUqVPRrVs31K5dG8+ePcPWrVsRHR2NI0eOqDq0UjE0NCyytl9fXx/m5uZVas3/hAkT8PHHH8Pe3h7379/HzJkzoa6ujoEDB6o6tBIbN24c2rZti7lz5+Kzzz7DuXPnsG7dOqxbt07VoZWaXC5HeHg4fHx8oKFR9X5FfvzxxwgJCUHt2rXRoEEDXLx4EYsXL8bQoUNVHVqpHTlyBIIgwMXFBbdv38bEiRNRv379Kve7j6hUVL0tDJXMihUrhNq1awtaWlpCq1athLNnz6o6pFI7ceKEAKDIw8fHR9WhlVhx8QMQwsPDVR1aiQ0dOlSwt7cXtLS0BAsLC6Fz587C0aNHVR1WhaiK2yX2799fsLGxEbS0tISaNWsK/fv3F27fvq3qsErtt99+Exo2bChoa2sL9evXF9atW6fqkMrkyJEjAgAhKSlJ1aGUSWZmpjB27Fihdu3ago6OjlCnTh1h2rRpQk5OjqpDK7Xt27cLderUEbS0tARra2vB399fSE9PV3VYREolE4QqeDswIiIiIqJqhmvMiYiIiIgkgIk5EREREZEEMDEnIiIiIpIAJuZERERERBLAxJyIiIiISAKYmBMRERERSQATcyIiIiIiCWBiTkTvtZSUFMhkMiQkJLyznqenJwIDAyslJiIiej8xMSciyfH19YVMJoNMJoOWlhbq1q2LWbNmIT8/v9zt9u7dW6HMzs4ODx48QMOGDQEA0dHRkMlkSE9PV6i3a9cuzJ49u1z9/5s3PyQUHhc+DA0N0aBBA/j7++PWrVtKjYWIiCofE3MikqSuXbviwYMHuHXrFr755hsEBQVhwYIFZWqroKAAcrm82HPq6uqwtraGhobGO9swMzODoaFhmfovr2PHjuHBgwe4dOkS5s6di8TERDRp0gRRUVEqiYeIiJSDiTkRSZK2tjasra1hb2+PUaNGwcvLC/v27QMALF68GI0aNYK+vj7s7Ozw9ddfIysrS7x248aNMDExwb59++Dm5gZtbW0MHToUERER2Lt3rzgDHR0drTBLnZKSgg8++AAAYGpqCplMBl9fXwBFl7I8ffoUgwcPhqmpKfT09NCtWzeFWezCGI4cOQJXV1cYGBiIHzZKy9zcHNbW1qhTpw569eqFY8eOoXXr1vDz80NBQUEZnl0iIpIiJuZEVCXo6uoiNzcXAKCmpobly5fj2rVriIiIwPHjxzFp0iSF+s+fP8e8efPwww8/4Nq1a1i+fDk+++wzMTl+8OAB2rZtq3CNnZ0dfv31VwBAUlISHjx4gGXLlhUbj6+vL86fP499+/YhJiYGgiCge/fuyMvLU4hh4cKF2Lx5M06dOoW7d+9iwoQJ5X4u1NTUMHbsWPz111+Ij48vd3tERCQN7/7bLRGRigmCgKioKBw5cgSjR48GAIWZawcHB8yZMwcjR47E6tWrxfK8vDysXr0aTZo0Ect0dXW
Rk5MDa2vrYvtSV1eHmZkZAMDS0hImJibF1rt16xb27duHM2fOiMn9li1bYGdnhz179uDTTz8VYwgLC4OTkxMAICAgALNmzSrbE/GG+vXrA3i1Dr1Vq1YV0iYREakWE3MikqT9+/fDwMAAeXl5kMvl+PzzzxEUFATg1Zrr0NBQ3LhxA5mZmcjPz8fLly/x/Plz6OnpAQC0tLTQuHFjpcSWmJgIDQ0NtG7dWiwzNzeHi4sLEhMTxTI9PT0xKQcAGxsbpKWlVUgMgiAAAGQyWYW0R0REqselLEQkSR988AESEhJw69YtvHjxAhEREdDX10dKSgp69uyJxo0b49dff0V8fDxWrVoFAOJSF+DV7Liqk1ZNTU2FY5lMJibU5VX4AcDR0bFC2iMiItXjjDkRSZK+vj7q1q1bpDw+Ph5yuRyLFi2CmtqruYUdO3aUqE0tLa1//bKklpYWALyznqurK/Lz8xEbGysuZXn8+DGSkpLg5uZWoljKQy6XY/ny5XB0dESzZs2U3h8REVUOzpgTUZVSt25d5OXlYcWKFfjzzz+xefNmhIWFlehaBwcHXL58GUlJSfjnn38UvqhZyN7eHjKZDPv378ejR48Udnsp5OzsjF69emH48OE4ffo0Ll26hC+++AI1a9ZEr169yj3GNz1+/BgPHz7En3/+iX379sHLywvnzp3Dhg0boK6uXuH9ERGRajAxJ6IqpUmTJli8eDHmzZuHhg0bYsuWLQgNDS3RtcOHD4eLiwvc3d1hYWGBM2fOFKlTs2ZNBAcHY8qUKbCyskJAQECxbYWHh6NFixbo2bMnPDw8IAgCDh48WGT5SkXw8vKCjY0NGjVqhClTpsDV1RWXL18Wt3YkIqLqQSZU1IJHIiIiIiIqM86YExERERFJABNzIiIiIiIJYGJORERERCQBTMyJiIiIiCSAiTkRERERkQQwMSciIiIikgAm5kREREREEsDEnIiIiIhIApiYExERERFJABNzIiIiIiIJYGJORERERCQBTMyJiIiIiCTg/wD1w8OR9+ukgwAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "fig, ax, df = plot_label_distributions(\n", + " partitioner,\n", + " label_name=\"label\",\n", + " plot_type=\"bar\",\n", + " size_unit=\"absolute\",\n", + " partition_id_axis=\"x\",\n", + " legend=True,\n", + " verbose_labels=True,\n", + " title=\"Per Partition Labels Distribution\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "be05badab744d9f7", + "metadata": {}, + "source": [ + "You can configure many details directly using the function parameters. The ones that can interest you the most are:\n", + "\n", + "* `size_unit` to have the sizes normalized such that they sum up to 1 and express the fraction of the data in each partition,\n", + "* `legend` and `verbose_labels` in case the dataset has more descriptive names and not numbers,\n", + "* `cmap` to change the values of the bars (for an overview of the available colors, have a look at [link](https://matplotlib.org/stable/users/explain/colors/colormaps.html); check out `cmap=\"tab20b\"`) \n", + "\n", + " And for even greater control, you can specify `plot_kwargs` and `legend_kwargs` as `Dict`, which will be further passed to the `plot` and `legend` functions." + ] + }, + { + "cell_type": "markdown", + "id": "3dbf6dc4ede79f05", + "metadata": {}, + "source": [ + "You can also inspect the exact numbers that were used to create this plot. Three objects are returned (see reference [here](https://flower.ai/docs/datasets/ref-api/flwr_datasets.visualization.plot_label_distributions.html#flwr_datasets.visualization.plot_label_distributions)). Let's inspect the returned DataFrame." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f6edd14d8b260e9e", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
airplaneautomobilebirdcatdeerdogfroghorseshiptruck
Partition ID
08177941462212343225456384149
11416697530340903868
2041124543511158421
37621591100511201662198213512175
424371421924004251151477
5677917025255247727445900
6422244863809290380506
7122281159721741038172716825154
825629342751848151122401417
91136107350357126711223
\n", + "
" + ], + "text/plain": [ + " airplane automobile bird cat deer dog frog horse ship \\\n", + "Partition ID \n", + "0 817 794 1462 2123 432 25 456 384 14 \n", + "1 1416 6 97 5 3 0 3409 0 3 \n", + "2 0 4 11 2 454 3 511 15 84 \n", + "3 762 159 1100 51 120 166 2 1982 1351 \n", + "4 2 43 714 2 19 2400 425 1 151 \n", + "5 67 79 170 25 2552 477 27 44 590 \n", + "6 422 2 4 486 380 92 90 380 50 \n", + "7 122 2811 597 2174 1038 1727 1 682 515 \n", + "8 256 29 342 75 1 84 8 1511 2240 \n", + "9 1136 1073 503 57 1 26 71 1 2 \n", + "\n", + " truck \n", + "Partition ID \n", + "0 9 \n", + "1 868 \n", + "2 21 \n", + "3 2175 \n", + "4 477 \n", + "5 0 \n", + "6 6 \n", + "7 4 \n", + "8 1417 \n", + "9 23 " + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "df" + ] + }, + { + "cell_type": "markdown", + "id": "2902213a", + "metadata": {}, + "source": [ + "Each row represents a unique partition ID, and the columns represent unique labels (either in the verbose version if `verbose_labels=True` or typically `int` values otherwise, representing the partition IDs).\n", + "That you can index the DataFrame `df[partition_id, label_id]` to get the number of samples in `partition_id` for the specified `label_id.`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8ffe4039", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "714" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "df.loc[4, \"bird\"]" + ] + }, + { + "cell_type": "markdown", + "id": "2e6c17af529a668f", + "metadata": {}, + "source": [ + "Let's see a plot with `size_unit=\"percent\"`, which is another excellent way to understand the partitions. In this mode, the number of datapoints for each class in a given partition are normalized, so they sum up to 100." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a241894a47f3cc9f", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAtQAAAHHCAYAAACfh89YAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy80BEi2AAAACXBIWXMAAA9hAAAPYQGoP6dpAABgdklEQVR4nO3deVxU5f///+ew76KkLIqIior7brikJe/MpVxLzRJyKwOX3MtccE1Tc0stK7dssXJr00zT0gy3MEtzS9NPuWUqggkC5/eHP+fbCBowDAP4uN9uc7txrnPOdb0OTPnk4ppzTIZhGAIAAACQKw72LgAAAAAozAjUAAAAgBUI1AAAAIAVCNQAAACAFQjUAAAAgBUI1AAAAIAVCNQAAACAFQjUAAAAgBUI1AAAAIAVCNRAERcdHa1y5cpl69jx48fLZDLZtqB80KJFC1WvXj1P+yxXrpyio6PztM/sWrp0qUwmk06ePGnzsW5/v5w8eVImk0kzZsyw+dhS0XkPAri3EKhRpN0KIrdebm5uqlSpkmJjY3Xu3Dmbj38rHNx6eXh4qGrVqnr55ZeVmJiYZ+P8+eefGj9+vBISEv7z2GvXrmn8+PHaunVrno2fF0wmk2JjY+1dhs1t3brV4j3h6uoqf39/tWjRQlOmTNGFCxfyZJyC+nOWCnZtAJAbBGrcEyZMmKAVK1Zo/vz5aty4sRYuXKiIiAhdu3YtX8ZfuHChVqxYoVmzZqlKlSqaPHmyHnnkERmGkSf9//nnn4qLi8syUC9evFiHDx82b1+7dk1xcXFZhpmXX35Z//zzT57UhLsbOHCgVqxYoTfffFPDhw9XiRIlNG7cOIWHh2vLli0Wxz799NP6559/FBISku3+7/Zzvpvb3y+2wHsQQFHjZO8CgPzQunVr1a9fX5LUp08f+fn5adasWVq3bp26d+9uVd/Xrl2Th4fHXY/p0qWL7rvvPknSc889p86dO2v16tX64YcfFBERkeux09LSlJGRcddjnJ2ds92fk5OTnJz430J+aNasmbp06WLRtn//fj388MPq3LmzDh48qMDAQEmSo6OjHB0dbVpPcnKyPD09c/R+sQXegwAKI2aocU966KGHJEknTpwwt7377ruqV6+e3N3dVaJECXXr1k2nT5+2OO/W2ty9e/fqgQcekIeHh1566SWrxk9NTdXYsWNVr149FStWTJ6enmrWrJm++eYbi3P+vZZ19uzZqlChglxdXbVgwQI1aNBAkvTMM8+YlxIsXbpUkuWa2JMnT6pkyZKSpLi4OPOx48ePl5T1+tW0tDRNnDjRPF65cuX00ksvKSUlxeK4cuXKqV27dtq+fbsaNmwoNzc3lS9fXsuXL8/x9+dO1q1bp7Zt2yooKEiurq6qUKGCJk6cqPT09CyP37t3rxo3bix3d3eFhoZq0aJFmY5JSUnRuHHjVLFiRbm6uio4OFgjRozIdH23u3HjhuLi4hQWFiY3Nzf5+fmpadOm2rRpU66vr1atWpo9e7YuX76s+fPnm9uzWkO9Z88etWrVSvfdd5/5+nr16iXpv3/O0dHR8vLy0vHjx9WmTRt5e3urR48e5n13WnP/2muvKSQkRO7u7mrevLl+/vlni/0tWrRQixYtMp1XlN6DAJAVpgFwTzp+/Lgkyc/PT5I0efJkjRkzRk888YT69OmjCxcuaN68eXrggQf0448/ytfX13zuxYsX1bp1a3Xr1k1PPfWU/P39rRo/MTFRb731lrp3766+ffvq6tWrevvtt9WqVSvt2rVLtWvXtjh3yZIlun79uvr16ydXV1d17NhRV69e1dixY9WvXz81a9ZMktS4ceNM45YsWVIL
Fy5U//791bFjR3Xq1EmSVLNmzTvW2qdPHy1btkxdunTR0KFDFR8fr6lTp+rQoUNas2aNxbHHjh1Tly5d1Lt3b0VFRemdd95RdHS06tWrp2rVquX4+3S7pUuXysvLS0OGDJGXl5e2bNmisWPHKjExUa+++qrFsZcuXVKbNm30xBNPqHv37lq1apX69+8vFxcXc/DMyMjQY489pu3bt6tfv34KDw/XgQMH9Nprr+nIkSNau3btHWsZP368pk6dqj59+qhhw4ZKTEzUnj17tG/fPv3vf//L9TXe+v599dVXmjx5cpbHnD9/Xg8//LBKliypUaNGydfXVydPntTq1aslZe/nnJaWplatWqlp06aaMWPGf/6VZfny5bp69apiYmJ0/fp1zZkzRw899JAOHDiQo/8GCvt7EACyZABF2JIlSwxJxtdff21cuHDBOH36tPHBBx8Yfn5+hru7u/F///d/xsmTJw1HR0dj8uTJFuceOHDAcHJysmhv3ry5IclYtGhRtsYfN26cIck4fPiwceHCBePEiRPGG2+8Ybi6uhr+/v5GcnKykZaWZqSkpFicd+nSJcPf39/o1auXue3EiROGJMPHx8c4f/68xfG7d+82JBlLlizJVENUVJQREhJi3r5w4YIhyRg3btwd670lISHBkGT06dPH4rhhw4YZkowtW7aY20JCQgxJxrfffmtuO3/+vOHq6moMHTr0rt8nwzAMSUZMTMxdj7l27Vqmtmeffdbw8PAwrl+/bm679XOaOXOmuS0lJcWoXbu2UapUKSM1NdUwDMNYsWKF4eDgYHz33XcWfS5atMiQZOzYscPi+qKioszbtWrVMtq2bfuf13W7b775xpBkfPTRR3c8platWkbx4sXN27fexydOnDAMwzDWrFljSDJ27959xz7u9nOOiooyJBmjRo3Kct+/3y+33ne3/nu5JT4+3pBkvPDCC+a25s2bG82bN//PPgvqexAAcoslH7gnREZGqmTJkgoODla3bt3k5eWlNWvWqHTp0lq9erUyMjL0xBNP6K+//jK/AgICFBYWlmnphaurq5555pkcjV+5cmWVLFlSoaGhevbZZ1WxYkV9/vnn8vDwkKOjo1xcXCTdnDH9+++/lZaWpvr162vfvn2Z+urcubP5T+a29sUXX0iShgwZYtE+dOhQSdLnn39u0V61alXzDLl0czaycuXK+u233/KkHnd3d/PXV69e1V9//aVmzZrp2rVr+vXXXy2OdXJy0rPPPmvednFx0bPPPqvz589r7969kqSPPvpI4eHhqlKlisXP/taSnNt/9v/m6+urX375RUePHs2Ta/s3Ly8vXb169a5jS9Jnn32mGzdu5Hqc/v37Z/vYDh06qHTp0ubthg0bqlGjRub3iK0UtPcgAGSFJR+4J7z++uuqVKmSnJyc5O/vr8qVK8vB4ebvk0ePHpVhGAoLC8vy3Ns/pFW6dGlzAM6uTz75RD4+PnJ2dlaZMmVUoUIFi/3Lli3TzJkz9euvv1oEpNDQ0Ex9ZdVmK7///rscHBxUsWJFi/aAgAD5+vrq999/t2gvW7Zspj6KFy+uS5cu5Uk9v/zyi15++WVt2bIl020Hr1y5YrEdFBQkT09Pi7ZKlSpJurmO9/7779fRo0d16NChO/6Ccv78+TvWMmHCBLVv316VKlVS9erV9cgjj+jpp5++69KF7EpKSpK3t/cd9zdv3lydO3dWXFycXnvtNbVo0UIdOnTQk08+KVdX12yN4eTkpDJlymS7pqz++6hUqZJWrVqV7T5yo6C9BwEgKwRq3BMaNmxovsvH7TIyMmQymfTll19meScFLy8vi+1/z5Jm1wMPPGC+y8ft3n33XUVHR6tDhw4aPny4SpUqJUdHR02dOtW81tra8a2V3Qdt3OlOFEYe3B7w8uXLat68uXx8fDRhwgRVqFBBbm5u2rdvn0aOHPmfdzvJSkZGhmrUqKFZs2ZluT84OPiO5z7wwAM6fvy41q1bp6+++kpv
vfWWXnvtNS1atEh9+vTJcS233LhxQ0eOHLnrg2lMJpM+/vhj/fDDD/r000+1ceNG9erVSzNnztQPP/yQ6T2bFVdXV/MvlXnFZDJl+bO+04dGc9p3dtjyPQgAd0Kgxj2vQoUKMgxDoaGh5hnM/PTxxx+rfPnyWr16tUVoGDduXLb7yMmT5XJybEhIiDIyMnT06FGFh4eb28+dO6fLly/n6L7I1tq6dasuXryo1atX64EHHjC3//tOLf/2559/mm8Fd8uRI0ckyXzHiQoVKmj//v1q2bJlrp7OV6JECT3zzDN65plnlJSUpAceeEDjx4+3KlB//PHH+ueff9SqVav/PPb+++/X/fffr8mTJ+u9995Tjx499MEHH6hPnz55/rTBrJa2HDlyxOKOIMWLF89yacXts8iF9T0IAHfCGmrc8zp16iRHR0fFxcVlmsUyDEMXL1606fi3ZtT+PXZ8fLx27tyZ7T5uhcbLly//57G37uaQnWPbtGkjSZo9e7ZF+60Z3bZt22a7Rmtl9X1KTU3VggULsjw+LS1Nb7zxhsWxb7zxhkqWLKl69epJkp544gn98ccfWrx4cabz//nnHyUnJ9+xntvfF15eXqpYseJ/3m7vbvbv36/BgwerePHiiomJueNxly5dyvRevXU3mFvj5+TnnB1r167VH3/8Yd7etWuX4uPj1bp1a3NbhQoV9Ouvv1o87XH//v3asWOHRV+F9T0IAHfCDDXueRUqVNCkSZP04osv6uTJk+rQoYO8vb114sQJrVmzRv369dOwYcNsNn67du20evVqdezYUW3bttWJEye0aNEiVa1aVUlJSdm+Bl9fXy1atEje3t7y9PRUo0aNslxv7e7urqpVq+rDDz9UpUqVVKJECVWvXj3LJQa1atVSVFSU3nzzTfOSi127dmnZsmXq0KGDHnzwQauv/9/27NmjSZMmZWpv0aKFGjdurOLFiysqKkoDBw6UyWTSihUr7vin/KCgIE2bNk0nT55UpUqV9OGHHyohIUFvvvmmeV38008/rVWrVum5557TN998oyZNmig9PV2//vqrVq1apY0bN95xqVDVqlXVokUL1atXTyVKlNCePXv08ccfZ/vx6d99952uX7+u9PR0Xbx4UTt27ND69etVrFgxrVmzRgEBAXc8d9myZVqwYIE6duyoChUq6OrVq1q8eLF8fHzMATQnP+fsqFixopo2bar+/fsrJSVFs2fPlp+fn0aMGGE+plevXpo1a5ZatWql3r176/z581q0aJGqVatmsea9IL8HASBX7HR3ESBf3Lrd2N1uL3bLJ598YjRt2tTw9PQ0PD09jSpVqhgxMTHG4cOHzcc0b97cqFatWrbHv3ULsAsXLtzxmIyMDGPKlClGSEiI4erqatSpU8f47LPP7nj7sldffTXLftatW2dUrVrVcHJysriF3u39GIZhfP/990a9evUMFxcXi9uX3X7LMsMwjBs3bhhxcXFGaGio4ezsbAQHBxsvvviixW3qDOPmLcuyuo3cnW6ldjtJd3xNnDjRMAzD2LFjh3H//fcb7u7uRlBQkDFixAhj48aNhiTjm2++sRizWrVqxp49e4yIiAjDzc3NCAkJMebPn59p3NTUVGPatGlGtWrVDFdXV6N48eJGvXr1jLi4OOPKlSsW1/fv2+ZNmjTJaNiwoeHr62u4u7sbVapUMSZPnmy+Jd+d3Lpt3q2Xs7OzUbJkSeOBBx4wJk+enOmWiIaR+bZ5+/btM7p3726ULVvWcHV1NUqVKmW0a9fO2LNnj8V5d/o5R0VFGZ6enlnWd7f33cyZM43g4GDD1dXVaNasmbF///5M57/77rtG+fLlDRcXF6N27drGxo0bC817EAByy2QYfFIDAAAAyC3WUAMAAABWIFADAAAAViBQAwAAAFYgUAMAAABWIFADAAAAViBQAwAAAFbgwS6SMjIy9Oeff8rb2zvPH9cLAABswzAMXb16VUFBQXJwYI4Q9kOglvTnn38q
ODjY3mUAAIBcOH36tMqUKWPvMnAPI1BL8vb2lnTzP0gfHx87VwMAALIjMTFRwcHB5n/HAXshUEvmZR4+Pj4EagAAChmWa8LeWHAEAAAAWIFADQAAAFiBQA0AAABYgUANAAAAWIFADQAAAFiBQA0AAABYgUANAAAAWIFADQAAAFiBQA0AAABYgUANAAAAWMGugfrbb7/Vo48+qqCgIJlMJq1du9Ziv2EYGjt2rAIDA+Xu7q7IyEgdPXrU4pi///5bPXr0kI+Pj3x9fdW7d28lJSXl41UAAADgXmbXQJ2cnKxatWrp9ddfz3L/9OnTNXfuXC1atEjx8fHy9PRUq1atdP36dfMxPXr00C+//KJNmzbps88+07fffqt+/frl1yUAAADgHmcyDMOwdxGSZDKZtGbNGnXo0EHSzdnpoKAgDR06VMOGDZMkXblyRf7+/lq6dKm6deumQ4cOqWrVqtq9e7fq168vSdqwYYPatGmj//u//1NQUFC2xk5MTFSxYsV05coV+fj42OT6AABA3uLfbxQUBXYN9YkTJ3T27FlFRkaa24oVK6ZGjRpp586dkqSdO3fK19fXHKYlKTIyUg4ODoqPj8/3mgEAAHDvcbJ3AXdy9uxZSZK/v79Fu7+/v3nf2bNnVapUKYv9Tk5OKlGihPmYrKSkpCglJcW8nZiYmFdlAwAA4B5TYAO1LU2dOlVxcXFW9fFep6zXfeelJ1fH2HyME0sO2bT/0GfCbdq/ZPtrkLiO7CoK1yDx33d28bPIvqLw30ZRuAbAVgpsoA4ICJAknTt3ToGBgeb2c+fOqXbt2uZjzp8/b3FeWlqa/v77b/P5WXnxxRc1ZMgQ83ZiYqKCg4NzVN9D/3PL0fEAkN92frrFpv0TfgDgpgK7hjo0NFQBAQHavHmzuS0xMVHx8fGKiIiQJEVEROjy5cvau3ev+ZgtW7YoIyNDjRo1umPfrq6u8vHxsXgBAAAAuWHXGeqkpCQdO3bMvH3ixAklJCSoRIkSKlu2rAYPHqxJkyYpLCxMoaGhGjNmjIKCgsx3AgkPD9cjjzyivn37atGiRbpx44ZiY2PVrVu3bN/hAwAA/LcfPxpm8zFCn/nc5mMAtmDXQL1nzx49+OCD5u1byzCioqK0dOlSjRgxQsnJyerXr58uX76spk2basOGDXJz+3/LLVauXKnY2Fi1bNlSDg4O6ty5s+bOnZvv1wIAAIB7k10DdYsWLXS322CbTCZNmDBBEyZMuOMxJUqU0HvvvWeL8gAAAID/VGA/lAhkl60/eCXx4SsAAHBnBfZDiQAAAEBhQKAGAAAArECgBgAAAKzAGmoAKKJ4ABUA5A9mqAEAAAArMEMNAAD+U+MJbexdAlBgMUMNAAAAWIFADQAAAFiBJR8AAOA/7XM6ZPMxWFSCwooZagAAAMAKBGoAAADACgRqAAAAwAqsoQYAFFg8nAZAYcAMNQAAAGAFAjUAAABgBZZ85NL3n662+Rid+ve2+RgAAGTH6lX7bD5Gm9o2HwKwCWaoAQAAACsQqAEAAAArEKgBAAAAKxCoAQAAACsQqAEAAAArEKgBAAAAKxCoAQAAACsQqAEAAAArEKgBAAAAKxCoAQAAACvw6HEUeg/9z83eJQAAgHsYM9QAAACAFQjUAAAAgBUI1AAAAIAVCNQAAACAFfhQIgq97z9dbfMxOvXvbfMxAABA4USgBpBndn66xeZjhD4TbvMxAADICZZ8AAAAAFYgUAMAAABWIFADAAAAVmANNQCg4Gpw3d4VAMB/YoYaAAAAsAKBGgAAALACSz5yqfGENvYuAQAA5JP09HTduHHD3mUgHzk7O8vR0TFbxxKoAQAA7sAwDJ09e1aXL1+2dymwA19fXwUEBMhkMt31OAJ1Lq1c/rbNxxhaP8bmYwAAgDu7FaZLlSolDw+P/wxWKBoMw9C1a9d0/vx5SVJgYOBdjydQAwAAZCE9Pd0cpv38/OxdDvKZu7u7JOn8+fMqVarUXZd/8KFE
AACALNxaM+3h4WHnSmAvt372/7V+nhlqALjNQ/9zs3cJAAoQlnncu7L7sydQ3+N2frrFpv2HPhNu0/5RsLhd/yIfRuGzBQCAgoVAfY+zfQAi/AAAUNAtXbpUgwcPtvpuJiaTSWvWrFGHDh3ypK7CgjXUAAAARUB0dPQ9F2QLCmaoAeA233+62uZjdOrf2+ZjAADyBzPUAAAARdysWbNUo0YNeXp6Kjg4WM8//7ySkpIyHbd27VqFhYXJzc1NrVq10unTpy32r1u3TnXr1pWbm5vKly+vuLg4paWlZTlmamqqYmNjFRgYKDc3N4WEhGjq1Kk2uT57Y4YaAG7TeEIbe5cAAHnKwcFBc+fOVWhoqH777Tc9//zzGjFihBYsWGA+5tq1a5o8ebKWL18uFxcXPf/88+rWrZt27NghSfruu+/Us2dPzZ07V82aNdPx48fVr18/SdK4ceMyjTl37lytX79eq1atUtmyZXX69OlMAb2oIFADAAAUcYMHDzZ/Xa5cOU2aNEnPPfecRaC+ceOG5s+fr0aNGkmSli1bpvDwcO3atUsNGzZUXFycRo0apaioKElS+fLlNXHiRI0YMSLLQH3q1CmFhYWpadOmMplMCgkJse1F2hFLPgAAAIq4r7/+Wi1btlTp0qXl7e2tp59+WhcvXtS1a9fMxzg5OalBgwbm7SpVqsjX11eHDh2SJO3fv18TJkyQl5eX+dW3b1+dOXPGop9boqOjlZCQoMqVK2vgwIH66quvbH+hdkKgBgAAKMJOnjypdu3aqWbNmvrkk0+0d+9evf7665JurnPOrqSkJMXFxSkhIcH8OnDggI4ePSo3t8wPxKpbt65OnDihiRMn6p9//tETTzyhLl265Nl1FSQs+QAAACjC9u7dq4yMDM2cOVMODjfnUletWpXpuLS0NO3Zs0cNGzaUJB0+fFiXL19WePjNh7TVrVtXhw8fVsWKFbM9to+Pj7p27aquXbuqS5cueuSRR/T333+rRIkSeXBlBQeBGgAAoIi4cuWKEhISLNruu+8+3bhxQ/PmzdOjjz6qHTt2aNGiRZnOdXZ21oABAzR37lw5OTkpNjZW999/vzlgjx07Vu3atVPZsmXVpUsXOTg4aP/+/fr55581adKkTP3NmjVLgYGBqlOnjhwcHPTRRx8pICBAvr6+trh0u2LJBwAAQBGxdetW1alTx+K1YsUKzZo1S9OmTVP16tW1cuXKLG9f5+HhoZEjR+rJJ59UkyZN5OXlpQ8//NC8v1WrVvrss8/01VdfqUGDBrr//vv12muv3fHDht7e3po+fbrq16+vBg0a6OTJk/riiy/Ms+RFCTPUAAAARcDSpUu1dOnSO+5/4YUXLLaffvpp89fR0dGKjo6WJHXq1OmOfbRq1UqtWrW6437DMMxf9+3bV3379v2PqouGovcrAgAAAJCPCnSgTk9P15gxYxQaGip3d3dVqFBBEydOtPjtxzAMjR07VoGBgXJ3d1dkZKSOHj1qx6oBAABwLynQgXratGlauHCh5s+fr0OHDmnatGmaPn265s2bZz5m+vTpmjt3rhYtWqT4+Hh5enqqVatWun79uh0rBwAAwL2iQK+h/v7779W+fXu1bdtW0s0n+7z//vvatWuXpJuz07Nnz9bLL7+s9u3bS5KWL18uf39/rV27Vt26dbNb7QAAALg3FOgZ6saNG2vz5s06cuSIpJtP6Nm+fbtat24tSTpx4oTOnj2ryMhI8znFihVTo0aNtHPnzjv2m5KSosTERIsXAAAAkBsFeoZ61KhRSkxMVJUqVeTo6Kj09HRNnjxZPXr0kCSdPXtWkuTv729xnr+/v3lfVqZOnaq4uDjbFQ4AAIB7RoEO1KtWrdLKlSv13nvvqVq1akpISNDgwYMVFBSkqKioXPf74osvasiQIebtxMREBQcH56iP8F6Ncz0+AAAAio4CHaiHDx+uUaNGmddC16hRQ7///rumTp2qqKgoBQQESJLOnTunwMBA83nn
zp1T7dq179ivq6urXF1dbVo7AAAA7g0Feg31tWvXMj1Nx9HRURkZGZKk0NBQBQQEaPPmzeb9iYmJio+PV0RERL7WCgAAgHtTgQ7Ujz76qCZPnqzPP/9cJ0+e1Jo1azRr1ix17NhRkmQymTR48GBNmjRJ69ev14EDB9SzZ08FBQWpQ4cO9i0eAACgiDt58qRMJpMSEhLsXYpdFeglH/PmzdOYMWP0/PPP6/z58woKCtKzzz6rsWPHmo8ZMWKEkpOT1a9fP12+fFlNmzbVhg0b5ObmZsfKAQBAUXZiyaF8HS/0mfAcHd+iRQvVrl1bs2fPtk1BsFCgA7W3t7dmz5591zeDyWTShAkTNGHChPwrDAAAoBAzDEPp6elycirQUbDQKNBLPgAAAJAz0dHR2rZtm+bMmSOTySSTyaSlS5fKZDLpyy+/VL169eTq6qrt27crOjo60zLZwYMHq0WLFubtjIwMTZ8+XRUrVpSrq6vKli2ryZMnZzl2enq6evXqpSpVqujUqVM2vMqChV9LAAAAipA5c+boyJEjql69uvkv+L/88oukm8/4mDFjhsqXL6/ixYtnq78XX3xRixcv1muvvaamTZvqzJkz+vXXXzMdl5KSou7du+vkyZP67rvvVLJkyby7qAKOQA0AAFCEFCtWTC4uLvLw8DDfYvhWAJ4wYYL+97//Zbuvq1evas6cOZo/f775GSAVKlRQ06ZNLY5LSkpS27ZtlZKSom+++UbFihXLo6spHAjUAFBEff/papv236l/b5v2DyDv1a9fP0fHHzp0SCkpKWrZsuVdj+vevbvKlCmjLVu2yN3d3ZoSCyXWUAMAANwjPD09LbYdHBxkGIZF240bN8xfZzcct2nTRj/99JN27txpfZGFEIEaAACgiHFxcVF6evp/HleyZEmdOXPGou3f95QOCwuTu7u7xUP0stK/f3+98soreuyxx7Rt27Zc1VyYseQDAAAb2/npFpuPkdP7FOeUW/ICm/aPvFWuXDnFx8fr5MmT8vLyMj9l+nYPPfSQXn31VS1fvlwRERF699139fPPP6tOnTqSJDc3N40cOVIjRoyQi4uLmjRpogsXLuiXX35R796Wy74GDBig9PR0tWvXTl9++WWmddZFGTPUAAAARcywYcPk6OioqlWrqmTJkne8hV2rVq00ZswYjRgxQg0aNNDVq1fVs2dPi2PGjBmjoUOHauzYsQoPD1fXrl11/vz5LPsbPHiw4uLi1KZNG33//fd5fl0FFTPUAAAAOWTrvwhYq1KlSpnWM0dHR2d5bFxcnOLi4u7Yl4ODg0aPHq3Ro0dn2leuXLlMa7CHDBmiIUOG5LzoQowZagAAAMAKzFADyDONJ7SxdwkAAOQ7ZqgBAAAAKxCoAQAAACsQqAEAAAArEKgBAAAAKxCoAQAAACsQqAEAAAArEKgBAAAAKxCoAQAA7hHR0dHq0KHDXY8pV66cZs+enS/1FBU82AUAACCH3uv0er6O9+TqmHwba/fu3fL09My38YoCAjVQQOz8dIvNxwh9JtzmY6Dg4MmVAHKjZMmS9i6h0GHJBwAAQBHz8ccfq0aNGnJ3d5efn58iIyOVnJxs3j9jxgwFBgbKz89PMTExunHjhnnf7Us+TCaTFi5cqNatW8vd3V3ly5fXxx9/nJ+XU+AxQw0At1m5/G2bjzG0fv79+RbAveXMmTPq3r27pk+fro4dO+rq1av67rvvZBiGJOmbb75RYGCgvvnmGx07dkxdu3ZV7dq11bdv3zv2OWbMGL3yyiuaM2eOVqxYoW7duunAgQMKD+cvnxKBGgAAoEg5c+aM0tLS1KlTJ4WEhEiSatSoYd5fvHhxzZ8/X46OjqpSpYratm2rzZs33zVQP/744+rTp48kaeLEidq0aZPmzZunBQsW2PZiCgmWfAAAABQhtWrVUsuWLVWjRg09/vjjWrx4sS5dumTeX61aNTk6Opq3AwMDdf78+bv2GRERkWn70KFDeVt4IUagBgAA
KEIcHR21adMmffnll6patarmzZunypUr68SJE5IkZ2dni+NNJpMyMjLsUWqRQaAGAAAoYkwmk5o0aaK4uDj9+OOPcnFx0Zo1a3Ld3w8//JBpm/XT/w9rqAEAAIqQ+Ph4bd68WQ8//LBKlSql+Ph4XbhwQeHh4frpp59y1edHH32k+vXrq2nTplq5cqV27dqlt9+2/Qe4CwsCNQAAQBHi4+Ojb7/9VrNnz1ZiYqJCQkI0c+ZMtW7dWh9++GGu+oyLi9MHH3yg559/XoGBgXr//fdVtWrVPK688CJQAwAKLG5hiIIqP59cmFPh4eHasGFDlvuWLl2aqe32x4yfPHky0zFBQUH66quv8qC6ook11AAAAIAVCNQAAACAFVjyAQAAgDu69YRF3Bkz1AAAAIAVCNQAAACAFQjUAAAAgBVYQw0ARZStbznH7eYA4CZmqAEAAAArEKgBAAAAKxCoAQAAipgWLVpo8ODB9i7jnsEaagAAgBw6u9C2n1G4XUD/3vk6HnKGGWoAAADcVWpqqr1LKNAI1AAAAEVQRkaGRowYoRIlSiggIEDjx4837zt16pTat28vLy8v+fj46IknntC5c+fM+8ePH6/atWvrrbfeUmhoqNzc3CRJH3/8sWrUqCF3d3f5+fkpMjJSycnJ5vPeeusthYeHy83NTVWqVNGCBQvy7XrtiSUfAAAARdCyZcs0ZMgQxcfHa+fOnYqOjlaTJk3UsmVLc5jetm2b0tLSFBMTo65du2rr1q3m848dO6ZPPvlEq1evlqOjo86cOaPu3btr+vTp6tixo65evarvvvvO/GjylStXauzYsZo/f77q1KmjH3/8UX379pWnp6eioqLs9F3IHwRqAACAIqhmzZoaN26cJCksLEzz58/X5s2bJUkHDhzQiRMnFBwcLElavny5qlWrpt27d6tBgwaSbi7zWL58uUqWLClJ2rdvn9LS0tSpUyeFhIRIkmrUqGEeb9y4cZo5c6Y6deokSQoNDdXBgwf1xhtvFPlAzZIPAACAIqhmzZoW24GBgTp//rwOHTqk4OBgc5iWpKpVq8rX11eHDh0yt4WEhJjDtCTVqlVLLVu2VI0aNfT4449r8eLFunTpkiQpOTlZx48fV+/eveXl5WV+TZo0ScePH7fxldofM9QAAABFkLOzs8W2yWRSRkZGts/39PS02HZ0dNSmTZv0/fff66uvvtK8efM0evRoxcfHy8PDQ5K0ePFiNWrUKNN5RR0z1AAAAPeQ8PBwnT59WqdPnza3HTx4UJcvX1bVqlXveq7JZFKTJk0UFxenH3/8US4uLlqzZo38/f0VFBSk3377TRUrVrR4hYaG2vqS7I4ZagAAgHtIZGSkatSooR49emj27NlKS0vT888/r+bNm6t+/fp3PC8+Pl6bN2/Www8/rFKlSik+Pl4XLlxQeHi4JCkuLk4DBw5UsWLF9MgjjyglJUV79uzRpUuXNGTIkPy6PLsgUAMAANxDTCaT1q1bpwEDBuiBBx6Qg4ODHnnkEc2bN++u5/n4+Ojbb7/V7NmzlZiYqJCQEM2cOVOtW7eWJPXp00ceHh569dVXNXz4cHl6eqpGjRr3xBMbCdQAAAA5VNCfXPjv29/dsnbtWvPXZcuW1bp16+54/vjx4y3uWy3dXCqyYcOGu4775JNP6sknn8xJqUUCa6gBAAAAKxCoAQAAACsQqAEAAAArEKgBAAAAKxCoAQAAACsQqAEAAAAr5DpQnzlzRl26dFHJkiVVokQJPfroo/rtt9/ysjYAAACgwMt1oO7Vq5eqV6+ubdu2acuWLfL3978n7zsIAACAe1u2A/WgQYOUnJxs3j527JhGjhypqlWrqnbt2ho0aJAOHz6c5wX+8ccfeuqpp+Tn5yd3d3fVqFFDe/bsMe83DENjx45VYGCg3N3dFRkZqaNHj+Z5HQAAAEBWsh2oy5Qpo3r16mn9+vWSpK5du6pRo0YaNWqUhg4dqscee0w9evTI0+IuXbqkJk2a
yNnZWV9++aUOHjyomTNnqnjx4uZjpk+frrlz52rRokWKj4+Xp6enWrVqpevXr+dpLQAAAIWFYRjq16+fSpQoIZPJpISEBHuXVKRl+9Hjw4cPV5cuXfT8889r6dKlmjdvnho1aqStW7cqPT1d06dPV5cuXfK0uGnTpik4OFhLliwxt4WGhpq/NgxDs2fP1ssvv6z27dtLkpYvXy5/f3+tXbtW3bp1y9N6AAAAJGl1m7b5Ol6nLz7P0fEbNmzQ0qVLtXXrVpUvX1733XefjSqDlMM11KGhofryyy/VuXNnNW/eXCdPntSMGTM0e/ZsPf744zKZTHla3Pr161W/fn09/vjjKlWqlOrUqaPFixeb9584cUJnz55VZGSkua1YsWJq1KiRdu7cmae1AAAAFBbHjx9XYGCgGjdurICAADk5Wc6hpqam2qmyoinHH0q8ePGievTood27d+vHH39URESEfvrpJ1vUpt9++00LFy5UWFiYNm7cqP79+2vgwIFatmyZJOns2bOSJH9/f4vz/P39zfuykpKSosTERIsXAABAURAdHa0BAwbo1KlTMplMKleunFq0aKHY2FgNHjxY9913n1q1aiVJ2rZtmxo2bChXV1cFBgZq1KhRSktLM/d19epV9ejRQ56engoMDNRrr72mFi1aaPDgwXa6uoIp24F68+bN8vf3V8mSJVWmTBn9+uuveueddzR16lR1795dI0aM0D///JOnxWVkZKhu3bqaMmWK6tSpo379+qlv375atGiRVf1OnTpVxYoVM7+Cg4PzqGIAAAD7mjNnjiZMmKAyZcrozJkz2r17tyRp2bJlcnFx0Y4dO7Ro0SL98ccfatOmjRo0aKD9+/dr4cKFevvttzVp0iRzX0OGDNGOHTu0fv16bdq0Sd9995327dtnr0srsLIdqGNiYjRixAhdu3ZN8+fPN/9m8uCDD2rfvn1ydnZW7dq187S4wMBAVa1a1aItPDxcp06dkiQFBARIks6dO2dxzLlz58z7svLiiy/qypUr5tfp06fztG4AAAB7KVasmLy9veXo6KiAgACVLFlSkhQWFqbp06ercuXKqly5shYsWKDg4GDNnz9fVapUUYcOHRQXF6eZM2cqIyNDV69e1bJlyzRjxgy1bNlS1atX15IlS5Senm7nKyx4sh2oz5w5o7Zt28rNzU2PPPKILly4YN7n6uqqyZMna/Xq1XlaXJMmTTLdiu/IkSMKCQmRdHNNd0BAgDZv3mzen5iYqPj4eEVERNyxX1dXV/n4+Fi8AAAAirJ69epZbB86dEgREREWn4Fr0qSJkpKS9H//93/67bffdOPGDTVs2NC8v1ixYqpcuXK+1VxYZPsuH4899pi6dOmixx57TNu3b1ebNm0yHVOtWrU8Le6FF15Q48aNNWXKFD3xxBPatWuX3nzzTb355puSJJPJpMGDB2vSpEkKCwtTaGioxowZo6CgIHXo0CFPawEA5L/wXo3tXQJQZHh6etq7hCIr2zPUb7/9tp599llduXJFTz31lGbPnm3Dsm5q0KCB1qxZo/fff1/Vq1fXxIkTNXv2bIv7XY8YMUIDBgxQv3791KBBAyUlJWnDhg1yc3OzeX0AAACFVXh4uHbu3CnDMMxtO3bskLe3t8qUKaPy5cvL2dnZvAZbkq5cuaIjR47Yo9wCLdsz1C4uLhowYIAta8lSu3bt1K5duzvuN5lMmjBhgiZMmJCPVQEAABRuzz//vGbPnq0BAwYoNjZWhw8f1rhx4zRkyBA5ODjI29tbUVFRGj58uEqUKKFSpUpp3LhxcnBwyPNbJRd2Ob5tHgAAAAq/0qVL64svvtCuXbtUq1YtPffcc+rdu7defvll8zGzZs1SRESE2rVrp8jISDVp0kTh4eGsBLhNtmeoAQAAcFNOn1yY3wYPHmxxr+itW7dmeVzz5s21a9euO/bj7e2tlStXmreTk5MVFxenfv365VWpRQKBGgAAAFn68ccf9euvv6phw4a6cuWK
eYlt+/bt7VxZwUKgBgAAwB3NmDFDhw8flouLi+rVq6fvvvtO9913n73LKlByHKjLly+v3bt3y8/Pz6L98uXLqlu3rn777bc8Kw4AAAD2U6dOHe3du9feZRR4Of5Q4smTJ7N8Qk5KSor++OOPPCkKAAAAKCyyPUO9fv1689cbN25UsWLFzNvp6enavHmzypUrl6fFAQAAAAVdtgP1rScPmkwmRUVFWexzdnZWuXLlNHPmzDwtDgAAACjosh2oMzIyJEmhoaHavXs3i9EBAAAA5eJDiSdOnLBFHQAAAEChlKvb5m3evFmbN2/W+fPnzTPXt7zzzjt5Uhhwr3nofzx1CgCAwijHd/mIi4vTww8/rM2bN+uvv/7SpUuXLF4AAAAoeFq0aGHx9ETknRzPUC9atEhLly7V008/bYt6AAAACryze17P1/EC6sfk63jImRzPUKempqpx48a2qAUAAAAodHIcqPv06aP33nvPFrUAAAAgDyQnJ6tnz57y8vJSYGBgplsbX7p0ST179lTx4sXl4eGh1q1b6+jRoxbHLF68WMHBwfLw8FDHjh01a9Ys+fr65uNVFB45XvJx/fp1vfnmm/r6669Vs2ZNOTs7W+yfNWtWnhUHAACAnBs+fLi2bdumdevWqVSpUnrppZe0b98+1a5dW5IUHR2to0ePav369fLx8dHIkSPVpk0bHTx4UM7OztqxY4eee+45TZs2TY899pi+/vprjRkzxr4XVYDlOFD/9NNP5h/Gzz//bLHPZDLlSVEAAADInaSkJL399tt699131bJlS0nSsmXLVKZMGUkyB+kdO3aYl/GuXLlSwcHBWrt2rR5//HHNmzdPrVu31rBhwyRJlSpV0vfff6/PPvvMPhdVwOU4UH/zzTe2qAMAAAB54Pjx40pNTVWjRo3MbSVKlFDlypUlSYcOHZKTk5PFfj8/P1WuXFmHDh2SJB0+fFgdO3a06Ldhw4YE6jvI8RrqW44dO6aNGzfqn3/+kSQZhpFnRQEAAACFRY4D9cWLF9WyZUtVqlRJbdq00ZkzZyRJvXv31tChQ/O8QAAAAGRfhQoV5OzsrPj4eHPbpUuXdOTIEUlSeHi40tLSLPZfvHhRhw8fVtWqVSVJlStX1u7duy36vX0b/0+OA/ULL7wgZ2dnnTp1Sh4eHub2rl27asOGDXlaHAAAAHLGy8tLvXv31vDhw7Vlyxb9/PPPio6OloPDzdgXFham9u3bq2/fvtq+fbv279+vp556SqVLl1b79u0lSQMGDNAXX3yhWbNm6ejRo3rjjTf05Zdf8nm5O8hxoP7qq680bdo088L2W8LCwvT777/nWWEAAADInVdffVXNmjXTo48+qsjISDVt2lT16tUz71+yZInq1aundu3aKSIiQoZh6IsvvjDfva1JkyZatGiRZs2apVq1amnDhg164YUX5ObmZq9LKtBy/KHE5ORki5npW/7++2+5urrmSVEAAAAFWUF/cqGXl5dWrFihFStWmNuGDx9u/rp48eJavnz5Xfvo27ev+vbta7FdsWLFvC+2CMjxDHWzZs0sfgAmk0kZGRmaPn26HnzwwTwtDgAAAPYxY8YM7d+/X8eOHdO8efO0bNkyRUVF2busAinHM9TTp09Xy5YttWfPHqWmpmrEiBH65Zdf9Pfff2vHjh22qBEAAAD5bNeuXZo+fbquXr2q8uXLa+7cuerTp4+9yyqQchyoq1evriNHjmj+/Pny9vZWUlKSOnXqpJiYGAUGBtqiRgAAAOSzVatW2buEQiPHgVqSihUrptGjR+d1LQAAAEChk+M11EuWLNFHH32Uqf2jjz7SsmXL8qQoAAAAoLDIcaCeOnWq7rvvvkztpUqV0pQpU/KkKAAAAKCwyHGgPnXqlEJDQzO1h4SE6NSpU3lSFAAAAFBY5DhQlypVSj/99FOm9v3798vPzy9PigIAAAAKixwH6u7du2vgwIH65ptvlJ6ervT0dG3ZskWDBg1St27dbFEjAAAA
UGDl+C4fEydO1MmTJ9WyZUs5Od08PSMjQz179mQNNQAAQAHVokUL1a5dW7Nnz7Z3KUVOjgK1YRg6e/asli5dqkmTJikhIUHu7u6qUaOGQkJCbFUjAABAgTJzYN18HW/o3H35Oh5yJseBumLFivrll18UFhamsLAwW9UFAACAQiQ1NVUuLi72LsMucrSG2sHBQWFhYbp48aKt6gEAAICVkpOT1bNnT3l5eSkwMFAzZ8602J+SkqJhw4apdOnS8vT0VKNGjbR161aLY7Zv365mzZrJ3d1dwcHBGjhwoJKTk837y5Urp4kTJ6pnz57y8fFRv3798uPSCqQcr6F+5ZVXNHz4cC1cuFDVq1e3RU0AgDwQ3quxvUsAYCfDhw/Xtm3btG7dOpUqVUovvfSS9u3bp9q1a0uSYmNjdfDgQX3wwQcKCgrSmjVr9Mgjj+jAgQMKCwvT8ePH9cgjj2jSpEl65513dOHCBcXGxio2NlZLliwxjzNjxgyNHTtW48aNs9OVFgw5DtQ9e/bUtWvXVKtWLbm4uMjd3d1i/99//51nxQEAACBnkpKS9Pbbb+vdd99Vy5YtJUnLli1TmTJlJN18psiSJUt06tQpBQUFSZKGDRumDRs2aMmSJZoyZYqmTp2qHj16aPDgwZKksLAwzZ07V82bN9fChQvl5uYmSXrooYc0dOjQ/L/IAibHgZpPhgIAABRcx48fV2pqqho1amRuK1GihCpXrixJOnDggNLT01WpUiWL81JSUszPFNm/f79++uknrVy50rzfMAxlZGToxIkTCg8PlyTVr1/f1pdTKOQ4UEdFRdmijkJn9Srbf9q2TW2bDwEAAO4xSUlJcnR01N69e+Xo6Gixz8vLy3zMs88+q4EDB2Y6v2zZsuavPT09bVtsIZHjQC3d/M1nyZIlOn78uObMmaNSpUrpyy+/VNmyZVWtWrW8rhEAAADZVKFCBTk7Oys+Pt4cfi9duqQjR46oefPmqlOnjtLT03X+/Hk1a9Ysyz7q1q2rgwcPqmLFivlZeqGV4yclbtu2TTVq1FB8fLxWr16tpKQkSTf/NHCvL0gHAACwNy8vL/Xu3VvDhw/Xli1b9PPPPys6OloODjdjX6VKldSjRw/17NlTq1ev1okTJ7Rr1y5NnTpVn3/+uSRp5MiR+v777xUbG6uEhAQdPXpU69atU2xsrD0vrcDK8Qz1qFGjNGnSJA0ZMkTe3t7m9oceekjz58/P0+IAAAAKooL+oJVXX31VSUlJevTRR+Xt7a2hQ4fqypUr5v1LlizRpEmTNHToUP3xxx+67777dP/996tdu3aSpJo1a2rbtm0aPXq0mjVrJsMwVKFCBXXt2tVel1Sg5ThQHzhwQO+9916m9lKlSumvv/7Kk6IAAACQe15eXlqxYoVWrFhhbhs+fLj5a2dnZ8XFxSkuLu6OfTRo0EBfffXVHfefPHkyT2otCnK85MPX11dnzpzJ1P7jjz+qdOnSeVIUAAAAUFjkOFB369ZNI0eO1NmzZ2UymZSRkaEdO3Zo2LBh6tmzpy1qBAAAAAqsHAfqKVOmqEqVKgoODlZSUpKqVq2qBx54QI0bN9bLL79sixoBAACAAivHa6hdXFy0ePFijR07VgcOHFBSUpLq1KmjsLAwW9QHAAAAFGjZDtQZGRl69dVXtX79eqWmpqply5YaN25cpkePAwAAAPeSbC/5mDx5sl566SV5eXmpdOnSmjNnjmJiYmxZGwAAAFDgZTtQL1++XAsWLNDGjRu1du1affrpp1q5cqUyMjJsWR8AAABQoGU7UJ86dUpt2rQxb0dGRspkMunPP/+0SWEAAABAYZDtQJ2WliY3NzeLNmdnZ924cSPPiwIAAAAKi2x/KNEwDEVHR8vV1dXcdv36dT333HPy9PQ0t61evTpvKwQAAAAKsGwH6qioqExtTz31VJ4WA+RG4wlt/vsgAADy0BcJsfk6Xpva8/NtrPHjx2vt2rVKSEjItzELu2wH6iVLltiy
DiDXVi5/2+ZjDK3PHW0AAEDWcvykRAAAABRsGRkZmj59uipWrChXV1eVLVtWkydPliSNHDlSlSpVkoeHh8qXL68xY8aYPxO3dOlSxcXFaf/+/TKZTDKZTFq6dKkdr6RwyPGTEgEAAFCwvfjii1q8eLFee+01NW3aVGfOnNGvv/4qSfL29tbSpUsVFBSkAwcOqG/fvvL29taIESPUtWtX/fzzz9qwYYO+/vprSVKxYsXseSmFAoEaAACgCLl69armzJmj+fPnmz8DV6FCBTVt2lSS9PLLL5uPLVeunIYNG6YPPvhAI0aMkLu7u7y8vOTk5KSAgAC71F8YEagBAACKkEOHDiklJUUtW7bMcv+HH36ouXPn6vjx40pKSlJaWpp8fHzyucqipVCtoX7llVdkMpk0ePBgc9v169cVExMjPz8/eXl5qXPnzjp37pz9igQAALAjd3f3O+7buXOnevTooTZt2uizzz7Tjz/+qNGjRys1NTUfKyx6Ck2g3r17t9544w3VrFnTov2FF17Qp59+qo8++kjbtm3Tn3/+qU6dOtmpSgAAAPsKCwuTu7u7Nm/enGnf999/r5CQEI0ePVr169dXWFiYfv/9d4tjXFxclJ6enl/lFgmFYslHUlKSevToocWLF2vSpEnm9itXrujtt9/We++9p4ceekjSzdv7hYeH64cfftD9999vr5IBAADsws3NTSNHjtSIESPk4uKiJk2a6MKFC/rll18UFhamU6dO6YMPPlCDBg30+eefa82aNRbnlytXTidOnFBCQoLKlCkjb29viwf7IbNCEahjYmLUtm1bRUZGWgTqvXv36saNG4qMjDS3ValSRWXLltXOnTvvGKhTUlKUkpJi3k5MTLRd8QAAoMjJzwet5MaYMWPk5OSksWPH6s8//1RgYKCee+459e7dWy+88IJiY2OVkpKitm3basyYMRo/frz53M6dO2v16tV68MEHdfnyZS1ZskTR0dF2u5bCoMAH6g8++ED79u3T7t27M+07e/asXFxc5Ovra9Hu7++vs2fP3rHPqVOnKi4uLq9LBQAAKBAcHBw0evRojR49OtO+6dOna/r06RZt//58mqurqz7++GNbl1ikFOg11KdPn9agQYO0cuVKubm55Vm/L774oq5cuWJ+nT59Os/6BgAAwL2lQAfqvXv36vz586pbt66cnJzk5OSkbdu2ae7cuXJycpK/v79SU1N1+fJli/POnTt313snurq6ysfHx+IFAAAA5EaBXvLRsmVLHThwwKLtmWeeUZUqVTRy5EgFBwfL2dlZmzdvVufOnSVJhw8f1qlTpxQREWGPkgEAKJL27V2VD6PUzocxgLxXoAO1t7e3qlevbtHm6ekpPz8/c3vv3r01ZMgQlShRQj4+PhowYIAiIiK4wwcAAADyRYEO1Nnx2muvycHBQZ07d1ZKSopatWqlBQsW2LssAADMHvpf3n0OCEDBU+gC9datWy223dzc9Prrr+v111+3T0EAAAC4pxXoDyUCAAAABR2BGgAAALACgRoAAACwAoEaAACgiGnRooXF0w9vV65cOc2ePTvH/Y4fP161a9fOdV1FVaH7UCIAAIC99Xmpcb6O99aU7/O0v927d8vT0zNP+7yXEagBAAXW6lX7bD5Gm9o2HwIocEqWLHnX/Tdu3JCzs3M+VVP4seQDAACgCEpLS1NsbKyKFSum++67T2PGjJFhGJIyL/kwmUxauHChHnvsMXl6emry5MmSpFdeeUX+/v7y9vZW7969df36dXtcSoFHoAYAACiCli1bJicnJ+3atUtz5szRrFmz9NZbb93x+PHjx6tjx446cOCAevXqpVWrVmn8+PGaMmWK9uzZo8DAQB6edwcs+QAAACiCgoOD9dprr8lkMqly5co6cOCAXnvtNfXt2zfL45988kk988wz5u1u3bqpd+/e6t27tyRp0qRJ+vrrr5mlzgIz1AAAAEXQ/fffL5PJZN6OiIjQ0aNHlZ6enuXx9evX
t9g+dOiQGjVqZNEWERGR94UWAQRqAAAAcNcPKxCoAQAAiqD4+HiL7R9++EFhYWFydHTM1vnh4eFZ9oHMCNQAAABF0KlTpzRkyBAdPnxY77//vubNm6dBgwZl+/xBgwbpnXfe0ZIlS3TkyBGNGzdOv/zyiw0rLrz4UCIAAEAO5fWDVmyhZ8+e+ueff9SwYUM5Ojpq0KBB6tevX7bP79q1q44fP64RI0bo+vXr6ty5s/r376+NGzfasOrCiUANAABQxGzdutX89cKFCzPtP3nypMX2rftT3+6ll17SSy+9ZNE2bdo0q+sraljyAQAAAFiBQA0AAABYgUANAAAAWIFADQAAAFiBQA0AAABYgUANAAAAWIFADQAAAFiBQA0AAABYgUANAAAAWIFADQAAgAJv6dKl8vX1vesx48ePV+3atc3b0dHR6tChg03rknj0OAAAQI7FDkrI1/Hmz6mdr+NJNwPs4MGDdfny5XwfO7eGDRumAQMG5Pu4BGoAAAAUCV5eXvLy8sr3cVnyAQAosNySF9j8BRRVGzZsUNOmTeXr6ys/Pz+1a9dOx48flyRt3bpVJpPJYvY5ISFBJpNJJ0+e1NatW/XMM8/oypUrMplMMplMGj9+vCTp0qVL6tmzp4oXLy4PDw+1bt1aR48eNfdza2nGZ599psqVK8vDw0NdunTRtWvXtGzZMpUrV07FixfXwIEDlZ6ebj7vv/q9Ze3atQoLC5Obm5tatWql06dPm/fdvuTjdhkZGZo6dapCQ0Pl7u6uWrVq6eOPP87ld/j/YYYaKCgaXLd3BQCAIiQ5OVlDhgxRzZo1lZSUpLFjx6pjx45KSEj4z3MbN26s2bNna+zYsTp8+LAkmWd+o6OjdfToUa1fv14+Pj4aOXKk2rRpo4MHD8rZ2VmSdO3aNc2dO1cffPCBrl69qk6dOqljx47y9fXVF198od9++02dO3dWkyZN1LVr1xz1O3nyZC1fvlwuLi56/vnn1a1bN+3YsSNb35OpU6fq3Xff1aJFixQWFqZvv/1WTz31lEqWLKnmzZvn9FtsRqAGAAAogjp37myx/c4776hkyZI6ePDgf57r4uKiYsWKyWQyKSAgwNx+K/Du2LFDjRs3liStXLlSwcHBWrt2rR5//HFJ0o0bN7Rw4UJVqFBBktSlSxetWLFC586dk5eXl6pWraoHH3xQ33zzjbp27ZqjfufPn69GjRpJkpYtW6bw8HDt2rVLDRs2vOs1paSkaMqUKfr6668VEREhSSpfvry2b9+uN954g0ANAAAAS0ePHtXYsWMVHx+vv/76SxkZGZKkU6dOycPDI1d9Hjp0SE5OTuZAK0l+fn6qXLmyDh06ZG7z8PAwh2lJ8vf3V7ly5SzWN/v7++v8+fM56tfJyUkNGjQwb1epUkW+vr46dOjQfwbqY8eO6dq1a/rf//5n0Z6amqo6depk91uQJQI1AABAEfToo48qJCREixcvVlBQkDIyMlS9enWlpqaag61hGObjb9y4kWdj31qicYvJZMqy7VbIzw9JSUmSpM8//1ylS5e22Ofq6mpV33woEQAAoIi5ePGiDh8+rJdfflktW7ZUeHi4Ll26ZN5fsmRJSdKZM2fMbbevrXZxcbH40KAkhYeHKy0tTfHx8ZnGqlq1aq7rzW6/aWlp2rNnj3n78OHDunz5ssLDw/9zjKpVq8rV1VWnTp1SxYoVLV7BwcG5rl1ihhoAAKDIKV68uPz8/PTmm28qMDBQp06d0qhRo8z7b4XI8ePHa/LkyTpy5Ihmzpxp0Ue5cuWUlJSkzZs3q1atWvLw8FBYWJjat2+vvn376o033pC3t7dGjRql0qVLq3379rmuN7v9Ojs7a8CAAZo7d66cnJwUGxur+++//z+Xe0iSt7e3hg0bphdeeEEZGRlq2rSprly5oh07dsjHx0dRUVG5rp9ADQAAkEP2eNBKTjg4OOiDDz7QwIEDVb16dVWuXFlz585VixYtJN0Mpu+//7769++v
mjVrqkGDBpo0aZL5w3/SzTt9PPfcc+ratasuXryocePGafz48VqyZIkGDRqkdu3aKTU1VQ888IC++OKLTEs6cio7/Xp4eGjkyJF68skn9ccff6hZs2Z6++23sz3GxIkTVbJkSU2dOlW//fabfH19VbduXb300ktW1W4y/r145h6VmJioYsWK6cqVK/Lx8cnWOX1eamzjqqS3pnxv8zFWt2lr0/47ffG5TfuXpJkD69p8jKFz99l8jLN7Xrf5GAH1Y2zaf1G4BqnovKe+SIi1af9tas+3af9S/jyNLj+C0dmF2f8HP7cC+ve2af+Nm1oXOLLj++1TcnR8bv79zonr16/rxIkTCg0NlZubW573j4Ivu+8BZqiBAmLlctv/gzs0H8IoAAD3Gj6UCAAAAFiBQA0AAABYgUANAAAAWIFADQAAAFiBDyXe49ymhNq7BPz/wnvZ/s4xAAAg7xGo73GrV9n21l1tatu0ewAAALsjUAMFhK1/uZH4BQcAAFtgDTUAAMA95uTJkzKZTEpISLC6r+joaHXo0MHqfgozZqgBAADuMcHBwTpz5ozuu+8+e5dSJBCoAQAAcig/HsX+bzl9LPt/cXR0VEBAwB33G4ah9PR0OTkRFbODJR+55Ja8wOYvAACA3NqwYYOaNm0qX19f+fn5qV27djp+/LikzEs+tm7dKpPJpC+//FL16tWTq6urtm/frvHjx6t27dp64403FBwcLA8PDz3xxBO6cuVKrsb999irV6/Wgw8+KA8PD9WqVUs7d+606Gf79u1q1qyZ3N3dFRwcrIEDByo5OTnvv1F5gF877nEHv21h7xIAAIANJCcna8iQIapZs6aSkpI0duxYdezY8a7rpkeNGqUZM2aofPnyKl68uLZu3apjx45p1apV+vTTT5WYmKjevXvr+eef18qVK3M8roPD/5vLHT16tGbMmKGwsDCNHj1a3bt317Fjx+Tk5KTjx4/rkUce0aRJk/TOO+/owoULio2NVWxsrJYsWZLX3yqrEagBAACKoM6dO1tsv/POOypZsqQOHjwoLy+vLM+ZMGGC/ve//1m0Xb9+XcuXL1fp0qUlSfPmzVPbtm01c+bMLJeN3G3c6tWrm9uHDRumtm3bSpLi4uJUrVo1HTt2TFWqVNHUqVPVo0cPDR48WJIUFhamuXPnqnnz5lq4cKHc3Nxy9s2wMZZ8AAAAFEFHjx5V9+7dVb58efn4+KhcuXKSpFOnTt3xnPr162dqK1u2rDlMS1JERIQyMjJ0+PBhq8atWbOm+evAwEBJ0vnz5yVJ+/fv19KlS+Xl5WV+tWrVShkZGTpx4sR/X3w+Y4YaAACgCHr00UcVEhKixYsXKygoSBkZGapevbpSU1PveI6np2e+jevs7Gz+2mQySZIyMjIkSUlJSXr22Wc1cODATP2XLVvW6hrzGoEaAACgiLl48aIOHz6sxYsXq1mzZpJufsgvN06dOqU///xTQUFBkqQffvhBDg4Oqly5ss3GrVu3rg4ePKiKFSvmqub8RqAGAAAoYooXLy4/Pz+9+eabCgwM1KlTpzRq1Khc9eXm5qaoqCjNmDFDiYmJGjhwoJ544oks10/n1bgjR47U/fffr9jYWPXp00eenp46ePCgNm3apPnz5+fqOmyJNdQAAABFjIODgz744APt3btX1atX1wsvvKBXX301V31VrFhRnTp1Ups2bfTwww+rZs2aWrAg69v75tW4NWvW1LZt23TkyBE1a9ZMderU0dixY82z5AUNM9QAUEStXrXPpv23qW3T7oECLa8ftGILkZGROnjwoEWbYRhZft2iRQuL7dv1799f/fv3z3Lf0qVLczRuuXLlMo3l6+ubqa1Bgwb66quv7lhTQUKgBoAiivvMA0D+YMkHAAAAYAUCNQAAALI0fvz4uz5ZETcV6EA9depUNWjQQN7e3ipVqpQ6dOiQ6Sbi169fV0xMjPz8/OTl5aXOnTvr3LlzdqoYAAAA95oCHai3bdummJgY
/fDDD9q0aZNu3Lihhx9+WMnJyeZjXnjhBX366af66KOPtG3bNv3555/q1KmTHasGAADAvaRAfyhxw4YNFttLly5VqVKltHfvXj3wwAO6cuWK3n77bb333nt66KGHJElLlixReHi4fvjhB91///32KBsAABQhd7v7BYq27P7sC/QM9e2uXLkiSSpRooQkae/evbpx44YiIyPNx1SpUkVly5bVzp0779hPSkqKEhMTLV4AAAD/duvR2NeuXbNzJbCXWz/7fz8mPSsFeob63zIyMjR48GA1adJE1atXlySdPXtWLi4u8vX1tTjW399fZ8+evWNfU6dOVVxcnC3LBQAAhZyjo6N8fX11/vx5SZKHh4dMJpOdq0J+MAxD165d0/nz5+Xr6ytHR8e7Hl9oAnVMTIx+/vnnXD+H/t9efPFFDRkyxLydmJio4OBgq/sFAABFy63Ha98K1bi3+Pr6ZvmI9dsVikAdGxurzz77TN9++63KlCljbg8ICFBqaqouX75sMUt97ty5u168q6urXF1dbVkyAAAoAkwmkwIDA1WqVCnduHHD3uUgHzk7O//nzPQtBTpQG4ahAQMGaM2aNdq6datCQ0Mt9terV0/Ozs7avHmzOnfuLEk6fPiwTp06pYiICHuUDAAAiiBHR8dshyvcewp0oI6JidF7772ndevWydvb27wuulixYnJ3d1exYsXUu3dvDRkyRCVKlJCPj48GDBigiIgI7vABAACAfFGgA/XChQslSS1atLBoX7JkiaKjoyVJr732mhwcHNS5c2elpKSoVatWWrBgQT5XCgAAgHtVgQ7U2bn3n5ubm15//XW9/vrr+VARgLtZufxtm48xtH6MzccAACAnCnSgBlC4hPdqbO8SAADIdwRqAHlm9ap9Nh+jTW2bDwEAQI4UqiclAgAAAAUNgRoAAACwAoEaAAAAsAKBGgAAALACgRoAAACwAoEaAAAAsAKBGgAAALACgRoAAACwAoEaAAAAsAKBGgAAALACgRoAAACwAoEaAAAAsAKBGgAAALACgRoAAACwAoEaAAAAsIKTvQsAAKDIa3Dd3hUAsCFmqAEAAAArEKgBAAAAK7DkI5f27V2VD6PUzocxAAAAYA1mqAEAAAArMEMNAICN7XM6ZPMx2th8BAB3QqAGCgi35AX2LgEAAOQCSz4AAAAAKxCoAQAAACsQqAEAAAArsIYaAAAbW71qn83HaFPb5kMAuANmqAEAAAArEKgBAAAAK7DkAyggePomAACFEzPUAAAAgBUI1AAAAIAVCNQAAACAFVhDDQC3Ce/V2N4lAAAKEWaoAQAAACswQw0At+EhHACAnGCGGgAAALACgRoAAACwAoEaAAAAsAKBGgAAALACgRoAAACwAoEaAAAAsAKBGgAAALACgRoAAACwAoEaAAAAsAKBGgAAALACjx4HAMDG3JIX2LsEADbEDDUAAABgBWaoAQCwsX17V+XDKLXzYQwAWWGGGgAAALACM9QAcBvWuwIAcoIZagAAAMAKBGoAAADACgRqAAAAwAoEagAAAMAKBGoAAADACgRqAAAAwArcNg+FXnivxvYuAUUMD+EAAOREkQnUr7/+ul599VWdPXtWtWrV0rx589SwYUN7l4V8sHrVPpuP0aa2zYcAkAV+uQFQGBSJQP3hhx9qyJAhWrRokRo1aqTZs2erVatWOnz4sEqVKmXv8oB7Bg9EAQDci4rEGupZs2apb9++euaZZ1S1alUtWrRIHh4eeuedd+xdGgAAAIq4Qj9DnZqaqr179+rFF180tzk4OCgyMlI7d+60Y2XIL8yKAgAAeyr0gfqvv/5Senq6/P39Ldr9/f3166+/ZnlOSkqKUlJSzNtXrlyRJCUmJmZ73LS0lP8+yEo5qSe3bH0d+XENqSlJNh+jKPwsJNtfBz+L7CsK11EUrkHiOrKrIF7DreMNw7BFOUC2mYxC/i78888/Vbp0aX3//feKiIgwt48YMULbtm1TfHx8pnPGjx+v
uLi4/CwTAADYyOnTp1WmTBl7l4F7WKGfob7vvvvk6Oioc+fOWbSfO3dOAQEBWZ7z4osvasiQIebtjIwM/f333/Lz85PJZMrzGhMTExUcHKzTp0/Lx8cnz/vPL1xHwVEUrkEqGtdRFK5B4joKkqJwDVL+XIdhGLp69aqCgoJs0j+QXYU+ULu4uKhevXravHmzOnToIOlmQN68ebNiY2OzPMfV1VWurq4Wbb6+vjauVPLx8SnU/3O8hesoOIrCNUhF4zqKwjVIXEdBUhSuQbL9dRQrVsxmfQPZVegDtSQNGTJEUVFRql+/vho2bKjZs2crOTlZzzzzjL1LAwAAQBFXJAJ1165ddeHCBY0dO1Znz55V7dq1tWHDhkwfVAQAAADyWpEI1JIUGxt7xyUe9ubq6qpx48ZlWmZS2HAdBUdRuAapaFxHUbgGiesoSIrCNUhF5zqA7Cj0d/kAAAAA7KlIPCkRAAAAsBcCNQAAAGAFAjUAAABgBQI1AAAAYAUCdT54/fXXVa5cObm5ualRo0batWuXvUvKkW+//VaPPvqogoKCZDKZtHbtWnuXlGNTp05VgwYN5O3trVKlSqlDhw46fPiwvcvKsYULF6pmzZrmByVEREToyy+/tHdZVnnllVdkMpk0ePBge5eSI+PHj5fJZLJ4ValSxd5l5coff/yhp556Sn5+fnJ3d1eNGjW0Z88ee5eVbeXKlcv0szCZTIqJibF3aTmSnp6uMWPGKDQ0VO7u7qpQoYImTpyownbvgKtXr2rw4MEKCQmRu7u7GjdurN27d9u7LMCmCNQ29uGHH2rIkCEaN26c9u3bp1q1aqlVq1Y6f/68vUvLtuTkZNWqVUuvv/66vUvJtW3btikmJkY//PCDNm3apBs3bujhhx9WcnKyvUvLkTJlyuiVV17R3r17tWfPHj300ENq3769fvnlF3uXliu7d+/WG2+8oZo1a9q7lFypVq2azpw5Y35t377d3iXl2KVLl9SkSRM5Ozvryy+/1MGDBzVz5kwVL17c3qVl2+7duy1+Dps2bZIkPf7443auLGemTZumhQsXav78+Tp06JCmTZum6dOna968efYuLUf69OmjTZs2acWKFTpw4IAefvhhRUZG6o8//rB3aYDtGLCphg0bGjExMebt9PR0IygoyJg6daodq8o9ScaaNWvsXYbVzp8/b0gytm3bZu9SrFa8eHHjrbfesncZOXb16lUjLCzM2LRpk9G8eXNj0KBB9i4pR8aNG2fUqlXL3mVYbeTIkUbTpk3tXUaeGjRokFGhQgUjIyPD3qXkSNu2bY1evXpZtHXq1Mno0aOHnSrKuWvXrhmOjo7GZ599ZtFet25dY/To0XaqCrA9ZqhtKDU1VXv37lVkZKS5zcHBQZGRkdq5c6cdK8OVK1ckSSVKlLBzJbmXnp6uDz74QMnJyYqIiLB3OTkWExOjtm3bWvz3UdgcPXpUQUFBKl++vHr06KFTp07Zu6QcW79+verXr6/HH39cpUqVUp06dbR48WJ7l5Vrqampevfdd9WrVy+ZTCZ7l5MjjRs31ubNm3XkyBFJ0v79+7V9+3a1bt3azpVlX1pamtLT0+Xm5mbR7u7uXij/ggNkV5F5UmJB9Ndffyk9PT3TI9D9/f3166+/2qkqZGRkaPDgwWrSpImqV69u73Jy7MCBA4qIiND169fl5eWlNWvWqGrVqvYuK0c++OAD7du3r1Cvq2zUqJGWLl2qypUr68yZM4qLi1OzZs30888/y9vb297lZdtvv/2mhQsXasiQIXrppZe0e/duDRw4UC4uLoqKirJ3eTm2du1aXb58WdHR0fYuJcdGjRqlxMREValSRY6OjkpPT9fkyZPVo0cPe5eWbd7e3oqIiNDEiRMVHh4uf39/vf/++9q5c6cqVqxo7/IAmyFQ454TExOjn3/+udDOllSuXFkJCQm6cuWKPv74Y0VFRWnbtm2FJlSfPn1agwYN0qZNmzLNYhUm/541rFmzpho1
aqSQkBCtWrVKvXv3tmNlOZORkaH69etrypQpkqQ6dero559/1qJFiwploH777bfVunVrBQUF2buUHFu1apVWrlyp9957T9WqVVNCQoIGDx6soKCgQvWzWLFihXr16qXSpUvL0dFRdevWVffu3bV37157lwbYDIHahu677z45Ojrq3LlzFu3nzp1TQECAnaq6t8XGxuqzzz7Tt99+qzJlyti7nFxxcXExz/TUq1dPu3fv1pw5c/TGG2/YubLs2bt3r86fP6+6deua29LT0/Xtt99q/vz5SklJkaOjox0rzB1fX19VqlRJx44ds3cpORIYGJjpl7Hw8HB98skndqoo937//Xd9/fXXWr16tb1LyZXhw4dr1KhR6tatmySpRo0a+v333zV16tRCFagrVKigbdu2KTk5WYmJiQoMDFTXrl1Vvnx5e5cG2AxrqG3IxcVF9erV0+bNm81tGRkZ2rx5c6Fc81qYGYah2NhYrVmzRlu2bFFoaKi9S8ozGRkZSklJsXcZ2dayZUsdOHBACQkJ5lf9+vXVo0cPJSQkFMowLUlJSUk6fvy4AgMD7V1KjjRp0iTTLSSPHDmikJAQO1WUe0uWLFGpUqXUtm1be5eSK9euXZODg+U/y46OjsrIyLBTRdbx9PRUYGCgLl26pI0bN6p9+/b2LgmwGWaobWzIkCGKiopS/fr11bBhQ82ePVvJycl65pln7F1atiUlJVnMup04cUIJCQkqUaKEypYta8fKsi8mJkbvvfee1q1bJ29vb509e1aSVKxYMbm7u9u5uux78cUX1bp1a5UtW1ZXr17Ve++9p61bt2rjxo32Li3bvL29M61d9/T0lJ+fX6Fa0z5s2DA9+uijCgkJ0Z9//qlx48bJ0dFR3bt3t3dpOfLCCy+ocePGmjJlip544gnt2rVLb775pt588017l5YjGRkZWrJkiaKiouTkVDj/aXv00Uc1efJklS1bVtWqVdOPP/6oWbNmqVevXvYuLUc2btwowzBUuXJlHTt2TMOHD1eVKlUK1b97QI7Z+zYj94J58+YZZcuWNVxcXIyGDRsaP/zwg71LypFvvvnGkJTpFRUVZe/Ssi2r+iUZS5YssXdpOdKrVy8jJCTEcHFxMUqWLGm0bNnS+Oqrr+xdltUK423zunbtagQGBhouLi5G6dKlja5duxrHjh2zd1m58umnnxrVq1c3XF1djSpVqhhvvvmmvUvKsY0bNxqSjMOHD9u7lFxLTEw0Bg0aZJQtW9Zwc3Mzypcvb4wePdpISUmxd2k58uGHHxrly5c3XFxcjICAACMmJsa4fPmyvcsCbMpkGIXsEUwAAABAAcIaagAAAMAKBGoAAADACgRqAAAAwAoEagAAAMAKBGoAAADACgRqAAAAwAoEagAAAMAKBGoAhdLJkydlMpmUkJBw1+NatGihwYMH50tNAIB7E4EaQJ6Jjo6WyWSSyWSSi4uLKlasqAkTJigtLc3qfjt06GDRFhwcrDNnzpgfV75161aZTCZdvnzZ4rjVq1dr4sSJVo3/X24P97e2b728vb1VrVo1xcTE6OjRozatBQCQ/wjUAPLUI488ojNnzujo0aMaOnSoxo8fr1dffTVXfaWnpysjIyPLfY6OjgoICJCTk9Nd+yhRooS8vb1zNb61vv76a505c0b79+/XlClTdOjQIdWqVUubN2+2Sz0AANsgUAPIU66urgoICFBISIj69++vyMhIrV+/XpI0a9Ys1ahRQ56engoODtbzzz+vpKQk87lLly6Vr6+v1q9fr6pVq8rV1VW9evXSsmXLtG7dOvOM79atWy1mhU+ePKkHH3xQklS8eHGZTCZFR0dLyrzk49KlS+rZs6eKFy8uDw8PtW7d2mLW+FYNGzduVHh4uLy8vMy/JOSUn5+fAgICVL58ebVv315ff/21GjVqpN69eys9PT0X310AQEFEoAZgU+7u7kpNTZUkOTg4aO7cufrll1+0bNkybdmyRSNGjLA4/tq1a5o2
bZreeust/fLLL5o7d66eeOIJc6g9c+aMGjdubHFOcHCwPvnkE0nS4cOHdebMGc2ZMyfLeqKjo7Vnzx6tX79eO3fulGEYatOmjW7cuGFRw4wZM7RixQp9++23OnXqlIYNG2b198LBwUGDBg3S77//rr1791rdHwCgYLj730oBIJcMw9DmzZu1ceNGDRgwQJIsZorLlSunSZMm6bnnntOCBQvM7Tdu3NCCBQtUq1Ytc5u7u7tSUlIUEBCQ5ViOjo4qUaKEJKlUqVLy9fXN8rijR49q/fr12rFjhzmUr1y5UsHBwVq7dq0ef/xxcw2LFi1ShQoVJEmxsbGaMGFC7r4Rt6lSpYqkm+usGzZsmCd9AgDsi0ANIE999tln8vLy0o0bN5SRkaEnn3xS48ePl3RzTfHUqVP166+/KjExUWlpabp+/bquXbsmDw8PSZKLi4tq1qxpk9oOHTokJycnNWrUyNzm5+enypUr69ChQ+Y2Dw8Pc5iWpMDAQJ0/fz5PajAMQ5JkMpnypD8AgP2x5ANAnnrwwQeVkJCgo0eP6p9//tGyZcvk6empkydPql27dqpZs6Y++eQT7d27V6+//rokmZeESDdno+0dNp2dnS22TSaTOQhb61ZwDw0NzZP+AAD2xww1gDzl6empihUrZmrfu3evMjIyNHPmTDk43PxdftWqVdnq08XF5T8/xOfi4iJJdz0uPDxcaWlpio+PNy/5uHjxog4fPqyqVatmqxZrZGRkaO7cuQoNDVWdOnVsPh4AIH8wQw0gX1SsWFE3btzQvHnz9Ntvv2nFihVatGhRts4tV66cfvrpJx0+fFh//fWXxQcIbwkJCZHJZNJnn32mCxcuWNw95JawsDC1b99effv21fbt27V//3499dRTKl26tNq3b2/1Nd7u4sWLOnv2rH777TetX79ekZGR2rVrl95++205Ojrm+XgAAPsgUAPIF7Vq1dKsWbM0bdo0Va9eXStXrtTUqVOzdW7fvn1VuXJl1a9fXyVLltSOHTsyHVO6dGnFxcVp1KhR8vf3V2xsbJZ9LVmyRPXq1VO7du0UEREhwzD0xRdfZFrmkRciIyMVGBioGjVqaNSoUQoPD9dPP/1kvsUfAKBoMBl5tTAQAAAAuAcxQw0AAABYgUANAAAAWIFADQAAAFiBQA0AAABYgUANAAAAWIFADQAAAFiBQA0AAABYgUANAAAAWIFADQAAAFiBQA0AAABYgUANAAAAWIFADQAAAFjh/wNDLA/3AnNA0AAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "fig, ax, df = plot_label_distributions(\n", + " partitioner,\n", + " label_name=\"label\",\n", + " plot_type=\"bar\",\n", + " size_unit=\"percent\",\n", + " partition_id_axis=\"x\",\n", + " legend=True,\n", + " verbose_labels=True,\n", + " cmap=\"tab20b\",\n", + " title=\"Per Partition Labels Distribution\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "e7d1fe2e1c0f14c", + "metadata": {}, + "source": [ + "### Heatmap" + ] + }, + { + "cell_type": "markdown", + "id": "ad6a2e53de3cc084", + "metadata": {}, + "source": [ + "You might want to visualize the results of partitioning as a heatmap, which can be especially useful for binary labels. Here's how:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14a4b4e574866120", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAykAAAFJCAYAAACfAWIZAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy80BEi2AAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOzddVhUWR/A8e/Q3S0NomC32F3Yuuqua/caa3cXuit269q66rp2t66d2CAqikWXdM37B6+jI6CAwKCezz7zrHPuuWd+h3sZ7rknrkQqlUoRBEEQBEEQBEEoJJQUHYAgCIIgCIIgCMLHRCNFEARBEARBEIRCRTRSBEEQBEEQBEEoVEQjRRAEQRAEQRCEQkU0UgRBEARBEARBKFREI0UQBEEQBEEQhEJFNFIEQRAEQRAEQShURCNFEARBEARBEIRCRTRSBEEQBEEQBEEoVEQjRRAEIRPdu3fH3t4+W3mnTp2KRCLJ34AKQJ06dShZsmSelmlvb0/37t3ztMzs2rBhAxKJhOfPn+f7Z316vjx//hyJRMK8efPy/bPh+zkHBUEQ3hONFEEQMnh/cff+paGhgYuLC4MGDSIoKCjfP//9Bdf7l5aWFm5ubkycOJHo6Og8+5w3b94wdepUvL29v5g3Li6OqVOncvbs2Tz7/LwgkUgYNGiQosPId2fPnpU7J9TV1TE3N6dOnTrMnj2bkJCQPPmcwnqcoXDHJgiCkNdEI0UQhCxNnz6dzZs3s3TpUqpVq8aKFStwd3cnLi6uQD5/xYoVbN68mfnz51O8eHFmzZpFkyZNkEqleVL+mzdvmDZtWqaNlDVr1uDr6yt7HxcXx7Rp0zK9QJw4cSLx8fF5EpPweUOGDGHz5s2sXr2aUaNGYWRkxJQpU3B1deX06dNyebt06UJ8fDx2dnbZLv9zx/lzPj1f8oM4BwVB+JGoKDoAQRAKr6ZNm1KxYkUAevfujbGxMfPnz2ffvn38/PPPX1V2XFwcWlpan83Tvn17TExMAOjfvz/t2rVj9+7dXLlyBXd391x/dkpKCmlpaZ/No6qqmu3yVFRUUFERX6cFoWbNmrRv314u7c6dOz
Rq1Ih27drx8OFDLC0tAVBWVkZZWTlf44mNjUVbWztH50t+EOegIAjfG9GTIghCttWrVw8Af39/WdqWLVuoUKECmpqaGBkZ0alTJ16+fCm33/u5Djdv3qRWrVpoaWkxfvz4r/r8pKQkJk+eTIUKFdDX10dbW5uaNWty5swZuX0+nhuwcOFCnJycUFdXZ/ny5VSqVAmAHj16yIYRbdiwAZCfY/D8+XNMTU0BmDZtmizv1KlTgcznA6SkpDBjxgzZ59nb2zN+/HgSExPl8tnb29O8eXMuXLhA5cqV0dDQwNHRkU2bNuX455OVffv24eHhgZWVFerq6jg5OTFjxgxSU1MzzX/z5k2qVauGpqYmDg4OrFy5MkOexMREpkyZgrOzM+rq6tjY2DB69OgM9ftUcnIy06ZNo2jRomhoaGBsbEyNGjU4ceJErutXpkwZFi5cSGRkJEuXLpWlZzYn5caNGzRu3BgTExNZ/Xr27Al8+Th3794dHR0dnj59SrNmzdDV1aVz586ybVnNYVqwYAF2dnZoampSu3Zt7t+/L7e9Tp061KlTJ8N+39M5KAiCkFPitosgCNn29OlTAIyNjQGYNWsWkyZNokOHDvTu3ZuQkBCWLFlCrVq1uH37NgYGBrJ9w8LCaNq0KZ06deLXX3/F3Nz8qz4/OjqatWvX8vPPP9OnTx/evXvHX3/9RePGjbl27Rply5aV23f9+vUkJCTQt29f1NXVadOmDe/evWPy5Mn07duXmjVrAlCtWrUMn2tqasqKFSsYMGAAbdq0oW3btgCULl06y1h79+7Nxo0bad++PSNGjODq1at4enry6NEj9uzZI5f3yZMntG/fnl69etGtWzfWrVtH9+7dqVChAiVKlMjxz+lTGzZsQEdHh+HDh6Ojo8Pp06eZPHky0dHR/Pnnn3J5IyIiaNasGR06dODnn39m586dDBgwADU1NdnFfFpaGi1btuTChQv07dsXV1dX7t27x4IFC3j8+DF79+7NMpapU6fi6elJ7969qVy5MtHR0dy4cYNbt27RsGHDXNfx/c/v+PHjzJo1K9M8wcHBNGrUCFNTU8aOHYuBgQHPnz9n9+7dQPaOc0pKCo0bN6ZGjRrMmzfvi72BmzZt4t27dwwcOJCEhAQWLVpEvXr1uHfvXo5+B771c1AQBCHHpIIgCJ9Yv369FJCePHlSGhISIn358qV0+/btUmNjY6mmpqb01atX0ufPn0uVlZWls2bNktv33r17UhUVFbn02rVrSwHpypUrs/X5U6ZMkQJSX19faUhIiNTf31+6atUqqbq6utTc3FwaGxsrTUlJkSYmJsrtFxERITU3N5f27NlTlubv7y8FpHp6etLg4GC5/NevX5cC0vXr12eIoVu3blI7OzvZ+5CQECkgnTJlSpbxvuft7S0FpL1795bLN3LkSCkgPX36tCzNzs5OCkjPnz8vSwsODpaqq6tLR4wY8dmfk1QqlQLSgQMHfjZPXFxchrR+/fpJtbS0pAkJCbK098fJy8tLlpaYmCgtW7as1MzMTJqUlCSVSqXSzZs3S5WUlKT//fefXJkrV66UAtKLFy/K1a9bt26y92XKlJF6eHh8sV6fOnPmjBSQ/vPPP1nmKVOmjNTQ0FD2/v157O/vL5VKpdI9e/ZIAen169ezLONzx7lbt25SQDp27NhMt318vrw/797/vrx39epVKSAdNmyYLK127drS2rVrf7HMwnoOCoIg5Acx3EsQhCw1aNAAU1NTbGxs6NSpEzo6OuzZs4ciRYqwe/du0tLS6NChA6GhobKXhYUFRYsWzTDsSl1dnR49euTo84sVK4apqSkODg7069cPZ2dnDh06hJaWFsrKyqipqQHpd/bDw8NJSUmhYsWK3Lp1K0NZ7dq1kw2XyW+HDx8GYPjw4XLpI0aMAODQoUNy6W5ubrKeHEi/a16sWDGePXuWJ/FoamrK/v3u3TtCQ0OpWbMmcXFx+Pj4yOVVUVGhX79+svdqamr069
eP4OBgbt68CcA///yDq6srxYsXlzv274fjfXrsP2ZgYMCDBw/w8/PLk7p9TEdHh3fv3n32swEOHjxIcnJyrj9nwIAB2c7bunVrihQpIntfuXJlqlSpIjtH8kthOwcFQRBySgz3EgQhS8uWLcPFxQUVFRXMzc0pVqwYSkrp9zb8/PyQSqUULVo0030/nUhcpEgRWaMiu/7991/09PRQVVXF2toaJycnue0bN27Ey8sLHx8fuYtOBweHDGVllpZfXrx4gZKSEs7OznLpFhYWGBgY8OLFC7l0W1vbDGUYGhoSERGRJ/E8ePCAiRMncvr06QxLOEdFRcm9t7KyQltbWy7NxcUFSJ8XUbVqVfz8/Hj06FGWjb7g4OAsY5k+fTqtWrXCxcWFkiVL0qRJE7p06fLZYUvZFRMTg66ubpbba9euTbt27Zg2bRoLFiygTp06tG7dml9++QV1dfVsfYaKigrW1tbZjimz3w8XFxd27tyZ7TJyo7Cdg4IgCDklGimCIGSpcuXKstW9PpWWloZEIuHIkSOZrqCko6Mj9/7ju/nZVatWLdnqXp/asmUL3bt3p3Xr1owaNQozMzOUlZXx9PSUzV352s//Wtl9uF5WK1BJ82Cp5cjISGrXro2enh7Tp0/HyckJDQ0Nbt26xZgxY764yllm0tLSKFWqFPPnz890u42NTZb71qpVi6dPn7Jv3z6OHz/O2rVrWbBgAStXrqR37945juW95ORkHj9+/NmHUUokEnbt2sWVK1c4cOAAx44do2fPnnh5eXHlypUM52xm1NXVZQ31vCKRSDI91lktbJDTsrMjP89BQRCE3BCNFEEQcsXJyQmpVIqDg4PsTntB2rVrF46OjuzevVvuQmzKlCnZLiMnT+jOSV47OzvS0tLw8/PD1dVVlh4UFERkZGSOntvxtc6ePUtYWBi7d++mVq1asvSPV2j72Js3b2TL6r73+PFjANlKU05OTty5c4f69evn6innRkZG9OjRgx49ehATE0OtWrWYOnXqVzVSdu3aRXx8PI0bN/5i3qpVq1K1alVmzZrFtm3b6Ny5M9u3b6d37955/tT2zIa1PX78WG4lMENDw0yHVX3a2/GtnoOCIAi5IeakCIKQK23btkVZWZlp06ZluNsqlUoJCwvL189/f+f348++evUqly9fznYZ7y/EIyMjv5j3/SpO2cnbrFkzABYuXCiX/r7nwcPDI9sxfq3Mfk5JSUksX7480/wpKSmsWrVKLu+qVaswNTWlQoUKAHTo0IHXr1+zZs2aDPvHx8cTGxubZTyfnhc6Ojo4Ozt/ceniz7lz5w5Dhw7F0NCQgQMHZpkvIiIiw7n6fhW495+fk+OcHXv37uX169ey99euXePq1as0bdpUlubk5ISPjw8hISGytDt37nDx4kW5sr7Vc1AQBCE3RE+KIAi54uTkxMyZMxk3bhzPnz+ndevW6Orq4u/vz549e+jbty8jR47Mt89v3rw5u3fvpk2bNnh4eODv78/KlStxc3MjJiYm23UwMDBg5cqV6Orqoq2tTZUqVTKdv6KpqYmbmxs7duzAxcUFIyMjSpYsmenwojJlytCtWzdWr14tG2517do1Nm7cSOvWralbt+5X1/9jN27cYObMmRnS69SpQ7Vq1TA0NKRbt24MGTIEiUTC5s2bsxzGY2Vlxdy5c3n+/DkuLi7s2LEDb29vVq9eLZtn1KVLF3bu3En//v05c+YM1atXJzU1FR8fH3bu3MmxY8eyHCbo5uZGnTp1qFChAkZGRty4cYNdu3YxaNCgbNX1v//+IyEhgdTUVMLCwrh48SL79+9HX1+fPXv2YGFhkeW+GzduZPny5bRp0wYnJyfevXvHmjVr0NPTk13U5+Q4Z4ezszM1atRgwIABJCYmsnDhQoyNjRk9erQsT8+ePZk/fz6NGzemV69eBAcHs3LlSkqUKCE3h6gwn4OCIAh5TkGrigmCUIi9X7r1c0u1vvfvv/9Ka9SoIdXW1pZqa2
tLixcvLh04cKDU19dXlqd27drSEiVKZPvz3y+nGhISkmWetLQ06ezZs6V2dnZSdXV1ably5aQHDx7McinYP//8M9Ny9u3bJ3Vzc5OqqKjILUf8aTlSqVR66dIlaYUKFaRqampyS8F+uvyrVCqVJicnS6dNmyZ1cHCQqqqqSm1sbKTjxo2TW/JXKk1f/jWzJXmzWpb2U0CWrxkzZkilUqn04sWL0qpVq0o1NTWlVlZW0tGjR0uPHTsmBaRnzpyR+8wSJUpIb9y4IXV3d5dqaGhI7ezspEuXLs3wuUlJSdK5c+dKS5QoIVVXV5caGhpKK1SoIJ02bZo0KipKrn4fL0E8c+ZMaeXKlaUGBgZSTU1NafHixaWzZs2SLW+clfdLEL9/qaqqSk1NTaW1atWSzpo1K8Py0lJpxiWIb926Jf3555+ltra2UnV1damZmZm0efPm0hs3bsjtl9Vx7tatm1RbWzvT+D533nl5eUltbGyk6urq0po1a0rv3LmTYf8tW7ZIHR0dpWpqatKyZctKjx079s2cg4IgCPlBIpWKWXGCIAiCIAiCIBQeYk6KIAiCIAiCIAiFimikCIIgCIIgCIJQqIhGiiAIgiAIgiAIhYpopAiCIAiCIAiCUKiIRoogCIIgCIIgCIWKaKQIgiAIgiAIglCoiEaKIAiCIAiCIAiFinjifCFxxu+aokMoMM/C3yg6hALTqkQtRYdQYB4G+is6hALhZpHxafTfq5S0VEWHUGDCYqMUHUKBMdDUVXQIBSYxJUnRIRSY2699FR1CgWhXpoGiQ8igzLxOOcp/Z+T2fIrk+yIaKYIgCIIgCIKQSxIkig7huyQaKYIgCIIgCIKQW6KNki9EI0UQBEEQBEEQckn0pOQP0UgRBEEQBEEQhFySiDZKvhCNFEEQBEEQBEHIJdGTkj8K/RLEz58/RyKR4O3t/dVlde/endatW391OYIgCIIgCIKQTpLDl5Adhb4nxcbGhrdv32JiYqLoUAq9tNQ0Dm7bzdWzF4mOiELfyBD3+jVp1qkVkv/3Rd6+dJ3zR04T8OQ5se9imLB4JjaOdrIyQoNCmNhreKbl9xk7iAo1qhRIXd576fOMa4fPE/j8FbGR72jze1eKViiRrX1fPX7O37NXYWptTveZQ3NcZtjrIM7uPMJLn2dIU9MwLmJO68G/omdimFfVy3P/7tzFtk1bCQ8Lx7moM8NGD8etZPZ+XoXB2B5DCAsOzZBex6MhnX/rAcDTR4/Zs2kn/r5PUVJSwsbRjqEzxqKmrkZoUAgH/96Dz90HREdEYmBkSJW6NfDo2BoV1UL/dZfBpnUbOXfmHC+ev0BdXZ1SpUsxYMhv2NnbfXnnQi4kOIRVS1Zw9fIVEhISKGJtzdjJ4ynuVhyA2pVqZLpf/yG/8XOXXwoy1FzbvXUnW9ZsxKNdK3oN7su76HdsX7+FOzduExoUgp6BPpVrVOXnnl3Q1tEGwP/JM/Zs+4dH9x7yLioaUwszGrdsRvP2rRRcm4zu3PZmx5bt+Pn4EhYaxvQ/ZlGjdk0AUlJSWLdyDVcvXeHt67do62hTvlJF+gzsh4lp+t/zwDdv2bxuI7dv3CI8PBxjExMaNmlE5x5dUFVVVWTV5OzYtI2L5y7w6sVL1NTVcSvlRs8BfbC2s5HlGT1oOPdu35Xbr1mr5gwePVQu7cShY+zesYvXL1+hpaVNzXq1GDhiSEFUQ47/Qz/+23+S1/4veRcRxa8j++JWuUyW+aMjoji8aTevnwUQHhiCe9M6NO/eXi7P/avenNtzjLDAEFJTUzGxMKVGi/qUqyV/3RD8KpCjW/fi/9CPtLQ0zKwt6DyiDwYmRvlS14Ighnvlj0L/V1tZWRkLC4sst0ulUlJTU1FRKfRVyXfH/j3IuSOn6D6sH5a2RXjh58+mRWvQ1NakXsvGACQmJOLs5kKFGlXYsuSvDGUYmRgzd/MSubQLR89wfPdhSlTI+gssvyQnJmFma0mpWhXZu3hztvdLiI
3n8Ood2Lk5ERcdk+MyI4LC2DpzJaVrV6JGm4aoaWoQ+joIZbXC84fzUyePn2TJ/MWMGj8at5Il2LltB8MHDePv3dsxNPo2vvwnLJxJWmqa7P3rFy9ZMNGTiv9vHD999JhFk+fS9KdW/Ny/O8rKSrz0D0CilP4XIvDlG6TSNLoM6oWZpTmvX7xi05I1JCUk8lPvzgqp09fwvnWbtj+1w7WEK6mpqaxaupJhA4eyddc2NDU1FR1err2LjmZQ7wGUrVCePxbNw8DAgFcvX6Gr9+H5HbuP7JPb5+qlK/wxcw6169Yu6HBzxc/nMccPHMXO6cNzdcJDw4gIC6fbgF7Y2NkSEhTMyvlLCQ8NZ/T08QA8e/wEfUMDhk4YibGZCb73H7HCaylKSko0a9tCUdXJVEJ8Ak5FnWjaohlTxkyU35aQgJ+vH116dsOxqDMx0e9YumAxE0eOY+XGNQAEvAggLU3KsLEjKWJjjf/TZ8yf/Sfx8fEM+H2gIqqUqXved2nRthUursVITU1lw6q/mDBsDKu2/oXGR7+HTVo2o0vv7rL36hrqcuXs3r6L3X//Q6+BfSnm5kpiQgJBbwMLqhpykhKTsLC3pkI9d7bOW/PF/KnJKWjr6VC3bRMuHjqdaR4tHS3qtG2MqZUFyirK+Ny6z7/Lt6Ctp4tLWTcAwgJDWDV5PhXrudOggwfqmhoEv3qLSiFqlOaGGO6VPwrFlf3Ro0eZOXMm9+/fR1lZGXd3dxYtWoSTkxPPnz/HwcGB27dvU7ZsWc6ePUvdunU5fPgwEydO5N69exw/fpyzZ8+yd+9eBgwYwMyZMwkLC6N58+asWbMGfX39HH8uIPvsf//9lyVLlnD16lWKFi3KypUrcXd3l5Vz4cIFxo0bx40bNzAxMaFNmzZ4enqira1dID+/95498qNMlfKUqlQWABNzU26cv8zzx89kearWS787GRoUkmkZSspK6BsayKV5X75JhRqV0dDUyJe4P8exTHEcyxTP8X7HN+zGtWpZJEpKPLn1IMdl/rfrKI5lilGnUzNZmqG5cY7jKEg7tvxNizYt8WjZHIBR40dz6cJFDu47SJceXRUcXfbo6uvJvT+yaz+mlua4lHIFYMeaLdRr2ZimHVrK8lhYW8n+XbJiGUpW/NCYNrU0J+j1G84eOvlNNlLmL10o937CtIk0b9AM30c+lC1fTjFB5YFtG7diam7GuCnjZWmWRazk8hibyP++XTx/gXIVymNlXaRAYvwa8XHxLJz5JwNGDmbX5h2ydDtHe0ZPnyB7b1HEks69u7Jw1jxSU1JRVlGmfrNGcmVZWFni+9CHK/9dKnSNlCrVqlKlWtVMt+no6PDnkvlyaUNGDuW3Hv0ICgzC3MKcyu5VqOz+4S67VRErXr54yYHdewtVI2Xm/Dly74dPGM3Pzdvj5+tHqbKlZenq6hoYGWd+Q+hd9Ds2rV7PlD9mUK5ieVm6g7Nj/gT9BcXKlaBYuez3shuaGdOix08A3DxzOdM8jiVc5N5Xb1aX2+eu8sLnqayRcnz7AYqVc6Ppr21k+YwtTHMafqEjelLyR6GYkxIbG8vw4cO5ceMGp06dQklJiTZt2pCWlpblPmPHjmXOnDk8evSI0qXTvySePHnCzp07OXDgAEePHuX27dv89ttvX/25EyZMYOTIkXh7e+Pi4sLPP/9MSkoKAE+fPqVJkya0a9eOu3fvsmPHDi5cuMCgQYPy4CeTM46uRfG585Cg128BePXsBU8ePqZEhdJf2DNrL5748/LZC6o3+jbuXgLcO3+dyJBwqrfJ3VNppWlpPL3jg5GFCTv/WMvSgdPZPHUpfjcffHlnBUlOTsbXx5dKlSvJ0pSUlKhYuRL3791XYGS5l5KcwtUzF6jesDYSiYToyCj8fZ+gq6/PnBFTGN65P3+OmY7fA5/PlhMXG4+2rk4BRZ2/YmPSewX19PS+kLNwu/jfRYq7Fmfy2Im0atScXp17cGDP/izzh4eFc/nCJZq18i
jAKHNvzaIVVKhaiTIVv9yQjI2JQ0tLC2UV5SzzxMXEoqP77T8lPjYmFolEgo5O1r+PsbEx6Bby8zsuNhZArucP4MyJU3Rs1pb+v/Zm/Yq1JCQkyLbdvn6TNGkaYSGh9P2lJ7+27sTsSdMJCQou0NgLilQq5ck9H0LeBGHv5gxAWloavrfuY2JpzvpZS5nVewzLx//Bw2t3FBxtXhBzUvJDoehJadeundz7devWYWpqysOHD7P8Mps+fToNGzaUS0tISGDTpk0UKZJ+p23JkiV4eHjg5eWV6ZCxz31uyZIlZekjR47EwyP9j+O0adMoUaIET548oXjx4nh6etK5c2eGDh0KQNGiRVm8eDG1a9dmxYoVaGhk7H1ITEwkMTFRLi0pKQk1NbVM65pdjds3JyEunqn9xyBRUkKalkarLu2pUrd6rsu8ePwcFjZWOLm6fDlzIRAeGMq5nUf5ZUJ/lJSz/qP/ObHRsSQnJHH14FlqtG9M7Y7N8L/ry57Fm+k0ri+2xRVz5+tzIiMjSU1NzXAXz8jYiIDnLxQU1de5feUGcTFxVG+Q3kAOCUz/Y35g27/81OsXbBztuXzqP+aPn83U5XMxL2KZoYzgN4GcOXCM9r2+vV6UT6WlpbFo3kJKlymNo7OTosP5Km9fv2Hfv3v56ZeO/NqjKz4PHrHYayGqqqo0ad40Q/6jh46gpa1FrW9gqNeFU+d49vgJf6xc+MW80ZFR/LP5bxq2aJJlHp/7D7l45j8mzJmad0EqQFJiIquXrqReo/qy+Tefev3yFXt37qbfkKxvLipaWloaqxYtx610CewdPwzlq9OwHuYW5hiZGOP/xJ91K9bwKuAVkzynAunzb6RpUnZs+pv+Q39DS1ubTWvWM37oGJZvWl2o5uB8jYS4eOb0G09KSgpKSkq07NWRoqXTe8Jjo9+RlJDIuX3HadixBY07t8LP+xFbvdbQa8rvOLoVVXD0uSeaHfmjUDRS/Pz8mDx5MlevXiU0NFTWkxEQEICbm1um+1SsWDFDmq2trayBAuDu7p7ecvf1zbSR8rnP/biR8r6nBsDSMv1CKDg4mOLFi3Pnzh3u3r3L1q1bZXmkUilpaWn4+/vj6uqa4XM9PT2ZNm2aXFrXQb3pPqRPpnXNrpv/XeXa2Uv0HDkAKztrXj57wT9rtqJvnD6BPqeSEpO4fu4yzToWvgmbmUlLS+Pgir+p3rYhRpa57z6WSqUAOJcvQaUm6T83czsrXj95gffpK4WykfI9unD8DCUrlsHAOH2hAmla+nGp1bQe1RvWAcDWyZ5Hd+5z8cQ52nbvJLd/RGg4CyfPpUKNKtRqUq9AY88PXnPm8ezpM1b8tUrRoXy1tLQ0irkWp+/AfgC4FHPB/5k/+3bvzbSRcmT/IRo0aYS6unqGbYVJaHAIfy1dzZR5M1FT//xNp7jYOGaNm4qNnS0du2feiH7x7DlzJsygQ7dfKFupfKZ5vgUpKSlMmzAFKVKGjh6RaZ6Q4BDGDB1F7fp1aN66cA1r+9gyr8U8f/aceSsWyqU3a9Vc9m8HJ0eMTIwYN2QUb169wcrairQ0KSkpKfQfOpAKVdKvX8ZMnUDnlh24e8ubClUq8T1Q01Bn8J/jSExI5Ok9Xw5v2o2RuQmOJVxk3+GuFUtTo3n6d7KVvQ0vfJ9x7fh/33YjRYz3yheFopHSokUL7OzsWLNmDVZWVqSlpVGyZEmSkpKy3Ccv5ntk93M/vsPx/kR836CJiYmhX79+DBmScXUOW1vbTD933LhxDB8uv4LW5Zd3M82bE7vXb6dx++ZUqp0+X6aIvQ3hwaEc/edArhopty5eIykxkar1M19lp7BJik8k0P8VQS/ecHJT+qRbqVQKUil/dh9Hh9G9sPt/t/PnaOlqoaSshHERM7l0YyszXj9+nh+hfzUDAwOUlZUJDwuXSw8PC8fIpHDPpclMWHAIj7zv89v4YbI0fSMDAKxsrOXyWtoUISxEfkWwyLAI5o2biZNrUb
oM7p3v8eY3r7nzuHThIsvWrMDM3OzLOxRyxibG2Dvay6XZ2dtx/vTZDHnv3L5DwIsApsyelmFbYfPU9wlREZGM7PPh70FaWhoP797nyJ4D7DixF2VlZeLj4pgxehKampqMmTEx04VfXj4PYOqICTRs0YSfunbKsP1bkZKSwrTxUwh6G4TX8oWZ9qKEhoQy4rffKVGqJMPHjVJAlNmz3GsJ1y5d5c9l8zE1+/yNsPer1L19/RorayuM/r9yla3Dh5X5DAwN0NPXI/g7GvKlpKSEsUX6d5SVvQ0hr4M4t/c4jiVc0NLTQUlZCTNr+ZvGZkUseO77VBHh5hkxcT5/KLyREhYWhq+vL2vWrKFmzfQL6QsXLuSqrICAAN68eYOVVfoEzCtXrqCkpESxYsXy7XPLly/Pw4cPcXb+8sXve+rq6hnuCH7tUC9I7/l4v8rRe0pKSrK7Fzl18fg5Slcun2Eyc2GlrqlOj9nD5NJun7xMwKOntBr8K/qm2VvhSllFBQsHa8Lfyi8uEBEYip5x4Vx+WFVVlWLFi3Hj+g3ZkJi0tDRuXr9Buw7tv7B34XPxxDn09PUpVfnDmH4Tc1MMjA0JfP1GLm/Q67dyk+UjQsOZN24mds4O9BjaHyWlQjH1LlekUinz//Di/JlzLF29HKtPJpd/q0qWKUXAiwC5tFcBLzHPpMf78L6DFHMthrNL4b/LWrpCGRasWyaXtnTuQqxtrWn9c3uUlZWJi41j+qhJqKqqMm725Ex7XAL8XzBl+HjqNq5P597dCir8PPe+gfL65SvmL1+U6SI2IcEhjPjtd4oWL8boSWML5e+rVCplxfylXDp/gblLvbCwyji09FNP/dIvuo2M028SuZVKH53xKuClrIHzLjqa6KhozMzN8ylyxZOmpZGSnD6HV0VFBWsnO0LfBMnlCX0b/E0vPwyiJyW/KLyRYmhoiLGxMatXr8bS0pKAgADGjh2bq7I0NDTo1q0b8+bNIzo6miFDhtChQ4dMh3rl1eeOGTOGqlWrMmjQIHr37o22tjYPHz7kxIkTLF26NFf1yK1SlctyZMd+jExNsLQtwsunLzi59yjVGtaS5Yl9F0N4SBiRYREABL1Kn2SvZ6gvt6pX8JsgnjzwZdDUkQVah08lJSQSERQmex8ZEk7QizdoamtmeF6JREkJ00/u0Gjp6aCiqiKXnp0yKzerzf5l27Ap5oCtmxP+dx/z5PYjfh7XNz+qmSc6/vozs6bMoLhr8f8vQbydhPgE2Wpf34q0tDQunjiPe/2aKH80r0gikdC4bXP2b92FjYMdNo52XDp1nsBXb+g/fijwvoEyA2NTE37q1Zl3UdGy/d/3xHxLvObM48TR48yZPxctLS3CQtPPWx0dbdQzme/2rfjp544M7NWfzes3UbdBPR49eMiBPfsZOX60XL7YmFjOnjrDb0MLfiGS3NDU0sLukx4iDQ0NdPT0sHO0Jy42jmkjJ5KUmMjQCSOJi40jLjYOAD0DfZSVlXnx7DlTho+nXKXytPipNRH/7x1VUlZG3yDzlSoVJT4ujtevXsvev33zlieP/dDV08PYxJipYyfh5/uY2V5zSUtLJTws/fzV1dNDVVWVkOAQhg8YgrmlBf2H/EZUZKSsrPcX94XBMq/FnD1xmslzpqOppSXrsdbW0UZdXZ03r95w9sRpKrlXRk9fD/8nz1i1eAUly5aWrd5lbWuNe81qrFq4nCFjhqGlrcX6lX9hbWtDmQplC7xOiQkJhAV+uBEXHhzGm+cv0dLRzrLB8Ob5SyD9b2hs9DvePH+JsooK5tbpjbaze45RxMkWY3NTUpJT8L19n9v/XaNV7w89gTVbNmD7gnU4uBbFsWRRHns/xOfmPXpP/T0fa5v/RBMlfyi8kaKkpMT27dsZMmQIJUuWpFixYixevJg6derkuCxnZ2fatm1Ls2bNCA8Pp3nz5ixfvjxfP7d06dKcO3eOCRMmULNmTaRSKU5OTnTs2DHH8X+tTv26sn
/Lv/y9fAPvoqLRNzKkZtO6eHT6sNTfnau32LTww5roa/9Iv+vn8XMbWnRuK0u/dOIcBiZGuJb7MDdHEQL9X7Hdc7Xs/ZltBwEoWaMCzfp24MLuE9y/cJP+87PfwPxSmQAuFUvSqHsbrhw8w6kt+zGyNKX14F+xLuaQaZmFQYNGDYiMiGDtyrWEh4VR1KUoXksWZLkkZmH1yPs+4SGhVG9UJ8O2Bq2bkpyUzI41m4l9F4uNgy3DZo7DzDL9TuTD2/cIfhNE8JsgRneTv7Bdc2hbQYSfp/bs2g3AoL7yy7GOnzIRj5bfxkpXmXEt4crMP2ezetkqNq3dgIWVJYOGD6FhU/nld08dP4lUKqV+49yt1FfYPHv8BL9HvgD81ll+GOLKv9dhZmnO5XMXiY6M4tyJM5w7cUa23dTcjFU71hdovF/i+8iX4b99uLhcsTD9xlxjjyZ0692DS/9dBKBPl55y+81fvoiyFcpx89oNXr96zetXr+nYQn4hm9NXz+dz9Nl3aM8BAMYMkp9PM3z8KBp6NEZVVYXbN26xd+e/JCQkYGpmRo06Nen0yVyjEZPGsHrxCqaMmoBEIqFU2TLMnO+pkOe8vX4awNppi2TvD2/6F4DytavQfmBXTu48xK1zVxi9bIYsz9LRH5Zifv0sgDsXbmBgaiTLk5SYxP61O4gKi0RVTRXTIuZ0GNyd0tUqyPYrUbksrfp04tze4xxY/w+mVmb8MqI39sWzPxqlMBI9KflDIn0/S/gbN3XqVPbu3Yu3t7eiQ8mVM37XFB1CgXkW/ubLmbLp0KodSCQSWeOisGlVotaXM30nHgb6KzqEAuFmUXgbqnktJS1V0SEUmLDYKEWHUGAMNL/95YyzKzEl67mt35vbr33zrKx/lm5CIoH2AwvfM7balSl8Ny1qLOmVo/wXBmd8mLaQUeEb/CkI2SSVSnnp84wa7Rp9ObMgCIIgCF8klUrxf/iYBh0L7yprhY1EkrOXkD0KH+4lCLklkUjov2CcosMQBEEQhO+GRCJh9PKZig7jmyJW98of301PytSpU7/ZoV6CIAiCIAjCt0n0pOQP0ZMiCIIgCIIgCLkmWh75QTRSBEEQBEEQBCGXxHCv/CEaKYIgCIIgCIKQS2IIV/4QjRRBEARBEARByCXRk5I/RCOlkPiRHgT0Iz075EfiaFxE0SEUiLS0NEWHIOQDc91v66GnX6P3zh9n5abVP01QdAgFRkVJWdEh/Ljy8RJuxYoVrFixgufPnwNQokQJJk+eTNOmTQFISEhgxIgRbN++ncTERBo3bszy5csxNzeXlREQEMCAAQM4c+YMOjo6dOvWDU9P+QeJnj17luHDh/PgwQNsbGyYOHEi3bt3z7+KZcN3s7qXIAiCIAiCIBQ0SQ7/ywlra2vmzJnDzZs3uXHjBvXq1aNVq1Y8ePAAgGHDhnHgwAH++ecfzp07x5s3b2jbtq1s/9TUVDw8PEhKSuLSpUts3LiRDRs2MHnyZFkef39/PDw8qFu3Lt7e3gwdOpTevXtz7NixvPkB5ZLoSREEQRAEQRCEXMrPsTAtWsg/VHPWrFmsWLGCK1euYG1tzV9//cW2bduoV68eAOvXr8fV1ZUrV65QtWpVjh8/zsOHDzl58iTm5uaULVuWGTNmMGbMGKZOnYqamhorV67EwcEBLy8vAFxdXblw4QILFiygcePG+Vi7zxM9KYIgCIIgCIKQWzl8UEpiYiLR0dFyr8TExC9+TGpqKtu3byc2NhZ3d3du3rxJcnIyDRo0kOUpXrw4tra2XL58GYDLly9TqlQpueFfjRs3Jjo6WtYbc/nyZbky3ud5X4aiiEaKIAiCIAiCIORSTod7eXp6oq+vL/fy9PTMsvx79+6ho6ODuro6/fv3Z8+ePbi5uREYGIiamhoGBgZy+c3NzQkMDAQgMDBQroHyfvv7bZ/LEx0dTXx8/Nf+eHLth2+kbNiwIcPB/dTUqVMpW7as7H337t1p3bp1vsYlCIIgCI
IgFH45feL8uHHjiIqKknuNGzcuy/KLFSuGt7c3V69eZcCAAXTr1o2HDx8WYA0Vo8DnpGzYsIGhQ4cSGRlZ0B+dayNHjmTw4MGKDiNbxvcYSlhwaIb02h4N+OW37oS8DWLXX9t48uAxKcnJlKhQmk79u6FnqJ9hn+TkZOYMm8Ir/wAmLp6FjZNdQVQhT4UEB7N88XKuXLpMQkIC1tbWjJ86EVc3V0WHlq82r9/EyqUr+OnnDgwdOUzR4WTb3dt32LHlb/x8HxMWGsa0uTOpUbumbPvc6Z4cP3xUbp9KVSszZ+GfsvePfR6zZtlKfB/5oqSkRK26tRjw+0A0tbQKrB55YcuGzaxatpKfOv3EkBFDAfhz9h/cuHad0NBQNDW1KFW6JP0H/4ad/bf1u9mxZXsC3wZmSG/dvg3Dxozg9avXLF+0lHve90hOTqKyexV+HzkMI+NvfwWuTes2cu7MOV48f4G6ujqlSpdiwJDCeQzbla5PVbtSWBuYkZiSjG/wczZeP8ib6JDP7tfCrRZNXKthom3Iu4QYLj2/y+abh0hOTQHAzdyRNqXq4mRijZGWPp4n13E14L5cGVXtStGkeDUcja3R09Bm2N55+Ie/ybe65sTm9Zs4f+YsL54HoK6uRsnSpRgw+DdsPzqGr1+9YtnCpdz1vktychJV3KsydNTwQnMOP3vox7l9x3n1LIB3EVF0Hd2fkpXLfnafp/d9ObBxF0Ev32JgYkj9dk2pWLeabLvngPFEhIRn2M+9cW3a9PkZgH9XbcXv7iOiI6JQ11DHzsWRZl3aYlbEIk/rV9ByOhleXV0ddXX1bOdXU1PD2dkZgAoVKnD9+nUWLVpEx44dSUpKIjIyUu6Ge1BQEBYW6T9TCwsLrl27JldeUFCQbNv7/79P+ziPnp4empqaOapbXhIT57NBR0cHHR0dRYeRLeMWTict9cMSqW9evGLhxDlUqFGZxIQEFk6ci7WDLcM9xwOwb/Mulk33YozXVJSU5DvWdq/7GwNjQ175BxRoHfJKdHQ0/Xv2o3zFCngtno+BoSEvA16iq6ur6NDy1aMHD9m3ey/ORZ0VHUqOxcfH41TUmaYtmjFl7KRM81SqWpnRk8bK3quqqsn+HRoSyughw6lTvy5DRg4lNjaW5QuWMnfGHKZ6Ts/3+PPKoweP2L9nH06fHMNixYvRsEkjzC3Su+HXr/6L4YOGsXPfPygrfzvLj67auIbUj76n/J8+Y8SgYdRpUJf4+HhGDhqGU1FnFqxYBMC6lWsZN3wMK9avyvA99a3xvnWbtj+1w7WEK6mpqaxaupJhA4eyddc2hV4MZKaEhRNHHl3ELzQAZSVlfq3QjKlN+jF49x8kpiRluk8tx/J0qejB0gs78An2x0rPlCG1fkaKlPXX9gOgoaqGf/gbTvpdY1z9HpmWo6GixsMgfy74ezOoRsd8q2NueN+6TZuf2uHq9v9juGwlwwcNZfM/6ccwPj6e4QOH4uxSlEUrlwCwdsVqxg4bxcoNawrFOZyUkIilvTWV6lVj05+rvpg/PCiUdZ7LqNqoFj//3pMn93zYtWILuob6FCtbAoDBc8Yh/WiJ9sCXb1gzfRGl3cvL0oo42lKuZmUMTAyJi4njxM6DrJ2xiLHLZqGkrPify7ciLS2NxMREKlSogKqqKqdOnaJdu3YA+Pr6EhAQgLu7OwDu7u7MmjWL4OBgzMzMADhx4gR6enq4ubnJ8hw+fFjuM06cOCErQ1FyfEYcPXqUGjVqYGBggLGxMc2bN+fp06dA+hrLEolErpfE29sbiUTC8+fPOXv2LD169CAqKgqJRIJEImHq1KkARERE0LVrVwwNDdHS0qJp06b4+fnJynk/LOvgwYMUK1YMLS0t2rdvT1xcHBs3bsTe3h5DQ0OGDBlCamqqbL8vlfve3r17KVq0KBoaGjRu3JiXL1/Ktn063OtTaWlpeHp64uDggKamJmXKlGHXrl05/dHmCV19PfSNDGSvu9dvY2pphk
spV54+9CMsOITuw/tSxN6GIvY29Bjejxd+/vjeke82vH/jDg9v3addr18UUo+8sHXDFszMzZkwdSJuJUtgVcSKKu5VsLaxVnRo+SYuLo5pE6cyZuJYdPW+vcZYlWpV6dm/NzXqZP0sHVU1NYyMjWWvj+t55eIllJVVGDJqGDZ2thR3c2XomOH8d+Ycr1++KogqfLW4uDimT57G6PFjMjSoW7ZtRdnyZbG0sqRY8WL0HtCX4KAgAt++VVC0uWNgaIixibHsdfnCJYpYF6Fs+XLcv3OPwLeBjJsyASdnJ5ycnRg3dQK+j3y4df2mokP/avOXLsSjpQeOTo4UdSnKhGkTCQoMxPeRj6JDy2D68dWcfnKdl5FBPA9/w+L//sZMxwgn46y/Q4uZ2eMT7M/5Z7cIjonA+81j/nt2m6KmtrI8t175sO3WEa6+uJdlOWef3mSn93Huvnmcp3XKC15LFtCshQcOTo44uxRl/NSJBAUGyY7hvTt3CXwbyPgpE2Xn8IRpk/ApROdw8fIlafJzK0pWKZet/FeOn8fIzIQW3dpjbm1J9aZ1KVW1PP8dPCXLo6Ovi66hvuz16OY9jC1McSzhIstTtWFNHN2KYmRmgrWjLU06tSQyNIKIkLA8r2NBen9Nm91XTowbN47z58/z/Plz7t27x7hx4zh79iydO3dGX1+fXr16MXz4cM6cOcPNmzfp0aMH7u7uVK1aFYBGjRrh5uZGly5duHPnDseOHWPixIkMHDhQ1pvTv39/nj17xujRo/Hx8WH58uXs3LmTYcMUOxIjx42U2NhYhg8fzo0bNzh16hRKSkq0adMmWw84q1atGgsXLkRPT4+3b9/y9u1bRo4cCaTP87hx4wb79+/n8uXLSKVSmjVrRnJysmz/uLg4Fi9ezPbt2zl69Chnz56lTZs2HD58mMOHD7N582ZWrVol10DIbrmzZs1i06ZNXLx4kcjISDp16pTtn4mnpyebNm1i5cqVPHjwgGHDhvHrr79y7ty5bJeRH1KSU7h65iLVGtZGIpGQnJyMBAkqqqqyPCpqqkgkEp489JWlRUdEsXnxWnqM7I+aulpmRX8TLpz/j+JuxZk4ejweDZrR/Zeu7N+9T9Fh5SuvOfNwr1GNSlUqKzqUfHPnljftmraiW4dfWTjXi6ioKNm25KRkVFVV5O5Uvv8Svncn6wuiwmTBH164V3enYpVKn80XHx/P4QOHsLSywuyTCY/fkuTkZE4cOU7Tlh5IJBKSkpKQSCSoqn34nlJTU0NJSYl7d+4qMNL8ERsTA4Cenp6CI/kyLdX0np6YxLgs8/gGP8fJ2IaiJumNEnNdI8pbu3Lr5aMCiVERYmNigQ/HMDkpOctz+K73HYXE+LVePH5G0dLF5dJcyroR8PhZpvlTklO4df4qlepWy/KiPCkhketnLmFkZoK+sWGex1yQlCSSHL1yIjg4mK5du1KsWDHq16/P9evXOXbsGA0bNgRgwYIFNG/enHbt2lGrVi0sLCzYvXu3bH9lZWUOHjyIsrIy7u7u/Prrr3Tt2pXp0z+MLnBwcODQoUOcOHGCMmXK4OXlxdq1axW6/DDkYrjX++6k99atW4epqWm2JvCoqamhr6+PRCKRjYMD8PPzY//+/Vy8eJFq1dLHN27duhUbGxv27t3LTz/9BKT/MVuxYgVOTk4AtG/fns2bNxMUFISOjg5ubm7UrVuXM2fO0LFjxxyVu3TpUqpUqQLAxo0bcXV15dq1a1Su/PmLvcTERGbPns3Jkydl3WKOjo5cuHCBVatWUbt27S/+XPKL95UbxMfEUa1B+l1px+LOqGmos3v9dtp07YAUKbvX7yAtLY2o8EgApFIpGxasolaz+tgXdSQ06PNjjwuzN6/fsHfXHjp27kTXnt149PARC+bNR0VVhWYtPBQdXp47eewEj318Wbt5naJDyTeV3CtTs04tLKwsePP6DX+tWMO4YaNZsmY5ysrKlKtYnhWLlrFjy9+07diehPgE1i
xfDUB4WOG/U3fy+Eke+zxm9ca1WebZ889uVixZTnx8PLZ2tixYtgDVj248fGv+O3uemJgYmjZvBkCJUiXQ0NBg1ZIV9BnYD6lUyqqlK0lNTSUstPAfw5xIS0tj0byFlC5TGkdnJ0WH81kSJPSq0oqHQc8IiMw4n+i9889uoauhzWyPQUgkElSUlDny6CK77p7Kcp9vWVpaGou9FlLqo2Po9v9zeOWS5fQd2B+pVMrKJSu+6XP4XWQ0OvryDWkdA10S4hJITkxC9ZMbmg+ue5MQG0+FuhmHC106epbDW/aQlJCIqZU5fSb/jorqtz77IP+elPLXX399druGhgbLli1j2bJlWeaxs7PLMJzrU3Xq1OH27du5ijG/5Pis8PPzY/LkyVy9epXQ0FBZD0pAQABauZyY+ujRI1RUVGSNBABjY2OKFSvGo0cf7r5oaWnJGiiQvjyavb293HwRc3NzgoODc1SuiooKlSp9uGtZvHhxDAwMePTo0RcbKU+ePCEuLk7Won0vKSmJcuUy70ZNTEzMsB52UmJSnvdaXDx+jhIVy2Dw/zsUuvp69Bs3hK3L1nNm/3EkEgmVartj62SP5P93ns8cOE5CfAJNf2qZp7EoQlpaGsXditN/0AAAXIoX49mTZ+z9d+9310gJCgxi4bwFLFy+OEeT8b419RrWl/3b0dkJR2cnurT7mTu3vClfqQL2jg6MmTyOFYuWs3bFGpSVlGjToR2GRkZIJIV7vHNQYBCLvRYyf+nCzx7Dhk0bUbFKJcJCw9i+ZRuTx01m+doV3+xxP7z/EJXdq2BiagKkDwWbNmcG8+fM498du1BSUqJeowa4FHeRfU99L7zmzOPZ02es+OvLcwIUra97W+wMLRl3aMln85W0cKJ96fqsuvwvfiEBWOiZ0LtKayLiotl550QBRVtw5s/1wv/pM5atXSlLMzQ0ZPrcmXh5/smu7f+gpKRE/UYNcCle7Ls7h7Ny/dQlipUrgb6RQYZt5WpWoWgZV95FRHNu/wm2zF/DbzNHyfU8fWvy82GOP7IcN1JatGiBnZ0da9aswcrKirS0NEqWLElSUpKssSCVSmX5Px5W9bU+vVsokUgyTcvO0LO8EvP/rvpDhw5RpEgRuW1ZXTR4enoybdo0ubRug3vTfUjfPIsrLDiUR9736T9+qFy6W/lSzPprPjFR71BSVkJLR5tRnQdiYmEKgM+dhzzz8WNg6+5y+80eOonKdavRY3j/PIsxvxmbmGDv4CCXZu9gz9nTZxQUUf7xfeRDRHgEPTt3l6Wlpqbifcub3Tv/5czlc9/UxOrssipihb6BPq9fvaZ8pQoA1G/ckPqNGxIeFo6mpgZIJOz6eydWRSwVHO3n+fr4EhEeQe8uPWVpqamp3Lntze5/dnPq4hmUlZVlC3nY2NpQolQJmtVrwn9nz9OgccPPlF44Bb4N5Oa1G8z4Y5ZceqWqlfl7704iIyNRVlZGV1eXNo1bYtXISkGR5j2vufO4dOEiy9aswMzcTNHhfFafqm2pZOPG+MPLCIuL+mzeX8o35ezTm5x8fBWAFxFv0VBR47fqP/HPnZNIkX52/2/JgrleXL5wkSWrl2c4hpWrVmHHvl1y53Crxs2xKvJtnsO6BnrEREXLpcVEvkNDSyNDL0pESBh+9x7RdWS/TMvS1NZEU1sTU0tzbIs6MKX7cO5f86Zcjc8PcS3McjrPRMieHDVSwsLC8PX1Zc2aNdSsmb4s6IULF2TbTU3TL3Tfvn2LoWH63Xtvb2+5MtTU1OQmtgO4urqSkpLC1atXZcOy3n/W+5UHciO75aakpHDjxg1Zr4mvry+RkZG4un55mVo3NzfU1dUJCAjI9tCucePGMXz4cLm0Ky/zdrz8pRPn0NXXo1QWSwrq6KdPyPW584B3UdGUqZK++kanfl1o1aW9LF9UeCSLJs2lz9hBOBQr3MMRPlW6TCkCXsivTBYQEICF5be91GFmKlSuyOYdW+TSZk2bhZ29Hb92+/
W7bKBA+hLT0VHRGBsbZ9j2fqnPIwcOoaamRoXKFQs6vBypWKkCG//eLJfmOX0WtvZ2dO6a+TGUSqVIpVKSkjJfaamwO3LgEAaGhlStnvkKMu+X1Lx1/SYRERFUr1mjAKPLH1KplPl/eHH+zDmWrl5e6C9a+1RtS1W7Ukw8sozgmIzLy35KXUVV7kYlQJo0/cahRALS76CNIpVKWfjHfM6fPcfiVcs+ewzfn8M3r98gIjyCGrW+zXPYzsURn9vyy0T73X2ErYtjhrzXT19CR0+X4hVKZaNkKUilpObhDW1FyOkSxEL25KiRYmhoiLGxMatXr8bS0pKAgADGjv2wFKizszM2NjZMnTqVWbNm8fjxY7y8vOTKsLe3JyYmhlOnTlGmTBm0tLQoWrQorVq1ok+fPqxatQpdXV3Gjh1LkSJFaNWqVa4rl91yVVVVGTx4MIsXL0ZFRYVBgwZRtWrVLw71AtDV1WXkyJEMGzaMtLQ0atSoQVRUFBcvXkRPT49u3bpl2Cez9bHzcqhXWloal06cx71+zQwXNhdPnMPSpgi6+ro8feTHztVbqN+6CRbW6V+yRmYm8rFqagBgamGOoUnGC8HCrGPnTvTr0ZeN6zZQv2F9Ht5/yP7d+xg9YeyXd/7GaGtrZxjTrqmpgZ6+XqEf6/6x+Lg4Xr96LXsf+OYtTx77oaunh56eLpv+2kjNurUwMjLizes3rF66EivrIlSs+uEO3N5/duNWqiSaWprcvHaD1UtW0Pu3vugU8qWntbS1cXSW/4OvoamJvr4ejs6OvHn1mlMnTlG5amUMDA0IDgph68bNqGuo4169WhalFl5paWkcOXCYJh5NUFGR/1N0eP8h7BzsMDA05MHd+yyZv4iffu6Arb1tFqV9O7zmzOPE0ePMmT8XLS0t2RwFHR1t1DU0FBydvH7u7ajlWJ7Zp9YRn5yIgWb671BcUgJJqZlfVF5/+ZCWJWrzLOwVj0MCsNQz4ZfyTbke8IC0/7dQNFTUsNT78LfGTNcIByMr3iXGERobCYCOmhamOgYYaaU/w8tKP72nIiL+HZHx7/Krytkyf+48Th49wWyvT4+hDuoa6X/bD+0/iL2DPQaGBty/e5/FXgvp8EtHuWepKFJifAJhgR/mnIYHhfLG/yWaOtoYmmZ8lkvVRrW4ePQshzb/S6V61Xlyz4e7l27SY/xAuXxpaWncOHOZCnXcM1x/hAWFcOfiTVzKuKKtp0tUWARn9h5DVU2N4uVL5k9FC4poo+SLHDVSlJSU2L59O0OGDKFkyZIUK1aMxYsXU6dOHSD9Yv/vv/9mwIABlC5dmkqVKjFz5kzZBHVIX+Grf//+dOzYkbCwMKZMmcLUqVNZv349v//+O82bNycpKYlatWpx+PDhr54Qmp1ytbS0GDNmDL/88guvX7+mZs2aX5yo9LEZM2ZgamqKp6cnz549w8DAgPLlyzN+/Pivij23fLwfEB4SRvVGGXt2gl69Ze+GncTGxGBsZkrTji1p0LqpAqLMf64l3PCcN4eVS1ewYc16LK0s+X3EUBo3U+xqFULWfB/5MmLgUNn7FYvSJwI2ataEoaOH8+zJU44fPkrMuxiMTUyoWKUi3fv2Qk3tQyPf5+EjNqxZT0J8PDZ2tgwbO4KGTb/9Y66mrsZd7zv8s30n76LfYWRkRJlyZVixdiWGRt/eyjg3r90gKDCIZi0zzg97+SKANctWER0djYWVBb/26EqHXwrXszJya8+u9FV3BvWVv7gbP2UiHpn8LBSpqWt1AGY1k4918fm/Of3kOgBDanbCTMeIiUeWA7DT+wRSqZTOFZphpKVPdEIM118+YOvND5N2nU1smPlRmb2qtAbgtN81Fv+3HYDKtiUYUutnWZ5RdbsCsP32MbbfPpbHNc2Zvbv2ADCkn/zPZdyUCbL5ji9fBLB62Uqio6KxsLKkS49udOyc/VVD89urpy9YNXWB7P3BjemrolaoU5WOg7pzfMcBbp69zLgVswEwMj
eh57iBHNiwiwuHzqBvbED7Ab/KnpHy3pO7PkSGhlOpXsYbJyqqqvg/8uPCoVPEx8aho6+Hg6szv80alWFS/rdG9KTkD4n0035ZQSHO/v8L/0dQ0uLbubMvZF9C8rc55Cin1JS/9VVosi/tO5o/8CUqSt/nkMjM9N45M8/Kmtl0IPcDnyi84ZCV1T9NUHQIBeby87xbonvHkg0ggY6DuudZmXmlVam6ig4hg1Z/5ex5Ivt6LfhyJiHnz0kRBEEQBEHQUtXAQs+Yvfe+v8VIfmRSqZSnDx7TuNO3v8pnQcnPhzn+yH6cW4KCIAiCIOSZuOQEeu+Y/uWMwjdFIpEwfuVsRYfxjRENj/wgGimCIAiCIAiCkEuicyR/iEaKIAiCIAiCIOSSmDifP0QjRRAEQRAEQRBySTRR8odopAiCIAiCIAhCbonxXvlCNFIEQRAEQRAEIZdEEyV/iEZKIeEX8lLRIRSYYqaF44m7BUH1B3qmxvWXDxUdQoEw0/n2HpyYW7aGFooOocD8SM9JmdCgp6JDKDARcdGKDqHA2BtZKjqEH5ZYVjh//DhXUIIgCIIgCIKQx8TE+fwhGimCIAiCIAiCkFuijZIvRCNFEARBEARBEHJJ9KTkD9FIEQRBEARBEIRcEnNS8oeSogNQpDp16jB06NAst9vb27Nw4cIclzt16lTKli2b67gEQRAEQRCEb4Mkh/8J2SN6Uj7j+vXraGtrKzqMDF76+nP98HmCXrwmNvIdrQb/StEKJbK172u/52z3XINJEXO6zRgit+1dRBTndx7F/64vKUnJGJgb06RXeywcrElNSeXC7uP43/UlMjgcdS0N7NycqfVTE3QM9fKjmrkSFxvHX6vWcuHseSIiIijq4sLgEUMo7uYKQJ3KNTPdr//gAXTq8ktBhvpVvG/dZtumrfg88iUsNBTPeXOoVbe2bPvZ02fZu2sPvj4+REdFs37bRlyKuSgwYnnPHvpxfv8JXj8L4F1EFF1G9aNE5bJZ5o+OiOLQxl28fhZAWGAI1ZrWoUWPDhny3b18kxPbDxAREoaxhRlNf21D8fIlZdulUikndhzk+qkLxMfGY1/ckdZ9fsHE0iw/qvlZo7oNIiw4NEN63eaN6DKwJ2cPn+Tq2Yu8ePKchPh4lv7zF1o6mX8fJSclM3PYRF4+e8HUpXOwdbLP5+hz5u7tO+zcsh0/38eEhYYxbe4Mqtf+8LvYoGqdTPfrM6g/HX/tBEB0VDRLvRZz5cIlJEoSatatzcBhg9DU0iqIKuSbzes3sXLpCn76uQNDRw5TdDjZ9u+mHezZvFMuzdLGij/XLQEg6E0g21Zv5PF9H5KTkyldsSzdBvVG39AAgId37jN75JRMy562dC5OxZzzNf6cOLz3IEf2HiQ4MBgAWwdbOnXrTIWqleTySaVSpo2exK2rNxg/azJVa1YDwP/JM3Zt3cGjuw+IjorGzMKcJq08aPlT64Kuyhft3PA3uzZtl0uzsinCwo3LAUhKSmLTinVcOnOB5KRkylQqR+/f+2NgZADAu6hoFs+eT8Cz57yLfoe+gT4Vq1Xh595d0NL+tn9XP0d0pOQP0Uj5DFNT089uT05ORlVVtYCi+ehzE5Mws7WkVK2K7FuyJdv7JcTGc3j1P9i5OREbFZNh298zV2Lj6kS7ET3Q1NUmMigUDW1NAFKSkgl+8YaqLethZmNJQmw8p7cdYM+iTXSZOihP6/c1/pw1F/+nzxg/dSLGpiacOHKcEQOHsWHHZkzNTPn38F65/NcuX+GPmXOpVa+OQuLNrfj4BJxdiuLRsjnjR43LsD0hPp7SZUtTr2F95s70VECEn5ecmIilXREq1q3Glnmrvpg/JTkFbT1d6rZryoWDpzLN88L3KdsXrqPxL61wrVAK7wvX2fzHSgb/MQ4L2yIAnNt3nEtHzvDToG4YmRlzfPsB1s1czLAFU1BVK9jf5UmLZiNNS5O9f/XiJV7jZ1GpZhUAkhKTKFmxLCUrluXf9X
9/tqx/1m3FwMiQl89e5GvMuZUQn4BjUSeatGjG1LGTMmzfeehfuffXLl/Da9Yf1KxbS5bmOWUm4WFhzF08j5SUFObNnMv8OV5MmJ6xvG/FowcP2bd7L85FC88FeU5Y29swdu6HhoaycvoyzgnxCcwdOx1bR3vG/zkVgF0b/sZrkidTF3uipKSEi1sxlu5YK1ferg3beXD7Lo4uTgVWh+wwMTWhW7+eWFkXQYqU00dPMmv8NBb+tRRbB3tZvv3/7Mn0LvkTXz8MDAwYPmk0JmamPLr/kGV/LkZJSYnm7VoWYE2yx8belknzpsveKyl/WJ5747K/uHX1BsMnj0ZLR4u/Fq/Ga4onM5bMBUCipESlalXo1LMzevr6BL55y1+LVhGzIIbfJ44o8LoUFNE7kj9+6OFeACkpKQwaNAh9fX1MTEyYNGkSUqkUyDjcSyKRsGLFClq2bIm2tjazZs0CYM6cOZibm6Orq0uvXr1ISEjI15gdSxejRrtG2e49ee/Exr24Vi2DpZNthm3XDp1D19iApr3bY+log4GpEfYlXTAwMwZAXUuDn0b1onjl0hhZmmLlbEv9X1sS9Pw10WGReVGtr5aYkMi5M+foN3gAZcqXxdrGmh59e1LEpgj7/t0LgLGJsdzrwrkLlKtQDqsiVooNPofcq7vT97d+1M6icdXEoyk9+/aiUpVKmW5XtGLlStL451aUrFI2W/mNzIxp2bMDFWpXRUNLM9M8Fw+dwaWsG7VbNcLM2pJGnVpi5WjD5aPngPS7nBcPnaZeu6aUqFQGSztrOg7qTnREFA+ve+dRzbJPz0APfSMD2evO1VuYWZpTrJQbAI3aNMOjQyucin/+Avbu9ds8uHWXDr1/LYiwc6VytSr07N+bGnUy78k0MjaWe106f4GyH/1evvB/wfUr1xg+fhSuJd0oVbY0A0cM4eyJ04SGZOyN+hbExcUxbeJUxkwci66erqLDyRUlJWUMjAxlL1399F51vwc+hASF0HfUIGwc7LBxsKPf6MH4P37KQ+97AKioqsrtq6Ony63L16jVuF6hG99fuXpVKrpXxsqmCEVsrOnSpzsamhr4PPCR5Xnm95S9O3YzZGzG3rCGHo3p8/sASpYtjYWVJXUb1adB04ZcPn+xIKuRbUrK8sdV7//HNS4mltNHTtJtQE9Kli+No4szv40egu8DHx4/9AVAR1eHRq2a4lSsKKYWZpQqX4ZGrZric++BIqskfKN++EbKxo0bUVFR4dq1ayxatIj58+ezdu3aLPNPnTqVNm3acO/ePXr27MnOnTuZOnUqs2fP5saNG1haWrJ8+fICrEH23PvvBlEh4VRrXT/T7U+8H2FhX4T9S7eybPBMNk1ezN2z1z5bZlJ8IkgkqGtp5EfIOZaamkpaaipqampy6Wrq6ty7czdD/vCwcK5cvEyzls0LKkQhH714/Azn0sXl0lzKuPHi8TMAwoNDeRcZjXOpD3k0tDWxcXbgha9/gcb6qZTkFK6cuUCNRnVydIEWFRHJxkVr6D1yIOoaal/e4RsQERbO1YtXaNKimSzt4f0H6OjqUMz1w7GrUKkCEiUJPg8eKSLMr+Y1Zx7uNapRqUplRYeSa0Fv3jKoY2+GdRnAcs+FhAaHAOmjDCQgN9JAVVUNiUSC732fTMu6dfk676JjqNW4XkGEnmupqamcP3WWhIREipdMH0acmJCA1/S59Bs6EENjo2yVExsbW2gbp4Gv39Dvp+4M6tyXxbO8CA1KP67PHj8lNSWFUhXKyPIWsbXGxMyUxw8yP67hoWFc++8KrmVKZrr9eyGRSHL0ErLnhx/uZWNjw4IFC5BIJBQrVox79+6xYMEC+vTpk2n+X375hR49esjed+rUiV69etGrVy8AZs6cycmTJ/O9NyUnIgJD+e+fY3Qa31eu2/ZjUcHheJ++SsUmNajSoi6B/q84vfUASirKlKxRIUP+lKRkzu88gmuV0qhrFo5Gipa2FiVKlWTTuo3YOdhjaG
TIqeMneXjvAUWsi2TIf+zQEbS0teSGlAjfrpjIaHT05edH6RjoERMZLdv+Pk0+j65sm6LcunyduJhYqjes/eXM/yeVSvlr/grqeDTAwcWJ0KDgfIyw4Bw/fCz99/KjXpeIsHAMDA3l8imrqKCnp0d4WHhBh/jVTh47wWMfX9ZuXqfoUHLNuXhR+o4chKWNFZFhEezZ8g8zhk1kzpqFOLu6oK6hwfa1m+nQszNSqZQdf20hLS2NyPCITMs7d+QUpSuUwdjUuIBrkj3Pn/oz+rdhJCUloampyfiZk7C1twNg7ZJVFC/pStWa7tkq69G9h1w4fZ7Jc6d/OXMBK+rqwm+jf8fKpggR4eHs2ridyb+Pw2vdYiIjIlBRVUFbR0duH31DAyIjIuXSFs6Yx41LV0lKTKKCeyX6jyw8w8LzgxjulT9++J6UqlWryrVq3d3d8fPzIzU1NdP8FStWlHv/6NEjqlSpIpfm7v75L6rExESio6PlXslJybmsweelpaVxcNV2qrVugJFF1nNspFIp5vZW1GzfGHM7K8rUqUyp2pW4c+ZqhrypKakcWP43UqBBt9b5EndujZ82EaRS2nu0oWGN+uze8S/1GtVHopTxVD984DANGjdEXV1dAZEKwgf/HTtDqYpls30XFuDk/qMkxCXg0aF1/gWmAEcPHqZeowaofae/l0GBQSyct4Aps6Z90989ZSqXp0rtatg62lO6UjlGzppAXEwcV89dRM9AnyGTRnD7yg16t+xM39ZdiIuJxb6oI0qZ3EUOCwnj7s071G6aeU9/YVDE1pqFfy1n3spFNGnlwcLZXgQ8f8HVC5e5e+sOvQf3z1Y5L549Z9b4aXTq3plylTPeAFS0clUq4F6nOnZO9pStVJ5xcyYTGxvL5bM5G5rWfWAv5q5awOgZ4wl6E8im5d9ugzw7JJKcvYTs+eEbKTmVF6t9eXp6oq+vL/c6sml3HkSXUVJ8IkH+rzm1ZT9ePSfg1XMCl/efJuTlW7x6TiDg4VMAtA10MbaSX+HI2MqMd2FRcmnpDZRtRIdF8NOonoWmF+W9ItZFWLRqKUfOHeefA7tYuWE1qSmpWBWxlMt39/YdXr4IwKNVCwVFKuQ1HQM9YqLke0RiIqNlPSfv//9pr0lM5LsMvSsFKTQohIfe96jVJGfDXHzuPOCpz2P6tvyV3h6/MLbnUACmDxnP2nmFb8hpdtzzvsvLFy9p1spDLt3Q2IjICPk78KkpKURHR2OUg4ZdYeD7yIeI8Ah6du5Orco1qFW5Brdv3mbX9n+oVblGljfICjttHW0srC0JehMIQKmKZZm/aTnL/1nHin83MGDs70SEhmNqaZ5h3/PHTqOrp0N598I5hw7Sh65ZWVvhXKwo3fr1xMHZgQP/7OXurTsEvnnLzx7taF23Ga3rpg9TnDNpJuOHjJIrI+D5CyYOG0vjlk3p2O3bWE1SW0cHK2srAl+/xcDQkJTkFGJj5BfeiYqIxOD/q7a9Z2BkSBFbaypWr0Lf4b9xfP8RIr7BXs/sk+TwlX2enp5UqlQJXV1dzMzMaN26Nb6+vnJ56tSpk2FIWf/+8g3ngIAAPDw80NLSwszMjFGjRpGSkiKX5+zZs5QvXx51dXWcnZ3ZsGFDjmLNaz/8cK+rV+V7Cq5cuULRokVlq5R8iaurK1evXqVr165yZXzOuHHjGD58uFzalttHshlxzqhrqtNt5u9yad6nr/Dy4TNaDPoFfdP0P/BFitoRHig/ATUiMBQ9EwPZ+/cNlIigMDqO6Y1mFsuhFgaamppoamryLvod165co//gAXLbD+0/iEvxYji7fJur6ggZ2bk48uSeLzU8PtyN9bvrg52LIwBGZiboGujx5L4vVg42ACTExfPyiT9VG2c+obsgXDhxFj19fUpXLpej/X7p3502XTvK3keGhTN/oif9x/2OYyFavjUnjuw/hEtxF5w+We3KrWQJYt7F8NjHF5fixQC4ffM20jQpxUu4KiLUXKtQuS
Kbd8ivyjhr2izs7O34tduv2f7bU9gkxMcT/DYIAyP5YXnvJ9M/uH2P6MioDA0RqVTK+WOnqdGgDioq384lSVqalOTkZH7p2YVGzZvIbRvcvT+9BvWlUrWqsrQA/+dMGDqWek0a0KVP9wKONvcS4uMJfBNIzYZ1cHRxQllFhXu37lK1Vvryym8CXhEaHIJLieJZlpGWlr4YUXJy/owYKQzys3Pk3LlzDBw4kEqVKpGSksL48eNp1KgRDx8+lLtx3qdPH6ZP/zCEUOuj5dlTU1Px8PDAwsKCS5cu8fbtW7p27YqqqiqzZ88GwN/fHw8PD/r378/WrVs5deoUvXv3xtLSksaNG+djDbP27Xwj5JOAgACGDx9Ov379uHXrFkuWLMHLyyvb+//+++90796dihUrUr16dbZu3cqDBw9wdHTMch91dfUM3fw5Wf40KSGRyKAw2fuo0AiCX7xBQ0cLPWMDubwSJSVMrS3k0rR0dVBWVZFLr9CoOn/PWsmVA2coVrkUgc9ecefsNRp1bwOkN1D2L9tK8Is3tBnaDWmalNjIdwBo6GiiXEj+uFy7fBUpYGtrw+tXr1mxeDm29rY0/WgSbmxMLOdOnWXA7wMVF+hXiouL49XLV7L3b9684bHvY/T09LCwtCA6KorAwCDZykcBLwIAMDZOX9VM0RLjEwgLDJG9Dw8O443/S7R0tDEwzfzO+Bv/l0D6+R8bHcMb/5coq6hgbpPeS1bdoy6rpszn/IGTFC9fkjsXb/D66Qva9ku/YymRSKjuUY/T/x7GxMIUIzMTju84gJ6hPm6VyuZvhbOQlpbGxRPnqNagVoaL06jwSKIiIgl+EwTAq+cBaGhqYmRmgo6uDsZmJnL5NTTTv1PMLM0xKmTj+uPj4nj96rXs/ds3gTx57Ieunh7mFul31mNjYzl/+hz9hgzIsL+dgx2VqlZm/ux5DB0znJSUFJbMW0SdhvUwMTXJkL8w09bWxtFZfoldTU0N9PT1MqQXZttWbaRc1YqYmJsSERbO7k07UFJSwr1uDQDOHT1NEVtrdA308Hvoy5bl62jStjlWNvLzAx/cvkdIYDB1CvFQr42r1lGhSiVMzU2Jj4vn3Mkz3Pe+y9R5szA0Nsp0mKapuRkWVul/Y188e87EoWMoV7kCrTu0lfUoKCkroW9gUJBV+aJNK9ZTsVql9OMaGs7OjX+jpKREjXq10NLRpl7TBmxavg4dXR20tLVYt3g1Lm7FcHFLv3lw68oNoiIicSpeFA1NDV49f8nmVespVtIVM4uMvWjfi/ycDH/06FG59xs2bMDMzIybN29Sq9aHObVaWlpYWFh8ujsAx48f5+HDh5w8eRJzc3PKli3LjBkzGDNmDFOnTkVNTY2VK1fi4OAguwZ2dXXlwoULLFiwQDRSFKVr167Ex8dTuXJllJWV+f333+nbt2+29+/YsSNPnz5l9OjRJCQk0K5dOwYMGMCxY8fyLeZA/9fsnLtG9v7s34cAKFG9PE37/MTFPSd5cOEmfb3GZLtMS0cbWg3+lf92HePyvtPomxpS75fmuFVLv7sbExHN09vpq+hsmrxYbt8OY/pg65p1o6wgxcbEsmb5KkKCQ9DV06VWvTr0HtBH7g7d6ROnkEql1G/cQIGRfh2fhz4M7vehkbVkfvoxadq8GROnTeK/cxeYPW2mbPuUcenPkujZtxe9+vUu2GAz8epZAGumLpC9P7RxFwDla1elw6BunNh5kJtnLzN2+SxZnsWjZ8v+/fpZAN4XrmNgaiTLY1fMiU6/9+T43/s5tm0fJpamdBndX/aMFIDarRqRlJDE7lXbSIiLw764Ez0mDC7wZ6S89/D2PcKCQ6nZqE6GbWcOn2D/1g/PD5kzahoAPYf3p0bDjPkLM99Hvowc+GFp1pWLlgHQqFljRk9Of87PmROnkUql1G2U+cXquGkTWeK1iFGDhyORKFGzbi0GDR+c/8ELmQoPDWPZ7AXEvHuHrr4exUq6MnWxJ3oG+g
C8ffWaneu2EvMuBlNzU1r+0o6m7TIOrz139BRF3YphZWtd0FXItqiISBbO/pPwsAi0tbWwd3Jg6rxZlKtUPlv7Xzz7H1GRUZw9fpqzx0/L0s0szFi7c1N+hZ0r4aGhLJo5j3fR79DT16d4KVdmLf1Ddly7DeyFREmC19S5pCQnU6ZiOXoP/TCsSE1djVOHjrNx+TqSk5MxMTOhco2qtP6lnaKqVCBy2kRJTEwkMTFRLi2zG9iZiYpKH4ZvZCTfON66dStbtmzBwsKCFi1aMGnSJFlvyuXLlylVqhTm5h8aio0bN2bAgAE8ePCAcuXKcfnyZRo0kL8uaty4MUOHDs1h7fKORPr+oSCCQq25nHdzUg6v2YkECU37/JRnZeal5m41FB1CgVFV/nHuA/z3zDvPytq5dAMgocOgbnlWZl4x0zH8cqbvhK1h5nflvkeaqt/uJPac8g9/o+gQCoyeeuEdlpzXElISv5zpO1CmSNZDyxSlx/apOcpv5wPTpk2TS5syZQpTp36+nLS0NFq2bElkZCQXLlyQpa9evRo7OzusrKy4e/cuY8aMoXLlyuzenX5t2bdvX168eCF3Az0uLg5tbW0OHz5M06ZNcXFxoUePHowb9+EB0YcPH8bDw4O4uDg0NTN/Rll++nGuoH4QUqmUlz7+/Dy+n6JDEYRckUqlPHvgR/8Z3+/TiQVBEITvR06XIB43bmyGucnZ6UUZOHAg9+/fl2ugAHIjgEqVKoWlpSX169fn6dOnODl9O8NIPyUaKd8ZiURCvxwM8xKEwkYikTB2xawvZxQEQRCEQiCnw72yO7TrY4MGDeLgwYOcP38ea+vPD498/2iMJ0+e4OTkhIWFBdeuyT+gOygofb7j+3ksFhYWsrSP8+jp6SmkFwXEEsSCIAiCIAiCkGv5+cR5qVTKoEGD2LNnD6dPn8bBweGL+3h7ewNgaZm+sIy7uzv37t0jOPjDQ39PnDiBnp4ebm5usjynTp2SK+fEiRNffPZffhKNFEEQBEEQBEEohAYOHMiWLVvYtm0burq6BAYGEhgYSHx8PABPnz5lxowZ3Lx5k+fPn7N//366du1KrVq1KF26NACNGjXCzc2NLl26cOfOHY4dO8bEiRMZOHCgrEenf//+PHv2jNGjR+Pj48Py5cvZuXMnw4YNyzK2/CYaKYIgCIIgCIKQS/nZk7JixQqioqKoU6cOlpaWsteOHTsAUFNT4+TJkzRq1IjixYszYsQI2rVrx4EDB2RlKCsrc/DgQZSVlXF3d+fXX3+la9eucs9VcXBw4NChQ5w4cYIyZcrg5eXF2rVrFbb8MIg5KYIgCIIgCIKQazmdOJ8TX1qE18bGhnPnzn2xHDs7Ow4fPvzZPHXq1OH27ds5ii8/iUaKIAiCIAiCIORSfj5x/kcmGimFhF/oS0WHUGB+pGeH/Eiq25dWdAgFQknpxxkl+yM9Ret1VIiiQygw228fV3QIBWZU3S6KDqHAGKCr6BB+WPn5xPkfmbhaFARBEARBEIRcEo2U/CEaKYIgCIIgCIKQS/k5J+VH9uOMWxAEQRAEQRAE4ZsgelIEQRAEQRAEIZfEcK/8IRopgiAIgiAIgpBLoomSP0QjRRAEQRAEQRBySfSk5A/RSMljU6dOZe/evXh7e+fbZ9R1rkhJS2fMdAxJTk3hecRbjjy8QEhsZJb7mOsY0ai4O0X0zTDS0mP//XNc8JePMTvlti1dj6ImNuhp6JCYksSLiLccfnSRkJiI/KnsV/hr1VrWrf5LLs3Wzpa/d+9QUET5Z9O6jZw7c44Xz1+grq5OqdKlGDDkN+zs7RQd2ldZt/ov1q9ZJ5dma2fL1l1/A7B/9z5OHDvBY19f4mLjOHz6KLq6388ynN/rcd28PpN6Df4N24/qNajvb3jfkn+oWKu2rRk1fkxBh5tre7ftYtvaTTRr24Lug/oAEBkeweaV67l705uE+HisrIvQ5tcOVK1VTW7fW1eus2vTDl48e46amiquZU
oyesaEAq9DvaKVKGVZFDNdI5JTU3gR/oaDD//77He+ua4xTYpXw9rADCMtffbeO8N/z+SP5YSGvTDS0s+w70V/b3bfPZ0hvXfVNriaO7D+6j7uBz79+op9pY4t2xP4NjBDeuv2bRg2ZgS/9xuE9y1vuW0t27ZixLhRBRRh3li3+i82ZPIdvGXX37x985aOrdpnut80zxnUbVCvIEIUvmOikfINcjQuwiX/O7yKDEJJSYkmxavRu2ob5p3dTHJqSqb7qCqrEh4bxd03frQoUSvX5b6ODOb2Kx8i49+hpaZBQ5eq9K7ahjkn1yOl8D1UwcHJkUXLF8veKysrKzCa/ON96zZtf2qHawlXUlNTWbV0JcMGDmXrrm1oamoqOryv4uDowIJli2TvlVU+HMOEhASquFehinsVVi1bqYjw8tX3elxv/79exd3S67V62UqGDRrKln/k69WiTSt69+sje6+hoaGIcHPliY8fJw4exc7RXi59qecCYmNiGTNzIrr6elw4dY4F0/9gzgovHIo6AXDl/CVWeS3l515dKFmuNGmpqQQ8D1BALcDJ2IZL/t4ERAahJJHQzLUGfd3b8efpDSRl8fdGTVmFsNgo7rx5TKuStTPNs/DcNpQ+uvtsoWdC/2rtufP6cYa8tRzL501l8tCqjWtITU2Tvfd/+owRg4ZRp0FdWVrz1i3o2a+37P23dP5+zMHRgfmZfAebmZux58h+ubwH9uzj7y3bqFKtaoHGqGiiJyV/iNW9MpGWlsYff/yBs7Mz6urq2NraMmvWLADGjBmDi4sLWlpaODo6MmnSJJKTkwHYsGED06ZN486dO0gkEiQSCRs2bMjz+P66uo+brx4RFBPO2+hQdnqfwFBLD2t9syz3eRUVxKFHF7jz5jEpaam5LvdqwH38w98QEf+O11EhHPW5jKGmLoZaenlez7ygrKyMsYmx7GVgaKDokPLF/KUL8WjpgaOTI0VdijJh2kSCAgPxfeSj6NC+WoZjaGAg29bhl4782r0LJUqVUFyA+eh7Pa7zlyykWYsP9Ro/NfN6aWioyx17bR1tBUWcMwnx8SyZ7UW/EYPQ1tWR2+b7wIembZrj7OqCuZUF7bp0RFtHm2eP03sHUlNT2bB0DV36dadRy6ZY2RTB2t6WanVqKKIqrLmym+svHxL0Loy30aFsv30MIy09rA3Ms9znZWQQBx+ex/u1b5Z/b2KT4nmXGCd7uZk7EhoTydOwV3L5rPRMqe1cgR23j+Vpvb6WgaGh3Ll5+cIlilgXoWz5crI8Ghoa3+T5+6msvoM/TTc2Mea/s+ep26A+Wlpaig26gEly+J+QPaInJRPjxo1jzZo1LFiwgBo1avD27Vt8fNL/eOrq6rJhwwasrKy4d+8effr0QVdXl9GjR9OxY0fu37/P0aNHOXnyJAD6+hm7s/OahooaAHHJiQVarqqyCpVs3QiLjSIq/l2efnZeeRXwkpaNW6CurkaJUiXpP2gAFpYWig4r38XGxACgp1c4G4858erlK1o3bYmamjolS5Wg36D+mFt8/8cwM9/Tcf1YVvU6ceQ4xw8fw8jYmOq1qtO9d89v4m702kUrKVelIqUrlGX3lp1y24qVKM6ls/9RvmpFtHS0uXz2AslJSZQoWxIA/8dPCQ8NQyJRYnTf34kMj8Te2YFf+/XA1kHxw/w0VNUBiEtKyLMylSVKVLB25dzTm3LpqsoqdK7YjN13T/MuMS7PPi+vJScnc+LIcX7q3FHujvqJoyc4ceQ4RsZGVKtZna69u38T5++nXr18RZv/fweX+Mx3sO8jH/we+zF09AgFRCl8j0Qj5RPv3r1j0aJFLF26lG7dugHg5OREjRrpd7EmTpwoy2tvb8/IkSPZvn07o0ePRlNTEx0dHVRUVLD4zEVUYmIiiYnyF/4pySmoqOb8cEiAliVr4x/+hqB3YTnePzflutuVpplbddRV1AiOCWfNlT2kStMyL0iB3EqWYMLUidja2xEWEsq6NX
/xW+8BbN65BW3tb/OOVnakpaWxaN5CSpcpjaOzk6LD+SpuJdwYP2UCNna2hIWGsWHNOgb2+Y1N2zej9R0fw8x8T8f1Y2lpaSz2WkipT+rVsEkjLCwtMDE14anfU1YsWUbAiwBm/zlHgdF+2cXT5/H3e4bnCq9Mtw+bMpqF0/+kZ+vOKCsro6ahzshp47EoYgVA0P/nOfyz8W+6/tYLMwszDuzcy7Rh41m0aSU6eoqbcyUBWpesg3/YawLz8O9NSUtnNFTVuf7ygVx6q5J1eBH+hgeFYA7K5/x39jwxMTE0bd5Mlla/cUMsLC0wNjXhmd9TVi1dQcCLAGb+OVuBkeacWwk3xk2ZgO3/v4PXr1nHoD6/sTGT7+BD+w5i52BPqTKlFBSt4ojhXvlDNFI+8ejRIxITE6lfv36m23fs2MHixYt5+vQpMTExpKSk5PiupqenJ9OmTZNLq9apMdV/aZrjeFuXqou5rjErLv6T431zW+7t1z74hQagq65FbacK/FqhKcsv/pNlt76iuFd3l/3buagzbqVK0M6jDadPnKJF65YKjCx/ec2Zx7Onz1jx1ypFh/LVqn56DEu68VOLdpw+eZrmrVooMLKC9z0d14/Nn5ter+Vr5evVqm1r2b+dnJ0xNjHm9wGDef3qFUWsrQs4yuwJDQ5hw7I1TPxjOmpqapnm2bFuK7ExsUyaNwNdfT2uX7jCgul/MH2RJ7aO9kil6XP72v76k2wy/W+jf6d/xx5cPneRhi2aFFh9PtW2dH0s9IxZ+l/eLj5Sxa4kPsH+RCfEytJKWDjibGLD/LNb8vSz8sPh/Yeo7F4FE1MTWVrLtq1k/3ZydsLYxJhhv/3O61evKWJdRBFh5srH38FORZ1xLelGh0y+gxMTEjl57ARde3VXQJSKJ5oo+UPMSfnE5yajXr58mc6dO9OsWTMOHjzI7du3mTBhAklJSTn6jHHjxhEVFSX3qvJTwxzH2qpkHVzNHVh16V+iEmJyvH9uy01ISSI0NhL/8DdsvnEIMx0jSloU/ju7urq62NjZ8urlqy9n/kZ5zZ3HpQsXWbJqGWbmWc9R+lbp6upiY2vzXR/DzHyvx3X+/+u1eOWX6+VWMn3eUWE+9s8ePyUqIoox/YbRqUFrOjVozcM79zmy5yCdGrQm8PVbju49xIBRQyhVvgz2Tg781O1nnIo5c3TfYQAMjAwBsLazlZWrqqaKuaUFocEhCqkXQJtS9XCzcGTFxX/y9O+NoaYuRU1tufrivly6s4ktxtoGzGw2kD9aDOWPFkMB6Fa5BQOq/5Rnn/+1At8GcvPaDZq3/vxNE9eSbgC8LsTnb3a8/w7+tB5nT58hISGBJh6Ka0Qr0vt5yNl9CdkjelI+UbRoUTQ1NTl16hS9e/eW23bp0iXs7OyYMOHDMpAvXryQy6OmpkZq6ud7FNTV1VFXV5dLy+lQr1Yl61DSwolVl/8lIj46R/vmabkSCUhAWanwr5oVFxfH61evaNLs+/sSlUqlzP/Di/NnzrF09XKs/j905HsTFxfH69evaWzy/R3DzHyvx1UqlbLgDy/Onz3HklXZq5efb/qqT8YmJl/IqTilypdm3l9L5NJW/LEIKxtrWv3cjqT/D/OVKMlfpCgpKSFNSx8y6+jijKqqKm9evqJ4qfQL25SUFEKCgjA1Ny2AWmTUplQ9Slk6s/ziTsLj8u7vDUAl25LEJMbxKOiZXPppv2tcfXFPLm1UvW7su3+Oh4Vo+NeRA4cwMDSU63HIzJPHfgAYmxgXRFj55v13cKNPvoMP7TtI9Vo1MDA0VFBkiiYaHvlBNFI+oaGhwZgxYxg9ejRqampUr16dkJAQHjx4QNGiRQkICGD79u1UqlSJQ4cOsWfPHrn97e3t8ff3x9vbG2tra3R1dTM0SL5W61J1KVekGBuvHyAhJQkd9fRVNBKSE7MccqUsUcJM1wgAFSUl9DV0sNQzISklmbC4qGyVa6SlRxkrFx6HBBCbFI++hg51nS
uSnJqCT/DzPK1jXli6YDHVa9XAwtKS0JAQ1q5ai7KSMg2a5LzXqrDzmjOPE0ePM2f+XLS0tAgLTR8vrqOjjfo3OFHzvWULl1KtZnUsLC0IDQll3eq1KCkpU79xAwDCQsMIDwuT3V1/9uQpWlpamFtYoKf/7U8u/16Pq9fceZw8ehxPr8zr9frVK04cPU7V6tXQ19fnqd8TFs9fRNnyZXEu6qzg6LOmqaWVYXK7uoYGunq62DrYkZKSgkURS9bMX0aX/j3R0dPl+sUr3L3pzZhZkwDQ0taiYYsm7NzwN8amppiam7J/Z/rfmaq1C36Fr7al61Heujjrru4nMSUJ3f//XYhPTiIlLfMliJUlSpjrpl+MKyspo6+hi5WeKYmpyYR99NwtCVDJtgQ3Xj4kTSq/hP37Vb8+FRkXnecNpdxKS0vjyIHDNPFogorKh8up169ec/LoCapWr4qevj7P/J6ydMFiypQri1MhPn8zs2zhUqrXrJ7ekxcSyvr/fwc3+P93MKT3bt657c0fC+cpMFLFEk2U/CEaKZmYNGkSKioqTJ48mTdv3mBpaUn//v3p1asXw4YNY9CgQSQmJuLh4cGkSZOYOnWqbN927dqxe/du6tatS2RkJOvXr6d79+55Gl81+9IA9K8m/xClHbePc/PVIwA6lG2IoaYeqy7/C4CehjbDaneW5a3tXIHazhV4GvpKludL5aakpuJgVIQajuXQVFUnJjEO/7DXLL+wk9ik+DytY14IDg5hyvgpREdFYWBoQOmyZVi1YQ2G3+Gdnj27dgMwqO9AufTxUybi0dJDESHlieDgYKZNnEJ0VDQGhgaUKlOaVetXyY7hvt175R72+L7+4yaPp1mLb7fe732vx3Xv/+s1uF/GejVr4YGKiio3rl1n5987SIhPwMzcjDr16tCtVw9FhJtnVFRUGOc5ha1rNjJ34gwS4hOwsLJk4JihlK9aUZbv1/49UFJWZumc+SQlJuHs6sLkebPQ+WQ544JQ3aEsAANrdJBL337rKNdfPgSgU7nGGGrpyeYw6mnoMKJuF1neukUrUrdoRZ6EvpSb51jU1A4jLb0MQ72+FTev3SAoMIhmn/wuqqqocPPaDXZt30lCfAKm5mbUqleHrj27KSjS3AvJ5Dt45fpVcj0mh/cfxNTMjEpVKyswUsUSQ7jyh0QqlRa+J/D9gEYfWPTlTDnQv1o7noa+4sTjq3labl4Y/dEfL+H7kZZW+FZ4yw9KSj/OVL4f6a/Dm2jFzfcoaJtuHMrT8n6r3oEnoS857ns5T8vNC6N+oL83P8rzN8z1Ct+Qz1H7F+Qo/58th+VTJN+XH+ev7Q9EQ0UNIy19zj29pehQBEEQhO+Yhooaxtr6nH1yQ9GhCILiSCQ5ewnZIoZ7fYcSUpKYfXLdlzMKgiAIwldISElixvE1ig5DEBRK6QfpxSpoopEiCIIgCIIgCLkk5qTkDzHcSxAEQRAEQRCEQkX0pAiCIAiCIAhCLomelPwhelIEQRAEQRAEIZckOXzlhKenJ5UqVUJXVxczMzNat26Nr6+vXJ6EhAQGDhyIsbExOjo6tGvXjqCgILk8AQEBeHh4oKWlhZmZGaNGjSIlRf5ZR2fPnqV8+fKoq6vj7OzMhg0bchht3hKNFEEQBEEQBEHIJUkO/8uJc+fOMXDgQK5cucKJEydITk6mUaNGxMbGyvIMGzaMAwcO8M8//3Du3DnevHlD27ZtZdtTU1Px8PAgKSmJS5cusXHjRjZs2MDkyZNlefz9/fHw8KBu3bp4e3szdOhQevfuzbFjx77+B5RL4jkphcT9t36KDqHAWPz/ScQ/grfRoYoOocAYaekrOoQCoaasqugQCsyIAwsVHUKBGVevu6JDKDBPQl8qOoQC425fStEhCHnMRMdI0SFkMOHwshzln9Vs4JczZSEkJAQzMzPOnTtHrVq1iIqKwtTUlG3bttG+ffrDuH18fHB1deXy5ctUrVqVI0eO0Lx5c968eYO5uT
kAK1euZMyYMYSEhKCmpsaYMWM4dOgQ9+9/eLhqp06diIyM5OjRo7mO92uInhRBEARBEARByKX8HO71qaioKACMjNIbazdv3iQ5OZkGDRrI8hQvXhxbW1suX05/wOrly5cpVaqUrIEC0LhxY6Kjo3nw4IEsz8dlvM/zvgxFEBPnBUEQBEEQBCGXcjpxPjExkcTERLk0dXV11NXVP7tfWloaQ4cOpXr16pQsWRKAwMBA1NTUMDAwkMtrbm5OYGCgLM/HDZT3299v+1ye6Oho4uPj0dTUzFEd84LoSREEQRAEQRCEXMppT4qnpyf6+vpyL09Pzy9+zsCBA7l//z7bt2/Pp5oULqInRRAEQRAEQRByLWc9KePGjWP48OFyaV/qRRk0aBAHDx7k/PnzWFtby9ItLCxISkoiMjJSrjclKCgICwsLWZ5r167Jlfd+9a+P83y6IlhQUBB6enoK6UUB0ZNCnTp1GDp0qKLDEARBEARBEL5BEokkRy91dXX09PTkXlk1UqRSKYMGDWLPnj2cPn0aBwcHue0VKlRAVVWVU6dOydJ8fX0JCAjA3d0dAHd3d+7du0dwcLAsz4kTJ9DT08PNzU2W5+My3ud5X4YiiJ6U79jurf+wdc1GPNq1pOfgvgCs9FrK3ZveRISGo6GpQbGSrvzatzvWdjYAPH/yjN3bduFz7yHvoqIxtTCjUcumNG/fSpFVyZU9/+xmz67dvH37FgAHR0d69OmJe3XF/cLlhT3b/mHrmk14tGtJj0F9AJg8dBwP79yXy9ewRRP6Dc+4gsi7qGhG9B5CeGgYGw/8jbaOToHEnR13bnuzY8t2/Hx8CQsNY/ofs6hRuyYAKSkprFu5hquXrvD29Vu0dbQpX6kifQb2w8TURFZGdFQ0S7wWcvm/S0iUlKhVtxaDhg9BU0tLUdXKNu9bt9m2eSu+j3wJCw1l9rw51KpTW7Y9PCycFUuWce3KNWLevaNM+bIMGzUCG1sbBUadrrlbTSrauGGpZ0JyajJ+IS/Z4X2cwHdhWe5Tw6Esfd3byqUlpSbTe8cM2fs+VdtQ07GcXJ67b/yYd3YzACbaBrQqWRs3c0f0NXSIiH/Hped32P/gPKlpqXlYwy87svcgR/cdIjgw/W6krb0dHbr9QoWqlQCICAtnw4q/uHPzNvFxcRSxsaZ9l05Uq11DVsa76HesWbSc65euIlFSwr1WdXoP7o+mlmLuZD598Jiz+47z+tkLoiOi6D56ACWrlPvsPk/u+3Jgw04CX77FwMSQBu08qFSvmmz7paNnuXzsHOEh6eeGhY0VDX7ywLX8h5W4oiOiOLhpF353H5EQn4CZlTn12zWjtHuF/KnoV/K+dZttm7bi8//fXc95c6hVt/aXd/wG/Uh1za78fJTjwIED2bZtG/v27UNXV1c2h0RfXx9NTU309fXp1asXw4cPx8jICD09PQYPHoy7uztVq1YFoFGjRri5udGlSxf++OMPAgMDmThxIgMHDpQ1jvr378/SpUsZPXo0PXv25PTp0+zcuZNDhw5lK05HR0euX7+OsbH8Cq6RkZGUL1+eZ8+e5bjuopGSz5KSklBTUyvwz33i85gTB45i52Qvl+7o4kzNBnUwNTMl5t07dmzYxoxRk1n+91qUlZV5+vgJ+ob6/D5hBMZmpvjef8RKr6UoKSnRrG2LAq/H1zA1N6X/4N+wsbVBKpVy5OBhxg4fzfptG3F0clR0eLkiO66O9hm2NfBoTMeenWXvs7ors/zPxdg52RMemvXFo6IkxCfgVNSJpi2aMWXMRPltCQn4+frRpWc3HIs6ExP9jqULFjNx5DhWblwjyzd7ygzCQsP4c8l8UlJS+GOGJ16e85g4Y/KnH1foxMcn4Fy0KB4tmzNh1Di5bVKplHEjx6CiosIcr7loa2uzfevfDP1tCFv+2aaw7vj3ipvZc/LxVfzDX6MkUeKnMg0ZXa8bYw8uISk1Ocv94pISGHNwsey9lIyr4t9548faK3tk75
NTPzyAzFLPBAkS1l/bT9C7cKwNzOhZuRXqKmpsv12w6/sbm5rQpV8PrKyLIJVKOXP0JJ4TpjN/7VJsHexYOHsecTGxjJ89BT19Pc6fPMu8qZ7MW7UIRxdnABbM+IPw8HCmec0mJSWFJXMWsHzeYkZMHlOgdXkvKTERK3trKtevzsY/Vnwxf1hQKH/NXoJ7o9r8MrQ3fncf8c+KTegZ6lOsXAkA9I0NafZrW0wszQC4ceYSG+YuZ9ifk7CwtQJg+5J1xMfG02PsQLR1dbh94Rqb569m6NwJFHG0zb8K51J8fALOLum/u+M/+d393vxIdc2u/Hzi/IoV6b93derUkUtfv3493bt3B2DBggUoKSnRrl07EhMTady4McuXL5flVVZW5uDBgwwYMAB3d3e0tbXp1q0b06dPl+VxcHDg0KFDDBs2jEWLFmFtbc3atWtp3LhxtuJ8/vw5qakZbwwlJiby+vXrHNY63Q/VSImNjWXAgAHs3r0bXV1dRo4cKbc9MTGRCRMm8PfffxMZGUnJkiWZO3eu3Ilx4cIFxo0bx40bNzAxMaFNmzZ4enqira0NgL29Pb169cLPz4+9e/fStm3bAn9iZ3xcPAtnzqP/yMH8u1l+clWjFk1k/zazNOfnXl0Y0WswIYHBWBSxpH6zRnL5LawsePzQh6v/Xf7mGik1atWUe99vYH/27NrNg3v3v8lGSnx8PItmedF/5GB2bd6RYbu6hjqGRoafLePYvsPExsTyU9dO3L56M79CzbUq1apSpVrVTLfp6Ojw55L5cmlDRg7ltx79CAoMwtzCnBf+z7l2+SorNqymmGtxAAaPHMq4YaPpP+Q3uR6Xwsi9unuWPX0vA17y4N59Nu3YKjt/R44bTcvGzTl57AQtWrcsyFAzeN+z8d6aK7tZ1m4sDkZW+Ia8yHI/KVKiEmI+W3ZKakqWee69fcK9t09k70NiIzjic5F6RSsVeCOlcnX5c/fXPt05uu8Qvg99sHWww/fBI/oNG4SLazEAOnT9mQP/7OHp4yc4ujjz8nkAt67dYN6qRTgXdwGgz+8DmDFmMj1+642RScE/Y8q1fCm5Ho4vuXz8HEZmJrTs/hMA5taWPPd5wvmDJ2WNlBKVysjt07RzGy4dP8eLx89kjZTnvs9o1+cXbIumD21p0N6D8wdO8urZi0LZSPnc7+735keqa2GQnccZamhosGzZMpYty/p5LXZ2dhw+fPiz5dSpU4fbt2/nKL79+/fL/n3s2DH09T88My01NZVTp05hb2+fozLf+6HmpIwaNYpz586xb98+jh8/ztmzZ7l165Zs+6BBg7h8+TLbt2/n7t27/PTTTzRp0gQ/v/QHLT59+pQmTZrQrl077t69y44dO7hw4QKDBg2S+5x58+ZRpkwZbt++zaRJkwq0jgBrF62gQtVKlKlY9rP5EuITOHPkJGaW5hibZX3xFhcTi45u4RkSlBupqamcPHaChPgESpb+Nh/utXbhSspXrUjpCmUz3f7fybP0aPULw3oMZOuajSQmJMhtf/k8gH82bWfwuGFIlL6PX/3YmFgkEgk6/x+y9vDeA3R0dWQNFIAKlSogUVLi0YOHigozTyQnJwGgrv6hZ1ZJSQk1NVXuet9RVFhZ0lTVACAmKf6z+TRU1JjfajgLWo1gaK2fKaJvmiFPcXN7lrYdzdzmQ+hWqTk6ap/vNdJU1SA28fOfm99SU1P579RZEhISKF4i/XwsVsKVi2fO8y76HWlpafx36ixJSUmULFsaAN8Hj9DW0ZE1UADKVCiHREnC44c+CqlHTr3wfYZLaVe5NJeyJXjx+Gmm+dNS07h94RpJCUnYFftw88i+mCPel24Q9y6WtLT0PMnJyTiVKJav8QtCbuTnE+cLu9atW9O6dWskEgndunWTvW/dujWdOnXixIkTeHl55arsH6YnJSYmhr/++ostW7ZQv359ADZu3ChbISEgIID169cTEBCAlVX6nZyRI0dy9OhR1q9fz+zZs/
H09KRz586yifZFixZl8eLF1K5dmxUrVqChkf5HuV69eowYMaLgKwlcOHWOZ4+fMnflgizzHN17iM0r15OQkICVjTVT5s1EVTXzp2j73H/ExTP/MX7OlPwKOV899XtCvx59SUpKQlNTk9nz5uDg6PDlHQuZC6fP4+/3lDkr52e6vWb92piam2FoYsSLp8/ZsnoDr1++ZvT08QAkJyWzcMafdO3fA1NzM4LeBmVazrckKTGR1UtXUq9RfbR10nsyw8PDMTCU701SVlFBT0+X8LBwRYSZZ+zs7TG3sGDl0hWMGj8GTU1NdmzdTnBQMGGFbOieBAm/VmjK4+AXvI4KzjJf4Lsw1l7dy8uIIDTVNGjmWp1JDfsw7tBSIuKjAbj71o8bLx8SEhOBma4RP5VpwIi6XZh+fE2mdxjNdIxo6FKlwHtR3nv+1J+xA4eTlJSEhqYmY2dOwsbeDoBRU8czb5onXVp0QFlZGXUNdcbOnISldfrfnIjwCPQN9eXKU1ZRRldXl4jwiAKvS268i4xCx0BPLk1XX4+EuASSE5NQ/X8j++2LVywZP5eUpGTUNNTpPnoAFjZWsn26jOjHZq/VTO4+DCVlJdTU1eg+eoBsiJggFCb5OdyrsEtLSwPSh4tdv34dE5O8G7HwwzRSnj59SlJSElWqVJGlGRkZUaxY+l2Ze/fukZqaiouLi9x+iYmJsklAd+7c4e7du2zdulW2XSqVkpaWhr+/P66u6XePKlas+NlYMnuIT1JiEmrqXzd3JTQ4hHVL1zB53ozPllWzQR1KVyxLRFgE+3fsxmvaHGYt+TPDPgHPnjN3wgw6dPuZspXKf1VsimJrb8eGvzcSExPLmZOnmTVlBkvXLP+mGiqhwSGsX7qGSX9Oz3J+U8OPhvHZOdpjaGzItBETCXz9Fosilmxds5EidjbUali3oMLOVykpKUybMAUpUoaOVswNgYKmoqLCrD89mTNjNs3qNUZZWZkKlStStZp7pvM4FKlrJQ+K6Jsx88Rfn833JPQlT0JffngfEsCc5oOpV7Qi/949DcDVFx8WhHgVFczLiCC8Wg3D1cyBh0HyEzENNXUZVbcL1wIecPapYoYzFrG1ZsHaZcTGxnL53AUWz/Zi1uI/sLG3Y9tfm4iNiWXa/Nno6etz9cJl/pzqyezFf2Lv9O18J+UFUysLhs+bREJcPHcv32T70vUMmD5S1lA5+vc+4uPi6DdlGNp6Oty/5s1mr9UMnDkKSzvrL5QuCEJB8/f3z/Myf5hGypfExMSgrKzMzZs3UVZWltv2fihJTEwM/fr1Y8iQIRn2t7X9MEb2/fyUrHh6ejJt2jS5tAHDB/HbyIzl5sRT3ydERUQyqs/vsrS0tDQe3n3AkT0H2X5iD8rKymjraKOto42VdRFc3IrRrUUnrl64TM36H1bnePk8gKkjJtKgRRPad+30VXEpkqqqKtY26SsfFXctjs/DR/zz9w5GTxir4Miy79nj9OM6uu9QWVpaWhqP/n9c/z6+O8M5W/T/Y97fN1Lu375LgP8LOtSXX6WtR6vOtPu1Ax17dOZbkZKSwrTxUwh6G4TX8oWyXhRIv/EQGSF/xzk1JYXo6HcYGRsVdKh5rrhrcTZs20RMTAzJyckYGhrSp1svirsV//LOBaRLRQ/KWhVj1sm/ZL0h2ZUqTeNFxFvMdLI+ViGxEUQnxGKuayTXSDHQ1GVc/R74hb5k/bX9We6f31RVVWU9I87FiuLn85gDu/bR5uf2HN5zgMUbVmLrkN6z4uDsyMO79zmy9yADRgzG0MiQqIgoufJSU1J59+7dF+ebFRa6BvrERMof93dR0Whoach6UQBUVFVkvSLWTna8fPKcC4dO0b5/F0IDg7l45AwjF0yVzVGxsrfB/6EfF4+epX2/XwuuQoKQDT9yT8rHTp06xalTpwgODpb1sLy3bt26HJf3wzRSnJycUFVV5erVq7IGRUREBI8fP6Z27dqUK1eO1NRUgoODqVmzZqZllC
9fnocPH+Ls7PxVsWT2EJ8n4S+zyJ19pSuUYcG6pXJpS+cuooitNW1+bpfhQhYAKUil6cOB3gvwf8HU4ROo07genXt3/eq4CpO0NClJSVmvNFQYlSpfhvmfHNdlcxdSxNaa1j+3z/S4Pn+SfvFmYJx+YTNy2jiSkpJk25/4+LH8j0XMWDwXCyuLfIw+b71voLx++Yr5yxfJTdADcCtVgph3MTx+5CubnHzrxi2kaWm4lnBTRMj54v2Nk5cBL/F95EOfAX0VHFG6LhU9qGDtiuepdYTGRuZ4f4lEgrW+OXff+mWZx1BTDx11TSLj332Ult5A8Q9/w5orewpVz5I0TUpycjKJCem9559ezCgpKcn+mBcr4UpsTAxPfP1wLlYUgLu3vZGmSXEpRA3Rz7Er5ojPrXtyaX53HmHn4vTZ/dKkUlKS01dtS05M/66SKMn/rCRKSkjTCs+xFYT3RBMFpk2bxvTp06lYsSKWlpZ50nD7YRopOjo69OrVi1GjRmFsbIyZmRkTJkz4H3t3HRZV9gZw/Dt0d7cIIirYgd3d7bqrrrV269rdiYrdrWu7/uyOtRWTsAMBlZDOmd8f7I47K6ggMIDns888j3PvuYf37MDMvPcUKn9PIC5SpAidO3emS5cuLFiwgNKlS/P+/XtOnz6Np6cnTZo04ffff6dSpUoMGDCAnj17oqury6NHjzh58iQ+Pj5fieATTU3Nz5aH1Yj9/mWKtXV0cPjP0rRaWproG+jj4OxEyNsQ/jp7gZLlymBgZEDY+zD2b9+NhqYGZSulDVF79ewFk4aNo1T5MjRr14qIsLS70iqqKhgaGf73R+ZpK5Yux6uKF5ZWVsTFxnLi2Anu3LrNQh9vZYeWKdo6OvI7r//Q1NJC38AAh0KOhAQFc/H0ecpULIe+oT4vn75g4/K1FPMsLh9CYmVrrXB91Me0O512jnZ5ap+U+Lg4gt58Wqow+G0wTwIfo29ggKmZKZNHT+BxQCAzF8xBKk0lPCxtLoa+gQHq6uo4FnKigldF5s+ay9Dfh5OaksrS+d7Uqlcnz6/sBRAXF0fQ6zfy58FBb3kcEIi+oQFWVlacOXUaIyNjLK0sefbkKYsXLKJajepUqFTxC7Xmjq7lmlLJyQPvCztISE7CUCvt9youOUFhyeB/a1GiJk8/vCY0Ohydv+ekmOkace5J2lAtTTUNWpWoyY3Xj/iYEIOFngkdStfnXXS4fEUvY219xtTtTlhsJDvvHMdA81PP2tdWDctuW1ZvoEzFcphZWBAfF8fF0+d44HuPSfOmY+doj7WtDSsWLKVbv57oG+hz7dIV7t68w7jZkwGwd3KgTIVyLJ+3mD7DB5KaksIa7xVUrV1DKSt7ASTGJ/Ah5L38efi7DwQ9f42Ong7G5p/H5FW/BpePnuXw5j1UqF2Fxw8CuPvXTXqMHSgvc2TrPtxKl8DY3ITE+ATuXLzOs4eB9JqQNgrAwtYKMysL9qzcSrOubdHR1+XBdV8e3/Oj+5gBn/3MvCAuLo43//rbffv2LYEBgRgYGGBlnX9uBH2LH6mt30oiKRiL0XyPlStXsnHjRn755Zdsq/OHSVIA5s2bR0xMDM2aNUNfX5/hw4fz8eOnrvUNGzYwffp0hg8fTlBQEGZmZlSqVImmTZsC4Onpyfnz5xk3bhzVqlVDJpNRuHBhOnTooKwmZYqGhjqP7j3k8J5DxEbHYGhsRLGSxZnpMw9DYyMArpy/TFTkRy6cPMuFk2fl15pbWrByV+a76pQpMiKCaROnEvYhLG3FHNfCLPTxpkKlCsoOLVupqatx/5Yv/9t7iMT4BEwtzKhUrTJtfskfv5f/FuAXwLB+n4YrrvBOS/4bNGlI156/8tfFywD0+qW7wnULly+mVNm0DebGTpnAkvnejBgwFBWJCtVq1WDg8O8bSplb/B/5M6jPpw04ly5K2z+kUdPGjJs8gbAPYfgsWkJ4WDimZmY0bNKQbj27Z1RdrqpTJO3valxdxXhWX9
nHpee+QNrGjGa6Rsw6vQEAXQ0tuldsgaGWHrFJ8bwID2bayTW8jUr7UiyVSbE3tqKqcyl01LWIiI/mQchT9t47TcrfGzUWtyqMlb4pVvqmLG41UuFnd9meu3vjREZE4j1zPhFh4ejq6uJYuBCT5k2Xz+mbMHcqm1dtYMaYySTEx2Nta8OgMcMp96/3pKETRrHaezkTh45BRUWStpnjoL652o5/e/30JSsnfVqZ59DG3QCUq+lFx4G/cnzXIW6evcK4lbMAMLU0o8fYgRza+AcX/3cGI1Mj2vXtIl9+GCDmYzQ7l24gKuIjWjra2Dja0mvCYIqUTOvtVFVTo8e4gRzZuo/1s3xITEjEzMqCjgO64V42b67O6P/In4G//etvd+Gnv93xU3J/lc+c9CO19VuJnpS0fQErV6789YKZIJF9ywLMQo578IXhDQWNlb5y7ggqQ3DUB2WHkGtMdPJXT1tWaaimvxJeQTT8T+9srW9sne74vXvO/vtnv144l42p3U3ZIeSafy9W8L12LN2ABOg48NdsqzM7eTnlzaRGyDqzL8xZU5bZf994+Vaj6+TNv5fv8fvvv6Onp5etW2/8UD0pgiAIgnJoq2tioW/MgvNblR2KkE1kMhlPHwQwYMYoZYciCEolelIgISGB1atXc+rUKTw9PT/b2mLhwvS3UPgSkaQIgiAIOS4+OZEhB7K2oZeQN0kkEsavmq3sMARB+cTqXty7d49SpUoB8ODBA4VzWZ1EL5IUQRAEQRAEQcgikaLA2bPZP4xXJCmCIAiCIAiCkEUSkabkCJGkCIIgCIIgCEIWic0coVatWl/8/3DmzJlM1ymSFEEQBEEQBEEQsuyf+Sj/SE5OxtfXlwcPHtC1a9cs1SmSFEEQBEEQBEHIItGTAosWLUr3+OTJk4mJydrGuiJJySMs9X6cvUN+JNYGeX+X8+wSn5yo7BByxfijy5UdQq4ZUeNnZYeQa4y19ZUdQq4JeP9S2SHkmvL2xZQdQq5RVVFVdgg/LJGiZOznn3+mQoUKzJ8/P9PXiiRFEARBEARBELJITJzP2JUrV9DS0srStSJJEQRBEARBEISsEsO9aN26tcJzmUxGcHAwN2/ezPIu9CJJEQRBEARBEIQsEikKGBoaKjxXUVHBzc2NqVOnUr9+/SzVKZIUQRAEQRAEQcgiMXEeNmzYkO11iiQlHTVr1qRUqVJ4e3srOxRBEARBEAQhDxMpyie3bt3Cz88PgOLFi1O6dOks1yWSlAJuy4ZNnD97npcvXqKpqYmHpwd9B/bDwclRXmZA73743r6jcF2L1i0ZOfb33A43R+z9Yw/bN28jPCwcF1cXho4aRrESxZUdVrbav3sf+/fsIzg4GIBCzs782qs7XlW8lBxZ5ty7c5c/tu7kcUAgYR/CmDJnGlVqVFMo8/L5S9YuW8XdO3eRpqbiUMiRSbOmYmllKS/z6P5D1q9ci/9DP1RUVChcxIXZ3vPQ1NLM1fY0LFqZ0nZFsdI3JSk1hWdhb9h37zSh0eHfdH05+2L08mqNb1AAKy7vVjjXrHgNqjmXQltdi6dhb9h+6wjvYiLk53U0tOhYuiGeNq7IZDJuv/HnD9/jJKYkZ2sbM2v/9t1sX7uZxq2b8+uAXrwLCaX/Tz3TLTts4u941azK2WOnWD53cbpl1u7dgqGxUQ5G/P3iYuNYt2oNF89dICIiAtciRRg4fDDuxdxJSUlh7YrVXP3rKsFBb9HV06Vs+XL8NqAvZubKXx0w+PEr7p24wodXIcR9jKFen7Y4lXLLsPy5jX/y+Oq9z44bWZvRbtJvAPgeu8zzOwF8DAlDVUMNS2c7KrSqjZFV2iqX0R8i2Tl+Wbr11+nVGuey7tnQsqxLTU1l45r1nDh6gvDwMMzMzGjYtDFdundN9476glnzOLT/IAOGDqJdp/ZKiDjrvuU7BMCDe/dZvXwVjx48REVVBdciRVi4dBGaWZwwnf+INOXdu3d07NiRc+fOYWRkBEBkZC
S1atVi586dmJubZ7pOkaQUcHdu36F1uzYULeZOamoqq5etZOiAIWzdvR1tbW15uWatWtDzt17y51ldiSGvOXXiFEsXLmHk2FEUK1GcP7bvYtiAoezYtxNjExNlh5dtzC3N6TOwH/YO9shkMo4ePsLoYaPYsH0TzoWdlR3eN0uIT8DZtTANmzVm8ujPJ9q9fRPEkN8G0qhZY7r0+hVdXR1ePHuBhoaGvMyj+w8ZPWQUnbr+xIDhg1BVVeXp46dIVHL/Q6SIuSPnntzkRfhbVCUqtPSoxeDqnZl8bCVJqV9OFkx1DGlbsi6P37/67FyDol7Udi3PxuuH+BAbSfMSNRhU/ScmH1tJijQVgB4VW2KopYf3+W2oqqjStXwzfi7bhHXXDuREU7/JE/9ATh4+hqOzk/yYqbkZq/dsVih36vAxDu3aT6mKZQGoXKsapSqUVSizbI43yUlJeT5BAZg7YzbPnz5j3OQJmJqbcfLocYb3H8KmXVvR1tEmMCCQLt274lLEleioKJYuXMzY4b+zevM6ZYdOSmISJnaWFKlcklOr9n61fOUO9ajQqpb8uVQqZd/0tTiX+ZRYBAe+oniNspg52SCTSrlx4CxHl2yn7aTfUNfUQNfEgM5zBivU63/pDvdOXMW+eOHsa1wWbd+8jYN7DzBm0jicnAsR4OfP7Gkz0dXTpW2HdgplL5w9z6MHD/NEwpkV3/Id4sG9+wwfOJSff+3CkJHDUFNV5fHjx0hUVJQcfe4Rw71g4MCBREdH8/DhQ9zd0/7eHz16RNeuXRk0aBA7duzIdJ0/zm9QBmJjY+nSpQt6enpYW1uzYMEChfMRERF06dIFY2NjdHR0aNSoEY8fP1Yos2bNGuzt7dHR0aFVq1YsXLhQnkUq28Kl3jRu1gTnws64FnFl7OTxhIaEEODnr1BOS0sTUzNT+UNXT1dJEWevXVt30KxVc5o0b0oh50KMHDsKTS1NDh88rOzQslXV6tWoXLUy9g72ODg68Fv/PmjraPPw/gNlh5YpFSpXpHufnlStWS3d8+tXrqVi5Yr0HtgHVzdXbOxsqVy9CsYmxvIyy719aNW+NZ26dMbJuRD2jg7UrFtLIZHJLUsu7uDKi3sER33gzcd3bLzxJ6a6hjgaW3/xOolEQvdKLfnz4QXe/6t35B91XCtwxO8Sd98GEvTxHRuuH8JIW59Stml3uK30TSlh7cKWm//jRfhbnn54za47xyjnUBxDLb0caevXxMfHs2TmAvoMH4iu/qcYVFVVMTYxVnhcv3QVr5pV5V+CNDU1Fc6rqKjw4M49ajeqp5S2ZEZiQiIXzp6nz8B+lCxTCjt7O37t3QNbe1sO7t2Pnp4eC328qV2vDg6ODhT3KMHgkcMI8A8gNCRE2eFjX8KF8i1qUqh00W8qr6GthY6hnvzx4WUwiXHxFKlcUl6m0aBOFKlcEhMbc0ztLKnRtRkx4VF8eJXWXhUVFYU6dAz1eOEbQKGy7qhr5f7f8X89vPeAKtWr4lW1MtY21tSsU4vyFSvg/9BPodz7d+9ZssCb8VMnoqaWP+8Jf8t3iCULF9O2Yzt+6dYF58LOODg5UqdeXaW85yqLJJOPgujYsWMsX75cnqAAFCtWjGXLlnH06NEs1fnDJykjR47k/PnzHDx4kBMnTnDu3Dlu374tP9+tWzdu3rzJoUOHuHLlCjKZjMaNG5OcnHYX9PLly/Tp04fBgwfj6+tLvXr1mDFjhrKa81Wxf+/6aWBgoHD85NETNKnTkF/ad2alz3ISEhKUEV62Sk5OJsA/gPIVysuPqaioUK5CeR7ksy/vmZGamsqp4ydJiE+ghKeHssPJNlKplGt/XcXOwZ7fB4+kbaOWDOjel8vnL8rLRIRH4P/QDyNjYwb16k/bRq0Y1ncw930/H36iDNrqacPNYpPiv1iuabFqRCfEcvm572fnzHSNMNTWxy/0ufxYQnIiz8OCcDa1A8DZzI7YpHheRgTLy/iFPkcmk1HI1DYbWp
J56xavpEzFcniWLfXFck8Dn/DiyTPqfCEBuXDiDJqamlSqUSWbo8x+qamppKamfvaFTVNTk/t30/+9jI2JQSKRoKeX/zeYDLjsi23RQuibGmZYJik+bSNYTZ30e/Dfvwwm7HUoRauUyokQM624Zwlu37zF65dpvZxPAh9z/+49KlauJC8jlUqZMWkaHX/uRKF81Jv9Nf/9DhERHs6jBw8xNjahT/deNKvfmAG9+3LX964yw8x1EokkU4+CSCqVoq6u/tlxdXV1pFJplur8oZOUmJgY1q1bx/z586lTpw4eHh5s2rSJlJQUAB4/fsyhQ4dYu3Yt1apVo2TJkmzbto2goCAOHDgAwNKlS2nUqBEjRoygSJEi9OvXj0aNGimxVRmTSqUsWeCNR0lPnF0+dZnXa1ifCdMmsWSVD7/82oXjR44xdcJk5QWaTSIjI0lNTcXEVHFYl4mpCeEfwpQUVc55+vgJdavWppZXDebNnMvM+bMp5FxI2WFlm8iICOLj4tm5eTvlK1Vg9uJ5VKlZlcmjJ3L3ti8AwW/fArB57UYat2jKLO+5uLi5MmrgcN68eqPE6NPunrUvVZ8n71/zNup9huUKm9lTpVApttz8X7rnDf7uCYlKiFU4HpUYi6FWWg+ooZYe0QlxCuelMhmxSfEYaOV+L+nlMxd49vgpP/Xq+tWyZ46cwNbRHrcSGc87OH30JFXrVEdTM3fnGGWFjq4OxT1KsHn9Rj68/0Bqaionjh7n4f2HhKXzPpSYmMgqnxXUqV833/dox0ZG8/rhU9y+kFzIpDKu7D6JZWE7TGwt0i0TcNkXIyszLAvb5VCkmdO568/UrleHX9p3prZXDXr+0p22HdtTr+GnZVa3b96Gqpoqbf4z/Cs/S+87RFBQ2nvu+jVradayBQuWLKKImxtD+g7k9avXygw3V0ky+V9mXbhwgWbNmmFjY4NEIpF/B/1Ht27dPkuEGjZsqFAmPDyczp07Y2BggJGRET169CDm76TzH/fu3aNatWpoaWlhb2/P3LlzvznG2rVrM3jwYN7+/TkMEBQUxNChQ6lTp06m2ww/+JyUp0+fkpSURMWKFeXHTExMcHNLGzLh5+eHmpqawnlTU1Pc3NzkKxcEBATQqlUrhXorVKjA4cMZDydKTEwkMTFR8VhSYo5/4C6cM59nT5+xfO0qheMtWreU/7uwiwumZqYM7juQoDdvsLXLGx8Kwtc5ODmycccmYmJiOXvqDDMmTcNnzfICk6hIpTIAvKpXoW2ntA9+lyKuPLr3kMP7D1GyTClkf5dp2qoZDZum3SxwdXPlzo3bHDt8hJ79eisneKBTmUbYGJoz78ymDMtoqmnQvUILttz831d7W/KLD+/es2HZGibMnfrV4R+JiYlcOn2Btr90yLBMwEN/gl6+ZuCYYdkdao4ZN2UCc6bNok2TlqiqquLqVoQ69esS4B+gUC4lJYXJYycik8Gw30coKdrs8/jqPTS0tb440f7yzmNEBL2n2cgu6Z5PSUrm6Y2HlG5cNafCzLSzp85w8thJJkybhJNzIZ4EPsZn4ZK/J9A3IsDPn707d7Nmy/oCddc8ve8Qsr/vkLdo3ZImzZsCUKSoG7du3OR/h/6kz4B+Sok1t+X0yxwbG0vJkiXp3r37Z5sm/qNhw4YKywD/9ztl586dCQ4O5uTJkyQnJ/Prr7/Su3dvtm/fDkBUVBT169enbt26rFy5kvv379O9e3eMjIzo3fvrn50+Pj40b94cJycn7O3tAXj9+jUlSpRg69atWWr3D52kKMusWbOYMmWKwrERo0cxKgdX01o4Zz5/XbqMz+oVWFimf7fqH/+sfPXmdf5OUoyMjFBVVSU8THElpfCwcEzMTJUUVc5RV1fH7u83hqLuRfF/5MfuHbsYNW60kiPLHoZGhqiqquL4n1VlHJwceXD3PoD8dU2vzLuQd7kTaDo6lm6Ah40r889uJjI+OsNy5nrGmOkZ0b/qpy/p/3zJWd52LBOPri
Aq4e/hFlq68n8DGGjq8joyFICPCTHoa+ko1K0ikaCrof1ZD0xOexb4hI8RkYz6bYj8mFQqxe/eQ44dOMz24/tQVVUF4Or5yyQmJlK9fu0M6zt95AROLs4ULuKS06FnG1s7W5as8iE+Pp642FhMzcyYPHYiNrY28jIpKSlMGjOB0OAQFi1fku97UWQyGQGX7+Ja0QNVNdV0y1zecYxX9x/TdHgX9IwN0i3z/LY/KUnJuFbKO0NXVyxZTueunalTvy4AhV0KExocwrZNW2jYtBH3fO8RERFB++Zt5NekpqayfLEPe3b+wa6De5QVepZl9B3C1CxtQQCnQoo3wxwLOREaEpqrMSpXzmYpjRo1+uooHU1NTaysrNI95+fnx7Fjx7hx4wblypUD0kYCNW7cmPnz52NjY8O2bdtISkpi/fr1aGhoULx4cXx9fVm4cOE3JSn29vbcvn2bU6dO4e+fNmfJ3d2dunXrZrK1n/zQSUrhwoVRV1fn2rVrODg4AGkT5QMDA6lRowbu7mnLQ167do3KlSsDEBYWRkBAAMWKFQPAzc2NGzduKNT73+f/NWbMGIYNU7wLGJWUM18cZDIZi+Yu4MK58yxdtVzhQzEjjwMCgU9vPvmVuro6bkXduHnjJtVr1QDSvhzdunGTNu3bKjm6nCeVykhKUu5ys9lJXV0dt2JFefOfIQRvXr/Gwjpt+WEraytMzc0+G2bw5vVrKnhVRBk6lm5AKVs3Fp7bQlhs5BfLhkR9YMqx//R0etRES02DXXdOEBH/kVSplI/x0RS1cOLN30mJlpoGhUxtOf/0FgDPPrxBV0MbB2MrXkWkTUZ2syiERCLheVhQ9jfyCzzKlGTBOh+FY8vnemNjb0fLTm3lCQrAmaMnKVe5AoZG6c9fiI+P58q5S/zUM/277nmdtrY22traREdFcePqdX4b2Bf4lKAEvX6D94olGbY/PwkOfEXU+wjcqpT87JxMJuOvncd54RtA02G/YGBmlGE9AZd9cfQsgrZ+3knaEhMSkEgUR8urqKrKx93Xb9SAshXKKZwfOWgY9Rs1oFGzJrkWZ3b42ncIaxtrzMzNePXypcLx1y9fUSmfLYH/PfJCj9m5c+ewsLDA2NiY2rVrM336dExN027cXblyBSMjI3mCAlC3bl1UVFS4du0arVq14sqVK1SvXl2hx7tBgwbMmTOHiIgIjI2NP/uZAGfOnGHAgAFcvXoVAwMD6tWrR716aXMKP378SPHixVm5ciXVqqW/IM6X/NBJip6eHj169GDkyJGYmppiYWHBuHHjUPl72TxXV1datGhBr169WLVqFfr6+owePRpbW1tatGgBpC25Vr16dRYuXEizZs04c+YMR48e/eIvrKam5mfdcInRKTnSxgVz5nPq2AlmLZiDjo6OfAy0np4umlpaBL15w8ljJ6hUpTKGhoY8ffyEJQsXU6pMKVxc88+dyox0+LkTMyZNo6h70b+XIN5JQnyCvFu6oFixdDleVbywtLIiLjaWE8dOcOfWbRb6eCs7tEyJj4sj6M2nL9HBb0N4EvgYfQMDLK0sad+5I9PHT8GjVElKlS3FjavXuXLpLxYs8wbSPijad+7ApjUbKexamMKuLpw4cpzXL18xaeaUDH5qzulUpiEVHEqw/PIfJKQkyeeDxCcnkpz6+d98ijT1s/kqcUlpi1j8+/jpx9dpXKwq72LC+RAbSYsSNYmMj8Y3KG34UEh0GA+Cn/BLuSZsu3UUVYkKnco04Oarh3xMUByDnNO0dXRwKKTYs6WppYW+gYHC8eCgt/jde8iYWZMyrOuvsxdJTU2ler2aORVujrh+5RoyZDg4OPDmTRArlyzDwcmBxs2akJKSwsTR4wn0D2T2wjmkpkrl79MGhgbpTkTNTckJSUS9/9QbHf0hkrDXIWjqaqNnknEyFfCXLxaFbNKdZ3J5xzGe3nhI/b7tUNfSIO5j2u+khrYmahqf2vvxXTjBT17RcEDHbGzR96tcrQpbN2
7G0soSJ+dCPA4I5I/tu2jcrDGQ1uv730RTTU0NE1NTHBwdlBFyln3tO4REIuGnXzqzbtVaXFxdcXVz5ejhI7x8+ZLpc2cqOfrck9kUJb1h/+l9N/xWDRs2pHXr1hQqVIinT58yduxYGjVqxJUrV1BVVSUkJAQLC8W/RTU1NUxMTAj5exXBkJAQCv2nR8zS0lJ+LqMkxdvbm169en22IBOAoaEhv/32GwsXLhRJSlbMmzePmJgYmjVrhr6+PsOHD+fjx4/y8xs2bGDw4ME0bdqUpKQkqlevzpEjR+QfHFWqVGHlypVMmTKF8ePH06BBA4YOHYqPj09GPzJXHdizD4CBv/VXOD520ngaN2uCmpo6N6/f4I8du0iIT8DC0oKatWvStcevygg329WtX5fIiAjWrlxLeFgYrkVcWbB00WeT6fO7yIgIpk2cStiHMHT19HBxLcxCH28qVKqg7NAyJcAvgBH9h8qfr1yctqFb/cYNGDVxDFVrVmPw78PYuWkbyxYtwd7BnkmzpuJRylN+TZuO7UhKSmKF9zKio6Jxdi3MnMXzsbHL/VWtarqk3bUaUUvxzv/G64e48iJtZaeu5ZthqmvEwnNbvrne4/5X0FDV4OeyTdDR0OLJh9csubBDvkcKwLprB+hUuiFDa3RO28wxyJ9dd45nQ6tyxtmjpzAxN6VkuYx3Jz5z5CQVq3mhq6ecZZSzKiYmhjXLV/H+3Xv0DQyoUbsGPfv2Rk1NjeC3wVy+cAmAHj8rvu96r1hC6bJllBGy3PuXwfxv0afx5Ff3nALAtZInNbs149afFwi8co9OMwfIyyTFJ/D8tj+V29f/rD4AvwtpK2geXqg4Tr1Gl6YKSxUH/nUXXSMD7Nzz1upYg0cMZd2qNSyau4CIiAjMzMxo3qo5XXsWjM/Nf/vadwiA9j91JDEpiaWLFhP1MQqXIi4sWrYkXw8Xz6zMToZPb9j/pEmTmDx5cpZ+fseOnxJ5Dw8PPD09KVy4MOfOncvypPVvdffuXebMmZPh+fr16zN//vws1S2RyWSyrAYmpK9Xr174+/tz8eLFrxf+2/tv3IG6IMgDvaJCDohPTvx6oQJg+sns3WBveM1fCHj/ksMPL2Rrvdmhf5X8tTv297DQS/8uYUG09XbW9ixIz7mNhwAJNbs1y7Y6s9PPZfLmaps5QVUl/bk/BY25ft67ybj6ytc3Ov23rmWaZrknRSKRsH//flq2bPnFcubm5kyfPp3ffvuN9evXM3z4cCIiPu27lZKSgpaWFrt376ZVq1Z06dKFqKgohZXDzp49S+3atQkPD8+wJ0VLS4sHDx7g4pL+6JsnT57g4eFBfHzmF4P5oZcgzi7z58/n7t27PHnyhKVLl7Jp0ya6dv36UpuCIPzYtNQ1Mdcz5mTAFWWHIgiZJpPJCA58SbnmNZQdiiAoVWaXINbU1MTAwEDhkZ0rvL5584awsDCsrdM2Dvby8iIyMpJbt27Jy5w5cwapVCpfwdbLy4sLFy7I9wEEOHnyJG5ubhkmKAC2trY8eJDx3nP37t2Tx5FZIknJBtevX6devXp4eHiwcuVKlixZQs+ePZUdliAIeVxCciKjDy8hMaXgLHAg/DgkEgmdZg5EzyT9lbkE4UeR05s5xsTE4Ovri6+vLwDPnz/H19eXV69eERMTw8iRI7l69SovXrzg9OnTtGjRAhcXFxo0aACkrbLVsGFDevXqxfXr17l8+TIDBgygY8eO2NikLYbw008/oaGhQY8ePXj48CG7du1i8eLFny309F+NGzdmwoQJ6W4CHh8fz6RJk2jaNGvzgH/4OSnZ4Y8//lB2CIIgCIIgCIIS5PQo9ps3b1KrVi35838Sh65du7JixQru3bvHpk2biIyMxMbGhvr16zNt2jSF3plt27YxYMAA6tSpg4qKCm3atGHJkiXy84aGhpw4cYL+/ftTtmxZzMzMmDhx4leXHx4/fjz79u2jSJEiDBgwQL7XoL+/P8uWLSM1NZVx48
Zlqd0iSREEQRAEQRCErMrhybY1a9bkS1PIjx//+qIoJiYm8o0bM+Lp6Zmp+dSQtgLYX3/9Rd++fRkzZow8TolEQoMGDVi2bJl8lbDMEkmKIAiCIAiCIGTRj74ekKOjI0eOHCEiIoInT54gk8lwdXX94lyWbyGSFEEQBEEQBEHIoswuQVxQGRsbU758+WyrTyQpgiAIgiAIgpBFeWHH+YJIJCl5REj0B2WHkGt+3j5R2SHkmk0dJys7hFxjZ/T5ztIF0aBqeWv365xk/gPtHSKVSpUdQq7pVj5rK+0Iedu/N3MVhIJAJCmCIAiCIAiCkEWiJyVniCRFEARBEARBELJIpCg5QyQpgiAIgiAIgpBFYuJ8zhBJiiAIgiAIgiBklchRcoRIUgRBEARBEAQhi1QkKsoOoUD6Yf+vymQyevfujYmJCRKJBF9fX2WHJAiCIAiCIOQzkkw+hG/zw/akHDt2jI0bN3Lu3DmcnZ0xMzNTdkjZbv/23Wxbs5kmbZrz64BeAEwcMoZHdx8olKvXrCG/DeuvcOzssVP8ufsgwa+D0NbVwatGFXoN6Ztrsf+jXcl6tC9VFxsDcwCehr1h1ZV9XH7u+9VrG7p5MafZYM48vsHQgwsUzvWr0o7WHrXR19TF920AM06u41VkiPx8UQsnhlT/ieJWhZHKpJwKvM78c5uJT07M1vZ9i92bdrBn806FYzb2tizauByAU4ePc/nMBZ4/fkp8XDzrD25DV09Pofy+bX9w5+pNXjx9jpqaOhsObc+1+LPb5vWbOH/2PC9fvERTUxMPTw/6DuqHo5OjskPLsn3b/mDrmk00adOCHgN7Ex0Vzc4NW7l78w4fQt9jYGRIhaqV6NT9F3T1dOXXta7Z5LO6hk0YRdU6NXIz/EzbsmEzF86e4+WLV2hqalDC04O+A/vh8PdrGPw2mPbN26R77dTZ06lVt3Zuhvvd4mLjWLdqDRfPXSAiIgLXIkUYOHww7sXcgbSbZutXr+PwgT+JiYnGw9ODYb+PwM7BXsmRfx/f23fYvnkb/n4BhH34wKz5s6leK2//bmZVamoq61at5cTR44SFhWFmZk7jZo3p1vPXfL3yU4fmbQkJDvnseMu2rRj6+3AO7TvI6eMnCQwIJC42jsNnjqKvr6+ESJUt/77GedkPm6Q8ffoUa2trKleunO75pKQkNDQ0cjmq7PPEP5CTfx7D0dnps3N1mzSgQ/fO8ueampoK5//84wB/7t7PL7/9iqu7GwkJCbwPeZfTIafrXXQYiy/s4FVECBKJhGbFq7O45Qg6bB7N07A3GV5nY2DOsJo/c+u132fnfq3QnE6lGzLh6HKCPr6nf9X2rGg7hlYbRpCUmoy5rjGr243neMAVZp3egJ6mNiNrdWVao36MOLQoJ5ubITsnBybMmyp/rqKqKv93YmIiJcuXpmT50uxYuyXd61OSU6hUowquxYpy9uipHI83J/nevkPrdm1wL+5Oamoqq3xWMrT/ELbt2Y62trayw8u0x/6BnPjzGI6FC8mPhX8IIyIsnK59e2Dv6MD70HesXOhD+IdwRk0dq3D9gN+HULpCWfnz/yaoeZHv7Tu0atcG92J/v4bLVjJswBC27E57DS0sLThw7E+Faw7tP8iOLdupWLmSkqLOurkzZvP86TPGTZ6AqbkZJ48eZ3j/IWzatRVzC3N2bN7Gvl17GDNpHNY21qxbtZYRg4axadfWz96f85P4+ARcirjSpHlTxo4co+xwctTWTVs4sGc/46dMoFBhZ/wf+TFjygz09PRo16m9ssPLslWb1pCa+mkPoedPnzF8wFBq1q0FQGJCIhW8KlLBqyKrl61SVphKl4/z0Dzth0xSunXrxqZNm4C0ta0dHR1xcnKiRIkSqKmpsXXrVjw8PDh79iznz59n5MiR3L17FxMTE7p27cr06dNRU0v7XxcdHU2fPn04cOAABgYGjBo1ioMHD1KqVCm8vb2V0r74+HgWz1hAnxED2bNl12fnNb
U0MTZJf5O2mOgYdqzfwugZE/EsW1J+3OlfX6By0/lntxWe+1zaRfuS9fC0ds0wSVGRSJjZZAArLu+htF1R9DV1FM53LtOINVf3c+7pLQDGH1nGmX6rqO1SjmMBV6heuAwp0hRmnlqPDBkA00+uZW+3edgbWfI6MjQHWvplqqqqGGXwmjVp0xyAh773M7y+fbefADh37HT2B5fLFvp4KzwfN2U8Tes2JsDPn1JlSisnqCyKj4vHe/o8+v7nb9XR2YlRU8fJn1vZWtO5Zxe8Z8wnNSUVVbVPSaqunh7Gpia5Gvf3WrBUMdkfO3k8zes1kb+GqqqqmJqZKpS5ePY8tevWRkdH8e85r0tMSOTC2fPMmDeLkmVKAfBr7x78dekyB/fup0efXuzeuZtfunehao1qQNr/j1YNm3Pp/EXq1K+rxOi/j1cVL7yqeCk7jFzx4O59qtWsRuVqVQCwtrHm5PGTPHr4SMmRfR8jY8XPne2btmJrZyt/r233U1oCdufW7c+u/ZGI1b1yxg85J2Xx4sVMnToVOzs7goODuXHjBgCbNm1CQ0ODy5cvs3LlSoKCgmjcuDHly5fn7t27rFixgnXr1jF9+nR5XcOGDePy5cscOnSIkydPcvHiRW7fVu4f61rvlZSpVA7PsqXSPX/x1Dl+bfETQ3/tz7Y1m0hMSJCfu3fzDjKpjPAPYQzu2pfe7bqxYPJsPrx7n0vRZ0xFIqGhmxfa6prcDQ7MsNxvXm2IiPvI/gdnPztna2iBuZ4x115++kIfkxTP/eAneNoUAUBDVY3k1FR5ggKQmJIEQGnbotnVnEwJCXpLn/bdGPhzb5bMXMCHUOW/HnlFbEwMAAYGBkqOJPPWLF5B2UrlKVnu68lVbEwcOjo6CgnKP3V0bd6JUX2GcvrICWQyWQY15F2xMbFAxq9hgJ8/jwMf06RFs9wMK1ukpqaSmpr6Wc+8pqYm9+/eI/jtW8LDwihbobz8nJ6eHu7Fi/Hw/oP/VifkUSVKenDz+k1evXwFwOPAx9zzvUulygUnSUtOTubk0RM0at4kXw9hywkSiSRTD+Hb/JA9KYaGhujr66OqqoqVlZX8uKurK3PnzpU/HzduHPb29vj4+CCRSChatChv377l999/Z+LEicTGxrJp0ya2b99OnTp1ANiwYQM2NjZf/PmJiYkkJirObUhKTEJD8/uHl136e27C7JUL0z1frU4NzC0tMDYz4eXTF2xdvZGg10HyISShwSHIZDL2bfuD7gN6o6Onw451W5k6YgIL1i1FXV39u2PMLBcze7b8NA0NNXXikhIYenABz8KC0i1b2taNVh61aL95dLrnzXSNAAiL+6hwPCzuo/zc9VcPGV7zF7qWb8q2W0fRVtdicPWfFK7PTS5Fi9B31GBs7GyJCA9n7+adTBoyhvnrlqCdz+4qZzepVMri+d54lvTE2aWwssPJlEunz/Ms8AlzV3p/tWxU5Ed2b9lBvWYNFY537P4zHqVLoqmlie+N26xetJyE+AR571p+IJVKWbLAG48vvIaHD/6JYyEnPEp65HJ0309HV4fiHiXYvH4jjoWcMDYx5vSJUzy8/xBbO1vCw8IBMPlPT6mxibH8nJD3/dKtC3ExcfzUpiMqKipIpVJ69/uNBo0bKDu0bHPx3AViYmJo1LSxskMRfhA/ZJKSkbJlyyo89/Pzw8vLSyHrrVKlCjExMbx584aIiAiSk5OpUKGC/LyhoSFubm5f/DmzZs1iypQpCsf6DBtAv+EDvyv+D+/es8FnDRPmTc1wPs2/v+Q4OjthbGrMlOHjCQkKxsrWGqlURkpKCt0H9qZU+TIADJkwkl5tuvDwzn1KVSjzXTFmxYvwt7Tf/Dt6mjrUK1KRaY360WPXlM8SFR11LWY07s+UE2uIjI/O8s97GvaGCUdXMKLWLwyq1gmpVMr2O8f4EBup0LuSW0pX/PR76VjYCVf3IvT/qRdXzl2mduN6uR5PXrJg9nyePX3GinX5ayz0h3
fvWeezmknzp3/15kRcbBwzxkzG3tGBDt06K5xr36WT/N/OroVJTEjgwM69+SpJWThnAc+fPmPZ2pXpnk9MSOTUsZN07dktdwPLRuOmTGDOtFm0adISVVVVXN2KUKd+XQL8A5QdmpBNzpw8zYljx5k8YwqFnAvxOPAxixd4Y2ZuRuNmny9wkR8dOfQ/KnhVxMy84C009L3EcK+cIZKUf9HV1f16oWwwZswYhg0bpnDscdir7673WeATPkZEMqr3EPkxqVSK372HHN1/mB0n9qGqqjhUxNU9LaH6J0n5Z2y7vZODvIyhkSH6hga8V9KQrxRpqnweiF/oc4pbFaZzmUZMO7lWoZy9kSW2hhYsaTVSfkzl7wTz1rBttFg3jA+xkQCY6hjK//3P84B3L+XPj/pf5qj/ZUx0DIlPThsO90vZJrxRwnyU/9LV08PazoaQt8HKDkWpFsyZz1+XLrNszQosLC2UHU6mPA1I+1sd0WuQ/JhUKuXRvQcc3f8nu04eQFVVlfi4OKaNmoC2tja/TxsvnwuXEVd3N3Zv3klyUjLqGrnf65lZi+Ys4MqlyyxdvTzD1/Ds6TMkJCTQoEmjXI4u+9ja2bJklQ/x8fHExcZiambG5LETsbG1weTv99zw8AhM/7XKZER4BC5FXJQVspBJyxb78HO3X6jbIO3GUWFXF0KCQ9iyYXOBSFJCgkO4df0m0+bOUHYoeZIYwZUzRJLyBe7u7uzduxeZTCbvTbl8+TL6+vrY2dlhbGyMuro6N27cwMEh7Uv9x48fCQwMpHr16hnWq6mp+dmKLRox3z/Uy6NMSRau91E4tmyON7YOdrTs1PazBAXgxZNnABiZpg01KFoibUnMoFdBmP59tyQ6Kproj1GYW5p/d4zZQUUiQV318y9gz8Pf0mbjCIVj/at0QFdDm7lnNxIS/YEUaSrvYyKo6FiCgPdpSYmuhjYe1i7s9j35WZ3hfw8La1miJkmpSVx9mfHk9NySEB9P6NsQqtetqexQlEImk7Fw7gIunD2Pz+rl2Nh+eXhlXuRZtiSL1i9TOOYzxxu7f/2txsXGMXXkBNTV1Rkzc+I3DQd98eQZevp6eT5BkclkeM9dyIVz51myatkXX8P/HTxMlepVMTZOf+GI/ERbWxttbW2io6K4cfU6vw3si7WNDSampty+cRPXIq5A2hwdv4ePaNGmpXIDFr5ZQkLCZxv6qaio5Ms5Yuk5+uf/MDI2ptIPshBC5oksJSeIJOUL+vXrh7e3NwMHDmTAgAEEBAQwadIkhg0bhoqKCvr6+nTt2pWRI0diYmKChYUFkyZNQkVFRSkTo7R1dHAopLhXhKaWFvoGBjgUciQkKJiLp89TpmI59A31efn0BRuXr6WYZ3H56l029raUr1KRDT6r+W34AHR0ddi2ZhM29raUKO2Z620aVK0jl577EhIVho6GFo3dq1DOvhh998z6rGxSajJPPiiu+BWdGAegcHzb7aP0qtSKlxEhBH18R/8q7XkfE8GZJzflZTqWboBvUADxyYlUcvRgaI3OLLmwQ15fbtqycgNlvcpjZmlORFg4uzfuQEVFhSq10xLhyPAIIsMjCAlK61l59ewl2jramFmYo2eQtl79h9D3xERH8+Hde6TSVHlyamVrjVY+W7Z3wez5nDx2gtkL56Cjo0PYhzAA9PR00dTSUnJ030ZbR+ez5cG1tLTQMzDA0dmJuNg4powYT1JiIkPGjSAuNo642LTfPQMjQ1RVVbnx1zUiwyMpUswNDQ0N7t66w95tf9CiQ2sltChzFs6Zz6ljJ5m54L+voR6aWp9u4Lx5/Ya7d3yZt3hBRlXlC9evXEOGDAcHB968CWLlkmU4ODnQuFnaBOR2Hduxef0m7OztsbKxZv3KtZiamcpX+8qv4uLiePP603vv27dvCQwIxMDAACtrqy9cmf9UqVaVTes3YmllSaHCzgT6B7Br206atGiq7NC+m1Qq5eifR2jYpOFnvblhH8IIDwsn6H
Xa8OtnT56ho6ODpZUlBob5bzGTrBKT4XOGSFK+wNbWliNHjjBy5EhKliyJiYkJPXr0YPz48fIyCxcupE+fPjRt2lS+BPHr16/RyoNfltTU1bh/y5f/7T1EYnwCphZmVKpWmTa/dFAoN3DMMDYuW8usMVOQqKhQrGQJxs+d8tWhJjnBRMeQ6Y36Y65rRExSHIHvX9F3zyx5j8bUhn2xMTSn566pX6npkw3XD6GtrsnE+r3Q19ThTlAA/fbOJik1WV6mhFVh+lZui466Fs/D3zL95FoOP7qY7e37FmHvP7Bkxnyio6IxMDTErYQ7033mYmBkCMDJP48pbPY4eWjaIgh9Rw6iZsO0BR3+2Lid8yfOyMv8/ttQACYumE7xUvlrMvL+PfsAGNBbcQPSsZPG06R5/h9WAWlDNx/7pc1X6Ne5p8K5lTvWY2FtiaqqKscOHGbDsjUgk2Fla023fr2o1zTvT9Q9sGc/AIN+U3wNx0wapzA05n+HDmNuYUH5ShXIz2JiYlizfBXv371H38CAGrVr0LNvb/l7aqcunYlPSGD+zLnExMTgUdKDeYsX5Os9UgD8H/kz8F+v8dKFSwBo1LQx46dMUFZYOWLoqGGsWbGa+bPnExERjpmZOS3atOTXXt2VHdp3u3X9JqEhoTRO5/310L4DbFyzQf580N/vy6MnjqVRsx9ngr1IUXKGRFZQ+iLziNjYWGxtbVmwYAE9evT45uvuv814Sd2C5uftE7OtrnUdJnLj9SNW/rUn2+rMTps6TlZ2CLnGzih/zQvJqtDoH2fFJXO9/D/E6ltJpdKvFyog1NIZ+ivkfynSVGWHkCusDPLG0PN/23c3c3uQtS5ZJ4ciKVhET8p3unPnDv7+/lSoUIGPHz8ydWraHf0WLVooObKCT09DG3sjSwbsm6PsUARBEARB+FGJrpQcIZKUbDB//nwCAgLQ0NCgbNmyXLx4ETMzsURfTotJiqf+qv5fLygIgiAIgpBDxBLEOUMkKd+pdOnS3Lp1S9lhCIIgCIIgCEog5s3nDJGkCIIgCIIgCEIWiZ6UnKHy9SKCIAiCIAiCIKRPkslH5ly4cIFmzZphY2ODRCLhwIEDCudlMhkTJ07E2toabW1t6taty+PHjxXKhIeH07lzZwwMDDAyMqJHjx7ExMQolLl37x7VqlVDS0sLe3t75s6dm+lYs5NIUgRBEARBEAQhiySSzD0yKzY2lpIlS7Js2bJ0z8+dO5clS5awcuVKrl27hq6uLg0aNCAhIUFepnPnzjx8+JCTJ09y+PBhLly4QO/eveXno6KiqF+/Po6Ojty6dYt58+YxefJkVq9enfmAs4kY7iUIgiAIgiAIWaSSw5NSGjVqRKNGjdI9J5PJ8Pb2Zvz48fKVZTdv3oylpSUHDhygY8eO+Pn5cezYMW7cuEG5cuUAWLp0KY0bN2b+/PnY2Niwbds2kpKSWL9+PRoaGhQvXhxfX18WLlyokMzkJpGk5BFqKj/OS3G6z3Jlh5BrfqRdiJZc3KXsEHLFoGodvl6ogPiR9g6JiI9Wdgi5JiI+Stkh5Jp7wU+UHUKuaetZW9kh/MAyl6QkJiaSmJiocExTUzNLG7g+f/6ckJAQ6tatKz9maGhIxYoVuXLlCh07duTKlSsYGRnJExSAunXroqKiwrVr12jVqhVXrlyhevXqaGhoyMs0aNCAOXPmEBERgbFx7u+bJYZ7CYIgCIIgCEIWSSSSTD1mzZqFoaGhwmPWrFlZ+tkhISEAWFpaKhy3tLSUnwsJCcHCQnHDZTU1NUxMTBTKpFfHv39Gbvtxbt8LgiAIgiAIQjbL7GCvMWPGMGzYMIVjWelFKehEkiIIgiAIgiAIWZTZJYizOrQrPVZWVgCEhoZibW0tPx4aGkqpUqXkZd69e6dwXUpKCuHh4fLrraysCA0NVSjzz/N/yuQ2MdxLEARBEARBELIqZ1cg/qJChQphZWXF6dOn5ceioqK4du0aXl5eAHh5eREZGamw+f
iZM2eQSqVUrFhRXubChQskJyfLy5w8eRI3NzelzEeBApak1KxZkyFDhig7DEEQBEEQBOEHIcnkf5kVExODr68vvr6+QNpkeV9fX169eoVEImHIkCFMnz6dQ4cOcf/+fbp06YKNjQ0tW7YEwN3dnYYNG9KrVy+uX7/O5cuXGTBgAB07dsTGxgaAn376CQ0NDXr06MHDhw/ZtWsXixcv/mxYWm4Sw70KmKMHDnPs4P94F5LWRefg5Ej7rj9RtlJ5ACLCwtm4Yh13b90hPi4OW3s72v7Skco1qn5WV3JSEiP7DuXFk2csXOuDs2vhXG1Ldtn7xx62b95GeFg4Lq4uDB01jGIliis7rO/ie/sO27dsI8AvgLAPH5g5fzbVa9aQn58xeRpHDx9RuKaCV0UWLvXO5UjT52hsTVXnUlgbmGOgpcv2W0fxf/fii9dUcChORUcPjLT1+Rgfw/mnt7j7NlChjJeTJ+Xti2OorUdcUgIPQ55yKvAaKdJUAKo5l6aYpTNmekYkp6byOjKEEwFXCYuNzKGWfh/f23fYvnkb/n+/zrPmz6Z6rRpfvzCf2bpxC6uWraRdx3YMGj5E4ZxMJmPk4BFcu3KVGfNmUb1mdeUE+Q2y4/03OiqaNYuXc+Ova0hUVPCqXoWeA/ugraOtlDZ9ScSHcP5Yt437N31JSkzEwsaKHsP6UqhI2mdFQnwCu9dv586VG8RERWNuZUHdFo2o1aQeADHRMRzY8gcPb90j7P0H9A0NKONVnlZdO6Cjq5Pr7XkT8JybRy/y7uVbYiOjaTawMy5limVY/rX/M/bMWffZ8d7eo9E11P/mOh/ffMi9c9d59yKIhNh4Ok/pj4WDTfY2Lhv9KO9LmSHJ4SWIb968Sa1ateTP/0kcunbtysaNGxk1ahSxsbH07t2byMhIqlatyrFjx9DS0pJfs23bNgYMGECdOnVQUVGhTZs2LFmyRH7e0NCQEydO0L9/f8qWLYuZmRkTJ05U2vLDIJKUL0pKSlJYii0/MDU345fffsXGzhaZTMbZY6eYNW4qC9f64FDIEe+Z84mLiWXszEkYGBpw4dQ55k+exfxVi3Eu4qJQ16aV6zExNeHFk2dKas33O3XiFEsXLmHk2FEUK1GcP7bvYtiAoezYtxNjExNlh5dl8fEJuLi60qR5U8aNHJNumYqVKzF24nj5c3UN9dwK76s0VNUJiQrj9ht/OpVp+NXy5R2KU9etEofunyPo4ztsjSxpUaIGCSmJBLx7CYCHtSt1i1TkwP1zvI4MwVTXkFYeaUtyHvP/CwAnExuuvXpA0Md3qEhUqFekIl3LN2XpxZ0kp6bkVHOzLD4+AZciaa/z2Axe5/zO76Efh/YfpLCrS7rn/9ixK0ubnylDdrz/Lpo2l/DwcKYsmElKSgpLZy9i+fwlDJ/4u5Jbpyg2OoYZwybiXrIYw6aPQd/QgNCgYHT1dOVldq7ejJ/vA3qPHICZpTkPbt9ji886jEyMKe1VjsiwcCLDIujQ6xdsHGz58O4Dm5euJTI8gv7jc//ubXJiEub21pSoVpY/fbZ/83XdZg1FQ/vT/AId/U//D76lzuSkJGxdHSlSvgSnNh7Icvy55Ud4X8qsnH6LqlmzJrIv7GkgkUiYOnUqU6dOzbCMiYkJ27d/+ffa09OTixcvZjnO7FaghntB2rr+o0aNwsTEBCsrKyZPniw/9+rVK1q0aIGenh4GBga0b99eYZLQ5MmTKVWqFGvXrqVQoULyDHTPnj14eHigra2NqakpdevWJTY2Vn7d2rVrcXd3R0tLi6JFi7J8ufL2AalQpRLlKlXAxs4WW3s7fu7VDS1tLQIe+QMQ8NCPxq2bU8TdDSsba9p36YSuni5PAxXXkr919Qa+N27za7+eymhGttm1dQfNWjWnSfOmFHIuxMixo9DU0uTwwcPKDu27eFXxone/36hRq2aGZTTUNTA1M5U/DAwMci2+r3n84RWnH1/HL/T5N5UvaVOEm68e8SDkKR
Hx0TwIfsLN14+oWqi0vIyDsSWvI0K4H/yYyPhonn54w/3gx9gaflp2ccvN/+EbFMD7mAhCo8PYd/8MRtr62BiYZ3sbs4P8da5dU9mh5Ii4uDimTpzCqLG/o6+v/9n5xwGB7Nq2k9ETxiohusz73vff1y9ecfv6TQaMHEyRYkUp5lmCXoP7cunMecI/hCmzaZ85svsQJuam9BjeD2c3F8ytLChRtiQWNp8m2D55FECVujUoWrI4ZlYW1GxcF3tnR54FpLXXzsmBAROGU6pSWSxsrChWqgRtunbA99otUlNTc71NhTzdqNKmHi5lM9fTrm2gi66hvvwhUfn01epb6ixWuTSVWtTGoXj6iXpeU9Dfl7JGiZNSCrACl6Rs2rQJXV1drl27xty5c5k6dSonT55EKpXSokULwsPDOX/+PCdPnuTZs2d06KC4MduTJ0/Yu3cv+/btw9fXl+DgYDp16kT37t3x8/Pj3LlztG7dWp7Rbtu2jYkTJzJjxgz8/PyYOXMmEyZMYNOmTcpovoLU1FQunj5HQkICRYsXBcCtuDuXz14gOioaqVTKxdPnSEpKokQpT/l1keERLJ+/mCHjRqChqZVR9XlecnIyAf4BlK9QXn5MRUWFchXK8+D+AyVGljvu3LpN03qN6dS6A/NnzeVj5Edlh5RlaiqqpEgVezpSUlOxNbJARZL2NvYqIhRrQ3N5UmKsrU8Rc0cev3+VYb1aamk9pfHJiRmWEXLOorkL8KriRbmK5T87l5CQwJQJUxg6ajimZqZKiO77ZOX9N+ChH7p6ergULSKvp2TZ0khUJAT+nejkFb5Xb1KoiDPLpi9kUIdeTOr/O+ePnlYo41LMjTtXbxLxIRyZTIbf3QeEBgVToqxnBrVCXGwcWjraqKqq5nQTss22iT6sGjKLvfPWE/T4pbLDEZRAIsncQ/g2BW64l6enJ5MmTQLA1dUVHx8f+YoH9+/f5/nz59jb2wOwefNmihcvzo0bNyhfPu1DMikpic2bN2NunnZn9fbt26SkpNC6dWscHR0B8PDwkP+8SZMmsWDBAlq3bg2krbLw6NEjVq1aRdeuXXOn0f/x4ulzRvcfRlJSElra2oyePgF7p7TYR04ey/wps/ilWXtUVVXR1NJk9PQJWNuljX+VyWQsmbWQBs2b4FK0CKHBoV/6UXlaZGQkqampmJgqDusyMTXh1YuC/UFS0asSNWrVxNrWmqA3QaxetpIRg4aycsOafPXh/48nH15T1s4dv9DnBEd9wMbAnDL27qipqKKjoUVMYhz3gx+jo6FFj0otkQCqKqpcf/WQC89up1unBGjkXoWX4cG8iwnP1fYIaUMxA/0DWb1pbbrnly5cQgnPElSrUS2XI/s+3/P+GxEegaGxoUJ9qmqq6OvrExEekett+ZJ3we84c/gkDVo3oWnHVjwPfMq2FRtQVVOjar20+Qmd+/7KxiWrGfZzX1RVVZGoSOg2uDduHunP84j+GMWfO/ZRs1HddM/nNbqG+tTp0gLLQrakJqfw4MJN9sxZS8fxfbB0slV2eEIuyspkeOHrCmSS8m/W1ta8e/cOPz8/7O3t5QkKQLFixTAyMsLPz0+epDg6OsoTFICSJUtSp04dPDw8aNCgAfXr16dt27YYGxsTGxvL06dP6dGjB7169ZJfk5KSgqGh4gfNvyUmJpKYqHjnNikxEY1sWjPb1sGORWuXERsby5Xzl1gycwEzlszF3smR7es2ExsTy5SFMzEwNOTapSvMmzyLmUvm4VS4EP/be4j4+DjadG6fLbEIylG3QT35vwu7uFDYxYUOLdty59ZtylX4/K51XnfuyU30NLXp7dUakBCbFIdvUADVnEvLezWdTGyoXrgMhx9e5E1kKKa6hjRyr0KNwmU5//TWZ3U2KV4dCz0T1l07kLuNEQgNCWXJAm8W+ninu1fApfMXuX3zFuu2blBCdN/ne95/8xOZTIqTa2Ha/toJAEeXQgS9eM25/52UJymnDh3jmd
9jBk8ehamFGQEP/Ni6bD1GJsYUL6P4WR0fG4f3xDnYONjR4ue2ud6erDCxNsfE+tP3BRtXRyLfh3P7xF806t1OiZEJuS2nJ87/qApckqKurjg5WCKRIJVKv/l6XV1dheeqqqqcPHmSv/76ixMnTrB06VLGjRvHtWvX0NFJW31kzZo18nWm/31dRmbNmsWUKVMUjvUbPogBIwZ/c5xfoq6uLr8z5+LmymP/QP7cc5BWndpyZP+fLNm4EodCaXf2Crk48+jeA44eOEzf4QO5d+cuAQ/9aVevuUKdI34bRI26tRg8dkS2xJgbjIyMUFVVJTxM8S55eFg4Jvlw+Mj3sLWzxcjIiDev3+TLJCVFmsqB++c49OACepraRCfEUc6hGAkpScQlxQNQx7UCd4MCuf3GD4B3MeGoq6rRvEQNLjy9xb+nHDYpVhU3c0fWXTtAVELs5z9QyFEB/gFEhEfQ85fu8mOpqancvePLvt37aNGmJUFvgmhcW3FRhQm/j8OzVEmWrvLJ7ZC/2fe8/xqbGPMxQnFYZmpKKtHR0RibKGefgowYmRhj46DYW2DtYMvNy9cASEpMYu/GHQycMIKSFcsAYO/syKunLzi297BCkhIfF8+C8bPQ0tZi4MThqKnl368mVoXseCuGfAlCtsi/7wSZ5O7uzuvXr3n9+rW8N+XRo0dERkZSrFjGSwxCWqJTpUoVqlSpwsSJE3F0dGT//v0MGzYMGxsbnj17RufOnb85ljFjxny27vTziKDMN+obyaQykpOTSUxI6735b8avoqIiT+R6DepD5x5d5OfCw8KYMmI8IyaNoYi7W47FmBPU1dVxK+rGzRs35csjSqVSbt24SZv2+eNOXXZ5F/qOjx8/YmZmpuxQvotUJpUnFR7WLgS+eylPPtRV1ZChuPrJp9VQJPD3uSbFquJuWYj11w4RGR+dO4ELCsqVL8umHVsUjs2aOgMHJ0c6d/kZQyNDWrRqqXC+a6dfGDh0EJWrVcnFSL9fZt5/3Yq7ExsTw5OAx7i4uQJw744vMqmMIsWK5m7gX+FSzI2QN8EKx0KDgjG1SOtZSE1JITUlFYnK5+399ypF8bFxLBg3EzV1dQZNHoV6PltR87/evwqWLz8s/DjEcK+c8cMkKXXr1sXDw4POnTvj7e1NSkoK/fr1o0aNGpQrVy7D665du8bp06epX78+FhYWXLt2jffv3+Pu7g7AlClTGDRoEIaGhjRs2JDExERu3rxJREREhhvgaGpqfjbEQSPuQ7a0c8vqDZSpWA4zCwvi4+K4ePocD3zvMWnedOwc7bG2tWHFgqV069cTfQN9rl26wt2bdxg3ezIA5pYWCvVpaaetzW9lY42ZRd5cAelLOvzciRmTplHUvejfSxDvJCE+gSbNmyo7tO8SFxdH0Os38ufBQW95HBCIvqEBBgYGbFizjhq1a2FqakrQmzcsX7IMW3s7KnhV/EKtuUdDVQ0TnU9DIo11DLDSNyU+OZGPCTGflTfVMcTWyII3ke/QVtekspMnFnom7Lt3Rl4m4N0LvAqVJDjqQ9pwLx1DartWIODdS3ny0rRYNTxsXNlx+yhJKUnoaaT9fiekJMn3UslL4uLiePOv1/nt27cEBgRiYGCAlbXVF67M23R0dXF2cVY4pqWtjaGhgfx4epPlLawssbHNu/tHfO/7r72TA2UqlGP5vMX0GT6Q1JQU1nivoGrtGnmu97d+q8bMHDaRwzv3U766F88CnnDuyGm6DU4b+qytq4ObRzH+WLsVDQ0NTC3NCbj3iL9OX6Bj77QbYfGxccwfN4OkhCR6jxpAQlw8CXFpPaP6hgaoqObu2j5JCYlEvvu0ilrU+wjevXqLlq4OBqZGn5W/feIyhmbGmNpakvL3nJTXfs9oPeLXTNWZEBNHVHgksRFpN00igtO+D/yzWlheU1Dfl76HGO2VM36YJEUikXDw4EEGDhxI9erVUVFRoWHDhixduvSL1xkYGHDhwgW8vb2JiorC0d
GRBQsW0KhRIwB69uyJjo4O8+bNY+TIkejq6uLh4cGQIUNyoVWfi4yIxHvmfCLCwtHV1cWxcCEmzZtOqfJp3e0T5k5l86oNzBgzmYT4eKxtbRg0ZjjlKlVQSrw5rW79ukRGRLB25VrCw8JwLeLKgqWLPptMn9/4P/JnUJ/+8udLF6VtyNSoaWNGjB7J08dPOXr4KDHR0ZiZm1G+UkV69emdZ/b9sTG0oHvFFvLnjdzT7o7feePP/vtnqeVSjlK2biw6vw34uzezUElMdY2QSqU8D3/Lmqv7FXpCzv89pKuOawUMtHSJTYon4N1LTgdek5ep4FgCgO4VWyrEs+/eGXyDAnKotVnn/8ifgb/963Ve+Ol1Hj9lgrLCEjKQHe+/QyeMYrX3ciYOHYOKiiRtM8dBfZXVpAw5u7kwYOJw9mzYwcFtezG3MuenPl3xqv1poYO+YwazZ8N2Vs1dSmx0DKYW5rTp2lG+mePLJ8955p+2HPHv3RWHO8/buBQzK8WbZjkt9EWQwuaM53embYhbrEppGvRsy5UDp3l06TY95o8E0obind91lJiIKNQ11DGzt6LNyO7Yuzt/c50AT339ObFur7zMkZW7AKjUojZeLevkUGuzTrwvfU70pOQMiexLu8MIucYvJP9umJhZ5npGyg4h1/xIf11LL+3KtrrSNmGUsf/+2WyrM7sMqtbh64UKiMzM58vvwuKilB1CromI/3Haei/4ydcLfaNja/YgkSBPLvKatp61lR1CrjDTy3s3Gc89uZGp8jVd8t/cUGUocPukCIKQ/xUyteH04+vKDkMQBAFIm9/2JuA5lVvlj+WRhdwlyeR/wrf5YYZ7CYKQfyw8t1XZIQiCIMhJJBJ6/j3MSxD+SyQeOUMkKYIgCIIgCIKQRWKflJwhkhRBEARBEARByCKRo+QMkaQIgiAIgiAIQhaJ4V45QyQpgiAIgiAIgpBlIknJCSJJEQRBEARBEIQsEsO9coZIUvIIM10jZYcg5IDUPLiLeU4pbGan7BByRUJykrJDyDXB0R+UHUKucTT6cXbKTkz5cX6H/UKfKzsE4QcghnvlDJGkCIIgCIIgCEIWiZ6UnCGSFEEQBEEQBEHIMpGl5ASRpAiCIAiCIAhCFonhXjlDJCmCIAiCIAiCkEViuFfOUFF2AHlNt27daNmy5RfLODk54e3tnSvxCIIgCIIgCHmXJJP/Cd9G9KRkwY0bN9DV1VV2GN/E9/Ydtm/ZRoBfAGEfPjBz/myq16yRbtl5M+dwcN8BBg0bTPufOuZypNlv/+597N+zj+DgYAAKOTvza6/ueFXxUnJk3yc1NZWNa9Zz4ugJwsPDMDMzo2HTxnTp3hVJOrdzFsyax6H9BxkwdBDtOrVXQsSfe+X/lKv/O0fI8yBiIqNoM6QbbuVKZFj+dcBzzu78H2HB70hOTMLAzJgytb2o0Ki6vMyFvce5tP+kwnUm1ub0mfe7/PmdM1d5+NdtQl4EkZSQyLBV09DS1c7+Bn6De3fusmvrDh4HBBL2IYwpc6ZTtUY1+fk5U2dx4sgxhWvKV6rAbO958ufbNmzh6l9XeBr4BDV1dQ6d+l+uxZ8ZezfvYv+WPxSOWdvbMG/9UgBC34awffUmAh/4k5ycjGe5UnQd0BNDYyN5+ZioaDYvW8ftqzdRkUgoX60Sv/Trjpa2cl6/b7V+9To2rFmvcMzB0YFte3YAkJiYyDJvH06fPEVyUjIVKlVg2O8jMDE1UUa432z3ph3s2bxT4ZiNvS2LNi4H4NTh41w+c4Hnj58SHxfP+oPb0NXTUyg/4KdevA99p3CsU89faNmpbc4Gn466RSpS0sYVCz1TkqXJPA97y58Pz/MuJiLDa7ycPClvXxxrAzMAXkeGcvjRBV5FhKRbvn2pelQpVIp9985w/ukt+XEddS3alKxDCavCSGUy7r0NZO+9MySlJmdvI7PJ5vWbOH/2PC9fvERTUxMPTw/6DuqHo5OjskNTHpF35AiRpGSBub
m5skP4ZvHxCbi4utKkeVPGjRyTYbnzZ8/x8MFDzMzNcjG6nGVuaU6fgf2wd7BHJpNx9PARRg8bxYbtm3Au7Kzs8LJs++ZtHNx7gDGTxuHkXIgAP39mT5uJrp4ubTu0Uyh74ex5HuXB1zU5MQkLBxtKVq/A3sWbvlpeXVODsvWqYOFgjbqmBm8CnnN0wx7UNTUoXbuSvJyZnSU/jf5N/lxFVVXx5yYl4exZFGfPopz740j2NSgL4uPjKezqQqNmjZk0ekK6ZcpXqsCoCaPlz9XVNRTOJ6ckU6N2TYqVKM7RP5Xbnq+xc7Jn9JxJ8ueqf782CfEJzBk9FQdnJ8bOmwzAno07WDBhFpOXzEJFJa3Df/nsxUSGRTB69kRSU1NZPc+HdYtW0n/s0FxvS2YVci7EomWL5c9V1T79Xi5dtIQrl64wddZ09PR0WTRvIeNGjWXFupXKCDVT7JwcmDBvqvz5v//eEhMTKVm+NCXLl2bH2i0Z1tG+20/UaVJf/lxZSaeLmT0Xn93hVUQIKhIVmhavRt8q7Zh1akOGyYKLmT233/jxPPwtyakp1C1Sgb6V2zH79AY+JsQolPW0dsXR2IbI+OjP6vmlXBMMtPRYfnk3qioq/FSmER1L12fzzbx508H39h1at2uDe3F3UlNTWeWzkqH9h7Btz3a08/hNg5wiekdyxg873GvPnj14eHigra2NqakpdevWJTY2Vn5+/vz5WFtbY2pqSv/+/UlO/vQm9d/hXhKJhBUrVtCoUSO0tbVxdnZmz549udmcDHlV8aJ3v9+oUatmhmXev3uH97yFTJw2GTW1gpO3Vq1ejcpVK2PvYI+DowO/9e+Dto42D+8/UHZo3+XhvQdUqV4Vr6qVsbaxpmadWpSvWAH/h34K5d6/e8+SBd6Mnzoxz72uhUu6U7NdI9zKe3xTeSsnW4pXLo25nRVG5iaUqFqWQh5uvA54plBORUUVPSMD+UNHX7HHs0LD6lRuXhtbF4dsa0tWVaxcie59elK1ZvUMy6hraGBiaip/6BvoK5zv1qs7bTu1p1A+SLpVVFQxMjGWP/QNDQB4/NCf96Hv6T1yAPaFHLEv5MhvowbyPPApj3zvAxD08g33btyh57C+uLgXwa2EO10G9OTquctEfAhXZrO+iaqqKqZmpvKHkZERADExMfzv4GEGDB1I2fJlcXMvypiJ43hw736+eJ9SVVV8TQ3+fk0BmrRpTstObXF1d/tiHVo62gp1aGlr5XTY6Vr51x6uv3pISHQYb6Pes+3WUUx0DLE3sszwmi03/8el574EfXzHu5hwdtw+jopEQhFzxR4FQy092pSsw5abh0mVShXOWeqbUMzKmZ13jvEyIphnYUHsuXuK0nbuGGjlzREbC328adK8Cc6FnXEt4sq4KeMJDQkhwM9f2aEpjUSSuYfwbX7IJCU4OJhOnTrRvXt3/Pz8OHfuHK1bt0YmkwFw9uxZnj59ytmzZ9m0aRMbN25k48aNX6xzwoQJtGnThrt379K5c2c6duyIn5/fF6/JC6RSKdMmTqXTL53zde/C16SmpnLq+EkS4hMo4fltX4zzquKeJbh98xavX74C4EngY+7fvUfFyp96FKRSKTMmTaPjz53yxRfYzAp5EUTQ45c4FC2scDwi9D1LBkxl+dCZHFy+jY8fMh6qkR/cve1Lm0Yt6Nr+Z7znLODjx4/KDinLQt8GM6BDT4b+0pfls7z58O49AMnJyUgAdXV1eVl1dQ0kEgkBD9K+9DzxC0BHTxdnNxd5mRJlPJFIJDzxf5yr7ciKN6/f0LJRc9q3aMfU8ZMJDUkbDhTgF0BKSgrlKpSTl3V0csTSypIH+SBJCQl6S5/23Rj4c2+WzFzAh9D3ma7j4I699Gj5M7//NoRDu/aRmpo3NsDVVtcEIC4p4Zuv0VBTQ0VFhbjkePkxCfBzucaceXydkOiwz65xMrEhLimB15Gh8mOB718ik8lwMrbJegNyUWxMWq+RgYHBV0oWXD
k5J2Xy5MlIJBKFR9GiReXnExIS6N+/P6ampujp6dGmTRtCQ0MV6nj16hVNmjRBR0cHCwsLRo4cSUpKSra0PSflrduruSQ4OJiUlBRat26No2PaHQ8Pj09fXI2NjfHx8UFVVZWiRYvSpEkTTp8+Ta9evTKss127dvTs2ROAadOmcfLkSZYuXcry5ctztjHfadumLaiqqtKuY96Yq5Ddnj5+wm+/9iYpKQltbW1mzp9NIedCyg7ru3Tu+jNxsbH80r4zKioqSKVSevbtTb2Gn4ZMbN+8DVU1Vdr8Z/hXfrd04DTiomOQpkqp1ro+pWpVlJ+zdXGgae+OmFqbExMZzcX9J9gybRm9Zo9AU0l3Z79Hea8KVKtZHSsbK94GvWXdijWMGTqKpWuWy4dK5RcuRV3pPWIA1vY2RIZFsH/rbqYNHc/sNd64uBdBU0uLnWu30L57Z2QyGbvWbUUqlRIZnpZkRoZHYmBkqFCnqqoqegZ6fIzI24loseLFGDtpHPaODoR9CGPjmvX079WPzTu3EB4Whrq6Ovr6ij1kJiYmhIfl7R4il6JF6DtqMDZ2tkSEh7N3804mDRnD/HVL0NbR+aY6GrZqSiFXZ/T09Ql85MeOtVuIDIugS78eORz9l0mA1p61eRb2huDoD998XfPiNYiKjyXg3Uv5sTpFKiKVyjj/9Ha61xho6hKdGKdwTCqTEZccj34e7Un5N6lUyuL53niW9MTZpfDXLyiwcrZ7pHjx4pw6dUr+/N+jI4YOHcr//vc/du/ejaGhIQMGDKB169ZcvnwZSLtJ26RJE6ysrPjrr78IDg6mS5cuqKurM3PmzByN+3v9kElKyZIlqVOnDh4eHjRo0ID69evTtm1bjI2NgbRfhn9/CbC2tub+/ftfrNPLy+uz576+vumWTUxMJDExUfFYUiKamppZaE3W+fv5s3vnH6zfujHdCdcFgYOTIxt3bCImJpazp84wY9I0fNYsz9eJytlTZzh57CQTpk3CybkQTwIf47Nwyd8T6BsR4OfP3p27WbNlfYF7XX+Z0I+kxCSCnrzk3K4jGFuaUbxyaSBtCNk/LBzAprADy4bMwO/aXUrVrJhRlXlW7Xp15P92dimMs0thfmnTibu3fSlTvqwSI8u8khXKyP/t4OxEYfciDOnch2vnL1OzUV0GTRjOhiWrOXHgCBKJBK9aVXFydUalAPz+VvrXQh0uri4UK1GMds3acObUmVx/z89OpSt++h10LOyEq3sR+v/UiyvnLlO7cb1vqqNpuxYKdaipqbNm0XI69eyCuob6F67MWW1L1sNK34zFF7Z/8zV1i1SgtF1RfC7uIkWa1htkZ2RJjcJlmXf26/Pu8qsFs+fz7OkzVqxbpexQlCqn36rU1NSwsrL67PjHjx9Zt24d27dvp3bt2gBs2LABd3d3rl69SqVKlThx4gSPHj3i1KlTWFpaUqpUKaZNm8bvv//O5MmT0dDQ+KzevOKHHO6lqqrKyZMnOXr0KMWKFWPp0qW4ubnx/PlzQHHYAaTNOZH+Zxzp95g1axaGhoYKj8ULvLOt/m91744vEeERtGnaihoVq1KjYlVCgkPw8V5K22atcj2enKCuro6dvT1F3YvSd2A/XIq4sHvHLmWH9V1WLFlO566dqVO/LoVdCtOgcUPadWrPtk1pk1Pv+d4jIiKC9s3bUNurBrW9ahASHMLyxT50aJH7q+ZkJyMLUyzsrSldqxLlG1bn4r4TGZbV0tXGxMqMiNDPh1jkRza2NhgaGRL0JkjZoXw3XT1drOysCX2bNuzJo1wpFm5ezvLd61mxdyN9Rw8m4kM45tZp8wGMTIyIilQc6paamkpMVAyGf99cyi/09fWxd7Dnzes3mJiakpycTHS04mTq8PDwPL+613/p6ulhbWdDyNvgLNfh4l6E1NRU3v9nqEpuauNZh+JWzvhc2vXZ5PeM1HIpTx3Xiqy4vJu3UZ+GvBU2tUNPU4fJDfqwsMVwFrYYjqmuIS09aj
Kxfm8AohJj0ddU7HlSkUjQUdcmOiGWvGzBnPn8dekyS1ctw8LSQtnhKFVmh3slJiYSFRWl8Pjvzet/e/z4MTY2Njg7O9O5c2devUob7n3r1i2Sk5OpW7euvGzRokVxcHDgypUrAFy5cgUPDw8sLT/Nr2rQoAFRUVE8fPgwh/6PZI8fsicF0hKPKlWqUKVKFSZOnIijoyP79+/Pcn1Xr16lS5cuCs9Lly6dbtkxY8YwbNgwhWNRSbn/ZtSgcSPKVSivcGzYwCE0aNyIJs2a5Ho8uUEqlZGUlDeXdfxWiQkJSCSK9xdUVFXliXT9Rg0o+68x7gAjBw2jfqMGNCpAr6tMJiX1C2NqkxISiXgXRgkj/QzL5Cfv370j6mMUpqamyg7luyXEx/MuOBQjE8UE45/J9A/v3Ccq8iNlvNLen1zc3YiLieV54FMKFUkbUvLozn1kMhkuRV1zN/jvFBcXR1BQEA3MGuLm7oaamhq3btykZu1aALx68ZLQkFBKeGS8JHdelBAfT+jbEKrXrZnlOl48eYZERQWDvxcWyG1tPOvgaeOKz8WdhMd92/yv2q4VqO9WiRWXdyvMKwG48fohgf8a+gXQp0pbbr5+xLWXaaMzXoS/RUdDCzsjS978fb2ruSMSiYQXEW+zoVXZTyaTsXDuAi6cPY/P6uXY2OaPuTM5KbM9KbNmzWLKlCkKxyZNmsTkyZM/K1uxYkU2btyIm5sbwcHBTJkyhWrVqvHgwQNCQkLQ0NCQL8bxD0tLS0L+nvsWEhKikKD8c/6fc3nZD5mkXLt2jdOnT1O/fn0sLCy4du0a79+/x93dnXv37mWpzt27d1OuXDmqVq3Ktm3buH79OuvWrUu3rKam5mfd/InROTOBKS4ujqDXb+TPg4Pe8jggEH1DA6ysrDD8zzhvNTU1TE1NcCgA652vWLocrypeWFpZERcby4ljJ7hz6zYLfbyVHdp3qVytCls3bsbSyhIn50I8Dgjkj+27aNysMQCGRobpvq4mpqY4OCp/VSv4O4EI/TTW++P7cEJfBqGlq4Oh2ed3xm+evIyhqRGmNml36175P+Pa/85TrkFVeZnT2//EpXQxDM2MiYmI4sK+40hUVCjm9elmQUxkFLEfo+W9K+9eB6OprYmBqTHaet82jj67xMfFKfSKhLwN5kngY/QNDDAw0Gfzuk1Uq1UdExMT3ga9ZbXPSmzsbClX6dONhdCQUKKjongXGopUmsqTwLRJ5LZ2tt88LyA3bF+1idKVymFmaU5EWDj7Nu9CRUUFr1ppr9/5Y2ewdbBD38iAx48C2Lp8PQ1bN8XG3hYAW0c7PMuXZu2iFXQf/BupKals8llLpZpVMDbL2z0Oy7x9qFytClbWVnx4/4H1q9eioqJKnQZ10dPTo0mLpvgsWoqBgQG6urp4z1tECY8SFM/jScqWlRso61Ve/pru3rgDFRUVqtROW60uMjyCyPAIQoLSelZePXuJto42Zhbm6BnoE/jQn8f+gRQvlbbKZuAjfzavWE+1OjXQ09f70o/OEe1K1qWMnTtrr+4nISUZfc20+SAJyYkkS9P/fK7jWoHG7lXYfPN/hMdFya9JTEkiKTWZuKSEzybep0qlRCXEyvdfCY0O51HIMzqWbsAfvidQlajStmQd7rzxIyqP9qQsmD2fk8dOMHvhHHR0dAj7kPZ+qqeni6ZW/pv/lx0kmRyYlN7N6oyGfzZq1Ej+b09PTypWrIijoyN//PFHgV/y+YdMUgwMDLhw4QLe3t5ERUXh6OjIggULaNSoEbt2ZW0o0JQpU9i5cyf9+vXD2tqaHTt2UKxYsWyOPPP8H/kzqE9/+fOli5YA0KhpY8ZNTn9vhoIiMiKCaROnEvYhDF09PVxcC7PQx5sKlSooO7TvMnjEUNatWsOiuQuIiIjAzMyM5q2a07Xnr8oO7ZsFP3vNtpmf9oE4te0QAB7VytHst45c2Huc+xdv0t97HJB25+7sH0f4+D48bSlbC1NqdWxCmX
/tkRIV/pGDy7YRHxOLjr4edm6F6DZ5ILoGn77w3D59RWHDx63T0xa2aNq7A57VFXsVc1qAXwDD+w+RP1+xeBkA9Rs3ZMioYTx78pQTR44REx2DqZkZ5SqWo1vvHgrjhzeuXq+w4eNvXdIW71iwzJtSZdPvyVWG8A9hLJu5iJjoaPQNDXAr4c7kJbPkk+GD3wTxx/ptxETHYG5pTvOf2tCoTTOFOvqNHswmn7XMGjUZiUSF8tUq0aV/d2U0J1PevXvHlPGTiPoYhZGxER4lPVm1YZV8DuTAoYNQkagw/vdxCps55nVh7z+wZMZ8oqOiMTA0xK2EO9N95spf05N/HlPY7HHy0LEA9B05iJoN66Cmrs5fZy+yZ9NOkpOTsbCyoHGb5jRt2yLdn5fTqjqn/b0Mqt5J4fi2W0e4/iptSMxPZRphomOAz6W07wlVCpVCTVWN7hUVYz7qd5lj/n9988/ecvN/tC1Zh/5VOiBDxt23gey9e/p7mpOj9u/ZB8CA3v0Vjo+dNJ4mzQtOb31mZLYnJb2b1d/KyMiIIkWK8OTJE+rVq0dSUhKRkZEKvSmhoaHyOSxWVlZcv35doY5/Vv9Kb55LXiKR/bPurpBlEomE/fv307JlyyzX8T46b6/kkp0KwFzYb5aSR5bTzA3HA69mW11/rtwBEgnNfuuYbXVmlzouuZvMKFNmVjbK7xyN8vaHdXZ6G/XjvK4bbvyZbXUNrNaRx+9fZSoByU0T6il3VbTcYqaX93pPH7zN3FLoJWyyPkw1JiYGBwcHJk+eTNeuXTE3N2fHjh20adMGgICAAIoWLcqVK1eoVKkSR48epWnTpgQHB2NhkTYaYfXq1YwcOZJ3797l6QU8fsiJ84Ig5F0ymYyXfk+p0bahskMRBEEAQEtNAzNdI848vqHsUIS8SJLJRyaMGDGC8+fP8+LFC/766y9atWqFqqoqnTp1wtDQkB49ejBs2DDOnj3LrVu3+PXXX/Hy8qJSpbSRBvXr16dYsWL88ssv3L17l+PHjzN+/Hj69++fpxMU+EGHewmCkHdJJBIGLB6v7DAEQRDkElKSmHRs5dcLCj+kzG7QmBlv3ryhU6dOhIWFYW5uTtWqVbl69Srm5uYALFq0CBUVFdq0aUNiYiINGjRQ2KNPVVWVw4cP07dvX7y8vNDV1aVr165MnTo1x2LOLiJJyQZixJwgCIIgCMKPKSeTlJ07d37xvJaWFsuWLWPZsmUZlnF0dOTIkSPZHVqOE0mKIAiCIAiCIGTVDzTXNjeJJEUQBEEQBEEQsigne1J+ZCJJEQRBEARBEIQs+pFWLc1NIkkRBEEQBEEQhCwSPSk5QyQpecSPlIX7BgUqO4RcU9KmiLJDyDX3MrlOfH5V2sZN2SHkmkImNsoOQcgBdkYWyg4h13h+x34U+Y1Yw0coaESSIgiCIAiCIAhZJPmR7jTnIpGkCIIgCIIgCEIWieFeOUMkKYIgCIIgCIKQRaIjJWeIJEUQBEEQBEEQskxkKTlBJCmCIAiCIAiCkEUiRckZKsoOIL978eIFEokEX19fZYciCIIgCIIg5DKJRJKph/BtCmxPSs2aNSlVqhTe3t7KDiXPiY2NZc2K1Vw4e4GIiHCKuBVhyIihuBcvpuzQvpk0Vcr/duznxrm/iIr8iKGJEZVqV6Nhh+YKbwAhr99yYNMuHj8IQJqaipW9Lb3GDMTE3BSAjxGR7N+wC3/fhyTGx2Npa02D9s0oXbm8spr2VVs2bOL82fO8fPESTU1NPDw96DuwHw5OjvIyB/cd4OSxEwQGBBAXG8fRsyfQ19dXYtSf1HYtj4e1Kxb6JiSnpvAy/C2HH13kfUxEhtdY6pvSsGhl7IwsMNEx5MD9s1x8dkehzLh6PTDRMfzs2svPfdl37wwAfau0w8XMXuH8X8/vsvfe6WxoWdbt376b7Ws307h1c34d0It3IaH0/6lnumWHTfwdr5pVOXvsFMvnLk63zNq9WzA0NsrBiHPG3j/2sH
3zNsLDwnFxdWHoqGEUK1Fc2WFlu/fv3rF8yXKu/nWFhIQE7OzsGDt5PO7F3JUdWo7asmEzK31W0K5Te4aMGKrscD7z2v8Z149cIOTFG2Ijo2k1uAuuZb/t9+9N4At2zFyFuZ0l3aYPyVSdc7v8nm6dNTo0pmKTGlluT3b4ls+buTNmc/P6TT58eI+Otg4lPD3oO6gfjk5Oygs8l4mJ8zmjwCYpXyOTyUhNTUVN7cf7XzB72iyePX3GxGkTMTM34/iR4wzuO4hte7ZjbpE/1s8/sfd/XDx6hi5DemHtYMvLJy/YumQtWrra1GpWH4D3waEsHD0dr7o1aNKpNVo6WgS/CkJdXV1ez+ZFq4mPjaPP+MHoGehz4/wV1s1dxu8LpmBf2DGjH69Ud27foXW7NhQt5k5qaiqrl61k6IAhbN29HW1tbQASExKoWLkSFStXYpXPCiVHrKiwqT1/PfflVWQoKhIJjd2r0turDfPObCQpNSXdazRU1QiL/cjdt4G0KJH+h7b3+e2o/CtBtTIwo0/lttz9z748V17c47j/X/LnGf3M3PLEP5CTh4/h6OwkP2ZqbsbqPZsVyp06fIxDu/ZTqmJZACrXqkapCmUVyiyb401yUlK+TFBOnTjF0oVLGDl2FMVKFOeP7bsYNmAoO/btxNjERNnhZZuoqCj6dP+NMuXKsmDJQoyMjXn96nWeuYmQU/wePuLgvgO4uLooO5QMJScmYeFgjUf1chxYsuWbr0uIjefI6l04FitMXFRMpuvst2S8wvPn9/w5um4vbuVLZL4R2exbPm/c3ItSv1EDLK2siIqKYv2qtQztP4Tdh/aiqqqq5BbkEpGj5IgCOdyrW7dunD9/nsWLF8u71jZu3IhEIuHo0aOULVsWTU1NLl26RLdu3WjZsqXC9UOGDKFmzZry51KplLlz5+Li4oKmpiYODg7MmDEj3Z+dmppK9+7dKVq0KK9evcrBVmZNYkIC58+co/+g/pQqUxo7e3t6/NYTO3s79u/Zr+zwvtlz/8d4VixDifKlMLU0p0yV8riXKsHLwGfyMn9u3UuxsiVp9WsH7As7Ym5tiWfFMugbGcjLPPN/Qo2m9XAqUhgzKwsadWiBjq4Or54+V0azvsnCpd40btYE58LOuBZxZezk8YSGhBDg5y8v0/6njvzSrQvFSyj/Q+6/1lzdx43XjwiNDiM46gM77xzHRMcAOyPLDK95HRnK4UcX8A0KIEWamm6Z2KR4ohPj5I9ils58iInkadgbhXLJqSkK5RJTkrK1fZkRHx/PkpkL6DN8ILr6evLjqqqqGJsYKzyuX7qKV82q8i8GmpqaCudVVFR4cOcetRvVU1ZzvsuurTto1qo5TZo3pZBzIUaOHYWmliaHDx5WdmjZatvGrVhYWjJu8niKlSiOja0NFb0qYmdvp+zQckxcXBxTxk/m9/Gj0TfIu8mYc8miVGvbgCLlMve+eWLjPtwrlcLG5fMbW99Sp56RvsLj8e1HOLg7Y2Rhmuk2ZLdv+bxp0bolpcqUxtrGGreibvTq9xvvQkMJCQ5WYuS5S5LJ/4RvUyCTlMWLF+Pl5UWvXr0IDg4mODgYe/u0IR6jR49m9uzZ+Pn54enp+U31jRkzhtmzZzNhwgQePXrE9u3bsbT8/AtVYmIi7dq1w9fXl4sXL+Lg4JCt7coOKamppKamoqGpoXBcU1OTe753lRRV5hUq6krAvUeEBoUA8Ob5K54+CqRY2bTXVCqV8uDmXSxtrPCZNI/ffxnA3BFTuHv1lkI9zkVduH3xGrHRMUilUm5euEpyUjKuJfLPsIvYmLQ7dwYGBl8pmTdpqWsCEJeUkG11qkpUKGvnzvVXDz47V8auKFMb9mVErS40dq+KuqryelPXLV5JmYrl8Cxb6ovlngY+4cWTZ9T5QgJy4cQZNDU1qVSjSjZHmfOSk5MJ8A+gfIVPwyxVVFQoV6E8D+5//hrmZ5cuXKRosaKMHzWWJnUb0+2nLhzad1DZYe
WoBbPn41W1MuUrVlB2KNnu/oUbRL4Pp0qrutlSX+zHaJ7d9cezet4ccvy1z5v4+HiOHDqMta0NFul8TyqoVCSSTD2Eb1MgxzoZGhqioaGBjo4OVlZWAPj7p2X9U6dOpV69b7/TGB0dzeLFi/Hx8aFr164AFC5cmKpVqyqUi4mJoUmTJiQmJnL27FkMDT8fG58X6OrqUsKzBBvXbsCxkBMmJiacOn6SB/cfYJuP7uTVb9uEhPh4pvUbjURFBZlUSrOf21ChZmUAoj9GkRifwIm9h2n2cxtadG2P3+37rJm1lMEzRuNaoigAPUb1Z/285Yzq3B8VVVU0NDXoPXYQFjb5481VKpWyZIE3HiU9cXYprOxwMk0CtCxRk+dhQYREh2VbvSWsXdBS1+TG64cKx++88SciLoqPCbHYGJrRpFg1zPWM2XTjz2z72d/q8pkLPHv8lNkrFn617JkjJ7B1tMftC8nz6aMnqVqnOpqamtkZZq6IjIwkNTUVE1PFYV0mpia8evFSSVHljLdBbzmwZz8dOnekS/eu+D3yY9H8haipq9G4WRNlh5ftTh0/SaB/AGu3rFd2KNkuPOQD5/84xk/j+qCSTcOaHly6hYaWZqZ7c3LDlz5v9u3ey4oly4iPj8fB0QHvZYsVhlYXfCLxyAkFMkn5knLlymWqvJ+fH4mJidSpU+eL5Tp16oSdnR1nzpyRD8fISGJiIomJiYrHkhNz7cvFhKmTmDV1Bi0bNkdVVZUiRYtQt0E9he7bvO72pevcOH+FbsP7YO1gy5vnr9i7dhuGJsZUqlMVmVQGgGfFMtRu0RAAe2dHnvk/5uLRM/Ik5fC2fcTFxjFw2ij0DPS5e/UW6+YuZ+issdg62Wf48/OKhXPm8+zpM5avXaXsULKktWcdrAxM8bm4K1vrrehYAv93z4lKiFU4fvXlffm/Q6I/EJUQS98q7TDVMSQs7mO2xvAlH969Z8OyNUyYOxUNDY0vlk1MTOTS6Qu0/aVDhmUCHvoT9PI1A8cMy+5QhWwmlUopWqwofQb0BaBIUTeePXnGgb0HClySEhoSivf8RXgvX5Ivk+cvkUqlHF6xgyqt62FibZ5t9d6/cJNiXqVR08h7X/C/9HlTv1EDylesQNiHD+zYsp0Jo8ezYt2qAve6Z0R0juSMHy5J0dXVVXiuoqKCTCZTOJacnCz/99cSjn80btyYrVu3cuXKFWrXrv3FsrNmzWLKlCkKx0aOGcWosemv8JHd7OztWLZmBfHx8cTGxGJmbsaE0eOxsbXNlZ+fHfZv3EX9Nk0oV70SALZO9oS/+8CJPYepVKcqegb6qKiqYmVvo3CdlZ0NTx+lTaR+HxzK+f+dYpzPDGwc0nqR7Ao58PRRIBeOnKZTv2652qbMWjhnPn9duozP6hVYWOaPBQ/+rZVHbYpZObPs0i4+JsR8/YJvZKytj6u5Axuvf7135FVE2phpM12jXE1SngU+4WNEJKN+GyI/JpVK8bv3kGMHDrP9+D75hNOr5y+TmJhI9foZv6+cPnICJxdnChfJu5OSv8TIyAhVVVXCw8IVjoeHhWNipvxx+dnJ1MwMp0KFFI45FXLi3JmzSooo5wT4+RMRHkH3zt3kx1JTU/G97cu+P/Zy9sr5fDuxOik+kZDnbwh9+ZZTm9OG68lkMpDJmNdtDO1H9cCxWOb+Hl8HPCc8+D3N+/+UEyF/l6993ujp6aGnp4e9gz3FPUrQqFZ9Lpw9T72G9ZUQbe4T80xyRoFNUjQ0NEhNTX+C7b+Zm5vz4IHimGdfX195N6Wrqyva2tqcPn2anj3TXxIUoG/fvpQoUYLmzZvzv//9jxo1Ml42cMyYMQwbpnjHMzo5NoPSOUdbWxttbW2ioqK4fuUa/Qb3z/UYsio5MfGztcbTEk4pAGrqaji6FpLPWfnHu7chmFiYAZCUmDZhWkWi8nk9UmlOhf7dZDIZi+Yu4MK58yxdtRwbW5uvX5THtPKojYe1C8sv/0F4XFS21l
3eoQQxiXH4hT77alkbw7QP26jE3P378yhTkgXrfBSOLZ/rjY29HS07tVX44nbm6EnKVa6AoVH6Q0jj4+O5cu4SP/XskqMx5yR1dXXcirpx88ZNqtdKe++USqXcunGTNu3bKjm67OVZ0oNXLxUXVXn16hVW1lZKiijnlK1Qji27tiocmzFlBo5Ojvzc9ed8m6AAaGpr8utMxWWU75y6wiu/p7QY+DOG5plfke7++RtYOtli4ZB33tOz8nkjk8mQyWQKN3wLOpGi5IwCm6Q4OTlx7do1Xrx4gZ6eHtIMvnTWrl2befPmsXnzZry8vNi6dSsPHjygdOnSAGhpafH7778zatQoNDQ0qFKlCu/fv+fhw4f06NFDoa6BAweSmppK06ZNOXr06GfzVv6hqan5WRdoUkzuLYN67a+ryJDh4OjIm9dvWLbYBwcnR5o0a5prMXyvEuVLc3z3n5iYm2LtYMvrZy85c/A4XnWrycvUbdWI9fOW41rcDVcPdx7dvsf9674MnjkGACs7a8ytLdm+bAOtu3dEV1+Pu1dv4+/7kD4T8t4a/v9YMGc+p46dYNaCOejo6BD2IW0uh56eLppaWgCEfQgjPCyMoDdpK1s9e/IUHR0dLK0sMVDyfKnWnrUpY1eU9dcOkZiShL6mDgDxyUmkSNP/O1CVqGCpn3ZHXVVFFUMtfWwMzElMTSYsNlJeTgKUdyjOzdePkP6nh9RUx5DSdkXxD31ObFICNoZmNC9Rk6cf3hAc9SFH2poRbR0dHAoprgSkqaWFvoGBwvHgoLf43XvImFmTMqzrr7MXSU1NpXq9mjkVbq7o8HMnZkyaRlH3on8vQbyThPgEmjTPP+9L36JD54789mtvNq3fSJ16dXj04BGH9h1k1LjRyg4t2+nq6n42d0FbWwsDQ4M8OYcuKSGRiNBPc+Mi34cT+vIt2rraGJgZK5SVqKhgbqeYWOoY6KGmrqZw/FvrTIxPIOD6PWr+lLd+37/2eRP0JogzJ09RvlJFjIyNeB/6jq0bt6CppYlXFS8lR5+LxHivHFFgk5QRI0bQtWtXihUrRnx8PBs2bEi3XIMGDZgwYQKjRo0iISGB7t2706VLF+7f/zR2fcKECaipqTFx4kTevn2LtbU1ffr0Sbe+IUOGIJVKady4MceOHaNy5co50r7vERMTw0qflbx/9w4DAwNq1KnJb/36oKaef34d2vf+mcPb9rFz5WZiPkZhaGJE1YY1adShpbxMKa9ydOzbjRN7DrN7zVYsbK3pOXogLsWKAKCqpka/ScM4uGk3K6d5k5iQgLm1Jb8M6UWJciWV1LKvO7BnHwADf1Ps+Ro7abx8TPuBvfvZsGad/Fz/Xn0/K6MsVQqVAqB/1fYKx3fePsaN148A6Fi6AcY6Bqy4vBsAAy09htf6RV62lms5armW48mH1/IyAK7mjpjoGHDt5ecrQqVKUyli7kj1wmXQUFUnMj6a+28fczLwWnY3MducPXoKE3NTSpYrnWGZM0dOUrGaF7p6ehmWyQ/q1q9LZEQEa1euJTwsDNcirixYuuizyfT5nXvxYsyaP5uVPivYuGYD1jbWDB4+hAaNGyg7tB9eyPM37Jy1Wv787Pa05a9LVC1L497tubTvJA8u3aLPwm9PKL9W5z/8rt5FBhSrlLc+e772eaOpqcHdO3f5Y8cuoqOiMTE1oWTpUqxct7pA7W/0NWK4V86QyP47IUNQig8x4V8vVED4/mdzvYKspE0RZYeQa2af2ZhtdfWr0p4nH15zIuBKttWZXbqWy1t3OnOSjaGZskMQhO9y8OGFbKvrf6t2IZFIFJKLvKR5serKDiFXmOvnveQn+OO7TJW3Nsx/80iVoUDukyIIQv6lpaaBqa4h557cVHYogiAIQNo8i9f+z6ja5seYCC5kjtjMMWfkn/E9giD8EBJSkph2Yo2ywxAEQZCTSCT0WTRG2WEIeZXIO3KE6EkRBEEQBEEQhCzKjZ6UZcuW4eTkhJaWFhUrVu
T69evZ3Iq8RyQpgiAIgiAIgpBFkkw+MmvXrl0MGzaMSZMmcfv2bUqWLEmDBg149y5zc2HyG5GkCIIgCIIgCEJWSSSZe2TSwoUL6dWrF7/++ivFihVj5cqV6OjosH79+hxoTN4hkhRBEARBEARByKKcHO6VlJTErVu3qFu3rvyYiooKdevW5cqVvLcCZnYSE+cFQRAEQRAEIYsy2zmSmJhIYmKiwrH0NvoG+PDhA6mpqVhaWioct7S0xN/fP9Ox5isy4YeVkJAgmzRpkiwhIUHZoeSoH6WdMploa0El2low/Sht/VHaKZOJtgrfZtKkSTJA4TFp0qR0ywYFBckA2V9//aVwfOTIkbIKFSrkQrTKIzZz/IFFRUVhaGjIx48fMTAwUHY4OeZHaSeIthZUoq0F04/S1h+lnSDaKnybzPSkJCUloaOjw549e2jZsqX8eNeuXYmMjOTgwYM5Ha7SiDkpXaGm2gAADxlJREFUgiAIgiAIgpBLNDU1MTAwUHikl6AAaGhoULZsWU6fPi0/JpVKOX36NF5eXrkVslKIOSmCIAiCIAiCkEcNGzaMrl27Uq5cOSpUqIC3tzexsbH8+uuvyg4tR4kkRRAEQRAEQRDyqA4dOvD+/XsmTpxISEgIpUqV4tixY59Npi9oRJLyA9PU1GTSpEkZdjEWFD9KO0G0taASbS2YfpS2/ijtBNFWIecMGDCAAQMGKDuMXCUmzguCIAiCIAiCkKeIifOCIAiCIAiCIOQpIkkRBEEQBEEQBCFPEUmKIAiCIAiCIAh5ikhSBEEQBEEQBEHIU0SS8oNatmwZTk5OaGlpUbFiRa5fv67skHLEhQsXaNasGTY2NkgkEg4cOKDskHLErFmzKF++PPr6+lhYWNCyZUsCAgKUHVaOWLFiBZ6envINsLy8vDh69Kiyw8pxs2fPRiKRMGTIEGWHku0mT56MRCJReBQtWlTZYeWYoKAgfv75Z0xNTdHW1sbDw4ObN28qO6xs5+Tk9NnrKpFI6N+/v7JDy3apqalMmDCBQoUKoa2tTeHChZk2bRoFcW2i6OhohgwZgqOjI9ra2lSuXJkbN24oOyyhABJJyg9o165dDBs2jEmTJnH79m1KlixJgwYNePfunbJDy3axsbGULFmSZcuWKTuUHHX+/Hn69+/P1atXOXnyJMnJydSvX5/Y2Fhlh5bt7OzsmD17Nrdu3eLmzZvUrl2bFi1a8PDhQ2WHlmNu3LjBqlWr8PT0VHYoOaZ48eIEBwfLH5cuXVJ2SDkiIiKCKlWqoK6uztGjR3n06BELFizA2NhY2aFluxs3bii8pidPngSgXbt2So4s+82ZM4cVK1bg4+ODn58fc+bMYe7cuSxdulTZoWW7nj17cvLkSbZs2cL9+/epX78+devWJSgoSNmhCQWMWIL4B1SxYkXKly+Pj48PAFKpFHt7ewYOHMjo0aOVHF3OkUgk7N+/n5YtWyo7lBz3/v17LCwsOH/+PNWrV1d2ODnOxMSEefPm0aNHD2WHku1iYmIoU6YMy5cvZ/r06ZQqVQpvb29lh5WtJk+ezIEDB/D19VV2KDlu9OjRXL58mYsXLyo7lFw3ZMgQDh8+zOPHj5FIJMoOJ1s1bdoUS0tL1q1bJz/Wpk0btLW12bp1qxIjy17x8fHo6+tz8OBBmjRpIj9etmxZGjVqxPTp05UYnVDQiJ6UH0xSUhK3bt2ibt268mMqKirUrVuXK1euKDEyITt9/PgRSPvyXpClpqayc+dOYmNj8fLyUnY4OaJ///40adJE4W+2IHr8+DE2NjY4OzvTuXNnXr16peyQcsShQ4coV64c7dq1w8LCgtKlS7NmzRplh5XjkpKS2Lp1K927dy9wCQpA5cqVOX36NIGBgQDcvXuXS5cu0ahRIyVHlr1SUlJITU1FS0tL4bi2tnaB7f0UlEfsOP+D+fDhA6mpqVhaWioct7S0xN/fX0lRCdlJKpUyZMgQqlSpQokSJZQdTo64f/8+Xl5eJC
QkoKenx/79+ylWrJiyw8p2O3fu5Pbt2wV+vHfFihXZuHEjbm5uBAcHM2XKFKpVq8aDBw/Q19dXdnjZ6tmzZ6xYsYJhw4YxduxYbty4waBBg9DQ0KBr167KDi/HHDhwgMjISLp166bsUHLE6NGjiYqKomjRoqiqqpKamsqMGTPo3LmzskPLVvr6+nh5eTFt2jTc3d2xtLRkx44dXLlyBRcXF2WHJxQwIkkRhAKmf//+PHjwoEDf1XJzc8PX15ePHz+yZ88eunbtyvnz5wtUovL69WsGDx7MyZMnP7trWdD8+26zp6cnFStWxNHRkT/++KPADeGTSqWUK1eOmTNnAlC6dGkePHjAypUrC3SSsm7dOho1aoSNjY2yQ8kRf/zxB9u2bWP79u0UL14cX19fhgwZgo2NTYF7Xbds2UL37t2xtbVFVVWVMmXK0KlTJ27duqXs0IQCRiQpPxgzMzNUVVUJDQ1VOB4aGoqVlZWSohKyy4ABAzh8+DAXLlzAzs5O2eHkGA0NDfldu7Jly3Ljxg0WL17MqlWrlBxZ9rl16xbv3r2jTJky8mOpqalcuHABHx8fEhMTUVVVVWKEOcfIyIgiRYrw5MkTZYeS7aytrT9Lpt3d3dm7d6+SIsp5L1++5NSpU+zbt0/ZoeSYkSNHMnr0aDp27AiAh4cHL1++ZNasWQUuSSlcuDDnz58nNjaWqKgorK2t6dChA87OzsoOTShgxJyUH4yGhgZly5bl9OnT8mNSqZTTp08X2DH9PwKZTMaAAQPYv38/Z86coVChQsoOKVdJpVISExOVHUa2qlOnDvfv38fX11f+KFeuHJ07d8bX17fAJiiQtljA06dPsba2VnYo2a5KlSqfLQ8eGBiIo6OjkiLKeRs2bMDCwkJhonVBExcXh4qK4lcqVVVVpFKpkiLKebq6ulhbWxMREcHx48dp0aKFskMSChjRk/IDGjZsGF27dqVcuXJUqFABb29vYmNj+fXXX5UdWraLiYlRuBv7/PlzfH19MTExwcHBQYmRZa/+/fuzfft2Dh48iL6+PiEhIQAYGhqira2t5Oiy15gxY2jUqBEODg5ER0ezfft2zp07x/Hjx5UdWrbS19f/bE6Rrq4upqamBW6u0YgRI2jWrBmOjo68ffuWSZMmoaqqSqdOnZQdWrYbOnQolStXZubMmbRv357r16+zevVqVq9erezQcoRUKmXDhg107doVNbWC+5WjWbNmzJgxAwcHB4oXL86dO3dYuHAh3bt3V3Zo2e748ePIZDLc3Nx48uQJI0eOpGjRogXyO4SgZDLhh7R06VKZg4ODTENDQ1ahQgXZ1atXlR1Sjjh79qwM+OzRtWtXZYeWrdJrIyDbsGGDskPLdt27d5c5OjrKNDQ0ZObm5rI6derITpw4oeywckWNGjVkgwcPVnYY2a5Dhw4ya2trmYaGhszW1lbWoUMH2ZMnT5QdVo75888/ZSVKlJBpamrKihYtKlu9erWyQ8oxx48flwGygIAAZYeSo6KiomSDBw+WOTg4yLS0tGTOzs6ycePGyRITE5UdWrbbtWuXzNnZWaahoSGzsrKS9e/fXxYZGanssIQCSOyTIgiCIAiCIAhCniLmpAiCIAiCIAiCkKeIJEUQBEEQBEEQhDxFJCmCIAiCIAiCIOQpIkkRBEEQBEEQBCFPEUmKIAiCIAiCIAh5ikhSBEEQBEEQBEHIU0SSIgiCIAiCIAhCniKSFEEQhB/MixcvkEgk+Pr6frFczZo1GTJkSK7EJAiCIAj/JpIUQRCEPKBbt25IJBIkEgkaGhq4uLgwdepUUlJSvrveli1bKhyzt7cnODiYEiVKAHDu3DkkEgmRkZEK5fbt28e0adO+6+d/zX8Tpn+e//PQ19enePHi9O/fn8ePH+doLIIgCELeIZIUQRCEPKJhw4YEBwfz+PFjhg8fzuTJk5k3b16W6kpNTUUqlaZ7TlVVFSsrK9TU1L5Yh4mJCfr6+ln6+d/r1KlTBAcHc/
fuXWbOnImfnx8lS5bk9OnTSolHEARByF0iSREEQcgjNDU1sbKywtHRkb59+1K3bl0OHToEwMKFC/Hw8EBXVxd7e3v69etHTEyM/NqNGzdiZGTEoUOHKFasGJqamnTv3p1Nmzb9v537CYlyC+M4/p2xBhxdZFIpImkZNlFKFEntBDeFMKtaRIQQQgujTYTLikCECppIXBQh4iYQapBAqIhIomigBLFBEGtjVBK0sD9jM3chDkxG15t2e+/l+9nN4bznfeZdzW+ec15u376d70w8ePCgoHsxNTVFc3MzAGVlZYRCIdra2oDF270+fPjA0aNHKSsrIxqNsn///oLuxkINw8PDxGIxSktL88HrnyovL6eiooJNmzYRj8e5e/cuTU1NHDt2jG/fvv3C05Uk/ZcYUiQpoIqLi/n69SsA4XCYRCLB2NgYfX193L9/n9OnTxfMn52dpbu7m2vXrjE2NkYikeDQoUP5oDA9Pc2+ffsKrqmurmZwcBCAdDrN9PQ0ly9f/mE9bW1tPHv2jGQyyePHj8nlchw4cIBMJlNQw4ULF+jv7+fhw4e8fv2aU6dOLftZhMNhTp48yatXr0ilUsteT5IUbD/v9UuS/nW5XI579+4xPDzMiRMnAAo6GjU1NZw/f57jx4/T09OTH89kMvT09NDY2JgfKy4u5suXL1RUVPzwXkVFRaxduxaA9evXs2bNmh/Om5iYIJlMMjIykg86AwMDVFdXc+vWLQ4ePJivobe3l82bNwPQ0dHBuXPnfu1BfGfr1q3A/LmVPXv2rMiakqRgMqRIUkAMDQ1RWlpKJpMhm81y+PBhzpw5A8yf0ejq6uLly5d8/PiRubk5Pn/+zOzsLNFoFIBIJEJDQ8NvqW18fJxVq1bR1NSUHysvL6e+vp7x8fH8WDQazQcUgMrKSt6+fbsiNeRyOQBCodCKrCdJCi63e0lSQDQ3N/P8+XMmJib49OkTfX19lJSUMDU1RWtrKw0NDQwODpJKpbh69SpAfjsYzHdN/vQP+NWrVxd8DoVC+XCxXAthqLa2dkXWkyQFl50USQqIkpIS6urqFo2nUimy2SwXL14kHJ7/b+nmzZtLWjMSifztQfNIJALw03mxWIy5uTmePHmS3+41MzNDOp1m27ZtS6plObLZLIlEgtraWnbu3Pnb7ydJ+rPspEhSwNXV1ZHJZLhy5QqTk5P09/fT29u7pGtramoYHR0lnU7z/v37gkPuCzZu3EgoFGJoaIh3794VvDVswZYtW4jH47S3t/Po0SNevHjBkSNHqKqqIh6PL/s7fm9mZoY3b94wOTlJMpmkpaWFp0+fcv36dYqKilb8fpKkYDGkSFLANTY2cunSJbq7u9m+fTsDAwN0dXUt6dr29nbq6+vZvXs369atY2RkZNGcqqoqzp49S2dnJxs2bKCjo+OHa924cYNdu3bR2trK3r17yeVy3LlzZ9EWr5XQ0tJCZWUlO3bsoLOzk1gsxujoaP51yZKk/7dQbqU2C0uSJEnSCrCTIkmSJClQDCmSJEmSAsWQIkmSJClQDCmSJEmSAsWQIkmSJClQDCmSJEmSAsWQIkmSJClQDCmSJEmSAsWQIkmSJClQDCmSJEmSAsWQIkmSJClQDCmSJEmSAuUvyiNrucDcFZIAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "fig, ax, df = plot_label_distributions(\n", + " partitioner,\n", + " label_name=\"label\",\n", + " plot_type=\"heatmap\",\n", + " size_unit=\"absolute\",\n", + " partition_id_axis=\"x\",\n", + " legend=True,\n", + " verbose_labels=True,\n", + " title=\"Per Partition Labels Distribution\",\n", + " plot_kwargs={\"annot\": True},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "5167593e67fa3dbb", + "metadata": {}, + "source": [ + "Note: we used the `plot_kwargs={\"annot\": True}` to add the number directly to the plot." + ] + }, + { + "cell_type": "markdown", + "id": "e2e41273551ac32a", + "metadata": {}, + "source": [ + "If you are a `pandas` fan, then you might be interested that a similar heatmap can be created with the DataFrame object for visualization in jupyter notebook:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fcc90b52bfd650cf", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " 
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
 airplaneautomobilebirdcatdeerdogfroghorseshiptruck
Partition ID          
08177941462212343225456384149
11416697530340903868
2041124543511158421
37621591100511201662198213512175
424371421924004251151477
5677917025255247727445900
6422244863809290380506
7122281159721741038172716825154
825629342751848151122401417
91136107350357126711223
\n" + ], + "text/plain": [ + "" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "df.style.background_gradient(axis=None, cmap=\"Greens\", vmin=0)" + ] + }, + { + "cell_type": "markdown", + "id": "37d85e1b40d54918", + "metadata": {}, + "source": [ + "## Plot Comparison of Label Distributions" + ] + }, + { + "cell_type": "markdown", + "id": "4f49259a3de7dd17", + "metadata": {}, + "source": [ + "Now, once you know how to visualize a single partitioned dataset, you'll learn how to compare a few of them on a single plot.\n", + "\n", + "Let's compare:\n", + "\n", + "- IidPartitioner,\n", + "- DirichletPartitioner,\n", + "- ShardPartitioner\n", + "still using the `cifar10` dataset.\n", + "\n", + "We need to create a list of partitioners. Each partitioner needs to have a dataset assigned to it (it does not have to be the same dataset so you can also compare the same partitioning on different datasets)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9e84a9192a266f3e", + "metadata": {}, + "outputs": [], + "source": [ + "from flwr_datasets import FederatedDataset\n", + "from flwr_datasets.partitioner import (\n", + " IidPartitioner,\n", + " DirichletPartitioner,\n", + " ShardPartitioner,\n", + ")\n", + "\n", + "partitioner_list = []\n", + "title_list = [\"IidPartitioner\", \"DirichletPartitioner\", \"ShardPartitioner\"]\n", + "\n", + "## IidPartitioner\n", + "fds = FederatedDataset(\n", + " dataset=\"cifar10\",\n", + " partitioners={\n", + " \"train\": IidPartitioner(num_partitions=10),\n", + " },\n", + ")\n", + "partitioner_list.append(fds.partitioners[\"train\"])\n", + "\n", + "## DirichletPartitioner\n", + "fds = FederatedDataset(\n", + " dataset=\"cifar10\",\n", + " partitioners={\n", + " \"train\": DirichletPartitioner(\n", + " num_partitions=10,\n", + " partition_by=\"label\",\n", + " alpha=1.0,\n", + " min_partition_size=0,\n", + " ),\n", + " },\n", + ")\n", + 
"partitioner_list.append(fds.partitioners[\"train\"])\n", + "\n", + "## ShardPartitioner\n", + "fds = FederatedDataset(\n", + " dataset=\"cifar10\",\n", + " partitioners={\n", + " \"train\": ShardPartitioner(\n", + " num_partitions=10, partition_by=\"label\", num_shards_per_partition=2\n", + " )\n", + " },\n", + ")\n", + "partitioner_list.append(fds.partitioners[\"train\"])" + ] + }, + { + "cell_type": "markdown", + "id": "d18bae80", + "metadata": {}, + "source": [ + "Now let's visualize them side by side" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f2ee2864", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA5UAAAHlCAYAAABlFdg7AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy80BEi2AAAACXBIWXMAAA9hAAAPYQGoP6dpAACMIElEQVR4nOzdd1QUZ9sG8GtpSwdFUFCkKyp2xVgQE1Ek9t4Sxd7QoLHEGBUsIfYa0RiDRk3sJbGLLbHGHisqATFqhBiKIIKwz/eHH/u60pZ1l2Xh+p2z5zAzzzxzz87cAzfTJEIIASIiIiIiIiIV6Gk7ACIiIiIiItJdLCqJiIiIiIhIZSwqiYiIiIiISGUsKomIiIiIiEhlLCqJiIiIiIhIZSwqiYiIiIiISGUsKomIiIiIiEhlLCqJiIiIiIhIZSwqiYiIiIiISGUsKolITiKRICQkRNthvLeNGzfC09MThoaGsLa21nY4BQoMDISzs7NSbUNCQiCRSDQaz8mTJyGRSHDy5EmNLqc4ODs7o0OHDtoOg4iIqNRjUUn0lujoaIwYMQKurq4wNjaGpaUlmjdvjmXLliE9PV3b4ZES7t69i8DAQLi5uWHt2rX47rvv8m2bU6TlfExNTVGzZk189dVXSElJUVtMT548QUhICK5du1Zo25cvXyIkJKRUFHWquHHjBnr06AEnJycYGxujcuXKaNOmDVasWKHt0CgPJ0+eRLdu3VCpUiUYGRnBzs4OHTt2xK5du+RtYmNjIZFIsHDhQoX53s69tz99+vRRWMaBAwcgkUjg4OAAmUyWZxzOzs4KfZiZmcHb2xs//vhjnu3nzp2LTp06oWLFioX+M+3x48fo1asXrK2tYWlpic6dO+Ovv/4qwrdERFT6GWg7AKKSYv/+/ejZsyekUikGDBgALy8vZGZm4vTp05g0aRJu3bpVYIFSGqSnp8PAQLcPCydPnoRMJsOyZcvg7u6u1Dzh4eEwNzdHamoqjhw5grlz5+L48eM4c+aMWs4MPnnyBKGhoXB2dka9evUUpq1du1bhD+WXL18iNDQUANCqVSuFtl999RW++OKL946nIC1btkR6ejqMjIw0upy8nD17Fh9++CGqVq2KYcOGoVKlSnj06BHOnz+PZcuWYezYscUeE+Vv5syZmDVrFjw8PDBixAg4OTnh+fPnOHDgALp3747NmzejX79+BfYxbtw4NG7cWGHcu2fuN2/eDGdnZ8TGxuL48ePw8/PLs6969erh888/BwA8ffoU33//PQYOHIiMjAwMGzZMoe1XX32FSpUqoX79+jh8+HC+8aWmpuLDDz9EcnIyvvzySxgaGmLJkiXw
9fXFtWvXYGNjU+D6ERGVFbr91yORmsTExKBPnz5wcnLC8ePHYW9vL582ZswYPHjwAPv379dihJojk8mQmZkJY2NjGBsbazuc9xYfHw8ARbrstUePHqhQoQIAYOTIkejevTt27dqF8+fPo2nTpirHkpWVle+ZlRyGhoZK92dgYKDxol9PT09r+8HcuXNhZWWFixcv5tp+OduVSoYdO3Zg1qxZ6NGjB3766SeF/XjSpEk4fPgwXr9+XWg/Pj4+6NGjR77T09LSsHfvXoSFhSEiIgKbN2/Ot6isXLkyPvnkE/lwYGAgXF1dsWTJklxFZUxMDJydnfHvv//C1tY23+WvWrUK9+/fxx9//CEvfgMCAuDl5YVFixbh66+/LnQdiYjKAl7+SgRg/vz5SE1Nxbp16xQKyhzu7u747LPP5MNZWVmYPXs23NzcIJVK4ezsjC+//BIZGRkK8+Xc03Xy5Ek0atQIJiYmqF27tvzSxl27dqF27dowNjZGw4YNcfXqVYX5AwMDYW5ujr/++gv+/v4wMzODg4MDZs2aBSGEQtuFCxeiWbNmsLGxgYmJCRo2bIgdO3bkWheJRIKgoCBs3rwZtWrVglQqxaFDh+TT3r4M7MWLFwgODoazszOkUins7OzQpk0bXLlyRaHP7du3o2HDhjAxMUGFChXwySef4PHjx3muy+PHj9GlSxeYm5vD1tYWEydORHZ2dj5bRtGqVavkMTs4OGDMmDFISkpS+L5nzpwJALC1tVX5HtGPPvoIwJs/PDMzMzFjxgw0bNgQVlZWMDMzg4+PD06cOKEwz9uX+C1dulS+b6xatUr+x+igQYPkl+etX79e/r3knJmJjY2V/4EbGhoqb5uzDnndU1nUffH06dPw9vaGsbExXF1dc10emNc9la1atYKXlxdu376NDz/8EKampqhcuTLmz5+f67t7+PAhOnXqBDMzM9jZ2WH8+PE4fPiwUvdpRkdHo1atWnn+Q8DOzi7XuE2bNsHb2xumpqYoV64cWrZsiSNHjuRqV9g6A0BSUhKCg4Ph6OgIqVQKd3d3zJs3T+GfAm9v42+//Raurq4wNTVF27Zt8ejRIwghMHv2bFSpUgUmJibo3Lkz/vvvv1zLOnjwIHx8fGBmZgYLCwu0b98et27dUmjzzz//YNCgQahSpQqkUins7e3RuXNnxMbGFvgdAsDx48fl/VtbW6Nz5864c+eOQpucfenBgwcIDAyEtbU1rKysMGjQILx8+bLQZUyfPh3ly5fHDz/8kOc/Rvz9/dVyP+vu3buRnp6Onj17ok+fPti1axdevXql1Ly2trbw9PREdHR0rmnK3se8Y8cONG7cWOFsqqenJ1q3bo1t27Yp1QcRUVnAopIIwK+//gpXV1c0a9ZMqfZDhw7FjBkz0KBBA/mlUGFhYbnuBQKABw8eoF+/fujYsSPCwsKQmJiIjh07YvPmzRg/fjw++eQThIaGIjo6Gr169cp1Zis7Oxvt2rVDxYoVMX/+fDRs2BAzZ86UF085li1bhvr162PWrFn4+uuvYWBggJ49e+Z5hvX48eMYP348evfujWXLluX7B9bIkSMRHh6O7t27Y9WqVZg4cSJMTEwU/kBdv349evXqBX19fYSFhWHYsGHYtWsXWrRooVDw5ayLv78/bGxssHDhQvj6+mLRokVKXVYcEhKCMWPGwMHBAYsWLUL37t2xZs0atG3bVn5GZOnSpejatSuAN5e0bty4Ed26dSu073fl/BFqY2ODlJQUfP/992jVqhXmzZuHkJAQJCQkwN/fP897JCMiIrBixQoMHz4cixYtQteuXTFr1iwAwPDhw7Fx40Zs3LgRLVu2zDWvra0twsPDAQBdu3aVty1oHYq6L/bo0QNt2rTBokWLUK5cOQQGBuYqaPKSmJiIdu3aoW7duli0aBE8PT0xZcoUHDx4UN4mLS0NH330ESIjIzFu3DhMmzYNZ8+exZQpUwrtHwCcnJxw+fJl3Lx5s9C2oaGh+PTT
T2FoaIhZs2YhNDQUjo6OOH78eJHX+eXLl/D19cWmTZswYMAALF++HM2bN8fUqVMxYcKEXMvevHkzVq1ahbFjx+Lzzz/HqVOn0KtXL3z11Vc4dOgQpkyZguHDh+PXX3/FxIkTFebduHEj2rdvD3Nzc8ybNw/Tp0/H7du30aJFC4WCsXv37ti9ezcGDRqEVatWYdy4cXjx4gXi4uIK/F4iIyPh7++P+Ph4hISEYMKECTh79iyaN2+eZ0Haq1cvvHjxAmFhYejVqxfWr18vv/w6P/fv38fdu3fRpUsXWFhYFNi2MC9evMC///6r8Hn7GLh582Z8+OGHqFSpEvr06YMXL17g119/VarvrKws/P333yhXrpxKsclkMvz5559o1KhRrmne3t6Ijo7GixcvVOqbiKjUEURlXHJysgAgOnfurFT7a9euCQBi6NChCuMnTpwoAIjjx4/Lxzk5OQkA4uzZs/Jxhw8fFgCEiYmJePjwoXz8mjVrBABx4sQJ+biBAwcKAGLs2LHycTKZTLRv314YGRmJhIQE+fiXL18qxJOZmSm8vLzERx99pDAegNDT0xO3bt3KtW4AxMyZM+XDVlZWYsyYMfl+F5mZmcLOzk54eXmJ9PR0+fh9+/YJAGLGjBm51mXWrFkKfdSvX180bNgw32UIIUR8fLwwMjISbdu2FdnZ2fLxK1euFADEDz/8IB83c+ZMAUDhu8lPTtuoqCiRkJAgYmJixJo1a4RUKhUVK1YUaWlpIisrS2RkZCjMl5iYKCpWrCgGDx4sHxcTEyMACEtLSxEfH6/Q/uLFiwKAiIiIyBXDwIEDhZOTk3w4ISEh13Z4N94cquyLv/32m3xcfHy8kEql4vPPP5ePO3HiRK790NfXVwAQP/74o3xcRkaGqFSpkujevbt83KJFiwQAsWfPHvm49PR04enpmavPvBw5ckTo6+sLfX190bRpUzF58mRx+PBhkZmZqdDu/v37Qk9PT3Tt2lVhfxDiTX4UdZ1nz54tzMzMxL179xT6+uKLL4S+vr6Ii4sTQvxvG9va2oqkpCR5u6lTpwoAom7duuL169fy8X379hVGRkbi1atXQgghXrx4IaytrcWwYcMUlvPPP/8IKysr+fjExEQBQCxYsKDA7ysv9erVE3Z2duL58+fycdevXxd6enpiwIAB8nE5+9Lb+7AQQnTt2lXY2NgUuIy9e/cKAGLJkiVKxZTzvb29Pjn7WV6fmJgYIYQQz549EwYGBmLt2rXy+Zo1a5bnsdrJyUm0bdtWJCQkiISEBHHjxg3x6aefCgAFHsMKyrecae8es4QQ4ttvvxUAxN27d5X6DoiISjueqaQyL+cpn8r+x/3AgQMAkOsMRs4DIt49M1izZk2F+/KaNGkC4M0lllWrVs01Pq+nCgYFBcl/zrl8NTMzE5GRkfLxJiYm8p8TExORnJwMHx+fXJeqAoCvry9q1qxZyJq+uS/xwoULePLkSZ7TL126hPj4eIwePVrhPrz27dvD09Mzz7OkI0eOVBj28fEp9EmKkZGRyMzMRHBwMPT0/nfYGjZsGCwtLd/7ftfq1avD1tYWLi4uGDFiBNzd3bF//36YmppCX19f/tAamUyG//77D1lZWWjUqFGe32337t0LvEdLnVTZF318fOTDtra2qF69ulJPsjQ3N1e4X83IyAje3t4K8x46dAiVK1dGp06d5OOMjY1z3c+WnzZt2uDcuXPo1KkTrl+/jvnz58Pf3x+VK1fGL7/8Im+3Z88eyGQyzJgxQ2F/AJDr8mBl1nn79u3w8fFBuXLlFM6Y+fn5ITs7G7/99ptCnz179oSVlZV8OCd3P/nkE4V7Xps0aYLMzEz5peBHjx5FUlIS+vbtq7AcfX19NGnSRH5JtYmJCYyMjHDy5EkkJiYq9d0Bbx5Oc+3aNQQGBqJ8+fLy8XXq1EGbNm3k+8vb8srH58+fF/j046IeMwsyY8YMHD16VOFTqVIlAMCWLVugp6eH7t27y9v37dsX
Bw8ezPN7OXLkCGxtbWFra4vatWtj48aNGDRoEBYsWKBSbDlP/JZKpbmm5Rzv+FRwIqI3+KAeKvMsLS0BQOnLmB4+fAg9Pb1cTxatVKkSrK2t8fDhQ4XxbxeOAOR/jDo6OuY5/t0/lvT09ODq6qowrlq1agCgcDnbvn37MGfOHFy7dk3hfrq8nl7q4uKS7/q9bf78+Rg4cCAcHR3RsGFDfPzxxxgwYIA8npx1rV69eq55PT09cfr0aYVxxsbGuQqucuXKFfqHc37LMTIygqura67vvKh27twJS0tLGBoaokqVKnBzc1OYvmHDBixatAh3795VePhIXt+jst+tOrzvvggo9/0DQJUqVXLtS+XKlcOff/6pEI+bm1uudso+hRcAGjdujF27diEzMxPXr1/H7t27sWTJEvTo0QPXrl1DzZo1ER0dDT09PaX+MaLMOt+/fx9//vlnvv8MePchQarm9P379wH8757dd+Uci6RSKebNm4fPP/8cFStWxAcffIAOHTpgwIAB8oIrLwXlY40aNXD48GGkpaXBzMws33XJuVQ0MTFRHk9+carj0s/atWvn++CdnHtmnz9/jufPnwMA6tevj8zMTGzfvh3Dhw9XaN+kSRPMmTMH2dnZuHnzJubMmYPExESVn2Sc84+6d+9PBiC/r/Ptf+YREZVlLCqpzLO0tISDg4NS93G9TdlXTejr6xdpvHjnATzK+P3339GpUye0bNkSq1atgr29PQwNDREREYGffvopV3tl/xDq1asXfHx8sHv3bhw5cgQLFizAvHnzsGvXLgQEBBQ5zvzWWdtatmwpf/rruzZt2oTAwEB06dIFkyZNgp2dnfz+0bweAKKNPzLfd19UZp9T5/6qDCMjI/kDUqpVq4ZBgwZh+/btue4lLowycctkMrRp0waTJ0/Os23OP3EK67OwZeXcK7hx48Y8i8O3z3IGBwejY8eO2LNnDw4fPozp06cjLCwMx48fR/369fNcjipU2a6enp4A3rxTVFPu37+PixcvAgA8PDxyTd+8eXOuorJChQryAtXf3x+enp7o0KEDli1blue9sYUpX748pFIpnj59mmtazjgHB4ci90tEVBqxqCQC0KFDB3z33Xc4d+5coa+QcHJygkwmw/3791GjRg35+GfPniEpKQlOTk5qjU0mk+Gvv/5S+MP23r17AP73BMOdO3fC2NgYhw8fVrhUKyIi4r2Xb29vj9GjR2P06NGIj49HgwYNMHfuXAQEBMjXNSoqKtfZl6ioKLV9F28v5+2ztpmZmYiJicn3TIc67NixA66urti1a5dC8VaU4qYo77osStvi3heVief27dsQQiisx4MHD96r35wHpeT8Ie/m5gaZTIbbt2/neu+nKtzc3JCamqrR/ShnOcCbJ9kqsyw3Nzd8/vnn+Pzzz3H//n3Uq1cPixYtwqZNm/Js/3aevOvu3buoUKGCwllKVVWrVg3Vq1fH3r17sWzZMpibm793n+/avHkzDA0NsXHjxlyF7+nTp7F8+XLExcXleSY6R/v27eHr64uvv/4aI0aMKPK66+npoXbt2rh06VKuaRcuXICrq6taLgEmIioNeE8lEYDJkyfDzMwMQ4cOxbNnz3JNj46OxrJlywAAH3/8MYA3Txp92+LFiwG8+UNG3VauXCn/WQiBlStXwtDQEK1btwbw5myDRCJReDVHbGws9uzZo/Iys7OzkZycrDDOzs4ODg4O8svBGjVqBDs7O6xevVrhErGDBw/izp07avsu/Pz8YGRkhOXLlyucQVm3bh2Sk5M18p3nyPmD9u3lXrhwAefOnVO6j5w/Zt99Gm5eTE1NlW6rjX2xIP7+/nj8+LHC/Y+vXr3C2rVrlZr/xIkTeZ4hy7kXMOeyzi5dukBPTw+zZs3K9bRkVc6c9urVC+fOncPhw4dzTUtKSkJWVlaR+8yLv78/LC0t8fXXX+f5DseEhAQAb55G++5rM9zc3GBhYZHnpZg57O3tUa9ePWzYsEFh
/7l58yaOHDki31/UITQ0FM+fP8fQoUPz/H6OHDmCffv2qdz/5s2b4ePjg969e6NHjx4Kn0mTJgEAfv7550L7mTJlCp4/f670PviuHj164OLFiwqFZVRUFI4fP46ePXuq1CcRUWnEM5VEePMH208//YTevXujRo0aGDBgALy8vJCZmYmzZ89i+/btCAwMBADUrVsXAwcOxHfffYekpCT4+vrijz/+wIYNG9ClSxd8+OGHao3N2NgYhw4dwsCBA9GkSRMcPHgQ+/fvx5dffim/B6x9+/ZYvHgx2rVrh379+iE+Ph7ffvst3N3dFe55K4oXL16gSpUq6NGjB+rWrQtzc3NERkbi4sWLWLRoEQDA0NAQ8+bNw6BBg+Dr64u+ffvi2bNn8teUjB8/Xi3fga2tLaZOnYrQ0FC0a9cOnTp1QlRUlPwdkG8/QEbdOnTogF27dqFr165o3749YmJisHr1atSsWROpqalK9eHm5gZra2usXr0aFhYWMDMzQ5MmTfK8/9LExAQ1a9bE1q1bUa1aNZQvXx5eXl7w8vLK1ba498XCjBgxAitXrkTfvn3x2Wefwd7eHps3b5Y/1KSws7Bjx47Fy5cv0bVrV3h6esrzb+vWrXB2dsagQYMAvLlHc9q0aZg9ezZ8fHzQrVs3SKVSXLx4EQ4ODggLCytS3JMmTcIvv/yCDh06IDAwEA0bNkRaWhpu3LiBHTt2IDY2Nt/Lo4vC0tIS4eHh+PTTT9GgQQP06dMHtra2iIuLw/79+9G8eXOsXLkS9+7dQ+vWrdGrVy/UrFkTBgYG2L17N549e5bnq2LetmDBAgQEBKBp06YYMmQI0tPTsWLFClhZWan0ztb89O7dGzdu3MDcuXNx9epV9O3bF05OTnj+/DkOHTqEY8eO5XnpvTIuXLiABw8eKDyg7G2VK1dGgwYNsHnz5kJfVxMQEAAvLy8sXrwYY8aMkb9Tc+PGjXj48KH8nZy//fYb5syZAwD49NNP5Wd9R48ejbVr16J9+/aYOHEiDA0NsXjxYlSsWFH+QCwiIgJfKUL0tnv37olhw4YJZ2dnYWRkJCwsLETz5s3FihUr5K8FEEKI169fi9DQUOHi4iIMDQ2Fo6OjmDp1qkIbId485r59+/a5loM8HnOf12P3Bw4cKMzMzER0dLRo27atMDU1FRUrVhQzZ87M9SqFdevWCQ8PDyGVSoWnp6eIiIjI9QqK/Jb99rScR+tnZGSISZMmibp16woLCwthZmYm6tatK1atWpVrvq1bt4r69esLqVQqypcvL/r37y/+/vtvhTY56/KuvGLMz8qVK4Wnp6cwNDQUFStWFKNGjRKJiYl59leUV4oU1FYmk4mvv/5aODk5CalUKurXry/27duX61UgeW2/t+3du1fUrFlTGBgYKLxe5N1+hBDi7NmzomHDhsLIyEhhm+T1Xb3vvujr6yt8fX3lw/m9UqRWrVq55s0r9r/++ku0b99emJiYCFtbW/H555+LnTt3CgDi/PnzeX43OQ4ePCgGDx4sPD09hbm5uTAyMhLu7u5i7Nix4tmzZ7na//DDD/L9rly5csLX11ccPXq0yOssxJvXfUydOlW4u7sLIyMjUaFCBdGsWTOxcOFC+StN8tvGOd/Z9u3bFcZHREQIAOLixYu52vv7+wsrKythbGws3NzcRGBgoLh06ZIQQoh///1XjBkzRnh6egozMzNhZWUlmjRpIrZt21bg95cjMjJSNG/eXJiYmAhLS0vRsWNHcfv2bYU2+e37OTHnvNajMMeOHROdO3cWdnZ2wsDAQNja2oqOHTuKvXv3ytsU9EqRd78zIYQYO3asACCio6PzXW5ISIgAIK5fvy6EyH9bCyHE+vXrc73SJ+c1OXl93n31zaNHj0SPHj2EpaWlMDc3Fx06dBD3799X5ushIiozJEJo6CkLRPTeAgMDsWPHDqXPiBGVREuXLsX48ePx999/o3LlytoOh4iIiNSM91QSEZHavPvevlevXmHN
mjXw8PBgQUlERFRK8Z5KIiJSm27duqFq1aqoV68ekpOTsWnTJty9exebN2/WdmhERESkISwqiYhIbfz9/fH9999j8+bNyM7ORs2aNbFlyxb07t1b26ERERGRhvCeSiIiIiIiIlIZ76kkIiIiIiIilbGoJCIiIiIiIpWxqCQiIiIiIiKVsagkIiIiIiIilbGoJCIiIiIiIpWxqCQiIiIiIiKVsagkIiIiIiIilbGoJCIiIiIiIpWxqCQiIiIiIiKVsagkIiIiIiIilbGoJCIiIiIiIpWxqCQiIiIiIiKVsagkIiIiIiIilbGoJCIiIiIiIpWxqCQiIiIiIiKVsagkIiIiIiIilbGoJCIiIiIiIpWxqCQiIiIiIiKVsagkIiIiIiIilbGoLGNCQkIgkUiUaiuRSBASEqLZgIqoKPGvX78eEokEsbGxmg2K6D0UZZ8GVMvLnFy4dOlSoW1btWqFVq1aFal/XcBjB5UkEokEQUFBWo2hJOb6yZMnIZFIcPLkyULbxsbGQiKRYP369RqPi4gKx6KylCvKH5PK9pXzMTY2RrVq1RAUFIRnz56pIdo3Xr58iZCQEKV+qQDA119/jT179qht+UTvI688cXBwgL+/P5YvX44XL15oO0SNyS8XeeygsuTGjRvo0aMHnJycYGxsjMqVK6NNmzZYsWKFtkMrUE6RlvPR19dH1apV0bVrV1y7dk2ty1q1apXSxeBPP/2EpUuXqnX5RKR+LCrLmK+++grp6env1cesWbOwceNGrFy5Es2aNUN4eDiaNm2Kly9fqiXGly9fIjQ0NM8/DPOKP78/DD/99FOkp6fDyclJLXERFUVOnoSHh2Ps2LEAgODgYNSuXRt//vmnvF1RczI9PR1fffWV2uNVh8KKNB47qLQ7e/YsGjVqhOvXr2PYsGFYuXIlhg4dCj09PSxbtkzb4Smlb9++2LhxI3744Qf069cPx48fxwcffKDWwjK/orJly5ZIT09Hy5Yt5ePyKyqdnJyQnp6OTz/9VG1xEZHqDLQdABUvAwMDGBi832YPCAhAo0aNAABDhw6FjY0NFi9ejL1796Jv374q9yuTyZCZmVlgm6LEr6+vD319fZXj0bSc9TU2NtZ2KKQBb+cJAEydOhXHjx9Hhw4d0KlTJ9y5cwcmJiZK7dNv7yu6vL/w2KEePHaUXHPnzoWVlRUuXrwIa2trhWnx8fHFGktaWhrMzMyKPF+DBg3wySefyIebN2+OTp06ITw8HGvWrHmvmF6+fAlTU9N8p+vp6Sm9X+dc9VCSFba+RKUJz1SWMXndV5SRkYHx48fD1tYWFhYW6NSpE/7++2+l+/zoo48AADExMQCAhQsXolmzZrCxsYGJiQkaNmyIHTt25Jov556SzZs3o1atWpBKpVi9ejVsbW0BAKGhofLLcHLuIXs3folEgrS0NGzYsEHeNjAwEED+90WtWrVKvjwHBweMGTMGSUlJCm1atWoFLy8v3L59Gx9++CFMTU1RuXJlzJ8/P9d6ZGRkYObMmXB3d4dUKoWjoyMmT56MjIyMQtf30KFDSn/PpPs++ugjTJ8+HQ8fPsSmTZsA5J2TBe0red1T+fjxYwwZMgQODg6QSqVwcXHBqFGjchVaGRkZmDBhAmxtbWFmZoauXbsiISGh0LiV2ccLysWCvg+Axw4eO0qP6Oho1KpVK1dBCQB2dna5xu3ZswdeXl6QSqWoVatWru368OFDjB49GtWrV4eJiQlsbGzQs2fPXPtmzj576tQpjB49GnZ2dqhSpYp8+nfffQc3NzeYmJjA29sbv//+u9Lr9G6e7t27F+3bt5cfb9zc3DB79mxkZ2crzJeTC5cvX0bLli1hamqKL7/8Es7Ozrh16xZOnTolz72cezvfvaeyVatW2L9/Px4+fChv6+zsDCD/eyqPHz8OHx8fmJmZwdraGp07d8adO3cU2uQcDx48eIDAwEBYW1vDysoKgwYNyvPKiU2bNqFhw4YwMTFB+fLl0adP
Hzx69Eip9SUqK3imkjB06FBs2rQJ/fr1Q7NmzXD8+HG0b99e6fmjo6MBADY2NgCAZcuWoVOnTujfvz8yMzOxZcsW9OzZE/v27cvV7/Hjx7Ft2zYEBQWhQoUKqFu3LsLDwzFq1Ch07doV3bp1AwDUqVMnz2Vv3LgRQ4cOhbe3N4YPHw4AcHNzyzfWkJAQhIaGws/PD6NGjUJUVBTCw8Nx8eJFnDlzBoaGhvK2iYmJaNeuHbp164ZevXphx44dmDJlCmrXro2AgAAAb84YdOrUCadPn8bw4cNRo0YN3LhxA0uWLMG9e/dyXVr37vrm/HKksuPTTz/Fl19+iSNHjmDYsGH5tlN2X3ny5Am8vb2RlJSE4cOHw9PTE48fP8aOHTvw8uVLGBkZyduOHTsW5cqVw8yZMxEbG4ulS5ciKCgIW7duzTcOZffxouYiwGMHjx2lj5OTE86dO4ebN2/Cy8urwLanT5/Grl27MHr0aFhYWGD58uXo3r074uLi5Dlx8eJFnD17Fn369EGVKlUQGxuL8PBwtGrVCrdv3851Fmz06NGwtbXFjBkzkJaWBgBYt24dRowYgWbNmiE4OBh//fUXOnXqhPLly8PR0bHQdXo3T9evXw9zc3NMmDAB5ubmOH78OGbMmIGUlBQsWLBAYd7nz58jICAAffr0wSeffIKKFSuiVatWGDt2LMzNzTFt2jQAQMWKFfNc9rRp05CcnIy///4bS5YsAQCYm5vnG2tkZCQCAgLg6uqKkJAQpKenY8WKFWjevDmuXLmSK2969eoFFxcXhIWF4cqVK/j+++9hZ2eHefPmydvMnTsX06dPR69evTB06FAkJCRgxYoVaNmyJa5evarwD4S81peozBBUqkVERAgA4uLFi0IIIWbOnCne3uzXrl0TAMTo0aMV5uvXr58AIGbOnJmrr8jISJGQkCAePXoktmzZImxsbISJiYn4+++/hRBCvHz5UqGvzMxM4eXlJT766COF8QCEnp6euHXrlsL4hISEXMvO8W78QghhZmYmBg4cmO+6x8TECCGEiI+PF0ZGRqJt27YiOztb3m7lypUCgPjhhx/k43x9fQUA8eOPP8rHZWRkiEqVKonu3bvLx23cuFHo6emJ33//XWHZq1evFgDEmTNnCl1fKl3ezbm8WFlZifr16wsh8t6nC9pX3s2NAQMGCD09vTyXJ5PJFGLy8/OTjxNCiPHjxwt9fX2RlJQkH+fr6yt8fX3lw0XZxwvLRR47eOwo7Y4cOSL09fWFvr6+aNq0qZg8ebI4fPiwyMzMVGgHQBgZGYkHDx7Ix12/fl0AECtWrJCPezcnhBDi3LlzufaxnH22RYsWIisrSz4+MzNT2NnZiXr16omMjAz5+O+++04AUMj1mJgYAUCEhoaKhIQE8c8//4iTJ0+K+vXrCwBi586d+cY0YsQIYWpqKl69eiUfl5MLq1evztW+Vq1aCsvOceLECQFAnDhxQj6uffv2wsnJKVfbnHgjIiLk4+rVqyfs7OzE8+fP5eOuX78u9PT0xIABA+Tjco4HgwcPVuiza9euwsbGRj4cGxsr9PX1xdy5cxXa3bhxQxgYGCiML2h9icoCXv5axh04cAAAMG7cOIXxwcHB+c7j5+cHW1tbODo6ok+fPjA3N8fu3btRuXJlAICJiYm8bWJiIpKTk+Hj44MrV67k6svX1xc1a9ZUw5oULjIyEpmZmQgODoae3v92/WHDhsHS0hL79+9XaG9ubq5wX4mRkRG8vb3x119/ycdt374dNWrUgKenJ/7991/5J+dyoRMnTij0WZzrSyWXubl5oU+BVWZfkclk2LNnDzp27Khw/2aOdy+rHT58uMI4Hx8fZGdn4+HDh/kuo6j7eEF47OCxo7Rr06YNzp07h06dOuH69euYP38+/P39UblyZfzyyy8Kbf38/BTOjtepUweWlpYK+8nbOfH69Ws8f/4c7u7usLa2zjMvhg0bpnA/8KVLlxAfH4+RI0cqXLUQGBgI
KyurPNdh5syZsLW1RaVKldCqVStER0dj3rx58rP/b8f04sUL/Pvvv/Dx8cHLly9x9+5dhb6kUikGDRpU4HemLk+fPsW1a9cQGBiI8uXLy8fXqVMHbdq0kf+987aRI0cqDPv4+OD58+dISUkBAOzatQsymQy9evVSyNNKlSrBw8MjV54W5/oSlTS8/LWMe/jwIfT09HJd9lW9evV85/n2229RrVo1GBgYoGLFiqhevbrCH1r79u3DnDlzcO3atVz3XL3LxcVFDWuhnJw/nN9dNyMjI7i6uub6w7pKlSq5Yi5XrpzCkzvv37+PO3fuyO/lete7D2YozvWlkis1NTXP+6vepsy+kpCQgJSUlEIvs8tRtWpVheFy5coBeFPA5aeo+3hBeOzgsaMsaNy4MXbt2oXMzExcv34du3fvxpIlS9CjRw9cu3ZN/s+Bd/MReLOfvJ2P6enpCAsLQ0REBB4/fgwhhHxacnJyrvnf3U9y9k0PDw+F8YaGhnB1dc0z/uHDh6Nnz57Q09ODtbW1/D7eHLdu3cJXX32F48ePy4uv/GKqXLmyQjGrSfnlKQDUqFEDhw8fzvXwooKOiZaWlrh//z6EELm+vxxvX/YOFO/6EpU0LCqpyLy9vfM8KwIAv//+Ozp16oSWLVti1apVsLe3h6GhISIiIvDTTz/lav/2fzxLmvye/vj2L3WZTIbatWtj8eLFebZ9936Vkry+VDz+/vtvJCcnw93dvcB2mthXlNmn31XUfbwgPHbw2FGWGBkZoXHjxmjcuDGqVauGQYMGYfv27Zg5cyYA5faTsWPHIiIiAsHBwWjatCmsrKwgkUjQp08fyGSyXPOqYz/x8PCAn59fntOSkpLg6+sLS0tLzJo1C25ubjA2NsaVK1cwZcqUXDGV9P22sG0gk8kgkUhw8ODBPNu+e39nSV9fIk1iUVnGOTk5QSaTITo6WuG/e1FRUSr1t3PnThgbG+Pw4cMK/9mMiIhQuo+8zkqoo33OO+eioqIU/kObmZmJmJiYfH+JFsTNzQ3Xr19H69atixw3lU0bN24EAPj7+793X7a2trC0tMTNmzffu6/8FGUff58c4LGDSrOcf6Y8ffq0SPPt2LEDAwcOxKJFi+TjXr16leupw/nJ2Xfv378vv7QaeHMpbUxMDOrWrVukeE6ePInnz59j165dCu+SzHkyrLKKss+rkqfvunv3LipUqFDkV6y4ublBCAEXFxdUq1atSPMSlTW8p7KMy3kS4fLlyxXG5/WiYWXo6+tDIpEoPFo8Nja2wBeivyvnaXbK/tI0MzNTqq2fnx+MjIywfPlyhf8Er1u3DsnJyUV64m2OXr164fHjx1i7dm2uaenp6fKn7xEBb57gOXv2bLi4uKB///7v3Z+enh66dOmCX3/9FZcuXco1vaAzkMoqyj6ubC7mhceO/+GxQ3edOHEiz7zLuZ+voFtL8qKvr5+rvxUrVuR6fUd+GjVqBFtbW6xevVrhFUPr169XKVdzzta9HVNmZiZWrVpVpH6KcqwwMzPL81Lfd9nb26NevXrYsGGDQt83b97EkSNH8PHHHxcpRgDo1q0b9PX1ERoamms7CCHw/PnzIvdJVFrxTGUZV69ePfTt2xerVq1CcnIymjVrhmPHjuHBgwcq9de+fXssXrwY7dq1Q79+/RAfH49vv/0W7u7uCvcTFcTExAQ1a9bE1q1bUa1aNZQvXx5eXl753jfWsGFDREZGYvHixXBwcICLiwuaNGmSq52trS2mTp2K0NBQtGvXDp06dUJUVBRWrVqFxo0bKzxYQ1mffvoptm3bhpEjR+LEiRNo3rw5srOzcffuXWzbtg2HDx/O93I/Kt0OHjyIu3fvIisrC8+ePcPx48dx9OhRODk54ZdfflHbS7u//vprHDlyBL6+vvJXUzx9+hTbt2/H6dOn83xfXlEUZR9XNhfzwmMHjx2lwdixY/Hy5Ut07doVnp6eyMzMxNmzZ7F161Y4OzsX+SEuHTp0wMaN
G2FlZYWaNWvi3LlziIyMlL/eozCGhoaYM2cORowYgY8++gi9e/dGTEwMIiIi8r2nsiDNmjVDuXLlMHDgQIwbNw4SiQQbN24s8j+wGjZsiPDwcMyZMwfu7u6ws7NTOJP6btutW7diwoQJaNy4MczNzdGxY8c82y5YsAABAQFo2rQphgwZIn+liJWVVa73+yrDzc0Nc+bMwdSpUxEbG4suXbrAwsICMTEx2L17N4YPH46JEycWuV+iUqnYnzdLxaqwV4oIIUR6eroYN26csLGxEWZmZqJjx47i0aNH+b5SpKBXJQghxLp164SHh4eQSqXC09NTRERE5PvahDFjxuTZx9mzZ0XDhg2FkZGRQhx59XP37l3RsmVLYWJiIgDIXxHw7msBcqxcuVJ4enoKQ0NDUbFiRTFq1CiRmJio0MbX11fUqlUrV1wDBw7M9WjzzMxMMW/ePFGrVi0hlUpFuXLlRMOGDUVoaKhITk5Wan2p9MjZ73I+RkZGolKlSqJNmzZi2bJlIiUlRaF9UXPj3bwUQoiHDx+KAQMGCFtbWyGVSoWrq6sYM2aM/BUC+eVuXo/vf/eVIkIov48Xlos8djgpjOOxo/Q5ePCgGDx4sPD09BTm5ubCyMhIuLu7i7Fjx4pnz57J2+W3TZ2cnBRec5OYmCgGDRokKlSoIMzNzYW/v7+4e/durnaF5diqVauEi4uLkEqlolGjRuK3337Lles5r+hYsGBBget45swZ8cEHHwgTExPh4OAgf21KXseSvHJBCCH++ecf0b59e2FhYaHwapO8jkmpqamiX79+wtraWgCQ51FerxQRQojIyEjRvHlzYWJiIiwtLUXHjh3F7du3FdrkHA8SEhIUxueX+zt37hQtWrQQZmZmwszMTHh6eooxY8aIqKgopdaXqCyQCKGG66OIiIiIiIioTOI9lURERERERKQyFpVERERERESkMhaVREREREREpDIWlURERERERKQyFpVERERERESkMhaVREREREREpDIDbQdQ3GQyGZ48eQILCwtIJBJth0NUJggh8OLFCzg4OEBPT/3/y2JeExU/TeY1c5qo+Gn6dzWVbmWuqHzy5AkcHR21HQZRmfTo0SNUqVJF7f0yr4m0RxN5zZwm0h5N/a6m0q3MFZUWFhYA3iSMpaWllqMhKhtSUlLg6Ogozz91Y14TFT9N5jVzmqj4afp3NZVuZa6ozLmMxtLSkr+oiIqZpi5jY14TaY8m8po5TaQ9vOScVMELpomIiIiIiEhlLCqJiIiIiIhIZSwqiYiIiIiISGUsKomIiIiIiEhlLCqJiIiIiIhIZSwqiYiIiIiISGVl7pUiObZZNYAp9NXWX2Nv9X+VVfyc1N6ntE01tfcpqdNArf0lmKi1OwDAlfib6u/0/0XGJaq9z133/lN7nzGXHqu9TxF+Xu19vo/3yeu8clgTOQi8Xx6+m2+q5sv75IQq+7wm9ml10URu6LKSlNfiv58hsjTwS4HU5vALzf1+JfVp5zRf2yFQKcczlURERERERKQyFpVERERERESkMhaVREREREREpDKdKypfvHiB4OBgODk5wcTEBM2aNcPFixe1HRYREREREVGZpHNF5dChQ3H06FFs3LgRN27cQNu2beHn54fHj/mgBSIiIiIiouKmU0Vleno6du7cifnz56Nly5Zwd3dHSEgI3N3dER4eru3wiIiIiIiIyhydeqVIVlYWsrOzYWxsrDDexMQEp0+fznOejIwMZGRkyIdTUlI0GiMREREREVFZolNnKi0sLNC0aVPMnj0bT548QXZ2NjZt2oRz587h6dOnec4TFhYGKysr+cfR0bGYoyYiIiIiIiq9dKqoBICNGzdCCIHKlStDKpVi+fLl6Nu3L/T08l6VqVOnIjk5Wf559OhRMUdMRERERERUeunU5a8A4ObmhlOnTiEtLQ0pKSmwt7dH79694erqmmd7qVQKqVRazFESERERERGVDTp3pjKHmZkZ7O3tkZiYiMOH
D6Nz587aDomIiIiIiKjM0bkzlYcPH4YQAtWrV8eDBw8wadIkeHp6YtCgQdoOjYiIiIiIqMzRuTOVycnJGDNmDDw9PTFgwAC0aNEChw8fhqGhobZDIyIiIiIiKnN07kxlr1690KtXL22HQURERERERNDBM5VERERERERUckiEEELbQRSnlJQUWFlZITk5GZaWltoOh6hM0HTeMa+Jip8m8445TVT8mHf0PnimkoiIiIiIiFTGopKIiIiIiIhUxqKSiIiIiIiIVMaikoiIiIiIiFTGopKIiIiIiIhUxqKSiIiIiIiIVMaikoiIiIiIiFTGopKIiIiIiIhUxqKSiIiIiIiIVMaikoiIiIiIiFTGopKIiIiIiIhUxqKSiIiIiIiIVMaikoiIiIiIiFTGopKIiIiIiIhUxqKSiIiIiIiIVMaikoiIiIiIiFRmoO0AtGWbVQOYQl+tfTb2Vv/XWcXPSe19AoC0TTW19iep00Ct/SWYqLU7uSvxN9XaX2Rcolr7A4Bd9/5Te585Yi49Vmt/Ivy8Wvt7X9EfecNcX7m8Liy31J0j2qZKjiaYqD9nNEETeahp6s5zdeZ2ScprTfyuJvX66nsvbYdASvhryE5th0ClHM9UEhERERERkcpYVBIREREREZHKWFQSERERERGRylhUEhERERERkcp0qqjMzs7G9OnT4eLiAhMTE7i5uWH27NkQQmg7NCIiIiIiojJJp57+Om/ePISHh2PDhg2oVasWLl26hEGDBsHKygrjxo3TdnhERERERERljk4VlWfPnkXnzp3Rvn17AICzszN+/vln/PHHH1qOjIiIiIiIqGzSqctfmzVrhmPHjuHevXsAgOvXr+P06dMICAjId56MjAykpKQofIiIiIiIiEg9dOpM5RdffIGUlBR4enpCX18f2dnZmDt3Lvr375/vPGFhYQgNDS3GKImIiIiIiMoOnSoqt23bhs2bN+Onn35CrVq1cO3aNQQHB8PBwQEDBw7Mc56pU6diwoQJ8uGUlBQ4OjoWV8hERERERDotOzsbr1+/1nYYVIz09fVhYGAAiUSiVHudKionTZqEL774An369AEA1K5dGw8fPkRYWFi+RaVUKoVUKi3OMImIiIiISoXU1FT8/ffffNtCGWRqagp7e3sYGRkV2lanisqXL19CT0/xNlB9fX3IZDItRUREREREVDplZ2fj77//hqmpKWxtbZU+a0W6TQiBzMxMJCQkICYmBh4eHrlqsHfpVFHZsWNHzJ07F1WrVkWtWrVw9epVLF68GIMHD9Z2aEREREREpcrr168hhICtrS1MTEy0HQ4VIxMTExgaGuLhw4fIzMyEsbFxge11qqhcsWIFpk+fjtGjRyM+Ph4ODg4YMWIEZsyYoe3QiIiIiIhKJZ6hLJsKOzv5Np0qKi0sLLB06VIsXbpU26EQERERERERdOw9lURERERERFSySEQZe5RTSkoKrKyskJycDEtLS22HQ1QmaDrvmNdExU+TececJip+eeXdq1evEBMTAxcXl0LvqdN169evR3BwMJKSkt6rH4lEgt27d6NLly5qiUubirL9eaaSiIiIiIh0XmBgYKko5nQRi0oiIiIiIiJSGYtKIiIiIiIq1RYvXozatWvDzMwMjo6OGD16NFJTU3O127NnDzw8PGBsbAx/f388evRIYfrevXvRoEEDGBsbw9XVFaGhocjKyspzmZmZmQgKCoK9vT2MjY3h5OSEsLAwjayftrGoJCIiIiKiUk1PTw/Lly/HrVu3sGHDBhw/fhyTJ09WaPPy5UvMnTsXP/74I86cOYOkpCT06dNHPv3333/HgAED8Nlnn+H27dtYs2YN1q9fj7lz5+a5zOXLl+OXX37Btm3bEBUVhc2bN8PZ2VmTq6k1OvVKESIiIiIioqIKDg6W/+zs7Iw5c+Zg5MiRWLVqlXz869evsXLlSjRp0gQAsGHDBtSoUQN//PEHvL29ERoaii+++AIDBw4EALi6umL27NmY
PHkyZs6cmWuZcXFx8PDwQIsWLSCRSODk5KTZldQinqkkIiIiIqJSLTIyEq1bt0blypVhYWGBTz/9FM+fP8fLly/lbQwMDNC4cWP5sKenJ6ytrXHnzh0AwPXr1zFr1iyYm5vLP8OGDcPTp08V+skRGBiIa9euoXr16hg3bhyOHDmi+RXVEhaVRERERERUasXGxqJDhw6oU6cOdu7cicuXL+Pbb78F8Oa+R2WlpqYiNDQU165dk39u3LiB+/fv5/nKjQYNGiAmJgazZ89Geno6evXqhR49eqhtvUoSXv5KRERERESl1uXLlyGTybBo0SLo6b05p7Zt27Zc7bKysnDp0iV4e3sDAKKiopCUlIQaNWoAeFMkRkVFwd3dXellW1paonfv3ujduzd69OiBdu3a4b///kP58uXVsGYlB4tKIiIiIiIqFZKTk3Ht2jWFcRUqVMDr16+xYsUKdOzYEWfOnMHq1atzzWtoaIixY8di+fLlMDAwQFBQED744AN5kTljxgx06NABVatWRY8ePaCnp4fr16/j5s2bmDNnTq7+Fi9eDHt7e9SvXx96enrYvn07KlWqBGtra02sulbx8lciIiIiIioVTp48ifr16yt8Nm7ciMWLF2PevHnw8vLC5s2b83y1h6mpKaZMmYJ+/fqhefPmMDc3x9atW+XT/f39sW/fPhw5cgSNGzfGBx98gCVLluT7AB4LCwvMnz8fjRo1QuPGjREbG4sDBw7Iz5aWJhIhhNB2EMUpJSUFVlZWSE5OhqWlpbbDISoTNJ13zGui4qfJvGNOExW/vPLu1atXiImJgYuLS573DFLpVpTtX/rKZCIiIiIiIio2LCqJiIiIiIhIZSwqiYiIiIiISGUsKomIiIiIiEhlLCqJiIiIiIhIZWX2PZXbrBrAFPpq66+xt2a+yip+eT+iWFXSNtXU2h8ASOo0UHufAJBgov4+r8TfVHufkXGJau1v173/1NofAMRceqz2PgFAhJ/XSL+qUndelzZFOU69e+zRxLHjbeo6jqhy3FDHcUEXjgMFefsYUZLyOvojb5jrM6dLMnX/nUKaYTL3gLZDoFKOZyqJiIiIiIhIZSwqiYiIiIiISGUsKomIiIiIiEhlZfaeSiIiIiIiKjrJqA+KdXkl6V7vt8XGxsLFxQVXr15FvXr1tB2OVunUmUpnZ2dIJJJcnzFjxmg7NCIiIiIiKgFatWqF4OBgbYdRpujUmcqLFy8iOztbPnzz5k20adMGPXv21GJURERERESkK4QQyM7OhoGBTpVCJZpOnam0tbVFpUqV5J99+/bBzc0Nvr6+2g6NiIiIiIi0LDAwEKdOncKyZcvkVzWuX78eEokEBw8eRMOGDSGVSnH69GkEBgaiS5cuCvMHBwejVatW8mGZTIb58+fD3d0dUqkUVatWxdy5c/NcdnZ2NgYPHgxPT0/ExcVpcC1LHp0tzzMzM7Fp0yZMmDABEokk33YZGRnIyMiQD6ekpBRHeEREREREVMyWLVuGe/fuwcvLC7NmzQIA3Lp1CwDwxRdfYOHChXB1dUW5cuWU6m/q1KlYu3YtlixZghYtWuDp06e4e/durnYZGRno27cvYmNj8fvvv8PW1lZ9K6UDdLao3LNnD5KSkhAYGFhgu7CwMISGhhZPUEREREREpDVWVlYwMjKCqakpKlWqBADyInDWrFlo06aN0n29ePECy5Ytw8qVKzFw4EAAgJubG1q0aKHQLjU1Fe3bt0dGRgZOnDgBKysrNa2N7tCpy1/ftm7dOgQEBMDBwaHAdlOnTkVycrL88+jRo2KKkIiIiIiISopGjRoVqf2dO3eQkZGB1q1bF9iub9++SEtLw5EjR8pkQQnoaFH58OFDREZGYujQoYW2lUqlsLS0VPgQEREREVHZYmZmpjCsp6cHIYTCuNevX8t/NjExUarfjz/+GH/++SfOnTv3/kHqKJ0sKiMiImBnZ4f27dtrOxQiIiIiIipBjIyMFN4YkR9bW1s8ffpU
Ydy1a9fkP3t4eMDExATHjh0rsJ9Ro0bhm2++QadOnXDq1CmVYtZ1OndPpUwmQ0REBAYOHMjHABMRERERkQJnZ2dcuHABsbGxMDc3h0wmy7PdRx99hAULFuDHH39E06ZNsWnTJty8eRP169cHABgbG2PKlCmYPHkyjIyM0Lx5cyQkJODWrVsYMmSIQl9jx45FdnY2OnTogIMHD+a677K007mqLDIyEnFxcRg8eLC2QyEiIiIiKnNE+Hlth1CgiRMnYuDAgahZsybS09MRERGRZzt/f39Mnz4dkydPxqtXrzB48GAMGDAAN27ckLeZPn06DAwMMGPGDDx58gT29vYYOXJknv0FBwdDJpPh448/xqFDh9CsWTONrF9JpHNFZdu2bXNd+0xERERERAQA1apVy3V/Y35vjAgNDS3wTRF6enqYNm0apk2blmuas7NzrrpkwoQJmDBhQtGD1nE6eU8lERERERERlQwSUcZO+6WkpMDKygrJycl8EixRMdF03jGviYqfJvOOOU1U/PLKu1evXiEmJgYuLi4wNjbWcoRU3Iqy/XmmkoiIiIiIiFTGopKIiIiIiIhUxqKSiIiIiIiIVMaikoiIiIiIiFTGopKIiIiIiIhUxqKSiIiIiIiIVMaikoiIiIiIiFRmoO0AiIiIiIhId7iu616sy/tryE619RUYGIikpCTs2bMn3zbOzs4IDg5GcHCw2pZb2rGoJCIiIiIi+n8XL16EmZmZtsPQKSwqiYiIiIiI/p+tra22Q9A5vKeSiIiIiIhKlR07dqB27dowMTGBjY0N/Pz8kJaWJp++cOFC2Nvbw8bGBmPGjMHr16/l05ydnbF06VL5sEQiQXh4OAICAmBiYgJXV1fs2LGjOFenxGNRSUREREREpcbTp0/Rt29fDB48GHfu3MHJkyfRrVs3CCEAACdOnEB0dDROnDiBDRs2YP369Vi/fn2BfU6fPh3du3fH9evX0b9/f/Tp0wd37twphrXRDbz8lYiIiIiISo2nT58iKysL3bp1g5OTEwCgdu3a8unlypXDypUroa+vD09PT7Rv3x7Hjh3DsGHD8u2zZ8+eGDp0KABg9uzZOHr0KFasWIFVq1ZpdmV0BM9UEhERERFRqVG3bl20bt0atWvXRs+ePbF27VokJibKp9eqVQv6+vryYXt7e8THxxfYZ9OmTXMN80zl/7CoJCIiIiKiUkNfXx9Hjx7FwYMHUbNmTaxYsQLVq1dHTEwMAMDQ0FChvUQigUwm00aopQaLSiIiIiIiKlUkEgmaN2+O0NBQXL16FUZGRti9e7fK/Z0/fz7XcI0aNd43zFKjzN5Tuc2qAUyhX3hDJTT2Vv/XWMXPSe19AoC0TTWN9Cup00Aj/SaYqL/PK/E31d5nZFxi4Y2KaNe9/9TeZ8ylx2rtT4SfL7xRMVJXXmsip/Ojaq5rKpc1paBjhKp5/nYuFzUHNZFfuqKw40BJyuuE9J/xylADvwhIbSpO4P1kuqAk5XVxuHDhAo4dO4a2bdvCzs4OFy5cQEJCAmrUqIE///xTpT63b9+ORo0aoUWLFti8eTP++OMPrFu3Ts2R664yW1QSEREREVHR/TVkp7ZDKJClpSV+++03LF26FCkpKXBycsKiRYsQEBCArVu3qtRnaGgotmzZgtGjR8Pe3h4///wzatasqebIdReLSiIiIiIiKjVq1KiBQ4cO5Tktr1eHvP1OSgCIjY3N1cbBwQFHjhxRQ3SlE++pJCIiIiIiIpWxqCQiIiIiIiKV6VxR+fjxY3zyySewsbGBiYkJateujUuXLmk7LCIiIiIiKoWEEOjSpYu2wyjRdOqeysTERDRv3hwffvghDh48CFtbW9y/fx/lypXTdmhERERERERlkk4VlfPmzYOjoyMiIiLk41xcXAqcJyMjAxkZGfLhlJQUjcVHRERERERU1ujU5a+//PILGjVqhJ49e8LOzg7169fH2rVrC5wnLCwMVlZW8o+jo2MxRUtERERERFT6
6VRR+ddffyE8PBweHh44fPgwRo0ahXHjxmHDhg35zjN16lQkJyfLP48ePSrGiImIiIiIiEo3nbr8VSaToVGjRvj6668BAPXr18fNmzexevVqDBw4MM95pFIppFJpcYZJRERERERUZujUmUp7e3vUrFlTYVyNGjUQFxenpYiIiIiIiIjKNp06U9m8eXNERUUpjLt37x6cnJy0FBERERERUdky8fdhxbq8hT4FP0PlXa1atUK9evWwdOlSzQREuejUmcrx48fj/Pnz+Prrr/HgwQP89NNP+O677zBmzBhth0ZERERERFQm6VRR2bhxY+zevRs///wzvLy8MHv2bCxduhT9+/fXdmhERERERFQKZWZmajuEEk+nikoA6NChA27cuIFXr17hzp07GDaseE+/ExERERFRySaTyTB58mSUL18elSpVQkhIiHxaXFwcOnfuDHNzc1haWqJXr1549uyZfHpISAjq1auH77//Hi4uLjA2NgYA7NixA7Vr14aJiQlsbGzg5+eHtLQ0+Xzff/89atSoAWNjY3h6emLVqlXFtr7apvI9lampqYiNjcWLFy9gYWEBFxcXmJmZqTM2IiIiIiKiItuwYQMmTJiACxcu4Ny5cwgMDETz5s3RunVreUF56tQpZGVlYcyYMejduzdOnjwpn//BgwfYuXMndu3aBX19fTx9+hR9+/bF/Pnz0bVrV7x48QK///47hBAAgM2bN2PGjBlYuXIl6tevj6tXr2LYsGEwMzPL9y0VpUmRi8pDhw5h7ty5OH/+PGQymXy8vr4+mjVrhmnTpqFNmzZqDVITeiVfgaWlpbbDoELYaaDPdhp4rpMm+lzoo/4+MUQDfZYgzGvdpGqev513Rc1BjeSXrtCh44CtSV9YmjCnSzIRPkDbIRDlqU6dOpg5cyYAwMPDAytXrsSxY8cAADdu3EBMTAwcHR0BAD/++CNq1aqFixcvonHjxgDeXPL6448/wtbWFgBw5coVZGVloVu3bvKHhNauXVu+vJkzZ2LRokXo1q0bAMDFxQW3b9/GmjVrWFS+a8mSJZg4cSL09fXRqlUreHl5wdzcHKmpqbhx4wZ+++03BAQEYMmSJRg7dqymYiYiIiIiIspXnTp1FIbt7e0RHx+PO3fuwNHRUV5QAkDNmjVhbW2NO3fuyItKJycneUEJAHXr1kXr1q1Ru3Zt+Pv7o23btujRowfKlSuHtLQ0REdHY8iQIQq35mVlZcHKykrDa1oyKF1U3rlzB1OmTMEHH3yALVu2KGyIHHFxcejbty8mTpyINm3awNPTU63BEhERERERFcbQ0FBhWCKRKFxlWZh3b+vT19fH0aNHcfbsWRw5cgQrVqzAtGnTcOHCBZiamgIA1q5diyZNmuSaryxQ+kE9a9asgbm5Ofbt25dnQQkAVatWxa+//gozMzOsXVu098kQERERERFpUo0aNfDo0SM8evRIPu727dtISkpCzZo1C5xXIpGgefPmCA0NxdWrV2FkZITdu3ejYsWKcHBwwF9//QV3d3eFj4uLi6ZXqURQ+kzl6dOn0bNnT5QrV67AduXLl0fPnj1x6tSp9w6OiIiIiIhIXfz8/FC7dm30798fS5cuRVZWFkaPHg1fX180atQo3/kuXLiAY8eOoW3btrCzs8OFCxeQkJCAGjVqAABCQ0Mxbtw4WFlZoV27dsjIyMClS5eQmJiICRMmFNfqaY3SRWVMTAwGDx6sVNu6detix44dKgdFREREREQl00If3b0iUSKRYO/evRg7dixatmwJPT09tGvXDitWrChwPktLS/z2229YunQpUlJS4OTkhEWLFiEgIAAAMHToUJiammLBggWYNGkSzMzMULt2bQQHBxfDWmmf0kVlSkqK0jeaWlpaIiUlReWgiIiIiIiIVPH2q0Fy7NmzR/5z1apVsXfv3nznDwkJUXivJfDmstlDhw4VuNx+/fqhX79+RQm11FD6nsrs7GxIJBKl2hb1RlgiIiIiIiLSTUV6
pciPP/6I8+fPF9ru3r17KgdEREREREREuqNIReWRI0dw5MgRpdoqe1aTiIiIiIiIdJfSRSUvZyUiIiIiIqJ3KX1PJREREREREdG7WFQSERERERGRypS+/LVTp05F6jjnHTBERERERERUeildVP75559FevgOH9RDRERERERU+ildVMbGxmowDCIiIiIiovcnhMCIESOwY8cOJCYm4urVq6hXr562wyrVivRKESIiIiIiKtsOPZxcrMtr5zS/SO0PHTqE9evX4+TJk3B1dUWFChU0FBnlKLNF5TarBjCF/nv309hb/V9hFT8ntfeZQ9qmmtr7lNRpoPY+ASDBRP19Xom/qdb+IuMS1dofAOy695/a+4y59FjtfQKACD+vkX5Vpa68Logmcl5T3vdYUtKPF5o4RhQkv+OHJo4Dynr7eKGuPC9JeZ0+qzsMpYbaDoMKUMu1mBORVPLXkJ3aDqFYRUdHw97eHs2aNctzemZmJoyMjIo5qtKNT38lIiIiIqJSITAwEGPHjkVcXBwkEgmcnZ3RqlUrBAUFITg4GBUqVIC/vz8A4NSpU/D29oZUKoW9vT2++OILZGVlyft68eIF+vfvDzMzM9jb22PJkiVo1aoVgoODtbR2JReLSiIiIiIiKhWWLVuGWbNmoUqVKnj69CkuXrwIANiwYQOMjIxw5swZrF69Go8fP8bHH3+Mxo0b4/r16wgPD8e6deswZ84ceV8TJkzAmTNn8Msvv+Do0aP4/fffceXKFW2tWommO9dxERERERERFcDKygoWFhbQ19dHpUqV5OM9PDwwf/7/7s2cNm0aHB0dsXLlSkgkEnh6euLJkyeYMmUKZsyYgbS0NGzYsAE//fQTWrduDQCIiIiAg4NDsa+TLtCpM5UhISGQSCQKH09PT22HRUREREREJVjDhg0Vhu/cuYOmTZsqvAaxefPmSE1Nxd9//42//voLr1+/hre3t3y6lZUVqlevXmwx6xKdO1NZq1YtREZGyocNDHRuFYiIiIiIqBiZmZlpO4RSTeWK7PDhw1i3bh3++usvJCYmQgihMF0ikSA6Ovq9A3yXgYGBwqlsIiIiIiKioqhRowZ27twJIYT8bOWZM2dgYWGBKlWqoFy5cjA0NMTFixdRtWpVAEBycjLu3buHli1bajP0EkmlonLBggX44osvULFiRXh7e6N27drqjitf9+/fh4ODA4yNjdG0aVOEhYXJN3ReMjIykJGRIR9OSUkpjjCJiIiIiKiEGj16NJYuXYqxY8ciKCgIUVFRmDlzJiZMmAA9PT1YWFhg4MCBmDRpEsqXLw87OzvMnDkTenp6CpfM0hsqFZXLli3DRx99hAMHDsDQsPjeH9WkSROsX78e1atXx9OnTxEaGgofHx/cvHkTFhYWec4TFhaG0NDQYouRiIiIiIhKtsqVK+PAgQOYNGkS6tati/Lly2PIkCH46quv5G0WL16MkSNHokOHDrC0tMTkyZPx6NEjGBsbazHykkmlojIxMRE9evQo1oISAAICAuQ/16lTB02aNIGTkxO2bduGIUOG5DnP1KlTMWHCBPlwSkoKHB0dNR4rEREREVFp1M5pfuGNtCg4OFjhXZInT57Ms52vry/++OOPfPuxsLDA5s2b5cNpaWkIDQ3F8OHD1RVqqaFSUent7Y2oqCh1x1Jk1tbWqFatGh48eJBvG6lUCqlUWoxRERERERGRrrt69Sru3r0Lb29vJCcnY9asWQCAzp07azmykkelV4qsWrUKu3btwk8//aTueIokNTUV0dHRsLe312ocRERERERU+ixcuBB169aFn58f0tLS8Pvvv6NChQraDqvEUelMZe/evZGVlYVPP/0Uo0aNQpUqVaCvr6/QRiKR4Pr162oJMsfEiRPRsWNHODk54cmTJ5g5cyb09fXRt29ftS6HiIiIiIjKtvr16+Py5cvaDkMnqFRUli9fHjY2NvDw8FB3PAX6+++/0bdvXzx//hy2trZo0aIFzp8/D1tb
22KNg4iIiIiIiN5QqajM72ZXTduyZYtWlktERERERER5U+meSiIiIiIiIiJAxTOVAJCdnY1NmzZh//79ePjwIQDAyckJHTp0QP/+/XPdY0lERERERESlj0QIIYo6U3JyMvz9/XHx4kVYWFjA1dUVABATE4OUlBR4e3vj8OHDsLS0VHvA7yslJQVWVlZITk4ukfERlUaazjvmNVHx02TeMaeJil9eeffq1SvExMTAxcUFxsbGWo6QiltRtr9Kl79OmzYNly9fxooVK5CQkIArV67gypUriI+Px8qVK3Hp0iVMmzZNpeCJiIiIiIhId6hUVO7evRujR4/G6NGjYWhoKB9vaGiIUaNGYdSoUdi5c6fagiQiIiIiInofrVq1QnBwsLbDKJVUuqfy+fPnqF69er7TPT098d9//6kcFBERERERlUzx6T8W6/LsTAYU6/Ko6FQ6U+nu7o5ffvkl3+m//PIL3NzcVA6KiIiIiIiIdINKReXo0aNx5MgRfPzxxzhy5AhiY2MRGxuLw4cPo3379jh69CiCgoLUHSsREREREVGh0tLSMGDAAJibm8Pe3h6LFi1SmJ6YmIgBAwagXLlyMDU1RUBAAO7fv6/QZu3atXB0dISpqSm6du2KxYsXw9rauhjXQneodPnr6NGjER8fj2+++QaHDx9WmGZoaIgZM2Zg1KhRagmQiIiIiIioKCZNmoRTp05h7969sLOzw5dffokrV66gXr16AIDAwEDcv38fv/zyCywtLTFlyhR8/PHHuH37NgwNDXHmzBmMHDkS8+bNQ6dOnRAZGYnp06drd6VKMJXfUxkSEoKgoCBERkYqvKfSz88PFSpUUFuAREREREREykpNTcW6deuwadMmtG7dGgCwYcMGVKlSBQDkxeSZM2fQrFkzAMDmzZvh6OiIPXv2oGfPnlixYgUCAgIwceJEAEC1atVw9uxZ7Nu3TzsrVcKpXFQCQIUKFdCnTx91xUJERERERPReoqOjkZmZiSZNmsjHlS9fXv6g0Tt37sDAwEBhuo2NDapXr447d+4AAKKiotC1a1eFfr29vVlU5kOpojIuLg4AULVqVYXhwuS0JyIiIiIiotJJqaLS2dkZEokE6enpMDIykg8XJjs7+70DJCIiIiIiUpabmxsMDQ1x4cIF+UmuxMRE3Lt3D76+vqhRowaysrJw4cIF+eWvz58/R1RUFGrWrAkAqF69Oi5evKjQ77vD9D9KFZU//PADJBIJDA0NFYaJiIiIiIhKEnNzcwwZMgSTJk2CjY0N7OzsMG3aNOjpvXnxhYeHBzp37oxhw4ZhzZo1sLCwwBdffIHKlSujc+fOAICxY8eiZcuWWLx4MTp27Ijjx4/j4MGDrIHyoVRRGRgYWOAwERERERFRSbFgwQKkpqaiY8eOsLCwwOeff47k5GT59IiICHz22Wfo0KEDMjMz0bJlSxw4cEB+Eq158+ZYvXo1QkND8dVXX8Hf3x/jx4/HypUrtbVKJZpECCGKOtPgwYMxYsQIhZtb3/bHH39g9erV+OGHH947QHVLSUmBlZUVkpOTYWlpqe1wiMoETecd85qo+Gky75jTRMUvr7x79eoVYmJi4OLiAmNjYy1HqH3Dhg3D3bt38fvvv2s7lGJRlO2vp8oC1q9fj+jo6Hynx8TEYMOGDap0TUREREREpHULFy7E9evX8eDBA6xYsQIbNmzAwIEDtR1WifRerxTJz5MnT2BiYqKJromIiIiIiDTujz/+wPz58/HixQu4urpi+fLlGDp0qLbDKpGULir37t2LvXv3yoe/++47REZG5mqXlJSEyMhING7cWD0REhERERERFbNt27ZpOwSdoXRRefv2bWzfvh0AIJFIcOHCBVy+fFmhjUQigZmZmfxJSSXZNqsGMIW+Wvpq7K2RE74AgCp+TmrtT9qmmlr7e5ukTgO19peg5pPdV+JvqrdDAJFxiWrvEwB23ftPrf3FXHqs1v5yiPDzGulXVdEfecNcXz15/a73
zUVpm2pqzxHKn7qPH2/TxLGkqN7n2PP28SWvY0NJymt1/q4mzdDk30CkPh4Xbmk7BCrllD4STJ06FVOnTgUA6OnpYd26dejXr5/GAiMiIiIiIqKST6V/L8lkMnXHQURERERERDpIpae/EhEREREREQFKFpV6enowMDBAZmamfFhfX7/Aj4GB5q+x/+abbyCRSBAcHKzxZREREREREVFuSlV+M2bMgEQikReKOcPadPHiRaxZswZ16tTRahxERERERERlmVJFZUhISIHDxS01NRX9+/fH2rVrMWfOHK3GQkREREREJV+rVq1Qr149LF26VNuhlDoqXaM6a9YsdOvWDV5eXnlOv3XrFnbu3IkZM2a8V3D5GTNmDNq3bw8/P79Ci8qMjAxkZGTIh1NSUjQSExERERFRWSD++7FYlycpP6BYl0dFp9KDekJCQvDnn3/mO/3mzZsIDQ1VOaiCbNmyBVeuXEFYWJhS7cPCwmBlZSX/ODo6aiQuIiIiIiIqu3KeP1MWaeTpr//99x+MjIzU3u+jR4/w2WefYfPmzTA2NlZqnqlTpyI5OVn+efTokdrjIiIiIiKikiMtLQ0DBgyAubk57O3tsWjRIoXpGRkZmDhxIipXrgwzMzM0adIEJ0+eVGhz+vRp+Pj4wMTEBI6Ojhg3bhzS0tLk052dnTF79mwMGDAAlpaWGD58eHGsWomk9OWvv/32m8IXvWvXLjx48CBXu6SkJGzduhW1a9dWS4Bvu3z5MuLj49GgQQP5uOzsbPz2229YuXIlMjIyoK+vrzCPVCqFVCpVeyxERERERFQyTZo0CadOncLevXthZ2eHL7/8EleuXEG9evUAAEFBQbh9+za2bNkCBwcH7N69G+3atcONGzfg4eGB6OhotGvXDnPmzMEPP/yAhIQEBAUFISgoCBEREfLlLFy4EDNmzMDMmTO1tKYlg9JF5YkTJ+SXtEokEuzatQu7du3Ks23NmjWxYsUK9UT4ltatW+PGjRsK4wYNGgRPT09MmTIlV0FJRERERERlS2pqKtatW4dNmzahdevWAIANGzagSpUqAIC4uDhEREQgLi4ODg4OAICJEyfi0KFDiIiIwNdff42wsDD0799f/upCDw8PLF++HL6+vggPD5dfNfnRRx/h888/L/6VLGGULionT56MoKAgCCFgZ2eH1atXo3v37gptJBIJTE1Nlb40tagsLCxyPRzIzMwMNjY2+T40iIiIiIiIyo7o6GhkZmaiSZMm8nHly5dH9erVAQA3btxAdnY2qlWrpjBfRkYGbGxsAADXr1/Hn3/+ic2bN8unCyEgk8kQExODGjVqAAAaNWqk6dXRCUoXlSYmJjAxMUFGRgaWLFmC2rVry790IiIiIiIiXZCamgp9fX1cvnw515WO5ubm8jYjRozAuHHjcs1ftWpV+c9mZmaaDVZHFPmVIkZGRpg8eTKWLVuGpk2baiKmInn3hloiIiIiIiq73NzcYGhoiAsXLsgLwMTERNy7dw++vr6oX78+srOzER8fDx8fnzz7aNCgAW7fvg13d/fiDF1nFfnprxKJBB4eHvj33381EQ8REREREZHKzM3NMWTIEEyaNAnHjx/HzZs3ERgYCD29N6VPtWrV0L9/fwwYMAC7du1CTEwM/vjjD4SFhWH//v0AgClTpuDs2bMICgrCtWvXcP/+fezduxdBQUHaXLUSq8hnKgHgyy+/xIQJE9CzZ0/5tcm6plfyFVhaWmo7DCqAnZr7a+ek5g411CcALMz7n2aqG6Lm/koot+N/MK8JgPqPH2/TVN4XVwwKx5cSfmzg72oiUtWCBQuQmpqKjh07wsLCAp9//jmSk5Pl0yMiIjBnzhx8/vnnePz4MSpUqIAPPvgAHTp0AADUqVMHp06dwrRp0+Dj4wMhBNzc3NC7d29trVKJJhFCiKLONG7cOBw7dgz37t1Dq1at4OzsDBMTE8WOJRIsW7ZMbYGqS0pKCqysrJCcnMxfVETFRNN5x7wm
Kn6azDvmNFHxyyvvXr16hZiYGLi4uGjsQZxUchVl+6t0pnLlypXyn48dO5Znm5JaVBIREREREZH6qFRUymQydcdBREREREREOqjID+ohIiIiIiIiysGikoiIiIiIiFSmclF58OBBtGnTBjY2NjAwMIC+vn6uDxEREREREZVuKhWVO3fuRIcOHfDs2TP06dMHMpkMffv2RZ8+fWBiYoI6depgxowZ6o6ViIiIiIiIShiVisqwsDB4e3vj6tWrCA0NBQAMHjwYmzdvxs2bN/H06VO4uLioNVAiIiIiIiIqeVQqKm/fvo0+ffpAX18fBgZvHiD7+vVrAICzszNGjx6NefPmqS9KIiIiIiIiKpFUKipNTU1hZGQEALC2toZUKsXTp0/l0ytWrIiYmBj1REhEREREREQllkpFZfXq1XH79m35cL169bBx40ZkZWXh1atX+Omnn1C1alW1BUlEREREREQlk4EqM3Xt2hXLly/HwoULIZVKMW3aNHTu3BnW1taQSCRIS0vDDz/8oO5YiYiIiIhIy2Qng4t1eXqtlhbr8kJCQrBnzx5cu3atWJery1QqKidOnIiJEyfKhzt06ICTJ09i165d0NfXR/v27fHhhx+qLUgiIiIiIiIqmYp0+eurV6+wdetWfPPNN/j+++8V7qP08fHBkiVLsHDhQhaURERERESkNTKZDPPnz4e7uzukUimqVq2KuXPnAgCmTJmCatWqwdTUFK6urpg+fbr8oaPr169HaGgorl+/DolEAolEgvXr12txTXSD0mcq4+Pj0axZM8TExEAIAeDNA3v27NkDPz8/jQVIRERERERUFFOnTsXatWuxZMkStGjRAk+fPsXdu3cBABYWFli/fj0cHBxw48YNDBs2DBYWFpg8eTJ69+6Nmzdv4tChQ4iMjAQAWFlZaXNVdILSReXs2bMRGxuL8ePH46OPPsKDBw8we/ZsjBgxAtHR0ZqMkYiIiIiISCkvXrzAsmXLsHLlSgwcOBAA4ObmhhYtWgAAvvrqK3lbZ2dnTJw4EVu2bMHkyZNhYmICc3NzGBgYoFKlSlqJXxcpXVQeOXIEAwYMwMKFC+XjKlasiH79+iEqKgrVq1fXSICass2qAUyhr7b+GnurdHtqgar4Oam9T2mbamrtT1KngVr7A4AEE7V3iSvxN9Xf6f+LjEtUa3+77v2n1v5yxFx6rPY+Rfh5tff5Pt7O65yc1EQeqVNBOamJ/FInTeRqWaDq8UjZY01+xxBljwElKa9lv0+BzEyq7TCoAPpbS87+QvkrSXldHO7cuYOMjAy0bt06z+lbt27F8uXLER0djdTUVGRlZcHS0rKYoyxdlL6nMi4uTl7d52jRogWEEHj27JnaAyMiIiIiIioqE5P8/+t57tw59O/fHx9//DH27duHq1evYtq0acjMzCzGCEsfpYvKjIwMGBsbK4zLGc7KylJvVERERERERCrw8PCAiYkJjh07lmva2bNn4eTkhGnTpqFRo0bw8PDAw4cPFdoYGRkhOzu7uMItFYp0zWZsbCyuXLkiH05OTgYA3L9/H9bW1rnaN2hQsi/dIiIiIiKi0sXY2BhTpkzB5MmTYWRkhObNmyMhIQG3bt2Ch4cH4uLisGXLFjRu3Bj79+/H7t27FeZ3dnZGTEwMrl27hipVqsDCwgJSKS/FL0iRisrp06dj+vTpucaPHj1aYVgIAYlEwgqfiIiIiIiK3fTp02FgYIAZM2bgyZMnsLe3x8iRIzFkyBCMHz8eQUFByMjIQPv27TF9+nSEhITI5+3evTt27dqFDz/8EElJSYiIiEBgYKDW1kUXKF1URkREaDIOpYSHhyM8PByxsbEAgFq1amHGjBkICAjQbmBERERERGWEXqul2g6hUHp6epg2bRqmTZuWa9r8+fMxf/58hXHBwcHyn6VSKXbs2KHpEEsVpYvKnMfxalOVKlXwzTffwMPDA0IIbNiwAZ07d8bVq1dRq1YtbYdHRERERERU5qj/PRga
1LFjR4XhuXPnIjw8HOfPn2dRSUREREREpAU6VVS+LTs7G9u3b0daWhqaNm2ab7uMjAxkZGTIh1NSUoojPCIiIiIiojJB6VeKlBQ3btyAubk5pFIpRo4cid27d6NmzZr5tg8LC4OVlZX84+joWIzREhERERERlW46V1RWr14d165dw4ULFzBq1CgMHDgQt2/fzrf91KlTkZycLP88evSoGKMlIiIiIiIq3XTu8lcjIyO4u7sDABo2bIiLFy9i2bJlWLNmTZ7tpVIp3ytDRERERESkITp3pvJdMplM4Z5JIiIiIiIiKj46daZy6tSpCAgIQNWqVfHixQv89NNPOHnyJA4fPqzt0IiIiIiIiMoknSoq4+PjMWDAADx9+hRWVlaoU6cODh8+jDZt2mg7NCIiIiIiojJJpy5/XbduHWJjY5GRkYH4+HhERkayoCQiIiIiIrlWrVohODg43+nOzs5YunRpkfsNCQlBvXr1VI6rNNOpM5VERERERKRd6dM+Ltblmcw9oNb+Ll68CDMzM7X2WdaV2aKyV/IVWFpaajsMyoOdBvps56SBTjXU90If9fYnN0RD/ZYgzOvipYlcLQtUPWYoO1++xxAdPAbo+cyDHnO6RBOttB0BUdHZ2toWOP3169cwNDQspmhKB526/JWIiIiIiKgwWVlZCAoKgpWVFSpUqIDp06dDCAEg9+WvEokE4eHh6NSpE8zMzDB37lwAwDfffIOKFSvCwsICQ4YMwatXr7SxKjqBRSUREREREZUqGzZsgIGBAf744w8sW7YMixcvxvfff59v+5CQEHTt2hU3btzA4MGDsW3bNoSEhODrr7/GpUuXYG9vj1WrVhXjGuiWMnv5KxERERERlU6Ojo5YsmQJJBIJqlevjhs3bmDJkiUYNmxYnu379euHQYMGyYf79OmDIUOGYMiQN/cOzJkzB5GRkTxbmQ+eqSQiIiIiolLlgw8+gEQikQ83bdoU9+/fR3Z2dp7tGzVqpDB8584dNGnSRGFc06ZN1R9oKcGikoiIiIiIyjQ+Dfb9sKgkIiIiIqJS5cKFCwrD58+fh4eHB/T19ZWav0aNGnn2QXljUUlERERERKVKXFwcJkyYgKioKPz8889YsWIFPvvsM6Xn/+yzz/DDDz8gIiIC9+7dw8yZM3Hr1i0NRqzb+KAeIiIiIiIqVQYMGID09HR4e3tDX18fn332GYYPH670/L1790Z0dDQmT56MV69eoXv37hg1ahQOHz6swah1l0TkvLCljEhJSYGVlRWSk5P5knSiYqLpvGNeExU/TeYdc5qo+OWVd69evUJMTAxcXFxgbGys5QipuBVl+/PyVyIiIiIiIlIZi0oiIiIiIiJSGYtKIiIiIiIiUhmLSiIiIiIiIlIZi0oiIiIiIiJSGYtKIiIiIiIiUhmLSiIiIiIiIlKZgbYD0JZtVg1gCn219dfYW71fZRU/J7X2l0PapppG+pXUaaD2PhNM1Nvflfib6u3w/0XGJWqk3133/lNrfzGXHqu1PwAQ4efV3uf7UHde59CV/H5fmjo+vOvt44Wqea6pfFYXdR0X3j0OaCKP31WS8joh/We8MlTzLwNSq5Kei/RGO6f52g6BSjmeqSQiIiIiIiKVsagkIiIiIiIqwdavXw9ra+sC24SEhKBevXry4cDAQHTp0kWjceUos5e/EhERERFR0d1vUqtYl+dx4VaxLg94U8QFBwcjKSmp2JetqokTJ2Ls2LFaWTaLSiIiIiIiIh1nbm4Oc3NzrSxbpy5/DQsLQ+PGjWFhYQE7Ozt06dIFUVFR2g6LiIiIiIhKkEOHDqFFixawtraGjY0NOnTogOjoaADAyZMnIZFIFM5CXrt2DRKJBLGxsTh58iQGDRqE5ORkSCQSSCQShISEAAASExMxYMAAlCtXDqampggICMD9+/fl/eRcprpv3z5Ur14dpqam6NGjB16+fIkNGzbA2dkZ5cqVw7hx45CdnS2fr7B+c+zZswceHh4wNjaG
v78/Hj16JJ/27uWv75LJZAgLC4OLiwtMTExQt25d7NixQ8VvWJFOFZWnTp3CmDFjcP78eRw9ehSvX79G27ZtkZaWpu3QiIiIiIiohEhLS8OECRNw6dIlHDt2DHp6eujatStkMlmh8zZr1gxLly6FpaUlnj59iqdPn2LixIkA3tyneOnSJfzyyy84d+4chBD4+OOP8fr1a/n8L1++xPLly7FlyxYcOnQIJ0+eRNeuXXHgwAEcOHAAGzduxJo1axQKOmX7nTt3Ln788UecOXMGSUlJ6NOnj9LfSVhYGH788UesXr0at27dwvjx4/HJJ5/g1KlTSveRH526/PXQoUMKw+vXr4ednR0uX76Mli1baikqIiIiIiIqSbp3764w/MMPP8DW1ha3b98udF4jIyNYWVlBIpGgUqVK8vH379/HL7/8gjNnzqBZs2YAgM2bN8PR0RF79uxBz549AQCvX79GeHg43NzcAAA9evTAxo0b8ezZM5ibm6NmzZr48MMPceLECfTu3btI/a5cuRJNmjQBAGzYsAE1atTAH3/8AW9v7wLXKSMjA19//TUiIyPRtGlTAICrqytOnz6NNWvWwNfXt9DvpSA6VVS+Kzk5GQBQvnz5fNtkZGQgIyNDPpySkqLxuIiIiIiISHvu37+PGTNm4MKFC/j333/lZyjj4uJgamqqUp937tyBgYGBvKgDABsbG1SvXh137tyRjzM1NZUXlABQsWJFODs7K9zvWLFiRcTHxxepXwMDAzRu3Fg+7OnpCWtra9y5c6fQovLBgwd4+fIl2rRpozA+MzMT9evXV/YryJfOFpUymQzBwcFo3rw5vLy88m0XFhaG0NDQYoyMiIiIiIi0qWPHjnBycsLatWvh4OAAmUwGLy8vZGZmyos7IYS8/duXmb4vQ0NDhWGJRJLnOGUuxVWX1NRUAMD+/ftRuXJlhWlSqfS9+9epeyrfNmbMGNy8eRNbtmwpsN3UqVORnJws/7x9MysREREREZUuz58/R1RUFL766iu0bt0aNWrUQGJiony6ra0tAODp06fycdeuXVPow8jISOFBOgBQo0YNZGVl4cKFC7mWVbNmTZXjVbbfrKwsXLp0ST4cFRWFpKQk1KhRo9Bl1KxZE1KpFHFxcXB3d1f4ODo6qhx7Dp08UxkUFIR9+/bht99+Q5UqVQpsK5VK1VJ9ExERERFRyVeuXDnY2Njgu+++g729PeLi4vDFF1/Ip+cUUiEhIZg7dy7u3buHRYsWKfTh7OyM1NRUHDt2DHXr1oWpqSk8PDzQuXNnDBs2DGvWrIGFhQW++OILVK5cGZ07d1Y5XmX7NTQ0xNixY7F8+XIYGBggKCgIH3zwQaGXvgKAhYUFJk6ciPHjx0Mmk6FFixZITk7GmTNnYGlpiYEDB6ocP6BjZyqFEAgKCsLu3btx/PhxuLi4aDskIiIiIiIqQfT09LBlyxZcvnwZXl5eGD9+PBYsWCCfbmhoiJ9//hl3795FnTp1MG/ePMyZM0ehj2bNmmHkyJHo3bs3bG1tMX/+fABAREQEGjZsiA4dOqBp06YQQuDAgQO5Lm8tKmX6NTU1xZQpU9CvXz80b94c5ubm2Lp1q9LLmD17NqZPn46wsDDUqFED7dq1w/79+9VSU0nE2xcTl3CjR4/GTz/9hL1796J69ery8VZWVjAxMVGqj5SUFFhZWWEt3GAKfbXF1thbvSd9q/g5qbW/HNI21TTSr6ROA7X3maDcJlXalfib6u3w/0XGJRbeSAW77v2n1v5iLj1Wa38AIMLPK9UuJ++Sk5NhaWmp9jg0ldc5dCW/35emjg/vevt4oWqeayqf1UVdx4V3jwOayON3lYS8zun7wT+rYWGp5l8GpFYlPRfpjXZO8wttk1dOv3r1CjExMXBxcYGxsbGmw6QSpijbX6fOVIaHhyM5ORmtWrWCvb29/FOUCp2IiIiIiIjUR6fuqdShk6pERERERERlgk6dqSQiIiIiIqKSRafuqVQHTd/b
RUS5Fdc9lcxrouJTHPdUMqeJig/vqaR3ldp7KomIiIiIiKhkYVFJREREREREKmNRSURERERERCpjUUlEREREREQqY1FJREREREREKmNRSUREREREZUZsbCwkEgmuXbv23n0FBgaiS5cu792PrjPQdgBERERERKQ7fpJUL9bl9RNRau3P0dERT58+RYUKFdTab1nGopKIiIiIiMoMfX19VKpUKd/pQghkZ2fDwIClkrJ4+SsREREREZUqhw4dQosWLWBtbQ0bGxt06NAB0dHRAHJf/nry5ElIJBIcPHgQDRs2hFQqxenTpxESEoJ69ephzZo1cHR0hKmpKXr16oXk5GSVlvv2snft2oUPP/wQpqamqFu3Ls6dO6fQz+nTp+Hj4wMTExM4Ojpi3LhxSEtLU/8XpSYsKomIiIiIqFRJS0vDhAkTcOnSJRw7dgx6enro2rUrZDJZvvN88cUX+Oabb3Dnzh3UqVMHAPDgwQNs27YNv/76Kw4dOoSrV69i9OjR773cadOmYeLEibh27RqqVauGvn37IisrCwAQHR2Ndu3aoXv37vjzzz+xdetWnD59GkFBQWr4ZjSD53SJiIiIiKhU6d69u8LwDz/8AFtbW9y+fRvm5uZ5zjNr1iy0adNGYdyrV6/w448/onLlygCAFStWoH379li0aFGel9AWtFwvLy/5+IkTJ6J9+/YAgNDQUNSqVQsPHjyAp6cnwsLC0L9/fwQHBwMAPDw8sHz5cvj6+iI8PBzGxsZF+zKKAc9UEhERERFRqXL//n307dsXrq6usLS0hLOzMwAgLi4u33kaNWqUa1zVqlXlBSUANG3aFDKZDFFReT88SNnl5pwJBQB7e3sAQHx8PADg+vXrWL9+PczNzeUff39/yGQyxMTEFL7yWsAzlUREREREVKp07NgRTk5OWLt2LRwcHCCTyeDl5YXMzMx85zEzMyu25RoaGsp/lkgkACC/RDY1NRUjRozAuHHjcvVftWrV945RE1hUEhERERFRqfH8+XNERUVh7dq18PHxAfDmwTeqiIuLw5MnT+Dg4AAAOH/+PPT09FC9eu7XqqhruQ0aNMDt27fh7u6uUszawKKSiIiIiIhKjXLlysHGxgbfffcd7O3tERcXhy+++EKlvoyNjTFw4EAsXLgQKSkpGDduHHr16pXn/ZTqWu6UKVPwwQcfICgoCEOHDoWZmRlu376No0ePYuXKlSqth6bxnkoiIiIiIio19PT0sGXLFly+fBleXl4YP348FixYoFJf7u7u6NatGz7++GO0bdsWderUwapVqzS63Dp16uDUqVO4d+8efHx8UL9+fcyYMUN+trQkkgghhLaDKE4pKSmwsrLCWrjBFPpq7buxt3pP/Fbxc1Jrf2+TtqmmkX4ldRqovc8EE7V3iSvxN9XeZ2Rcotr62nXvP7X1lSPm0mO19ynCzyvVLifvkpOTYWlpqfY4NJnX71J3nhcmr+OAJvJXUqeByrmmiXwqiDpzTZ0Ky1tN5KAmlIS8zuk7cd9IWJpJ1do3qddk/ZL73jz6n4U+awttk1dOv3r1CjExMXBxcSmRTxzVtJCQEOzZs0f+Psuypijbn2cqiYiIiIiISGUsKomIiIiIiEhlLCqJiIiIiIjeERISUmYvfS0qnSsqf/vtN3Ts2BEODg6QSCTYs2ePtkMiIiIiIiIqs3SuqExLS0PdunXx7bffajsUIiIiIqJSr4w915P+X1G2u869pzIgIAABAQHaDoOIiIiIqFTT13/zRPXMzEyYmGjgcfxUor18+RIAYGhoWGhbnSsqiyojIwMZGRny4ZSUFC1GQ0RERESkGwwMDGBqaoqEhAQYGhpCT0/nLnIkFQgh8PLlS8THx8Pa2lr+z4WClPqiMiwsDKGhodoOg4iIiIhIp0gkEtjb2yMmJgYPHz7UdjhUzKytrVGpUiWl2pb6onLq1KmYMGGCfDglJQWOjo5ajIiIiIiISDcYGRnBw8MDmZmZ2g6FipGhoaFS
ZyhzlPqiUiqVQiqVajsMIiIiIiKdpKenB2NjY22HQSUYL4wmIiIiIiIilencmcrU1FQ8ePBAPhwTE4Nr166hfPnyqFq1qhYjIyIiIiIiKnt0rqi8dOkSPvzwQ/lwzv2SAwcOxPr167UUFRERERERUdmkc0Vlq1at+AJWIiIiIiKiEoL3VBIREREREZHKWFQSERERERGRyiSijF1LmpKSAisrKyQnJ8PS0lLb4RCVCZrOO+Y1UfHTZN4xp4mKH/OO3gfPVBIREREREZHKWFQSERERERGRylhUEhERERERkcpYVBIREREREZHKWFQSERERERGRylhUEhERERERkcpYVBIREREREZHKWFQSERERERGRylhUEhERERERkcpYVBIREREREZHKWFQSERERERGRylhUEhERERERkcpYVBIREREREZHKWFQSERERERGRylhUEhERERERkcpYVBIREREREZHKDLQdgLZss2oAU+irpa/G3ur/Gqv4Oam1P2mbamrpR1KngVr6eVeCiUa6xZX4mxrpNzIuUSP9AsCue/+pvc+YS4/V3icAiPDzGulXVerMa1Vp4nhQVOo+fqhDUY9Byhxr3ue4kXNsUDWXNZGn2pDXsaEk5bX472eILA39giC1mHTrd22HQEpY6LNW2yFQKcczlURERERERKQyFpVERERERESkMhaVREREREREpDKdLCq//fZbODs7w9jYGE2aNMEff/yh7ZCIiIiIiIjKJJ0rKrdu3YoJEyZg5syZuHLlCurWrQt/f3/Ex8drOzQiIiIiIqIyR+eKysWLF2PYsGEYNGgQatasidWrV8PU1BQ//PCDtkMjIiIiIiIqc3SqqMzMzMTly5fh5+cnH6enpwc/Pz+cO3cuz3kyMjKQkpKi8CEiIiIiIiL10Kmi8t9//0V2djYqVqyoML5ixYr4559/8pwnLCwMVlZW8o+jo2NxhEpERERERFQm6FRRqYqpU6ciOTlZ/nn06JG2QyIiIiIiIio1DLQdQFFUqFAB+vr6ePbsmcL4Z8+eoVKlSnnOI5VKIZVKiyM8IiIiIiKiMkenzlQaGRmhYcOGOHbsmHycTCbDsWPH0LRpUy1GRkREREREVDbp1JlKAJgwYQIGDhyIRo0awdvbG0uXLkVaWhoGDRqk7dCIiIiIiIjKHJ0rKnv37o2EhATMmDED//zzD+rVq4dDhw7lengPERERERERaZ7OFZUAEBQUhKCgIG2HQUREREREVObp1D2VREREREREVLKwqCQiIiIiIiKVSYQQQttBFKeUlBRYWVkhOTkZlpaW2g6HqEzQdN4xr4mKnybzjjlNVPyYd/Q+eKaSiIiIiIiIVMaikoiIiIiIiFTGopKIiIiIiIhUxqKSiIiIiIiIVMaikoiIiIiIiFTGopKIiIiIiIhUZqDtAIpbzhtUUlJStBwJUdmRk2+aeoMR85qo+Gkyr5nTRMVP07+rqXQrc0Xl8+fPAQCOjo5ajoSo7Hnx4gWsrKzU3i/zmkh7NJHXzGki7dHU72oq3cpcUVm+fHkAQFxcXKlImJSUFDg6OuLRo0el5kW1XCfdUJR1EkLgxYsXcHBw0EgszOuSj+ukG0pKXpe2nAa4v+iKsrxOmv5dTaVbmSsq9fTe3EZqZWVVag4WAGBpaVmq1gfgOukKZddJk38YMq91B9dJN2g7r0trTgNle3/RJWV1nUrLP3Go+PFBPURERERERKQyFpVERERERESksjJXVEqlUsycORNSqVTboahFaVsfgOukK0rSOpWkWNShtK0PwHXSFSVlnUpKHOrEddINXCci1UgEnxtMREREREREKipzZyqJiIiIiIhIfVhUEhERERERkcpYVBIREREREZHKWFQSERERERGRyspUUfntt9/C2dkZxsbGaNKkCf744w9th6S0sLAwNG7cGBYWFrCzs0OXLl0QFRWl0KZVq1aQSCQKn5EjR2op4sKFhITkitfT01M+/dWrVxgzZgxsbGxgbm6O
7t2749mzZ1qMuHDOzs651kkikWDMmDEASv42+u2339CxY0c4ODhAIpFgz549CtOFEJgxYwbs7e1hYmICPz8/3L9/X6HNf//9h/79+8PS0hLW1tYYMmQIUlNTNRYz87pkYV6XvG3EvC5epS2vmdMlc/voYl5T6VZmisqtW7diwoQJmDlzJq5cuYK6devC398f8fHx2g5NKadOncKYMWNw/vx5HD16FK9fv0bbtm2Rlpam0G7YsGF4+vSp/DN//nwtRaycWrVqKcR7+vRp+bTx48fj119/xfbt23Hq1Ck8efIE3bp102K0hbt48aLC+hw9ehQA0LNnT3mbkryN0tLSULduXXz77bd5Tp8/fz6WL1+O1atX48KFCzAzM4O/vz9evXolb9O/f3/cunULR48exb59+/Dbb79h+PDhGomXeV0yMa9L1jZiXhev0pjXzOmSt310La+pDBBlhLe3txgzZox8ODs7Wzg4OIiwsDAtRqW6+Ph4AUCcOnVKPs7X11d89tln2guqiGbOnCnq1q2b57SkpCRhaGgotm/fLh93584dAUCcO3eumCJ8f5999plwc3MTMplMCKFb2wiA2L17t3xYJpOJSpUqiQULFsjHJSUlCalUKn7++WchhBC3b98WAMTFixflbQ4ePCgkEol4/Pix2mNkXpc8zOuSjXld/HQ9r5nTJZ8u5DWVfmXiTGVmZiYuX74MPz8/+Tg9PT34+fnh3LlzWoxMdcnJyQCA8uXLK4zfvHkzKlSoAC8vL0ydOhUvX77URnhKu3//PhwcHODq6or+/fsjLi4OAHD58mW8fv1aYZt5enqiatWqOrPNMjMzsWnTJgwePBgSiUQ+Xte2UY6YmBj8888/CtvEysoKTZo0kW+Tc+fOwdraGo0aNZK38fPzg56eHi5cuKDWeJjXJRfzuuRvoxzMa80rDXnNnC7Z2+ddJS2vqWww0HYAxeHff/9FdnY2KlasqDC+YsWKuHv3rpaiUp1MJkNwcDCaN28OLy8v+fh+/frByckJDg4O+PPPPzFlyhRERUVh165dWow2f02aNMH69etRvXp1PH36FKGhofDx8cHNmzfxzz//wMjICNbW1grzVKxYEf/88492Ai6iPXv2ICkpCYGBgfJxuraN3pbzveeVRznT/vnnH9jZ2SlMNzAwQPny5dW+3ZjXJXOfYV6X/G30Nua1ZpWGvGZOl+ztk5eSltdUNpSJorK0GTNmDG7evKlwTwMAhevga9euDXt7e7Ru3RrR0dFwc3Mr7jALFRAQIP+5Tp06aNKkCZycnLBt2zaYmJhoMTL1WLduHQICAuDg4CAfp2vbiIoP81o3MK+pKEpDXjOnS/b2ISopysTlrxUqVIC+vn6up5E9e/YMlSpV0lJUqgkKCsK+fftw4sQJVKlSpcC2TZo0AQA8ePCgOEJ7b9bW1qhWrRoePHiASpUqITMzE0lJSQptdGWbPXz4EJGRkRg6dGiB7XRpG+V87wXlUaVKlXI9TCMrKwv//fef2rcb87rk7zMA87qkY15rTmnNa+Z0yVfS8prKhjJRVBoZGaFhw4Y4duyYfJxMJsOxY8fQtGlTLUamPCEEgoKCsHv3bhw/fhwuLi6FznPt2jUAgL29vYajU4/U1FRER0fD3t4eDRs2hKGhocI2i4qKQlxcnE5ss4iICNjZ2aF9+/YFttOlbeTi4oJKlSopbJOUlBRcuHBBvk2aNm2KpKQkXL58Wd7m+PHjkMlk8l/K6sK8Lvn7DMC8LumY1+pX2vOaOV3ylbS8pjJCyw8KKjZbtmwRUqlUrF+/Xty+fVsMHz5cWFtbi3/++UfboSll1KhRwsrKSpw8eVI8ffpU/nn58qUQQogHDx6IWbNmiUuXLomYmBixd+9e4erqKlq2bKnlyPP3+eefi5MnT4qYmBhx5swZ4efnJypUqCDi4+OFEEKMHDlSVK1aVRw/flxcunRJNG3aVDRt2lTLURcuOztbVK1aVUyZMkVhvC5soxcv
XoirV6+Kq1evCgBi8eLF4urVq+Lhw4dCCCG++eYbYW1tLfbu3Sv+/PNP0blzZ+Hi4iLS09PlfbRr107Ur19fXLhwQZw+fVp4eHiIvn37aiRe5nXJw7wueduIeV28SlteM6dL5vbRtbym0q/MFJVCCLFixQpRtWpVYWRkJLy9vcX58+e1HZLSAOT5iYiIEEIIERcXJ1q2bCnKly8vpFKpcHd3F5MmTRLJycnaDbwAvXv3Fvb29sLIyEhUrlxZ9O7dWzx48EA+PT09XYwePVqUK1dOmJqaiq5du4qnT59qMWLlHD58WAAQUVFRCuN1YRudOHEiz/1s4MCBQog3jymfPn26qFixopBKpaJ169a51vP58+eib9++wtzcXFhaWopBgwaJFy9eaCxm5nXJwrwueduIeV28SlteM6dL5vbRxbym0k0ihBAaPBFKREREREREpViZuKeSiIiIiIiININFJREREREREamMRSURERERERGpjEUlERERERERqYxFJREREREREamMRSURERERERGpjEUlERERERERqYxFJREREREREamMRSURERERERGpjEUlERERERERqYxFJREREREREamMRSURERERERGpjEUlERERERERqYxFJREREREREamMRSURERERERGpjEUlERERERERqYxFJREREREREamMRSURERERERGpjEUlERERERERqYxFJREREREREamMRSURERERERGpjEUlERERERERqYxFJREREREREamMRSURERERERGpjEUlERERERERqYxFJREREREREamMRSURERERERGpjEUlERERERERqYxFJRGRCpydnREYGKjtMJQSEhICiUSiMK644o+NjYVEIsH69evl4wIDA2Fubq7xZeeQSCQICQkptuURERGVNSwqiYjeEh0djREjRsDV1RXGxsawtLRE8+bNsWzZMqSnp2s7PK06cOBAiS3OSnJsREREpZ2BtgMgIiop9u/fj549e0IqlWLAgAHw8vJCZmYmTp8+jUmTJuHWrVv47rvvtB2mWkRFRUFPr2j/Vzxw4AC+/fbbIhVvTk5OSE9Ph6GhYREjLJqCYktPT4eBAX/dERERaQp/yxIRAYiJiUGfPn3g5OSE48ePw97eXj5tzJgxePDgAfbv36/FCNVLKpVqtP+srCzIZDIYGRnB2NhYo8sqjLaXT0REVNrx8lciIgDz589Hamoq1q1bp1BQ5nB3d8dnn32W7/z//fcfJk6ciNq1a8Pc3ByWlpYICAjA9evXc7VdsWIFatWqBVNTU5QrVw6NGjXCTz/9JJ/+4sULBAcHw9nZGVKpFHZ2dmjTpg2uXLlS6HqcPn0ajRs3hrGxMdzc3LBmzZo82717T+Xr168RGhoKDw8PGBsbw8bGBi1atMDRo0cBvLkP8ttvvwXw5h7FnA/wv/smFy5ciKVLl8LNzQ1SqRS3b9/O857KHH/99Rf8/f1hZmYGBwcHzJo1C0II+fSTJ09CIpHg5MmTCvO922dBseWMe/cM5tWrVxEQEABLS0uYm5ujdevWOH/+vEKb9evXQyKR4MyZM5gwYQJsbW1hZmaGrl27IiEhIe8NQEREVAbxTCUREYBff/0Vrq6uaNasmUrz//XXX9izZw969uwJFxcXPHv2DGvWrIGvry9u374NBwcHAMDatWsxbtw49OjRA5999hlevXqFP//8ExcuXEC/fv0AACNHjsSOHTsQFBSEmjVr4vnz5zh9+jTu3LmDBg0a5BvDjRs30LZtW9ja2iIkJARZWVmYOXMmKlasWGj8ISEhCAsLw9ChQ+Ht7Y2UlBRcunQJV65cQZs2bTBixAg8efIER48excaNG/PsIyIiAq9evcLw4cMhlUpRvnx5yGSyPNtmZ2ejXbt2+OCDDzB//nwcOnQIM2fORFZWFmbNmlVovG9TJra33bp1Cz4+PrD8v/buLSTKrY0D+F8nNWfGs5MpmaV51oQEpTxTjJFGgpUSpQhZMKkXhQZR
TYkGKVGpSILmTdsKCwwSjyCWKWF0YYZpechDYCKpF+VpZu0Lab6m0bT52hd7+/9dzaxZ61nP+yLCw7vWeq2tkZOTAzMzM5SVlSE6Ohqtra0IDQ3V65+ZmQk7Ozuo1WoMDQ3h1q1byMjIwMOHD38rTyIiov8qFpVEtO7NzMxgbGwMhw4dMjpGYGAg+vr69PYpnjhxAj4+PqioqMClS5cALO3b9Pf3R3V19YqxamtrkZ6ejhs3bujacnJyVs3h8uXLEELg+fPn2Lp1KwAgMTERgYGBq46tra3FgQMHVtwzunv3bnh5eaGpqQnHjx9fts/o6Cg+fPgAhUKhaxsaGlq27+zsLPbv34+ioiIAgEqlwsGDB3H9+nVkZWXB0dFx1Zx/J7cfXbx4EQsLC2hra4O7uzsAICUlBd7e3sjJyUFra6tefwcHBzQ2Nuqefmq1WhQVFWF6eho2NjZrzpOIiOi/istfiWjdm5mZAQBYWVkZHcPCwkJXUGo0GkxOTkIul8Pb21tv2aqtrS1GR0fR2dm5YixbW1u8fPkSnz59WvP8Go0GDQ0NSEhI0BWUAODr64vY2NhVx9va2uLt27d4//79muf8WWJiol5BuZqMjAzdZxMTE2RkZGB+fh7Nzc1G57AajUaDxsZGJCQk6ApKAHB2dsaxY8fQ1tam+3v47tSpU3rLaSMiIqDRaPDx48d/LE8iIqJ/ExaVRLTuWVtbA1jay2gsrVaLmzdvwtPTExYWFnB0dIRCoUBXVxemp6d1/c6fPw+5XI6QkBB4enrizJkzePHihV6sgoICdHd3w9XVFSEhIbhy5QoGBgZ+Of/ExAS+ffsGT09Pg9+8vb1XzT83NxdTU1Pw8vJCYGAgsrOz0dXVtcarX7J9+/Y19zU1NdUr6gDAy8sLwMpPN/+EiYkJfP36ddl74uvrC61Wi5GREb32H4t0ALCzswMAfPny5R/Lk4iI6N+ERSURrXvW1tZwcXFBd3e30TGuXbuGs2fPIjIyEvfu3UNDQwOamprg7++vt6/Q19cXvb29ePDgAcLDw/H48WOEh4dDrVbr+hw9ehQDAwMoLi6Gi4sLCgsL4e/vj7q6uv/rOn8lMjIS/f39uHv3LgICAlBeXo5du3ahvLx8zTEsLS3/aE4/Ph38kUaj+aPzrEYikSzb/uOhQkREROsZi0oiIgDx8fHo7+9HR0eHUeMfPXqEmJgYVFRUIDk5GUqlEvv27cPU1JRBX5lMhqSkJFRWVmJ4eBhxcXHIz8/H7Oysro+zszNUKhVqamowODgIBwcH5Ofnrzi/QqGApaXlsstXe3t713QN9vb2SEtLw/379zEyMoKdO3fqnZq6UpFnDK1Wa/D0ta+vD8DSybTA/54I/nwPl1t2utbcFAoFpFLpsvfk3bt3MDU1haur65piERER0RIWlUREWDoIRyaT4eTJkxgfHzf4vb+/H7dv315xvEQiMXhyVV1djbGxMb22yclJve/m5ubw8/ODEAILCwvQaDR6y2UBYNOmTXBxccHc3Nwv54+NjUVNTQ2Gh4d17T09PWhoaFhx3Ep5yeVy7NixQ29OmUwGwLDIM1ZJSYnusxACJSUlMDMzw969ewEAbm5ukEgkePbsmd640tJSg1hrzU0ikUCpVOLJkyd6y2zHx8dRVVWF8PBw3XJoIiIiWhue/kpEBMDDwwNVVVVISkqCr68vUlJSEBAQgPn5ebS3t6O6ulrvvY4/i4+PR25uLtLS0rBnzx68efMGf/31l8G+QaVSic2bNyMsLAxOTk7o6elBSUkJ4uLiYGVlhampKWzZsgWHDx9GUFAQ5HI5mpub0dnZqXca7HKuXr2K+vp6REREQKVSYXFxUfdOzNX2R/r5+SE6OhrBwcGwt7fHq1evdK81+S44OBgAkJWVhdjYWEgkEiQnJ69yZ5e3ceNG1NfXIzU1FaGhoairq0NtbS0uXLigO+zHxsYGR44cQXFxMUxMTODh4YGnT5/i8+fPBvF+J7e8vDw0
NTUhPDwcKpUKGzZsQFlZGebm5lBQUGDU9RAREa1rgoiIdPr6+kR6errYtm2bMDc3F1ZWViIsLEwUFxeL2dlZXT83NzeRmpqq+z47OyvOnTsnnJ2dhaWlpQgLCxMdHR0iKipKREVF6fqVlZWJyMhI4eDgICwsLISHh4fIzs4W09PTQggh5ubmRHZ2tggKChJWVlZCJpOJoKAgUVpauqb8W1tbRXBwsDA3Nxfu7u7izp07Qq1Wi5//3f+cf15enggJCRG2trbC0tJS+Pj4iPz8fDE/P6/rs7i4KDIzM4VCoRAmJia6mIODgwKAKCwsNMjn+2+VlZW6ttTUVCGTyUR/f79QKpVCKpUKJycnoVarhUaj0Rs/MTEhEhMThVQqFXZ2duL06dOiu7vbIOZKuQkhBAChVqv14r5+/VrExsYKuVwupFKpiImJEe3t7Xp9KisrBQDR2dmp197S0iIAiJaWFoPrJSIiWo9MhOBJA0RERERERGQc7qkkIiIiIiIio7GoJCIiIiIiIqOxqCQiIiIiIiKjsagkIiIiIiIio7GoJCIiIiIiIqOxqCQiIiIiIiKjsagkIiIiIiIio7GoJCIiIiIiIqOxqCQiIiIiIiKjsagkIiIiIiIio7GoJCIiIiIiIqOxqCQiIiIiIiKj/Q3rqt/t87q2rwAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from flwr_datasets.visualization import plot_comparison_label_distribution\n", + "\n", + "fig, axes, df_list = plot_comparison_label_distribution(\n", + " partitioner_list=partitioner_list,\n", + " label_name=\"label\",\n", + " subtitle=\"Comparison of Partitioning Schemes on CIFAR10\",\n", + " titles=title_list,\n", + " legend=True,\n", + " verbose_labels=True,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "862899eb04695380", + "metadata": {}, + "source": [ + "## Bonus: Natural Id Dataset" + ] + }, + { + "cell_type": "markdown", + "id": "4f3f8aaf", + "metadata": {}, + "source": [ + "Nothing stops you from using the `NaturalIdPartitioner` to visualize a dataset with the `id` in it and does not need the artificial partitioning but has the pre-existing partitions. For that dataset, we use `NaturalIdPartitioner`. Let's look at the `speech-commands` dataset that has `speaker_id`, and there are quite a few speakers; therefore, we will show only the first 20 partitions. And since we have quite a few different labels, let's specify `legend_kwargs={\"ncols\": 2}` to display them in two columns (we will also shift the legend slightly to the right)." + ] + }, + { + "cell_type": "markdown", + "id": "f016d21a", + "metadata": {}, + "source": [ + "You'll be using the [Google SpeechCommands](https://huggingface.co/datasets/google/speech_commands) dataset, which is a speech-based dataset. For this, you'll need to install the `\"audio\"` extension for Flower Datasets. It can be easily done like this:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fd5ca8f4", + "metadata": {}, + "outputs": [], + "source": [ + "! pip install -q \"flwr-datasets[audio]\"" + ] + }, + { + "cell_type": "markdown", + "id": "90ea3642", + "metadata": {}, + "source": [ + "With everything ready, let's visualize the partitions for a naturally partitioned dataset." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4fe70116", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAxQAAAHHCAYAAAAmth45AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy80BEi2AAAACXBIWXMAAA9hAAAPYQGoP6dpAACYNUlEQVR4nOzdeXhM1/8H8Pdk31dZyUpCQjZiiS2pLbZ+RexSkSJaxE5oFUkIpXZalDYJpWqn2lqaSlQQsQWVBinf+GqIImQh6/z+UPfXaYLMmCXL+/U88zy59557zufkTpjP3HPuEYnFYjGIiIiIiIhkoKbqAIiIiIiIqPZiQkFERERERDJjQkFERERERDJjQkFERERERDJjQkFERERERDJjQkFERERERDJjQkFERERERDJjQkFERERERDJjQkFERERERDJjQkFUx4WFhcHR0bFaZaOioiASiRQbkBIEBASgRYsWcq3T0dERYWFhcq2zuuLj4yESiXD79m2Ft/Xv98vt27chEomwbNkyhbcN1J33IBFRfcKEguq0lx/EXr50dHTg6uqKiIgI3L9/X+Htv/xw9PKlp6cHd3d3fPLJJ3j69Knc2vnzzz8RFRWFS5cuvbFsUVERoqKikJSUJLf25UEkEiEiIkLVYShcUlKSxHtCW1sbVlZWCAgIwKJFi/DgwQO5tFNTrzNQs2MjIiLpMaGgeiEmJgZbt27FunXr0L59e6xfvx5+fn4oKipSSvvr16/H1q1bsWLFCjRr1gyxsbHo2bMnxGKxXOr/888/ER0dXWVCsWnTJmRmZgrbRUVFiI6OrvLD3CeffIJnz57JJSZ6vUmTJmHr1q348ssvMXPmTJiZmWH+/Plwc3PDL7/8IlF2xIgRePbsGRwcHKpd/+uu8+v8+/2iCHwPEhHVLRqqDoBIGXr16gVfX18AwJgxY2Bubo4VK1bgwIEDGDZs2FvVXVRUBD09vdeWGThwIBo0aAAA+PDDDzFgwADs3bsXZ86cgZ+fn8xtl5WVoaKi4rVlNDU1q12fhoYGNDT4z4IydOrUCQMHDpTYl56ejh49emDAgAG4du0abGxsAADq6upQV1dXaDyFhYXQ19eX6v2iCHwPEhHVPrxDQfVSly5dAAC3bt0S9n3zzTdo1aoVdHV1YWZmhqFDh+LOnTsS570cm3/+/Hl07twZenp6+Pjjj9+q/ZKSEsybNw+tWrWCsbEx9PX10alTJxw/flzinH+OZV+1ahUaN24MbW1tfPHFF2jdujUA4P333xeG0sTHxwOQHBN/+/ZtWFhYAACio6OFslFRUQCqHr9eVlaGBQsWCO05Ojri448/RnFxsUQ5R0dH9O3bFydPnkSbNm2go6MDZ2dnbNmyRerfz6scOHAAffr0ga2tLbS1tdG4cWMsWLAA5eXlVZY/f/482rdvD11dXTg5OWHDhg2VyhQXF2P+/Plo0qQJtLW1YWdnh8jIyEr9+7fS0lJER0fDxcUFOjo6MDc3R8eOHXHs2DGZ++fl5YVVq1YhLy8P69atE/ZXNYfi3LlzCAwMRIMGDYT+jRo1CsCbr3NYWBgMDAyQlZWF3r17w9DQECEhIcKxV825WblyJRwcHKCrqwt/f39cvXpV4nhAQAACAgIqnVeX3oNERFQZvwaieikrKwsAYG5uDgCIjY3F3LlzMXjwYIwZMwYPHjzA2rVr0blzZ1y8eBEmJibCuQ8fPkSvXr0wdOhQvPfee7Cysnqr9p8+fYrNmzdj2LBhCA8PR35+Pr766isEBgbi7Nmz8Pb2ljg3Li4Oz58/x9ixY6GtrY3+/fsjPz8f8+bNw9ixY9GpUycAQPv27Su1a2FhgfXr12PcuHHo
378/goODAQCenp6vjHXMmDFISEjAwIEDMX36dKSmpmLx4sXIyMjAvn37JMrevHkTAwcOxOjRozFy5Eh8/fXXCAsLQ6tWrdC8eXOpf0//Fh8fDwMDA0ybNg0GBgb45ZdfMG/ePDx9+hSfffaZRNnHjx+jd+/eGDx4MIYNG4adO3di3Lhx0NLSEj54V1RU4D//+Q9OnjyJsWPHws3NDVeuXMHKlStx/fp17N+//5WxREVFYfHixRgzZgzatGmDp0+f4ty5c7hw4QK6d+8ucx9f/v6OHj2K2NjYKsvk5uaiR48esLCwwOzZs2FiYoLbt29j7969AKp3ncvKyhAYGIiOHTti2bJlb7zLtmXLFuTn52PChAl4/vw5Vq9ejS5duuDKlStS/Q3U9vcgERFVQUxUh8XFxYkBiH/++WfxgwcPxHfu3BHv2LFDbG5uLtbV1RX/73//E9++fVusrq4ujo2NlTj3ypUrYg0NDYn9/v7+YgDiDRs2VKv9+fPniwGIMzMzxQ8ePBDfunVLvHHjRrG2trbYyspKXFhYKC4rKxMXFxdLnPf48WOxlZWVeNSoUcK+W7duiQGIjYyMxLm5uRLl09LSxADEcXFxlWIYOXKk2MHBQdh+8OCBGIB4/vz5r4z3pUuXLokBiMeMGSNRbsaMGWIA4l9++UXY5+DgIAYgPnHihLAvNzdXrK2tLZ4+ffprf09isVgMQDxhwoTXlikqKqq074MPPhDr6emJnz9/Lux7eZ2WL18u7CsuLhZ7e3uLLS0txSUlJWKxWCzeunWrWE1NTfzrr79K1LlhwwYxAHFKSopE/0aOHClse3l5ifv06fPGfv3b8ePHxQDEu3btemUZLy8vsampqbD98n1869YtsVgsFu/bt08MQJyWlvbKOl53nUeOHCkGIJ49e3aVx/75fnn5vnv59/JSamqqGIB46tSpwj5/f3+xv7//G+usqe9BIiKSDYc8Ub3QrVs3WFhYwM7ODkOHDoWBgQH27duHhg0bYu/evaioqMDgwYPx119/CS9ra2u4uLhUGnqkra2N999/X6r2mzZtCgsLCzg5OeGDDz5AkyZN8MMPP0BPTw/q6urQ0tIC8OIb80ePHqGsrAy+vr64cOFCpboGDBggDBlRtB9//BEAMG3aNIn906dPBwD88MMPEvvd3d2FOyTAi2+jmzZtij/++EMu8ejq6go/5+fn46+//kKnTp1QVFSE33//XaKshoYGPvjgA2FbS0sLH3zwAXJzc3H+/HkAwK5du+Dm5oZmzZpJXPuXQ9L+fe3/ycTEBL/99htu3Lghl779k4GBAfLz81/bNgAcOnQIpaWlMrczbty4apcNCgpCw4YNhe02bdqgbdu2wntEUWrae5CIiCrjkCeqFz7//HO4urpCQ0MDVlZWaNq0KdTUXuTTN27cgFgshouLS5Xn/nuSasOGDYUEoLr27NkDIyMjaGpqolGjRmjcuLHE8YSEBCxfvhy///67xAdEJyenSnVVtU9R/vvf/0JNTQ1NmjSR2G9tbQ0TExP897//ldhvb29fqQ5TU1M8fvxYLvH89ttv+OSTT/DLL79UeuzukydPJLZtbW2hr68vsc/V1RXAi3H87dq1w40bN5CRkfHKBC03N/eVscTExKBfv35wdXVFixYt0LNnT4wYMeK1Q3eqq6CgAIaGhq887u/vjwEDBiA6OhorV65EQEAAgoKCMHz4cGhra1erDQ0NDTRq1KjaMVX19+Hq6oqdO3dWuw5Z1LT3IBERVcaEguqFNm3aCE95+reKigqIRCL89NNPVT5Jx8DAQGL7n9+SV1fnzp2Fpzz92zfffIOwsDAEBQVh5syZsLS0hLq6OhYvXizMtXjb9t9WdRcae9WTiMRyeDxuXl4e/P39YWRkhJiYGDRu3Bg6Ojq4cOECZs2a9canXVWloqICHh4eWLFiRZXH7ezsXnlu586dkZWVhQMHDuDo0aPYvHkzVq5ciQ0bNmDMmDFSx/JSaWkprl+/
/tqF+UQiEXbv3o0zZ87g+++/x5EjRzBq1CgsX74cZ86cqfSerYq2traQVMuLSCSq8lq/atK8tHVXhyLfg0REVDUmFFTvNW7cGGKxGE5OTsI32Mq0e/duODs7Y+/evRIfmubPn1/tOqRZWViasg4ODqioqMCNGzfg5uYm7L9//z7y8vKkWhfhbSUlJeHhw4fYu3cvOnfuLOz/55O6/unPP/8UHoX60vXr1wFAeOJQ48aNkZ6ejq5du8q0OrOZmRnef/99vP/++ygoKEDnzp0RFRX1VgnF7t278ezZMwQGBr6xbLt27dCuXTvExsZi+/btCAkJwY4dOzBmzBi5rzZd1dCu69evSzwRytTUtMqhRf++i1Bb34NERFQ1zqGgei84OBjq6uqIjo6u9C2mWCzGw4cPFdr+y29U/9l2amoqTp8+Xe06Xn5ozsvLe2PZl0/zqU7Z3r17AwBWrVolsf/lN/p9+vSpdoxvq6rfU0lJCb744osqy5eVlWHjxo0SZTdu3AgLCwu0atUKADB48GDcvXsXmzZtqnT+s2fPUFhY+Mp4/v2+MDAwQJMmTd74uNnXSU9Px5QpU2BqaooJEya8stzjx48rvVdfPg3sZfvSXOfq2L9/P+7evStsnz17FqmpqejVq5ewr3Hjxvj9998lVvtOT09HSkqKRF219T1IRERV4x0KqvcaN26MhQsX4qOPPsLt27cRFBQEQ0ND3Lp1C/v27cPYsWMxY8YMhbXft29f7N27F/3790efPn1w69YtbNiwAe7u7igoKKh2H0xMTLBhwwYYGhpCX18fbdu2rXK+ha6uLtzd3fHdd9/B1dUVZmZmaNGiRZVDbLy8vDBy5Eh8+eWXwpCjs2fPIiEhAUFBQXjnnXfeuv//dO7cOSxcuLDS/oCAALRv3x6mpqYYOXIkJk2aBJFIhK1bt75yKIutrS2WLFmC27dvw9XVFd999x0uXbqEL7/8UpgXM2LECOzcuRMffvghjh8/jg4dOqC8vBy///47du7ciSNHjrxyqJy7uzsCAgLQqlUrmJmZ4dy5c9i9ezciIiKq1ddff/0Vz58/R3l5OR4+fIiUlBQcPHgQxsbG2LdvH6ytrV95bkJCAr744gv0798fjRs3Rn5+PjZt2gQjIyPhA7g017k6mjRpgo4dO2LcuHEoLi7GqlWrYG5ujsjISKHMqFGjsGLFCgQGBmL06NHIzc3Fhg0b0Lx5c4k5LzX5PUhERDJQ0dOliJTi5eM2X/d4zZf27Nkj7tixo1hfX1+sr68vbtasmXjChAnizMxMoYy/v7+4efPm1W7/5SMwHzx48MoyFRUV4kWLFokdHBzE2traYh8fH/GhQ4de+fjOzz77rMp6Dhw4IHZ3dxdraGhIPEL23/WIxWLxqVOnxK1atRJraWlJPL7z34/sFIvF4tLSUnF0dLTYyclJrKmpKbazsxN/9NFHEo9pFYtfPLKzqseovupRov8G4JWvBQsWiMVisTglJUXcrl07sa6urtjW1lYcGRkpPnLkiBiA+Pjx4xJtNm/eXHzu3Dmxn5+fWEdHR+zg4CBet25dpXZLSkrES5YsETdv3lysra0tNjU1Fbdq1UocHR0tfvLkiUT//vnY2IULF4rbtGkjNjExEevq6oqbNWsmjo2NFR5J+yovHxv78qWpqSm2sLAQd+7cWRwbG1vpkcBiceXHxl64cEE8bNgwsb29vVhbW1tsaWkp7tu3r/jcuXMS573qOo8cOVKsr69fZXyve98tX75cbGdnJ9bW1hZ36tRJnJ6eXun8b775Ruzs7CzW0tISe3t7i48cOVJr3oNERCQbkVjMmWpERERERCQbzqEgIiIiIiKZMaEgIiIiIiKZMaEgIiIiIiKZMaEgIiIiIiKZMaEgIiIiIiKZMaEgIiIiIiKZcWE7ABUVFfjzzz9haGgIkUik6nCIiIioGsRiMfLz82Fraws1NX5HSqQqTCgA/Pnnn7Czs1N1GERERCSDO3fuoFGjRqoO
g6jeYkIBwNDQEMCLf5CMjIxUHA0RERFVx9OnT2FnZyf8P05EqsGEAhCGORkZGTGhICIiqmU4XJlItTjgkIiIiIiIZMaEgoiIiIiIZMaEgoiIiIiIZMaEgoiIiIiIZMaEgoiIiIiIZMaEgoiIiIiIZMaEgoiIiIiIZMaEgoiIiIiIZMaEgoiIiIiIZMaVsomIiKjeKi8vR2lpqarDIKpxNDU1oa6uXq2yKk0oTpw4gc8++wznz59HTk4O9u3bh6CgIOG4WCzG/PnzsWnTJuTl5aFDhw5Yv349XFxchDKPHj3CxIkT8f3330NNTQ0DBgzA6tWrYWBgoIIeERERUW0gFotx79495OXlqToUohrLxMQE1tbWEIlEry2n0oSisLAQXl5eGDVqFIKDgysdX7p0KdasWYOEhAQ4OTlh7ty5CAwMxLVr16CjowMACAkJQU5ODo4dO4bS0lK8//77GDt2LLZv367s7hAREVEt8TKZsLS0hJ6e3hs/MBHVJ2KxGEVFRcjNzQUA2NjYvLa8SCwWi5UR2JuIRCKJOxRisRi2traYPn06ZsyYAQB48uQJrKysEB8fj6FDhyIjIwPu7u5IS0uDr68vAODw4cPo3bs3/ve//8HW1rZabT99+hTGxsZ48uQJjIyMFNI/IiIiki9Z//8uLy/H9evXYWlpCXNzcwVGSFS7PXz4ELm5uXB1dX3t8KcaOyn71q1buHfvHrp16ybsMzY2Rtu2bXH69GkAwOnTp2FiYiIkEwDQrVs3qKmpITU1VekxExERUc33cs6Enp6eiiMhqtle/o28aZ5RjZ2Ufe/ePQCAlZWVxH4rKyvh2L1792BpaSlxXENDA2ZmZkKZqhQXF6O4uFjYfvr0qbzCJiIiolqCw5yIXq+6fyM1NqFQpMWLFyM6OrpaZc2W9JKq7kezfpI6Ht3ZAVKVf/ZpkkLrr6lt1IVroYw2eL2rj9e7emw39Je6jT8/3CdVeWVcC2W8p6T9XSn69wTUzPeUMq4FESlPjR3yZG1tDQC4f/++xP779+8Lx6ytrYXJIi+VlZXh0aNHQpmqfPTRR3jy5InwunPnjpyjJyIiIqKqxMfHw8TE5K3rEYlE2L9//1vXQ2+vxiYUTk5OsLa2RmJiorDv6dOnSE1NhZ+fHwDAz88PeXl5OH/+vFDml19+QUVFBdq2bfvKurW1tWFkZCTxIiIiIqLqCQsLk3jUP9VvKh3yVFBQgJs3bwrbt27dwqVLl2BmZgZ7e3tMmTIFCxcuhIuLi/DYWFtbW+EN7Obmhp49eyI8PBwbNmxAaWkpIiIiMHTo0Go/4YnkQ9dUV9Uh0N94LYiIiEiZVHqH4ty5c/Dx8YGPjw8AYNq0afDx8cG8efMAAJGRkZg4cSLGjh2L1q1bo6CgAIcPHxbWoACAbdu2oVmzZujatSt69+6Njh074ssvv1RJf4iIiIjquxUrVsDDwwP6+vqws7PD+PHjUVBQUKnc/v374eLiAh0dHQQGBlYagn7gwAG0bNkSOjo6cHZ2RnR0NMrKyqpss6SkBBEREbCxsYGOjg4cHBywePFihfSPKlPpHYqAgAC8bhkMkUiEmJgYxMTEvLKMmZkZF7EjIiIiqiHU1NSwZs0aODk54Y8//sD48eMRGRmJL774QihTVFSE2NhYbNmyBVpaWhg/fjyGDh2KlJQUAMCvv/6K0NBQrFmzBp06dUJWVhbGjh0LAJg/f36lNtesWYODBw9i586dsLe3x507dzhHVonq5VOeSP50jHXeXIiUgteCiIhUacqUKcLPjo6OWLhwIT788EOJhKK0tBTr1q0T5rwmJCTAzc0NZ8+eRZs2bRAdHY3Zs2dj5MiRAABnZ2csWLAAkZGRVSYU2dnZcHFxQceOHSESieDg4KDYTpKEGjspm4iIiIhqn59//hldu3ZFw4YNYWhoiBEjRuDhw4coKioSymhoaKB169bCdrNmzWBiYoKMjAwAQHp6OmJiYmBgYCC8wsPDkZOTI1HP
S2FhYbh06RKaNm2KSZMm4ejRo4rvKAmYUBARERGRXNy+fRt9+/aFp6cn9uzZg/Pnz+Pzzz8H8GKeQ3UVFBQgOjoaly5dEl5XrlzBjRs3JObSvtSyZUvcunULCxYswLNnzzB48GAMHDhQbv2i1+OQJ5ILW0MtVYdAf+O1ICIiVTl//jwqKiqwfPlyqKm9+N56586dlcqVlZXh3LlzaNOmDQAgMzMTeXl5cHNzA/AiQcjMzESTJk2q3baRkRGGDBmCIUOGYODAgejZsycePXoEMzMzOfSMXocJBRERERFJ7cmTJ7h06ZLEvgYNGqC0tBRr167Fu+++i5SUFGzYsKHSuZqampg4cSLWrFkDDQ0NREREoF27dkKCMW/ePPTt2xf29vYYOHAg1NTUkJ6ejqtXr2LhwoWV6luxYgVsbGzg4+MDNTU17Nq1C9bW1nJZQI/ejAkFEUmtrqx10dzDStUh0N+U8Z7iAwuqp678fZPiJSUlCY/+f2n06NFYsWIFlixZgo8++gidO3fG4sWLERoaKlFOT08Ps2bNwvDhw3H37l106tQJX331lXA8MDAQhw4dQkxMDJYsWQJNTU00a9YMY8aMqTIWQ0NDLF26FDdu3IC6ujpat26NH3/8UbhLQorFhIKIiIiIpBIfH4/4+PhXHp86darE9ogRI4Sfw8LCEBYWBgAIDg5+ZR2BgYEIDAx85fF/Lj0QHh6O8PDwN0RNisKEguTC2oDj9msKXgsiIiJSJt4HIiIiIiIimTGhICIiIiIimXHIE8lFQ0NtVYdAf+O1ICIiImXiHQoiIiIiIpIZEwoiIiIiIpIZhzyRXDTQZW5aU/BaEBERkTLxkwcREREREcmMCQUREREREcmMQ55ILqz01FUdAv2N14KIiKjmE4vF+OCDD7B79248fvwYFy9ehLe3t6rDkgkTihqguYdVra5fWdo3a6DqEEiJdIx1VB2CXJg6miq0/tAeTRRaPwDYGip+9XXnRkYKb4Oqp6787clCd3aAUtt79mmSUturd4r2Kbc9vf5SFT98+DDi4+ORlJQEZ2dnNGhQez/nMKEgIiIiIlKyrKws2NjYoH379gpro6SkBFpaiv9SiHMoSC4a6KhJ/SLF4LUgIqqbDh06BBMTE5SXlwMALl26BJFIhNmzZwtlxowZg/feew8AsGfPHjRv3hza2tpwdHTE8uXLhXLr1q1DixYthO39+/dDJBJhw4YNwr5u3brhk08+UXS36qWwsDBMnDgR2dnZEIlEcHR0RHFxMSZNmgRLS0vo6OigY8eOSEtLE86Jj4+HiYmJRD0vr9tLUVFR8Pb2xubNm+Hk5AQdHeXcceQnCSIiIqJaoFOnTsjPz8fFixcBAMnJyWjQoAGSkpKEMsnJyQgICMD58+cxePBgDB06FFeuXEFUVBTmzp2L+Ph4AIC/vz+uXbuGBw8eVFlXaWkpTp8+jYCAACX2sP5YvXo1YmJi0KhRI+Tk5CAtLQ2RkZHYs2cPEhIScOHCBTRp0gSBgYF49OiRVHXfvHkTe/bswd69e3Hp0iXFdOBfmFAQERER1QLGxsbw9vYWPvQnJSVh6tSpuHjxIgoKCnD37l3cvHkT/v7+WLFiBbp27Yq5c+fC1dUVYWFhiIiIwGeffQYAaNGiBczMzJCcnCzUNX36dGH77NmzKC0tVehwnPrM2NgYhoaGUFdXh7W1NfT09LB+/Xp89tln6NWrF9zd3bFp0ybo6uriq6++kqrukpISbNmyBT4+PvD09FRQDyQxoSAiIiKqJfz9/ZGUlASxWIxff/0VwcHBcHNzw8mTJ5GcnAxbW1u4uLggIyMDHTp0kDi3Q4cOuHHjBsrLyyESidC5c2ckJSUhLy8P165dw/jx41FcXIzff/8dycnJaN26NfT09FTU0/olKysLpaWlEtdMU1MTbdq0QUZGhlR1OTg4wMLCQt4hvhYnZZNcmOnoqzoE+huvBRFR3RUQEICvv/4a6enp0NTURLNmzRAQEICk
pCQ8fvwY/v7+UtX15Zdf4tdff4WPjw+MjIyEJCM5OVmqukjx1NTUIBaLJfaVlpZWKqevr/zPAbxDQURERFRLvJxHsXLlSuED/8uEIikpSZjz4ObmhpSUFIlzU1JS4OrqCnX1F+sVvZxHsWvXLuG8gIAA/Pzzz0hJSeH8CSVq3LgxtLS0JK5ZaWkp0tLS4O7uDgCwsLBAfn4+CgsLhTLKmiPxJkwoiIiIiGoJU1NTeHp6Ytu2bcIH/s6dO+PChQu4fv26kGRMnz4diYmJWLBgAa5fv46EhASsW7cOM2bMEOry9PSEqakptm/fLpFQ7N+/H8XFxZWGTJHi6OvrY9y4cZg5cyYOHz6Ma9euITw8HEVFRRg9ejQAoG3bttDT08PHH3+MrKwsbN++XZhkr2oc8kRyYaLNMZY1Ba8FEZFsastCc/7+/rh06ZKQBJiZmcHd3R33799H06ZNAQAtW7bEzp07MW/ePCxYsAA2NjaIiYlBWFiYUI9IJEKnTp3www8/oGPHjgBeJBlGRkZo2rSpSobOyJWUC82p2qeffoqKigqMGDEC+fn58PX1xZEjR2Bq+mKBVDMzM3zzzTeYOXMmNm3ahK5duyIqKgpjx45VceRMKIiIiIhqlVWrVmHVqlUS+6oa+jJgwAAMGDDgtXXt379fYltNTU3qx5SSbKZMmYIpU6YI2zo6OlizZg3WrFnzynOCgoIQFBQksS88PFz4OSoqClFRUXKO9M045ImIiIiIiGTGOxRvoGOs+BUGrQ0UvyR6XdDQUFvVIciFrqmuqkN4a8r4u7A1VPzfRSsbQ4W30dLGQKH1929sptD6laUuXIu6Qhl/e0RUt/AOBRERERERyYx3KIiIiOSE3+4TUX3EOxRERERERCQzJhRERERERCQzJhRERERERCQzJhRERERERCQzJhRERERERCQzJhRERERERCqWlJQEkUiEvLw8VYciNT42lugfpvVzVXUIRESkImZLeim1vUezfpL6nICAAHh7e2PVqlXyD6iOET/YrNT2RBZjpCpfl64l71AQERER1RFisRhlZWWqDoOUpKSkRNUhAGBCQURERFQrhIWFITk5GatXr4ZIJIJIJEJ8fDxEIhF++ukntGrVCtra2jh58iQqKiqwePFiODk5QVdXF15eXti9e7dEfVevXkWvXr1gYGAAKysrjBgxAn/99ZeKele/VHUtb9++DQA4f/48fH19oaenh/bt2yMzM1M4LyoqCt7e3ti8eTOcnJygo6MDAMjLy8OYMWNgYWEBIyMjdOnSBenp6RJtHjhwAC1btoSOjg6cnZ0RHR0tt+STQ57egKue1hwNdJn/knzxPVU9rWwMFd6GMq6Fl6WuwtuoC5RxvXWMdRTeRl20evVqXL9+HS1atEBMTAwA4LfffgMAzJ49G8uWLYOzszNMTU2xePFifPPNN9iwYQNcXFxw4sQJvPfee7CwsIC/vz/y8vLQpUsXjBkzBitXrsSzZ88wa9YsDB48GL/88osqu1kvvO5azpkzB8uXL4eFhQU+/PBDjBo1CikpKcK5N2/exJ49e7B3716oq6sDAAYNGgRdXV389NNPMDY2xsaNG9G1a1dcv34dZmZm+PXXXxEaGoo1a9agU6dOyMrKwtixYwEA8+fPf+v+MKEguTDTMVB1CPQ3Xguiuqurn52qQyAVMjY2hpaWFvT09GBtbQ0A+P333wEAMTEx6N69OwCguLgYixYtws8//ww/Pz8AgLOzM06ePImNGzfC398f69atg4+PDxYtWiTU//XXX8POzg7Xr1+HqyvnFCrS665lbGws/P39AbxIFPv06YPnz58LdyNKSkqwZcsWWFhYAABOnjyJs2fPIjc3F9ra2gCAZcuWYf/+/di9ezfGjh2L6OhozJ49GyNHjgTw4v2wYMECREZGMqEgIiIiIsDX11f4+ebNmygqKhISjJdKSkrg4+MDAEhPT8fx48dhYFD5S6isrCwmFCrk6ekp/GxjYwMAyM3N
hb29PQDAwcFBSCaAF9eyoKAA5ubmEvU8e/YMWVlZQpmUlBTExsYKx8vLy/H8+XMUFRVBT0/vrWJmQkFERCQn1gYcJkuqoa+vL/xcUFAAAPjhhx/QsGFDiXIvv8EuKCjAu+++iyVLllSq6+WHWFINTU1N4WeRSAQAqKioEPb981oDL66ljY0NkpKSKtVlYmIilImOjkZwcHClMi/vfLwNJhREREREtYSWlhbKy8tfW8bd3R3a2trIzs4Whs78W8uWLbFnzx44OjpCQ4MfB1WhOteyOlq2bIl79+5BQ0MDjo6OryyTmZmJJk2avHV7VeE7iIiIiKiWcHR0RGpqKm7fvg0DAwOJb65fMjQ0xIwZMzB16lRUVFSgY8eOePLkCVJSUmBkZISRI0diwoQJ2LRpE4YNG4bIyEiYmZnh5s2b2LFjBzZv3ixM9iXFqc61rI5u3brBz88PQUFBWLp0KVxdXfHnn3/ihx9+QP/+/eHr64t58+ahb9++sLe3x8CBA6Gmpob09HRcvXoVCxcufOu+MKEgIiIigmwLzSnbjBkzMHLkSLi7u+PZs2eIi4urstyCBQtgYWGBxYsX448//oCJiQlatmyJjz/+GABga2uLlJQUzJo1Cz169EBxcTEcHBzQs2dPqKnVjSfgSbvQnLJV91q+iUgkwo8//og5c+bg/fffx4MHD2BtbY3OnTvDysoKABAYGIhDhw4hJiYGS5YsgaamJpo1a4YxY+TzO2JCQURERFRLuLq64vTp0xL7wsLCKpUTiUSYPHkyJk+e/Mq6XFxcsHfvXnmHSNVUnWvp7e0NsVgsbEdFRSEqKqpSXYaGhlizZg3WrFnzyvYCAwMRGBj4VjG/St1IQYmIiIiISCWYUBARERERkcyYUBARERERkcw4h6IeqCvPRW9upvnmQqQUtoaKf0+1sjFUeBvK4GWpq+oQ3lr/xmYKb8NKT/FPlFFGG3WBMq63Mv4NISLl4R0KIiIiIiKSGRMKIiIiIiKSGRMKIiIiIiKSGRMKIiIiIiKSGRMKIiIiOWloqC3Vi0hewsLCEBQU9FZ1xMfHw8TERNiOioqCt7f3W9VJ9QOf8kRyYaBpouoQ6G+8FkREJIshQ4agd+/eqg6j3ggICIC3tzdWrVql6lDeGhMKIiIiIgC2G/ortb0/P9yn1PbeRFdXF7q6tf9R1y+J/7tMqe2JHGYotb2apEYPeSovL8fcuXPh5OQEXV1dNG7cGAsWLIBYLBbKiMVizJs3DzY2NtDV1UW3bt1w48YNFUZNREREpBi7d++Gh4cHdHV1YW5ujm7duqGwsFA4vmzZMtjY2MDc3BwTJkxAaWmpcKy4uBgzZsxAw4YNoa+vj7Zt2yIpKUk4/u8hT/+WlpaG7t27o0GDBjA2Noa/vz8uXLigiG7WeWFhYUhOTsbq1ashEokgEonQoEEDLFv2/0lQUFAQNDU1UVBQAAD43//+B5FIhJs3bwIAHj9+jNDQUJiamkJPTw+9evVS2WfgGp1QLFmyBOvXr8e6deuQkZGBJUuWYOnSpVi7dq1QZunSpVizZg02bNiA1NRU6OvrIzAwEM+fP1dh5ERERETylZOTg2HDhmHUqFHIyMhAUlISgoODhS9ajx8/jqysLBw/fhwJCQmIj49HfHy8cH5ERAROnz6NHTt24PLlyxg0aBB69uxZ7Q+h+fn5GDlyJE6ePIkzZ87AxcUFvXv3Rn5+viK6W6etXr0afn5+CA8PR05ODnJycjBixAghwROLxfj1119hYmKCkydPAgCSk5PRsGFDNGnSBMCLpOTcuXM4ePAgTp8+DbFYjN69e0skkcpSo4c8nTp1Cv369UOfPn0AAI6Ojvj2229x9uxZAC9+2atWrcInn3yCfv36AQC2bNkCKysr7N+/H0OHDlVZ7ERERETylJOTg7KyMgQHB8PBwQEA4OHhIRw3NTXFunXroK6ujmbNmqFPnz5ITExEeHg4srOzERcXh+zsbNja2gIAZsyYgcOH
DyMuLg6LFi16Y/tdunSR2P7yyy9hYmKC5ORk9O3bV449rfuMjY2hpaUFPT09WFtbA3jx+42Li0N5eTmuXr0KLS0tDBkyBElJSejZsyeSkpLg7+8PALhx4wYOHjyIlJQUtG/fHgCwbds22NnZYf/+/Rg0aJBS+1Oj71C0b98eiYmJuH79OgAgPT0dJ0+eRK9evQAAt27dwr1799CtWzfhHGNjY7Rt2xanT59+Zb3FxcV4+vSpxIuIiIioJvPy8kLXrl3h4eGBQYMGYdOmTXj8+LFwvHnz5lBXVxe2bWxskJubCwC4cuUKysvL4erqCgMDA+GVnJyMrKysarV///59hIeHw8XFBcbGxjAyMkJBQQGys7Pl29F6qlOnTsjPz8fFixeRnJwMf39/BAQECHctkpOTERAQAADIyMiAhoYG2rZtK5xvbm6Opk2bIiMjQ+mx1+g7FLNnz8bTp0/RrFkzqKuro7y8HLGxsQgJCQEA3Lt3DwBgZWUlcZ6VlZVwrCqLFy9GdHR0tWJoZWMoY/TVp+hHB9aVRxOa6eirOgS5cG5kpOoQaoUGuor/vsNKT/3NhWo4Z2NLVYcgFw10avT3W/WKibaewtuwNtBSeBt1kbq6Oo4dO4ZTp07h6NGjWLt2LebMmYPU1FQAgKampkR5kUiEiooKAEBBQQHU1dVx/vx5iaQDAAwMDKrV/siRI/Hw4UOsXr0aDg4O0NbWhp+fH0pKSuTQOzIxMYGXlxeSkpJw+vRpdO/eHZ07d8aQIUNw/fp13LhxQ7hDUdPU6H/Bd+7ciW3btmH79u24cOECEhISsGzZMiQkJLxVvR999BGePHkivO7cuSOniImIiIgURyQSoUOHDoiOjsbFixehpaWFffve/LQoHx8flJeXIzc3F02aNJF4vRxy8yYpKSmYNGkSevfujebNm0NbWxt//fXX23ap3tLS0kJ5ebnEPn9/fxw/fhwnTpxAQEAAzMzM4ObmhtjYWNjY2MDV1RUA4ObmhrKyMiGZBICHDx8iMzMT7u7uSu0HUMMTipkzZ2L27NkYOnQoPDw8MGLECEydOhWLFy8GAOEP4P79+xLn3b9//7V/HNra2jAyMpJ4EREREdVkqampWLRoEc6dO4fs7Gzs3bsXDx48gJub2xvPdXV1RUhICEJDQ7F3717cunULZ8+exeLFi/HDDz9Uq30XFxds3boVGRkZSE1NRUhISJ16zKyyOTo6IjU1Fbdv38Zff/2FiooKBAQE4MiRI9DQ0ECzZs0AvFivYtu2bRJ3J1xcXNCvXz+Eh4fj5MmTSE9Px3vvvYeGDRsK84qVqUYnFEVFRVBTkwxRXV1duH3n5OQEa2trJCYmCsefPn2K1NRU+Pn5KTVWIiIiIkUyMjLCiRMn0Lt3b7i6uuKTTz7B8uXLhbmlbxIXF4fQ0FBMnz4dTZs2RVBQENLS0mBvb1+t87/66is8fvwYLVu2xIgRIzBp0iRYWtaNYZeqMGPGDKirq8Pd3R0WFhbIzs5Gp06dUFFRIZE8BAQEoLy8XJg/8VJcXBxatWqFvn37ws/PD2KxGD/++GOloW/KUKPnULz77ruIjY2Fvb09mjdvjosXL2LFihUYNWoUgBe3/aZMmYKFCxfCxcUFTk5OmDt3Lmxtbd96+XkiIiKqX2raQnP/5ubmhsOHD1d57J+Ph33p3yswa2pqIjo6+pXzSMPCwhAWFiZsR0VFISoqStj28fFBWlqaxDkDBw6sVuyqUNMXmnN1da3yIUIvvzh/KSgoSGINtpdMTU2xZcsWhcUnjRqdUKxduxZz587F+PHjkZubC1tbW3zwwQeYN2+eUCYyMhKFhYUYO3Ys8vLy0LFjRxw+fBg6OjoqjJyIiOqj5ubK/2aQiEjVanRCYWhoiFWrVlXKsP9JJBIhJiYGMTExyguMiIiIiIgA1PA5FEREREREVLMxoSAiIiIiIpkxoSAiIiIiIpnV6DkUVHsYapmrOgT6G68FERERKRPvUBARERER
kcx4h+INlPEIwAa6zOuqw0RbT9Uh0N8CnY1VHYJcuJoYKrwNKz11hbdRF5jp6Ks6BPqbmY6Bwtvo4aj4NohIefhJloiIiIiIZMaEgoiIiIikEhYWhqCgIFWHQTUEhzwRERERAfDdNkSp7Z0L+U6p7cnT6tWrIRaLVR3Ga4kvzlVqeyKfBUptryZhQkFEREREAICSkhJoaWm9sZyxcd2YS0fywSFPRERERLVAQEAAJk6ciClTpsDU1BRWVlbYtGkTCgsL8f7778PQ0BBNmjTBTz/9BAAoLy/H6NGj4eTkBF1dXTRt2hSrV6+WqPPl0KXY2FjY2tqiadOm+Pjjj9G2bdtK7Xt5eSEmJkbivH/GNmnSJERGRsLMzAzW1taIiopS2O+iLiguLsakSZNgaWkJHR0ddOzYEWlpaQCApKQkiEQiJCYmwtfXF3p6emjfvj0yMzMl6jhw4ABatmwJHR0dODs7Izo6GmVlZUrvCxMKon+w0lOX6kVE9E8NdNSkehFJKyEhAQ0aNMDZs2cxceJEjBs3DoMGDUL79u1x4cIF9OjRAyNGjEBRUREqKirQqFEj7Nq1C9euXcO8efPw8ccfY+fOnRJ1JiYmIjMzE8eOHcOhQ4cQEhKCs2fPIisrSyjz22+/4fLlyxg+fPhrY9PX10dqaiqWLl2KmJgYHDt2TGG/i9ouMjISe/bsQUJCAi5cuIAmTZogMDAQjx49EsrMmTMHy5cvx7lz56ChoYFRo0YJx3799VeEhoZi8uTJuHbtGjZu3Ij4+HjExsYqvS/814yIiIiolvDy8sInn3wCFxcXfPTRR9DR0UGDBg0QHh4OFxcXzJs3Dw8fPsTly5ehqamJ6Oho+Pr6wsnJCSEhIXj//fcrJRT6+vrYvHkzmjdvLry8vLywfft2ocy2bdvQtm1bNGnS5JWxeXp6Yv78+XBxcUFoaCh8fX2RmJiosN9FbVZYWIj169fjs88+Q69eveDu7o5NmzZBV1cXX331lVAuNjYW/v7+cHd3x+zZs3Hq1Ck8f/4cABAdHY3Zs2dj5MiRcHZ2Rvfu3bFgwQJs3LhR6f1hQkFERERUS3h6ego/q6urw9zcHB4eHsI+KysrAEBubi4A4PPPP0erVq1gYWEBAwMDfPnll8jOzpao08PDo9K8iZCQECGhEIvF+PbbbxESElLt2ADAxsZGiIMkZWVlobS0FB06dBD2aWpqok2bNsjIyBD2/fN3amNjA+D/r216ejpiYmJgYGAgvMLDw5GTk4OioiIl9eQFTsomIiIiqiU0NSUX3BWJRBL7RCIRAKCiogI7duzAjBkzsHz5cvj5+cHQ0BCfffYZUlNTJerQ16+8sOSwYcMwa9YsXLhwAc+ePcOdO3cwZMjrn4JVVWwVFRVS9Y8kveraAkBBQQGio6MRHBxc6TwdHR3lBPg3JhREREREdVBKSgrat2+P8ePHC/v+OS/idRo1agR/f39s27YNz549Q/fu3WFpaamoUOudxo0bQ0tLCykpKXBwcAAAlJaWIi0tDVOmTKlWHS1btkRmZuZrh6EpCxMKIiIiojrIxcUFW7ZswZEjR+Dk5IStW7ciLS0NTk5O1To/JCQE8+fPR0lJCVauXKngaOsXfX19jBs3DjNnzoSZmRns7e2xdOlSFBUVYfTo0UhPT39jHfPmzUPfvn1hb2+PgQMHQk1NDenp6bh69SoWLlyohF78PyYUb1AXnsLRQLf294FqluZmmm8uVAvaUIa68G+IszG/lST5cjUxVHUI9cIHH3yAixcvYsiQIRCJRBg2bBjGjx8vPFb2TQYOHIiIiAioq6tzVWwF+PTTT1FRUYERI0YgPz8fvr6+OHLkCExNTat1fmBgIA4dOoSYmBgsWbIEmpqaaNasGcaMGaPgyCtjQkFERESEmr9ydVJSUqV9t2/frrTvnytYx8XFIS4uTuL44sWLhZ/j4+Nf2Z6JiYnwRKF/+/d5VcW2f//+V9atDDV95WodHR2sWbMGa9as
qXQsICCg0krk3t7elfYFBgYiMDBQoXFWBxMKkgsDTRNVh0B/47UgIiIiZWJCQUREVEs0NNRWdQhERJXU/sG9RERERESkMkwoiIiIiIhIZkwoiIiIiIhIZkwoiIiIiIhIZkwoiIiIiIhIZkwoiIiIiIhIZkwoiIiIiIhIZlyH4g3MdPQV3oaVnnqtrl9ZzHQMVB2CXFgbaKk6BPqbs7GlwttQ9L8hNvqNFVq/sijjWriaGCq8jbqAi2PWXAEBAfD29saqVatUHQqRBCYURERERAD6Hhih1PYO9duq1Pbqm4qkKUptTy1glVLbq0k45ImIiIiIqBYrKSlRaftMKIiIiIhqiYqKCkRGRsLMzAzW1taIiooSjmVnZ6Nfv34wMDCAkZERBg8ejPv37wvHw8LCEBQUJFHflClTEBAQIGzv3r0bHh4e0NXVhbm5Obp164bCwkLh+ObNm+Hm5gYdHR00a9YMX3zxhaK6Wqfdvn0bIpGo0uvltTh58iQ6deoEXV1d2NnZYdKkSRLXwdHREQsWLEBoaCiMjIwwduxYAMCePXvQvHlzaGtrw9HREcuXL1dKf5hQEBEREdUSCQkJ0NfXR2pqKpYuXYqYmBgcO3YMFRUV6NevHx49eoTk5GQcO3YMf/zxB4YMGVLtunNycjBs2DCMGjUKGRkZSEpKQnBwMMRiMQBg27ZtmDdvHmJjY5GRkYFFixZh7ty5SEhIUFR36yw7Ozvk5OQIr4sXL8Lc3BydO3dGVlYWevbsiQEDBuDy5cv47rvvcPLkSUREREjUsWzZMnh5eeHixYuYO3cuzp8/j8GDB2Po0KG4cuUKoqKiMHfuXMTHxyu8P5xDQURERFRLeHp6Yv78+QAAFxcXrFu3DomJiQCAK1eu4NatW7CzswMAbNmyBc2bN0daWhpat279xrpzcnJQVlaG4OBgODg4AAA8PDyE4/Pnz8fy5csRHBwMAHBycsK1a9ewceNGjBw5Uq79rOvU1dVhbW0NAHj+/DmCgoLg5+eHqKgojB07FiEhIZgyZQqAF9d5zZo18Pf3x/r166GjowMA6NKlC6ZPny7UGRISgq5du2Lu3LkAAFdXV1y7dg2fffYZwsLCFNof3qEgIiIiqiU8PT0ltm1sbJCbm4uMjAzY2dkJyQQAuLu7w8TEBBkZGdWq28vLC127doWHhwcGDRqETZs24fHjxwCAwsJCZGVlYfTo0TAwMBBeCxcuRFZWlvw6WA+NGjUK+fn52L59O9TU1JCeno74+HiJ33NgYCAqKipw69Yt4TxfX1+JejIyMtChQweJfR06dMCNGzdQXl6u0D7wDgXJhagoT/qT9OQeBoHXgoioLtPU1JTYFolEqKioqNa5ampqwvCll0pLS4Wf1dXVcezYMZw6dQpHjx7F2rVrMWfOHKSmpkJP78V/FJs2bULbtm0l6lBXrxuPp1eFhQsX4siRIzh79iwMDV882rqgoAAffPABJk2aVKm8vb298LO+vuKXNqguJhREREREtZybmxvu3LmDO3fuCHcprl27hry8PLi7uwMALCwscPXqVYnzLl26JJGkiEQidOjQAR06dMC8efPg4OCAffv2Ydq0abC1tcUff/yBkJAQ5XWsDtuzZw9iYmLw008/oXHj/19TqGXLlrh27RqaNGkiVX1ubm5ISUmR2JeSkgJXV1eFJ31MKIiIiIhquW7dusHDwwMhISFYtWoVysrKMH78ePj7+wtDY7p06YLPPvsMW7ZsgZ+fH7755htcvXoVPj4+AIDU1FQkJiaiR48esLS0RGpqKh48eAA3NzcAQHR0NCZNmgRjY2P07NkTxcXFOHfuHB4/foxp06aprO+10dWrVxEaGopZs2ahefPmuHfvHgBAS0sLs2bNQrt27RAREYExY8ZAX18f165dw7Fjx7Bu3bpX1jl9+nS0bt0aCxYswJAhQ3D69GmsW7dOKU/i4hwKIiIiolpOJBLhwIEDMDU1RefOndGtWzc4Ozvju+++E8oE
BgZi7ty5iIyMROvWrZGfn4/Q0FDhuJGREU6cOIHevXvD1dUVn3zyCZYvX45evXoBAMaMGYPNmzcjLi4OHh4e8Pf3R3x8PJycnJTe39ru3LlzKCoqwsKFC2FjYyO8goOD4enpieTkZFy/fh2dOnWCj48P5s2bB1tb29fW2bJlS+zcuRM7duxAixYtMG/ePMTExCh8QjbAOxREREREAGr+ytVJSUmV9u3fv1/42d7eHgcOHHhtHdHR0YiOjq7ymJubGw4fPvza84cPH47hw4e/MdaaoCavXB0WFvbaD/qtW7fG0aNHX3n89u3bVe4fMGAABgwY8JbRSY8JxRuYaCt+tqqriaHC2yCSJzMdxU8EU0YbBpomCm9D0QzyHkp/koX843hbyrgWzsaWCm+DqkcZ/7cSkfJwyBMREREREcmMCQUREREREcmMCQUREREREcmMcyiIiIhqiQa6/B6QiGoe/stEREREREQyY0JBREREREQyY0JBREREREQy4xwKIiIiOVHG+ilERDUN71AQERER1QIBAQGYMmWKqsMgqoR3KIiIiIgAfPDLGKW2t7HLZqW2V99U7H1fqe2pBccptb2ahAkFyYW48KHU54j0FBAI8VoQERGRUnHIExGRAjkbW0r1qokMNE2kfhG9jpmOgVQv+n8VFRWIjIyEmZkZrK2tERUVJRxbsWIFPDw8oK+vDzs7O4wfPx4FBQXC8fj4eJiYmGD//v1wcXGBjo4OAgMDcefOHaFMVFQUvL29sXHjRtjZ2UFPTw+DBw/GkydPAAAnTpyApqYm7t27JxHXlClT0KlTJ8V2vo7Jz89HSEgI9PX1YWNjg5UrV0oMa3v8+DFCQ0NhamoKPT099OrVCzdu3FBt0K/AhIKIiIiolkhISIC+vj5SU1OxdOlSxMTE4NixYwAANTU1rFmzBr/99hsSEhLwyy+/IDIyUuL8oqIixMbGYsuWLUhJSUFeXh6GDh0qUebmzZvYuXMnvv/+exw+fBgXL17E+PHjAQCdO3eGs7Mztm7dKpQvLS3Ftm3bMGrUKAX3vm6ZNm0aUlJScPDgQRw7dgy//vorLly4IBwPCwvDuXPncPDgQZw+fRpisRi9e/dGaWmpCqOuGhMKIiIiolrC09MT8+fPh4uLC0JDQ+Hr64vExEQAL+4SvPPOO3B0dESXLl2wcOFC7Ny5U+L80tJSrFu3Dn5+fmjVqhUSEhJw6tQpnD17Vijz/PlzbNmyBd7e3ujcuTPWrl2LHTt2CHclRo8ejbi4/58v8P333+P58+cYPHiwEn4DdUN+fj4SEhKwbNkydO3aFS1atEBcXBzKy8sBADdu3MDBgwexefNmdOrUCV5eXti2bRvu3r2L/fv3qzb4KnAOBdE/uJoYqjoEIiKiV/L09JTYtrGxQW5uLgDg559/xuLFi/H777/j6dOnKCsrw/Pnz1FUVAQ9vReT5TQ0NNC6dWvh/GbNmsHExAQZGRlo06YNAMDe3h4NGzYUyvj5+aGiogKZmZmwtrZGWFgYPvnkE5w5cwbt2rVDfHw8Bg8eDH19Pja5uv744w+UlpYKv3MAMDY2RtOmTQEAGRkZ0NDQQNu2bYXj5ubmaNq0KTIyMpQe75swoSAiIqolmptpqjoEUjFNTcn3gEgkQkVFBW7fvo2+ffti3LhxiI2NhZmZGU6ePInRo0ejpKRESCjkwdLSEu+++y7i4uLg5OSEn376CUlJSXKrn2ofmYc85eTkYODAgbCwsICZmRneffdd/PHHH/KMjYiIiIiq4fz586ioqMDy5cvRrl07uLq64s8//6xUrqysDOfOnRO2MzMzkZeXBzc3N2Ffdna2xLlnzpyBmpqa8O05AIwZMwbfffcdvvzySzRu3BgdOnRQUM/qJmdnZ2hqaiItLU3Y9+TJE1y/fh0A4ObmhrKyMqSmpgrHHz58iMzMTLi7uys93jeROaEYNWoUWrRogeTkZPzyyy+wsrLC8OHD5RkbEREREVVDkyZNUFpa
irVr1+KPP/7A1q1bsWHDhkrlNDU1MXHiRKSmpuL8+fMICwtDu3btJIbe6OjoYOTIkUhPT8evv/6KSZMmYfDgwbC2thbKBAYGwsjICAsXLsT77yt3vYe6wNDQECNHjsTMmTNx/Phx/Pbbbxg9ejTU1NQgEong4uKCfv36ITw8HCdPnkR6ejree+89NGzYEP369VN1+JVUO6GYPHkyCgsLhe2bN29i1qxZcHd3h7e3NyZPnozMzEy5B3j37l289957MDc3h66uLjw8PCQya7FYjHnz5sHGxga6urro1q1bjX2kFhEREZEieHl5YcWKFViyZAlatGiBbdu2YfHixZXK6enpYdasWRg+fDg6dOgAAwMDfPfddxJlmjRpguDgYPTu3Rs9evSAp6cnvvjiC4kyampqCAsLQ3l5OUJDQxXat7pqxYoV8PPzQ9++fdGtWzd06NABbm5u0NHRAQDExcWhVatW6Nu3L/z8/CAWi/Hjjz9WGvZWE1R7DkWjRo3QqlUrLF26FP/5z38wZMgQtG3bVnh81d69exESEiLX4B4/fowOHTrgnXfewU8//QQLCwvcuHEDpqamQpmlS5dizZo1SEhIgJOTE+bOnYvAwEBcu3ZNuCBEREREb1LTV66uap7CP5/4M3XqVEydOlXi+IgRIyqdExwcjODg4Ne2NW7cOIwbN+61Ze7evYvevXvDxsbmteVUpaavXG1oaIht27YJ24WFhYiOjsbYsWMBAKamptiyZYuqwpNKtROKmTNnYuDAgRg/fjzi4+Oxdu1atG3bFklJSSgvL8fSpUsxcOBAuQa3ZMkS2NnZSTyazMnJSfhZLBZj1apV+OSTT4TbP1u2bIGVlRX2799f6bnKRERERPR2njx5gitXrmD79u04ePCgqsOptS5evIjff/8dbdq0wZMnTxATEwMANXJI05tINYfi5Uz+AQMGwN/fH7dv38ayZcuwatUqDBo0CCKRSK7BHTx4EL6+vhg0aBAsLS3h4+ODTZs2Ccdv3bqFe/fuoVu3bsI+Y2NjtG3bFqdPn5ZrLERERET04gNvjx498OGHH6J79+6qDqdWW7ZsGby8vNCtWzcUFhbi119/RYMGDVQdltSknpT98OFDhISEIC0tDRcvXoSfnx8uX76siNjwxx9/YP369XBxccGRI0cwbtw4TJo0CQkJCQAgLLBiZWUlcZ6VlVWlJeH/qbi4GE+fPpV4EREREdVlYWFhyMvLe22ZqKgoXLp06bVlkpKSUFRUhJUrV8ovuHrIx8cH58+fR0FBAR49eoRjx47Bw8ND1WHJpNpDnhITEzF8+HA8ePAAtra22LVrF77++mscP34cw4YNQ58+fRAdHQ1dXV25BVdRUQFfX18sWrQIwItf/NWrV7FhwwaMHDlS5noXL16M6OjoapVta+0vczvV5WxsqfA26gIDTRNVh0B/M9GW3/PMVdkGVY+hlnmdaIOqx1bEa0FE0qn2HYoJEyYgMjISRUVFWLduHaZMmQIAeOedd3DhwgVoamrC29tbrsHZ2NhUetaum5sbsrOzAUB4fNn9+/clyty/f1/i0Wb/9tFHH+HJkyfC686dO3KNm4iIiIiovqh2QpGTk4M+ffpAR0cHPXv2xIMHD4Rj2traiI2Nxd69e+UaXIcOHSo9ivb69etwcHAA8GJOh7W1NRITE4XjT58+RWpqKvz8/F5Zr7a2NoyMjCReREREREQkvWoPefrPf/6DgQMH4j//+Q9OnjyJ3r17VyrTvHlzuQY3depUtG/fHosWLcLgwYNx9uxZfPnll/jyyy8BvFhufsqUKVi4cCFcXFyEx8ba2toiKChIrrHQGxTlqToCeonXgoiIiJSo2gnFV199hY0bN+L333/He++9h1GjRikyLgBA69atsW/fPnz00UeIiYmBk5MTVq1aJbHeRWRkJAoLCzF27Fjk5eWhY8eOOHz4MNegICIiIiJSgmonFFpaWpg4caIiY6lS37590bdv31ceF4lEiImJ
EZ7dS0REREREyiP1Y2OJiIiIqOYICAgQHpZDpArVvkNBREREVJfNOT1Wqe3F+n2p1Pbqm9KNQ5XanuYHO+RSz8v1Qvbv3y+X+pSBdyiIiIiIiEhmTCiIiIiIaonCwkKEhobCwMAANjY2WL58ucTxx48fIzQ0FKamptDT00OvXr1w48YNiTKbNm2CnZ0d9PT00L9/f6xYsQImJiZK7AUBwO7du+Hh4QFdXV2Ym5ujW7dumDlzJhISEnDgwAGIRCKIRCIkJSUBAK5cuYIuXboI5ceOHYuCggKhvrCwMAQFBSE6OhoWFhYwMjLChx9+iJKSEoX3ReqEwtnZGQ8fPqy0Py8vD87OznIJioiIqDYy0daT6kUkrZkzZyI5ORkHDhzA0aNHkZSUhAsXLgjHw8LCcO7cORw8eBCnT5+GWCxG7969UVpaCgBISUnBhx9+iMmTJ+PSpUvo3r07YmNjVdWdeisnJwfDhg3DqFGjkJGRgaSkJAQHB2P+/PkYPHgwevbsiZycHOTk5KB9+/YoLCxEYGAgTE1NkZaWhl27duHnn39GRESERL2JiYlCfd9++y327t2L6OhohfdH6jkUt2/fRnl5eaX9xcXFuHv3rlyCIvlyNTFUdQhyYaPfWNUhyEVDQ21Vh/DWzHQMFN5GW2t/hbdhqGWu8DYU/r5VwrojBn9mSX+Sg3TFDUulrF9TyvIADDRNpD+pHhIXVv7S8E1EUuZGvBayKSgowFdffYVvvvkGXbt2BQAkJCSgUaNGAIAbN27g4MGDSElJQfv27QEA27Ztg52dHfbv349BgwZh7dq16NWrF2bMmAEAcHV1xalTp3Do0CHVdKqeysnJQVlZGYKDg4UFmz08PAAAurq6KC4uhrW1tVA+ISEBz58/x5YtW6Cvrw8AWLduHd59910sWbIEVlZWAF48lfXrr7+Gnp4emjdvjpiYGMycORMLFiyAmpriBiZVO6E4ePCg8PORI0dgbGwsbJeXlyMxMRGOjo5yDY6IiIiIXsjKykJJSQnatm0r7DMzM0PTpk0BABkZGdDQ0JA4bm5ujqZNmyIjIwMAkJmZif79+0vU26ZNGyYUSubl5YWuXbvCw8MDgYGB6NGjBwYOHAhTU9Mqy2dkZMDLy0tIJgCgQ4cOqKioQGZmppBQeHl5QU/v/zN8Pz8/FBQU4M6dO0LiogjVTiherjwtEokwcuRIiWOamppwdHSsNI6PiIiIiIgkqaur49ixYzh16hSOHj2KtWvXYs6cOUhNTVV1aDKp9r2PiooKVFRUwN7eHrm5ucJ2RUUFiouLkZmZ+doF6IiIiIhIdo0bN4ampqbEh87Hjx/j+vXrAAA3NzeUlZVJHH/48CEyMzPh7u4OAGjatCnS0tIk6v33NimHSCRChw4dEB0djYsXL0JLSwv79u2DlpZWpekFbm5uSE9PR2FhobAvJSUFampqwh0qAEhPT8ezZ8+E7TNnzsDAwAB2dnYK7YvUg6lu3bqFBg0aKCIWIiIiInoFAwMDjB49GjNnzsQvv/yCq1evIiwsTBgb7+Lign79+iE8PBwnT55Eeno63nvvPTRs2BD9+vUDAEycOBE//vgjVqxYgRs3bmDjxo346aefIBKJVNm1eic1NRWLFi3CuXPnkJ2djb179+LBgwdwc3ODo6MjLl++jMzMTPz1118oLS1FSEgIdHR0MHLkSFy9ehXHjx/HxIkTMWLECGG4EwCUlJRg9OjRuHbtGn788UfMnz8fERERCp0/Aci4sF1iYiISExOFOxX/9PXXX8slMCIiIiKS9Nlnn6GgoADvvvsuDA0NMX36dDx58kQ4HhcXh8mTJ6Nv374oKSlB586d8eOPP0JT88WTDDp06IANGzYgOjoan3zyCQIDAzF16lSsW7dOVV2ql4yMjHDixAmsWrUKT58+hYODA5YvX45evXrB19cXSUlJ8PX1RUFBAY4fP46AgAAcOXIEkydPRuvWraGnp4cBAwZgxYoVEvV27doVLi4u
6Ny5M4qLizFs2DBERUUpvD9SJxTR0dGIiYmBr68vbGxsmNESEREpiZmO/psLkcxqw8rVBgYG2Lp1K7Zu3SrsmzlzpvCzqakptmzZ8to6wsPDER4eLrHdpEkT+QerYvJauVoR3NzccPjw4SqPWVhY4OjRo5X2e3h44Jdffnlj3dHR0Up5VOw/SZ1QbNiwAfHx8RgxYoQi4iEiIiIiBVq2bBm6d+8OfX19/PTTT0hISMAXX3yh6rCoFpM6oSgpKRGebUwkePTkzWX+TXFPL6vfeC2IiOg1zp49i6VLlyI/Px/Ozs5Ys2YNxowZo+qwqBaTOqEYM2YMtm/fjrlz5yoiHiIiIiJSoJ07d6o6BFKA+Ph4lbUtdULx/PlzfPnll/j555/h6ekpTPJ56d+TQ4iIiIiIqO6SOqG4fPkyvL29AQBXr16VOMYJ2kREVJ+Z6RioOgQiIqWTOqE4fvy4IuKosURFedKdoPfmIv9moGki/Un1kDKuhTI0N9d8cyHi3141iW/9T+pzRDVwzoz4QZZU5WXpg41+Y+lPIoXgtSCqW2Re5eLmzZs4cuSIsBqfWCyWW1BERERERFQ7SJ1QPHz4EF27doWrqyt69+6NnJwcAMDo0aMxffp0uQdIREREREQ1l9QJxdSpU6GpqYns7Gzo6f3/GIMhQ4a8coEOIiIiIiKqm6ROKI4ePYolS5agUaNGEvtdXFzw3//+V26BERERERHVVykpKfDw8ICmpiaCgoJeua8mkHpSdmFhocSdiZcePXoEbW1tuQRFREREpGxr0scptb1JXuuV2l5UVBT279+PS5cuKbVdVXke865S29OZ971c65s2bRq8vb3x008/wcDA4JX7agKp71B06tQJW7ZsEbZFIhEqKiqwdOlSvPPOO3INjoiIiIioPsrKykKXLl3QqFEjmJiYvHJfTSB1QrF06VJ8+eWX6NWrF0pKShAZGYkWLVrgxIkTWLJkiSJiJCIiIiJA+BK3SZMm0NbWhr29PWJjYwEAs2bNgqurK/T09ODs7Iy5c+eitLQUwItVlKOjo5Geng6RSASRSKTSlZUJKC4uxqRJk2BpaQkdHR107NgRaWlpuH37NkQiER4+fIhRo0YJ16qqfTWF1AlFixYtcP36dXTs2BH9+vVDYWEhgoODcfHiRTRuzOdKExERESnKRx99hE8//RRz587FtWvXsH37dlhZWQEADA0NER8fj2vXrmH16tXYtGkTVq5cCeDFw3OmT5+O5s2bIycnBzk5ORgyZIgqu1LvRUZGYs+ePUhISMCFCxfQpEkTBAYGwtDQEDk5OTAyMsKqVauQk5ODQYMGVdpXk66f1HMoAMDY2Bhz5syRdyxERERE9Ar5+flYvXo11q1bh5EjRwIAGjdujI4dOwIAPvnkE6Gso6MjZsyYgR07diAyMhK6urowMDCAhoYGrK2tVRI//b/CwkKsX78e8fHx6NWrFwBg06ZNOHbsGL7++mvMnDkTIpEIxsbGwvXS19evtK+mkDqhiIuLg4GBAQYNGiSxf9euXSgqKhLe4EREREQkPxkZGSguLkbXrl2rPP7dd99hzZo1yMrKQkFBAcrKymBkZKTkKKk6srKyUFpaig4dOgj7NDU10aZNG2RkZKgwMtlIPeRp8eLFaNCgQaX9lpaWWLRokVyCIiIiIiJJurq6rzx2+vRphISEoHfv3jh06BAuXryIOXPmoKSkRIkRUn0l9R2K7OxsODk5Vdrv4OCA7OxsuQRF8mWiXfkxv/ImfpIv9TkiadvIvCBd/T79pWwBcDa2lPocaTXQkTqPl4oyroWrluLnS4kfZElVXuQgfRuiojzpTpDhT8lA00T6k2oY8a3/SX2O1Nfj0RPpystwvSHlewoW0j+5sC5cb6q5XFxcoKuri8TERIwZM0bi2KlTp+Dg4CAxJP3f64NpaWmhvLxcKbHS6zVu3BhaWlpISUmBg8OLf9BKS0uRlpaGKVOmqDY4GUidUFhaWuLy5ctwdHSU
2J+eng5zc3N5xUVERERE/6Cjo4NZs2YhMjISWlpa6NChAx48eIDffvsNLi4uyM7Oxo4dO9C6dWv88MMP2Ldvn8T5jo6OuHXrFi5duoRGjRrB0NCQa4ipiL6+PsaNG4eZM2fCzMwM9vb2WLp0KYqKijB69GhVhyc1qb8qHTZsGCZNmoTjx4+jvLwc5eXl+OWXXzB58mQMHTpUETESEREREYC5c+di+vTpmDdvHtzc3DBkyBDk5ubiP//5D6ZOnYqIiAh4e3vj1KlTmDt3rsS5AwYMQM+ePfHOO+/AwsIC3377rYp6QQDw6aefYsCAARgxYgRatmyJmzdv4siRIzA1NVV1aFKT+g7FggULcPv2bXTt2hUaGi9Or6ioQGhoKOdQEBERUa2l7JWrZaGmpoY5c+ZU+bTNpUuXYunSpRL7/jl8RltbG7t371Z0iDWGvFeuljcdHR2sWbMGa9asqfJ4Xl5etfbVBFIlFGKxGPfu3UN8fDwWLlyIS5cuQVdXFx4eHsL4LyIiIiIiqj+kTiiaNGkijNVzcXFRVFxERERERFQLSDWHQk1NDS4uLnj48KGi4iEiIiIiolpE6knZn376KWbOnImrV68qIh4iIiIiIqpFpJ6UHRoaiqKiInh5eUFLS6vSIiuPHj2SW3BERERERFSzSZ1QrFq1SgFhEBERERFRbSR1QjFy5EhFxEFERERERLWQ1AkFAGRlZSEuLg5ZWVlYvXo1LC0t8dNPP8He3h7NmzeXd4wqVXE2WaryagH9pW7DUEuxK4yb6RgotH6SjpmOvqpDeGviQukfzCDSk/KER0+kKy/Dk6ul7YfUfVCC8sx7Up+jFiD/OGqFojxVR1A7KOH3JJK2jRr4t0dE/0/qSdnJycnw8PBAamoq9u7di4KCAgBAeno65s+fL/cAiYiIagtDLXOpXkREdYHUCcXs2bOxcOFCHDt2DFpaWsL+Ll264MyZM3INjoiIiIheCAgIkFj5+t8cHR1lmusaFRUFb29vmeMiknrI05UrV7B9+/ZK+y0tLfHXX3/JJSgiIiIiZdueOUGp7Q1v+rlc60tLS4O+fu0fVisvhZO6KbU9/TU/K7W9mkTqOxQmJibIycmptP/ixYto2LChXIIiIiIiIulYWFhAT+/VE05KS0uVGA3VJ1InFEOHDsWsWbNw7949iEQiVFRUICUlBTNmzEBoaKgiYqTa4NET6V+kGLwWRER1VllZGSIiImBsbIwGDRpg7ty5EIvFACoPeRKJRFi/fj3+85//QF9fH7GxsQBeLFJsZWUFQ0NDjB49Gs+fP1dFV+q94uJiTJo0CZaWltDR0UHHjh2RlpYGAEhKSoJIJEJiYiJ8fX2hp6eH9u3bIzMzU8VRV03qhGLRokVo1qwZ7OzsUFBQAHd3d3Tu3Bnt27fHJ598oogYiYiIiAhAQkICNDQ0cPbsWaxevRorVqzA5s2bX1k+KioK/fv3x5UrVzBq1Cjs3LkTUVFRWLRoEc6dOwcbGxt88cUXSuwBvRQZGYk9e/YgISEBFy5cQJMmTRAYGCixSPScOXOwfPlynDt3DhoaGhg1apQKI341qedQaGlpYdOmTZg3bx6uXLmCgoIC+Pj4wMXFRRHxERER0d8CYanqEEjF7OzssHLlSohEIjRt2hRXrlzBypUrER4eXmX54cOH4/333xe2hw4ditGjR2P06NEAgIULF+Lnn3/mXQolKywsxPr16xEfH49evXoBADZt2oRjx47hq6++QuvWrQEAsbGx8Pf3B/DiwUh9+vTB8+fPoaOjo7LYq1LtOxQVFRVYsmQJOnTogNatW+Pzzz/HO++8g8GDBzOZICIiIlKCdu3aQSQSCdt+fn64ceMGysvLqyzv6+srsZ2RkYG2bdtK7PPz85N/oPRaWVlZKC0tRYcOHYR9mpqaaNOmDTIyMoR9np6ews82NjYAgNzcXOUFWk3VTihiY2Px8ccfw8DAAA0bNsTq1asxYYJyn4ZARERERNXHpz7V
bpqamsLPLxPJiooKVYXzStVOKLZs2YIvvvgCR44cwf79+/H9999j27ZtNbJTRERERHVRamqqxPaZM2fg4uICdXX1ap3v5uZWZR2kXI0bN4aWlhZSUlKEfaWlpUhLS4O7u7sKI5NNtROK7Oxs9O7dW9ju1q0bRCIR/vzzT4UERkRERESSsrOzMW3aNGRmZuLbb7/F2rVrMXny5GqfP3nyZHz99deIi4vD9evXMX/+fPz2228KjJiqoq+vj3HjxmHmzJk4fPgwrl27hvDwcBQVFQnzW2qTak/KLisrqzQBRFNTs+4/01gJj9Q00DRRaP02+o0VWr+yiJ/kS1Ve9OYiKuFsXAcmVRblKbyJ4u8vSVVex0f6NsS/pry50D+IgsdI34iCld8vlPoczTcXUTpl/H2Lb/1PujYcZGikLpDl/736+rtSgdDQUDx79gxt2rSBuro6Jk+ejLFjx1b7/CFDhiArKwuRkZF4/vw5BgwYgHHjxuHIkSMKjJqq8umnn6KiogIjRoxAfn4+fH19ceTIEZiamqo6NKlVO6EQi8UICwuDtra2sO/58+f48MMPJcbn7d27V74REhERESmBvFeulrekpCTh5/Xr11c6fvv2bYntl+tT/NvHH3+Mjz/+WGLfkiVL3jq+mqamr1yto6ODNWvWYM2aNZWOBQQEVLp+3t7er7ymqlbthGLkyJGV9r333ntyDYaIiKg2U/QdZyKimqjaCUVcXJwi4yAiIiIiolpI6pWyiYiIiIiIXmJCQUREREREMmNCQUREREREMqv2HAoiIiJSLWkffQvU48ffEpHS1Ko7FJ9++ilEIhGmTJki7Hv+/DkmTJgAc3NzGBgYYMCAAbh//77qgiQiIiIiqkdqzR2KtLQ0bNy4EZ6enhL7p06dih9++AG7du2CsbExIiIiEBwcLLGUOSle+YNnUp9Tq7LZWoTXgoiIiJSpVnyOKCgoQEhICDZt2iSxeuCTJ0/w1VdfYcWKFejSpQtatWqFuLg4nDp1CmfOnFFhxERERERE9UOtSCgmTJiAPn36oFu3bhL7z58/j9LSUon9zZo1g729PU6fPv3K+oqLi/H06VOJFxEREVFNFhAQIDHsWx7i4+NhYmIi1zpJdiKRCPv37692+aSkJIhEIuTl5Skspuqo8UOeduzYgQsXLiAtLa3SsXv37kFLS6vSH4KVlRXu3bv3yjoXL16M6OjoarVfcvUvqeLVCZaquFIY5D2U/iQL+cfxtsozX31Nq6IWoJg43hZX0q2e/GvSvW91FBTHW3uQJV15i3cUEwdRNYmf5Et9jkjaNgql+/sW6UnZgIwO/zdSOQ39rafDUqW2V988GNpeqe1Z7Dj11nXk5ORIjMaRh6ioKOzfvx+XLl2Sa73/VKPvUNy5cweTJ0/Gtm3boKMjv48LH330EZ48eSK87ty5I7e6iYiIiIikVVJSAmtra2hra6s6FKnV6ITi/PnzyM3NRcuWLaGhoQENDQ0kJydjzZo10NDQgJWVFUpKSird5rl//z6sra1fWa+2tjaMjIwkXkRERG/tQZZ0LyIplZWVISIiAsbGxmjQoAHmzp0LsVgM4MWQ7hkzZqBhw4bQ19dH27ZtkZSUJHF+fHw87O3toaenh/79++PhQxlGMZBcBAQEICIiAlOmTEGDBg0QGBhYacjTqVOn4O3tDR0dHfj6+mL//v0QiUSV7jacP38evr6+0NPTQ/v27ZGZmQngxfWOjo5Geno6RCIRRCIR4uPj5d6XGp1QdO3aFVeuXMGlS5eEl6+vL0JCQoSfNTU1kZiYKJyTmZmJ7Oxs+Pn5qTByIiIiIvlLSEiAhoYGzp49i9WrV2PFihXYvHkzACAiIgKnT5/Gjh07cPnyZQwaNAg9e/bEjRs3AACpqakYPXo0IiIicOnSJbzzzjtYuHChKrtT7yUkJEBLSwspKSnYsGGDxLGnT5/i3XffhYeHBy5cuIAFCxZg1qxZVdYzZ84cLF++HOfOnYOGhgZG
jRoFABgyZAimT5+O5s2bIycnBzk5ORgyZIjc+1Gj51AYGhqiRYsWEvv09fVhbm4u7B89ejSmTZsGMzMzGBkZYeLEifDz80O7du1UETIRERGRwtjZ2WHlypUQiURo2rQprly5gpUrVyIwMBBxcXHIzs6Gra0tAGDGjBk4fPgw4uLisGjRIqxevRo9e/ZEZOSLuSKurq44deoUDh8+rMou1WsuLi5YurTquTTbt2+HSCTCpk2boKOjA3d3d9y9exfh4eGVysbGxsLf3x8AMHv2bPTp0wfPnz+Hrq4uDAwMoKGh8drRO2+rRt+hqI6VK1eib9++GDBgADp37gxra2vs3btX1WERERERyV27du0gEv3/NHg/Pz/cuHEDV65cQXl5OVxdXWFgYCC8kpOTkZX1YnhdRkYG2rZtK1EfR3SoVqtWrV55LDMzE56enhLziNu0aVNl2X+u02ZjYwMAyM3NlVOUb1aj71BU5d9jAXV0dPD555/j888/V01ARERERCpWUFAAdXV1nD9/Hurq6hLHDAwMVBQVvYm+vr5c6tHU1BR+fplwVlRUyKXu6qh1CQURERFRfZWamiqxfebMGbi4uMDHxwfl5eXIzc1Fp06dqjzXzc2tyvOpZmratCm++eYbFBcXC09+qmoZhTfR0tJCeXm5vMOTUOuHPBERERHVF9nZ2Zg2bRoyMzPx7bffYu3atZg8eTJcXV0REhKC0NBQ7N27F7du3cLZs2exePFi/PDDDwCASZMm4fDhw1i2bBlu3LiBdevWcf5EDTZ8+HBUVFRg7NixyMjIwJEjR7Bs2TIAkBj29iaOjo64desWLl26hL/++gvFxcVyj5UJBdE/2Og3lupFRESkTKGhoXj27BnatGmDCRMmYPLkyRg7diwAIC4uDqGhoZg+fTqaNm2KoKAgpKWlwd7eHsCL+RebNm3C6tWr4eXlhaNHj+KTTz5RZXfoNYyMjPD999/j0qVL8Pb2xpw5czBv3jwAkGp9tgEDBqBnz5545513YGFhgW+//VbusXLIExERERFq/srV/5xHun79+krHNTU1ER0djejo6FfWMWrUKOGRoi9Nnz5dbjHWJPJYuVqR/j0vGICwpshL7du3R3p6urC9bds2aGpqCkliQEBApXO8vb0l9mlra2P37t1yjLwyJhRERERERDXQli1b4OzsjIYNGyI9PR2zZs3C4MGDoaurq+rQJDChqA/+91/pz7GQfxhvq/x+oVTlNd9chGT16In05zhIV/zZw2fSt0GKIcv1ptpLGde7KE/xbRDVAffu3cO8efNw79492NjYYNCgQYiNjVV1WJUwoSC5kPbDPsAP/IrCa0FERFQ3REZGCgsR1mSclE1ERERERDLjHQoiIiJ54VAeIqqHeIeCiIiIiIhkxoSCiIiIiIhkxoSCiIiIiIhkxjkUREREtQUf4UtENRDvUBARERHVASKRCPv371d1GFRNAQEBmDJlSrXL79+/H02aNIG6urpU5ykD71AQERERAThzL0qp7bWzlm97OTk5MDU1lWudtVl2dx+ltmd/7KJC6//ggw/w/vvvY9KkSTA0NERYWBjy8vJqRBLJhIKIiIioDrC2tlZ1CKQgBQUFyM3NRWBgIGxtbVUdTiVMKN4g99f/SVXeXpZGHmRJV97iHamKi5/kS1c/AJHUZyhe+V/PVB2CXNjoN1Z1CG+tzrynHkj3npJljKj4t6tSlRcFSFd/Xfm7KM+8J1V5tQDFxEFKIu1cEAfFhFHbBAQEwNPTEzo6Oti8eTO0tLTw4YcfIioqCsCLIU/79u1DUFAQbt++DScnJ+zZswdr165FamoqXFxcsGHDBvj5+Ql1njx5Eh999BHOnTuHBg0aoH///li8eDH09fVV1Mv6qbi4GHPmzMG3336LvLw8tGjRAkuWLEFAQACSkpLwzjsvPvt16dIFAODv74/k5GQAL647ABw/fhwBAQEqiZ9zKIiIiIhqiYSEBOjr6yM1NRVLly5FTEwM
jh079sryc+bMwYwZM3Dp0iW4urpi2LBhKCsrAwBkZWWhZ8+eGDBgAC5fvozvvvsOJ0+eREREhLK6Q3+LiIjA6dOnsWPHDly+fBmDBg1Cz549cePGDbRv3x6ZmZkAgD179iAnJwcHDx7E4MGD0bNnT+Tk5CAnJwft27dXWfxMKIiIiIhqCU9PT8yfPx8uLi4IDQ2Fr68vEhMTX1l+xowZ6NOnD1xdXREdHY3//ve/uHnzJgBg8eLFCAkJwZQpU+Di4oL27dtjzZo12LJlC54/f66sLtV72dnZiIuLw65du9CpUyc0btwYM2bMQMeOHREXFwctLS1YWloCAMzMzGBtbQ0jIyPo6upCW1sb1tbWsLa2hpaWlsr6wCFPRERERLWEp6enxLaNjQ1yc3OrVd7GxgYAkJubi2bNmiE9PR2XL1/Gtm3bhDJisRgVFRW4desW3Nzc5Bw9VeXKlSsoLy+Hq6urxP7i4mKYm5urKCrpMKEgIiIiqiU0NTUltkUiESoqKqpV/uVY+5flCwoK8MEHH2DSpEmVzrO3l2lWKMmgoKAA6urqOH/+PNTV1SWOGRgYqCgq6TChICIikhPx+d+kKi/iZGNSoZYtW+LatWto0qSJqkOp13x8fFBeXo7c3Fx06tSp2udpaWmhvLxcgZFVHxMKkou68qSZuoDXgoiIqmPWrFlo164dIiIiMGbMGOjr6+PatWs4duwY1q1bp+rw6g1XV1eEhIQgNDQUy5cvh4+PDx48eIDExER4enqiT58+VZ7n6OiII0eOIDMzE+bm5jA2Nq50B0tZOCmbiIiIqB7y9PREcnIyrl+/jk6dOsHHxwfz5s2rkesc1HVxcXEIDQ3F9OnT0bRpUwQFBSEtLe21Q8/Cw8PRtGlT+Pr6wsLCAikpKUqMWBLvUBARERFB/itXy1tSUlKlff9cJVksFgs/Ozo6SmwDgImJSaV9rVu3xtGjR+UaZ02h6JWr39Y/r6empiaio6MRHR1dZdmqrp2FhUWNuXa8Q0FERERERDJjQkFERERERDJjQkFERERERDLjHIqaoChP1REQkaI8eqLqCOglXotqeZZ0R+pz9IOlK1929oZU5TV9pKufiJSLdyiIiIiIiEhmTCiIiIiIiEhmTCiIiIiIiEhmTCiIiIiIiEhmTCiIiIiIiEhmTCiIiIiI6oCkpCSIRCLk5eVV+5yoqCh4e3srLCaSTkBAAKZMmaLqMKTGx8YSERERAbiet0yp7bmazJBrfe3bt0dOTg6MjY3lWm9AQAC8vb2xatUqudaraBnezZTantul35XaXk3ChIKIiIioDtDS0oK1tbWqw6B6iEOeiIiIiGqJiooKLF68GE5OTtDV1YWXlxd2794NoOohT5s2bYKdnR309PTQv39/rFixAiYmJpXq3bp1KxwdHWFsbIyhQ4ciPz8fABAWFobk5GSsXr0aIpEIIpEIt2/fVkJP677CwkKEhobCwMAANjY2WL58ucTxx48fIzQ0FKamptDT00OvXr1w48aLRSHFYjEsLCyEaw8A3t7esLGxEbZPnjwJbW1tFBUVAQBEIhE2b96M/v37Q09PDy4uLjh48KBc+sKEguSiKLdI6hcpBq8FEVHdtXjxYmzZsgUbNmzAb7/9hqlTp+K9995DcnJypbIpKSn48MMPMXnyZFy6dAndu3dHbGxspXJZWVnYv38/Dh06hEOHDiE5ORmffvopAGD16tXw8/NDeHg4cnJykJOTAzs7O4X3sz6YOXMmkpOTceDAARw9ehRJSUm4cOGCcDwsLAznzp3DwYMHcfr0aYjFYvTu3RulpaUQiUTo3LkzkpKSALxIPjIyMvDs2TP8/vuLoVfJyclo3bo19PT0hDqjo6MxePBgXL58Gb1790ZISAgePXr01n1hQkFERERUCxQXF2PRokX4+uuvERgYCGdnZ4SFheG9997Dxo0bK5Vfu3YtevXqhRkzZsDV1RXjx49Hr169KpWrqKhAfHw8WrRogU6dOmHE
iBFITEwEABgbG0NLSwt6enqwtraGtbU11NXVFd7Xuq6goABfffUVli1bhq5du8LDwwMJCQkoKysDANy4cQMHDx7E5s2b0alTJ3h5eWHbtm24e/cu9u/fD+DF3JaXCcWJEyfg4+MjsS8pKQn+/v4S7YaFhWHYsGFo0qQJFi1ahIKCApw9e/at+8M5FDXBoyfSlXdQcP01lLTfpOsrKI639iBLuvIW7ygmjrdQckLKPgDQCZB/HG+r5Le/pCqvKUMb/1t/Sary9sHS1f8w46F0J6Bm/m2U3y+Uqrws16L8wTOpytfXb9xkuWsp7XtKGde7Lrp58yaKiorQvXt3if0lJSXw8fGpVD4zMxP9+/eX2NemTRscOnRIYp+joyMMDQ2FbRsbG+Tm5soxcvq3rKwslJSUoG3btsI+MzMzNG3aFACQkZEBDQ0NiePm5uZo2rQpMjIyAAD+/v6YPHkyHjx4gOTkZAQEBMDa2hpJSUkYPXo0Tp06hcjISIl2PT09hZ/19fVhZGQkl2vNhIKIiIioFigoKAAA/PDDD2jYsKHEMW1tbWRlSf9lDwBoakqmbCKRCBUVFbIFSUrj4eEBMzMzJCcnIzk5GbGxsbC2tsaSJUuQlpaG0tJStG/fXuIcRV3r+voFDBEREVGt4u7uDm1tbWRnZ6NJkyYSr6rmNTRt2hRpaWkS+/69XR1aWlooLy+XOW6qrHHjxtDU1ERqaqqw7/Hjx7h+/ToAwM3NDWVlZRLHHz58iMzMTLi7uwN4kQx06tQJBw4cwG+//YaOHTvC09MTxcXF2LhxI3x9faGvr5x70rxDQURERFQLGBoaYsaMGZg6dSoqKirQsWNHPHnyBCkpKTAyMoKDg+SY6IkTJ6Jz585YsWIF3n33Xfzyyy/46aefIBKJpGrX0dERqampuH37NgwMDGBmZgY1NX4n/TYMDAwwevRozJw5E+bm5rC0tMScOXOE36uLiwv69euH8PBwbNy4EYaGhpg9ezYaNmyIfv36CfUEBARg+vTp8PX1hYGBAQCgc+fO2LZtG2bOnKm0/vDdQERERFRLLFiwAHPnzsXixYvh5uaGnj174ocffoCTk1Olsh06dMCGDRuwYsUKeHl54fDhw5g6dSp0dHSkanPGjBlQV1eHu7s7LCwskJ2dLa/u1GufffYZOnXqhHfffRfdunVDx44d0apVK+F4XFwcWrVqhb59+8LPzw9isRg//vijxLAlf39/lJeXIyAgQNgXEBBQaZ+i8Q4FEREREeS/crUiiEQiTJ48GZMnT67yuFgsltgODw9HeHi4xHaTJk2E7aioKERFRUmcM2XKFEyZMkXYdnV1xenTp98+eCWr6StXGxgYYOvWrdi6dauw7593FUxNTbFly5bX1uHt7V3pmv/7+r3073IAJNYseRtMKIj+weBPKSe0SfvELSKq0/gkKappli1bhu7du0NfXx8//fQTEhIS8MUXX6g6LKpjmFAQERER1VFnz57F0qVLkZ+fD2dnZ6xZswZjxoxRdVhUxzChICIiIqqjdu7cqeoQqB5gQkFERFRLSDukCuCwKiJSPP47Q0REREREMmNCQUREREREMuOQpxpA/CRfqvLSLUfDW+Qkf+V/Sf+eklahDO/bmkjR/VDG74n/htQvzx4q/j2Vf+2hVOWlWzWBiJSN/+YTEREREZHMeIeC5EIZ32hR9fBaEBERkTLxDgUREREREcmMdyiIiIiIAPxZuFmp7dnqc4E5RTrt1Eyp7fnd+l2p7dUkvENBREREREQyY0JBREQkJ+X3C6V6EUlr9+7d8PDwgK6uLszNzdGtWzcUFr54L23evBlubm7Q0dFBs2bN8MUXXwjntW/fHrNmzZKo68GDB9DU1MSJEycAAMXFxZgxYwYaNmwIfX19tG3bFklJSUL5+Ph4mJiY4MiRI3Bzc4OBgQF69uyJnJwcxXe8DnJ0dMSqVask9nl7eyMqKgoAIBKJsH79evTq1Qu6urpwdnbG7t27lR9o
NdTohGLx4sVo3bo1DA0NYWlpiaCgIGRmZkqUef78OSZMmABzc3MYGBhgwIABuH//vooiJiIiIlKMnJwcDBs2DKNGjUJGRgaSkpIQHBwMsViMbdu2Yd68eYiNjUVGRgYWLVqEuXPnIiEhAQAQEhKCHTt2QCwWC/V99913sLW1RadOnQAAEREROH36NHbs2IHLly9j0KBB6NmzJ27cuCGcU1RUhGXLlmHr1q04ceIEsrOzMWPGDOX+IuqRuXPnYsCAAUhPT0dISAiGDh2KjIwMVYdVSY1OKJKTkzFhwgScOXMGx44dQ2lpKXr06CFk4gAwdepUfP/999i1axeSk5Px559/Ijg4WIVRExEREclfTk4OysrKEBwcDEdHR3h4eGD8+PEwMDDA/PnzsXz5cgQHB8PJyQnBwcGYOnUqNm7cCAAYPHgw/vzzT5w8eVKob/v27Rg2bBhEIhGys7MRFxeHXbt2oVOnTmjcuDFmzJiBjh07Ii4uTjintLQUGzZsgK+vL1q2bImIiAgkJiYq/XdRXwwaNAhjxoyBq6srFixYAF9fX6xdu1bVYVVSoydlHz58WGI7Pj4elpaWOH/+PDp37ownT57gq6++wvbt29GlSxcAQFxcHNzc3HDmzBm0a9dOFWETEREphCzDpDQVEAephpeXF7p27QoPDw8EBgaiR48eGDhwILS0tJCVlYXRo0cjPDxcKF9WVgZjY2MAgIWFBXr06IFt27ahU6dOuHXrFk6fPi0kHFeuXEF5eTlcXV0l2iwuLoa5ubmwraenh8aNGwvbNjY2yM3NVWS36zU/P79K25cuXVJNMK9RoxOKf3vy5AkAwMzMDABw/vx5lJaWolu3bkKZZs2awd7eHqdPn35lQlFcXIzi4mJh++nTpwqMmoiIiOjtqaur49ixYzh16hSOHj2KtWvXYs6cOfj+++8BAJs2bULbtm0rnfNSSEgIJk2ahLVr12L79u3w8PCAh4cHAKCgoADq6uo4f/68xDkAYGBgIPysqSmZoopEIolhVFR9ampqlX53paWlKorm7dSahKKiogJTpkxBhw4d0KJFCwDAvXv3oKWlBRMTE4myVlZWuHfv3ivrWrx4MaKjo6vV7t2b0i0SZi9V6Ree7b0qVXn9AOnqryvfaP31+yOpylsoKI63VpSn6gjeWlFukdTn6Csgjrf1MOOhVOVrYh/qivxr0l0LHQXFQcrBBThlJxKJ0KFDB3To0AHz5s2Dg4MDUlJSYGtriz/++AMhISGvPLdfv34YO3YsDh8+jO3btyM0NFQ45uPjg/LycuTm5gpzKkixLCwsJCa0P336FLdu3ZIoc+bMGYnrdObMGfj4+CgtxuqqNQnFhAkTcPXqVYmxf7L66KOPMG3aNGH76dOnsLOze+t6iYiIiBQlNTUViYmJ6NGjBywtLZGamooHDx7Azc0N0dHRmDRpEoyNjdGzZ08UFxfj3LlzePz4sfCZR19fH0FBQZg7dy4yMjIwbNgwoW5XV1eEhIQgNDQUy5cvh4+PDx48eIDExER4enqiT58+qup2ndWlSxfEx8fj3XffhYmJCebNm1fp7tCuXbvg6+uLjh07Ytu2bTh79iy++uorFUX8arUioYiIiMChQ4dw4sQJNGrUSNhvbW2NkpIS5OXlSdyluH//PqytrV9Zn7a2NrS1tRUZMhEREZFcGRkZ4cSJE1i1ahWePn0KBwcHLF++HL169QLwYn7DZ599hpkzZ0JfXx8eHh6YMmWKRB0hISHo3bs3OnfuDHt7yXEVcXFxWLhwIaZPn467d++iQYMGaNeuHfr27ausLtYrH330EW7duoW+ffvC2NgYCxYsqHSHIjo6Gjt27MD48eNhY2ODb7/9Fu7u7iqK+NVqdEIhFosxceJE7Nu3D0lJSXBycpI43qpVK2hqaiIxMREDBgwAAGRmZiI7O7vSJBYiIiKi16npK1e7ublVemDNPw0fPhzDhw9/bR29evV65ZwHTU1NREdHv3JYeFhY
GMLCwiT2BQUF1dg5FDV95WojIyPs2LFDYt/IkSMltm1tbXH06FFlhiWTGp1QTJgwAdu3b8eBAwdgaGgozIswNjaGrq4ujI2NMXr0aEybNg1mZmYwMjLCxIkT4efnxyc8EREREREpQY1OKNavXw8ACAgIkNgfFxcnZMgrV66EmpoaBgwYgOLiYgQGBkqsDElERERERIpToxOK6txC09HRweeff47PP/9cCRERERERESleTR1KVpUanVBQ7VH4gI8ArCl4LYiIiEiZmFAQERHJCdfTIKL6SE3VARARERERUe3FhIKIiIiIiGTGhIKIiIiIiGTGORREJLVnDxU/8TvvicKbUApF90MZv6ec3ZlSn2P/gQICeUvl9wulKq+poDiID48gqmt4h4KIiIiolgsLC0NQUJCqw6B6incoiIiIiADkl+5TanuGmv3lVtfq1atr1boFyvCjSVOlttc7T/q7uXUFEwoiIiKiWs7Y2FjVIVA9xiFPRERERLXE7t274eHhAV1dXZibm6Nbt24oLCyUGPL04MEDWFtbY9GiRcJ5p06dgpaWFhITE1UUOf1bQEAAJk2ahMjISJiZmcHa2hpRUVHC8ezsbPTr1w8GBgYwMjLC4MGDcf/+fdUF/BpMKIiIiIhqgZycHAwbNgyjRo1CRkYGkpKSEBwcXGmok4WFBb7++mtERUXh3LlzyM/Px4gRIxAREYGuXbuqKHqqSkJCAvT19ZGamoqlS5ciJiYGx44dQ0VFBfr164dHjx4hOTkZx44dwx9//IEhQ4aoOuQqccgTERERUS2Qk5ODsrIyBAcHw8HBAQDg4eFRZdnevXsjPDwcISEh8PX1hb6+PhYvXqzMcKkaPD09MX/+fACAi4sL1q1bJ9xFunLlCm7dugU7OzsAwJYtW9C8eXOkpaWhdevWKou5KrxDQURERFQLeHl5oWvXrvDw8MCgQYOwadMmPH78+JXlly1bhrKyMuzatQvbtm2Dtra2EqOl6vD09JTYtrGxQW5uLjIyMmBnZyckEwDg7u4OExMTZGRkKDvMN+IdCiIiIjn56/dHUpW3UFAcVDepq6vj2LFjOHXqFI4ePYq1a9dizpw5SE1NrbJ8VlYW/vzzT1RUVOD27duvvJtBqqOpKbnijUgkQkVFhYqikR3vUBARERHVEiKRCB06dEB0dDQuXrwILS0t7NtX+XG3JSUleO+99zBkyBAsWLAAY8aMQW5urgoiJlm4ubnhzp07uHPnjrDv2rVryMvLg7u7uwojqxrvUBAREdUS5X9xhen6LDU1FYmJiejRowcsLS2RmpqKBw8ewM3NDZcvX5YoO2fOHDx58gRr1qyBgYEBfvzxR4waNQqHDh1SUfQkjW7dusHDwwMhISFYtWoVysrKMH78ePj7+8PX11fV4VXChILkIu+JqiOgl3gtiIjqJiMjI5w4cQKrVq3C06dP4eDggOXLl6NXr1747rvvhHJJSUlYtWoVjh8/DiMjIwDA1q1b4eXlhfXr12PcuHGq6gJVk0gkwoEDBzBx4kR07twZampq6NmzJ9auXavq0KrEhKIGeJjxUKry+lLWn/vr/6Q8A7CX+gyqLvEt6a6HyEFBgbyFwgeK/5b08WPFr/iqjH4omjJ+T3VF/jXp/q3VUVAcNV1d+LuQlTxXrlYENzc3HD58uMpj8fHxws8BAQEoLS2VOO7o6IgnT+rXN041feXqpKSkSvv2798v/Gxvb48DBw4oL6C3wDkUREREREQkMyYUREREREQkMw55IvqHbx03SVV+uHiGgiIhIiIiqh14h4KIiIiIiGTGhIKIiIiIiGTGhIKIiIiIiGTGhIKIiIiIiGTGhIKIiIiIiGTGpzwRERHJycV06RYadFNQHEREysSEgoiIqJa4/P3/pD7Hb40CAiGVEYvF+OCDD7B79248fvwYxsbGCAsLw6pVq1QdGtVjTCiIiIiIAIhxXKntifCO1OccPnwY8fHxSEpKgrOzM9TU1KCrq6uA6Gq/
7aKmSm1vuDhTqe3VJEwoaoDCB88UWv/dm9LXb6+AON7Wrdt1YyjBs71XpSqvH6CYOEg576nHj6VroyZSxr8hf/3+SKryFlLWDwBphx9KVb63DG1Q9eQ9UXUEtVdWVhZsbGzQvn17VYdCJOCkbCIiIqJaICwsDBMnTkR2djZEIhEcHR0REBCAKVOmAAA+/vhjtG3bttJ5Xl5eiImJEbY3b94MNzc36OjooFmzZvjiiy+U1QX625YtW2Bubo7i4mKJ/UFBQRgxYgQA4MCBA2jZsiV0dHTg7OyM6OholJWVAXgx9C0qKgr29vbQ1taGra0tJk2apPR+vMSEgoiIiKgWWL16NWJiYtCoUSPk5OQgLS1N4nhISAjOnj2LrKwsYd9vv/2Gy5cvY/jw4QCAbdu2Yd68eYiNjUVGRgYWLVqEuXPnIiEhQal9qe8GDRqE8vJyHDx4UNiXm5uLH374AaNGjcKvv/6K0NBQTJ48GdeuXcPGjRsRHx+P2NhYAMCePXuwcuVKbNy4ETdu3MD+/fvh4eGhqu5wyBPJR10Y1lFX8FoQEdVNxsbGMDQ0hLq6OqytrSsdb968Oby8vLB9+3bMnTsXwIsEom3btmjSpAkAYP78+Vi+fDmCg4MBAE5OTsIH1pEjRyqvM/Wcrq4uhg8fjri4OAwaNAgA8M0338De3h4BAQHo3r07Zs+eLVwTZ2dnLFiwAJGRkZg/fz6ys7NhbW2Nbt26QVNTE/b29mjTpo3K+sM7FERERER1REhICLZv3w7gxbCYb7/9FiEhIQCAwsJCZGVlYfTo0TAwMBBeCxculLirQcoRHh6Oo0eP4u7duwCA+Ph4hIWFQSQSIT09HTExMRLXKTw8HDk5OSgqKsKgQYPw7NkzODs7Izw8HPv27ROGQ6kC71AQERER1RHDhg3DrFmzcOHCBTx79gx37tzBkCFDAAAFBQUAgE2bNlWaa6Gurq70WOs7Hx8feHl5YcuWLejRowd+++03/PDDDwBeXKvo6GjhTtI/6ejowM7ODpmZmfj5559x7NgxjB8/Hp999hmSk5Ohqamp7K4woSAiIiKqKxo1agR/f39s27YNz549Q/fu3WFpaQkAsLKygq2tLf744w/hrgWp1pgxY7Bq1SrcvXsX3bp1g52dHQCgZcuWyMzMFIaqVUVXVxfvvvsu3n33XUyYMAHNmjXDlStX0LJlS2WFL2BCQURERFSHhISEYP78+SgpKcHKlSsljkVHR2PSpEkwNjZGz549UVxcjHPnzuHx48eYNm2aiiKuv4YPH44ZM2Zg06ZN2LJli7B/3rx56Nu3L+zt7TFw4ECoqakhPT0dV69excKFCxEfH4/y8nK0bdsWenp6+Oabb6CrqwsHBweV9INzKIiIiIjqkIEDB+Lhw4coKipCUFCQxLExY8Zg8+bNiIuLg4eHB/z9/REfHw8nJyfVBFvPGRsbY8CAATAwMJC4VoGBgTh06BCOHj2K1q1bo127dli5cqWQMJiYmGDTpk3o0KEDPD098fPPP+P777+Hubm5SvrBOxREREREkG3lamWbMmWKsO4EACQlJVUqY2JigufPn7+yjuHDhwuPka3LasvK1Xfv3kVISAi0tbUl9gcGBiIwMLDKc4KCgioli6rEhIKIiKiW4GOhieqOx48fIykpCUlJSbV+cUEmFG+gjH+8854otn7+B1SzFOUWSVVeX0Fx1HSK/rugmuViunT/TrnJ0AbfU9Vz67b0/2fIcj2I6jsfHx88fvwYS5YsQdOmTVUdzlthQkFEREREpGS3b99WdQhyw0nZREREREQkMyYUREREVC+JxRwSTPQ61f0bYUJBRERE9crLlYSLiqSb00ZU37z8G3nT6tucQ0FERET1irq6OkxMTJCbmwsA0NPTg0gkUnFURDWHWCxGUVERcnNzYWJiAnV19deWZ0JBRERE9Y61tTUACEkFEVVmYmIi/K28DhMKIiIiqndEIhFsbGxgaWmJ0tJSVYdDVONoamq+
8c7ES0woSC74fPeag9eCiKj61NXVq/2hiYiqxknZREREREQkMyYUREREREQkMyYUREREREQkM86hqAEeP+bCOtXBuQE1x63b0r9n3RQQx9tSxntK0W0oow+yXG8/BcRBdeM9C0j/nuL7iahm4x0KIiIiIiKSGRMKIiIiIiKSGRMKIiIiIiKSGRMKIiIiIiKSGRMKIiIiIiKSGRMKIiIiIiKSWZ1JKD7//HM4OjpCR0cHbdu2xdmzZ1UdEhERERFRnVcnEorvvvsO06ZNw/z583HhwgV4eXkhMDAQubm5qg6NiIiIiKhOqxMJxYoVKxAeHo73338f7u7u2LBhA/T09PD111+rOjQiIiIiojqt1icUJSUlOH/+PLp16ybsU1NTQ7du3XD69GkVRkZEREREVPdpqDqAt/XXX3+hvLwcVlZWEvutrKzw+++/V3lOcXExiouLhe0nT54AAJ4+fVqpbJG4XKp4qqrjTRTdhrT1y9QGal4bMl0LJbSRX1omVXntmngtauB7qqZeb0W3URP/9pTRRk28FspooyZeC2W08ar6X+4Xi8VS1UdE8iUS1/K/wj///BMNGzbEqVOn4OfnJ+yPjIxEcnIyUlNTK50TFRWF6OhoZYZJRERECnLnzh00atRI1WEQ1Vu1/g5FgwYNoK6ujvv370vsv3//Pqytras856OPPsK0adOE7YqKCjx69Ajm5uYQiURvbPPp06ews7PDnTt3YGRk9HYdqMNt1IU+sI2aUz/bqFlt1IU+sI2aU7+sbYjFYuTn58PW1lYhMRFR9dT6hEJLSwutWrVCYmIigoKCALxIEBITExEREVHlOdra2tDW1pbYZ2JiInXbRkZGCvuHtS61URf6wDZqTv1so2a1URf6wDZqTv2ytGFsbKzAaIioOmp9QgEA06ZNw8iRI+Hr64s2bdpg1apVKCwsxPvvv6/q0IiIiIiI6rQ6kVAMGTIEDx48wLx583Dv3j14e3vj8OHDlSZqExERERGRfNWJhAIAIiIiXjnESd60tbUxf/78SsOm2IZy62cbNauNutAHtlFz6mcbNauNutAHIlKcWv+UJyIiIiIiUp1av7AdERERERGpDhMK+r/27j2qyfv+A/j7IRCIkSEgmARMuCkIAlNQJro6aw7CrELtlDq0MKzn6MIK2lLtOqbVVqRWWqEUqqWUemlrVy8pXaVABWunoGAUHUO8YdUolYkKqMTk+/vDQ36CTiF80bX7vM7JOfKQvD/PE/kAn+cGIYQQQgghFqOBghBCCCGEEGIxGigIIYQQQgghFqOBwgI5OTnw8PCAnZ0dwsLCUFVVxS17z549mDZtGhQKBQRBwI4dO7hlA0B6ejrGjBkDe3t7uLq6IiYmBvX19Vxr5ObmIigoyPzHicaNG4evv/6aa43uVq9eDUEQkJKSwi1z+fLlEAShy8PPz49bPgCcP38ec+bMgbOzMyQSCQIDA3Hw4EFu+R4eHvdsgyAI0Gg03GoYjUakpaXB09MTEokE3t7eWLlyJXjf7+H69etISUmBSqWCRCJBeHg4Dhw4YHHew3qNMYa//vWvkMvlkEgkUKvVaGho4Fpj27ZtiIiIgLOzMwRBgE6n45ZvMBiwZMkSBAYGQiqVQqFQ4LnnnsOFCxe4bsPy5cvh5+cHqVQKR0dHqNVqVFZWcq1xtwULFkAQBLzzzjtcayQkJNzTJ5GRkdy3o66uDtOnT4eDgwOkUinGjBmDs2fPcsm/X68LgoA1a9Zw24bW1lYkJSXB3d0dEokE/v7+yMvL63F+T2pcunQJCQkJUCgUGDBgACIjI3vde4SQR4sGil767LPPsHjxYixbtgw1NTUIDg7GlClT0NTUxCW/ra0NwcHByMnJ4ZLXXUVFBTQaDfbv34+SkhIYDAZERESgra2NWw13d3esXr0a1dXVOHjwIJ588klER0fj2LFj3Grc7cCBA3j//fcRFBTEPTsgIAB6vd782Lt3L7fsK1euYPz48bCxscHXX3+N
f/7zn1i7di0cHR251Thw4ECX9S8pKQEAzJw5k1uNjIwM5Obm4t1330VdXR0yMjLw5ptvIjs7m1sNAHj++edRUlKCjRs3ora2FhEREVCr1Th//rxFeQ/rtTfffBNZWVnIy8tDZWUlpFIppkyZgps3b3Kr0dbWhgkTJiAjI4P7NrS3t6OmpgZpaWmoqanBtm3bUF9fj+nTp3OrAQDDhw/Hu+++i9raWuzduxceHh6IiIjAjz/+yK1Gp+3bt2P//v1QKBS92oae1oiMjOzSL5988gnXGidPnsSECRPg5+eH8vJyHDlyBGlpabCzs+OSf/e66/V6fPjhhxAEAc888wy3bVi8eDF27dqFTZs2oa6uDikpKUhKSoJWq+VSgzGGmJgYnDp1Cjt37sShQ4egUqmgVqu5/pwihHDGSK+MHTuWaTQa88dGo5EpFAqWnp7OvRYAtn37du65d2tqamIAWEVFRb/WcXR0ZB988AH33OvXr7Nhw4axkpISNnHiRJacnMwte9myZSw4OJhbXndLlixhEyZM6Lf8+0lOTmbe3t7MZDJxy5w6dSpLTEzssmzGjBksLi6OW4329nYmEolYUVFRl+WjR49mr776ap/zu/eayWRiMpmMrVmzxryspaWF2drask8++YRLjbudPn2aAWCHDh2yKPth+Z2qqqoYANbY2NhvNa5evcoAsNLSUq41zp07x9zc3NjRo0eZSqVib7/9tkX5/6lGfHw8i46OtjizJzViY2PZnDlz+i2/u+joaPbkk09yrREQEMBWrFjRZVlf+rB7jfr6egaAHT161LzMaDQyFxcXtmHDBotqEEL6Hx2h6IWOjg5UV1dDrVabl1lZWUGtVmPfvn2Pcc0sd/XqVQCAk5NTv+QbjUZ8+umnaGtrw7hx47jnazQaTJ06tcv/CU8NDQ1QKBTw8vJCXFxcj09N6AmtVovQ0FDMnDkTrq6uGDVqFDZs2MAtv7uOjg5s2rQJiYmJEASBW254eDjKyspw/PhxAMDhw4exd+9eREVFcatx+/ZtGI3Ge/bkSiQSrkeNOp0+fRoXL17s8nXl4OCAsLCwn2yvA3f6XRAEDBo0qF/yOzo6sH79ejg4OCA4OJhbrslkwty5c5GamoqAgABuud2Vl5fD1dUVvr6+WLhwIZqbm7llm0wmfPXVVxg+fDimTJkCV1dXhIWFcT+ttdOlS5fw1VdfYd68eVxzw8PDodVqcf78eTDGsHv3bhw/fhwRERFc8m/dugUAXXrdysoKtra2/dLrhBA+aKDohcuXL8NoNGLIkCFdlg8ZMgQXL158TGtlOZPJhJSUFIwfPx4jR47kml1bW4uBAwfC1tYWCxYswPbt2+Hv78+1xqeffoqamhqkp6dzze0UFhaGjz76CLt27UJubi5Onz6NX//617h+/TqX/FOnTiE3NxfDhg1DcXExFi5ciBdeeAGFhYVc8rvbsWMHWlpakJCQwDV36dKlePbZZ+Hn5wcbGxuMGjUKKSkpiIuL41bD3t4e48aNw8qVK3HhwgUYjUZs2rQJ+/btg16v51anU2c//1x6HQBu3ryJJUuWYPbs2fjFL37BNbuoqAgDBw6EnZ0d3n77bZSUlGDw4MHc8jMyMmBtbY0XXniBW2Z3kZGR+Pjjj1FWVoaMjAxUVFQgKioKRqORS35TUxNaW1uxevVqREZG4ptvvsHTTz+NGTNmoKKigkuNuxUWFsLe3h4zZszgmpudnQ1/f3+4u7tDLBYjMjISOTk5eOKJJ7jk+/n5QalU4pVXXsGVK1fQ0dGBjIwMnDt3rl96nRDCh/XjXgHy+Gg0Ghw9erRf9vr4+vpCp9Ph6tWr+Nvf/ob4+HhUVFRwGyp++OEHJCcno6SkpMfnH/fW3XvYg4KCEBYWBpVKha1bt3LZ62cymRAaGopVq1YBAEaNGoWjR48iLy8P8fHxfc7vLj8/H1FRURadf/4gW7duxebNm7FlyxYEBARAp9MhJSUFCoWC63Zs3LgRiYmJ
cHNzg0gkwujRozF79mxUV1dzq/FzZTAYMGvWLDDGkJubyz1/0qRJ0Ol0uHz5MjZs2IBZs2ahsrISrq6ufc6urq7GunXrUFNTw/XIWnfPPvus+d+BgYEICgqCt7c3ysvLMXny5D7nm0wmAEB0dDQWLVoEAPjlL3+Jf/zjH8jLy8PEiRP7XONuH374IeLi4rh/f8zOzsb+/fuh1WqhUqmwZ88eaDQaKBQKLkeKbWxssG3bNsybNw9OTk4QiURQq9WIiorifqMHQgg/dISiFwYPHgyRSIRLly51WX7p0iXIZLLHtFaWSUpKQlFREXbv3g13d3fu+WKxGD4+PggJCUF6ejqCg4Oxbt06bvnV1dVoamrC6NGjYW1tDWtra1RUVCArKwvW1tbc9irebdCgQRg+fDhOnDjBJU8ul98zYI0YMYLraVWdGhsbUVpaiueff557dmpqqvkoRWBgIObOnYtFixZxP3Lk7e2NiooKtLa24ocffkBVVRUMBgO8vLy41gFg7uefQ693DhONjY0oKSnhfnQCAKRSKXx8fPCrX/0K+fn5sLa2Rn5+Ppfs7777Dk1NTVAqleZeb2xsxIsvvggPDw8uNe7Hy8sLgwcP5tbvgwcPhrW19SPp+e+++w719fXc+/3GjRv485//jMzMTEybNg1BQUFISkpCbGws3nrrLW51QkJCoNPp0NLSAr1ej127dqG5ublfep0QwgcNFL0gFosREhKCsrIy8zKTyYSysrJ+uT6gPzDGkJSUhO3bt+Pbb7+Fp6fnI6lrMpnM58byMHnyZNTW1kKn05kfoaGhiIuLg06ng0gk4larU2trK06ePAm5XM4lb/z48ffcsvf48eNQqVRc8u9WUFAAV1dXTJ06lXt2e3s7rKy6fisRiUTmPbK8SaVSyOVyXLlyBcXFxYiOjuZew9PTEzKZrEuvX7t2DZWVlT+ZXgf+f5hoaGhAaWkpnJ2dH0ldnv0+d+5cHDlypEuvKxQKpKamori4mEuN+zl37hyam5u59btYLMaYMWMeSc/n5+cjJCSE63UswJ2vJ4PB8Mj63cHBAS4uLmhoaMDBgwf7pdcJIXzQKU+9tHjxYsTHxyM0NBRjx47FO++8g7a2NvzhD3/gkt/a2tplj9jp06eh0+ng5OQEpVLZ53yNRoMtW7Zg586dsLe3N58P7uDgAIlE0ud8AHjllVcQFRUFpVKJ69evY8uWLSgvL+f6w9/e3v6e6z6kUimcnZ25XQ/y0ksvYdq0aVCpVLhw4QKWLVsGkUiE2bNnc8lftGgRwsPDsWrVKsyaNQtVVVVYv3491q9fzyW/k8lkQkFBAeLj42Ftzb/lp02bhjfeeANKpRIBAQE4dOgQMjMzkZiYyLVOcXExGGPw9fXFiRMnkJqaCj8/P4t772G9lpKSgtdffx3Dhg2Dp6cn0tLSoFAoEBMTw63Gv//9b5w9e9b8tyE6f9mUyWQ9OhLyoHy5XI7f/e53qKmpQVFREYxGo7nfnZycIBaL+7wNzs7OeOONNzB9+nTI5XJcvnwZOTk5OH/+fK9uTfyw96n7IGRjYwOZTAZfX18uNZycnPDaa6/hmWeegUwmw8mTJ/Hyyy/Dx8cHU6ZM4bYdqampiI2NxRNPPIFJkyZh165d+PLLL1FeXs4lH7gz+H7++edYu3Ztj9e7NzUmTpyI1NRUSCQSqFQqVFRU4OOPP0ZmZia3Gp9//jlcXFygVCpRW1uL5ORkxMTEcLvwmxDSDx7rPaZ+orKzs5lSqWRisZiNHTuW7d+/n1v27t27GYB7HvHx8Vzy75cNgBUUFHDJZ4yxxMREplKpmFgsZi4uLmzy5Mnsm2++4Zb/n/C+bWxsbCyTy+VMLBYzNzc3Fhsby06cOMEtnzHGvvzySzZy5Ehma2vL/Pz82Pr167nmM8ZYcXExA8Dq6+u5ZzPG2LVr11hycjJTKpXMzs6OeXl5sVdffZXdunWLa53PPvuMeXl5MbFYzGQyGdNoNKylpcXivIf1
mslkYmlpaWzIkCHM1taWTZ48udfv4cNqFBQU3Pfzy5Yt63N+561o7/fYvXs3l224ceMGe/rpp5lCoWBisZjJ5XI2ffp0VlVVxfV96s6S28Y+qEZ7ezuLiIhgLi4uzMbGhqlUKjZ//nx28eJF7tuRn5/PfHx8mJ2dHQsODmY7duzgmv/+++8ziURicW88rIZer2cJCQlMoVAwOzs75uvry9auXdurW1E/rMa6deuYu7s7s7GxYUqlkv3lL3/h/v2EEMKXwBhd5UQIIYQQQgixDF1DQQghhBBCCLEYDRSEEEIIIYQQi9FAQQghhBBCCLEYDRSEEEIIIYQQi9FAQQghhBBCCLEYDRSEEEIIIYQQi9FAQQghhBBCCLEYDRSEkJ+kM2fOQBAE6HS6Bz7vN7/5DVJSUh7JOhFCCCH/i2igIIRwk5CQAEEQIAgCxGIxfHx8sGLFCty+fbvPuTExMV2WDR06FHq9HiNHjgQAlJeXQxAEtLS0dHnetm3bsHLlyj7Vf5juw03nx50Pe3t7BAQEQKPRoKGhoV/XhRBCCHnUaKAghHAVGRkJvV6PhoYGvPjii1i+fDnWrFljUZbRaITJZLrv50QiEWQyGaytrR+Y4eTkBHt7e4vq91VpaSn0ej0OHz6MVatWoa6uDsHBwSgrK3ss60MIIYT0BxooCCFc2draQiaTQaVSYeHChVCr1dBqtQCAzMxMBAYGQiqVYujQofjjH/+I1tZW82s/+ugjDBo0CFqtFv7+/rC1tUViYiIKCwuxc+dO8x7/8vLyLkcFzpw5g0mTJgEAHB0dIQgCEhISANx7ytOVK1fw3HPPwdHREQMGDEBUVFSXowad61BcXIwRI0Zg4MCB5iGpt5ydnSGTyeDl5YXo6GiUlpYiLCwM8+bNg9FotODdJYQQQv770EBBCOlXEokEHR0dAAArKytkZWXh2LFjKCwsxLfffouXX365y/Pb29uRkZGBDz74AMeOHUNWVhZmzZpl/qVer9cjPDy8y2uGDh2KL774AgBQX18PvV6PdevW3Xd9EhIScPDgQWi1Wuzbtw+MMfz2t7+FwWDosg5vvfUWNm7ciD179uDs2bN46aWX+vxeWFlZITk5GY2Njaiuru5zHiGEEPLf4MHnChBCiIUYYygrK0NxcTH+9Kc/AUCXIwUeHh54/fXXsWDBArz33nvm5QaDAe+99x6Cg4PNyyQSCW7dugWZTHbfWiKRCE5OTgAAV1dXDBo06L7Pa2hogFarxffff28eSjZv3oyhQ4dix44dmDlzpnkd8vLy4O3tDQBISkrCihUrLHsjuvHz8wNw5zqLsWPHcskkhBBCHicaKAghXBUVFWHgwIEwGAwwmUz4/e9/j+XLlwO4c01Beno6/vWvf+HatWu4ffs2bt68ifb2dgwYMAAAIBaLERQU1C/rVldXB2tra4SFhZmXOTs7w9fXF3V1deZlAwYMMA8TACCXy9HU1MRlHRhjAABBELjkEUIIIY8bnfJECOFq0qRJ0Ol0aGhowI0bN1BYWAipVIozZ87gqaeeQlBQEL744gtUV1cjJycHAMynRAF3jkY87l+2bWxsunwsCIJ5EOirzsHF09OTSx4hhBDyuNERCkIIV1KpFD4+Pvcsr66uhslkwtq1a2FldWdfxtatW3uUKRaLH3oRs1gsBoAHPm/EiBG4ffs2Kisrzac8NTc3o76+Hv7+/j1al74wmUzIysqCp6cnRo0a1e/1CCGEkEeBjlAQQh4JHx8fGAwGZGdn49SpU9i4cSPy8vJ69FoPDw8cOXIE9fX1uHz5cpcLqDupVCoIgoCioiL8+OOPXe4e1WnYsGGIjo7G/PnzsXfvXhw+fBhz5syBm5sboqOj+7yN3TU3N+PixYs4deoUtFot1Go1qqqqkJ+fD5FIxL0eIYQQ8jjQQEEIeSSCg4ORmZmJjIwMjBw5Eps3b0Z6enqPXjt//nz4+voiNDQULi4u+P777+95jpubG1577TUsXboUQ4YM
QVJS0n2zCgoKEBISgqeeegrjxo0DYwx///vf7znNiQe1Wg25XI7AwEAsXboUI0aMwJEjR8y3uCWEEEJ+DgTG68RgQgghhBBCyP8cOkJBCCGEEEIIsRgNFIQQQgghhBCL0UBBCCGEEEIIsRgNFIQQQgghhBCL0UBBCCGEEEIIsRgNFIQQQgghhBCL0UBBCCGEEEIIsRgNFIQQQgghhBCL0UBBCCGEEEIIsRgNFIQQQgghhBCL0UBBCCGEEEIIsRgNFIQQQgghhBCL/R9Yw+Tn1pgx6AAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from flwr_datasets import FederatedDataset\n", + "from flwr_datasets.partitioner import NaturalIdPartitioner\n", + "from flwr_datasets.visualization import plot_label_distributions\n", + "\n", + "\n", + "fds = FederatedDataset(\n", + " dataset=\"google/speech_commands\",\n", + " subset=\"v0.01\",\n", + " partitioners={\n", + " \"train\": NaturalIdPartitioner(\n", + " partition_by=\"speaker_id\",\n", + " ),\n", + " },\n", + ")\n", + "\n", + "partitioner = fds.partitioners[\"train\"]\n", + "\n", + "fix, ax, df = plot_label_distributions(\n", + " partitioner=partitioner,\n", + " label_name=\"label\",\n", + " max_num_partitions=20,\n", + " plot_type=\"bar\",\n", + " size_unit=\"percent\",\n", + " partition_id_axis=\"x\",\n", + " legend=True,\n", + " title=\"Per Partition Labels Distribution\",\n", + " verbose_labels=True,\n", + " legend_kwargs={\"ncols\": 2, \"bbox_to_anchor\": (1.25, 0.5)},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "4442c99c", + "metadata": {}, + "source": [ + "## More resources\n", + "\n", + "If you are looking for more resources, feel free to check:\n", + "\n", + "* `flwr-dataset` documentation\n", + " * [plot_label_distributions](https://flower.ai/docs/datasets/ref-api/flwr_datasets.visualization.plot_label_distributions.html#flwr_datasets.visualization.plot_label_distributions)\n", + " * [plot_comparison_label_distribution](https://flower.ai/docs/datasets/ref-api/flwr_datasets.visualization.plot_comparison_label_distribution.html#flwr_datasets.visualization.plot_comparison_label_distribution)\n", + "* if you want to do any custom modification of the returned plots\n", + " * [matplotlib](https://matplotlib.org/)\n", + " * [seaborn](https://seaborn.pydata.org/)\n", + " * or plot directly using pandas object [pd.DataFrame.plot](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.plot.html)\n", + "\n", + "\n", + "This was the last tutorial. 
\n", + "\n", + "Previous tutorials:\n", + "\n", + "* [Quickstart Basics](https://flower.ai/docs/datasets/tutorial-quickstart.html)\n", + "\n", + "* [Use Partitioners](https://flower.ai/docs/datasets/tutorial-use-partitioners.html)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "flwr", + "language": "python", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/datasets/flwr_datasets/__init__.py b/datasets/flwr_datasets/__init__.py index 2d6ecb414498..bd68fa43c606 100644 --- a/datasets/flwr_datasets/__init__.py +++ b/datasets/flwr_datasets/__init__.py @@ -17,14 +17,18 @@ from flwr_datasets import partitioner, preprocessor from flwr_datasets import utils as utils +from flwr_datasets import visualization from flwr_datasets.common.version import package_version as _package_version from flwr_datasets.federated_dataset import FederatedDataset __all__ = [ "FederatedDataset", + "metrics", "partitioner", "preprocessor", "utils", + "visualization", ] + __version__ = _package_version diff --git a/datasets/flwr_datasets/common/__init__.py b/datasets/flwr_datasets/common/__init__.py index b4f12f8641b3..efb4eaf55b70 100644 --- a/datasets/flwr_datasets/common/__init__.py +++ b/datasets/flwr_datasets/common/__init__.py @@ -13,3 +13,12 @@ # limitations under the License. # ============================================================================== """Common components in Flower Datasets.""" + + +from .telemetry import EventType as EventType +from .telemetry import event as event + +__all__ = [ + "EventType", + "event", +] diff --git a/datasets/flwr_datasets/common/telemetry.py b/datasets/flwr_datasets/common/telemetry.py new file mode 100644 index 000000000000..4bf80b93467d --- /dev/null +++ b/datasets/flwr_datasets/common/telemetry.py @@ -0,0 +1,224 @@ +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Flower telemetry.""" + + +import datetime +import json +import logging +import os +import platform +import urllib.request +import uuid +from concurrent.futures import Future, ThreadPoolExecutor +from enum import Enum, auto +from pathlib import Path +from typing import Any, Optional, Union, cast + +from flwr_datasets.common.version import package_name, package_version + +FLWR_TELEMETRY_ENABLED = os.getenv("FLWR_TELEMETRY_ENABLED", "1") +FLWR_TELEMETRY_LOGGING = os.getenv("FLWR_TELEMETRY_LOGGING", "0") + +TELEMETRY_EVENTS_URL = "https://telemetry.flower.ai/api/v1/event" + +LOGGER_NAME = "flwr-datasets-telemetry" +LOGGER_LEVEL = logging.DEBUG + + +def _configure_logger(log_level: int) -> None: + console_handler = logging.StreamHandler() + console_handler.setLevel(log_level) + console_handler.setFormatter( + logging.Formatter( + "%(levelname)s %(name)s %(asctime)s | %(filename)s:%(lineno)d | %(message)s" + ) + ) + + logger = logging.getLogger(LOGGER_NAME) + logger.setLevel(log_level) + logger.addHandler(console_handler) + + +_configure_logger(LOGGER_LEVEL) + + +def log(msg: Union[str, Exception]) -> None: + """Log message using logger at DEBUG level.""" + logging.getLogger(LOGGER_NAME).log(LOGGER_LEVEL, msg) + + +def _get_home() -> Path: + return Path().home() + + +def _get_source_id() -> str: + """Get existing or new source ID.""" + 
source_id = "unavailable" + # Check if .flwr in home exists + try: + home = _get_home() + except RuntimeError: + # If the home directory can’t be resolved, RuntimeError is raised. + return source_id + + flwr_dir = home.joinpath(".flwr") + # Create .flwr directory if it does not exist yet. + try: + flwr_dir.mkdir(parents=True, exist_ok=True) + except PermissionError: + return source_id + + source_file = flwr_dir.joinpath("source") + + # If no source_file exists create one and write it + if not source_file.exists(): + try: + source_file.touch(exist_ok=True) + source_file.write_text(str(uuid.uuid4()), encoding="utf-8") + except PermissionError: + return source_id + + source_id = source_file.read_text(encoding="utf-8").strip() + + try: + uuid.UUID(source_id) + except ValueError: + source_id = "invalid" + + return source_id + + +# Using str as first base type to make it JSON serializable as +# otherwise the following exception will be thrown when serializing +# the event dict: +# TypeError: Object of type EventType is not JSON serializable +class EventType(str, Enum): + """Types of telemetry events.""" + + # This method combined with auto() will set the property value to + # the property name e.g. + # `START_CLIENT = auto()` becomes `START_CLIENT = "START_CLIENT"` + # The type signature is not compatible with mypy, pylint and flake8 + # so each of those needs to be disabled for this line. + # pylint: disable-next=no-self-argument,arguments-differ,line-too-long + def _generate_next_value_(name: str, start: int, count: int, last_values: list[Any]) -> Any: # type: ignore # noqa: E501 + return name + + PING = auto() + + LOAD_PARTITION_CALLED = auto() + LOAD_SPLIT_CALLED = auto() + PLOT_LABEL_DISTRIBUTION_CALLED = auto() + PLOT_COMPARISON_LABEL_DISTRIBUTION_CALLED = auto() + + +# Use the ThreadPoolExecutor with max_workers=1 to have a queue +# and also ensure that telemetry calls are not blocking. 
+state: dict[str, Union[Optional[str], Optional[ThreadPoolExecutor]]] = { + # Will be assigned ThreadPoolExecutor(max_workers=1) + # in event() the first time it's required + "executor": None, + "source": None, + "cluster": None, +} + + +# In Python 3.7 pylint will throw an error stating that +# "Value 'Future' is unsubscriptable". +# This pylint disable line can be remove when dropping support +# for Python 3.7 +# pylint: disable-next=unsubscriptable-object +def event( + event_type: EventType, + event_details: Optional[dict[str, Any]] = None, +) -> Future: # type: ignore + """Submit create_event to ThreadPoolExecutor to avoid blocking.""" + if state["executor"] is None: + state["executor"] = ThreadPoolExecutor(max_workers=1) + + executor: ThreadPoolExecutor = cast(ThreadPoolExecutor, state["executor"]) + + result = executor.submit(create_event, event_type, event_details) + return result + + +def create_event(event_type: EventType, event_details: Optional[dict[str, Any]]) -> str: + """Create telemetry event.""" + if state["source"] is None: + state["source"] = _get_source_id() + + if state["cluster"] is None: + state["cluster"] = str(uuid.uuid4()) + + if event_details is None: + event_details = {} + + date = datetime.datetime.now(tz=datetime.timezone.utc).isoformat() + context = { + "source": state["source"], + "cluster": state["cluster"], + "date": date, + "package": { + "package_name": package_name, + "package_version": package_version, + }, + "hw": { + "cpu_count": os.cpu_count(), + }, + "platform": { + "system": platform.system(), + "release": platform.release(), + "platform": platform.platform(), + "python_implementation": platform.python_implementation(), + "python_version": platform.python_version(), + "machine": platform.machine(), + "architecture": platform.architecture(), + "version": platform.uname().version, + }, + } + payload = { + "event_type": event_type, + "event_details": event_details, + "context": context, + } + payload_json = json.dumps(payload) 
+ if FLWR_TELEMETRY_LOGGING == "1": + log(" - ".join([date, "POST", payload_json])) + + # If telemetry is not disabled with setting FLWR_TELEMETRY_ENABLED=0 + # create a request and send it to the telemetry backend + if FLWR_TELEMETRY_ENABLED == "1": + request = urllib.request.Request( + url=TELEMETRY_EVENTS_URL, + data=payload_json.encode("utf-8"), + headers={ + "User-Agent": f"{package_name}/{package_version}", + "Content-Type": "application/json", + }, + method="POST", + ) + try: + with urllib.request.urlopen(request, timeout=60) as response: + result = response.read() + + response_json: str = result.decode("utf-8") + + return response_json + except urllib.error.URLError as ex: + if FLWR_TELEMETRY_LOGGING == "1": + log(ex) + + return "disabled" diff --git a/datasets/flwr_datasets/common/telemetry_test.py b/datasets/flwr_datasets/common/telemetry_test.py new file mode 100644 index 000000000000..f46b7b9a2ddf --- /dev/null +++ b/datasets/flwr_datasets/common/telemetry_test.py @@ -0,0 +1,115 @@ +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Telemetry tests.""" + + +import time +import unittest +from typing import Callable +from unittest import mock + +from flwr_datasets.common.telemetry import EventType, _get_source_id, event + + +class TelemetryTest(unittest.TestCase): + """Tests for the telemetry module.""" + + @mock.patch("flwr_datasets.common.telemetry.FLWR_TELEMETRY_ENABLED", "1") + def test_event(self) -> None: + """Test if sending works against the actual API.""" + # Prepare + expected = '{\n "status": "created"\n}' + + # Execute + future = event(EventType.PING) + actual = future.result() + + # Assert + self.assertEqual(actual, expected) + + @mock.patch("flwr_datasets.common.telemetry.FLWR_TELEMETRY_ENABLED", "1") + def test_not_blocking(self) -> None: + """Test if the code is blocking. + + If the code does not block duration_actual should be less than + 0.001s. + """ + # Prepare + # Use 0.1ms as any blocking networked call would take longer. + duration_max = 0.001 + start = time.time() + + # Execute + event(EventType.PING) + duration_actual = time.time() - start + + # Assert + self.assertLess(duration_actual, duration_max) + + @mock.patch("flwr_datasets.common.telemetry.FLWR_TELEMETRY_ENABLED", "0") + def test_telemetry_disabled(self) -> None: + """Test opt-out.""" + # Prepare + expected = "disabled" + + # Execute + future = event(EventType.PING) + actual = future.result() + + # Assert + self.assertEqual(actual, expected) + + def test_get_source_id(self) -> None: + """Test if _get_source_id returns an ID successfully. + + This test might fail if the UNIX user invoking the test has no home directory. 
+ """ + # Prepare + # nothing to prepare + + # Execute + source_id = _get_source_id() + + # Assert + # source_id should be len 36 as it's a uuid4 in the current + # implementation + self.assertIsNotNone(source_id) + self.assertEqual(len(source_id), 36) + + def test_get_source_id_no_home(self) -> None: + """Test if _get_source_id returns unavailable without a home dir.""" + + # Prepare + def new_callable() -> Callable[[], None]: + def _new_failing_get_home() -> None: + raise RuntimeError + + return _new_failing_get_home + + except_value = "unavailable" + + # Execute + with mock.patch( + "flwr_datasets.common.telemetry._get_home", + new_callable=new_callable, + ): + source_id = _get_source_id() + + # Assert + self.assertEqual(source_id, except_value) + + +if __name__ == "__main__": + unittest.main() diff --git a/datasets/flwr_datasets/common/typing.py b/datasets/flwr_datasets/common/typing.py index ffaefaeec313..d6d37b468494 100644 --- a/datasets/flwr_datasets/common/typing.py +++ b/datasets/flwr_datasets/common/typing.py @@ -15,7 +15,7 @@ """Flower Datasets type definitions.""" -from typing import Any, List +from typing import Any import numpy as np import numpy.typing as npt @@ -23,4 +23,4 @@ NDArray = npt.NDArray[Any] NDArrayInt = npt.NDArray[np.int_] NDArrayFloat = npt.NDArray[np.float_] -NDArrays = List[NDArray] +NDArrays = list[NDArray] diff --git a/datasets/flwr_datasets/common/version.py b/datasets/flwr_datasets/common/version.py index 48c3fc5aaa9c..3e4c9a31fd6c 100644 --- a/datasets/flwr_datasets/common/version.py +++ b/datasets/flwr_datasets/common/version.py @@ -19,15 +19,14 @@ import importlib.metadata as importlib_metadata -from typing import Tuple -def _check_package(name: str) -> Tuple[str, str]: +def _check_package(name: str) -> tuple[str, str]: version: str = importlib_metadata.version(name) return name, version -def _version() -> Tuple[str, str]: +def _version() -> tuple[str, str]: """Read and return Flower Dataset package name and version. 
Returns diff --git a/datasets/flwr_datasets/federated_dataset.py b/datasets/flwr_datasets/federated_dataset.py index 5d98d01d4941..72ea54773564 100644 --- a/datasets/flwr_datasets/federated_dataset.py +++ b/datasets/flwr_datasets/federated_dataset.py @@ -15,10 +15,11 @@ """FederatedDataset.""" -from typing import Dict, Optional, Tuple, Union +from typing import Any, Dict, Optional, Tuple, Union import datasets from datasets import Dataset, DatasetDict +from flwr_datasets.common import EventType, event from flwr_datasets.partitioner import Partitioner from flwr_datasets.preprocessor import Preprocessor from flwr_datasets.utils import ( @@ -35,8 +36,9 @@ class FederatedDataset: Download, partition data among clients (edge devices), or load full dataset. - Partitions are created using IidPartitioner. Support for different partitioners - specification and types will come in future releases. + Partitions are created per-split-basis using Partitioners from + `flwr_datasets.partitioner` specified in `partitioners` (see `partitioners` + parameter for more information). Parameters ---------- @@ -52,30 +54,60 @@ class FederatedDataset: no operation is applied. partitioners : Dict[str, Union[Partitioner, int]] A dictionary mapping the Dataset split (a `str`) to a `Partitioner` or an `int` - (representing the number of IID partitions that this split should be partitioned - into). One or multiple `Partitioner` objects can be specified in that manner, - but at most, one per split. + (representing the number of IID partitions that this split should be + partitioned into, i.e., using the default partitioner + `IidPartitioner `_). One or multiple `Partitioner` + objects can be specified in that manner, but at most, one per split. shuffle : bool - Whether to randomize the order of samples. Applied prior to resplitting, - speratelly to each of the present splits in the dataset. It uses the `seed` - argument. Defaults to True. + Whether to randomize the order of samples. 
Applied prior to preprocessing + operations, separately to each of the present splits in the dataset. It uses + the `seed` argument. Defaults to True. seed : Optional[int] Seed used for dataset shuffling. It has no effect if `shuffle` is False. The - seed cannot be set in the later stages. If `None`, then fresh, unpredictable entropy - will be pulled from the OS. Defaults to 42. + seed cannot be set in the later stages. If `None`, then fresh, unpredictable + entropy will be pulled from the OS. Defaults to 42. + load_dataset_kwargs : Any + Additional keyword arguments passed to `datasets.load_dataset` function. + Currently used parameters are dataset => path (in load_dataset), + subset => name (in load_dataset). You can pass e.g., `num_proc=4`, + `trust_remote_code=True`. Do not pass any parameters that modify the + return type, i.e., ensure that a DatasetDict is returned. Examples -------- Use MNIST dataset for Federated Learning with 100 clients (edge devices): - >>> mnist_fds = FederatedDataset(dataset="mnist", partitioners={"train": 100}) - >>> # Load partition for client with ID 10. - >>> partition = mnist_fds.load_partition(10, "train") + >>> from flwr_datasets import FederatedDataset + >>> + >>> fds = FederatedDataset(dataset="mnist", partitioners={"train": 100}) + >>> # Load partition for a client with ID 10. + >>> partition = fds.load_partition(10) >>> # Use test split for centralized evaluation. 
- >>> centralized = mnist_fds.load_split("test") + >>> centralized = fds.load_split("test") + + Use CIFAR10 dataset for Federated Learning with 100 clients: + + >>> from flwr_datasets import FederatedDataset + >>> from flwr_datasets.partitioner import DirichletPartitioner + >>> + >>> partitioner = DirichletPartitioner(num_partitions=10, partition_by="label", + >>> alpha=0.5, min_partition_size=10) + >>> fds = FederatedDataset(dataset="cifar10", partitioners={"train": partitioner}) + >>> partition = fds.load_partition(partition_id=0) + + Visualize the partitioned datasets: + + >>> from flwr_datasets.visualization import plot_label_distributions + >>> + >>> _ = plot_label_distributions( + >>> partitioner=fds.partitioners["train"], + >>> label_name="label", + >>> legend=True, + >>> ) """ - # pylint: disable=too-many-instance-attributes + # pylint: disable=too-many-instance-attributes, too-many-arguments def __init__( self, *, @@ -85,6 +117,7 @@ def __init__( partitioners: Dict[str, Union[Partitioner, int]], shuffle: bool = True, seed: Optional[int] = 42, + **load_dataset_kwargs: Any, ) -> None: _check_if_dataset_tested(dataset) self._dataset_name: str = dataset @@ -102,6 +135,10 @@ def __init__( self._dataset: Optional[DatasetDict] = None # Indicate if the dataset is prepared for `load_partition` or `load_split` self._dataset_prepared: bool = False + self._event = { + "load_partition": {split: False for split in self._partitioners}, + } + self._load_dataset_kwargs = load_dataset_kwargs def load_partition( self, @@ -124,6 +161,11 @@ def load_partition( not need to provide this argument, but if `partitioners={"train": 10, "test": 100}`, you need to set it to differentiate which partitioner should be used. + The split names you can choose from vary from dataset to dataset. You need + to check the dataset on the `Hugging Face Hub`_ to see which splits are available. You can resplit the dataset + by using the `preprocessor` parameter (to rename, merge, divide, etc. 
the + available splits). Returns ------- @@ -141,7 +183,20 @@ def load_partition( self._check_if_split_possible_to_federate(split) partitioner: Partitioner = self._partitioners[split] self._assign_dataset_to_partitioner(split) - return partitioner.load_partition(partition_id) + partition = partitioner.load_partition(partition_id) + if not self._event["load_partition"][split]: + event( + EventType.LOAD_PARTITION_CALLED, + { + "federated_dataset_id": id(self), + "dataset_name": self._dataset_name, + "split": split, + "partitioner": partitioner.__class__.__name__, + "num_partitions": partitioner.num_partitions, + }, + ) + self._event["load_partition"][split] = True + return partition def load_split(self, split: str) -> Dataset: """Load the full split of the dataset. @@ -153,6 +208,11 @@ def load_split(self, split: str) -> Dataset: ---------- split : str Split name of the downloaded dataset (e.g. "train", "test"). + The split names you can choose from vary from dataset to dataset. You need + to check the dataset on the `Hugging Face Hub`_ to see which splits are available. You can resplit the dataset + by using the `preprocessor` parameter (to rename, merge, divide, etc. the + available splits). Returns ------- @@ -164,7 +224,20 @@ def load_split(self, split: str) -> Dataset: if self._dataset is None: raise ValueError("Dataset is not loaded yet.") self._check_if_split_present(split) - return self._dataset[split] + dataset_split = self._dataset[split] + + if not self._event["load_split"][split]: + event( + EventType.LOAD_SPLIT_CALLED, + { + "federated_dataset_id": id(self), + "dataset_name": self._dataset_name, + "split": split, + }, + ) + self._event["load_split"][split] = True + + return dataset_split @property def partitioners(self) -> Dict[str, Partitioner]: @@ -238,14 +311,23 @@ def _prepare_dataset(self) -> None: happen before the resplitting. 
""" self._dataset = datasets.load_dataset( - path=self._dataset_name, name=self._subset + path=self._dataset_name, name=self._subset, **self._load_dataset_kwargs ) + if not isinstance(self._dataset, datasets.DatasetDict): + raise ValueError( + "Probably one of the specified parameter in `load_dataset_kwargs` " + "change the return type of the datasets.load_dataset function. " + "Make sure to use parameter such that the return type is DatasetDict. " + f"The return type is currently: {type(self._dataset)}." + ) if self._shuffle: # Note it shuffles all the splits. The self._dataset is DatasetDict # so e.g. {"train": train_data, "test": test_data}. All splits get shuffled. self._dataset = self._dataset.shuffle(seed=self._seed) if self._preprocessor: self._dataset = self._preprocessor(self._dataset) + available_splits = list(self._dataset.keys()) + self._event["load_split"] = {split: False for split in available_splits} self._dataset_prepared = True def _check_if_no_split_keyword_possible(self) -> None: diff --git a/datasets/flwr_datasets/federated_dataset_test.py b/datasets/flwr_datasets/federated_dataset_test.py index f65aa6346f3a..bbdfa42292c2 100644 --- a/datasets/flwr_datasets/federated_dataset_test.py +++ b/datasets/flwr_datasets/federated_dataset_test.py @@ -17,7 +17,7 @@ import unittest -from typing import Dict, Union +from typing import Union from unittest.mock import Mock, patch import numpy as np @@ -27,26 +27,51 @@ import datasets from datasets import Dataset, DatasetDict, concatenate_datasets from flwr_datasets.federated_dataset import FederatedDataset -from flwr_datasets.mock_utils_test import _load_mocked_dataset -from flwr_datasets.partitioner import IidPartitioner, Partitioner +from flwr_datasets.mock_utils_test import ( + _load_mocked_dataset, + _load_mocked_dataset_dict_by_partial_download, +) +from flwr_datasets.partitioner import IidPartitioner, NaturalIdPartitioner, Partitioner mocked_datasets = ["cifar100", "svhn", "sentiment140", 
"speech_commands"] +mocked_by_partial_download_datasets = [ + "flwrlabs/pacs", + "flwrlabs/cinic10", + "flwrlabs/caltech101", + "flwrlabs/office-home", + "flwrlabs/fed-isic2019", +] + +natural_id_datasets = [ + "flwrlabs/femnist", +] + +mocked_natural_id_datasets = [ + "flwrlabs/ucf101", + "flwrlabs/ambient-acoustic-context", + "LIUM/tedlium", +] + @parameterized_class( ("dataset_name", "test_split", "subset"), [ # Downloaded - # #Image datasets + # Image ("mnist", "test", ""), ("cifar10", "test", ""), ("fashion_mnist", "test", ""), ("sasha/dog-food", "test", ""), ("zh-plus/tiny-imagenet", "valid", ""), - # Text + ("Mike0307/MNIST-M", "test", ""), + ("flwrlabs/usps", "test", ""), + # Tabular ("scikit-learn/adult-census-income", None, ""), - # Mocked - # #Image + ("jlh/uci-mushrooms", None, ""), + ("scikit-learn/iris", None, ""), + # Mocked by local recreation + # Image ("cifar100", "test", ""), # Note: there's also the extra split and full_numbers subset ("svhn", "test", "cropped_digits"), @@ -54,6 +79,13 @@ ("sentiment140", "test", ""), # aka twitter # Audio ("speech_commands", "test", "v0.01"), + # Mocked by partial download + # Image + ("flwrlabs/pacs", None, ""), + ("flwrlabs/cinic10", "test", ""), + ("flwrlabs/caltech101", None, ""), + ("flwrlabs/office-home", None, ""), + ("flwrlabs/fed-isic2019", "test", ""), ], ) class BaseFederatedDatasetsTest(unittest.TestCase): @@ -79,10 +111,29 @@ def setUp(self) -> None: self.mock_load_dataset.return_value = _load_mocked_dataset( self.dataset_name, [200, 100], ["train", self.test_split], self.subset ) + elif self.dataset_name in mocked_by_partial_download_datasets: + split_names = ["train"] + skip_take_lists = [[(0, 30), (1000, 30), (2000, 40)]] + # If the dataset has split test update the mocking to include it + if self.test_split is not None: + split_names.append(self.test_split) + skip_take_lists.append([(0, 30), (100, 30), (200, 40)]) + mock_return_value = _load_mocked_dataset_dict_by_partial_download( + 
dataset_name=self.dataset_name, + split_names=split_names, + skip_take_lists=skip_take_lists, + subset_name=None if self.subset == "" else self.subset, + ) + self.patcher = patch("datasets.load_dataset") + self.mock_load_dataset = self.patcher.start() + self.mock_load_dataset.return_value = mock_return_value def tearDown(self) -> None: """Clean up after the dataset mocking.""" - if self.dataset_name in mocked_datasets: + if ( + self.dataset_name in mocked_datasets + or self.dataset_name in mocked_by_partial_download_datasets + ): patch.stopall() @parameterized.expand( # type: ignore @@ -144,10 +195,10 @@ def test_multiple_partitioners(self) -> None: dataset_test_partition0 = dataset_fds.load_partition(0, self.test_split) dataset = datasets.load_dataset(self.dataset_name) - self.assertEqual( - len(dataset_test_partition0), - len(dataset[self.test_split]) // num_test_partitions, - ) + expected_len = len(dataset[self.test_split]) // num_test_partitions + mod = len(dataset[self.test_split]) % num_test_partitions + expected_len += 1 if 0 < mod else 0 + self.assertEqual(len(dataset_test_partition0), expected_len) def test_no_need_for_split_keyword_if_one_partitioner(self) -> None: """Test if partitions got with and without split args are the same.""" @@ -166,7 +217,7 @@ def test_resplit_dataset_into_one(self) -> None: if self.test_split is None: return dataset = datasets.load_dataset(self.dataset_name) - dataset_length = sum([len(ds) for ds in dataset.values()]) + dataset_length = sum(len(ds) for ds in dataset.values()) fds = FederatedDataset( dataset=self.dataset_name, partitioners={"train": 100}, @@ -213,9 +264,26 @@ def resplit(dataset: DatasetDict) -> DatasetDict: ) full = fds.load_split("full") dataset = datasets.load_dataset(self.dataset_name) - dataset_length = sum([len(ds) for ds in dataset.values()]) + dataset_length = sum(len(ds) for ds in dataset.values()) self.assertEqual(len(full), dataset_length) + def test_use_load_dataset_kwargs(self) -> None: + """Test 
if the FederatedDataset works correctly with load_dataset_kwargs.""" + try: + fds = FederatedDataset( + dataset=self.dataset_name, + shuffle=False, + partitioners={"train": 10}, + num_proc=2, + ) + _ = fds.load_partition(0) + # Try to catch as broad as possible + except Exception as e: # pylint: disable=broad-except + self.fail( + f"Error when using load_dataset_kwargs: {e}. " + f"This code should not raise any exceptions." + ) + class ShufflingResplittingOnArtificialDatasetTest(unittest.TestCase): """Test shuffling and resplitting using small artificial dataset. @@ -225,7 +293,6 @@ class ShufflingResplittingOnArtificialDatasetTest(unittest.TestCase): The load_dataset method is mocked and the artificial dataset is returned. """ - # pylint: disable=no-self-use def _dummy_setup(self, train_rows: int = 10, test_rows: int = 5) -> DatasetDict: """Create a dummy DatasetDict with train, test splits.""" data_train = { @@ -318,7 +385,7 @@ def test_dict_of_partitioners_passes_partitioners(self) -> None: """Test if partitioners are passed directly (no recreation).""" num_train_partitions = 100 num_test_partitions = 100 - partitioners: Dict[str, Union[Partitioner, int]] = { + partitioners: dict[str, Union[Partitioner, int]] = { "train": IidPartitioner(num_partitions=num_train_partitions), "test": IidPartitioner(num_partitions=num_test_partitions), } @@ -352,7 +419,7 @@ def test_mixed_type_partitioners_passes_instantiated_partitioners(self) -> None: """Test if an instantiated partitioner is passed directly.""" num_train_partitions = 100 num_test_partitions = 100 - partitioners: Dict[str, Union[Partitioner, int]] = { + partitioners: dict[str, Union[Partitioner, int]] = { "train": IidPartitioner(num_partitions=num_train_partitions), "test": num_test_partitions, } @@ -366,7 +433,7 @@ def test_mixed_type_partitioners_creates_from_int(self) -> None: """Test if an IidPartitioner partitioner is created.""" num_train_partitions = 100 num_test_partitions = 100 - partitioners: Dict[str, 
Union[Partitioner, int]] = { + partitioners: dict[str, Union[Partitioner, int]] = { "train": IidPartitioner(num_partitions=num_train_partitions), "test": num_test_partitions, } @@ -380,17 +447,86 @@ def test_mixed_type_partitioners_creates_from_int(self) -> None: ) +@parameterized_class( + ("dataset_name", "test_split", "subset", "partition_by"), + [ + ("flwrlabs/femnist", "", "", "writer_id"), + ("flwrlabs/ucf101", "test", None, "video_id"), + ("flwrlabs/ambient-acoustic-context", "", None, "speaker_id"), + ("LIUM/tedlium", "test", "release3", "speaker_id"), + ], +) +class NaturalIdPartitionerIntegrationTest(unittest.TestCase): + """General FederatedDataset tests with NaturalIdPartitioner.""" + + dataset_name = "" + test_split = "" + subset = "" + partition_by = "" + + def setUp(self) -> None: + """Mock the dataset download prior to each method if needed. + + If the `dataset_name` is in the `mocked_datasets` list, then the dataset + download is mocked. + """ + if self.dataset_name in mocked_natural_id_datasets: + mock_return_value = _load_mocked_dataset_dict_by_partial_download( + dataset_name=self.dataset_name, + split_names=["train"], + skip_take_lists=[[(0, 30), (1000, 30), (2000, 40)]], + subset_name=self.subset, + ) + self.patcher = patch("datasets.load_dataset") + self.mock_load_dataset = self.patcher.start() + self.mock_load_dataset.return_value = mock_return_value + + def tearDown(self) -> None: + """Clean up after the dataset mocking.""" + if self.dataset_name in mocked_natural_id_datasets: + patch.stopall() + + def test_if_the_partitions_have_unique_values(self) -> None: + """Test if each partition has a single unique id value.""" + fds = FederatedDataset( + dataset=self.dataset_name, + partitioners={ + "train": NaturalIdPartitioner(partition_by=self.partition_by) + }, + ) + for partition_id in range(fds.partitioners["train"].num_partitions): + partition = fds.load_partition(partition_id) + unique_ids_in_partition = 
list(set(partition[self.partition_by])) + self.assertEqual(len(unique_ids_in_partition), 1) + + def tests_if_the_columns_are_unchanged(self) -> None: + """Test if the columns are unchanged after partitioning.""" + fds = FederatedDataset( + dataset=self.dataset_name, + partitioners={ + "train": NaturalIdPartitioner(partition_by=self.partition_by) + }, + ) + dataset = fds.load_split("train") + columns_in_dataset = set(dataset.column_names) + + for partition_id in range(fds.partitioners["train"].num_partitions): + partition = fds.load_partition(partition_id) + columns_in_partition = set(partition.column_names) + self.assertEqual(columns_in_partition, columns_in_dataset) + + class IncorrectUsageFederatedDatasets(unittest.TestCase): """Test incorrect usages in FederatedDatasets.""" - def test_no_partitioner_for_split(self) -> None: # pylint: disable=R0201 + def test_no_partitioner_for_split(self) -> None: """Test using load_partition with missing partitioner.""" dataset_fds = FederatedDataset(dataset="mnist", partitioners={"train": 100}) with pytest.raises(ValueError): dataset_fds.load_partition(0, "test") - def test_no_split_in_the_dataset(self) -> None: # pylint: disable=R0201 + def test_no_split_in_the_dataset(self) -> None: """Test using load_partition with non-existent split name.""" dataset_fds = FederatedDataset( dataset="mnist", partitioners={"non-existent-split": 100} @@ -399,15 +535,14 @@ def test_no_split_in_the_dataset(self) -> None: # pylint: disable=R0201 with pytest.raises(ValueError): dataset_fds.load_partition(0, "non-existent-split") - def test_unsupported_dataset(self) -> None: # pylint: disable=R0201 + def test_unsupported_dataset(self) -> None: """Test creating FederatedDataset for unsupported dataset.""" with pytest.warns(UserWarning): FederatedDataset(dataset="food101", partitioners={"train": 100}) def test_cannot_use_the_old_split_names(self) -> None: """Test if the initial split names can not be used.""" - dataset = 
datasets.load_dataset("mnist") - sum([len(ds) for ds in dataset.values()]) + datasets.load_dataset("mnist") fds = FederatedDataset( dataset="mnist", partitioners={"train": 100}, @@ -416,6 +551,23 @@ def test_cannot_use_the_old_split_names(self) -> None: with self.assertRaises(ValueError): fds.load_partition(0, "train") + def test_use_load_dataset_kwargs(self) -> None: + """Test if the FederatedDataset raises with incorrect load_dataset_kwargs. + + The FederatedDataset should throw an error when the load_dataset_kwargs make the + return type different from a DatasetDict. + + Use split which makes the load_dataset return a Dataset. + """ + fds = FederatedDataset( + dataset="mnist", + shuffle=False, + partitioners={"train": 10}, + split="train", + ) + with self.assertRaises(ValueError): + _ = fds.load_partition(0) + def datasets_are_equal(ds1: Dataset, ds2: Dataset) -> bool: """Check if two Datasets have the same values.""" diff --git a/datasets/flwr_datasets/metrics/__init__.py b/datasets/flwr_datasets/metrics/__init__.py new file mode 100644 index 000000000000..e82cb3088822 --- /dev/null +++ b/datasets/flwr_datasets/metrics/__init__.py @@ -0,0 +1,23 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Metrics package.""" + + +from flwr_datasets.metrics.utils import compute_counts, compute_frequencies + +__all__ = [ + "compute_counts", + "compute_frequencies", +] diff --git a/datasets/flwr_datasets/metrics/utils.py b/datasets/flwr_datasets/metrics/utils.py new file mode 100644 index 000000000000..14e1f8d68110 --- /dev/null +++ b/datasets/flwr_datasets/metrics/utils.py @@ -0,0 +1,263 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utils for metrics computation.""" + + +import warnings +from typing import Optional, Union + +import pandas as pd + +from flwr_datasets.partitioner import Partitioner + + +def compute_counts( + partitioner: Partitioner, + column_name: str, + verbose_names: bool = False, + max_num_partitions: Optional[int] = None, +) -> pd.DataFrame: + """Compute the counts of unique values in a given column in the partitions. + + Take into account all possible labels in dataset when computing count for each + partition (assign 0 as the size when there are no values for a label in the + partition). + + Parameters + ---------- + partitioner : Partitioner + Partitioner with an assigned dataset. + column_name : str + Column name identifying label based on which the count will be calculated. 
+ verbose_names : bool + Whether to use verbose versions of the values in the column specified by + `column_name`. The verbose values are possible to extract if the column is a + feature of type `ClassLabel`. + max_num_partitions : Optional[int] + The maximum number of partitions that will be used. If greater than the + total number of partitions in a partitioner, it won't have an effect. If left + as None, then all partitions will be used. + + Returns + ------- + dataframe: pd.DataFrame + DataFrame where the row index represent the partition id and the column index + represent the unique values found in column specified by `column_name` + (e.g. represeting the labels). The value of the dataframe.loc[i, j] represents + the count of the label j, in the partition of index i. + + Examples + -------- + Generate DataFrame with label counts resulting from DirichletPartitioner on cifar10 + + >>> from flwr_datasets import FederatedDataset + >>> from flwr_datasets.partitioner import DirichletPartitioner + >>> from flwr_datasets.metrics import compute_counts + >>> + >>> fds = FederatedDataset( + >>> dataset="cifar10", + >>> partitioners={ + >>> "train": DirichletPartitioner( + >>> num_partitions=20, + >>> partition_by="label", + >>> alpha=0.3, + >>> min_partition_size=0, + >>> ), + >>> }, + >>> ) + >>> partitioner = fds.partitioners["train"] + >>> counts_dataframe = compute_counts( + >>> partitioner=partitioner, + >>> column_name="label" + >>> ) + """ + if column_name not in partitioner.dataset.column_names: + raise ValueError( + f"The specified 'column_name': '{column_name}' is not present in the " + f"dataset. The dataset contains columns {partitioner.dataset.column_names}." 
+ ) + + if max_num_partitions is None: + max_num_partitions = partitioner.num_partitions + else: + max_num_partitions = min(max_num_partitions, partitioner.num_partitions) + assert isinstance(max_num_partitions, int) + partition = partitioner.load_partition(0) + + try: + # Unique labels are needed to represent the correct count of each class + # (some of the classes can have zero samples that's why this + # adjustment is needed) + unique_labels = partition.features[column_name].str2int( + partition.features[column_name].names + ) + except AttributeError: # If the column_name is not formally a Label + unique_labels = partitioner.dataset.unique(column_name) + + partition_id_to_label_absolute_size = {} + for partition_id in range(max_num_partitions): + partition = partitioner.load_partition(partition_id) + partition_id_to_label_absolute_size[partition_id] = _compute_counts( + partition[column_name], unique_labels + ) + + dataframe = pd.DataFrame.from_dict( + partition_id_to_label_absolute_size, orient="index" + ) + dataframe.index.name = "Partition ID" + + if verbose_names: + # Adjust the column name values of the dataframe + current_labels = dataframe.columns + try: + legend_names = partitioner.dataset.features[column_name].int2str( + [int(v) for v in current_labels] + ) + dataframe.columns = legend_names + except AttributeError: + warnings.warn( + "The verbose names can not be established. " + "The column specified by 'column_name' needs to be of type " + "'ClassLabel' to create a verbose names. " + "The available names will used.", + stacklevel=1, + ) + return dataframe + + +def compute_frequencies( + partitioner: Partitioner, + column_name: str, + verbose_names: bool = False, + max_num_partitions: Optional[int] = None, +) -> pd.DataFrame: + """Compute the frequencies of unique values in a given column in the partitions. + + The frequencies sum up to 1 for a given partition id. 
This function takes into + account all possible labels in the dataset when computing the count for each + partition (assign 0 as the size when there are no values for a label in the + partition). + + Parameters + ---------- + partitioner : Partitioner + Partitioner with an assigned dataset. + column_name : str + Column name identifying label based on which the count will be calculated. + verbose_names : bool + Whether to use verbose versions of the values in the column specified by + `column_name`. The verbose value are possible to extract if the column is a + feature of type `ClassLabel`. + max_num_partitions : Optional[int] + The maximum number of partitions that will be used. If greater than the + total number of partitions in a partitioner, it won't have an effect. If left + as None, then all partitions will be used. + + Returns + ------- + dataframe: pd.DataFrame + DataFrame where the row index represent the partition id and the column index + represent the unique values found in column specified by `column_name` + (e.g. represeting the labels). The value of the dataframe.loc[i, j] represnt + the ratio of the label j to the total number of sample of in partition i. 
+ + Examples + -------- + Generate DataFrame with label counts resulting from DirichletPartitioner on cifar10 + + >>> from flwr_datasets import FederatedDataset + >>> from flwr_datasets.partitioner import DirichletPartitioner + >>> from flwr_datasets.metrics import compute_frequencies + >>> + >>> fds = FederatedDataset( + >>> dataset="cifar10", + >>> partitioners={ + >>> "train": DirichletPartitioner( + >>> num_partitions=20, + >>> partition_by="label", + >>> alpha=0.3, + >>> min_partition_size=0, + >>> ), + >>> }, + >>> ) + >>> partitioner = fds.partitioners["train"] + >>> counts_dataframe = compute_frequencies( + >>> partitioner=partitioner, + >>> column_name="label" + >>> ) + """ + dataframe = compute_counts( + partitioner, column_name, verbose_names, max_num_partitions + ) + dataframe = dataframe.div(dataframe.sum(axis=1), axis=0) + return dataframe + + +def _compute_counts( + labels: Union[list[int], list[str]], unique_labels: Union[list[int], list[str]] +) -> pd.Series: + """Compute the count of labels when taking into account all possible labels. + + Also known as absolute frequency. + + Parameters + ---------- + labels: Union[List[int], List[str]] + The labels from the datasets. + unique_labels: Union[List[int], List[str]] + The reference all unique label. Needed to avoid missing any label, instead + having the value equal to zero for them. + + Returns + ------- + label_counts: pd.Series + The pd.Series with label as indices and counts as values. 
+ """ + if len(unique_labels) != len(set(unique_labels)): + raise ValueError("unique_labels must contain unique elements only.") + labels_series = pd.Series(labels) + label_counts = labels_series.value_counts() + label_counts_with_zeros = pd.Series(index=unique_labels, data=0) + label_counts_with_zeros = label_counts_with_zeros.add( + label_counts, fill_value=0 + ).astype(int) + return label_counts_with_zeros + + +def _compute_frequencies( + labels: Union[list[int], list[str]], unique_labels: Union[list[int], list[str]] +) -> pd.Series: + """Compute the distribution of labels when taking into account all possible labels. + + Also known as relative frequency. + + Parameters + ---------- + labels: Union[List[int], List[str]] + The labels from the datasets. + unique_labels: Union[List[int], List[str]] + The reference all unique label. Needed to avoid missing any label, instead + having the value equal to zero for them. + + Returns + ------- + The pd.Series with label as indices and probabilities as values. + """ + counts = _compute_counts(labels, unique_labels) + if len(labels) == 0: + frequencies = counts.astype(float) + return frequencies + frequencies = counts.divide(len(labels)) + return frequencies diff --git a/datasets/flwr_datasets/metrics/utils_test.py b/datasets/flwr_datasets/metrics/utils_test.py new file mode 100644 index 000000000000..9fab9994c430 --- /dev/null +++ b/datasets/flwr_datasets/metrics/utils_test.py @@ -0,0 +1,200 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for metrics utils.""" + + +import unittest + +import pandas as pd +from parameterized import parameterized, parameterized_class + +import datasets +from datasets import ClassLabel +from flwr_datasets.metrics.utils import ( + _compute_counts, + _compute_frequencies, + compute_counts, + compute_frequencies, +) +from flwr_datasets.partitioner import IidPartitioner + + +@parameterized_class( + ("dataset", "result"), + [ + ( + datasets.Dataset.from_dict({"feature": list(range(10)), "label": [0] * 10}), + pd.DataFrame([[5], [5]], index=pd.Index([0, 1], name="Partition ID")), + ), + ( + datasets.Dataset.from_dict( + {"feature": list(range(10)), "label": [0] * 5 + [1] * 5} + ), + pd.DataFrame([[5, 0], [0, 5]], index=pd.Index([0, 1], name="Partition ID")), + ), + ( + datasets.Dataset.from_dict( + {"feature": list(range(10)), "label": [0, 0, 0, 1, 1] + [1, 1, 1, 1, 2]} + ), + pd.DataFrame( + [[3, 2, 0], [0, 4, 1]], index=pd.Index([0, 1], name="Partition ID") + ), + ), + ], +) +class TestPublicMetricsUtils(unittest.TestCase): + """Test metrics utils.""" + + dataset: datasets.Dataset + result: pd.DataFrame + + def test_compute_counts(self) -> None: + """Test if the counts are computed correctly.""" + iid_partitioner = IidPartitioner(num_partitions=2) + iid_partitioner.dataset = self.dataset + count = compute_counts(iid_partitioner, column_name="label") + pd.testing.assert_frame_equal(count, self.result) + + def test_compute_frequencies(self) -> None: + """Test if the frequencies are computed correctly.""" + iid_partitioner = IidPartitioner(num_partitions=2) + iid_partitioner.dataset = self.dataset + frequencies = compute_frequencies(iid_partitioner, column_name="label") + result = self.result.div(self.result.sum(axis=1), axis=0) + pd.testing.assert_frame_equal(frequencies, result) + 
+ def test_compute_counts_with_verbose_label(self) -> None: + """Test if the counts are computed correctly.""" + iid_partitioner = IidPartitioner(num_partitions=2) + dataset = self.dataset + new_col_names = [ + str(col_id) for col_id in range(len(self.dataset.unique("label"))) + ] + dataset = dataset.cast_column( + "label", + ClassLabel( + num_classes=len(self.dataset.unique("label")), names=new_col_names + ), + ) + iid_partitioner.dataset = dataset + result = self.result.copy() + result.columns = new_col_names + count = compute_counts(iid_partitioner, column_name="label", verbose_names=True) + pd.testing.assert_frame_equal(count, result) + + def test_compute_frequencies_with_verbose_label(self) -> None: + """Test if the frequencies are computed correctly.""" + iid_partitioner = IidPartitioner(num_partitions=2) + dataset = self.dataset + new_col_names = [ + str(col_id) for col_id in range(len(self.dataset.unique("label"))) + ] + dataset = dataset.cast_column( + "label", + ClassLabel( + num_classes=len(self.dataset.unique("label")), names=new_col_names + ), + ) + iid_partitioner.dataset = dataset + result = self.result.copy() + result.columns = new_col_names + result = result.div(result.sum(axis=1), axis=0) + frequencies = compute_frequencies( + iid_partitioner, column_name="label", verbose_names=True + ) + pd.testing.assert_frame_equal(frequencies, result) + + def test_compute_count_with_smaller_max_partitions(self) -> None: + """Test is compute_count works when the max_partitions None: + """Test is compute_count works when the max_partitions>total partitions.""" + iid_partitioner = IidPartitioner(num_partitions=2) + iid_partitioner.dataset = self.dataset + count = compute_counts( + iid_partitioner, column_name="label", max_num_partitions=3 + ) + pd.testing.assert_frame_equal(count, self.result) + + +class TestPrivateMetricsUtils(unittest.TestCase): + """Test metrics utils.""" + + @parameterized.expand( # type: ignore + [ + ([1, 2, 2, 3], [1, 2, 3, 4], 
pd.Series([1, 2, 1, 0], index=[1, 2, 3, 4])), + ([], [1, 2, 3], pd.Series([0, 0, 0], index=[1, 2, 3])), + ([1, 1, 2], [1, 2, 3, 4], pd.Series([2, 1, 0, 0], index=[1, 2, 3, 4])), + ] + ) + def test__compute_counts(self, labels, unique_labels, expected) -> None: + """Test if the counts are computed correctly.""" + result = _compute_counts(labels, unique_labels) + pd.testing.assert_series_equal(result, expected) + + @parameterized.expand( # type: ignore + [ + ( + [1, 1, 2, 2, 2, 3], + [1, 2, 3, 4], + pd.Series([0.3333, 0.5, 0.1667, 0.0], index=[1, 2, 3, 4]), + ), + ([], [1, 2, 3], pd.Series([0.0, 0.0, 0.0], index=[1, 2, 3])), + ( + ["a", "b", "b", "c"], + ["a", "b", "c", "d"], + pd.Series([0.25, 0.50, 0.25, 0.0], index=["a", "b", "c", "d"]), + ), + ] + ) + def test_compute_distribution(self, labels, unique_labels, expected) -> None: + """Test if the distributions are computed correctly.""" + result = _compute_frequencies(labels, unique_labels) + pd.testing.assert_series_equal(result, expected, atol=0.001) + + @parameterized.expand( # type: ignore + [ + (["a", "b", "b", "c"], ["a", "b", "c"]), + ([1, 2, 2, 3, 3, 3, 4], [1, 2, 3, 4]), + ] + ) + def test_distribution_sum_to_one(self, labels, unique_labels) -> None: + """Test if distributions sum up to one.""" + result = _compute_frequencies(labels, unique_labels) + self.assertAlmostEqual(result.sum(), 1.0) + + def test_compute_counts_non_unique_labels(self) -> None: + """Test if not having the unique labels raises ValueError.""" + labels = [1, 2, 3] + unique_labels = [1, 2, 2, 3] + with self.assertRaises(ValueError): + _compute_counts(labels, unique_labels) + + def test_compute_distribution_non_unique_labels(self) -> None: + """Test if not having the unique labels raises ValueError.""" + labels = [1, 1, 2, 3] + unique_labels = [1, 1, 2, 3] + with self.assertRaises(ValueError): + _compute_frequencies(labels, unique_labels) + + +if __name__ == "__main__": + unittest.main() diff --git 
a/datasets/flwr_datasets/mock_utils_test.py b/datasets/flwr_datasets/mock_utils_test.py index 78aff1f1cdd7..0976166648eb 100644 --- a/datasets/flwr_datasets/mock_utils_test.py +++ b/datasets/flwr_datasets/mock_utils_test.py @@ -19,7 +19,7 @@ import random import string from datetime import datetime, timedelta -from typing import Any, Dict, List, Set, Tuple, Union +from typing import Any, Optional, Union import numpy as np from PIL import Image @@ -30,7 +30,7 @@ def _generate_artificial_strings( num_rows: int, num_unique: int, string_length: int, seed: int = 42 -) -> List[str]: +) -> list[str]: """Create list of strings for categories or labels mocking. Note to keep the seed the same if you reuse this function for in creation of the @@ -53,7 +53,7 @@ def _generate_artificial_strings( List of generated strings. """ random.seed(seed) - unique_strings: Set[str] = set() + unique_strings: set[str] = set() while len(unique_strings) < num_unique: random_str = "".join( random.choices(string.ascii_letters + string.digits, k=string_length) @@ -68,7 +68,7 @@ def _generate_artificial_strings( return artificial_column -def _generate_artificial_categories(num_rows: int, choices: List[Any]) -> List[str]: +def _generate_artificial_categories(num_rows: int, choices: list[Any]) -> list[str]: """Create list of strings from given `choices` list.""" artificial_column = choices.copy() remaining_to_allocate = num_rows - len(choices) @@ -82,7 +82,7 @@ def _generate_random_word(length: int) -> str: return "".join(random.choices(string.ascii_letters, k=length)) -def _generate_random_text_column(num_rows: int, length: int) -> List[str]: +def _generate_random_text_column(num_rows: int, length: int) -> list[str]: """Generate a list of random text of specified length.""" text_col = [] for _ in range(num_rows): @@ -98,7 +98,7 @@ def _generate_random_sentence( ) -> str: """Generate a random sentence with words of random lengths.""" sentence_length = random.randint(min_sentence_length, 
max_sentence_length) - sentence: List[str] = [] + sentence: list[str] = [] while len(" ".join(sentence)) < sentence_length: word_length = random.randint(min_word_length, max_word_length) word = _generate_random_word(word_length) @@ -112,7 +112,7 @@ def _generate_random_sentences( max_word_length: int, min_sentence_length: int, max_sentence_length: int, -) -> List[str]: +) -> list[str]: """Generate a list of random sentences.""" text_col = [ _generate_random_sentence( @@ -123,7 +123,7 @@ def _generate_random_sentences( return text_col -def _make_num_rows_none(column: List[Any], num_none: int) -> List[Any]: +def _make_num_rows_none(column: list[Any], num_none: int) -> list[Any]: """Assign none num_none times to the given list.""" column_copy = column.copy() none_positions = random.sample(range(len(column_copy)), num_none) @@ -154,7 +154,7 @@ def _generate_random_date_column( end_date: datetime, date_format: str = "%a %b %d %H:%M:%S %Y", as_string: bool = True, -) -> List[Union[str, datetime]]: +) -> list[Union[str, datetime]]: """Generate a list of random dates.""" return [ _generate_random_date(start_date, end_date, date_format, as_string) @@ -162,21 +162,21 @@ def _generate_random_date_column( ] -def _generate_random_int_column(num_rows: int, min_int: int, max_int: int) -> List[int]: +def _generate_random_int_column(num_rows: int, min_int: int, max_int: int) -> list[int]: """Generate a list of ints.""" return [random.randint(min_int, max_int) for _ in range(num_rows)] -def _generate_random_bool_column(num_rows: int) -> List[bool]: +def _generate_random_bool_column(num_rows: int) -> list[bool]: """Generate a list of bools.""" return [random.choice([True, False]) for _ in range(num_rows)] def _generate_random_image_column( num_rows: int, - image_size: Union[Tuple[int, int], Tuple[int, int, int]], + image_size: Union[tuple[int, int], tuple[int, int, int]], simulate_type: str, -) -> List[Any]: +) -> list[Any]: """Simulate the images with the format that is found in HF 
Hub. Directly using `Image.fromarray` does not work because it creates `PIL.Image.Image`. @@ -190,7 +190,7 @@ def _generate_random_image_column( pil_imgs = [] for np_image in np_images: # Convert the NumPy array to a PIL image - pil_img_beg = Image.fromarray(np_image) # type: ignore + pil_img_beg = Image.fromarray(np_image) # Save the image to an in-memory bytes buffer in_memory_file = io.BytesIO() @@ -207,7 +207,7 @@ def generate_random_audio_column( num_rows: int, sampling_rate: int, length_in_samples: int, -) -> List[Dict[str, Any]]: +) -> list[dict[str, Any]]: """Simulate the audio column. Audio column in the datset is comprised from an array or floats, sample_rate and a @@ -365,8 +365,8 @@ def _mock_speach_commands(num_rows: int) -> Dataset: def _load_mocked_dataset( dataset_name: str, - num_rows: List[int], - split_names: List[str], + num_rows: list[int], + split_names: list[str], subset: str = "", ) -> DatasetDict: dataset_dict = {} @@ -375,3 +375,65 @@ def _load_mocked_dataset( for params in zip(num_rows, split_names): dataset_dict[params[1]] = dataset_creation_fnc(params[0]) return datasets.DatasetDict(dataset_dict) + + +def _load_mocked_dataset_by_partial_download( + dataset_name: str, + split_name: str, + skip_take_list: list[tuple[int, int]], + subset_name: Optional[str] = None, +) -> Dataset: + """Download a partial dataset. + + This functionality is not supported in the datasets library. This is an informal + way of achieving partial dataset download by using the `streaming=True` and creating + a dataset.Dataset from in-memory objects. + + Parameters + ---------- + dataset_name: str + Name of the dataset (passed to load_dataset). + split_name: str + Name of the split (passed to load_dataset) e.g. "train". + skip_take_list: List[Tuple[int, int]] + The streaming mode has a specific type of accessing the data, the first tuple + value is how many samples to skip, the second is how many samples to take. 
Due + to this mechanism, diverse samples can be taken (especially if the dataset is + sorted by the natural_id for NaturalIdPartitioner). + subset_name: Optional[str] + Name of the subset (passed to load_dataset) e.g. "v0.01" for speech_commands. + + Returns + ------- + dataset: Dataset + The dataset with the requested samples. + """ + dataset = datasets.load_dataset( + dataset_name, name=subset_name, split=split_name, streaming=True + ) + dataset_list = [] + # It's a list of dict such that each dict represents a single sample of the dataset + # The sample is exactly the same as if the full dataset was downloaded and indexed + for skip, take in skip_take_list: + # dataset.skip(n).take(m) in streaming mode is equivalent (in terms of return) + # to the fully downloaded dataset index: dataset[n+1: (n+1 + m)] + dataset_list.extend(list(dataset.skip(skip).take(take))) + return Dataset.from_list(dataset_list) + + +def _load_mocked_dataset_dict_by_partial_download( + dataset_name: str, + split_names: list[str], + skip_take_lists: list[list[tuple[int, int]]], + subset_name: Optional[str] = None, +) -> DatasetDict: + """Like _load_mocked_dataset_by_partial_download but for many splits.""" + assert len(split_names) == len( + skip_take_lists + ), "The split_names should be the same length as the skip_take_lists." 
+ dataset_dict = {} + for split_name, skip_take_list in zip(split_names, skip_take_lists): + dataset_dict[split_name] = _load_mocked_dataset_by_partial_download( + dataset_name, split_name, skip_take_list, subset_name + ) + return DatasetDict(dataset_dict) diff --git a/datasets/flwr_datasets/partitioner/__init__.py b/datasets/flwr_datasets/partitioner/__init__.py index 0d1edbfcb04a..a14efa1cc905 100644 --- a/datasets/flwr_datasets/partitioner/__init__.py +++ b/datasets/flwr_datasets/partitioner/__init__.py @@ -16,25 +16,33 @@ from .dirichlet_partitioner import DirichletPartitioner +from .distribution_partitioner import DistributionPartitioner from .exponential_partitioner import ExponentialPartitioner +from .grouped_natural_id_partitioner import GroupedNaturalIdPartitioner +from .id_to_size_fnc_partitioner import IdToSizeFncPartitioner from .iid_partitioner import IidPartitioner from .inner_dirichlet_partitioner import InnerDirichletPartitioner from .linear_partitioner import LinearPartitioner from .natural_id_partitioner import NaturalIdPartitioner from .partitioner import Partitioner +from .pathological_partitioner import PathologicalPartitioner from .shard_partitioner import ShardPartitioner from .size_partitioner import SizePartitioner from .square_partitioner import SquarePartitioner __all__ = [ + "DirichletPartitioner", + "DistributionPartitioner", + "ExponentialPartitioner", + "GroupedNaturalIdPartitioner", + "IdToSizeFncPartitioner", "IidPartitioner", - "Partitioner", + "InnerDirichletPartitioner", + "LinearPartitioner", "NaturalIdPartitioner", - "DirichletPartitioner", + "Partitioner", + "PathologicalPartitioner", + "ShardPartitioner", "SizePartitioner", - "LinearPartitioner", - "InnerDirichletPartitioner", "SquarePartitioner", - "ShardPartitioner", - "ExponentialPartitioner", ] diff --git a/datasets/flwr_datasets/partitioner/dirichlet_partitioner.py b/datasets/flwr_datasets/partitioner/dirichlet_partitioner.py index f3feb2174bde..55c190087f7c 100644 --- 
a/datasets/flwr_datasets/partitioner/dirichlet_partitioner.py +++ b/datasets/flwr_datasets/partitioner/dirichlet_partitioner.py @@ -16,7 +16,7 @@ import warnings -from typing import Dict, List, Optional, Union +from typing import Optional, Union import numpy as np @@ -78,7 +78,7 @@ class DirichletPartitioner(Partitioner): >>> print(partition[0]) # Print the first example {'image': , 'label': 4} - >>> partition_sizes = partition_sizes = [ + >>> partition_sizes = [ >>> len(fds.load_partition(partition_id)) for partition_id in range(10) >>> ] >>> print(sorted(partition_sizes)) @@ -89,7 +89,7 @@ def __init__( # pylint: disable=R0913 self, num_partitions: int, partition_by: str, - alpha: Union[int, float, List[float], NDArrayFloat], + alpha: Union[int, float, list[float], NDArrayFloat], min_partition_size: int = 10, self_balancing: bool = False, shuffle: bool = True, @@ -110,8 +110,8 @@ def __init__( # pylint: disable=R0913 # Utility attributes # The attributes below are determined during the first call to load_partition self._avg_num_of_samples_per_partition: Optional[float] = None - self._unique_classes: Optional[Union[List[int], List[str]]] = None - self._partition_id_to_indices: Dict[int, List[int]] = {} + self._unique_classes: Optional[Union[list[int], list[str]]] = None + self._partition_id_to_indices: dict[int, list[int]] = {} self._partition_id_to_indices_determined = False def load_partition(self, partition_id: int) -> datasets.Dataset: @@ -142,7 +142,7 @@ def num_partitions(self) -> int: return self._num_partitions def _initialize_alpha( - self, alpha: Union[int, float, List[float], NDArrayFloat] + self, alpha: Union[int, float, list[float], NDArrayFloat] ) -> NDArrayFloat: """Convert alpha to the used format in the code a NDArrayFloat. 
@@ -164,7 +164,7 @@ def _initialize_alpha( alpha = np.array([float(alpha)], dtype=float).repeat(self._num_partitions) elif isinstance(alpha, float): alpha = np.array([alpha], dtype=float).repeat(self._num_partitions) - elif isinstance(alpha, List): + elif isinstance(alpha, list): if len(alpha) != self._num_partitions: raise ValueError( "If passing alpha as a List, it needs to be of length of equal to " @@ -217,7 +217,7 @@ def _determine_partition_id_to_indices_if_needed( sampling_try = 0 while True: # Prepare data structure to store indices assigned to partition ids - partition_id_to_indices: Dict[int, List[int]] = {} + partition_id_to_indices: dict[int, list[int]] = {} for nid in range(self._num_partitions): partition_id_to_indices[nid] = [] diff --git a/datasets/flwr_datasets/partitioner/dirichlet_partitioner_test.py b/datasets/flwr_datasets/partitioner/dirichlet_partitioner_test.py index b2407b5d5822..ed38e8ee2a41 100644 --- a/datasets/flwr_datasets/partitioner/dirichlet_partitioner_test.py +++ b/datasets/flwr_datasets/partitioner/dirichlet_partitioner_test.py @@ -17,7 +17,7 @@ # pylint: disable=W0212 import unittest -from typing import Tuple, Union +from typing import Union import numpy as np from numpy.typing import NDArray @@ -33,7 +33,7 @@ def _dummy_setup( num_rows: int, partition_by: str, self_balancing: bool = True, -) -> Tuple[Dataset, DirichletPartitioner]: +) -> tuple[Dataset, DirichletPartitioner]: """Create a dummy dataset and partitioner for testing.""" data = { partition_by: [i % 3 for i in range(num_rows)], diff --git a/datasets/flwr_datasets/partitioner/distribution_partitioner.py b/datasets/flwr_datasets/partitioner/distribution_partitioner.py new file mode 100644 index 000000000000..86be62b36070 --- /dev/null +++ b/datasets/flwr_datasets/partitioner/distribution_partitioner.py @@ -0,0 +1,424 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Distribution partitioner class that works with Hugging Face Datasets.""" + + +from collections import Counter +from typing import Optional, Union + +import numpy as np + +import datasets +from flwr_datasets.common.typing import NDArray, NDArrayFloat, NDArrayInt +from flwr_datasets.partitioner.partitioner import Partitioner + + +class DistributionPartitioner(Partitioner): # pylint: disable=R0902 + """Partitioner based on a distribution. + + Inspired from implementations of Li et al. Federated Optimization in + Heterogeneous Networks (2020) https://arxiv.org/abs/1812.06127. + + Given a 2-dimensional user-specified distribution, the algorithm splits the dataset + for each unique label per partition where each label is assigned to the partitions + in a deterministic pathological manner. The 1st dimension is the number of unique + labels and the 2nd-dimension is the number of buckets into which the samples + associated with each label will be divided. That is, given a distribution array of + shape, + `num_unique_labels_per_partition` x `num_partitions` + ( `num_unique_labels`, ---------------------------------------------------- ), + `num_unique_labels` + the label_id at the i'th row is assigned to the partition_id based on the following + approach. 
+ + First, for an i'th row, generate a list of `id`s according to the formula: + id = alpha + beta + where, + alpha = (i - num_unique_labels_per_partition + 1) \ + + (j % num_unique_labels_per_partition), + alpha = alpha + (alpha >= 0 ? 0 : num_unique_labels), + beta = num_unique_labels * (j // num_unique_labels_per_partition) + and j in {0, 1, 2, ..., `num_columns`}. Then, sort the list of `id`s in ascending + order. The j'th index in this sorted list corresponds to the partition_id that the + i'th unique label (and the underlying distribution array value) will be assigned to. + So, for a dataset with 10 unique labels and a configuration with 20 partitions and + 2 unique labels per partition, the 0'th row of the distribution array (corresponding + to class 0) will be assigned to partitions [0, 9, 10, 19], 1st row (class 1) to + [0, 1, 10, 11], 2nd row (class 2) to [1, 2, 11, 12], 3rd row (class 3) to + [2, 3, 12, 13], etc ... . Alternatively, the distribution can be interpreted as + partition 0 having classes 0 and 1, partition 1 having classes 1 and 2, partition 2 + having classes 2 and 3, etc ... The list representing the unique labels is sorted + in ascending order. + + Parameters + ---------- + distribution_array : Union[NDArrayInt, NDArrayFloat] + A 2-dimensional numpy array of the probability distribution of samples + for all labels in all partitions. The array shape should be + (`num_unique_labels`, + `num_unique_labels_per_partition*num_partitions/num_unique_labels`), + such that the first row of the array corresponds to the sample distribution + of the first unique label (in ascending order). The values may be scaled per + label such that the sum of the label distributions across all partitions are + equal to the original unpartitioned label distribution + - see the `rescale` argument. + num_partitions : int + The total number of partitions that the data will be divided into. 
The number of + partitions must be an integer multiple of the number of unique labels in the + dataset. + num_unique_labels_per_partition : int + Number of unique labels assigned to a single partition. + partition_by : str + Column name of the labels (targets) based on which sampling works. + preassigned_num_samples_per_label : int + The number of samples that each unique label in each partition will first + be assigned before the `distribution_array` values are assigned. This + value has no effect if `rescale` is set to False. + rescale : bool, default=True + Whether to partition samples according to the values in + `distribution_array` or rescale based on the original unpartitioned class label + distribution. `float` values are rounded to the nearest `int`. All samples for + any label_id are exhausted during the partitioning by randomly assigning any + unassigned samples from round-off errors to one of the label_id's partition_ids. + shuffle : bool, default=True + Whether to randomize the order of samples. Shuffling is applied after the + samples assignment to nodes. + seed : int, default=42 + Seed used for dataset shuffling. It has no effect if `shuffle` is False. + + Examples + -------- + In order to reproduce the power-law distribution of the paper, follow this setup: + + >>> from flwr_datasets import FederatedDataset + >>> from flwr_datasets.partitioner import DistributionPartitioner + >>> from pprint import pprint + >>> import numpy as np + >>> + >>> num_partitions = 1_000 + >>> num_unique_labels_per_partition = 2 + >>> num_unique_labels = 10 + >>> preassigned_num_samples_per_label = 5 + >>> + >>> # Generate a vector from a log-normal probability distribution + >>> rng = np.random.default_rng(2024) + >>> mu, sigma = 0., 2. 
+ >>> distribution_array = rng.lognormal( + >>> mu, + >>> sigma, + >>> (num_partitions*num_unique_labels_per_partition), + >>> ) + >>> distribution_array = distribution_array.reshape((num_unique_labels, -1)) + >>> + >>> partitioner = DistributionPartitioner( + >>> distribution_array=distribution_array, + >>> num_partitions=num_partitions, + >>> num_unique_labels_per_partition=num_unique_labels_per_partition, + >>> partition_by="label", # MNIST dataset has a target column `label` + >>> preassigned_num_samples_per_label=preassigned_num_samples_per_label, + >>> ) + >>> fds = FederatedDataset(dataset="mnist", partitioners={"train": partitioner}) + >>> partition = fds.load_partition(0) + >>> print(partition[0]) # Print the first example + {'image': , + 'label': 0} + >>> distributions = { + >>> partition_id: fds.load_partition(partition_id=partition_id) + >>> .to_pandas()["label"] + >>> .value_counts() + >>> .to_dict() + >>> for partition_id in range(10) + >>> } + >>> pprint(distributions) + {0: {0: 40, 1: 5}, + 1: {2: 36, 1: 5}, + 2: {3: 52, 2: 7}, + 3: {3: 14, 4: 6}, + 4: {4: 47, 5: 28}, + 5: {6: 30, 5: 5}, + 6: {6: 19, 7: 11}, + 7: {8: 22, 7: 11}, + 8: {9: 11, 8: 5}, + 9: {0: 124, 9: 13}} + """ + + def __init__( # pylint: disable=R0913 + self, + distribution_array: Union[NDArrayInt, NDArrayFloat], + num_partitions: int, + num_unique_labels_per_partition: int, + partition_by: str, + preassigned_num_samples_per_label: int, + rescale: bool = True, + shuffle: bool = True, + seed: Optional[int] = 42, + ) -> None: + super().__init__() + # Attributes based on the constructor + self._distribution_array = distribution_array + self._num_partitions = num_partitions + self._num_unique_labels_per_partition = num_unique_labels_per_partition + self._partition_by = partition_by + self._preassigned_num_samples_per_label = preassigned_num_samples_per_label + self._rescale = rescale + self._shuffle = shuffle + self._seed = seed + self._rng = np.random.default_rng(seed=self._seed) # 
NumPy random generator + + # Utility attributes + # The attributes below are determined during the first call to load_partition + self._num_unique_labels: int = 0 + self._num_columns: int = 0 + self._partition_id_to_indices_determined = False + self._partition_id_to_indices: dict[int, list[int]] = {} + + def load_partition(self, partition_id: int) -> datasets.Dataset: + """Load a partition based on the partition index. + + Parameters + ---------- + partition_id : int + the index that corresponds to the requested partition + + Returns + ------- + dataset_partition : Dataset + single partition of a dataset + """ + # The partitioning is done lazily - only when the first partition is + # requested. Only the first call creates the indices assignments for all the + # partition indices. + self._check_distribution_array_shape_if_needed() + self._check_num_unique_labels_per_partition_if_needed() + self._check_distribution_array_sum_if_needed() + self._check_num_partitions_correctness_if_needed() + self._determine_partition_id_to_indices_if_needed() + return self.dataset.select(self._partition_id_to_indices[partition_id]) + + @property + def num_partitions(self) -> int: + """Total number of partitions.""" + return self._num_partitions + + def _determine_partition_id_to_indices_if_needed( # pylint: disable=R0914 + self, + ) -> None: + """Create an assignment of indices to the partition indices.""" + if self._partition_id_to_indices_determined: + return + + # Compute the label distribution from the dataset + unique_labels = sorted(self.dataset.unique(self._partition_by)) + labels = np.asarray(self.dataset[self._partition_by]) + unique_label_to_indices = {} + unique_label_distribution = {} + + for unique_label in unique_labels: + unique_label_to_indices[unique_label] = np.where(labels == unique_label)[0] + unique_label_distribution[unique_label] = len( + unique_label_to_indices[unique_label] + ) + + if self._rescale: + # Compute the normalized distribution for each class label 
+ self._distribution_array = self._distribution_array / np.sum( + self._distribution_array, axis=-1, keepdims=True + ) + + # Compute the total preassigned number of samples per label for all labels + # and partitions. This sum will be subtracted from the label distribution + # of the original dataset, and added back later. It ensures that + # (1) each partition will have at least + # `self._preassigned_num_samples_per_label`, and + # (2) there is sufficient indices to sample from the dataset. + total_preassigned_samples = int( + self._preassigned_num_samples_per_label * self._num_columns + ) + + label_distribution = np.fromiter( + unique_label_distribution.values(), + dtype=float, + ) + + self._check_total_preassigned_samples_within_limit( + label_distribution, total_preassigned_samples + ) + + # Subtract the preassigned total amount from the label distribution, + # we'll add these back later. + label_distribution -= total_preassigned_samples + + # Rescale normalized distribution with the actual label distribution. + # Each row represents the number of samples to be taken for that class label + # and the sum of each row equals the total of each class label. 
+ label_sampling_matrix = np.floor( + self._distribution_array * label_distribution[:, np.newaxis] + ).astype(int) + + # Add back the preassigned total amount + label_sampling_matrix += self._preassigned_num_samples_per_label + else: + label_sampling_matrix = self._distribution_array.astype(int) + + # Create the label sampling dictionary + label_samples = dict( + zip(unique_label_distribution.keys(), label_sampling_matrix) + ) + + # Create indices split from dataset + split_indices_per_label = {} + for unique_label in unique_labels: + # Compute cumulative sum of samples to identify splitting points + cumsum_division_numbers = np.cumsum(label_samples[unique_label]) + split_indices = np.split( + unique_label_to_indices[unique_label], cumsum_division_numbers + ) + if self._rescale: + # Randomly append unassigned samples (which are in the last split that + # exceeds `self._num_columns`) to one of the `self._num_columns` + # partitions. Unassigned samples originate from float-to-int rounding + # errors of the normalizing algorithm. + if len(split_indices) > self._num_columns: + last_split = split_indices.pop() + random_index = self._rng.integers(0, self._num_columns) + split_indices[random_index] = np.append( + split_indices[random_index], last_split + ) + assert len(split_indices) == self._num_columns + split_indices_per_label[unique_label] = split_indices + + # Initialize sampling tracker. Keys are the unique class labels. + # Values are the smallest indices of each array in `label_samples` + # which will be sampled next. Once a sample is taken from a label/key, + # increment the value (index) by 1. + index_tracker = {k: 0 for k in unique_labels} + + # Prepare data structure to store indices assigned to partition ids + self._partition_id_to_indices = { + partition_id: [] for partition_id in range(self._num_partitions) + } + + for partition_id in range(self._num_partitions): + # Get the `num_unique_labels_per_partition` labels for each partition. 
Use + # `numpy.roll` to get indices of adjacent sorted labels for pathological + # label distributions. + labels_per_client = np.roll(unique_labels, -partition_id)[ + : self._num_unique_labels_per_partition + ] + for label in labels_per_client: + index_to_sample = index_tracker[label] + self._partition_id_to_indices[partition_id].extend( + split_indices_per_label[label][index_to_sample] + ) + index_tracker[label] += 1 + + # Shuffle the indices to avoid datasets with targets in sequences like + # [00000, 11111, ...]) if the shuffle is True + if self._shuffle: + for indices in self._partition_id_to_indices.values(): + # In place shuffling + self._rng.shuffle(indices) + self._partition_id_to_indices_determined = True + + def _check_distribution_array_shape_if_needed(self) -> None: + """Test distribution array shape correctness.""" + if not self._partition_id_to_indices_determined: + if not isinstance(self._distribution_array, np.ndarray): + raise TypeError("Input must be a NumPy array.") + + if self._distribution_array.ndim != 2: + raise ValueError("The distribution array is not 2-dimensional.") + + self._num_unique_labels = len(self.dataset.unique(self._partition_by)) + self._num_columns = int( + self._num_unique_labels_per_partition + * self._num_partitions + / self._num_unique_labels + ) + + if self._distribution_array.shape[0] != self._num_unique_labels: + raise ValueError( + "The expected number of rows in `distribution_array` must equal to " + "the number of unique labels in the dataset, which is " + f"{self._num_unique_labels}, but the number of rows in " + f"`distribution_array` is {self._distribution_array.shape[0]}." + ) + + if self._distribution_array.shape[1] != self._num_columns: + raise ValueError( + "The expected number of columns in `distribution_array` is " + f"{self._num_columns} (refer to the documentation for the " + "expression), but the number of columns in `distribution_array` " + f"is {self._distribution_array.shape[1]}." 
+ ) + + def _check_num_unique_labels_per_partition_if_needed(self) -> None: + """Test number of unique labels do not exceed self.num_unique_labels.""" + if self._num_unique_labels_per_partition > self._num_unique_labels: + raise ValueError( + "The specified `num_unique_labels_per_partition`" + f"={self._num_unique_labels_per_partition} is greater than the number " + f"of unique labels in the given dataset={self._num_unique_labels} " + f"as specified by the label column `{self._partition_by}`." + "Reduce the `num_unique_labels_per_partition` or make use of a " + "different dataset to apply this partitioning." + ) + + def _check_distribution_array_sum_if_needed(self) -> None: + """Test correctness of distribution array sum.""" + if not self._partition_id_to_indices_determined and not self._rescale: + labels = self.dataset[self._partition_by] + unique_labels_counter = sorted(Counter(labels).items()) + unique_labels_counter_vals = [v for _, v in unique_labels_counter] + + if any(self._distribution_array.sum(1) > unique_labels_counter_vals): + raise ValueError( + "The sum of at least one unique label distribution array " + "exceeds that of the unique labels counter in the given dataset= " + f"{dict(unique_labels_counter)}." + ) + + def _check_num_partitions_correctness_if_needed(self) -> None: + """Test num_partitions when the dataset is given.""" + if not self._partition_id_to_indices_determined: + if self._num_partitions > self.dataset.num_rows: + raise ValueError( + f"The number of partitions ({self._num_partitions}) needs to be " + "smaller than the number of samples in the dataset " + f"({self.dataset.num_rows})." + ) + if self._num_partitions % self._num_unique_labels != 0: + raise ValueError( + f"The number of partitions ({self._num_partitions}) is not " + f"divisible by the number of unique labels " + f"{({self._num_unique_labels})}." + ) + if not self._num_partitions > 0: + raise ValueError( + "The number of partitions needs to be greater than zero." 
+ ) + + def _check_total_preassigned_samples_within_limit( + self, label_distribution: NDArray, total_preassigned_samples: int + ) -> None: + """Test total preassigned samples do not exceed minimum allowable.""" + if any(label_distribution - total_preassigned_samples < self._num_columns): + raise ValueError( + "There is insufficient samples to partition by applying the specified " + "`preassigned_num_samples_per_label`" + f"={self._preassigned_num_samples_per_label}. Reduce the " + "`preassigned_num_samples_per_label` or use a different dataset with " + "more samples to apply this partition." + ) diff --git a/datasets/flwr_datasets/partitioner/distribution_partitioner_test.py b/datasets/flwr_datasets/partitioner/distribution_partitioner_test.py new file mode 100644 index 000000000000..306e208a706b --- /dev/null +++ b/datasets/flwr_datasets/partitioner/distribution_partitioner_test.py @@ -0,0 +1,182 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Test cases for DistributionPartitioner.""" + + +import unittest +from collections import Counter +from typing import Any, Union + +import numpy as np +from parameterized import parameterized_class + +from datasets import Dataset +from flwr_datasets.common.typing import NDArrayFloat, NDArrayInt +from flwr_datasets.partitioner.distribution_partitioner import DistributionPartitioner + + +def _dummy_dataset_setup( + num_samples: int, partition_by: str, num_unique_classes: int +) -> Dataset: + """Create a dummy dataset for testing.""" + data = { + partition_by: np.tile( + np.arange(num_unique_classes), num_samples // num_unique_classes + 1 + )[:num_samples], + "features": np.random.randn(num_samples), + } + return Dataset.from_dict(data) + + +def _dummy_distribution_setup( + num_partitions: int, + num_unique_labels_per_partition: int, + num_unique_labels: int, + random_mode: bool = False, +) -> Union[NDArrayFloat, NDArrayInt]: + """Create a dummy distribution for testing.""" + num_columns = num_unique_labels_per_partition * num_partitions / num_unique_labels + if random_mode: + rng = np.random.default_rng(2024) + return rng.integers(1, 10, size=(num_unique_labels, int(num_columns))) + return np.tile(np.arange(num_columns) + 1.0, (num_unique_labels, 1)) + + +# pylint: disable=R0913 +def _get_partitioner( + num_partitions: int, + num_unique_labels_per_partition: int, + num_samples: int, + num_unique_labels: int, + preassigned_num_samples_per_label: int, + rescale_mode: bool = True, +) -> tuple[DistributionPartitioner, dict[int, Dataset]]: + """Create DistributionPartitioner instance.""" + dataset = _dummy_dataset_setup( + num_samples, + "labels", + num_unique_labels, + ) + distribution = _dummy_distribution_setup( + num_partitions, + num_unique_labels_per_partition, + num_unique_labels, + ) + partitioner = DistributionPartitioner( + distribution_array=distribution, + 
num_partitions=num_partitions, + num_unique_labels_per_partition=num_unique_labels_per_partition, + partition_by="labels", + preassigned_num_samples_per_label=preassigned_num_samples_per_label, + rescale=rescale_mode, + ) + partitioner.dataset = dataset + partitions: dict[int, Dataset] = { + pid: partitioner.load_partition(pid) for pid in range(num_partitions) + } + + return partitioner, partitions + + +# mypy: disable-error-code="attr-defined" +@parameterized_class( + ( + "num_partitions", + "num_unique_labels_per_partition", + "num_samples", + "num_unique_labels", + "preassigned_num_samples_per_label", + ), + [ + (10, 2, 200, 10, 5), + (10, 2, 200, 10, 0), + (20, 1, 200, 10, 5), + ], +) +# pylint: disable=E1101 +class TestDistributionPartitioner(unittest.TestCase): + """Unit tests for DistributionPartitioner.""" + + def test_correct_num_classes_when_partitioned(self) -> None: + """Test correct number of unique classes.""" + _, partitions = _get_partitioner( + num_partitions=self.num_partitions, + num_unique_labels_per_partition=self.num_unique_labels_per_partition, + num_samples=self.num_samples, + num_unique_labels=self.num_unique_labels, + preassigned_num_samples_per_label=self.preassigned_num_samples_per_label, + ) + unique_classes_per_partition = { + pid: np.unique(partition["labels"]) for pid, partition in partitions.items() + } + + for unique_classes in unique_classes_per_partition.values(): + self.assertEqual(self.num_unique_labels_per_partition, len(unique_classes)) + + def test_correct_num_times_classes_sampled_across_partitions(self) -> None: + """Test correct number of times each unique class is drawn from distribution.""" + partitioner, partitions = _get_partitioner( + num_partitions=self.num_partitions, + num_unique_labels_per_partition=self.num_unique_labels_per_partition, + num_samples=self.num_samples, + num_unique_labels=self.num_unique_labels, + preassigned_num_samples_per_label=self.preassigned_num_samples_per_label, + ) + + 
partitioned_distribution: dict[Any, list[Any]] = { + label: [] for label in partitioner.dataset.unique("labels") + } + + num_columns = ( + self.num_unique_labels_per_partition + * self.num_partitions + / self.num_unique_labels + ) + for _, partition in partitions.items(): + for label in partition.unique("labels"): + value_counts = Counter(partition["labels"]) + partitioned_distribution[label].append(value_counts[label]) + + for label in partitioner.dataset.unique("labels"): + self.assertEqual(num_columns, len(partitioned_distribution[label])) + + def test_exact_distribution_assignment(self) -> None: + """Test that exact distribution is allocated to each class.""" + partitioner, partitions = _get_partitioner( + num_partitions=self.num_partitions, + num_unique_labels_per_partition=self.num_unique_labels_per_partition, + num_samples=self.num_samples, + num_unique_labels=self.num_unique_labels, + preassigned_num_samples_per_label=self.preassigned_num_samples_per_label, + rescale_mode=False, + ) + partitioned_distribution: dict[Any, list[Any]] = { + label: [] for label in partitioner.dataset.unique("labels") + } + + for _, partition in partitions.items(): + for label in partition.unique("labels"): + value_counts = Counter(partition["labels"]) + partitioned_distribution[label].append(value_counts[label]) + + for idx, label in enumerate(sorted(partitioner.dataset.unique("labels"))): + np.testing.assert_array_equal( + partitioner._distribution_array[idx], # pylint: disable=W0212 + partitioned_distribution[label], + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/datasets/flwr_datasets/partitioner/exponential_partitioner.py b/datasets/flwr_datasets/partitioner/exponential_partitioner.py index d35944f29f6f..1bf838df5909 100644 --- a/datasets/flwr_datasets/partitioner/exponential_partitioner.py +++ b/datasets/flwr_datasets/partitioner/exponential_partitioner.py @@ -17,10 +17,10 @@ import numpy as np -from flwr_datasets.partitioner.size_partitioner import 
SizePartitioner +from flwr_datasets.partitioner.id_to_size_fnc_partitioner import IdToSizeFncPartitioner -class ExponentialPartitioner(SizePartitioner): +class ExponentialPartitioner(IdToSizeFncPartitioner): """Partitioner creates partitions of size that are correlated with exp(id). The amount of data each client gets is correlated with the exponent of partition ID. @@ -35,6 +35,15 @@ class ExponentialPartitioner(SizePartitioner): ---------- num_partitions : int The total number of partitions that the data will be divided into. + + Examples + -------- + >>> from flwr_datasets import FederatedDataset + >>> from flwr_datasets.partitioner import ExponentialPartitioner + >>> + >>> partitioner = ExponentialPartitioner(num_partitions=10) + >>> fds = FederatedDataset(dataset="mnist", partitioners={"train": partitioner}) + >>> partition = fds.load_partition(0) """ def __init__(self, num_partitions: int) -> None: diff --git a/datasets/flwr_datasets/partitioner/grouped_natural_id_partitioner.py b/datasets/flwr_datasets/partitioner/grouped_natural_id_partitioner.py new file mode 100644 index 000000000000..4ce4f3717190 --- /dev/null +++ b/datasets/flwr_datasets/partitioner/grouped_natural_id_partitioner.py @@ -0,0 +1,224 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ==============================================================================
+"""Grouped natural id partitioner class that works with Hugging Face Datasets."""
+
+
+from typing import Any, Literal
+
+import numpy as np
+
+import datasets
+from flwr_datasets.common.typing import NDArrayInt
+from flwr_datasets.partitioner.partitioner import Partitioner
+
+
+class GroupedNaturalIdPartitioner(Partitioner):
+    """Partition dataset by creating groups of natural ids.
+
+    Conceptually, you can think of this partitioner as a way of creating an organization
+    of x users instead of each user representing a separate partition. You can change
+    the nature of the problem from cross-device to cross-silo (cross organization).
+
+    Parameters
+    ----------
+    partition_by: str
+        The name of the column that contains the unique values of partitions.
+    group_size: int
+        The number of unique ids that will be placed in a single group.
+    mode: Literal["allow-smaller", "allow-bigger", "drop-reminder", "strict"]
+        The mode that will be used to handle the remainder of the unique ids.
+        - "allow-smaller": The last group can be smaller than the group_size.
+        - "allow-bigger": The first group can be bigger than the group_size.
+        - "drop-reminder": The last group will be dropped if it is smaller than the
+          group_size.
+        - "strict": Raises a ValueError if the remainder is not zero. In this mode, you
+          expect each group to have the same size.
+    sort_unique_ids: bool
+        If True, the unique natural ids will be sorted before creating the groups.
+
+    Examples
+    --------
+    Partition users in the "sentiment140" (aka Twitter) dataset into groups of two
+    users following the default mode:
+
+    >>> from flwr_datasets import FederatedDataset
+    >>> from flwr_datasets.partitioner import GroupedNaturalIdPartitioner
+    >>>
+    >>> partitioner = GroupedNaturalIdPartitioner(partition_by="user", group_size=2)
+    >>> fds = FederatedDataset(dataset="sentiment140",
+    >>>                        partitioners={"train": partitioner})
+    >>> partition = fds.load_partition(0)
+    """
+
+    def __init__(
+        self,
+        partition_by: str,
+        group_size: int,
+        mode: Literal[
+            "allow-smaller", "allow-bigger", "drop-reminder", "strict"
+        ] = "allow-smaller",
+        sort_unique_ids: bool = False,
+    ) -> None:
+        super().__init__()
+        self._partition_id_to_natural_ids: dict[int, list[Any]] = {}
+        self._natural_id_to_partition_id: dict[Any, int] = {}
+        self._partition_id_to_indices: dict[int, NDArrayInt] = {}
+        self._partition_by = partition_by
+        self._mode = mode
+        self._sort_unique_ids = sort_unique_ids
+
+        # group_size == 0 must be rejected too: it would later trigger a
+        # ZeroDivisionError in the modulo/floor-division by self._group_size.
+        if group_size <= 0:
+            raise ValueError("group_size must be a positive integer")
+        self._group_size = group_size
+
+    def _create_int_partition_id_to_natural_id(self) -> None:
+        """Create a mapping from int indices to unique client ids from dataset.
+
+        Natural ids come from the column specified in `partition_by`.
+        """
+        unique_natural_ids = self.dataset.unique(self._partition_by)
+        if self._mode != "allow-smaller" and self._group_size > len(unique_natural_ids):
+            raise ValueError(
+                "The group size needs to be smaller than the number of the unique "
+                "natural ids unless you are using allow-smaller mode which will "
+                "result in a single partition."
+            )
+        if self._sort_unique_ids:
+            unique_natural_ids = sorted(unique_natural_ids)
+        num_unique_natural_ids = len(unique_natural_ids)
+        remainder = num_unique_natural_ids % self._group_size
+        num_groups = num_unique_natural_ids // self._group_size
+        if num_groups == 0 and self._mode == "allow-smaller":
+            num_groups = 1
+            remainder = 0
+        # Note that the number of groups might be different from this number
+        # due to certain modes, it's a base value.
+
+        if self._mode == "allow-bigger":
+            groups_of_natural_ids = np.array_split(unique_natural_ids, num_groups)
+        elif self._mode == "drop-reminder":
+            # Narrow down the unique_natural_ids to not have a bigger group
+            # which is the behavior of the np.array_split
+            unique_natural_ids = unique_natural_ids[
+                : int(num_groups * self._group_size)
+            ]
+            groups_of_natural_ids = np.array_split(unique_natural_ids, num_groups)
+        elif self._mode == "allow-smaller":
+            if remainder > 0:
+                last_group_ids = unique_natural_ids[-remainder:]
+            unique_natural_ids = unique_natural_ids[
+                : int(num_groups * self._group_size)
+            ]
+            groups_of_natural_ids = np.array_split(unique_natural_ids, num_groups)
+            if remainder > 0:
+                groups_of_natural_ids.append(np.array(last_group_ids))
+        elif self._mode == "strict":
+            if remainder != 0:
+                raise ValueError(
+                    "Strict mode requires that the number of unique natural ids is "
+                    "perfectly divisible by the group size. "
+                    f"Found remainder: {remainder}. Please pass the group_size that "
+                    f"enables strict mode or relax the mode parameter. Refer to the "
+                    f"documentation of the mode parameter for the available modes."
+                )
+            groups_of_natural_ids = np.array_split(unique_natural_ids, num_groups)
+        else:
+            raise ValueError(
+                f"Given {self._mode} is not a valid mode. Refer to the documentation of"
+                " the mode parameter for the available modes."
+ ) + + self._partition_id_to_natural_ids = {} + for group_of_natural_ids_id, group_of_natural_ids in enumerate( + groups_of_natural_ids + ): + self._partition_id_to_natural_ids[group_of_natural_ids_id] = ( + group_of_natural_ids.tolist() + ) + + def _create_natural_id_to_int_partition_id(self) -> None: + """Create a mapping from unique client ids from dataset to int indices. + + Natural ids come from the column specified in `partition_by`. This object is + inverse of the `self._partition_id_to_natural_id`. This method assumes that + `self._partition_id_to_natural_id` already exists. + """ + self._natural_id_to_partition_id = {} + for partition_id, natural_ids in self._partition_id_to_natural_ids.items(): + for natural_id in natural_ids: + self._natural_id_to_partition_id[natural_id] = partition_id + + def _create_partition_id_to_indices(self) -> None: + natural_id_to_indices = {} # type: ignore + natural_ids = np.array(self.dataset[self._partition_by]) + + for index, natural_id in enumerate(natural_ids): + if natural_id not in natural_id_to_indices: + natural_id_to_indices[natural_id] = [] + natural_id_to_indices[natural_id].append(index) + + self._partition_id_to_indices = {} + for partition_id, natural_id_group in self._partition_id_to_natural_ids.items(): + indices = [] + for natural_id in natural_id_group: + indices.extend(natural_id_to_indices[natural_id]) + self._partition_id_to_indices[partition_id] = np.array(indices) + + def load_partition(self, partition_id: int) -> datasets.Dataset: + """Load a single partition corresponding to a single `partition_id`. + + The choice of the partition is based on unique integers assigned to each + natural id present in the dataset in the `partition_by` column. 
+ + + Parameters + ---------- + partition_id : int + the index that corresponds to the requested partition + + Returns + ------- + dataset_partition : Dataset + single dataset partition + """ + if len(self._partition_id_to_natural_ids) == 0: + self._create_int_partition_id_to_natural_id() + self._create_natural_id_to_int_partition_id() + + if len(self._partition_id_to_indices) == 0: + self._create_partition_id_to_indices() + + return self.dataset.select(self._partition_id_to_indices[partition_id]) + + @property + def num_partitions(self) -> int: + """Total number of partitions.""" + if len(self._partition_id_to_natural_ids) == 0: + self._create_int_partition_id_to_natural_id() + self._create_natural_id_to_int_partition_id() + return len(self._partition_id_to_natural_ids) + + @property + def partition_id_to_natural_ids(self) -> dict[int, list[Any]]: + """Partition id to the corresponding group of natural ids present. + + Natural ids are the unique values in `partition_by` column in dataset. + """ + return self._partition_id_to_natural_ids + + @property + def natural_id_to_partition_id(self) -> dict[Any, int]: + """Natural id to the corresponding partition id.""" + return self._natural_id_to_partition_id diff --git a/datasets/flwr_datasets/partitioner/grouped_natural_id_partitioner_test.py b/datasets/flwr_datasets/partitioner/grouped_natural_id_partitioner_test.py new file mode 100644 index 000000000000..014d18c1dc15 --- /dev/null +++ b/datasets/flwr_datasets/partitioner/grouped_natural_id_partitioner_test.py @@ -0,0 +1,310 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Test GroupedNaturalIdPartitioner.""" + + +import unittest +from typing import Literal + +from parameterized import parameterized, parameterized_class + +from datasets import Dataset +from flwr_datasets.partitioner.grouped_natural_id_partitioner import ( + GroupedNaturalIdPartitioner, +) + + +def _create_dataset(num_rows: int, n_unique_natural_ids: int) -> Dataset: + """Create dataset based on the number of rows and unique natural ids.""" + data = { + "features": list(range(num_rows)), + "natural_id": [f"{i % n_unique_natural_ids}" for i in range(num_rows)], + "labels": [i % 2 for i in range(num_rows)], + } + dataset = Dataset.from_dict(data) + return dataset + + +# mypy: disable-error-code="attr-defined" +@parameterized_class( + ("sort_unique_ids",), + [ + (False,), + (True,), + ], +) +# pylint: disable=no-member +class TestGroupedNaturalIdPartitioner(unittest.TestCase): + """Test GroupedNaturalIdPartitioner.""" + + @parameterized.expand( # type: ignore + # num_rows, num_unique_natural_ids, group_size, expected_num_partitions + [ + [10, 10, 2, 5], + [11, 10, 2, 5], + [100, 10, 2, 5], + [12, 6, 3, 2], + ] + ) + def test_strict_mode_num_partitions_and_partition_sizes( + self, + num_rows: int, + num_unique_natural_id: int, + group_size: int, + expected_num_partitions: int, + ) -> None: + """Test strict mode with valid group size.""" + dataset = _create_dataset(num_rows, num_unique_natural_id) + partitioner = GroupedNaturalIdPartitioner( + partition_by="natural_id", + 
group_size=group_size, + mode="strict", + sort_unique_ids=self.sort_unique_ids, + ) + partitioner.dataset = dataset + # Trigger partitioning + _ = partitioner.load_partition(0) + self.assertEqual(partitioner.num_partitions, expected_num_partitions) + + @parameterized.expand( # type: ignore + # num_rows, num_unique_natural_ids, group_size, expected_num_partitions, + # expected_num_unique_natural_ids + [ + [10, 10, 2, [2, 2, 2, 2, 2]], + [100, 10, 2, [2, 2, 2, 2, 2]], + [12, 6, 3, [3, 3]], + # The cases in which the partitions should be smaller + [10, 7, 2, [2, 2, 2, 1]], + [10, 3, 2, [2, 1]], + ] + ) + def test_allow_smaller_mode_num_partitions_and_partition_sizes( + self, + num_rows: int, + num_unique_natural_id: int, + group_size: int, + expected_num_unique_natural_ids: list[int], + ) -> None: + """Test allow-smaller mode handles the remainder correctly.""" + dataset = _create_dataset(num_rows, num_unique_natural_id) + partitioner = GroupedNaturalIdPartitioner( + partition_by="natural_id", + group_size=group_size, + mode="allow-smaller", + sort_unique_ids=self.sort_unique_ids, + ) + partitioner.dataset = dataset + # Trigger partitioning + partitions = [ + partitioner.load_partition(i) for i in range(partitioner.num_partitions) + ] + unique_natural_ids = [ + len(partition.unique("natural_id")) for partition in partitions + ] + self.assertEqual(unique_natural_ids, expected_num_unique_natural_ids) + + @parameterized.expand( # type: ignore + # num_rows, num_unique_natural_ids, group_size, expected_num_partitions, + # expected_num_unique_natural_ids + [ + [10, 10, 2, [2, 2, 2, 2, 2]], + [100, 10, 2, [2, 2, 2, 2, 2]], + [12, 6, 3, [3, 3]], + # The cases in which the partitions should be smaller + [10, 7, 2, [3, 2, 2]], + [10, 3, 2, [3]], + ] + ) + def test_allow_bigger_mode_num_partitions_and_partition_sizes( + self, + num_rows: int, + num_unique_natural_id: int, + group_size: int, + expected_num_unique_natural_ids: list[int], + ) -> None: + """Test allow-bigger mode 
handles the remainder correctly.""" + dataset = _create_dataset(num_rows, num_unique_natural_id) + partitioner = GroupedNaturalIdPartitioner( + partition_by="natural_id", + group_size=group_size, + mode="allow-bigger", + sort_unique_ids=self.sort_unique_ids, + ) + partitioner.dataset = dataset + # Trigger partitioning + partitions = [ + partitioner.load_partition(i) for i in range(partitioner.num_partitions) + ] + unique_natural_ids = [ + len(partition.unique("natural_id")) for partition in partitions + ] + self.assertEqual(unique_natural_ids, expected_num_unique_natural_ids) + + @parameterized.expand( # type: ignore + # num_rows, num_unique_natural_ids, group_size, expected_num_partitions, + # expected_num_unique_natural_ids + [ + [10, 10, 2, [2, 2, 2, 2, 2]], + [100, 10, 2, [2, 2, 2, 2, 2]], + [12, 6, 3, [3, 3]], + # The cases in which the partitions should be smaller + [10, 7, 2, [2, 2, 2]], + [10, 3, 2, [2]], + ] + ) + def test_drop_reminder_mode_num_partitions_and_partition_sizes( + self, + num_rows: int, + num_unique_natural_id: int, + group_size: int, + expected_num_unique_natural_ids: list[int], + ) -> None: + """Test drop reminder mode.""" + dataset = _create_dataset(num_rows, num_unique_natural_id) + partitioner = GroupedNaturalIdPartitioner( + partition_by="natural_id", + group_size=group_size, + mode="drop-reminder", + sort_unique_ids=self.sort_unique_ids, + ) + partitioner.dataset = dataset + # Trigger partitioning + partitions = [ + partitioner.load_partition(i) for i in range(partitioner.num_partitions) + ] + unique_natural_ids = [ + len(partition.unique("natural_id")) for partition in partitions + ] + self.assertEqual(unique_natural_ids, expected_num_unique_natural_ids) + + @parameterized.expand( # type: ignore + # mode, num_rows, num_unique_natural_ids, group_size + [ + ["strict", 10, 10, 2], + ["allow-smaller", 10, 7, 2], + ["allow-bigger", 10, 7, 2], + ["drop-reminder", 10, 7, 2], + ["strict", 12, 6, 3], + ["allow-smaller", 12, 6, 3], + 
["allow-bigger", 12, 6, 3], + ["drop-reminder", 12, 6, 3], + ["allow-smaller", 10, 2, 3], + ] + ) + def test_no_overlapping_natural_ids( + self, + mode: Literal["allow-smaller", "allow-bigger", "drop-reminder", "strict"], + num_rows: int, + num_unique_natural_id: int, + group_size: int, + ) -> None: + """Test that no natural_ids overlap across partitions.""" + dataset = _create_dataset(num_rows, num_unique_natural_id) + partitioner = GroupedNaturalIdPartitioner( + partition_by="natural_id", + group_size=group_size, + mode=mode, + sort_unique_ids=self.sort_unique_ids, + ) + partitioner.dataset = dataset + + # Trigger partitioning + partitions = [ + partitioner.load_partition(i) for i in range(partitioner.num_partitions) + ] + + # Check for overlaps between partitions + seen_natural_ids: set[str] = set() + for partition in partitions: + natural_ids_in_partition = set(partition.unique("natural_id")) + + # Check if there is any overlap with previously seen natural IDs + overlap = seen_natural_ids.intersection(natural_ids_in_partition) + self.assertTrue( + len(overlap) == 0, + f"Overlapping natural IDs found between partitions in mode: {mode}. " + f"Overlapping IDs: {overlap}", + ) + + # Add the natural IDs from this partition to the seen set + seen_natural_ids.update(natural_ids_in_partition) + + def test_group_size_bigger_than_num_unique_natural_ids_allow_smaller(self) -> None: + """Test the allow-smaller mode with group size > number of unique natural ids. + + That's the only mode that should work in this scenario. 
+ """ + dataset = _create_dataset(num_rows=10, n_unique_natural_ids=2) + expected_num_unique_natural_ids = [2] + partitioner = GroupedNaturalIdPartitioner( + partition_by="natural_id", + group_size=3, + mode="allow-smaller", + sort_unique_ids=self.sort_unique_ids, + ) + partitioner.dataset = dataset + # Trigger partitioning + partitions = [ + partitioner.load_partition(i) for i in range(partitioner.num_partitions) + ] + unique_natural_ids = [ + len(partition.unique("natural_id")) for partition in partitions + ] + + self.assertEqual(unique_natural_ids, expected_num_unique_natural_ids) + + def test_strict_mode_with_invalid_group_size(self) -> None: + """Test strict mode raises if group_size does not divide unique IDs evenly.""" + dataset = _create_dataset(num_rows=10, n_unique_natural_ids=3) + partitioner = GroupedNaturalIdPartitioner( + partition_by="natural_id", + group_size=2, + mode="strict", + sort_unique_ids=self.sort_unique_ids, + ) + partitioner.dataset = dataset + with self.assertRaises(ValueError) as context: + _ = partitioner.load_partition(0) + self.assertIn( + "Strict mode requires that the number of unique natural ids is perfectly " + "divisible by the group size.", + str(context.exception), + ) + + def test_too_big_group_size(self) -> None: + """Test raises if the group size > than the number of unique natural ids.""" + n_unique_natural_ids = 3 + dataset = _create_dataset( + num_rows=10, n_unique_natural_ids=n_unique_natural_ids + ) + partitioner = GroupedNaturalIdPartitioner( + partition_by="natural_id", + group_size=n_unique_natural_ids + 1, + mode="allow-bigger", + sort_unique_ids=self.sort_unique_ids, + ) + partitioner.dataset = dataset + with self.assertRaises(ValueError) as context: + _ = partitioner.load_partition(0) + self.assertIn( + "The group size needs to be smaller than the number of the unique " + "natural ids unless you are using allow-smaller mode which will " + "result in a single partition.", + str(context.exception), + ) + + +if 
__name__ == "__main__": + unittest.main() diff --git a/datasets/flwr_datasets/partitioner/id_to_size_fnc_partitioner.py b/datasets/flwr_datasets/partitioner/id_to_size_fnc_partitioner.py new file mode 100644 index 000000000000..bd6336eb0801 --- /dev/null +++ b/datasets/flwr_datasets/partitioner/id_to_size_fnc_partitioner.py @@ -0,0 +1,145 @@ +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""IdToSizeFncPartitioner class.""" + + +from typing import Callable, Union + +import numpy as np + +import datasets +from flwr_datasets.partitioner.partitioner import Partitioner + + +class IdToSizeFncPartitioner(Partitioner): + """Base class for the deterministic size partitioning based on the `partition_id`. + + The client with `partition_id` has the following relationship regarding the number + of samples. + + `partition_id_to_size_fn(partition_id)` ~ number of samples for `partition_id` + + If the function doesn't transform the `partition_id` it's a linear correlation + between the number of sample for the partition and the value of `partition_id`. For + instance, if the partition ids range from 1 to M, partition with id 1 gets 1 unit of + data, client 2 gets 2 units, and so on, up to partition M which gets M units. 
+ + Note that size corresponding to the `partition_id` is deterministic, yet in case of + different dataset shuffling the assignment of samples to `partition_id` will vary. + + Parameters + ---------- + num_partitions : int + The total number of partitions that the data will be divided into. + partition_id_to_size_fn : Callable + Function that defines the relationship between partition id and the number of + samples. + """ + + def __init__( + self, + num_partitions: int, + partition_id_to_size_fn: Callable, # type: ignore[type-arg] + ) -> None: + super().__init__() + if num_partitions <= 0: + raise ValueError("The number of partitions must be greater than zero.") + self._num_partitions = num_partitions + self._partition_id_to_size_fn = partition_id_to_size_fn + + self._partition_id_to_size: dict[int, int] = {} + self._partition_id_to_indices: dict[int, list[int]] = {} + # A flag to perform only a single compute to determine the indices + self._partition_id_to_indices_determined = False + + def load_partition(self, partition_id: int) -> datasets.Dataset: + """Load a single partition based on the partition index. + + The number of samples is dependent on the partition partition_id. + + Parameters + ---------- + partition_id : int + the index that corresponds to the requested partition + + Returns + ------- + dataset_partition: Dataset + single dataset partition + """ + # The partitioning is done lazily - only when the first partition is requested. + # A single run creates the indices assignments for all the partition indices. 
+ self._determine_partition_id_to_indices_if_needed() + return self.dataset.select(self._partition_id_to_indices[partition_id]) + + @property + def num_partitions(self) -> int: + """Total number of partitions.""" + self._determine_partition_id_to_indices_if_needed() + return self._num_partitions + + @property + def partition_id_to_size(self) -> dict[int, int]: + """Node id to the number of samples.""" + return self._partition_id_to_size + + @property + def partition_id_to_indices(self) -> dict[int, list[int]]: + """Node id to the list of indices.""" + return self._partition_id_to_indices + + def _determine_partition_id_to_size(self) -> None: + """Determine data quantity associated with partition indices.""" + data_division_in_units = self._partition_id_to_size_fn( + np.linspace(start=1, stop=self._num_partitions, num=self._num_partitions) + ) + total_units: Union[int, float] = data_division_in_units.sum() + # Normalize the units to get the fraction total dataset + partition_sizes_as_fraction = data_division_in_units / total_units + # Calculate the number of samples + partition_sizes_as_num_of_samples = np.array( + partition_sizes_as_fraction * len(self.dataset), dtype=np.int64 + ) + # Check if any sample is not allocated because of multiplication with fractions. + assigned_samples = np.sum(partition_sizes_as_num_of_samples) + left_unassigned_samples = len(self.dataset) - assigned_samples + # If there is any sample(s) left unassigned, assign it to the largest partition. 
+ partition_sizes_as_num_of_samples[-1] += left_unassigned_samples + for idx, partition_size in enumerate(partition_sizes_as_num_of_samples): + self._partition_id_to_size[idx] = partition_size + + self._check_if_partition_id_to_size_possible() + + def _determine_partition_id_to_indices_if_needed(self) -> None: + """Create an assignment of indices to the partition indices..""" + if self._partition_id_to_indices_determined is True: + return + self._determine_partition_id_to_size() + total_samples_assigned = 0 + for idx, quantity in self._partition_id_to_size.items(): + self._partition_id_to_indices[idx] = list( + range(total_samples_assigned, total_samples_assigned + quantity) + ) + total_samples_assigned += quantity + self._partition_id_to_indices_determined = True + + def _check_if_partition_id_to_size_possible(self) -> None: + all_positive = all(value >= 1 for value in self.partition_id_to_size.values()) + if not all_positive: + raise ValueError( + f"The given specification of the parameter num_partitions" + f"={self._num_partitions} for the given dataset results " + f"in the partitions sizes that are not greater than 0." + ) diff --git a/datasets/flwr_datasets/partitioner/id_to_size_fnc_partitioner_test.py b/datasets/flwr_datasets/partitioner/id_to_size_fnc_partitioner_test.py new file mode 100644 index 000000000000..905aa8cc9303 --- /dev/null +++ b/datasets/flwr_datasets/partitioner/id_to_size_fnc_partitioner_test.py @@ -0,0 +1,104 @@ +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""IdToSizeFuncitonPartitioner tests.""" + + +import unittest + +from parameterized import parameterized + +from datasets import Dataset +from flwr_datasets.partitioner.linear_partitioner import LinearPartitioner + + +def _dummy_dataset(num_rows: int) -> Dataset: + data = { + "features": list(range(num_rows)), + "labels": [i % 2 for i in range(num_rows)], + } + dataset = Dataset.from_dict(data) + return dataset + + +class TestLinearPartitioner(unittest.TestCase): + """Test LinearPartitioner.""" + + @parameterized.expand( # type: ignore + [ + (1, 100), + (10, 100), + (5, 55), # This will leave some undivided samples + ] + ) + def test_linear_distribution(self, num_partitions: int, num_rows: int) -> None: + """Test the linear distribution of samples.""" + dataset = _dummy_dataset(num_rows) + partitioner = LinearPartitioner(num_partitions=num_partitions) + partitioner.dataset = dataset + # Run a single partition loading to trigger the division + _ = partitioner.load_partition(0) + total_samples = sum(partitioner.partition_id_to_size.values()) + self.assertEqual(total_samples, num_rows) + + # Testing if each partition is getting more than the previous one + last_count = 0 + for i in range(num_partitions): + current_count = partitioner.partition_id_to_size[i] + self.assertGreaterEqual(current_count, last_count) + last_count = current_count + + @parameterized.expand( # type: ignore + [ + (10, 100), + (5, 55), # This will leave some undivided samples + (7, 77), # This will leave some undivided samples + ] + ) + def test_undivided_samples(self, num_partitions: int, num_rows: int) -> None: + """Test the logic for distributing undivided samples.""" + dataset = _dummy_dataset(num_rows) + partitioner = LinearPartitioner(num_partitions=num_partitions) + partitioner.dataset = dataset + # If 
there are any undivided samples, they should be added to the largest + # partition + last_partition_id = num_partitions - 1 + actual_samples_in_last_partition = len( + partitioner.load_partition(last_partition_id) + ) + expected_samples_in_last_partition = partitioner.partition_id_to_size[ + last_partition_id + ] + self.assertEqual( + expected_samples_in_last_partition, actual_samples_in_last_partition + ) + + def test_meaningless_params(self) -> None: + """Test if the params leading to partition size not greater than zero raises.""" + num_rows = 10 + num_partitions = 100 + dataset = _dummy_dataset(num_rows) + partitioner = LinearPartitioner(num_partitions=num_partitions) + partitioner.dataset = dataset + with self.assertRaises(ValueError) as context: + partitioner.load_partition(1) + self.assertIn( + "The given specification of the parameter num_partitions=100 for the given " + "dataset results in the partitions sizes that are not greater than 0.", + str(context.exception), + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/datasets/flwr_datasets/partitioner/iid_partitioner.py b/datasets/flwr_datasets/partitioner/iid_partitioner.py index ceddd386c7d3..f0f470072eb5 100644 --- a/datasets/flwr_datasets/partitioner/iid_partitioner.py +++ b/datasets/flwr_datasets/partitioner/iid_partitioner.py @@ -26,6 +26,15 @@ class IidPartitioner(Partitioner): ---------- num_partitions : int The total number of partitions that the data will be divided into. 
+ + Examples + -------- + >>> from flwr_datasets import FederatedDataset + >>> from flwr_datasets.partitioner import IidPartitioner + >>> + >>> partitioner = IidPartitioner(num_partitions=10) + >>> fds = FederatedDataset(dataset="mnist", partitioners={"train": partitioner}) + >>> partition = fds.load_partition(0) """ def __init__(self, num_partitions: int) -> None: diff --git a/datasets/flwr_datasets/partitioner/iid_partitioner_test.py b/datasets/flwr_datasets/partitioner/iid_partitioner_test.py index 64c37c4e7127..cbdc67be7fa5 100644 --- a/datasets/flwr_datasets/partitioner/iid_partitioner_test.py +++ b/datasets/flwr_datasets/partitioner/iid_partitioner_test.py @@ -16,7 +16,6 @@ import unittest -from typing import Tuple from parameterized import parameterized @@ -24,7 +23,7 @@ from flwr_datasets.partitioner.iid_partitioner import IidPartitioner -def _dummy_setup(num_partitions: int, num_rows: int) -> Tuple[Dataset, IidPartitioner]: +def _dummy_setup(num_partitions: int, num_rows: int) -> tuple[Dataset, IidPartitioner]: """Create a dummy dataset and partitioner based on given arguments. The partitioner has automatically the dataset assigned to it. 
diff --git a/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner.py b/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner.py index e3e46813dfc8..e62b8fdbb212 100644 --- a/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner.py +++ b/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner.py @@ -14,7 +14,7 @@ # ============================================================================== """InnerDirichlet partitioner.""" import warnings -from typing import Dict, List, Optional, Union +from typing import Optional, Union import numpy as np @@ -68,9 +68,9 @@ class InnerDirichletPartitioner(Partitioner): # pylint: disable=R0902 def __init__( # pylint: disable=R0913 self, - partition_sizes: Union[List[int], NDArrayInt], + partition_sizes: Union[list[int], NDArrayInt], partition_by: str, - alpha: Union[int, float, List[float], NDArrayFloat], + alpha: Union[int, float, list[float], NDArrayFloat], shuffle: bool = True, seed: Optional[int] = 42, ) -> None: @@ -87,11 +87,11 @@ def __init__( # pylint: disable=R0913 self._initialized_alpha = False self._rng = np.random.default_rng(seed=self._seed) # NumPy random generator # The attributes below are determined during the first call to load_partition - self._unique_classes: Optional[Union[List[int], List[str]]] = None + self._unique_classes: Optional[Union[list[int], list[str]]] = None self._num_unique_classes: Optional[int] = None self._num_partitions = len(self._partition_sizes) - self._partition_id_to_indices: Dict[int, List[int]] = {} + self._partition_id_to_indices: dict[int, list[int]] = {} self._partition_id_to_indices_determined = False def load_partition(self, partition_id: int) -> datasets.Dataset: @@ -130,7 +130,7 @@ def num_partitions(self) -> int: return self._num_partitions def _initialize_alpha_if_needed( - self, alpha: Union[int, float, List[float], NDArrayFloat] + self, alpha: Union[int, float, list[float], NDArrayFloat] ) -> NDArrayFloat: """Convert alpha to the used 
format in the code a NDArrayFloat. @@ -159,7 +159,7 @@ def _initialize_alpha_if_needed( elif isinstance(alpha, float): assert self._num_unique_classes is not None alpha = np.array([alpha], dtype=float).repeat(self._num_unique_classes) - elif isinstance(alpha, List): + elif isinstance(alpha, list): if len(alpha) != self._num_unique_classes: raise ValueError( "When passing alpha as a List, its length needs needs to be " @@ -304,10 +304,10 @@ def _check_the_sum_of_partition_sizes(self) -> None: def _instantiate_partition_sizes( - partition_sizes: Union[List[int], NDArrayInt] + partition_sizes: Union[list[int], NDArrayInt] ) -> NDArrayInt: """Transform list to the ndarray of ints if needed.""" - if isinstance(partition_sizes, List): + if isinstance(partition_sizes, list): partition_sizes = np.asarray(partition_sizes) elif isinstance(partition_sizes, np.ndarray): pass diff --git a/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner_test.py b/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner_test.py index 86dc8a5df532..8df09d01f916 100644 --- a/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner_test.py +++ b/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner_test.py @@ -15,7 +15,7 @@ """Test DirichletPartitioner.""" # pylint: disable=W0212 import unittest -from typing import List, Tuple, Union +from typing import Union from datasets import Dataset from flwr_datasets.common.typing import NDArrayFloat, NDArrayInt @@ -27,9 +27,9 @@ def _dummy_setup( num_rows: int, partition_by: str, - partition_sizes: Union[List[int], NDArrayInt], - alpha: Union[float, List[float], NDArrayFloat], -) -> Tuple[Dataset, InnerDirichletPartitioner]: + partition_sizes: Union[list[int], NDArrayInt], + alpha: Union[float, list[float], NDArrayFloat], +) -> tuple[Dataset, InnerDirichletPartitioner]: """Create a dummy dataset and partitioner for testing.""" data = { partition_by: [i % 3 for i in range(num_rows)], diff --git 
a/datasets/flwr_datasets/partitioner/linear_partitioner.py b/datasets/flwr_datasets/partitioner/linear_partitioner.py index 84d419ab5592..07fea16df5e0 100644 --- a/datasets/flwr_datasets/partitioner/linear_partitioner.py +++ b/datasets/flwr_datasets/partitioner/linear_partitioner.py @@ -15,10 +15,10 @@ """LinearPartitioner class.""" -from flwr_datasets.partitioner.size_partitioner import SizePartitioner +from flwr_datasets.partitioner.id_to_size_fnc_partitioner import IdToSizeFncPartitioner -class LinearPartitioner(SizePartitioner): +class LinearPartitioner(IdToSizeFncPartitioner): """Partitioner creates partitions of size that are linearly correlated with id. The amount of data each client gets is linearly correlated with the partition ID. @@ -29,6 +29,15 @@ class LinearPartitioner(SizePartitioner): ---------- num_partitions : int The total number of partitions that the data will be divided into. + + Examples + -------- + >>> from flwr_datasets import FederatedDataset + >>> from flwr_datasets.partitioner import LinearPartitioner + >>> + >>> partitioner = LinearPartitioner(num_partitions=10) + >>> fds = FederatedDataset(dataset="mnist", partitioners={"train": partitioner}) + >>> partition = fds.load_partition(0) """ def __init__(self, num_partitions: int) -> None: diff --git a/datasets/flwr_datasets/partitioner/natural_id_partitioner.py b/datasets/flwr_datasets/partitioner/natural_id_partitioner.py index 85f1b3af43c2..64b51855e1f4 100644 --- a/datasets/flwr_datasets/partitioner/natural_id_partitioner.py +++ b/datasets/flwr_datasets/partitioner/natural_id_partitioner.py @@ -15,8 +15,6 @@ """Natural id partitioner class that works with Hugging Face Datasets.""" -from typing import Dict - import numpy as np from tqdm import tqdm @@ -37,6 +35,7 @@ class NaturalIdPartitioner(Partitioner): Examples -------- "flwrlabs/shakespeare" dataset + >>> from flwr_datasets import FederatedDataset >>> from flwr_datasets.partitioner import NaturalIdPartitioner >>> @@ -46,6 +45,7 @@ 
class NaturalIdPartitioner(Partitioner): >>> partition = fds.load_partition(0) "sentiment140" (aka Twitter) dataset + >>> from flwr_datasets import FederatedDataset >>> from flwr_datasets.partitioner import NaturalIdPartitioner >>> @@ -60,9 +60,9 @@ def __init__( partition_by: str, ): super().__init__() - self._partition_id_to_natural_id: Dict[int, str] = {} - self._natural_id_to_partition_id: Dict[str, int] = {} - self._partition_id_to_indices: Dict[int, NDArrayInt] = {} + self._partition_id_to_natural_id: dict[int, str] = {} + self._natural_id_to_partition_id: dict[str, int] = {} + self._partition_id_to_indices: dict[int, NDArrayInt] = {} self._partition_by = partition_by def _create_int_partition_id_to_natural_id(self) -> None: @@ -136,16 +136,15 @@ def num_partitions(self) -> int: return len(self._partition_id_to_natural_id) @property - def partition_id_to_natural_id(self) -> Dict[int, str]: + def partition_id_to_natural_id(self) -> dict[int, str]: """Node id to corresponding natural id present. Natural ids are the unique values in `partition_by` column in dataset. """ return self._partition_id_to_natural_id - # pylint: disable=R0201 @partition_id_to_natural_id.setter - def partition_id_to_natural_id(self, value: Dict[int, str]) -> None: + def partition_id_to_natural_id(self, value: dict[int, str]) -> None: raise AttributeError( "Setting the partition_id_to_natural_id dictionary is not allowed." 
) diff --git a/datasets/flwr_datasets/partitioner/natural_id_partitioner_test.py b/datasets/flwr_datasets/partitioner/natural_id_partitioner_test.py index f447634ad9ed..d3147985dca9 100644 --- a/datasets/flwr_datasets/partitioner/natural_id_partitioner_test.py +++ b/datasets/flwr_datasets/partitioner/natural_id_partitioner_test.py @@ -18,7 +18,6 @@ import itertools import math import unittest -from typing import Tuple from parameterized import parameterized @@ -28,7 +27,7 @@ def _dummy_setup( num_rows: int, n_unique_natural_ids: int -) -> Tuple[Dataset, NaturalIdPartitioner]: +) -> tuple[Dataset, NaturalIdPartitioner]: """Create a dummy dataset and partitioner based on given arguments. The partitioner has automatically the dataset assigned to it. @@ -86,7 +85,7 @@ def test_load_partition_max_partition_size( print(num_unique_natural_ids) _, partitioner = _dummy_setup(num_rows, num_unique_natural_ids) max_size = max( - [len(partitioner.load_partition(i)) for i in range(num_unique_natural_ids)] + len(partitioner.load_partition(i)) for i in range(num_unique_natural_ids) ) self.assertEqual(max_size, math.ceil(num_rows / num_unique_natural_ids)) diff --git a/datasets/flwr_datasets/partitioner/partitioner.py b/datasets/flwr_datasets/partitioner/partitioner.py index 10ade52640e8..0404a11e772c 100644 --- a/datasets/flwr_datasets/partitioner/partitioner.py +++ b/datasets/flwr_datasets/partitioner/partitioner.py @@ -44,12 +44,17 @@ def dataset(self) -> Dataset: @dataset.setter def dataset(self, value: Dataset) -> None: if self._dataset is not None: - raise Exception( + raise ValueError( "The dataset should be assigned only once to the partitioner." "This operation might also wipe out the saved references to the " "created partitions (in case the partitioning scheme needs to create " "the full partitioning also in order to return a single partition)." 
) + if not isinstance(value, Dataset): + raise TypeError( + f"The dataset object you want to assign to the partitioner should be " + f"of type `datasets.Dataset` but given {type(value)}." + ) self._dataset = value @abstractmethod diff --git a/datasets/flwr_datasets/partitioner/partitioner_test.py b/datasets/flwr_datasets/partitioner/partitioner_test.py new file mode 100644 index 000000000000..be0c988e6a9a --- /dev/null +++ b/datasets/flwr_datasets/partitioner/partitioner_test.py @@ -0,0 +1,59 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Abstract partitioner tests.""" + + +import unittest + +import datasets +from datasets import Dataset +from flwr_datasets.partitioner.partitioner import Partitioner + + +class DummyPartitioner(Partitioner): + """Dummy partitioner for testing.""" + + def load_partition(self, partition_id: int) -> Dataset: + """Return always a dummy dataset.""" + return datasets.Dataset.from_dict({"feature": [0, 1, 2]}) + + @property + def num_partitions(self) -> int: + """Return always 0.""" + return 0 + + +class TestPartitioner(unittest.TestCase): + """Test Partitioner.""" + + def test_dataset_setter_incorrect_type(self) -> None: + """Test if the incorrect type of the dataset to dataset.setter method raises.""" + train_split = datasets.Dataset.from_dict({"feature": [0, 1, 2]}) + test_split = datasets.Dataset.from_dict({"feature": [0, 1, 2]}) + dataset = datasets.DatasetDict({"train": train_split, "test": test_split}) + partitioner = DummyPartitioner() + + with self.assertRaises(Exception) as context: + partitioner.dataset = dataset + self.assertIn( + "The dataset object you want to assign to the partitioner should be of " + "type `datasets.Dataset` but given " + ".", + str(context.exception), + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/datasets/flwr_datasets/partitioner/pathological_partitioner.py b/datasets/flwr_datasets/partitioner/pathological_partitioner.py new file mode 100644 index 000000000000..d114ccbda02f --- /dev/null +++ b/datasets/flwr_datasets/partitioner/pathological_partitioner.py @@ -0,0 +1,305 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Pathological partitioner class that works with Hugging Face Datasets.""" + + +import warnings +from typing import Any, Literal, Optional + +import numpy as np + +import datasets +from flwr_datasets.common.typing import NDArray +from flwr_datasets.partitioner.partitioner import Partitioner + + +# pylint: disable=too-many-arguments, too-many-instance-attributes +class PathologicalPartitioner(Partitioner): + """Partition dataset such that each partition has a chosen number of classes. + + Implementation based on Federated Learning on Non-IID Data Silos: An Experimental + Study https://arxiv.org/pdf/2102.02079. + + The algorithm first determines which classes will be assigned to which partitions. + For each partition `num_classes_per_partition` classes are sampled in a way chosen + in `class_assignment_mode`. Given the information about the required classes for each + partition, it is determined into how many parts the samples corresponding to this + label should be divided. Such division is performed for each class. + + Parameters + ---------- + num_partitions : int + The total number of partitions that the data will be divided into. + partition_by : str + Column name of the labels (targets) based on which partitioning works. + num_classes_per_partition: int + The (exact) number of unique classes that each partition will have. + class_assignment_mode: Literal["random", "deterministic", "first-deterministic"] + The way in which classes are assigned to the partitions.
The default is "random". + The possible values are: + + - "random": Randomly assign classes to the partitions. For each partition choose + the `num_classes_per_partition` classes without replacement. + - "first-deterministic": Assign the first class for each partition in a + deterministic way (class id is the partition_id % num_unique_classes). + The rest of the classes are assigned randomly. In case the number of + partitions is smaller than the number of unique classes, not all classes will + be used in the first iteration, otherwise all the classes will be used (such + that each will be present in at least one partition). + - "deterministic": Assign all the classes to the partitions in a deterministic + way. Classes are assigned based on the formula: partition_id has classes + identified by the index: (partition_id + i) % num_unique_classes + where i in {0, ..., num_classes_per_partition}. So, partition 0 will have + classes 0, 1, 2, ..., `num_classes_per_partition`-1, partition 1 will have + classes 1, 2, 3, ...,`num_classes_per_partition`, .... + + The list representing the unique labels is sorted in ascending order. In case + of numbers starting from zero the class id corresponds to the number itself. + `class_assignment_mode="first-deterministic"` was used in the original paper, + here we provide the option to use the other modes as well. + shuffle: bool + Whether to randomize the order of samples. Shuffling applied after the + samples assignment to partitions. + seed: int + Seed used for dataset shuffling. It has no effect if `shuffle` is False.
+ + Examples + -------- + In order to mimic the original behavior of the paper follow the setup below + (the `class_assignment_mode="first-deterministic"`): + + >>> from flwr_datasets.partitioner import PathologicalPartitioner + >>> from flwr_datasets import FederatedDataset + >>> + >>> partitioner = PathologicalPartitioner( + >>> num_partitions=10, + >>> partition_by="label", + >>> num_classes_per_partition=2, + >>> class_assignment_mode="first-deterministic" + >>> ) + >>> fds = FederatedDataset(dataset="mnist", partitioners={"train": partitioner}) + >>> partition = fds.load_partition(0) + """ + + def __init__( + self, + num_partitions: int, + partition_by: str, + num_classes_per_partition: int, + class_assignment_mode: Literal[ + "random", "deterministic", "first-deterministic" + ] = "random", + shuffle: bool = True, + seed: Optional[int] = 42, + ) -> None: + super().__init__() + self._num_partitions = num_partitions + self._partition_by = partition_by + self._num_classes_per_partition = num_classes_per_partition + self._class_assignment_mode = class_assignment_mode + self._shuffle = shuffle + self._seed = seed + self._rng = np.random.default_rng(seed=self._seed) + + # Utility attributes + self._partition_id_to_indices: dict[int, list[int]] = {} + self._partition_id_to_unique_labels: dict[int, list[Any]] = { + pid: [] for pid in range(self._num_partitions) + } + self._unique_labels: list[Any] = [] + # Count in how many partitions the label is used + self._unique_label_to_times_used_counter: dict[Any, int] = {} + self._partition_id_to_indices_determined = False + + def load_partition(self, partition_id: int) -> datasets.Dataset: + """Load a partition based on the partition index. + + Parameters + ---------- + partition_id : int + The index that corresponds to the requested partition. + + Returns + ------- + dataset_partition : Dataset + Single partition of a dataset. + """ + # The partitioning is done lazily - only when the first partition is + # requested. 
Only the first call creates the indices assignments for all the + # partition indices. + self._check_num_partitions_correctness_if_needed() + self._determine_partition_id_to_indices_if_needed() + return self.dataset.select(self._partition_id_to_indices[partition_id]) + + @property + def num_partitions(self) -> int: + """Total number of partitions.""" + self._check_num_partitions_correctness_if_needed() + self._determine_partition_id_to_indices_if_needed() + return self._num_partitions + + def _determine_partition_id_to_indices_if_needed(self) -> None: + """Create an assignment of indices to the partition indices.""" + if self._partition_id_to_indices_determined: + return + self._determine_partition_id_to_unique_labels() + assert self._unique_labels is not None + self._count_partitions_having_each_unique_label() + + labels = np.asarray(self.dataset[self._partition_by]) + self._check_correctness_of_unique_label_to_times_used_counter(labels) + for partition_id in range(self._num_partitions): + self._partition_id_to_indices[partition_id] = [] + + unused_labels = [] + for unique_label in self._unique_labels: + if self._unique_label_to_times_used_counter[unique_label] == 0: + unused_labels.append(unique_label) + continue + # Get the indices in the original dataset where the y == unique_label + unique_label_to_indices = np.where(labels == unique_label)[0] + + split_unique_labels_to_indices = np.array_split( + unique_label_to_indices, + self._unique_label_to_times_used_counter[unique_label], + ) + + split_index = 0 + for partition_id in range(self._num_partitions): + if unique_label in self._partition_id_to_unique_labels[partition_id]: + self._partition_id_to_indices[partition_id].extend( + split_unique_labels_to_indices[split_index] + ) + split_index += 1 + + if len(unused_labels) >= 1: + warnings.warn( + f"Classes: {unused_labels} will NOT be used due to the chosen " + f"configuration. 
If it is undesired behavior consider setting" + f" 'class_assignment_mode=first-deterministic' which in case when" + f" the number of classes is smaller than the number of partitions will " + f"utilize all the classes for the created partitions.", + stacklevel=1, + ) + if self._shuffle: + for indices in self._partition_id_to_indices.values(): + # In place shuffling + self._rng.shuffle(indices) + + self._partition_id_to_indices_determined = True + + def _check_num_partitions_correctness_if_needed(self) -> None: + """Test num_partitions when the dataset is given (in load_partition).""" + if not self._partition_id_to_indices_determined: + if self._num_partitions > self.dataset.num_rows: + raise ValueError( + "The number of partitions needs to be smaller than the number of " + "samples in the dataset." + ) + + def _determine_partition_id_to_unique_labels(self) -> None: + """Determine the assignment of unique labels to the partitions.""" + self._unique_labels = sorted(self.dataset.unique(self._partition_by)) + num_unique_classes = len(self._unique_labels) + + if self._num_classes_per_partition > num_unique_classes: + raise ValueError( + f"The specified `num_classes_per_partition`" + f"={self._num_classes_per_partition} is greater than the number " + f"of unique classes in the given dataset={num_unique_classes}. " + f"Reduce the `num_classes_per_partition` or make use different dataset " + f"to apply this partitioning."
+ ) + if self._class_assignment_mode == "first-deterministic": + # if self._first_class_deterministic_assignment: + for partition_id in range(self._num_partitions): + label = self._unique_labels[partition_id % num_unique_classes] + self._partition_id_to_unique_labels[partition_id].append(label) + + while ( + len(self._partition_id_to_unique_labels[partition_id]) + < self._num_classes_per_partition + ): + label = self._rng.choice(self._unique_labels, size=1)[0] + if label not in self._partition_id_to_unique_labels[partition_id]: + self._partition_id_to_unique_labels[partition_id].append(label) + elif self._class_assignment_mode == "deterministic": + for partition_id in range(self._num_partitions): + labels = [] + for i in range(self._num_classes_per_partition): + label = self._unique_labels[ + (partition_id + i) % len(self._unique_labels) + ] + labels.append(label) + self._partition_id_to_unique_labels[partition_id] = labels + elif self._class_assignment_mode == "random": + for partition_id in range(self._num_partitions): + labels = self._rng.choice( + self._unique_labels, + size=self._num_classes_per_partition, + replace=False, + ).tolist() + self._partition_id_to_unique_labels[partition_id] = labels + else: + raise ValueError( + f"The supported class_assignment_mode are: 'random', 'deterministic', " + f"'first-deterministic'. You provided: {self._class_assignment_mode}." + ) + + def _count_partitions_having_each_unique_label(self) -> None: + """Count the number of partitions that have each unique label. + + This computation is based on the assigment of the label to the partition_id in + the `_determine_partition_id_to_unique_labels` method. 
+ Given: + * partition 0 has only labels: 0,1 (not necessarily just two samples it can have + many samples but either from 0 or 1) + * partition 1 has only labels: 1, 2 (same count note as above) + * and there are only two partitions then the following will be computed: + { + 0: 1, + 1: 2, + 2: 1 + } + """ + for unique_label in self._unique_labels: + self._unique_label_to_times_used_counter[unique_label] = 0 + for unique_labels in self._partition_id_to_unique_labels.values(): + for unique_label in unique_labels: + self._unique_label_to_times_used_counter[unique_label] += 1 + + def _check_correctness_of_unique_label_to_times_used_counter( + self, labels: NDArray + ) -> None: + """Check if partitioning is possible given the presence requirements. + + The number of times the label can be used must be smaller or equal to the number + of times that the label is present in the dataset. + """ + for unique_label in self._unique_labels: + num_unique = np.sum(labels == unique_label) + if self._unique_label_to_times_used_counter[unique_label] > num_unique: + raise ValueError( + f"Label: {unique_label} is needed to be assigned to more " + f"partitions " + f"({self._unique_label_to_times_used_counter[unique_label]})" + f" than there are samples (corresponding to this label) in the " + f"dataset ({num_unique}). Please decrease the `num_partitions`, " + f"`num_classes_per_partition` to avoid this situation, " + f"or try `class_assigment_mode='deterministic'` to create a more " + f"even distribution of classes along the partitions. " + f"Alternatively use a different dataset if you can not adjust" + f" the any of these parameters." + ) diff --git a/datasets/flwr_datasets/partitioner/pathological_partitioner_test.py b/datasets/flwr_datasets/partitioner/pathological_partitioner_test.py new file mode 100644 index 000000000000..5a3b13bb1436 --- /dev/null +++ b/datasets/flwr_datasets/partitioner/pathological_partitioner_test.py @@ -0,0 +1,279 @@ +# Copyright 2024 Flower Labs GmbH. 
All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Test cases for PathologicalPartitioner.""" + + +import unittest + +import numpy as np +from parameterized import parameterized, parameterized_class + +import datasets +from datasets import Dataset +from flwr_datasets.partitioner.pathological_partitioner import PathologicalPartitioner + + +def _dummy_dataset_setup( + num_samples: int, + partition_by: str, + num_unique_classes: int, + string_partition_by: bool = False, +) -> Dataset: + """Create a dummy dataset for testing.""" + data = { + partition_by: np.tile( + np.arange(num_unique_classes), num_samples // num_unique_classes + 1 + )[:num_samples], + "features": np.random.randn(num_samples), + } + if string_partition_by: + data[partition_by] = data[partition_by].astype(str) + return Dataset.from_dict(data) + + +def _dummy_heterogeneous_dataset_setup( + num_samples: int, partition_by: str, num_unique_classes: int +) -> Dataset: + """Create a dummy dataset for testing.""" + data = { + partition_by: np.tile( + np.arange(num_unique_classes), num_samples // num_unique_classes + 1 + )[:num_samples], + "features": np.random.randn(num_samples), + } + return Dataset.from_dict(data) + + +@parameterized_class(("string_partition_by",), [(False,), (True,)]) +class TestClassConstrainedPartitioner(unittest.TestCase): + """Unit tests for PathologicalPartitioner.""" + + 
@parameterized.expand( # type: ignore + [ + # num_partition, num_classes_per_partition, num_samples, total_classes + (3, 1, 60, 3), # Single class per partition scenario + (5, 2, 100, 5), + (5, 2, 100, 10), + (4, 3, 120, 6), + ] + ) + def test_correct_num_classes_when_partitioned( + self, + num_partitions: int, + num_classes_per_partition: int, + num_samples: int, + num_unique_classes: int, + ) -> None: + """Test correct number of unique classes.""" + dataset = _dummy_dataset_setup(num_samples, "labels", num_unique_classes) + partitioner = PathologicalPartitioner( + num_partitions=num_partitions, + partition_by="labels", + num_classes_per_partition=num_classes_per_partition, + ) + partitioner.dataset = dataset + partitions: dict[int, Dataset] = { + pid: partitioner.load_partition(pid) for pid in range(num_partitions) + } + unique_classes_per_partition = { + pid: np.unique(partition["labels"]) for pid, partition in partitions.items() + } + + for unique_classes in unique_classes_per_partition.values(): + self.assertEqual(num_classes_per_partition, len(unique_classes)) + + def test_first_class_deterministic_assignment(self) -> None: + """Test deterministic assignment of first classes to partitions. + + Test if all the classes are used (which has to be the case, given num_partitions + >= than the number of unique classes). 
+ """ + partition_by = "labels" + dataset = _dummy_dataset_setup(100, partition_by, 10) + partitioner = PathologicalPartitioner( + num_partitions=10, + partition_by="labels", + num_classes_per_partition=2, + class_assignment_mode="first-deterministic", + ) + partitioner.dataset = dataset + partitioner.load_partition(0) + expected_classes = set( + range(10) + # pylint: disable=unsubscriptable-object + if isinstance(dataset[partition_by][0], int) + else [str(i) for i in range(10)] + ) + actual_classes = set() + for pid in range(10): + partition = partitioner.load_partition(pid) + actual_classes.update(np.unique(partition["labels"])) + self.assertEqual(expected_classes, actual_classes) + + @parameterized.expand( + [ # type: ignore + # num_partitions, num_classes_per_partition, num_samples, num_unique_classes + (4, 2, 80, 8), + (10, 2, 100, 10), + ] + ) + def test_deterministic_class_assignment( + self, num_partitions, num_classes_per_partition, num_samples, num_unique_classes + ): + """Test deterministic assignment of classes to partitions.""" + dataset = _dummy_dataset_setup(num_samples, "labels", num_unique_classes) + partitioner = PathologicalPartitioner( + num_partitions=num_partitions, + partition_by="labels", + num_classes_per_partition=num_classes_per_partition, + class_assignment_mode="deterministic", + ) + partitioner.dataset = dataset + partitions = { + pid: partitioner.load_partition(pid) for pid in range(num_partitions) + } + + # Verify each partition has the expected classes, order does not matter + for pid, partition in partitions.items(): + expected_labels = sorted( + [ + (pid + i) % num_unique_classes + for i in range(num_classes_per_partition) + ] + ) + # pylint: disable=unsubscriptable-object + if isinstance(dataset["labels"][0], str): + expected_labels = [str(label) for label in expected_labels] + actual_labels = sorted(np.unique(partition["labels"])) + self.assertTrue( + np.array_equal(expected_labels, actual_labels), + f"Partition {pid} does not 
have the expected labels: " + f"{expected_labels} but instead {actual_labels}.", + ) + + @parameterized.expand( + [ # type: ignore + # num_partitions, num_classes_per_partition, num_samples, num_unique_classes + (10, 3, 20, 3), + ] + ) + def test_too_many_partitions_for_a_class( + self, num_partitions, num_classes_per_partition, num_samples, num_unique_classes + ) -> None: + """Test too many partitions for the number of samples in a class.""" + dataset_1 = _dummy_dataset_setup( + num_samples // 2, "labels", num_unique_classes - 1 + ) + # Create a skewed part of the dataset for the last label + data = { + "labels": np.array([num_unique_classes - 1] * (num_samples // 2)), + "features": np.random.randn(num_samples // 2), + } + # pylint: disable=unsubscriptable-object + if isinstance(dataset_1["labels"][0], str): + data["labels"] = data["labels"].astype(str) + dataset_2 = Dataset.from_dict(data) + dataset = datasets.concatenate_datasets([dataset_1, dataset_2]) + + partitioner = PathologicalPartitioner( + num_partitions=num_partitions, + partition_by="labels", + num_classes_per_partition=num_classes_per_partition, + class_assignment_mode="random", + ) + partitioner.dataset = dataset + + with self.assertRaises(ValueError) as context: + _ = partitioner.load_partition(0) + self.assertEqual( + str(context.exception), + "Label: 0 is needed to be assigned to more partitions (10) than there are " + "samples (corresponding to this label) in the dataset (5). " + "Please decrease the `num_partitions`, `num_classes_per_partition` to " + "avoid this situation, or try `class_assigment_mode='deterministic'` to " + "create a more even distribution of classes along the partitions. 
" + "Alternatively use a different dataset if you can not adjust the any of " + "these parameters.", + ) + + @parameterized.expand( # type: ignore + [ + # num_partitions, num_classes_per_partition, num_samples, num_unique_classes + (10, 11, 100, 10), # 11 > 10 + (5, 11, 100, 10), # 11 > 10 + (10, 20, 100, 5), # 20 > 5 + ] + ) + def test_more_classes_per_partition_than_num_unique_classes_in_dataset_raises( + self, + num_partitions: int, + num_classes_per_partition: int, + num_samples: int, + num_unique_classes: int, + ) -> None: + """Test more num_classes_per_partition > num_unique_classes in the dataset.""" + dataset = _dummy_dataset_setup(num_samples, "labels", num_unique_classes) + with self.assertRaises(ValueError) as context: + partitioner = PathologicalPartitioner( + num_partitions=num_partitions, + partition_by="labels", + num_classes_per_partition=num_classes_per_partition, + ) + partitioner.dataset = dataset + partitioner.load_partition(0) + self.assertEqual( + str(context.exception), + "The specified " + f"`num_classes_per_partition`={num_classes_per_partition} is " + f"greater than the number of unique classes in the given " + f"dataset={len(dataset.unique('labels'))}. 
Reduce the " + f"`num_classes_per_partition` or make use different dataset " + f"to apply this partitioning.", + ) + + @parameterized.expand( # type: ignore + [ + # num_classes_per_partition should be irrelevant since the exception should + # be raised at the very beginning + # num_partitions, num_classes_per_partition, num_samples + (10, 2, 5), + (10, 10, 5), + (100, 10, 99), + ] + ) + def test_more_partitions_than_samples_raises( + self, num_partitions: int, num_classes_per_partition: int, num_samples: int + ) -> None: + """Test if generation of more partitions that there are samples raises.""" + # The number of unique classes in the dataset should be irrelevant since the + # exception should be raised at the very beginning + dataset = _dummy_dataset_setup(num_samples, "labels", num_unique_classes=5) + with self.assertRaises(ValueError) as context: + partitioner = PathologicalPartitioner( + num_partitions=num_partitions, + partition_by="labels", + num_classes_per_partition=num_classes_per_partition, + ) + partitioner.dataset = dataset + partitioner.load_partition(0) + self.assertEqual( + str(context.exception), + "The number of partitions needs to be smaller than the number of " + "samples in the dataset.", + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/datasets/flwr_datasets/partitioner/shard_partitioner.py b/datasets/flwr_datasets/partitioner/shard_partitioner.py index 31eac2309fff..3001df6dcb69 100644 --- a/datasets/flwr_datasets/partitioner/shard_partitioner.py +++ b/datasets/flwr_datasets/partitioner/shard_partitioner.py @@ -17,7 +17,7 @@ # pylint: disable=R0912, R0914 import math -from typing import Dict, List, Optional +from typing import Optional import numpy as np @@ -149,15 +149,15 @@ def __init__( # pylint: disable=R0913 ) -> None: super().__init__() # Attributes based on the constructor - _check_if_natual_number(num_partitions, "num_partitions") + _check_if_natural_number(num_partitions, "num_partitions") self._num_partitions = 
num_partitions self._partition_by = partition_by - _check_if_natual_number( + _check_if_natural_number( num_shards_per_partition, "num_shards_per_partition", True ) self._num_shards_per_partition = num_shards_per_partition self._num_shards_used: Optional[int] = None - _check_if_natual_number(shard_size, "shard_size", True) + _check_if_natural_number(shard_size, "shard_size", True) self._shard_size = shard_size self._keep_incomplete_shard = keep_incomplete_shard self._shuffle = shuffle @@ -165,7 +165,7 @@ def __init__( # pylint: disable=R0913 # Utility attributes self._rng = np.random.default_rng(seed=self._seed) # NumPy random generator - self._partition_id_to_indices: Dict[int, List[int]] = {} + self._partition_id_to_indices: dict[int, list[int]] = {} self._partition_id_to_indices_determined = False def load_partition(self, partition_id: int) -> datasets.Dataset: @@ -299,7 +299,7 @@ def _determine_partition_id_to_indices_if_needed( nid_to_shard_indices = np.split( shard_indices_array, indices_on_which_to_split_shards )[:-1] - partition_id_to_indices: Dict[int, List[int]] = { + partition_id_to_indices: dict[int, list[int]] = { cid: [] for cid in range(self._num_partitions) } # Compute partition_id to sample indices based on the shard indices @@ -360,7 +360,7 @@ def _check_possibility_of_partitions_creation(self) -> None: ) -def _check_if_natual_number( +def _check_if_natural_number( number: Optional[int], parameter_name: str, none_acceptable: bool = False ) -> None: if none_acceptable and number is None: diff --git a/datasets/flwr_datasets/partitioner/shard_partitioner_test.py b/datasets/flwr_datasets/partitioner/shard_partitioner_test.py index d6fa8b529595..be8edf9d2764 100644 --- a/datasets/flwr_datasets/partitioner/shard_partitioner_test.py +++ b/datasets/flwr_datasets/partitioner/shard_partitioner_test.py @@ -17,7 +17,7 @@ # pylint: disable=W0212, R0913 import unittest -from typing import Optional, Tuple +from typing import Optional from datasets import Dataset 
from flwr_datasets.partitioner.shard_partitioner import ShardPartitioner @@ -30,7 +30,7 @@ def _dummy_setup( num_shards_per_partition: Optional[int], shard_size: Optional[int], keep_incomplete_shard: bool = False, -) -> Tuple[Dataset, ShardPartitioner]: +) -> tuple[Dataset, ShardPartitioner]: """Create a dummy dataset for testing.""" data = { partition_by: [i % 3 for i in range(num_rows)], diff --git a/datasets/flwr_datasets/partitioner/size_partitioner.py b/datasets/flwr_datasets/partitioner/size_partitioner.py index 35937d8b9cc7..a79b6b7249f2 100644 --- a/datasets/flwr_datasets/partitioner/size_partitioner.py +++ b/datasets/flwr_datasets/partitioner/size_partitioner.py @@ -1,4 +1,4 @@ -# Copyright 2023 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,72 +15,56 @@ """SizePartitioner class.""" -from typing import Callable, Dict, List, Union - -import numpy as np +import warnings +from collections.abc import Sequence import datasets from flwr_datasets.partitioner.partitioner import Partitioner class SizePartitioner(Partitioner): - """Base class for the deterministic size partitioning based on the `partition_id`. - - The client with `partition_id` has the following relationship regarding the number - of samples. - - `partition_id_to_size_fn(partition_id)` ~ number of samples for `partition_id` - - If the function doesn't transform the `partition_id` it's a linear correlation - between the number of sample for the partition and the value of `partition_id`. For - instance, if the partition ids range from 1 to M, partition with id 1 gets 1 unit of - data, client 2 gets 2 units, and so on, up to partition M which gets M units. 
- - Note that size corresponding to the `partition_id` is deterministic, yet in case of - different dataset shuffling the assignment of samples to `partition_id` will vary. + """Partitioner that creates each partition with the size specified by a user. Parameters ---------- - num_partitions : int - The total number of partitions that the data will be divided into. - partition_id_to_size_fn : Callable - Function that defines the relationship between partition id and the number of - samples. + partition_sizes : Sequence[int] + The size of each partition. partition_id 0 will have partition_sizes[0] + samples, partition_id 1 will have partition_sizes[1] samples, etc. + + Examples + -------- + >>> from flwr_datasets import FederatedDataset + >>> from flwr_datasets.partitioner import SizePartitioner + >>> + >>> partition_sizes = [15_000, 5_000, 30_000] + >>> partitioner = SizePartitioner(partition_sizes) + >>> fds = FederatedDataset(dataset="cifar10", partitioners={"train": partitioner}) """ - def __init__( - self, - num_partitions: int, - partition_id_to_size_fn: Callable, # type: ignore[type-arg] - ) -> None: + def __init__(self, partition_sizes: Sequence[int]) -> None: super().__init__() - if num_partitions <= 0: - raise ValueError("The number of partitions must be greater than zero.") - self._num_partitions = num_partitions - self._partition_id_to_size_fn = partition_id_to_size_fn - - self._partition_id_to_size: Dict[int, int] = {} - self._partition_id_to_indices: Dict[int, List[int]] = {} - # A flag to perform only a single compute to determine the indices + self._pre_ds_validate_partition_sizes(partition_sizes) + self._partition_sizes = partition_sizes + self._partition_id_to_indices: dict[int, list[int]] = {} self._partition_id_to_indices_determined = False def load_partition(self, partition_id: int) -> datasets.Dataset: - """Load a single partition based on the partition index. + """Load a single partition of the size of partition_sizes[partition_id]. 
- The number of samples is dependent on the partition partition_id. + For example if given partition_sizes=[20_000, 10_000, 30_000], + then partition_id=0 will return a partition of size 20_000, + partition_id=1 will return a partition of size 10_000, etc. Parameters ---------- partition_id : int - the index that corresponds to the requested partition + The index that corresponds to the requested partition. Returns ------- - dataset_partition: Dataset - single dataset partition + dataset_partition : Dataset + Single dataset partition. """ - # The partitioning is done lazily - only when the first partition is requested. - # A single run creates the indices assignments for all the partition indices. self._determine_partition_id_to_indices_if_needed() return self.dataset.select(self._partition_id_to_indices[partition_id]) @@ -88,58 +72,57 @@ def load_partition(self, partition_id: int) -> datasets.Dataset: def num_partitions(self) -> int: """Total number of partitions.""" self._determine_partition_id_to_indices_if_needed() - return self._num_partitions + return len(self._partition_sizes) @property - def partition_id_to_size(self) -> Dict[int, int]: - """Node id to the number of samples.""" - return self._partition_id_to_size - - @property - def partition_id_to_indices(self) -> Dict[int, List[int]]: - """Node id to the list of indices.""" + def partition_id_to_indices(self) -> dict[int, list[int]]: + """Partition id to indices (the result of partitioning).""" + self._determine_partition_id_to_indices_if_needed() return self._partition_id_to_indices - def _determine_partition_id_to_size(self) -> None: - """Determine data quantity associated with partition indices.""" - data_division_in_units = self._partition_id_to_size_fn( - np.linspace(start=1, stop=self._num_partitions, num=self._num_partitions) - ) - total_units: Union[int, float] = data_division_in_units.sum() - # Normalize the units to get the fraction total dataset - partition_sizes_as_fraction = 
data_division_in_units / total_units - # Calculate the number of samples - partition_sizes_as_num_of_samples = np.array( - partition_sizes_as_fraction * len(self.dataset), dtype=np.int64 - ) - # Check if any sample is not allocated because of multiplication with fractions. - assigned_samples = np.sum(partition_sizes_as_num_of_samples) - left_unassigned_samples = len(self.dataset) - assigned_samples - # If there is any sample(s) left unassigned, assign it to the largest partition. - partition_sizes_as_num_of_samples[-1] += left_unassigned_samples - for idx, partition_size in enumerate(partition_sizes_as_num_of_samples): - self._partition_id_to_size[idx] = partition_size - - self._check_if_partition_id_to_size_possible() - - def _determine_partition_id_to_indices_if_needed(self) -> None: - """Create an assignment of indices to the partition indices..""" - if self._partition_id_to_indices_determined is True: + def _determine_partition_id_to_indices_if_needed( + self, + ) -> None: + """Create an assignment of indices to the partition indices.""" + if self._partition_id_to_indices_determined: return - self._determine_partition_id_to_size() - total_samples_assigned = 0 - for idx, quantity in self._partition_id_to_size.items(): - self._partition_id_to_indices[idx] = list( - range(total_samples_assigned, total_samples_assigned + quantity) - ) - total_samples_assigned += quantity + self._post_ds_validate_partition_sizes() + start = 0 + end = 0 + for partition_id, partition_size in enumerate(self._partition_sizes): + end += partition_size + indices = list(range(start, end)) + self._partition_id_to_indices[partition_id] = indices + start = end self._partition_id_to_indices_determined = True - def _check_if_partition_id_to_size_possible(self) -> None: - all_positive = all(value >= 1 for value in self.partition_id_to_size.values()) - if not all_positive: + def _pre_ds_validate_partition_sizes(self, partition_sizes: Sequence[int]) -> None: + """Check if the partition sizes are 
valid (no information about the dataset).""" + if not isinstance(partition_sizes, Sequence): + raise ValueError("Partition sizes must be a sequence.") + if len(partition_sizes) == 0: + raise ValueError("Partition sizes must not be empty.") + if not all( + isinstance(partition_size, int) for partition_size in partition_sizes + ): + raise ValueError("All partition sizes must be integers.") + if not all(partition_size > 0 for partition_size in partition_sizes): + raise ValueError("All partition sizes must be greater than zero.") + + def _post_ds_validate_partition_sizes(self) -> None: + """Validate the partition sizes against the dataset size.""" + desired_partition_sizes = sum(self._partition_sizes) + dataset_size = len(self.dataset) + if desired_partition_sizes > dataset_size: raise ValueError( - f"The given specification of the parameter num_partitions" - f"={self._num_partitions} for the given dataset results " - f"in the partitions sizes that are not greater than 0." + f"The sum of partition sizes sum({self._partition_sizes})" + f" = {desired_partition_sizes} is greater than the size of" + f" the dataset {dataset_size}." + ) + if desired_partition_sizes < dataset_size: + warnings.warn( + f"The sum of partition sizes is {desired_partition_sizes}, which is" + f" smaller than the size of the dataset: {dataset_size}. " + f"Ignore this warning if it is the desired behavior.", + stacklevel=1, ) diff --git a/datasets/flwr_datasets/partitioner/size_partitioner_test.py b/datasets/flwr_datasets/partitioner/size_partitioner_test.py index 086ca3731e58..be8edf9d2764 100644 --- a/datasets/flwr_datasets/partitioner/size_partitioner_test.py +++ b/datasets/flwr_datasets/partitioner/size_partitioner_test.py @@ -12,92 +12,380 @@ # See the License for the specific language governing permissions and # limitations under the License.
# ============================================================================== -"""SizePartitioner tests.""" +"""Test ShardPartitioner.""" +# pylint: disable=W0212, R0913 import unittest - -from parameterized import parameterized +from typing import Optional from datasets import Dataset -from flwr_datasets.partitioner.linear_partitioner import LinearPartitioner +from flwr_datasets.partitioner.shard_partitioner import ShardPartitioner -def _dummy_dataset(num_rows: int) -> Dataset: +def _dummy_setup( + num_rows: int, + partition_by: str, + num_partitions: int, + num_shards_per_partition: Optional[int], + shard_size: Optional[int], + keep_incomplete_shard: bool = False, +) -> tuple[Dataset, ShardPartitioner]: + """Create a dummy dataset for testing.""" data = { + partition_by: [i % 3 for i in range(num_rows)], "features": list(range(num_rows)), - "labels": [i % 2 for i in range(num_rows)], } dataset = Dataset.from_dict(data) - return dataset + partitioner = ShardPartitioner( + num_partitions=num_partitions, + num_shards_per_partition=num_shards_per_partition, + partition_by=partition_by, + shard_size=shard_size, + keep_incomplete_shard=keep_incomplete_shard, + ) + partitioner.dataset = dataset + return dataset, partitioner + + +class TestShardPartitionerSpec1(unittest.TestCase): + """Test first possible initialization of ShardPartitioner. + + Specify num_shards_per_partition and shard_size arguments. 
+ """ + + def test_correct_num_partitions(self) -> None: + """Test the correct number of partitions is created.""" + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = 3 + shard_size = 10 + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) + _ = partitioner.load_partition(0) + num_partitions_created = len(partitioner._partition_id_to_indices.keys()) + self.assertEqual(num_partitions_created, num_partitions) + def test_correct_partition_sizes(self) -> None: + """Test if the partitions sizes are as theoretically calculated.""" + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = 3 + shard_size = 10 + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) + sizes = [len(partitioner.load_partition(i)) for i in range(num_partitions)] + sizes = sorted(sizes) + self.assertEqual(sizes, [30, 30, 30]) -class TestLinearPartitioner(unittest.TestCase): - """Test LinearPartitioner.""" + def test_unique_samples(self) -> None: + """Test if each partition has unique samples. - @parameterized.expand( # type: ignore - [ - (1, 100), - (10, 100), - (5, 55), # This will leave some undivided samples + (No duplicates along partitions). 
+ """ + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = 3 + shard_size = 10 + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) + partitions = [ + partitioner.load_partition(i)["features"] for i in range(num_partitions) ] - ) - def test_linear_distribution(self, num_partitions: int, num_rows: int) -> None: - """Test the linear distribution of samples.""" - dataset = _dummy_dataset(num_rows) - partitioner = LinearPartitioner(num_partitions=num_partitions) - partitioner.dataset = dataset - # Run a single partition loading to trigger the division + combined_list = [item for sublist in partitions for item in sublist] + combined_set = set(combined_list) + self.assertEqual(len(combined_list), len(combined_set)) + + +class TestShardPartitionerSpec2(unittest.TestCase): + """Test second possible initialization of ShardPartitioner. + + Specify shard_size and keep_incomplete_shard=False. This setting creates partitions + that might have various sizes (each shard is same size). 
+ """ + + def test_correct_num_partitions(self) -> None: + """Test the correct number of partitions is created.""" + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = None + shard_size = 10 + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) _ = partitioner.load_partition(0) - total_samples = sum(partitioner.partition_id_to_size.values()) - self.assertEqual(total_samples, num_rows) - - # Testing if each partition is getting more than the previous one - last_count = 0 - for i in range(num_partitions): - current_count = partitioner.partition_id_to_size[i] - self.assertGreaterEqual(current_count, last_count) - last_count = current_count - - @parameterized.expand( # type: ignore - [ - (10, 100), - (5, 55), # This will leave some undivided samples - (7, 77), # This will leave some undivided samples + num_partitions_created = len(partitioner._partition_id_to_indices.keys()) + self.assertEqual(num_partitions_created, num_partitions) + + def test_correct_partition_sizes(self) -> None: + """Test if the partitions sizes are as theoretically calculated.""" + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = None + shard_size = 10 + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) + sizes = [len(partitioner.load_partition(i)) for i in range(num_partitions)] + sizes = sorted(sizes) + self.assertEqual(sizes, [30, 40, 40]) + + def test_unique_samples(self) -> None: + """Test if each partition has unique samples. + + (No duplicates along partitions). 
+ """ + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = None + shard_size = 10 + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) + partitions = [ + partitioner.load_partition(i)["features"] for i in range(num_partitions) ] - ) - def test_undivided_samples(self, num_partitions: int, num_rows: int) -> None: - """Test the logic for distributing undivided samples.""" - dataset = _dummy_dataset(num_rows) - partitioner = LinearPartitioner(num_partitions=num_partitions) - partitioner.dataset = dataset - # If there are any undivided samples, they should be added to the largest - # partition - last_partition_id = num_partitions - 1 - actual_samples_in_last_partition = len( - partitioner.load_partition(last_partition_id) + combined_list = [item for sublist in partitions for item in sublist] + combined_set = set(combined_list) + self.assertEqual(len(combined_list), len(combined_set)) + + +class TestShardPartitionerSpec3(unittest.TestCase): + """Test third possible initialization of ShardPartitioner. + + Specify shard_size and keep_incomplete_shard=True. This setting creates partitions + that might have various sizes (each shard is same size). 
+ """ + + def test_correct_num_partitions(self) -> None: + """Test the correct number of partitions is created.""" + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = None + shard_size = 10 + keep_incomplete_shard = True + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) + _ = partitioner.load_partition(0) + num_partitions_created = len(partitioner._partition_id_to_indices.keys()) + self.assertEqual(num_partitions_created, num_partitions) + + def test_correct_partition_sizes(self) -> None: + """Test if the partitions sizes are as theoretically calculated.""" + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = None + shard_size = 10 + keep_incomplete_shard = True + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) + sizes = [len(partitioner.load_partition(i)) for i in range(num_partitions)] + sizes = sorted(sizes) + self.assertEqual(sizes, [33, 40, 40]) + + def test_unique_samples(self) -> None: + """Test if each partition has unique samples. + + (No duplicates along partitions). 
+ """ + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = None + shard_size = 10 + keep_incomplete_shard = True + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, ) - expected_samples_in_last_partition = partitioner.partition_id_to_size[ - last_partition_id + partitions = [ + partitioner.load_partition(i)["features"] for i in range(num_partitions) ] - self.assertEqual( - expected_samples_in_last_partition, actual_samples_in_last_partition + combined_list = [item for sublist in partitions for item in sublist] + combined_set = set(combined_list) + self.assertEqual(len(combined_list), len(combined_set)) + + +class TestShardPartitionerSpec4(unittest.TestCase): + """Test fourth possible initialization of ShardPartitioner. + + Specify num_shards_per_partition but not shard_size arguments. + """ + + def test_correct_num_partitions(self) -> None: + """Test the correct number of partitions is created.""" + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = 3 + shard_size = None + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, ) + _ = partitioner.load_partition(0) + num_partitions_created = len(partitioner._partition_id_to_indices.keys()) + self.assertEqual(num_partitions_created, num_partitions) + + def test_correct_partition_sizes(self) -> None: + """Test if the partitions sizes are as theoretically calculated.""" + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = 3 + shard_size = None + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) + sizes = [len(partitioner.load_partition(i)) for i in 
range(num_partitions)] + sizes = sorted(sizes) + self.assertEqual(sizes, [36, 36, 36]) + + def test_unique_samples(self) -> None: + """Test if each partition has unique samples. + + (No duplicates along partitions). + """ + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = 3 + shard_size = None + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) + partitions = [ + partitioner.load_partition(i)["features"] for i in range(num_partitions) + ] + combined_list = [item for sublist in partitions for item in sublist] + combined_set = set(combined_list) + self.assertEqual(len(combined_list), len(combined_set)) + + +class TestShardPartitionerIncorrectSpec(unittest.TestCase): + """Test the incorrect specification cases. - def test_meaningless_params(self) -> None: - """Test if the params leading to partition size not greater than zero raises.""" + The lack of correctness can be caused by the combination of the num_partitions, + shard_size and num_shards_per_partition arguments.
+ """ + + def test_incorrect_specification(self) -> None: + """Test if the given specification makes the partitioning possible.""" + partition_by = "label" num_rows = 10 - num_partitions = 100 - dataset = _dummy_dataset(num_rows) - partitioner = LinearPartitioner(num_partitions=num_partitions) - partitioner.dataset = dataset - with self.assertRaises(ValueError) as context: - partitioner.load_partition(1) - self.assertIn( - "The given specification of the parameter num_partitions=100 for the given " - "dataset results in the partitions sizes that are not greater than 0.", - str(context.exception), + num_partitions = 3 + num_shards_per_partition = 2 + shard_size = 10 + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) + with self.assertRaises(ValueError): + _ = partitioner.load_partition(0) + + def test_too_big_shard_size(self) -> None: + """Test if it is impossible to create an empty partition.""" + partition_by = "label" + num_rows = 20 + num_partitions = 3 + num_shards_per_partition = None + shard_size = 10 + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, ) + with self.assertRaises(ValueError): + _ = partitioner.load_partition(2).num_rows if __name__ == "__main__": diff --git a/datasets/flwr_datasets/partitioner/square_partitioner.py b/datasets/flwr_datasets/partitioner/square_partitioner.py index 4c894e47eedf..d48af247e5cb 100644 --- a/datasets/flwr_datasets/partitioner/square_partitioner.py +++ b/datasets/flwr_datasets/partitioner/square_partitioner.py @@ -17,10 +17,10 @@ import numpy as np -from flwr_datasets.partitioner.size_partitioner import SizePartitioner +from flwr_datasets.partitioner.id_to_size_fnc_partitioner import IdToSizeFncPartitioner -class SquarePartitioner(SizePartitioner): +class 
SquarePartitioner(IdToSizeFncPartitioner): """Partitioner creates partitions of size that are correlated with squared id. The amount of data each client gets is correlated with the squared partition ID. @@ -31,6 +31,15 @@ class SquarePartitioner(SizePartitioner): ---------- num_partitions : int The total number of partitions that the data will be divided into. + + Examples + -------- + >>> from flwr_datasets import FederatedDataset + >>> from flwr_datasets.partitioner import SquarePartitioner + >>> + >>> partitioner = SquarePartitioner(num_partitions=10) + >>> fds = FederatedDataset(dataset="mnist", partitioners={"train": partitioner}) + >>> partition = fds.load_partition(0) """ def __init__(self, num_partitions: int) -> None: diff --git a/datasets/flwr_datasets/preprocessor/__init__.py b/datasets/flwr_datasets/preprocessor/__init__.py index bab5d82a2035..67b2aaadc3d2 100644 --- a/datasets/flwr_datasets/preprocessor/__init__.py +++ b/datasets/flwr_datasets/preprocessor/__init__.py @@ -20,7 +20,7 @@ from .preprocessor import Preprocessor __all__ = [ + "Divider", "Merger", "Preprocessor", - "Divider", ] diff --git a/datasets/flwr_datasets/preprocessor/divider_test.py b/datasets/flwr_datasets/preprocessor/divider_test.py index ed282fbc18be..bb92d72c1c4a 100644 --- a/datasets/flwr_datasets/preprocessor/divider_test.py +++ b/datasets/flwr_datasets/preprocessor/divider_test.py @@ -15,7 +15,7 @@ """Divider tests.""" import unittest -from typing import Dict, Union +from typing import Union from parameterized import parameterized_class @@ -84,14 +84,14 @@ class TestDivider(unittest.TestCase): """Divider tests.""" divide_config: Union[ - Dict[str, float], - Dict[str, int], - Dict[str, Dict[str, float]], - Dict[str, Dict[str, int]], + dict[str, float], + dict[str, int], + dict[str, dict[str, float]], + dict[str, dict[str, int]], ] divide_split: str drop_remaining_splits: bool - split_name_to_size: Dict[str, int] + split_name_to_size: dict[str, int] def setUp(self) -> None: 
"""Set up the dataset with 3 splits for tests.""" diff --git a/datasets/flwr_datasets/preprocessor/merger.py b/datasets/flwr_datasets/preprocessor/merger.py index 2b76dbbafe4b..e47993dd686e 100644 --- a/datasets/flwr_datasets/preprocessor/merger.py +++ b/datasets/flwr_datasets/preprocessor/merger.py @@ -18,7 +18,6 @@ import collections import warnings from functools import reduce -from typing import Dict, List, Tuple import datasets from datasets import Dataset, DatasetDict @@ -56,9 +55,9 @@ class Merger: def __init__( self, - merge_config: Dict[str, Tuple[str, ...]], + merge_config: dict[str, tuple[str, ...]], ) -> None: - self._merge_config: Dict[str, Tuple[str, ...]] = merge_config + self._merge_config: dict[str, tuple[str, ...]] = merge_config self._check_duplicate_merge_splits() def __call__(self, dataset: DatasetDict) -> DatasetDict: @@ -70,7 +69,7 @@ def resplit(self, dataset: DatasetDict) -> DatasetDict: """Resplit the dataset according to the `merge_config`.""" resplit_dataset = {} for divide_to, divided_from__list in self._merge_config.items(): - datasets_from_list: List[Dataset] = [] + datasets_from_list: list[Dataset] = [] for divide_from in divided_from__list: datasets_from_list.append(dataset[divide_from]) if len(datasets_from_list) > 1: diff --git a/datasets/flwr_datasets/preprocessor/merger_test.py b/datasets/flwr_datasets/preprocessor/merger_test.py index d5c69387e53d..0dd534229eb0 100644 --- a/datasets/flwr_datasets/preprocessor/merger_test.py +++ b/datasets/flwr_datasets/preprocessor/merger_test.py @@ -16,7 +16,6 @@ import unittest -from typing import Dict, Tuple import pytest @@ -39,28 +38,28 @@ def setUp(self) -> None: def test_resplitting_train_size(self) -> None: """Test if resplitting for just renaming keeps the lengths correct.""" - strategy: Dict[str, Tuple[str, ...]] = {"new_train": ("train",)} + strategy: dict[str, tuple[str, ...]] = {"new_train": ("train",)} merger = Merger(strategy) new_dataset = merger(self.dataset_dict) 
self.assertEqual(len(new_dataset["new_train"]), 3) def test_resplitting_valid_size(self) -> None: """Test if resplitting for just renaming keeps the lengths correct.""" - strategy: Dict[str, Tuple[str, ...]] = {"new_valid": ("valid",)} + strategy: dict[str, tuple[str, ...]] = {"new_valid": ("valid",)} merger = Merger(strategy) new_dataset = merger(self.dataset_dict) self.assertEqual(len(new_dataset["new_valid"]), 2) def test_resplitting_test_size(self) -> None: """Test if resplitting for just renaming keeps the lengths correct.""" - strategy: Dict[str, Tuple[str, ...]] = {"new_test": ("test",)} + strategy: dict[str, tuple[str, ...]] = {"new_test": ("test",)} merger = Merger(strategy) new_dataset = merger(self.dataset_dict) self.assertEqual(len(new_dataset["new_test"]), 1) def test_resplitting_train_the_same(self) -> None: """Test if resplitting for just renaming keeps the dataset the same.""" - strategy: Dict[str, Tuple[str, ...]] = {"new_train": ("train",)} + strategy: dict[str, tuple[str, ...]] = {"new_train": ("train",)} merger = Merger(strategy) new_dataset = merger(self.dataset_dict) self.assertTrue( @@ -69,7 +68,7 @@ def test_resplitting_train_the_same(self) -> None: def test_combined_train_valid_size(self) -> None: """Test if the resplitting that combines the datasets has correct size.""" - strategy: Dict[str, Tuple[str, ...]] = { + strategy: dict[str, tuple[str, ...]] = { "train_valid_combined": ("train", "valid") } merger = Merger(strategy) @@ -78,7 +77,7 @@ def test_combined_train_valid_size(self) -> None: def test_resplitting_test_with_combined_strategy_size(self) -> None: """Test if the resplitting that combines the datasets has correct size.""" - strategy: Dict[str, Tuple[str, ...]] = { + strategy: dict[str, tuple[str, ...]] = { "train_valid_combined": ("train", "valid"), "test": ("test",), } @@ -88,7 +87,7 @@ def test_resplitting_test_with_combined_strategy_size(self) -> None: def test_invalid_resplit_strategy_exception_message(self) -> None: """Test 
if the resplitting raises error when non-existing split is given.""" - strategy: Dict[str, Tuple[str, ...]] = { + strategy: dict[str, tuple[str, ...]] = { "new_train": ("invalid_split",), "new_test": ("test",), } @@ -100,16 +99,16 @@ def test_invalid_resplit_strategy_exception_message(self) -> None: def test_nonexistent_split_in_strategy(self) -> None: """Test if the exception is raised when the nonexistent split name is given.""" - strategy: Dict[str, Tuple[str, ...]] = {"new_split": ("nonexistent_split",)} + strategy: dict[str, tuple[str, ...]] = {"new_split": ("nonexistent_split",)} merger = Merger(strategy) with self.assertRaisesRegex( ValueError, "The given dataset key 'nonexistent_split' is not present" ): merger(self.dataset_dict) - def test_duplicate_merge_split_name(self) -> None: # pylint: disable=R0201 + def test_duplicate_merge_split_name(self) -> None: """Test that the new split names are not the same.""" - strategy: Dict[str, Tuple[str, ...]] = { + strategy: dict[str, tuple[str, ...]] = { "new_train": ("train", "valid"), "test": ("train",), } @@ -119,7 +118,7 @@ def test_duplicate_merge_split_name(self) -> None: # pylint: disable=R0201 def test_empty_dataset_dict(self) -> None: """Test that the error is raised when the empty DatasetDict is given.""" empty_dataset = DatasetDict({}) - strategy: Dict[str, Tuple[str, ...]] = {"new_train": ("train",)} + strategy: dict[str, tuple[str, ...]] = {"new_train": ("train",)} merger = Merger(strategy) with self.assertRaisesRegex( ValueError, "The given dataset key 'train' is not present" diff --git a/datasets/flwr_datasets/utils.py b/datasets/flwr_datasets/utils.py index 0ecb96ac9456..1657c2a0ebd3 100644 --- a/datasets/flwr_datasets/utils.py +++ b/datasets/flwr_datasets/utils.py @@ -16,7 +16,7 @@ import warnings -from typing import Dict, List, Optional, Tuple, Union, cast +from typing import Optional, Union, cast from datasets import Dataset, DatasetDict, concatenate_datasets from flwr_datasets.partitioner import 
IidPartitioner, Partitioner @@ -25,21 +25,40 @@ tested_datasets = [ "mnist", + "ylecun/mnist", "cifar10", + "uoft-cs/cifar10", "fashion_mnist", + "zalando-datasets/fashion_mnist", "sasha/dog-food", "zh-plus/tiny-imagenet", "scikit-learn/adult-census-income", "cifar100", + "uoft-cs/cifar100", "svhn", + "ufldl-stanford/svhn", "sentiment140", + "stanfordnlp/sentiment140", "speech_commands", + "LIUM/tedlium", + "flwrlabs/femnist", + "flwrlabs/ucf101", + "flwrlabs/ambient-acoustic-context", + "jlh/uci-mushrooms", + "Mike0307/MNIST-M", + "flwrlabs/usps", + "scikit-learn/iris", + "flwrlabs/pacs", + "flwrlabs/cinic10", + "flwrlabs/caltech101", + "flwrlabs/office-home", + "flwrlabs/fed-isic2019", ] def _instantiate_partitioners( - partitioners: Dict[str, Union[Partitioner, int]] -) -> Dict[str, Partitioner]: + partitioners: dict[str, Union[Partitioner, int]] +) -> dict[str, Partitioner]: """Transform the partitioners from the initial format to instantiated objects. Parameters @@ -52,8 +71,8 @@ def _instantiate_partitioners( partitioners : Dict[str, Partitioner] Partitioners specified as split to Partitioner object. 
""" - instantiated_partitioners: Dict[str, Partitioner] = {} - if isinstance(partitioners, Dict): + instantiated_partitioners: dict[str, Partitioner] = {} + if isinstance(partitioners, dict): for split, partitioner in partitioners.items(): if isinstance(partitioner, Partitioner): instantiated_partitioners[split] = partitioner @@ -76,10 +95,10 @@ def _instantiate_partitioners( def _instantiate_merger_if_needed( - merger: Optional[Union[Preprocessor, Dict[str, Tuple[str, ...]]]] + merger: Optional[Union[Preprocessor, dict[str, tuple[str, ...]]]] ) -> Optional[Preprocessor]: """Instantiate `Merger` if preprocessor is merge_config.""" - if merger and isinstance(merger, Dict): + if merger and isinstance(merger, dict): merger = Merger(merge_config=merger) return cast(Optional[Preprocessor], merger) @@ -94,8 +113,8 @@ def _check_if_dataset_tested(dataset: str) -> None: def divide_dataset( - dataset: Dataset, division: Union[List[float], Tuple[float, ...], Dict[str, float]] -) -> Union[List[Dataset], DatasetDict]: + dataset: Dataset, division: Union[list[float], tuple[float, ...], dict[str, float]] +) -> Union[list[Dataset], DatasetDict]: """Divide the dataset according to the `division`. The division support varying number of splits, which you can name. The splits are @@ -127,7 +146,8 @@ def divide_dataset( >>> division = [0.8, 0.2] >>> train, test = divide_dataset(dataset=partition, division=division) - Use `divide_dataset` with division specified as a dict. + Use `divide_dataset` with division specified as a dict + (this accomplishes the same goal as the example with a list above). 
>>> from flwr_datasets import FederatedDataset >>> from flwr_datasets.utils import divide_dataset @@ -142,12 +162,12 @@ def divide_dataset( dataset_length = len(dataset) ranges = _create_division_indices_ranges(dataset_length, division) if isinstance(division, (list, tuple)): - split_partition: List[Dataset] = [] + split_partition: list[Dataset] = [] for single_range in ranges: split_partition.append(dataset.select(single_range)) return split_partition if isinstance(division, dict): - split_partition_dict: Dict[str, Dataset] = {} + split_partition_dict: dict[str, Dataset] = {} for split_name, single_range in zip(division.keys(), ranges): split_partition_dict[split_name] = dataset.select(single_range) return DatasetDict(split_partition_dict) @@ -159,8 +179,8 @@ def divide_dataset( def _create_division_indices_ranges( dataset_length: int, - division: Union[List[float], Tuple[float, ...], Dict[str, float]], -) -> List[range]: + division: Union[list[float], tuple[float, ...], dict[str, float]], +) -> list[range]: ranges = [] if isinstance(division, (list, tuple)): start_idx = 0 @@ -178,7 +198,7 @@ def _create_division_indices_ranges( ranges.append(range(start_idx, end_idx)) start_idx = end_idx else: - TypeError( + raise TypeError( f"The type of the `division` should be dict, " f"tuple or list but is {type(division)} instead. 
" ) @@ -186,7 +206,7 @@ def _create_division_indices_ranges( def _check_division_config_types_correctness( - division: Union[List[float], Tuple[float, ...], Dict[str, float]] + division: Union[list[float], tuple[float, ...], dict[str, float]] ) -> None: if isinstance(division, (list, tuple)): if not all(isinstance(x, float) for x in division): @@ -205,7 +225,7 @@ def _check_division_config_types_correctness( def _check_division_config_values_correctness( - division: Union[List[float], Tuple[float, ...], Dict[str, float]] + division: Union[list[float], tuple[float, ...], dict[str, float]] ) -> None: if isinstance(division, (list, tuple)): if not all(0 < x <= 1 for x in division): @@ -243,7 +263,7 @@ def _check_division_config_values_correctness( def _check_division_config_correctness( - division: Union[List[float], Tuple[float, ...], Dict[str, float]] + division: Union[list[float], tuple[float, ...], dict[str, float]] ) -> None: _check_division_config_types_correctness(division) _check_division_config_values_correctness(division) @@ -251,14 +271,14 @@ def _check_division_config_correctness( def concatenate_divisions( partitioner: Partitioner, - partition_division: Union[List[float], Tuple[float, ...], Dict[str, float]], + partition_division: Union[list[float], tuple[float, ...], dict[str, float]], division_id: Union[int, str], ) -> Dataset: - """Create a dataset by concatenation of all partitions in the same division. + """Create a dataset by concatenation of divisions from all partitions. The divisions are created based on the `partition_division` and accessed based - on the `division_id`. It can be used to create e.g. centralized dataset from - federated on-edge test sets. + on the `division_id`. This fuction can be used to create e.g. centralized dataset + from federated on-edge test sets. 
Parameters ---------- @@ -279,6 +299,35 @@ def concatenate_divisions( ------- concatenated_divisions : Dataset A dataset created as concatenation of the divisions from all partitions. + + Examples + -------- + Use `concatenate_divisions` with division specified as a list. + + >>> from flwr_datasets import FederatedDataset + >>> from flwr_datasets.utils import concatenate_divisions + >>> + >>> fds = FederatedDataset(dataset="mnist", partitioners={"train": 100}) + >>> concatenated_divisions = concatenate_divisions( + ... partitioner=fds.partitioners["train"], + ... partition_division=[0.8, 0.2], + ... division_id=1 + ... ) + >>> print(concatenated_divisions) + + Use `concatenate_divisions` with division specified as a dict. + This accomplishes the same goal as the example with a list above. + + >>> from flwr_datasets import FederatedDataset + >>> from flwr_datasets.utils import concatenate_divisions + >>> + >>> fds = FederatedDataset(dataset="mnist", partitioners={"train": 100}) + >>> concatenated_divisions = concatenate_divisions( + ... partitioner=fds["train"], + ... partition_division={"train": 0.8, "test": 0.2}, + ... division_id="test" + ... 
) + >>> print(concatenated_divisions) """ _check_division_config_correctness(partition_division) divisions = [] @@ -293,7 +342,7 @@ def concatenate_divisions( ) partition = divide_dataset(partition, partition_division) division = partition[division_id] - elif isinstance(partition_division, Dict): + elif isinstance(partition_division, dict): partition = divide_dataset(partition, partition_division) division = partition[division_id] else: diff --git a/datasets/flwr_datasets/utils_test.py b/datasets/flwr_datasets/utils_test.py index 4add9f88eeb5..3c94570471ac 100644 --- a/datasets/flwr_datasets/utils_test.py +++ b/datasets/flwr_datasets/utils_test.py @@ -14,7 +14,7 @@ # ============================================================================== """Utils tests.""" import unittest -from typing import Dict, List, Tuple, Union +from typing import Union from parameterized import parameterized_class @@ -62,8 +62,8 @@ class UtilsTests(unittest.TestCase): """Utils for tests.""" - partition_division: Union[List[float], Tuple[float, ...], Dict[str, float]] - sizes: Tuple[int] + partition_division: Union[list[float], tuple[float, ...], dict[str, float]] + sizes: tuple[int] division_id: Union[int, str] expected_concatenation_size: int diff --git a/datasets/flwr_datasets/visualization/__init__.py b/datasets/flwr_datasets/visualization/__init__.py new file mode 100644 index 000000000000..b55e406c71db --- /dev/null +++ b/datasets/flwr_datasets/visualization/__init__.py @@ -0,0 +1,24 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Visualization package.""" + + +from .comparison_label_distribution import plot_comparison_label_distribution +from .label_distribution import plot_label_distributions + +__all__ = [ + "plot_comparison_label_distribution", + "plot_label_distributions", +] diff --git a/datasets/flwr_datasets/visualization/bar_plot.py b/datasets/flwr_datasets/visualization/bar_plot.py new file mode 100644 index 000000000000..2b09fb189c7a --- /dev/null +++ b/datasets/flwr_datasets/visualization/bar_plot.py @@ -0,0 +1,142 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Label distribution bar plotting.""" + + +from typing import Any, Optional, Union + +import numpy as np +import pandas as pd +from matplotlib import colors as mcolors +from matplotlib import pyplot as plt +from matplotlib.axes import Axes + + +# pylint: disable=too-many-arguments,too-many-locals,too-many-branches +def _plot_bar( + dataframe: pd.DataFrame, + axis: Optional[Axes], + figsize: Optional[tuple[float, float]], + title: str, + colormap: Optional[Union[str, mcolors.Colormap]], + partition_id_axis: str, + size_unit: str, + legend: bool, + legend_title: Optional[str], + plot_kwargs: Optional[dict[str, Any]], + legend_kwargs: Optional[dict[str, Any]], +) -> Axes: + if axis is None: + if figsize is None: + figsize = _initialize_figsize( + partition_id_axis=partition_id_axis, num_partitions=dataframe.shape[0] + ) + _, axis = plt.subplots(figsize=figsize) + + # Handle plot_kwargs + if plot_kwargs is None: + plot_kwargs = {} + + kind = "bar" if partition_id_axis == "x" else "barh" + if "kind" not in plot_kwargs: + plot_kwargs["kind"] = kind + + # Handle non-optional parameters + plot_kwargs["title"] = title + + # Handle optional parameters + if colormap is not None: + plot_kwargs["colormap"] = colormap + elif "colormap" not in plot_kwargs: + plot_kwargs["colormap"] = "RdYlGn" + + if "xlabel" not in plot_kwargs and "ylabel" not in plot_kwargs: + xlabel, ylabel = _initialize_xy_labels( + size_unit=size_unit, partition_id_axis=partition_id_axis + ) + plot_kwargs["xlabel"] = xlabel + plot_kwargs["ylabel"] = ylabel + + # Make the x ticks readable (they appear 90 degrees rotated by default) + if "rot" not in plot_kwargs: + plot_kwargs["rot"] = 0 + + # Handle hard-coded parameters + # Legend is handled separately (via axes.legend call not in the plot()) + if "legend" not in plot_kwargs: + plot_kwargs["legend"] = False + + # Make the bar plot stacked + if "stacked" not in plot_kwargs: + 
plot_kwargs["stacked"] = True + + axis = dataframe.plot( + ax=axis, + **plot_kwargs, + ) + + if legend: + if legend_kwargs is None: + legend_kwargs = {} + + if legend_title is not None: + legend_kwargs["title"] = legend_title + elif "title" not in legend_kwargs: + legend_kwargs["title"] = "Labels" + + if "loc" not in legend_kwargs: + legend_kwargs["loc"] = "outside center right" + + if "bbox_to_anchor" not in legend_kwargs: + max_len_label_str = max(len(str(column)) for column in dataframe.columns) + shift = min(0.05 + max_len_label_str / 100, 0.15) + legend_kwargs["bbox_to_anchor"] = (1.0 + shift, 0.5) + + handles, legend_labels = axis.get_legend_handles_labels() + _ = axis.figure.legend( + handles=handles[::-1], labels=legend_labels[::-1], **legend_kwargs + ) + + # Heuristic to make the partition id on xticks non-overlapping + if partition_id_axis == "x": + xticklabels = axis.get_xticklabels() + if len(xticklabels) > 20: + # Make every other xtick label not visible + for i, label in enumerate(xticklabels): + if i % 2 == 1: + label.set_visible(False) + return axis + + +def _initialize_figsize( + partition_id_axis: str, + num_partitions: int, +) -> tuple[float, float]: + figsize = (0.0, 0.0) + if partition_id_axis == "x": + figsize = (6.4, 4.8) + elif partition_id_axis == "y": + figsize = (6.4, np.sqrt(num_partitions)) + return figsize + + +def _initialize_xy_labels(size_unit: str, partition_id_axis: str) -> tuple[str, str]: + xlabel = "Partition ID" + ylabel = "Count" if size_unit == "absolute" else "Percent %" + + if partition_id_axis == "y": + xlabel, ylabel = ylabel, xlabel + + return xlabel, ylabel diff --git a/datasets/flwr_datasets/visualization/comparison_label_distribution.py b/datasets/flwr_datasets/visualization/comparison_label_distribution.py new file mode 100644 index 000000000000..17b9a9aec251 --- /dev/null +++ b/datasets/flwr_datasets/visualization/comparison_label_distribution.py @@ -0,0 +1,282 @@ +# Copyright 2024 Flower Labs GmbH. 
All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Comparison of label distribution plotting.""" + + +from typing import Any, Literal, Optional, Union + +import matplotlib.colors as mcolors +import matplotlib.pyplot as plt +import pandas as pd +from matplotlib.axes import Axes +from matplotlib.figure import Figure + +from flwr_datasets.common import EventType, event +from flwr_datasets.partitioner import Partitioner +from flwr_datasets.visualization.constants import PLOT_TYPES +from flwr_datasets.visualization.label_distribution import plot_label_distributions + + +# pylint: disable=too-many-arguments,too-many-locals +def plot_comparison_label_distribution( + partitioner_list: list[Partitioner], + label_name: Union[str, list[str]], + plot_type: Literal["bar", "heatmap"] = "bar", + size_unit: Literal["percent", "absolute"] = "percent", + max_num_partitions: Optional[int] = 30, + partition_id_axis: Literal["x", "y"] = "y", + figsize: Optional[tuple[float, float]] = None, + subtitle: str = "Comparison of Per Partition Label Distribution", + titles: Optional[list[str]] = None, + cmap: Optional[Union[str, mcolors.Colormap]] = None, + legend: bool = False, + legend_title: Optional[str] = None, + verbose_labels: bool = True, + plot_kwargs_list: Optional[list[Optional[dict[str, Any]]]] = None, + legend_kwargs: Optional[dict[str, Any]] = None, +) -> tuple[Figure, list[Axes], 
list[pd.DataFrame]]: + """Compare the label distribution across multiple partitioners. + + Parameters + ---------- + partitioner_list : List[Partitioner] + List of partitioners to be compared. + label_name : Union[str, List[str]] + Column name or list of column names identifying labels for each partitioner. + plot_type : Literal["bar", "heatmap"] + Type of plot, either "bar" or "heatmap". + size_unit : Literal["percent", "absolute"] + "absolute" for raw counts, or "percent" to normalize values to 100%. + max_num_partitions : Optional[int] + Maximum number of partitions to include in the plot. If None, all partitions + are included. + partition_id_axis : Literal["x", "y"] + Axis on which the partition IDs will be marked, either "x" or "y". + figsize : Optional[Tuple[float, float]] + Size of the figure. If None, a default size is calculated. + subtitle : str + Subtitle for the figure. Defaults to "Comparison of Per Partition Label + Distribution" + titles : Optional[List[str]] + Titles for each subplot. If None, no titles are set. + cmap : Optional[Union[str, mcolors.Colormap]] + Colormap for determining the colorspace of the plot. + legend : bool + Whether to include a legend. If True, it will be included right-hand side after + all the plots. + legend_title : Optional[str] + Title for the legend. If None, the defaults will be takes based on the type of + plot. + verbose_labels : bool + Whether to use verbose versions of the labels. + plot_kwargs_list: Optional[List[Optional[Dict[str, Any]]]] + List of plot_kwargs. Any key value pair that can be passed to a plot function + that are not supported directly. In case of the parameter doubling + (e.g. specifying cmap here too) the chosen value will be taken from the + explicit arguments (e.g. cmap specified as an argument to this function not + the value in this dictionary). 
+ legend_kwargs: Optional[Dict[str, Any]] + Any key value pair that can be passed to a figure.legend in case of bar plot or + cbar_kws in case of heatmap that are not supported directly. In case of + parameter doubling (e.g. specifying legend_title here too) the + chosen value will be taken from the explicit arguments (e.g. legend_title + specified as an argument to this function not the value in this dictionary). + + Returns + ------- + fig : Figure + The figure object containing the plots. + axes_list : List[Axes] + List of Axes objects for the plots. + dataframe_list : List[pd.DataFrame] + List of DataFrames used for each plot. + + Examples + -------- + Compare the difference of using different alpha (concentration) parameters in + DirichletPartitioner. + + >>> from flwr_datasets import FederatedDataset + >>> from flwr_datasets.partitioner import DirichletPartitioner + >>> from flwr_datasets.visualization import plot_comparison_label_distribution + >>> + >>> partitioner_list = [] + >>> alpha_list = [10_000.0, 100.0, 1.0, 0.1, 0.01, 0.00001] + >>> for alpha in alpha_list: + >>> fds = FederatedDataset( + >>> dataset="cifar10", + >>> partitioners={ + >>> "train": DirichletPartitioner( + >>> num_partitions=20, + >>> partition_by="label", + >>> alpha=alpha, + >>> min_partition_size=0, + >>> ), + >>> }, + >>> ) + >>> partitioner_list.append(fds.partitioners["train"]) + >>> fig, axes, dataframe_list = plot_comparison_label_distribution( + >>> partitioner_list=partitioner_list, + >>> label_name="label", + >>> titles=[f"Concentration = {alpha}" for alpha in alpha_list], + >>> ) + """ + event( + EventType.PLOT_COMPARISON_LABEL_DISTRIBUTION_CALLED, + { + "num_compare": len(partitioner_list), + "plot_type": plot_type, + }, + ) + num_partitioners = len(partitioner_list) + if isinstance(label_name, str): + label_name = [label_name] * num_partitioners + elif isinstance(label_name, list): + pass + else: + raise TypeError( + f"Label name has to be of type List[str] or str but 
given " + f"{type(label_name)}" + ) + figsize = _initialize_comparison_figsize(figsize, num_partitioners) + axes_sharing = _initialize_axis_sharing(size_unit, plot_type, partition_id_axis) + fig, axes = plt.subplots( + 1, num_partitioners, layout="constrained", figsize=figsize, **axes_sharing + ) + + if titles is None: + titles = ["" for _ in range(num_partitioners)] + + if plot_kwargs_list is None: + plot_kwargs_list = [None] * num_partitioners + + dataframe_list = [] + for idx, (partitioner, single_label_name, plot_kwargs) in enumerate( + zip(partitioner_list, label_name, plot_kwargs_list) + ): + if idx == (num_partitioners - 1): + *_, dataframe = plot_label_distributions( + partitioner=partitioner, + label_name=single_label_name, + plot_type=plot_type, + size_unit=size_unit, + partition_id_axis=partition_id_axis, + axis=axes[idx], + max_num_partitions=max_num_partitions, + cmap=cmap, + legend=legend, + legend_title=legend_title, + verbose_labels=verbose_labels, + plot_kwargs=plot_kwargs, + legend_kwargs=legend_kwargs, + ) + dataframe_list.append(dataframe) + else: + *_, dataframe = plot_label_distributions( + partitioner=partitioner, + label_name=single_label_name, + plot_type=plot_type, + size_unit=size_unit, + partition_id_axis=partition_id_axis, + axis=axes[idx], + max_num_partitions=max_num_partitions, + cmap=cmap, + legend=False, + plot_kwargs=plot_kwargs, + ) + dataframe_list.append(dataframe) + + # Do not use the xlabel and ylabel on each subplot plot + # (instead use global = per figure xlabel and ylabel) + for idx, axis in enumerate(axes): + axis.set_xlabel("") + axis.set_ylabel("") + axis.set_title(titles[idx]) + _set_tick_on_value_axes(axes, partition_id_axis, size_unit) + + # Set up figure xlabel and ylabel + xlabel, ylabel = _initialize_comparison_xy_labels( + plot_type, size_unit, partition_id_axis + ) + fig.supxlabel(xlabel) + fig.supylabel(ylabel) + fig.suptitle(subtitle) + + fig.tight_layout() + return fig, axes, dataframe_list + + +def 
_initialize_comparison_figsize( + figsize: Optional[tuple[float, float]], num_partitioners: int +) -> tuple[float, float]: + if figsize is not None: + return figsize + x_value = 4 + (num_partitioners - 1) * 2 + y_value = 4.8 + figsize = (x_value, y_value) + return figsize + + +def _initialize_comparison_xy_labels( + plot_type: Literal["bar", "heatmap"], + size_unit: Literal["percent", "absolute"], + partition_id_axis: Literal["x", "y"], +) -> tuple[str, str]: + if plot_type == "bar": + xlabel = "Partition ID" + ylabel = "Class distribution" if size_unit == "percent" else "Class Count" + elif plot_type == "heatmap": + xlabel = "Partition ID" + ylabel = "Label" + else: + raise ValueError( + f"Invalid plot_type: {plot_type}. Must be one of {PLOT_TYPES}." + ) + + if partition_id_axis == "y": + xlabel, ylabel = ylabel, xlabel + + return xlabel, ylabel + + +def _initialize_axis_sharing( + size_unit: Literal["percent", "absolute"], + plot_type: Literal["bar", "heatmap"], + partition_id_axis: Literal["x", "y"], +) -> dict[str, bool]: + # Do not intervene when the size_unit is percent and plot_type is heatmap + if size_unit == "percent": + return {} + if plot_type == "heatmap": + return {} + if partition_id_axis == "x": + return {"sharey": True} + if partition_id_axis == "y": + return {"sharex": True} + return {"sharex": False, "sharey": False} + + +def _set_tick_on_value_axes( + axes: list[Axes], + partition_id_axis: Literal["x", "y"], + size_unit: Literal["percent", "absolute"], +) -> None: + if partition_id_axis == "x" and size_unit == "absolute": + # Exclude this case due to sharing of y-axis (and thus y-ticks) + # They must remain set and the number are displayed only on the first plot + pass + else: + for axis in axes[1:]: + axis.set_yticks([]) diff --git a/datasets/flwr_datasets/visualization/constants.py b/datasets/flwr_datasets/visualization/constants.py new file mode 100644 index 000000000000..b3c9cd2a7400 --- /dev/null +++ 
b/datasets/flwr_datasets/visualization/constants.py @@ -0,0 +1,19 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Constants for plot types and size units.""" + +PLOT_TYPES = ("bar", "heatmap") +SIZE_UNITS = ("absolute", "percent") +AXIS_TYPES = ("x", "y") diff --git a/datasets/flwr_datasets/visualization/heatmap_plot.py b/datasets/flwr_datasets/visualization/heatmap_plot.py new file mode 100644 index 000000000000..b5a0e640eb1b --- /dev/null +++ b/datasets/flwr_datasets/visualization/heatmap_plot.py @@ -0,0 +1,104 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Label distribution heatmap plotting.""" + + +from typing import Any, Optional, Union + +import numpy as np +import pandas as pd +import seaborn as sns +from matplotlib import colors as mcolors +from matplotlib import pyplot as plt +from matplotlib.axes import Axes + + +# pylint: disable=too-many-arguments,too-many-locals +def _plot_heatmap( + dataframe: pd.DataFrame, + axis: Optional[Axes], + figsize: Optional[tuple[float, float]], + title: str, + colormap: Optional[Union[str, mcolors.Colormap]], + partition_id_axis: str, + size_unit: str, + legend: bool, + legend_title: Optional[str], + plot_kwargs: Optional[dict[str, Any]], + legend_kwargs: Optional[dict[str, Any]], +) -> Axes: + if axis is None: + if figsize is None: + figsize = _initialize_figsize( + partition_id_axis=partition_id_axis, + num_partitions=dataframe.shape[0], + num_labels=dataframe.shape[1], + ) + _, axis = plt.subplots(figsize=figsize) + + # Handle plot_kwargs + if plot_kwargs is None: + plot_kwargs = {} + + # Handle optional parameters + if colormap is not None: + plot_kwargs["cmap"] = colormap + elif "cmap" not in plot_kwargs: + plot_kwargs["cmap"] = sns.light_palette("seagreen", as_cmap=True) + + if "fmt" not in plot_kwargs: + plot_kwargs["fmt"] = ",d" if size_unit == "absolute" else "0.2f" + + if legend_kwargs is None: + legend_kwargs = {} + if legend: + plot_kwargs["cbar"] = True + + if legend_title is not None: + legend_kwargs["label"] = legend_title + else: + legend_kwargs["label"] = _initialize_cbar_title(size_unit) + else: + plot_kwargs["cbar"] = False + + if partition_id_axis == "x": + dataframe = dataframe.T + + sns.heatmap( + dataframe, + ax=axis, + **plot_kwargs, + cbar_kws=legend_kwargs, + ) + axis.set_title(title) + return axis + + +def _initialize_figsize( + partition_id_axis: str, + num_partitions: int, + num_labels: int, +) -> tuple[float, float]: + figsize = (0.0, 0.0) + if partition_id_axis 
== "x": + figsize = (3 * np.sqrt(num_partitions), np.sqrt(num_labels)) + elif partition_id_axis == "y": + figsize = (3 * np.sqrt(num_labels), np.sqrt(num_partitions)) + + return figsize + + +def _initialize_cbar_title(size_unit: str) -> Optional[str]: + return "Count" if size_unit == "absolute" else "Percent %" diff --git a/datasets/flwr_datasets/visualization/label_distribution.py b/datasets/flwr_datasets/visualization/label_distribution.py new file mode 100644 index 000000000000..b1183c225b86 --- /dev/null +++ b/datasets/flwr_datasets/visualization/label_distribution.py @@ -0,0 +1,249 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Label distribution plotting.""" + + +from typing import Any, Optional, Union + +import matplotlib.colors as mcolors +import pandas as pd +from matplotlib.axes import Axes +from matplotlib.figure import Figure + +from flwr_datasets.common import EventType, event +from flwr_datasets.metrics.utils import compute_counts, compute_frequencies +from flwr_datasets.partitioner import Partitioner +from flwr_datasets.visualization.bar_plot import _plot_bar +from flwr_datasets.visualization.heatmap_plot import _plot_heatmap +from flwr_datasets.visualization.utils import _validate_parameters + +# pylint: disable=too-many-arguments,too-many-locals + + +def plot_label_distributions( + partitioner: Partitioner, + label_name: str, + plot_type: str = "bar", + size_unit: str = "absolute", + max_num_partitions: Optional[int] = None, + partition_id_axis: str = "x", + axis: Optional[Axes] = None, + figsize: Optional[tuple[float, float]] = None, + title: str = "Per Partition Label Distribution", + cmap: Optional[Union[str, mcolors.Colormap]] = None, + legend: bool = False, + legend_title: Optional[str] = None, + verbose_labels: bool = True, + plot_kwargs: Optional[dict[str, Any]] = None, + legend_kwargs: Optional[dict[str, Any]] = None, +) -> tuple[Figure, Axes, pd.DataFrame]: + """Plot the label distribution of the partitions. + + Parameters + ---------- + partitioner : Partitioner + Partitioner with an assigned dataset. + label_name : str + Column name identifying label based on which the plot will be created. + plot_type : str + Type of plot, either "bar" or "heatmap". + size_unit : str + "absolute" or "percent". "absolute" - (number of samples). "percent" - + normalizes each value, so they sum up to 100%. + max_num_partitions : Optional[int] + The number of partitions that will be used. If left None, then all partitions + will be used. + partition_id_axis : str + "x" or "y". 
The axis on which the partition_id will be marked. + axis : Optional[Axes] + Matplotlib Axes object to plot on. + figsize : Optional[Tuple[float, float]] + Size of the figure. + title : str + Title of the plot. + cmap : Optional[Union[str, mcolors.Colormap]] + Colormap for determining the colorspace of the plot. + legend : bool + Include the legend. + legend_title : Optional[str] + Title for the legend. If None, the defaults will be takes based on the type of + plot. + verbose_labels : bool + Whether to use verbose versions of the labels. These values are used as columns + of the returned dataframe and as labels on the legend in a bar plot and columns/ + rows ticks in a heatmap plot. + plot_kwargs: Optional[Dict[str, Any]] + Any key value pair that can be passed to a plot function that are not supported + directly. In case of the parameter doubling (e.g. specifying cmap here too) the + chosen value will be taken from the explicit arguments (e.g. cmap specified as + an argument to this function not the value in this dictionary). + legend_kwargs: Optional[Dict[str, Any]] + Any key value pair that can be passed to a figure.legend in case of bar plot or + cbar_kws in case of heatmap that are not supported directly. In case of the + parameter doubling (e.g. specifying legend_title here too) the + chosen value will be taken from the explicit arguments (e.g. legend_title + specified as an argument to this function not the value in this dictionary). + + Returns + ------- + fig : Figure + The figure object. + axis : Axes + The Axes object with the plot. + dataframe : pd.DataFrame + The DataFrame where each row represents the partition id and each column + represents the class. + + Examples + -------- + Visualize the label distribution resulting from DirichletPartitioner. 
+ + >>> from flwr_datasets import FederatedDataset + >>> from flwr_datasets.partitioner import DirichletPartitioner + >>> from flwr_datasets.visualization import plot_label_distributions + >>> + >>> fds = FederatedDataset( + >>> dataset="cifar10", + >>> partitioners={ + >>> "train": DirichletPartitioner( + >>> num_partitions=20, + >>> partition_by="label", + >>> alpha=0.3, + >>> min_partition_size=0, + >>> ), + >>> }, + >>> ) + >>> partitioner = fds.partitioners["train"] + >>> figure, axis, dataframe = plot_label_distributions( + >>> partitioner=partitioner, + >>> label_name="label", + >>> legend=True, + >>> verbose_labels=True, + >>> ) + + Alternatively you can visualize each partition in terms of fraction of the data + available on that partition instead of the absolute count + + >>> from flwr_datasets import FederatedDataset + >>> from flwr_datasets.partitioner import DirichletPartitioner + >>> from flwr_datasets.visualization import plot_label_distributions + >>> + >>> fds = FederatedDataset( + >>> dataset="cifar10", + >>> partitioners={ + >>> "train": DirichletPartitioner( + >>> num_partitions=20, + >>> partition_by="label", + >>> alpha=0.3, + >>> min_partition_size=0, + >>> ), + >>> }, + >>> ) + >>> partitioner = fds.partitioners["train"] + >>> figure, axis, dataframe = plot_label_distributions( + >>> partitioner=partitioner, + >>> label_name="label", + >>> size_unit="percent", + >>> legend=True, + >>> verbose_labels=True, + >>> ) + >>> + + You can also visualize the data as a heatmap by changing the `plot_type` from + default "bar" to "heatmap" + + >>> from flwr_datasets import FederatedDataset + >>> from flwr_datasets.partitioner import DirichletPartitioner + >>> from flwr_datasets.visualization import plot_label_distributions + >>> + >>> fds = FederatedDataset( + >>> dataset="cifar10", + >>> partitioners={ + >>> "train": DirichletPartitioner( + >>> num_partitions=20, + >>> partition_by="label", + >>> alpha=0.3, + >>> min_partition_size=0, + >>> ), + >>> }, 
+ >>> ) + >>> partitioner = fds.partitioners["train"] + >>> figure, axis, dataframe = plot_label_distributions( + >>> partitioner=partitioner, + >>> label_name="label", + >>> size_unit="percent", + >>> plot_type="heatmap", + >>> legend=True, + >>> plot_kwargs={"annot": True}, + >>> ) + + You can also visualize the returned DataFrame in Jupyter Notebook + >>> dataframe.style.background_gradient(axis=None) + """ + event( + EventType.PLOT_LABEL_DISTRIBUTION_CALLED, + { + "plot_type": plot_type, + }, + ) + _validate_parameters(plot_type, size_unit, partition_id_axis) + + dataframe = pd.DataFrame() + if size_unit == "absolute": + dataframe = compute_counts( + partitioner=partitioner, + column_name=label_name, + verbose_names=verbose_labels, + max_num_partitions=max_num_partitions, + ) + elif size_unit == "percent": + dataframe = compute_frequencies( + partitioner=partitioner, + column_name=label_name, + verbose_names=verbose_labels, + max_num_partitions=max_num_partitions, + ) + dataframe = dataframe * 100.0 + + if plot_type == "bar": + axis = _plot_bar( + dataframe, + axis, + figsize, + title, + cmap, + partition_id_axis, + size_unit, + legend, + legend_title, + plot_kwargs, + legend_kwargs, + ) + elif plot_type == "heatmap": + axis = _plot_heatmap( + dataframe, + axis, + figsize, + title, + cmap, + partition_id_axis, + size_unit, + legend, + legend_title, + plot_kwargs, + legend_kwargs, + ) + assert axis is not None + return axis.figure, axis, dataframe diff --git a/datasets/flwr_datasets/visualization/utils.py b/datasets/flwr_datasets/visualization/utils.py new file mode 100644 index 000000000000..c2f1846de20e --- /dev/null +++ b/datasets/flwr_datasets/visualization/utils.py @@ -0,0 +1,36 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Plotting utils.""" + + +from flwr_datasets.visualization.constants import AXIS_TYPES, PLOT_TYPES, SIZE_UNITS + + +def _validate_parameters( + plot_type: str, size_unit: str, partition_id_axis: str +) -> None: + if plot_type not in PLOT_TYPES: + raise ValueError( + f"Invalid plot_type: {plot_type}. Must be one of {PLOT_TYPES}." + ) + if size_unit not in SIZE_UNITS: + raise ValueError( + f"Invalid size_unit: {size_unit}. Must be one of {SIZE_UNITS}." + ) + if partition_id_axis not in AXIS_TYPES: + raise ValueError( + f"Invalid partition_id_axis: {partition_id_axis}. " + f"Must be one of {AXIS_TYPES}." 
+ ) diff --git a/datasets/pyproject.toml b/datasets/pyproject.toml index 36c0aef5ec2c..73523af2039e 100644 --- a/datasets/pyproject.toml +++ b/datasets/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "flwr-datasets" -version = "0.1.0" +version = "0.3.0" description = "Flower Datasets" license = "Apache-2.0" authors = ["The Flower Authors "] @@ -31,7 +31,6 @@ classifiers = [ "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", @@ -54,18 +53,21 @@ exclude = [ [tool.poetry.dependencies] python = "^3.8" numpy = "^1.21.0" -datasets = "^2.14.6" +datasets = ">=2.14.6 <2.20.0" pillow = { version = ">=6.2.1", optional = true } soundfile = { version = ">=0.12.1", optional = true } librosa = { version = ">=0.10.0.post2", optional = true } tqdm ="^4.66.1" +pyarrow = "==16.1.0" +matplotlib = "^3.7.5" +seaborn = "^0.13.0" [tool.poetry.dev-dependencies] isort = "==5.13.2" black = { version = "==24.2.0", extras = ["jupyter"] } docformatter = "==1.7.5" mypy = "==1.4.0" -pylint = "==2.13.9" +pylint = "==3.0.3" flake8 = "==3.9.2" parameterized = "==0.9.0" pytest = "==7.1.2" @@ -89,10 +91,10 @@ known_first_party = ["flwr_datasets"] [tool.black] line-length = 88 -target-version = ["py38", "py39", "py310", "py311"] +target-version = ["py39", "py310", "py311"] [tool.pylint."MESSAGES CONTROL"] -disable = "bad-continuation,duplicate-code,too-few-public-methods,useless-import-alias" +disable = "duplicate-code,too-few-public-methods,useless-import-alias" [tool.pytest.ini_options] minversion = "6.2" @@ -127,7 +129,7 @@ wrap-summaries = 88 wrap-descriptions = 88 [tool.ruff] -target-version = "py38" +target-version = "py39" line-length = 88 select = ["D", "E", "F", "W", "B", "ISC", "C4", "UP"] fixable = ["D", "E", "F", 
"W", "B", "ISC", "C4", "UP"] diff --git a/dev/bootstrap.sh b/dev/bootstrap.sh index 154fe0f1cbaf..bfcdc8a4369e 100755 --- a/dev/bootstrap.sh +++ b/dev/bootstrap.sh @@ -9,8 +9,8 @@ cd "$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"/../ ./dev/rm-caches.sh # Upgrade/install spcific versions of `pip`, `setuptools`, and `poetry` -python -m pip install -U pip==24.0.0 -python -m pip install -U setuptools==69.5.1 +python -m pip install -U pip==24.1.2 +python -m pip install -U setuptools==70.3.0 python -m pip install -U poetry==1.7.1 # Use `poetry` to install project dependencies diff --git a/dev/build-docker-image-matrix.py b/dev/build-docker-image-matrix.py index 5b9c63434bfb..52c96e3cca7a 100644 --- a/dev/build-docker-image-matrix.py +++ b/dev/build-docker-image-matrix.py @@ -1,11 +1,11 @@ """ -Usage: python dev/build-docker-image-matrix.py --flwr-version +Usage: python dev/build-docker-image-matrix.py --flwr-version """ import argparse +import json from dataclasses import asdict, dataclass from enum import Enum -import json from typing import Any, Callable, Dict, List, Optional @@ -22,7 +22,6 @@ class Distro: LATEST_SUPPORTED_PYTHON_VERSION = "3.11" SUPPORTED_PYTHON_VERSIONS = [ - "3.8", "3.9", "3.10", LATEST_SUPPORTED_PYTHON_VERSION, @@ -135,7 +134,7 @@ def tag_latest_ubuntu_with_flwr_version(image: BaseImage) -> List[str]: ubuntu_base_images = generate_base_images( flwr_version, SUPPORTED_PYTHON_VERSIONS, - [Distro(DistroName.UBUNTU, "22.04")], + [Distro(DistroName.UBUNTU, "24.04")], ) # alpine base images for the latest supported python version alpine_base_images = generate_base_images( @@ -158,12 +157,30 @@ def tag_latest_ubuntu_with_flwr_version(image: BaseImage) -> List[str]: + generate_binary_images( "supernode", base_images, + tag_latest_alpine_with_flwr_version, + lambda image: image.distro.name == DistroName.UBUNTU + or ( + image.distro.name == DistroName.ALPINE + and image.python_version == LATEST_SUPPORTED_PYTHON_VERSION + ), + ) + # ubuntu 
images for each supported python version + + generate_binary_images( + "serverapp", + base_images, tag_latest_ubuntu_with_flwr_version, lambda image: image.distro.name == DistroName.UBUNTU, ) # ubuntu images for each supported python version + generate_binary_images( - "serverapp", + "superexec", + base_images, + tag_latest_ubuntu_with_flwr_version, + lambda image: image.distro.name == DistroName.UBUNTU, + ) + # ubuntu images for each supported python version + + generate_binary_images( + "clientapp", base_images, tag_latest_ubuntu_with_flwr_version, lambda image: image.distro.name == DistroName.UBUNTU, diff --git a/dev/build-docs.sh b/dev/build-docs.sh index f8d4f91508de..f4bf958b0ebf 100755 --- a/dev/build-docs.sh +++ b/dev/build-docs.sh @@ -8,9 +8,7 @@ cd $ROOT ./dev/build-baseline-docs.sh cd $ROOT -./dev/update-examples.sh -cd examples/doc -make docs +python dev/build-example-docs.py cd $ROOT ./datasets/dev/build-flwr-datasets-docs.sh diff --git a/dev/build-example-docs.py b/dev/build-example-docs.py new file mode 100644 index 000000000000..772a26272fd7 --- /dev/null +++ b/dev/build-example-docs.py @@ -0,0 +1,283 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Build the Flower Example docs.""" + +import os +import re +import shutil +import subprocess +from pathlib import Path + +ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +INDEX = os.path.join(ROOT, "examples", "doc", "source", "index.rst") + +initial_text = """ +Flower Examples Documentation +----------------------------- + +Welcome to Flower Examples' documentation. `Flower `_ is +a friendly federated learning framework. + +Join the Flower Community +------------------------- + +The Flower Community is growing quickly - we're a friendly group of researchers, +engineers, students, professionals, academics, and other enthusiasts. + +.. button-link:: https://flower.ai/join-slack + :color: primary + :shadow: + + Join us on Slack + +Quickstart Examples +------------------- + +Flower Quickstart Examples are a collection of demo projects that show how you +can use Flower in combination with other existing frameworks or technologies. + +""" + +table_headers = ( + "\n.. 
list-table::\n :widths: 50 15 15 15\n " + ":header-rows: 1\n\n * - Title\n - Framework\n - Dataset\n - Tags\n\n" +) + +categories = { + "quickstart": {"table": table_headers, "list": ""}, + "advanced": {"table": table_headers, "list": ""}, + "other": {"table": table_headers, "list": ""}, +} + +urls = { + # Frameworks + "Android": "https://www.android.com/", + "C++": "https://isocpp.org/", + "Docker": "https://www.docker.com/", + "JAX": "https://jax.readthedocs.io/en/latest/", + "Java": "https://www.java.com/", + "Keras": "https://keras.io/", + "Kotlin": "https://kotlinlang.org/", + "mlcube": "https://docs.mlcommons.org/mlcube/", + "MLX": "https://ml-explore.github.io/mlx/build/html/index.html", + "MONAI": "https://monai.io/", + "PEFT": "https://huggingface.co/docs/peft/index", + "Swift": "https://www.swift.org/", + "TensorFlowLite": "https://www.tensorflow.org/lite", + "fastai": "https://fast.ai/", + "lifelines": "https://lifelines.readthedocs.io/en/latest/index.html", + "lightning": "https://lightning.ai/docs/pytorch/stable/", + "numpy": "https://numpy.org/", + "opacus": "https://opacus.ai/", + "pandas": "https://pandas.pydata.org/", + "scikit-learn": "https://scikit-learn.org/", + "tabnet": "https://github.com/titu1994/tf-TabNet", + "tensorboard": "https://www.tensorflow.org/tensorboard", + "tensorflow": "https://www.tensorflow.org/", + "torch": "https://pytorch.org/", + "torchvision": "https://pytorch.org/vision/stable/index.html", + "transformers": "https://huggingface.co/docs/transformers/index", + "wandb": "https://wandb.ai/home", + "whisper": "https://huggingface.co/openai/whisper-tiny", + "xgboost": "https://xgboost.readthedocs.io/en/stable/", + # Datasets + "Adult Census Income": "https://www.kaggle.com/datasets/uciml/adult-census-income/data", + "Alpaca-GPT4": "https://huggingface.co/datasets/vicgalle/alpaca-gpt4", + "CIFAR-10": "https://huggingface.co/datasets/uoft-cs/cifar10", + "HIGGS": "https://archive.ics.uci.edu/dataset/280/higgs", + "IMDB": 
"https://huggingface.co/datasets/stanfordnlp/imdb", + "Iris": "https://scikit-learn.org/stable/auto_examples/datasets/plot_iris_dataset.html", + "MNIST": "https://huggingface.co/datasets/ylecun/mnist", + "MedNIST": "https://medmnist.com/", + "Oxford Flower-102": "https://www.robots.ox.ac.uk/~vgg/data/flowers/102/", + "SpeechCommands": "https://huggingface.co/datasets/google/speech_commands", + "Titanic": "https://www.kaggle.com/competitions/titanic", + "Waltons": "https://lifelines.readthedocs.io/en/latest/lifelines.datasets.html#lifelines.datasets.load_waltons", +} + + +def _convert_to_link(search_result): + if "," in search_result: + result = "" + for part in search_result.split(","): + result += f"{_convert_to_link(part)}, " + return result[:-2] + else: + search_result = search_result.strip() + name, url = search_result, urls.get(search_result, None) + if url: + return f"`{name.strip()} <{url.strip()}>`_" + else: + return search_result + + +def _read_metadata(example): + with open(os.path.join(example, "README.md")) as f: + content = f.read() + + metadata_match = re.search(r"^---(.*?)^---", content, re.DOTALL | re.MULTILINE) + if not metadata_match: + raise ValueError("Metadata block not found") + metadata = metadata_match.group(1) + + title_match = re.search(r"^# (.+)$", content, re.MULTILINE) + if not title_match: + raise ValueError("Title not found in metadata") + title = title_match.group(1).strip() + + tags_match = re.search(r"^tags:\s*\[(.+?)\]$", metadata, re.MULTILINE) + if not tags_match: + raise ValueError("Tags not found in metadata") + tags = tags_match.group(1).strip() + + dataset_match = re.search( + r"^dataset:\s*\[(.*?)\]$", metadata, re.DOTALL | re.MULTILINE + ) + if not dataset_match: + raise ValueError("Dataset not found in metadata") + dataset = dataset_match.group(1).strip() + + framework_match = re.search( + r"^framework:\s*\[(.*?|)\]$", metadata, re.DOTALL | re.MULTILINE + ) + if not framework_match: + raise ValueError("Framework not found 
in metadata") + framework = framework_match.group(1).strip() + + dataset = _convert_to_link(re.sub(r"\s+", " ", dataset).strip()) + framework = _convert_to_link(re.sub(r"\s+", " ", framework).strip()) + return title, tags, dataset, framework + + +def _add_table_entry(example, tag, table_var): + title, tags, dataset, framework = _read_metadata(example) + example_name = Path(example).stem + table_entry = ( + f" * - `{title} <{example_name}.html>`_ \n " + f"- {framework} \n - {dataset} \n - {tags}\n\n" + ) + if tag in tags: + categories[table_var]["table"] += table_entry + categories[table_var]["list"] += f" {example_name}\n" + return True + return False + + +def _copy_markdown_files(example): + for file in os.listdir(example): + if file.endswith(".md"): + src = os.path.join(example, file) + dest = os.path.join( + ROOT, "examples", "doc", "source", os.path.basename(example) + ".md" + ) + shutil.copyfile(src, dest) + + +def _add_gh_button(example): + gh_text = f'[View on GitHub](https://github.com/adap/flower/blob/main/examples/{example})' + readme_file = os.path.join(ROOT, "examples", "doc", "source", example + ".md") + with open(readme_file, "r+") as f: + content = f.read() + if gh_text not in content: + content = re.sub( + r"(^# .+$)", rf"\1\n\n{gh_text}", content, count=1, flags=re.MULTILINE + ) + f.seek(0) + f.write(content) + f.truncate() + + +def _copy_images(example): + static_dir = os.path.join(example, "_static") + dest_dir = os.path.join(ROOT, "examples", "doc", "source", "_static") + if os.path.isdir(static_dir): + for file in os.listdir(static_dir): + if file.endswith((".jpg", ".png", ".jpeg")): + shutil.copyfile( + os.path.join(static_dir, file), os.path.join(dest_dir, file) + ) + + +def _add_all_entries(): + examples_dir = os.path.join(ROOT, "examples") + for example in sorted(os.listdir(examples_dir)): + example_path = os.path.join(examples_dir, example) + if os.path.isdir(example_path) and example != "doc": + _copy_markdown_files(example_path) + 
_add_gh_button(example) + _copy_images(example) + + +def _main(): + if os.path.exists(INDEX): + os.remove(INDEX) + + with open(INDEX, "w") as index_file: + index_file.write(initial_text) + + examples_dir = os.path.join(ROOT, "examples") + for example in sorted(os.listdir(examples_dir)): + example_path = os.path.join(examples_dir, example) + if os.path.isdir(example_path) and example != "doc": + _copy_markdown_files(example_path) + _add_gh_button(example) + _copy_images(example_path) + if not _add_table_entry(example_path, "quickstart", "quickstart"): + if not _add_table_entry(example_path, "comprehensive", "comprehensive"): + if not _add_table_entry(example_path, "advanced", "advanced"): + _add_table_entry(example_path, "", "other") + + with open(INDEX, "a") as index_file: + index_file.write(categories["quickstart"]["table"]) + + index_file.write("\nAdvanced Examples\n-----------------\n") + index_file.write( + "Advanced Examples are mostly for users that are both familiar with " + "Federated Learning but also somewhat familiar with Flower's main " + "features.\n" + ) + index_file.write(categories["advanced"]["table"]) + + index_file.write("\nOther Examples\n--------------\n") + index_file.write( + "Flower Examples are a collection of example projects written with " + "Flower that explore different domains and features. You can check " + "which examples already exist and/or contribute your own example.\n" + ) + index_file.write(categories["other"]["table"]) + + _add_all_entries() + + index_file.write( + "\n.. toctree::\n :maxdepth: 1\n :caption: Quickstart\n :hidden:\n\n" + ) + index_file.write(categories["quickstart"]["list"]) + + index_file.write( + "\n.. toctree::\n :maxdepth: 1\n :caption: Advanced\n :hidden:\n\n" + ) + index_file.write(categories["advanced"]["list"]) + + index_file.write( + "\n.. 
toctree::\n :maxdepth: 1\n :caption: Others\n :hidden:\n\n" + ) + index_file.write(categories["other"]["list"]) + + index_file.write("\n") + + +if __name__ == "__main__": + _main() + subprocess.call(f"cd {ROOT}/examples/doc && make html", shell=True) diff --git a/dev/changelog_config.toml b/dev/changelog_config.toml index 898f9bfdb221..3c155387ef93 100644 --- a/dev/changelog_config.toml +++ b/dev/changelog_config.toml @@ -3,970 +3,979 @@ type = ["ci", "docs", "feat", "fix", "refactor", "break"] -project = ["framework", "baselines", "datasets", "examples"] +project = [ + "framework", + "baselines", + "datasets", + "examples", + "benchmarks", + "glossary", +] scope = "skip" pattern_template = "^({types})\\(({projects})(?::({scope}))?\\) ([A-Z][^\\n]*[^\\.\\n])$" -allowed_verbs=[ - "Abandon", - "Abort", - "Abstract", - "Accept", - "Accomodate", - "Accompany", - "Account", - "Accumulate", - "Accuse", - "Ache", - "Achieve", - "Acknowledge", - "Acquire", - "Act", - "Activate", - "Active", - "Adapt", - "Add", - "Address", - "Adhere", - "Adjust", - "Admit", - "Adopt", - "Advance", - "Advise", - "Advocate", - "Affect", - "Affirm", - "Afford", - "Agree", - "Aim", - "Align", - "Allow", - "Alter", - "Analyse", - "Analyze", - "Anchor", - "Annotate", - "Announce", - "Annoy", - "Annul", - "Answer", - "Appeal", - "Appear", - "Append", - "Applicate", - "Apply", - "Appoint", - "Appreciate", - "Approach", - "Approve", - "Argue", - "Arise", - "Arrange", - "Arrest", - "Arrive", - "Ask", - "Assert", - "Assess", - "Assign", - "Assist", - "Associate", - "Assume", - "Assure", - "Attach", - "Attack", - "Attempt", - "Attend", - "Attract", - "Augment", - "Avoid", - "Awake", - "Back", - "Backport", - "Backup", - "Bake", - "Base", - "Battle", - "Be", - "Bear", - "Beat", - "Become", - "Begin", - "Behave", - "Believe", - "Belong", - "Bend", - "Benefit", - "Better", - "Beware", - "Bind", - "Blacklist", - "Blame", - "Blend", - "Block", - "Blow", - "Blur", - "Bootstrap", - "Born", - "Borrow", - 
"Bother", - "Break", - "Bridge", - "Bring", - "Broadcast", - "Buffer", - "Build", - "Bump", - "Bundle", - "Burn", - "Busy", - "Buy", - "Bypass", - "Cache", - "Calculate", - "Call", - "Cancel", - "Capitalize", - "Capture", - "Care", - "Carry", - "Carryout", - "Cast", - "Catch", - "Categorize", - "Cause", - "Center", - "Centralize", - "Challenge", - "Change", - "Chant", - "Charge", - "Chase", - "Chat", - "Check", - "Choose", - "Circle", - "Claim", - "Clarify", - "Clean", - "Cleanse", - "Clear", - "Climb", - "Clip", - "Close", - "Clothe", - "Coalesce", - "Collapse", - "Collect", - "Combine", - "Come", - "Command", - "Comment", - "Commit", - "Compare", - "Compensate", - "Compile", - "Complain", - "Complement", - "Complete", - "Compose", - "Compress", - "Compute", - "Conceal", - "Concentrate", - "Conclude", - "Concur", - "Conduct", - "Configure", - "Confirm", - "Confront", - "Connect", - "Connote", - "Consider", - "Consist", - "Consolidate", - "Constitute", - "Construct", - "Consume", - "Contact", - "Contain", - "Contest", - "Continue", - "Contribute", - "Control", - "Convert", - "Convey", - "Cook", - "Coordinate", - "Cope", - "Copy", - "Correct", - "Cost", - "Counsel", - "Count", - "Cover", - "Create", - "Cross", - "Cry", - "Cut", - "Cycle", - "Damage", - "Dance", - "Deal", - "Debate", - "Decide", - "Declare", - "Decode", - "Deconstruct", - "Decouple", - "Decrease", - "Dedup", - "Duplicate", - "Deduplicate", - "Default", - "Defeat", - "Defend", - "Defer", - "Define", - "Delay", - "Delegate", - "Delete", - "Deliver", - "Demand", - "Demolish", - "Demonstrate", - "Deny", - "Depart", - "Depend", - "Depict", - "Deprecate", - "Derive", - "Describe", - "Deserialize", - "Design", - "Desire", - "Destroy", - "Detail", - "Detect", - "Determine", - "Develop", - "Devote", - "Die", - "Dim", - "Direct", - "Disable", - "Disallow", - "Disappear", - "Disconnect", - "Discontinue", - "Discourage", - "Discover", - "Discuss", - "Dislike", - "Dismiss", - "Dispatch", - "Displace", - 
"Display", - "Distinguish", - "Divide", - "Do", - "Document", - "Dominate", - "Downgrade", - "Download", - "Draw", - "Dread", - "Dress", - "Drink", - "Drive", - "Drop", - "Dry", - "Dump", - "Duplicate", - "Earn", - "Eat", - "Echo", - "Edit", - "Educate", - "Elaborate", - "Elect", - "Elevate", - "Eliminate", - "Embed", - "Emerge", - "Emit", - "Employ", - "Empty", - "Enable", - "Encapsulate", - "Encourage", - "End", - "Endorse", - "Endure", - "Enforce", - "Engage", - "Enhance", - "Enjoy", - "Enquire", - "Enroll", - "Ensure", - "Enter", - "Enumerate", - "Equal", - "Equate", - "Erase", - "Escape", - "Establish", - "Estimate", - "Evaluate", - "Examine", - "Except", - "Exclude", - "Excuse", - "Execute", - "Exempt", - "Exercise", - "Exert", - "Exist", - "Exit", - "Expand", - "Expect", - "Experience", - "Explain", - "Explore", - "Export", - "Expose", - "Express", - "Extend", - "Extract", - "Face", - "Factor", - "Fail", - "Fall", - "Fault", - "Favor", - "Fear", - "Feature", - "Feed", - "Feel", - "Fetch", - "Fight", - "Fill", - "Filter", - "Find", - "Finish", - "Fit", - "Fix", - "Flatten", - "Flee", - "Flip", - "Float", - "Flow", - "Flunk", - "Flush", - "Fly", - "Focus", - "Fold", - "Follow", - "Force", - "Foresee", - "Forget", - "Fork", - "Form", - "Formalize", - "Format", - "Forward", - "Found", - "Free", - "Freeze", - "Gain", - "Gather", - "Generalize", - "Generate", - "Get", - "Gitignore", - "Give", - "Giveup", - "Glance", - "Go", - "Going", - "Govern", - "Grant", - "Grin", - "Group", - "Grow", - "Guard", - "Guess", - "Guide", - "Hack", - "Halt", - "Hand", - "Handle", - "Hang", - "Happen", - "Hardcode", - "Harm", - "Hate", - "Have", - "Head", - "Hear", - "Help", - "Hide", - "Highlight", - "Hint", - "Hire", - "Hit", - "Hold", - "Hook", - "Hope", - "House", - "Hurt", - "Identify", - "Ignore", - "Illuminate", - "Illustrate", - "Imagine", - "Impersonate", - "Implement", - "Imply", - "Import", - "Importune", - "Impose", - "Improve", - "Include", - "Incorporate", - "Increase", 
- "Incur", - "Indent", - "Indicate", - "Infer", - "Influence", - "Inform", - "Inherit", - "Init", - "Initialize", - "Initiate", - "Injure", - "In-line", - "Inline", - "Insist", - "Install", - "Instantiate", - "Instruct", - "Integrate", - "Intend", - "Intercept", - "Internalize", - "Interpret", - "Introduce", - "Invalidate", - "Invert", - "Invest", - "Investigate", - "Invite", - "Invoke", - "Involve", - "Isolate", - "Issue", - "Join", - "Journey", - "Joy", - "Judge", - "Jump", - "Justify", - "Keep", - "Key", - "Kick", - "Kill", - "Kiss", - "Knock", - "Know", - "Label", - "Lack", - "Land", - "Last", - "Laugh", - "Launch", - "Lay", - "Lead", - "Lean", - "Leap", - "Learn", - "Leave", - "Let", - "Lie", - "Lift", - "Light", - "Like", - "Limit", - "Link", - "List", - "Listen", - "Live", - "Load", - "Localize", - "Locate", - "Lock", - "Log", - "Login", - "Look", - "Loop", - "Lose", - "Love", - "Lower", - "Maintain", - "Make", - "Manage", - "Map", - "Mark", - "Marry", - "Match", - "Materialize", - "Matter", - "Mean", - "Measure", - "Meet", - "Memoize", - "Menace", - "Mention", - "Merge", - "Migrate", - "Mind", - "Mirror", - "Misinform", - "Miss", - "Mix", - "Mock", - "Modernize", - "Modify", - "Monitor", - "Monomorphize", - "Move", - "Mutate", - "Name", - "Navigate", - "Near", - "Need", - "Nod", - "Normalize", - "Notarize", - "Note", - "Notice", - "Notify", - "Observe", - "Obtain", - "Occupy", - "Occur", - "Offer", - "Officiate", - "Omit", - "Open", - "Operate", - "Optimise", - "Optimize", - "Order", - "Organise", - "Organize", - "Output", - "Overhaul", - "Override", - "Overwrite", - "Owe", - "Own", - "Pack", - "Package", - "Paint", - "Panic", - "Parameterize", - "Parse", - "Partake", - "Pass", - "Patch", - "Pause", - "Pay", - "Perform", - "Permit", - "Persist", - "Persuade", - "Pick", - "Pin", - "Ping", - "Pipe", - "Place", - "Plan", - "Play", - "Plow", - "Point", - "Ponder", - "Populate", - "Port", - "Position", - "Possess", - "Pour", - "Predict", - "Prefer", - "Prefix", 
- "Prepare", - "Present", - "Preserve", - "Press", - "Presume", - "Prevent", - "Print", - "Prioritize", - "Privatize", - "Proceed", - "Process", - "Procure", - "Produce", - "Prolong", - "Promise", - "Promote", - "Propagate", - "Propose", - "Prosecute", - "Protect", - "Protest", - "Prove", - "Provide", - "Prune", - "Publish", - "Pull", - "Purchase", - "Purge", - "Pursue", - "Push", - "Put", - "Puton", - "Qualify", - "Query", - "Question", - "Queue", - "Quit", - "Quote", - "Race", - "Raise", - "Randomize", - "Reach", - "React", - "Read", - "Realise", - "Realize", - "Reapply", - "Rearrange", - "Reason", - "Rebuild", - "Recall", - "Receive", - "Reckon", - "Recognise", - "Recognize", - "Recommend", - "Reconnect", - "Record", - "Recover", - "Recur", - "Redact", - "Re-define", - "Redefine", - "Re-design", - "Redesign", - "Redirect", - "Re-do", - "Redo", - "Reduce", - "Re-enable", - "Refactor", - "Refer", - "Reference", - "Refine", - "Reflect", - "Reformat", - "Refresh", - "Refuse", - "Regard", - "Regenerate", - "Register", - "Reimplement", - "Re-instate", - "Reinstate", - "Reject", - "Relate", - "Relax", - "Release", - "Reload", - "Rely", - "Remain", - "Remember", - "Remind", - "Remove", - "Rename", - "Render", - "Re-order", - "Reorder", - "Reorganise", - "Reorganize", - "Repair", - "Reparent", - "Repeat", - "Repel", - "Rephrase", - "Replace", - "Reply", - "Report", - "Reposition", - "Represent", - "Request", - "Require", - "Rerender", - "Rerun", - "Re-scale", - "Rescale", - "Research", - "Re-set", - "Reset", - "Reside", - "Resize", - "Resolve", - "Respect", - "Respond", - "Rest", - "Restart", - "Restore", - "Restrict", - "Restructure", - "Result", - "Resume", - "Resurface", - "Retain", - "Retire", - "Retreat", - "Retrieve", - "Retry", - "Return", - "Reuse", - "Revamp", - "Reveal", - "Reverse", - "Revert", - "Review", - "Revise", - "Revisit", - "Revoke", - "Reword", - "Re-wrap", - "Rewrap", - "Rewrite", - "Ride", - "Ring", - "Rise", - "Roll", - "Rotate", - "Round", - 
"Route", - "Rule", - "Run", - "Sale", - "Salute", - "Sample", - "Sanitize", - "Save", - "Say", - "Scale", - "Scope", - "Score", - "Scroll", - "Search", - "Secure", - "See", - "Seek", - "Seem", - "Select", - "Self-initialize", - "Sell", - "Send", - "Separate", - "Serialize", - "Serve", - "Set", - "Settle", - "Shake", - "Shape", - "Share", - "Shift", - "Shoot", - "Shorten", - "Shout", - "Show", - "Shrink", - "Shuffle", - "Shut", - "Sign", - "Signify", - "Silence", - "Simplify", - "Simulate", - "Sing", - "Sit", - "Size", - "Skip", - "Sleep", - "Slide", - "Slip", - "Smile", - "Solve", - "Sort", - "Sound", - "Source", - "Spawn", - "Speak", - "Specify", - "Spend", - "Split", - "Spread", - "Stand", - "Standardize", - "Stare", - "Start", - "State", - "Stay", - "Steal", - "Steer", - "Step", - "Stick", - "Stop", - "Store", - "Stress", - "Stretch", - "Strike", - "Stringify", - "Strip", - "Struggle", - "Stub", - "Study", - "Style", - "Subclass", - "Submit", - "Substitute", - "Subtract", - "Succeed", - "Suffer", - "Suggest", - "Suit", - "Supply", - "Support", - "Suppose", - "Suppress", - "Surround", - "Survive", - "Suspect", - "Swallow", - "Swap", - "Sway", - "Switch", - "Sync", - "Synchronise", - "Synchronize", - "Synthesize", - "Take", - "Talk", - "Talkover", - "Target", - "Teach", - "Tell", - "Tempt", - "Tend", - "Terminate", - "Test", - "Testify", - "Thank", - "Think", - "Threaten", - "Throw", - "Tie", - "Time", - "Toggle", - "Touch", - "Track", - "Trade", - "Train", - "Transfer", - "Transform", - "Translate", - "Transpile", - "Trash", - "Travel", - "Tread", - "Treat", - "Trigger", - "Trim", - "Truncate", - "Trust", - "Try", - "Tune", - "Turn", - "Tweak", - "Twist", - "Unblock", - "Uncomment", - "Uncover", - "Understand", - "Undertake", - "Undo", - "Undry", - "Unescape", - "Unfold", - "Unify", - "Unignore", - "Unite", - "Unload", - "Unlock", - "Unpack", - "Unregister", - "Unskip", - "Unsubscribe", - "Untrack", - "Unwrap", - "Update", - "Upgrade", - "Upload", - "Urge", - 
"Use", - "Utter", - "Validate", - "Value", - "Vanish", - "Vary", - "Verbosify", - "Verify", - "View", - "Visit", - "Vocalize", - "Voice", - "Vote", - "Wait", - "Wake", - "Walk", - "Want", - "Warn", - "Warrant", - "Wash", - "Watch", - "Wear", - "Weep", - "Weigh", - "Welcome", - "Whitelist", - "Win", - "Wipe", - "Wire", - "Wish", - "Withdraw", - "Wonder", - "Work", - "Workout", - "Worry", - "Wrap", - "Write" +allowed_verbs = [ + "Abandon", + "Abort", + "Abstract", + "Accept", + "Accomodate", + "Accompany", + "Account", + "Accumulate", + "Accuse", + "Ache", + "Achieve", + "Acknowledge", + "Acquire", + "Act", + "Activate", + "Active", + "Adapt", + "Add", + "Address", + "Adhere", + "Adjust", + "Admit", + "Adopt", + "Advance", + "Advise", + "Advocate", + "Affect", + "Affirm", + "Afford", + "Agree", + "Aim", + "Align", + "Allow", + "Alter", + "Amend", + "Analyse", + "Analyze", + "Anchor", + "Annotate", + "Announce", + "Annoy", + "Annul", + "Answer", + "Appeal", + "Appear", + "Append", + "Applicate", + "Apply", + "Appoint", + "Appreciate", + "Approach", + "Approve", + "Argue", + "Arise", + "Arrange", + "Arrest", + "Arrive", + "Ask", + "Assert", + "Assess", + "Assign", + "Assist", + "Associate", + "Assume", + "Assure", + "Attach", + "Attack", + "Attempt", + "Attend", + "Attract", + "Augment", + "Avoid", + "Awake", + "Back", + "Backport", + "Backup", + "Bake", + "Base", + "Battle", + "Be", + "Bear", + "Beat", + "Become", + "Begin", + "Behave", + "Believe", + "Belong", + "Bend", + "Benefit", + "Better", + "Beware", + "Bind", + "Blacklist", + "Blame", + "Blend", + "Block", + "Blow", + "Blur", + "Bootstrap", + "Born", + "Borrow", + "Bother", + "Break", + "Bridge", + "Bring", + "Broadcast", + "Buffer", + "Build", + "Bump", + "Bundle", + "Burn", + "Busy", + "Buy", + "Bypass", + "Cache", + "Calculate", + "Call", + "Cancel", + "Capitalize", + "Capture", + "Care", + "Carry", + "Carryout", + "Cast", + "Catch", + "Categorize", + "Cause", + "Center", + "Centralize", + "Challenge", + 
"Change", + "Chant", + "Charge", + "Chase", + "Chat", + "Check", + "Choose", + "Circle", + "Claim", + "Clarify", + "Clean", + "Cleanse", + "Clear", + "Climb", + "Clip", + "Close", + "Clothe", + "Coalesce", + "Collapse", + "Collect", + "Combine", + "Come", + "Command", + "Comment", + "Commit", + "Compare", + "Compensate", + "Compile", + "Complain", + "Complement", + "Complete", + "Compose", + "Compress", + "Compute", + "Conceal", + "Concentrate", + "Conclude", + "Concur", + "Conduct", + "Configure", + "Confirm", + "Confront", + "Connect", + "Connote", + "Consider", + "Consist", + "Consolidate", + "Constitute", + "Construct", + "Consume", + "Contact", + "Contain", + "Contest", + "Continue", + "Contribute", + "Control", + "Convert", + "Convey", + "Cook", + "Coordinate", + "Cope", + "Copy", + "Correct", + "Cost", + "Counsel", + "Count", + "Cover", + "Create", + "Cross", + "Cry", + "Cut", + "Cycle", + "Damage", + "Dance", + "Deal", + "Debate", + "Decide", + "Declare", + "Decode", + "Deconstruct", + "Decouple", + "Decrease", + "Dedup", + "Duplicate", + "Deduplicate", + "Default", + "Defeat", + "Defend", + "Defer", + "Define", + "Delay", + "Delegate", + "Delete", + "Deliver", + "Demand", + "Demolish", + "Demonstrate", + "Deny", + "Depart", + "Depend", + "Depict", + "Deprecate", + "Derive", + "Describe", + "Deserialize", + "Design", + "Desire", + "Destroy", + "Detail", + "Detect", + "Determine", + "Develop", + "Devote", + "Die", + "Dim", + "Direct", + "Disable", + "Disallow", + "Disappear", + "Disconnect", + "Discontinue", + "Discourage", + "Discover", + "Discuss", + "Dislike", + "Dismiss", + "Dispatch", + "Displace", + "Display", + "Distinguish", + "Divide", + "Do", + "Document", + "Dominate", + "Downgrade", + "Download", + "Draw", + "Dread", + "Dress", + "Drink", + "Drive", + "Drop", + "Dry", + "Dump", + "Duplicate", + "Earn", + "Eat", + "Echo", + "Edit", + "Educate", + "Elaborate", + "Elect", + "Elevate", + "Eliminate", + "Embed", + "Emerge", + "Emit", + "Employ", + 
"Empty", + "Enable", + "Encapsulate", + "Encourage", + "End", + "Endorse", + "Endure", + "Enforce", + "Engage", + "Enhance", + "Enjoy", + "Enquire", + "Enroll", + "Ensure", + "Enter", + "Enumerate", + "Equal", + "Equate", + "Erase", + "Escape", + "Establish", + "Estimate", + "Evaluate", + "Examine", + "Except", + "Exclude", + "Excuse", + "Execute", + "Exempt", + "Exercise", + "Exert", + "Exist", + "Exit", + "Expand", + "Expect", + "Experience", + "Explain", + "Explore", + "Export", + "Expose", + "Express", + "Extend", + "Extract", + "Face", + "Factor", + "Fail", + "Fall", + "Fault", + "Favor", + "Fear", + "Feature", + "Feed", + "Feel", + "Fetch", + "Fight", + "Fill", + "Filter", + "Find", + "Finish", + "Fit", + "Fix", + "Flatten", + "Flee", + "Flip", + "Float", + "Flow", + "Flunk", + "Flush", + "Fly", + "Focus", + "Fold", + "Follow", + "Force", + "Foresee", + "Forget", + "Fork", + "Form", + "Formalize", + "Format", + "Forward", + "Found", + "Free", + "Freeze", + "Gain", + "Gather", + "Generalize", + "Generate", + "Get", + "Gitignore", + "Give", + "Giveup", + "Glance", + "Go", + "Going", + "Govern", + "Grant", + "Grin", + "Group", + "Grow", + "Guard", + "Guess", + "Guide", + "Hack", + "Halt", + "Hand", + "Handle", + "Hang", + "Happen", + "Hardcode", + "Harm", + "Hate", + "Have", + "Head", + "Hear", + "Help", + "Hide", + "Highlight", + "Hint", + "Hire", + "Hit", + "Hold", + "Hook", + "Hope", + "House", + "Hurt", + "Identify", + "Ignore", + "Illuminate", + "Illustrate", + "Imagine", + "Impersonate", + "Implement", + "Imply", + "Import", + "Importune", + "Impose", + "Improve", + "Include", + "Incorporate", + "Increase", + "Incur", + "Indent", + "Indicate", + "Infer", + "Influence", + "Inform", + "Inherit", + "Init", + "Initialize", + "Initiate", + "Injure", + "In-line", + "Inline", + "Insist", + "Install", + "Instantiate", + "Instruct", + "Integrate", + "Intend", + "Intercept", + "Internalize", + "Interpret", + "Introduce", + "Invalidate", + "Invert", + "Invest", + 
"Investigate", + "Invite", + "Invoke", + "Involve", + "Isolate", + "Issue", + "Join", + "Journey", + "Joy", + "Judge", + "Jump", + "Justify", + "Keep", + "Key", + "Kick", + "Kill", + "Kiss", + "Knock", + "Know", + "Label", + "Lack", + "Land", + "Last", + "Laugh", + "Launch", + "Lay", + "Lead", + "Lean", + "Leap", + "Learn", + "Leave", + "Let", + "Lie", + "Lift", + "Light", + "Like", + "Limit", + "Link", + "List", + "Listen", + "Live", + "Load", + "Localize", + "Locate", + "Lock", + "Log", + "Login", + "Look", + "Loop", + "Lose", + "Love", + "Lower", + "Maintain", + "Make", + "Manage", + "Map", + "Mark", + "Marry", + "Match", + "Materialize", + "Matter", + "Mean", + "Measure", + "Meet", + "Memoize", + "Menace", + "Mention", + "Merge", + "Migrate", + "Mind", + "Mirror", + "Misinform", + "Miss", + "Mix", + "Mock", + "Modernize", + "Modify", + "Monitor", + "Monomorphize", + "Move", + "Mutate", + "Name", + "Navigate", + "Near", + "Need", + "Nod", + "Normalize", + "Notarize", + "Note", + "Notice", + "Notify", + "Observe", + "Obtain", + "Occupy", + "Occur", + "Offer", + "Officiate", + "Omit", + "Open", + "Operate", + "Optimise", + "Optimize", + "Order", + "Organise", + "Organize", + "Output", + "Overhaul", + "Override", + "Overwrite", + "Owe", + "Own", + "Pack", + "Package", + "Paint", + "Panic", + "Parameterize", + "Parse", + "Partake", + "Pass", + "Patch", + "Pause", + "Pay", + "Perform", + "Permit", + "Persist", + "Persuade", + "Pick", + "Pin", + "Ping", + "Pipe", + "Place", + "Plan", + "Play", + "Plow", + "Point", + "Ponder", + "Populate", + "Port", + "Position", + "Possess", + "Pour", + "Predict", + "Prefer", + "Prefix", + "Prepare", + "Present", + "Preserve", + "Press", + "Presume", + "Prevent", + "Print", + "Prioritize", + "Privatize", + "Proceed", + "Process", + "Procure", + "Produce", + "Prolong", + "Promise", + "Promote", + "Prompt", + "Propagate", + "Propose", + "Prosecute", + "Protect", + "Protest", + "Prove", + "Provide", + "Prune", + "Publish", + "Pull", + 
"Purchase", + "Purge", + "Pursue", + "Push", + "Put", + "Puton", + "Qualify", + "Query", + "Question", + "Queue", + "Quit", + "Quote", + "Race", + "Raise", + "Randomize", + "Reach", + "React", + "Read", + "Realise", + "Realize", + "Reapply", + "Rearrange", + "Reason", + "Rebuild", + "Recall", + "Receive", + "Reckon", + "Recognise", + "Recognize", + "Recommend", + "Reconnect", + "Record", + "Recover", + "Recur", + "Redact", + "Re-define", + "Redefine", + "Re-design", + "Redesign", + "Redirect", + "Re-do", + "Redo", + "Reduce", + "Re-enable", + "Refactor", + "Refer", + "Reference", + "Refine", + "Reflect", + "Reformat", + "Refresh", + "Refuse", + "Regard", + "Regenerate", + "Register", + "Reimplement", + "Re-instate", + "Reinstate", + "Reject", + "Relate", + "Relax", + "Release", + "Reload", + "Rely", + "Remain", + "Remember", + "Remind", + "Remove", + "Rename", + "Render", + "Re-order", + "Reorder", + "Reorganise", + "Reorganize", + "Repair", + "Reparent", + "Repeat", + "Repel", + "Rephrase", + "Replace", + "Reply", + "Report", + "Reposition", + "Represent", + "Request", + "Require", + "Rerender", + "Rerun", + "Re-scale", + "Rescale", + "Research", + "Re-set", + "Reset", + "Reside", + "Resize", + "Resolve", + "Respect", + "Respond", + "Rest", + "Restart", + "Restore", + "Restrict", + "Restructure", + "Result", + "Resume", + "Resurface", + "Retain", + "Retire", + "Retreat", + "Retrieve", + "Retry", + "Return", + "Reuse", + "Revamp", + "Reveal", + "Reverse", + "Revert", + "Review", + "Revise", + "Revisit", + "Revoke", + "Reword", + "Re-wrap", + "Rewrap", + "Rewrite", + "Ride", + "Ring", + "Rise", + "Roll", + "Rotate", + "Round", + "Route", + "Rule", + "Run", + "Sale", + "Salute", + "Sample", + "Sanitize", + "Save", + "Say", + "Scale", + "Scope", + "Score", + "Scroll", + "Search", + "Secure", + "See", + "Seek", + "Seem", + "Select", + "Self-initialize", + "Sell", + "Send", + "Separate", + "Serialize", + "Serve", + "Set", + "Settle", + "Shake", + "Shape", + "Share", + 
"Shift", + "Shoot", + "Shorten", + "Shout", + "Show", + "Shrink", + "Shuffle", + "Shut", + "Sign", + "Signify", + "Silence", + "Simplify", + "Simulate", + "Sing", + "Sit", + "Size", + "Skip", + "Sleep", + "Slide", + "Slip", + "Smile", + "Solve", + "Sort", + "Sound", + "Source", + "Spawn", + "Speak", + "Specify", + "Spend", + "Split", + "Spread", + "Stand", + "Standardize", + "Stare", + "Start", + "State", + "Stay", + "Steal", + "Steer", + "Step", + "Stick", + "Stop", + "Store", + "Stress", + "Stretch", + "Strike", + "Stringify", + "Strip", + "Struggle", + "Stub", + "Study", + "Style", + "Subclass", + "Submit", + "Substitute", + "Subtract", + "Succeed", + "Suffer", + "Suggest", + "Suit", + "Supply", + "Support", + "Suppose", + "Suppress", + "Surround", + "Survive", + "Suspect", + "Swallow", + "Swap", + "Sway", + "Switch", + "Sync", + "Synchronise", + "Synchronize", + "Synthesize", + "Take", + "Talk", + "Talkover", + "Target", + "Teach", + "Tell", + "Tempt", + "Tend", + "Terminate", + "Test", + "Testify", + "Thank", + "Think", + "Threaten", + "Throw", + "Tie", + "Time", + "Toggle", + "Touch", + "Track", + "Trade", + "Train", + "Transfer", + "Transform", + "Translate", + "Transpile", + "Trash", + "Travel", + "Tread", + "Treat", + "Trigger", + "Trim", + "Truncate", + "Trust", + "Try", + "Tune", + "Turn", + "Tweak", + "Twist", + "Unblock", + "Uncomment", + "Uncover", + "Understand", + "Undertake", + "Undo", + "Undry", + "Unescape", + "Unfold", + "Unify", + "Unignore", + "Unite", + "Unload", + "Unlock", + "Unpack", + "Unregister", + "Unskip", + "Unsubscribe", + "Untrack", + "Unwrap", + "Update", + "Upgrade", + "Upload", + "Urge", + "Use", + "Utter", + "Validate", + "Value", + "Vanish", + "Vary", + "Verbosify", + "Verify", + "View", + "Visit", + "Vocalize", + "Voice", + "Vote", + "Wait", + "Wake", + "Walk", + "Want", + "Warn", + "Warrant", + "Wash", + "Watch", + "Wear", + "Weep", + "Weigh", + "Welcome", + "Whitelist", + "Win", + "Wipe", + "Wire", + "Wish", + "Withdraw", + 
"Wonder", + "Work", + "Workout", + "Worry", + "Wrap", + "Write", ] diff --git a/dev/check_pr_title.py b/dev/check_pr_title.py index 33b7a4664e9f..b4fcccafc6f5 100644 --- a/dev/check_pr_title.py +++ b/dev/check_pr_title.py @@ -19,7 +19,6 @@ import sys import tomllib - if __name__ == "__main__": pr_title = sys.argv[1] @@ -47,7 +46,7 @@ error = "it doesn't have the correct format" # This check is there to ignore dependabot PRs from title checks - if pr_title.startswith("chore"): + if pr_title.startswith("build"): sys.exit(0) elif not match: valid = False diff --git a/dev/format.sh b/dev/format.sh index 05248b5eed3d..a3129b932e5d 100755 --- a/dev/format.sh +++ b/dev/format.sh @@ -2,7 +2,11 @@ set -e cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../ +taplo fmt + # Python +python -m flwr_tool.check_copyright src/py/flwr +python -m flwr_tool.init_py_fix src/py/flwr python -m isort --skip src/py/flwr/proto src/py python -m black -q --exclude src/py/flwr/proto src/py python -m docformatter -i -r src/py/flwr -e src/py/flwr/proto @@ -16,6 +20,11 @@ find src/proto/flwr/proto -name *.proto | grep "\.proto" | xargs clang-format -i python -m black -q examples python -m docformatter -i -r examples +# Benchmarks +python -m isort benchmarks +python -m black -q benchmarks +python -m docformatter -i -r benchmarks + # E2E python -m isort e2e python -m black -q e2e @@ -29,3 +38,6 @@ python -m nbstripout examples/*/*.ipynb --extra-keys "$KEYS" # Markdown python -m mdformat --number doc/source examples + +# RST +docstrfmt doc/source diff --git a/dev/publish-nightly.sh b/dev/publish-nightly.sh index a42af1f17cfc..0c03cdda9f49 100755 --- a/dev/publish-nightly.sh +++ b/dev/publish-nightly.sh @@ -24,16 +24,12 @@ cd "$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"/../ # The version name in the pyproject.toml will be appended with "-dev" and the current date. # The result will be a release on PyPi of the package "flwr-nightly" of version e.g. 
# "0.1.1.dev20200716" as seen at https://pypi.org/project/flwr-nightly/ -# If the script is called with the flag `--skip-publish`, the name and version are changed -# in the pyproject.toml but the package won't be published. if [[ $(git log --since="24 hours ago" --pretty=oneline) ]]; then sed -i -E "s/^name = \"(.+)\"/name = \"\1-nightly\"/" pyproject.toml sed -i -E "s/^version = \"(.+)\"/version = \"\1.dev$(date '+%Y%m%d')\"/" pyproject.toml - if [ "$1" != "--skip-publish" ]; then - python -m poetry build - python -m poetry publish -u __token__ -p $PYPI_TOKEN - fi + python -m poetry build + python -m poetry publish -u __token__ -p $PYPI_TOKEN else echo "There were no commits in the last 24 hours." fi diff --git a/dev/setup-defaults.sh b/dev/setup-defaults.sh index 36cbfe4df671..af5f0cb9d3ce 100755 --- a/dev/setup-defaults.sh +++ b/dev/setup-defaults.sh @@ -1,7 +1,7 @@ #!/bin/bash set -e -version=${1:-3.8.17} +version=${1:-3.9.20} # To install pyenv and virtualenv plugin function install_pyenv(){ diff --git a/dev/test.sh b/dev/test.sh index 7cabf35abf41..b8eeed14bc46 100755 --- a/dev/test.sh +++ b/dev/test.sh @@ -11,11 +11,11 @@ clang-format --Werror --dry-run src/proto/flwr/proto/* echo "- clang-format: done" echo "- isort: start" -python -m isort --check-only --skip src/py/flwr/proto src/py/flwr e2e +python -m isort --check-only --skip src/py/flwr/proto src/py/flwr benchmarks e2e echo "- isort: done" echo "- black: start" -python -m black --exclude "src\/py\/flwr\/proto" --check src/py/flwr examples e2e +python -m black --exclude "src\/py\/flwr\/proto" --check src/py/flwr benchmarks examples e2e echo "- black: done" echo "- init_py_check: start" @@ -23,9 +23,13 @@ python -m flwr_tool.init_py_check src/py/flwr src/py/flwr_tool echo "- init_py_check: done" echo "- docformatter: start" -python -m docformatter -c -r src/py/flwr e2e -e src/py/flwr/proto +python -m docformatter -c -r src/py/flwr e2e -e src/py/flwr/proto echo "- docformatter: done" +echo "- docsig: 
start" +docsig src/py/flwr +echo "- docsig: done" + echo "- ruff: start" python -m ruff check src/py/flwr echo "- ruff: done" @@ -56,8 +60,28 @@ echo "- mdformat: done" echo "- All Markdown checks passed" +echo "- Start TOML checks" + +echo "- taplo: start" +taplo fmt --check +echo "- taplo: done" + +echo "- All TOML checks passed" + +echo "- Start rST checks" + +echo "- docstrfmt: start" +docstrfmt --check doc/source +echo "- docstrfmt: done" + +echo "- All rST checks passed" + echo "- Start license checks" +echo "- copyright: start" +python -m flwr_tool.check_copyright src/py/flwr +echo "- copyright: done" + echo "- licensecheck: start" python -m licensecheck -u poetry --fail-licenses gpl --zero echo "- licensecheck: done" diff --git a/dev/update-examples.sh b/dev/update-examples.sh deleted file mode 100755 index 1076b4621984..000000000000 --- a/dev/update-examples.sh +++ /dev/null @@ -1,91 +0,0 @@ -#!/bin/bash -set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../ - -ROOT=`pwd` -INDEX=$ROOT/examples/doc/source/index.md -INSERT_LINE=6 - -copy_markdown_files () { - for file in $1/*.md; do - # Copy the README into the source of the Example docs as the name of the example - if [[ $(basename "$file") = "README.md" ]]; then - cp $file $ROOT/examples/doc/source/$1.md 2>&1 >/dev/null - else - # If the example contains other markdown files, copy them to the source of the Example docs - cp $file $ROOT/examples/doc/source/$(basename "$file") 2>&1 >/dev/null - fi - done -} - -add_gh_button () { - gh_text="[\"View](https://github.com/adap/flower/blob/main/examples/$1)" - readme_file="$ROOT/examples/doc/source/$1.md" - - if ! 
grep -Fq "$gh_text" "$readme_file"; then - awk -v text="$gh_text" ' - /^# / && !found { - print $0 "\n" text; - found=1; - next; - } - { print } - ' "$readme_file" > tmpfile && mv tmpfile "$readme_file" - fi -} - -copy_images () { - if [ -d "$1/_static" ]; then - cp $1/_static/**.{jpg,png,jpeg} $ROOT/examples/doc/source/_static/ 2>/dev/null || true - fi -} - -add_to_index () { - (echo $INSERT_LINE; echo a; echo $1; echo .; echo wq) | ed $INDEX 2>&1 >/dev/null -} - -add_single_entry () { - # Copy markdown files to correct folder - copy_markdown_files $1 - - # Add button linked to GitHub - add_gh_button $1 - - # Copy all images of the _static folder into the examples - # docs static folder - copy_images $1 - - # Insert the name of the example into the index file - add_to_index $1 -} - -add_all_entries () { - cd $ROOT/examples - # Iterate through each folder in examples/ - for d in $(printf '%s\n' */ | sort -V); do - # Add entry based on the name of the folder - example=${d%/} - - if [[ $example != doc ]]; then - add_single_entry $example - fi - done -} - -# Clean up before starting -rm -f $ROOT/examples/doc/source/*.md -rm -f $INDEX - -# Create empty index file -touch $INDEX - -echo "# Flower Examples Documentation" >> $INDEX -echo "" >> $INDEX -echo "\`\`\`{toctree}" >> $INDEX -echo "---" >> $INDEX -echo "maxdepth: 1" >> $INDEX -echo "---" >> $INDEX - -add_all_entries - -echo "\`\`\`" >> $INDEX diff --git a/dev/update_python.py b/dev/update_python.py new file mode 100644 index 000000000000..5eea6af75488 --- /dev/null +++ b/dev/update_python.py @@ -0,0 +1,238 @@ +"""Script to update Python versions in the codebase.""" + +import argparse +import re +from pathlib import Path + + +def _compute_old_version(new_version): + """Compute the old version as the immediate previous minor version.""" + major_str, minor_str = new_version.split(".") + major = int(major_str) + minor = int(minor_str) + + if minor > 0: + old_minor = minor - 1 + old_version = f"{major}.{old_minor}" + 
else: + raise ValueError("Minor version is 0, can't infer previous version.") + return old_version + + +def _update_python_versions( + new_full_version, + patch_only=False, + dry_run=False, +): + """Update Python version strings in the specified files.""" + new_major_minor = ".".join(new_full_version.split(".")[:2]) + + if patch_only: + print(f"Updating patch version for {new_major_minor} to {new_full_version}") + + # Define the version pattern to match any full version with the same major.minor + version_pattern = re.escape(new_major_minor) + r"\.\d+" + + # Define the file patterns and corresponding replacement patterns + replacements = { + # Shell scripts + "dev/*.sh": [ + # Update version in scripts + ( + r"(version=\$\{1:-)" + version_pattern + r"(\})", + r"\g<1>" + new_full_version + r"\g<2>", + ), + # Update pyenv uninstall commands + ( + r"(pyenv uninstall -f flower-)" + version_pattern, + r"\g<1>" + new_full_version, + ), + ], + # Python files + "**/*.py": [ + # Update version assignments + ( + r'(["\'])' + version_pattern + r'(["\'])', + r"\g<1>" + new_full_version + r"\g<2>", + ), + ], + # Documentation files + "doc/source/conf.py": [ + # Update Python full version in conf.py + ( + r"(\.\.\s*\|python_full_version\|\s*replace::\s*)" + + version_pattern, + r"\g<1>" + new_full_version, + ), + ], + } + else: + # Compute old_version as immediate previous minor version + old_version = _compute_old_version(new_major_minor) + + print(f"Determined old version: {old_version}") + print( + f"Updating to new version: {new_major_minor} " + f"(full version: {new_full_version})" + ) + + # Define the file patterns and corresponding replacement patterns + replacements = { + # action.yml files + ".github/actions/bootstrap/action.yml": [ + # Update default Python version + ( + r"^(\s*default:\s*)" + re.escape(old_version) + r"(\s*)$", + r"\g<1>" + new_major_minor + r"\g<2>", + ), + ], + # YAML workflow files + ".github/workflows/*.yml": [ + # Update specific python-version 
entries + ( + r"^(\s*python-version:\s*)" + re.escape(old_version) + r"(\s*)$", + r"\g<1>" + new_major_minor + r"\g<2>", + ), + ( + r"(['\"]?)" + re.escape(old_version) + r"(['\"]?,?\s*)", + lambda m: ( + "" if m.group(2).strip() == "," else "" + ), # Handle the case where a comma follows + ), + ], + # Shell scripts + "dev/*.sh": [ + # Update version in scripts + ( + r"(version=\$\{1:-)" + re.escape(old_version) + r"(\.\d+)?(\})", + r"\g<1>" + new_full_version + r"\g<3>", + ), + # Update pyenv uninstall commands + ( + r"(pyenv uninstall -f flower-)" + + re.escape(old_version) + + r"(\.\d+)?", + r"\g<1>" + new_full_version, + ), + ], + # pyproject.toml files + "**/pyproject.toml": [ + # Update python version constraints + ( + r'(python\s*=\s*">=)' + + re.escape(old_version) + + r'(,\s*<\d+\.\d+")', + r"\g<1>" + new_major_minor + r"\g<2>", + ), + ], + "dev/*.py": [ + # Update version assignments + ( + r'(["\'])' + re.escape(old_version) + r'(\.\d+)?(["\'],?)\s*\n?', + lambda m: ( + "" if m.group(3) == "," else "" + ), # Remove version and handle comma if present + ), + ], + # Python files + "**/*.py": [ + # Update version assignments + ( + r'(["\'])' + re.escape(old_version) + r'(\.\d+)?(["\'])', + r"\g<1>" + new_full_version + r"\g<3>", + ), + ], + # Documentation files + "doc/source/conf.py": [ + # Update Python version in conf.py + ( + r"(\.\.\s*\|python_version\|\s*replace::\s*)" + + re.escape(old_version), + r"\g<1>" + new_major_minor, + ), + # Update Python full version in conf.py + ( + r"(\.\.\s*\|python_full_version\|\s*replace::\s*)" + + re.escape(old_version) + + r"\.\d+", + r"\g<1>" + new_full_version, + ), + ], + # ReStructuredText files + "doc/source/*.rst": [ + # Update Python version in rst files + ( + r"(`Python\s*" + + re.escape(old_version) + + r"\s*`_)", + r"`Python " + + new_major_minor + + " `_", + ), + ], + # PO files for localization + "doc/locales/*/LC_MESSAGES/framework-docs.po": [ + # Update Python version in localization files + ( + 
r"(`Python\s*" + + re.escape(old_version) + + r"\s*`_)", + r"`Python " + + new_major_minor + + " `_", + ), + ], + } + + # Process each file pattern + for file_pattern, patterns in replacements.items(): + for file_path in Path().rglob(file_pattern): + if not file_path.is_file(): + continue + content = file_path.read_text() + original_content = content + for pattern, repl in patterns: + if callable(repl): + content = re.sub(pattern, repl, content, flags=re.MULTILINE) + else: + content = re.sub(pattern, repl, content, flags=re.MULTILINE) + if content != original_content: + if dry_run: + print(f"Would update {file_path}") + else: + file_path.write_text(content) + print(f"Updated {file_path}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Script to update Python versions in the codebase." + ) + parser.add_argument( + "new_full_version", help="New full Python version to use (e.g., 3.9.22)" + ) + parser.add_argument( + "--patch-only", + action="store_true", + help="Update only the patch version for matching major.minor versions.", + ) + parser.add_argument( + "--dry-run", + action="store_true", + help="Show changes without modifying files.", + ) + args = parser.parse_args() + + _update_python_versions( + new_full_version=args.new_full_version, + patch_only=args.patch_only, + dry_run=args.dry_run, + ) diff --git a/dev/update_version.py b/dev/update_version.py new file mode 100644 index 000000000000..cbb4d8e138c2 --- /dev/null +++ b/dev/update_version.py @@ -0,0 +1,150 @@ +"""Utility used to bump the version of the package.""" + +import argparse +import re +import sys +from pathlib import Path + + +REPLACE_CURR_VERSION = { + "doc/source/conf.py": [ + ".. 
|stable_flwr_version| replace:: {version}", + ], + "src/py/flwr/cli/new/templates/app/pyproject.*.toml.tpl": [ + "flwr[simulation]>={version}", + ], + "src/docker/complete/compose.yml": ["FLWR_VERSION:-{version}"], + "src/docker/distributed/client/compose.yml": ["FLWR_VERSION:-{version}"], + "src/docker/distributed/server/compose.yml": ["FLWR_VERSION:-{version}"], +} + +REPLACE_NEXT_VERSION = { + "pyproject.toml": ['version = "{version}"'], + "doc/source/conf.py": [ + 'release = "{version}"', + ], + "examples/doc/source/conf.py": ['release = "{version}"'], + "baselines/doc/source/conf.py": ['release = "{version}"'], +} + +EXAMPLES = { + "examples/*/pyproject.toml": [ + "flwr[simulation]=={version}", + "flwr[simulation]>={version}", + ], +} + + +def _get_next_version(curr_version, increment): + """Calculate the next version based on the type of release.""" + major, minor, patch_version = map(int, curr_version.split(".")) + if increment == "patch": + patch_version += 1 + elif increment == "minor": + minor += 1 + patch_version = 0 + elif increment == "major": + major += 1 + minor = 0 + patch_version = 0 + else: + raise ValueError( + "Invalid increment type. Must be 'major', 'minor', or 'patch'." 
+ ) + return f"{major}.{minor}.{patch_version}" + + +def _update_versions(file_patterns, replace_strings, new_version, check): + """Update the version strings in the specified files.""" + wrong = False + for pattern in file_patterns: + files = list(Path(__file__).parents[1].glob(pattern)) + for file_path in files: + if not file_path.is_file(): + continue + content = file_path.read_text() + original_content = content + for s in replace_strings: + # Construct regex pattern to match any version number in the string + escaped_s = re.escape(s).replace(r"\{version\}", r"(\d+\.\d+\.\d+)") + regex_pattern = re.compile(escaped_s) + content = regex_pattern.sub(s.format(version=new_version), content) + if content != original_content: + wrong = True + if check: + print(f"{file_path} would be updated") + else: + file_path.write_text(content) + print(f"Updated {file_path}") + + return wrong + + +if __name__ == "__main__": + conf_path = Path("doc/source/conf.py") + + if not conf_path.is_file(): + raise FileNotFoundError(f"{conf_path} not found!") + + content = conf_path.read_text() + + # Search for the current non-updated version + match = re.search(r"\.\.\s*\|stable_flwr_version\|\s*replace::\s*(\S+)", content) + + parser = argparse.ArgumentParser( + description="Utility used to bump the version of the package." + ) + parser.add_argument( + "--old_version", + help="Current (non-updated) version of the package, soon to be the old version.", + default=match.group(1) if match else None, + ) + parser.add_argument( + "--check", action="store_true", help="Fails if any file would be modified." + ) + parser.add_argument( + "--examples", action="store_true", help="Also modify flwr version in examples." + ) + + group = parser.add_mutually_exclusive_group() + group.add_argument( + "--patch", action="store_true", help="Increment the patch version." + ) + group.add_argument( + "--major", action="store_true", help="Increment the major version." 
+ ) + args = parser.parse_args() + + if not args.old_version: + raise ValueError("Version not found in conf.py, please provide current version") + + # Determine the type of version increment + if args.major: + increment = "major" + elif args.patch: + increment = "patch" + else: + increment = "minor" + + curr_version = _get_next_version(args.old_version, increment) + next_version = _get_next_version(curr_version, "minor") + + wrong = False + + # Update files with next version + for file_pattern, strings in REPLACE_NEXT_VERSION.items(): + if not _update_versions([file_pattern], strings, next_version, args.check): + wrong = True + + # Update files with current version + for file_pattern, strings in REPLACE_CURR_VERSION.items(): + if not _update_versions([file_pattern], strings, curr_version, args.check): + wrong = True + + if args.examples: + for file_pattern, strings in EXAMPLES.items(): + if not _update_versions([file_pattern], strings, curr_version, args.check): + wrong = True + + if wrong and args.check: + sys.exit("Some version haven't been updated.") diff --git a/dev/venv-create.sh b/dev/venv-create.sh index 63e82131d2fb..112f3a4b2917 100755 --- a/dev/venv-create.sh +++ b/dev/venv-create.sh @@ -2,7 +2,7 @@ set -e cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../ -version=${1:-3.8.17} +version=${1:-3.9.20} # Check if the directory for the Python version does not exist and if so, # install the right Python version through pyenv diff --git a/dev/venv-delete.sh b/dev/venv-delete.sh index 3a74d2fb8a4e..50bed76b203f 100755 --- a/dev/venv-delete.sh +++ b/dev/venv-delete.sh @@ -2,6 +2,6 @@ set -e cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../ -version=${1:-3.8.17} +version=${1:-3.9.20} pyenv uninstall -f flower-$version diff --git a/dev/venv-reset.sh b/dev/venv-reset.sh index 69713f7df62a..5ab05f29c137 100755 --- a/dev/venv-reset.sh +++ b/dev/venv-reset.sh @@ -2,7 +2,7 @@ set -e cd "$( cd "$( dirname 
"${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../ -version=${1:-3.8.17} +version=${1:-3.9.20} # Delete caches, venv, and lock file ./dev/rm-caches.sh diff --git a/doc/build-versioned-docs.sh b/doc/build-versioned-docs.sh index 6c1b6dd9c5fc..772250865143 100755 --- a/doc/build-versioned-docs.sh +++ b/doc/build-versioned-docs.sh @@ -20,7 +20,7 @@ cd doc # Get a list of languages based on the folders in locales languages="en `find locales/ -mindepth 1 -maxdepth 1 -type d -exec basename '{}' \;`" # Get a list of tags, excluding those before v1.0.0 -versions="`git for-each-ref '--format=%(refname:lstrip=-1)' refs/tags/ | grep -iE '^v((([1-9]|[0-9]{2,}).*\.([5-9]|[0-9]{2,}).*)|([2-9]|[0-9]{2,}).*)$'`" +versions="`git for-each-ref '--format=%(refname:lstrip=-1)' refs/tags/ | grep -iE '^v((([1-9]|[0-9]{2,}).*\.([8-9]|[0-9]{2,}).*)|([2-9]|[0-9]{2,}).*)$'`" for current_version in ${versions}; do @@ -82,9 +82,9 @@ done # Build the main version (main for GH CI, local branch for local) if [ $GITHUB_ACTIONS ] then - git switch main + git checkout --force main else - git switch $current_branch + git checkout --force $current_branch fi current_version=main diff --git a/doc/locales/fr/LC_MESSAGES/framework-docs.po b/doc/locales/fr/LC_MESSAGES/framework-docs.po index 67edf687bbbe..a11f44f6bd59 100644 --- a/doc/locales/fr/LC_MESSAGES/framework-docs.po +++ b/doc/locales/fr/LC_MESSAGES/framework-docs.po @@ -3,7 +3,7 @@ msgid "" msgstr "" "Project-Id-Version: Flower Docs\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2024-05-28 11:47+0200\n" +"POT-Creation-Date: 2024-10-10 00:29+0000\n" "PO-Revision-Date: 2023-09-05 17:54+0000\n" "Last-Translator: Charles Beauville \n" "Language: fr\n" @@ -13,299 +13,467 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.15.0\n" +"Generated-By: Babel 2.16.0\n" -#: ../../source/contributor-explanation-architecture.rst:2 -msgid "Flower Architecture" 
-msgstr "Architecture florale" +#: ../../source/contributor-explanation-public-and-private-apis.rst:2 +msgid "Public and private APIs" +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:5 -msgid "Edge Client Engine" -msgstr "Moteur client Edge" +#: ../../source/contributor-explanation-public-and-private-apis.rst:4 +msgid "" +"In Python, everything is public. To enable developers to understand which" +" components can be relied upon, Flower declares a public API. Components " +"that are part of the public API can be relied upon. Changes to the public" +" API are announced in the release notes and are subject to deprecation " +"policies." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:7 +#: ../../source/contributor-explanation-public-and-private-apis.rst:9 msgid "" -"`Flower `_ core framework architecture with Edge " -"Client Engine" -msgstr "`Flower `_ architecture de base avec Edge Client Engine" +"Everything that is not part of the public API is part of the private API." +" Even though Python allows accessing them, user code should never use " +"those components. Private APIs can change at any time, even in patch " +"releases." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:13 -msgid "Virtual Client Engine" -msgstr "Moteur de client virtuel" +#: ../../source/contributor-explanation-public-and-private-apis.rst:13 +msgid "" +"How can you determine whether a component is part of the public API or " +"not? Easy:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:15 +msgid "`Use the Flower API reference documentation `_" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:16 +msgid "`Use the Flower CLI reference documentation `_" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:18 +msgid "" +"Everything listed in the reference documentation is part of the public " +"API. 
This document explains how Flower maintainers define the public API " +"and how you can determine whether a component is part of the public API " +"or not by reading the Flower source code." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:23 +#, fuzzy +msgid "Flower public API" +msgstr "Flower ClientApp." + +#: ../../source/contributor-explanation-public-and-private-apis.rst:25 +msgid "Flower has a well-defined public API. Let's look at this in more detail." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:29 +msgid "" +"Every component that is reachable by recursively following " +"``__init__.__all__`` starting from the root package (``flwr``) is part of" +" the public API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:32 +msgid "" +"If you want to determine whether a component " +"(class/function/generator/...) is part of the public API or not, you need" +" to start at the root of the ``flwr`` package. Let's use ``tree -L 1 -d " +"src/py/flwr`` to look at the Python sub-packages contained ``flwr``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:46 +msgid "" +"Contrast this with the definition of ``__all__`` in the root " +"``src/py/flwr/__init__.py``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:59 +msgid "" +"You can see that ``flwr`` has six subpackages (``cli``, ``client``, " +"``common``, ``proto``, ``server``, ``simulation``), but only four of them" +" are \"exported\" via ``__all__`` (``client``, ``common``, ``server``, " +"``simulation``)." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:63 +msgid "" +"What does this mean? It means that ``client``, ``common``, ``server`` and" +" ``simulation`` are part of the public API, but ``cli`` and ``proto`` are" +" not. The ``flwr`` subpackages ``cli`` and ``proto`` are private APIs. 
A " +"private API can change completely from one release to the next (even in " +"patch releases). It can change in a breaking way, it can be renamed (for " +"example, ``flwr.cli`` could be renamed to ``flwr.command``) and it can " +"even be removed completely." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:70 +msgid "Therefore, as a Flower user:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:72 +msgid "``from flwr import client`` ✅ Ok, you're importing a public API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:73 +msgid "" +"``from flwr import proto`` ❌ Not recommended, you're importing a private " +"API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:75 +msgid "" +"What about components that are nested deeper in the hierarchy? Let's look" +" at Flower strategies to see another typical pattern. Flower strategies " +"like ``FedAvg`` are often imported using ``from flwr.server.strategy " +"import FedAvg``. Let's look at " +"``src/py/flwr/server/strategy/__init__.py``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:91 +msgid "" +"What's notable here is that all strategies are implemented in dedicated " +"modules (e.g., ``fedavg.py``). In ``__init__.py``, we *import* the " +"components we want to make part of the public API and then *export* them " +"via ``__all__``. Note that we export the component itself (for example, " +"the ``FedAvg`` class), but not the module it is defined in (for example, " +"``fedavg.py``). This allows us to move the definition of ``FedAvg`` into " +"a different module (or even a module in a subpackage) without breaking " +"the public API (as long as we update the import path in ``__init__.py``)." 
+msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:99 +msgid "Therefore:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:101 +msgid "" +"``from flwr.server.strategy import FedAvg`` ✅ Ok, you're importing a " +"class that is part of the public API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:103 +msgid "" +"``from flwr.server.strategy import fedavg`` ❌ Not recommended, you're " +"importing a private module." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:106 +msgid "" +"This approach is also implemented in the tooling that automatically " +"builds API reference docs." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:110 +msgid "Flower public API of private packages" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:112 +msgid "" +"We also use this to define the public API of private subpackages. Public," +" in this context, means the API that other ``flwr`` subpackages should " +"use. For example, ``flwr.server.driver`` is a private subpackage (it's " +"not exported via ``src/py/flwr/server/__init__.py``'s ``__all__``)." 
+msgstr "" -#: ../../source/contributor-explanation-architecture.rst:15 +#: ../../source/contributor-explanation-public-and-private-apis.rst:117 msgid "" -"`Flower `_ core framework architecture with Virtual " -"Client Engine" +"Still, the private sub-package ``flwr.server.driver`` defines a " +"\"public\" API using ``__all__`` in " +"``src/py/flwr/server/driver/__init__.py``:" msgstr "" -"`Flower `_ architecture de base avec moteur de client " -"virtuel" -#: ../../source/contributor-explanation-architecture.rst:21 -msgid "Virtual Client Engine and Edge Client Engine in the same workload" -msgstr "Moteur client virtuel et moteur client Edge dans la même charge de travail" +#: ../../source/contributor-explanation-public-and-private-apis.rst:132 +msgid "" +"The interesting part is that both ``GrpcDriver`` and ``InMemoryDriver`` " +"are never used by Flower framework users, only by other parts of the " +"Flower framework codebase. Those other parts of the codebase import, for " +"example, ``InMemoryDriver`` using ``from flwr.server.driver import " +"InMemoryDriver`` (i.e., the ``InMemoryDriver`` exported via ``__all__``)," +" not ``from flwr.server.driver.in_memory_driver import InMemoryDriver`` " +"(``in_memory_driver.py`` is the module containing the actual " +"``InMemoryDriver`` class definition)." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:23 +#: ../../source/contributor-explanation-public-and-private-apis.rst:140 msgid "" -"`Flower `_ core framework architecture with both " -"Virtual Client Engine and Edge Client Engine" +"This is because ``flwr.server.driver`` defines a public interface for " +"other ``flwr`` subpackages. This allows codeowners of " +"``flwr.server.driver`` to refactor the package without breaking other " +"``flwr``-internal users." 
msgstr "" -"`Flower `_ architecture de base avec un moteur de " -"client virtuel et un moteur de client périphérique" #: ../../source/contributor-how-to-build-docker-images.rst:2 -msgid "How to build Docker Flower images locally" +msgid "How to Build Docker Flower Images Locally" msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:4 msgid "" "Flower provides pre-made docker images on `Docker Hub " "`_ that include all necessary dependencies" -" for running the SuperLink. You can also build your own custom docker " -"images from scratch with a different version of Python or Ubuntu if that " -"is what you need. In this guide, we will explain what images exist and " -"how to build them locally." +" for running the SuperLink, SuperNode or ServerApp. You can also build " +"your own custom docker images from scratch with a different version of " +"Python or Linux distribution (Ubuntu/Alpine) if that is what you need. In" +" this guide, we will explain what images exist and how to build them " +"locally." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:9 +#: ../../source/contributor-how-to-build-docker-images.rst:10 msgid "" "Before we can start, we need to meet a few prerequisites in our local " "development environment." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:11 +#: ../../source/contributor-how-to-build-docker-images.rst:13 #, fuzzy -msgid "Clone the flower repository." +msgid "Clone the ``flower`` repository." msgstr "**Fourche le dépôt de Flower**" -#: ../../source/contributor-how-to-build-docker-images.rst:17 -#: ../../source/how-to-run-flower-using-docker.rst:144 -msgid "Verify the Docker daemon is running." -msgstr "" - #: ../../source/contributor-how-to-build-docker-images.rst:19 -#: ../../source/how-to-run-flower-using-docker.rst:146 -msgid "" -"Please follow the first section on :doc:`Run Flower using Docker ` which covers this step in more detail." 
-msgstr "" - -#: ../../source/contributor-how-to-build-docker-images.rst:23 -msgid "" -"Currently, Flower provides two images, a ``base`` image and a " -"``superlink`` image. The base image, as the name suggests, contains basic" -" dependencies that the SuperLink needs. This includes system " -"dependencies, Python and Python tools. The SuperLink image is based on " -"the base image, but it additionally installs the SuperLink using ``pip``." +msgid "Verify the Docker daemon is running." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:28 +#: ../../source/contributor-how-to-build-docker-images.rst:21 msgid "" "The build instructions that assemble the images are located in the " "respective Dockerfiles. You can find them in the subdirectories of " "``src/docker``." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:31 +#: ../../source/contributor-how-to-build-docker-images.rst:24 msgid "" -"Both, base and SuperLink image are configured via build arguments. " -"Through build arguments, we can make our build more flexible. For " -"example, in the base image, we can specify the version of Python to " -"install using the ``PYTHON_VERSION`` build argument. Some of the build " -"arguments have default values, others must be specified when building the" -" image. All available build arguments for each image are listed in one of" -" the tables below." +"Flower Docker images are configured via build arguments. Through build " +"arguments, we can make the creation of images more flexible. For example," +" in the base image, we can specify the version of Python to install using" +" the ``PYTHON_VERSION`` build argument. Some of the build arguments have " +"default values, others must be specified when building the image. All " +"available build arguments for each image are listed in one of the tables " +"below." 
msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:38 +#: ../../source/contributor-how-to-build-docker-images.rst:32 #, fuzzy -msgid "Building the base image" +msgid "Building the Base Image" msgstr "Chargement des données" -#: ../../source/contributor-how-to-build-docker-images.rst:44 -#: ../../source/contributor-how-to-build-docker-images.rst:86 +#: ../../source/contributor-how-to-build-docker-images.rst:38 +#: ../../source/contributor-how-to-build-docker-images.rst:104 #, fuzzy msgid "Build argument" msgstr "Amélioration de la documentation" -#: ../../source/contributor-how-to-build-docker-images.rst:45 -#: ../../source/contributor-how-to-build-docker-images.rst:87 +#: ../../source/contributor-how-to-build-docker-images.rst:39 +#: ../../source/contributor-how-to-build-docker-images.rst:105 #, fuzzy msgid "Description" msgstr "Dépréciations" -#: ../../source/contributor-how-to-build-docker-images.rst:46 -#: ../../source/contributor-how-to-build-docker-images.rst:88 +#: ../../source/contributor-how-to-build-docker-images.rst:40 +#: ../../source/contributor-how-to-build-docker-images.rst:106 #, fuzzy msgid "Required" msgstr "Changements nécessaires" -#: ../../source/contributor-how-to-build-docker-images.rst:47 -#: ../../source/contributor-how-to-build-docker-images.rst:89 +#: ../../source/contributor-how-to-build-docker-images.rst:41 +#: ../../source/contributor-how-to-build-docker-images.rst:107 +#: ../../source/docker/persist-superlink-state.rst:19 +#: ../../source/docker/pin-version.rst:12 +#: ../../source/docker/set-environment-variables.rst:8 #, fuzzy msgid "Example" msgstr "Exemples de PyTorch" +#: ../../source/contributor-how-to-build-docker-images.rst:42 +msgid "``DISTRO``" +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:43 +#, fuzzy +msgid "The Linux distribution to use as the base image." 
+msgstr "Chargement des données" + +#: ../../source/contributor-how-to-build-docker-images.rst:44 #: ../../source/contributor-how-to-build-docker-images.rst:48 -#: ../../source/contributor-how-to-build-docker-images.rst:94 +#: ../../source/contributor-how-to-build-docker-images.rst:52 +#: ../../source/contributor-how-to-build-docker-images.rst:68 +#: ../../source/contributor-how-to-build-docker-images.rst:75 +#: ../../source/contributor-how-to-build-docker-images.rst:110 #, fuzzy -msgid "``PYTHON_VERSION``" +msgid "No" +msgstr "Aucun" + +#: ../../source/contributor-how-to-build-docker-images.rst:45 +msgid "``ubuntu``" +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:46 +#, fuzzy +msgid "``DISTRO_VERSION``" msgstr "Version Python" +#: ../../source/contributor-how-to-build-docker-images.rst:47 +msgid "Version of the Linux distribution." +msgstr "" + #: ../../source/contributor-how-to-build-docker-images.rst:49 -msgid "Version of ``python`` to be installed." +msgid ":substitution-code:`|ubuntu_version|`" msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:50 -#: ../../source/contributor-how-to-build-docker-images.rst:54 -#: ../../source/contributor-how-to-build-docker-images.rst:58 -#: ../../source/contributor-how-to-build-docker-images.rst:108 #, fuzzy -msgid "Yes" -msgstr "Types" +msgid "``PYTHON_VERSION``" +msgstr "Version Python" #: ../../source/contributor-how-to-build-docker-images.rst:51 -#, fuzzy -msgid "``3.11``" -msgstr "1.0.0rc1" +msgid "Version of ``python`` to be installed." +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:52 +#: ../../source/contributor-how-to-build-docker-images.rst:53 +msgid "``3.11`` or ``3.11.1``" +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:54 msgid "``PIP_VERSION``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:53 +#: ../../source/contributor-how-to-build-docker-images.rst:55 msgid "Version of ``pip`` to be installed." 
msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:55 +#: ../../source/contributor-how-to-build-docker-images.rst:56 +#: ../../source/contributor-how-to-build-docker-images.rst:60 +#: ../../source/contributor-how-to-build-docker-images.rst:64 +#: ../../source/contributor-how-to-build-docker-images.rst:114 #, fuzzy -msgid "``23.0.1``" -msgstr "1.0.0rc1" +msgid "Yes" +msgstr "Types" -#: ../../source/contributor-how-to-build-docker-images.rst:56 -msgid "``SETUPTOOLS_VERSION``" +#: ../../source/contributor-how-to-build-docker-images.rst:57 +msgid ":substitution-code:`|pip_version|`" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:57 -msgid "Version of ``setuptools`` to be installed." +#: ../../source/contributor-how-to-build-docker-images.rst:58 +msgid "``SETUPTOOLS_VERSION``" msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:59 -#, fuzzy -msgid "``69.0.2``" -msgstr "``1.0.0b0``" - -#: ../../source/contributor-how-to-build-docker-images.rst:60 -#: ../../source/contributor-how-to-build-docker-images.rst:98 -msgid "``UBUNTU_VERSION``" +msgid "Version of ``setuptools`` to be installed." msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:61 -msgid "Version of the official Ubuntu Docker image." +msgid ":substitution-code:`|setuptools_version|`" msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:62 -msgid "Defaults to ``22.04``." +msgid "``FLWR_VERSION``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:65 -msgid "" -"The following example creates a base image with Python 3.11.0, pip 23.0.1" -" and setuptools 69.0.2:" +#: ../../source/contributor-how-to-build-docker-images.rst:63 +msgid "Version of Flower to be installed." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:76 -msgid "" -"The name of image is ``flwr_base`` and the tag ``0.1.0``. 
Remember that " -"the build arguments as well as the name and tag can be adapted to your " -"needs. These values serve as examples only." +#: ../../source/contributor-how-to-build-docker-images.rst:65 +msgid ":substitution-code:`|stable_flwr_version|`" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:80 -#, fuzzy -msgid "Building the SuperLink image" -msgstr "Démarrer le serveur" - -#: ../../source/contributor-how-to-build-docker-images.rst:90 -msgid "``BASE_REPOSITORY``" +#: ../../source/contributor-how-to-build-docker-images.rst:66 +msgid "``FLWR_PACKAGE``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:91 -msgid "The repository name of the base image." +#: ../../source/contributor-how-to-build-docker-images.rst:67 +msgid "The Flower package to be installed." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:92 -msgid "Defaults to ``flwr/base``." +#: ../../source/contributor-how-to-build-docker-images.rst:69 +msgid "``flwr`` or ``flwr-nightly``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:95 +#: ../../source/contributor-how-to-build-docker-images.rst:70 #, fuzzy -msgid "The Python version of the base image." -msgstr "Évaluer la réponse d'un client." +msgid "``FLWR_VERSION_REF``" +msgstr "Version Python" -#: ../../source/contributor-how-to-build-docker-images.rst:96 -msgid "Defaults to ``py3.11``." +#: ../../source/contributor-how-to-build-docker-images.rst:71 +msgid "" +"A `direct reference " +"`_ without the ``@`` specifier. If both " +"``FLWR_VERSION`` and ``FLWR_VERSION_REF`` are specified, the " +"``FLWR_VERSION_REF`` has precedence." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:99 +#: ../../source/contributor-how-to-build-docker-images.rst:76 #, fuzzy -msgid "The Ubuntu version of the base image." 
-msgstr "Chargement des données" +msgid "`Direct Reference Examples`_" +msgstr "Demande pour un nouveau Flower Example" -#: ../../source/contributor-how-to-build-docker-images.rst:100 -msgid "Defaults to ``ubuntu22.04``." +#: ../../source/contributor-how-to-build-docker-images.rst:78 +msgid "" +"The following example creates a base Ubuntu/Alpine image with Python " +"``3.11.0``, pip :substitution-code:`|pip_version|`, setuptools " +":substitution-code:`|setuptools_version|` and Flower :substitution-" +"code:`|stable_flwr_version|`:" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:102 -msgid "``FLWR_PACKAGE``" +#: ../../source/contributor-how-to-build-docker-images.rst:93 +msgid "" +"In this example, we specify our image name as ``flwr_base`` and the tag " +"as ``0.1.0``. Remember that the build arguments as well as the name and " +"tag can be adapted to your needs. These values serve as examples only." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:103 -msgid "The PyPI package to install." +#: ../../source/contributor-how-to-build-docker-images.rst:98 +#, fuzzy +msgid "Building a Flower Binary Image" +msgstr "Chargement des données" + +#: ../../source/contributor-how-to-build-docker-images.rst:108 +msgid "``BASE_REPOSITORY``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:104 -#, fuzzy -msgid "Defaults to ``flwr``." -msgstr "Flux de travail" +#: ../../source/contributor-how-to-build-docker-images.rst:109 +msgid "The repository name of the base image." +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:106 -msgid "``FLWR_VERSION``" +#: ../../source/contributor-how-to-build-docker-images.rst:111 +msgid "``flwr/base``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:107 -msgid "Version of Flower to be installed." 
+#: ../../source/contributor-how-to-build-docker-images.rst:112 +msgid "``BASE_IMAGE``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:109 +#: ../../source/contributor-how-to-build-docker-images.rst:113 #, fuzzy -msgid "``1.8.0``" -msgstr "``1.0.0b0``" +msgid "The Tag of the Flower base image." +msgstr "Chargement des données" -#: ../../source/contributor-how-to-build-docker-images.rst:112 -msgid "" -"The following example creates a SuperLink image with the official Flower " -"base image py3.11-ubuntu22.04 and Flower 1.8.0:" +#: ../../source/contributor-how-to-build-docker-images.rst:115 +msgid ":substitution-code:`|stable_flwr_version|-py3.11-ubuntu|ubuntu_version|`" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:122 +#: ../../source/contributor-how-to-build-docker-images.rst:117 msgid "" -"The name of image is ``flwr_superlink`` and the tag ``0.1.0``. Remember " -"that the build arguments as well as the name and tag can be adapted to " -"your needs. These values serve as examples only." +"For example, to build a SuperLink image with the latest Flower version, " +"Python 3.11 and Ubuntu 22.04, run the following:" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:125 +#: ../../source/contributor-how-to-build-docker-images.rst:128 msgid "" "If you want to use your own base image instead of the official Flower " -"base image, all you need to do is set the ``BASE_REPOSITORY``, " -"``PYTHON_VERSION`` and ``UBUNTU_VERSION`` build arguments." +"base image, all you need to do is set the ``BASE_REPOSITORY`` build " +"argument to ``flwr_base`` (as we've specified above)." 
msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:138 +#: ../../source/contributor-how-to-build-docker-images.rst:140 msgid "After creating the image, we can test whether the image is working:" msgstr "" +#: ../../source/contributor-how-to-build-docker-images.rst:147 +#, fuzzy +msgid "Direct Reference Examples" +msgstr "Demande pour un nouveau Flower Example" + #: ../../source/contributor-how-to-contribute-translations.rst:2 #, fuzzy msgid "Contribute translations" @@ -344,7 +512,7 @@ msgid "" "`_." msgstr "" -#: ../../source/contributor-how-to-contribute-translations.rst:29 +#: ../../source/contributor-how-to-contribute-translations.rst:28 msgid "" "Once you are signed in to Weblate, you can navigate to the `Flower " "Framework project `_." msgstr "" -#: ../../source/contributor-how-to-contribute-translations.rst:67 +#: ../../source/contributor-how-to-contribute-translations.rst:64 msgid "Add new languages" msgstr "" -#: ../../source/contributor-how-to-contribute-translations.rst:69 +#: ../../source/contributor-how-to-contribute-translations.rst:66 msgid "" "If you want to add a new language, you will first have to contact us, " "either on `Slack `_, or by opening an issue" " on our `GitHub repo `_." msgstr "" -#: ../../source/contributor-how-to-create-new-messages.rst:2 -msgid "Creating New Messages" -msgstr "Création de nouveaux messages" - -#: ../../source/contributor-how-to-create-new-messages.rst:4 -msgid "" -"This is a simple guide for creating a new type of message between the " -"server and clients in Flower." -msgstr "" -"Voici un guide simple pour créer un nouveau type de message entre le " -"serveur et les clients dans Flower." - -#: ../../source/contributor-how-to-create-new-messages.rst:6 -msgid "" -"Let's suppose we have the following example functions in " -":code:`server.py` and :code:`numpy_client.py`..." -msgstr "" -"Supposons que nous ayons les fonctions suivantes dans :code:`server.py` " -"et :code:`numpy_client.py`..." 
- -#: ../../source/contributor-how-to-create-new-messages.rst:8 -msgid "Server's side:" -msgstr "Côté serveur :" - -#: ../../source/contributor-how-to-create-new-messages.rst:17 -msgid "Client's side:" -msgstr "Côté client :" - -#: ../../source/contributor-how-to-create-new-messages.rst:26 -msgid "" -"Let's now see what we need to implement in order to get this simple " -"function between the server and client to work!" -msgstr "" -"Voyons maintenant ce que nous devons mettre en œuvre pour que cette " -"simple fonction entre le serveur et le client fonctionne !" - -#: ../../source/contributor-how-to-create-new-messages.rst:30 -msgid "Message Types for Protocol Buffers" -msgstr "Types de messages pour les tampons de protocole" - -#: ../../source/contributor-how-to-create-new-messages.rst:32 -#, fuzzy -msgid "" -"The first thing we need to do is to define a message type for the RPC " -"system in :code:`transport.proto`. Note that we have to do it for both " -"the request and response messages. For more details on the syntax of " -"proto3, please see the `official documentation `_." -msgstr "" -"La première chose à faire est de définir un type de message pour le " -"système RPC dans :code:`transport.proto`. Notez que nous devons le faire " -"à la fois pour les messages de demande et de réponse. Pour plus de " -"détails sur la syntaxe de proto3, veuillez consulter la `documentation " -"officielle `_." - -#: ../../source/contributor-how-to-create-new-messages.rst:35 -msgid "Within the :code:`ServerMessage` block:" -msgstr "Dans le bloc :code:`ServerMessage` :" - -#: ../../source/contributor-how-to-create-new-messages.rst:52 -msgid "Within the ClientMessage block:" -msgstr "Dans le bloc ClientMessage :" - -#: ../../source/contributor-how-to-create-new-messages.rst:70 -msgid "" -"Make sure to also add a field of the newly created message type in " -":code:`oneof msg`." 
-msgstr "" -"Veille à ajouter également un champ du type de message nouvellement créé " -"dans :code:`oneof msg`." - -#: ../../source/contributor-how-to-create-new-messages.rst:72 -msgid "Once that is done, we will compile the file with:" -msgstr "Une fois que c'est fait, nous compilerons le fichier avec :" - -#: ../../source/contributor-how-to-create-new-messages.rst:78 -msgid "If it compiles successfully, you should see the following message:" -msgstr "S'il se compile avec succès, tu devrais voir le message suivant :" - -#: ../../source/contributor-how-to-create-new-messages.rst:87 -msgid "Serialization and Deserialization Functions" -msgstr "Fonctions de sérialisation et de désérialisation" - -#: ../../source/contributor-how-to-create-new-messages.rst:89 -msgid "" -"Our next step is to add functions to serialize and deserialize Python " -"datatypes to or from our defined RPC message types. You should add these " -"functions in :code:`serde.py`." -msgstr "" -"La prochaine étape consiste à ajouter des fonctions pour sérialiser et " -"désérialiser les types de données Python vers ou à partir des types de " -"messages RPC définis. Tu dois ajouter ces fonctions dans " -":code:`serde.py`." 
- -#: ../../source/contributor-how-to-create-new-messages.rst:91 -msgid "The four functions:" -msgstr "Les quatre fonctions :" - -#: ../../source/contributor-how-to-create-new-messages.rst:112 -msgid "Sending the Message from the Server" -msgstr "Envoi du message à partir du serveur" - -#: ../../source/contributor-how-to-create-new-messages.rst:114 -msgid "" -"Now write the request function in your Client Proxy class (e.g., " -":code:`grpc_client_proxy.py`) using the serde functions you just created:" -msgstr "" -"Écris maintenant la fonction de demande dans ta classe Client Proxy (par " -"exemple, :code:`grpc_client_proxy.py`) en utilisant les fonctions serde " -"que tu viens de créer :" - -#: ../../source/contributor-how-to-create-new-messages.rst:128 -msgid "Receiving the Message by the Client" -msgstr "Réception du message par le client" - -#: ../../source/contributor-how-to-create-new-messages.rst:130 -msgid "" -"Last step! Modify the code in :code:`message_handler.py` to check the " -"field of your message and call the :code:`example_response` function. " -"Remember to use the serde functions!" -msgstr "" -"Dernière étape ! Modifie le code dans :code:`message_handler.py` pour " -"vérifier le champ de ton message et appeler la fonction " -":code:`example_response`. N'oublie pas d'utiliser les fonctions serde !" - -#: ../../source/contributor-how-to-create-new-messages.rst:132 -msgid "Within the handle function:" -msgstr "Dans le cadre de la fonction de poignée :" - -#: ../../source/contributor-how-to-create-new-messages.rst:139 -msgid "And add a new function:" -msgstr "Et ajoute une nouvelle fonction :" - -#: ../../source/contributor-how-to-create-new-messages.rst:149 -msgid "Hopefully, when you run your program you will get the intended result!" -msgstr "" -"Avec un peu de chance, lorsque tu exécuteras ton programme, tu obtiendras" -" le résultat escompté !" 
- #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:2 msgid "Develop in VSCode Dev Containers" msgstr "Utiliser les conteneurs VS Code Remote" @@ -570,17 +598,17 @@ msgstr "" "formater le code ou exécuter des tests. À cette fin, nous utilisons " "l'extension VSCode Remote Containers. Qu'est-ce que c'est ?" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:7 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:8 +#, fuzzy msgid "" "The Visual Studio Code Remote - Containers extension lets you use a " "Docker container as a fully-featured development environment. It allows " "you to open any folder inside (or mounted into) a container and take " "advantage of Visual Studio Code's full feature set. A " -":code:`devcontainer.json` file in your project tells VS Code how to " -"access (or create) a development container with a well-defined tool and " -"runtime stack. This container can be used to run an application or to " -"separate tools, libraries, or runtimes needed for working with a " -"codebase." +"``devcontainer.json`` file in your project tells VS Code how to access " +"(or create) a development container with a well-defined tool and runtime " +"stack. This container can be used to run an application or to separate " +"tools, libraries, or runtimes needed for working with a codebase." msgstr "" "L'extension Visual Studio Code Remote - Containers te permet d'utiliser " "un conteneur Docker comme environnement de développement complet. Elle te" @@ -593,7 +621,7 @@ msgstr "" " les outils, les bibliothèques ou les exécutions nécessaires pour " "travailler avec une base de code." -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:9 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:16 msgid "" "Workspace files are mounted from the local file system or copied or " "cloned into the container. 
Extensions are installed and run inside the " @@ -609,7 +637,7 @@ msgstr "" "environnement de développement simplement en te connectant à un autre " "conteneur." -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:11 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:22 #, fuzzy msgid "" "Source: `Official VSCode documentation " @@ -618,19 +646,19 @@ msgstr "" "Source : `Documentation officielle de VSCode " "`_" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:15 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:26 msgid "Getting started" msgstr "Pour commencer" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:17 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:28 #, fuzzy msgid "" -"Configuring and setting up the :code:`Dockerfile` as well the " -"configuration for the devcontainer can be a bit more involved. The good " -"thing is you don't have to do it. Usually it should be enough to install " -"`Docker `_ on your system and " -"ensure its available on your command line. Additionally, install the " -"`VSCode Containers Extension `_ on your system and ensure its" +" available on your command line. Additionally, install the `VSCode " +"Containers Extension `_." msgstr "" "La configuration et le paramétrage du :code:`Dockerfile` ainsi que la " @@ -641,7 +669,7 @@ msgstr "" "`VSCode Containers Extension `_." -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:19 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:35 msgid "" "Now you should be good to go. When starting VSCode, it will ask you to " "run in the container environment and - if you confirm - automatically " @@ -658,7 +686,7 @@ msgstr "" "inférieur gauche de ta fenêtre VSCode et sélectionner l'option " "*(Re)Ouvrir le dossier dans le conteneur*." 
-#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:21 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:41 msgid "" "In some cases your setup might be more involved. For those cases consult " "the following sources:" @@ -666,7 +694,7 @@ msgstr "" "Dans certains cas, ton installation peut être plus complexe. Pour ces " "cas-là, consulte les sources suivantes :" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:23 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:44 #, fuzzy msgid "" "`Developing inside a Container " @@ -677,7 +705,7 @@ msgstr "" "`_" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:24 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:46 #, fuzzy msgid "" "`Remote development in Containers " @@ -709,7 +737,7 @@ msgstr "" "supprimer ``poetry.lock`` (``rm poetry.lock``) avant d'exécuter ``poetry " "install``)." -#: ../../source/contributor-how-to-install-development-versions.rst:12 +#: ../../source/contributor-how-to-install-development-versions.rst:14 msgid "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (without " "extras)" @@ -717,7 +745,7 @@ msgstr "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (sans " "extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:13 +#: ../../source/contributor-how-to-install-development-versions.rst:15 msgid "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true, extras = " "[\"simulation\"] }`` (with extras)" @@ -725,7 +753,7 @@ msgstr "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true, extras = " "[\"simulation\"] }`` (avec extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:15 +#: ../../source/contributor-how-to-install-development-versions.rst:18 msgid "" "Install ``flwr`` from a local copy of the Flower source code via " "``pyproject.toml``:" @@ -733,11 +761,11 @@ msgstr "" 
"Installez ``flwr`` à partir d'une copie locale du code source de Flower " "via ``pyproject.toml`` :" -#: ../../source/contributor-how-to-install-development-versions.rst:17 +#: ../../source/contributor-how-to-install-development-versions.rst:20 msgid "``flwr = { path = \"../../\", develop = true }`` (without extras)" msgstr "``flwr = { path = \"../../\", develop = true }`` (sans extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:18 +#: ../../source/contributor-how-to-install-development-versions.rst:21 msgid "" "``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] " "}`` (with extras)" @@ -745,11 +773,11 @@ msgstr "" "``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] " "}`` (avec extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:20 +#: ../../source/contributor-how-to-install-development-versions.rst:23 msgid "Install ``flwr`` from a local wheel file via ``pyproject.toml``:" msgstr "Installez ``flwr`` à partir d'un fichier local via ``pyproject.toml`` :" -#: ../../source/contributor-how-to-install-development-versions.rst:22 +#: ../../source/contributor-how-to-install-development-versions.rst:25 #, fuzzy msgid "" "``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\" }`` (without" @@ -758,7 +786,7 @@ msgstr "" "``flwr = { path = \"../../dist/flwr-1.0.0-py3-none-any.whl\" }`` (sans " "extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:23 +#: ../../source/contributor-how-to-install-development-versions.rst:26 #, fuzzy msgid "" "``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\", extras = " @@ -767,7 +795,7 @@ msgstr "" "``flwr = { path = \"../../dist/flwr-1.0.0-py3-none-any.whl\", extras = " "[\"simulation\"] }`` (avec extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:25 +#: ../../source/contributor-how-to-install-development-versions.rst:29 msgid "" "Please refer to the Poetry documentation for 
further details: `Poetry " "Dependency Specification `_" -#: ../../source/contributor-how-to-install-development-versions.rst:28 +#: ../../source/contributor-how-to-install-development-versions.rst:33 msgid "Using pip (recommended on Colab)" msgstr "Utiliser pip (recommandé sur Colab)" -#: ../../source/contributor-how-to-install-development-versions.rst:30 +#: ../../source/contributor-how-to-install-development-versions.rst:35 msgid "Install a ``flwr`` pre-release from PyPI:" msgstr "Installe une pré-version de ``flwr`` depuis PyPI :" -#: ../../source/contributor-how-to-install-development-versions.rst:32 +#: ../../source/contributor-how-to-install-development-versions.rst:37 msgid "``pip install -U --pre flwr`` (without extras)" msgstr "``pip install -U --pre flwr`` (sans les extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:33 -msgid "``pip install -U --pre flwr[simulation]`` (with extras)" -msgstr "``pip install -U --pre flwr[simulation]`` (avec les extras)" +#: ../../source/contributor-how-to-install-development-versions.rst:38 +msgid "``pip install -U --pre 'flwr[simulation]'`` (with extras)" +msgstr "``pip install -U --pre 'flwr[simulation]'`` (avec les extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:35 +#: ../../source/contributor-how-to-install-development-versions.rst:40 msgid "" "Python packages can be installed from git repositories. Use one of the " "following commands to install the Flower directly from GitHub." @@ -802,11 +830,11 @@ msgstr "" "Utilise l'une des commandes suivantes pour installer Flower directement à" " partir de GitHub." 
-#: ../../source/contributor-how-to-install-development-versions.rst:37 +#: ../../source/contributor-how-to-install-development-versions.rst:43 msgid "Install ``flwr`` from the default GitHub branch (``main``):" msgstr "Installez ``flwr`` à partir de la branche GitHub par défaut (``main``) :" -#: ../../source/contributor-how-to-install-development-versions.rst:39 +#: ../../source/contributor-how-to-install-development-versions.rst:45 msgid "" "``pip install flwr@git+https://github.com/adap/flower.git`` (without " "extras)" @@ -814,21 +842,21 @@ msgstr "" "``pip install flwr@git+https://github.com/adap/flower.git`` (sans les " "extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:40 +#: ../../source/contributor-how-to-install-development-versions.rst:46 msgid "" -"``pip install flwr[simulation]@git+https://github.com/adap/flower.git`` " -"(with extras)" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'``" +" (with extras)" msgstr "" -"``pip install flwr[simulation]@git+https://github.com/adap/flower.git`` " -"(avec les extras)" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'``" +" (avec les extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:42 +#: ../../source/contributor-how-to-install-development-versions.rst:49 msgid "Install ``flwr`` from a specific GitHub branch (``branch-name``):" msgstr "" "Installez ``flwr`` à partir d'une branche GitHub spécifique (``nom-" "branche``) :" -#: ../../source/contributor-how-to-install-development-versions.rst:44 +#: ../../source/contributor-how-to-install-development-versions.rst:51 msgid "" "``pip install flwr@git+https://github.com/adap/flower.git@branch-name`` " "(without extras)" @@ -836,19 +864,20 @@ msgstr "" "``pip install flwr@git+https://github.com/adap/flower.git@nom-branche`` " "(sans les extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:45 +#: 
../../source/contributor-how-to-install-development-versions.rst:53 +#, fuzzy msgid "" -"``pip install flwr[simulation]@git+https://github.com/adap/flower.git" -"@branch-name`` (with extras)" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git" +"@branch-name'`` (with extras)" msgstr "" -"``pip install flwr[simulation]@git+https://github.com/adap/flower.git" -"@nom-de-branche`` (avec des extras)" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git" +"@nom-de-la-branche'`` (avec des extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:49 +#: ../../source/contributor-how-to-install-development-versions.rst:57 msgid "Open Jupyter Notebooks on Google Colab" msgstr "Ouvre les carnets Jupyter sur Google Colab" -#: ../../source/contributor-how-to-install-development-versions.rst:51 +#: ../../source/contributor-how-to-install-development-versions.rst:59 #, fuzzy msgid "" "Open the notebook ``doc/source/tutorial-series-get-started-with-flower-" @@ -857,7 +886,7 @@ msgstr "" "Ouvrir le notebook ``doc/source/tutorial/Flower-1-Intro-to-FL-" "PyTorch.ipynb`` :" -#: ../../source/contributor-how-to-install-development-versions.rst:53 +#: ../../source/contributor-how-to-install-development-versions.rst:61 #, fuzzy msgid "" "https://colab.research.google.com/github/adap/flower/blob/main/doc/source" @@ -866,7 +895,7 @@ msgstr "" "https://colab.research.google.com/github/adap/flower/blob/main/doc/source" "/tutorial-get-started-with-flower-pytorch.ipynb" -#: ../../source/contributor-how-to-install-development-versions.rst:55 +#: ../../source/contributor-how-to-install-development-versions.rst:63 msgid "" "Open a development version of the same notebook from branch `branch-name`" " by changing ``main`` to ``branch-name`` (right after ``blob``):" @@ -875,7 +904,7 @@ msgstr "" "`nom-branche` en remplaçant `main` par `nom-branche` (juste après `blob`)" " :" -#: 
../../source/contributor-how-to-install-development-versions.rst:57 +#: ../../source/contributor-how-to-install-development-versions.rst:66 #, fuzzy msgid "" "https://colab.research.google.com/github/adap/flower/blob/branch-" @@ -884,21 +913,21 @@ msgstr "" "https://colab.research.google.com/github/adap/flower/blob/branch-" "name/doc/source/tutorial-get-started-with-flower-pytorch.ipynb" -#: ../../source/contributor-how-to-install-development-versions.rst:59 +#: ../../source/contributor-how-to-install-development-versions.rst:68 msgid "Install a `whl` on Google Colab:" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:61 +#: ../../source/contributor-how-to-install-development-versions.rst:70 msgid "" "In the vertical icon grid on the left hand side, select ``Files`` > " "``Upload to session storage``" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:62 +#: ../../source/contributor-how-to-install-development-versions.rst:72 msgid "Upload the whl (e.g., ``flwr-1.8.0-py3-none-any.whl``)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:63 +#: ../../source/contributor-how-to-install-development-versions.rst:73 msgid "" "Change ``!pip install -q 'flwr[simulation]' torch torchvision " "matplotlib`` to ``!pip install -q 'flwr-1.8.0-py3-none-" @@ -917,11 +946,11 @@ msgstr "" "Ce document décrit le processus de diffusion actuel, qui peut ou non " "changer à l'avenir." -#: ../../source/contributor-how-to-release-flower.rst:7 +#: ../../source/contributor-how-to-release-flower.rst:8 msgid "During the release" msgstr "Lors de la sortie" -#: ../../source/contributor-how-to-release-flower.rst:9 +#: ../../source/contributor-how-to-release-flower.rst:10 msgid "" "The version number of a release is stated in ``pyproject.toml``. 
To " "release a new version of Flower, the following things need to happen (in " @@ -931,14 +960,14 @@ msgstr "" "Pour publier une nouvelle version de Flower, les choses suivantes doivent" " se produire (dans cet ordre) :" -#: ../../source/contributor-how-to-release-flower.rst:11 +#: ../../source/contributor-how-to-release-flower.rst:13 msgid "" "Run ``python3 src/py/flwr_tool/update_changelog.py `` in " "order to add every new change to the changelog (feel free to make manual " "changes to the changelog afterwards until it looks good)." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:12 +#: ../../source/contributor-how-to-release-flower.rst:16 msgid "" "Once the changelog has been updated with all the changes, run ``./dev" "/prepare-release-changelog.sh v``, where ```` " @@ -948,7 +977,7 @@ msgid "" "the contributors. Open a pull request with those changes." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:13 +#: ../../source/contributor-how-to-release-flower.rst:22 msgid "" "Once the pull request is merged, tag the release commit with the version " "number as soon as the PR is merged: ``git tag v`` (notice " @@ -957,33 +986,33 @@ msgid "" "artifacts and the relevant part of the changelog." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:14 +#: ../../source/contributor-how-to-release-flower.rst:26 msgid "Check the draft release on GitHub, and if everything is good, publish it." 
msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:17 +#: ../../source/contributor-how-to-release-flower.rst:29 msgid "After the release" msgstr "Après la publication" -#: ../../source/contributor-how-to-release-flower.rst:19 +#: ../../source/contributor-how-to-release-flower.rst:31 msgid "Create a pull request which contains the following changes:" msgstr "Crée une demande de pull qui contient les modifications suivantes :" -#: ../../source/contributor-how-to-release-flower.rst:21 +#: ../../source/contributor-how-to-release-flower.rst:33 msgid "Increase the minor version in ``pyproject.toml`` by one." msgstr "Augmente la version mineure de ``pyproject.toml`` d'une unité." -#: ../../source/contributor-how-to-release-flower.rst:22 +#: ../../source/contributor-how-to-release-flower.rst:34 msgid "Update all files which contain the current version number if necessary." msgstr "" "Mets à jour tous les fichiers qui contiennent le numéro de version actuel" " si nécessaire." -#: ../../source/contributor-how-to-release-flower.rst:23 +#: ../../source/contributor-how-to-release-flower.rst:35 msgid "Add a new ``Unreleased`` section in ``changelog.md``." msgstr "Ajoute une nouvelle section ``Unreleased`` dans ``changelog.md``." -#: ../../source/contributor-how-to-release-flower.rst:25 +#: ../../source/contributor-how-to-release-flower.rst:37 msgid "" "Merge the pull request on the same day (i.e., before a new nightly " "release gets published to PyPI)." @@ -991,15 +1020,15 @@ msgstr "" "Fusionne la pull request le jour même (c'est-à-dire avant qu'une nouvelle" " version nightly ne soit publiée sur PyPI)." 
-#: ../../source/contributor-how-to-release-flower.rst:28 +#: ../../source/contributor-how-to-release-flower.rst:41 msgid "Publishing a pre-release" msgstr "Publier une pré-version" -#: ../../source/contributor-how-to-release-flower.rst:31 +#: ../../source/contributor-how-to-release-flower.rst:44 msgid "Pre-release naming" msgstr "Nom de la pré-version" -#: ../../source/contributor-how-to-release-flower.rst:33 +#: ../../source/contributor-how-to-release-flower.rst:46 msgid "" "PyPI supports pre-releases (alpha, beta, release candidate). Pre-releases" " MUST use one of the following naming patterns:" @@ -1008,39 +1037,39 @@ msgstr "" "Les préversions DOIVENT utiliser l'un des modèles de dénomination " "suivants :" -#: ../../source/contributor-how-to-release-flower.rst:35 +#: ../../source/contributor-how-to-release-flower.rst:49 msgid "Alpha: ``MAJOR.MINOR.PATCHaN``" msgstr "Alpha : ``MAJOR.MINOR.PATCHaN``" -#: ../../source/contributor-how-to-release-flower.rst:36 +#: ../../source/contributor-how-to-release-flower.rst:50 msgid "Beta: ``MAJOR.MINOR.PATCHbN``" msgstr "Bêta : ``MAJOR.MINOR.PATCHbN``" -#: ../../source/contributor-how-to-release-flower.rst:37 +#: ../../source/contributor-how-to-release-flower.rst:51 msgid "Release candidate (RC): ``MAJOR.MINOR.PATCHrcN``" msgstr "Candidat à la publication (RC) : ``MAJOR.MINOR.PATCHrcN``" -#: ../../source/contributor-how-to-release-flower.rst:39 +#: ../../source/contributor-how-to-release-flower.rst:53 msgid "Examples include:" msgstr "Voici quelques exemples :" -#: ../../source/contributor-how-to-release-flower.rst:41 +#: ../../source/contributor-how-to-release-flower.rst:55 msgid "``1.0.0a0``" msgstr "``1.0.0a0``" -#: ../../source/contributor-how-to-release-flower.rst:42 +#: ../../source/contributor-how-to-release-flower.rst:56 msgid "``1.0.0b0``" msgstr "``1.0.0b0``" -#: ../../source/contributor-how-to-release-flower.rst:43 +#: ../../source/contributor-how-to-release-flower.rst:57 msgid "``1.0.0rc0``" msgstr 
"``1.0.0rc0``" -#: ../../source/contributor-how-to-release-flower.rst:44 +#: ../../source/contributor-how-to-release-flower.rst:58 msgid "``1.0.0rc1``" msgstr "1.0.0rc1" -#: ../../source/contributor-how-to-release-flower.rst:46 +#: ../../source/contributor-how-to-release-flower.rst:60 msgid "" "This is in line with PEP-440 and the recommendations from the Python " "Packaging Authority (PyPA):" @@ -1048,11 +1077,11 @@ msgstr "" "Ceci est conforme au PEP-440 et aux recommandations de l'Autorité de " "l'emballage Python (PyPA) :" -#: ../../source/contributor-how-to-release-flower.rst:49 +#: ../../source/contributor-how-to-release-flower.rst:63 msgid "`PEP-440 `_" msgstr "`PEP-440 `_" -#: ../../source/contributor-how-to-release-flower.rst:50 +#: ../../source/contributor-how-to-release-flower.rst:64 msgid "" "`PyPA Choosing a versioning scheme " "`_" -#: ../../source/contributor-how-to-release-flower.rst:52 +#: ../../source/contributor-how-to-release-flower.rst:67 msgid "" "Note that the approach defined by PyPA is not compatible with SemVer " "2.0.0 spec, for details consult the `Semantic Versioning Specification " @@ -1074,17 +1103,17 @@ msgstr "" "Versioning Specification `_ (en particulier le point 11 sur la préséance)." -#: ../../source/contributor-how-to-release-flower.rst:55 +#: ../../source/contributor-how-to-release-flower.rst:73 msgid "Pre-release classification" msgstr "Classification avant publication" -#: ../../source/contributor-how-to-release-flower.rst:57 +#: ../../source/contributor-how-to-release-flower.rst:75 msgid "Should the next pre-release be called alpha, beta, or release candidate?" msgstr "" "La prochaine préversion doit-elle être appelée alpha, bêta ou release " "candidate ?" 
-#: ../../source/contributor-how-to-release-flower.rst:59 +#: ../../source/contributor-how-to-release-flower.rst:77 msgid "" "RC: feature complete, no known issues (apart from issues that are " "classified as \"won't fix\" for the next stable release) - if no issues " @@ -1095,11 +1124,11 @@ msgstr "" "version stable) - si aucun problème n'apparaît, cette version deviendra " "la prochaine version stable" -#: ../../source/contributor-how-to-release-flower.rst:60 +#: ../../source/contributor-how-to-release-flower.rst:80 msgid "Beta: feature complete, allowed to have known issues" msgstr "Bêta : fonctionnalité complète, autorisée à avoir des problèmes connus" -#: ../../source/contributor-how-to-release-flower.rst:61 +#: ../../source/contributor-how-to-release-flower.rst:81 msgid "Alpha: not feature complete, allowed to have known issues" msgstr "" "Alpha : les fonctionnalités ne sont pas complètes, les problèmes connus " @@ -1122,22 +1151,22 @@ msgstr "" "Anaconda. Tu peux suivre les instructions ou choisir la configuration que" " tu préfères." -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:9 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:10 msgid "Python Version" msgstr "Version Python" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:11 -#: ../../source/how-to-install-flower.rst:8 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:12 +#: ../../source/how-to-install-flower.rst:7 #, fuzzy msgid "" -"Flower requires at least `Python 3.8 `_, " +"Flower requires at least `Python 3.9 `_, " "but `Python 3.10 `_ or above is " "recommended." msgstr "" -"Flower nécessite `Python 3.7 `_ ou plus, " -"nous recommandons `Python 3.8 `_." +"Flower nécessite `Python 3.9 `_ ou plus, " +"nous recommandons `Python 3.10 `_." 
-#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:14 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:17 msgid "" "Due to a known incompatibility with `ray " "`_, we currently recommend utilizing at " @@ -1145,12 +1174,12 @@ msgid "" "simulations." msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:19 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:22 #, fuzzy msgid "Virtualenv with Pyenv/Virtualenv" msgstr "Virutualenv avec Pyenv/Virtualenv" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:21 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:24 msgid "" "One of the recommended virtual environment is `pyenv " "`_/`virtualenv `_ pour plus de " "détails." -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:23 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:29 #, fuzzy msgid "" "Once Pyenv is set up, you can use it to install `Python Version 3.10 " @@ -1172,19 +1201,19 @@ msgstr "" "Une fois Pyenv mis en place, tu peux l'utiliser pour installer `Python " "Version 3.7 `_ ou supérieure :" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:29 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:36 msgid "Create the virtualenv with:" msgstr "Crée le virtualenv avec :" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:36 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:42 msgid "Activate the virtualenv by running the following command:" msgstr "Active la virtualenv en exécutant la commande suivante :" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:44 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:49 msgid "Virtualenv with Poetry" msgstr "Virtualenv et la poésie" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:46 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:51 msgid "" "The Flower examples are based on `Poetry `_ to manage dependencies. 
After installing Poetry you " @@ -1194,7 +1223,7 @@ msgstr "" "poetry.org/docs/>`_ pour gérer les dépendances. Après l'installation de " "Poetry, il te suffit de créer un environnement virtuel avec :" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:52 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:58 msgid "" "If you open a new terminal you can activate the previously created " "virtual environment with the following command:" @@ -1202,15 +1231,16 @@ msgstr "" "Si tu ouvres un nouveau terminal, tu peux activer l'environnement virtuel" " précédemment créé avec la commande suivante :" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:60 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:66 msgid "Virtualenv with Anaconda" msgstr "Virtualenv avec Anaconda" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:62 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:68 +#, fuzzy msgid "" "If you prefer to use Anaconda for your virtual environment then install " "and setup the `conda `_ package. After setting it up you can " +"/user-guide/install/index.html>`_ package. After setting it up you can " "create a virtual environment with:" msgstr "" "Si tu préfères utiliser Anaconda pour ton environnement virtuel, installe" @@ -1219,15 +1249,15 @@ msgstr "" "guide/install/index.html>`_. Après l'avoir configuré, tu peux créer un " "environnement virtuel avec :" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:68 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:76 msgid "and activate the virtual environment with:" msgstr "et active l'environnement virtuel avec :" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:76 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:83 msgid "And then?" msgstr "Et ensuite ?" 
-#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:78 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:85 msgid "" "As soon as you created your virtual environment you clone one of the " "`Flower examples `_." @@ -1239,11 +1269,11 @@ msgstr "" msgid "Write documentation" msgstr "Rédiger de la documentation" -#: ../../source/contributor-how-to-write-documentation.rst:6 +#: ../../source/contributor-how-to-write-documentation.rst:5 msgid "Project layout" msgstr "Schéma du projet" -#: ../../source/contributor-how-to-write-documentation.rst:8 +#: ../../source/contributor-how-to-write-documentation.rst:7 msgid "" "The Flower documentation lives in the ``doc`` directory. The Sphinx-based" " documentation system supports both reStructuredText (``.rst`` files) and" @@ -1254,7 +1284,7 @@ msgstr "" "reStructuredText (fichiers `.rst`) et Markdown (fichiers `.md`)." #: ../../source/contributor-how-to-write-documentation.rst:10 -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:169 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:196 #, fuzzy msgid "" "Note that, in order to build the documentation locally (with ``poetry run" @@ -1266,20 +1296,20 @@ msgstr "" "make html``, comme décrit plus bas), `Pandoc " "_` doit être installé sur le système." 
-#: ../../source/contributor-how-to-write-documentation.rst:14 +#: ../../source/contributor-how-to-write-documentation.rst:15 msgid "Edit an existing page" msgstr "Modifier une page existante" -#: ../../source/contributor-how-to-write-documentation.rst:16 +#: ../../source/contributor-how-to-write-documentation.rst:17 msgid "Edit an existing ``.rst`` (or ``.md``) file under ``doc/source/``" msgstr "Modifier un fichier ``.rst`` (ou ``.md``) existant sous ``doc/source/``" -#: ../../source/contributor-how-to-write-documentation.rst:17 +#: ../../source/contributor-how-to-write-documentation.rst:18 #: ../../source/contributor-how-to-write-documentation.rst:27 msgid "Compile the docs: ``cd doc``, then ``poetry run make html``" msgstr "Compilez les documents : ``cd doc``, puis ``poetry run make html``" -#: ../../source/contributor-how-to-write-documentation.rst:18 +#: ../../source/contributor-how-to-write-documentation.rst:19 #: ../../source/contributor-how-to-write-documentation.rst:28 msgid "Open ``doc/build/html/index.html`` in the browser to check the result" msgstr "" @@ -1318,11 +1348,11 @@ msgstr "" "quelques recommandations sur les points de départ pour augmenter tes " "chances de voir ton PR accepté dans la base de code de Flower." -#: ../../source/contributor-ref-good-first-contributions.rst:11 +#: ../../source/contributor-ref-good-first-contributions.rst:9 msgid "Where to start" msgstr "Par où commencer" -#: ../../source/contributor-ref-good-first-contributions.rst:13 +#: ../../source/contributor-ref-good-first-contributions.rst:11 msgid "" "Until the Flower core library matures it will be easier to get PR's " "accepted if they only touch non-core areas of the codebase. Good " @@ -1333,25 +1363,25 @@ msgstr "" " non essentielles de la base de code. Les bons candidats pour commencer " "sont :" -#: ../../source/contributor-ref-good-first-contributions.rst:17 +#: ../../source/contributor-ref-good-first-contributions.rst:14 msgid "Documentation: What's missing? 
What could be expressed more clearly?" msgstr "" "Documentation : Qu'est-ce qui manque ? Qu'est-ce qui pourrait être " "exprimé plus clairement ?" -#: ../../source/contributor-ref-good-first-contributions.rst:18 +#: ../../source/contributor-ref-good-first-contributions.rst:15 msgid "Baselines: See below." msgstr "Références : voir ci-dessous." -#: ../../source/contributor-ref-good-first-contributions.rst:19 +#: ../../source/contributor-ref-good-first-contributions.rst:16 msgid "Examples: See below." msgstr "Exemples : voir ci-dessous." -#: ../../source/contributor-ref-good-first-contributions.rst:23 +#: ../../source/contributor-ref-good-first-contributions.rst:19 msgid "Request for Flower Baselines" msgstr "Demande pour une nouvelle Flower Baseline" -#: ../../source/contributor-ref-good-first-contributions.rst:25 +#: ../../source/contributor-ref-good-first-contributions.rst:21 #, fuzzy msgid "" "If you are not familiar with Flower Baselines, you should probably check-" @@ -1362,7 +1392,7 @@ msgstr "" "probablement consulter notre `guide de contribution pour les baselines " "`_." -#: ../../source/contributor-ref-good-first-contributions.rst:27 +#: ../../source/contributor-ref-good-first-contributions.rst:25 #, fuzzy msgid "" "You should then check out the open `issues " @@ -1377,7 +1407,7 @@ msgstr "" " laquelle tu aimerais travailler et qui n'a pas d'assignés, n'hésite pas " "à te l'attribuer et à commencer à travailler dessus !" -#: ../../source/contributor-ref-good-first-contributions.rst:31 +#: ../../source/contributor-ref-good-first-contributions.rst:30 msgid "" "Otherwise, if you don't find a baseline you'd like to work on, be sure to" " open a new issue with the baseline request template!" @@ -1430,12 +1460,13 @@ msgstr "" "protocole SecAgg peut être considéré comme un cas particulier du " "protocole SecAgg+." 
-#: ../../source/contributor-ref-secure-aggregation-protocols.rst:8 -msgid "The :code:`SecAgg+` abstraction" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:9 +#, fuzzy +msgid "The ``SecAgg+`` abstraction" msgstr "L'abstraction :code:`SecAgg+`" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:10 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:11 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:163 msgid "" "In this implementation, each client will be assigned with a unique index " "(int) for secure aggregation, and thus many python dictionaries used have" @@ -1446,8 +1477,8 @@ msgstr "" "dictionnaires python utilisés ont des clés de type int plutôt que de type" " ClientProxy." -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:65 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:198 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:67 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:204 msgid "" "The Flower server will execute and process received results in the " "following order:" @@ -1455,11 +1486,12 @@ msgstr "" "Le serveur Flower exécutera et traitera les résultats reçus dans l'ordre " "suivant :" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:159 -msgid "The :code:`LightSecAgg` abstraction" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 +#, fuzzy +msgid "The ``LightSecAgg`` abstraction" msgstr "L'abstraction :code:`LightSecAgg`" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:271 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:277 msgid "Types" msgstr "Types" @@ -1476,7 +1508,7 @@ msgstr "" "de Flower mais qui n'ont pas l'habitude de contribuer à des projets " "GitHub." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:6 +#: ../../source/contributor-tutorial-contribute-on-github.rst:7 #, fuzzy msgid "" "If you're familiar with how contributing on GitHub works, you can " @@ -1489,15 +1521,15 @@ msgstr "" "contributors.html>`_ et des exemples de `bonnes premières contributions " "`_." -#: ../../source/contributor-tutorial-contribute-on-github.rst:10 +#: ../../source/contributor-tutorial-contribute-on-github.rst:12 msgid "Setting up the repository" msgstr "Mise en place du référentiel" -#: ../../source/contributor-tutorial-contribute-on-github.rst:21 +#: ../../source/contributor-tutorial-contribute-on-github.rst:29 msgid "**Create a GitHub account and setup Git**" msgstr "**Créer un compte GitHub et configurer Git**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:13 +#: ../../source/contributor-tutorial-contribute-on-github.rst:15 #, fuzzy msgid "" "Git is a distributed version control tool. This allows for an entire " @@ -1512,7 +1544,7 @@ msgstr "" "locale, tu peux suivre ce `guide `_ pour le mettre en place." -#: ../../source/contributor-tutorial-contribute-on-github.rst:16 +#: ../../source/contributor-tutorial-contribute-on-github.rst:21 msgid "" "GitHub, itself, is a code hosting platform for version control and " "collaboration. It allows for everyone to collaborate and work from " @@ -1522,7 +1554,7 @@ msgstr "" "contrôle des versions et la collaboration. Il permet à chacun de " "collaborer et de travailler de n'importe où sur des dépôts à distance." -#: ../../source/contributor-tutorial-contribute-on-github.rst:18 +#: ../../source/contributor-tutorial-contribute-on-github.rst:25 msgid "" "If you haven't already, you will need to create an account on `GitHub " "`_." @@ -1530,7 +1562,7 @@ msgstr "" "Si ce n'est pas déjà fait, tu devras créer un compte sur `GitHub " "`_." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:20 +#: ../../source/contributor-tutorial-contribute-on-github.rst:28 msgid "" "The idea behind the generic Git and GitHub workflow boils down to this: " "you download code from a remote repository on GitHub, make changes " @@ -1542,15 +1574,15 @@ msgstr "" " des modifications localement et tu en gardes une trace à l'aide de Git, " "puis tu télécharges ton nouvel historique à nouveau sur GitHub." -#: ../../source/contributor-tutorial-contribute-on-github.rst:32 +#: ../../source/contributor-tutorial-contribute-on-github.rst:42 msgid "**Forking the Flower repository**" msgstr "**Fourche le dépôt de Flower**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:24 +#: ../../source/contributor-tutorial-contribute-on-github.rst:32 #, fuzzy msgid "" "A fork is a personal copy of a GitHub repository. To create one for " -"Flower, you must navigate to ``_ (while " +"Flower, you must navigate to https://github.com/adap/flower (while " "connected to your GitHub account) and click the ``Fork`` button situated " "on the top right of the page." msgstr "" @@ -1559,7 +1591,7 @@ msgstr "" "étant connecté à ton compte GitHub) et cliquer sur le bouton ``Fork`` " "situé en haut à droite de la page." -#: ../../source/contributor-tutorial-contribute-on-github.rst:29 +#: ../../source/contributor-tutorial-contribute-on-github.rst:38 msgid "" "You can change the name if you want, but this is not necessary as this " "version of Flower will be yours and will sit inside your own account " @@ -1572,11 +1604,11 @@ msgstr "" " devrais voir dans le coin supérieur gauche que tu es en train de " "regarder ta propre version de Flower." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:47 +#: ../../source/contributor-tutorial-contribute-on-github.rst:59 msgid "**Cloning your forked repository**" msgstr "**Clonage de ton dépôt forké**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:35 +#: ../../source/contributor-tutorial-contribute-on-github.rst:45 msgid "" "The next step is to download the forked repository on your machine to be " "able to make changes to it. On your forked repository page, you should " @@ -1588,7 +1620,7 @@ msgstr "" "forké, tu dois d'abord cliquer sur le bouton ``Code`` à droite, ce qui te" " permettra de copier le lien HTTPS du dépôt." -#: ../../source/contributor-tutorial-contribute-on-github.rst:41 +#: ../../source/contributor-tutorial-contribute-on-github.rst:52 msgid "" "Once you copied the \\, you can open a terminal on your machine, " "navigate to the place you want to download the repository to and type:" @@ -1597,7 +1629,7 @@ msgstr "" "machine, naviguer jusqu'à l'endroit où tu veux télécharger le référentiel" " et taper :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:47 +#: ../../source/contributor-tutorial-contribute-on-github.rst:59 #, fuzzy msgid "" "This will create a ``flower/`` (or the name of your fork if you renamed " @@ -1606,15 +1638,15 @@ msgstr "" "Cela créera un dossier `flower/` (ou le nom de ta fourche si tu l'as " "renommée) dans le répertoire de travail actuel." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:66 +#: ../../source/contributor-tutorial-contribute-on-github.rst:78 msgid "**Add origin**" msgstr "**Ajouter l'origine**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:50 +#: ../../source/contributor-tutorial-contribute-on-github.rst:62 msgid "You can then go into the repository folder:" msgstr "Tu peux ensuite aller dans le dossier du référentiel :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:56 +#: ../../source/contributor-tutorial-contribute-on-github.rst:68 msgid "" "And here we will need to add an origin to our repository. The origin is " "the \\ of the remote fork repository. To obtain it, we can do as " @@ -1626,7 +1658,7 @@ msgstr "" "indiqué précédemment en allant sur notre dépôt fork sur notre compte " "GitHub et en copiant le lien." -#: ../../source/contributor-tutorial-contribute-on-github.rst:61 +#: ../../source/contributor-tutorial-contribute-on-github.rst:75 msgid "" "Once the \\ is copied, we can type the following command in our " "terminal:" @@ -1634,11 +1666,11 @@ msgstr "" "Une fois que le \\ est copié, nous pouvons taper la commande " "suivante dans notre terminal :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:90 +#: ../../source/contributor-tutorial-contribute-on-github.rst:102 msgid "**Add upstream**" msgstr "**Ajouter en amont**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:69 +#: ../../source/contributor-tutorial-contribute-on-github.rst:81 #, fuzzy msgid "" "Now we will add an upstream address to our repository. 
Still in the same " @@ -1648,13 +1680,13 @@ msgstr "" "Toujours dans le même directroy, nous devons exécuter la commande " "suivante :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:76 +#: ../../source/contributor-tutorial-contribute-on-github.rst:88 msgid "The following diagram visually explains what we did in the previous steps:" msgstr "" "Le schéma suivant explique visuellement ce que nous avons fait dans les " "étapes précédentes :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:80 +#: ../../source/contributor-tutorial-contribute-on-github.rst:92 msgid "" "The upstream is the GitHub remote address of the parent repository (in " "this case Flower), i.e. the one we eventually want to contribute to and " @@ -1668,7 +1700,7 @@ msgstr "" "simplement l'adresse distante GitHub du dépôt forké que nous avons créé, " "c'est-à-dire la copie (fork) dans notre propre compte." -#: ../../source/contributor-tutorial-contribute-on-github.rst:84 +#: ../../source/contributor-tutorial-contribute-on-github.rst:97 msgid "" "To make sure our local version of the fork is up-to-date with the latest " "changes from the Flower repository, we can execute the following command:" @@ -1677,11 +1709,11 @@ msgstr "" "dernières modifications du dépôt Flower, nous pouvons exécuter la " "commande suivante :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:93 +#: ../../source/contributor-tutorial-contribute-on-github.rst:105 msgid "Setting up the coding environment" msgstr "Mise en place de l'environnement de codage" -#: ../../source/contributor-tutorial-contribute-on-github.rst:95 +#: ../../source/contributor-tutorial-contribute-on-github.rst:107 #, fuzzy msgid "" "This can be achieved by following this :doc:`getting started guide for " @@ -1694,11 +1726,11 @@ msgstr "" "fois que tu es capable d'écrire du code et de le tester, tu peux enfin " "commencer à faire des changements !" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:100 +#: ../../source/contributor-tutorial-contribute-on-github.rst:113 msgid "Making changes" msgstr "Apporter des changements" -#: ../../source/contributor-tutorial-contribute-on-github.rst:102 +#: ../../source/contributor-tutorial-contribute-on-github.rst:115 msgid "" "Before making any changes make sure you are up-to-date with your " "repository:" @@ -1706,15 +1738,15 @@ msgstr "" "Avant de faire des changements, assure-toi que tu es à jour avec ton " "référentiel :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:108 +#: ../../source/contributor-tutorial-contribute-on-github.rst:121 msgid "And with Flower's repository:" msgstr "Et avec le référentiel de Flower :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:122 +#: ../../source/contributor-tutorial-contribute-on-github.rst:134 msgid "**Create a new branch**" msgstr "**Créer une nouvelle branche**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:115 +#: ../../source/contributor-tutorial-contribute-on-github.rst:128 msgid "" "To make the history cleaner and easier to work with, it is good practice " "to create a new branch for each feature/project that needs to be " @@ -1724,7 +1756,7 @@ msgstr "" "une bonne pratique de créer une nouvelle branche pour chaque " "fonctionnalité/projet qui doit être mis en œuvre." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:118 +#: ../../source/contributor-tutorial-contribute-on-github.rst:131 msgid "" "To do so, just run the following command inside the repository's " "directory:" @@ -1732,21 +1764,21 @@ msgstr "" "Pour ce faire, il suffit d'exécuter la commande suivante dans le " "répertoire du référentiel :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:125 +#: ../../source/contributor-tutorial-contribute-on-github.rst:136 msgid "**Make changes**" msgstr "**Apporter des modifications**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:125 +#: ../../source/contributor-tutorial-contribute-on-github.rst:137 msgid "Write great code and create wonderful changes using your favorite editor!" msgstr "" "Écris du bon code et crée de merveilleuses modifications à l'aide de ton " "éditeur préféré !" -#: ../../source/contributor-tutorial-contribute-on-github.rst:138 +#: ../../source/contributor-tutorial-contribute-on-github.rst:149 msgid "**Test and format your code**" msgstr "**Teste et mets en forme ton code**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:128 +#: ../../source/contributor-tutorial-contribute-on-github.rst:139 msgid "" "Don't forget to test and format your code! Otherwise your code won't be " "able to be merged into the Flower repository. This is done so the " @@ -1756,15 +1788,15 @@ msgstr "" "pourra pas être fusionné dans le dépôt Flower, et ce, afin que la base de" " code reste cohérente et facile à comprendre." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:131 +#: ../../source/contributor-tutorial-contribute-on-github.rst:143 msgid "To do so, we have written a few scripts that you can execute:" msgstr "Pour ce faire, nous avons écrit quelques scripts que tu peux exécuter :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:150 +#: ../../source/contributor-tutorial-contribute-on-github.rst:162 msgid "**Stage changes**" msgstr "**Changements de scène**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:141 +#: ../../source/contributor-tutorial-contribute-on-github.rst:152 msgid "" "Before creating a commit that will update your history, you must specify " "to Git which files it needs to take into account." @@ -1772,48 +1804,51 @@ msgstr "" "Avant de créer un commit qui mettra à jour ton historique, tu dois " "spécifier à Git les fichiers qu'il doit prendre en compte." -#: ../../source/contributor-tutorial-contribute-on-github.rst:143 +#: ../../source/contributor-tutorial-contribute-on-github.rst:155 msgid "This can be done with:" msgstr "Cela peut se faire avec :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:149 +#: ../../source/contributor-tutorial-contribute-on-github.rst:161 +#, fuzzy msgid "" "To check which files have been modified compared to the last version " "(last commit) and to see which files are staged for commit, you can use " -"the :code:`git status` command." +"the ``git status`` command." msgstr "" "Pour vérifier quels fichiers ont été modifiés par rapport à la dernière " "version (last commit) et pour voir quels fichiers sont mis à disposition " "pour le commit, tu peux utiliser la commande :code:`git status`." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:160 +#: ../../source/contributor-tutorial-contribute-on-github.rst:173 msgid "**Commit changes**" msgstr "**Commit changes**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:153 +#: ../../source/contributor-tutorial-contribute-on-github.rst:165 +#, fuzzy msgid "" -"Once you have added all the files you wanted to commit using :code:`git " -"add`, you can finally create your commit using this command:" +"Once you have added all the files you wanted to commit using ``git add``," +" you can finally create your commit using this command:" msgstr "" "Une fois que tu as ajouté tous les fichiers que tu voulais livrer à " "l'aide de :code:`git add`, tu peux enfin créer ta livraison à l'aide de " "cette commande :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:159 +#: ../../source/contributor-tutorial-contribute-on-github.rst:172 +#, fuzzy msgid "" "The \\ is there to explain to others what the commit " "does. It should be written in an imperative style and be concise. An " -"example would be :code:`git commit -m \"Add images to README\"`." +"example would be ``git commit -m \"Add images to README\"``." msgstr "" "Le ``commit_message`` est là pour expliquer aux autres ce que fait le " "commit. Il doit être écrit dans un style impératif et être concis. Un " "exemple serait :code:`git commit -m \"Ajouter des images au README\"`." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:171 +#: ../../source/contributor-tutorial-contribute-on-github.rst:185 msgid "**Push the changes to the fork**" msgstr "**Pousser les changements vers la fourche**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:163 +#: ../../source/contributor-tutorial-contribute-on-github.rst:176 msgid "" "Once we have committed our changes, we have effectively updated our local" " history, but GitHub has no way of knowing this unless we push our " @@ -1824,7 +1859,7 @@ msgstr "" "moyen de le savoir à moins que nous ne poussions nos modifications vers " "l'adresse distante de notre origine :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:170 +#: ../../source/contributor-tutorial-contribute-on-github.rst:184 msgid "" "Once this is done, you will see on the GitHub that your forked repo was " "updated with the changes you have made." @@ -1832,15 +1867,15 @@ msgstr "" "Une fois que c'est fait, tu verras sur GitHub que ton repo forké a été " "mis à jour avec les modifications que tu as apportées." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:174 +#: ../../source/contributor-tutorial-contribute-on-github.rst:188 msgid "Creating and merging a pull request (PR)" msgstr "Créer et fusionner une pull request (PR)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:206 +#: ../../source/contributor-tutorial-contribute-on-github.rst:226 msgid "**Create the PR**" msgstr "**Créer le PR**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:177 +#: ../../source/contributor-tutorial-contribute-on-github.rst:191 msgid "" "Once you have pushed changes, on the GitHub webpage of your repository " "you should see the following message:" @@ -1848,12 +1883,12 @@ msgstr "" "Une fois que tu as poussé les modifications, sur la page web GitHub de " "ton dépôt, tu devrais voir le message suivant :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:181 +#: ../../source/contributor-tutorial-contribute-on-github.rst:196 #, fuzzy msgid "Otherwise you can always find this option in the ``Branches`` page." msgstr "Sinon, tu peux toujours trouver cette option dans la page `Branches`." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:183 +#: ../../source/contributor-tutorial-contribute-on-github.rst:198 #, fuzzy msgid "" "Once you click the ``Compare & pull request`` button, you should see " @@ -1862,13 +1897,13 @@ msgstr "" "Une fois que tu as cliqué sur le bouton `Compare & pull request`, tu " "devrais voir quelque chose de similaire à ceci :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:187 +#: ../../source/contributor-tutorial-contribute-on-github.rst:203 msgid "At the top you have an explanation of which branch will be merged where:" msgstr "" "En haut, tu as une explication de quelle branche sera fusionnée à quel " "endroit :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:191 +#: ../../source/contributor-tutorial-contribute-on-github.rst:207 msgid "" "In this example you can see that the request is to merge the branch " "``doc-fixes`` from my forked repository to branch ``main`` from the " @@ -1878,14 +1913,14 @@ msgstr "" "branche ``doc-fixes`` de mon dépôt forké à la branche ``main`` du dépôt " "Flower." -#: ../../source/contributor-tutorial-contribute-on-github.rst:193 +#: ../../source/contributor-tutorial-contribute-on-github.rst:210 msgid "" "The title should be changed to adhere to the :ref:`pr_title_format` " "guidelines, otherwise it won't be possible to merge the PR. So in this " "case, a correct title might be ``docs(framework:skip) Fix typos``." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:196 +#: ../../source/contributor-tutorial-contribute-on-github.rst:214 msgid "" "The input box in the middle is there for you to describe what your PR " "does and to link it to existing issues. We have placed comments (that " @@ -1897,11 +1932,11 @@ msgstr "" "commentaires (qui ne seront pas rendus une fois le PR ouvert) pour te " "guider tout au long du processus." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:199 +#: ../../source/contributor-tutorial-contribute-on-github.rst:218 msgid "It is important to follow the instructions described in comments." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:201 +#: ../../source/contributor-tutorial-contribute-on-github.rst:220 msgid "" "At the bottom you will find the button to open the PR. This will notify " "reviewers that a new PR has been opened and that they should look over it" @@ -1911,7 +1946,7 @@ msgstr "" "qui informera les réviseurs qu'un nouveau PR a été ouvert et qu'ils " "doivent le consulter pour le fusionner ou demander des modifications." -#: ../../source/contributor-tutorial-contribute-on-github.rst:204 +#: ../../source/contributor-tutorial-contribute-on-github.rst:224 msgid "" "If your PR is not yet ready for review, and you don't want to notify " "anyone, you have the option to create a draft pull request:" @@ -1920,11 +1955,11 @@ msgstr "" " personne, tu as la possibilité de créer un brouillon de demande de " "traction :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:230 msgid "**Making new changes**" msgstr "**Faire de nouveaux changements**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:229 msgid "" "Once the PR has been opened (as draft or not), you can still push new " "commits to it the same way we did before, by making changes to the branch" @@ -1934,11 +1969,11 @@ msgstr "" "toujours y pousser de nouveaux commits de la même manière qu'auparavant, " "en apportant des modifications à la branche associée au PR." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:231 +#: ../../source/contributor-tutorial-contribute-on-github.rst:253 msgid "**Review the PR**" msgstr "**Review the PR**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:212 +#: ../../source/contributor-tutorial-contribute-on-github.rst:233 msgid "" "Once the PR has been opened or once the draft PR has been marked as " "ready, a review from code owners will be automatically requested:" @@ -1947,7 +1982,7 @@ msgstr "" " étant prêt, une révision des propriétaires de code sera automatiquement " "demandée :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:216 +#: ../../source/contributor-tutorial-contribute-on-github.rst:238 msgid "" "Code owners will then look into the code, ask questions, request changes " "or validate the PR." @@ -1955,11 +1990,11 @@ msgstr "" "Les propriétaires du code vont alors se pencher sur le code, poser des " "questions, demander des modifications ou valider le RP." -#: ../../source/contributor-tutorial-contribute-on-github.rst:218 +#: ../../source/contributor-tutorial-contribute-on-github.rst:241 msgid "Merging will be blocked if there are ongoing requested changes." msgstr "La fusion sera bloquée s'il y a des changements demandés en cours." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:222 +#: ../../source/contributor-tutorial-contribute-on-github.rst:245 msgid "" "To resolve them, just push the necessary changes to the branch associated" " with the PR:" @@ -1967,11 +2002,11 @@ msgstr "" "Pour les résoudre, il suffit de pousser les changements nécessaires vers " "la branche associée au PR :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:226 +#: ../../source/contributor-tutorial-contribute-on-github.rst:250 msgid "And resolve the conversation:" msgstr "Et résous la conversation :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:230 +#: ../../source/contributor-tutorial-contribute-on-github.rst:254 msgid "" "Once all the conversations have been resolved, you can re-request a " "review." @@ -1979,11 +2014,11 @@ msgstr "" "Une fois que toutes les conversations ont été résolues, tu peux " "redemander un examen." -#: ../../source/contributor-tutorial-contribute-on-github.rst:251 +#: ../../source/contributor-tutorial-contribute-on-github.rst:274 msgid "**Once the PR is merged**" msgstr "**Une fois que le PR est fusionné**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:234 +#: ../../source/contributor-tutorial-contribute-on-github.rst:256 msgid "" "If all the automatic tests have passed and reviewers have no more changes" " to request, they can approve the PR and merge it." @@ -1992,7 +2027,7 @@ msgstr "" " de modifications à demander, ils peuvent approuver le PR et le " "fusionner." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:238 +#: ../../source/contributor-tutorial-contribute-on-github.rst:261 msgid "" "Once it is merged, you can delete the branch on GitHub (a button should " "appear to do so) and also delete it locally by doing:" @@ -2001,19 +2036,19 @@ msgstr "" "(un bouton devrait apparaître pour le faire) et aussi la supprimer " "localement en faisant :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:245 +#: ../../source/contributor-tutorial-contribute-on-github.rst:269 msgid "Then you should update your forked repository by doing:" msgstr "Ensuite, tu dois mettre à jour ton dépôt forké en faisant :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:254 +#: ../../source/contributor-tutorial-contribute-on-github.rst:277 msgid "Example of first contribution" msgstr "Exemple de première contribution" -#: ../../source/contributor-tutorial-contribute-on-github.rst:257 +#: ../../source/contributor-tutorial-contribute-on-github.rst:280 msgid "Problem" msgstr "Problème" -#: ../../source/contributor-tutorial-contribute-on-github.rst:259 +#: ../../source/contributor-tutorial-contribute-on-github.rst:282 #, fuzzy msgid "" "For our documentation, we've started to use the `Diàtaxis framework " @@ -2022,7 +2057,7 @@ msgstr "" "Pour notre documentation, nous avons commencé à utiliser le cadre " "`Diàtaxis `_." -#: ../../source/contributor-tutorial-contribute-on-github.rst:261 +#: ../../source/contributor-tutorial-contribute-on-github.rst:285 #, fuzzy msgid "" "Our \"How to\" guides should have titles that continue the sentence \"How" @@ -2032,7 +2067,7 @@ msgstr "" "la phrase \"Comment faire pour...\", par exemple, \"Comment passer à " "Flower 1.0\"." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:263 +#: ../../source/contributor-tutorial-contribute-on-github.rst:288 msgid "" "Most of our guides do not follow this new format yet, and changing their " "title is (unfortunately) more involved than one might think." @@ -2041,7 +2076,7 @@ msgstr "" "changer leur titre est (malheureusement) plus compliqué qu'on ne le " "pense." -#: ../../source/contributor-tutorial-contribute-on-github.rst:265 +#: ../../source/contributor-tutorial-contribute-on-github.rst:291 #, fuzzy msgid "" "This issue is about changing the title of a doc from present continuous " @@ -2050,7 +2085,7 @@ msgstr "" "Cette question porte sur le changement du titre d'un document du présent " "continu au présent simple." -#: ../../source/contributor-tutorial-contribute-on-github.rst:267 +#: ../../source/contributor-tutorial-contribute-on-github.rst:294 #, fuzzy msgid "" "Let's take the example of \"Saving Progress\" which we changed to \"Save " @@ -2060,21 +2095,21 @@ msgstr "" "remplacé par \"Sauvegarder la progression\". Est-ce que cela passe notre " "contrôle ?" -#: ../../source/contributor-tutorial-contribute-on-github.rst:269 +#: ../../source/contributor-tutorial-contribute-on-github.rst:297 #, fuzzy msgid "Before: \"How to saving progress\" ❌" msgstr "Avant : \"Comment sauvegarder les progrès\" ❌" -#: ../../source/contributor-tutorial-contribute-on-github.rst:271 +#: ../../source/contributor-tutorial-contribute-on-github.rst:299 #, fuzzy msgid "After: \"How to save progress\" ✅" msgstr "Après : \"Comment sauvegarder la progression\" ✅" -#: ../../source/contributor-tutorial-contribute-on-github.rst:274 +#: ../../source/contributor-tutorial-contribute-on-github.rst:302 msgid "Solution" msgstr "Solution" -#: ../../source/contributor-tutorial-contribute-on-github.rst:276 +#: ../../source/contributor-tutorial-contribute-on-github.rst:304 #, fuzzy msgid "" "This is a tiny change, but it'll allow us to test your end-to-end setup. 
" @@ -2084,12 +2119,12 @@ msgstr "" "configuration de bout en bout. Après avoir cloné et configuré le repo " "Flower, voici ce que tu dois faire :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:278 +#: ../../source/contributor-tutorial-contribute-on-github.rst:307 #, fuzzy msgid "Find the source file in ``doc/source``" msgstr "Trouve le fichier source dans `doc/source`" -#: ../../source/contributor-tutorial-contribute-on-github.rst:279 +#: ../../source/contributor-tutorial-contribute-on-github.rst:308 #, fuzzy msgid "" "Make the change in the ``.rst`` file (beware, the dashes under the title " @@ -2098,7 +2133,7 @@ msgstr "" "Effectue la modification dans le fichier `.rst` (attention, les tirets " "sous le titre doivent être de la même longueur que le titre lui-même)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:280 +#: ../../source/contributor-tutorial-contribute-on-github.rst:310 #, fuzzy msgid "" "Build the docs and `check the result `_" -#: ../../source/contributor-tutorial-contribute-on-github.rst:283 +#: ../../source/contributor-tutorial-contribute-on-github.rst:314 msgid "Rename file" msgstr "Renommer le fichier" -#: ../../source/contributor-tutorial-contribute-on-github.rst:285 +#: ../../source/contributor-tutorial-contribute-on-github.rst:316 msgid "" "You might have noticed that the file name still reflects the old wording." " If we just change the file, then we break all existing links to it - it " @@ -2124,22 +2159,22 @@ msgstr "" "important** d'éviter cela, car briser des liens peut nuire à notre " "classement dans les moteurs de recherche." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:288 +#: ../../source/contributor-tutorial-contribute-on-github.rst:320 #, fuzzy msgid "Here's how to change the file name:" msgstr "Voici comment changer le nom du fichier :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:290 +#: ../../source/contributor-tutorial-contribute-on-github.rst:322 #, fuzzy msgid "Change the file name to ``save-progress.rst``" msgstr "Change le nom du fichier en `save-progress.rst`" -#: ../../source/contributor-tutorial-contribute-on-github.rst:291 +#: ../../source/contributor-tutorial-contribute-on-github.rst:323 #, fuzzy msgid "Add a redirect rule to ``doc/source/conf.py``" msgstr "Ajouter une règle de redirection à `doc/source/conf.py`" -#: ../../source/contributor-tutorial-contribute-on-github.rst:293 +#: ../../source/contributor-tutorial-contribute-on-github.rst:325 #, fuzzy msgid "" "This will cause a redirect from ``saving-progress.html`` to ``save-" @@ -2148,11 +2183,11 @@ msgstr "" "Cela entraînera une redirection de `saving-progress.html` vers `save-" "progress.html`, les anciens liens continueront à fonctionner." -#: ../../source/contributor-tutorial-contribute-on-github.rst:296 +#: ../../source/contributor-tutorial-contribute-on-github.rst:329 msgid "Apply changes in the index file" msgstr "Applique les changements dans le fichier d'index" -#: ../../source/contributor-tutorial-contribute-on-github.rst:298 +#: ../../source/contributor-tutorial-contribute-on-github.rst:331 #, fuzzy msgid "" "For the lateral navigation bar to work properly, it is very important to " @@ -2163,16 +2198,16 @@ msgstr "" "très important de mettre également à jour le fichier `index.rst`. C'est " "là que nous définissons toute l'arborescence de la barre de navigation." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:301 +#: ../../source/contributor-tutorial-contribute-on-github.rst:335 #, fuzzy msgid "Find and modify the file name in ``index.rst``" msgstr "Trouve et modifie le nom du fichier dans `index.rst`" -#: ../../source/contributor-tutorial-contribute-on-github.rst:304 +#: ../../source/contributor-tutorial-contribute-on-github.rst:338 msgid "Open PR" msgstr "Open PR" -#: ../../source/contributor-tutorial-contribute-on-github.rst:306 +#: ../../source/contributor-tutorial-contribute-on-github.rst:340 #, fuzzy msgid "" "Commit the changes (commit messages are always imperative: \"Do " @@ -2181,36 +2216,36 @@ msgstr "" "Valide les modifications (les messages de validation sont toujours " "impératifs : \"Fais quelque chose\", dans ce cas \"Modifie...\")" -#: ../../source/contributor-tutorial-contribute-on-github.rst:307 +#: ../../source/contributor-tutorial-contribute-on-github.rst:342 msgid "Push the changes to your fork" msgstr "Transmets les changements à ta fourchette" -#: ../../source/contributor-tutorial-contribute-on-github.rst:308 +#: ../../source/contributor-tutorial-contribute-on-github.rst:343 msgid "" "Open a PR (as shown above) with title ``docs(framework) Update how-to " "guide title``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:309 +#: ../../source/contributor-tutorial-contribute-on-github.rst:344 msgid "Wait for it to be approved!" msgstr "Attends qu'elle soit approuvée !" -#: ../../source/contributor-tutorial-contribute-on-github.rst:310 +#: ../../source/contributor-tutorial-contribute-on-github.rst:345 msgid "Congrats! 🥳 You're now officially a Flower contributor!" msgstr "" "Félicitations 🥳 Tu es désormais officiellement une contributrice de " "Flower !" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:314 -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:548 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:946 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:727 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:713 +#: ../../source/contributor-tutorial-contribute-on-github.rst:348 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:573 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1012 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:811 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:857 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:367 msgid "Next steps" msgstr "Prochaines étapes" -#: ../../source/contributor-tutorial-contribute-on-github.rst:316 +#: ../../source/contributor-tutorial-contribute-on-github.rst:350 msgid "" "Once you have made your first PR, and want to contribute more, be sure to" " check out the following :" @@ -2218,37 +2253,37 @@ msgstr "" "Une fois que tu auras fait ton premier RP, et que tu voudras contribuer " "davantage, ne manque pas de consulter les sites suivants :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:318 +#: ../../source/contributor-tutorial-contribute-on-github.rst:353 #, fuzzy msgid "" ":doc:`Good first contributions `, where you should particularly look into the " -":code:`baselines` contributions." +"``baselines`` contributions." msgstr "" "`Bonnes premières contributions `_, où vous devriez " "particulièrement regarder les contributions :code:`baselines`." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:322 +#: ../../source/contributor-tutorial-contribute-on-github.rst:357 #: ../../source/fed/0000-20200102-fed-template.md:60 msgid "Appendix" msgstr "Annexe" -#: ../../source/contributor-tutorial-contribute-on-github.rst:327 +#: ../../source/contributor-tutorial-contribute-on-github.rst:362 msgid "PR title format" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:329 +#: ../../source/contributor-tutorial-contribute-on-github.rst:364 msgid "We enforce the following PR title format:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:335 +#: ../../source/contributor-tutorial-contribute-on-github.rst:370 msgid "" "(or ``(:skip) `` to ignore the PR in the " "changelog)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:337 +#: ../../source/contributor-tutorial-contribute-on-github.rst:372 msgid "" "Where ```` needs to be in ``{ci, fix, feat, docs, refactor, " "break}``, ```` should be in ``{framework, baselines, datasets, " @@ -2257,51 +2292,51 @@ msgid "" "verb in the imperative mood." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:341 +#: ../../source/contributor-tutorial-contribute-on-github.rst:377 #, fuzzy msgid "Valid examples:" msgstr "Exemples de PyTorch" -#: ../../source/contributor-tutorial-contribute-on-github.rst:343 +#: ../../source/contributor-tutorial-contribute-on-github.rst:379 msgid "``feat(framework) Add flwr build CLI command``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:344 +#: ../../source/contributor-tutorial-contribute-on-github.rst:380 msgid "``refactor(examples:skip) Improve quickstart-pytorch logging``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:345 +#: ../../source/contributor-tutorial-contribute-on-github.rst:381 msgid "``ci(*:skip) Enforce PR title format``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:347 +#: ../../source/contributor-tutorial-contribute-on-github.rst:383 #, fuzzy msgid "Invalid examples:" msgstr "Exemples de PyTorch" -#: ../../source/contributor-tutorial-contribute-on-github.rst:349 +#: ../../source/contributor-tutorial-contribute-on-github.rst:385 msgid "``feat(framework): Add flwr build CLI command`` (extra ``:``)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:350 +#: ../../source/contributor-tutorial-contribute-on-github.rst:386 msgid "" "``feat(*) Add flwr build CLI command`` (missing ``skip`` flag along with " "``*``)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:351 +#: ../../source/contributor-tutorial-contribute-on-github.rst:387 msgid "``feat(skip) Add flwr build CLI command`` (missing ````)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:352 +#: ../../source/contributor-tutorial-contribute-on-github.rst:388 msgid "``feat(framework) add flwr build CLI command`` (non capitalised verb)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:353 +#: 
../../source/contributor-tutorial-contribute-on-github.rst:389 msgid "``feat(framework) Add flwr build CLI command.`` (dot at the end)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:354 +#: ../../source/contributor-tutorial-contribute-on-github.rst:390 msgid "``Add flwr build CLI command.`` (missing ``()``)" msgstr "" @@ -2310,14 +2345,18 @@ msgid "Get started as a contributor" msgstr "Devenez un·e contributeur·ice" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:5 -#: ../../source/how-to-run-flower-using-docker.rst:132 +#: ../../source/docker/run-as-subprocess.rst:11 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:16 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:13 +#: ../../source/docker/tutorial-quickstart-docker.rst:11 msgid "Prerequisites" msgstr "Prérequis" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:7 #, fuzzy -msgid "`Python 3.8 `_ or above" -msgstr "`Python 3.7 `_ ou plus" +msgid "`Python 3.9 `_ or above" +msgstr "`Python 3.10 `_ ou plus" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:8 msgid "`Poetry 1.3 `_ or above" @@ -2336,7 +2375,7 @@ msgstr "" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:12 #, fuzzy msgid "" -"Flower uses :code:`pyproject.toml` to manage dependencies and configure " +"Flower uses ``pyproject.toml`` to manage dependencies and configure " "development tools (the ones which support it). Poetry is a build tool " "which supports `PEP 517 `_." msgstr "" @@ -2345,13 +2384,14 @@ msgstr "" "le supportent). Poetry est un outil qui support `PEP 517 " "`_." 
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:18 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:17 msgid "Developer Machine Setup" msgstr "Setup de la machine" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:21 -msgid "Preliminarities" -msgstr "" +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:20 +#, fuzzy +msgid "Preliminaries" +msgstr "Principes" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:22 msgid "Some system-wide dependencies are needed." @@ -2367,113 +2407,113 @@ msgid "" "installation actions to add `brew` to your PATH." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:28 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:29 msgid "" "Install `xz` (to install different Python versions) and `pandoc` to build" -" the docs::" +" the docs:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:34 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:36 msgid "For Ubuntu" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:35 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:38 msgid "" "Ensure you system (Ubuntu 22.04+) is up-to-date, and you have all " -"necessary packages::" +"necessary packages:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:44 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:47 #, fuzzy msgid "Create Flower Dev Environment" msgstr "Créer/Supprimer l'environment virtuel" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:46 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:49 #, fuzzy msgid "" -"1. 
Clone the `Flower repository `_ from " -"GitHub::" +"Clone the `Flower repository `_ from " +"GitHub:" msgstr "" "Pour commencer, cloner la `repo Flower `_" " depuis GitHub::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:52 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:56 msgid "" "Let's create the Python environment for all-things Flower. If you wish to" -" use :code:`pyenv`, we provide two convenience scripts that you can use. " -"If you prefer using something else than :code:`pyenv`, create a new " -"environment, activate and skip to the last point where all packages are " -"installed." +" use ``pyenv``, we provide two convenience scripts that you can use. If " +"you prefer using something else than ``pyenv``, create a new environment," +" activate and skip to the last point where all packages are installed." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:54 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:61 #, fuzzy msgid "" -"If you don't have :code:`pyenv` installed, the following script that will" -" install it, set it up, and create the virtual environment (with " -":code:`Python 3.8.17` by default)::" +"If you don't have ``pyenv`` installed, the following script that will " +"install it, set it up, and create the virtual environment (with " +":substitution-code:`Python |python_full_version|` by default):" msgstr "" "Si vous n'avez pas :code:`pyenv` installé, vous pouvez utiliser le script" " suivant qui l'installera, le configurera et créera l'environnement " -"virtuel (avec :code:`Python 3.8.17` par défaut)::" +"virtuel (avec :code:`Python 3.9.20` par défaut)::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:58 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69 #, fuzzy msgid "" -"If you already have :code:`pyenv` installed (along with the :code:`pyenv-" -"virtualenv` plugin), you can use the following 
convenience script (with " -":code:`Python 3.8.17` by default)::" +"If you already have ``pyenv`` installed (along with the ``pyenv-" +"virtualenv`` plugin), you can use the following convenience script (with " +":substitution-code:`Python |python_full_version|` by default):" msgstr "" "Si vous n'avez pas :code:`pyenv` installé, vous pouvez utiliser le script" " suivant qui l'installera, le configurera et créera l'environnement " -"virtuel (avec :code:`Python 3.8.17` par défaut)::" +"virtuel (avec :code:`Python 3.9.20` par défaut)::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:62 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:77 #, fuzzy msgid "" -"3. Install the Flower package in development mode (think :code:`pip " -"install -e`) along with all necessary dependencies::" +"3. Install the Flower package in development mode (think ``pip install " +"-e``) along with all necessary dependencies:" msgstr "" "Troisièmement, installez le paquet Flower en mode de développement ( " ":code :`pip install -e`) avec toutes les dépendances nécessaires :" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85 msgid "Convenience Scripts" msgstr "Scripts pratiques" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:71 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:87 +#, fuzzy msgid "" "The Flower repository contains a number of convenience scripts to make " -"recurring development tasks easier and less error-prone. See the " -":code:`/dev` subdirectory for a full list. The following scripts are " -"amongst the most important ones:" +"recurring development tasks easier and less error-prone. See the ``/dev``" +" subdirectory for a full list. 
The following scripts are amongst the most" +" important ones:" msgstr "" "La repo de Flower contient un certain nombre de scripts de commodité pour" " rendre les tâches de développement récurrentes plus faciles et moins " "problématiques. Voir le sous-répertoire :code :`/dev` pour une liste " "complète. Les scripts suivants sont parmis les plus importants :" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:77 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:92 msgid "Create/Delete Virtual Environment" msgstr "Créer/Supprimer l'environment virtuel" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:101 msgid "Compile ProtoBuf Definitions" msgstr "Compiler les définitions ProtoBuf" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:92 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:108 msgid "Auto-Format Code" msgstr "Formatter le code" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:99 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:115 msgid "Run Linters and Tests" msgstr "Vérifier le format et tester le code" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:106 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:122 msgid "Add a pre-commit hook" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:108 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:124 msgid "" "Developers may integrate a pre-commit hook into their workflow utilizing " "the `pre-commit `_ library. The pre-" @@ -2481,50 +2521,50 @@ msgid "" "``./dev/format.sh`` and ``./dev/test.sh`` scripts." 
msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:110 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:128 msgid "There are multiple ways developers can use this:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:112 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:130 msgid "Install the pre-commit hook to your local git directory by simply running:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:118 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:136 msgid "" "Each ``git commit`` will trigger the execution of formatting and " "linting/test scripts." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:119 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:138 msgid "" "If in a hurry, bypass the hook using ``--no-verify`` with the ``git " -"commit`` command. ::" +"commit`` command." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:124 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:145 msgid "" "For developers who prefer not to install the hook permanently, it is " "possible to execute a one-time check prior to committing changes by using" " the following command:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:130 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:152 msgid "" "This executes the formatting and linting checks/tests on all the files " "without modifying the default behavior of ``git commit``." 
msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:133 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:156 msgid "Run Github Actions (CI) locally" msgstr "Exécuter les GitHub Actions (CI) localement" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:135 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:158 #, fuzzy msgid "" "Developers could run the full set of Github Actions workflows under their" " local environment by using `Act `_. " "Please refer to the installation instructions under the linked repository" -" and run the next command under Flower main cloned repository folder::" +" and run the next command under Flower main cloned repository folder:" msgstr "" "Il est possible d'exécuter l'ensemble des Github Actions sous leur " "environnement local en utilisant `Act _`." @@ -2532,7 +2572,7 @@ msgstr "" "fois installé, exécuter la commande suivante dans le dossier principale " "de Flower :" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:142 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:167 msgid "" "The Flower default workflow would run by setting up the required Docker " "machines underneath." @@ -2540,28676 +2580,35894 @@ msgstr "" "Le workflow par défaut de Flower sera exécuté en configurant les machines" " Docker requises en arrière plan." -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:147 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:171 #, fuzzy msgid "Build Release" msgstr "Inédit" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:149 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:173 +#, fuzzy msgid "" "Flower uses Poetry to build releases. The necessary command is wrapped in" -" a simple script::" +" a simple script:" msgstr "" "Flower utilise Poetry pour construire les nouvelles versions. 
La commande" " nécessaire est comprise dans un script simple ::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:154 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:180 +#, fuzzy msgid "" -"The resulting :code:`.whl` and :code:`.tar.gz` releases will be stored in" -" the :code:`/dist` subdirectory." +"The resulting ``.whl`` and ``.tar.gz`` releases will be stored in the " +"``/dist`` subdirectory." msgstr "" "Les versions résultantes :code:`.whl` et :code:`.tar.gz` seront stockées " "dans le sous-répertoire:code:`/dist`." -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:159 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:184 #, fuzzy msgid "Build Documentation" msgstr "Amélioration de la documentation" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:161 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:186 msgid "" "Flower's documentation uses `Sphinx `_. " "There's no convenience script to re-build the documentation yet, but it's" -" pretty easy::" +" pretty easy:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:167 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:194 msgid "This will generate HTML documentation in ``doc/build/html``." 
msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: FedBN in PyTorch - From Centralized To Federated" -msgstr "Exemple : FedBN dans PyTorch - De la centralisation à la fédération" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 +#: ../../source/docker/enable-tls.rst:2 #, fuzzy +msgid "Enable TLS for Secure Connections" +msgstr "Collecte centralisée des données" + +#: ../../source/docker/enable-tls.rst:4 msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload with `FedBN " -"`_, a federated training strategy " -"designed for non-iid data. We are using PyTorch to train a Convolutional " -"Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " -"When applying FedBN, only few changes needed compared to :doc:`Example: " -"PyTorch - From Centralized To Federated `." +"When operating in a production environment, it is strongly recommended to" +" enable Transport Layer Security (TLS) for each Flower Component to " +"ensure secure communication." msgstr "" -"Ce tutoriel te montrera comment utiliser Flower pour construire une " -"version fédérée d'une charge de travail d'apprentissage automatique " -"existante avec `FedBN `_, une stratégie" -" de formation fédérée conçue pour les données non-identifiées. Nous " -"utilisons PyTorch pour former un réseau neuronal convolutif (avec des " -"couches de normalisation par lots) sur l'ensemble de données CIFAR-10. " -"Lors de l'application de FedBN, seules quelques modifications sont " -"nécessaires par rapport à `Exemple : PyTorch - De la centralisation à la " -"fédération `_." 
- -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:9 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:10 -msgid "Centralized Training" -msgstr "Formation centralisée" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 -#, fuzzy +#: ../../source/docker/enable-tls.rst:7 msgid "" -"All files are revised based on :doc:`Example: PyTorch - From Centralized " -"To Federated `. The only " -"thing to do is modifying the file called :code:`cifar.py`, revised part " -"is shown below:" +"To enable TLS, you will need a PEM-encoded root certificate, a PEM-" +"encoded private key and a PEM-encoded certificate chain." msgstr "" -"Tous les fichiers sont révisés sur la base de `Exemple : PyTorch - From " -"Centralized To Federated `_. La seule chose à faire est de modifier " -"le fichier appelé :code:`cifar.py`, la partie révisée est montrée ci-" -"dessous :" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:13 +#: ../../source/docker/enable-tls.rst:12 msgid "" -"The model architecture defined in class Net() is added with Batch " -"Normalization layers accordingly." +"For testing purposes, you can generate your own self-signed certificates." +" The `Enable SSL connections `__ page contains a section that" +" will guide you through the process." msgstr "" -"L'architecture du modèle définie dans la classe Net() est ajoutée avec " -"les couches de normalisation par lots en conséquence." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:41 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:157 -msgid "You can now run your machine learning workload:" +#: ../../source/docker/enable-tls.rst:17 +msgid "" +"Because Flower containers, by default, run with a non-root user ``app``, " +"the mounted files and directories must have the proper permissions for " +"the user ID ``49999``." 
msgstr "" -"Tu peux maintenant exécuter ta charge de travail d'apprentissage " -"automatique :" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 -#, fuzzy +#: ../../source/docker/enable-tls.rst:20 msgid "" -"So far this should all look fairly familiar if you've used PyTorch " -"before. Let's take the next step and use what we've built to create a " -"federated learning system within FedBN, the system consists of one server" -" and two clients." +"For example, to change the user ID of all files in the ``certificates/`` " +"directory, you can run ``sudo chown -R 49999:49999 certificates/*``." msgstr "" -"Jusqu'à présent, tout ceci devrait te sembler assez familier si tu as " -"déjà utilisé PyTorch. Passons à l'étape suivante et utilisons ce que nous" -" avons construit pour créer un système d'apprentissage fédéré au sein de " -"FedBN, le système se compose d'un serveur et de deux clients." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:51 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:167 -msgid "Federated Training" -msgstr "Formation fédérée" +#: ../../source/docker/enable-tls.rst:23 +#: ../../source/docker/persist-superlink-state.rst:15 +msgid "" +"If you later want to delete the directory, you can change the user ID " +"back to the current user ID by running ``sudo chown -R $USER:$(id -gn) " +"state``." +msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 +#: ../../source/docker/enable-tls.rst:27 #, fuzzy +msgid "SuperLink" +msgstr "flower-superlink" + +#: ../../source/docker/enable-tls.rst:29 msgid "" -"If you have read :doc:`Example: PyTorch - From Centralized To Federated " -"`, the following parts are" -" easy to follow, only :code:`get_parameters` and :code:`set_parameters` " -"function in :code:`client.py` needed to revise. If not, please read the " -":doc:`Example: PyTorch - From Centralized To Federated `. first." 
+"Assuming all files we need are in the local ``certificates`` directory, " +"we can use the flag ``--volume`` to mount the local directory into the " +"``/app/certificates/`` directory of the container:" msgstr "" -"Si vous avez lu `Exemple : PyTorch - From Centralized To Federated " -"`_, les parties suivantes sont faciles à suivre, seules " -"les fonctions :code:`get_parameters` et :code:`set_parameters` dans " -":code:`client.py` ont besoin d'être révisées. Si ce n'est pas le cas, " -"veuillez lire `Exemple : PyTorch - From Centralized To Federated " -"`. d'abord." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:56 +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "Understanding the command" +msgstr "Entraîne le modèle" + +#: ../../source/docker/enable-tls.rst:45 ../../source/docker/enable-tls.rst:92 +#: ../../source/docker/enable-tls.rst:125 +#: ../../source/docker/tutorial-quickstart-docker.rst:66 +#: ../../source/docker/tutorial-quickstart-docker.rst:103 +#: ../../source/docker/tutorial-quickstart-docker.rst:217 +#: ../../source/docker/tutorial-quickstart-docker.rst:305 +msgid "``docker run``: This tells Docker to run a container from an image." +msgstr "" + +#: ../../source/docker/enable-tls.rst:46 ../../source/docker/enable-tls.rst:93 +#: ../../source/docker/enable-tls.rst:126 +#: ../../source/docker/tutorial-quickstart-docker.rst:67 +#: ../../source/docker/tutorial-quickstart-docker.rst:104 +#: ../../source/docker/tutorial-quickstart-docker.rst:218 +#: ../../source/docker/tutorial-quickstart-docker.rst:306 +msgid "``--rm``: Remove the container once it is stopped or the command exits." +msgstr "" + +#: ../../source/docker/enable-tls.rst msgid "" -"Our example consists of one *server* and two *clients*. In FedBN, " -":code:`server.py` keeps unchanged, we can start the server directly." 
+"``--volume ./certificates/:/app/certificates/:ro``: Mount the " +"``certificates`` directory in" msgstr "" -"Notre exemple consiste en un *serveur* et deux *clients*. Dans FedBN, " -":code:`server.py` reste inchangé, nous pouvons démarrer le serveur " -"directement." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:62 +#: ../../source/docker/enable-tls.rst msgid "" -"Finally, we will revise our *client* logic by changing " -":code:`get_parameters` and :code:`set_parameters` in :code:`client.py`, " -"we will exclude batch normalization parameters from model parameter list " -"when sending to or receiving from the server." +"the current working directory of the host machine as a read-only volume " +"at the" msgstr "" -"Enfin, nous allons réviser notre logique *client* en modifiant " -":code:`get_parameters` et :code:`set_parameters` dans :code:`client.py`, " -"nous allons exclure les paramètres de normalisation des lots de la liste " -"des paramètres du modèle lors de l'envoi ou de la réception depuis le " -"serveur." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:85 -msgid "Now, you can now open two additional terminal windows and run" -msgstr "Tu peux maintenant ouvrir deux autres fenêtres de terminal et lancer" +#: ../../source/docker/enable-tls.rst +msgid "``/app/certificates`` directory inside the container." +msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:91 +#: ../../source/docker/enable-tls.rst msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your (previously centralized) PyTorch project run federated " -"learning with FedBN strategy across two clients. Congratulations!" 
+"This allows the container to access the TLS certificates that are stored " +"in the certificates" msgstr "" -"dans chaque fenêtre (assure-toi que le serveur est toujours en cours " -"d'exécution avant de le faire) et tu verras ton projet PyTorch " -"(auparavant centralisé) exécuter l'apprentissage fédéré avec la stratégie" -" FedBN sur deux clients. Félicitations !" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:94 -#: ../../source/example-jax-from-centralized-to-federated.rst:277 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:310 -#: ../../source/tutorial-quickstart-jax.rst:283 -msgid "Next Steps" -msgstr "Prochaines étapes" +#: ../../source/docker/enable-tls.rst +msgid "directory." +msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:96 -#, fuzzy +#: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"The full source code for this example can be found `here " -"`_. Our example is of course somewhat over-" -"simplified because both clients load the exact same dataset, which isn't " -"realistic. You're now prepared to explore this topic further. How about " -"using different subsets of CIFAR-10 on each client? How about adding more" -" clients?" +":substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -"Le code source complet de cet exemple se trouve ici " -"`_. Notre exemple est bien sûr un peu trop " -"simplifié parce que les deux clients chargent exactement le même ensemble" -" de données, ce qui n'est pas réaliste. Tu es maintenant prêt à " -"approfondir ce sujet. Pourquoi ne pas utiliser différents sous-ensembles " -"de CIFAR-10 sur chaque client ? Pourquoi ne pas ajouter d'autres clients " -"?" 
- -#: ../../source/example-jax-from-centralized-to-federated.rst:2 -msgid "Example: JAX - Run JAX Federated" -msgstr "Exemple : JAX - Exécuter JAX Federated" -#: ../../source/example-jax-from-centralized-to-federated.rst:4 -#: ../../source/tutorial-quickstart-jax.rst:10 -#, fuzzy +#: ../../source/docker/enable-tls.rst msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing JAX workload. We are using JAX to train a linear " -"regression model on a scikit-learn dataset. We will structure the example" -" similar to our `PyTorch - From Centralized To Federated " -"`_ walkthrough. First, we build a centralized " -"training approach based on the `Linear Regression with JAX " -"`_" -" tutorial`. Then, we build upon the centralized training code to run the " -"training in a federated fashion." +"tag of the image. The tag :substitution-code:`|stable_flwr_version|` " +"represents a specific version of the image." msgstr "" -"Ce tutoriel te montrera comment utiliser Flower pour construire une " -"version fédérée d'une charge de travail JAX existante. Nous utilisons JAX" -" pour entraîner un modèle de régression linéaire sur un ensemble de " -"données scikit-learn. Nous structurerons l'exemple de la même manière que" -" notre présentation `PyTorch - De la centralisation à la fédération " -"`_. Tout d'abord, nous construisons une approche" -" d'entraînement centralisée basée sur le tutoriel `Régression linéaire " -"avec JAX " -"`_." -" Ensuite, nous nous appuyons sur le code d'entraînement centralisé pour " -"exécuter l'entraînement de manière fédérée." 
-#: ../../source/example-jax-from-centralized-to-federated.rst:10 -#: ../../source/tutorial-quickstart-jax.rst:16 +#: ../../source/docker/enable-tls.rst msgid "" -"Before we start building our JAX example, we need install the packages " -":code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`:" +"``--ssl-ca-certfile certificates/ca.crt``: Specify the location of the CA" +" certificate file" msgstr "" -"Avant de commencer à construire notre exemple JAX, nous devons installer " -"les paquets :code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, et " -":code:`flwr` :" -#: ../../source/example-jax-from-centralized-to-federated.rst:18 -#: ../../source/tutorial-quickstart-jax.rst:24 -msgid "Linear Regression with JAX" -msgstr "Régression linéaire avec JAX" +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "inside the container." +msgstr "Utiliser les conteneurs VS Code Remote" -#: ../../source/example-jax-from-centralized-to-federated.rst:20 -#: ../../source/tutorial-quickstart-jax.rst:26 +#: ../../source/docker/enable-tls.rst msgid "" -"We begin with a brief description of the centralized training code based " -"on a :code:`Linear Regression` model. If you want a more in-depth " -"explanation of what's going on then have a look at the official `JAX " -"documentation `_." +"The ``certificates/ca.crt`` file is a certificate that is used to verify " +"the identity of the" msgstr "" -"Nous commençons par une brève description du code d'entraînement " -"centralisé basé sur un modèle :code:`Régression linéaire`. Si tu veux une" -" explication plus approfondie de ce qui se passe, jette un coup d'œil à " -"la documentation officielle `JAX `_." -#: ../../source/example-jax-from-centralized-to-federated.rst:23 -#: ../../source/tutorial-quickstart-jax.rst:29 +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "SuperLink." 
+msgstr "flower-superlink" + +#: ../../source/docker/enable-tls.rst msgid "" -"Let's create a new file called :code:`jax_training.py` with all the " -"components required for a traditional (centralized) linear regression " -"training. First, the JAX packages :code:`jax` and :code:`jaxlib` need to " -"be imported. In addition, we need to import :code:`sklearn` since we use " -":code:`make_regression` for the dataset and :code:`train_test_split` to " -"split the dataset into a training and test set. You can see that we do " -"not yet import the :code:`flwr` package for federated learning. This will" -" be done later." +"``--ssl-certfile certificates/server.pem``: Specify the location of the " +"SuperLink's" msgstr "" -"Créons un nouveau fichier appelé :code:`jax_training.py` avec tous les " -"composants nécessaires pour un apprentissage traditionnel (centralisé) de" -" la régression linéaire. Tout d'abord, les paquets JAX :code:`jax` et " -":code:`jaxlib` doivent être importés. En outre, nous devons importer " -":code:`sklearn` puisque nous utilisons :code:`make_regression` pour le " -"jeu de données et :code:`train_test_split` pour diviser le jeu de données" -" en un jeu d'entraînement et un jeu de test. Tu peux voir que nous " -"n'avons pas encore importé le paquet :code:`flwr` pour l'apprentissage " -"fédéré, ce qui sera fait plus tard." -#: ../../source/example-jax-from-centralized-to-federated.rst:37 -#: ../../source/tutorial-quickstart-jax.rst:43 -msgid "" -"The :code:`load_data()` function loads the mentioned training and test " -"sets." +#: ../../source/docker/enable-tls.rst +msgid "TLS certificate file inside the container." msgstr "" -"La fonction :code:`load_data()` charge les ensembles d'entraînement et de" -" test mentionnés." 
-#: ../../source/example-jax-from-centralized-to-federated.rst:47 -#: ../../source/tutorial-quickstart-jax.rst:53 +#: ../../source/docker/enable-tls.rst msgid "" -"The model architecture (a very simple :code:`Linear Regression` model) is" -" defined in :code:`load_model()`." +"The ``certificates/server.pem`` file is used to identify the SuperLink " +"and to encrypt the" msgstr "" -"L'architecture du modèle (un modèle :code:`Régression linéaire` très " -"simple) est définie dans :code:`load_model()`." -#: ../../source/example-jax-from-centralized-to-federated.rst:59 -#: ../../source/tutorial-quickstart-jax.rst:65 -msgid "" -"We now need to define the training (function :code:`train()`), which " -"loops over the training set and measures the loss (function " -":code:`loss_fn()`) for each batch of training examples. The loss function" -" is separate since JAX takes derivatives with a :code:`grad()` function " -"(defined in the :code:`main()` function and called in :code:`train()`)." +#: ../../source/docker/enable-tls.rst +msgid "data that is transmitted over the network." msgstr "" -"Nous devons maintenant définir l'entraînement (fonction :code:`train()`)," -" qui boucle sur l'ensemble d'entraînement et mesure la perte (fonction " -":code:`loss_fn()`) pour chaque lot d'exemples d'entraînement. La fonction" -" de perte est séparée puisque JAX prend des dérivés avec une fonction " -":code:`grad()` (définie dans la fonction :code:`main()` et appelée dans " -":code:`train()`)." -#: ../../source/example-jax-from-centralized-to-federated.rst:77 -#: ../../source/tutorial-quickstart-jax.rst:83 +#: ../../source/docker/enable-tls.rst msgid "" -"The evaluation of the model is defined in the function " -":code:`evaluation()`. The function takes all test examples and measures " -"the loss of the linear regression model." 
+"``--ssl-keyfile certificates/server.key``: Specify the location of the " +"SuperLink's" +msgstr "" + +#: ../../source/docker/enable-tls.rst +msgid "TLS private key file inside the container." msgstr "" -"L'évaluation du modèle est définie dans la fonction :code:`evaluation()`." -" La fonction prend tous les exemples de test et mesure la perte du modèle" -" de régression linéaire." -#: ../../source/example-jax-from-centralized-to-federated.rst:88 -#: ../../source/tutorial-quickstart-jax.rst:94 +#: ../../source/docker/enable-tls.rst msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our model using JAX. " -"As already mentioned, the :code:`jax.grad()` function is defined in " -":code:`main()` and passed to :code:`train()`." +"The ``certificates/server.key`` file is used to decrypt the data that is " +"transmitted over" msgstr "" -"Après avoir défini le chargement des données, l'architecture du modèle, " -"l'entraînement et l'évaluation, nous pouvons tout assembler et entraîner " -"notre modèle à l'aide de JAX. Comme nous l'avons déjà mentionné, la " -"fonction :code:`jax.grad()` est définie dans :code:`main()` et transmise " -"à :code:`train()`." -#: ../../source/example-jax-from-centralized-to-federated.rst:105 -#: ../../source/tutorial-quickstart-jax.rst:111 -msgid "You can now run your (centralized) JAX linear regression workload:" +#: ../../source/docker/enable-tls.rst +msgid "the network." msgstr "" -"Tu peux maintenant exécuter ta charge de travail (centralisée) de " -"régression linéaire JAX :" -#: ../../source/example-jax-from-centralized-to-federated.rst:111 -#: ../../source/tutorial-quickstart-jax.rst:117 +#: ../../source/docker/enable-tls.rst:72 +#, fuzzy +msgid "SuperNode" +msgstr "flower-superlink" + +#: ../../source/docker/enable-tls.rst:74 msgid "" -"So far this should all look fairly familiar if you've used JAX before. 
" -"Let's take the next step and use what we've built to create a simple " -"federated learning system consisting of one server and two clients." +"Assuming that the ``ca.crt`` certificate already exists locally, we can " +"use the flag ``--volume`` to mount the local certificate into the " +"container's ``/app/`` directory." msgstr "" -"Jusqu'à présent, tout cela devrait te sembler assez familier si tu as " -"déjà utilisé JAX. Passons à l'étape suivante et utilisons ce que nous " -"avons construit pour créer un simple système d'apprentissage fédéré " -"composé d'un serveur et de deux clients." -#: ../../source/example-jax-from-centralized-to-federated.rst:115 -#: ../../source/tutorial-quickstart-jax.rst:121 -msgid "JAX meets Flower" -msgstr "JAX rencontre Flower" - -#: ../../source/example-jax-from-centralized-to-federated.rst:117 -#: ../../source/tutorial-quickstart-jax.rst:123 +#: ../../source/docker/enable-tls.rst:79 msgid "" -"The concept of federating an existing workload is always the same and " -"easy to understand. We have to start a *server* and then use the code in " -":code:`jax_training.py` for the *clients* that are connected to the " -"*server*. The *server* sends model parameters to the clients. The " -"*clients* run the training and update the parameters. The updated " -"parameters are sent back to the *server*, which averages all received " -"parameter updates. This describes one round of the federated learning " -"process, and we repeat this for multiple rounds." +"If you're generating self-signed certificates and the ``ca.crt`` " +"certificate doesn't exist on the SuperNode, you can copy it over after " +"the generation step." +msgstr "" + +#: ../../source/docker/enable-tls.rst +msgid "``--volume ./ca.crt:/app/ca.crt/:ro``: Mount the ``ca.crt`` file from the" msgstr "" -"Le concept de fédération d'une charge de travail existante est toujours " -"le même et facile à comprendre. 
Nous devons démarrer un *serveur*, puis " -"utiliser le code dans :code:`jax_training.py` pour les *clients* qui sont" -" connectés au *serveur*.Le *serveur* envoie les paramètres du modèle aux " -"clients.Les *clients* exécutent la formation et mettent à jour les " -"paramètres.Les paramètres mis à jour sont renvoyés au *serveur*, qui fait" -" la moyenne de toutes les mises à jour de paramètres reçues.Ceci décrit " -"un tour du processus d'apprentissage fédéré, et nous répétons cette " -"opération pour plusieurs tours." -#: ../../source/example-jax-from-centralized-to-federated.rst:123 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:181 -#: ../../source/tutorial-quickstart-jax.rst:129 +#: ../../source/docker/enable-tls.rst msgid "" -"Our example consists of one *server* and two *clients*. Let's set up " -":code:`server.py` first. The *server* needs to import the Flower package " -":code:`flwr`. Next, we use the :code:`start_server` function to start a " -"server and tell it to perform three rounds of federated learning." +"current working directory of the host machine as a read-only volume at " +"the ``/app/ca.crt``" msgstr "" -"Notre exemple consiste en un *serveur* et deux *clients*. Commençons par " -"configurer :code:`server.py`. Le *serveur* doit importer le paquet Flower" -" :code:`flwr`. Ensuite, nous utilisons la fonction :code:`start_server` " -"pour démarrer un serveur et lui demander d'effectuer trois cycles " -"d'apprentissage fédéré." -#: ../../source/example-jax-from-centralized-to-federated.rst:133 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 -#: ../../source/tutorial-quickstart-jax.rst:139 -msgid "We can already start the *server*:" -msgstr "Nous pouvons déjà démarrer le *serveur* :" +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "directory inside the container." 
+msgstr "Utiliser les conteneurs VS Code Remote" -#: ../../source/example-jax-from-centralized-to-federated.rst:139 -#: ../../source/tutorial-quickstart-jax.rst:145 +#: ../../source/docker/enable-tls.rst msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined JAX training in :code:`jax_training.py`. Our" -" *client* needs to import :code:`flwr`, but also :code:`jax` and " -":code:`jaxlib` to update the parameters on our JAX model:" +":substitution-code:`flwr/supernode:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -"Enfin, nous allons définir la logique de notre *client* dans " -":code:`client.py` et nous appuyer sur la formation JAX définie " -"précédemment dans :code:`jax_training.py`. Notre *client* doit importer " -":code:`flwr`, mais aussi :code:`jax` et :code:`jaxlib` pour mettre à jour" -" les paramètres de notre modèle JAX :" -#: ../../source/example-jax-from-centralized-to-federated.rst:154 -#: ../../source/tutorial-quickstart-jax.rst:160 +#: ../../source/docker/enable-tls.rst msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`FlowerClient`. :code:`NumPyClient` is slightly " -"easier to implement than :code:`Client` if you use a framework with good " -"NumPy interoperability (like JAX) because it avoids some of the " -"boilerplate that would otherwise be necessary. 
:code:`FlowerClient` needs" -" to implement four methods, two methods for getting/setting model " -"parameters, one method for training the model, and one method for testing" -" the model:" +"``--root-certificates ca.crt``: This specifies the location of the CA " +"certificate file" msgstr "" -"L'implémentation d'un *client* Flower signifie essentiellement " -"l'implémentation d'une sous-classe de :code:`flwr.client.Client` ou " -":code:`flwr.client.NumPyClient`. Notre implémentation sera basée sur " -":code:`flwr.client.NumPyClient` et nous l'appellerons " -":code:`FlowerClient`. :code:`NumPyClient` est légèrement plus facile à " -"implémenter que :code:`Client` si vous utilisez un framework avec une " -"bonne interopérabilité NumPy (comme JAX) parce qu'il évite une partie du " -"boilerplate qui serait autrement nécessaire. :code:`FlowerClient` doit " -"implémenter quatre méthodes, deux méthodes pour obtenir/régler les " -"paramètres du modèle, une méthode pour former le modèle, et une méthode " -"pour tester le modèle :" -#: ../../source/example-jax-from-centralized-to-federated.rst:161 -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid ":code:`set_parameters (optional)`" -msgstr ":code:`set_parameters (optional)`" +#: ../../source/docker/enable-tls.rst +msgid "The ``ca.crt`` file is used to verify the identity of the SuperLink." 
+msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:160 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -#: ../../source/tutorial-quickstart-jax.rst:166 -msgid "" -"set the model parameters on the local model that are received from the " -"server" -msgstr "règle les paramètres du modèle local reçus du serveur" +#: ../../source/docker/enable-tls.rst:105 +msgid "SuperExec" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:161 -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid "transform parameters to NumPy :code:`ndarray`'s" -msgstr "transforme les paramètres en NumPy :code:`ndarray`'s" +#: ../../source/docker/enable-tls.rst:107 +msgid "" +"Assuming all files we need are in the local ``certificates`` directory " +"where the SuperExec will be executed from, we can use the flag " +"``--volume`` to mount the local directory into the ``/app/certificates/``" +" directory of the container:" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:162 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:220 -#: ../../source/tutorial-quickstart-jax.rst:168 +#: ../../source/docker/enable-tls.rst msgid "" -"loop over the list of model parameters received as NumPy " -":code:`ndarray`'s (think list of neural network layers)" +":substitution-code:`flwr/superexec:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -"boucle sur la liste des paramètres du modèle reçus sous forme de NumPy " -":code:`ndarray`'s (pensez à la liste des couches du réseau neuronal)" -#: ../../source/example-jax-from-centralized-to-federated.rst:163 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 -#: ../../source/tutorial-quickstart-jax.rst:169 -#: ../../source/tutorial-quickstart-pytorch.rst:155 -#: ../../source/tutorial-quickstart-scikitlearn.rst:118 -msgid ":code:`get_parameters`" -msgstr ":code:`get_parameters`" +#: 
../../source/docker/enable-tls.rst +msgid "SuperExec." +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:164 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:222 -#: ../../source/tutorial-quickstart-jax.rst:170 +#: ../../source/docker/enable-tls.rst msgid "" -"get the model parameters and return them as a list of NumPy " -":code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects)" +"``--ssl-certfile certificates/server.pem``: Specify the location of the " +"SuperExec's" msgstr "" -"récupère les paramètres du modèle et les renvoie sous forme de liste de " -":code:`ndarray` NumPy (ce qui correspond à ce que " -":code:`flwr.client.NumPyClient` attend)" -#: ../../source/example-jax-from-centralized-to-federated.rst:167 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 -#: ../../source/tutorial-quickstart-pytorch.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 -msgid ":code:`fit`" -msgstr ":code:`fit`" - -#: ../../source/example-jax-from-centralized-to-federated.rst:166 -#: ../../source/example-jax-from-centralized-to-federated.rst:170 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:224 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:228 -#: ../../source/tutorial-quickstart-jax.rst:172 -#: ../../source/tutorial-quickstart-jax.rst:176 +#: ../../source/docker/enable-tls.rst msgid "" -"update the parameters of the local model with the parameters received " -"from the server" +"The ``certificates/server.pem`` file is used to identify the SuperExec " +"and to encrypt the" msgstr "" -"mettre à jour les paramètres du modèle local avec les paramètres reçus du" -" serveur" -#: ../../source/example-jax-from-centralized-to-federated.rst:167 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 -msgid "train the model on the local 
training set" -msgstr "entraîne le modèle sur l'ensemble d'apprentissage local" +#: ../../source/docker/enable-tls.rst +msgid "" +"``--ssl-keyfile certificates/server.key``: Specify the location of the " +"SuperExec's" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:168 -#: ../../source/tutorial-quickstart-jax.rst:174 -msgid "get the updated local model parameters and return them to the server" +#: ../../source/docker/enable-tls.rst +msgid "" +"``--executor-config root-" +"certificates=\\\"certificates/superlink_ca.crt\\\"``: Specify the" msgstr "" -"récupère les paramètres du modèle local mis à jour et les renvoie au " -"serveur" -#: ../../source/example-jax-from-centralized-to-federated.rst:172 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 -#: ../../source/tutorial-quickstart-jax.rst:178 -#: ../../source/tutorial-quickstart-pytorch.rst:164 -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 -msgid ":code:`evaluate`" -msgstr ":code:`évaluer`" +#: ../../source/docker/enable-tls.rst +msgid "" +"location of the CA certificate file inside the container that the " +"SuperExec executor" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:171 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:229 -#: ../../source/tutorial-quickstart-jax.rst:177 -msgid "evaluate the updated model on the local test set" -msgstr "évaluer le modèle mis à jour sur l'ensemble de test local" +#: ../../source/docker/enable-tls.rst +msgid "should use to verify the SuperLink's identity." 
+msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:172 -#: ../../source/tutorial-quickstart-jax.rst:178 -msgid "return the local loss to the server" -msgstr "renvoie la perte locale au serveur" +#: ../../source/docker/index.rst:2 +msgid "Run Flower using Docker" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:174 -#: ../../source/tutorial-quickstart-jax.rst:180 +#: ../../source/docker/index.rst:4 msgid "" -"The challenging part is to transform the JAX model parameters from " -":code:`DeviceArray` to :code:`NumPy ndarray` to make them compatible with" -" `NumPyClient`." +"Start your Flower journey with our pre-made Docker images on Docker Hub, " +"supporting ``amd64`` and ``arm64v8`` architectures." msgstr "" -"La partie la plus difficile consiste à transformer les paramètres du " -"modèle JAX de :code:`DeviceArray` en :code:`NumPy ndarray` pour les " -"rendre compatibles avec `NumPyClient`." -#: ../../source/example-jax-from-centralized-to-federated.rst:176 -#: ../../source/tutorial-quickstart-jax.rst:182 +#: ../../source/docker/index.rst:7 msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`evaluate()` previously " -"defined in :code:`jax_training.py`. So what we really do here is we tell " -"Flower through our :code:`NumPyClient` subclass which of our already " -"defined functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +"Our Quickstart guide walks you through containerizing a Flower project " +"and running it end to end using Docker." msgstr "" -"Les deux méthodes :code:`NumPyClient` :code:`fit` et :code:`evaluate` " -"utilisent les fonctions :code:`train()` et :code:`evaluate()` définies " -"précédemment dans :code:`jax_training.py`. 
Ce que nous faisons vraiment " -"ici, c'est que nous indiquons à Flower, par le biais de notre sous-classe" -" :code:`NumPyClient`, laquelle de nos fonctions déjà définies doit être " -"appelée pour l'entraînement et l'évaluation. Nous avons inclus des " -"annotations de type pour te donner une meilleure compréhension des types " -"de données qui sont transmis." -#: ../../source/example-jax-from-centralized-to-federated.rst:245 -#: ../../source/tutorial-quickstart-jax.rst:251 -msgid "Having defined the federation process, we can run it." -msgstr "Après avoir défini le processus de fédération, nous pouvons l'exécuter." +#: ../../source/docker/index.rst:11 +#, fuzzy +msgid "Getting Started" +msgstr "Pour commencer" -#: ../../source/example-jax-from-centralized-to-federated.rst:268 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:301 -#: ../../source/tutorial-quickstart-jax.rst:274 -msgid "And that's it. You can now open two additional terminal windows and run" +#: ../../source/docker/index.rst:19 +msgid "Running in Production" +msgstr "" + +#: ../../source/docker/index.rst:28 +#, fuzzy +msgid "Advanced Options" +msgstr "Options d'installation avancées" + +#: ../../source/docker/index.rst:40 +#, fuzzy +msgid "Run Flower using Docker Compose" +msgstr "Serveur de Flower" + +#: ../../source/docker/persist-superlink-state.rst:2 +msgid "Persist the State of the SuperLink" msgstr "" -"Tu peux maintenant ouvrir deux autres fenêtres de terminal et exécuter " -"les commandes suivantes" -#: ../../source/example-jax-from-centralized-to-federated.rst:274 -#: ../../source/tutorial-quickstart-jax.rst:280 +#: ../../source/docker/persist-superlink-state.rst:4 msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your JAX project run federated learning across two clients. " -"Congratulations!" +"By default, the Flower SuperLink keeps its state in-memory. 
When using " +"the Docker flag ``--rm``, the state is not persisted between container " +"starts." msgstr "" -"dans chaque fenêtre (assure-toi que le serveur est toujours en cours " -"d'exécution avant de le faire) et tu verras que ton projet JAX exécute " -"l'apprentissage fédéré sur deux clients. Félicitations !" -#: ../../source/example-jax-from-centralized-to-federated.rst:279 -#: ../../source/tutorial-quickstart-jax.rst:285 -#, fuzzy +#: ../../source/docker/persist-superlink-state.rst:7 msgid "" -"The source code of this example was improved over time and can be found " -"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " -"clients load the same dataset." +"If you want to persist the state of the SuperLink on your host system, " +"all you need to do is specify a directory where you want to save the file" +" on your host system and a name for the database file." msgstr "" -"Le code source de cet exemple a été amélioré au fil du temps et peut être" -" trouvé ici : `Quickstart JAX " -"`_. " -"Notre exemple est quelque peu simplifié à l'extrême car les deux clients " -"chargent le même jeu de données." -#: ../../source/example-jax-from-centralized-to-federated.rst:282 -#: ../../source/tutorial-quickstart-jax.rst:288 +#: ../../source/docker/persist-superlink-state.rst:11 msgid "" -"You're now prepared to explore this topic further. How about using a more" -" sophisticated model or using a different dataset? How about adding more " -"clients?" +"By default, the SuperLink container runs with a non-root user called " +"``app`` with the user ID ``49999``. It is recommended to create a new " +"directory and change the user ID of the directory to ``49999`` to ensure " +"the mounted directory has the proper permissions." msgstr "" -"Tu es maintenant prêt à approfondir ce sujet. Pourquoi ne pas utiliser un" -" modèle plus sophistiqué ou un ensemble de données différent ? Pourquoi " -"ne pas ajouter d'autres clients ?" 
- -#: ../../source/example-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: PyTorch - From Centralized To Federated" -msgstr "Exemple : PyTorch - De la centralisation à la fédération" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:4 +#: ../../source/docker/persist-superlink-state.rst:21 msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload. We are using PyTorch to" -" train a Convolutional Neural Network on the CIFAR-10 dataset. First, we " -"introduce this machine learning task with a centralized training approach" -" based on the `Deep Learning with PyTorch " -"`_ " -"tutorial. Then, we build upon the centralized training code to run the " -"training in a federated fashion." +"In the example below, we create a new directory called ``state``, change " +"the user ID and tell Docker via the flag ``--volume`` to mount the local " +"``state`` directory into the ``/app/state`` directory of the container. " +"Lastly, we use the flag ``--database`` to specify the name of the " +"database file." msgstr "" -"Ce tutoriel te montrera comment utiliser Flower pour construire une " -"version fédérée d'une charge de travail d'apprentissage automatique " -"existante. Nous utilisons PyTorch pour entraîner un réseau neuronal " -"convolutif sur l'ensemble de données CIFAR-10. Tout d'abord, nous " -"présentons cette tâche d'apprentissage automatique avec une approche " -"d'entraînement centralisée basée sur le tutoriel `Deep Learning with " -"PyTorch " -"`_. " -"Ensuite, nous nous appuyons sur le code d'entraînement centralisé pour " -"exécuter l'entraînement de manière fédérée." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 +#: ../../source/docker/persist-superlink-state.rst:36 msgid "" -"We begin with a brief description of the centralized CNN training code. 
" -"If you want a more in-depth explanation of what's going on then have a " -"look at the official `PyTorch tutorial " -"`_." +"As soon as the SuperLink starts, the file ``state.db`` is created in the " +"``state`` directory on your host system. If the file already exists, the " +"SuperLink tries to restore the state from the file. To start the " +"SuperLink with an empty database, ensure that there is no database called" +" ``state.db`` in the ``state`` directory (``rm state.db``) before you " +"execute the ``docker run`` command above." msgstr "" -"Nous commençons par une brève description du code d'entraînement CNN " -"centralisé. Si tu veux une explication plus approfondie de ce qui se " -"passe, jette un coup d'œil au tutoriel officiel `PyTorch " -"`_." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:15 -msgid "" -"Let's create a new file called :code:`cifar.py` with all the components " -"required for a traditional (centralized) training on CIFAR-10. First, all" -" required packages (such as :code:`torch` and :code:`torchvision`) need " -"to be imported. You can see that we do not import any package for " -"federated learning. You can keep all these imports as they are even when " -"we add the federated learning components at a later point." +#: ../../source/docker/pin-version.rst:2 +msgid "Pin a Docker Image to a Specific Version" msgstr "" -"Créons un nouveau fichier appelé :code:`cifar.py` avec tous les " -"composants requis pour une formation traditionnelle (centralisée) sur le " -"CIFAR-10. Tout d'abord, tous les paquets requis (tels que :code:`torch` " -"et :code:`torchvision`) doivent être importés. Tu peux voir que nous " -"n'importons aucun paquet pour l'apprentissage fédéré. Tu peux conserver " -"toutes ces importations telles quelles même lorsque nous ajouterons les " -"composants d'apprentissage fédéré à un moment ultérieur." 
-#: ../../source/example-pytorch-from-centralized-to-federated.rst:32 +#: ../../source/docker/pin-version.rst:4 msgid "" -"As already mentioned we will use the CIFAR-10 dataset for this machine " -"learning workload. The model architecture (a very simple Convolutional " -"Neural Network) is defined in :code:`class Net()`." +"It may happen that we update the images behind the tags. Such updates " +"usually include security updates of system dependencies that should not " +"change the functionality of Flower. However, if you want to ensure that " +"you use a fixed version of the Docker image in your deployments, you can " +"`specify the digest " +"`_ of the image instead of the tag." msgstr "" -"Comme nous l'avons déjà mentionné, nous utiliserons l'ensemble de données" -" CIFAR-10 pour cette charge de travail d'apprentissage automatique. " -"L'architecture du modèle (un réseau neuronal convolutif très simple) est " -"définie dans :code:`class Net()`." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:56 +#: ../../source/docker/pin-version.rst:14 msgid "" -"The :code:`load_data()` function loads the CIFAR-10 training and test " -"sets. The :code:`transform` normalized the data after loading." +"The following command returns the current image digest referenced by the " +":substitution-code:`superlink:|stable_flwr_version|` tag:" msgstr "" -"La fonction :code:`load_data()` charge les ensembles d'entraînement et de" -" test CIFAR-10. La fonction :code:`transform` normalise les données après" -" leur chargement." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:74 -msgid "" -"We now need to define the training (function :code:`train()`) which loops" -" over the training set, measures the loss, backpropagates it, and then " -"takes one optimizer step for each batch of training examples." 
+#: ../../source/docker/pin-version.rst:23 +msgid "This will output" msgstr "" -"Nous devons maintenant définir la formation (fonction :code:`train()`) " -"qui passe en boucle sur l'ensemble de la formation, mesure la perte, la " -"rétropropage, puis effectue une étape d'optimisation pour chaque lot " -"d'exemples de formation." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:76 -msgid "" -"The evaluation of the model is defined in the function :code:`test()`. " -"The function loops over all test samples and measures the loss of the " -"model based on the test dataset." +#: ../../source/docker/pin-version.rst:30 +msgid "Next, we can pin the digest when running a new SuperLink container:" msgstr "" -"L'évaluation du modèle est définie dans la fonction :code:`test()`. La " -"fonction boucle sur tous les échantillons de test et mesure la perte du " -"modèle en fonction de l'ensemble des données de test." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:136 -msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our CNN on CIFAR-10." +#: ../../source/docker/run-as-root-user.rst:2 +msgid "Run with Root User Privileges" msgstr "" -"Après avoir défini le chargement des données, l'architecture du modèle, " -"la formation et l'évaluation, nous pouvons tout mettre ensemble et former" -" notre CNN sur CIFAR-10." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:163 +#: ../../source/docker/run-as-root-user.rst:4 msgid "" -"So far, this should all look fairly familiar if you've used PyTorch " -"before. Let's take the next step and use what we've built to create a " -"simple federated learning system consisting of one server and two " -"clients." +"Flower Docker images, by default, run with a non-root user " +"(username/groupname: ``app``, UID/GID: ``49999``). 
Using root user is " +"**not recommended** unless it is necessary for specific tasks during the " +"build process." msgstr "" -"Jusqu'à présent, tout cela devrait te sembler assez familier si tu as " -"déjà utilisé PyTorch. Passons à l'étape suivante et utilisons ce que nous" -" avons construit pour créer un simple système d'apprentissage fédéré " -"composé d'un serveur et de deux clients." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:169 +#: ../../source/docker/run-as-root-user.rst:8 msgid "" -"The simple machine learning project discussed in the previous section " -"trains the model on a single dataset (CIFAR-10), we call this centralized" -" learning. This concept of centralized learning, as shown in the previous" -" section, is probably known to most of you, and many of you have used it " -"previously. Normally, if you'd want to run machine learning workloads in " -"a federated fashion, then you'd have to change most of your code and set " -"everything up from scratch. This can be a considerable effort." +"Always make sure to run the container as a non-root user in production to" +" maintain security best practices." msgstr "" -"Le projet simple d'apprentissage automatique discuté dans la section " -"précédente entraîne le modèle sur un seul ensemble de données (CIFAR-10)," -" nous appelons cela l'apprentissage centralisé. Ce concept " -"d'apprentissage centralisé, comme le montre la section précédente, est " -"probablement connu de la plupart d'entre vous, et beaucoup d'entre vous " -"l'ont déjà utilisé. Normalement, si tu veux exécuter des charges de " -"travail d'apprentissage automatique de manière fédérée, tu dois alors " -"changer la plupart de ton code et tout mettre en place à partir de zéro, " -"ce qui peut représenter un effort considérable." 
-#: ../../source/example-pytorch-from-centralized-to-federated.rst:173 -msgid "" -"However, with Flower you can evolve your pre-existing code into a " -"federated learning setup without the need for a major rewrite." +#: ../../source/docker/run-as-root-user.rst:12 +msgid "Run a Container with Root User Privileges" msgstr "" -"Cependant, avec Flower, tu peux faire évoluer ton code préexistant vers " -"une configuration d'apprentissage fédéré sans avoir besoin d'une " -"réécriture majeure." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:175 +#: ../../source/docker/run-as-root-user.rst:14 msgid "" -"The concept is easy to understand. We have to start a *server* and then " -"use the code in :code:`cifar.py` for the *clients* that are connected to " -"the *server*. The *server* sends model parameters to the clients. The " -"*clients* run the training and update the parameters. The updated " -"parameters are sent back to the *server* which averages all received " -"parameter updates. This describes one round of the federated learning " -"process and we repeat this for multiple rounds." +"Run the Docker image with the ``-u`` flag and specify ``root`` as the " +"username:" msgstr "" -"Le concept est facile à comprendre. Nous devons démarrer un *serveur* et " -"utiliser le code dans :code:`cifar.py` pour les *clients* qui sont " -"connectés au *serveur*. Le *serveur* envoie les paramètres du modèle aux " -"clients. Les *clients* exécutent la formation et mettent à jour les " -"paramètres. Les paramètres mis à jour sont renvoyés au *serveur* qui fait" -" la moyenne de toutes les mises à jour de paramètres reçues. Ceci décrit " -"un tour du processus d'apprentissage fédéré et nous répétons cette " -"opération pour plusieurs tours." 
-#: ../../source/example-pytorch-from-centralized-to-federated.rst:197 -msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined centralized training in :code:`cifar.py`. " -"Our *client* needs to import :code:`flwr`, but also :code:`torch` to " -"update the parameters on our PyTorch model:" +#: ../../source/docker/run-as-root-user.rst:21 +msgid "This command will run the Docker container with root user privileges." +msgstr "" + +#: ../../source/docker/run-as-root-user.rst:24 +msgid "Run the Build Process with Root User Privileges" msgstr "" -"Enfin, nous allons définir notre logique *client* dans :code:`client.py` " -"et nous appuyer sur la formation centralisée définie précédemment dans " -":code:`cifar.py`. Notre *client* doit importer :code:`flwr`, mais aussi " -":code:`torch` pour mettre à jour les paramètres de notre modèle PyTorch :" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:213 +#: ../../source/docker/run-as-root-user.rst:26 msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier" -" to implement than :code:`Client` if you use a framework with good NumPy " -"interoperability (like PyTorch or TensorFlow/Keras) because it avoids " -"some of the boilerplate that would otherwise be necessary. " -":code:`CifarClient` needs to implement four methods, two methods for " -"getting/setting model parameters, one method for training the model, and " -"one method for testing the model:" +"If you want to switch to the root user during the build process of the " +"Docker image to install missing system dependencies, you can use the " +"``USER root`` directive within your Dockerfile." 
msgstr "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier" -" to implement than :code:`Client` if you use a framework with good NumPy " -"interoperability (like PyTorch or TensorFlow/Keras) because it avoids " -"some of the boilerplate that would otherwise be necessary. " -":code:`CifarClient` needs to implement four methods, two methods for " -"getting/setting model parameters, one method for training the model, and " -"one method for testing the model:" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -msgid ":code:`set_parameters`" -msgstr ":code:`set_parameters`" +#: ../../source/docker/run-as-root-user.rst:30 +#, fuzzy +msgid "SuperNode Dockerfile" +msgstr "Démarrer le serveur" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:226 -msgid "get the updated local model weights and return them to the server" -msgstr "récupère les poids du modèle local mis à jour et les renvoie au serveur" +#: ../../source/docker/run-as-subprocess.rst:2 +#, fuzzy +msgid "Run ClientApp as a Subprocess" +msgstr "Vérifier le format et tester le code" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 -msgid "return the local loss and accuracy to the server" -msgstr "renvoie la perte locale et la précision au serveur" +#: ../../source/docker/run-as-subprocess.rst:4 +msgid "" +"In this mode, the ClientApp is executed as a subprocess within the " +"SuperNode Docker container, rather than running in a separate container. " +"This approach reduces the number of running containers, which can be " +"beneficial for environments with limited resources. 
However, it also " +"means that the ClientApp is no longer isolated from the SuperNode, which " +"may introduce additional security concerns." +msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:232 +#: ../../source/docker/run-as-subprocess.rst:13 msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`test()` previously " -"defined in :code:`cifar.py`. So what we really do here is we tell Flower " -"through our :code:`NumPyClient` subclass which of our already defined " -"functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +"Before running the ClientApp as a subprocess, ensure that the FAB " +"dependencies have been installed in the SuperNode images. This can be " +"done by extending the SuperNode image:" msgstr "" -"Les deux méthodes :code:`NumPyClient` :code:`fit` et :code:`evaluate` " -"utilisent les fonctions :code:`train()` et :code:`test()` définies " -"précédemment dans :code:`cifar.py`. Ce que nous faisons vraiment ici, " -"c'est que nous indiquons à Flower, par le biais de notre sous-classe " -":code:`NumPyClient`, laquelle de nos fonctions déjà définies doit être " -"appelée pour l'entraînement et l'évaluation. Nous avons inclus des " -"annotations de type pour te donner une meilleure compréhension des types " -"de données qui sont transmis." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:280 +#: ../../source/docker/run-as-subprocess.rst:17 #, fuzzy +msgid "Dockerfile.supernode" +msgstr "Serveur de Flower" + +#: ../../source/docker/run-as-subprocess.rst:31 msgid "" -"All that's left to do it to define a function that loads both model and " -"data, creates a :code:`CifarClient`, and starts this client. You load " -"your data and model by using :code:`cifar.py`. 
Start :code:`CifarClient` " -"with the function :code:`fl.client.start_client()` by pointing it at the " -"same IP address we used in :code:`server.py`:" +"Next, build the SuperNode Docker image by running the following command " +"in the directory where Dockerfile is located:" msgstr "" -"Il ne reste plus qu'à définir une fonction qui charge le modèle et les " -"données, crée un :code:`CifarClient` et démarre ce client. Tu charges tes" -" données et ton modèle en utilisant :code:`cifar.py`. Démarre " -":code:`CifarClient` avec la fonction :code:`fl.client.start_client()` en " -"la faisant pointer sur la même adresse IP que celle que nous avons " -"utilisée dans :code:`server.py` :" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:307 -msgid "" -"in each window (make sure that the server is running before you do so) " -"and see your (previously centralized) PyTorch project run federated " -"learning across two clients. Congratulations!" +#: ../../source/docker/run-as-subprocess.rst:39 +msgid "Run the ClientApp as a Subprocess" msgstr "" -"dans chaque fenêtre (assure-toi que le serveur fonctionne avant de le " -"faire) et tu verras ton projet PyTorch (auparavant centralisé) exécuter " -"l'apprentissage fédéré sur deux clients. Félicitations !" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:312 -#, fuzzy +#: ../../source/docker/run-as-subprocess.rst:41 msgid "" -"The full source code for this example: `PyTorch: From Centralized To " -"Federated (Code) `_. Our example is, of course, " -"somewhat over-simplified because both clients load the exact same " -"dataset, which isn't realistic. You're now prepared to explore this topic" -" further. How about using different subsets of CIFAR-10 on each client? " -"How about adding more clients?" 
+"Start the SuperNode with the flag ``--isolation subprocess``, which tells" +" the SuperNode to execute the ClientApp as a subprocess:" msgstr "" -"Le code source complet de cet exemple : `PyTorch : From Centralized To " -"Federated (Code) `_. Notre exemple est, bien sûr, " -"un peu trop simplifié parce que les deux clients chargent exactement le " -"même ensemble de données, ce qui n'est pas réaliste. Tu es maintenant " -"prêt à explorer davantage ce sujet. Pourquoi ne pas utiliser différents " -"sous-ensembles de CIFAR-10 sur chaque client ? Pourquoi ne pas ajouter " -"d'autres clients ?" -#: ../../source/explanation-differential-privacy.rst:2 -#: ../../source/explanation-differential-privacy.rst:11 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:2 #, fuzzy -msgid "Differential Privacy" -msgstr "Confidentialité différentielle" +msgid "Run Flower Quickstart Examples with Docker Compose" +msgstr "Démarrage rapide XGBoost" -#: ../../source/explanation-differential-privacy.rst:3 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:4 msgid "" -"The information in datasets like healthcare, financial transactions, user" -" preferences, etc., is valuable and has the potential for scientific " -"breakthroughs and provides important business insights. However, such " -"data is also sensitive and there is a risk of compromising individual " -"privacy." +"Flower provides a set of `quickstart examples " +"`_ to help you get " +"started with the framework. These examples are designed to demonstrate " +"the capabilities of Flower and by default run using the Simulation " +"Engine. This guide demonstrates how to run them using Flower's Deployment" +" Engine via Docker Compose." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:6 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:12 msgid "" -"Traditional methods like anonymization alone would not work because of " -"attacks like Re-identification and Data Linkage. That's where " -"differential privacy comes in. It provides the possibility of analyzing " -"data while ensuring the privacy of individuals." +"Some quickstart examples may have limitations or requirements that " +"prevent them from running on every environment. For more information, " +"please see Limitations_." msgstr "" -#: ../../source/explanation-differential-privacy.rst:12 -msgid "" -"Imagine two datasets that are identical except for a single record (for " -"instance, Alice's data). Differential Privacy (DP) guarantees that any " -"analysis (M), like calculating the average income, will produce nearly " -"identical results for both datasets (O and O' would be similar). This " -"preserves group patterns while obscuring individual details, ensuring the" -" individual's information remains hidden in the crowd." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:15 +#: ../../source/docker/tutorial-quickstart-docker.rst:13 +msgid "Before you start, make sure that:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:-1 -msgid "DP Intro" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:20 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:22 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:17 +#: ../../source/docker/tutorial-quickstart-docker.rst:15 +msgid "The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:22 -msgid "" -"One of the most commonly used mechanisms to achieve DP is adding enough " -"noise to the output of the analysis to mask the contribution of each " -"individual in the data while preserving the overall accuracy of the " -"analysis." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:21 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:18 +#: ../../source/docker/tutorial-quickstart-docker.rst:16 +msgid "The Docker daemon is running." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:22 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:19 +msgid "Docker Compose is `installed `_." msgstr "" -#: ../../source/explanation-differential-privacy.rst:25 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:25 #, fuzzy -msgid "Formal Definition" -msgstr "Compiler les définitions ProtoBuf" +msgid "Run the Quickstart Example" +msgstr "Demande pour un nouveau Flower Example" -#: ../../source/explanation-differential-privacy.rst:26 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:27 msgid "" -"Differential Privacy (DP) provides statistical guarantees against the " -"information an adversary can infer through the output of a randomized " -"algorithm. It provides an unconditional upper bound on the influence of a" -" single individual on the output of the algorithm by adding noise [1]. A " -"randomized mechanism M provides (:math:`\\epsilon`, " -":math:`\\delta`)-differential privacy if for any two neighboring " -"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " -"record, and for all possible outputs S ⊆ Range(A):" +"Clone the quickstart example you like to run. 
For example, ``quickstart-" +"pytorch``:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:32 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:35 msgid "" -"\\small\n" -"P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" +"Download the `compose.yml " +"`_" +" file into the example directory:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:38 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:44 +#, fuzzy +msgid "Build and start the services using the following command:" +msgstr "Active la virtualenv en exécutant la commande suivante :" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:50 +#, fuzzy msgid "" -"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " -"metric of privacy loss. It also controls the privacy-utility trade-off; " -"lower :math:`\\epsilon` values indicate higher levels of privacy but are " -"likely to reduce utility as well. The :math:`\\delta` parameter accounts " -"for a small probability on which the upper bound :math:`\\epsilon` does " -"not hold. The amount of noise needed to achieve differential privacy is " -"proportional to the sensitivity of the output, which measures the maximum" -" change in the output due to the inclusion or removal of a single record." +"Append the following lines to the end of the ``pyproject.toml`` file and " +"save it:" +msgstr "Augmente la version mineure de ``pyproject.toml`` d'une unité." + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:52 +#: ../../source/docker/tutorial-quickstart-docker.rst:324 +msgid "pyproject.toml" msgstr "" -#: ../../source/explanation-differential-privacy.rst:45 -#, fuzzy -msgid "Differential Privacy in Machine Learning" -msgstr "Confidentialité différentielle" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:61 +msgid "" +"You can customize the string that follows ``tool.flwr.federations.`` to " +"fit your needs. 
However, please note that the string cannot contain a dot" +" (``.``)." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:46 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:64 msgid "" -"DP can be utilized in machine learning to preserve the privacy of the " -"training data. Differentially private machine learning algorithms are " -"designed in a way to prevent the algorithm to learn any specific " -"information about any individual data points and subsequently prevent the" -" model from revealing sensitive information. Depending on the stage at " -"which noise is introduced, various methods exist for applying DP to " -"machine learning algorithms. One approach involves adding noise to the " -"training data (either to the features or labels), while another method " -"entails injecting noise into the gradients of the loss function during " -"model training. Additionally, such noise can be incorporated into the " -"model's output." +"In this example, ``local-deployment`` has been used. Just remember to " +"replace ``local-deployment`` with your chosen name in both the " +"``tool.flwr.federations.`` string and the corresponding ``flwr run .`` " +"command." msgstr "" -#: ../../source/explanation-differential-privacy.rst:53 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:68 #, fuzzy -msgid "Differential Privacy in Federated Learning" -msgstr "Mise à l'échelle de l'apprentissage fédéré" +msgid "Run the example:" +msgstr "Fédérer l'exemple" -#: ../../source/explanation-differential-privacy.rst:54 -msgid "" -"Federated learning is a data minimization approach that allows multiple " -"parties to collaboratively train a model without sharing their raw data. " -"However, federated learning also introduces new privacy challenges. The " -"model updates between parties and the central server can leak information" -" about the local data. 
These leaks can be exploited by attacks such as " -"membership inference and property inference attacks, or model inversion " -"attacks." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:74 +msgid "Follow the logs of the SuperExec service:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:58 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:80 msgid "" -"DP can play a crucial role in federated learning to provide privacy for " -"the clients' data." +"That is all it takes! You can monitor the progress of the run through the" +" logs of the SuperExec." msgstr "" -#: ../../source/explanation-differential-privacy.rst:60 -msgid "" -"Depending on the granularity of privacy provision or the location of " -"noise addition, different forms of DP exist in federated learning. In " -"this explainer, we focus on two approaches of DP utilization in federated" -" learning based on where the noise is added: at the server (also known as" -" the center) or at the client (also known as the local)." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:84 +msgid "Run a Different Quickstart Example" msgstr "" -#: ../../source/explanation-differential-privacy.rst:63 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:86 msgid "" -"**Central Differential Privacy**: DP is applied by the server and the " -"goal is to prevent the aggregated model from leaking information about " -"each client's data." +"To run a different quickstart example, such as ``quickstart-tensorflow``," +" first, shut down the Docker Compose services of the current example:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:65 -msgid "" -"**Local Differential Privacy**: DP is applied on the client side before " -"sending any information to the server and the goal is to prevent the " -"updates that are sent to the server from leaking any information about " -"the client's data." 
+#: ../../source/docker/run-quickstart-examples-docker-compose.rst:93 +msgid "After that, you can repeat the steps above." msgstr "" -#: ../../source/explanation-differential-privacy.rst:-1 -#: ../../source/explanation-differential-privacy.rst:68 -#: ../../source/how-to-use-differential-privacy.rst:11 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:96 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 #, fuzzy -msgid "Central Differential Privacy" -msgstr "Confidentialité différentielle" +msgid "Limitations" +msgstr "Simulation de moniteur" -#: ../../source/explanation-differential-privacy.rst:69 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 +#, fuzzy +msgid "Quickstart Example" +msgstr "Démarrage rapide de JAX" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 +#, fuzzy +msgid "quickstart-fastai" +msgstr "Démarrage rapide fastai" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:104 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:106 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:115 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:121 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:123 +#: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:399 +#: ../../source/ref-changelog.md:676 ../../source/ref-changelog.md:740 +#: ../../source/ref-changelog.md:798 ../../source/ref-changelog.md:867 +#: ../../source/ref-changelog.md:929 +msgid "None" +msgstr "Aucun" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 +#, fuzzy +msgid "quickstart-huggingface" +msgstr "Quickstart tutorials" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 +#, fuzzy +msgid "quickstart-jax" +msgstr "Démarrage rapide de JAX" + +#: 
../../source/docker/run-quickstart-examples-docker-compose.rst:108 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:125 +#, fuzzy msgid "" -"In this approach, which is also known as user-level DP, the central " -"server is responsible for adding noise to the globally aggregated " -"parameters. It should be noted that trust in the server is required." +"The example has not yet been updated to work with the latest ``flwr`` " +"version." msgstr "" +"Les exemples de code couvrant scikit-learn et PyTorch Lightning ont été " +"mis à jour pour fonctionner avec la dernière version de Flower." + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:109 +#, fuzzy +msgid "quickstart-mlcube" +msgstr "Démarrage rapide de JAX" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:111 +#, fuzzy +msgid "quickstart-mlx" +msgstr "Démarrage rapide de JAX" -#: ../../source/explanation-differential-privacy.rst:76 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 msgid "" -"While there are various ways to implement central DP in federated " -"learning, we concentrate on the algorithms proposed by [2] and [3]. The " -"overall approach is to clip the model updates sent by the clients and add" -" some amount of noise to the aggregated model. In each iteration, a " -"random set of clients is chosen with a specific probability for training." -" Each client performs local training on its own data. The update of each " -"client is then clipped by some value `S` (sensitivity `S`). This would " -"limit the impact of any individual client which is crucial for privacy " -"and often beneficial for robustness. A common approach to achieve this is" -" by restricting the `L2` norm of the clients' model updates, ensuring " -"that larger updates are scaled down to fit within the norm `S`." +"`Requires to run on macOS with Apple Silicon `_." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:-1 -msgid "clipping" -msgstr "" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 +#, fuzzy +msgid "quickstart-monai" +msgstr "Démarrage rapide de JAX" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 +#, fuzzy +msgid "quickstart-pandas" +msgstr "Démarrage rapide des Pandas" -#: ../../source/explanation-differential-privacy.rst:89 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 +#, fuzzy +msgid "quickstart-pytorch-lightning" +msgstr "Démarrage rapide de PyTorch Lightning" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 msgid "" -"Afterwards, the Gaussian mechanism is used to add noise in order to " -"distort the sum of all clients' updates. The amount of noise is scaled to" -" the sensitivity value to obtain a privacy guarantee. The Gaussian " -"mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " -"noise_scale * S ) / (number of sampled clients)`." +"Requires an older pip version that is not supported by the Flower Docker " +"images." msgstr "" -#: ../../source/explanation-differential-privacy.rst:94 -msgid "Clipping" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 +#, fuzzy +msgid "quickstart-pytorch" +msgstr "Démarrage rapide de PyTorch" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 +#, fuzzy +msgid "quickstart-sklearn-tabular" +msgstr "Démarrage rapide de scikit-learn" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:124 +#, fuzzy +msgid "quickstart-tabnet" +msgstr "Démarrage rapide de JAX" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:126 +#, fuzzy +msgid "quickstart-tensorflow" +msgstr "Démarrage rapide de TensorFlow" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:127 +msgid "Only runs on AMD64." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:96 +#: ../../source/docker/set-environment-variables.rst:2 +#, fuzzy +msgid "Set Environment Variables" +msgstr "Mise en place de l'environnement de codage" + +#: ../../source/docker/set-environment-variables.rst:4 msgid "" -"There are two forms of clipping commonly used in Central DP: Fixed " -"Clipping and Adaptive Clipping." +"To set a variable inside a Docker container, you can use the ``-e " +"=`` flag. Multiple ``-e`` flags can be used to set multiple " +"environment variables for a container." msgstr "" -#: ../../source/explanation-differential-privacy.rst:98 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:2 +#, fuzzy +msgid "Deploy Flower on Multiple Machines with Docker Compose" +msgstr "Démarrage rapide XGBoost" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:4 msgid "" -"**Fixed Clipping** : A predefined fix threshold is set for the magnitude " -"of clients' updates. Any update exceeding this threshold is clipped back " -"to the threshold value." +"This guide will help you set up a Flower project on multiple machines " +"using Docker Compose." msgstr "" -#: ../../source/explanation-differential-privacy.rst:100 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:7 msgid "" -"**Adaptive Clipping** : The clipping threshold dynamically adjusts based " -"on the observed update distribution [4]. It means that the clipping value" -" is tuned during the rounds with respect to the quantile of the update " -"norm distribution." +"You will learn how to run the Flower client and server components on two " +"separate machines, with Flower configured to use TLS encryption and " +"persist SuperLink state across restarts. A server consists of a SuperLink" +" and ``SuperExec``. For more details about the Flower architecture, refer" +" to the :doc:`../explanation-flower-architecture` explainer page." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:102 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:13 msgid "" -"The choice between fixed and adaptive clipping depends on various factors" -" such as privacy requirements, data distribution, model complexity, and " -"others." +"This guide assumes you have completed the :doc:`tutorial-quickstart-" +"docker-compose` tutorial. It is highly recommended that you follow and " +"understand the contents of that tutorial before proceeding with this " +"guide." msgstr "" -#: ../../source/explanation-differential-privacy.rst:-1 -#: ../../source/explanation-differential-privacy.rst:105 -#: ../../source/how-to-use-differential-privacy.rst:96 -#, fuzzy -msgid "Local Differential Privacy" -msgstr "Confidentialité différentielle" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:20 +msgid "Before you begin, make sure you have the following prerequisites:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:23 +msgid "The Docker daemon is running on your local machine and the remote machine." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:107 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:24 msgid "" -"In this approach, each client is responsible for performing DP. Local DP " -"avoids the need for a fully trusted aggregator, but it should be noted " -"that local DP leads to a decrease in accuracy but better privacy in " -"comparison to central DP." +"Docker Compose V2 is installed on both your local machine and the remote " +"machine." msgstr "" -#: ../../source/explanation-differential-privacy.rst:116 -msgid "In this explainer, we focus on two forms of achieving Local DP:" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:25 +msgid "You can connect to the remote machine from your local machine." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:118 -msgid "" -"Each client adds noise to the local updates before sending them to the " -"server. To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " -"the sensitivity of the local model to be ∆, Gaussian noise is applied " -"with a noise scale of σ where:" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:26 +msgid "Ports ``9091`` and ``9093`` are accessible on the remote machine." msgstr "" -#: ../../source/explanation-differential-privacy.rst:120 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:30 msgid "" -"\\small\n" -"\\frac{∆ \\times \\sqrt{2 \\times " -"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" -"\n" +"The guide uses the |quickstart_sklearn_tabular|_ example as an example " +"project." msgstr "" -#: ../../source/explanation-differential-privacy.rst:125 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:32 msgid "" -"Each client adds noise to the gradients of the model during the local " -"training (DP-SGD). More specifically, in this approach, gradients are " -"clipped and an amount of calibrated noise is injected into the gradients." +"If your project has a different name or location, please remember to " +"adjust the commands/paths accordingly." msgstr "" -#: ../../source/explanation-differential-privacy.rst:128 -msgid "" -"Please note that these two approaches are providing privacy at different " -"levels." 
+#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:36 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:22 +#: ../../source/docker/tutorial-quickstart-docker.rst:19 +msgid "Step 1: Set Up" msgstr "" -#: ../../source/explanation-differential-privacy.rst:131 -#, fuzzy -msgid "**References:**" -msgstr "Référence" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:38 +msgid "Clone the Flower repository and change to the ``distributed`` directory:" +msgstr "" -#: ../../source/explanation-differential-privacy.rst:133 -msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:45 +msgid "Get the IP address from the remote machine and save it for later." msgstr "" -#: ../../source/explanation-differential-privacy.rst:135 -#, fuzzy +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:46 msgid "" -"[2] McMahan et al. Learning Differentially Private Recurrent Language " -"Models." +"Use the ``certs.yml`` Compose file to generate your own self-signed " +"certificates. If you have certificates, you can continue with Step 2." msgstr "" -"McMahan, H. Brendan, et al. \"Learning differentially private recurrent " -"language models\", arXiv preprint arXiv:1710.06963 (2017)." -#: ../../source/explanation-differential-privacy.rst:137 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:51 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:221 +msgid "These certificates should be used only for development purposes." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:53 msgid "" -"[3] Geyer et al. Differentially Private Federated Learning: A Client " -"Level Perspective." +"For production environments, you may have to use dedicated services to " +"obtain your certificates." msgstr "" -#: ../../source/explanation-differential-privacy.rst:139 -#, fuzzy -msgid "[4] Galen et al. 
Differentially Private Learning with Adaptive Clipping." +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:56 +msgid "" +"First, set the environment variables ``SUPERLINK_IP`` and " +"``SUPEREXEC_IP`` with the IP address from the remote machine. For " +"example, if the IP is ``192.168.2.33``, execute:" msgstr "" -"Andrew, Galen, et al. \"Differentially private learning with adaptive " -"clipping\" Advances in Neural Information Processing Systems 34 (2021) : " -"17455-17466." -#: ../../source/explanation-federated-evaluation.rst:2 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 -msgid "Federated evaluation" -msgstr "Évaluation fédérée" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:65 +msgid "Next, generate the self-signed certificates:" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:4 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:72 +msgid "Step 2: Copy the Server Compose Files" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:74 msgid "" -"There are two main approaches to evaluating models in federated learning " -"systems: centralized (or server-side) evaluation and federated (or " -"client-side) evaluation." +"Use the method that works best for you to copy the ``server`` directory, " +"the certificates, and your Flower project to the remote machine." msgstr "" -"Il existe deux approches principales pour évaluer les modèles dans les " -"systèmes d'apprentissage fédérés : l'évaluation centralisée (ou côté " -"serveur) et l'évaluation fédérée (ou côté client)." 
-#: ../../source/explanation-federated-evaluation.rst:8 -msgid "Centralized Evaluation" -msgstr "Évaluation centralisée" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:77 +msgid "For example, you can use ``scp`` to copy the directories:" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:11 -msgid "Built-In Strategies" -msgstr "Stratégies intégrées" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:87 +#, fuzzy +msgid "Step 3: Start the Flower Server Components" +msgstr "Démarrer le serveur" -#: ../../source/explanation-federated-evaluation.rst:13 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:89 msgid "" -"All built-in strategies support centralized evaluation by providing an " -"evaluation function during initialization. An evaluation function is any " -"function that can take the current global model parameters as input and " -"return evaluation results:" +"Log into the remote machine using ``ssh`` and run the following command " +"to start the SuperLink and SuperExec services:" msgstr "" -"Toutes les stratégies intégrées prennent en charge l'évaluation " -"centralisée en fournissant une fonction d'évaluation lors de " -"l'initialisation. Une fonction d'évaluation est une fonction qui peut " -"prendre les paramètres du modèle global actuel comme entrée et renvoyer " -"les résultats de l'évaluation :" -#: ../../source/explanation-federated-evaluation.rst:58 -msgid "Custom Strategies" -msgstr "Stratégies personnalisées" - -#: ../../source/explanation-federated-evaluation.rst:60 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:102 msgid "" -"The :code:`Strategy` abstraction provides a method called " -":code:`evaluate` that can directly be used to evaluate the current global" -" model parameters. The current server implementation calls " -":code:`evaluate` after parameter aggregation and before federated " -"evaluation (see next paragraph)." 
+"The Path of the ``PROJECT_DIR`` should be relative to the location of the" +" ``server`` Docker Compose files." msgstr "" -"L'abstraction :code:`Strategy` fournit une méthode appelée " -":code:`evaluate` qui peut être directement utilisée pour évaluer les " -"paramètres du modèle global actuel. L'implémentation actuelle du serveur " -"appelle :code:`evaluate` après l'agrégation des paramètres et avant " -"l'évaluation fédérée (voir le paragraphe suivant)." -#: ../../source/explanation-federated-evaluation.rst:65 -msgid "Federated Evaluation" -msgstr "Évaluation fédérée" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:105 +msgid "Go back to your terminal on your local machine." +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:68 -msgid "Implementing Federated Evaluation" -msgstr "Mise en œuvre de l'évaluation fédérée" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:108 +#, fuzzy +msgid "Step 4: Start the Flower Client Components" +msgstr "Démarrer le serveur" -#: ../../source/explanation-federated-evaluation.rst:70 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:110 msgid "" -"Client-side evaluation happens in the :code:`Client.evaluate` method and " -"can be configured from the server side." +"On your local machine, run the following command to start the client " +"components:" msgstr "" -"L'évaluation côté client se fait dans la méthode :code:`Client.evaluate` " -"et peut être configurée côté serveur." - -#: ../../source/explanation-federated-evaluation.rst:101 -msgid "Configuring Federated Evaluation" -msgstr "Configuration de l'évaluation fédérée" -#: ../../source/explanation-federated-evaluation.rst:103 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:120 msgid "" -"Federated evaluation can be configured from the server side. 
Built-in " -"strategies support the following arguments:" +"The Path of the ``PROJECT_DIR`` should be relative to the location of the" +" ``client`` Docker Compose files." msgstr "" -"L'évaluation fédérée peut être configurée du côté du serveur. Les " -"stratégies intégrées prennent en charge les arguments suivants :" -#: ../../source/explanation-federated-evaluation.rst:105 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:124 +#, fuzzy +msgid "Step 5: Run Your Flower Project" +msgstr "Serveur de Flower" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:126 msgid "" -":code:`fraction_evaluate`: a :code:`float` defining the fraction of " -"clients that will be selected for evaluation. If " -":code:`fraction_evaluate` is set to :code:`0.1` and :code:`100` clients " -"are connected to the server, then :code:`10` will be randomly selected " -"for evaluation. If :code:`fraction_evaluate` is set to :code:`0.0`, " -"federated evaluation will be disabled." +"Specify the remote SuperExec IP addresses and the path to the root " +"certificate in the ``[tool.flwr.federations.remote-superexec]`` table in " +"the ``pyproject.toml`` file. Here, we have named our remote federation " +"``remote-superexec``:" msgstr "" -":code:`fraction_evaluate` : un :code:`float` définissant la fraction de " -"clients qui sera sélectionnée pour l'évaluation. Si " -":code:`fraction_evaluate` est défini à :code:`0.1` et que :code:`100` " -"clients sont connectés au serveur, alors :code:`10` sera sélectionné " -"aléatoirement pour l'évaluation. Si :code:`fraction_evaluate` est défini " -"à :code:`0.0`, l'évaluation fédérée sera désactivée." 
-#: ../../source/explanation-federated-evaluation.rst:106 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:130 +#, fuzzy +msgid "examples/quickstart-sklearn-tabular/pyproject.toml" +msgstr "Démarrage rapide de scikit-learn" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:139 msgid "" -":code:`min_evaluate_clients`: an :code:`int`: the minimum number of " -"clients to be selected for evaluation. If :code:`fraction_evaluate` is " -"set to :code:`0.1`, :code:`min_evaluate_clients` is set to 20, and " -":code:`100` clients are connected to the server, then :code:`20` clients " -"will be selected for evaluation." +"The Path of the ``root-certificates`` should be relative to the location " +"of the ``pyproject.toml`` file." msgstr "" -"si :code:`fraction_evaluate` est réglé sur :code:`0.1`, " -":code:`min_evaluate_clients` est réglé sur 20, et que :code:`100` clients" -" sont connectés au serveur, alors :code:`20` clients seront sélectionnés " -"pour l'évaluation." -#: ../../source/explanation-federated-evaluation.rst:107 -msgid "" -":code:`min_available_clients`: an :code:`int` that defines the minimum " -"number of clients which need to be connected to the server before a round" -" of federated evaluation can start. If fewer than " -":code:`min_available_clients` are connected to the server, the server " -"will wait until more clients are connected before it continues to sample " -"clients for evaluation." +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:142 +msgid "To run the project, execute:" msgstr "" -":code:`min_available_clients` : un :code:`int` qui définit le nombre " -"minimum de clients qui doivent être connectés au serveur avant qu'un " -"cycle d'évaluation fédérée puisse commencer. Si moins de " -":code:`min_available_clients` sont connectés au serveur, le serveur " -"attendra que d'autres clients soient connectés avant de continuer à " -"échantillonner des clients pour l'évaluation." 
-#: ../../source/explanation-federated-evaluation.rst:108 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:148 msgid "" -":code:`on_evaluate_config_fn`: a function that returns a configuration " -"dictionary which will be sent to the selected clients. The function will " -"be called during each round and provides a convenient way to customize " -"client-side evaluation from the server side, for example, to configure " -"the number of validation steps performed." +"That's it! With these steps, you've set up Flower on two separate " +"machines and are ready to start using it." msgstr "" -":code:`on_evaluate_config_fn` : une fonction qui renvoie un dictionnaire " -"de configuration qui sera envoyé aux clients sélectionnés. Cette fonction" -" sera appelée à chaque tour et offre un moyen pratique de personnaliser " -"l'évaluation côté client depuis le côté serveur, par exemple pour " -"configurer le nombre d'étapes de validation effectuées." - -#: ../../source/explanation-federated-evaluation.rst:135 -msgid "Evaluating Local Model Updates During Training" -msgstr "Évaluer les mises à jour du modèle local pendant la formation" -#: ../../source/explanation-federated-evaluation.rst:137 -msgid "" -"Model parameters can also be evaluated during training. " -":code:`Client.fit` can return arbitrary evaluation results as a " -"dictionary:" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:152 +msgid "Step 6: Clean Up" msgstr "" -"Les paramètres du modèle peuvent également être évalués pendant la " -"formation. 
:code:`Client.fit` peut renvoyer des résultats d'évaluation " -"arbitraires sous forme de dictionnaire :" -#: ../../source/explanation-federated-evaluation.rst:177 -msgid "Full Code Example" -msgstr "Exemple de code complet" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:154 +#, fuzzy +msgid "Shut down the Flower client components:" +msgstr "Client de Flower" -#: ../../source/explanation-federated-evaluation.rst:179 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:161 +msgid "Shut down the Flower server components and delete the SuperLink state:" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:2 #, fuzzy +msgid "Quickstart with Docker" +msgstr "Démarrage rapide XGBoost" + +#: ../../source/docker/tutorial-quickstart-docker.rst:4 msgid "" -"For a full code example that uses both centralized and federated " -"evaluation, see the *Advanced TensorFlow Example* (the same approach can " -"be applied to workloads implemented in any other framework): " -"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" +"This quickstart aims to guide you through the process of containerizing a" +" Flower project and running it end to end using Docker on your local " +"machine." msgstr "" -"Pour un exemple de code complet qui utilise à la fois l'évaluation " -"centralisée et fédérée, voir l'*Exemple TensorFlow avancé* (la même " -"approche peut être appliquée aux charges de travail mises en œuvre dans " -"n'importe quel autre framework) : " -"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" -#: ../../source/fed/0000-20200102-fed-template.md:10 -msgid "FED Template" -msgstr "Modèle FED" +#: ../../source/docker/tutorial-quickstart-docker.rst:7 +msgid "" +"This tutorial does not use production-ready settings, so you can focus on" +" understanding the basic workflow that uses the minimum configurations." 
+msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:12 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12 -msgid "Table of Contents" -msgstr "Table des matières" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:32 +#: ../../source/docker/tutorial-quickstart-docker.rst:21 +msgid "Create a new Flower project (PyTorch):" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:14 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14 -msgid "[Table of Contents](#table-of-contents)" -msgstr "[Table des matières](#table-of-contents)" +#: ../../source/docker/tutorial-quickstart-docker.rst:39 +msgid "Create a new Docker bridge network called ``flwr-network``:" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:15 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15 -msgid "[Summary](#summary)" -msgstr "[Résumé](#résumé)" +#: ../../source/docker/tutorial-quickstart-docker.rst:45 +msgid "" +"User-defined networks, such as ``flwr-network``, enable IP resolution of " +"container names, a feature absent in the default bridge network. This " +"simplifies quickstart example by avoiding the need to determine host IP " +"first." 
+msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:16 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16 -msgid "[Motivation](#motivation)" -msgstr "[Motivation](#motivation)" +#: ../../source/docker/tutorial-quickstart-docker.rst:50 +#, fuzzy +msgid "Step 2: Start the SuperLink" +msgstr "Démarrer le serveur" -#: ../../source/fed/0000-20200102-fed-template.md:17 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 -msgid "[Goals](#goals)" -msgstr "[Buts](#buts)" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:62 +#: ../../source/docker/tutorial-quickstart-docker.rst:52 +#, fuzzy +msgid "Open your terminal and run:" +msgstr "Ouvre un autre terminal et démarre le deuxième client :" -#: ../../source/fed/0000-20200102-fed-template.md:18 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 -msgid "[Non-Goals](#non-goals)" -msgstr "[Non-objectifs](#non-objectifs)" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "Understand the command" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:19 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 -msgid "[Proposal](#proposal)" -msgstr "[Proposition](#proposition)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``-p 9091:9091 -p 9092:9092``: Map port ``9091`` and ``9092`` of the " +"container to the same port of" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:20 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 -msgid "[Drawbacks](#drawbacks)" -msgstr "[Inconvénients](#inconvénients)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the host machine, allowing other services to access the Driver API on" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:21 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 -msgid "[Alternatives Considered](#alternatives-considered)" -msgstr 
"[Alternatives envisagées](#alternatives-considered)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``http://localhost:9091`` and the Fleet API on ``http://localhost:9092``." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:22 -msgid "[Appendix](#appendix)" -msgstr "[Annexe](#appendix)" +#: ../../source/docker/tutorial-quickstart-docker.rst:71 +#: ../../source/docker/tutorial-quickstart-docker.rst:108 +#: ../../source/docker/tutorial-quickstart-docker.rst:219 +#: ../../source/docker/tutorial-quickstart-docker.rst:309 +msgid "" +"``--network flwr-network``: Make the container join the network named " +"``flwr-network``." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:24 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 -msgid "Summary" -msgstr "Résumé" +#: ../../source/docker/tutorial-quickstart-docker.rst:72 +msgid "``--name superlink``: Assign the name ``superlink`` to the container." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:26 -#, fuzzy -msgid "\\[TODO - sentence 1: summary of the problem\\]" -msgstr "[TODO - phrase 1 : résumé du problème]" +#: ../../source/docker/tutorial-quickstart-docker.rst:73 +#: ../../source/docker/tutorial-quickstart-docker.rst:110 +#: ../../source/docker/tutorial-quickstart-docker.rst:220 +#: ../../source/docker/tutorial-quickstart-docker.rst:311 +msgid "" +"``--detach``: Run the container in the background, freeing up the " +"terminal." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:28 -#, fuzzy -msgid "\\[TODO - sentence 2: summary of the solution\\]" -msgstr "[TODO - phrase 2 : résumé de la solution]" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"tag of the image. The tag :substitution-code:`|stable_flwr_version|` " +"represents a :doc:`specific version ` of the image." 
+msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:30 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 -msgid "Motivation" -msgstr "Motivation" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--insecure``: This flag tells the container to operate in an insecure " +"mode, allowing" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:32 -#: ../../source/fed/0000-20200102-fed-template.md:36 -#: ../../source/fed/0000-20200102-fed-template.md:40 -#: ../../source/fed/0000-20200102-fed-template.md:44 -#: ../../source/fed/0000-20200102-fed-template.md:48 -#: ../../source/fed/0000-20200102-fed-template.md:54 -#: ../../source/fed/0000-20200102-fed-template.md:58 -#, fuzzy -msgid "\\[TODO\\]" -msgstr "[TODO]" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "unencrypted communication." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:34 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:53 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 -msgid "Goals" -msgstr "Objectifs" +#: ../../source/docker/tutorial-quickstart-docker.rst:80 +#, fuzzy +msgid "Step 3: Start the SuperNode" +msgstr "Démarrer le serveur" -#: ../../source/fed/0000-20200102-fed-template.md:38 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:79 -msgid "Non-Goals" -msgstr "Non-objectifs" +#: ../../source/docker/tutorial-quickstart-docker.rst:82 +msgid "Start two SuperNode containers." 
+msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:42 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 -msgid "Proposal" -msgstr "Proposition" +#: ../../source/docker/tutorial-quickstart-docker.rst:84 +msgid "Start the first container:" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:46 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 -msgid "Drawbacks" -msgstr "Inconvénients" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``-p 9094:9094``: Map port ``9094`` of the container to the same port of" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:50 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 -msgid "Alternatives Considered" -msgstr "Alternatives envisagées" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the host machine, allowing other services to access the SuperNode API on" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:52 -#, fuzzy -msgid "\\[Alternative 1\\]" -msgstr "[Alternative 1]" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``http://localhost:9094``." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:56 -#, fuzzy -msgid "\\[Alternative 2\\]" -msgstr "[Alternative 2]" +#: ../../source/docker/tutorial-quickstart-docker.rst:109 +msgid "``--name supernode-1``: Assign the name ``supernode-1`` to the container." 
+msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 -msgid "Flower Enhancement Doc" -msgstr "Doc sur l'amélioration des fleurs" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``flwr/supernode:|stable_flwr_version|``: This is the name of the image " +"to be run and the specific tag" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 -msgid "[Enhancement Doc Template](#enhancement-doc-template)" -msgstr "[Modèle de document d'amélioration](#enhancement-doc-template)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "of the image." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 -msgid "[Metadata](#metadata)" -msgstr "[Métadonnées](#métadonnées)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--superlink superlink:9092``: Connect to the SuperLink's Fleet API at " +"the address" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:22 -msgid "[Workflow](#workflow)" -msgstr "[Workflow](#workflow)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``superlink:9092``." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 -msgid "[GitHub Issues](#github-issues)" -msgstr "[GitHub Issues](#github-issues)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--node-config \"partition-id=0 num-partitions=2\"``: Set the partition " +"ID to ``0`` and the" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:26 -msgid "[Google Docs](#google-docs)" -msgstr "[Google Docs](#google-docs)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "number of partitions to ``2`` for the SuperNode configuration." 
+msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:30 -msgid "A Flower Enhancement is a standardized development process to" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--supernode-address 0.0.0.0:9094``: Set the address and port number " +"that the SuperNode" msgstr "" -"Une amélioration de la fleur est un processus de développement " -"standardisé pour" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 -msgid "provide a common structure for proposing larger changes" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "is listening on." msgstr "" -"fournir une structure commune pour proposer des changements plus " -"importants" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 -msgid "ensure that the motivation for a change is clear" -msgstr "s'assurer que la motivation du changement est claire" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--isolation process``: Tells the SuperNode that the ClientApp is " +"created by separate" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 -msgid "persist project information in a version control system" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "independent process. The SuperNode does not attempt to create it." 
msgstr "" -"conserver les informations sur le projet dans un système de contrôle des " -"versions" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 -msgid "document the motivation for impactful user-facing changes" +#: ../../source/docker/tutorial-quickstart-docker.rst:124 +#, fuzzy +msgid "Start the second container:" +msgstr "Démarrer le serveur" + +#: ../../source/docker/tutorial-quickstart-docker.rst:142 +msgid "Step 4: Start the ClientApp" msgstr "" -"documenter la motivation des changements qui ont un impact sur " -"l'utilisateur" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:36 -msgid "reserve GitHub issues for tracking work in flight" -msgstr "réserve les problèmes GitHub pour le suivi du travail en vol" +#: ../../source/docker/tutorial-quickstart-docker.rst:144 +msgid "" +"The ClientApp Docker image comes with a pre-installed version of Flower " +"and serves as a base for building your own ClientApp image. In order to " +"install the FAB dependencies, you will need to create a Dockerfile that " +"extends the ClientApp image and installs the required dependencies." 
+msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 +#: ../../source/docker/tutorial-quickstart-docker.rst:149 msgid "" -"ensure community participants can successfully drive changes to " -"completion across one or more releases while stakeholders are adequately " -"represented throughout the process" +"Create a ClientApp Dockerfile called ``Dockerfile.clientapp`` and paste " +"the following code into it:" msgstr "" -"s'assurer que les participants de la communauté peuvent mener à bien les " -"changements dans le cadre d'une ou plusieurs versions et que les parties " -"prenantes sont représentées de manière adéquate tout au long du processus" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 -msgid "Hence, an Enhancement Doc combines aspects of" -msgstr "Par conséquent, un document d'amélioration combine des aspects de" +#: ../../source/docker/tutorial-quickstart-docker.rst:152 +#, fuzzy +msgid "Dockerfile.clientapp" +msgstr "Flower ClientApp." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 -msgid "a feature, and effort-tracking document" -msgstr "une caractéristique, et un document de suivi des efforts" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "Understand the Dockerfile" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 -msgid "a product requirements document" -msgstr "un document sur les exigences du produit" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +":substitution-code:`FROM flwr/clientapp:|stable_flwr_version|`: This line" +" specifies that the Docker image" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 -msgid "a design document" -msgstr "un document de conception" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"into one file, which is created incrementally in collaboration with the " -"community." 
+"to be built from is the ``flwr/clientapp image``, version :substitution-" +"code:`|stable_flwr_version|`." msgstr "" -"en un seul fichier, qui est créé progressivement en collaboration avec la" -" communauté." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``WORKDIR /app``: Set the working directory for the container to ``/app``." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"For far-fetching changes or features proposed to Flower, an abstraction " -"beyond a single GitHub issue or pull request is required to understand " -"and communicate upcoming changes to the project." +"Any subsequent commands that reference a directory will be relative to " +"this directory." msgstr "" -"Pour les changements lointains ou les fonctionnalités proposées à Flower," -" une abstraction au-delà d'une simple question GitHub ou d'une demande de" -" tirage est nécessaire pour comprendre et communiquer les changements à " -"venir dans le projet." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``COPY pyproject.toml .``: Copy the ``pyproject.toml`` file" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"The purpose of this process is to reduce the amount of \"tribal " -"knowledge\" in our community. By moving decisions from Slack threads, " -"video calls, and hallway conversations into a well-tracked artifact, this" -" process aims to enhance communication and discoverability." +"from the current working directory into the container's ``/app`` " +"directory." msgstr "" -"L'objectif de ce processus est de réduire la quantité de \"connaissances " -"tribales\" dans notre communauté. 
En déplaçant les décisions des fils de " -"discussion Slack, des appels vidéo et des conversations de couloir vers " -"un artefact bien suivi, ce processus vise à améliorer la communication et" -" la découvrabilité." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Roughly any larger, user-facing enhancement should follow the Enhancement" -" process. If an enhancement would be described in either written or " -"verbal communication to anyone besides the author or developer, then " -"consider creating an Enhancement Doc." +"``RUN sed -i 's/.*flwr\\[simulation\\].*//' pyproject.toml``: Remove the " +"``flwr`` dependency" msgstr "" -"Si une amélioration doit être décrite par écrit ou verbalement à " -"quelqu'un d'autre que l'auteur ou le développeur, il faut envisager de " -"créer un document d'amélioration." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "from the ``pyproject.toml``." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Similarly, any technical effort (refactoring, major architectural change)" -" that will impact a large section of the development community should " -"also be communicated widely. The Enhancement process is suited for this " -"even if it will have zero impact on the typical user or operator." +"``python -m pip install -U --no-cache-dir .``: Run the ``pip`` install " +"command to" msgstr "" -"De même, tout effort technique (refactorisation, changement architectural" -" majeur) qui aura un impact sur une grande partie de la communauté de " -"développement doit également être communiqué à grande échelle. Le " -"processus d'amélioration est adapté à cela, même s'il n'aura aucun impact" -" sur l'utilisateur ou l'opérateur type." 
-#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "install the dependencies defined in the ``pyproject.toml`` file" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"For small changes and additions, going through the Enhancement process " -"would be time-consuming and unnecessary. This includes, for example, " -"adding new Federated Learning algorithms, as these only add features " -"without changing how Flower works or is used." +"The ``-U`` flag indicates that any existing packages should be upgraded, " +"and" msgstr "" -"Pour les petits changements et ajouts, passer par le processus " -"d'amélioration prendrait beaucoup de temps et serait inutile. Cela " -"inclut, par exemple, l'ajout de nouveaux algorithmes d'apprentissage " -"fédéré, car ceux-ci ne font qu'ajouter des fonctionnalités sans changer " -"le fonctionnement ou l'utilisation de Flower." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Enhancements are different from feature requests, as they are already " -"providing a laid-out path for implementation and are championed by " -"members of the community." +"``--no-cache-dir`` prevents pip from using the cache to speed up the " +"installation." msgstr "" -"Les améliorations sont différentes des demandes de fonctionnalités, car " -"elles fournissent déjà un chemin tracé pour la mise en œuvre et sont " -"défendues par les membres de la communauté." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"An Enhancement is captured in a Markdown file that follows a defined " -"template and a workflow to review and store enhancement docs for " -"reference — the Enhancement Doc." 
+"``ENTRYPOINT [\"flwr-clientapp\"]``: Set the command ``flwr-clientapp`` " +"to be" msgstr "" -"Une amélioration est capturée dans un fichier Markdown qui suit un modèle" -" défini et un flux de travail pour examiner et stocker les documents " -"d'amélioration pour référence - le Doc d'amélioration." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 -msgid "Enhancement Doc Template" -msgstr "Modèle de document d'amélioration" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the default command run when the container is started." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 +#: ../../source/docker/tutorial-quickstart-docker.rst:186 msgid "" -"Each enhancement doc is provided as a Markdown file having the following " -"structure" +"Note that `flwr `__ is already installed " +"in the ``flwr/clientapp`` base image, so only other package dependencies " +"such as ``flwr-datasets``, ``torch``, etc., need to be installed. As a " +"result, the ``flwr`` dependency is removed from the ``pyproject.toml`` " +"after it has been copied into the Docker image (see line 5)." 
msgstr "" -"Chaque document d'amélioration est fourni sous la forme d'un fichier " -"Markdown ayant la structure suivante" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 -msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" +#: ../../source/docker/tutorial-quickstart-docker.rst:192 +msgid "" +"Next, build the ClientApp Docker image by running the following command " +"in the directory where the Dockerfile is located:" msgstr "" -"Métadonnées (comme [décrit ci-dessous](#metadata) sous la forme d'un " -"préambule YAML)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 -msgid "Title (same as in metadata)" -msgstr "Titre (le même que dans les métadonnées)" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 -msgid "Table of Contents (if needed)" -msgstr "Table des matières (si nécessaire)" +#: ../../source/docker/tutorial-quickstart-docker.rst:201 +msgid "" +"The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. " +"Remember that these values are merely examples, and you can customize " +"them according to your requirements." 
+msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 -msgid "Notes/Constraints/Caveats (optional)" -msgstr "Notes/Contraintes/Cavats (facultatif)" +#: ../../source/docker/tutorial-quickstart-docker.rst:205 +#, fuzzy +msgid "Start the first ClientApp container:" +msgstr "Utilisation du moteur du client virtuel" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 -msgid "Design Details (optional)" -msgstr "Détails de la conception (facultatif)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``flwr_clientapp:0.0.1``: This is the name of the image to be run and the" +" specific tag" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 -msgid "Graduation Criteria" -msgstr "Critères d'obtention du diplôme" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--supernode supernode-1:9094``: Connect to the SuperNode's Fleet API at" +" the address" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 -msgid "Upgrade/Downgrade Strategy (if applicable)" -msgstr "Stratégie de mise à niveau/rétrogradation (le cas échéant)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``supernode-1:9094``." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 -msgid "As a reference, this document follows the above structure." -msgstr "À titre de référence, ce document suit la structure ci-dessus." 
+#: ../../source/docker/tutorial-quickstart-docker.rst:226 +msgid "Start the second ClientApp container:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 -#: ../../source/ref-api/flwr.common.Metadata.rst:2 -msgid "Metadata" -msgstr "Métadonnées" +#: ../../source/docker/tutorial-quickstart-docker.rst:237 +#, fuzzy +msgid "Step 5: Start the SuperExec" +msgstr "Étape 5 : Démarrer le SuperExec" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 +#: ../../source/docker/tutorial-quickstart-docker.rst:239 msgid "" -"**fed-number** (Required) The `fed-number` of the last Flower Enhancement" -" Doc + 1. With this number, it becomes easy to reference other proposals." +"The procedure for building and running a SuperExec image is almost " +"identical to the ClientApp image." msgstr "" -"**numérofed** (Obligatoire) Le `numérofed` du dernier document " -"d'amélioration de la fleur + 1. Avec ce numéro, il devient facile de " -"faire référence à d'autres propositions." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 -msgid "**title** (Required) The title of the proposal in plain language." -msgstr "**titre** (obligatoire) Le titre de la proposition en langage clair." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 +#: ../../source/docker/tutorial-quickstart-docker.rst:242 msgid "" -"**status** (Required) The current status of the proposal. See " -"[workflow](#workflow) for the possible states." +"Similar to the ClientApp image, you will need to create a Dockerfile that" +" extends the SuperExec image and installs the required FAB dependencies." msgstr "" -"**status** (obligatoire) L'état actuel de la proposition. Voir " -"[workflow](#workflow) pour les états possibles." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 +#: ../../source/docker/tutorial-quickstart-docker.rst:245 msgid "" -"**authors** (Required) A list of authors of the proposal. This is simply " -"the GitHub ID." 
+"Create a SuperExec Dockerfile called ``Dockerfile.superexec`` and paste " +"the following code in:" msgstr "" -"**authors** (Obligatoire) Une liste des auteurs de la proposition, il " -"s'agit simplement de l'identifiant GitHub." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 -msgid "" -"**creation-date** (Required) The date that the proposal was first " -"submitted in a PR." +#: ../../source/docker/tutorial-quickstart-docker.rst:248 +msgid "Dockerfile.superexec" msgstr "" -"**creation-date** (Obligatoire) Date à laquelle la proposition a été " -"soumise pour la première fois dans un RP." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"**last-updated** (Optional) The date that the proposal was last changed " -"significantly." +":substitution-code:`FROM flwr/superexec:|stable_flwr_version|`: This line" +" specifies that the Docker image" msgstr "" -"**dernière mise à jour** (Facultatif) La date à laquelle la proposition a" -" été modifiée de manière significative pour la dernière fois." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"**see-also** (Optional) A list of other proposals that are relevant to " -"this one." +"to be built from is the ``flwr/superexec image``, version :substitution-" +"code:`|stable_flwr_version|`." msgstr "" -"**see-also** (Facultatif) Une liste d'autres propositions qui sont " -"pertinentes par rapport à celle-ci." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 -msgid "**replaces** (Optional) A list of proposals that this one replaces." -msgstr "**replaces** (Facultatif) Une liste de propositions que celle-ci remplace." 
+#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``ENTRYPOINT [\"flower-superexec\"``: Set the command ``flower-" +"superexec`` to be" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 -msgid "**superseded-by** (Optional) A list of proposals that this one supersedes." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``\"--executor\", \"flwr.superexec.deployment:executor\"]`` Use the" msgstr "" -"**superseded-by** (Facultatif) Une liste de propositions que celle-ci " -"remplace." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 -msgid "Workflow" -msgstr "Flux de travail" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``flwr.superexec.deployment:executor`` executor to run the ServerApps." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 +#: ../../source/docker/tutorial-quickstart-docker.rst:283 msgid "" -"The idea forming the enhancement should already have been discussed or " -"pitched in the community. As such, it needs a champion, usually the " -"author, who shepherds the enhancement. This person also has to find " -"committers to Flower willing to review the proposal." +"Afterward, in the directory that holds the Dockerfile, execute this " +"Docker command to build the SuperExec image:" msgstr "" -"L'idée à l'origine de l'amélioration doit déjà avoir fait l'objet d'une " -"discussion ou d'une présentation au sein de la communauté. À ce titre, " -"elle a besoin d'un champion, généralement l'auteur, qui se charge de " -"l'amélioration. Cette personne doit également trouver des committers to " -"Flower prêts à examiner la proposition." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 -msgid "" -"New enhancements are checked in with a file name in the form of `NNNN-" -"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement " -"Doc number, to `enhancements`. 
All enhancements start in `provisional` " -"state as part of a pull request. Discussions are done as part of the pull" -" request review." +#: ../../source/docker/tutorial-quickstart-docker.rst:290 +#, fuzzy +msgid "Start the SuperExec container:" +msgstr "Démarrer le serveur" + +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``-p 9093:9093``: Map port ``9093`` of the container to the same port of" msgstr "" -"Les nouvelles améliorations sont enregistrées avec un nom de fichier de " -"la forme `NNN-YYYMMDD-enhancement-title.md`, `NNNN` étant le numéro du " -"document d'amélioration de la fleur, dans `enhancements`. Toutes les " -"améliorations commencent à l'état `provisionnel` dans le cadre d'une " -"demande d'extraction. Les discussions sont effectuées dans le cadre de " -"l'examen de la demande d'extraction." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Once an enhancement has been reviewed and approved, its status is changed" -" to `implementable`. The actual implementation is then done in separate " -"pull requests. These pull requests should mention the respective " -"enhancement as part of their description. After the implementation is " -"done, the proposal status is changed to `implemented`." +"the host machine, allowing you to access the SuperExec API on " +"``http://localhost:9093``." msgstr "" -"Une fois qu'une amélioration a été examinée et approuvée, son statut " -"passe à `implémentable`. L'implémentation réelle est alors réalisée dans " -"des demandes d'extension séparées. Ces demandes d'extension doivent " -"mentionner l'amélioration concernée dans leur description. Une fois " -"l'implémentation réalisée, le statut de la proposition passe à " -"`implémented`." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 -msgid "" -"Under certain conditions, other states are possible. 
An Enhancement has " -"the following states:" +#: ../../source/docker/tutorial-quickstart-docker.rst:310 +msgid "``--name superexec``: Assign the name ``superexec`` to the container." msgstr "" -"Sous certaines conditions, d'autres états sont possibles. Une " -"amélioration a les états suivants :" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"`provisional`: The enhancement has been proposed and is actively being " -"defined. This is the starting state while the proposal is being fleshed " -"out and actively defined and discussed." +"``flwr_superexec:0.0.1``: This is the name of the image to be run and the" +" specific tag" msgstr "" -"`provisoire` : L'amélioration a été proposée et est en cours de " -"définition. C'est l'état de départ pendant que la proposition est étoffée" -" et activement définie et discutée." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 -msgid "`implementable`: The enhancement has been reviewed and approved." -msgstr "`implementable` : L'amélioration a été examinée et approuvée." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"`implemented`: The enhancement has been implemented and is no longer " -"actively changed." +"``--executor-config superlink=\\\"superlink:9091\\\"``: Configure the " +"SuperExec executor to" msgstr "" -"`implemented` : L'amélioration a été mise en œuvre et n'est plus " -"activement modifiée." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 -msgid "`deferred`: The enhancement is proposed but not actively being worked on." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "connect to the SuperLink running on port ``9091``." msgstr "" -"`deferred` : L'amélioration est proposée mais n'est pas activement " -"travaillée." 
-#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 -msgid "" -"`rejected`: The authors and reviewers have decided that this enhancement " -"is not moving forward." +#: ../../source/docker/tutorial-quickstart-docker.rst:320 +msgid "Step 6: Run the Quickstart Project" msgstr "" -"`rejeté` : Les auteurs et les réviseurs ont décidé que cette amélioration" -" n'allait pas de l'avant." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 -msgid "`withdrawn`: The authors have withdrawn the enhancement." -msgstr "`withdrawn` : Les auteurs ont retiré l'amélioration." +#: ../../source/docker/tutorial-quickstart-docker.rst:322 +#, fuzzy +msgid "Add the following lines to the ``pyproject.toml``:" +msgstr "Augmente la version mineure de ``pyproject.toml`` d'une unité." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 -msgid "`replaced`: The enhancement has been replaced by a new enhancement." -msgstr "`replaced` : L'amélioration a été remplacée par une nouvelle amélioration." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 -msgid "" -"Adding an additional process to the ones already provided by GitHub " -"(Issues and Pull Requests) adds more complexity and can be a barrier for " -"potential first-time contributors." +#: ../../source/docker/tutorial-quickstart-docker.rst:331 +msgid "Run the ``quickstart-docker`` project by executing the command:" msgstr "" -"L'ajout d'un processus supplémentaire à ceux déjà fournis par GitHub " -"(Issues et Pull Requests) ajoute plus de complexité et peut constituer un" -" obstacle pour les éventuels nouveaux contributeurs." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 -msgid "" -"Expanding the proposal template beyond the single-sentence description " -"currently required in the features issue template may be a heavy burden " -"for non-native English speakers." 
+#: ../../source/docker/tutorial-quickstart-docker.rst:337 +msgid "Follow the SuperExec logs to track the execution of the run:" msgstr "" -"Élargir le modèle de proposition au-delà de la description d'une seule " -"phrase actuellement requise dans le modèle de questions sur les " -"caractéristiques peut constituer une lourde charge pour les personnes " -"dont l'anglais n'est pas la langue maternelle." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 -msgid "GitHub Issues" -msgstr "Questions sur GitHub" +#: ../../source/docker/tutorial-quickstart-docker.rst:344 +#, fuzzy +msgid "Step 7: Update the Application" +msgstr "Étape 7 : Mettre à jour l'application" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 +#: ../../source/docker/tutorial-quickstart-docker.rst:346 msgid "" -"Using GitHub Issues for these kinds of enhancements is doable. One could " -"use, for example, tags, to differentiate and filter them from other " -"issues. The main issue is in discussing and reviewing an enhancement: " -"GitHub issues only have a single thread for comments. Enhancements " -"usually have multiple threads of discussion at the same time for various " -"parts of the doc. Managing these multiple discussions can be confusing " -"when using GitHub Issues." +"Change the application code. For example, change the ``seed`` in " +"``quickstart_docker/task.py`` to ``43`` and save it:" msgstr "" -"Il est possible d'utiliser GitHub Issues pour ce type d'améliorations. On" -" pourrait utiliser, par exemple, des balises pour les différencier et les" -" filtrer par rapport aux autres problèmes. Le principal problème concerne" -" la discussion et la révision d'une amélioration : les GitHub Issues " -"n'ont qu'un seul fil de discussion pour les commentaires. Les " -"améliorations ont généralement plusieurs fils de discussion en même temps" -" pour différentes parties de la documentation. 
La gestion de ces " -"multiples discussions peut être déroutante lorsque l'on utilise GitHub " -"Issues." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 -msgid "Google Docs" -msgstr "Google Docs" +#: ../../source/docker/tutorial-quickstart-docker.rst:349 +#, fuzzy +msgid "quickstart_docker/task.py" +msgstr "Démarrage rapide des Pandas" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 -msgid "" -"Google Docs allow for multiple threads of discussions. But as Google Docs" -" are hosted outside the project, their discoverability by the community " -"needs to be taken care of. A list of links to all proposals has to be " -"managed and made available for the community. Compared to shipping " -"proposals as part of Flower's repository, the potential for missing links" -" is much higher." +#: ../../source/docker/tutorial-quickstart-docker.rst:356 +msgid "Stop the current ClientApp containers:" msgstr "" -"Les Google Docs permettent de multiplier les fils de discussion. Mais " -"comme les Google Docs sont hébergés en dehors du projet, il faut veiller " -"à ce que la communauté puisse les découvrir. Une liste de liens vers " -"toutes les propositions doit être gérée et mise à la disposition de la " -"communauté. Par rapport à l'envoi de propositions dans le cadre du " -"référentiel de Flower, le risque de liens manquants est beaucoup plus " -"élevé." - -#: ../../source/fed/index.md:1 -msgid "FED - Flower Enhancement Doc" -msgstr "FED - Doc pour l'amélioration des fleurs" -#: ../../source/how-to-aggregate-evaluation-results.rst:2 +#: ../../source/docker/tutorial-quickstart-docker.rst:362 #, fuzzy -msgid "Aggregate evaluation results" -msgstr "Résultats globaux de l'évaluation." 
+msgid "Rebuild the FAB and ClientApp image:" +msgstr "Chargement des données" -#: ../../source/how-to-aggregate-evaluation-results.rst:4 -msgid "" -"The Flower server does not prescribe a way to aggregate evaluation " -"results, but it enables the user to fully customize result aggregation." +#: ../../source/docker/tutorial-quickstart-docker.rst:368 +msgid "Launch two new ClientApp containers based on the newly built image:" msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:8 -msgid "Aggregate Custom Evaluation Results" -msgstr "Agréger les résultats de l'évaluation personnalisée" +#: ../../source/docker/tutorial-quickstart-docker.rst:383 +msgid "Run the updated project:" +msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:10 -msgid "" -"The same :code:`Strategy`-customization approach can be used to aggregate" -" custom evaluation results coming from individual clients. Clients can " -"return custom metrics to the server by returning a dictionary:" +#: ../../source/docker/tutorial-quickstart-docker.rst:390 +msgid "Step 8: Clean Up" msgstr "" -"La même approche de personnalisation :code:`Stratégie` peut être utilisée" -" pour agréger les résultats d'évaluation personnalisés provenant de " -"clients individuels. 
Les clients peuvent renvoyer des mesures " -"personnalisées au serveur en renvoyant un dictionnaire :" -#: ../../source/how-to-aggregate-evaluation-results.rst:36 -msgid "" -"The server can then use a customized strategy to aggregate the metrics " -"provided in these dictionaries:" +#: ../../source/docker/tutorial-quickstart-docker.rst:392 +msgid "Remove the containers and the bridge network:" msgstr "" -"Le serveur peut alors utiliser une stratégie personnalisée pour agréger " -"les mesures fournies dans ces dictionnaires :" -#: ../../source/how-to-authenticate-supernodes.rst:2 -msgid "Authenticate SuperNodes" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:408 +#: ../../source/docker/tutorial-quickstart-docker.rst:404 +#, fuzzy +msgid "Where to Go Next" +msgstr "Par où commencer" + +#: ../../source/docker/tutorial-quickstart-docker.rst:406 +msgid ":doc:`enable-tls`" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:4 -msgid "" -"Flower has built-in support for authenticated SuperNodes that you can use" -" to verify the identities of each SuperNode connecting to a SuperLink. 
" -"Flower node authentication works similar to how GitHub SSH authentication" -" works:" +#: ../../source/docker/tutorial-quickstart-docker.rst:407 +msgid ":doc:`persist-superlink-state`" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:7 -msgid "SuperLink (server) stores a list of known (client) node public keys" +#: ../../source/docker/tutorial-quickstart-docker.rst:408 +msgid ":doc:`tutorial-quickstart-docker-compose`" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:8 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:2 +#, fuzzy +msgid "Quickstart with Docker Compose" +msgstr "Démarrage rapide XGBoost" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:4 msgid "" -"Using ECDH, both SuperNode and SuperLink independently derive a shared " -"secret" +"This quickstart shows you how to set up Flower using Docker Compose in a " +"single command, allowing you to focus on developing your application " +"without worrying about the underlying infrastructure." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:9 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:8 msgid "" -"Shared secret is used to compute the HMAC value of the message sent from " -"SuperNode to SuperLink as a token" +"You will also learn how to easily enable TLS encryption and persist " +"application state locally, giving you the freedom to choose the " +"configuration that best suits your project's needs." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:10 -msgid "SuperLink verifies the token" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:24 +msgid "Clone the Docker Compose ``complete`` directory:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:12 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:38 msgid "" -"We recommend you to check out the complete `code example " -"`_ demonstrating federated learning with Flower in an " -"authenticated setting." 
+"Export the path of the newly created project. The path should be relative" +" to the location of the Docker Compose files:" msgstr "" -"Réfère-toi à l'exemple de code complet " -"`_ " -"pour en savoir plus." -#: ../../source/how-to-authenticate-supernodes.rst:15 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:45 msgid "" -"This guide covers a preview feature that might change in future versions " -"of Flower." +"Setting the ``PROJECT_DIR`` helps Docker Compose locate the " +"``pyproject.toml`` file, allowing it to install dependencies in the " +"SuperExec and SuperNode images correctly." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:49 +#, fuzzy +msgid "Step 2: Run Flower in Insecure Mode" +msgstr "Serveur de Flower" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:51 msgid "" -"For increased security, node authentication can only be used when " -"encrypted connections (SSL/TLS) are enabled." +"To begin, start Flower with the most basic configuration. In this setup, " +"Flower will run without TLS and without persisting the state." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:21 -msgid "Enable node authentication in :code:`SuperLink`" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:56 +msgid "" +"Without TLS, the data sent between the services remains **unencrypted**. " +"Use it only for development purposes." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:23 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:59 msgid "" -"To enable node authentication, first you need to configure SSL/TLS " -"connections to secure the SuperLink<>SuperNode communication. You can " -"find the complete guide `here `_. After configuring secure connections, you" -" can enable client authentication in a long-running Flower " -":code:`SuperLink`. 
Use the following terminal command to start a Flower " -":code:`SuperNode` that has both secure connections and node " -"authentication enabled:" +"For production-oriented use cases, :ref:`enable TLS` for secure data" +" transmission." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:36 -msgid "Let's break down the authentication flags:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:70 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:184 +msgid "``docker compose``: The Docker command to run the Docker Compose tool." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:38 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:71 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:185 msgid "" -"The first flag :code:`--auth-list-public-keys` expects a path to a CSV " -"file storing all known node public keys. You need to store all known node" -" public keys that are allowed to participate in a federation in one CSV " -"file (:code:`.csv`)." +"``-f compose.yml``: Specify the YAML file that contains the basic Flower " +"service definitions." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:40 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:72 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:190 msgid "" -"A valid CSV file storing known node public keys should list the keys in " -"OpenSSH format, separated by commas and without any comments. For an " -"example, refer to our code sample, which contains a CSV file with two " -"known node public keys." +"``--build``: Rebuild the images for each service if they don't already " +"exist." 
msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:42 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:73 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:191 msgid "" -"The second and third flags :code:`--auth-superlink-private-key` and :code" -":`--auth-superlink-public-key` expect paths to the server's private and " -"public keys. For development purposes, you can generate a private and " -"public key pair using :code:`ssh-keygen -t ecdsa -b 384`." +"``-d``: Detach the containers from the terminal and run them in the " +"background." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:45 -msgid "" -"In Flower 1.9, there is no support for dynamically removing, editing, or " -"adding known node public keys to the SuperLink. To change the set of " -"known nodes, you need to shut the server down, edit the CSV file, and " -"start the server again. Support for dynamically changing the set of known" -" nodes is on the roadmap to be released in Flower 1.10 (ETA: June)." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:76 +msgid "Step 3: Run the Quickstart Project" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:51 -msgid "Enable node authentication in :code:`SuperNode`" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:78 +msgid "" +"Now that the Flower services have been started via Docker Compose, it is " +"time to run the quickstart example." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:53 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:81 msgid "" -"Similar to the long-running Flower server (:code:`SuperLink`), you can " -"easily enable node authentication in the long-running Flower client " -"(:code:`SuperNode`). Use the following terminal command to start an " -"authenticated :code:`SuperNode`:" +"To ensure the ``flwr`` CLI connects to the SuperExec, you need to specify" +" the SuperExec addresses in the ``pyproject.toml`` file." 
msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:64 -msgid "" -"The :code:`--auth-supernode-private-key` flag expects a path to the " -"node's private key file and the :code:`--auth-supernode-public-key` flag " -"expects a path to the node's public key file. For development purposes, " -"you can generate a private and public key pair using :code:`ssh-keygen -t" -" ecdsa -b 384`." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:84 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:232 +msgid "Add the following lines to the ``quickstart-compose/pyproject.toml``:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:68 -msgid "Security notice" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:86 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:234 +msgid "quickstart-compose/pyproject.toml" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:70 -msgid "" -"The system's security relies on the credentials of the SuperLink and each" -" SuperNode. Therefore, it is imperative to safeguard and safely store the" -" credentials to avoid security risks such as Public Key Infrastructure " -"(PKI) impersonation attacks. The node authentication mechanism also " -"involves human interaction, so please ensure that all of the " -"communication is done in a secure manner, using trusted communication " -"methods." 
+#: ../../source/docker/tutorial-quickstart-docker-compose.rst:93 +msgid "Execute the command to run the quickstart example:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:75 -#: ../../source/how-to-enable-ssl-connections.rst:65 -#: ../../source/how-to-use-built-in-mods.rst:85 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 -msgid "Conclusion" -msgstr "Conclusion" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:99 +msgid "Monitor the SuperExec logs and wait for the summary to appear:" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:106 +#, fuzzy +msgid "Step 4: Update the Application" +msgstr "Étape 3 : Sérialisation personnalisée" -#: ../../source/how-to-authenticate-supernodes.rst:77 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:108 +msgid "In the next step, change the application code." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:110 msgid "" -"You should now have learned how to start a long-running Flower server " -"(:code:`SuperLink`) and client (:code:`SuperNode`) with node " -"authentication enabled. You should also know the significance of the " -"private key and store it safely to minimize security risks." +"For example, go to the ``task.py`` file in the ``quickstart-" +"compose/quickstart_compose/`` directory and add a ``print`` call in the " +"``get_weights`` function:" msgstr "" -#: ../../source/how-to-configure-clients.rst:2 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:114 +msgid "quickstart-compose/quickstart_compose/task.py" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:125 #, fuzzy -msgid "Configure clients" -msgstr "Configurer les clients" +msgid "Rebuild and restart the services." 
+msgstr "Nous pouvons déjà démarrer le *serveur* :" -#: ../../source/how-to-configure-clients.rst:4 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:129 msgid "" -"Along with model parameters, Flower can send configuration values to " -"clients. Configuration values can be used for various purposes. They are," -" for example, a popular way to control client-side hyperparameters from " -"the server." +"If you have modified the dependencies listed in your ``pyproject.toml`` " +"file, it is essential to rebuild images." msgstr "" -"En plus des paramètres du modèle, Flower peut envoyer des valeurs de " -"configuration aux clients. Les valeurs de configuration peuvent être " -"utilisées à diverses fins. Elles constituent, par exemple, un moyen " -"populaire de contrôler les hyperparamètres côté client à partir du " -"serveur." -#: ../../source/how-to-configure-clients.rst:7 -msgid "Configuration values" -msgstr "Valeurs de configuration" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:132 +msgid "If you haven't made any changes, you can skip this step." +msgstr "" -#: ../../source/how-to-configure-clients.rst:9 -msgid "" -"Configuration values are represented as a dictionary with ``str`` keys " -"and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " -"float), ``int``, or ``str`` (or equivalent types in different languages)." 
-" Here is an example of a configuration dictionary in Python:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:134 +msgid "Run the following command to rebuild and restart the services:" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:140 +msgid "Run the updated quickstart example:" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:147 +msgid "In the SuperExec logs, you should find the ``Get weights`` line:" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:164 +msgid "Step 5: Persisting the SuperLink State" msgstr "" -"Les valeurs de configuration sont représentées sous forme de dictionnaire" -" avec des clés `str`` et des valeurs de type `bool`, `bytes`, `double` " -"(float de précision 64 bits), `int`, ou `str` (ou des types équivalents " -"dans d'autres langages). Voici un exemple de dictionnaire de " -"configuration en Python :" -#: ../../source/how-to-configure-clients.rst:20 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:166 msgid "" -"Flower serializes these configuration dictionaries (or *config dict* for " -"short) to their ProtoBuf representation, transports them to the client " -"using gRPC, and then deserializes them back to Python dictionaries." +"In this step, Flower services are configured to persist the state of the " +"SuperLink service, ensuring that it maintains its state even after a " +"restart." msgstr "" -"Flower sérialise ces dictionnaires de configuration (ou *config dict* en " -"abrégé) dans leur représentation ProtoBuf, les transporte vers le client " -"à l'aide de gRPC, puis les désérialise à nouveau en dictionnaires Python." -#: ../../source/how-to-configure-clients.rst:24 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:171 msgid "" -"Currently, there is no support for directly sending collection types " -"(e.g., ``Set``, ``List``, ``Map``) as values in configuration " -"dictionaries. 
There are several workarounds to send collections as values" -" by converting them to one of the supported value types (and converting " -"them back on the client-side)." +"When working with Docker Compose on Linux, you may need to create the " +"``state`` directory first and change its ownership to ensure proper " +"access and permissions." msgstr "" -"Actuellement, il n'est pas possible d'envoyer directement des types de " -"collections (par exemple, ``Set``, ``List``, ``Map``) en tant que valeurs" -" dans les dictionnaires de configuration. Il existe plusieurs solutions " -"pour envoyer des collections en tant que valeurs en les convertissant en " -"l'un des types de valeurs pris en charge (et en les reconvertissant du " -"côté client)." -#: ../../source/how-to-configure-clients.rst:26 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:174 msgid "" -"One can, for example, convert a list of floating-point numbers to a JSON " -"string, then send the JSON string using the configuration dictionary, and" -" then convert the JSON string back to a list of floating-point numbers on" -" the client." +"For more information, consult the following page: :doc:`persist-" +"superlink-state`." msgstr "" -"On peut, par exemple, convertir une liste de nombres à virgule flottante " -"en une chaîne JSON, puis envoyer la chaîne JSON à l'aide du dictionnaire " -"de configuration, et enfin reconvertir la chaîne JSON en une liste de " -"nombres à virgule flottante sur le client." 
-#: ../../source/how-to-configure-clients.rst:30 -msgid "Configuration through built-in strategies" -msgstr "Configuration par le biais de stratégies intégrées" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:176 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:226 +msgid "Run the command:" +msgstr "" -#: ../../source/how-to-configure-clients.rst:32 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst msgid "" -"The easiest way to send configuration values to clients is to use a " -"built-in strategy like :code:`FedAvg`. Built-in strategies support so-" -"called configuration functions. A configuration function is a function " -"that the built-in strategy calls to get the configuration dictionary for " -"the current round. It then forwards the configuration dictionary to all " -"the clients selected during that round." +"``-f with-state.yml``: Specifies the path to an additional Docker Compose" +" file that" msgstr "" -"La façon la plus simple d'envoyer des valeurs de configuration aux " -"clients est d'utiliser une stratégie intégrée comme :code:`FedAvg`. Les " -"stratégies intégrées prennent en charge ce que l'on appelle les fonctions" -" de configuration. Une fonction de configuration est une fonction que la " -"stratégie intégrée appelle pour obtenir le dictionnaire de configuration " -"pour le tour en cours. Elle transmet ensuite le dictionnaire de " -"configuration à tous les clients sélectionnés au cours de ce tour." -#: ../../source/how-to-configure-clients.rst:34 -msgid "" -"Let's start with a simple example. Imagine we want to send (a) the batch " -"size that the client should use, (b) the current global round of " -"federated learning, and (c) the number of epochs to train on the client-" -"side. Our configuration function could look like this:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst +msgid "contains the configuration for persisting the SuperLink state." 
msgstr "" -"Commençons par un exemple simple. Imaginons que nous voulions envoyer (a)" -" la taille du lot que le client doit utiliser, (b) le cycle global actuel" -" de l'apprentissage fédéré et (c) le nombre d'époques à former du côté " -"client. Notre fonction de configuration pourrait ressembler à ceci :" -#: ../../source/how-to-configure-clients.rst:47 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst msgid "" -"To make the built-in strategies use this function, we can pass it to " -"``FedAvg`` during initialization using the parameter " -":code:`on_fit_config_fn`:" +"Docker merges Compose files according to `merging rules " +"`_." msgstr "" -"Pour que les stratégies intégrées utilisent cette fonction, nous pouvons " -"la passer à ``FedAvg`` lors de l'initialisation en utilisant le paramètre" -" :code:`on_fit_config_fn` :" -#: ../../source/how-to-configure-clients.rst:56 -msgid "One the client side, we receive the configuration dictionary in ``fit``:" -msgstr "Côté client, nous recevons le dictionnaire de configuration dans ``fit`` :" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:193 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:247 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:375 +msgid "Rerun the ``quickstart-compose`` project:" +msgstr "" -#: ../../source/how-to-configure-clients.rst:67 -msgid "" -"There is also an `on_evaluate_config_fn` to configure evaluation, which " -"works the same way. They are separate functions because one might want to" -" send different configuration values to `evaluate` (for example, to use a" -" different batch size)." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:199 +msgid "Check the content of the ``state`` directory:" msgstr "" -"Il existe également une fonction `on_evaluate_config_fn` pour configurer " -"l'évaluation, qui fonctionne de la même manière. 
Ce sont des fonctions " -"séparées car on peut vouloir envoyer différentes valeurs de configuration" -" à `evaluate` (par exemple, pour utiliser une taille de lot différente)." -#: ../../source/how-to-configure-clients.rst:69 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:206 msgid "" -"The built-in strategies call this function every round (that is, every " -"time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). " -"Calling `on_evaluate_config_fn` every round allows us to vary/change the " -"config dict over consecutive rounds. If we wanted to implement a " -"hyperparameter schedule, for example, to increase the number of local " -"epochs during later rounds, we could do the following:" +"You should see a ``state.db`` file in the ``state`` directory. If you " +"restart the service, the state file will be used to restore the state " +"from the previously saved data. This ensures that the data persists even " +"if the containers are stopped and started again." msgstr "" -"Les stratégies intégrées appellent cette fonction à chaque tour " -"(c'est-à-dire à chaque fois que `Strategy.configure_fit` ou " -"`Strategy.configure_evaluate` s'exécute). Appeler `on_evaluate_config_fn`" -" à chaque tour nous permet de varier/changer le dict de config au cours " -"de tours consécutifs. Si nous voulions mettre en place un calendrier " -"d'hyperparamètres, par exemple, pour augmenter le nombre d'époques " -"locales au cours des derniers tours, nous pourrions faire ce qui suit :" - -#: ../../source/how-to-configure-clients.rst:82 -msgid "The :code:`FedAvg` strategy will call this function *every round*." -msgstr "La stratégie :code:`FedAvg` appellera cette fonction *à chaque tour*." 
-#: ../../source/how-to-configure-clients.rst:85 -msgid "Configuring individual clients" -msgstr "Configuration des clients individuels" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:214 +msgid "Step 6: Run Flower with TLS" +msgstr "" -#: ../../source/how-to-configure-clients.rst:87 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:216 msgid "" -"In some cases, it is necessary to send different configuration values to " -"different clients." +"To demonstrate how to enable TLS, generate self-signed certificates using" +" the ``certs.yml`` Compose file." msgstr "" -"Dans certains cas, il est nécessaire d'envoyer des valeurs de " -"configuration différentes à des clients différents." -#: ../../source/how-to-configure-clients.rst:89 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:223 msgid "" -"This can be achieved by customizing an existing strategy or by " -":doc:`implementing a custom strategy from scratch `. Here's a nonsensical example that customizes :code:`FedAvg`" -" by adding a custom ``\"hello\": \"world\"`` configuration key/value pair" -" to the config dict of a *single client* (only the first client in the " -"list, the other clients in this round to not receive this \"special\" " -"config value):" +"For production environments, use a service like `Let's Encrypt " +"`_ to obtain your certificates." msgstr "" -"Ceci peut être réalisé en personnalisant une stratégie existante ou en " -"`mettant en œuvre une stratégie personnalisée à partir de zéro " -"`_. 
" -"Voici un exemple absurde qui personnalise :code:`FedAvg` en ajoutant une " -"paire clé/valeur de configuration personnalisée ``\"hello\" : \"world\"``" -" au config dict d'un *seul client* (uniquement le premier client de la " -"liste, les autres clients de cette série ne recevant pas cette valeur de " -"configuration \"spéciale\") :" -#: ../../source/how-to-configure-logging.rst:2 -#, fuzzy -msgid "Configure logging" -msgstr "Configurer les clients" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:241 +msgid "Restart the services with TLS enabled:" +msgstr "" -#: ../../source/how-to-configure-logging.rst:4 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:255 +msgid "Step 7: Add another SuperNode" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:257 msgid "" -"The Flower logger keeps track of all core events that take place in " -"federated learning workloads. It presents information by default " -"following a standard message format:" +"You can add more SuperNodes and ClientApps by duplicating their " +"definitions in the ``compose.yml`` file." msgstr "" -#: ../../source/how-to-configure-logging.rst:13 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:260 msgid "" -"containing relevant information including: log message level (e.g. " -":code:`INFO`, :code:`DEBUG`), a timestamp, the line where the logging " -"took place from, as well as the log message itself. In this way, the " -"logger would typically display information on your terminal as follows:" +"Just give each new SuperNode and ClientApp service a unique service name " +"like ``supernode-3``, ``clientapp-3``, etc." 
msgstr "" -#: ../../source/how-to-configure-logging.rst:34 -msgid "Saving log to file" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:263 +msgid "In ``compose.yml``, add the following:" msgstr "" -#: ../../source/how-to-configure-logging.rst:36 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:265 +msgid "compose.yml" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:316 msgid "" -"By default, the Flower log is outputted to the terminal where you launch " -"your Federated Learning workload from. This applies for both gRPC-based " -"federation (i.e. when you do :code:`fl.server.start_server`) and when " -"using the :code:`VirtualClientEngine` (i.e. when you do " -":code:`fl.simulation.start_simulation`). In some situations you might " -"want to save this log to disk. You can do so by calling the " -"`fl.common.logger.configure() " -"`_" -" function. For example:" +"If you also want to enable TLS for the new SuperNodes, duplicate the " +"SuperNode definition for each new SuperNode service in the ``with-" +"tls.yml`` file." msgstr "" -#: ../../source/how-to-configure-logging.rst:53 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:319 msgid "" -"With the above, Flower will record the log you see on your terminal to " -":code:`log.txt`. This file will be created in the same directory as were " -"you are running the code from. If we inspect we see the log above is also" -" recorded but prefixing with :code:`identifier` each line:" +"Make sure that the names of the services match with the one in the " +"``compose.yml`` file." msgstr "" -"Avec ce qui précède, Flower enregistrera le log que vous voyez sur votre " -"terminal dans :code:`log.txt`. Ce fichier sera créé dans le répertoire " -"depuis lequel le code est exécuté. 
Si nous inspectons nous voyons que le " -"log ci-dessous est également enregistré mais préfixé avec " -":code:`identifier` sur chaque ligne :" -#: ../../source/how-to-configure-logging.rst:74 -msgid "Log your own messages" -msgstr "Loggez vos propres messages" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:321 +msgid "In ``with-tls.yml``, add the following:" +msgstr "" -#: ../../source/how-to-configure-logging.rst:76 -msgid "" -"You might expand the information shown by default with the Flower logger " -"by adding more messages relevant to your application. You can achieve " -"this easily as follows." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:323 +msgid "with-tls.yml" msgstr "" -#: ../../source/how-to-configure-logging.rst:102 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:345 +msgid "Step 8: Persisting the SuperLink State and Enabling TLS" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:347 msgid "" -"In this way your logger will show, in addition to the default messages, " -"the ones introduced by the clients as specified above." +"To run Flower with persisted SuperLink state and enabled TLS, a slight " +"change in the ``with-state.yml`` file is required:" msgstr "" -#: ../../source/how-to-configure-logging.rst:128 -msgid "Log to a remote service" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:350 +msgid "Comment out the lines 2-4 and uncomment the lines 5-9:" msgstr "" -#: ../../source/how-to-configure-logging.rst:130 -msgid "" -"The :code:`fl.common.logger.configure` function, also allows specifying a" -" host to which logs can be pushed (via :code:`POST`) through a native " -"Python :code:`logging.handler.HTTPHandler`. This is a particularly useful" -" feature in :code:`gRPC`-based Federated Learning workloads where " -"otherwise gathering logs from all entities (i.e. the server and the " -"clients) might be cumbersome. 
Note that in Flower simulation, the server " -"automatically displays all logs. You can still specify a " -":code:`HTTPHandler` should you wish to backup or analyze the logs " -"somewhere else." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:352 +msgid "with-state.yml" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:2 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 #, fuzzy -msgid "Enable SSL connections" -msgstr "Collecte centralisée des données" +msgid "Restart the services:" +msgstr "Démarrer le serveur" -#: ../../source/how-to-enable-ssl-connections.rst:4 -#, fuzzy -msgid "" -"This guide describes how to a SSL-enabled secure Flower server " -"(:code:`SuperLink`) can be started and how a Flower client " -"(:code:`SuperNode`) can establish a secure connections to it." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:383 +msgid "Step 9: Merge Multiple Compose Files" msgstr "" -"Ce guide décrit comment démarrer un serveur Flower sécurisé par SSL et " -"comment un client Flower peut établir une connexion sécurisée avec lui." -#: ../../source/how-to-enable-ssl-connections.rst:7 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:385 msgid "" -"A complete code example demonstrating a secure connection can be found " -"`here `_." +"You can merge multiple Compose files into a single file. For instance, if" +" you wish to combine the basic configuration with the TLS configuration, " +"execute the following command:" msgstr "" -"Un exemple de code complet démontrant une connexion sécurisée peut être " -"trouvé ici `_." -#: ../../source/how-to-enable-ssl-connections.rst:10 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:394 msgid "" -"The code example comes with a :code:`README.md` file which explains how " -"to start it. Although it is already SSL-enabled, it might be less " -"descriptive on how it does so. 
Stick to this guide for a deeper " -"introduction to the topic." +"This will merge the contents of ``compose.yml`` and ``with-tls.yml`` into" +" a new file called ``my_compose.yml``." msgstr "" -"L'exemple de code est accompagné d'un fichier README.md qui t'expliquera " -"comment le démarrer. Bien qu'il soit déjà activé par SSL, il peut être " -"moins descriptif sur la façon de procéder. Tiens-toi en à ce guide pour " -"une introduction plus approfondie sur le sujet." -#: ../../source/how-to-enable-ssl-connections.rst:16 -msgid "Certificates" -msgstr "Certificats" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:398 +msgid "Step 10: Clean Up" +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:400 +msgid "Remove all services and volumes:" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:410 #, fuzzy +msgid ":doc:`run-quickstart-examples-docker-compose`" +msgstr "Démarrage rapide XGBoost" + +#: ../../source/docker/use-a-different-version.rst:2 +msgid "Use a Different Flower Version" +msgstr "" + +#: ../../source/docker/use-a-different-version.rst:4 msgid "" -"Using SSL-enabled connections requires certificates to be passed to the " -"server and client. For the purpose of this guide we are going to generate" -" self-signed certificates. As this can become quite complex we are going " -"to ask you to run the script in :code:`examples/advanced-" -"tensorflow/certificates/generate.sh` with the following command sequence:" +"If you want to use a different version of Flower, for example Flower " +"nightly, you can do so by changing the tag. All available versions are on" +" `Docker Hub `__." msgstr "" -"L'utilisation de connexions compatibles avec le protocole SSL nécessite " -"que des certificats soient transmis au serveur et au client. Pour les " -"besoins de ce guide, nous allons générer des certificats auto-signés. 
" -"Comme cela peut devenir assez complexe, nous allons te demander " -"d'exécuter le script dans :code:`examples/advanced-" -"tensorflow/certificates/generate.sh`" -#: ../../source/how-to-enable-ssl-connections.rst:29 -#, fuzzy +#: ../../source/docker/use-a-different-version.rst:10 msgid "" -"This will generate the certificates in :code:`examples/advanced-" -"tensorflow/.cache/certificates`." +"When using Flower nightly, the SuperLink nightly image must be paired " +"with the corresponding SuperNode and ServerApp nightly images released on" +" the same day. To ensure the versions are in sync, using the concrete " +"tag, e.g., ``1.10.0.dev20240610`` instead of ``nightly`` is recommended." msgstr "" -"Cela générera les certificats dans :code:`examples/advanced-" -"tensorflow/.cache/certificates`." -#: ../../source/how-to-enable-ssl-connections.rst:31 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 +msgid "Example: FedBN in PyTorch - From Centralized To Federated" +msgstr "Exemple : FedBN dans PyTorch - De la centralisation à la fédération" + +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 #, fuzzy msgid "" -"The approach for generating SSL certificates in the context of this " -"example can serve as an inspiration and starting point, but it should not" -" be used as a reference for production environments. Please refer to " -"other sources regarding the issue of correctly generating certificates " -"for production environments. For non-critical prototyping or research " -"projects, it might be sufficient to use the self-signed certificates " -"generated using the scripts mentioned in this guide." +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload with `FedBN " +"`_, a federated training strategy " +"designed for non-iid data. 
We are using PyTorch to train a Convolutional " +"Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " +"When applying FedBN, only few changes needed compared to :doc:`Example: " +"PyTorch - From Centralized To Federated `." msgstr "" -"L'approche de la génération des certificats SSL dans cet exemple peut " -"servir d'inspiration et de point de départ, mais ne doit pas être " -"considérée comme complète pour les environnements de production." +"Ce tutoriel te montrera comment utiliser Flower pour construire une " +"version fédérée d'une charge de travail d'apprentissage automatique " +"existante avec `FedBN `_, une stratégie" +" de formation fédérée conçue pour les données non-identifiées. Nous " +"utilisons PyTorch pour former un réseau neuronal convolutif (avec des " +"couches de normalisation par lots) sur l'ensemble de données CIFAR-10. " +"Lors de l'application de FedBN, seules quelques modifications sont " +"nécessaires par rapport à `Exemple : PyTorch - De la centralisation à la " +"fédération `_." -#: ../../source/how-to-enable-ssl-connections.rst:39 -#, fuzzy -msgid "Server (SuperLink)" -msgstr "flower-superlink" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:12 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 +msgid "Centralized Training" +msgstr "Formation centralisée" -#: ../../source/how-to-enable-ssl-connections.rst:41 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:14 #, fuzzy msgid "" -"Use the following terminal command to start a sever (SuperLink) that uses" -" the previously generated certificates:" +"All files are revised based on :doc:`Example: PyTorch - From Centralized " +"To Federated `. 
The only " +"thing to do is modifying the file called ``cifar.py``, revised part is " +"shown below:" msgstr "" -"Nous allons maintenant montrer comment écrire un client qui utilise les " -"scripts générés précédemment :" +"Tous les fichiers sont révisés sur la base de `Exemple : PyTorch - From " +"Centralized To Federated `_. La seule chose à faire est de modifier " +"le fichier appelé :code:`cifar.py`, la partie révisée est montrée ci-" +"dessous :" -#: ../../source/how-to-enable-ssl-connections.rst:47 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:18 msgid "" -"When providing certificates, the server expects a tuple of three " -"certificates paths: CA certificate, server certificate and server private" -" key." +"The model architecture defined in class Net() is added with Batch " +"Normalization layers accordingly." msgstr "" +"L'architecture du modèle définie dans la classe Net() est ajoutée avec " +"les couches de normalisation par lots en conséquence." -#: ../../source/how-to-enable-ssl-connections.rst:51 -#, fuzzy -msgid "Client (SuperNode)" -msgstr "Codes d'état du client." +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:171 +msgid "You can now run your machine learning workload:" +msgstr "" +"Tu peux maintenant exécuter ta charge de travail d'apprentissage " +"automatique :" -#: ../../source/how-to-enable-ssl-connections.rst:53 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 #, fuzzy msgid "" -"Use the following terminal command to start a client (SuperNode) that " -"uses the previously generated certificates:" +"So far this should all look fairly familiar if you've used PyTorch " +"before. Let's take the next step and use what we've built to create a " +"federated learning system within FedBN, the system consists of one server" +" and two clients." 
msgstr "" -"Nous allons maintenant montrer comment écrire un client qui utilise les " -"scripts générés précédemment :" +"Jusqu'à présent, tout ceci devrait te sembler assez familier si tu as " +"déjà utilisé PyTorch. Passons à l'étape suivante et utilisons ce que nous" +" avons construit pour créer un système d'apprentissage fédéré au sein de " +"FedBN, le système se compose d'un serveur et de deux clients." + +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:58 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:182 +msgid "Federated Training" +msgstr "Formation fédérée" -#: ../../source/how-to-enable-ssl-connections.rst:61 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:60 #, fuzzy msgid "" -"When setting :code:`root_certificates`, the client expects a file path to" -" PEM-encoded root certificates." +"If you have read :doc:`Example: PyTorch - From Centralized To Federated " +"`, the following parts are" +" easy to follow, only ``get_parameters`` and ``set_parameters`` function " +"in ``client.py`` needed to revise. If not, please read the :doc:`Example:" +" PyTorch - From Centralized To Federated `. first." msgstr "" -"En définissant :code:`root_certificates`, le client s'attend à recevoir " -"les certificats racine codés en PEM sous forme de chaîne d'octets. Nous " -"utilisons à nouveau :code:`Path` pour simplifier la lecture de ces " -"certificats sous forme de chaînes d'octets." +"Si vous avez lu `Exemple : PyTorch - From Centralized To Federated " +"`_, les parties suivantes sont faciles à suivre, seules " +"les fonctions :code:`get_parameters` et :code:`set_parameters` dans " +":code:`client.py` ont besoin d'être révisées. Si ce n'est pas le cas, " +"veuillez lire `Exemple : PyTorch - From Centralized To Federated " +"`. d'abord." 
-#: ../../source/how-to-enable-ssl-connections.rst:67 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:66 #, fuzzy msgid "" -"You should now have learned how to generate self-signed certificates " -"using the given script, start an SSL-enabled server and have a client " -"establish a secure connection to it." +"Our example consists of one *server* and two *clients*. In FedBN, " +"``server.py`` keeps unchanged, we can start the server directly." msgstr "" -"Tu devrais maintenant avoir appris à générer des certificats auto-signés " -"à l'aide du script donné, à démarrer un serveur compatible SSL et à " -"demander à un client d'établir une connexion sécurisée avec lui." +"Notre exemple consiste en un *serveur* et deux *clients*. Dans FedBN, " +":code:`server.py` reste inchangé, nous pouvons démarrer le serveur " +"directement." -#: ../../source/how-to-enable-ssl-connections.rst:72 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:73 #, fuzzy -msgid "Additional resources" -msgstr "Ressources supplémentaires" - -#: ../../source/how-to-enable-ssl-connections.rst:74 msgid "" -"These additional sources might be relevant if you would like to dive " -"deeper into the topic of certificates:" +"Finally, we will revise our *client* logic by changing ``get_parameters``" +" and ``set_parameters`` in ``client.py``, we will exclude batch " +"normalization parameters from model parameter list when sending to or " +"receiving from the server." msgstr "" -"Ces sources supplémentaires peuvent être pertinentes si tu souhaites " -"approfondir le sujet des certificats :" +"Enfin, nous allons réviser notre logique *client* en modifiant " +":code:`get_parameters` et :code:`set_parameters` dans :code:`client.py`, " +"nous allons exclure les paramètres de normalisation des lots de la liste " +"des paramètres du modèle lors de l'envoi ou de la réception depuis le " +"serveur." 
-#: ../../source/how-to-enable-ssl-connections.rst:76 -msgid "`Let's Encrypt `_" -msgstr "`Let's Encrypt `_" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:102 +msgid "Now, you can now open two additional terminal windows and run" +msgstr "Tu peux maintenant ouvrir deux autres fenêtres de terminal et lancer" -#: ../../source/how-to-enable-ssl-connections.rst:77 -msgid "`certbot `_" -msgstr "`certbot `_" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:108 +msgid "" +"in each window (make sure that the server is still running before you do " +"so) and see your (previously centralized) PyTorch project run federated " +"learning with FedBN strategy across two clients. Congratulations!" +msgstr "" +"dans chaque fenêtre (assure-toi que le serveur est toujours en cours " +"d'exécution avant de le faire) et tu verras ton projet PyTorch " +"(auparavant centralisé) exécuter l'apprentissage fédéré avec la stratégie" +" FedBN sur deux clients. Félicitations !" -#: ../../source/how-to-implement-strategies.rst:2 -#, fuzzy -msgid "Implement strategies" -msgstr "Mettre en place des stratégies" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:113 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:349 +#: ../../source/tutorial-quickstart-jax.rst:319 +msgid "Next Steps" +msgstr "Prochaines étapes" -#: ../../source/how-to-implement-strategies.rst:4 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:115 +#, fuzzy msgid "" -"The strategy abstraction enables implementation of fully custom " -"strategies. A strategy is basically the federated learning algorithm that" -" runs on the server. Strategies decide how to sample clients, how to " -"configure clients for training, how to aggregate updates, and how to " -"evaluate models. Flower provides a few built-in strategies which are " -"based on the same API described below." 
+"The full source code for this example can be found `here " +"`_. Our example is of course somewhat over-" +"simplified because both clients load the exact same dataset, which isn't " +"realistic. You're now prepared to explore this topic further. How about " +"using different subsets of CIFAR-10 on each client? How about adding more" +" clients?" msgstr "" -"L'abstraction de la stratégie permet de mettre en œuvre des stratégies " -"entièrement personnalisées. Une stratégie est essentiellement " -"l'algorithme d'apprentissage fédéré qui s'exécute sur le serveur. Les " -"stratégies décident comment échantillonner les clients, comment " -"configurer les clients pour la formation, comment agréger les mises à " -"jour et comment évaluer les modèles. Flower fournit quelques stratégies " -"intégrées qui sont basées sur la même API que celle décrite ci-dessous." +"Le code source complet de cet exemple se trouve ici " +"`_. Notre exemple est bien sûr un peu trop " +"simplifié parce que les deux clients chargent exactement le même ensemble" +" de données, ce qui n'est pas réaliste. Tu es maintenant prêt à " +"approfondir ce sujet. Pourquoi ne pas utiliser différents sous-ensembles " +"de CIFAR-10 sur chaque client ? Pourquoi ne pas ajouter d'autres clients " +"?" -#: ../../source/how-to-implement-strategies.rst:11 -msgid "The :code:`Strategy` abstraction" -msgstr "L'abstraction :code:`Stratégie`" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:2 +msgid "Example: PyTorch - From Centralized To Federated" +msgstr "Exemple : PyTorch - De la centralisation à la fédération" -#: ../../source/how-to-implement-strategies.rst:13 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:4 msgid "" -"All strategy implementation are derived from the abstract base class " -":code:`flwr.server.strategy.Strategy`, both built-in implementations and " -"third party implementations. 
This means that custom strategy " -"implementations have the exact same capabilities at their disposal as " -"built-in ones." +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload. We are using PyTorch to" +" train a Convolutional Neural Network on the CIFAR-10 dataset. First, we " +"introduce this machine learning task with a centralized training approach" +" based on the `Deep Learning with PyTorch " +"`_ " +"tutorial. Then, we build upon the centralized training code to run the " +"training in a federated fashion." msgstr "" -"Toutes les implémentations de stratégies sont dérivées de la classe de " -"base abstraite :code:`flwr.server.strategy.Strategy`, qu'il s'agisse " -"d'implémentations intégrées ou d'implémentations tierces. Cela signifie " -"que les implémentations de stratégies personnalisées ont exactement les " -"mêmes capacités à leur disposition que les implémentations intégrées." +"Ce tutoriel te montrera comment utiliser Flower pour construire une " +"version fédérée d'une charge de travail d'apprentissage automatique " +"existante. Nous utilisons PyTorch pour entraîner un réseau neuronal " +"convolutif sur l'ensemble de données CIFAR-10. Tout d'abord, nous " +"présentons cette tâche d'apprentissage automatique avec une approche " +"d'entraînement centralisée basée sur le tutoriel `Deep Learning with " +"PyTorch " +"`_. " +"Ensuite, nous nous appuyons sur le code d'entraînement centralisé pour " +"exécuter l'entraînement de manière fédérée." -#: ../../source/how-to-implement-strategies.rst:18 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:14 msgid "" -"The strategy abstraction defines a few abstract methods that need to be " -"implemented:" +"We begin with a brief description of the centralized CNN training code. " +"If you want a more in-depth explanation of what's going on then have a " +"look at the official `PyTorch tutorial " +"`_." 
msgstr "" -"L'abstraction de la stratégie définit quelques méthodes abstraites qui " -"doivent être mises en œuvre :" +"Nous commençons par une brève description du code d'entraînement CNN " +"centralisé. Si tu veux une explication plus approfondie de ce qui se " +"passe, jette un coup d'œil au tutoriel officiel `PyTorch " +"`_." -#: ../../source/how-to-implement-strategies.rst:75 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:18 +#, fuzzy msgid "" -"Creating a new strategy means implementing a new :code:`class` (derived " -"from the abstract base class :code:`Strategy`) that implements for the " -"previously shown abstract methods:" +"Let's create a new file called ``cifar.py`` with all the components " +"required for a traditional (centralized) training on CIFAR-10. First, all" +" required packages (such as ``torch`` and ``torchvision``) need to be " +"imported. You can see that we do not import any package for federated " +"learning. You can keep all these imports as they are even when we add the" +" federated learning components at a later point." msgstr "" -"La création d'une nouvelle stratégie implique la mise en œuvre d'une " -"nouvelle :code:`classe` (dérivée de la classe de base abstraite " -":code:`Stratégie`) qui met en œuvre les méthodes abstraites présentées " -"précédemment :" - -#: ../../source/how-to-implement-strategies.rst:100 -msgid "The Flower server calls these methods in the following order:" -msgstr "Le serveur Flower appelle ces méthodes dans l'ordre suivant :" - -#: ../../source/how-to-implement-strategies.rst:177 -msgid "The following sections describe each of those methods in more detail." -msgstr "Les sections suivantes décrivent chacune de ces méthodes plus en détail." 
- -#: ../../source/how-to-implement-strategies.rst:180 -msgid "The :code:`initialize_parameters` method" -msgstr "La méthode :code:`initialize_parameters` (initialisation des paramètres)" +"Créons un nouveau fichier appelé :code:`cifar.py` avec tous les " +"composants requis pour une formation traditionnelle (centralisée) sur le " +"CIFAR-10. Tout d'abord, tous les paquets requis (tels que :code:`torch` " +"et :code:`torchvision`) doivent être importés. Tu peux voir que nous " +"n'importons aucun paquet pour l'apprentissage fédéré. Tu peux conserver " +"toutes ces importations telles quelles même lorsque nous ajouterons les " +"composants d'apprentissage fédéré à un moment ultérieur." -#: ../../source/how-to-implement-strategies.rst:182 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:36 +#, fuzzy msgid "" -":code:`initialize_parameters` is called only once, at the very beginning " -"of an execution. It is responsible for providing the initial global model" -" parameters in a serialized form (i.e., as a :code:`Parameters` object)." +"As already mentioned we will use the CIFAR-10 dataset for this machine " +"learning workload. The model architecture (a very simple Convolutional " +"Neural Network) is defined in ``class Net()``." msgstr "" -":code:`initialize_parameters` n'est appelé qu'une seule fois, au tout " -"début d'une exécution. Il est chargé de fournir les paramètres initiaux " -"du modèle global sous une forme sérialisée (c'est-à-dire sous la forme " -"d'un objet :code:`Parameters`)." +"Comme nous l'avons déjà mentionné, nous utiliserons l'ensemble de données" +" CIFAR-10 pour cette charge de travail d'apprentissage automatique. " +"L'architecture du modèle (un réseau neuronal convolutif très simple) est " +"définie dans :code:`class Net()`." 
-#: ../../source/how-to-implement-strategies.rst:184 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:62 +#, fuzzy msgid "" -"Built-in strategies return user-provided initial parameters. The " -"following example shows how initial parameters can be passed to " -":code:`FedAvg`:" +"The ``load_data()`` function loads the CIFAR-10 training and test sets. " +"The ``transform`` normalized the data after loading." msgstr "" -"Les stratégies intégrées renvoient les paramètres initiaux fournis par " -"l'utilisateur. L'exemple suivant montre comment les paramètres initiaux " -"peuvent être transmis à :code:`FedAvg` :" +"La fonction :code:`load_data()` charge les ensembles d'entraînement et de" +" test CIFAR-10. La fonction :code:`transform` normalise les données après" +" leur chargement." -#: ../../source/how-to-implement-strategies.rst:209 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:84 +#, fuzzy msgid "" -"The Flower server will call :code:`initialize_parameters`, which either " -"returns the parameters that were passed to :code:`initial_parameters`, or" -" :code:`None`. If no parameters are returned from " -":code:`initialize_parameters` (i.e., :code:`None`), the server will " -"randomly select one client and ask it to provide its parameters. This is " -"a convenience feature and not recommended in practice, but it can be " -"useful for prototyping. In practice, it is recommended to always use " -"server-side parameter initialization." +"We now need to define the training (function ``train()``) which loops " +"over the training set, measures the loss, backpropagates it, and then " +"takes one optimizer step for each batch of training examples." msgstr "" -"Le serveur Flower appelle :code:`initialize_parameters`, qui renvoie les " -"paramètres passés à :code:`initial_parameters`, ou :code:`None`. 
Si aucun" -" paramètre n'est renvoyé par :code:`initialize_parameters` (c'est-à-dire " -":code:`None`), le serveur sélectionne au hasard un client et lui demande " -"de fournir ses paramètres. Il s'agit d'une fonction de commodité qui " -"n'est pas recommandée dans la pratique, mais qui peut être utile pour le " -"prototypage. Dans la pratique, il est recommandé de toujours utiliser " -"l'initialisation des paramètres du côté du serveur." +"Nous devons maintenant définir la formation (fonction :code:`train()`) " +"qui passe en boucle sur l'ensemble de la formation, mesure la perte, la " +"rétropropage, puis effectue une étape d'optimisation pour chaque lot " +"d'exemples de formation." -#: ../../source/how-to-implement-strategies.rst:213 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:88 +#, fuzzy msgid "" -"Server-side parameter initialization is a powerful mechanism. It can be " -"used, for example, to resume training from a previously saved checkpoint." -" It is also the fundamental capability needed to implement hybrid " -"approaches, for example, to fine-tune a pre-trained model using federated" -" learning." +"The evaluation of the model is defined in the function ``test()``. The " +"function loops over all test samples and measures the loss of the model " +"based on the test dataset." msgstr "" -"L'initialisation des paramètres côté serveur est un mécanisme puissant. " -"Elle peut être utilisée, par exemple, pour reprendre l'entraînement à " -"partir d'un point de contrôle précédemment sauvegardé. C'est également la" -" capacité fondamentale nécessaire pour mettre en œuvre des approches " -"hybrides, par exemple, pour affiner un modèle pré-entraîné à l'aide de " -"l'apprentissage fédéré." - -#: ../../source/how-to-implement-strategies.rst:216 -msgid "The :code:`configure_fit` method" -msgstr "La méthode :code:`configure_fit`" +"L'évaluation du modèle est définie dans la fonction :code:`test()`. 
La " +"fonction boucle sur tous les échantillons de test et mesure la perte du " +"modèle en fonction de l'ensemble des données de test." -#: ../../source/how-to-implement-strategies.rst:218 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:149 msgid "" -":code:`configure_fit` is responsible for configuring the upcoming round " -"of training. What does *configure* mean in this context? Configuring a " -"round means selecting clients and deciding what instructions to send to " -"these clients. The signature of :code:`configure_fit` makes this clear:" +"Having defined the data loading, model architecture, training, and " +"evaluation we can put everything together and train our CNN on CIFAR-10." msgstr "" -":code:`configure_fit` est chargé de configurer le prochain tour de " -"formation. Que signifie *configurer* dans ce contexte ? Configurer un " -"tour signifie sélectionner des clients et décider des instructions à leur" -" envoyer. La signature de :code:`configure_fit` l'indique clairement :" +"Après avoir défini le chargement des données, l'architecture du modèle, " +"la formation et l'évaluation, nous pouvons tout mettre ensemble et former" +" notre CNN sur CIFAR-10." -#: ../../source/how-to-implement-strategies.rst:231 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:177 msgid "" -"The return value is a list of tuples, each representing the instructions " -"that will be sent to a particular client. Strategy implementations " -"usually perform the following steps in :code:`configure_fit`:" +"So far, this should all look fairly familiar if you've used PyTorch " +"before. Let's take the next step and use what we've built to create a " +"simple federated learning system consisting of one server and two " +"clients." msgstr "" -"La valeur de retour est une liste de tuples, chacun représentant les " -"instructions qui seront envoyées à un client particulier. 
Les " -"implémentations de stratégies effectuent généralement les étapes " -"suivantes dans :code:`configure_fit` :" +"Jusqu'à présent, tout cela devrait te sembler assez familier si tu as " +"déjà utilisé PyTorch. Passons à l'étape suivante et utilisons ce que nous" +" avons construit pour créer un simple système d'apprentissage fédéré " +"composé d'un serveur et de deux clients." -#: ../../source/how-to-implement-strategies.rst:233 -#: ../../source/how-to-implement-strategies.rst:280 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:184 msgid "" -"Use the :code:`client_manager` to randomly sample all (or a subset of) " -"available clients (each represented as a :code:`ClientProxy` object)" +"The simple machine learning project discussed in the previous section " +"trains the model on a single dataset (CIFAR-10), we call this centralized" +" learning. This concept of centralized learning, as shown in the previous" +" section, is probably known to most of you, and many of you have used it " +"previously. Normally, if you'd want to run machine learning workloads in " +"a federated fashion, then you'd have to change most of your code and set " +"everything up from scratch. This can be a considerable effort." msgstr "" -"Utilise le :code:`client_manager` pour échantillonner au hasard tous les " -"clients disponibles (ou un sous-ensemble d'entre eux) (chacun représenté " -"par un objet :code:`ClientProxy`)" +"Le projet simple d'apprentissage automatique discuté dans la section " +"précédente entraîne le modèle sur un seul ensemble de données (CIFAR-10)," +" nous appelons cela l'apprentissage centralisé. Ce concept " +"d'apprentissage centralisé, comme le montre la section précédente, est " +"probablement connu de la plupart d'entre vous, et beaucoup d'entre vous " +"l'ont déjà utilisé. 
Normalement, si tu veux exécuter des charges de " +"travail d'apprentissage automatique de manière fédérée, tu dois alors " +"changer la plupart de ton code et tout mettre en place à partir de zéro, " +"ce qui peut représenter un effort considérable." -#: ../../source/how-to-implement-strategies.rst:234 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 msgid "" -"Pair each :code:`ClientProxy` with the same :code:`FitIns` holding the " -"current global model :code:`parameters` and :code:`config` dict" +"However, with Flower you can evolve your pre-existing code into a " +"federated learning setup without the need for a major rewrite." msgstr "" -"Associe chaque :code:`ClientProxy` au même :code:`FitIns` contenant le " -"modèle global actuel :code:`parameters` et :code:`config` dict" +"Cependant, avec Flower, tu peux faire évoluer ton code préexistant vers " +"une configuration d'apprentissage fédéré sans avoir besoin d'une " +"réécriture majeure." -#: ../../source/how-to-implement-strategies.rst:236 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:194 #, fuzzy msgid "" -"More sophisticated implementations can use :code:`configure_fit` to " -"implement custom client selection logic. A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_fit`." +"The concept is easy to understand. We have to start a *server* and then " +"use the code in ``cifar.py`` for the *clients* that are connected to the " +"*server*. The *server* sends model parameters to the clients. The " +"*clients* run the training and update the parameters. The updated " +"parameters are sent back to the *server* which averages all received " +"parameter updates. This describes one round of the federated learning " +"process and we repeat this for multiple rounds." 
msgstr "" -"Les implémentations plus sophistiquées peuvent utiliser " -":code:`configure_fit` pour mettre en œuvre une logique de sélection des " -"clients personnalisée. Un client ne participera à un tour que si le " -":code:`ClientProxy` correspondant est inclus dans la liste renvoyée par " -":code:`configure_fit`." +"Le concept est facile à comprendre. Nous devons démarrer un *serveur* et " +"utiliser le code dans :code:`cifar.py` pour les *clients* qui sont " +"connectés au *serveur*. Le *serveur* envoie les paramètres du modèle aux " +"clients. Les *clients* exécutent la formation et mettent à jour les " +"paramètres. Les paramètres mis à jour sont renvoyés au *serveur* qui fait" +" la moyenne de toutes les mises à jour de paramètres reçues. Ceci décrit " +"un tour du processus d'apprentissage fédéré et nous répétons cette " +"opération pour plusieurs tours." -#: ../../source/how-to-implement-strategies.rst:240 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:201 +#: ../../source/tutorial-quickstart-jax.rst:147 +#, fuzzy msgid "" -"The structure of this return value provides a lot of flexibility to the " -"user. Since instructions are defined on a per-client basis, different " -"instructions can be sent to each client. This enables custom strategies " -"to train, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." +"Our example consists of one *server* and two *clients*. Let's set up " +"``server.py`` first. The *server* needs to import the Flower package " +"``flwr``. Next, we use the ``start_server`` function to start a server " +"and tell it to perform three rounds of federated learning." msgstr "" -"La structure de cette valeur de retour offre beaucoup de souplesse à " -"l'utilisateur. 
Comme les instructions sont définies par client, des " -"instructions différentes peuvent être envoyées à chaque client, ce qui " -"permet d'élaborer des stratégies personnalisées pour former, par exemple," -" différents modèles sur différents clients, ou utiliser différents " -"hyperparamètres sur différents clients (via le dict :code:`config`)." +"Notre exemple consiste en un *serveur* et deux *clients*. Commençons par " +"configurer :code:`server.py`. Le *serveur* doit importer le paquet Flower" +" :code:`flwr`. Ensuite, nous utilisons la fonction :code:`start_server` " +"pour démarrer un serveur et lui demander d'effectuer trois cycles " +"d'apprentissage fédéré." -#: ../../source/how-to-implement-strategies.rst:243 -msgid "The :code:`aggregate_fit` method" -msgstr "La méthode :code:`aggregate_fit` (agrégation)" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:215 +#: ../../source/tutorial-quickstart-jax.rst:161 +msgid "We can already start the *server*:" +msgstr "Nous pouvons déjà démarrer le *serveur* :" -#: ../../source/how-to-implement-strategies.rst:245 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 +#, fuzzy msgid "" -":code:`aggregate_fit` is responsible for aggregating the results returned" -" by the clients that were selected and asked to train in " -":code:`configure_fit`." +"Finally, we will define our *client* logic in ``client.py`` and build " +"upon the previously defined centralized training in ``cifar.py``. Our " +"*client* needs to import ``flwr``, but also ``torch`` to update the " +"parameters on our PyTorch model:" msgstr "" -":code:`aggregate_fit` est chargé d'agréger les résultats renvoyés par les" -" clients qui ont été sélectionnés et à qui on a demandé de s'entraîner " -"dans :code:`configure_fit`." +"Enfin, nous allons définir notre logique *client* dans :code:`client.py` " +"et nous appuyer sur la formation centralisée définie précédemment dans " +":code:`cifar.py`. 
Notre *client* doit importer :code:`flwr`, mais aussi " +":code:`torch` pour mettre à jour les paramètres de notre modèle PyTorch :" -#: ../../source/how-to-implement-strategies.rst:258 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:238 +#, fuzzy msgid "" -"Of course, failures can happen, so there is no guarantee that the server " -"will get results from all the clients it sent instructions to (via " -":code:`configure_fit`). :code:`aggregate_fit` therefore receives a list " -"of :code:`results`, but also a list of :code:`failures`." +"Implementing a Flower *client* basically means implementing a subclass of" +" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " +"implementation will be based on ``flwr.client.NumPyClient`` and we'll " +"call it ``CifarClient``. ``NumPyClient`` is slightly easier to implement " +"than ``Client`` if you use a framework with good NumPy interoperability " +"(like PyTorch or TensorFlow/Keras) because it avoids some of the " +"boilerplate that would otherwise be necessary. ``CifarClient`` needs to " +"implement four methods, two methods for getting/setting model parameters," +" one method for training the model, and one method for testing the model:" msgstr "" -"Bien sûr, des échecs peuvent se produire, il n'y a donc aucune garantie " -"que le serveur obtienne des résultats de tous les clients auxquels il a " -"envoyé des instructions (via :code:`configure_fit`). " -":code:`aggregate_fit` reçoit donc une liste de :code:`résultats`, mais " -"aussi une liste de :code:`échecs`." +"Implementing a Flower *client* basically means implementing a subclass of" +" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " +"Our implementation will be based on :code:`flwr.client.NumPyClient` and " +"we'll call it :code:`CifarClient`. 
:code:`NumPyClient` is slightly easier" +" to implement than :code:`Client` if you use a framework with good NumPy " +"interoperability (like PyTorch or TensorFlow/Keras) because it avoids " +"some of the boilerplate that would otherwise be necessary. " +":code:`CifarClient` needs to implement four methods, two methods for " +"getting/setting model parameters, one method for training the model, and " +"one method for testing the model:" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 +#, fuzzy +msgid "``set_parameters``" +msgstr ":code:`set_parameters`" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:248 +#: ../../source/tutorial-quickstart-jax.rst:192 +msgid "" +"set the model parameters on the local model that are received from the " +"server" +msgstr "règle les paramètres du modèle local reçus du serveur" -#: ../../source/how-to-implement-strategies.rst:260 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 +#: ../../source/tutorial-quickstart-jax.rst:194 +#, fuzzy msgid "" -":code:`aggregate_fit` returns an optional :code:`Parameters` object and a" -" dictionary of aggregated metrics. The :code:`Parameters` return value is" -" optional because :code:`aggregate_fit` might decide that the results " -"provided are not sufficient for aggregation (e.g., too many failures)." +"loop over the list of model parameters received as NumPy ``ndarray``'s " +"(think list of neural network layers)" msgstr "" -":code:`aggregate_fit` renvoie un objet :code:`Parameters` facultatif et " -"un dictionnaire de métriques agrégées. La valeur de retour " -":code:`Parameters` est facultative car :code:`aggregate_fit` peut décider" -" que les résultats fournis ne sont pas suffisants pour l'agrégation (par " -"exemple, trop d'échecs)." 
+"boucle sur la liste des paramètres du modèle reçus sous forme de NumPy " +":code:`ndarray`'s (pensez à la liste des couches du réseau neuronal)" -#: ../../source/how-to-implement-strategies.rst:263 -msgid "The :code:`configure_evaluate` method" -msgstr "La méthode :code:`configure_evaluate` (en anglais)" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 +#: ../../source/tutorial-quickstart-jax.rst:197 +#: ../../source/tutorial-quickstart-scikitlearn.rst:129 +#, fuzzy +msgid "``get_parameters``" +msgstr ":code:`get_parameters`" -#: ../../source/how-to-implement-strategies.rst:265 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 +#: ../../source/tutorial-quickstart-jax.rst:197 +#, fuzzy msgid "" -":code:`configure_evaluate` is responsible for configuring the upcoming " -"round of evaluation. What does *configure* mean in this context? " -"Configuring a round means selecting clients and deciding what " -"instructions to send to these clients. The signature of " -":code:`configure_evaluate` makes this clear:" +"get the model parameters and return them as a list of NumPy ``ndarray``'s" +" (which is what ``flwr.client.NumPyClient`` expects)" msgstr "" -":code:`configure_evaluate` est chargé de configurer le prochain tour " -"d'évaluation. Que signifie *configurer* dans ce contexte ? Configurer un " -"tour signifie sélectionner des clients et décider des instructions à leur" -" envoyer. La signature de :code:`configure_evaluate` l'indique clairement" -" :" +"récupère les paramètres du modèle et les renvoie sous forme de liste de " +":code:`ndarray` NumPy (ce qui correspond à ce que " +":code:`flwr.client.NumPyClient` attend)" -#: ../../source/how-to-implement-strategies.rst:278 -msgid "" -"The return value is a list of tuples, each representing the instructions " -"that will be sent to a particular client. 
Strategy implementations " -"usually perform the following steps in :code:`configure_evaluate`:" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 +#: ../../source/tutorial-quickstart-jax.rst:202 +#: ../../source/tutorial-quickstart-scikitlearn.rst:136 +msgid "``fit``" msgstr "" -"La valeur de retour est une liste de tuples, chacun représentant les " -"instructions qui seront envoyées à un client particulier. Les " -"implémentations de stratégies effectuent généralement les étapes " -"suivantes dans :code:`configure_evaluate` :" -#: ../../source/how-to-implement-strategies.rst:281 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:255 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:260 +#: ../../source/tutorial-quickstart-jax.rst:200 +#: ../../source/tutorial-quickstart-jax.rst:205 msgid "" -"Pair each :code:`ClientProxy` with the same :code:`EvaluateIns` holding " -"the current global model :code:`parameters` and :code:`config` dict" +"update the parameters of the local model with the parameters received " +"from the server" msgstr "" -"Associe chaque :code:`ClientProxy` au même :code:`EvaluateIns` contenant " -"le modèle global actuel :code:`parameters` et :code:`config` dict" +"mettre à jour les paramètres du modèle local avec les paramètres reçus du" +" serveur" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 +#: ../../source/tutorial-quickstart-jax.rst:202 +msgid "train the model on the local training set" +msgstr "entraîne le modèle sur l'ensemble d'apprentissage local" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:258 +msgid "get the updated local model weights and return them to the server" +msgstr "récupère les poids du modèle local mis à jour et les renvoie au serveur" -#: ../../source/how-to-implement-strategies.rst:283 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 +#: ../../source/tutorial-quickstart-jax.rst:208 +#: 
../../source/tutorial-quickstart-scikitlearn.rst:139 #, fuzzy -msgid "" -"More sophisticated implementations can use :code:`configure_evaluate` to " -"implement custom client selection logic. A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_evaluate`." -msgstr "" -"Les implémentations plus sophistiquées peuvent utiliser " -":code:`configure_evaluate` pour mettre en œuvre une logique de sélection " -"des clients personnalisée. Un client ne participera à un tour que si le " -":code:`ClientProxy` correspondant est inclus dans la liste renvoyée par " -":code:`configure_evaluate`." +msgid "``evaluate``" +msgstr ":code:`évaluer`" -#: ../../source/how-to-implement-strategies.rst:287 -msgid "" -"The structure of this return value provides a lot of flexibility to the " -"user. Since instructions are defined on a per-client basis, different " -"instructions can be sent to each client. This enables custom strategies " -"to evaluate, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." -msgstr "" -"La structure de cette valeur de retour offre beaucoup de souplesse à " -"l'utilisateur. Comme les instructions sont définies par client, des " -"instructions différentes peuvent être envoyées à chaque client. Cela " -"permet aux stratégies personnalisées d'évaluer, par exemple, différents " -"modèles sur différents clients, ou d'utiliser différents hyperparamètres " -"sur différents clients (via le dict :code:`config`)." 
+#: ../../source/example-pytorch-from-centralized-to-federated.rst:262 +#: ../../source/tutorial-quickstart-jax.rst:207 +msgid "evaluate the updated model on the local test set" +msgstr "évaluer le modèle mis à jour sur l'ensemble de test local" -#: ../../source/how-to-implement-strategies.rst:291 -msgid "The :code:`aggregate_evaluate` method" -msgstr "La méthode :code:`aggregate_evaluate` (agréger_évaluer)" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 +msgid "return the local loss and accuracy to the server" +msgstr "renvoie la perte locale et la précision au serveur" -#: ../../source/how-to-implement-strategies.rst:293 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:265 +#, fuzzy msgid "" -":code:`aggregate_evaluate` is responsible for aggregating the results " -"returned by the clients that were selected and asked to evaluate in " -":code:`configure_evaluate`." +"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " +"functions ``train()`` and ``test()`` previously defined in ``cifar.py``. " +"So what we really do here is we tell Flower through our ``NumPyClient`` " +"subclass which of our already defined functions to call for training and " +"evaluation. We included type annotations to give you a better " +"understanding of the data types that get passed around." msgstr "" -":code:`aggregate_evaluate` est chargé d'agréger les résultats renvoyés " -"par les clients qui ont été sélectionnés et à qui l'on a demandé " -"d'évaluer dans :code:`configure_evaluate`." +"Les deux méthodes :code:`NumPyClient` :code:`fit` et :code:`evaluate` " +"utilisent les fonctions :code:`train()` et :code:`test()` définies " +"précédemment dans :code:`cifar.py`. Ce que nous faisons vraiment ici, " +"c'est que nous indiquons à Flower, par le biais de notre sous-classe " +":code:`NumPyClient`, laquelle de nos fonctions déjà définies doit être " +"appelée pour l'entraînement et l'évaluation. 
Nous avons inclus des " +"annotations de type pour te donner une meilleure compréhension des types " +"de données qui sont transmis." -#: ../../source/how-to-implement-strategies.rst:306 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:315 +#, fuzzy msgid "" -"Of course, failures can happen, so there is no guarantee that the server " -"will get results from all the clients it sent instructions to (via " -":code:`configure_evaluate`). :code:`aggregate_evaluate` therefore " -"receives a list of :code:`results`, but also a list of :code:`failures`." +"All that's left to do it to define a function that loads both model and " +"data, creates a ``CifarClient``, and starts this client. You load your " +"data and model by using ``cifar.py``. Start ``CifarClient`` with the " +"function ``fl.client.start_client()`` by pointing it at the same IP " +"address we used in ``server.py``:" msgstr "" -"Bien sûr, des échecs peuvent se produire, il n'y a donc aucune garantie " -"que le serveur obtienne des résultats de tous les clients auxquels il a " -"envoyé des instructions (via :code:`configure_evaluate`). " -":code:`aggregate_evaluate` reçoit donc une liste de :code:`résultats`, " -"mais aussi une liste d' :code:`échecs`." +"Il ne reste plus qu'à définir une fonction qui charge le modèle et les " +"données, crée un :code:`CifarClient` et démarre ce client. Tu charges tes" +" données et ton modèle en utilisant :code:`cifar.py`. Démarre " +":code:`CifarClient` avec la fonction :code:`fl.client.start_client()` en " +"la faisant pointer sur la même adresse IP que celle que nous avons " +"utilisée dans :code:`server.py` :" -#: ../../source/how-to-implement-strategies.rst:308 -msgid "" -":code:`aggregate_evaluate` returns an optional :code:`float` (loss) and a" -" dictionary of aggregated metrics. 
The :code:`float` return value is " -"optional because :code:`aggregate_evaluate` might decide that the results" -" provided are not sufficient for aggregation (e.g., too many failures)." +#: ../../source/example-pytorch-from-centralized-to-federated.rst:338 +#: ../../source/tutorial-quickstart-jax.rst:309 +msgid "And that's it. You can now open two additional terminal windows and run" msgstr "" -":code:`aggregate_evaluate` renvoie un :code:`float` facultatif (perte) et" -" un dictionnaire de mesures agrégées. La valeur de retour :code:`float` " -"est facultative car :code:`aggregate_evaluate` peut décider que les " -"résultats fournis ne sont pas suffisants pour l'agrégation (par exemple, " -"trop d'échecs)." - -#: ../../source/how-to-implement-strategies.rst:311 -msgid "The :code:`evaluate` method" -msgstr "La méthode :code:`évaluer`" +"Tu peux maintenant ouvrir deux autres fenêtres de terminal et exécuter " +"les commandes suivantes" -#: ../../source/how-to-implement-strategies.rst:313 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:344 msgid "" -":code:`evaluate` is responsible for evaluating model parameters on the " -"server-side. Having :code:`evaluate` in addition to " -":code:`configure_evaluate`/:code:`aggregate_evaluate` enables strategies " -"to perform both servers-side and client-side (federated) evaluation." +"in each window (make sure that the server is running before you do so) " +"and see your (previously centralized) PyTorch project run federated " +"learning across two clients. Congratulations!" msgstr "" -"le fait d'avoir :code:`evaluate` en plus de " -":code:`configure_evaluate`/:code:`aggregate_evaluate` permet aux " -"stratégies d'effectuer des évaluations à la fois côté serveur et côté " -"client (fédéré)." +"dans chaque fenêtre (assure-toi que le serveur fonctionne avant de le " +"faire) et tu verras ton projet PyTorch (auparavant centralisé) exécuter " +"l'apprentissage fédéré sur deux clients. Félicitations !" 
-#: ../../source/how-to-implement-strategies.rst:323 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:351 +#, fuzzy msgid "" -"The return value is again optional because the strategy might not need to" -" implement server-side evaluation or because the user-defined " -":code:`evaluate` method might not complete successfully (e.g., it might " -"fail to load the server-side evaluation data)." +"The full source code for this example: `PyTorch: From Centralized To " +"Federated (Code) `_. Our example is, of course, " +"somewhat over-simplified because both clients load the exact same " +"dataset, which isn't realistic. You're now prepared to explore this topic" +" further. How about using different subsets of CIFAR-10 on each client? " +"How about adding more clients?" msgstr "" -"La valeur de retour est à nouveau facultative parce que la stratégie peut" -" ne pas avoir besoin de mettre en œuvre l'évaluation côté serveur ou " -"parce que la méthode :code:`evaluate` définie par l'utilisateur peut ne " -"pas se terminer avec succès (par exemple, elle peut échouer à charger les" -" données de l'évaluation côté serveur)." - -#: ../../source/how-to-install-flower.rst:2 -#, fuzzy -msgid "Install Flower" -msgstr "Installer Flower" +"Le code source complet de cet exemple : `PyTorch : From Centralized To " +"Federated (Code) `_. Notre exemple est, bien sûr, " +"un peu trop simplifié parce que les deux clients chargent exactement le " +"même ensemble de données, ce qui n'est pas réaliste. Tu es maintenant " +"prêt à explorer davantage ce sujet. Pourquoi ne pas utiliser différents " +"sous-ensembles de CIFAR-10 sur chaque client ? Pourquoi ne pas ajouter " +"d'autres clients ?" 
-#: ../../source/how-to-install-flower.rst:6 +#: ../../source/explanation-differential-privacy.rst:2 +#: ../../source/explanation-differential-privacy.rst:14 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 #, fuzzy -msgid "Python version" -msgstr "Version Python" - -#: ../../source/how-to-install-flower.rst:12 -msgid "Install stable release" -msgstr "Installe la version stable" +msgid "Differential Privacy" +msgstr "Confidentialité différentielle" -#: ../../source/how-to-install-flower.rst:15 -#: ../../source/how-to-upgrade-to-flower-next.rst:46 -msgid "Using pip" +#: ../../source/explanation-differential-privacy.rst:4 +msgid "" +"The information in datasets like healthcare, financial transactions, user" +" preferences, etc., is valuable and has the potential for scientific " +"breakthroughs and provides important business insights. However, such " +"data is also sensitive and there is a risk of compromising individual " +"privacy." msgstr "" -#: ../../source/how-to-install-flower.rst:17 +#: ../../source/explanation-differential-privacy.rst:9 msgid "" -"Stable releases are available on `PyPI " -"`_::" +"Traditional methods like anonymization alone would not work because of " +"attacks like Re-identification and Data Linkage. That's where " +"differential privacy comes in. It provides the possibility of analyzing " +"data while ensuring the privacy of individuals." msgstr "" -"Les versions stables sont disponibles sur `PyPI " -"`_: :" -#: ../../source/how-to-install-flower.rst:21 +#: ../../source/explanation-differential-privacy.rst:16 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr`` should be " -"installed with the ``simulation`` extra::" +"Imagine two datasets that are identical except for a single record (for " +"instance, Alice's data). 
Differential Privacy (DP) guarantees that any " +"analysis (M), like calculating the average income, will produce nearly " +"identical results for both datasets (O and O' would be similar). This " +"preserves group patterns while obscuring individual details, ensuring the" +" individual's information remains hidden in the crowd." msgstr "" -"Pour les simulations qui utilisent le moteur de client virtuel, ``flwr`` " -"doit être installé avec l'option ``simulation``: :" -#: ../../source/how-to-install-flower.rst:27 -msgid "Using conda (or mamba)" +#: ../../source/explanation-differential-privacy.rst:-1 +msgid "DP Intro" msgstr "" -#: ../../source/how-to-install-flower.rst:29 -msgid "Flower can also be installed from the ``conda-forge`` channel." +#: ../../source/explanation-differential-privacy.rst:27 +msgid "" +"One of the most commonly used mechanisms to achieve DP is adding enough " +"noise to the output of the analysis to mask the contribution of each " +"individual in the data while preserving the overall accuracy of the " +"analysis." msgstr "" -#: ../../source/how-to-install-flower.rst:31 +#: ../../source/explanation-differential-privacy.rst:32 +#, fuzzy +msgid "Formal Definition" +msgstr "Compiler les définitions ProtoBuf" + +#: ../../source/explanation-differential-privacy.rst:34 msgid "" -"If you have not added ``conda-forge`` to your channels, you will first " -"need to run the following::" +"Differential Privacy (DP) provides statistical guarantees against the " +"information an adversary can infer through the output of a randomized " +"algorithm. It provides an unconditional upper bound on the influence of a" +" single individual on the output of the algorithm by adding noise [1]. 
A " +"randomized mechanism M provides (:math:`\\epsilon`, " +":math:`\\delta`)-differential privacy if for any two neighboring " +"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " +"record, and for all possible outputs S ⊆ Range(A):" msgstr "" -#: ../../source/how-to-install-flower.rst:36 +#: ../../source/explanation-differential-privacy.rst:42 msgid "" -"Once the ``conda-forge`` channel has been enabled, ``flwr`` can be " -"installed with ``conda``::" +"\\small\n" +"P[M(D_{1} \\in A)] \\leq e^{\\epsilon} P[M(D_{2} \\in A)] + \\delta" msgstr "" -#: ../../source/how-to-install-flower.rst:40 -msgid "or with ``mamba``::" +#: ../../source/explanation-differential-privacy.rst:47 +msgid "" +"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " +"metric of privacy loss. It also controls the privacy-utility trade-off; " +"lower :math:`\\epsilon` values indicate higher levels of privacy but are " +"likely to reduce utility as well. The :math:`\\delta` parameter accounts " +"for a small probability on which the upper bound :math:`\\epsilon` does " +"not hold. The amount of noise needed to achieve differential privacy is " +"proportional to the sensitivity of the output, which measures the maximum" +" change in the output due to the inclusion or removal of a single record." msgstr "" -#: ../../source/how-to-install-flower.rst:46 -msgid "Verify installation" -msgstr "Vérifie l'installation" - -#: ../../source/how-to-install-flower.rst:48 +#: ../../source/explanation-differential-privacy.rst:56 #, fuzzy -msgid "" -"The following command can be used to verify if Flower was successfully " -"installed. If everything worked, it should print the version of Flower to" -" the command line::" -msgstr "" -"La commande suivante peut être utilisée pour vérifier si Flower a été " -"installé avec succès. 
Si tout a fonctionné, la version de Flower devrait " -"être imprimée sur la ligne de commande: :" +msgid "Differential Privacy in Machine Learning" +msgstr "Confidentialité différentielle" -#: ../../source/how-to-install-flower.rst:55 -msgid "Advanced installation options" -msgstr "Options d'installation avancées" +#: ../../source/explanation-differential-privacy.rst:58 +msgid "" +"DP can be utilized in machine learning to preserve the privacy of the " +"training data. Differentially private machine learning algorithms are " +"designed in a way to prevent the algorithm to learn any specific " +"information about any individual data points and subsequently prevent the" +" model from revealing sensitive information. Depending on the stage at " +"which noise is introduced, various methods exist for applying DP to " +"machine learning algorithms. One approach involves adding noise to the " +"training data (either to the features or labels), while another method " +"entails injecting noise into the gradients of the loss function during " +"model training. Additionally, such noise can be incorporated into the " +"model's output." +msgstr "" -#: ../../source/how-to-install-flower.rst:58 +#: ../../source/explanation-differential-privacy.rst:69 #, fuzzy -msgid "Install via Docker" -msgstr "Installer Flower" +msgid "Differential Privacy in Federated Learning" +msgstr "Mise à l'échelle de l'apprentissage fédéré" -#: ../../source/how-to-install-flower.rst:60 -msgid ":doc:`How to run Flower using Docker `" +#: ../../source/explanation-differential-privacy.rst:71 +msgid "" +"Federated learning is a data minimization approach that allows multiple " +"parties to collaboratively train a model without sharing their raw data. " +"However, federated learning also introduces new privacy challenges. The " +"model updates between parties and the central server can leak information" +" about the local data. 
These leaks can be exploited by attacks such as " +"membership inference and property inference attacks, or model inversion " +"attacks." msgstr "" -#: ../../source/how-to-install-flower.rst:63 -msgid "Install pre-release" -msgstr "Installer la version pre-release" - -#: ../../source/how-to-install-flower.rst:65 +#: ../../source/explanation-differential-privacy.rst:78 msgid "" -"New (possibly unstable) versions of Flower are sometimes available as " -"pre-release versions (alpha, beta, release candidate) before the stable " -"release happens::" +"DP can play a crucial role in federated learning to provide privacy for " +"the clients' data." msgstr "" -"Les nouvelles versions (éventuellement instables) de Flower sont parfois " -"disponibles en tant que versions préliminaires (alpha, bêta, release " -"candidate) avant que la version stable n'arrive : :" -#: ../../source/how-to-install-flower.rst:69 +#: ../../source/explanation-differential-privacy.rst:81 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr`` pre-releases" -" should be installed with the ``simulation`` extra::" +"Depending on the granularity of privacy provision or the location of " +"noise addition, different forms of DP exist in federated learning. In " +"this explainer, we focus on two approaches of DP utilization in federated" +" learning based on where the noise is added: at the server (also known as" +" the center) or at the client (also known as the local)." 
msgstr "" -"Pour les simulations qui utilisent le moteur de client virtuel, les " -"versions de ``flwr`` doivent être installées avec l'option " -"``simulation``: :" -#: ../../source/how-to-install-flower.rst:74 -msgid "Install nightly release" -msgstr "Installer la version nightly" - -#: ../../source/how-to-install-flower.rst:76 +#: ../../source/explanation-differential-privacy.rst:86 msgid "" -"The latest (potentially unstable) changes in Flower are available as " -"nightly releases::" +"**Central Differential Privacy**: DP is applied by the server and the " +"goal is to prevent the aggregated model from leaking information about " +"each client's data." msgstr "" -"Les dernières modifications (potentiellement instables) de Flower sont " -"disponibles sous forme de versions nocturnes: :" -#: ../../source/how-to-install-flower.rst:80 +#: ../../source/explanation-differential-privacy.rst:88 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr-nightly`` " -"should be installed with the ``simulation`` extra::" +"**Local Differential Privacy**: DP is applied on the client side before " +"sending any information to the server and the goal is to prevent the " +"updates that are sent to the server from leaking any information about " +"the client's data." 
msgstr "" -"Pour les simulations qui utilisent le moteur de client virtuel, ``flwr-" -"nightly`` doit être installé avec l'option ``simulation``: :" -#: ../../source/how-to-monitor-simulation.rst:2 +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:93 +#: ../../source/how-to-use-differential-privacy.rst:15 #, fuzzy -msgid "Monitor simulation" -msgstr "Simulation de moniteur" +msgid "Central Differential Privacy" +msgstr "Confidentialité différentielle" -#: ../../source/how-to-monitor-simulation.rst:4 +#: ../../source/explanation-differential-privacy.rst:95 msgid "" -"Flower allows you to monitor system resources while running your " -"simulation. Moreover, the Flower simulation engine is powerful and " -"enables you to decide how to allocate resources per client manner and " -"constrain the total usage. Insights from resource consumption can help " -"you make smarter decisions and speed up the execution time." +"In this approach, which is also known as user-level DP, the central " +"server is responsible for adding noise to the globally aggregated " +"parameters. It should be noted that trust in the server is required." msgstr "" -"Flower te permet de surveiller les ressources du système pendant " -"l'exécution de ta simulation. De plus, le moteur de simulation de Flower " -"est puissant et te permet de décider comment allouer les ressources par " -"manière de client et de limiter l'utilisation totale. Les informations " -"sur la consommation des ressources peuvent t'aider à prendre des " -"décisions plus intelligentes et à accélérer le temps d'exécution." -#: ../../source/how-to-monitor-simulation.rst:6 +#: ../../source/explanation-differential-privacy.rst:104 msgid "" -"The specific instructions assume you are using macOS and have the " -"`Homebrew `_ package manager installed." 
+"While there are various ways to implement central DP in federated " +"learning, we concentrate on the algorithms proposed by [2] and [3]. The " +"overall approach is to clip the model updates sent by the clients and add" +" some amount of noise to the aggregated model. In each iteration, a " +"random set of clients is chosen with a specific probability for training." +" Each client performs local training on its own data. The update of each " +"client is then clipped by some value `S` (sensitivity `S`). This would " +"limit the impact of any individual client which is crucial for privacy " +"and often beneficial for robustness. A common approach to achieve this is" +" by restricting the `L2` norm of the clients' model updates, ensuring " +"that larger updates are scaled down to fit within the norm `S`." msgstr "" -"Les instructions spécifiques supposent que tu utilises macOS et que le " -"gestionnaire de paquets `Homebrew `_ est installé." - -#: ../../source/how-to-monitor-simulation.rst:10 -msgid "Downloads" -msgstr "Téléchargements" -#: ../../source/how-to-monitor-simulation.rst:16 -msgid "" -"`Prometheus `_ is used for data collection, while" -" `Grafana `_ will enable you to visualize the " -"collected data. They are both well integrated with `Ray " -"`_ which Flower uses under the hood." +#: ../../source/explanation-differential-privacy.rst:-1 +msgid "clipping" msgstr "" -"`Prometheus `_ est utilisé pour la collecte de " -"données, tandis que `Grafana `_ te permettra de " -"visualiser les données collectées. Ils sont tous deux bien intégrés à " -"`Ray `_ que Flower utilise sous le capot." -#: ../../source/how-to-monitor-simulation.rst:18 +#: ../../source/explanation-differential-privacy.rst:120 msgid "" -"Overwrite the configuration files (depending on your device, it might be " -"installed on a different path)." +"Afterwards, the Gaussian mechanism is used to add noise in order to " +"distort the sum of all clients' updates. 
The amount of noise is scaled to" +" the sensitivity value to obtain a privacy guarantee. The Gaussian " +"mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " +"noise_scale * S ) / (number of sampled clients)`." msgstr "" -"Écrase les fichiers de configuration (selon ton appareil, il se peut " -"qu'il soit installé sur un chemin différent)." - -#: ../../source/how-to-monitor-simulation.rst:20 -msgid "If you are on an M1 Mac, it should be:" -msgstr "Si tu es sur un Mac M1, il devrait l'être :" -#: ../../source/how-to-monitor-simulation.rst:27 -msgid "On the previous generation Intel Mac devices, it should be:" +#: ../../source/explanation-differential-privacy.rst:126 +msgid "Clipping" msgstr "" -"Sur les appareils Mac Intel de la génération précédente, ce devrait être " -"le cas :" -#: ../../source/how-to-monitor-simulation.rst:34 +#: ../../source/explanation-differential-privacy.rst:128 msgid "" -"Open the respective configuration files and change them. Depending on " -"your device, use one of the two following commands:" +"There are two forms of clipping commonly used in Central DP: Fixed " +"Clipping and Adaptive Clipping." msgstr "" -"Ouvre les fichiers de configuration respectifs et modifie-les. Selon ton " -"appareil, utilise l'une des deux commandes suivantes :" -#: ../../source/how-to-monitor-simulation.rst:44 +#: ../../source/explanation-differential-privacy.rst:131 msgid "" -"and then delete all the text in the file and paste a new Prometheus " -"config you see below. You may adjust the time intervals to your " -"requirements:" +"**Fixed Clipping** : A predefined fix threshold is set for the magnitude " +"of clients' updates. Any update exceeding this threshold is clipped back " +"to the threshold value." msgstr "" -"puis supprime tout le texte du fichier et colle une nouvelle " -"configuration Prometheus que tu vois ci-dessous. 
Tu peux adapter les " -"intervalles de temps à tes besoins :" -#: ../../source/how-to-monitor-simulation.rst:59 +#: ../../source/explanation-differential-privacy.rst:133 msgid "" -"Now after you have edited the Prometheus configuration, do the same with " -"the Grafana configuration files. Open those using one of the following " -"commands as before:" +"**Adaptive Clipping** : The clipping threshold dynamically adjusts based " +"on the observed update distribution [4]. It means that the clipping value" +" is tuned during the rounds with respect to the quantile of the update " +"norm distribution." msgstr "" -"Maintenant, après avoir édité la configuration de Prometheus, fais de " -"même avec les fichiers de configuration de Grafana. Ouvre ces derniers à " -"l'aide de l'une des commandes suivantes, comme précédemment :" -#: ../../source/how-to-monitor-simulation.rst:69 +#: ../../source/explanation-differential-privacy.rst:137 msgid "" -"Your terminal editor should open and allow you to apply the following " -"configuration as before." +"The choice between fixed and adaptive clipping depends on various factors" +" such as privacy requirements, data distribution, model complexity, and " +"others." msgstr "" -"Ton éditeur de terminal devrait s'ouvrir et te permettre d'appliquer la " -"configuration suivante comme précédemment." -#: ../../source/how-to-monitor-simulation.rst:84 +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:141 +#: ../../source/how-to-use-differential-privacy.rst:113 +#, fuzzy +msgid "Local Differential Privacy" +msgstr "Confidentialité différentielle" + +#: ../../source/explanation-differential-privacy.rst:143 msgid "" -"Congratulations, you just downloaded all the necessary software needed " -"for metrics tracking. Now, let’s start it." +"In this approach, each client is responsible for performing DP. 
Local DP " +"avoids the need for a fully trusted aggregator, but it should be noted " +"that local DP leads to a decrease in accuracy but better privacy in " +"comparison to central DP." msgstr "" -"Félicitations, tu viens de télécharger tous les logiciels nécessaires au " -"suivi des métriques, maintenant, démarrons-le." -#: ../../source/how-to-monitor-simulation.rst:88 -msgid "Tracking metrics" -msgstr "Suivi des mesures" +#: ../../source/explanation-differential-privacy.rst:152 +msgid "In this explainer, we focus on two forms of achieving Local DP:" +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:90 +#: ../../source/explanation-differential-privacy.rst:154 msgid "" -"Before running your Flower simulation, you have to start the monitoring " -"tools you have just installed and configured." +"Each client adds noise to the local updates before sending them to the " +"server. To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " +"the sensitivity of the local model to be ∆, Gaussian noise is applied " +"with a noise scale of σ where:" msgstr "" -"Avant de lancer ta simulation Flower, tu dois démarrer les outils de " -"surveillance que tu viens d'installer et de configurer." -#: ../../source/how-to-monitor-simulation.rst:97 +#: ../../source/explanation-differential-privacy.rst:158 msgid "" -"Please include the following argument in your Python code when starting a" -" simulation." +"\\small\n" +"\\frac{∆ \\times \\sqrt{2 \\times " +"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}" msgstr "" -"Tu dois inclure l'argument suivant dans ton code Python lorsque tu " -"démarres une simulation." -#: ../../source/how-to-monitor-simulation.rst:108 -msgid "Now, you are ready to start your workload." -msgstr "Maintenant, tu es prêt à commencer ta charge de travail." +#: ../../source/explanation-differential-privacy.rst:163 +msgid "" +"Each client adds noise to the gradients of the model during the local " +"training (DP-SGD). 
More specifically, in this approach, gradients are " +"clipped and an amount of calibrated noise is injected into the gradients." +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:110 +#: ../../source/explanation-differential-privacy.rst:167 msgid "" -"Shortly after the simulation starts, you should see the following logs in" -" your terminal:" +"Please note that these two approaches are providing privacy at different " +"levels." msgstr "" -"Peu de temps après le début de la simulation, tu devrais voir les " -"journaux suivants dans ton terminal :" -#: ../../source/how-to-monitor-simulation.rst:117 -msgid "You can look at everything at ``_ ." -msgstr "Tu peux tout regarder sur ``_ ." +#: ../../source/explanation-differential-privacy.rst:169 +#, fuzzy +msgid "**References:**" +msgstr "Référence" -#: ../../source/how-to-monitor-simulation.rst:119 -msgid "" -"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" -" lowest option)." +#: ../../source/explanation-differential-privacy.rst:171 +msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." msgstr "" -"Il s'agit d'un tableau de bord Ray. Tu peux naviguer vers Metrics (sur le" -" panneau de gauche, l'option la plus basse)." -#: ../../source/how-to-monitor-simulation.rst:121 +#: ../../source/explanation-differential-privacy.rst:173 +#, fuzzy msgid "" -"Or alternatively, you can just see them in Grafana by clicking on the " -"right-up corner, “View in Grafana”. Please note that the Ray dashboard is" -" only accessible during the simulation. After the simulation ends, you " -"can only use Grafana to explore the metrics. You can start Grafana by " -"going to ``http://localhost:3000/``." +"[2] McMahan et al. Learning Differentially Private Recurrent Language " +"Models." msgstr "" -"Ou alors, tu peux simplement les voir dans Grafana en cliquant sur le " -"coin supérieur droit, \"View in Grafana\". 
Sache que le tableau de bord " -"Ray n'est accessible que pendant la simulation. Une fois la simulation " -"terminée, tu ne peux utiliser Grafana que pour explorer les métriques. Tu" -" peux démarrer Grafana en te rendant sur `http://localhost:3000/``." +"McMahan, H. Brendan, et al. \"Learning differentially private recurrent " +"language models\", arXiv preprint arXiv:1710.06963 (2017)." -#: ../../source/how-to-monitor-simulation.rst:123 +#: ../../source/explanation-differential-privacy.rst:175 msgid "" -"After you finish the visualization, stop Prometheus and Grafana. This is " -"important as they will otherwise block, for example port :code:`3000` on " -"your machine as long as they are running." +"[3] Geyer et al. Differentially Private Federated Learning: A Client " +"Level Perspective." msgstr "" -"Après avoir terminé la visualisation, arrête Prometheus et Grafana. C'est" -" important car sinon ils bloqueront, par exemple, le port :code:`3000` " -"sur ta machine tant qu'ils seront en cours d'exécution." -#: ../../source/how-to-monitor-simulation.rst:132 -msgid "Resource allocation" -msgstr "Allocation des ressources" - -#: ../../source/how-to-monitor-simulation.rst:134 -msgid "" -"You must understand how the Ray library works to efficiently allocate " -"system resources to simulation clients on your own." +#: ../../source/explanation-differential-privacy.rst:177 +#, fuzzy +msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." msgstr "" -"Tu dois comprendre le fonctionnement de la bibliothèque Ray pour allouer " -"efficacement les ressources du système aux clients de simulation de ton " -"côté." +"Andrew, Galen, et al. \"Differentially private learning with adaptive " +"clipping\" Advances in Neural Information Processing Systems 34 (2021) : " +"17455-17466." 
+ +#: ../../source/explanation-federated-evaluation.rst:2 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 +msgid "Federated evaluation" +msgstr "Évaluation fédérée" -#: ../../source/how-to-monitor-simulation.rst:136 +#: ../../source/explanation-federated-evaluation.rst:4 msgid "" -"Initially, the simulation (which Ray handles under the hood) starts by " -"default with all the available resources on the system, which it shares " -"among the clients. It doesn't mean it divides it equally among all of " -"them, nor that the model training happens at all of them simultaneously. " -"You will learn more about that in the later part of this blog. You can " -"check the system resources by running the following:" +"There are two main approaches to evaluating models in federated learning " +"systems: centralized (or server-side) evaluation and federated (or " +"client-side) evaluation." msgstr "" -"Au départ, la simulation (que Ray gère sous le capot) démarre par défaut " -"avec toutes les ressources disponibles sur le système, qu'elle partage " -"entre les clients. Cela ne signifie pas qu'elle les divise de manière " -"égale entre tous, ni que l'apprentissage du modèle se fait sur tous les " -"clients simultanément. Tu en apprendras plus à ce sujet dans la suite de " -"ce blog. Tu peux vérifier les ressources du système en exécutant ce qui " -"suit :" +"Il existe deux approches principales pour évaluer les modèles dans les " +"systèmes d'apprentissage fédérés : l'évaluation centralisée (ou côté " +"serveur) et l'évaluation fédérée (ou côté client)." 
-#: ../../source/how-to-monitor-simulation.rst:143 -msgid "In Google Colab, the result you see might be similar to this:" -msgstr "Dans Google Colab, le résultat que tu obtiens peut ressembler à ceci :" +#: ../../source/explanation-federated-evaluation.rst:8 +msgid "Centralized Evaluation" +msgstr "Évaluation centralisée" + +#: ../../source/explanation-federated-evaluation.rst:11 +msgid "Built-In Strategies" +msgstr "Stratégies intégrées" -#: ../../source/how-to-monitor-simulation.rst:155 +#: ../../source/explanation-federated-evaluation.rst:13 msgid "" -"However, you can overwrite the defaults. When starting a simulation, do " -"the following (you don't need to overwrite all of them):" +"All built-in strategies support centralized evaluation by providing an " +"evaluation function during initialization. An evaluation function is any " +"function that can take the current global model parameters as input and " +"return evaluation results:" msgstr "" -"Cependant, tu peux écraser les valeurs par défaut. Lorsque tu démarres " -"une simulation, fais ce qui suit (tu n'as pas besoin de les écraser " -"toutes) :" +"Toutes les stratégies intégrées prennent en charge l'évaluation " +"centralisée en fournissant une fonction d'évaluation lors de " +"l'initialisation. Une fonction d'évaluation est une fonction qui peut " +"prendre les paramètres du modèle global actuel comme entrée et renvoyer " +"les résultats de l'évaluation :" -#: ../../source/how-to-monitor-simulation.rst:175 -msgid "Let’s also specify the resource for a single client." -msgstr "Spécifions également la ressource pour un seul client." +#: ../../source/explanation-federated-evaluation.rst:61 +msgid "Custom Strategies" +msgstr "Stratégies personnalisées" -#: ../../source/how-to-monitor-simulation.rst:205 +#: ../../source/explanation-federated-evaluation.rst:63 +#, fuzzy msgid "" -"Now comes the crucial part. 
Ray will start a new client only when it has " -"all the required resources (such that they run in parallel) when the " -"resources allow." +"The ``Strategy`` abstraction provides a method called ``evaluate`` that " +"can directly be used to evaluate the current global model parameters. The" +" current server implementation calls ``evaluate`` after parameter " +"aggregation and before federated evaluation (see next paragraph)." msgstr "" -"Ray ne démarrera un nouveau client que lorsqu'il disposera de toutes les " -"ressources nécessaires (de manière à ce qu'ils fonctionnent en parallèle)" -" lorsque les ressources le permettront." +"L'abstraction :code:`Strategy` fournit une méthode appelée " +":code:`evaluate` qui peut être directement utilisée pour évaluer les " +"paramètres du modèle global actuel. L'implémentation actuelle du serveur " +"appelle :code:`evaluate` après l'agrégation des paramètres et avant " +"l'évaluation fédérée (voir le paragraphe suivant)." -#: ../../source/how-to-monitor-simulation.rst:207 +#: ../../source/explanation-federated-evaluation.rst:69 +msgid "Federated Evaluation" +msgstr "Évaluation fédérée" + +#: ../../source/explanation-federated-evaluation.rst:72 +msgid "Implementing Federated Evaluation" +msgstr "Mise en œuvre de l'évaluation fédérée" + +#: ../../source/explanation-federated-evaluation.rst:74 +#, fuzzy msgid "" -"In the example above, only one client will be run, so your clients won't " -"run concurrently. Setting :code:`client_num_gpus = 0.5` would allow " -"running two clients and therefore enable them to run concurrently. Be " -"careful not to require more resources than available. If you specified " -":code:`client_num_gpus = 2`, the simulation wouldn't start (even if you " -"had 2 GPUs but decided to set 1 in :code:`ray_init_args`)." +"Client-side evaluation happens in the ``Client.evaluate`` method and can " +"be configured from the server side." 
msgstr "" -"Dans l'exemple ci-dessus, un seul client sera exécuté, donc tes clients " -"ne fonctionneront pas simultanément. En définissant " -":code:`client_num_gpus = 0.5`, tu pourras exécuter deux clients et donc " -"les faire fonctionner simultanément. Fais attention à ne pas demander " -"plus de ressources que celles disponibles. Si tu as spécifié " -":code:`client_num_gpus = 2`, la simulation ne démarrera pas (même si tu " -"as 2 GPU mais que tu as décidé d'en définir 1 dans " -":code:`ray_init_args`)." - -#: ../../source/how-to-monitor-simulation.rst:212 ../../source/ref-faq.rst:2 -msgid "FAQ" -msgstr "FAQ" +"L'évaluation côté client se fait dans la méthode :code:`Client.evaluate` " +"et peut être configurée côté serveur." -#: ../../source/how-to-monitor-simulation.rst:214 -msgid "Q: I don't see any metrics logged." -msgstr "Q : Je ne vois aucune mesure enregistrée." +#: ../../source/explanation-federated-evaluation.rst:108 +msgid "Configuring Federated Evaluation" +msgstr "Configuration de l'évaluation fédérée" -#: ../../source/how-to-monitor-simulation.rst:216 +#: ../../source/explanation-federated-evaluation.rst:110 msgid "" -"A: The timeframe might not be properly set. The setting is in the top " -"right corner (\"Last 30 minutes\" by default). Please change the " -"timeframe to reflect the period when the simulation was running." +"Federated evaluation can be configured from the server side. Built-in " +"strategies support the following arguments:" msgstr "" -"R : Il se peut que le délai ne soit pas correctement défini. Le paramètre" -" se trouve dans le coin supérieur droit (\"Dernières 30 minutes\" par " -"défaut). Modifie le délai pour qu'il corresponde à la période pendant " -"laquelle la simulation s'est déroulée." +"L'évaluation fédérée peut être configurée du côté du serveur. 
Les " +"stratégies intégrées prennent en charge les arguments suivants :" -#: ../../source/how-to-monitor-simulation.rst:218 +#: ../../source/explanation-federated-evaluation.rst:113 +#, fuzzy msgid "" -"Q: I see “Grafana server not detected. Please make sure the Grafana " -"server is running and refresh this page” after going to the Metrics tab " -"in Ray Dashboard." +"``fraction_evaluate``: a ``float`` defining the fraction of clients that " +"will be selected for evaluation. If ``fraction_evaluate`` is set to " +"``0.1`` and ``100`` clients are connected to the server, then ``10`` will" +" be randomly selected for evaluation. If ``fraction_evaluate`` is set to " +"``0.0``, federated evaluation will be disabled." msgstr "" -"Q : Je vois s'afficher \"Serveur Grafana non détecté. Vérifie que le " -"serveur Grafana fonctionne et actualise cette page\" après avoir accédé à" -" l'onglet Métriques dans Ray Dashboard." +":code:`fraction_evaluate` : un :code:`float` définissant la fraction de " +"clients qui sera sélectionnée pour l'évaluation. Si " +":code:`fraction_evaluate` est défini à :code:`0.1` et que :code:`100` " +"clients sont connectés au serveur, alors :code:`10` sera sélectionné " +"aléatoirement pour l'évaluation. Si :code:`fraction_evaluate` est défini " +"à :code:`0.0`, l'évaluation fédérée sera désactivée." -#: ../../source/how-to-monitor-simulation.rst:220 +#: ../../source/explanation-federated-evaluation.rst:118 +#, fuzzy msgid "" -"A: You probably don't have Grafana running. Please check the running " -"services" +"``min_evaluate_clients``: an ``int``: the minimum number of clients to be" +" selected for evaluation. If ``fraction_evaluate`` is set to ``0.1``, " +"``min_evaluate_clients`` is set to 20, and ``100`` clients are connected " +"to the server, then ``20`` clients will be selected for evaluation." msgstr "" -"R : Grafana n'est probablement pas en cours d'exécution. 
Vérifie les " -"services en cours d'exécution" +"si :code:`fraction_evaluate` est réglé sur :code:`0.1`, " +":code:`min_evaluate_clients` est réglé sur 20, et que :code:`100` clients" +" sont connectés au serveur, alors :code:`20` clients seront sélectionnés " +"pour l'évaluation." -#: ../../source/how-to-monitor-simulation.rst:226 +#: ../../source/explanation-federated-evaluation.rst:122 +#, fuzzy msgid "" -"Q: I see \"This site can't be reached\" when going to " -"``_." +"``min_available_clients``: an ``int`` that defines the minimum number of " +"clients which need to be connected to the server before a round of " +"federated evaluation can start. If fewer than ``min_available_clients`` " +"are connected to the server, the server will wait until more clients are " +"connected before it continues to sample clients for evaluation." msgstr "" -"Q : Je vois \"This site can't be reached\" quand je vais sur " -"``_." +":code:`min_available_clients` : un :code:`int` qui définit le nombre " +"minimum de clients qui doivent être connectés au serveur avant qu'un " +"cycle d'évaluation fédérée puisse commencer. Si moins de " +":code:`min_available_clients` sont connectés au serveur, le serveur " +"attendra que d'autres clients soient connectés avant de continuer à " +"échantillonner des clients pour l'évaluation." -#: ../../source/how-to-monitor-simulation.rst:228 +#: ../../source/explanation-federated-evaluation.rst:127 +#, fuzzy msgid "" -"A: Either the simulation has already finished, or you still need to start" -" Prometheus." +"``on_evaluate_config_fn``: a function that returns a configuration " +"dictionary which will be sent to the selected clients. The function will " +"be called during each round and provides a convenient way to customize " +"client-side evaluation from the server side, for example, to configure " +"the number of validation steps performed." msgstr "" -"R : Soit la simulation est déjà terminée, soit tu dois encore démarrer " -"Prometheus." 
+":code:`on_evaluate_config_fn` : une fonction qui renvoie un dictionnaire " +"de configuration qui sera envoyé aux clients sélectionnés. Cette fonction" +" sera appelée à chaque tour et offre un moyen pratique de personnaliser " +"l'évaluation côté client depuis le côté serveur, par exemple pour " +"configurer le nombre d'étapes de validation effectuées." -#: ../../source/how-to-monitor-simulation.rst:232 -msgid "Resources" -msgstr "Ressources" +#: ../../source/explanation-federated-evaluation.rst:157 +msgid "Evaluating Local Model Updates During Training" +msgstr "Évaluer les mises à jour du modèle local pendant la formation" -#: ../../source/how-to-monitor-simulation.rst:234 +#: ../../source/explanation-federated-evaluation.rst:159 #, fuzzy msgid "" -"Ray Dashboard: ``_" +"Model parameters can also be evaluated during training. ``Client.fit`` " +"can return arbitrary evaluation results as a dictionary:" msgstr "" -"Tableau de bord Ray : ``_" +"Les paramètres du modèle peuvent également être évalués pendant la " +"formation. 
:code:`Client.fit` peut renvoyer des résultats d'évaluation " +"arbitraires sous forme de dictionnaire :" -#: ../../source/how-to-monitor-simulation.rst:236 -#, fuzzy -msgid "Ray Metrics: ``_" -msgstr "" -"Ray Metrics : ``_" +#: ../../source/explanation-federated-evaluation.rst:201 +msgid "Full Code Example" +msgstr "Exemple de code complet" -#: ../../source/how-to-run-flower-using-docker.rst:2 -msgid "Run Flower using Docker" +#: ../../source/explanation-federated-evaluation.rst:203 +#, fuzzy +msgid "" +"For a full code example that uses both centralized and federated " +"evaluation, see the *Advanced TensorFlow Example* (the same approach can " +"be applied to workloads implemented in any other framework): " +"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" msgstr "" +"Pour un exemple de code complet qui utilise à la fois l'évaluation " +"centralisée et fédérée, voir l'*Exemple TensorFlow avancé* (la même " +"approche peut être appliquée aux charges de travail mises en œuvre dans " +"n'importe quel autre framework) : " +"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" -#: ../../source/how-to-run-flower-using-docker.rst:4 +#: ../../source/explanation-flower-architecture.rst:-1 msgid "" -"The simplest way to get started with Flower is by using the pre-made " -"Docker images, which you can find on `Docker Hub " -"`__." +"Explore the federated learning architecture of the Flower framework, " +"featuring multi-run, concurrent execution, and scalable, secure machine " +"learning while preserving data privacy." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:7 -msgid "Before you start, make sure that the Docker daemon is running:" -msgstr "" +#: ../../source/explanation-flower-architecture.rst:2 +msgid "Flower Architecture" +msgstr "Architecture de Flower" -#: ../../source/how-to-run-flower-using-docker.rst:14 +#: ../../source/explanation-flower-architecture.rst:4 msgid "" -"If you do not see the version of Docker but instead get an error saying " -"that the command was not found, you will need to install Docker first. " -"You can find installation instruction `here `_." +"This page explains the architecture of deployed Flower federated learning" +" system." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:20 +#: ../../source/explanation-flower-architecture.rst:6 msgid "" -"On Linux, Docker commands require ``sudo`` privilege. If you want to " -"avoid using ``sudo``, you can follow the `Post-installation steps " -"`_ on the " -"official Docker website." +"In federated learning (FL), there is typically one server and a number of" +" clients that are connected to the server. This is often called a " +"federation." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:26 +#: ../../source/explanation-flower-architecture.rst:9 msgid "" -"To ensure optimal performance and compatibility, the SuperLink, SuperNode" -" and ServerApp image must have the same version when running together. " -"This guarantees seamless integration and avoids potential conflicts or " -"issues that may arise from using different versions." +"The role of the server is to coordinate the training process. The role of" +" each client is to receive tasks from the server, execute those tasks and" +" return the results back to the server."
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:31 -#, fuzzy -msgid "Flower SuperLink" -msgstr "flower-superlink" +#: ../../source/explanation-flower-architecture.rst:13 +msgid "This is sometimes called a hub-and-spoke topology:" +msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:34 +#: ../../source/explanation-flower-architecture.rst:21 #, fuzzy -msgid "Quickstart" -msgstr "Démarrage rapide de JAX" - -#: ../../source/how-to-run-flower-using-docker.rst:36 -msgid "If you're looking to try out Flower, you can use the following command:" -msgstr "" +msgid "Hub-and-spoke topology in federated learning" +msgstr "Qu'est-ce que l'apprentissage fédéré ?" -#: ../../source/how-to-run-flower-using-docker.rst:42 +#: ../../source/explanation-flower-architecture.rst:21 msgid "" -"The command pulls the Docker image with the tag ``1.8.0`` from Docker " -"Hub. The tag specifies the Flower version. In this case, Flower 1.8.0. " -"The ``--rm`` flag tells Docker to remove the container after it exits." +"Hub-and-spoke topology in federated learning (one server, multiple " +"clients)." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:48 +#: ../../source/explanation-flower-architecture.rst:23 msgid "" -"By default, the Flower SuperLink keeps state in-memory. When using the " -"Docker flag ``--rm``, the state is not persisted between container " -"starts. We will show below how to save the state in a file on your host " -"system." +"In a real-world deployment, we typically want to run different projects " +"on such a federation. Each project could use different hyperparameters, " +"different model architectures, different aggregation strategies, or even " +"different machine learning frameworks like PyTorch and TensorFlow." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:52 +#: ../../source/explanation-flower-architecture.rst:28 msgid "" -"The ``-p :`` flag tells Docker to map the ports " -"``9091``/``9092`` of the host to ``9091``/``9092`` of the container, " -"allowing you to access the Driver API on ``http://localhost:9091`` and " -"the Fleet API on ``http://localhost:9092``. Lastly, any flag that comes " -"after the tag is passed to the Flower SuperLink. Here, we are passing the" -" flag ``--insecure``." +"This is why, in Flower, both the server side and the client side are " +"split into two parts. One part is long-lived and responsible for " +"communicating across the network, the other part is short-lived and " +"executes task-specific code." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:59 -#: ../../source/how-to-run-flower-using-docker.rst:238 -#: ../../source/how-to-run-flower-using-docker.rst:354 -msgid "" -"The ``--insecure`` flag enables insecure communication (using HTTP, not " -"HTTPS) and should only be used for testing purposes. We strongly " -"recommend enabling `SSL `__ when " -"deploying to a production environment." +#: ../../source/explanation-flower-architecture.rst:32 +msgid "A Flower `server` consists of **SuperLink** and ``ServerApp``:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:64 +#: ../../source/explanation-flower-architecture.rst:34 msgid "" -"You can use ``--help`` to view all available flags that the SuperLink " -"supports:" +"**SuperLink**: a long-running process that forwards task instructions to " +"clients (SuperNodes) and receives task results back." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:71 -msgid "Mounting a volume to store the state on the host system" -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:73 +#: ../../source/explanation-flower-architecture.rst:36 msgid "" -"If you want to persist the state of the SuperLink on your host system, " -"all you need to do is specify a path where you want to save the file on " -"your host system and a name for the database file. In the example below, " -"we tell Docker via the flag ``--volume`` to mount the user's home " -"directory (``~/`` on your host) into the ``/app/`` directory of the " -"container. Furthermore, we use the flag ``--database`` to specify the " -"name of the database file." +"``ServerApp``: a short-lived process with project-spcific code that " +"customizes all server-side aspects of federated learning systems (client " +"selection, client configuration, result aggregation). This is what AI " +"researchers and AI engineers write when they build Flower apps." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:86 -msgid "" -"As soon as the SuperLink starts, the file ``state.db`` is created in the " -"user's home directory on your host system. If the file already exists, " -"the SuperLink tries to restore the state from the file. To start the " -"SuperLink with an empty database, simply remove the ``state.db`` file." 
+#: ../../source/explanation-flower-architecture.rst:41 +msgid "A Flower `client` consists of **SuperNode** and ``ClientApp``:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:91 -#: ../../source/how-to-run-flower-using-docker.rst:260 -#: ../../source/how-to-run-flower-using-docker.rst:375 -#, fuzzy -msgid "Enabling SSL for secure connections" -msgstr "Collecte centralisée des données" - -#: ../../source/how-to-run-flower-using-docker.rst:93 +#: ../../source/explanation-flower-architecture.rst:43 msgid "" -"To enable SSL, you will need a PEM-encoded root certificate, a PEM-" -"encoded private key and a PEM-encoded certificate chain." +"**SuperNode**: a long-running process that connects to the SuperLink, " +"asks for tasks, executes tasks (for example, \"train this model on your " +"local data\") and returns task results back to the SuperLink." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:97 +#: ../../source/explanation-flower-architecture.rst:46 msgid "" -"For testing purposes, you can generate your own self-signed certificates." -" The `Enable SSL connections `__ page contains a section that" -" will guide you through the process." +"``ClientApp``: a short-lived process with project-specific code that " +"customizes all client-side aspects of federated learning systems (local " +"model training and evaluation, pre- and post-processing). This is what AI" +" researchers and AI engineers write when they build Flower apps." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:101 +#: ../../source/explanation-flower-architecture.rst:51 msgid "" -"Assuming all files we need are in the local ``certificates`` directory, " -"we can use the flag ``--volume`` to mount the local directory into the " -"``/app/`` directory of the container. This allows the SuperLink to access" -" the files within the container. Finally, we pass the names of the " -"certificates to the SuperLink with the ``--certificates`` flag." 
+"Why SuperNode and SuperLink? Well, in federated learning, the clients are" +" the actual stars of the show. They hold the training data and they run " +"the actual training. This is why Flower decided to name them " +"**SuperNode**. The **SuperLink** is then responsible for acting as the " +"`missing link` between all those SuperNodes." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:113 +#: ../../source/explanation-flower-architecture.rst:62 #, fuzzy -msgid "Flower SuperNode" -msgstr "Serveur de Flower" +msgid "Basic Flower architecture" +msgstr "Architecture florale" + +#: ../../source/explanation-flower-architecture.rst:62 +#, fuzzy +msgid "The basic Flower architecture for federated learning." +msgstr "Qu'est-ce que l'apprentissage fédéré ?" -#: ../../source/how-to-run-flower-using-docker.rst:115 +#: ../../source/explanation-flower-architecture.rst:64 msgid "" -"The SuperNode Docker image comes with a pre-installed version of Flower " -"and serves as a base for building your own SuperNode image." +"In a Flower app project, users will typically develop the ``ServerApp`` " +"and the ``ClientApp``. All the network communication between `server` and" +" `clients` is taken care of by the SuperLink and SuperNodes." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:120 +#: ../../source/explanation-flower-architecture.rst:70 msgid "" -"The SuperNode Docker image currently works only with the 1.9.0-nightly " -"release. A stable version will be available when Flower 1.9.0 (stable) " -"gets released (ETA: May). A SuperNode nightly image must be paired with " -"the corresponding SuperLink and ServerApp nightly images released on the " -"same day. To ensure the versions are in sync, using the concrete tag, " -"e.g., ``1.9.0.dev20240501`` instead of ``nightly`` is recommended." +"For more details, please refer to the |serverapp_link|_ and " +"|clientapp_link|_ documentation." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:126 +#: ../../source/explanation-flower-architecture.rst:73 msgid "" -"We will use the ``quickstart-pytorch`` example, which you can find in the" -" Flower repository, to illustrate how you can dockerize your ClientApp." +"With *multi-run*, multiple ``ServerApp``\\s and ``ClientApp``\\s are now " +"capable of running on the same federation consisting of a single long-" +"running SuperLink and multiple long-running SuperNodes. This is sometimes" +" referred to as `multi-tenancy` or `multi-job`." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:134 +#: ../../source/explanation-flower-architecture.rst:78 msgid "" -"Before we can start, we need to meet a few prerequisites in our local " -"development environment. You can skip the first part if you want to run " -"your ClientApp instead of the ``quickstart-pytorch`` example." +"As shown in the figure below, two projects, each consisting of a " +"``ServerApp`` and a ``ClientApp``, could share the same SuperLink and " +"SuperNodes." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:138 +#: ../../source/explanation-flower-architecture.rst:87 #, fuzzy -msgid "Clone the Flower repository." -msgstr "**Fourche le dépôt de Flower**" - -#: ../../source/how-to-run-flower-using-docker.rst:152 -msgid "Creating a SuperNode Dockerfile" -msgstr "" +msgid "Multi-tenancy federated learning architecture" +msgstr "Stratégie de moyenne fédérée." 
-#: ../../source/how-to-run-flower-using-docker.rst:154 -#: ../../source/how-to-run-flower-using-docker.rst:289 -msgid "Let's assume the following project layout:" -msgstr "" +#: ../../source/explanation-flower-architecture.rst:87 +#, fuzzy +msgid "Multi-tenancy federated learning architecture with Flower" +msgstr "Étape 2 : Apprentissage fédéré avec Flower" -#: ../../source/how-to-run-flower-using-docker.rst:163 +#: ../../source/explanation-flower-architecture.rst:89 msgid "" -"First, we need to create a ``requirements.txt`` file in the directory " -"where the ``ClientApp`` code is located. In the file, we list all the " -"dependencies that the ClientApp requires." +"To illustrate how multi-run works, consider one federated learning " +"training run where a ``ServerApp`` and a ``ClientApp`` are participating " +"in ``[run 1]``. Note that a SuperNode will only run a ``ClientApp`` if it" +" is selected to participate in the training run." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:175 +#: ../../source/explanation-flower-architecture.rst:94 msgid "" -"Note that `flwr `__ is already installed " -"in the ``flwr/supernode`` base image, so you only need to include other " -"package dependencies in your ``requirements.txt``, such as ``torch``, " -"``tensorflow``, etc." +"In ``[run 1]`` below, all the SuperNodes are selected and therefore run " +"their corresponding ``ClientApp``\\s:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:179 -msgid "" -"Next, we create a Dockerfile. If you use the ``quickstart-pytorch`` " -"example, create a new file called ``Dockerfile.supernode`` in ``examples" -"/quickstart-pytorch``." -msgstr "" +#: ../../source/explanation-flower-architecture.rst:103 +#, fuzzy +msgid "Multi-tenancy federated learning architecture - Run 1" +msgstr "Stratégie de moyenne fédérée." 
-#: ../../source/how-to-run-flower-using-docker.rst:182 +#: ../../source/explanation-flower-architecture.rst:103 msgid "" -"The ``Dockerfile.supernode`` contains the instructions that assemble the " -"SuperNode image." +"Run 1 in a multi-run federated learning architecture with Flower. All " +"SuperNodes participate in the training round." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:196 +#: ../../source/explanation-flower-architecture.rst:106 msgid "" -"In the first two lines, we instruct Docker to use the SuperNode image " -"tagged ``nightly`` as a base image and set our working directory to " -"``/app``. The following instructions will now be executed in the ``/app``" -" directory. Next, we install the ClientApp dependencies by copying the " -"``requirements.txt`` file into the image and run ``pip install``. In the " -"last two lines, we copy the ``client.py`` module into the image and set " -"the entry point to ``flower-client-app`` with the argument " -"``client:app``. The argument is the object reference of the ClientApp " -"(``:``) that will be run inside the ClientApp." +"However, in ``[run 2]``, only the first and third SuperNodes are selected" +" to participate in the training:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:205 +#: ../../source/explanation-flower-architecture.rst:115 #, fuzzy -msgid "Building the SuperNode Docker image" -msgstr "Démarrer le serveur" +msgid "Multi-tenancy federated learning architecture - Run 2" +msgstr "Stratégie de moyenne fédérée." -#: ../../source/how-to-run-flower-using-docker.rst:207 +#: ../../source/explanation-flower-architecture.rst:115 msgid "" -"Next, we build the SuperNode Docker image by running the following " -"command in the directory where Dockerfile and ClientApp code are located." +"Run 2 in a multi-run federated learning architecture with Flower. Only " +"the first and third SuperNodes are selected to participate in the " +"training round." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:214 +#: ../../source/explanation-flower-architecture.rst:118 msgid "" -"We gave the image the name ``flwr_supernode``, and the tag ``0.0.1``. " -"Remember that the here chosen values only serve as an example. You can " -"change them to your needs." -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:219 -#, fuzzy -msgid "Running the SuperNode Docker image" -msgstr "Démarrer le serveur" - -#: ../../source/how-to-run-flower-using-docker.rst:221 -msgid "Now that we have built the SuperNode image, we can finally run it." -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:229 -#: ../../source/how-to-run-flower-using-docker.rst:345 -msgid "Let's break down each part of this command:" -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:231 -#: ../../source/how-to-run-flower-using-docker.rst:347 -msgid "``docker run``: This is the command to run a new Docker container." +"Therefore, with Flower multi-run, different projects (each consisting of " +"a ``ServerApp`` and ``ClientApp``) can run on different sets of clients." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:232 -#: ../../source/how-to-run-flower-using-docker.rst:348 +#: ../../source/explanation-flower-architecture.rst:121 msgid "" -"``--rm``: This option specifies that the container should be " -"automatically removed when it stops." +"To help you start and manage all of the concurrently executing training " +"runs, Flower offers one additional long-running server-side service " +"called **SuperExec**. When you type ``flwr run`` to start a new training " +"run, the ``flwr`` CLI bundles your local project (mainly your " +"``ServerApp`` and ``ClientApp``) and sends it to the **SuperExec**. The " +"**SuperExec** will then take care of starting and managing your " +"``ServerApp``, which in turn selects SuperNodes to execute your " +"``ClientApp``." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:233 -msgid "``flwr_supernode:0.0.1``: The name the tag of the Docker image to use." -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:234 -#: ../../source/how-to-run-flower-using-docker.rst:350 -msgid "``--insecure``: This option enables insecure communication." -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst +#: ../../source/explanation-flower-architecture.rst:128 msgid "" -"``--server 192.168.1.100:9092``: This option specifies the address of the" -" SuperLinks Fleet" +"This architecture allows many users to (concurrently) run their projects " +"on the same federation, simply by typing ``flwr run`` on their local " +"developer machine." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst -msgid "API to connect to. Remember to update it with your SuperLink IP." +#: ../../source/explanation-flower-architecture.rst:137 +msgid "Flower Deployment Engine with SuperExec" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:248 -msgid "" -"To test running Flower locally, you can create a `bridge network " -"`__, use the ``--network`` argument and pass the " -"name of the Docker network to run your SuperNodes." -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:252 -msgid "" -"Any argument that comes after the tag is passed to the Flower SuperNode " -"binary. To see all available flags that the SuperNode supports, run:" +#: ../../source/explanation-flower-architecture.rst:137 +msgid "The SuperExec service for managing concurrent training runs in Flower." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:262 +#: ../../source/explanation-flower-architecture.rst:141 msgid "" -"To enable SSL, we will need to mount a PEM-encoded root certificate into " -"your SuperNode container." +"This explanation covers the Flower Deployment Engine. An explanation " +"covering the Flower Simulation Engine will follow." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:264 +#: ../../source/explanation-flower-architecture.rst:146 msgid "" -"Assuming the certificate already exists locally, we can use the flag " -"``--volume`` to mount the local certificate into the container's " -"``/app/`` directory. This allows the SuperNode to access the certificate " -"within the container. Use the ``--certificates`` flag when starting the " -"container." +"As we continue to enhance Flower at a rapid pace, we'll periodically " +"update this explainer document. Feel free to share any feedback with us." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:275 -#, fuzzy -msgid "Flower ServerApp" -msgstr "Serveur de Flower" +#: ../../source/fed/0000-20200102-fed-template.md:10 +msgid "FED Template" +msgstr "Modèle FED" -#: ../../source/how-to-run-flower-using-docker.rst:277 -msgid "" -"The procedure for building and running a ServerApp image is almost " -"identical to the SuperNode image." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:12 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12 +msgid "Table of Contents" +msgstr "Table des matières" -#: ../../source/how-to-run-flower-using-docker.rst:279 -msgid "" -"Similar to the SuperNode image, the ServerApp Docker image comes with a " -"pre-installed version of Flower and serves as a base for building your " -"own ServerApp image." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:14 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14 +msgid "[Table of Contents](#table-of-contents)" +msgstr "[Table des matières](#table-of-contents)" -#: ../../source/how-to-run-flower-using-docker.rst:282 -msgid "" -"We will use the same ``quickstart-pytorch`` example as we do in the " -"Flower SuperNode section. If you have not already done so, please follow " -"the `SuperNode Prerequisites`_ before proceeding." 
-msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:15 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15 +msgid "[Summary](#summary)" +msgstr "[Résumé](#résumé)" -#: ../../source/how-to-run-flower-using-docker.rst:287 -msgid "Creating a ServerApp Dockerfile" -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:16 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16 +msgid "[Motivation](#motivation)" +msgstr "[Motivation](#motivation)" -#: ../../source/how-to-run-flower-using-docker.rst:298 -msgid "" -"First, we need to create a Dockerfile in the directory where the " -"``ServerApp`` code is located. If you use the ``quickstart-pytorch`` " -"example, create a new file called ``Dockerfile.serverapp`` in ``examples" -"/quickstart-pytorch``." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:17 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 +msgid "[Goals](#goals)" +msgstr "[Buts](#buts)" -#: ../../source/how-to-run-flower-using-docker.rst:302 -msgid "" -"The ``Dockerfile.serverapp`` contains the instructions that assemble the " -"ServerApp image." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:18 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 +msgid "[Non-Goals](#non-goals)" +msgstr "[Non-objectifs](#non-objectifs)" -#: ../../source/how-to-run-flower-using-docker.rst:313 -msgid "" -"In the first two lines, we instruct Docker to use the ServerApp image " -"tagged ``1.8.0`` as a base image and set our working directory to " -"``/app``. The following instructions will now be executed in the ``/app``" -" directory. In the last two lines, we copy the ``server.py`` module into " -"the image and set the entry point to ``flower-server-app`` with the " -"argument ``server:app``. The argument is the object reference of the " -"ServerApp (``:``) that will be run inside the " -"ServerApp container." 
-msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:19 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 +msgid "[Proposal](#proposal)" +msgstr "[Proposition](#proposition)" -#: ../../source/how-to-run-flower-using-docker.rst:321 -#, fuzzy -msgid "Building the ServerApp Docker image" -msgstr "Démarrer le serveur" +#: ../../source/fed/0000-20200102-fed-template.md:20 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 +msgid "[Drawbacks](#drawbacks)" +msgstr "[Inconvénients](#inconvénients)" -#: ../../source/how-to-run-flower-using-docker.rst:323 -msgid "" -"Next, we build the ServerApp Docker image by running the following " -"command in the directory where Dockerfile and ServerApp code are located." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:21 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 +msgid "[Alternatives Considered](#alternatives-considered)" +msgstr "[Alternatives envisagées](#alternatives-considered)" -#: ../../source/how-to-run-flower-using-docker.rst:330 -msgid "" -"We gave the image the name ``flwr_serverapp``, and the tag ``0.0.1``. " -"Remember that the here chosen values only serve as an example. You can " -"change them to your needs." 
-msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:22 +msgid "[Appendix](#appendix)" +msgstr "[Annexe](#appendix)" + +#: ../../source/fed/0000-20200102-fed-template.md:24 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 +msgid "Summary" +msgstr "Résumé" -#: ../../source/how-to-run-flower-using-docker.rst:335 +#: ../../source/fed/0000-20200102-fed-template.md:26 #, fuzzy -msgid "Running the ServerApp Docker image" -msgstr "Démarrer le serveur" +msgid "\\[TODO - sentence 1: summary of the problem\\]" +msgstr "[TODO - phrase 1 : résumé du problème]" -#: ../../source/how-to-run-flower-using-docker.rst:337 -msgid "Now that we have built the ServerApp image, we can finally run it." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:28 +#, fuzzy +msgid "\\[TODO - sentence 2: summary of the solution\\]" +msgstr "[TODO - phrase 2 : résumé de la solution]" -#: ../../source/how-to-run-flower-using-docker.rst:349 -msgid "``flwr_serverapp:0.0.1``: The name the tag of the Docker image to use." 
-msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:30 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 +msgid "Motivation" +msgstr "Motivation" -#: ../../source/how-to-run-flower-using-docker.rst -msgid "" -"``--server 192.168.1.100:9091``: This option specifies the address of the" -" SuperLinks Driver" -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:32 +#: ../../source/fed/0000-20200102-fed-template.md:36 +#: ../../source/fed/0000-20200102-fed-template.md:40 +#: ../../source/fed/0000-20200102-fed-template.md:44 +#: ../../source/fed/0000-20200102-fed-template.md:48 +#: ../../source/fed/0000-20200102-fed-template.md:54 +#: ../../source/fed/0000-20200102-fed-template.md:58 +#, fuzzy +msgid "\\[TODO\\]" +msgstr "[TODO]" -#: ../../source/how-to-run-flower-using-docker.rst:363 -msgid "" -"To test running Flower locally, you can create a `bridge network " -"`__, use the ``--network`` argument and pass the " -"name of the Docker network to run your ServerApps." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:34 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:53 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 +msgid "Goals" +msgstr "Objectifs" -#: ../../source/how-to-run-flower-using-docker.rst:367 -msgid "" -"Any argument that comes after the tag is passed to the Flower ServerApp " -"binary. To see all available flags that the ServerApp supports, run:" -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:38 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:79 +msgid "Non-Goals" +msgstr "Non-objectifs" -#: ../../source/how-to-run-flower-using-docker.rst:377 -msgid "" -"To enable SSL, we will need to mount a PEM-encoded root certificate into " -"your ServerApp container." 
-msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:42 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 +msgid "Proposal" +msgstr "Proposition" -#: ../../source/how-to-run-flower-using-docker.rst:379 -msgid "" -"Assuming the certificate already exists locally, we can use the flag " -"``--volume`` to mount the local certificate into the container's " -"``/app/`` directory. This allows the ServerApp to access the certificate " -"within the container. Use the ``--certificates`` flag when starting the " -"container." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:46 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 +msgid "Drawbacks" +msgstr "Inconvénients" + +#: ../../source/fed/0000-20200102-fed-template.md:50 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 +msgid "Alternatives Considered" +msgstr "Alternatives envisagées" -#: ../../source/how-to-run-flower-using-docker.rst:390 +#: ../../source/fed/0000-20200102-fed-template.md:52 #, fuzzy -msgid "Advanced Docker options" -msgstr "Options d'installation avancées" +msgid "\\[Alternative 1\\]" +msgstr "[Alternative 1]" -#: ../../source/how-to-run-flower-using-docker.rst:393 -msgid "Using a different Flower version" -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:56 +#, fuzzy +msgid "\\[Alternative 2\\]" +msgstr "[Alternative 2]" -#: ../../source/how-to-run-flower-using-docker.rst:395 -msgid "" -"If you want to use a different version of Flower, for example Flower " -"nightly, you can do so by changing the tag. All available versions are on" -" `Docker Hub `__." 
-msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 +msgid "Flower Enhancement Doc" +msgstr "Doc sur l'amélioration des fleurs" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 +msgid "[Enhancement Doc Template](#enhancement-doc-template)" +msgstr "[Modèle de document d'amélioration](#enhancement-doc-template)" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 +msgid "[Metadata](#metadata)" +msgstr "[Métadonnées](#métadonnées)" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:22 +msgid "[Workflow](#workflow)" +msgstr "[Workflow](#workflow)" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 +msgid "[GitHub Issues](#github-issues)" +msgstr "[GitHub Issues](#github-issues)" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:26 +msgid "[Google Docs](#google-docs)" +msgstr "[Google Docs](#google-docs)" -#: ../../source/how-to-run-flower-using-docker.rst:400 -msgid "Pinning a Docker image to a specific version" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:30 +msgid "A Flower Enhancement is a standardized development process to" msgstr "" +"Une amélioration de la fleur est un processus de développement " +"standardisé pour" -#: ../../source/how-to-run-flower-using-docker.rst:402 -msgid "" -"It may happen that we update the images behind the tags. Such updates " -"usually include security updates of system dependencies that should not " -"change the functionality of Flower. However, if you want to ensure that " -"you always use the same image, you can specify the hash of the image " -"instead of the tag." 
+#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 +msgid "provide a common structure for proposing larger changes" msgstr "" +"fournir une structure commune pour proposer des changements plus " +"importants" -#: ../../source/how-to-run-flower-using-docker.rst:407 -msgid "" -"The following command returns the current image hash referenced by the " -"``superlink:1.8.0`` tag:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 +msgid "ensure that the motivation for a change is clear" +msgstr "s'assurer que la motivation du changement est claire" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 +msgid "persist project information in a version control system" msgstr "" +"conserver les informations sur le projet dans un système de contrôle des " +"versions" -#: ../../source/how-to-run-flower-using-docker.rst:414 -msgid "Next, we can pin the hash when running a new SuperLink container:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 +msgid "document the motivation for impactful user-facing changes" msgstr "" +"documenter la motivation des changements qui ont un impact sur " +"l'utilisateur" -#: ../../source/how-to-run-flower-using-docker.rst:423 -#, fuzzy -msgid "Setting environment variables" -msgstr "Mise en place de l'environnement de codage" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:36 +msgid "reserve GitHub issues for tracking work in flight" +msgstr "réserve les problèmes GitHub pour le suivi du travail en vol" -#: ../../source/how-to-run-flower-using-docker.rst:425 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 msgid "" -"To set a variable inside a Docker container, you can use the ``-e " -"=`` flag." 
+"ensure community participants can successfully drive changes to " +"completion across one or more releases while stakeholders are adequately " +"represented throughout the process" msgstr "" +"s'assurer que les participants de la communauté peuvent mener à bien les " +"changements dans le cadre d'une ou plusieurs versions et que les parties " +"prenantes sont représentées de manière adéquate tout au long du processus" -#: ../../source/how-to-run-simulations.rst:2 -#, fuzzy -msgid "Run simulations" -msgstr "Simulation de moniteur" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 +msgid "Hence, an Enhancement Doc combines aspects of" +msgstr "Par conséquent, un document d'amélioration combine des aspects de" -#: ../../source/how-to-run-simulations.rst:8 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 +msgid "a feature, and effort-tracking document" +msgstr "une caractéristique, et un document de suivi des efforts" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 +msgid "a product requirements document" +msgstr "un document sur les exigences du produit" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 +msgid "a design document" +msgstr "un document de conception" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 msgid "" -"Simulating Federated Learning workloads is useful for a multitude of use-" -"cases: you might want to run your workload on a large cohort of clients " -"but without having to source, configure and mange a large number of " -"physical devices; you might want to run your FL workloads as fast as " -"possible on the compute systems you have access to without having to go " -"through a complex setup process; you might want to validate your " -"algorithm on different scenarios at varying levels of data and system " -"heterogeneity, client availability, privacy budgets, etc. These are among" -" some of the use-cases where simulating FL workloads makes sense. 
Flower " -"can accommodate these scenarios by means of its `VirtualClientEngine " -"`_ or " -"VCE." +"into one file, which is created incrementally in collaboration with the " +"community." msgstr "" +"en un seul fichier, qui est créé progressivement en collaboration avec la" +" communauté." -#: ../../source/how-to-run-simulations.rst:10 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 msgid "" -"The :code:`VirtualClientEngine` schedules, launches and manages `virtual`" -" clients. These clients are identical to `non-virtual` clients (i.e. the " -"ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " -"creating a class inheriting, for example, from `flwr.client.NumPyClient " -"`_ and therefore behave in an " -"identical way. In addition to that, clients managed by the " -":code:`VirtualClientEngine` are:" +"For far-fetching changes or features proposed to Flower, an abstraction " +"beyond a single GitHub issue or pull request is required to understand " +"and communicate upcoming changes to the project." msgstr "" +"Pour les changements lointains ou les fonctionnalités proposées à Flower," +" une abstraction au-delà d'une simple question GitHub ou d'une demande de" +" tirage est nécessaire pour comprendre et communiquer les changements à " +"venir dans le projet." -#: ../../source/how-to-run-simulations.rst:12 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 msgid "" -"resource-aware: this means that each client gets assigned a portion of " -"the compute and memory on your system. You as a user can control this at " -"the beginning of the simulation and allows you to control the degree of " -"parallelism of your Flower FL simulation. The fewer the resources per " -"client, the more clients can run concurrently on the same hardware." +"The purpose of this process is to reduce the amount of \"tribal " +"knowledge\" in our community. 
By moving decisions from Slack threads, " +"video calls, and hallway conversations into a well-tracked artifact, this" +" process aims to enhance communication and discoverability." msgstr "" +"L'objectif de ce processus est de réduire la quantité de \"connaissances " +"tribales\" dans notre communauté. En déplaçant les décisions des fils de " +"discussion Slack, des appels vidéo et des conversations de couloir vers " +"un artefact bien suivi, ce processus vise à améliorer la communication et" +" la découvrabilité." -#: ../../source/how-to-run-simulations.rst:13 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 msgid "" -"self-managed: this means that you as a user do not need to launch clients" -" manually, instead this gets delegated to :code:`VirtualClientEngine`'s " -"internals." +"Roughly any larger, user-facing enhancement should follow the Enhancement" +" process. If an enhancement would be described in either written or " +"verbal communication to anyone besides the author or developer, then " +"consider creating an Enhancement Doc." msgstr "" +"Si une amélioration doit être décrite par écrit ou verbalement à " +"quelqu'un d'autre que l'auteur ou le développeur, il faut envisager de " +"créer un document d'amélioration." -#: ../../source/how-to-run-simulations.rst:14 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 msgid "" -"ephemeral: this means that a client is only materialized when it is " -"required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards," -" releasing the resources it was assigned and allowing in this way other " -"clients to participate." +"Similarly, any technical effort (refactoring, major architectural change)" +" that will impact a large section of the development community should " +"also be communicated widely. The Enhancement process is suited for this " +"even if it will have zero impact on the typical user or operator." 
msgstr "" +"De même, tout effort technique (refactorisation, changement architectural" +" majeur) qui aura un impact sur une grande partie de la communauté de " +"développement doit également être communiqué à grande échelle. Le " +"processus d'amélioration est adapté à cela, même s'il n'aura aucun impact" +" sur l'utilisateur ou l'opérateur type." -#: ../../source/how-to-run-simulations.rst:16 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 msgid "" -"The :code:`VirtualClientEngine` implements `virtual` clients using `Ray " -"`_, an open-source framework for scalable Python " -"workloads. In particular, Flower's :code:`VirtualClientEngine` makes use " -"of `Actors `_ to " -"spawn `virtual` clients and run their workload." -msgstr "" - -#: ../../source/how-to-run-simulations.rst:20 -msgid "Launch your Flower simulation" +"For small changes and additions, going through the Enhancement process " +"would be time-consuming and unnecessary. This includes, for example, " +"adding new Federated Learning algorithms, as these only add features " +"without changing how Flower works or is used." msgstr "" +"Pour les petits changements et ajouts, passer par le processus " +"d'amélioration prendrait beaucoup de temps et serait inutile. Cela " +"inclut, par exemple, l'ajout de nouveaux algorithmes d'apprentissage " +"fédéré, car ceux-ci ne font qu'ajouter des fonctionnalités sans changer " +"le fonctionnement ou l'utilisation de Flower." -#: ../../source/how-to-run-simulations.rst:22 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 msgid "" -"Running Flower simulations still require you to define your client class," -" a strategy, and utility functions to download and load (and potentially " -"partition) your dataset. 
With that out of the way, launching your " -"simulation is done with `start_simulation `_ and a minimal example looks" -" as follows:" +"Enhancements are different from feature requests, as they are already " +"providing a laid-out path for implementation and are championed by " +"members of the community." msgstr "" +"Les améliorations sont différentes des demandes de fonctionnalités, car " +"elles fournissent déjà un chemin tracé pour la mise en œuvre et sont " +"défendues par les membres de la communauté." -#: ../../source/how-to-run-simulations.rst:44 -#, fuzzy -msgid "VirtualClientEngine resources" -msgstr "Moteur de client virtuel" - -#: ../../source/how-to-run-simulations.rst:45 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 msgid "" -"By default the VCE has access to all system resources (i.e. all CPUs, all" -" GPUs, etc) since that is also the default behavior when starting Ray. " -"However, in some settings you might want to limit how many of your system" -" resources are used for simulation. You can do this via the " -":code:`ray_init_args` input argument to :code:`start_simulation` which " -"the VCE internally passes to Ray's :code:`ray.init` command. For a " -"complete list of settings you can configure check the `ray.init " -"`_" -" documentation. Do not set :code:`ray_init_args` if you want the VCE to " -"use all your system's CPUs and GPUs." +"An Enhancement is captured in a Markdown file that follows a defined " +"template and a workflow to review and store enhancement docs for " +"reference — the Enhancement Doc." msgstr "" +"Une amélioration est capturée dans un fichier Markdown qui suit un modèle" +" défini et un flux de travail pour examiner et stocker les documents " +"d'amélioration pour référence - le Doc d'amélioration." 
-#: ../../source/how-to-run-simulations.rst:62 -msgid "Assigning client resources" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 +msgid "Enhancement Doc Template" +msgstr "Modèle de document d'amélioration" -#: ../../source/how-to-run-simulations.rst:63 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 msgid "" -"By default the :code:`VirtualClientEngine` assigns a single CPU core (and" -" nothing else) to each virtual client. This means that if your system has" -" 10 cores, that many virtual clients can be concurrently running." +"Each enhancement doc is provided as a Markdown file having the following " +"structure" msgstr "" +"Chaque document d'amélioration est fourni sous la forme d'un fichier " +"Markdown ayant la structure suivante" -#: ../../source/how-to-run-simulations.rst:65 -msgid "" -"More often than not, you would probably like to adjust the resources your" -" clients get assigned based on the complexity (i.e. compute and memory " -"footprint) of your FL workload. You can do so when starting your " -"simulation by setting the argument `client_resources` to " -"`start_simulation `_." -" Two keys are internally used by Ray to schedule and spawn workloads (in " -"our case Flower clients):" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 +msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" msgstr "" +"Métadonnées (comme [décrit ci-dessous](#metadata) sous la forme d'un " +"préambule YAML)" -#: ../../source/how-to-run-simulations.rst:67 -msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 +msgid "Title (same as in metadata)" +msgstr "Titre (le même que dans les métadonnées)" -#: ../../source/how-to-run-simulations.rst:68 -msgid "" -":code:`num_gpus` indicates the **ratio** of GPU memory a client gets " -"assigned." 
-msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 +msgid "Table of Contents (if needed)" +msgstr "Table des matières (si nécessaire)" -#: ../../source/how-to-run-simulations.rst:70 -msgid "Let's see a few examples:" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 +msgid "Notes/Constraints/Caveats (optional)" +msgstr "Notes/Contraintes/Cavats (facultatif)" -#: ../../source/how-to-run-simulations.rst:89 -msgid "" -"While the :code:`client_resources` can be used to control the degree of " -"concurrency in your FL simulation, this does not stop you from running " -"dozens, hundreds or even thousands of clients in the same round and " -"having orders of magnitude more `dormant` (i.e. not participating in a " -"round) clients. Let's say you want to have 100 clients per round but your" -" system can only accommodate 8 clients concurrently. The " -":code:`VirtualClientEngine` will schedule 100 jobs to run (each " -"simulating a client sampled by the strategy) and then will execute them " -"in a resource-aware manner in batches of 8." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 +msgid "Design Details (optional)" +msgstr "Détails de la conception (facultatif)" -#: ../../source/how-to-run-simulations.rst:91 -msgid "" -"To understand all the intricate details on how resources are used to " -"schedule FL clients and how to define custom resources, please take a " -"look at the `Ray documentation `_." 
-msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 +msgid "Graduation Criteria" +msgstr "Critères d'obtention du diplôme" -#: ../../source/how-to-run-simulations.rst:94 -#, fuzzy -msgid "Simulation examples" -msgstr "Exemples de PyTorch" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 +msgid "Upgrade/Downgrade Strategy (if applicable)" +msgstr "Stratégie de mise à niveau/rétrogradation (le cas échéant)" -#: ../../source/how-to-run-simulations.rst:96 -msgid "" -"A few ready-to-run complete examples for Flower simulation in " -"Tensorflow/Keras and PyTorch are provided in the `Flower repository " -"`_. You can run them on Google Colab too:" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 +msgid "As a reference, this document follows the above structure." +msgstr "À titre de référence, ce document suit la structure ci-dessus." -#: ../../source/how-to-run-simulations.rst:98 -#, fuzzy -msgid "" -"`Tensorflow/Keras Simulation " -"`_: 100 clients collaboratively train a MLP model on MNIST." -msgstr "" -"`Quickstart TensorFlow (Code) " -"`_" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 +#: ../../source/ref-api/flwr.common.Metadata.rst:2 +msgid "Metadata" +msgstr "Métadonnées" -#: ../../source/how-to-run-simulations.rst:99 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 msgid "" -"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " -"MNIST." +"**fed-number** (Required) The `fed-number` of the last Flower Enhancement" +" Doc + 1. With this number, it becomes easy to reference other proposals." msgstr "" +"**numérofed** (Obligatoire) Le `numérofed` du dernier document " +"d'amélioration de la fleur + 1. Avec ce numéro, il devient facile de " +"faire référence à d'autres propositions." 
-#: ../../source/how-to-run-simulations.rst:104 -#, fuzzy -msgid "Multi-node Flower simulations" -msgstr "Simulation de moniteur" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 +msgid "**title** (Required) The title of the proposal in plain language." +msgstr "**titre** (obligatoire) Le titre de la proposition en langage clair." -#: ../../source/how-to-run-simulations.rst:106 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 msgid "" -"Flower's :code:`VirtualClientEngine` allows you to run FL simulations " -"across multiple compute nodes. Before starting your multi-node simulation" -" ensure that you:" -msgstr "" - -#: ../../source/how-to-run-simulations.rst:108 -msgid "Have the same Python environment in all nodes." -msgstr "" - -#: ../../source/how-to-run-simulations.rst:109 -msgid "Have a copy of your code (e.g. your entire repo) in all nodes." +"**status** (Required) The current status of the proposal. See " +"[workflow](#workflow) for the possible states." msgstr "" +"**status** (obligatoire) L'état actuel de la proposition. Voir " +"[workflow](#workflow) pour les états possibles." -#: ../../source/how-to-run-simulations.rst:110 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 msgid "" -"Have a copy of your dataset in all nodes (more about this in " -":ref:`simulation considerations `)" +"**authors** (Required) A list of authors of the proposal. This is simply " +"the GitHub ID." msgstr "" +"**authors** (Obligatoire) Une liste des auteurs de la proposition, il " +"s'agit simplement de l'identifiant GitHub." -#: ../../source/how-to-run-simulations.rst:111 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 msgid "" -"Pass :code:`ray_init_args={\"address\"=\"auto\"}` to `start_simulation " -"`_ so the " -":code:`VirtualClientEngine` attaches to a running Ray instance." +"**creation-date** (Required) The date that the proposal was first " +"submitted in a PR." 
msgstr "" +"**creation-date** (Obligatoire) Date à laquelle la proposition a été " +"soumise pour la première fois dans un RP." -#: ../../source/how-to-run-simulations.rst:112 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 msgid "" -"Start Ray on you head node: on the terminal type :code:`ray start " -"--head`. This command will print a few lines, one of which indicates how " -"to attach other nodes to the head node." +"**last-updated** (Optional) The date that the proposal was last changed " +"significantly." msgstr "" +"**dernière mise à jour** (Facultatif) La date à laquelle la proposition a" +" été modifiée de manière significative pour la dernière fois." -#: ../../source/how-to-run-simulations.rst:113 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 msgid "" -"Attach other nodes to the head node: copy the command shown after " -"starting the head and execute it on terminal of a new node: for example " -":code:`ray start --address='192.168.1.132:6379'`" +"**see-also** (Optional) A list of other proposals that are relevant to " +"this one." msgstr "" +"**see-also** (Facultatif) Une liste d'autres propositions qui sont " +"pertinentes par rapport à celle-ci." -#: ../../source/how-to-run-simulations.rst:115 -msgid "" -"With all the above done, you can run your code from the head node as you " -"would if the simulation was running on a single node." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 +msgid "**replaces** (Optional) A list of proposals that this one replaces." +msgstr "**replaces** (Facultatif) Une liste de propositions que celle-ci remplace." -#: ../../source/how-to-run-simulations.rst:117 -msgid "" -"Once your simulation is finished, if you'd like to dismantle your cluster" -" you simply need to run the command :code:`ray stop` in each node's " -"terminal (including the head node)." 
+#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 +msgid "**superseded-by** (Optional) A list of proposals that this one supersedes." msgstr "" +"**superseded-by** (Facultatif) Une liste de propositions que celle-ci " +"remplace." -#: ../../source/how-to-run-simulations.rst:120 -msgid "Multi-node simulation good-to-know" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 +msgid "Workflow" +msgstr "Flux de travail" -#: ../../source/how-to-run-simulations.rst:122 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 msgid "" -"Here we list a few interesting functionality when running multi-node FL " -"simulations:" +"The idea forming the enhancement should already have been discussed or " +"pitched in the community. As such, it needs a champion, usually the " +"author, who shepherds the enhancement. This person also has to find " +"committers to Flower willing to review the proposal." msgstr "" +"L'idée à l'origine de l'amélioration doit déjà avoir fait l'objet d'une " +"discussion ou d'une présentation au sein de la communauté. À ce titre, " +"elle a besoin d'un champion, généralement l'auteur, qui se charge de " +"l'amélioration. Cette personne doit également trouver des committers to " +"Flower prêts à examiner la proposition." -#: ../../source/how-to-run-simulations.rst:124 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 msgid "" -"User :code:`ray status` to check all nodes connected to your head node as" -" well as the total resources available to the " -":code:`VirtualClientEngine`." -msgstr "" - -#: ../../source/how-to-run-simulations.rst:126 -msgid "" -"When attaching a new node to the head, all its resources (i.e. all CPUs, " -"all GPUs) will be visible by the head node. This means that the " -":code:`VirtualClientEngine` can schedule as many `virtual` clients as " -"that node can possible run. In some settings you might want to exclude " -"certain resources from the simulation. 
You can do this by appending " -"`--num-cpus=` and/or `--num-" -"gpus=` in any :code:`ray start` command (including " -"when starting the head)" +"New enhancements are checked in with a file name in the form of `NNNN-" +"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement " +"Doc number, to `enhancements`. All enhancements start in `provisional` " +"state as part of a pull request. Discussions are done as part of the pull" +" request review." msgstr "" +"Les nouvelles améliorations sont enregistrées avec un nom de fichier de " +"la forme `NNNN-YYYYMMDD-enhancement-title.md`, `NNNN` étant le numéro du " +"document d'amélioration de Flower, dans `enhancements`. Toutes les " +"améliorations commencent à l'état `provisional` dans le cadre d'une " +"demande d'extraction. Les discussions sont effectuées dans le cadre de " +"l'examen de la demande d'extraction." -#: ../../source/how-to-run-simulations.rst:132 -#, fuzzy -msgid "Considerations for simulations" -msgstr "Simulation de moniteur" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 +msgid "" +"Once an enhancement has been reviewed and approved, its status is changed" +" to `implementable`. The actual implementation is then done in separate " +"pull requests. These pull requests should mention the respective " +"enhancement as part of their description. After the implementation is " +"done, the proposal status is changed to `implemented`." +msgstr "" +"Une fois qu'une amélioration a été examinée et approuvée, son statut " +"passe à `implementable`. L'implémentation réelle est alors réalisée dans " +"des demandes d'extraction séparées. Ces demandes d'extraction doivent " +"mentionner l'amélioration concernée dans leur description. Une fois " +"l'implémentation réalisée, le statut de la proposition passe à " +"`implemented`." 
-#: ../../source/how-to-run-simulations.rst:135 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 msgid "" -"We are actively working on these fronts so to make it trivial to run any " -"FL workload with Flower simulation." +"Under certain conditions, other states are possible. An Enhancement has " +"the following states:" msgstr "" +"Sous certaines conditions, d'autres états sont possibles. Une " +"amélioration a les états suivants :" -#: ../../source/how-to-run-simulations.rst:138 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 msgid "" -"The current VCE allows you to run Federated Learning workloads in " -"simulation mode whether you are prototyping simple scenarios on your " -"personal laptop or you want to train a complex FL pipeline across " -"multiple high-performance GPU nodes. While we add more capabilities to " -"the VCE, the points below highlight some of the considerations to keep in" -" mind when designing your FL pipeline with Flower. We also highlight a " -"couple of current limitations in our implementation." +"`provisional`: The enhancement has been proposed and is actively being " +"defined. This is the starting state while the proposal is being fleshed " +"out and actively defined and discussed." msgstr "" +"`provisoire` : L'amélioration a été proposée et est en cours de " +"définition. C'est l'état de départ pendant que la proposition est étoffée" +" et activement définie et discutée." -#: ../../source/how-to-run-simulations.rst:141 -#, fuzzy -msgid "GPU resources" -msgstr "Ressources" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 +msgid "`implementable`: The enhancement has been reviewed and approved." +msgstr "`implementable` : L'amélioration a été examinée et approuvée." 
-#: ../../source/how-to-run-simulations.rst:143 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 msgid "" -"The VCE assigns a share of GPU memory to a client that specifies the key " -":code:`num_gpus` in :code:`client_resources`. This being said, Ray (used " -"internally by the VCE) is by default:" +"`implemented`: The enhancement has been implemented and is no longer " +"actively changed." +msgstr "" +"`implemented` : L'amélioration a été mise en œuvre et n'est plus " +"activement modifiée." + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 +msgid "`deferred`: The enhancement is proposed but not actively being worked on." msgstr "" +"`deferred` : L'amélioration est proposée mais n'est pas activement " +"travaillée." -#: ../../source/how-to-run-simulations.rst:146 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 msgid "" -"not aware of the total VRAM available on the GPUs. This means that if you" -" set :code:`num_gpus=0.5` and you have two GPUs in your system with " -"different (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients" -" concurrently." +"`rejected`: The authors and reviewers have decided that this enhancement " +"is not moving forward." msgstr "" +"`rejeté` : Les auteurs et les réviseurs ont décidé que cette amélioration" +" n'allait pas de l'avant." -#: ../../source/how-to-run-simulations.rst:147 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 +msgid "`withdrawn`: The authors have withdrawn the enhancement." +msgstr "`withdrawn` : Les auteurs ont retiré l'amélioration." + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 +msgid "`replaced`: The enhancement has been replaced by a new enhancement." +msgstr "`replaced` : L'amélioration a été remplacée par une nouvelle amélioration." + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 msgid "" -"not aware of other unrelated (i.e. not created by the VCE) workloads are " -"running on the GPU. 
Two takeaways from this are:" +"Adding an additional process to the ones already provided by GitHub " +"(Issues and Pull Requests) adds more complexity and can be a barrier for " +"potential first-time contributors." msgstr "" +"L'ajout d'un processus supplémentaire à ceux déjà fournis par GitHub " +"(Issues et Pull Requests) ajoute plus de complexité et peut constituer un" +" obstacle pour les éventuels nouveaux contributeurs." -#: ../../source/how-to-run-simulations.rst:149 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 msgid "" -"Your Flower server might need a GPU to evaluate the `global model` after " -"aggregation (by instance when making use of the `evaluate method `_)" +"Expanding the proposal template beyond the single-sentence description " +"currently required in the features issue template may be a heavy burden " +"for non-native English speakers." msgstr "" +"Élargir le modèle de proposition au-delà de la description d'une seule " +"phrase actuellement requise dans le modèle de questions sur les " +"caractéristiques peut constituer une lourde charge pour les personnes " +"dont l'anglais n'est pas la langue maternelle." + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 +msgid "GitHub Issues" +msgstr "Questions sur GitHub" -#: ../../source/how-to-run-simulations.rst:150 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 msgid "" -"If you want to run several independent Flower simulations on the same " -"machine you need to mask-out your GPUs with " -":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching your " -"experiment." +"Using GitHub Issues for these kinds of enhancements is doable. One could " +"use, for example, tags, to differentiate and filter them from other " +"issues. The main issue is in discussing and reviewing an enhancement: " +"GitHub issues only have a single thread for comments. Enhancements " +"usually have multiple threads of discussion at the same time for various " +"parts of the doc. 
Managing these multiple discussions can be confusing " +"when using GitHub Issues." msgstr "" +"Il est possible d'utiliser GitHub Issues pour ce type d'améliorations. On" +" pourrait utiliser, par exemple, des balises pour les différencier et les" +" filtrer par rapport aux autres problèmes. Le principal problème concerne" +" la discussion et la révision d'une amélioration : les GitHub Issues " +"n'ont qu'un seul fil de discussion pour les commentaires. Les " +"améliorations ont généralement plusieurs fils de discussion en même temps" +" pour différentes parties de la documentation. La gestion de ces " +"multiples discussions peut être déroutante lorsque l'on utilise GitHub " +"Issues." + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 +msgid "Google Docs" +msgstr "Google Docs" -#: ../../source/how-to-run-simulations.rst:153 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 msgid "" -"In addition, the GPU resource limits passed to :code:`client_resources` " -"are not `enforced` (i.e. they can be exceeded) which can result in the " -"situation of client using more VRAM than the ratio specified when " -"starting the simulation." +"Google Docs allow for multiple threads of discussions. But as Google Docs" +" are hosted outside the project, their discoverability by the community " +"needs to be taken care of. A list of links to all proposals has to be " +"managed and made available for the community. Compared to shipping " +"proposals as part of Flower's repository, the potential for missing links" +" is much higher." msgstr "" +"Les Google Docs permettent de multiplier les fils de discussion. Mais " +"comme les Google Docs sont hébergés en dehors du projet, il faut veiller " +"à ce que la communauté puisse les découvrir. Une liste de liens vers " +"toutes les propositions doit être gérée et mise à la disposition de la " +"communauté. 
Par rapport à l'envoi de propositions dans le cadre du " +"référentiel de Flower, le risque de liens manquants est beaucoup plus " +"élevé." + +#: ../../source/fed/index.md:1 +msgid "FED - Flower Enhancement Doc" +msgstr "FED - Doc pour l'amélioration des fleurs" -#: ../../source/how-to-run-simulations.rst:156 +#: ../../source/how-to-aggregate-evaluation-results.rst:2 #, fuzzy -msgid "TensorFlow with GPUs" -msgstr "Exemples de TensorFlow" +msgid "Aggregate evaluation results" +msgstr "Résultats globaux de l'évaluation." -#: ../../source/how-to-run-simulations.rst:158 +#: ../../source/how-to-aggregate-evaluation-results.rst:4 msgid "" -"When `using a GPU with TensorFlow " -"`_ nearly your entire GPU memory of" -" all your GPUs visible to the process will be mapped. This is done by " -"TensorFlow for optimization purposes. However, in settings such as FL " -"simulations where we want to split the GPU into multiple `virtual` " -"clients, this is not a desirable mechanism. Luckily we can disable this " -"default behavior by `enabling memory growth " -"`_." +"The Flower server does not prescribe a way to aggregate evaluation " +"results, but it enables the user to fully customize result aggregation." msgstr "" -#: ../../source/how-to-run-simulations.rst:160 +#: ../../source/how-to-aggregate-evaluation-results.rst:8 +msgid "Aggregate Custom Evaluation Results" +msgstr "Agréger les résultats de l'évaluation personnalisée" + +#: ../../source/how-to-aggregate-evaluation-results.rst:10 +#, fuzzy msgid "" -"This would need to be done in the main process (which is where the server" -" would run) and in each Actor created by the VCE. By means of " -":code:`actor_kwargs` we can pass the reserved key `\"on_actor_init_fn\"` " -"in order to specify a function to be executed upon actor initialization. " -"In this case, to enable GPU growth for TF workloads. 
It would look as " -"follows:" +"The same ``Strategy``-customization approach can be used to aggregate " +"custom evaluation results coming from individual clients. Clients can " +"return custom metrics to the server by returning a dictionary:" msgstr "" +"La même approche de personnalisation :code:`Stratégie` peut être utilisée" +" pour agréger les résultats d'évaluation personnalisés provenant de " +"clients individuels. Les clients peuvent renvoyer des mesures " +"personnalisées au serveur en renvoyant un dictionnaire :" -#: ../../source/how-to-run-simulations.rst:179 -#, fuzzy +#: ../../source/how-to-aggregate-evaluation-results.rst:39 msgid "" -"This is precisely the mechanism used in `Tensorflow/Keras Simulation " -"`_ example." +"The server can then use a customized strategy to aggregate the metrics " +"provided in these dictionaries:" msgstr "" -"`Quickstart TensorFlow (Code) " -"`_" +"Le serveur peut alors utiliser une stratégie personnalisée pour agréger " +"les mesures fournies dans ces dictionnaires :" -#: ../../source/how-to-run-simulations.rst:183 -msgid "Multi-node setups" +#: ../../source/how-to-authenticate-supernodes.rst:2 +msgid "Authenticate SuperNodes" msgstr "" -#: ../../source/how-to-run-simulations.rst:185 +#: ../../source/how-to-authenticate-supernodes.rst:4 msgid "" -"The VCE does not currently offer a way to control on which node a " -"particular `virtual` client is executed. In other words, if more than a " -"single node have the resources needed by a client to run, then any of " -"those nodes could get the client workload scheduled onto. Later in the FL" -" process (i.e. in a different round) the same client could be executed by" -" a different node. Depending on how your clients access their datasets, " -"this might require either having a copy of all dataset partitions on all " -"nodes or a dataset serving mechanism (e.g. using nfs, a database) to " -"circumvent data duplication." 
+"Flower has built-in support for authenticated SuperNodes that you can use" +" to verify the identities of each SuperNode connecting to a SuperLink. " +"Flower node authentication works similar to how GitHub SSH authentication" +" works:" msgstr "" -#: ../../source/how-to-run-simulations.rst:187 -msgid "" -"By definition virtual clients are `stateless` due to their ephemeral " -"nature. A client state can be implemented as part of the Flower client " -"class but users need to ensure this saved to persistent storage (e.g. a " -"database, disk) and that can be retrieve later by the same client " -"regardless on which node it is running from. This is related to the point" -" above also since, in some way, the client's dataset could be seen as a " -"type of `state`." +#: ../../source/how-to-authenticate-supernodes.rst:8 +msgid "SuperLink (server) stores a list of known (client) node public keys" msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:2 -#, fuzzy -msgid "Save and load model checkpoints" -msgstr "Sauvegarde et chargement des points de contrôle PyTorch" +#: ../../source/how-to-authenticate-supernodes.rst:9 +msgid "" +"Using ECDH, both SuperNode and SuperLink independently derive a shared " +"secret" +msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:4 +#: ../../source/how-to-authenticate-supernodes.rst:10 msgid "" -"Flower does not automatically save model updates on the server-side. This" -" how-to guide describes the steps to save (and load) model checkpoints in" -" Flower." 
+"Shared secret is used to compute the HMAC value of the message sent from " +"SuperNode to SuperLink as a token" msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:8 +#: ../../source/how-to-authenticate-supernodes.rst:12 +msgid "SuperLink verifies the token" +msgstr "" + +#: ../../source/how-to-authenticate-supernodes.rst:14 #, fuzzy -msgid "Model checkpointing" -msgstr "Point de contrôle du modèle" +msgid "" +"We recommend you to check out the complete `code example " +"`_ demonstrating federated learning with Flower in an " +"authenticated setting." +msgstr "" +"Réfère-toi à l'exemple de code complet " +"`_ " +"pour en savoir plus." -#: ../../source/how-to-save-and-load-model-checkpoints.rst:10 +#: ../../source/how-to-authenticate-supernodes.rst:20 msgid "" -"Model updates can be persisted on the server-side by customizing " -":code:`Strategy` methods. Implementing custom strategies is always an " -"option, but for many cases it may be more convenient to simply customize " -"an existing strategy. The following code example defines a new " -":code:`SaveModelStrategy` which customized the existing built-in " -":code:`FedAvg` strategy. In particular, it customizes " -":code:`aggregate_fit` by calling :code:`aggregate_fit` in the base class " -"(:code:`FedAvg`). It then continues to save returned (aggregated) weights" -" before it returns those aggregated weights to the caller (i.e., the " -"server):" +"This guide covers a preview feature that might change in future versions " +"of Flower." msgstr "" -"Les mises à jour du modèle peuvent être conservées côté serveur en " -"personnalisant les méthodes :code:`Strategy`. L'implémentation de " -"stratégies personnalisées est toujours possible, mais dans de nombreux " -"cas, il peut être plus pratique de simplement personnaliser une stratégie" -" existante. L'exemple de code suivant définit une nouvelle " -":code:`SaveModelStrategy` qui personnalise la stratégie intégrée " -":code:`FedAvg` existante. 
En particulier, il personnalise " -":code:`aggregate_fit` en appelant :code:`aggregate_fit` dans la classe de" -" base (:code:`FedAvg`). Il continue ensuite à sauvegarder les poids " -"retournés (agrégés) avant de renvoyer ces poids agrégés à l'appelant " -"(c'est-à-dire le serveur) :" - -#: ../../source/how-to-save-and-load-model-checkpoints.rst:47 -#, fuzzy -msgid "Save and load PyTorch checkpoints" -msgstr "Sauvegarde et chargement des points de contrôle PyTorch" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:49 -#, fuzzy +#: ../../source/how-to-authenticate-supernodes.rst:24 msgid "" -"Similar to the previous example but with a few extra steps, we'll show " -"how to store a PyTorch checkpoint we'll use the ``torch.save`` function. " -"Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be" -" transformed into a list of NumPy ``ndarray``'s, then those are " -"transformed into the PyTorch ``state_dict`` following the ``OrderedDict``" -" class structure." +"For increased security, node authentication can only be used when " +"encrypted connections (SSL/TLS) are enabled." msgstr "" -"Comme dans l'exemple précédent, mais avec quelques étapes " -"supplémentaires, nous allons montrer comment stocker un point de contrôle" -" PyTorch en utilisant la fonction ``torch.save``. Tout d'abord, " -"``aggregate_fit`` renvoie un objet ``Parameters`` qui doit être " -"transformé en une liste de `ndarray`` NumPy, puis ceux-ci sont " -"transformés en ``state_dict`` PyTorch en suivant la structure de la " -"classe ``OrderedDict``." -#: ../../source/how-to-save-and-load-model-checkpoints.rst:85 -msgid "" -"To load your progress, you simply append the following lines to your " -"code. 
Note that this will iterate over all saved checkpoints and load the" -" latest one:" +#: ../../source/how-to-authenticate-supernodes.rst:28 +msgid "Enable node authentication in ``SuperLink``" msgstr "" -"Pour charger ta progression, il te suffit d'ajouter les lignes suivantes " -"à ton code. Note que cela va itérer sur tous les points de contrôle " -"sauvegardés et charger le plus récent :" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:97 +#: ../../source/how-to-authenticate-supernodes.rst:30 msgid "" -"Return/use this object of type ``Parameters`` wherever necessary, such as" -" in the ``initial_parameters`` when defining a ``Strategy``." +"To enable node authentication, first you need to configure SSL/TLS " +"connections to secure the SuperLink<>SuperNode communication. You can " +"find the complete guide `here `_. After configuring secure connections, you" +" can enable client authentication in a long-running Flower ``SuperLink``." +" Use the following terminal command to start a Flower ``SuperNode`` that " +"has both secure connections and node authentication enabled:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:2 -msgid "Upgrade to Flower 1.0" -msgstr "Passe à Flower 1.0" +#: ../../source/how-to-authenticate-supernodes.rst:47 +msgid "Let's break down the authentication flags:" +msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:4 +#: ../../source/how-to-authenticate-supernodes.rst:49 msgid "" -"Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable" -" foundation for future growth. Compared to Flower 0.19 (and other 0.x " -"series releases), there are a few breaking changes that make it necessary" -" to change the code of existing 0.x-series projects." +"The first flag ``--auth-list-public-keys`` expects a path to a CSV file " +"storing all known node public keys. You need to store all known node " +"public keys that are allowed to participate in a federation in one CSV " +"file (``.csv``)." 
msgstr "" -"Flower 1.0 est arrivé. En plus de nouvelles fonctionnalités, Flower 1.0 " -"fournit une base stable pour la croissance future. Par rapport à Flower " -"0.19 (et aux autres versions de la série 0.x), il y a quelques " -"changements qui nécessitent de modifier le code des projets de la série " -"0.x existants." - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:8 -#: ../../source/how-to-upgrade-to-flower-next.rst:43 -msgid "Install update" -msgstr "Installer la mise à jour" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +#: ../../source/how-to-authenticate-supernodes.rst:53 msgid "" -"Here's how to update an existing installation to Flower 1.0 using either " -"pip or Poetry:" +"A valid CSV file storing known node public keys should list the keys in " +"OpenSSH format, separated by commas and without any comments. For an " +"example, refer to our code sample, which contains a CSV file with two " +"known node public keys." msgstr "" -"Voici comment mettre à jour une installation existante vers Flower 1.0 en" -" utilisant soit pip soit Poetry :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 -msgid "pip: add ``-U`` when installing." -msgstr "pip : ajoute ``-U`` lors de l'installation." +#: ../../source/how-to-authenticate-supernodes.rst:57 +msgid "" +"The second and third flags ``--auth-superlink-private-key`` and ``--auth-" +"superlink-public-key`` expect paths to the server's private and public " +"keys. For development purposes, you can generate a private and public key" +" pair using ``ssh-keygen -t ecdsa -b 384``." +msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +#: ../../source/how-to-authenticate-supernodes.rst:64 msgid "" -"``python -m pip install -U flwr`` (when using ``start_server`` and " -"``start_client``)" +"In Flower 1.9, there is no support for dynamically removing, editing, or " +"adding known node public keys to the SuperLink. 
To change the set of " +"known nodes, you need to shut the server down, edit the CSV file, and " +"start the server again. Support for dynamically changing the set of known" +" nodes is on the roadmap to be released in Flower 1.10 (ETA: June)." msgstr "" -"``python -m pip install -U flwr`` (lors de l'utilisation de " -"``start_server`` et ``start_client``)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:15 +#: ../../source/how-to-authenticate-supernodes.rst:71 +msgid "Enable node authentication in ``SuperNode``" +msgstr "" + +#: ../../source/how-to-authenticate-supernodes.rst:73 msgid "" -"``python -m pip install -U flwr[simulation]`` (when using " -"``start_simulation``)" +"Similar to the long-running Flower server (``SuperLink``), you can easily" +" enable node authentication in the long-running Flower client " +"(``SuperNode``). Use the following terminal command to start an " +"authenticated ``SuperNode``:" msgstr "" -"``python -m pip install -U flwr[simulation]`` (lors de l'utilisation de " -"``start_simulation``)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 +#: ../../source/how-to-authenticate-supernodes.rst:85 msgid "" -"Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " -"reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " -"before running ``poetry install``)." +"The ``--auth-supernode-private-key`` flag expects a path to the node's " +"private key file and the ``--auth-supernode-public-key`` flag expects a " +"path to the node's public key file. For development purposes, you can " +"generate a private and public key pair using ``ssh-keygen -t ecdsa -b " +"384``." msgstr "" -"Poetry : mettez à jour la dépendance ``flwr`` dans ``pyproject.toml`` " -"puis réinstallez (n'oubliez pas de supprimer ``poetry.lock`` via ``rm " -"poetry.lock`` avant d'exécuter ``poetry install``)." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 -msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" +#: ../../source/how-to-authenticate-supernodes.rst:91 +msgid "Security notice" msgstr "" -"``flwr = \"^1.0.0\"`` (lors de l'utilisation de ``start_server`` et " -"``start_client``)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:20 +#: ../../source/how-to-authenticate-supernodes.rst:93 msgid "" -"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " -"using ``start_simulation``)" +"The system's security relies on the credentials of the SuperLink and each" +" SuperNode. Therefore, it is imperative to safeguard and safely store the" +" credentials to avoid security risks such as Public Key Infrastructure " +"(PKI) impersonation attacks. The node authentication mechanism also " +"involves human interaction, so please ensure that all of the " +"communication is done in a secure manner, using trusted communication " +"methods." msgstr "" -"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (lors de " -"l'utilisation de ``start_simulation``)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 -#: ../../source/how-to-upgrade-to-flower-next.rst:100 -msgid "Required changes" -msgstr "Changements nécessaires" +#: ../../source/how-to-authenticate-supernodes.rst:100 +#: ../../source/how-to-enable-ssl-connections.rst:71 +#: ../../source/how-to-use-built-in-mods.rst:95 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 +msgid "Conclusion" +msgstr "Conclusion" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:26 -msgid "The following breaking changes require manual updates." +#: ../../source/how-to-authenticate-supernodes.rst:102 +msgid "" +"You should now have learned how to start a long-running Flower server " +"(``SuperLink``) and client (``SuperNode``) with node authentication " +"enabled. 
You should also know the significance of the private key and " +"store it safely to minimize security risks." msgstr "" -"Les changements de rupture suivants nécessitent des mises à jour " -"manuelles." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:29 -msgid "General" -msgstr "Généralités" +#: ../../source/how-to-configure-clients.rst:2 +#, fuzzy +msgid "Configure clients" +msgstr "Configurer les clients" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:31 +#: ../../source/how-to-configure-clients.rst:4 msgid "" -"Pass all arguments as keyword arguments (not as positional arguments). " -"Here's an example:" +"Along with model parameters, Flower can send configuration values to " +"clients. Configuration values can be used for various purposes. They are," +" for example, a popular way to control client-side hyperparameters from " +"the server." msgstr "" -"Passe tous les arguments comme des arguments de mots-clés (et non comme " -"des arguments de position). Voici un exemple :" +"En plus des paramètres du modèle, Flower peut envoyer des valeurs de " +"configuration aux clients. Les valeurs de configuration peuvent être " +"utilisées à diverses fins. Elles constituent, par exemple, un moyen " +"populaire de contrôler les hyperparamètres côté client à partir du " +"serveur." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 +#: ../../source/how-to-configure-clients.rst:9 +msgid "Configuration values" +msgstr "Valeurs de configuration" + +#: ../../source/how-to-configure-clients.rst:11 msgid "" -"Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " -"FlowerClient())``" +"Configuration values are represented as a dictionary with ``str`` keys " +"and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " +"float), ``int``, or ``str`` (or equivalent types in different languages)." 
+" Here is an example of a configuration dictionary in Python:" msgstr "" -"Flower 0.19 (arguments positionnels) : ``start_client(\"127.0.0.1:8080\"," -" FlowerClient())``" +"Les valeurs de configuration sont représentées sous forme de dictionnaire" +" avec des clés `str`` et des valeurs de type `bool`, `bytes`, `double` " +"(float de précision 64 bits), `int`, ou `str` (ou des types équivalents " +"dans d'autres langages). Voici un exemple de dictionnaire de " +"configuration en Python :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:34 +#: ../../source/how-to-configure-clients.rst:25 msgid "" -"Flower 1.0 (keyword arguments): " -"``start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())``" +"Flower serializes these configuration dictionaries (or *config dict* for " +"short) to their ProtoBuf representation, transports them to the client " +"using gRPC, and then deserializes them back to Python dictionaries." msgstr "" -"Fleur 1.0 (arguments de mots-clés) : " -"``start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:37 -#: ../../source/ref-api/flwr.client.Client.rst:2 -msgid "Client" -msgstr "Client" +"Flower sérialise ces dictionnaires de configuration (ou *config dict* en " +"abrégé) dans leur représentation ProtoBuf, les transporte vers le client " +"à l'aide de gRPC, puis les désérialise à nouveau en dictionnaires Python." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 +#: ../../source/how-to-configure-clients.rst:31 msgid "" -"Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " -"``def get_parameters(self, config):``" +"Currently, there is no support for directly sending collection types " +"(e.g., ``Set``, ``List``, ``Map``) as values in configuration " +"dictionaries. 
There are several workarounds to send collections as values" +" by converting them to one of the supported value types (and converting " +"them back on the client-side)." msgstr "" -"Sous-classes de ``NumPyClient`` : changez ``def get_parameters(self):`` " -"en ``def get_parameters(self, config):``" +"Actuellement, il n'est pas possible d'envoyer directement des types de " +"collections (par exemple, ``Set``, ``List``, ``Map``) en tant que valeurs" +" dans les dictionnaires de configuration. Il existe plusieurs solutions " +"pour envoyer des collections en tant que valeurs en les convertissant en " +"l'un des types de valeurs pris en charge (et en les reconvertissant du " +"côté client)." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:40 +#: ../../source/how-to-configure-clients.rst:36 msgid "" -"Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " -"get_parameters(self, ins: GetParametersIns):``" +"One can, for example, convert a list of floating-point numbers to a JSON " +"string, then send the JSON string using the configuration dictionary, and" +" then convert the JSON string back to a list of floating-point numbers on" +" the client." msgstr "" -"Sous-classes de ``Client`` : changez ``def get_parameters(self):`` en " -"``def get_parameters(self, ins : GetParametersIns):``" +"On peut, par exemple, convertir une liste de nombres à virgule flottante " +"en une chaîne JSON, puis envoyer la chaîne JSON à l'aide du dictionnaire " +"de configuration, et enfin reconvertir la chaîne JSON en une liste de " +"nombres à virgule flottante sur le client." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 -msgid "Strategies / ``start_server`` / ``start_simulation``" -msgstr "Stratégies / ``démarrer_serveur`` / ``démarrer_simulation``" +#: ../../source/how-to-configure-clients.rst:41 +msgid "Configuration through built-in strategies" +msgstr "Configuration par le biais de stratégies intégrées" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 +#: ../../source/how-to-configure-clients.rst:43 +#, fuzzy msgid "" -"Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " -"``start_simulation``. Here's an example:" +"The easiest way to send configuration values to clients is to use a " +"built-in strategy like ``FedAvg``. Built-in strategies support so-called " +"configuration functions. A configuration function is a function that the " +"built-in strategy calls to get the configuration dictionary for the " +"current round. It then forwards the configuration dictionary to all the " +"clients selected during that round." msgstr "" -"Passez ``ServerConfig`` (au lieu d'un dictionnaire) à ``start_server`` et" -" ``start_simulation``. Voici un exemple :" +"La façon la plus simple d'envoyer des valeurs de configuration aux " +"clients est d'utiliser une stratégie intégrée comme :code:`FedAvg`. Les " +"stratégies intégrées prennent en charge ce que l'on appelle les fonctions" +" de configuration. Une fonction de configuration est une fonction que la " +"stratégie intégrée appelle pour obtenir le dictionnaire de configuration " +"pour le tour en cours. Elle transmet ensuite le dictionnaire de " +"configuration à tous les clients sélectionnés au cours de ce tour." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 +#: ../../source/how-to-configure-clients.rst:49 msgid "" -"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " -"\"round_timeout\": 600.0}, ...)``" +"Let's start with a simple example. 
Imagine we want to send (a) the batch " +"size that the client should use, (b) the current global round of " +"federated learning, and (c) the number of epochs to train on the client-" +"side. Our configuration function could look like this:" msgstr "" -"Flower 0.19 : ``start_server(..., config={\"num_rounds\" : 3, " -"\"round_timeout\" : 600.0}, ...)``" +"Commençons par un exemple simple. Imaginons que nous voulions envoyer (a)" +" la taille du lot que le client doit utiliser, (b) le cycle global actuel" +" de l'apprentissage fédéré et (c) le nombre d'époques à former du côté " +"client. Notre fonction de configuration pourrait ressembler à ceci :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:48 +#: ../../source/how-to-configure-clients.rst:65 +#, fuzzy msgid "" -"Flower 1.0: ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +"To make the built-in strategies use this function, we can pass it to " +"``FedAvg`` during initialization using the parameter " +"``on_fit_config_fn``:" msgstr "" -"Flower 1.0 : ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +"Pour que les stratégies intégrées utilisent cette fonction, nous pouvons " +"la passer à ``FedAvg`` lors de l'initialisation en utilisant le paramètre" +" :code:`on_fit_config_fn` :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:50 +#: ../../source/how-to-configure-clients.rst:75 +msgid "One the client side, we receive the configuration dictionary in ``fit``:" +msgstr "Côté client, nous recevons le dictionnaire de configuration dans ``fit`` :" + +#: ../../source/how-to-configure-clients.rst:86 msgid "" -"Replace ``num_rounds=1`` in ``start_simulation`` with the new " -"``config=ServerConfig(...)`` (see previous item)" +"There is also an `on_evaluate_config_fn` to configure evaluation, which " +"works the same way. 
They are separate functions because one might want to" +" send different configuration values to `evaluate` (for example, to use a" +" different batch size)." msgstr "" -"Remplacer ``num_rounds=1`` dans ``start_simulation`` par le nouveau " -"``config=ServerConfig(...)`` (voir point précédent)" +"Il existe également une fonction `on_evaluate_config_fn` pour configurer " +"l'évaluation, qui fonctionne de la même manière. Ce sont des fonctions " +"séparées car on peut vouloir envoyer différentes valeurs de configuration" +" à `evaluate` (par exemple, pour utiliser une taille de lot différente)." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +#: ../../source/how-to-configure-clients.rst:90 msgid "" -"Remove ``force_final_distributed_eval`` parameter from calls to " -"``start_server``. Distributed evaluation on all clients can be enabled by" -" configuring the strategy to sample all clients for evaluation after the " -"last round of training." +"The built-in strategies call this function every round (that is, every " +"time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). " +"Calling `on_evaluate_config_fn` every round allows us to vary/change the " +"config dict over consecutive rounds. If we wanted to implement a " +"hyperparameter schedule, for example, to increase the number of local " +"epochs during later rounds, we could do the following:" msgstr "" -"Supprime le paramètre ``force_final_distributed_eval`` des appels à " -"``start_server``. L'évaluation distribuée sur tous les clients peut être " -"activée en configurant la stratégie pour échantillonner tous les clients " -"pour l'évaluation après le dernier tour de formation." 
- -#: ../../source/how-to-upgrade-to-flower-1.0.rst:52 -msgid "Rename parameter/ndarray conversion functions:" -msgstr "Renomme les fonctions de conversion des paramètres et des tableaux :" +"Les stratégies intégrées appellent cette fonction à chaque tour " +"(c'est-à-dire à chaque fois que `Strategy.configure_fit` ou " +"`Strategy.configure_evaluate` s'exécute). Appeler `on_evaluate_config_fn`" +" à chaque tour nous permet de varier/changer le dict de config au cours " +"de tours consécutifs. Si nous voulions mettre en place un calendrier " +"d'hyperparamètres, par exemple, pour augmenter le nombre d'époques " +"locales au cours des derniers tours, nous pourrions faire ce qui suit :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:54 -msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" -msgstr "``parameters_to_weights`` --> ``parameters_to_ndarrays``" +#: ../../source/how-to-configure-clients.rst:107 +#, fuzzy +msgid "The ``FedAvg`` strategy will call this function *every round*." +msgstr "La stratégie :code:`FedAvg` appellera cette fonction *à chaque tour*." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:55 -msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" -msgstr "``Poids_à_paramètres`` --> ``Réseaux_à_paramètres``" +#: ../../source/how-to-configure-clients.rst:110 +msgid "Configuring individual clients" +msgstr "Configuration des clients individuels" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:57 +#: ../../source/how-to-configure-clients.rst:112 msgid "" -"Strategy initialization: if the strategy relies on the default values for" -" ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " -"``fraction_evaluate`` manually to ``0.1``. Projects that do not manually " -"create a strategy (by calling ``start_server`` or ``start_simulation`` " -"without passing a strategy instance) should now manually initialize " -"FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." 
+"In some cases, it is necessary to send different configuration values to " +"different clients." msgstr "" -"Initialisation de la stratégie : si la stratégie repose sur les valeurs " -"par défaut de ``fraction_fit`` et ``fraction_evaluate``, fixer " -"manuellement ``fraction_fit`` et ``fraction_evaluate`` à `0.1``. Les " -"projets qui ne créent pas manuellement une stratégie (en appelant " -"``start_server` ou ``start_simulation`` sans passer une instance de " -"stratégie) doivent maintenant initialiser manuellement FedAvg avec " -"``fraction_fit`` et ``fraction_evaluate`` fixés à ``0.1``." - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 -msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" -msgstr "Renommer les paramètres de stratégie intégrés (par exemple, ``FedAvg``) :" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:60 -msgid "``fraction_eval`` --> ``fraction_evaluate``" -msgstr "``fraction_eval`` --> ``fraction_evaluate``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 -msgid "``min_eval_clients`` --> ``min_evaluate_clients``" -msgstr "``min_eval_clients` --> ``min_evaluate_clients``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:62 -msgid "``eval_fn`` --> ``evaluate_fn``" -msgstr "``eval_fn`` --> ``evaluate_fn``" +"Dans certains cas, il est nécessaire d'envoyer des valeurs de " +"configuration différentes à des clients différents." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:64 +#: ../../source/how-to-configure-clients.rst:115 +#, fuzzy msgid "" -"Rename ``rnd`` to ``server_round``. This impacts multiple methods and " -"functions, for example, ``configure_fit``, ``aggregate_fit``, " -"``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``." +"This can be achieved by customizing an existing strategy or by " +":doc:`implementing a custom strategy from scratch `. 
Here's a nonsensical example that customizes ``FedAvg`` by " +"adding a custom ``\"hello\": \"world\"`` configuration key/value pair to " +"the config dict of a *single client* (only the first client in the list, " +"the other clients in this round to not receive this \"special\" config " +"value):" msgstr "" -"Renommez ``rnd`` en ``server_round``. Cela a un impact sur plusieurs " -"méthodes et fonctions, par exemple, ``configure_fit``, ``aggregate_fit``," -" ``configure_evaluate``, ``aggregate_evaluate``, et ``evaluate_fn``." - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:65 -msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" -msgstr "Ajoute ``server_round`` et ``config`` à `evaluate_fn`` :" +"Ceci peut être réalisé en personnalisant une stratégie existante ou en " +"`mettant en œuvre une stratégie personnalisée à partir de zéro " +"`_. " +"Voici un exemple absurde qui personnalise :code:`FedAvg` en ajoutant une " +"paire clé/valeur de configuration personnalisée ``\"hello\" : \"world\"``" +" au config dict d'un *seul client* (uniquement le premier client de la " +"liste, les autres clients de cette série ne recevant pas cette valeur de " +"configuration \"spéciale\") :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:67 -msgid "" -"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:``" -msgstr "" -"Flower 0.19 : ``def evaluate(parameters : NDArrays) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]]:``" +#: ../../source/how-to-configure-logging.rst:2 +#, fuzzy +msgid "Configure logging" +msgstr "Configurer les clients" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +#: ../../source/how-to-configure-logging.rst:4 msgid "" -"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " -"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " -"Scalar]]]:``" +"The Flower logger keeps track of all core events that take place in " +"federated learning workloads. 
It presents information by default " +"following a standard message format:" msgstr "" -"Flower 1.0 : ``def evaluate(server_round : int, parameters : NDArrays, " -"config : Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " -"Scalar]]]:``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 -msgid "Custom strategies" -msgstr "Stratégies personnalisées" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:73 +#: ../../source/how-to-configure-logging.rst:13 msgid "" -"The type of parameter ``failures`` has changed from " -"``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " -"BaseException]]`` (in ``aggregate_fit``) and " -"``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in " -"``aggregate_evaluate``)" +"containing relevant information including: log message level (e.g. " +"``INFO``, ``DEBUG``), a timestamp, the line where the logging took place " +"from, as well as the log message itself. In this way, the logger would " +"typically display information on your terminal as follows:" msgstr "" -"Le type du paramètre ``failures`` a changé de ``List[BaseException]`` à " -"``List[Union[Tuple[ClientProxy, FitRes], BaseException]]`` (dans " -"``aggregate_fit``) et ``List[Union[Tuple[ClientProxy, EvaluateRes], " -"BaseException]]`` (dans ``aggregate_evaluate``)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:74 -msgid "" -"The ``Strategy`` method ``evaluate`` now receives the current round of " -"federated learning/evaluation as the first parameter:" +#: ../../source/how-to-configure-logging.rst:35 +msgid "Saving log to file" msgstr "" -"La méthode ``Stratégie`` `évaluer`` reçoit maintenant le cycle actuel " -"d'apprentissage/évaluation fédéré comme premier paramètre :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:76 +#: ../../source/how-to-configure-logging.rst:37 msgid "" -"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:``" +"By default, the Flower log is outputted 
to the terminal where you launch " +"your Federated Learning workload from. This applies for both gRPC-based " +"federation (i.e. when you do ``fl.server.start_server``) and when using " +"the ``VirtualClientEngine`` (i.e. when you do " +"``fl.simulation.start_simulation``). In some situations you might want to" +" save this log to disk. You can do so by calling the " +"`fl.common.logger.configure() " +"`_" +" function. For example:" msgstr "" -"Flower 0.19 : ``def evaluate(self, parameters : Parameters) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]]:``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +#: ../../source/how-to-configure-logging.rst:59 +#, fuzzy msgid "" -"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " -"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" +"With the above, Flower will record the log you see on your terminal to " +"``log.txt``. This file will be created in the same directory as where you " +"are running the code from. If we inspect we see the log above is also " +"recorded but prefixed with ``identifier`` on each line:" msgstr "" -"Flower 1.0 : ``def evaluate(self, server_round : int, parameters : " -"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]]:``" +"Avec ce qui précède, Flower enregistrera le log que vous voyez sur votre " +"terminal dans :code:`log.txt`. Ce fichier sera créé dans le répertoire " +"depuis lequel le code est exécuté. 
Si nous inspectons nous voyons que le " +"log ci-dessus est également enregistré mais préfixé avec " +":code:`identifier` sur chaque ligne :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 -msgid "Optional improvements" -msgstr "Améliorations facultatives" +#: ../../source/how-to-configure-logging.rst:81 +msgid "Log your own messages" +msgstr "Loggez vos propres messages" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:82 +#: ../../source/how-to-configure-logging.rst:83 msgid "" -"Along with the necessary changes above, there are a number of potential " -"improvements that just became possible:" +"You might expand the information shown by default with the Flower logger " +"by adding more messages relevant to your application. You can achieve " +"this easily as follows." msgstr "" -"En plus des changements nécessaires mentionnés ci-dessus, il existe un " -"certain nombre d'améliorations potentielles qui viennent d'être rendues " -"possibles :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:84 +#: ../../source/how-to-configure-logging.rst:114 msgid "" -"Remove \"placeholder\" methods from subclasses of ``Client`` or " -"``NumPyClient``. If you, for example, use server-side evaluation, then " -"empty placeholder implementations of ``evaluate`` are no longer " -"necessary." +"In this way your logger will show, in addition to the default messages, " +"the ones introduced by the clients as specified above." msgstr "" -"Supprime les méthodes \"placeholder\" des sous-classes de ``Client`` ou " -"de ``NumPyClient``. Si tu utilises, par exemple, l'évaluation côté " -"serveur, alors les implémentations \"placeholder\" de ``evaluate`` ne " -"sont plus nécessaires." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:85 -msgid "" -"Configure the round timeout via ``start_simulation``: " -"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " -"round_timeout=600.0), ...)``" +#: ../../source/how-to-configure-logging.rst:140 +msgid "Log to a remote service" msgstr "" -"Configurez le délai d'attente de la ronde via ``start_simulation`` : " -"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " -"round_timeout=600.0), ...)``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:89 -#: ../../source/how-to-upgrade-to-flower-next.rst:317 -msgid "Further help" -msgstr "Aide supplémentaire" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:91 +#: ../../source/how-to-configure-logging.rst:142 msgid "" -"Most official `Flower code examples " -"`_ are already updated" -" to Flower 1.0, they can serve as a reference for using the Flower 1.0 " -"API. If there are further questions, `join the Flower Slack " -"`_ and use the channel ``#questions``." +"The ``fl.common.logger.configure`` function, also allows specifying a " +"host to which logs can be pushed (via ``POST``) through a native Python " +"``logging.handler.HTTPHandler``. This is a particularly useful feature in" +" ``gRPC``-based Federated Learning workloads where otherwise gathering " +"logs from all entities (i.e. the server and the clients) might be " +"cumbersome. Note that in Flower simulation, the server automatically " +"displays all logs. You can still specify a ``HTTPHandler`` should you " +"wish to backup or analyze the logs somewhere else." msgstr "" -"La plupart des `exemples de code Flower officiels " -"`_ sont déjà mis à " -"jour vers Flower 1.0, ils peuvent servir de référence pour l'utilisation " -"de l'API Flower 1.0. Si vous avez d'autres questions, `joins le Slack " -"Flower `_ et utilise le canal " -"``#questions``." 
-#: ../../source/how-to-upgrade-to-flower-next.rst:2 +#: ../../source/how-to-enable-ssl-connections.rst:2 #, fuzzy -msgid "Upgrade to Flower Next" -msgstr "Passe à Flower 1.0" +msgid "Enable SSL connections" +msgstr "Collecte centralisée des données" -#: ../../source/how-to-upgrade-to-flower-next.rst:4 +#: ../../source/how-to-enable-ssl-connections.rst:4 +#, fuzzy msgid "" -"Welcome to the migration guide for updating Flower to Flower Next! " -"Whether you're a seasoned user or just getting started, this guide will " -"help you smoothly transition your existing setup to take advantage of the" -" latest features and improvements in Flower Next, starting from version " -"1.8." +"This guide describes how an SSL-enabled secure Flower server " +"(``SuperLink``) can be started and how a Flower client (``SuperNode``) " +"can establish a secure connection to it." msgstr "" +"Ce guide décrit comment démarrer un serveur Flower sécurisé par SSL et " +"comment un client Flower peut établir une connexion sécurisée avec lui." -#: ../../source/how-to-upgrade-to-flower-next.rst:9 +#: ../../source/how-to-enable-ssl-connections.rst:8 +#, fuzzy msgid "" -"This guide shows how to reuse pre-``1.8`` Flower code with minimum code " -"changes by using the *compatibility layer* in Flower Next. In another " -"guide, we will show how to run Flower Next end-to-end with pure Flower " -"Next APIs." -msgstr "" - -#: ../../source/how-to-upgrade-to-flower-next.rst:13 -msgid "Let's dive in!" +"A complete code example demonstrating a secure connection can be found " +"`here `_." msgstr "" +"Un exemple de code complet démontrant une connexion sécurisée peut être " +"trouvé ici `_." -#: ../../source/how-to-upgrade-to-flower-next.rst:48 +#: ../../source/how-to-enable-ssl-connections.rst:11 #, fuzzy msgid "" -"Here's how to update an existing installation of Flower to Flower Next " -"with ``pip``:" +"The code example comes with a ``README.md`` file which explains how to " +"start it. 
Although it is already SSL-enabled, it might be less " +"descriptive on how it does so. Stick to this guide for a deeper " +"introduction to the topic." msgstr "" -"Voici comment mettre à jour une installation existante vers Flower 1.0 en" -" utilisant soit pip soit Poetry :" +"L'exemple de code est accompagné d'un fichier README.md qui t'expliquera " +"comment le démarrer. Bien qu'il soit déjà activé par SSL, il peut être " +"moins descriptif sur la façon de procéder. Tiens-toi en à ce guide pour " +"une introduction plus approfondie sur le sujet." -#: ../../source/how-to-upgrade-to-flower-next.rst:54 -msgid "or if you need Flower Next with simulation:" -msgstr "" +#: ../../source/how-to-enable-ssl-connections.rst:16 +msgid "Certificates" +msgstr "Certificats" -#: ../../source/how-to-upgrade-to-flower-next.rst:61 +#: ../../source/how-to-enable-ssl-connections.rst:18 +#, fuzzy msgid "" -"Ensure you set the following version constraint in your " -"``requirements.txt``" -msgstr "" - -#: ../../source/how-to-upgrade-to-flower-next.rst:71 -msgid "or ``pyproject.toml``:" +"Using SSL-enabled connections requires certificates to be passed to the " +"server and client. For the purpose of this guide we are going to generate" +" self-signed certificates. As this can become quite complex we are going " +"to ask you to run the script in ``examples/advanced-" +"tensorflow/certificates/generate.sh`` with the following command " +"sequence:" msgstr "" +"L'utilisation de connexions compatibles avec le protocole SSL nécessite " +"que des certificats soient transmis au serveur et au client. Pour les " +"besoins de ce guide, nous allons générer des certificats auto-signés. 
" +"Comme cela peut devenir assez complexe, nous allons te demander " +"d'exécuter le script dans :code:`examples/advanced-" +"tensorflow/certificates/generate.sh`" -#: ../../source/how-to-upgrade-to-flower-next.rst:82 +#: ../../source/how-to-enable-ssl-connections.rst:29 #, fuzzy -msgid "Using Poetry" -msgstr "Utiliser la poésie (recommandé)" +msgid "" +"This will generate the certificates in ``examples/advanced-" +"tensorflow/.cache/certificates``." +msgstr "" +"Cela générera les certificats dans :code:`examples/advanced-" +"tensorflow/.cache/certificates`." -#: ../../source/how-to-upgrade-to-flower-next.rst:84 +#: ../../source/how-to-enable-ssl-connections.rst:32 #, fuzzy msgid "" -"Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " -"(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " -"running ``poetry install``)." +"The approach for generating SSL certificates in the context of this " +"example can serve as an inspiration and starting point, but it should not" +" be used as a reference for production environments. Please refer to " +"other sources regarding the issue of correctly generating certificates " +"for production environments. For non-critical prototyping or research " +"projects, it might be sufficient to use the self-signed certificates " +"generated using the scripts mentioned in this guide." msgstr "" -"Poetry : mettez à jour la dépendance ``flwr`` dans ``pyproject.toml`` " -"puis réinstallez (n'oubliez pas de supprimer ``poetry.lock`` via ``rm " -"poetry.lock`` avant d'exécuter ``poetry install``)." +"L'approche de la génération des certificats SSL dans cet exemple peut " +"servir d'inspiration et de point de départ, mais ne doit pas être " +"considérée comme complète pour les environnements de production." 
-#: ../../source/how-to-upgrade-to-flower-next.rst:86 +#: ../../source/how-to-enable-ssl-connections.rst:40 #, fuzzy -msgid "" -"Ensure you set the following version constraint in your " -"``pyproject.toml``:" -msgstr "Augmente la version mineure de ``pyproject.toml`` d'une unité." +msgid "Server (SuperLink)" +msgstr "flower-superlink" -#: ../../source/how-to-upgrade-to-flower-next.rst:102 +#: ../../source/how-to-enable-ssl-connections.rst:42 +#, fuzzy msgid "" -"In Flower Next, the *infrastructure* and *application layers* have been " -"decoupled. Instead of starting a client in code via ``start_client()``, " -"you create a |clientapp_link|_ and start it via the command line. Instead" -" of starting a server in code via ``start_server()``, you create a " -"|serverapp_link|_ and start it via the command line. The long-running " -"components of server and client are called SuperLink and SuperNode. The " -"following non-breaking changes that require manual updates and allow you " -"to run your project both in the traditional way and in the Flower Next " -"way:" +"Use the following terminal command to start a server (SuperLink) that uses" +" the previously generated certificates:" msgstr "" +"Nous allons maintenant montrer comment écrire un client qui utilise les " +"scripts générés précédemment :" -#: ../../source/how-to-upgrade-to-flower-next.rst:109 -#, fuzzy -msgid "|clientapp_link|_" -msgstr "client" - -#: ../../source/how-to-upgrade-to-flower-next.rst:110 +#: ../../source/how-to-enable-ssl-connections.rst:52 msgid "" -"Wrap your existing client with |clientapp_link|_ instead of launching it " -"via |startclient_link|_. Here's an example:" +"When providing certificates, the server expects a tuple of three " +"certificate paths: CA certificate, server certificate and server private" +" key." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:132 +#: ../../source/how-to-enable-ssl-connections.rst:56 #, fuzzy -msgid "|serverapp_link|_" -msgstr "serveur" +msgid "Client (SuperNode)" +msgstr "Codes d'état du client." -#: ../../source/how-to-upgrade-to-flower-next.rst:133 +#: ../../source/how-to-enable-ssl-connections.rst:58 +#, fuzzy msgid "" -"Wrap your existing strategy with |serverapp_link|_ instead of starting " -"the server via |startserver_link|_. Here's an example:" +"Use the following terminal command to start a client (SuperNode) that " +"uses the previously generated certificates:" msgstr "" +"Nous allons maintenant montrer comment écrire un client qui utilise les " +"scripts générés précédemment :" -#: ../../source/how-to-upgrade-to-flower-next.rst:154 -msgid "Deployment" +#: ../../source/how-to-enable-ssl-connections.rst:67 +#, fuzzy +msgid "" +"When setting ``root_certificates``, the client expects a file path to " +"PEM-encoded root certificates." msgstr "" +"En définissant :code:`root_certificates`, le client s'attend à recevoir " +"les certificats racine codés en PEM sous forme de chaîne d'octets. Nous " +"utilisons à nouveau :code:`Path` pour simplifier la lecture de ces " +"certificats sous forme de chaînes d'octets." -#: ../../source/how-to-upgrade-to-flower-next.rst:155 +#: ../../source/how-to-enable-ssl-connections.rst:73 +#, fuzzy msgid "" -"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " -"in sequence, |flowernext_clientapp_link|_ (2x) and " -"|flowernext_serverapp_link|_. There is no need to execute `client.py` and" -" `server.py` as Python scripts." +"You should now have learned how to generate self-signed certificates " +"using the given script, start an SSL-enabled server and have a client " +"establish a secure connection to it." 
msgstr "" +"Tu devrais maintenant avoir appris à générer des certificats auto-signés " +"à l'aide du script donné, à démarrer un serveur compatible SSL et à " +"demander à un client d'établir une connexion sécurisée avec lui." + +#: ../../source/how-to-enable-ssl-connections.rst:78 +#, fuzzy +msgid "Additional resources" +msgstr "Ressources supplémentaires" -#: ../../source/how-to-upgrade-to-flower-next.rst:158 +#: ../../source/how-to-enable-ssl-connections.rst:80 msgid "" -"Here's an example to start the server without HTTPS (only for " -"prototyping):" +"These additional sources might be relevant if you would like to dive " +"deeper into the topic of certificates:" msgstr "" +"Ces sources supplémentaires peuvent être pertinentes si tu souhaites " +"approfondir le sujet des certificats :" + +#: ../../source/how-to-enable-ssl-connections.rst:83 +msgid "`Let's Encrypt `_" +msgstr "`Let's Encrypt `_" + +#: ../../source/how-to-enable-ssl-connections.rst:84 +msgid "`certbot `_" +msgstr "`certbot `_" + +#: ../../source/how-to-implement-strategies.rst:2 +#, fuzzy +msgid "Implement strategies" +msgstr "Mettre en place des stratégies" -#: ../../source/how-to-upgrade-to-flower-next.rst:174 +#: ../../source/how-to-implement-strategies.rst:4 msgid "" -"Here's another example to start with HTTPS. Use the ``--certificates`` " -"command line argument to pass paths to (CA certificate, server " -"certificate, and server private key)." +"The strategy abstraction enables implementation of fully custom " +"strategies. A strategy is basically the federated learning algorithm that" +" runs on the server. Strategies decide how to sample clients, how to " +"configure clients for training, how to aggregate updates, and how to " +"evaluate models. Flower provides a few built-in strategies which are " +"based on the same API described below." msgstr "" +"L'abstraction de la stratégie permet de mettre en œuvre des stratégies " +"entièrement personnalisées. 
Une stratégie est essentiellement " +"l'algorithme d'apprentissage fédéré qui s'exécute sur le serveur. Les " +"stratégies décident comment échantillonner les clients, comment " +"configurer les clients pour la formation, comment agréger les mises à " +"jour et comment évaluer les modèles. Flower fournit quelques stratégies " +"intégrées qui sont basées sur la même API que celle décrite ci-dessous." -#: ../../source/how-to-upgrade-to-flower-next.rst:201 +#: ../../source/how-to-implement-strategies.rst:11 #, fuzzy -msgid "Simulation in CLI" -msgstr "Simulation de moniteur" +msgid "The ``Strategy`` abstraction" +msgstr "L'abstraction :code:`Stratégie`" -#: ../../source/how-to-upgrade-to-flower-next.rst:202 +#: ../../source/how-to-implement-strategies.rst:13 +#, fuzzy msgid "" -"Wrap your existing client and strategy with |clientapp_link|_ and " -"|serverapp_link|_, respectively. There is no need to use |startsim_link|_" -" anymore. Here's an example:" +"All strategy implementation are derived from the abstract base class " +"``flwr.server.strategy.Strategy``, both built-in implementations and " +"third party implementations. This means that custom strategy " +"implementations have the exact same capabilities at their disposal as " +"built-in ones." msgstr "" +"Toutes les implémentations de stratégies sont dérivées de la classe de " +"base abstraite :code:`flwr.server.strategy.Strategy`, qu'il s'agisse " +"d'implémentations intégrées ou d'implémentations tierces. Cela signifie " +"que les implémentations de stratégies personnalisées ont exactement les " +"mêmes capacités à leur disposition que les implémentations intégrées." -#: ../../source/how-to-upgrade-to-flower-next.rst:232 +#: ../../source/how-to-implement-strategies.rst:18 msgid "" -"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " -"``client_app`` object in the code instead of executing the Python script." 
-" Here's an example (assuming the ``server_app`` and ``client_app`` " -"objects are in a ``sim.py`` module):" +"The strategy abstraction defines a few abstract methods that need to be " +"implemented:" msgstr "" +"L'abstraction de la stratégie définit quelques méthodes abstraites qui " +"doivent être mises en œuvre :" -#: ../../source/how-to-upgrade-to-flower-next.rst:249 +#: ../../source/how-to-implement-strategies.rst:67 +#, fuzzy msgid "" -"Set default resources for each |clientapp_link|_ using the ``--backend-" -"config`` command line argument instead of setting the " -"``client_resources`` argument in |startsim_link|_. Here's an example:" +"Creating a new strategy means implementing a new ``class`` (derived from " +"the abstract base class ``Strategy``) that implements for the previously " +"shown abstract methods:" msgstr "" +"La création d'une nouvelle stratégie implique la mise en œuvre d'une " +"nouvelle :code:`classe` (dérivée de la classe de base abstraite " +":code:`Stratégie`) qui met en œuvre les méthodes abstraites présentées " +"précédemment :" -#: ../../source/how-to-upgrade-to-flower-next.rst:275 -msgid "Simulation in a Notebook" -msgstr "" +#: ../../source/how-to-implement-strategies.rst:97 +msgid "The Flower server calls these methods in the following order:" +msgstr "Le serveur Flower appelle ces méthodes dans l'ordre suivant :" + +#: ../../source/how-to-implement-strategies.rst:174 +msgid "The following sections describe each of those methods in more detail." +msgstr "Les sections suivantes décrivent chacune de ces méthodes plus en détail." + +#: ../../source/how-to-implement-strategies.rst:177 +#, fuzzy +msgid "The ``initialize_parameters`` method" +msgstr "La méthode :code:`initialize_parameters` (initialisation des paramètres)" -#: ../../source/how-to-upgrade-to-flower-next.rst:276 +#: ../../source/how-to-implement-strategies.rst:179 +#, fuzzy msgid "" -"Run |runsim_link|_ in your notebook instead of |startsim_link|_. 
Here's " -"an example:" +"``initialize_parameters`` is called only once, at the very beginning of " +"an execution. It is responsible for providing the initial global model " +"parameters in a serialized form (i.e., as a ``Parameters`` object)." msgstr "" +":code:`initialize_parameters` n'est appelé qu'une seule fois, au tout " +"début d'une exécution. Il est chargé de fournir les paramètres initiaux " +"du modèle global sous une forme sérialisée (c'est-à-dire sous la forme " +"d'un objet :code:`Parameters`)." -#: ../../source/how-to-upgrade-to-flower-next.rst:319 +#: ../../source/how-to-implement-strategies.rst:183 #, fuzzy msgid "" -"Some official `Flower code examples `_ " -"are already updated to Flower Next so they can serve as a reference for " -"using the Flower Next API. If there are further questions, `join the " -"Flower Slack `_ and use the channel " -"``#questions``. You can also `participate in Flower Discuss " -"`_ where you can find us answering questions," -" or share and learn from others about migrating to Flower Next." +"Built-in strategies return user-provided initial parameters. The " +"following example shows how initial parameters can be passed to " +"``FedAvg``:" msgstr "" -"La plupart des `exemples de code Flower officiels " -"`_ sont déjà mis à " -"jour vers Flower 1.0, ils peuvent servir de référence pour l'utilisation " -"de l'API Flower 1.0. Si vous avez d'autres questions, `joins le Slack " -"Flower `_ et utilise le canal " -"``#questions``." +"Les stratégies intégrées renvoient les paramètres initiaux fournis par " +"l'utilisateur. 
L'exemple suivant montre comment les paramètres initiaux " +"peuvent être transmis à :code:`FedAvg` :" -#: ../../source/how-to-upgrade-to-flower-next.rst:325 +#: ../../source/how-to-implement-strategies.rst:209 #, fuzzy -msgid "Important" -msgstr "Changements importants :" - -#: ../../source/how-to-upgrade-to-flower-next.rst:328 msgid "" -"As we continuously enhance Flower Next at a rapid pace, we'll be " -"periodically updating this guide. Please feel free to share any feedback " -"with us!" +"The Flower server will call ``initialize_parameters``, which either " +"returns the parameters that were passed to ``initial_parameters``, or " +"``None``. If no parameters are returned from ``initialize_parameters`` " +"(i.e., ``None``), the server will randomly select one client and ask it " +"to provide its parameters. This is a convenience feature and not " +"recommended in practice, but it can be useful for prototyping. In " +"practice, it is recommended to always use server-side parameter " +"initialization." msgstr "" +"Le serveur Flower appelle :code:`initialize_parameters`, qui renvoie les " +"paramètres passés à :code:`initial_parameters`, ou :code:`None`. Si aucun" +" paramètre n'est renvoyé par :code:`initialize_parameters` (c'est-à-dire " +":code:`None`), le serveur sélectionne au hasard un client et lui demande " +"de fournir ses paramètres. Il s'agit d'une fonction de commodité qui " +"n'est pas recommandée dans la pratique, mais qui peut être utile pour le " +"prototypage. Dans la pratique, il est recommandé de toujours utiliser " +"l'initialisation des paramètres du côté du serveur." -#: ../../source/how-to-upgrade-to-flower-next.rst:334 -msgid "Happy migrating! 🚀" +#: ../../source/how-to-implement-strategies.rst:218 +msgid "" +"Server-side parameter initialization is a powerful mechanism. It can be " +"used, for example, to resume training from a previously saved checkpoint." 
+" It is also the fundamental capability needed to implement hybrid " +"approaches, for example, to fine-tune a pre-trained model using federated" +" learning." msgstr "" +"L'initialisation des paramètres côté serveur est un mécanisme puissant. " +"Elle peut être utilisée, par exemple, pour reprendre l'entraînement à " +"partir d'un point de contrôle précédemment sauvegardé. C'est également la" +" capacité fondamentale nécessaire pour mettre en œuvre des approches " +"hybrides, par exemple, pour affiner un modèle pré-entraîné à l'aide de " +"l'apprentissage fédéré." -#: ../../source/how-to-use-built-in-mods.rst:2 -msgid "Use Built-in Mods" -msgstr "" +#: ../../source/how-to-implement-strategies.rst:224 +#, fuzzy +msgid "The ``configure_fit`` method" +msgstr "La méthode :code:`configure_fit`" -#: ../../source/how-to-use-built-in-mods.rst:4 +#: ../../source/how-to-implement-strategies.rst:226 +#, fuzzy msgid "" -"**Note: This tutorial covers experimental features. The functionality and" -" interfaces may change in future versions.**" +"``configure_fit`` is responsible for configuring the upcoming round of " +"training. What does *configure* mean in this context? Configuring a round" +" means selecting clients and deciding what instructions to send to these " +"clients. The signature of ``configure_fit`` makes this clear:" msgstr "" +":code:`configure_fit` est chargé de configurer le prochain tour de " +"formation. Que signifie *configurer* dans ce contexte ? Configurer un " +"tour signifie sélectionner des clients et décider des instructions à leur" +" envoyer. La signature de :code:`configure_fit` l'indique clairement :" -#: ../../source/how-to-use-built-in-mods.rst:6 +#: ../../source/how-to-implement-strategies.rst:239 +#, fuzzy msgid "" -"In this tutorial, we will learn how to utilize built-in mods to augment " -"the behavior of a ``ClientApp``. 
Mods (sometimes also called Modifiers) " -"allow us to perform operations before and after a task is processed in " -"the ``ClientApp``." +"The return value is a list of tuples, each representing the instructions " +"that will be sent to a particular client. Strategy implementations " +"usually perform the following steps in ``configure_fit``:" msgstr "" +"La valeur de retour est une liste de tuples, chacun représentant les " +"instructions qui seront envoyées à un client particulier. Les " +"implémentations de stratégies effectuent généralement les étapes " +"suivantes dans :code:`configure_fit` :" -#: ../../source/how-to-use-built-in-mods.rst:9 -msgid "What are Mods?" +#: ../../source/how-to-implement-strategies.rst:243 +#: ../../source/how-to-implement-strategies.rst:307 +#, fuzzy +msgid "" +"Use the ``client_manager`` to randomly sample all (or a subset of) " +"available clients (each represented as a ``ClientProxy`` object)" msgstr "" +"Utilise le :code:`client_manager` pour échantillonner au hasard tous les " +"clients disponibles (ou un sous-ensemble d'entre eux) (chacun représenté " +"par un objet :code:`ClientProxy`)" -#: ../../source/how-to-use-built-in-mods.rst:11 +#: ../../source/how-to-implement-strategies.rst:245 +#, fuzzy msgid "" -"A Mod is a callable that wraps around a ``ClientApp``. It can manipulate " -"or inspect the incoming ``Message`` and the resulting outgoing " -"``Message``. The signature for a ``Mod`` is as follows:" -msgstr "" - -#: ../../source/how-to-use-built-in-mods.rst:18 -msgid "A typical mod function might look something like this:" -msgstr "" - -#: ../../source/how-to-use-built-in-mods.rst:31 -msgid "Using Mods" -msgstr "" - -#: ../../source/how-to-use-built-in-mods.rst:33 -msgid "To use mods in your ``ClientApp``, you can follow these steps:" -msgstr "" - -#: ../../source/how-to-use-built-in-mods.rst:36 -msgid "1. 
Import the required mods" -msgstr "" - -#: ../../source/how-to-use-built-in-mods.rst:38 -msgid "First, import the built-in mod you intend to use:" -msgstr "" - -#: ../../source/how-to-use-built-in-mods.rst:46 -msgid "2. Define your client function" +"Pair each ``ClientProxy`` with the same ``FitIns`` holding the current " +"global model ``parameters`` and ``config`` dict" msgstr "" +"Associe chaque :code:`ClientProxy` au même :code:`FitIns` contenant le " +"modèle global actuel :code:`parameters` et :code:`config` dict" -#: ../../source/how-to-use-built-in-mods.rst:48 +#: ../../source/how-to-implement-strategies.rst:248 +#, fuzzy msgid "" -"Define your client function (``client_fn``) that will be wrapped by the " -"mod(s):" -msgstr "" - -#: ../../source/how-to-use-built-in-mods.rst:57 -msgid "3. Create the ``ClientApp`` with mods" +"More sophisticated implementations can use ``configure_fit`` to implement" +" custom client selection logic. A client will only participate in a round" +" if the corresponding ``ClientProxy`` is included in the list returned " +"from ``configure_fit``." msgstr "" +"Les implémentations plus sophistiquées peuvent utiliser " +":code:`configure_fit` pour mettre en œuvre une logique de sélection des " +"clients personnalisée. Un client ne participera à un tour que si le " +":code:`ClientProxy` correspondant est inclus dans la liste renvoyée par " +":code:`configure_fit`." -#: ../../source/how-to-use-built-in-mods.rst:59 +#: ../../source/how-to-implement-strategies.rst:254 +#, fuzzy msgid "" -"Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " -"argument. The order in which you provide the mods matters:" +"The structure of this return value provides a lot of flexibility to the " +"user. Since instructions are defined on a per-client basis, different " +"instructions can be sent to each client. 
This enables custom strategies " +"to train, for example, different models on different clients, or use " +"different hyperparameters on different clients (via the ``config`` dict)." msgstr "" +"La structure de cette valeur de retour offre beaucoup de souplesse à " +"l'utilisateur. Comme les instructions sont définies par client, des " +"instructions différentes peuvent être envoyées à chaque client, ce qui " +"permet d'élaborer des stratégies personnalisées pour former, par exemple," +" différents modèles sur différents clients, ou utiliser différents " +"hyperparamètres sur différents clients (via le dict :code:`config`)." -#: ../../source/how-to-use-built-in-mods.rst:72 +#: ../../source/how-to-implement-strategies.rst:261 #, fuzzy -msgid "Order of execution" -msgstr "Dépréciations" +msgid "The ``aggregate_fit`` method" +msgstr "La méthode :code:`aggregate_fit` (agrégation)" -#: ../../source/how-to-use-built-in-mods.rst:74 +#: ../../source/how-to-implement-strategies.rst:263 +#, fuzzy msgid "" -"When the ``ClientApp`` runs, the mods are executed in the order they are " -"provided in the list:" -msgstr "" - -#: ../../source/how-to-use-built-in-mods.rst:76 -msgid "``example_mod_1`` (outermost mod)" -msgstr "" - -#: ../../source/how-to-use-built-in-mods.rst:77 -msgid "``example_mod_2`` (next mod)" +"``aggregate_fit`` is responsible for aggregating the results returned by " +"the clients that were selected and asked to train in ``configure_fit``." msgstr "" +":code:`aggregate_fit` est chargé d'agréger les résultats renvoyés par les" +" clients qui ont été sélectionnés et à qui on a demandé de s'entraîner " +"dans :code:`configure_fit`." 
-#: ../../source/how-to-use-built-in-mods.rst:78 +#: ../../source/how-to-implement-strategies.rst:277 +#, fuzzy msgid "" -"Message handler (core function that handles the incoming ``Message`` and " -"returns the outgoing ``Message``)" +"Of course, failures can happen, so there is no guarantee that the server " +"will get results from all the clients it sent instructions to (via " +"``configure_fit``). ``aggregate_fit`` therefore receives a list of " +"``results``, but also a list of ``failures``." msgstr "" +"Bien sûr, des échecs peuvent se produire, il n'y a donc aucune garantie " +"que le serveur obtienne des résultats de tous les clients auxquels il a " +"envoyé des instructions (via :code:`configure_fit`). " +":code:`aggregate_fit` reçoit donc une liste de :code:`résultats`, mais " +"aussi une liste de :code:`échecs`." -#: ../../source/how-to-use-built-in-mods.rst:79 -msgid "``example_mod_2`` (on the way back)" +#: ../../source/how-to-implement-strategies.rst:282 +#, fuzzy +msgid "" +"``aggregate_fit`` returns an optional ``Parameters`` object and a " +"dictionary of aggregated metrics. The ``Parameters`` return value is " +"optional because ``aggregate_fit`` might decide that the results provided" +" are not sufficient for aggregation (e.g., too many failures)." msgstr "" +":code:`aggregate_fit` renvoie un objet :code:`Parameters` facultatif et " +"un dictionnaire de métriques agrégées. La valeur de retour " +":code:`Parameters` est facultative car :code:`aggregate_fit` peut décider" +" que les résultats fournis ne sont pas suffisants pour l'agrégation (par " +"exemple, trop d'échecs)." 
-#: ../../source/how-to-use-built-in-mods.rst:80 -msgid "``example_mod_1`` (outermost mod on the way back)" -msgstr "" +#: ../../source/how-to-implement-strategies.rst:288 +#, fuzzy +msgid "The ``configure_evaluate`` method" +msgstr "La méthode :code:`configure_evaluate` (en anglais)" -#: ../../source/how-to-use-built-in-mods.rst:82 +#: ../../source/how-to-implement-strategies.rst:290 +#, fuzzy msgid "" -"Each mod has a chance to inspect and modify the incoming ``Message`` " -"before passing it to the next mod, and likewise with the outgoing " -"``Message`` before returning it up the stack." +"``configure_evaluate`` is responsible for configuring the upcoming round " +"of evaluation. What does *configure* mean in this context? Configuring a " +"round means selecting clients and deciding what instructions to send to " +"these clients. The signature of ``configure_evaluate`` makes this clear:" msgstr "" +":code:`configure_evaluate` est chargé de configurer le prochain tour " +"d'évaluation. Que signifie *configurer* dans ce contexte ? Configurer un " +"tour signifie sélectionner des clients et décider des instructions à leur" +" envoyer. La signature de :code:`configure_evaluate` l'indique clairement" +" :" -#: ../../source/how-to-use-built-in-mods.rst:87 +#: ../../source/how-to-implement-strategies.rst:303 +#, fuzzy msgid "" -"By following this guide, you have learned how to effectively use mods to " -"enhance your ``ClientApp``'s functionality. Remember that the order of " -"mods is crucial and affects how the input and output are processed." +"The return value is a list of tuples, each representing the instructions " +"that will be sent to a particular client. Strategy implementations " +"usually perform the following steps in ``configure_evaluate``:" msgstr "" +"La valeur de retour est une liste de tuples, chacun représentant les " +"instructions qui seront envoyées à un client particulier. 
Les " +"implémentations de stratégies effectuent généralement les étapes " +"suivantes dans :code:`configure_evaluate` :" -#: ../../source/how-to-use-built-in-mods.rst:89 -msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" +#: ../../source/how-to-implement-strategies.rst:309 +#, fuzzy +msgid "" +"Pair each ``ClientProxy`` with the same ``EvaluateIns`` holding the " +"current global model ``parameters`` and ``config`` dict" msgstr "" +"Associe chaque :code:`ClientProxy` au même :code:`EvaluateIns` contenant " +"le modèle global actuel :code:`parameters` et :code:`config` dict" -#: ../../source/how-to-use-differential-privacy.rst:2 +#: ../../source/how-to-implement-strategies.rst:312 #, fuzzy -msgid "Use Differential Privacy" -msgstr "Confidentialité différentielle" - -#: ../../source/how-to-use-differential-privacy.rst:3 msgid "" -"This guide explains how you can utilize differential privacy in the " -"Flower framework. If you are not yet familiar with differential privacy, " -"you can refer to :doc:`explanation-differential-privacy`." +"More sophisticated implementations can use ``configure_evaluate`` to " +"implement custom client selection logic. A client will only participate " +"in a round if the corresponding ``ClientProxy`` is included in the list " +"returned from ``configure_evaluate``." msgstr "" +"Les implémentations plus sophistiquées peuvent utiliser " +":code:`configure_evaluate` pour mettre en œuvre une logique de sélection " +"des clients personnalisée. Un client ne participera à un tour que si le " +":code:`ClientProxy` correspondant est inclus dans la liste renvoyée par " +":code:`configure_evaluate`." -#: ../../source/how-to-use-differential-privacy.rst:7 +#: ../../source/how-to-implement-strategies.rst:318 +#, fuzzy msgid "" -"Differential Privacy in Flower is in a preview phase. 
If you plan to use " -"these features in a production environment with sensitive data, feel free" -" contact us to discuss your requirements and to receive guidance on how " -"to best use these features." +"The structure of this return value provides a lot of flexibility to the " +"user. Since instructions are defined on a per-client basis, different " +"instructions can be sent to each client. This enables custom strategies " +"to evaluate, for example, different models on different clients, or use " +"different hyperparameters on different clients (via the ``config`` dict)." msgstr "" +"La structure de cette valeur de retour offre beaucoup de souplesse à " +"l'utilisateur. Comme les instructions sont définies par client, des " +"instructions différentes peuvent être envoyées à chaque client. Cela " +"permet aux stratégies personnalisées d'évaluer, par exemple, différents " +"modèles sur différents clients, ou d'utiliser différents hyperparamètres " +"sur différents clients (via le dict :code:`config`)." -#: ../../source/how-to-use-differential-privacy.rst:12 +#: ../../source/how-to-implement-strategies.rst:325 +#, fuzzy +msgid "The ``aggregate_evaluate`` method" +msgstr "La méthode :code:`aggregate_evaluate` (agréger_évaluer)" + +#: ../../source/how-to-implement-strategies.rst:327 +#, fuzzy msgid "" -"This approach consists of two seprate phases: clipping of the updates and" -" adding noise to the aggregated model. For the clipping phase, Flower " -"framework has made it possible to decide whether to perform clipping on " -"the server side or the client side." +"``aggregate_evaluate`` is responsible for aggregating the results " +"returned by the clients that were selected and asked to evaluate in " +"``configure_evaluate``." msgstr "" +":code:`aggregate_evaluate` est chargé d'agréger les résultats renvoyés " +"par les clients qui ont été sélectionnés et à qui l'on a demandé " +"d'évaluer dans :code:`configure_evaluate`." 
-#: ../../source/how-to-use-differential-privacy.rst:15 +#: ../../source/how-to-implement-strategies.rst:341 +#, fuzzy msgid "" -"**Server-side Clipping**: This approach has the advantage of the server " -"enforcing uniform clipping across all clients' updates and reducing the " -"communication overhead for clipping values. However, it also has the " -"disadvantage of increasing the computational load on the server due to " -"the need to perform the clipping operation for all clients." +"Of course, failures can happen, so there is no guarantee that the server " +"will get results from all the clients it sent instructions to (via " +"``configure_evaluate``). ``aggregate_evaluate`` therefore receives a list" +" of ``results``, but also a list of ``failures``." msgstr "" +"Bien sûr, des échecs peuvent se produire, il n'y a donc aucune garantie " +"que le serveur obtienne des résultats de tous les clients auxquels il a " +"envoyé des instructions (via :code:`configure_evaluate`). " +":code:`aggregate_evaluate` reçoit donc une liste de :code:`résultats`, " +"mais aussi une liste d' :code:`échecs`." -#: ../../source/how-to-use-differential-privacy.rst:16 +#: ../../source/how-to-implement-strategies.rst:346 +#, fuzzy msgid "" -"**Client-side Clipping**: This approach has the advantage of reducing the" -" computational overhead on the server. However, it also has the " -"disadvantage of lacking centralized control, as the server has less " -"control over the clipping process." +"``aggregate_evaluate`` returns an optional ``float`` (loss) and a " +"dictionary of aggregated metrics. The ``float`` return value is optional " +"because ``aggregate_evaluate`` might decide that the results provided are" +" not sufficient for aggregation (e.g., too many failures)." msgstr "" +":code:`aggregate_evaluate` renvoie un :code:`float` facultatif (perte) et" +" un dictionnaire de mesures agrégées. 
La valeur de retour :code:`float` " +"est facultative car :code:`aggregate_evaluate` peut décider que les " +"résultats fournis ne sont pas suffisants pour l'agrégation (par exemple, " +"trop d'échecs)." -#: ../../source/how-to-use-differential-privacy.rst:21 +#: ../../source/how-to-implement-strategies.rst:352 #, fuzzy -msgid "Server-side Clipping" -msgstr "Logique côté serveur" +msgid "The ``evaluate`` method" +msgstr "La méthode :code:`évaluer`" -#: ../../source/how-to-use-differential-privacy.rst:22 +#: ../../source/how-to-implement-strategies.rst:354 +#, fuzzy msgid "" -"For central DP with server-side clipping, there are two :code:`Strategy` " -"classes that act as wrappers around the actual :code:`Strategy` instance " -"(for example, :code:`FedAvg`). The two wrapper classes are " -":code:`DifferentialPrivacyServerSideFixedClipping` and " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and " -"adaptive clipping." +"``evaluate`` is responsible for evaluating model parameters on the " +"server-side. Having ``evaluate`` in addition to " +"``configure_evaluate``/``aggregate_evaluate`` enables strategies to " +"perform both servers-side and client-side (federated) evaluation." msgstr "" +"le fait d'avoir :code:`evaluate` en plus de " +":code:`configure_evaluate`/:code:`aggregate_evaluate` permet aux " +"stratégies d'effectuer des évaluations à la fois côté serveur et côté " +"client (fédéré)." -#: ../../source/how-to-use-differential-privacy.rst:-1 +#: ../../source/how-to-implement-strategies.rst:364 #, fuzzy -msgid "server side clipping" -msgstr "Logique côté serveur" - -#: ../../source/how-to-use-differential-privacy.rst:31 msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use server-" -"side fixed clipping using the " -":code:`DifferentialPrivacyServerSideFixedClipping` wrapper class. 
The " -"same approach can be used with " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " -"corresponding input parameters." +"The return value is again optional because the strategy might not need to" +" implement server-side evaluation or because the user-defined " +"``evaluate`` method might not complete successfully (e.g., it might fail " +"to load the server-side evaluation data)." msgstr "" +"La valeur de retour est à nouveau facultative parce que la stratégie peut" +" ne pas avoir besoin de mettre en œuvre l'évaluation côté serveur ou " +"parce que la méthode :code:`evaluate` définie par l'utilisateur peut ne " +"pas se terminer avec succès (par exemple, elle peut échouer à charger les" +" données de l'évaluation côté serveur)." -#: ../../source/how-to-use-differential-privacy.rst:52 +#: ../../source/how-to-install-flower.rst:2 #, fuzzy -msgid "Client-side Clipping" -msgstr "Logique côté client" +msgid "Install Flower" +msgstr "Installer Flower" -#: ../../source/how-to-use-differential-privacy.rst:53 -msgid "" -"For central DP with client-side clipping, the server sends the clipping " -"value to selected clients on each round. Clients can use existing Flower " -":code:`Mods` to perform the clipping. Two mods are available for fixed " -"and adaptive client-side clipping: :code:`fixedclipping_mod` and " -":code:`adaptiveclipping_mod` with corresponding server-side wrappers " -":code:`DifferentialPrivacyClientSideFixedClipping` and " -":code:`DifferentialPrivacyClientSideAdaptiveClipping`." 
+#: ../../source/how-to-install-flower.rst:5 +#, fuzzy +msgid "Python version" +msgstr "Version Python" + +#: ../../source/how-to-install-flower.rst:11 +msgid "Install stable release" +msgstr "Installe la version stable" + +#: ../../source/how-to-install-flower.rst:14 +#: ../../source/how-to-upgrade-to-flower-next.rst:66 +msgid "Using pip" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:-1 +#: ../../source/how-to-install-flower.rst:16 #, fuzzy -msgid "client side clipping" -msgstr "Logique côté client" - -#: ../../source/how-to-use-differential-privacy.rst:63 -msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use " -"differential privacy with client-side fixed clipping using both the " -":code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on " -"the client, :code:`fixedclipping_mod`:" +msgid "Stable releases are available on `PyPI `_:" msgstr "" +"Les versions stables sont disponibles sur `PyPI " +"`_: :" -#: ../../source/how-to-use-differential-privacy.rst:80 +#: ../../source/how-to-install-flower.rst:22 +#, fuzzy msgid "" -"In addition to the server-side strategy wrapper, the :code:`ClientApp` " -"needs to configure the matching :code:`fixedclipping_mod` to perform the " -"client-side clipping:" +"For simulations that use the Virtual Client Engine, ``flwr`` should be " +"installed with the ``simulation`` extra:" msgstr "" +"Pour les simulations qui utilisent le moteur de client virtuel, ``flwr`` " +"doit être installé avec l'option ``simulation``: :" -#: ../../source/how-to-use-differential-privacy.rst:97 -msgid "" -"To utilize local differential privacy (DP) and add noise to the client " -"model parameters before transmitting them to the server in Flower, you " -"can use the `LocalDpMod`. The following hyperparameters need to be set: " -"clipping norm value, sensitivity, epsilon, and delta." 
+#: ../../source/how-to-install-flower.rst:30 +msgid "Using conda (or mamba)" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:-1 -msgid "local DP mod" +#: ../../source/how-to-install-flower.rst:32 +msgid "Flower can also be installed from the ``conda-forge`` channel." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:104 -msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" +#: ../../source/how-to-install-flower.rst:34 +msgid "" +"If you have not added ``conda-forge`` to your channels, you will first " +"need to run the following:" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:122 +#: ../../source/how-to-install-flower.rst:42 msgid "" -"Please note that the order of mods, especially those that modify " -"parameters, is important when using multiple modifiers. Typically, " -"differential privacy (DP) modifiers should be the last to operate on " -"parameters." +"Once the ``conda-forge`` channel has been enabled, ``flwr`` can be " +"installed with ``conda``:" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:125 -msgid "Local Training using Privacy Engines" +#: ../../source/how-to-install-flower.rst:49 +msgid "or with ``mamba``:" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:126 +#: ../../source/how-to-install-flower.rst:56 +msgid "Verify installation" +msgstr "Vérifie l'installation" + +#: ../../source/how-to-install-flower.rst:58 +#, fuzzy msgid "" -"For ensuring data instance-level privacy during local model training on " -"the client side, consider leveraging privacy engines such as Opacus and " -"TensorFlow Privacy. For examples of using Flower with these engines, " -"please refer to the Flower examples directory (`Opacus " -"`_, `Tensorflow" -" Privacy `_)." +"The following command can be used to verify if Flower was successfully " +"installed. 
If everything worked, it should print the version of Flower to" +" the command line:" msgstr "" +"La commande suivante peut être utilisée pour vérifier si Flower a été " +"installé avec succès. Si tout a fonctionné, la version de Flower devrait " +"être imprimée sur la ligne de commande: :" -#: ../../source/how-to-use-strategies.rst:2 -#, fuzzy -msgid "Use strategies" -msgstr "Stratégies personnalisées" +#: ../../source/how-to-install-flower.rst:68 +msgid "Advanced installation options" +msgstr "Options d'installation avancées" -#: ../../source/how-to-use-strategies.rst:4 -msgid "" -"Flower allows full customization of the learning process through the " -":code:`Strategy` abstraction. A number of built-in strategies are " -"provided in the core framework." -msgstr "" -"Flower permet une personnalisation complète du processus d'apprentissage " -"grâce à l'abstraction :code:`Stratégie`. Un certain nombre de stratégies " -"intégrées sont fournies dans le cadre principal." +#: ../../source/how-to-install-flower.rst:71 +#, fuzzy +msgid "Install via Docker" +msgstr "Installer Flower" -#: ../../source/how-to-use-strategies.rst:6 -msgid "" -"There are three ways to customize the way Flower orchestrates the " -"learning process on the server side:" +#: ../../source/how-to-install-flower.rst:73 +msgid ":doc:`Run Flower using Docker `" msgstr "" -"Il y a trois façons de personnaliser la manière dont Flower orchestre le " -"processus d'apprentissage du côté du serveur :" -#: ../../source/how-to-use-strategies.rst:8 -msgid "Use an existing strategy, for example, :code:`FedAvg`" -msgstr "Utilise une stratégie existante, par exemple :code:`FedAvg`" - -#: ../../source/how-to-use-strategies.rst:9 -#: ../../source/how-to-use-strategies.rst:40 -msgid "Customize an existing strategy with callback functions" -msgstr "Personnalise une stratégie existante avec des fonctions de rappel" - -#: ../../source/how-to-use-strategies.rst:10 -#: ../../source/how-to-use-strategies.rst:87 -msgid 
"Implement a novel strategy" -msgstr "Mets en place une nouvelle stratégie" - -#: ../../source/how-to-use-strategies.rst:14 -msgid "Use an existing strategy" -msgstr "Utilise une stratégie existante" +#: ../../source/how-to-install-flower.rst:76 +msgid "Install pre-release" +msgstr "Installer la version pre-release" -#: ../../source/how-to-use-strategies.rst:16 +#: ../../source/how-to-install-flower.rst:78 +#, fuzzy msgid "" -"Flower comes with a number of popular federated learning strategies " -"built-in. A built-in strategy can be instantiated as follows:" +"New (possibly unstable) versions of Flower are sometimes available as " +"pre-release versions (alpha, beta, release candidate) before the stable " +"release happens:" msgstr "" -"Flower intègre un certain nombre de stratégies d'apprentissage fédéré " -"populaires. Une stratégie intégrée peut être instanciée comme suit :" +"Les nouvelles versions (éventuellement instables) de Flower sont parfois " +"disponibles en tant que versions préliminaires (alpha, bêta, release " +"candidate) avant que la version stable n'arrive : :" -#: ../../source/how-to-use-strategies.rst:25 +#: ../../source/how-to-install-flower.rst:85 +#, fuzzy msgid "" -"This creates a strategy with all parameters left at their default values " -"and passes it to the :code:`start_server` function. It is usually " -"recommended to adjust a few parameters during instantiation:" +"For simulations that use the Virtual Client Engine, ``flwr`` pre-releases" +" should be installed with the ``simulation`` extra:" msgstr "" -"Cela crée une stratégie dont tous les paramètres sont laissés à leur " -"valeur par défaut et la transmet à la fonction :code:`start_server`. 
Il " -"est généralement recommandé d'ajuster quelques paramètres lors de " -"l'instanciation :" +"Pour les simulations qui utilisent le moteur de client virtuel, les " +"versions de ``flwr`` doivent être installées avec l'option " +"``simulation``: :" + +#: ../../source/how-to-install-flower.rst:93 +msgid "Install nightly release" +msgstr "Installer la version nightly" -#: ../../source/how-to-use-strategies.rst:42 +#: ../../source/how-to-install-flower.rst:95 +#, fuzzy msgid "" -"Existing strategies provide several ways to customize their behaviour. " -"Callback functions allow strategies to call user-provided code during " -"execution." +"The latest (potentially unstable) changes in Flower are available as " +"nightly releases:" msgstr "" -"Les stratégies existantes offrent plusieurs façons de personnaliser leur " -"comportement. Les fonctions de rappel permettent aux stratégies d'appeler" -" le code fourni par l'utilisateur pendant l'exécution." - -#: ../../source/how-to-use-strategies.rst:45 -msgid "Configuring client fit and client evaluate" -msgstr "Configurer l'adaptation et l'évaluation du client" +"Les dernières modifications (potentiellement instables) de Flower sont " +"disponibles sous forme de versions nocturnes: :" -#: ../../source/how-to-use-strategies.rst:47 +#: ../../source/how-to-install-flower.rst:101 +#, fuzzy msgid "" -"The server can pass new configuration values to the client each round by " -"providing a function to :code:`on_fit_config_fn`. The provided function " -"will be called by the strategy and must return a dictionary of " -"configuration key values pairs that will be sent to the client. It must " -"return a dictionary of arbitrary configuration values :code:`client.fit`" -" and :code:`client.evaluate` functions during each round of federated " -"learning." 
+"For simulations that use the Virtual Client Engine, ``flwr-nightly`` " +"should be installed with the ``simulation`` extra:" msgstr "" -"Le serveur peut transmettre de nouvelles valeurs de configuration au " -"client à chaque tour en fournissant une fonction à " -":code:`on_fit_config_fn`. La fonction fournie sera appelée par la " -"stratégie et doit renvoyer un dictionnaire de paires de valeurs de clés " -"de configuration qui seront envoyées au client. Elle doit renvoyer un " -"dictionnaire de valeurs de configuration arbitraires :code:`client.fit` " -"et :code:`client.evaluate` au cours de chaque tour d'apprentissage " -"fédéré." +"Pour les simulations qui utilisent le moteur de client virtuel, ``flwr-" +"nightly`` doit être installé avec l'option ``simulation``: :" -#: ../../source/how-to-use-strategies.rst:75 +#: ../../source/how-to-monitor-simulation.rst:2 +#, fuzzy +msgid "Monitor simulation" +msgstr "Simulation de moniteur" + +#: ../../source/how-to-monitor-simulation.rst:4 msgid "" -"The :code:`on_fit_config_fn` can be used to pass arbitrary configuration " -"values from server to client, and poetentially change these values each " -"round, for example, to adjust the learning rate. The client will receive " -"the dictionary returned by the :code:`on_fit_config_fn` in its own " -":code:`client.fit()` function." +"Flower allows you to monitor system resources while running your " +"simulation. Moreover, the Flower simulation engine is powerful and " +"enables you to decide how to allocate resources per client manner and " +"constrain the total usage. Insights from resource consumption can help " +"you make smarter decisions and speed up the execution time." msgstr "" -"Le :code:`on_fit_config_fn` peut être utilisé pour passer des valeurs de " -"configuration arbitraires du serveur au client, et changer poétiquement " -"ces valeurs à chaque tour, par exemple pour ajuster le taux " -"d'apprentissage. 
Le client recevra le dictionnaire renvoyé par le " -":code:`on_fit_config_fn` dans sa propre fonction :code:`client.fit()`." +"Flower te permet de surveiller les ressources du système pendant " +"l'exécution de ta simulation. De plus, le moteur de simulation de Flower " +"est puissant et te permet de décider comment allouer les ressources par " +"manière de client et de limiter l'utilisation totale. Les informations " +"sur la consommation des ressources peuvent t'aider à prendre des " +"décisions plus intelligentes et à accélérer le temps d'exécution." -#: ../../source/how-to-use-strategies.rst:78 +#: ../../source/how-to-monitor-simulation.rst:9 msgid "" -"Similar to :code:`on_fit_config_fn`, there is also " -":code:`on_evaluate_config_fn` to customize the configuration sent to " -":code:`client.evaluate()`" +"The specific instructions assume you are using macOS and have the " +"`Homebrew `_ package manager installed." msgstr "" -"Comme pour :code:`on_fit_config_fn`, il existe aussi " -":code:`on_evaluate_config_fn` pour personnaliser la configuration envoyée" -" à :code:`client.evaluate()`" +"Les instructions spécifiques supposent que tu utilises macOS et que le " +"gestionnaire de paquets `Homebrew `_ est installé." -#: ../../source/how-to-use-strategies.rst:81 -msgid "Configuring server-side evaluation" -msgstr "Configuration de l'évaluation côté serveur" +#: ../../source/how-to-monitor-simulation.rst:13 +msgid "Downloads" +msgstr "Téléchargements" -#: ../../source/how-to-use-strategies.rst:83 +#: ../../source/how-to-monitor-simulation.rst:19 msgid "" -"Server-side evaluation can be enabled by passing an evaluation function " -"to :code:`evaluate_fn`." +"`Prometheus `_ is used for data collection, while" +" `Grafana `_ will enable you to visualize the " +"collected data. They are both well integrated with `Ray " +"`_ which Flower uses under the hood." 
msgstr "" -"L'évaluation côté serveur peut être activée en passant une fonction " -"d'évaluation à :code:`evaluate_fn`." +"`Prometheus `_ est utilisé pour la collecte de " +"données, tandis que `Grafana `_ te permettra de " +"visualiser les données collectées. Ils sont tous deux bien intégrés à " +"`Ray `_ que Flower utilise sous le capot." -#: ../../source/how-to-use-strategies.rst:89 -#, fuzzy +#: ../../source/how-to-monitor-simulation.rst:23 msgid "" -"Writing a fully custom strategy is a bit more involved, but it provides " -"the most flexibility. Read the `Implementing Strategies `_ guide to learn more." +"Overwrite the configuration files (depending on your device, it might be " +"installed on a different path)." msgstr "" -"L'écriture d'une stratégie entièrement personnalisée est un peu plus " -"complexe, mais c'est celle qui offre le plus de souplesse. Lis le guide " -"`Implémentation des stratégies `_ pour " -"en savoir plus." +"Écrase les fichiers de configuration (selon ton appareil, il se peut " +"qu'il soit installé sur un chemin différent)." -#: ../../source/index.rst:34 -msgid "Tutorial" -msgstr "Tutoriel" +#: ../../source/how-to-monitor-simulation.rst:26 +msgid "If you are on an M1 Mac, it should be:" +msgstr "Si tu es sur un Mac M1, il devrait l'être :" -#: ../../source/index.rst:44 -msgid "Quickstart tutorials" -msgstr "Quickstart tutorials" +#: ../../source/how-to-monitor-simulation.rst:33 +msgid "On the previous generation Intel Mac devices, it should be:" +msgstr "" +"Sur les appareils Mac Intel de la génération précédente, ce devrait être " +"le cas :" -#: ../../source/index.rst:74 ../../source/index.rst:78 -msgid "How-to guides" -msgstr "Guides" +#: ../../source/how-to-monitor-simulation.rst:40 +msgid "" +"Open the respective configuration files and change them. Depending on " +"your device, use one of the two following commands:" +msgstr "" +"Ouvre les fichiers de configuration respectifs et modifie-les. 
Selon ton " +"appareil, utilise l'une des deux commandes suivantes :" -#: ../../source/index.rst:99 -msgid "Legacy example guides" +#: ../../source/how-to-monitor-simulation.rst:51 +msgid "" +"and then delete all the text in the file and paste a new Prometheus " +"config you see below. You may adjust the time intervals to your " +"requirements:" msgstr "" +"puis supprime tout le texte du fichier et colle une nouvelle " +"configuration Prometheus que tu vois ci-dessous. Tu peux adapter les " +"intervalles de temps à tes besoins :" -#: ../../source/index.rst:108 ../../source/index.rst:112 -msgid "Explanations" -msgstr "Explications" +#: ../../source/how-to-monitor-simulation.rst:67 +msgid "" +"Now after you have edited the Prometheus configuration, do the same with " +"the Grafana configuration files. Open those using one of the following " +"commands as before:" +msgstr "" +"Maintenant, après avoir édité la configuration de Prometheus, fais de " +"même avec les fichiers de configuration de Grafana. Ouvre ces derniers à " +"l'aide de l'une des commandes suivantes, comme précédemment :" -#: None:-1 -msgid "API reference" -msgstr "Référence pour l'API" +#: ../../source/how-to-monitor-simulation.rst:78 +msgid "" +"Your terminal editor should open and allow you to apply the following " +"configuration as before." +msgstr "" +"Ton éditeur de terminal devrait s'ouvrir et te permettre d'appliquer la " +"configuration suivante comme précédemment." -#: ../../source/index.rst:137 -msgid "Reference docs" -msgstr "Référence pour la documentation" +#: ../../source/how-to-monitor-simulation.rst:94 +msgid "" +"Congratulations, you just downloaded all the necessary software needed " +"for metrics tracking. Now, let’s start it." +msgstr "" +"Félicitations, tu viens de télécharger tous les logiciels nécessaires au " +"suivi des métriques, maintenant, démarrons-le." 
-#: ../../source/index.rst:153 -#, fuzzy -msgid "Contributor tutorials" -msgstr "Configuration du contributeur" +#: ../../source/how-to-monitor-simulation.rst:98 +msgid "Tracking metrics" +msgstr "Suivi des mesures" -#: ../../source/index.rst:160 -#, fuzzy -msgid "Contributor how-to guides" -msgstr "Guide pour les contributeurs" +#: ../../source/how-to-monitor-simulation.rst:100 +msgid "" +"Before running your Flower simulation, you have to start the monitoring " +"tools you have just installed and configured." +msgstr "" +"Avant de lancer ta simulation Flower, tu dois démarrer les outils de " +"surveillance que tu viens d'installer et de configurer." -#: ../../source/index.rst:173 -#, fuzzy -msgid "Contributor explanations" -msgstr "Explications" +#: ../../source/how-to-monitor-simulation.rst:108 +msgid "" +"Please include the following argument in your Python code when starting a" +" simulation." +msgstr "" +"Tu dois inclure l'argument suivant dans ton code Python lorsque tu " +"démarres une simulation." -#: ../../source/index.rst:179 -#, fuzzy -msgid "Contributor references" -msgstr "Configuration du contributeur" +#: ../../source/how-to-monitor-simulation.rst:119 +msgid "Now, you are ready to start your workload." +msgstr "Maintenant, tu es prêt à commencer ta charge de travail." -#: ../../source/index.rst:-1 +#: ../../source/how-to-monitor-simulation.rst:121 msgid "" -"Check out the documentation of the main Flower Framework enabling easy " -"Python development for Federated Learning." +"Shortly after the simulation starts, you should see the following logs in" +" your terminal:" msgstr "" +"Peu de temps après le début de la simulation, tu devrais voir les " +"journaux suivants dans ton terminal :" -#: ../../source/index.rst:2 +#: ../../source/how-to-monitor-simulation.rst:127 #, fuzzy -msgid "Flower Framework Documentation" -msgstr "Rédiger de la documentation" +msgid "You can look at everything at http://127.0.0.1:8265 ." 
+msgstr "Tu peux tout regarder sur ``_ ." -#: ../../source/index.rst:7 +#: ../../source/how-to-monitor-simulation.rst:129 msgid "" -"Welcome to Flower's documentation. `Flower `_ is a " -"friendly federated learning framework." +"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" +" lowest option)." msgstr "" -"Bienvenue sur la documentation de Flower. `Flower `_ " -"est un framework de federated learning convivial et facile à utiliser." - -#: ../../source/index.rst:11 -msgid "Join the Flower Community" -msgstr "Rejoignez la communauté de Flower" +"Il s'agit d'un tableau de bord Ray. Tu peux naviguer vers Metrics (sur le" +" panneau de gauche, l'option la plus basse)." -#: ../../source/index.rst:13 +#: ../../source/how-to-monitor-simulation.rst:132 msgid "" -"The Flower Community is growing quickly - we're a friendly group of " -"researchers, engineers, students, professionals, academics, and other " -"enthusiasts." +"Or alternatively, you can just see them in Grafana by clicking on the " +"right-up corner, “View in Grafana”. Please note that the Ray dashboard is" +" only accessible during the simulation. After the simulation ends, you " +"can only use Grafana to explore the metrics. You can start Grafana by " +"going to ``http://localhost:3000/``." msgstr "" -"Le communauté de Flower s'agrandit rapidement - on est un super groupe de" -" chercheurs, ingénieurs, étudiants, professionnels, académiques, et " -"autres hobbyistes." - -#: ../../source/index.rst:15 -msgid "Join us on Slack" -msgstr "Join us on Slack" - -#: ../../source/index.rst:23 -msgid "Flower Framework" -msgstr "Flower Framework" +"Ou alors, tu peux simplement les voir dans Grafana en cliquant sur le " +"coin supérieur droit, \"View in Grafana\". Sache que le tableau de bord " +"Ray n'est accessible que pendant la simulation. Une fois la simulation " +"terminée, tu ne peux utiliser Grafana que pour explorer les métriques. 
Tu" +" peux démarrer Grafana en te rendant sur ``http://localhost:3000/``." -#: ../../source/index.rst:25 +#: ../../source/how-to-monitor-simulation.rst:137 +#, fuzzy msgid "" -"The user guide is targeted at researchers and developers who want to use " -"Flower to bring existing machine learning workloads into a federated " -"setting. One of Flower's design goals was to make this simple. Read on to" -" learn more." +"After you finish the visualization, stop Prometheus and Grafana. This is " +"important as they will otherwise block, for example port ``3000`` on your" +" machine as long as they are running." msgstr "" -"Ce guide utilisateur s'adresse à des chercheurs et des développeurs qui " -"veulent utiliser Flower pour transposer des workloads de Machine Learning" -" existantes dans un scenario fédéré. Un des buts de Flower est de rendre " -"cela le plus evident possible. Lisez la suite pour en apprendre plus." +"Après avoir terminé la visualisation, arrête Prometheus et Grafana. C'est" +" important car sinon ils bloqueront, par exemple, le port :code:`3000` " +"sur ta machine tant qu'ils seront en cours d'exécution." -#: ../../source/index.rst:30 -msgid "Tutorials" -msgstr "Tutoriels" +#: ../../source/how-to-monitor-simulation.rst:147 +msgid "Resource allocation" +msgstr "Allocation des ressources" -#: ../../source/index.rst:32 +#: ../../source/how-to-monitor-simulation.rst:149 msgid "" -"A learning-oriented series of federated learning tutorials, the best " -"place to start." +"You must understand how the Ray library works to efficiently allocate " +"system resources to simulation clients on your own." msgstr "" -"Une serie de tutoriels de Federated Learning, l'endroit parfait pour " -"débuter." +"Tu dois comprendre le fonctionnement de la bibliothèque Ray pour allouer " +"efficacement les ressources du système aux clients de simulation de ton " +"côté."
-#: ../../source/index.rst:61 -#, fuzzy +#: ../../source/how-to-monitor-simulation.rst:152 msgid "" -"QUICKSTART TUTORIALS: :doc:`PyTorch ` | " -":doc:`TensorFlow ` | :doc:`🤗 Transformers" -" ` | :doc:`JAX ` | :doc:`Pandas ` | :doc:`fastai " -"` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | :doc:`XGBoost ` | " -":doc:`Android ` | :doc:`iOS `" +"Initially, the simulation (which Ray handles under the hood) starts by " +"default with all the available resources on the system, which it shares " +"among the clients. It doesn't mean it divides it equally among all of " +"them, nor that the model training happens at all of them simultaneously. " +"You will learn more about that in the later part of this blog. You can " +"check the system resources by running the following:" msgstr "" -"QUICKSTART TUTORIALS: :ref:`PyTorch ` | " -":ref:`TensorFlow ` | :ref:`🤗 Transformers " -"` | :ref:`JAX ` | :ref:`Pandas " -"` | :ref:`fastai ` | :ref:`PyTorch " -"Lightning ` | :ref:`MXNet ` | :ref:`scikit-learn ` | :ref:`XGBoost " -"` | :ref:`Android ` | :ref:`iOS " -"`" +"Au départ, la simulation (que Ray gère sous le capot) démarre par défaut " +"avec toutes les ressources disponibles sur le système, qu'elle partage " +"entre les clients. Cela ne signifie pas qu'elle les divise de manière " +"égale entre tous, ni que l'apprentissage du modèle se fait sur tous les " +"clients simultanément. Tu en apprendras plus à ce sujet dans la suite de " +"ce blog. Tu peux vérifier les ressources du système en exécutant ce qui " +"suit :" -#: ../../source/index.rst:63 -msgid "We also made video tutorials for PyTorch:" +#: ../../source/how-to-monitor-simulation.rst:164 +msgid "In Google Colab, the result you see might be similar to this:" +msgstr "Dans Google Colab, le résultat que tu obtiens peut ressembler à ceci :" + +#: ../../source/how-to-monitor-simulation.rst:175 +msgid "" +"However, you can overwrite the defaults. 
When starting a simulation, do " +"the following (you don't need to overwrite all of them):" msgstr "" +"Cependant, tu peux écraser les valeurs par défaut. Lorsque tu démarres " +"une simulation, fais ce qui suit (tu n'as pas besoin de les écraser " +"toutes) :" -#: ../../source/index.rst:68 -#, fuzzy -msgid "And TensorFlow:" -msgstr "Exemples de TensorFlow" +#: ../../source/how-to-monitor-simulation.rst:195 +msgid "Let’s also specify the resource for a single client." +msgstr "Spécifions également la ressource pour un seul client." -#: ../../source/index.rst:76 +#: ../../source/how-to-monitor-simulation.rst:225 msgid "" -"Problem-oriented how-to guides show step-by-step how to achieve a " -"specific goal." +"Now comes the crucial part. Ray will start a new client only when it has " +"all the required resources (such that they run in parallel) when the " +"resources allow." msgstr "" -"Guides orientés sur la résolutions étapes par étapes de problèmes ou " -"objectifs specifiques." +"Ray ne démarrera un nouveau client que lorsqu'il disposera de toutes les " +"ressources nécessaires (de manière à ce qu'ils fonctionnent en parallèle)" +" lorsque les ressources le permettront." -#: ../../source/index.rst:110 +#: ../../source/how-to-monitor-simulation.rst:228 +#, fuzzy msgid "" -"Understanding-oriented concept guides explain and discuss key topics and " -"underlying ideas behind Flower and collaborative AI." +"In the example above, only one client will be run, so your clients won't " +"run concurrently. Setting ``client_num_gpus = 0.5`` would allow running " +"two clients and therefore enable them to run concurrently. Be careful not" +" to require more resources than available. If you specified " +"``client_num_gpus = 2``, the simulation wouldn't start (even if you had 2" +" GPUs but decided to set 1 in ``ray_init_args``)." 
msgstr "" -"Guides orientés sur la compréhension et l'explication des sujets et idées" -" de fonds sur lesquels sont construits Flower et l'IA collaborative." +"Dans l'exemple ci-dessus, un seul client sera exécuté, donc tes clients " +"ne fonctionneront pas simultanément. En définissant " +":code:`client_num_gpus = 0.5`, tu pourras exécuter deux clients et donc " +"les faire fonctionner simultanément. Fais attention à ne pas demander " +"plus de ressources que celles disponibles. Si tu as spécifié " +":code:`client_num_gpus = 2`, la simulation ne démarrera pas (même si tu " +"as 2 GPU mais que tu as décidé d'en définir 1 dans " +":code:`ray_init_args`)." -#: ../../source/index.rst:120 -#, fuzzy -msgid "References" -msgstr "Référence" +#: ../../source/how-to-monitor-simulation.rst:235 ../../source/ref-faq.rst:2 +msgid "FAQ" +msgstr "FAQ" -#: ../../source/index.rst:122 -msgid "Information-oriented API reference and other reference material." -msgstr "Référence de l'API orientée sur l'information pure." +#: ../../source/how-to-monitor-simulation.rst:237 +msgid "Q: I don't see any metrics logged." +msgstr "Q : Je ne vois aucune mesure enregistrée." -#: ../../source/index.rst:131::1 -msgid ":py:obj:`flwr `\\" +#: ../../source/how-to-monitor-simulation.rst:239 +msgid "" +"A: The timeframe might not be properly set. The setting is in the top " +"right corner (\"Last 30 minutes\" by default). Please change the " +"timeframe to reflect the period when the simulation was running." msgstr "" +"R : Il se peut que le délai ne soit pas correctement défini. Le paramètre" +" se trouve dans le coin supérieur droit (\"Dernières 30 minutes\" par " +"défaut). Modifie le délai pour qu'il corresponde à la période pendant " +"laquelle la simulation s'est déroulée." -#: ../../source/index.rst:131::1 flwr:1 of -msgid "Flower main package." +#: ../../source/how-to-monitor-simulation.rst:243 +msgid "" +"Q: I see “Grafana server not detected. 
Please make sure the Grafana " +"server is running and refresh this page” after going to the Metrics tab " +"in Ray Dashboard." msgstr "" +"Q : Je vois s'afficher \"Serveur Grafana non détecté. Vérifie que le " +"serveur Grafana fonctionne et actualise cette page\" après avoir accédé à" +" l'onglet Métriques dans Ray Dashboard." -#: ../../source/index.rst:148 -#, fuzzy -msgid "Contributor docs" -msgstr "Configuration du contributeur" - -#: ../../source/index.rst:150 -#, fuzzy +#: ../../source/how-to-monitor-simulation.rst:246 msgid "" -"The Flower community welcomes contributions. The following docs are " -"intended to help along the way." +"A: You probably don't have Grafana running. Please check the running " +"services" msgstr "" -"Les auteurs de Flower sont heureux d'accueillir des contributions " -"externes. Les guides suivant sont là pour vous accompagner dans cette " -"direction." +"R : Grafana n'est probablement pas en cours d'exécution. Vérifie les " +"services en cours d'exécution" -#: ../../source/ref-api-cli.rst:2 +#: ../../source/how-to-monitor-simulation.rst:252 #, fuzzy -msgid "Flower CLI reference" -msgstr "Client de Flower" +msgid "" +"Q: I see \"This site can't be reached\" when going to " +"http://127.0.0.1:8265." +msgstr "" +"Q : Je vois \"This site can't be reached\" quand je vais sur " +"``_." -#: ../../source/ref-api-cli.rst:7 -#, fuzzy -msgid "flower-simulation" -msgstr "Simulation de moniteur" +#: ../../source/how-to-monitor-simulation.rst:254 +msgid "" +"A: Either the simulation has already finished, or you still need to start" +" Prometheus." +msgstr "" +"R : Soit la simulation est déjà terminée, soit tu dois encore démarrer " +"Prometheus." 
-#: ../../source/ref-api-cli.rst:17 -msgid "flower-superlink" -msgstr "flower-superlink" +#: ../../source/how-to-monitor-simulation.rst:257 +msgid "Resources" +msgstr "Ressources" -#: ../../source/ref-api-cli.rst:27 +#: ../../source/how-to-monitor-simulation.rst:259 #, fuzzy -msgid "flower-client-app" -msgstr "Flower ClientApp." +msgid "" +"Ray Dashboard: https://docs.ray.io/en/latest/ray-observability/getting-" +"started.html" +msgstr "" +"Tableau de bord Ray : ``_" -#: ../../source/ref-api-cli.rst:37 +#: ../../source/how-to-monitor-simulation.rst:261 #, fuzzy -msgid "flower-server-app" -msgstr "flower-driver-api" +msgid "Ray Metrics: https://docs.ray.io/en/latest/cluster/metrics.html" +msgstr "" +"Ray Metrics : ``_" -#: ../../source/ref-api/flwr.rst:2 +#: ../../source/how-to-run-simulations.rst:2 #, fuzzy -msgid "flwr" -msgstr "Fleur" +msgid "Run simulations" +msgstr "Simulation de moniteur" -#: ../../source/ref-api/flwr.rst:25 ../../source/ref-api/flwr.server.rst:51 -msgid "Modules" +#: ../../source/how-to-run-simulations.rst:8 +msgid "" +"Simulating Federated Learning workloads is useful for a multitude of use-" +"cases: you might want to run your workload on a large cohort of clients " +"but without having to source, configure and mange a large number of " +"physical devices; you might want to run your FL workloads as fast as " +"possible on the compute systems you have access to without having to go " +"through a complex setup process; you might want to validate your " +"algorithm on different scenarios at varying levels of data and system " +"heterogeneity, client availability, privacy budgets, etc. These are among" +" some of the use-cases where simulating FL workloads makes sense. Flower " +"can accommodate these scenarios by means of its `VirtualClientEngine " +"`_ or " +"VCE." 
msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.client `\\" +#: ../../source/how-to-run-simulations.rst:19 +msgid "" +"The ``VirtualClientEngine`` schedules, launches and manages `virtual` " +"clients. These clients are identical to `non-virtual` clients (i.e. the " +"ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " +"creating a class inheriting, for example, from `flwr.client.NumPyClient " +"`_ and therefore behave in an " +"identical way. In addition to that, clients managed by the " +"``VirtualClientEngine`` are:" msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of -#, fuzzy -msgid "Flower client." -msgstr "Client de Flower" - -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.common `\\" +#: ../../source/how-to-run-simulations.rst:26 +msgid "" +"resource-aware: this means that each client gets assigned a portion of " +"the compute and memory on your system. You as a user can control this at " +"the beginning of the simulation and allows you to control the degree of " +"parallelism of your Flower FL simulation. The fewer the resources per " +"client, the more clients can run concurrently on the same hardware." msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of -msgid "Common components shared between server and client." -msgstr "Composants communs partagés entre le serveur et le client." - -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.server `\\" +#: ../../source/how-to-run-simulations.rst:31 +msgid "" +"self-managed: this means that you as a user do not need to launch clients" +" manually, instead this gets delegated to ``VirtualClientEngine``'s " +"internals." msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 -#: ../../source/ref-api/flwr.server.rst:40::1 flwr.server:1 -#: flwr.server.server.Server:1 of -#, fuzzy -msgid "Flower server." 
-msgstr "Serveur de Flower" - -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.simulation `\\" +#: ../../source/how-to-run-simulations.rst:33 +msgid "" +"ephemeral: this means that a client is only materialized when it is " +"required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards," +" releasing the resources it was assigned and allowing in this way other " +"clients to participate." msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of -#, fuzzy -msgid "Flower simulation." -msgstr "Simulation de moniteur" - -#: ../../source/ref-api/flwr.client.rst:2 -msgid "client" -msgstr "client" - -#: ../../source/ref-api/flwr.client.rst:13 -#: ../../source/ref-api/flwr.common.rst:13 -#: ../../source/ref-api/flwr.server.rst:13 -#: ../../source/ref-api/flwr.simulation.rst:13 -#, fuzzy -msgid "Functions" -msgstr "Les quatre fonctions :" - -#: ../../source/ref-api/flwr.client.rst:25::1 -msgid ":py:obj:`run_client_app `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:38 +msgid "" +"The ``VirtualClientEngine`` implements `virtual` clients using `Ray " +"`_, an open-source framework for scalable Python " +"workloads. In particular, Flower's ``VirtualClientEngine`` makes use of " +"`Actors `_ to spawn " +"`virtual` clients and run their workload." msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.supernode.app.run_client_app:1 of -#, fuzzy -msgid "Run Flower client app." -msgstr "Client de Flower" +#: ../../source/how-to-run-simulations.rst:45 +msgid "Launch your Flower simulation" +msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 -#, fuzzy -msgid ":py:obj:`run_supernode `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" +#: ../../source/how-to-run-simulations.rst:47 +msgid "" +"Running Flower simulations still require you to define your client class," +" a strategy, and utility functions to download and load (and potentially " +"partition) your dataset. 
With that out of the way, launching your " +"simulation is done with `start_simulation `_ and a minimal example looks" +" as follows:" +msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.supernode.app.run_supernode:1 of +#: ../../source/how-to-run-simulations.rst:73 #, fuzzy -msgid "Run Flower SuperNode." -msgstr "Serveur de Flower" +msgid "VirtualClientEngine resources" +msgstr "Moteur de client virtuel" -#: ../../source/ref-api/flwr.client.rst:25::1 +#: ../../source/how-to-run-simulations.rst:75 msgid "" -":py:obj:`start_client `\\ \\(\\*\\, " -"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" +"By default the VCE has access to all system resources (i.e. all CPUs, all" +" GPUs, etc) since that is also the default behavior when starting Ray. " +"However, in some settings you might want to limit how many of your system" +" resources are used for simulation. You can do this via the " +"``ray_init_args`` input argument to ``start_simulation`` which the VCE " +"internally passes to Ray's ``ray.init`` command. For a complete list of " +"settings you can configure check the `ray.init " +"`_" +" documentation. Do not set ``ray_init_args`` if you want the VCE to use " +"all your system's CPUs and GPUs." msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.app.start_client:1 of -msgid "Start a Flower client node which connects to a Flower server." +#: ../../source/how-to-run-simulations.rst:97 +msgid "Assigning client resources" msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 +#: ../../source/how-to-run-simulations.rst:99 msgid "" -":py:obj:`start_numpy_client `\\ \\(\\*\\," -" server\\_address\\, client\\)" +"By default the ``VirtualClientEngine`` assigns a single CPU core (and " +"nothing else) to each virtual client. This means that if your system has " +"10 cores, that many virtual clients can be concurrently running." 
msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.app.start_numpy_client:1 of -msgid "Start a Flower NumPyClient which connects to a gRPC server." +#: ../../source/how-to-run-simulations.rst:103 +msgid "" +"More often than not, you would probably like to adjust the resources your" +" clients get assigned based on the complexity (i.e. compute and memory " +"footprint) of your FL workload. You can do so when starting your " +"simulation by setting the argument `client_resources` to " +"`start_simulation `_." +" Two keys are internally used by Ray to schedule and spawn workloads (in " +"our case Flower clients):" msgstr "" -#: ../../source/ref-api/flwr.client.rst:27 -#: ../../source/ref-api/flwr.common.rst:32 -#: ../../source/ref-api/flwr.server.rst:28 -#: ../../source/ref-api/flwr.server.strategy.rst:17 -#: ../../source/ref-api/flwr.server.workflow.rst:17 -msgid "Classes" +#: ../../source/how-to-run-simulations.rst:110 +msgid "``num_cpus`` indicates the number of CPU cores a client would get." msgstr "" -#: ../../source/ref-api/flwr.client.rst:34::1 -msgid ":py:obj:`Client `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:111 +msgid "``num_gpus`` indicates the **ratio** of GPU memory a client gets assigned." msgstr "" -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.client.Client:1 of -msgid "Abstract base class for Flower clients." +#: ../../source/how-to-run-simulations.rst:113 +msgid "Let's see a few examples:" msgstr "" -#: ../../source/ref-api/flwr.client.rst:34::1 +#: ../../source/how-to-run-simulations.rst:132 msgid "" -":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " -"mods\\]\\)" +"While the ``client_resources`` can be used to control the degree of " +"concurrency in your FL simulation, this does not stop you from running " +"dozens, hundreds or even thousands of clients in the same round and " +"having orders of magnitude more `dormant` (i.e. not participating in a " +"round) clients. 
Let's say you want to have 100 clients per round but your" +" system can only accommodate 8 clients concurrently. The " +"``VirtualClientEngine`` will schedule 100 jobs to run (each simulating a " +"client sampled by the strategy) and then will execute them in a resource-" +"aware manner in batches of 8." msgstr "" -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.client_app.ClientApp:1 of -#, fuzzy -msgid "Flower ClientApp." -msgstr "Flower ClientApp." - -#: ../../source/ref-api/flwr.client.rst:34::1 -msgid ":py:obj:`NumPyClient `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:140 +msgid "" +"To understand all the intricate details on how resources are used to " +"schedule FL clients and how to define custom resources, please take a " +"look at the `Ray documentation `_." msgstr "" -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.numpy_client.NumPyClient:1 of -msgid "Abstract base class for Flower clients using NumPy." -msgstr "" +#: ../../source/how-to-run-simulations.rst:145 +#, fuzzy +msgid "Simulation examples" +msgstr "Exemples de PyTorch" -#: flwr.client.client.Client:1 flwr.client.numpy_client.NumPyClient:1 -#: flwr.server.client_manager.ClientManager:1 -#: flwr.server.driver.driver.Driver:1 flwr.server.strategy.strategy.Strategy:1 -#: of -msgid "Bases: :py:class:`~abc.ABC`" +#: ../../source/how-to-run-simulations.rst:147 +msgid "" +"A few ready-to-run complete examples for Flower simulation in " +"Tensorflow/Keras and PyTorch are provided in the `Flower repository " +"`_. 
You can run them on Google Colab too:" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:15 -#: ../../source/ref-api/flwr.client.ClientApp.rst:15 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:15 -#: ../../source/ref-api/flwr.common.Array.rst:15 -#: ../../source/ref-api/flwr.common.ClientMessage.rst:15 -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 -#: ../../source/ref-api/flwr.common.Context.rst:15 -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 -#: ../../source/ref-api/flwr.common.Error.rst:15 -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 -#: ../../source/ref-api/flwr.common.EventType.rst:15 -#: ../../source/ref-api/flwr.common.FitIns.rst:15 -#: ../../source/ref-api/flwr.common.FitRes.rst:15 -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:15 -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 -#: ../../source/ref-api/flwr.common.Message.rst:15 -#: ../../source/ref-api/flwr.common.MessageType.rst:15 -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 -#: ../../source/ref-api/flwr.common.Metadata.rst:15 -#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 -#: ../../source/ref-api/flwr.common.Parameters.rst:15 -#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 -#: ../../source/ref-api/flwr.common.RecordSet.rst:15 -#: ../../source/ref-api/flwr.common.ServerMessage.rst:15 -#: ../../source/ref-api/flwr.common.Status.rst:15 -#: ../../source/ref-api/flwr.server.ClientManager.rst:15 -#: ../../source/ref-api/flwr.server.Driver.rst:15 -#: ../../source/ref-api/flwr.server.History.rst:15 -#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 -#: ../../source/ref-api/flwr.server.Server.rst:15 -#: ../../source/ref-api/flwr.server.ServerApp.rst:15 -#: 
../../source/ref-api/flwr.server.ServerConfig.rst:15 -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 -#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 -#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 -#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 -#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 -msgid 
"Methods" +#: ../../source/how-to-run-simulations.rst:151 +#, fuzzy +msgid "" +"`Tensorflow/Keras Simulation " +"`_: 100 clients collaboratively train a MLP model on MNIST." msgstr "" +"`Quickstart TensorFlow (Code) " +"`_" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`evaluate `\\ \\(ins\\)" +#: ../../source/how-to-run-simulations.rst:154 +msgid "" +"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " +"MNIST." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.evaluate:1 -#: flwr.client.numpy_client.NumPyClient.evaluate:1 of +#: ../../source/how-to-run-simulations.rst:159 #, fuzzy -msgid "Evaluate the provided parameters using the locally held dataset." -msgstr "évaluer le modèle mis à jour sur l'ensemble de test local" +msgid "Multi-node Flower simulations" +msgstr "Simulation de moniteur" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`fit `\\ \\(ins\\)" +#: ../../source/how-to-run-simulations.rst:161 +msgid "" +"Flower's ``VirtualClientEngine`` allows you to run FL simulations across " +"multiple compute nodes. Before starting your multi-node simulation ensure" +" that you:" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.fit:1 of -msgid "Refine the provided parameters using the locally held dataset." +#: ../../source/how-to-run-simulations.rst:164 +msgid "Have the same Python environment in all nodes." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`get_context `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:165 +msgid "Have a copy of your code (e.g. your entire repo) in all nodes." 
msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.get_context:1 -#: flwr.client.numpy_client.NumPyClient.get_context:1 of -#, fuzzy -msgid "Get the run context from this client." -msgstr "Évaluer la réponse d'un client." - -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`get_parameters `\\ \\(ins\\)" +#: ../../source/how-to-run-simulations.rst:166 +msgid "" +"Have a copy of your dataset in all nodes (more about this in " +":ref:`simulation considerations `)" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.get_parameters:1 -#: flwr.client.numpy_client.NumPyClient.get_parameters:1 of -#, fuzzy -msgid "Return the current local model parameters." -msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" +#: ../../source/how-to-run-simulations.rst:168 +msgid "" +"Pass ``ray_init_args={\"address\"=\"auto\"}`` to `start_simulation `_ so the " +"``VirtualClientEngine`` attaches to a running Ray instance." +msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`get_properties `\\ \\(ins\\)" +#: ../../source/how-to-run-simulations.rst:171 +msgid "" +"Start Ray on you head node: on the terminal type ``ray start --head``. " +"This command will print a few lines, one of which indicates how to attach" +" other nodes to the head node." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.get_properties:1 of -msgid "Return set of client's properties." 
+#: ../../source/how-to-run-simulations.rst:174 +msgid "" +"Attach other nodes to the head node: copy the command shown after " +"starting the head and execute it on terminal of a new node: for example " +"``ray start --address='192.168.1.132:6379'``" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`set_context `\\ \\(context\\)" +#: ../../source/how-to-run-simulations.rst:178 +msgid "" +"With all the above done, you can run your code from the head node as you " +"would if the simulation was running on a single node." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.set_context:1 -#: flwr.client.numpy_client.NumPyClient.set_context:1 of -msgid "Apply a run context to this client." +#: ../../source/how-to-run-simulations.rst:181 +msgid "" +"Once your simulation is finished, if you'd like to dismantle your cluster" +" you simply need to run the command ``ray stop`` in each node's terminal " +"(including the head node)." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`to_client `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:185 +msgid "Multi-node simulation good-to-know" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.to_client:1 of -msgid "Return client (itself)." 
+#: ../../source/how-to-run-simulations.rst:187 +msgid "" +"Here we list a few interesting functionality when running multi-node FL " +"simulations:" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:46 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:46 -#: ../../source/ref-api/flwr.common.Array.rst:28 -#: ../../source/ref-api/flwr.common.ClientMessage.rst:25 -#: ../../source/ref-api/flwr.common.Code.rst:19 -#: ../../source/ref-api/flwr.common.Context.rst:25 -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 -#: ../../source/ref-api/flwr.common.Error.rst:25 -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 -#: ../../source/ref-api/flwr.common.EventType.rst:165 -#: ../../source/ref-api/flwr.common.FitIns.rst:25 -#: ../../source/ref-api/flwr.common.FitRes.rst:25 -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:25 -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 -#: ../../source/ref-api/flwr.common.Message.rst:37 -#: ../../source/ref-api/flwr.common.MessageType.rst:25 -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 -#: ../../source/ref-api/flwr.common.Metadata.rst:25 -#: ../../source/ref-api/flwr.common.Parameters.rst:25 -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 -#: ../../source/ref-api/flwr.common.RecordSet.rst:25 -#: ../../source/ref-api/flwr.common.ServerMessage.rst:25 -#: ../../source/ref-api/flwr.common.Status.rst:25 -#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 -#: ../../source/ref-api/flwr.server.ServerConfig.rst:25 -msgid "Attributes" +#: ../../source/how-to-run-simulations.rst:189 +msgid "" +"User ``ray status`` to check all nodes connected to your head node as " +"well as the total resources available to the ``VirtualClientEngine``." 
msgstr "" -#: flwr.client.client.Client.evaluate:1::1 of -msgid ":py:obj:`context `\\" +#: ../../source/how-to-run-simulations.rst:192 +msgid "" +"When attaching a new node to the head, all its resources (i.e. all CPUs, " +"all GPUs) will be visible by the head node. This means that the " +"``VirtualClientEngine`` can schedule as many `virtual` clients as that " +"node can possible run. In some settings you might want to exclude certain" +" resources from the simulation. You can do this by appending `--num-" +"cpus=` and/or `--num-gpus=` in " +"any ``ray start`` command (including when starting the head)" msgstr "" -#: ../../source/ref-api/flwr.common.Parameters.rst:2 -#: flwr.client.app.start_client flwr.client.app.start_numpy_client -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.numpy_client.NumPyClient.evaluate -#: flwr.client.numpy_client.NumPyClient.fit -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.context.Context flwr.common.message.Error -#: flwr.common.message.Message flwr.common.message.Message.create_error_reply -#: flwr.common.message.Message.create_reply flwr.common.message.Metadata -#: flwr.common.record.parametersrecord.Array flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.ClientManager.unregister -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.unregister -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.bulyan.Bulyan -#: 
flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.fedadagrad.FedAdagrad -#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg -#: flwr.server.strategy.fedavg_android.FedAvgAndroid -#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt -#: flwr.server.strategy.fedprox.FedProx -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg -#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow -#: flwr.simulation.app.start_simulation -#: flwr.simulation.run_simulation.run_simulation of +#: ../../source/how-to-run-simulations.rst:202 #, fuzzy -msgid "Parameters" -msgstr "Paramètres du modèle." +msgid "Considerations for simulations" +msgstr "Simulation de moniteur" -#: flwr.client.client.Client.evaluate:3 of +#: ../../source/how-to-run-simulations.rst:206 msgid "" -"The evaluation instructions containing (global) model parameters received" -" from the server and a dictionary of configuration values used to " -"customize the local evaluation process." 
+"We are actively working on these fronts so to make it trivial to run any " +"FL workload with Flower simulation." msgstr "" -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.numpy_client.NumPyClient.evaluate -#: flwr.client.numpy_client.NumPyClient.fit -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.message.Message.create_reply flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.num_available -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.SimpleClientManager.num_available -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.simulation.app.start_simulation of +#: ../../source/how-to-run-simulations.rst:209 +msgid "" +"The current VCE allows you to run Federated Learning workloads in " +"simulation mode whether you are prototyping simple scenarios on your " +"personal laptop or you want to train a complex FL pipeline across " +"multiple high-performance GPU nodes. 
While we add more capabilities to " +"the VCE, the points below highlight some of the considerations to keep in" +" mind when designing your FL pipeline with Flower. We also highlight a " +"couple of current limitations in our implementation." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:217 #, fuzzy -msgid "Returns" +msgid "GPU resources" msgstr "Ressources" -#: flwr.client.client.Client.evaluate:8 of +#: ../../source/how-to-run-simulations.rst:219 msgid "" -"The evaluation result containing the loss on the local dataset and other " -"details such as the number of local data examples used for evaluation." -msgstr "" - -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.message.Message.create_reply flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.num_available -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.SimpleClientManager.num_available -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: 
flwr.simulation.app.start_simulation of -msgid "Return type" +"The VCE assigns a share of GPU memory to a client that specifies the key " +"``num_gpus`` in ``client_resources``. This being said, Ray (used " +"internally by the VCE) is by default:" msgstr "" -#: flwr.client.client.Client.fit:3 of +#: ../../source/how-to-run-simulations.rst:222 msgid "" -"The training instructions containing (global) model parameters received " -"from the server and a dictionary of configuration values used to " -"customize the local training process." +"not aware of the total VRAM available on the GPUs. This means that if you" +" set ``num_gpus=0.5`` and you have two GPUs in your system with different" +" (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients " +"concurrently." msgstr "" -#: flwr.client.client.Client.fit:8 of +#: ../../source/how-to-run-simulations.rst:225 msgid "" -"The training result containing updated parameters and other details such " -"as the number of local training examples used for training." +"not aware of other unrelated (i.e. not created by the VCE) workloads are " +"running on the GPU. Two takeaways from this are:" msgstr "" -#: flwr.client.client.Client.get_parameters:3 of +#: ../../source/how-to-run-simulations.rst:228 msgid "" -"The get parameters instructions received from the server containing a " -"dictionary of configuration values." +"Your Flower server might need a GPU to evaluate the `global model` after " +"aggregation (by instance when making use of the `evaluate method `_)" msgstr "" -#: flwr.client.client.Client.get_parameters:7 of -#, fuzzy -msgid "The current local model parameters." -msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" - -#: flwr.client.client.Client.get_properties:3 of +#: ../../source/how-to-run-simulations.rst:231 msgid "" -"The get properties instructions received from the server containing a " -"dictionary of configuration values." 
+"If you want to run several independent Flower simulations on the same " +"machine you need to mask-out your GPUs with " +"``CUDA_VISIBLE_DEVICES=\"\"`` when launching your experiment." msgstr "" -#: flwr.client.client.Client.get_properties:7 of -msgid "The current client properties." +#: ../../source/how-to-run-simulations.rst:235 +msgid "" +"In addition, the GPU resource limits passed to ``client_resources`` are " +"not `enforced` (i.e. they can be exceeded) which can result in the " +"situation of client using more VRAM than the ratio specified when " +"starting the simulation." msgstr "" -#: ../../source/ref-api/flwr.client.ClientApp.rst:2 +#: ../../source/how-to-run-simulations.rst:240 #, fuzzy -msgid "ClientApp" -msgstr "client" +msgid "TensorFlow with GPUs" +msgstr "Exemples de TensorFlow" -#: flwr.client.client_app.ClientApp:1 flwr.common.constant.MessageType:1 -#: flwr.common.constant.MessageTypeLegacy:1 flwr.common.context.Context:1 -#: flwr.common.message.Error:1 flwr.common.message.Message:1 -#: flwr.common.message.Metadata:1 flwr.common.record.parametersrecord.Array:1 -#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 -#: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 -#: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 -#: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 -#: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 -#: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 -#: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 -#: flwr.common.typing.Status:1 flwr.server.history.History:1 -#: flwr.server.server.Server:1 flwr.server.server_app.ServerApp:1 -#: flwr.server.server_config.ServerConfig:1 -#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 -#: of -msgid "Bases: :py:class:`object`" +#: 
../../source/how-to-run-simulations.rst:242 +msgid "" +"When `using a GPU with TensorFlow " +"`_ nearly your entire GPU memory of" +" all your GPUs visible to the process will be mapped. This is done by " +"TensorFlow for optimization purposes. However, in settings such as FL " +"simulations where we want to split the GPU into multiple `virtual` " +"clients, this is not a desirable mechanism. Luckily we can disable this " +"default behavior by `enabling memory growth " +"`_." msgstr "" -#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 -#: flwr.client.client_app.ClientApp:4 -#: flwr.client.client_app.ClientApp.evaluate:4 -#: flwr.client.client_app.ClientApp.query:4 -#: flwr.client.client_app.ClientApp.train:4 flwr.server.app.start_server:41 -#: flwr.server.server_app.ServerApp:4 flwr.server.server_app.ServerApp.main:4 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 -#: of -#, fuzzy -msgid "Examples" -msgstr "Exemples de PyTorch" - -#: flwr.client.client_app.ClientApp:5 of +#: ../../source/how-to-run-simulations.rst:249 msgid "" -"Assuming a typical `Client` implementation named `FlowerClient`, you can " -"wrap it in a `ClientApp` as follows:" +"This would need to be done in the main process (which is where the server" +" would run) and in each Actor created by the VCE. By means of " +"``actor_kwargs`` we can pass the reserved key `\"on_actor_init_fn\"` in " +"order to specify a function to be executed upon actor initialization. In " +"this case, to enable GPU growth for TF workloads. 
It would look as " +"follows:" msgstr "" -#: flwr.client.client_app.ClientApp:16 of +#: ../../source/how-to-run-simulations.rst:272 +#, fuzzy msgid "" -"If the above code is in a Python module called `client`, it can be " -"started as follows:" +"This is precisely the mechanism used in `Tensorflow/Keras Simulation " +"`_ example." msgstr "" +"`Quickstart TensorFlow (Code) " +"`_" -#: flwr.client.client_app.ClientApp:21 of -msgid "" -"In this `client:app` example, `client` refers to the Python module " -"`client.py` in which the previous code lives in and `app` refers to the " -"global attribute `app` that points to an object of type `ClientApp`." -msgstr "" - -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid ":py:obj:`evaluate `\\ \\(\\)" -msgstr "" - -#: flwr.client.client_app.ClientApp.evaluate:1 -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid "Return a decorator that registers the evaluate fn with the client app." +#: ../../source/how-to-run-simulations.rst:276 +msgid "Multi-node setups" msgstr "" -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid ":py:obj:`query `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:278 +msgid "" +"The VCE does not currently offer a way to control on which node a " +"particular `virtual` client is executed. In other words, if more than a " +"single node have the resources needed by a client to run, then any of " +"those nodes could get the client workload scheduled onto. Later in the FL" +" process (i.e. in a different round) the same client could be executed by" +" a different node. Depending on how your clients access their datasets, " +"this might require either having a copy of all dataset partitions on all " +"nodes or a dataset serving mechanism (e.g. using nfs, a database) to " +"circumvent data duplication." 
msgstr "" -#: flwr.client.client_app.ClientApp.evaluate:1::1 -#: flwr.client.client_app.ClientApp.query:1 of -msgid "Return a decorator that registers the query fn with the client app." +#: ../../source/how-to-run-simulations.rst:286 +msgid "" +"By definition virtual clients are `stateless` due to their ephemeral " +"nature. A client state can be implemented as part of the Flower client " +"class but users need to ensure this saved to persistent storage (e.g. a " +"database, disk) and that can be retrieve later by the same client " +"regardless on which node it is running from. This is related to the point" +" above also since, in some way, the client's dataset could be seen as a " +"type of `state`." msgstr "" -#: flwr.client.client_app.ClientApp.evaluate:1::1 of +#: ../../source/how-to-save-and-load-model-checkpoints.rst:2 #, fuzzy -msgid ":py:obj:`train `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "Save and load model checkpoints" +msgstr "Sauvegarde et chargement des points de contrôle PyTorch" -#: flwr.client.client_app.ClientApp.evaluate:1::1 -#: flwr.client.client_app.ClientApp.train:1 of -msgid "Return a decorator that registers the train fn with the client app." +#: ../../source/how-to-save-and-load-model-checkpoints.rst:4 +msgid "" +"Flower does not automatically save model updates on the server-side. This" +" how-to guide describes the steps to save (and load) model checkpoints in" +" Flower." 
msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:2 -msgid "NumPyClient" -msgstr "NumPyClient" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:8 +#, fuzzy +msgid "Model checkpointing" +msgstr "Point de contrôle du modèle" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:10 +#, fuzzy msgid "" -":py:obj:`evaluate `\\ \\(parameters\\, " -"config\\)" -msgstr "" - -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid ":py:obj:`fit `\\ \\(parameters\\, config\\)" +"Model updates can be persisted on the server-side by customizing " +"``Strategy`` methods. Implementing custom strategies is always an option," +" but for many cases it may be more convenient to simply customize an " +"existing strategy. The following code example defines a new " +"``SaveModelStrategy`` which customized the existing built-in ``FedAvg`` " +"strategy. In particular, it customizes ``aggregate_fit`` by calling " +"``aggregate_fit`` in the base class (``FedAvg``). It then continues to " +"save returned (aggregated) weights before it returns those aggregated " +"weights to the caller (i.e., the server):" msgstr "" +"Les mises à jour du modèle peuvent être conservées côté serveur en " +"personnalisant les méthodes :code:`Strategy`. L'implémentation de " +"stratégies personnalisées est toujours possible, mais dans de nombreux " +"cas, il peut être plus pratique de simplement personnaliser une stratégie" +" existante. L'exemple de code suivant définit une nouvelle " +":code:`SaveModelStrategy` qui personnalise la stratégie intégrée " +":code:`FedAvg` existante. En particulier, il personnalise " +":code:`aggregate_fit` en appelant :code:`aggregate_fit` dans la classe de" +" base (:code:`FedAvg`). 
Il continue ensuite à sauvegarder les poids " +"retournés (agrégés) avant de renvoyer ces poids agrégés à l'appelant " +"(c'est-à-dire le serveur) :" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.fit:1 of +#: ../../source/how-to-save-and-load-model-checkpoints.rst:53 #, fuzzy -msgid "Train the provided parameters using the locally held dataset." -msgstr "entraîne le modèle sur l'ensemble d'apprentissage local" +msgid "Save and load PyTorch checkpoints" +msgstr "Sauvegarde et chargement des points de contrôle PyTorch" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid ":py:obj:`get_context `\\ \\(\\)" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:55 +#, fuzzy +msgid "" +"Similar to the previous example but with a few extra steps, we'll show " +"how to store a PyTorch checkpoint we'll use the ``torch.save`` function. " +"Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be" +" transformed into a list of NumPy ``ndarray``'s, then those are " +"transformed into the PyTorch ``state_dict`` following the ``OrderedDict``" +" class structure." msgstr "" +"Comme dans l'exemple précédent, mais avec quelques étapes " +"supplémentaires, nous allons montrer comment stocker un point de contrôle" +" PyTorch en utilisant la fonction ``torch.save``. Tout d'abord, " +"``aggregate_fit`` renvoie un objet ``Parameters`` qui doit être " +"transformé en une liste de `ndarray`` NumPy, puis ceux-ci sont " +"transformés en ``state_dict`` PyTorch en suivant la structure de la " +"classe ``OrderedDict``." -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:98 msgid "" -":py:obj:`get_parameters `\\ " -"\\(config\\)" +"To load your progress, you simply append the following lines to your " +"code. 
Note that this will iterate over all saved checkpoints and load the" +" latest one:" msgstr "" +"Pour charger ta progression, il te suffit d'ajouter les lignes suivantes " +"à ton code. Note que cela va itérer sur tous les points de contrôle " +"sauvegardés et charger le plus récent :" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:111 msgid "" -":py:obj:`get_properties `\\ " -"\\(config\\)" +"Return/use this object of type ``Parameters`` wherever necessary, such as" +" in the ``initial_parameters`` when defining a ``Strategy``." msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.get_properties:1 of -msgid "Return a client's set of properties." -msgstr "" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:2 +msgid "Upgrade to Flower 1.0" +msgstr "Passe à Flower 1.0" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:4 msgid "" -":py:obj:`set_context `\\ " -"\\(context\\)" -msgstr "" - -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid ":py:obj:`to_client `\\ \\(\\)" +"Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable" +" foundation for future growth. Compared to Flower 0.19 (and other 0.x " +"series releases), there are a few breaking changes that make it necessary" +" to change the code of existing 0.x-series projects." msgstr "" +"Flower 1.0 est arrivé. En plus de nouvelles fonctionnalités, Flower 1.0 " +"fournit une base stable pour la croissance future. Par rapport à Flower " +"0.19 (et aux autres versions de la série 0.x), il y a quelques " +"changements qui nécessitent de modifier le code des projets de la série " +"0.x existants." -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.to_client:1 of -#, fuzzy -msgid "Convert to object to Client type and return it." 
-msgstr "Convertit l'objet des paramètres en ndarrays NumPy." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +#: ../../source/how-to-upgrade-to-flower-next.rst:63 +msgid "Install update" +msgstr "Installer la mise à jour" -#: flwr.client.numpy_client.NumPyClient.evaluate:1::1 of -msgid ":py:obj:`context `\\" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 +msgid "" +"Here's how to update an existing installation to Flower 1.0 using either " +"pip or Poetry:" msgstr "" +"Voici comment mettre à jour une installation existante vers Flower 1.0 en" +" utilisant soit pip soit Poetry :" -#: flwr.client.numpy_client.NumPyClient.evaluate:3 -#: flwr.client.numpy_client.NumPyClient.fit:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:5 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:8 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:5 -#: flwr.server.strategy.strategy.Strategy.configure_fit:5 -#: flwr.server.strategy.strategy.Strategy.evaluate:8 of -#, fuzzy -msgid "The current (global) model parameters." -msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +msgid "pip: add ``-U`` when installing." +msgstr "pip : ajoute ``-U`` lors de l'installation." -#: flwr.client.numpy_client.NumPyClient.evaluate:5 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:16 msgid "" -"Configuration parameters which allow the server to influence evaluation " -"on the client. It can be used to communicate arbitrary values from the " -"server to the client, for example, to influence the number of examples " -"used for evaluation." 
+"``python -m pip install -U flwr`` (when using ``start_server`` and " +"``start_client``)" msgstr "" +"``python -m pip install -U flwr`` (lors de l'utilisation de " +"``start_server`` et ``start_client``)" -#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 msgid "" -"* **loss** (*float*) -- The evaluation loss of the model on the local " -"dataset. * **num_examples** (*int*) -- The number of examples used for " -"evaluation. * **metrics** (*Dict[str, Scalar]*) -- A dictionary mapping " -"arbitrary string keys to values of type bool, bytes, float, int, or " -"str. It can be used to communicate arbitrary values back to the server." +"``python -m pip install -U 'flwr[simulation]'`` (when using " +"``start_simulation``)" msgstr "" +"``python -m pip install -U 'flwr[simulation]'`` (lors de l'utilisation de" +" ``start_simulation``)" -#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 msgid "" -"**loss** (*float*) -- The evaluation loss of the model on the local " -"dataset." +"Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " +"reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " +"before running ``poetry install``)." msgstr "" +"Poetry : mettez à jour la dépendance ``flwr`` dans ``pyproject.toml`` " +"puis réinstallez (n'oubliez pas de supprimer ``poetry.lock`` via ``rm " +"poetry.lock`` avant d'exécuter ``poetry install``)." -#: flwr.client.numpy_client.NumPyClient.evaluate:12 of -msgid "**num_examples** (*int*) -- The number of examples used for evaluation." 
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:23 +msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" msgstr "" +"``flwr = \"^1.0.0\"`` (lors de l'utilisation de ``start_server`` et " +"``start_client``)" -#: flwr.client.numpy_client.NumPyClient.evaluate:13 -#: flwr.client.numpy_client.NumPyClient.fit:13 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 msgid "" -"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " -"string keys to values of type bool, bytes, float, int, or str. It can be " -"used to communicate arbitrary values back to the server." +"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " +"using ``start_simulation``)" msgstr "" +"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (lors de " +"l'utilisation de ``start_simulation``)" -#: flwr.client.numpy_client.NumPyClient.evaluate:19 of -msgid "" -"The previous return type format (int, float, float) and the extended " -"format (int, float, float, Dict[str, Scalar]) have been deprecated and " -"removed since Flower 0.19." -msgstr "" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:28 +#: ../../source/how-to-upgrade-to-flower-next.rst:121 +msgid "Required changes" +msgstr "Changements nécessaires" -#: flwr.client.numpy_client.NumPyClient.fit:5 of -msgid "" -"Configuration parameters which allow the server to influence training on " -"the client. It can be used to communicate arbitrary values from the " -"server to the client, for example, to set the number of (local) training " -"epochs." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:30 +msgid "The following breaking changes require manual updates." msgstr "" +"Les changements de rupture suivants nécessitent des mises à jour " +"manuelles." 
-#: flwr.client.numpy_client.NumPyClient.fit:11 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 +msgid "General" +msgstr "Généralités" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:35 msgid "" -"* **parameters** (*NDArrays*) -- The locally updated model parameters. * " -"**num_examples** (*int*) -- The number of examples used for training. * " -"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " -"string keys to values of type bool, bytes, float, int, or str. It can " -"be used to communicate arbitrary values back to the server." +"Pass all arguments as keyword arguments (not as positional arguments). " +"Here's an example:" msgstr "" +"Passe tous les arguments comme des arguments de mots-clés (et non comme " +"des arguments de position). Voici un exemple :" -#: flwr.client.numpy_client.NumPyClient.fit:11 of -#, fuzzy -msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." -msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" - -#: flwr.client.numpy_client.NumPyClient.fit:12 of -msgid "**num_examples** (*int*) -- The number of examples used for training." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:38 +msgid "" +"Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " +"FlowerClient())``" msgstr "" +"Flower 0.19 (arguments positionnels) : ``start_client(\"127.0.0.1:8080\"," +" FlowerClient())``" -#: flwr.client.numpy_client.NumPyClient.get_parameters:3 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 msgid "" -"Configuration parameters requested by the server. This can be used to " -"tell the client which parameters are needed along with some Scalar " -"attributes." 
+"Flower 1.0 (keyword arguments): " +"``start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())``" msgstr "" +"Fleur 1.0 (arguments de mots-clés) : " +"``start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())``" -#: flwr.client.numpy_client.NumPyClient.get_parameters:8 of -#, fuzzy -msgid "**parameters** -- The local model parameters as a list of NumPy ndarrays." -msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 +#: ../../source/ref-api/flwr.client.Client.rst:2 +msgid "Client" +msgstr "Client" -#: flwr.client.numpy_client.NumPyClient.get_properties:3 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 msgid "" -"Configuration parameters requested by the server. This can be used to " -"tell the client which properties are needed along with some Scalar " -"attributes." +"Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " +"``def get_parameters(self, config):``" msgstr "" +"Sous-classes de ``NumPyClient`` : changez ``def get_parameters(self):`` " +"en ``def get_parameters(self, config):``" -#: flwr.client.numpy_client.NumPyClient.get_properties:8 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 msgid "" -"**properties** -- A dictionary mapping arbitrary string keys to values of" -" type bool, bytes, float, int, or str. It can be used to communicate " -"arbitrary property values back to the server." 
-msgstr "" - -#: ../../source/ref-api/flwr.client.run_client_app.rst:2 -msgid "run\\_client\\_app" +"Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " +"get_parameters(self, ins: GetParametersIns):``" msgstr "" +"Sous-classes de ``Client`` : changez ``def get_parameters(self):`` en " +"``def get_parameters(self, ins : GetParametersIns):``" -#: ../../source/ref-api/flwr.client.run_supernode.rst:2 -#, fuzzy -msgid "run\\_supernode" -msgstr "flower-superlink" - -#: ../../source/ref-api/flwr.client.start_client.rst:2 -#, fuzzy -msgid "start\\_client" -msgstr "start_client" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +msgid "Strategies / ``start_server`` / ``start_simulation``" +msgstr "Stratégies / ``démarrer_serveur`` / ``démarrer_simulation``" -#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:53 msgid "" -"The IPv4 or IPv6 address of the server. If the Flower server runs on the " -"same machine on port 8080, then `server_address` would be " -"`\"[::]:8080\"`." -msgstr "" - -#: flwr.client.app.start_client:7 of -msgid "A callable that instantiates a Client. (default: None)" +"Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " +"``start_simulation``. Here's an example:" msgstr "" +"Passez ``ServerConfig`` (au lieu d'un dictionnaire) à ``start_server`` et" +" ``start_simulation``. 
Voici un exemple :" -#: flwr.client.app.start_client:9 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:56 msgid "" -"An implementation of the abstract base class `flwr.client.Client` " -"(default: None)" +"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " +"\"round_timeout\": 600.0}, ...)``" msgstr "" +"Flower 0.19 : ``start_server(..., config={\"num_rounds\" : 3, " +"\"round_timeout\" : 600.0}, ...)``" -#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower" -" server. The default should be sufficient for most models. Users who " -"train very large models might need to increase this value. Note that the " -"Flower server needs to be started with the same value (see " -"`flwr.server.start_server`), otherwise it will not know about the " -"increased limit and block larger messages." +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" msgstr "" +"Flower 1.0 : ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 msgid "" -"The PEM-encoded root certificates as a byte string or a path string. If " -"provided, a secure connection using the certificates will be established " -"to an SSL-enabled Flower server." +"Replace ``num_rounds=1`` in ``start_simulation`` with the new " +"``config=ServerConfig(...)`` (see previous item)" msgstr "" +"Remplacer ``num_rounds=1`` dans ``start_simulation`` par le nouveau " +"``config=ServerConfig(...)`` (voir point précédent)" -#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:63 msgid "" -"Starts an insecure gRPC connection when True. 
Enables HTTPS connection " -"when False, using system certificates if `root_certificates` is None." +"Remove ``force_final_distributed_eval`` parameter from calls to " +"``start_server``. Distributed evaluation on all clients can be enabled by" +" configuring the strategy to sample all clients for evaluation after the " +"last round of training." msgstr "" +"Supprime le paramètre ``force_final_distributed_eval`` des appels à " +"``start_server``. L'évaluation distribuée sur tous les clients peut être " +"activée en configurant la stratégie pour échantillonner tous les clients " +"pour l'évaluation après le dernier tour de formation." -#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of -msgid "" -"Configure the transport layer. Allowed values: - 'grpc-bidi': gRPC, " -"bidirectional streaming - 'grpc-rere': gRPC, request-response " -"(experimental) - 'rest': HTTP (experimental)" -msgstr "" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:66 +msgid "Rename parameter/ndarray conversion functions:" +msgstr "Renomme les fonctions de conversion des paramètres et des tableaux :" -#: flwr.client.app.start_client:31 of -msgid "" -"The maximum number of times the client will try to connect to the server " -"before giving up in case of a connection error. If set to None, there is " -"no limit to the number of tries." -msgstr "" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" +msgstr "``parameters_to_weights`` --> ``parameters_to_ndarrays``" -#: flwr.client.app.start_client:35 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:69 +msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" +msgstr "``Poids_à_paramètres`` --> ``Réseaux_à_paramètres``" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 msgid "" -"The maximum duration before the client stops trying to connect to the " -"server in case of connection error. 
If set to None, there is no limit to " -"the total time." +"Strategy initialization: if the strategy relies on the default values for" +" ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " +"``fraction_evaluate`` manually to ``0.1``. Projects that do not manually " +"create a strategy (by calling ``start_server`` or ``start_simulation`` " +"without passing a strategy instance) should now manually initialize " +"FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." msgstr "" +"Initialisation de la stratégie : si la stratégie repose sur les valeurs " +"par défaut de ``fraction_fit`` et ``fraction_evaluate``, fixer " +"manuellement ``fraction_fit`` et ``fraction_evaluate`` à `0.1``. Les " +"projets qui ne créent pas manuellement une stratégie (en appelant " +"``start_server` ou ``start_simulation`` sans passer une instance de " +"stratégie) doivent maintenant initialiser manuellement FedAvg avec " +"``fraction_fit`` et ``fraction_evaluate`` fixés à ``0.1``." 
-#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of -msgid "Starting a gRPC client with an insecure server connection:" -msgstr "" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" +msgstr "Renommer les paramètres de stratégie intégrés (par exemple, ``FedAvg``) :" -#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of -msgid "Starting an SSL-enabled gRPC client using system certificates:" -msgstr "" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:79 +msgid "``fraction_eval`` --> ``fraction_evaluate``" +msgstr "``fraction_eval`` --> ``fraction_evaluate``" -#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of -msgid "Starting an SSL-enabled gRPC client using provided certificates:" -msgstr "" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 +msgid "``min_eval_clients`` --> ``min_evaluate_clients``" +msgstr "``min_eval_clients` --> ``min_evaluate_clients``" -#: ../../source/ref-api/flwr.client.start_numpy_client.rst:2 -#, fuzzy -msgid "start\\_numpy\\_client" -msgstr "start_numpy_client" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:81 +msgid "``eval_fn`` --> ``evaluate_fn``" +msgstr "``eval_fn`` --> ``evaluate_fn``" -#: flwr.client.app.start_numpy_client:5 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:83 msgid "" -"This function is deprecated since 1.7.0. Use " -":code:`flwr.client.start_client` instead and first convert your " -":code:`NumPyClient` to type :code:`flwr.client.Client` by executing its " -":code:`to_client()` method." -msgstr "" - -#: flwr.client.app.start_numpy_client:13 of -msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." +"Rename ``rnd`` to ``server_round``. This impacts multiple methods and " +"functions, for example, ``configure_fit``, ``aggregate_fit``, " +"``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``." 
msgstr "" +"Renommez ``rnd`` en ``server_round``. Cela a un impact sur plusieurs " +"méthodes et fonctions, par exemple, ``configure_fit``, ``aggregate_fit``," +" ``configure_evaluate``, ``aggregate_evaluate``, et ``evaluate_fn``." -#: ../../source/ref-api/flwr.common.rst:2 -msgid "common" -msgstr "commun" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:86 +msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" +msgstr "Ajoute ``server_round`` et ``config`` à `evaluate_fn`` :" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:88 +msgid "" +"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" +"Flower 0.19 : ``def evaluate(parameters : NDArrays) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]]:``" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.record.conversion_utils.array_from_numpy:1 of -#, fuzzy -msgid "Create Array from NumPy ndarray." -msgstr "Convertit l'objet des paramètres en ndarrays NumPy." - -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:90 +msgid "" +"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " +"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " +"Scalar]]]:``" msgstr "" +"Flower 1.0 : ``def evaluate(server_round : int, parameters : NDArrays, " +"config : Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " +"Scalar]]]:``" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.bytes_to_ndarray:1 of -msgid "Deserialize NumPy ndarray from bytes." -msgstr "Désérialise le tableau numérique NumPy à partir d'octets." 
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:94 +msgid "Custom strategies" +msgstr "Stratégies personnalisées" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:96 msgid "" -":py:obj:`configure `\\ \\(identifier\\[\\, " -"filename\\, host\\]\\)" +"The type of parameter ``failures`` has changed from " +"``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " +"BaseException]]`` (in ``aggregate_fit``) and " +"``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in " +"``aggregate_evaluate``)" msgstr "" +"Le type du paramètre ``failures`` a changé de ``List[BaseException]`` à " +"``List[Union[Tuple[ClientProxy, FitRes], BaseException]]`` (dans " +"``aggregate_fit``) et ``List[Union[Tuple[ClientProxy, EvaluateRes], " +"BaseException]]`` (dans ``aggregate_evaluate``)" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.logger.configure:1 of -msgid "Configure logging to file and/or remote log server." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:100 +msgid "" +"The ``Strategy`` method ``evaluate`` now receives the current round of " +"federated learning/evaluation as the first parameter:" msgstr "" -"Configure la journalisation vers un fichier et/ou un serveur de " -"journalisation distant." 
La méthode ``evaluate`` de ``Strategy`` reçoit
- -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:114 +msgid "" +"Remove \"placeholder\" methods from subclasses of ``Client`` or " +"``NumPyClient``. If you, for example, use server-side evaluation, then " +"empty placeholder implementations of ``evaluate`` are no longer " +"necessary." msgstr "" +"Supprime les méthodes \"placeholder\" des sous-classes de ``Client`` ou " +"de ``NumPyClient``. Si tu utilises, par exemple, l'évaluation côté " +"serveur, alors les implémentations \"placeholder\" de ``evaluate`` ne " +"sont plus nécessaires." -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.ndarray_to_bytes:1 of -msgid "Serialize NumPy ndarray to bytes." -msgstr "Sérialise le tableau numérique NumPy en octets." - -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`now `\\ \\(\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:117 +msgid "" +"Configure the round timeout via ``start_simulation``: " +"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " +"round_timeout=600.0), ...)``" msgstr "" +"Configurez le délai d'attente de la ronde via ``start_simulation`` : " +"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " +"round_timeout=600.0), ...)``" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.date.now:1 of -msgid "Construct a datetime from time.time() with time zone set to UTC." -msgstr "" -"Construit une date à partir de time.time() avec le fuseau horaire réglé " -"sur UTC." 
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:121 +#: ../../source/how-to-upgrade-to-flower-next.rst:349 +msgid "Further help" +msgstr "Aide supplémentaire" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:123 msgid "" -":py:obj:`ndarrays_to_parameters `\\ " -"\\(ndarrays\\)" +"Most official `Flower code examples " +"`_ are already updated" +" to Flower 1.0, they can serve as a reference for using the Flower 1.0 " +"API. If there are further questions, `join the Flower Slack " +"`_ and use the channel ``#questions``." msgstr "" +"La plupart des `exemples de code Flower officiels " +"`_ sont déjà mis à " +"jour vers Flower 1.0, ils peuvent servir de référence pour l'utilisation " +"de l'API Flower 1.0. Si vous avez d'autres questions, `joins le Slack " +"Flower `_ et utilise le canal " +"``#questions``." -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.ndarrays_to_parameters:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 -#: of -msgid "Convert NumPy ndarrays to parameters object." -msgstr "Convertit les ndarrays NumPy en objets de paramètres." +#: ../../source/how-to-upgrade-to-flower-next.rst:2 +#, fuzzy +msgid "Upgrade to Flower Next" +msgstr "Passe à Flower 1.0" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:4 msgid "" -":py:obj:`parameters_to_ndarrays `\\ " -"\\(parameters\\)" +"Welcome to the migration guide for updating Flower to Flower Next! " +"Whether you're a seasoned user or just getting started, this guide will " +"help you smoothly transition your existing setup to take advantage of the" +" latest features and improvements in Flower Next, starting from version " +"1.8." 
msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.parameters_to_ndarrays:1 of -msgid "Convert parameters object to NumPy ndarrays." -msgstr "Convertit l'objet des paramètres en ndarrays NumPy." - -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:11 msgid "" -":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " -"data\\)" +"This guide shows how to reuse pre-``1.8`` Flower code with minimum code " +"changes by using the *compatibility layer* in Flower Next. In another " +"guide, we will show how to run Flower Next end-to-end with pure Flower " +"Next APIs." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.parametersrecord.Array:1 of -msgid "Array type." +#: ../../source/how-to-upgrade-to-flower-next.rst:15 +msgid "Let's dive in!" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:68 +#, fuzzy msgid "" -":py:obj:`ClientMessage `\\ " -"\\(\\[get\\_properties\\_res\\, ...\\]\\)" +"Here's how to update an existing installation of Flower to Flower Next " +"with ``pip``:" msgstr "" +"Voici comment mettre à jour une installation existante vers Flower 1.0 en" +" utilisant soit pip soit Poetry :" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ClientMessage:1 of -msgid "ClientMessage is a container used to hold one result message." +#: ../../source/how-to-upgrade-to-flower-next.rst:74 +msgid "or if you need Flower Next with simulation:" msgstr "" -"ClientMessage est un conteneur utilisé pour contenir un message de " -"résultat." -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Code `\\ \\(value\\)" +#: ../../source/how-to-upgrade-to-flower-next.rst:80 +msgid "" +"Ensure you set the following version constraint in your " +"``requirements.txt``" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Code:1 of -msgid "Client status codes." 
-msgstr "Codes d'état du client." +#: ../../source/how-to-upgrade-to-flower-next.rst:90 +msgid "or ``pyproject.toml``:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:101 +#, fuzzy +msgid "Using Poetry" +msgstr "Utiliser la poésie (recommandé)" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:103 #, fuzzy msgid "" -":py:obj:`ConfigsRecord `\\ " -"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" +"Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " +"(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " +"running ``poetry install``)." msgstr "" -"Flower 1.0 : ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +"Poetry : mettez à jour la dépendance ``flwr`` dans ``pyproject.toml`` " +"puis réinstallez (n'oubliez pas de supprimer ``poetry.lock`` via ``rm " +"poetry.lock`` avant d'exécuter ``poetry install``)." -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.configsrecord.ConfigsRecord:1 of +#: ../../source/how-to-upgrade-to-flower-next.rst:106 #, fuzzy -msgid "Configs record." -msgstr "Configurer les clients" +msgid "" +"Ensure you set the following version constraint in your " +"``pyproject.toml``:" +msgstr "Augmente la version mineure de ``pyproject.toml`` d'une unité." -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Context `\\ \\(state\\)" +#: ../../source/how-to-upgrade-to-flower-next.rst:123 +msgid "" +"In Flower Next, the *infrastructure* and *application layers* have been " +"decoupled. Instead of starting a client in code via ``start_client()``, " +"you create a |clientapp_link|_ and start it via the command line. Instead" +" of starting a server in code via ``start_server()``, you create a " +"|serverapp_link|_ and start it via the command line. The long-running " +"components of server and client are called SuperLink and SuperNode. 
The " +"following non-breaking changes that require manual updates and allow you " +"to run your project both in the traditional way and in the Flower Next " +"way:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.context.Context:1 of -msgid "State of your run." -msgstr "" +#: ../../source/how-to-upgrade-to-flower-next.rst:132 +#, fuzzy +msgid "|clientapp_link|_" +msgstr "client" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" +#: ../../source/how-to-upgrade-to-flower-next.rst:134 +msgid "" +"Wrap your existing client with |clientapp_link|_ instead of launching it " +"via |startclient_link|_. Here's an example:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.DisconnectRes:1 of -msgid "DisconnectRes message from client to server." -msgstr "Message DisconnectRes envoyé par le client au serveur." +#: ../../source/how-to-upgrade-to-flower-next.rst:157 +#, fuzzy +msgid "|serverapp_link|_" +msgstr "serveur" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:159 msgid "" -":py:obj:`EvaluateIns `\\ \\(parameters\\, " -"config\\)" +"Wrap your existing strategy with |serverapp_link|_ instead of starting " +"the server via |startserver_link|_. Here's an example:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.EvaluateIns:1 of -msgid "Evaluate instructions for a client." -msgstr "Évaluer les instructions pour un client." +#: ../../source/how-to-upgrade-to-flower-next.rst:180 +msgid "Deployment" +msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:182 msgid "" -":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " -"num\\_examples\\, metrics\\)" +"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " +"in sequence, |flowernext_clientapp_link|_ (2x) and " +"|flowernext_serverapp_link|_. 
There is no need to execute `client.py` and" +" `server.py` as Python scripts." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.EvaluateRes:1 of -msgid "Evaluate response from a client." -msgstr "Évaluer la réponse d'un client." +#: ../../source/how-to-upgrade-to-flower-next.rst:185 +msgid "" +"Here's an example to start the server without HTTPS (only for " +"prototyping):" +msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`EventType `\\ \\(value\\)" +#: ../../source/how-to-upgrade-to-flower-next.rst:201 +msgid "" +"Here's another example to start with HTTPS. Use the ``--ssl-ca-" +"certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line " +"options to pass paths to (CA certificate, server certificate, and server " +"private key)." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.telemetry.EventType:1 of -msgid "Types of telemetry events." -msgstr "Types d'événements télémétriques." +#: ../../source/how-to-upgrade-to-flower-next.rst:229 +#, fuzzy +msgid "Simulation in CLI" +msgstr "Simulation de moniteur" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" +#: ../../source/how-to-upgrade-to-flower-next.rst:231 +msgid "" +"Wrap your existing client and strategy with |clientapp_link|_ and " +"|serverapp_link|_, respectively. There is no need to use |startsim_link|_" +" anymore. Here's an example:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.FitIns:1 of -msgid "Fit instructions for a client." -msgstr "Instructions d'ajustement pour un client." - -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:264 msgid "" -":py:obj:`FitRes `\\ \\(status\\, parameters\\, " -"num\\_examples\\, metrics\\)" +"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " +"``client_app`` object in the code instead of executing the Python script." 
+" Here's an example (assuming the ``server_app`` and ``client_app`` " +"objects are in a ``sim.py`` module):" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.FitRes:1 of -msgid "Fit response from a client." -msgstr "Réponse adaptée d'un client." +#: ../../source/how-to-upgrade-to-flower-next.rst:281 +msgid "" +"Set default resources for each |clientapp_link|_ using the ``--backend-" +"config`` command line argument instead of setting the " +"``client_resources`` argument in |startsim_link|_. Here's an example:" +msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" +#: ../../source/how-to-upgrade-to-flower-next.rst:305 +msgid "Simulation in a Notebook" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Error:1 of -msgid "A dataclass that stores information about an error that occurred." +#: ../../source/how-to-upgrade-to-flower-next.rst:307 +msgid "" +"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's " +"an example:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" +#: ../../source/how-to-upgrade-to-flower-next.rst:351 +#, fuzzy +msgid "" +"Some official `Flower code examples `_ " +"are already updated to Flower Next so they can serve as a reference for " +"using the Flower Next API. If there are further questions, `join the " +"Flower Slack `_ and use the channel " +"``#questions``. You can also `participate in Flower Discuss " +"`_ where you can find us answering questions," +" or share and learn from others about migrating to Flower Next." msgstr "" +"La plupart des `exemples de code Flower officiels " +"`_ sont déjà mis à " +"jour vers Flower 1.0, ils peuvent servir de référence pour l'utilisation " +"de l'API Flower 1.0. Si vous avez d'autres questions, `joins le Slack " +"Flower `_ et utilise le canal " +"``#questions``." 
-#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetParametersIns:1 of -msgid "Parameters request for a client." -msgstr "Demande de paramètres pour un client." +#: ../../source/how-to-upgrade-to-flower-next.rst:358 +#, fuzzy +msgid "Important" +msgstr "Changements importants :" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:360 msgid "" -":py:obj:`GetParametersRes `\\ \\(status\\, " -"parameters\\)" +"As we continuously enhance Flower Next at a rapid pace, we'll be " +"periodically updating this guide. Please feel free to share any feedback " +"with us!" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetParametersRes:1 of -msgid "Response when asked to return parameters." -msgstr "Réponse lorsqu'on te demande de renvoyer des paramètres." - -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" +#: ../../source/how-to-upgrade-to-flower-next.rst:366 +msgid "Happy migrating! 🚀" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetPropertiesIns:1 of -msgid "Properties request for a client." -msgstr "Demande de propriétés pour un client." +#: ../../source/how-to-use-built-in-mods.rst:2 +msgid "Use Built-in Mods" +msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:4 msgid "" -":py:obj:`GetPropertiesRes `\\ \\(status\\, " -"properties\\)" +"**Note: This tutorial covers experimental features. The functionality and" +" interfaces may change in future versions.**" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetPropertiesRes:1 of -msgid "Properties response from a client." -msgstr "Réponse des propriétés d'un client." 
- -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:7 msgid "" -":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " -"error\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Message:1 of -msgid "State of your application from the viewpoint of the entity using it." +"In this tutorial, we will learn how to utilize built-in mods to augment " +"the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) " +"allow us to perform operations before and after a task is processed in " +"the ``ClientApp``." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`MessageType `\\ \\(\\)" +#: ../../source/how-to-use-built-in-mods.rst:12 +msgid "What are Mods?" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.constant.MessageType:1 of -msgid "Message type." +#: ../../source/how-to-use-built-in-mods.rst:14 +msgid "" +"A Mod is a callable that wraps around a ``ClientApp``. It can manipulate " +"or inspect the incoming ``Message`` and the resulting outgoing " +"``Message``. The signature for a ``Mod`` is as follows:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" +#: ../../source/how-to-use-built-in-mods.rst:23 +msgid "A typical mod function might look something like this:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.constant.MessageTypeLegacy:1 of -msgid "Legacy message type." 
+#: ../../source/how-to-use-built-in-mods.rst:36 +msgid "Using Mods" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`Metadata `\\ \\(run\\_id\\, " -"message\\_id\\, src\\_node\\_id\\, ...\\)" +#: ../../source/how-to-use-built-in-mods.rst:38 +msgid "To use mods in your ``ClientApp``, you can follow these steps:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Metadata:1 of -msgid "A dataclass holding metadata associated with the current message." +#: ../../source/how-to-use-built-in-mods.rst:41 +msgid "1. Import the required mods" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`MetricsRecord `\\ " -"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" +#: ../../source/how-to-use-built-in-mods.rst:43 +msgid "First, import the built-in mod you intend to use:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.metricsrecord.MetricsRecord:1 of -msgid "Metrics record." +#: ../../source/how-to-use-built-in-mods.rst:51 +msgid "2. Define your client function" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`NDArray `\\" +#: ../../source/how-to-use-built-in-mods.rst:53 +msgid "" +"Define your client function (``client_fn``) that will be wrapped by the " +"mod(s):" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " -":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" +#: ../../source/how-to-use-built-in-mods.rst:62 +msgid "3. Create the ``ClientApp`` with mods" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:64 msgid "" -":py:obj:`Parameters `\\ \\(tensors\\, " -"tensor\\_type\\)" +"Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " +"argument. 
The order in which you provide the mods matters:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Parameters:1 of -msgid "Model parameters." -msgstr "Paramètres du modèle." +#: ../../source/how-to-use-built-in-mods.rst:78 +#, fuzzy +msgid "Order of execution" +msgstr "Dépréciations" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:80 msgid "" -":py:obj:`ParametersRecord `\\ " -"\\(\\[array\\_dict\\, keep\\_input\\]\\)" +"When the ``ClientApp`` runs, the mods are executed in the order they are " +"provided in the list:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.parametersrecord.ParametersRecord:1 of -#, fuzzy -msgid "Parameters record." -msgstr "Paramètres du modèle." - -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" +#: ../../source/how-to-use-built-in-mods.rst:83 +msgid "``example_mod_1`` (outermost mod)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ReconnectIns:1 of -msgid "ReconnectIns message from server to client." -msgstr "Message de reconnexion du serveur au client." +#: ../../source/how-to-use-built-in-mods.rst:84 +msgid "``example_mod_2`` (next mod)" +msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:85 msgid "" -":py:obj:`RecordSet `\\ " -"\\(\\[parameters\\_records\\, ...\\]\\)" +"Message handler (core function that handles the incoming ``Message`` and " +"returns the outgoing ``Message``)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.recordset.RecordSet:1 of -msgid "RecordSet stores groups of parameters, metrics and configs." 
+#: ../../source/how-to-use-built-in-mods.rst:87 +msgid "``example_mod_2`` (on the way back)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`ServerMessage `\\ " -"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" +#: ../../source/how-to-use-built-in-mods.rst:88 +msgid "``example_mod_1`` (outermost mod on the way back)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ServerMessage:1 of -msgid "ServerMessage is a container used to hold one instruction message." +#: ../../source/how-to-use-built-in-mods.rst:90 +msgid "" +"Each mod has a chance to inspect and modify the incoming ``Message`` " +"before passing it to the next mod, and likewise with the outgoing " +"``Message`` before returning it up the stack." msgstr "" -"ServerMessage est un conteneur utilisé pour contenir un message " -"d'instruction." -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Status `\\ \\(code\\, message\\)" +#: ../../source/how-to-use-built-in-mods.rst:97 +msgid "" +"By following this guide, you have learned how to effectively use mods to " +"enhance your ``ClientApp``'s functionality. Remember that the order of " +"mods is crucial and affects how the input and output are processed." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Status:1 of -msgid "Client status." -msgstr "Statut du client." - -#: ../../source/ref-api/flwr.common.Array.rst:2 -msgid "Array" +#: ../../source/how-to-use-built-in-mods.rst:101 +msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" msgstr "" -#: flwr.common.record.parametersrecord.Array:3 of +#: ../../source/how-to-use-differential-privacy.rst:2 +#, fuzzy +msgid "Use Differential Privacy" +msgstr "Confidentialité différentielle" + +#: ../../source/how-to-use-differential-privacy.rst:4 msgid "" -"A dataclass containing serialized data from an array-like or tensor-like " -"object along with some metadata about it." 
+"This guide explains how you can utilize differential privacy in the " +"Flower framework. If you are not yet familiar with differential privacy, " +"you can refer to :doc:`explanation-differential-privacy`." msgstr "" -#: flwr.common.record.parametersrecord.Array:6 of +#: ../../source/how-to-use-differential-privacy.rst:10 msgid "" -"A string representing the data type of the serialised object (e.g. " -"`np.float32`)" +"Differential Privacy in Flower is in a preview phase. If you plan to use " +"these features in a production environment with sensitive data, feel free" +" contact us to discuss your requirements and to receive guidance on how " +"to best use these features." msgstr "" -#: flwr.common.record.parametersrecord.Array:8 of +#: ../../source/how-to-use-differential-privacy.rst:17 msgid "" -"A list representing the shape of the unserialized array-like object. This" -" is used to deserialize the data (depending on the serialization method) " -"or simply as a metadata field." +"This approach consists of two separate phases: clipping of the updates " +"and adding noise to the aggregated model. For the clipping phase, Flower " +"framework has made it possible to decide whether to perform clipping on " +"the server side or the client side." msgstr "" -#: flwr.common.record.parametersrecord.Array:12 of +#: ../../source/how-to-use-differential-privacy.rst:21 msgid "" -"A string indicating the type of serialisation mechanism used to generate " -"the bytes in `data` from an array-like or tensor-like object." +"**Server-side Clipping**: This approach has the advantage of the server " +"enforcing uniform clipping across all clients' updates and reducing the " +"communication overhead for clipping values. However, it also has the " +"disadvantage of increasing the computational load on the server due to " +"the need to perform the clipping operation for all clients." 
msgstr "" -#: flwr.common.record.parametersrecord.Array:15 of -msgid "A buffer of bytes containing the data." +#: ../../source/how-to-use-differential-privacy.rst:26 +msgid "" +"**Client-side Clipping**: This approach has the advantage of reducing the" +" computational overhead on the server. However, it also has the " +"disadvantage of lacking centralized control, as the server has less " +"control over the clipping process." msgstr "" -#: ../../source/ref-api/flwr.common.Array.rst:26::1 -#, fuzzy -msgid ":py:obj:`numpy `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.Array.rst:26::1 -#: flwr.common.record.parametersrecord.Array.numpy:1 of +#: ../../source/how-to-use-differential-privacy.rst:31 #, fuzzy -msgid "Return the array as a NumPy array." -msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" +msgid "Server-side Clipping" +msgstr "Logique côté serveur" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`dtype `\\" +#: ../../source/how-to-use-differential-privacy.rst:33 +msgid "" +"For central DP with server-side clipping, there are two ``Strategy`` " +"classes that act as wrappers around the actual ``Strategy`` instance (for" +" example, ``FedAvg``). The two wrapper classes are " +"``DifferentialPrivacyServerSideFixedClipping`` and " +"``DifferentialPrivacyServerSideAdaptiveClipping`` for fixed and adaptive " +"clipping." 
msgstr "" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -#, fuzzy -msgid ":py:obj:`shape `\\" -msgstr "serveur.stratégie.Stratégie" - -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +#: ../../source/how-to-use-differential-privacy.rst:-1 #, fuzzy -msgid ":py:obj:`stype `\\" -msgstr "serveur.stratégie.Stratégie" +msgid "server side clipping" +msgstr "Logique côté serveur" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`data `\\" +#: ../../source/how-to-use-differential-privacy.rst:43 +msgid "" +"The code sample below enables the ``FedAvg`` strategy to use server-side " +"fixed clipping using the ``DifferentialPrivacyServerSideFixedClipping`` " +"wrapper class. The same approach can be used with " +"``DifferentialPrivacyServerSideAdaptiveClipping`` by adjusting the " +"corresponding input parameters." msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:2 +#: ../../source/how-to-use-differential-privacy.rst:64 #, fuzzy -msgid "ClientMessage" -msgstr "Côté client" +msgid "Client-side Clipping" +msgstr "Logique côté client" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -msgid ":py:obj:`evaluate_res `\\" +#: ../../source/how-to-use-differential-privacy.rst:66 +msgid "" +"For central DP with client-side clipping, the server sends the clipping " +"value to selected clients on each round. Clients can use existing Flower " +"``Mods`` to perform the clipping. Two mods are available for fixed and " +"adaptive client-side clipping: ``fixedclipping_mod`` and " +"``adaptiveclipping_mod`` with corresponding server-side wrappers " +"``DifferentialPrivacyClientSideFixedClipping`` and " +"``DifferentialPrivacyClientSideAdaptiveClipping``." 
msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -msgid ":py:obj:`fit_res `\\" -msgstr "" +#: ../../source/how-to-use-differential-privacy.rst:-1 +#, fuzzy +msgid "client side clipping" +msgstr "Logique côté client" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +#: ../../source/how-to-use-differential-privacy.rst:78 msgid "" -":py:obj:`get_parameters_res " -"`\\" +"The code sample below enables the ``FedAvg`` strategy to use differential" +" privacy with client-side fixed clipping using both the " +"``DifferentialPrivacyClientSideFixedClipping`` wrapper class and, on the " +"client, ``fixedclipping_mod``:" msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +#: ../../source/how-to-use-differential-privacy.rst:97 msgid "" -":py:obj:`get_properties_res " -"`\\" -msgstr "" - -#: ../../source/ref-api/flwr.common.Code.rst:2 -msgid "Code" +"In addition to the server-side strategy wrapper, the ``ClientApp`` needs " +"to configure the matching ``fixedclipping_mod`` to perform the client-" +"side clipping:" msgstr "" -#: flwr.common.typing.Code:1 of -msgid "Bases: :py:class:`~enum.Enum`" +#: ../../source/how-to-use-differential-privacy.rst:115 +msgid "" +"To utilize local differential privacy (DP) and add noise to the client " +"model parameters before transmitting them to the server in Flower, you " +"can use the `LocalDpMod`. The following hyperparameters need to be set: " +"clipping norm value, sensitivity, epsilon, and delta." 
msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -msgid ":py:obj:`OK `\\" +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "local DP mod" msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -msgid "" -":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " -"`\\" +#: ../../source/how-to-use-differential-privacy.rst:125 +msgid "Below is a code example that shows how to use ``LocalDpMod``:" msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#: ../../source/how-to-use-differential-privacy.rst:140 msgid "" -":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " -"`\\" +"Please note that the order of mods, especially those that modify " +"parameters, is important when using multiple modifiers. Typically, " +"differential privacy (DP) modifiers should be the last to operate on " +"parameters." msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -msgid ":py:obj:`FIT_NOT_IMPLEMENTED `\\" +#: ../../source/how-to-use-differential-privacy.rst:145 +msgid "Local Training using Privacy Engines" msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#: ../../source/how-to-use-differential-privacy.rst:147 msgid "" -":py:obj:`EVALUATE_NOT_IMPLEMENTED " -"`\\" +"For ensuring data instance-level privacy during local model training on " +"the client side, consider leveraging privacy engines such as Opacus and " +"TensorFlow Privacy. For examples of using Flower with these engines, " +"please refer to the Flower examples directory (`Opacus " +"`_, `Tensorflow" +" Privacy `_)." 
msgstr "" -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 +#: ../../source/how-to-use-strategies.rst:2 #, fuzzy -msgid "ConfigsRecord" -msgstr "Configurer les clients" +msgid "Use strategies" +msgstr "Stratégies personnalisées" -#: flwr.common.record.configsrecord.ConfigsRecord:1 of +#: ../../source/how-to-use-strategies.rst:4 +#, fuzzy msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`int` | :py:class:`float` | :py:class:`str` |" -" :py:class:`bytes` | :py:class:`bool` | :py:class:`~typing.List`\\ " -"[:py:class:`int`] | :py:class:`~typing.List`\\ [:py:class:`float`] | " -":py:class:`~typing.List`\\ [:py:class:`str`] | :py:class:`~typing.List`\\" -" [:py:class:`bytes`] | :py:class:`~typing.List`\\ [:py:class:`bool`]]" -msgstr "" - -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`clear `\\ \\(\\)" +"Flower allows full customization of the learning process through the " +"``Strategy`` abstraction. A number of built-in strategies are provided in" +" the core framework." msgstr "" +"Flower permet une personnalisation complète du processus d'apprentissage " +"grâce à l'abstraction :code:`Stratégie`. Un certain nombre de stratégies " +"intégrées sont fournies dans le cadre principal." -#: flwr.common.record.typeddict.TypedDict.clear:1 -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid "Remove all items from R." 
+#: ../../source/how-to-use-strategies.rst:7 +msgid "" +"There are three ways to customize the way Flower orchestrates the " +"learning process on the server side:" msgstr "" +"Il y a trois façons de personnaliser la manière dont Flower orchestre le " +"processus d'apprentissage du côté du serveur :" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`count_bytes `\\ \\(\\)" -msgstr "" +#: ../../source/how-to-use-strategies.rst:10 +#, fuzzy +msgid "Use an existing strategy, for example, ``FedAvg``" +msgstr "Utilise une stratégie existante, par exemple :code:`FedAvg`" -#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 -#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 -#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid "Return number of Bytes stored in this object." -msgstr "" +#: ../../source/how-to-use-strategies.rst:11 +#: ../../source/how-to-use-strategies.rst:43 +msgid "Customize an existing strategy with callback functions" +msgstr "Personnalise une stratégie existante avec des fonctions de rappel" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -msgstr "" +#: ../../source/how-to-use-strategies.rst:12 +#: ../../source/how-to-use-strategies.rst:99 +msgid "Implement a novel strategy" +msgstr "Mets en place une nouvelle stratégie" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.get:1 of -msgid "d defaults to None." -msgstr "" +#: ../../source/how-to-use-strategies.rst:15 +msgid "Use an existing strategy" +msgstr "Utilise une stratégie existante" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`items `\\ \\(\\)" +#: ../../source/how-to-use-strategies.rst:17 +msgid "" +"Flower comes with a number of popular federated learning strategies " +"built-in. 
A built-in strategy can be instantiated as follows:" msgstr "" +"Flower intègre un certain nombre de stratégies d'apprentissage fédéré " +"populaires. Une stratégie intégrée peut être instanciée comme suit :" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`keys `\\ \\(\\)" +#: ../../source/how-to-use-strategies.rst:27 +#, fuzzy +msgid "" +"This creates a strategy with all parameters left at their default values " +"and passes it to the ``start_server`` function. It is usually recommended" +" to adjust a few parameters during instantiation:" msgstr "" +"Cela crée une stratégie dont tous les paramètres sont laissés à leur " +"valeur par défaut et la transmet à la fonction :code:`start_server`. Il " +"est généralement recommandé d'ajuster quelques paramètres lors de " +"l'instanciation :" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +#: ../../source/how-to-use-strategies.rst:45 +msgid "" +"Existing strategies provide several ways to customize their behaviour. " +"Callback functions allow strategies to call user-provided code during " +"execution." msgstr "" +"Les stratégies existantes offrent plusieurs façons de personnaliser leur " +"comportement. Les fonctions de rappel permettent aux stratégies d'appeler" +" le code fourni par l'utilisateur pendant l'exécution." -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.pop:1 of -msgid "If key is not found, d is returned if given, otherwise KeyError is raised." 
-msgstr "" +#: ../../source/how-to-use-strategies.rst:49 +msgid "Configuring client fit and client evaluate" +msgstr "Configurer l'adaptation et l'évaluation du client" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/how-to-use-strategies.rst:51 +#, fuzzy msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" -msgstr "" - -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.update:1 of -msgid "Update R from dict/iterable E and F." +"The server can pass new configuration values to the client each round by " +"providing a function to ``on_fit_config_fn``. The provided function will " +"be called by the strategy and must return a dictionary of configuration " +"key values pairs that will be sent to the client. It must return a " +"dictionary of arbitrary configuration values ``client.fit`` and " +"``client.evaluate`` functions during each round of federated learning." msgstr "" +"Le serveur peut transmettre de nouvelles valeurs de configuration au " +"client à chaque tour en fournissant une fonction à " +":code:`on_fit_config_fn`. La fonction fournie sera appelée par la " +"stratégie et doit renvoyer un dictionnaire de paires de valeurs de clés " +"de configuration qui seront envoyées au client. Elle doit renvoyer un " +"dictionnaire de valeurs de configuration arbitraires :code:`client.fit` " +"et :code:`client.evaluate` au cours de chaque tour d'apprentissage " +"fédéré." -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`values `\\ \\(\\)" +#: ../../source/how-to-use-strategies.rst:84 +#, fuzzy +msgid "" +"The ``on_fit_config_fn`` can be used to pass arbitrary configuration " +"values from server to client, and potentially change these values each " +"round, for example, to adjust the learning rate. The client will receive " +"the dictionary returned by the ``on_fit_config_fn`` in its own " +"``client.fit()`` function." 
msgstr ""
+"Le :code:`on_fit_config_fn` peut être utilisé pour passer des valeurs de "
+"configuration arbitraires du serveur au client, et changer potentiellement "
+"ces valeurs à chaque tour, par exemple pour ajuster le taux "
+"d'apprentissage. Le client recevra le dictionnaire renvoyé par le "
+":code:`on_fit_config_fn` dans sa propre fonction :code:`client.fit()`."

-#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of
-msgid "This function counts booleans as occupying 1 Byte."
+#: ../../source/how-to-use-strategies.rst:89
+#, fuzzy
+msgid ""
+"Similar to ``on_fit_config_fn``, there is also ``on_evaluate_config_fn`` "
+"to customize the configuration sent to ``client.evaluate()``"
 msgstr ""
+"Comme pour :code:`on_fit_config_fn`, il existe aussi "
+":code:`on_evaluate_config_fn` pour personnaliser la configuration envoyée"
+" à :code:`client.evaluate()`"

-#: ../../source/ref-api/flwr.common.Context.rst:2
-msgid "Context"
-msgstr ""
+#: ../../source/how-to-use-strategies.rst:93
+msgid "Configuring server-side evaluation"
+msgstr "Configuration de l'évaluation côté serveur"

-#: flwr.common.context.Context:3 of
+#: ../../source/how-to-use-strategies.rst:95
+#, fuzzy
 msgid ""
-"Holds records added by the entity in a given run and that will stay "
-"local. This means that the data it holds will never leave the system it's"
-" running from. This can be used as an intermediate storage or scratchpad "
-"when executing mods. It can also be used as a memory to access at "
-"different points during the lifecycle of this entity (e.g. across "
-"multiple rounds)"
+"Server-side evaluation can be enabled by passing an evaluation function "
+"to ``evaluate_fn``."
 msgstr ""
+"L'évaluation côté serveur peut être activée en passant une fonction "
+"d'évaluation à :code:`evaluate_fn`."
-#: ../../source/ref-api/flwr.common.Context.rst:28::1 +#: ../../source/how-to-use-strategies.rst:101 #, fuzzy -msgid ":py:obj:`state `\\" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 -msgid "DisconnectRes" +msgid "" +"Writing a fully custom strategy is a bit more involved, but it provides " +"the most flexibility. Read the `Implementing Strategies `_ guide to learn more." msgstr "" +"L'écriture d'une stratégie entièrement personnalisée est un peu plus " +"complexe, mais c'est celle qui offre le plus de souplesse. Lis le guide " +"`Implémentation des stratégies `_ pour " +"en savoir plus." -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:28::1 -msgid ":py:obj:`reason `\\" -msgstr "" +#: ../../source/index.rst:34 +msgid "Tutorial" +msgstr "Tutoriel" -#: ../../source/ref-api/flwr.common.Error.rst:2 -msgid "Error" -msgstr "" +#: ../../source/index.rst:44 +msgid "Quickstart tutorials" +msgstr "Quickstart tutorials" -#: flwr.common.message.Error:3 of -msgid "An identifier for the error." -msgstr "" +#: ../../source/index.rst:81 ../../source/index.rst:85 +msgid "How-to guides" +msgstr "Guides" -#: flwr.common.message.Error:5 of -msgid "A reason for why the error arose (e.g. an exception stack-trace)" +#: ../../source/index.rst:106 +msgid "Legacy example guides" msgstr "" -#: flwr.common.Error.code:1::1 of -msgid ":py:obj:`code `\\" -msgstr "" +#: ../../source/index.rst:114 ../../source/index.rst:119 +msgid "Explanations" +msgstr "Explications" -#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of -msgid "Error code." -msgstr "" +#: None:-1 +msgid "API reference" +msgstr "Référence pour l'API" -#: flwr.common.Error.code:1::1 of -msgid ":py:obj:`reason `\\" -msgstr "" +#: ../../source/index.rst:145 +msgid "Reference docs" +msgstr "Référence pour la documentation" -#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of -msgid "Reason reported about the error." 
-msgstr "" +#: ../../source/index.rst:160 +#, fuzzy +msgid "Contributor tutorials" +msgstr "Configuration du contributeur" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 +#: ../../source/index.rst:167 #, fuzzy -msgid "EvaluateIns" +msgid "Contributor how-to guides" +msgstr "Guide pour les contributeurs" + +#: ../../source/index.rst:179 +#, fuzzy +msgid "Contributor explanations" msgstr "Explications" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 -msgid ":py:obj:`parameters `\\" -msgstr "" +#: ../../source/index.rst:185 +#, fuzzy +msgid "Contributor references" +msgstr "Configuration du contributeur" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 -msgid ":py:obj:`config `\\" +#: ../../source/index.rst:-1 +msgid "" +"Check out the documentation of the main Flower Framework enabling easy " +"Python development for Federated Learning." msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:2 -msgid "EvaluateRes" -msgstr "" +#: ../../source/index.rst:2 +#, fuzzy +msgid "Flower Framework Documentation" +msgstr "Rédiger de la documentation" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`status `\\" +#: ../../source/index.rst:7 +msgid "" +"Welcome to Flower's documentation. `Flower `_ is a " +"friendly federated learning framework." msgstr "" +"Bienvenue sur la documentation de Flower. `Flower `_ " +"est un framework de federated learning convivial et facile à utiliser." -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`loss `\\" -msgstr "" +#: ../../source/index.rst:11 +msgid "Join the Flower Community" +msgstr "Rejoignez la communauté de Flower" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`num_examples `\\" +#: ../../source/index.rst:13 +msgid "" +"The Flower Community is growing quickly - we're a friendly group of " +"researchers, engineers, students, professionals, academics, and other " +"enthusiasts." 
msgstr ""
+"La communauté de Flower s'agrandit rapidement - on est un super groupe de"
+" chercheurs, ingénieurs, étudiants, professionnels, académiques, et "
+"autres hobbyistes."

-#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1
-msgid ":py:obj:`metrics `\\"
-msgstr ""
+#: ../../source/index.rst:16
+msgid "Join us on Slack"
+msgstr "Join us on Slack"

-#: ../../source/ref-api/flwr.common.EventType.rst:2
-msgid "EventType"
+#: ../../source/index.rst:23
+msgid "Flower Framework"
+msgstr "Flower Framework"
+
+#: ../../source/index.rst:25
+msgid ""
+"The user guide is targeted at researchers and developers who want to use "
+"Flower to bring existing machine learning workloads into a federated "
+"setting. One of Flower's design goals was to make this simple. Read on to"
+" learn more."
 msgstr ""
+"Ce guide utilisateur s'adresse à des chercheurs et des développeurs qui "
+"veulent utiliser Flower pour transposer des workloads de Machine Learning"
+" existantes dans un scénario fédéré. Un des buts de Flower est de rendre "
+"cela le plus évident possible. Lisez la suite pour en apprendre plus."

-#: flwr.common.telemetry.EventType:1 of
-msgid "Bases: :py:class:`str`, :py:class:`~enum.Enum`"
+#: ../../source/index.rst:30
+msgid "Tutorials"
+msgstr "Tutoriels"
+
+#: ../../source/index.rst:32
+msgid ""
+"A learning-oriented series of federated learning tutorials, the best "
+"place to start."
 msgstr ""
+"Une série de tutoriels de Federated Learning, l'endroit parfait pour "
+"débuter."

-#: ../../source/ref-api/flwr.common.EventType.rst:163::1
+#: ../../source/index.rst:62
 #, fuzzy
 msgid ""
-":py:obj:`encode `\\ \\(\\[encoding\\, "
-"errors\\]\\)"
-msgstr ""
-"Flower 1.0 : ``start_server(..., "
-"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), "
-"...)``"
-
-#: ../../source/ref-api/flwr.common.EventType.rst:163::1
-#: flwr.common.EventType.encode:1 of
-msgid "Encode the string using the codec registered for encoding."
-msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`replace `\\ \\(old\\, new\\[\\, " -"count\\]\\)" +"QUICKSTART TUTORIALS: :doc:`PyTorch ` | " +":doc:`TensorFlow ` | :doc:`MLX ` | :doc:`🤗 Transformers ` | :doc:`JAX ` | :doc:`Pandas " +"` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | " +":doc:`XGBoost ` | :doc:`Android ` | :doc:`iOS `" msgstr "" +"QUICKSTART TUTORIALS: :ref:`PyTorch ` | " +":ref:`TensorFlow ` | :ref:`🤗 Transformers " +"` | :ref:`JAX ` | :ref:`Pandas " +"` | :ref:`fastai ` | :ref:`PyTorch " +"Lightning ` | :ref:`MXNet ` | :ref:`scikit-learn ` | :ref:`XGBoost " +"` | :ref:`Android ` | :ref:`iOS " +"`" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.replace:1 of -msgid "Return a copy with all occurrences of substring old replaced by new." +#: ../../source/index.rst:70 +msgid "We also made video tutorials for PyTorch:" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/index.rst:75 #, fuzzy -msgid "" -":py:obj:`split `\\ \\(\\[sep\\, " -"maxsplit\\]\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "And TensorFlow:" +msgstr "Exemples de TensorFlow" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rsplit:1 flwr.common.EventType.split:1 of +#: ../../source/index.rst:83 msgid "" -"Return a list of the substrings in the string, using sep as the separator" -" string." +"Problem-oriented how-to guides show step-by-step how to achieve a " +"specific goal." msgstr "" +"Guides orientés sur la résolutions étapes par étapes de problèmes ou " +"objectifs specifiques." -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/index.rst:116 msgid "" -":py:obj:`rsplit `\\ \\(\\[sep\\, " -"maxsplit\\]\\)" +"Understanding-oriented concept guides explain and discuss key topics and " +"underlying ideas behind Flower and collaborative AI." 
msgstr "" +"Guides orientés sur la compréhension et l'explication des sujets et idées" +" de fonds sur lesquels sont construits Flower et l'IA collaborative." -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/index.rst:128 #, fuzzy -msgid ":py:obj:`join `\\ \\(iterable\\, \\/\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "References" +msgstr "Référence" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.join:1 of -msgid "Concatenate any number of strings." +#: ../../source/index.rst:130 +msgid "Information-oriented API reference and other reference material." +msgstr "Référence de l'API orientée sur l'information pure." + +#: ../../source/index.rst:139::1 +msgid ":py:obj:`flwr `\\" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/index.rst:139::1 flwr:1 of +msgid "Flower main package." +msgstr "" + +#: ../../source/index.rst:155 #, fuzzy -msgid ":py:obj:`capitalize `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "Contributor docs" +msgstr "Configuration du contributeur" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.capitalize:1 of -msgid "Return a capitalized version of the string." +#: ../../source/index.rst:157 +#, fuzzy +msgid "" +"The Flower community welcomes contributions. The following docs are " +"intended to help along the way." msgstr "" +"Les auteurs de Flower sont heureux d'accueillir des contributions " +"externes. Les guides suivant sont là pour vous accompagner dans cette " +"direction." -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api-cli.rst:2 #, fuzzy -msgid ":py:obj:`casefold `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "Flower CLI reference" +msgstr "Client de Flower" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.casefold:1 of -msgid "Return a version of the string suitable for caseless comparisons." 
+#: ../../source/ref-api-cli.rst:7 +#, fuzzy +msgid "flwr CLI" +msgstr "Client de Flower" + +#: ../../flwr:1 +msgid "flwr is the Flower command line interface." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api-cli.rst #, fuzzy -msgid ":py:obj:`title `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "Options" +msgstr "Solution" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.title:1 of -msgid "Return a version of the string where each word is titlecased." +#: ../../flwr:1 +msgid "Install completion for the current shell." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr:1 msgid "" -":py:obj:`center `\\ \\(width\\[\\, " -"fillchar\\]\\)" +"Show completion for the current shell, to copy it or customize the " +"installation." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.center:1 of -msgid "Return a centered string of length width." +#: ../../flwr build:1 +msgid "Build a Flower App into a Flower App Bundle (FAB)." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr build:1 msgid "" -":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +"You can run ``flwr build`` without any arguments to bundle the app " +"located in the current directory. Alternatively, you can you can specify " +"a path using the ``--app`` option to bundle an app located at the " +"provided path. For example:" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -"Return the number of non-overlapping occurrences of substring sub in " -"string S[start:end]." +#: ../../flwr build:1 +msgid "``flwr build --app ./apps/flower-hello-world``." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`expandtabs `\\ " -"\\(\\[tabsize\\]\\)" +#: ../../flwr build:1 +msgid "Path of the Flower App to bundle into a FAB" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.expandtabs:1 of -msgid "Return a copy where all tab characters are expanded using spaces." -msgstr "" +#: ../../flwr install:1 +#, fuzzy +msgid "Install a Flower App Bundle." +msgstr "Installer Flower" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +#: ../../flwr install:1 +msgid "It can be ran with a single FAB file argument:" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -"Return the lowest index in S where substring sub is found, such that sub " -"is contained within S[start:end]." +#: ../../flwr install:1 +msgid "``flwr install ./target_project.fab``" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`partition `\\ \\(sep\\, \\/\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.partition:1 flwr.common.EventType.rpartition:1 of -msgid "Partition the string into three parts using the given separator." +#: ../../flwr install:1 +msgid "The target install directory can be specified with ``--flwr-dir``:" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +#: ../../flwr install:1 +msgid "``flwr install ./target_project.fab --flwr-dir ./docs/flwr``" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr install:1 msgid "" -":py:obj:`ljust `\\ \\(width\\[\\, " -"fillchar\\]\\)" +"This will install ``target_project`` to ``./docs/flwr/``. 
By default, " +"``flwr-dir`` is equal to:" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.ljust:1 of -msgid "Return a left-justified string of length width." +#: ../../flwr install:1 +msgid "``$FLWR_HOME/`` if ``$FLWR_HOME`` is defined" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`lower `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.lower:1 of -msgid "Return a copy of the string converted to lowercase." +#: ../../flwr install:1 +msgid "``$XDG_DATA_HOME/.flwr/`` if ``$XDG_DATA_HOME`` is defined" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.lstrip:1 of -msgid "Return a copy of the string with leading whitespace removed." +#: ../../flwr install:1 +msgid "``$HOME/.flwr/`` in all other cases" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +#: ../../flwr install:1 +msgid "The desired install path." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -"Return the highest index in S where substring sub is found, such that sub" -" is contained within S[start:end]." -msgstr "" +#: ../../source/ref-api-cli.rst +#, fuzzy +msgid "Arguments" +msgstr "Amélioration de la documentation" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`rindex `\\ \\(sub\\[\\, " -"start\\[\\, end\\]\\]\\)" +#: ../../flwr install:1 log:1 new:1 run:1 +#, fuzzy +msgid "Optional argument" +msgstr "Améliorations facultatives" + +#: ../../flwr install:1 +msgid "The source FAB file to install." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`rjust `\\ \\(width\\[\\, " -"fillchar\\]\\)" +#: ../../flwr log:1 +msgid "Get logs from a Flower project run." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rjust:1 of -msgid "Return a right-justified string of length width." +#: ../../flwr log:1 +msgid "Flag to stream or print logs from the Flower run" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr log run #, fuzzy -msgid ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "default" +msgstr "Flux de travail" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rstrip:1 of -msgid "Return a copy of the string with trailing whitespace removed." +#: ../../flwr log:1 +msgid "``True``" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr log:1 #, fuzzy -msgid ":py:obj:`rpartition `\\ \\(sep\\, \\/\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "Required argument" +msgstr "Amélioration de la documentation" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr log:1 #, fuzzy -msgid "" -":py:obj:`splitlines `\\ " -"\\(\\[keepends\\]\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "The Flower run ID to query" +msgstr "Rejoignez la communauté de Flower" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.splitlines:1 of -msgid "Return a list of the lines in the string, breaking at line boundaries." +#: ../../flwr log:1 +msgid "Path of the Flower project to run" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr log:1 +msgid "Name of the federation to run the app on" +msgstr "" + +#: ../../flwr new:1 #, fuzzy -msgid ":py:obj:`strip `\\ \\(\\[chars\\]\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "Create new Flower App." 
+msgstr "Serveur de Flower" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.strip:1 of -msgid "Return a copy of the string with leading and trailing whitespace removed." +#: ../../flwr new:1 +msgid "The ML framework to use" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr new #, fuzzy -msgid ":py:obj:`swapcase `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "options" +msgstr "Solution" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.swapcase:1 of +#: ../../flwr new:1 msgid "" -"Convert uppercase characters to lowercase and lowercase characters to " -"uppercase." +"PyTorch | TensorFlow | sklearn | HuggingFace | JAX | MLX | NumPy | " +"FlowerTune | Flower Baseline" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`translate `\\ \\(table\\, \\/\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.translate:1 of -msgid "Replace each character in the string using the given translation table." +#: ../../flwr new:1 +msgid "The Flower username of the author" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr new:1 #, fuzzy -msgid ":py:obj:`upper `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "The name of the Flower App" +msgstr "Chargement des données" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.upper:1 of -msgid "Return a copy of the string converted to uppercase." +#: ../../flwr run:1 +#, fuzzy +msgid "Run Flower App." 
+msgstr "Serveur de Flower" + +#: ../../flwr run:1 +msgid "Override configuration key-value pairs, should be of the format:" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr run:1 msgid "" -":py:obj:`startswith `\\ \\(prefix\\[\\," -" start\\[\\, end\\]\\]\\)" +"`--run-config 'key1=\"value1\" key2=\"value2\"' --run-config " +"'key3=\"value3\"'`" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return True if S starts with the specified prefix, False otherwise." +#: ../../flwr run:1 +msgid "" +"Note that `key1`, `key2`, and `key3` in this example need to exist inside" +" the `pyproject.toml` in order to be properly overriden." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr run:1 msgid "" -":py:obj:`endswith `\\ \\(suffix\\[\\, " -"start\\[\\, end\\]\\]\\)" +"Use `--stream` with `flwr run` to display logs; logs are not streamed by " +"default." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return True if S ends with the specified suffix, False otherwise." +#: ../../flwr run:1 +#, fuzzy +msgid "``False``" +msgstr ":code:`évaluer`" + +#: ../../flwr run:1 +#, fuzzy +msgid "Path of the Flower App to run." +msgstr "Chargement des données" + +#: ../../flwr run:1 +msgid "Name of the federation to run the app on." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`removeprefix `\\ " -"\\(prefix\\, \\/\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.removeprefix:1 of -msgid "Return a str with the given prefix string removed if present." -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`removesuffix `\\ " -"\\(suffix\\, \\/\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.removesuffix:1 of -msgid "Return a str with the given suffix string removed if present." 
-msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api-cli.rst:16 #, fuzzy -msgid ":py:obj:`isascii `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "flower-simulation" +msgstr "Simulation de moniteur" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isascii:1 of -msgid "Return True if all characters in the string are ASCII, False otherwise." -msgstr "" +#: ../../source/ref-api-cli.rst:26 +msgid "flower-superlink" +msgstr "flower-superlink" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api-cli.rst:36 #, fuzzy -msgid ":py:obj:`islower `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.islower:1 of -msgid "Return True if the string is a lowercase string, False otherwise." -msgstr "" +msgid "flower-supernode" +msgstr "Serveur de Flower" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api-cli.rst:46 #, fuzzy -msgid ":py:obj:`isupper `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "flower-server-app" +msgstr "flower-driver-api" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isupper:1 of -msgid "Return True if the string is an uppercase string, False otherwise." +#: ../../source/ref-api-cli.rst:50 +msgid "" +"Note that since version ``1.11.0``, ``flower-server-app`` no longer " +"supports passing a reference to a `ServerApp` attribute. Instead, you " +"need to pass the path to Flower app via the argument ``--app``. This is " +"the path to a directory containing a `pyproject.toml`. You can create a " +"valid Flower app by executing ``flwr new`` and following the prompt." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api-cli.rst:64 #, fuzzy -msgid ":py:obj:`istitle `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.istitle:1 of -msgid "Return True if the string is a title-cased string, False otherwise." -msgstr "" +msgid "flower-superexec" +msgstr "flower-superlink" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.rst:2 #, fuzzy -msgid ":py:obj:`isspace `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "flwr" +msgstr "Fleur" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isspace:1 of -msgid "Return True if the string is a whitespace string, False otherwise." +#: ../../source/ref-api/flwr.client.rst:43 ../../source/ref-api/flwr.rst:25 +#: ../../source/ref-api/flwr.server.rst:48 +msgid "Modules" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid ":py:obj:`isdecimal `\\ \\(\\)" +msgid ":py:obj:`flwr.client `\\" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isdecimal:1 of -msgid "Return True if the string is a decimal string, False otherwise." -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of #, fuzzy -msgid ":py:obj:`isdigit `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isdigit:1 of -msgid "Return True if the string is a digit string, False otherwise." -msgstr "" +msgid "Flower client." 
+msgstr "Client de Flower" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid ":py:obj:`isnumeric `\\ \\(\\)" +msgid ":py:obj:`flwr.common `\\" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isnumeric:1 of -msgid "Return True if the string is a numeric string, False otherwise." -msgstr "" +#: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of +msgid "Common components shared between server and client." +msgstr "Composants communs partagés entre le serveur et le client." -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid ":py:obj:`isalpha `\\ \\(\\)" +msgid ":py:obj:`flwr.server `\\" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isalpha:1 of -msgid "Return True if the string is an alphabetic string, False otherwise." -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.rst:35::1 +#: ../../source/ref-api/flwr.server.rst:37::1 flwr.server:1 +#: flwr.server.server.Server:1 of #, fuzzy -msgid ":py:obj:`isalnum `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isalnum:1 of -msgid "Return True if the string is an alpha-numeric string, False otherwise." -msgstr "" +msgid "Flower server." +msgstr "Serveur de Flower" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid ":py:obj:`isidentifier `\\ \\(\\)" +msgid ":py:obj:`flwr.simulation `\\" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isidentifier:1 of -msgid "Return True if the string is a valid Python identifier, False otherwise." 
-msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of #, fuzzy -msgid ":py:obj:`isprintable `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "Flower simulation." +msgstr "Simulation de moniteur" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isprintable:1 of -msgid "Return True if the string is printable, False otherwise." -msgstr "" +#: ../../source/ref-api/flwr.client.rst:2 +msgid "client" +msgstr "client" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.mod.rst:13 +#: ../../source/ref-api/flwr.client.rst:13 +#: ../../source/ref-api/flwr.common.rst:13 +#: ../../source/ref-api/flwr.server.rst:13 +#: ../../source/ref-api/flwr.simulation.rst:13 #, fuzzy -msgid ":py:obj:`zfill `\\ \\(width\\, \\/\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.zfill:1 of -msgid "" -"Pad a numeric string with zeros on the left, to fill a field of the given" -" width." -msgstr "" +msgid "Functions" +msgstr "Les quatre fonctions :" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.rst:23::1 msgid "" -":py:obj:`format `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return a formatted version of S, using substitutions from args and kwargs." -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`format_map `\\ \\(mapping\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return a formatted version of S, using substitutions from mapping." 
-msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`maketrans `\\" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.maketrans:1 of -msgid "Return a translation table usable for str.translate()." +":py:obj:`start_client `\\ \\(\\*\\, " +"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`PING `\\" +#: ../../source/ref-api/flwr.client.rst:23::1 +#: flwr.client.app.start_client:1 of +msgid "Start a Flower client node which connects to a Flower server." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_CLIENT_ENTER `\\" +#: ../../source/ref-api/flwr.client.rst:23::1 +msgid "" +":py:obj:`start_numpy_client `\\ \\(\\*\\," +" server\\_address\\, client\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_CLIENT_LEAVE `\\" +#: ../../source/ref-api/flwr.client.rst:23::1 +#: flwr.client.app.start_numpy_client:1 of +msgid "Start a Flower NumPyClient which connects to a gRPC server." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_SERVER_ENTER `\\" +#: ../../source/ref-api/flwr.client.mod.rst:30 +#: ../../source/ref-api/flwr.client.rst:25 +#: ../../source/ref-api/flwr.common.rst:32 +#: ../../source/ref-api/flwr.server.rst:24 +#: ../../source/ref-api/flwr.server.strategy.rst:17 +#: ../../source/ref-api/flwr.server.workflow.rst:17 +msgid "Classes" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_SERVER_LEAVE `\\" +#: ../../source/ref-api/flwr.client.rst:32::1 +msgid ":py:obj:`Client `\\ \\(\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_DRIVER_API_ENTER " -"`\\" +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.client.Client:1 of +msgid "Abstract base class for Flower clients." 
msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.rst:32::1 msgid "" -":py:obj:`RUN_DRIVER_API_LEAVE " -"`\\" +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " +"mods\\]\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_FLEET_API_ENTER " -"`\\" -msgstr "" +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.client_app.ClientApp:1 of +#, fuzzy +msgid "Flower ClientApp." +msgstr "Flower ClientApp." -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_FLEET_API_LEAVE " -"`\\" +#: ../../source/ref-api/flwr.client.rst:32::1 +msgid ":py:obj:`NumPyClient `\\ \\(\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SUPERLINK_ENTER " -"`\\" +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.numpy_client.NumPyClient:1 of +msgid "Abstract base class for Flower clients using NumPy." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SUPERLINK_LEAVE " -"`\\" -msgstr "" +#: ../../source/ref-api/flwr.client.rst:50::1 +#, fuzzy +msgid ":py:obj:`flwr.client.mod `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`START_SIMULATION_ENTER " -"`\\" -msgstr "" +#: ../../source/ref-api/flwr.client.rst:50::1 flwr.client.mod:1 of +#, fuzzy +msgid "Flower Built-in Mods." 
+msgstr "Client de Flower" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`START_SIMULATION_LEAVE " -"`\\" +#: flwr.client.client.Client:1 flwr.client.numpy_client.NumPyClient:1 +#: flwr.server.client_manager.ClientManager:1 +#: flwr.server.driver.driver.Driver:1 flwr.server.strategy.strategy.Strategy:1 +#: of +msgid "Bases: :py:class:`~abc.ABC`" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`DRIVER_CONNECT `\\" -msgstr "" - -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`DRIVER_DISCONNECT `\\" -msgstr "" - -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_DRIVER_ENTER `\\" -msgstr "" - -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_DRIVER_LEAVE `\\" -msgstr "" - -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_CLIENT_APP_ENTER " -"`\\" -msgstr "" - -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_CLIENT_APP_LEAVE " -"`\\" -msgstr "" - -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SERVER_APP_ENTER " -"`\\" -msgstr "" - -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SERVER_APP_LEAVE " -"`\\" +#: ../../source/ref-api/flwr.client.Client.rst:15 +#: ../../source/ref-api/flwr.client.ClientApp.rst:15 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:15 +#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:15 +#: ../../source/ref-api/flwr.common.Array.rst:15 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:15 +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Context.rst:15 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 +#: ../../source/ref-api/flwr.common.Error.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 +#: ../../source/ref-api/flwr.common.EventType.rst:15 +#: ../../source/ref-api/flwr.common.FitIns.rst:15 +#: 
../../source/ref-api/flwr.common.FitRes.rst:15 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:15 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 +#: ../../source/ref-api/flwr.common.Message.rst:15 +#: ../../source/ref-api/flwr.common.MessageType.rst:15 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 +#: ../../source/ref-api/flwr.common.Metadata.rst:15 +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Parameters.rst:15 +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 +#: ../../source/ref-api/flwr.common.RecordSet.rst:15 +#: ../../source/ref-api/flwr.common.ServerMessage.rst:15 +#: ../../source/ref-api/flwr.common.Status.rst:15 +#: ../../source/ref-api/flwr.server.ClientManager.rst:15 +#: ../../source/ref-api/flwr.server.Driver.rst:15 +#: ../../source/ref-api/flwr.server.History.rst:15 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 +#: ../../source/ref-api/flwr.server.Server.rst:15 +#: ../../source/ref-api/flwr.server.ServerApp.rst:15 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:15 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:15 +#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 +#: 
../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 +msgid "Methods" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SUPERNODE_ENTER " -"`\\" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`evaluate `\\ \\(ins\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.evaluate:1 +#: flwr.client.numpy_client.NumPyClient.evaluate:1 of #, fuzzy -msgid "" -":py:obj:`RUN_SUPERNODE_LEAVE " -"`\\" -msgstr "serveur.stratégie.Stratégie" - -#: flwr.common.EventType.capitalize:3 of 
-msgid "" -"More specifically, make the first character have upper case and the rest " -"lower case." -msgstr "" - -#: flwr.common.EventType.center:3 flwr.common.EventType.ljust:3 -#: flwr.common.EventType.rjust:3 of -msgid "Padding is done using the specified fill character (default is a space)." -msgstr "" +msgid "Evaluate the provided parameters using the locally held dataset." +msgstr "évaluer le modèle mis à jour sur l'ensemble de test local" -#: flwr.common.EventType.count:1 of -msgid "" -"Return the number of non-overlapping occurrences of substring sub in " -"string S[start:end]. Optional arguments start and end are interpreted as" -" in slice notation." +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`fit `\\ \\(ins\\)" msgstr "" -#: flwr.common.EventType.encode:3 of -msgid "encoding" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.fit:1 of +msgid "Refine the provided parameters using the locally held dataset." msgstr "" -#: flwr.common.EventType.encode:4 of -msgid "The encoding in which to encode the string." +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_context `\\ \\(\\)" msgstr "" -#: flwr.common.EventType.encode:9 of -msgid "errors" -msgstr "" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_context:1 +#: flwr.client.numpy_client.NumPyClient.get_context:1 of +#, fuzzy +msgid "Get the run context from this client." +msgstr "Évaluer la réponse d'un client." -#: flwr.common.EventType.encode:6 of -msgid "" -"The error handling scheme to use for encoding errors. The default is " -"'strict' meaning that encoding errors raise a UnicodeEncodeError. Other " -"possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well " -"as any other name registered with codecs.register_error that can handle " -"UnicodeEncodeErrors." 
+#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_parameters `\\ \\(ins\\)" msgstr "" -#: flwr.common.EventType.endswith:1 of -msgid "" -"Return True if S ends with the specified suffix, False otherwise. With " -"optional start, test S beginning at that position. With optional end, " -"stop comparing S at that position. suffix can also be a tuple of strings " -"to try." -msgstr "" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_parameters:1 +#: flwr.client.numpy_client.NumPyClient.get_parameters:1 of +#, fuzzy +msgid "Return the current local model parameters." +msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" -#: flwr.common.EventType.expandtabs:3 of -msgid "If tabsize is not given, a tab size of 8 characters is assumed." +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_properties `\\ \\(ins\\)" msgstr "" -#: flwr.common.EventType.find:1 flwr.common.EventType.index:1 of -msgid "" -"Return the lowest index in S where substring sub is found, such that sub " -"is contained within S[start:end]. Optional arguments start and end are " -"interpreted as in slice notation." +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.get_properties:1 of +msgid "Return set of client's properties." msgstr "" -#: flwr.common.EventType.find:5 flwr.common.EventType.rfind:5 of -msgid "Return -1 on failure." +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`set_context `\\ \\(context\\)" msgstr "" -#: flwr.common.EventType.format:1 of -msgid "" -"Return a formatted version of S, using substitutions from args and " -"kwargs. The substitutions are identified by braces ('{' and '}')." 
+#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.set_context:1 +#: flwr.client.numpy_client.NumPyClient.set_context:1 of +msgid "Apply a run context to this client." msgstr "" -#: flwr.common.EventType.format_map:1 of -msgid "" -"Return a formatted version of S, using substitutions from mapping. The " -"substitutions are identified by braces ('{' and '}')." +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`to_client `\\ \\(\\)" msgstr "" -#: flwr.common.EventType.index:5 flwr.common.EventType.rindex:5 of -msgid "Raises ValueError when the substring is not found." +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.to_client:1 of +msgid "Return client (itself)." msgstr "" -#: flwr.common.EventType.isalnum:3 of -msgid "" -"A string is alpha-numeric if all characters in the string are alpha-" -"numeric and there is at least one character in the string." 
+#: ../../source/ref-api/flwr.client.Client.rst:46 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:46 +#: ../../source/ref-api/flwr.common.Array.rst:28 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:25 +#: ../../source/ref-api/flwr.common.Code.rst:19 +#: ../../source/ref-api/flwr.common.Context.rst:25 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 +#: ../../source/ref-api/flwr.common.Error.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 +#: ../../source/ref-api/flwr.common.EventType.rst:165 +#: ../../source/ref-api/flwr.common.FitIns.rst:25 +#: ../../source/ref-api/flwr.common.FitRes.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 +#: ../../source/ref-api/flwr.common.Message.rst:37 +#: ../../source/ref-api/flwr.common.MessageType.rst:25 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 +#: ../../source/ref-api/flwr.common.Metadata.rst:25 +#: ../../source/ref-api/flwr.common.Parameters.rst:25 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 +#: ../../source/ref-api/flwr.common.RecordSet.rst:25 +#: ../../source/ref-api/flwr.common.ServerMessage.rst:25 +#: ../../source/ref-api/flwr.common.Status.rst:25 +#: ../../source/ref-api/flwr.server.Driver.rst:40 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:25 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:25 +msgid "Attributes" msgstr "" -#: flwr.common.EventType.isalpha:3 of -msgid "" -"A string is alphabetic if all characters in the string are alphabetic and" -" there is at least one character in the string." 
+#: flwr.client.Client.context:1::1 of +msgid ":py:obj:`context `\\" msgstr "" -#: flwr.common.EventType.isascii:3 of -msgid "" -"ASCII characters have code points in the range U+0000-U+007F. Empty " -"string is ASCII too." +#: flwr.client.Client.context:1 flwr.client.Client.context:1::1 +#: flwr.client.NumPyClient.context:1 +#: flwr.client.NumPyClient.context:1::1 of +msgid "Getter for `Context` client attribute." msgstr "" -#: flwr.common.EventType.isdecimal:3 of -msgid "" -"A string is a decimal string if all characters in the string are decimal " -"and there is at least one character in the string." -msgstr "" +#: ../../source/ref-api/flwr.common.Parameters.rst:2 +#: flwr.client.app.start_client flwr.client.app.start_numpy_client +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.mod.localdp_mod.LocalDpMod +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.context.Context flwr.common.message.Error +#: flwr.common.message.Message flwr.common.message.Message.create_error_reply +#: flwr.common.message.Message.create_reply flwr.common.message.Metadata +#: flwr.common.record.configsrecord.ConfigsRecord +#: flwr.common.record.metricsrecord.MetricsRecord +#: flwr.common.record.parametersrecord.Array +#: flwr.common.record.parametersrecord.ParametersRecord +#: flwr.common.record.recordset.RecordSet flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.ClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: 
flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.serverapp_components.ServerAppComponents +#: flwr.server.strategy.bulyan.Bulyan +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.fedadagrad.FedAdagrad +#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg +#: flwr.server.strategy.fedavg_android.FedAvgAndroid +#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt +#: flwr.server.strategy.fedprox.FedProx +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg +#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow +#: flwr.simulation.run_simulation.run_simulation of +#, fuzzy +msgid "Parameters" +msgstr "Paramètres du modèle." 
-#: flwr.common.EventType.isdigit:3 of +#: flwr.client.client.Client.evaluate:3 of msgid "" -"A string is a digit string if all characters in the string are digits and" -" there is at least one character in the string." +"The evaluation instructions containing (global) model parameters received" +" from the server and a dictionary of configuration values used to " +"customize the local evaluation process." msgstr "" -#: flwr.common.EventType.isidentifier:3 of -msgid "" -"Call keyword.iskeyword(s) to test whether string s is a reserved " -"identifier, such as \"def\" or \"class\"." -msgstr "" +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: 
flwr.server.strategy.strategy.Strategy.initialize_parameters of +#, fuzzy +msgid "Returns" +msgstr "Ressources" -#: flwr.common.EventType.islower:3 of +#: flwr.client.client.Client.evaluate:8 of msgid "" -"A string is lowercase if all cased characters in the string are lowercase" -" and there is at least one cased character in the string." +"The evaluation result containing the loss on the local dataset and other " +"details such as the number of local data examples used for evaluation." msgstr "" -#: flwr.common.EventType.isnumeric:3 of -msgid "" -"A string is numeric if all characters in the string are numeric and there" -" is at least one character in the string." +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: 
flwr.server.strategy.strategy.Strategy.initialize_parameters of +msgid "Return type" msgstr "" -#: flwr.common.EventType.isprintable:3 of +#: flwr.client.client.Client.fit:3 of msgid "" -"A string is printable if all of its characters are considered printable " -"in repr() or if it is empty." +"The training instructions containing (global) model parameters received " +"from the server and a dictionary of configuration values used to " +"customize the local training process." msgstr "" -#: flwr.common.EventType.isspace:3 of +#: flwr.client.client.Client.fit:8 of msgid "" -"A string is whitespace if all characters in the string are whitespace and" -" there is at least one character in the string." +"The training result containing updated parameters and other details such " +"as the number of local training examples used for training." msgstr "" -#: flwr.common.EventType.istitle:3 of +#: flwr.client.client.Client.get_parameters:3 of msgid "" -"In a title-cased string, upper- and title-case characters may only follow" -" uncased characters and lowercase characters only cased ones." +"The get parameters instructions received from the server containing a " +"dictionary of configuration values." msgstr "" -#: flwr.common.EventType.isupper:3 of -msgid "" -"A string is uppercase if all cased characters in the string are uppercase" -" and there is at least one cased character in the string." -msgstr "" +#: flwr.client.client.Client.get_parameters:7 of +#, fuzzy +msgid "The current local model parameters." +msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" -#: flwr.common.EventType.join:3 of +#: flwr.client.client.Client.get_properties:3 of msgid "" -"The string whose method is called is inserted in between each given " -"string. The result is returned as a new string." +"The get properties instructions received from the server containing a " +"dictionary of configuration values." 
msgstr "" -#: flwr.common.EventType.join:6 of -msgid "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" +#: flwr.client.client.Client.get_properties:7 of +msgid "The current client properties." msgstr "" -#: flwr.common.EventType.lstrip:3 flwr.common.EventType.rstrip:3 -#: flwr.common.EventType.strip:3 of -msgid "If chars is given and not None, remove characters in chars instead." -msgstr "" +#: ../../source/ref-api/flwr.client.ClientApp.rst:2 +#, fuzzy +msgid "ClientApp" +msgstr "client" -#: flwr.common.EventType.maketrans:3 of -msgid "" -"If there is only one argument, it must be a dictionary mapping Unicode " -"ordinals (integers) or characters to Unicode ordinals, strings or None. " -"Character keys will be then converted to ordinals. If there are two " -"arguments, they must be strings of equal length, and in the resulting " -"dictionary, each character in x will be mapped to the character at the " -"same position in y. If there is a third argument, it must be a string, " -"whose characters will be mapped to None in the result." 
+#: flwr.client.client_app.ClientApp:1 flwr.client.mod.localdp_mod.LocalDpMod:1 +#: flwr.common.constant.MessageType:1 flwr.common.constant.MessageTypeLegacy:1 +#: flwr.common.context.Context:1 flwr.common.message.Error:1 +#: flwr.common.message.Message:1 flwr.common.message.Metadata:1 +#: flwr.common.record.parametersrecord.Array:1 +#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 +#: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 +#: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 +#: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 +#: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 +#: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 +#: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 +#: flwr.common.typing.Status:1 flwr.server.history.History:1 +#: flwr.server.server.Server:1 flwr.server.server_app.ServerApp:1 +#: flwr.server.server_config.ServerConfig:1 +#: flwr.server.serverapp_components.ServerAppComponents:1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +msgid "Bases: :py:class:`object`" msgstr "" -#: flwr.common.EventType.partition:3 of -msgid "" -"This will search for the separator in the string. If the separator is " -"found, returns a 3-tuple containing the part before the separator, the " -"separator itself, and the part after it." 
-msgstr "" +#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 +#: flwr.client.client_app.ClientApp:4 +#: flwr.client.client_app.ClientApp.evaluate:4 +#: flwr.client.client_app.ClientApp.query:4 +#: flwr.client.client_app.ClientApp.train:4 +#: flwr.client.mod.localdp_mod.LocalDpMod:22 +#: flwr.common.record.configsrecord.ConfigsRecord:20 +#: flwr.common.record.metricsrecord.MetricsRecord:19 +#: flwr.common.record.parametersrecord.ParametersRecord:22 +#: flwr.common.record.recordset.RecordSet:23 flwr.server.app.start_server:41 +#: flwr.server.server_app.ServerApp:4 flwr.server.server_app.ServerApp.main:4 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 +#: of +#, fuzzy +msgid "Examples" +msgstr "Exemples de PyTorch" -#: flwr.common.EventType.partition:7 of +#: flwr.client.client_app.ClientApp:5 of msgid "" -"If the separator is not found, returns a 3-tuple containing the original " -"string and two empty strings." +"Assuming a typical `Client` implementation named `FlowerClient`, you can " +"wrap it in a `ClientApp` as follows:" msgstr "" -#: flwr.common.EventType.removeprefix:3 of +#: flwr.client.client_app.ClientApp:16 of msgid "" -"If the string starts with the prefix string, return string[len(prefix):]." -" Otherwise, return a copy of the original string." +"If the above code is in a Python module called `client`, it can be " +"started as follows:" msgstr "" -#: flwr.common.EventType.removesuffix:3 of +#: flwr.client.client_app.ClientApp:21 of msgid "" -"If the string ends with the suffix string and that suffix is not empty, " -"return string[:-len(suffix)]. Otherwise, return a copy of the original " -"string." 
+"In this `client:app` example, `client` refers to the Python module " +"`client.py` in which the previous code lives in and `app` refers to the " +"global attribute `app` that points to an object of type `ClientApp`." msgstr "" -#: flwr.common.EventType.replace:5 of -msgid "count" +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`evaluate `\\ \\(\\)" msgstr "" -#: flwr.common.EventType.replace:4 of -msgid "" -"Maximum number of occurrences to replace. -1 (the default value) means " -"replace all occurrences." +#: flwr.client.client_app.ClientApp.evaluate:1 +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid "Return a decorator that registers the evaluate fn with the client app." msgstr "" -#: flwr.common.EventType.replace:7 of -msgid "" -"If the optional argument count is given, only the first count occurrences" -" are replaced." +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`query `\\ \\(\\)" msgstr "" -#: flwr.common.EventType.rfind:1 flwr.common.EventType.rindex:1 of -msgid "" -"Return the highest index in S where substring sub is found, such that sub" -" is contained within S[start:end]. Optional arguments start and end are " -"interpreted as in slice notation." +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.query:1 of +msgid "Return a decorator that registers the query fn with the client app." msgstr "" -#: flwr.common.EventType.rpartition:3 of -msgid "" -"This will search for the separator in the string, starting at the end. If" -" the separator is found, returns a 3-tuple containing the part before the" -" separator, the separator itself, and the part after it." 
+#: flwr.client.client_app.ClientApp.evaluate:1::1 of +#, fuzzy +msgid ":py:obj:`train `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.train:1 of +msgid "Return a decorator that registers the train fn with the client app." msgstr "" -#: flwr.common.EventType.rpartition:7 of +#: ../../source/ref-api/flwr.client.NumPyClient.rst:2 +msgid "NumPyClient" +msgstr "NumPyClient" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 msgid "" -"If the separator is not found, returns a 3-tuple containing two empty " -"strings and the original string." +":py:obj:`evaluate `\\ \\(parameters\\, " +"config\\)" msgstr "" -#: flwr.common.EventType.rsplit:7 flwr.common.EventType.split:7 of -msgid "sep" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`fit `\\ \\(parameters\\, config\\)" msgstr "" -#: flwr.common.EventType.rsplit:4 flwr.common.EventType.split:4 of -msgid "The separator used to split the string." +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.fit:1 of +#, fuzzy +msgid "Train the provided parameters using the locally held dataset." +msgstr "entraîne le modèle sur l'ensemble d'apprentissage local" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`get_context `\\ \\(\\)" msgstr "" -#: flwr.common.EventType.rsplit:6 flwr.common.EventType.split:6 of +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 msgid "" -"When set to None (the default value), will split on any whitespace " -"character (including \\\\n \\\\r \\\\t \\\\f and spaces) and will discard" -" empty strings from the result." 
+":py:obj:`get_parameters `\\ " +"\\(config\\)" msgstr "" -#: flwr.common.EventType.rsplit:11 flwr.common.EventType.split:11 of -msgid "maxsplit" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid "" +":py:obj:`get_properties `\\ " +"\\(config\\)" msgstr "" -#: flwr.common.EventType.rsplit:10 flwr.common.EventType.split:10 of +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.get_properties:1 of +msgid "Return a client's set of properties." +msgstr "" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 msgid "" -"Maximum number of splits (starting from the left). -1 (the default value)" -" means no limit." +":py:obj:`set_context `\\ " +"\\(context\\)" msgstr "" -#: flwr.common.EventType.rsplit:13 of -msgid "Splitting starts at the end of the string and works to the front." +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`to_client `\\ \\(\\)" msgstr "" -#: flwr.common.EventType.split:13 of -msgid "" -"Note, str.split() is mainly useful for data that has been intentionally " -"delimited. With natural text that includes punctuation, consider using " -"the regular expression module." +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.to_client:1 of +#, fuzzy +msgid "Convert to object to Client type and return it." +msgstr "Convertit l'objet des paramètres en ndarrays NumPy." 
+ +#: flwr.client.NumPyClient.context:1::1 of +msgid ":py:obj:`context `\\" msgstr "" -#: flwr.common.EventType.splitlines:3 of +#: flwr.client.numpy_client.NumPyClient.evaluate:3 +#: flwr.client.numpy_client.NumPyClient.fit:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:5 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:8 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:5 +#: flwr.server.strategy.strategy.Strategy.configure_fit:5 +#: flwr.server.strategy.strategy.Strategy.evaluate:8 of +#, fuzzy +msgid "The current (global) model parameters." +msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" + +#: flwr.client.numpy_client.NumPyClient.evaluate:5 of msgid "" -"Line breaks are not included in the resulting list unless keepends is " -"given and true." +"Configuration parameters which allow the server to influence evaluation " +"on the client. It can be used to communicate arbitrary values from the " +"server to the client, for example, to influence the number of examples " +"used for evaluation." msgstr "" -#: flwr.common.EventType.startswith:1 of +#: flwr.client.numpy_client.NumPyClient.evaluate:11 of msgid "" -"Return True if S starts with the specified prefix, False otherwise. With " -"optional start, test S beginning at that position. With optional end, " -"stop comparing S at that position. prefix can also be a tuple of strings " -"to try." +"* **loss** (*float*) -- The evaluation loss of the model on the local " +"dataset. * **num_examples** (*int*) -- The number of examples used for " +"evaluation. * **metrics** (*Dict[str, Scalar]*) -- A dictionary mapping " +"arbitrary string keys to values of type bool, bytes, float, int, or " +"str. It can be used to communicate arbitrary values back to the server." 
msgstr "" -#: flwr.common.EventType.title:3 of +#: flwr.client.numpy_client.NumPyClient.evaluate:11 of msgid "" -"More specifically, words start with uppercased characters and all " -"remaining cased characters have lower case." +"**loss** (*float*) -- The evaluation loss of the model on the local " +"dataset." msgstr "" -#: flwr.common.EventType.translate:5 of -#, fuzzy -msgid "table" -msgstr "Database" +#: flwr.client.numpy_client.NumPyClient.evaluate:12 of +msgid "**num_examples** (*int*) -- The number of examples used for evaluation." +msgstr "" -#: flwr.common.EventType.translate:4 of +#: flwr.client.numpy_client.NumPyClient.evaluate:13 +#: flwr.client.numpy_client.NumPyClient.fit:13 of msgid "" -"Translation table, which must be a mapping of Unicode ordinals to Unicode" -" ordinals, strings, or None." +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. It can be " +"used to communicate arbitrary values back to the server." msgstr "" -#: flwr.common.EventType.translate:7 of +#: flwr.client.numpy_client.NumPyClient.evaluate:19 of msgid "" -"The table must implement lookup/indexing via __getitem__, for instance a " -"dictionary or list. If this operation raises LookupError, the character " -"is left untouched. Characters mapped to None are deleted." +"The previous return type format (int, float, float) and the extended " +"format (int, float, float, Dict[str, Scalar]) have been deprecated and " +"removed since Flower 0.19." msgstr "" -#: flwr.common.EventType.zfill:3 of -msgid "The string is never truncated." +#: flwr.client.numpy_client.NumPyClient.fit:5 of +msgid "" +"Configuration parameters which allow the server to influence training on " +"the client. It can be used to communicate arbitrary values from the " +"server to the client, for example, to set the number of (local) training " +"epochs." 
msgstr "" -#: ../../source/ref-api/flwr.common.FitIns.rst:2 -msgid "FitIns" +#: flwr.client.numpy_client.NumPyClient.fit:11 of +msgid "" +"* **parameters** (*NDArrays*) -- The locally updated model parameters. * " +"**num_examples** (*int*) -- The number of examples used for training. * " +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. It can " +"be used to communicate arbitrary values back to the server." msgstr "" -#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 -msgid ":py:obj:`parameters `\\" -msgstr "" +#: flwr.client.numpy_client.NumPyClient.fit:11 of +#, fuzzy +msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." +msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" -#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 -msgid ":py:obj:`config `\\" +#: flwr.client.numpy_client.NumPyClient.fit:12 of +msgid "**num_examples** (*int*) -- The number of examples used for training." msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:2 -msgid "FitRes" +#: flwr.client.numpy_client.NumPyClient.get_parameters:3 of +msgid "" +"Configuration parameters requested by the server. This can be used to " +"tell the client which parameters are needed along with some Scalar " +"attributes." msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`status `\\" +#: flwr.client.numpy_client.NumPyClient.get_parameters:8 of +#, fuzzy +msgid "**parameters** -- The local model parameters as a list of NumPy ndarrays." +msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" + +#: flwr.client.numpy_client.NumPyClient.get_properties:3 of +msgid "" +"Configuration parameters requested by the server. This can be used to " +"tell the client which properties are needed along with some Scalar " +"attributes." 
msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`parameters `\\" +#: flwr.client.numpy_client.NumPyClient.get_properties:8 of +msgid "" +"**properties** -- A dictionary mapping arbitrary string keys to values of" +" type bool, bytes, float, int, or str. It can be used to communicate " +"arbitrary property values back to the server." msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`num_examples `\\" +#: ../../source/ref-api/flwr.client.mod.rst:2 +msgid "mod" msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`metrics `\\" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`adaptiveclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:2 +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:1 of #, fuzzy -msgid "GetParametersIns" -msgstr ":code:`get_parameters`" +msgid "Client-side adaptive clipping modifier." +msgstr "Logique côté client" -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:28::1 -msgid ":py:obj:`config `\\" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`fixedclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:2 +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:1 of #, fuzzy -msgid "GetParametersRes" -msgstr ":code:`get_parameters`" +msgid "Client-side fixed clipping modifier." 
+msgstr "Logique côté client" -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 -msgid ":py:obj:`status `\\" -msgstr "" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#, fuzzy +msgid ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 -msgid ":py:obj:`parameters `\\" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.utils.make_ffn:1 of +msgid "." msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:2 -msgid "GetPropertiesIns" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`message_size_mod `\\ \\(msg\\," +" ctxt\\, call\\_next\\)" msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:28::1 -msgid ":py:obj:`config `\\" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.comms_mods.message_size_mod:1 of +msgid "Message size mod." msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:2 -msgid "GetPropertiesRes" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`parameters_size_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 -msgid ":py:obj:`status `\\" -msgstr "" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.comms_mods.parameters_size_mod:1 of +#, fuzzy +msgid "Parameters size mod." +msgstr "Paramètres du modèle." -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 -msgid ":py:obj:`properties `\\" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`secagg_mod `\\ \\(msg\\, ctxt\\, " +"call\\_next\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:2 -#, fuzzy -msgid "Message" -msgstr "Côté serveur" - -#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 -#: flwr.common.message.Message:3 of -msgid "A dataclass including information about the message to be executed." 
+#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.secure_aggregation.secagg_mod.secagg_mod:1 of +msgid "Handle incoming message and return results, following the SecAgg protocol." msgstr "" -#: flwr.common.message.Message:5 of +#: ../../source/ref-api/flwr.client.mod.rst:28::1 msgid "" -"Holds records either sent by another entity (e.g. sent by the server-side" -" logic to a client, or vice-versa) or that will be sent to it." +":py:obj:`secaggplus_mod `\\ \\(msg\\, " +"ctxt\\, call\\_next\\)" msgstr "" -#: flwr.common.message.Message:8 of +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.secure_aggregation.secaggplus_mod.secaggplus_mod:1 of msgid "" -"A dataclass that captures information about an error that took place when" -" processing another message." +"Handle incoming message and return results, following the SecAgg+ " +"protocol." msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: ../../source/ref-api/flwr.client.mod.rst:35::1 msgid "" -":py:obj:`create_error_reply `\\ " -"\\(error\\[\\, ttl\\]\\)" +":py:obj:`LocalDpMod `\\ \\(clipping\\_norm\\," +" sensitivity\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.create_error_reply:1 of -msgid "Construct a reply message indicating an error happened." +#: ../../source/ref-api/flwr.client.mod.rst:35::1 +#: flwr.client.mod.localdp_mod.LocalDpMod:1 of +#, fuzzy +msgid "Modifier for local differential privacy." +msgstr "Confidentialité différentielle" + +#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:2 +msgid "LocalDpMod" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.client.mod.localdp_mod.LocalDpMod:3 of msgid "" -":py:obj:`create_reply `\\ " -"\\(content\\[\\, ttl\\]\\)" +"This mod clips the client model updates and adds noise to the params " +"before sending them to the server." 
msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.create_reply:1 of -msgid "Create a reply to this message with specified content and TTL." +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:12 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:10 +#: flwr.client.mod.localdp_mod.LocalDpMod:6 of +msgid "It operates on messages of type `MessageType.TRAIN`." msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -msgid ":py:obj:`has_content `\\ \\(\\)" +#: flwr.client.mod.localdp_mod.LocalDpMod:8 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 +#: of +msgid "The value of the clipping norm." msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.has_content:1 of -msgid "Return True if message has content, else False." +#: flwr.client.mod.localdp_mod.LocalDpMod:10 of +msgid "The sensitivity of the client model." msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -msgid ":py:obj:`has_error `\\ \\(\\)" +#: flwr.client.mod.localdp_mod.LocalDpMod:12 of +msgid "" +"The privacy budget. Smaller value of epsilon indicates a higher level of " +"privacy protection." msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.has_error:1 of -msgid "Return True if message has an error, else False." +#: flwr.client.mod.localdp_mod.LocalDpMod:15 of +msgid "" +"The failure probability. The probability that the privacy mechanism fails" +" to provide the desired level of privacy. A smaller value of delta " +"indicates a stricter privacy guarantee." 
msgstr "" -#: flwr.common.Message.content:1::1 of -msgid ":py:obj:`content `\\" +#: flwr.client.mod.localdp_mod.LocalDpMod:23 of +msgid "Create an instance of the local DP mod and add it to the client-side mods:" msgstr "" -#: flwr.common.Message.content:1 flwr.common.Message.content:1::1 -#: of -#, fuzzy -msgid "The content of this message." -msgstr "Évaluer la réponse d'un client." - -#: flwr.common.Message.content:1::1 of -msgid ":py:obj:`error `\\" +#: ../../source/ref-api/flwr.client.mod.adaptiveclipping_mod.rst:2 +msgid "adaptiveclipping\\_mod" msgstr "" -#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of -msgid "Error captured by this message." +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:3 of +msgid "" +"This mod needs to be used with the " +"DifferentialPrivacyClientSideAdaptiveClipping server-side strategy " +"wrapper." msgstr "" -#: flwr.common.Message.content:1::1 of -msgid ":py:obj:`metadata `\\" +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:6 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:6 of +msgid "The wrapper sends the clipping_norm value to the client." msgstr "" -#: flwr.common.message.Message.create_error_reply:3 of -msgid "The error that was encountered." +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:8 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:8 of +msgid "This mod clips the client model updates before sending them to the server." msgstr "" -#: flwr.common.message.Message.create_error_reply:5 -#: flwr.common.message.Message.create_reply:9 of +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:10 of msgid "" -"Time-to-live for this message in seconds. If unset, it will be set based " -"on the remaining time for the received message before it expires. This " -"follows the equation: ttl = msg.meta.ttl - (reply.meta.created_at - " -"msg.meta.created_at)" +"It also sends KEY_NORM_BIT to the server for computing the new clipping " +"value." 
msgstr "" -#: flwr.common.message.Message.create_error_reply:5 -#: flwr.common.message.Message.create_reply:9 of -msgid "" -"Time-to-live for this message in seconds. If unset, it will be set based " -"on the remaining time for the received message before it expires. This " -"follows the equation:" -msgstr "" +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:15 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:13 +#: flwr.server.driver.driver.Driver.send_and_receive:18 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 +#: of +#, fuzzy +msgid "Notes" +msgstr "Aucun" -#: flwr.common.message.Message.create_error_reply:9 -#: flwr.common.message.Message.create_reply:13 of -msgid "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:16 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:14 of +msgid "Consider the order of mods when using multiple." msgstr "" -#: flwr.common.message.Message.create_reply:3 of -msgid "" -"The method generates a new `Message` as a reply to this message. It " -"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " -"this message and sets 'reply_to_message' to the ID of this message." +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:18 of +msgid "Typically, adaptiveclipping_mod should be the last to operate on params." msgstr "" -#: flwr.common.message.Message.create_reply:7 of -msgid "The content for the reply message." +#: ../../source/ref-api/flwr.client.mod.fixedclipping_mod.rst:2 +msgid "fixedclipping\\_mod" msgstr "" -#: flwr.common.message.Message.create_reply:16 of -msgid "A new `Message` instance representing the reply." +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:3 of +msgid "" +"This mod needs to be used with the " +"DifferentialPrivacyClientSideFixedClipping server-side strategy wrapper." 
msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:2 -msgid "MessageType" +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:16 of +msgid "Typically, fixedclipping_mod should be the last to operate on params." msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 -msgid ":py:obj:`EVALUATE `\\" +#: ../../source/ref-api/flwr.client.mod.make_ffn.rst:2 +msgid "make\\_ffn" msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 -msgid ":py:obj:`QUERY `\\" +#: ../../source/ref-api/flwr.client.mod.message_size_mod.rst:2 +msgid "message\\_size\\_mod" msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 -msgid ":py:obj:`TRAIN `\\" +#: flwr.client.mod.comms_mods.message_size_mod:3 of +msgid "This mod logs the size in bytes of the message being transmited." msgstr "" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 -msgid "MessageTypeLegacy" -msgstr "" +#: ../../source/ref-api/flwr.client.mod.parameters_size_mod.rst:2 +#, fuzzy +msgid "parameters\\_size\\_mod" +msgstr "Paramètres du modèle." -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 -msgid ":py:obj:`GET_PARAMETERS `\\" +#: flwr.client.mod.comms_mods.parameters_size_mod:3 of +msgid "" +"This mod logs the number of parameters transmitted in the message as well" +" as their size in bytes." msgstr "" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 -msgid ":py:obj:`GET_PROPERTIES `\\" +#: ../../source/ref-api/flwr.client.mod.secagg_mod.rst:2 +msgid "secagg\\_mod" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of -msgid "An identifier for the current run." -msgstr "" +#: ../../source/ref-api/flwr.client.mod.secaggplus_mod.rst:2 +#, fuzzy +msgid "secaggplus\\_mod" +msgstr "Flux de travail" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of -msgid "An identifier for the current message." 
-msgstr "" +#: ../../source/ref-api/flwr.client.start_client.rst:2 +#, fuzzy +msgid "start\\_client" +msgstr "start_client" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of -msgid "An identifier for the node sending this message." +#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of +msgid "" +"The IPv4 or IPv6 address of the server. If the Flower server runs on the " +"same machine on port 8080, then `server_address` would be " +"`\"[::]:8080\"`." msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.dst_node_id:1 flwr.common.message.Metadata:9 of -msgid "An identifier for the node receiving this message." +#: flwr.client.app.start_client:7 of +msgid "A callable that instantiates a Client. (default: None)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of -msgid "An identifier for the message this message replies to." +#: flwr.client.app.start_client:9 of +msgid "" +"An implementation of the abstract base class `flwr.client.Client` " +"(default: None)" msgstr "" -#: flwr.common.message.Metadata:13 of +#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of msgid "" -"An identifier for grouping messages. In some settings, this is used as " -"the FL round." +"The maximum length of gRPC messages that can be exchanged with the Flower" +" server. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower server needs to be started with the same value (see " +"`flwr.server.start_server`), otherwise it will not know about the " +"increased limit and block larger messages." msgstr "" -#: flwr.common.message.Metadata:16 of -msgid "Time-to-live for this message in seconds." 
+#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 of +msgid "" +"The PEM-encoded root certificates as a byte string or a path string. If " +"provided, a secure connection using the certificates will be established " +"to an SSL-enabled Flower server." msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of -msgid "A string that encodes the action to be executed on the receiving end." +#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of +msgid "" +"Starts an insecure gRPC connection when True. Enables HTTPS connection " +"when False, using system certificates if `root_certificates` is None." msgstr "" -#: flwr.common.message.Metadata:21 of +#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of msgid "" -"An identifier that can be used when loading a particular data partition " -"for a ClientApp. Making use of this identifier is more relevant when " -"conducting simulations." +"Configure the transport layer. Allowed values: - 'grpc-bidi': gRPC, " +"bidirectional streaming - 'grpc-rere': gRPC, request-response " +"(experimental) - 'rest': HTTP (experimental)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -#, fuzzy -msgid ":py:obj:`created_at `\\" -msgstr "serveur.stratégie.Stratégie" - -#: flwr.common.Metadata.created_at:1 -#: flwr.common.Metadata.created_at:1::1 of -msgid "Unix timestamp when the message was created." +#: flwr.client.app.start_client:31 of +msgid "" +"The maximum number of times the client will try to connect to the server " +"before giving up in case of a connection error. If set to None, there is " +"no limit to the number of tries." msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`dst_node_id `\\" +#: flwr.client.app.start_client:35 of +msgid "" +"The maximum duration before the client stops trying to connect to the " +"server in case of connection error. 
If set to None, there is no limit to " +"the total time." msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`group_id `\\" +#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of +msgid "Starting a gRPC client with an insecure server connection:" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.group_id:1 of -msgid "An identifier for grouping messages." +#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of +msgid "Starting an SSL-enabled gRPC client using system certificates:" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`message_id `\\" +#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of +msgid "Starting an SSL-enabled gRPC client using provided certificates:" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`message_type `\\" -msgstr "" +#: ../../source/ref-api/flwr.client.start_numpy_client.rst:2 +#, fuzzy +msgid "start\\_numpy\\_client" +msgstr "start_numpy_client" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`partition_id `\\" +#: flwr.client.app.start_numpy_client:5 of +msgid "" +"This function is deprecated since 1.7.0. Use " +":code:`flwr.client.start_client` instead and first convert your " +":code:`NumPyClient` to type :code:`flwr.client.Client` by executing its " +":code:`to_client()` method." msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.partition_id:1 of -msgid "An identifier telling which data partition a ClientApp should use." +#: flwr.client.app.start_numpy_client:13 of +msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." 
msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`reply_to_message `\\" -msgstr "" +#: ../../source/ref-api/flwr.common.rst:2 +msgid "common" +msgstr "commun" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`run_id `\\" +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`src_node_id `\\" -msgstr "" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.record.conversion_utils.array_from_numpy:1 of +#, fuzzy +msgid "Create Array from NumPy ndarray." +msgstr "Convertit l'objet des paramètres en ndarrays NumPy." -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`ttl `\\" +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 flwr.common.Metadata.ttl:1 -#: of -msgid "Time-to-live for this message." -msgstr "" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.bytes_to_ndarray:1 of +msgid "Deserialize NumPy ndarray from bytes." +msgstr "Désérialise le tableau numérique NumPy à partir d'octets." -#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 -msgid "MetricsRecord" +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`configure `\\ \\(identifier\\[\\, " +"filename\\, host\\]\\)" msgstr "" -#: flwr.common.record.metricsrecord.MetricsRecord:1 of -msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`int` | :py:class:`float` | " -":py:class:`~typing.List`\\ [:py:class:`int`] | :py:class:`~typing.List`\\" -" [:py:class:`float`]]" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.logger.configure:1 of +msgid "Configure logging to file and/or remote log server." msgstr "" +"Configure la journalisation vers un fichier et/ou un serveur de " +"journalisation distant." 
-#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`clear `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`event `\\ \\(event\\_type\\[\\, " +"event\\_details\\]\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`count_bytes `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.telemetry.event:1 of +msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`items `\\ \\(\\)" -msgstr "" +#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 +#: of +msgid "Log 'msg % args' with the integer severity 'level'." +msgstr "Enregistre 'msg % args' avec le niveau de sévérité entier 'level'." -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`keys `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -msgstr "" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.ndarray_to_bytes:1 of +msgid "Serialize NumPy ndarray to bytes." +msgstr "Sérialise le tableau numérique NumPy en octets." 
-#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" -msgstr "" - -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`values `\\ \\(\\)" +":py:obj:`ndarrays_to_parameters `\\ " +"\\(ndarrays\\)" msgstr "" -#: ../../source/ref-api/flwr.common.NDArray.rst:2 -msgid "NDArray" -msgstr "" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.ndarrays_to_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 +#: of +msgid "Convert NumPy ndarrays to parameters object." +msgstr "Convertit les ndarrays NumPy en objets de paramètres." -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 -msgid ":py:obj:`tensors `\\" +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`now `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 -msgid ":py:obj:`tensor_type `\\" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.date.now:1 of +msgid "Construct a datetime from time.time() with time zone set to UTC." msgstr "" +"Construit une date à partir de time.time() avec le fuseau horaire réglé " +"sur UTC." -#: ../../source/ref-api/flwr.common.ParametersRecord.rst:2 -#, fuzzy -msgid "ParametersRecord" -msgstr "Paramètres du modèle." - -#: flwr.common.record.parametersrecord.ParametersRecord:1 of +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" +":py:obj:`parameters_to_ndarrays `\\ " +"\\(parameters\\)" msgstr "" -#: flwr.common.record.parametersrecord.ParametersRecord:3 of +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.parameters_to_ndarrays:1 of +msgid "Convert parameters object to NumPy ndarrays." 
+msgstr "Convertit l'objet des paramètres en ndarrays NumPy." + +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"A dataclass storing named Arrays in order. This means that it holds " -"entries as an OrderedDict[str, Array]. ParametersRecord objects can be " -"viewed as an equivalent to PyTorch's state_dict, but holding serialised " -"tensors instead." +":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " +"data\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`clear `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.parametersrecord.Array:1 of +msgid "Array type." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`count_bytes `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`ClientMessage `\\ " +"\\(\\[get\\_properties\\_res\\, ...\\]\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ClientMessage:1 of +msgid "ClientMessage is a container used to hold one result message." msgstr "" +"ClientMessage est un conteneur utilisé pour contenir un message de " +"résultat." -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`items `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`Code `\\ \\(value\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`keys `\\ \\(\\)" -msgstr "" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Code:1 of +msgid "Client status codes." +msgstr "Codes d'état du client." 
-#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -msgstr "" +#: ../../source/ref-api/flwr.common.rst:68::1 +#, fuzzy +msgid ":py:obj:`Config `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" -msgstr "" - -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`values `\\ \\(\\)" +"alias of :py:class:`dict`\\ [:py:class:`str`, :py:class:`bool` | " +":py:class:`bytes` | :py:class:`float` | :py:class:`int` | " +":py:class:`str`]" msgstr "" -#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#, fuzzy msgid "" -"Note that a small amount of Bytes might also be included in this counting" -" that correspond to metadata of the serialized object (e.g. of NumPy " -"array) needed for deseralization." +":py:obj:`ConfigsRecord `\\ " +"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" msgstr "" +"Flower 1.0 : ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.configsrecord.ConfigsRecord:1 of #, fuzzy -msgid "ReconnectIns" -msgstr "Collecte centralisée des données" +msgid "Configs record." +msgstr "Configurer les clients" -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:28::1 -msgid ":py:obj:`seconds `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`Context `\\ \\(node\\_id\\, " +"node\\_config\\, state\\, run\\_config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.RecordSet.rst:2 -msgid "RecordSet" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.context.Context:1 of +msgid "Context of your run." 
msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 of -msgid ":py:obj:`configs_records `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" msgstr "" -#: flwr.common.RecordSet.configs_records:1 -#: flwr.common.RecordSet.configs_records:1::1 of -msgid "Dictionary holding ConfigsRecord instances." -msgstr "" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.DisconnectRes:1 of +msgid "DisconnectRes message from client to server." +msgstr "Message DisconnectRes envoyé par le client au serveur." -#: flwr.common.RecordSet.configs_records:1::1 of -msgid ":py:obj:`metrics_records `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 -#: flwr.common.RecordSet.metrics_records:1 of -msgid "Dictionary holding MetricsRecord instances." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.message.Error:1 of +msgid "A dataclass that stores information about an error that occurred." msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 of -msgid ":py:obj:`parameters_records `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`EvaluateIns `\\ \\(parameters\\, " +"config\\)" msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 -#: flwr.common.RecordSet.parameters_records:1 of -msgid "Dictionary holding ParametersRecord instances." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.EvaluateIns:1 of +msgid "Evaluate instructions for a client." +msgstr "Évaluer les instructions pour un client." 
+ +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " +"num\\_examples\\, metrics\\)" msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:2 -#, fuzzy -msgid "ServerMessage" -msgstr "Côté serveur" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.EvaluateRes:1 of +msgid "Evaluate response from a client." +msgstr "Évaluer la réponse d'un client." -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 -msgid ":py:obj:`evaluate_ins `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`EventType `\\ \\(value\\)" msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 -msgid ":py:obj:`fit_ins `\\" -msgstr "" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.telemetry.EventType:1 of +msgid "Types of telemetry events." +msgstr "Types d'événements télémétriques." -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 -msgid "" -":py:obj:`get_parameters_ins " -"`\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.FitIns:1 of +msgid "Fit instructions for a client." +msgstr "Instructions d'ajustement pour un client." + +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -":py:obj:`get_properties_ins " -"`\\" +":py:obj:`FitRes `\\ \\(status\\, parameters\\, " +"num\\_examples\\, metrics\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Status.rst:2 -#, fuzzy -msgid "Status" -msgstr "Statut du client." - -#: ../../source/ref-api/flwr.common.Status.rst:29::1 -msgid ":py:obj:`code `\\" -msgstr "" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.FitRes:1 of +msgid "Fit response from a client." +msgstr "Réponse adaptée d'un client." 
-#: ../../source/ref-api/flwr.common.Status.rst:29::1 -msgid ":py:obj:`message `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 -msgid "array\\_from\\_numpy" -msgstr "" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetParametersIns:1 of +msgid "Parameters request for a client." +msgstr "Demande de paramètres pour un client." -#: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 -msgid "bytes\\_to\\_ndarray" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`GetParametersRes `\\ \\(status\\, " +"parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.common.configure.rst:2 -#, fuzzy -msgid "configure" -msgstr "Configurer les clients" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetParametersRes:1 of +msgid "Response when asked to return parameters." +msgstr "Réponse lorsqu'on te demande de renvoyer des paramètres." -#: ../../source/ref-api/flwr.common.event.rst:2 -msgid "event" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.log.rst:2 -msgid "log" -msgstr "" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetPropertiesIns:1 of +msgid "Properties request for a client." +msgstr "Demande de propriétés pour un client." -#: logging.Logger.log:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"To pass exception information, use the keyword argument exc_info with a " -"true value, e.g." +":py:obj:`GetPropertiesRes `\\ \\(status\\, " +"properties\\)" msgstr "" -"Pour transmettre des informations sur les exceptions, utilise l'argument " -"mot-clé exc_info avec une valeur vraie, par ex." 
-#: logging.Logger.log:6 of -#, python-format -msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" -msgstr "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetPropertiesRes:1 of +msgid "Properties response from a client." +msgstr "Réponse des propriétés d'un client." -#: ../../source/ref-api/flwr.common.ndarray_to_bytes.rst:2 -msgid "ndarray\\_to\\_bytes" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " +"error\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.ndarrays_to_parameters.rst:2 -msgid "ndarrays\\_to\\_parameters" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.message.Message:1 of +msgid "State of your application from the viewpoint of the entity using it." msgstr "" -#: ../../source/ref-api/flwr.common.now.rst:2 -msgid "now" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`MessageType `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.parameters_to_ndarrays.rst:2 -msgid "parameters\\_to\\_ndarrays" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.constant.MessageType:1 of +msgid "Message type." msgstr "" -#: ../../source/ref-api/flwr.server.rst:2 -msgid "server" -msgstr "serveur" - -#: ../../source/ref-api/flwr.server.rst:26::1 -msgid ":py:obj:`run_driver_api `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -#: flwr.server.app.run_driver_api:1 of -#, fuzzy -msgid "Run Flower server (Driver API)." -msgstr "flower-driver-api" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.constant.MessageTypeLegacy:1 of +msgid "Legacy message type." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`Metadata `\\ \\(run\\_id\\, " +"message\\_id\\, src\\_node\\_id\\, ...\\)" +msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -msgid ":py:obj:`run_fleet_api `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.message.Metadata:1 of +msgid "A dataclass holding metadata associated with the current message." msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -#: flwr.server.app.run_fleet_api:1 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "Run Flower server (Fleet API)." -msgstr "flower-fleet-api" +msgid ":py:obj:`Metrics `\\" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.rst:26::1 -msgid ":py:obj:`run_server_app `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`MetricsRecord `\\ " +"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -#: flwr.server.run_serverapp.run_server_app:1 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.metricsrecord.MetricsRecord:1 of #, fuzzy -msgid "Run Flower server app." -msgstr "Serveur de Flower" +msgid "Metrics recod." +msgstr "Paramètres du modèle." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`NDArray `\\" +msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -msgid ":py:obj:`run_superlink `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " +":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -#: flwr.server.app.run_superlink:1 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "Run Flower SuperLink (Driver API and Fleet API)." 
-msgstr "flower-fleet-api" +msgid ":py:obj:`NDArrays `\\" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -":py:obj:`start_server `\\ \\(\\*\\[\\, " -"server\\_address\\, server\\, ...\\]\\)" +"alias of :py:class:`list`\\ [:py:class:`~numpy.ndarray`\\ " +"[:py:obj:`~typing.Any`, :py:class:`~numpy.dtype`\\ " +"[:py:obj:`~typing.Any`]]]" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -#: flwr.server.app.start_server:1 of -msgid "Start a Flower server using the gRPC transport layer." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`Parameters `\\ \\(tensors\\, " +"tensor\\_type\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -msgid ":py:obj:`ClientManager `\\ \\(\\)" -msgstr "" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Parameters:1 of +msgid "Model parameters." +msgstr "Paramètres du modèle." -#: ../../source/ref-api/flwr.server.rst:40::1 -#: flwr.server.client_manager.ClientManager:1 of -msgid "Abstract base class for managing Flower clients." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`ParametersRecord `\\ " +"\\(\\[array\\_dict\\, keep\\_input\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.parametersrecord.ParametersRecord:1 of #, fuzzy -msgid ":py:obj:`Driver `\\ \\(\\)" +msgid "Parameters record." +msgstr "Paramètres du modèle." + +#: ../../source/ref-api/flwr.common.rst:68::1 +#, fuzzy +msgid ":py:obj:`Properties `\\" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.rst:40::1 -#: flwr.server.driver.driver.Driver:1 of -msgid "Abstract base Driver class for the Driver API." 
+#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -msgid ":py:obj:`History `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ReconnectIns:1 of +msgid "ReconnectIns message from server to client." +msgstr "Message de reconnexion du serveur au client." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`RecordSet `\\ " +"\\(\\[parameters\\_records\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -#: flwr.server.history.History:1 of -msgid "History class for training and/or evaluation metrics collection." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.recordset.RecordSet:1 of +msgid "RecordSet stores groups of parameters, metrics and configs." msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -":py:obj:`LegacyContext `\\ \\(state\\[\\, " -"config\\, strategy\\, ...\\]\\)" +":py:obj:`ServerMessage `\\ " +"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -#: flwr.server.compat.legacy_context.LegacyContext:1 of -msgid "Legacy Context." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ServerMessage:1 of +msgid "ServerMessage is a container used to hold one instruction message." msgstr "" +"ServerMessage est un conteneur utilisé pour contenir un message " +"d'instruction." 
-#: ../../source/ref-api/flwr.server.rst:40::1 -msgid "" -":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " -"strategy\\]\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`Status `\\ \\(code\\, message\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -#, fuzzy -msgid "" -":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " -"strategy\\, ...\\]\\)" -msgstr "serveur.stratégie.Stratégie" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Status:1 of +msgid "Client status." +msgstr "Statut du client." -#: ../../source/ref-api/flwr.server.rst:40::1 -#: flwr.server.server_app.ServerApp:1 of -#, fuzzy -msgid "Flower ServerApp." -msgstr "Serveur de Flower" +#: ../../source/ref-api/flwr.common.Array.rst:2 +msgid "Array" +msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -#, fuzzy +#: flwr.common.record.parametersrecord.Array:3 of msgid "" -":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," -" round\\_timeout\\]\\)" +"A dataclass containing serialized data from an array-like or tensor-like " +"object along with some metadata about it." msgstr "" -"Flower 1.0 : ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" -#: ../../source/ref-api/flwr.server.rst:40::1 -#: flwr.server.server_config.ServerConfig:1 of -#, fuzzy -msgid "Flower server config." -msgstr "Serveur de Flower" +#: flwr.common.record.parametersrecord.Array:6 of +msgid "" +"A string representing the data type of the serialised object (e.g. " +"`np.float32`)" +msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" +#: flwr.common.record.parametersrecord.Array:8 of +msgid "" +"A list representing the shape of the unserialized array-like object. This" +" is used to deserialize the data (depending on the serialization method) " +"or simply as a metadata field." 
msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -#: flwr.server.client_manager.SimpleClientManager:1 of -msgid "Provides a pool of available clients." +#: flwr.common.record.parametersrecord.Array:12 of +msgid "" +"A string indicating the type of serialisation mechanism used to generate " +"the bytes in `data` from an array-like or tensor-like object." +msgstr "" + +#: flwr.common.record.parametersrecord.Array:15 of +msgid "A buffer of bytes containing the data." msgstr "" -#: ../../source/ref-api/flwr.server.rst:59::1 +#: ../../source/ref-api/flwr.common.Array.rst:26::1 #, fuzzy -msgid ":py:obj:`flwr.server.strategy `\\" +msgid ":py:obj:`numpy `\\ \\(\\)" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.rst:59::1 -#: flwr.server.strategy:1 of -msgid "Contains the strategy abstraction and different implementations." +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +#: flwr.common.record.parametersrecord.Array.numpy:1 of +#, fuzzy +msgid "Return the array as a NumPy array." +msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`dtype `\\" msgstr "" -#: ../../source/ref-api/flwr.server.rst:59::1 +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of #, fuzzy -msgid ":py:obj:`flwr.server.workflow `\\" +msgid ":py:obj:`shape `\\" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.rst:59::1 -#: flwr.server.workflow:1 of +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of #, fuzzy -msgid "Workflows." 
-msgstr "Flux de travail" +msgid ":py:obj:`stype `\\" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.ClientManager.rst:2 +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`data `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:2 #, fuzzy -msgid "ClientManager" -msgstr "client" +msgid "ClientMessage" +msgstr "Côté client" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`all `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid ":py:obj:`evaluate_res `\\" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1 -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.all:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid "Return all available clients." +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid ":py:obj:`fit_res `\\" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`num_available `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid "" +":py:obj:`get_parameters_res " +"`\\" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.num_available:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.num_available:1 of -msgid "Return the number of available clients." 
+#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid "" +":py:obj:`get_properties_res " +"`\\" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`register `\\ \\(client\\)" +#: ../../source/ref-api/flwr.common.Code.rst:2 +msgid "Code" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.register:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.register:1 of -msgid "Register Flower ClientProxy instance." +#: flwr.common.typing.Code:1 of +msgid "Bases: :py:class:`~enum.Enum`" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid ":py:obj:`OK `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.Code.rst:26::1 msgid "" -":py:obj:`sample `\\ " -"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " +"`\\" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.sample:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.sample:1 of -msgid "Sample a number of Flower ClientProxy instances." +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid "" +":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " +"`\\" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`unregister `\\ \\(client\\)" +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid ":py:obj:`FIT_NOT_IMPLEMENTED `\\" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.unregister:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.unregister:1 of -msgid "Unregister Flower ClientProxy instance." 
+#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid "" +":py:obj:`EVALUATE_NOT_IMPLEMENTED " +"`\\" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.Config.rst:2 +#, fuzzy +msgid "Config" +msgstr "Configurer les clients" + +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 +#, fuzzy +msgid "ConfigsRecord" +msgstr "Configurer les clients" + +#: flwr.common.record.configsrecord.ConfigsRecord:1 of msgid "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\, timeout\\)" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | :py:class:`str` |" +" :py:class:`bytes` | :py:class:`bool` | :py:class:`list`\\ " +"[:py:class:`int`] | :py:class:`list`\\ [:py:class:`float`] | " +":py:class:`list`\\ [:py:class:`str`] | :py:class:`list`\\ " +"[:py:class:`bytes`] | :py:class:`list`\\ [:py:class:`bool`]]" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.wait_for:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.wait_for:1 of -msgid "Wait until at least `num_clients` are available." +#: flwr.common.record.configsrecord.ConfigsRecord:3 of +msgid "" +"A :code:`ConfigsRecord` is a Python dictionary designed to ensure that " +"each key-value pair adheres to specified data types. A " +":code:`ConfigsRecord` is one of the types of records that a " +"`flwr.common.RecordSet `_ supports " +"and can therefore be used to construct :code:`common.Message` objects." msgstr "" -#: flwr.server.client_manager.ClientManager.num_available:3 -#: flwr.server.client_manager.SimpleClientManager.num_available:3 of -msgid "**num_available** -- The number of currently available clients." +#: flwr.common.record.configsrecord.ConfigsRecord:9 of +msgid "" +"A dictionary that stores basic types (i.e. 
`str`, `int`, `float`, `bytes`" +" as defined in `ConfigsScalar`) and lists of such types (see " +"`ConfigsScalarList`)." msgstr "" -#: flwr.server.client_manager.ClientManager.register:6 -#: flwr.server.client_manager.SimpleClientManager.register:6 of +#: flwr.common.record.configsrecord.ConfigsRecord:13 of msgid "" -"**success** -- Indicating if registration was successful. False if " -"ClientProxy is already registered or can not be registered for any " -"reason." +"A boolean indicating whether config passed should be deleted from the " +"input dictionary immediately after adding them to the record. When set to" +" True, the data is duplicated in memory. If memory is a concern, set it " +"to False." msgstr "" -#: flwr.server.client_manager.ClientManager.unregister:3 -#: flwr.server.client_manager.SimpleClientManager.unregister:3 of -msgid "This method is idempotent." -msgstr "" - -#: ../../source/ref-api/flwr.server.Driver.rst:2 -#, fuzzy -msgid "Driver" -msgstr "serveur" - -#: flwr.server.driver.driver.Driver.create_message:1::1 of -#, fuzzy +#: flwr.common.record.configsrecord.ConfigsRecord:21 of msgid "" -":py:obj:`create_message `\\ " -"\\(content\\, message\\_type\\, ...\\[\\, ttl\\]\\)" -msgstr "" -"Flower 1.0 : ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" - -#: flwr.server.driver.driver.Driver.create_message:1 -#: flwr.server.driver.driver.Driver.create_message:1::1 of -msgid "Create a new message with specified parameters." -msgstr "" - -#: flwr.server.driver.driver.Driver.create_message:1::1 of -msgid ":py:obj:`get_node_ids `\\ \\(\\)" +"The usage of a :code:`ConfigsRecord` is envisioned for sending " +"configuration values telling the target node how to perform a certain " +"action (e.g. train/evaluate a model ). You can use standard Python built-" +"in types such as :code:`float`, :code:`str` , :code:`bytes`. All types " +"allowed are defined in :code:`flwr.common.ConfigsRecordValues`. 
While " +"lists are supported, we encourage you to use a :code:`ParametersRecord` " +"instead if these are of high dimensionality." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.get_node_ids:1 of -msgid "Get node IDs." +#: flwr.common.record.configsrecord.ConfigsRecord:29 of +msgid "" +"Let's see some examples of how to construct a :code:`ConfigsRecord` from " +"scratch:" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: flwr.common.record.configsrecord.ConfigsRecord:42 of msgid "" -":py:obj:`pull_messages `\\ " -"\\(message\\_ids\\)" +"Just like the other types of records in a :code:`flwr.common.RecordSet`, " +"types are enforced. If you need to add a custom data structure or object," +" we recommend to serialise it into bytes and save it as such (bytes are " +"allowed in a :code:`ConfigsRecord`)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.pull_messages:1 of -msgid "Pull messages based on message IDs." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of -msgid "" -":py:obj:`push_messages `\\ " -"\\(messages\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.push_messages:1 of -msgid "Push messages to specified node IDs." +#: collections.abc.MutableMapping.clear:1::1 +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 +#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 of +msgid "Return number of Bytes stored in this object." 
msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "" -":py:obj:`send_and_receive `\\ " -"\\(messages\\, \\*\\[\\, timeout\\]\\)" +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" msgstr "" "Flower 1.0 : ``start_server(..., " "config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " "...)``" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.send_and_receive:1 of -msgid "Push messages to specified node IDs and pull the reply messages." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:3 of -msgid "" -"This method constructs a new `Message` with given content and metadata. " -"The `run_id` and `src_node_id` will be set automatically." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:6 of -msgid "" -"The content for the new message. This holds records that are to be sent " -"to the destination node." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:9 of -msgid "" -"The type of the message, defining the action to be executed on the " -"receiving end." +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.pop:1 of +msgid "If key is not found, d is returned if given, otherwise KeyError is raised." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:12 of -msgid "The ID of the destination node to which the message is being sent." -msgstr "" +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`popitem `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.driver.driver.Driver.create_message:14 of -msgid "" -"The ID of the group to which this message is associated. 
In some " -"settings, this is used as the FL round." +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.popitem:1 of +msgid "as a 2-tuple; but raise KeyError if D is empty." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:17 of +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy msgid "" -"Time-to-live for the round trip of this message, i.e., the time from " -"sending this message to receiving a reply. It specifies in seconds the " -"duration for which the message and its potential reply are considered " -"valid. If unset, the default TTL (i.e., `common.DEFAULT_TTL`) will be " -"used." -msgstr "" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.driver.driver.Driver.create_message:23 of +#: collections.abc.MutableMapping.clear:1::1 of msgid "" -"**message** -- A new `Message` instance with the specified content and " -"metadata." +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" msgstr "" -#: flwr.server.driver.driver.Driver.pull_messages:3 of +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.update:1 of msgid "" -"This method is used to collect messages from the SuperLink that " -"correspond to a set of given message IDs." +"If E present and has a .keys() method, does: for k in E: D[k] = E[k] " +"If E present and lacks .keys() method, does: for (k, v) in E: D[k] = " +"v In either case, this is followed by: for k, v in F.items(): D[k] = v" msgstr "" -#: flwr.server.driver.driver.Driver.pull_messages:6 of -msgid "An iterable of message IDs for which reply messages are to be retrieved." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" msgstr "" -#: flwr.server.driver.driver.Driver.pull_messages:9 of -msgid "**messages** -- An iterable of messages received." +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of +msgid "This function counts booleans as occupying 1 Byte." 
msgstr "" -#: flwr.server.driver.driver.Driver.push_messages:3 of -msgid "" -"This method takes an iterable of messages and sends each message to the " -"node specified in `dst_node_id`." +#: ../../source/ref-api/flwr.common.Context.rst:2 +msgid "Context" msgstr "" -#: flwr.server.driver.driver.Driver.push_messages:6 -#: flwr.server.driver.driver.Driver.send_and_receive:7 of -msgid "An iterable of messages to be sent." +#: flwr.common.context.Context:3 of +msgid "The ID that identifies the node." msgstr "" -#: flwr.server.driver.driver.Driver.push_messages:9 of +#: flwr.common.context.Context:5 of msgid "" -"**message_ids** -- An iterable of IDs for the messages that were sent, " -"which can be used to pull replies." +"A config (key/value mapping) unique to the node and independent of the " +"`run_config`. This config persists across all runs this node participates" +" in." msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:3 of +#: flwr.common.context.Context:8 of msgid "" -"This method sends a list of messages to their destination node IDs and " -"then waits for the replies. It continues to pull replies until either all" -" replies are received or the specified timeout duration is exceeded." +"Holds records added by the entity in a given run and that will stay " +"local. This means that the data it holds will never leave the system it's" +" running from. This can be used as an intermediate storage or scratchpad " +"when executing mods. It can also be used as a memory to access at " +"different points during the lifecycle of this entity (e.g. across " +"multiple rounds)" msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:9 of +#: flwr.common.context.Context:15 of msgid "" -"The timeout duration in seconds. If specified, the method will wait for " -"replies for this duration. If `None`, there is no time limit and the " -"method will wait until replies for all messages are received." 
+"A config (key/value mapping) held by the entity in a given run and that " +"will stay local. It can be used at any point during the lifecycle of this" +" entity (e.g. across multiple rounds)" msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:14 of -msgid "**replies** -- An iterable of reply messages received from the SuperLink." -msgstr "" +#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#, fuzzy +msgid ":py:obj:`node_id `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.driver.driver.Driver.send_and_receive:18 -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:53 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:60 -#: of +#: ../../source/ref-api/flwr.common.Context.rst:31::1 #, fuzzy -msgid "Notes" -msgstr "Aucun" +msgid ":py:obj:`node_config `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.driver.driver.Driver.send_and_receive:19 of -msgid "" -"This method uses `push_messages` to send the messages and `pull_messages`" -" to collect the replies. If `timeout` is set, the method may not return " -"replies for all sent messages. A message remains valid until its TTL, " -"which is not affected by `timeout`." 
+#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#, fuzzy +msgid ":py:obj:`state `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#, fuzzy +msgid ":py:obj:`run_config `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 +msgid "DisconnectRes" msgstr "" -#: ../../source/ref-api/flwr.server.History.rst:2 -msgid "History" +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:28::1 +msgid ":py:obj:`reason `\\" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "" -":py:obj:`add_loss_centralized " -"`\\ \\(server\\_round\\, " -"loss\\)" +#: ../../source/ref-api/flwr.common.Error.rst:2 +msgid "Error" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1 -#: flwr.server.history.History.add_loss_centralized:1::1 of -#, fuzzy -msgid "Add one loss entry (from centralized evaluation)." -msgstr "Évaluation centralisée" +#: flwr.common.message.Error:3 of +msgid "An identifier for the error." +msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "" -":py:obj:`add_loss_distributed " -"`\\ \\(server\\_round\\, " -"loss\\)" +#: flwr.common.message.Error:5 of +msgid "A reason for why the error arose (e.g. an exception stack-trace)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_loss_distributed:1 of -msgid "Add one loss entry (from distributed evaluation)." +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`code `\\" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "" -":py:obj:`add_metrics_centralized " -"`\\ \\(server\\_round\\, " -"metrics\\)" +#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of +msgid "Error code." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_centralized:1 of -#, fuzzy -msgid "Add metrics entries (from centralized evaluation)." 
-msgstr "Évaluation centralisée" +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`reason `\\" +msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "" -":py:obj:`add_metrics_distributed " -"`\\ \\(server\\_round\\, " -"metrics\\)" +#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of +msgid "Reason reported about the error." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_distributed:1 of -msgid "Add metrics entries (from distributed evaluation)." +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 +#, fuzzy +msgid "EvaluateIns" +msgstr "Explications" + +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +msgid ":py:obj:`parameters `\\" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "" -":py:obj:`add_metrics_distributed_fit " -"`\\ \\(server\\_round\\," -" ...\\)" +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +msgid ":py:obj:`config `\\" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_distributed_fit:1 of -msgid "Add metrics entries (from distributed fit)." 
+#: ../../source/ref-api/flwr.common.EvaluateRes.rst:2 +msgid "EvaluateRes" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 -msgid "LegacyContext" +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`status `\\" msgstr "" -#: flwr.server.compat.legacy_context.LegacyContext:1 of -msgid "Bases: :py:class:`~flwr.common.context.Context`" +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`loss `\\" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -#, fuzzy -msgid ":py:obj:`config `\\" -msgstr "serveur.stratégie.Stratégie" +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`num_examples `\\" +msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -#, fuzzy -msgid ":py:obj:`strategy `\\" -msgstr "serveur.stratégie.Stratégie" +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`metrics `\\" +msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`client_manager `\\" +#: ../../source/ref-api/flwr.common.EventType.rst:2 +msgid "EventType" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -#, fuzzy -msgid ":py:obj:`history `\\" -msgstr "serveur.stratégie.Stratégie" +#: flwr.common.telemetry.EventType:1 of +msgid "Bases: :py:class:`str`, :py:class:`~enum.Enum`" +msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`state `\\" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.server.Server.rst:2 -msgid "Server" -msgstr "Serveur" - -#: flwr.server.server.Server.client_manager:1::1 of -msgid ":py:obj:`client_manager `\\ \\(\\)" -msgstr "" - -#: flwr.server.server.Server.client_manager:1 -#: flwr.server.server.Server.client_manager:1::1 of -msgid "Return ClientManager." 
-msgstr "" - -#: flwr.server.server.Server.client_manager:1::1 of msgid "" -":py:obj:`disconnect_all_clients " -"`\\ \\(timeout\\)" +":py:obj:`encode `\\ \\(\\[encoding\\, " +"errors\\]\\)" msgstr "" +"Flower 1.0 : ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.disconnect_all_clients:1 of -msgid "Send shutdown signal to all clients." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.encode:1 of +msgid "Encode the string using the codec registered for encoding." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`evaluate_round `\\ " -"\\(server\\_round\\, timeout\\)" -msgstr "" - -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.evaluate_round:1 of -msgid "Validate current global model on a number of clients." -msgstr "" - -#: flwr.server.server.Server.client_manager:1::1 of -msgid ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" +":py:obj:`replace `\\ \\(old\\, new\\[\\, " +"count\\]\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.fit:1 of -msgid "Run federated averaging for a number of rounds." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.replace:1 of +msgid "Return a copy with all occurrences of substring old replaced by new." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy msgid "" -":py:obj:`fit_round `\\ \\(server\\_round\\," -" timeout\\)" -msgstr "" +":py:obj:`split `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.fit_round:1 of -msgid "Perform a single round of federated averaging." 
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rsplit:1 flwr.common.EventType.split:1 of +msgid "" +"Return a list of the substrings in the string, using sep as the separator" +" string." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`set_max_workers `\\ " -"\\(max\\_workers\\)" +":py:obj:`rsplit `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.set_max_workers:1 of -msgid "Set the max_workers used by ThreadPoolExecutor." -msgstr "" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`join `\\ \\(iterable\\, \\/\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.server.Server.client_manager:1::1 of -msgid ":py:obj:`set_strategy `\\ \\(strategy\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.join:1 of +msgid "Concatenate any number of strings." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.set_strategy:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Replace server strategy." -msgstr "stratégie.du.serveur" +msgid ":py:obj:`capitalize `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.ServerApp.rst:2 -#, fuzzy -msgid "ServerApp" -msgstr "serveur" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.capitalize:1 of +msgid "Return a capitalized version of the string." 
+msgstr "" -#: flwr.server.server_app.ServerApp:5 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Use the `ServerApp` with an existing `Strategy`:" -msgstr "Utilise une stratégie existante" +msgid ":py:obj:`casefold `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.server_app.ServerApp:15 of -msgid "Use the `ServerApp` with a custom main function:" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.casefold:1 of +msgid "Return a version of the string suitable for caseless comparisons." msgstr "" -#: flwr.server.server_app.ServerApp.main:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`main `\\ \\(\\)" +msgid ":py:obj:`title `\\ \\(\\)" msgstr "serveur.stratégie.Stratégie" -#: flwr.server.server_app.ServerApp.main:1 -#: flwr.server.server_app.ServerApp.main:1::1 of -msgid "Return a decorator that registers the main fn with the server app." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.title:1 of +msgid "Return a version of the string where each word is titlecased." msgstr "" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:2 -#, fuzzy -msgid "ServerConfig" -msgstr "serveur" - -#: flwr.server.server_config.ServerConfig:3 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -"All attributes have default values which allows users to configure just " -"the ones they care about." +":py:obj:`center `\\ \\(width\\[\\, " +"fillchar\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 -msgid ":py:obj:`num_rounds `\\" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.center:1 of +msgid "Return a centered string of length width." 
msgstr "" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 -msgid ":py:obj:`round_timeout `\\" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:2 -msgid "SimpleClientManager" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +"Return the number of non-overlapping occurrences of substring sub in " +"string S[start:end]." msgstr "" -#: flwr.server.client_manager.SimpleClientManager:1 of -msgid "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`expandtabs `\\ " +"\\(\\[tabsize\\]\\)" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid ":py:obj:`all `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.expandtabs:1 of +msgid "Return a copy where all tab characters are expanded using spaces." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`num_available `\\" -" \\(\\)" +":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`register `\\ " -"\\(client\\)" +"Return the lowest index in S where substring sub is found, such that sub " +"is contained within S[start:end]." 
msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid "" -":py:obj:`sample `\\ " -"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" -msgstr "" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`partition `\\ \\(sep\\, \\/\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid "" -":py:obj:`unregister `\\ " -"\\(client\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.partition:1 flwr.common.EventType.rpartition:1 of +msgid "Partition the string into three parts using the given separator." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\[\\, timeout\\]\\)" +":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:3 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -"Blocks until the requested number of clients is available or until a " -"timeout is reached. Current timeout default: 1 day." -msgstr "" - -#: flwr.server.client_manager.SimpleClientManager.wait_for:6 of -msgid "The number of clients to wait for." -msgstr "" - -#: flwr.server.client_manager.SimpleClientManager.wait_for:8 of -msgid "The time in seconds to wait for, defaults to 86400 (24h)." +":py:obj:`ljust `\\ \\(width\\[\\, " +"fillchar\\]\\)" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:11 of -msgid "**success**" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.ljust:1 of +msgid "Return a left-justified string of length width." 
msgstr "" -#: ../../source/ref-api/flwr.server.run_driver_api.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "run\\_driver\\_api" -msgstr "flower-driver-api" - -#: ../../source/ref-api/flwr.server.run_fleet_api.rst:2 -msgid "run\\_fleet\\_api" -msgstr "" +msgid ":py:obj:`lower `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.run_server_app.rst:2 -msgid "run\\_server\\_app" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.lower:1 of +msgid "Return a copy of the string converted to lowercase." msgstr "" -#: ../../source/ref-api/flwr.server.run_superlink.rst:2 -#, fuzzy -msgid "run\\_superlink" -msgstr "flower-superlink" - -#: ../../source/ref-api/flwr.server.start_server.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "start\\_server" -msgstr "serveur.start_server" +msgid ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.app.start_server:3 of -msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.lstrip:1 of +msgid "Return a copy of the string with leading whitespace removed." msgstr "" -#: flwr.server.app.start_server:5 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -"A server implementation, either `flwr.server.Server` or a subclass " -"thereof. If no instance is provided, then `start_server` will create one." +":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" msgstr "" -#: flwr.server.app.start_server:9 flwr.simulation.app.start_simulation:28 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -"Currently supported values are `num_rounds` (int, default: 1) and " -"`round_timeout` in seconds (float, default: None)." +"Return the highest index in S where substring sub is found, such that sub" +" is contained within S[start:end]." 
msgstr "" -#: flwr.server.app.start_server:12 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -"An implementation of the abstract base class " -"`flwr.server.strategy.Strategy`. If no strategy is provided, then " -"`start_server` will use `flwr.server.strategy.FedAvg`." +":py:obj:`rindex `\\ \\(sub\\[\\, " +"start\\[\\, end\\]\\]\\)" msgstr "" -#: flwr.server.app.start_server:16 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_server` will use " -"`flwr.server.client_manager.SimpleClientManager`." +":py:obj:`rjust `\\ \\(width\\[\\, " +"fillchar\\]\\)" msgstr "" -#: flwr.server.app.start_server:21 of -msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower" -" clients. The default should be sufficient for most models. Users who " -"train very large models might need to increase this value. Note that the " -"Flower clients need to be started with the same value (see " -"`flwr.client.start_client`), otherwise clients will not know about the " -"increased limit and block larger messages." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rjust:1 of +msgid "Return a right-justified string of length width." msgstr "" -#: flwr.server.app.start_server:28 of -msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order: * CA certificate. * " -"server certificate. * server private key." -msgstr "" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.app.start_server:28 of -msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. 
The tuple is expected to have three " -"bytes elements in the following order:" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rstrip:1 of +msgid "Return a copy of the string with trailing whitespace removed." msgstr "" -#: flwr.server.app.start_server:32 of -#, fuzzy -msgid "CA certificate." -msgstr "Certificats" - -#: flwr.server.app.start_server:33 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "server certificate." -msgstr "Certificats" +msgid ":py:obj:`rpartition `\\ \\(sep\\, \\/\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.app.start_server:34 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "server private key." -msgstr "stratégie.du.serveur" +msgid "" +":py:obj:`splitlines `\\ " +"\\(\\[keepends\\]\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.app.start_server:37 of -msgid "**hist** -- Object containing training and evaluation metrics." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.splitlines:1 of +msgid "Return a list of the lines in the string, breaking at line boundaries." msgstr "" -#: flwr.server.app.start_server:42 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Starting an insecure server:" -msgstr "Démarrer le serveur" +msgid ":py:obj:`strip `\\ \\(\\[chars\\]\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.app.start_server:46 of -#, fuzzy -msgid "Starting an SSL-enabled server:" -msgstr "Démarrer le serveur" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.strip:1 of +msgid "Return a copy of the string with leading and trailing whitespace removed." 
+msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "strategy" -msgstr "stratégie.du.serveur" +msgid ":py:obj:`swapcase `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.swapcase:1 of msgid "" -":py:obj:`Bulyan `\\ \\(\\*\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" +"Convert uppercase characters to lowercase and lowercase characters to " +"uppercase." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.bulyan.Bulyan:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Bulyan strategy." -msgstr "Stratégies intégrées" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`DPFedAvgAdaptive `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\)" -msgstr "" +msgid ":py:obj:`translate `\\ \\(table\\, \\/\\)" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of -msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.translate:1 of +msgid "Replace each character in the string using the given translation table." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`DPFedAvgFixed `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" -msgstr "" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`upper `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of -msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." 
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.upper:1 of +msgid "Return a copy of the string converted to uppercase." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " -"`\\ " -"\\(...\\)" +":py:obj:`startswith `\\ \\(prefix\\[\\," +" start\\[\\, end\\]\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 -#: of -msgid "Strategy wrapper for central DP with client-side adaptive clipping." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return True if S starts with the specified prefix, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " -"`\\ " -"\\(...\\)" +":py:obj:`endswith `\\ \\(suffix\\[\\, " +"start\\[\\, end\\]\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 -#: of -msgid "Strategy wrapper for central DP with server-side adaptive clipping." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return True if S ends with the specified suffix, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`DifferentialPrivacyClientSideFixedClipping " -"`\\ " -"\\(...\\)" +":py:obj:`removeprefix `\\ " +"\\(prefix\\, \\/\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 -#: of -msgid "Strategy wrapper for central DP with client-side fixed clipping." 
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.removeprefix:1 of +msgid "Return a str with the given prefix string removed if present." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`DifferentialPrivacyServerSideFixedClipping " -"`\\ " -"\\(...\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 -#: of -msgid "Strategy wrapper for central DP with server-side fixed clipping." +":py:obj:`removesuffix `\\ " +"\\(suffix\\, \\/\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.removesuffix:1 of +msgid "Return a str with the given suffix string removed if present." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedadagrad.FedAdagrad:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." -msgstr "" -"`FedAdam` et `FedAdam` correspondent à la dernière version de l'article " -"sur l'optimisation fédérée adaptative." +msgid ":py:obj:`isascii `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedAdam `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isascii:1 of +msgid "Return True if all characters in the string are ASCII, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedadam.FedAdam:1 of -msgid "FedAdam - Adaptive Federated Optimization using Adam." 
-msgstr "" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`islower `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedAvg `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.islower:1 of +msgid "Return True if the string is a lowercase string, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedavg.FedAvg:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Federated Averaging strategy." -msgstr "Stratégie de moyenne fédérée." +msgid ":py:obj:`isupper `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedAvgAndroid `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isupper:1 of +msgid "Return True if the string is an uppercase string, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`istitle `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.istitle:1 of +msgid "Return True if the string is a title-cased string, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedavgm.FedAvgM:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Federated Averaging with Momentum strategy." -msgstr "Stratégie de moyenne fédérée." 
+msgid ":py:obj:`isspace `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedMedian `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isspace:1 of +msgid "Return True if the string is a whitespace string, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedmedian.FedMedian:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Configurable FedMedian strategy implementation." -msgstr "Configuration de l'évaluation fédérée" +msgid ":py:obj:`isdecimal `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedOpt `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isdecimal:1 of +msgid "Return True if the string is a decimal string, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedopt.FedOpt:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Federated Optim strategy." -msgstr "Stratégie de moyenne fédérée." +msgid ":py:obj:`isdigit `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedProx `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isdigit:1 of +msgid "Return True if the string is a digit string, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedprox.FedProx:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Federated Optimization strategy." -msgstr "Stratégie de moyenne fédérée." 
+msgid ":py:obj:`isnumeric `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedTrimmedAvg `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isnumeric:1 of +msgid "Return True if the string is a numeric string, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of -msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`isalpha `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isalpha:1 of +msgid "Return True if the string is an alphabetic string, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedXgbBagging `\\ " -"\\(\\[evaluate\\_function\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`isalnum `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isalnum:1 of +msgid "Return True if the string is an alpha-numeric string, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of -msgid "Configurable FedXgbBagging strategy implementation." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`isidentifier `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isidentifier:1 of +msgid "Return True if the string is a valid Python identifier, False otherwise." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedXgbCyclic `\\ " -"\\(\\*\\*kwargs\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`isprintable `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isprintable:1 of +msgid "Return True if the string is printable, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of -msgid "Configurable FedXgbCyclic strategy implementation." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`zfill `\\ \\(width\\, \\/\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.zfill:1 of +msgid "" +"Pad a numeric string with zeros on the left, to fill a field of the given" +" width." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " +":py:obj:`format `\\ \\(\\*args\\, " "\\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of -msgid "Configurable FedXgbNnAvg strategy implementation." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return a formatted version of S, using substitutions from args and kwargs." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedYogi `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`format_map `\\ \\(mapping\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return a formatted version of S, using substitutions from mapping." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "FedYogi [Reddi et al., 2020] strategy." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`maketrans `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.maketrans:1 of +msgid "Return a translation table usable for str.translate()." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FaultTolerantFedAvg " -"`\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`PING `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of -msgid "Configurable fault-tolerant FedAvg strategy implementation." +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_CLIENT_ENTER `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`Krum `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_CLIENT_LEAVE `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.krum.Krum:1 of -msgid "Krum [Blanchard et al., 2017] strategy." +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_SERVER_ENTER `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_SERVER_LEAVE `\\" +msgstr "" + +#: flwr.common.EventType.capitalize:1::1 of msgid "" -":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " -"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" +":py:obj:`START_SIMULATION_ENTER " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.qfedavg.QFedAvg:1 of -msgid "Configurable QFedAvg strategy implementation." 
+#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid ":py:obj:`Strategy `\\ \\(\\)" +msgid "" +":py:obj:`RUN_SUPEREXEC_ENTER " +"`\\" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.strategy.Strategy:1 of -msgid "Abstract base class for server strategy implementations." -msgstr "" +#: flwr.common.EventType.capitalize:1::1 of +#, fuzzy +msgid "" +":py:obj:`RUN_SUPEREXEC_LEAVE " +"`\\" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:2 -msgid "Bulyan" -msgstr "" +#: flwr.common.EventType.capitalize:1::1 of +#, fuzzy +msgid "" +":py:obj:`CLI_FLOWER_SIMULATION_ENTER " +"`\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.bulyan.Bulyan:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 -#: flwr.server.strategy.fedavgm.FedAvgM:1 -#: flwr.server.strategy.fedmedian.FedMedian:1 -#: flwr.server.strategy.fedopt.FedOpt:1 flwr.server.strategy.fedprox.FedProx:1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 -#: flwr.server.strategy.krum.Krum:1 flwr.server.strategy.qfedavg.QFedAvg:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.fedavg.FedAvg`" -msgstr "" +#: flwr.common.EventType.capitalize:1::1 of +#, fuzzy +msgid "" +":py:obj:`CLI_FLOWER_SIMULATION_LEAVE " +"`\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.bulyan.Bulyan:3 of -msgid "Implementation based on https://arxiv.org/abs/1802.07927." 
-msgstr "" +#: flwr.common.EventType.capitalize:1::1 of +#, fuzzy +msgid "" +":py:obj:`PYTHON_API_RUN_SIMULATION_ENTER " +"`\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.bulyan.Bulyan:5 -#: flwr.server.strategy.fedadagrad.FedAdagrad:5 -#: flwr.server.strategy.fedadam.FedAdam:5 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:5 -#: flwr.server.strategy.fedavgm.FedAvgM:5 flwr.server.strategy.fedopt.FedOpt:5 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:5 -#: flwr.server.strategy.fedyogi.FedYogi:5 flwr.server.strategy.krum.Krum:5 of -msgid "Fraction of clients used during training. Defaults to 1.0." +#: flwr.common.EventType.capitalize:1::1 of +#, fuzzy +msgid "" +":py:obj:`PYTHON_API_RUN_SIMULATION_LEAVE " +"`\\" +msgstr "serveur.stratégie.Stratégie" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERLINK_ENTER " +"`\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:7 -#: flwr.server.strategy.fedadagrad.FedAdagrad:7 -#: flwr.server.strategy.fedadam.FedAdam:7 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:7 -#: flwr.server.strategy.fedavgm.FedAvgM:7 flwr.server.strategy.fedopt.FedOpt:7 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:7 -#: flwr.server.strategy.fedyogi.FedYogi:7 flwr.server.strategy.krum.Krum:7 of -msgid "Fraction of clients used during validation. Defaults to 1.0." 
+#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERLINK_LEAVE " +"`\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:9 -#: flwr.server.strategy.fedadagrad.FedAdagrad:9 -#: flwr.server.strategy.fedadam.FedAdam:9 flwr.server.strategy.fedavg.FedAvg:13 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:9 -#: flwr.server.strategy.fedavgm.FedAvgM:9 flwr.server.strategy.fedopt.FedOpt:9 -#: flwr.server.strategy.fedprox.FedProx:45 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:9 -#: flwr.server.strategy.fedyogi.FedYogi:9 flwr.server.strategy.krum.Krum:9 of -msgid "Minimum number of clients used during training. Defaults to 2." +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERNODE_ENTER " +"`\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:11 -#: flwr.server.strategy.fedadagrad.FedAdagrad:11 -#: flwr.server.strategy.fedadam.FedAdam:11 -#: flwr.server.strategy.fedavg.FedAvg:15 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:11 -#: flwr.server.strategy.fedavgm.FedAvgM:11 -#: flwr.server.strategy.fedopt.FedOpt:11 -#: flwr.server.strategy.fedprox.FedProx:47 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:11 -#: flwr.server.strategy.fedyogi.FedYogi:11 flwr.server.strategy.krum.Krum:11 of -msgid "Minimum number of clients used during validation. Defaults to 2." 
-msgstr "" +#: flwr.common.EventType.capitalize:1::1 of +#, fuzzy +msgid "" +":py:obj:`RUN_SUPERNODE_LEAVE " +"`\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.bulyan.Bulyan:13 -#: flwr.server.strategy.fedadagrad.FedAdagrad:13 -#: flwr.server.strategy.fedadam.FedAdam:13 -#: flwr.server.strategy.fedavg.FedAvg:17 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:13 -#: flwr.server.strategy.fedavgm.FedAvgM:13 -#: flwr.server.strategy.fedopt.FedOpt:13 -#: flwr.server.strategy.fedprox.FedProx:49 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:13 -#: flwr.server.strategy.fedyogi.FedYogi:13 flwr.server.strategy.krum.Krum:13 of -msgid "Minimum number of total clients in the system. Defaults to 2." +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SERVER_APP_ENTER " +"`\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:15 flwr.server.strategy.krum.Krum:15 of -msgid "Number of malicious clients in the system. Defaults to 0." +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SERVER_APP_LEAVE " +"`\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:17 -#: flwr.server.strategy.fedadagrad.FedAdagrad:15 -#: flwr.server.strategy.fedadam.FedAdam:15 -#: flwr.server.strategy.fedavg.FedAvg:19 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:15 -#: flwr.server.strategy.fedavgm.FedAvgM:15 -#: flwr.server.strategy.fedopt.FedOpt:15 -#: flwr.server.strategy.fedprox.FedProx:51 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:15 -#: flwr.server.strategy.fedyogi.FedYogi:17 -#: flwr.server.strategy.fedyogi.FedYogi:18 -#: flwr.server.strategy.fedyogi.FedYogi:19 flwr.server.strategy.krum.Krum:20 of -msgid "Optional function used for validation. Defaults to None." 
+#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_CLIENT_APP_ENTER " +"`\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:19 -#: flwr.server.strategy.fedadagrad.FedAdagrad:17 -#: flwr.server.strategy.fedadam.FedAdam:17 -#: flwr.server.strategy.fedavg.FedAvg:21 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:17 -#: flwr.server.strategy.fedavgm.FedAvgM:17 -#: flwr.server.strategy.fedopt.FedOpt:17 -#: flwr.server.strategy.fedprox.FedProx:53 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:17 -#: flwr.server.strategy.fedyogi.FedYogi:20 flwr.server.strategy.krum.Krum:22 of -msgid "Function used to configure training. Defaults to None." +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_CLIENT_APP_LEAVE " +"`\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:21 -#: flwr.server.strategy.fedadagrad.FedAdagrad:19 -#: flwr.server.strategy.fedadam.FedAdam:19 -#: flwr.server.strategy.fedavg.FedAvg:23 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:19 -#: flwr.server.strategy.fedavgm.FedAvgM:19 -#: flwr.server.strategy.fedopt.FedOpt:19 -#: flwr.server.strategy.fedprox.FedProx:55 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:19 -#: flwr.server.strategy.fedyogi.FedYogi:22 flwr.server.strategy.krum.Krum:24 of -msgid "Function used to configure validation. Defaults to None." +#: flwr.common.EventType.capitalize:3 of +msgid "" +"More specifically, make the first character have upper case and the rest " +"lower case." 
msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:23 -#: flwr.server.strategy.fedadagrad.FedAdagrad:25 -#: flwr.server.strategy.fedadam.FedAdam:21 -#: flwr.server.strategy.fedavg.FedAvg:25 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:21 -#: flwr.server.strategy.fedavgm.FedAvgM:21 -#: flwr.server.strategy.fedopt.FedOpt:21 -#: flwr.server.strategy.fedprox.FedProx:57 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:21 -#: flwr.server.strategy.fedyogi.FedYogi:24 flwr.server.strategy.krum.Krum:26 of -msgid "Whether or not accept rounds containing failures. Defaults to True." +#: flwr.common.EventType.center:3 flwr.common.EventType.ljust:3 +#: flwr.common.EventType.rjust:3 of +msgid "Padding is done using the specified fill character (default is a space)." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:25 -#: flwr.server.strategy.fedadagrad.FedAdagrad:27 -#: flwr.server.strategy.fedadam.FedAdam:23 -#: flwr.server.strategy.fedavg.FedAvg:27 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:24 -#: flwr.server.strategy.fedavgm.FedAvgM:23 -#: flwr.server.strategy.fedopt.FedOpt:23 -#: flwr.server.strategy.fedprox.FedProx:59 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:23 -#: flwr.server.strategy.fedyogi.FedYogi:26 flwr.server.strategy.krum.Krum:28 of -#, fuzzy -msgid "Initial global model parameters." -msgstr "Initialise le modèle global" - -#: flwr.server.strategy.bulyan.Bulyan:27 of +#: flwr.common.EventType.count:1 of msgid "" -"Byzantine resilient aggregation rule that is used as the first step of " -"the Bulyan (e.g., Krum)" +"Return the number of non-overlapping occurrences of substring sub in " +"string S[start:end]. Optional arguments start and end are interpreted as" +" in slice notation." 
msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:29 of -msgid "arguments to the first_aggregation rule" +#: flwr.common.EventType.encode:3 of +msgid "encoding" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +#: flwr.common.EventType.encode:4 of +msgid "The encoding in which to encode the string." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -#, fuzzy -msgid "Aggregate evaluation losses using weighted average." -msgstr "Résultats globaux de l'évaluation." +#: flwr.common.EventType.encode:9 of +msgid "errors" +msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.EventType.encode:6 of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"The error handling scheme to use for encoding errors. The default is " +"'strict' meaning that encoding errors raise a UnicodeEncodeError. Other " +"possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well " +"as any other name registered with codecs.register_error that can handle " +"UnicodeEncodeErrors." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy -msgid "Aggregate fit results using Bulyan." -msgstr "Résultats globaux de l'évaluation." 
- -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.EventType.endswith:1 of msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +"Return True if S ends with the specified suffix, False otherwise. With " +"optional start, test S beginning at that position. With optional end, " +"stop comparing S at that position. suffix can also be a tuple of strings " +"to try." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 
-#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of -#, fuzzy -msgid "Configure the next round of evaluation." -msgstr "Configuration de l'évaluation côté serveur" +#: flwr.common.EventType.expandtabs:3 of +msgid "If tabsize is not given, a tab size of 8 characters is assumed." +msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.EventType.find:1 flwr.common.EventType.index:1 of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Return the lowest index in S where substring sub is found, such that sub " +"is contained within S[start:end]. Optional arguments start and end are " +"interpreted as in slice notation." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: 
flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_fit:1 -#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_fit:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of -msgid "Configure the next round of training." +#: flwr.common.EventType.find:5 flwr.common.EventType.rfind:5 of +msgid "Return -1 on failure." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.EventType.format:1 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"Return a formatted version of S, using substitutions from args and " +"kwargs. The substitutions are identified by braces ('{' and '}')." 
msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.evaluate:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Evaluate model parameters using an evaluation function." +#: flwr.common.EventType.format_map:1 of +msgid "" +"Return a formatted version of S, using substitutions from mapping. The " +"substitutions are identified by braces ('{' and '}')." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.common.EventType.index:5 flwr.common.EventType.rindex:5 of +msgid "Raises ValueError when the substring is not found." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.initialize_parameters:1 -#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -#, fuzzy -msgid "Initialize global model parameters." 
-msgstr "Initialise le modèle global" - -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.EventType.isalnum:3 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" - -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_evaluation_clients:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of -msgid "Use a fraction of available clients for evaluation." +"A string is alpha-numeric if all characters in the string are alpha-" +"numeric and there is at least one character in the string." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.EventType.isalpha:3 of msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" -msgstr "" - -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_fit_clients:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of -msgid "Return the sample size and the required number of available clients." +"A string is alphabetic if all characters in the string are alphabetic and" +" there is at least one character in the string." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 -msgid "DPFedAvgAdaptive" -msgstr "DPFedAvgAdaptive" - -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" +#: flwr.common.EventType.isascii:3 of +msgid "" +"ASCII characters have code points in the range U+0000-U+007F. Empty " +"string is ASCII too." msgstr "" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:3 of -msgid "This class is deprecated and will be removed in a future release." +#: flwr.common.EventType.isdecimal:3 of +msgid "" +"A string is a decimal string if all characters in the string are decimal " +"and there is at least one character in the string." 
msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.EventType.isdigit:3 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"A string is a digit string if all characters in the string are digits and" +" there is at least one character in the string." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -#, fuzzy -msgid "Aggregate evaluation losses using the given strategy." -msgstr "Résultats globaux de l'évaluation." - -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.EventType.isidentifier:3 of msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"Call keyword.iskeyword(s) to test whether string s is a reserved " +"identifier, such as \"def\" or \"class\"." 
msgstr "" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." +#: flwr.common.EventType.islower:3 of +msgid "" +"A string is lowercase if all cased characters in the string are lowercase" +" and there is at least one cased character in the string." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.EventType.isnumeric:3 of msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"A string is numeric if all characters in the string are numeric and there" +" is at least one character in the string." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of -msgid "Configure the next round of evaluation using the specified strategy." +#: flwr.common.EventType.isprintable:3 of +msgid "" +"A string is printable if all of its characters are considered printable " +"in repr() or if it is empty." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.EventType.isspace:3 of msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"A string is whitespace if all characters in the string are whitespace and" +" there is at least one character in the string." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.EventType.istitle:3 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"In a title-cased string, upper- and title-case characters may only follow" +" uncased characters and lowercase characters only cased ones." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of -msgid "Evaluate model parameters using an evaluation function from the strategy." +#: flwr.common.EventType.isupper:3 of +msgid "" +"A string is uppercase if all cased characters in the string are uppercase" +" and there is at least one cased character in the string." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.EventType.join:3 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"The string whose method is called is inserted in between each given " +"string. The result is returned as a new string." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of -msgid "Initialize global model parameters using given strategy." +#: flwr.common.EventType.join:6 of +msgid "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 -#: flwr.server.strategy.strategy.Strategy.configure_fit:3 -#: flwr.server.strategy.strategy.Strategy.evaluate:6 of -#, fuzzy -msgid "The current round of federated learning." -msgstr "Qu'est-ce que l'apprentissage fédéré ?" 
- -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 -#: flwr.server.strategy.strategy.Strategy.configure_fit:7 -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of -msgid "The client manager which holds all currently connected clients." +#: flwr.common.EventType.lstrip:3 flwr.common.EventType.rstrip:3 +#: flwr.common.EventType.strip:3 of +msgid "If chars is given and not None, remove characters in chars instead." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of +#: flwr.common.EventType.maketrans:3 of msgid "" -"**evaluate_configuration** -- A list of tuples. Each tuple in the list " -"identifies a `ClientProxy` and the `EvaluateIns` for this particular " -"`ClientProxy`. If a particular `ClientProxy` is not included in this " -"list, it means that this `ClientProxy` will not participate in the next " -"round of federated evaluation." +"If there is only one argument, it must be a dictionary mapping Unicode " +"ordinals (integers) or characters to Unicode ordinals, strings or None. " +"Character keys will be then converted to ordinals. If there are two " +"arguments, they must be strings of equal length, and in the resulting " +"dictionary, each character in x will be mapped to the character at the " +"same position in y. If there is a third argument, it must be a string, " +"whose characters will be mapped to None in the result." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 -msgid "DPFedAvgFixed" -msgstr "DPFedAvgFixed" +#: flwr.common.EventType.partition:3 of +msgid "" +"This will search for the separator in the string. 
If the separator is " +"found, returns a 3-tuple containing the part before the separator, the " +"separator itself, and the part after it." +msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 -#: flwr.server.strategy.fedavg.FedAvg:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" +#: flwr.common.EventType.partition:7 of +msgid "" +"If the separator is not found, returns a 3-tuple containing the original " +"string and two empty strings." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.EventType.removeprefix:3 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"If the string starts with the prefix string, return string[len(prefix):]." +" Otherwise, return a copy of the original string." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.EventType.removesuffix:3 of msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"If the string ends with the suffix string and that suffix is not empty, " +"return string[:-len(suffix)]. Otherwise, return a copy of the original " +"string." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of -msgid "Aggregate training results using unweighted aggregation." 
+#: flwr.common.EventType.replace:5 of +msgid "count" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.EventType.replace:4 of msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Maximum number of occurrences to replace. -1 (the default value) means " +"replace all occurrences." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.EventType.replace:7 of msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"If the optional argument count is given, only the first count occurrences" +" are replaced." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of +#: flwr.common.EventType.rfind:1 flwr.common.EventType.rindex:1 of msgid "" -"Configure the next round of training incorporating Differential Privacy " -"(DP)." +"Return the highest index in S where substring sub is found, such that sub" +" is contained within S[start:end]. Optional arguments start and end are " +"interpreted as in slice notation." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.EventType.rpartition:3 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"This will search for the separator in the string, starting at the end. If" +" the separator is found, returns a 3-tuple containing the part before the" +" separator, the separator itself, and the part after it." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.EventType.rpartition:7 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"If the separator is not found, returns a 3-tuple containing two empty " +"strings and the original string." 
msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of -msgid "" -"Configuration of the next training round includes information related to " -"DP, such as clip norm and noise stddev." +#: flwr.common.EventType.rsplit:7 flwr.common.EventType.split:7 of +msgid "sep" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 -#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of -msgid "" -"**fit_configuration** -- A list of tuples. Each tuple in the list " -"identifies a `ClientProxy` and the `FitIns` for this particular " -"`ClientProxy`. If a particular `ClientProxy` is not included in this " -"list, it means that this `ClientProxy` will not participate in the next " -"round of federated learning." +#: flwr.common.EventType.rsplit:4 flwr.common.EventType.split:4 of +msgid "The separator used to split the string." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 -msgid "DifferentialPrivacyClientSideAdaptiveClipping" +#: flwr.common.EventType.rsplit:6 flwr.common.EventType.split:6 of +msgid "" +"When set to None (the default value), will split on any whitespace " +"character (including \\\\n \\\\r \\\\t \\\\f and spaces) and will discard" +" empty strings from the result." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 -#: of -msgid "Use `adaptiveclipping_mod` modifier at the client side." 
+#: flwr.common.EventType.rsplit:11 flwr.common.EventType.split:11 of +msgid "maxsplit" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 -#: of +#: flwr.common.EventType.rsplit:10 flwr.common.EventType.split:10 of msgid "" -"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " -"performs clipping on the server-side, " -"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " -"happen on the client-side, usually by using the built-in " -"`adaptiveclipping_mod`." +"Maximum number of splits (starting from the left). -1 (the default value)" +" means no limit." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 -#: of -msgid "The strategy to which DP functionalities will be added by this wrapper." +#: flwr.common.EventType.rsplit:13 of +msgid "Splitting starts at the end of the string and works to the front." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 -#: of -msgid "The noise multiplier for the Gaussian mechanism for model updates." +#: flwr.common.EventType.split:13 of +msgid "" +"Note, str.split() is mainly useful for data that has been intentionally " +"delimited. With natural text that includes punctuation, consider using " +"the regular expression module." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 -#: of -msgid "The number of clients that are sampled on each round." +#: flwr.common.EventType.splitlines:3 of +msgid "" +"Line breaks are not included in the resulting list unless keepends is " +"given and true." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 -#: of +#: flwr.common.EventType.startswith:1 of msgid "" -"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " -"recommends to set to 0.1." +"Return True if S starts with the specified prefix, False otherwise. With " +"optional start, test S beginning at that position. With optional end, " +"stop comparing S at that position. prefix can also be a tuple of strings " +"to try." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 -#: of -msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." +#: flwr.common.EventType.title:3 of +msgid "" +"More specifically, words start with uppercased characters and all " +"remaining cased characters have lower case." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 -#: of +#: flwr.common.EventType.translate:5 of +#, fuzzy +msgid "table" +msgstr "Database" + +#: flwr.common.EventType.translate:4 of msgid "" -"The learning rate for the clipping norm adaptation. Defaults to 0.2. " -"Andrew et al. recommends to set to 0.2." +"Translation table, which must be a mapping of Unicode ordinals to Unicode" +" ordinals, strings, or None." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 -#: of +#: flwr.common.EventType.translate:7 of msgid "" -"The stddev of the noise added to the count of updates currently below the" -" estimate. Andrew et al. recommends to set to `expected_num_records/20`" +"The table must implement lookup/indexing via __getitem__, for instance a " +"dictionary or list. If this operation raises LookupError, the character " +"is left untouched. Characters mapped to None are deleted." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 -#: of -#, fuzzy -msgid "Create a strategy:" -msgstr "stratégie.du.serveur" - -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 -#: of -msgid "" -"Wrap the strategy with the " -"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" +#: flwr.common.EventType.zfill:3 of +msgid "The string is never truncated." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 -#: of -msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" +#: ../../source/ref-api/flwr.common.FitIns.rst:2 +msgid "FitIns" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +msgid ":py:obj:`parameters `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +msgid ":py:obj:`config `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 -#: of -#, fuzzy -msgid "Aggregate training results and update clip norms." -msgstr "Résultats globaux de l'évaluation." 
+#: ../../source/ref-api/flwr.common.FitRes.rst:2 +msgid "FitRes" +msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`status `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`parameters `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`num_examples `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`metrics `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:2 #, fuzzy -msgid "DifferentialPrivacyClientSideFixedClipping" -msgstr "Confidentialité différentielle" +msgid "GetParametersIns" +msgstr ":code:`get_parameters`" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 -#: of -msgid "Use `fixedclipping_mod` modifier at the client side." 
+#: ../../source/ref-api/flwr.common.GetParametersIns.rst:28::1 +msgid ":py:obj:`config `\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 -#: of -msgid "" -"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " -"performs clipping on the server-side, " -"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " -"on the client-side, usually by using the built-in `fixedclipping_mod`." +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:2 +#, fuzzy +msgid "GetParametersRes" +msgstr ":code:`get_parameters`" + +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +msgid ":py:obj:`status `\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 -#: of -msgid "" -"The noise multiplier for the Gaussian mechanism for model updates. A " -"value of 1.0 or higher is recommended for strong privacy." +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +msgid ":py:obj:`parameters `\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 -#: of -msgid "The value of the clipping norm." 
+#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:2 +msgid "GetPropertiesIns" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 -#: of -msgid "" -"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " -"wrapper:" +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:28::1 +msgid ":py:obj:`config `\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 -#: of -msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:2 +msgid "GetPropertiesRes" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +msgid ":py:obj:`status `\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +msgid ":py:obj:`properties `\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 -#: of +#: ../../source/ref-api/flwr.common.Message.rst:2 #, fuzzy -msgid "Add noise to the aggregated parameters." 
-msgstr "Puis sérialise le résultat agrégé :" +msgid "Message" +msgstr "Côté serveur" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 +#: flwr.common.message.Message:3 of +msgid "A dataclass including information about the message to be executed." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.message.Message:5 of msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +"Holds records either sent by another entity (e.g. sent by the server-side" +" logic to a client, or vice-versa) or that will be sent to it." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.message.Message:8 of msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +"A dataclass that captures information about an error that took place when" +" processing another message." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.Message.rst:35::1 msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +":py:obj:`create_error_reply `\\ " +"\\(error\\[\\, ttl\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 -msgid "DifferentialPrivacyServerSideAdaptiveClipping" +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_error_reply:1 of +msgid "Construct a reply message indicating an error happened." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 -#: of +#: ../../source/ref-api/flwr.common.Message.rst:35::1 msgid "" -"The standard deviation of the noise added to the count of updates below " -"the estimate. Andrew et al. recommends to set to " -"`expected_num_records/20`" +":py:obj:`create_reply `\\ " +"\\(content\\[\\, ttl\\]\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 -#: of -msgid "" -"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " -"wrapper" +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_reply:1 of +msgid "Create a reply to this message with specified content and TTL." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid ":py:obj:`has_content `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_content:1 of +msgid "Return True if message has content, else False." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid ":py:obj:`has_error `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_error:1 of +msgid "Return True if message has an error, else False." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`content `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.common.Message.content:1 flwr.common.Message.content:1::1 #: of -msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 #, fuzzy -msgid "DifferentialPrivacyServerSideFixedClipping" -msgstr "Confidentialité différentielle" +msgid "The content of this message." +msgstr "Évaluer la réponse d'un client." 
-#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 -#: of -msgid "" -"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping " -"wrapper" +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`error `\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of +msgid "Error captured by this message." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`metadata `\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 -#: of -msgid "Compute the updates, clip, and pass them for aggregation." +#: flwr.common.message.Message.create_error_reply:3 of +msgid "The error that was encountered." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.message.Message.create_error_reply:5 +#: flwr.common.message.Message.create_reply:9 of msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +"Time-to-live for this message in seconds. If unset, it will be set based " +"on the remaining time for the received message before it expires. 
This " +"follows the equation: ttl = msg.meta.ttl - (reply.meta.created_at - " +"msg.meta.created_at)" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.message.Message.create_error_reply:5 +#: flwr.common.message.Message.create_reply:9 of msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +"Time-to-live for this message in seconds. If unset, it will be set based " +"on the remaining time for the received message before it expires. This " +"follows the equation:" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +#: flwr.common.message.Message.create_error_reply:9 +#: flwr.common.message.Message.create_reply:13 of +msgid "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.message.Message.create_reply:3 of msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +"The method generates a new `Message` as a reply to this message. It " +"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " +"this message and sets 'reply_to_message' to the ID of this message." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 -#: of -msgid "Afterward, add noise to the aggregated parameters." +#: flwr.common.message.Message.create_reply:7 of +msgid "The content for the reply message." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 -#, fuzzy -msgid "FaultTolerantFedAvg" -msgstr "server.strategy.FaultTolerantFedAvg" +#: flwr.common.message.Message.create_reply:16 of +msgid "A new `Message` instance representing the reply." +msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: ../../source/ref-api/flwr.common.MessageType.rst:2 +msgid "MessageType" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`EVALUATE `\\" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 -#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 -#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 -#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using weighted average." 
+#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`QUERY `\\" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`TRAIN `\\" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 +msgid "MessageTypeLegacy" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PARAMETERS `\\" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PROPERTIES `\\" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of +msgid "An identifier for the current run." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of +msgid "An identifier for the current message." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 -#: ../../source/ref-changelog.md:905 -msgid "FedAdagrad" -msgstr "FedAdagrad" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of +msgid "An identifier for the node sending this message." +msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:1 -#: flwr.server.strategy.fedadam.FedAdam:1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.dst_node_id:1 flwr.common.message.Metadata:9 of +msgid "An identifier for the node receiving this message." msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:3 -#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 -#: flwr.server.strategy.fedyogi.FedYogi:3 of -#, fuzzy -msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of +msgid "An identifier for the message this message replies to." msgstr "" -"FedYogi - Stratégie d'apprentissage fédéré utilisant Yogi côté serveur. " -"Mise en oeuvre basée sur https://arxiv.org/abs/2003.00295" -#: flwr.server.strategy.fedadagrad.FedAdagrad:21 -#: flwr.server.strategy.fedadagrad.FedAdagrad:23 -#: flwr.server.strategy.fedadam.FedAdam:25 -#: flwr.server.strategy.fedadam.FedAdam:27 -#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 -#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 -#: flwr.server.strategy.fedprox.FedProx:61 -#: flwr.server.strategy.fedprox.FedProx:63 -#: flwr.server.strategy.fedyogi.FedYogi:28 -#: flwr.server.strategy.fedyogi.FedYogi:30 of -msgid "Metrics aggregation function, optional." +#: flwr.common.message.Metadata:13 of +msgid "" +"An identifier for grouping messages. 
In some settings, this is used as " +"the FL round." msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:29 -#: flwr.server.strategy.fedadam.FedAdam:29 -#: flwr.server.strategy.fedopt.FedOpt:29 of -msgid "Server-side learning rate. Defaults to 1e-1." +#: flwr.common.message.Metadata:16 of +msgid "Time-to-live for this message in seconds." msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:31 -#: flwr.server.strategy.fedadam.FedAdam:31 -#: flwr.server.strategy.fedopt.FedOpt:31 of -msgid "Client-side learning rate. Defaults to 1e-1." +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of +msgid "A string that encodes the action to be executed on the receiving end." msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:33 -#: flwr.server.strategy.fedadam.FedAdam:37 -#: flwr.server.strategy.fedopt.FedOpt:37 of -msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." +#: flwr.common.Metadata.created_at:1::1 of +#, fuzzy +msgid ":py:obj:`created_at `\\" +msgstr "serveur.stratégie.Stratégie" + +#: flwr.common.Metadata.created_at:1 +#: flwr.common.Metadata.created_at:1::1 of +msgid "Unix timestamp when the message was created." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`dst_node_id `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\" -" \\(server\\_round\\, results\\, failures\\)" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`group_id `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.group_id:1 of +msgid "An identifier for grouping messages." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`message_id `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`message_type `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`reply_to_message `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`run_id `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`src_node_id `\\" msgstr "" -#: 
../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 -#, fuzzy -msgid "FedAdam" -msgstr "FedAdagrad" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`ttl `\\" +msgstr "" -#: flwr.server.strategy.fedadam.FedAdam:33 -#: flwr.server.strategy.fedyogi.FedYogi:36 of -msgid "Momentum parameter. Defaults to 0.9." +#: flwr.common.Metadata.created_at:1::1 flwr.common.Metadata.ttl:1 +#: of +msgid "Time-to-live for this message." msgstr "" -#: flwr.server.strategy.fedadam.FedAdam:35 -#: flwr.server.strategy.fedyogi.FedYogi:38 of -msgid "Second moment parameter. Defaults to 0.99." +#: ../../source/ref-api/flwr.common.Metrics.rst:2 +#, fuzzy +msgid "Metrics" +msgstr "Suivi des mesures" + +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 +msgid "MetricsRecord" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.metricsrecord.MetricsRecord:1 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | " +":py:class:`list`\\ [:py:class:`int`] | :py:class:`list`\\ " +"[:py:class:`float`]]" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.metricsrecord.MetricsRecord:3 of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"A :code:`MetricsRecord` is a Python dictionary designed to ensure that " +"each key-value pair adheres to specified data types. A " +":code:`MetricsRecord` is one of the types of records that a " +"`flwr.common.RecordSet `_ supports " +"and can therefore be used to construct :code:`common.Message` objects." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.metricsrecord.MetricsRecord:9 of msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +"A dictionary that stores basic types (i.e. `int`, `float` as defined in " +"`MetricsScalar`) and list of such types (see `MetricsScalarList`)." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.metricsrecord.MetricsRecord:12 of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"A boolean indicating whether metrics should be deleted from the input " +"dictionary immediately after adding them to the record. When set to True," +" the data is duplicated in memory. If memory is a concern, set it to " +"False." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.metricsrecord.MetricsRecord:20 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"The usage of a :code:`MetricsRecord` is envisioned for communicating " +"results obtained when a node performs an action. A few typical examples " +"include: communicating the training accuracy after a model is trained " +"locally by a :code:`ClientApp`, reporting the validation loss obtained at" +" a :code:`ClientApp`, or, more generally, the output of executing a query" +" by the :code:`ClientApp`. Common to these examples is that the output " +"can be typically represented by a single scalar (:code:`int`, " +":code:`float`) or list of scalars." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.metricsrecord.MetricsRecord:28 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"Let's see some examples of how to construct a :code:`MetricsRecord` from " +"scratch:" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.metricsrecord.MetricsRecord:39 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"Since types are enforced, the types of the objects inserted are checked. " +"For a :code:`MetricsRecord`, value types allowed are those in defined in " +":code:`flwr.common.MetricsRecordValues`. Similarly, only :code:`str` keys" +" are allowed." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.metricsrecord.MetricsRecord:50 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"If you need a more versatily type of record try :code:`ConfigsRecord` or " +":code:`ParametersRecord`." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 -#, fuzzy -msgid "FedAvg" -msgstr "DP-FedAvg" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:3 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of -msgid "Implementation based on https://arxiv.org/abs/1602.05629" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 -#: of -msgid "" -"Fraction of clients used during training. In case `min_fit_clients` is " -"larger than `fraction_fit * available_clients`, `min_fit_clients` will " -"still be sampled. Defaults to 1.0." 
+#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr "serveur.stratégie.Stratégie" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 -#: of -msgid "" -"Fraction of clients used during validation. In case " -"`min_evaluate_clients` is larger than `fraction_evaluate * " -"available_clients`, `min_evaluate_clients` will still be sampled. " -"Defaults to 1.0." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:33 of -msgid "Enable (True) or disable (False) in-place aggregation of model updates." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" -msgstr "" +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`popitem `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: collections.abc.MutableMapping.clear:1::1 of msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" msgstr "" 
-#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.common.NDArray.rst:2 +msgid "NDArray" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.common.NDArrays.rst:2 +msgid "NDArrays" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +msgid ":py:obj:`tensors `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +msgid ":py:obj:`tensor_type `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:2 #, fuzzy -msgid "FedAvgAndroid" -msgstr "DPFedAvgAdaptive" +msgid "ParametersRecord" +msgstr "Paramètres du modèle." -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord:1 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord:3 of msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"A dataclass storing named Arrays in order. This means that it holds " +"entries as an OrderedDict[str, Array]. 
ParametersRecord objects can be " +"viewed as an equivalent to PyTorch's state_dict, but holding serialised " +"tensors instead. A :code:`ParametersRecord` is one of the types of " +"records that a `flwr.common.RecordSet " +"`_ supports and can therefore be " +"used to construct :code:`common.Message` objects." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`bytes_to_ndarray " -"`\\ \\(tensor\\)" +#: flwr.common.record.parametersrecord.ParametersRecord:10 of +msgid "A dictionary that stores serialized array-like or tensor-like objects." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of -#, fuzzy -msgid "Deserialize NumPy array from bytes." -msgstr "Désérialise le tableau numérique NumPy à partir d'octets." - -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord:12 of msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"A boolean indicating whether parameters should be deleted from the input " +"dictionary immediately after adding them to the record. If False, the " +"dictionary passed to `set_parameters()` will be empty once exiting from " +"that function. This is the desired behaviour when working with very large" +" models/tensors/arrays. However, if you plan to continue working with " +"your parameters after adding it to the record, set this flag to True. " +"When set to True, the data is duplicated in memory." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord:23 of msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"The usage of :code:`ParametersRecord` is envisioned for storing data " +"arrays (e.g. 
parameters of a machine learning model). These first need to" +" be serialized into a :code:`flwr.common.Array` data structure." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord:27 of +msgid "Let's see some examples:" +msgstr "" + +#: flwr.common.record.parametersrecord.ParametersRecord:50 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"Now that the NumPy array is embedded into a :code:`ParametersRecord` it " +"could be sent if added as part of a :code:`common.Message` or it could be" +" saved as a persistent state of a :code:`ClientApp` via its context. " +"Regardless of the usecase, we will sooner or later want to recover the " +"array in its original NumPy representation. For the example above, where " +"the array was serialized using the built-in utility function, " +"deserialization can be done as follows:" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord:65 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"If you need finer control on how your arrays are serialized and " +"deserialized, you can construct :code:`Array` objects directly like this:" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord:83 of msgid "" -":py:obj:`ndarray_to_bytes " -"`\\ \\(ndarray\\)" +"Note that different arrays (e.g. from PyTorch, Tensorflow) might require " +"different serialization mechanism. Howerver, they often support a " +"conversion to NumPy, therefore allowing to use the same or similar steps " +"as in the example above." 
msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of -#, fuzzy -msgid "Serialize NumPy array to bytes." -msgstr "Sérialise le tableau numérique NumPy en octets." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`ndarrays_to_parameters " -"`\\ " -"\\(ndarrays\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr "serveur.stratégie.Stratégie" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`parameters_to_ndarrays " -"`\\ " -"\\(parameters\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 -#: of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "Convert parameters object to NumPy weights." -msgstr "Convertit l'objet des paramètres en ndarrays NumPy." 
+msgid ":py:obj:`popitem `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "FedAvgM" -msgstr "DP-FedAvg" - -#: flwr.server.strategy.fedavgm.FedAvgM:3 of -msgid "Implementation based on https://arxiv.org/abs/1909.06335" -msgstr "" +msgid "" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedavgm.FedAvgM:25 of +#: collections.abc.MutableMapping.clear:1::1 of msgid "" -"Server-side learning rate used in server-side optimization. Defaults to " -"1.0." +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM:28 of -msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +"Note that a small amount of Bytes might also be included in this counting" +" that correspond to metadata of the serialized object (e.g. of NumPy " +"array) needed for deseralization." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.common.Properties.rst:2 +#, fuzzy +msgid "Properties" +msgstr "Prérequis" + +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 +#, fuzzy +msgid "ReconnectIns" +msgstr "Collecte centralisée des données" + +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:28::1 +msgid ":py:obj:`seconds `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.RecordSet.rst:2 +msgid "RecordSet" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.recordset.RecordSet:3 of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"A :code:`RecordSet` is the unified mechanism by which parameters, metrics" +" and configs can be either stored as part of a `flwr.common.Context " +"`_ in your apps or communicated as part of a " +"`flwr.common.Message `_ between your apps." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.recordset.RecordSet:9 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"A dictionary of :code:`ParametersRecords` that can be used to record and " +"communicate model parameters and high-dimensional arrays." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.recordset.RecordSet:12 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"A dictionary of :code:`MetricsRecord` that can be used to record and " +"communicate scalar-valued metrics that are the result of performing and " +"action, for example, by a :code:`ClientApp`." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.recordset.RecordSet:16 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"A dictionary of :code:`ConfigsRecord` that can be used to record and " +"communicate configuration values to an entity (e.g. to a " +":code:`ClientApp`) for it to adjust how an action is performed." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.recordset.RecordSet:24 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"A :code:`RecordSet` can hold three types of records, each designed with " +"an specific purpose. What is common to all of them is that they are " +"Python dictionaries designed to ensure that each key-value pair adheres " +"to specified data types." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 -msgid "FedMedian" +#: flwr.common.record.recordset.RecordSet:29 of +msgid "Let's see an example." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.recordset.RecordSet:47 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"Adding a :code:`ParametersRecord` follows the same steps as above but " +"first, the array needs to be serialized and represented as a " +":code:`flwr.common.Array`. If the array is a :code:`NumPy` array, you can" +" use the built-in utility function `array_from_numpy " +"`_. It is often possible to convert an" +" array first to :code:`NumPy` and then use the aforementioned function." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.recordset.RecordSet:66 of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"For additional examples on how to construct each of the records types " +"shown above, please refer to the documentation for :code:`ConfigsRecord`," +" :code:`MetricsRecord` and :code:`ParametersRecord`." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of -#, fuzzy -msgid "Aggregate fit results using median." -msgstr "Résultats globaux de l'évaluation." +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`configs_records `\\" +msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.common.RecordSet.configs_records:1 +#: flwr.common.RecordSet.configs_records:1::1 of +msgid "Dictionary holding ConfigsRecord instances." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`metrics_records `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.metrics_records:1 of +msgid "Dictionary holding MetricsRecord instances." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`parameters_records `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.parameters_records:1 of +msgid "Dictionary holding ParametersRecord instances." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" +#: ../../source/ref-api/flwr.common.ServerMessage.rst:2 +#, fuzzy +msgid "ServerMessage" +msgstr "Côté serveur" -#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 -msgid "FedOpt" +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid ":py:obj:`evaluate_ins `\\" msgstr "" -#: flwr.server.strategy.fedopt.FedOpt:33 of -msgid "Momentum parameter. Defaults to 0.0." +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid ":py:obj:`fit_ins `\\" msgstr "" -#: flwr.server.strategy.fedopt.FedOpt:35 of -msgid "Second moment parameter. Defaults to 0.0." +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid "" +":py:obj:`get_parameters_ins " +"`\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +":py:obj:`get_properties_ins " +"`\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.common.Status.rst:2 +#, fuzzy +msgid "Status" +msgstr "Statut du client." 
+ +#: ../../source/ref-api/flwr.common.Status.rst:29::1 +msgid ":py:obj:`code `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.Status.rst:29::1 +msgid ":py:obj:`message `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 +msgid "array\\_from\\_numpy" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 +msgid "bytes\\_to\\_ndarray" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.common.configure.rst:2 +#, fuzzy +msgid "configure" +msgstr "Configurer les clients" + +#: ../../source/ref-api/flwr.common.event.rst:2 +msgid "event" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.common.log.rst:2 +msgid "log" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: logging.Logger.log:3 of msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +"To pass exception information, use the keyword argument exc_info with a " +"true value, e.g." msgstr "" +"Pour transmettre des informations sur les exceptions, utilise l'argument " +"mot-clé exc_info avec une valeur vraie, par ex." 
-#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 -msgid "FedProx" +#: logging.Logger.log:6 of +#, python-format +msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" +msgstr "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" + +#: ../../source/ref-api/flwr.common.ndarray_to_bytes.rst:2 +msgid "ndarray\\_to\\_bytes" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:3 of -msgid "Implementation based on https://arxiv.org/abs/1812.06127" +#: ../../source/ref-api/flwr.common.ndarrays_to_parameters.rst:2 +msgid "ndarrays\\_to\\_parameters" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:5 of -msgid "" -"The strategy in itself will not be different than FedAvg, the client " -"needs to be adjusted. A proximal term needs to be added to the loss " -"function during the training:" +#: ../../source/ref-api/flwr.common.now.rst:2 +msgid "now" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:9 of -msgid "" -"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" -"\n" +#: ../../source/ref-api/flwr.common.parameters_to_ndarrays.rst:2 +msgid "parameters\\_to\\_ndarrays" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:12 of +#: ../../source/ref-api/flwr.server.rst:2 +msgid "server" +msgstr "serveur" + +#: ../../source/ref-api/flwr.server.rst:22::1 msgid "" -"Where $w^t$ are the global parameters and $w$ are the local weights the " -"function will be optimized with." +":py:obj:`start_server `\\ \\(\\*\\[\\, " +"server\\_address\\, server\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:15 of -msgid "In PyTorch, for example, the loss would go from:" +#: ../../source/ref-api/flwr.server.rst:22::1 +#: flwr.server.app.start_server:1 of +msgid "Start a Flower server using the gRPC transport layer." 
msgstr "" -#: flwr.server.strategy.fedprox.FedProx:21 of -msgid "To:" +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`ClientManager `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:30 of -msgid "" -"With `global_params` being a copy of the parameters before the training " -"takes place." +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.client_manager.ClientManager:1 of +msgid "Abstract base class for managing Flower clients." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:65 of -msgid "" -"The weight of the proximal term used in the optimization. 0.0 makes this " -"strategy equivalent to FedAvg, and the higher the coefficient, the more " -"regularization will be used (that is, the client parameters will need to " -"be closer to the server parameters during training)." -msgstr "" +#: ../../source/ref-api/flwr.server.rst:37::1 +#, fuzzy +msgid ":py:obj:`Driver `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.driver.driver.Driver:1 of +msgid "Abstract base Driver class for the Driver API." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`History `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.history.History:1 of +msgid "History class for training and/or evaluation metrics collection." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:37::1 +#, fuzzy msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" +":py:obj:`LegacyContext `\\ \\(context\\[\\, " +"config\\, strategy\\, ...\\]\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Legacy Context." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:37::1 msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " +"strategy\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:37::1 +#, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" +":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " +"strategy\\, ...\\]\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.server_app.ServerApp:1 of +#, fuzzy +msgid "Flower ServerApp." +msgstr "Serveur de Flower" + +#: ../../source/ref-api/flwr.server.rst:37::1 +#, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" +":py:obj:`ServerAppComponents `\\ " +"\\(\\[server\\, config\\, ...\\]\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of -msgid "Sends the proximal factor mu to the clients" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.serverapp_components.ServerAppComponents:1 of +msgid "Components to construct a ServerApp." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 -msgid "FedTrimmedAvg" +#: ../../source/ref-api/flwr.server.rst:37::1 +#, fuzzy +msgid "" +":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," +" round\\_timeout\\]\\)" msgstr "" +"Flower 1.0 : ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of -msgid "Implemented based on: https://arxiv.org/abs/1803.01498" -msgstr "" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.server_config.ServerConfig:1 of +#, fuzzy +msgid "Flower server config." +msgstr "Serveur de Flower" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of -msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.client_manager.SimpleClientManager:1 of +msgid "Provides a pool of available clients." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.server.rst:56::1 +#, fuzzy +msgid ":py:obj:`flwr.server.strategy `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.server.rst:56::1 +#: flwr.server.strategy:1 of +msgid "Contains the strategy abstraction and different implementations." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using trimmed average." 
-msgstr "" +#: ../../source/ref-api/flwr.server.rst:56::1 +#, fuzzy +msgid ":py:obj:`flwr.server.workflow `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" +#: ../../source/ref-api/flwr.server.rst:56::1 +#: flwr.server.workflow:1 of +#, fuzzy +msgid "Workflows." +msgstr "Flux de travail" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.ClientManager.rst:2 +#, fuzzy +msgid "ClientManager" +msgstr "client" + +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`all `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.server.client_manager.ClientManager.all:1 +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.all:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "Return all available clients." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`num_available `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.num_available:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.num_available:1 of +msgid "Return the number of available clients." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`register `\\ \\(client\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 -msgid "FedXgbBagging" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.register:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.register:1 of +msgid "Register Flower ClientProxy instance." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.client_manager.ClientManager.all:1::1 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -#, fuzzy -msgid "Aggregate evaluation metrics using average." -msgstr "Résultats globaux de l'évaluation." +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.sample:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.sample:1 of +msgid "Sample a number of Flower ClientProxy instances." 
+msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`unregister `\\ \\(client\\)" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of -msgid "Aggregate fit results using bagging." +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.unregister:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.unregister:1 of +msgid "Unregister Flower ClientProxy instance." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.client_manager.ClientManager.all:1::1 of msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\, timeout\\)" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.wait_for:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.wait_for:1 of +msgid "Wait until at least `num_clients` are available." 
msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.server.client_manager.ClientManager.num_available:3 +#: flwr.server.client_manager.SimpleClientManager.num_available:3 of +msgid "**num_available** -- The number of currently available clients." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.client_manager.ClientManager.register:6 +#: flwr.server.client_manager.SimpleClientManager.register:6 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"**success** -- Indicating if registration was successful. False if " +"ClientProxy is already registered or can not be registered for any " +"reason." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.client_manager.ClientManager.unregister:3 +#: flwr.server.client_manager.SimpleClientManager.unregister:3 of +msgid "This method is idempotent." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.Driver.rst:2 +#, fuzzy +msgid "Driver" +msgstr "serveur" + +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`create_message `\\ " +"\\(content\\, message\\_type\\, ...\\[\\, ttl\\]\\)" msgstr "" +"Flower 1.0 : ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 -msgid "FedXgbCyclic" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.create_message:1 of +msgid "Create a new message with specified parameters." 
msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +msgid ":py:obj:`get_node_ids `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\," -" results\\, failures\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.get_node_ids:1 of +msgid "Get node IDs." msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`pull_messages `\\ " +"\\(message\\_ids\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.pull_messages:1 of +msgid "Pull messages based on message IDs." msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`push_messages `\\ " +"\\(messages\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.push_messages:1 of +msgid "Push messages to specified node IDs." 
msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`send_and_receive `\\ " +"\\(messages\\, \\*\\[\\, timeout\\]\\)" msgstr "" +"Flower 1.0 : ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.send_and_receive:1 of +msgid "Push messages to specified node IDs and pull the reply messages." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 +#: flwr.server.driver.driver.Driver.create_message:1::1 of #, fuzzy -msgid "FedXgbNnAvg" -msgstr "DP-FedAvg" +msgid ":py:obj:`run `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of +#: flwr.server.Driver.run:1 +#: flwr.server.driver.driver.Driver.create_message:1::1 of +#, fuzzy +msgid "Run information." +msgstr "Simulation de moniteur" + +#: flwr.server.driver.driver.Driver.create_message:3 of msgid "" -"This strategy is deprecated, but a copy of it is available in Flower " -"Baselines: " -"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." +"This method constructs a new `Message` with given content and metadata. " +"The `run_id` and `src_node_id` will be set automatically." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.create_message:6 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -msgstr "" - -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\, " -"results\\, failures\\)" +"The content for the new message. This holds records that are to be sent " +"to the destination node." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.create_message:9 of msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"The type of the message, defining the action to be executed on the " +"receiving end." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +#: flwr.server.driver.driver.Driver.create_message:12 of +msgid "The ID of the destination node to which the message is being sent." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.create_message:14 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"The ID of the group to which this message is associated. In some " +"settings, this is used as the FL round." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.create_message:17 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"Time-to-live for the round trip of this message, i.e., the time from " +"sending this message to receiving a reply. It specifies in seconds the " +"duration for which the message and its potential reply are considered " +"valid. If unset, the default TTL (i.e., `common.DEFAULT_TTL`) will be " +"used." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.create_message:23 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"**message** -- A new `Message` instance with the specified content and " +"metadata." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.pull_messages:3 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 -msgid "FedYogi" -msgstr "" - -#: flwr.server.strategy.fedyogi.FedYogi:32 of -msgid "Server-side learning rate. Defaults to 1e-2." +"This method is used to collect messages from the SuperLink that " +"correspond to a set of given message IDs." msgstr "" -#: flwr.server.strategy.fedyogi.FedYogi:34 of -msgid "Client-side learning rate. Defaults to 0.0316." +#: flwr.server.driver.driver.Driver.pull_messages:6 of +msgid "An iterable of message IDs for which reply messages are to be retrieved." msgstr "" -#: flwr.server.strategy.fedyogi.FedYogi:40 of -msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." +#: flwr.server.driver.driver.Driver.pull_messages:9 of +msgid "**messages** -- An iterable of messages received." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.push_messages:3 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +"This method takes an iterable of messages and sends each message to the " +"node specified in `dst_node_id`." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: flwr.server.driver.driver.Driver.push_messages:6 +#: flwr.server.driver.driver.Driver.send_and_receive:7 of +msgid "An iterable of messages to be sent." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.push_messages:9 of msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +"**message_ids** -- An iterable of IDs for the messages that were sent, " +"which can be used to pull replies." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.send_and_receive:3 of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"This method sends a list of messages to their destination node IDs and " +"then waits for the replies. It continues to pull replies until either all" +" replies are received or the specified timeout duration is exceeded." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.send_and_receive:9 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"The timeout duration in seconds. If specified, the method will wait for " +"replies for this duration. If `None`, there is no time limit and the " +"method will wait until replies for all messages are received." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.server.driver.driver.Driver.send_and_receive:14 of +msgid "**replies** -- An iterable of reply messages received from the SuperLink." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.send_and_receive:19 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"This method uses `push_messages` to send the messages and `pull_messages`" +" to collect the replies. If `timeout` is set, the method may not return " +"replies for all sent messages. A message remains valid until its TTL, " +"which is not affected by `timeout`." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.History.rst:2 +msgid "History" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 -msgid "Krum" +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_loss_centralized " +"`\\ \\(server\\_round\\, " +"loss\\)" msgstr "" -#: flwr.server.strategy.krum.Krum:3 of -msgid "Implementation based on https://arxiv.org/abs/1703.02757" -msgstr "" +#: flwr.server.history.History.add_loss_centralized:1 +#: flwr.server.history.History.add_loss_centralized:1::1 of +#, fuzzy +msgid "Add one loss entry (from centralized evaluation)." +msgstr "Évaluation centralisée" -#: flwr.server.strategy.krum.Krum:17 of +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" -" that case classical Krum is applied." +":py:obj:`add_loss_distributed " +"`\\ \\(server\\_round\\, " +"loss\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_loss_distributed:1 of +msgid "Add one loss entry (from distributed evaluation)." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`add_metrics_centralized " +"`\\ \\(server\\_round\\, " +"metrics\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_centralized:1 of #, fuzzy -msgid "Aggregate fit results using Krum." -msgstr "Résultats globaux de l'évaluation." +msgid "Add metrics entries (from centralized evaluation)." +msgstr "Évaluation centralisée" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +":py:obj:`add_metrics_distributed " +"`\\ \\(server\\_round\\, " +"metrics\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed:1 of +msgid "Add metrics entries (from distributed evaluation)." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`add_metrics_distributed_fit " +"`\\ \\(server\\_round\\," +" ...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed_fit:1 of +msgid "Add metrics entries (from distributed fit)." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 +msgid "LegacyContext" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Bases: :py:class:`~flwr.common.context.Context`" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 #, fuzzy -msgid "QFedAvg" -msgstr "DP-FedAvg" +msgid ":py:obj:`config `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" -msgstr "" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#, fuzzy +msgid ":py:obj:`strategy `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`client_manager `\\" msgstr "" -#: 
flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" -msgstr "" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#, fuzzy +msgid ":py:obj:`history `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#, fuzzy +msgid ":py:obj:`node_id `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#, fuzzy +msgid ":py:obj:`node_config `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#, fuzzy +msgid ":py:obj:`state `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#, fuzzy +msgid ":py:obj:`run_config `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.server.Server.rst:2 +msgid "Server" +msgstr "Serveur" + +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`client_manager `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.server.Server.client_manager:1 +#: flwr.server.server.Server.client_manager:1::1 of +msgid "Return ClientManager." 
msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`disconnect_all_clients " +"`\\ \\(timeout\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 -#, fuzzy -msgid "Strategy" -msgstr "stratégie.du.serveur" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.disconnect_all_clients:1 of +msgid "Send shutdown signal to all clients." +msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: flwr.server.server.Server.client_manager:1::1 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`evaluate_round `\\ " +"\\(server\\_round\\, timeout\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of -#, fuzzy -msgid "Aggregate evaluation results." -msgstr "Résultats globaux de l'évaluation." +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.evaluate_round:1 of +msgid "Validate current global model on a number of clients." +msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of -#, fuzzy -msgid "Aggregate training results." -msgstr "Résultats globaux de l'évaluation." +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit:1 of +msgid "Run federated averaging for a number of rounds." 
+msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: flwr.server.server.Server.client_manager:1::1 of msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`fit_round `\\ \\(server\\_round\\," +" timeout\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit_round:1 of +msgid "Perform a single round of federated averaging." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: flwr.server.server.Server.client_manager:1::1 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`set_max_workers `\\ " +"\\(max\\_workers\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.evaluate:1 of -#, fuzzy -msgid "Evaluate the current model parameters." -msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_max_workers:1 of +msgid "Set the max_workers used by ThreadPoolExecutor." +msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`set_strategy `\\ \\(strategy\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_strategy:1 of #, fuzzy -msgid "Initialize the (global) model parameters." -msgstr "Initialise le modèle global" +msgid "Replace server strategy." 
+msgstr "stratégie.du.serveur" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of -msgid "" -"Successful updates from the previously selected and configured clients. " -"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " -"one of the previously selected clients. Not that not all previously " -"selected clients are necessarily included in this list: a client might " -"drop out and not submit a result. For each client that did not submit an " -"update, there should be an `Exception` in `failures`." -msgstr "" +#: ../../source/ref-api/flwr.server.ServerApp.rst:2 +#, fuzzy +msgid "ServerApp" +msgstr "serveur" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of -msgid "Exceptions that occurred while the server was waiting for client updates." +#: flwr.server.server_app.ServerApp:5 of +#, fuzzy +msgid "Use the `ServerApp` with an existing `Strategy`:" +msgstr "Utilise une stratégie existante" + +#: flwr.server.server_app.ServerApp:17 of +msgid "Use the `ServerApp` with a custom main function:" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of -msgid "" -"**aggregation_result** -- The aggregated evaluation result. Aggregation " -"typically uses some variant of a weighted average." +#: flwr.server.server_app.ServerApp.main:1::1 of +#, fuzzy +msgid ":py:obj:`main `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: flwr.server.server_app.ServerApp.main:1 +#: flwr.server.server_app.ServerApp.main:1::1 of +msgid "Return a decorator that registers the main fn with the server app." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:2 +#, fuzzy +msgid "ServerAppComponents" +msgstr "serveur" + +#: flwr.server.serverapp_components.ServerAppComponents:3 of msgid "" -"Successful updates from the previously selected and configured clients. 
" -"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" -" one of the previously selected clients. Not that not all previously " -"selected clients are necessarily included in this list: a client might " -"drop out and not submit a result. For each client that did not submit an " -"update, there should be an `Exception` in `failures`." +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, one will be created internally." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of +#: flwr.server.app.start_server:9 +#: flwr.server.serverapp_components.ServerAppComponents:6 of msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the new global model parameters (i.e., it will replace the " -"previous parameters with the ones returned from this method). If `None` " -"is returned (e.g., because there were only failures and no viable " -"results) then the server will no update the previous model parameters, " -"the updates received in this round are discarded, and the global model " -"parameters remain the same." +"Currently supported values are `num_rounds` (int, default: 1) and " +"`round_timeout` in seconds (float, default: None)." msgstr "" -#: flwr.server.strategy.strategy.Strategy.evaluate:3 of +#: flwr.server.serverapp_components.ServerAppComponents:9 of +#, fuzzy msgid "" -"This function can be used to perform centralized (i.e., server-side) " -"evaluation of model parameters." +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`flwr.server.strategy.FedAvg` will be used." 
msgstr "" +"Déclasser `flwr.server.strategy.DefaultStrategy` (migrer vers " +"`flwr.server.strategy.FedAvg`, qui est équivalent)" -#: flwr.server.strategy.strategy.Strategy.evaluate:11 of +#: flwr.server.serverapp_components.ServerAppComponents:13 of msgid "" -"**evaluation_result** -- The evaluation result, usually a Tuple " -"containing loss and a dictionary containing task-specific metrics (e.g., " -"accuracy)." +"An implementation of the class `flwr.server.ClientManager`. If no " +"implementation is provided, then `flwr.server.SimpleClientManager` will " +"be used." msgstr "" -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +#, fuzzy msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the initial global model parameters." -msgstr "" +":py:obj:`client_manager " +"`\\" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.workflow.rst:2 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 #, fuzzy -msgid "workflow" -msgstr "Flux de travail" +msgid ":py:obj:`config `\\" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +#, fuzzy +msgid ":py:obj:`server `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +#, fuzzy +msgid ":py:obj:`strategy `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.server.ServerConfig.rst:2 +#, fuzzy +msgid "ServerConfig" +msgstr "serveur" + +#: flwr.server.server_config.ServerConfig:3 of msgid "" -":py:obj:`DefaultWorkflow `\\ " -"\\(\\[fit\\_workflow\\, ...\\]\\)" +"All attributes have default values which allows users to configure just " +"the ones they care about." 
msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of -msgid "Default workflow in Flower." +#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +msgid ":py:obj:`num_rounds `\\" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -msgid "" -":py:obj:`SecAggPlusWorkflow `\\ " -"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" +#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +msgid ":py:obj:`round_timeout `\\" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 -#: of -msgid "The workflow for the SecAgg+ protocol." +#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:2 +msgid "SimpleClientManager" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -msgid "" -":py:obj:`SecAggWorkflow `\\ " -"\\(reconstruction\\_threshold\\, \\*\\)" +#: flwr.server.client_manager.SimpleClientManager:1 of +msgid "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of -msgid "The workflow for the SecAgg protocol." 
+#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid ":py:obj:`all `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 -#, fuzzy -msgid "DefaultWorkflow" -msgstr "Flux de travail" +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "" +":py:obj:`num_available `\\" +" \\(\\)" +msgstr "" -#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 -#, fuzzy -msgid "SecAggPlusWorkflow" -msgstr "Flux de travail" - -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 -#: of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -"The SecAgg+ protocol ensures the secure summation of integer vectors " -"owned by multiple parties, without accessing any individual integer " -"vector. This workflow allows the server to compute the weighted average " -"of model parameters across all clients, ensuring individual contributions" -" remain private. This is achieved by clients sending both, a weighting " -"factor and a weighted version of the locally updated parameters, both of " -"which are masked for privacy. Specifically, each client uploads \"[w, w *" -" params]\" with masks, where weighting factor 'w' is the number of " -"examples ('num_examples') and 'params' represents the model parameters " -"('parameters') from the client's `FitRes`. The server then aggregates " -"these contributions to compute the weighted average of model parameters." +":py:obj:`register `\\ " +"\\(client\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 -#: of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -"The protocol involves four main stages: - 'setup': Send SecAgg+ " -"configuration to clients and collect their public keys. 
- 'share keys': " -"Broadcast public keys among clients and collect encrypted secret" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 -#: of -msgid "key shares." +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "" +":py:obj:`unregister `\\ " +"\\(client\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:18 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:18 -#: of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -"'collect masked vectors': Forward encrypted secret key shares to target " -"clients and collect masked model parameters." +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\[\\, timeout\\]\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:20 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:20 -#: of +#: flwr.server.client_manager.SimpleClientManager.wait_for:3 of msgid "" -"'unmask': Collect secret key shares to decrypt and aggregate the model " -"parameters." +"Blocks until the requested number of clients is available or until a " +"timeout is reached. Current timeout default: 1 day." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:22 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:22 -#: of -msgid "" -"Only the aggregated model parameters are exposed and passed to " -"`Strategy.aggregate_fit`, ensuring individual data privacy." +#: flwr.server.client_manager.SimpleClientManager.wait_for:6 of +msgid "The number of clients to wait for." 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:25 -#: of -msgid "" -"The number of shares into which each client's private key is split under " -"the SecAgg+ protocol. If specified as a float, it represents the " -"proportion of all selected clients, and the number of shares will be set " -"dynamically in the run time. A private key can be reconstructed from " -"these shares, allowing for the secure aggregation of model updates. Each " -"client sends one share to each of its neighbors while retaining one." +#: flwr.server.client_manager.SimpleClientManager.wait_for:8 of +msgid "The time in seconds to wait for, defaults to 86400 (24h)." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:25 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:32 -#: of -msgid "" -"The minimum number of shares required to reconstruct a client's private " -"key, or, if specified as a float, it represents the proportion of the " -"total number of shares needed for reconstruction. This threshold ensures " -"privacy by allowing for the recovery of contributions from dropped " -"clients during aggregation, without compromising individual client data." +#: flwr.server.client_manager.SimpleClientManager.wait_for:11 of +msgid "**success**" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:31 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:38 -#: of -msgid "" -"The maximum value of the weight that can be assigned to any single " -"client's update during the weighted average calculation on the server " -"side, e.g., in the FedAvg algorithm." +#: ../../source/ref-api/flwr.server.start_server.rst:2 +#, fuzzy +msgid "start\\_server" +msgstr "serveur.start_server" + +#: flwr.server.app.start_server:3 of +msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:35 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:42 -#: of +#: flwr.server.app.start_server:5 of msgid "" -"The range within which model parameters are clipped before quantization. " -"This parameter ensures each model parameter is bounded within " -"[-clipping_range, clipping_range], facilitating quantization." +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, then `start_server` will create one." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:39 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:46 -#: of +#: flwr.server.app.start_server:12 of msgid "" -"The size of the range into which floating-point model parameters are " -"quantized, mapping each parameter to an integer in [0, " -"quantization_range-1]. This facilitates cryptographic operations on the " -"model updates." +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`start_server` will use `flwr.server.strategy.FedAvg`." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:43 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:50 -#: of +#: flwr.server.app.start_server:16 of msgid "" -"The range of values from which random mask entries are uniformly sampled " -"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " -"Please use 2**n values for `modulus_range` to prevent overflow issues." +"An implementation of the abstract base class `flwr.server.ClientManager`." +" If no implementation is provided, then `start_server` will use " +"`flwr.server.client_manager.SimpleClientManager`." 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:47 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:54 -#: of +#: flwr.server.app.start_server:21 of msgid "" -"The timeout duration in seconds. If specified, the workflow will wait for" -" replies for this duration each time. If `None`, there is no time limit " -"and the workflow will wait until replies for all messages are received." +"The maximum length of gRPC messages that can be exchanged with the Flower" +" clients. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower clients need to be started with the same value (see " +"`flwr.client.start_client`), otherwise clients will not know about the " +"increased limit and block larger messages." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 -#: of +#: flwr.server.app.start_server:28 of msgid "" -"Generally, higher `num_shares` means more robust to dropouts while " -"increasing the computational costs; higher `reconstruction_threshold` " -"means better privacy guarantees but less tolerance to dropouts." +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order: * CA certificate. * " +"server certificate. * server private key." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:58 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:64 -#: of -msgid "Too large `max_weight` may compromise the precision of the quantization." +#: flwr.server.app.start_server:28 of +msgid "" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. 
The tuple is expected to have three " +"bytes elements in the following order:" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 -#: of -msgid "`modulus_range` must be 2**n and larger than `quantization_range`." +#: flwr.server.app.start_server:32 of +#, fuzzy +msgid "CA certificate." +msgstr "Certificats" + +#: flwr.server.app.start_server:33 of +#, fuzzy +msgid "server certificate." +msgstr "Certificats" + +#: flwr.server.app.start_server:34 of +#, fuzzy +msgid "server private key." +msgstr "stratégie.du.serveur" + +#: flwr.server.app.start_server:37 of +msgid "**hist** -- Object containing training and evaluation metrics." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 -#: of +#: flwr.server.app.start_server:42 of +#, fuzzy +msgid "Starting an insecure server:" +msgstr "Démarrer le serveur" + +#: flwr.server.app.start_server:46 of +#, fuzzy +msgid "Starting an SSL-enabled server:" +msgstr "Démarrer le serveur" + +#: ../../source/ref-api/flwr.server.strategy.rst:2 +#, fuzzy +msgid "strategy" +msgstr "stratégie.du.serveur" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"When `num_shares` is a float, it is interpreted as the proportion of all " -"selected clients, and hence the number of shares will be determined in " -"the runtime. This allows for dynamic adjustment based on the total number" -" of participating clients." +":py:obj:`Bulyan `\\ \\(\\*\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:69 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.bulyan.Bulyan:1 of +#, fuzzy +msgid "Bulyan strategy." 
+msgstr "Stratégies intégrées" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"Similarly, when `reconstruction_threshold` is a float, it is interpreted " -"as the proportion of the number of shares needed for the reconstruction " -"of a private key. This feature enables flexibility in setting the " -"security threshold relative to the number of distributed shares." +":py:obj:`DPFedAvgAdaptive `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:73 -#: of -msgid "" -"`num_shares`, `reconstruction_threshold`, and the quantization parameters" -" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " -"roles in balancing privacy, robustness, and efficiency within the SecAgg+" -" protocol." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`collect_masked_vectors_stage " -"`\\" -" \\(driver\\, ...\\)" +":py:obj:`DPFedAvgFixed `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "Execute the 'collect masked vectors' stage." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of +msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`setup_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 #: of -msgid "Execute the 'setup' stage." +msgid "Strategy wrapper for central DP with client-side adaptive clipping." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`share_keys_stage " -"`\\ " -"\\(driver\\, context\\, state\\)" +":py:obj:`DifferentialPrivacyClientSideFixedClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 #: of -msgid "Execute the 'share keys' stage." +msgid "Strategy wrapper for central DP with client-side fixed clipping." 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`unmask_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 #: of -msgid "Execute the 'unmask' stage." +msgid "Strategy wrapper for central DP with server-side adaptive clipping." msgstr "" -#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 -#, fuzzy -msgid "SecAggWorkflow" -msgstr "Flux de travail" - -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"Bases: " -":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" +":py:obj:`DifferentialPrivacyServerSideFixedClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of -msgid "" -"The SecAgg protocol ensures the secure summation of integer vectors owned" -" by multiple parties, without accessing any individual integer vector. " -"This workflow allows the server to compute the weighted average of model " -"parameters across all clients, ensuring individual contributions remain " -"private. This is achieved by clients sending both, a weighting factor and" -" a weighted version of the locally updated parameters, both of which are " -"masked for privacy. 
Specifically, each client uploads \"[w, w * params]\"" -" with masks, where weighting factor 'w' is the number of examples " -"('num_examples') and 'params' represents the model parameters " -"('parameters') from the client's `FitRes`. The server then aggregates " -"these contributions to compute the weighted average of model parameters." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 +#: of +msgid "Strategy wrapper for central DP with server-side fixed clipping." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"The protocol involves four main stages: - 'setup': Send SecAgg " -"configuration to clients and collect their public keys. - 'share keys': " -"Broadcast public keys among clients and collect encrypted secret" +":py:obj:`FaultTolerantFedAvg " +"`\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 of -msgid "" -"Each client's private key is split into N shares under the SecAgg " -"protocol, where N is the number of selected clients." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of +msgid "Configurable fault-tolerant FedAvg strategy implementation." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:56 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"Generally, higher `reconstruction_threshold` means better privacy " -"guarantees but less tolerance to dropouts." 
+":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 of -msgid "" -"When `reconstruction_threshold` is a float, it is interpreted as the " -"proportion of the number of all selected clients needed for the " -"reconstruction of a private key. This feature enables flexibility in " -"setting the security threshold relative to the number of selected " -"clients." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 of +#, fuzzy +msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." msgstr "" +"`FedAdam` et `FedAdam` correspondent à la dernière version de l'article " +"sur l'optimisation fédérée adaptative." -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:64 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"`reconstruction_threshold`, and the quantization parameters " -"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " -"roles in balancing privacy, robustness, and efficiency within the SecAgg " -"protocol." +":py:obj:`FedAdam `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "" -":py:obj:`collect_masked_vectors_stage " -"`\\ " -"\\(driver\\, ...\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedadam.FedAdam:1 of +msgid "FedAdam - Adaptive Federated Optimization using Adam." 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`setup_stage `\\" -" \\(driver\\, context\\, state\\)" +":py:obj:`FedAvg `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +#, fuzzy +msgid "Federated Averaging strategy." +msgstr "Stratégie de moyenne fédérée." + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`share_keys_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`FedAvgAndroid `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`unmask_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:2 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 of #, fuzzy -msgid "simulation" -msgstr "Simulation de moniteur" +msgid "Federated Averaging with Momentum strategy." +msgstr "Stratégie de moyenne fédérée." 
-#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`start_simulation `\\ \\(\\*\\," -" client\\_fn\\[\\, ...\\]\\)" +":py:obj:`FedMedian `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 -#: flwr.simulation.app.start_simulation:1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedmedian.FedMedian:1 of #, fuzzy -msgid "Start a Ray-based Flower simulation server." -msgstr "Simulation de moniteur" +msgid "Configurable FedMedian strategy implementation." +msgstr "Configuration de l'évaluation fédérée" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`run_simulation `\\ " -"\\(server\\_app\\, client\\_app\\, ...\\)" -msgstr "" - -#: ../../source/ref-api/flwr.simulation.rst:18::1 -#: flwr.simulation.run_simulation.run_simulation:1 of -msgid "Run a Flower App using the Simulation Engine." +":py:obj:`FedOpt `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedopt.FedOpt:1 of #, fuzzy -msgid "run\\_simulation" -msgstr "Simulation de moniteur" +msgid "Federated Optim strategy." +msgstr "Stratégie de moyenne fédérée." -#: flwr.simulation.run_simulation.run_simulation:3 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"The `ServerApp` to be executed. It will send messages to different " -"`ClientApp` instances running on different (virtual) SuperNodes." +":py:obj:`FedProx `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:6 of -msgid "" -"The `ClientApp` to be executed by each of the SuperNodes. It will receive" -" messages sent by the `ServerApp`." 
-msgstr "" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedprox.FedProx:1 of +#, fuzzy +msgid "Federated Optimization strategy." +msgstr "Stratégie de moyenne fédérée." -#: flwr.simulation.run_simulation.run_simulation:9 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"Number of nodes that run a ClientApp. They can be sampled by a Driver in " -"the ServerApp and receive a Message describing what the ClientApp should " -"perform." +":py:obj:`FedTrimmedAvg `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:13 of -msgid "A simulation backend that runs `ClientApp`s." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of +msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." msgstr "" -#: flwr.simulation.run_simulation.run_simulation:15 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"'A dictionary, e.g {\"\": , \"\": } to " -"configure a backend. Values supported in are those included by " -"`flwr.common.typing.ConfigsRecordValues`." +":py:obj:`FedXgbBagging `\\ " +"\\(\\[evaluate\\_function\\]\\)" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:19 of -msgid "" -"A boolean to indicate whether to enable GPU growth on the main thread. " -"This is desirable if you make use of a TensorFlow model on your " -"`ServerApp` while having your `ClientApp` running on the same GPU. " -"Without enabling this, you might encounter an out-of-memory error because" -" TensorFlow, by default, allocates all GPU memory. Read more about how " -"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " -"documentation: https://www.tensorflow.org/api/stable." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of +msgid "Configurable FedXgbBagging strategy implementation." 
msgstr "" -#: flwr.simulation.run_simulation.run_simulation:26 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"When diabled, only INFO, WARNING and ERROR log messages will be shown. If" -" enabled, DEBUG-level logs will be displayed." +":py:obj:`FedXgbCyclic `\\ " +"\\(\\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 -#, fuzzy -msgid "start\\_simulation" -msgstr "démarrer_simulation" - -#: flwr.simulation.app.start_simulation:3 of -msgid "" -"A function creating client instances. The function must take a single " -"`str` argument called `cid`. It should return a single client instance of" -" type Client. Note that the created client instances are ephemeral and " -"will often be destroyed after a single method invocation. Since client " -"instances are not long-lived, they should not attempt to carry state over" -" method invocations. Any state required by the instance (model, dataset, " -"hyperparameters, ...) should be (re-)created in either the call to " -"`client_fn` or the call to any of the client methods (e.g., load " -"evaluation data in the `evaluate` method itself)." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of +msgid "Configurable FedXgbCyclic strategy implementation." msgstr "" -#: flwr.simulation.app.start_simulation:13 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"The total number of clients in this simulation. This must be set if " -"`clients_ids` is not set and vice-versa." +":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -#: flwr.simulation.app.start_simulation:16 of -msgid "" -"List `client_id`s for each client. This is only required if `num_clients`" -" is not set. Setting both `num_clients` and `clients_ids` with " -"`len(clients_ids)` not equal to `num_clients` generates an error." 
+#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of +msgid "Configurable FedXgbNnAvg strategy implementation." msgstr "" -#: flwr.simulation.app.start_simulation:20 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"CPU and GPU resources for a single client. Supported keys are `num_cpus` " -"and `num_gpus`. To understand the GPU utilization caused by `num_gpus`, " -"as well as using custom resources, please consult the Ray documentation." +":py:obj:`FedYogi `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.simulation.app.start_simulation:25 of -msgid "" -"An implementation of the abstract base class `flwr.server.Server`. If no " -"instance is provided, then `start_server` will create one." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "FedYogi [Reddi et al., 2020] strategy." msgstr "" -#: flwr.simulation.app.start_simulation:31 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"An implementation of the abstract base class `flwr.server.Strategy`. If " -"no strategy is provided, then `start_server` will use " -"`flwr.server.strategy.FedAvg`." +":py:obj:`Krum `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: flwr.simulation.app.start_simulation:35 of -msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_simulation` will use " -"`flwr.server.client_manager.SimpleClientManager`." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.krum.Krum:1 of +msgid "Krum [Blanchard et al., 2017] strategy." msgstr "" -#: flwr.simulation.app.start_simulation:39 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. 
If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args: { \"ignore_reinit_error\": True, " -"\"include_dashboard\": False } An empty dictionary can be used " -"(ray_init_args={}) to prevent any arguments from being passed to " -"ray.init." +":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " +"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" msgstr "" -#: flwr.simulation.app.start_simulation:39 of -msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args:" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Configurable QFedAvg strategy implementation." msgstr "" -#: flwr.simulation.app.start_simulation:43 of -msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" -msgstr "" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy +msgid ":py:obj:`Strategy `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.simulation.app.start_simulation:45 of -msgid "" -"An empty dictionary can be used (ray_init_args={}) to prevent any " -"arguments from being passed to ray.init." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.strategy.Strategy:1 of +msgid "Abstract base class for server strategy implementations." msgstr "" -#: flwr.simulation.app.start_simulation:48 of -msgid "" -"Set to True to prevent `ray.shutdown()` in case " -"`ray.is_initialized()=True`." +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:2 +msgid "Bulyan" msgstr "" -#: flwr.simulation.app.start_simulation:50 of -msgid "" -"Optionally specify the type of actor to use. The actor object, which " -"persists throughout the simulation, will be the process in charge of " -"executing a ClientApp wrapping input argument `client_fn`." 
+#: flwr.server.strategy.bulyan.Bulyan:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 +#: flwr.server.strategy.fedmedian.FedMedian:1 +#: flwr.server.strategy.fedopt.FedOpt:1 flwr.server.strategy.fedprox.FedProx:1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 +#: flwr.server.strategy.krum.Krum:1 flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.fedavg.FedAvg`" msgstr "" -#: flwr.simulation.app.start_simulation:54 of -msgid "" -"If you want to create your own Actor classes, you might need to pass some" -" input argument. You can use this dictionary for such purpose." +#: flwr.server.strategy.bulyan.Bulyan:3 of +msgid "Implementation based on https://arxiv.org/abs/1802.07927." msgstr "" -#: flwr.simulation.app.start_simulation:57 of -msgid "" -"(default: \"DEFAULT\") Optional string (\"DEFAULT\" or \"SPREAD\") for " -"the VCE to choose in which node the actor is placed. If you are an " -"advanced user needed more control you can use lower-level scheduling " -"strategies to pin actors to specific compute nodes (e.g. via " -"NodeAffinitySchedulingStrategy). Please note this is an advanced feature." -" For all details, please refer to the Ray documentation: " -"https://docs.ray.io/en/latest/ray-core/scheduling/index.html" -msgstr "" +#: flwr.server.strategy.bulyan.Bulyan:5 +#: flwr.server.strategy.fedadagrad.FedAdagrad:5 +#: flwr.server.strategy.fedadam.FedAdam:5 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:5 +#: flwr.server.strategy.fedavgm.FedAvgM:5 flwr.server.strategy.fedopt.FedOpt:5 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:5 +#: flwr.server.strategy.fedyogi.FedYogi:5 flwr.server.strategy.krum.Krum:5 of +msgid "Fraction of clients used during training. Defaults to 1.0." 
+msgstr "" -#: flwr.simulation.app.start_simulation:66 of -msgid "**hist** -- Object containing metrics from training." +#: flwr.server.strategy.bulyan.Bulyan:7 +#: flwr.server.strategy.fedadagrad.FedAdagrad:7 +#: flwr.server.strategy.fedadam.FedAdam:7 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:7 +#: flwr.server.strategy.fedavgm.FedAvgM:7 flwr.server.strategy.fedopt.FedOpt:7 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:7 +#: flwr.server.strategy.fedyogi.FedYogi:7 flwr.server.strategy.krum.Krum:7 of +msgid "Fraction of clients used during validation. Defaults to 1.0." msgstr "" -#: ../../source/ref-changelog.md:1 -msgid "Changelog" -msgstr "Changelog" +#: flwr.server.strategy.bulyan.Bulyan:9 +#: flwr.server.strategy.fedadagrad.FedAdagrad:9 +#: flwr.server.strategy.fedadam.FedAdam:9 flwr.server.strategy.fedavg.FedAvg:13 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:9 +#: flwr.server.strategy.fedavgm.FedAvgM:9 flwr.server.strategy.fedopt.FedOpt:9 +#: flwr.server.strategy.fedprox.FedProx:45 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:9 +#: flwr.server.strategy.fedyogi.FedYogi:9 flwr.server.strategy.krum.Krum:9 of +msgid "Minimum number of clients used during training. Defaults to 2." +msgstr "" -#: ../../source/ref-changelog.md:3 -#, fuzzy -msgid "Unreleased" -msgstr "Inédit" +#: flwr.server.strategy.bulyan.Bulyan:11 +#: flwr.server.strategy.fedadagrad.FedAdagrad:11 +#: flwr.server.strategy.fedadam.FedAdam:11 +#: flwr.server.strategy.fedavg.FedAvg:15 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:11 +#: flwr.server.strategy.fedavgm.FedAvgM:11 +#: flwr.server.strategy.fedopt.FedOpt:11 +#: flwr.server.strategy.fedprox.FedProx:47 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:11 +#: flwr.server.strategy.fedyogi.FedYogi:11 flwr.server.strategy.krum.Krum:11 of +msgid "Minimum number of clients used during validation. Defaults to 2." 
+msgstr "" -#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:19 -#: ../../source/ref-changelog.md:83 ../../source/ref-changelog.md:176 -#: ../../source/ref-changelog.md:276 ../../source/ref-changelog.md:360 -#: ../../source/ref-changelog.md:424 ../../source/ref-changelog.md:482 -#: ../../source/ref-changelog.md:551 ../../source/ref-changelog.md:680 -#: ../../source/ref-changelog.md:722 ../../source/ref-changelog.md:789 -#: ../../source/ref-changelog.md:855 ../../source/ref-changelog.md:900 -#: ../../source/ref-changelog.md:939 ../../source/ref-changelog.md:972 -#: ../../source/ref-changelog.md:1022 -msgid "What's new?" -msgstr "Quoi de neuf ?" +#: flwr.server.strategy.bulyan.Bulyan:13 +#: flwr.server.strategy.fedadagrad.FedAdagrad:13 +#: flwr.server.strategy.fedadam.FedAdam:13 +#: flwr.server.strategy.fedavg.FedAvg:17 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:13 +#: flwr.server.strategy.fedavgm.FedAvgM:13 +#: flwr.server.strategy.fedopt.FedOpt:13 +#: flwr.server.strategy.fedprox.FedProx:49 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:13 +#: flwr.server.strategy.fedyogi.FedYogi:13 flwr.server.strategy.krum.Krum:13 of +msgid "Minimum number of total clients in the system. Defaults to 2." +msgstr "" -#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:71 -#: ../../source/ref-changelog.md:146 ../../source/ref-changelog.md:258 -#: ../../source/ref-changelog.md:348 ../../source/ref-changelog.md:412 -#: ../../source/ref-changelog.md:470 ../../source/ref-changelog.md:539 -#: ../../source/ref-changelog.md:601 ../../source/ref-changelog.md:620 -#: ../../source/ref-changelog.md:776 ../../source/ref-changelog.md:847 -#: ../../source/ref-changelog.md:884 ../../source/ref-changelog.md:927 -msgid "Incompatible changes" -msgstr "Changements incompatibles" +#: flwr.server.strategy.bulyan.Bulyan:15 flwr.server.strategy.krum.Krum:15 of +msgid "Number of malicious clients in the system. Defaults to 0." 
+msgstr "" -#: ../../source/ref-changelog.md:9 ../../source/ref-changelog.md:73 -#: ../../source/ref-changelog.md:350 ../../source/ref-changelog.md:414 -#: ../../source/ref-changelog.md:472 ../../source/ref-changelog.md:541 -#: ../../source/ref-changelog.md:603 -msgid "None" -msgstr "Aucun" +#: flwr.server.strategy.bulyan.Bulyan:17 +#: flwr.server.strategy.fedadagrad.FedAdagrad:15 +#: flwr.server.strategy.fedadam.FedAdam:15 +#: flwr.server.strategy.fedavg.FedAvg:19 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:15 +#: flwr.server.strategy.fedavgm.FedAvgM:15 +#: flwr.server.strategy.fedopt.FedOpt:15 +#: flwr.server.strategy.fedprox.FedProx:51 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:15 +#: flwr.server.strategy.fedyogi.FedYogi:17 +#: flwr.server.strategy.fedyogi.FedYogi:18 +#: flwr.server.strategy.fedyogi.FedYogi:19 flwr.server.strategy.krum.Krum:20 of +msgid "Optional function used for validation. Defaults to None." +msgstr "" -#: ../../source/ref-changelog.md:11 -#, fuzzy -msgid "v1.8.0 (2024-04-03)" -msgstr "v1.3.0 (2023-02-06)" +#: flwr.server.strategy.bulyan.Bulyan:19 +#: flwr.server.strategy.fedadagrad.FedAdagrad:17 +#: flwr.server.strategy.fedadam.FedAdam:17 +#: flwr.server.strategy.fedavg.FedAvg:21 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:17 +#: flwr.server.strategy.fedavgm.FedAvgM:17 +#: flwr.server.strategy.fedopt.FedOpt:17 +#: flwr.server.strategy.fedprox.FedProx:53 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:17 +#: flwr.server.strategy.fedyogi.FedYogi:20 flwr.server.strategy.krum.Krum:22 of +msgid "Function used to configure training. Defaults to None." 
+msgstr "" -#: ../../source/ref-changelog.md:13 ../../source/ref-changelog.md:77 -#: ../../source/ref-changelog.md:170 ../../source/ref-changelog.md:270 -#: ../../source/ref-changelog.md:354 ../../source/ref-changelog.md:418 -#: ../../source/ref-changelog.md:476 ../../source/ref-changelog.md:545 -#: ../../source/ref-changelog.md:614 -msgid "Thanks to our contributors" -msgstr "Merci à nos contributeurs" +#: flwr.server.strategy.bulyan.Bulyan:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:19 +#: flwr.server.strategy.fedadam.FedAdam:19 +#: flwr.server.strategy.fedavg.FedAvg:23 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:19 +#: flwr.server.strategy.fedavgm.FedAvgM:19 +#: flwr.server.strategy.fedopt.FedOpt:19 +#: flwr.server.strategy.fedprox.FedProx:55 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:19 +#: flwr.server.strategy.fedyogi.FedYogi:22 flwr.server.strategy.krum.Krum:24 of +msgid "Function used to configure validation. Defaults to None." +msgstr "" -#: ../../source/ref-changelog.md:15 ../../source/ref-changelog.md:79 -#: ../../source/ref-changelog.md:172 ../../source/ref-changelog.md:272 -#: ../../source/ref-changelog.md:356 ../../source/ref-changelog.md:420 -#: ../../source/ref-changelog.md:478 -msgid "" -"We would like to give our special thanks to all the contributors who made" -" the new version of Flower possible (in `git shortlog` order):" +#: flwr.server.strategy.bulyan.Bulyan:23 +#: flwr.server.strategy.fedadagrad.FedAdagrad:25 +#: flwr.server.strategy.fedadam.FedAdam:21 +#: flwr.server.strategy.fedavg.FedAvg:25 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:21 +#: flwr.server.strategy.fedavgm.FedAvgM:21 +#: flwr.server.strategy.fedopt.FedOpt:21 +#: flwr.server.strategy.fedprox.FedProx:57 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:21 +#: flwr.server.strategy.fedyogi.FedYogi:24 flwr.server.strategy.krum.Krum:26 of +msgid "Whether or not accept rounds containing failures. Defaults to True." 
msgstr "" -"Nous tenons à remercier tout particulièrement tous les contributeurs qui " -"ont rendu possible la nouvelle version de Flower (dans l'ordre `git " -"shortlog`) :" -#: ../../source/ref-changelog.md:17 +#: flwr.server.strategy.bulyan.Bulyan:25 +#: flwr.server.strategy.fedadagrad.FedAdagrad:27 +#: flwr.server.strategy.fedadam.FedAdam:23 +#: flwr.server.strategy.fedavg.FedAvg:27 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:24 +#: flwr.server.strategy.fedavgm.FedAvgM:23 +#: flwr.server.strategy.fedopt.FedOpt:23 +#: flwr.server.strategy.fedprox.FedProx:59 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:23 +#: flwr.server.strategy.fedyogi.FedYogi:26 flwr.server.strategy.krum.Krum:28 of +#, fuzzy +msgid "Initial global model parameters." +msgstr "Initialise le modèle global" + +#: flwr.server.strategy.bulyan.Bulyan:27 of msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata " -"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " -"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " -"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " -"`tabdar-khan` " +"Byzantine resilient aggregation rule that is used as the first step of " +"the Bulyan (e.g., Krum)" msgstr "" -#: ../../source/ref-changelog.md:21 -msgid "" -"**Introduce Flower Next high-level API (stable)** " -"([#3002](https://github.com/adap/flower/pull/3002), " -"[#2934](https://github.com/adap/flower/pull/2934), " -"[#2958](https://github.com/adap/flower/pull/2958), " -"[#3173](https://github.com/adap/flower/pull/3173), " -"[#3174](https://github.com/adap/flower/pull/3174), " -"[#2923](https://github.com/adap/flower/pull/2923), " -"[#2691](https://github.com/adap/flower/pull/2691), " -"[#3079](https://github.com/adap/flower/pull/3079), " -"[#2961](https://github.com/adap/flower/pull/2961), " -"[#2924](https://github.com/adap/flower/pull/2924), " -"[#3166](https://github.com/adap/flower/pull/3166), " 
-"[#3031](https://github.com/adap/flower/pull/3031), " -"[#3057](https://github.com/adap/flower/pull/3057), " -"[#3000](https://github.com/adap/flower/pull/3000), " -"[#3113](https://github.com/adap/flower/pull/3113), " -"[#2957](https://github.com/adap/flower/pull/2957), " -"[#3183](https://github.com/adap/flower/pull/3183), " -"[#3180](https://github.com/adap/flower/pull/3180), " -"[#3035](https://github.com/adap/flower/pull/3035), " -"[#3189](https://github.com/adap/flower/pull/3189), " -"[#3185](https://github.com/adap/flower/pull/3185), " -"[#3190](https://github.com/adap/flower/pull/3190), " -"[#3191](https://github.com/adap/flower/pull/3191), " -"[#3195](https://github.com/adap/flower/pull/3195), " -"[#3197](https://github.com/adap/flower/pull/3197))" +#: flwr.server.strategy.bulyan.Bulyan:29 of +msgid "arguments to the first_aggregation rule" msgstr "" -#: ../../source/ref-changelog.md:23 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The Flower Next high-level API is stable! Flower Next is the future of " -"Flower - all new features (like Flower Mods) will be built on top of it. " -"You can start to migrate your existing projects to Flower Next by using " -"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " -"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." -" Flower Next allows you to run multiple projects concurrently (we call " -"this multi-run) and execute the same project in either simulation " -"environments or deployment environments without having to change a single" -" line of code. The best part? It's fully compatible with existing Flower " -"projects that use `Strategy`, `NumPyClient` & co." 
+":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:25 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of #, fuzzy -msgid "" -"**Introduce Flower Next low-level API (preview)** " -"([#3062](https://github.com/adap/flower/pull/3062), " -"[#3034](https://github.com/adap/flower/pull/3034), " -"[#3069](https://github.com/adap/flower/pull/3069))" -msgstr "" -"**Mettre à jour les exemples de code** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" +msgid "Aggregate evaluation losses using weighted average." +msgstr "Résultats globaux de l'évaluation." -#: ../../source/ref-changelog.md:27 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"In addition to the Flower Next *high-level* API that uses `Strategy`, " -"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " -"new Flower Next *low-level* API. The low-level API allows for granular " -"control of every aspect of the learning process by sending/receiving " -"individual messages to/from client nodes. The new `ServerApp` supports " -"registering a custom `main` function that allows writing custom training " -"loops for methods like async FL, cyclic training, or federated analytics." 
-" The new `ClientApp` supports registering `train`, `evaluate` and `query`" -" functions that can access the raw message received from the `ServerApp`." -" New abstractions like `RecordSet`, `Message` and `Context` further " -"enable sending multiple models, multiple sets of config values and " -"metrics, stateful computations on the client node and implementations of " -"custom SMPC protocols, to name just a few." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:29 +#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy +msgid "Aggregate fit results using Bulyan." +msgstr "Résultats globaux de l'évaluation." + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce Flower Mods (preview)** " -"([#3054](https://github.com/adap/flower/pull/3054), " -"[#2911](https://github.com/adap/flower/pull/2911), " -"[#3083](https://github.com/adap/flower/pull/3083))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -"**Introduire la télémétrie optionnelle** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/ref-changelog.md:31 -msgid "" -"Flower Modifiers (we call them Mods) can intercept messages and analyze, " -"edit or handle them directly. Mods can be used to develop pluggable " -"modules that work across different projects. Flower 1.8 already includes " -"mods to log the size of a message, the number of parameters sent over the" -" network, differential privacy with fixed clipping and adaptive clipping," -" local differential privacy and secure aggregation protocols SecAgg and " -"SecAgg+. The Flower Mods API is released as a preview, but researchers " -"can already use it to experiment with arbirtrary SMPC protocols." 
-msgstr "" - -#: ../../source/ref-changelog.md:33 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of #, fuzzy +msgid "Configure the next round of evaluation." 
+msgstr "Configuration de l'évaluation côté serveur" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Fine-tune LLMs with LLM FlowerTune** " -"([#3029](https://github.com/adap/flower/pull/3029), " -"[#3089](https://github.com/adap/flower/pull/3089), " -"[#3092](https://github.com/adap/flower/pull/3092), " -"[#3100](https://github.com/adap/flower/pull/3100), " -"[#3114](https://github.com/adap/flower/pull/3114), " -"[#3162](https://github.com/adap/flower/pull/3162), " -"[#3172](https://github.com/adap/flower/pull/3172))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:35 -msgid "" -"We are introducing LLM FlowerTune, an introductory example that " -"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " -"the Alpaca-GPT4 dataset. The example is built to be easily adapted to use" -" different models and/or datasets. Read our blog post [LLM FlowerTune: " -"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" -"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." 
+#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_fit:1 +#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of +msgid "Configure the next round of training." 
msgstr "" -#: ../../source/ref-changelog.md:37 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce built-in Differential Privacy (preview)** " -"([#2798](https://github.com/adap/flower/pull/2798), " -"[#2959](https://github.com/adap/flower/pull/2959), " -"[#3038](https://github.com/adap/flower/pull/3038), " -"[#3147](https://github.com/adap/flower/pull/3147), " -"[#2909](https://github.com/adap/flower/pull/2909), " -"[#2893](https://github.com/adap/flower/pull/2893), " -"[#2892](https://github.com/adap/flower/pull/2892), " -"[#3039](https://github.com/adap/flower/pull/3039), " -"[#3074](https://github.com/adap/flower/pull/3074))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Evaluate model parameters using an evaluation function." 
msgstr "" -"**([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-changelog.md:39 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Built-in Differential Privacy is here! Flower supports both central and " -"local differential privacy (DP). Central DP can be configured with either" -" fixed or adaptive clipping. The clipping can happen either on the " -"server-side or the client-side. Local DP does both clipping and noising " -"on the client-side. A new documentation page [explains Differential " -"Privacy approaches](https://flower.ai/docs/framework/explanation-" -"differential-privacy.html) and a new how-to guide describes [how to use " -"the new Differential Privacy components](https://flower.ai/docs/framework" -"/how-to-use-differential-privacy.html) in Flower." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:41 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.initialize_parameters:1 +#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of #, fuzzy +msgid "Initialize global model parameters." 
+msgstr "Initialise le modèle global" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce built-in Secure Aggregation (preview)** " -"([#3120](https://github.com/adap/flower/pull/3120), " -"[#3110](https://github.com/adap/flower/pull/3110), " -"[#3108](https://github.com/adap/flower/pull/3108))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Introduire la télémétrie optionnelle** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/ref-changelog.md:43 -msgid "" -"Built-in Secure Aggregation is here! Flower now supports different secure" -" aggregation protocols out-of-the-box. The best part? You can add secure " -"aggregation to your Flower projects with only a few lines of code. In " -"this initial release, we inlcude support for SecAgg and SecAgg+, but more" -" protocols will be implemented shortly. We'll also add detailed docs that" -" explain secure aggregation and how to use it in Flower. You can already " -"check out the new code example that shows how to use Flower to easily " -"combine Federated Learning, Differential Privacy and Secure Aggregation " -"in the same project." 
+#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_evaluation_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of +msgid "Use a fraction of available clients for evaluation." msgstr "" -#: ../../source/ref-changelog.md:45 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce** `flwr` **CLI (preview)** " -"([#2942](https://github.com/adap/flower/pull/2942), " -"[#3055](https://github.com/adap/flower/pull/3055), " -"[#3111](https://github.com/adap/flower/pull/3111), " -"[#3130](https://github.com/adap/flower/pull/3130), " -"[#3136](https://github.com/adap/flower/pull/3136), " -"[#3094](https://github.com/adap/flower/pull/3094), " -"[#3059](https://github.com/adap/flower/pull/3059), " -"[#3049](https://github.com/adap/flower/pull/3049), " -"[#3142](https://github.com/adap/flower/pull/3142))" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -"**Mise à jour de la documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:47 -msgid "" -"A new `flwr` CLI command allows creating new Flower 
projects (`flwr new`)" -" and then running them using the Simulation Engine (`flwr run`)." +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_fit_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of +msgid "Return the sample size and the required number of available clients." msgstr "" -#: ../../source/ref-changelog.md:49 -#, fuzzy -msgid "" -"**Introduce Flower Next Simulation Engine** " -"([#3024](https://github.com/adap/flower/pull/3024), " -"[#3061](https://github.com/adap/flower/pull/3061), " -"[#2997](https://github.com/adap/flower/pull/2997), " -"[#2783](https://github.com/adap/flower/pull/2783), " -"[#3184](https://github.com/adap/flower/pull/3184), " -"[#3075](https://github.com/adap/flower/pull/3075), " -"[#3047](https://github.com/adap/flower/pull/3047), " -"[#2998](https://github.com/adap/flower/pull/2998), " -"[#3009](https://github.com/adap/flower/pull/3009), " -"[#3008](https://github.com/adap/flower/pull/3008))" +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 +msgid "DPFedAvgAdaptive" +msgstr "DPFedAvgAdaptive" + +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" msgstr "" -"**Introduire l'API REST (expérimentale)** " -"([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " 
-"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:51 -msgid "" -"The Flower Simulation Engine can now run Flower Next projects. For " -"notebook environments, there's also a new `run_simulation` function that " -"can run `ServerApp` and `ClientApp`." +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:3 of +msgid "This class is deprecated and will be removed in a future release." msgstr "" -#: ../../source/ref-changelog.md:53 -#, fuzzy +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"**Handle SuperNode connection errors** " -"([#2969](https://github.com/adap/flower/pull/2969))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:55 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 +#: 
flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +#, fuzzy +msgid "Aggregate evaluation losses using the given strategy." +msgstr "Résultats globaux de l'évaluation." + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"A SuperNode will now try to reconnect indefinitely to the SuperLink in " -"case of connection errors. The arguments `--max-retries` and `--max-wait-" -"time` can now be passed to the `flower-client-app` command. `--max-" -"retries` will define the number of tentatives the client should make " -"before it gives up trying to reconnect to the SuperLink, and, `--max-" -"wait-time` defines the time before the SuperNode gives up trying to " -"reconnect to the SuperLink." +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:57 -#, fuzzy -msgid "" -"**General updates to Flower Baselines** " -"([#2904](https://github.com/adap/flower/pull/2904), " -"[#2482](https://github.com/adap/flower/pull/2482), " -"[#2985](https://github.com/adap/flower/pull/2985), " -"[#2968](https://github.com/adap/flower/pull/2968))" +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." 
msgstr "" -"**Introduire une nouvelle fleur Référence : FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679))" -#: ../../source/ref-changelog.md:59 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " -"baseline. Several other baselined have been updated as well." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:61 -msgid "" -"**Improve documentation and translations** " -"([#3050](https://github.com/adap/flower/pull/3050), " -"[#3044](https://github.com/adap/flower/pull/3044), " -"[#3043](https://github.com/adap/flower/pull/3043), " -"[#2986](https://github.com/adap/flower/pull/2986), " -"[#3041](https://github.com/adap/flower/pull/3041), " -"[#3046](https://github.com/adap/flower/pull/3046), " -"[#3042](https://github.com/adap/flower/pull/3042), " -"[#2978](https://github.com/adap/flower/pull/2978), " -"[#2952](https://github.com/adap/flower/pull/2952), " -"[#3167](https://github.com/adap/flower/pull/3167), " -"[#2953](https://github.com/adap/flower/pull/2953), " -"[#3045](https://github.com/adap/flower/pull/3045), " -"[#2654](https://github.com/adap/flower/pull/2654), " -"[#3082](https://github.com/adap/flower/pull/3082), " -"[#2990](https://github.com/adap/flower/pull/2990), " -"[#2989](https://github.com/adap/flower/pull/2989))" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of +msgid "Configure the next round of evaluation using the specified strategy." 
msgstr "" -#: ../../source/ref-changelog.md:63 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"As usual, we merged many smaller and larger improvements to the " -"documentation. A special thank you goes to [Sebastian van der " -"Voort](https://github.com/svdvoort) for landing a big documentation PR!" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:65 -#, fuzzy +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"**General updates to Flower Examples** " -"([3134](https://github.com/adap/flower/pull/3134), " -"[2996](https://github.com/adap/flower/pull/2996), " -"[2930](https://github.com/adap/flower/pull/2930), " -"[2967](https://github.com/adap/flower/pull/2967), " -"[2467](https://github.com/adap/flower/pull/2467), " -"[2910](https://github.com/adap/flower/pull/2910), " -"[#2918](https://github.com/adap/flower/pull/2918), " -"[#2773](https://github.com/adap/flower/pull/2773), " -"[#3063](https://github.com/adap/flower/pull/3063), " -"[#3116](https://github.com/adap/flower/pull/3116), " -"[#3117](https://github.com/adap/flower/pull/3117))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**Documentation mise à jour** " -"([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/ref-changelog.md:67 -msgid "" -"Two new examples show federated training of a Vision Transformer (ViT) " -"and federated learning in a medical context using 
the popular MONAI " -"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" -" new Flower Next `ServerApp` and `ClientApp`. Many other examples " -"received considerable updates as well." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of +msgid "Evaluate model parameters using an evaluation function from the strategy." 
msgstr "" -#: ../../source/ref-changelog.md:69 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"**General improvements** " -"([#3171](https://github.com/adap/flower/pull/3171), " -"[3099](https://github.com/adap/flower/pull/3099), " -"[3003](https://github.com/adap/flower/pull/3003), " -"[3145](https://github.com/adap/flower/pull/3145), " -"[3017](https://github.com/adap/flower/pull/3017), " -"[3085](https://github.com/adap/flower/pull/3085), " -"[3012](https://github.com/adap/flower/pull/3012), " -"[3119](https://github.com/adap/flower/pull/3119), " -"[2991](https://github.com/adap/flower/pull/2991), " -"[2970](https://github.com/adap/flower/pull/2970), " -"[2980](https://github.com/adap/flower/pull/2980), " -"[3086](https://github.com/adap/flower/pull/3086), " -"[2932](https://github.com/adap/flower/pull/2932), " -"[2928](https://github.com/adap/flower/pull/2928), " -"[2941](https://github.com/adap/flower/pull/2941), " -"[2933](https://github.com/adap/flower/pull/2933), " -"[3181](https://github.com/adap/flower/pull/3181), " -"[2973](https://github.com/adap/flower/pull/2973), " -"[2992](https://github.com/adap/flower/pull/2992), " -"[2915](https://github.com/adap/flower/pull/2915), " -"[3040](https://github.com/adap/flower/pull/3040), " -"[3022](https://github.com/adap/flower/pull/3022), " -"[3032](https://github.com/adap/flower/pull/3032), " -"[2902](https://github.com/adap/flower/pull/2902), " -"[2931](https://github.com/adap/flower/pull/2931), " -"[3005](https://github.com/adap/flower/pull/3005), " -"[3132](https://github.com/adap/flower/pull/3132), " -"[3115](https://github.com/adap/flower/pull/3115), " -"[2944](https://github.com/adap/flower/pull/2944), " -"[3064](https://github.com/adap/flower/pull/3064), " -"[3106](https://github.com/adap/flower/pull/3106), " -"[2974](https://github.com/adap/flower/pull/2974), " -"[3178](https://github.com/adap/flower/pull/3178), " 
-"[2993](https://github.com/adap/flower/pull/2993), " -"[3186](https://github.com/adap/flower/pull/3186), " -"[3091](https://github.com/adap/flower/pull/3091), " -"[3125](https://github.com/adap/flower/pull/3125), " -"[3093](https://github.com/adap/flower/pull/3093), " -"[3013](https://github.com/adap/flower/pull/3013), " -"[3033](https://github.com/adap/flower/pull/3033), " -"[3133](https://github.com/adap/flower/pull/3133), " -"[3068](https://github.com/adap/flower/pull/3068), " -"[2916](https://github.com/adap/flower/pull/2916), " -"[2975](https://github.com/adap/flower/pull/2975), " -"[2984](https://github.com/adap/flower/pull/2984), " -"[2846](https://github.com/adap/flower/pull/2846), " -"[3077](https://github.com/adap/flower/pull/3077), " -"[3143](https://github.com/adap/flower/pull/3143), " -"[2921](https://github.com/adap/flower/pull/2921), " -"[3101](https://github.com/adap/flower/pull/3101), " -"[2927](https://github.com/adap/flower/pull/2927), " -"[2995](https://github.com/adap/flower/pull/2995), " -"[2972](https://github.com/adap/flower/pull/2972), " -"[2912](https://github.com/adap/flower/pull/2912), " -"[3065](https://github.com/adap/flower/pull/3065), " -"[3028](https://github.com/adap/flower/pull/3028), " -"[2922](https://github.com/adap/flower/pull/2922), " -"[2982](https://github.com/adap/flower/pull/2982), " -"[2914](https://github.com/adap/flower/pull/2914), " -"[3179](https://github.com/adap/flower/pull/3179), " -"[3080](https://github.com/adap/flower/pull/3080), " -"[2994](https://github.com/adap/flower/pull/2994), " -"[3187](https://github.com/adap/flower/pull/3187), " -"[2926](https://github.com/adap/flower/pull/2926), " -"[3018](https://github.com/adap/flower/pull/3018), " -"[3144](https://github.com/adap/flower/pull/3144), " -"[3011](https://github.com/adap/flower/pull/3011), " -"[#3152](https://github.com/adap/flower/pull/3152), " -"[#2836](https://github.com/adap/flower/pull/2836), " -"[#2929](https://github.com/adap/flower/pull/2929), 
" -"[#2943](https://github.com/adap/flower/pull/2943), " -"[#2955](https://github.com/adap/flower/pull/2955), " -"[#2954](https://github.com/adap/flower/pull/2954))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of +msgid "Initialize global model parameters using given strategy." msgstr "" -#: ../../source/ref-changelog.md:75 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.configure_fit:3 +#: flwr.server.strategy.strategy.Strategy.evaluate:6 of #, fuzzy -msgid "v1.7.0 (2024-02-05)" -msgstr "v1.3.0 (2023-02-06)" +msgid "The current round of federated learning." 
+msgstr "Qu'est-ce que l'apprentissage fédéré ?" -#: ../../source/ref-changelog.md:81 -msgid "" -"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " -"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " -"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " -"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " -"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " -"Shaaban`, `Yan Gao`, `Yasar Abbas` " +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 +#: flwr.server.strategy.strategy.Strategy.configure_fit:7 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of +msgid "The client manager which holds all currently connected clients." msgstr "" -#: ../../source/ref-changelog.md:85 -#, fuzzy +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of msgid "" -"**Introduce stateful clients (experimental)** " -"([#2770](https://github.com/adap/flower/pull/2770), " -"[#2686](https://github.com/adap/flower/pull/2686), " -"[#2696](https://github.com/adap/flower/pull/2696), " -"[#2643](https://github.com/adap/flower/pull/2643), " -"[#2769](https://github.com/adap/flower/pull/2769))" +"**evaluate_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `EvaluateIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated evaluation." 
msgstr "" -"**([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-changelog.md:87 -msgid "" -"Subclasses of `Client` and `NumPyClient` can now store local state that " -"remains on the client. Let's start with the highlight first: this new " -"feature is compatible with both simulated clients (via " -"`start_simulation`) and networked clients (via `start_client`). It's also" -" the first preview of new abstractions like `Context` and `RecordSet`. " -"Clients can access state of type `RecordSet` via `state: RecordSet = " -"self.context.state`. Changes to this `RecordSet` are preserved across " -"different rounds of execution to enable stateful computations in a " -"unified way across simulation and deployment." 
+#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 +msgid "DPFedAvgFixed" +msgstr "DPFedAvgFixed" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" msgstr "" -#: ../../source/ref-changelog.md:89 -#, fuzzy +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"**Improve performance** " -"([#2293](https://github.com/adap/flower/pull/2293))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**Supprimer les stratégies expérimentales** " -"([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/ref-changelog.md:91 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" -"place aggregation to reduce memory consumption. The Flower client " -"serialization/deserialization has been rewritten from the ground up, " -"which results in significant speedups, especially when the client-side " -"training time is short." 
+":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:93 -#, fuzzy -msgid "" -"**Support Federated Learning with Apple MLX and Flower** " -"([#2693](https://github.com/adap/flower/pull/2693))" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of +msgid "Aggregate training results using unweighted aggregation." msgstr "" -"**Ajouter un nouvel exemple d'apprentissage fédéré utilisant fastai et " -"Flower** ([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/ref-changelog.md:95 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"Flower has official support for federated learning using [Apple " -"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " -"example." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:97 -#, fuzzy +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce new XGBoost cyclic strategy** " -"([#2666](https://github.com/adap/flower/pull/2666), " -"[#2668](https://github.com/adap/flower/pull/2668))" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Introduction du SDK iOS (aperçu)** " -"([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/ref-changelog.md:99 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of msgid "" -"A new strategy called `FedXgbCyclic` supports a client-by-client style of" -" training (often called cyclic). The `xgboost-comprehensive` code example" -" shows how to use it in a full project. 
In addition to that, `xgboost-" -"comprehensive` now also supports simulation mode. With this, Flower " -"offers best-in-class XGBoost support." +"Configure the next round of training incorporating Differential Privacy " +"(DP)." msgstr "" -#: ../../source/ref-changelog.md:101 -#, fuzzy +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"**Support Python 3.11** " -"([#2394](https://github.com/adap/flower/pull/2394))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**Support Python 3.10** " -"([#1320](https://github.com/adap/flower/pull/1320))" -#: ../../source/ref-changelog.md:103 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " -"ensure better support for users using more recent Python versions." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:105 -#, fuzzy +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of msgid "" -"**Update gRPC and ProtoBuf dependencies** " -"([#2814](https://github.com/adap/flower/pull/2814))" +"Configuration of the next training round includes information related to " +"DP, such as clip norm and noise stddev." msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:107 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 +#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of msgid "" -"The `grpcio` and `protobuf` dependencies were updated to their latest " -"versions for improved security and performance." +"**fit_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `FitIns` for this particular " +"`ClientProxy`. 
If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated learning." msgstr "" -#: ../../source/ref-changelog.md:109 -#, fuzzy -msgid "" -"**Introduce Docker image for Flower server** " -"([#2700](https://github.com/adap/flower/pull/2700), " -"[#2688](https://github.com/adap/flower/pull/2688), " -"[#2705](https://github.com/adap/flower/pull/2705), " -"[#2695](https://github.com/adap/flower/pull/2695), " -"[#2747](https://github.com/adap/flower/pull/2747), " -"[#2746](https://github.com/adap/flower/pull/2746), " -"[#2680](https://github.com/adap/flower/pull/2680), " -"[#2682](https://github.com/adap/flower/pull/2682), " -"[#2701](https://github.com/adap/flower/pull/2701))" +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyClientSideAdaptiveClipping" msgstr "" -"**([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-changelog.md:111 -msgid "" -"The Flower server can now be run using an official Docker image. A new " -"how-to guide explains [how to run Flower using " -"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html). An official Flower client Docker image will follow." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 +#: of +msgid "Use `adaptiveclipping_mod` modifier at the client side." 
msgstr "" -#: ../../source/ref-changelog.md:113 -#, fuzzy +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 +#: of msgid "" -"**Introduce** `flower-via-docker-compose` **example** " -"([#2626](https://github.com/adap/flower/pull/2626))" +"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " +"happen on the client-side, usually by using the built-in " +"`adaptiveclipping_mod`." msgstr "" -"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " -"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-changelog.md:115 -#, fuzzy -msgid "" -"**Introduce** `quickstart-sklearn-tabular` **example** " -"([#2719](https://github.com/adap/flower/pull/2719))" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 +#: of +msgid "The strategy to which DP functionalities will be added by this wrapper." msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:117 -#, fuzzy -msgid "" -"**Introduce** `custom-metrics` **example** " -"([#1958](https://github.com/adap/flower/pull/1958))" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 +#: of +msgid "The noise multiplier for the Gaussian mechanism for model updates." 
msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:119 -#, fuzzy -msgid "" -"**Update code examples to use Flower Datasets** " -"([#2450](https://github.com/adap/flower/pull/2450), " -"[#2456](https://github.com/adap/flower/pull/2456), " -"[#2318](https://github.com/adap/flower/pull/2318), " -"[#2712](https://github.com/adap/flower/pull/2712))" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 +#: of +msgid "The number of clients that are sampled on each round." msgstr "" -"Mettre à jour les outils de développement " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:121 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 +#: of msgid "" -"Several code examples were updated to use [Flower " -"Datasets](https://flower.ai/docs/datasets/)." +"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " +"recommends to set to 0.1." msgstr "" -#: ../../source/ref-changelog.md:123 -#, fuzzy +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 +#: of +msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." 
+msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 +#: of msgid "" -"**General updates to Flower Examples** " -"([#2381](https://github.com/adap/flower/pull/2381), " -"[#2805](https://github.com/adap/flower/pull/2805), " -"[#2782](https://github.com/adap/flower/pull/2782), " -"[#2806](https://github.com/adap/flower/pull/2806), " -"[#2829](https://github.com/adap/flower/pull/2829), " -"[#2825](https://github.com/adap/flower/pull/2825), " -"[#2816](https://github.com/adap/flower/pull/2816), " -"[#2726](https://github.com/adap/flower/pull/2726), " -"[#2659](https://github.com/adap/flower/pull/2659), " -"[#2655](https://github.com/adap/flower/pull/2655))" +"The learning rate for the clipping norm adaptation. Defaults to 0.2. " +"Andrew et al. recommends to set to 0.2." msgstr "" -"**Améliorer l'API (expérimentale) du pilote** " -"([#1663](https://github.com/adap/flower/pull/1663), " -"[#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" -#: ../../source/ref-changelog.md:125 -msgid "Many Flower code examples received substantial updates." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 +#: of +msgid "" +"The stddev of the noise added to the count of updates currently below the" +" estimate. Andrew et al. 
recommends to set to `expected_num_records/20`" msgstr "" -#: ../../source/ref-changelog.md:127 ../../source/ref-changelog.md:220 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 +#: of #, fuzzy -msgid "**Update Flower Baselines**" -msgstr "Demande pour une nouvelle Flower Baseline" +msgid "Create a strategy:" +msgstr "stratégie.du.serveur" -#: ../../source/ref-changelog.md:129 -#, fuzzy +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 +#: of msgid "" -"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " -"[#2771](https://github.com/adap/flower/pull/2771))" +"Wrap the strategy with the " +"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" msgstr "" -"**Nouvel exemple de code JAX** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" -#: ../../source/ref-changelog.md:130 -#, fuzzy -msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 +#: of +msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" msgstr "" -"Amélioration de la documentation sur le serveur gRPC " -"([#841](https://github.com/adap/flower/pull/841))" -#: ../../source/ref-changelog.md:131 -#, fuzzy -msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " 
-"([#1619](https://github.com/adap/flower/pull/1619))" - -#: ../../source/ref-changelog.md:132 -#, fuzzy -msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" -msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" - -#: ../../source/ref-changelog.md:133 -#, fuzzy -msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" -msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" -#: ../../source/ref-changelog.md:134 -#, fuzzy -msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -"**Renommé stratégie q-FedAvg** " -"([#802](https://github.com/adap/flower/pull/802))" -#: ../../source/ref-changelog.md:136 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 +#: of #, fuzzy -msgid "" -"**Improve documentation** " -"([#2674](https://github.com/adap/flower/pull/2674), " -"[#2480](https://github.com/adap/flower/pull/2480), " -"[#2826](https://github.com/adap/flower/pull/2826), " -"[#2727](https://github.com/adap/flower/pull/2727), " -"[#2761](https://github.com/adap/flower/pull/2761), " -"[#2900](https://github.com/adap/flower/pull/2900))" -msgstr "" -"**Mise à jour de la documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " 
-"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" +msgid "Aggregate training results and update clip norms." +msgstr "Résultats globaux de l'évaluation." -#: ../../source/ref-changelog.md:138 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Improved testing and development infrastructure** " -"([#2797](https://github.com/adap/flower/pull/2797), " -"[#2676](https://github.com/adap/flower/pull/2676), " -"[#2644](https://github.com/adap/flower/pull/2644), " -"[#2656](https://github.com/adap/flower/pull/2656), " -"[#2848](https://github.com/adap/flower/pull/2848), " -"[#2675](https://github.com/adap/flower/pull/2675), " -"[#2735](https://github.com/adap/flower/pull/2735), " -"[#2767](https://github.com/adap/flower/pull/2767), " -"[#2732](https://github.com/adap/flower/pull/2732), " -"[#2744](https://github.com/adap/flower/pull/2744), " -"[#2681](https://github.com/adap/flower/pull/2681), " -"[#2699](https://github.com/adap/flower/pull/2699), " -"[#2745](https://github.com/adap/flower/pull/2745), " -"[#2734](https://github.com/adap/flower/pull/2734), " -"[#2731](https://github.com/adap/flower/pull/2731), " -"[#2652](https://github.com/adap/flower/pull/2652), " -"[#2720](https://github.com/adap/flower/pull/2720), " -"[#2721](https://github.com/adap/flower/pull/2721), " -"[#2717](https://github.com/adap/flower/pull/2717), " -"[#2864](https://github.com/adap/flower/pull/2864), " -"[#2694](https://github.com/adap/flower/pull/2694), " -"[#2709](https://github.com/adap/flower/pull/2709), " -"[#2658](https://github.com/adap/flower/pull/2658), " -"[#2796](https://github.com/adap/flower/pull/2796), " -"[#2692](https://github.com/adap/flower/pull/2692), " -"[#2657](https://github.com/adap/flower/pull/2657), " 
-"[#2813](https://github.com/adap/flower/pull/2813), " -"[#2661](https://github.com/adap/flower/pull/2661), " -"[#2398](https://github.com/adap/flower/pull/2398))" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:140 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"The Flower testing and development infrastructure has received " -"substantial updates. This makes Flower 1.7 the most tested release ever." +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:142 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Update dependencies** " -"([#2753](https://github.com/adap/flower/pull/2753), " -"[#2651](https://github.com/adap/flower/pull/2651), " -"[#2739](https://github.com/adap/flower/pull/2739), " -"[#2837](https://github.com/adap/flower/pull/2837), " -"[#2788](https://github.com/adap/flower/pull/2788), " -"[#2811](https://github.com/adap/flower/pull/2811), " -"[#2774](https://github.com/adap/flower/pull/2774), " -"[#2790](https://github.com/adap/flower/pull/2790), " -"[#2751](https://github.com/adap/flower/pull/2751), " -"[#2850](https://github.com/adap/flower/pull/2850), " -"[#2812](https://github.com/adap/flower/pull/2812), " -"[#2872](https://github.com/adap/flower/pull/2872), " -"[#2736](https://github.com/adap/flower/pull/2736), " -"[#2756](https://github.com/adap/flower/pull/2756), " -"[#2857](https://github.com/adap/flower/pull/2857), " -"[#2757](https://github.com/adap/flower/pull/2757), " -"[#2810](https://github.com/adap/flower/pull/2810), " -"[#2740](https://github.com/adap/flower/pull/2740), " -"[#2789](https://github.com/adap/flower/pull/2789))" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: 
../../source/ref-changelog.md:144 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**General improvements** " -"([#2803](https://github.com/adap/flower/pull/2803), " -"[#2847](https://github.com/adap/flower/pull/2847), " -"[#2877](https://github.com/adap/flower/pull/2877), " -"[#2690](https://github.com/adap/flower/pull/2690), " -"[#2889](https://github.com/adap/flower/pull/2889), " -"[#2874](https://github.com/adap/flower/pull/2874), " -"[#2819](https://github.com/adap/flower/pull/2819), " -"[#2689](https://github.com/adap/flower/pull/2689), " -"[#2457](https://github.com/adap/flower/pull/2457), " -"[#2870](https://github.com/adap/flower/pull/2870), " -"[#2669](https://github.com/adap/flower/pull/2669), " -"[#2876](https://github.com/adap/flower/pull/2876), " -"[#2885](https://github.com/adap/flower/pull/2885), " -"[#2858](https://github.com/adap/flower/pull/2858), " -"[#2867](https://github.com/adap/flower/pull/2867), " -"[#2351](https://github.com/adap/flower/pull/2351), " -"[#2886](https://github.com/adap/flower/pull/2886), " -"[#2860](https://github.com/adap/flower/pull/2860), " -"[#2828](https://github.com/adap/flower/pull/2828), " -"[#2869](https://github.com/adap/flower/pull/2869), " -"[#2875](https://github.com/adap/flower/pull/2875), " -"[#2733](https://github.com/adap/flower/pull/2733), " -"[#2488](https://github.com/adap/flower/pull/2488), " -"[#2646](https://github.com/adap/flower/pull/2646), " -"[#2879](https://github.com/adap/flower/pull/2879), " -"[#2821](https://github.com/adap/flower/pull/2821), " -"[#2855](https://github.com/adap/flower/pull/2855), " -"[#2800](https://github.com/adap/flower/pull/2800), " -"[#2807](https://github.com/adap/flower/pull/2807), " -"[#2801](https://github.com/adap/flower/pull/2801), " -"[#2804](https://github.com/adap/flower/pull/2804), " -"[#2851](https://github.com/adap/flower/pull/2851), " 
-"[#2787](https://github.com/adap/flower/pull/2787), " -"[#2852](https://github.com/adap/flower/pull/2852), " -"[#2672](https://github.com/adap/flower/pull/2672), " -"[#2759](https://github.com/adap/flower/pull/2759))" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:148 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 #, fuzzy -msgid "" -"**Deprecate** `start_numpy_client` " -"([#2563](https://github.com/adap/flower/pull/2563), " -"[#2718](https://github.com/adap/flower/pull/2718))" +msgid "DifferentialPrivacyClientSideFixedClipping" +msgstr "Confidentialité différentielle" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 +#: of +msgid "Use `fixedclipping_mod` modifier at the client side." msgstr "" -"**Nouvelles stratégies intégrées** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-changelog.md:150 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 +#: of msgid "" -"Until now, clients of type `NumPyClient` needed to be started via " -"`start_numpy_client`. In our efforts to consolidate framework APIs, we " -"have introduced changes, and now all client types should start via " -"`start_client`. To continue using `NumPyClient` clients, you simply need " -"to first call the `.to_client()` method and then pass returned `Client` " -"object to `start_client`. The examples and the documentation have been " -"updated accordingly." +"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " +"on the client-side, usually by using the built-in `fixedclipping_mod`." 
msgstr "" -#: ../../source/ref-changelog.md:152 -#, fuzzy +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 +#: of msgid "" -"**Deprecate legacy DP wrappers** " -"([#2749](https://github.com/adap/flower/pull/2749))" +"The noise multiplier for the Gaussian mechanism for model updates. A " +"value of 1.0 or higher is recommended for strong privacy." msgstr "" -"**Supprimez KerasClient** " -"([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:154 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 +#: of msgid "" -"Legacy DP wrapper classes are deprecated, but still functional. This is " -"in preparation for an all-new pluggable version of differential privacy " -"support in Flower." +"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " +"wrapper:" msgstr "" -#: ../../source/ref-changelog.md:156 -#, fuzzy -msgid "" -"**Make optional arg** `--callable` **in** `flower-client` **a required " -"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 +#: of +msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" msgstr "" -"**Log** `Client` **exceptions dans le moteur de client virtuel** " -"([#1493](https://github.com/adap/flower/pull/1493))" -#: ../../source/ref-changelog.md:158 -#, fuzzy +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " -"([#2890](https://github.com/adap/flower/pull/2890))" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" -#: 
../../source/ref-changelog.md:160 -#, fuzzy +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Drop experimental** `Task` **fields** " -"([#2866](https://github.com/adap/flower/pull/2866), " -"[#2865](https://github.com/adap/flower/pull/2865))" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -"**Rename** `Weights` **to** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/ref-changelog.md:162 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 +#: of +#, fuzzy +msgid "Add noise to the aggregated parameters." +msgstr "Puis sérialise le résultat agrégé :" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Experimental fields `sa`, `legacy_server_message` and " -"`legacy_client_message` were removed from `Task` message. The removed " -"fields are superseded by the new `RecordSet` abstraction." 
+":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:164 -#, fuzzy +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Retire MXNet examples** " -"([#2724](https://github.com/adap/flower/pull/2724))" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Nouvel exemple de code scikit-learn** " -"([#748](https://github.com/adap/flower/pull/748))" -#: ../../source/ref-changelog.md:166 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"The development of the MXNet fremework has ended and the project is now " -"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " -"examples won't receive updates." +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:168 -#, fuzzy -msgid "v1.6.0 (2023-11-28)" -msgstr "v1.4.0 (2023-04-21)" - -#: ../../source/ref-changelog.md:174 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " -"`Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Gabriel " -"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," -" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " -"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " -"`cnxdeveloper`, `k3nfalt` " +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:178 -#, fuzzy -msgid "" -"**Add experimental support for Python 3.12** " -"([#2565](https://github.com/adap/flower/pull/2565))" +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyServerSideAdaptiveClipping" msgstr "" -"**Ajouter la prise en charge expérimentale de Python 3.10 et Python " -"3.11** ([#1135](https://github.com/adap/flower/pull/1135))" -#: ../../source/ref-changelog.md:180 -#, fuzzy +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 +#: of msgid "" -"**Add new XGBoost examples** " -"([#2612](https://github.com/adap/flower/pull/2612), " -"[#2554](https://github.com/adap/flower/pull/2554), " -"[#2617](https://github.com/adap/flower/pull/2617), " -"[#2618](https://github.com/adap/flower/pull/2618), " -"[#2619](https://github.com/adap/flower/pull/2619), " -"[#2567](https://github.com/adap/flower/pull/2567))" +"The standard deviation of the noise added to the count of updates below " +"the estimate. Andrew et al. 
recommends to set to " +"`expected_num_records/20`" msgstr "" -"**([#1520](https://github.com/adap/flower/pull/1520), " -"[#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" -#: ../../source/ref-changelog.md:182 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 +#: of msgid "" -"We have added a new `xgboost-quickstart` example alongside a new " -"`xgboost-comprehensive` example that goes more in-depth." +"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " +"wrapper" msgstr "" -#: ../../source/ref-changelog.md:184 -#, fuzzy +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Add Vertical FL example** " -"([#2598](https://github.com/adap/flower/pull/2598))" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -"**Nouvel exemple de code CoreML pour iOS** " -"([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/ref-changelog.md:186 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"We had many questions about Vertical Federated Learning using Flower, so " -"we decided to add an simple example for it on the [Titanic " -"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " -"tutorial (in the README)." 
+":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:188 -#, fuzzy +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Support custom** `ClientManager` **in** `start_driver()` " -"([#2292](https://github.com/adap/flower/pull/2292))" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Ajout de la prise en charge d'un `ClientManager` personnalisé comme " -"paramètre de `start_simulation` " -"([#1171](https://github.com/adap/flower/pull/1171))" -#: ../../source/ref-changelog.md:190 -#, fuzzy +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Update REST API to support create and delete nodes** " -"([#2283](https://github.com/adap/flower/pull/2283))" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Nouvelle stratégie expérimentale TensorBoard** " -"([#789](https://github.com/adap/flower/pull/789))" -#: ../../source/ref-changelog.md:192 -#, fuzzy +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Update the Android SDK** " -"([#2187](https://github.com/adap/flower/pull/2187))" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " -"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-changelog.md:194 -msgid "Add gRPC request-response capability to the Android SDK." 
+#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:196 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 #, fuzzy -msgid "" -"**Update the C++ SDK** " -"([#2537](https://github.com/adap/flower/pull/2537), " -"[#2528](https://github.com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" -msgstr "" -"Mettre à jour les outils de développement " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" +msgid "DifferentialPrivacyServerSideFixedClipping" +msgstr "Confidentialité différentielle" -#: ../../source/ref-changelog.md:198 -msgid "Add gRPC request-response capability to the C++ SDK." 
+#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 +#: of +msgid "" +"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping " +"wrapper" msgstr "" -#: ../../source/ref-changelog.md:200 -#, fuzzy +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Make HTTPS the new default** " -"([#2591](https://github.com/adap/flower/pull/2591), " -"[#2636](https://github.com/adap/flower/pull/2636))" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -"**Exemple de code mis à jour** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:202 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Flower is moving to HTTPS by default. The new `flower-server` requires " -"passing `--certificates`, but users can enable `--insecure` to use HTTP " -"for prototyping. The same applies to `flower-client`, which can either " -"use user-provided credentials or gRPC-bundled certificates to connect to " -"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " -"enable insecure HTTP connections." +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:204 -msgid "" -"For backward compatibility, `start_client()` and `start_numpy_client()` " -"will still start in insecure mode by default. In a future release, " -"insecure connections will require user opt-in by passing `insecure=True`." +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 +#: of +msgid "Compute the updates, clip, and pass them for aggregation." 
msgstr "" -#: ../../source/ref-changelog.md:206 -#, fuzzy +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), " -"[#2493](https://github.com/adap/flower/pull/2493))" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Mettre à jour les exemples de code** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:208 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Using the `client_fn`, Flower clients can interchangeably run as " -"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" -" `start_simulation`) without requiring changes to how the client class is" -" defined and instantiated. The `to_client()` function is introduced to " -"convert a `NumPyClient` to a `Client`." 
+":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:210 -#, fuzzy +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Add new** `Bulyan` **strategy** " -"([#1817](https://github.com/adap/flower/pull/1817), " -"[#1891](https://github.com/adap/flower/pull/1891))" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -"**Nouvelles stratégies intégrées** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-changelog.md:212 -#, fuzzy +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " -"2018](https://arxiv.org/abs/1802.07927)" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -"La nouvelle stratégie `FedMedian` met en œuvre Federated Median " -"(FedMedian) par [Yin et al., 2018] " -"(https://arxiv.org/pdf/1803.01498v1.pdf)." -#: ../../source/ref-changelog.md:214 -#, fuzzy -msgid "" -"**Add new** `XGB Bagging` **strategy** " -"([#2611](https://github.com/adap/flower/pull/2611))" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 +#: of +msgid "Afterward, add noise to the aggregated parameters." 
msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:216 ../../source/ref-changelog.md:218 +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 #, fuzzy -msgid "" -"**Introduce `WorkloadState`** " -"([#2564](https://github.com/adap/flower/pull/2564), " -"[#2632](https://github.com/adap/flower/pull/2632))" +msgid "FaultTolerantFedAvg" +msgstr "server.strategy.FaultTolerantFedAvg" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**Nouvelles stratégies intégrées** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-changelog.md:222 -#, fuzzy +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " -"[#2286](https://github.com/adap/flower/pull/2286), " -"[#2509](https://github.com/adap/flower/pull/2509))" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**Mettre à jour les exemples de code** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:224 -#, fuzzy +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 +#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 +#: 
flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 +#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 +#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using weighted average." +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " -"[#2400](https://github.com/adap/flower/pull/2400))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Nouvel exemple de code JAX** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" -#: ../../source/ref-changelog.md:226 -#, fuzzy +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " -"[#2507](https://github.com/adap/flower/pull/2507))" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Exemple de code mis à jour** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:228 -#, fuzzy +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " -"[#2508](https://github.com/adap/flower/pull/2508))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**Nouvelles stratégies intégrées** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" -#: 
../../source/ref-changelog.md:230 -#, fuzzy -msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" -msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" -#: ../../source/ref-changelog.md:232 -#, fuzzy -msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"Amélioration de la documentation sur le serveur gRPC " -"([#841](https://github.com/adap/flower/pull/841))" -#: ../../source/ref-changelog.md:234 -#, fuzzy -msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:236 -#, fuzzy -msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 +#: ../../source/ref-changelog.md:1231 +msgid "FedAdagrad" +msgstr "FedAdagrad" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 +#: flwr.server.strategy.fedadam.FedAdam:1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:238 +#: flwr.server.strategy.fedadagrad.FedAdagrad:3 +#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 +#: flwr.server.strategy.fedyogi.FedYogi:3 of #, 
fuzzy -msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" -msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" +msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" +msgstr "" +"FedYogi - Stratégie d'apprentissage fédéré utilisant Yogi côté serveur. " +"Mise en oeuvre basée sur https://arxiv.org/abs/2003.00295" -#: ../../source/ref-changelog.md:240 -#, fuzzy -msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" -msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" +#: flwr.server.strategy.fedadagrad.FedAdagrad:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:23 +#: flwr.server.strategy.fedadam.FedAdam:25 +#: flwr.server.strategy.fedadam.FedAdam:27 +#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 +#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 +#: flwr.server.strategy.fedprox.FedProx:61 +#: flwr.server.strategy.fedprox.FedProx:63 +#: flwr.server.strategy.fedyogi.FedYogi:28 +#: flwr.server.strategy.fedyogi.FedYogi:30 of +msgid "Metrics aggregation function, optional." +msgstr "" -#: ../../source/ref-changelog.md:242 -#, fuzzy -msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" -msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" +#: flwr.server.strategy.fedadagrad.FedAdagrad:29 +#: flwr.server.strategy.fedadam.FedAdam:29 +#: flwr.server.strategy.fedopt.FedOpt:29 of +msgid "Server-side learning rate. Defaults to 1e-1." +msgstr "" -#: ../../source/ref-changelog.md:244 -#, fuzzy +#: flwr.server.strategy.fedadagrad.FedAdagrad:31 +#: flwr.server.strategy.fedadam.FedAdam:31 +#: flwr.server.strategy.fedopt.FedOpt:31 of +msgid "Client-side learning rate. Defaults to 1e-1." 
+msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:33 +#: flwr.server.strategy.fedadam.FedAdam:37 +#: flwr.server.strategy.fedopt.FedOpt:37 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**Nouvelles stratégies intégrées** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-changelog.md:246 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General updates to Flower Examples** " -"([#2384](https://github.com/adap/flower/pull/2384), " -"[#2425](https://github.com/adap/flower/pull/2425), " -"[#2526](https://github.com/adap/flower/pull/2526), " -"[#2302](https://github.com/adap/flower/pull/2302), " -"[#2545](https://github.com/adap/flower/pull/2545))" +":py:obj:`aggregate_fit `\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -"Mettre à jour les outils de développement " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:248 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General updates to Flower Baselines** " -"([#2301](https://github.com/adap/flower/pull/2301), " -"[#2305](https://github.com/adap/flower/pull/2305), " -"[#2307](https://github.com/adap/flower/pull/2307), " -"[#2327](https://github.com/adap/flower/pull/2327), " -"[#2435](https://github.com/adap/flower/pull/2435), " -"[#2462](https://github.com/adap/flower/pull/2462), " 
-"[#2463](https://github.com/adap/flower/pull/2463), " -"[#2461](https://github.com/adap/flower/pull/2461), " -"[#2469](https://github.com/adap/flower/pull/2469), " -"[#2466](https://github.com/adap/flower/pull/2466), " -"[#2471](https://github.com/adap/flower/pull/2471), " -"[#2472](https://github.com/adap/flower/pull/2472), " -"[#2470](https://github.com/adap/flower/pull/2470))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Améliorations générales** " -"([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github.com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" -#: ../../source/ref-changelog.md:250 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General updates to the simulation engine** " -"([#2331](https://github.com/adap/flower/pull/2331), " -"[#2447](https://github.com/adap/flower/pull/2447), " -"[#2448](https://github.com/adap/flower/pull/2448), " -"[#2294](https://github.com/adap/flower/pull/2294))" +":py:obj:`configure_fit `\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Mettre à jour les outils de développement " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" -#: 
../../source/ref-changelog.md:252 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General updates to Flower SDKs** " -"([#2288](https://github.com/adap/flower/pull/2288), " -"[#2429](https://github.com/adap/flower/pull/2429), " -"[#2555](https://github.com/adap/flower/pull/2555), " -"[#2543](https://github.com/adap/flower/pull/2543), " -"[#2544](https://github.com/adap/flower/pull/2544), " -"[#2597](https://github.com/adap/flower/pull/2597), " -"[#2623](https://github.com/adap/flower/pull/2623))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:254 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General improvements** " -"([#2309](https://github.com/adap/flower/pull/2309), " -"[#2310](https://github.com/adap/flower/pull/2310), " -"[#2313](https://github.com/adap/flower/pull/2313), " -"[#2316](https://github.com/adap/flower/pull/2316), " -"[#2317](https://github.com/adap/flower/pull/2317), " -"[#2349](https://github.com/adap/flower/pull/2349), " -"[#2360](https://github.com/adap/flower/pull/2360), " -"[#2402](https://github.com/adap/flower/pull/2402), " -"[#2446](https://github.com/adap/flower/pull/2446), " -"[#2561](https://github.com/adap/flower/pull/2561), " -"[#2273](https://github.com/adap/flower/pull/2273), " -"[#2267](https://github.com/adap/flower/pull/2267), " -"[#2274](https://github.com/adap/flower/pull/2274), " -"[#2275](https://github.com/adap/flower/pull/2275), " -"[#2432](https://github.com/adap/flower/pull/2432), " -"[#2251](https://github.com/adap/flower/pull/2251), " 
-"[#2321](https://github.com/adap/flower/pull/2321), " -"[#1936](https://github.com/adap/flower/pull/1936), " -"[#2408](https://github.com/adap/flower/pull/2408), " -"[#2413](https://github.com/adap/flower/pull/2413), " -"[#2401](https://github.com/adap/flower/pull/2401), " -"[#2531](https://github.com/adap/flower/pull/2531), " -"[#2534](https://github.com/adap/flower/pull/2534), " -"[#2535](https://github.com/adap/flower/pull/2535), " -"[#2521](https://github.com/adap/flower/pull/2521), " -"[#2553](https://github.com/adap/flower/pull/2553), " -"[#2596](https://github.com/adap/flower/pull/2596))" -msgstr "" - -#: ../../source/ref-changelog.md:256 ../../source/ref-changelog.md:346 -#: ../../source/ref-changelog.md:410 ../../source/ref-changelog.md:464 -#: ../../source/ref-changelog.md:531 -msgid "Flower received many improvements under the hood, too many to list here." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"Flower a reçu de nombreuses améliorations sous le capot, trop nombreuses " -"pour être énumérées ici." 
-#: ../../source/ref-changelog.md:260 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Remove support for Python 3.7** " -"([#2280](https://github.com/adap/flower/pull/2280), " -"[#2299](https://github.com/adap/flower/pull/2299), " -"[#2304](https://github.com/adap/flower/pull/2304), " -"[#2306](https://github.com/adap/flower/pull/2306), " -"[#2355](https://github.com/adap/flower/pull/2355), " -"[#2356](https://github.com/adap/flower/pull/2356))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Nouvel exemple de code MLCube** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/ref-changelog.md:262 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Python 3.7 support was deprecated in Flower 1.5, and this release removes" -" support. Flower now requires Python 3.8." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:264 +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 #, fuzzy -msgid "" -"**Remove experimental argument** `rest` **from** `start_client` " -"([#2324](https://github.com/adap/flower/pull/2324))" -msgstr "" -"**Supprimer les stratégies expérimentales** " -"([#1280](https://github.com/adap/flower/pull/1280))" +msgid "FedAdam" +msgstr "FedAdagrad" -#: ../../source/ref-changelog.md:266 -msgid "" -"The (still experimental) argument `rest` was removed from `start_client` " -"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " -"experimental REST API instead." +#: flwr.server.strategy.fedadam.FedAdam:33 +#: flwr.server.strategy.fedyogi.FedYogi:36 of +msgid "Momentum parameter. Defaults to 0.9." 
msgstr "" -#: ../../source/ref-changelog.md:268 -#, fuzzy -msgid "v1.5.0 (2023-08-31)" -msgstr "v1.4.0 (2023-04-21)" +#: flwr.server.strategy.fedadam.FedAdam:35 +#: flwr.server.strategy.fedyogi.FedYogi:38 of +msgid "Second moment parameter. Defaults to 0.99." +msgstr "" -#: ../../source/ref-changelog.md:274 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:278 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce new simulation engine** " -"([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**Introduire la télémétrie optionnelle** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/ref-changelog.md:280 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The new simulation engine has been rewritten from the ground up, yet it " -"remains fully backwards compatible. It offers much improved stability and" -" memory handling, especially when working with GPUs. Simulations " -"transparently adapt to different settings to scale simulation in CPU-" -"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." 
+":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:282 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Comprehensive documentation includes a new [how-to run " -"simulations](https://flower.ai/docs/framework/how-to-run-" -"simulations.html) guide, new [simulation-" -"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " -"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" -"tensorflow.html) notebooks, and a new [YouTube tutorial " -"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:284 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Restructure Flower Docs** " -"([#1824](https://github.com/adap/flower/pull/1824), " -"[#1865](https://github.com/adap/flower/pull/1865), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1887](https://github.com/adap/flower/pull/1887), " -"[#1919](https://github.com/adap/flower/pull/1919), " -"[#1922](https://github.com/adap/flower/pull/1922), " -"[#1920](https://github.com/adap/flower/pull/1920), " -"[#1923](https://github.com/adap/flower/pull/1923), " -"[#1924](https://github.com/adap/flower/pull/1924), " -"[#1962](https://github.com/adap/flower/pull/1962), " -"[#2006](https://github.com/adap/flower/pull/2006), " -"[#2133](https://github.com/adap/flower/pull/2133), " -"[#2203](https://github.com/adap/flower/pull/2203), " -"[#2215](https://github.com/adap/flower/pull/2215), " -"[#2122](https://github.com/adap/flower/pull/2122), " -"[#2223](https://github.com/adap/flower/pull/2223), " -"[#2219](https://github.com/adap/flower/pull/2219), " -"[#2232](https://github.com/adap/flower/pull/2232), " -"[#2233](https://github.com/adap/flower/pull/2233), " 
-"[#2234](https://github.com/adap/flower/pull/2234), " -"[#2235](https://github.com/adap/flower/pull/2235), " -"[#2237](https://github.com/adap/flower/pull/2237), " -"[#2238](https://github.com/adap/flower/pull/2238), " -"[#2242](https://github.com/adap/flower/pull/2242), " -"[#2231](https://github.com/adap/flower/pull/2231), " -"[#2243](https://github.com/adap/flower/pull/2243), " -"[#2227](https://github.com/adap/flower/pull/2227))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:286 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Much effort went into a completely restructured Flower docs experience. " -"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " -"divided into Flower Framework, Flower Baselines, Flower Android SDK, " -"Flower iOS SDK, and code example projects." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:288 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce Flower Swift SDK** " -"([#1858](https://github.com/adap/flower/pull/1858), " -"[#1897](https://github.com/adap/flower/pull/1897))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Introduction du SDK iOS (aperçu)** " -"([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/ref-changelog.md:290 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"This is the first preview release of the Flower Swift SDK. Flower support" -" on iOS is improving, and alongside the Swift SDK and code example, there" -" is now also an iOS quickstart tutorial." 
+":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:292 +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 #, fuzzy -msgid "" -"**Introduce Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" -msgstr "" -"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " -"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" +msgid "FedAvg" +msgstr "DP-FedAvg" -#: ../../source/ref-changelog.md:294 -msgid "" -"This is the first preview release of the Flower Kotlin SDK. Flower " -"support on Android is improving, and alongside the Kotlin SDK and code " -"example, there is now also an Android quickstart tutorial." +#: flwr.server.strategy.fedavg.FedAvg:3 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of +msgid "Implementation based on https://arxiv.org/abs/1602.05629" msgstr "" -#: ../../source/ref-changelog.md:296 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 +#: of msgid "" -"**Introduce new end-to-end testing infrastructure** " -"([#1842](https://github.com/adap/flower/pull/1842), " -"[#2071](https://github.com/adap/flower/pull/2071), " -"[#2072](https://github.com/adap/flower/pull/2072), " -"[#2068](https://github.com/adap/flower/pull/2068), " -"[#2067](https://github.com/adap/flower/pull/2067), " -"[#2069](https://github.com/adap/flower/pull/2069), " -"[#2073](https://github.com/adap/flower/pull/2073), " -"[#2070](https://github.com/adap/flower/pull/2070), " -"[#2074](https://github.com/adap/flower/pull/2074), " -"[#2082](https://github.com/adap/flower/pull/2082), " -"[#2084](https://github.com/adap/flower/pull/2084), " -"[#2093](https://github.com/adap/flower/pull/2093), " -"[#2109](https://github.com/adap/flower/pull/2109), " -"[#2095](https://github.com/adap/flower/pull/2095), " -"[#2140](https://github.com/adap/flower/pull/2140), " -"[#2137](https://github.com/adap/flower/pull/2137), " 
-"[#2165](https://github.com/adap/flower/pull/2165))" +"Fraction of clients used during training. In case `min_fit_clients` is " +"larger than `fraction_fit * available_clients`, `min_fit_clients` will " +"still be sampled. Defaults to 1.0." msgstr "" -"**Améliorer l'API (expérimentale) du pilote** " -"([#1663](https://github.com/adap/flower/pull/1663), " -"[#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" -#: ../../source/ref-changelog.md:298 +#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 +#: of msgid "" -"A new testing infrastructure ensures that new changes stay compatible " -"with existing framework integrations or strategies." +"Fraction of clients used during validation. In case " +"`min_evaluate_clients` is larger than `fraction_evaluate * " +"available_clients`, `min_evaluate_clients` will still be sampled. " +"Defaults to 1.0." msgstr "" -#: ../../source/ref-changelog.md:300 -#, fuzzy -msgid "**Deprecate Python 3.7**" -msgstr "**Créer le PR**" +#: flwr.server.strategy.fedavg.FedAvg:33 of +msgid "Enable (True) or disable (False) in-place aggregation of model updates." +msgstr "" -#: ../../source/ref-changelog.md:302 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" -" Python 3.7 is now deprecated and will be removed in an upcoming release." 
+":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:304 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add new** `FedTrimmedAvg` **strategy** " -"([#1769](https://github.com/adap/flower/pull/1769), " -"[#1853](https://github.com/adap/flower/pull/1853))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**Ajouter un nouvel exemple de Federated Analytics avec Pandas** " -"([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535))" -#: ../../source/ref-changelog.md:306 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " -"2018](https://arxiv.org/abs/1803.01498)." +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -"La nouvelle stratégie `FedMedian` met en œuvre Federated Median " -"(FedMedian) par [Yin et al., 2018] " -"(https://arxiv.org/pdf/1803.01498v1.pdf)." -#: ../../source/ref-changelog.md:308 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce start_driver** " -"([#1697](https://github.com/adap/flower/pull/1697))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:310 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"In addition to `start_server` and using the raw Driver API, there is a " -"new `start_driver` function that allows for running `start_server` " -"scripts as a Flower driver with only a single-line code change. Check out" -" the `mt-pytorch` code example to see a working example using " -"`start_driver`." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:312 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add parameter aggregation to** `mt-pytorch` **code example** " -"([#1785](https://github.com/adap/flower/pull/1785))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**Nouvel exemple de code PyTorch avancé** " -"([#1007](https://github.com/adap/flower/pull/1007))" -#: ../../source/ref-changelog.md:314 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The `mt-pytorch` example shows how to aggregate parameters when writing a" -" driver script. The included `driver.py` and `server.py` have been " -"aligned to demonstrate both the low-level way and the high-level way of " -"building server-side logic." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:316 +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 #, fuzzy +msgid "FedAvgAndroid" +msgstr "DPFedAvgAdaptive" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Migrate experimental REST API to Starlette** " -"([2171](https://github.com/adap/flower/pull/2171))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**Nouvelle stratégie expérimentale TensorBoard** " -"([#789](https://github.com/adap/flower/pull/789))" -#: ../../source/ref-changelog.md:318 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"The (experimental) REST API used to be implemented in " -"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" -" use [Starlette](https://www.starlette.io/) directly." 
+":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:320 -#, fuzzy +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"Please note: The REST request-response API is still experimental and will" -" likely change significantly over time." +":py:obj:`bytes_to_ndarray " +"`\\ \\(tensor\\)" msgstr "" -"Remarque : l'API REST est encore expérimentale et est susceptible de " -"changer de manière significative au fil du temps." -#: ../../source/ref-changelog.md:322 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of #, fuzzy +msgid "Deserialize NumPy array from bytes." +msgstr "Désérialise le tableau numérique NumPy à partir d'octets." + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce experimental gRPC request-response API** " -"([#1867](https://github.com/adap/flower/pull/1867), " -"[#1901](https://github.com/adap/flower/pull/1901))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Introduire les enveloppes de confidentialité différentielle (aperçu)** " -"([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" -#: ../../source/ref-changelog.md:324 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"In addition to the existing gRPC API (based on bidirectional streaming) " -"and the experimental REST API, there is now a new gRPC API that uses a " -"request-response model to communicate with client nodes." 
+":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:326 -#, fuzzy +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"Please note: The gRPC request-response API is still experimental and will" -" likely change significantly over time." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"Remarque : l'API REST est encore expérimentale et est susceptible de " -"changer de manière significative au fil du temps." -#: ../../source/ref-changelog.md:328 -#, fuzzy +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Replace the experimental** `start_client(rest=True)` **with the new** " -"`start_client(transport=\"rest\")` " -"([#1880](https://github.com/adap/flower/pull/1880))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**Initialise** `start_simulation` **avec une liste d'ID de clients** " -"([#860](https://github.com/adap/flower/pull/860))" -#: ../../source/ref-changelog.md:330 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"The (experimental) `start_client` argument `rest` was deprecated in " -"favour of a new argument `transport`. `start_client(transport=\"rest\")` " -"will yield the same behaviour as `start_client(rest=True)` did before. " -"All code should migrate to the new argument `transport`. The deprecated " -"argument `rest` will be removed in a future release." +":py:obj:`ndarray_to_bytes " +"`\\ \\(ndarray\\)" msgstr "" -#: ../../source/ref-changelog.md:332 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of #, fuzzy +msgid "Serialize NumPy array to bytes." +msgstr "Sérialise le tableau numérique NumPy en octets." 
+ +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Add a new gRPC option** " -"([#2197](https://github.com/adap/flower/pull/2197))" +":py:obj:`ndarrays_to_parameters " +"`\\ " +"\\(ndarrays\\)" msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:334 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" -" option set to 0 by default. This prevents the clients from sending " -"keepalive pings when there is no outstanding stream." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:336 -#, fuzzy +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Improve example notebooks** " -"([#2005](https://github.com/adap/flower/pull/2005))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`parameters_to_ndarrays " +"`\\ " +"\\(parameters\\)" msgstr "" -"**Supprimer les stratégies expérimentales** " -"([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/ref-changelog.md:338 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 +#: of #, fuzzy -msgid "There's a new 30min Federated Learning PyTorch tutorial!" -msgstr "Bienvenue au tutoriel sur l'apprentissage fédéré de la fleur !" +msgid "Convert parameters object to NumPy weights." +msgstr "Convertit l'objet des paramètres en ndarrays NumPy." 
-#: ../../source/ref-changelog.md:340 -msgid "" -"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), " -"[#1981](https://github.com/adap/flower/pull/1981), " -"[#1988](https://github.com/adap/flower/pull/1988), " -"[#1984](https://github.com/adap/flower/pull/1984), " -"[#1982](https://github.com/adap/flower/pull/1982), " -"[#2112](https://github.com/adap/flower/pull/2112), " -"[#2144](https://github.com/adap/flower/pull/2144), " -"[#2174](https://github.com/adap/flower/pull/2174), " -"[#2225](https://github.com/adap/flower/pull/2225), " -"[#2183](https://github.com/adap/flower/pull/2183))" +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 +#, fuzzy +msgid "FedAvgM" +msgstr "DP-FedAvg" + +#: flwr.server.strategy.fedavgm.FedAvgM:3 of +msgid "Implementation based on https://arxiv.org/abs/1909.06335" msgstr "" -#: ../../source/ref-changelog.md:342 +#: flwr.server.strategy.fedavgm.FedAvgM:25 of msgid "" -"Many examples have received significant updates, including simplified " -"advanced-tensorflow and advanced-pytorch examples, improved macOS " -"compatibility of TensorFlow examples, and code examples for simulation. A" -" major upgrade is that all code examples now have a `requirements.txt` " -"(in addition to `pyproject.toml`)." +"Server-side learning rate used in server-side optimization. Defaults to " +"1.0." msgstr "" -#: ../../source/ref-changelog.md:344 -#, fuzzy +#: flwr.server.strategy.fedavgm.FedAvgM:28 of +msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General improvements** " -"([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -"**Mise à jour de la documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:352 -msgid "v1.4.0 (2023-04-21)" -msgstr "v1.4.0 (2023-04-21)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" -#: ../../source/ref-changelog.md:358 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " -"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " -"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " -"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " -"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Chenyang Ma (Danny)`, `Daniel J. 
Beutel`, `Edoardo`, `Gautam Jajoo`, " -"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " -"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " -"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " -"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" -#: ../../source/ref-changelog.md:362 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " -"example)** ([#1694](https://github.com/adap/flower/pull/1694), " -"[#1709](https://github.com/adap/flower/pull/1709), " -"[#1715](https://github.com/adap/flower/pull/1715), " -"[#1717](https://github.com/adap/flower/pull/1717), " -"[#1763](https://github.com/adap/flower/pull/1763), " -"[#1795](https://github.com/adap/flower/pull/1795))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Introduire la prise en charge de XGBoost (**`FedXgbNnAvg` **stratégie " -"et exemple)** ([#1694](https://github.com/adap/flower/pull/1694), " -"[#1709](https://github.com/adap/flower/pull/1709), " -"[#1715](https://github.com/adap/flower/pull/1715), " -"[#1717](https://github.com/adap/flower/pull/1717), " -"[#1763](https://github.com/adap/flower/pull/1763), " -"[#1795](https://github.com/adap/flower/pull/1795))" -#: ../../source/ref-changelog.md:364 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"XGBoost is a tree-based ensemble machine learning algorithm that uses " -"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" -" " -"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," -" and a [code example](https://github.com/adap/flower/tree/main/examples" -"/xgboost-quickstart) that demonstrates the usage of this new strategy in " -"an XGBoost project." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"Nous avons ajouté une nouvelle [stratégie] `FedXgbNnAvg` " -"(https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," -" et un [exemple de code] " -"(https://github.com/adap/flower/tree/main/examples/xgboost-quickstart) " -"qui démontre l'utilisation de cette nouvelle stratégie dans un projet " -"XGBoost." -#: ../../source/ref-changelog.md:366 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce iOS SDK (preview)** " -"([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**Introduction du SDK iOS (aperçu)** " -"([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/ref-changelog.md:368 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"This is a major update for anyone wanting to implement Federated Learning" -" on iOS mobile devices. We now have a swift iOS SDK present under " -"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" -" that will facilitate greatly the app creating process. To showcase its " -"use, the [iOS " -"example](https://github.com/adap/flower/tree/main/examples/ios) has also " -"been updated!" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"Il s'agit d'une mise à jour majeure pour tous ceux qui souhaitent mettre " -"en œuvre l'apprentissage fédéré sur les appareils mobiles iOS. Nous " -"disposons désormais d'un SDK swift iOS présent sous " -"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" -" qui facilitera grandement le processus de création d'applications. 
Pour " -"présenter son utilisation, l'[exemple " -"iOS](https://github.com/adap/flower/tree/main/examples/ios) a également " -"été mis à jour !" -#: ../../source/ref-changelog.md:370 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce new \"What is Federated Learning?\" tutorial** " -"([#1657](https://github.com/adap/flower/pull/1657), " -"[#1721](https://github.com/adap/flower/pull/1721))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Introduire un nouveau tutoriel \"Qu'est-ce que l'apprentissage fédéré ?" -" \"** ([#1657](https://github.com/adap/flower/pull/1657), " -"[#1721](https://github.com/adap/flower/pull/1721))" -#: ../../source/ref-changelog.md:372 -#, fuzzy -msgid "" -"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" -"what-is-federated-learning.html) in our documentation explains the basics" -" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" -" Learning to start their journey with Flower. Forward it to anyone who's " -"interested in Federated Learning!" +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 +msgid "FedMedian" msgstr "" -"Un nouveau [tutoriel d'entrée de gamme] " -"(https://flower.ai/docs/tutorial/Flower-0-What-is-FL.html) dans notre " -"documentation explique les bases de l'apprentissage fédéré. Il permet à " -"tous ceux qui ne connaissent pas l'apprentissage fédéré de commencer leur" -" voyage avec Flower. Fais-le suivre à tous ceux qui s'intéressent à " -"l'apprentissage fédéré !" 
-#: ../../source/ref-changelog.md:374 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce new Flower Baseline: FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**Introduire une nouvelle fleur Référence : FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679))" -#: ../../source/ref-changelog.md:376 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"This new baseline replicates the MNIST+CNN task from the paper [Federated" -" Optimization in Heterogeneous Networks (Li et al., " -"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," -" which aims at making convergence more robust in heterogeneous settings." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"Cette nouvelle ligne de base reproduit la tâche MNIST+CNN de l'article " -"[Federated Optimization in Heterogeneous Networks (Li et al., 2018)] " -"(https://arxiv.org/abs/1812.06127). Elle utilise la stratégie `FedProx`, " -"qui vise à rendre la convergence plus robuste dans des contextes " -"hétérogènes." 
-#: ../../source/ref-changelog.md:378 -msgid "" -"**Introduce new Flower Baseline: FedAvg FEMNIST** " -"([#1655](https://github.com/adap/flower/pull/1655))" -msgstr "" -"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " -"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of +#, fuzzy +msgid "Aggregate fit results using median." +msgstr "Résultats globaux de l'évaluation." -#: ../../source/ref-changelog.md:380 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"This new baseline replicates an experiment evaluating the performance of " -"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " -"Benchmark for Federated Settings (Caldas et al., " -"2018)](https://arxiv.org/abs/1812.01097)." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Cette nouvelle ligne de base reproduit une expérience évaluant les " -"performances de l'algorithme FedAvg sur le jeu de données FEMNIST tiré de" -" l'article [LEAF : A Benchmark for Federated Settings (Caldas et al., " -"2018)] (https://arxiv.org/abs/1812.01097)." 
-#: ../../source/ref-changelog.md:382 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce (experimental) REST API** " -"([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Introduire l'API REST (expérimentale)** " -"([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:384 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A new REST API has been introduced as an alternative to the gRPC-based " -"communication stack. In this initial version, the REST API only supports " -"anonymous clients." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"Une nouvelle API REST a été introduite comme alternative à la pile de " -"communication basée sur gRPC. Dans cette version initiale, l'API REST ne " -"prend en charge que les clients anonymes." -#: ../../source/ref-changelog.md:386 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Please note: The REST API is still experimental and will likely change " -"significantly over time." 
+":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"Remarque : l'API REST est encore expérimentale et est susceptible de " -"changer de manière significative au fil du temps." -#: ../../source/ref-changelog.md:388 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Improve the (experimental) Driver API** " -"([#1663](https://github.com/adap/flower/pull/1663), " -"[#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Améliorer l'API (expérimentale) du pilote** " -"([#1663](https://github.com/adap/flower/pull/1663), " -"[#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" -#: ../../source/ref-changelog.md:390 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The Driver API is still an experimental feature, but this release " -"introduces some major upgrades. One of the main improvements is the " -"introduction of an SQLite database to store server state on disk (instead" -" of in-memory). Another improvement is that tasks (instructions or " -"results) that have been delivered will now be deleted. 
This greatly " -"improves the memory efficiency of a long-running Flower server." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"L'API du pilote est encore une fonction expérimentale, mais cette version" -" introduit quelques améliorations majeures. L'une des principales " -"améliorations est l'introduction d'une base de données SQLite pour " -"stocker l'état du serveur sur le disque (au lieu de la mémoire). Une " -"autre amélioration est que les tâches (instructions ou résultats) qui ont" -" été livrées seront désormais supprimées, ce qui améliore " -"considérablement l'efficacité de la mémoire d'un serveur Flower " -"fonctionnant depuis longtemps." -#: ../../source/ref-changelog.md:392 -msgid "" -"**Fix spilling issues related to Ray during simulations** " -"([#1698](https://github.com/adap/flower/pull/1698))" +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 +msgid "FedOpt" msgstr "" -"**Répare les problèmes de déversement liés à Ray pendant les " -"simulations** ([#1698](https://github.com/adap/flower/pull/1698))" -#: ../../source/ref-changelog.md:394 -#, fuzzy -msgid "" -"While running long simulations, `ray` was sometimes spilling huge amounts" -" of data that would make the training unable to continue. This is now " -"fixed! 🎉" +#: flwr.server.strategy.fedopt.FedOpt:33 of +msgid "Momentum parameter. Defaults to 0.0." msgstr "" -"Lors de l'exécution de longues simulations, `ray` déversait parfois " -"d'énormes quantités de données qui rendaient l'entraînement incapable de " -"continuer. ce problème est maintenant corrigé ! 🎉" -#: ../../source/ref-changelog.md:396 -msgid "" -"**Add new example using** `TabNet` **and Flower** " -"([#1725](https://github.com/adap/flower/pull/1725))" +#: flwr.server.strategy.fedopt.FedOpt:35 of +msgid "Second moment parameter. Defaults to 0.0." 
msgstr "" -"**Ajouter un nouvel exemple utilisant** `TabNet` **et Flower** " -"([#1725](https://github.com/adap/flower/pull/1725))" -#: ../../source/ref-changelog.md:398 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"TabNet is a powerful and flexible framework for training machine learning" -" models on tabular data. We now have a federated example using Flower: " -"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" -"/quickstart-tabnet)." +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -"TabNet est un cadre puissant et flexible pour former des modèles " -"d'apprentissage automatique sur des données tabulaires. Nous avons " -"maintenant un exemple fédéré utilisant Flower : [quickstart-" -"tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-" -"tabnet)." -#: ../../source/ref-changelog.md:400 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add new how-to guide for monitoring simulations** " -"([#1649](https://github.com/adap/flower/pull/1649))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**Ajouter un nouveau guide pratique pour le suivi des simulations** " -"([#1649](https://github.com/adap/flower/pull/1649))" -#: ../../source/ref-changelog.md:402 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"We now have a documentation guide to help users monitor their performance" -" during simulations." +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -"Nous avons maintenant un guide de documentation pour aider les " -"utilisateurs à surveiller leurs performances pendant les simulations." 
-#: ../../source/ref-changelog.md:404 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add training metrics to** `History` **object during simulations** " -"([#1696](https://github.com/adap/flower/pull/1696))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Ajouter des mesures de formation à** `History` **objet pendant les " -"simulations** ([#1696](https://github.com/adap/flower/pull/1696))" -#: ../../source/ref-changelog.md:406 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The `fit_metrics_aggregation_fn` can be used to aggregate training " -"metrics, but previous releases did not save the results in the `History` " -"object. This is now the case!" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"La fonction `fit_metrics_aggregation_fn` peut être utilisée pour agréger " -"les mesures d'entraînement, mais les versions précédentes " -"n'enregistraient pas les résultats dans l'objet `History`. c'est " -"désormais le cas !" 
-#: ../../source/ref-changelog.md:408 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General improvements** " -"([#1659](https://github.com/adap/flower/pull/1659), " -"[#1646](https://github.com/adap/flower/pull/1646), " -"[#1647](https://github.com/adap/flower/pull/1647), " -"[#1471](https://github.com/adap/flower/pull/1471), " -"[#1648](https://github.com/adap/flower/pull/1648), " -"[#1651](https://github.com/adap/flower/pull/1651), " -"[#1652](https://github.com/adap/flower/pull/1652), " -"[#1653](https://github.com/adap/flower/pull/1653), " -"[#1659](https://github.com/adap/flower/pull/1659), " -"[#1665](https://github.com/adap/flower/pull/1665), " -"[#1670](https://github.com/adap/flower/pull/1670), " -"[#1672](https://github.com/adap/flower/pull/1672), " -"[#1677](https://github.com/adap/flower/pull/1677), " -"[#1684](https://github.com/adap/flower/pull/1684), " -"[#1683](https://github.com/adap/flower/pull/1683), " -"[#1686](https://github.com/adap/flower/pull/1686), " -"[#1682](https://github.com/adap/flower/pull/1682), " -"[#1685](https://github.com/adap/flower/pull/1685), " -"[#1692](https://github.com/adap/flower/pull/1692), " -"[#1705](https://github.com/adap/flower/pull/1705), " -"[#1708](https://github.com/adap/flower/pull/1708), " -"[#1711](https://github.com/adap/flower/pull/1711), " -"[#1713](https://github.com/adap/flower/pull/1713), " -"[#1714](https://github.com/adap/flower/pull/1714), " -"[#1718](https://github.com/adap/flower/pull/1718), " -"[#1716](https://github.com/adap/flower/pull/1716), " -"[#1723](https://github.com/adap/flower/pull/1723), " -"[#1735](https://github.com/adap/flower/pull/1735), " -"[#1678](https://github.com/adap/flower/pull/1678), " -"[#1750](https://github.com/adap/flower/pull/1750), " -"[#1753](https://github.com/adap/flower/pull/1753), " -"[#1736](https://github.com/adap/flower/pull/1736), " -"[#1766](https://github.com/adap/flower/pull/1766), " 
-"[#1760](https://github.com/adap/flower/pull/1760), " -"[#1775](https://github.com/adap/flower/pull/1775), " -"[#1776](https://github.com/adap/flower/pull/1776), " -"[#1777](https://github.com/adap/flower/pull/1777), " -"[#1779](https://github.com/adap/flower/pull/1779), " -"[#1784](https://github.com/adap/flower/pull/1784), " -"[#1773](https://github.com/adap/flower/pull/1773), " -"[#1755](https://github.com/adap/flower/pull/1755), " -"[#1789](https://github.com/adap/flower/pull/1789), " -"[#1788](https://github.com/adap/flower/pull/1788), " -"[#1798](https://github.com/adap/flower/pull/1798), " -"[#1799](https://github.com/adap/flower/pull/1799), " -"[#1739](https://github.com/adap/flower/pull/1739), " -"[#1800](https://github.com/adap/flower/pull/1800), " -"[#1804](https://github.com/adap/flower/pull/1804), " -"[#1805](https://github.com/adap/flower/pull/1805))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**General improvements** " -"([#1659](https://github.com/adap/flower/pull/1659), " -"[#1646](https://github.com/adap/flower/pull/1646), " -"[#1647](https://github.com/adap/flower/pull/1647), " -"[#1471](https://github.com/adap/flower/pull/1471), " -"[#1648](https://github.com/adap/flower/pull/1648), " -"[#1651](https://github.com/adap/flower/pull/1651), " -"[#1652](https://github.com/adap/flower/pull/1652), " -"[#1653](https://github.com/adap/flower/pull/1653), " -"[#1659](https://github.com/adap/flower/pull/1659), " -"[#1665](https://github.com/adap/flower/pull/1665), " -"[#1670](https://github.com/adap/flower/pull/1670), " -"[#1672](https://github.com/adap/flower/pull/1672), " -"[#1677](https://github.com/adap/flower/pull/1677), " -"[#1684](https://github.com/adap/flower/pull/1684), " -"[#1683](https://github.com/adap/flower/pull/1683), " -"[#1686](https://github.com/adap/flower/pull/1686), " -"[#1682](https://github.com/adap/flower/pull/1682), " -"[#1685](https://github.com/adap/flower/pull/1685), " 
-"[#1692](https://github.com/adap/flower/pull/1692), " -"[#1705](https://github.com/ada" - -#: ../../source/ref-changelog.md:416 -msgid "v1.3.0 (2023-02-06)" -msgstr "v1.3.0 (2023-02-06)" -#: ../../source/ref-changelog.md:422 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" -#: ../../source/ref-changelog.md:426 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add support for** `workload_id` **and** `group_id` **in Driver API** " -"([#1595](https://github.com/adap/flower/pull/1595))" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -"**Ajouter la prise en charge de** `workload_id` **et** `group_id` **dans " -"l'API du pilote** ([#1595](https://github.com/adap/flower/pull/1595))" -#: ../../source/ref-changelog.md:428 -msgid "" -"The (experimental) Driver API now supports a `workload_id` that can be " -"used to identify which workload a task belongs to. It also supports a new" -" `group_id` that can be used, for example, to indicate the current " -"training round. Both the `workload_id` and `group_id` enable client nodes" -" to decide whether they want to handle a task or not." +#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 +msgid "FedProx" msgstr "" -"L'API (expérimentale) Driver prend désormais en charge un `workload_id` " -"qui peut être utilisé pour identifier la charge de travail à laquelle une" -" tâche appartient. Elle prend également en charge un nouveau `group_id` " -"qui peut être utilisé, par exemple, pour indiquer le cycle de formation " -"en cours. 
Le `workload_id` et le `group_id` permettent tous deux aux " -"nœuds clients de décider s'ils veulent traiter une tâche ou non." -#: ../../source/ref-changelog.md:430 -msgid "" -"**Make Driver API and Fleet API address configurable** " -"([#1637](https://github.com/adap/flower/pull/1637))" +#: flwr.server.strategy.fedprox.FedProx:3 of +msgid "Implementation based on https://arxiv.org/abs/1812.06127" msgstr "" -"**Faire en sorte que l'adresse de l'API du conducteur et de l'API de la " -"flotte soit configurable** " -"([#1637](https://github.com/adap/flower/pull/1637))" -#: ../../source/ref-changelog.md:432 +#: flwr.server.strategy.fedprox.FedProx:5 of msgid "" -"The (experimental) long-running Flower server (Driver API and Fleet API) " -"can now configure the server address of both Driver API (via `--driver-" -"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" +"The strategy in itself will not be different than FedAvg, the client " +"needs to be adjusted. A proximal term needs to be added to the loss " +"function during the training:" msgstr "" -"Le serveur Flower (expérimental) de longue durée (Driver API et Fleet " -"API) peut maintenant configurer l'adresse du serveur de Driver API (via " -"`--driver-api-address`) et de Fleet API (via `--fleet-api-address`) lors " -"de son démarrage :" -#: ../../source/ref-changelog.md:434 -#, fuzzy +#: flwr.server.strategy.fedprox.FedProx:9 of msgid "" -"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " -"\"0.0.0.0:8086\"`" +"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" +"\n" msgstr "" -"``flower-superlink --driver-api-address \"0.0.0.0:8081\" --fleet-api-" -"address \"0.0.0.0:8086\" ``" - -#: ../../source/ref-changelog.md:436 -msgid "Both IPv4 and IPv6 addresses are supported." -msgstr "Les adresses IPv4 et IPv6 sont toutes deux prises en charge." 
-#: ../../source/ref-changelog.md:438 +#: flwr.server.strategy.fedprox.FedProx:12 of msgid "" -"**Add new example of Federated Learning using fastai and Flower** " -"([#1598](https://github.com/adap/flower/pull/1598))" +"Where $w^t$ are the global parameters and $w$ are the local weights the " +"function will be optimized with." msgstr "" -"**Ajouter un nouvel exemple d'apprentissage fédéré utilisant fastai et " -"Flower** ([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/ref-changelog.md:440 -msgid "" -"A new code example (`quickstart-fastai`) demonstrates federated learning " -"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " -"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" -"/quickstart-fastai)." +#: flwr.server.strategy.fedprox.FedProx:15 of +msgid "In PyTorch, for example, the loss would go from:" msgstr "" -"Un nouvel exemple de code (`quickstart-fastai`) démontre l'apprentissage " -"fédéré avec [fastai](https://www.fast.ai/) et Flower. Tu peux le trouver " -"ici : [quickstart-" -"fastai](https://github.com/adap/flower/tree/main/examples/quickstart-" -"fastai)." -#: ../../source/ref-changelog.md:442 -msgid "" -"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" -" versions of Android** " -"([#1603](https://github.com/adap/flower/pull/1603))" +#: flwr.server.strategy.fedprox.FedProx:21 of +msgid "To:" msgstr "" -"**Rendre l'exemple Android compatible avec** `flwr >= 1.0.0` **et les " -"dernières versions d'Android** " -"([#1603](https://github.com/adap/flower/pull/1603))" -#: ../../source/ref-changelog.md:444 -#, fuzzy +#: flwr.server.strategy.fedprox.FedProx:30 of msgid "" -"The Android code example has received a substantial update: the project " -"is compatible with Flower 1.0 (and later), the UI received a full " -"refresh, and the project is updated to be compatible with newer Android " -"tooling." 
+"With `global_params` being a copy of the parameters before the training " +"takes place." msgstr "" -"L'exemple de code Android a reçu une mise à jour substantielle : le " -"projet est compatible avec Flower 1.0 et les versions ultérieures, " -"l'interface utilisateur a reçu un rafraîchissement complet, et le projet " -"est mis à jour pour être compatible avec les outils Android les plus " -"récents." -#: ../../source/ref-changelog.md:446 +#: flwr.server.strategy.fedprox.FedProx:65 of msgid "" -"**Add new `FedProx` strategy** " -"([#1619](https://github.com/adap/flower/pull/1619))" +"The weight of the proximal term used in the optimization. 0.0 makes this " +"strategy equivalent to FedAvg, and the higher the coefficient, the more " +"regularization will be used (that is, the client parameters will need to " +"be closer to the server parameters during training)." msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:448 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"This " -"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" -" is almost identical to " -"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," -" but helps users replicate what is described in this " -"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " -"parameter called `proximal_mu` to regularize the local models with " -"respect to the global models." 
+":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -"Cette " -"[stratégie](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" -" est presque identique à " -"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," -" mais aide les utilisateurs à reproduire ce qui est décrit dans cet " -"[article](https://arxiv.org/abs/1812.06127). Elle ajoute essentiellement " -"un paramètre appelé `proximal_mu` pour régulariser les modèles locaux par" -" rapport aux modèles globaux." -#: ../../source/ref-changelog.md:450 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add new metrics to telemetry events** " -"([#1640](https://github.com/adap/flower/pull/1640))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**Ajouter de nouvelles métriques aux événements de télémétrie** " -"([#1640](https://github.com/adap/flower/pull/1640))" -#: ../../source/ref-changelog.md:452 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"An updated event structure allows, for example, the clustering of events " -"within the same workload." +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -"Une structure d'événements mise à jour permet, par exemple, de regrouper " -"des événements au sein d'une même charge de travail." 
-#: ../../source/ref-changelog.md:454 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add new custom strategy tutorial section** " -"[#1623](https://github.com/adap/flower/pull/1623)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Ajouter une nouvelle section de tutoriel sur les stratégies " -"personnalisées** [#1623](https://github.com/adap/flower/pull/1623)" -#: ../../source/ref-changelog.md:456 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The Flower tutorial now has a new section that covers implementing a " -"custom strategy from scratch: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"Le tutoriel sur les fleurs comporte désormais une nouvelle section qui " -"traite de la mise en œuvre d'une stratégie personnalisée à partir de zéro" -" : [Ouvrir dans " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial/Flower-3-Building-a" -"-Strategy-PyTorch.ipynb)" -#: ../../source/ref-changelog.md:458 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add new custom serialization tutorial section** " -"([#1622](https://github.com/adap/flower/pull/1622))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**Ajouter une nouvelle section de tutoriel sur la sérialisation " -"personnalisée** ([#1622](https://github.com/adap/flower/pull/1622))" -#: ../../source/ref-changelog.md:460 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The Flower tutorial now has a new section that covers custom " -"serialization: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-customize-the-client-pytorch.ipynb)" 
+":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"Le tutoriel sur les fleurs comporte désormais une nouvelle section qui " -"traite de la sérialisation personnalisée : [Ouvrir dans " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial/Flower-4" -"-Client-and-NumPyClient-PyTorch.ipynb)" -#: ../../source/ref-changelog.md:462 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General improvements** " -"([#1638](https://github.com/adap/flower/pull/1638), " -"[#1634](https://github.com/adap/flower/pull/1634), " -"[#1636](https://github.com/adap/flower/pull/1636), " -"[#1635](https://github.com/adap/flower/pull/1635), " -"[#1633](https://github.com/adap/flower/pull/1633), " -"[#1632](https://github.com/adap/flower/pull/1632), " -"[#1631](https://github.com/adap/flower/pull/1631), " -"[#1630](https://github.com/adap/flower/pull/1630), " -"[#1627](https://github.com/adap/flower/pull/1627), " -"[#1593](https://github.com/adap/flower/pull/1593), " -"[#1616](https://github.com/adap/flower/pull/1616), " -"[#1615](https://github.com/adap/flower/pull/1615), " -"[#1607](https://github.com/adap/flower/pull/1607), " -"[#1609](https://github.com/adap/flower/pull/1609), " -"[#1608](https://github.com/adap/flower/pull/1608), " -"[#1603](https://github.com/adap/flower/pull/1603), " -"[#1590](https://github.com/adap/flower/pull/1590), " -"[#1580](https://github.com/adap/flower/pull/1580), " -"[#1599](https://github.com/adap/flower/pull/1599), " -"[#1600](https://github.com/adap/flower/pull/1600), " -"[#1601](https://github.com/adap/flower/pull/1601), " -"[#1597](https://github.com/adap/flower/pull/1597), " -"[#1595](https://github.com/adap/flower/pull/1595), " -"[#1591](https://github.com/adap/flower/pull/1591), " -"[#1588](https://github.com/adap/flower/pull/1588), " -"[#1589](https://github.com/adap/flower/pull/1589), " -"[#1587](https://github.com/adap/flower/pull/1587), " 
-"[#1573](https://github.com/adap/flower/pull/1573), " -"[#1581](https://github.com/adap/flower/pull/1581), " -"[#1578](https://github.com/adap/flower/pull/1578), " -"[#1574](https://github.com/adap/flower/pull/1574), " -"[#1572](https://github.com/adap/flower/pull/1572), " -"[#1586](https://github.com/adap/flower/pull/1586))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**General improvements** " -"([#1638](https://github.com/adap/flower/pull/1638), " -"[#1634](https://github.com/adap/flower/pull/1634), " -"[#1636](https://github.com/adap/flower/pull/1636), " -"[#1635](https://github.com/adap/flower/pull/1635), " -"[#1633](https://github.com/adap/flower/pull/1633), " -"[#1632](https://github.com/adap/flower/pull/1632), " -"[#1631](https://github.com/adap/flower/pull/1631), " -"[#1630](https://github.com/adap/flower/pull/1630), " -"[#1627](https://github.com/adap/flower/pull/1627), " -"[#1593](https://github.com/adap/flower/pull/1593), " -"[#1616](https://github.com/adap/flower/pull/1616), " -"[#1615](https://github.com/adap/flower/pull/1615), " -"[#1607](https://github.com/adap/flower/pull/1607), " -"[#1609](https://github.com/adap/flower/pull/1609), " -"[#1608](https://github.com/adap/flower/pull/1608), " -"[#1603](https://github.com/adap/flower/pull/1603), " -"[#1590](https://github.com/adap/flower/pull/1590), " -"[#1580](https://github.com/adap/flower/pull/1580), " -"[#1599](https://github.com/adap/flower/pull/1599), " -"[#1600](https://github.com/ada" -#: ../../source/ref-changelog.md:466 -msgid "" -"**Updated documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" +#: 
flwr.server.strategy.fedprox.FedProx.configure_fit:3 of +msgid "Sends the proximal factor mu to the clients" msgstr "" -"**Mise à jour de la documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:468 ../../source/ref-changelog.md:535 -msgid "" -"As usual, the documentation has improved quite a bit. It is another step " -"in our effort to make the Flower documentation the best documentation of " -"any project. Stay tuned and as always, feel free to provide feedback!" +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 +msgid "FedTrimmedAvg" msgstr "" -"Comme d'habitude, la documentation s'est beaucoup améliorée. C'est une " -"autre étape dans notre effort pour faire de la documentation de Flower la" -" meilleure documentation de tout projet. Reste à l'écoute et comme " -"toujours, n'hésite pas à nous faire part de tes commentaires !" -#: ../../source/ref-changelog.md:474 -msgid "v1.2.0 (2023-01-13)" -msgstr "v1.2.0 (2023-01-13)" +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of +msgid "Implemented based on: https://arxiv.org/abs/1803.01498" +msgstr "" -#: ../../source/ref-changelog.md:480 -msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." -" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of +msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." msgstr "" -"adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L. 
" -"Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" -#: ../../source/ref-changelog.md:484 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce new Flower Baseline: FedAvg MNIST** " -"([#1497](https://github.com/adap/flower/pull/1497), " -"[#1552](https://github.com/adap/flower/pull/1552))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**Introduire une nouvelle fleur Référence : FedAvg MNIST** " -"([#1497](https://github.com/adap/flower/pull/1497), " -"[#1552](https://github.com/adap/flower/pull/1552))" -#: ../../source/ref-changelog.md:486 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Over the coming weeks, we will be releasing a number of new reference " -"implementations useful especially to FL newcomers. They will typically " -"revisit well known papers from the literature, and be suitable for " -"integration in your own application or for experimentation, in order to " -"deepen your knowledge of FL in general. Today's release is the first in " -"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" -"pack-fedavg-mnist-cnn/)" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"Au cours des prochaines semaines, nous publierons un certain nombre de " -"nouvelles implémentations de référence utiles en particulier pour les " -"nouveaux venus en FL. Elles revisiteront généralement des articles bien " -"connus de la littérature, et seront adaptées à l'intégration dans votre " -"propre application ou à l'expérimentation, afin d'approfondir votre " -"connaissance de FL en général. La publication d'aujourd'hui est la " -"première de cette série. 
[Lire la " -"suite.](https://flower.ai/blog/2023-01-12-fl-starter-pack-fedavg-mnist-" -"cnn/)" -#: ../../source/ref-changelog.md:488 -msgid "" -"**Improve GPU support in simulations** " -"([#1555](https://github.com/adap/flower/pull/1555))" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using trimmed average." msgstr "" -"**Améliorer la prise en charge des GPU dans les simulations** " -"([#1555](https://github.com/adap/flower/pull/1555))" -#: ../../source/ref-changelog.md:490 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" -" to improve GPU support. The update includes some of the hard-earned " -"lessons from scaling simulations in GPU cluster environments. New " -"defaults make running GPU-based simulations substantially more robust." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Le moteur client virtuel basé sur Ray (`start_simulation`) a été mis à " -"jour pour améliorer la prise en charge des GPU. La mise à jour inclut " -"certaines des leçons durement apprises lors de la mise à l'échelle des " -"simulations dans des environnements de grappes de GPU. De nouveaux " -"paramètres par défaut rendent l'exécution des simulations basées sur les " -"GPU beaucoup plus robuste." 
-#: ../../source/ref-changelog.md:492 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Improve GPU support in Jupyter Notebook tutorials** " -"([#1527](https://github.com/adap/flower/pull/1527), " -"[#1558](https://github.com/adap/flower/pull/1558))" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Améliorer la prise en charge du GPU dans les tutoriels Jupyter " -"Notebook** ([#1527](https://github.com/adap/flower/pull/1527), " -"[#1558](https://github.com/adap/flower/pull/1558))" -#: ../../source/ref-changelog.md:494 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Some users reported that Jupyter Notebooks have not always been easy to " -"use on GPU instances. We listened and made improvements to all of our " -"Jupyter notebooks! Check out the updated notebooks here:" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"Certains utilisateurs ont signalé que les carnets Jupyter n'ont pas " -"toujours été faciles à utiliser sur les instances GPU. Nous les avons " -"écoutés et avons apporté des améliorations à tous nos carnets Jupyter ! 
" -"Découvre les carnets mis à jour ici :" -#: ../../source/ref-changelog.md:496 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"[An Introduction to Federated Learning](https://flower.ai/docs/framework" -"/tutorial-get-started-with-flower-pytorch.html)" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"[Une introduction à l'apprentissage fédéré] " -"(https://flower.ai/docs/tutorial/Flower-1-Intro-to-FL-PyTorch.html)" -#: ../../source/ref-changelog.md:497 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"[Strategies in Federated Learning](https://flower.ai/docs/framework" -"/tutorial-use-a-federated-learning-strategy-pytorch.html)" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"[Stratégies d'apprentissage fédéré] " -"(https://flower.ai/docs/tutorial/Flower-2-Strategies-in-FL-PyTorch.html)" -#: ../../source/ref-changelog.md:498 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" -"-strategy-from-scratch-pytorch.html)" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"[Construire une stratégie] " -"(https://flower.ai/docs/tutorial/Flower-3-Building-a-Strategy-" -"PyTorch.html)" -#: ../../source/ref-changelog.md:499 -#, fuzzy -msgid "" -"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" -"customize-the-client-pytorch.html)" +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 +msgid "FedXgbBagging" msgstr "" -"[Client et NumPyClient] (https://flower.ai/docs/tutorial/Flower-4-Client-" -"and-NumPyClient-PyTorch.html)" -#: ../../source/ref-changelog.md:501 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce optional telemetry** " -"([#1533](https://github.com/adap/flower/pull/1533), " 
-"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**Introduire la télémétrie optionnelle** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/ref-changelog.md:503 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +#, fuzzy +msgid "Aggregate evaluation metrics using average." +msgstr "Résultats globaux de l'évaluation." + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"After a [request for " -"feedback](https://github.com/adap/flower/issues/1534) from the community," -" the Flower open-source project introduces optional collection of " -"*anonymous* usage metrics to make well-informed decisions to improve " -"Flower. Doing this enables the Flower team to understand how Flower is " -"used and what challenges users might face." +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"À la suite d'une [demande de commentaires] " -"(https://github.com/adap/flower/issues/1534) de la part de la communauté," -" le projet open-source Flower introduit la collecte optionnelle de " -"mesures d'utilisation *anonymes* afin de prendre des décisions éclairées " -"pour améliorer Flower. Cela permet à l'équipe de Flower de comprendre " -"comment Flower est utilisé et quels sont les défis auxquels les " -"utilisateurs peuvent être confrontés." 
-#: ../../source/ref-changelog.md:505 -#, fuzzy -msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users who do not want to share anonymous usage metrics. " -"[Read more.](https://flower.ai/docs/telemetry.html)." +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of +msgid "Aggregate fit results using bagging." msgstr "" -"**Flower est un cadre convivial pour l'IA collaborative et la science des" -" données.** Restant fidèle à cette déclaration, Flower permet de " -"désactiver facilement la télémétrie pour les utilisateurs qui ne " -"souhaitent pas partager des métriques d'utilisation anonymes.[Lire la " -"suite.](https://flower.ai/docs/telemetry.html)." 
-#: ../../source/ref-changelog.md:507 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce (experimental) Driver API** " -"([#1520](https://github.com/adap/flower/pull/1520), " -"[#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**([#1520](https://github.com/adap/flower/pull/1520), " -"[#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" -#: ../../source/ref-changelog.md:509 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"Flower now has a new (experimental) Driver API which will enable fully " -"programmable, async, and multi-tenant Federated Learning and Federated " -"Analytics applications. Phew, that's a lot! Going forward, the Driver API" -" will be the abstraction that many upcoming features will be built on - " -"and you can start building those things now, too." +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Flower dispose désormais d'une nouvelle API de pilote (expérimentale) qui" -" permettra de créer des applications Federated Learning et Federated " -"Analytics entièrement programmables, asynchrones et multi-tenant. Ouf, " -"c'est beaucoup ! 
À l'avenir, l'API de pilote sera l'abstraction sur " -"laquelle de nombreuses fonctionnalités à venir seront construites - et tu" -" peux commencer à construire ces choses dès maintenant, aussi." -#: ../../source/ref-changelog.md:511 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"The Driver API also enables a new execution mode in which the server runs" -" indefinitely. Multiple individual workloads can run concurrently and " -"start and stop their execution independent of the server. This is " -"especially useful for users who want to deploy Flower in production." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"L'API du pilote permet également un nouveau mode d'exécution dans lequel " -"le serveur s'exécute indéfiniment. Plusieurs charges de travail " -"individuelles peuvent s'exécuter simultanément et démarrer et arrêter " -"leur exécution indépendamment du serveur. Ceci est particulièrement utile" -" pour les utilisateurs qui souhaitent déployer Flower en production." -#: ../../source/ref-changelog.md:513 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"To learn more, check out the `mt-pytorch` code example. We look forward " -"to you feedback!" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"Pour en savoir plus, consulte l'exemple de code `mt-pytorch`. Nous " -"attendons tes commentaires avec impatience !" 
-#: ../../source/ref-changelog.md:515 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"Please note: *The Driver API is still experimental and will likely change" -" significantly over time.*" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"Remarque : *L'API du pilote est encore expérimentale et est susceptible " -"de changer de manière significative au fil du temps.*" -#: ../../source/ref-changelog.md:517 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"**Add new Federated Analytics with Pandas example** " -"([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Ajouter un nouvel exemple de Federated Analytics avec Pandas** " -"([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535))" -#: ../../source/ref-changelog.md:519 -msgid "" -"A new code example (`quickstart-pandas`) demonstrates federated analytics" -" with Pandas and Flower. You can find it here: [quickstart-" -"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" -"pandas)." +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 +msgid "FedXgbCyclic" msgstr "" -"Un nouvel exemple de code (`quickstart-pandas`) démontre l'analyse " -"fédérée avec Pandas et Flower. Tu peux le trouver ici : [quickstart-" -"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" -"pandas)." 
-#: ../../source/ref-changelog.md:521 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"**Add new strategies: Krum and MultiKrum** " -"([#1481](https://github.com/adap/flower/pull/1481))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**Ajouter de nouvelles stratégies : Krum et MultiKrum** " -"([#1481](https://github.com/adap/flower/pull/1481))" -#: ../../source/ref-changelog.md:523 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"Edoardo, a computer science student at the Sapienza University of Rome, " -"contributed a new `Krum` strategy that enables users to easily use Krum " -"and MultiKrum in their workloads." +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\," +" results\\, failures\\)" msgstr "" -"Edoardo, étudiant en informatique à l'Université Sapienza de Rome, a " -"contribué à une nouvelle stratégie `Krum` qui permet aux utilisateurs " -"d'utiliser facilement Krum et MultiKrum dans leurs charges de travail." -#: ../../source/ref-changelog.md:525 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"**Update C++ example to be compatible with Flower v1.2.0** " -"([#1495](https://github.com/adap/flower/pull/1495))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Mettre à jour l'exemple C++ pour qu'il soit compatible avec Flower " -"v1.2.0** ([#1495](https://github.com/adap/flower/pull/1495))" -#: ../../source/ref-changelog.md:527 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"The C++ code example has received a substantial update to make it " -"compatible with the latest version of Flower." 
+":py:obj:`configure_fit " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -"L'exemple de code C++ a reçu une mise à jour substantielle pour le rendre" -" compatible avec la dernière version de Flower." -#: ../../source/ref-changelog.md:529 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"**General improvements** " -"([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github.com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**Améliorations générales** " -"([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github.com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" -#: ../../source/ref-changelog.md:533 +#: 
flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"**Updated documentation** " -"([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**Documentation mise à jour** " -"([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/ref-changelog.md:537 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"One highlight is the new [first time contributor " -"guide](https://flower.ai/docs/first-time-contributors.html): if you've " -"never contributed on GitHub before, this is the perfect place to start!" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"L'un des points forts est le nouveau [guide du premier contributeur] " -"(https://flower.ai/docs/first-time-contributors.html) : si tu n'as jamais" -" contribué sur GitHub auparavant, c'est l'endroit idéal pour commencer !" 
- -#: ../../source/ref-changelog.md:543 -msgid "v1.1.0 (2022-10-31)" -msgstr "v1.1.0 (2022-10-31)" -#: ../../source/ref-changelog.md:547 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made the new version of Flower possible (in `git shortlog` order):" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"Nous aimerions **remercier tout particulièrement** tous les contributeurs" -" qui ont rendu possible la nouvelle version de Flower (dans l'ordre `git " -"shortlog`) :" -#: ../../source/ref-changelog.md:549 +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 +#, fuzzy +msgid "FedXgbNnAvg" +msgstr "DP-FedAvg" + +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of msgid "" -"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " -"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " -"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " -"`danielnugraha`, `edogab33`" +"This strategy is deprecated, but a copy of it is available in Flower " +"Baselines: " +"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." msgstr "" -"`Akis Linardos`, `Christopher S`, `Daniel J. 
Beutel`, `George`, `Jan " -"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " -"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " -"`danielnugraha`, `edogab33`" -#: ../../source/ref-changelog.md:553 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce Differential Privacy wrappers (preview)** " -"([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**Introduire les enveloppes de confidentialité différentielle (aperçu)** " -"([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" -#: ../../source/ref-changelog.md:555 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The first (experimental) preview of pluggable Differential Privacy " -"wrappers enables easy configuration and usage of differential privacy " -"(DP). The pluggable DP wrappers enable framework-agnostic **and** " -"strategy-agnostic usage of both client-side DP and server-side DP. Head " -"over to the Flower docs, a new explainer goes into more detail." +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\, " +"results\\, failures\\)" msgstr "" -"Le premier aperçu (expérimental) des wrappers enfichables de " -"confidentialité différentielle permet de configurer et d'utiliser " -"facilement la confidentialité différentielle (DP). Les wrappers DP " -"enfichables permettent une utilisation agnostique du cadre **et** de la " -"stratégie à la fois de la DP côté client et de la DP côté serveur. Va " -"voir les documents de Flower, un nouvel explicatif va plus loin dans les " -"détails." 
-#: ../../source/ref-changelog.md:557 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**New iOS CoreML code example** " -"([#1289](https://github.com/adap/flower/pull/1289))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Nouvel exemple de code CoreML pour iOS** " -"([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/ref-changelog.md:559 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Flower goes iOS! A massive new code example shows how Flower clients can " -"be built for iOS. The code example contains both Flower iOS SDK " -"components that can be used for many tasks, and one task example running " -"on CoreML." +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -"Flower passe à iOS ! Un nouvel exemple de code massif montre comment les " -"clients Flower peuvent être construits pour iOS. L'exemple de code " -"contient à la fois des composants Flower iOS SDK qui peuvent être " -"utilisés pour de nombreuses tâches, et un exemple de tâche fonctionnant " -"sur CoreML." -#: ../../source/ref-changelog.md:561 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**New FedMedian strategy** " -"([#1461](https://github.com/adap/flower/pull/1461))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**Nouvelle stratégie de FedMedian** " -"([#1461](https://github.com/adap/flower/pull/1461))" -#: ../../source/ref-changelog.md:563 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The new `FedMedian` strategy implements Federated Median (FedMedian) by " -"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"La nouvelle stratégie `FedMedian` met en œuvre Federated Median " -"(FedMedian) par [Yin et al., 2018] " -"(https://arxiv.org/pdf/1803.01498v1.pdf)." 
-#: ../../source/ref-changelog.md:565 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Log** `Client` **exceptions in Virtual Client Engine** " -"([#1493](https://github.com/adap/flower/pull/1493))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Log** `Client` **exceptions dans le moteur de client virtuel** " -"([#1493](https://github.com/adap/flower/pull/1493))" -#: ../../source/ref-changelog.md:567 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"All `Client` exceptions happening in the VCE are now logged by default " -"and not just exposed to the configured `Strategy` (via the `failures` " -"argument)." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"Toutes les exceptions `Client` qui se produisent dans le VCE sont " -"maintenant enregistrées par défaut et ne sont pas seulement exposées à la" -" `Stratégie` configurée (via l'argument `failures`)." -#: ../../source/ref-changelog.md:569 -msgid "" -"**Improve Virtual Client Engine internals** " -"([#1401](https://github.com/adap/flower/pull/1401), " -"[#1453](https://github.com/adap/flower/pull/1453))" +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 +msgid "FedYogi" msgstr "" -"**Améliorer le moteur du client virtuel** " -"([#1401](https://github.com/adap/flower/pull/1401), " -"[#1453](https://github.com/adap/flower/pull/1453))" -#: ../../source/ref-changelog.md:571 -msgid "" -"Some internals of the Virtual Client Engine have been revamped. The VCE " -"now uses Ray 2.0 under the hood, the value type of the `client_resources`" -" dictionary changed to `float` to allow fractions of resources to be " -"allocated." +#: flwr.server.strategy.fedyogi.FedYogi:32 of +msgid "Server-side learning rate. Defaults to 1e-2." 
msgstr "" -"Le VCE utilise maintenant Ray 2.0 sous le capot, le type de valeur du " -"dictionnaire `client_resources` a été remplacé par `float` pour permettre" -" l'allocation de fractions de ressources." -#: ../../source/ref-changelog.md:573 -msgid "" -"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " -"Client Engine**" +#: flwr.server.strategy.fedyogi.FedYogi:34 of +msgid "Client-side learning rate. Defaults to 0.0316." msgstr "" -"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " -"Client Engine**" -#: ../../source/ref-changelog.md:575 -msgid "" -"The Virtual Client Engine now has full support for optional `Client` (and" -" `NumPyClient`) methods." +#: flwr.server.strategy.fedyogi.FedYogi:40 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." msgstr "" -"Le moteur de client virtuel prend désormais en charge les méthodes " -"optionnelles `Client` (et `NumPyClient`)." -#: ../../source/ref-changelog.md:577 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Provide type information to packages using** `flwr` " -"([#1377](https://github.com/adap/flower/pull/1377))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -"**Fournir des informations de type aux paquets en utilisant** `flwr` " -"([#1377](https://github.com/adap/flower/pull/1377))" -#: ../../source/ref-changelog.md:579 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The package `flwr` is now bundled with a `py.typed` file indicating that " -"the package is typed. This enables typing support for projects or " -"packages that use `flwr` by enabling them to improve their code using " -"static type checkers like `mypy`." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"Le paquet `flwr` est maintenant accompagné d'un fichier `py.typed` " -"indiquant que le paquet est typé. 
Cela permet de prendre en charge le " -"typage pour les projets ou les paquets qui utilisent `flwr` en leur " -"permettant d'améliorer leur code à l'aide de vérificateurs de types " -"statiques comme `mypy`." -#: ../../source/ref-changelog.md:581 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Updated code example** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -"**Exemple de code mis à jour** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:583 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The code examples covering scikit-learn and PyTorch Lightning have been " -"updated to work with the latest version of Flower." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Les exemples de code couvrant scikit-learn et PyTorch Lightning ont été " -"mis à jour pour fonctionner avec la dernière version de Flower." 
-#: ../../source/ref-changelog.md:585 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Updated documentation** " -"([#1355](https://github.com/adap/flower/pull/1355), " -"[#1558](https://github.com/adap/flower/pull/1558), " -"[#1379](https://github.com/adap/flower/pull/1379), " -"[#1380](https://github.com/adap/flower/pull/1380), " -"[#1381](https://github.com/adap/flower/pull/1381), " -"[#1332](https://github.com/adap/flower/pull/1332), " -"[#1391](https://github.com/adap/flower/pull/1391), " -"[#1403](https://github.com/adap/flower/pull/1403), " -"[#1364](https://github.com/adap/flower/pull/1364), " -"[#1409](https://github.com/adap/flower/pull/1409), " -"[#1419](https://github.com/adap/flower/pull/1419), " -"[#1444](https://github.com/adap/flower/pull/1444), " -"[#1448](https://github.com/adap/flower/pull/1448), " -"[#1417](https://github.com/adap/flower/pull/1417), " -"[#1449](https://github.com/adap/flower/pull/1449), " -"[#1465](https://github.com/adap/flower/pull/1465), " -"[#1467](https://github.com/adap/flower/pull/1467))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**Documentation mise à jour** " -"([#1355](https://github.com/adap/flower/pull/1355), " -"[#1558](https://github.com/adap/flower/pull/1558), " -"[#1379](https://github.com/adap/flower/pull/1379), " -"[#1380](https://github.com/adap/flower/pull/1380), " -"[#1381](https://github.com/adap/flower/pull/1381), " -"[#1332](https://github.com/adap/flower/pull/1332), " -"[#1391](https://github.com/adap/flower/pull/1391), " -"[#1403](https://github.com/adap/flower/pull/1403), " -"[#1364](https://github.com/adap/flower/pull/1364), " -"[#1409](https://github.com/adap/flower/pull/1409), " -"[#1419](https://github.com/adap/flower/pull/1419), " -"[#1444](https://github.com/adap/flower/pull/1444), " -"[#1448](https://github.com/adap/flower/pull/1448), " -"[#1417](https://github.com/adap/flower/pull/1417), " 
-"[#1449](https://github.com/adap/flower/pull/1449), " -"[#1465](https://github.com/adap/flower/pull/1465), " -"[#1467](https://github.com/adap/flower/pull/1467))" -#: ../../source/ref-changelog.md:587 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"There have been so many documentation updates that it doesn't even make " -"sense to list them individually." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"Il y a eu tellement de mises à jour de la documentation que cela n'a même" -" pas de sens de les énumérer individuellement." -#: ../../source/ref-changelog.md:589 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Restructured documentation** " -"([#1387](https://github.com/adap/flower/pull/1387))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Documentation restructurée** " -"([#1387](https://github.com/adap/flower/pull/1387))" -#: ../../source/ref-changelog.md:591 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The documentation has been restructured to make it easier to navigate. " -"This is just the first step in a larger effort to make the Flower " -"documentation the best documentation of any project ever. Stay tuned!" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"La documentation a été restructurée pour faciliter la navigation. 
Ce " -"n'est que la première étape d'un effort plus important visant à faire de " -"la documentation de Flower la meilleure documentation de tous les projets" -#: ../../source/ref-changelog.md:593 -msgid "" -"**Open in Colab button** " -"([#1389](https://github.com/adap/flower/pull/1389))" +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 +msgid "Krum" msgstr "" -"**Ouvrir dans le bouton Colab** " -"([#1389](https://github.com/adap/flower/pull/1389))" -#: ../../source/ref-changelog.md:595 -msgid "" -"The four parts of the Flower Federated Learning Tutorial now come with a " -"new `Open in Colab` button. No need to install anything on your local " -"machine, you can now use and learn about Flower in your browser, it's " -"only a single click away." +#: flwr.server.strategy.krum.Krum:3 of +msgid "Implementation based on https://arxiv.org/abs/1703.02757" msgstr "" -"Les quatre parties du didacticiel d'apprentissage fédéré Flower sont " -"maintenant accompagnées d'un nouveau bouton \"Ouvrir dans Colab\". Pas " -"besoin d'installer quoi que ce soit sur ta machine locale, tu peux " -"maintenant utiliser et apprendre à connaître Flower dans ton navigateur, " -"il te suffit d'un simple clic." -#: ../../source/ref-changelog.md:597 +#: flwr.server.strategy.krum.Krum:17 of msgid "" -"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" +"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" +" that case classical Krum is applied." 
msgstr "" -"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:599 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The Flower Federated Learning Tutorial has two brand-new parts covering " -"custom strategies (still WIP) and the distinction between `Client` and " -"`NumPyClient`. The existing parts one and two have also been improved " -"(many small changes and fixes)." +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -"Le tutoriel sur l'apprentissage fédéré des fleurs a deux toutes nouvelles" -" parties couvrant les stratégies personnalisées (encore WIP) et la " -"distinction entre `Client` et `NumPyClient`. Les parties un et deux " -"existantes ont également été améliorées (beaucoup de petits changements " -"et de corrections)." 
- -#: ../../source/ref-changelog.md:605 -msgid "v1.0.0 (2022-07-28)" -msgstr "v1.0.0 (2022-07-28)" - -#: ../../source/ref-changelog.md:607 -msgid "Highlights" -msgstr "Points forts" - -#: ../../source/ref-changelog.md:609 -msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" -msgstr "Moteur de client virtuel stable** (accessible via `start_simulation`)" -#: ../../source/ref-changelog.md:610 -msgid "All `Client`/`NumPyClient` methods are now optional" -msgstr "Toutes les méthodes `Client`/`NumPyClient` sont maintenant optionnelles" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" -#: ../../source/ref-changelog.md:611 -msgid "Configurable `get_parameters`" -msgstr "`get_parameters` configurable" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of +#, fuzzy +msgid "Aggregate fit results using Krum." +msgstr "Résultats globaux de l'évaluation." 
-#: ../../source/ref-changelog.md:612 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Tons of small API cleanups resulting in a more coherent developer " -"experience" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -"Des tonnes de petits nettoyages d'API résultant en une expérience plus " -"cohérente pour les développeurs" -#: ../../source/ref-changelog.md:616 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made Flower 1.0 possible (in reverse [GitHub " -"Contributors](https://github.com/adap/flower/graphs/contributors) order):" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Nous tenons à remercier **particulièrement** tous les contributeurs qui " -"ont rendu Flower 1.0 possible (dans l'ordre inverse de [GitHub " -"Contributors](https://github.com/adap/flower/graphs/contributors)) :" -#: ../../source/ref-changelog.md:618 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"[@rtaiello](https://github.com/rtaiello), " -"[@g-pichler](https://github.com/g-pichler), [@rob-" -"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" -"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " -"[@nfnt](https://github.com/nfnt), " -"[@tatiana-s](https://github.com/tatiana-s), " -"[@TParcollet](https://github.com/TParcollet), " -"[@vballoli](https://github.com/vballoli), " -"[@negedng](https://github.com/negedng), " -"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " -"[@hei411](https://github.com/hei411), " -"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " -"[@AmitChaulwar](https://github.com/AmitChaulwar), " -"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" -"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " -"[@lbhm](https://github.com/lbhm), " 
-"[@sishtiaq](https://github.com/sishtiaq), " -"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" -"/Jueun-Park), [@architjen](https://github.com/architjen), " -"[@PratikGarai](https://github.com/PratikGarai), " -"[@mrinaald](https://github.com/mrinaald), " -"[@zliel](https://github.com/zliel), " -"[@MeiruiJiang](https://github.com/MeiruiJiang), " -"[@sancarlim](https://github.com/sancarlim), " -"[@gubertoli](https://github.com/gubertoli), " -"[@Vingt100](https://github.com/Vingt100), " -"[@MakGulati](https://github.com/MakGulati), " -"[@cozek](https://github.com/cozek), " -"[@jafermarq](https://github.com/jafermarq), " -"[@sisco0](https://github.com/sisco0), " -"[@akhilmathurs](https://github.com/akhilmathurs), " -"[@CanTuerk](https://github.com/CanTuerk), " -"[@mariaboerner1987](https://github.com/mariaboerner1987), " -"[@pedropgusmao](https://github.com/pedropgusmao), " -"[@tanertopal](https://github.com/tanertopal), " -"[@danieljanes](https://github.com/danieljanes)." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"[@rtaiello](https://github.com/rtaiello), " -"[@g-pichler](https://github.com/g-pichler), [@rob-" -"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" -"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " -"[@nfnt](https://github.com/nfnt), " -"[@tatiana-s](https://github.com/tatiana-s), " -"[@TParcollet](https://github.com/TParcollet), " -"[@vballoli](https://github.com/vballoli), " -"[@negedng](https://github.com/negedng), " -"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " -"[@hei411](https://github.com/hei411), " -"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " -"[@AmitChaulwar](https://github.com/AmitChaulwar), " -"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" -"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " -"[@lbhm](https://github.com/lbhm), " -"[@sishtiaq](https://github.com/sishtiaq), " -"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" -"/Jueun-Park), [@architjen](https://github.com/architjen), " -"[@PratikGarai](https://github.com/PratikGarai), [@mrinaald](" -#: ../../source/ref-changelog.md:622 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**All arguments must be passed as keyword arguments** " -"([#1338](https://github.com/adap/flower/pull/1338))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**Tous les arguments doivent être passés comme des arguments de mot-clé**" -" ([#1338](https://github.com/adap/flower/pull/1338))" -#: ../../source/ref-changelog.md:624 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Pass all arguments as keyword arguments, positional arguments are not " -"longer supported. 
Code that uses positional arguments (e.g., " -"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " -"for each positional argument (e.g., " -"`start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())`)." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"Le code qui utilise des arguments positionnels (par exemple, " -"``start_client(\"127.0.0.1:8080\", FlowerClient())`) doit ajouter le mot-" -"clé pour chaque argument positionnel (par exemple, " -"``start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())`)." -#: ../../source/ref-changelog.md:626 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce configuration object** `ServerConfig` **in** `start_server` " -"**and** `start_simulation` " -"([#1317](https://github.com/adap/flower/pull/1317))" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Introduire l'objet de configuration** `ServerConfig` **dans** " -"`start_server` **et** `start_simulation` " -"([#1317](https://github.com/adap/flower/pull/1317))" -#: ../../source/ref-changelog.md:628 +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 +#, fuzzy +msgid "QFedAvg" +msgstr "DP-FedAvg" + +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " -"600.0}`, `start_server` and `start_simulation` now expect a configuration" -" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" -" arguments that as the previous config dict, but it makes writing type-" -"safe code easier and the default parameters values more transparent." 
+":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -"Au lieu d'un dictionnaire de configuration `{\"num_rounds\" : 3, " -"\"round_timeout\" : 600.0}`, `start_server` et `start_simulation` " -"attendent maintenant un objet de configuration de type " -"`flwr.server.ServerConfig`. `ServerConfig` prend les mêmes arguments que " -"le dict de configuration précédent, mais il rend l'écriture de code " -"sécurisé plus facile et les valeurs des paramètres par défaut plus " -"transparentes." -#: ../../source/ref-changelog.md:630 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**Rename built-in strategy parameters for clarity** " -"([#1334](https://github.com/adap/flower/pull/1334))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**Renommer les paramètres de la stratégie intégrée pour plus de clarté** " -"([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/ref-changelog.md:632 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"The following built-in strategy parameters were renamed to improve " -"readability and consistency with other API's:" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -"Les paramètres de stratégie intégrés suivants ont été renommés pour " -"améliorer la lisibilité et la cohérence avec d'autres API :" - -#: ../../source/ref-changelog.md:634 -msgid "`fraction_eval` --> `fraction_evaluate`" -msgstr "`fraction_eval` --> `fraction_evaluate`" - -#: ../../source/ref-changelog.md:635 -msgid "`min_eval_clients` --> `min_evaluate_clients`" -msgstr "`min_eval_clients` --> `min_evaluate_clients`" - -#: ../../source/ref-changelog.md:636 -msgid "`eval_fn` --> `evaluate_fn`" -msgstr "`eval_fn` --> `evaluate_fn`" -#: ../../source/ref-changelog.md:638 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**Update default arguments of built-in 
strategies** " -"([#1278](https://github.com/adap/flower/pull/1278))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Mettre à jour les arguments par défaut des stratégies intégrées** " -"([#1278](https://github.com/adap/flower/pull/1278))" -#: ../../source/ref-changelog.md:640 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"All built-in strategies now use `fraction_fit=1.0` and " -"`fraction_evaluate=1.0`, which means they select *all* currently " -"available clients for training and evaluation. Projects that relied on " -"the previous default values can get the previous behaviour by " -"initializing the strategy in the following way:" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"Toutes les stratégies intégrées utilisent désormais `fraction_fit=1.0` et" -" `fraction_evaluate=1.0`, ce qui signifie qu'elles sélectionnent *tous* " -"les clients actuellement disponibles pour l'entraînement et l'évaluation." 
-" Les projets qui s'appuyaient sur les valeurs par défaut précédentes " -"peuvent retrouver le comportement antérieur en initialisant la stratégie " -"de la manière suivante :" - -#: ../../source/ref-changelog.md:642 -msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" -msgstr "`stratégie = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" -#: ../../source/ref-changelog.md:644 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add** `server_round` **to** `Strategy.evaluate` " -"([#1334](https://github.com/adap/flower/pull/1334))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**Ajouter** `server_round` **à** `Strategy.evaluate` " -"([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/ref-changelog.md:646 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"The `Strategy` method `evaluate` now receives the current round of " -"federated learning/evaluation as the first parameter." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"La méthode `Stratégie` `évaluer` reçoit maintenant le cycle actuel " -"d'apprentissage/évaluation fédéré comme premier paramètre." 
-#: ../../source/ref-changelog.md:648 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " -"([#1334](https://github.com/adap/flower/pull/1334))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Ajouter** `server_round` **et** `config` **paramètres à** `evaluate_fn`" -" ([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/ref-changelog.md:650 +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 +#, fuzzy +msgid "Strategy" +msgstr "stratégie.du.serveur" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " -"three parameters: (1) The current round of federated learning/evaluation " -"(`server_round`), (2) the model parameters to evaluate (`parameters`), " -"and (3) a config dictionary (`config`)." +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"Le `evaluate_fn` passé aux stratégies intégrées comme `FedAvg` prend " -"maintenant trois paramètres : (1) le cycle actuel " -"d'apprentissage/évaluation fédéré (`server_round`), (2) les paramètres du" -" modèle à évaluer (`parameters`), et (3) un dictionnaire de configuration" -" (`config`)." -#: ../../source/ref-changelog.md:652 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +#, fuzzy +msgid "Aggregate evaluation results." +msgstr "Résultats globaux de l'évaluation." 
+ +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/ref-changelog.md:654 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of +#, fuzzy +msgid "Aggregate training results." +msgstr "Résultats globaux de l'évaluation." + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " -"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " -"current round of federated learning/evaluation as their first parameter. " -"To improve reaability and avoid confusion with *random*, this parameter " -"has been renamed from `rnd` to `server_round`." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Plusieurs méthodes et fonctions de Flower (`evaluate_fn`, " -"`configure_fit`, `aggregate_fit`, `configure_evaluate`, " -"`aggregate_evaluate`) reçoivent le cycle actuel " -"d'apprentissage/évaluation fédéré comme premier paramètre. Pour améliorer" -" la fiabilité et éviter la confusion avec *random*, ce paramètre a été " -"renommé de `rnd` à `server_round`." 
-#: ../../source/ref-changelog.md:656 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"**Move** `flwr.dataset` **to** `flwr_baselines` " -"([#1273](https://github.com/adap/flower/pull/1273))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Déplacer** `flwr.dataset` **vers** `flwr_baselines` " -"([#1273](https://github.com/adap/flower/pull/1273))" - -#: ../../source/ref-changelog.md:658 -msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." -msgstr "Le paquet expérimental `flwr.dataset` a été migré vers Flower Baselines." -#: ../../source/ref-changelog.md:660 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"**Remove experimental strategies** " -"([#1280](https://github.com/adap/flower/pull/1280))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**Supprimer les stratégies expérimentales** " -"([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/ref-changelog.md:662 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.evaluate:1 of +#, fuzzy +msgid "Evaluate the current model parameters." +msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " -"`FedFSv1`)." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"Supprimer les stratégies expérimentales non maintenues (`FastAndSlow`, " -"`FedFSv0`, `FedFSv1`)." -#: ../../source/ref-changelog.md:664 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of +#, fuzzy +msgid "Initialize the (global) model parameters." 
+msgstr "Initialise le modèle global" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of msgid "" -"**Rename** `Weights` **to** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " +"one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." msgstr "" -"**Rename** `Weights` **to** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/ref-changelog.md:666 -msgid "" -"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " -"capture what this type is all about." +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of +msgid "Exceptions that occurred while the server was waiting for client updates." msgstr "" -"`flwr.common.Weights` a été renommé en `flwr.common.NDArys` pour mieux " -"rendre compte de la nature de ce type." -#: ../../source/ref-changelog.md:668 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of msgid "" -"**Remove antiquated** `force_final_distributed_eval` **from** " -"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"**aggregation_result** -- The aggregated evaluation result. Aggregation " +"typically uses some variant of a weighted average." 
msgstr "" -"**Supprimez l'ancien** `force_final_distributed_eval` **de** " -"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/ref-changelog.md:670 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of msgid "" -"The `start_server` parameter `force_final_distributed_eval` has long been" -" a historic artefact, in this release it is finally gone for good." +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" +" one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." msgstr "" -"Le paramètre `start_server` `force_final_distributed_eval` a longtemps " -"été un artefact historique, dans cette version il a finalement disparu " -"pour de bon." -#: ../../source/ref-changelog.md:672 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of msgid "" -"**Make** `get_parameters` **configurable** " -"([#1242](https://github.com/adap/flower/pull/1242))" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the new global model parameters (i.e., it will replace the " +"previous parameters with the ones returned from this method). If `None` " +"is returned (e.g., because there were only failures and no viable " +"results) then the server will no update the previous model parameters, " +"the updates received in this round are discarded, and the global model " +"parameters remain the same." 
msgstr "" -"**Make** `get_parameters` **configurable** " -"([#1242](https://github.com/adap/flower/pull/1242))" -#: ../../source/ref-changelog.md:674 +#: flwr.server.strategy.strategy.Strategy.evaluate:3 of msgid "" -"The `get_parameters` method now accepts a configuration dictionary, just " -"like `get_properties`, `fit`, and `evaluate`." +"This function can be used to perform centralized (i.e., server-side) " +"evaluation of model parameters." msgstr "" -"La méthode `get_parameters` accepte maintenant un dictionnaire de " -"configuration, tout comme `get_properties`, `fit`, et `evaluate`." -#: ../../source/ref-changelog.md:676 +#: flwr.server.strategy.strategy.Strategy.evaluate:11 of msgid "" -"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " -"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" +"**evaluation_result** -- The evaluation result, usually a Tuple " +"containing loss and a dictionary containing task-specific metrics (e.g., " +"accuracy)." msgstr "" -"**Remplace** `num_rounds` **dans** `start_simulation` **avec le nouveau**" -" `config` **paramètre** " -"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/ref-changelog.md:678 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of msgid "" -"The `start_simulation` function now accepts a configuration dictionary " -"`config` instead of the `num_rounds` integer. This improves the " -"consistency between `start_simulation` and `start_server` and makes " -"transitioning between the two easier." +"**parameters** -- If parameters are returned, then the server will treat " +"these as the initial global model parameters." msgstr "" -"La fonction `start_simulation` accepte maintenant un dictionnaire de " -"configuration `config` au lieu de l'entier `num_rounds`. Cela améliore la" -" cohérence entre `start_simulation` et `start_server` et facilite la " -"transition entre les deux." 
-#: ../../source/ref-changelog.md:682 +#: ../../source/ref-api/flwr.server.workflow.rst:2 +#, fuzzy +msgid "workflow" +msgstr "Flux de travail" + +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -"**Support Python 3.10** " -"([#1320](https://github.com/adap/flower/pull/1320))" +":py:obj:`DefaultWorkflow `\\ " +"\\(\\[fit\\_workflow\\, ...\\]\\)" msgstr "" -"**Support Python 3.10** " -"([#1320](https://github.com/adap/flower/pull/1320))" -#: ../../source/ref-changelog.md:684 -msgid "" -"The previous Flower release introduced experimental support for Python " -"3.10, this release declares Python 3.10 support as stable." +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of +msgid "Default workflow in Flower." msgstr "" -"La version précédente de Flower a introduit la prise en charge " -"expérimentale de Python 3.10, cette version déclare la prise en charge de" -" Python 3.10 comme stable." -#: ../../source/ref-changelog.md:686 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -"**Make all** `Client` **and** `NumPyClient` **methods optional** " -"([#1260](https://github.com/adap/flower/pull/1260), " -"[#1277](https://github.com/adap/flower/pull/1277))" +":py:obj:`SecAggPlusWorkflow `\\ " +"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" msgstr "" -"**Rendre toutes les **méthodes `Client` **et** `NumPyClient` " -"**facultatives** ([#1260](https://github.com/adap/flower/pull/1260), " -"[#1277](https://github.com/adap/flower/pull/1277))" -#: ../../source/ref-changelog.md:688 -msgid "" -"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " -"`fit`, and `evaluate` are all optional. This enables writing clients that" -" implement, for example, only `fit`, but no other method. No need to " -"implement `evaluate` when using centralized evaluation!" 
+#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +msgid "The workflow for the SecAgg+ protocol." msgstr "" -"Les méthodes `Client`/`NumPyClient` `get_properties`, `get_parameters`, " -"`fit`, et `evaluate` sont toutes optionnelles. Cela permet d'écrire des " -"clients qui n'implémentent, par exemple, que `fit`, mais aucune autre " -"méthode. Pas besoin d'implémenter `evaluate` quand on utilise " -"l'évaluation centralisée !" -#: ../../source/ref-changelog.md:690 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -"**Enable passing a** `Server` **instance to** `start_simulation` " -"([#1281](https://github.com/adap/flower/pull/1281))" +":py:obj:`SecAggWorkflow `\\ " +"\\(reconstruction\\_threshold\\, \\*\\)" msgstr "" -"**Autoriser le passage d'une **instance `Server` à** `start_simulation` " -"([#1281](https://github.com/adap/flower/pull/1281))" - -#: ../../source/ref-changelog.md:692 -msgid "" -"Similar to `start_server`, `start_simulation` now accepts a full `Server`" -" instance. This enables users to heavily customize the execution of " -"eperiments and opens the door to running, for example, async FL using the" -" Virtual Client Engine." -msgstr "" -"Comme pour `start_server`, `start_simulation` accepte maintenant une " -"instance complète de `Server`. Cela permet aux utilisateurs de " -"personnaliser fortement l'exécution des expériences et ouvre la porte à " -"l'exécution, par exemple, de FL asynchrones à l'aide du moteur de client " -"virtuel." 
- -#: ../../source/ref-changelog.md:694 -msgid "" -"**Update code examples** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" -msgstr "" -"**Mettre à jour les exemples de code** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:696 -msgid "" -"Many code examples received small or even large maintenance updates, " -"among them are" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +msgid "The workflow for the SecAgg protocol." msgstr "" -"De nombreux exemples de code ont reçu de petites ou même de grandes mises" -" à jour de maintenance" - -#: ../../source/ref-changelog.md:698 -msgid "`scikit-learn`" -msgstr "`scikit-learn`" - -#: ../../source/ref-changelog.md:699 -msgid "`simulation_pytorch`" -msgstr "`simulation_pytorch`" - -#: ../../source/ref-changelog.md:700 -msgid "`quickstart_pytorch`" -msgstr "`quickstart_pytorch` (démarrage rapide)" - -#: ../../source/ref-changelog.md:701 -msgid "`quickstart_simulation`" -msgstr "`quickstart_simulation`" -#: ../../source/ref-changelog.md:702 -msgid "`quickstart_tensorflow`" -msgstr "`quickstart_tensorflow`" +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 +#, fuzzy +msgid "DefaultWorkflow" +msgstr "Flux de travail" -#: ../../source/ref-changelog.md:703 -msgid "`advanced_tensorflow`" -msgstr "`advanced_tensorflow` (en anglais)" +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 +#, fuzzy +msgid "SecAggPlusWorkflow" +msgstr "Flux de travail" -#: ../../source/ref-changelog.md:705 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 +#: of msgid "" -"**Remove the obsolete simulation example** " 
-"([#1328](https://github.com/adap/flower/pull/1328))" +"The SecAgg+ protocol ensures the secure summation of integer vectors " +"owned by multiple parties, without accessing any individual integer " +"vector. This workflow allows the server to compute the weighted average " +"of model parameters across all clients, ensuring individual contributions" +" remain private. This is achieved by clients sending both, a weighting " +"factor and a weighted version of the locally updated parameters, both of " +"which are masked for privacy. Specifically, each client uploads \"[w, w *" +" params]\" with masks, where weighting factor 'w' is the number of " +"examples ('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." msgstr "" -"**Supprime l'exemple de simulation obsolète** " -"([#1328](https://github.com/adap/flower/pull/1328))" -#: ../../source/ref-changelog.md:707 -msgid "" -"Removes the obsolete `simulation` example and renames " -"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " -"naming of `simulation_pytorch`" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 +#: of +msgid "The protocol involves four main stages:" msgstr "" -"Supprime l'exemple obsolète `simulation` et renomme " -"`quickstart_simulation` en `simulation_tensorflow` pour qu'il corresponde" -" au nom de `simulation_pytorch`" -#: ../../source/ref-changelog.md:709 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:16 +#: of msgid "" -"**Update documentation** " -"([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " 
-"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" +"'setup': Send SecAgg+ configuration to clients and collect their public " +"keys." msgstr "" -"**Mise à jour de la documentation** " -"([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" -#: ../../source/ref-changelog.md:711 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 +#: of msgid "" -"One substantial documentation update fixes multiple smaller rendering " -"issues, makes titles more succinct to improve navigation, removes a " -"deprecated library, updates documentation dependencies, includes the " -"`flwr.common` module in the API reference, includes support for markdown-" -"based documentation, migrates the changelog from `.rst` to `.md`, and " -"fixes a number of smaller details!" +"'share keys': Broadcast public keys among clients and collect encrypted " +"secret key shares." 
msgstr "" -"Une mise à jour substantielle de la documentation corrige plusieurs " -"petits problèmes de rendu, rend les titres plus succincts pour améliorer " -"la navigation, supprime une bibliothèque obsolète, met à jour les " -"dépendances de la documentation, inclut le module `flwr.common` dans la " -"référence de l'API, inclut le support de la documentation basée sur le " -"markdown, migre le changelog de `.rst` vers `.md`, et corrige un certain " -"nombre de détails plus petits !" - -#: ../../source/ref-changelog.md:713 ../../source/ref-changelog.md:768 -#: ../../source/ref-changelog.md:837 ../../source/ref-changelog.md:876 -msgid "**Minor updates**" -msgstr "**Mises à jour mineures**" -#: ../../source/ref-changelog.md:715 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:19 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:19 +#: of msgid "" -"Add round number to fit and evaluate log messages " -"([#1266](https://github.com/adap/flower/pull/1266))" +"'collect masked vectors': Forward encrypted secret key shares to target " +"clients and collect masked model parameters." msgstr "" -"Ajoute un chiffre rond pour ajuster et évaluer les messages du journal " -"([#1266](https://github.com/adap/flower/pull/1266))" -#: ../../source/ref-changelog.md:716 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:21 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:21 +#: of msgid "" -"Add secure gRPC connection to the `advanced_tensorflow` code example " -"([#847](https://github.com/adap/flower/pull/847))" +"'unmask': Collect secret key shares to decrypt and aggregate the model " +"parameters." 
msgstr "" -"Ajouter une connexion gRPC sécurisée à l'exemple de code " -"`advanced_tensorflow` ([#847](https://github.com/adap/flower/pull/847))" -#: ../../source/ref-changelog.md:717 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:23 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:23 +#: of msgid "" -"Update developer tooling " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" +"Only the aggregated model parameters are exposed and passed to " +"`Strategy.aggregate_fit`, ensuring individual data privacy." msgstr "" -"Mettre à jour les outils de développement " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:718 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:26 +#: of msgid "" -"Rename ProtoBuf messages to improve consistency " -"([#1214](https://github.com/adap/flower/pull/1214), " -"[#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"The number of shares into which each client's private key is split under " +"the SecAgg+ protocol. If specified as a float, it represents the " +"proportion of all selected clients, and the number of shares will be set " +"dynamically in the run time. A private key can be reconstructed from " +"these shares, allowing for the secure aggregation of model updates. Each " +"client sends one share to each of its neighbors while retaining one." 
msgstr "" -"Renomme les messages ProtoBuf pour améliorer la cohérence " -"([#1214](https://github.com/adap/flower/pull/1214), " -"[#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" - -#: ../../source/ref-changelog.md:720 -msgid "v0.19.0 (2022-05-18)" -msgstr "v0.19.0 (2022-05-18)" -#: ../../source/ref-changelog.md:724 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:26 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:33 +#: of msgid "" -"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " -"([#919](https://github.com/adap/flower/pull/919), " -"[#1127](https://github.com/adap/flower/pull/1127), " -"[#914](https://github.com/adap/flower/pull/914))" +"The minimum number of shares required to reconstruct a client's private " +"key, or, if specified as a float, it represents the proportion of the " +"total number of shares needed for reconstruction. This threshold ensures " +"privacy by allowing for the recovery of contributions from dropped " +"clients during aggregation, without compromising individual client data." msgstr "" -"**Flower Baselines (preview) : FedOpt, FedBN, FedAvgM** " -"([#919](https://github.com/adap/flower/pull/919), " -"[#1127](https://github.com/adap/flower/pull/1127), " -"[#914](https://github.com/adap/flower/pull/914))" -#: ../../source/ref-changelog.md:726 -#, fuzzy +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:32 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:39 +#: of msgid "" -"The first preview release of Flower Baselines has arrived! We're " -"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " -"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " -"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). 
" -"With this first preview release we're also inviting the community to " -"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" -"contribute-baselines.html)." +"The maximum value of the weight that can be assigned to any single " +"client's update during the weighted average calculation on the server " +"side, e.g., in the FedAvg algorithm." msgstr "" -"La première version préliminaire de Flower Baselines est arrivée ! Nous " -"démarrons Flower Baselines avec des implémentations de FedOpt (FedYogi, " -"FedAdam, FedAdagrad), FedBN, et FedAvgM. Consultez la documentation sur " -"l'utilisation de [Flower Baselines](https://flower.ai/docs/using-" -"baselines.html). Avec cette première version préliminaire, nous invitons " -"également la communauté à [contribuer à leurs propres lignes de " -"base](https://flower.ai/docs/baselines/how-to-contribute-baselines.html)." -#: ../../source/ref-changelog.md:728 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:36 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:43 +#: of msgid "" -"**C++ client SDK (preview) and code example** " -"([#1111](https://github.com/adap/flower/pull/1111))" +"The range within which model parameters are clipped before quantization. " +"This parameter ensures each model parameter is bounded within " +"[-clipping_range, clipping_range], facilitating quantization." msgstr "" -"**SDK client C++ (aperçu) et exemple de code** " -"([#1111](https://github.com/adap/flower/pull/1111))" -#: ../../source/ref-changelog.md:730 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:40 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:47 +#: of msgid "" -"Preview support for Flower clients written in C++. The C++ preview " -"includes a Flower client SDK and a quickstart code example that " -"demonstrates a simple C++ client using the SDK." 
+"The size of the range into which floating-point model parameters are " +"quantized, mapping each parameter to an integer in [0, " +"quantization_range-1]. This facilitates cryptographic operations on the " +"model updates." msgstr "" -"L'aperçu C++ comprend un SDK pour les clients Flower et un exemple de " -"code de démarrage rapide qui démontre un client C++ simple utilisant le " -"SDK." -#: ../../source/ref-changelog.md:732 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:44 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:51 +#: of msgid "" -"**Add experimental support for Python 3.10 and Python 3.11** " -"([#1135](https://github.com/adap/flower/pull/1135))" +"The range of values from which random mask entries are uniformly sampled " +"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " +"Please use 2**n values for `modulus_range` to prevent overflow issues." msgstr "" -"**Ajouter la prise en charge expérimentale de Python 3.10 et Python " -"3.11** ([#1135](https://github.com/adap/flower/pull/1135))" -#: ../../source/ref-changelog.md:734 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:48 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:55 +#: of msgid "" -"Python 3.10 is the latest stable release of Python and Python 3.11 is due" -" to be released in October. This Flower release adds experimental support" -" for both Python versions." +"The timeout duration in seconds. If specified, the workflow will wait for" +" replies for this duration each time. If `None`, there is no time limit " +"and the workflow will wait until replies for all messages are received." msgstr "" -"Python 3.10 est la dernière version stable de Python et Python 3.11 " -"devrait sortir en octobre. Cette version de Flower ajoute une prise en " -"charge expérimentale pour les deux versions de Python." 
-#: ../../source/ref-changelog.md:736 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:62 +#: of msgid "" -"**Aggregate custom metrics through user-provided functions** " -"([#1144](https://github.com/adap/flower/pull/1144))" +"Generally, higher `num_shares` means more robust to dropouts while " +"increasing the computational costs; higher `reconstruction_threshold` " +"means better privacy guarantees but less tolerance to dropouts." msgstr "" -"**Agréger des mesures personnalisées grâce à des fonctions fournies par " -"l'utilisateur** ([#1144](https://github.com/adap/flower/pull/1144))" -#: ../../source/ref-changelog.md:738 -msgid "" -"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" -" customize the strategy. Built-in strategies support two new arguments, " -"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " -"allow passing custom metric aggregation functions." +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 +#: of +msgid "Too large `max_weight` may compromise the precision of the quantization." msgstr "" -"Les stratégies intégrées prennent en charge deux nouveaux arguments, " -"`fit_metrics_aggregation_fn` et `evaluate_metrics_aggregation_fn`, qui " -"permettent de passer des fonctions d'agrégation de métriques " -"personnalisées." -#: ../../source/ref-changelog.md:740 -msgid "" -"**User-configurable round timeout** " -"([#1162](https://github.com/adap/flower/pull/1162))" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 +#: of +msgid "`modulus_range` must be 2**n and larger than `quantization_range`." 
msgstr "" -"**Temps d'attente configurable par l'utilisateur** " -"([#1162](https://github.com/adap/flower/pull/1162))" -#: ../../source/ref-changelog.md:742 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:67 +#: of msgid "" -"A new configuration value allows the round timeout to be set for " -"`start_server` and `start_simulation`. If the `config` dictionary " -"contains a `round_timeout` key (with a `float` value in seconds), the " -"server will wait *at least* `round_timeout` seconds before it closes the " -"connection." +"When `num_shares` is a float, it is interpreted as the proportion of all " +"selected clients, and hence the number of shares will be determined in " +"the runtime. This allows for dynamic adjustment based on the total number" +" of participating clients." msgstr "" -"Si le dictionnaire `config` contient une clé `round_timeout` (avec une " -"valeur `float` en secondes), le serveur attendra *au moins* " -"`round_timeout` secondes avant de fermer la connexion." -#: ../../source/ref-changelog.md:744 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:70 +#: of msgid "" -"**Enable both federated evaluation and centralized evaluation to be used " -"at the same time in all built-in strategies** " -"([#1091](https://github.com/adap/flower/pull/1091))" +"Similarly, when `reconstruction_threshold` is a float, it is interpreted " +"as the proportion of the number of shares needed for the reconstruction " +"of a private key. This feature enables flexibility in setting the " +"security threshold relative to the number of distributed shares." 
msgstr "" -"**Permettre l'utilisation simultanée de l'évaluation fédérée et de " -"l'évaluation centralisée dans toutes les stratégies intégrées** " -"([#1091](https://github.com/adap/flower/pull/1091))" -#: ../../source/ref-changelog.md:746 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:74 +#: of msgid "" -"Built-in strategies can now perform both federated evaluation (i.e., " -"client-side) and centralized evaluation (i.e., server-side) in the same " -"round. Federated evaluation can be disabled by setting `fraction_eval` to" -" `0.0`." +"`num_shares`, `reconstruction_threshold`, and the quantization parameters" +" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg+" +" protocol." msgstr "" -"Les stratégies intégrées peuvent maintenant effectuer une évaluation " -"fédérée (c'est-à-dire côté client) et une évaluation centralisée " -"(c'est-à-dire côté serveur) dans le même tour. L'évaluation fédérée peut " -"être désactivée en réglant `fraction_eval` sur `0.0`." 
-#: ../../source/ref-changelog.md:748 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**Two new Jupyter Notebook tutorials** " -"([#1141](https://github.com/adap/flower/pull/1141))" +":py:obj:`collect_masked_vectors_stage " +"`\\" +" \\(driver\\, ...\\)" msgstr "" -"**Deux nouveaux tutoriels Jupyter Notebook** " -"([#1141](https://github.com/adap/flower/pull/1141))" -#: ../../source/ref-changelog.md:750 -msgid "" -"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " -"basic and intermediate Flower features:" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "Execute the 'collect masked vectors' stage." msgstr "" -"Deux tutoriels Jupyter Notebook (compatibles avec Google Colab) " -"expliquent les fonctionnalités de base et intermédiaires de Flower :" -#: ../../source/ref-changelog.md:752 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"*An Introduction to Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" -"-Intro-to-FL-PyTorch.ipynb)" +":py:obj:`setup_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -"*Introduction à l'apprentissage fédéré* : [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" -"-Intro-to-FL-PyTorch.ipynb)" -#: ../../source/ref-changelog.md:754 -msgid "" -"*Using Strategies in Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" -"-Strategies-in-FL-PyTorch.ipynb)" +#: 
flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 +#: of +msgid "Execute the 'setup' stage." msgstr "" -"*Utiliser des stratégies dans l'apprentissage fédéré* : [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" -"-Strategies-in-FL-PyTorch.ipynb)" -#: ../../source/ref-changelog.md:756 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " -"([#1076](https://github.com/adap/flower/pull/1076))" +":py:obj:`share_keys_stage " +"`\\ " +"\\(driver\\, context\\, state\\)" msgstr "" -"**Nouvelle stratégie FedAvgM (Federated Averaging with Server Momentum)**" -" ([#1076](https://github.com/adap/flower/pull/1076))" -#: ../../source/ref-changelog.md:758 -#, fuzzy -msgid "" -"The new `FedAvgM` strategy implements Federated Averaging with Server " -"Momentum \\[Hsu et al., 2019\\]." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 +#: of +msgid "Execute the 'share keys' stage." msgstr "" -"La nouvelle stratégie `FedAvgM` met en œuvre la moyenne fédérée avec le " -"momentum du serveur [Hsu et al., 2019]." 
-#: ../../source/ref-changelog.md:760 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**New advanced PyTorch code example** " -"([#1007](https://github.com/adap/flower/pull/1007))" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -"**Nouvel exemple de code PyTorch avancé** " -"([#1007](https://github.com/adap/flower/pull/1007))" -#: ../../source/ref-changelog.md:762 -msgid "" -"A new code example (`advanced_pytorch`) demonstrates advanced Flower " -"concepts with PyTorch." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 +#: of +msgid "Execute the 'unmask' stage." msgstr "" -"Un nouvel exemple de code (`advanced_pytorch`) démontre des concepts de " -"fleur avancés avec PyTorch." -#: ../../source/ref-changelog.md:764 +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 +#, fuzzy +msgid "SecAggWorkflow" +msgstr "Flux de travail" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of msgid "" -"**New JAX code example** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" +"Bases: " +":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" msgstr "" -"**Nouvel exemple de code JAX** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" -#: ../../source/ref-changelog.md:766 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of msgid "" -"A new code example (`jax_from_centralized_to_federated`) shows federated " -"learning with JAX and Flower." 
+"The SecAgg protocol ensures the secure summation of integer vectors owned" +" by multiple parties, without accessing any individual integer vector. " +"This workflow allows the server to compute the weighted average of model " +"parameters across all clients, ensuring individual contributions remain " +"private. This is achieved by clients sending both, a weighting factor and" +" a weighted version of the locally updated parameters, both of which are " +"masked for privacy. Specifically, each client uploads \"[w, w * params]\"" +" with masks, where weighting factor 'w' is the number of examples " +"('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." msgstr "" -"Un nouvel exemple de code (`jax_from_centralized_to_federated`) montre " -"l'apprentissage fédéré avec JAX et Flower." -#: ../../source/ref-changelog.md:770 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:16 of msgid "" -"New option to keep Ray running if Ray was already initialized in " -"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" +"'setup': Send SecAgg configuration to clients and collect their public " +"keys." msgstr "" -"Nouvelle option pour continuer à faire fonctionner Ray si Ray a déjà été " -"initialisé dans `start_simulation` " -"([#1177](https://github.com/adap/flower/pull/1177))" -#: ../../source/ref-changelog.md:771 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:55 of msgid "" -"Add support for custom `ClientManager` as a `start_simulation` parameter " -"([#1171](https://github.com/adap/flower/pull/1171))" +"Each client's private key is split into N shares under the SecAgg " +"protocol, where N is the number of selected clients." 
msgstr "" -"Ajout de la prise en charge d'un `ClientManager` personnalisé comme " -"paramètre de `start_simulation` " -"([#1171](https://github.com/adap/flower/pull/1171))" -#: ../../source/ref-changelog.md:772 -#, fuzzy +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:57 of msgid "" -"New documentation for [implementing " -"strategies](https://flower.ai/docs/framework/how-to-implement-" -"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " -"[#1175](https://github.com/adap/flower/pull/1175))" +"Generally, higher `reconstruction_threshold` means better privacy " +"guarantees but less tolerance to dropouts." msgstr "" -"Nouvelle documentation pour [mettre en œuvre des " -"stratégies](https://flower.ai/docs/framework/how-to-implement-" -"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " -"[#1175](https://github.com/adap/flower/pull/1175))" -#: ../../source/ref-changelog.md:773 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:61 of msgid "" -"New mobile-friendly documentation theme " -"([#1174](https://github.com/adap/flower/pull/1174))" +"When `reconstruction_threshold` is a float, it is interpreted as the " +"proportion of the number of all selected clients needed for the " +"reconstruction of a private key. This feature enables flexibility in " +"setting the security threshold relative to the number of selected " +"clients." 
msgstr "" -"Nouveau thème de documentation adapté aux mobiles " -"([#1174](https://github.com/adap/flower/pull/1174))" -#: ../../source/ref-changelog.md:774 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:65 of msgid "" -"Limit version range for (optional) `ray` dependency to include only " -"compatible releases (`>=1.9.2,<1.12.0`) " -"([#1205](https://github.com/adap/flower/pull/1205))" +"`reconstruction_threshold`, and the quantization parameters " +"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg " +"protocol." msgstr "" -"Limite la plage de versions pour la dépendance (optionnelle) `ray` pour " -"n'inclure que les versions compatibles (`>=1.9.2,<1.12.0`) " -"([#1205](https://github.com/adap/flower/pull/1205))" -#: ../../source/ref-changelog.md:778 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**Remove deprecated support for Python 3.6** " -"([#871](https://github.com/adap/flower/pull/871))" +":py:obj:`collect_masked_vectors_stage " +"`\\ " +"\\(driver\\, ...\\)" msgstr "" -"**Supprime la prise en charge obsolète de Python 3.6** " -"([#871](https://github.com/adap/flower/pull/871))" -#: ../../source/ref-changelog.md:779 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**Remove deprecated KerasClient** " -"([#857](https://github.com/adap/flower/pull/857))" +":py:obj:`setup_stage `\\" +" \\(driver\\, context\\, state\\)" msgstr "" -"**Supprimez KerasClient** " -"([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:780 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**Remove deprecated no-op extra installs** " 
-"([#973](https://github.com/adap/flower/pull/973))" +":py:obj:`share_keys_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -"**Supprimer les installations supplémentaires no-op dépréciées** " -"([#973](https://github.com/adap/flower/pull/973))" -#: ../../source/ref-changelog.md:781 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " -"([#869](https://github.com/adap/flower/pull/869))" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -"**Supprimez les champs proto obsolètes de** `FitRes` **et** `EvaluateRes`" -" ([#869](https://github.com/adap/flower/pull/869))" -#: ../../source/ref-changelog.md:782 -msgid "" -"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " -"([#1107](https://github.com/adap/flower/pull/1107))" -msgstr "" -"**Supprime la stratégie QffedAvg (remplacée par QFedAvg)** " -"([#1107](https://github.com/adap/flower/pull/1107))" +#: ../../source/ref-api/flwr.simulation.rst:2 +#, fuzzy +msgid "simulation" +msgstr "Simulation de moniteur" -#: ../../source/ref-changelog.md:783 +#: ../../source/ref-api/flwr.simulation.rst:18::1 msgid "" -"**Remove deprecated DefaultStrategy strategy** " -"([#1142](https://github.com/adap/flower/pull/1142))" +":py:obj:`run_simulation `\\ " +"\\(server\\_app\\, client\\_app\\, ...\\)" msgstr "" -"**Supprime la stratégie DefaultStrategy qui est obsolète** " -"([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/ref-changelog.md:784 -msgid "" -"**Remove deprecated support for eval_fn accuracy return value** " -"([#1142](https://github.com/adap/flower/pull/1142))" +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.simulation.run_simulation.run_simulation:1 of +msgid "Run a Flower App using the Simulation Engine." 
msgstr "" -"**Supprimer la prise en charge obsolète de la valeur de retour de la " -"précision eval_fn** ([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/ref-changelog.md:785 +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#, fuzzy msgid "" -"**Remove deprecated support for passing initial parameters as NumPy " -"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" +":py:obj:`start_simulation `\\ " +"\\(\\*args\\, \\*\\*kwargs\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.simulation.start_simulation:1 of +msgid "Log error stating that module `ray` could not be imported." msgstr "" -"**Supprime la prise en charge obsolète du passage des paramètres initiaux" -" en tant que ndarrays NumPy** " -"([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/ref-changelog.md:787 -msgid "v0.18.0 (2022-02-28)" -msgstr "v0.18.0 (2022-02-28)" +#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 +#, fuzzy +msgid "run\\_simulation" +msgstr "Simulation de moniteur" -#: ../../source/ref-changelog.md:791 +#: flwr.simulation.run_simulation.run_simulation:3 of msgid "" -"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " -"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " -"[#872](https://github.com/adap/flower/pull/872), " -"[#833](https://github.com/adap/flower/pull/833), " -"[#1036](https://github.com/adap/flower/pull/1036))" +"The `ServerApp` to be executed. It will send messages to different " +"`ClientApp` instances running on different (virtual) SuperNodes." 
msgstr "" -"**Amélioration de la compatibilité du moteur de client virtuel avec " -"Jupyter Notebook / Google Colab** " -"([#866](https://github.com/adap/flower/pull/866), " -"[#872](https://github.com/adap/flower/pull/872), " -"[#833](https://github.com/adap/flower/pull/833), " -"[#1036](https://github.com/adap/flower/pull/1036))" -#: ../../source/ref-changelog.md:793 +#: flwr.simulation.run_simulation.run_simulation:6 of msgid "" -"Simulations (using the Virtual Client Engine through `start_simulation`) " -"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " -"installing Flower with the `simulation` extra (`pip install " -"flwr[simulation]`)." +"The `ClientApp` to be executed by each of the SuperNodes. It will receive" +" messages sent by the `ServerApp`." msgstr "" -"Les simulations (utilisant le moteur de client virtuel via " -"`start_simulation`) fonctionnent maintenant plus facilement sur les " -"Notebooks Jupyter (y compris Google Colab) après avoir installé Flower " -"avec l'option `simulation` (`pip install flwr[simulation]`)." -#: ../../source/ref-changelog.md:795 +#: flwr.simulation.run_simulation.run_simulation:9 of msgid "" -"**New Jupyter Notebook code example** " -"([#833](https://github.com/adap/flower/pull/833))" +"Number of nodes that run a ClientApp. They can be sampled by a Driver in " +"the ServerApp and receive a Message describing what the ClientApp should " +"perform." msgstr "" -"**Nouvel exemple de code Jupyter Notebook** " -"([#833](https://github.com/adap/flower/pull/833))" -#: ../../source/ref-changelog.md:797 -msgid "" -"A new code example (`quickstart_simulation`) demonstrates Flower " -"simulations using the Virtual Client Engine through Jupyter Notebook " -"(incl. Google Colab)." +#: flwr.simulation.run_simulation.run_simulation:12 of +msgid "A simulation backend that runs `ClientApp`s." 
msgstr "" -"Un nouvel exemple de code (`quickstart_simulation`) démontre des " -"simulations de Flower en utilisant le moteur de client virtuel via " -"Jupyter Notebook (y compris Google Colab)." -#: ../../source/ref-changelog.md:799 +#: flwr.simulation.run_simulation.run_simulation:14 of msgid "" -"**Client properties (feature preview)** " -"([#795](https://github.com/adap/flower/pull/795))" +"'A dictionary to configure a backend. Separate dictionaries to configure " +"different elements of backend. Supported top-level keys are `init_args` " +"for values parsed to initialisation of backend, `client_resources` to " +"define the resources for clients, and `actor` to define the actor " +"parameters. Values supported in are those included by " +"`flwr.common.typing.ConfigsRecordValues`." msgstr "" -"**Propriétés du client (aperçu des fonctionnalités)** " -"([#795](https://github.com/adap/flower/pull/795))" -#: ../../source/ref-changelog.md:801 +#: flwr.simulation.run_simulation.run_simulation:21 of msgid "" -"Clients can implement a new method `get_properties` to enable server-side" -" strategies to query client properties." +"A boolean to indicate whether to enable GPU growth on the main thread. " +"This is desirable if you make use of a TensorFlow model on your " +"`ServerApp` while having your `ClientApp` running on the same GPU. " +"Without enabling this, you might encounter an out-of-memory error because" +" TensorFlow, by default, allocates all GPU memory. Read more about how " +"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " +"documentation: https://www.tensorflow.org/api/stable." msgstr "" -"Les clients peuvent implémenter une nouvelle méthode `get_properties` " -"pour permettre aux stratégies côté serveur d'interroger les propriétés du" -" client." 
-#: ../../source/ref-changelog.md:803 +#: flwr.simulation.run_simulation.run_simulation:28 of msgid "" -"**Experimental Android support with TFLite** " -"([#865](https://github.com/adap/flower/pull/865))" +"When disabled, only INFO, WARNING and ERROR log messages will be shown. " +"If enabled, DEBUG-level logs will be displayed." msgstr "" -"**Support expérimental d'Android avec TFLite** " -"([#865](https://github.com/adap/flower/pull/865))" -#: ../../source/ref-changelog.md:805 -msgid "" -"Android support has finally arrived in `main`! Flower is both client-" -"agnostic and framework-agnostic by design. One can integrate arbitrary " -"client platforms and with this release, using Flower on Android has " -"become a lot easier." -msgstr "" -"La prise en charge d'Android est enfin arrivée dans `main` ! Flower est à" -" la fois agnostique au niveau du client et du cadre de travail. On peut " -"intégrer des plates-formes client arbitraires et avec cette version, " -"l'utilisation de Flower sur Android est devenue beaucoup plus facile." 
+#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 +#, fuzzy +msgid "start\\_simulation" +msgstr "démarrer_simulation" + +#: ../../source/ref-changelog.md:1 +msgid "Changelog" +msgstr "Changelog" -#: ../../source/ref-changelog.md:807 +#: ../../source/ref-changelog.md:3 +#, fuzzy +msgid "v1.11.1 (2024-09-11)" +msgstr "v1.3.0 (2023-02-06)" + +#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:37 +#: ../../source/ref-changelog.md:141 ../../source/ref-changelog.md:239 +#: ../../source/ref-changelog.md:339 ../../source/ref-changelog.md:403 +#: ../../source/ref-changelog.md:496 ../../source/ref-changelog.md:596 +#: ../../source/ref-changelog.md:680 ../../source/ref-changelog.md:744 +#: ../../source/ref-changelog.md:802 ../../source/ref-changelog.md:871 +#: ../../source/ref-changelog.md:940 +msgid "Thanks to our contributors" +msgstr "Merci à nos contributeurs" + +#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:39 +#: ../../source/ref-changelog.md:143 ../../source/ref-changelog.md:241 +#: ../../source/ref-changelog.md:341 ../../source/ref-changelog.md:405 +#: ../../source/ref-changelog.md:498 ../../source/ref-changelog.md:598 +#: ../../source/ref-changelog.md:682 ../../source/ref-changelog.md:746 +#: ../../source/ref-changelog.md:804 msgid "" -"The example uses TFLite on the client side, along with a new " -"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " -"still experimental, but they are a first step towards a fully-fledged " -"Android SDK and a unified `FedAvg` implementation that integrated the new" -" functionality from `FedAvgAndroid`." +"We would like to give our special thanks to all the contributors who made" +" the new version of Flower possible (in `git shortlog` order):" msgstr "" -"L'exemple utilise TFLite du côté client, ainsi qu'une nouvelle stratégie " -"`FedAvgAndroid`. 
Le client Android et `FedAvgAndroid` sont encore " -"expérimentaux, mais ils constituent un premier pas vers un SDK Android à " -"part entière et une implémentation unifiée de `FedAvg` intégrant la " -"nouvelle fonctionnalité de `FedAvgAndroid`." +"Nous tenons à remercier tout particulièrement tous les contributeurs qui " +"ont rendu possible la nouvelle version de Flower (dans l'ordre `git " +"shortlog`) :" -#: ../../source/ref-changelog.md:809 +#: ../../source/ref-changelog.md:9 msgid "" -"**Make gRPC keepalive time user-configurable and decrease default " -"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" +"`Charles Beauville`, `Chong Shen Ng`, `Daniel J. Beutel`, `Heng Pan`, " +"`Javier`, `Robert Steiner`, `Yan Gao` " msgstr "" -"**Rendre le temps de garde gRPC configurable par l'utilisateur et " -"diminuer le temps de garde par défaut** " -"([#1069](https://github.com/adap/flower/pull/1069))" -#: ../../source/ref-changelog.md:811 +#: ../../source/ref-changelog.md:11 +#, fuzzy +msgid "Improvements" +msgstr "Améliorations facultatives" + +#: ../../source/ref-changelog.md:13 +#, fuzzy msgid "" -"The default gRPC keepalive time has been reduced to increase the " -"compatibility of Flower with more cloud environments (for example, " -"Microsoft Azure). Users can configure the keepalive time to customize the" -" gRPC stack based on specific requirements." +"**Implement** `keys/values/items` **methods for** `TypedDict` " +"([#4146](https://github.com/adap/flower/pull/4146))" msgstr "" -"Le temps de keepalive gRPC par défaut a été réduit pour augmenter la " -"compatibilité de Flower avec davantage d'environnements cloud (par " -"exemple, Microsoft Azure). Les utilisateurs peuvent configurer le temps " -"de keepalive pour personnaliser la pile gRPC en fonction d'exigences " -"spécifiques." 
+"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" -#: ../../source/ref-changelog.md:813 +#: ../../source/ref-changelog.md:15 +#, fuzzy msgid "" -"**New differential privacy example using Opacus and PyTorch** " -"([#805](https://github.com/adap/flower/pull/805))" +"**Fix parsing of** `--executor-config` **if present** " +"([#4125](https://github.com/adap/flower/pull/4125))" msgstr "" -"**Nouvel exemple de confidentialité différentielle utilisant Opacus et " -"PyTorch** ([#805](https://github.com/adap/flower/pull/805))" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:815 +#: ../../source/ref-changelog.md:17 +#, fuzzy msgid "" -"A new code example (`opacus`) demonstrates differentially-private " -"federated learning with Opacus, PyTorch, and Flower." +"**Adjust framework name in templates docstrings** " +"([#4127](https://github.com/adap/flower/pull/4127))" msgstr "" -"Un nouvel exemple de code (`opacus`) démontre l'apprentissage fédéré " -"différentiellement privé avec Opacus, PyTorch et Flower." +"**Nouvel exemple de code scikit-learn** " +"([#748](https://github.com/adap/flower/pull/748))" -#: ../../source/ref-changelog.md:817 +#: ../../source/ref-changelog.md:19 +#, fuzzy msgid "" -"**New Hugging Face Transformers code example** " -"([#863](https://github.com/adap/flower/pull/863))" +"**Update** `flwr new` **Hugging Face template** " +"([#4169](https://github.com/adap/flower/pull/4169))" msgstr "" "**Nouvel exemple de code pour les Transformers à visage embrassant** " "([#863](https://github.com/adap/flower/pull/863))" -#: ../../source/ref-changelog.md:819 +#: ../../source/ref-changelog.md:21 +#, fuzzy msgid "" -"A new code example (`quickstart_huggingface`) demonstrates usage of " -"Hugging Face Transformers with Flower." 
+"**Fix** `flwr new` **FlowerTune template** " +"([#4123](https://github.com/adap/flower/pull/4123))" msgstr "" -"Un nouvel exemple de code (`quickstart_huggingface`) démontre " -"l'utilisation des transformateurs Hugging Face avec Flower." +"**Nouvel exemple de code CoreML pour iOS** " +"([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/ref-changelog.md:821 +#: ../../source/ref-changelog.md:23 +#, fuzzy msgid "" -"**New MLCube code example** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" +"**Add buffer time after** `ServerApp` **thread initialization** " +"([#4119](https://github.com/adap/flower/pull/4119))" msgstr "" -"**Nouvel exemple de code MLCube** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" +"**Ajouter des mesures de formation à** `History` **objet pendant les " +"simulations** ([#1696](https://github.com/adap/flower/pull/1696))" -#: ../../source/ref-changelog.md:823 +#: ../../source/ref-changelog.md:25 +#, fuzzy msgid "" -"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " -"with Flower." +"**Handle unsuitable resources for simulation** " +"([#4143](https://github.com/adap/flower/pull/4143))" msgstr "" -"Un nouvel exemple de code (`quickstart_mlcube`) démontre l'utilisation de" -" MLCube avec Flower." 
+"**Ajouter un nouveau guide pratique pour le suivi des simulations** " +"([#1649](https://github.com/adap/flower/pull/1649))" -#: ../../source/ref-changelog.md:825 +#: ../../source/ref-changelog.md:27 +#, fuzzy msgid "" -"**SSL-enabled server and client** " -"([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +"**Update example READMEs** " +"([#4117](https://github.com/adap/flower/pull/4117))" msgstr "" -"**([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-changelog.md:827 +#: ../../source/ref-changelog.md:29 +#, fuzzy msgid "" -"SSL enables secure encrypted connections between clients and servers. " -"This release open-sources the Flower secure gRPC implementation to make " -"encrypted communication channels accessible to all Flower users." +"**Update SuperNode authentication docs** " +"([#4160](https://github.com/adap/flower/pull/4160))" msgstr "" -"SSL permet d'établir des connexions cryptées et sécurisées entre les " -"clients et les serveurs. Cette version met en open-source " -"l'implémentation gRPC sécurisée de Flower afin de rendre les canaux de " -"communication cryptés accessibles à tous les utilisateurs de Flower." 
+"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:829 -msgid "" -"**Updated** `FedAdam` **and** `FedYogi` **strategies** " -"([#885](https://github.com/adap/flower/pull/885), " -"[#895](https://github.com/adap/flower/pull/895))" -msgstr "" -"**Mise à jour** `FedAdam` **et** `FedYogi` **stratégies** " -"([#885](https://github.com/adap/flower/pull/885), " -"[#895](https://github.com/adap/flower/pull/895))" +#: ../../source/ref-changelog.md:31 ../../source/ref-changelog.md:111 +#: ../../source/ref-changelog.md:227 ../../source/ref-changelog.md:323 +#: ../../source/ref-changelog.md:397 ../../source/ref-changelog.md:472 +#: ../../source/ref-changelog.md:584 ../../source/ref-changelog.md:674 +#: ../../source/ref-changelog.md:738 ../../source/ref-changelog.md:796 +#: ../../source/ref-changelog.md:865 ../../source/ref-changelog.md:927 +#: ../../source/ref-changelog.md:946 ../../source/ref-changelog.md:1102 +#: ../../source/ref-changelog.md:1173 ../../source/ref-changelog.md:1210 +#: ../../source/ref-changelog.md:1253 +msgid "Incompatible changes" +msgstr "Changements incompatibles" -#: ../../source/ref-changelog.md:831 +#: ../../source/ref-changelog.md:35 +#, fuzzy +msgid "v1.11.0 (2024-08-30)" +msgstr "v1.3.0 (2023-02-06)" + +#: ../../source/ref-changelog.md:41 msgid "" -"`FedAdam` and `FedAdam` match the latest version of the Adaptive " -"Federated Optimization paper." -msgstr "" -"`FedAdam` et `FedAdam` correspondent à la dernière version de l'article " -"sur l'optimisation fédérée adaptative." +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. 
" +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Edoardo Gabrielli`, `Heng Pan`," +" `Javier`, `Meng Yan`, `Michal Danilowski`, `Mohammad Naseri`, `Robert " +"Steiner`, `Steve Laskaridis`, `Taner Topal`, `Yan Gao` " +msgstr "" + +#: ../../source/ref-changelog.md:43 ../../source/ref-changelog.md:147 +#: ../../source/ref-changelog.md:245 ../../source/ref-changelog.md:345 +#: ../../source/ref-changelog.md:409 ../../source/ref-changelog.md:502 +#: ../../source/ref-changelog.md:602 ../../source/ref-changelog.md:686 +#: ../../source/ref-changelog.md:750 ../../source/ref-changelog.md:808 +#: ../../source/ref-changelog.md:877 ../../source/ref-changelog.md:1006 +#: ../../source/ref-changelog.md:1048 ../../source/ref-changelog.md:1115 +#: ../../source/ref-changelog.md:1181 ../../source/ref-changelog.md:1226 +#: ../../source/ref-changelog.md:1265 ../../source/ref-changelog.md:1298 +#: ../../source/ref-changelog.md:1348 +msgid "What's new?" +msgstr "Quoi de neuf ?" -#: ../../source/ref-changelog.md:833 +#: ../../source/ref-changelog.md:45 msgid "" -"**Initialize** `start_simulation` **with a list of client IDs** " -"([#860](https://github.com/adap/flower/pull/860))" +"**Deliver Flower App Bundle (FAB) to SuperLink and SuperNodes** " +"([#4006](https://github.com/adap/flower/pull/4006), " +"[#3945](https://github.com/adap/flower/pull/3945), " +"[#3999](https://github.com/adap/flower/pull/3999), " +"[#4027](https://github.com/adap/flower/pull/4027), " +"[#3851](https://github.com/adap/flower/pull/3851), " +"[#3946](https://github.com/adap/flower/pull/3946), " +"[#4003](https://github.com/adap/flower/pull/4003), " +"[#4029](https://github.com/adap/flower/pull/4029), " +"[#3942](https://github.com/adap/flower/pull/3942), " +"[#3957](https://github.com/adap/flower/pull/3957), " +"[#4020](https://github.com/adap/flower/pull/4020), " +"[#4044](https://github.com/adap/flower/pull/4044), " +"[#3852](https://github.com/adap/flower/pull/3852), " 
+"[#4019](https://github.com/adap/flower/pull/4019), " +"[#4031](https://github.com/adap/flower/pull/4031), " +"[#4036](https://github.com/adap/flower/pull/4036), " +"[#4049](https://github.com/adap/flower/pull/4049), " +"[#4017](https://github.com/adap/flower/pull/4017), " +"[#3943](https://github.com/adap/flower/pull/3943), " +"[#3944](https://github.com/adap/flower/pull/3944), " +"[#4011](https://github.com/adap/flower/pull/4011), " +"[#3619](https://github.com/adap/flower/pull/3619))" msgstr "" -"**Initialise** `start_simulation` **avec une liste d'ID de clients** " -"([#860](https://github.com/adap/flower/pull/860))" -#: ../../source/ref-changelog.md:835 +#: ../../source/ref-changelog.md:47 msgid "" -"`start_simulation` can now be called with a list of client IDs " -"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " -"`client_fn` whenever a client needs to be initialized, which can make it " -"easier to load data partitions that are not accessible through `int` " -"identifiers." +"Dynamic code updates are here! `flwr run` can now ship and install the " +"latest version of your `ServerApp` and `ClientApp` to an already-running " +"federation (SuperLink and SuperNodes)." msgstr "" -"`start_simulation` peut maintenant être appelé avec une liste " -"d'identifiants de clients (`clients_ids`, type : `List[str]`). Ces " -"identifiants seront passés à `client_fn` chaque fois qu'un client doit " -"être initialisé, ce qui peut faciliter le chargement de partitions de " -"données qui ne sont pas accessibles par des identifiants `int`." -#: ../../source/ref-changelog.md:839 +#: ../../source/ref-changelog.md:49 msgid "" -"Update `num_examples` calculation in PyTorch code examples in " -"([#909](https://github.com/adap/flower/pull/909))" +"How does it work? `flwr run` bundles your Flower app into a single FAB " +"(Flower App Bundle) file. It then ships this FAB file, via the SuperExec," +" to both the SuperLink and those SuperNodes that need it. 
This allows you" +" to keep SuperExec, SuperLink and SuperNodes running as permanent " +"infrastructure, and then ship code updates (including completely new " +"projects!) dynamically." msgstr "" -"Mettre à jour le calcul de `num_examples` dans les exemples de code " -"PyTorch dans ([#909](https://github.com/adap/flower/pull/909))" -#: ../../source/ref-changelog.md:840 -msgid "" -"Expose Flower version through `flwr.__version__` " -"([#952](https://github.com/adap/flower/pull/952))" +#: ../../source/ref-changelog.md:51 +msgid "`flwr run` is all you need." msgstr "" -"Exposer la version de Flower à travers `flwr.__version__` " -"([#952](https://github.com/adap/flower/pull/952))" -#: ../../source/ref-changelog.md:841 +#: ../../source/ref-changelog.md:53 +#, fuzzy msgid "" -"`start_server` in `app.py` now returns a `History` object containing " -"metrics from training ([#974](https://github.com/adap/flower/pull/974))" +"**Introduce isolated** `ClientApp` **execution** " +"([#3970](https://github.com/adap/flower/pull/3970), " +"[#3976](https://github.com/adap/flower/pull/3976), " +"[#4002](https://github.com/adap/flower/pull/4002), " +"[#4001](https://github.com/adap/flower/pull/4001), " +"[#4034](https://github.com/adap/flower/pull/4034), " +"[#4037](https://github.com/adap/flower/pull/4037), " +"[#3977](https://github.com/adap/flower/pull/3977), " +"[#4042](https://github.com/adap/flower/pull/4042), " +"[#3978](https://github.com/adap/flower/pull/3978), " +"[#4039](https://github.com/adap/flower/pull/4039), " +"[#4033](https://github.com/adap/flower/pull/4033), " +"[#3971](https://github.com/adap/flower/pull/3971), " +"[#4035](https://github.com/adap/flower/pull/4035), " +"[#3973](https://github.com/adap/flower/pull/3973), " +"[#4032](https://github.com/adap/flower/pull/4032))" msgstr "" -"`start_server` dans `app.py` renvoie maintenant un objet `History` " -"contenant les métriques de l'entraînement " -"([#974](https://github.com/adap/flower/pull/974))" 
+"**Améliorations générales** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" -#: ../../source/ref-changelog.md:842 +#: ../../source/ref-changelog.md:55 msgid "" -"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " -"([#978](https://github.com/adap/flower/pull/978))" +"The SuperNode can now run your `ClientApp` in a fully isolated way. In an" +" enterprise deployment, this allows you to set strict limits on what the " +"`ClientApp` can and cannot do." msgstr "" -"Rendre `max_workers` (utilisé par `ThreadPoolExecutor`) configurable " -"([#978](https://github.com/adap/flower/pull/978))" -#: ../../source/ref-changelog.md:843 +#: ../../source/ref-changelog.md:57 +msgid "`flower-supernode` supports three `--isolation` modes:" +msgstr "" + +#: ../../source/ref-changelog.md:59 msgid "" -"Increase sleep time after server start to three seconds in all code " -"examples ([#1086](https://github.com/adap/flower/pull/1086))" +"Unset: The SuperNode runs the `ClientApp` in the same process (as in " +"previous versions of Flower). This is the default mode." 
msgstr "" -"Augmente le temps de sommeil après le démarrage du serveur à trois " -"secondes dans tous les exemples de code " -"([#1086](https://github.com/adap/flower/pull/1086))" -#: ../../source/ref-changelog.md:844 +#: ../../source/ref-changelog.md:60 msgid "" -"Added a new FAQ section to the documentation " -"([#948](https://github.com/adap/flower/pull/948))" +"`--isolation=subprocess`: The SuperNode starts a subprocess to run the " +"`ClientApp`." msgstr "" -"Ajout d'une nouvelle section FAQ à la documentation " -"([#948](https://github.com/adap/flower/pull/948))" -#: ../../source/ref-changelog.md:845 +#: ../../source/ref-changelog.md:61 msgid "" -"And many more under-the-hood changes, library updates, documentation " -"changes, and tooling improvements!" +"`--isolation=process`: The SuperNode expects an externally-managed " +"process to run the `ClientApp`. This external process is not managed by " +"the SuperNode, so it has to be started beforehand and terminated " +"manually. The common way to use this isolation mode is via the new " +"`flwr/clientapp` Docker image." msgstr "" -"Et bien d'autres changements sous le capot, des mises à jour de la " -"bibliothèque, des modifications de la documentation et des améliorations " -"de l'outillage !" 
-#: ../../source/ref-changelog.md:849 +#: ../../source/ref-changelog.md:63 +#, fuzzy msgid "" -"**Removed** `flwr_example` **and** `flwr_experimental` **from release " -"build** ([#869](https://github.com/adap/flower/pull/869))" +"**Improve Docker support for enterprise deployments** " +"([#4050](https://github.com/adap/flower/pull/4050), " +"[#4090](https://github.com/adap/flower/pull/4090), " +"[#3784](https://github.com/adap/flower/pull/3784), " +"[#3998](https://github.com/adap/flower/pull/3998), " +"[#4094](https://github.com/adap/flower/pull/4094), " +"[#3722](https://github.com/adap/flower/pull/3722))" msgstr "" -"**Supprimé** `flwr_example` **et** `flwr_experimental` **de la version " -"release build** ([#869](https://github.com/adap/flower/pull/869))" +"**Nouvel exemple de code MLCube** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/ref-changelog.md:851 +#: ../../source/ref-changelog.md:65 msgid "" -"The packages `flwr_example` and `flwr_experimental` have been deprecated " -"since Flower 0.12.0 and they are not longer included in Flower release " -"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " -"an upcoming release." +"Flower 1.11 ships many Docker improvements that are especially useful for" +" enterprise deployments:" msgstr "" -"Les paquets `flwr_example` et `flwr_experimental` ont été dépréciés " -"depuis Flower 0.12.0 et ils ne sont plus inclus dans les builds de " -"Flower. Les extras associés (`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`) sont maintenant no-op et seront " -"supprimés dans une prochaine version." 
-#: ../../source/ref-changelog.md:853 -msgid "v0.17.0 (2021-09-24)" -msgstr "v0.17.0 (2021-09-24)" +#: ../../source/ref-changelog.md:67 +msgid "`flwr/supernode` comes with a new Alpine Docker image." +msgstr "" -#: ../../source/ref-changelog.md:857 +#: ../../source/ref-changelog.md:68 msgid "" -"**Experimental virtual client engine** " -"([#781](https://github.com/adap/flower/pull/781) " -"[#790](https://github.com/adap/flower/pull/790) " -"[#791](https://github.com/adap/flower/pull/791))" +"`flwr/clientapp` is a new image to be used with the `--isolation=process`" +" option. In this mode, SuperNode and `ClientApp` run in two different " +"Docker containers. `flwr/supernode` (preferably the Alpine version) runs " +"the long-running SuperNode with `--isolation=process`. `flwr/clientapp` " +"runs the `ClientApp`. This is the recommended way to deploy Flower in " +"enterprise settings." msgstr "" -"**Moteur expérimental de client virtuel** " -"([#781](https://github.com/adap/flower/pull/781) " -"[#790](https://github.com/adap/flower/pull/790) " -"[#791](https://github.com/adap/flower/pull/791))" -#: ../../source/ref-changelog.md:859 +#: ../../source/ref-changelog.md:69 msgid "" -"One of Flower's goals is to enable research at scale. This release " -"enables a first (experimental) peek at a major new feature, codenamed the" -" virtual client engine. Virtual clients enable simulations that scale to " -"a (very) large number of clients on a single machine or compute cluster. " -"The easiest way to test the new functionality is to look at the two new " -"code examples called `quickstart_simulation` and `simulation_pytorch`." +"New all-in-one Docker Compose enables you to easily start a full Flower " +"Deployment Engine on a single machine." msgstr "" -"L'un des objectifs de Flower est de permettre la recherche à grande " -"échelle. 
Cette version donne un premier aperçu (expérimental) d'une " -"nouvelle fonctionnalité majeure, connue sous le nom de code de moteur de " -"client virtuel. Les clients virtuels permettent des simulations qui " -"s'étendent à un (très) grand nombre de clients sur une seule machine ou " -"une grappe de calcul. La façon la plus simple de tester la nouvelle " -"fonctionnalité est de regarder les deux nouveaux exemples de code appelés" -" `quickstart_simulation` et `simulation_pytorch`." -#: ../../source/ref-changelog.md:861 +#: ../../source/ref-changelog.md:70 msgid "" -"The feature is still experimental, so there's no stability guarantee for " -"the API. It's also not quite ready for prime time and comes with a few " -"known caveats. However, those who are curious are encouraged to try it " -"out and share their thoughts." +"Completely new Docker documentation: " +"https://flower.ai/docs/framework/docker/index.html" msgstr "" -"La fonction est encore expérimentale, il n'y a donc aucune garantie de " -"stabilité pour l'API. Elle n'est pas non plus tout à fait prête pour le " -"prime time et s'accompagne de quelques mises en garde connues. Cependant," -" les personnes curieuses sont encouragées à l'essayer et à faire part de " -"leurs réflexions." 
-#: ../../source/ref-changelog.md:863 +#: ../../source/ref-changelog.md:72 +#, fuzzy msgid "" -"**New built-in strategies** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" +"**Improve SuperNode authentication** " +"([#4043](https://github.com/adap/flower/pull/4043), " +"[#4047](https://github.com/adap/flower/pull/4047), " +"[#4074](https://github.com/adap/flower/pull/4074))" msgstr "" -"**Nouvelles stratégies intégrées** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:865 +#: ../../source/ref-changelog.md:74 msgid "" -"FedYogi - Federated learning strategy using Yogi on server-side. " -"Implementation based on https://arxiv.org/abs/2003.00295" +"SuperNode auth has been improved in several ways, including improved " +"logging, improved testing, and improved error handling." msgstr "" -"FedYogi - Stratégie d'apprentissage fédéré utilisant Yogi côté serveur. " -"Mise en oeuvre basée sur https://arxiv.org/abs/2003.00295" -#: ../../source/ref-changelog.md:866 +#: ../../source/ref-changelog.md:76 +#, fuzzy msgid "" -"FedAdam - Federated learning strategy using Adam on server-side. 
" -"Implementation based on https://arxiv.org/abs/2003.00295" +"**Update** `flwr new` **templates** " +"([#3933](https://github.com/adap/flower/pull/3933), " +"[#3894](https://github.com/adap/flower/pull/3894), " +"[#3930](https://github.com/adap/flower/pull/3930), " +"[#3931](https://github.com/adap/flower/pull/3931), " +"[#3997](https://github.com/adap/flower/pull/3997), " +"[#3979](https://github.com/adap/flower/pull/3979), " +"[#3965](https://github.com/adap/flower/pull/3965), " +"[#4013](https://github.com/adap/flower/pull/4013), " +"[#4064](https://github.com/adap/flower/pull/4064))" msgstr "" -"FedAdam - Stratégie d'apprentissage fédéré utilisant Adam côté serveur. " -"Mise en œuvre basée sur https://arxiv.org/abs/2003.00295" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:868 +#: ../../source/ref-changelog.md:78 msgid "" -"**New PyTorch Lightning code example** " -"([#617](https://github.com/adap/flower/pull/617))" +"All `flwr new` templates have been updated to show the latest recommended" +" use of Flower APIs." 
msgstr "" -"**Nouvel exemple de code PyTorch Lightning** " -"([#617](https://github.com/adap/flower/pull/617))" -#: ../../source/ref-changelog.md:870 +#: ../../source/ref-changelog.md:80 +#, fuzzy msgid "" -"**New Variational Auto-Encoder code example** " -"([#752](https://github.com/adap/flower/pull/752))" +"**Improve Simulation Engine** " +"([#4095](https://github.com/adap/flower/pull/4095), " +"[#3913](https://github.com/adap/flower/pull/3913), " +"[#4059](https://github.com/adap/flower/pull/4059), " +"[#3954](https://github.com/adap/flower/pull/3954), " +"[#4071](https://github.com/adap/flower/pull/4071), " +"[#3985](https://github.com/adap/flower/pull/3985), " +"[#3988](https://github.com/adap/flower/pull/3988))" msgstr "" -"**Nouvel exemple de code d'autocodage variationnel** " -"([#752](https://github.com/adap/flower/pull/752))" +"**Nouvel exemple de code MLCube** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/ref-changelog.md:872 +#: ../../source/ref-changelog.md:82 msgid "" -"**New scikit-learn code example** " -"([#748](https://github.com/adap/flower/pull/748))" +"The Flower Simulation Engine comes with several updates, including " +"improved run config support, verbose logging, simulation backend " +"configuration via `flwr run`, and more." 
msgstr "" -"**Nouvel exemple de code scikit-learn** " -"([#748](https://github.com/adap/flower/pull/748))" -#: ../../source/ref-changelog.md:874 +#: ../../source/ref-changelog.md:84 +#, fuzzy msgid "" -"**New experimental TensorBoard strategy** " -"([#789](https://github.com/adap/flower/pull/789))" +"**Improve** `RecordSet` " +"([#4052](https://github.com/adap/flower/pull/4052), " +"[#3218](https://github.com/adap/flower/pull/3218), " +"[#4016](https://github.com/adap/flower/pull/4016))" msgstr "" -"**Nouvelle stratégie expérimentale TensorBoard** " -"([#789](https://github.com/adap/flower/pull/789))" +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:878 +#: ../../source/ref-changelog.md:86 msgid "" -"Improved advanced TensorFlow code example " -"([#769](https://github.com/adap/flower/pull/769))" +"`RecordSet` is the core object to exchange model parameters, " +"configuration values and metrics between `ClientApp` and `ServerApp`. " +"This release ships several smaller improvements to `RecordSet` and " +"related `*Record` types." 
msgstr "" -"Amélioration de l'exemple de code TensorFlow avancé " -"([#769](https://github.com/adap/flower/pull/769))" -#: ../../source/ref-changelog.md:879 +#: ../../source/ref-changelog.md:88 +#, fuzzy msgid "" -"Warning when `min_available_clients` is misconfigured " -"([#830](https://github.com/adap/flower/pull/830))" +"**Update documentation** " +"([#3972](https://github.com/adap/flower/pull/3972), " +"[#3925](https://github.com/adap/flower/pull/3925), " +"[#4061](https://github.com/adap/flower/pull/4061), " +"[#3984](https://github.com/adap/flower/pull/3984), " +"[#3917](https://github.com/adap/flower/pull/3917), " +"[#3900](https://github.com/adap/flower/pull/3900), " +"[#4066](https://github.com/adap/flower/pull/4066), " +"[#3765](https://github.com/adap/flower/pull/3765), " +"[#4021](https://github.com/adap/flower/pull/4021), " +"[#3906](https://github.com/adap/flower/pull/3906), " +"[#4063](https://github.com/adap/flower/pull/4063), " +"[#4076](https://github.com/adap/flower/pull/4076), " +"[#3920](https://github.com/adap/flower/pull/3920), " +"[#3916](https://github.com/adap/flower/pull/3916))" msgstr "" -"Avertissement lorsque `min_available_clients` est mal configuré " -"([#830](https://github.com/adap/flower/pull/830))" +"**Mise à jour de la documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" + +#: ../../source/ref-changelog.md:90 +msgid "" +"Many parts of the documentation, including the main tutorial, have been " +"migrated to show new Flower 
APIs and other new Flower features like the " +"improved Docker support." +msgstr "" + +#: ../../source/ref-changelog.md:92 +msgid "" +"**Migrate code example to use new Flower APIs** " +"([#3758](https://github.com/adap/flower/pull/3758), " +"[#3701](https://github.com/adap/flower/pull/3701), " +"[#3919](https://github.com/adap/flower/pull/3919), " +"[#3918](https://github.com/adap/flower/pull/3918), " +"[#3934](https://github.com/adap/flower/pull/3934), " +"[#3893](https://github.com/adap/flower/pull/3893), " +"[#3833](https://github.com/adap/flower/pull/3833), " +"[#3922](https://github.com/adap/flower/pull/3922), " +"[#3846](https://github.com/adap/flower/pull/3846), " +"[#3777](https://github.com/adap/flower/pull/3777), " +"[#3874](https://github.com/adap/flower/pull/3874), " +"[#3873](https://github.com/adap/flower/pull/3873), " +"[#3935](https://github.com/adap/flower/pull/3935), " +"[#3754](https://github.com/adap/flower/pull/3754), " +"[#3980](https://github.com/adap/flower/pull/3980), " +"[#4089](https://github.com/adap/flower/pull/4089), " +"[#4046](https://github.com/adap/flower/pull/4046), " +"[#3314](https://github.com/adap/flower/pull/3314), " +"[#3316](https://github.com/adap/flower/pull/3316), " +"[#3295](https://github.com/adap/flower/pull/3295), " +"[#3313](https://github.com/adap/flower/pull/3313))" +msgstr "" + +#: ../../source/ref-changelog.md:94 +msgid "Many code examples have been migrated to use new Flower APIs." 
+msgstr "" + +#: ../../source/ref-changelog.md:96 +msgid "" +"**Update Flower framework, framework internals and quality " +"infrastructure** ([#4018](https://github.com/adap/flower/pull/4018), " +"[#4053](https://github.com/adap/flower/pull/4053), " +"[#4098](https://github.com/adap/flower/pull/4098), " +"[#4067](https://github.com/adap/flower/pull/4067), " +"[#4105](https://github.com/adap/flower/pull/4105), " +"[#4048](https://github.com/adap/flower/pull/4048), " +"[#4107](https://github.com/adap/flower/pull/4107), " +"[#4069](https://github.com/adap/flower/pull/4069), " +"[#3915](https://github.com/adap/flower/pull/3915), " +"[#4101](https://github.com/adap/flower/pull/4101), " +"[#4108](https://github.com/adap/flower/pull/4108), " +"[#3914](https://github.com/adap/flower/pull/3914), " +"[#4068](https://github.com/adap/flower/pull/4068), " +"[#4041](https://github.com/adap/flower/pull/4041), " +"[#4040](https://github.com/adap/flower/pull/4040), " +"[#3986](https://github.com/adap/flower/pull/3986), " +"[#4026](https://github.com/adap/flower/pull/4026), " +"[#3961](https://github.com/adap/flower/pull/3961), " +"[#3975](https://github.com/adap/flower/pull/3975), " +"[#3983](https://github.com/adap/flower/pull/3983), " +"[#4091](https://github.com/adap/flower/pull/4091), " +"[#3982](https://github.com/adap/flower/pull/3982), " +"[#4079](https://github.com/adap/flower/pull/4079), " +"[#4073](https://github.com/adap/flower/pull/4073), " +"[#4060](https://github.com/adap/flower/pull/4060), " +"[#4106](https://github.com/adap/flower/pull/4106), " +"[#4080](https://github.com/adap/flower/pull/4080), " +"[#3974](https://github.com/adap/flower/pull/3974), " +"[#3996](https://github.com/adap/flower/pull/3996), " +"[#3991](https://github.com/adap/flower/pull/3991), " +"[#3981](https://github.com/adap/flower/pull/3981), " +"[#4093](https://github.com/adap/flower/pull/4093), " +"[#4100](https://github.com/adap/flower/pull/4100), " 
+"[#3939](https://github.com/adap/flower/pull/3939), " +"[#3955](https://github.com/adap/flower/pull/3955), " +"[#3940](https://github.com/adap/flower/pull/3940), " +"[#4038](https://github.com/adap/flower/pull/4038))" +msgstr "" + +#: ../../source/ref-changelog.md:98 ../../source/ref-changelog.md:205 +msgid "" +"As always, many parts of the Flower framework and quality infrastructure " +"were improved and updated." +msgstr "" + +#: ../../source/ref-changelog.md:100 ../../source/ref-changelog.md:217 +#: ../../source/ref-changelog.md:309 ../../source/ref-changelog.md:1292 +msgid "Deprecations" +msgstr "Dépréciations" -#: ../../source/ref-changelog.md:880 +#: ../../source/ref-changelog.md:102 +#, fuzzy msgid "" -"Improved gRPC server docs " -"([#841](https://github.com/adap/flower/pull/841))" +"**Deprecate accessing `Context` via `Client.context`** " +"([#3797](https://github.com/adap/flower/pull/3797))" msgstr "" -"Amélioration de la documentation sur le serveur gRPC " -"([#841](https://github.com/adap/flower/pull/841))" +"**Supprimer les installations supplémentaires no-op dépréciées** " +"([#973](https://github.com/adap/flower/pull/973))" -#: ../../source/ref-changelog.md:881 +#: ../../source/ref-changelog.md:104 msgid "" -"Improved error message in `NumPyClient` " -"([#851](https://github.com/adap/flower/pull/851))" +"Now that both `client_fn` and `server_fn` receive a `Context` object, " +"accessing `Context` via `Client.context` is deprecated. `Client.context` " +"will be removed in a future release. 
If you need to access `Context` in " +"your `Client` implementation, pass it manually when creating the `Client`" +" instance in `client_fn`:" msgstr "" -"Amélioration du message d'erreur dans `NumPyClient` " -"([#851](https://github.com/adap/flower/pull/851))" -#: ../../source/ref-changelog.md:882 +#: ../../source/ref-changelog.md:113 +#, fuzzy msgid "" -"Improved PyTorch quickstart code example " -"([#852](https://github.com/adap/flower/pull/852))" +"**Update CLIs to accept an app directory instead of** `ClientApp` **and**" +" `ServerApp` ([#3952](https://github.com/adap/flower/pull/3952), " +"[#4077](https://github.com/adap/flower/pull/4077), " +"[#3850](https://github.com/adap/flower/pull/3850))" msgstr "" -"Exemple de code de démarrage rapide PyTorch amélioré " -"([#852](https://github.com/adap/flower/pull/852))" +"**Introduire la télémétrie optionnelle** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/ref-changelog.md:886 +#: ../../source/ref-changelog.md:115 msgid "" -"**Disabled final distributed evaluation** " -"([#800](https://github.com/adap/flower/pull/800))" +"The CLI commands `flower-supernode` and `flower-server-app` now accept an" +" app directory as argument (instead of references to a `ClientApp` or " +"`ServerApp`). An app directory is any directory containing a " +"`pyproject.toml` file (with the appropriate Flower config fields set). " +"The easiest way to generate a compatible project structure is to use " +"`flwr new`." msgstr "" -"**Désactivé l'évaluation finale distribuée** " -"([#800](https://github.com/adap/flower/pull/800))" -#: ../../source/ref-changelog.md:888 +#: ../../source/ref-changelog.md:117 +#, fuzzy msgid "" -"Prior behaviour was to perform a final round of distributed evaluation on" -" all connected clients, which is often not required (e.g., when using " -"server-side evaluation). 
The prior behaviour can be enabled by passing " -"`force_final_distributed_eval=True` to `start_server`." +"**Disable** `flower-client-app` **CLI command** " +"([#4022](https://github.com/adap/flower/pull/4022))" msgstr "" -"Le comportement précédent consistait à effectuer un dernier tour " -"d'évaluation distribuée sur tous les clients connectés, ce qui n'est " -"souvent pas nécessaire (par exemple, lors de l'utilisation de " -"l'évaluation côté serveur). Le comportement précédent peut être activé en" -" passant `force_final_distributed_eval=True` à `start_server`." +"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-changelog.md:890 -msgid "" -"**Renamed q-FedAvg strategy** " -"([#802](https://github.com/adap/flower/pull/802))" +#: ../../source/ref-changelog.md:119 +msgid "`flower-client-app` has been disabled. Use `flower-supernode` instead." msgstr "" -"**Renommé stratégie q-FedAvg** " -"([#802](https://github.com/adap/flower/pull/802))" -#: ../../source/ref-changelog.md:892 +#: ../../source/ref-changelog.md:121 +#, fuzzy msgid "" -"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " -"the notation given in the original paper (q-FFL is the optimization " -"objective, q-FedAvg is the proposed solver). Note the original (now " -"deprecated) `QffedAvg` class is still available for compatibility reasons" -" (it will be removed in a future release)." +"**Use spaces instead of commas for separating config args** " +"([#4000](https://github.com/adap/flower/pull/4000))" msgstr "" -"La stratégie nommée `QffedAvg` a été renommée en `QFedAvg` pour mieux " -"refléter la notation donnée dans l'article original (q-FFL est l'objectif" -" d'optimisation, q-FedAvg est le solveur proposé). 
Notez que la classe " -"`QffedAvg` originale (maintenant obsolète) est toujours disponible pour " -"des raisons de compatibilité (elle sera supprimée dans une prochaine " -"version)." +"**Métriques personnalisées pour le serveur et les stratégies** " +"([#717](https://github.com/adap/flower/pull/717))" -#: ../../source/ref-changelog.md:894 +#: ../../source/ref-changelog.md:123 msgid "" -"**Deprecated and renamed code example** `simulation_pytorch` **to** " -"`simulation_pytorch_legacy` " -"([#791](https://github.com/adap/flower/pull/791))" +"When passing configs (run config, node config) to Flower, you now need to" +" separate key-value pairs using spaces instead of commas. For example:" msgstr "" -"**Exemple de code déprécié et renommé** `simulation_pytorch` **en** " -"`simulation_pytorch_legacy` " -"([#791](https://github.com/adap/flower/pull/791))" -#: ../../source/ref-changelog.md:896 -msgid "" -"This example has been replaced by a new example. The new example is based" -" on the experimental virtual client engine, which will become the new " -"default way of doing most types of large-scale simulations in Flower. The" -" existing example was kept for reference purposes, but it might be " -"removed in the future." +#: ../../source/ref-changelog.md:129 +msgid "Previously, you could pass configs using commas, like this:" msgstr "" -"Cet exemple a été remplacé par un nouvel exemple. Le nouvel exemple est " -"basé sur le moteur expérimental du client virtuel, qui deviendra la " -"nouvelle méthode par défaut pour effectuer la plupart des types de " -"simulations à grande échelle dans Flower. L'exemple existant a été " -"conservé à des fins de référence, mais il pourrait être supprimé à " -"l'avenir." 
-#: ../../source/ref-changelog.md:898 -msgid "v0.16.0 (2021-05-11)" -msgstr "v0.16.0 (2021-05-11)" +#: ../../source/ref-changelog.md:135 +#, fuzzy +msgid "" +"**Remove** `flwr example` **CLI command** " +"([#4084](https://github.com/adap/flower/pull/4084))" +msgstr "" +"**Supprimez KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:902 +#: ../../source/ref-changelog.md:137 msgid "" -"**New built-in strategies** " -"([#549](https://github.com/adap/flower/pull/549))" +"The experimental `flwr example` CLI command has been removed. Use `flwr " +"new` to generate a project and then run it using `flwr run`." msgstr "" -"**Nouvelles stratégies intégrées** " -"([#549](https://github.com/adap/flower/pull/549))" -#: ../../source/ref-changelog.md:904 -msgid "(abstract) FedOpt" -msgstr "(résumé) FedOpt" +#: ../../source/ref-changelog.md:139 +#, fuzzy +msgid "v1.10.0 (2024-07-24)" +msgstr "v1.0.0 (2022-07-28)" -#: ../../source/ref-changelog.md:907 +#: ../../source/ref-changelog.md:145 msgid "" -"**Custom metrics for server and strategies** " -"([#717](https://github.com/adap/flower/pull/717))" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, " +"`Ikko Eltociear Ashimine`, `Javier`, `Jiahao Tan`, `Mohammad Naseri`, " +"`Robert Steiner`, `Sebastian van der Voort`, `Taner Topal`, `Yan Gao` " msgstr "" -"**Métriques personnalisées pour le serveur et les stratégies** " -"([#717](https://github.com/adap/flower/pull/717))" -#: ../../source/ref-changelog.md:909 +#: ../../source/ref-changelog.md:149 +#, fuzzy msgid "" -"The Flower server is now fully task-agnostic, all remaining instances of " -"task-specific metrics (such as `accuracy`) have been replaced by custom " -"metrics dictionaries. Flower 0.15 introduced the capability to pass a " -"dictionary containing custom metrics from client to server. 
As of this " -"release, custom metrics replace task-specific metrics on the server." +"**Introduce** `flwr run` **(beta)** " +"([#3810](https://github.com/adap/flower/pull/3810), " +"[#3826](https://github.com/adap/flower/pull/3826), " +"[#3880](https://github.com/adap/flower/pull/3880), " +"[#3807](https://github.com/adap/flower/pull/3807), " +"[#3800](https://github.com/adap/flower/pull/3800), " +"[#3814](https://github.com/adap/flower/pull/3814), " +"[#3811](https://github.com/adap/flower/pull/3811), " +"[#3809](https://github.com/adap/flower/pull/3809), " +"[#3819](https://github.com/adap/flower/pull/3819))" msgstr "" -"Le serveur Flower est maintenant totalement agnostique, toutes les " -"instances restantes de métriques spécifiques à une tâche (telles que " -"`accuracy`) ont été remplacées par des dictionnaires de métriques " -"personnalisées. Flower 0.15 a introduit la possibilité de passer un " -"dictionnaire contenant des métriques personnalisées du client au serveur." -" À partir de cette version, les métriques personnalisées remplacent les " -"métriques spécifiques à une tâche sur le serveur." +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:911 -#, fuzzy +#: ../../source/ref-changelog.md:151 +msgid "" +"Flower 1.10 ships the first beta release of the new `flwr run` command. " +"`flwr run` can run different projects using `flwr run path/to/project`, " +"it enables you to easily switch between different federations using `flwr" +" run . federation` and it runs your Flower project using either local " +"simulation or the new (experimental) SuperExec service. 
This allows " +"Flower to scale federatated learning from fast local simulation to large-" +"scale production deployment, seamlessly. All projects generated with " +"`flwr new` are immediately runnable using `flwr run`. Give it a try: use " +"`flwr new` to generate a project and then run it using `flwr run`." +msgstr "" + +#: ../../source/ref-changelog.md:153 +#, fuzzy +msgid "" +"**Introduce run config** " +"([#3751](https://github.com/adap/flower/pull/3751), " +"[#3750](https://github.com/adap/flower/pull/3750), " +"[#3845](https://github.com/adap/flower/pull/3845), " +"[#3824](https://github.com/adap/flower/pull/3824), " +"[#3746](https://github.com/adap/flower/pull/3746), " +"[#3728](https://github.com/adap/flower/pull/3728), " +"[#3730](https://github.com/adap/flower/pull/3730), " +"[#3725](https://github.com/adap/flower/pull/3725), " +"[#3729](https://github.com/adap/flower/pull/3729), " +"[#3580](https://github.com/adap/flower/pull/3580), " +"[#3578](https://github.com/adap/flower/pull/3578), " +"[#3576](https://github.com/adap/flower/pull/3576), " +"[#3798](https://github.com/adap/flower/pull/3798), " +"[#3732](https://github.com/adap/flower/pull/3732), " +"[#3815](https://github.com/adap/flower/pull/3815))" +msgstr "" +"**Introduire l'API REST (expérimentale)** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" + +#: ../../source/ref-changelog.md:155 msgid "" -"Custom metric dictionaries are now used in two user-facing APIs: they are" -" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " -"they enable evaluation functions passed to built-in strategies (via " -"`eval_fn`) to return more than two 
evaluation metrics. Strategies can " -"even return *aggregated* metrics dictionaries for the server to keep " -"track of." +"The new run config feature allows you to run your Flower project in " +"different configurations without having to change a single line of code. " +"You can now build a configurable `ServerApp` and `ClientApp` that read " +"configuration values at runtime. This enables you to specify config " +"values like `learning-rate=0.01` in `pyproject.toml` (under the " +"`[tool.flwr.app.config]` key). These config values can then be easily " +"overridden via `flwr run --run-config learning-rate=0.02`, and read from " +"`Context` using `lr = context.run_config[\"learning-rate\"]`. Create a " +"new project using `flwr new` to see run config in action." msgstr "" -"Les dictionnaires de métriques personnalisés sont maintenant utilisés " -"dans deux API orientées vers l'utilisateur : ils sont renvoyés par les " -"méthodes de stratégie `aggregate_fit`/`aggregate_evaluate` et ils " -"permettent aux fonctions d'évaluation passées aux stratégies intégrées " -"(via `eval_fn`) de renvoyer plus de deux métriques d'évaluation. Les " -"stratégies peuvent même renvoyer des dictionnaires de métriques " -"*agrégées* pour que le serveur puisse en garder la trace." -#: ../../source/ref-changelog.md:913 +#: ../../source/ref-changelog.md:157 #, fuzzy msgid "" -"Strategy implementations should migrate their `aggregate_fit` and " -"`aggregate_evaluate` methods to the new return type (e.g., by simply " -"returning an empty `{}`), server-side evaluation functions should migrate" -" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." 
+"**Generalize** `client_fn` **signature to** `client_fn(context: Context) " +"-> Client` ([#3779](https://github.com/adap/flower/pull/3779), " +"[#3697](https://github.com/adap/flower/pull/3697), " +"[#3694](https://github.com/adap/flower/pull/3694), " +"[#3696](https://github.com/adap/flower/pull/3696))" msgstr "" -"Les implémentations de Stratey doivent migrer leurs méthodes " -"`aggregate_fit` et `aggregate_evaluate` vers le nouveau type de retour " -"(par exemple, en renvoyant simplement un `{}` vide), les fonctions " -"d'évaluation côté serveur doivent migrer de `return loss, accuracy` à " -"`return loss, {\"accuracy\" : accuracy}`." +"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:915 +#: ../../source/ref-changelog.md:159 msgid "" -"Flower 0.15-style return types are deprecated (but still supported), " -"compatibility will be removed in a future release." +"The `client_fn` signature has been generalized to `client_fn(context: " +"Context) -> Client`. It now receives a `Context` object instead of the " +"(now depreacated) `cid: str`. `Context` allows accessing `node_id`, " +"`node_config` and `run_config`, among other things. This enables you to " +"build a configurable `ClientApp` that leverages the new run config " +"system." msgstr "" -"Les types de retour du style Flower 0.15 sont dépréciés (mais toujours " -"pris en charge), la compatibilité sera supprimée dans une prochaine " -"version." -#: ../../source/ref-changelog.md:917 +#: ../../source/ref-changelog.md:161 msgid "" -"**Migration warnings for deprecated functionality** " -"([#690](https://github.com/adap/flower/pull/690))" +"The previous signature `client_fn(cid: str)` is now deprecated and " +"support for it will be removed in a future release. 
Use " +"`client_fn(context: Context) -> Client` everywhere." msgstr "" -"**Avertissements de migration pour les fonctionnalités obsolètes** " -"([#690](https://github.com/adap/flower/pull/690))" -#: ../../source/ref-changelog.md:919 +#: ../../source/ref-changelog.md:163 +#, fuzzy msgid "" -"Earlier versions of Flower were often migrated to new APIs, while " -"maintaining compatibility with legacy APIs. This release introduces " -"detailed warning messages if usage of deprecated APIs is detected. The " -"new warning messages often provide details on how to migrate to more " -"recent APIs, thus easing the transition from one release to another." +"**Introduce new** `server_fn(context)` " +"([#3773](https://github.com/adap/flower/pull/3773), " +"[#3796](https://github.com/adap/flower/pull/3796), " +"[#3771](https://github.com/adap/flower/pull/3771))" msgstr "" -"Les versions antérieures de Flower ont souvent été migrées vers de " -"nouvelles API, tout en maintenant la compatibilité avec les anciennes " -"API. Cette version introduit des messages d'avertissement détaillés si " -"l'utilisation d'API obsolètes est détectée. Les nouveaux messages " -"d'avertissement fournissent souvent des détails sur la façon de migrer " -"vers des API plus récentes, facilitant ainsi la transition d'une version " -"à l'autre." 
+"**Introduire la télémétrie optionnelle** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/ref-changelog.md:921 +#: ../../source/ref-changelog.md:165 msgid "" -"Improved docs and docstrings " -"([#691](https://github.com/adap/flower/pull/691) " -"[#692](https://github.com/adap/flower/pull/692) " -"[#713](https://github.com/adap/flower/pull/713))" +"In addition to the new `client_fn(context:Context)`, a new " +"`server_fn(context: Context) -> ServerAppComponents` can now be passed to" +" `ServerApp` (instead of passing, for example, `Strategy`, directly). " +"This enables you to leverage the full `Context` on the server-side to " +"build a configurable `ServerApp`." msgstr "" -"Amélioration des docs et des docstrings " -"([#691](https://github.com/adap/flower/pull/691) " -"[#692](https://github.com/adap/flower/pull/692) " -"[#713](https://github.com/adap/flower/pull/713))" -#: ../../source/ref-changelog.md:923 -msgid "MXNet example and documentation" -msgstr "Exemple et documentation MXNet" +#: ../../source/ref-changelog.md:167 +#, fuzzy +msgid "" +"**Relaunch all** `flwr new` **templates** " +"([#3877](https://github.com/adap/flower/pull/3877), " +"[#3821](https://github.com/adap/flower/pull/3821), " +"[#3587](https://github.com/adap/flower/pull/3587), " +"[#3795](https://github.com/adap/flower/pull/3795), " +"[#3875](https://github.com/adap/flower/pull/3875), " +"[#3859](https://github.com/adap/flower/pull/3859), " +"[#3760](https://github.com/adap/flower/pull/3760))" +msgstr "" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " 
+"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:925 +#: ../../source/ref-changelog.md:169 msgid "" -"FedBN implementation in example PyTorch: From Centralized To Federated " -"([#696](https://github.com/adap/flower/pull/696) " -"[#702](https://github.com/adap/flower/pull/702) " -"[#705](https://github.com/adap/flower/pull/705))" +"All `flwr new` templates have been significantly updated to showcase new " +"Flower features and best practices. This includes using `flwr run` and " +"the new run config feature. You can now easily create a new project using" +" `flwr new` and, after following the instructions to install it, `flwr " +"run` it." msgstr "" -"Mise en œuvre de FedBN dans l'exemple PyTorch : De la centralisation à la" -" fédération ([#696](https://github.com/adap/flower/pull/696) " -"[#702](https://github.com/adap/flower/pull/702) " -"[#705](https://github.com/adap/flower/pull/705))" -#: ../../source/ref-changelog.md:929 +#: ../../source/ref-changelog.md:171 +#, fuzzy msgid "" -"**Serialization-agnostic server** " -"([#721](https://github.com/adap/flower/pull/721))" +"**Introduce** `flower-supernode` **(preview)** " +"([#3353](https://github.com/adap/flower/pull/3353))" msgstr "" -"**Serveur agnostique de sérialisation** " -"([#721](https://github.com/adap/flower/pull/721))" +"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-changelog.md:931 +#: ../../source/ref-changelog.md:173 msgid "" -"The Flower server is now fully serialization-agnostic. Prior usage of " -"class `Weights` (which represents parameters as deserialized NumPy " -"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). 
" -"`Parameters` objects are fully serialization-agnostic and represents " -"parameters as byte arrays, the `tensor_type` attributes indicates how " -"these byte arrays should be interpreted (e.g., for " -"serialization/deserialization)." +"The new `flower-supernode` CLI is here to replace `flower-client-app`. " +"`flower-supernode` brings full multi-app support to the Flower client-" +"side. It also allows to pass `--node-config` to the SuperNode, which is " +"accessible in your `ClientApp` via `Context` (using the new " +"`client_fn(context: Context)` signature)." msgstr "" -"Le serveur Flower est désormais totalement agnostique en matière de " -"sérialisation. L'utilisation antérieure de la classe `Weights` (qui " -"représente les paramètres sous forme de tableaux NumPy désérialisés) a " -"été remplacée par la classe `Parameters` (par exemple, dans `Strategy`). " -"Les objets `Parameters` sont totalement agnostiques en matière de " -"sérialisation et représentent les paramètres sous forme de tableaux " -"d'octets, les attributs `tensor_type` indiquent comment ces tableaux " -"d'octets doivent être interprétés (par exemple, pour la " -"sérialisation/désérialisation)." -#: ../../source/ref-changelog.md:933 +#: ../../source/ref-changelog.md:175 +#, fuzzy msgid "" -"Built-in strategies implement this approach by handling serialization and" -" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " -"implementations should update to the slightly changed Strategy method " -"definitions. Strategy authors can consult PR " -"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" -" easily migrate to the new format." 
+"**Introduce node config** " +"([#3782](https://github.com/adap/flower/pull/3782), " +"[#3780](https://github.com/adap/flower/pull/3780), " +"[#3695](https://github.com/adap/flower/pull/3695), " +"[#3886](https://github.com/adap/flower/pull/3886))" msgstr "" -"Les stratégies intégrées mettent en œuvre cette approche en gérant en " -"interne la sérialisation et la désérialisation de `Weights`. Les " -"implémentations de stratégies personnalisées ou tierces doivent être " -"mises à jour avec les définitions de méthodes de stratégie légèrement " -"modifiées. Les auteurs de stratégies peuvent consulter le PR " -"[#721](https://github.com/adap/flower/pull/721) pour voir comment les " -"stratégies peuvent facilement migrer vers le nouveau format." +"**Introduire une nouvelle fleur Référence : FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" -#: ../../source/ref-changelog.md:935 +#: ../../source/ref-changelog.md:177 msgid "" -"Deprecated `flwr.server.Server.evaluate`, use " -"`flwr.server.Server.evaluate_round` instead " -"([#717](https://github.com/adap/flower/pull/717))" +"A new node config feature allows you to pass a static configuration to " +"the SuperNode. This configuration is read-only and available to every " +"`ClientApp` running on that SuperNode. A `ClientApp` can access the node " +"config via `Context` (`context.node_config`)." 
msgstr "" -"Déclassé `flwr.server.Server.evaluate`, utiliser " -"`flwr.server.Server.evaluate_round` à la place " -"([#717](https://github.com/adap/flower/pull/717))" - -#: ../../source/ref-changelog.md:937 -msgid "v0.15.0 (2021-03-12)" -msgstr "v0.15.0 (2021-03-12)" -#: ../../source/ref-changelog.md:941 +#: ../../source/ref-changelog.md:179 msgid "" -"**Server-side parameter initialization** " -"([#658](https://github.com/adap/flower/pull/658))" +"**Introduce SuperExec (experimental)** " +"([#3605](https://github.com/adap/flower/pull/3605), " +"[#3723](https://github.com/adap/flower/pull/3723), " +"[#3731](https://github.com/adap/flower/pull/3731), " +"[#3589](https://github.com/adap/flower/pull/3589), " +"[#3604](https://github.com/adap/flower/pull/3604), " +"[#3622](https://github.com/adap/flower/pull/3622), " +"[#3838](https://github.com/adap/flower/pull/3838), " +"[#3720](https://github.com/adap/flower/pull/3720), " +"[#3606](https://github.com/adap/flower/pull/3606), " +"[#3602](https://github.com/adap/flower/pull/3602), " +"[#3603](https://github.com/adap/flower/pull/3603), " +"[#3555](https://github.com/adap/flower/pull/3555), " +"[#3808](https://github.com/adap/flower/pull/3808), " +"[#3724](https://github.com/adap/flower/pull/3724), " +"[#3658](https://github.com/adap/flower/pull/3658), " +"[#3629](https://github.com/adap/flower/pull/3629))" msgstr "" -"**Initialisation des paramètres côté serveur** " -"([#658](https://github.com/adap/flower/pull/658))" -#: ../../source/ref-changelog.md:943 +#: ../../source/ref-changelog.md:181 msgid "" -"Model parameters can now be initialized on the server-side. Server-side " -"parameter initialization works via a new `Strategy` method called " -"`initialize_parameters`." +"This is the first experimental release of Flower SuperExec, a new service" +" that executes your runs. It's not ready for production deployment just " +"yet, but don't hesitate to give it a try if you're interested." 
msgstr "" -"Les paramètres du modèle peuvent maintenant être initialisés côté " -"serveur. L'initialisation des paramètres côté serveur fonctionne via une " -"nouvelle méthode `Strategy` appelée `initialize_parameters`." -#: ../../source/ref-changelog.md:945 +#: ../../source/ref-changelog.md:183 +#, fuzzy msgid "" -"Built-in strategies support a new constructor argument called " -"`initial_parameters` to set the initial parameters. Built-in strategies " -"will provide these initial parameters to the server on startup and then " -"delete them to free the memory afterwards." +"**Add new federated learning with tabular data example** " +"([#3568](https://github.com/adap/flower/pull/3568))" msgstr "" -"Les stratégies intégrées prennent en charge un nouvel argument du " -"constructeur appelé `initial_parameters` pour définir les paramètres " -"initiaux. Les stratégies intégrées fourniront ces paramètres initiaux au " -"serveur au démarrage et les supprimeront ensuite pour libérer la mémoire." +"**Ajouter un nouvel exemple d'apprentissage fédéré utilisant fastai et " +"Flower** ([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/ref-changelog.md:964 +#: ../../source/ref-changelog.md:185 msgid "" -"If no initial parameters are provided to the strategy, the server will " -"continue to use the current behaviour (namely, it will ask one of the " -"connected clients for its parameters and use these as the initial global " -"parameters)." +"A new code example exemplifies a federated learning setup using the " +"Flower framework on the Adult Census Income tabular dataset." msgstr "" -"Si aucun paramètre initial n'est fourni à la stratégie, le serveur " -"continuera à utiliser le comportement actuel (à savoir qu'il demandera à " -"l'un des clients connectés ses paramètres et les utilisera comme " -"paramètres globaux initiaux)." 
- -#: ../../source/ref-changelog.md:966 -msgid "Deprecations" -msgstr "Dépréciations" -#: ../../source/ref-changelog.md:968 +#: ../../source/ref-changelog.md:187 +#, fuzzy msgid "" -"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " -"`flwr.server.strategy.FedAvg`, which is equivalent)" +"**Create generic adapter layer (preview)** " +"([#3538](https://github.com/adap/flower/pull/3538), " +"[#3536](https://github.com/adap/flower/pull/3536), " +"[#3540](https://github.com/adap/flower/pull/3540))" msgstr "" -"Déclasser `flwr.server.strategy.DefaultStrategy` (migrer vers " -"`flwr.server.strategy.FedAvg`, qui est équivalent)" - -#: ../../source/ref-changelog.md:970 -msgid "v0.14.0 (2021-02-18)" -msgstr "v0.14.0 (2021-02-18)" +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:974 +#: ../../source/ref-changelog.md:189 msgid "" -"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " -"([#610](https://github.com/adap/flower/pull/610) " -"[#572](https://github.com/adap/flower/pull/572) " -"[#633](https://github.com/adap/flower/pull/633))" +"A new generic gRPC adapter layer allows 3rd-party frameworks to integrate" +" with Flower in a transparent way. This makes Flower more modular and " +"allows for integration into other federated learning solutions and " +"platforms." msgstr "" -"**Généralisé** `Client.fit` **et** `Client.evaluate` **valeurs de " -"retour** ([#610](https://github.com/adap/flower/pull/610) " -"[#572](https://github.com/adap/flower/pull/572) " -"[#633](https://github.com/adap/flower/pull/633))" -#: ../../source/ref-changelog.md:976 +#: ../../source/ref-changelog.md:191 +#, fuzzy msgid "" -"Clients can now return an additional dictionary mapping `str` keys to " -"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. 
" -"This means one can return almost arbitrary values from `fit`/`evaluate` " -"and make use of them on the server side!" +"**Refactor Flower Simulation Engine** " +"([#3581](https://github.com/adap/flower/pull/3581), " +"[#3471](https://github.com/adap/flower/pull/3471), " +"[#3804](https://github.com/adap/flower/pull/3804), " +"[#3468](https://github.com/adap/flower/pull/3468), " +"[#3839](https://github.com/adap/flower/pull/3839), " +"[#3806](https://github.com/adap/flower/pull/3806), " +"[#3861](https://github.com/adap/flower/pull/3861), " +"[#3543](https://github.com/adap/flower/pull/3543), " +"[#3472](https://github.com/adap/flower/pull/3472), " +"[#3829](https://github.com/adap/flower/pull/3829), " +"[#3469](https://github.com/adap/flower/pull/3469))" msgstr "" -"Les clients peuvent maintenant renvoyer un dictionnaire supplémentaire " -"associant les clés `str` aux valeurs des types suivants : `bool`, " -"`bytes`, `float`, `int`, `str`. Cela signifie que l'on peut renvoyer des " -"valeurs presque arbitraires de `fit`/`evaluate` et les utiliser du côté " -"du serveur !" +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:978 +#: ../../source/ref-changelog.md:193 msgid "" -"This improvement also allowed for more consistent return types between " -"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " -"dict)` representing the loss, number of examples, and a dictionary " -"holding arbitrary problem-specific values like accuracy." +"The Simulation Engine was significantly refactored. This results in " +"faster and more stable simulations. 
It is also the foundation for " +"upcoming changes that aim to provide the next level of performance and " +"configurability in federated learning simulations." msgstr "" -"Cette amélioration a également permis de rendre plus cohérents les types " -"de retour entre `fit` et `evaluate` : `evaluate` devrait maintenant " -"retourner un tuple `(float, int, dict)` représentant la perte, le nombre " -"d'exemples, et un dictionnaire contenant des valeurs arbitraires " -"spécifiques au problème comme la précision." -#: ../../source/ref-changelog.md:980 +#: ../../source/ref-changelog.md:195 +#, fuzzy msgid "" -"In case you wondered: this feature is compatible with existing projects, " -"the additional dictionary return value is optional. New code should " -"however migrate to the new return types to be compatible with upcoming " -"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " -"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " -"details." +"**Optimize Docker containers** " +"([#3591](https://github.com/adap/flower/pull/3591))" msgstr "" -"Au cas où tu te poserais la question : cette fonctionnalité est " -"compatible avec les projets existants, la valeur de retour supplémentaire" -" du dictionnaire est facultative. Le nouveau code doit cependant migrer " -"vers les nouveaux types de retour pour être compatible avec les " -"prochaines versions de Flower (`fit` : `List[np.ndarray], int, Dict[str, " -"Scalar]`, `evaluate` : `float, int, Dict[str, Scalar]`). Voir l'exemple " -"ci-dessous pour plus de détails." +"Nouveau thème de documentation " +"([#551](https://github.com/adap/flower/pull/551))" -#: ../../source/ref-changelog.md:982 +#: ../../source/ref-changelog.md:197 msgid "" -"*Code example:* note the additional dictionary return values in both " -"`FlwrClient.fit` and `FlwrClient.evaluate`:" +"Flower Docker containers were optimized and updated to use that latest " +"Flower framework features." 
msgstr "" -"*Exemple de code:* note les valeurs de retour du dictionnaire " -"supplémentaires dans `FlwrClient.fit` et `FlwrClient.evaluate` :" -#: ../../source/ref-changelog.md:997 +#: ../../source/ref-changelog.md:199 +#, fuzzy msgid "" -"**Generalized** `config` **argument in** `Client.fit` **and** " -"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" +"**Improve logging** ([#3776](https://github.com/adap/flower/pull/3776), " +"[#3789](https://github.com/adap/flower/pull/3789))" msgstr "" -"**Généralisé** `config` **argument dans** `Client.fit` **et** " -"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" +"**Exemple de code mis à jour** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:999 +#: ../../source/ref-changelog.md:201 msgid "" -"The `config` argument used to be of type `Dict[str, str]`, which means " -"that dictionary values were expected to be strings. The new release " -"generalizes this to enable values of the following types: `bool`, " -"`bytes`, `float`, `int`, `str`." +"Improved logging aims to be more concise and helpful to show you the " +"details you actually care about." msgstr "" -"L'argument `config` était auparavant de type `Dict[str, str]`, ce qui " -"signifie que les valeurs du dictionnaire devaient être des chaînes. La " -"nouvelle version généralise cela pour permettre les valeurs des types " -"suivants : `bool`, `bytes`, `float`, `int`, `str`." -#: ../../source/ref-changelog.md:1001 +#: ../../source/ref-changelog.md:203 +#, fuzzy msgid "" -"This means one can now pass almost arbitrary values to `fit`/`evaluate` " -"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" -"side and `int(config[\"epochs\"])` on the client side!" 
+"**Refactor framework internals** " +"([#3621](https://github.com/adap/flower/pull/3621), " +"[#3792](https://github.com/adap/flower/pull/3792), " +"[#3772](https://github.com/adap/flower/pull/3772), " +"[#3805](https://github.com/adap/flower/pull/3805), " +"[#3583](https://github.com/adap/flower/pull/3583), " +"[#3825](https://github.com/adap/flower/pull/3825), " +"[#3597](https://github.com/adap/flower/pull/3597), " +"[#3802](https://github.com/adap/flower/pull/3802), " +"[#3569](https://github.com/adap/flower/pull/3569))" msgstr "" -"Cela signifie que l'on peut maintenant passer des valeurs presque " -"arbitraires à `fit`/`evaluate` en utilisant le dictionnaire `config`. " -"Yay, plus de `str(epochs)` du côté serveur et `int(config[\"epochs\"])` " -"du côté client !" +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" + +#: ../../source/ref-changelog.md:207 +#, fuzzy +msgid "Documentation improvements" +msgstr "Améliorations facultatives" -#: ../../source/ref-changelog.md:1003 +#: ../../source/ref-changelog.md:209 +#, fuzzy msgid "" -"*Code example:* note that the `config` dictionary now contains non-`str` " -"values in both `Client.fit` and `Client.evaluate`:" +"**Add 🇰🇷 Korean translations** " +"([#3680](https://github.com/adap/flower/pull/3680))" msgstr "" -"*Exemple de code:* Notez que le dictionnaire `config` contient maintenant" -" des valeurs autres que `str` dans `Client.fit` et `Client.evaluate` :" - -#: ../../source/ref-changelog.md:1020 -msgid "v0.13.0 (2021-01-08)" -msgstr "v0.13.0 (2021-01-08)" +"**Ouvrir dans le bouton Colab** " +"([#1389](https://github.com/adap/flower/pull/1389))" -#: ../../source/ref-changelog.md:1024 +#: 
../../source/ref-changelog.md:211 +#, fuzzy msgid "" -"New example: PyTorch From Centralized To Federated " -"([#549](https://github.com/adap/flower/pull/549))" +"**Update translations** " +"([#3586](https://github.com/adap/flower/pull/3586), " +"[#3679](https://github.com/adap/flower/pull/3679), " +"[#3570](https://github.com/adap/flower/pull/3570), " +"[#3681](https://github.com/adap/flower/pull/3681), " +"[#3617](https://github.com/adap/flower/pull/3617), " +"[#3674](https://github.com/adap/flower/pull/3674), " +"[#3671](https://github.com/adap/flower/pull/3671), " +"[#3572](https://github.com/adap/flower/pull/3572), " +"[#3631](https://github.com/adap/flower/pull/3631))" msgstr "" -"Nouvel exemple : PyTorch de centralisé à fédéré " -"([#549](https://github.com/adap/flower/pull/549))" +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:1025 -msgid "Improved documentation" -msgstr "Amélioration de la documentation" +#: ../../source/ref-changelog.md:213 +#, fuzzy +msgid "" +"**Update documentation** " +"([#3864](https://github.com/adap/flower/pull/3864), " +"[#3688](https://github.com/adap/flower/pull/3688), " +"[#3562](https://github.com/adap/flower/pull/3562), " +"[#3641](https://github.com/adap/flower/pull/3641), " +"[#3384](https://github.com/adap/flower/pull/3384), " +"[#3634](https://github.com/adap/flower/pull/3634), " +"[#3823](https://github.com/adap/flower/pull/3823), " +"[#3793](https://github.com/adap/flower/pull/3793), " +"[#3707](https://github.com/adap/flower/pull/3707))" +msgstr "" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " 
+"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:1026 -msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" +#: ../../source/ref-changelog.md:215 +msgid "" +"Updated documentation includes new install instructions for different " +"shells, a new Flower Code Examples documentation landing page, new `flwr`" +" CLI docs and an updated federated XGBoost code example." msgstr "" -"Nouveau thème de documentation " -"([#551](https://github.com/adap/flower/pull/551))" -#: ../../source/ref-changelog.md:1027 -msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" -msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" +#: ../../source/ref-changelog.md:219 +msgid "**Deprecate** `client_fn(cid: str)`" +msgstr "" -#: ../../source/ref-changelog.md:1028 +#: ../../source/ref-changelog.md:221 msgid "" -"Updated examples documentation " -"([#549](https://github.com/adap/flower/pull/549))" +"`client_fn` used to have a signature `client_fn(cid: str) -> Client`. " +"This signature is now deprecated. Use the new signature " +"`client_fn(context: Context) -> Client` instead. The new argument " +"`context` allows accessing `node_id`, `node_config`, `run_config` and " +"other `Context` features. When running using the simulation engine (or " +"using `flower-supernode` with a custom `--node-config partition-id=...`)," +" `context.node_config[\"partition-id\"]` will return an `int` partition " +"ID that can be used with Flower Datasets to load a different partition of" +" the dataset on each simulated or deployed SuperNode." 
msgstr "" -"Mise à jour de la documentation des exemples " -"([#549](https://github.com/adap/flower/pull/549))" -#: ../../source/ref-changelog.md:1029 +#: ../../source/ref-changelog.md:223 msgid "" -"Removed obsolete documentation " -"([#548](https://github.com/adap/flower/pull/548))" +"**Deprecate passing** `Server/ServerConfig/Strategy/ClientManager` **to**" +" `ServerApp` **directly**" msgstr "" -"Suppression de la documentation obsolète " -"([#548](https://github.com/adap/flower/pull/548))" - -#: ../../source/ref-changelog.md:1031 -msgid "Bugfix:" -msgstr "Correction de bogues :" -#: ../../source/ref-changelog.md:1033 +#: ../../source/ref-changelog.md:225 msgid "" -"`Server.fit` does not disconnect clients when finished, disconnecting the" -" clients is now handled in `flwr.server.start_server` " -"([#553](https://github.com/adap/flower/pull/553) " -"[#540](https://github.com/adap/flower/issues/540))." +"Creating `ServerApp` using `ServerApp(config=config, strategy=strategy)` " +"is now deprecated. Instead of passing " +"`Server/ServerConfig/Strategy/ClientManager` to `ServerApp` directly, " +"pass them wrapped in a `server_fn(context: Context) -> " +"ServerAppComponents` function, like this: " +"`ServerApp(server_fn=server_fn)`. `ServerAppComponents` can hold " +"references to `Server/ServerConfig/Strategy/ClientManager`. In addition " +"to that, `server_fn` allows you to access `Context` (for example, to read" +" the `run_config`)." msgstr "" -"`Server.fit` ne déconnecte pas les clients lorsqu'il est terminé, la " -"déconnexion des clients est maintenant gérée dans " -"`flwr.server.start_server` " -"([#553](https://github.com/adap/flower/pull/553) " -"[#540](https://github.com/adap/flower/issues/540))." 
- -#: ../../source/ref-changelog.md:1035 -msgid "v0.12.0 (2020-12-07)" -msgstr "v0.12.0 (2020-12-07)" -#: ../../source/ref-changelog.md:1037 ../../source/ref-changelog.md:1053 -msgid "Important changes:" -msgstr "Changements importants :" +#: ../../source/ref-changelog.md:229 +#, fuzzy +msgid "" +"**Remove support for `client_ids` in `start_simulation`** " +"([#3699](https://github.com/adap/flower/pull/3699))" +msgstr "" +"**Améliorer la prise en charge des GPU dans les simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" -#: ../../source/ref-changelog.md:1039 +#: ../../source/ref-changelog.md:231 msgid "" -"Added an example for embedded devices " -"([#507](https://github.com/adap/flower/pull/507))" +"The (rarely used) feature that allowed passing custom `client_ids` to the" +" `start_simulation` function was removed. This removal is part of a " +"bigger effort to refactor the simulation engine and unify how the Flower " +"internals work in simulation and deployment." msgstr "" -"Ajout d'un exemple pour les périphériques embarqués " -"([#507](https://github.com/adap/flower/pull/507))" -#: ../../source/ref-changelog.md:1040 +#: ../../source/ref-changelog.md:233 +#, fuzzy msgid "" -"Added a new NumPyClient (in addition to the existing KerasClient) " -"([#504](https://github.com/adap/flower/pull/504) " -"[#508](https://github.com/adap/flower/pull/508))" +"**Remove `flower-driver-api` and `flower-fleet-api`** " +"([#3418](https://github.com/adap/flower/pull/3418))" msgstr "" -"Ajout d'un nouveau NumPyClient (en plus du KerasClient existant) " -"([#504](https://github.com/adap/flower/pull/504) " -"[#508](https://github.com/adap/flower/pull/508))" +"**Supprimez KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:1041 +#: ../../source/ref-changelog.md:235 msgid "" -"Deprecated `flwr_example` package and started to migrate examples into " -"the top-level `examples` directory " 
-"([#494](https://github.com/adap/flower/pull/494) " -"[#512](https://github.com/adap/flower/pull/512))" +"The two deprecated CLI commands `flower-driver-api` and `flower-fleet-" +"api` were removed in an effort to streamline the SuperLink developer " +"experience. Use `flower-superlink` instead." msgstr "" -"Déclassement du paquet `flwr_example` et migration des exemples dans le " -"répertoire de premier niveau `examples` " -"([#494](https://github.com/adap/flower/pull/494) " -"[#512](https://github.com/adap/flower/pull/512))" -#: ../../source/ref-changelog.md:1043 -msgid "v0.11.0 (2020-11-30)" -msgstr "v0.11.0 (2020-11-30)" +#: ../../source/ref-changelog.md:237 +#, fuzzy +msgid "v1.9.0 (2024-06-10)" +msgstr "v1.3.0 (2023-02-06)" -#: ../../source/ref-changelog.md:1045 -msgid "Incompatible changes:" -msgstr "Changements incompatibles :" - -#: ../../source/ref-changelog.md:1047 +#: ../../source/ref-changelog.md:243 msgid "" -"Renamed strategy methods " -"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " -"Flower's public APIs. Other public methods/functions (e.g., every method " -"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " -"which is why we're removing it from the four methods in Strategy. To " -"migrate rename the following `Strategy` methods accordingly:" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Mahdi Beitollahi`," +" `Robert Steiner`, `Taner Topal`, `Yan Gao`, `bapic`, `mohammadnaseri` " msgstr "" -"Renommé les méthodes de stratégie " -"([#486](https://github.com/adap/flower/pull/486)) pour unifier le nommage" -" des API publiques de Flower. D'autres méthodes/fonctions publiques (par " -"exemple, toutes les méthodes de `Client`, mais aussi `Strategy.evaluate`)" -" n'utilisent pas le préfixe `on_`, c'est pourquoi nous le supprimons des " -"quatre méthodes de Stratégie. 
Pour migrer, renommez les méthodes de " -"`Strategy` suivantes en conséquence :" - -#: ../../source/ref-changelog.md:1048 -msgid "`on_configure_evaluate` => `configure_evaluate`" -msgstr "`on_configure_evaluate` => `configure_evaluate`" - -#: ../../source/ref-changelog.md:1049 -msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" -msgstr "`on_aggregate_evaluate` => `aggregate_evaluate`" - -#: ../../source/ref-changelog.md:1050 -msgid "`on_configure_fit` => `configure_fit`" -msgstr "`on_configure_fit` => `configure_fit`" - -#: ../../source/ref-changelog.md:1051 -msgid "`on_aggregate_fit` => `aggregate_fit`" -msgstr "`on_aggregate_fit` => `aggregate_fit`" -#: ../../source/ref-changelog.md:1055 +#: ../../source/ref-changelog.md:247 +#, fuzzy msgid "" -"Deprecated `DefaultStrategy` " -"([#479](https://github.com/adap/flower/pull/479)). To migrate use " -"`FedAvg` instead." +"**Introduce built-in authentication (preview)** " +"([#2946](https://github.com/adap/flower/pull/2946), " +"[#3388](https://github.com/adap/flower/pull/3388), " +"[#2948](https://github.com/adap/flower/pull/2948), " +"[#2917](https://github.com/adap/flower/pull/2917), " +"[#3386](https://github.com/adap/flower/pull/3386), " +"[#3308](https://github.com/adap/flower/pull/3308), " +"[#3001](https://github.com/adap/flower/pull/3001), " +"[#3409](https://github.com/adap/flower/pull/3409), " +"[#2999](https://github.com/adap/flower/pull/2999), " +"[#2979](https://github.com/adap/flower/pull/2979), " +"[#3389](https://github.com/adap/flower/pull/3389), " +"[#3503](https://github.com/adap/flower/pull/3503), " +"[#3366](https://github.com/adap/flower/pull/3366), " +"[#3357](https://github.com/adap/flower/pull/3357))" msgstr "" -"Déclassé `DefaultStrategy` " -"([#479](https://github.com/adap/flower/pull/479)). Pour migrer, utilisez " -"`FedAvg` à la place." 
+"**Documentation mise à jour** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/ref-changelog.md:1056 +#: ../../source/ref-changelog.md:249 msgid "" -"Simplified examples and baselines " -"([#484](https://github.com/adap/flower/pull/484))." +"Flower 1.9 introduces the first build-in version of client node " +"authentication. In previous releases, users often wrote glue code to " +"connect Flower to external authentication systems. With this release, the" +" SuperLink can authenticate SuperNodes using a built-in authentication " +"system. A new [how-to guide](https://flower.ai/docs/framework/how-to-" +"authenticate-supernodes.html) and a new [code " +"example](https://github.com/adap/flower/tree/main/examples/flower-" +"authentication) help you to get started." msgstr "" -"Exemples simplifiés et lignes de base " -"([#484](https://github.com/adap/flower/pull/484))." -#: ../../source/ref-changelog.md:1057 +#: ../../source/ref-changelog.md:251 msgid "" -"Removed presently unused `on_conclude_round` from strategy interface " -"([#483](https://github.com/adap/flower/pull/483))." +"This is the first preview release of the Flower-native authentication " +"system. Many additional features are on the roadmap for upcoming Flower " +"releases - stay tuned." msgstr "" -"Suppression de `on_conclude_round` actuellement inutilisé de l'interface " -"de stratégie ([#483](https://github.com/adap/flower/pull/483))." 
-#: ../../source/ref-changelog.md:1058 +#: ../../source/ref-changelog.md:253 +#, fuzzy msgid "" -"Set minimal Python version to 3.6.1 instead of 3.6.9 " -"([#471](https://github.com/adap/flower/pull/471))." +"**Introduce end-to-end Docker support** " +"([#3483](https://github.com/adap/flower/pull/3483), " +"[#3266](https://github.com/adap/flower/pull/3266), " +"[#3390](https://github.com/adap/flower/pull/3390), " +"[#3283](https://github.com/adap/flower/pull/3283), " +"[#3285](https://github.com/adap/flower/pull/3285), " +"[#3391](https://github.com/adap/flower/pull/3391), " +"[#3403](https://github.com/adap/flower/pull/3403), " +"[#3458](https://github.com/adap/flower/pull/3458), " +"[#3533](https://github.com/adap/flower/pull/3533), " +"[#3453](https://github.com/adap/flower/pull/3453), " +"[#3486](https://github.com/adap/flower/pull/3486), " +"[#3290](https://github.com/adap/flower/pull/3290))" msgstr "" -"Fixe la version minimale de Python à 3.6.1 au lieu de 3.6.9 " -"([#471](https://github.com/adap/flower/pull/471))." +"**Introduire l'API REST (expérimentale)** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:1059 +#: ../../source/ref-changelog.md:255 msgid "" -"Improved `Strategy` docstrings " -"([#470](https://github.com/adap/flower/pull/470))." +"Full Flower Next Docker support is here! With the release of Flower 1.9, " +"Flower provides stable Docker images for the Flower SuperLink, the Flower" +" SuperNode, and the Flower `ServerApp`. This set of images enables you to" +" run all Flower components in Docker. 
Check out the new [how-to " +"guide](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html) to get stated." msgstr "" -"Amélioration des docstrings `Stratégie` " -"([#470](https://github.com/adap/flower/pull/470))." -#: ../../source/ref-example-projects.rst:2 +#: ../../source/ref-changelog.md:257 #, fuzzy -msgid "Example projects" -msgstr "Exemples de PyTorch" +msgid "" +"**Re-architect Flower Next simulation engine** " +"([#3307](https://github.com/adap/flower/pull/3307), " +"[#3355](https://github.com/adap/flower/pull/3355), " +"[#3272](https://github.com/adap/flower/pull/3272), " +"[#3273](https://github.com/adap/flower/pull/3273), " +"[#3417](https://github.com/adap/flower/pull/3417), " +"[#3281](https://github.com/adap/flower/pull/3281), " +"[#3343](https://github.com/adap/flower/pull/3343), " +"[#3326](https://github.com/adap/flower/pull/3326))" +msgstr "" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-example-projects.rst:4 +#: ../../source/ref-changelog.md:259 msgid "" -"Flower comes with a number of usage examples. The examples demonstrate " -"how Flower can be used to federate different kinds of existing machine " -"learning pipelines, usually leveraging popular machine learning " -"frameworks such as `PyTorch `_ or `TensorFlow " -"`_." +"Flower Next simulations now use a new in-memory `Driver` that improves " +"the reliability of simulations, especially in notebook environments. This" +" is a significant step towards a complete overhaul of the Flower Next " +"simulation architecture." 
msgstr "" -"Flower est livré avec un certain nombre d'exemples d'utilisation, qui " -"montrent comment Flower peut être utilisé pour fédérer différents types " -"de pipelines d'apprentissage automatique existants, qui s'appuient " -"généralement sur des frameworks d'apprentissage automatique populaires " -"tels que `PyTorch `_ ou `TensorFlow " -"`_." -#: ../../source/ref-example-projects.rst:10 +#: ../../source/ref-changelog.md:261 #, fuzzy msgid "" -"The following examples are available as standalone projects. Quickstart " -"TensorFlow/Keras ---------------------------" -msgstr "Les exemples suivants sont disponibles sous forme de projets autonomes." +"**Upgrade simulation engine** " +"([#3354](https://github.com/adap/flower/pull/3354), " +"[#3378](https://github.com/adap/flower/pull/3378), " +"[#3262](https://github.com/adap/flower/pull/3262), " +"[#3435](https://github.com/adap/flower/pull/3435), " +"[#3501](https://github.com/adap/flower/pull/3501), " +"[#3482](https://github.com/adap/flower/pull/3482), " +"[#3494](https://github.com/adap/flower/pull/3494))" +msgstr "" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-example-projects.rst:14 +#: ../../source/ref-changelog.md:263 msgid "" -"The TensorFlow/Keras quickstart example shows CIFAR-10 image " -"classification with MobileNetV2:" +"The Flower Next simulation engine comes with improved and configurable " +"logging. The Ray-based simulation backend in Flower 1.9 was updated to " +"use Ray 2.10." 
msgstr "" -"L'exemple de démarrage rapide TensorFlow/Keras montre la classification " -"d'images CIFAR-10 avec MobileNetV2 :" -#: ../../source/ref-example-projects.rst:17 +#: ../../source/ref-changelog.md:265 #, fuzzy msgid "" -"`Quickstart TensorFlow (Code) " -"`_" +"**Introduce FedPFT baseline** " +"([#3268](https://github.com/adap/flower/pull/3268))" msgstr "" -"`Quickstart TensorFlow (Code) " -"`_" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-example-projects.rst:18 -#, fuzzy -msgid ":doc:`Quickstart TensorFlow (Tutorial) `" +#: ../../source/ref-changelog.md:267 +msgid "" +"FedPFT allows you to perform one-shot Federated Learning by leveraging " +"widely available foundational models, dramatically reducing communication" +" costs while delivering high performing models. This is work led by Mahdi" +" Beitollahi from Huawei Noah's Ark Lab (Montreal, Canada). Read all the " +"details in their paper: \"Parametric Feature Transfer: One-shot Federated" +" Learning with Foundation Models\" " +"([arxiv](https://arxiv.org/abs/2402.01862))" msgstr "" -"`Quickstart TensorFlow (Tutorial) `_" -#: ../../source/ref-example-projects.rst:19 +#: ../../source/ref-changelog.md:269 +#, fuzzy msgid "" -"`Quickstart TensorFlow (Blog Post) `_" +"**Launch additional** `flwr new` **templates for Apple MLX, Hugging Face " +"Transformers, scikit-learn and TensorFlow** " +"([#3291](https://github.com/adap/flower/pull/3291), " +"[#3139](https://github.com/adap/flower/pull/3139), " +"[#3284](https://github.com/adap/flower/pull/3284), " +"[#3251](https://github.com/adap/flower/pull/3251), " +"[#3376](https://github.com/adap/flower/pull/3376), " +"[#3287](https://github.com/adap/flower/pull/3287))" msgstr "" -"`Quickstart TensorFlow (Blog Post) `_" - -#: ../../source/ref-example-projects.rst:23 -#: ../../source/tutorial-quickstart-pytorch.rst:5 -msgid "Quickstart PyTorch" -msgstr "Démarrage rapide de PyTorch" 
+"**Nouvel exemple de code MLCube** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/ref-example-projects.rst:25 +#: ../../source/ref-changelog.md:271 msgid "" -"The PyTorch quickstart example shows CIFAR-10 image classification with a" -" simple Convolutional Neural Network:" +"The `flwr` CLI's `flwr new` command is starting to become everone's " +"favorite way of creating new Flower projects. This release introduces " +"additional `flwr new` templates for Apple MLX, Hugging Face Transformers," +" scikit-learn and TensorFlow. In addition to that, existing templates " +"also received updates." msgstr "" -"L'exemple de démarrage rapide PyTorch montre la classification d'images " -"CIFAR-10 avec un simple réseau neuronal convolutif :" -#: ../../source/ref-example-projects.rst:28 +#: ../../source/ref-changelog.md:273 #, fuzzy msgid "" -"`Quickstart PyTorch (Code) " -"`_" +"**Refine** `RecordSet` **API** " +"([#3209](https://github.com/adap/flower/pull/3209), " +"[#3331](https://github.com/adap/flower/pull/3331), " +"[#3334](https://github.com/adap/flower/pull/3334), " +"[#3335](https://github.com/adap/flower/pull/3335), " +"[#3375](https://github.com/adap/flower/pull/3375), " +"[#3368](https://github.com/adap/flower/pull/3368))" msgstr "" -"`Quickstart PyTorch (Code) " -"`_" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-example-projects.rst:29 -#, fuzzy -msgid ":doc:`Quickstart PyTorch 
(Tutorial) `" +#: ../../source/ref-changelog.md:275 +msgid "" +"`RecordSet` is part of the Flower Next low-level API preview release. In " +"Flower 1.9, `RecordSet` received a number of usability improvements that " +"make it easier to build `RecordSet`-based `ServerApp`s and `ClientApp`s." msgstr "" -"`Quickstart PyTorch (Tutorial) `_" - -#: ../../source/ref-example-projects.rst:33 -msgid "PyTorch: From Centralized To Federated" -msgstr "PyTorch : De la centralisation à la fédération" -#: ../../source/ref-example-projects.rst:35 +#: ../../source/ref-changelog.md:277 +#, fuzzy msgid "" -"This example shows how a regular PyTorch project can be federated using " -"Flower:" +"**Beautify logging** ([#3379](https://github.com/adap/flower/pull/3379), " +"[#3430](https://github.com/adap/flower/pull/3430), " +"[#3461](https://github.com/adap/flower/pull/3461), " +"[#3360](https://github.com/adap/flower/pull/3360), " +"[#3433](https://github.com/adap/flower/pull/3433))" msgstr "" -"Cet exemple montre comment un projet PyTorch ordinaire peut être fédéré à" -" l'aide de Flower :" +"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-example-projects.rst:37 -#, fuzzy +#: ../../source/ref-changelog.md:279 msgid "" -"`PyTorch: From Centralized To Federated (Code) " -"`_" +"Logs received a substantial update. Not only are logs now much nicer to " +"look at, but they are also more configurable." 
msgstr "" -"`PyTorch : De la centralisation à la fédération (Code) " -"`_" -#: ../../source/ref-example-projects.rst:38 +#: ../../source/ref-changelog.md:281 #, fuzzy msgid "" -":doc:`PyTorch: From Centralized To Federated (Tutorial) `" +"**Improve reliability** " +"([#3564](https://github.com/adap/flower/pull/3564), " +"[#3561](https://github.com/adap/flower/pull/3561), " +"[#3566](https://github.com/adap/flower/pull/3566), " +"[#3462](https://github.com/adap/flower/pull/3462), " +"[#3225](https://github.com/adap/flower/pull/3225), " +"[#3514](https://github.com/adap/flower/pull/3514), " +"[#3535](https://github.com/adap/flower/pull/3535), " +"[#3372](https://github.com/adap/flower/pull/3372))" msgstr "" -"`PyTorch : De la centralisation à la fédération (Tutoriel) " -"`_" - -#: ../../source/ref-example-projects.rst:42 -msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" -msgstr "Apprentissage fédéré sur Raspberry Pi et Nvidia Jetson" +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-example-projects.rst:44 +#: ../../source/ref-changelog.md:283 msgid "" -"This example shows how Flower can be used to build a federated learning " -"system that run across Raspberry Pi and Nvidia Jetson:" +"Flower 1.9 includes reliability improvements across many parts of the " +"system. One example is a much improved SuperNode shutdown procedure." 
msgstr "" -"Cet exemple montre comment Flower peut être utilisé pour construire un " -"système d'apprentissage fédéré qui fonctionne sur Raspberry Pi et Nvidia " -"Jetson :" -#: ../../source/ref-example-projects.rst:46 +#: ../../source/ref-changelog.md:285 #, fuzzy msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " -"`_" +"**Update Swift and C++ SDKs** " +"([#3321](https://github.com/adap/flower/pull/3321), " +"[#2763](https://github.com/adap/flower/pull/2763))" msgstr "" -"`L'apprentissage fédéré sur Raspberry Pi et Nvidia Jetson (Code) " -"`_" +"**Exemple de code mis à jour** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-example-projects.rst:47 +#: ../../source/ref-changelog.md:287 +msgid "" +"In the C++ SDK, communication-related code is now separate from main " +"client logic. A new abstract class `Communicator` has been introduced " +"alongside a gRPC implementation of it." +msgstr "" + +#: ../../source/ref-changelog.md:289 +msgid "" +"**Improve testing, tooling and CI/CD infrastructure** " +"([#3294](https://github.com/adap/flower/pull/3294), " +"[#3282](https://github.com/adap/flower/pull/3282), " +"[#3311](https://github.com/adap/flower/pull/3311), " +"[#2878](https://github.com/adap/flower/pull/2878), " +"[#3333](https://github.com/adap/flower/pull/3333), " +"[#3255](https://github.com/adap/flower/pull/3255), " +"[#3349](https://github.com/adap/flower/pull/3349), " +"[#3400](https://github.com/adap/flower/pull/3400), " +"[#3401](https://github.com/adap/flower/pull/3401), " +"[#3399](https://github.com/adap/flower/pull/3399), " +"[#3346](https://github.com/adap/flower/pull/3346), " +"[#3398](https://github.com/adap/flower/pull/3398), " +"[#3397](https://github.com/adap/flower/pull/3397), " +"[#3347](https://github.com/adap/flower/pull/3347), " +"[#3502](https://github.com/adap/flower/pull/3502), " +"[#3387](https://github.com/adap/flower/pull/3387), 
" +"[#3542](https://github.com/adap/flower/pull/3542), " +"[#3396](https://github.com/adap/flower/pull/3396), " +"[#3496](https://github.com/adap/flower/pull/3496), " +"[#3465](https://github.com/adap/flower/pull/3465), " +"[#3473](https://github.com/adap/flower/pull/3473), " +"[#3484](https://github.com/adap/flower/pull/3484), " +"[#3521](https://github.com/adap/flower/pull/3521), " +"[#3363](https://github.com/adap/flower/pull/3363), " +"[#3497](https://github.com/adap/flower/pull/3497), " +"[#3464](https://github.com/adap/flower/pull/3464), " +"[#3495](https://github.com/adap/flower/pull/3495), " +"[#3478](https://github.com/adap/flower/pull/3478), " +"[#3271](https://github.com/adap/flower/pull/3271))" +msgstr "" + +#: ../../source/ref-changelog.md:291 +msgid "" +"As always, the Flower tooling, testing, and CI/CD infrastructure has " +"received many updates." +msgstr "" + +#: ../../source/ref-changelog.md:293 msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " -"`_" +"**Improve documentation** " +"([#3530](https://github.com/adap/flower/pull/3530), " +"[#3539](https://github.com/adap/flower/pull/3539), " +"[#3425](https://github.com/adap/flower/pull/3425), " +"[#3520](https://github.com/adap/flower/pull/3520), " +"[#3286](https://github.com/adap/flower/pull/3286), " +"[#3516](https://github.com/adap/flower/pull/3516), " +"[#3523](https://github.com/adap/flower/pull/3523), " +"[#3545](https://github.com/adap/flower/pull/3545), " +"[#3498](https://github.com/adap/flower/pull/3498), " +"[#3439](https://github.com/adap/flower/pull/3439), " +"[#3440](https://github.com/adap/flower/pull/3440), " +"[#3382](https://github.com/adap/flower/pull/3382), " +"[#3559](https://github.com/adap/flower/pull/3559), " +"[#3432](https://github.com/adap/flower/pull/3432), " +"[#3278](https://github.com/adap/flower/pull/3278), " +"[#3371](https://github.com/adap/flower/pull/3371), " +"[#3519](https://github.com/adap/flower/pull/3519), " 
+"[#3267](https://github.com/adap/flower/pull/3267), " +"[#3204](https://github.com/adap/flower/pull/3204), " +"[#3274](https://github.com/adap/flower/pull/3274))" msgstr "" -"`L'apprentissage fédéré sur Raspberry Pi et Nvidia Jetson (Blog Post) " -"`_" -#: ../../source/ref-faq.rst:4 +#: ../../source/ref-changelog.md:295 msgid "" -"This page collects answers to commonly asked questions about Federated " -"Learning with Flower." +"As always, the Flower documentation has received many updates. Notable " +"new pages include:" msgstr "" -"Cette page rassemble les réponses aux questions les plus fréquemment " -"posées sur l'apprentissage fédéré avec Flower." -#: ../../source/ref-faq.rst -#, fuzzy -msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" +#: ../../source/ref-changelog.md:297 +msgid "" +"[How-to upgrate to Flower Next (Flower Next migration " +"guide)](https://flower.ai/docs/framework/how-to-upgrade-to-flower-" +"next.html)" msgstr "" -":fa:`eye,mr-1` Flower peut-il fonctionner sur les ordinateurs portables " -"Juptyter / Google Colab ?" -#: ../../source/ref-faq.rst:8 +#: ../../source/ref-changelog.md:299 msgid "" -"Yes, it can! Flower even comes with a few under-the-hood optimizations to" -" make it work even better on Colab. Here's a quickstart example:" +"[How-to run Flower using Docker](https://flower.ai/docs/framework/how-to-" +"run-flower-using-docker.html)" msgstr "" -"Oui, c'est possible ! Flower est même livré avec quelques optimisations " -"pour qu'il fonctionne encore mieux sur Colab. 
Voici un exemple de " -"démarrage rapide :" -#: ../../source/ref-faq.rst:10 -#, fuzzy +#: ../../source/ref-changelog.md:301 msgid "" -"`Flower simulation PyTorch " -"`_" +"[Flower Mods reference](https://flower.ai/docs/framework/ref-" +"api/flwr.client.mod.html#module-flwr.client.mod)" msgstr "" -"`Flower Quickstart (TensorFlow/Keras) " -"`_" -#: ../../source/ref-faq.rst:11 +#: ../../source/ref-changelog.md:303 #, fuzzy msgid "" -"`Flower simulation TensorFlow/Keras " -"`_" +"**General updates to Flower Examples** " +"([#3205](https://github.com/adap/flower/pull/3205), " +"[#3226](https://github.com/adap/flower/pull/3226), " +"[#3211](https://github.com/adap/flower/pull/3211), " +"[#3252](https://github.com/adap/flower/pull/3252), " +"[#3427](https://github.com/adap/flower/pull/3427), " +"[#3410](https://github.com/adap/flower/pull/3410), " +"[#3426](https://github.com/adap/flower/pull/3426), " +"[#3228](https://github.com/adap/flower/pull/3228), " +"[#3342](https://github.com/adap/flower/pull/3342), " +"[#3200](https://github.com/adap/flower/pull/3200), " +"[#3202](https://github.com/adap/flower/pull/3202), " +"[#3394](https://github.com/adap/flower/pull/3394), " +"[#3488](https://github.com/adap/flower/pull/3488), " +"[#3329](https://github.com/adap/flower/pull/3329), " +"[#3526](https://github.com/adap/flower/pull/3526), " +"[#3392](https://github.com/adap/flower/pull/3392), " +"[#3474](https://github.com/adap/flower/pull/3474), " +"[#3269](https://github.com/adap/flower/pull/3269))" msgstr "" -"`Flower Quickstart (TensorFlow/Keras) " -"`_" +"**Mise à jour de la documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " 
+"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" +#: ../../source/ref-changelog.md:305 +msgid "As always, Flower code examples have received many updates." msgstr "" -":fa:`eye,mr-1` Comment puis-je faire fonctionner l'apprentissage fédéré " -"sur un Raspberry Pi ?" -#: ../../source/ref-faq.rst:15 -#, fuzzy +#: ../../source/ref-changelog.md:307 msgid "" -"Find the `blog post about federated learning on embedded device here " -"`_" -" and the corresponding `GitHub code example " -"`_." -msgstr "" -"Trouve le `blog post about federated learning on embedded device ici " -"`_" -" et l'exemple de code GitHub correspondant " -"`_." +"**General improvements** " +"([#3532](https://github.com/adap/flower/pull/3532), " +"[#3318](https://github.com/adap/flower/pull/3318), " +"[#3565](https://github.com/adap/flower/pull/3565), " +"[#3296](https://github.com/adap/flower/pull/3296), " +"[#3305](https://github.com/adap/flower/pull/3305), " +"[#3246](https://github.com/adap/flower/pull/3246), " +"[#3224](https://github.com/adap/flower/pull/3224), " +"[#3475](https://github.com/adap/flower/pull/3475), " +"[#3297](https://github.com/adap/flower/pull/3297), " +"[#3317](https://github.com/adap/flower/pull/3317), " +"[#3429](https://github.com/adap/flower/pull/3429), " +"[#3196](https://github.com/adap/flower/pull/3196), " +"[#3534](https://github.com/adap/flower/pull/3534), " +"[#3240](https://github.com/adap/flower/pull/3240), " +"[#3365](https://github.com/adap/flower/pull/3365), " +"[#3407](https://github.com/adap/flower/pull/3407), " +"[#3563](https://github.com/adap/flower/pull/3563), " +"[#3344](https://github.com/adap/flower/pull/3344), " +"[#3330](https://github.com/adap/flower/pull/3330), " +"[#3436](https://github.com/adap/flower/pull/3436), " 
+"[#3300](https://github.com/adap/flower/pull/3300), " +"[#3327](https://github.com/adap/flower/pull/3327), " +"[#3254](https://github.com/adap/flower/pull/3254), " +"[#3253](https://github.com/adap/flower/pull/3253), " +"[#3419](https://github.com/adap/flower/pull/3419), " +"[#3289](https://github.com/adap/flower/pull/3289), " +"[#3208](https://github.com/adap/flower/pull/3208), " +"[#3245](https://github.com/adap/flower/pull/3245), " +"[#3319](https://github.com/adap/flower/pull/3319), " +"[#3203](https://github.com/adap/flower/pull/3203), " +"[#3423](https://github.com/adap/flower/pull/3423), " +"[#3352](https://github.com/adap/flower/pull/3352), " +"[#3292](https://github.com/adap/flower/pull/3292), " +"[#3261](https://github.com/adap/flower/pull/3261))" +msgstr "" + +#: ../../source/ref-changelog.md:311 +#, fuzzy +msgid "**Deprecate Python 3.8 support**" +msgstr "**Créer le PR**" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" +#: ../../source/ref-changelog.md:313 +msgid "" +"Python 3.8 will stop receiving security fixes in [October " +"2024](https://devguide.python.org/versions/). Support for Python 3.8 is " +"now deprecated and will be removed in an upcoming release." msgstr "" -":fa:`eye,mr-1` Est-ce que Flower prend en charge l'apprentissage fédéré " -"sur les appareils Android ?" -#: ../../source/ref-faq.rst:19 +#: ../../source/ref-changelog.md:315 #, fuzzy msgid "" -"Yes, it does. Please take a look at our `blog post " -"`_ or check out the code examples:" +"**Deprecate (experimental)** `flower-driver-api` **and** `flower-fleet-" +"api` ([#3416](https://github.com/adap/flower/pull/3416), " +"[#3420](https://github.com/adap/flower/pull/3420))" msgstr "" -"Oui. Jetez un coup d'œil à notre `blog post " -"`_ ou consultez l'`exemple de code Android sur GitHub " -"`_." 
+"**Rename** `Weights` **to** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/ref-faq.rst:21 +#: ../../source/ref-changelog.md:317 msgid "" -"`Android Kotlin example `_" -msgstr "" - -#: ../../source/ref-faq.rst:22 -msgid "`Android Java example `_" +"Flower 1.9 deprecates the two (experimental) commands `flower-driver-api`" +" and `flower-fleet-api`. Both commands will be removed in an upcoming " +"release. Use `flower-superlink` instead." msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" +#: ../../source/ref-changelog.md:319 +#, fuzzy +msgid "" +"**Deprecate** `--server` **in favor of** `--superlink` " +"([#3518](https://github.com/adap/flower/pull/3518))" msgstr "" -":fa:`eye,mr-1` Puis-je combiner l'apprentissage fédéré avec la blockchain" -" ?" +"**Autoriser le passage d'une **instance `Server` à** `start_simulation` " +"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/ref-faq.rst:26 +#: ../../source/ref-changelog.md:321 msgid "" -"Yes, of course. A list of available examples using Flower within a " -"blockchain environment is available here:" +"The commands `flower-server-app` and `flower-client-app` should use " +"`--superlink` instead of the now deprecated `--server`. Support for " +"`--server` will be removed in a future release." msgstr "" -"Oui, bien sûr, une liste d'exemples disponibles utilisant Flower dans un " -"environnement blockchain est disponible ici :" -#: ../../source/ref-faq.rst:28 +#: ../../source/ref-changelog.md:325 msgid "" -"`Flower meets Nevermined GitHub Repository `_." 
+"**Replace** `flower-superlink` **CLI option** `--certificates` **with** " +"`--ssl-ca-certfile` **,** `--ssl-certfile` **and** `--ssl-keyfile` " +"([#3512](https://github.com/adap/flower/pull/3512), " +"[#3408](https://github.com/adap/flower/pull/3408))" msgstr "" -"`Flower meets Nevermined GitHub Repository `_." -#: ../../source/ref-faq.rst:29 +#: ../../source/ref-changelog.md:327 msgid "" -"`Flower meets Nevermined YouTube video " -"`_." +"SSL-related `flower-superlink` CLI arguments were restructured in an " +"incompatible way. Instead of passing a single `--certificates` flag with " +"three values, you now need to pass three flags (`--ssl-ca-certfile`, " +"`--ssl-certfile` and `--ssl-keyfile`) with one value each. Check out the " +"[SSL connections](https://flower.ai/docs/framework/how-to-enable-ssl-" +"connections.html) documentation page for details." msgstr "" -"`Flower rencontre Nevermined vidéo YouTube " -"`_." -#: ../../source/ref-faq.rst:30 +#: ../../source/ref-changelog.md:329 #, fuzzy msgid "" -"`Flower meets KOSMoS `_." +"**Remove SuperLink** `--vce` **option** " +"([#3513](https://github.com/adap/flower/pull/3513))" msgstr "" -"`Flower rencontre KOSMoS `_." +"**Documentation restructurée** " +"([#1387](https://github.com/adap/flower/pull/1387))" -#: ../../source/ref-faq.rst:31 +#: ../../source/ref-changelog.md:331 msgid "" -"`Flower meets Talan blog post `_ ." +"Instead of separately starting a SuperLink and a `ServerApp` for " +"simulation, simulations must now be started using the single `flower-" +"simulation` command." msgstr "" -"`Flower meets Talan blog post `_ ." -#: ../../source/ref-faq.rst:32 +#: ../../source/ref-changelog.md:333 +#, fuzzy msgid "" -"`Flower meets Talan GitHub Repository " -"`_ ." +"**Merge** `--grpc-rere` **and** `--rest` **SuperLink options** " +"([#3527](https://github.com/adap/flower/pull/3527))" msgstr "" -"`Flower rencontre Talan Dépôt GitHub " -"`_ ." 
- -#: ../../source/ref-telemetry.md:1 -msgid "Telemetry" -msgstr "Télémétrie" +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/ref-telemetry.md:3 +#: ../../source/ref-changelog.md:335 msgid "" -"The Flower open-source project collects **anonymous** usage metrics to " -"make well-informed decisions to improve Flower. Doing this enables the " -"Flower team to understand how Flower is used and what challenges users " -"might face." +"To simplify the usage of `flower-superlink`, previously separate sets of " +"CLI options for gRPC and REST were merged into one unified set of " +"options. Consult the [Flower CLI reference " +"documentation](https://flower.ai/docs/framework/ref-api-cli.html) for " +"details." msgstr "" -"Le projet open-source Flower recueille des mesures d'utilisation " -"**anonymes** afin de prendre des décisions éclairées pour améliorer " -"Flower. Cela permet à l'équipe de Flower de comprendre comment Flower est" -" utilisé et quels sont les défis auxquels les utilisateurs peuvent être " -"confrontés." -#: ../../source/ref-telemetry.md:5 +#: ../../source/ref-changelog.md:337 +#, fuzzy +msgid "v1.8.0 (2024-04-03)" +msgstr "v1.3.0 (2023-02-06)" + +#: ../../source/ref-changelog.md:343 msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users that do not want to share anonymous usage metrics." +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata " +"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " +"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " +"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " +"`tabdar-khan` " msgstr "" -"**Flower est un cadre convivial pour l'IA collaborative et la science des" -" données.** En restant fidèle à cette déclaration, Flower permet de " -"désactiver facilement la télémétrie pour les utilisateurs qui ne " -"souhaitent pas partager des mesures d'utilisation anonymes." - -#: ../../source/ref-telemetry.md:7 -msgid "Principles" -msgstr "Principes" -#: ../../source/ref-telemetry.md:9 -msgid "We follow strong principles guarding anonymous usage metrics collection:" +#: ../../source/ref-changelog.md:347 +msgid "" +"**Introduce Flower Next high-level API (stable)** " +"([#3002](https://github.com/adap/flower/pull/3002), " +"[#2934](https://github.com/adap/flower/pull/2934), " +"[#2958](https://github.com/adap/flower/pull/2958), " +"[#3173](https://github.com/adap/flower/pull/3173), " +"[#3174](https://github.com/adap/flower/pull/3174), " +"[#2923](https://github.com/adap/flower/pull/2923), " +"[#2691](https://github.com/adap/flower/pull/2691), " +"[#3079](https://github.com/adap/flower/pull/3079), " +"[#2961](https://github.com/adap/flower/pull/2961), " +"[#2924](https://github.com/adap/flower/pull/2924), " +"[#3166](https://github.com/adap/flower/pull/3166), " +"[#3031](https://github.com/adap/flower/pull/3031), " +"[#3057](https://github.com/adap/flower/pull/3057), " +"[#3000](https://github.com/adap/flower/pull/3000), " +"[#3113](https://github.com/adap/flower/pull/3113), " +"[#2957](https://github.com/adap/flower/pull/2957), " +"[#3183](https://github.com/adap/flower/pull/3183), " +"[#3180](https://github.com/adap/flower/pull/3180), " +"[#3035](https://github.com/adap/flower/pull/3035), " +"[#3189](https://github.com/adap/flower/pull/3189), " +"[#3185](https://github.com/adap/flower/pull/3185), 
" +"[#3190](https://github.com/adap/flower/pull/3190), " +"[#3191](https://github.com/adap/flower/pull/3191), " +"[#3195](https://github.com/adap/flower/pull/3195), " +"[#3197](https://github.com/adap/flower/pull/3197))" msgstr "" -"Nous suivons des principes stricts concernant la collecte de données " -"anonymes sur l'utilisation :" -#: ../../source/ref-telemetry.md:11 +#: ../../source/ref-changelog.md:349 msgid "" -"**Optional:** You will always be able to disable telemetry; read on to " -"learn “[How to opt-out](#how-to-opt-out)”." +"The Flower Next high-level API is stable! Flower Next is the future of " +"Flower - all new features (like Flower Mods) will be built on top of it. " +"You can start to migrate your existing projects to Flower Next by using " +"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " +"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." +" Flower Next allows you to run multiple projects concurrently (we call " +"this multi-run) and execute the same project in either simulation " +"environments or deployment environments without having to change a single" +" line of code. The best part? It's fully compatible with existing Flower " +"projects that use `Strategy`, `NumPyClient` & co." msgstr "" -"**Optionnel:** Tu pourras toujours désactiver la télémétrie ; lis la " -"suite pour apprendre \"[Comment se désengager](#how-to-opt-out)\"." -#: ../../source/ref-telemetry.md:12 +#: ../../source/ref-changelog.md:351 +#, fuzzy msgid "" -"**Anonymous:** The reported usage metrics are anonymous and do not " -"contain any personally identifiable information (PII). See “[Collected " -"metrics](#collected-metrics)” to understand what metrics are being " -"reported." 
+"**Introduce Flower Next low-level API (preview)** " +"([#3062](https://github.com/adap/flower/pull/3062), " +"[#3034](https://github.com/adap/flower/pull/3034), " +"[#3069](https://github.com/adap/flower/pull/3069))" msgstr "" -"**Anonyme:** Les mesures d'utilisation rapportées sont anonymes et ne " -"contiennent aucune information personnelle identifiable (PII). Voir " -"\"[Collected metrics](#collected-metrics)\" pour comprendre quelles " -"mesures sont rapportées." +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-telemetry.md:13 +#: ../../source/ref-changelog.md:353 msgid "" -"**Transparent:** You can easily inspect what anonymous metrics are being " -"reported; see the section “[How to inspect what is being reported](#how-" -"to-inspect-what-is-being-reported)”" +"In addition to the Flower Next *high-level* API that uses `Strategy`, " +"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " +"new Flower Next *low-level* API. The low-level API allows for granular " +"control of every aspect of the learning process by sending/receiving " +"individual messages to/from client nodes. The new `ServerApp` supports " +"registering a custom `main` function that allows writing custom training " +"loops for methods like async FL, cyclic training, or federated analytics." +" The new `ClientApp` supports registering `train`, `evaluate` and `query`" +" functions that can access the raw message received from the `ServerApp`." +" New abstractions like `RecordSet`, `Message` and `Context` further " +"enable sending multiple models, multiple sets of config values and " +"metrics, stateful computations on the client node and implementations of " +"custom SMPC protocols, to name just a few." 
msgstr "" -"**Transparent:** Tu peux facilement inspecter les métriques anonymes qui " -"sont rapportées ; voir la section \"[Comment inspecter ce qui est " -"rapporté](#how-to-inspect-what-is-being-reported)\"" -#: ../../source/ref-telemetry.md:14 +#: ../../source/ref-changelog.md:355 #, fuzzy msgid "" -"**Open for feedback:** You can always reach out to us if you have " -"feedback; see the section “[How to contact us](#how-to-contact-us)” for " -"details." +"**Introduce Flower Mods (preview)** " +"([#3054](https://github.com/adap/flower/pull/3054), " +"[#2911](https://github.com/adap/flower/pull/2911), " +"[#3083](https://github.com/adap/flower/pull/3083))" msgstr "" -"**Ouvert pour les commentaires:** Tu peux toujours nous contacter si tu " -"as des commentaires ; voir la section \"[Comment nous contacter ](#how-" -"to-contact-us)\" pour plus de détails." - -#: ../../source/ref-telemetry.md:16 -msgid "How to opt-out" -msgstr "Comment se désinscrire" +"**Introduire la télémétrie optionnelle** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/ref-telemetry.md:18 +#: ../../source/ref-changelog.md:357 msgid "" -"When Flower starts, it will check for an environment variable called " -"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " -"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " -"client, simply do so by prepending your command as in:" +"Flower Modifiers (we call them Mods) can intercept messages and analyze, " +"edit or handle them directly. Mods can be used to develop pluggable " +"modules that work across different projects. 
Flower 1.8 already includes " +"mods to log the size of a message, the number of parameters sent over the" +" network, differential privacy with fixed clipping and adaptive clipping," +" local differential privacy and secure aggregation protocols SecAgg and " +"SecAgg+. The Flower Mods API is released as a preview, but researchers " +"can already use it to experiment with arbirtrary SMPC protocols." msgstr "" -"Lorsque Flower démarre, il vérifie la présence d'une variable " -"d'environnement appelée `FLWR_TELEMETRY_ENABLED`. La télémétrie peut " -"facilement être désactivée en réglant `FLWR_TELEMETRY_ENABLED=0`. En " -"supposant que tu démarres un serveur ou un client Flower, fais-le " -"simplement en faisant précéder ta commande de la façon suivante :" -#: ../../source/ref-telemetry.md:24 +#: ../../source/ref-changelog.md:359 +#, fuzzy msgid "" -"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," -" `.bashrc` (or whatever configuration file applies to your environment) " -"to disable Flower telemetry permanently." +"**Fine-tune LLMs with LLM FlowerTune** " +"([#3029](https://github.com/adap/flower/pull/3029), " +"[#3089](https://github.com/adap/flower/pull/3089), " +"[#3092](https://github.com/adap/flower/pull/3092), " +"[#3100](https://github.com/adap/flower/pull/3100), " +"[#3114](https://github.com/adap/flower/pull/3114), " +"[#3162](https://github.com/adap/flower/pull/3162), " +"[#3172](https://github.com/adap/flower/pull/3172))" msgstr "" -"Tu peux aussi exporter `FLWR_TELEMETRY_ENABLED=0` dans, par exemple, " -"`.bashrc` (ou tout autre fichier de configuration qui s'applique à ton " -"environnement) pour désactiver la télémétrie de la fleur de façon " -"permanente." 
- -#: ../../source/ref-telemetry.md:26 -msgid "Collected metrics" -msgstr "Mesures collectées" - -#: ../../source/ref-telemetry.md:28 -msgid "Flower telemetry collects the following metrics:" -msgstr "La télémétrie des fleurs recueille les métriques suivantes :" +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-telemetry.md:30 +#: ../../source/ref-changelog.md:361 msgid "" -"**Flower version.** Understand which versions of Flower are currently " -"being used. This helps us to decide whether we should invest effort into " -"releasing a patch version for an older version of Flower or instead use " -"the bandwidth to build new features." +"We are introducing LLM FlowerTune, an introductory example that " +"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " +"the Alpaca-GPT4 dataset. The example is built to be easily adapted to use" +" different models and/or datasets. Read our blog post [LLM FlowerTune: " +"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" +"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." msgstr "" -"**Cela nous aide à décider si nous devons investir des efforts dans la " -"publication d'une version corrective pour une version plus ancienne de " -"Flower ou si nous devons plutôt utiliser la bande passante pour " -"développer de nouvelles fonctionnalités." 
-#: ../../source/ref-telemetry.md:32 +#: ../../source/ref-changelog.md:363 +#, fuzzy msgid "" -"**Operating system.** Enables us to answer questions such as: *Should we " -"create more guides for Linux, macOS, or Windows?*" +"**Introduce built-in Differential Privacy (preview)** " +"([#2798](https://github.com/adap/flower/pull/2798), " +"[#2959](https://github.com/adap/flower/pull/2959), " +"[#3038](https://github.com/adap/flower/pull/3038), " +"[#3147](https://github.com/adap/flower/pull/3147), " +"[#2909](https://github.com/adap/flower/pull/2909), " +"[#2893](https://github.com/adap/flower/pull/2893), " +"[#2892](https://github.com/adap/flower/pull/2892), " +"[#3039](https://github.com/adap/flower/pull/3039), " +"[#3074](https://github.com/adap/flower/pull/3074))" msgstr "" -"**Système d'exploitation.** Nous permet de répondre à des questions " -"telles que : *Faudrait-il créer plus de guides pour Linux, macOS ou " -"Windows ?" +"**([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-telemetry.md:34 +#: ../../source/ref-changelog.md:365 msgid "" -"**Python version.** Knowing the Python version helps us, for example, to " -"decide whether we should invest effort into supporting old versions of " -"Python or stop supporting them and start taking advantage of new Python " -"features." +"Built-in Differential Privacy is here! Flower supports both central and " +"local differential privacy (DP). Central DP can be configured with either" +" fixed or adaptive clipping. The clipping can happen either on the " +"server-side or the client-side. Local DP does both clipping and noising " +"on the client-side. 
A new documentation page [explains Differential " +"Privacy approaches](https://flower.ai/docs/framework/explanation-" +"differential-privacy.html) and a new how-to guide describes [how to use " +"the new Differential Privacy components](https://flower.ai/docs/framework" +"/how-to-use-differential-privacy.html) in Flower." msgstr "" -"**Version de Python.** Connaître la version de Python nous aide, par " -"exemple, à décider si nous devons investir des efforts dans la prise en " -"charge des anciennes versions de Python ou cesser de les prendre en " -"charge et commencer à tirer parti des nouvelles fonctionnalités de " -"Python." -#: ../../source/ref-telemetry.md:36 +#: ../../source/ref-changelog.md:367 +#, fuzzy msgid "" -"**Hardware properties.** Understanding the hardware environment that " -"Flower is being used in helps to decide whether we should, for example, " -"put more effort into supporting low-resource environments." +"**Introduce built-in Secure Aggregation (preview)** " +"([#3120](https://github.com/adap/flower/pull/3120), " +"[#3110](https://github.com/adap/flower/pull/3110), " +"[#3108](https://github.com/adap/flower/pull/3108))" msgstr "" -"**Comprendre l'environnement matériel dans lequel Flower est utilisé " -"permet de décider si nous devrions, par exemple, faire plus d'efforts " -"pour prendre en charge les environnements à faibles ressources." +"**Introduire la télémétrie optionnelle** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/ref-telemetry.md:38 +#: ../../source/ref-changelog.md:369 msgid "" -"**Execution mode.** Knowing what execution mode Flower starts in enables " -"us to understand how heavily certain features are being used and better " -"prioritize based on that." +"Built-in Secure Aggregation is here! Flower now supports different secure" +" aggregation protocols out-of-the-box. The best part? 
You can add secure " +"aggregation to your Flower projects with only a few lines of code. In " +"this initial release, we inlcude support for SecAgg and SecAgg+, but more" +" protocols will be implemented shortly. We'll also add detailed docs that" +" explain secure aggregation and how to use it in Flower. You can already " +"check out the new code example that shows how to use Flower to easily " +"combine Federated Learning, Differential Privacy and Secure Aggregation " +"in the same project." msgstr "" -"**Mode d'exécution** Connaître le mode d'exécution dans lequel Flower " -"démarre nous permet de comprendre à quel point certaines fonctionnalités " -"sont utilisées et de mieux établir les priorités en fonction de cela." -#: ../../source/ref-telemetry.md:40 +#: ../../source/ref-changelog.md:371 +#, fuzzy msgid "" -"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " -"time a Flower workload starts. This allows us to understand which device " -"types not only start Flower workloads but also successfully complete " -"them." +"**Introduce** `flwr` **CLI (preview)** " +"([#2942](https://github.com/adap/flower/pull/2942), " +"[#3055](https://github.com/adap/flower/pull/3055), " +"[#3111](https://github.com/adap/flower/pull/3111), " +"[#3130](https://github.com/adap/flower/pull/3130), " +"[#3136](https://github.com/adap/flower/pull/3136), " +"[#3094](https://github.com/adap/flower/pull/3094), " +"[#3059](https://github.com/adap/flower/pull/3059), " +"[#3049](https://github.com/adap/flower/pull/3049), " +"[#3142](https://github.com/adap/flower/pull/3142))" msgstr "" -"**Cluster.** La télémétrie Flower attribue un ID de cluster en mémoire " -"aléatoire à chaque fois qu'une charge de travail Flower démarre. Cela " -"nous permet de comprendre quels types d'appareils non seulement démarrent" -" les charges de travail Flower, mais aussi les terminent avec succès." 
+"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-telemetry.md:42 +#: ../../source/ref-changelog.md:373 msgid "" -"**Source.** Flower telemetry tries to store a random source ID in " -"`~/.flwr/source` the first time a telemetry event is generated. The " -"source ID is important to identify whether an issue is recurring or " -"whether an issue is triggered by multiple clusters running concurrently " -"(which often happens in simulation). For example, if a device runs " -"multiple workloads at the same time, and this results in an issue, then, " -"in order to reproduce the issue, multiple workloads must be started at " -"the same time." +"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" +" and then running them using the Simulation Engine (`flwr run`)." msgstr "" -"**Source.** La télémétrie de Flower essaie de stocker un ID de source " -"aléatoire dans `~/.flwr/source` la première fois qu'un événement de " -"télémétrie est généré. L'ID de source est important pour identifier si un" -" problème est récurrent ou si un problème est déclenché par plusieurs " -"clusters fonctionnant simultanément (ce qui arrive souvent en " -"simulation). Par exemple, si un périphérique exécute plusieurs charges de" -" travail en même temps, et que cela entraîne un problème, alors, afin de " -"reproduire le problème, plusieurs charges de travail doivent être " -"démarrées en même temps." -#: ../../source/ref-telemetry.md:44 +#: ../../source/ref-changelog.md:375 +#, fuzzy msgid "" -"You may delete the source ID at any time. 
If you wish for all events " -"logged under a specific source ID to be deleted, you can send a deletion " -"request mentioning the source ID to `telemetry@flower.ai`. All events " -"related to that source ID will then be permanently deleted." +"**Introduce Flower Next Simulation Engine** " +"([#3024](https://github.com/adap/flower/pull/3024), " +"[#3061](https://github.com/adap/flower/pull/3061), " +"[#2997](https://github.com/adap/flower/pull/2997), " +"[#2783](https://github.com/adap/flower/pull/2783), " +"[#3184](https://github.com/adap/flower/pull/3184), " +"[#3075](https://github.com/adap/flower/pull/3075), " +"[#3047](https://github.com/adap/flower/pull/3047), " +"[#2998](https://github.com/adap/flower/pull/2998), " +"[#3009](https://github.com/adap/flower/pull/3009), " +"[#3008](https://github.com/adap/flower/pull/3008))" msgstr "" -"Tu peux supprimer l'identifiant de la source à tout moment. Si tu " -"souhaites que tous les événements enregistrés sous un identifiant de " -"source spécifique soient supprimés, tu peux envoyer une demande de " -"suppression mentionnant l'identifiant de source à `telemetry@flower.ai`. " -"Tous les événements liés à cet identifiant de source seront alors " -"définitivement supprimés." +"**Introduire l'API REST (expérimentale)** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-telemetry.md:46 +#: ../../source/ref-changelog.md:377 msgid "" -"We will not collect any personally identifiable information. If you think" -" any of the metrics collected could be misused in any way, please [get in" -" touch with us](#how-to-contact-us). 
We will update this page to reflect " -"any changes to the metrics collected and publish changes in the " -"changelog." +"The Flower Simulation Engine can now run Flower Next projects. For " +"notebook environments, there's also a new `run_simulation` function that " +"can run `ServerApp` and `ClientApp`." msgstr "" -"Nous ne collecterons aucune information personnelle identifiable. Si tu " -"penses que l'une des métriques collectées pourrait être utilisée à " -"mauvais escient de quelque manière que ce soit, merci de [nous " -"contacter](#commentnouscontacter). Nous mettrons à jour cette page pour " -"refléter toute modification des métriques collectées et nous publierons " -"les changements dans le journal des modifications (changelog)." -#: ../../source/ref-telemetry.md:48 +#: ../../source/ref-changelog.md:379 +#, fuzzy msgid "" -"If you think other metrics would be helpful for us to better guide our " -"decisions, please let us know! We will carefully review them; if we are " -"confident that they do not compromise user privacy, we may add them." +"**Handle SuperNode connection errors** " +"([#2969](https://github.com/adap/flower/pull/2969))" msgstr "" -"Si tu penses que d'autres mesures nous seraient utiles pour mieux " -"orienter nos décisions, fais-le nous savoir ! Nous les examinerons " -"attentivement ; si nous sommes convaincus qu'elles ne compromettent pas " -"la vie privée des utilisateurs, nous pourrons les ajouter." - -#: ../../source/ref-telemetry.md:50 -msgid "How to inspect what is being reported" -msgstr "Comment inspecter ce qui est rapporté" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-telemetry.md:52 +#: ../../source/ref-changelog.md:381 msgid "" -"We wanted to make it very easy for you to inspect what anonymous usage " -"metrics are reported. You can view all the reported telemetry information" -" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. 
Logging " -"is disabled by default. You may use logging independently from " -"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " -"without sending any metrics." +"A SuperNode will now try to reconnect indefinitely to the SuperLink in " +"case of connection errors. The arguments `--max-retries` and `--max-wait-" +"time` can now be passed to the `flower-client-app` command. `--max-" +"retries` will define the number of tentatives the client should make " +"before it gives up trying to reconnect to the SuperLink, and, `--max-" +"wait-time` defines the time before the SuperNode gives up trying to " +"reconnect to the SuperLink." msgstr "" -"Nous avons voulu qu'il soit très facile pour toi d'inspecter les mesures " -"d'utilisation anonymes qui sont rapportées. Tu peux voir toutes les " -"informations de télémétrie rapportées en définissant la variable " -"d'environnement `FLWR_TELEMETRY_LOGGING=1`. La journalisation est " -"désactivée par défaut. Tu peux utiliser la journalisation indépendamment " -"de `FLWR_TELEMETRY_ENABLED` afin d'inspecter la fonction de télémétrie " -"sans envoyer de mesures." 
-#: ../../source/ref-telemetry.md:58 +#: ../../source/ref-changelog.md:383 +#, fuzzy msgid "" -"The inspect Flower telemetry without sending any anonymous usage metrics," -" use both environment variables:" +"**General updates to Flower Baselines** " +"([#2904](https://github.com/adap/flower/pull/2904), " +"[#2482](https://github.com/adap/flower/pull/2482), " +"[#2985](https://github.com/adap/flower/pull/2985), " +"[#2968](https://github.com/adap/flower/pull/2968))" msgstr "" -"L'inspecteur Flower telemetry sans envoyer de métriques d'utilisation " -"anonymes, utilise les deux variables d'environnement :" +"**Introduire une nouvelle fleur Référence : FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" -#: ../../source/ref-telemetry.md:64 -msgid "How to contact us" -msgstr "Comment nous contacter" +#: ../../source/ref-changelog.md:385 +msgid "" +"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " +"baseline. Several other baselined have been updated as well." +msgstr "" -#: ../../source/ref-telemetry.md:66 +#: ../../source/ref-changelog.md:387 msgid "" -"We want to hear from you. If you have any feedback or ideas on how to " -"improve the way we handle anonymous usage metrics, reach out to us via " -"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " -"(`telemetry@flower.ai`)." 
+"**Improve documentation and translations** " +"([#3050](https://github.com/adap/flower/pull/3050), " +"[#3044](https://github.com/adap/flower/pull/3044), " +"[#3043](https://github.com/adap/flower/pull/3043), " +"[#2986](https://github.com/adap/flower/pull/2986), " +"[#3041](https://github.com/adap/flower/pull/3041), " +"[#3046](https://github.com/adap/flower/pull/3046), " +"[#3042](https://github.com/adap/flower/pull/3042), " +"[#2978](https://github.com/adap/flower/pull/2978), " +"[#2952](https://github.com/adap/flower/pull/2952), " +"[#3167](https://github.com/adap/flower/pull/3167), " +"[#2953](https://github.com/adap/flower/pull/2953), " +"[#3045](https://github.com/adap/flower/pull/3045), " +"[#2654](https://github.com/adap/flower/pull/2654), " +"[#3082](https://github.com/adap/flower/pull/3082), " +"[#2990](https://github.com/adap/flower/pull/2990), " +"[#2989](https://github.com/adap/flower/pull/2989))" msgstr "" -"Si tu as des commentaires ou des idées pour améliorer la façon dont nous " -"traitons les mesures d'utilisation anonymes, contacte-nous via " -"[Slack](https://flower.ai/join-slack/) (canal `#telemetry`) ou par " -"courriel (`telemetry@flower.ai`)." -#: ../../source/tutorial-quickstart-android.rst:-1 +#: ../../source/ref-changelog.md:389 msgid "" -"Read this Federated Learning quickstart tutorial for creating an Android " -"app using Flower." +"As usual, we merged many smaller and larger improvements to the " +"documentation. A special thank you goes to [Sebastian van der " +"Voort](https://github.com/svdvoort) for landing a big documentation PR!" msgstr "" -#: ../../source/tutorial-quickstart-android.rst:5 -#, fuzzy -msgid "Quickstart Android" -msgstr "Démarrage rapide des Pandas" - -#: ../../source/tutorial-quickstart-android.rst:10 +#: ../../source/ref-changelog.md:391 #, fuzzy msgid "" -"Let's build a federated learning system using TFLite and Flower on " -"Android!" 
+"**General updates to Flower Examples** " +"([3134](https://github.com/adap/flower/pull/3134), " +"[2996](https://github.com/adap/flower/pull/2996), " +"[2930](https://github.com/adap/flower/pull/2930), " +"[2967](https://github.com/adap/flower/pull/2967), " +"[2467](https://github.com/adap/flower/pull/2467), " +"[2910](https://github.com/adap/flower/pull/2910), " +"[#2918](https://github.com/adap/flower/pull/2918), " +"[#2773](https://github.com/adap/flower/pull/2773), " +"[#3063](https://github.com/adap/flower/pull/3063), " +"[#3116](https://github.com/adap/flower/pull/3116), " +"[#3117](https://github.com/adap/flower/pull/3117))" msgstr "" -"Construisons un système d'apprentissage fédéré en utilisant fastai et " -"Flower !" +"**Documentation mise à jour** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/tutorial-quickstart-android.rst:12 -#, fuzzy +#: ../../source/ref-changelog.md:393 msgid "" -"Please refer to the `full code example " -"`_ to learn " -"more." +"Two new examples show federated training of a Vision Transformer (ViT) " +"and federated learning in a medical context using the popular MONAI " +"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" +" new Flower Next `ServerApp` and `ClientApp`. Many other examples " +"received considerable updates as well." msgstr "" -"Réfère-toi à l'exemple de code complet " -"`_ " -"pour en savoir plus." 
-#: ../../source/tutorial-quickstart-fastai.rst:-1 +#: ../../source/ref-changelog.md:395 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with FastAI to train a vision model on CIFAR-10." +"**General improvements** " +"([#3171](https://github.com/adap/flower/pull/3171), " +"[3099](https://github.com/adap/flower/pull/3099), " +"[3003](https://github.com/adap/flower/pull/3003), " +"[3145](https://github.com/adap/flower/pull/3145), " +"[3017](https://github.com/adap/flower/pull/3017), " +"[3085](https://github.com/adap/flower/pull/3085), " +"[3012](https://github.com/adap/flower/pull/3012), " +"[3119](https://github.com/adap/flower/pull/3119), " +"[2991](https://github.com/adap/flower/pull/2991), " +"[2970](https://github.com/adap/flower/pull/2970), " +"[2980](https://github.com/adap/flower/pull/2980), " +"[3086](https://github.com/adap/flower/pull/3086), " +"[2932](https://github.com/adap/flower/pull/2932), " +"[2928](https://github.com/adap/flower/pull/2928), " +"[2941](https://github.com/adap/flower/pull/2941), " +"[2933](https://github.com/adap/flower/pull/2933), " +"[3181](https://github.com/adap/flower/pull/3181), " +"[2973](https://github.com/adap/flower/pull/2973), " +"[2992](https://github.com/adap/flower/pull/2992), " +"[2915](https://github.com/adap/flower/pull/2915), " +"[3040](https://github.com/adap/flower/pull/3040), " +"[3022](https://github.com/adap/flower/pull/3022), " +"[3032](https://github.com/adap/flower/pull/3032), " +"[2902](https://github.com/adap/flower/pull/2902), " +"[2931](https://github.com/adap/flower/pull/2931), " +"[3005](https://github.com/adap/flower/pull/3005), " +"[3132](https://github.com/adap/flower/pull/3132), " +"[3115](https://github.com/adap/flower/pull/3115), " +"[2944](https://github.com/adap/flower/pull/2944), " +"[3064](https://github.com/adap/flower/pull/3064), " +"[3106](https://github.com/adap/flower/pull/3106), " +"[2974](https://github.com/adap/flower/pull/2974), " 
+"[3178](https://github.com/adap/flower/pull/3178), " +"[2993](https://github.com/adap/flower/pull/2993), " +"[3186](https://github.com/adap/flower/pull/3186), " +"[3091](https://github.com/adap/flower/pull/3091), " +"[3125](https://github.com/adap/flower/pull/3125), " +"[3093](https://github.com/adap/flower/pull/3093), " +"[3013](https://github.com/adap/flower/pull/3013), " +"[3033](https://github.com/adap/flower/pull/3033), " +"[3133](https://github.com/adap/flower/pull/3133), " +"[3068](https://github.com/adap/flower/pull/3068), " +"[2916](https://github.com/adap/flower/pull/2916), " +"[2975](https://github.com/adap/flower/pull/2975), " +"[2984](https://github.com/adap/flower/pull/2984), " +"[2846](https://github.com/adap/flower/pull/2846), " +"[3077](https://github.com/adap/flower/pull/3077), " +"[3143](https://github.com/adap/flower/pull/3143), " +"[2921](https://github.com/adap/flower/pull/2921), " +"[3101](https://github.com/adap/flower/pull/3101), " +"[2927](https://github.com/adap/flower/pull/2927), " +"[2995](https://github.com/adap/flower/pull/2995), " +"[2972](https://github.com/adap/flower/pull/2972), " +"[2912](https://github.com/adap/flower/pull/2912), " +"[3065](https://github.com/adap/flower/pull/3065), " +"[3028](https://github.com/adap/flower/pull/3028), " +"[2922](https://github.com/adap/flower/pull/2922), " +"[2982](https://github.com/adap/flower/pull/2982), " +"[2914](https://github.com/adap/flower/pull/2914), " +"[3179](https://github.com/adap/flower/pull/3179), " +"[3080](https://github.com/adap/flower/pull/3080), " +"[2994](https://github.com/adap/flower/pull/2994), " +"[3187](https://github.com/adap/flower/pull/3187), " +"[2926](https://github.com/adap/flower/pull/2926), " +"[3018](https://github.com/adap/flower/pull/3018), " +"[3144](https://github.com/adap/flower/pull/3144), " +"[3011](https://github.com/adap/flower/pull/3011), " +"[#3152](https://github.com/adap/flower/pull/3152), " +"[#2836](https://github.com/adap/flower/pull/2836), " 
+"[#2929](https://github.com/adap/flower/pull/2929), " +"[#2943](https://github.com/adap/flower/pull/2943), " +"[#2955](https://github.com/adap/flower/pull/2955), " +"[#2954](https://github.com/adap/flower/pull/2954))" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:5 -msgid "Quickstart fastai" -msgstr "Démarrage rapide fastai" +#: ../../source/ref-changelog.md:401 +#, fuzzy +msgid "v1.7.0 (2024-02-05)" +msgstr "v1.3.0 (2023-02-06)" -#: ../../source/tutorial-quickstart-fastai.rst:10 -msgid "Let's build a federated learning system using fastai and Flower!" +#: ../../source/ref-changelog.md:407 +msgid "" +"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " +"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " +"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " +"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " +"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " +"Shaaban`, `Yan Gao`, `Yasar Abbas` " msgstr "" -"Construisons un système d'apprentissage fédéré en utilisant fastai et " -"Flower !" -#: ../../source/tutorial-quickstart-fastai.rst:12 +#: ../../source/ref-changelog.md:411 #, fuzzy msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." +"**Introduce stateful clients (experimental)** " +"([#2770](https://github.com/adap/flower/pull/2770), " +"[#2686](https://github.com/adap/flower/pull/2686), " +"[#2696](https://github.com/adap/flower/pull/2696), " +"[#2643](https://github.com/adap/flower/pull/2643), " +"[#2769](https://github.com/adap/flower/pull/2769))" msgstr "" -"Réfère-toi à l'exemple de code complet " -"`_ " -"pour en savoir plus." 
+"**([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/tutorial-quickstart-huggingface.rst:-1 +#: ../../source/ref-changelog.md:413 msgid "" -"Check out this Federating Learning quickstart tutorial for using Flower " -"with HuggingFace Transformers in order to fine-tune an LLM." +"Subclasses of `Client` and `NumPyClient` can now store local state that " +"remains on the client. Let's start with the highlight first: this new " +"feature is compatible with both simulated clients (via " +"`start_simulation`) and networked clients (via `start_client`). It's also" +" the first preview of new abstractions like `Context` and `RecordSet`. " +"Clients can access state of type `RecordSet` via `state: RecordSet = " +"self.context.state`. Changes to this `RecordSet` are preserved across " +"different rounds of execution to enable stateful computations in a " +"unified way across simulation and deployment." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:5 -msgid "Quickstart 🤗 Transformers" -msgstr "Démarrage rapide 🤗 Transformateurs" - -#: ../../source/tutorial-quickstart-huggingface.rst:10 +#: ../../source/ref-changelog.md:415 +#, fuzzy msgid "" -"Let's build a federated learning system using Hugging Face Transformers " -"and Flower!" +"**Improve performance** " +"([#2293](https://github.com/adap/flower/pull/2293))" msgstr "" -"Construisons un système d'apprentissage fédéré à l'aide des " -"transformateurs Hugging Face et de Flower !" 
+"**Supprimer les stratégies expérimentales** " +"([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/tutorial-quickstart-huggingface.rst:12 +#: ../../source/ref-changelog.md:417 msgid "" -"We will leverage Hugging Face to federate the training of language models" -" over multiple clients using Flower. More specifically, we will fine-tune" -" a pre-trained Transformer model (distilBERT) for sequence classification" -" over a dataset of IMDB ratings. The end goal is to detect if a movie " -"rating is positive or negative." +"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" +"place aggregation to reduce memory consumption. The Flower client " +"serialization/deserialization has been rewritten from the ground up, " +"which results in significant speedups, especially when the client-side " +"training time is short." msgstr "" -"Nous nous appuierons sur Hugging Face pour fédérer l'entraînement de " -"modèles de langage sur plusieurs clients à l'aide de Flower. Plus " -"précisément, nous mettrons au point un modèle Transformer pré-entraîné " -"(distilBERT) pour la classification de séquences sur un ensemble de " -"données d'évaluations IMDB. L'objectif final est de détecter si " -"l'évaluation d'un film est positive ou négative." -#: ../../source/tutorial-quickstart-huggingface.rst:18 -msgid "Dependencies" -msgstr "Dépendances" - -#: ../../source/tutorial-quickstart-huggingface.rst:20 +#: ../../source/ref-changelog.md:419 +#, fuzzy msgid "" -"To follow along this tutorial you will need to install the following " -"packages: :code:`datasets`, :code:`evaluate`, :code:`flwr`, " -":code:`torch`, and :code:`transformers`. 
This can be done using " -":code:`pip`:" +"**Support Federated Learning with Apple MLX and Flower** " +"([#2693](https://github.com/adap/flower/pull/2693))" msgstr "" -"Pour suivre ce tutoriel, tu devras installer les paquets suivants : " -":code:`datasets`, :code:`evaluate`, :code:`flwr`, :code:`torch`, et " -":code:`transformers`. Cela peut être fait en utilisant :code:`pip` :" - -#: ../../source/tutorial-quickstart-huggingface.rst:30 -msgid "Standard Hugging Face workflow" -msgstr "Flux de travail standard pour le visage" - -#: ../../source/tutorial-quickstart-huggingface.rst:33 -msgid "Handling the data" -msgstr "Traitement des données" +"**Ajouter un nouvel exemple d'apprentissage fédéré utilisant fastai et " +"Flower** ([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/tutorial-quickstart-huggingface.rst:35 +#: ../../source/ref-changelog.md:421 msgid "" -"To fetch the IMDB dataset, we will use Hugging Face's :code:`datasets` " -"library. We then need to tokenize the data and create :code:`PyTorch` " -"dataloaders, this is all done in the :code:`load_data` function:" +"Flower has official support for federated learning using [Apple " +"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " +"example." msgstr "" -"Pour récupérer le jeu de données IMDB, nous utiliserons la bibliothèque " -":code:`datasets` de Hugging Face. Nous devons ensuite tokeniser les " -"données et créer des :code:`PyTorch` dataloaders, ce qui est fait dans la" -" fonction :code:`load_data` :" -#: ../../source/tutorial-quickstart-huggingface.rst:81 -msgid "Training and testing the model" -msgstr "Former et tester le modèle" - -#: ../../source/tutorial-quickstart-huggingface.rst:83 +#: ../../source/ref-changelog.md:423 +#, fuzzy msgid "" -"Once we have a way of creating our trainloader and testloader, we can " -"take care of the training and testing. 
This is very similar to any " -":code:`PyTorch` training or testing loop:" +"**Introduce new XGBoost cyclic strategy** " +"([#2666](https://github.com/adap/flower/pull/2666), " +"[#2668](https://github.com/adap/flower/pull/2668))" msgstr "" -"Une fois que nous avons trouvé un moyen de créer notre trainloader et " -"notre testloader, nous pouvons nous occuper de l'entraînement et du test." -" C'est très similaire à n'importe quelle boucle d'entraînement ou de test" -" :code:`PyTorch` :" - -#: ../../source/tutorial-quickstart-huggingface.rst:121 -msgid "Creating the model itself" -msgstr "Créer le modèle lui-même" +"**Introduction du SDK iOS (aperçu)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/tutorial-quickstart-huggingface.rst:123 +#: ../../source/ref-changelog.md:425 msgid "" -"To create the model itself, we will just load the pre-trained distillBERT" -" model using Hugging Face’s :code:`AutoModelForSequenceClassification` :" +"A new strategy called `FedXgbCyclic` supports a client-by-client style of" +" training (often called cyclic). The `xgboost-comprehensive` code example" +" shows how to use it in a full project. In addition to that, `xgboost-" +"comprehensive` now also supports simulation mode. With this, Flower " +"offers best-in-class XGBoost support." 
msgstr "" -"Pour créer le modèle lui-même, nous allons simplement charger le modèle " -"distillBERT pré-entraîné en utilisant le " -":code:`AutoModelForSequenceClassification` de Hugging Face :" - -#: ../../source/tutorial-quickstart-huggingface.rst:136 -msgid "Federating the example" -msgstr "Fédérer l'exemple" - -#: ../../source/tutorial-quickstart-huggingface.rst:139 -msgid "Creating the IMDBClient" -msgstr "Création du client IMDBC" -#: ../../source/tutorial-quickstart-huggingface.rst:141 +#: ../../source/ref-changelog.md:427 +#, fuzzy msgid "" -"To federate our example to multiple clients, we first need to write our " -"Flower client class (inheriting from :code:`flwr.client.NumPyClient`). " -"This is very easy, as our model is a standard :code:`PyTorch` model:" +"**Support Python 3.11** " +"([#2394](https://github.com/adap/flower/pull/2394))" msgstr "" -"Pour fédérer notre exemple à plusieurs clients, nous devons d'abord " -"écrire notre classe de client Flower (héritant de " -":code:`flwr.client.NumPyClient`). C'est très facile, car notre modèle est" -" un modèle :code:`PyTorch` standard :" +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" -#: ../../source/tutorial-quickstart-huggingface.rst:169 +#: ../../source/ref-changelog.md:429 msgid "" -"The :code:`get_parameters` function lets the server get the client's " -"parameters. Inversely, the :code:`set_parameters` function allows the " -"server to send its parameters to the client. Finally, the :code:`fit` " -"function trains the model locally for the client, and the " -":code:`evaluate` function tests the model locally and returns the " -"relevant metrics." +"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " +"ensure better support for users using more recent Python versions." msgstr "" -"La fonction :code:`get_parameters` permet au serveur d'obtenir les " -"paramètres du client. 
Inversement, la fonction :code:`set_parameters` " -"permet au serveur d'envoyer ses paramètres au client. Enfin, la fonction " -":code:`fit` forme le modèle localement pour le client, et la fonction " -":code:`evaluate` teste le modèle localement et renvoie les mesures " -"correspondantes." - -#: ../../source/tutorial-quickstart-huggingface.rst:175 -msgid "Starting the server" -msgstr "Démarrer le serveur" -#: ../../source/tutorial-quickstart-huggingface.rst:177 +#: ../../source/ref-changelog.md:431 +#, fuzzy msgid "" -"Now that we have a way to instantiate clients, we need to create our " -"server in order to aggregate the results. Using Flower, this can be done " -"very easily by first choosing a strategy (here, we are using " -":code:`FedAvg`, which will define the global weights as the average of " -"all the clients' weights at each round) and then using the " -":code:`flwr.server.start_server` function:" +"**Update gRPC and ProtoBuf dependencies** " +"([#2814](https://github.com/adap/flower/pull/2814))" msgstr "" -"Maintenant que nous avons un moyen d'instancier les clients, nous devons " -"créer notre serveur afin d'agréger les résultats. Avec Flower, cela peut " -"être fait très facilement en choisissant d'abord une stratégie (ici, nous" -" utilisons :code:`FedAvg`, qui définira les poids globaux comme la " -"moyenne des poids de tous les clients à chaque tour) et en utilisant " -"ensuite la fonction :code:`flwr.server.start_server` :" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/tutorial-quickstart-huggingface.rst:205 +#: ../../source/ref-changelog.md:433 msgid "" -"The :code:`weighted_average` function is there to provide a way to " -"aggregate the metrics distributed amongst the clients (basically this " -"allows us to display a nice average accuracy and loss for every round)." 
+"The `grpcio` and `protobuf` dependencies were updated to their latest " +"versions for improved security and performance." msgstr "" -"La fonction :code:`weighted_average` est là pour fournir un moyen " -"d'agréger les mesures réparties entre les clients (en gros, cela nous " -"permet d'afficher une belle moyenne de précision et de perte pour chaque " -"tour)." -#: ../../source/tutorial-quickstart-huggingface.rst:209 -msgid "Putting everything together" -msgstr "Tout assembler" - -#: ../../source/tutorial-quickstart-huggingface.rst:211 -msgid "We can now start client instances using:" -msgstr "Nous pouvons maintenant démarrer des instances de clients en utilisant :" - -#: ../../source/tutorial-quickstart-huggingface.rst:221 -msgid "" -"And they will be able to connect to the server and start the federated " -"training." -msgstr "Et ils pourront se connecter au serveur et démarrer la formation fédérée." - -#: ../../source/tutorial-quickstart-huggingface.rst:223 +#: ../../source/ref-changelog.md:435 #, fuzzy msgid "" -"If you want to check out everything put together, you should check out " -"the `full code example `_ ." -msgstr "" -"Si tu veux voir tout ce qui est mis ensemble, tu devrais consulter " -"l'exemple de code complet : " -"[https://github.com/adap/flower/tree/main/examples/quickstart-" -"huggingface](https://github.com/adap/flower/tree/main/examples" -"/quickstart-huggingface)." - -#: ../../source/tutorial-quickstart-huggingface.rst:226 -msgid "" -"Of course, this is a very basic example, and a lot can be added or " -"modified, it was just to showcase how simply we could federate a Hugging " -"Face workflow using Flower." 
+"**Introduce Docker image for Flower server** " +"([#2700](https://github.com/adap/flower/pull/2700), " +"[#2688](https://github.com/adap/flower/pull/2688), " +"[#2705](https://github.com/adap/flower/pull/2705), " +"[#2695](https://github.com/adap/flower/pull/2695), " +"[#2747](https://github.com/adap/flower/pull/2747), " +"[#2746](https://github.com/adap/flower/pull/2746), " +"[#2680](https://github.com/adap/flower/pull/2680), " +"[#2682](https://github.com/adap/flower/pull/2682), " +"[#2701](https://github.com/adap/flower/pull/2701))" msgstr "" -"Bien sûr, c'est un exemple très basique, et beaucoup de choses peuvent " -"être ajoutées ou modifiées, il s'agissait juste de montrer avec quelle " -"simplicité on pouvait fédérer un flux de travail Hugging Face à l'aide de" -" Flower." +"**([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/tutorial-quickstart-huggingface.rst:229 +#: ../../source/ref-changelog.md:437 msgid "" -"Note that in this example we used :code:`PyTorch`, but we could have very" -" well used :code:`TensorFlow`." +"The Flower server can now be run using an official Docker image. A new " +"how-to guide explains [how to run Flower using " +"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html). An official Flower client Docker image will follow." msgstr "" -"Notez que dans cet exemple, nous avons utilisé :code:`PyTorch`, mais nous" -" aurions très bien pu utiliser :code:`TensorFlow`." -#: ../../source/tutorial-quickstart-ios.rst:-1 +#: ../../source/ref-changelog.md:439 +#, fuzzy msgid "" -"Read this Federated Learning quickstart tutorial for creating an iOS app " -"using Flower to train a neural network on MNIST." 
+"**Introduce** `flower-via-docker-compose` **example** " +"([#2626](https://github.com/adap/flower/pull/2626))" msgstr "" +"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/tutorial-quickstart-ios.rst:5 -#, fuzzy -msgid "Quickstart iOS" -msgstr "Démarrage rapide XGBoost" - -#: ../../source/tutorial-quickstart-ios.rst:10 +#: ../../source/ref-changelog.md:441 #, fuzzy msgid "" -"In this tutorial we will learn how to train a Neural Network on MNIST " -"using Flower and CoreML on iOS devices." +"**Introduce** `quickstart-sklearn-tabular` **example** " +"([#2719](https://github.com/adap/flower/pull/2719))" msgstr "" -"Dans ce tutoriel, nous allons apprendre, comment former un réseau " -"neuronal convolutif sur MNIST en utilisant Flower et PyTorch." +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/tutorial-quickstart-ios.rst:12 +#: ../../source/ref-changelog.md:443 #, fuzzy msgid "" -"First of all, for running the Flower Python server, it is recommended to " -"create a virtual environment and run everything within a :doc:`virtualenv" -" `. For the Flower client " -"implementation in iOS, it is recommended to use Xcode as our IDE." +"**Introduce** `custom-metrics` **example** " +"([#1958](https://github.com/adap/flower/pull/1958))" msgstr "" -"Tout d'abord, il est recommandé de créer un environnement virtuel et de " -"tout exécuter au sein d'un `virtualenv `_." +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/tutorial-quickstart-ios.rst:15 +#: ../../source/ref-changelog.md:445 #, fuzzy msgid "" -"Our example consists of one Python *server* and two iPhone *clients* that" -" all have the same model." 
+"**Update code examples to use Flower Datasets** " +"([#2450](https://github.com/adap/flower/pull/2450), " +"[#2456](https://github.com/adap/flower/pull/2456), " +"[#2318](https://github.com/adap/flower/pull/2318), " +"[#2712](https://github.com/adap/flower/pull/2712))" msgstr "" -"Notre exemple consiste en un *serveur* et deux *clients* ayant tous le " -"même modèle." +"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/tutorial-quickstart-ios.rst:17 -#, fuzzy +#: ../../source/ref-changelog.md:447 msgid "" -"*Clients* are responsible for generating individual weight updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." +"Several code examples were updated to use [Flower " +"Datasets](https://flower.ai/docs/datasets/)." msgstr "" -"*Les clients* sont chargés de générer des mises à jour de poids " -"individuelles pour le modèle en fonction de leurs ensembles de données " -"locales. Ces mises à jour sont ensuite envoyées au *serveur* qui les " -"agrège pour produire un meilleur modèle. Enfin, le *serveur* renvoie " -"cette version améliorée du modèle à chaque *client*. Un cycle complet de " -"mises à jour de poids s'appelle un *round*." -#: ../../source/tutorial-quickstart-ios.rst:21 +#: ../../source/ref-changelog.md:449 #, fuzzy msgid "" -"Now that we have a rough idea of what is going on, let's get started to " -"setup our Flower server environment. We first need to install Flower. 
You" -" can do this by using pip:" +"**General updates to Flower Examples** " +"([#2381](https://github.com/adap/flower/pull/2381), " +"[#2805](https://github.com/adap/flower/pull/2805), " +"[#2782](https://github.com/adap/flower/pull/2782), " +"[#2806](https://github.com/adap/flower/pull/2806), " +"[#2829](https://github.com/adap/flower/pull/2829), " +"[#2825](https://github.com/adap/flower/pull/2825), " +"[#2816](https://github.com/adap/flower/pull/2816), " +"[#2726](https://github.com/adap/flower/pull/2726), " +"[#2659](https://github.com/adap/flower/pull/2659), " +"[#2655](https://github.com/adap/flower/pull/2655))" msgstr "" -"Maintenant que nous avons une idée générale de ce qui se passe, " -"commençons. Nous devons d'abord installer Flower. Tu peux le faire en " -"exécutant :" +"**Améliorer l'API (expérimentale) du pilote** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" -#: ../../source/tutorial-quickstart-ios.rst:27 -msgid "Or Poetry:" +#: ../../source/ref-changelog.md:451 +msgid "Many Flower code examples received substantial updates." 
msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:34 -#: ../../source/tutorial-quickstart-pytorch.rst:37 -#: ../../source/tutorial-quickstart-scikitlearn.rst:40 -#: ../../source/tutorial-quickstart-tensorflow.rst:29 -#: ../../source/tutorial-quickstart-xgboost.rst:55 -msgid "Flower Client" -msgstr "Client de la fleur" +#: ../../source/ref-changelog.md:453 ../../source/ref-changelog.md:546 +#, fuzzy +msgid "**Update Flower Baselines**" +msgstr "Demande pour une nouvelle Flower Baseline" -#: ../../source/tutorial-quickstart-ios.rst:36 +#: ../../source/ref-changelog.md:455 +#, fuzzy msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training using CoreML as our local training pipeline and " -"MNIST as our dataset. For simplicity reasons we will use the complete " -"Flower client with CoreML, that has been implemented and stored inside " -"the Swift SDK. The client implementation can be seen below:" +"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " +"[#2771](https://github.com/adap/flower/pull/2771))" msgstr "" +"**Nouvel exemple de code JAX** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" -#: ../../source/tutorial-quickstart-ios.rst:72 -msgid "" -"Let's create a new application project in Xcode and add :code:`flwr` as a" -" dependency in your project. For our application, we will store the logic" -" of our app in :code:`FLiOSModel.swift` and the UI elements in " -":code:`ContentView.swift`. We will focus more on :code:`FLiOSModel.swift`" -" in this quickstart. Please refer to the `full code example " -"`_ to learn more " -"about the app." 
+#: ../../source/ref-changelog.md:456 +#, fuzzy +msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" msgstr "" +"Amélioration de la documentation sur le serveur gRPC " +"([#841](https://github.com/adap/flower/pull/841))" -#: ../../source/tutorial-quickstart-ios.rst:75 -msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" +#: ../../source/ref-changelog.md:457 +#, fuzzy +msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" msgstr "" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/tutorial-quickstart-ios.rst:83 -msgid "" -"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " -"will be bundled inside the application during deployment to your iOS " -"device. We need to pass the url to access mlmodel and run CoreML machine " -"learning processes, it can be retrieved by calling the function " -":code:`Bundle.main.url`. For the MNIST dataset, we need to preprocess it " -"into :code:`MLBatchProvider` object. The preprocessing is done inside " -":code:`DataLoader.swift`." -msgstr "" +#: ../../source/ref-changelog.md:458 +#, fuzzy +msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" -#: ../../source/tutorial-quickstart-ios.rst:99 -msgid "" -"Since CoreML does not allow the model parameters to be seen before " -"training, and accessing the model parameters during or after the training" -" can only be done by specifying the layer name, we need to know this " -"information beforehand, through looking at the model specification, which" -" are written as proto files. The implementation can be seen in " -":code:`MLModelInspect`." 
+#: ../../source/ref-changelog.md:459 +#, fuzzy +msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" + +#: ../../source/ref-changelog.md:460 +#, fuzzy +msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" msgstr "" +"**Renommé stratégie q-FedAvg** " +"([#802](https://github.com/adap/flower/pull/802))" -#: ../../source/tutorial-quickstart-ios.rst:102 +#: ../../source/ref-changelog.md:462 +#, fuzzy msgid "" -"After we have all of the necessary information, let's create our Flower " -"client." +"**Improve documentation** " +"([#2674](https://github.com/adap/flower/pull/2674), " +"[#2480](https://github.com/adap/flower/pull/2480), " +"[#2826](https://github.com/adap/flower/pull/2826), " +"[#2727](https://github.com/adap/flower/pull/2727), " +"[#2761](https://github.com/adap/flower/pull/2761), " +"[#2900](https://github.com/adap/flower/pull/2900))" msgstr "" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/tutorial-quickstart-ios.rst:117 +#: ../../source/ref-changelog.md:464 msgid "" -"Then start the Flower gRPC client and start communicating to the server " -"by passing our Flower client to the function :code:`startFlwrGRPC`." 
+"**Improved testing and development infrastructure** " +"([#2797](https://github.com/adap/flower/pull/2797), " +"[#2676](https://github.com/adap/flower/pull/2676), " +"[#2644](https://github.com/adap/flower/pull/2644), " +"[#2656](https://github.com/adap/flower/pull/2656), " +"[#2848](https://github.com/adap/flower/pull/2848), " +"[#2675](https://github.com/adap/flower/pull/2675), " +"[#2735](https://github.com/adap/flower/pull/2735), " +"[#2767](https://github.com/adap/flower/pull/2767), " +"[#2732](https://github.com/adap/flower/pull/2732), " +"[#2744](https://github.com/adap/flower/pull/2744), " +"[#2681](https://github.com/adap/flower/pull/2681), " +"[#2699](https://github.com/adap/flower/pull/2699), " +"[#2745](https://github.com/adap/flower/pull/2745), " +"[#2734](https://github.com/adap/flower/pull/2734), " +"[#2731](https://github.com/adap/flower/pull/2731), " +"[#2652](https://github.com/adap/flower/pull/2652), " +"[#2720](https://github.com/adap/flower/pull/2720), " +"[#2721](https://github.com/adap/flower/pull/2721), " +"[#2717](https://github.com/adap/flower/pull/2717), " +"[#2864](https://github.com/adap/flower/pull/2864), " +"[#2694](https://github.com/adap/flower/pull/2694), " +"[#2709](https://github.com/adap/flower/pull/2709), " +"[#2658](https://github.com/adap/flower/pull/2658), " +"[#2796](https://github.com/adap/flower/pull/2796), " +"[#2692](https://github.com/adap/flower/pull/2692), " +"[#2657](https://github.com/adap/flower/pull/2657), " +"[#2813](https://github.com/adap/flower/pull/2813), " +"[#2661](https://github.com/adap/flower/pull/2661), " +"[#2398](https://github.com/adap/flower/pull/2398))" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:124 +#: ../../source/ref-changelog.md:466 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -"call the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. 
" -"The attribute :code:`hostname` and :code:`port` tells the client which " -"server to connect to. This can be done by entering the hostname and port " -"in the application before clicking the start button to start the " -"federated learning process." +"The Flower testing and development infrastructure has received " +"substantial updates. This makes Flower 1.7 the most tested release ever." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:129 -#: ../../source/tutorial-quickstart-pytorch.rst:203 -#: ../../source/tutorial-quickstart-scikitlearn.rst:167 -#: ../../source/tutorial-quickstart-tensorflow.rst:98 -#: ../../source/tutorial-quickstart-xgboost.rst:309 -msgid "Flower Server" -msgstr "Serveur de Flower" - -#: ../../source/tutorial-quickstart-ios.rst:131 -#: ../../source/tutorial-quickstart-pytorch.rst:205 -#: ../../source/tutorial-quickstart-tensorflow.rst:100 +#: ../../source/ref-changelog.md:468 msgid "" -"For simple workloads we can start a Flower server and leave all the " -"configuration possibilities at their default values. 
In a file named " -":code:`server.py`, import Flower and start the server:" +"**Update dependencies** " +"([#2753](https://github.com/adap/flower/pull/2753), " +"[#2651](https://github.com/adap/flower/pull/2651), " +"[#2739](https://github.com/adap/flower/pull/2739), " +"[#2837](https://github.com/adap/flower/pull/2837), " +"[#2788](https://github.com/adap/flower/pull/2788), " +"[#2811](https://github.com/adap/flower/pull/2811), " +"[#2774](https://github.com/adap/flower/pull/2774), " +"[#2790](https://github.com/adap/flower/pull/2790), " +"[#2751](https://github.com/adap/flower/pull/2751), " +"[#2850](https://github.com/adap/flower/pull/2850), " +"[#2812](https://github.com/adap/flower/pull/2812), " +"[#2872](https://github.com/adap/flower/pull/2872), " +"[#2736](https://github.com/adap/flower/pull/2736), " +"[#2756](https://github.com/adap/flower/pull/2756), " +"[#2857](https://github.com/adap/flower/pull/2857), " +"[#2757](https://github.com/adap/flower/pull/2757), " +"[#2810](https://github.com/adap/flower/pull/2810), " +"[#2740](https://github.com/adap/flower/pull/2740), " +"[#2789](https://github.com/adap/flower/pull/2789))" msgstr "" -"Pour les charges de travail simples, nous pouvons démarrer un serveur " -"Flower et laisser toutes les possibilités de configuration à leurs " -"valeurs par défaut. Dans un fichier nommé :code:`server.py`, importe " -"Flower et démarre le serveur :" -#: ../../source/tutorial-quickstart-ios.rst:142 -#: ../../source/tutorial-quickstart-pytorch.rst:216 -#: ../../source/tutorial-quickstart-scikitlearn.rst:230 -#: ../../source/tutorial-quickstart-tensorflow.rst:112 -msgid "Train the model, federated!" -msgstr "Entraîne le modèle, fédéré !" 
+#: ../../source/ref-changelog.md:470 +msgid "" +"**General improvements** " +"([#2803](https://github.com/adap/flower/pull/2803), " +"[#2847](https://github.com/adap/flower/pull/2847), " +"[#2877](https://github.com/adap/flower/pull/2877), " +"[#2690](https://github.com/adap/flower/pull/2690), " +"[#2889](https://github.com/adap/flower/pull/2889), " +"[#2874](https://github.com/adap/flower/pull/2874), " +"[#2819](https://github.com/adap/flower/pull/2819), " +"[#2689](https://github.com/adap/flower/pull/2689), " +"[#2457](https://github.com/adap/flower/pull/2457), " +"[#2870](https://github.com/adap/flower/pull/2870), " +"[#2669](https://github.com/adap/flower/pull/2669), " +"[#2876](https://github.com/adap/flower/pull/2876), " +"[#2885](https://github.com/adap/flower/pull/2885), " +"[#2858](https://github.com/adap/flower/pull/2858), " +"[#2867](https://github.com/adap/flower/pull/2867), " +"[#2351](https://github.com/adap/flower/pull/2351), " +"[#2886](https://github.com/adap/flower/pull/2886), " +"[#2860](https://github.com/adap/flower/pull/2860), " +"[#2828](https://github.com/adap/flower/pull/2828), " +"[#2869](https://github.com/adap/flower/pull/2869), " +"[#2875](https://github.com/adap/flower/pull/2875), " +"[#2733](https://github.com/adap/flower/pull/2733), " +"[#2488](https://github.com/adap/flower/pull/2488), " +"[#2646](https://github.com/adap/flower/pull/2646), " +"[#2879](https://github.com/adap/flower/pull/2879), " +"[#2821](https://github.com/adap/flower/pull/2821), " +"[#2855](https://github.com/adap/flower/pull/2855), " +"[#2800](https://github.com/adap/flower/pull/2800), " +"[#2807](https://github.com/adap/flower/pull/2807), " +"[#2801](https://github.com/adap/flower/pull/2801), " +"[#2804](https://github.com/adap/flower/pull/2804), " +"[#2851](https://github.com/adap/flower/pull/2851), " +"[#2787](https://github.com/adap/flower/pull/2787), " +"[#2852](https://github.com/adap/flower/pull/2852), " 
+"[#2672](https://github.com/adap/flower/pull/2672), " +"[#2759](https://github.com/adap/flower/pull/2759))" +msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:144 -#: ../../source/tutorial-quickstart-pytorch.rst:218 -#: ../../source/tutorial-quickstart-tensorflow.rst:114 -#: ../../source/tutorial-quickstart-xgboost.rst:525 +#: ../../source/ref-changelog.md:474 +#, fuzzy msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. FL systems usually have a server and " -"multiple clients. We therefore have to start the server first:" +"**Deprecate** `start_numpy_client` " +"([#2563](https://github.com/adap/flower/pull/2563), " +"[#2718](https://github.com/adap/flower/pull/2718))" msgstr "" -"Le client et le serveur étant prêts, nous pouvons maintenant tout " -"exécuter et voir l'apprentissage fédéré en action. Les systèmes FL ont " -"généralement un serveur et plusieurs clients. Nous devons donc commencer " -"par démarrer le serveur :" +"**Nouvelles stratégies intégrées** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/tutorial-quickstart-ios.rst:152 +#: ../../source/ref-changelog.md:476 msgid "" -"Once the server is running we can start the clients in different " -"terminals. Build and run the client through your Xcode, one through Xcode" -" Simulator and the other by deploying it to your iPhone. To see more " -"about how to deploy your app to iPhone or Simulator visit `here " -"`_." +"Until now, clients of type `NumPyClient` needed to be started via " +"`start_numpy_client`. In our efforts to consolidate framework APIs, we " +"have introduced changes, and now all client types should start via " +"`start_client`. To continue using `NumPyClient` clients, you simply need " +"to first call the `.to_client()` method and then pass returned `Client` " +"object to `start_client`. 
The examples and the documentation have been " +"updated accordingly." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:156 +#: ../../source/ref-changelog.md:478 #, fuzzy msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system in your ios device. The full `source code " -"`_ for this " -"example can be found in :code:`examples/ios`." +"**Deprecate legacy DP wrappers** " +"([#2749](https://github.com/adap/flower/pull/2749))" msgstr "" -"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " -"premier système d'apprentissage fédéré. Le code source complet " -"`_ de cet exemple se trouve dans :code:`examples" -"/quickstart-mxnet`." +"**Supprimez KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/tutorial-quickstart-jax.rst:-1 +#: ../../source/ref-changelog.md:480 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Jax to train a linear regression model on a scikit-learn dataset." +"Legacy DP wrapper classes are deprecated, but still functional. This is " +"in preparation for an all-new pluggable version of differential privacy " +"support in Flower." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:5 -msgid "Quickstart JAX" -msgstr "Démarrage rapide de JAX" - -#: ../../source/tutorial-quickstart-pandas.rst:-1 +#: ../../source/ref-changelog.md:482 +#, fuzzy msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Pandas to perform Federated Analytics." 
+"**Make optional arg** `--callable` **in** `flower-client` **a required " +"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" msgstr "" +"**Log** `Client` **exceptions dans le moteur de client virtuel** " +"([#1493](https://github.com/adap/flower/pull/1493))" -#: ../../source/tutorial-quickstart-pandas.rst:5 -msgid "Quickstart Pandas" -msgstr "Démarrage rapide des Pandas" - -#: ../../source/tutorial-quickstart-pandas.rst:10 -msgid "Let's build a federated analytics system using Pandas and Flower!" -msgstr "Construisons un système d'analyse fédéré à l'aide de Pandas et de Flower !" - -#: ../../source/tutorial-quickstart-pandas.rst:12 +#: ../../source/ref-changelog.md:484 #, fuzzy msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." +"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " +"([#2890](https://github.com/adap/flower/pull/2890))" msgstr "" -"Réfère-toi à l'exemple de code complet " -"`_ " -"pour en savoir plus." +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/tutorial-quickstart-pytorch.rst:-1 +#: ../../source/ref-changelog.md:486 +#, fuzzy msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch to train a CNN model on MNIST." +"**Drop experimental** `Task` **fields** " +"([#2866](https://github.com/adap/flower/pull/2866), " +"[#2865](https://github.com/adap/flower/pull/2865))" msgstr "" +"**Rename** `Weights` **to** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/tutorial-quickstart-pytorch.rst:13 +#: ../../source/ref-changelog.md:488 msgid "" -"In this tutorial we will learn how to train a Convolutional Neural " -"Network on CIFAR10 using Flower and PyTorch." +"Experimental fields `sa`, `legacy_server_message` and " +"`legacy_client_message` were removed from `Task` message. 
The removed " +"fields are superseded by the new `RecordSet` abstraction." msgstr "" -"Dans ce tutoriel, nous allons apprendre à entraîner un réseau neuronal " -"convolutif sur CIFAR10 à l'aide de Flower et PyTorch." -#: ../../source/tutorial-quickstart-pytorch.rst:15 -#: ../../source/tutorial-quickstart-xgboost.rst:39 +#: ../../source/ref-changelog.md:490 #, fuzzy msgid "" -"First of all, it is recommended to create a virtual environment and run " -"everything within a :doc:`virtualenv `." +"**Retire MXNet examples** " +"([#2724](https://github.com/adap/flower/pull/2724))" msgstr "" -"Tout d'abord, il est recommandé de créer un environnement virtuel et de " -"tout exécuter au sein d'un `virtualenv `_." +"**Nouvel exemple de code scikit-learn** " +"([#748](https://github.com/adap/flower/pull/748))" -#: ../../source/tutorial-quickstart-pytorch.rst:17 -#: ../../source/tutorial-quickstart-scikitlearn.rst:14 +#: ../../source/ref-changelog.md:492 msgid "" -"Our example consists of one *server* and two *clients* all having the " -"same model." +"The development of the MXNet fremework has ended and the project is now " +"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " +"examples won't receive updates." msgstr "" -"Notre exemple consiste en un *serveur* et deux *clients* ayant tous le " -"même modèle." -#: ../../source/tutorial-quickstart-pytorch.rst:19 +#: ../../source/ref-changelog.md:494 +#, fuzzy +msgid "v1.6.0 (2023-11-28)" +msgstr "v1.4.0 (2023-04-21)" + +#: ../../source/ref-changelog.md:500 msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." 
+"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " +"`Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Gabriel " +"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," +" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " +"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " +"`cnxdeveloper`, `k3nfalt` " msgstr "" -"*Les clients* sont chargés de générer des mises à jour de poids " -"individuelles pour le modèle en fonction de leurs ensembles de données " -"locales. Ces mises à jour sont ensuite envoyées au *serveur* qui les " -"agrège pour produire un meilleur modèle. Enfin, le *serveur* renvoie " -"cette version améliorée du modèle à chaque *client*. Un cycle complet de " -"mises à jour de poids s'appelle un *round*." -#: ../../source/tutorial-quickstart-pytorch.rst:23 +#: ../../source/ref-changelog.md:504 +#, fuzzy msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. You can do this by running :" +"**Add experimental support for Python 3.12** " +"([#2565](https://github.com/adap/flower/pull/2565))" msgstr "" -"Maintenant que nous avons une idée générale de ce qui se passe, " -"commençons. Nous devons d'abord installer Flower. 
Tu peux le faire en " -"exécutant :" +"**Ajouter la prise en charge expérimentale de Python 3.10 et Python " +"3.11** ([#1135](https://github.com/adap/flower/pull/1135))" -#: ../../source/tutorial-quickstart-pytorch.rst:29 +#: ../../source/ref-changelog.md:506 +#, fuzzy msgid "" -"Since we want to use PyTorch to solve a computer vision task, let's go " -"ahead and install PyTorch and the **torchvision** library:" +"**Add new XGBoost examples** " +"([#2612](https://github.com/adap/flower/pull/2612), " +"[#2554](https://github.com/adap/flower/pull/2554), " +"[#2617](https://github.com/adap/flower/pull/2617), " +"[#2618](https://github.com/adap/flower/pull/2618), " +"[#2619](https://github.com/adap/flower/pull/2619), " +"[#2567](https://github.com/adap/flower/pull/2567))" msgstr "" -"Puisque nous voulons utiliser PyTorch pour résoudre une tâche de vision " -"par ordinateur, allons-y et installons PyTorch et la bibliothèque " -"**torchvision** :" +"**([#1520](https://github.com/adap/flower/pull/1520), " +"[#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" -#: ../../source/tutorial-quickstart-pytorch.rst:39 +#: ../../source/ref-changelog.md:508 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. Our training " -"procedure and network architecture are based on PyTorch's `Deep Learning " -"with PyTorch " -"`_." +"We have added a new `xgboost-quickstart` example alongside a new " +"`xgboost-comprehensive` example that goes more in-depth." msgstr "" -"Maintenant que nous avons installé toutes nos dépendances, lançons une " -"formation distribuée simple avec deux clients et un serveur. 
Notre " -"procédure de formation et l'architecture de notre réseau sont basées sur " -"`Deep Learning with PyTorch " -"`_ de" -" PyTorch." -#: ../../source/tutorial-quickstart-pytorch.rst:41 +#: ../../source/ref-changelog.md:510 +#, fuzzy msgid "" -"In a file called :code:`client.py`, import Flower and PyTorch related " -"packages:" +"**Add Vertical FL example** " +"([#2598](https://github.com/adap/flower/pull/2598))" msgstr "" -"Dans un fichier appelé :code:`client.py`, importe Flower et les paquets " -"liés à PyTorch :" - -#: ../../source/tutorial-quickstart-pytorch.rst:56 -msgid "In addition, we define the device allocation in PyTorch with:" -msgstr "En outre, nous définissons l'attribution des appareils dans PyTorch avec :" +"**Nouvel exemple de code CoreML pour iOS** " +"([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/tutorial-quickstart-pytorch.rst:62 +#: ../../source/ref-changelog.md:512 msgid "" -"We use PyTorch to load CIFAR10, a popular colored image classification " -"dataset for machine learning. The PyTorch :code:`DataLoader()` downloads " -"the training and test data that are then normalized." +"We had many questions about Vertical Federated Learning using Flower, so " +"we decided to add an simple example for it on the [Titanic " +"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " +"tutorial (in the README)." msgstr "" -"Nous utilisons PyTorch pour charger CIFAR10, un ensemble de données de " -"classification d'images colorées populaire pour l'apprentissage " -"automatique. Le :code:`DataLoader()` de PyTorch télécharge les données " -"d'entraînement et de test qui sont ensuite normalisées." -#: ../../source/tutorial-quickstart-pytorch.rst:78 +#: ../../source/ref-changelog.md:514 +#, fuzzy msgid "" -"Define the loss and optimizer with PyTorch. The training of the dataset " -"is done by looping over the dataset, measure the corresponding loss and " -"optimize it." 
+"**Support custom** `ClientManager` **in** `start_driver()` " +"([#2292](https://github.com/adap/flower/pull/2292))" msgstr "" -"Définis la perte et l'optimiseur avec PyTorch L'entraînement de " -"l'ensemble de données se fait en bouclant sur l'ensemble de données, en " -"mesurant la perte correspondante et en l'optimisant." +"Ajout de la prise en charge d'un `ClientManager` personnalisé comme " +"paramètre de `start_simulation` " +"([#1171](https://github.com/adap/flower/pull/1171))" -#: ../../source/tutorial-quickstart-pytorch.rst:94 +#: ../../source/ref-changelog.md:516 +#, fuzzy msgid "" -"Define then the validation of the machine learning network. We loop over" -" the test set and measure the loss and accuracy of the test set." +"**Update REST API to support create and delete nodes** " +"([#2283](https://github.com/adap/flower/pull/2283))" msgstr "" -"Définis ensuite la validation du réseau d'apprentissage automatique. Nous" -" passons en boucle sur l'ensemble de test et mesurons la perte et la " -"précision de l'ensemble de test." +"**Nouvelle stratégie expérimentale TensorBoard** " +"([#789](https://github.com/adap/flower/pull/789))" -#: ../../source/tutorial-quickstart-pytorch.rst:113 +#: ../../source/ref-changelog.md:518 +#, fuzzy msgid "" -"After defining the training and testing of a PyTorch machine learning " -"model, we use the functions for the Flower clients." +"**Update the Android SDK** " +"([#2187](https://github.com/adap/flower/pull/2187))" msgstr "" -"Après avoir défini l'entraînement et le test d'un modèle d'apprentissage " -"automatique PyTorch, nous utilisons les fonctions pour les clients " -"Flower." 
+"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/tutorial-quickstart-pytorch.rst:115 -msgid "" -"The Flower clients will use a simple CNN adapted from 'PyTorch: A 60 " -"Minute Blitz':" +#: ../../source/ref-changelog.md:520 +msgid "Add gRPC request-response capability to the Android SDK." msgstr "" -"Les clients de Flower utiliseront un CNN simple adapté de \"PyTorch : A " -"60 Minute Blitz\" :" -#: ../../source/tutorial-quickstart-pytorch.rst:142 +#: ../../source/ref-changelog.md:522 +#, fuzzy msgid "" -"After loading the data set with :code:`load_data()` we define the Flower " -"interface." +"**Update the C++ SDK** " +"([#2537](https://github.com/adap/flower/pull/2537), " +"[#2528](https://github.com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" msgstr "" -"Après avoir chargé l'ensemble des données avec :code:`load_data()`, nous " -"définissons l'interface Flower." +"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/tutorial-quickstart-pytorch.rst:144 -#: ../../source/tutorial-quickstart-tensorflow.rst:54 -msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to train the neural network we defined earlier)." +#: ../../source/ref-changelog.md:524 +msgid "Add gRPC request-response capability to the C++ SDK." 
msgstr "" -"Le serveur Flower interagit avec les clients par le biais d'une interface" -" appelée :code:`Client`. Lorsque le serveur sélectionne un client " -"particulier pour la formation, il envoie des instructions de formation " -"sur le réseau. Le client reçoit ces instructions et appelle l'une des " -"méthodes :code:`Client` pour exécuter ton code (c'est-à-dire pour former " -"le réseau neuronal que nous avons défini plus tôt)." -#: ../../source/tutorial-quickstart-pytorch.rst:150 +#: ../../source/ref-changelog.md:526 +#, fuzzy msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses PyTorch. Implementing :code:`NumPyClient` usually means " -"defining the following methods (:code:`set_parameters` is optional " -"though):" +"**Make HTTPS the new default** " +"([#2591](https://github.com/adap/flower/pull/2591), " +"[#2636](https://github.com/adap/flower/pull/2636))" msgstr "" -"Flower fournit une classe de commodité appelée :code:`NumPyClient` qui " -"facilite la mise en œuvre de l'interface :code:`Client` lorsque ta charge" -" de travail utilise PyTorch. 
Mettre en œuvre :code:`NumPyClient` signifie" -" généralement définir les méthodes suivantes (:code:`set_parameters` est " -"cependant facultatif) :" - -#: ../../source/tutorial-quickstart-pytorch.rst:156 -#: ../../source/tutorial-quickstart-scikitlearn.rst:119 -msgid "return the model weight as a list of NumPy ndarrays" -msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" - -#: ../../source/tutorial-quickstart-pytorch.rst:157 -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 -msgid ":code:`set_parameters` (optional)" -msgstr ":code:`set_parameters` (optionnel)" +"**Exemple de code mis à jour** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/tutorial-quickstart-pytorch.rst:158 -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 +#: ../../source/ref-changelog.md:528 msgid "" -"update the local model weights with the parameters received from the " -"server" +"Flower is moving to HTTPS by default. The new `flower-server` requires " +"passing `--certificates`, but users can enable `--insecure` to use HTTP " +"for prototyping. The same applies to `flower-client`, which can either " +"use user-provided credentials or gRPC-bundled certificates to connect to " +"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " +"enable insecure HTTP connections." 
msgstr "" -"mettre à jour les poids du modèle local avec les paramètres reçus du " -"serveur" - -#: ../../source/tutorial-quickstart-pytorch.rst:160 -#: ../../source/tutorial-quickstart-scikitlearn.rst:124 -msgid "set the local model weights" -msgstr "fixe les poids du modèle local" - -#: ../../source/tutorial-quickstart-pytorch.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 -msgid "train the local model" -msgstr "entraîne le modèle local" - -#: ../../source/tutorial-quickstart-pytorch.rst:162 -#: ../../source/tutorial-quickstart-scikitlearn.rst:126 -msgid "receive the updated local model weights" -msgstr "recevoir les poids du modèle local mis à jour" - -#: ../../source/tutorial-quickstart-pytorch.rst:164 -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 -msgid "test the local model" -msgstr "teste le modèle local" -#: ../../source/tutorial-quickstart-pytorch.rst:166 -msgid "which can be implemented in the following way:" -msgstr "qui peut être mis en œuvre de la manière suivante :" - -#: ../../source/tutorial-quickstart-pytorch.rst:189 -#: ../../source/tutorial-quickstart-tensorflow.rst:82 +#: ../../source/ref-changelog.md:530 msgid "" -"We can now create an instance of our class :code:`CifarClient` and add " -"one line to actually run this client:" +"For backward compatibility, `start_client()` and `start_numpy_client()` " +"will still start in insecure mode by default. In a future release, " +"insecure connections will require user opt-in by passing `insecure=True`." msgstr "" -"Nous pouvons maintenant créer une instance de notre classe " -":code:`CifarClient` et ajouter une ligne pour exécuter ce client :" -#: ../../source/tutorial-quickstart-pytorch.rst:196 -#: ../../source/tutorial-quickstart-tensorflow.rst:90 +#: ../../source/ref-changelog.md:532 #, fuzzy msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. 
If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"[::]:8080\"` tells " -"the client which server to connect to. In our case we can run the server " -"and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." +"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" msgstr "" -"C'est tout pour le client. Il nous suffit d'implémenter :code:`Client` ou" -" :code:`NumPyClient` et d'appeler :code:`fl.client.start_client()`. La " -"chaîne :code:`\"[: :]:8080\"` indique au client à quel serveur se " -"connecter. Dans notre cas, nous pouvons exécuter le serveur et le client " -"sur la même machine, c'est pourquoi nous utilisons :code:`\"[: " -":]:8080\"`. Si nous exécutons une charge de travail véritablement fédérée" -" avec le serveur et les clients fonctionnant sur des machines " -"différentes, tout ce qui doit changer est l'adresse " -":code:`server_address` vers laquelle nous dirigeons le client." +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/tutorial-quickstart-pytorch.rst:226 -#: ../../source/tutorial-quickstart-scikitlearn.rst:239 -#: ../../source/tutorial-quickstart-tensorflow.rst:122 -#: ../../source/tutorial-quickstart-xgboost.rst:533 +#: ../../source/ref-changelog.md:534 msgid "" -"Once the server is running we can start the clients in different " -"terminals. 
Open a new terminal and start the first client:" +"Using the `client_fn`, Flower clients can interchangeably run as " +"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" +" `start_simulation`) without requiring changes to how the client class is" +" defined and instantiated. The `to_client()` function is introduced to " +"convert a `NumPyClient` to a `Client`." msgstr "" -"Une fois que le serveur fonctionne, nous pouvons démarrer les clients " -"dans différents terminaux. Ouvre un nouveau terminal et démarre le " -"premier client :" -#: ../../source/tutorial-quickstart-pytorch.rst:233 -#: ../../source/tutorial-quickstart-scikitlearn.rst:246 -#: ../../source/tutorial-quickstart-tensorflow.rst:129 -#: ../../source/tutorial-quickstart-xgboost.rst:540 -msgid "Open another terminal and start the second client:" -msgstr "Ouvre un autre terminal et démarre le deuxième client :" - -#: ../../source/tutorial-quickstart-pytorch.rst:239 -#: ../../source/tutorial-quickstart-scikitlearn.rst:252 -#: ../../source/tutorial-quickstart-xgboost.rst:546 +#: ../../source/ref-changelog.md:536 +#, fuzzy msgid "" -"Each client will have its own dataset. You should now see how the " -"training does in the very first terminal (the one that started the " -"server):" +"**Add new** `Bulyan` **strategy** " +"([#1817](https://github.com/adap/flower/pull/1817), " +"[#1891](https://github.com/adap/flower/pull/1891))" msgstr "" -"Chaque client aura son propre ensemble de données. Tu devrais maintenant " -"voir comment la formation se déroule dans le tout premier terminal (celui" -" qui a démarré le serveur) :" +"**Nouvelles stratégies intégrées** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/tutorial-quickstart-pytorch.rst:271 +#: ../../source/ref-changelog.md:538 #, fuzzy msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. 
The full `source code " -"`_ for this example can be found in :code:`examples" -"/quickstart-pytorch`." +"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " +"2018](https://arxiv.org/abs/1802.07927)" msgstr "" -"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " -"premier système d'apprentissage fédéré. Le code source complet " -"`_ de cet exemple se trouve dans :code:`examples" -"/quickstart-pytorch`." +"La nouvelle stratégie `FedMedian` met en œuvre Federated Median " +"(FedMedian) par [Yin et al., 2018] " +"(https://arxiv.org/pdf/1803.01498v1.pdf)." -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:-1 +#: ../../source/ref-changelog.md:540 +#, fuzzy msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch Lightning to train an Auto Encoder model on MNIST." +"**Add new** `XGB Bagging` **strategy** " +"([#2611](https://github.com/adap/flower/pull/2611))" msgstr "" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 -msgid "Quickstart PyTorch Lightning" -msgstr "Démarrage rapide de PyTorch Lightning" - -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:10 +#: ../../source/ref-changelog.md:542 ../../source/ref-changelog.md:544 #, fuzzy msgid "" -"Let's build a horizontal federated learning system using PyTorch " -"Lightning and Flower!" +"**Introduce `WorkloadState`** " +"([#2564](https://github.com/adap/flower/pull/2564), " +"[#2632](https://github.com/adap/flower/pull/2632))" msgstr "" -"Construisons un système d'apprentissage fédéré en utilisant PyTorch " -"Lightning et Flower !" 
+"**Nouvelles stratégies intégrées** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 +#: ../../source/ref-changelog.md:548 #, fuzzy msgid "" -"Please refer to the `full code example " -"`_ to learn more." +"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " +"[#2286](https://github.com/adap/flower/pull/2286), " +"[#2509](https://github.com/adap/flower/pull/2509))" msgstr "" -"Réfère-toi à l'exemple de code complet " -"`_ pour en savoir plus." +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 +#: ../../source/ref-changelog.md:550 +#, fuzzy msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with scikit-learn to train a linear regression model." +"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400))" msgstr "" +"**Nouvel exemple de code JAX** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:5 -msgid "Quickstart scikit-learn" -msgstr "Démarrage rapide de scikit-learn" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:10 +#: ../../source/ref-changelog.md:552 +#, fuzzy msgid "" -"In this tutorial, we will learn how to train a :code:`Logistic " -"Regression` model on MNIST using Flower and scikit-learn." +"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " +"[#2507](https://github.com/adap/flower/pull/2507))" msgstr "" -"Dans ce tutoriel, nous allons apprendre à former un :code:`modèle de " -"régression logistique` sur MNIST en utilisant Flower et scikit-learn." 
+"**Exemple de code mis à jour** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:12 +#: ../../source/ref-changelog.md:554 #, fuzzy msgid "" -"It is recommended to create a virtual environment and run everything " -"within this :doc:`virtualenv `." +"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" msgstr "" -"Il est recommandé de créer un environnement virtuel et de tout exécuter " -"dans ce `virtualenv `_." +"**Nouvelles stratégies intégrées** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:16 -msgid "" -"*Clients* are responsible for generating individual model parameter " -"updates for the model based on their local datasets. These updates are " -"then sent to the *server* which will aggregate them to produce an updated" -" global model. Finally, the *server* sends this improved version of the " -"model back to each *client*. A complete cycle of parameters updates is " -"called a *round*." -msgstr "" -"*Les clients* sont chargés de générer des mises à jour individuelles des " -"paramètres du modèle en fonction de leurs ensembles de données locales. " -"Ces mises à jour sont ensuite envoyées au *serveur* qui les agrège pour " -"produire un modèle global mis à jour. Enfin, le *serveur* renvoie cette " -"version améliorée du modèle à chaque *client*. Un cycle complet de mises " -"à jour des paramètres s'appelle un *round*." +#: ../../source/ref-changelog.md:556 +#, fuzzy +msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:20 -msgid "" -"Now that we have a rough idea of what is going on, let's get started. 
We " -"first need to install Flower. You can do this by running:" +#: ../../source/ref-changelog.md:558 +#, fuzzy +msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" msgstr "" -"Maintenant que nous avons une idée approximative de ce qui se passe, " -"commençons. Nous devons d'abord installer Flower. Tu peux le faire en " -"lançant :" +"Amélioration de la documentation sur le serveur gRPC " +"([#841](https://github.com/adap/flower/pull/841))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:26 +#: ../../source/ref-changelog.md:560 #, fuzzy -msgid "Since we want to use scikit-learn, let's go ahead and install it:" -msgstr "Puisque nous voulons utiliser scikt-learn, allons-y et installons-le :" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:32 -msgid "Or simply install all dependencies using Poetry:" -msgstr "Ou installe simplement toutes les dépendances à l'aide de Poetry :" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:42 -msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. However, before " -"setting up the client and server, we will define all functionalities that" -" we need for our federated learning setup within :code:`utils.py`. The " -":code:`utils.py` contains different functions defining all the machine " -"learning basics:" +msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" msgstr "" -"Maintenant que toutes nos dépendances sont installées, exécutons une " -"formation distribuée simple avec deux clients et un serveur. Cependant, " -"avant de configurer le client et le serveur, nous allons définir toutes " -"les fonctionnalités dont nous avons besoin pour notre configuration " -"d'apprentissage fédéré dans :code:`utils.py`. 
Le :code:`utils.py` " -"contient différentes fonctions définissant toutes les bases de " -"l'apprentissage automatique :" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:45 -msgid ":code:`get_model_parameters()`" -msgstr ":code:`get_model_parameters()`" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:46 -msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" +#: ../../source/ref-changelog.md:562 +#, fuzzy +msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" msgstr "" -"Renvoie les paramètres d'un modèle de régression logistique " -":code:`sklearn`" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:47 -msgid ":code:`set_model_params()`" -msgstr ":code:`set_model_params()`" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:48 +#: ../../source/ref-changelog.md:564 #, fuzzy -msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" -msgstr "Définit les paramètres d'un modèle de régression logistique :code:`sklean`" +msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid ":code:`set_initial_params()`" -msgstr ":code:`set_initial_params()`" +#: ../../source/ref-changelog.md:566 +#, fuzzy +msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid "Initializes the model parameters that the Flower server will ask for" -msgstr "Initialise les paramètres du modèle que le serveur de Flower demandera" +#: ../../source/ref-changelog.md:568 +#, fuzzy +msgid "niid-Bench 
[#2428](https://github.com/adap/flower/pull/2428)" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +#: ../../source/ref-changelog.md:570 +#, fuzzy msgid "" -"Please check out :code:`utils.py` `here " -"`_ for more details. The pre-defined functions are used in" -" the :code:`client.py` and imported. The :code:`client.py` also requires " -"to import several packages such as Flower and scikit-learn:" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" msgstr "" -"Tu peux consulter :code:`utils.py` `ici " -"`_ pour plus de détails. Les fonctions prédéfinies sont " -"utilisées dans :code:`client.py` et importées. :code:`client.py` " -"nécessite également d'importer plusieurs paquets tels que Flower et " -"scikit-learn :" +"**Nouvelles stratégies intégrées** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:67 +#: ../../source/ref-changelog.md:572 +#, fuzzy msgid "" -"Prior to local training, we need to load the MNIST dataset, a popular " -"image classification dataset of handwritten digits for machine learning, " -"and partition the dataset for FL. This can be conveniently achieved using" -" `Flower Datasets `_. The " -":code:`FederatedDataset.load_partition()` method loads the partitioned " -"training set for each partition ID defined in the :code:`--partition-id` " -"argument." 
+"**General updates to Flower Examples** " +"([#2384](https://github.com/adap/flower/pull/2384), " +"[#2425](https://github.com/adap/flower/pull/2425), " +"[#2526](https://github.com/adap/flower/pull/2526), " +"[#2302](https://github.com/adap/flower/pull/2302), " +"[#2545](https://github.com/adap/flower/pull/2545))" msgstr "" +"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:95 +#: ../../source/ref-changelog.md:574 +#, fuzzy msgid "" -"Next, the logistic regression model is defined and initialized with " -":code:`utils.set_initial_params()`." -msgstr "" -"Ensuite, le modèle de régression logistique est défini et initialisé avec" -" :code:`utils.set_initial_params()`." - -#: ../../source/tutorial-quickstart-scikitlearn.rst:107 -msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to fit the logistic regression we defined earlier)." -msgstr "" -"Le serveur Flower interagit avec les clients par le biais d'une interface" -" appelée :code:`Client`. Lorsque le serveur sélectionne un client " -"particulier pour la formation, il envoie des instructions de formation " -"sur le réseau. Le client reçoit ces instructions et appelle l'une des " -"méthodes :code:`Client` pour exécuter ton code (c'est-à-dire pour ajuster" -" la régression logistique que nous avons définie plus tôt)." 
- -#: ../../source/tutorial-quickstart-scikitlearn.rst:113 -msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses scikit-learn. Implementing :code:`NumPyClient` usually " -"means defining the following methods (:code:`set_parameters` is optional " -"though):" -msgstr "" -"Flower fournit une classe de commodité appelée :code:`NumPyClient` qui " -"facilite la mise en œuvre de l'interface :code:`Client` lorsque ta charge" -" de travail utilise scikit-learn. Mettre en œuvre :code:`NumPyClient` " -"signifie généralement définir les méthodes suivantes " -"(:code:`set_parameters` est cependant facultatif) :" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:122 -msgid "is directly imported with :code:`utils.set_model_params()`" -msgstr "est directement importé avec :code:`utils.set_model_params()`" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:130 -msgid "The methods can be implemented in the following way:" -msgstr "Les méthodes peuvent être mises en œuvre de la manière suivante :" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:153 -msgid "" -"We can now create an instance of our class :code:`MnistClient` and add " -"one line to actually run this client:" +"**General updates to Flower Baselines** " +"([#2301](https://github.com/adap/flower/pull/2301), " +"[#2305](https://github.com/adap/flower/pull/2305), " +"[#2307](https://github.com/adap/flower/pull/2307), " +"[#2327](https://github.com/adap/flower/pull/2327), " +"[#2435](https://github.com/adap/flower/pull/2435), " +"[#2462](https://github.com/adap/flower/pull/2462), " +"[#2463](https://github.com/adap/flower/pull/2463), " +"[#2461](https://github.com/adap/flower/pull/2461), " +"[#2469](https://github.com/adap/flower/pull/2469), " +"[#2466](https://github.com/adap/flower/pull/2466), " +"[#2471](https://github.com/adap/flower/pull/2471), " 
+"[#2472](https://github.com/adap/flower/pull/2472), " +"[#2470](https://github.com/adap/flower/pull/2470))" msgstr "" -"Nous pouvons maintenant créer une instance de notre classe " -":code:`MnistClient` et ajouter une ligne pour exécuter ce client :" +"**Améliorations générales** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:160 +#: ../../source/ref-changelog.md:576 #, fuzzy msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells" -" the client which server to connect to. In our case we can run the server" -" and the client on the same machine, therefore we use " -":code:`\"0.0.0.0:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we pass to the client." -msgstr "" -"C'est tout pour le client. Il nous suffit d'implémenter :code:`Client` ou" -" :code:`NumPyClient` et d'appeler :code:`fl.client.start_client()`. 
La " -"chaîne :code:`\"0.0.0:8080\"` indique au client à quel serveur se " -"connecter. Dans notre cas, nous pouvons exécuter le serveur et le client " -"sur la même machine, c'est pourquoi nous utilisons " -":code:`\"0.0.0:8080\"`. Si nous exécutons une charge de travail " -"véritablement fédérée avec le serveur et les clients s'exécutant sur des " -"machines différentes, tout ce qui doit changer est :code:`server_address`" -" que nous transmettons au client." - -#: ../../source/tutorial-quickstart-scikitlearn.rst:169 -msgid "" -"The following Flower server is a little bit more advanced and returns an " -"evaluation function for the server-side evaluation. First, we import " -"again all required libraries such as Flower and scikit-learn." +"**General updates to the simulation engine** " +"([#2331](https://github.com/adap/flower/pull/2331), " +"[#2447](https://github.com/adap/flower/pull/2447), " +"[#2448](https://github.com/adap/flower/pull/2448), " +"[#2294](https://github.com/adap/flower/pull/2294))" msgstr "" -"Le serveur Flower suivant est un peu plus avancé et renvoie une fonction " -"d'évaluation pour l'évaluation côté serveur. Tout d'abord, nous importons" -" à nouveau toutes les bibliothèques requises telles que Flower et scikit-" -"learn." - -#: ../../source/tutorial-quickstart-scikitlearn.rst:172 -msgid ":code:`server.py`, import Flower and start the server:" -msgstr ":code:`server.py`, importe Flower et démarre le serveur :" +"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:185 +#: ../../source/ref-changelog.md:578 #, fuzzy msgid "" -"The number of federated learning rounds is set in :code:`fit_round()` and" -" the evaluation is defined in :code:`get_evaluate_fn()`. 
The evaluation " -"function is called after each federated learning round and gives you " -"information about loss and accuracy. Note that we also make use of Flower" -" Datasets here to load the test split of the MNIST dataset for server-" -"side evaluation." +"**General updates to Flower SDKs** " +"([#2288](https://github.com/adap/flower/pull/2288), " +"[#2429](https://github.com/adap/flower/pull/2429), " +"[#2555](https://github.com/adap/flower/pull/2555), " +"[#2543](https://github.com/adap/flower/pull/2543), " +"[#2544](https://github.com/adap/flower/pull/2544), " +"[#2597](https://github.com/adap/flower/pull/2597), " +"[#2623](https://github.com/adap/flower/pull/2623))" msgstr "" -"Le nombre de tours d'apprentissage fédéré est défini dans " -":code:`fit_round()` et l'évaluation est définie dans " -":code:`get_evaluate_fn()`. La fonction d'évaluation est appelée après " -"chaque tour d'apprentissage fédéré et te donne des informations sur la " -"perte et la précision." +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:213 +#: ../../source/ref-changelog.md:580 msgid "" -"The :code:`main` contains the server-side parameter initialization " -":code:`utils.set_initial_params()` as well as the aggregation strategy " -":code:`fl.server.strategy:FedAvg()`. The strategy is the default one, " -"federated averaging (or FedAvg), with two clients and evaluation after " -"each federated learning round. The server can be started with the command" -" :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." 
+"**General improvements** " +"([#2309](https://github.com/adap/flower/pull/2309), " +"[#2310](https://github.com/adap/flower/pull/2310), " +"[#2313](https://github.com/adap/flower/pull/2313), " +"[#2316](https://github.com/adap/flower/pull/2316), " +"[#2317](https://github.com/adap/flower/pull/2317), " +"[#2349](https://github.com/adap/flower/pull/2349), " +"[#2360](https://github.com/adap/flower/pull/2360), " +"[#2402](https://github.com/adap/flower/pull/2402), " +"[#2446](https://github.com/adap/flower/pull/2446), " +"[#2561](https://github.com/adap/flower/pull/2561), " +"[#2273](https://github.com/adap/flower/pull/2273), " +"[#2267](https://github.com/adap/flower/pull/2267), " +"[#2274](https://github.com/adap/flower/pull/2274), " +"[#2275](https://github.com/adap/flower/pull/2275), " +"[#2432](https://github.com/adap/flower/pull/2432), " +"[#2251](https://github.com/adap/flower/pull/2251), " +"[#2321](https://github.com/adap/flower/pull/2321), " +"[#1936](https://github.com/adap/flower/pull/1936), " +"[#2408](https://github.com/adap/flower/pull/2408), " +"[#2413](https://github.com/adap/flower/pull/2413), " +"[#2401](https://github.com/adap/flower/pull/2401), " +"[#2531](https://github.com/adap/flower/pull/2531), " +"[#2534](https://github.com/adap/flower/pull/2534), " +"[#2535](https://github.com/adap/flower/pull/2535), " +"[#2521](https://github.com/adap/flower/pull/2521), " +"[#2553](https://github.com/adap/flower/pull/2553), " +"[#2596](https://github.com/adap/flower/pull/2596))" msgstr "" -"Le :code:`main` contient l'initialisation des paramètres côté serveur " -":code:`utils.set_initial_params()` ainsi que la stratégie d'agrégation " -":code:`fl.server.strategy:FedAvg()`. La stratégie est celle par défaut, " -"la moyenne fédérée (ou FedAvg), avec deux clients et une évaluation après" -" chaque tour d'apprentissage fédéré. 
Le serveur peut être démarré avec la" -" commande :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." -#: ../../source/tutorial-quickstart-scikitlearn.rst:232 -msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. Federated learning systems usually have a " -"server and multiple clients. We, therefore, have to start the server " -"first:" +#: ../../source/ref-changelog.md:582 ../../source/ref-changelog.md:672 +#: ../../source/ref-changelog.md:736 ../../source/ref-changelog.md:790 +#: ../../source/ref-changelog.md:857 +msgid "Flower received many improvements under the hood, too many to list here." msgstr "" -"Le client et le serveur étant prêts, nous pouvons maintenant tout lancer " -"et voir l'apprentissage fédéré en action. Les systèmes d'apprentissage " -"fédéré ont généralement un serveur et plusieurs clients. Nous devons donc" -" commencer par lancer le serveur :" +"Flower a reçu de nombreuses améliorations sous le capot, trop nombreuses " +"pour être énumérées ici." -#: ../../source/tutorial-quickstart-scikitlearn.rst:286 +#: ../../source/ref-changelog.md:586 +#, fuzzy msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples/sklearn-logreg-" -"mnist`." +"**Remove support for Python 3.7** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" msgstr "" -"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " -"premier système d'apprentissage fédéré. 
Le code source complet " -"`_ de cet exemple se trouve dans :code:`examples/sklearn-logreg-" -"mnist`." +"**Nouvel exemple de code MLCube** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/tutorial-quickstart-tensorflow.rst:-1 +#: ../../source/ref-changelog.md:588 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with TensorFlow to train a MobilNetV2 model on CIFAR-10." -msgstr "" - -#: ../../source/tutorial-quickstart-tensorflow.rst:5 -msgid "Quickstart TensorFlow" -msgstr "Démarrage rapide de TensorFlow" - -#: ../../source/tutorial-quickstart-tensorflow.rst:13 -msgid "Let's build a federated learning system in less than 20 lines of code!" +"Python 3.7 support was deprecated in Flower 1.5, and this release removes" +" support. Flower now requires Python 3.8." msgstr "" -"Construisons un système d'apprentissage fédéré en moins de 20 lignes de " -"code !" 
- -#: ../../source/tutorial-quickstart-tensorflow.rst:15 -msgid "Before Flower can be imported we have to install it:" -msgstr "Avant de pouvoir importer une fleur, nous devons l'installer :" -#: ../../source/tutorial-quickstart-tensorflow.rst:21 +#: ../../source/ref-changelog.md:590 +#, fuzzy msgid "" -"Since we want to use the Keras API of TensorFlow (TF), we have to install" -" TF as well:" -msgstr "" -"Comme nous voulons utiliser l'API Keras de TensorFlow (TF), nous devons " -"également installer TF :" - -#: ../../source/tutorial-quickstart-tensorflow.rst:31 -msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +"**Remove experimental argument** `rest` **from** `start_client` " +"([#2324](https://github.com/adap/flower/pull/2324))" msgstr "" -"Ensuite, dans un fichier appelé :code:`client.py`, importe Flower et " -"TensorFlow :" +"**Supprimer les stratégies expérimentales** " +"([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/tutorial-quickstart-tensorflow.rst:38 +#: ../../source/ref-changelog.md:592 msgid "" -"We use the Keras utilities of TF to load CIFAR10, a popular colored image" -" classification dataset for machine learning. The call to " -":code:`tf.keras.datasets.cifar10.load_data()` downloads CIFAR10, caches " -"it locally, and then returns the entire training and test set as NumPy " -"ndarrays." +"The (still experimental) argument `rest` was removed from `start_client` " +"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " +"experimental REST API instead." msgstr "" -"Nous utilisons les utilitaires Keras de TF pour charger CIFAR10, un " -"ensemble de données de classification d'images colorées populaire pour " -"l'apprentissage automatique. L'appel à " -":code:`tf.keras.datasets.cifar10.load_data()` télécharge CIFAR10, le met " -"en cache localement, puis renvoie l'ensemble d'entraînement et de test " -"sous forme de NumPy ndarrays." 
-#: ../../source/tutorial-quickstart-tensorflow.rst:47 -msgid "" -"Next, we need a model. For the purpose of this tutorial, we use " -"MobilNetV2 with 10 output classes:" -msgstr "" -"Ensuite, nous avons besoin d'un modèle. Pour les besoins de ce tutoriel, " -"nous utilisons MobilNetV2 avec 10 classes de sortie :" +#: ../../source/ref-changelog.md:594 +#, fuzzy +msgid "v1.5.0 (2023-08-31)" +msgstr "v1.4.0 (2023-04-21)" -#: ../../source/tutorial-quickstart-tensorflow.rst:60 +#: ../../source/ref-changelog.md:600 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses Keras. The :code:`NumPyClient` interface defines three " -"methods which can be implemented in the following way:" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " msgstr "" -"Flower fournit une classe de commodité appelée :code:`NumPyClient` qui " -"facilite la mise en œuvre de l'interface :code:`Client` lorsque ta charge" -" de travail utilise Keras. L'interface :code:`NumPyClient` définit trois " -"méthodes qui peuvent être mises en œuvre de la manière suivante :" -#: ../../source/tutorial-quickstart-tensorflow.rst:135 -msgid "Each client will have its own dataset." -msgstr "Chaque client aura son propre ensemble de données." 
- -#: ../../source/tutorial-quickstart-tensorflow.rst:137 +#: ../../source/ref-changelog.md:604 +#, fuzzy msgid "" -"You should now see how the training does in the very first terminal (the " -"one that started the server):" +"**Introduce new simulation engine** " +"([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" msgstr "" -"Tu devrais maintenant voir comment la formation se déroule dans le tout " -"premier terminal (celui qui a démarré le serveur) :" +"**Introduire la télémétrie optionnelle** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/tutorial-quickstart-tensorflow.rst:169 -#, fuzzy +#: ../../source/ref-changelog.md:606 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this can be found in :code:`examples" -"/quickstart-tensorflow/client.py`." +"The new simulation engine has been rewritten from the ground up, yet it " +"remains fully backwards compatible. It offers much improved stability and" +" memory handling, especially when working with GPUs. Simulations " +"transparently adapt to different settings to scale simulation in CPU-" +"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." msgstr "" -"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " -"premier système d'apprentissage fédéré. Le `code source complet " -"`_ pour cela se trouve dans :code:`examples" -"/quickstart-tensorflow/client.py`." -#: ../../source/tutorial-quickstart-xgboost.rst:-1 +#: ../../source/ref-changelog.md:608 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with XGBoost to train classification models on trees." 
+"Comprehensive documentation includes a new [how-to run " +"simulations](https://flower.ai/docs/framework/how-to-run-" +"simulations.html) guide, new [simulation-" +"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " +"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" +"tensorflow.html) notebooks, and a new [YouTube tutorial " +"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:5 -msgid "Quickstart XGBoost" -msgstr "Démarrage rapide XGBoost" - -#: ../../source/tutorial-quickstart-xgboost.rst:14 -#, fuzzy -msgid "Federated XGBoost" -msgstr "Formation fédérée" - -#: ../../source/tutorial-quickstart-xgboost.rst:16 +#: ../../source/ref-changelog.md:610 msgid "" -"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " -"implementation of gradient-boosted decision tree (**GBDT**), that " -"maximises the computational boundaries for boosted tree methods. It's " -"primarily designed to enhance both the performance and computational " -"speed of machine learning models. In XGBoost, trees are constructed " -"concurrently, unlike the sequential approach taken by GBDT." 
+"**Restructure Flower Docs** " +"([#1824](https://github.com/adap/flower/pull/1824), " +"[#1865](https://github.com/adap/flower/pull/1865), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1887](https://github.com/adap/flower/pull/1887), " +"[#1919](https://github.com/adap/flower/pull/1919), " +"[#1922](https://github.com/adap/flower/pull/1922), " +"[#1920](https://github.com/adap/flower/pull/1920), " +"[#1923](https://github.com/adap/flower/pull/1923), " +"[#1924](https://github.com/adap/flower/pull/1924), " +"[#1962](https://github.com/adap/flower/pull/1962), " +"[#2006](https://github.com/adap/flower/pull/2006), " +"[#2133](https://github.com/adap/flower/pull/2133), " +"[#2203](https://github.com/adap/flower/pull/2203), " +"[#2215](https://github.com/adap/flower/pull/2215), " +"[#2122](https://github.com/adap/flower/pull/2122), " +"[#2223](https://github.com/adap/flower/pull/2223), " +"[#2219](https://github.com/adap/flower/pull/2219), " +"[#2232](https://github.com/adap/flower/pull/2232), " +"[#2233](https://github.com/adap/flower/pull/2233), " +"[#2234](https://github.com/adap/flower/pull/2234), " +"[#2235](https://github.com/adap/flower/pull/2235), " +"[#2237](https://github.com/adap/flower/pull/2237), " +"[#2238](https://github.com/adap/flower/pull/2238), " +"[#2242](https://github.com/adap/flower/pull/2242), " +"[#2231](https://github.com/adap/flower/pull/2231), " +"[#2243](https://github.com/adap/flower/pull/2243), " +"[#2227](https://github.com/adap/flower/pull/2227))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:20 +#: ../../source/ref-changelog.md:612 msgid "" -"Often, for tabular data on medium-sized datasets with fewer than 10k " -"training examples, XGBoost surpasses the results of deep learning " -"techniques." +"Much effort went into a completely restructured Flower docs experience. 
" +"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " +"divided into Flower Framework, Flower Baselines, Flower Android SDK, " +"Flower iOS SDK, and code example projects." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:23 +#: ../../source/ref-changelog.md:614 #, fuzzy -msgid "Why federated XGBoost?" -msgstr "Qu'est-ce que l'apprentissage fédéré ?" +msgid "" +"**Introduce Flower Swift SDK** " +"([#1858](https://github.com/adap/flower/pull/1858), " +"[#1897](https://github.com/adap/flower/pull/1897))" +msgstr "" +"**Introduction du SDK iOS (aperçu)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/tutorial-quickstart-xgboost.rst:25 +#: ../../source/ref-changelog.md:616 msgid "" -"Indeed, as the demand for data privacy and decentralized learning grows, " -"there's an increasing requirement to implement federated XGBoost systems " -"for specialised applications, like survival analysis and financial fraud " -"detection." +"This is the first preview release of the Flower Swift SDK. Flower support" +" on iOS is improving, and alongside the Swift SDK and code example, there" +" is now also an iOS quickstart tutorial." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:27 +#: ../../source/ref-changelog.md:618 +#, fuzzy msgid "" -"Federated learning ensures that raw data remains on the local device, " -"making it an attractive approach for sensitive domains where data " -"security and privacy are paramount. Given the robustness and efficiency " -"of XGBoost, combining it with federated learning offers a promising " -"solution for these specific challenges." 
+"**Introduce Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" msgstr "" +"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/tutorial-quickstart-xgboost.rst:30 +#: ../../source/ref-changelog.md:620 msgid "" -"In this tutorial we will learn how to train a federated XGBoost model on " -"HIGGS dataset using Flower and :code:`xgboost` package. We use a simple " -"example (`full code xgboost-quickstart " -"`_)" -" with two *clients* and one *server* to demonstrate how federated XGBoost" -" works, and then we dive into a more complex example (`full code xgboost-" -"comprehensive `_) to run various experiments." +"This is the first preview release of the Flower Kotlin SDK. Flower " +"support on Android is improving, and alongside the Kotlin SDK and code " +"example, there is now also an Android quickstart tutorial." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:37 -msgid "Environment Setup" +#: ../../source/ref-changelog.md:622 +#, fuzzy +msgid "" +"**Introduce new end-to-end testing infrastructure** " +"([#1842](https://github.com/adap/flower/pull/1842), " +"[#2071](https://github.com/adap/flower/pull/2071), " +"[#2072](https://github.com/adap/flower/pull/2072), " +"[#2068](https://github.com/adap/flower/pull/2068), " +"[#2067](https://github.com/adap/flower/pull/2067), " +"[#2069](https://github.com/adap/flower/pull/2069), " +"[#2073](https://github.com/adap/flower/pull/2073), " +"[#2070](https://github.com/adap/flower/pull/2070), " +"[#2074](https://github.com/adap/flower/pull/2074), " +"[#2082](https://github.com/adap/flower/pull/2082), " +"[#2084](https://github.com/adap/flower/pull/2084), " +"[#2093](https://github.com/adap/flower/pull/2093), " +"[#2109](https://github.com/adap/flower/pull/2109), " +"[#2095](https://github.com/adap/flower/pull/2095), " +"[#2140](https://github.com/adap/flower/pull/2140), " 
+"[#2137](https://github.com/adap/flower/pull/2137), " +"[#2165](https://github.com/adap/flower/pull/2165))" msgstr "" +"**Améliorer l'API (expérimentale) du pilote** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" -#: ../../source/tutorial-quickstart-xgboost.rst:41 +#: ../../source/ref-changelog.md:624 msgid "" -"We first need to install Flower and Flower Datasets. You can do this by " -"running :" +"A new testing infrastructure ensures that new changes stay compatible " +"with existing framework integrations or strategies." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:47 +#: ../../source/ref-changelog.md:626 #, fuzzy -msgid "" -"Since we want to use :code:`xgboost` package to build up XGBoost trees, " -"let's go ahead and install :code:`xgboost`:" -msgstr "Puisque nous voulons utiliser scikt-learn, allons-y et installons-le :" +msgid "**Deprecate Python 3.7**" +msgstr "**Créer le PR**" -#: ../../source/tutorial-quickstart-xgboost.rst:57 +#: ../../source/ref-changelog.md:628 msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. Now that we have all our " -"dependencies installed, let's run a simple distributed training with two " -"clients and one server." +"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" +" Python 3.7 is now deprecated and will be removed in an upcoming release." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:60 +#: ../../source/ref-changelog.md:630 #, fuzzy msgid "" -"In a file called :code:`client.py`, import xgboost, Flower, Flower " -"Datasets and other related functions:" +"**Add new** `FedTrimmedAvg` **strategy** " +"([#1769](https://github.com/adap/flower/pull/1769), " +"[#1853](https://github.com/adap/flower/pull/1853))" msgstr "" -"Dans un fichier appelé :code:`client.py`, importe Flower et les paquets " -"liés à PyTorch :" +"**Ajouter un nouvel exemple de Federated Analytics avec Pandas** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" -#: ../../source/tutorial-quickstart-xgboost.rst:87 -msgid "Dataset partition and hyper-parameter selection" +#: ../../source/ref-changelog.md:632 +#, fuzzy +msgid "" +"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " +"2018](https://arxiv.org/abs/1803.01498)." msgstr "" +"La nouvelle stratégie `FedMedian` met en œuvre Federated Median " +"(FedMedian) par [Yin et al., 2018] " +"(https://arxiv.org/pdf/1803.01498v1.pdf)." -#: ../../source/tutorial-quickstart-xgboost.rst:89 +#: ../../source/ref-changelog.md:634 +#, fuzzy msgid "" -"Prior to local training, we require loading the HIGGS dataset from Flower" -" Datasets and conduct data partitioning for FL:" +"**Introduce start_driver** " +"([#1697](https://github.com/adap/flower/pull/1697))" msgstr "" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/tutorial-quickstart-xgboost.rst:102 +#: ../../source/ref-changelog.md:636 msgid "" -"In this example, we split the dataset into two partitions with uniform " -"distribution (:code:`IidPartitioner(num_partitions=2)`). 
Then, we load " -"the partition for the given client based on :code:`node_id`:" +"In addition to `start_server` and using the raw Driver API, there is a " +"new `start_driver` function that allows for running `start_server` " +"scripts as a Flower driver with only a single-line code change. Check out" +" the `mt-pytorch` code example to see a working example using " +"`start_driver`." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:121 +#: ../../source/ref-changelog.md:638 +#, fuzzy msgid "" -"After that, we do train/test splitting on the given partition (client's " -"local data), and transform data format for :code:`xgboost` package." +"**Add parameter aggregation to** `mt-pytorch` **code example** " +"([#1785](https://github.com/adap/flower/pull/1785))" msgstr "" +"**Nouvel exemple de code PyTorch avancé** " +"([#1007](https://github.com/adap/flower/pull/1007))" -#: ../../source/tutorial-quickstart-xgboost.rst:134 +#: ../../source/ref-changelog.md:640 msgid "" -"The functions of :code:`train_test_split` and " -":code:`transform_dataset_to_dmatrix` are defined as below:" +"The `mt-pytorch` example shows how to aggregate parameters when writing a" +" driver script. The included `driver.py` and `server.py` have been " +"aligned to demonstrate both the low-level way and the high-level way of " +"building server-side logic." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:158 -msgid "Finally, we define the hyper-parameters used for XGBoost training." +#: ../../source/ref-changelog.md:642 +#, fuzzy +msgid "" +"**Migrate experimental REST API to Starlette** " +"([2171](https://github.com/adap/flower/pull/2171))" msgstr "" +"**Nouvelle stratégie expérimentale TensorBoard** " +"([#789](https://github.com/adap/flower/pull/789))" -#: ../../source/tutorial-quickstart-xgboost.rst:174 +#: ../../source/ref-changelog.md:644 msgid "" -"The :code:`num_local_round` represents the number of iterations for local" -" tree boost. 
We use CPU for the training in default. One can shift it to " -"GPU by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as " -"evaluation metric." +"The (experimental) REST API used to be implemented in " +"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" +" use [Starlette](https://www.starlette.io/) directly." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:181 -msgid "Flower client definition for XGBoost" +#: ../../source/ref-changelog.md:646 +#, fuzzy +msgid "" +"Please note: The REST request-response API is still experimental and will" +" likely change significantly over time." msgstr "" +"Remarque : l'API REST est encore expérimentale et est susceptible de " +"changer de manière significative au fil du temps." -#: ../../source/tutorial-quickstart-xgboost.rst:183 +#: ../../source/ref-changelog.md:648 +#, fuzzy msgid "" -"After loading the dataset we define the Flower client. We follow the " -"general rule to define :code:`XgbClient` class inherited from " -":code:`fl.client.Client`." +"**Introduce experimental gRPC request-response API** " +"([#1867](https://github.com/adap/flower/pull/1867), " +"[#1901](https://github.com/adap/flower/pull/1901))" msgstr "" +"**Introduire les enveloppes de confidentialité différentielle (aperçu)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" -#: ../../source/tutorial-quickstart-xgboost.rst:193 +#: ../../source/ref-changelog.md:650 msgid "" -"The :code:`self.bst` is used to keep the Booster objects that remain " -"consistent across rounds, allowing them to store predictions from trees " -"integrated in earlier rounds and maintain other essential data structures" -" for training." +"In addition to the existing gRPC API (based on bidirectional streaming) " +"and the experimental REST API, there is now a new gRPC API that uses a " +"request-response model to communicate with client nodes." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:196 +#: ../../source/ref-changelog.md:652 +#, fuzzy msgid "" -"Then, we override :code:`get_parameters`, :code:`fit` and " -":code:`evaluate` methods insides :code:`XgbClient` class as follows." +"Please note: The gRPC request-response API is still experimental and will" +" likely change significantly over time." msgstr "" +"Remarque : l'API REST est encore expérimentale et est susceptible de " +"changer de manière significative au fil du temps." -#: ../../source/tutorial-quickstart-xgboost.rst:210 +#: ../../source/ref-changelog.md:654 +#, fuzzy msgid "" -"Unlike neural network training, XGBoost trees are not started from a " -"specified random weights. In this case, we do not use " -":code:`get_parameters` and :code:`set_parameters` to initialise model " -"parameters for XGBoost. As a result, let's return an empty tensor in " -":code:`get_parameters` when it is called by the server at the first " -"round." +"**Replace the experimental** `start_client(rest=True)` **with the new** " +"`start_client(transport=\"rest\")` " +"([#1880](https://github.com/adap/flower/pull/1880))" msgstr "" +"**Initialise** `start_simulation` **avec une liste d'ID de clients** " +"([#860](https://github.com/adap/flower/pull/860))" -#: ../../source/tutorial-quickstart-xgboost.rst:251 +#: ../../source/ref-changelog.md:656 msgid "" -"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build " -"up the first set of trees. the returned Booster object and config are " -"stored in :code:`self.bst` and :code:`self.config`, respectively. From " -"the second round, we load the global model sent from server to " -":code:`self.bst`, and then update model weights on local training data " -"with function :code:`local_boost` as follows:" +"The (experimental) `start_client` argument `rest` was deprecated in " +"favour of a new argument `transport`. 
`start_client(transport=\"rest\")` " +"will yield the same behaviour as `start_client(rest=True)` did before. " +"All code should migrate to the new argument `transport`. The deprecated " +"argument `rest` will be removed in a future release." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:269 +#: ../../source/ref-changelog.md:658 +#, fuzzy msgid "" -"Given :code:`num_local_round`, we update trees by calling " -":code:`self.bst.update` method. After training, the last " -":code:`N=num_local_round` trees will be extracted to send to the server." +"**Add a new gRPC option** " +"([#2197](https://github.com/adap/flower/pull/2197))" msgstr "" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/tutorial-quickstart-xgboost.rst:291 +#: ../../source/ref-changelog.md:660 msgid "" -"In :code:`evaluate`, we call :code:`self.bst.eval_set` function to " -"conduct evaluation on valid set. The AUC value will be returned." +"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" +" option set to 0 by default. This prevents the clients from sending " +"keepalive pings when there is no outstanding stream." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:294 +#: ../../source/ref-changelog.md:662 #, fuzzy msgid "" -"Now, we can create an instance of our class :code:`XgbClient` and add one" -" line to actually run this client:" +"**Improve example notebooks** " +"([#2005](https://github.com/adap/flower/pull/2005))" msgstr "" -"Nous pouvons maintenant créer une instance de notre classe " -":code:`MnistClient` et ajouter une ligne pour exécuter ce client :" +"**Supprimer les stratégies expérimentales** " +"([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/tutorial-quickstart-xgboost.rst:300 +#: ../../source/ref-changelog.md:664 #, fuzzy +msgid "There's a new 30min Federated Learning PyTorch tutorial!" 
+msgstr "Bienvenue au tutoriel sur l'apprentissage fédéré de la fleur !" + +#: ../../source/ref-changelog.md:666 msgid "" -"That's it for the client. We only have to implement :code:`Client`and " -"call :code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` " -"tells the client which server to connect to. In our case we can run the " -"server and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." -msgstr "" -"C'est tout pour le client. Il nous suffit d'implémenter :code:`Client` ou" -" :code:`NumPyClient` et d'appeler :code:`fl.client.start_client()`. La " -"chaîne :code:`\"[: :]:8080\"` indique au client à quel serveur se " -"connecter. Dans notre cas, nous pouvons exécuter le serveur et le client " -"sur la même machine, c'est pourquoi nous utilisons :code:`\"[: " -":]:8080\"`. Si nous exécutons une charge de travail véritablement fédérée" -" avec le serveur et les clients fonctionnant sur des machines " -"différentes, tout ce qui doit changer est l'adresse " -":code:`server_address` vers laquelle nous dirigeons le client." - -#: ../../source/tutorial-quickstart-xgboost.rst:311 -#, fuzzy -msgid "" -"These updates are then sent to the *server* which will aggregate them to " -"produce a better model. Finally, the *server* sends this improved version" -" of the model back to each *client* to finish a complete FL round." 
+"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" msgstr "" -"*Les clients* sont chargés de générer des mises à jour de poids " -"individuelles pour le modèle en fonction de leurs ensembles de données " -"locales. Ces mises à jour sont ensuite envoyées au *serveur* qui les " -"agrège pour produire un meilleur modèle. Enfin, le *serveur* renvoie " -"cette version améliorée du modèle à chaque *client*. Un cycle complet de " -"mises à jour de poids s'appelle un *round*." -#: ../../source/tutorial-quickstart-xgboost.rst:314 -#, fuzzy +#: ../../source/ref-changelog.md:668 msgid "" -"In a file named :code:`server.py`, import Flower and FedXgbBagging from " -":code:`flwr.server.strategy`." -msgstr "" -"Dans un fichier appelé :code:`client.py`, importe Flower et les paquets " -"liés au MXNet :" - -#: ../../source/tutorial-quickstart-xgboost.rst:316 -msgid "We first define a strategy for XGBoost bagging aggregation." +"Many examples have received significant updates, including simplified " +"advanced-tensorflow and advanced-pytorch examples, improved macOS " +"compatibility of TensorFlow examples, and code examples for simulation. A" +" major upgrade is that all code examples now have a `requirements.txt` " +"(in addition to `pyproject.toml`)." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:339 +#: ../../source/ref-changelog.md:670 +#, fuzzy msgid "" -"We use two clients for this example. 
An " -":code:`evaluate_metrics_aggregation` function is defined to collect and " -"wighted average the AUC values from clients." +"**General improvements** " +"([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" msgstr "" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/tutorial-quickstart-xgboost.rst:342 -#, fuzzy -msgid "Then, we start the server:" -msgstr "Démarrer le serveur" - -#: ../../source/tutorial-quickstart-xgboost.rst:354 -msgid "Tree-based bagging aggregation" -msgstr "" +#: ../../source/ref-changelog.md:678 +msgid "v1.4.0 (2023-04-21)" +msgstr "v1.4.0 (2023-04-21)" -#: ../../source/tutorial-quickstart-xgboost.rst:356 +#: ../../source/ref-changelog.md:684 msgid "" -"You must be curious about how bagging aggregation works. Let's look into " -"the details." +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" msgstr "" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. 
Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" -#: ../../source/tutorial-quickstart-xgboost.rst:358 +#: ../../source/ref-changelog.md:688 msgid "" -"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define " -":code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`." -" Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` " -"and :code:`evaluate` methods as follows:" +"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " +"example)** ([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" msgstr "" +"**Introduire la prise en charge de XGBoost (**`FedXgbNnAvg` **stratégie " +"et exemple)** ([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" -#: ../../source/tutorial-quickstart-xgboost.rst:454 +#: ../../source/ref-changelog.md:690 msgid "" -"In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " -"trees by calling :code:`aggregate()` function:" +"XGBoost is a tree-based ensemble machine learning algorithm that uses " +"gradient boosting to improve model accuracy. 
We added a new `FedXgbNnAvg`" +" " +"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," +" and a [code example](https://github.com/adap/flower/tree/main/examples" +"/xgboost-quickstart) that demonstrates the usage of this new strategy in " +"an XGBoost project." msgstr "" +"Nous avons ajouté une nouvelle [stratégie] `FedXgbNnAvg` " +"(https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," +" et un [exemple de code] " +"(https://github.com/adap/flower/tree/main/examples/xgboost-quickstart) " +"qui démontre l'utilisation de cette nouvelle stratégie dans un projet " +"XGBoost." -#: ../../source/tutorial-quickstart-xgboost.rst:513 +#: ../../source/ref-changelog.md:692 msgid "" -"In this function, we first fetch the number of trees and the number of " -"parallel trees for the current and previous model by calling " -":code:`_get_tree_nums`. Then, the fetched information will be aggregated." -" After that, the trees (containing model weights) are aggregated to " -"generate a new tree model." +"**Introduce iOS SDK (preview)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" msgstr "" +"**Introduction du SDK iOS (aperçu)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/tutorial-quickstart-xgboost.rst:518 +#: ../../source/ref-changelog.md:694 msgid "" -"After traversal of all clients' models, a new global model is generated, " -"followed by the serialisation, and sending back to each client." -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:523 -msgid "Launch Federated XGBoost!" +"This is a major update for anyone wanting to implement Federated Learning" +" on iOS mobile devices. 
We now have a swift iOS SDK present under " +"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" +" that will facilitate greatly the app creating process. To showcase its " +"use, the [iOS " +"example](https://github.com/adap/flower/tree/main/examples/ios) has also " +"been updated!" msgstr "" +"Il s'agit d'une mise à jour majeure pour tous ceux qui souhaitent mettre " +"en œuvre l'apprentissage fédéré sur les appareils mobiles iOS. Nous " +"disposons désormais d'un SDK swift iOS présent sous " +"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" +" qui facilitera grandement le processus de création d'applications. Pour " +"présenter son utilisation, l'[exemple " +"iOS](https://github.com/adap/flower/tree/main/examples/ios) a également " +"été mis à jour !" -#: ../../source/tutorial-quickstart-xgboost.rst:585 +#: ../../source/ref-changelog.md:696 msgid "" -"Congratulations! You've successfully built and run your first federated " -"XGBoost system. The AUC values can be checked in " -":code:`metrics_distributed`. One can see that the average AUC increases " -"over FL rounds." +"**Introduce new \"What is Federated Learning?\" tutorial** " +"([#1657](https://github.com/adap/flower/pull/1657), " +"[#1721](https://github.com/adap/flower/pull/1721))" msgstr "" +"**Introduire un nouveau tutoriel \"Qu'est-ce que l'apprentissage fédéré ?" +" \"** ([#1657](https://github.com/adap/flower/pull/1657), " +"[#1721](https://github.com/adap/flower/pull/1721))" -#: ../../source/tutorial-quickstart-xgboost.rst:590 +#: ../../source/ref-changelog.md:698 #, fuzzy msgid "" -"The full `source code `_ for this example can be found in :code:`examples" -"/xgboost-quickstart`." +"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" +"what-is-federated-learning.html) in our documentation explains the basics" +" of Fedetated Learning. 
It enables anyone who's unfamiliar with Federated" +" Learning to start their journey with Flower. Forward it to anyone who's " +"interested in Federated Learning!" msgstr "" -"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " -"premier système d'apprentissage fédéré. Le code source complet " -"`_ de cet exemple se trouve dans :code:`examples" -"/quickstart-mxnet`." +"Un nouveau [tutoriel d'entrée de gamme] " +"(https://flower.ai/docs/tutorial/Flower-0-What-is-FL.html) dans notre " +"documentation explique les bases de l'apprentissage fédéré. Il permet à " +"tous ceux qui ne connaissent pas l'apprentissage fédéré de commencer leur" +" voyage avec Flower. Fais-le suivre à tous ceux qui s'intéressent à " +"l'apprentissage fédéré !" -#: ../../source/tutorial-quickstart-xgboost.rst:594 -msgid "Comprehensive Federated XGBoost" +#: ../../source/ref-changelog.md:700 +msgid "" +"**Introduce new Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" msgstr "" +"**Introduire une nouvelle fleur Référence : FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" -#: ../../source/tutorial-quickstart-xgboost.rst:596 +#: ../../source/ref-changelog.md:702 msgid "" -"Now that you have known how federated XGBoost work with Flower, it's time" -" to run some more comprehensive experiments by customising the " -"experimental settings. In the xgboost-comprehensive example (`full code " -"`_), we provide more options to define various experimental" -" setups, including aggregation strategies, data partitioning and " -"centralised/distributed evaluation. 
We also support :doc:`Flower " -"simulation ` making it easy to simulate large " -"client cohorts in a resource-aware manner. Let's take a look!" +"This new baseline replicates the MNIST+CNN task from the paper [Federated" +" Optimization in Heterogeneous Networks (Li et al., " +"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," +" which aims at making convergence more robust in heterogeneous settings." msgstr "" +"Cette nouvelle ligne de base reproduit la tâche MNIST+CNN de l'article " +"[Federated Optimization in Heterogeneous Networks (Li et al., 2018)] " +"(https://arxiv.org/abs/1812.06127). Elle utilise la stratégie `FedProx`, " +"qui vise à rendre la convergence plus robuste dans des contextes " +"hétérogènes." -#: ../../source/tutorial-quickstart-xgboost.rst:603 -#, fuzzy -msgid "Cyclic training" -msgstr "Formation centralisée" - -#: ../../source/tutorial-quickstart-xgboost.rst:605 +#: ../../source/ref-changelog.md:704 msgid "" -"In addition to bagging aggregation, we offer a cyclic training scheme, " -"which performs FL in a client-by-client fashion. Instead of aggregating " -"multiple clients, there is only one single client participating in the " -"training per round in the cyclic training scenario. The trained local " -"XGBoost trees will be passed to the next client as an initialised model " -"for next round's boosting." 
+"**Introduce new Flower Baseline: FedAvg FEMNIST** " +"([#1655](https://github.com/adap/flower/pull/1655))" msgstr "" +"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/tutorial-quickstart-xgboost.rst:609 +#: ../../source/ref-changelog.md:706 msgid "" -"To do this, we first customise a :code:`ClientManager` in " -":code:`server_utils.py`:" +"This new baseline replicates an experiment evaluating the performance of " +"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " +"Benchmark for Federated Settings (Caldas et al., " +"2018)](https://arxiv.org/abs/1812.01097)." msgstr "" +"Cette nouvelle ligne de base reproduit une expérience évaluant les " +"performances de l'algorithme FedAvg sur le jeu de données FEMNIST tiré de" +" l'article [LEAF : A Benchmark for Federated Settings (Caldas et al., " +"2018)] (https://arxiv.org/abs/1812.01097)." -#: ../../source/tutorial-quickstart-xgboost.rst:649 +#: ../../source/ref-changelog.md:708 msgid "" -"The customised :code:`ClientManager` samples all available clients in " -"each FL round based on the order of connection to the server. Then, we " -"define a new strategy :code:`FedXgbCyclic` in " -":code:`flwr.server.strategy.fedxgb_cyclic.py`, in order to sequentially " -"select only one client in given round and pass the received model to next" -" client." 
+"**Introduce (experimental) REST API** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" msgstr "" +"**Introduire l'API REST (expérimentale)** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/tutorial-quickstart-xgboost.rst:690 +#: ../../source/ref-changelog.md:710 msgid "" -"Unlike the original :code:`FedAvg`, we don't perform aggregation here. " -"Instead, we just make a copy of the received client model as global model" -" by overriding :code:`aggregate_fit`." +"A new REST API has been introduced as an alternative to the gRPC-based " +"communication stack. In this initial version, the REST API only supports " +"anonymous clients." msgstr "" +"Une nouvelle API REST a été introduite comme alternative à la pile de " +"communication basée sur gRPC. Dans cette version initiale, l'API REST ne " +"prend en charge que les clients anonymes." -#: ../../source/tutorial-quickstart-xgboost.rst:693 +#: ../../source/ref-changelog.md:712 msgid "" -"Also, the customised :code:`configure_fit` and :code:`configure_evaluate`" -" methods ensure the clients to be sequentially selected given FL round:" +"Please note: The REST API is still experimental and will likely change " +"significantly over time." 
msgstr "" +"Remarque : l'API REST est encore expérimentale et est susceptible de " +"changer de manière significative au fil du temps." -#: ../../source/tutorial-quickstart-xgboost.rst:757 -msgid "Customised data partitioning" +#: ../../source/ref-changelog.md:714 +msgid "" +"**Improve the (experimental) Driver API** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" msgstr "" +"**Améliorer l'API (expérimentale) du pilote** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" -#: ../../source/tutorial-quickstart-xgboost.rst:759 +#: ../../source/ref-changelog.md:716 msgid "" -"In :code:`dataset.py`, we have a function :code:`instantiate_partitioner`" -" to instantiate the data partitioner based on the given " -":code:`num_partitions` and :code:`partitioner_type`. Currently, we " -"provide four supported partitioner type to simulate the uniformity/non-" -"uniformity in data quantity (uniform, linear, square, exponential)." +"The Driver API is still an experimental feature, but this release " +"introduces some major upgrades. 
One of the main improvements is the " +"introduction of an SQLite database to store server state on disk (instead" +" of in-memory). Another improvement is that tasks (instructions or " +"results) that have been delivered will now be deleted. This greatly " +"improves the memory efficiency of a long-running Flower server." msgstr "" +"L'API du pilote est encore une fonction expérimentale, mais cette version" +" introduit quelques améliorations majeures. L'une des principales " +"améliorations est l'introduction d'une base de données SQLite pour " +"stocker l'état du serveur sur le disque (au lieu de la mémoire). Une " +"autre amélioration est que les tâches (instructions ou résultats) qui ont" +" été livrées seront désormais supprimées, ce qui améliore " +"considérablement l'efficacité de la mémoire d'un serveur Flower " +"fonctionnant depuis longtemps." -#: ../../source/tutorial-quickstart-xgboost.rst:790 -#, fuzzy -msgid "Customised centralised/distributed evaluation" -msgstr "Évaluation centralisée" - -#: ../../source/tutorial-quickstart-xgboost.rst:792 +#: ../../source/ref-changelog.md:718 msgid "" -"To facilitate centralised evaluation, we define a function in " -":code:`server_utils.py`:" +"**Fix spilling issues related to Ray during simulations** " +"([#1698](https://github.com/adap/flower/pull/1698))" msgstr "" +"**Répare les problèmes de déversement liés à Ray pendant les " +"simulations** ([#1698](https://github.com/adap/flower/pull/1698))" -#: ../../source/tutorial-quickstart-xgboost.rst:824 +#: ../../source/ref-changelog.md:720 +#, fuzzy msgid "" -"This function returns a evaluation function which instantiates a " -":code:`Booster` object and loads the global model weights to it. The " -"evaluation is conducted by calling :code:`eval_set()` method, and the " -"tested AUC value is reported." +"While running long simulations, `ray` was sometimes spilling huge amounts" +" of data that would make the training unable to continue. This is now " +"fixed! 
🎉" msgstr "" +"Lors de l'exécution de longues simulations, `ray` déversait parfois " +"d'énormes quantités de données qui rendaient l'entraînement incapable de " +"continuer. Ce problème est maintenant corrigé ! 🎉" -#: ../../source/tutorial-quickstart-xgboost.rst:827 +#: ../../source/ref-changelog.md:722 msgid "" -"As for distributed evaluation on the clients, it's same as the quick-" -"start example by overriding the :code:`evaluate()` method insides the " -":code:`XgbClient` class in :code:`client_utils.py`." +"**Add new example using** `TabNet` **and Flower** " +"([#1725](https://github.com/adap/flower/pull/1725))" msgstr "" +"**Ajouter un nouvel exemple utilisant** `TabNet` **et Flower** " +"([#1725](https://github.com/adap/flower/pull/1725))" -#: ../../source/tutorial-quickstart-xgboost.rst:831 -#, fuzzy -msgid "Flower simulation" -msgstr "Simulation de moniteur" - -#: ../../source/tutorial-quickstart-xgboost.rst:832 +#: ../../source/ref-changelog.md:724 msgid "" -"We also provide an example code (:code:`sim.py`) to use the simulation " -"capabilities of Flower to simulate federated XGBoost training on either a" -" single machine or a cluster of machines." msgstr "" +"TabNet est un cadre puissant et flexible pour former des modèles " +"d'apprentissage automatique sur des données tabulaires. Nous avons " +"maintenant un exemple fédéré utilisant Flower : [quickstart-" +"tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-" +"tabnet)."
-#: ../../source/tutorial-quickstart-xgboost.rst:866 +#: ../../source/ref-changelog.md:726 msgid "" -"After importing all required packages, we define a :code:`main()` " -"function to perform the simulation process:" +"**Add new how-to guide for monitoring simulations** " +"([#1649](https://github.com/adap/flower/pull/1649))" msgstr "" +"**Ajouter un nouveau guide pratique pour le suivi des simulations** " +"([#1649](https://github.com/adap/flower/pull/1649))" -#: ../../source/tutorial-quickstart-xgboost.rst:921 +#: ../../source/ref-changelog.md:728 msgid "" -"We first load the dataset and perform data partitioning, and the pre-" -"processed data is stored in a :code:`list`. After the simulation begins, " -"the clients won't need to pre-process their partitions again." +"We now have a documentation guide to help users monitor their performance" +" during simulations." msgstr "" +"Nous avons maintenant un guide de documentation pour aider les " +"utilisateurs à surveiller leurs performances pendant les simulations." -#: ../../source/tutorial-quickstart-xgboost.rst:924 -msgid "Then, we define the strategies and other hyper-parameters:" +#: ../../source/ref-changelog.md:730 +msgid "" +"**Add training metrics to** `History` **object during simulations** " +"([#1696](https://github.com/adap/flower/pull/1696))" msgstr "" +"**Ajouter des mesures de formation à** `History` **objet pendant les " +"simulations** ([#1696](https://github.com/adap/flower/pull/1696))" -#: ../../source/tutorial-quickstart-xgboost.rst:975 +#: ../../source/ref-changelog.md:732 msgid "" -"After that, we start the simulation by calling " -":code:`fl.simulation.start_simulation`:" +"The `fit_metrics_aggregation_fn` can be used to aggregate training " +"metrics, but previous releases did not save the results in the `History` " +"object. This is now the case!" 
msgstr "" +"La fonction `fit_metrics_aggregation_fn` peut être utilisée pour agréger " +"les mesures d'entraînement, mais les versions précédentes " +"n'enregistraient pas les résultats dans l'objet `History`. c'est " +"désormais le cas !" -#: ../../source/tutorial-quickstart-xgboost.rst:995 +#: ../../source/ref-changelog.md:734 msgid "" -"One of key parameters for :code:`start_simulation` is :code:`client_fn` " -"which returns a function to construct a client. We define it as follows:" +"**General improvements** " +"([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " +"[#1672](https://github.com/adap/flower/pull/1672), " +"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " +"[#1682](https://github.com/adap/flower/pull/1682), " +"[#1685](https://github.com/adap/flower/pull/1685), " +"[#1692](https://github.com/adap/flower/pull/1692), " +"[#1705](https://github.com/adap/flower/pull/1705), " +"[#1708](https://github.com/adap/flower/pull/1708), " +"[#1711](https://github.com/adap/flower/pull/1711), " +"[#1713](https://github.com/adap/flower/pull/1713), " +"[#1714](https://github.com/adap/flower/pull/1714), " +"[#1718](https://github.com/adap/flower/pull/1718), " +"[#1716](https://github.com/adap/flower/pull/1716), " 
+"[#1723](https://github.com/adap/flower/pull/1723), " +"[#1735](https://github.com/adap/flower/pull/1735), " +"[#1678](https://github.com/adap/flower/pull/1678), " +"[#1750](https://github.com/adap/flower/pull/1750), " +"[#1753](https://github.com/adap/flower/pull/1753), " +"[#1736](https://github.com/adap/flower/pull/1736), " +"[#1766](https://github.com/adap/flower/pull/1766), " +"[#1760](https://github.com/adap/flower/pull/1760), " +"[#1775](https://github.com/adap/flower/pull/1775), " +"[#1776](https://github.com/adap/flower/pull/1776), " +"[#1777](https://github.com/adap/flower/pull/1777), " +"[#1779](https://github.com/adap/flower/pull/1779), " +"[#1784](https://github.com/adap/flower/pull/1784), " +"[#1773](https://github.com/adap/flower/pull/1773), " +"[#1755](https://github.com/adap/flower/pull/1755), " +"[#1789](https://github.com/adap/flower/pull/1789), " +"[#1788](https://github.com/adap/flower/pull/1788), " +"[#1798](https://github.com/adap/flower/pull/1798), " +"[#1799](https://github.com/adap/flower/pull/1799), " +"[#1739](https://github.com/adap/flower/pull/1739), " +"[#1800](https://github.com/adap/flower/pull/1800), " +"[#1804](https://github.com/adap/flower/pull/1804), " +"[#1805](https://github.com/adap/flower/pull/1805))" msgstr "" +"**General improvements** " +"([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " +"[#1672](https://github.com/adap/flower/pull/1672), " 
+"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " +"[#1682](https://github.com/adap/flower/pull/1682), " +"[#1685](https://github.com/adap/flower/pull/1685), " +"[#1692](https://github.com/adap/flower/pull/1692), " +"[#1705](https://github.com/ada" -#: ../../source/tutorial-quickstart-xgboost.rst:1038 -msgid "Arguments parser" -msgstr "" +#: ../../source/ref-changelog.md:742 +msgid "v1.3.0 (2023-02-06)" +msgstr "v1.3.0 (2023-02-06)" -#: ../../source/tutorial-quickstart-xgboost.rst:1040 +#: ../../source/ref-changelog.md:748 msgid "" -"In :code:`utils.py`, we define the arguments parsers for clients, server " -"and simulation, allowing users to specify different experimental " -"settings. Let's first see the sever side:" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" msgstr "" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" -#: ../../source/tutorial-quickstart-xgboost.rst:1086 +#: ../../source/ref-changelog.md:752 msgid "" -"This allows user to specify training strategies / the number of total " -"clients / FL rounds / participating clients / clients for evaluation, and" -" evaluation fashion. Note that with :code:`--centralised-eval`, the sever" -" will do centralised evaluation and all functionalities for client " -"evaluation will be disabled." 
-msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:1090 -msgid "Then, the argument parser on client side:" +"**Add support for** `workload_id` **and** `group_id` **in Driver API** " +"([#1595](https://github.com/adap/flower/pull/1595))" msgstr "" +"**Ajouter la prise en charge de** `workload_id` **et** `group_id` **dans " +"l'API du pilote** ([#1595](https://github.com/adap/flower/pull/1595))" -#: ../../source/tutorial-quickstart-xgboost.rst:1144 +#: ../../source/ref-changelog.md:754 msgid "" -"This defines various options for client data partitioning. Besides, " -"clients also have an option to conduct evaluation on centralised test set" -" by setting :code:`--centralised-eval`, as well as an option to perform " -"scaled learning rate based on the number of clients by setting :code" -":`--scaled-lr`." +"The (experimental) Driver API now supports a `workload_id` that can be " +"used to identify which workload a task belongs to. It also supports a new" +" `group_id` that can be used, for example, to indicate the current " +"training round. Both the `workload_id` and `group_id` enable client nodes" +" to decide whether they want to handle a task or not." msgstr "" +"L'API (expérimentale) Driver prend désormais en charge un `workload_id` " +"qui peut être utilisé pour identifier la charge de travail à laquelle une" +" tâche appartient. Elle prend également en charge un nouveau `group_id` " +"qui peut être utilisé, par exemple, pour indiquer le cycle de formation " +"en cours. Le `workload_id` et le `group_id` permettent tous deux aux " +"nœuds clients de décider s'ils veulent traiter une tâche ou non." 
-#: ../../source/tutorial-quickstart-xgboost.rst:1148 -msgid "We also have an argument parser for simulation:" +#: ../../source/ref-changelog.md:756 +msgid "" +"**Make Driver API and Fleet API address configurable** " +"([#1637](https://github.com/adap/flower/pull/1637))" msgstr "" +"**Faire en sorte que l'adresse de l'API du conducteur et de l'API de la " +"flotte soit configurable** " +"([#1637](https://github.com/adap/flower/pull/1637))" -#: ../../source/tutorial-quickstart-xgboost.rst:1226 -msgid "This integrates all arguments for both client and server sides." +#: ../../source/ref-changelog.md:758 +msgid "" +"The (experimental) long-running Flower server (Driver API and Fleet API) " +"can now configure the server address of both Driver API (via `--driver-" +"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" msgstr "" +"Le serveur Flower (expérimental) de longue durée (Driver API et Fleet " +"API) peut maintenant configurer l'adresse du serveur de Driver API (via " +"`--driver-api-address`) et de Fleet API (via `--fleet-api-address`) lors " +"de son démarrage :" -#: ../../source/tutorial-quickstart-xgboost.rst:1229 +#: ../../source/ref-changelog.md:760 #, fuzzy -msgid "Example commands" -msgstr "Exemples de PyTorch" - -#: ../../source/tutorial-quickstart-xgboost.rst:1231 msgid "" -"To run a centralised evaluated experiment with bagging strategy on 5 " -"clients with exponential distribution for 50 rounds, we first start the " -"server as below:" +"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " +"\"0.0.0.0:8086\"`" msgstr "" +"``flower-superlink --driver-api-address \"0.0.0.0:8081\" --fleet-api-" +"address \"0.0.0.0:8086\" ``" -#: ../../source/tutorial-quickstart-xgboost.rst:1238 -#, fuzzy -msgid "Then, on each client terminal, we start the clients:" -msgstr "Ouvre un autre terminal et démarre le deuxième client :" +#: ../../source/ref-changelog.md:762 +msgid "Both IPv4 and IPv6 addresses are supported." 
+msgstr "Les adresses IPv4 et IPv6 sont toutes deux prises en charge." -#: ../../source/tutorial-quickstart-xgboost.rst:1244 -msgid "To run the same experiment with Flower simulation:" +#: ../../source/ref-changelog.md:764 +msgid "" +"**Add new example of Federated Learning using fastai and Flower** " +"([#1598](https://github.com/adap/flower/pull/1598))" msgstr "" +"**Ajouter un nouvel exemple d'apprentissage fédéré utilisant fastai et " +"Flower** ([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/tutorial-quickstart-xgboost.rst:1250 -#, fuzzy +#: ../../source/ref-changelog.md:766 msgid "" -"The full `code `_ for this comprehensive example can be found in" -" :code:`examples/xgboost-comprehensive`." +"A new code example (`quickstart-fastai`) demonstrates federated learning " +"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " +"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" +"/quickstart-fastai)." msgstr "" -"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " -"premier système d'apprentissage fédéré. Le code source complet " -"`_ de cet exemple se trouve dans :code:`examples" -"/quickstart-mxnet`." - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 -#, fuzzy -msgid "Build a strategy from scratch" -msgstr "Élaborer une stratégie à partir de zéro" +"Un nouvel exemple de code (`quickstart-fastai`) démontre l'apprentissage " +"fédéré avec [fastai](https://www.fast.ai/) et Flower. Tu peux le trouver " +"ici : [quickstart-" +"fastai](https://github.com/adap/flower/tree/main/examples/quickstart-" +"fastai)." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 -#, fuzzy +#: ../../source/ref-changelog.md:768 msgid "" -"Welcome to the third part of the Flower federated learning tutorial. 
In " -"previous parts of this tutorial, we introduced federated learning with " -"PyTorch and Flower (`part 1 `__) and we learned how strategies " -"can be used to customize the execution on both the server and the clients" -" (`part 2 `__)." +"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" +" versions of Android** " +"([#1603](https://github.com/adap/flower/pull/1603))" msgstr "" -"Bienvenue dans la troisième partie du tutoriel sur l'apprentissage fédéré" -" Flower. Dans les parties précédentes de ce tutoriel, nous avons présenté" -" l'apprentissage fédéré avec PyTorch et Flower (`partie 1 " -"`__) " -"et nous avons appris comment les stratégies peuvent être utilisées pour " -"personnaliser l'exécution à la fois sur le serveur et sur les clients " -"(`partie 2 `__)." +"**Rendre l'exemple Android compatible avec** `flwr >= 1.0.0` **et les " +"dernières versions d'Android** " +"([#1603](https://github.com/adap/flower/pull/1603))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +#: ../../source/ref-changelog.md:770 +#, fuzzy msgid "" -"In this notebook, we'll continue to customize the federated learning " -"system we built previously by creating a custom version of FedAvg (again," -" using `Flower `__ and `PyTorch " -"`__)." +"The Android code example has received a substantial update: the project " +"is compatible with Flower 1.0 (and later), the UI received a full " +"refresh, and the project is updated to be compatible with newer Android " +"tooling." msgstr "" -"Dans ce carnet, nous allons continuer à personnaliser le système " -"d'apprentissage fédéré que nous avons construit précédemment en créant " -"une version personnalisée de FedAvg (encore une fois, en utilisant " -"`Flower `__ et `PyTorch `__)." 
+"L'exemple de code Android a reçu une mise à jour substantielle : le " +"projet est compatible avec Flower 1.0 et les versions ultérieures, " +"l'interface utilisateur a reçu un rafraîchissement complet, et le projet " +"est mis à jour pour être compatible avec les outils Android les plus " +"récents." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 +#: ../../source/ref-changelog.md:772 msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the Flower community on Slack to connect, ask questions, and get help: " -"`Join Slack `__ 🌼 We'd love to hear from " -"you in the ``#introductions`` channel! And if anything is unclear, head " -"over to the ``#questions`` channel." +"**Add new `FedProx` strategy** " +"([#1619](https://github.com/adap/flower/pull/1619))" msgstr "" -"`Star Flower on GitHub `__ ⭐️ et " -"rejoignez la communauté Flower sur Slack pour vous connecter, poser des " -"questions et obtenir de l'aide : `Join Slack `__ 🌼 Nous serions ravis d'avoir de vos nouvelles dans le canal " -"``#introductions`` ! Et si quelque chose n'est pas clair, rendez-vous sur" -" le canal ``#questions``." - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:17 -msgid "Let's build a new ``Strategy`` from scratch!" -msgstr "Construisons une nouvelle ``Stratégie`` à partir de zéro !" 
- -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:29 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:29 -msgid "Preparation" -msgstr "Préparation" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:31 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:32 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:31 +#: ../../source/ref-changelog.md:774 msgid "" -"Before we begin with the actual code, let's make sure that we have " -"everything we need." +"This " +"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" +" is almost identical to " +"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," +" but helps users replicate what is described in this " +"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " +"parameter called `proximal_mu` to regularize the local models with " +"respect to the global models." msgstr "" -"Avant de commencer le code proprement dit, assurons-nous que nous " -"disposons de tout ce dont nous avons besoin." 
- -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:43 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:44 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:43 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:43 -msgid "Installing dependencies" -msgstr "Installation des dépendances" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:45 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:46 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:45 -msgid "First, we install the necessary packages:" -msgstr "Tout d'abord, nous installons les paquets nécessaires :" +"Cette " +"[stratégie](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" +" est presque identique à " +"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," +" mais aide les utilisateurs à reproduire ce qui est décrit dans cet " +"[article](https://arxiv.org/abs/1812.06127). Elle ajoute essentiellement " +"un paramètre appelé `proximal_mu` pour régulariser les modèles locaux par" +" rapport aux modèles globaux." 
-#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:65 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:66 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:65 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:65 +#: ../../source/ref-changelog.md:776 msgid "" -"Now that we have all dependencies installed, we can import everything we " -"need for this tutorial:" +"**Add new metrics to telemetry events** " +"([#1640](https://github.com/adap/flower/pull/1640))" msgstr "" -"Maintenant que toutes les dépendances sont installées, nous pouvons " -"importer tout ce dont nous avons besoin pour ce tutoriel :" +"**Ajouter de nouvelles métriques aux événements de télémétrie** " +"([#1640](https://github.com/adap/flower/pull/1640))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:101 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:102 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:101 +#: ../../source/ref-changelog.md:778 msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " -"GPU > Save``). Note, however, that Google Colab is not always able to " -"offer GPU acceleration. If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." +"An updated event structure allows, for example, the clustering of events " +"within the same workload." msgstr "" -"Il est possible de passer à un runtime dont l'accélération GPU est " -"activée (sur Google Colab : ``Runtime > Change runtime type > Hardware " -"acclerator : GPU > Save``). 
Note cependant que Google Colab n'est pas " -"toujours en mesure de proposer l'accélération GPU. Si tu vois une erreur " -"liée à la disponibilité du GPU dans l'une des sections suivantes, " -"envisage de repasser à une exécution basée sur le CPU en définissant " -"``DEVICE = torch.device(\"cpu\")``. Si le runtime a activé l'accélération" -" GPU, tu devrais voir apparaître le résultat ``Training on cuda``, sinon " -"il dira ``Training on cpu``." - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:114 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:115 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:114 -msgid "Data loading" -msgstr "Chargement des données" +"Une structure d'événements mise à jour permet, par exemple, de regrouper " +"des événements au sein d'une même charge de travail." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:116 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:116 +#: ../../source/ref-changelog.md:780 msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``. We introduce a new parameter" -" ``num_clients`` which allows us to call ``load_datasets`` with different" -" numbers of clients." +"**Add new custom strategy tutorial section** " +"[#1623](https://github.com/adap/flower/pull/1623)" msgstr "" -"Chargeons maintenant les ensembles d'entraînement et de test CIFAR-10, " -"divisons-les en dix ensembles de données plus petits (chacun divisé en " -"ensemble d'entraînement et de validation), et enveloppons le tout dans " -"leur propre ``DataLoader``. Nous introduisons un nouveau paramètre " -"``num_clients`` qui nous permet d'appeler ``load_datasets`` avec " -"différents nombres de clients." 
- -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:167 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:168 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:167 -msgid "Model training/evaluation" -msgstr "Formation/évaluation du modèle" +"**Ajouter une nouvelle section de tutoriel sur les stratégies " +"personnalisées** [#1623](https://github.com/adap/flower/pull/1623)" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:169 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:170 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 +#: ../../source/ref-changelog.md:782 +#, fuzzy msgid "" -"Let's continue with the usual model definition (including " -"``set_parameters`` and ``get_parameters``), training and test functions:" +"The Flower tutorial now has a new section that covers implementing a " +"custom strategy from scratch: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" msgstr "" -"Continuons avec la définition habituelle du modèle (y compris " -"``set_parameters`` et ``get_parameters``), les fonctions d'entraînement " -"et de test :" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:258 -msgid "Flower client" -msgstr "Client de Flower" +"Le tutoriel sur les fleurs comporte désormais une nouvelle section qui " +"traite de la mise en œuvre d'une stratégie personnalisée à partir de zéro" +" : [Ouvrir dans " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial/Flower-3-Building-a" +"-Strategy-PyTorch.ipynb)" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:260 -#: 
../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:260 +#: ../../source/ref-changelog.md:784 msgid "" -"To implement the Flower client, we (again) create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " -"``cid`` to the client and use it log additional details:" +"**Add new custom serialization tutorial section** " +"([#1622](https://github.com/adap/flower/pull/1622))" msgstr "" -"Pour mettre en œuvre le client Flower, nous créons (à nouveau) une sous-" -"classe de ``flwr.client.NumPyClient`` et mettons en œuvre les trois " -"méthodes ``get_parameters``, ``fit`` et ``evaluate``. Ici, nous " -"transmettons également le ``cid`` au client et l'utilisons pour consigner" -" des détails supplémentaires :" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:308 -msgid "Let's test what we have so far before we continue:" -msgstr "Testons ce que nous avons jusqu'à présent avant de continuer :" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:339 -msgid "Build a Strategy from scratch" -msgstr "Élaborer une stratégie à partir de zéro" +"**Ajouter une nouvelle section de tutoriel sur la sérialisation " +"personnalisée** ([#1622](https://github.com/adap/flower/pull/1622))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:341 +#: ../../source/ref-changelog.md:786 +#, fuzzy msgid "" -"Let’s overwrite the ``configure_fit`` method such that it passes a higher" -" learning rate (potentially also other hyperparameters) to the optimizer " -"of a fraction of the clients. We will keep the sampling of the clients as" -" it is in ``FedAvg`` and then change the configuration dictionary (one of" -" the ``FitIns`` attributes)." 
+"The Flower tutorial now has a new section that covers custom " +"serialization: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-customize-the-client-pytorch.ipynb)" msgstr "" -"Remplaçons la méthode ``configure_fit`` de façon à ce qu'elle transmette " -"un taux d'apprentissage plus élevé (potentiellement aussi d'autres " -"hyperparamètres) à l'optimiseur d'une fraction des clients. Nous " -"garderons l'échantillonnage des clients tel qu'il est dans ``FedAvg`` et " -"changerons ensuite le dictionnaire de configuration (l'un des attributs " -"``FitIns``)." +"Le tutoriel Flower comporte désormais une nouvelle section qui " +"traite de la sérialisation personnalisée : [Ouvrir dans " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial/Flower-4" +"-Client-and-NumPyClient-PyTorch.ipynb)" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:507 +#: ../../source/ref-changelog.md:788 msgid "" -"The only thing left is to use the newly created custom Strategy " -"``FedCustom`` when starting the experiment:" +"**General improvements** " +"([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " +"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github.com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " +"[#1608](https://github.com/adap/flower/pull/1608), " 
+"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github.com/adap/flower/pull/1590), " +"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/adap/flower/pull/1600), " +"[#1601](https://github.com/adap/flower/pull/1601), " +"[#1597](https://github.com/adap/flower/pull/1597), " +"[#1595](https://github.com/adap/flower/pull/1595), " +"[#1591](https://github.com/adap/flower/pull/1591), " +"[#1588](https://github.com/adap/flower/pull/1588), " +"[#1589](https://github.com/adap/flower/pull/1589), " +"[#1587](https://github.com/adap/flower/pull/1587), " +"[#1573](https://github.com/adap/flower/pull/1573), " +"[#1581](https://github.com/adap/flower/pull/1581), " +"[#1578](https://github.com/adap/flower/pull/1578), " +"[#1574](https://github.com/adap/flower/pull/1574), " +"[#1572](https://github.com/adap/flower/pull/1572), " +"[#1586](https://github.com/adap/flower/pull/1586))" msgstr "" -"Il ne reste plus qu'à utiliser la stratégie personnalisée nouvellement " -"créée ``FedCustom`` lors du démarrage de l'expérience :" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:534 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:932 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:697 -msgid "Recap" -msgstr "Récapitulation" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:536 -msgid "" -"In this notebook, we’ve seen how to implement a custom strategy. A custom" -" strategy enables granular control over client node configuration, result" -" aggregation, and more. To define a custom strategy, you only have to " -"overwrite the abstract methods of the (abstract) base class ``Strategy``." -" To make custom strategies even more powerful, you can pass custom " -"functions to the constructor of your new class (``__init__``) and then " -"call these functions whenever needed." 
-msgstr "" -"Dans ce carnet, nous avons vu comment mettre en place une stratégie " -"personnalisée. Une stratégie personnalisée permet un contrôle granulaire " -"sur la configuration des nœuds clients, l'agrégation des résultats, et " -"bien plus encore. Pour définir une stratégie personnalisée, il te suffit " -"d'écraser les méthodes abstraites de la classe de base (abstraite) " -"``Strategy``. Pour rendre les stratégies personnalisées encore plus " -"puissantes, tu peux passer des fonctions personnalisées au constructeur " -"de ta nouvelle classe (``__init__``) et appeler ensuite ces fonctions à " -"chaque fois que c'est nécessaire." +"**General improvements** " +"([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " +"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github.com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " +"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github.com/adap/flower/pull/1590), " +"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/ada" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:550 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:948 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:729 -#: 
../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:715 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 +#: ../../source/ref-changelog.md:792 msgid "" -"Before you continue, make sure to join the Flower community on Slack: " -"`Join Slack `__" +"**Updated documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" msgstr "" -"Avant de continuer, n'oublie pas de rejoindre la communauté Flower sur " -"Slack : `Join Slack `__" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:552 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:950 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:731 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:717 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 +#: ../../source/ref-changelog.md:794 ../../source/ref-changelog.md:861 msgid "" -"There's a dedicated ``#questions`` channel if you need help, but we'd " -"also love to hear who you are in ``#introductions``!" +"As usual, the documentation has improved quite a bit. 
It is another step " +"in our effort to make the Flower documentation the best documentation of " +"any project. Stay tuned and as always, feel free to provide feedback!" msgstr "" -"Il existe un canal dédié aux ``questions`` si vous avez besoin d'aide, " -"mais nous aimerions aussi savoir qui vous êtes dans ``#introductions`` !" +"Comme d'habitude, la documentation s'est beaucoup améliorée. C'est une " +"autre étape dans notre effort pour faire de la documentation de Flower la" +" meilleure documentation de tout projet. Reste à l'écoute et comme " +"toujours, n'hésite pas à nous faire part de tes commentaires !" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:554 -#, fuzzy +#: ../../source/ref-changelog.md:800 +msgid "v1.2.0 (2023-01-13)" +msgstr "v1.2.0 (2023-01-13)" + +#: ../../source/ref-changelog.md:806 msgid "" -"The `Flower Federated Learning Tutorial - Part 4 " -"`__ introduces ``Client``, the flexible API underlying " -"``NumPyClient``." +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." +" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" msgstr "" -"Le `Tutoriel d'apprentissage fédéré Flower - Partie 4 " -"`__ présente ``Client``, l'API flexible qui sous-tend " -"``NumPyClient``." - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 -#, fuzzy -msgid "Customize the client" -msgstr "Création du client IMDBC" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L. " +"Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 -#, fuzzy +#: ../../source/ref-changelog.md:810 msgid "" -"Welcome to the fourth part of the Flower federated learning tutorial. 
In " -"the previous parts of this tutorial, we introduced federated learning " -"with PyTorch and Flower (`part 1 `__), we learned how " -"strategies can be used to customize the execution on both the server and " -"the clients (`part 2 `__), and we built our own " -"custom strategy from scratch (`part 3 `__)." +"**Introduce new Flower Baseline: FedAvg MNIST** " +"([#1497](https://github.com/adap/flower/pull/1497), " +"[#1552](https://github.com/adap/flower/pull/1552))" msgstr "" -"Bienvenue dans la quatrième partie du tutoriel sur l'apprentissage fédéré" -" Flower. Dans les parties précédentes de ce tutoriel, nous avons présenté" -" l'apprentissage fédéré avec PyTorch et Flower (`partie 1 " -"`__), " -"nous avons appris comment les stratégies peuvent être utilisées pour " -"personnaliser l'exécution à la fois sur le serveur et les clients " -"(`partie 2 `__), et nous avons construit notre propre stratégie " -"personnalisée à partir de zéro (`partie 3 - WIP " -"`__)." +"**Introduire une nouvelle fleur Référence : FedAvg MNIST** " +"([#1497](https://github.com/adap/flower/pull/1497), " +"[#1552](https://github.com/adap/flower/pull/1552))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 +#: ../../source/ref-changelog.md:812 msgid "" -"In this notebook, we revisit ``NumPyClient`` and introduce a new " -"baseclass for building clients, simply named ``Client``. In previous " -"parts of this tutorial, we've based our client on ``NumPyClient``, a " -"convenience class which makes it easy to work with machine learning " -"libraries that have good NumPy interoperability. With ``Client``, we gain" -" a lot of flexibility that we didn't have before, but we'll also have to " -"do a few things the we didn't have to do before." +"Over the coming weeks, we will be releasing a number of new reference " +"implementations useful especially to FL newcomers. 
They will typically " +"revisit well known papers from the literature, and be suitable for " +"integration in your own application or for experimentation, in order to " +"deepen your knowledge of FL in general. Today's release is the first in " +"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" +"pack-fedavg-mnist-cnn/)" msgstr "" -"Dans ce carnet, nous revisitons `NumPyClient`` et introduisons une " -"nouvelle classe de base pour construire des clients, simplement appelée " -"`Client``. Dans les parties précédentes de ce tutoriel, nous avons basé " -"notre client sur ``NumPyClient``, une classe de commodité qui facilite le" -" travail avec les bibliothèques d'apprentissage automatique qui ont une " -"bonne interopérabilité NumPy. Avec ``Client``, nous gagnons beaucoup de " -"flexibilité que nous n'avions pas auparavant, mais nous devrons également" -" faire quelques choses que nous n'avions pas à faire auparavant." +"Au cours des prochaines semaines, nous publierons un certain nombre de " +"nouvelles implémentations de référence utiles en particulier pour les " +"nouveaux venus en FL. Elles revisiteront généralement des articles bien " +"connus de la littérature, et seront adaptées à l'intégration dans votre " +"propre application ou à l'expérimentation, afin d'approfondir votre " +"connaissance de FL en général. La publication d'aujourd'hui est la " +"première de cette série. [Lire la " +"suite.](https://flower.ai/blog/2023-01-12-fl-starter-pack-fedavg-mnist-" +"cnn/)" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:18 +#: ../../source/ref-changelog.md:814 msgid "" -"Let's go deeper and see what it takes to move from ``NumPyClient`` to " -"``Client``!" +"**Improve GPU support in simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" msgstr "" -"Allons plus loin et voyons ce qu'il faut faire pour passer de " -"``NumPyClient`` à ``Client`` !" 
- -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:30 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:29 -msgid "Step 0: Preparation" -msgstr "Étape 0 : Préparation" +"**Améliorer la prise en charge des GPU dans les simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:117 +#: ../../source/ref-changelog.md:816 msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``." +"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" +" to improve GPU support. The update includes some of the hard-earned " +"lessons from scaling simulations in GPU cluster environments. New " +"defaults make running GPU-based simulations substantially more robust." msgstr "" -"Chargeons maintenant les ensembles d'entraînement et de test CIFAR-10, " -"divisons-les en dix ensembles de données plus petits (chacun divisé en " -"ensemble d'entraînement et de validation) et enveloppons le tout dans " -"leur propre ``DataLoader``." - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:259 -msgid "Step 1: Revisiting NumPyClient" -msgstr "Étape 1 : Revoir NumPyClient" +"Le moteur client virtuel basé sur Ray (`start_simulation`) a été mis à " +"jour pour améliorer la prise en charge des GPU. La mise à jour inclut " +"certaines des leçons durement apprises lors de la mise à l'échelle des " +"simulations dans des environnements de grappes de GPU. De nouveaux " +"paramètres par défaut rendent l'exécution des simulations basées sur les " +"GPU beaucoup plus robuste." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:261 +#: ../../source/ref-changelog.md:818 msgid "" -"So far, we've implemented our client by subclassing " -"``flwr.client.NumPyClient``. 
The three methods we implemented are " -"``get_parameters``, ``fit``, and ``evaluate``. Finally, we wrap the " -"creation of instances of this class in a function called ``client_fn``:" +"**Improve GPU support in Jupyter Notebook tutorials** " +"([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" msgstr "" -"Jusqu'à présent, nous avons implémenté notre client en sous-classant " -"``flwr.client.NumPyClient``. Les trois méthodes que nous avons " -"implémentées sont ``get_parameters``, ``fit`` et ``evaluate``. Enfin, " -"nous enveloppons la création d'instances de cette classe dans une " -"fonction appelée ``client_fn`` :" +"**Améliorer la prise en charge du GPU dans les tutoriels Jupyter " +"Notebook** ([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:309 +#: ../../source/ref-changelog.md:820 msgid "" -"We've seen this before, there's nothing new so far. The only *tiny* " -"difference compared to the previous notebook is naming, we've changed " -"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " -"``numpyclient_fn``. Let's run it to see the output we get:" +"Some users reported that Jupyter Notebooks have not always been easy to " +"use on GPU instances. We listened and made improvements to all of our " +"Jupyter notebooks! Check out the updated notebooks here:" msgstr "" -"Nous avons déjà vu cela auparavant, il n'y a rien de nouveau jusqu'à " -"présent. La seule *petite* différence par rapport au carnet précédent est" -" le nommage, nous avons changé ``FlowerClient`` en ``FlowerNumPyClient`` " -"et ``client_fn`` en ``numpyclient_fn``. Exécutons-le pour voir la sortie " -"que nous obtenons :" +"Certains utilisateurs ont signalé que les carnets Jupyter n'ont pas " +"toujours été faciles à utiliser sur les instances GPU. 
Nous les avons " +"écoutés et avons apporté des améliorations à tous nos carnets Jupyter ! " +"Découvre les carnets mis à jour ici :" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:339 +#: ../../source/ref-changelog.md:822 +#, fuzzy msgid "" -"This works as expected, two clients are training for three rounds of " -"federated learning." +"[An Introduction to Federated Learning](https://flower.ai/docs/framework" +"/tutorial-get-started-with-flower-pytorch.html)" msgstr "" -"Cela fonctionne comme prévu, deux clients s'entraînent pour trois tours " -"d'apprentissage fédéré." +"[Une introduction à l'apprentissage fédéré] " +"(https://flower.ai/docs/tutorial/Flower-1-Intro-to-FL-PyTorch.html)" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:341 +#: ../../source/ref-changelog.md:823 +#, fuzzy msgid "" -"Let's dive a little bit deeper and discuss how Flower executes this " -"simulation. Whenever a client is selected to do some work, " -"``start_simulation`` calls the function ``numpyclient_fn`` to create an " -"instance of our ``FlowerNumPyClient`` (along with loading the model and " -"the data)." +"[Strategies in Federated Learning](https://flower.ai/docs/framework" +"/tutorial-use-a-federated-learning-strategy-pytorch.html)" msgstr "" -"Plongeons un peu plus profondément et discutons de la façon dont Flower " -"exécute cette simulation. Chaque fois qu'un client est sélectionné pour " -"effectuer un travail, ``start_simulation`` appelle la fonction " -"``numpyclient_fn`` pour créer une instance de notre ``FlowerNumPyClient``" -" (en même temps qu'il charge le modèle et les données)." 
+"[Stratégies d'apprentissage fédéré] " +"(https://flower.ai/docs/tutorial/Flower-2-Strategies-in-FL-PyTorch.html)" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:343 +#: ../../source/ref-changelog.md:824 +#, fuzzy msgid "" -"But here's the perhaps surprising part: Flower doesn't actually use the " -"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " -"makes it look like a subclass of ``flwr.client.Client``, not " -"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " -"know how to handle ``NumPyClient``'s, it only knows how to handle " -"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " -"top of ``Client``." +"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" +"-strategy-from-scratch-pytorch.html)" msgstr "" -"Mais voici la partie la plus surprenante : Flower n'utilise pas " -"directement l'objet `FlowerNumPyClient`. Au lieu de cela, il enveloppe " -"l'objet pour le faire ressembler à une sous-classe de " -"`flwr.client.Client`, et non de `flwr.client.NumPyClient`. En fait, le " -"noyau de Flower ne sait pas comment gérer les `NumPyClient`, il sait " -"seulement comment gérer les `Client`. `NumPyClient` est juste une " -"abstraction de commodité construite au dessus de `Client`." +"[Construire une stratégie] " +"(https://flower.ai/docs/tutorial/Flower-3-Building-a-Strategy-" +"PyTorch.html)" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:345 +#: ../../source/ref-changelog.md:825 +#, fuzzy msgid "" -"Instead of building on top of ``NumPyClient``, we can directly build on " -"top of ``Client``." +"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" +"customize-the-client-pytorch.html)" msgstr "" -"Au lieu de construire par-dessus `NumPyClient``, nous pouvons construire " -"directement par-dessus `Client``." 
- -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:357 -msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" -msgstr "Étape 2 : Passer de ``NumPyClient`` à ``Client``" +"[Client et NumPyClient] (https://flower.ai/docs/tutorial/Flower-4-Client-" +"and-NumPyClient-PyTorch.html)" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:359 +#: ../../source/ref-changelog.md:827 msgid "" -"Let's try to do the same thing using ``Client`` instead of " -"``NumPyClient``." +"**Introduce optional telemetry** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" msgstr "" -"Essayons de faire la même chose en utilisant ``Client`` au lieu de " -"``NumPyClient``." +"**Introduire la télémétrie optionnelle** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:465 +#: ../../source/ref-changelog.md:829 msgid "" -"Before we discuss the code in more detail, let's try to run it! Gotta " -"make sure our new ``Client``-based client works, right?" +"After a [request for " +"feedback](https://github.com/adap/flower/issues/1534) from the community," +" the Flower open-source project introduces optional collection of " +"*anonymous* usage metrics to make well-informed decisions to improve " +"Flower. Doing this enables the Flower team to understand how Flower is " +"used and what challenges users might face." msgstr "" -"Avant de discuter du code plus en détail, essayons de l'exécuter ! Nous " -"devons nous assurer que notre nouveau client basé sur le ``Client`` " -"fonctionne, n'est-ce pas ?" 
+"À la suite d'une [demande de commentaires] " +"(https://github.com/adap/flower/issues/1534) de la part de la communauté," +" le projet open-source Flower introduit la collecte optionnelle de " +"mesures d'utilisation *anonymes* afin de prendre des décisions éclairées " +"pour améliorer Flower. Cela permet à l'équipe de Flower de comprendre " +"comment Flower est utilisé et quels sont les défis auxquels les " +"utilisateurs peuvent être confrontés." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:490 +#: ../../source/ref-changelog.md:831 +#, fuzzy msgid "" -"That's it, we're now using ``Client``. It probably looks similar to what " -"we've done with ``NumPyClient``. So what's the difference?" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users who do not want to share anonymous usage metrics. " +"[Read more.](https://flower.ai/docs/telemetry.html)." msgstr "" -"Voilà, nous utilisons maintenant ``Client``. Cela ressemble probablement " -"à ce que nous avons fait avec ``NumPyClient``. Alors quelle est la " -"différence ?" +"**Flower est un cadre convivial pour l'IA collaborative et la science des" +" données.** Restant fidèle à cette déclaration, Flower permet de " +"désactiver facilement la télémétrie pour les utilisateurs qui ne " +"souhaitent pas partager des métriques d'utilisation anonymes.[Lire la " +"suite.](https://flower.ai/docs/telemetry.html)." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:492 +#: ../../source/ref-changelog.md:833 msgid "" -"First of all, it's more code. But why? The difference comes from the fact" -" that ``Client`` expects us to take care of parameter serialization and " -"deserialization. For Flower to be able to send parameters over the " -"network, it eventually needs to turn these parameters into ``bytes``. 
" -"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " -"serialization. Turning raw bytes into something more useful (like NumPy " -"``ndarray``'s) is called deserialization. Flower needs to do both: it " -"needs to serialize parameters on the server-side and send them to the " -"client, the client needs to deserialize them to use them for local " -"training, and then serialize the updated parameters again to send them " -"back to the server, which (finally!) deserializes them again in order to " -"aggregate them with the updates received from other clients." +"**Introduce (experimental) Driver API** " +"([#1520](https://github.com/adap/flower/pull/1520), " +"[#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" msgstr "" -"First of all, it's more code. But why? The difference comes from the fact" -" that ``Client`` expects us to take care of parameter serialization and " -"deserialization. For Flower to be able to send parameters over the " -"network, it eventually needs to turn these parameters into ``bytes``. " -"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " -"serialization. Turning raw bytes into something more useful (like NumPy " -"``ndarray``'s) is called deserialization. Flower needs to do both: it " -"needs to serialize parameters on the server-side and send them to the " -"client, the client needs to deserialize them to use them for local " -"training, and then serialize the updated parameters again to send them " -"back to the server, which (finally!) deserializes them again in order to " -"aggregate them with the updates received from other clients." 
+"**([#1520](https://github.com/adap/flower/pull/1520), " +"[#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:495 +#: ../../source/ref-changelog.md:835 msgid "" -"The only *real* difference between Client and NumPyClient is that " -"NumPyClient takes care of serialization and deserialization for you. It " -"can do so because it expects you to return parameters as NumPy ndarray's," -" and it knows how to handle these. This makes working with machine " -"learning libraries that have good NumPy support (most of them) a breeze." +"Flower now has a new (experimental) Driver API which will enable fully " +"programmable, async, and multi-tenant Federated Learning and Federated " +"Analytics applications. Phew, that's a lot! Going forward, the Driver API" +" will be the abstraction that many upcoming features will be built on - " +"and you can start building those things now, too." msgstr "" -"La seule *vraie* différence entre Client et NumPyClient est que " -"NumPyClient s'occupe de la sérialisation et de la désérialisation pour " -"toi. Il peut le faire parce qu'il s'attend à ce que tu renvoies des " -"paramètres sous forme de NumPy ndarray, et il sait comment les gérer. " -"Cela permet de travailler avec des bibliothèques d'apprentissage " -"automatique qui ont une bonne prise en charge de NumPy (la plupart " -"d'entre elles) en un clin d'œil." +"Flower dispose désormais d'une nouvelle API de pilote (expérimentale) qui" +" permettra de créer des applications Federated Learning et Federated " +"Analytics entièrement programmables, asynchrones et multi-tenant. Ouf, " +"c'est beaucoup ! 
À l'avenir, l'API de pilote sera l'abstraction sur " +"laquelle de nombreuses fonctionnalités à venir seront construites - et tu" +" peux commencer à construire ces choses dès maintenant, aussi." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:497 +#: ../../source/ref-changelog.md:837 msgid "" -"In terms of API, there's one major difference: all methods in Client take" -" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " -"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " -"``NumPyClient`` on the other hand have multiple arguments (e.g., " -"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" -" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " -"``NumPyClient.fit``) if there are multiple things to handle. These " -"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " -"values you're used to from ``NumPyClient``." +"The Driver API also enables a new execution mode in which the server runs" +" indefinitely. Multiple individual workloads can run concurrently and " +"start and stop their execution independent of the server. This is " +"especially useful for users who want to deploy Flower in production." msgstr "" -"In terms of API, there's one major difference: all methods in Client take" -" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " -"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " -"``NumPyClient`` on the other hand have multiple arguments (e.g., " -"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" -" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " -"``NumPyClient.fit``) if there are multiple things to handle. These " -"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " -"values you're used to from ``NumPyClient``." 
- -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:510 -msgid "Step 3: Custom serialization" -msgstr "Étape 3 : Sérialisation personnalisée" +"L'API du pilote permet également un nouveau mode d'exécution dans lequel " +"le serveur s'exécute indéfiniment. Plusieurs charges de travail " +"individuelles peuvent s'exécuter simultanément et démarrer et arrêter " +"leur exécution indépendamment du serveur. Ceci est particulièrement utile" +" pour les utilisateurs qui souhaitent déployer Flower en production." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:512 +#: ../../source/ref-changelog.md:839 msgid "" -"Here we will explore how to implement custom serialization with a simple " -"example." +"To learn more, check out the `mt-pytorch` code example. We look forward " +"to you feedback!" msgstr "" -"Nous allons ici explorer comment mettre en œuvre une sérialisation " -"personnalisée à l'aide d'un exemple simple." +"Pour en savoir plus, consulte l'exemple de code `mt-pytorch`. Nous " +"attendons tes commentaires avec impatience !" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:514 +#: ../../source/ref-changelog.md:841 msgid "" -"But first what is serialization? Serialization is just the process of " -"converting an object into raw bytes, and equally as important, " -"deserialization is the process of converting raw bytes back into an " -"object. This is very useful for network communication. Indeed, without " -"serialization, you could not just a Python object through the internet." +"Please note: *The Driver API is still experimental and will likely change" +" significantly over time.*" msgstr "" -"Mais d'abord, qu'est-ce que la sérialisation ? La sérialisation est " -"simplement le processus de conversion d'un objet en octets bruts, et tout" -" aussi important, la désérialisation est le processus de reconversion des" -" octets bruts en objet. Ceci est très utile pour la communication réseau." 
-" En effet, sans la sérialisation, tu ne pourrais pas faire passer un " -"objet Python par Internet." +"Remarque : *L'API du pilote est encore expérimentale et est susceptible " +"de changer de manière significative au fil du temps.*" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:516 +#: ../../source/ref-changelog.md:843 msgid "" -"Federated Learning relies heavily on internet communication for training " -"by sending Python objects back and forth between the clients and the " -"server. This means that serialization is an essential part of Federated " -"Learning." +"**Add new Federated Analytics with Pandas example** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" msgstr "" -"L'apprentissage fédéré s'appuie fortement sur la communication Internet " -"pour la formation en envoyant des objets Python dans les deux sens entre " -"les clients et le serveur, ce qui signifie que la sérialisation est un " -"élément essentiel de l'apprentissage fédéré." +"**Ajouter un nouvel exemple de Federated Analytics avec Pandas** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:518 +#: ../../source/ref-changelog.md:845 msgid "" -"In the following section, we will write a basic example where instead of " -"sending a serialized version of our ``ndarray``\\ s containing our " -"parameters, we will first convert the ``ndarray`` into sparse matrices, " -"before sending them. This technique can be used to save bandwidth, as in " -"certain cases where the weights of a model are sparse (containing many 0 " -"entries), converting them to a sparse matrix can greatly improve their " -"bytesize." +"A new code example (`quickstart-pandas`) demonstrates federated analytics" +" with Pandas and Flower. 
You can find it here: [quickstart-" +"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" +"pandas)." msgstr "" -"Dans la section suivante, nous allons écrire un exemple de base où, au " -"lieu d'envoyer une version sérialisée de nos ``ndarray`` contenant nos " -"paramètres, nous allons d'abord convertir les ``ndarray`` en matrices " -"éparses, avant de les envoyer. Cette technique peut être utilisée pour " -"économiser de la bande passante, car dans certains cas où les poids d'un " -"modèle sont épars (contenant de nombreuses entrées 0), les convertir en " -"une matrice éparse peut grandement améliorer leur taille en octets." - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:521 -msgid "Our custom serialization/deserialization functions" -msgstr "Nos fonctions de sérialisation/désérialisation personnalisées" +"Un nouvel exemple de code (`quickstart-pandas`) démontre l'analyse " +"fédérée avec Pandas et Flower. Tu peux le trouver ici : [quickstart-" +"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" +"pandas)." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:523 +#: ../../source/ref-changelog.md:847 msgid "" -"This is where the real serialization/deserialization will happen, " -"especially in ``ndarray_to_sparse_bytes`` for serialization and " -"``sparse_bytes_to_ndarray`` for deserialization." +"**Add new strategies: Krum and MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" msgstr "" -"C'est là que la véritable sérialisation/désérialisation se produira, en " -"particulier dans ``ndarray_to_sparse_bytes`` pour la sérialisation et " -"``sparse_bytes_to_ndarray`` pour la désérialisation." 
+"**Ajouter de nouvelles stratégies : Krum et MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:525 +#: ../../source/ref-changelog.md:849 msgid "" -"Note that we imported the ``scipy.sparse`` library in order to convert " -"our arrays." +"Edoardo, a computer science student at the Sapienza University of Rome, " +"contributed a new `Krum` strategy that enables users to easily use Krum " +"and MultiKrum in their workloads." msgstr "" -"Notez que nous avons importé la bibliothèque ``scipy.sparse`` afin de " -"convertir nos tableaux." - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:613 -msgid "Client-side" -msgstr "Côté client" +"Edoardo, étudiant en informatique à l'Université Sapienza de Rome, a " +"contribué à une nouvelle stratégie `Krum` qui permet aux utilisateurs " +"d'utiliser facilement Krum et MultiKrum dans leurs charges de travail." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:615 +#: ../../source/ref-changelog.md:851 msgid "" -"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " -"will just have to call our custom functions in our " -"``flwr.client.Client``." +"**Update C++ example to be compatible with Flower v1.2.0** " +"([#1495](https://github.com/adap/flower/pull/1495))" msgstr "" -"Pour pouvoir sérialiser nos ``ndarray`` en paramètres sparse, il nous " -"suffira d'appeler nos fonctions personnalisées dans notre " -"``flwr.client.Client``." +"**Mettre à jour l'exemple C++ pour qu'il soit compatible avec Flower " +"v1.2.0** ([#1495](https://github.com/adap/flower/pull/1495))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:617 +#: ../../source/ref-changelog.md:853 msgid "" -"Indeed, in ``get_parameters`` we need to serialize the parameters we got " -"from our network using our custom ``ndarrays_to_sparse_parameters`` " -"defined above." 
+"The C++ code example has received a substantial update to make it " +"compatible with the latest version of Flower." msgstr "" -"En effet, dans ``get_parameters`` nous devons sérialiser les paramètres " -"que nous avons obtenus de notre réseau en utilisant nos " -"``ndarrays_to_sparse_parameters`` personnalisés définis ci-dessus." +"L'exemple de code C++ a reçu une mise à jour substantielle pour le rendre" +" compatible avec la dernière version de Flower." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:619 +#: ../../source/ref-changelog.md:855 msgid "" -"In ``fit``, we first need to deserialize the parameters coming from the " -"server using our custom ``sparse_parameters_to_ndarrays`` and then we " -"need to serialize our local results with " -"``ndarrays_to_sparse_parameters``." +"**General improvements** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" msgstr "" -"Dans ``fit``, nous devons d'abord désérialiser les paramètres provenant " -"du serveur en utilisant notre ``sparse_parameters_to_ndarrays`` " -"personnalisé, puis nous devons sérialiser nos résultats locaux avec " -"``ndarrays_to_sparse_parameters``." 
+"**Améliorations générales** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:621 +#: ../../source/ref-changelog.md:859 msgid "" -"In ``evaluate``, we will only need to deserialize the global parameters " -"with our custom function." +"**Updated documentation** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" msgstr "" -"Dans ``evaluate``, nous n'aurons besoin que de désérialiser les " -"paramètres globaux avec notre fonction personnalisée." 
- -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:725 -msgid "Server-side" -msgstr "Côté serveur" +"**Documentation mise à jour** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:727 +#: ../../source/ref-changelog.md:863 msgid "" -"For this example, we will just use ``FedAvg`` as a strategy. To change " -"the serialization and deserialization here, we only need to reimplement " -"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other" -" functions of the strategy will be inherited from the super class " -"``FedAvg``." +"One highlight is the new [first time contributor " +"guide](https://flower.ai/docs/first-time-contributors.html): if you've " +"never contributed on GitHub before, this is the perfect place to start!" msgstr "" -"Pour cet exemple, nous utiliserons simplement ``FedAvg`` comme stratégie." -" Pour modifier la sérialisation et la désérialisation ici, il suffit de " -"réimplémenter les fonctions ``evaluate`` et ``aggregate_fit`` de " -"``FedAvg``. Les autres fonctions de la stratégie seront héritées de la " -"super-classe ``FedAvg``." +"L'un des points forts est le nouveau [guide du premier contributeur] " +"(https://flower.ai/docs/first-time-contributors.html) : si tu n'as jamais" +" contribué sur GitHub auparavant, c'est l'endroit idéal pour commencer !" 
-#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:729 -msgid "As you can see only one line as change in ``evaluate``:" -msgstr "Comme tu peux le voir, seule une ligne a été modifiée dans ``evaluate`` :" +#: ../../source/ref-changelog.md:869 +msgid "v1.1.0 (2022-10-31)" +msgstr "v1.1.0 (2022-10-31)" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:735 +#: ../../source/ref-changelog.md:873 msgid "" -"And for ``aggregate_fit``, we will first deserialize every result we " -"received:" +"We would like to give our **special thanks** to all the contributors who " +"made the new version of Flower possible (in `git shortlog` order):" msgstr "" -"Et pour ``aggregate_fit``, nous allons d'abord désérialiser chaque " -"résultat que nous avons reçu :" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:744 -msgid "And then serialize the aggregated result:" -msgstr "Puis sérialise le résultat agrégé :" +"Nous aimerions **remercier tout particulièrement** tous les contributeurs" +" qui ont rendu possible la nouvelle version de Flower (dans l'ordre `git " +"shortlog`) :" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:903 -msgid "We can now run our custom serialization example!" +#: ../../source/ref-changelog.md:875 +msgid "" +"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" msgstr "" -"Nous pouvons maintenant exécuter notre exemple de sérialisation " -"personnalisée !" +"`Akis Linardos`, `Christopher S`, `Daniel J. 
Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:934 +#: ../../source/ref-changelog.md:879 msgid "" -"In this part of the tutorial, we've seen how we can build clients by " -"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " -"convenience abstraction that makes it easier to work with machine " -"learning libraries that have good NumPy interoperability. ``Client`` is a" -" more flexible abstraction that allows us to do things that are not " -"possible in ``NumPyClient``. In order to do so, it requires us to handle " -"parameter serialization and deserialization ourselves." +"**Introduce Differential Privacy wrappers (preview)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" msgstr "" -"Dans cette partie du tutoriel, nous avons vu comment construire des " -"clients en sous-classant soit ``NumPyClient``, soit ``Client``. " -"``NumPyClient`` est une abstraction de commodité qui facilite le travail " -"avec les bibliothèques d'apprentissage automatique qui ont une bonne " -"interopérabilité NumPy. ``Client`` est une abstraction plus flexible qui " -"nous permet de faire des choses qui ne sont pas possibles dans " -"``NumPyClient``. Pour ce faire, elle nous oblige à gérer nous-mêmes la " -"sérialisation et la désérialisation des paramètres." +"**Introduire les enveloppes de confidentialité différentielle (aperçu)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:952 +#: ../../source/ref-changelog.md:881 msgid "" -"This is the final part of the Flower tutorial (for now!), " -"congratulations! 
You're now well equipped to understand the rest of the " -"documentation. There are many topics we didn't cover in the tutorial, we " -"recommend the following resources:" +"The first (experimental) preview of pluggable Differential Privacy " +"wrappers enables easy configuration and usage of differential privacy " +"(DP). The pluggable DP wrappers enable framework-agnostic **and** " +"strategy-agnostic usage of both client-side DP and server-side DP. Head " +"over to the Flower docs, a new explainer goes into more detail." msgstr "" -"C'est la dernière partie du tutoriel Flower (pour l'instant !), " -"félicitations ! Tu es maintenant bien équipé pour comprendre le reste de " -"la documentation. Il y a de nombreux sujets que nous n'avons pas abordés " -"dans le tutoriel, nous te recommandons les ressources suivantes :" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:954 -msgid "`Read Flower Docs `__" -msgstr "`Lire les docs sur les fleurs `__" +"Le premier aperçu (expérimental) des wrappers enfichables de " +"confidentialité différentielle permet de configurer et d'utiliser " +"facilement la confidentialité différentielle (DP). Les wrappers DP " +"enfichables permettent une utilisation agnostique du cadre **et** de la " +"stratégie à la fois de la DP côté client et de la DP côté serveur. Va " +"voir les documents de Flower, un nouvel explicatif va plus loin dans les " +"détails." 
-#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:955 +#: ../../source/ref-changelog.md:883 msgid "" -"`Check out Flower Code Examples " -"`__" +"**New iOS CoreML code example** " +"([#1289](https://github.com/adap/flower/pull/1289))" msgstr "" -"`Check out Flower Code Examples " -"`__" +"**Nouvel exemple de code CoreML pour iOS** " +"([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:956 -#, fuzzy +#: ../../source/ref-changelog.md:885 msgid "" -"`Use Flower Baselines for your research " -"`__" +"Flower goes iOS! A massive new code example shows how Flower clients can " +"be built for iOS. The code example contains both Flower iOS SDK " +"components that can be used for many tasks, and one task example running " +"on CoreML." msgstr "" -"`Utilise les lignes de base des fleurs pour ta recherche " -"`__" +"Flower passe à iOS ! Un nouvel exemple de code massif montre comment les " +"clients Flower peuvent être construits pour iOS. L'exemple de code " +"contient à la fois des composants Flower iOS SDK qui peuvent être " +"utilisés pour de nombreuses tâches, et un exemple de tâche fonctionnant " +"sur CoreML." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:957 -#, fuzzy +#: ../../source/ref-changelog.md:887 msgid "" -"`Watch Flower Summit 2023 videos `__" +"**New FedMedian strategy** " +"([#1461](https://github.com/adap/flower/pull/1461))" msgstr "" -"`Regardez les vidéos du Flower Summit 2022 `__" +"**Nouvelle stratégie de FedMedian** " +"([#1461](https://github.com/adap/flower/pull/1461))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 -msgid "Get started with Flower" +#: ../../source/ref-changelog.md:889 +msgid "" +"The new `FedMedian` strategy implements Federated Median (FedMedian) by " +"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." 
msgstr "" +"La nouvelle stratégie `FedMedian` met en œuvre Federated Median " +"(FedMedian) par [Yin et al., 2018] " +"(https://arxiv.org/pdf/1803.01498v1.pdf)." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 -msgid "Welcome to the Flower federated learning tutorial!" -msgstr "Bienvenue au tutoriel sur l'apprentissage fédéré de la fleur !" +#: ../../source/ref-changelog.md:891 +msgid "" +"**Log** `Client` **exceptions in Virtual Client Engine** " +"([#1493](https://github.com/adap/flower/pull/1493))" +msgstr "" +"**Log** `Client` **exceptions dans le moteur de client virtuel** " +"([#1493](https://github.com/adap/flower/pull/1493))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 -#, fuzzy +#: ../../source/ref-changelog.md:893 msgid "" -"In this notebook, we'll build a federated learning system using Flower, " -"`Flower Datasets `__ and PyTorch. In " -"part 1, we use PyTorch for the model training pipeline and data loading. " -"In part 2, we continue to federate the PyTorch-based pipeline using " -"Flower." +"All `Client` exceptions happening in the VCE are now logged by default " +"and not just exposed to the configured `Strategy` (via the `failures` " +"argument)." msgstr "" -"Dans ce carnet, nous allons construire un système d'apprentissage fédéré " -"en utilisant Flower et PyTorch. Dans la première partie, nous utilisons " -"PyTorch pour le pipeline d'entraînement des modèles et le chargement des " -"données. Dans la deuxième partie, nous continuons à fédérer le pipeline " -"basé sur PyTorch en utilisant Flower." +"Toutes les exceptions `Client` qui se produisent dans le VCE sont " +"maintenant enregistrées par défaut et ne sont pas seulement exposées à la" +" `Stratégie` configurée (via l'argument `failures`)." 
-#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:17 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 -#, fuzzy -msgid "Let's get started!" -msgstr "Allons-y, déclarons-le !" +#: ../../source/ref-changelog.md:895 +msgid "" +"**Improve Virtual Client Engine internals** " +"([#1401](https://github.com/adap/flower/pull/1401), " +"[#1453](https://github.com/adap/flower/pull/1453))" +msgstr "" +"**Améliorer le moteur du client virtuel** " +"([#1401](https://github.com/adap/flower/pull/1401), " +"[#1453](https://github.com/adap/flower/pull/1453))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:31 +#: ../../source/ref-changelog.md:897 msgid "" -"Before we begin with any actual code, let's make sure that we have " -"everything we need." +"Some internals of the Virtual Client Engine have been revamped. The VCE " +"now uses Ray 2.0 under the hood, the value type of the `client_resources`" +" dictionary changed to `float` to allow fractions of resources to be " +"allocated." msgstr "" -"Avant de commencer à coder, assurons-nous que nous disposons de tout ce " -"dont nous avons besoin." +"Le VCE utilise maintenant Ray 2.0 sous le capot, le type de valeur du " +"dictionnaire `client_resources` a été remplacé par `float` pour permettre" +" l'allocation de fractions de ressources." 
-#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:45 -#, fuzzy +#: ../../source/ref-changelog.md:899 msgid "" -"Next, we install the necessary packages for PyTorch (``torch`` and " -"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " -"(``flwr``):" +"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " +"Client Engine**" msgstr "" -"Ensuite, nous installons les paquets nécessaires pour PyTorch (``torch`` " -"et ``torchvision``) et Flower (``flwr``) :" +"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " +"Client Engine**" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:105 -#, fuzzy +#: ../../source/ref-changelog.md:901 msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" -" GPU > Save``). Note, however, that Google Colab is not always able to " -"offer GPU acceleration. If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." +"The Virtual Client Engine now has full support for optional `Client` (and" +" `NumPyClient`) methods." msgstr "" -"Il est possible de passer à un runtime dont l'accélération GPU est " -"activée (sur Google Colab : ``Runtime > Change runtime type > Hardware " -"acclerator : GPU > Save``). Note cependant que Google Colab n'est pas " -"toujours en mesure de proposer l'accélération GPU. Si tu vois une erreur " -"liée à la disponibilité du GPU dans l'une des sections suivantes, " -"envisage de repasser à une exécution basée sur le CPU en définissant " -"``DEVICE = torch.device(\"cpu\")``. 
Si le runtime a activé l'accélération" -" GPU, tu devrais voir apparaître le résultat ``Training on cuda``, sinon " -"il dira ``Training on cpu``." - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:118 -msgid "Loading the data" -msgstr "Chargement des données" +"Le moteur de client virtuel prend désormais en charge les méthodes " +"optionnelles `Client` (et `NumPyClient`)." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:120 -#, fuzzy +#: ../../source/ref-changelog.md:903 msgid "" -"Federated learning can be applied to many different types of tasks across" -" different domains. In this tutorial, we introduce federated learning by " -"training a simple convolutional neural network (CNN) on the popular " -"CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that " -"distinguish between images from ten different classes: 'airplane', " -"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " -"'truck'." +"**Provide type information to packages using** `flwr` " +"([#1377](https://github.com/adap/flower/pull/1377))" msgstr "" -"L'apprentissage fédéré peut être appliqué à de nombreux types de tâches " -"dans différents domaines. Dans ce tutoriel, nous présentons " -"l'apprentissage fédéré en formant un simple réseau neuronal " -"convolutionnel (CNN) sur l'ensemble de données populaire CIFAR-10. " -"CIFAR-10 peut être utilisé pour former des classificateurs d'images qui " -"font la distinction entre les images de dix classes différentes :" +"**Fournir des informations de type aux paquets en utilisant** `flwr` " +"([#1377](https://github.com/adap/flower/pull/1377))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:131 +#: ../../source/ref-changelog.md:905 msgid "" -"We simulate having multiple datasets from multiple organizations (also " -"called the \"cross-silo\" setting in federated learning) by splitting the" -" original CIFAR-10 dataset into multiple partitions. 
Each partition will " -"represent the data from a single organization. We're doing this purely " -"for experimentation purposes, in the real world there's no need for data " -"splitting because each organization already has their own data (so the " -"data is naturally partitioned)." +"The package `flwr` is now bundled with a `py.typed` file indicating that " +"the package is typed. This enables typing support for projects or " +"packages that use `flwr` by enabling them to improve their code using " +"static type checkers like `mypy`." msgstr "" -"Nous simulons le fait d'avoir plusieurs ensembles de données provenant de" -" plusieurs organisations (également appelé le paramètre \"cross-silo\" " -"dans l'apprentissage fédéré) en divisant l'ensemble de données CIFAR-10 " -"original en plusieurs partitions. Chaque partition représentera les " -"données d'une seule organisation. Nous faisons cela purement à des fins " -"d'expérimentation, dans le monde réel, il n'y a pas besoin de diviser les" -" données parce que chaque organisation a déjà ses propres données (les " -"données sont donc naturellement partitionnées)." +"Le paquet `flwr` est maintenant accompagné d'un fichier `py.typed` " +"indiquant que le paquet est typé. Cela permet de prendre en charge le " +"typage pour les projets ou les paquets qui utilisent `flwr` en leur " +"permettant d'améliorer leur code à l'aide de vérificateurs de types " +"statiques comme `mypy`." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:133 -#, fuzzy +#: ../../source/ref-changelog.md:907 msgid "" -"Each organization will act as a client in the federated learning system. " -"So having ten organizations participate in a federation means having ten " -"clients connected to the federated learning server." 
+"**Updated code example** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" msgstr "" -"Chaque organisation agira comme un client dans le système d'apprentissage" -" fédéré. Ainsi, le fait que dix organisations participent à une " -"fédération signifie que dix clients sont connectés au serveur " -"d'apprentissage fédéré :" +"**Exemple de code mis à jour** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:144 +#: ../../source/ref-changelog.md:909 msgid "" -"Let's now create the Federated Dataset abstraction that from ``flwr-" -"datasets`` that partitions the CIFAR-10. We will create small training " -"and test set for each edge device and wrap each of them into a PyTorch " -"``DataLoader``:" +"The code examples covering scikit-learn and PyTorch Lightning have been " +"updated to work with the latest version of Flower." msgstr "" +"Les exemples de code couvrant scikit-learn et PyTorch Lightning ont été " +"mis à jour pour fonctionner avec la dernière version de Flower." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:198 -#, fuzzy +#: ../../source/ref-changelog.md:911 msgid "" -"We now have a list of ten training sets and ten validation sets " -"(``trainloaders`` and ``valloaders``) representing the data of ten " -"different organizations. Each ``trainloader``/``valloader`` pair contains" -" 4000 training examples and 1000 validation examples. There's also a " -"single ``testloader`` (we did not split the test set). Again, this is " -"only necessary for building research or educational systems, actual " -"federated learning systems have their data naturally distributed across " -"multiple partitions." 
+"**Updated documentation** " +"([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github.com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" msgstr "" -"Nous avons maintenant une liste de dix ensembles de formation et dix " -"ensembles de validation (``trainloaders`` et ``valloaders``) représentant" -" les données de dix organisations différentes. Chaque paire " -"``trainloader`/``valloader`` contient 4500 exemples de formation et 500 " -"exemples de validation. Il y a également un seul ``testloader`` (nous " -"n'avons pas divisé l'ensemble de test). Encore une fois, cela n'est " -"nécessaire que pour construire des systèmes de recherche ou d'éducation, " -"les systèmes d'apprentissage fédérés actuels ont leurs données " -"naturellement distribuées à travers plusieurs partitions." 
+"**Documentation mise à jour** " +"([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github.com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:201 +#: ../../source/ref-changelog.md:913 msgid "" -"Let's take a look at the first batch of images and labels in the first " -"training set (i.e., ``trainloaders[0]``) before we move on:" +"There have been so many documentation updates that it doesn't even make " +"sense to list them individually." msgstr "" -"Jetons un coup d'œil au premier lot d'images et d'étiquettes du premier " -"ensemble d'entraînement (c'est-à-dire ``trainloaders[0]``) avant de " -"poursuivre :" +"Il y a eu tellement de mises à jour de la documentation que cela n'a même" +" pas de sens de les énumérer individuellement." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:240 +#: ../../source/ref-changelog.md:915 msgid "" -"The output above shows a random batch of images from the first " -"``trainloader`` in our list of ten ``trainloaders``. 
It also prints the " -"labels associated with each image (i.e., one of the ten possible labels " -"we've seen above). If you run the cell again, you should see another " -"batch of images." +"**Restructured documentation** " +"([#1387](https://github.com/adap/flower/pull/1387))" msgstr "" -"La sortie ci-dessus montre un lot aléatoire d'images provenant du premier" -" ``chargeur de formation`` de notre liste de dix ``chargeurs de " -"formation``. Elle imprime également les étiquettes associées à chaque " -"image (c'est-à-dire l'une des dix étiquettes possibles que nous avons " -"vues ci-dessus). Si tu exécutes à nouveau la cellule, tu devrais voir un " -"autre lot d'images." - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:252 -msgid "Step 1: Centralized Training with PyTorch" -msgstr "Étape 1 : Formation centralisée avec PyTorch" +"**Documentation restructurée** " +"([#1387](https://github.com/adap/flower/pull/1387))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:263 +#: ../../source/ref-changelog.md:917 msgid "" -"Next, we're going to use PyTorch to define a simple convolutional neural " -"network. This introduction assumes basic familiarity with PyTorch, so it " -"doesn't cover the PyTorch-related aspects in full detail. If you want to " -"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " -"MINUTE BLITZ " -"`__." +"The documentation has been restructured to make it easier to navigate. " +"This is just the first step in a larger effort to make the Flower " +"documentation the best documentation of any project ever. Stay tuned!" msgstr "" -"Ensuite, nous allons utiliser PyTorch pour définir un simple réseau " -"neuronal convolutif. Cette introduction suppose une familiarité de base " -"avec PyTorch, elle ne couvre donc pas en détail les aspects liés à " -"PyTorch. 
Si tu veux plonger plus profondément dans PyTorch, nous te " -"recommandons `DEEP LEARNING WITH PYTORCH : A 60 MINUTE BLITZ " -"`__." - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:275 -msgid "Defining the model" -msgstr "Définir le modèle" +"La documentation a été restructurée pour faciliter la navigation. Ce " +"n'est que la première étape d'un effort plus important visant à faire de " +"la documentation de Flower la meilleure documentation de tous les projets" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:277 +#: ../../source/ref-changelog.md:919 msgid "" -"We use the simple CNN described in the `PyTorch tutorial " -"`__:" +"**Open in Colab button** " +"([#1389](https://github.com/adap/flower/pull/1389))" msgstr "" -"Nous utilisons le CNN simple décrit dans le tutoriel `PyTorch " -"`__ :" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:314 -msgid "Let's continue with the usual training and test functions:" -msgstr "Poursuivons avec les fonctions habituelles de formation et de test :" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:374 -msgid "Training the model" -msgstr "Entraîne le modèle" +"**Ouvrir dans le bouton Colab** " +"([#1389](https://github.com/adap/flower/pull/1389))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:376 +#: ../../source/ref-changelog.md:921 msgid "" -"We now have all the basic building blocks we need: a dataset, a model, a " -"training function, and a test function. Let's put them together to train " -"the model on the dataset of one of our organizations " -"(``trainloaders[0]``). This simulates the reality of most machine " -"learning projects today: each organization has their own data and trains " -"models only on this internal data:" +"The four parts of the Flower Federated Learning Tutorial now come with a " +"new `Open in Colab` button. 
No need to install anything on your local " +"machine, you can now use and learn about Flower in your browser, it's " +"only a single click away." msgstr "" -"Nous avons maintenant tous les éléments de base dont nous avons besoin : " -"un ensemble de données, un modèle, une fonction d'entraînement et une " -"fonction de test. Assemblons-les pour entraîner le modèle sur l'ensemble " -"de données de l'une de nos organisations (``trainloaders[0]``). Cela " -"simule la réalité de la plupart des projets d'apprentissage automatique " -"aujourd'hui : chaque organisation possède ses propres données et entraîne" -" les modèles uniquement sur ces données internes :" +"Les quatre parties du didacticiel d'apprentissage fédéré Flower sont " +"maintenant accompagnées d'un nouveau bouton \"Ouvrir dans Colab\". Pas " +"besoin d'installer quoi que ce soit sur ta machine locale, tu peux " +"maintenant utiliser et apprendre à connaître Flower dans ton navigateur, " +"il te suffit d'un simple clic." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 +#: ../../source/ref-changelog.md:923 msgid "" -"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " -"in a test set accuracy of about 41%, which is not good, but at the same " -"time, it doesn't really matter for the purposes of this tutorial. The " -"intent was just to show a simplistic centralized training pipeline that " -"sets the stage for what comes next - federated learning!" 
+"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" msgstr "" -"L'entraînement du CNN simple sur notre fractionnement CIFAR-10 pendant 5 " -"époques devrait se traduire par une précision de l'ensemble de test " -"d'environ 41 %, ce qui n'est pas bon, mais en même temps, cela n'a pas " -"vraiment d'importance pour les besoins de ce tutoriel. L'intention était " -"juste de montrer un pipeline d'entraînement centralisé simpliste qui " -"prépare le terrain pour ce qui vient ensuite - l'apprentissage fédéré !" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 -msgid "Step 2: Federated Learning with Flower" -msgstr "Étape 2 : Apprentissage fédéré avec Flower" +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 +#: ../../source/ref-changelog.md:925 msgid "" -"Step 1 demonstrated a simple centralized training pipeline. All data was " -"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." -" Next, we'll simulate a situation where we have multiple datasets in " -"multiple organizations and where we train a model over these " -"organizations using federated learning." +"The Flower Federated Learning Tutorial has two brand-new parts covering " +"custom strategies (still WIP) and the distinction between `Client` and " +"`NumPyClient`. 
The existing parts one and two have also been improved " +"(many small changes and fixes)." msgstr "" -"L'étape 1 a montré un simple pipeline de formation centralisé. Toutes les" -" données étaient au même endroit (c'est-à-dire un seul ``trainloader`` et" -" un seul ``valloader``). Ensuite, nous allons simuler une situation où " -"nous avons plusieurs ensembles de données dans plusieurs organisations et" -" où nous formons un modèle sur ces organisations à l'aide de " -"l'apprentissage fédéré." - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 -msgid "Updating model parameters" -msgstr "Mise à jour des paramètres du modèle" +"Le tutoriel sur l'apprentissage fédéré des fleurs a deux toutes nouvelles" +" parties couvrant les stratégies personnalisées (encore WIP) et la " +"distinction entre `Client` et `NumPyClient`. Les parties un et deux " +"existantes ont également été améliorées (beaucoup de petits changements " +"et de corrections)." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 +#: ../../source/ref-changelog.md:931 +msgid "v1.0.0 (2022-07-28)" +msgstr "v1.0.0 (2022-07-28)" + +#: ../../source/ref-changelog.md:933 +msgid "Highlights" +msgstr "Points forts" + +#: ../../source/ref-changelog.md:935 +msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" +msgstr "Moteur de client virtuel stable** (accessible via `start_simulation`)" + +#: ../../source/ref-changelog.md:936 +msgid "All `Client`/`NumPyClient` methods are now optional" +msgstr "Toutes les méthodes `Client`/`NumPyClient` sont maintenant optionnelles" + +#: ../../source/ref-changelog.md:937 +msgid "Configurable `get_parameters`" +msgstr "`get_parameters` configurable" + +#: ../../source/ref-changelog.md:938 msgid "" -"In federated learning, the server sends the global model parameters to " -"the client, and the client updates the local model with the parameters " -"received from the server. 
It then trains the model on the local data " -"(which changes the model parameters locally) and sends the " -"updated/changed model parameters back to the server (or, alternatively, " -"it sends just the gradients back to the server, not the full model " -"parameters)." +"Tons of small API cleanups resulting in a more coherent developer " +"experience" msgstr "" -"Dans l'apprentissage fédéré, le serveur envoie les paramètres du modèle " -"global au client, et le client met à jour le modèle local avec les " -"paramètres reçus du serveur. Il entraîne ensuite le modèle sur les " -"données locales (ce qui modifie les paramètres du modèle localement) et " -"renvoie les paramètres du modèle mis à jour/changés au serveur (ou, " -"alternativement, il renvoie seulement les gradients au serveur, et non " -"pas les paramètres complets du modèle)." +"Des tonnes de petits nettoyages d'API résultant en une expérience plus " +"cohérente pour les développeurs" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 +#: ../../source/ref-changelog.md:942 msgid "" -"We need two helper functions to update the local model with parameters " -"received from the server and to get the updated model parameters from the" -" local model: ``set_parameters`` and ``get_parameters``. The following " -"two functions do just that for the PyTorch model above." +"We would like to give our **special thanks** to all the contributors who " +"made Flower 1.0 possible (in reverse [GitHub " +"Contributors](https://github.com/adap/flower/graphs/contributors) order):" msgstr "" -"Nous avons besoin de deux fonctions d'aide pour mettre à jour le modèle " -"local avec les paramètres reçus du serveur et pour obtenir les paramètres" -" mis à jour du modèle local : ``set_parameters`` et ``get_parameters``. " -"Les deux fonctions suivantes font exactement cela pour le modèle PyTorch " -"ci-dessus." 
+"Nous tenons à remercier **particulièrement** tous les contributeurs qui " +"ont rendu Flower 1.0 possible (dans l'ordre inverse de [GitHub " +"Contributors](https://github.com/adap/flower/graphs/contributors)) :" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 +#: ../../source/ref-changelog.md:944 msgid "" -"The details of how this works are not really important here (feel free to" -" consult the PyTorch documentation if you want to learn more). In " -"essence, we use ``state_dict`` to access PyTorch model parameter tensors." -" The parameter tensors are then converted to/from a list of NumPy " -"ndarray's (which Flower knows how to serialize/deserialize):" +"[@rtaiello](https://github.com/rtaiello), " +"[@g-pichler](https://github.com/g-pichler), [@rob-" +"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" +"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " +"[@nfnt](https://github.com/nfnt), " +"[@tatiana-s](https://github.com/tatiana-s), " +"[@TParcollet](https://github.com/TParcollet), " +"[@vballoli](https://github.com/vballoli), " +"[@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " +"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " +"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" +"/Jueun-Park), [@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), " +"[@mrinaald](https://github.com/mrinaald), " +"[@zliel](https://github.com/zliel), " +"[@MeiruiJiang](https://github.com/MeiruiJiang), " +"[@sancarlim](https://github.com/sancarlim), " 
+"[@gubertoli](https://github.com/gubertoli), " +"[@Vingt100](https://github.com/Vingt100), " +"[@MakGulati](https://github.com/MakGulati), " +"[@cozek](https://github.com/cozek), " +"[@jafermarq](https://github.com/jafermarq), " +"[@sisco0](https://github.com/sisco0), " +"[@akhilmathurs](https://github.com/akhilmathurs), " +"[@CanTuerk](https://github.com/CanTuerk), " +"[@mariaboerner1987](https://github.com/mariaboerner1987), " +"[@pedropgusmao](https://github.com/pedropgusmao), " +"[@tanertopal](https://github.com/tanertopal), " +"[@danieljanes](https://github.com/danieljanes)." msgstr "" -"Les détails de ce fonctionnement ne sont pas vraiment importants ici " -"(n'hésite pas à consulter la documentation PyTorch si tu veux en savoir " -"plus). En substance, nous utilisons ``state_dict`` pour accéder aux " -"tenseurs de paramètres du modèle PyTorch. Les tenseurs de paramètres sont" -" ensuite convertis en/depuis une liste de ndarray NumPy (que Flower sait " -"sérialiser/désérialiser) :" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 -msgid "Implementing a Flower client" -msgstr "Mise en place d'un client Flower" +"[@rtaiello](https://github.com/rtaiello), " +"[@g-pichler](https://github.com/g-pichler), [@rob-" +"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" +"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " +"[@nfnt](https://github.com/nfnt), " +"[@tatiana-s](https://github.com/tatiana-s), " +"[@TParcollet](https://github.com/TParcollet), " +"[@vballoli](https://github.com/vballoli), " +"[@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " +"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " 
+"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" +"/Jueun-Park), [@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), [@mrinaald](" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 +#: ../../source/ref-changelog.md:948 msgid "" -"With that out of the way, let's move on to the interesting part. " -"Federated learning systems consist of a server and multiple clients. In " -"Flower, we create clients by implementing subclasses of " -"``flwr.client.Client`` or ``flwr.client.NumPyClient``. We use " -"``NumPyClient`` in this tutorial because it is easier to implement and " -"requires us to write less boilerplate." +"**All arguments must be passed as keyword arguments** " +"([#1338](https://github.com/adap/flower/pull/1338))" msgstr "" -"Ceci étant dit, passons à la partie intéressante. Les systèmes " -"d'apprentissage fédérés se composent d'un serveur et de plusieurs " -"clients. Dans Flower, nous créons des clients en mettant en œuvre des " -"sous-classes de ``flwr.client.Client`` ou de ``flwr.client.NumPyClient``." -" Nous utilisons ``NumPyClient`` dans ce tutoriel parce qu'il est plus " -"facile à mettre en œuvre et qu'il nous oblige à rédiger moins de modèles " -"de chaudière." +"**Tous les arguments doivent être passés comme des arguments de mot-clé**" +" ([#1338](https://github.com/adap/flower/pull/1338))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 +#: ../../source/ref-changelog.md:950 +#, fuzzy msgid "" -"To implement the Flower client, we create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``:" +"Pass all arguments as keyword arguments, positional arguments are not " +"longer supported. 
Code that uses positional arguments (e.g., " +"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " +"for each positional argument (e.g., " +"`start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())`)." msgstr "" -"Pour mettre en œuvre le client Flower, nous créons une sous-classe de " -"``flwr.client.NumPyClient`` et mettons en œuvre les trois méthodes " -"``get_parameters``, ``fit`` et ``evaluate`` :" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 -msgid "``get_parameters``: Return the current local model parameters" -msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" +"Le code qui utilise des arguments positionnels (par exemple, " +"``start_client(\"127.0.0.1:8080\", FlowerClient())`) doit ajouter le mot-" +"clé pour chaque argument positionnel (par exemple, " +"``start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())`)." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 +#: ../../source/ref-changelog.md:952 msgid "" -"``fit``: Receive model parameters from the server, train the model " -"parameters on the local data, and return the (updated) model parameters " -"to the server" +"**Introduce configuration object** `ServerConfig` **in** `start_server` " +"**and** `start_simulation` " +"([#1317](https://github.com/adap/flower/pull/1317))" msgstr "" -"``fit`` : reçoit les paramètres du modèle du serveur, entraîne les " -"paramètres du modèle sur les données locales et renvoie les paramètres du" -" modèle (mis à jour) au serveur" +"**Introduire l'objet de configuration** `ServerConfig` **dans** " +"`start_server` **et** `start_simulation` " +"([#1317](https://github.com/adap/flower/pull/1317))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 +#: ../../source/ref-changelog.md:954 msgid "" -"``evaluate``: Receive model parameters from the server, evaluate the " -"model parameters on the local data, and 
return the evaluation result to " -"the server" +"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " +"600.0}`, `start_server` and `start_simulation` now expect a configuration" +" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" +" arguments that as the previous config dict, but it makes writing type-" +"safe code easier and the default parameters values more transparent." msgstr "" -"``evaluate`` : reçoit les paramètres du modèle du serveur, évalue les " -"paramètres du modèle sur les données locales et renvoie le résultat de " -"l'évaluation au serveur" +"Au lieu d'un dictionnaire de configuration `{\"num_rounds\" : 3, " +"\"round_timeout\" : 600.0}`, `start_server` et `start_simulation` " +"attendent maintenant un objet de configuration de type " +"`flwr.server.ServerConfig`. `ServerConfig` prend les mêmes arguments que " +"le dict de configuration précédent, mais il rend l'écriture de code " +"sécurisé plus facile et les valeurs des paramètres par défaut plus " +"transparentes." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 +#: ../../source/ref-changelog.md:956 msgid "" -"We mentioned that our clients will use the previously defined PyTorch " -"components for model training and evaluation. Let's see a simple Flower " -"client implementation that brings everything together:" +"**Rename built-in strategy parameters for clarity** " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -"Nous avons mentionné que nos clients utiliseront les composants PyTorch " -"définis précédemment pour la formation et l'évaluation des modèles. 
" -"Voyons une simple mise en œuvre du client Flower qui réunit tout cela :" +"**Renommer les paramètres de la stratégie intégrée pour plus de clarté** " +"([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 +#: ../../source/ref-changelog.md:958 msgid "" -"Our class ``FlowerClient`` defines how local training/evaluation will be " -"performed and allows Flower to call the local training/evaluation through" -" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" -" *single client* in our federated learning system. Federated learning " -"systems have multiple clients (otherwise, there's not much to federate), " -"so each client will be represented by its own instance of " -"``FlowerClient``. If we have, for example, three clients in our workload," -" then we'd have three instances of ``FlowerClient``. Flower calls " -"``FlowerClient.fit`` on the respective instance when the server selects a" -" particular client for training (and ``FlowerClient.evaluate`` for " -"evaluation)." +"The following built-in strategy parameters were renamed to improve " +"readability and consistency with other API's:" msgstr "" -"Our class ``FlowerClient`` defines how local training/evaluation will be " -"performed and allows Flower to call the local training/evaluation through" -" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" -" *single client* in our federated learning system. Federated learning " -"systems have multiple clients (otherwise, there's not much to federate), " -"so each client will be represented by its own instance of " -"``FlowerClient``. If we have, for example, three clients in our workload," -" then we'd have three instances of ``FlowerClient``. Flower calls " -"``FlowerClient.fit`` on the respective instance when the server selects a" -" particular client for training (and ``FlowerClient.evaluate`` for " -"evaluation)." 
+"Les paramètres de stratégie intégrés suivants ont été renommés pour " +"améliorer la lisibilité et la cohérence avec d'autres API :" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:517 -msgid "Using the Virtual Client Engine" -msgstr "Utilisation du moteur du client virtuel" +#: ../../source/ref-changelog.md:960 +msgid "`fraction_eval` --> `fraction_evaluate`" +msgstr "`fraction_eval` --> `fraction_evaluate`" + +#: ../../source/ref-changelog.md:961 +msgid "`min_eval_clients` --> `min_evaluate_clients`" +msgstr "`min_eval_clients` --> `min_evaluate_clients`" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:519 +#: ../../source/ref-changelog.md:962 +msgid "`eval_fn` --> `evaluate_fn`" +msgstr "`eval_fn` --> `evaluate_fn`" + +#: ../../source/ref-changelog.md:964 msgid "" -"In this notebook, we want to simulate a federated learning system with 10" -" clients on a single machine. This means that the server and all 10 " -"clients will live on a single machine and share resources such as CPU, " -"GPU, and memory. Having 10 clients would mean having 10 instances of " -"``FlowerClient`` in memory. Doing this on a single machine can quickly " -"exhaust the available memory resources, even if only a subset of these " -"clients participates in a single round of federated learning." +"**Update default arguments of built-in strategies** " +"([#1278](https://github.com/adap/flower/pull/1278))" msgstr "" -"Dans ce carnet, nous voulons simuler un système d'apprentissage fédéré " -"avec 10 clients sur une seule machine. Cela signifie que le serveur et " -"les 10 clients vivront sur une seule machine et partageront des " -"ressources telles que le CPU, le GPU et la mémoire. Avoir 10 clients " -"signifierait avoir 10 instances de ``FlowerClient`` en mémoire. 
Faire " -"cela sur une seule machine peut rapidement épuiser les ressources mémoire" -" disponibles, même si seulement un sous-ensemble de ces clients participe" -" à un seul tour d'apprentissage fédéré." +"**Mettre à jour les arguments par défaut des stratégies intégrées** " +"([#1278](https://github.com/adap/flower/pull/1278))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:521 +#: ../../source/ref-changelog.md:966 msgid "" -"In addition to the regular capabilities where server and clients run on " -"multiple machines, Flower, therefore, provides special simulation " -"capabilities that create ``FlowerClient`` instances only when they are " -"actually necessary for training or evaluation. To enable the Flower " -"framework to create clients when necessary, we need to implement a " -"function called ``client_fn`` that creates a ``FlowerClient`` instance on" -" demand. Flower calls ``client_fn`` whenever it needs an instance of one " -"particular client to call ``fit`` or ``evaluate`` (those instances are " -"usually discarded after use, so they should not keep any local state). " -"Clients are identified by a client ID, or short ``cid``. The ``cid`` can " -"be used, for example, to load different local data partitions for " -"different clients, as can be seen below:" +"All built-in strategies now use `fraction_fit=1.0` and " +"`fraction_evaluate=1.0`, which means they select *all* currently " +"available clients for training and evaluation. Projects that relied on " +"the previous default values can get the previous behaviour by " +"initializing the strategy in the following way:" msgstr "" -"In addition to the regular capabilities where server and clients run on " -"multiple machines, Flower, therefore, provides special simulation " -"capabilities that create ``FlowerClient`` instances only when they are " -"actually necessary for training or evaluation. 
To enable the Flower " -"framework to create clients when necessary, we need to implement a " -"function called ``client_fn`` that creates a ``FlowerClient`` instance on" -" demand. Flower calls ``client_fn`` whenever it needs an instance of one " -"particular client to call ``fit`` or ``evaluate`` (those instances are " -"usually discarded after use, so they should not keep any local state). " -"Clients are identified by a client ID, or short ``cid``. The ``cid`` can " -"be used, for example, to load different local data partitions for " -"different clients, as can be seen below:" +"Toutes les stratégies intégrées utilisent désormais `fraction_fit=1.0` et" +" `fraction_evaluate=1.0`, ce qui signifie qu'elles sélectionnent *tous* " +"les clients actuellement disponibles pour l'entraînement et l'évaluation." +" Les projets qui s'appuyaient sur les valeurs par défaut précédentes " +"peuvent retrouver le comportement antérieur en initialisant la stratégie " +"de la manière suivante :" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:556 -msgid "Starting the training" -msgstr "Commencer la formation" +#: ../../source/ref-changelog.md:968 +msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" +msgstr "`stratégie = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:558 +#: ../../source/ref-changelog.md:970 msgid "" -"We now have the class ``FlowerClient`` which defines client-side " -"training/evaluation and ``client_fn`` which allows Flower to create " -"``FlowerClient`` instances whenever it needs to call ``fit`` or " -"``evaluate`` on one particular client. The last step is to start the " -"actual simulation using ``flwr.simulation.start_simulation``." 
+"**Add** `server_round` **to** `Strategy.evaluate` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -"Nous avons maintenant la classe ``FlowerClient`` qui définit " -"l'entraînement/évaluation côté client et ``client_fn`` qui permet à " -"Flower de créer des instances de ``FlowerClient`` chaque fois qu'il a " -"besoin d'appeler ``fit`` ou ``evaluate`` sur un client particulier. La " -"dernière étape consiste à démarrer la simulation réelle en utilisant " -"``flwr.simulation.start_simulation``." +"**Ajouter** `server_round` **à** `Strategy.evaluate` " +"([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:560 +#: ../../source/ref-changelog.md:972 msgid "" -"The function ``start_simulation`` accepts a number of arguments, amongst " -"them the ``client_fn`` used to create ``FlowerClient`` instances, the " -"number of clients to simulate (``num_clients``), the number of federated " -"learning rounds (``num_rounds``), and the strategy. The strategy " -"encapsulates the federated learning approach/algorithm, for example, " -"*Federated Averaging* (FedAvg)." +"The `Strategy` method `evaluate` now receives the current round of " +"federated learning/evaluation as the first parameter." msgstr "" -"La fonction ``start_simulation`` accepte un certain nombre d'arguments, " -"parmi lesquels le ``client_fn`` utilisé pour créer les instances " -"``FlowerClient``, le nombre de clients à simuler (``num_clients``), le " -"nombre de tours d'apprentissage fédéré (``num_rounds``), et la stratégie." -" La stratégie encapsule l'approche/algorithme d'apprentissage fédéré, par" -" exemple, *Federated Averaging* (FedAvg)." +"La méthode `Stratégie` `évaluer` reçoit maintenant le cycle actuel " +"d'apprentissage/évaluation fédéré comme premier paramètre." 
-#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:562 +#: ../../source/ref-changelog.md:974 msgid "" -"Flower has a number of built-in strategies, but we can also use our own " -"strategy implementations to customize nearly all aspects of the federated" -" learning approach. For this example, we use the built-in ``FedAvg`` " -"implementation and customize it using a few basic parameters. The last " -"step is the actual call to ``start_simulation`` which - you guessed it - " -"starts the simulation:" +"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -"Flower dispose d'un certain nombre de stratégies intégrées, mais nous " -"pouvons également utiliser nos propres implémentations de stratégies pour" -" personnaliser presque tous les aspects de l'approche de l'apprentissage " -"fédéré. Pour cet exemple, nous utilisons l'implémentation intégrée " -"``FedAvg`` et nous la personnalisons en utilisant quelques paramètres de " -"base. La dernière étape est l'appel à ``start_simulation`` qui - tu l'as " -"deviné - démarre la simulation :" +"**Ajouter** `server_round` **et** `config` **paramètres à** `evaluate_fn`" +" ([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:608 -msgid "Behind the scenes" -msgstr "Dans les coulisses" +#: ../../source/ref-changelog.md:976 +msgid "" +"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " +"three parameters: (1) The current round of federated learning/evaluation " +"(`server_round`), (2) the model parameters to evaluate (`parameters`), " +"and (3) a config dictionary (`config`)." 
+msgstr "" +"Le `evaluate_fn` passé aux stratégies intégrées comme `FedAvg` prend " +"maintenant trois paramètres : (1) le cycle actuel " +"d'apprentissage/évaluation fédéré (`server_round`), (2) les paramètres du" +" modèle à évaluer (`parameters`), et (3) un dictionnaire de configuration" +" (`config`)." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:610 -msgid "So how does this work? How does Flower execute this simulation?" +#: ../../source/ref-changelog.md:978 +msgid "" +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" msgstr "" -"Alors, comment cela fonctionne-t-il ? Comment Flower exécute-t-il cette " -"simulation ?" +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:612 -#, python-format +#: ../../source/ref-changelog.md:980 msgid "" -"When we call ``start_simulation``, we tell Flower that there are 10 " -"clients (``num_clients=10``). Flower then goes ahead an asks the " -"``FedAvg`` strategy to select clients. ``FedAvg`` knows that it should " -"select 100% of the available clients (``fraction_fit=1.0``), so it goes " -"ahead and selects 10 random clients (i.e., 100% of 10)." +"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " +"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " +"current round of federated learning/evaluation as their first parameter. " +"To improve reaability and avoid confusion with *random*, this parameter " +"has been renamed from `rnd` to `server_round`." msgstr "" -"Lorsque nous appelons ``start_simulation``, nous disons à Flower qu'il y " -"a 10 clients (``num_clients=10``). Flower demande alors à la stratégie " -"``FedAvg`` de sélectionner des clients. 
``FedAvg` sait qu'il doit " -"sélectionner 100% des clients disponibles (``fraction_fit=1.0``), alors " -"il choisit 10 clients au hasard (c'est à dire 100% de 10)." +"Plusieurs méthodes et fonctions de Flower (`evaluate_fn`, " +"`configure_fit`, `aggregate_fit`, `configure_evaluate`, " +"`aggregate_evaluate`) reçoivent le cycle actuel " +"d'apprentissage/évaluation fédéré comme premier paramètre. Pour améliorer" +" la fiabilité et éviter la confusion avec *random*, ce paramètre a été " +"renommé de `rnd` à `server_round`." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:614 +#: ../../source/ref-changelog.md:982 msgid "" -"Flower then asks the selected 10 clients to train the model. When the " -"server receives the model parameter updates from the clients, it hands " -"those updates over to the strategy (*FedAvg*) for aggregation. The " -"strategy aggregates those updates and returns the new global model, which" -" then gets used in the next round of federated learning." +"**Move** `flwr.dataset` **to** `flwr_baselines` " +"([#1273](https://github.com/adap/flower/pull/1273))" msgstr "" -"Flower demande ensuite aux 10 clients sélectionnés d'entraîner le modèle." -" Lorsque le serveur reçoit les mises à jour des paramètres du modèle de " -"la part des clients, il les transmet à la stratégie (*FedAvg*) pour " -"qu'elle les agrège. La stratégie agrège ces mises à jour et renvoie le " -"nouveau modèle global, qui est ensuite utilisé dans le prochain cycle " -"d'apprentissage fédéré." +"**Déplacer** `flwr.dataset` **vers** `flwr_baselines` " +"([#1273](https://github.com/adap/flower/pull/1273))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:626 -msgid "Where's the accuracy?" -msgstr "Où est la précision ?" +#: ../../source/ref-changelog.md:984 +msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." +msgstr "Le paquet expérimental `flwr.dataset` a été migré vers Flower Baselines." 
-#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:628 +#: ../../source/ref-changelog.md:986 msgid "" -"You may have noticed that all metrics except for ``losses_distributed`` " -"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" +"**Remove experimental strategies** " +"([#1280](https://github.com/adap/flower/pull/1280))" msgstr "" -"Tu as peut-être remarqué que toutes les mesures, à l'exception de " -"``pertes_distribuées``, sont vides. Où est passée la ``{\"précision\" : " -"float(précision)}`` ?" +"**Supprimer les stratégies expérimentales** " +"([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:630 +#: ../../source/ref-changelog.md:988 msgid "" -"Flower can automatically aggregate losses returned by individual clients," -" but it cannot do the same for metrics in the generic metrics dictionary " -"(the one with the ``accuracy`` key). Metrics dictionaries can contain " -"very different kinds of metrics and even key/value pairs that are not " -"metrics at all, so the framework does not (and can not) know how to " -"handle these automatically." +"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " +"`FedFSv1`)." msgstr "" -"Flower peut automatiquement agréger les pertes renvoyées par les clients " -"individuels, mais il ne peut pas faire la même chose pour les mesures " -"dans le dictionnaire de mesures générique (celui avec la clé " -"``accuracy``). Les dictionnaires de mesures peuvent contenir des types de" -" mesures très différents et même des paires clé/valeur qui ne sont pas " -"des mesures du tout, donc le cadre ne sait pas (et ne peut pas) savoir " -"comment les gérer automatiquement." +"Supprimer les stratégies expérimentales non maintenues (`FastAndSlow`, " +"`FedFSv0`, `FedFSv1`)." 
-#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:632
+#: ../../source/ref-changelog.md:990
 msgid ""
-"As users, we need to tell the framework how to handle/aggregate these "
-"custom metrics, and we do so by passing metric aggregation functions to "
-"the strategy. The strategy will then call these functions whenever it "
-"receives fit or evaluate metrics from clients. The two possible functions"
-" are ``fit_metrics_aggregation_fn`` and "
-"``evaluate_metrics_aggregation_fn``."
+"**Rename** `Weights` **to** `NDArrays` "
+"([#1258](https://github.com/adap/flower/pull/1258), "
+"[#1259](https://github.com/adap/flower/pull/1259))"
 msgstr ""
-"En tant qu'utilisateurs, nous devons indiquer au framework comment "
-"gérer/agréger ces métriques personnalisées, et nous le faisons en passant"
-" des fonctions d'agrégation de métriques à la stratégie. La stratégie "
-"appellera alors ces fonctions chaque fois qu'elle recevra des métriques "
-"d'ajustement ou d'évaluation de la part des clients. Les deux fonctions "
-"possibles sont ``fit_metrics_aggregation_fn`` et "
-"``evaluate_metrics_aggregation_fn``."
+"**Rename** `Weights` **to** `NDArrays` "
+"([#1258](https://github.com/adap/flower/pull/1258), "
+"[#1259](https://github.com/adap/flower/pull/1259))"

-#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:634
+#: ../../source/ref-changelog.md:992
 msgid ""
-"Let's create a simple weighted averaging function to aggregate the "
-"``accuracy`` metric we return from ``evaluate``:"
+"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better "
+"capture what this type is all about."
 msgstr ""
-"Créons une simple fonction de calcul de la moyenne pondérée pour agréger "
-"la mesure de \"précision\" que nous renvoie ``evaluate`` :"
+"`flwr.common.Weights` a été renommé en `flwr.common.NDArrays` pour mieux "
+"rendre compte de la nature de ce type."
-#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:660 +#: ../../source/ref-changelog.md:994 msgid "" -"The only thing left to do is to tell the strategy to call this function " -"whenever it receives evaluation metric dictionaries from the clients:" +"**Remove antiquated** `force_final_distributed_eval` **from** " +"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -"La seule chose qui reste à faire est d'indiquer à la stratégie d'appeler " -"cette fonction chaque fois qu'elle reçoit des dictionnaires de métriques " -"d'évaluation de la part des clients :" +"**Supprimez l'ancien** `force_final_distributed_eval` **de** " +"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:697 +#: ../../source/ref-changelog.md:996 msgid "" -"We now have a full system that performs federated training and federated " -"evaluation. It uses the ``weighted_average`` function to aggregate custom" -" evaluation metrics and calculates a single ``accuracy`` metric across " -"all clients on the server side." +"The `start_server` parameter `force_final_distributed_eval` has long been" +" a historic artefact, in this release it is finally gone for good." msgstr "" -"Nous avons maintenant un système complet qui effectue la formation " -"fédérée et l'évaluation fédérée. Il utilise la fonction ``moyenne " -"pondérée`` pour agréger les mesures d'évaluation personnalisées et " -"calcule une seule mesure de ``précision`` pour tous les clients du côté " -"du serveur." +"Le paramètre `start_server` `force_final_distributed_eval` a longtemps " +"été un artefact historique, dans cette version il a finalement disparu " +"pour de bon." 
-#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:699 +#: ../../source/ref-changelog.md:998 msgid "" -"The other two categories of metrics (``losses_centralized`` and " -"``metrics_centralized``) are still empty because they only apply when " -"centralized evaluation is being used. Part two of the Flower tutorial " -"will cover centralized evaluation." +"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" msgstr "" -"Les deux autres catégories de mesures (``pertes_centralisées`` et " -"``métriques_centralisées``) sont toujours vides car elles ne s'appliquent" -" que lorsque l'évaluation centralisée est utilisée. La deuxième partie du" -" tutoriel sur les fleurs couvrira l'évaluation centralisée." - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 -msgid "Final remarks" -msgstr "Remarques finales" +"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 +#: ../../source/ref-changelog.md:1000 msgid "" -"Congratulations, you just trained a convolutional neural network, " -"federated over 10 clients! With that, you understand the basics of " -"federated learning with Flower. The same approach you've seen can be used" -" with other machine learning frameworks (not just PyTorch) and tasks (not" -" just CIFAR-10 images classification), for example NLP with Hugging Face " -"Transformers or speech with SpeechBrain." +"The `get_parameters` method now accepts a configuration dictionary, just " +"like `get_properties`, `fit`, and `evaluate`." msgstr "" -"Félicitations, tu viens d'entraîner un réseau neuronal convolutif, fédéré" -" sur 10 clients ! Avec ça, tu comprends les bases de l'apprentissage " -"fédéré avec Flower. 
La même approche que tu as vue peut être utilisée " -"avec d'autres cadres d'apprentissage automatique (pas seulement PyTorch) " -"et d'autres tâches (pas seulement la classification des images CIFAR-10)," -" par exemple le NLP avec Hugging Face Transformers ou la parole avec " -"SpeechBrain." +"La méthode `get_parameters` accepte maintenant un dictionnaire de " +"configuration, tout comme `get_properties`, `fit`, et `evaluate`." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:715 +#: ../../source/ref-changelog.md:1002 msgid "" -"In the next notebook, we're going to cover some more advanced concepts. " -"Want to customize your strategy? Initialize parameters on the server " -"side? Or evaluate the aggregated model on the server side? We'll cover " -"all this and more in the next tutorial." +"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " +"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -"Dans le prochain cahier, nous allons aborder des concepts plus avancés. " -"Tu veux personnaliser ta stratégie ? Initialiser des paramètres côté " -"serveur ? Ou évaluer le modèle agrégé côté serveur ? Nous aborderons tout" -" cela et bien plus encore dans le prochain tutoriel." +"**Remplace** `num_rounds` **dans** `start_simulation` **avec le nouveau**" +" `config` **paramètre** " +"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:733 -#, fuzzy +#: ../../source/ref-changelog.md:1004 msgid "" -"The `Flower Federated Learning Tutorial - Part 2 " -"`__ goes into more depth about strategies and all " -"the advanced things you can build with them." +"The `start_simulation` function now accepts a configuration dictionary " +"`config` instead of the `num_rounds` integer. This improves the " +"consistency between `start_simulation` and `start_server` and makes " +"transitioning between the two easier." 
msgstr "" -"Le `Tutoriel d'apprentissage fédéré Flower - Partie 2 " -"`__ va plus en profondeur sur les stratégies et toutes les " -"choses avancées que tu peux construire avec elles." - -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 -#, fuzzy -msgid "Use a federated learning strategy" -msgstr "Stratégie de moyenne fédérée." +"La fonction `start_simulation` accepte maintenant un dictionnaire de " +"configuration `config` au lieu de l'entier `num_rounds`. Cela améliore la" +" cohérence entre `start_simulation` et `start_server` et facilite la " +"transition entre les deux." -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 -#, fuzzy +#: ../../source/ref-changelog.md:1008 msgid "" -"Welcome to the next part of the federated learning tutorial. In previous " -"parts of this tutorial, we introduced federated learning with PyTorch and" -" Flower (`part 1 `__)." +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" msgstr "" -"Bienvenue dans la prochaine partie du tutoriel sur l'apprentissage " -"fédéré. Dans les parties précédentes de ce tutoriel, nous avons présenté " -"l'apprentissage fédéré avec PyTorch et Flower (`partie 1 " -"`__)." +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 +#: ../../source/ref-changelog.md:1010 msgid "" -"In this notebook, we'll begin to customize the federated learning system " -"we built in the introductory notebook (again, using `Flower " -"`__ and `PyTorch `__)." +"The previous Flower release introduced experimental support for Python " +"3.10, this release declares Python 3.10 support as stable." msgstr "" -"Dans ce carnet, nous allons commencer à personnaliser le système " -"d'apprentissage fédéré que nous avons construit dans le carnet " -"d'introduction (toujours en utilisant `Flower `__ et " -"`PyTorch `__)." 
- -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:17 -#, fuzzy -msgid "Let's move beyond FedAvg with Flower strategies!" -msgstr "Dépassons FedAvg avec les stratégies florales !" - -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:309 -msgid "Strategy customization" -msgstr "Personnalisation de la stratégie" +"La version précédente de Flower a introduit la prise en charge " +"expérimentale de Python 3.10, cette version déclare la prise en charge de" +" Python 3.10 comme stable." -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:311 +#: ../../source/ref-changelog.md:1012 msgid "" -"So far, everything should look familiar if you've worked through the " -"introductory notebook. With that, we're ready to introduce a number of " -"new features." +"**Make all** `Client` **and** `NumPyClient` **methods optional** " +"([#1260](https://github.com/adap/flower/pull/1260), " +"[#1277](https://github.com/adap/flower/pull/1277))" msgstr "" -"Jusqu'à présent, tout devrait te sembler familier si tu as travaillé sur " -"le cahier d'introduction. Avec cela, nous sommes prêts à présenter un " -"certain nombre de nouvelles fonctionnalités." - -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 -msgid "Server-side parameter **initialization**" -msgstr "Paramètres côté serveur **initialisation**" +"**Rendre toutes les **méthodes `Client` **et** `NumPyClient` " +"**facultatives** ([#1260](https://github.com/adap/flower/pull/1260), " +"[#1277](https://github.com/adap/flower/pull/1277))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:325 +#: ../../source/ref-changelog.md:1014 msgid "" -"Flower, by default, initializes the global model by asking one random " -"client for the initial parameters. In many cases, we want more control " -"over parameter initialization though. 
Flower therefore allows you to " -"directly pass the initial parameters to the Strategy:" +"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " +"`fit`, and `evaluate` are all optional. This enables writing clients that" +" implement, for example, only `fit`, but no other method. No need to " +"implement `evaluate` when using centralized evaluation!" msgstr "" -"Flower, par défaut, initialise le modèle global en demandant à un client " -"aléatoire les paramètres initiaux. Dans de nombreux cas, nous voulons " -"cependant avoir plus de contrôle sur l'initialisation des paramètres. " -"Flower te permet donc de passer directement les paramètres initiaux à la " -"Stratégie :" +"Les méthodes `Client`/`NumPyClient` `get_properties`, `get_parameters`, " +"`fit`, et `evaluate` sont toutes optionnelles. Cela permet d'écrire des " +"clients qui n'implémentent, par exemple, que `fit`, mais aucune autre " +"méthode. Pas besoin d'implémenter `evaluate` quand on utilise " +"l'évaluation centralisée !" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:370 +#: ../../source/ref-changelog.md:1016 msgid "" -"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" -" from asking one of the clients for the initial parameters. If we look " -"closely, we can see that the logs do not show any calls to the " -"``FlowerClient.get_parameters`` method." +"**Enable passing a** `Server` **instance to** `start_simulation` " +"([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -"Le fait de passer ``initial_parameters`` à la stratégie ``FedAvg`` " -"empêche Flower de demander les paramètres initiaux à l'un des clients. Si" -" nous regardons de près, nous pouvons voir que les journaux ne montrent " -"aucun appel à la méthode ``FlowerClient.get_parameters``." 
+"**Autoriser le passage d'une** instance `Server` **à** `start_simulation` "
+"([#1281](https://github.com/adap/flower/pull/1281))"

-#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:382
-msgid "Starting with a customized strategy"
-msgstr "Commencer par une stratégie personnalisée"

+#: ../../source/ref-changelog.md:1018
+msgid ""
+"Similar to `start_server`, `start_simulation` now accepts a full `Server`"
+" instance. This enables users to heavily customize the execution of "
+"eperiments and opens the door to running, for example, async FL using the"
+" Virtual Client Engine."
+msgstr ""
+"Comme pour `start_server`, `start_simulation` accepte maintenant une "
+"instance complète de `Server`. Cela permet aux utilisateurs de "
+"personnaliser fortement l'exécution des expériences et ouvre la porte à "
+"l'exécution, par exemple, de FL asynchrones à l'aide du moteur de client "
+"virtuel."

-#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:384
+#: ../../source/ref-changelog.md:1020
 msgid ""
-"We've seen the function ``start_simulation`` before. It accepts a number "
-"of arguments, amongst them the ``client_fn`` used to create "
-"``FlowerClient`` instances, the number of clients to simulate "
-"``num_clients``, the number of rounds ``num_rounds``, and the strategy."
+"**Update code examples** "
+"([#1291](https://github.com/adap/flower/pull/1291), "
+"[#1286](https://github.com/adap/flower/pull/1286), "
+"[#1282](https://github.com/adap/flower/pull/1282))"
 msgstr ""
-"Elle accepte un certain nombre d'arguments, parmi lesquels le "
-"``client_fn`` utilisé pour créer les instances de ``FlowerClient``, le "
-"nombre de clients à simuler ``num_clients``, le nombre de rounds "
-"``num_rounds``, et la stratégie."
+"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:386 +#: ../../source/ref-changelog.md:1022 msgid "" -"The strategy encapsulates the federated learning approach/algorithm, for " -"example, ``FedAvg`` or ``FedAdagrad``. Let's try to use a different " -"strategy this time:" +"Many code examples received small or even large maintenance updates, " +"among them are" msgstr "" -"La stratégie englobe l'approche/l'algorithme d'apprentissage fédéré, par " -"exemple, ``FedAvg`` ou ``FedAdagrad``. Essayons d'utiliser une stratégie " -"différente cette fois-ci :" +"De nombreux exemples de code ont reçu de petites ou même de grandes mises" +" à jour de maintenance" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:424 -msgid "Server-side parameter **evaluation**" -msgstr "Paramètre côté serveur **évaluation**" +#: ../../source/ref-changelog.md:1024 +msgid "`scikit-learn`" +msgstr "`scikit-learn`" + +#: ../../source/ref-changelog.md:1025 +msgid "`simulation_pytorch`" +msgstr "`simulation_pytorch`" + +#: ../../source/ref-changelog.md:1026 +msgid "`quickstart_pytorch`" +msgstr "`quickstart_pytorch` (démarrage rapide)" + +#: ../../source/ref-changelog.md:1027 +msgid "`quickstart_simulation`" +msgstr "`quickstart_simulation`" + +#: ../../source/ref-changelog.md:1028 +msgid "`quickstart_tensorflow`" +msgstr "`quickstart_tensorflow`" + +#: ../../source/ref-changelog.md:1029 +msgid "`advanced_tensorflow`" +msgstr "`advanced_tensorflow` (en anglais)" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:426 +#: ../../source/ref-changelog.md:1031 msgid "" -"Flower can evaluate the aggregated model on the server-side or on the " -"client-side. 
Client-side and server-side evaluation are similar in some " -"ways, but different in others." +"**Remove the obsolete simulation example** " +"([#1328](https://github.com/adap/flower/pull/1328))" msgstr "" -"Flower peut évaluer le modèle agrégé côté serveur ou côté client. Les " -"évaluations côté client et côté serveur sont similaires à certains " -"égards, mais différentes à d'autres." +"**Supprime l'exemple de simulation obsolète** " +"([#1328](https://github.com/adap/flower/pull/1328))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:428 +#: ../../source/ref-changelog.md:1033 msgid "" -"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " -"simple: it works the same way that evaluation in centralized machine " -"learning does. If there is a server-side dataset that can be used for " -"evaluation purposes, then that's great. We can evaluate the newly " -"aggregated model after each round of training without having to send the " -"model to clients. We're also fortunate in the sense that our entire " -"evaluation dataset is available at all times." +"Removes the obsolete `simulation` example and renames " +"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " +"naming of `simulation_pytorch`" msgstr "" -"**L'évaluation centralisée** (ou *évaluation côté serveur*) est " -"conceptuellement simple : elle fonctionne de la même manière que " -"l'évaluation dans l'apprentissage automatique centralisé. S'il existe un " -"ensemble de données côté serveur qui peut être utilisé à des fins " -"d'évaluation, alors c'est parfait. Nous pouvons évaluer le modèle " -"nouvellement agrégé après chaque cycle de formation sans avoir à envoyer " -"le modèle aux clients. Nous avons également la chance que l'ensemble de " -"notre ensemble de données d'évaluation soit disponible à tout moment." 
+"Supprime l'exemple obsolète `simulation` et renomme " +"`quickstart_simulation` en `simulation_tensorflow` pour qu'il corresponde" +" au nom de `simulation_pytorch`" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:430 +#: ../../source/ref-changelog.md:1035 msgid "" -"**Federated Evaluation** (or *client-side evaluation*) is more complex, " -"but also more powerful: it doesn't require a centralized dataset and " -"allows us to evaluate models over a larger set of data, which often " -"yields more realistic evaluation results. In fact, many scenarios require" -" us to use **Federated Evaluation** if we want to get representative " -"evaluation results at all. But this power comes at a cost: once we start " -"to evaluate on the client side, we should be aware that our evaluation " -"dataset can change over consecutive rounds of learning if those clients " -"are not always available. Moreover, the dataset held by each client can " -"also change over consecutive rounds. This can lead to evaluation results " -"that are not stable, so even if we would not change the model, we'd see " -"our evaluation results fluctuate over consecutive rounds." 
+"**Update documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" msgstr "" -"**L'évaluation fédérée** (ou évaluation côté client) est plus complexe, " -"mais aussi plus puissante : elle ne nécessite pas d'ensemble de données " -"centralisé et nous permet d'évaluer les modèles sur un plus grand " -"ensemble de données, ce qui donne souvent des résultats d'évaluation plus" -" réalistes. En fait, de nombreux scénarios exigent que nous utilisions " -"l'évaluation fédérée** si nous voulons obtenir des résultats d'évaluation" -" représentatifs. Mais cette puissance a un coût : une fois que nous " -"commençons à évaluer côté client, nous devons savoir que notre ensemble " -"de données d'évaluation peut changer au cours des cycles d'apprentissage " -"consécutifs si ces clients ne sont pas toujours disponibles. De plus, " -"l'ensemble de données détenu par chaque client peut également changer au " -"cours des cycles consécutifs. Cela peut conduire à des résultats " -"d'évaluation qui ne sont pas stables, donc même si nous ne changions pas " -"le modèle, nous verrions nos résultats d'évaluation fluctuer au cours des" -" cycles consécutifs." 
+"**Mise à jour de la documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:433 +#: ../../source/ref-changelog.md:1037 msgid "" -"We've seen how federated evaluation works on the client side (i.e., by " -"implementing the ``evaluate`` method in ``FlowerClient``). Now let's see " -"how we can evaluate aggregated model parameters on the server-side:" +"One substantial documentation update fixes multiple smaller rendering " +"issues, makes titles more succinct to improve navigation, removes a " +"deprecated library, updates documentation dependencies, includes the " +"`flwr.common` module in the API reference, includes support for markdown-" +"based documentation, migrates the changelog from `.rst` to `.md`, and " +"fixes a number of smaller details!" msgstr "" -"Nous avons vu comment l'évaluation fédérée fonctionne du côté client " -"(c'est-à-dire en implémentant la méthode ``evaluate`` dans " -"``FlowerClient``). 
Voyons maintenant comment nous pouvons évaluer les " -"paramètres du modèle agrégé du côté serveur :" +"Une mise à jour substantielle de la documentation corrige plusieurs " +"petits problèmes de rendu, rend les titres plus succincts pour améliorer " +"la navigation, supprime une bibliothèque obsolète, met à jour les " +"dépendances de la documentation, inclut le module `flwr.common` dans la " +"référence de l'API, inclut le support de la documentation basée sur le " +"markdown, migre le changelog de `.rst` vers `.md`, et corrige un certain " +"nombre de détails plus petits !" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:490 -msgid "Sending/receiving arbitrary values to/from clients" -msgstr "Envoi/réception de valeurs arbitraires vers/depuis les clients" +#: ../../source/ref-changelog.md:1039 ../../source/ref-changelog.md:1094 +#: ../../source/ref-changelog.md:1163 ../../source/ref-changelog.md:1202 +msgid "**Minor updates**" +msgstr "**Mises à jour mineures**" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:492 -#, fuzzy +#: ../../source/ref-changelog.md:1041 msgid "" -"In some situations, we want to configure client-side execution (training," -" evaluation) from the server-side. One example for that is the server " -"asking the clients to train for a certain number of local epochs. Flower " -"provides a way to send configuration values from the server to the " -"clients using a dictionary. Let's look at an example where the clients " -"receive values from the server through the ``config`` parameter in " -"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " -"method receives the configuration dictionary through the ``config`` " -"parameter and can then read values from this dictionary. 
In this example," -" it reads ``server_round`` and ``local_epochs`` and uses those values to " -"improve the logging and configure the number of local training epochs:" +"Add round number to fit and evaluate log messages " +"([#1266](https://github.com/adap/flower/pull/1266))" msgstr "" -"In some situations, we want to configure client-side execution (training," -" evaluation) from the server-side. One example for that is the server " -"asking the clients to train for a certain number of local epochs. Flower " -"provides a way to send configuration values from the server to the " -"clients using a dictionary. Let's look at an example where the clients " -"receive values from the server through the ``config`` parameter in " -"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " -"method receives the configuration dictionary through the ``config`` " -"parameter and can then read values from this dictionary. In this example," -" it reads ``server_round`` and ``local_epochs`` and uses those values to " -"improve the logging and configure the number of local training epochs:" +"Ajoute un chiffre rond pour ajuster et évaluer les messages du journal " +"([#1266](https://github.com/adap/flower/pull/1266))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:546 +#: ../../source/ref-changelog.md:1042 msgid "" -"So how can we send this config dictionary from server to clients? The " -"built-in Flower Strategies provide way to do this, and it works similarly" -" to the way server-side evaluation works. We provide a function to the " -"strategy, and the strategy calls this function for every round of " -"federated learning:" +"Add secure gRPC connection to the `advanced_tensorflow` code example " +"([#847](https://github.com/adap/flower/pull/847))" msgstr "" -"Comment pouvons-nous donc envoyer ce dictionnaire de configuration du " -"serveur aux clients ? 
Les stratégies de Flower intégrées fournissent un " -"moyen de le faire, et cela fonctionne de la même façon que l'évaluation " -"côté serveur. Nous fournissons une fonction à la stratégie, et la " -"stratégie appelle cette fonction pour chaque cycle d'apprentissage fédéré" -" :" +"Ajouter une connexion gRPC sécurisée à l'exemple de code " +"`advanced_tensorflow` ([#847](https://github.com/adap/flower/pull/847))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:576 +#: ../../source/ref-changelog.md:1043 msgid "" -"Next, we'll just pass this function to the FedAvg strategy before " -"starting the simulation:" +"Update developer tooling " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" msgstr "" -"Ensuite, nous allons simplement passer cette fonction à la stratégie " -"FedAvg avant de commencer la simulation :" +"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 +#: ../../source/ref-changelog.md:1044 msgid "" -"As we can see, the client logs now include the current round of federated" -" learning (which they read from the ``config`` dictionary). We can also " -"configure local training to run for one epoch during the first and second" -" round of federated learning, and then for two epochs during the third " -"round." 
+"Rename ProtoBuf messages to improve consistency " +"([#1214](https://github.com/adap/flower/pull/1214), " +"[#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -"Comme nous pouvons le voir, les journaux des clients incluent maintenant " -"le cycle actuel d'apprentissage fédéré (qu'ils lisent depuis le " -"dictionnaire ``config``). Nous pouvons également configurer " -"l'apprentissage local pour qu'il s'exécute pendant une époque au cours du" -" premier et du deuxième cycle d'apprentissage fédéré, puis pendant deux " -"époques au cours du troisième cycle." +"Renomme les messages ProtoBuf pour améliorer la cohérence " +"([#1214](https://github.com/adap/flower/pull/1214), " +"[#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 +#: ../../source/ref-changelog.md:1046 +msgid "v0.19.0 (2022-05-18)" +msgstr "v0.19.0 (2022-05-18)" + +#: ../../source/ref-changelog.md:1050 msgid "" -"Clients can also return arbitrary values to the server. To do so, they " -"return a dictionary from ``fit`` and/or ``evaluate``. We have seen and " -"used this concept throughout this notebook without mentioning it " -"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" -" key/value pair as the third return value in ``evaluate``." +"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" msgstr "" -"Les clients peuvent également renvoyer des valeurs arbitraires au " -"serveur. Pour ce faire, ils renvoient un dictionnaire depuis ``fit`` " -"et/ou ``evaluate``. 
Nous avons vu et utilisé ce concept tout au long de " -"ce carnet sans le mentionner explicitement : notre ``FlowerClient`` " -"renvoie un dictionnaire contenant une paire clé/valeur personnalisée en " -"tant que troisième valeur de retour dans ``evaluate``." - -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:627 -msgid "Scaling federated learning" -msgstr "Mise à l'échelle de l'apprentissage fédéré" +"**Flower Baselines (preview) : FedOpt, FedBN, FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:629 +#: ../../source/ref-changelog.md:1052 +#, fuzzy msgid "" -"As a last step in this notebook, let's see how we can use Flower to " -"experiment with a large number of clients." +"The first preview release of Flower Baselines has arrived! We're " +"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " +"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " +"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " +"With this first preview release we're also inviting the community to " +"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" +"contribute-baselines.html)." msgstr "" -"Comme dernière étape de ce carnet, voyons comment nous pouvons utiliser " -"Flower pour expérimenter avec un grand nombre de clients." +"La première version préliminaire de Flower Baselines est arrivée ! Nous " +"démarrons Flower Baselines avec des implémentations de FedOpt (FedYogi, " +"FedAdam, FedAdagrad), FedBN, et FedAvgM. Consultez la documentation sur " +"l'utilisation de [Flower Baselines](https://flower.ai/docs/using-" +"baselines.html). 
Avec cette première version préliminaire, nous invitons " +"également la communauté à [contribuer à leurs propres lignes de " +"base](https://flower.ai/docs/baselines/how-to-contribute-baselines.html)." -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:651 -#, python-format +#: ../../source/ref-changelog.md:1054 msgid "" -"We now have 1000 partitions, each holding 45 training and 5 validation " -"examples. Given that the number of training examples on each client is " -"quite small, we should probably train the model a bit longer, so we " -"configure the clients to perform 3 local training epochs. We should also " -"adjust the fraction of clients selected for training during each round " -"(we don't want all 1000 clients participating in every round), so we " -"adjust ``fraction_fit`` to ``0.05``, which means that only 5% of " -"available clients (so 50 clients) will be selected for training each " -"round:" +"**C++ client SDK (preview) and code example** " +"([#1111](https://github.com/adap/flower/pull/1111))" msgstr "" -"Nous avons maintenant 1000 partitions, chacune contenant 45 exemples " -"d'entraînement et 5 exemples de validation. Etant donné que le nombre " -"d'exemples d'entraînement sur chaque client est assez faible, nous " -"devrions probablement entraîner le modèle un peu plus longtemps, nous " -"configurons donc les clients pour qu'ils effectuent 3 époques " -"d'entraînement local. 
Nous devrions également ajuster la fraction de " -"clients sélectionnés pour l'entraînement à chaque tour (nous ne voulons " -"pas que les 1000 clients participent à chaque tour), nous ajustons donc " -"``fraction_fit`` à ``0.05``, ce qui signifie que seulement 5% des clients" -" disponibles (donc 50 clients) seront sélectionnés pour l'entraînement à " -"chaque tour :" +"**SDK client C++ (aperçu) et exemple de code** " +"([#1111](https://github.com/adap/flower/pull/1111))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:699 +#: ../../source/ref-changelog.md:1056 msgid "" -"In this notebook, we've seen how we can gradually enhance our system by " -"customizing the strategy, initializing parameters on the server side, " -"choosing a different strategy, and evaluating models on the server-side. " -"That's quite a bit of flexibility with so little code, right?" +"Preview support for Flower clients written in C++. The C++ preview " +"includes a Flower client SDK and a quickstart code example that " +"demonstrates a simple C++ client using the SDK." msgstr "" -"Dans ce carnet, nous avons vu comment nous pouvons progressivement " -"améliorer notre système en personnalisant la stratégie, en initialisant " -"les paramètres côté serveur, en choisissant une stratégie différente et " -"en évaluant les modèles côté serveur. C'est une sacrée flexibilité avec " -"si peu de code, n'est-ce pas ?" +"L'aperçu C++ comprend un SDK pour les clients Flower et un exemple de " +"code de démarrage rapide qui démontre un client C++ simple utilisant le " +"SDK." -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:701 +#: ../../source/ref-changelog.md:1058 msgid "" -"In the later sections, we've seen how we can communicate arbitrary values" -" between server and clients to fully customize client-side execution. 
" -"With that capability, we built a large-scale Federated Learning " -"simulation using the Flower Virtual Client Engine and ran an experiment " -"involving 1000 clients in the same workload - all in a Jupyter Notebook!" +"**Add experimental support for Python 3.10 and Python 3.11** " +"([#1135](https://github.com/adap/flower/pull/1135))" msgstr "" -"Dans les sections ultérieures, nous avons vu comment nous pouvons " -"communiquer des valeurs arbitraires entre le serveur et les clients pour " -"personnaliser entièrement l'exécution côté client. Grâce à cette " -"capacité, nous avons construit une simulation d'apprentissage fédéré à " -"grande échelle en utilisant le moteur de client virtuel Flower et nous " -"avons mené une expérience impliquant 1000 clients dans la même charge de " -"travail - le tout dans un carnet Jupyter !" +"**Ajouter la prise en charge expérimentale de Python 3.10 et Python " +"3.11** ([#1135](https://github.com/adap/flower/pull/1135))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:719 -#, fuzzy +#: ../../source/ref-changelog.md:1060 msgid "" -"The `Flower Federated Learning Tutorial - Part 3 " -"`__ shows how to build a fully custom ``Strategy`` from " -"scratch." +"Python 3.10 is the latest stable release of Python and Python 3.11 is due" +" to be released in October. This Flower release adds experimental support" +" for both Python versions." msgstr "" -"Le `Tutoriel d'apprentissage fédéré Flower - Partie 3 [WIP] " -"`__ montre comment construire une ``Stratégie`` entièrement " -"personnalisée à partir de zéro." - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 -msgid "What is Federated Learning?" -msgstr "Qu'est-ce que l'apprentissage fédéré ?" +"Python 3.10 est la dernière version stable de Python et Python 3.11 " +"devrait sortir en octobre. Cette version de Flower ajoute une prise en " +"charge expérimentale pour les deux versions de Python." 
-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 -#, fuzzy +#: ../../source/ref-changelog.md:1062 msgid "" -"In this tutorial, you will learn what federated learning is, build your " -"first system in Flower, and gradually extend it. If you work through all " -"parts of the tutorial, you will be able to build advanced federated " -"learning systems that approach the current state of the art in the field." +"**Aggregate custom metrics through user-provided functions** " +"([#1144](https://github.com/adap/flower/pull/1144))" msgstr "" -"Dans ce tutoriel, tu apprendras ce qu'est l'apprentissage fédéré, tu " -"construiras ton premier système dans Flower, et tu l'étendras " -"progressivement. Si tu travailles sur toutes les parties du tutoriel, tu " -"seras capable de construire des systèmes d'apprentissage fédéré avancés " -"qui se rapprochent de l'état actuel de l'art dans le domaine." +"**Agréger des mesures personnalisées grâce à des fonctions fournies par " +"l'utilisateur** ([#1144](https://github.com/adap/flower/pull/1144))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 +#: ../../source/ref-changelog.md:1064 msgid "" -"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " -"federated learning. Only a basic understanding of data science and Python" -" programming is assumed." +"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" +" customize the strategy. Built-in strategies support two new arguments, " +"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " +"allow passing custom metric aggregation functions." msgstr "" -"🧑‍🏫 Ce tutoriel part de zéro et n'attend aucune familiarité avec " -"l'apprentissage fédéré. Seule une compréhension de base de la science des" -" données et de la programmation Python est supposée." 
+"Les stratégies intégrées prennent en charge deux nouveaux arguments, " +"`fit_metrics_aggregation_fn` et `evaluate_metrics_aggregation_fn`, qui " +"permettent de passer des fonctions d'agrégation de métriques " +"personnalisées." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 -#, fuzzy +#: ../../source/ref-changelog.md:1066 msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the open-source Flower community on Slack to connect, ask questions, and " -"get help: `Join Slack `__ 🌼 We'd love to " -"hear from you in the ``#introductions`` channel! And if anything is " -"unclear, head over to the ``#questions`` channel." +"**User-configurable round timeout** " +"([#1162](https://github.com/adap/flower/pull/1162))" msgstr "" -"`Star Flower on GitHub `__ ⭐️ et " -"rejoignez la communauté Flower sur Slack pour vous connecter, poser des " -"questions et obtenir de l'aide : `Join Slack `__ 🌼 Nous serions ravis d'avoir de vos nouvelles dans le canal " -"``#introductions`` ! Et si quelque chose n'est pas clair, rendez-vous sur" -" le canal ``#questions``." - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 -msgid "Classic machine learning" -msgstr "Apprentissage automatique classique" +"**Temps d'attente configurable par l'utilisateur** " +"([#1162](https://github.com/adap/flower/pull/1162))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 +#: ../../source/ref-changelog.md:1068 msgid "" -"Before we begin to discuss federated learning, let us quickly recap how " -"most machine learning works today." +"A new configuration value allows the round timeout to be set for " +"`start_server` and `start_simulation`. If the `config` dictionary " +"contains a `round_timeout` key (with a `float` value in seconds), the " +"server will wait *at least* `round_timeout` seconds before it closes the " +"connection." 
msgstr "" -"Avant de commencer à discuter de l'apprentissage fédéré, récapitulons " -"rapidement la façon dont la plupart des apprentissages automatiques " -"fonctionnent aujourd'hui." +"Si le dictionnaire `config` contient une clé `round_timeout` (avec une " +"valeur `float` en secondes), le serveur attendra *au moins* " +"`round_timeout` secondes avant de fermer la connexion." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 +#: ../../source/ref-changelog.md:1070 msgid "" -"In machine learning, we have a model, and we have data. The model could " -"be a neural network (as depicted here), or something else, like classical" -" linear regression." +"**Enable both federated evaluation and centralized evaluation to be used " +"at the same time in all built-in strategies** " +"([#1091](https://github.com/adap/flower/pull/1091))" msgstr "" -"Dans l'apprentissage automatique, nous avons un modèle et des données. Le" -" modèle peut être un réseau neuronal (comme illustré ici), ou quelque " -"chose d'autre, comme la régression linéaire classique." +"**Permettre l'utilisation simultanée de l'évaluation fédérée et de " +"l'évaluation centralisée dans toutes les stratégies intégrées** " +"([#1091](https://github.com/adap/flower/pull/1091))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|d8bf04f23d9b46d8a23cc6f4887d7873|" +#: ../../source/ref-changelog.md:1072 +msgid "" +"Built-in strategies can now perform both federated evaluation (i.e., " +"client-side) and centralized evaluation (i.e., server-side) in the same " +"round. Federated evaluation can be disabled by setting `fraction_eval` to" +" `0.0`." msgstr "" +"Les stratégies intégrées peuvent maintenant effectuer une évaluation " +"fédérée (c'est-à-dire côté client) et une évaluation centralisée " +"(c'est-à-dire côté serveur) dans le même tour. L'évaluation fédérée peut " +"être désactivée en réglant `fraction_eval` sur `0.0`." 
-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 -msgid "Model and data" -msgstr "Modèle et données" +#: ../../source/ref-changelog.md:1074 +msgid "" +"**Two new Jupyter Notebook tutorials** " +"([#1141](https://github.com/adap/flower/pull/1141))" +msgstr "" +"**Deux nouveaux tutoriels Jupyter Notebook** " +"([#1141](https://github.com/adap/flower/pull/1141))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 +#: ../../source/ref-changelog.md:1076 msgid "" -"We train the model using the data to perform a useful task. A task could " -"be to detect objects in images, transcribe an audio recording, or play a " -"game like Go." +"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " +"basic and intermediate Flower features:" msgstr "" -"Nous entraînons le modèle en utilisant les données pour effectuer une " -"tâche utile. Une tâche peut consister à détecter des objets dans des " -"images, à transcrire un enregistrement audio ou à jouer à un jeu comme le" -" Go." 
+"Deux tutoriels Jupyter Notebook (compatibles avec Google Colab) " +"expliquent les fonctionnalités de base et intermédiaires de Flower :" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|5aa1711387d74d0f8b9c499e1a51627e|" +#: ../../source/ref-changelog.md:1078 +msgid "" +"*An Introduction to Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" msgstr "" +"*Introduction à l'apprentissage fédéré* : [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 -msgid "Train model using data" -msgstr "Entraîne le modèle à l'aide des données" +#: ../../source/ref-changelog.md:1080 +msgid "" +"*Using Strategies in Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" +msgstr "" +"*Utiliser des stratégies dans l'apprentissage fédéré* : [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 -#, fuzzy +#: ../../source/ref-changelog.md:1082 msgid "" -"Now, in practice, the training data we work with doesn't originate on the" -" machine we train the model on. It gets created somewhere else." +"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " +"([#1076](https://github.com/adap/flower/pull/1076))" msgstr "" -"Dans la pratique, les données d'entraînement avec lesquelles nous " -"travaillons ne proviennent pas de la machine sur laquelle nous entraînons" -" le modèle. Elles sont créées ailleurs." 
+"**Nouvelle stratégie FedAvgM (Federated Averaging with Server Momentum)**" +" ([#1076](https://github.com/adap/flower/pull/1076))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 +#: ../../source/ref-changelog.md:1084 #, fuzzy msgid "" -"It originates on a smartphone by the user interacting with an app, a car " -"collecting sensor data, a laptop receiving input via the keyboard, or a " -"smart speaker listening to someone trying to sing a song." +"The new `FedAvgM` strategy implements Federated Averaging with Server " +"Momentum \\[Hsu et al., 2019\\]." msgstr "" -"Elle prend naissance sur un smartphone par l'interaction de l'utilisateur" -" avec une application, une voiture qui collecte des données de capteurs, " -"un ordinateur portable qui reçoit des entrées via le clavier, ou un haut-" -"parleur intelligent qui écoute quelqu'un qui essaie de chanter une " -"chanson." +"La nouvelle stratégie `FedAvgM` met en œuvre la moyenne fédérée avec le " +"momentum du serveur [Hsu et al., 2019]." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|2bc8e069228d4873804061ff4a95048c|" +#: ../../source/ref-changelog.md:1086 +msgid "" +"**New advanced PyTorch code example** " +"([#1007](https://github.com/adap/flower/pull/1007))" msgstr "" +"**Nouvel exemple de code PyTorch avancé** " +"([#1007](https://github.com/adap/flower/pull/1007))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 -msgid "Data on a phone" -msgstr "Données sur un téléphone" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 +#: ../../source/ref-changelog.md:1088 msgid "" -"What's also important to mention, this \"somewhere else\" is usually not " -"just one place, it's many places. It could be several devices all running" -" the same app. But it could also be several organizations, all generating" -" data for the same task." 
+"A new code example (`advanced_pytorch`) demonstrates advanced Flower " +"concepts with PyTorch." msgstr "" -"Il est également important de mentionner que cet \"ailleurs\" n'est " -"généralement pas un seul endroit, mais plusieurs. Il peut s'agir de " -"plusieurs appareils fonctionnant tous avec la même application. Mais il " -"peut également s'agir de plusieurs organisations, qui génèrent toutes des" -" données pour la même tâche." +"Un nouvel exemple de code (`advanced_pytorch`) démontre des concepts de " +"fleur avancés avec PyTorch." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|c258488766324dc9a6807f0e7c4fd5f4|" +#: ../../source/ref-changelog.md:1090 +msgid "" +"**New JAX code example** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" msgstr "" +"**Nouvel exemple de code JAX** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 -msgid "Data is on many devices" -msgstr "Les données se trouvent sur de nombreux appareils" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 +#: ../../source/ref-changelog.md:1092 msgid "" -"So to use machine learning, or any kind of data analysis, the approach " -"that has been used in the past was to collect all data on a central " -"server. This server can be somewhere in a data center, or somewhere in " -"the cloud." +"A new code example (`jax_from_centralized_to_federated`) shows federated " +"learning with JAX and Flower." msgstr "" -"Ainsi, pour utiliser l'apprentissage automatique, ou tout autre type " -"d'analyse de données, l'approche utilisée par le passé consistait à " -"collecter toutes les données sur un serveur central. Ce serveur peut se " -"trouver quelque part dans un centre de données, ou quelque part dans le " -"cloud." 
+"Un nouvel exemple de code (`jax_from_centralized_to_federated`) montre " +"l'apprentissage fédéré avec JAX et Flower." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 -msgid "|d5f962c3f4ec48529efda980868c14b0|" +#: ../../source/ref-changelog.md:1096 +msgid "" +"New option to keep Ray running if Ray was already initialized in " +"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" msgstr "" +"Nouvelle option pour continuer à faire fonctionner Ray si Ray a déjà été " +"initialisé dans `start_simulation` " +"([#1177](https://github.com/adap/flower/pull/1177))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 -msgid "Central data collection" -msgstr "Collecte centralisée des données" +#: ../../source/ref-changelog.md:1097 +msgid "" +"Add support for custom `ClientManager` as a `start_simulation` parameter " +"([#1171](https://github.com/adap/flower/pull/1171))" +msgstr "" +"Ajout de la prise en charge d'un `ClientManager` personnalisé comme " +"paramètre de `start_simulation` " +"([#1171](https://github.com/adap/flower/pull/1171))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 +#: ../../source/ref-changelog.md:1098 #, fuzzy msgid "" -"Once all the data is collected in one place, we can finally use machine " -"learning algorithms to train our model on the data. This is the machine " -"learning approach that we've basically always relied on." +"New documentation for [implementing " +"strategies](https://flower.ai/docs/framework/how-to-implement-" +"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175))" msgstr "" -"Une fois que toutes les données sont rassemblées en un seul endroit, nous" -" pouvons enfin utiliser des algorithmes d'apprentissage automatique pour " -"entraîner notre modèle sur les données. 
C'est l'approche d'apprentissage " -"automatique sur laquelle nous nous sommes fondamentalement toujours " -"appuyés." +"Nouvelle documentation pour [mettre en œuvre des " +"stratégies](https://flower.ai/docs/framework/how-to-implement-" +"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|a5eccea18d4c43a68b54b65043cabef8|" +#: ../../source/ref-changelog.md:1099 +msgid "" +"New mobile-friendly documentation theme " +"([#1174](https://github.com/adap/flower/pull/1174))" msgstr "" +"Nouveau thème de documentation adapté aux mobiles " +"([#1174](https://github.com/adap/flower/pull/1174))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 -msgid "Central model training" -msgstr "Formation au modèle central" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 -msgid "Challenges of classical machine learning" -msgstr "Les défis de l'apprentissage automatique classique" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 +#: ../../source/ref-changelog.md:1100 msgid "" -"The classic machine learning approach we've just seen can be used in some" -" cases. Great examples include categorizing holiday photos, or analyzing " -"web traffic. Cases, where all the data is naturally available on a " -"centralized server." +"Limit version range for (optional) `ray` dependency to include only " +"compatible releases (`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" msgstr "" -"L'approche classique de l'apprentissage automatique que nous venons de " -"voir peut être utilisée dans certains cas. Parmi les grands exemples, on " -"peut citer la catégorisation des photos de vacances, ou l'analyse du " -"trafic web. Des cas, où toutes les données sont naturellement disponibles" -" sur un serveur centralisé." 
+"Limite la plage de versions pour la dépendance (optionnelle) `ray` pour " +"n'inclure que les versions compatibles (`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|f17662f7df2d42f68cac70a1fdeda8a7|" +#: ../../source/ref-changelog.md:1104 +msgid "" +"**Remove deprecated support for Python 3.6** " +"([#871](https://github.com/adap/flower/pull/871))" msgstr "" +"**Supprime la prise en charge obsolète de Python 3.6** " +"([#871](https://github.com/adap/flower/pull/871))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 -msgid "Centralized possible" -msgstr "Possibilité de centralisation" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 +#: ../../source/ref-changelog.md:1105 msgid "" -"But the approach can not be used in many other cases. Cases, where the " -"data is not available on a centralized server, or cases where the data " -"available on one server is not enough to train a good model." +"**Remove deprecated KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" msgstr "" -"Mais cette approche ne peut pas être utilisée dans de nombreux autres cas" -" : lorsque les données ne sont pas disponibles sur un serveur centralisé," -" ou lorsque les données disponibles sur un serveur ne sont pas " -"suffisantes pour former un bon modèle." 
+"**Supprimez KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 -msgid "|241fc906441a4f038c625a19d30d01b2|" +#: ../../source/ref-changelog.md:1106 +msgid "" +"**Remove deprecated no-op extra installs** " +"([#973](https://github.com/adap/flower/pull/973))" msgstr "" +"**Supprimer les installations supplémentaires no-op dépréciées** " +"([#973](https://github.com/adap/flower/pull/973))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 -msgid "Centralized impossible" -msgstr "Impossible de centraliser" +#: ../../source/ref-changelog.md:1107 +msgid "" +"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " +"([#869](https://github.com/adap/flower/pull/869))" +msgstr "" +"**Supprimez les champs proto obsolètes de** `FitRes` **et** `EvaluateRes`" +" ([#869](https://github.com/adap/flower/pull/869))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 -#, fuzzy +#: ../../source/ref-changelog.md:1108 msgid "" -"There are many reasons why the classic centralized machine learning " -"approach does not work for a large number of highly important real-world " -"use cases. 
Those reasons include:" +"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " +"([#1107](https://github.com/adap/flower/pull/1107))" msgstr "" -"Il existe de nombreuses raisons pour lesquelles l'approche classique " -"centralisée de l'apprentissage automatique ne fonctionne pas pour un " -"grand nombre de cas d'utilisation très importants dans le monde réel, " -"notamment :" +"**Supprime la stratégie QffedAvg (remplacée par QFedAvg)** " +"([#1107](https://github.com/adap/flower/pull/1107))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 -#, fuzzy +#: ../../source/ref-changelog.md:1109 msgid "" -"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " -"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " -"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " -"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " -"protect sensitive data from being moved. In fact, those regulations " -"sometimes even prevent single organizations from combining their own " -"users' data for artificial intelligence training because those users live" -" in different parts of the world, and their data is governed by different" -" data protection regulations." +"**Remove deprecated DefaultStrategy strategy** " +"([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -"**Réglementations** : GDPR (Europe), CCPA (Californie), PIPEDA (Canada), " -"LGPD (Brésil), PDPL (Argentine), KVKK (Turquie), POPI (Afrique du Sud), " -"FSS (Russie), CDPR (Chine), PDPB (Inde), PIPA (Corée), APPI (Japon), PDP " -"(Indonésie), PDPA (Singapour), APP (Australie), et d'autres " -"réglementations protègent les données sensibles contre le déplacement. 
En" -" fait, ces réglementations empêchent même parfois des organisations " -"individuelles de combiner les données de leurs propres utilisateurs pour " -"la formation à l'intelligence artificielle parce que ces utilisateurs " -"vivent dans différentes parties du monde, et que leurs données sont " -"régies par des réglementations différentes en matière de protection des " -"données." +"**Supprime la stratégie DefaultStrategy qui est obsolète** " +"([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 +#: ../../source/ref-changelog.md:1110 msgid "" -"**User preference**: In addition to regulation, there are use cases where" -" users just expect that no data leaves their device, ever. If you type " -"your passwords and credit card info into the digital keyboard of your " -"phone, you don't expect those passwords to end up on the server of the " -"company that developed that keyboard, do you? In fact, that use case was " -"the reason federated learning was invented in the first place." +"**Remove deprecated support for eval_fn accuracy return value** " +"([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -"**Préférence de l'utilisateur** : En plus de la réglementation, il existe" -" des cas d'utilisation où les utilisateurs s'attendent tout simplement à " -"ce qu'aucune donnée ne quitte leur appareil, jamais. Si tu tapes tes mots" -" de passe et tes informations de carte de crédit sur le clavier numérique" -" de ton téléphone, tu ne t'attends pas à ce que ces mots de passe " -"finissent sur le serveur de l'entreprise qui a développé ce clavier, n" -"'est-ce pas ? En fait, ce cas d'utilisation est la raison pour laquelle " -"l'apprentissage fédéré a été inventé en premier lieu." 
+"**Supprimer la prise en charge obsolète de la valeur de retour de la " +"précision eval_fn** ([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 -#, fuzzy +#: ../../source/ref-changelog.md:1111 msgid "" -"**Data volume**: Some sensors, like cameras, produce such a high data " -"volume that it is neither feasible nor economic to collect all the data " -"(due to, for example, bandwidth or communication efficiency). Think about" -" a national rail service with hundreds of train stations across the " -"country. If each of these train stations is outfitted with a number of " -"security cameras, the volume of raw on-device data they produce requires " -"incredibly powerful and exceedingly expensive infrastructure to process " -"and store. And most of the data isn't even useful." +"**Remove deprecated support for passing initial parameters as NumPy " +"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -"**volume de données** : certains capteurs, comme les caméras, produisent " -"un volume de données si important qu'il n'est ni possible ni économique " -"de collecter toutes les données (en raison, par exemple, de la bande " -"passante ou de l'efficacité des communications). Pensez à un service " -"ferroviaire national comptant des centaines de gares à travers le pays. " -"Si chacune de ces gares est équipée d'un certain nombre de caméras de " -"sécurité, le volume de données brutes sur les appareils qu'elles " -"produisent nécessite une infrastructure incroyablement puissante et " -"excessivement coûteuse pour les traiter et les stocker. Et la plupart de " -"ces données ne sont même pas utiles." 
+"**Supprime la prise en charge obsolète du passage des paramètres initiaux" +" en tant que ndarrays NumPy** " +"([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 -msgid "Examples where centralized machine learning does not work include:" -msgstr "" -"Voici quelques exemples où l'apprentissage automatique centralisé ne " -"fonctionne pas :" +#: ../../source/ref-changelog.md:1113 +msgid "v0.18.0 (2022-02-28)" +msgstr "v0.18.0 (2022-02-28)" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 -#, fuzzy +#: ../../source/ref-changelog.md:1117 msgid "" -"Sensitive healthcare records from multiple hospitals to train cancer " -"detection models" +"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " +"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" msgstr "" -"Des dossiers médicaux sensibles provenant de plusieurs hôpitaux pour " -"former des modèles de détection du cancer" +"**Amélioration de la compatibilité du moteur de client virtuel avec " +"Jupyter Notebook / Google Colab** " +"([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 +#: ../../source/ref-changelog.md:1119 msgid "" -"Financial information from different organizations to detect financial " -"fraud" -msgstr "" -"Informations financières provenant de différentes organisations pour " -"détecter les fraudes financières" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 -msgid "Location data from your electric car to make better range prediction" +"Simulations (using the Virtual 
Client Engine through `start_simulation`) " +"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " +"installing Flower with the `simulation` extra (`pip install " +"'flwr[simulation]'`)." msgstr "" -"Les données de localisation de ta voiture électrique pour mieux prédire " -"l'autonomie" +"Les simulations (utilisant le moteur de client virtuel via " +"`start_simulation`) fonctionnent maintenant plus facilement sur les " +"Notebooks Jupyter (y compris Google Colab) après avoir installé Flower " +"avec l'option `simulation` (`pip install 'flwr[simulation]'`)." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 -msgid "End-to-end encrypted messages to train better auto-complete models" +#: ../../source/ref-changelog.md:1121 +msgid "" +"**New Jupyter Notebook code example** " +"([#833](https://github.com/adap/flower/pull/833))" msgstr "" -"Messages cryptés de bout en bout pour former de meilleurs modèles " -"d'autocomplétion" +"**Nouvel exemple de code Jupyter Notebook** " +"([#833](https://github.com/adap/flower/pull/833))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 -#, fuzzy +#: ../../source/ref-changelog.md:1123 msgid "" -"The popularity of privacy-enhancing systems like the `Brave " -"`__ browser or the `Signal `__ " -"messenger shows that users care about privacy. In fact, they choose the " -"privacy-enhancing version over other alternatives, if such an alternative" -" exists. But what can we do to apply machine learning and data science to" -" these cases to utilize private data? After all, these are all areas that" -" would benefit significantly from recent advances in AI." +"A new code example (`quickstart_simulation`) demonstrates Flower " +"simulations using the Virtual Client Engine through Jupyter Notebook " +"(incl. Google Colab)." 
msgstr "" -"La popularité des systèmes améliorant la confidentialité comme le " -"navigateur `Brave `__ ou le messager `Signal " -"`__ montre que les utilisateurs se soucient de la " -"confidentialité. En fait, ils choisissent la version améliorant la " -"confidentialité plutôt que d'autres alternatives, si une telle " -"alternative existe. Mais que pouvons-nous faire pour appliquer " -"l'apprentissage automatique et la science des données à ces cas afin " -"d'utiliser les données privées ? Après tout, ce sont tous des domaines " -"qui bénéficieraient de manière significative des récentes avancées en " -"matière d'IA." - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 -msgid "Federated learning" -msgstr "Apprentissage fédéré" +"Un nouvel exemple de code (`quickstart_simulation`) démontre des " +"simulations de Flower en utilisant le moteur de client virtuel via " +"Jupyter Notebook (y compris Google Colab)." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 +#: ../../source/ref-changelog.md:1125 msgid "" -"Federated learning simply reverses this approach. It enables machine " -"learning on distributed data by moving the training to the data, instead " -"of moving the data to the training. Here's the single-sentence " -"explanation:" +"**Client properties (feature preview)** " +"([#795](https://github.com/adap/flower/pull/795))" msgstr "" -"L'apprentissage fédéré inverse simplement cette approche. Il permet " -"l'apprentissage automatique sur des données distribuées en déplaçant la " -"formation vers les données, au lieu de déplacer les données vers la " -"formation. 
Voici l'explication en une seule phrase :" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 -msgid "Central machine learning: move the data to the computation" -msgstr "Apprentissage automatique central : déplace les données vers le calcul" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 -msgid "Federated (machine) learning: move the computation to the data" -msgstr "Apprentissage (machine) fédéré : déplacer le calcul vers les données" +"**Propriétés du client (aperçu des fonctionnalités)** " +"([#795](https://github.com/adap/flower/pull/795))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 +#: ../../source/ref-changelog.md:1127 msgid "" -"By doing so, it enables us to use machine learning (and other data " -"science approaches) in areas where it wasn't possible before. We can now " -"train excellent medical AI models by enabling different hospitals to work" -" together. We can solve financial fraud by training AI models on the data" -" of different financial institutions. We can build novel privacy-" -"enhancing applications (such as secure messaging) that have better built-" -"in AI than their non-privacy-enhancing alternatives. And those are just a" -" few of the examples that come to mind. As we deploy federated learning, " -"we discover more and more areas that can suddenly be reinvented because " -"they now have access to vast amounts of previously inaccessible data." +"Clients can implement a new method `get_properties` to enable server-side" +" strategies to query client properties." msgstr "" -"Ce faisant, il nous permet d'utiliser l'apprentissage automatique (et " -"d'autres approches de science des données) dans des domaines où cela " -"n'était pas possible auparavant. Nous pouvons désormais former " -"d'excellents modèles d'IA médicale en permettant à différents hôpitaux de" -" travailler ensemble. 
Nous pouvons résoudre les fraudes financières en " -"formant des modèles d'IA sur les données de différentes institutions " -"financières. Nous pouvons créer de nouvelles applications d'amélioration " -"de la confidentialité (telles que la messagerie sécurisée) qui ont une " -"meilleure IA intégrée que leurs alternatives d'amélioration de la " -"confidentialité. Et ce ne sont là que quelques exemples qui me viennent à" -" l'esprit. Au fur et à mesure que nous déployons l'apprentissage fédéré, " -"nous découvrons de plus en plus de domaines qui peuvent soudainement être" -" réinventés parce qu'ils ont maintenant accès à de vastes quantités de " -"données auparavant inaccessibles." +"Les clients peuvent implémenter une nouvelle méthode `get_properties` " +"pour permettre aux stratégies côté serveur d'interroger les propriétés du" +" client." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 +#: ../../source/ref-changelog.md:1129 msgid "" -"So how does federated learning work, exactly? Let's start with an " -"intuitive explanation." +"**Experimental Android support with TFLite** " +"([#865](https://github.com/adap/flower/pull/865))" msgstr "" -"Comment fonctionne l'apprentissage fédéré ? Commençons par une " -"explication intuitive." - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 -msgid "Federated learning in five steps" -msgstr "L'apprentissage fédéré en cinq étapes" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 -msgid "Step 0: Initialize global model" -msgstr "Étape 0 : Initialisation du modèle global" +"**Support expérimental d'Android avec TFLite** " +"([#865](https://github.com/adap/flower/pull/865))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 +#: ../../source/ref-changelog.md:1131 msgid "" -"We start by initializing the model on the server. 
This is exactly the " -"same in classic centralized learning: we initialize the model parameters," -" either randomly or from a previously saved checkpoint." +"Android support has finally arrived in `main`! Flower is both client-" +"agnostic and framework-agnostic by design. One can integrate arbitrary " +"client platforms and with this release, using Flower on Android has " +"become a lot easier." msgstr "" -"Nous commençons par initialiser le modèle sur le serveur. C'est " -"exactement la même chose dans l'apprentissage centralisé classique : nous" -" initialisons les paramètres du modèle, soit de façon aléatoire, soit à " -"partir d'un point de contrôle précédemment sauvegardé." +"La prise en charge d'Android est enfin arrivée dans `main` ! Flower est à" +" la fois agnostique au niveau du client et du cadre de travail. On peut " +"intégrer des plates-formes client arbitraires et avec cette version, " +"l'utilisation de Flower sur Android est devenue beaucoup plus facile." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|0aa5aa05810b44b6a835cecce28f3137|" +#: ../../source/ref-changelog.md:1133 +msgid "" +"The example uses TFLite on the client side, along with a new " +"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " +"still experimental, but they are a first step towards a fully-fledged " +"Android SDK and a unified `FedAvg` implementation that integrated the new" +" functionality from `FedAvgAndroid`." msgstr "" +"L'exemple utilise TFLite du côté client, ainsi qu'une nouvelle stratégie " +"`FedAvgAndroid`. Le client Android et `FedAvgAndroid` sont encore " +"expérimentaux, mais ils constituent un premier pas vers un SDK Android à " +"part entière et une implémentation unifiée de `FedAvg` intégrant la " +"nouvelle fonctionnalité de `FedAvgAndroid`." 
-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 -msgid "Initialize global model" -msgstr "Initialise le modèle global" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 +#: ../../source/ref-changelog.md:1135 msgid "" -"Step 1: Send model to a number of connected organizations/devices (client" -" nodes)" +"**Make gRPC keepalive time user-configurable and decrease default " +"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" msgstr "" -"Étape 1 : envoyer le modèle à un certain nombre d'organisations/appareils" -" connectés (nœuds clients)" +"**Rendre le temps de garde gRPC configurable par l'utilisateur et " +"diminuer le temps de garde par défaut** " +"([#1069](https://github.com/adap/flower/pull/1069))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 -#, fuzzy +#: ../../source/ref-changelog.md:1137 msgid "" -"Next, we send the parameters of the global model to the connected client " -"nodes (think: edge devices like smartphones or servers belonging to " -"organizations). This is to ensure that each participating node starts " -"their local training using the same model parameters. We often use only a" -" few of the connected nodes instead of all nodes. The reason for this is " -"that selecting more and more client nodes has diminishing returns." +"The default gRPC keepalive time has been reduced to increase the " +"compatibility of Flower with more cloud environments (for example, " +"Microsoft Azure). Users can configure the keepalive time to customize the" +" gRPC stack based on specific requirements." msgstr "" -"Ensuite, nous envoyons les paramètres du modèle global aux nœuds clients " -"connectés (par exemple, les appareils périphériques comme les smartphones" -" ou les serveurs appartenant à des organisations). Cela permet de " -"s'assurer que chaque nœud participant commence sa formation locale en " -"utilisant les mêmes paramètres de modèle. 
Nous n'utilisons souvent que " -"quelques-uns des nœuds connectés au lieu de tous les nœuds. La raison en " -"est que la sélection d'un nombre croissant de nœuds clients a des " -"rendements décroissants." +"Le temps de keepalive gRPC par défaut a été réduit pour augmenter la " +"compatibilité de Flower avec davantage d'environnements cloud (par " +"exemple, Microsoft Azure). Les utilisateurs peuvent configurer le temps " +"de keepalive pour personnaliser la pile gRPC en fonction d'exigences " +"spécifiques." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|c742940dd4bf4de09d8d0d5e8d179638|" +#: ../../source/ref-changelog.md:1139 +msgid "" +"**New differential privacy example using Opacus and PyTorch** " +"([#805](https://github.com/adap/flower/pull/805))" msgstr "" +"**Nouvel exemple de confidentialité différentielle utilisant Opacus et " +"PyTorch** ([#805](https://github.com/adap/flower/pull/805))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 -msgid "Send global model" -msgstr "Envoyer le modèle global" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 +#: ../../source/ref-changelog.md:1141 msgid "" -"Step 2: Train model locally on the data of each organization/device " -"(client node)" +"A new code example (`opacus`) demonstrates differentially-private " +"federated learning with Opacus, PyTorch, and Flower." msgstr "" -"Étape 2 : Entraîne le modèle localement sur les données de chaque " -"organisation/appareil (nœud client)" +"Un nouvel exemple de code (`opacus`) démontre l'apprentissage fédéré " +"différentiellement privé avec Opacus, PyTorch et Flower." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 +#: ../../source/ref-changelog.md:1143 msgid "" -"Now that all (selected) client nodes have the latest version of the " -"global model parameters, they start the local training. They use their " -"own local dataset to train their own local model. 
They don't train the " -"model until full convergence, but they only train for a little while. " -"This could be as little as one epoch on the local data, or even just a " -"few steps (mini-batches)." +"**New Hugging Face Transformers code example** " +"([#863](https://github.com/adap/flower/pull/863))" msgstr "" -"Maintenant que tous les nœuds clients (sélectionnés) disposent de la " -"dernière version des paramètres du modèle global, ils commencent " -"l'entraînement local. Ils utilisent leur propre ensemble de données " -"locales pour entraîner leur propre modèle local. Ils n'entraînent pas le " -"modèle jusqu'à la convergence totale, mais ils ne s'entraînent que " -"pendant un petit moment. Il peut s'agir d'une seule époque sur les " -"données locales, ou même de quelques étapes (mini-batchs)." +"**Nouvel exemple de code Hugging Face Transformers** " +"([#863](https://github.com/adap/flower/pull/863))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|1f169ab4601a47e1a226f1628f4ebddb|" +#: ../../source/ref-changelog.md:1145 +msgid "" +"A new code example (`quickstart_huggingface`) demonstrates usage of " +"Hugging Face Transformers with Flower." msgstr "" +"Un nouvel exemple de code (`quickstart_huggingface`) démontre " +"l'utilisation des transformateurs Hugging Face avec Flower." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 -msgid "Train on local data" -msgstr "Forme-toi aux données locales" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 -msgid "Step 3: Return model updates back to the server" -msgstr "Étape 3 : Renvoyer les mises à jour du modèle au serveur" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 +#: ../../source/ref-changelog.md:1147 msgid "" -"After local training, each client node has a slightly different version " -"of the model parameters they originally received. 
The parameters are all " -"different because each client node has different examples in its local " -"dataset. The client nodes then send those model updates back to the " -"server. The model updates they send can either be the full model " -"parameters or just the gradients that were accumulated during local " -"training." +"**New MLCube code example** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" msgstr "" -"Après l'entraînement local, chaque nœud client possède une version " -"légèrement différente des paramètres du modèle qu'il a reçus à l'origine." -" Les paramètres sont tous différents parce que chaque nœud client a des " -"exemples différents dans son ensemble de données local. Les nœuds clients" -" renvoient ensuite ces mises à jour du modèle au serveur. Les mises à " -"jour du modèle qu'ils envoient peuvent être soit les paramètres complets " -"du modèle, soit seulement les gradients qui ont été accumulés au cours de" -" l'entraînement local." +"**Nouvel exemple de code MLCube** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|12cfa9cde14440ecb8c8f6c1d7185bec|" +#: ../../source/ref-changelog.md:1149 +msgid "" +"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " +"with Flower." msgstr "" +"Un nouvel exemple de code (`quickstart_mlcube`) démontre l'utilisation de" +" MLCube avec Flower." 
-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 -msgid "Send model updates" -msgstr "Envoyer les mises à jour du modèle" +#: ../../source/ref-changelog.md:1151 +msgid "" +"**SSL-enabled server and client** " +"([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" +msgstr "" +"**Serveur et client compatibles SSL** " +"([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 -msgid "Step 4: Aggregate model updates into a new global model" +#: ../../source/ref-changelog.md:1153 +msgid "" +"SSL enables secure encrypted connections between clients and servers. " +"This release open-sources the Flower secure gRPC implementation to make " +"encrypted communication channels accessible to all Flower users." msgstr "" -"Étape 4 : Agréger les mises à jour des modèles dans un nouveau modèle " -"global" +"SSL permet d'établir des connexions cryptées et sécurisées entre les " +"clients et les serveurs. Cette version met en open-source " +"l'implémentation gRPC sécurisée de Flower afin de rendre les canaux de " +"communication cryptés accessibles à tous les utilisateurs de Flower." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 +#: ../../source/ref-changelog.md:1155 msgid "" -"The server receives model updates from the selected client nodes. 
If it " -"selected 100 client nodes, it now has 100 slightly different versions of " -"the original global model, each trained on the local data of one client. " -"But didn't we want to have one model that contains the learnings from the" -" data of all 100 client nodes?" +"**Updated** `FedAdam` **and** `FedYogi` **strategies** " +"([#885](https://github.com/adap/flower/pull/885), " +"[#895](https://github.com/adap/flower/pull/895))" msgstr "" -"Le serveur reçoit les mises à jour du modèle des nœuds clients " -"sélectionnés. S'il a sélectionné 100 nœuds clients, il dispose maintenant" -" de 100 versions légèrement différentes du modèle global original, " -"chacune ayant été formée sur les données locales d'un client. Mais ne " -"voulions-nous pas avoir un seul modèle qui contienne les apprentissages " -"des données de l'ensemble des 100 nœuds clients ?" +"**Mise à jour** `FedAdam` **et** `FedYogi` **stratégies** " +"([#885](https://github.com/adap/flower/pull/885), " +"[#895](https://github.com/adap/flower/pull/895))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 +#: ../../source/ref-changelog.md:1157 msgid "" -"In order to get one single model, we have to combine all the model " -"updates we received from the client nodes. This process is called " -"*aggregation*, and there are many different ways to do it. The most basic" -" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " -"`__), often abbreviated as *FedAvg*. " -"*FedAvg* takes the 100 model updates and, as the name suggests, averages " -"them. To be more precise, it takes the *weighted average* of the model " -"updates, weighted by the number of examples each client used for " -"training. The weighting is important to make sure that each data example " -"has the same \"influence\" on the resulting global model. 
If one client " -"has 10 examples, and another client has 100 examples, then - without " -"weighting - each of the 10 examples would influence the global model ten " -"times as much as each of the 100 examples." +"`FedAdam` and `FedAdam` match the latest version of the Adaptive " +"Federated Optimization paper." msgstr "" -"In order to get one single model, we have to combine all the model " -"updates we received from the client nodes. This process is called " -"*aggregation*, and there are many different ways to do it. The most basic" -" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " -"`__), often abbreviated as *FedAvg*. " -"*FedAvg* takes the 100 model updates and, as the name suggests, averages " -"them. To be more precise, it takes the *weighted average* of the model " -"updates, weighted by the number of examples each client used for " -"training. The weighting is important to make sure that each data example " -"has the same \"influence\" on the resulting global model. If one client " -"has 10 examples, and another client has 100 examples, then - without " -"weighting - each of the 10 examples would influence the global model ten " -"times as much as each of the 100 examples." +"`FedAdam` et `FedAdam` correspondent à la dernière version de l'article " +"sur l'optimisation fédérée adaptative." 
-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|72939caf6e294b0986fee6dde96614d7|" +#: ../../source/ref-changelog.md:1159 +msgid "" +"**Initialize** `start_simulation` **with a list of client IDs** " +"([#860](https://github.com/adap/flower/pull/860))" msgstr "" +"**Initialise** `start_simulation` **avec une liste d'ID de clients** " +"([#860](https://github.com/adap/flower/pull/860))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 -msgid "Aggregate model updates" -msgstr "Mises à jour globales du modèle" +#: ../../source/ref-changelog.md:1161 +msgid "" +"`start_simulation` can now be called with a list of client IDs " +"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " +"`client_fn` whenever a client needs to be initialized, which can make it " +"easier to load data partitions that are not accessible through `int` " +"identifiers." +msgstr "" +"`start_simulation` peut maintenant être appelé avec une liste " +"d'identifiants de clients (`clients_ids`, type : `List[str]`). Ces " +"identifiants seront passés à `client_fn` chaque fois qu'un client doit " +"être initialisé, ce qui peut faciliter le chargement de partitions de " +"données qui ne sont pas accessibles par des identifiants `int`." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 -msgid "Step 5: Repeat steps 1 to 4 until the model converges" -msgstr "Étape 5 : répète les étapes 1 à 4 jusqu'à ce que le modèle converge" +#: ../../source/ref-changelog.md:1165 +msgid "" +"Update `num_examples` calculation in PyTorch code examples in " +"([#909](https://github.com/adap/flower/pull/909))" +msgstr "" +"Mettre à jour le calcul de `num_examples` dans les exemples de code " +"PyTorch dans ([#909](https://github.com/adap/flower/pull/909))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 +#: ../../source/ref-changelog.md:1166 msgid "" -"Steps 1 to 4 are what we call a single round of federated learning. 
The " -"global model parameters get sent to the participating client nodes (step " -"1), the client nodes train on their local data (step 2), they send their " -"updated models to the server (step 3), and the server then aggregates the" -" model updates to get a new version of the global model (step 4)." +"Expose Flower version through `flwr.__version__` " +"([#952](https://github.com/adap/flower/pull/952))" msgstr "" -"Les étapes 1 à 4 constituent ce que nous appelons un cycle unique " -"d'apprentissage fédéré. Les paramètres du modèle global sont envoyés aux " -"nœuds clients participants (étape 1), les nœuds clients s'entraînent sur " -"leurs données locales (étape 2), ils envoient leurs modèles mis à jour au" -" serveur (étape 3), et le serveur agrège ensuite les mises à jour du " -"modèle pour obtenir une nouvelle version du modèle global (étape 4)." +"Exposer la version de Flower à travers `flwr.__version__` " +"([#952](https://github.com/adap/flower/pull/952))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 -#, fuzzy +#: ../../source/ref-changelog.md:1167 msgid "" -"During a single round, each client node that participates in that " -"iteration only trains for a little while. This means that after the " -"aggregation step (step 4), we have a model that has been trained on all " -"the data of all participating client nodes, but only for a little while. " -"We then have to repeat this training process over and over again to " -"eventually arrive at a fully trained model that performs well across the " -"data of all client nodes." +"`start_server` in `app.py` now returns a `History` object containing " +"metrics from training ([#974](https://github.com/adap/flower/pull/974))" msgstr "" -"Au cours d'un seul tour, chaque nœud client qui participe à cette " -"itération ne s'entraîne que pendant un petit moment. 
Cela signifie " -"qu'après l'étape d'agrégation (étape 4), nous avons un modèle qui a été " -"entraîné sur toutes les données de tous les nœuds clients participants, " -"mais seulement pendant un petit moment. Nous devons ensuite répéter ce " -"processus d'entraînement encore et encore pour finalement arriver à un " -"modèle entièrement entraîné qui fonctionne bien sur l'ensemble des " -"données de tous les nœuds clients." +"`start_server` dans `app.py` renvoie maintenant un objet `History` " +"contenant les métriques de l'entraînement " +"([#974](https://github.com/adap/flower/pull/974))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 +#: ../../source/ref-changelog.md:1168 msgid "" -"Congratulations, you now understand the basics of federated learning. " -"There's a lot more to discuss, of course, but that was federated learning" -" in a nutshell. In later parts of this tutorial, we will go into more " -"detail. Interesting questions include: How can we select the best client " -"nodes that should participate in the next round? What's the best way to " -"aggregate model updates? How can we handle failing client nodes " -"(stragglers)?" +"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " +"([#978](https://github.com/adap/flower/pull/978))" msgstr "" -"Félicitations, tu comprends maintenant les bases de l'apprentissage " -"fédéré. Il y a bien sûr beaucoup plus à discuter, mais c'était " -"l'apprentissage fédéré en quelques mots. Dans les parties suivantes de ce" -" tutoriel, nous irons plus en détail. Les questions intéressantes " -"comprennent : comment pouvons-nous sélectionner les meilleurs nœuds " -"clients qui devraient participer au prochain tour ? Quelle est la " -"meilleure façon d'agréger les mises à jour du modèle ? Comment pouvons-" -"nous gérer les nœuds clients qui échouent (stragglers) ?" 
+"Rendre `max_workers` (utilisé par `ThreadPoolExecutor`) configurable " +"([#978](https://github.com/adap/flower/pull/978))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 -#, fuzzy +#: ../../source/ref-changelog.md:1169 msgid "" -"Just like we can train a model on the decentralized data of different " -"client nodes, we can also evaluate the model on that data to receive " -"valuable metrics. This is called federated evaluation, sometimes " -"abbreviated as FE. In fact, federated evaluation is an integral part of " -"most federated learning systems." +"Increase sleep time after server start to three seconds in all code " +"examples ([#1086](https://github.com/adap/flower/pull/1086))" msgstr "" -"Tout comme nous pouvons former un modèle sur les données décentralisées " -"de différents nœuds clients, nous pouvons également évaluer le modèle sur" -" ces données pour recevoir des mesures précieuses. C'est ce qu'on appelle" -" l'évaluation fédérée, parfois abrégée en FE. En fait, l'évaluation " -"fédérée fait partie intégrante de la plupart des systèmes d'apprentissage" -" fédéré." +"Augmente le temps de sommeil après le démarrage du serveur à trois " +"secondes dans tous les exemples de code " +"([#1086](https://github.com/adap/flower/pull/1086))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 -msgid "Federated analytics" -msgstr "Analyses fédérées" +#: ../../source/ref-changelog.md:1170 +msgid "" +"Added a new FAQ section to the documentation " +"([#948](https://github.com/adap/flower/pull/948))" +msgstr "" +"Ajout d'une nouvelle section FAQ à la documentation " +"([#948](https://github.com/adap/flower/pull/948))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 +#: ../../source/ref-changelog.md:1171 msgid "" -"In many cases, machine learning isn't necessary to derive value from " -"data. Data analysis can yield valuable insights, but again, there's often" -" not enough data to get a clear answer. 
What's the average age at which " -"people develop a certain type of health condition? Federated analytics " -"enables such queries over multiple client nodes. It is usually used in " -"conjunction with other privacy-enhancing technologies like secure " -"aggregation to prevent the server from seeing the results submitted by " -"individual client nodes." +"And many more under-the-hood changes, library updates, documentation " +"changes, and tooling improvements!" msgstr "" -"Dans de nombreux cas, l'apprentissage automatique n'est pas nécessaire " -"pour tirer de la valeur des données. L'analyse des données peut donner " -"des indications précieuses, mais là encore, il n'y a souvent pas assez de" -" données pour obtenir une réponse claire. Quel est l'âge moyen auquel les" -" gens développent un certain type de problème de santé ? L'analyse " -"fédérée permet de telles requêtes sur plusieurs nœuds clients. Elle est " -"généralement utilisée en conjonction avec d'autres technologies de " -"renforcement de la confidentialité, comme l'agrégation sécurisée, pour " -"empêcher le serveur de voir les résultats soumis par les nœuds clients " -"individuels." +"Et bien d'autres changements sous le capot, des mises à jour de la " +"bibliothèque, des modifications de la documentation et des améliorations " +"de l'outillage !" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 +#: ../../source/ref-changelog.md:1175 msgid "" -"Differential privacy (DP) is often mentioned in the context of Federated " -"Learning. It is a privacy-preserving method used when analyzing and " -"sharing statistical data, ensuring the privacy of individual " -"participants. DP achieves this by adding statistical noise to the model " -"updates, ensuring any individual participants’ information cannot be " -"distinguished or re-identified. This technique can be considered an " -"optimization that provides a quantifiable privacy protection measure." 
+"**Removed** `flwr_example` **and** `flwr_experimental` **from release " +"build** ([#869](https://github.com/adap/flower/pull/869))" msgstr "" +"**Supprimé** `flwr_example` **et** `flwr_experimental` **de la version " +"release build** ([#869](https://github.com/adap/flower/pull/869))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 -msgid "Flower" -msgstr "Fleur" +#: ../../source/ref-changelog.md:1177 +msgid "" +"The packages `flwr_example` and `flwr_experimental` have been deprecated " +"since Flower 0.12.0 and they are not longer included in Flower release " +"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " +"an upcoming release." +msgstr "" +"Les paquets `flwr_example` et `flwr_experimental` ont été dépréciés " +"depuis Flower 0.12.0 et ils ne sont plus inclus dans les builds de " +"Flower. Les extras associés (`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`) sont maintenant no-op et seront " +"supprimés dans une prochaine version." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 +#: ../../source/ref-changelog.md:1179 +msgid "v0.17.0 (2021-09-24)" +msgstr "v0.17.0 (2021-09-24)" + +#: ../../source/ref-changelog.md:1183 msgid "" -"Federated learning, federated evaluation, and federated analytics require" -" infrastructure to move machine learning models back and forth, train and" -" evaluate them on local data, and then aggregate the updated models. " -"Flower provides the infrastructure to do exactly that in an easy, " -"scalable, and secure way. In short, Flower presents a unified approach to" -" federated learning, analytics, and evaluation. It allows the user to " -"federate any workload, any ML framework, and any programming language." 
+"**Experimental virtual client engine** " +"([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" msgstr "" -"L'apprentissage fédéré, l'évaluation fédérée et l'analyse fédérée " -"nécessitent une infrastructure pour déplacer les modèles d'apprentissage " -"automatique dans les deux sens, les entraîner et les évaluer sur des " -"données locales, puis agréger les modèles mis à jour. Flower fournit " -"l'infrastructure pour faire exactement cela de manière simple, évolutive " -"et sécurisée. En bref, Flower présente une approche unifiée de " -"l'apprentissage, de l'analyse et de l'évaluation fédérés. Il permet à " -"l'utilisateur de fédérer n'importe quelle charge de travail, n'importe " -"quel cadre de ML et n'importe quel langage de programmation." +"**Moteur expérimental de client virtuel** " +"([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|83a8daee45da4a98b8d6f24ae098fc50|" +#: ../../source/ref-changelog.md:1185 +msgid "" +"One of Flower's goals is to enable research at scale. This release " +"enables a first (experimental) peek at a major new feature, codenamed the" +" virtual client engine. Virtual clients enable simulations that scale to " +"a (very) large number of clients on a single machine or compute cluster. " +"The easiest way to test the new functionality is to look at the two new " +"code examples called `quickstart_simulation` and `simulation_pytorch`." msgstr "" +"L'un des objectifs de Flower est de permettre la recherche à grande " +"échelle. Cette version donne un premier aperçu (expérimental) d'une " +"nouvelle fonctionnalité majeure, connue sous le nom de code de moteur de " +"client virtuel. 
Les clients virtuels permettent des simulations qui " +"s'étendent à un (très) grand nombre de clients sur une seule machine ou " +"une grappe de calcul. La façon la plus simple de tester la nouvelle " +"fonctionnalité est de regarder les deux nouveaux exemples de code appelés" +" `quickstart_simulation` et `simulation_pytorch`." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 +#: ../../source/ref-changelog.md:1187 msgid "" -"Flower federated learning server and client nodes (car, scooter, personal" -" computer, roomba, and phone)" +"The feature is still experimental, so there's no stability guarantee for " +"the API. It's also not quite ready for prime time and comes with a few " +"known caveats. However, those who are curious are encouraged to try it " +"out and share their thoughts." msgstr "" -"Serveur d'apprentissage fédéré de Flower et nœuds clients (voiture, " -"scooter, ordinateur personnel, roomba et téléphone)" +"La fonction est encore expérimentale, il n'y a donc aucune garantie de " +"stabilité pour l'API. Elle n'est pas non plus tout à fait prête pour le " +"prime time et s'accompagne de quelques mises en garde connues. Cependant," +" les personnes curieuses sont encouragées à l'essayer et à faire part de " +"leurs réflexions." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 +#: ../../source/ref-changelog.md:1189 msgid "" -"Congratulations, you just learned the basics of federated learning and " -"how it relates to the classic (centralized) machine learning!" +"**New built-in strategies** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" msgstr "" -"Félicitations, tu viens d'apprendre les bases de l'apprentissage fédéré " -"et son rapport avec l'apprentissage automatique classique (centralisé) !" 
+"**Nouvelles stratégies intégrées** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 +#: ../../source/ref-changelog.md:1191 msgid "" -"In the next part of this tutorial, we are going to build a first " -"federated learning system with Flower." +"FedYogi - Federated learning strategy using Yogi on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" msgstr "" -"Dans la prochaine partie de ce tutoriel, nous allons construire un " -"premier système d'apprentissage fédéré avec Flower." +"FedYogi - Stratégie d'apprentissage fédéré utilisant Yogi côté serveur. " +"Mise en œuvre basée sur https://arxiv.org/abs/2003.00295" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 -#, fuzzy +#: ../../source/ref-changelog.md:1192 msgid "" -"The `Flower Federated Learning Tutorial - Part 1 " -"`__ shows how to build a simple federated learning system " -"with PyTorch and Flower." +"FedAdam - Federated learning strategy using Adam on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" msgstr "" -"Le `Tutoriel d'apprentissage fédéré Flower - Partie 1 " -"`__ " -"montre comment construire un système d'apprentissage fédéré simple avec " -"PyTorch et Flower." +"FedAdam - Stratégie d'apprentissage fédéré utilisant Adam côté serveur. 
" +"Mise en œuvre basée sur https://arxiv.org/abs/2003.00295" -#~ msgid "Flower CLI commands" -#~ msgstr "Commandes CLI Flower" +#: ../../source/ref-changelog.md:1194 +msgid "" +"**New PyTorch Lightning code example** " +"([#617](https://github.com/adap/flower/pull/617))" +msgstr "" +"**Nouvel exemple de code PyTorch Lightning** " +"([#617](https://github.com/adap/flower/pull/617))" -#~ msgid "Contributor guide" -#~ msgstr "Guide pour les contributeurs" +#: ../../source/ref-changelog.md:1196 +msgid "" +"**New Variational Auto-Encoder code example** " +"([#752](https://github.com/adap/flower/pull/752))" +msgstr "" +"**Nouvel exemple de code d'autocodage variationnel** " +"([#752](https://github.com/adap/flower/pull/752))" -#~ msgid "API Reference - Flower CLI commands" -#~ msgstr "Référence API - Commandes CLI pour Flower" +#: ../../source/ref-changelog.md:1198 +msgid "" +"**New scikit-learn code example** " +"([#748](https://github.com/adap/flower/pull/748))" +msgstr "" +"**Nouvel exemple de code scikit-learn** " +"([#748](https://github.com/adap/flower/pull/748))" -#~ msgid "API Reference - flwr (Python package)" -#~ msgstr "Référence API - flwr (paquetage Python)" +#: ../../source/ref-changelog.md:1200 +msgid "" +"**New experimental TensorBoard strategy** " +"([#789](https://github.com/adap/flower/pull/789))" +msgstr "" +"**Nouvelle stratégie expérimentale TensorBoard** " +"([#789](https://github.com/adap/flower/pull/789))" -#~ msgid "Flower client." -#~ msgstr "Client de Flower" +#: ../../source/ref-changelog.md:1204 +msgid "" +"Improved advanced TensorFlow code example " +"([#769](https://github.com/adap/flower/pull/769))" +msgstr "" +"Amélioration de l'exemple de code TensorFlow avancé " +"([#769](https://github.com/adap/flower/pull/769))" -#~ msgid "Abstract base class for Flower clients." 
-#~ msgstr "" +#: ../../source/ref-changelog.md:1205 +msgid "" +"Warning when `min_available_clients` is misconfigured " +"([#830](https://github.com/adap/flower/pull/830))" +msgstr "" +"Avertissement lorsque `min_available_clients` est mal configuré " +"([#830](https://github.com/adap/flower/pull/830))" -#~ msgid "Evaluate the provided parameters using the locally held dataset." -#~ msgstr "évaluer le modèle mis à jour sur l'ensemble de test local" +#: ../../source/ref-changelog.md:1206 +msgid "" +"Improved gRPC server docs " +"([#841](https://github.com/adap/flower/pull/841))" +msgstr "" +"Amélioration de la documentation sur le serveur gRPC " +"([#841](https://github.com/adap/flower/pull/841))" -#~ msgid "Parameters" -#~ msgstr "Paramètres du modèle." +#: ../../source/ref-changelog.md:1207 +msgid "" +"Improved error message in `NumPyClient` " +"([#851](https://github.com/adap/flower/pull/851))" +msgstr "" +"Amélioration du message d'erreur dans `NumPyClient` " +"([#851](https://github.com/adap/flower/pull/851))" -#~ msgid "" -#~ "The evaluation instructions containing " -#~ "(global) model parameters received from " -#~ "the server and a dictionary of " -#~ "configuration values used to customize " -#~ "the local evaluation process." 
-#~ msgstr "" +#: ../../source/ref-changelog.md:1208 +msgid "" +"Improved PyTorch quickstart code example " +"([#852](https://github.com/adap/flower/pull/852))" +msgstr "" +"Exemple de code de démarrage rapide PyTorch amélioré " +"([#852](https://github.com/adap/flower/pull/852))" -#~ msgid "Returns" -#~ msgstr "Ressources" +#: ../../source/ref-changelog.md:1212 +msgid "" +"**Disabled final distributed evaluation** " +"([#800](https://github.com/adap/flower/pull/800))" +msgstr "" +"**Désactivé l'évaluation finale distribuée** " +"([#800](https://github.com/adap/flower/pull/800))" -#~ msgid "" -#~ "The evaluation result containing the " -#~ "loss on the local dataset and " -#~ "other details such as the number " -#~ "of local data examples used for " -#~ "evaluation." -#~ msgstr "" +#: ../../source/ref-changelog.md:1214 +msgid "" +"Prior behaviour was to perform a final round of distributed evaluation on" +" all connected clients, which is often not required (e.g., when using " +"server-side evaluation). The prior behaviour can be enabled by passing " +"`force_final_distributed_eval=True` to `start_server`." +msgstr "" +"Le comportement précédent consistait à effectuer un dernier tour " +"d'évaluation distribuée sur tous les clients connectés, ce qui n'est " +"souvent pas nécessaire (par exemple, lors de l'utilisation de " +"l'évaluation côté serveur). Le comportement précédent peut être activé en" +" passant `force_final_distributed_eval=True` à `start_server`." -#~ msgid "Return type" -#~ msgstr "" +#: ../../source/ref-changelog.md:1216 +msgid "" +"**Renamed q-FedAvg strategy** " +"([#802](https://github.com/adap/flower/pull/802))" +msgstr "" +"**Renommé stratégie q-FedAvg** " +"([#802](https://github.com/adap/flower/pull/802))" -#~ msgid "Refine the provided parameters using the locally held dataset." 
-#~ msgstr "" +#: ../../source/ref-changelog.md:1218 +msgid "" +"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " +"the notation given in the original paper (q-FFL is the optimization " +"objective, q-FedAvg is the proposed solver). Note the original (now " +"deprecated) `QffedAvg` class is still available for compatibility reasons" +" (it will be removed in a future release)." +msgstr "" +"La stratégie nommée `QffedAvg` a été renommée en `QFedAvg` pour mieux " +"refléter la notation donnée dans l'article original (q-FFL est l'objectif" +" d'optimisation, q-FedAvg est le solveur proposé). Notez que la classe " +"`QffedAvg` originale (maintenant obsolète) est toujours disponible pour " +"des raisons de compatibilité (elle sera supprimée dans une prochaine " +"version)." -#~ msgid "" -#~ "The training instructions containing (global)" -#~ " model parameters received from the " -#~ "server and a dictionary of configuration" -#~ " values used to customize the local" -#~ " training process." -#~ msgstr "" +#: ../../source/ref-changelog.md:1220 +msgid "" +"**Deprecated and renamed code example** `simulation_pytorch` **to** " +"`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" +msgstr "" +"**Exemple de code déprécié et renommé** `simulation_pytorch` **en** " +"`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" -#~ msgid "" -#~ "The training result containing updated " -#~ "parameters and other details such as " -#~ "the number of local training examples" -#~ " used for training." -#~ msgstr "" +#: ../../source/ref-changelog.md:1222 +msgid "" +"This example has been replaced by a new example. The new example is based" +" on the experimental virtual client engine, which will become the new " +"default way of doing most types of large-scale simulations in Flower. The" +" existing example was kept for reference purposes, but it might be " +"removed in the future." 
+msgstr "" +"Cet exemple a été remplacé par un nouvel exemple. Le nouvel exemple est " +"basé sur le moteur expérimental du client virtuel, qui deviendra la " +"nouvelle méthode par défaut pour effectuer la plupart des types de " +"simulations à grande échelle dans Flower. L'exemple existant a été " +"conservé à des fins de référence, mais il pourrait être supprimé à " +"l'avenir." -#~ msgid "Return the current local model parameters." -#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" +#: ../../source/ref-changelog.md:1224 +msgid "v0.16.0 (2021-05-11)" +msgstr "v0.16.0 (2021-05-11)" -#~ msgid "" -#~ "The get parameters instructions received " -#~ "from the server containing a dictionary" -#~ " of configuration values." -#~ msgstr "" +#: ../../source/ref-changelog.md:1228 +msgid "" +"**New built-in strategies** " +"([#549](https://github.com/adap/flower/pull/549))" +msgstr "" +"**Nouvelles stratégies intégrées** " +"([#549](https://github.com/adap/flower/pull/549))" -#~ msgid "The current local model parameters." -#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" +#: ../../source/ref-changelog.md:1230 +msgid "(abstract) FedOpt" +msgstr "(résumé) FedOpt" -#~ msgid "Return set of client's properties." -#~ msgstr "" +#: ../../source/ref-changelog.md:1233 +msgid "" +"**Custom metrics for server and strategies** " +"([#717](https://github.com/adap/flower/pull/717))" +msgstr "" +"**Métriques personnalisées pour le serveur et les stratégies** " +"([#717](https://github.com/adap/flower/pull/717))" -#~ msgid "" -#~ "The get properties instructions received " -#~ "from the server containing a dictionary" -#~ " of configuration values." -#~ msgstr "" +#: ../../source/ref-changelog.md:1235 +msgid "" +"The Flower server is now fully task-agnostic, all remaining instances of " +"task-specific metrics (such as `accuracy`) have been replaced by custom " +"metrics dictionaries. 
Flower 0.15 introduced the capability to pass a " +"dictionary containing custom metrics from client to server. As of this " +"release, custom metrics replace task-specific metrics on the server." +msgstr "" +"Le serveur Flower est maintenant totalement agnostique, toutes les " +"instances restantes de métriques spécifiques à une tâche (telles que " +"`accuracy`) ont été remplacées par des dictionnaires de métriques " +"personnalisées. Flower 0.15 a introduit la possibilité de passer un " +"dictionnaire contenant des métriques personnalisées du client au serveur." +" À partir de cette version, les métriques personnalisées remplacent les " +"métriques spécifiques à une tâche sur le serveur." -#~ msgid "The current client properties." -#~ msgstr "" +#: ../../source/ref-changelog.md:1237 +#, fuzzy +msgid "" +"Custom metric dictionaries are now used in two user-facing APIs: they are" +" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " +"they enable evaluation functions passed to built-in strategies (via " +"`eval_fn`) to return more than two evaluation metrics. Strategies can " +"even return *aggregated* metrics dictionaries for the server to keep " +"track of." +msgstr "" +"Les dictionnaires de métriques personnalisés sont maintenant utilisés " +"dans deux API orientées vers l'utilisateur : ils sont renvoyés par les " +"méthodes de stratégie `aggregate_fit`/`aggregate_evaluate` et ils " +"permettent aux fonctions d'évaluation passées aux stratégies intégrées " +"(via `eval_fn`) de renvoyer plus de deux métriques d'évaluation. Les " +"stratégies peuvent même renvoyer des dictionnaires de métriques " +"*agrégées* pour que le serveur puisse en garder la trace." -#~ msgid "Start a Flower client node which connects to a Flower server." 
-#~ msgstr "" +#: ../../source/ref-changelog.md:1239 +#, fuzzy +msgid "" +"Strategy implementations should migrate their `aggregate_fit` and " +"`aggregate_evaluate` methods to the new return type (e.g., by simply " +"returning an empty `{}`), server-side evaluation functions should migrate" +" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." +msgstr "" +"Les implémentations de Strategy doivent migrer leurs méthodes " +"`aggregate_fit` et `aggregate_evaluate` vers le nouveau type de retour " +"(par exemple, en renvoyant simplement un `{}` vide), les fonctions " +"d'évaluation côté serveur doivent migrer de `return loss, accuracy` à " +"`return loss, {\"accuracy\": accuracy}`." -#~ msgid "" -#~ "The IPv4 or IPv6 address of the" -#~ " server. If the Flower server runs" -#~ " on the same machine on port " -#~ "8080, then `server_address` would be " -#~ "`\"[::]:8080\"`." -#~ msgstr "" -#~ msgid "An implementation of the abstract base class `flwr.client.Client`." -#~ msgstr "" -#~ msgid "" -#~ "The maximum length of gRPC messages " -#~ "that can be exchanged with the " -#~ "Flower server. The default should be " -#~ "sufficient for most models. Users who" -#~ " train very large models might need" -#~ " to increase this value. 
Note that" -#~ " the Flower server needs to be " -#~ "started with the same value (see " -#~ "`flwr.server.start_server`), otherwise it will " -#~ "not know about the increased limit " -#~ "and block larger messages." -#~ msgstr "" +#: ../../source/ref-changelog.md:1245 +msgid "" +"Earlier versions of Flower were often migrated to new APIs, while " +"maintaining compatibility with legacy APIs. This release introduces " +"detailed warning messages if usage of deprecated APIs is detected. The " +"new warning messages often provide details on how to migrate to more " +"recent APIs, thus easing the transition from one release to another." +msgstr "" +"Les versions antérieures de Flower ont souvent été migrées vers de " +"nouvelles API, tout en maintenant la compatibilité avec les anciennes " +"API. Cette version introduit des messages d'avertissement détaillés si " +"l'utilisation d'API obsolètes est détectée. Les nouveaux messages " +"d'avertissement fournissent souvent des détails sur la façon de migrer " +"vers des API plus récentes, facilitant ainsi la transition d'une version " +"à l'autre." -#~ msgid "" -#~ "The PEM-encoded root certificates as " -#~ "a byte string or a path string." -#~ " If provided, a secure connection " -#~ "using the certificates will be " -#~ "established to an SSL-enabled Flower " -#~ "server." -#~ msgstr "" +#: ../../source/ref-changelog.md:1247 +msgid "" +"Improved docs and docstrings " +"([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" +msgstr "" +"Amélioration des docs et des docstrings " +"([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" -#~ msgid "" -#~ "DEPRECATED - USE 'transport' INSTEAD. " -#~ "Defines whether or not the client " -#~ "is interacting with the server using " -#~ "the experimental REST API. 
This feature" -#~ " is experimental, it might change " -#~ "considerably in future versions of " -#~ "Flower." -#~ msgstr "" -#~ "DÉPRÉCIÉ - UTILISER 'transport' À LA " -#~ "PLACE Définit si le client interagit " -#~ "ou non avec le serveur à l'aide" -#~ " de l'API REST expérimentale. Cette " -#~ "fonctionnalité est expérimentale, elle " -#~ "pourrait changer considérablement dans les " -#~ "futures versions de Flower." +#: ../../source/ref-changelog.md:1249 +msgid "MXNet example and documentation" +msgstr "Exemple et documentation MXNet" -#~ msgid "" -#~ "Configure the transport layer. Allowed " +#: ../../source/ref-changelog.md:1251 +msgid "" +"FedBN implementation in example PyTorch: From Centralized To Federated " +"([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" +msgstr "" +"Mise en œuvre de FedBN dans l'exemple PyTorch : De la centralisation à la" +" fédération ([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" + +#: ../../source/ref-changelog.md:1255 +msgid "" +"**Serialization-agnostic server** " +"([#721](https://github.com/adap/flower/pull/721))" +msgstr "" +"**Serveur agnostique de sérialisation** " +"([#721](https://github.com/adap/flower/pull/721))" + +#: ../../source/ref-changelog.md:1257 +msgid "" +"The Flower server is now fully serialization-agnostic. Prior usage of " +"class `Weights` (which represents parameters as deserialized NumPy " +"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). " +"`Parameters` objects are fully serialization-agnostic and represents " +"parameters as byte arrays, the `tensor_type` attributes indicates how " +"these byte arrays should be interpreted (e.g., for " +"serialization/deserialization)." +msgstr "" +"Le serveur Flower est désormais totalement agnostique en matière de " +"sérialisation. 
L'utilisation antérieure de la classe `Weights` (qui " +"représente les paramètres sous forme de tableaux NumPy désérialisés) a " +"été remplacée par la classe `Parameters` (par exemple, dans `Strategy`). " +"Les objets `Parameters` sont totalement agnostiques en matière de " +"sérialisation et représentent les paramètres sous forme de tableaux " +"d'octets, les attributs `tensor_type` indiquent comment ces tableaux " +"d'octets doivent être interprétés (par exemple, pour la " +"sérialisation/désérialisation)." + +#: ../../source/ref-changelog.md:1259 +msgid "" +"Built-in strategies implement this approach by handling serialization and" +" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " +"implementations should update to the slightly changed Strategy method " +"definitions. Strategy authors can consult PR " +"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" +" easily migrate to the new format." +msgstr "" +"Les stratégies intégrées mettent en œuvre cette approche en gérant en " +"interne la sérialisation et la désérialisation de `Weights`. Les " +"implémentations de stratégies personnalisées ou tierces doivent être " +"mises à jour avec les définitions de méthodes de stratégie légèrement " +"modifiées. Les auteurs de stratégies peuvent consulter le PR " +"[#721](https://github.com/adap/flower/pull/721) pour voir comment les " +"stratégies peuvent facilement migrer vers le nouveau format." 
+ +#: ../../source/ref-changelog.md:1261 +msgid "" +"Deprecated `flwr.server.Server.evaluate`, use " +"`flwr.server.Server.evaluate_round` instead " +"([#717](https://github.com/adap/flower/pull/717))" +msgstr "" +"Déclassé `flwr.server.Server.evaluate`, utiliser " +"`flwr.server.Server.evaluate_round` à la place " +"([#717](https://github.com/adap/flower/pull/717))" + +#: ../../source/ref-changelog.md:1263 +msgid "v0.15.0 (2021-03-12)" +msgstr "v0.15.0 (2021-03-12)" + +#: ../../source/ref-changelog.md:1267 +msgid "" +"**Server-side parameter initialization** " +"([#658](https://github.com/adap/flower/pull/658))" +msgstr "" +"**Initialisation des paramètres côté serveur** " +"([#658](https://github.com/adap/flower/pull/658))" + +#: ../../source/ref-changelog.md:1269 +msgid "" +"Model parameters can now be initialized on the server-side. Server-side " +"parameter initialization works via a new `Strategy` method called " +"`initialize_parameters`." +msgstr "" +"Les paramètres du modèle peuvent maintenant être initialisés côté " +"serveur. L'initialisation des paramètres côté serveur fonctionne via une " +"nouvelle méthode `Strategy` appelée `initialize_parameters`." + +#: ../../source/ref-changelog.md:1271 +msgid "" +"Built-in strategies support a new constructor argument called " +"`initial_parameters` to set the initial parameters. Built-in strategies " +"will provide these initial parameters to the server on startup and then " +"delete them to free the memory afterwards." +msgstr "" +"Les stratégies intégrées prennent en charge un nouvel argument du " +"constructeur appelé `initial_parameters` pour définir les paramètres " +"initiaux. Les stratégies intégrées fourniront ces paramètres initiaux au " +"serveur au démarrage et les supprimeront ensuite pour libérer la mémoire." 
+ +#: ../../source/ref-changelog.md:1290 +msgid "" +"If no initial parameters are provided to the strategy, the server will " +"continue to use the current behaviour (namely, it will ask one of the " +"connected clients for its parameters and use these as the initial global " +"parameters)." +msgstr "" +"Si aucun paramètre initial n'est fourni à la stratégie, le serveur " +"continuera à utiliser le comportement actuel (à savoir qu'il demandera à " +"l'un des clients connectés ses paramètres et les utilisera comme " +"paramètres globaux initiaux)." + +#: ../../source/ref-changelog.md:1294 +msgid "" +"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " +"`flwr.server.strategy.FedAvg`, which is equivalent)" +msgstr "" +"Déclasser `flwr.server.strategy.DefaultStrategy` (migrer vers " +"`flwr.server.strategy.FedAvg`, qui est équivalent)" + +#: ../../source/ref-changelog.md:1296 +msgid "v0.14.0 (2021-02-18)" +msgstr "v0.14.0 (2021-02-18)" + +#: ../../source/ref-changelog.md:1300 +msgid "" +"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " +"([#610](https://github.com/adap/flower/pull/610) " +"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" +msgstr "" +"**Généralisé** `Client.fit` **et** `Client.evaluate` **valeurs de " +"retour** ([#610](https://github.com/adap/flower/pull/610) " +"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" + +#: ../../source/ref-changelog.md:1302 +msgid "" +"Clients can now return an additional dictionary mapping `str` keys to " +"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " +"This means one can return almost arbitrary values from `fit`/`evaluate` " +"and make use of them on the server side!" +msgstr "" +"Les clients peuvent maintenant renvoyer un dictionnaire supplémentaire " +"associant les clés `str` aux valeurs des types suivants : `bool`, " +"`bytes`, `float`, `int`, `str`. 
Cela signifie que l'on peut renvoyer des " +"valeurs presque arbitraires de `fit`/`evaluate` et les utiliser du côté " +"du serveur !" + +#: ../../source/ref-changelog.md:1304 +msgid "" +"This improvement also allowed for more consistent return types between " +"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " +"dict)` representing the loss, number of examples, and a dictionary " +"holding arbitrary problem-specific values like accuracy." +msgstr "" +"Cette amélioration a également permis de rendre plus cohérents les types " +"de retour entre `fit` et `evaluate` : `evaluate` devrait maintenant " +"retourner un tuple `(float, int, dict)` représentant la perte, le nombre " +"d'exemples, et un dictionnaire contenant des valeurs arbitraires " +"spécifiques au problème comme la précision." + +#: ../../source/ref-changelog.md:1306 +msgid "" +"In case you wondered: this feature is compatible with existing projects, " +"the additional dictionary return value is optional. New code should " +"however migrate to the new return types to be compatible with upcoming " +"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " +"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " +"details." +msgstr "" +"Au cas où tu te poserais la question : cette fonctionnalité est " +"compatible avec les projets existants, la valeur de retour supplémentaire" +" du dictionnaire est facultative. Le nouveau code doit cependant migrer " +"vers les nouveaux types de retour pour être compatible avec les " +"prochaines versions de Flower (`fit` : `List[np.ndarray], int, Dict[str, " +"Scalar]`, `evaluate` : `float, int, Dict[str, Scalar]`). Voir l'exemple " +"ci-dessous pour plus de détails." 
+ +#: ../../source/ref-changelog.md:1308 +msgid "" +"*Code example:* note the additional dictionary return values in both " +"`FlwrClient.fit` and `FlwrClient.evaluate`:" +msgstr "" +"*Exemple de code:* note les valeurs de retour du dictionnaire " +"supplémentaires dans `FlwrClient.fit` et `FlwrClient.evaluate` :" + +#: ../../source/ref-changelog.md:1323 +msgid "" +"**Generalized** `config` **argument in** `Client.fit` **and** " +"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" +msgstr "" +"**Généralisé** `config` **argument dans** `Client.fit` **et** " +"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" + +#: ../../source/ref-changelog.md:1325 +msgid "" +"The `config` argument used to be of type `Dict[str, str]`, which means " +"that dictionary values were expected to be strings. The new release " +"generalizes this to enable values of the following types: `bool`, " +"`bytes`, `float`, `int`, `str`." +msgstr "" +"L'argument `config` était auparavant de type `Dict[str, str]`, ce qui " +"signifie que les valeurs du dictionnaire devaient être des chaînes. La " +"nouvelle version généralise cela pour permettre les valeurs des types " +"suivants : `bool`, `bytes`, `float`, `int`, `str`." + +#: ../../source/ref-changelog.md:1327 +msgid "" +"This means one can now pass almost arbitrary values to `fit`/`evaluate` " +"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" +"side and `int(config[\"epochs\"])` on the client side!" +msgstr "" +"Cela signifie que l'on peut maintenant passer des valeurs presque " +"arbitraires à `fit`/`evaluate` en utilisant le dictionnaire `config`. " +"Yay, plus de `str(epochs)` du côté serveur et `int(config[\"epochs\"])` " +"du côté client !" 
+ +#: ../../source/ref-changelog.md:1329 +msgid "" +"*Code example:* note that the `config` dictionary now contains non-`str` " +"values in both `Client.fit` and `Client.evaluate`:" +msgstr "" +"*Exemple de code:* Notez que le dictionnaire `config` contient maintenant" +" des valeurs autres que `str` dans `Client.fit` et `Client.evaluate` :" + +#: ../../source/ref-changelog.md:1346 +msgid "v0.13.0 (2021-01-08)" +msgstr "v0.13.0 (2021-01-08)" + +#: ../../source/ref-changelog.md:1350 +msgid "" +"New example: PyTorch From Centralized To Federated " +"([#549](https://github.com/adap/flower/pull/549))" +msgstr "" +"Nouvel exemple : PyTorch de centralisé à fédéré " +"([#549](https://github.com/adap/flower/pull/549))" + +#: ../../source/ref-changelog.md:1351 +msgid "Improved documentation" +msgstr "Amélioration de la documentation" + +#: ../../source/ref-changelog.md:1352 +msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" +msgstr "" +"Nouveau thème de documentation " +"([#551](https://github.com/adap/flower/pull/551))" + +#: ../../source/ref-changelog.md:1353 +msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" + +#: ../../source/ref-changelog.md:1354 +msgid "" +"Updated examples documentation " +"([#549](https://github.com/adap/flower/pull/549))" +msgstr "" +"Mise à jour de la documentation des exemples " +"([#549](https://github.com/adap/flower/pull/549))" + +#: ../../source/ref-changelog.md:1355 +msgid "" +"Removed obsolete documentation " +"([#548](https://github.com/adap/flower/pull/548))" +msgstr "" +"Suppression de la documentation obsolète " +"([#548](https://github.com/adap/flower/pull/548))" + +#: ../../source/ref-changelog.md:1357 +msgid "Bugfix:" +msgstr "Correction de bogues :" + +#: ../../source/ref-changelog.md:1359 +msgid "" +"`Server.fit` does not disconnect clients when finished, disconnecting the" +" clients is now 
handled in `flwr.server.start_server` " +"([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))." +msgstr "" +"`Server.fit` ne déconnecte pas les clients lorsqu'il est terminé, la " +"déconnexion des clients est maintenant gérée dans " +"`flwr.server.start_server` " +"([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))." + +#: ../../source/ref-changelog.md:1361 +msgid "v0.12.0 (2020-12-07)" +msgstr "v0.12.0 (2020-12-07)" + +#: ../../source/ref-changelog.md:1363 ../../source/ref-changelog.md:1379 +msgid "Important changes:" +msgstr "Changements importants :" + +#: ../../source/ref-changelog.md:1365 +msgid "" +"Added an example for embedded devices " +"([#507](https://github.com/adap/flower/pull/507))" +msgstr "" +"Ajout d'un exemple pour les périphériques embarqués " +"([#507](https://github.com/adap/flower/pull/507))" + +#: ../../source/ref-changelog.md:1366 +msgid "" +"Added a new NumPyClient (in addition to the existing KerasClient) " +"([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508))" +msgstr "" +"Ajout d'un nouveau NumPyClient (en plus du KerasClient existant) " +"([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508))" + +#: ../../source/ref-changelog.md:1367 +msgid "" +"Deprecated `flwr_example` package and started to migrate examples into " +"the top-level `examples` directory " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" +msgstr "" +"Déclassement du paquet `flwr_example` et migration des exemples dans le " +"répertoire de premier niveau `examples` " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" + +#: ../../source/ref-changelog.md:1369 +msgid "v0.11.0 (2020-11-30)" +msgstr "v0.11.0 (2020-11-30)" + +#: ../../source/ref-changelog.md:1371 +msgid 
"Incompatible changes:" +msgstr "Changements incompatibles :" + +#: ../../source/ref-changelog.md:1373 +msgid "" +"Renamed strategy methods " +"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " +"Flower's public APIs. Other public methods/functions (e.g., every method " +"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " +"which is why we're removing it from the four methods in Strategy. To " +"migrate rename the following `Strategy` methods accordingly:" +msgstr "" +"Renommé les méthodes de stratégie " +"([#486](https://github.com/adap/flower/pull/486)) pour unifier le nommage" +" des API publiques de Flower. D'autres méthodes/fonctions publiques (par " +"exemple, toutes les méthodes de `Client`, mais aussi `Strategy.evaluate`)" +" n'utilisent pas le préfixe `on_`, c'est pourquoi nous le supprimons des " +"quatre méthodes de Stratégie. Pour migrer, renommez les méthodes de " +"`Strategy` suivantes en conséquence :" + +#: ../../source/ref-changelog.md:1374 +msgid "`on_configure_evaluate` => `configure_evaluate`" +msgstr "`on_configure_evaluate` => `configure_evaluate`" + +#: ../../source/ref-changelog.md:1375 +msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" +msgstr "`on_aggregate_evaluate` => `aggregate_evaluate`" + +#: ../../source/ref-changelog.md:1376 +msgid "`on_configure_fit` => `configure_fit`" +msgstr "`on_configure_fit` => `configure_fit`" + +#: ../../source/ref-changelog.md:1377 +msgid "`on_aggregate_fit` => `aggregate_fit`" +msgstr "`on_aggregate_fit` => `aggregate_fit`" + +#: ../../source/ref-changelog.md:1381 +msgid "" +"Deprecated `DefaultStrategy` " +"([#479](https://github.com/adap/flower/pull/479)). To migrate use " +"`FedAvg` instead." +msgstr "" +"Déclassé `DefaultStrategy` " +"([#479](https://github.com/adap/flower/pull/479)). Pour migrer, utilisez " +"`FedAvg` à la place." 
+ +#: ../../source/ref-changelog.md:1382 +msgid "" +"Simplified examples and baselines " +"([#484](https://github.com/adap/flower/pull/484))." +msgstr "" +"Exemples simplifiés et lignes de base " +"([#484](https://github.com/adap/flower/pull/484))." + +#: ../../source/ref-changelog.md:1383 +msgid "" +"Removed presently unused `on_conclude_round` from strategy interface " +"([#483](https://github.com/adap/flower/pull/483))." +msgstr "" +"Suppression de `on_conclude_round` actuellement inutilisé de l'interface " +"de stratégie ([#483](https://github.com/adap/flower/pull/483))." + +#: ../../source/ref-changelog.md:1384 +msgid "" +"Set minimal Python version to 3.6.1 instead of 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." +msgstr "" +"Fixe la version minimale de Python à 3.6.1 au lieu de 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." + +#: ../../source/ref-changelog.md:1385 +msgid "" +"Improved `Strategy` docstrings " +"([#470](https://github.com/adap/flower/pull/470))." +msgstr "" +"Amélioration des docstrings `Stratégie` " +"([#470](https://github.com/adap/flower/pull/470))." + +#: ../../source/ref-example-projects.rst:2 +#, fuzzy +msgid "Example projects" +msgstr "Exemples de PyTorch" + +#: ../../source/ref-example-projects.rst:4 +msgid "" +"Flower comes with a number of usage examples. The examples demonstrate " +"how Flower can be used to federate different kinds of existing machine " +"learning pipelines, usually leveraging popular machine learning " +"frameworks such as `PyTorch `_ or `TensorFlow " +"`_." +msgstr "" +"Flower est livré avec un certain nombre d'exemples d'utilisation, qui " +"montrent comment Flower peut être utilisé pour fédérer différents types " +"de pipelines d'apprentissage automatique existants, qui s'appuient " +"généralement sur des frameworks d'apprentissage automatique populaires " +"tels que `PyTorch `_ ou `TensorFlow " +"`_." 
+ +#: ../../source/ref-example-projects.rst:9 +#, fuzzy +msgid "The following examples are available as standalone projects." +msgstr "Les exemples suivants sont disponibles sous forme de projets autonomes." + +#: ../../source/ref-example-projects.rst:12 +#, fuzzy +msgid "Quickstart TensorFlow/Keras" +msgstr "Démarrage rapide de TensorFlow" + +#: ../../source/ref-example-projects.rst:14 +msgid "" +"The TensorFlow/Keras quickstart example shows CIFAR-10 image " +"classification with MobileNetV2:" +msgstr "" +"L'exemple de démarrage rapide TensorFlow/Keras montre la classification " +"d'images CIFAR-10 avec MobileNetV2 :" + +#: ../../source/ref-example-projects.rst:17 +#, fuzzy +msgid "" +"`Quickstart TensorFlow (Code) " +"`_" +msgstr "" +"`Quickstart TensorFlow (Code) " +"`_" + +#: ../../source/ref-example-projects.rst:19 +#, fuzzy +msgid ":doc:`Quickstart TensorFlow (Tutorial) `" +msgstr "" +"`Quickstart TensorFlow (Tutorial) `_" + +#: ../../source/ref-example-projects.rst:20 +msgid "" +"`Quickstart TensorFlow (Blog Post) `_" +msgstr "" +"`Quickstart TensorFlow (Blog Post) `_" + +#: ../../source/ref-example-projects.rst:24 +#: ../../source/tutorial-quickstart-pytorch.rst:4 +msgid "Quickstart PyTorch" +msgstr "Démarrage rapide de PyTorch" + +#: ../../source/ref-example-projects.rst:26 +msgid "" +"The PyTorch quickstart example shows CIFAR-10 image classification with a" +" simple Convolutional Neural Network:" +msgstr "" +"L'exemple de démarrage rapide PyTorch montre la classification d'images " +"CIFAR-10 avec un simple réseau neuronal convolutif :" + +#: ../../source/ref-example-projects.rst:29 +#, fuzzy +msgid "" +"`Quickstart PyTorch (Code) " +"`_" +msgstr "" +"`Quickstart PyTorch (Code) " +"`_" + +#: ../../source/ref-example-projects.rst:31 +#, fuzzy +msgid ":doc:`Quickstart PyTorch (Tutorial) `" +msgstr "" +"`Quickstart PyTorch (Tutorial) `_" + +#: ../../source/ref-example-projects.rst:34 +msgid "PyTorch: From Centralized To Federated" +msgstr "PyTorch : De la 
centralisation à la fédération" + +#: ../../source/ref-example-projects.rst:36 +msgid "" +"This example shows how a regular PyTorch project can be federated using " +"Flower:" +msgstr "" +"Cet exemple montre comment un projet PyTorch ordinaire peut être fédéré à" +" l'aide de Flower :" + +#: ../../source/ref-example-projects.rst:38 +#, fuzzy +msgid "" +"`PyTorch: From Centralized To Federated (Code) " +"`_" +msgstr "" +"`PyTorch : De la centralisation à la fédération (Code) " +"`_" + +#: ../../source/ref-example-projects.rst:40 +#, fuzzy +msgid "" +":doc:`PyTorch: From Centralized To Federated (Tutorial) `" +msgstr "" +"`PyTorch : De la centralisation à la fédération (Tutoriel) " +"`_" + +#: ../../source/ref-example-projects.rst:44 +msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" +msgstr "Apprentissage fédéré sur Raspberry Pi et Nvidia Jetson" + +#: ../../source/ref-example-projects.rst:46 +msgid "" +"This example shows how Flower can be used to build a federated learning " +"system that run across Raspberry Pi and Nvidia Jetson:" +msgstr "" +"Cet exemple montre comment Flower peut être utilisé pour construire un " +"système d'apprentissage fédéré qui fonctionne sur Raspberry Pi et Nvidia " +"Jetson :" + +#: ../../source/ref-example-projects.rst:49 +#, fuzzy +msgid "" +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " +"`_" +msgstr "" +"`L'apprentissage fédéré sur Raspberry Pi et Nvidia Jetson (Code) " +"`_" + +#: ../../source/ref-example-projects.rst:51 +msgid "" +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " +"`_" +msgstr "" +"`L'apprentissage fédéré sur Raspberry Pi et Nvidia Jetson (Blog Post) " +"`_" + +#: ../../source/ref-faq.rst:4 +msgid "" +"This page collects answers to commonly asked questions about Federated " +"Learning with Flower." +msgstr "" +"Cette page rassemble les réponses aux questions les plus fréquemment " +"posées sur l'apprentissage fédéré avec Flower." 
+ +#: ../../source/ref-faq.rst +#, fuzzy +msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" +msgstr "" +":fa:`eye,mr-1` Flower peut-il fonctionner sur les notebooks Jupyter / " +"Google Colab ?" + +#: ../../source/ref-faq.rst:9 +msgid "" +"Yes, it can! Flower even comes with a few under-the-hood optimizations to" +" make it work even better on Colab. Here's a quickstart example:" +msgstr "" +"Oui, c'est possible ! Flower est même livré avec quelques optimisations " +"pour qu'il fonctionne encore mieux sur Colab. Voici un exemple de " +"démarrage rapide :" + +#: ../../source/ref-faq.rst:11 +#, fuzzy +msgid "" +"`Flower simulation PyTorch " +"`_" +msgstr "" +"`Flower Quickstart (TensorFlow/Keras) " +"`_" + +#: ../../source/ref-faq.rst:12 +#, fuzzy +msgid "" +"`Flower simulation TensorFlow/Keras " +"`_" +msgstr "" +"`Flower Quickstart (TensorFlow/Keras) " +"`_" + +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" +msgstr "" +":fa:`eye,mr-1` Comment puis-je faire fonctionner l'apprentissage fédéré " +"sur un Raspberry Pi ?" + +#: ../../source/ref-faq.rst:16 +#, fuzzy +msgid "" +"Find the `blog post about federated learning on embedded device here " +"`_" +" and the corresponding `GitHub code example " +"`_." +msgstr "" +"Trouve le `blog post about federated learning on embedded device ici " +"`_" +" et l'exemple de code GitHub correspondant " +"`_." + +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" +msgstr "" +":fa:`eye,mr-1` Est-ce que Flower prend en charge l'apprentissage fédéré " +"sur les appareils Android ?" + +#: ../../source/ref-faq.rst:20 +#, fuzzy +msgid "" +"Yes, it does. Please take a look at our `blog post " +"`_ or check out the code examples:" +msgstr "" +"Oui. Jetez un coup d'œil à notre `blog post " +"`_ ou consultez l'`exemple de code Android sur GitHub " +"`_."
+ +#: ../../source/ref-faq.rst:22 +msgid "" +"`Android Kotlin example `_" +msgstr "" + +#: ../../source/ref-faq.rst:23 +msgid "`Android Java example `_" +msgstr "" + +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" +msgstr "" +":fa:`eye,mr-1` Puis-je combiner l'apprentissage fédéré avec la blockchain" +" ?" + +#: ../../source/ref-faq.rst:27 +msgid "" +"Yes, of course. A list of available examples using Flower within a " +"blockchain environment is available here:" +msgstr "" +"Oui, bien sûr, une liste d'exemples disponibles utilisant Flower dans un " +"environnement blockchain est disponible ici :" + +#: ../../source/ref-faq.rst:30 +msgid "`FLock: A Decentralised AI Training Platform `_." +msgstr "" + +#: ../../source/ref-faq.rst:30 +msgid "Contribute to on-chain training the model and earn rewards." +msgstr "" + +#: ../../source/ref-faq.rst:31 +#, fuzzy +msgid "Local blockchain with federated learning simulation." +msgstr "Mise à l'échelle de l'apprentissage fédéré" + +#: ../../source/ref-faq.rst:32 +msgid "" +"`Flower meets Nevermined GitHub Repository `_." +msgstr "" +"`Flower meets Nevermined GitHub Repository `_." + +#: ../../source/ref-faq.rst:33 +msgid "" +"`Flower meets Nevermined YouTube video " +"`_." +msgstr "" +"`Flower rencontre Nevermined vidéo YouTube " +"`_." + +#: ../../source/ref-faq.rst:34 +#, fuzzy +msgid "" +"`Flower meets KOSMoS `_." +msgstr "" +"`Flower rencontre KOSMoS `_." + +#: ../../source/ref-faq.rst:35 +msgid "" +"`Flower meets Talan blog post `_ ." +msgstr "" +"`Flower meets Talan blog post `_ ." + +#: ../../source/ref-faq.rst:36 +msgid "" +"`Flower meets Talan GitHub Repository " +"`_ ." +msgstr "" +"`Flower rencontre Talan Dépôt GitHub " +"`_ ." 
+ +#: ../../source/ref-telemetry.md:1 +msgid "Telemetry" +msgstr "Télémétrie" + +#: ../../source/ref-telemetry.md:3 +msgid "" +"The Flower open-source project collects **anonymous** usage metrics to " +"make well-informed decisions to improve Flower. Doing this enables the " +"Flower team to understand how Flower is used and what challenges users " +"might face." +msgstr "" +"Le projet open-source Flower recueille des mesures d'utilisation " +"**anonymes** afin de prendre des décisions éclairées pour améliorer " +"Flower. Cela permet à l'équipe de Flower de comprendre comment Flower est" +" utilisé et quels sont les défis auxquels les utilisateurs peuvent être " +"confrontés." + +#: ../../source/ref-telemetry.md:5 +msgid "" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users that do not want to share anonymous usage metrics." +msgstr "" +"**Flower est un cadre convivial pour l'IA collaborative et la science des" +" données.** En restant fidèle à cette déclaration, Flower permet de " +"désactiver facilement la télémétrie pour les utilisateurs qui ne " +"souhaitent pas partager des mesures d'utilisation anonymes." + +#: ../../source/ref-telemetry.md:7 +msgid "Principles" +msgstr "Principes" + +#: ../../source/ref-telemetry.md:9 +msgid "We follow strong principles guarding anonymous usage metrics collection:" +msgstr "" +"Nous suivons des principes stricts concernant la collecte de données " +"anonymes sur l'utilisation :" + +#: ../../source/ref-telemetry.md:11 +msgid "" +"**Optional:** You will always be able to disable telemetry; read on to " +"learn “[How to opt-out](#how-to-opt-out)”." +msgstr "" +"**Optionnel:** Tu pourras toujours désactiver la télémétrie ; lis la " +"suite pour apprendre \"[Comment se désengager](#how-to-opt-out)\"." 
+ +#: ../../source/ref-telemetry.md:12 +msgid "" +"**Anonymous:** The reported usage metrics are anonymous and do not " +"contain any personally identifiable information (PII). See “[Collected " +"metrics](#collected-metrics)” to understand what metrics are being " +"reported." +msgstr "" +"**Anonyme:** Les mesures d'utilisation rapportées sont anonymes et ne " +"contiennent aucune information personnelle identifiable (PII). Voir " +"\"[Collected metrics](#collected-metrics)\" pour comprendre quelles " +"mesures sont rapportées." + +#: ../../source/ref-telemetry.md:13 +msgid "" +"**Transparent:** You can easily inspect what anonymous metrics are being " +"reported; see the section “[How to inspect what is being reported](#how-" +"to-inspect-what-is-being-reported)”" +msgstr "" +"**Transparent:** Tu peux facilement inspecter les métriques anonymes qui " +"sont rapportées ; voir la section \"[Comment inspecter ce qui est " +"rapporté](#how-to-inspect-what-is-being-reported)\"" + +#: ../../source/ref-telemetry.md:14 +#, fuzzy +msgid "" +"**Open for feedback:** You can always reach out to us if you have " +"feedback; see the section “[How to contact us](#how-to-contact-us)” for " +"details." +msgstr "" +"**Ouvert pour les commentaires:** Tu peux toujours nous contacter si tu " +"as des commentaires ; voir la section \"[Comment nous contacter ](#how-" +"to-contact-us)\" pour plus de détails." + +#: ../../source/ref-telemetry.md:16 +msgid "How to opt-out" +msgstr "Comment se désinscrire" + +#: ../../source/ref-telemetry.md:18 +msgid "" +"When Flower starts, it will check for an environment variable called " +"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " +"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " +"client, simply do so by prepending your command as in:" +msgstr "" +"Lorsque Flower démarre, il vérifie la présence d'une variable " +"d'environnement appelée `FLWR_TELEMETRY_ENABLED`. 
La télémétrie peut " +"facilement être désactivée en réglant `FLWR_TELEMETRY_ENABLED=0`. En " +"supposant que tu démarres un serveur ou un client Flower, fais-le " +"simplement en faisant précéder ta commande de la façon suivante :" + +#: ../../source/ref-telemetry.md:24 +msgid "" +"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," +" `.bashrc` (or whatever configuration file applies to your environment) " +"to disable Flower telemetry permanently." +msgstr "" +"Tu peux aussi exporter `FLWR_TELEMETRY_ENABLED=0` dans, par exemple, " +"`.bashrc` (ou tout autre fichier de configuration qui s'applique à ton " +"environnement) pour désactiver la télémétrie de Flower de façon " +"permanente." + +#: ../../source/ref-telemetry.md:26 +msgid "Collected metrics" +msgstr "Mesures collectées" + +#: ../../source/ref-telemetry.md:28 +msgid "Flower telemetry collects the following metrics:" +msgstr "La télémétrie de Flower recueille les métriques suivantes :" + +#: ../../source/ref-telemetry.md:30 +msgid "" +"**Flower version.** Understand which versions of Flower are currently " +"being used. This helps us to decide whether we should invest effort into " +"releasing a patch version for an older version of Flower or instead use " +"the bandwidth to build new features." +msgstr "" +"**Version de Flower.** Comprendre quelles versions de Flower sont " +"actuellement utilisées. Cela nous aide à décider si nous devons investir " +"des efforts dans la publication d'une version corrective pour une version" +" plus ancienne de Flower ou si nous devons plutôt utiliser la bande " +"passante pour développer de nouvelles fonctionnalités." + +#: ../../source/ref-telemetry.md:32 +msgid "" +"**Operating system.** Enables us to answer questions such as: *Should we " +"create more guides for Linux, macOS, or Windows?*" +msgstr "" +"**Système d'exploitation.** Nous permet de répondre à des questions " +"telles que : *Faudrait-il créer plus de guides pour Linux, macOS ou " +"Windows ?*"
+ +#: ../../source/ref-telemetry.md:34 +msgid "" +"**Python version.** Knowing the Python version helps us, for example, to " +"decide whether we should invest effort into supporting old versions of " +"Python or stop supporting them and start taking advantage of new Python " +"features." +msgstr "" +"**Version de Python.** Connaître la version de Python nous aide, par " +"exemple, à décider si nous devons investir des efforts dans la prise en " +"charge des anciennes versions de Python ou cesser de les prendre en " +"charge et commencer à tirer parti des nouvelles fonctionnalités de " +"Python." + +#: ../../source/ref-telemetry.md:36 +msgid "" +"**Hardware properties.** Understanding the hardware environment that " +"Flower is being used in helps to decide whether we should, for example, " +"put more effort into supporting low-resource environments." +msgstr "" +"**Propriétés matérielles.** Comprendre l'environnement matériel dans " +"lequel Flower est utilisé permet de décider si nous devrions, par " +"exemple, faire plus d'efforts pour prendre en charge les environnements " +"à faibles ressources." + +#: ../../source/ref-telemetry.md:38 +msgid "" +"**Execution mode.** Knowing what execution mode Flower starts in enables " +"us to understand how heavily certain features are being used and better " +"prioritize based on that." +msgstr "" +"**Mode d'exécution.** Connaître le mode d'exécution dans lequel Flower " +"démarre nous permet de comprendre à quel point certaines fonctionnalités " +"sont utilisées et de mieux établir les priorités en fonction de cela." + +#: ../../source/ref-telemetry.md:40 +msgid "" +"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " +"time a Flower workload starts. This allows us to understand which device " +"types not only start Flower workloads but also successfully complete " +"them." +msgstr "" +"**Cluster.** La télémétrie Flower attribue un ID de cluster en mémoire " +"aléatoire à chaque fois qu'une charge de travail Flower démarre. 
Cela " +"nous permet de comprendre quels types d'appareils non seulement démarrent" +" les charges de travail Flower, mais aussi les terminent avec succès." + +#: ../../source/ref-telemetry.md:42 +msgid "" +"**Source.** Flower telemetry tries to store a random source ID in " +"`~/.flwr/source` the first time a telemetry event is generated. The " +"source ID is important to identify whether an issue is recurring or " +"whether an issue is triggered by multiple clusters running concurrently " +"(which often happens in simulation). For example, if a device runs " +"multiple workloads at the same time, and this results in an issue, then, " +"in order to reproduce the issue, multiple workloads must be started at " +"the same time." +msgstr "" +"**Source.** La télémétrie de Flower essaie de stocker un ID de source " +"aléatoire dans `~/.flwr/source` la première fois qu'un événement de " +"télémétrie est généré. L'ID de source est important pour identifier si un" +" problème est récurrent ou si un problème est déclenché par plusieurs " +"clusters fonctionnant simultanément (ce qui arrive souvent en " +"simulation). Par exemple, si un périphérique exécute plusieurs charges de" +" travail en même temps, et que cela entraîne un problème, alors, afin de " +"reproduire le problème, plusieurs charges de travail doivent être " +"démarrées en même temps." + +#: ../../source/ref-telemetry.md:44 +msgid "" +"You may delete the source ID at any time. If you wish for all events " +"logged under a specific source ID to be deleted, you can send a deletion " +"request mentioning the source ID to `telemetry@flower.ai`. All events " +"related to that source ID will then be permanently deleted." +msgstr "" +"Tu peux supprimer l'identifiant de la source à tout moment. Si tu " +"souhaites que tous les événements enregistrés sous un identifiant de " +"source spécifique soient supprimés, tu peux envoyer une demande de " +"suppression mentionnant l'identifiant de source à `telemetry@flower.ai`. 
" +"Tous les événements liés à cet identifiant de source seront alors " +"définitivement supprimés." + +#: ../../source/ref-telemetry.md:46 +msgid "" +"We will not collect any personally identifiable information. If you think" +" any of the metrics collected could be misused in any way, please [get in" +" touch with us](#how-to-contact-us). We will update this page to reflect " +"any changes to the metrics collected and publish changes in the " +"changelog." +msgstr "" +"Nous ne collecterons aucune information personnelle identifiable. Si tu " +"penses que l'une des métriques collectées pourrait être utilisée à " +"mauvais escient de quelque manière que ce soit, merci de [nous " +"contacter](#how-to-contact-us). Nous mettrons à jour cette page pour " +"refléter toute modification des métriques collectées et nous publierons " +"les changements dans le journal des modifications (changelog)." + +#: ../../source/ref-telemetry.md:48 +msgid "" +"If you think other metrics would be helpful for us to better guide our " +"decisions, please let us know! We will carefully review them; if we are " +"confident that they do not compromise user privacy, we may add them." +msgstr "" +"Si tu penses que d'autres mesures nous seraient utiles pour mieux " +"orienter nos décisions, fais-le nous savoir ! Nous les examinerons " +"attentivement ; si nous sommes convaincus qu'elles ne compromettent pas " +"la vie privée des utilisateurs, nous pourrons les ajouter." + +#: ../../source/ref-telemetry.md:50 +msgid "How to inspect what is being reported" +msgstr "Comment inspecter ce qui est rapporté" + +#: ../../source/ref-telemetry.md:52 +msgid "" +"We wanted to make it very easy for you to inspect what anonymous usage " +"metrics are reported. You can view all the reported telemetry information" +" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " +"is disabled by default. 
You may use logging independently from " +"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " +"without sending any metrics." +msgstr "" +"Nous avons voulu qu'il soit très facile pour toi d'inspecter les mesures " +"d'utilisation anonymes qui sont rapportées. Tu peux voir toutes les " +"informations de télémétrie rapportées en définissant la variable " +"d'environnement `FLWR_TELEMETRY_LOGGING=1`. La journalisation est " +"désactivée par défaut. Tu peux utiliser la journalisation indépendamment " +"de `FLWR_TELEMETRY_ENABLED` afin d'inspecter la fonction de télémétrie " +"sans envoyer de mesures." + +#: ../../source/ref-telemetry.md:58 +msgid "" +"The inspect Flower telemetry without sending any anonymous usage metrics," +" use both environment variables:" +msgstr "" +"Pour inspecter la télémétrie de Flower sans envoyer de métriques " +"d'utilisation anonymes, utilise les deux variables d'environnement :" + +#: ../../source/ref-telemetry.md:64 +msgid "How to contact us" +msgstr "Comment nous contacter" + +#: ../../source/ref-telemetry.md:66 +msgid "" +"We want to hear from you. If you have any feedback or ideas on how to " +"improve the way we handle anonymous usage metrics, reach out to us via " +"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " +"(`telemetry@flower.ai`)." +msgstr "" +"Nous voulons avoir de tes nouvelles. Si tu as des commentaires ou des " +"idées pour améliorer la façon dont nous traitons les mesures " +"d'utilisation anonymes, contacte-nous via " +"[Slack](https://flower.ai/join-slack/) (canal `#telemetry`) ou par " +"courriel (`telemetry@flower.ai`)." + +#: ../../source/tutorial-quickstart-android.rst:-1 +msgid "" +"Read this Federated Learning quickstart tutorial for creating an Android " +"app using Flower."
+msgstr "" + +#: ../../source/tutorial-quickstart-android.rst:4 +#, fuzzy +msgid "Quickstart Android" +msgstr "Démarrage rapide des Pandas" + +#: ../../source/tutorial-quickstart-android.rst:9 +#, fuzzy +msgid "" +"Let's build a federated learning system using TFLite and Flower on " +"Android!" +msgstr "" +"Construisons un système d'apprentissage fédéré en utilisant fastai et " +"Flower !" + +#: ../../source/tutorial-quickstart-android.rst:11 +#, fuzzy +msgid "" +"Please refer to the `full code example " +"`_ to learn " +"more." +msgstr "" +"Réfère-toi à l'exemple de code complet " +"`_ " +"pour en savoir plus." + +#: ../../source/tutorial-quickstart-fastai.rst:4 +msgid "Quickstart fastai" +msgstr "Démarrage rapide fastai" + +#: ../../source/tutorial-quickstart-fastai.rst:6 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train a " +"SqueezeNet model on MNIST using Flower and fastai. It is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `." +msgstr "" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." + +#: ../../source/tutorial-quickstart-fastai.rst:10 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:11 +msgid "Then, clone the code example directly from GitHub:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:18 +msgid "" +"This will create a new directory called `quickstart-fastai` containing " +"the following files:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:31 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:32 +#, fuzzy +msgid "Next, activate your environment, then run:" +msgstr "et active l'environnement virtuel avec :" + +#: ../../source/tutorial-quickstart-fastai.rst:41 +msgid "" +"This example by default runs the Flower Simulation Engine, creating a " +"federation of 10 nodes using `FedAvg `_ " +"as the aggregation strategy. 
The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" Let's run the project:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:54 +#: ../../source/tutorial-quickstart-huggingface.rst:61 +#: ../../source/tutorial-quickstart-mlx.rst:60 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:55 +#: ../../source/tutorial-quickstart-pytorch.rst:62 +#: ../../source/tutorial-quickstart-tensorflow.rst:62 +msgid "With default arguments you will see an output like this one:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:98 +#: ../../source/tutorial-quickstart-huggingface.rst:112 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:105 +#: ../../source/tutorial-quickstart-pytorch.rst:103 +#: ../../source/tutorial-quickstart-tensorflow.rst:103 +msgid "" +"You can also override the parameters defined in the " +"``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:108 +#, fuzzy +msgid "" +"Check the `source code `_ of this tutorial in ``examples/quickstart-fasai`` " +"in the Flower GitHub repository." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." + +#: ../../source/tutorial-quickstart-huggingface.rst:-1 +msgid "" +"Check out this Federating Learning quickstart tutorial for using Flower " +"with 🤗 HuggingFace Transformers in order to fine-tune an LLM." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:4 +msgid "Quickstart 🤗 Transformers" +msgstr "Démarrage rapide 🤗 Transformateurs" + +#: ../../source/tutorial-quickstart-huggingface.rst:6 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train a large " +"language model (LLM) on the `IMDB " +"`_ dataset using Flower" +" and the 🤗 Hugging Face Transformers library. 
It is recommended to create" +" a virtual environment and run everything within a :doc:`virtualenv " +"`." +msgstr "" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." + +#: ../../source/tutorial-quickstart-huggingface.rst:12 +msgid "" +"Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face " +"project. It will generate all the files needed to run, by default with " +"the Flower Simulation Engine, a federation of 10 nodes using |fedavg|_ " +"The dataset will be partitioned using |flowerdatasets|_'s " +"|iidpartitioner|_." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:17 +#: ../../source/tutorial-quickstart-mlx.rst:17 +#: ../../source/tutorial-quickstart-pytorch.rst:18 +#: ../../source/tutorial-quickstart-tensorflow.rst:18 +#, fuzzy +msgid "" +"Now that we have a rough idea of what this example is about, let's get " +"started. First, install Flower in your new environment:" +msgstr "" +"Maintenant que nous avons une idée approximative de ce qui se passe, " +"commençons. Nous devons d'abord installer Flower. Tu peux le faire en " +"lançant :" + +#: ../../source/tutorial-quickstart-huggingface.rst:25 +msgid "" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``HuggingFace``), give a name to your " +"project, and type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:33 +#: ../../source/tutorial-quickstart-mlx.rst:32 +#: ../../source/tutorial-quickstart-pytorch.rst:34 +#: ../../source/tutorial-quickstart-tensorflow.rst:34 +msgid "" +"After running it you'll notice a new directory with your project name has" +" been created. 
It should have the following structure:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:47 +#: ../../source/tutorial-quickstart-mlx.rst:46 +#: ../../source/tutorial-quickstart-pytorch.rst:48 +#: ../../source/tutorial-quickstart-tensorflow.rst:48 +msgid "" +"If you haven't yet installed the project and its dependencies, you can do" +" so by:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:54 +#: ../../source/tutorial-quickstart-pytorch.rst:55 +#: ../../source/tutorial-quickstart-tensorflow.rst:55 +msgid "To run the project, do:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:102 +msgid "You can also run the project with GPU as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:109 +msgid "" +"This will use the default arguments where each ``ClientApp`` will use 2 " +"CPUs and at most 4 ``ClientApp``\\s will run in a given GPU." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:120 +#: ../../source/tutorial-quickstart-mlx.rst:110 +#: ../../source/tutorial-quickstart-pytorch.rst:111 +msgid "" +"What follows is an explanation of each component in the project you just " +"created: dataset partition, the model, defining the ``ClientApp`` and " +"defining the ``ServerApp``." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:124 +#: ../../source/tutorial-quickstart-mlx.rst:114 +#: ../../source/tutorial-quickstart-pytorch.rst:115 +#: ../../source/tutorial-quickstart-tensorflow.rst:112 +#, fuzzy +msgid "The Data" +msgstr "Chargement des données" + +#: ../../source/tutorial-quickstart-huggingface.rst:126 +msgid "" +"This tutorial uses |flowerdatasets|_ to easily download and partition the" +" `IMDB `_ dataset. In " +"this example you'll make use of the |iidpartitioner|_ to generate " +"``num_partitions`` partitions. You can choose |otherpartitioners|_ " +"available in Flower Datasets. 
To tokenize the text, we will also load the" +" tokenizer from the pre-trained Transformer model that we'll use during " +"training - more on that in the next section. Each ``ClientApp`` will call" +" this function to create dataloaders with the data that correspond to " +"their data partition." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:171 +#: ../../source/tutorial-quickstart-mlx.rst:155 +#: ../../source/tutorial-quickstart-pytorch.rst:150 +#: ../../source/tutorial-quickstart-tensorflow.rst:139 +#, fuzzy +msgid "The Model" +msgstr "Entraîne le modèle" + +#: ../../source/tutorial-quickstart-huggingface.rst:173 +#, fuzzy +msgid "" +"We will leverage 🤗 Hugging Face to federate the training of language " +"models over multiple clients using Flower. More specifically, we will " +"fine-tune a pre-trained Transformer model (|berttiny|_) for sequence " +"classification over the dataset of IMDB ratings. The end goal is to " +"detect if a movie rating is positive or negative. If you have access to " +"larger GPUs, feel free to use larger models!" +msgstr "" +"Nous nous appuierons sur Hugging Face pour fédérer l'entraînement de " +"modèles de langage sur plusieurs clients à l'aide de Flower. Plus " +"précisément, nous mettrons au point un modèle Transformer pré-entraîné " +"(distilBERT) pour la classification de séquences sur un ensemble de " +"données d'évaluations IMDB. L'objectif final est de détecter si " +"l'évaluation d'un film est positive ou négative." + +#: ../../source/tutorial-quickstart-huggingface.rst:185 +msgid "" +"Note that here, ``model_name`` is a string that will be loaded from the " +"``Context`` in the ClientApp and ServerApp." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:188 +msgid "" +"In addition to loading the pretrained model weights and architecture, we " +"also include two utility functions to perform both training (i.e. " +"``train()``) and evaluation (i.e. ``test()``) using the above model. 
" +"These functions should look fairly familiar if you have some prior " +"experience with PyTorch. Note these functions do not have anything " +"specific to Flower. That being said, the training function will normally " +"be called, as we'll see later, from a Flower client passing its own data." +" In summary, your clients can use standard training/testing functions to " +"perform local training or evaluation:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:228 +#: ../../source/tutorial-quickstart-mlx.rst:199 +#: ../../source/tutorial-quickstart-pytorch.rst:224 +#: ../../source/tutorial-quickstart-tensorflow.rst:168 +#, fuzzy +msgid "The ClientApp" +msgstr "client" + +#: ../../source/tutorial-quickstart-huggingface.rst:230 +msgid "" +"The main changes we have to make to use 🤗 Hugging Face with Flower will " +"be found in the ``get_weights()`` and ``set_weights()`` functions. Under " +"the hood, the ``transformers`` library uses PyTorch, which means we can " +"reuse the ``get_weights()`` and ``set_weights()`` code that we defined in" +" the :doc:`Quickstart PyTorch ` tutorial. As" +" a reminder, in ``get_weights()``, PyTorch model parameters are extracted" +" and represented as a list of NumPy arrays. The ``set_weights()`` " +"function that's the opposite: given a list of NumPy arrays it applies " +"them to an existing PyTorch model. Doing this in fairly easy in PyTorch." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:241 +#: ../../source/tutorial-quickstart-pytorch.rst:234 +msgid "" +"The specific implementation of ``get_weights()`` and ``set_weights()`` " +"depends on the type of models you use. The ones shown below work for a " +"wide range of PyTorch models but you might need to adjust them if you " +"have more exotic model architectures." 
+msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:257 +#: ../../source/tutorial-quickstart-pytorch.rst:250 +msgid "" +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset. Similarly, the ``evaluate()`` method is used to evaluate the " +"model received on a held-out validation set that the client might have:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:283 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"``local-epochs`` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additional hyperparameters in ``pyproject.toml`` and access them here." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:316 +#: ../../source/tutorial-quickstart-mlx.rst:361 +#: ../../source/tutorial-quickstart-pytorch.rst:307 +#: ../../source/tutorial-quickstart-tensorflow.rst:232 +#, fuzzy +msgid "The ServerApp" +msgstr "serveur" + +#: ../../source/tutorial-quickstart-huggingface.rst:318 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"|serverappcomponents|_ as opposed to a |client|_ In this example we use " +"the `FedAvg` strategy. To it we pass a randomly initialized model that " +"will server as the global model to federated. Note that the value of " +"``fraction_fit`` is read from the run config. You can find the default " +"value defined in the ``pyproject.toml``." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:356 +msgid "" +"Congratulations! 
You've successfully built and run your first federated " +"learning system for an LLM." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:361 +msgid "" +"Check the source code of the extended version of this tutorial in " +"|quickstart_hf_link|_ in the Flower GitHub repository. For a " +"comprehensive example of a federated fine-tuning of an LLM with Flower, " +"refer to the |flowertune|_ example in the Flower GitHub repository." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:-1 +msgid "" +"Read this Federated Learning quickstart tutorial for creating an iOS app " +"using Flower to train a neural network on MNIST." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:4 +#, fuzzy +msgid "Quickstart iOS" +msgstr "Démarrage rapide iOS" + +#: ../../source/tutorial-quickstart-ios.rst:9 +#, fuzzy +msgid "" +"In this tutorial we will learn how to train a Neural Network on MNIST " +"using Flower and CoreML on iOS devices." +msgstr "" +"Dans ce tutoriel, nous allons apprendre comment entraîner un réseau de " +"neurones sur MNIST en utilisant Flower et CoreML sur des appareils iOS." + +#: ../../source/tutorial-quickstart-ios.rst:12 +#, fuzzy +msgid "" +"First of all, for running the Flower Python server, it is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `. For the Flower client " +"implementation in iOS, it is recommended to use Xcode as our IDE." +msgstr "" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." + +#: ../../source/tutorial-quickstart-ios.rst:17 +#, fuzzy +msgid "" +"Our example consists of one Python *server* and two iPhone *clients* that" +" all have the same model." +msgstr "" +"Notre exemple consiste en un *serveur* Python et deux *clients* iPhone " +"ayant tous le même modèle." 
+ +#: ../../source/tutorial-quickstart-ios.rst:20 +#, fuzzy +msgid "" +"*Clients* are responsible for generating individual weight updates for " +"the model based on their local datasets. These updates are then sent to " +"the *server* which will aggregate them to produce a better model. " +"Finally, the *server* sends this improved version of the model back to " +"each *client*. A complete cycle of weight updates is called a *round*." +msgstr "" +"*Les clients* sont chargés de générer des mises à jour de poids " +"individuelles pour le modèle en fonction de leurs ensembles de données " +"locales. Ces mises à jour sont ensuite envoyées au *serveur* qui les " +"agrège pour produire un meilleur modèle. Enfin, le *serveur* renvoie " +"cette version améliorée du modèle à chaque *client*. Un cycle complet de " +"mises à jour de poids s'appelle un *round*." + +#: ../../source/tutorial-quickstart-ios.rst:26 +#, fuzzy +msgid "" +"Now that we have a rough idea of what is going on, let's get started to " +"setup our Flower server environment. We first need to install Flower. You" +" can do this by using pip:" +msgstr "" +"Maintenant que nous avons une idée générale de ce qui se passe, " +"commençons. Nous devons d'abord installer Flower. Tu peux le faire en " +"exécutant :" + +#: ../../source/tutorial-quickstart-ios.rst:33 +msgid "Or Poetry:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:40 +#: ../../source/tutorial-quickstart-scikitlearn.rst:43 +#: ../../source/tutorial-quickstart-xgboost.rst:65 +msgid "Flower Client" +msgstr "Client de la fleur" + +#: ../../source/tutorial-quickstart-ios.rst:42 +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training using CoreML as our local training pipeline and " +"MNIST as our dataset. For simplicity reasons we will use the complete " +"Flower client with CoreML, that has been implemented and stored inside " +"the Swift SDK. 
The client implementation can be seen below:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:80 +msgid "" +"Let's create a new application project in Xcode and add ``flwr`` as a " +"dependency in your project. For our application, we will store the logic " +"of our app in ``FLiOSModel.swift`` and the UI elements in " +"``ContentView.swift``. We will focus more on ``FLiOSModel.swift`` in this" +" quickstart. Please refer to the `full code example " +"`_ to learn more " +"about the app." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:86 +msgid "Import Flower and CoreML related packages in ``FLiOSModel.swift``:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:94 +msgid "" +"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " +"will be bundled inside the application during deployment to your iOS " +"device. We need to pass the url to access mlmodel and run CoreML machine " +"learning processes, it can be retrieved by calling the function " +"``Bundle.main.url``. For the MNIST dataset, we need to preprocess it into" +" ``MLBatchProvider`` object. The preprocessing is done inside " +"``DataLoader.swift``." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:112 +msgid "" +"Since CoreML does not allow the model parameters to be seen before " +"training, and accessing the model parameters during or after the training" +" can only be done by specifying the layer name, we need to know this " +"information beforehand, through looking at the model specification, which" +" are written as proto files. The implementation can be seen in " +"``MLModelInspect``." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:118 +msgid "" +"After we have all of the necessary information, let's create our Flower " +"client." 
+msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:133 +msgid "" +"Then start the Flower gRPC client and start communicating to the server " +"by passing our Flower client to the function ``startFlwrGRPC``." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:141 +msgid "" +"That's it for the client. We only have to implement ``Client`` or call " +"the provided ``MLFlwrClient`` and call ``startFlwrGRPC()``. The attribute" +" ``hostname`` and ``port`` tells the client which server to connect to. " +"This can be done by entering the hostname and port in the application " +"before clicking the start button to start the federated learning process." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:148 +#: ../../source/tutorial-quickstart-scikitlearn.rst:179 +#: ../../source/tutorial-quickstart-xgboost.rst:358 +msgid "Flower Server" +msgstr "Serveur de Flower" + +#: ../../source/tutorial-quickstart-ios.rst:150 +#, fuzzy +msgid "" +"For simple workloads we can start a Flower server and leave all the " +"configuration possibilities at their default values. In a file named " +"``server.py``, import Flower and start the server:" +msgstr "" +"Pour les charges de travail simples, nous pouvons démarrer un serveur " +"Flower et laisser toutes les possibilités de configuration à leurs " +"valeurs par défaut. Dans un fichier nommé :code:`server.py`, importe " +"Flower et démarre le serveur :" + +#: ../../source/tutorial-quickstart-ios.rst:161 +#: ../../source/tutorial-quickstart-scikitlearn.rst:254 +msgid "Train the model, federated!" +msgstr "Entraîne le modèle, fédéré !" + +#: ../../source/tutorial-quickstart-ios.rst:163 +#: ../../source/tutorial-quickstart-xgboost.rst:590 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. FL systems usually have a server and " +"multiple clients. 
We therefore have to start the server first:" +msgstr "" +"Le client et le serveur étant prêts, nous pouvons maintenant tout " +"exécuter et voir l'apprentissage fédéré en action. Les systèmes FL ont " +"généralement un serveur et plusieurs clients. Nous devons donc commencer " +"par démarrer le serveur :" + +#: ../../source/tutorial-quickstart-ios.rst:171 +msgid "" +"Once the server is running we can start the clients in different " +"terminals. Build and run the client through your Xcode, one through Xcode" +" Simulator and the other by deploying it to your iPhone. To see more " +"about how to deploy your app to iPhone or Simulator visit `here " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:177 +#, fuzzy +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system in your ios device. The full `source code " +"`_ for this " +"example can be found in ``examples/ios``." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." + +#: ../../source/tutorial-quickstart-jax.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Jax to train a linear regression model on a scikit-learn dataset." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:4 +msgid "Quickstart JAX" +msgstr "Démarrage rapide de JAX" + +#: ../../source/tutorial-quickstart-jax.rst:9 +#, fuzzy +msgid "" +"This tutorial will show you how to use Flower to build a federated " +"version of an existing JAX workload. We are using JAX to train a linear " +"regression model on a scikit-learn dataset. We will structure the example" +" similar to our `PyTorch - From Centralized To Federated " +"`_ walkthrough. First, we build a centralized " +"training approach based on the `Linear Regression with JAX " +"`_" +" tutorial`. 
Then, we build upon the centralized training code to run the " +"training in a federated fashion." +msgstr "" +"Ce tutoriel te montrera comment utiliser Flower pour construire une " +"version fédérée d'une charge de travail JAX existante. Nous utilisons JAX" +" pour entraîner un modèle de régression linéaire sur un ensemble de " +"données scikit-learn. Nous structurerons l'exemple de la même manière que" +" notre présentation `PyTorch - De la centralisation à la fédération " +"`_. Tout d'abord, nous construisons une approche" +" d'entraînement centralisée basée sur le tutoriel `Régression linéaire " +"avec JAX " +"`_." +" Ensuite, nous nous appuyons sur le code d'entraînement centralisé pour " +"exécuter l'entraînement de manière fédérée." + +#: ../../source/tutorial-quickstart-jax.rst:20 +#, fuzzy +msgid "" +"Before we start building our JAX example, we need install the packages " +"``jax``, ``jaxlib``, ``scikit-learn``, and ``flwr``:" +msgstr "" +"Avant de commencer à construire notre exemple JAX, nous devons installer " +"les paquets :code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, et " +":code:`flwr` :" + +#: ../../source/tutorial-quickstart-jax.rst:28 +msgid "Linear Regression with JAX" +msgstr "Régression linéaire avec JAX" + +#: ../../source/tutorial-quickstart-jax.rst:30 +#, fuzzy +msgid "" +"We begin with a brief description of the centralized training code based " +"on a ``Linear Regression`` model. If you want a more in-depth explanation" +" of what's going on then have a look at the official `JAX documentation " +"`_." +msgstr "" +"Nous commençons par une brève description du code d'entraînement " +"centralisé basé sur un modèle :code:`Régression linéaire`. Si tu veux une" +" explication plus approfondie de ce qui se passe, jette un coup d'œil à " +"la documentation officielle `JAX `_." 
+ +#: ../../source/tutorial-quickstart-jax.rst:34 +#, fuzzy +msgid "" +"Let's create a new file called ``jax_training.py`` with all the " +"components required for a traditional (centralized) linear regression " +"training. First, the JAX packages ``jax`` and ``jaxlib`` need to be " +"imported. In addition, we need to import ``sklearn`` since we use " +"``make_regression`` for the dataset and ``train_test_split`` to split the" +" dataset into a training and test set. You can see that we do not yet " +"import the ``flwr`` package for federated learning. This will be done " +"later." +msgstr "" +"Créons un nouveau fichier appelé :code:`jax_training.py` avec tous les " +"composants nécessaires pour un apprentissage traditionnel (centralisé) de" +" la régression linéaire. Tout d'abord, les paquets JAX :code:`jax` et " +":code:`jaxlib` doivent être importés. En outre, nous devons importer " +":code:`sklearn` puisque nous utilisons :code:`make_regression` pour le " +"jeu de données et :code:`train_test_split` pour diviser le jeu de données" +" en un jeu d'entraînement et un jeu de test. Tu peux voir que nous " +"n'avons pas encore importé le paquet :code:`flwr` pour l'apprentissage " +"fédéré, ce qui sera fait plus tard." + +#: ../../source/tutorial-quickstart-jax.rst:51 +#, fuzzy +msgid "The ``load_data()`` function loads the mentioned training and test sets." +msgstr "" +"La fonction :code:`load_data()` charge les ensembles d'entraînement et de" +" test mentionnés." + +#: ../../source/tutorial-quickstart-jax.rst:63 +#, fuzzy +msgid "" +"The model architecture (a very simple ``Linear Regression`` model) is " +"defined in ``load_model()``." +msgstr "" +"L'architecture du modèle (un modèle :code:`Régression linéaire` très " +"simple) est définie dans :code:`load_model()`." 
+ +#: ../../source/tutorial-quickstart-jax.rst:73 +#, fuzzy +msgid "" +"We now need to define the training (function ``train()``), which loops " +"over the training set and measures the loss (function ``loss_fn()``) for " +"each batch of training examples. The loss function is separate since JAX " +"takes derivatives with a ``grad()`` function (defined in the ``main()`` " +"function and called in ``train()``)." +msgstr "" +"Nous devons maintenant définir l'entraînement (fonction :code:`train()`)," +" qui boucle sur l'ensemble d'entraînement et mesure la perte (fonction " +":code:`loss_fn()`) pour chaque lot d'exemples d'entraînement. La fonction" +" de perte est séparée puisque JAX prend des dérivés avec une fonction " +":code:`grad()` (définie dans la fonction :code:`main()` et appelée dans " +":code:`train()`)." + +#: ../../source/tutorial-quickstart-jax.rst:95 +#, fuzzy +msgid "" +"The evaluation of the model is defined in the function ``evaluation()``. " +"The function takes all test examples and measures the loss of the linear " +"regression model." +msgstr "" +"L'évaluation du modèle est définie dans la fonction :code:`evaluation()`." +" La fonction prend tous les exemples de test et mesure la perte du modèle" +" de régression linéaire." + +#: ../../source/tutorial-quickstart-jax.rst:107 +#, fuzzy +msgid "" +"Having defined the data loading, model architecture, training, and " +"evaluation we can put everything together and train our model using JAX. " +"As already mentioned, the ``jax.grad()`` function is defined in " +"``main()`` and passed to ``train()``." +msgstr "" +"Après avoir défini le chargement des données, l'architecture du modèle, " +"l'entraînement et l'évaluation, nous pouvons tout assembler et entraîner " +"notre modèle à l'aide de JAX. Comme nous l'avons déjà mentionné, la " +"fonction :code:`jax.grad()` est définie dans :code:`main()` et transmise " +"à :code:`train()`." 
+ +#: ../../source/tutorial-quickstart-jax.rst:126 +msgid "You can now run your (centralized) JAX linear regression workload:" +msgstr "" +"Tu peux maintenant exécuter ta charge de travail (centralisée) de " +"régression linéaire JAX :" + +#: ../../source/tutorial-quickstart-jax.rst:132 +msgid "" +"So far this should all look fairly familiar if you've used JAX before. " +"Let's take the next step and use what we've built to create a simple " +"federated learning system consisting of one server and two clients." +msgstr "" +"Jusqu'à présent, tout cela devrait te sembler assez familier si tu as " +"déjà utilisé JAX. Passons à l'étape suivante et utilisons ce que nous " +"avons construit pour créer un simple système d'apprentissage fédéré " +"composé d'un serveur et de deux clients." + +#: ../../source/tutorial-quickstart-jax.rst:137 +msgid "JAX meets Flower" +msgstr "JAX rencontre Flower" + +#: ../../source/tutorial-quickstart-jax.rst:139 +#, fuzzy +msgid "" +"The concept of federating an existing workload is always the same and " +"easy to understand. We have to start a *server* and then use the code in " +"``jax_training.py`` for the *clients* that are connected to the *server*." +" The *server* sends model parameters to the clients. The *clients* run " +"the training and update the parameters. The updated parameters are sent " +"back to the *server*, which averages all received parameter updates. This" +" describes one round of the federated learning process, and we repeat " +"this for multiple rounds." +msgstr "" +"Le concept de fédération d'une charge de travail existante est toujours " +"le même et facile à comprendre. 
Nous devons démarrer un *serveur*, puis " +"utiliser le code dans :code:`jax_training.py` pour les *clients* qui sont" +" connectés au *serveur*. Le *serveur* envoie les paramètres du modèle aux " +"clients. Les *clients* exécutent la formation et mettent à jour les " +"paramètres. Les paramètres mis à jour sont renvoyés au *serveur*, qui fait" +" la moyenne de toutes les mises à jour de paramètres reçues. Ceci décrit " +"un tour du processus d'apprentissage fédéré, et nous répétons cette " +"opération pour plusieurs tours." + +#: ../../source/tutorial-quickstart-jax.rst:167 +#, fuzzy +msgid "" +"Finally, we will define our *client* logic in ``client.py`` and build " +"upon the previously defined JAX training in ``jax_training.py``. Our " +"*client* needs to import ``flwr``, but also ``jax`` and ``jaxlib`` to " +"update the parameters on our JAX model:" +msgstr "" +"Enfin, nous allons définir la logique de notre *client* dans " +":code:`client.py` et nous appuyer sur la formation JAX définie " +"précédemment dans :code:`jax_training.py`. Notre *client* doit importer " +":code:`flwr`, mais aussi :code:`jax` et :code:`jaxlib` pour mettre à jour" +" les paramètres de notre modèle JAX :" + +#: ../../source/tutorial-quickstart-jax.rst:182 +#, fuzzy +msgid "" +"Implementing a Flower *client* basically means implementing a subclass of" +" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " +"implementation will be based on ``flwr.client.NumPyClient`` and we'll " +"call it ``FlowerClient``. ``NumPyClient`` is slightly easier to implement" +" than ``Client`` if you use a framework with good NumPy interoperability " +"(like JAX) because it avoids some of the boilerplate that would otherwise" +" be necessary. 
``FlowerClient`` needs to implement four methods, two " +"methods for getting/setting model parameters, one method for training the" +" model, and one method for testing the model:" +msgstr "" +"L'implémentation d'un *client* Flower signifie essentiellement " +"l'implémentation d'une sous-classe de :code:`flwr.client.Client` ou " +":code:`flwr.client.NumPyClient`. Notre implémentation sera basée sur " +":code:`flwr.client.NumPyClient` et nous l'appellerons " +":code:`FlowerClient`. :code:`NumPyClient` est légèrement plus facile à " +"implémenter que :code:`Client` si vous utilisez un framework avec une " +"bonne interopérabilité NumPy (comme JAX) parce qu'il évite une partie du " +"boilerplate qui serait autrement nécessaire. :code:`FlowerClient` doit " +"implémenter quatre méthodes, deux méthodes pour obtenir/régler les " +"paramètres du modèle, une méthode pour former le modèle, et une méthode " +"pour tester le modèle :" + +#: ../../source/tutorial-quickstart-jax.rst:194 +#, fuzzy +msgid "``set_parameters (optional)``" +msgstr ":code:`set_parameters (optional)`" + +#: ../../source/tutorial-quickstart-jax.rst:193 +#, fuzzy +msgid "transform parameters to NumPy ``ndarray``'s" +msgstr "transforme les paramètres en NumPy :code:`ndarray`'s" + +#: ../../source/tutorial-quickstart-jax.rst:203 +msgid "get the updated local model parameters and return them to the server" +msgstr "" +"récupère les paramètres du modèle local mis à jour et les renvoie au " +"serveur" + +#: ../../source/tutorial-quickstart-jax.rst:208 +msgid "return the local loss to the server" +msgstr "renvoie la perte locale au serveur" + +#: ../../source/tutorial-quickstart-jax.rst:210 +#, fuzzy +msgid "" +"The challenging part is to transform the JAX model parameters from " +"``DeviceArray`` to ``NumPy ndarray`` to make them compatible with " +"`NumPyClient`." 
+msgstr "" +"La partie la plus difficile consiste à transformer les paramètres du " +"modèle JAX de :code:`DeviceArray` en :code:`NumPy ndarray` pour les " +"rendre compatibles avec `NumPyClient`." + +#: ../../source/tutorial-quickstart-jax.rst:213 +#, fuzzy +msgid "" +"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " +"functions ``train()`` and ``evaluate()`` previously defined in " +"``jax_training.py``. So what we really do here is we tell Flower through " +"our ``NumPyClient`` subclass which of our already defined functions to " +"call for training and evaluation. We included type annotations to give " +"you a better understanding of the data types that get passed around." +msgstr "" +"Les deux méthodes :code:`NumPyClient` :code:`fit` et :code:`evaluate` " +"utilisent les fonctions :code:`train()` et :code:`evaluate()` définies " +"précédemment dans :code:`jax_training.py`. Ce que nous faisons vraiment " +"ici, c'est que nous indiquons à Flower, par le biais de notre sous-classe" +" :code:`NumPyClient`, laquelle de nos fonctions déjà définies doit être " +"appelée pour l'entraînement et l'évaluation. Nous avons inclus des " +"annotations de type pour te donner une meilleure compréhension des types " +"de données qui sont transmis." + +#: ../../source/tutorial-quickstart-jax.rst:286 +msgid "Having defined the federation process, we can run it." +msgstr "Après avoir défini le processus de fédération, nous pouvons l'exécuter." + +#: ../../source/tutorial-quickstart-jax.rst:315 +msgid "" +"in each window (make sure that the server is still running before you do " +"so) and see your JAX project run federated learning across two clients. " +"Congratulations!" +msgstr "" +"dans chaque fenêtre (assure-toi que le serveur est toujours en cours " +"d'exécution avant de le faire) et tu verras que ton projet JAX exécute " +"l'apprentissage fédéré sur deux clients. Félicitations !" 
+ +#: ../../source/tutorial-quickstart-jax.rst:321 +#, fuzzy +msgid "" +"The source code of this example was improved over time and can be found " +"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " +"clients load the same dataset." +msgstr "" +"Le code source de cet exemple a été amélioré au fil du temps et peut être" +" trouvé ici : `Quickstart JAX " +"`_. " +"Notre exemple est quelque peu simplifié à l'extrême car les deux clients " +"chargent le même jeu de données." + +#: ../../source/tutorial-quickstart-jax.rst:325 +msgid "" +"You're now prepared to explore this topic further. How about using a more" +" sophisticated model or using a different dataset? How about adding more " +"clients?" +msgstr "" +"Tu es maintenant prêt à approfondir ce sujet. Pourquoi ne pas utiliser un" +" modèle plus sophistiqué ou un ensemble de données différent ? Pourquoi " +"ne pas ajouter d'autres clients ?" + +#: ../../source/tutorial-quickstart-mlx.rst:4 +#, fuzzy +msgid "Quickstart MLX" +msgstr "Démarrage rapide de MLX" + +#: ../../source/tutorial-quickstart-mlx.rst:6 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train simple MLP" +" on MNIST using Flower and MLX. It is recommended to create a virtual " +"environment and run everything within a :doc:`virtualenv `." +msgstr "" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." + +#: ../../source/tutorial-quickstart-mlx.rst:10 +msgid "" +"Let's use `flwr new` to create a complete Flower+MLX project. It will " +"generate all the files needed to run, by default with the Simulation " +"Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:25 +msgid "" +"Then, run the command below. 
You will be prompted to select of the " +"available templates (choose ``MLX``), give a name to your project, and " +"type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:53 +msgid "To run the project do:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:102 +msgid "" +"You can also override the parameters defined in " +"``[tool.flwr.app.config]`` section in the ``pyproject.toml`` like this:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:116 +msgid "" +"We will use `Flower Datasets `_ to " +"easily download and partition the `MNIST` dataset. In this example you'll" +" make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:157 +msgid "" +"We define the model as in the `centralized MLX example " +"`_, it's a " +"simple MLP:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:180 +msgid "" +"We also define some utility functions to test our model and to iterate " +"over batches." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:201 +msgid "" +"The main changes we have to make to use `MLX` with `Flower` will be found" +" in the ``get_params()`` and ``set_params()`` functions. Indeed, MLX " +"doesn't provide an easy way to convert the model parameters into a list " +"of ``np.array`` objects (the format we need for the serialization of the " +"messages to work)." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:206 +msgid "The way MLX stores its parameters is as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:219 +msgid "" +"Therefore, to get our list of ``np.array`` objects, we need to extract " +"each array and convert them into a NumPy array:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:228 +msgid "" +"For the ``set_params()`` function, we perform the reverse operation. 
We " +"receive a list of NumPy arrays and want to convert them into MLX " +"parameters. Therefore, we iterate through pairs of parameters and assign " +"them to the `weight` and `bias` keys of each layer dict:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:243 +msgid "" +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:259 +msgid "" +"Here, after updating the parameters, we perform the training as in the " +"centralized case, and return the new parameters." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:262 +msgid "And for the ``evaluate()`` method of the client:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:272 +msgid "" +"We also begin by updating the parameters with the ones sent by the " +"server, and then we compute the loss and accuracy using the functions " +"defined above. In the constructor of the ``FlowerClient`` we instantiate " +"the `MLP` model as well as other components such as the optimizer." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:277 +#, fuzzy +msgid "Putting everything together we have:" +msgstr "Tout assembler" + +#: ../../source/tutorial-quickstart-mlx.rst:331 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that " +"``context`` enables you to get access to hyperparemeters defined in " +"``pyproject.toml`` to configure the run. In this tutorial we access, " +"among other hyperparameters, the ``local-epochs`` setting to control the " +"number of epochs a ``ClientApp`` will perform when running the ``fit()`` " +"method." 
+msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:363 +msgid "" +"To construct a ``ServerApp``, we define a ``server_fn()`` callback with " +"an identical signature to that of ``client_fn()``, but the return type is" +" `ServerAppComponents `_ as " +"opposed to `Client `_. In this example we use the " +"``FedAvg`` strategy." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:386 +#: ../../source/tutorial-quickstart-pytorch.rst:344 +#: ../../source/tutorial-quickstart-tensorflow.rst:266 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:390 +#, fuzzy +msgid "" +"Check the `source code `_ of the extended version of this tutorial in ``examples" +"/quickstart-mlx`` in the Flower GitHub repository." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." + +#: ../../source/tutorial-quickstart-pandas.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Pandas to perform Federated Analytics." +msgstr "" + +#: ../../source/tutorial-quickstart-pandas.rst:4 +msgid "Quickstart Pandas" +msgstr "Démarrage rapide des Pandas" + +#: ../../source/tutorial-quickstart-pandas.rst:9 +msgid "Let's build a federated analytics system using Pandas and Flower!" +msgstr "Construisons un système d'analyse fédéré à l'aide de Pandas et de Flower !" + +#: ../../source/tutorial-quickstart-pandas.rst:11 +#, fuzzy +msgid "" +"Please refer to the `full code example " +"`_ " +"to learn more." +msgstr "" +"Réfère-toi à l'exemple de code complet " +"`_ " +"pour en savoir plus." + +#: ../../source/tutorial-quickstart-pytorch.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with PyTorch to train a CNN model on MNIST." 
+msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:6 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train a " +"Convolutional Neural Network on CIFAR-10 using Flower and PyTorch. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." +msgstr "" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." + +#: ../../source/tutorial-quickstart-pytorch.rst:11 +msgid "" +"Let's use `flwr new` to create a complete Flower+PyTorch project. It will" +" generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:26 +msgid "" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``PyTorch``), give a name to your project, " +"and type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:117 +msgid "" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create dataloaders with the data" +" that correspond to their data partition." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:152 +msgid "" +"We defined a simple Convolutional Neural Network (CNN), but feel free to " +"replace it with a more sophisticated model if you'd like:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:177 +msgid "" +"In addition to defining the model architecture, we also include two " +"utility functions to perform both training (i.e. 
``train()``) and " +"evaluation (i.e. ``test()``) using the above model. These functions " +"should look fairly familiar if you have some prior experience with " +"PyTorch. Note these functions do not have anything specific to Flower. " +"That being said, the training function will normally be called, as we'll " +"see later, from a Flower client passing its own data. In summary, your " +"clients can use standard training/testing functions to perform local " +"training or evaluation:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:226 +msgid "" +"The main changes we have to make to use `PyTorch` with `Flower` will be " +"found in the ``get_weights()`` and ``set_weights()`` functions. In " +"``get_weights()`` PyTorch model parameters are extracted and represented " +"as a list of NumPy arrays. The ``set_weights()`` function that's the " +"oposite: given a list of NumPy arrays it applies them to an existing " +"PyTorch model. Doing this in fairly easy in PyTorch." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:282 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"`local-epochs` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additioinal hyperparameters in ``pyproject.toml`` and access them here." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:309 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. 
To it we pass a randomly initialized model that will server as " +"the global model to federated. Note that the value of ``fraction_fit`` is" +" read from the run config. You can find the default value defined in the " +"``pyproject.toml``." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:348 +#, fuzzy +msgid "" +"Check the `source code `_ of the extended version of this tutorial in " +"``examples/quickstart-pytorch`` in the Flower GitHub repository." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." + +#: ../../source/tutorial-quickstart-pytorch.rst:354 +#: ../../source/tutorial-quickstart-tensorflow.rst:278 +#, fuzzy +msgid "Video tutorial" +msgstr "Tutoriel" + +#: ../../source/tutorial-quickstart-pytorch.rst:358 +msgid "" +"The video shown below shows how to setup a PyTorch + Flower project using" +" our previously recommended APIs. A new video tutorial will be released " +"that shows the new APIs (as the content above does)" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:4 +msgid "Quickstart PyTorch Lightning" +msgstr "Démarrage rapide de PyTorch Lightning" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:6 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train an " +"AutoEncoder model on MNIST using Flower and PyTorch Lightning. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." +msgstr "" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." 
+ +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:19 +msgid "" +"This will create a new directory called `quickstart-pytorch-lightning` " +"containing the following files:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:42 +msgid "" +"By default, Flower Simulation Engine will be started and it will create a" +" federation of 4 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" To run the project, do:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:93 +msgid "" +"Each simulated `ClientApp` (two per round) will also log a summary of " +"their local training process. Expect this output to be similar to:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:115 +#, fuzzy +msgid "" +"Check the `source code `_ of this tutorial in ``examples" +"/quickstart-pytorch-lightning`` in the Flower GitHub repository." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with scikit-learn to train a linear regression model." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:4 +msgid "Quickstart scikit-learn" +msgstr "Démarrage rapide de scikit-learn" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:9 +#, fuzzy +msgid "" +"In this tutorial, we will learn how to train a ``Logistic Regression`` " +"model on MNIST using Flower and scikit-learn." +msgstr "" +"Dans ce tutoriel, nous allons apprendre à former un :code:`modèle de " +"régression logistique` sur MNIST en utilisant Flower et scikit-learn." 
+ +#: ../../source/tutorial-quickstart-scikitlearn.rst:12 +#, fuzzy +msgid "" +"It is recommended to create a virtual environment and run everything " +"within this :doc:`virtualenv `." +msgstr "" +"Il est recommandé de créer un environnement virtuel et de tout exécuter " +"dans ce `virtualenv `_." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:15 +msgid "" +"Our example consists of one *server* and two *clients* all having the " +"same model." +msgstr "" +"Notre exemple consiste en un *serveur* et deux *clients* ayant tous le " +"même modèle." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:17 +msgid "" +"*Clients* are responsible for generating individual model parameter " +"updates for the model based on their local datasets. These updates are " +"then sent to the *server* which will aggregate them to produce an updated" +" global model. Finally, the *server* sends this improved version of the " +"model back to each *client*. A complete cycle of parameters updates is " +"called a *round*." +msgstr "" +"*Les clients* sont chargés de générer des mises à jour individuelles des " +"paramètres du modèle en fonction de leurs ensembles de données locales. " +"Ces mises à jour sont ensuite envoyées au *serveur* qui les agrège pour " +"produire un modèle global mis à jour. Enfin, le *serveur* renvoie cette " +"version améliorée du modèle à chaque *client*. Un cycle complet de mises " +"à jour des paramètres s'appelle un *round*." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:23 +msgid "" +"Now that we have a rough idea of what is going on, let's get started. We " +"first need to install Flower. You can do this by running:" +msgstr "" +"Maintenant que nous avons une idée approximative de ce qui se passe, " +"commençons. Nous devons d'abord installer Flower. 
Tu peux le faire en " +"lançant :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:30 +#, fuzzy +msgid "Since we want to use scikit-learn, let's go ahead and install it:" +msgstr "Puisque nous voulons utiliser scikit-learn, allons-y et installons-le :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:36 +msgid "Or simply install all dependencies using Poetry:" +msgstr "Ou installe simplement toutes les dépendances à l'aide de Poetry :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:45 +#, fuzzy +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training with two clients and one server. However, before " +"setting up the client and server, we will define all functionalities that" +" we need for our federated learning setup within ``utils.py``. The " +"``utils.py`` contains different functions defining all the machine " +"learning basics:" +msgstr "" +"Maintenant que toutes nos dépendances sont installées, exécutons une " +"formation distribuée simple avec deux clients et un serveur. Cependant, " +"avant de configurer le client et le serveur, nous allons définir toutes " +"les fonctionnalités dont nous avons besoin pour notre configuration " +"d'apprentissage fédéré dans :code:`utils.py`. 
Le :code:`utils.py` " +"contient différentes fonctions définissant toutes les bases de " +"l'apprentissage automatique :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:51 +#, fuzzy +msgid "``get_model_parameters()``" +msgstr ":code:`get_model_parameters()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +#, fuzzy +msgid "Returns the parameters of a ``sklearn`` LogisticRegression model" +msgstr "" +"Renvoie les paramètres d'un modèle de régression logistique " +":code:`sklearn`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:53 +#, fuzzy +msgid "``set_model_params()``" +msgstr ":code:`set_model_params()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:54 +#, fuzzy +msgid "Sets the parameters of a ``sklearn`` LogisticRegression model" +msgstr "Définit les paramètres d'un modèle de régression logistique :code:`sklearn`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 +#, fuzzy +msgid "``set_initial_params()``" +msgstr ":code:`set_initial_params()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 +msgid "Initializes the model parameters that the Flower server will ask for" +msgstr "Initialise les paramètres du modèle que le serveur de Flower demandera" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:58 +#, fuzzy +msgid "" +"Please check out ``utils.py`` `here " +"`_ for more details. The pre-defined functions are used in" +" the ``client.py`` and imported. The ``client.py`` also requires to " +"import several packages such as Flower and scikit-learn:" +msgstr "" +"Tu peux consulter :code:`utils.py` `ici " +"`_ pour plus de détails. Les fonctions prédéfinies sont " +"utilisées dans :code:`client.py` et importées. 
:code:`client.py` " +"nécessite également d'importer plusieurs paquets tels que Flower et " +"scikit-learn :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:75 +msgid "" +"Prior to local training, we need to load the MNIST dataset, a popular " +"image classification dataset of handwritten digits for machine learning, " +"and partition the dataset for FL. This can be conveniently achieved using" +" `Flower Datasets `_. The " +"``FederatedDataset.load_partition()`` method loads the partitioned " +"training set for each partition ID defined in the ``--partition-id`` " +"argument." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:106 +#, fuzzy +msgid "" +"Next, the logistic regression model is defined and initialized with " +"``utils.set_initial_params()``." +msgstr "" +"Ensuite, le modèle de régression logistique est défini et initialisé avec" +" :code:`utils.set_initial_params()`." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:119 +#, fuzzy +msgid "" +"The Flower server interacts with clients through an interface called " +"``Client``. When the server selects a particular client for training, it " +"sends training instructions over the network. The client receives those " +"instructions and calls one of the ``Client`` methods to run your code " +"(i.e., to fit the logistic regression we defined earlier)." +msgstr "" +"Le serveur Flower interagit avec les clients par le biais d'une interface" +" appelée :code:`Client`. Lorsque le serveur sélectionne un client " +"particulier pour la formation, il envoie des instructions de formation " +"sur le réseau. Le client reçoit ces instructions et appelle l'une des " +"méthodes :code:`Client` pour exécuter ton code (c'est-à-dire pour ajuster" +" la régression logistique que nous avons définie plus tôt)." 
+ +#: ../../source/tutorial-quickstart-scikitlearn.rst:124 +#, fuzzy +msgid "" +"Flower provides a convenience class called ``NumPyClient`` which makes it" +" easier to implement the ``Client`` interface when your workload uses " +"scikit-learn. Implementing ``NumPyClient`` usually means defining the " +"following methods (``set_parameters`` is optional though):" +msgstr "" +"Flower fournit une classe de commodité appelée :code:`NumPyClient` qui " +"facilite la mise en œuvre de l'interface :code:`Client` lorsque ta charge" +" de travail utilise scikit-learn. Mettre en œuvre :code:`NumPyClient` " +"signifie généralement définir les méthodes suivantes " +"(:code:`set_parameters` est cependant facultatif) :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:130 +msgid "return the model weight as a list of NumPy ndarrays" +msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:132 +#, fuzzy +msgid "``set_parameters`` (optional)" +msgstr ":code:`set_parameters` (optionnel)" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:132 +msgid "" +"update the local model weights with the parameters received from the " +"server" +msgstr "" +"mettre à jour les poids du modèle local avec les paramètres reçus du " +"serveur" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:133 +#, fuzzy +msgid "is directly imported with ``utils.set_model_params()``" +msgstr "est directement importé avec :code:`utils.set_model_params()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:135 +msgid "set the local model weights" +msgstr "fixe les poids du modèle local" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:136 +msgid "train the local model" +msgstr "entraîne le modèle local" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:137 +#, fuzzy +msgid "return the updated local model weights" +msgstr "renvoie les poids du modèle local mis à jour" + +#: 
../../source/tutorial-quickstart-scikitlearn.rst:139 +msgid "test the local model" +msgstr "teste le modèle local" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:141 +msgid "The methods can be implemented in the following way:" +msgstr "Les méthodes peuvent être mises en œuvre de la manière suivante :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:163 +#, fuzzy +msgid "" +"We can now create an instance of our class ``MnistClient`` and add one " +"line to actually run this client:" +msgstr "" +"Nous pouvons maintenant créer une instance de notre classe " +":code:`MnistClient` et ajouter une ligne pour exécuter ce client :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:170 +#, fuzzy +msgid "" +"That's it for the client. We only have to implement ``Client`` or " +"``NumPyClient`` and call ``fl.client.start_client()``. If you implement a" +" client of type ``NumPyClient`` you'll need to first call its " +"``to_client()`` method. The string ``\"0.0.0.0:8080\"`` tells the client " +"which server to connect to. In our case we can run the server and the " +"client on the same machine, therefore we use ``\"0.0.0.0:8080\"``. If we " +"run a truly federated workload with the server and clients running on " +"different machines, all that needs to change is the ``server_address`` we" +" pass to the client." +msgstr "" +"C'est tout pour le client. Il nous suffit d'implémenter :code:`Client` ou" +" :code:`NumPyClient` et d'appeler :code:`fl.client.start_client()`. La " +"chaîne :code:`\"0.0.0.0:8080\"` indique au client à quel serveur se " +"connecter. Dans notre cas, nous pouvons exécuter le serveur et le client " +"sur la même machine, c'est pourquoi nous utilisons " +":code:`\"0.0.0.0:8080\"`. Si nous exécutons une charge de travail " +"véritablement fédérée avec le serveur et les clients s'exécutant sur des " +"machines différentes, tout ce qui doit changer est :code:`server_address`" +" que nous transmettons au client." 
+ +#: ../../source/tutorial-quickstart-scikitlearn.rst:181 +msgid "" +"The following Flower server is a little bit more advanced and returns an " +"evaluation function for the server-side evaluation. First, we import " +"again all required libraries such as Flower and scikit-learn." +msgstr "" +"Le serveur Flower suivant est un peu plus avancé et renvoie une fonction " +"d'évaluation pour l'évaluation côté serveur. Tout d'abord, nous importons" +" à nouveau toutes les bibliothèques requises telles que Flower et scikit-" +"learn." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:185 +#, fuzzy +msgid "``server.py``, import Flower and start the server:" +msgstr ":code:`server.py`, importe Flower et démarre le serveur :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:198 +#, fuzzy +msgid "" +"The number of federated learning rounds is set in ``fit_round()`` and the" +" evaluation is defined in ``get_evaluate_fn()``. The evaluation function " +"is called after each federated learning round and gives you information " +"about loss and accuracy. Note that we also make use of Flower Datasets " +"here to load the test split of the MNIST dataset for server-side " +"evaluation." +msgstr "" +"Le nombre de tours d'apprentissage fédéré est défini dans " +":code:`fit_round()` et l'évaluation est définie dans " +":code:`get_evaluate_fn()`. La fonction d'évaluation est appelée après " +"chaque tour d'apprentissage fédéré et te donne des informations sur la " +"perte et la précision." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:228 +#, fuzzy +msgid "" +"The ``main`` contains the server-side parameter initialization " +"``utils.set_initial_params()`` as well as the aggregation strategy " +"``fl.server.strategy:FedAvg()``. The strategy is the default one, " +"federated averaging (or FedAvg), with two clients and evaluation after " +"each federated learning round. 
The server can be started with the command" +" ``fl.server.start_server(server_address=\"0.0.0.0:8080\", " +"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))``." +msgstr "" +"Le :code:`main` contient l'initialisation des paramètres côté serveur " +":code:`utils.set_initial_params()` ainsi que la stratégie d'agrégation " +":code:`fl.server.strategy:FedAvg()`. La stratégie est celle par défaut, " +"la moyenne fédérée (ou FedAvg), avec deux clients et une évaluation après" +" chaque tour d'apprentissage fédéré. Le serveur peut être démarré avec la" +" commande :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " +"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:256 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. Federated learning systems usually have a " +"server and multiple clients. We, therefore, have to start the server " +"first:" +msgstr "" +"Le client et le serveur étant prêts, nous pouvons maintenant tout lancer " +"et voir l'apprentissage fédéré en action. Les systèmes d'apprentissage " +"fédéré ont généralement un serveur et plusieurs clients. Nous devons donc" +" commencer par lancer le serveur :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:264 +#: ../../source/tutorial-quickstart-xgboost.rst:598 +msgid "" +"Once the server is running we can start the clients in different " +"terminals. Open a new terminal and start the first client:" +msgstr "" +"Une fois que le serveur fonctionne, nous pouvons démarrer les clients " +"dans différents terminaux. 
Ouvre un nouveau terminal et démarre le " +"premier client :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:271 +#: ../../source/tutorial-quickstart-xgboost.rst:605 +msgid "Open another terminal and start the second client:" +msgstr "Ouvre un autre terminal et démarre le deuxième client :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:277 +#: ../../source/tutorial-quickstart-xgboost.rst:611 +msgid "" +"Each client will have its own dataset. You should now see how the " +"training does in the very first terminal (the one that started the " +"server):" +msgstr "" +"Chaque client aura son propre ensemble de données. Tu devrais maintenant " +"voir comment la formation se déroule dans le tout premier terminal (celui" +" qui a démarré le serveur) :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:311 +#, fuzzy +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this example can be found in ``examples/sklearn-logreg-" +"mnist``." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples/sklearn-logreg-" +"mnist`." + +#: ../../source/tutorial-quickstart-tensorflow.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with TensorFlow to train a CNN model on CIFAR-10." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:4 +msgid "Quickstart TensorFlow" +msgstr "Démarrage rapide de TensorFlow" + +#: ../../source/tutorial-quickstart-tensorflow.rst:6 +#, fuzzy +msgid "" +"In this tutorial we will learn how to train a Convolutional Neural " +"Network on CIFAR-10 using the Flower framework and TensorFlow. First of " +"all, it is recommended to create a virtual environment and run everything" +" within a :doc:`virtualenv `." 
+msgstr "" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." + +#: ../../source/tutorial-quickstart-tensorflow.rst:11 +msgid "" +"Let's use `flwr new` to create a complete Flower+TensorFlow project. It " +"will generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:26 +msgid "" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``TensorFlow``), give a name to your project," +" and type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:114 +msgid "" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create the ``NumPy`` arrays that" +" correspond to their data partition." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:141 +msgid "" +"Next, we need a model. We defined a simple Convolutional Neural Network " +"(CNN), but feel free to replace it with a more sophisticated model if " +"you'd like:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:170 +msgid "" +"With `TensorFlow`, we can use the built-in ``get_weights()`` and " +"``set_weights()`` functions, which simplifies the implementation with " +"`Flower`. The rest of the functionality in the ClientApp is directly " +"inspired by the centralized case. The ``fit()`` method in the client " +"trains the model using the local dataset. 
Similarly, the ``evaluate()`` " +"method is used to evaluate the model received on a held-out validation " +"set that the client might have:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:203 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparameters defined in your " +"``pyproject.toml`` to configure the run. For example, in this tutorial we" +" access the `local-epochs` setting to control the number of epochs a " +"``ClientApp`` will perform when running the ``fit()`` method, in addition" +" to `batch-size`. You could define additional hyperparameters in " +"``pyproject.toml`` and access them here." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:234 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will serve as " +"the global model to federate." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:270 +#, fuzzy +msgid "" +"Check the source code of the extended version of this tutorial in " +"|quickstart_tf_link|_ in the Flower GitHub repository." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." + +#: ../../source/tutorial-quickstart-tensorflow.rst:282 +msgid "" +"The video shown below shows how to setup a TensorFlow + Flower project " +"using our previously recommended APIs. 
A new video tutorial will be " +"released that shows the new APIs (as the content above does)" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with XGBoost to train classification models on trees." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:4 +msgid "Quickstart XGBoost" +msgstr "Démarrage rapide XGBoost" + +#: ../../source/tutorial-quickstart-xgboost.rst:13 +#, fuzzy +msgid "Federated XGBoost" +msgstr "Formation fédérée" + +#: ../../source/tutorial-quickstart-xgboost.rst:15 +msgid "" +"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " +"implementation of gradient-boosted decision tree (**GBDT**), that " +"maximises the computational boundaries for boosted tree methods. It's " +"primarily designed to enhance both the performance and computational " +"speed of machine learning models. In XGBoost, trees are constructed " +"concurrently, unlike the sequential approach taken by GBDT." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:21 +msgid "" +"Often, for tabular data on medium-sized datasets with fewer than 10k " +"training examples, XGBoost surpasses the results of deep learning " +"techniques." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:25 +#, fuzzy +msgid "Why federated XGBoost?" +msgstr "Qu'est-ce que l'apprentissage fédéré ?" + +#: ../../source/tutorial-quickstart-xgboost.rst:27 +msgid "" +"Indeed, as the demand for data privacy and decentralized learning grows, " +"there's an increasing requirement to implement federated XGBoost systems " +"for specialised applications, like survival analysis and financial fraud " +"detection." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:31 +msgid "" +"Federated learning ensures that raw data remains on the local device, " +"making it an attractive approach for sensitive domains where data " +"security and privacy are paramount. 
Given the robustness and efficiency " +"of XGBoost, combining it with federated learning offers a promising " +"solution for these specific challenges." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:36 +msgid "" +"In this tutorial we will learn how to train a federated XGBoost model on " +"HIGGS dataset using Flower and ``xgboost`` package. We use a simple " +"example (`full code xgboost-quickstart " +"`_)" +" with two *clients* and one *server* to demonstrate how federated XGBoost" +" works, and then we dive into a more complex example (`full code xgboost-" +"comprehensive `_) to run various experiments." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:46 +msgid "Environment Setup" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:48 +#, fuzzy +msgid "" +"First of all, it is recommended to create a virtual environment and run " +"everything within a :doc:`virtualenv `." +msgstr "" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." + +#: ../../source/tutorial-quickstart-xgboost.rst:51 +msgid "" +"We first need to install Flower and Flower Datasets. You can do this by " +"running :" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:57 +#, fuzzy +msgid "" +"Since we want to use ``xgboost`` package to build up XGBoost trees, let's" +" go ahead and install ``xgboost``:" +msgstr "Puisque nous voulons utiliser scikt-learn, allons-y et installons-le :" + +#: ../../source/tutorial-quickstart-xgboost.rst:67 +msgid "" +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. Now that we have all our " +"dependencies installed, let's run a simple distributed training with two " +"clients and one server." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:71 +#, fuzzy +msgid "" +"In a file called ``client.py``, import xgboost, Flower, Flower Datasets " +"and other related functions:" +msgstr "" +"Dans un fichier appelé :code:`client.py`, importe Flower et les paquets " +"liés à PyTorch :" + +#: ../../source/tutorial-quickstart-xgboost.rst:99 +msgid "Dataset partition and hyper-parameter selection" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:101 +msgid "" +"Prior to local training, we require loading the HIGGS dataset from Flower" +" Datasets and conduct data partitioning for FL:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:115 +msgid "" +"In this example, we split the dataset into 30 partitions with uniform " +"distribution (``IidPartitioner(num_partitions=30)``). Then, we load the " +"partition for the given client based on ``partition_id``:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:135 +msgid "" +"After that, we do train/test splitting on the given partition (client's " +"local data), and transform data format for ``xgboost`` package." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:149 +msgid "" +"The functions of ``train_test_split`` and " +"``transform_dataset_to_dmatrix`` are defined as below:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:174 +msgid "Finally, we define the hyper-parameters used for XGBoost training." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:190 +msgid "" +"The ``num_local_round`` represents the number of iterations for local " +"tree boost. We use CPU for the training in default. One can shift it to " +"GPU by setting ``tree_method`` to ``gpu_hist``. We use AUC as evaluation " +"metric." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:195 +msgid "Flower client definition for XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:197 +msgid "" +"After loading the dataset we define the Flower client. We follow the " +"general rule to define ``XgbClient`` class inherited from " +"``fl.client.Client``." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:219 +msgid "" +"All required parameters defined above are passed to ``XgbClient``'s " +"constructor." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:221 +msgid "" +"Then, we override ``get_parameters``, ``fit`` and ``evaluate`` methods " +"insides ``XgbClient`` class as follows." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:236 +msgid "" +"Unlike neural network training, XGBoost trees are not started from a " +"specified random weights. In this case, we do not use ``get_parameters`` " +"and ``set_parameters`` to initialise model parameters for XGBoost. As a " +"result, let's return an empty tensor in ``get_parameters`` when it is " +"called by the server at the first round." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:278 +msgid "" +"In ``fit``, at the first round, we call ``xgb.train()`` to build up the " +"first set of trees. From the second round, we load the global model sent " +"from server to new build Booster object, and then update model weights on" +" local training data with function ``local_boost`` as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:298 +msgid "" +"Given ``num_local_round``, we update trees by calling " +"``bst_input.update`` method. After training, the last " +"``N=num_local_round`` trees will be extracted to send to the server." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:330 +msgid "" +"In ``evaluate``, after loading the global model, we call ``bst.eval_set``" +" function to conduct evaluation on valid set. The AUC value will be " +"returned." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:333 +#, fuzzy +msgid "" +"Now, we can create an instance of our class ``XgbClient`` and add one " +"line to actually run this client:" +msgstr "" +"Nous pouvons maintenant créer une instance de notre classe " +":code:`MnistClient` et ajouter une ligne pour exécuter ce client :" + +#: ../../source/tutorial-quickstart-xgboost.rst:350 +#, fuzzy +msgid "" +"That's it for the client. We only have to implement ``Client`` and call " +"``fl.client.start_client()``. The string ``\"[::]:8080\"`` tells the " +"client which server to connect to. In our case we can run the server and " +"the client on the same machine, therefore we use ``\"[::]:8080\"``. If we" +" run a truly federated workload with the server and clients running on " +"different machines, all that needs to change is the ``server_address`` we" +" point the client at." +msgstr "" +"C'est tout pour le client. Il nous suffit d'implémenter :code:`Client` ou" +" :code:`NumPyClient` et d'appeler :code:`fl.client.start_client()`. La " +"chaîne :code:`\"[: :]:8080\"` indique au client à quel serveur se " +"connecter. Dans notre cas, nous pouvons exécuter le serveur et le client " +"sur la même machine, c'est pourquoi nous utilisons :code:`\"[: " +":]:8080\"`. Si nous exécutons une charge de travail véritablement fédérée" +" avec le serveur et les clients fonctionnant sur des machines " +"différentes, tout ce qui doit changer est l'adresse " +":code:`server_address` vers laquelle nous dirigeons le client." + +#: ../../source/tutorial-quickstart-xgboost.rst:360 +#, fuzzy +msgid "" +"These updates are then sent to the *server* which will aggregate them to " +"produce a better model. Finally, the *server* sends this improved version" +" of the model back to each *client* to finish a complete FL round." 
+msgstr "" +"*Les clients* sont chargés de générer des mises à jour de poids " +"individuelles pour le modèle en fonction de leurs ensembles de données " +"locales. Ces mises à jour sont ensuite envoyées au *serveur* qui les " +"agrège pour produire un meilleur modèle. Enfin, le *serveur* renvoie " +"cette version améliorée du modèle à chaque *client*. Un cycle complet de " +"mises à jour de poids s'appelle un *round*." + +#: ../../source/tutorial-quickstart-xgboost.rst:364 +#, fuzzy +msgid "" +"In a file named ``server.py``, import Flower and FedXgbBagging from " +"``flwr.server.strategy``." +msgstr "" +"Dans un fichier appelé :code:`client.py`, importe Flower et les paquets " +"liés au MXNet :" + +#: ../../source/tutorial-quickstart-xgboost.rst:367 +msgid "We first define a strategy for XGBoost bagging aggregation." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:401 +msgid "" +"We use two clients for this example. An ``evaluate_metrics_aggregation`` " +"function is defined to collect and wighted average the AUC values from " +"clients. The ``config_func`` function is to return the current FL round " +"number to client's ``fit()`` and ``evaluate()`` methods." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:406 +#, fuzzy +msgid "Then, we start the server:" +msgstr "Démarrer le serveur" + +#: ../../source/tutorial-quickstart-xgboost.rst:418 +msgid "Tree-based bagging aggregation" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:420 +msgid "" +"You must be curious about how bagging aggregation works. Let's look into " +"the details." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:422 +msgid "" +"In file ``flwr.server.strategy.fedxgb_bagging.py``, we define " +"``FedXgbBagging`` inherited from ``flwr.server.strategy.FedAvg``. 
Then, " +"we override the ``aggregate_fit``, ``aggregate_evaluate`` and " +"``evaluate`` methods as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:519 +msgid "" +"In ``aggregate_fit``, we sequentially aggregate the clients' XGBoost " +"trees by calling ``aggregate()`` function:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:579 +msgid "" +"In this function, we first fetch the number of trees and the number of " +"parallel trees for the current and previous model by calling " +"``_get_tree_nums``. Then, the fetched information will be aggregated. " +"After that, the trees (containing model weights) are aggregated to " +"generate a new tree model." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:584 +msgid "" +"After traversal of all clients' models, a new global model is generated, " +"followed by the serialisation, and sending back to each client." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:588 +msgid "Launch Federated XGBoost!" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:664 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"XGBoost system. The AUC values can be checked in ``metrics_distributed``." +" One can see that the average AUC increases over FL rounds." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:668 +#, fuzzy +msgid "" +"The full `source code `_ for this example can be found in ``examples" +"/xgboost-quickstart``." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." 
+ +#: ../../source/tutorial-quickstart-xgboost.rst:673 +msgid "Comprehensive Federated XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:675 +msgid "" +"Now that you have known how federated XGBoost work with Flower, it's time" +" to run some more comprehensive experiments by customising the " +"experimental settings. In the xgboost-comprehensive example (`full code " +"`_), we provide more options to define various experimental" +" setups, including aggregation strategies, data partitioning and " +"centralised/distributed evaluation. We also support :doc:`Flower " +"simulation ` making it easy to simulate large " +"client cohorts in a resource-aware manner. Let's take a look!" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:685 +#, fuzzy +msgid "Cyclic training" +msgstr "Formation centralisée" + +#: ../../source/tutorial-quickstart-xgboost.rst:687 +msgid "" +"In addition to bagging aggregation, we offer a cyclic training scheme, " +"which performs FL in a client-by-client fashion. Instead of aggregating " +"multiple clients, there is only one single client participating in the " +"training per round in the cyclic training scenario. The trained local " +"XGBoost trees will be passed to the next client as an initialised model " +"for next round's boosting." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:693 +msgid "To do this, we first customise a ``ClientManager`` in ``server_utils.py``:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:733 +msgid "" +"The customised ``ClientManager`` samples all available clients in each FL" +" round based on the order of connection to the server. Then, we define a " +"new strategy ``FedXgbCyclic`` in " +"``flwr.server.strategy.fedxgb_cyclic.py``, in order to sequentially " +"select only one client in given round and pass the received model to next" +" client." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:775 +msgid "" +"Unlike the original ``FedAvg``, we don't perform aggregation here. " +"Instead, we just make a copy of the received client model as global model" +" by overriding ``aggregate_fit``." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:778 +msgid "" +"Also, the customised ``configure_fit`` and ``configure_evaluate`` methods" +" ensure the clients to be sequentially selected given FL round:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:840 +msgid "Customised data partitioning" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:842 +msgid "" +"In ``dataset.py``, we have a function ``instantiate_partitioner`` to " +"instantiate the data partitioner based on the given ``num_partitions`` " +"and ``partitioner_type``. Currently, we provide four supported " +"partitioner type to simulate the uniformity/non-uniformity in data " +"quantity (uniform, linear, square, exponential)." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:873 +#, fuzzy +msgid "Customised centralised/distributed evaluation" +msgstr "Évaluation centralisée" + +#: ../../source/tutorial-quickstart-xgboost.rst:875 +msgid "" +"To facilitate centralised evaluation, we define a function in " +"``server_utils.py``:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:907 +msgid "" +"This function returns a evaluation function which instantiates a " +"``Booster`` object and loads the global model weights to it. The " +"evaluation is conducted by calling ``eval_set()`` method, and the tested " +"AUC value is reported." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:911 +msgid "" +"As for distributed evaluation on the clients, it's same as the quick-" +"start example by overriding the ``evaluate()`` method insides the " +"``XgbClient`` class in ``client_utils.py``." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:916 +#, fuzzy +msgid "Flower simulation" +msgstr "Simulation de moniteur" + +#: ../../source/tutorial-quickstart-xgboost.rst:918 +msgid "" +"We also provide an example code (``sim.py``) to use the simulation " +"capabilities of Flower to simulate federated XGBoost training on either a" +" single machine or a cluster of machines." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:954 +msgid "" +"After importing all required packages, we define a ``main()`` function to" +" perform the simulation process:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1010 +msgid "" +"We first load the dataset and perform data partitioning, and the pre-" +"processed data is stored in a ``list``. After the simulation begins, the " +"clients won't need to pre-process their partitions again." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1014 +msgid "Then, we define the strategies and other hyper-parameters:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1065 +msgid "" +"After that, we start the simulation by calling " +"``fl.simulation.start_simulation``:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1085 +msgid "" +"One of key parameters for ``start_simulation`` is ``client_fn`` which " +"returns a function to construct a client. We define it as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1126 +msgid "Arguments parser" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1128 +msgid "" +"In ``utils.py``, we define the arguments parsers for clients, server and " +"simulation, allowing users to specify different experimental settings. 
" +"Let's first see the sever side:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1175 +msgid "" +"This allows user to specify training strategies / the number of total " +"clients / FL rounds / participating clients / clients for evaluation, and" +" evaluation fashion. Note that with ``--centralised-eval``, the sever " +"will do centralised evaluation and all functionalities for client " +"evaluation will be disabled." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1180 +msgid "Then, the argument parser on client side:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1234 +msgid "" +"This defines various options for client data partitioning. Besides, " +"clients also have an option to conduct evaluation on centralised test set" +" by setting ``--centralised-eval``, as well as an option to perform " +"scaled learning rate based on the number of clients by setting " +"``--scaled-lr``." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1239 +msgid "We also have an argument parser for simulation:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1317 +msgid "This integrates all arguments for both client and server sides." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1320 +#, fuzzy +msgid "Example commands" +msgstr "Exemples de PyTorch" + +#: ../../source/tutorial-quickstart-xgboost.rst:1322 +msgid "" +"To run a centralised evaluated experiment with bagging strategy on 5 " +"clients with exponential distribution for 50 rounds, we first start the " +"server as below:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1329 +#, fuzzy +msgid "Then, on each client terminal, we start the clients:" +msgstr "Ouvre un autre terminal et démarre le deuxième client :" + +#: ../../source/tutorial-quickstart-xgboost.rst:1335 +msgid "To run the same experiment with Flower simulation:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1341 +#, fuzzy +msgid "" +"The full `code `_ for this comprehensive example can be found in" +" ``examples/xgboost-comprehensive``." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 +#, fuzzy +msgid "Build a strategy from scratch" +msgstr "Élaborer une stratégie à partir de zéro" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 +#, fuzzy +msgid "" +"Welcome to the third part of the Flower federated learning tutorial. In " +"previous parts of this tutorial, we introduced federated learning with " +"PyTorch and the Flower framework (`part 1 " +"`__) and we learned how strategies can be used to customize " +"the execution on both the server and the clients (`part 2 " +"`__)." +msgstr "" +"Bienvenue dans la troisième partie du tutoriel sur l'apprentissage fédéré" +" Flower. 
Dans les parties précédentes de ce tutoriel, nous avons présenté" +" l'apprentissage fédéré avec PyTorch et Flower (`partie 1 " +"`__) " +"et nous avons appris comment les stratégies peuvent être utilisées pour " +"personnaliser l'exécution à la fois sur le serveur et sur les clients " +"(`partie 2 `__)." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +#, fuzzy +msgid "" +"In this notebook, we'll continue to customize the federated learning " +"system we built previously by creating a custom version of FedAvg using " +"the Flower framework, Flower Datasets, and PyTorch." +msgstr "" +"Dans ce carnet, nous allons continuer à personnaliser le système " +"d'apprentissage fédéré que nous avons construit précédemment en créant " +"une version personnalisée de FedAvg (encore une fois, en utilisant " +"`Flower `__ et `PyTorch `__)." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 +#, fuzzy +msgid "" +"`Star Flower on GitHub `__ ⭐️ and join " +"the Flower community on Flower Discuss and the Flower Slack to connect, " +"ask questions, and get help: - `Join Flower Discuss " +"`__ We'd love to hear from you in the " +"``Introduction`` topic! If anything is unclear, post in ``Flower Help - " +"Beginners``. - `Join Flower Slack `__ We'd " +"love to hear from you in the ``#introductions`` channel! If anything is " +"unclear, head over to the ``#questions`` channel." +msgstr "" +"`Star Flower on GitHub `__ ⭐️ et " +"rejoignez la communauté Flower sur Slack pour vous connecter, poser des " +"questions et obtenir de l'aide : `Join Slack `__ 🌼 Nous serions ravis d'avoir de vos nouvelles dans le canal " +"``#introductions`` ! 
Et si quelque chose n'est pas clair, rendez-vous sur" +" le canal ``#questions``." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:18 +#, fuzzy +msgid "Let's build a new ``Strategy`` from scratch! 🌼" +msgstr "Construisons une nouvelle ``Stratégie`` à partir de zéro !" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:30 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:30 +msgid "Preparation" +msgstr "Préparation" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:32 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:33 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:32 +msgid "" +"Before we begin with the actual code, let's make sure that we have " +"everything we need." +msgstr "" +"Avant de commencer le code proprement dit, assurons-nous que nous " +"disposons de tout ce dont nous avons besoin." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:44 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:45 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:44 +msgid "Installing dependencies" +msgstr "Installation des dépendances" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:46 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:47 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:46 +msgid "First, we install the necessary packages:" +msgstr "Tout d'abord, nous installons les paquets nécessaires :" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:66 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:67 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:66 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:66 +msgid "" +"Now that we have all dependencies 
installed, we can import everything we " +"need for this tutorial:" +msgstr "" +"Maintenant que toutes les dépendances sont installées, nous pouvons " +"importer tout ce dont nous avons besoin pour ce tutoriel :" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:106 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:106 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:106 +msgid "" +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " +"GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." +msgstr "" +"Il est possible de passer à un runtime dont l'accélération GPU est " +"activée (sur Google Colab : ``Runtime > Change runtime type > Hardware " +"acclerator : GPU > Save``). Note cependant que Google Colab n'est pas " +"toujours en mesure de proposer l'accélération GPU. Si tu vois une erreur " +"liée à la disponibilité du GPU dans l'une des sections suivantes, " +"envisage de repasser à une exécution basée sur le CPU en définissant " +"``DEVICE = torch.device(\"cpu\")``. Si le runtime a activé l'accélération" +" GPU, tu devrais voir apparaître le résultat ``Training on cuda``, sinon " +"il dira ``Training on cpu``." 
+ +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:119 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:119 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:119 +msgid "Data loading" +msgstr "Chargement des données" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:121 +msgid "" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``." +msgstr "" +"Chargeons maintenant les ensembles d'entraînement et de test CIFAR-10, " +"divisons-les en dix ensembles de données plus petits (chacun divisé en " +"ensemble d'entraînement et de validation) et enveloppons le tout dans " +"leur propre ``DataLoader``." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:163 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:163 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 +msgid "Model training/evaluation" +msgstr "Formation/évaluation du modèle" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:165 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:165 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:171 +msgid "" +"Let's continue with the usual model definition (including " +"``set_parameters`` and ``get_parameters``), training and test functions:" +msgstr "" +"Continuons avec la définition habituelle du modèle (y compris " +"``set_parameters`` et ``get_parameters``), les fonctions d'entraînement " +"et de test :" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:256 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:262 +msgid "Flower client" +msgstr "Client de Flower" + +#: 
../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:264 +#, fuzzy +msgid "" +"To implement the Flower client, we (again) create a subclass of " +"``flwr.client.NumPyClient`` and implement the three methods " +"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " +"``partition_id`` to the client and use it log additional details. We then" +" create an instance of ``ClientApp`` and pass it the ``client_fn``." +msgstr "" +"Pour mettre en œuvre le client Flower, nous créons (à nouveau) une sous-" +"classe de ``flwr.client.NumPyClient`` et mettons en œuvre les trois " +"méthodes ``get_parameters``, ``fit`` et ``evaluate``. Ici, nous " +"transmettons également le ``cid`` au client et l'utilisons pour consigner" +" des détails supplémentaires :" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:311 +msgid "Let's test what we have so far before we continue:" +msgstr "Testons ce que nous avons jusqu'à présent avant de continuer :" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:357 +msgid "Build a Strategy from scratch" +msgstr "Élaborer une stratégie à partir de zéro" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:359 +msgid "" +"Let’s overwrite the ``configure_fit`` method such that it passes a higher" +" learning rate (potentially also other hyperparameters) to the optimizer " +"of a fraction of the clients. We will keep the sampling of the clients as" +" it is in ``FedAvg`` and then change the configuration dictionary (one of" +" the ``FitIns`` attributes)." +msgstr "" +"Remplaçons la méthode ``configure_fit`` de façon à ce qu'elle transmette " +"un taux d'apprentissage plus élevé (potentiellement aussi d'autres " +"hyperparamètres) à l'optimiseur d'une fraction des clients. 
Nous " +"garderons l'échantillonnage des clients tel qu'il est dans ``FedAvg`` et " +"changerons ensuite le dictionnaire de configuration (l'un des attributs " +"``FitIns``)." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:523 +msgid "" +"The only thing left is to use the newly created custom Strategy " +"``FedCustom`` when starting the experiment:" +msgstr "" +"Il ne reste plus qu'à utiliser la stratégie personnalisée nouvellement " +"créée ``FedCustom`` lors du démarrage de l'expérience :" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:559 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:998 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:841 +msgid "Recap" +msgstr "Récapitulation" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:561 +msgid "" +"In this notebook, we’ve seen how to implement a custom strategy. A custom" +" strategy enables granular control over client node configuration, result" +" aggregation, and more. To define a custom strategy, you only have to " +"overwrite the abstract methods of the (abstract) base class ``Strategy``." +" To make custom strategies even more powerful, you can pass custom " +"functions to the constructor of your new class (``__init__``) and then " +"call these functions whenever needed." +msgstr "" +"Dans ce carnet, nous avons vu comment mettre en place une stratégie " +"personnalisée. Une stratégie personnalisée permet un contrôle granulaire " +"sur la configuration des nœuds clients, l'agrégation des résultats, et " +"bien plus encore. Pour définir une stratégie personnalisée, il te suffit " +"d'écraser les méthodes abstraites de la classe de base (abstraite) " +"``Strategy``. 
Pour rendre les stratégies personnalisées encore plus " +"puissantes, tu peux passer des fonctions personnalisées au constructeur " +"de ta nouvelle classe (``__init__``) et appeler ensuite ces fonctions à " +"chaque fois que c'est nécessaire." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:575 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1014 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:813 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:859 +#, fuzzy +msgid "" +"Before you continue, make sure to join the Flower community on Flower " +"Discuss (`Join Flower Discuss `__) and on " +"Slack (`Join Slack `__)." +msgstr "" +"Avant de continuer, n'oublie pas de rejoindre la communauté Flower sur " +"Slack : `Join Slack `__" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:577 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1016 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:815 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:861 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 +msgid "" +"There's a dedicated ``#questions`` channel if you need help, but we'd " +"also love to hear who you are in ``#introductions``!" +msgstr "" +"Il existe un canal dédié aux ``questions`` si vous avez besoin d'aide, " +"mais nous aimerions aussi savoir qui vous êtes dans ``#introductions`` !" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:579 +#, fuzzy +msgid "" +"The `Flower Federated Learning Tutorial - Part 4 " +"`__ introduces ``Client``, the flexible API underlying " +"``NumPyClient``." +msgstr "" +"Le `Tutoriel d'apprentissage fédéré Flower - Partie 4 " +"`__ présente ``Client``, l'API flexible qui sous-tend " +"``NumPyClient``." 
+ +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 +#, fuzzy +msgid "Customize the client" +msgstr "Création du client IMDBC" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 +#, fuzzy +msgid "" +"Welcome to the fourth part of the Flower federated learning tutorial. In " +"the previous parts of this tutorial, we introduced federated learning " +"with PyTorch and Flower (`part 1 `__), we learned how " +"strategies can be used to customize the execution on both the server and " +"the clients (`part 2 `__), and we built our own " +"custom strategy from scratch (`part 3 `__)." +msgstr "" +"Bienvenue dans la quatrième partie du tutoriel sur l'apprentissage fédéré" +" Flower. Dans les parties précédentes de ce tutoriel, nous avons présenté" +" l'apprentissage fédéré avec PyTorch et Flower (`partie 1 " +"`__), " +"nous avons appris comment les stratégies peuvent être utilisées pour " +"personnaliser l'exécution à la fois sur le serveur et les clients " +"(`partie 2 `__), et nous avons construit notre propre stratégie " +"personnalisée à partir de zéro (`partie 3 - WIP " +"`__)." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 +msgid "" +"In this notebook, we revisit ``NumPyClient`` and introduce a new " +"baseclass for building clients, simply named ``Client``. In previous " +"parts of this tutorial, we've based our client on ``NumPyClient``, a " +"convenience class which makes it easy to work with machine learning " +"libraries that have good NumPy interoperability. With ``Client``, we gain" +" a lot of flexibility that we didn't have before, but we'll also have to " +"do a few things the we didn't have to do before." +msgstr "" +"Dans ce carnet, nous revisitons `NumPyClient`` et introduisons une " +"nouvelle classe de base pour construire des clients, simplement appelée " +"`Client``. 
Dans les parties précédentes de ce tutoriel, nous avons basé " +"notre client sur ``NumPyClient``, une classe de commodité qui facilite le" +" travail avec les bibliothèques d'apprentissage automatique qui ont une " +"bonne interopérabilité NumPy. Avec ``Client``, nous gagnons beaucoup de " +"flexibilité que nous n'avions pas auparavant, mais nous devrons également" +" faire quelques choses que nous n'avions pas à faire auparavant." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:19 +#, fuzzy +msgid "" +"Let's go deeper and see what it takes to move from ``NumPyClient`` to " +"``Client``! 🌼" +msgstr "" +"Allons plus loin et voyons ce qu'il faut faire pour passer de " +"``NumPyClient`` à ``Client`` !" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:31 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:30 +msgid "Step 0: Preparation" +msgstr "Étape 0 : Préparation" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:121 +#, fuzzy +msgid "" +"Let's now define a loading function for the CIFAR-10 training and test " +"set, partition them into ``num_partitions`` smaller datasets (each split " +"into training and validation set), and wrap everything in their own " +"``DataLoader``." +msgstr "" +"Chargeons maintenant les ensembles d'entraînement et de test CIFAR-10, " +"divisons-les en dix ensembles de données plus petits (chacun divisé en " +"ensemble d'entraînement et de validation) et enveloppons le tout dans " +"leur propre ``DataLoader``." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:256 +msgid "Step 1: Revisiting NumPyClient" +msgstr "Étape 1 : Revoir NumPyClient" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:258 +#, fuzzy +msgid "" +"So far, we've implemented our client by subclassing " +"``flwr.client.NumPyClient``. The three methods we implemented are " +"``get_parameters``, ``fit``, and ``evaluate``." 
+msgstr "" +"Jusqu'à présent, nous avons implémenté notre client en sous-classant " +"``flwr.client.NumPyClient``. Les trois méthodes que nous avons " +"implémentées sont ``get_parameters``, ``fit`` et ``evaluate``. Enfin, " +"nous enveloppons la création d'instances de cette classe dans une " +"fonction appelée ``client_fn`` :" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:299 +msgid "" +"Then, we define the function ``numpyclient_fn`` that is used by Flower to" +" create the ``FlowerNumpyClient`` instances on demand. Finally, we create" +" the ``ClientApp`` and pass the ``numpyclient_fn`` to it." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:328 +#, fuzzy +msgid "" +"We've seen this before, there's nothing new so far. The only *tiny* " +"difference compared to the previous notebook is naming, we've changed " +"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " +"``numpyclient_fn``. Next, we configure the number of federated learning " +"rounds using ``ServerConfig`` and create the ``ServerApp`` with this " +"config:" +msgstr "" +"Nous avons déjà vu cela auparavant, il n'y a rien de nouveau jusqu'à " +"présent. La seule *petite* différence par rapport au carnet précédent est" +" le nommage, nous avons changé ``FlowerClient`` en ``FlowerNumPyClient`` " +"et ``client_fn`` en ``numpyclient_fn``. Exécutons-le pour voir la sortie " +"que nous obtenons :" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:355 +msgid "" +"Finally, we specify the resources for each client and run the simulation " +"to see the output we get:" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:389 +#, fuzzy +msgid "" +"This works as expected, ten clients are training for three rounds of " +"federated learning." +msgstr "" +"Cela fonctionne comme prévu, deux clients s'entraînent pour trois tours " +"d'apprentissage fédéré." 
+ +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:391 +#, fuzzy +msgid "" +"Let's dive a little bit deeper and discuss how Flower executes this " +"simulation. Whenever a client is selected to do some work, " +"``run_simulation`` launches the ``ClientApp`` object which in turn calls " +"the function ``numpyclient_fn`` to create an instance of our " +"``FlowerNumPyClient`` (along with loading the model and the data)." +msgstr "" +"Plongeons un peu plus profondément et discutons de la façon dont Flower " +"exécute cette simulation. Chaque fois qu'un client est sélectionné pour " +"effectuer un travail, ``start_simulation`` appelle la fonction " +"``numpyclient_fn`` pour créer une instance de notre ``FlowerNumPyClient``" +" (en même temps qu'il charge le modèle et les données)." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:393 +msgid "" +"But here's the perhaps surprising part: Flower doesn't actually use the " +"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " +"makes it look like a subclass of ``flwr.client.Client``, not " +"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " +"know how to handle ``NumPyClient``'s, it only knows how to handle " +"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " +"top of ``Client``." +msgstr "" +"Mais voici la partie la plus surprenante : Flower n'utilise pas " +"directement l'objet ``FlowerNumPyClient``. Au lieu de cela, il enveloppe " +"l'objet pour le faire ressembler à une sous-classe de " +"``flwr.client.Client``, et non de ``flwr.client.NumPyClient``. En fait, " +"le noyau de Flower ne sait pas comment gérer les ``NumPyClient``, il sait " +"seulement comment gérer les ``Client``. ``NumPyClient`` est juste une " +"abstraction de commodité construite au-dessus de ``Client``."
+ +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:395 +msgid "" +"Instead of building on top of ``NumPyClient``, we can directly build on " +"top of ``Client``." +msgstr "" +"Au lieu de construire par-dessus ``NumPyClient``, nous pouvons construire " +"directement par-dessus ``Client``." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:407 +msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" +msgstr "Étape 2 : Passer de ``NumPyClient`` à ``Client``" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:409 +msgid "" +"Let's try to do the same thing using ``Client`` instead of " +"``NumPyClient``." +msgstr "" +"Essayons de faire la même chose en utilisant ``Client`` au lieu de " +"``NumPyClient``." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:519 +msgid "" +"Before we discuss the code in more detail, let's try to run it! Gotta " +"make sure our new ``Client``-based client works, right?" +msgstr "" +"Avant de discuter du code plus en détail, essayons de l'exécuter ! Nous " +"devons nous assurer que notre nouveau client basé sur le ``Client`` " +"fonctionne, n'est-ce pas ?" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:545 +msgid "" +"That's it, we're now using ``Client``. It probably looks similar to what " +"we've done with ``NumPyClient``. So what's the difference?" +msgstr "" +"Voilà, nous utilisons maintenant ``Client``. Cela ressemble probablement " +"à ce que nous avons fait avec ``NumPyClient``. Alors quelle est la " +"différence ?" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:547 +msgid "" +"First of all, it's more code. But why? The difference comes from the fact" +" that ``Client`` expects us to take care of parameter serialization and " +"deserialization. For Flower to be able to send parameters over the " +"network, it eventually needs to turn these parameters into ``bytes``. 
" +"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " +"serialization. Turning raw bytes into something more useful (like NumPy " +"``ndarray``'s) is called deserialization. Flower needs to do both: it " +"needs to serialize parameters on the server-side and send them to the " +"client, the client needs to deserialize them to use them for local " +"training, and then serialize the updated parameters again to send them " +"back to the server, which (finally!) deserializes them again in order to " +"aggregate them with the updates received from other clients." +msgstr "" +"First of all, it's more code. But why? The difference comes from the fact" +" that ``Client`` expects us to take care of parameter serialization and " +"deserialization. For Flower to be able to send parameters over the " +"network, it eventually needs to turn these parameters into ``bytes``. " +"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " +"serialization. Turning raw bytes into something more useful (like NumPy " +"``ndarray``'s) is called deserialization. Flower needs to do both: it " +"needs to serialize parameters on the server-side and send them to the " +"client, the client needs to deserialize them to use them for local " +"training, and then serialize the updated parameters again to send them " +"back to the server, which (finally!) deserializes them again in order to " +"aggregate them with the updates received from other clients." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:550 +msgid "" +"The only *real* difference between Client and NumPyClient is that " +"NumPyClient takes care of serialization and deserialization for you. It " +"can do so because it expects you to return parameters as NumPy ndarray's," +" and it knows how to handle these. This makes working with machine " +"learning libraries that have good NumPy support (most of them) a breeze." 
+msgstr "" +"La seule *vraie* différence entre Client et NumPyClient est que " +"NumPyClient s'occupe de la sérialisation et de la désérialisation pour " +"toi. Il peut le faire parce qu'il s'attend à ce que tu renvoies des " +"paramètres sous forme de NumPy ndarray, et il sait comment les gérer. " +"Cela permet de travailler avec des bibliothèques d'apprentissage " +"automatique qui ont une bonne prise en charge de NumPy (la plupart " +"d'entre elles) en un clin d'œil." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:552 +msgid "" +"In terms of API, there's one major difference: all methods in Client take" +" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " +"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " +"``NumPyClient`` on the other hand have multiple arguments (e.g., " +"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" +" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " +"``NumPyClient.fit``) if there are multiple things to handle. These " +"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " +"values you're used to from ``NumPyClient``." +msgstr "" +"In terms of API, there's one major difference: all methods in Client take" +" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " +"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " +"``NumPyClient`` on the other hand have multiple arguments (e.g., " +"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" +" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " +"``NumPyClient.fit``) if there are multiple things to handle. These " +"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " +"values you're used to from ``NumPyClient``." 
+ +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:565 +msgid "Step 3: Custom serialization" +msgstr "Étape 3 : Sérialisation personnalisée" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:567 +msgid "" +"Here we will explore how to implement custom serialization with a simple " +"example." +msgstr "" +"Nous allons ici explorer comment mettre en œuvre une sérialisation " +"personnalisée à l'aide d'un exemple simple." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:569 +msgid "" +"But first what is serialization? Serialization is just the process of " +"converting an object into raw bytes, and equally as important, " +"deserialization is the process of converting raw bytes back into an " +"object. This is very useful for network communication. Indeed, without " +"serialization, you could not just a Python object through the internet." +msgstr "" +"Mais d'abord, qu'est-ce que la sérialisation ? La sérialisation est " +"simplement le processus de conversion d'un objet en octets bruts, et tout" +" aussi important, la désérialisation est le processus de reconversion des" +" octets bruts en objet. Ceci est très utile pour la communication réseau." +" En effet, sans la sérialisation, tu ne pourrais pas faire passer un " +"objet Python par Internet." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:571 +msgid "" +"Federated Learning relies heavily on internet communication for training " +"by sending Python objects back and forth between the clients and the " +"server. This means that serialization is an essential part of Federated " +"Learning." +msgstr "" +"L'apprentissage fédéré s'appuie fortement sur la communication Internet " +"pour la formation en envoyant des objets Python dans les deux sens entre " +"les clients et le serveur, ce qui signifie que la sérialisation est un " +"élément essentiel de l'apprentissage fédéré." 
+ +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:573 +msgid "" +"In the following section, we will write a basic example where instead of " +"sending a serialized version of our ``ndarray``\\ s containing our " +"parameters, we will first convert the ``ndarray`` into sparse matrices, " +"before sending them. This technique can be used to save bandwidth, as in " +"certain cases where the weights of a model are sparse (containing many 0 " +"entries), converting them to a sparse matrix can greatly improve their " +"bytesize." +msgstr "" +"Dans la section suivante, nous allons écrire un exemple de base où, au " +"lieu d'envoyer une version sérialisée de nos ``ndarray`` contenant nos " +"paramètres, nous allons d'abord convertir les ``ndarray`` en matrices " +"éparses, avant de les envoyer. Cette technique peut être utilisée pour " +"économiser de la bande passante, car dans certains cas où les poids d'un " +"modèle sont épars (contenant de nombreuses entrées 0), les convertir en " +"une matrice éparse peut grandement améliorer leur taille en octets." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:576 +msgid "Our custom serialization/deserialization functions" +msgstr "Nos fonctions de sérialisation/désérialisation personnalisées" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:578 +msgid "" +"This is where the real serialization/deserialization will happen, " +"especially in ``ndarray_to_sparse_bytes`` for serialization and " +"``sparse_bytes_to_ndarray`` for deserialization." +msgstr "" +"C'est là que la véritable sérialisation/désérialisation se produira, en " +"particulier dans ``ndarray_to_sparse_bytes`` pour la sérialisation et " +"``sparse_bytes_to_ndarray`` pour la désérialisation." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:580 +msgid "" +"Note that we imported the ``scipy.sparse`` library in order to convert " +"our arrays." 
+msgstr "" +"Notez que nous avons importé la bibliothèque ``scipy.sparse`` afin de " +"convertir nos tableaux." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:668 +msgid "Client-side" +msgstr "Côté client" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:670 +msgid "" +"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " +"will just have to call our custom functions in our " +"``flwr.client.Client``." +msgstr "" +"Pour pouvoir sérialiser nos ``ndarray`` en paramètres sparse, il nous " +"suffira d'appeler nos fonctions personnalisées dans notre " +"``flwr.client.Client``." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:672 +msgid "" +"Indeed, in ``get_parameters`` we need to serialize the parameters we got " +"from our network using our custom ``ndarrays_to_sparse_parameters`` " +"defined above." +msgstr "" +"En effet, dans ``get_parameters`` nous devons sérialiser les paramètres " +"que nous avons obtenus de notre réseau en utilisant nos " +"``ndarrays_to_sparse_parameters`` personnalisés définis ci-dessus." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:674 +msgid "" +"In ``fit``, we first need to deserialize the parameters coming from the " +"server using our custom ``sparse_parameters_to_ndarrays`` and then we " +"need to serialize our local results with " +"``ndarrays_to_sparse_parameters``." +msgstr "" +"Dans ``fit``, nous devons d'abord désérialiser les paramètres provenant " +"du serveur en utilisant notre ``sparse_parameters_to_ndarrays`` " +"personnalisé, puis nous devons sérialiser nos résultats locaux avec " +"``ndarrays_to_sparse_parameters``." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:676 +msgid "" +"In ``evaluate``, we will only need to deserialize the global parameters " +"with our custom function." 
+msgstr "" +"Dans ``evaluate``, nous n'aurons besoin que de désérialiser les " +"paramètres globaux avec notre fonction personnalisée." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:781 +msgid "Server-side" +msgstr "Côté serveur" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:783 +msgid "" +"For this example, we will just use ``FedAvg`` as a strategy. To change " +"the serialization and deserialization here, we only need to reimplement " +"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other" +" functions of the strategy will be inherited from the super class " +"``FedAvg``." +msgstr "" +"Pour cet exemple, nous utiliserons simplement ``FedAvg`` comme stratégie." +" Pour modifier la sérialisation et la désérialisation ici, il suffit de " +"réimplémenter les fonctions ``evaluate`` et ``aggregate_fit`` de " +"``FedAvg``. Les autres fonctions de la stratégie seront héritées de la " +"super-classe ``FedAvg``." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:785 +msgid "As you can see only one line as change in ``evaluate``:" +msgstr "Comme tu peux le voir, seule une ligne a été modifiée dans ``evaluate`` :" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:791 +msgid "" +"And for ``aggregate_fit``, we will first deserialize every result we " +"received:" +msgstr "" +"Et pour ``aggregate_fit``, nous allons d'abord désérialiser chaque " +"résultat que nous avons reçu :" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:800 +msgid "And then serialize the aggregated result:" +msgstr "Puis sérialise le résultat agrégé :" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:959 +msgid "We can now run our custom serialization example!" +msgstr "" +"Nous pouvons maintenant exécuter notre exemple de sérialisation " +"personnalisée !" 
+ +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1000 +msgid "" +"In this part of the tutorial, we've seen how we can build clients by " +"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " +"convenience abstraction that makes it easier to work with machine " +"learning libraries that have good NumPy interoperability. ``Client`` is a" +" more flexible abstraction that allows us to do things that are not " +"possible in ``NumPyClient``. In order to do so, it requires us to handle " +"parameter serialization and deserialization ourselves." +msgstr "" +"Dans cette partie du tutoriel, nous avons vu comment construire des " +"clients en sous-classant soit ``NumPyClient``, soit ``Client``. " +"``NumPyClient`` est une abstraction de commodité qui facilite le travail " +"avec les bibliothèques d'apprentissage automatique qui ont une bonne " +"interopérabilité NumPy. ``Client`` est une abstraction plus flexible qui " +"nous permet de faire des choses qui ne sont pas possibles dans " +"``NumPyClient``. Pour ce faire, elle nous oblige à gérer nous-mêmes la " +"sérialisation et la désérialisation des paramètres." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1018 +msgid "" +"This is the final part of the Flower tutorial (for now!), " +"congratulations! You're now well equipped to understand the rest of the " +"documentation. There are many topics we didn't cover in the tutorial, we " +"recommend the following resources:" +msgstr "" +"C'est la dernière partie du tutoriel Flower (pour l'instant !), " +"félicitations ! Tu es maintenant bien équipé pour comprendre le reste de " +"la documentation. 
Il y a de nombreux sujets que nous n'avons pas abordés " +"dans le tutoriel, nous te recommandons les ressources suivantes :" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1020 +msgid "`Read Flower Docs `__" +msgstr "`Lire la documentation Flower `__" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1021 +#, fuzzy +msgid "`Check out Flower Code Examples `__" +msgstr "" +"`Découvre les exemples de code Flower " +"`__" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1022 +#, fuzzy +msgid "" +"`Use Flower Baselines for your research " +"`__" +msgstr "" +"`Utilise les Flower Baselines pour ta recherche " +"`__" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1023 +#, fuzzy +msgid "" +"`Watch Flower AI Summit 2024 videos `__" +msgstr "" +"`Regardez les vidéos du Flower AI Summit 2024 `__" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 +msgid "Get started with Flower" +msgstr "Premiers pas avec Flower" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 +msgid "Welcome to the Flower federated learning tutorial!" +msgstr "Bienvenue au tutoriel d'apprentissage fédéré Flower !" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 +#, fuzzy +msgid "" +"In this notebook, we'll build a federated learning system using the " +"Flower framework, Flower Datasets and PyTorch. In part 1, we use PyTorch " +"for the model training pipeline and data loading. In part 2, we federate " +"the PyTorch project using Flower." +msgstr "" +"Dans ce carnet, nous allons construire un système d'apprentissage fédéré " +"en utilisant le framework Flower, Flower Datasets et PyTorch. Dans la " +"première partie, nous utilisons PyTorch pour le pipeline d'entraînement " +"du modèle et le chargement des données. Dans la deuxième partie, nous " +"fédérons le projet PyTorch en utilisant Flower."
+ +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:18 +#, fuzzy +msgid "Let's get started! 🌼" +msgstr "Allons-y, déclarons-le !" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:32 +msgid "" +"Before we begin with any actual code, let's make sure that we have " +"everything we need." +msgstr "" +"Avant de commencer à coder, assurons-nous que nous disposons de tout ce " +"dont nous avons besoin." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:44 +#, fuzzy +msgid "Install dependencies" +msgstr "Installation des dépendances" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:46 +#, fuzzy +msgid "" +"Next, we install the necessary packages for PyTorch (``torch`` and " +"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " +"(``flwr``):" +msgstr "" +"Ensuite, nous installons les paquets nécessaires pour PyTorch (``torch`` " +"et ``torchvision``) et Flower (``flwr``) :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:109 +#, fuzzy +msgid "" +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" +" GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." +msgstr "" +"Il est possible de passer à un runtime dont l'accélération GPU est " +"activée (sur Google Colab : ``Runtime > Change runtime type > Hardware " +"acclerator : GPU > Save``). Note cependant que Google Colab n'est pas " +"toujours en mesure de proposer l'accélération GPU. 
Si tu vois une erreur " +"liée à la disponibilité du GPU dans l'une des sections suivantes, " +"envisage de repasser à une exécution basée sur le CPU en définissant " +"``DEVICE = torch.device(\"cpu\")``. Si le runtime a activé l'accélération" +" GPU, tu devrais voir apparaître le résultat ``Training on cuda``, sinon " +"il dira ``Training on cpu``." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:122 +#, fuzzy +msgid "Load the data" +msgstr "Chargement des données" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:124 +#, fuzzy +msgid "" +"Federated learning can be applied to many different types of tasks across" +" different domains. In this tutorial, we introduce federated learning by " +"training a simple convolutional neural network (CNN) on the popular " +"CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that " +"distinguish between images from ten different classes: 'airplane', " +"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " +"'truck'." +msgstr "" +"L'apprentissage fédéré peut être appliqué à de nombreux types de tâches " +"dans différents domaines. Dans ce tutoriel, nous présentons " +"l'apprentissage fédéré en formant un simple réseau neuronal " +"convolutionnel (CNN) sur l'ensemble de données populaire CIFAR-10. " +"CIFAR-10 peut être utilisé pour former des classificateurs d'images qui " +"font la distinction entre les images de dix classes différentes :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:135 +#, fuzzy +msgid "" +"We simulate having multiple datasets from multiple organizations (also " +"called the \"cross-silo\" setting in federated learning) by splitting the" +" original CIFAR-10 dataset into multiple partitions. Each partition will " +"represent the data from a single organization. 
We're doing this purely " +"for experimentation purposes, in the real world there's no need for data " +"splitting because each organization already has their own data (the data " +"is naturally partitioned)." +msgstr "" +"Nous simulons le fait d'avoir plusieurs ensembles de données provenant de" +" plusieurs organisations (également appelé le paramètre \"cross-silo\" " +"dans l'apprentissage fédéré) en divisant l'ensemble de données CIFAR-10 " +"original en plusieurs partitions. Chaque partition représentera les " +"données d'une seule organisation. Nous faisons cela purement à des fins " +"d'expérimentation, dans le monde réel, il n'y a pas besoin de diviser les" +" données parce que chaque organisation a déjà ses propres données (les " +"données sont donc naturellement partitionnées)." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:137 +#, fuzzy +msgid "" +"Each organization will act as a client in the federated learning system. " +"Having ten organizations participate in a federation means having ten " +"clients connected to the federated learning server." +msgstr "" +"Chaque organisation agira comme un client dans le système d'apprentissage" +" fédéré. Ainsi, le fait que dix organisations participent à une " +"fédération signifie que dix clients sont connectés au serveur " +"d'apprentissage fédéré :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:148 +msgid "" +"We use the Flower Datasets library (``flwr-datasets``) to partition " +"CIFAR-10 into ten partitions using ``FederatedDataset``. 
We will create a" +" small training and test set for each of the ten organizations and wrap " +"each of these into a PyTorch ``DataLoader``:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:196 +#, fuzzy +msgid "" +"We now have a function that can return a training set and validation set " +"(``trainloader`` and ``valloader``) representing one dataset from one of " +"ten different organizations. Each ``trainloader``/``valloader`` pair " +"contains 4000 training examples and 1000 validation examples. There's " +"also a single ``testloader`` (we did not split the test set). Again, this" +" is only necessary for building research or educational systems, actual " +"federated learning systems have their data naturally distributed across " +"multiple partitions." +msgstr "" +"Nous avons maintenant une liste de dix ensembles de formation et dix " +"ensembles de validation (``trainloaders`` et ``valloaders``) représentant" +" les données de dix organisations différentes. Chaque paire " +"``trainloader``/``valloader`` contient 4500 exemples de formation et 500 " +"exemples de validation. Il y a également un seul ``testloader`` (nous " +"n'avons pas divisé l'ensemble de test). Encore une fois, cela n'est " +"nécessaire que pour construire des systèmes de recherche ou d'éducation, " +"les systèmes d'apprentissage fédérés actuels ont leurs données " +"naturellement distribuées à travers plusieurs partitions." 
+ +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:199 +#, fuzzy +msgid "" +"Let's take a look at the first batch of images and labels in the first " +"training set (i.e., ``trainloader`` from ``partition_id=0``) before we " +"move on:" +msgstr "" +"Jetons un coup d'œil au premier lot d'images et d'étiquettes du premier " +"ensemble d'entraînement (c'est-à-dire ``trainloaders[0]``) avant de " +"poursuivre :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:241 +#, fuzzy +msgid "" +"The output above shows a random batch of images from the ``trainloader`` " +"from the first of ten partitions. It also prints the labels associated " +"with each image (i.e., one of the ten possible labels we've seen above). " +"If you run the cell again, you should see another batch of images." +msgstr "" +"La sortie ci-dessus montre un lot aléatoire d'images provenant du premier" +" ``chargeur de formation`` de notre liste de dix ``chargeurs de " +"formation``. Elle imprime également les étiquettes associées à chaque " +"image (c'est-à-dire l'une des dix étiquettes possibles que nous avons " +"vues ci-dessus). Si tu exécutes à nouveau la cellule, tu devrais voir un " +"autre lot d'images." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:253 +msgid "Step 1: Centralized Training with PyTorch" +msgstr "Étape 1 : Formation centralisée avec PyTorch" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:264 +msgid "" +"Next, we're going to use PyTorch to define a simple convolutional neural " +"network. This introduction assumes basic familiarity with PyTorch, so it " +"doesn't cover the PyTorch-related aspects in full detail. If you want to " +"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " +"MINUTE BLITZ " +"`__." +msgstr "" +"Ensuite, nous allons utiliser PyTorch pour définir un simple réseau " +"neuronal convolutif. 
Cette introduction suppose une familiarité de base " +"avec PyTorch, elle ne couvre donc pas en détail les aspects liés à " +"PyTorch. Si tu veux plonger plus profondément dans PyTorch, nous te " +"recommandons `DEEP LEARNING WITH PYTORCH : A 60 MINUTE BLITZ " +"`__." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:276 +#, fuzzy +msgid "Define the model" +msgstr "Définir le modèle" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:278 +msgid "" +"We use the simple CNN described in the `PyTorch tutorial " +"`__:" +msgstr "" +"Nous utilisons le CNN simple décrit dans le tutoriel `PyTorch " +"`__ :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:315 +msgid "Let's continue with the usual training and test functions:" +msgstr "Poursuivons avec les fonctions habituelles de formation et de test :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:375 +#, fuzzy +msgid "Train the model" +msgstr "Entraîne le modèle" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:377 +#, fuzzy +msgid "" +"We now have all the basic building blocks we need: a dataset, a model, a " +"training function, and a test function. Let's put them together to train " +"the model on the dataset of one of our organizations " +"(``partition_id=0``). This simulates the reality of most machine learning" +" projects today: each organization has their own data and trains models " +"only on this internal data:" +msgstr "" +"Nous avons maintenant tous les éléments de base dont nous avons besoin : " +"un ensemble de données, un modèle, une fonction d'entraînement et une " +"fonction de test. Assemblons-les pour entraîner le modèle sur l'ensemble " +"de données de l'une de nos organisations (``trainloaders[0]``). 
Cela " +"simule la réalité de la plupart des projets d'apprentissage automatique " +"aujourd'hui : chaque organisation possède ses propres données et entraîne" +" les modèles uniquement sur ces données internes :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 +#, fuzzy +msgid "" +"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " +"in a test set accuracy of about 41%, which is not good, but at the same " +"time, it doesn't really matter for the purposes of this tutorial. The " +"intent was just to show a simple centralized training pipeline that sets " +"the stage for what comes next - federated learning!" +msgstr "" +"L'entraînement du CNN simple sur notre fractionnement CIFAR-10 pendant 5 " +"époques devrait se traduire par une précision de l'ensemble de test " +"d'environ 41 %, ce qui n'est pas bon, mais en même temps, cela n'a pas " +"vraiment d'importance pour les besoins de ce tutoriel. L'intention était " +"juste de montrer un pipeline d'entraînement centralisé simpliste qui " +"prépare le terrain pour ce qui vient ensuite - l'apprentissage fédéré !" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 +msgid "Step 2: Federated Learning with Flower" +msgstr "Étape 2 : Apprentissage fédéré avec Flower" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 +msgid "" +"Step 1 demonstrated a simple centralized training pipeline. All data was " +"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." +" Next, we'll simulate a situation where we have multiple datasets in " +"multiple organizations and where we train a model over these " +"organizations using federated learning." +msgstr "" +"L'étape 1 a montré un simple pipeline de formation centralisé. Toutes les" +" données étaient au même endroit (c'est-à-dire un seul ``trainloader`` et" +" un seul ``valloader``). 
Ensuite, nous allons simuler une situation où " +"nous avons plusieurs ensembles de données dans plusieurs organisations et" +" où nous formons un modèle sur ces organisations à l'aide de " +"l'apprentissage fédéré." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 +#, fuzzy +msgid "Update model parameters" +msgstr "Mise à jour des paramètres du modèle" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 +#, fuzzy +msgid "" +"In federated learning, the server sends global model parameters to the " +"client, and the client updates the local model with parameters received " +"from the server. It then trains the model on the local data (which " +"changes the model parameters locally) and sends the updated/changed model" +" parameters back to the server (or, alternatively, it sends just the " +"gradients back to the server, not the full model parameters)." +msgstr "" +"Dans l'apprentissage fédéré, le serveur envoie les paramètres du modèle " +"global au client, et le client met à jour le modèle local avec les " +"paramètres reçus du serveur. Il entraîne ensuite le modèle sur les " +"données locales (ce qui modifie les paramètres du modèle localement) et " +"renvoie les paramètres du modèle mis à jour/changés au serveur (ou, " +"alternativement, il renvoie seulement les gradients au serveur, et non " +"pas les paramètres complets du modèle)." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 +msgid "" +"We need two helper functions to update the local model with parameters " +"received from the server and to get the updated model parameters from the" +" local model: ``set_parameters`` and ``get_parameters``. The following " +"two functions do just that for the PyTorch model above." 
+msgstr "" +"Nous avons besoin de deux fonctions d'aide pour mettre à jour le modèle " +"local avec les paramètres reçus du serveur et pour obtenir les paramètres" +" mis à jour du modèle local : ``set_parameters`` et ``get_parameters``. " +"Les deux fonctions suivantes font exactement cela pour le modèle PyTorch " +"ci-dessus." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 +#, fuzzy +msgid "" +"The details of how this works are not really important here (feel free to" +" consult the PyTorch documentation if you want to learn more). In " +"essence, we use ``state_dict`` to access PyTorch model parameter tensors." +" The parameter tensors are then converted to/from a list of NumPy " +"ndarray's (which the Flower ``NumPyClient`` knows how to " +"serialize/deserialize):" +msgstr "" +"Les détails de ce fonctionnement ne sont pas vraiment importants ici " +"(n'hésite pas à consulter la documentation PyTorch si tu veux en savoir " +"plus). En substance, nous utilisons ``state_dict`` pour accéder aux " +"tenseurs de paramètres du modèle PyTorch. Les tenseurs de paramètres sont" +" ensuite convertis en/depuis une liste de ndarray NumPy (que Flower sait " +"sérialiser/désérialiser) :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 +#, fuzzy +msgid "Define the Flower ClientApp" +msgstr "Client de Flower" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 +#, fuzzy +msgid "" +"With that out of the way, let's move on to the interesting part. " +"Federated learning systems consist of a server and multiple clients. In " +"Flower, we create a ``ServerApp`` and a ``ClientApp`` to run the server-" +"side and client-side code, respectively." +msgstr "" +"Ceci étant dit, passons à la partie intéressante. Les systèmes " +"d'apprentissage fédérés se composent d'un serveur et de plusieurs " +"clients. 
Dans Flower, nous créons des clients en mettant en œuvre des " +"sous-classes de ``flwr.client.Client`` ou de ``flwr.client.NumPyClient``." +" Nous utilisons ``NumPyClient`` dans ce tutoriel parce qu'il est plus " +"facile à mettre en œuvre et qu'il nous oblige à rédiger moins de modèles " +"de chaudière." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 +#, fuzzy +msgid "" +"The first step toward creating a ``ClientApp`` is to implement a " +"subclasses of ``flwr.client.Client`` or ``flwr.client.NumPyClient``. We " +"use ``NumPyClient`` in this tutorial because it is easier to implement " +"and requires us to write less boilerplate. To implement ``NumPyClient``, " +"we create a subclass that implements the three methods " +"``get_parameters``, ``fit``, and ``evaluate``:" +msgstr "" +"Ceci étant dit, passons à la partie intéressante. Les systèmes " +"d'apprentissage fédérés se composent d'un serveur et de plusieurs " +"clients. Dans Flower, nous créons des clients en mettant en œuvre des " +"sous-classes de ``flwr.client.Client`` ou de ``flwr.client.NumPyClient``." +" Nous utilisons ``NumPyClient`` dans ce tutoriel parce qu'il est plus " +"facile à mettre en œuvre et qu'il nous oblige à rédiger moins de modèles " +"de chaudière." 
+ +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 +msgid "``get_parameters``: Return the current local model parameters" +msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 +#, fuzzy +msgid "" +"``fit``: Receive model parameters from the server, train the model on the" +" local data, and return the updated model parameters to the server" +msgstr "" +"``fit`` : reçoit les paramètres du modèle du serveur, entraîne les " +"paramètres du modèle sur les données locales et renvoie les paramètres du" +" modèle (mis à jour) au serveur" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 +#, fuzzy +msgid "" +"``evaluate``: Receive model parameters from the server, evaluate the " +"model on the local data, and return the evaluation result to the server" +msgstr "" +"``evaluate`` : reçoit les paramètres du modèle du serveur, évalue les " +"paramètres du modèle sur les données locales et renvoie le résultat de " +"l'évaluation au serveur" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 +msgid "" +"We mentioned that our clients will use the previously defined PyTorch " +"components for model training and evaluation. Let's see a simple Flower " +"client implementation that brings everything together:" +msgstr "" +"Nous avons mentionné que nos clients utiliseront les composants PyTorch " +"définis précédemment pour la formation et l'évaluation des modèles. " +"Voyons une simple mise en œuvre du client Flower qui réunit tout cela :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 +#, fuzzy +msgid "" +"Our class ``FlowerClient`` defines how local training/evaluation will be " +"performed and allows Flower to call the local training/evaluation through" +" ``fit`` and ``evaluate``. 
Each instance of ``FlowerClient`` represents a" +" *single client* in our federated learning system. Federated learning " +"systems have multiple clients (otherwise, there's not much to federate), " +"so each client will be represented by its own instance of " +"``FlowerClient``. If we have, for example, three clients in our workload," +" then we'd have three instances of ``FlowerClient`` (one on each of the " +"machines we'd start the client on). Flower calls ``FlowerClient.fit`` on " +"the respective instance when the server selects a particular client for " +"training (and ``FlowerClient.evaluate`` for evaluation)." +msgstr "" +"Our class ``FlowerClient`` defines how local training/evaluation will be " +"performed and allows Flower to call the local training/evaluation through" +" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" +" *single client* in our federated learning system. Federated learning " +"systems have multiple clients (otherwise, there's not much to federate), " +"so each client will be represented by its own instance of " +"``FlowerClient``. If we have, for example, three clients in our workload," +" then we'd have three instances of ``FlowerClient``. Flower calls " +"``FlowerClient.fit`` on the respective instance when the server selects a" +" particular client for training (and ``FlowerClient.evaluate`` for " +"evaluation)." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:516 +#, fuzzy +msgid "" +"In this notebook, we want to simulate a federated learning system with 10" +" clients *on a single machine*. This means that the server and all 10 " +"clients will live on a single machine and share resources such as CPU, " +"GPU, and memory. Having 10 clients would mean having 10 instances of " +"``FlowerClient`` in memory. 
Doing this on a single machine can quickly " +"exhaust the available memory resources, even if only a subset of these " +"clients participates in a single round of federated learning." +msgstr "" +"Dans ce carnet, nous voulons simuler un système d'apprentissage fédéré " +"avec 10 clients sur une seule machine. Cela signifie que le serveur et " +"les 10 clients vivront sur une seule machine et partageront des " +"ressources telles que le CPU, le GPU et la mémoire. Avoir 10 clients " +"signifierait avoir 10 instances de ``FlowerClient`` en mémoire. Faire " +"cela sur une seule machine peut rapidement épuiser les ressources mémoire" +" disponibles, même si seulement un sous-ensemble de ces clients participe" +" à un seul tour d'apprentissage fédéré." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:518 +#, fuzzy +msgid "" +"In addition to the regular capabilities where server and clients run on " +"multiple machines, Flower, therefore, provides special simulation " +"capabilities that create ``FlowerClient`` instances only when they are " +"actually necessary for training or evaluation. To enable the Flower " +"framework to create clients when necessary, we need to implement a " +"function that creates a ``FlowerClient`` instance on demand. We typically" +" call this function ``client_fn``. Flower calls ``client_fn`` whenever it" +" needs an instance of one particular client to call ``fit`` or " +"``evaluate`` (those instances are usually discarded after use, so they " +"should not keep any local state). In federated learning experiments using" +" Flower, clients are identified by a partition ID, or ``partition-id``. " +"This ``partition-id`` is used to load different local data partitions for" +" different clients, as can be seen below. The value of ``partition-id`` " +"is retrieved from the ``node_config`` dictionary in the ``Context`` " +"object, which holds the information that persists throughout each " +"training round." 
+msgstr "" +"In addition to the regular capabilities where server and clients run on " +"multiple machines, Flower, therefore, provides special simulation " +"capabilities that create ``FlowerClient`` instances only when they are " +"actually necessary for training or evaluation. To enable the Flower " +"framework to create clients when necessary, we need to implement a " +"function called ``client_fn`` that creates a ``FlowerClient`` instance on" +" demand. Flower calls ``client_fn`` whenever it needs an instance of one " +"particular client to call ``fit`` or ``evaluate`` (those instances are " +"usually discarded after use, so they should not keep any local state). " +"Clients are identified by a client ID, or short ``cid``. The ``cid`` can " +"be used, for example, to load different local data partitions for " +"different clients, as can be seen below:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:522 +#, fuzzy +msgid "" +"With this, we have the class ``FlowerClient`` which defines client-side " +"training/evaluation and ``client_fn`` which allows Flower to create " +"``FlowerClient`` instances whenever it needs to call ``fit`` or " +"``evaluate`` on one particular client. Last, but definitely not least, we" +" create an instance of ``ClientApp`` and pass it the ``client_fn``. " +"``ClientApp`` is the entrypoint that a running Flower client uses to call" +" your code (as defined in, for example, ``FlowerClient.fit``)." +msgstr "" +"Nous avons maintenant la classe ``FlowerClient`` qui définit " +"l'entraînement/évaluation côté client et ``client_fn`` qui permet à " +"Flower de créer des instances de ``FlowerClient`` chaque fois qu'il a " +"besoin d'appeler ``fit`` ou ``evaluate`` sur un client particulier. La " +"dernière étape consiste à démarrer la simulation réelle en utilisant " +"``flwr.simulation.start_simulation``." 
+ +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:563 +#, fuzzy +msgid "Define the Flower ServerApp" +msgstr "Serveur de Flower" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:565 +#, fuzzy +msgid "" +"On the server side, we need to configure a strategy which encapsulates " +"the federated learning approach/algorithm, for example, *Federated " +"Averaging* (FedAvg). Flower has a number of built-in strategies, but we " +"can also use our own strategy implementations to customize nearly all " +"aspects of the federated learning approach. For this example, we use the " +"built-in ``FedAvg`` implementation and customize it using a few basic " +"parameters:" +msgstr "" +"Flower dispose d'un certain nombre de stratégies intégrées, mais nous " +"pouvons également utiliser nos propres implémentations de stratégies pour" +" personnaliser presque tous les aspects de l'approche de l'apprentissage " +"fédéré. Pour cet exemple, nous utilisons l'implémentation intégrée " +"``FedAvg`` et nous la personnalisons en utilisant quelques paramètres de " +"base. La dernière étape est l'appel à ``start_simulation`` qui - tu l'as " +"deviné - démarre la simulation :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:592 +msgid "" +"Similar to ``ClientApp``, we create a ``ServerApp`` using a utility " +"function ``server_fn``. In ``server_fn``, we pass an instance of " +"``ServerConfig`` for defining the number of federated learning rounds " +"(``num_rounds``) and we also pass the previously created ``strategy``. " +"The ``server_fn`` returns a ``ServerAppComponents`` object containing the" +" settings that define the ``ServerApp`` behaviour. ``ServerApp`` is the " +"entrypoint that Flower uses to call all your server-side code (for " +"example, the strategy)." 
+msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:629 +#, fuzzy +msgid "Run the training" +msgstr "Commencer la formation" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:631 +msgid "" +"In simulation, we often want to control the amount of resources each " +"client can use. In the next cell, we specify a ``backend_config`` " +"dictionary with the ``client_resources`` key (required) for defining the " +"amount of CPU and GPU resources each client can access." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:659 +msgid "" +"The last step is the actual call to ``run_simulation`` which - you " +"guessed it - runs the simulation. ``run_simulation`` accepts a number of " +"arguments: - ``server_app`` and ``client_app``: the previously created " +"``ServerApp`` and ``ClientApp`` objects, respectively - " +"``num_supernodes``: the number of ``SuperNodes`` to simulate which equals" +" the number of clients for Flower simulation - ``backend_config``: the " +"resource allocation used in this simulation" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:686 +msgid "Behind the scenes" +msgstr "Dans les coulisses" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:688 +msgid "So how does this work? How does Flower execute this simulation?" +msgstr "" +"Alors, comment cela fonctionne-t-il ? Comment Flower exécute-t-il cette " +"simulation ?" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:690 +#, fuzzy, python-format +msgid "" +"When we call ``run_simulation``, we tell Flower that there are 10 clients" +" (``num_supernodes=10``, where 1 ``SuperNode`` launches 1 ``ClientApp``)." +" Flower then goes ahead an asks the ``ServerApp`` to issue an " +"instructions to those nodes using the ``FedAvg`` strategy. 
``FedAvg`` " +"knows that it should select 100% of the available clients " +"(``fraction_fit=1.0``), so it goes ahead and selects 10 random clients " +"(i.e., 100% of 10)." +msgstr "" +"Lorsque nous appelons ``start_simulation``, nous disons à Flower qu'il y " +"a 10 clients (``num_clients=10``). Flower demande alors à la stratégie " +"``FedAvg`` de sélectionner des clients. ``FedAvg`` sait qu'il doit " +"sélectionner 100% des clients disponibles (``fraction_fit=1.0``), alors " +"il choisit 10 clients au hasard (c'est à dire 100% de 10)." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:692 +#, fuzzy +msgid "" +"Flower then asks the selected 10 clients to train the model. Each of the " +"10 ``ClientApp`` instances receives a message, which causes it to call " +"``client_fn`` to create an instance of ``FlowerClient``. It then calls " +"``.fit()`` on each the ``FlowerClient`` instances and returns the " +"resulting model parameter updates to the ``ServerApp``. When the " +"``ServerApp`` receives the model parameter updates from the clients, it " +"hands those updates over to the strategy (*FedAvg*) for aggregation. The " +"strategy aggregates those updates and returns the new global model, which" +" then gets used in the next round of federated learning." +msgstr "" +"Flower demande ensuite aux 10 clients sélectionnés d'entraîner le modèle." +" Lorsque le serveur reçoit les mises à jour des paramètres du modèle de " +"la part des clients, il les transmet à la stratégie (*FedAvg*) pour " +"qu'elle les agrège. La stratégie agrège ces mises à jour et renvoie le " +"nouveau modèle global, qui est ensuite utilisé dans le prochain cycle " +"d'apprentissage fédéré." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:705 +msgid "Where's the accuracy?" +msgstr "Où est la précision ?" 
+ +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:707 +msgid "" +"You may have noticed that all metrics except for ``losses_distributed`` " +"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" +msgstr "" +"Tu as peut-être remarqué que toutes les mesures, à l'exception de " +"``pertes_distribuées``, sont vides. Où est passée la ``{\"précision\" : " +"float(précision)}`` ?" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:709 +msgid "" +"Flower can automatically aggregate losses returned by individual clients," +" but it cannot do the same for metrics in the generic metrics dictionary " +"(the one with the ``accuracy`` key). Metrics dictionaries can contain " +"very different kinds of metrics and even key/value pairs that are not " +"metrics at all, so the framework does not (and can not) know how to " +"handle these automatically." +msgstr "" +"Flower peut automatiquement agréger les pertes renvoyées par les clients " +"individuels, mais il ne peut pas faire la même chose pour les mesures " +"dans le dictionnaire de mesures générique (celui avec la clé " +"``accuracy``). Les dictionnaires de mesures peuvent contenir des types de" +" mesures très différents et même des paires clé/valeur qui ne sont pas " +"des mesures du tout, donc le cadre ne sait pas (et ne peut pas) savoir " +"comment les gérer automatiquement." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 +msgid "" +"As users, we need to tell the framework how to handle/aggregate these " +"custom metrics, and we do so by passing metric aggregation functions to " +"the strategy. The strategy will then call these functions whenever it " +"receives fit or evaluate metrics from clients. The two possible functions" +" are ``fit_metrics_aggregation_fn`` and " +"``evaluate_metrics_aggregation_fn``." 
+msgstr "" +"En tant qu'utilisateurs, nous devons indiquer au framework comment " +"gérer/agréger ces métriques personnalisées, et nous le faisons en passant" +" des fonctions d'agrégation de métriques à la stratégie. La stratégie " +"appellera alors ces fonctions chaque fois qu'elle recevra des métriques " +"d'ajustement ou d'évaluation de la part des clients. Les deux fonctions " +"possibles sont ``fit_metrics_aggregation_fn`` et " +"``evaluate_metrics_aggregation_fn``." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 +msgid "" +"Let's create a simple weighted averaging function to aggregate the " +"``accuracy`` metric we return from ``evaluate``:" +msgstr "" +"Créons une simple fonction de calcul de la moyenne pondérée pour agréger " +"la mesure de \"précision\" que nous renvoie ``evaluate`` :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:781 +msgid "" +"We now have a full system that performs federated training and federated " +"evaluation. It uses the ``weighted_average`` function to aggregate custom" +" evaluation metrics and calculates a single ``accuracy`` metric across " +"all clients on the server side." +msgstr "" +"Nous avons maintenant un système complet qui effectue la formation " +"fédérée et l'évaluation fédérée. Il utilise la fonction ``moyenne " +"pondérée`` pour agréger les mesures d'évaluation personnalisées et " +"calcule une seule mesure de ``précision`` pour tous les clients du côté " +"du serveur." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:783 +msgid "" +"The other two categories of metrics (``losses_centralized`` and " +"``metrics_centralized``) are still empty because they only apply when " +"centralized evaluation is being used. Part two of the Flower tutorial " +"will cover centralized evaluation." 
+msgstr "" +"Les deux autres catégories de mesures (``losses_centralized`` et " +"``metrics_centralized``) sont toujours vides car elles ne s'appliquent" +" que lorsque l'évaluation centralisée est utilisée. La deuxième partie du" +" tutoriel Flower couvrira l'évaluation centralisée." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:795 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 +msgid "Final remarks" +msgstr "Remarques finales" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:797 +msgid "" +"Congratulations, you just trained a convolutional neural network, " +"federated over 10 clients! With that, you understand the basics of " +"federated learning with Flower. The same approach you've seen can be used" +" with other machine learning frameworks (not just PyTorch) and tasks (not" +" just CIFAR-10 images classification), for example NLP with Hugging Face " +"Transformers or speech with SpeechBrain." +msgstr "" +"Félicitations, tu viens d'entraîner un réseau neuronal convolutif, fédéré" +" sur 10 clients ! Avec ça, tu comprends les bases de l'apprentissage " +"fédéré avec Flower. La même approche que tu as vue peut être utilisée " +"avec d'autres cadres d'apprentissage automatique (pas seulement PyTorch) " +"et d'autres tâches (pas seulement la classification des images CIFAR-10)," +" par exemple le NLP avec Hugging Face Transformers ou la parole avec " +"SpeechBrain." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:799 +msgid "" +"In the next notebook, we're going to cover some more advanced concepts. " +"Want to customize your strategy? Initialize parameters on the server " +"side? Or evaluate the aggregated model on the server side? We'll cover " +"all this and more in the next tutorial." +msgstr "" +"Dans le prochain cahier, nous allons aborder des concepts plus avancés. " +"Tu veux personnaliser ta stratégie ? 
Initialiser des paramètres côté " +"serveur ? Ou évaluer le modèle agrégé côté serveur ? Nous aborderons tout" +" cela et bien plus encore dans le prochain tutoriel." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:817 +#, fuzzy +msgid "" +"The `Flower Federated Learning Tutorial - Part 2 " +"`__ goes into more depth about strategies and all " +"the advanced things you can build with them." +msgstr "" +"Le `Tutoriel d'apprentissage fédéré Flower - Partie 2 " +"`__ va plus en profondeur sur les stratégies et toutes les " +"choses avancées que tu peux construire avec elles." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 +#, fuzzy +msgid "Use a federated learning strategy" +msgstr "Stratégie de moyenne fédérée." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 +#, fuzzy +msgid "" +"Welcome to the next part of the federated learning tutorial. In previous " +"parts of this tutorial, we introduced federated learning with PyTorch and" +" Flower (`part 1 `__)." +msgstr "" +"Bienvenue dans la prochaine partie du tutoriel sur l'apprentissage " +"fédéré. Dans les parties précédentes de ce tutoriel, nous avons présenté " +"l'apprentissage fédéré avec PyTorch et Flower (`partie 1 " +"`__)." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 +#, fuzzy +msgid "" +"In this notebook, we'll begin to customize the federated learning system " +"we built in the introductory notebook again, using the Flower framework, " +"Flower Datasets, and PyTorch." +msgstr "" +"Dans ce carnet, nous allons commencer à personnaliser le système " +"d'apprentissage fédéré que nous avons construit dans le carnet " +"d'introduction (toujours en utilisant `Flower `__ et " +"`PyTorch `__)." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:18 +#, fuzzy +msgid "Let's move beyond FedAvg with Flower strategies! 
🌼" +msgstr "Dépassons FedAvg avec les stratégies florales !" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:121 +#, fuzzy +msgid "" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``. We introduce a new parameter" +" ``num_partitions`` which allows us to call ``load_datasets`` with " +"different numbers of partitions." +msgstr "" +"Chargeons maintenant les ensembles d'entraînement et de test CIFAR-10, " +"divisons-les en dix ensembles de données plus petits (chacun divisé en " +"ensemble d'entraînement et de validation), et enveloppons le tout dans " +"leur propre ``DataLoader``. Nous introduisons un nouveau paramètre " +"``num_clients`` qui nous permet d'appeler ``load_datasets`` avec " +"différents nombres de clients." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:321 +msgid "Strategy customization" +msgstr "Personnalisation de la stratégie" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 +msgid "" +"So far, everything should look familiar if you've worked through the " +"introductory notebook. With that, we're ready to introduce a number of " +"new features." +msgstr "" +"Jusqu'à présent, tout devrait te sembler familier si tu as travaillé sur " +"le cahier d'introduction. Avec cela, nous sommes prêts à présenter un " +"certain nombre de nouvelles fonctionnalités." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:335 +msgid "Server-side parameter **initialization**" +msgstr "Paramètres côté serveur **initialisation**" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:337 +#, fuzzy +msgid "" +"Flower, by default, initializes the global model by asking one random " +"client for the initial parameters. 
In many cases, we want more control " +"over parameter initialization though. Flower therefore allows you to " +"directly pass the initial parameters to the Strategy. We create an " +"instance of ``Net()`` and get the paramaters as follows:" +msgstr "" +"Flower, par défaut, initialise le modèle global en demandant à un client " +"aléatoire les paramètres initiaux. Dans de nombreux cas, nous voulons " +"cependant avoir plus de contrôle sur l'initialisation des paramètres. " +"Flower te permet donc de passer directement les paramètres initiaux à la " +"Stratégie :" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:358 +msgid "" +"Next, we create a ``server_fn`` that returns the components needed for " +"the server. Within ``server_fn``, we create a Strategy that uses the " +"initial parameters." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:393 +#, fuzzy +msgid "" +"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" +" from asking one of the clients for the initial parameters. In " +"``server_fn``, we pass this new ``strategy`` and a ``ServerConfig`` for " +"defining the number of federated learning rounds (``num_rounds``)." +msgstr "" +"Le fait de passer ``initial_parameters`` à la stratégie ``FedAvg`` " +"empêche Flower de demander les paramètres initiaux à l'un des clients. Si" +" nous regardons de près, nous pouvons voir que les journaux ne montrent " +"aucun appel à la méthode ``FlowerClient.get_parameters``." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:395 +msgid "" +"Similar to the ``ClientApp``, we now create the ``ServerApp`` using the " +"``server_fn``:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:416 +msgid "" +"Last but not least, we specify the resources for each client and run the " +"simulation." 
+msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:448 +#, fuzzy +msgid "" +"If we look closely, we can see that the logs do not show any calls to the" +" ``FlowerClient.get_parameters`` method." +msgstr "" +"Le fait de passer ``initial_parameters`` à la stratégie ``FedAvg`` " +"empêche Flower de demander les paramètres initiaux à l'un des clients. Si" +" nous regardons de près, nous pouvons voir que les journaux ne montrent " +"aucun appel à la méthode ``FlowerClient.get_parameters``." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:460 +msgid "Starting with a customized strategy" +msgstr "Commencer par une stratégie personnalisée" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:462 +#, fuzzy +msgid "" +"We've seen the function ``run_simulation`` before. It accepts a number of" +" arguments, amongst them the ``server_app`` which wraps around the " +"strategy and number of training rounds, ``client_app`` which wraps around" +" the ``client_fn`` used to create ``FlowerClient`` instances, and the " +"number of clients to simulate which equals ``num_supernodes``." +msgstr "" +"Elle accepte un certain nombre d'arguments, parmi lesquels le " +"``client_fn`` utilisé pour créer les instances de ``FlowerClient``, le " +"nombre de clients à simuler ``num_clients``, le nombre de rounds " +"``num_rounds``, et la stratégie." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:464 +msgid "" +"The strategy encapsulates the federated learning approach/algorithm, for " +"example, ``FedAvg`` or ``FedAdagrad``. Let's try to use a different " +"strategy this time:" +msgstr "" +"La stratégie englobe l'approche/l'algorithme d'apprentissage fédéré, par " +"exemple, ``FedAvg`` ou ``FedAdagrad``. 
Essayons d'utiliser une stratégie " +"différente cette fois-ci :" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:509 +msgid "Server-side parameter **evaluation**" +msgstr "Paramètre côté serveur **évaluation**" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:511 +msgid "" +"Flower can evaluate the aggregated model on the server-side or on the " +"client-side. Client-side and server-side evaluation are similar in some " +"ways, but different in others." +msgstr "" +"Flower peut évaluer le modèle agrégé côté serveur ou côté client. Les " +"évaluations côté client et côté serveur sont similaires à certains " +"égards, mais différentes à d'autres." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:513 +msgid "" +"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " +"simple: it works the same way that evaluation in centralized machine " +"learning does. If there is a server-side dataset that can be used for " +"evaluation purposes, then that's great. We can evaluate the newly " +"aggregated model after each round of training without having to send the " +"model to clients. We're also fortunate in the sense that our entire " +"evaluation dataset is available at all times." +msgstr "" +"**L'évaluation centralisée** (ou *évaluation côté serveur*) est " +"conceptuellement simple : elle fonctionne de la même manière que " +"l'évaluation dans l'apprentissage automatique centralisé. S'il existe un " +"ensemble de données côté serveur qui peut être utilisé à des fins " +"d'évaluation, alors c'est parfait. Nous pouvons évaluer le modèle " +"nouvellement agrégé après chaque cycle de formation sans avoir à envoyer " +"le modèle aux clients. Nous avons également la chance que l'ensemble de " +"notre ensemble de données d'évaluation soit disponible à tout moment." 
+ +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:515 +msgid "" +"**Federated Evaluation** (or *client-side evaluation*) is more complex, " +"but also more powerful: it doesn't require a centralized dataset and " +"allows us to evaluate models over a larger set of data, which often " +"yields more realistic evaluation results. In fact, many scenarios require" +" us to use **Federated Evaluation** if we want to get representative " +"evaluation results at all. But this power comes at a cost: once we start " +"to evaluate on the client side, we should be aware that our evaluation " +"dataset can change over consecutive rounds of learning if those clients " +"are not always available. Moreover, the dataset held by each client can " +"also change over consecutive rounds. This can lead to evaluation results " +"that are not stable, so even if we would not change the model, we'd see " +"our evaluation results fluctuate over consecutive rounds." +msgstr "" +"**L'évaluation fédérée** (ou évaluation côté client) est plus complexe, " +"mais aussi plus puissante : elle ne nécessite pas d'ensemble de données " +"centralisé et nous permet d'évaluer les modèles sur un plus grand " +"ensemble de données, ce qui donne souvent des résultats d'évaluation plus" +" réalistes. En fait, de nombreux scénarios exigent que nous utilisions " +"l'évaluation fédérée** si nous voulons obtenir des résultats d'évaluation" +" représentatifs. Mais cette puissance a un coût : une fois que nous " +"commençons à évaluer côté client, nous devons savoir que notre ensemble " +"de données d'évaluation peut changer au cours des cycles d'apprentissage " +"consécutifs si ces clients ne sont pas toujours disponibles. De plus, " +"l'ensemble de données détenu par chaque client peut également changer au " +"cours des cycles consécutifs. 
Cela peut conduire à des résultats " +"d'évaluation qui ne sont pas stables, donc même si nous ne changions pas " +"le modèle, nous verrions nos résultats d'évaluation fluctuer au cours des" +" cycles consécutifs." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:518 +msgid "" +"We've seen how federated evaluation works on the client side (i.e., by " +"implementing the ``evaluate`` method in ``FlowerClient``). Now let's see " +"how we can evaluate aggregated model parameters on the server-side:" +msgstr "" +"Nous avons vu comment l'évaluation fédérée fonctionne du côté client " +"(c'est-à-dire en implémentant la méthode ``evaluate`` dans " +"``FlowerClient``). Voyons maintenant comment nous pouvons évaluer les " +"paramètres du modèle agrégé du côté serveur :" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:549 +msgid "" +"We create a ``FedAvg`` strategy and pass ``evaluate_fn`` to it. Then, we " +"create a ``ServerApp`` that uses this strategy." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:586 +#, fuzzy +msgid "Finally, we run the simulation." +msgstr "Simulation de moniteur" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 +msgid "Sending/receiving arbitrary values to/from clients" +msgstr "Envoi/réception de valeurs arbitraires vers/depuis les clients" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 +#, fuzzy +msgid "" +"In some situations, we want to configure client-side execution (training," +" evaluation) from the server-side. One example for that is the server " +"asking the clients to train for a certain number of local epochs. Flower " +"provides a way to send configuration values from the server to the " +"clients using a dictionary. 
Let's look at an example where the clients " +"receive values from the server through the ``config`` parameter in " +"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " +"method receives the configuration dictionary through the ``config`` " +"parameter and can then read values from this dictionary. In this example," +" it reads ``server_round`` and ``local_epochs`` and uses those values to " +"improve the logging and configure the number of local training epochs:" +msgstr "" +"In some situations, we want to configure client-side execution (training," +" evaluation) from the server-side. One example for that is the server " +"asking the clients to train for a certain number of local epochs. Flower " +"provides a way to send configuration values from the server to the " +"clients using a dictionary. Let's look at an example where the clients " +"receive values from the server through the ``config`` parameter in " +"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " +"method receives the configuration dictionary through the ``config`` " +"parameter and can then read values from this dictionary. In this example," +" it reads ``server_round`` and ``local_epochs`` and uses those values to " +"improve the logging and configure the number of local training epochs:" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:674 +msgid "" +"So how can we send this config dictionary from server to clients? The " +"built-in Flower Strategies provide way to do this, and it works similarly" +" to the way server-side evaluation works. We provide a function to the " +"strategy, and the strategy calls this function for every round of " +"federated learning:" +msgstr "" +"Comment pouvons-nous donc envoyer ce dictionnaire de configuration du " +"serveur aux clients ? Les stratégies de Flower intégrées fournissent un " +"moyen de le faire, et cela fonctionne de la même façon que l'évaluation " +"côté serveur. 
Nous fournissons une fonction à la stratégie, et la " +"stratégie appelle cette fonction pour chaque cycle d'apprentissage fédéré" +" :" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:704 +#, fuzzy +msgid "" +"Next, we'll pass this function to the FedAvg strategy before starting the" +" simulation:" +msgstr "" +"Ensuite, nous allons simplement passer cette fonction à la stratégie " +"FedAvg avant de commencer la simulation :" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:749 +msgid "" +"As we can see, the client logs now include the current round of federated" +" learning (which they read from the ``config`` dictionary). We can also " +"configure local training to run for one epoch during the first and second" +" round of federated learning, and then for two epochs during the third " +"round." +msgstr "" +"Comme nous pouvons le voir, les journaux des clients incluent maintenant " +"le cycle actuel d'apprentissage fédéré (qu'ils lisent depuis le " +"dictionnaire ``config``). Nous pouvons également configurer " +"l'apprentissage local pour qu'il s'exécute pendant une époque au cours du" +" premier et du deuxième cycle d'apprentissage fédéré, puis pendant deux " +"époques au cours du troisième cycle." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:751 +msgid "" +"Clients can also return arbitrary values to the server. To do so, they " +"return a dictionary from ``fit`` and/or ``evaluate``. We have seen and " +"used this concept throughout this notebook without mentioning it " +"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" +" key/value pair as the third return value in ``evaluate``." +msgstr "" +"Les clients peuvent également renvoyer des valeurs arbitraires au " +"serveur. Pour ce faire, ils renvoient un dictionnaire depuis ``fit`` " +"et/ou ``evaluate``. 
Nous avons vu et utilisé ce concept tout au long de " +"ce carnet sans le mentionner explicitement : notre ``FlowerClient`` " +"renvoie un dictionnaire contenant une paire clé/valeur personnalisée en " +"tant que troisième valeur de retour dans ``evaluate``." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:763 +msgid "Scaling federated learning" +msgstr "Mise à l'échelle de l'apprentissage fédéré" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:765 +msgid "" +"As a last step in this notebook, let's see how we can use Flower to " +"experiment with a large number of clients." +msgstr "" +"Comme dernière étape de ce carnet, voyons comment nous pouvons utiliser " +"Flower pour expérimenter avec un grand nombre de clients." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:785 +msgid "" +"Note that we can reuse the ``ClientApp`` for different ``num-partitions``" +" since the Context is defined by the ``num_supernodes`` argument in " +"``run_simulation()``." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:787 +#, fuzzy, python-format +msgid "" +"We now have 1000 partitions, each holding 45 training and 5 validation " +"examples. Given that the number of training examples on each client is " +"quite small, we should probably train the model a bit longer, so we " +"configure the clients to perform 3 local training epochs. We should also " +"adjust the fraction of clients selected for training during each round " +"(we don't want all 1000 clients participating in every round), so we " +"adjust ``fraction_fit`` to ``0.025``, which means that only 2.5% of " +"available clients (so 25 clients) will be selected for training each " +"round:" +msgstr "" +"Nous avons maintenant 1000 partitions, chacune contenant 45 exemples " +"d'entraînement et 5 exemples de validation. 
Etant donné que le nombre " +"d'exemples d'entraînement sur chaque client est assez faible, nous " +"devrions probablement entraîner le modèle un peu plus longtemps, nous " +"configurons donc les clients pour qu'ils effectuent 3 époques " +"d'entraînement local. Nous devrions également ajuster la fraction de " +"clients sélectionnés pour l'entraînement à chaque tour (nous ne voulons " +"pas que les 1000 clients participent à chaque tour), nous ajustons donc " +"``fraction_fit`` à ``0.05``, ce qui signifie que seulement 5% des clients" +" disponibles (donc 50 clients) seront sélectionnés pour l'entraînement à " +"chaque tour :" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:843 +msgid "" +"In this notebook, we've seen how we can gradually enhance our system by " +"customizing the strategy, initializing parameters on the server side, " +"choosing a different strategy, and evaluating models on the server-side. " +"That's quite a bit of flexibility with so little code, right?" +msgstr "" +"Dans ce carnet, nous avons vu comment nous pouvons progressivement " +"améliorer notre système en personnalisant la stratégie, en initialisant " +"les paramètres côté serveur, en choisissant une stratégie différente et " +"en évaluant les modèles côté serveur. C'est une sacrée flexibilité avec " +"si peu de code, n'est-ce pas ?" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:845 +msgid "" +"In the later sections, we've seen how we can communicate arbitrary values" +" between server and clients to fully customize client-side execution. " +"With that capability, we built a large-scale Federated Learning " +"simulation using the Flower Virtual Client Engine and ran an experiment " +"involving 1000 clients in the same workload - all in a Jupyter Notebook!" 
+msgstr "" +"Dans les sections ultérieures, nous avons vu comment nous pouvons " +"communiquer des valeurs arbitraires entre le serveur et les clients pour " +"personnaliser entièrement l'exécution côté client. Grâce à cette " +"capacité, nous avons construit une simulation d'apprentissage fédéré à " +"grande échelle en utilisant le moteur de client virtuel Flower et nous " +"avons mené une expérience impliquant 1000 clients dans la même charge de " +"travail - le tout dans un carnet Jupyter !" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:863 +#, fuzzy +msgid "" +"The `Flower Federated Learning Tutorial - Part 3 " +"`__ shows how to build a fully custom ``Strategy`` from " +"scratch." +msgstr "" +"Le `Tutoriel d'apprentissage fédéré Flower - Partie 3 [WIP] " +"`__ montre comment construire une ``Stratégie`` entièrement " +"personnalisée à partir de zéro." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 +msgid "What is Federated Learning?" +msgstr "Qu'est-ce que l'apprentissage fédéré ?" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 +#, fuzzy +msgid "" +"In this tutorial, you will learn what federated learning is, build your " +"first system in Flower, and gradually extend it. If you work through all " +"parts of the tutorial, you will be able to build advanced federated " +"learning systems that approach the current state of the art in the field." +msgstr "" +"Dans ce tutoriel, tu apprendras ce qu'est l'apprentissage fédéré, tu " +"construiras ton premier système dans Flower, et tu l'étendras " +"progressivement. Si tu travailles sur toutes les parties du tutoriel, tu " +"seras capable de construire des systèmes d'apprentissage fédéré avancés " +"qui se rapprochent de l'état actuel de l'art dans le domaine." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 +msgid "" +"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " +"federated learning. 
Only a basic understanding of data science and Python" +" programming is assumed." +msgstr "" +"🧑‍🏫 Ce tutoriel part de zéro et n'attend aucune familiarité avec " +"l'apprentissage fédéré. Seule une compréhension de base de la science des" +" données et de la programmation Python est supposée." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 +#, fuzzy +msgid "" +"`Star Flower on GitHub `__ ⭐️ and join " +"the open-source Flower community on Slack to connect, ask questions, and " +"get help: `Join Slack `__ 🌼 We'd love to " +"hear from you in the ``#introductions`` channel! And if anything is " +"unclear, head over to the ``#questions`` channel." +msgstr "" +"`Star Flower on GitHub `__ ⭐️ et " +"rejoignez la communauté Flower sur Slack pour vous connecter, poser des " +"questions et obtenir de l'aide : `Join Slack `__ 🌼 Nous serions ravis d'avoir de vos nouvelles dans le canal " +"``#introductions`` ! Et si quelque chose n'est pas clair, rendez-vous sur" +" le canal ``#questions``." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 +#, fuzzy +msgid "Let's get started!" +msgstr "Allons-y, déclarons-le !" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 +msgid "Classic machine learning" +msgstr "Apprentissage automatique classique" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 +msgid "" +"Before we begin to discuss federated learning, let us quickly recap how " +"most machine learning works today." +msgstr "" +"Avant de commencer à discuter de l'apprentissage fédéré, récapitulons " +"rapidement la façon dont la plupart des apprentissages automatiques " +"fonctionnent aujourd'hui." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 +msgid "" +"In machine learning, we have a model, and we have data. The model could " +"be a neural network (as depicted here), or something else, like classical" +" linear regression." 
+msgstr "" +"Dans l'apprentissage automatique, nous avons un modèle et des données. Le" +" modèle peut être un réseau neuronal (comme illustré ici), ou quelque " +"chose d'autre, comme la régression linéaire classique." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 +msgid "|ac0a9766e26044d6aea222a829859b20|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 +msgid "Model and data" +msgstr "Modèle et données" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 +msgid "" +"We train the model using the data to perform a useful task. A task could " +"be to detect objects in images, transcribe an audio recording, or play a " +"game like Go." +msgstr "" +"Nous entraînons le modèle en utilisant les données pour effectuer une " +"tâche utile. Une tâche peut consister à détecter des objets dans des " +"images, à transcrire un enregistrement audio ou à jouer à un jeu comme le" +" Go." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 +msgid "|36cd6e248b1443ce8a82b5a025bba368|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 +msgid "Train model using data" +msgstr "Entraîne le modèle à l'aide des données" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 +#, fuzzy +msgid "" +"Now, in practice, the training data we work with doesn't originate on the" +" machine we train the model on. It gets created somewhere else." +msgstr "" +"Dans la pratique, les données d'entraînement avec lesquelles nous " +"travaillons ne proviennent pas de la machine sur laquelle nous entraînons" +" le modèle. Elles sont créées ailleurs." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 +#, fuzzy +msgid "" +"It originates on a smartphone by the user interacting with an app, a car " +"collecting sensor data, a laptop receiving input via the keyboard, or a " +"smart speaker listening to someone trying to sing a song." 
+msgstr "" +"Elle prend naissance sur un smartphone par l'interaction de l'utilisateur" +" avec une application, une voiture qui collecte des données de capteurs, " +"un ordinateur portable qui reçoit des entrées via le clavier, ou un haut-" +"parleur intelligent qui écoute quelqu'un qui essaie de chanter une " +"chanson." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 +msgid "|bf4fb057f4774df39e1dcb5c71fd804a|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 +msgid "Data on a phone" +msgstr "Données sur un téléphone" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 +msgid "" +"What's also important to mention, this \"somewhere else\" is usually not " +"just one place, it's many places. It could be several devices all running" +" the same app. But it could also be several organizations, all generating" +" data for the same task." +msgstr "" +"Il est également important de mentionner que cet \"ailleurs\" n'est " +"généralement pas un seul endroit, mais plusieurs. Il peut s'agir de " +"plusieurs appareils fonctionnant tous avec la même application. Mais il " +"peut également s'agir de plusieurs organisations, qui génèrent toutes des" +" données pour la même tâche." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 +msgid "|71bb9f3c74c04f959b9bc1f02b736c95|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 +msgid "Data is on many devices" +msgstr "Les données se trouvent sur de nombreux appareils" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 +msgid "" +"So to use machine learning, or any kind of data analysis, the approach " +"that has been used in the past was to collect all data on a central " +"server. This server can be somewhere in a data center, or somewhere in " +"the cloud." 
+msgstr "" +"Ainsi, pour utiliser l'apprentissage automatique, ou tout autre type " +"d'analyse de données, l'approche utilisée par le passé consistait à " +"collecter toutes les données sur un serveur central. Ce serveur peut se " +"trouver quelque part dans un centre de données, ou quelque part dans le " +"cloud." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 +msgid "|7605632e1b0f49599ffacf841491fcfb|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 +msgid "Central data collection" +msgstr "Collecte centralisée des données" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 +#, fuzzy +msgid "" +"Once all the data is collected in one place, we can finally use machine " +"learning algorithms to train our model on the data. This is the machine " +"learning approach that we've basically always relied on." +msgstr "" +"Une fois que toutes les données sont rassemblées en un seul endroit, nous" +" pouvons enfin utiliser des algorithmes d'apprentissage automatique pour " +"entraîner notre modèle sur les données. C'est l'approche d'apprentissage " +"automatique sur laquelle nous nous sommes fondamentalement toujours " +"appuyés." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 +msgid "|91b1b5a7d3484eb7a2350c1923f18307|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 +msgid "Central model training" +msgstr "Formation au modèle central" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 +msgid "Challenges of classical machine learning" +msgstr "Les défis de l'apprentissage automatique classique" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 +msgid "" +"The classic machine learning approach we've just seen can be used in some" +" cases. Great examples include categorizing holiday photos, or analyzing " +"web traffic. 
Cases, where all the data is naturally available on a " +"centralized server." +msgstr "" +"L'approche classique de l'apprentissage automatique que nous venons de " +"voir peut être utilisée dans certains cas. Parmi les grands exemples, on " +"peut citer la catégorisation des photos de vacances, ou l'analyse du " +"trafic web. Des cas, où toutes les données sont naturellement disponibles" +" sur un serveur centralisé." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 +msgid "|5405ed430e4746e28b083b146fb71731|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 +msgid "Centralized possible" +msgstr "Possibilité de centralisation" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 +msgid "" +"But the approach can not be used in many other cases. Cases, where the " +"data is not available on a centralized server, or cases where the data " +"available on one server is not enough to train a good model." +msgstr "" +"Mais cette approche ne peut pas être utilisée dans de nombreux autres cas" +" : lorsque les données ne sont pas disponibles sur un serveur centralisé," +" ou lorsque les données disponibles sur un serveur ne sont pas " +"suffisantes pour former un bon modèle." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 +msgid "|a389e87dab394eb48a8949aa2397687b|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 +msgid "Centralized impossible" +msgstr "Impossible de centraliser" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 +#, fuzzy +msgid "" +"There are many reasons why the classic centralized machine learning " +"approach does not work for a large number of highly important real-world " +"use cases. 
Those reasons include:" +msgstr "" +"Il existe de nombreuses raisons pour lesquelles l'approche classique " +"centralisée de l'apprentissage automatique ne fonctionne pas pour un " +"grand nombre de cas d'utilisation très importants dans le monde réel, " +"notamment :" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 +#, fuzzy +msgid "" +"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " +"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " +"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " +"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " +"protect sensitive data from being moved. In fact, those regulations " +"sometimes even prevent single organizations from combining their own " +"users' data for artificial intelligence training because those users live" +" in different parts of the world, and their data is governed by different" +" data protection regulations." +msgstr "" +"**Réglementations** : GDPR (Europe), CCPA (Californie), PIPEDA (Canada), " +"LGPD (Brésil), PDPL (Argentine), KVKK (Turquie), POPI (Afrique du Sud), " +"FSS (Russie), CDPR (Chine), PDPB (Inde), PIPA (Corée), APPI (Japon), PDP " +"(Indonésie), PDPA (Singapour), APP (Australie), et d'autres " +"réglementations protègent les données sensibles contre le déplacement. En" +" fait, ces réglementations empêchent même parfois des organisations " +"individuelles de combiner les données de leurs propres utilisateurs pour " +"la formation à l'intelligence artificielle parce que ces utilisateurs " +"vivent dans différentes parties du monde, et que leurs données sont " +"régies par des réglementations différentes en matière de protection des " +"données." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 +msgid "" +"**User preference**: In addition to regulation, there are use cases where" +" users just expect that no data leaves their device, ever. 
If you type " +"your passwords and credit card info into the digital keyboard of your " +"phone, you don't expect those passwords to end up on the server of the " +"company that developed that keyboard, do you? In fact, that use case was " +"the reason federated learning was invented in the first place." +msgstr "" +"**Préférence de l'utilisateur** : En plus de la réglementation, il existe" +" des cas d'utilisation où les utilisateurs s'attendent tout simplement à " +"ce qu'aucune donnée ne quitte leur appareil, jamais. Si tu tapes tes mots" +" de passe et tes informations de carte de crédit sur le clavier numérique" +" de ton téléphone, tu ne t'attends pas à ce que ces mots de passe " +"finissent sur le serveur de l'entreprise qui a développé ce clavier, n" +"'est-ce pas ? En fait, ce cas d'utilisation est la raison pour laquelle " +"l'apprentissage fédéré a été inventé en premier lieu." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 +#, fuzzy +msgid "" +"**Data volume**: Some sensors, like cameras, produce such a high data " +"volume that it is neither feasible nor economic to collect all the data " +"(due to, for example, bandwidth or communication efficiency). Think about" +" a national rail service with hundreds of train stations across the " +"country. If each of these train stations is outfitted with a number of " +"security cameras, the volume of raw on-device data they produce requires " +"incredibly powerful and exceedingly expensive infrastructure to process " +"and store. And most of the data isn't even useful." +msgstr "" +"**volume de données** : certains capteurs, comme les caméras, produisent " +"un volume de données si important qu'il n'est ni possible ni économique " +"de collecter toutes les données (en raison, par exemple, de la bande " +"passante ou de l'efficacité des communications). Pensez à un service " +"ferroviaire national comptant des centaines de gares à travers le pays. 
" +"Si chacune de ces gares est équipée d'un certain nombre de caméras de " +"sécurité, le volume de données brutes sur les appareils qu'elles " +"produisent nécessite une infrastructure incroyablement puissante et " +"excessivement coûteuse pour les traiter et les stocker. Et la plupart de " +"ces données ne sont même pas utiles." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 +msgid "Examples where centralized machine learning does not work include:" +msgstr "" +"Voici quelques exemples où l'apprentissage automatique centralisé ne " +"fonctionne pas :" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 +#, fuzzy +msgid "" +"Sensitive healthcare records from multiple hospitals to train cancer " +"detection models" +msgstr "" +"Des dossiers médicaux sensibles provenant de plusieurs hôpitaux pour " +"former des modèles de détection du cancer" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 +msgid "" +"Financial information from different organizations to detect financial " +"fraud" +msgstr "" +"Informations financières provenant de différentes organisations pour " +"détecter les fraudes financières" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 +msgid "Location data from your electric car to make better range prediction" +msgstr "" +"Les données de localisation de ta voiture électrique pour mieux prédire " +"l'autonomie" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 +msgid "End-to-end encrypted messages to train better auto-complete models" +msgstr "" +"Messages cryptés de bout en bout pour former de meilleurs modèles " +"d'autocomplétion" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 +#, fuzzy +msgid "" +"The popularity of privacy-enhancing systems like the `Brave " +"`__ browser or the `Signal `__ " +"messenger shows that users care about privacy. 
In fact, they choose the " +"privacy-enhancing version over other alternatives, if such an alternative" +" exists. But what can we do to apply machine learning and data science to" +" these cases to utilize private data? After all, these are all areas that" +" would benefit significantly from recent advances in AI." +msgstr "" +"La popularité des systèmes améliorant la confidentialité comme le " +"navigateur `Brave `__ ou le messager `Signal " +"`__ montre que les utilisateurs se soucient de la " +"confidentialité. En fait, ils choisissent la version améliorant la " +"confidentialité plutôt que d'autres alternatives, si une telle " +"alternative existe. Mais que pouvons-nous faire pour appliquer " +"l'apprentissage automatique et la science des données à ces cas afin " +"d'utiliser les données privées ? Après tout, ce sont tous des domaines " +"qui bénéficieraient de manière significative des récentes avancées en " +"matière d'IA." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 +msgid "Federated learning" +msgstr "Apprentissage fédéré" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 +msgid "" +"Federated learning simply reverses this approach. It enables machine " +"learning on distributed data by moving the training to the data, instead " +"of moving the data to the training. Here's the single-sentence " +"explanation:" +msgstr "" +"L'apprentissage fédéré inverse simplement cette approche. Il permet " +"l'apprentissage automatique sur des données distribuées en déplaçant la " +"formation vers les données, au lieu de déplacer les données vers la " +"formation. 
Voici l'explication en une seule phrase :" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 +msgid "Central machine learning: move the data to the computation" +msgstr "Apprentissage automatique central : déplace les données vers le calcul" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 +msgid "Federated (machine) learning: move the computation to the data" +msgstr "Apprentissage (machine) fédéré : déplacer le calcul vers les données" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 +msgid "" +"By doing so, it enables us to use machine learning (and other data " +"science approaches) in areas where it wasn't possible before. We can now " +"train excellent medical AI models by enabling different hospitals to work" +" together. We can solve financial fraud by training AI models on the data" +" of different financial institutions. We can build novel privacy-" +"enhancing applications (such as secure messaging) that have better built-" +"in AI than their non-privacy-enhancing alternatives. And those are just a" +" few of the examples that come to mind. As we deploy federated learning, " +"we discover more and more areas that can suddenly be reinvented because " +"they now have access to vast amounts of previously inaccessible data." +msgstr "" +"Ce faisant, il nous permet d'utiliser l'apprentissage automatique (et " +"d'autres approches de science des données) dans des domaines où cela " +"n'était pas possible auparavant. Nous pouvons désormais former " +"d'excellents modèles d'IA médicale en permettant à différents hôpitaux de" +" travailler ensemble. Nous pouvons résoudre les fraudes financières en " +"formant des modèles d'IA sur les données de différentes institutions " +"financières. 
Nous pouvons créer de nouvelles applications d'amélioration " +"de la confidentialité (telles que la messagerie sécurisée) qui ont une " +"meilleure IA intégrée que leurs alternatives sans amélioration de la " +"confidentialité. Et ce ne sont là que quelques exemples qui me viennent à" +" l'esprit. Au fur et à mesure que nous déployons l'apprentissage fédéré, " +"nous découvrons de plus en plus de domaines qui peuvent soudainement être" +" réinventés parce qu'ils ont maintenant accès à de vastes quantités de " +"données auparavant inaccessibles." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 +msgid "" +"So how does federated learning work, exactly? Let's start with an " +"intuitive explanation." +msgstr "" +"Comment fonctionne l'apprentissage fédéré ? Commençons par une " +"explication intuitive." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 +msgid "Federated learning in five steps" +msgstr "L'apprentissage fédéré en cinq étapes" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 +msgid "Step 0: Initialize global model" +msgstr "Étape 0 : Initialisation du modèle global" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 +msgid "" +"We start by initializing the model on the server. This is exactly the " +"same in classic centralized learning: we initialize the model parameters," +" either randomly or from a previously saved checkpoint." +msgstr "" +"Nous commençons par initialiser le modèle sur le serveur. C'est " +"exactement la même chose dans l'apprentissage centralisé classique : nous" +" initialisons les paramètres du modèle, soit de façon aléatoire, soit à " +"partir d'un point de contrôle précédemment sauvegardé." 
+ +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 +msgid "|89c412136a5146ec8dc32c0973729f12|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 +msgid "Initialize global model" +msgstr "Initialise le modèle global" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 +msgid "" +"Step 1: Send model to a number of connected organizations/devices (client" +" nodes)" +msgstr "" +"Étape 1 : envoyer le modèle à un certain nombre d'organisations/appareils" +" connectés (nœuds clients)" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 +#, fuzzy +msgid "" +"Next, we send the parameters of the global model to the connected client " +"nodes (think: edge devices like smartphones or servers belonging to " +"organizations). This is to ensure that each participating node starts " +"their local training using the same model parameters. We often use only a" +" few of the connected nodes instead of all nodes. The reason for this is " +"that selecting more and more client nodes has diminishing returns." +msgstr "" +"Ensuite, nous envoyons les paramètres du modèle global aux nœuds clients " +"connectés (par exemple, les appareils périphériques comme les smartphones" +" ou les serveurs appartenant à des organisations). Cela permet de " +"s'assurer que chaque nœud participant commence sa formation locale en " +"utilisant les mêmes paramètres de modèle. Nous n'utilisons souvent que " +"quelques-uns des nœuds connectés au lieu de tous les nœuds. La raison en " +"est que la sélection d'un nombre croissant de nœuds clients a des " +"rendements décroissants." 
+ +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 +msgid "|9503d3dc3a144e8aa295f8800cd8a766|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 +msgid "Send global model" +msgstr "Envoyer le modèle global" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 +msgid "" +"Step 2: Train model locally on the data of each organization/device " +"(client node)" +msgstr "" +"Étape 2 : Entraîne le modèle localement sur les données de chaque " +"organisation/appareil (nœud client)" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 +msgid "" +"Now that all (selected) client nodes have the latest version of the " +"global model parameters, they start the local training. They use their " +"own local dataset to train their own local model. They don't train the " +"model until full convergence, but they only train for a little while. " +"This could be as little as one epoch on the local data, or even just a " +"few steps (mini-batches)." +msgstr "" +"Maintenant que tous les nœuds clients (sélectionnés) disposent de la " +"dernière version des paramètres du modèle global, ils commencent " +"l'entraînement local. Ils utilisent leur propre ensemble de données " +"locales pour entraîner leur propre modèle local. Ils n'entraînent pas le " +"modèle jusqu'à la convergence totale, mais ils ne s'entraînent que " +"pendant un petit moment. Il peut s'agir d'une seule époque sur les " +"données locales, ou même de quelques étapes (mini-batchs)." 
+ +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 +msgid "|aadb59e29b9e445d8e239d9a8a7045cb|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 +msgid "Train on local data" +msgstr "Entraîner sur les données locales" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 +msgid "Step 3: Return model updates back to the server" +msgstr "Étape 3 : Renvoyer les mises à jour du modèle au serveur" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 +msgid "" +"After local training, each client node has a slightly different version " +"of the model parameters they originally received. The parameters are all " +"different because each client node has different examples in its local " +"dataset. The client nodes then send those model updates back to the " +"server. The model updates they send can either be the full model " +"parameters or just the gradients that were accumulated during local " +"training." +msgstr "" +"Après l'entraînement local, chaque nœud client possède une version " +"légèrement différente des paramètres du modèle qu'il a reçus à l'origine." +" Les paramètres sont tous différents parce que chaque nœud client a des " +"exemples différents dans son ensemble de données local. Les nœuds clients" +" renvoient ensuite ces mises à jour du modèle au serveur. Les mises à " +"jour du modèle qu'ils envoient peuvent être soit les paramètres complets " +"du modèle, soit seulement les gradients qui ont été accumulés au cours de" +" l'entraînement local." 
+ +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 +msgid "|a7579ad7734347508e959d9e14f2f53d|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 +msgid "Send model updates" +msgstr "Envoyer les mises à jour du modèle" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 +msgid "Step 4: Aggregate model updates into a new global model" +msgstr "" +"Étape 4 : Agréger les mises à jour des modèles dans un nouveau modèle " +"global" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 +msgid "" +"The server receives model updates from the selected client nodes. If it " +"selected 100 client nodes, it now has 100 slightly different versions of " +"the original global model, each trained on the local data of one client. " +"But didn't we want to have one model that contains the learnings from the" +" data of all 100 client nodes?" +msgstr "" +"Le serveur reçoit les mises à jour du modèle des nœuds clients " +"sélectionnés. S'il a sélectionné 100 nœuds clients, il dispose maintenant" +" de 100 versions légèrement différentes du modèle global original, " +"chacune ayant été formée sur les données locales d'un client. Mais ne " +"voulions-nous pas avoir un seul modèle qui contienne les apprentissages " +"des données de l'ensemble des 100 nœuds clients ?" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 +msgid "" +"In order to get one single model, we have to combine all the model " +"updates we received from the client nodes. This process is called " +"*aggregation*, and there are many different ways to do it. The most basic" +" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " +"`__), often abbreviated as *FedAvg*. " +"*FedAvg* takes the 100 model updates and, as the name suggests, averages " +"them. 
To be more precise, it takes the *weighted average* of the model " +"updates, weighted by the number of examples each client used for " +"training. The weighting is important to make sure that each data example " +"has the same \"influence\" on the resulting global model. If one client " +"has 10 examples, and another client has 100 examples, then - without " +"weighting - each of the 10 examples would influence the global model ten " +"times as much as each of the 100 examples." +msgstr "" +"In order to get one single model, we have to combine all the model " +"updates we received from the client nodes. This process is called " +"*aggregation*, and there are many different ways to do it. The most basic" +" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " +"`__), often abbreviated as *FedAvg*. " +"*FedAvg* takes the 100 model updates and, as the name suggests, averages " +"them. To be more precise, it takes the *weighted average* of the model " +"updates, weighted by the number of examples each client used for " +"training. The weighting is important to make sure that each data example " +"has the same \"influence\" on the resulting global model. If one client " +"has 10 examples, and another client has 100 examples, then - without " +"weighting - each of the 10 examples would influence the global model ten " +"times as much as each of the 100 examples." 
+ +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 +msgid "|73d15dd1d4fc41678b2d54815503fbe8|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 +msgid "Aggregate model updates" +msgstr "Agréger les mises à jour du modèle" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 +msgid "Step 5: Repeat steps 1 to 4 until the model converges" +msgstr "Étape 5 : répète les étapes 1 à 4 jusqu'à ce que le modèle converge" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 +msgid "" +"Steps 1 to 4 are what we call a single round of federated learning. The " +"global model parameters get sent to the participating client nodes (step " +"1), the client nodes train on their local data (step 2), they send their " +"updated models to the server (step 3), and the server then aggregates the" +" model updates to get a new version of the global model (step 4)." +msgstr "" +"Les étapes 1 à 4 constituent ce que nous appelons un cycle unique " +"d'apprentissage fédéré. Les paramètres du modèle global sont envoyés aux " +"nœuds clients participants (étape 1), les nœuds clients s'entraînent sur " +"leurs données locales (étape 2), ils envoient leurs modèles mis à jour au" +" serveur (étape 3), et le serveur agrège ensuite les mises à jour du " +"modèle pour obtenir une nouvelle version du modèle global (étape 4)." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 +#, fuzzy +msgid "" +"During a single round, each client node that participates in that " +"iteration only trains for a little while. This means that after the " +"aggregation step (step 4), we have a model that has been trained on all " +"the data of all participating client nodes, but only for a little while. " +"We then have to repeat this training process over and over again to " +"eventually arrive at a fully trained model that performs well across the " +"data of all client nodes." 
+msgstr "" +"Au cours d'un seul tour, chaque nœud client qui participe à cette " +"itération ne s'entraîne que pendant un petit moment. Cela signifie " +"qu'après l'étape d'agrégation (étape 4), nous avons un modèle qui a été " +"entraîné sur toutes les données de tous les nœuds clients participants, " +"mais seulement pendant un petit moment. Nous devons ensuite répéter ce " +"processus d'entraînement encore et encore pour finalement arriver à un " +"modèle entièrement entraîné qui fonctionne bien sur l'ensemble des " +"données de tous les nœuds clients." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 +msgid "" +"Congratulations, you now understand the basics of federated learning. " +"There's a lot more to discuss, of course, but that was federated learning" +" in a nutshell. In later parts of this tutorial, we will go into more " +"detail. Interesting questions include: How can we select the best client " +"nodes that should participate in the next round? What's the best way to " +"aggregate model updates? How can we handle failing client nodes " +"(stragglers)?" +msgstr "" +"Félicitations, tu comprends maintenant les bases de l'apprentissage " +"fédéré. Il y a bien sûr beaucoup plus à discuter, mais c'était " +"l'apprentissage fédéré en quelques mots. Dans les parties suivantes de ce" +" tutoriel, nous irons plus en détail. Les questions intéressantes " +"comprennent : comment pouvons-nous sélectionner les meilleurs nœuds " +"clients qui devraient participer au prochain tour ? Quelle est la " +"meilleure façon d'agréger les mises à jour du modèle ? Comment pouvons-" +"nous gérer les nœuds clients qui échouent (stragglers) ?" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 +#, fuzzy +msgid "" +"Just like we can train a model on the decentralized data of different " +"client nodes, we can also evaluate the model on that data to receive " +"valuable metrics. 
This is called federated evaluation, sometimes " +"abbreviated as FE. In fact, federated evaluation is an integral part of " +"most federated learning systems." +msgstr "" +"Tout comme nous pouvons former un modèle sur les données décentralisées " +"de différents nœuds clients, nous pouvons également évaluer le modèle sur" +" ces données pour recevoir des mesures précieuses. C'est ce qu'on appelle" +" l'évaluation fédérée, parfois abrégée en FE. En fait, l'évaluation " +"fédérée fait partie intégrante de la plupart des systèmes d'apprentissage" +" fédéré." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 +msgid "Federated analytics" +msgstr "Analyses fédérées" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 +msgid "" +"In many cases, machine learning isn't necessary to derive value from " +"data. Data analysis can yield valuable insights, but again, there's often" +" not enough data to get a clear answer. What's the average age at which " +"people develop a certain type of health condition? Federated analytics " +"enables such queries over multiple client nodes. It is usually used in " +"conjunction with other privacy-enhancing technologies like secure " +"aggregation to prevent the server from seeing the results submitted by " +"individual client nodes." +msgstr "" +"Dans de nombreux cas, l'apprentissage automatique n'est pas nécessaire " +"pour tirer de la valeur des données. L'analyse des données peut donner " +"des indications précieuses, mais là encore, il n'y a souvent pas assez de" +" données pour obtenir une réponse claire. Quel est l'âge moyen auquel les" +" gens développent un certain type de problème de santé ? L'analyse " +"fédérée permet de telles requêtes sur plusieurs nœuds clients. 
Elle est " +"généralement utilisée en conjonction avec d'autres technologies de " +"renforcement de la confidentialité, comme l'agrégation sécurisée, pour " +"empêcher le serveur de voir les résultats soumis par les nœuds clients " +"individuels." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 +msgid "" +"Differential privacy (DP) is often mentioned in the context of Federated " +"Learning. It is a privacy-preserving method used when analyzing and " +"sharing statistical data, ensuring the privacy of individual " +"participants. DP achieves this by adding statistical noise to the model " +"updates, ensuring any individual participants’ information cannot be " +"distinguished or re-identified. This technique can be considered an " +"optimization that provides a quantifiable privacy protection measure." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 +msgid "Flower" +msgstr "Flower" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 +msgid "" +"Federated learning, federated evaluation, and federated analytics require" +" infrastructure to move machine learning models back and forth, train and" +" evaluate them on local data, and then aggregate the updated models. " +"Flower provides the infrastructure to do exactly that in an easy, " +"scalable, and secure way. In short, Flower presents a unified approach to" +" federated learning, analytics, and evaluation. It allows the user to " +"federate any workload, any ML framework, and any programming language." +msgstr "" +"L'apprentissage fédéré, l'évaluation fédérée et l'analyse fédérée " +"nécessitent une infrastructure pour déplacer les modèles d'apprentissage " +"automatique dans les deux sens, les entraîner et les évaluer sur des " +"données locales, puis agréger les modèles mis à jour. Flower fournit " +"l'infrastructure pour faire exactement cela de manière simple, évolutive " +"et sécurisée. 
En bref, Flower présente une approche unifiée de " +"l'apprentissage, de l'analyse et de l'évaluation fédérés. Il permet à " +"l'utilisateur de fédérer n'importe quelle charge de travail, n'importe " +"quel cadre de ML et n'importe quel langage de programmation." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 +msgid "|55472eef61274ba1b739408607e109df|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 +msgid "" +"Flower federated learning server and client nodes (car, scooter, personal" +" computer, roomba, and phone)" +msgstr "" +"Serveur d'apprentissage fédéré de Flower et nœuds clients (voiture, " +"scooter, ordinateur personnel, roomba et téléphone)" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 +msgid "" +"Congratulations, you just learned the basics of federated learning and " +"how it relates to the classic (centralized) machine learning!" +msgstr "" +"Félicitations, tu viens d'apprendre les bases de l'apprentissage fédéré " +"et son rapport avec l'apprentissage automatique classique (centralisé) !" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 +msgid "" +"In the next part of this tutorial, we are going to build a first " +"federated learning system with Flower." +msgstr "" +"Dans la prochaine partie de ce tutoriel, nous allons construire un " +"premier système d'apprentissage fédéré avec Flower." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 +msgid "" +"Before you continue, make sure to join the Flower community on Slack: " +"`Join Slack `__" +msgstr "" +"Avant de continuer, n'oublie pas de rejoindre la communauté Flower sur " +"Slack : `Join Slack `__" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 +#, fuzzy +msgid "" +"The `Flower Federated Learning Tutorial - Part 1 " +"`__ shows how to build a simple federated learning system " +"with PyTorch and Flower." 
+msgstr "" +"Le `Tutoriel d'apprentissage fédéré Flower - Partie 1 " +"`__ " +"montre comment construire un système d'apprentissage fédéré simple avec " +"PyTorch et Flower." + +#~ msgid "Flower CLI commands" +#~ msgstr "Commandes CLI Flower" + +#~ msgid "Contributor guide" +#~ msgstr "Guide pour les contributeurs" + +#~ msgid "API Reference - Flower CLI commands" +#~ msgstr "Référence API - Commandes CLI pour Flower" + +#~ msgid "API Reference - flwr (Python package)" +#~ msgstr "Référence API - flwr (paquetage Python)" + +#~ msgid "Flower client." +#~ msgstr "Client de Flower" + +#~ msgid "Abstract base class for Flower clients." +#~ msgstr "" + +#~ msgid "Evaluate the provided parameters using the locally held dataset." +#~ msgstr "évaluer le modèle mis à jour sur l'ensemble de test local" + +#~ msgid "Parameters" +#~ msgstr "Paramètres du modèle." + +#~ msgid "" +#~ "The evaluation instructions containing " +#~ "(global) model parameters received from " +#~ "the server and a dictionary of " +#~ "configuration values used to customize " +#~ "the local evaluation process." +#~ msgstr "" + +#~ msgid "Returns" +#~ msgstr "Ressources" + +#~ msgid "" +#~ "The evaluation result containing the " +#~ "loss on the local dataset and " +#~ "other details such as the number " +#~ "of local data examples used for " +#~ "evaluation." +#~ msgstr "" + +#~ msgid "Return type" +#~ msgstr "" + +#~ msgid "Refine the provided parameters using the locally held dataset." +#~ msgstr "" + +#~ msgid "" +#~ "The training instructions containing (global)" +#~ " model parameters received from the " +#~ "server and a dictionary of configuration" +#~ " values used to customize the local" +#~ " training process." +#~ msgstr "" + +#~ msgid "" +#~ "The training result containing updated " +#~ "parameters and other details such as " +#~ "the number of local training examples" +#~ " used for training." +#~ msgstr "" + +#~ msgid "Return the current local model parameters." 
+#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" + +#~ msgid "" +#~ "The get parameters instructions received " +#~ "from the server containing a dictionary" +#~ " of configuration values." +#~ msgstr "" + +#~ msgid "The current local model parameters." +#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" + +#~ msgid "Return set of client's properties." +#~ msgstr "" + +#~ msgid "" +#~ "The get properties instructions received " +#~ "from the server containing a dictionary" +#~ " of configuration values." +#~ msgstr "" + +#~ msgid "The current client properties." +#~ msgstr "" + +#~ msgid "Start a Flower client node which connects to a Flower server." +#~ msgstr "" + +#~ msgid "" +#~ "The IPv4 or IPv6 address of the" +#~ " server. If the Flower server runs" +#~ " on the same machine on port " +#~ "8080, then `server_address` would be " +#~ "`\"[::]:8080\"`." +#~ msgstr "" + +#~ msgid "An implementation of the abstract base class `flwr.client.Client`." +#~ msgstr "" + +#~ msgid "" +#~ "The maximum length of gRPC messages " +#~ "that can be exchanged with the " +#~ "Flower server. The default should be " +#~ "sufficient for most models. Users who" +#~ " train very large models might need" +#~ " to increase this value. Note that" +#~ " the Flower server needs to be " +#~ "started with the same value (see " +#~ "`flwr.server.start_server`), otherwise it will " +#~ "not know about the increased limit " +#~ "and block larger messages." +#~ msgstr "" + +#~ msgid "" +#~ "The PEM-encoded root certificates as " +#~ "a byte string or a path string." +#~ " If provided, a secure connection " +#~ "using the certificates will be " +#~ "established to an SSL-enabled Flower " +#~ "server." +#~ msgstr "" + +#~ msgid "" +#~ "DEPRECATED - USE 'transport' INSTEAD. " +#~ "Defines whether or not the client " +#~ "is interacting with the server using " +#~ "the experimental REST API. 
This feature" +#~ " is experimental, it might change " +#~ "considerably in future versions of " +#~ "Flower." +#~ msgstr "" +#~ "DÉPRÉCIÉ - UTILISER 'transport' À LA " +#~ "PLACE Définit si le client interagit " +#~ "ou non avec le serveur à l'aide" +#~ " de l'API REST expérimentale. Cette " +#~ "fonctionnalité est expérimentale, elle " +#~ "pourrait changer considérablement dans les " +#~ "futures versions de Flower." + +#~ msgid "" +#~ "Configure the transport layer. Allowed " #~ "values: - 'grpc-bidi': gRPC, " #~ "bidirectional streaming - 'grpc-rere': " #~ "gRPC, request-response (experimental) - " #~ "'rest': HTTP (experimental)" #~ msgstr "" -#~ "Valeurs autorisées : - 'grpc-bidi' " -#~ ": gRPC, flux bidirectionnel - 'grpc-" -#~ "rere' : gRPC, requête-réponse " -#~ "(expérimental) - 'rest' : HTTP " -#~ "(expérimental)" +#~ "Valeurs autorisées : - 'grpc-bidi' " +#~ ": gRPC, flux bidirectionnel - 'grpc-" +#~ "rere' : gRPC, requête-réponse " +#~ "(expérimental) - 'rest' : HTTP " +#~ "(expérimental)" + +#~ msgid "Starting a gRPC client with an insecure server connection:" +#~ msgstr "" + +#~ msgid "Starting an SSL-enabled gRPC client:" +#~ msgstr "" + +#~ msgid "Abstract base class for Flower clients using NumPy." +#~ msgstr "" + +#~ msgid "The current (global) model parameters." +#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" + +#~ msgid "" +#~ "Configuration parameters which allow the " +#~ "server to influence evaluation on the" +#~ " client. It can be used to " +#~ "communicate arbitrary values from the " +#~ "server to the client, for example, " +#~ "to influence the number of examples " +#~ "used for evaluation." +#~ msgstr "" + +#~ msgid "" +#~ "* **loss** (*float*) -- The evaluation" +#~ " loss of the model on the local" +#~ " dataset. * **num_examples** (*int*) -- " +#~ "The number of examples used for " +#~ "evaluation. 
* **metrics** (*Dict[str, " +#~ "Scalar]*) -- A dictionary mapping " +#~ "arbitrary string keys to values of " +#~ "type bool, bytes, float, int, or " +#~ "str. It can be used to " +#~ "communicate arbitrary values back to the" +#~ " server." +#~ msgstr "" + +#~ msgid "" +#~ "**loss** (*float*) -- The evaluation " +#~ "loss of the model on the local " +#~ "dataset." +#~ msgstr "" + +#~ msgid "**num_examples** (*int*) -- The number of examples used for evaluation." +#~ msgstr "" + +#~ msgid "" +#~ "**metrics** (*Dict[str, Scalar]*) -- A " +#~ "dictionary mapping arbitrary string keys " +#~ "to values of type bool, bytes, " +#~ "float, int, or str. It can be " +#~ "used to communicate arbitrary values " +#~ "back to the server." +#~ msgstr "" + +#~ msgid "" +#~ "The previous return type format (int," +#~ " float, float) and the extended " +#~ "format (int, float, float, Dict[str, " +#~ "Scalar]) have been deprecated and " +#~ "removed since Flower 0.19." +#~ msgstr "" + +#~ msgid "Train the provided parameters using the locally held dataset." +#~ msgstr "entraîne le modèle sur l'ensemble d'apprentissage local" + +#~ msgid "" +#~ "Configuration parameters which allow the " +#~ "server to influence training on the " +#~ "client. It can be used to " +#~ "communicate arbitrary values from the " +#~ "server to the client, for example, " +#~ "to set the number of (local) " +#~ "training epochs." +#~ msgstr "" + +#~ msgid "" +#~ "* **parameters** (*NDArrays*) -- The " +#~ "locally updated model parameters. * " +#~ "**num_examples** (*int*) -- The number " +#~ "of examples used for training. * " +#~ "**metrics** (*Dict[str, Scalar]*) -- A " +#~ "dictionary mapping arbitrary string keys " +#~ "to values of type bool, bytes, " +#~ "float, int, or str. It can be " +#~ "used to communicate arbitrary values " +#~ "back to the server." +#~ msgstr "" + +#~ msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." 
+#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" + +#~ msgid "**num_examples** (*int*) -- The number of examples used for training." +#~ msgstr "" + +#~ msgid "" +#~ "Configuration parameters requested by the " +#~ "server. This can be used to tell" +#~ " the client which parameters are " +#~ "needed along with some Scalar " +#~ "attributes." +#~ msgstr "" + +#~ msgid "" +#~ "**parameters** -- The local model " +#~ "parameters as a list of NumPy " +#~ "ndarrays." +#~ msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" + +#~ msgid "Return a client's set of properties." +#~ msgstr "Renvoie l'ensemble des propriétés d'un client." + +#~ msgid "" +#~ "Configuration parameters requested by the " +#~ "server. This can be used to tell" +#~ " the client which properties are " +#~ "needed along with some Scalar " +#~ "attributes." +#~ msgstr "" + +#~ msgid "" +#~ "**properties** -- A dictionary mapping " +#~ "arbitrary string keys to values of " +#~ "type bool, bytes, float, int, or " +#~ "str. It can be used to communicate" +#~ " arbitrary property values back to " +#~ "the server." +#~ msgstr "" + +#~ msgid "Start a Flower NumPyClient which connects to a gRPC server." +#~ msgstr "" + +#~ msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." +#~ msgstr "" + +#~ msgid "Starting a client with an insecure server connection:" +#~ msgstr "" + +#~ msgid "Starting a SSL-enabled client:" +#~ msgstr "" + +#~ msgid "Start a Ray-based Flower simulation server." +#~ msgstr "Simulation de moniteur" + +#~ msgid "" +#~ "A function creating client instances. " +#~ "The function must take a single " +#~ "`str` argument called `cid`. It should" +#~ " return a single client instance of" +#~ " type ClientLike. Note that the " +#~ "created client instances are ephemeral " +#~ "and will often be destroyed after " +#~ "a single method invocation. 
Since client" +#~ " instances are not long-lived, they" +#~ " should not attempt to carry state" +#~ " over method invocations. Any state " +#~ "required by the instance (model, " +#~ "dataset, hyperparameters, ...) should be " +#~ "(re-)created in either the call to " +#~ "`client_fn` or the call to any of" +#~ " the client methods (e.g., load " +#~ "evaluation data in the `evaluate` method" +#~ " itself)." +#~ msgstr "" +#~ "Une fonction créant des instances de " +#~ "client. La fonction doit prendre un " +#~ "seul argument `str` appelé `cid`. Elle" +#~ " doit retourner une seule instance de" +#~ " client de type ClientLike. Notez que" +#~ " les instances de client créées sont" +#~ " éphémères et seront souvent détruites " +#~ "après une seule invocation de méthode." +#~ " Puisque les instances de client ne" +#~ " sont pas de longue durée, elles " +#~ "ne doivent pas essayer de transporter" +#~ " l'état sur les invocations de " +#~ "méthode. Tout état requis par l'instance" +#~ " (modèle, jeu de données, hyperparamètres," +#~ " ...) doit être (re)créé dans l'appel" +#~ " à `client_fn` ou dans l'appel à " +#~ "n'importe quelle méthode de client (par" +#~ " exemple, charger les données d'évaluation" +#~ " dans la méthode `evaluate` elle-" +#~ "même)." + +#~ msgid "" +#~ "The total number of clients in " +#~ "this simulation. This must be set " +#~ "if `clients_ids` is not set and " +#~ "vice-versa." +#~ msgstr "" + +#~ msgid "" +#~ "List `client_id`s for each client. This" +#~ " is only required if `num_clients` is" +#~ " not set. Setting both `num_clients` " +#~ "and `clients_ids` with `len(clients_ids)` not" +#~ " equal to `num_clients` generates an " +#~ "error." +#~ msgstr "" + +#~ msgid "" +#~ "CPU and GPU resources for a single" +#~ " client. Supported keys are `num_cpus` " +#~ "and `num_gpus`. Example: `{\"num_cpus\": 4," +#~ " \"num_gpus\": 1}`. 
To understand the " +#~ "GPU utilization caused by `num_gpus`, " +#~ "consult the Ray documentation on GPU " +#~ "support." +#~ msgstr "" + +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Server`. If no instance" +#~ " is provided, then `start_server` will " +#~ "create one." +#~ msgstr "" + +#~ msgid "" +#~ "Currently supported values are `num_rounds`" +#~ " (int, default: 1) and `round_timeout` " +#~ "in seconds (float, default: None)." +#~ msgstr "" + +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Strategy`. If no " +#~ "strategy is provided, then `start_server` " +#~ "will use `flwr.server.strategy.FedAvg`." +#~ msgstr "" + +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.ClientManager`. If no " +#~ "implementation is provided, then " +#~ "`start_simulation` will use " +#~ "`flwr.server.client_manager.SimpleClientManager`." +#~ msgstr "" + +#~ msgid "" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args: { " +#~ "\"ignore_reinit_error\": True, \"include_dashboard\": " +#~ "False } An empty dictionary can " +#~ "be used (ray_init_args={}) to prevent " +#~ "any arguments from being passed to " +#~ "ray.init." +#~ msgstr "" + +#~ msgid "" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args:" +#~ msgstr "" + +#~ msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#~ msgstr "" + +#~ msgid "" +#~ "An empty dictionary can be used " +#~ "(ray_init_args={}) to prevent any arguments" +#~ " from being passed to ray.init." +#~ msgstr "" + +#~ msgid "" +#~ "Set to True to prevent `ray.shutdown()`" +#~ " in case `ray.is_initialized()=True`." 
+#~ msgstr "" + +#~ msgid "**hist** -- Object containing metrics from training." +#~ msgstr "" + +#~ msgid "Flower server." +#~ msgstr "Serveur de Flower" + +#~ msgid "Start a Flower server using the gRPC transport layer." +#~ msgstr "" + +#~ msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." +#~ msgstr "" + +#~ msgid "" +#~ "A server implementation, either " +#~ "`flwr.server.Server` or a subclass thereof." +#~ " If no instance is provided, then " +#~ "`start_server` will create one." +#~ msgstr "" + +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.strategy.Strategy`. If no " +#~ "strategy is provided, then `start_server` " +#~ "will use `flwr.server.strategy.FedAvg`." +#~ msgstr "" + +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.ClientManager`. If no " +#~ "implementation is provided, then " +#~ "`start_server` will use " +#~ "`flwr.server.client_manager.SimpleClientManager`." +#~ msgstr "" + +#~ msgid "" +#~ "The maximum length of gRPC messages " +#~ "that can be exchanged with the " +#~ "Flower clients. The default should be" +#~ " sufficient for most models. Users " +#~ "who train very large models might " +#~ "need to increase this value. Note " +#~ "that the Flower clients need to be" +#~ " started with the same value (see " +#~ "`flwr.client.start_client`), otherwise clients will" +#~ " not know about the increased limit" +#~ " and block larger messages." +#~ msgstr "" + +#~ msgid "" +#~ "Tuple containing root certificate, server " +#~ "certificate, and private key to start" +#~ " a secure SSL-enabled server. The " +#~ "tuple is expected to have three " +#~ "bytes elements in the following order:" +#~ " * CA certificate. * server " +#~ "certificate. * server private key." +#~ msgstr "" + +#~ msgid "" +#~ "Tuple containing root certificate, server " +#~ "certificate, and private key to start" +#~ " a secure SSL-enabled server. 
The " +#~ "tuple is expected to have three " +#~ "bytes elements in the following order:" +#~ msgstr "" + +#~ msgid "CA certificate." +#~ msgstr "Certificats" + +#~ msgid "server certificate." +#~ msgstr "Certificats" + +#~ msgid "server private key." +#~ msgstr "stratégie.du.serveur" + +#~ msgid "**hist** -- Object containing training and evaluation metrics." +#~ msgstr "" + +#~ msgid "Starting an insecure server:" +#~ msgstr "Démarrer le serveur" + +#~ msgid "Starting an SSL-enabled server:" +#~ msgstr "Démarrer le serveur" + +#~ msgid "Contains the strategy abstraction and different implementations." +#~ msgstr "" + +#~ msgid "Abstract base class for server strategy implementations." +#~ msgstr "" + +#~ msgid "The current round of federated learning." +#~ msgstr "Qu'est-ce que l'apprentissage fédéré ?" + +#~ msgid "" +#~ "Successful updates from the previously " +#~ "selected and configured clients. Each " +#~ "pair of `(ClientProxy, FitRes` constitutes " +#~ "a successful update from one of " +#~ "the previously selected clients. Not " +#~ "that not all previously selected clients" +#~ " are necessarily included in this " +#~ "list: a client might drop out and" +#~ " not submit a result. For each " +#~ "client that did not submit an " +#~ "update, there should be an `Exception`" +#~ " in `failures`." +#~ msgstr "" + +#~ msgid "" +#~ "Exceptions that occurred while the " +#~ "server was waiting for client updates." +#~ msgstr "" + +#~ msgid "" +#~ "**aggregation_result** -- The aggregated " +#~ "evaluation result. Aggregation typically uses" +#~ " some variant of a weighted average." +#~ msgstr "" + +#~ msgid "Aggregate training results." +#~ msgstr "Résultats globaux de l'évaluation." + +#~ msgid "" +#~ "Successful updates from the previously " +#~ "selected and configured clients. Each " +#~ "pair of `(ClientProxy, FitRes)` constitutes" +#~ " a successful update from one of " +#~ "the previously selected clients. 
Not " +#~ "that not all previously selected clients" +#~ " are necessarily included in this " +#~ "list: a client might drop out and" +#~ " not submit a result. For each " +#~ "client that did not submit an " +#~ "update, there should be an `Exception`" +#~ " in `failures`." +#~ msgstr "" + +#~ msgid "" +#~ "**parameters** -- If parameters are " +#~ "returned, then the server will treat " +#~ "these as the new global model " +#~ "parameters (i.e., it will replace the" +#~ " previous parameters with the ones " +#~ "returned from this method). If `None`" +#~ " is returned (e.g., because there " +#~ "were only failures and no viable " +#~ "results) then the server will no " +#~ "update the previous model parameters, " +#~ "the updates received in this round " +#~ "are discarded, and the global model " +#~ "parameters remain the same." +#~ msgstr "" + +#~ msgid "Configure the next round of evaluation." +#~ msgstr "Configuration de l'évaluation côté serveur" + +#~ msgid "The client manager which holds all currently connected clients." +#~ msgstr "" + +#~ msgid "" +#~ "**evaluate_configuration** -- A list of " +#~ "tuples. Each tuple in the list " +#~ "identifies a `ClientProxy` and the " +#~ "`EvaluateIns` for this particular " +#~ "`ClientProxy`. If a particular `ClientProxy`" +#~ " is not included in this list, " +#~ "it means that this `ClientProxy` will" +#~ " not participate in the next round" +#~ " of federated evaluation." +#~ msgstr "" + +#~ msgid "Configure the next round of training." +#~ msgstr "" + +#~ msgid "" +#~ "**fit_configuration** -- A list of " +#~ "tuples. Each tuple in the list " +#~ "identifies a `ClientProxy` and the " +#~ "`FitIns` for this particular `ClientProxy`." +#~ " If a particular `ClientProxy` is not" +#~ " included in this list, it means " +#~ "that this `ClientProxy` will not " +#~ "participate in the next round of " +#~ "federated learning." +#~ msgstr "" + +#~ msgid "Evaluate the current model parameters." 
+#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" + +#~ msgid "" +#~ "This function can be used to " +#~ "perform centralized (i.e., server-side) " +#~ "evaluation of model parameters." +#~ msgstr "" + +#~ msgid "" +#~ "**evaluation_result** -- The evaluation " +#~ "result, usually a Tuple containing loss" +#~ " and a dictionary containing task-" +#~ "specific metrics (e.g., accuracy)." +#~ msgstr "" + +#~ msgid "Initialize the (global) model parameters." +#~ msgstr "Initialise le modèle global" + +#~ msgid "" +#~ "**parameters** -- If parameters are " +#~ "returned, then the server will treat " +#~ "these as the initial global model " +#~ "parameters." +#~ msgstr "" + +#~ msgid "Configurable FedAvg strategy implementation." +#~ msgstr "Configuration de l'évaluation fédérée" + +#~ msgid "Implementation based on https://arxiv.org/abs/1602.05629" +#~ msgstr "" + +#~ msgid "" +#~ "Fraction of clients used during " +#~ "training. In case `min_fit_clients` is " +#~ "larger than `fraction_fit * " +#~ "available_clients`, `min_fit_clients` will still " +#~ "be sampled. Defaults to 1.0." +#~ msgstr "" + +#~ msgid "" +#~ "Fraction of clients used during " +#~ "validation. In case `min_evaluate_clients` is" +#~ " larger than `fraction_evaluate * " +#~ "available_clients`, `min_evaluate_clients` will " +#~ "still be sampled. Defaults to 1.0." +#~ msgstr "" + +#~ msgid "Minimum number of clients used during training. Defaults to 2." +#~ msgstr "" + +#~ msgid "Minimum number of clients used during validation. Defaults to 2." +#~ msgstr "" + +#~ msgid "Minimum number of total clients in the system. Defaults to 2." +#~ msgstr "" + +#~ msgid "Optional function used for validation. Defaults to None." +#~ msgstr "" + +#~ msgid "Function used to configure training. Defaults to None." +#~ msgstr "" + +#~ msgid "Function used to configure validation. Defaults to None." +#~ msgstr "" + +#~ msgid "Whether or not accept rounds containing failures. 
Defaults to True." +#~ msgstr "" + +#~ msgid "Initial global model parameters." +#~ msgstr "Initialise le modèle global" + +#~ msgid "Metrics aggregation function, optional." +#~ msgstr "" + +#~ msgid "Aggregate evaluation losses using weighted average." +#~ msgstr "Résultats globaux de l'évaluation." + +#~ msgid "Aggregate fit results using weighted average." +#~ msgstr "" + +#~ msgid "Evaluate model parameters using an evaluation function." +#~ msgstr "" + +#~ msgid "Initialize global model parameters." +#~ msgstr "Initialise le modèle global" + +#~ msgid "Use a fraction of available clients for evaluation." +#~ msgstr "" + +#~ msgid "Return the sample size and the required number of available clients." +#~ msgstr "" + +#~ msgid "Configurable FedAvg with Momentum strategy implementation." +#~ msgstr "" + +#~ msgid "Federated Averaging with Momentum strategy." +#~ msgstr "Stratégie de moyenne fédérée." + +#~ msgid "Implementation based on https://arxiv.org/pdf/1909.06335.pdf" +#~ msgstr "" + +#~ msgid "Fraction of clients used during training. Defaults to 0.1." +#~ msgstr "" + +#~ msgid "Fraction of clients used during validation. Defaults to 0.1." +#~ msgstr "" + +#~ msgid "" +#~ "Server-side learning rate used in " +#~ "server-side optimization. Defaults to 1.0." +#~ msgstr "" + +#~ msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." +#~ msgstr "" + +#~ msgid "Configurable QFedAvg strategy implementation." +#~ msgstr "" + +#~ msgid "Configurable fault-tolerant FedAvg strategy implementation." +#~ msgstr "" + +#~ msgid "Configurable FedAdagrad strategy implementation." +#~ msgstr "" + +#~ msgid "Federated Optim strategy interface." +#~ msgstr "" + +#~ msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" +#~ msgstr "" +#~ "FedYogi - Stratégie d'apprentissage fédéré " +#~ "utilisant Yogi côté serveur. Mise en " +#~ "oeuvre basée sur https://arxiv.org/abs/2003.00295" + +#~ msgid "Fraction of clients used during training. 
Defaults to 1.0." +#~ msgstr "" + +#~ msgid "Fraction of clients used during validation. Defaults to 1.0." +#~ msgstr "" + +#~ msgid "Server-side learning rate. Defaults to 1e-1." +#~ msgstr "" + +#~ msgid "Client-side learning rate. Defaults to 1e-1." +#~ msgstr "" + +#~ msgid "Momentum parameter. Defaults to 0.0." +#~ msgstr "" + +#~ msgid "Second moment parameter. Defaults to 0.0." +#~ msgstr "" + +#~ msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." +#~ msgstr "" + +#~ msgid "Configurable FedProx strategy implementation." +#~ msgstr "" + +#~ msgid "Federated Optimization strategy." +#~ msgstr "Stratégie de moyenne fédérée." + +#~ msgid "Implementation based on https://arxiv.org/abs/1812.06127" +#~ msgstr "" + +#~ msgid "" +#~ "The strategy in itself will not be" +#~ " different than FedAvg, the client " +#~ "needs to be adjusted. A proximal " +#~ "term needs to be added to the " +#~ "loss function during the training:" +#~ msgstr "" + +#~ msgid "" +#~ "\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" +#~ "\n" +#~ msgstr "\\\\frac{\\Nmu}{2} || w - w^t ||^2" + +#~ msgid "" +#~ "Where $w^t$ are the global parameters" +#~ " and $w$ are the local weights " +#~ "the function will be optimized with." +#~ msgstr "" + +#~ msgid "In PyTorch, for example, the loss would go from:" +#~ msgstr "" + +#~ msgid "To:" +#~ msgstr "" + +#~ msgid "" +#~ "With `global_params` being a copy of " +#~ "the parameters before the training takes" +#~ " place." +#~ msgstr "" + +#~ msgid "" +#~ "The weight of the proximal term " +#~ "used in the optimization. 0.0 makes " +#~ "this strategy equivalent to FedAvg, and" +#~ " the higher the coefficient, the more" +#~ " regularization will be used (that " +#~ "is, the client parameters will need " +#~ "to be closer to the server " +#~ "parameters during training)." +#~ msgstr "" + +#~ msgid "Sends the proximal factor mu to the clients" +#~ msgstr "" + +#~ msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." 
+#~ msgstr "" +#~ "Stratégie FedAdagrad - Optimisation fédérée" +#~ " adaptative à l'aide d'Adagrad." + +#~ msgid "Paper: https://arxiv.org/abs/2003.00295" +#~ msgstr "" + +#~ msgid "Federated learning strategy using Adagrad on server-side." +#~ msgstr "" +#~ "Construisons un système d'apprentissage fédéré" +#~ " en utilisant fastai et Flower !" + +#~ msgid "FedAdam - Adaptive Federated Optimization using Adam." +#~ msgstr "FedAdam - Optimisation fédérée adaptative utilisant Adam." + +#~ msgid "Momentum parameter. Defaults to 0.9." +#~ msgstr "" + +#~ msgid "Second moment parameter. Defaults to 0.99." +#~ msgstr "" + +#~ msgid "FedYogi [Reddi et al., 2020] strategy." +#~ msgstr "Stratégie FedYogi [Reddi et al., 2020]." + +#~ msgid "Adaptive Federated Optimization using Yogi." +#~ msgstr "Optimisation fédérée adaptative à l'aide de Yogi." + +#~ msgid "Federated learning strategy using Yogi on server-side." +#~ msgstr "L'apprentissage fédéré en cinq étapes" + +#~ msgid "Differential Privacy Wrappers in Flower" +#~ msgstr "Les enveloppes différentielles de confidentialité dans les fleurs" + +#~ msgid "Evaluation" +#~ msgstr "Solution" + +#~ msgid "Code examples" +#~ msgstr "Exemple de code complet" + +#~ msgid "" +#~ "Flower Quickstart (PyTorch): coming soon " +#~ "(the TensorFlow/Keras example can easily " +#~ "be changed to make it work with" +#~ " PyTorch)" +#~ msgstr "" + +#~ msgid "First time contributors" +#~ msgstr "Bonnes premières contributions" + +#~ msgid "First MXNet 1.6 example (MNIST)" +#~ msgstr "" + +#~ msgid "ImageNet (PyTorch/TensorFlow)" +#~ msgstr "" + +#~ msgid "LSTM (PyTorch/TensorFlow)" +#~ msgstr "" + +#~ msgid "Transformer (PyTorch/TensorFlow)" +#~ msgstr "" + +#~ msgid "BERT (PyTorch/TensorFlow)" +#~ msgstr "" + +#~ msgid "Logging" +#~ msgstr "Enregistrement" + +#~ msgid "|cce04c6f539b421a91f5dba40287193f|" +#~ msgstr "|cce04c6f539b421a91f5dba40287193f|" + +#~ msgid "|e392aef42ba248e19e35446f95a6d1ca|" +#~ msgstr 
"|e392aef42ba248e19e35446f95a6d1ca|" + +#~ msgid "|7e028f44defe4f31a02debc729f2010d|" +#~ msgstr "|7e028f44defe4f31a02debc729f2010d|" + +#~ msgid "|b89f7b7ae05e4ecd92baa69b7a9fe1be|" +#~ msgstr "|b89f7b7ae05e4ecd92baa69b7a9fe1be|" + +#~ msgid "|9c0445ce962744e1a1c0a4abc697a334|" +#~ msgstr "|9c0445ce962744e1a1c0a4abc697a334|" + +#~ msgid "|a3246766a6db412888131b3bcdad0971|" +#~ msgstr "|a3246766a6db412888131b3bcdad0971|" + +#~ msgid "|db6f2bee32f143b8a5085b6a8ce1acd1|" +#~ msgstr "|db6f2bee32f143b8a5085b6a8ce1acd1|" + +#~ msgid "|405653bc8f874e9595fd59cc82b3d48c|" +#~ msgstr "|405653bc8f874e9595fd59cc82b3d48c|" + +#~ msgid "|073a728154ed406e8fe54e1d9f18dcb9|" +#~ msgstr "|073a728154ed406e8fe54e1d9f18dcb9|" + +#~ msgid "|50e80ea4f22945848b65ed7eed35e0e1|" +#~ msgstr "|50e80ea4f22945848b65ed7eed35e0e1|" + +#~ msgid "|f3cf9148d85e4b68b66b6c255b25e327|" +#~ msgstr "|f3cf9148d85e4b68b66b6c255b25e327|" + +#~ msgid "|1fedb4f8714947e1b13f03696180c741|" +#~ msgstr "|1fedb4f8714947e1b13f03696180c741|" + +#~ msgid "|a32d4ad1ccb34461942d75c7b2b51d65|" +#~ msgstr "|a32d4ad1ccb34461942d75c7b2b51d65|" + +#~ msgid "|3531696c52904cd3b9944034ab959d48|" +#~ msgstr "|3531696c52904cd3b9944034ab959d48|" + +#~ msgid "An Introduction to Federated Learning" +#~ msgstr "Mise à l'échelle de l'apprentissage fédéré" + +#~ msgid "Strategies in Federated Learning" +#~ msgstr "Mise à l'échelle de l'apprentissage fédéré" + +#~ msgid "Building a Strategy" +#~ msgstr "Stratégies intégrées" + +#~ msgid "Client and NumPyClient" +#~ msgstr "NumPyClient" + +#~ msgid "Strategies" +#~ msgstr "Stratégies personnalisées" + +#~ msgid "SSL-enabled Server and Client" +#~ msgstr "" + +#~ msgid "About these documents" +#~ msgstr "À propos de ces documents" + +#~ msgid "Index" +#~ msgstr "Index" + +#~ msgid "Search" +#~ msgstr "Recherche" + +#~ msgid "Copyright" +#~ msgstr "Droits d'auteur" + +#~ msgid "Save Progress" +#~ msgstr "" + +#~ msgid "" +#~ "The Flower server does not prescribe " +#~ "a way to persist 
model updates or" +#~ " evaluation results. Flower does not " +#~ "(yet) automatically save model updates " +#~ "on the server-side. It's on the" +#~ " roadmap to provide a built-in " +#~ "way of doing this." +#~ msgstr "" + +#~ msgid "Release Process" +#~ msgstr "Publier Flower" + +#~ msgid "Virtual Env Installation" +#~ msgstr "Virtualenv avec Anaconda" + +#~ msgid "Install development versions" +#~ msgstr "Installer les versions de développement de Flower" + +#~ msgid "Set up a virtual env" +#~ msgstr "Mettre en place un environment virtuel" + +#~ msgid "" +#~ "Note that, in order to build the" +#~ " documentation locally (with ``poetry run" +#~ " make html``, like described below), " +#~ "`Pandoc _` needs " +#~ "to be installed on the system." +#~ msgstr "" +#~ "Notez que, pour construire la " +#~ "documentation localement (avec ``poetry run" +#~ " make html``, comme décrit ci-" +#~ "dessous), ``Pandoc _`" +#~ " doit être installé sur le système." + +#~ msgid "Llama 2 fine-tuning, with Hugging Face Transformers and PyTorch" +#~ msgstr "Un fine-tuning de LLaMA 2 avec Hugging Face et PyTorch" + +#~ msgid "XGBoost" +#~ msgstr "XGBoost" + +#~ msgid "Android ONNX on-device training" +#~ msgstr "" +#~ "Utiliser Android ONNX pour faire du " +#~ "training directement sur le téléphone" + +#~ msgid "Contribute on GitHub" +#~ msgstr "Contribuer sur GitHub" + +#~ msgid "How to write a good PR title" +#~ msgstr "Comment écrire un bon titre de PR" + +#~ msgid "" +#~ "A well-crafted PR title helps team" +#~ " members quickly understand the purpose " +#~ "and scope of the changes being " +#~ "proposed. Here's a guide to help " +#~ "you write a good GitHub PR title:" +#~ msgstr "" +#~ "Un titre de PR bien choisi permet" +#~ " aux autres développeurs de rapidement " +#~ "comprendre l'intérêt et le scope des " +#~ "changements proposés. Voici un guide " +#~ "pour vous aider à écrire des bons" +#~ " titres de PR :" + +#~ msgid "" +#~ "1. 
Be Clear and Concise: Provide a" +#~ " clear summary of the changes in " +#~ "a concise manner. 1. Use Actionable " +#~ "Verbs: Start with verbs like \"Add,\"" +#~ " \"Update,\" or \"Fix\" to indicate " +#~ "the purpose. 1. Include Relevant " +#~ "Information: Mention the affected feature " +#~ "or module for context. 1. Keep it" +#~ " Short: Avoid lengthy titles for easy" +#~ " readability. 1. Use Proper Capitalization" +#~ " and Punctuation: Follow grammar rules " +#~ "for clarity." +#~ msgstr "" +#~ "1. Soyez clair et concis : Donnez" +#~ " un résumé clair des changements de" +#~ " manière concise. 1. Utilisez des " +#~ "verbes actionnables : Commencez par des" +#~ " verbes comme \"Add\", \"Update\", ou " +#~ "\"Fix\" pour indiquer le but. 1. " +#~ "Inclure des renseignements pertinents : " +#~ "Mentionner la caractéristique ou le " +#~ "module concerné pour le contexte. 1. " +#~ "Gardez le court : Évitez les longs" +#~ " titres pour une lisibilité facile. " +#~ "1. Utiliser une bonne capitalisation et" +#~ " une ponctuation : Suivre les règles" +#~ " de grammaire pour la clarté." 
+ +#~ msgid "" +#~ "Let's start with a few examples " +#~ "for titles that should be avoided " +#~ "because they do not provide meaningful" +#~ " information:" +#~ msgstr "" +#~ "Commençons par quelques exemples de " +#~ "titres qui devraient être évités parce" +#~ " qu'ils ne fournissent pas d'information" +#~ " significative :" + +#~ msgid "Implement Algorithm" +#~ msgstr "Implement Algorithm" + +#~ msgid "Database" +#~ msgstr "Base de données" + +#~ msgid "Add my_new_file.py to codebase" +#~ msgstr "Add my_new_file.py to codebase" + +#~ msgid "Improve code in module" +#~ msgstr "Improve code in module" + +#~ msgid "Change SomeModule" +#~ msgstr "Change SomeModule" + +#~ msgid "" +#~ "Here are a few positive examples " +#~ "which provide helpful information without " +#~ "repeating how they do it, as that" +#~ " is already visible in the \"Files" +#~ " changed\" section of the PR:" +#~ msgstr "" +#~ "Voici quelques bons exemples qui " +#~ "fournissent de l'information utile sans " +#~ "répéter comment ils le font, comme " +#~ "cela est déjà visible dans la " +#~ "section \"Files changed\" de la PR " +#~ ":" + +#~ msgid "Update docs banner to mention Flower Summit 2023" +#~ msgstr "Update docs banner to mention Flower Summit 2023" + +#~ msgid "Remove unnecessary XGBoost dependency" +#~ msgstr "Remove unnecessary XGBoost dependency" + +#~ msgid "Remove redundant attributes in strategies subclassing FedAvg" +#~ msgstr "Remove redundant attributes in strategies subclassing FedAvg" + +#~ msgid "Add CI job to deploy the staging system when the `main` branch changes" +#~ msgstr "" +#~ "Ajoute une tâche CI pour déployer " +#~ "le système de mise en scène " +#~ "lorsque la branche `main` change" + +#~ msgid "" +#~ "Add new amazing library which will " +#~ "be used to improve the simulation " +#~ "engine" +#~ msgstr "" +#~ "Add new amazing library which will " +#~ "be used to improve the simulation " +#~ "engine" + +#~ msgid "Differential privacy" +#~ msgstr 
"Confidentialité différentielle" + +#~ msgid "" +#~ "The Flower server does not prescribe " +#~ "a way to aggregate evaluation results," +#~ " but it enables the user to " +#~ "fully customize result aggregation." +#~ msgstr "" + +#~ msgid "Configure logging" +#~ msgstr "Configurer les clients" + +#~ msgid "" +#~ "The Flower logger keeps track of " +#~ "all core events that take place in" +#~ " federated learning workloads. It presents" +#~ " information by default following a " +#~ "standard message format:" +#~ msgstr "" +#~ "L'enregistreur de Flower garde la trace" +#~ " de tous les événements principaux " +#~ "qui ont lieu dans les charges de" +#~ " travail de l'apprentissage fédéré. Il " +#~ "présente les informations par défaut en" +#~ " suivant un format de message " +#~ "standard :" + +#~ msgid "" +#~ "containing relevant information including: log" +#~ " message level (e.g. :code:`INFO`, " +#~ ":code:`DEBUG`), a timestamp, the line " +#~ "where the logging took place from, " +#~ "as well as the log message itself." +#~ " In this way, the logger would " +#~ "typically display information on your " +#~ "terminal as follows:" +#~ msgstr "" + +#~ msgid "Saving log to file" +#~ msgstr "Enregistrement du journal dans un fichier" + +#~ msgid "" +#~ "By default, the Flower log is " +#~ "outputted to the terminal where you " +#~ "launch your Federated Learning workload " +#~ "from. This applies for both gRPC-" +#~ "based federation (i.e. when you do " +#~ ":code:`fl.server.start_server`) and when using " +#~ "the :code:`VirtualClientEngine` (i.e. when you" +#~ " do :code:`fl.simulation.start_simulation`). In " +#~ "some situations you might want to " +#~ "save this log to disk. You can " +#~ "do so by calling the " +#~ "`fl.common.logger.configure() " +#~ "`_" +#~ " function. For example:" +#~ msgstr "" + +#~ msgid "" +#~ "With the above, Flower will record " +#~ "the log you see on your terminal" +#~ " to :code:`log.txt`. 
This file will " +#~ "be created in the same directory " +#~ "as were you are running the code" +#~ " from. If we inspect we see the" +#~ " log above is also recorded but " +#~ "prefixing with :code:`identifier` each line:" +#~ msgstr "" +#~ "Avec ce qui précède, Flower enregistrera" +#~ " le journal que tu vois sur ton" +#~ " terminal dans :code:`log.txt`. Ce fichier" +#~ " sera créé dans le même répertoire" +#~ " que celui à partir duquel tu " +#~ "exécutes le code. Si nous inspectons," +#~ " nous voyons que le journal ci-" +#~ "dessus est également enregistré, mais en" +#~ " préfixant chaque ligne avec " +#~ ":code:`identifier` :" + +#~ msgid "Log your own messages" +#~ msgstr "Enregistrer tes propres messages" + +#~ msgid "" +#~ "You might expand the information shown" +#~ " by default with the Flower logger" +#~ " by adding more messages relevant to" +#~ " your application. You can achieve " +#~ "this easily as follows." +#~ msgstr "" +#~ "Tu peux élargir les informations " +#~ "affichées par défaut avec le logger " +#~ "Flower en ajoutant d'autres messages " +#~ "pertinents pour ton application. Tu peux" +#~ " y parvenir facilement en procédant " +#~ "comme suit." + +#~ msgid "" +#~ "In this way your logger will show," +#~ " in addition to the default messages," +#~ " the ones introduced by the clients" +#~ " as specified above." +#~ msgstr "" +#~ "De cette façon, ton logger affichera," +#~ " en plus des messages par défaut, " +#~ "ceux introduits par les clients comme" +#~ " spécifié ci-dessus." + +#~ msgid "Log to a remote service" +#~ msgstr "Se connecter à un service distant" + +#~ msgid "" +#~ "The :code:`fl.common.logger.configure` function, " +#~ "also allows specifying a host to " +#~ "which logs can be pushed (via " +#~ ":code:`POST`) through a native Python " +#~ ":code:`logging.handler.HTTPHandler`. 
This is a " +#~ "particularly useful feature in " +#~ ":code:`gRPC`-based Federated Learning workloads " +#~ "where otherwise gathering logs from all" +#~ " entities (i.e. the server and the" +#~ " clients) might be cumbersome. Note " +#~ "that in Flower simulation, the server" +#~ " automatically displays all logs. You " +#~ "can still specify a :code:`HTTPHandler` " +#~ "should you wish to backup or " +#~ "analyze the logs somewhere else." +#~ msgstr "" + +#~ msgid "Enable SSL connections" +#~ msgstr "Collecte centralisée des données" + +#~ msgid "Python version" +#~ msgstr "Version Python" + +#~ msgid "" +#~ "Flower requires at least `Python 3.7 " +#~ "`_, but `Python 3.8" +#~ " `_ or above is " +#~ "recommended." +#~ msgstr "" +#~ "Flower nécessite `Python 3.7 " +#~ "`_ ou plus, nous " +#~ "recommandons `Python 3.8 " +#~ "`_." + +#~ msgid "Run simulations" +#~ msgstr "Simulation de moniteur" + +#~ msgid "" +#~ "Simulating Federated Learning workloads is " +#~ "useful for a multitude of use-" +#~ "cases: you might want to run your" +#~ " workload on a large cohort of " +#~ "clients but without having to source," +#~ " configure and mange a large number" +#~ " of physical devices; you might want" +#~ " to run your FL workloads as " +#~ "fast as possible on the compute " +#~ "systems you have access to without " +#~ "having to go through a complex " +#~ "setup process; you might want to " +#~ "validate your algorithm on different " +#~ "scenarios at varying levels of data " +#~ "and system heterogeneity, client availability," +#~ " privacy budgets, etc. These are " +#~ "among some of the use-cases where" +#~ " simulating FL workloads makes sense. " +#~ "Flower can accommodate these scenarios " +#~ "by means of its `VirtualClientEngine " +#~ "`_ or VCE." +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`VirtualClientEngine` schedules, launches" +#~ " and manages `virtual` clients. These " +#~ "clients are identical to `non-virtual`" +#~ " clients (i.e. 
the ones you launch" +#~ " via the command `flwr.client.start_client " +#~ "`_) in the" +#~ " sense that they can be configure " +#~ "by creating a class inheriting, for " +#~ "example, from `flwr.client.NumPyClient `_ and therefore" +#~ " behave in an identical way. In " +#~ "addition to that, clients managed by " +#~ "the :code:`VirtualClientEngine` are:" +#~ msgstr "" + +#~ msgid "" +#~ "resource-aware: this means that each " +#~ "client gets assigned a portion of " +#~ "the compute and memory on your " +#~ "system. You as a user can control" +#~ " this at the beginning of the " +#~ "simulation and allows you to control " +#~ "the degree of parallelism of your " +#~ "Flower FL simulation. The fewer the " +#~ "resources per client, the more clients" +#~ " can run concurrently on the same " +#~ "hardware." +#~ msgstr "" + +#~ msgid "" +#~ "self-managed: this means that you " +#~ "as a user do not need to " +#~ "launch clients manually, instead this " +#~ "gets delegated to :code:`VirtualClientEngine`'s " +#~ "internals." +#~ msgstr "" + +#~ msgid "" +#~ "ephemeral: this means that a client " +#~ "is only materialized when it is " +#~ "required in the FL process (e.g. " +#~ "to do `fit() `_). The object is" +#~ " destroyed afterwards, releasing the " +#~ "resources it was assigned and allowing" +#~ " in this way other clients to " +#~ "participate." +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`VirtualClientEngine` implements `virtual`" +#~ " clients using `Ray `_, " +#~ "an open-source framework for scalable" +#~ " Python workloads. In particular, Flower's" +#~ " :code:`VirtualClientEngine` makes use of " +#~ "`Actors `_ to spawn `virtual` clients" +#~ " and run their workload." +#~ msgstr "" + +#~ msgid "Launch your Flower simulation" +#~ msgstr "" + +#~ msgid "" +#~ "Running Flower simulations still require " +#~ "you to define your client class, a" +#~ " strategy, and utility functions to " +#~ "download and load (and potentially " +#~ "partition) your dataset. 
With that out" +#~ " of the way, launching your " +#~ "simulation is done with `start_simulation " +#~ "`_ " +#~ "and a minimal example looks as " +#~ "follows:" +#~ msgstr "" + +#~ msgid "VirtualClientEngine resources" +#~ msgstr "Moteur de client virtuel" + +#~ msgid "" +#~ "By default the VCE has access to" +#~ " all system resources (i.e. all CPUs," +#~ " all GPUs, etc) since that is " +#~ "also the default behavior when starting" +#~ " Ray. However, in some settings you" +#~ " might want to limit how many " +#~ "of your system resources are used " +#~ "for simulation. You can do this " +#~ "via the :code:`ray_init_args` input argument" +#~ " to :code:`start_simulation` which the VCE" +#~ " internally passes to Ray's " +#~ ":code:`ray.init` command. For a complete " +#~ "list of settings you can configure " +#~ "check the `ray.init `_ " +#~ "documentation. Do not set " +#~ ":code:`ray_init_args` if you want the " +#~ "VCE to use all your system's CPUs" +#~ " and GPUs." +#~ msgstr "" + +#~ msgid "Assigning client resources" +#~ msgstr "" + +#~ msgid "" +#~ "By default the :code:`VirtualClientEngine` " +#~ "assigns a single CPU core (and " +#~ "nothing else) to each virtual client." +#~ " This means that if your system " +#~ "has 10 cores, that many virtual " +#~ "clients can be concurrently running." +#~ msgstr "" + +#~ msgid "" +#~ "More often than not, you would " +#~ "probably like to adjust the resources" +#~ " your clients get assigned based on" +#~ " the complexity (i.e. compute and " +#~ "memory footprint) of your FL workload." +#~ " You can do so when starting " +#~ "your simulation by setting the argument" +#~ " `client_resources` to `start_simulation `_. Two " +#~ "keys are internally used by Ray to" +#~ " schedule and spawn workloads (in our" +#~ " case Flower clients):" +#~ msgstr "" + +#~ msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." 
+#~ msgstr "" + +#~ msgid "" +#~ ":code:`num_gpus` indicates the **ratio** of" +#~ " GPU memory a client gets assigned." +#~ msgstr "" + +#~ msgid "Let's see a few examples:" +#~ msgstr "" + +#~ msgid "" +#~ "While the :code:`client_resources` can be " +#~ "used to control the degree of " +#~ "concurrency in your FL simulation, this" +#~ " does not stop you from running " +#~ "dozens, hundreds or even thousands of" +#~ " clients in the same round and " +#~ "having orders of magnitude more " +#~ "`dormant` (i.e. not participating in a" +#~ " round) clients. Let's say you want" +#~ " to have 100 clients per round " +#~ "but your system can only accommodate " +#~ "8 clients concurrently. The " +#~ ":code:`VirtualClientEngine` will schedule 100 " +#~ "jobs to run (each simulating a " +#~ "client sampled by the strategy) and " +#~ "then will execute them in a " +#~ "resource-aware manner in batches of " +#~ "8." +#~ msgstr "" + +#~ msgid "" +#~ "To understand all the intricate details" +#~ " on how resources are used to " +#~ "schedule FL clients and how to " +#~ "define custom resources, please take a" +#~ " look at the `Ray documentation " +#~ "`_." +#~ msgstr "" + +#~ msgid "Simulation examples" +#~ msgstr "Exemples de PyTorch" + +#~ msgid "" +#~ "A few ready-to-run complete " +#~ "examples for Flower simulation in " +#~ "Tensorflow/Keras and PyTorch are provided " +#~ "in the `Flower repository " +#~ "`_. You can run " +#~ "them on Google Colab too:" +#~ msgstr "" + +#~ msgid "" +#~ "`Tensorflow/Keras Simulation " +#~ "`_: 100 clients collaboratively " +#~ "train a MLP model on MNIST." +#~ msgstr "" +#~ "`Quickstart TensorFlow (Code) " +#~ "`_" + +#~ msgid "" +#~ "`PyTorch Simulation " +#~ "`_: 100 clients collaboratively train" +#~ " a CNN model on MNIST." +#~ msgstr "" +#~ "`Quickstart PyTorch (Code) " +#~ "`_" + +#~ msgid "" +#~ "Flower's :code:`VirtualClientEngine` allows you " +#~ "to run FL simulations across multiple" +#~ " compute nodes. 
Before starting your " +#~ "multi-node simulation ensure that you:" +#~ msgstr "" + +#~ msgid "Have the same Python environment in all nodes." +#~ msgstr "" + +#~ msgid "Have a copy of your code (e.g. your entire repo) in all nodes." +#~ msgstr "" + +#~ msgid "" +#~ "Have a copy of your dataset in " +#~ "all nodes (more about this in " +#~ ":ref:`simulation considerations `)" +#~ msgstr "" + +#~ msgid "" +#~ "Pass :code:`ray_init_args={\"address\"=\"auto\"}` to " +#~ "`start_simulation `_ so the " +#~ ":code:`VirtualClientEngine` attaches to a " +#~ "running Ray instance." +#~ msgstr "" + +#~ msgid "" +#~ "Start Ray on you head node: on " +#~ "the terminal type :code:`ray start " +#~ "--head`. This command will print a " +#~ "few lines, one of which indicates " +#~ "how to attach other nodes to the" +#~ " head node." +#~ msgstr "" + +#~ msgid "" +#~ "Attach other nodes to the head " +#~ "node: copy the command shown after " +#~ "starting the head and execute it " +#~ "on terminal of a new node: for " +#~ "example :code:`ray start " +#~ "--address='192.168.1.132:6379'`" +#~ msgstr "" + +#~ msgid "" +#~ "With all the above done, you can" +#~ " run your code from the head " +#~ "node as you would if the " +#~ "simulation was running on a single " +#~ "node." +#~ msgstr "" + +#~ msgid "" +#~ "Once your simulation is finished, if " +#~ "you'd like to dismantle your cluster " +#~ "you simply need to run the command" +#~ " :code:`ray stop` in each node's " +#~ "terminal (including the head node)." +#~ msgstr "" + +#~ msgid "Multi-node simulation good-to-know" +#~ msgstr "" + +#~ msgid "" +#~ "Here we list a few interesting " +#~ "functionality when running multi-node FL" +#~ " simulations:" +#~ msgstr "" + +#~ msgid "" +#~ "User :code:`ray status` to check all " +#~ "nodes connected to your head node " +#~ "as well as the total resources " +#~ "available to the :code:`VirtualClientEngine`." 
+#~ msgstr "" + +#~ msgid "" +#~ "When attaching a new node to the" +#~ " head, all its resources (i.e. all" +#~ " CPUs, all GPUs) will be visible " +#~ "by the head node. This means that" +#~ " the :code:`VirtualClientEngine` can schedule " +#~ "as many `virtual` clients as that " +#~ "node can possible run. In some " +#~ "settings you might want to exclude " +#~ "certain resources from the simulation. " +#~ "You can do this by appending " +#~ "`--num-cpus=` and/or `--num-" +#~ "gpus=` in any :code:`ray " +#~ "start` command (including when starting " +#~ "the head)" +#~ msgstr "" + +#~ msgid "Considerations for simulations" +#~ msgstr "Simulation de moniteur" + +#~ msgid "" +#~ "We are actively working on these " +#~ "fronts so to make it trivial to" +#~ " run any FL workload with Flower " +#~ "simulation." +#~ msgstr "" + +#~ msgid "" +#~ "The current VCE allows you to run" +#~ " Federated Learning workloads in simulation" +#~ " mode whether you are prototyping " +#~ "simple scenarios on your personal laptop" +#~ " or you want to train a complex" +#~ " FL pipeline across multiple high-" +#~ "performance GPU nodes. While we add " +#~ "more capabilities to the VCE, the " +#~ "points below highlight some of the " +#~ "considerations to keep in mind when " +#~ "designing your FL pipeline with Flower." +#~ " We also highlight a couple of " +#~ "current limitations in our implementation." +#~ msgstr "" + +#~ msgid "GPU resources" +#~ msgstr "Ressources" + +#~ msgid "" +#~ "The VCE assigns a share of GPU " +#~ "memory to a client that specifies " +#~ "the key :code:`num_gpus` in " +#~ ":code:`client_resources`. This being said, Ray" +#~ " (used internally by the VCE) is " +#~ "by default:" +#~ msgstr "" + +#~ msgid "" +#~ "not aware of the total VRAM " +#~ "available on the GPUs. This means " +#~ "that if you set :code:`num_gpus=0.5` and" +#~ " you have two GPUs in your " +#~ "system with different (e.g. 
32GB and " +#~ "8GB) VRAM amounts, they both would " +#~ "run 2 clients concurrently." +#~ msgstr "" + +#~ msgid "" +#~ "not aware of other unrelated (i.e. " +#~ "not created by the VCE) workloads " +#~ "are running on the GPU. Two " +#~ "takeaways from this are:" +#~ msgstr "" + +#~ msgid "" +#~ "Your Flower server might need a " +#~ "GPU to evaluate the `global model` " +#~ "after aggregation (by instance when " +#~ "making use of the `evaluate method " +#~ "`_)" +#~ msgstr "" + +#~ msgid "" +#~ "If you want to run several " +#~ "independent Flower simulations on the " +#~ "same machine you need to mask-out" +#~ " your GPUs with " +#~ ":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching" +#~ " your experiment." +#~ msgstr "" + +#~ msgid "" +#~ "In addition, the GPU resource limits " +#~ "passed to :code:`client_resources` are not " +#~ "`enforced` (i.e. they can be exceeded)" +#~ " which can result in the situation" +#~ " of client using more VRAM than " +#~ "the ratio specified when starting the" +#~ " simulation." +#~ msgstr "" + +#~ msgid "TensorFlow with GPUs" +#~ msgstr "Exemples de TensorFlow" + +#~ msgid "" +#~ "When `using a GPU with TensorFlow " +#~ "`_ nearly your " +#~ "entire GPU memory of all your GPUs" +#~ " visible to the process will be " +#~ "mapped. This is done by TensorFlow " +#~ "for optimization purposes. However, in " +#~ "settings such as FL simulations where" +#~ " we want to split the GPU into" +#~ " multiple `virtual` clients, this is " +#~ "not a desirable mechanism. Luckily we" +#~ " can disable this default behavior by" +#~ " `enabling memory growth " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "This would need to be done in " +#~ "the main process (which is where " +#~ "the server would run) and in each" +#~ " Actor created by the VCE. By " +#~ "means of :code:`actor_kwargs` we can " +#~ "pass the reserved key `\"on_actor_init_fn\"`" +#~ " in order to specify a function " +#~ "to be executed upon actor " +#~ "initialization. 
In this case, to enable" +#~ " GPU growth for TF workloads. It " +#~ "would look as follows:" +#~ msgstr "" + +#~ msgid "" +#~ "This is precisely the mechanism used " +#~ "in `Tensorflow/Keras Simulation " +#~ "`_ example." +#~ msgstr "" +#~ "`Quickstart TensorFlow (Code) " +#~ "`_" + +#~ msgid "Multi-node setups" +#~ msgstr "" + +#~ msgid "" +#~ "The VCE does not currently offer a" +#~ " way to control on which node a" +#~ " particular `virtual` client is executed." +#~ " In other words, if more than a" +#~ " single node have the resources " +#~ "needed by a client to run, then" +#~ " any of those nodes could get " +#~ "the client workload scheduled onto. " +#~ "Later in the FL process (i.e. in" +#~ " a different round) the same client" +#~ " could be executed by a different " +#~ "node. Depending on how your clients " +#~ "access their datasets, this might " +#~ "require either having a copy of " +#~ "all dataset partitions on all nodes " +#~ "or a dataset serving mechanism (e.g. " +#~ "using nfs, a database) to circumvent " +#~ "data duplication." +#~ msgstr "" + +#~ msgid "" +#~ "By definition virtual clients are " +#~ "`stateless` due to their ephemeral " +#~ "nature. A client state can be " +#~ "implemented as part of the Flower " +#~ "client class but users need to " +#~ "ensure this saved to persistent storage" +#~ " (e.g. a database, disk) and that " +#~ "can be retrieve later by the same" +#~ " client regardless on which node it" +#~ " is running from. This is related " +#~ "to the point above also since, in" +#~ " some way, the client's dataset could" +#~ " be seen as a type of `state`." +#~ msgstr "" + +#~ msgid "Save and load model checkpoints" +#~ msgstr "Sauvegarde et chargement des points de contrôle PyTorch" + +#~ msgid "" +#~ "Flower does not automatically save model" +#~ " updates on the server-side. This " +#~ "how-to guide describes the steps " +#~ "to save (and load) model checkpoints " +#~ "in Flower." 
+#~ msgstr "" + +#~ msgid "Legacy example guides" +#~ msgstr "" + +#~ msgid "Contributor tutorials" +#~ msgstr "Configuration du contributeur" + +#~ msgid "Contributor explanations" +#~ msgstr "Explications" + +#~ msgid "Flower Framework Documentation" +#~ msgstr "Documentation de Flower" + +#~ msgid "PyTorch" +#~ msgstr "Exemples de PyTorch" + +#~ msgid "TensorFlow" +#~ msgstr "TensorFlow" + +#~ msgid "Flower CLI reference" +#~ msgstr "Client de Flower" + +#~ msgid "flwr (Python API reference)" +#~ msgstr "Référence pour l'API" + +#~ msgid "Unreleased" +#~ msgstr "Inédit" + +#~ msgid "**Deprecate Python 3.7**" +#~ msgstr "**Deprecate Python 3.7**" + +#~ msgid "" +#~ "Since Python 3.7 reached its end " +#~ "of life (EOL) on 2023-06-27, support " +#~ "for Python 3.7 is now deprecated " +#~ "and will be removed in an upcoming" +#~ " release." +#~ msgstr "" +#~ "Étant donné que Python 3.7 a " +#~ "atteint sa fin de vie (EOL) le " +#~ "2023-06-27, la prise en charge de " +#~ "Python 3.7 est désormais dépréciée et" +#~ " sera supprimée dans une prochaine " +#~ "version." 
+ +#~ msgid "" +#~ "**Add new** `FedTrimmedAvg` **strategy** " +#~ "([#1769](https://github.com/adap/flower/pull/1769), " +#~ "[#1853](https://github.com/adap/flower/pull/1853))" +#~ msgstr "" +#~ "**Ajouter un nouveau** `FedTrimmedAvg` " +#~ "**stratégie** " +#~ "([#1769](https://github.com/adap/flower/pull/1769), " +#~ "[#1853](https://github.com/adap/flower/pull/1853))" + +#~ msgid "" +#~ "The new `FedTrimmedAvg` strategy implements" +#~ " Trimmed Mean by [Dong Yin, " +#~ "2018](https://arxiv.org/abs/1803.01498)" +#~ msgstr "" +#~ "La nouvelle stratégie `FedTrimmedAvg` met " +#~ "en œuvre la moyenne trimmée par " +#~ "[Dong Yin, 2018](https://arxiv.org/abs/1803.01498)" + +#~ msgid "" +#~ "**Add parameter aggregation to** `mt-" +#~ "pytorch` **code example** " +#~ "([#1785](https://github.com/adap/flower/pull/1785))" +#~ msgstr "" +#~ "**Ajouter l'agrégation des paramètres à** " +#~ "`mt-pytorch` **exemple de code** " +#~ "([#1785](https://github.com/adap/flower/pull/1785))" + +#~ msgid "" +#~ "The `mt-pytorch` example shows how " +#~ "to aggregate parameters when writing a" +#~ " driver script. The included `driver.py`" +#~ " and `server.py` have been aligned to" +#~ " demonstrate both the low-level way" +#~ " and the high-level way of " +#~ "building server-side logic." +#~ msgstr "" +#~ "L'exemple `mt-pytorch` montre comment " +#~ "agréger des paramètres lors de " +#~ "l'écriture d'un script de pilote. Les" +#~ " fichiers `driver.py` et `server.py` inclus" +#~ " ont été alignés pour démontrer à " +#~ "la fois la manière de bas niveau" +#~ " et la manière de haut niveau " +#~ "de construire la logique côté serveur." 
+ +#~ msgid "" +#~ "**Introduce (experimental) gRPC request-" +#~ "response API** " +#~ "([#1867](https://github.com/adap/flower/pull/1867), " +#~ "[#1901](https://github.com/adap/flower/pull/1901))" +#~ msgstr "" +#~ "**Introduire l'API demande-réponse gRPC " +#~ "(expérimentale)** " +#~ "([#1867](https://github.com/adap/flower/pull/1867), " +#~ "[#1901](https://github.com/adap/flower/pull/1901))" + +#~ msgid "" +#~ "In addition to the existing gRPC " +#~ "API (based on bidirectional streaming) " +#~ "and the experimental REST API, there " +#~ "is now a new gRPC API that " +#~ "uses a request-response model to " +#~ "communicate with client nodes." +#~ msgstr "" +#~ "En plus de l'API gRPC existante " +#~ "(basée sur un flux bidirectionnel) et" +#~ " de l'API REST expérimentale, il " +#~ "existe désormais une nouvelle API gRPC" +#~ " qui utilise un modèle demande-" +#~ "réponse pour communiquer avec les nœuds" +#~ " clients." + +#~ msgid "" +#~ "Please note: The gRPC request-response" +#~ " API is still experimental and will" +#~ " likely change significantly over time." +#~ msgstr "" +#~ "Remarque : l'API requête-réponse gRPC" +#~ " est encore expérimentale et est " +#~ "susceptible de changer de manière " +#~ "significative au fil du temps." + +#~ msgid "" +#~ "**Replace the eperimental** " +#~ "`start_client(rest=True)` **with the new** " +#~ "`start_client(transport=\"rest\")` " +#~ "([#1880](https://github.com/adap/flower/pull/1880))" +#~ msgstr "" +#~ "**Remplacez le fichier expérimental** " +#~ "`start_client(rest=True) **par le nouveau** " +#~ "`start_client(transport=\"rest\")` " +#~ "([#1880](https://github.com/adap/flower/pull/1880))" + +#~ msgid "" +#~ "The (experimental) `start_client` argument " +#~ "`rest` was deprecated in favor of " +#~ "a new argument `transport`. " +#~ "`start_client(transport=\"rest\")` will yield the" +#~ " same behaviour as `start_client(rest=True)` " +#~ "did before. 
All code should migrate " +#~ "to the new argument `transport`. The " +#~ "deprecated argument `rest` will be " +#~ "removed in a future release." +#~ msgstr "" + +#~ msgid "" +#~ "**Migrate experimental REST API to " +#~ "Starlette** ([2171](https://github.com/adap/flower/pull/2171))" +#~ msgstr "" +#~ "**Migrer l'API REST expérimentale vers " +#~ "Starlette** ([2171](https://github.com/adap/flower/pull/2171))" + +#~ msgid "" +#~ "The (experimental) REST API used to " +#~ "be implemented in " +#~ "[FastAPI](https://fastapi.tiangolo.com/), but it has" +#~ " now been migrated to use " +#~ "[Starlette](https://www.starlette.io/) directly." +#~ msgstr "" +#~ "L'API REST (expérimentale) était auparavant" +#~ " implémentée dans " +#~ "[FastAPI](https://fastapi.tiangolo.com/), mais elle " +#~ "a maintenant été migrée pour utiliser" +#~ " directement [Starlette](https://www.starlette.io/)." + +#~ msgid "" +#~ "**Add a new gRPC option** " +#~ "([#2197](https://github.com/adap/flower/pull/2197))" +#~ msgstr "" +#~ "**Ajouter une nouvelle option gRPC** " +#~ "([#2197](https://github.com/adap/flower/pull/2197))" + +#~ msgid "" +#~ "We now start a gRPC server with" +#~ " the `grpc.keepalive_permit_without_calls` option " +#~ "set to 0 by default. This prevents" +#~ " the clients from sending keepalive " +#~ "pings when there is no outstanding " +#~ "stream." +#~ msgstr "" +#~ "Nous démarrons maintenant un serveur " +#~ "gRPC avec l'option " +#~ "`grpc.keepalive_permit_without_calls` réglée sur 0" +#~ " par défaut, ce qui empêche les " +#~ "clients d'envoyer des pings de maintien" +#~ " lorsqu'il n'y a pas de flux en" +#~ " attente." 
+ +#~ msgid "" +#~ "**General improvements** " +#~ "([#1872](https://github.com/adap/flower/pull/1872), " +#~ "[#1866](https://github.com/adap/flower/pull/1866), " +#~ "[#1884](https://github.com/adap/flower/pull/1884))" +#~ msgstr "" +#~ "**Mettre à jour les exemples de " +#~ "code** ([#1291](https://github.com/adap/flower/pull/1291), " +#~ "[#1286](https://github.com/adap/flower/pull/1286), " +#~ "[#1282](https://github.com/adap/flower/pull/1282))" + +#~ msgid "Example projects" +#~ msgstr "Exemples" + +#~ msgid "" +#~ "`Flower simulation PyTorch " +#~ "`_" +#~ msgstr "" +#~ "`Flower Quickstart (TensorFlow/Keras) " +#~ "`_" + +#~ msgid "" +#~ "`Android Kotlin example " +#~ "`_" +#~ msgstr "" + +#~ msgid "`Android Java example `_" +#~ msgstr "" + +#~ msgid "Build a strategy from scratch" +#~ msgstr "Élaborer une stratégie à partir de zéro" + +#~ msgid "Customize the client" +#~ msgstr "Création du client IMDBC" + +#~ msgid "Get started with Flower" +#~ msgstr "" + +#~ msgid "Quickstart Android" +#~ msgstr "Démarrage rapide d'Android" + +#~ msgid "" +#~ "Let's build a federated learning system" +#~ " using TFLite and Flower on Android!" +#~ msgstr "" +#~ "Construisons un système d'apprentissage fédéré" +#~ " en utilisant TFLite et Flower sur" +#~ " Android !" + +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example " +#~ "`_ to" +#~ " learn more." +#~ msgstr "" +#~ "Réfère-toi à l'exemple de code " +#~ "complet " +#~ "`_ " +#~ "pour en savoir plus." + +#~ msgid "Quickstart iOS" +#~ msgstr "Démarrage rapide iOS" + +#~ msgid "" +#~ "In this tutorial we will learn how" +#~ " to train a Neural Network on " +#~ "MNIST using Flower and CoreML on " +#~ "iOS devices." +#~ msgstr "" +#~ "Dans ce tutoriel, nous allons apprendre" +#~ " à former un réseau neuronal sur " +#~ "MNIST en utilisant Flower et CoreML " +#~ "sur les appareils iOS." 
+ +#~ msgid "" +#~ "First of all, for running the " +#~ "Flower Python server, it is recommended" +#~ " to create a virtual environment and" +#~ " run everything within a `virtualenv " +#~ "`_. " +#~ "For the Flower client implementation in" +#~ " iOS, it is recommended to use " +#~ "Xcode as our IDE." +#~ msgstr "" +#~ "Tout d'abord, pour l'exécution du " +#~ "serveur Flower Python, il est recommandé" +#~ " de créer un environnement virtuel et" +#~ " de tout exécuter au sein d'un " +#~ "`virtualenv `_. Pour l'implémentation du client" +#~ " Flower dans iOS, il est recommandé" +#~ " d'utiliser Xcode comme notre IDE." + +#~ msgid "" +#~ "Our example consists of one Python " +#~ "*server* and two iPhone *clients* that" +#~ " all have the same model." +#~ msgstr "" +#~ "Notre exemple se compose d'un *serveur*" +#~ " Python et de deux *clients* iPhone" +#~ " qui ont tous le même modèle." + +#~ msgid "" +#~ "*Clients* are responsible for generating " +#~ "individual weight updates for the model" +#~ " based on their local datasets. These" +#~ " updates are then sent to the " +#~ "*server* which will aggregate them to" +#~ " produce a better model. Finally, the" +#~ " *server* sends this improved version " +#~ "of the model back to each " +#~ "*client*. A complete cycle of weight " +#~ "updates is called a *round*." +#~ msgstr "" +#~ "*Les clients* sont chargés de générer" +#~ " des mises à jour de poids " +#~ "individuelles pour le modèle en fonction" +#~ " de leurs ensembles de données " +#~ "locaux. Ces mises à jour sont " +#~ "ensuite envoyées au *serveur* qui les" +#~ " agrège pour produire un meilleur " +#~ "modèle. Enfin, le *serveur* renvoie " +#~ "cette version améliorée du modèle à " +#~ "chaque *client*. Un cycle complet de " +#~ "mises à jour de poids s'appelle un" +#~ " *round*." + +#~ msgid "" +#~ "Now that we have a rough idea " +#~ "of what is going on, let's get " +#~ "started to setup our Flower server " +#~ "environment. 
We first need to install" +#~ " Flower. You can do this by " +#~ "using pip:" +#~ msgstr "" +#~ "Maintenant que nous avons une idée " +#~ "approximative de ce qui se passe, " +#~ "commençons à configurer notre environnement" +#~ " de serveur Flower. Nous devons " +#~ "d'abord installer Flower, ce que tu " +#~ "peux faire à l'aide de pip :" + +#~ msgid "Or Poetry:" +#~ msgstr "Ou de la poésie :" + +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training using CoreML " +#~ "as our local training pipeline and " +#~ "MNIST as our dataset. For simplicity " +#~ "reasons we will use the complete " +#~ "Flower client with CoreML, that has " +#~ "been implemented and stored inside the" +#~ " Swift SDK. The client implementation " +#~ "can be seen below:" +#~ msgstr "" +#~ "Maintenant que toutes nos dépendances " +#~ "sont installées, exécutons une simple " +#~ "formation distribuée en utilisant CoreML " +#~ "comme pipeline de formation local et " +#~ "MNIST comme ensemble de données. Pour" +#~ " des raisons de simplicité, nous " +#~ "utiliserons le client Flower complet " +#~ "avec CoreML, qui a été mis en " +#~ "œuvre et stocké à l'intérieur du " +#~ "SDK Swift. La mise en œuvre du " +#~ "client peut être vue ci-dessous :" + +#~ msgid "" +#~ "Let's create a new application project" +#~ " in Xcode and add :code:`flwr` as " +#~ "a dependency in your project. For " +#~ "our application, we will store the " +#~ "logic of our app in " +#~ ":code:`FLiOSModel.swift` and the UI elements" +#~ " in :code:`ContentView.swift`. We will " +#~ "focus more on :code:`FLiOSModel.swift` in " +#~ "this quickstart. Please refer to the " +#~ "`full code example " +#~ "`_ to " +#~ "learn more about the app." 
+#~ msgstr "" + +#~ msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" +#~ msgstr "" + +#~ msgid "" +#~ "Then add the mlmodel to the " +#~ "project simply by drag-and-drop, " +#~ "the mlmodel will be bundled inside " +#~ "the application during deployment to " +#~ "your iOS device. We need to pass" +#~ " the url to access mlmodel and " +#~ "run CoreML machine learning processes, " +#~ "it can be retrieved by calling the" +#~ " function :code:`Bundle.main.url`. For the " +#~ "MNIST dataset, we need to preprocess " +#~ "it into :code:`MLBatchProvider` object. The" +#~ " preprocessing is done inside " +#~ ":code:`DataLoader.swift`." +#~ msgstr "" + +#~ msgid "" +#~ "Since CoreML does not allow the " +#~ "model parameters to be seen before " +#~ "training, and accessing the model " +#~ "parameters during or after the training" +#~ " can only be done by specifying " +#~ "the layer name, we need to know" +#~ " this informations beforehand, through " +#~ "looking at the model specification, " +#~ "which are written as proto files. " +#~ "The implementation can be seen in " +#~ ":code:`MLModelInspect`." +#~ msgstr "" + +#~ msgid "" +#~ "After we have all of the necessary" +#~ " informations, let's create our Flower " +#~ "client." +#~ msgstr "" + +#~ msgid "" +#~ "Then start the Flower gRPC client " +#~ "and start communicating to the server" +#~ " by passing our Flower client to " +#~ "the function :code:`startFlwrGRPC`." +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ "call the provided :code:`MLFlwrClient` and " +#~ "call :code:`startFlwrGRPC()`. The attribute " +#~ ":code:`hostname` and :code:`port` tells the" +#~ " client which server to connect to." +#~ " This can be done by entering " +#~ "the hostname and port in the " +#~ "application before clicking the start " +#~ "button to start the federated learning" +#~ " process." 
+#~ msgstr "" + +#~ msgid "" +#~ "Once the server is running we can" +#~ " start the clients in different " +#~ "terminals. Build and run the client " +#~ "through your Xcode, one through Xcode" +#~ " Simulator and the other by deploying" +#~ " it to your iPhone. To see more" +#~ " about how to deploy your app " +#~ "to iPhone or Simulator visit `here " +#~ "`_." +#~ msgstr "" +#~ "Une fois que le serveur fonctionne, " +#~ "nous pouvons démarrer les clients dans" +#~ " différents terminaux. Construis et exécute" +#~ " le client grâce à ton Xcode, " +#~ "l'un via le simulateur Xcode et " +#~ "l'autre en le déployant sur ton " +#~ "iPhone. Pour en savoir plus sur la" +#~ " façon de déployer ton application " +#~ "sur l'iPhone ou le simulateur, visite" +#~ " `ici `_." + +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system in your ios device. The " +#~ "full `source code " +#~ "`_ for" +#~ " this example can be found in " +#~ ":code:`examples/ios`." +#~ msgstr "" +#~ "Félicitations ! Tu as réussi à " +#~ "construire et à faire fonctionner ton" +#~ " premier système d'apprentissage fédéré " +#~ "dans ton appareil ios. Le `code " +#~ "source complet " +#~ "`_ de " +#~ "cet exemple se trouve dans " +#~ ":code:`examples/ios`." + +#~ msgid "" +#~ "`Star Flower on GitHub " +#~ "`__ ⭐️ and join " +#~ "the open-source Flower community on " +#~ "Slack to connect, ask questions, and " +#~ "get help: `Join Slack `__ 🌼 We'd love to hear" +#~ " from you in the ``#introductions`` " +#~ "channel! And if anything is unclear, " +#~ "head over to the ``#questions`` channel." +#~ msgstr "" +#~ "`Star Flower on GitHub " +#~ "`__ ⭐️ et rejoignez" +#~ " la communauté open-source Flower sur" +#~ " Slack pour vous connecter, poser des" +#~ " questions et obtenir de l'aide : " +#~ "`Join Slack `__ " +#~ "🌼 Nous serions ravis d'avoir de " +#~ "vos nouvelles dans le canal " +#~ "``#introductions`` ! 
Et si quelque chose" +#~ " n'est pas clair, dirigez-vous vers" +#~ " le canal ``#questions``." + +#~ msgid "|bd48315a61c14495babefe3c7918b493|" +#~ msgstr "" + +#~ msgid "|c00d9e5b0d324d96b86da8a78b05b14b|" +#~ msgstr "" + +#~ msgid "|faae2ee10f4149c9907563c4f48ec6ea|" +#~ msgstr "" + +#~ msgid "|13a655510351455292f145a61d6c15d6|" +#~ msgstr "" + +#~ msgid "|13949884182846e3a91433190a936ba9|" +#~ msgstr "" + +#~ msgid "|9bf26cc650b146e88b4745df040ece37|" +#~ msgstr "" + +#~ msgid "|1590915480fc41708bd43e48af9582f9|" +#~ msgstr "" + +#~ msgid "|e5ee96d702b64256b97b8ca99db10787|" +#~ msgstr "" + +#~ msgid "|84840b244edd47c481278ce534c126cd|" +#~ msgstr "" + +#~ msgid "|f33f5ebb3a844a2ba54bb6be3571b172|" +#~ msgstr "" + +#~ msgid "|5645db4ba9c945518d51ff234f35c797|" +#~ msgstr "" + +#~ msgid "|317af8d28fcc479ab981047d058c4751|" +#~ msgstr "" + +#~ msgid "|8bfd0e697a494d5385662debafade6bf|" +#~ msgstr "" + +#~ msgid "" +#~ "Differential privacy (DP) is often " +#~ "mentioned in the context of Federated" +#~ " Learning. It is a privacy-preserving" +#~ " method used when analyzing and " +#~ "sharing statistical data, ensuring the " +#~ "privacy of individual participants. DP " +#~ "achieves this by adding statistical " +#~ "noise to the model updates, ensuring " +#~ "any individual participants’ information " +#~ "cannot be distinguished or re-" +#~ "identified. This technique can be " +#~ "considered an optimization that provides " +#~ "a quantifiable privacy protection measure." +#~ msgstr "" +#~ "La confidentialité différentielle (DP) est " +#~ "souvent mentionnée dans le contexte de" +#~ " l'apprentissage fédéré. Il s'agit d'une" +#~ " méthode de préservation de la vie" +#~ " privée utilisée lors de l'analyse et" +#~ " du partage de données statistiques, " +#~ "garantissant la confidentialité des " +#~ "participants individuels. 
La DP y " +#~ "parvient en ajoutant un bruit " +#~ "statistique aux mises à jour du " +#~ "modèle, garantissant que toute information " +#~ "sur les participants individuels ne peut" +#~ " être distinguée ou réidentifiée. Cette " +#~ "technique peut être considérée comme une" +#~ " optimisation qui fournit une mesure " +#~ "quantifiable de protection de la vie " +#~ "privée." + +#~ msgid "|e5dc001d27ad460caeab669e957b3c36|" +#~ msgstr "" + +#~ msgid "API Reference - Flower binaries" +#~ msgstr "" + +#~ msgid "API Reference - flwr" +#~ msgstr "Référence pour l'API" + +#~ msgid "" +#~ "Defines whether or not the client " +#~ "is interacting with the server using " +#~ "the experimental REST API. This feature" +#~ " is experimental, it might change " +#~ "considerably in future versions of " +#~ "Flower." +#~ msgstr "" + +#~ msgid "Returns a client's set of properties." +#~ msgstr "" + +#~ msgid "" +#~ "Defines whether or not the client " +#~ "is interacting with the server using " +#~ "the experimental REST API. This feature" +#~ " is experimental, it might be change" +#~ " considerably in future versions of " +#~ "Flower." +#~ msgstr "" + +#~ msgid "" +#~ "A function creating client instances. " +#~ "The function must take a single " +#~ "str argument called `cid`. It should " +#~ "return a single client instance of " +#~ "type ClientLike. Note that the created" +#~ " client instances are ephemeral and " +#~ "will often be destroyed after a " +#~ "single method invocation. Since client " +#~ "instances are not long-lived, they " +#~ "should not attempt to carry state " +#~ "over method invocations. Any state " +#~ "required by the instance (model, " +#~ "dataset,hyperparameters, ...) should be " +#~ "(re-)created in either the call to " +#~ "`client_fn` or the call to any of" +#~ " the client methods (e.g., load " +#~ "evaluation data in the `evaluate` method" +#~ " itself)." +#~ msgstr "" + +#~ msgid "" +#~ "A function creating client instances. 
" +#~ "The function must take a single " +#~ "str argument called `cid`. It should " +#~ "return a single client instance of " +#~ "type ClientLike. Note that the created" +#~ " client instances are ephemeral and " +#~ "will often be destroyed after a " +#~ "single method invocation. Since client " +#~ "instances are not long-lived, they " +#~ "should not" +#~ msgstr "" + +#~ msgid "attempt to carry state over method invocations. Any state required by" +#~ msgstr "" + +#~ msgid "" +#~ "the instance (model, dataset,hyperparameters, " +#~ "...) should be (re-)created in either" +#~ " the call to `client_fn` or the " +#~ "call to any of the client methods" +#~ " (e.g., load evaluation data in the" +#~ " `evaluate` method itself)." +#~ msgstr "" + +#~ msgid "" +#~ "\\frac{\\mu}{2} || w - w^t ||^2\n" +#~ "\n" +#~ msgstr "" + +#~ msgid "" +#~ "Adaptive Federated Optimization using Adagrad" +#~ " (FedAdagrad) [Reddi et al., 2020] " +#~ "strategy." +#~ msgstr "" + +#~ msgid "" +#~ "Adaptive Federated Optimization using Adam " +#~ "(FedAdam) [Reddi et al., 2020] strategy." +#~ msgstr "" + +#~ msgid "" +#~ "Adaptive Federated Optimization using Yogi " +#~ "(FedYogi) [Reddi et al., 2020] strategy." +#~ msgstr "" + +#~ msgid "Contributing Baselines" +#~ msgstr "Configuration du contributeur" + +#~ msgid "" +#~ "Do you have a new federated " +#~ "learning paper and want to add a" +#~ " new baseline to Flower? Or do " +#~ "you want to add an experiment to" +#~ " an existing baseline paper? Great, " +#~ "we really appreciate your contribution." +#~ msgstr "" + +#~ msgid "" +#~ "The goal of Flower Baselines is to" +#~ " reproduce experiments from popular papers" +#~ " to accelerate researchers by enabling " +#~ "faster comparisons to new strategies, " +#~ "datasets, models, and federated pipelines " +#~ "in general." 
+#~ msgstr "" + +#~ msgid "" +#~ "Before you start to work on a " +#~ "new baseline or experiment, please check" +#~ " the `Flower Issues " +#~ "`_ or `Flower " +#~ "Pull Requests `_ " +#~ "to see if someone else is already" +#~ " working on it. Please open a " +#~ "new issue if you are planning to" +#~ " work on a new baseline or " +#~ "experiment with a short description of" +#~ " the corresponding paper and the " +#~ "experiment you want to contribute." +#~ msgstr "" + +#~ msgid "TL;DR: Adding a new Flower Baseline" +#~ msgstr "" + +#~ msgid "" +#~ "Let's say you want to contribute " +#~ "the code of your most recent " +#~ "Federated Learning publication, *FedAweseome*. " +#~ "There are only three steps necessary " +#~ "to create a new *FedAweseome* Flower " +#~ "Baseline:" +#~ msgstr "" + +#~ msgid "**Get the Flower source code on your machine**" +#~ msgstr "" + +#~ msgid "" +#~ "Fork the Flower codebase: got to " +#~ "the `Flower GitHub repo " +#~ "`_ and fork the " +#~ "code (click the *Fork* button in " +#~ "the top-right corner and follow " +#~ "the instructions)" +#~ msgstr "" + +#~ msgid "" +#~ "Clone the (forked) Flower source code:" +#~ " :code:`git clone " +#~ "git@github.com:[your_github_username]/flower.git`" +#~ msgstr "" + +#~ msgid "" +#~ "Open the code in your favorite " +#~ "editor (e.g., using VSCode: ``cd flower" +#~ " ; code .``)" +#~ msgstr "" + +#~ msgid "**Add the FedAwesome code**" +#~ msgstr "" + +#~ msgid "" +#~ "Add your :code:`FedAwesome` code under " +#~ ":code:`baselines/flwr_baselines/publications/[fedawesome]`" +#~ msgstr "" + +#~ msgid "Add a `pyproject.toml` with all necessary dependencies" +#~ msgstr "" -#~ msgid "Starting a gRPC client with an insecure server connection:" +#~ msgid "Add a `README.md` describing how to use your baseline" #~ msgstr "" -#~ msgid "Starting an SSL-enabled gRPC client:" +#~ msgid "**Open a pull request**" #~ msgstr "" -#~ msgid "Abstract base class for Flower clients using NumPy." 
+#~ msgid "Stage your changes: :code:`git add .`" #~ msgstr "" -#~ msgid "The current (global) model parameters." -#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" +#~ msgid "" +#~ "Commit & push: :code:`git commit -m " +#~ "\"Create new FedAweseome baseline\" ; " +#~ "git push`" +#~ msgstr "" #~ msgid "" -#~ "Configuration parameters which allow the " -#~ "server to influence evaluation on the" -#~ " client. It can be used to " -#~ "communicate arbitrary values from the " -#~ "server to the client, for example, " -#~ "to influence the number of examples " -#~ "used for evaluation." +#~ "Open a pull request: go to *your*" +#~ " fork of the Flower codebase and " +#~ "create a pull request that targets " +#~ "the Flower ``main``` branch" #~ msgstr "" +#~ msgid "Further reading:" +#~ msgstr "Aide supplémentaire" + #~ msgid "" -#~ "* **loss** (*float*) -- The evaluation" -#~ " loss of the model on the local" -#~ " dataset. * **num_examples** (*int*) -- " -#~ "The number of examples used for " -#~ "evaluation. * **metrics** (*Dict[str, " -#~ "Scalar]*) -- A dictionary mapping " -#~ "arbitrary string keys to values of " -#~ "type bool, bytes, float, int, or " -#~ "str. It can be used to " -#~ "communicate arbitrary values back to the" -#~ " server." +#~ "`GitHub docs: About forks " +#~ "`_" #~ msgstr "" #~ msgid "" -#~ "**loss** (*float*) -- The evaluation " -#~ "loss of the model on the local " -#~ "dataset." +#~ "`GitHub docs: Creating a pull request" +#~ " `_" #~ msgstr "" -#~ msgid "**num_examples** (*int*) -- The number of examples used for evaluation." +#~ msgid "" +#~ "`GitHub docs: Creating a pull request" +#~ " from a fork `_" #~ msgstr "" +#~ msgid "Requirements" +#~ msgstr "Changements nécessaires" + #~ msgid "" -#~ "**metrics** (*Dict[str, Scalar]*) -- A " -#~ "dictionary mapping arbitrary string keys " -#~ "to values of type bool, bytes, " -#~ "float, int, or str. 
It can be " -#~ "used to communicate arbitrary values " -#~ "back to the server." +#~ "Contributing a new baseline is really" +#~ " easy. You only have to make " +#~ "sure that your federated learning " +#~ "experiments are running with Flower. As" +#~ " soon as you have created a " +#~ "Flower-based experiment, you can contribute" +#~ " it." #~ msgstr "" #~ msgid "" -#~ "The previous return type format (int," -#~ " float, float) and the extended " -#~ "format (int, float, float, Dict[str, " -#~ "Scalar]) have been deprecated and " -#~ "removed since Flower 0.19." +#~ "It is recommended (but not required) " +#~ "to use `Hydra `_ to " +#~ "execute the experiment." #~ msgstr "" -#~ msgid "Train the provided parameters using the locally held dataset." -#~ msgstr "entraîne le modèle sur l'ensemble d'apprentissage local" +#~ msgid "" +#~ "Please make sure to add your " +#~ "baseline or experiment to the " +#~ "corresponding directory as explained in " +#~ "`Executing Baseline `_. Give your baseline the " +#~ "unique identifier. For example, :code:`fedbn`" +#~ " refers to the paper \"FedBN: " +#~ "Federated Learning on non-IID Features" +#~ " via Local Batch Normalization\" and " +#~ "creates the corresponding directory " +#~ ":code:`flower/baselines/flwr_baselines/publications/fedbn`. Then" +#~ " you create the experiment directory " +#~ "with the experiment name. For example," +#~ " the experiment that measures the " +#~ "convergence has the directory " +#~ ":code:`flower/baselines/flwr_baselines/publications/fedbn/convergence_rate`." +#~ " This directory contains all your " +#~ "code and a :code:`README.md` with a " +#~ "link to the paper, the paper's " +#~ "abstract, and a detailed description of" +#~ " how to execute the experiments." +#~ msgstr "" #~ msgid "" -#~ "Configuration parameters which allow the " -#~ "server to influence training on the " -#~ "client. 
It can be used to " -#~ "communicate arbitrary values from the " -#~ "server to the client, for example, " -#~ "to set the number of (local) " -#~ "training epochs." +#~ "Please also check if :code:`pyproject.toml`" +#~ " and :code:`requirements.txt` (all in the" +#~ " directory `baselines " +#~ "`_ contain" +#~ " all required Python packages (libraries," +#~ " frameworks, ...). If the required " +#~ "Python package is not yet listed, " +#~ "please add it to :code:`pyproject.toml`. " +#~ "If you need a different version of" +#~ " a package already listed, please try" +#~ " to ensure your experiment runs with" +#~ " the existing version listed in " +#~ ":code:`pyproject.toml` (or :code:`requirements.txt`). " +#~ "If that doesn't work, open a " +#~ "GitHub Issue and request the version " +#~ "change." #~ msgstr "" #~ msgid "" -#~ "* **parameters** (*NDArrays*) -- The " -#~ "locally updated model parameters. * " -#~ "**num_examples** (*int*) -- The number " -#~ "of examples used for training. * " -#~ "**metrics** (*Dict[str, Scalar]*) -- A " -#~ "dictionary mapping arbitrary string keys " -#~ "to values of type bool, bytes, " -#~ "float, int, or str. It can be " -#~ "used to communicate arbitrary values " -#~ "back to the server." +#~ "The experiment also needs to contain " +#~ "a file with a downloader for the" +#~ " dataset - if possible automatic. " +#~ "This can be included in one of " +#~ "the files or as an extra file." #~ msgstr "" -#~ msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." -#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" +#~ msgid "" +#~ "Finally, please add plots for all " +#~ "experimental results your code is " +#~ "running to the :code:`experiment` directory" +#~ " and include them in :code:`README.md`. " +#~ "Doing this helps others and enables " +#~ "them to recognize your contributions " +#~ "quickly." 
+#~ msgstr "" -#~ msgid "**num_examples** (*int*) -- The number of examples used for training." +#~ msgid "" +#~ "We are aware that a few libraries" +#~ " are available only via Conda. " +#~ "However, we want to encourage you " +#~ "to ensure that your code also runs" +#~ " well outside of Conda to make " +#~ "it more accessible to the broader " +#~ "research community." +#~ msgstr "" + +#~ msgid "Here is a checklist for adding a new baseline:" #~ msgstr "" #~ msgid "" -#~ "Configuration parameters requested by the " -#~ "server. This can be used to tell" -#~ " the client which parameters are " -#~ "needed along with some Scalar " -#~ "attributes." +#~ "add required Python packages to " +#~ ":code:`pyproject.toml` or :code:`requirements.txt`" #~ msgstr "" #~ msgid "" -#~ "**parameters** -- The local model " -#~ "parameters as a list of NumPy " -#~ "ndarrays." -#~ msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" +#~ "add all required code under " +#~ ":code:`baselines/flwr_baselines/publications/[new_publication]`" +#~ msgstr "" -#~ msgid "Return a client's set of properties." -#~ msgstr "Renvoie l'ensemble des propriétés d'un client." +#~ msgid "add a dataset downloader" +#~ msgstr "" + +#~ msgid "add an experiment plot" +#~ msgstr "" + +#~ msgid "add a :code:`README.md`" +#~ msgstr "" + +#~ msgid "Usability" +#~ msgstr "" #~ msgid "" -#~ "Configuration parameters requested by the " -#~ "server. This can be used to tell" -#~ " the client which properties are " -#~ "needed along with some Scalar " -#~ "attributes." +#~ "Flower is known and loved for its" +#~ " usability. Therefore, make sure that " +#~ "your baseline or experiment can be " +#~ "executed with a single command such " +#~ "as :code:`./run.sh` or :code:`python3 " +#~ "main.py`. 
How you organize the " +#~ "experiments and the related code " +#~ "structure is up to you as an " +#~ "author, but please keep in mind to" +#~ " make sure that other users can " +#~ "easily understand and execute your " +#~ "baseline." +#~ msgstr "" + +#~ msgid "We look forward to your contribution!" +#~ msgstr "Exemple de première contribution" + +#~ msgid "flwr" +#~ msgstr "Fleur" + +#~ msgid "binaries" #~ msgstr "" +#~ msgid "Flower Baselines" +#~ msgstr "Demande pour une nouvelle Flower Baseline" + #~ msgid "" -#~ "**properties** -- A dictionary mapping " -#~ "arbitrary string keys to values of " -#~ "type bool, bytes, float, int, or " -#~ "str. It can be used to communicate" -#~ " arbitrary property values back to " -#~ "the server." +#~ "Flower Baselines are a collection of " +#~ "organised scripts used to reproduce " +#~ "results from well-known publications or" +#~ " benchmarks. You can check which " +#~ "baselines already exist and/or contribute " +#~ "your own baseline." +#~ msgstr "" + +#~ msgid "Flower requires `Python 3.7 `_ or above." +#~ msgstr "`Python 3.7 `_ ou plus" + +#~ msgid "|9e234df38403464899ad3aee36bf1b95|" +#~ msgstr "" + +#~ msgid "|081158351506446f9f772cb45ee68523|" +#~ msgstr "" + +#~ msgid "|e9325042b79c45ed96b5a8d2f6f3cdc9|" +#~ msgstr "" + +#~ msgid "|11b83bb107344db78a37266e080c4a7a|" #~ msgstr "" -#~ msgid "Start a Flower NumPyClient which connects to a gRPC server." +#~ msgid "|cd764bcf6d174a9cb62880ace9a8a6bd|" #~ msgstr "" -#~ msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." +#~ msgid "|5c520984cced41e38f6bb4af416c3f84|" #~ msgstr "" -#~ msgid "Starting a client with an insecure server connection:" +#~ msgid "|66941b0608644cf1a2269a194d3bc0dd|" #~ msgstr "" -#~ msgid "Starting a SSL-enabled client:" +#~ msgid "|4b149f3a095b402bb8890275aabc9298|" #~ msgstr "" -#~ msgid "Start a Ray-based Flower simulation server." 
-#~ msgstr "Simulation de moniteur" +#~ msgid "|675cf7d3d53a4817b5d47529c0758158|" +#~ msgstr "" -#~ msgid "" -#~ "A function creating client instances. " -#~ "The function must take a single " -#~ "`str` argument called `cid`. It should" -#~ " return a single client instance of" -#~ " type ClientLike. Note that the " -#~ "created client instances are ephemeral " -#~ "and will often be destroyed after " -#~ "a single method invocation. Since client" -#~ " instances are not long-lived, they" -#~ " should not attempt to carry state" -#~ " over method invocations. Any state " -#~ "required by the instance (model, " -#~ "dataset, hyperparameters, ...) should be " -#~ "(re-)created in either the call to " -#~ "`client_fn` or the call to any of" -#~ " the client methods (e.g., load " -#~ "evaluation data in the `evaluate` method" -#~ " itself)." +#~ msgid "|7ca594e16ae7477790c2e3cf096ec7cd|" #~ msgstr "" -#~ "Une fonction créant des instances de " -#~ "client. La fonction doit prendre un " -#~ "seul argument `str` appelé `cid`. Elle" -#~ " doit retourner une seule instance de" -#~ " client de type ClientLike. Notez que" -#~ " les instances de client créées sont" -#~ " éphémères et seront souvent détruites " -#~ "après une seule invocation de méthode." -#~ " Puisque les instances de client ne" -#~ " sont pas de longue durée, elles " -#~ "ne doivent pas essayer de transporter" -#~ " l'état sur les invocations de " -#~ "méthode. Tout état requis par l'instance" -#~ " (modèle, jeu de données, hyperparamètres," -#~ " ...) doit être (re)créé dans l'appel" -#~ " à `client_fn` ou dans l'appel à " -#~ "n'importe quelle méthode de client (par" -#~ " exemple, charger les données d'évaluation" -#~ " dans la méthode `evaluate` elle-" -#~ "même)." -#~ msgid "" -#~ "The total number of clients in " -#~ "this simulation. This must be set " -#~ "if `clients_ids` is not set and " -#~ "vice-versa." 
+#~ msgid "|d669336577b545a081d5d74169a9bc4d|" #~ msgstr "" -#~ msgid "" -#~ "List `client_id`s for each client. This" -#~ " is only required if `num_clients` is" -#~ " not set. Setting both `num_clients` " -#~ "and `clients_ids` with `len(clients_ids)` not" -#~ " equal to `num_clients` generates an " -#~ "error." +#~ msgid "|00b3d6cde1ff410ba54eff58da4e033a|" #~ msgstr "" -#~ msgid "" -#~ "CPU and GPU resources for a single" -#~ " client. Supported keys are `num_cpus` " -#~ "and `num_gpus`. Example: `{\"num_cpus\": 4," -#~ " \"num_gpus\": 1}`. To understand the " -#~ "GPU utilization caused by `num_gpus`, " -#~ "consult the Ray documentation on GPU " -#~ "support." +#~ msgid "|29a11f5353084c1995c538f7edef71a5|" #~ msgstr "" -#~ msgid "" -#~ "An implementation of the abstract base" -#~ " class `flwr.server.Server`. If no instance" -#~ " is provided, then `start_server` will " -#~ "create one." +#~ msgid "|d62eda312fd44726bb5db2b761fe7e0d|" #~ msgstr "" -#~ msgid "" -#~ "Currently supported values are `num_rounds`" -#~ " (int, default: 1) and `round_timeout` " -#~ "in seconds (float, default: None)." +#~ msgid "Using Baselines" #~ msgstr "" -#~ msgid "" -#~ "An implementation of the abstract base" -#~ " class `flwr.server.Strategy`. If no " -#~ "strategy is provided, then `start_server` " -#~ "will use `flwr.server.strategy.FedAvg`." +#~ msgid "Structure" #~ msgstr "" #~ msgid "" -#~ "An implementation of the abstract base" -#~ " class `flwr.server.ClientManager`. If no " -#~ "implementation is provided, then " -#~ "`start_simulation` will use " -#~ "`flwr.server.client_manager.SimpleClientManager`." +#~ "All baselines are available in the " +#~ "directory `baselines " +#~ "`_. This " +#~ "directory has two different files:" #~ msgstr "" #~ msgid "" -#~ "Optional dictionary containing arguments for" -#~ " the call to `ray.init`. 
If " -#~ "ray_init_args is None (the default), Ray" -#~ " will be initialized with the " -#~ "following default args: { " -#~ "\"ignore_reinit_error\": True, \"include_dashboard\": " -#~ "False } An empty dictionary can " -#~ "be used (ray_init_args={}) to prevent " -#~ "any arguments from being passed to " -#~ "ray.init." +#~ "Both files contain all the information" +#~ " about required Python packages (libraries," +#~ " frameworks, ...) and their versions. " +#~ "You can install each library separately" +#~ " by using :code: `pip install` or " +#~ "you can use Poetry and run " +#~ "code:`poetry install` in the directory " +#~ "where you find the :code:`pyproject.toml` " +#~ "file. After installing all requirements, " +#~ "you can start to run your " +#~ "baseline." #~ msgstr "" #~ msgid "" -#~ "Optional dictionary containing arguments for" -#~ " the call to `ray.init`. If " -#~ "ray_init_args is None (the default), Ray" -#~ " will be initialized with the " -#~ "following default args:" +#~ "Go to the baseline that you want" +#~ " to execute. The directories and " +#~ "files are structured so that you " +#~ "can first find the paper with " +#~ "their unique identifier such that, for" +#~ " example, :code:`FedProx` refers to the " +#~ "paper \"Federated Optimization in " +#~ "Heterogeneous Networks\". The :code:`fedprox` " +#~ "section contains all available experiments " +#~ "from that paper." #~ msgstr "" -#~ msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#~ msgid "" +#~ "The experiment area contains a " +#~ ":code:`README.md` covering the corresponding " +#~ "paper, its abstract, and goal as " +#~ "well as a detailed description of " +#~ "how to run the baseline. Please " +#~ "use the :code:`README.md` to see how " +#~ "to execute each individual baseline." #~ msgstr "" -#~ msgid "" -#~ "An empty dictionary can be used " -#~ "(ray_init_args={}) to prevent any arguments" -#~ " from being passed to ray.init." 
+#~ msgid "Available Baselines" #~ msgstr "" #~ msgid "" -#~ "Set to True to prevent `ray.shutdown()`" -#~ " in case `ray.is_initialized()=True`." +#~ "The following table lists all currently" +#~ " available baselines and the corresponding" +#~ " papers. If you want to add a" +#~ " new baseline or experiment, please " +#~ "check the `Contributing Baselines " +#~ "`_ section." #~ msgstr "" -#~ msgid "**hist** -- Object containing metrics from training." +#~ msgid "Paper" #~ msgstr "" -#~ msgid "Flower server." -#~ msgstr "Serveur de Flower" - -#~ msgid "Start a Flower server using the gRPC transport layer." +#~ msgid "Experiment" #~ msgstr "" -#~ msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." +#~ msgid "Directory" #~ msgstr "" -#~ msgid "" -#~ "A server implementation, either " -#~ "`flwr.server.Server` or a subclass thereof." -#~ " If no instance is provided, then " -#~ "`start_server` will create one." +#~ msgid "`FedAvg `_" #~ msgstr "" -#~ msgid "" -#~ "An implementation of the abstract base" -#~ " class `flwr.server.strategy.Strategy`. If no " -#~ "strategy is provided, then `start_server` " -#~ "will use `flwr.server.strategy.FedAvg`." +#~ msgid "MNIST" #~ msgstr "" -#~ msgid "" -#~ "An implementation of the abstract base" -#~ " class `flwr.server.ClientManager`. If no " -#~ "implementation is provided, then " -#~ "`start_server` will use " -#~ "`flwr.server.client_manager.SimpleClientManager`." +#~ msgid ":code:`flower/baselines/flwr_baselines/publications/fedavg_mnist/`" #~ msgstr "" -#~ msgid "" -#~ "The maximum length of gRPC messages " -#~ "that can be exchanged with the " -#~ "Flower clients. The default should be" -#~ " sufficient for most models. Users " -#~ "who train very large models might " -#~ "need to increase this value. 
Note " -#~ "that the Flower clients need to be" -#~ " started with the same value (see " -#~ "`flwr.client.start_client`), otherwise clients will" -#~ " not know about the increased limit" -#~ " and block larger messages." +#~ msgid "`FedProx `_" #~ msgstr "" -#~ msgid "" -#~ "Tuple containing root certificate, server " -#~ "certificate, and private key to start" -#~ " a secure SSL-enabled server. The " -#~ "tuple is expected to have three " -#~ "bytes elements in the following order:" -#~ " * CA certificate. * server " -#~ "certificate. * server private key." +#~ msgid ":code:`flower/baselines/flwr_baselines/publications/fedprox_mnist/`" #~ msgstr "" -#~ msgid "" -#~ "Tuple containing root certificate, server " -#~ "certificate, and private key to start" -#~ " a secure SSL-enabled server. The " -#~ "tuple is expected to have three " -#~ "bytes elements in the following order:" +#~ msgid "`FedOpt `_" #~ msgstr "" -#~ msgid "CA certificate." -#~ msgstr "Certificats" - -#~ msgid "server certificate." -#~ msgstr "Certificats" - -#~ msgid "server private key." -#~ msgstr "stratégie.du.serveur" - -#~ msgid "**hist** -- Object containing training and evaluation metrics." +#~ msgid "sparse gradient task" #~ msgstr "" -#~ msgid "Starting an insecure server:" -#~ msgstr "Démarrer le serveur" - -#~ msgid "Starting an SSL-enabled server:" -#~ msgstr "Démarrer le serveur" - -#~ msgid "Contains the strategy abstraction and different implementations." +#~ msgid ":code:`flower/baselines/flwr_baselines/publications/adaptive_federated_optimization`" #~ msgstr "" -#~ msgid "Abstract base class for server strategy implementations." +#~ msgid "`FedBN `_" #~ msgstr "" - -#~ msgid "The current round of federated learning." -#~ msgstr "Qu'est-ce que l'apprentissage fédéré ?" - -#~ msgid "" -#~ "Successful updates from the previously " -#~ "selected and configured clients. 
Each " -#~ "pair of `(ClientProxy, FitRes` constitutes " -#~ "a successful update from one of " -#~ "the previously selected clients. Not " -#~ "that not all previously selected clients" -#~ " are necessarily included in this " -#~ "list: a client might drop out and" -#~ " not submit a result. For each " -#~ "client that did not submit an " -#~ "update, there should be an `Exception`" -#~ " in `failures`." + +#~ msgid "convergence rate" #~ msgstr "" -#~ msgid "" -#~ "Exceptions that occurred while the " -#~ "server was waiting for client updates." +#~ msgid ":code:`flower/baselines/flwr_baselines/publications/fedbn/convergence_rate`" #~ msgstr "" #~ msgid "" -#~ "**aggregation_result** -- The aggregated " -#~ "evaluation result. Aggregation typically uses" -#~ " some variant of a weighted average." +#~ "Flower requires `Python 3.7 " +#~ "`_ or above, we " +#~ "recommend `Python 3.8 " +#~ "`_." #~ msgstr "" +#~ "Flower nécessite `Python 3.7 " +#~ "`_ ou plus, nous " +#~ "recommandons `Python 3.8 " +#~ "`_." -#~ msgid "Aggregate training results." -#~ msgstr "Résultats globaux de l'évaluation." - -#~ msgid "" -#~ "Successful updates from the previously " -#~ "selected and configured clients. Each " -#~ "pair of `(ClientProxy, FitRes)` constitutes" -#~ " a successful update from one of " -#~ "the previously selected clients. Not " -#~ "that not all previously selected clients" -#~ " are necessarily included in this " -#~ "list: a client might drop out and" -#~ " not submit a result. For each " -#~ "client that did not submit an " -#~ "update, there should be an `Exception`" -#~ " in `failures`." +#~ msgid "|6baade94cd14454e82ead34fcc29a182|" #~ msgstr "" -#~ msgid "" -#~ "**parameters** -- If parameters are " -#~ "returned, then the server will treat " -#~ "these as the new global model " -#~ "parameters (i.e., it will replace the" -#~ " previous parameters with the ones " -#~ "returned from this method). 
If `None`" -#~ " is returned (e.g., because there " -#~ "were only failures and no viable " -#~ "results) then the server will no " -#~ "update the previous model parameters, " -#~ "the updates received in this round " -#~ "are discarded, and the global model " -#~ "parameters remain the same." +#~ msgid "|1209ecd819104c458d396cf665c7ed4f|" #~ msgstr "" -#~ msgid "Configure the next round of evaluation." -#~ msgstr "Configuration de l'évaluation côté serveur" +#~ msgid "|c088b02349304344a53f3ce1464225fb|" +#~ msgstr "" -#~ msgid "The client manager which holds all currently connected clients." +#~ msgid "|b54d50afc82a4a57a55997a9eaeb735b|" #~ msgstr "" -#~ msgid "" -#~ "**evaluate_configuration** -- A list of " -#~ "tuples. Each tuple in the list " -#~ "identifies a `ClientProxy` and the " -#~ "`EvaluateIns` for this particular " -#~ "`ClientProxy`. If a particular `ClientProxy`" -#~ " is not included in this list, " -#~ "it means that this `ClientProxy` will" -#~ " not participate in the next round" -#~ " of federated evaluation." +#~ msgid "|d17b57e97b714a25b43790d4b832fd87|" #~ msgstr "" -#~ msgid "Configure the next round of training." +#~ msgid "|38966d05301a4854aa73c8c5033bfaab|" #~ msgstr "" -#~ msgid "" -#~ "**fit_configuration** -- A list of " -#~ "tuples. Each tuple in the list " -#~ "identifies a `ClientProxy` and the " -#~ "`FitIns` for this particular `ClientProxy`." -#~ " If a particular `ClientProxy` is not" -#~ " included in this list, it means " -#~ "that this `ClientProxy` will not " -#~ "participate in the next round of " -#~ "federated learning." +#~ msgid "|231d55f7926d4a5db02dcd724ec62529|" #~ msgstr "" -#~ msgid "Evaluate the current model parameters." -#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" +#~ msgid "|fb44f2e13a1b4b69b7a72234eedd13f4|" +#~ msgstr "" -#~ msgid "" -#~ "This function can be used to " -#~ "perform centralized (i.e., server-side) " -#~ "evaluation of model parameters." 
+#~ msgid "|1cfc77af5d164030942e84d14268c256|" #~ msgstr "" -#~ msgid "" -#~ "**evaluation_result** -- The evaluation " -#~ "result, usually a Tuple containing loss" -#~ " and a dictionary containing task-" -#~ "specific metrics (e.g., accuracy)." +#~ msgid "|0d50828231a64bc08223544a2d2fa216|" #~ msgstr "" -#~ msgid "Initialize the (global) model parameters." -#~ msgstr "Initialise le modèle global" +#~ msgid "|904387757ceb42fbaa1875f3e8061113|" +#~ msgstr "" -#~ msgid "" -#~ "**parameters** -- If parameters are " -#~ "returned, then the server will treat " -#~ "these as the initial global model " -#~ "parameters." +#~ msgid "|68608e1b7c4842458c528b431c715f5a|" #~ msgstr "" -#~ msgid "Configurable FedAvg strategy implementation." -#~ msgstr "Configuration de l'évaluation fédérée" +#~ msgid "|2adb106bda97480bb4b33eac472e321e|" +#~ msgstr "" -#~ msgid "Implementation based on https://arxiv.org/abs/1602.05629" +#~ msgid "|025f0a6f7a6145cba4bf8fa0e2495851|" #~ msgstr "" +#~ msgid "Before the release" +#~ msgstr "Avant la sortie" + #~ msgid "" -#~ "Fraction of clients used during " -#~ "training. In case `min_fit_clients` is " -#~ "larger than `fraction_fit * " -#~ "available_clients`, `min_fit_clients` will still " -#~ "be sampled. Defaults to 1.0." +#~ "Update the changelog (``changelog.md``) with" +#~ " all relevant changes that happened " +#~ "after the last release. If the " +#~ "last release was tagged ``v1.2.0``, you" +#~ " can use the following URL to " +#~ "see all commits that got merged " +#~ "into ``main`` since then:" #~ msgstr "" +#~ "Mettez à jour le journal des " +#~ "modifications (``changelog.md``) avec tous les" +#~ " changements pertinents qui se sont " +#~ "produits après la dernière version. 
Si" +#~ " la dernière version a été étiquetée" +#~ " ``v1.2.0``, vous pouvez utiliser l'URL " +#~ "suivante pour voir tous les commits " +#~ "qui ont été fusionnés dans ``main`` " +#~ "depuis lors :" #~ msgid "" -#~ "Fraction of clients used during " -#~ "validation. In case `min_evaluate_clients` is" -#~ " larger than `fraction_evaluate * " -#~ "available_clients`, `min_evaluate_clients` will " -#~ "still be sampled. Defaults to 1.0." +#~ "`GitHub: Compare v1.2.0...main " +#~ "`_" #~ msgstr "" +#~ "`GitHub : Compare v1.2.0...main " +#~ "`_" -#~ msgid "Minimum number of clients used during training. Defaults to 2." +#~ msgid "" +#~ "Thank the authors who contributed since" +#~ " the last release. This command helps" +#~ " extract them: ``git log --format='%aN' " +#~ "v1.1.0..HEAD | sort -u``. The command" +#~ " has the same order as ``git " +#~ "shortlog``." #~ msgstr "" +#~ "Remerciez les auteurs qui ont contribué" +#~ " depuis la dernière version. Cette " +#~ "commande permet de les extraire : " +#~ "``git log --format='%aN' v1.1.0..HEAD | " +#~ "sort -u``. La commande a le même" +#~ " ordre que ``git shortlog``." -#~ msgid "Minimum number of clients used during validation. Defaults to 2." +#~ msgid "" +#~ "Update the ``changelog.md`` section header " +#~ "``Unreleased`` to contain the version " +#~ "number and date for the release " +#~ "you are building. Create a pull " +#~ "request with the change." #~ msgstr "" +#~ "Mettez à jour l'en-tête de section" +#~ " ``changelog.md`` ``Unreleased`` pour qu'il " +#~ "contienne le numéro de version et " +#~ "la date de la version que vous " +#~ "construisez. Créez une demande de " +#~ "traction avec le changement." -#~ msgid "Minimum number of total clients in the system. Defaults to 2." 
+#~ msgid "" +#~ "Tag the release commit with the " +#~ "version number as soon as the PR" +#~ " is merged: ``git tag v0.12.3``, then" +#~ " ``git push --tags``" #~ msgstr "" +#~ "Marquez le commit de la version " +#~ "avec le numéro de version dès que" +#~ " le PR est fusionné : ``git tag" +#~ " v0.12.3``, puis ``git push --tags``" -#~ msgid "Optional function used for validation. Defaults to None." +#~ msgid "" +#~ "Build the release with ``./dev/build.sh``, " +#~ "then publish it with ``./dev/publish.sh``" #~ msgstr "" +#~ "Construisez la version avec " +#~ "``./dev/build.sh``, puis publiez-la avec " +#~ "``./dev/publish.sh``" -#~ msgid "Function used to configure training. Defaults to None." +#~ msgid "" +#~ "Create an entry in GitHub releases " +#~ "with the release notes for the " +#~ "previously tagged commit and attach the" +#~ " build artifacts (:code:`.whl` and " +#~ ":code:`.tar.gz`)." #~ msgstr "" +#~ "Crée une entrée dans GitHub releases " +#~ "avec les notes de version pour le" +#~ " commit précédemment étiqueté et attache" +#~ " les artefacts de construction " +#~ "(:code:`.whl` et :code:`.tar.gz`)." -#~ msgid "Function used to configure validation. Defaults to None." +#~ msgid "" +#~ "Second, create a virtual environment " +#~ "(and activate it). If you chose to" +#~ " use :code:`pyenv` (with the :code" +#~ ":`pyenv-virtualenv` plugin) and already " +#~ "have it installed , you can use" +#~ " the following convenience script (by " +#~ "default it will use :code:`Python " +#~ "3.8.17`, but you can change it by" +#~ " providing a specific :code:``)::" #~ msgstr "" +#~ "Deuxièmement, créer un environnement virtuel" +#~ " (et l'activer). 
Si vous choisissez " +#~ "d'utiliser :code:`pyenv` (avec le plugin " +#~ ":code:`pyenv-virtualenv`) et que vous " +#~ "l'avez déjà installé, vous pouvez " +#~ "utiliser le script suivant (par défaut" +#~ " il utilisera :code:`Python 3.8.17`, mais" +#~ " vous pouvez le changer en " +#~ "fournissant une :code:`` spécifique)::" -#~ msgid "Whether or not accept rounds containing failures. Defaults to True." -#~ msgstr "" +#~ msgid "server.strategy.FedAvg" +#~ msgstr "serveur.stratégie.FedAvg" -#~ msgid "Initial global model parameters." -#~ msgstr "Initialise le modèle global" +#~ msgid "server.strategy.FedAvgM" +#~ msgstr "stratégie.serveur.FedAvgM" -#~ msgid "Metrics aggregation function, optional." -#~ msgstr "" +#~ msgid "server.strategy.QFedAvg" +#~ msgstr "server.strategy.QFedAvg" -#~ msgid "Aggregate evaluation losses using weighted average." -#~ msgstr "Résultats globaux de l'évaluation." +#~ msgid "server.strategy.FedOpt" +#~ msgstr "serveur.stratégie.FedOpt" -#~ msgid "Aggregate fit results using weighted average." -#~ msgstr "" +#~ msgid "server.strategy.FedProx" +#~ msgstr "serveur.stratégie.FedProx" -#~ msgid "Evaluate model parameters using an evaluation function." -#~ msgstr "" +#~ msgid "server.strategy.FedAdagrad" +#~ msgstr "serveur.stratégie.FedAdagrad" -#~ msgid "Initialize global model parameters." -#~ msgstr "Initialise le modèle global" +#~ msgid "server.strategy.FedAdam" +#~ msgstr "serveur.stratégie.FedAdam" -#~ msgid "Use a fraction of available clients for evaluation." +#~ msgid "server.strategy.FedYogi" +#~ msgstr "serveur.stratégie.FedYogi" + +#~ msgid "" +#~ "`achiverram28`, `Adam Narozniak`, `Anass " +#~ "Anhari`, `Charles Beauville`, `Dana-Farber`," +#~ " `Daniel J. 
Beutel`, `Daniel Nata " +#~ "Nugraha`, `Edoardo Gabrielli`, `eunchung`, " +#~ "`Gustavo Bertoli`, `Heng Pan`, `Javier`, " +#~ "`Mahdi`, `Ruth Galindo`, `Steven Hé " +#~ "(Sīchàng)`, `Taner Topal`" #~ msgstr "" -#~ msgid "Return the sample size and the required number of available clients." +#~ msgid "" +#~ "Let's now load the CIFAR-10 training " +#~ "and test set, partition them into " +#~ "ten smaller datasets (each split into" +#~ " training and validation set), and " +#~ "wrap the resulting partitions by " +#~ "creating a PyTorch ``DataLoader`` for " +#~ "each of them:" #~ msgstr "" +#~ "Chargeons maintenant l'ensemble de formation" +#~ " et de test CIFAR-10, partitionnons-" +#~ "les en dix ensembles de données " +#~ "plus petits (chacun divisé en ensemble" +#~ " de formation et de validation), et" +#~ " enveloppons les partitions résultantes en" +#~ " créant un PyTorch ``DataLoader`` pour " +#~ "chacun d'entre eux :" -#~ msgid "Configurable FedAvg with Momentum strategy implementation." +#~ msgid "" +#~ "Let's build a horizontal federated " +#~ "learning system using XGBoost and " +#~ "Flower!" #~ msgstr "" +#~ "Construisons un système d'apprentissage fédéré" +#~ " horizontal en utilisant XGBoost et " +#~ "Flower !" -#~ msgid "Federated Averaging with Momentum strategy." -#~ msgstr "Stratégie de moyenne fédérée." +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." +#~ msgstr "" +#~ "Réfère-toi à l'exemple de code " +#~ "complet `_ pour en " +#~ "savoir plus." -#~ msgid "Implementation based on https://arxiv.org/pdf/1909.06335.pdf" +#~ msgid "|3ff4c820a01d4a5abb022617de537c54|" #~ msgstr "" -#~ msgid "Fraction of clients used during training. Defaults to 0.1." +#~ msgid "|7f1889391ad448e2a65920165f0d798c|" #~ msgstr "" -#~ msgid "Fraction of clients used during validation. Defaults to 0.1." 
+#~ msgid "|a171dc4a0d044e70b5d585cc10ace0e0|" #~ msgstr "" -#~ msgid "" -#~ "Server-side learning rate used in " -#~ "server-side optimization. Defaults to 1.0." +#~ msgid "|fe518aa0d86341f7b2fc87bd6e3bbf0c|" #~ msgstr "" -#~ msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." +#~ msgid "|6abfdf0dade44469ae9f08c8dc7d148c|" #~ msgstr "" -#~ msgid "Configurable QFedAvg strategy implementation." +#~ msgid "|b4f147db24bb4da9a786e1d6676a1c2d|" #~ msgstr "" -#~ msgid "Configurable fault-tolerant FedAvg strategy implementation." +#~ msgid "|5c62032f589a457bb37b5fee5b2adbde|" #~ msgstr "" -#~ msgid "Configurable FedAdagrad strategy implementation." +#~ msgid "|f154df1846dd44f79a94f1dc3ae8b088|" #~ msgstr "" -#~ msgid "Federated Optim strategy interface." +#~ msgid "|9d20be8160f7451fb0f33b194506503f|" #~ msgstr "" -#~ msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" +#~ msgid "|3d949f76988443c59990d2e64f05c386|" #~ msgstr "" -#~ "FedYogi - Stratégie d'apprentissage fédéré " -#~ "utilisant Yogi côté serveur. Mise en " -#~ "oeuvre basée sur https://arxiv.org/abs/2003.00295" -#~ msgid "Fraction of clients used during training. Defaults to 1.0." +#~ msgid "|526c6d9140f6404f8a226d9056327b3b|" #~ msgstr "" -#~ msgid "Fraction of clients used during validation. Defaults to 1.0." +#~ msgid "|a5f6af14cd7c4550929b17f83b4f63c7|" #~ msgstr "" -#~ msgid "Server-side learning rate. Defaults to 1e-1." +#~ msgid "|bcd571c4f4ee4803a54f71b5c20448cb|" #~ msgstr "" -#~ msgid "Client-side learning rate. Defaults to 1e-1." +#~ msgid "|c76452ae1ed84965be7ef23c72b95845|" #~ msgstr "" -#~ msgid "Momentum parameter. Defaults to 0.0." +#~ msgid "" +#~ "Please follow the first section on " +#~ "`Run Flower using Docker " +#~ "`_ which covers this" +#~ " step in more detail." #~ msgstr "" -#~ msgid "Second moment parameter. Defaults to 0.0." 
+#~ msgid "" +#~ "Since `Flower 1.5 `_ we have " +#~ "introduced translations to our doc " +#~ "pages, but, as you might have " +#~ "noticed, the translations are often " +#~ "imperfect. If you speak languages other" +#~ " than English, you might be able " +#~ "to help us in our effort to " +#~ "make Federated Learning accessible to as" +#~ " many people as possible by " +#~ "contributing to those translations! This " +#~ "might also be a great opportunity " +#~ "for those wanting to become open " +#~ "source contributors with little prerequistes." #~ msgstr "" -#~ msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." +#~ msgid "" +#~ "You input your translation in the " +#~ "textbox at the top and then, once" +#~ " you are happy with it, you " +#~ "either press ``Save and continue`` (to" +#~ " save the translation and go to " +#~ "the next untranslated string), ``Save " +#~ "and stay`` (to save the translation " +#~ "and stay on the same page), " +#~ "``Suggest`` (to add your translation to" +#~ " suggestions for other users to " +#~ "view), or ``Skip`` (to go to the" +#~ " next untranslated string without saving" +#~ " anything)." #~ msgstr "" -#~ msgid "Configurable FedProx strategy implementation." +#~ msgid "" +#~ "If the section is completely empty " +#~ "(without any token) or non-existant, " +#~ "the changelog will just contain the " +#~ "title of the PR for the changelog" +#~ " entry, without any description." #~ msgstr "" -#~ msgid "Federated Optimization strategy." -#~ msgstr "Stratégie de moyenne fédérée." +#~ msgid "Example: Walk-Through PyTorch & MNIST" +#~ msgstr "Exemple : PyTorch et MNIST" -#~ msgid "Implementation based on https://arxiv.org/abs/1812.06127" +#~ msgid "" +#~ "In this tutorial we will learn, " +#~ "how to train a Convolutional Neural " +#~ "Network on MNIST using Flower and " +#~ "PyTorch." 
#~ msgstr "" +#~ "Dans ce tutoriel, nous allons apprendre," +#~ " comment former un réseau neuronal " +#~ "convolutif sur MNIST en utilisant Flower" +#~ " et PyTorch." #~ msgid "" -#~ "The strategy in itself will not be" -#~ " different than FedAvg, the client " -#~ "needs to be adjusted. A proximal " -#~ "term needs to be added to the " -#~ "loss function during the training:" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead an install PyTorch and " +#~ "the **torchvision** library:" #~ msgstr "" +#~ "Puisque nous voulons utiliser PyTorch " +#~ "pour résoudre une tâche de vision " +#~ "par ordinateur, installons PyTorch et la" +#~ " bibliothèque **torchvision** :" -#~ msgid "" -#~ "\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" -#~ "\n" -#~ msgstr "\\\\frac{\\Nmu}{2} || w - w^t ||^2" +#~ msgid "Ready... Set... Train!" +#~ msgstr "Prêts... prêts... entraînez-vous !" #~ msgid "" -#~ "Where $w^t$ are the global parameters" -#~ " and $w$ are the local weights " -#~ "the function will be optimized with." +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Basic MNIST Example" +#~ " `_. " +#~ "This will allow you see how easy" +#~ " it is to wrap your code with" +#~ " Flower and begin training in a " +#~ "federated way. We provide you with " +#~ "two helper scripts, namely *run-" +#~ "server.sh*, and *run-clients.sh*. Don't " +#~ "be afraid to look inside, they are" +#~ " simple enough =)." #~ msgstr "" +#~ "Maintenant que nous avons installé " +#~ "toutes nos dépendances, lançons un " +#~ "simple entraînement distribué avec deux " +#~ "clients et un serveur. Notre procédure" +#~ " d'entraînement et l'architecture de notre" +#~ " réseau sont basées sur l'exemple " +#~ "MNIST de base de PyTorch " +#~ "`_. 
Cela" +#~ " te permettra de voir à quel " +#~ "point il est facile d'envelopper ton " +#~ "code avec Flower et de commencer " +#~ "l'entraînement de manière fédérée. Nous " +#~ "te fournissons deux scripts d'aide, à" +#~ " savoir *run-server.sh*, et *run-" +#~ "clients.sh*. N'aie pas peur de regarder" +#~ " à l'intérieur, ils sont assez " +#~ "simples =)." -#~ msgid "In PyTorch, for example, the loss would go from:" -#~ msgstr "" +#~ msgid "" +#~ "Go ahead and launch on a terminal" +#~ " the *run-server.sh* script first as" +#~ " follows:" +#~ msgstr "Lance sur un terminal le script *run-server.sh* d'abord comme suit :" -#~ msgid "To:" -#~ msgstr "" +#~ msgid "Now that the server is up and running, go ahead and launch the clients." +#~ msgstr "Maintenant que le serveur est opérationnel, vas-y et lance les clients." #~ msgid "" -#~ "With `global_params` being a copy of " -#~ "the parameters before the training takes" -#~ " place." +#~ "Et voilà! You should be seeing the" +#~ " training procedure and, after a few" +#~ " iterations, the test accuracy for " +#~ "each client." #~ msgstr "" +#~ "Et voilà ! Tu devrais voir la " +#~ "procédure d'entraînement et, après quelques" +#~ " itérations, la précision du test " +#~ "pour chaque client." -#~ msgid "" -#~ "The weight of the proximal term " -#~ "used in the optimization. 0.0 makes " -#~ "this strategy equivalent to FedAvg, and" -#~ " the higher the coefficient, the more" -#~ " regularization will be used (that " -#~ "is, the client parameters will need " -#~ "to be closer to the server " -#~ "parameters during training)." -#~ msgstr "" +#~ msgid "Now, let's see what is really happening inside." +#~ msgstr "Maintenant, voyons ce qui se passe réellement à l'intérieur." 
-#~ msgid "Sends the proximal factor mu to the clients" +#~ msgid "" +#~ "Inside the server helper script *run-" +#~ "server.sh* you will find the following" +#~ " code that basically runs the " +#~ ":code:`server.py`" #~ msgstr "" +#~ "Dans le script d'aide au serveur " +#~ "*run-server.sh*, tu trouveras le code " +#~ "suivant qui exécute le fichier " +#~ ":code:`server.py`" -#~ msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." +#~ msgid "" +#~ "We can go a bit deeper and " +#~ "see that :code:`server.py` simply launches " +#~ "a server that will coordinate three " +#~ "rounds of training. Flower Servers are" +#~ " very customizable, but for simple " +#~ "workloads, we can start a server " +#~ "using the :ref:`start_server ` function and leave " +#~ "all the configuration possibilities at " +#~ "their default values, as seen below." #~ msgstr "" -#~ "Stratégie FedAdagrad - Optimisation fédérée" -#~ " adaptative à l'aide d'Adagrad." +#~ "Nous pouvons aller un peu plus " +#~ "loin et voir que :code:`server.py` lance" +#~ " simplement un serveur qui coordonnera " +#~ "trois tours de formation. Flower Les " +#~ "serveurs sont très personnalisables, mais " +#~ "pour les charges de travail simples, " +#~ "nous pouvons démarrer un serveur à " +#~ "l'aide de la fonction :ref:`start_server " +#~ "` et " +#~ "laisser toutes les possibilités de " +#~ "configuration à leurs valeurs par " +#~ "défaut, comme on peut le voir " +#~ "ci-dessous." -#~ msgid "Paper: https://arxiv.org/abs/2003.00295" +#~ msgid "" +#~ "Next, let's take a look at the " +#~ "*run-clients.sh* file. You will see " +#~ "that it contains the main loop " +#~ "that starts a set of *clients*." #~ msgstr "" +#~ "Ensuite, jetons un coup d'œil au " +#~ "fichier *run-clients.sh*. Tu verras " +#~ "qu'il contient la boucle principale qui" +#~ " démarre un ensemble de *clients*." -#~ msgid "Federated learning strategy using Adagrad on server-side." 
+#~ msgid "" +#~ "**cid**: is the client ID. It is" +#~ " an integer that uniquely identifies " +#~ "client identifier." #~ msgstr "" -#~ "Construisons un système d'apprentissage fédéré" -#~ " en utilisant fastai et Flower !" +#~ "**cid** : c'est l'identifiant du client." +#~ " C'est un nombre entier qui identifie" +#~ " de façon unique l'identifiant du " +#~ "client." -#~ msgid "FedAdam - Adaptive Federated Optimization using Adam." -#~ msgstr "FedAdam - Optimisation fédérée adaptative utilisant Adam." +#~ msgid "**sever_address**: String that identifies IP and port of the server." +#~ msgstr "**sever_address** : Chaîne qui identifie l'IP et le port du serveur." -#~ msgid "Momentum parameter. Defaults to 0.9." +#~ msgid "" +#~ "**nb_clients**: This defines the number " +#~ "of clients being created. This piece " +#~ "of information is not required by " +#~ "the client, but it helps us " +#~ "partition the original MNIST dataset to" +#~ " make sure that every client is " +#~ "working on unique subsets of both " +#~ "*training* and *test* sets." #~ msgstr "" +#~ "**Cette information n'est pas requise " +#~ "par le client, mais elle nous aide" +#~ " à partitionner l'ensemble de données " +#~ "MNIST original pour nous assurer que " +#~ "chaque client travaille sur des sous-" +#~ "ensembles uniques des ensembles *formation*" +#~ " et *test*." -#~ msgid "Second moment parameter. Defaults to 0.99." +#~ msgid "" +#~ "Again, we can go deeper and look" +#~ " inside :code:`flwr_example/quickstart-" +#~ "pytorch/client.py`. After going through the" +#~ " argument parsing code at the " +#~ "beginning of our :code:`main` function, " +#~ "you will find a call to " +#~ ":code:`mnist.load_data`. This function is " +#~ "responsible for partitioning the original " +#~ "MNIST datasets (*training* and *test*) " +#~ "and returning a :code:`torch.utils.data.DataLoader`" +#~ " s for each of them. 
We then" +#~ " instantiate a :code:`PytorchMNISTClient` object" +#~ " with our client ID, our DataLoaders," +#~ " the number of epochs in each " +#~ "round, and which device we want to" +#~ " use for training (CPU or GPU)." #~ msgstr "" - -#~ msgid "FedYogi [Reddi et al., 2020] strategy." -#~ msgstr "Stratégie FedYogi [Reddi et al., 2020]." - -#~ msgid "Adaptive Federated Optimization using Yogi." -#~ msgstr "Optimisation fédérée adaptative à l'aide de Yogi." - -#~ msgid "Federated learning strategy using Yogi on server-side." -#~ msgstr "L'apprentissage fédéré en cinq étapes" - -#~ msgid "Differential Privacy Wrappers in Flower" -#~ msgstr "Les enveloppes différentielles de confidentialité dans les fleurs" - -#~ msgid "Evaluation" -#~ msgstr "Solution" - -#~ msgid "Code examples" -#~ msgstr "Exemple de code complet" +#~ "Encore une fois, nous pouvons aller " +#~ "plus loin et regarder dans " +#~ ":code:`flwr_example/quickstart-pytorch/client.py`. Après" +#~ " avoir parcouru le code d'analyse des" +#~ " arguments au début de notre fonction" +#~ " :code:`main`, tu trouveras un appel " +#~ "à :code:`mnist.load_data`. Cette fonction est" +#~ " responsable du partitionnement des " +#~ "ensembles de données MNIST originaux " +#~ "(*training* et *test*) et renvoie un " +#~ ":code:`torch.utils.data.DataLoader` s pour chacun" +#~ " d'entre eux. Nous instancions ensuite " +#~ "un objet :code:`PytorchMNISTClient` avec notre" +#~ " ID client, nos DataLoaders, le " +#~ "nombre d'époques dans chaque tour et " +#~ "le périphérique que nous voulons " +#~ "utiliser pour l'entraînement (CPU ou " +#~ "GPU)." #~ msgid "" -#~ "Flower Quickstart (PyTorch): coming soon " -#~ "(the TensorFlow/Keras example can easily " -#~ "be changed to make it work with" -#~ " PyTorch)" +#~ "The :code:`PytorchMNISTClient` object when " +#~ "finally passed to :code:`fl.client.start_client` " +#~ "along with the server's address as " +#~ "the training process begins." 
#~ msgstr "" +#~ "L'objet :code:`PytorchMNISTClient` est finalement" +#~ " transmis à :code:`fl.client.start_client` avec" +#~ " l'adresse du serveur lorsque le " +#~ "processus de formation commence." -#~ msgid "First time contributors" -#~ msgstr "Bonnes premières contributions" +#~ msgid "A Closer Look" +#~ msgstr "Regarder de plus près" -#~ msgid "First MXNet 1.6 example (MNIST)" +#~ msgid "" +#~ "Now, let's look closely into the " +#~ ":code:`PytorchMNISTClient` inside :code:`flwr_example" +#~ ".quickstart-pytorch.mnist` and see what it" +#~ " is doing:" #~ msgstr "" +#~ "Maintenant, examinons de près le " +#~ ":code:`PytorchMNISTClient` à l'intérieur du " +#~ ":code:`flwr_example.quickstart-pytorch.mnist` et " +#~ "voyons ce qu'il fait :" -#~ msgid "ImageNet (PyTorch/TensorFlow)" +#~ msgid "" +#~ "The first thing to notice is that" +#~ " :code:`PytorchMNISTClient` instantiates a CNN" +#~ " model inside its constructor" #~ msgstr "" +#~ "La première chose à remarquer est " +#~ "que :code:`PytorchMNISTClient` instancie un " +#~ "modèle CNN dans son constructeur" -#~ msgid "LSTM (PyTorch/TensorFlow)" +#~ msgid "" +#~ "The code for the CNN is available" +#~ " under :code:`quickstart-pytorch.mnist` and " +#~ "it is reproduced below. It is the" +#~ " same network found in `Basic MNIST" +#~ " Example " +#~ "`_." #~ msgstr "" +#~ "Le code du CNN est disponible sous" +#~ " :code:`quickstart-pytorch.mnist` et il est" +#~ " reproduit ci-dessous. Il s'agit du" +#~ " même réseau que celui que l'on " +#~ "trouve dans `Exemple basique de MNIST" +#~ " `_." 
-#~ msgid "Transformer (PyTorch/TensorFlow)" +#~ msgid "" +#~ "The second thing to notice is that" +#~ " :code:`PytorchMNISTClient` class inherits from" +#~ " the :code:`fl.client.Client`, and hence it" +#~ " must implement the following methods:" #~ msgstr "" +#~ "La deuxième chose à noter est que" +#~ " la classe :code:`PytorchMNISTClient` hérite " +#~ "de :code:`fl.client.Client`, et qu'elle doit" +#~ " donc implémenter les méthodes suivantes" +#~ " :" -#~ msgid "BERT (PyTorch/TensorFlow)" +#~ msgid "" +#~ "When comparing the abstract class to " +#~ "its derived class :code:`PytorchMNISTClient` " +#~ "you will notice that :code:`fit` calls" +#~ " a :code:`train` function and that " +#~ ":code:`evaluate` calls a :code:`test`: " +#~ "function." #~ msgstr "" +#~ "En comparant la classe abstraite à " +#~ "sa classe dérivée :code:`PytorchMNISTClient`, " +#~ "tu remarqueras que :code:`fit` appelle " +#~ "une fonction :code:`train` et que " +#~ ":code:`evaluate` appelle une fonction " +#~ ":code:`test` :." 
-#~ msgid "Logging" -#~ msgstr "Enregistrement" - -#~ msgid "|cce04c6f539b421a91f5dba40287193f|" -#~ msgstr "|cce04c6f539b421a91f5dba40287193f|" - -#~ msgid "|e392aef42ba248e19e35446f95a6d1ca|" -#~ msgstr "|e392aef42ba248e19e35446f95a6d1ca|" - -#~ msgid "|7e028f44defe4f31a02debc729f2010d|" -#~ msgstr "|7e028f44defe4f31a02debc729f2010d|" - -#~ msgid "|b89f7b7ae05e4ecd92baa69b7a9fe1be|" -#~ msgstr "|b89f7b7ae05e4ecd92baa69b7a9fe1be|" - -#~ msgid "|9c0445ce962744e1a1c0a4abc697a334|" -#~ msgstr "|9c0445ce962744e1a1c0a4abc697a334|" - -#~ msgid "|a3246766a6db412888131b3bcdad0971|" -#~ msgstr "|a3246766a6db412888131b3bcdad0971|" - -#~ msgid "|db6f2bee32f143b8a5085b6a8ce1acd1|" -#~ msgstr "|db6f2bee32f143b8a5085b6a8ce1acd1|" - -#~ msgid "|405653bc8f874e9595fd59cc82b3d48c|" -#~ msgstr "|405653bc8f874e9595fd59cc82b3d48c|" - -#~ msgid "|073a728154ed406e8fe54e1d9f18dcb9|" -#~ msgstr "|073a728154ed406e8fe54e1d9f18dcb9|" - -#~ msgid "|50e80ea4f22945848b65ed7eed35e0e1|" -#~ msgstr "|50e80ea4f22945848b65ed7eed35e0e1|" - -#~ msgid "|f3cf9148d85e4b68b66b6c255b25e327|" -#~ msgstr "|f3cf9148d85e4b68b66b6c255b25e327|" - -#~ msgid "|1fedb4f8714947e1b13f03696180c741|" -#~ msgstr "|1fedb4f8714947e1b13f03696180c741|" - -#~ msgid "|a32d4ad1ccb34461942d75c7b2b51d65|" -#~ msgstr "|a32d4ad1ccb34461942d75c7b2b51d65|" - -#~ msgid "|3531696c52904cd3b9944034ab959d48|" -#~ msgstr "|3531696c52904cd3b9944034ab959d48|" - -#~ msgid "An Introduction to Federated Learning" -#~ msgstr "Mise à l'échelle de l'apprentissage fédéré" - -#~ msgid "Strategies in Federated Learning" -#~ msgstr "Mise à l'échelle de l'apprentissage fédéré" - -#~ msgid "Building a Strategy" -#~ msgstr "Stratégies intégrées" +#~ msgid "" +#~ "These functions can both be found " +#~ "inside the same :code:`quickstart-" +#~ "pytorch.mnist` module:" +#~ msgstr "" +#~ "Ces fonctions se trouvent toutes deux" +#~ " dans le même module :code:`quickstart-" +#~ "pytorch.mnist` :" -#~ msgid "Client and NumPyClient" -#~ msgstr "NumPyClient" +#~ 
msgid "" +#~ "Observe that these functions encapsulate " +#~ "regular training and test loops and " +#~ "provide :code:`fit` and :code:`evaluate` with" +#~ " final statistics for each round. You" +#~ " could substitute them with your " +#~ "custom train and test loops and " +#~ "change the network architecture, and the" +#~ " entire example would still work " +#~ "flawlessly. As a matter of fact, " +#~ "why not try and modify the code" +#~ " to an example of your liking?" +#~ msgstr "" +#~ "Observe que ces fonctions encapsulent " +#~ "les boucles d'entraînement et de test" +#~ " habituelles et fournissent à :code:`fit`" +#~ " et :code:`evaluate` les statistiques " +#~ "finales pour chaque tour. Tu pourrais" +#~ " les remplacer par tes boucles " +#~ "d'entraînement et de test personnalisées " +#~ "et changer l'architecture du réseau, et" +#~ " l'ensemble de l'exemple fonctionnerait " +#~ "toujours parfaitement. En fait, pourquoi " +#~ "ne pas essayer de modifier le code" +#~ " pour en faire un exemple qui " +#~ "te plairait ?" -#~ msgid "Strategies" -#~ msgstr "Stratégies personnalisées" +#~ msgid "Give It a Try" +#~ msgstr "Fais un essai" -#~ msgid "SSL-enabled Server and Client" +#~ msgid "" +#~ "Looking through the quickstart code " +#~ "description above will have given a " +#~ "good understanding of how *clients* and" +#~ " *servers* work in Flower, how to " +#~ "run a simple experiment, and the " +#~ "internals of a client wrapper. Here " +#~ "are a few things you could try " +#~ "on your own and get more " +#~ "experience with Flower:" #~ msgstr "" +#~ "En parcourant la description du code " +#~ "de démarrage rapide ci-dessus, tu " +#~ "auras acquis une bonne compréhension du" +#~ " fonctionnement des *clients* et des " +#~ "*serveurs* dans Flower, de l'exécution " +#~ "d'une expérience simple et de la " +#~ "structure interne d'un wrapper client. 
" +#~ "Voici quelques exemples que tu peux " +#~ "essayer par toi-même pour acquérir " +#~ "plus d'expérience avec Flower :" -#~ msgid "About these documents" -#~ msgstr "À propos de ces documents" - -#~ msgid "Index" -#~ msgstr "Index" - -#~ msgid "Search" -#~ msgstr "Recherche" - -#~ msgid "Copyright" -#~ msgstr "Droits d'auteur" - -#~ msgid "Save Progress" +#~ msgid "" +#~ "Try and change :code:`PytorchMNISTClient` so" +#~ " it can accept different architectures." #~ msgstr "" +#~ "Essaie de modifier :code:`PytorchMNISTClient` " +#~ "pour qu'il puisse accepter différentes " +#~ "architectures." #~ msgid "" -#~ "The Flower server does not prescribe " -#~ "a way to persist model updates or" -#~ " evaluation results. Flower does not " -#~ "(yet) automatically save model updates " -#~ "on the server-side. It's on the" -#~ " roadmap to provide a built-in " -#~ "way of doing this." +#~ "Modify the :code:`train` function so " +#~ "that it accepts different optimizers" #~ msgstr "" +#~ "Modifie la fonction :code:`train` pour " +#~ "qu'elle accepte différents optimiseurs" -#~ msgid "Release Process" -#~ msgstr "Publier Flower" - -#~ msgid "Virtual Env Installation" -#~ msgstr "Virtualenv avec Anaconda" - -#~ msgid "Install development versions" -#~ msgstr "Installer les versions de développement de Flower" - -#~ msgid "Set up a virtual env" -#~ msgstr "Mettre en place un environment virtuel" +#~ msgid "" +#~ "Modify the :code:`test` function so that" +#~ " it proves not only the top-1 " +#~ "(regular accuracy) but also the top-5" +#~ " accuracy?" +#~ msgstr "" +#~ "Modifie la fonction :code:`test` pour " +#~ "qu'elle prouve non seulement le top-1" +#~ " (précision normale) mais aussi le " +#~ "top-5 ?" #~ msgid "" -#~ "Note that, in order to build the" -#~ " documentation locally (with ``poetry run" -#~ " make html``, like described below), " -#~ "`Pandoc _` needs " -#~ "to be installed on the system." +#~ "Go larger! 
Try to adapt the code" +#~ " to larger images and datasets. Why" +#~ " not try training on ImageNet with" +#~ " a ResNet-50?" #~ msgstr "" -#~ "Notez que, pour construire la " -#~ "documentation localement (avec ``poetry run" -#~ " make html``, comme décrit ci-" -#~ "dessous), ``Pandoc _`" -#~ " doit être installé sur le système." +#~ "Essaie d'adapter le code à des " +#~ "images et à des ensembles de " +#~ "données plus grands. Pourquoi ne pas " +#~ "essayer de s'entraîner sur ImageNet avec" +#~ " un ResNet-50 ?" -#~ msgid "Llama 2 fine-tuning, with Hugging Face Transformers and PyTorch" -#~ msgstr "Un fine-tuning de LLaMA 2 avec Hugging Face et PyTorch" +#~ msgid "You are ready now. Enjoy learning in a federated way!" +#~ msgstr "Tu es prêt maintenant. Profite de l'apprentissage de manière fédérée !" -#~ msgid "XGBoost" -#~ msgstr "XGBoost" +#~ msgid "" +#~ "Flower provides differential privacy (DP) " +#~ "wrapper classes for the easy integration" +#~ " of the central DP guarantees " +#~ "provided by DP-FedAvg into training " +#~ "pipelines defined in any of the " +#~ "various ML frameworks that Flower is " +#~ "compatible with." +#~ msgstr "" +#~ "Flower fournit des classes d'enveloppe " +#~ "de confidentialité différentielle (DP) pour" +#~ " l'intégration facile des garanties " +#~ "centrales de DP fournies par DP-" +#~ "FedAvg dans les pipelines de formation" +#~ " définis dans n'importe lequel des " +#~ "divers cadres de ML avec lesquels " +#~ "Flower est compatible." -#~ msgid "Android ONNX on-device training" +#~ msgid "" +#~ "Please note that these components are" +#~ " still experimental; the correct " +#~ "configuration of DP for a specific " +#~ "task is still an unsolved problem." #~ msgstr "" -#~ "Utiliser Android ONNX pour faire du " -#~ "training directement sur le téléphone" +#~ "Note que ces composants sont encore " +#~ "expérimentaux, la configuration correcte du" +#~ " DP pour une tâche spécifique est " +#~ "encore un problème non résolu." 
-#~ msgid "Contribute on GitHub" -#~ msgstr "Contribuer sur GitHub" +#~ msgid "" +#~ "The name DP-FedAvg is misleading " +#~ "since it can be applied on top " +#~ "of any FL algorithm that conforms " +#~ "to the general structure prescribed by" +#~ " the FedOpt family of algorithms." +#~ msgstr "" +#~ "Le nom DP-FedAvg est trompeur car" +#~ " il peut être appliqué à n'importe" +#~ " quel algorithme FL qui se conforme" +#~ " à la structure générale prescrite " +#~ "par la famille d'algorithmes FedOpt." -#~ msgid "How to write a good PR title" -#~ msgstr "Comment écrire un bon titre de PR" +#~ msgid "DP-FedAvg" +#~ msgstr "DP-FedAvg" #~ msgid "" -#~ "A well-crafted PR title helps team" -#~ " members quickly understand the purpose " -#~ "and scope of the changes being " -#~ "proposed. Here's a guide to help " -#~ "you write a good GitHub PR title:" +#~ "DP-FedAvg, originally proposed by " +#~ "McMahan et al. [mcmahan]_ and extended" +#~ " by Andrew et al. [andrew]_, is " +#~ "essentially FedAvg with the following " +#~ "modifications." #~ msgstr "" -#~ "Un titre de PR bien choisi permet" -#~ " aux autres développeurs de rapidement " -#~ "comprendre l'intérêt et le scope des " -#~ "changements proposés. Voici un guide " -#~ "pour vous aider à écrire des bons" -#~ " titres de PR :" +#~ "DP-FedAvg, proposé à l'origine par " +#~ "McMahan et al. [mcmahan]_ et étendu " +#~ "par Andrew et al. [andrew]_, est " +#~ "essentiellement FedAvg avec les modifications" +#~ " suivantes." #~ msgid "" -#~ "1. Be Clear and Concise: Provide a" -#~ " clear summary of the changes in " -#~ "a concise manner. 1. Use Actionable " -#~ "Verbs: Start with verbs like \"Add,\"" -#~ " \"Update,\" or \"Fix\" to indicate " -#~ "the purpose. 1. Include Relevant " -#~ "Information: Mention the affected feature " -#~ "or module for context. 1. Keep it" -#~ " Short: Avoid lengthy titles for easy" -#~ " readability. 1. 
Use Proper Capitalization" -#~ " and Punctuation: Follow grammar rules " -#~ "for clarity." +#~ "**Clipping** : The influence of each " +#~ "client's update is bounded by clipping" +#~ " it. This is achieved by enforcing" +#~ " a cap on the L2 norm of " +#~ "the update, scaling it down if " +#~ "needed." #~ msgstr "" -#~ "1. Soyez clair et concis : Donnez" -#~ " un résumé clair des changements de" -#~ " manière concise. 1. Utilisez des " -#~ "verbes actionnables : Commencez par des" -#~ " verbes comme \"Add\", \"Update\", ou " -#~ "\"Fix\" pour indiquer le but. 1. " -#~ "Inclure des renseignements pertinents : " -#~ "Mentionner la caractéristique ou le " -#~ "module concerné pour le contexte. 1. " -#~ "Gardez le court : Évitez les longs" -#~ " titres pour une lisibilité facile. " -#~ "1. Utiliser une bonne capitalisation et" -#~ " une ponctuation : Suivre les règles" -#~ " de grammaire pour la clarté." +#~ "**Clipping** : L'influence de la mise" +#~ " à jour de chaque client est " +#~ "limitée en l'écrêtant. Ceci est réalisé" +#~ " en imposant un plafond à la " +#~ "norme L2 de la mise à jour, " +#~ "en la réduisant si nécessaire." #~ msgid "" -#~ "Let's start with a few examples " -#~ "for titles that should be avoided " -#~ "because they do not provide meaningful" -#~ " information:" +#~ "**Noising** : Gaussian noise, calibrated " +#~ "to the clipping threshold, is added " +#~ "to the average computed at the " +#~ "server." #~ msgstr "" -#~ "Commençons par quelques exemples de " -#~ "titres qui devraient être évités parce" -#~ " qu'ils ne fournissent pas d'information" -#~ " significative :" - -#~ msgid "Implement Algorithm" -#~ msgstr "Implement Algorithm" - -#~ msgid "Database" -#~ msgstr "Base de données" - -#~ msgid "Add my_new_file.py to codebase" -#~ msgstr "Add my_new_file.py to codebase" +#~ "**Bruit** : un bruit gaussien, calibré" +#~ " sur le seuil d'écrêtage, est ajouté" +#~ " à la moyenne calculée au niveau " +#~ "du serveur." 
-#~ msgid "Improve code in module" -#~ msgstr "Improve code in module" +#~ msgid "" +#~ "The distribution of the update norm " +#~ "has been shown to vary from " +#~ "task-to-task and to evolve as " +#~ "training progresses. This variability is " +#~ "crucial in understanding its impact on" +#~ " differential privacy guarantees, emphasizing " +#~ "the need for an adaptive approach " +#~ "[andrew]_ that continuously adjusts the " +#~ "clipping threshold to track a " +#~ "prespecified quantile of the update norm" +#~ " distribution." +#~ msgstr "" +#~ "Il a été démontré que la " +#~ "distribution de la norme de mise à" +#~ " jour varie d'une tâche à l'autre " +#~ "et évolue au fur et à mesure " +#~ "de la formation. C'est pourquoi nous " +#~ "utilisons une approche adaptative [andrew]_" +#~ " qui ajuste continuellement le seuil " +#~ "d'écrêtage pour suivre un quantile " +#~ "prédéfini de la distribution de la " +#~ "norme de mise à jour." -#~ msgid "Change SomeModule" -#~ msgstr "Change SomeModule" +#~ msgid "Simplifying Assumptions" +#~ msgstr "Simplifier les hypothèses" #~ msgid "" -#~ "Here are a few positive examples " -#~ "which provide helpful information without " -#~ "repeating how they do it, as that" -#~ " is already visible in the \"Files" -#~ " changed\" section of the PR:" +#~ "We make (and attempt to enforce) a" +#~ " number of assumptions that must be" +#~ " satisfied to ensure that the " +#~ "training process actually realizes the " +#~ ":math:`(\\epsilon, \\delta)` guarantees the " +#~ "user has in mind when configuring " +#~ "the setup." 
#~ msgstr "" -#~ "Voici quelques bons exemples qui " -#~ "fournissent de l'information utile sans " -#~ "répéter comment ils le font, comme " -#~ "cela est déjà visible dans la " -#~ "section \"Files changed\" de la PR " -#~ ":" - -#~ msgid "Update docs banner to mention Flower Summit 2023" -#~ msgstr "Update docs banner to mention Flower Summit 2023" - -#~ msgid "Remove unnecessary XGBoost dependency" -#~ msgstr "Remove unnecessary XGBoost dependency" - -#~ msgid "Remove redundant attributes in strategies subclassing FedAvg" -#~ msgstr "Remove redundant attributes in strategies subclassing FedAvg" +#~ "Nous formulons (et tentons d'appliquer) " +#~ "un certain nombre d'hypothèses qui " +#~ "doivent être satisfaites pour que le " +#~ "processus de formation réalise réellement " +#~ "les garanties :math:`(\\epsilon, \\delta)` que" +#~ " l'utilisateur a à l'esprit lorsqu'il " +#~ "configure l'installation." -#~ msgid "Add CI job to deploy the staging system when the `main` branch changes" +#~ msgid "" +#~ "**Fixed-size subsampling** :Fixed-size " +#~ "subsamples of the clients must be " +#~ "taken at each round, as opposed to" +#~ " variable-sized Poisson subsamples." #~ msgstr "" -#~ "Ajoute une tâche CI pour déployer " -#~ "le système de mise en scène " -#~ "lorsque la branche `main` change" +#~ "**Sous-échantillonnage de taille fixe** " +#~ ":Des sous-échantillons de taille fixe" +#~ " des clients doivent être prélevés à" +#~ " chaque tour, par opposition aux " +#~ "sous-échantillons de Poisson de taille " +#~ "variable." #~ msgid "" -#~ "Add new amazing library which will " -#~ "be used to improve the simulation " -#~ "engine" +#~ "**Unweighted averaging** : The contributions" +#~ " from all the clients must weighted" +#~ " equally in the aggregate to " +#~ "eliminate the requirement for the server" +#~ " to know in advance the sum of" +#~ " the weights of all clients available" +#~ " for selection." 
#~ msgstr "" -#~ "Add new amazing library which will " -#~ "be used to improve the simulation " -#~ "engine" - -#~ msgid "Differential privacy" -#~ msgstr "Confidentialité différentielle" +#~ "**Moyenne non pondérée** : Les " +#~ "contributions de tous les clients " +#~ "doivent être pondérées de façon égale" +#~ " dans l'ensemble afin que le serveur" +#~ " n'ait pas à connaître à l'avance " +#~ "la somme des poids de tous les " +#~ "clients disponibles pour la sélection." #~ msgid "" -#~ "The Flower server does not prescribe " -#~ "a way to aggregate evaluation results," -#~ " but it enables the user to " -#~ "fully customize result aggregation." +#~ "**No client failures** : The set " +#~ "of available clients must stay constant" +#~ " across all rounds of training. In" +#~ " other words, clients cannot drop out" +#~ " or fail." #~ msgstr "" - -#~ msgid "Configure logging" -#~ msgstr "Configurer les clients" +#~ "**Aucune défaillance de client** : " +#~ "L'ensemble des clients disponibles doit " +#~ "rester constant pendant toutes les " +#~ "séries de formation. En d'autres termes," +#~ " les clients ne peuvent pas " +#~ "abandonner ou échouer." #~ msgid "" -#~ "The Flower logger keeps track of " -#~ "all core events that take place in" -#~ " federated learning workloads. It presents" -#~ " information by default following a " -#~ "standard message format:" +#~ "The first two are useful for " +#~ "eliminating a multitude of complications " +#~ "associated with calibrating the noise to" +#~ " the clipping threshold, while the " +#~ "third one is required to comply " +#~ "with the assumptions of the privacy " +#~ "analysis." #~ msgstr "" -#~ "L'enregistreur de Flower garde la trace" -#~ " de tous les événements principaux " -#~ "qui ont lieu dans les charges de" -#~ " travail de l'apprentissage fédéré. 
Il " -#~ "présente les informations par défaut en" -#~ " suivant un format de message " -#~ "standard :" +#~ "Les deux premiers sont utiles pour " +#~ "éliminer une multitude de complications " +#~ "liées au calibrage du bruit en " +#~ "fonction du seuil d'écrêtage, tandis que" +#~ " le troisième est nécessaire pour se" +#~ " conformer aux hypothèses de l'analyse " +#~ "de la vie privée." #~ msgid "" -#~ "containing relevant information including: log" -#~ " message level (e.g. :code:`INFO`, " -#~ ":code:`DEBUG`), a timestamp, the line " -#~ "where the logging took place from, " -#~ "as well as the log message itself." -#~ " In this way, the logger would " -#~ "typically display information on your " -#~ "terminal as follows:" +#~ "These restrictions are in line with " +#~ "constraints imposed by Andrew et al. " +#~ "[andrew]_." #~ msgstr "" -#~ "contenant des informations pertinentes, " -#~ "notamment : le niveau du message " -#~ "de journal (par exemple :code:`INFO`, " -#~ ":code:`DEBUG`), un horodatage, la ligne " -#~ "à partir de laquelle l'enregistrement a" -#~ " eu lieu, ainsi que le message " -#~ "de journal lui-même. De cette " -#~ "façon, le logger afficherait typiquement " -#~ "des informations sur ton terminal comme" -#~ " suit :" +#~ "Ces restrictions sont conformes aux " +#~ "contraintes imposées par Andrew et al." +#~ " [andrew]_." -#~ msgid "Saving log to file" -#~ msgstr "Enregistrement du journal dans un fichier" +#~ msgid "Customizable Responsibility for Noise injection" +#~ msgstr "Responsabilité personnalisable pour l'injection de bruit" #~ msgid "" -#~ "By default, the Flower log is " -#~ "outputted to the terminal where you " -#~ "launch your Federated Learning workload " -#~ "from. This applies for both gRPC-" -#~ "based federation (i.e. when you do " -#~ ":code:`fl.server.start_server`) and when using " -#~ "the :code:`VirtualClientEngine` (i.e. when you" -#~ " do :code:`fl.simulation.start_simulation`). 
In " -#~ "some situations you might want to " -#~ "save this log to disk. You can " -#~ "do so by calling the " -#~ "`fl.common.logger.configure() " -#~ "`_" -#~ " function. For example:" +#~ "In contrast to other implementations " +#~ "where the addition of noise is " +#~ "performed at the server, you can " +#~ "configure the site of noise injection" +#~ " to better match your threat model." +#~ " We provide users with the " +#~ "flexibility to set up the training " +#~ "such that each client independently adds" +#~ " a small amount of noise to the" +#~ " clipped update, with the result that" +#~ " simply aggregating the noisy updates " +#~ "is equivalent to the explicit addition" +#~ " of noise to the non-noisy " +#~ "aggregate at the server." #~ msgstr "" -#~ "Par défaut, le journal de Flower " -#~ "est affiché dans le terminal à " -#~ "partir duquel tu as lancé ta " -#~ "charge de travail d'apprentissage fédéré. " -#~ "Cela s'applique à la fois à la " -#~ "fédération basée sur gRPC (c'est-à-dire " -#~ "lorsque tu fais :code:`fl.server.start_server`) " -#~ "et à l'utilisation du " -#~ ":code:`VirtualClientEngine` (c'est-à-dire lorsque tu" -#~ " fais :code:`fl.simulation.start_simulation`). Dans " -#~ "certaines situations, tu peux vouloir " -#~ "sauvegarder ce journal sur le disque." -#~ " Tu peux le faire en appelant " -#~ "la fonction `fl.common.logger.configure() " -#~ "`_." -#~ " Par exemple :" +#~ "Contrairement à d'autres implémentations où" +#~ " l'ajout de bruit est effectué au " +#~ "niveau du serveur, tu peux configurer" +#~ " le site d'injection de bruit pour" +#~ " qu'il corresponde mieux à ton modèle" +#~ " de menace. 
Nous offrons aux " +#~ "utilisateurs la possibilité de configurer " +#~ "l'entraînement de telle sorte que chaque" +#~ " client ajoute indépendamment une petite" +#~ " quantité de bruit à la mise à" +#~ " jour écrêtée, ce qui fait que " +#~ "le simple fait d'agréger les mises " +#~ "à jour bruyantes équivaut à l'ajout " +#~ "explicite de bruit à l'agrégat non " +#~ "bruyant au niveau du serveur." #~ msgid "" -#~ "With the above, Flower will record " -#~ "the log you see on your terminal" -#~ " to :code:`log.txt`. This file will " -#~ "be created in the same directory " -#~ "as were you are running the code" -#~ " from. If we inspect we see the" -#~ " log above is also recorded but " -#~ "prefixing with :code:`identifier` each line:" +#~ "To be precise, if we let :math:`m`" +#~ " be the number of clients sampled " +#~ "each round and :math:`\\sigma_\\Delta` be " +#~ "the scale of the total Gaussian " +#~ "noise that needs to be added to" +#~ " the sum of the model updates, " +#~ "we can use simple maths to show" +#~ " that this is equivalent to each " +#~ "client adding noise with scale " +#~ ":math:`\\sigma_\\Delta/\\sqrt{m}`." #~ msgstr "" -#~ "Avec ce qui précède, Flower enregistrera" -#~ " le journal que tu vois sur ton" -#~ " terminal dans :code:`log.txt`. Ce fichier" -#~ " sera créé dans le même répertoire" -#~ " que celui à partir duquel tu " -#~ "exécutes le code. 
Si nous inspectons," -#~ " nous voyons que le journal ci-" -#~ "dessus est également enregistré, mais en" -#~ " préfixant chaque ligne avec " -#~ ":code:`identifier` :" +#~ "Pour être précis, si nous laissons " +#~ ":math:`m` être le nombre de clients " +#~ "échantillonnés à chaque tour et " +#~ ":math:\\sigma_\\Delta` être l'échelle du bruit" +#~ " gaussien total qui doit être ajouté" +#~ " à la somme des mises à jour" +#~ " du modèle, nous pouvons utiliser des" +#~ " mathématiques simples pour montrer que " +#~ "cela équivaut à ce que chaque " +#~ "client ajoute du bruit avec l'échelle" +#~ " :math:\\sigma_\\Delta/\\sqrt{m}`." -#~ msgid "Log your own messages" -#~ msgstr "Enregistrer tes propres messages" +#~ msgid "Wrapper-based approach" +#~ msgstr "Approche basée sur l'enveloppe" + +#~ msgid "" +#~ "Introducing DP to an existing workload" +#~ " can be thought of as adding an" +#~ " extra layer of security around it." +#~ " This inspired us to provide the " +#~ "additional server and client-side logic" +#~ " needed to make the training process" +#~ " differentially private as wrappers for " +#~ "instances of the :code:`Strategy` and " +#~ ":code:`NumPyClient` abstract classes respectively." +#~ " This wrapper-based approach has the" +#~ " advantage of being easily composable " +#~ "with other wrappers that someone might" +#~ " contribute to the Flower library in" +#~ " the future, e.g., for secure " +#~ "aggregation. Using Inheritance instead can " +#~ "be tedious because that would require" +#~ " the creation of new sub- classes " +#~ "every time a new class implementing " +#~ ":code:`Strategy` or :code:`NumPyClient` is " +#~ "defined." +#~ msgstr "" +#~ "L'introduction du DP dans une charge " +#~ "de travail existante peut être " +#~ "considérée comme l'ajout d'une couche de" +#~ " sécurité supplémentaire autour d'elle. 
" +#~ "Cela nous a incités à fournir la" +#~ " logique supplémentaire côté serveur et " +#~ "côté client nécessaire pour rendre le" +#~ " processus de formation différentiellement " +#~ "privé en tant qu'enveloppes pour les " +#~ "instances des classes abstraites " +#~ ":code:`Strategy` et :code:`NumPyClient` " +#~ "respectivement. Cette approche basée sur " +#~ "l'enveloppe a l'avantage d'être facilement " +#~ "composable avec d'autres enveloppes que " +#~ "quelqu'un pourrait contribuer à la " +#~ "bibliothèque Flower à l'avenir, par " +#~ "exemple, pour l'agrégation sécurisée. " +#~ "L'utilisation de l'héritage à la place" +#~ " peut être fastidieuse car cela " +#~ "nécessiterait la création de nouvelles " +#~ "sous-classes chaque fois qu'une nouvelle" +#~ " classe mettant en œuvre :code:`Strategy`" +#~ " ou :code:`NumPyClient` est définie." #~ msgid "" -#~ "You might expand the information shown" -#~ " by default with the Flower logger" -#~ " by adding more messages relevant to" -#~ " your application. You can achieve " -#~ "this easily as follows." +#~ "The first version of our solution " +#~ "was to define a decorator whose " +#~ "constructor accepted, among other things, " +#~ "a boolean-valued variable indicating " +#~ "whether adaptive clipping was to be " +#~ "enabled or not. We quickly realized " +#~ "that this would clutter its " +#~ ":code:`__init__()` function with variables " +#~ "corresponding to hyperparameters of adaptive" +#~ " clipping that would remain unused " +#~ "when it was disabled. A cleaner " +#~ "implementation could be achieved by " +#~ "splitting the functionality into two " +#~ "decorators, :code:`DPFedAvgFixed` and " +#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" +#~ " classing the former. The constructors " +#~ "for both classes accept a boolean " +#~ "parameter :code:`server_side_noising`, which, as " +#~ "the name suggests, determines where " +#~ "noising is to be performed." 
#~ msgstr "" -#~ "Tu peux élargir les informations " -#~ "affichées par défaut avec le logger " -#~ "Flower en ajoutant d'autres messages " -#~ "pertinents pour ton application. Tu peux" -#~ " y parvenir facilement en procédant " -#~ "comme suit." +#~ "La première version de notre solution" +#~ " consistait à définir un décorateur " +#~ "dont le constructeur acceptait, entre " +#~ "autres, une variable à valeur booléenne" +#~ " indiquant si l'écrêtage adaptatif devait" +#~ " être activé ou non. Nous nous " +#~ "sommes rapidement rendu compte que cela" +#~ " encombrerait sa fonction :code:`__init__()` " +#~ "avec des variables correspondant aux " +#~ "hyperparamètres de l'écrêtage adaptatif qui" +#~ " resteraient inutilisées lorsque celui-ci" +#~ " était désactivé. Une implémentation plus" +#~ " propre pourrait être obtenue en " +#~ "divisant la fonctionnalité en deux " +#~ "décorateurs, :code:`DPFedAvgFixed` et " +#~ ":code:`DPFedAvgAdaptive`, le second sous-" +#~ "classant le premier. Les constructeurs " +#~ "des deux classes acceptent un paramètre" +#~ " booléen :code:`server_side_noising` qui, comme" +#~ " son nom l'indique, détermine l'endroit " +#~ "où le noising doit être effectué." #~ msgid "" -#~ "In this way your logger will show," -#~ " in addition to the default messages," -#~ " the ones introduced by the clients" -#~ " as specified above." +#~ "The server-side capabilities required " +#~ "for the original version of DP-" +#~ "FedAvg, i.e., the one which performed" +#~ " fixed clipping, can be completely " +#~ "captured with the help of wrapper " +#~ "logic for just the following two " +#~ "methods of the :code:`Strategy` abstract " +#~ "class." #~ msgstr "" -#~ "De cette façon, ton logger affichera," -#~ " en plus des messages par défaut, " -#~ "ceux introduits par les clients comme" -#~ " spécifié ci-dessus." 
- -#~ msgid "Log to a remote service" -#~ msgstr "Se connecter à un service distant" +#~ "Les capacités côté serveur requises pour" +#~ " la version originale de DP-FedAvg," +#~ " c'est-à-dire celle qui effectue un " +#~ "écrêtage fixe, peuvent être entièrement " +#~ "capturées à l'aide d'une logique " +#~ "d'enveloppement pour les deux méthodes " +#~ "suivantes de la classe abstraite " +#~ ":code:`Strategy`." #~ msgid "" -#~ "The :code:`fl.common.logger.configure` function, " -#~ "also allows specifying a host to " -#~ "which logs can be pushed (via " -#~ ":code:`POST`) through a native Python " -#~ ":code:`logging.handler.HTTPHandler`. This is a " -#~ "particularly useful feature in " -#~ ":code:`gRPC`-based Federated Learning workloads " -#~ "where otherwise gathering logs from all" -#~ " entities (i.e. the server and the" -#~ " clients) might be cumbersome. Note " -#~ "that in Flower simulation, the server" -#~ " automatically displays all logs. You " -#~ "can still specify a :code:`HTTPHandler` " -#~ "should you wish to backup or " -#~ "analyze the logs somewhere else." +#~ ":code:`configure_fit()` : The config " +#~ "dictionary being sent by the wrapped " +#~ ":code:`Strategy` to each client needs to" +#~ " be augmented with an additional " +#~ "value equal to the clipping threshold" +#~ " (keyed under :code:`dpfedavg_clip_norm`) and," +#~ " if :code:`server_side_noising=true`, another one" +#~ " equal to the scale of the " +#~ "Gaussian noise that needs to be " +#~ "added at the client (keyed under " +#~ ":code:`dpfedavg_noise_stddev`). This entails " +#~ "*post*-processing of the results returned " +#~ "by the wrappee's implementation of " +#~ ":code:`configure_fit()`." #~ msgstr "" -#~ "La fonction :code:`fl.common.logger.configure` " -#~ "permet également de spécifier un hôte" -#~ " vers lequel les journaux peuvent " -#~ "être envoyés (via :code:`POST`) par " -#~ "l'intermédiaire d'un :code:`logging.handler.HTTPHandler`" -#~ " natif de Python. 
Il s'agit d'une " -#~ "fonction particulièrement utile dans les " -#~ "charges de travail d'apprentissage fédéré " -#~ "basées sur :code:`gRPC` où la collecte" -#~ " des journaux de toutes les entités" -#~ " (c'est-à-dire le serveur et les " -#~ "clients) pourrait s'avérer fastidieuse. Notez" -#~ " que dans la simulation Flower, le" -#~ " serveur affiche automatiquement tous les" -#~ " journaux. Vous pouvez toujours spécifier" -#~ " un :code:`HTTPHandler` si vous souhaitez" -#~ " sauvegarder ou analyser les journaux " -#~ "à un autre endroit." - -#~ msgid "Enable SSL connections" -#~ msgstr "Collecte centralisée des données" - -#~ msgid "Python version" -#~ msgstr "Version Python" +#~ ":code:`configure_fit()` : Le dictionnaire de" +#~ " configuration envoyé par la " +#~ ":code:`Strategy` enveloppée à chaque client" +#~ " doit être augmenté d'une valeur " +#~ "supplémentaire égale au seuil d'écrêtage " +#~ "(indiqué sous :code:`dpfedavg_clip_norm`) et, " +#~ "si :code:`server_side_noising=true`, d'une autre " +#~ "égale à l'échelle du bruit gaussien " +#~ "qui doit être ajouté au client " +#~ "(indiqué sous :code:`dpfedavg_noise_stddev`)." #~ msgid "" -#~ "Flower requires at least `Python 3.7 " -#~ "`_, but `Python 3.8" -#~ " `_ or above is " -#~ "recommended." +#~ ":code:`aggregate_fit()`: We check whether any" +#~ " of the sampled clients dropped out" +#~ " or failed to upload an update " +#~ "before the round timed out. In " +#~ "that case, we need to abort the" +#~ " current round, discarding any successful" +#~ " updates that were received, and move" +#~ " on to the next one. On the " +#~ "other hand, if all clients responded " +#~ "successfully, we must force the " +#~ "averaging of the updates to happen " +#~ "in an unweighted manner by intercepting" +#~ " the :code:`parameters` field of " +#~ ":code:`FitRes` for each received update " +#~ "and setting it to 1. 
Furthermore, " +#~ "if :code:`server_side_noising=true`, each update " +#~ "is perturbed with an amount of " +#~ "noise equal to what it would have" +#~ " been subjected to had client-side" +#~ " noising being enabled. This entails " +#~ "*pre*-processing of the arguments to " +#~ "this method before passing them on " +#~ "to the wrappee's implementation of " +#~ ":code:`aggregate_fit()`." #~ msgstr "" -#~ "Flower nécessite `Python 3.7 " -#~ "`_ ou plus, nous " -#~ "recommandons `Python 3.8 " -#~ "`_." - -#~ msgid "Run simulations" -#~ msgstr "Simulation de moniteur" +#~ ":code:`aggregate_fit()`: We check whether any" +#~ " of the sampled clients dropped out" +#~ " or failed to upload an update " +#~ "before the round timed out. In " +#~ "that case, we need to abort the" +#~ " current round, discarding any successful" +#~ " updates that were received, and move" +#~ " on to the next one. On the " +#~ "other hand, if all clients responded " +#~ "successfully, we must force the " +#~ "averaging of the updates to happen " +#~ "in an unweighted manner by intercepting" +#~ " the :code:`parameters` field of " +#~ ":code:`FitRes` for each received update " +#~ "and setting it to 1. Furthermore, " +#~ "if :code:`server_side_noising=true`, each update " +#~ "is perturbed with an amount of " +#~ "noise equal to what it would have" +#~ " been subjected to had client-side" +#~ " noising being enabled. This entails " +#~ "*pre*-processing of the arguments to " +#~ "this method before passing them on " +#~ "to the wrappee's implementation of " +#~ ":code:`aggregate_fit()`." 
#~ msgid "" -#~ "Simulating Federated Learning workloads is " -#~ "useful for a multitude of use-" -#~ "cases: you might want to run your" -#~ " workload on a large cohort of " -#~ "clients but without having to source," -#~ " configure and mange a large number" -#~ " of physical devices; you might want" -#~ " to run your FL workloads as " -#~ "fast as possible on the compute " -#~ "systems you have access to without " -#~ "having to go through a complex " -#~ "setup process; you might want to " -#~ "validate your algorithm on different " -#~ "scenarios at varying levels of data " -#~ "and system heterogeneity, client availability," -#~ " privacy budgets, etc. These are " -#~ "among some of the use-cases where" -#~ " simulating FL workloads makes sense. " -#~ "Flower can accommodate these scenarios " -#~ "by means of its `VirtualClientEngine " -#~ "`_ or VCE." +#~ "We can't directly change the aggregation" +#~ " function of the wrapped strategy to" +#~ " force it to add noise to the" +#~ " aggregate, hence we simulate client-" +#~ "side noising to implement server-side" +#~ " noising." #~ msgstr "" +#~ "Nous ne pouvons pas modifier directement" +#~ " la fonction d'agrégation de la " +#~ "stratégie enveloppée pour la forcer à" +#~ " ajouter du bruit à l'agrégat, c'est" +#~ " pourquoi nous simulons le bruit côté" +#~ " client pour mettre en œuvre le " +#~ "bruit côté serveur." #~ msgid "" -#~ "The :code:`VirtualClientEngine` schedules, launches" -#~ " and manages `virtual` clients. These " -#~ "clients are identical to `non-virtual`" -#~ " clients (i.e. the ones you launch" -#~ " via the command `flwr.client.start_client " -#~ "`_) in the" -#~ " sense that they can be configure " -#~ "by creating a class inheriting, for " -#~ "example, from `flwr.client.NumPyClient `_ and therefore" -#~ " behave in an identical way. 
In " -#~ "addition to that, clients managed by " -#~ "the :code:`VirtualClientEngine` are:" +#~ "These changes have been put together " +#~ "into a class called :code:`DPFedAvgFixed`, " +#~ "whose constructor accepts the strategy " +#~ "being decorated, the clipping threshold " +#~ "and the number of clients sampled " +#~ "every round as compulsory arguments. The" +#~ " user is expected to specify the " +#~ "clipping threshold since the order of" +#~ " magnitude of the update norms is " +#~ "highly dependent on the model being " +#~ "trained and providing a default value" +#~ " would be misleading. The number of" +#~ " clients sampled at every round is" +#~ " required to calculate the amount of" +#~ " noise that must be added to " +#~ "each individual update, either by the" +#~ " server or the clients." #~ msgstr "" +#~ "Ces modifications ont été regroupées " +#~ "dans une classe appelée :code:`DPFedAvgFixed`," +#~ " dont le constructeur accepte la " +#~ "stratégie décorée, le seuil d'écrêtage " +#~ "et le nombre de clients échantillonnés" +#~ " à chaque tour comme arguments " +#~ "obligatoires. L'utilisateur est censé " +#~ "spécifier le seuil d'écrêtage car " +#~ "l'ordre de grandeur des normes de " +#~ "mise à jour dépend fortement du " +#~ "modèle formé et fournir une valeur " +#~ "par défaut serait trompeur. Le nombre" +#~ " de clients échantillonnés à chaque " +#~ "tour est nécessaire pour calculer la " +#~ "quantité de bruit qui doit être " +#~ "ajoutée à chaque mise à jour " +#~ "individuelle, que ce soit par le " +#~ "serveur ou par les clients." #~ msgid "" -#~ "resource-aware: this means that each " -#~ "client gets assigned a portion of " -#~ "the compute and memory on your " -#~ "system. You as a user can control" -#~ " this at the beginning of the " -#~ "simulation and allows you to control " -#~ "the degree of parallelism of your " -#~ "Flower FL simulation. 
The fewer the " -#~ "resources per client, the more clients" -#~ " can run concurrently on the same " -#~ "hardware." +#~ "The additional functionality required to " +#~ "facilitate adaptive clipping has been " +#~ "provided in :code:`DPFedAvgAdaptive`, a " +#~ "subclass of :code:`DPFedAvgFixed`. It " +#~ "overrides the above-mentioned methods to" +#~ " do the following." #~ msgstr "" +#~ "La fonctionnalité supplémentaire nécessaire " +#~ "pour faciliter l'écrêtage adaptatif a " +#~ "été fournie dans :code:`DPFedAvgAdaptive`, une" +#~ " sous-classe de :code:`DPFedAvgFixed`. Elle" +#~ " remplace les méthodes mentionnées ci-" +#~ "dessus pour effectuer les opérations " +#~ "suivantes." #~ msgid "" -#~ "self-managed: this means that you " -#~ "as a user do not need to " -#~ "launch clients manually, instead this " -#~ "gets delegated to :code:`VirtualClientEngine`'s " -#~ "internals." +#~ ":code:`configure_fit()` : It intercepts the" +#~ " config dict returned by " +#~ ":code:`super.configure_fit()` to add the " +#~ "key-value pair " +#~ ":code:`dpfedavg_adaptive_clip_enabled:True` to it, " +#~ "which the client interprets as an " +#~ "instruction to include an indicator bit" +#~ " (1 if update norm <= clipping " +#~ "threshold, 0 otherwise) in the results" +#~ " returned by it." #~ msgstr "" +#~ ":code:`configure_fit()` : Il intercepte le " +#~ "dict de configuration renvoyé par " +#~ ":code:`super.configure_fit()` pour y ajouter " +#~ "la paire clé-valeur " +#~ ":code:`dpfedavg_adaptive_clip_enabled:True`, que le " +#~ "client interprète comme une instruction " +#~ "d'inclure un bit indicateur (1 si " +#~ "la norme de mise à jour <= " +#~ "seuil d'écrêtage, 0 sinon) dans les " +#~ "résultats qu'il renvoie." #~ msgid "" -#~ "ephemeral: this means that a client " -#~ "is only materialized when it is " -#~ "required in the FL process (e.g. " -#~ "to do `fit() `_). 
The object is" -#~ " destroyed afterwards, releasing the " -#~ "resources it was assigned and allowing" -#~ " in this way other clients to " -#~ "participate." +#~ ":code:`aggregate_fit()` : It follows a " +#~ "call to :code:`super.aggregate_fit()` with one" +#~ " to :code:`__update_clip_norm__()`, a procedure" +#~ " which adjusts the clipping threshold " +#~ "on the basis of the indicator bits" +#~ " received from the sampled clients." #~ msgstr "" +#~ ":code:`aggregate_fit()` : Il fait suivre " +#~ "un appel à :code:`super.aggregate_fit()` d'un" +#~ " appel à :code:`__update_clip_norm__()`, une " +#~ "procédure qui ajuste le seuil d'écrêtage" +#~ " sur la base des bits indicateurs " +#~ "reçus des clients échantillonnés." #~ msgid "" -#~ "The :code:`VirtualClientEngine` implements `virtual`" -#~ " clients using `Ray `_, " -#~ "an open-source framework for scalable" -#~ " Python workloads. In particular, Flower's" -#~ " :code:`VirtualClientEngine` makes use of " -#~ "`Actors `_ to spawn `virtual` clients" -#~ " and run their workload." -#~ msgstr "" - -#~ msgid "Launch your Flower simulation" +#~ "The client-side capabilities required " +#~ "can be completely captured through " +#~ "wrapper logic for just the :code:`fit()`" +#~ " method of the :code:`NumPyClient` abstract" +#~ " class. To be precise, we need " +#~ "to *post-process* the update computed" +#~ " by the wrapped client to clip " +#~ "it, if necessary, to the threshold " +#~ "value supplied by the server as " +#~ "part of the config dictionary. In " +#~ "addition to this, it may need to" +#~ " perform some extra work if either" +#~ " (or both) of the following keys " +#~ "are also present in the dict." #~ msgstr "" +#~ "Les capacités requises côté client " +#~ "peuvent être entièrement capturées par " +#~ "une logique de wrapper pour la " +#~ "seule méthode :code:`fit()` de la classe" +#~ " abstraite :code:`NumPyClient`. 
Pour être " +#~ "précis, nous devons *post-traiter* la" +#~ " mise à jour calculée par le " +#~ "client wrapped pour l'écrêter, si " +#~ "nécessaire, à la valeur seuil fournie" +#~ " par le serveur dans le cadre " +#~ "du dictionnaire de configuration. En " +#~ "plus de cela, il peut avoir besoin" +#~ " d'effectuer un travail supplémentaire si" +#~ " l'une des clés suivantes (ou les " +#~ "deux) est également présente dans le " +#~ "dict." #~ msgid "" -#~ "Running Flower simulations still require " -#~ "you to define your client class, a" -#~ " strategy, and utility functions to " -#~ "download and load (and potentially " -#~ "partition) your dataset. With that out" -#~ " of the way, launching your " -#~ "simulation is done with `start_simulation " -#~ "`_ " -#~ "and a minimal example looks as " -#~ "follows:" +#~ ":code:`dpfedavg_noise_stddev` : Generate and " +#~ "add the specified amount of noise " +#~ "to the clipped update." #~ msgstr "" - -#~ msgid "VirtualClientEngine resources" -#~ msgstr "Moteur de client virtuel" +#~ ":code:`dpfedavg_noise_stddev` : Génère et " +#~ "ajoute la quantité de bruit spécifiée" +#~ " à la mise à jour de " +#~ "l'écrêtage." #~ msgid "" -#~ "By default the VCE has access to" -#~ " all system resources (i.e. all CPUs," -#~ " all GPUs, etc) since that is " -#~ "also the default behavior when starting" -#~ " Ray. However, in some settings you" -#~ " might want to limit how many " -#~ "of your system resources are used " -#~ "for simulation. You can do this " -#~ "via the :code:`ray_init_args` input argument" -#~ " to :code:`start_simulation` which the VCE" -#~ " internally passes to Ray's " -#~ ":code:`ray.init` command. For a complete " -#~ "list of settings you can configure " -#~ "check the `ray.init `_ " -#~ "documentation. Do not set " -#~ ":code:`ray_init_args` if you want the " -#~ "VCE to use all your system's CPUs" -#~ " and GPUs." 
+#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the" +#~ " metrics dict in the :code:`FitRes` " +#~ "object being returned to the server " +#~ "with an indicator bit, calculated as " +#~ "described earlier." #~ msgstr "" +#~ ":code:`dpfedavg_adaptive_clip_enabled` : Complète " +#~ "les métriques dict dans l'objet " +#~ ":code:`FitRes` renvoyé au serveur avec " +#~ "un bit indicateur, calculé comme décrit" +#~ " précédemment." -#~ msgid "Assigning client resources" -#~ msgstr "" +#~ msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" +#~ msgstr "Effectuer l'analyse :math:`(\\epsilon, \\delta)`" #~ msgid "" -#~ "By default the :code:`VirtualClientEngine` " -#~ "assigns a single CPU core (and " -#~ "nothing else) to each virtual client." -#~ " This means that if your system " -#~ "has 10 cores, that many virtual " -#~ "clients can be concurrently running." +#~ "Assume you have trained for :math:`n`" +#~ " rounds with sampling fraction :math:`q`" +#~ " and noise multiplier :math:`z`. In " +#~ "order to calculate the :math:`\\epsilon` " +#~ "value this would result in for a" +#~ " particular :math:`\\delta`, the following " +#~ "script may be used." #~ msgstr "" +#~ "Supposons que tu te sois entraîné " +#~ "pendant :math:`n` tours avec la fraction" +#~ " d'échantillonnage :math:`q` et le " +#~ "multiplicateur de bruit :math:`z`. Afin " +#~ "de calculer la valeur :math:`epsilon` " +#~ "qui en résulterait pour un " +#~ ":math:`\\delta` particulier, le script suivant" +#~ " peut être utilisé." #~ msgid "" -#~ "More often than not, you would " -#~ "probably like to adjust the resources" -#~ " your clients get assigned based on" -#~ " the complexity (i.e. compute and " -#~ "memory footprint) of your FL workload." -#~ " You can do so when starting " -#~ "your simulation by setting the argument" -#~ " `client_resources` to `start_simulation `_. 
Two " -#~ "keys are internally used by Ray to" -#~ " schedule and spawn workloads (in our" -#~ " case Flower clients):" +#~ "`How to run Flower using Docker " +#~ "`_" #~ msgstr "" -#~ msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." +#~ msgid "Enjoy building more robust and flexible ``ClientApp``s with mods!" #~ msgstr "" #~ msgid "" -#~ ":code:`num_gpus` indicates the **ratio** of" -#~ " GPU memory a client gets assigned." +#~ ":py:obj:`ClientApp `\\ " +#~ "\\(client\\_fn\\[\\, mods\\]\\)" #~ msgstr "" -#~ msgid "Let's see a few examples:" +#~ msgid ":py:obj:`flwr.server.driver `\\" #~ msgstr "" -#~ msgid "" -#~ "While the :code:`client_resources` can be " -#~ "used to control the degree of " -#~ "concurrency in your FL simulation, this" -#~ " does not stop you from running " -#~ "dozens, hundreds or even thousands of" -#~ " clients in the same round and " -#~ "having orders of magnitude more " -#~ "`dormant` (i.e. not participating in a" -#~ " round) clients. Let's say you want" -#~ " to have 100 clients per round " -#~ "but your system can only accommodate " -#~ "8 clients concurrently. The " -#~ ":code:`VirtualClientEngine` will schedule 100 " -#~ "jobs to run (each simulating a " -#~ "client sampled by the strategy) and " -#~ "then will execute them in a " -#~ "resource-aware manner in batches of " -#~ "8." -#~ msgstr "" +#~ msgid "Flower driver SDK." +#~ msgstr "Serveur de Flower" + +#~ msgid "driver" +#~ msgstr "serveur" #~ msgid "" -#~ "To understand all the intricate details" -#~ " on how resources are used to " -#~ "schedule FL clients and how to " -#~ "define custom resources, please take a" -#~ " look at the `Ray documentation " -#~ "`_." 
+#~ ":py:obj:`start_driver `\\ " +#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" #~ msgstr "" -#~ msgid "Simulation examples" -#~ msgstr "Exemples de PyTorch" - #~ msgid "" -#~ "A few ready-to-run complete " -#~ "examples for Flower simulation in " -#~ "Tensorflow/Keras and PyTorch are provided " -#~ "in the `Flower repository " -#~ "`_. You can run " -#~ "them on Google Colab too:" +#~ ":py:obj:`Driver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" #~ msgstr "" #~ msgid "" -#~ "`Tensorflow/Keras Simulation " -#~ "`_: 100 clients collaboratively " -#~ "train a MLP model on MNIST." +#~ ":py:obj:`GrpcDriver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "`GrpcDriver` provides access to the gRPC Driver API/service." +#~ msgstr "" + +#~ msgid ":py:obj:`get_nodes `\\ \\(\\)" #~ msgstr "" -#~ "`Quickstart TensorFlow (Code) " -#~ "`_" #~ msgid "" -#~ "`PyTorch Simulation " -#~ "`_: 100 clients collaboratively train" -#~ " a CNN model on MNIST." +#~ ":py:obj:`pull_task_res " +#~ "`\\ \\(task\\_ids\\)" +#~ msgstr "" + +#~ msgid "Get task results." #~ msgstr "" -#~ "`Quickstart PyTorch (Code) " -#~ "`_" #~ msgid "" -#~ "Flower's :code:`VirtualClientEngine` allows you " -#~ "to run FL simulations across multiple" -#~ " compute nodes. Before starting your " -#~ "multi-node simulation ensure that you:" +#~ ":py:obj:`push_task_ins " +#~ "`\\ " +#~ "\\(task\\_ins\\_list\\)" #~ msgstr "" -#~ msgid "Have the same Python environment in all nodes." +#~ msgid "Schedule tasks." #~ msgstr "" -#~ msgid "Have a copy of your code (e.g. your entire repo) in all nodes." +#~ msgid "GrpcDriver" +#~ msgstr "" + +#~ msgid ":py:obj:`connect `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Connect to the Driver API." #~ msgstr "" #~ msgid "" -#~ "Have a copy of your dataset in " -#~ "all nodes (more about this in " -#~ ":ref:`simulation considerations `)" +#~ ":py:obj:`create_run " +#~ "`\\ \\(req\\)" #~ msgstr "" +#~ msgid "Request for run ID." 
+#~ msgstr "Demande pour une nouvelle Flower Baseline" + #~ msgid "" -#~ "Pass :code:`ray_init_args={\"address\"=\"auto\"}` to " -#~ "`start_simulation `_ so the " -#~ ":code:`VirtualClientEngine` attaches to a " -#~ "running Ray instance." +#~ ":py:obj:`disconnect " +#~ "`\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Disconnect from the Driver API." #~ msgstr "" #~ msgid "" -#~ "Start Ray on you head node: on " -#~ "the terminal type :code:`ray start " -#~ "--head`. This command will print a " -#~ "few lines, one of which indicates " -#~ "how to attach other nodes to the" -#~ " head node." +#~ ":py:obj:`get_nodes `\\" +#~ " \\(req\\)" #~ msgstr "" +#~ msgid "Get client IDs." +#~ msgstr "Moteur client Edge" + #~ msgid "" -#~ "Attach other nodes to the head " -#~ "node: copy the command shown after " -#~ "starting the head and execute it " -#~ "on terminal of a new node: for " -#~ "example :code:`ray start " -#~ "--address='192.168.1.132:6379'`" +#~ ":py:obj:`pull_task_res " +#~ "`\\ \\(req\\)" #~ msgstr "" #~ msgid "" -#~ "With all the above done, you can" -#~ " run your code from the head " -#~ "node as you would if the " -#~ "simulation was running on a single " -#~ "node." +#~ ":py:obj:`push_task_ins " +#~ "`\\ \\(req\\)" #~ msgstr "" #~ msgid "" -#~ "Once your simulation is finished, if " -#~ "you'd like to dismantle your cluster " -#~ "you simply need to run the command" -#~ " :code:`ray stop` in each node's " -#~ "terminal (including the head node)." +#~ "Optionally specify the type of actor " +#~ "to use. The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "running the clients' jobs (i.e. their" +#~ " `fit()` method)." #~ msgstr "" -#~ msgid "Multi-node simulation good-to-know" +#~ msgid "" +#~ "Much effort went into a completely " +#~ "restructured Flower docs experience. 
The " +#~ "documentation on [flower.ai/docs](flower.ai/docs) is" +#~ " now divided into Flower Framework, " +#~ "Flower Baselines, Flower Android SDK, " +#~ "Flower iOS SDK, and code example " +#~ "projects." #~ msgstr "" #~ msgid "" -#~ "Here we list a few interesting " -#~ "functionality when running multi-node FL" -#~ " simulations:" +#~ "Flower usage examples used to be " +#~ "bundled with Flower in a package " +#~ "called ``flwr_example``. We are migrating " +#~ "those examples to standalone projects to" +#~ " make them easier to use. All " +#~ "new examples are based in the " +#~ "directory `examples " +#~ "`_." #~ msgstr "" +#~ "Les exemples d'utilisation de Flower " +#~ "étaient auparavant regroupés avec Flower " +#~ "dans un paquet appelé ``flwr_example``. " +#~ "Nous migrons ces exemples vers des " +#~ "projets autonomes pour les rendre plus" +#~ " faciles à utiliser. Tous les " +#~ "nouveaux exemples sont basés dans le " +#~ "répertoire ``examples " +#~ "`_." + +#~ msgid "Quickstart TensorFlow/Keras" +#~ msgstr "Démarrage rapide de TensorFlow/Keras" + +#~ msgid "Legacy Examples (`flwr_example`)" +#~ msgstr "Exemples hérités (`flwr_example`)" #~ msgid "" -#~ "User :code:`ray status` to check all " -#~ "nodes connected to your head node " -#~ "as well as the total resources " -#~ "available to the :code:`VirtualClientEngine`." +#~ "The useage examples in `flwr_example` " +#~ "are deprecated and will be removed " +#~ "in the future. New examples are " +#~ "provided as standalone projects in " +#~ "`examples `_." #~ msgstr "" +#~ "Les exemples d'utilisation dans `flwr_example`" +#~ " sont obsolètes et seront supprimés à" +#~ " l'avenir. De nouveaux exemples sont " +#~ "fournis en tant que projets autonomes" +#~ " dans `examples " +#~ "`_." + +#~ msgid "Extra Dependencies" +#~ msgstr "Dépendances supplémentaires" #~ msgid "" -#~ "When attaching a new node to the" -#~ " head, all its resources (i.e. 
all" -#~ " CPUs, all GPUs) will be visible " -#~ "by the head node. This means that" -#~ " the :code:`VirtualClientEngine` can schedule " -#~ "as many `virtual` clients as that " -#~ "node can possible run. In some " -#~ "settings you might want to exclude " -#~ "certain resources from the simulation. " -#~ "You can do this by appending " -#~ "`--num-cpus=` and/or `--num-" -#~ "gpus=` in any :code:`ray " -#~ "start` command (including when starting " -#~ "the head)" +#~ "The core Flower framework keeps a " +#~ "minimal set of dependencies. The " +#~ "examples demonstrate Flower in the " +#~ "context of different machine learning " +#~ "frameworks, so additional dependencies need" +#~ " to be installed before an example" +#~ " can be run." #~ msgstr "" +#~ "Le noyau du framework Flower conserve" +#~ " un ensemble minimal de dépendances. " +#~ "Les exemples démontrent Flower dans le" +#~ " contexte de différents frameworks " +#~ "d'apprentissage automatique, de sorte que " +#~ "des dépendances supplémentaires doivent être" +#~ " installées avant qu'un exemple puisse " +#~ "être exécuté." -#~ msgid "Considerations for simulations" -#~ msgstr "Simulation de moniteur" +#~ msgid "For PyTorch examples::" +#~ msgstr "Pour les exemples de PyTorch: :" + +#~ msgid "For TensorFlow examples::" +#~ msgstr "Pour les exemples de TensorFlow : :" + +#~ msgid "For both PyTorch and TensorFlow examples::" +#~ msgstr "Pour les exemples PyTorch et TensorFlow: :" #~ msgid "" -#~ "We are actively working on these " -#~ "fronts so to make it trivial to" -#~ " run any FL workload with Flower " -#~ "simulation." +#~ "Please consult :code:`pyproject.toml` for a" +#~ " full list of possible extras " +#~ "(section :code:`[tool.poetry.extras]`)." #~ msgstr "" +#~ "Tu peux consulter :code:`pyproject.toml` pour" +#~ " une liste complète des extras " +#~ "possibles (section :code:`[tool.poetry.extras]`)." 
+ +#~ msgid "PyTorch Examples" +#~ msgstr "Exemples de PyTorch" #~ msgid "" -#~ "The current VCE allows you to run" -#~ " Federated Learning workloads in simulation" -#~ " mode whether you are prototyping " -#~ "simple scenarios on your personal laptop" -#~ " or you want to train a complex" -#~ " FL pipeline across multiple high-" -#~ "performance GPU nodes. While we add " -#~ "more capabilities to the VCE, the " -#~ "points below highlight some of the " -#~ "considerations to keep in mind when " -#~ "designing your FL pipeline with Flower." -#~ " We also highlight a couple of " -#~ "current limitations in our implementation." +#~ "Our PyTorch examples are based on " +#~ "PyTorch 1.7. They should work with " +#~ "other releases as well. So far, we" +#~ " provide the following examples." #~ msgstr "" +#~ "Nos exemples PyTorch sont basés sur " +#~ "PyTorch 1.7. Ils devraient fonctionner " +#~ "avec d'autres versions également. Jusqu'à " +#~ "présent, nous fournissons les exemples " +#~ "suivants." -#~ msgid "GPU resources" -#~ msgstr "Ressources" +#~ msgid "CIFAR-10 Image Classification" +#~ msgstr "Classification d'images CIFAR-10" #~ msgid "" -#~ "The VCE assigns a share of GPU " -#~ "memory to a client that specifies " -#~ "the key :code:`num_gpus` in " -#~ ":code:`client_resources`. This being said, Ray" -#~ " (used internally by the VCE) is " -#~ "by default:" +#~ "`CIFAR-10 and CIFAR-100 " +#~ "`_ are " +#~ "popular RGB image datasets. The Flower" +#~ " CIFAR-10 example uses PyTorch to " +#~ "train a simple CNN classifier in a" +#~ " federated learning setup with two " +#~ "clients." #~ msgstr "" +#~ "`CIFAR-10 et CIFAR-100 " +#~ "`_ sont des" +#~ " ensembles de données d'images RVB " +#~ "populaires. L'exemple Flower CIFAR-10 utilise" +#~ " PyTorch pour former un classificateur " +#~ "CNN simple dans une configuration " +#~ "d'apprentissage fédéré avec deux clients." -#~ msgid "" -#~ "not aware of the total VRAM " -#~ "available on the GPUs. 
This means " -#~ "that if you set :code:`num_gpus=0.5` and" -#~ " you have two GPUs in your " -#~ "system with different (e.g. 32GB and " -#~ "8GB) VRAM amounts, they both would " -#~ "run 2 clients concurrently." -#~ msgstr "" +#~ msgid "First, start a Flower server:" +#~ msgstr "Tout d'abord, démarre un serveur Flower :" -#~ msgid "" -#~ "not aware of other unrelated (i.e. " -#~ "not created by the VCE) workloads " -#~ "are running on the GPU. Two " -#~ "takeaways from this are:" -#~ msgstr "" +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" -#~ msgid "" -#~ "Your Flower server might need a " -#~ "GPU to evaluate the `global model` " -#~ "after aggregation (by instance when " -#~ "making use of the `evaluate method " -#~ "`_)" +#~ msgid "Then, start the two clients in a new terminal window:" #~ msgstr "" +#~ "Ensuite, démarre les deux clients dans" +#~ " une nouvelle fenêtre de terminal :" + +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" + +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." +#~ msgstr "Pour plus de détails, voir :code:`src/py/flwr_example/pytorch_cifar`." + +#~ msgid "ImageNet-2012 Image Classification" +#~ msgstr "ImageNet-2012 Classification des images" #~ msgid "" -#~ "If you want to run several " -#~ "independent Flower simulations on the " -#~ "same machine you need to mask-out" -#~ " your GPUs with " -#~ ":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching" -#~ " your experiment." +#~ "`ImageNet-2012 `_ is " +#~ "one of the major computer vision " +#~ "datasets. The Flower ImageNet example " +#~ "uses PyTorch to train a ResNet-18 " +#~ "classifier in a federated learning setup" +#~ " with ten clients." #~ msgstr "" +#~ "`ImageNet-2012 `_ est " +#~ "l'un des principaux ensembles de données" +#~ " de vision par ordinateur. 
L'exemple " +#~ "Flower ImageNet utilise PyTorch pour " +#~ "entraîner un classificateur ResNet-18 dans " +#~ "une configuration d'apprentissage fédéré avec" +#~ " dix clients." -#~ msgid "" -#~ "In addition, the GPU resource limits " -#~ "passed to :code:`client_resources` are not " -#~ "`enforced` (i.e. they can be exceeded)" -#~ " which can result in the situation" -#~ " of client using more VRAM than " -#~ "the ratio specified when starting the" -#~ " simulation." +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" + +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" + +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." #~ msgstr "" +#~ "Pour plus de détails, voir " +#~ ":code:`src/py/flwr_example/pytorch_imagenet`." -#~ msgid "TensorFlow with GPUs" +#~ msgid "TensorFlow Examples" #~ msgstr "Exemples de TensorFlow" #~ msgid "" -#~ "When `using a GPU with TensorFlow " -#~ "`_ nearly your " -#~ "entire GPU memory of all your GPUs" -#~ " visible to the process will be " -#~ "mapped. This is done by TensorFlow " -#~ "for optimization purposes. However, in " -#~ "settings such as FL simulations where" -#~ " we want to split the GPU into" -#~ " multiple `virtual` clients, this is " -#~ "not a desirable mechanism. Luckily we" -#~ " can disable this default behavior by" -#~ " `enabling memory growth " -#~ "`_." +#~ "Our TensorFlow examples are based on " +#~ "TensorFlow 2.0 or newer. So far, " +#~ "we provide the following examples." #~ msgstr "" +#~ "Nos exemples TensorFlow sont basés sur" +#~ " TensorFlow 2.0 ou une version plus" +#~ " récente. Jusqu'à présent, nous te " +#~ "proposons les exemples suivants." -#~ msgid "" -#~ "This would need to be done in " -#~ "the main process (which is where " -#~ "the server would run) and in each" -#~ " Actor created by the VCE. 
By " -#~ "means of :code:`actor_kwargs` we can " -#~ "pass the reserved key `\"on_actor_init_fn\"`" -#~ " in order to specify a function " -#~ "to be executed upon actor " -#~ "initialization. In this case, to enable" -#~ " GPU growth for TF workloads. It " -#~ "would look as follows:" -#~ msgstr "" +#~ msgid "Fashion-MNIST Image Classification" +#~ msgstr "Classification d'images Fashion-MNIST" #~ msgid "" -#~ "This is precisely the mechanism used " -#~ "in `Tensorflow/Keras Simulation " -#~ "`_ example." +#~ "`Fashion-MNIST `_ is often used as " +#~ "the \"Hello, world!\" of machine " +#~ "learning. We follow this tradition and" +#~ " provide an example which samples " +#~ "random local datasets from Fashion-MNIST" +#~ " and trains a simple image " +#~ "classification model over those partitions." #~ msgstr "" -#~ "`Quickstart TensorFlow (Code) " -#~ "`_" +#~ "nous suivons cette tradition et " +#~ "fournissons un exemple qui échantillonne " +#~ "des ensembles de données locales " +#~ "aléatoires de Fashion-MNIST et entraîne" +#~ " un modèle simple de classification " +#~ "d'images sur ces partitions." -#~ msgid "Multi-node setups" -#~ msgstr "" +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" + +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" #~ msgid "" -#~ "The VCE does not currently offer a" -#~ " way to control on which node a" -#~ " particular `virtual` client is executed." -#~ " In other words, if more than a" -#~ " single node have the resources " -#~ "needed by a client to run, then" -#~ " any of those nodes could get " -#~ "the client workload scheduled onto. " -#~ "Later in the FL process (i.e. in" -#~ " a different round) the same client" -#~ " could be executed by a different " -#~ "node. 
Depending on how your clients " -#~ "access their datasets, this might " -#~ "require either having a copy of " -#~ "all dataset partitions on all nodes " -#~ "or a dataset serving mechanism (e.g. " -#~ "using nfs, a database) to circumvent " -#~ "data duplication." +#~ "For more details, see " +#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." #~ msgstr "" +#~ "Pour plus de détails, voir " +#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." #~ msgid "" -#~ "By definition virtual clients are " -#~ "`stateless` due to their ephemeral " -#~ "nature. A client state can be " -#~ "implemented as part of the Flower " -#~ "client class but users need to " -#~ "ensure this saved to persistent storage" -#~ " (e.g. a database, disk) and that " -#~ "can be retrieve later by the same" -#~ " client regardless on which node it" -#~ " is running from. This is related " -#~ "to the point above also since, in" -#~ " some way, the client's dataset could" -#~ " be seen as a type of `state`." +#~ "MXNet is no longer maintained and " +#~ "has been moved into `Attic " +#~ "`_. As a " +#~ "result, we would encourage you to " +#~ "use other ML frameworks alongise Flower," +#~ " for example, PyTorch. This tutorial " +#~ "might be removed in future versions " +#~ "of Flower." #~ msgstr "" -#~ msgid "Save and load model checkpoints" -#~ msgstr "Sauvegarde et chargement des points de contrôle PyTorch" - #~ msgid "" -#~ "Flower does not automatically save model" -#~ " updates on the server-side. This " -#~ "how-to guide describes the steps " -#~ "to save (and load) model checkpoints " -#~ "in Flower." +#~ "Now that you have known how " +#~ "federated XGBoost work with Flower, it's" +#~ " time to run some more comprehensive" +#~ " experiments by customising the " +#~ "experimental settings. 
In the xgboost-" +#~ "comprehensive example (`full code " +#~ "`_), we provide more options " +#~ "to define various experimental setups, " +#~ "including aggregation strategies, data " +#~ "partitioning and centralised/distributed evaluation." +#~ " We also support `Flower simulation " +#~ "`_ making it easy to " +#~ "simulate large client cohorts in a " +#~ "resource-aware manner. Let's take a " +#~ "look!" #~ msgstr "" -#~ msgid "Legacy example guides" +#~ msgid "|31e4b1afa87c4b968327bbeafbf184d4|" #~ msgstr "" -#~ msgid "Contributor tutorials" -#~ msgstr "Configuration du contributeur" - -#~ msgid "Contributor explanations" -#~ msgstr "Explications" - -#~ msgid "Flower Framework Documentation" -#~ msgstr "Documentation de Flower" - -#~ msgid "PyTorch" -#~ msgstr "Exemples de PyTorch" - -#~ msgid "TensorFlow" -#~ msgstr "TensorFlow" - -#~ msgid "Flower CLI reference" -#~ msgstr "Client de Flower" +#~ msgid "|c9d935b4284e4c389a33d86b33e07c0a|" +#~ msgstr "" -#~ msgid "flwr (Python API reference)" -#~ msgstr "Référence pour l'API" +#~ msgid "|00727b5faffb468f84dd1b03ded88638|" +#~ msgstr "" -#~ msgid "Unreleased" -#~ msgstr "Inédit" +#~ msgid "|daf0cf0ff4c24fd29439af78416cf47b|" +#~ msgstr "" -#~ msgid "**Deprecate Python 3.7**" -#~ msgstr "**Deprecate Python 3.7**" +#~ msgid "|9f093007080d471d94ca90d3e9fde9b6|" +#~ msgstr "" -#~ msgid "" -#~ "Since Python 3.7 reached its end " -#~ "of life (EOL) on 2023-06-27, support " -#~ "for Python 3.7 is now deprecated " -#~ "and will be removed in an upcoming" -#~ " release." +#~ msgid "|46a26e6150e0479fbd3dfd655f36eb13|" #~ msgstr "" -#~ "Étant donné que Python 3.7 a " -#~ "atteint sa fin de vie (EOL) le " -#~ "2023-06-27, la prise en charge de " -#~ "Python 3.7 est désormais dépréciée et" -#~ " sera supprimée dans une prochaine " -#~ "version." 
-#~ msgid "" -#~ "**Add new** `FedTrimmedAvg` **strategy** " -#~ "([#1769](https://github.com/adap/flower/pull/1769), " -#~ "[#1853](https://github.com/adap/flower/pull/1853))" +#~ msgid "|3daba297595c4c7fb845d90404a6179a|" #~ msgstr "" -#~ "**Ajouter un nouveau** `FedTrimmedAvg` " -#~ "**stratégie** " -#~ "([#1769](https://github.com/adap/flower/pull/1769), " -#~ "[#1853](https://github.com/adap/flower/pull/1853))" -#~ msgid "" -#~ "The new `FedTrimmedAvg` strategy implements" -#~ " Trimmed Mean by [Dong Yin, " -#~ "2018](https://arxiv.org/abs/1803.01498)" +#~ msgid "|5769874fa9c4455b80b2efda850d39d7|" #~ msgstr "" -#~ "La nouvelle stratégie `FedTrimmedAvg` met " -#~ "en œuvre la moyenne trimmée par " -#~ "[Dong Yin, 2018](https://arxiv.org/abs/1803.01498)" -#~ msgid "" -#~ "**Add parameter aggregation to** `mt-" -#~ "pytorch` **code example** " -#~ "([#1785](https://github.com/adap/flower/pull/1785))" +#~ msgid "|ba47ffb421814b0f8f9fa5719093d839|" #~ msgstr "" -#~ "**Ajouter l'agrégation des paramètres à** " -#~ "`mt-pytorch` **exemple de code** " -#~ "([#1785](https://github.com/adap/flower/pull/1785))" -#~ msgid "" -#~ "The `mt-pytorch` example shows how " -#~ "to aggregate parameters when writing a" -#~ " driver script. The included `driver.py`" -#~ " and `server.py` have been aligned to" -#~ " demonstrate both the low-level way" -#~ " and the high-level way of " -#~ "building server-side logic." +#~ msgid "|aeac5bf79cbf497082e979834717e01b|" #~ msgstr "" -#~ "L'exemple `mt-pytorch` montre comment " -#~ "agréger des paramètres lors de " -#~ "l'écriture d'un script de pilote. Les" -#~ " fichiers `driver.py` et `server.py` inclus" -#~ " ont été alignés pour démontrer à " -#~ "la fois la manière de bas niveau" -#~ " et la manière de haut niveau " -#~ "de construire la logique côté serveur." 
-#~ msgid "" -#~ "**Introduce (experimental) gRPC request-" -#~ "response API** " -#~ "([#1867](https://github.com/adap/flower/pull/1867), " -#~ "[#1901](https://github.com/adap/flower/pull/1901))" +#~ msgid "|ce27ed4bbe95459dba016afc42486ba2|" #~ msgstr "" -#~ "**Introduire l'API demande-réponse gRPC " -#~ "(expérimentale)** " -#~ "([#1867](https://github.com/adap/flower/pull/1867), " -#~ "[#1901](https://github.com/adap/flower/pull/1901))" -#~ msgid "" -#~ "In addition to the existing gRPC " -#~ "API (based on bidirectional streaming) " -#~ "and the experimental REST API, there " -#~ "is now a new gRPC API that " -#~ "uses a request-response model to " -#~ "communicate with client nodes." +#~ msgid "|ae94a7f71dda443cbec2385751427d41|" #~ msgstr "" -#~ "En plus de l'API gRPC existante " -#~ "(basée sur un flux bidirectionnel) et" -#~ " de l'API REST expérimentale, il " -#~ "existe désormais une nouvelle API gRPC" -#~ " qui utilise un modèle demande-" -#~ "réponse pour communiquer avec les nœuds" -#~ " clients." -#~ msgid "" -#~ "Please note: The gRPC request-response" -#~ " API is still experimental and will" -#~ " likely change significantly over time." +#~ msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" #~ msgstr "" -#~ "Remarque : l'API requête-réponse gRPC" -#~ " est encore expérimentale et est " -#~ "susceptible de changer de manière " -#~ "significative au fil du temps." -#~ msgid "" -#~ "**Replace the eperimental** " -#~ "`start_client(rest=True)` **with the new** " -#~ "`start_client(transport=\"rest\")` " -#~ "([#1880](https://github.com/adap/flower/pull/1880))" +#~ msgid "|08cb60859b07461588fe44e55810b050|" #~ msgstr "" -#~ "**Remplacez le fichier expérimental** " -#~ "`start_client(rest=True) **par le nouveau** " -#~ "`start_client(transport=\"rest\")` " -#~ "([#1880](https://github.com/adap/flower/pull/1880))" #~ msgid "" -#~ "The (experimental) `start_client` argument " -#~ "`rest` was deprecated in favor of " -#~ "a new argument `transport`. 
" -#~ "`start_client(transport=\"rest\")` will yield the" -#~ " same behaviour as `start_client(rest=True)` " -#~ "did before. All code should migrate " -#~ "to the new argument `transport`. The " -#~ "deprecated argument `rest` will be " -#~ "removed in a future release." +#~ "Flower provides pre-made docker images" +#~ " on `Docker Hub " +#~ "`_ that include" +#~ " all necessary dependencies for running " +#~ "the server. You can also build " +#~ "your own custom docker images from " +#~ "scratch with a different version of " +#~ "Python or Ubuntu if that is what" +#~ " you need. In this guide, we " +#~ "will explain what images exist and " +#~ "how to build them locally." #~ msgstr "" #~ msgid "" -#~ "**Migrate experimental REST API to " -#~ "Starlette** ([2171](https://github.com/adap/flower/pull/2171))" +#~ "Currently, Flower provides two images, a" +#~ " base image and a server image. " +#~ "There will also be a client image" +#~ " soon. The base image, as the " +#~ "name suggests, contains basic dependencies " +#~ "that both the server and the " +#~ "client need. This includes system " +#~ "dependencies, Python and Python tools. " +#~ "The server image is based on the" +#~ " base image, but it additionally " +#~ "installs the Flower server using " +#~ "``pip``." #~ msgstr "" -#~ "**Migrer l'API REST expérimentale vers " -#~ "Starlette** ([2171](https://github.com/adap/flower/pull/2171))" #~ msgid "" -#~ "The (experimental) REST API used to " -#~ "be implemented in " -#~ "[FastAPI](https://fastapi.tiangolo.com/), but it has" -#~ " now been migrated to use " -#~ "[Starlette](https://www.starlette.io/) directly." +#~ "Both, base and server image are " +#~ "configured via build arguments. Through " +#~ "build arguments, we can make our " +#~ "build more flexible. For example, in " +#~ "the base image, we can specify the" +#~ " version of Python to install using" +#~ " the ``PYTHON_VERSION`` build argument. 
" +#~ "Some of the build arguments have " +#~ "default values, others must be specified" +#~ " when building the image. All " +#~ "available build arguments for each image" +#~ " are listed in one of the " +#~ "tables below." #~ msgstr "" -#~ "L'API REST (expérimentale) était auparavant" -#~ " implémentée dans " -#~ "[FastAPI](https://fastapi.tiangolo.com/), mais elle " -#~ "a maintenant été migrée pour utiliser" -#~ " directement [Starlette](https://www.starlette.io/)." -#~ msgid "" -#~ "**Add a new gRPC option** " -#~ "([#2197](https://github.com/adap/flower/pull/2197))" +#~ msgid "Defaults to ``flwr/server``." #~ msgstr "" -#~ "**Ajouter une nouvelle option gRPC** " -#~ "([#2197](https://github.com/adap/flower/pull/2197))" -#~ msgid "" -#~ "We now start a gRPC server with" -#~ " the `grpc.keepalive_permit_without_calls` option " -#~ "set to 0 by default. This prevents" -#~ " the clients from sending keepalive " -#~ "pings when there is no outstanding " -#~ "stream." +#~ msgid "``BASE_IMAGE_TAG``" #~ msgstr "" -#~ "Nous démarrons maintenant un serveur " -#~ "gRPC avec l'option " -#~ "`grpc.keepalive_permit_without_calls` réglée sur 0" -#~ " par défaut, ce qui empêche les " -#~ "clients d'envoyer des pings de maintien" -#~ " lorsqu'il n'y a pas de flux en" -#~ " attente." -#~ msgid "" -#~ "**General improvements** " -#~ "([#1872](https://github.com/adap/flower/pull/1872), " -#~ "[#1866](https://github.com/adap/flower/pull/1866), " -#~ "[#1884](https://github.com/adap/flower/pull/1884))" +#~ msgid "The image tag of the base image." #~ msgstr "" -#~ "**Mettre à jour les exemples de " -#~ "code** ([#1291](https://github.com/adap/flower/pull/1291), " -#~ "[#1286](https://github.com/adap/flower/pull/1286), " -#~ "[#1282](https://github.com/adap/flower/pull/1282))" -#~ msgid "Example projects" -#~ msgstr "Exemples" +#~ msgid "Defaults to ``py3.11-ubuntu22.04``." 
+#~ msgstr "" #~ msgid "" -#~ "`Flower simulation PyTorch " -#~ "`_" +#~ "The following example creates a server" +#~ " image with the official Flower base" +#~ " image py3.11-ubuntu22.04 and Flower 1.7.0:" #~ msgstr "" -#~ "`Flower Quickstart (TensorFlow/Keras) " -#~ "`_" #~ msgid "" -#~ "`Android Kotlin example " -#~ "`_" +#~ "The name of image is ``flwr_server`` " +#~ "and the tag ``0.1.0``. Remember that " +#~ "the build arguments as well as the" +#~ " name and tag can be adapted to" +#~ " your needs. These values serve as" +#~ " examples only." #~ msgstr "" -#~ msgid "`Android Java example `_" +#~ msgid "" +#~ "If you want to use your own " +#~ "base image instead of the official " +#~ "Flower base image, all you need to" +#~ " do is set the ``BASE_REPOSITORY`` " +#~ "and ``BASE_IMAGE_TAG`` build arguments. The" +#~ " value of ``BASE_REPOSITORY`` must match" +#~ " the name of your image and the" +#~ " value of ``BASE_IMAGE_TAG`` must match " +#~ "the tag of your image." #~ msgstr "" -#~ msgid "Build a strategy from scratch" -#~ msgstr "Élaborer une stratégie à partir de zéro" - -#~ msgid "Customize the client" -#~ msgstr "Création du client IMDBC" - -#~ msgid "Get started with Flower" +#~ msgid "" +#~ "It is important to follow the " +#~ "instructions described in comments. For " +#~ "instance, in order to not break " +#~ "how our changelog system works, you " +#~ "should read the information above the" +#~ " ``Changelog entry`` section carefully. You" +#~ " can also checkout some examples and" +#~ " details in the :ref:`changelogentry` " +#~ "appendix." #~ msgstr "" -#~ msgid "Quickstart Android" -#~ msgstr "Démarrage rapide d'Android" +#~ msgid "Open a PR (as shown above)" +#~ msgstr "Ouvre un RP (comme indiqué ci-dessus)" #~ msgid "" -#~ "Let's build a federated learning system" -#~ " using TFLite and Flower on Android!" -#~ msgstr "" -#~ "Construisons un système d'apprentissage fédéré" -#~ " en utilisant TFLite et Flower sur" -#~ " Android !" 
+#~ "Add CI job to deploy the staging" +#~ " system when the ``main`` branch " +#~ "changes" +#~ msgstr "Add CI job to deploy the staging system when the `main` branch changes" + +#~ msgid "Changelog entry" +#~ msgstr "Changelog" #~ msgid "" -#~ "Please refer to the `full code " -#~ "example " -#~ "`_ to" -#~ " learn more." +#~ "When opening a new PR, inside its" +#~ " description, there should be a " +#~ "``Changelog entry`` header." #~ msgstr "" -#~ "Réfère-toi à l'exemple de code " -#~ "complet " -#~ "`_ " -#~ "pour en savoir plus." - -#~ msgid "Quickstart iOS" -#~ msgstr "Démarrage rapide iOS" #~ msgid "" -#~ "In this tutorial we will learn how" -#~ " to train a Neural Network on " -#~ "MNIST using Flower and CoreML on " -#~ "iOS devices." +#~ "Above this header you should see " +#~ "the following comment that explains how" +#~ " to write your changelog entry:" #~ msgstr "" -#~ "Dans ce tutoriel, nous allons apprendre" -#~ " à former un réseau neuronal sur " -#~ "MNIST en utilisant Flower et CoreML " -#~ "sur les appareils iOS." #~ msgid "" -#~ "First of all, for running the " -#~ "Flower Python server, it is recommended" -#~ " to create a virtual environment and" -#~ " run everything within a `virtualenv " -#~ "`_. " -#~ "For the Flower client implementation in" -#~ " iOS, it is recommended to use " -#~ "Xcode as our IDE." +#~ "Inside the following 'Changelog entry' " +#~ "section, you should put the description" +#~ " of your changes that will be " +#~ "added to the changelog alongside your" +#~ " PR title." #~ msgstr "" -#~ "Tout d'abord, pour l'exécution du " -#~ "serveur Flower Python, il est recommandé" -#~ " de créer un environnement virtuel et" -#~ " de tout exécuter au sein d'un " -#~ "`virtualenv `_. Pour l'implémentation du client" -#~ " Flower dans iOS, il est recommandé" -#~ " d'utiliser Xcode comme notre IDE." #~ msgid "" -#~ "Our example consists of one Python " -#~ "*server* and two iPhone *clients* that" -#~ " all have the same model." 
+#~ "If the section is completely empty " +#~ "(without any token) or non-existent, " +#~ "the changelog will just contain the " +#~ "title of the PR for the changelog" +#~ " entry, without any description." #~ msgstr "" -#~ "Notre exemple se compose d'un *serveur*" -#~ " Python et de deux *clients* iPhone" -#~ " qui ont tous le même modèle." #~ msgid "" -#~ "*Clients* are responsible for generating " -#~ "individual weight updates for the model" -#~ " based on their local datasets. These" -#~ " updates are then sent to the " -#~ "*server* which will aggregate them to" -#~ " produce a better model. Finally, the" -#~ " *server* sends this improved version " -#~ "of the model back to each " -#~ "*client*. A complete cycle of weight " -#~ "updates is called a *round*." +#~ "If the section contains some text " +#~ "other than tokens, it will use it" +#~ " to add a description to the " +#~ "change." #~ msgstr "" -#~ "*Les clients* sont chargés de générer" -#~ " des mises à jour de poids " -#~ "individuelles pour le modèle en fonction" -#~ " de leurs ensembles de données " -#~ "locaux. Ces mises à jour sont " -#~ "ensuite envoyées au *serveur* qui les" -#~ " agrège pour produire un meilleur " -#~ "modèle. Enfin, le *serveur* renvoie " -#~ "cette version améliorée du modèle à " -#~ "chaque *client*. Un cycle complet de " -#~ "mises à jour de poids s'appelle un" -#~ " *round*." #~ msgid "" -#~ "Now that we have a rough idea " -#~ "of what is going on, let's get " -#~ "started to setup our Flower server " -#~ "environment. We first need to install" -#~ " Flower. You can do this by " -#~ "using pip:" +#~ "If the section contains one of the" +#~ " following tokens it will ignore any" +#~ " other text and put the PR " +#~ "under the corresponding section of the" +#~ " changelog:" #~ msgstr "" -#~ "Maintenant que nous avons une idée " -#~ "approximative de ce qui se passe, " -#~ "commençons à configurer notre environnement" -#~ " de serveur Flower. 
Nous devons " -#~ "d'abord installer Flower, ce que tu " -#~ "peux faire à l'aide de pip :" - -#~ msgid "Or Poetry:" -#~ msgstr "Ou de la poésie :" -#~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training using CoreML " -#~ "as our local training pipeline and " -#~ "MNIST as our dataset. For simplicity " -#~ "reasons we will use the complete " -#~ "Flower client with CoreML, that has " -#~ "been implemented and stored inside the" -#~ " Swift SDK. The client implementation " -#~ "can be seen below:" +#~ msgid " is for classifying a PR as a general improvement." #~ msgstr "" -#~ "Maintenant que toutes nos dépendances " -#~ "sont installées, exécutons une simple " -#~ "formation distribuée en utilisant CoreML " -#~ "comme pipeline de formation local et " -#~ "MNIST comme ensemble de données. Pour" -#~ " des raisons de simplicité, nous " -#~ "utiliserons le client Flower complet " -#~ "avec CoreML, qui a été mis en " -#~ "œuvre et stocké à l'intérieur du " -#~ "SDK Swift. La mise en œuvre du " -#~ "client peut être vue ci-dessous :" -#~ msgid "" -#~ "Let's create a new application project" -#~ " in Xcode and add :code:`flwr` as " -#~ "a dependency in your project. For " -#~ "our application, we will store the " -#~ "logic of our app in " -#~ ":code:`FLiOSModel.swift` and the UI elements" -#~ " in :code:`ContentView.swift`. We will " -#~ "focus more on :code:`FLiOSModel.swift` in " -#~ "this quickstart. Please refer to the " -#~ "`full code example " -#~ "`_ to " -#~ "learn more about the app." +#~ msgid " is to not add the PR to the changelog" #~ msgstr "" -#~ "Créons un nouveau projet d'application " -#~ "dans Xcode et ajoutons :code:`flwr` " -#~ "comme dépendance dans ton projet. 
Pour" -#~ " notre application, nous stockerons la " -#~ "logique de notre application dans " -#~ ":code:`FLiOSModel.swift` et les éléments de" -#~ " l'interface utilisateur dans " -#~ ":code:`ContentView.swift`.Nous nous concentrerons " -#~ "davantage sur :code:`FLiOSModel.swift` dans ce" -#~ " quickstart. N'hésite pas à te " -#~ "référer à l'`exemple de code complet " -#~ "`_ pour" -#~ " en savoir plus sur l'application." -#~ msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" +#~ msgid " is to add a general baselines change to the PR" #~ msgstr "" -#~ "Importe les paquets liés à Flower " -#~ "et CoreML dans :code:`FLiOSModel.swift` :" -#~ msgid "" -#~ "Then add the mlmodel to the " -#~ "project simply by drag-and-drop, " -#~ "the mlmodel will be bundled inside " -#~ "the application during deployment to " -#~ "your iOS device. We need to pass" -#~ " the url to access mlmodel and " -#~ "run CoreML machine learning processes, " -#~ "it can be retrieved by calling the" -#~ " function :code:`Bundle.main.url`. For the " -#~ "MNIST dataset, we need to preprocess " -#~ "it into :code:`MLBatchProvider` object. The" -#~ " preprocessing is done inside " -#~ ":code:`DataLoader.swift`." +#~ msgid " is to add a general examples change to the PR" #~ msgstr "" -#~ "Ensuite, ajoute le mlmodel au projet " -#~ "simplement par glisser-déposer, le " -#~ "mlmodel sera regroupé à l'intérieur de" -#~ " l'application lors du déploiement sur " -#~ "ton appareil iOS. Nous devons passer " -#~ "l'url pour accéder au mlmodel et " -#~ "exécuter les processus d'apprentissage " -#~ "automatique CoreML, elle peut être " -#~ "récupérée en appelant la fonction " -#~ ":code:`Bundle.main.url`. Pour l'ensemble de " -#~ "données MNIST, nous devons le prétraiter" -#~ " dans l'objet :code:`MLBatchProvider`. Le " -#~ "prétraitement est effectué à l'intérieur " -#~ "de :code:`DataLoader.swift`." 
-#~ msgid "" -#~ "Since CoreML does not allow the " -#~ "model parameters to be seen before " -#~ "training, and accessing the model " -#~ "parameters during or after the training" -#~ " can only be done by specifying " -#~ "the layer name, we need to know" -#~ " this informations beforehand, through " -#~ "looking at the model specification, " -#~ "which are written as proto files. " -#~ "The implementation can be seen in " -#~ ":code:`MLModelInspect`." +#~ msgid " is to add a general sdk change to the PR" #~ msgstr "" -#~ msgid "" -#~ "After we have all of the necessary" -#~ " informations, let's create our Flower " -#~ "client." +#~ msgid " is to add a general simulations change to the PR" #~ msgstr "" -#~ msgid "" -#~ "Then start the Flower gRPC client " -#~ "and start communicating to the server" -#~ " by passing our Flower client to " -#~ "the function :code:`startFlwrGRPC`." +#~ msgid "Note that only one token should be used." #~ msgstr "" -#~ "Lance ensuite le client Flower gRPC " -#~ "et commence à communiquer avec le " -#~ "serveur en passant notre client Flower" -#~ " à la fonction :code:`startFlwrGRPC`." #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ "call the provided :code:`MLFlwrClient` and " -#~ "call :code:`startFlwrGRPC()`. The attribute " -#~ ":code:`hostname` and :code:`port` tells the" -#~ " client which server to connect to." -#~ " This can be done by entering " -#~ "the hostname and port in the " -#~ "application before clicking the start " -#~ "button to start the federated learning" -#~ " process." +#~ "Its content must have a specific " +#~ "format. We will break down what " +#~ "each possibility does:" #~ msgstr "" -#~ "C'est tout pour le client. Il nous" -#~ " suffit d'implémenter :code:`Client` ou " -#~ "d'appeler le :code:`MLFlwrClient` fourni et" -#~ " d'appeler :code:`startFlwrGRPC()`. 
L'attribut " -#~ ":code:`hostname` et :code:`port` indique au" -#~ " client à quel serveur se connecter." -#~ " Pour ce faire, il suffit d'entrer" -#~ " le nom d'hôte et le port dans" -#~ " l'application avant de cliquer sur " -#~ "le bouton de démarrage pour lancer " -#~ "le processus d'apprentissage fédéré." #~ msgid "" -#~ "Once the server is running we can" -#~ " start the clients in different " -#~ "terminals. Build and run the client " -#~ "through your Xcode, one through Xcode" -#~ " Simulator and the other by deploying" -#~ " it to your iPhone. To see more" -#~ " about how to deploy your app " -#~ "to iPhone or Simulator visit `here " -#~ "`_." +#~ "If the ``### Changelog entry`` section" +#~ " contains nothing or doesn't exist, " +#~ "the following text will be added " +#~ "to the changelog::" #~ msgstr "" -#~ "Une fois que le serveur fonctionne, " -#~ "nous pouvons démarrer les clients dans" -#~ " différents terminaux. Construis et exécute" -#~ " le client grâce à ton Xcode, " -#~ "l'un via le simulateur Xcode et " -#~ "l'autre en le déployant sur ton " -#~ "iPhone. Pour en savoir plus sur la" -#~ " façon de déployer ton application " -#~ "sur l'iPhone ou le simulateur, visite" -#~ " `ici `_." #~ msgid "" -#~ "Congratulations! You've successfully built and" -#~ " run your first federated learning " -#~ "system in your ios device. The " -#~ "full `source code " -#~ "`_ for" -#~ " this example can be found in " -#~ ":code:`examples/ios`." +#~ "If the ``### Changelog entry`` section" +#~ " contains a description (and no " +#~ "token), the following text will be " +#~ "added to the changelog::" #~ msgstr "" -#~ "Félicitations ! Tu as réussi à " -#~ "construire et à faire fonctionner ton" -#~ " premier système d'apprentissage fédéré " -#~ "dans ton appareil ios. Le `code " -#~ "source complet " -#~ "`_ de " -#~ "cet exemple se trouve dans " -#~ ":code:`examples/ios`." 
#~ msgid "" -#~ "`Star Flower on GitHub " -#~ "`__ ⭐️ and join " -#~ "the open-source Flower community on " -#~ "Slack to connect, ask questions, and " -#~ "get help: `Join Slack `__ 🌼 We'd love to hear" -#~ " from you in the ``#introductions`` " -#~ "channel! And if anything is unclear, " -#~ "head over to the ``#questions`` channel." +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, nothing will change" +#~ " in the changelog." #~ msgstr "" -#~ "`Star Flower on GitHub " -#~ "`__ ⭐️ et rejoignez" -#~ " la communauté open-source Flower sur" -#~ " Slack pour vous connecter, poser des" -#~ " questions et obtenir de l'aide : " -#~ "`Join Slack `__ " -#~ "🌼 Nous serions ravis d'avoir de " -#~ "vos nouvelles dans le canal " -#~ "``#introductions`` ! Et si quelque chose" -#~ " n'est pas clair, dirigez-vous vers" -#~ " le canal ``#questions``." -#~ msgid "|bd48315a61c14495babefe3c7918b493|" +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following text" +#~ " will be added to the changelog::" #~ msgstr "" -#~ msgid "|c00d9e5b0d324d96b86da8a78b05b14b|" +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" #~ msgstr "" -#~ msgid "|faae2ee10f4149c9907563c4f48ec6ea|" +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" #~ msgstr "" -#~ msgid "|13a655510351455292f145a61d6c15d6|" +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following text " +#~ "will be added to the changelog::" #~ msgstr "" -#~ msgid "|13949884182846e3a91433190a936ba9|" +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" #~ msgstr "" -#~ msgid "|9bf26cc650b146e88b4745df040ece37|" +#~ msgid "" +#~ "Note that only one token must be" +#~ " provided, 
otherwise, only the first " +#~ "action (in the order listed above), " +#~ "will be performed." #~ msgstr "" -#~ msgid "|1590915480fc41708bd43e48af9582f9|" +#~ msgid "Example: MXNet - Run MXNet Federated" +#~ msgstr "Exemple : MXNet - Exécuter MXNet Federated" + +#~ msgid "" +#~ "This tutorial will show you how to" +#~ " use Flower to build a federated " +#~ "version of an existing MXNet workload." +#~ " We are using MXNet to train a" +#~ " Sequential model on the MNIST " +#~ "dataset. We will structure the example" +#~ " similar to our `PyTorch - From " +#~ "Centralized To Federated " +#~ "`_ walkthrough. " +#~ "MXNet and PyTorch are very similar " +#~ "and a very good comparison between " +#~ "MXNet and PyTorch is given `here " +#~ "`_. First, " +#~ "we build a centralized training approach" +#~ " based on the `Handwritten Digit " +#~ "Recognition " +#~ "`_" +#~ " tutorial. Then, we build upon the" +#~ " centralized training code to run the" +#~ " training in a federated fashion." #~ msgstr "" +#~ "Ce tutoriel te montrera comment utiliser" +#~ " Flower pour construire une version " +#~ "fédérée d'une charge de travail MXNet" +#~ " existante. Nous utilisons MXNet pour " +#~ "former un modèle séquentiel sur " +#~ "l'ensemble de données MNIST. Nous " +#~ "structurerons l'exemple de la même " +#~ "manière que notre présentation `PyTorch " +#~ "- De la centralisation à la " +#~ "fédération `_. " +#~ "MXNet et PyTorch sont très similaires" +#~ " et une très bonne comparaison entre" +#~ " MXNet et PyTorch est donnée ici " +#~ "`_. Tout " +#~ "d'abord, nous construisons une approche " +#~ "de formation centralisée basée sur le" +#~ " tutoriel `Handandwritten Digit Recognition " +#~ "`_." +#~ " Ensuite, nous nous basons sur le " +#~ "code de formation centralisé pour " +#~ "exécuter la formation de manière " +#~ "fédérée." 
-#~ msgid "|e5ee96d702b64256b97b8ca99db10787|" +#~ msgid "" +#~ "Before we start setting up our " +#~ "MXNet example, we install the " +#~ ":code:`mxnet` and :code:`flwr` packages:" #~ msgstr "" +#~ "Avant de commencer à configurer notre" +#~ " exemple MXNet, nous installons les " +#~ "paquets :code:`mxnet` et :code:`flwr` :" -#~ msgid "|84840b244edd47c481278ce534c126cd|" +#~ msgid "MNIST Training with MXNet" +#~ msgstr "Formation MNIST avec MXNet" + +#~ msgid "" +#~ "We begin with a brief description " +#~ "of the centralized training code based" +#~ " on a :code:`Sequential` model. If " +#~ "you want a more in-depth " +#~ "explanation of what's going on then " +#~ "have a look at the official `MXNet" +#~ " tutorial " +#~ "`_." #~ msgstr "" +#~ "Nous commençons par une brève " +#~ "description du code d'entraînement centralisé" +#~ " basé sur un modèle :code:`Sequential`. " +#~ "Si tu veux une explication plus " +#~ "approfondie de ce qui se passe, " +#~ "jette un coup d'œil au tutoriel " +#~ "officiel `MXNet " +#~ "`_." -#~ msgid "|f33f5ebb3a844a2ba54bb6be3571b172|" +#~ msgid "" +#~ "Let's create a new file " +#~ "called:code:`mxnet_mnist.py` with all the " +#~ "components required for a traditional " +#~ "(centralized) MNIST training. First, the " +#~ "MXNet package :code:`mxnet` needs to be" +#~ " imported. You can see that we " +#~ "do not yet import the :code:`flwr` " +#~ "package for federated learning. This " +#~ "will be done later." #~ msgstr "" +#~ "Créons un nouveau fichier appelé " +#~ ":code:`mxnet_mnist.py` avec tous les " +#~ "composants requis pour un apprentissage " +#~ "MNIST traditionnel (centralisé). Tout d'abord," +#~ " le package MXNet :code:`mxnet` doit " +#~ "être importé. Tu peux voir que " +#~ "nous n'avons pas encore importé le " +#~ "package :code:`flwr` pour l'apprentissage " +#~ "fédéré. Cela sera fait plus tard." 
-#~ msgid "|5645db4ba9c945518d51ff234f35c797|" +#~ msgid "" +#~ "The :code:`load_data()` function loads the " +#~ "MNIST training and test sets." #~ msgstr "" +#~ "La fonction :code:`load_data()` charge les " +#~ "ensembles d'entraînement et de test " +#~ "MNIST." -#~ msgid "|317af8d28fcc479ab981047d058c4751|" +#~ msgid "" +#~ "As already mentioned, we will use " +#~ "the MNIST dataset for this machine " +#~ "learning workload. The model architecture " +#~ "(a very simple :code:`Sequential` model) " +#~ "is defined in :code:`model()`." #~ msgstr "" +#~ "Comme nous l'avons déjà mentionné, nous" +#~ " utiliserons l'ensemble de données MNIST" +#~ " pour cette charge de travail " +#~ "d'apprentissage automatique. L'architecture du " +#~ "modèle (un modèle :code:`Séquentiel` très " +#~ "simple) est définie dans :code:`model()`." -#~ msgid "|8bfd0e697a494d5385662debafade6bf|" +#~ msgid "" +#~ "We now need to define the training" +#~ " (function :code:`train()`) which loops " +#~ "over the training set and measures " +#~ "the loss for each batch of " +#~ "training examples." #~ msgstr "" +#~ "Nous devons maintenant définir la " +#~ "formation (fonction :code:`train()`) qui passe" +#~ " en boucle sur l'ensemble de la " +#~ "formation et mesure la perte pour " +#~ "chaque lot d'exemples de formation." #~ msgid "" -#~ "Differential privacy (DP) is often " -#~ "mentioned in the context of Federated" -#~ " Learning. It is a privacy-preserving" -#~ " method used when analyzing and " -#~ "sharing statistical data, ensuring the " -#~ "privacy of individual participants. DP " -#~ "achieves this by adding statistical " -#~ "noise to the model updates, ensuring " -#~ "any individual participants’ information " -#~ "cannot be distinguished or re-" -#~ "identified. This technique can be " -#~ "considered an optimization that provides " -#~ "a quantifiable privacy protection measure." +#~ "The evaluation of the model is " +#~ "defined in function :code:`test()`. 
The " +#~ "function loops over all test samples " +#~ "and measures the loss and accuracy " +#~ "of the model based on the test " +#~ "dataset." #~ msgstr "" -#~ "La confidentialité différentielle (DP) est " -#~ "souvent mentionnée dans le contexte de" -#~ " l'apprentissage fédéré. Il s'agit d'une" -#~ " méthode de préservation de la vie" -#~ " privée utilisée lors de l'analyse et" -#~ " du partage de données statistiques, " -#~ "garantissant la confidentialité des " -#~ "participants individuels. La DP y " -#~ "parvient en ajoutant un bruit " -#~ "statistique aux mises à jour du " -#~ "modèle, garantissant que toute information " -#~ "sur les participants individuels ne peut" -#~ " être distinguée ou réidentifiée. Cette " -#~ "technique peut être considérée comme une" -#~ " optimisation qui fournit une mesure " -#~ "quantifiable de protection de la vie " -#~ "privée." +#~ "L'évaluation du modèle est définie dans" +#~ " la fonction :code:`test()`. Cette fonction" +#~ " passe en boucle sur tous les " +#~ "échantillons de test et mesure la " +#~ "perte et la précision du modèle en" +#~ " fonction de l'ensemble des données " +#~ "de test." -#~ msgid "|e5dc001d27ad460caeab669e957b3c36|" +#~ msgid "" +#~ "Having defined the data loading, model" +#~ " architecture, training, and evaluation we" +#~ " can put everything together and " +#~ "train our model on MNIST. Note " +#~ "that the GPU/CPU device for the " +#~ "training and testing is defined within" +#~ " the :code:`ctx` (context)." #~ msgstr "" +#~ "Après avoir défini le chargement des " +#~ "données, l'architecture du modèle, " +#~ "l'entraînement et l'évaluation, nous pouvons" +#~ " tout assembler et entraîner notre " +#~ "modèle sur MNIST. Note que le " +#~ "dispositif GPU/CPU pour l'entraînement et " +#~ "le test est défini dans le " +#~ ":code:`ctx` (contexte)." 
-#~ msgid "API Reference - Flower binaries" +#~ msgid "You can now run your (centralized) MXNet machine learning workload:" +#~ msgstr "" +#~ "Tu peux maintenant exécuter ta charge" +#~ " de travail (centralisée) d'apprentissage " +#~ "automatique MXNet :" + +#~ msgid "" +#~ "So far this should all look fairly" +#~ " familiar if you've used MXNet (or" +#~ " even PyTorch) before. Let's take the" +#~ " next step and use what we've " +#~ "built to create a simple federated " +#~ "learning system consisting of one server" +#~ " and two clients." #~ msgstr "" +#~ "Jusqu'à présent, tout cela devrait te" +#~ " sembler assez familier si tu as " +#~ "déjà utilisé MXNet (ou même PyTorch)." +#~ " Passons à l'étape suivante et " +#~ "utilisons ce que nous avons construit" +#~ " pour créer un simple système " +#~ "d'apprentissage fédéré composé d'un serveur" +#~ " et de deux clients." -#~ msgid "API Reference - flwr" -#~ msgstr "Référence pour l'API" +#~ msgid "MXNet meets Flower" +#~ msgstr "MXNet rencontre Flower" #~ msgid "" -#~ "Defines whether or not the client " -#~ "is interacting with the server using " -#~ "the experimental REST API. This feature" -#~ " is experimental, it might change " -#~ "considerably in future versions of " -#~ "Flower." -#~ msgstr "" - -#~ msgid "Returns a client's set of properties." +#~ "So far, it was not easily possible" +#~ " to use MXNet workloads for federated" +#~ " learning because federated learning is " +#~ "not supported in MXNet. Since Flower " +#~ "is fully agnostic towards the underlying" +#~ " machine learning framework, it can " +#~ "be used to federated arbitrary machine" +#~ " learning workloads. This section will " +#~ "show you how Flower can be used" +#~ " to federate our centralized MXNet " +#~ "workload." #~ msgstr "" +#~ "Jusqu'à présent, il n'était pas facile" +#~ " d'utiliser les charges de travail " +#~ "MXNet pour l'apprentissage fédéré car " +#~ "l'apprentissage fédéré n'est pas pris en" +#~ " charge dans MXNet. 
Comme Flower est" +#~ " totalement agnostique vis-à-vis du cadre" +#~ " d'apprentissage automatique sous-jacent, " +#~ "il peut être utilisé pour fédérer " +#~ "des charges de travail d'apprentissage " +#~ "automatique arbitraires. Cette section te " +#~ "montrera comment Flower peut être " +#~ "utilisé pour fédérer notre charge de " +#~ "travail MXNet centralisée." #~ msgid "" -#~ "Defines whether or not the client " -#~ "is interacting with the server using " -#~ "the experimental REST API. This feature" -#~ " is experimental, it might be change" -#~ " considerably in future versions of " -#~ "Flower." +#~ "The concept to federate an existing " +#~ "workload is always the same and " +#~ "easy to understand. We have to " +#~ "start a *server* and then use the" +#~ " code in :code:`mxnet_mnist.py` for the " +#~ "*clients* that are connected to the " +#~ "*server*. The *server* sends model " +#~ "parameters to the clients. The *clients*" +#~ " run the training and update the " +#~ "parameters. The updated parameters are " +#~ "sent back to the *server* which " +#~ "averages all received parameter updates. " +#~ "This describes one round of the " +#~ "federated learning process and we repeat" +#~ " this for multiple rounds." #~ msgstr "" +#~ "Le concept pour fédérer une charge " +#~ "de travail existante est toujours le " +#~ "même et facile à comprendre. Nous " +#~ "devons démarrer un *serveur* et ensuite" +#~ " utiliser le code dans " +#~ ":code:`mxnet_mnist.py` pour les *clients* qui" +#~ " sont connectés au *serveur*. Le " +#~ "*serveur* envoie les paramètres du " +#~ "modèle aux clients. Les *clients* " +#~ "exécutent la formation et mettent à " +#~ "jour les paramètres. Les paramètres mis" +#~ " à jour sont renvoyés au *serveur*" +#~ " qui fait la moyenne de toutes " +#~ "les mises à jour de paramètres " +#~ "reçues. Ceci décrit un tour du " +#~ "processus d'apprentissage fédéré et nous " +#~ "répétons cette opération pour plusieurs " +#~ "tours." 
#~ msgid "" -#~ "A function creating client instances. " -#~ "The function must take a single " -#~ "str argument called `cid`. It should " -#~ "return a single client instance of " -#~ "type ClientLike. Note that the created" -#~ " client instances are ephemeral and " -#~ "will often be destroyed after a " -#~ "single method invocation. Since client " -#~ "instances are not long-lived, they " -#~ "should not attempt to carry state " -#~ "over method invocations. Any state " -#~ "required by the instance (model, " -#~ "dataset,hyperparameters, ...) should be " -#~ "(re-)created in either the call to " -#~ "`client_fn` or the call to any of" -#~ " the client methods (e.g., load " -#~ "evaluation data in the `evaluate` method" -#~ " itself)." +#~ "Finally, we will define our *client* " +#~ "logic in :code:`client.py` and build " +#~ "upon the previously defined MXNet " +#~ "training in :code:`mxnet_mnist.py`. Our " +#~ "*client* needs to import :code:`flwr`, " +#~ "but also :code:`mxnet` to update the " +#~ "parameters on our MXNet model:" #~ msgstr "" +#~ "Enfin, nous allons définir la logique" +#~ " de notre *client* dans :code:`client.py`" +#~ " et nous appuyer sur l'entraînement " +#~ "MXNet défini précédemment dans " +#~ ":code:`mxnet_mnist.py`. Notre *client* doit " +#~ "importer :code:`flwr`, mais aussi " +#~ ":code:`mxnet` pour mettre à jour les " +#~ "paramètres de notre modèle MXNet :" #~ msgid "" -#~ "A function creating client instances. " -#~ "The function must take a single " -#~ "str argument called `cid`. It should " -#~ "return a single client instance of " -#~ "type ClientLike. Note that the created" -#~ " client instances are ephemeral and " -#~ "will often be destroyed after a " -#~ "single method invocation. 
Since client " -#~ "instances are not long-lived, they " -#~ "should not" +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " :code:`flwr.client.Client` or " +#~ ":code:`flwr.client.NumPyClient`. Our implementation " +#~ "will be based on " +#~ ":code:`flwr.client.NumPyClient` and we'll call " +#~ "it :code:`MNISTClient`. :code:`NumPyClient` is " +#~ "slightly easier to implement than " +#~ ":code:`Client` if you use a framework" +#~ " with good NumPy interoperability (like " +#~ "PyTorch or MXNet) because it avoids " +#~ "some of the boilerplate that would " +#~ "otherwise be necessary. :code:`MNISTClient` " +#~ "needs to implement four methods, two " +#~ "methods for getting/setting model parameters," +#~ " one method for training the model," +#~ " and one method for testing the " +#~ "model:" #~ msgstr "" +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " :code:`flwr.client.Client` or " +#~ ":code:`flwr.client.NumPyClient`. Our implementation " +#~ "will be based on " +#~ ":code:`flwr.client.NumPyClient` and we'll call " +#~ "it :code:`MNISTClient`. :code:`NumPyClient` is " +#~ "slightly easier to implement than " +#~ ":code:`Client` if you use a framework" +#~ " with good NumPy interoperability (like " +#~ "PyTorch or MXNet) because it avoids " +#~ "some of the boilerplate that would " +#~ "otherwise be necessary. :code:`MNISTClient` " +#~ "needs to implement four methods, two " +#~ "methods for getting/setting model parameters," +#~ " one method for training the model," +#~ " and one method for testing the " +#~ "model:" -#~ msgid "attempt to carry state over method invocations. Any state required by" -#~ msgstr "" +#~ msgid "transform MXNet :code:`NDArray`'s to NumPy :code:`ndarray`'s" +#~ msgstr "transforme les :code:`NDArray` du MXNet en :code:`ndarray` de NumPy" #~ msgid "" -#~ "the instance (model, dataset,hyperparameters, " -#~ "...) 
should be (re-)created in either" -#~ " the call to `client_fn` or the " -#~ "call to any of the client methods" -#~ " (e.g., load evaluation data in the" -#~ " `evaluate` method itself)." +#~ "The challenging part is to transform " +#~ "the MXNet parameters from :code:`NDArray` " +#~ "to :code:`NumPy Arrays` to make it " +#~ "readable for Flower." #~ msgstr "" +#~ "La partie la plus difficile est de" +#~ " transformer les paramètres MXNet de " +#~ ":code:`NDArray` en :code:`NumPy Arrays` pour" +#~ " les rendre lisibles pour Flower." #~ msgid "" -#~ "\\frac{\\mu}{2} || w - w^t ||^2\n" -#~ "\n" +#~ "The two :code:`NumPyClient` methods " +#~ ":code:`fit` and :code:`evaluate` make use " +#~ "of the functions :code:`train()` and " +#~ ":code:`test()` previously defined in " +#~ ":code:`mxnet_mnist.py`. So what we really " +#~ "do here is we tell Flower through" +#~ " our :code:`NumPyClient` subclass which of" +#~ " our already defined functions to " +#~ "call for training and evaluation. We " +#~ "included type annotations to give you" +#~ " a better understanding of the data" +#~ " types that get passed around." #~ msgstr "" +#~ "Les deux méthodes :code:`NumPyClient` " +#~ ":code:`fit` et :code:`evaluate` utilisent les" +#~ " fonctions :code:`train()` et :code:`test()` " +#~ "définies précédemment dans :code:`mxnet_mnist.py`." +#~ " Ce que nous faisons vraiment ici," +#~ " c'est que nous indiquons à Flower," +#~ " par le biais de notre sous-" +#~ "classe :code:`NumPyClient`, laquelle de nos" +#~ " fonctions déjà définies doit être " +#~ "appelée pour l'entraînement et l'évaluation." +#~ " Nous avons inclus des annotations de" +#~ " type pour te donner une meilleure" +#~ " compréhension des types de données " +#~ "qui sont transmis." #~ msgid "" -#~ "Adaptive Federated Optimization using Adagrad" -#~ " (FedAdagrad) [Reddi et al., 2020] " -#~ "strategy." 
+#~ "Having defined data loading, model " +#~ "architecture, training, and evaluation we " +#~ "can put everything together and train" +#~ " our :code:`Sequential` model on MNIST." #~ msgstr "" +#~ "Après avoir défini le chargement des " +#~ "données, l'architecture du modèle, la " +#~ "formation et l'évaluation, nous pouvons " +#~ "tout rassembler et former notre modèle" +#~ " :code:`Sequential` sur MNIST." #~ msgid "" -#~ "Adaptive Federated Optimization using Adam " -#~ "(FedAdam) [Reddi et al., 2020] strategy." +#~ "in each window (make sure that the" +#~ " server is still running before you" +#~ " do so) and see your MXNet " +#~ "project run federated learning across " +#~ "two clients. Congratulations!" #~ msgstr "" +#~ "dans chaque fenêtre (assure-toi que " +#~ "le serveur est toujours en cours " +#~ "d'exécution avant de le faire) et " +#~ "tu verras ton projet MXNet exécuter " +#~ "l'apprentissage fédéré sur deux clients. " +#~ "Félicitations !" #~ msgid "" -#~ "Adaptive Federated Optimization using Yogi " -#~ "(FedYogi) [Reddi et al., 2020] strategy." +#~ "The full source code for this " +#~ "example: `MXNet: From Centralized To " +#~ "Federated (Code) " +#~ "`_. Our " +#~ "example is of course somewhat over-" +#~ "simplified because both clients load the" +#~ " exact same dataset, which isn't " +#~ "realistic. You're now prepared to " +#~ "explore this topic further. How about" +#~ " using a CNN or using a " +#~ "different dataset? How about adding more" +#~ " clients?" #~ msgstr "" +#~ "Le code source complet de cet " +#~ "exemple : `MXNet : From Centralized " +#~ "To Federated (Code) " +#~ "`_. Notre " +#~ "exemple est bien sûr un peu trop" +#~ " simplifié parce que les deux clients" +#~ " chargent exactement le même ensemble " +#~ "de données, ce qui n'est pas " +#~ "réaliste. Tu es maintenant prêt à " +#~ "explorer ce sujet plus en profondeur." +#~ " Pourquoi ne pas utiliser un CNN " +#~ "ou un ensemble de données différent " +#~ "? 
Pourquoi ne pas ajouter d'autres " +#~ "clients ?" -#~ msgid "Contributing Baselines" -#~ msgstr "Configuration du contributeur" +#~ msgid "with the following command sequence:" +#~ msgstr "avec la séquence de commandes suivante :" #~ msgid "" -#~ "Do you have a new federated " -#~ "learning paper and want to add a" -#~ " new baseline to Flower? Or do " -#~ "you want to add an experiment to" -#~ " an existing baseline paper? Great, " -#~ "we really appreciate your contribution." +#~ "In case you are a researcher you" +#~ " might be just fine using the " +#~ "self-signed certificates generated using " +#~ "the scripts which are part of this" +#~ " guide." #~ msgstr "" +#~ "Si tu es un chercheur, tu peux " +#~ "très bien utiliser les certificats " +#~ "auto-signés générés à l'aide des " +#~ "scripts qui font partie de ce " +#~ "guide." #~ msgid "" -#~ "The goal of Flower Baselines is to" -#~ " reproduce experiments from popular papers" -#~ " to accelerate researchers by enabling " -#~ "faster comparisons to new strategies, " -#~ "datasets, models, and federated pipelines " -#~ "in general." +#~ "We are now going to show how " +#~ "to write a sever which uses the" +#~ " previously generated scripts." #~ msgstr "" +#~ "Nous allons maintenant montrer comment " +#~ "écrire un serveur qui utilise les " +#~ "scripts générés précédemment." #~ msgid "" -#~ "Before you start to work on a " -#~ "new baseline or experiment, please check" -#~ " the `Flower Issues " -#~ "`_ or `Flower " -#~ "Pull Requests `_ " -#~ "to see if someone else is already" -#~ " working on it. Please open a " -#~ "new issue if you are planning to" -#~ " work on a new baseline or " -#~ "experiment with a short description of" -#~ " the corresponding paper and the " -#~ "experiment you want to contribute." -#~ msgstr "" - -#~ msgid "TL;DR: Adding a new Flower Baseline" +#~ "When providing certificates, the server " +#~ "expects a tuple of three certificates." 
+#~ " :code:`Path` can be used to easily" +#~ " read the contents of those files " +#~ "into byte strings, which is the " +#~ "data type :code:`start_server` expects." #~ msgstr "" +#~ "Lorsqu'il fournit des certificats, le " +#~ "serveur attend un tuple de trois " +#~ "certificats. :code:`Path` peut être utilisé" +#~ " pour lire facilement le contenu de" +#~ " ces fichiers en chaînes d'octets, ce" +#~ " qui est le type de données " +#~ "attendu par :code:`start_server`." #~ msgid "" -#~ "Let's say you want to contribute " -#~ "the code of your most recent " -#~ "Federated Learning publication, *FedAweseome*. " -#~ "There are only three steps necessary " -#~ "to create a new *FedAweseome* Flower " -#~ "Baseline:" +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`_." #~ msgstr "" -#~ msgid "**Get the Flower source code on your machine**" -#~ msgstr "" +#~ msgid "Flower server" +#~ msgstr "Serveur de Flower" #~ msgid "" -#~ "Fork the Flower codebase: got to " -#~ "the `Flower GitHub repo " -#~ "`_ and fork the " -#~ "code (click the *Fork* button in " -#~ "the top-right corner and follow " -#~ "the instructions)" +#~ "The command will pull the Docker " +#~ "image with the tag " +#~ "``1.7.0-py3.11-ubuntu22.04`` from Docker Hub. " +#~ "The tag contains the information which" +#~ " Flower, Python and Ubuntu is used." +#~ " In this case, it uses Flower " +#~ "1.7.0, Python 3.11 and Ubuntu 22.04. " +#~ "The ``--rm`` flag tells Docker to " +#~ "remove the container after it exits." #~ msgstr "" #~ msgid "" -#~ "Clone the (forked) Flower source code:" -#~ " :code:`git clone " -#~ "git@github.com:[your_github_username]/flower.git`" +#~ "By default, the Flower server keeps " +#~ "state in-memory. When using the " +#~ "Docker flag ``--rm``, the state is " +#~ "not persisted between container starts. 
" +#~ "We will show below how to save " +#~ "the state in a file on your " +#~ "host system." #~ msgstr "" #~ msgid "" -#~ "Open the code in your favorite " -#~ "editor (e.g., using VSCode: ``cd flower" -#~ " ; code .``)" -#~ msgstr "" - -#~ msgid "**Add the FedAwesome code**" +#~ "The ``-p :`` flag tells " +#~ "Docker to map the ports " +#~ "``9091``/``9092`` of the host to " +#~ "``9091``/``9092`` of the container, allowing" +#~ " you to access the Driver API " +#~ "on ``http://localhost:9091`` and the Fleet " +#~ "API on ``http://localhost:9092``. Lastly, any" +#~ " flag that comes after the tag " +#~ "is passed to the Flower server. " +#~ "Here, we are passing the flag " +#~ "``--insecure``." #~ msgstr "" #~ msgid "" -#~ "Add your :code:`FedAwesome` code under " -#~ ":code:`baselines/flwr_baselines/publications/[fedawesome]`" -#~ msgstr "" - -#~ msgid "Add a `pyproject.toml` with all necessary dependencies" -#~ msgstr "" - -#~ msgid "Add a `README.md` describing how to use your baseline" -#~ msgstr "" - -#~ msgid "**Open a pull request**" +#~ "The ``--insecure`` flag enables insecure " +#~ "communication (using HTTP, not HTTPS) " +#~ "and should only be used for " +#~ "testing purposes. We strongly recommend " +#~ "enabling `SSL `_ when " +#~ "deploying to a production environment." #~ msgstr "" -#~ msgid "Stage your changes: :code:`git add .`" +#~ msgid "" +#~ "You can use ``--help`` to view all" +#~ " available flags that the server " +#~ "supports:" #~ msgstr "" #~ msgid "" -#~ "Commit & push: :code:`git commit -m " -#~ "\"Create new FedAweseome baseline\" ; " -#~ "git push`" +#~ "If you want to persist the state" +#~ " of the server on your host " +#~ "system, all you need to do is " +#~ "specify a path where you want to" +#~ " save the file on your host " +#~ "system and a name for the database" +#~ " file. 
In the example below, we " +#~ "tell Docker via the flag ``-v`` to" +#~ " mount the user's home directory " +#~ "(``~/`` on your host) into the " +#~ "``/app/`` directory of the container. " +#~ "Furthermore, we use the flag " +#~ "``--database`` to specify the name of" +#~ " the database file." #~ msgstr "" #~ msgid "" -#~ "Open a pull request: go to *your*" -#~ " fork of the Flower codebase and " -#~ "create a pull request that targets " -#~ "the Flower ``main``` branch" +#~ "As soon as the server starts, the" +#~ " file ``state.db`` is created in the" +#~ " user's home directory on your host" +#~ " system. If the file already exists," +#~ " the server tries to restore the " +#~ "state from the file. To start the" +#~ " server with an empty database, " +#~ "simply remove the ``state.db`` file." #~ msgstr "" -#~ msgid "Further reading:" -#~ msgstr "Aide supplémentaire" - #~ msgid "" -#~ "`GitHub docs: About forks " -#~ "`_" +#~ "To enable SSL, you will need a " +#~ "CA certificate, a server certificate and" +#~ " a server private key." #~ msgstr "" #~ msgid "" -#~ "`GitHub docs: Creating a pull request" -#~ " `_" +#~ "For testing purposes, you can generate" +#~ " your own self-signed certificates. " +#~ "The `Enable SSL connections " +#~ "`_ page contains " +#~ "a section that will guide you " +#~ "through the process." #~ msgstr "" #~ msgid "" -#~ "`GitHub docs: Creating a pull request" -#~ " from a fork `_" +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``-v`` to " +#~ "mount the local directory into the " +#~ "``/app/`` directory of the container. " +#~ "This allows the server to access " +#~ "the files within the container. Finally," +#~ " we pass the names of the " +#~ "certificates to the server with the " +#~ "``--certificates`` flag." #~ msgstr "" -#~ msgid "Requirements" -#~ msgstr "Changements nécessaires" - -#~ msgid "" -#~ "Contributing a new baseline is really" -#~ " easy. 
You only have to make " -#~ "sure that your federated learning " -#~ "experiments are running with Flower. As" -#~ " soon as you have created a " -#~ "Flower-based experiment, you can contribute" -#~ " it." +#~ msgid "Using a different Flower or Python version" #~ msgstr "" #~ msgid "" -#~ "It is recommended (but not required) " -#~ "to use `Hydra `_ to " -#~ "execute the experiment." +#~ "If you want to use a different " +#~ "version of Flower or Python, you " +#~ "can do so by changing the tag. " +#~ "All versions we provide are available" +#~ " on `Docker Hub " +#~ "`_." #~ msgstr "" #~ msgid "" -#~ "Please make sure to add your " -#~ "baseline or experiment to the " -#~ "corresponding directory as explained in " -#~ "`Executing Baseline `_. Give your baseline the " -#~ "unique identifier. For example, :code:`fedbn`" -#~ " refers to the paper \"FedBN: " -#~ "Federated Learning on non-IID Features" -#~ " via Local Batch Normalization\" and " -#~ "creates the corresponding directory " -#~ ":code:`flower/baselines/flwr_baselines/publications/fedbn`. Then" -#~ " you create the experiment directory " -#~ "with the experiment name. For example," -#~ " the experiment that measures the " -#~ "convergence has the directory " -#~ ":code:`flower/baselines/flwr_baselines/publications/fedbn/convergence_rate`." -#~ " This directory contains all your " -#~ "code and a :code:`README.md` with a " -#~ "link to the paper, the paper's " -#~ "abstract, and a detailed description of" -#~ " how to execute the experiments." 
+#~ "The following command returns the " +#~ "current image hash referenced by the " +#~ "``server:1.7.0-py3.11-ubuntu22.04`` tag:" #~ msgstr "" +#~ msgid "Next, we can pin the hash when running a new server container:" +#~ msgstr "" + +#~ msgid "flower-driver-api" +#~ msgstr "flower-driver-api" + +#~ msgid "flower-fleet-api" +#~ msgstr "flower-fleet-api" + #~ msgid "" -#~ "Please also check if :code:`pyproject.toml`" -#~ " and :code:`requirements.txt` (all in the" -#~ " directory `baselines " -#~ "`_ contain" -#~ " all required Python packages (libraries," -#~ " frameworks, ...). If the required " -#~ "Python package is not yet listed, " -#~ "please add it to :code:`pyproject.toml`. " -#~ "If you need a different version of" -#~ " a package already listed, please try" -#~ " to ensure your experiment runs with" -#~ " the existing version listed in " -#~ ":code:`pyproject.toml` (or :code:`requirements.txt`). " -#~ "If that doesn't work, open a " -#~ "GitHub Issue and request the version " -#~ "change." +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " +#~ "[:py:class:`int`, :py:class:`float`, :py:class:`str`, " +#~ ":py:class:`bytes`, :py:class:`bool`, " +#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`float`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`str`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`bytes`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`bool`]]]" #~ msgstr "" #~ msgid "" -#~ "The experiment also needs to contain " -#~ "a file with a downloader for the" -#~ " dataset - if possible automatic. " -#~ "This can be included in one of " -#~ "the files or as an extra file." 
+#~ ":py:obj:`create_error_reply " +#~ "`\\ \\(error\\, " +#~ "ttl\\)" #~ msgstr "" #~ msgid "" -#~ "Finally, please add plots for all " -#~ "experimental results your code is " -#~ "running to the :code:`experiment` directory" -#~ " and include them in :code:`README.md`. " -#~ "Doing this helps others and enables " -#~ "them to recognize your contributions " -#~ "quickly." +#~ ":py:obj:`create_reply `\\ " +#~ "\\(content\\, ttl\\)" #~ msgstr "" #~ msgid "" -#~ "We are aware that a few libraries" -#~ " are available only via Conda. " -#~ "However, we want to encourage you " -#~ "to ensure that your code also runs" -#~ " well outside of Conda to make " -#~ "it more accessible to the broader " -#~ "research community." +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " +#~ "[:py:class:`int`, :py:class:`float`, " +#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`float`]]]" #~ msgstr "" -#~ msgid "Here is a checklist for adding a new baseline:" +#~ msgid "Run Flower server (Driver API and Fleet API)." #~ msgstr "" #~ msgid "" -#~ "add required Python packages to " -#~ ":code:`pyproject.toml` or :code:`requirements.txt`" +#~ ":py:obj:`start_driver `\\ " +#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" #~ msgstr "" +#~ msgid "Start a Flower Driver API server." +#~ msgstr "Tout d'abord, démarre un serveur Flower :" + #~ msgid "" -#~ "add all required code under " -#~ ":code:`baselines/flwr_baselines/publications/[new_publication]`" +#~ ":py:obj:`Driver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" #~ msgstr "" +#~ "Flower 1.0 : ``start_server(..., " +#~ "config=flwr.server.ServerConfig(num_rounds=3, " +#~ "round_timeout=600.0), ...)``" -#~ msgid "add a dataset downloader" +#~ msgid "`Driver` class provides an interface to the Driver API." 
#~ msgstr "" -#~ msgid "add an experiment plot" +#~ msgid "" +#~ "The IPv4 or IPv6 address of the" +#~ " Driver API server. Defaults to " +#~ "`\"[::]:9091\"`." #~ msgstr "" -#~ msgid "add a :code:`README.md`" +#~ msgid "Disconnect from the SuperLink if connected." #~ msgstr "" -#~ msgid "Usability" +#~ msgid "" +#~ ":py:obj:`create_message `\\" +#~ " \\(content\\, message\\_type\\, ...\\)" #~ msgstr "" #~ msgid "" -#~ "Flower is known and loved for its" -#~ " usability. Therefore, make sure that " -#~ "your baseline or experiment can be " -#~ "executed with a single command such " -#~ "as :code:`./run.sh` or :code:`python3 " -#~ "main.py`. How you organize the " -#~ "experiments and the related code " -#~ "structure is up to you as an " -#~ "author, but please keep in mind to" -#~ " make sure that other users can " -#~ "easily understand and execute your " -#~ "baseline." +#~ "Time-to-live for the round trip" +#~ " of this message, i.e., the time " +#~ "from sending this message to receiving" +#~ " a reply. It specifies the duration" +#~ " for which the message and its " +#~ "potential reply are considered valid." #~ msgstr "" -#~ msgid "We look forward to your contribution!" -#~ msgstr "Exemple de première contribution" - -#~ msgid "flwr" -#~ msgstr "Fleur" +#~ msgid "start\\_driver" +#~ msgstr "start_client" -#~ msgid "binaries" +#~ msgid "" +#~ "The IPv4 or IPv6 address of the" +#~ " Driver API server. Defaults to " +#~ "`\"[::]:8080\"`." #~ msgstr "" -#~ msgid "Flower Baselines" -#~ msgstr "Demande pour une nouvelle Flower Baseline" +#~ msgid "" +#~ "A server implementation, either " +#~ "`flwr.server.Server` or a subclass thereof." +#~ " If no instance is provided, then " +#~ "`start_driver` will create one." +#~ msgstr "" #~ msgid "" -#~ "Flower Baselines are a collection of " -#~ "organised scripts used to reproduce " -#~ "results from well-known publications or" -#~ " benchmarks. 
You can check which " -#~ "baselines already exist and/or contribute " -#~ "your own baseline." +#~ "An implementation of the class " +#~ "`flwr.server.ClientManager`. If no implementation" +#~ " is provided, then `start_driver` will " +#~ "use `flwr.server.SimpleClientManager`." #~ msgstr "" -#~ msgid "Flower requires `Python 3.7 `_ or above." -#~ msgstr "`Python 3.7 `_ ou plus" +#~ msgid "The Driver object to use." +#~ msgstr "" -#~ msgid "|9e234df38403464899ad3aee36bf1b95|" +#~ msgid "Starting a driver that connects to an insecure server:" #~ msgstr "" -#~ msgid "|081158351506446f9f772cb45ee68523|" +#~ msgid "Starting a driver that connects to an SSL-enabled server:" #~ msgstr "" -#~ msgid "|e9325042b79c45ed96b5a8d2f6f3cdc9|" +#~ msgid "" +#~ ":py:obj:`run_simulation_from_cli " +#~ "`\\ \\(\\)" #~ msgstr "" -#~ msgid "|11b83bb107344db78a37266e080c4a7a|" +#~ msgid "Run Simulation Engine from the CLI." #~ msgstr "" -#~ msgid "|cd764bcf6d174a9cb62880ace9a8a6bd|" +#~ msgid "run\\_simulation\\_from\\_cli" +#~ msgstr "Simulation de moniteur" + +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with MXNet to train a Sequential " +#~ "model on MNIST." #~ msgstr "" -#~ msgid "|5c520984cced41e38f6bb4af416c3f84|" +#~ msgid "Quickstart MXNet" +#~ msgstr "Démarrage rapide de MXNet" + +#~ msgid "" +#~ "MXNet is no longer maintained and " +#~ "has been moved into `Attic " +#~ "`_. As a " +#~ "result, we would encourage you to " +#~ "use other ML frameworks alongside " +#~ "Flower, for example, PyTorch. This " +#~ "tutorial might be removed in future " +#~ "versions of Flower." #~ msgstr "" -#~ msgid "|66941b0608644cf1a2269a194d3bc0dd|" +#~ msgid "" +#~ "In this tutorial, we will learn " +#~ "how to train a :code:`Sequential` model" +#~ " on MNIST using Flower and MXNet." 
#~ msgstr "" +#~ "Dans ce tutoriel, nous allons apprendre" +#~ " à former un modèle :code:`Sequential` " +#~ "sur MNIST à l'aide de Flower et" +#~ " de MXNet." -#~ msgid "|4b149f3a095b402bb8890275aabc9298|" +#~ msgid "Since we want to use MXNet, let's go ahead and install it:" +#~ msgstr "Puisque nous voulons utiliser MXNet, allons-y et installons-le :" + +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on MXNet´s `Hand-written Digit " +#~ "Recognition tutorial " +#~ "`_." #~ msgstr "" +#~ "Maintenant que toutes nos dépendances " +#~ "sont installées, lançons une formation " +#~ "distribuée simple avec deux clients et" +#~ " un serveur. Notre procédure de " +#~ "formation et l'architecture du réseau " +#~ "sont basées sur le tutoriel de " +#~ "reconnaissance de chiffres écrits à la" +#~ " main du MXNet " +#~ "`_." -#~ msgid "|675cf7d3d53a4817b5d47529c0758158|" +#~ msgid "" +#~ "In a file called :code:`client.py`, " +#~ "import Flower and MXNet related " +#~ "packages:" #~ msgstr "" +#~ "Dans un fichier appelé :code:`client.py`, " +#~ "importe Flower et les paquets liés " +#~ "au MXNet :" -#~ msgid "|7ca594e16ae7477790c2e3cf096ec7cd|" +#~ msgid "In addition, define the device allocation in MXNet with:" +#~ msgstr "En outre, définis l'attribution de l'appareil dans MXNet avec :" + +#~ msgid "" +#~ "We use MXNet to load MNIST, a " +#~ "popular image classification dataset of " +#~ "handwritten digits for machine learning. " +#~ "The MXNet utility :code:`mx.test_utils.get_mnist()`" +#~ " downloads the training and test " +#~ "data." #~ msgstr "" +#~ "Nous utilisons MXNet pour charger MNIST," +#~ " un ensemble de données de " +#~ "classification d'images populaire de chiffres" +#~ " manuscrits pour l'apprentissage automatique. 
" +#~ "L'utilitaire MXNet :code:`mx.test_utils.get_mnist()` " +#~ "télécharge les données d'entraînement et " +#~ "de test." -#~ msgid "|d669336577b545a081d5d74169a9bc4d|" +#~ msgid "" +#~ "Define the training and loss with " +#~ "MXNet. We train the model by " +#~ "looping over the dataset, measure the" +#~ " corresponding loss, and optimize it." +#~ msgstr "" +#~ "Définis l'entraînement et la perte avec" +#~ " MXNet. Nous entraînons le modèle en" +#~ " parcourant en boucle l'ensemble des " +#~ "données, nous mesurons la perte " +#~ "correspondante et nous l'optimisons." + +#~ msgid "" +#~ "Next, we define the validation of " +#~ "our machine learning model. We loop " +#~ "over the test set and measure both" +#~ " loss and accuracy on the test " +#~ "set." #~ msgstr "" +#~ "Ensuite, nous définissons la validation " +#~ "de notre modèle d'apprentissage automatique." +#~ " Nous effectuons une boucle sur " +#~ "l'ensemble de test et mesurons à " +#~ "la fois la perte et la précision" +#~ " sur l'ensemble de test." -#~ msgid "|00b3d6cde1ff410ba54eff58da4e033a|" +#~ msgid "" +#~ "After defining the training and testing" +#~ " of a MXNet machine learning model," +#~ " we use these functions to implement" +#~ " a Flower client." #~ msgstr "" +#~ "Après avoir défini la formation et " +#~ "le test d'un modèle d'apprentissage " +#~ "automatique MXNet, nous utilisons ces " +#~ "fonctions pour mettre en œuvre un " +#~ "client Flower." -#~ msgid "|29a11f5353084c1995c538f7edef71a5|" -#~ msgstr "" +#~ msgid "Our Flower clients will use a simple :code:`Sequential` model:" +#~ msgstr "Nos clients Flower utiliseront un modèle simple :code:`Sequential` :" -#~ msgid "|d62eda312fd44726bb5db2b761fe7e0d|" +#~ msgid "" +#~ "After loading the dataset with " +#~ ":code:`load_data()` we perform one forward " +#~ "propagation to initialize the model and" +#~ " model parameters with :code:`model(init)`. " +#~ "Next, we implement a Flower client." 
#~ msgstr "" +#~ "Après avoir chargé l'ensemble de données" +#~ " avec :code:`load_data()`, nous effectuons " +#~ "une propagation vers l'avant pour " +#~ "initialiser le modèle et les paramètres" +#~ " du modèle avec :code:`model(init)`. " +#~ "Ensuite, nous implémentons un client " +#~ "Flower." -#~ msgid "Using Baselines" +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses MXNet." +#~ " Implementing :code:`NumPyClient` usually means" +#~ " defining the following methods " +#~ "(:code:`set_parameters` is optional though):" #~ msgstr "" +#~ "Flower fournit une classe de commodité" +#~ " appelée :code:`NumPyClient` qui facilite " +#~ "l'implémentation de l'interface :code:`Client` " +#~ "lorsque ta charge de travail utilise " +#~ "MXNet. L'implémentation de :code:`NumPyClient` " +#~ "signifie généralement la définition des " +#~ "méthodes suivantes (:code:`set_parameters` est " +#~ "cependant facultatif) :" -#~ msgid "Structure" -#~ msgstr "" +#~ msgid "They can be implemented in the following way:" +#~ msgstr "Ils peuvent être mis en œuvre de la manière suivante :" #~ msgid "" -#~ "All baselines are available in the " -#~ "directory `baselines " -#~ "`_. This " -#~ "directory has two different files:" +#~ "We can now create an instance of" +#~ " our class :code:`MNISTClient` and add " +#~ "one line to actually run this " +#~ "client:" #~ msgstr "" +#~ "Nous pouvons maintenant créer une " +#~ "instance de notre classe :code:`MNISTClient`" +#~ " et ajouter une ligne pour exécuter" +#~ " ce client :" #~ msgid "" -#~ "Both files contain all the information" -#~ " about required Python packages (libraries," -#~ " frameworks, ...) and their versions. 
" -#~ "You can install each library separately" -#~ " by using :code: `pip install` or " -#~ "you can use Poetry and run " -#~ "code:`poetry install` in the directory " -#~ "where you find the :code:`pyproject.toml` " -#~ "file. After installing all requirements, " -#~ "you can start to run your " -#~ "baseline." +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()` or " +#~ ":code:`fl.client.start_numpy_client()`. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." #~ msgstr "" +#~ "C'est tout pour le client. Il nous" +#~ " suffit d'implémenter :code:`Client` ou " +#~ ":code:`NumPyClient` et d'appeler " +#~ ":code:`fl.client.start_client()`. La chaîne " +#~ ":code:`\"0.0.0:8080\"` indique au client à " +#~ "quel serveur se connecter. Dans notre" +#~ " cas, nous pouvons exécuter le " +#~ "serveur et le client sur la même" +#~ " machine, c'est pourquoi nous utilisons " +#~ ":code:`\"0.0.0:8080\"`. Si nous exécutons une" +#~ " charge de travail véritablement fédérée" +#~ " avec le serveur et les clients " +#~ "s'exécutant sur des machines différentes, " +#~ "tout ce qui doit changer est " +#~ ":code:`server_address` que nous transmettons " +#~ "au client." #~ msgid "" -#~ "Go to the baseline that you want" -#~ " to execute. 
The directories and " -#~ "files are structured so that you " -#~ "can first find the paper with " -#~ "their unique identifier such that, for" -#~ " example, :code:`FedProx` refers to the " -#~ "paper \"Federated Optimization in " -#~ "Heterogeneous Networks\". The :code:`fedprox` " -#~ "section contains all available experiments " -#~ "from that paper." +#~ "With both client and server ready, " +#~ "we can now run everything and see" +#~ " federated learning in action. Federated" +#~ " learning systems usually have a " +#~ "server and multiple clients. We " +#~ "therefore have to start the server " +#~ "first:" #~ msgstr "" +#~ "Le client et le serveur étant " +#~ "prêts, nous pouvons maintenant tout " +#~ "exécuter et voir l'apprentissage fédéré " +#~ "en action. Les systèmes d'apprentissage " +#~ "fédéré ont généralement un serveur et" +#~ " plusieurs clients. Nous devons donc " +#~ "commencer par démarrer le serveur :" #~ msgid "" -#~ "The experiment area contains a " -#~ ":code:`README.md` covering the corresponding " -#~ "paper, its abstract, and goal as " -#~ "well as a detailed description of " -#~ "how to run the baseline. Please " -#~ "use the :code:`README.md` to see how " -#~ "to execute each individual baseline." +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/quickstart-mxnet`." #~ msgstr "" +#~ "Félicitations ! Tu as réussi à " +#~ "construire et à faire fonctionner ton" +#~ " premier système d'apprentissage fédéré. Le" +#~ " code source complet " +#~ "`_ de cet exemple se " +#~ "trouve dans :code:`examples/quickstart-mxnet`." 
-#~ msgid "Available Baselines" -#~ msgstr "" +#~ msgid ":code:`load_mnist()`" +#~ msgstr ":code:`load_mnist()`" + +#~ msgid "Loads the MNIST dataset using OpenML" +#~ msgstr "Charge l'ensemble de données MNIST à l'aide d'OpenML" + +#~ msgid ":code:`shuffle()`" +#~ msgstr ":code:`shuffle()`" + +#~ msgid "Shuffles data and its label" +#~ msgstr "Mélange les données et leur étiquette" + +#~ msgid ":code:`partition()`" +#~ msgstr ":code:`partition()`" + +#~ msgid "Splits datasets into a number of partitions" +#~ msgstr "Divise les ensembles de données en un certain nombre de partitions" #~ msgid "" -#~ "The following table lists all currently" -#~ " available baselines and the corresponding" -#~ " papers. If you want to add a" -#~ " new baseline or experiment, please " -#~ "check the `Contributing Baselines " -#~ "`_ section." +#~ "We load the MNIST dataset from " +#~ "`OpenML " +#~ "`_, a" +#~ " popular image classification dataset of" +#~ " handwritten digits for machine learning." +#~ " The utility :code:`utils.load_mnist()` downloads" +#~ " the training and test data. The " +#~ "training set is split afterwards into" +#~ " 10 partitions with :code:`utils.partition()`." #~ msgstr "" +#~ "Nous chargeons l'ensemble de données " +#~ "MNIST de `OpenML `_," +#~ " un ensemble de données de " +#~ "classification d'images populaires de chiffres" +#~ " manuscrits pour l'apprentissage automatique. " +#~ "L'utilitaire :code:`utils.load_mnist()` télécharge " +#~ "les données d'entraînement et de test." +#~ " L'ensemble d'entraînement est ensuite " +#~ "divisé en 10 partitions avec " +#~ ":code:`utils.partition()`." -#~ msgid "Paper" -#~ msgstr "" +#~ msgid "Let's get stated!" +#~ msgstr "Allons-y, déclarons-le !" 
-#~ msgid "Experiment" +#~ msgid "|2b5c62c529f6416f840c594cce062fbb|" #~ msgstr "" -#~ msgid "Directory" +#~ msgid "|90b334680cb7467d9a04d39b8e8dca9f|" #~ msgstr "" -#~ msgid "`FedAvg `_" +#~ msgid "|65764ceee89f4335bfd93fd0b115e831|" #~ msgstr "" -#~ msgid "MNIST" +#~ msgid "|d97319ec28bb407ea0ab9705e38f3bcf|" #~ msgstr "" -#~ msgid ":code:`flower/baselines/flwr_baselines/publications/fedavg_mnist/`" +#~ msgid "|11e95ac83a8548d8b3505b4663187d07|" #~ msgstr "" -#~ msgid "`FedProx `_" +#~ msgid "|1dab2f3a23674abc8a6731f20fa10730|" #~ msgstr "" -#~ msgid ":code:`flower/baselines/flwr_baselines/publications/fedprox_mnist/`" +#~ msgid "|7f0ee162da38450788493a21627306f7|" #~ msgstr "" -#~ msgid "`FedOpt `_" +#~ msgid "|296a1fb72c514b23b3d8905ff0ff98c6|" #~ msgstr "" -#~ msgid "sparse gradient task" +#~ msgid "|5b1408eec0d746cdb91162a9107b6089|" #~ msgstr "" -#~ msgid ":code:`flower/baselines/flwr_baselines/publications/adaptive_federated_optimization`" +#~ msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" #~ msgstr "" -#~ msgid "`FedBN `_" +#~ msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" #~ msgstr "" -#~ msgid "convergence rate" +#~ msgid "|ec1fe880237247e0975f52766775ab84|" #~ msgstr "" -#~ msgid ":code:`flower/baselines/flwr_baselines/publications/fedbn/convergence_rate`" +#~ msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" #~ msgstr "" -#~ msgid "" -#~ "Flower requires `Python 3.7 " -#~ "`_ or above, we " -#~ "recommend `Python 3.8 " -#~ "`_." +#~ msgid "|ff726bc5505e432388ee2fdd6ef420b9|" #~ msgstr "" -#~ "Flower nécessite `Python 3.7 " -#~ "`_ ou plus, nous " -#~ "recommandons `Python 3.8 " -#~ "`_." -#~ msgid "|6baade94cd14454e82ead34fcc29a182|" +#~ msgid "" +#~ "Flower provides pre-made docker images" +#~ " on `Docker Hub `_" +#~ " that include all necessary dependencies" +#~ " for running the SuperLink. You can" +#~ " also build your own custom docker" +#~ " images from scratch with a different" +#~ " version of Python or Ubuntu if " +#~ "that is what you need. 
In this " +#~ "guide, we will explain what images " +#~ "exist and how to build them " +#~ "locally." #~ msgstr "" -#~ msgid "|1209ecd819104c458d396cf665c7ed4f|" +#~ msgid "" +#~ "Currently, Flower provides two images, a" +#~ " ``base`` image and a ``superlink`` " +#~ "image. The base image, as the name" +#~ " suggests, contains basic dependencies that" +#~ " the SuperLink needs. This includes " +#~ "system dependencies, Python and Python " +#~ "tools. The SuperLink image is based " +#~ "on the base image, but it " +#~ "additionally installs the SuperLink using " +#~ "``pip``." #~ msgstr "" -#~ msgid "|c088b02349304344a53f3ce1464225fb|" +#~ msgid "" +#~ "Both, base and SuperLink image are " +#~ "configured via build arguments. Through " +#~ "build arguments, we can make our " +#~ "build more flexible. For example, in " +#~ "the base image, we can specify the" +#~ " version of Python to install using" +#~ " the ``PYTHON_VERSION`` build argument. " +#~ "Some of the build arguments have " +#~ "default values, others must be specified" +#~ " when building the image. All " +#~ "available build arguments for each image" +#~ " are listed in one of the " +#~ "tables below." #~ msgstr "" -#~ msgid "|b54d50afc82a4a57a55997a9eaeb735b|" -#~ msgstr "" +#~ msgid "``3.11``" +#~ msgstr "1.0.0rc1" -#~ msgid "|d17b57e97b714a25b43790d4b832fd87|" +#~ msgid "``UBUNTU_VERSION``" #~ msgstr "" -#~ msgid "|38966d05301a4854aa73c8c5033bfaab|" +#~ msgid "Version of the official Ubuntu Docker image." #~ msgstr "" -#~ msgid "|231d55f7926d4a5db02dcd724ec62529|" +#~ msgid "Defaults to ``22.04``." 
#~ msgstr "" -#~ msgid "|fb44f2e13a1b4b69b7a72234eedd13f4|" +#~ msgid "" +#~ "The following example creates a base " +#~ "image with Python 3.11.0, pip 23.0.1 " +#~ "and setuptools 69.0.2:" #~ msgstr "" -#~ msgid "|1cfc77af5d164030942e84d14268c256|" -#~ msgstr "" +#~ msgid "Building the SuperLink image" +#~ msgstr "Démarrer le serveur" -#~ msgid "|0d50828231a64bc08223544a2d2fa216|" +#~ msgid "Defaults to ``flwr/base``." #~ msgstr "" -#~ msgid "|904387757ceb42fbaa1875f3e8061113|" -#~ msgstr "" +#~ msgid "The Python version of the base image." +#~ msgstr "Évaluer la réponse d'un client." -#~ msgid "|68608e1b7c4842458c528b431c715f5a|" +#~ msgid "Defaults to ``py3.11``." #~ msgstr "" -#~ msgid "|2adb106bda97480bb4b33eac472e321e|" +#~ msgid "Defaults to ``ubuntu22.04``." #~ msgstr "" -#~ msgid "|025f0a6f7a6145cba4bf8fa0e2495851|" +#~ msgid "The PyPI package to install." #~ msgstr "" -#~ msgid "Before the release" -#~ msgstr "Avant la sortie" +#~ msgid "Defaults to ``flwr``." +#~ msgstr "Flux de travail" #~ msgid "" -#~ "Update the changelog (``changelog.md``) with" -#~ " all relevant changes that happened " -#~ "after the last release. If the " -#~ "last release was tagged ``v1.2.0``, you" -#~ " can use the following URL to " -#~ "see all commits that got merged " -#~ "into ``main`` since then:" +#~ "The following example creates a " +#~ "SuperLink image with the official Flower" +#~ " base image py3.11-ubuntu22.04 and Flower" +#~ " 1.8.0:" #~ msgstr "" -#~ "Mettez à jour le journal des " -#~ "modifications (``changelog.md``) avec tous les" -#~ " changements pertinents qui se sont " -#~ "produits après la dernière version. Si" -#~ " la dernière version a été étiquetée" -#~ " ``v1.2.0``, vous pouvez utiliser l'URL " -#~ "suivante pour voir tous les commits " -#~ "qui ont été fusionnés dans ``main`` " -#~ "depuis lors :" #~ msgid "" -#~ "`GitHub: Compare v1.2.0...main " -#~ "`_" +#~ "The name of image is ``flwr_superlink``" +#~ " and the tag ``0.1.0``. 
Remember that" +#~ " the build arguments as well as " +#~ "the name and tag can be adapted" +#~ " to your needs. These values serve" +#~ " as examples only." #~ msgstr "" -#~ "`GitHub : Compare v1.2.0...main " -#~ "`_" #~ msgid "" -#~ "Thank the authors who contributed since" -#~ " the last release. This command helps" -#~ " extract them: ``git log --format='%aN' " -#~ "v1.1.0..HEAD | sort -u``. The command" -#~ " has the same order as ``git " -#~ "shortlog``." +#~ "If you want to use your own " +#~ "base image instead of the official " +#~ "Flower base image, all you need to" +#~ " do is set the ``BASE_REPOSITORY``, " +#~ "``PYTHON_VERSION`` and ``UBUNTU_VERSION`` build " +#~ "arguments." #~ msgstr "" -#~ "Remerciez les auteurs qui ont contribué" -#~ " depuis la dernière version. Cette " -#~ "commande permet de les extraire : " -#~ "``git log --format='%aN' v1.1.0..HEAD | " -#~ "sort -u``. La commande a le même" -#~ " ordre que ``git shortlog``." + +#~ msgid "Creating New Messages" +#~ msgstr "Création de nouveaux messages" #~ msgid "" -#~ "Update the ``changelog.md`` section header " -#~ "``Unreleased`` to contain the version " -#~ "number and date for the release " -#~ "you are building. Create a pull " -#~ "request with the change." +#~ "This is a simple guide for " +#~ "creating a new type of message " +#~ "between the server and clients in " +#~ "Flower." #~ msgstr "" -#~ "Mettez à jour l'en-tête de section" -#~ " ``changelog.md`` ``Unreleased`` pour qu'il " -#~ "contienne le numéro de version et " -#~ "la date de la version que vous " -#~ "construisez. Créez une demande de " -#~ "traction avec le changement." +#~ "Voici un guide simple pour créer " +#~ "un nouveau type de message entre " +#~ "le serveur et les clients dans " +#~ "Flower." 
#~ msgid "" -#~ "Tag the release commit with the " -#~ "version number as soon as the PR" -#~ " is merged: ``git tag v0.12.3``, then" -#~ " ``git push --tags``" +#~ "Let's suppose we have the following " +#~ "example functions in :code:`server.py` and " +#~ ":code:`numpy_client.py`..." #~ msgstr "" -#~ "Marquez le commit de la version " -#~ "avec le numéro de version dès que" -#~ " le PR est fusionné : ``git tag" -#~ " v0.12.3``, puis ``git push --tags``" +#~ "Supposons que nous ayons les fonctions" +#~ " suivantes dans :code:`server.py` et " +#~ ":code:`numpy_client.py`..." + +#~ msgid "Server's side:" +#~ msgstr "Côté serveur :" + +#~ msgid "Client's side:" +#~ msgstr "Côté client :" #~ msgid "" -#~ "Build the release with ``./dev/build.sh``, " -#~ "then publish it with ``./dev/publish.sh``" +#~ "Let's now see what we need to " +#~ "implement in order to get this " +#~ "simple function between the server and" +#~ " client to work!" #~ msgstr "" -#~ "Construisez la version avec " -#~ "``./dev/build.sh``, puis publiez-la avec " -#~ "``./dev/publish.sh``" +#~ "Voyons maintenant ce que nous devons " +#~ "mettre en œuvre pour que cette " +#~ "simple fonction entre le serveur et " +#~ "le client fonctionne !" + +#~ msgid "Message Types for Protocol Buffers" +#~ msgstr "Types de messages pour les tampons de protocole" #~ msgid "" -#~ "Create an entry in GitHub releases " -#~ "with the release notes for the " -#~ "previously tagged commit and attach the" -#~ " build artifacts (:code:`.whl` and " -#~ ":code:`.tar.gz`)." +#~ "The first thing we need to do " +#~ "is to define a message type for" +#~ " the RPC system in :code:`transport.proto`." +#~ " Note that we have to do it " +#~ "for both the request and response " +#~ "messages. For more details on the " +#~ "syntax of proto3, please see the " +#~ "`official documentation `_." 
#~ msgstr "" -#~ "Crée une entrée dans GitHub releases " -#~ "avec les notes de version pour le" -#~ " commit précédemment étiqueté et attache" -#~ " les artefacts de construction " -#~ "(:code:`.whl` et :code:`.tar.gz`)." +#~ "La première chose à faire est de" +#~ " définir un type de message pour " +#~ "le système RPC dans :code:`transport.proto`." +#~ " Notez que nous devons le faire " +#~ "à la fois pour les messages de " +#~ "demande et de réponse. Pour plus " +#~ "de détails sur la syntaxe de " +#~ "proto3, veuillez consulter la `documentation" +#~ " officielle `_." + +#~ msgid "Within the :code:`ServerMessage` block:" +#~ msgstr "Dans le bloc :code:`ServerMessage` :" + +#~ msgid "Within the ClientMessage block:" +#~ msgstr "Dans le bloc ClientMessage :" #~ msgid "" -#~ "Second, create a virtual environment " -#~ "(and activate it). If you chose to" -#~ " use :code:`pyenv` (with the :code" -#~ ":`pyenv-virtualenv` plugin) and already " -#~ "have it installed , you can use" -#~ " the following convenience script (by " -#~ "default it will use :code:`Python " -#~ "3.8.17`, but you can change it by" -#~ " providing a specific :code:``)::" +#~ "Make sure to also add a field " +#~ "of the newly created message type " +#~ "in :code:`oneof msg`." #~ msgstr "" -#~ "Deuxièmement, créer un environnement virtuel" -#~ " (et l'activer). Si vous choisissez " -#~ "d'utiliser :code:`pyenv` (avec le plugin " -#~ ":code:`pyenv-virtualenv`) et que vous " -#~ "l'avez déjà installé, vous pouvez " -#~ "utiliser le script suivant (par défaut" -#~ " il utilisera :code:`Python 3.8.17`, mais" -#~ " vous pouvez le changer en " -#~ "fournissant une :code:`` spécifique)::" +#~ "Veille à ajouter également un champ " +#~ "du type de message nouvellement créé " +#~ "dans :code:`oneof msg`." 
-#~ msgid "server.strategy.FedAvg" -#~ msgstr "serveur.stratégie.FedAvg" +#~ msgid "Once that is done, we will compile the file with:" +#~ msgstr "Une fois que c'est fait, nous compilerons le fichier avec :" -#~ msgid "server.strategy.FedAvgM" -#~ msgstr "stratégie.serveur.FedAvgM" +#~ msgid "If it compiles successfully, you should see the following message:" +#~ msgstr "S'il se compile avec succès, tu devrais voir le message suivant :" -#~ msgid "server.strategy.QFedAvg" -#~ msgstr "server.strategy.QFedAvg" +#~ msgid "Serialization and Deserialization Functions" +#~ msgstr "Fonctions de sérialisation et de désérialisation" -#~ msgid "server.strategy.FedOpt" -#~ msgstr "serveur.stratégie.FedOpt" +#~ msgid "" +#~ "Our next step is to add functions" +#~ " to serialize and deserialize Python " +#~ "datatypes to or from our defined " +#~ "RPC message types. You should add " +#~ "these functions in :code:`serde.py`." +#~ msgstr "" +#~ "La prochaine étape consiste à ajouter" +#~ " des fonctions pour sérialiser et " +#~ "désérialiser les types de données Python" +#~ " vers ou à partir des types de" +#~ " messages RPC définis. Tu dois " +#~ "ajouter ces fonctions dans :code:`serde.py`." 
-#~ msgid "server.strategy.FedProx" -#~ msgstr "serveur.stratégie.FedProx" +#~ msgid "The four functions:" +#~ msgstr "Les quatre fonctions :" -#~ msgid "server.strategy.FedAdagrad" -#~ msgstr "serveur.stratégie.FedAdagrad" +#~ msgid "Sending the Message from the Server" +#~ msgstr "Envoi du message à partir du serveur" -#~ msgid "server.strategy.FedAdam" -#~ msgstr "serveur.stratégie.FedAdam" +#~ msgid "" +#~ "Now write the request function in " +#~ "your Client Proxy class (e.g., " +#~ ":code:`grpc_client_proxy.py`) using the serde " +#~ "functions you just created:" +#~ msgstr "" +#~ "Écris maintenant la fonction de demande" +#~ " dans ta classe Client Proxy (par " +#~ "exemple, :code:`grpc_client_proxy.py`) en utilisant" +#~ " les fonctions serde que tu viens " +#~ "de créer :" -#~ msgid "server.strategy.FedYogi" -#~ msgstr "serveur.stratégie.FedYogi" +#~ msgid "Receiving the Message by the Client" +#~ msgstr "Réception du message par le client" #~ msgid "" -#~ "`achiverram28`, `Adam Narozniak`, `Anass " -#~ "Anhari`, `Charles Beauville`, `Dana-Farber`," -#~ " `Daniel J. Beutel`, `Daniel Nata " -#~ "Nugraha`, `Edoardo Gabrielli`, `eunchung`, " -#~ "`Gustavo Bertoli`, `Heng Pan`, `Javier`, " -#~ "`Mahdi`, `Ruth Galindo`, `Steven Hé " -#~ "(Sīchàng)`, `Taner Topal`" +#~ "Last step! Modify the code in " +#~ ":code:`message_handler.py` to check the field" +#~ " of your message and call the " +#~ ":code:`example_response` function. Remember to " +#~ "use the serde functions!" #~ msgstr "" +#~ "Dernière étape ! Modifie le code " +#~ "dans :code:`message_handler.py` pour vérifier " +#~ "le champ de ton message et appeler" +#~ " la fonction :code:`example_response`. N'oublie" +#~ " pas d'utiliser les fonctions serde !" 
-#~ msgid "" -#~ "Let's now load the CIFAR-10 training " -#~ "and test set, partition them into " -#~ "ten smaller datasets (each split into" -#~ " training and validation set), and " -#~ "wrap the resulting partitions by " -#~ "creating a PyTorch ``DataLoader`` for " -#~ "each of them:" +#~ msgid "Within the handle function:" +#~ msgstr "Dans le cadre de la fonction de poignée :" + +#~ msgid "And add a new function:" +#~ msgstr "Et ajoute une nouvelle fonction :" + +#~ msgid "Hopefully, when you run your program you will get the intended result!" #~ msgstr "" -#~ "Chargeons maintenant l'ensemble de formation" -#~ " et de test CIFAR-10, partitionnons-" -#~ "les en dix ensembles de données " -#~ "plus petits (chacun divisé en ensemble" -#~ " de formation et de validation), et" -#~ " enveloppons les partitions résultantes en" -#~ " créant un PyTorch ``DataLoader`` pour " -#~ "chacun d'entre eux :" +#~ "Avec un peu de chance, lorsque tu" +#~ " exécuteras ton programme, tu obtiendras" +#~ " le résultat escompté !" #~ msgid "" -#~ "Let's build a horizontal federated " -#~ "learning system using XGBoost and " -#~ "Flower!" +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`__." #~ msgstr "" -#~ "Construisons un système d'apprentissage fédéré" -#~ " horizontal en utilisant XGBoost et " -#~ "Flower !" #~ msgid "" -#~ "Please refer to the `full code " -#~ "example `_ to learn " -#~ "more." +#~ "If you want to persist the state" +#~ " of the SuperLink on your host " +#~ "system, all you need to do is " +#~ "specify a path where you want to" +#~ " save the file on your host " +#~ "system and a name for the database" +#~ " file. In the example below, we " +#~ "tell Docker via the flag ``--volume``" +#~ " to mount the user's home directory" +#~ " (``~/`` on your host) into the " +#~ "``/app/`` directory of the container. 
" +#~ "Furthermore, we use the flag " +#~ "``--database`` to specify the name of" +#~ " the database file." #~ msgstr "" -#~ "Réfère-toi à l'exemple de code " -#~ "complet `_ pour en " -#~ "savoir plus." -#~ msgid "|3ff4c820a01d4a5abb022617de537c54|" +#~ msgid "" +#~ "As soon as the SuperLink starts, " +#~ "the file ``state.db`` is created in " +#~ "the user's home directory on your " +#~ "host system. If the file already " +#~ "exists, the SuperLink tries to restore" +#~ " the state from the file. To " +#~ "start the SuperLink with an empty " +#~ "database, simply remove the ``state.db`` " +#~ "file." #~ msgstr "" -#~ msgid "|7f1889391ad448e2a65920165f0d798c|" +#~ msgid "" +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``--volume`` to " +#~ "mount the local directory into the " +#~ "``/app/`` directory of the container. " +#~ "This allows the SuperLink to access " +#~ "the files within the container. Finally," +#~ " we pass the names of the " +#~ "certificates to the SuperLink with the" +#~ " ``--certificates`` flag." #~ msgstr "" -#~ msgid "|a171dc4a0d044e70b5d585cc10ace0e0|" +#~ msgid "" +#~ "``--server 192.168.1.100:9092``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Fleet" #~ msgstr "" -#~ msgid "|fe518aa0d86341f7b2fc87bd6e3bbf0c|" +#~ msgid "" +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the SuperNode to" +#~ " access the certificate within the " +#~ "container. Use the ``--certificates`` flag " +#~ "when starting the container." 
#~ msgstr "" -#~ msgid "|6abfdf0dade44469ae9f08c8dc7d148c|" +#~ msgid "" +#~ "``--server 192.168.1.100:9091``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Driver" #~ msgstr "" -#~ msgid "|b4f147db24bb4da9a786e1d6676a1c2d|" +#~ msgid "" +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the ServerApp to" +#~ " access the certificate within the " +#~ "container. Use the ``--certificates`` flag " +#~ "when starting the container." #~ msgstr "" -#~ msgid "|5c62032f589a457bb37b5fee5b2adbde|" +#~ msgid "" +#~ "If you want to use a different " +#~ "version of Flower, for example Flower" +#~ " nightly, you can do so by " +#~ "changing the tag. All available versions" +#~ " are on `Docker Hub " +#~ "`__." #~ msgstr "" -#~ msgid "|f154df1846dd44f79a94f1dc3ae8b088|" +#~ msgid "" +#~ "Here's another example to start with " +#~ "HTTPS. Use the ``--certificates`` command " +#~ "line argument to pass paths to (CA" +#~ " certificate, server certificate, and " +#~ "server private key)." #~ msgstr "" -#~ msgid "|9d20be8160f7451fb0f33b194506503f|" +#~ msgid ":py:obj:`run_driver_api `\\ \\(\\)" #~ msgstr "" -#~ msgid "|3d949f76988443c59990d2e64f05c386|" -#~ msgstr "" +#~ msgid "Run Flower server (Driver API)." +#~ msgstr "flower-driver-api" -#~ msgid "|526c6d9140f6404f8a226d9056327b3b|" +#~ msgid ":py:obj:`run_fleet_api `\\ \\(\\)" #~ msgstr "" -#~ msgid "|a5f6af14cd7c4550929b17f83b4f63c7|" -#~ msgstr "" +#~ msgid "Run Flower server (Fleet API)." 
+#~ msgstr "flower-fleet-api" -#~ msgid "|bcd571c4f4ee4803a54f71b5c20448cb|" +#~ msgid "|d8bf04f23d9b46d8a23cc6f4887d7873|" #~ msgstr "" -#~ msgid "|c76452ae1ed84965be7ef23c72b95845|" +#~ msgid "|5aa1711387d74d0f8b9c499e1a51627e|" #~ msgstr "" -#~ msgid "" -#~ "Please follow the first section on " -#~ "`Run Flower using Docker " -#~ "`_ which covers this" -#~ " step in more detail." +#~ msgid "|2bc8e069228d4873804061ff4a95048c|" #~ msgstr "" -#~ msgid "" -#~ "Since `Flower 1.5 `_ we have " -#~ "introduced translations to our doc " -#~ "pages, but, as you might have " -#~ "noticed, the translations are often " -#~ "imperfect. If you speak languages other" -#~ " than English, you might be able " -#~ "to help us in our effort to " -#~ "make Federated Learning accessible to as" -#~ " many people as possible by " -#~ "contributing to those translations! This " -#~ "might also be a great opportunity " -#~ "for those wanting to become open " -#~ "source contributors with little prerequistes." +#~ msgid "|c258488766324dc9a6807f0e7c4fd5f4|" #~ msgstr "" -#~ msgid "" -#~ "You input your translation in the " -#~ "textbox at the top and then, once" -#~ " you are happy with it, you " -#~ "either press ``Save and continue`` (to" -#~ " save the translation and go to " -#~ "the next untranslated string), ``Save " -#~ "and stay`` (to save the translation " -#~ "and stay on the same page), " -#~ "``Suggest`` (to add your translation to" -#~ " suggestions for other users to " -#~ "view), or ``Skip`` (to go to the" -#~ " next untranslated string without saving" -#~ " anything)." +#~ msgid "|d5f962c3f4ec48529efda980868c14b0|" +#~ msgstr "" + +#~ msgid "|a5eccea18d4c43a68b54b65043cabef8|" #~ msgstr "" -#~ msgid "" -#~ "If the section is completely empty " -#~ "(without any token) or non-existant, " -#~ "the changelog will just contain the " -#~ "title of the PR for the changelog" -#~ " entry, without any description." 
+#~ msgid "|f17662f7df2d42f68cac70a1fdeda8a7|" #~ msgstr "" -#~ msgid "Example: Walk-Through PyTorch & MNIST" -#~ msgstr "Exemple : PyTorch et MNIST" +#~ msgid "|241fc906441a4f038c625a19d30d01b2|" +#~ msgstr "" -#~ msgid "" -#~ "In this tutorial we will learn, " -#~ "how to train a Convolutional Neural " -#~ "Network on MNIST using Flower and " -#~ "PyTorch." +#~ msgid "|0aa5aa05810b44b6a835cecce28f3137|" #~ msgstr "" -#~ "Dans ce tutoriel, nous allons apprendre," -#~ " comment former un réseau neuronal " -#~ "convolutif sur MNIST en utilisant Flower" -#~ " et PyTorch." -#~ msgid "" -#~ "Since we want to use PyTorch to" -#~ " solve a computer vision task, let's" -#~ " go ahead an install PyTorch and " -#~ "the **torchvision** library:" +#~ msgid "|c742940dd4bf4de09d8d0d5e8d179638|" #~ msgstr "" -#~ "Puisque nous voulons utiliser PyTorch " -#~ "pour résoudre une tâche de vision " -#~ "par ordinateur, installons PyTorch et la" -#~ " bibliothèque **torchvision** :" -#~ msgid "Ready... Set... Train!" -#~ msgstr "Prêts... prêts... entraînez-vous !" +#~ msgid "|1f169ab4601a47e1a226f1628f4ebddb|" +#~ msgstr "" -#~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training with two " -#~ "clients and one server. Our training " -#~ "procedure and network architecture are " -#~ "based on PyTorch's `Basic MNIST Example" -#~ " `_. " -#~ "This will allow you see how easy" -#~ " it is to wrap your code with" -#~ " Flower and begin training in a " -#~ "federated way. We provide you with " -#~ "two helper scripts, namely *run-" -#~ "server.sh*, and *run-clients.sh*. Don't " -#~ "be afraid to look inside, they are" -#~ " simple enough =)." +#~ msgid "|12cfa9cde14440ecb8c8f6c1d7185bec|" #~ msgstr "" -#~ "Maintenant que nous avons installé " -#~ "toutes nos dépendances, lançons un " -#~ "simple entraînement distribué avec deux " -#~ "clients et un serveur. 
Notre procédure" -#~ " d'entraînement et l'architecture de notre" -#~ " réseau sont basées sur l'exemple " -#~ "MNIST de base de PyTorch " -#~ "`_. Cela" -#~ " te permettra de voir à quel " -#~ "point il est facile d'envelopper ton " -#~ "code avec Flower et de commencer " -#~ "l'entraînement de manière fédérée. Nous " -#~ "te fournissons deux scripts d'aide, à" -#~ " savoir *run-server.sh*, et *run-" -#~ "clients.sh*. N'aie pas peur de regarder" -#~ " à l'intérieur, ils sont assez " -#~ "simples =)." -#~ msgid "" -#~ "Go ahead and launch on a terminal" -#~ " the *run-server.sh* script first as" -#~ " follows:" -#~ msgstr "Lance sur un terminal le script *run-server.sh* d'abord comme suit :" +#~ msgid "|72939caf6e294b0986fee6dde96614d7|" +#~ msgstr "" -#~ msgid "Now that the server is up and running, go ahead and launch the clients." -#~ msgstr "Maintenant que le serveur est opérationnel, vas-y et lance les clients." +#~ msgid "|83a8daee45da4a98b8d6f24ae098fc50|" +#~ msgstr "" + +#~ msgid "Edge Client Engine" +#~ msgstr "Moteur client Edge" #~ msgid "" -#~ "Et voilà! You should be seeing the" -#~ " training procedure and, after a few" -#~ " iterations, the test accuracy for " -#~ "each client." +#~ "`Flower `_ core framework " +#~ "architecture with Edge Client Engine" #~ msgstr "" -#~ "Et voilà ! Tu devrais voir la " -#~ "procédure d'entraînement et, après quelques" -#~ " itérations, la précision du test " -#~ "pour chaque client." +#~ "`Flower `_ architecture de " +#~ "base avec Edge Client Engine" -#~ msgid "Now, let's see what is really happening inside." -#~ msgstr "Maintenant, voyons ce qui se passe réellement à l'intérieur." 
+#~ msgid "Virtual Client Engine" +#~ msgstr "Moteur de client virtuel" #~ msgid "" -#~ "Inside the server helper script *run-" -#~ "server.sh* you will find the following" -#~ " code that basically runs the " -#~ ":code:`server.py`" +#~ "`Flower `_ core framework " +#~ "architecture with Virtual Client Engine" #~ msgstr "" -#~ "Dans le script d'aide au serveur " -#~ "*run-server.sh*, tu trouveras le code " -#~ "suivant qui exécute le fichier " -#~ ":code:`server.py`" +#~ "`Flower `_ architecture de " +#~ "base avec moteur de client virtuel" -#~ msgid "" -#~ "We can go a bit deeper and " -#~ "see that :code:`server.py` simply launches " -#~ "a server that will coordinate three " -#~ "rounds of training. Flower Servers are" -#~ " very customizable, but for simple " -#~ "workloads, we can start a server " -#~ "using the :ref:`start_server ` function and leave " -#~ "all the configuration possibilities at " -#~ "their default values, as seen below." +#~ msgid "Virtual Client Engine and Edge Client Engine in the same workload" #~ msgstr "" -#~ "Nous pouvons aller un peu plus " -#~ "loin et voir que :code:`server.py` lance" -#~ " simplement un serveur qui coordonnera " -#~ "trois tours de formation. Flower Les " -#~ "serveurs sont très personnalisables, mais " -#~ "pour les charges de travail simples, " -#~ "nous pouvons démarrer un serveur à " -#~ "l'aide de la fonction :ref:`start_server " -#~ "` et " -#~ "laisser toutes les possibilités de " -#~ "configuration à leurs valeurs par " -#~ "défaut, comme on peut le voir " -#~ "ci-dessous." +#~ "Moteur client virtuel et moteur client" +#~ " Edge dans la même charge de " +#~ "travail" #~ msgid "" -#~ "Next, let's take a look at the " -#~ "*run-clients.sh* file. You will see " -#~ "that it contains the main loop " -#~ "that starts a set of *clients*." 
+#~ "`Flower `_ core framework " +#~ "architecture with both Virtual Client " +#~ "Engine and Edge Client Engine" #~ msgstr "" -#~ "Ensuite, jetons un coup d'œil au " -#~ "fichier *run-clients.sh*. Tu verras " -#~ "qu'il contient la boucle principale qui" -#~ " démarre un ensemble de *clients*." +#~ "`Flower `_ architecture de " +#~ "base avec un moteur de client " +#~ "virtuel et un moteur de client " +#~ "périphérique" -#~ msgid "" -#~ "**cid**: is the client ID. It is" -#~ " an integer that uniquely identifies " -#~ "client identifier." +#~ msgid "How to build Docker Flower images locally" #~ msgstr "" -#~ "**cid** : c'est l'identifiant du client." -#~ " C'est un nombre entier qui identifie" -#~ " de façon unique l'identifiant du " -#~ "client." -#~ msgid "**sever_address**: String that identifies IP and port of the server." -#~ msgstr "**sever_address** : Chaîne qui identifie l'IP et le port du serveur." +#~ msgid "Clone the flower repository." +#~ msgstr "**Fourche le dépôt de Flower**" #~ msgid "" -#~ "**nb_clients**: This defines the number " -#~ "of clients being created. This piece " -#~ "of information is not required by " -#~ "the client, but it helps us " -#~ "partition the original MNIST dataset to" -#~ " make sure that every client is " -#~ "working on unique subsets of both " -#~ "*training* and *test* sets." +#~ "Please follow the first section on " +#~ ":doc:`Run Flower using Docker ` which " +#~ "covers this step in more detail." #~ msgstr "" -#~ "**Cette information n'est pas requise " -#~ "par le client, mais elle nous aide" -#~ " à partitionner l'ensemble de données " -#~ "MNIST original pour nous assurer que " -#~ "chaque client travaille sur des sous-" -#~ "ensembles uniques des ensembles *formation*" -#~ " et *test*." 
+ +#~ msgid "``22.04``" +#~ msgstr "1.0.0rc1" + +#~ msgid "``23.0.1``" +#~ msgstr "1.0.0rc1" + +#~ msgid "``69.0.2``" +#~ msgstr "``1.0.0b0``" + +#~ msgid "``1.8.0``" +#~ msgstr "``1.0.0b0``" #~ msgid "" -#~ "Again, we can go deeper and look" -#~ " inside :code:`flwr_example/quickstart-" -#~ "pytorch/client.py`. After going through the" -#~ " argument parsing code at the " -#~ "beginning of our :code:`main` function, " -#~ "you will find a call to " -#~ ":code:`mnist.load_data`. This function is " -#~ "responsible for partitioning the original " -#~ "MNIST datasets (*training* and *test*) " -#~ "and returning a :code:`torch.utils.data.DataLoader`" -#~ " s for each of them. We then" -#~ " instantiate a :code:`PytorchMNISTClient` object" -#~ " with our client ID, our DataLoaders," -#~ " the number of epochs in each " -#~ "round, and which device we want to" -#~ " use for training (CPU or GPU)." +#~ "The following example creates a base " +#~ "Ubuntu/Alpine image with Python 3.11.0, " +#~ "pip 23.0.1, setuptools 69.0.2 and Flower" +#~ " 1.8.0:" #~ msgstr "" -#~ "Encore une fois, nous pouvons aller " -#~ "plus loin et regarder dans " -#~ ":code:`flwr_example/quickstart-pytorch/client.py`. Après" -#~ " avoir parcouru le code d'analyse des" -#~ " arguments au début de notre fonction" -#~ " :code:`main`, tu trouveras un appel " -#~ "à :code:`mnist.load_data`. Cette fonction est" -#~ " responsable du partitionnement des " -#~ "ensembles de données MNIST originaux " -#~ "(*training* et *test*) et renvoie un " -#~ ":code:`torch.utils.data.DataLoader` s pour chacun" -#~ " d'entre eux. Nous instancions ensuite " -#~ "un objet :code:`PytorchMNISTClient` avec notre" -#~ " ID client, nos DataLoaders, le " -#~ "nombre d'époques dans chaque tour et " -#~ "le périphérique que nous voulons " -#~ "utiliser pour l'entraînement (CPU ou " -#~ "GPU)." 
#~ msgid "" -#~ "The :code:`PytorchMNISTClient` object when " -#~ "finally passed to :code:`fl.client.start_client` " -#~ "along with the server's address as " -#~ "the training process begins." +#~ "The name of image is ``flwr_base`` " +#~ "and the tag ``0.1.0``. Remember that " +#~ "the build arguments as well as the" +#~ " name and tag can be adapted to" +#~ " your needs. These values serve as" +#~ " examples only." #~ msgstr "" -#~ "L'objet :code:`PytorchMNISTClient` est finalement" -#~ " transmis à :code:`fl.client.start_client` avec" -#~ " l'adresse du serveur lorsque le " -#~ "processus de formation commence." -#~ msgid "A Closer Look" -#~ msgstr "Regarder de plus près" +#~ msgid "Building the SuperLink/SuperNode or ServerApp image" +#~ msgstr "Démarrer le serveur" -#~ msgid "" -#~ "Now, let's look closely into the " -#~ ":code:`PytorchMNISTClient` inside :code:`flwr_example" -#~ ".quickstart-pytorch.mnist` and see what it" -#~ " is doing:" +#~ msgid "``1.8.0-py3.10-ubuntu22.04``" #~ msgstr "" -#~ "Maintenant, examinons de près le " -#~ ":code:`PytorchMNISTClient` à l'intérieur du " -#~ ":code:`flwr_example.quickstart-pytorch.mnist` et " -#~ "voyons ce qu'il fait :" #~ msgid "" -#~ "The first thing to notice is that" -#~ " :code:`PytorchMNISTClient` instantiates a CNN" -#~ " model inside its constructor" +#~ "The following example creates a " +#~ "SuperLink/SuperNode or ServerApp image with" +#~ " the official Flower base image:" #~ msgstr "" -#~ "La première chose à remarquer est " -#~ "que :code:`PytorchMNISTClient` instancie un " -#~ "modèle CNN dans son constructeur" #~ msgid "" -#~ "The code for the CNN is available" -#~ " under :code:`quickstart-pytorch.mnist` and " -#~ "it is reproduced below. It is the" -#~ " same network found in `Basic MNIST" -#~ " Example " -#~ "`_." +#~ "If you want to use your own " +#~ "base image instead of the official " +#~ "Flower base image, all you need to" +#~ " do is set the ``BASE_REPOSITORY`` " +#~ "build argument." 
#~ msgstr "" -#~ "Le code du CNN est disponible sous" -#~ " :code:`quickstart-pytorch.mnist` et il est" -#~ " reproduit ci-dessous. Il s'agit du" -#~ " même réseau que celui que l'on " -#~ "trouve dans `Exemple basique de MNIST" -#~ " `_." -#~ msgid "" -#~ "The second thing to notice is that" -#~ " :code:`PytorchMNISTClient` class inherits from" -#~ " the :code:`fl.client.Client`, and hence it" -#~ " must implement the following methods:" -#~ msgstr "" -#~ "La deuxième chose à noter est que" -#~ " la classe :code:`PytorchMNISTClient` hérite " -#~ "de :code:`fl.client.Client`, et qu'elle doit" -#~ " donc implémenter les méthodes suivantes" -#~ " :" +#~ msgid "Trigger the CI for building the Docker images." +#~ msgstr "Démarrer le serveur" -#~ msgid "" -#~ "When comparing the abstract class to " -#~ "its derived class :code:`PytorchMNISTClient` " -#~ "you will notice that :code:`fit` calls" -#~ " a :code:`train` function and that " -#~ ":code:`evaluate` calls a :code:`test`: " -#~ "function." +#~ msgid "" +#~ "To trigger the workflow, a collaborator" +#~ " must create a ``workflow_dispatch`` event" +#~ " in the GitHub CI. This can be" +#~ " done either through the UI or " +#~ "via the GitHub CLI. The event " +#~ "requires only one input, the Flower " +#~ "version, to be released." #~ msgstr "" -#~ "En comparant la classe abstraite à " -#~ "sa classe dérivée :code:`PytorchMNISTClient`, " -#~ "tu remarqueras que :code:`fit` appelle " -#~ "une fonction :code:`train` et que " -#~ ":code:`evaluate` appelle une fonction " -#~ ":code:`test` :." + +#~ msgid "**Via the UI**" +#~ msgstr "**Review the PR**" #~ msgid "" -#~ "These functions can both be found " -#~ "inside the same :code:`quickstart-" -#~ "pytorch.mnist` module:" +#~ "Go to the ``Build docker images`` " +#~ "workflow `page " +#~ "`_." 
#~ msgstr "" -#~ "Ces fonctions se trouvent toutes deux" -#~ " dans le même module :code:`quickstart-" -#~ "pytorch.mnist` :" #~ msgid "" -#~ "Observe that these functions encapsulate " -#~ "regular training and test loops and " -#~ "provide :code:`fit` and :code:`evaluate` with" -#~ " final statistics for each round. You" -#~ " could substitute them with your " -#~ "custom train and test loops and " -#~ "change the network architecture, and the" -#~ " entire example would still work " -#~ "flawlessly. As a matter of fact, " -#~ "why not try and modify the code" -#~ " to an example of your liking?" +#~ "Click on the ``Run workflow`` button " +#~ "and type the new version of Flower" +#~ " in the ``Version of Flower`` input" +#~ " field." #~ msgstr "" -#~ "Observe que ces fonctions encapsulent " -#~ "les boucles d'entraînement et de test" -#~ " habituelles et fournissent à :code:`fit`" -#~ " et :code:`evaluate` les statistiques " -#~ "finales pour chaque tour. Tu pourrais" -#~ " les remplacer par tes boucles " -#~ "d'entraînement et de test personnalisées " -#~ "et changer l'architecture du réseau, et" -#~ " l'ensemble de l'exemple fonctionnerait " -#~ "toujours parfaitement. En fait, pourquoi " -#~ "ne pas essayer de modifier le code" -#~ " pour en faire un exemple qui " -#~ "te plairait ?" -#~ msgid "Give It a Try" -#~ msgstr "Fais un essai" +#~ msgid "Click on the **green** ``Run workflow`` button." +#~ msgstr "" -#~ msgid "" -#~ "Looking through the quickstart code " -#~ "description above will have given a " -#~ "good understanding of how *clients* and" -#~ " *servers* work in Flower, how to " -#~ "run a simple experiment, and the " -#~ "internals of a client wrapper. 
Here " -#~ "are a few things you could try " -#~ "on your own and get more " -#~ "experience with Flower:" +#~ msgid "**Via the GitHub CI**" #~ msgstr "" -#~ "En parcourant la description du code " -#~ "de démarrage rapide ci-dessus, tu " -#~ "auras acquis une bonne compréhension du" -#~ " fonctionnement des *clients* et des " -#~ "*serveurs* dans Flower, de l'exécution " -#~ "d'une expérience simple et de la " -#~ "structure interne d'un wrapper client. " -#~ "Voici quelques exemples que tu peux " -#~ "essayer par toi-même pour acquérir " -#~ "plus d'expérience avec Flower :" #~ msgid "" -#~ "Try and change :code:`PytorchMNISTClient` so" -#~ " it can accept different architectures." +#~ "Make sure you are logged in via" +#~ " ``gh auth login`` and that the " +#~ "current working directory is the root" +#~ " of the Flower repository." #~ msgstr "" -#~ "Essaie de modifier :code:`PytorchMNISTClient` " -#~ "pour qu'il puisse accepter différentes " -#~ "architectures." #~ msgid "" -#~ "Modify the :code:`train` function so " -#~ "that it accepts different optimizers" +#~ "Trigger the workflow via ``gh workflow" +#~ " run docker-images.yml -f flwr-" +#~ "version=``." +#~ msgstr "" + +#~ msgid "Preliminarities" #~ msgstr "" -#~ "Modifie la fonction :code:`train` pour " -#~ "qu'elle accepte différents optimiseurs" + +#~ msgid "Example: JAX - Run JAX Federated" +#~ msgstr "Exemple : JAX - Exécuter JAX Federated" #~ msgid "" -#~ "Modify the :code:`test` function so that" -#~ " it proves not only the top-1 " -#~ "(regular accuracy) but also the top-5" -#~ " accuracy?" +#~ "\\small\n" +#~ "P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" +#~ msgstr "" + +#~ msgid ":doc:`How to run Flower using Docker `" #~ msgstr "" -#~ "Modifie la fonction :code:`test` pour " -#~ "qu'elle prouve non seulement le top-1" -#~ " (précision normale) mais aussi le " -#~ "top-5 ?" #~ msgid "" -#~ "Go larger! Try to adapt the code" -#~ " to larger images and datasets. 
Why" -#~ " not try training on ImageNet with" -#~ " a ResNet-50?" +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`__. Supported " +#~ "architectures include ``amd64`` and " +#~ "``arm64v8``." #~ msgstr "" -#~ "Essaie d'adapter le code à des " -#~ "images et à des ensembles de " -#~ "données plus grands. Pourquoi ne pas " -#~ "essayer de s'entraîner sur ImageNet avec" -#~ " un ResNet-50 ?" -#~ msgid "You are ready now. Enjoy learning in a federated way!" -#~ msgstr "Tu es prêt maintenant. Profite de l'apprentissage de manière fédérée !" +#~ msgid "Before you start, make sure that the Docker daemon is running:" +#~ msgstr "" #~ msgid "" -#~ "Flower provides differential privacy (DP) " -#~ "wrapper classes for the easy integration" -#~ " of the central DP guarantees " -#~ "provided by DP-FedAvg into training " -#~ "pipelines defined in any of the " -#~ "various ML frameworks that Flower is " -#~ "compatible with." +#~ "If you do not see the version " +#~ "of Docker but instead get an error" +#~ " saying that the command was not " +#~ "found, you will need to install " +#~ "Docker first. You can find installation" +#~ " instruction `here `_." #~ msgstr "" -#~ "Flower fournit des classes d'enveloppe " -#~ "de confidentialité différentielle (DP) pour" -#~ " l'intégration facile des garanties " -#~ "centrales de DP fournies par DP-" -#~ "FedAvg dans les pipelines de formation" -#~ " définis dans n'importe lequel des " -#~ "divers cadres de ML avec lesquels " -#~ "Flower est compatible." #~ msgid "" -#~ "Please note that these components are" -#~ " still experimental; the correct " -#~ "configuration of DP for a specific " -#~ "task is still an unsolved problem." +#~ "On Linux, Docker commands require " +#~ "``sudo`` privilege. If you want to " +#~ "avoid using ``sudo``, you can follow " +#~ "the `Post-installation steps " +#~ "`_" +#~ " on the official Docker website." 
#~ msgstr "" -#~ "Note que ces composants sont encore " -#~ "expérimentaux, la configuration correcte du" -#~ " DP pour une tâche spécifique est " -#~ "encore un problème non résolu." #~ msgid "" -#~ "The name DP-FedAvg is misleading " -#~ "since it can be applied on top " -#~ "of any FL algorithm that conforms " -#~ "to the general structure prescribed by" -#~ " the FedOpt family of algorithms." +#~ "To ensure optimal performance and " +#~ "compatibility, the SuperLink, SuperNode and" +#~ " ServerApp image must have the same" +#~ " version when running together. This " +#~ "guarantees seamless integration and avoids " +#~ "potential conflicts or issues that may" +#~ " arise from using different versions." #~ msgstr "" -#~ "Le nom DP-FedAvg est trompeur car" -#~ " il peut être appliqué à n'importe" -#~ " quel algorithme FL qui se conforme" -#~ " à la structure générale prescrite " -#~ "par la famille d'algorithmes FedOpt." -#~ msgid "DP-FedAvg" -#~ msgstr "DP-FedAvg" +#~ msgid "Flower SuperLink" +#~ msgstr "flower-superlink" + +#~ msgid "Quickstart" +#~ msgstr "Démarrage rapide de JAX" + +#~ msgid "If you're looking to try out Flower, you can use the following command:" +#~ msgstr "" #~ msgid "" -#~ "DP-FedAvg, originally proposed by " -#~ "McMahan et al. [mcmahan]_ and extended" -#~ " by Andrew et al. [andrew]_, is " -#~ "essentially FedAvg with the following " -#~ "modifications." +#~ "The command pulls the Docker image " +#~ "with the tag ``1.8.0`` from Docker " +#~ "Hub. The tag specifies the Flower " +#~ "version. In this case, Flower 1.8.0. " +#~ "The ``--rm`` flag tells Docker to " +#~ "remove the container after it exits." #~ msgstr "" -#~ "DP-FedAvg, proposé à l'origine par " -#~ "McMahan et al. [mcmahan]_ et étendu " -#~ "par Andrew et al. [andrew]_, est " -#~ "essentiellement FedAvg avec les modifications" -#~ " suivantes." #~ msgid "" -#~ "**Clipping** : The influence of each " -#~ "client's update is bounded by clipping" -#~ " it. 
This is achieved by enforcing" -#~ " a cap on the L2 norm of " -#~ "the update, scaling it down if " -#~ "needed." +#~ "By default, the Flower SuperLink keeps" +#~ " state in-memory. When using the " +#~ "Docker flag ``--rm``, the state is " +#~ "not persisted between container starts. " +#~ "We will show below how to save " +#~ "the state in a file on your " +#~ "host system." #~ msgstr "" -#~ "**Clipping** : L'influence de la mise" -#~ " à jour de chaque client est " -#~ "limitée en l'écrêtant. Ceci est réalisé" -#~ " en imposant un plafond à la " -#~ "norme L2 de la mise à jour, " -#~ "en la réduisant si nécessaire." #~ msgid "" -#~ "**Noising** : Gaussian noise, calibrated " -#~ "to the clipping threshold, is added " -#~ "to the average computed at the " -#~ "server." +#~ "The ``-p :`` flag tells " +#~ "Docker to map the ports " +#~ "``9091``/``9092`` of the host to " +#~ "``9091``/``9092`` of the container, allowing" +#~ " you to access the Driver API " +#~ "on ``http://localhost:9091`` and the Fleet " +#~ "API on ``http://localhost:9092``. Lastly, any" +#~ " flag that comes after the tag " +#~ "is passed to the Flower SuperLink. " +#~ "Here, we are passing the flag " +#~ "``--insecure``." #~ msgstr "" -#~ "**Bruit** : un bruit gaussien, calibré" -#~ " sur le seuil d'écrêtage, est ajouté" -#~ " à la moyenne calculée au niveau " -#~ "du serveur." #~ msgid "" -#~ "The distribution of the update norm " -#~ "has been shown to vary from " -#~ "task-to-task and to evolve as " -#~ "training progresses. This variability is " -#~ "crucial in understanding its impact on" -#~ " differential privacy guarantees, emphasizing " -#~ "the need for an adaptive approach " -#~ "[andrew]_ that continuously adjusts the " -#~ "clipping threshold to track a " -#~ "prespecified quantile of the update norm" -#~ " distribution." +#~ "The ``--insecure`` flag enables insecure " +#~ "communication (using HTTP, not HTTPS) " +#~ "and should only be used for " +#~ "testing purposes. 
We strongly recommend " +#~ "enabling `SSL `__ when " +#~ "deploying to a production environment." #~ msgstr "" -#~ "Il a été démontré que la " -#~ "distribution de la norme de mise à" -#~ " jour varie d'une tâche à l'autre " -#~ "et évolue au fur et à mesure " -#~ "de la formation. C'est pourquoi nous " -#~ "utilisons une approche adaptative [andrew]_" -#~ " qui ajuste continuellement le seuil " -#~ "d'écrêtage pour suivre un quantile " -#~ "prédéfini de la distribution de la " -#~ "norme de mise à jour." -#~ msgid "Simplifying Assumptions" -#~ msgstr "Simplifier les hypothèses" +#~ msgid "" +#~ "You can use ``--help`` to view all" +#~ " available flags that the SuperLink " +#~ "supports:" +#~ msgstr "" + +#~ msgid "Mounting a volume to store the state on the host system" +#~ msgstr "" #~ msgid "" -#~ "We make (and attempt to enforce) a" -#~ " number of assumptions that must be" -#~ " satisfied to ensure that the " -#~ "training process actually realizes the " -#~ ":math:`(\\epsilon, \\delta)` guarantees the " -#~ "user has in mind when configuring " -#~ "the setup." +#~ "If you want to persist the state" +#~ " of the SuperLink on your host " +#~ "system, all you need to do is " +#~ "specify a directory where you want " +#~ "to save the file on your host " +#~ "system and a name for the database" +#~ " file. By default, the SuperLink " +#~ "container runs with a non-root " +#~ "user called ``app`` with the user " +#~ "ID ``49999``. It is recommended to " +#~ "create new directory and change the " +#~ "user ID of the directory to " +#~ "``49999`` to ensure the mounted " +#~ "directory has the proper permissions. If" +#~ " you later want to delete the " +#~ "directory, you can change the user " +#~ "ID back to the current user ID " +#~ "by running ``sudo chown -R $USER:$(id" +#~ " -gn) state``." 
+#~ msgstr "" + +#~ msgid "" +#~ "In the example below, we create a" +#~ " new directory, change the user ID" +#~ " and tell Docker via the flag " +#~ "``--volume`` to mount the local " +#~ "``state`` directory into the ``/app/state``" +#~ " directory of the container. Furthermore," +#~ " we use the flag ``--database`` to" +#~ " specify the name of the database " +#~ "file." +#~ msgstr "" + +#~ msgid "" +#~ "As soon as the SuperLink starts, " +#~ "the file ``state.db`` is created in " +#~ "the ``state`` directory on your host " +#~ "system. If the file already exists, " +#~ "the SuperLink tries to restore the " +#~ "state from the file. To start the" +#~ " SuperLink with an empty database, " +#~ "simply remove the ``state.db`` file." #~ msgstr "" -#~ "Nous formulons (et tentons d'appliquer) " -#~ "un certain nombre d'hypothèses qui " -#~ "doivent être satisfaites pour que le " -#~ "processus de formation réalise réellement " -#~ "les garanties :math:`(\\epsilon, \\delta)` que" -#~ " l'utilisateur a à l'esprit lorsqu'il " -#~ "configure l'installation." #~ msgid "" -#~ "**Fixed-size subsampling** :Fixed-size " -#~ "subsamples of the clients must be " -#~ "taken at each round, as opposed to" -#~ " variable-sized Poisson subsamples." +#~ "To enable SSL, you will need a " +#~ "PEM-encoded root certificate, a PEM-" +#~ "encoded private key and a PEM-" +#~ "encoded certificate chain." +#~ msgstr "" + +#~ msgid "" +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``--volume`` to " +#~ "mount the local directory into the " +#~ "``/app/certificates/`` directory of the " +#~ "container. This allows the SuperLink to" +#~ " access the files within the " +#~ "container. The ``ro`` stands for " +#~ "``read-only``. Docker volumes default to" +#~ " ``read-write``; that option tells " +#~ "Docker to make the volume ``read-" +#~ "only`` instead. 
Finally, we pass the " +#~ "names of the certificates and key " +#~ "file to the SuperLink with the " +#~ "``--ssl-ca-certfile``, ``--ssl-certfile`` " +#~ "and ``--ssl-keyfile`` flag." #~ msgstr "" -#~ "**Sous-échantillonnage de taille fixe** " -#~ ":Des sous-échantillons de taille fixe" -#~ " des clients doivent être prélevés à" -#~ " chaque tour, par opposition aux " -#~ "sous-échantillons de Poisson de taille " -#~ "variable." #~ msgid "" -#~ "**Unweighted averaging** : The contributions" -#~ " from all the clients must weighted" -#~ " equally in the aggregate to " -#~ "eliminate the requirement for the server" -#~ " to know in advance the sum of" -#~ " the weights of all clients available" -#~ " for selection." +#~ "Because Flower containers, by default, " +#~ "run with a non-root user ``app``," +#~ " the mounted files and directories " +#~ "must have the proper permissions for " +#~ "the user ID ``49999``. For example, " +#~ "to change the user ID of all " +#~ "files in the ``certificates/`` directory, " +#~ "you can run ``sudo chown -R " +#~ "49999:49999 certificates/*``." #~ msgstr "" -#~ "**Moyenne non pondérée** : Les " -#~ "contributions de tous les clients " -#~ "doivent être pondérées de façon égale" -#~ " dans l'ensemble afin que le serveur" -#~ " n'ait pas à connaître à l'avance " -#~ "la somme des poids de tous les " -#~ "clients disponibles pour la sélection." #~ msgid "" -#~ "**No client failures** : The set " -#~ "of available clients must stay constant" -#~ " across all rounds of training. In" -#~ " other words, clients cannot drop out" -#~ " or fail." +#~ "The SuperNode Docker image comes with" +#~ " a pre-installed version of Flower" +#~ " and serves as a base for " +#~ "building your own SuperNode image." #~ msgstr "" -#~ "**Aucune défaillance de client** : " -#~ "L'ensemble des clients disponibles doit " -#~ "rester constant pendant toutes les " -#~ "séries de formation. 
En d'autres termes," -#~ " les clients ne peuvent pas " -#~ "abandonner ou échouer." #~ msgid "" -#~ "The first two are useful for " -#~ "eliminating a multitude of complications " -#~ "associated with calibrating the noise to" -#~ " the clipping threshold, while the " -#~ "third one is required to comply " -#~ "with the assumptions of the privacy " -#~ "analysis." +#~ "The SuperNode Docker image currently " +#~ "works only with the 1.9.0-nightly " +#~ "release. A stable version will be " +#~ "available when Flower 1.9.0 (stable) " +#~ "gets released (ETA: May). A SuperNode" +#~ " nightly image must be paired with" +#~ " the corresponding SuperLink and ServerApp" +#~ " nightly images released on the same" +#~ " day. To ensure the versions are " +#~ "in sync, using the concrete tag, " +#~ "e.g., ``1.9.0.dev20240501`` instead of " +#~ "``nightly`` is recommended." #~ msgstr "" -#~ "Les deux premiers sont utiles pour " -#~ "éliminer une multitude de complications " -#~ "liées au calibrage du bruit en " -#~ "fonction du seuil d'écrêtage, tandis que" -#~ " le troisième est nécessaire pour se" -#~ " conformer aux hypothèses de l'analyse " -#~ "de la vie privée." #~ msgid "" -#~ "These restrictions are in line with " -#~ "constraints imposed by Andrew et al. " -#~ "[andrew]_." +#~ "We will use the ``quickstart-pytorch``" +#~ " example, which you can find in " +#~ "the Flower repository, to illustrate how" +#~ " you can dockerize your ClientApp." #~ msgstr "" -#~ "Ces restrictions sont conformes aux " -#~ "contraintes imposées par Andrew et al." -#~ " [andrew]_." - -#~ msgid "Customizable Responsibility for Noise injection" -#~ msgstr "Responsabilité personnalisable pour l'injection de bruit" #~ msgid "" -#~ "In contrast to other implementations " -#~ "where the addition of noise is " -#~ "performed at the server, you can " -#~ "configure the site of noise injection" -#~ " to better match your threat model." 
-#~ " We provide users with the " -#~ "flexibility to set up the training " -#~ "such that each client independently adds" -#~ " a small amount of noise to the" -#~ " clipped update, with the result that" -#~ " simply aggregating the noisy updates " -#~ "is equivalent to the explicit addition" -#~ " of noise to the non-noisy " -#~ "aggregate at the server." +#~ "Before we can start, we need to" +#~ " meet a few prerequisites in our " +#~ "local development environment. You can " +#~ "skip the first part if you want" +#~ " to run your ClientApp instead of " +#~ "the ``quickstart-pytorch`` example." +#~ msgstr "" + +#~ msgid "Creating a SuperNode Dockerfile" +#~ msgstr "" + +#~ msgid "Let's assume the following project layout:" #~ msgstr "" -#~ "Contrairement à d'autres implémentations où" -#~ " l'ajout de bruit est effectué au " -#~ "niveau du serveur, tu peux configurer" -#~ " le site d'injection de bruit pour" -#~ " qu'il corresponde mieux à ton modèle" -#~ " de menace. Nous offrons aux " -#~ "utilisateurs la possibilité de configurer " -#~ "l'entraînement de telle sorte que chaque" -#~ " client ajoute indépendamment une petite" -#~ " quantité de bruit à la mise à" -#~ " jour écrêtée, ce qui fait que " -#~ "le simple fait d'agréger les mises " -#~ "à jour bruyantes équivaut à l'ajout " -#~ "explicite de bruit à l'agrégat non " -#~ "bruyant au niveau du serveur." #~ msgid "" -#~ "To be precise, if we let :math:`m`" -#~ " be the number of clients sampled " -#~ "each round and :math:`\\sigma_\\Delta` be " -#~ "the scale of the total Gaussian " -#~ "noise that needs to be added to" -#~ " the sum of the model updates, " -#~ "we can use simple maths to show" -#~ " that this is equivalent to each " -#~ "client adding noise with scale " -#~ ":math:`\\sigma_\\Delta/\\sqrt{m}`." +#~ "First, we need to create a " +#~ "``requirements.txt`` file in the directory " +#~ "where the ``ClientApp`` code is located." 
+#~ " In the file, we list all the" +#~ " dependencies that the ClientApp requires." #~ msgstr "" -#~ "Pour être précis, si nous laissons " -#~ ":math:`m` être le nombre de clients " -#~ "échantillonnés à chaque tour et " -#~ ":math:\\sigma_\\Delta` être l'échelle du bruit" -#~ " gaussien total qui doit être ajouté" -#~ " à la somme des mises à jour" -#~ " du modèle, nous pouvons utiliser des" -#~ " mathématiques simples pour montrer que " -#~ "cela équivaut à ce que chaque " -#~ "client ajoute du bruit avec l'échelle" -#~ " :math:\\sigma_\\Delta/\\sqrt{m}`." -#~ msgid "Wrapper-based approach" -#~ msgstr "Approche basée sur l'enveloppe" +#~ msgid "" +#~ "Note that `flwr `__" +#~ " is already installed in the " +#~ "``flwr/supernode`` base image, so you " +#~ "only need to include other package " +#~ "dependencies in your ``requirements.txt``, " +#~ "such as ``torch``, ``tensorflow``, etc." +#~ msgstr "" #~ msgid "" -#~ "Introducing DP to an existing workload" -#~ " can be thought of as adding an" -#~ " extra layer of security around it." -#~ " This inspired us to provide the " -#~ "additional server and client-side logic" -#~ " needed to make the training process" -#~ " differentially private as wrappers for " -#~ "instances of the :code:`Strategy` and " -#~ ":code:`NumPyClient` abstract classes respectively." -#~ " This wrapper-based approach has the" -#~ " advantage of being easily composable " -#~ "with other wrappers that someone might" -#~ " contribute to the Flower library in" -#~ " the future, e.g., for secure " -#~ "aggregation. Using Inheritance instead can " -#~ "be tedious because that would require" -#~ " the creation of new sub- classes " -#~ "every time a new class implementing " -#~ ":code:`Strategy` or :code:`NumPyClient` is " -#~ "defined." +#~ "Next, we create a Dockerfile. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.supernode`` in ``examples/quickstart-" +#~ "pytorch``." 
#~ msgstr "" -#~ "L'introduction du DP dans une charge " -#~ "de travail existante peut être " -#~ "considérée comme l'ajout d'une couche de" -#~ " sécurité supplémentaire autour d'elle. " -#~ "Cela nous a incités à fournir la" -#~ " logique supplémentaire côté serveur et " -#~ "côté client nécessaire pour rendre le" -#~ " processus de formation différentiellement " -#~ "privé en tant qu'enveloppes pour les " -#~ "instances des classes abstraites " -#~ ":code:`Strategy` et :code:`NumPyClient` " -#~ "respectivement. Cette approche basée sur " -#~ "l'enveloppe a l'avantage d'être facilement " -#~ "composable avec d'autres enveloppes que " -#~ "quelqu'un pourrait contribuer à la " -#~ "bibliothèque Flower à l'avenir, par " -#~ "exemple, pour l'agrégation sécurisée. " -#~ "L'utilisation de l'héritage à la place" -#~ " peut être fastidieuse car cela " -#~ "nécessiterait la création de nouvelles " -#~ "sous-classes chaque fois qu'une nouvelle" -#~ " classe mettant en œuvre :code:`Strategy`" -#~ " ou :code:`NumPyClient` est définie." #~ msgid "" -#~ "The first version of our solution " -#~ "was to define a decorator whose " -#~ "constructor accepted, among other things, " -#~ "a boolean-valued variable indicating " -#~ "whether adaptive clipping was to be " -#~ "enabled or not. We quickly realized " -#~ "that this would clutter its " -#~ ":code:`__init__()` function with variables " -#~ "corresponding to hyperparameters of adaptive" -#~ " clipping that would remain unused " -#~ "when it was disabled. A cleaner " -#~ "implementation could be achieved by " -#~ "splitting the functionality into two " -#~ "decorators, :code:`DPFedAvgFixed` and " -#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" -#~ " classing the former. The constructors " -#~ "for both classes accept a boolean " -#~ "parameter :code:`server_side_noising`, which, as " -#~ "the name suggests, determines where " -#~ "noising is to be performed." 
+#~ "The ``Dockerfile.supernode`` contains the " +#~ "instructions that assemble the SuperNode " +#~ "image." #~ msgstr "" -#~ "La première version de notre solution" -#~ " consistait à définir un décorateur " -#~ "dont le constructeur acceptait, entre " -#~ "autres, une variable à valeur booléenne" -#~ " indiquant si l'écrêtage adaptatif devait" -#~ " être activé ou non. Nous nous " -#~ "sommes rapidement rendu compte que cela" -#~ " encombrerait sa fonction :code:`__init__()` " -#~ "avec des variables correspondant aux " -#~ "hyperparamètres de l'écrêtage adaptatif qui" -#~ " resteraient inutilisées lorsque celui-ci" -#~ " était désactivé. Une implémentation plus" -#~ " propre pourrait être obtenue en " -#~ "divisant la fonctionnalité en deux " -#~ "décorateurs, :code:`DPFedAvgFixed` et " -#~ ":code:`DPFedAvgAdaptive`, le second sous-" -#~ "classant le premier. Les constructeurs " -#~ "des deux classes acceptent un paramètre" -#~ " booléen :code:`server_side_noising` qui, comme" -#~ " son nom l'indique, détermine l'endroit " -#~ "où le noising doit être effectué." #~ msgid "" -#~ "The server-side capabilities required " -#~ "for the original version of DP-" -#~ "FedAvg, i.e., the one which performed" -#~ " fixed clipping, can be completely " -#~ "captured with the help of wrapper " -#~ "logic for just the following two " -#~ "methods of the :code:`Strategy` abstract " -#~ "class." +#~ "In the first two lines, we " +#~ "instruct Docker to use the SuperNode " +#~ "image tagged ``nightly`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. Next, we install the" +#~ " ClientApp dependencies by copying the " +#~ "``requirements.txt`` file into the image " +#~ "and run ``pip install``. 
In the " +#~ "last two lines, we copy the " +#~ "``client.py`` module into the image and" +#~ " set the entry point to ``flower-" +#~ "client-app`` with the argument " +#~ "``client:app``. The argument is the " +#~ "object reference of the ClientApp " +#~ "(``:``) that will be run" +#~ " inside the ClientApp." #~ msgstr "" -#~ "Les capacités côté serveur requises pour" -#~ " la version originale de DP-FedAvg," -#~ " c'est-à-dire celle qui effectue un " -#~ "écrêtage fixe, peuvent être entièrement " -#~ "capturées à l'aide d'une logique " -#~ "d'enveloppement pour les deux méthodes " -#~ "suivantes de la classe abstraite " -#~ ":code:`Strategy`." + +#~ msgid "Building the SuperNode Docker image" +#~ msgstr "Démarrer le serveur" #~ msgid "" -#~ ":code:`configure_fit()` : The config " -#~ "dictionary being sent by the wrapped " -#~ ":code:`Strategy` to each client needs to" -#~ " be augmented with an additional " -#~ "value equal to the clipping threshold" -#~ " (keyed under :code:`dpfedavg_clip_norm`) and," -#~ " if :code:`server_side_noising=true`, another one" -#~ " equal to the scale of the " -#~ "Gaussian noise that needs to be " -#~ "added at the client (keyed under " -#~ ":code:`dpfedavg_noise_stddev`). This entails " -#~ "*post*-processing of the results returned " -#~ "by the wrappee's implementation of " -#~ ":code:`configure_fit()`." +#~ "Next, we build the SuperNode Docker " +#~ "image by running the following command" +#~ " in the directory where Dockerfile " +#~ "and ClientApp code are located." 
#~ msgstr "" -#~ ":code:`configure_fit()` : Le dictionnaire de" -#~ " configuration envoyé par la " -#~ ":code:`Strategy` enveloppée à chaque client" -#~ " doit être augmenté d'une valeur " -#~ "supplémentaire égale au seuil d'écrêtage " -#~ "(indiqué sous :code:`dpfedavg_clip_norm`) et, " -#~ "si :code:`server_side_noising=true`, d'une autre " -#~ "égale à l'échelle du bruit gaussien " -#~ "qui doit être ajouté au client " -#~ "(indiqué sous :code:`dpfedavg_noise_stddev`)." #~ msgid "" -#~ ":code:`aggregate_fit()`: We check whether any" -#~ " of the sampled clients dropped out" -#~ " or failed to upload an update " -#~ "before the round timed out. In " -#~ "that case, we need to abort the" -#~ " current round, discarding any successful" -#~ " updates that were received, and move" -#~ " on to the next one. On the " -#~ "other hand, if all clients responded " -#~ "successfully, we must force the " -#~ "averaging of the updates to happen " -#~ "in an unweighted manner by intercepting" -#~ " the :code:`parameters` field of " -#~ ":code:`FitRes` for each received update " -#~ "and setting it to 1. Furthermore, " -#~ "if :code:`server_side_noising=true`, each update " -#~ "is perturbed with an amount of " -#~ "noise equal to what it would have" -#~ " been subjected to had client-side" -#~ " noising being enabled. This entails " -#~ "*pre*-processing of the arguments to " -#~ "this method before passing them on " -#~ "to the wrappee's implementation of " -#~ ":code:`aggregate_fit()`." +#~ "We gave the image the name " +#~ "``flwr_supernode``, and the tag ``0.0.1``. " +#~ "Remember that the here chosen values " +#~ "only serve as an example. You can" +#~ " change them to your needs." +#~ msgstr "" + +#~ msgid "Now that we have built the SuperNode image, we can finally run it." +#~ msgstr "" + +#~ msgid "Let's break down each part of this command:" +#~ msgstr "" + +#~ msgid "``docker run``: This is the command to run a new Docker container." 
#~ msgstr "" -#~ ":code:`aggregate_fit()`: We check whether any" -#~ " of the sampled clients dropped out" -#~ " or failed to upload an update " -#~ "before the round timed out. In " -#~ "that case, we need to abort the" -#~ " current round, discarding any successful" -#~ " updates that were received, and move" -#~ " on to the next one. On the " -#~ "other hand, if all clients responded " -#~ "successfully, we must force the " -#~ "averaging of the updates to happen " -#~ "in an unweighted manner by intercepting" -#~ " the :code:`parameters` field of " -#~ ":code:`FitRes` for each received update " -#~ "and setting it to 1. Furthermore, " -#~ "if :code:`server_side_noising=true`, each update " -#~ "is perturbed with an amount of " -#~ "noise equal to what it would have" -#~ " been subjected to had client-side" -#~ " noising being enabled. This entails " -#~ "*pre*-processing of the arguments to " -#~ "this method before passing them on " -#~ "to the wrappee's implementation of " -#~ ":code:`aggregate_fit()`." #~ msgid "" -#~ "We can't directly change the aggregation" -#~ " function of the wrapped strategy to" -#~ " force it to add noise to the" -#~ " aggregate, hence we simulate client-" -#~ "side noising to implement server-side" -#~ " noising." +#~ "``--rm``: This option specifies that the" +#~ " container should be automatically removed" +#~ " when it stops." +#~ msgstr "" + +#~ msgid "``flwr_supernode:0.0.1``: The name the tag of the Docker image to use." +#~ msgstr "" + +#~ msgid "``--insecure``: This option enables insecure communication." #~ msgstr "" -#~ "Nous ne pouvons pas modifier directement" -#~ " la fonction d'agrégation de la " -#~ "stratégie enveloppée pour la forcer à" -#~ " ajouter du bruit à l'agrégat, c'est" -#~ " pourquoi nous simulons le bruit côté" -#~ " client pour mettre en œuvre le " -#~ "bruit côté serveur." 
#~ msgid "" -#~ "These changes have been put together " -#~ "into a class called :code:`DPFedAvgFixed`, " -#~ "whose constructor accepts the strategy " -#~ "being decorated, the clipping threshold " -#~ "and the number of clients sampled " -#~ "every round as compulsory arguments. The" -#~ " user is expected to specify the " -#~ "clipping threshold since the order of" -#~ " magnitude of the update norms is " -#~ "highly dependent on the model being " -#~ "trained and providing a default value" -#~ " would be misleading. The number of" -#~ " clients sampled at every round is" -#~ " required to calculate the amount of" -#~ " noise that must be added to " -#~ "each individual update, either by the" -#~ " server or the clients." +#~ "``--superlink 192.168.1.100:9092``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Fleet" +#~ msgstr "" + +#~ msgid "API to connect to. Remember to update it with your SuperLink IP." #~ msgstr "" -#~ "Ces modifications ont été regroupées " -#~ "dans une classe appelée :code:`DPFedAvgFixed`," -#~ " dont le constructeur accepte la " -#~ "stratégie décorée, le seuil d'écrêtage " -#~ "et le nombre de clients échantillonnés" -#~ " à chaque tour comme arguments " -#~ "obligatoires. L'utilisateur est censé " -#~ "spécifier le seuil d'écrêtage car " -#~ "l'ordre de grandeur des normes de " -#~ "mise à jour dépend fortement du " -#~ "modèle formé et fournir une valeur " -#~ "par défaut serait trompeur. Le nombre" -#~ " de clients échantillonnés à chaque " -#~ "tour est nécessaire pour calculer la " -#~ "quantité de bruit qui doit être " -#~ "ajoutée à chaque mise à jour " -#~ "individuelle, que ce soit par le " -#~ "serveur ou par les clients." #~ msgid "" -#~ "The additional functionality required to " -#~ "facilitate adaptive clipping has been " -#~ "provided in :code:`DPFedAvgAdaptive`, a " -#~ "subclass of :code:`DPFedAvgFixed`. It " -#~ "overrides the above-mentioned methods to" -#~ " do the following." 
+#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your SuperNodes." #~ msgstr "" -#~ "La fonctionnalité supplémentaire nécessaire " -#~ "pour faciliter l'écrêtage adaptatif a " -#~ "été fournie dans :code:`DPFedAvgAdaptive`, une" -#~ " sous-classe de :code:`DPFedAvgFixed`. Elle" -#~ " remplace les méthodes mentionnées ci-" -#~ "dessus pour effectuer les opérations " -#~ "suivantes." #~ msgid "" -#~ ":code:`configure_fit()` : It intercepts the" -#~ " config dict returned by " -#~ ":code:`super.configure_fit()` to add the " -#~ "key-value pair " -#~ ":code:`dpfedavg_adaptive_clip_enabled:True` to it, " -#~ "which the client interprets as an " -#~ "instruction to include an indicator bit" -#~ " (1 if update norm <= clipping " -#~ "threshold, 0 otherwise) in the results" -#~ " returned by it." +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "SuperNode binary. To see all available" +#~ " flags that the SuperNode supports, " +#~ "run:" #~ msgstr "" -#~ ":code:`configure_fit()` : Il intercepte le " -#~ "dict de configuration renvoyé par " -#~ ":code:`super.configure_fit()` pour y ajouter " -#~ "la paire clé-valeur " -#~ ":code:`dpfedavg_adaptive_clip_enabled:True`, que le " -#~ "client interprète comme une instruction " -#~ "d'inclure un bit indicateur (1 si " -#~ "la norme de mise à jour <= " -#~ "seuil d'écrêtage, 0 sinon) dans les " -#~ "résultats qu'il renvoie." #~ msgid "" -#~ ":code:`aggregate_fit()` : It follows a " -#~ "call to :code:`super.aggregate_fit()` with one" -#~ " to :code:`__update_clip_norm__()`, a procedure" -#~ " which adjusts the clipping threshold " -#~ "on the basis of the indicator bits" -#~ " received from the sampled clients." +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your SuperNode container." 
#~ msgstr "" -#~ ":code:`aggregate_fit()` : Il fait suivre " -#~ "un appel à :code:`super.aggregate_fit()` d'un" -#~ " appel à :code:`__update_clip_norm__()`, une " -#~ "procédure qui ajuste le seuil d'écrêtage" -#~ " sur la base des bits indicateurs " -#~ "reçus des clients échantillonnés." #~ msgid "" -#~ "The client-side capabilities required " -#~ "can be completely captured through " -#~ "wrapper logic for just the :code:`fit()`" -#~ " method of the :code:`NumPyClient` abstract" -#~ " class. To be precise, we need " -#~ "to *post-process* the update computed" -#~ " by the wrapped client to clip " -#~ "it, if necessary, to the threshold " -#~ "value supplied by the server as " -#~ "part of the config dictionary. In " -#~ "addition to this, it may need to" -#~ " perform some extra work if either" -#~ " (or both) of the following keys " -#~ "are also present in the dict." +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the SuperNode to" +#~ " access the certificate within the " +#~ "container. Use the ``--root-certificates`` " +#~ "flag when starting the container." #~ msgstr "" -#~ "Les capacités requises côté client " -#~ "peuvent être entièrement capturées par " -#~ "une logique de wrapper pour la " -#~ "seule méthode :code:`fit()` de la classe" -#~ " abstraite :code:`NumPyClient`. Pour être " -#~ "précis, nous devons *post-traiter* la" -#~ " mise à jour calculée par le " -#~ "client wrapped pour l'écrêter, si " -#~ "nécessaire, à la valeur seuil fournie" -#~ " par le serveur dans le cadre " -#~ "du dictionnaire de configuration. En " -#~ "plus de cela, il peut avoir besoin" -#~ " d'effectuer un travail supplémentaire si" -#~ " l'une des clés suivantes (ou les " -#~ "deux) est également présente dans le " -#~ "dict." 
#~ msgid "" -#~ ":code:`dpfedavg_noise_stddev` : Generate and " -#~ "add the specified amount of noise " -#~ "to the clipped update." +#~ "The procedure for building and running" +#~ " a ServerApp image is almost " +#~ "identical to the SuperNode image." #~ msgstr "" -#~ ":code:`dpfedavg_noise_stddev` : Génère et " -#~ "ajoute la quantité de bruit spécifiée" -#~ " à la mise à jour de " -#~ "l'écrêtage." #~ msgid "" -#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the" -#~ " metrics dict in the :code:`FitRes` " -#~ "object being returned to the server " -#~ "with an indicator bit, calculated as " -#~ "described earlier." +#~ "Similar to the SuperNode image, the " +#~ "ServerApp Docker image comes with a " +#~ "pre-installed version of Flower and " +#~ "serves as a base for building your" +#~ " own ServerApp image." #~ msgstr "" -#~ ":code:`dpfedavg_adaptive_clip_enabled` : Complète " -#~ "les métriques dict dans l'objet " -#~ ":code:`FitRes` renvoyé au serveur avec " -#~ "un bit indicateur, calculé comme décrit" -#~ " précédemment." -#~ msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" -#~ msgstr "Effectuer l'analyse :math:`(\\epsilon, \\delta)`" +#~ msgid "" +#~ "We will use the same ``quickstart-" +#~ "pytorch`` example as we do in the" +#~ " Flower SuperNode section. If you " +#~ "have not already done so, please " +#~ "follow the `SuperNode Prerequisites`_ before" +#~ " proceeding." +#~ msgstr "" + +#~ msgid "Creating a ServerApp Dockerfile" +#~ msgstr "" #~ msgid "" -#~ "Assume you have trained for :math:`n`" -#~ " rounds with sampling fraction :math:`q`" -#~ " and noise multiplier :math:`z`. In " -#~ "order to calculate the :math:`\\epsilon` " -#~ "value this would result in for a" -#~ " particular :math:`\\delta`, the following " -#~ "script may be used." +#~ "First, we need to create a " +#~ "Dockerfile in the directory where the" +#~ " ``ServerApp`` code is located. 
If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.serverapp`` in ``examples/quickstart-" +#~ "pytorch``." #~ msgstr "" -#~ "Supposons que tu te sois entraîné " -#~ "pendant :math:`n` tours avec la fraction" -#~ " d'échantillonnage :math:`q` et le " -#~ "multiplicateur de bruit :math:`z`. Afin " -#~ "de calculer la valeur :math:`epsilon` " -#~ "qui en résulterait pour un " -#~ ":math:`\\delta` particulier, le script suivant" -#~ " peut être utilisé." #~ msgid "" -#~ "`How to run Flower using Docker " -#~ "`_" +#~ "The ``Dockerfile.serverapp`` contains the " +#~ "instructions that assemble the ServerApp " +#~ "image." #~ msgstr "" -#~ msgid "Enjoy building more robust and flexible ``ClientApp``s with mods!" +#~ msgid "" +#~ "In the first two lines, we " +#~ "instruct Docker to use the ServerApp " +#~ "image tagged ``1.8.0`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. In the last two " +#~ "lines, we copy the ``server.py`` module" +#~ " into the image and set the " +#~ "entry point to ``flower-server-app`` " +#~ "with the argument ``server:app``. The " +#~ "argument is the object reference of " +#~ "the ServerApp (``:``) that " +#~ "will be run inside the ServerApp " +#~ "container." #~ msgstr "" +#~ msgid "Building the ServerApp Docker image" +#~ msgstr "Démarrer le serveur" + #~ msgid "" -#~ ":py:obj:`ClientApp `\\ " -#~ "\\(client\\_fn\\[\\, mods\\]\\)" +#~ "Next, we build the ServerApp Docker " +#~ "image by running the following command" +#~ " in the directory where Dockerfile " +#~ "and ServerApp code are located." #~ msgstr "" -#~ msgid ":py:obj:`flwr.server.driver `\\" +#~ msgid "" +#~ "We gave the image the name " +#~ "``flwr_serverapp``, and the tag ``0.0.1``. " +#~ "Remember that the here chosen values " +#~ "only serve as an example. You can" +#~ " change them to your needs." 
#~ msgstr "" -#~ msgid "Flower driver SDK." -#~ msgstr "Serveur de Flower" +#~ msgid "Running the ServerApp Docker image" +#~ msgstr "Démarrer le serveur" -#~ msgid "driver" -#~ msgstr "serveur" +#~ msgid "Now that we have built the ServerApp image, we can finally run it." +#~ msgstr "" + +#~ msgid "``flwr_serverapp:0.0.1``: The name the tag of the Docker image to use." +#~ msgstr "" #~ msgid "" -#~ ":py:obj:`start_driver `\\ " -#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ "``--superlink 192.168.1.100:9091``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Driver" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`Driver `\\ " -#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your ServerApps." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`GrpcDriver `\\ " -#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "ServerApp binary. To see all available" +#~ " flags that the ServerApp supports, " +#~ "run:" #~ msgstr "" -#~ msgid "`GrpcDriver` provides access to the gRPC Driver API/service." +#~ msgid "" +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your ServerApp container." #~ msgstr "" -#~ msgid ":py:obj:`get_nodes `\\ \\(\\)" +#~ msgid "" +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the ServerApp to" +#~ " access the certificate within the " +#~ "container. Use the ``--root-certificates`` " +#~ "flags when starting the container." 
+#~ msgstr "" + +#~ msgid "Run with root user privileges" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`pull_task_res " -#~ "`\\ \\(task\\_ids\\)" +#~ "Flower Docker images, by default, run" +#~ " with a non-root user " +#~ "(username/groupname: ``app``, UID/GID: ``49999``)." +#~ " Using root user is not recommended" +#~ " unless it is necessary for specific" +#~ " tasks during the build process. " +#~ "Always make sure to run the " +#~ "container as a non-root user in" +#~ " production to maintain security best " +#~ "practices." #~ msgstr "" -#~ msgid "Get task results." +#~ msgid "**Run a container with root user privileges**" +#~ msgstr "" + +#~ msgid "**Run the build process with root user privileges**" +#~ msgstr "" + +#~ msgid "Using a different Flower version" +#~ msgstr "" + +#~ msgid "Pinning a Docker image to a specific version" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`push_task_ins " -#~ "`\\ " -#~ "\\(task\\_ins\\_list\\)" +#~ "It may happen that we update the" +#~ " images behind the tags. Such updates" +#~ " usually include security updates of " +#~ "system dependencies that should not " +#~ "change the functionality of Flower. " +#~ "However, if you want to ensure " +#~ "that you always use the same " +#~ "image, you can specify the hash of" +#~ " the image instead of the tag." +#~ msgstr "" + +#~ msgid "" +#~ "The following command returns the " +#~ "current image hash referenced by the " +#~ "``superlink:1.8.0`` tag:" +#~ msgstr "" + +#~ msgid "Next, we can pin the hash when running a new SuperLink container:" +#~ msgstr "" + +#~ msgid "" +#~ "To set a variable inside a Docker" +#~ " container, you can use the ``-e " +#~ "=`` flag." +#~ msgstr "" + +#~ msgid "" +#~ "This approach consists of two seprate" +#~ " phases: clipping of the updates and" +#~ " adding noise to the aggregated " +#~ "model. 
For the clipping phase, Flower" +#~ " framework has made it possible to" +#~ " decide whether to perform clipping " +#~ "on the server side or the client" +#~ " side." +#~ msgstr "" + +#~ msgid ":py:obj:`flwr.client `\\" +#~ msgstr "" + +#~ msgid ":py:obj:`flwr.common `\\" #~ msgstr "" -#~ msgid "Schedule tasks." +#~ msgid ":py:obj:`flwr.server `\\" #~ msgstr "" -#~ msgid "GrpcDriver" +#~ msgid ":py:obj:`flwr.simulation `\\" #~ msgstr "" -#~ msgid ":py:obj:`connect `\\ \\(\\)" +#~ msgid ":py:obj:`run_client_app `\\ \\(\\)" #~ msgstr "" -#~ msgid "Connect to the Driver API." +#~ msgid ":py:obj:`run_supernode `\\ \\(\\)" +#~ msgstr "serveur.stratégie.Stratégie" + +#~ msgid ":py:obj:`Context `\\ \\(state\\)" #~ msgstr "" -#~ msgid "" -#~ ":py:obj:`create_run " -#~ "`\\ \\(req\\)" +#~ msgid "State of your run." #~ msgstr "" -#~ msgid "Request for run ID." -#~ msgstr "Demande pour une nouvelle Flower Baseline" +#~ msgid "Metrics record." +#~ msgstr "" #~ msgid "" -#~ ":py:obj:`disconnect " -#~ "`\\ \\(\\)" +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:class:`int` | " +#~ ":py:class:`float` | :py:class:`str` | " +#~ ":py:class:`bytes` | :py:class:`bool` | " +#~ ":py:class:`~typing.List`\\ [:py:class:`int`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`float`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`str`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`bytes`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`bool`]]" #~ msgstr "" -#~ msgid "Disconnect from the Driver API." +#~ msgid "Remove all items from R." #~ msgstr "" -#~ msgid "" -#~ ":py:obj:`get_nodes `\\" -#~ " \\(req\\)" +#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" #~ msgstr "" -#~ msgid "Get client IDs." -#~ msgstr "Moteur client Edge" +#~ msgid "d defaults to None." +#~ msgstr "" -#~ msgid "" -#~ ":py:obj:`pull_task_res " -#~ "`\\ \\(req\\)" +#~ msgid "Update R from dict/iterable E and F." 
#~ msgstr "" #~ msgid "" -#~ ":py:obj:`push_task_ins " -#~ "`\\ \\(req\\)" +#~ ":py:obj:`RUN_DRIVER_API_ENTER " +#~ "`\\" #~ msgstr "" #~ msgid "" -#~ "Optionally specify the type of actor " -#~ "to use. The actor object, which " -#~ "persists throughout the simulation, will " -#~ "be the process in charge of " -#~ "running the clients' jobs (i.e. their" -#~ " `fit()` method)." +#~ ":py:obj:`RUN_DRIVER_API_LEAVE " +#~ "`\\" #~ msgstr "" #~ msgid "" -#~ "Much effort went into a completely " -#~ "restructured Flower docs experience. The " -#~ "documentation on [flower.ai/docs](flower.ai/docs) is" -#~ " now divided into Flower Framework, " -#~ "Flower Baselines, Flower Android SDK, " -#~ "Flower iOS SDK, and code example " -#~ "projects." +#~ ":py:obj:`RUN_FLEET_API_ENTER " +#~ "`\\" #~ msgstr "" #~ msgid "" -#~ "Flower usage examples used to be " -#~ "bundled with Flower in a package " -#~ "called ``flwr_example``. We are migrating " -#~ "those examples to standalone projects to" -#~ " make them easier to use. All " -#~ "new examples are based in the " -#~ "directory `examples " -#~ "`_." +#~ ":py:obj:`RUN_FLEET_API_LEAVE " +#~ "`\\" #~ msgstr "" -#~ "Les exemples d'utilisation de Flower " -#~ "étaient auparavant regroupés avec Flower " -#~ "dans un paquet appelé ``flwr_example``. " -#~ "Nous migrons ces exemples vers des " -#~ "projets autonomes pour les rendre plus" -#~ " faciles à utiliser. Tous les " -#~ "nouveaux exemples sont basés dans le " -#~ "répertoire ``examples " -#~ "`_." -#~ msgid "Quickstart TensorFlow/Keras" -#~ msgstr "Démarrage rapide de TensorFlow/Keras" +#~ msgid ":py:obj:`DRIVER_CONNECT `\\" +#~ msgstr "" -#~ msgid "Legacy Examples (`flwr_example`)" -#~ msgstr "Exemples hérités (`flwr_example`)" +#~ msgid ":py:obj:`DRIVER_DISCONNECT `\\" +#~ msgstr "" #~ msgid "" -#~ "The useage examples in `flwr_example` " -#~ "are deprecated and will be removed " -#~ "in the future. 
New examples are " -#~ "provided as standalone projects in " -#~ "`examples `_." +#~ ":py:obj:`START_DRIVER_ENTER " +#~ "`\\" #~ msgstr "" -#~ "Les exemples d'utilisation dans `flwr_example`" -#~ " sont obsolètes et seront supprimés à" -#~ " l'avenir. De nouveaux exemples sont " -#~ "fournis en tant que projets autonomes" -#~ " dans `examples " -#~ "`_." - -#~ msgid "Extra Dependencies" -#~ msgstr "Dépendances supplémentaires" #~ msgid "" -#~ "The core Flower framework keeps a " -#~ "minimal set of dependencies. The " -#~ "examples demonstrate Flower in the " -#~ "context of different machine learning " -#~ "frameworks, so additional dependencies need" -#~ " to be installed before an example" -#~ " can be run." +#~ ":py:obj:`START_DRIVER_LEAVE " +#~ "`\\" #~ msgstr "" -#~ "Le noyau du framework Flower conserve" -#~ " un ensemble minimal de dépendances. " -#~ "Les exemples démontrent Flower dans le" -#~ " contexte de différents frameworks " -#~ "d'apprentissage automatique, de sorte que " -#~ "des dépendances supplémentaires doivent être" -#~ " installées avant qu'un exemple puisse " -#~ "être exécuté." - -#~ msgid "For PyTorch examples::" -#~ msgstr "Pour les exemples de PyTorch: :" - -#~ msgid "For TensorFlow examples::" -#~ msgstr "Pour les exemples de TensorFlow : :" - -#~ msgid "For both PyTorch and TensorFlow examples::" -#~ msgstr "Pour les exemples PyTorch et TensorFlow: :" #~ msgid "" -#~ "Please consult :code:`pyproject.toml` for a" -#~ " full list of possible extras " -#~ "(section :code:`[tool.poetry.extras]`)." +#~ "An identifier that can be used " +#~ "when loading a particular data partition" +#~ " for a ClientApp. Making use of " +#~ "this identifier is more relevant when" +#~ " conducting simulations." #~ msgstr "" -#~ "Tu peux consulter :code:`pyproject.toml` pour" -#~ " une liste complète des extras " -#~ "possibles (section :code:`[tool.poetry.extras]`)." 
- -#~ msgid "PyTorch Examples" -#~ msgstr "Exemples de PyTorch" -#~ msgid "" -#~ "Our PyTorch examples are based on " -#~ "PyTorch 1.7. They should work with " -#~ "other releases as well. So far, we" -#~ " provide the following examples." +#~ msgid ":py:obj:`partition_id `\\" #~ msgstr "" -#~ "Nos exemples PyTorch sont basés sur " -#~ "PyTorch 1.7. Ils devraient fonctionner " -#~ "avec d'autres versions également. Jusqu'à " -#~ "présent, nous fournissons les exemples " -#~ "suivants." -#~ msgid "CIFAR-10 Image Classification" -#~ msgstr "Classification d'images CIFAR-10" +#~ msgid "An identifier telling which data partition a ClientApp should use." +#~ msgstr "" #~ msgid "" -#~ "`CIFAR-10 and CIFAR-100 " -#~ "`_ are " -#~ "popular RGB image datasets. The Flower" -#~ " CIFAR-10 example uses PyTorch to " -#~ "train a simple CNN classifier in a" -#~ " federated learning setup with two " -#~ "clients." +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:class:`int` | " +#~ ":py:class:`float` | :py:class:`~typing.List`\\ " +#~ "[:py:class:`int`] | :py:class:`~typing.List`\\ " +#~ "[:py:class:`float`]]" #~ msgstr "" -#~ "`CIFAR-10 et CIFAR-100 " -#~ "`_ sont des" -#~ " ensembles de données d'images RVB " -#~ "populaires. L'exemple Flower CIFAR-10 utilise" -#~ " PyTorch pour former un classificateur " -#~ "CNN simple dans une configuration " -#~ "d'apprentissage fédéré avec deux clients." -#~ msgid "First, start a Flower server:" -#~ msgstr "Tout d'abord, démarre un serveur Flower :" +#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" -#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +#~ msgid "" +#~ "A dataclass storing named Arrays in " +#~ "order. This means that it holds " +#~ "entries as an OrderedDict[str, Array]. 
" +#~ "ParametersRecord objects can be viewed " +#~ "as an equivalent to PyTorch's " +#~ "state_dict, but holding serialised tensors " +#~ "instead." +#~ msgstr "" -#~ msgid "Then, start the two clients in a new terminal window:" +#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" #~ msgstr "" -#~ "Ensuite, démarre les deux clients dans" -#~ " une nouvelle fenêtre de terminal :" -#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" -#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +#~ msgid ":py:obj:`run_server_app `\\ \\(\\)" +#~ msgstr "" -#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." -#~ msgstr "Pour plus de détails, voir :code:`src/py/flwr_example/pytorch_cifar`." +#~ msgid ":py:obj:`run_superlink `\\ \\(\\)" +#~ msgstr "" -#~ msgid "ImageNet-2012 Image Classification" -#~ msgstr "ImageNet-2012 Classification des images" +#~ msgid "Run Flower SuperLink (Driver API and Fleet API)." +#~ msgstr "flower-fleet-api" #~ msgid "" -#~ "`ImageNet-2012 `_ is " -#~ "one of the major computer vision " -#~ "datasets. The Flower ImageNet example " -#~ "uses PyTorch to train a ResNet-18 " -#~ "classifier in a federated learning setup" -#~ " with ten clients." +#~ ":py:obj:`LegacyContext `\\ " +#~ "\\(state\\[\\, config\\, strategy\\, ...\\]\\)" #~ msgstr "" -#~ "`ImageNet-2012 `_ est " -#~ "l'un des principaux ensembles de données" -#~ " de vision par ordinateur. L'exemple " -#~ "Flower ImageNet utilise PyTorch pour " -#~ "entraîner un classificateur ResNet-18 dans " -#~ "une configuration d'apprentissage fédéré avec" -#~ " dix clients." 
- -#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" -#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" -#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" -#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +#~ msgid "run\\_driver\\_api" +#~ msgstr "flower-driver-api" -#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." +#~ msgid "run\\_fleet\\_api" #~ msgstr "" -#~ "Pour plus de détails, voir " -#~ ":code:`src/py/flwr_example/pytorch_imagenet`." - -#~ msgid "TensorFlow Examples" -#~ msgstr "Exemples de TensorFlow" #~ msgid "" -#~ "Our TensorFlow examples are based on " -#~ "TensorFlow 2.0 or newer. So far, " -#~ "we provide the following examples." +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg+ configuration " +#~ "to clients and collect their public " +#~ "keys. - 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" #~ msgstr "" -#~ "Nos exemples TensorFlow sont basés sur" -#~ " TensorFlow 2.0 ou une version plus" -#~ " récente. Jusqu'à présent, nous te " -#~ "proposons les exemples suivants." -#~ msgid "Fashion-MNIST Image Classification" -#~ msgstr "Classification d'images Fashion-MNIST" +#~ msgid "key shares." +#~ msgstr "" #~ msgid "" -#~ "`Fashion-MNIST `_ is often used as " -#~ "the \"Hello, world!\" of machine " -#~ "learning. We follow this tradition and" -#~ " provide an example which samples " -#~ "random local datasets from Fashion-MNIST" -#~ " and trains a simple image " -#~ "classification model over those partitions." +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg configuration " +#~ "to clients and collect their public " +#~ "keys. 
- 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" #~ msgstr "" -#~ "nous suivons cette tradition et " -#~ "fournissons un exemple qui échantillonne " -#~ "des ensembles de données locales " -#~ "aléatoires de Fashion-MNIST et entraîne" -#~ " un modèle simple de classification " -#~ "d'images sur ces partitions." - -#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" -#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" -#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" -#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +#~ msgid "" +#~ ":py:obj:`start_simulation `\\" +#~ " \\(\\*\\, client\\_fn\\[\\, ...\\]\\)" +#~ msgstr "" #~ msgid "" -#~ "For more details, see " -#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." +#~ "'A dictionary, e.g {\"\": , " +#~ "\"\": } to configure a " +#~ "backend. Values supported in are" +#~ " those included by " +#~ "`flwr.common.typing.ConfigsRecordValues`." #~ msgstr "" -#~ "Pour plus de détails, voir " -#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." #~ msgid "" -#~ "MXNet is no longer maintained and " -#~ "has been moved into `Attic " -#~ "`_. As a " -#~ "result, we would encourage you to " -#~ "use other ML frameworks alongise Flower," -#~ " for example, PyTorch. This tutorial " -#~ "might be removed in future versions " -#~ "of Flower." +#~ "When diabled, only INFO, WARNING and " +#~ "ERROR log messages will be shown. " +#~ "If enabled, DEBUG-level logs will " +#~ "be displayed." #~ msgstr "" #~ msgid "" -#~ "Now that you have known how " -#~ "federated XGBoost work with Flower, it's" -#~ " time to run some more comprehensive" -#~ " experiments by customising the " -#~ "experimental settings. 
In the xgboost-" -#~ "comprehensive example (`full code " -#~ "`_), we provide more options " -#~ "to define various experimental setups, " -#~ "including aggregation strategies, data " -#~ "partitioning and centralised/distributed evaluation." -#~ " We also support `Flower simulation " -#~ "`_ making it easy to " -#~ "simulate large client cohorts in a " -#~ "resource-aware manner. Let's take a " -#~ "look!" +#~ "A function creating client instances. " +#~ "The function must take a single " +#~ "`str` argument called `cid`. It should" +#~ " return a single client instance of" +#~ " type Client. Note that the created" +#~ " client instances are ephemeral and " +#~ "will often be destroyed after a " +#~ "single method invocation. Since client " +#~ "instances are not long-lived, they " +#~ "should not attempt to carry state " +#~ "over method invocations. Any state " +#~ "required by the instance (model, " +#~ "dataset, hyperparameters, ...) should be " +#~ "(re-)created in either the call to " +#~ "`client_fn` or the call to any of" +#~ " the client methods (e.g., load " +#~ "evaluation data in the `evaluate` method" +#~ " itself)." #~ msgstr "" -#~ msgid "|31e4b1afa87c4b968327bbeafbf184d4|" +#~ msgid "" +#~ "In this tutorial we will learn how" +#~ " to train a Convolutional Neural " +#~ "Network on CIFAR10 using Flower and " +#~ "PyTorch." #~ msgstr "" +#~ "Dans ce tutoriel, nous allons apprendre" +#~ " à entraîner un réseau neuronal " +#~ "convolutif sur CIFAR10 à l'aide de " +#~ "Flower et PyTorch." -#~ msgid "|c9d935b4284e4c389a33d86b33e07c0a|" +#~ msgid "" +#~ "*Clients* are responsible for generating " +#~ "individual weight-updates for the model" +#~ " based on their local datasets. These" +#~ " updates are then sent to the " +#~ "*server* which will aggregate them to" +#~ " produce a better model. Finally, the" +#~ " *server* sends this improved version " +#~ "of the model back to each " +#~ "*client*. 
A complete cycle of weight " +#~ "updates is called a *round*." #~ msgstr "" +#~ "*Les clients* sont chargés de générer" +#~ " des mises à jour de poids " +#~ "individuelles pour le modèle en fonction" +#~ " de leurs ensembles de données " +#~ "locales. Ces mises à jour sont " +#~ "ensuite envoyées au *serveur* qui les" +#~ " agrège pour produire un meilleur " +#~ "modèle. Enfin, le *serveur* renvoie " +#~ "cette version améliorée du modèle à " +#~ "chaque *client*. Un cycle complet de " +#~ "mises à jour de poids s'appelle un" +#~ " *round*." -#~ msgid "|00727b5faffb468f84dd1b03ded88638|" +#~ msgid "" +#~ "Now that we have a rough idea " +#~ "of what is going on, let's get " +#~ "started. We first need to install " +#~ "Flower. You can do this by running" +#~ " :" #~ msgstr "" +#~ "Maintenant que nous avons une idée " +#~ "générale de ce qui se passe, " +#~ "commençons. Nous devons d'abord installer " +#~ "Flower. Tu peux le faire en " +#~ "exécutant :" -#~ msgid "|daf0cf0ff4c24fd29439af78416cf47b|" +#~ msgid "" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead and install PyTorch and " +#~ "the **torchvision** library:" #~ msgstr "" +#~ "Puisque nous voulons utiliser PyTorch " +#~ "pour résoudre une tâche de vision " +#~ "par ordinateur, allons-y et installons " +#~ "PyTorch et la bibliothèque **torchvision** " +#~ ":" -#~ msgid "|9f093007080d471d94ca90d3e9fde9b6|" +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Deep Learning with" +#~ " PyTorch " +#~ "`_." #~ msgstr "" +#~ "Maintenant que nous avons installé " +#~ "toutes nos dépendances, lançons une " +#~ "formation distribuée simple avec deux " +#~ "clients et un serveur. 
Notre procédure" +#~ " de formation et l'architecture de " +#~ "notre réseau sont basées sur `Deep " +#~ "Learning with PyTorch " +#~ "`_" +#~ " de PyTorch." -#~ msgid "|46a26e6150e0479fbd3dfd655f36eb13|" +#~ msgid "" +#~ "In a file called :code:`client.py`, " +#~ "import Flower and PyTorch related " +#~ "packages:" #~ msgstr "" +#~ "Dans un fichier appelé :code:`client.py`, " +#~ "importe Flower et les paquets liés " +#~ "à PyTorch :" -#~ msgid "|3daba297595c4c7fb845d90404a6179a|" +#~ msgid "In addition, we define the device allocation in PyTorch with:" #~ msgstr "" +#~ "En outre, nous définissons l'attribution " +#~ "des appareils dans PyTorch avec :" -#~ msgid "|5769874fa9c4455b80b2efda850d39d7|" +#~ msgid "" +#~ "We use PyTorch to load CIFAR10, a" +#~ " popular colored image classification " +#~ "dataset for machine learning. The " +#~ "PyTorch :code:`DataLoader()` downloads the " +#~ "training and test data that are " +#~ "then normalized." #~ msgstr "" +#~ "Nous utilisons PyTorch pour charger " +#~ "CIFAR10, un ensemble de données de " +#~ "classification d'images colorées populaire " +#~ "pour l'apprentissage automatique. Le " +#~ ":code:`DataLoader()` de PyTorch télécharge les" +#~ " données d'entraînement et de test " +#~ "qui sont ensuite normalisées." -#~ msgid "|ba47ffb421814b0f8f9fa5719093d839|" +#~ msgid "" +#~ "Define the loss and optimizer with " +#~ "PyTorch. The training of the dataset " +#~ "is done by looping over the " +#~ "dataset, measure the corresponding loss " +#~ "and optimize it." #~ msgstr "" +#~ "Définis la perte et l'optimiseur avec" +#~ " PyTorch L'entraînement de l'ensemble de" +#~ " données se fait en bouclant sur " +#~ "l'ensemble de données, en mesurant la" +#~ " perte correspondante et en l'optimisant." -#~ msgid "|aeac5bf79cbf497082e979834717e01b|" +#~ msgid "" +#~ "Define then the validation of the " +#~ "machine learning network. We loop over" +#~ " the test set and measure the " +#~ "loss and accuracy of the test set." 
#~ msgstr "" +#~ "Définis ensuite la validation du réseau" +#~ " d'apprentissage automatique. Nous passons " +#~ "en boucle sur l'ensemble de test " +#~ "et mesurons la perte et la " +#~ "précision de l'ensemble de test." -#~ msgid "|ce27ed4bbe95459dba016afc42486ba2|" +#~ msgid "" +#~ "After defining the training and testing" +#~ " of a PyTorch machine learning model," +#~ " we use the functions for the " +#~ "Flower clients." #~ msgstr "" +#~ "Après avoir défini l'entraînement et le" +#~ " test d'un modèle d'apprentissage " +#~ "automatique PyTorch, nous utilisons les " +#~ "fonctions pour les clients Flower." -#~ msgid "|ae94a7f71dda443cbec2385751427d41|" +#~ msgid "" +#~ "The Flower clients will use a " +#~ "simple CNN adapted from 'PyTorch: A " +#~ "60 Minute Blitz':" #~ msgstr "" +#~ "Les clients de Flower utiliseront un " +#~ "CNN simple adapté de \"PyTorch : A" +#~ " 60 Minute Blitz\" :" -#~ msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" +#~ msgid "" +#~ "After loading the data set with " +#~ ":code:`load_data()` we define the Flower " +#~ "interface." #~ msgstr "" +#~ "Après avoir chargé l'ensemble des " +#~ "données avec :code:`load_data()`, nous " +#~ "définissons l'interface Flower." -#~ msgid "|08cb60859b07461588fe44e55810b050|" +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses " +#~ "PyTorch. Implementing :code:`NumPyClient` usually" +#~ " means defining the following methods " +#~ "(:code:`set_parameters` is optional though):" #~ msgstr "" +#~ "Flower fournit une classe de commodité" +#~ " appelée :code:`NumPyClient` qui facilite " +#~ "la mise en œuvre de l'interface " +#~ ":code:`Client` lorsque ta charge de " +#~ "travail utilise PyTorch. 
Mettre en œuvre" +#~ " :code:`NumPyClient` signifie généralement " +#~ "définir les méthodes suivantes " +#~ "(:code:`set_parameters` est cependant facultatif)" +#~ " :" + +#~ msgid "which can be implemented in the following way:" +#~ msgstr "qui peut être mis en œuvre de la manière suivante :" #~ msgid "" -#~ "Flower provides pre-made docker images" -#~ " on `Docker Hub " -#~ "`_ that include" -#~ " all necessary dependencies for running " -#~ "the server. You can also build " -#~ "your own custom docker images from " -#~ "scratch with a different version of " -#~ "Python or Ubuntu if that is what" -#~ " you need. In this guide, we " -#~ "will explain what images exist and " -#~ "how to build them locally." +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/quickstart-" +#~ "pytorch`." #~ msgstr "" +#~ "Félicitations ! Tu as réussi à " +#~ "construire et à faire fonctionner ton" +#~ " premier système d'apprentissage fédéré. Le" +#~ " code source complet " +#~ "`_ de cet exemple se " +#~ "trouve dans :code:`examples/quickstart-pytorch`." #~ msgid "" -#~ "Currently, Flower provides two images, a" -#~ " base image and a server image. " -#~ "There will also be a client image" -#~ " soon. The base image, as the " -#~ "name suggests, contains basic dependencies " -#~ "that both the server and the " -#~ "client need. This includes system " -#~ "dependencies, Python and Python tools. " -#~ "The server image is based on the" -#~ " base image, but it additionally " -#~ "installs the Flower server using " -#~ "``pip``." +#~ "In this example, we split the " +#~ "dataset into two partitions with uniform" +#~ " distribution (:code:`IidPartitioner(num_partitions=2)`). 
" +#~ "Then, we load the partition for " +#~ "the given client based on " +#~ ":code:`node_id`:" #~ msgstr "" #~ msgid "" -#~ "Both, base and server image are " -#~ "configured via build arguments. Through " -#~ "build arguments, we can make our " -#~ "build more flexible. For example, in " -#~ "the base image, we can specify the" -#~ " version of Python to install using" -#~ " the ``PYTHON_VERSION`` build argument. " -#~ "Some of the build arguments have " -#~ "default values, others must be specified" -#~ " when building the image. All " -#~ "available build arguments for each image" -#~ " are listed in one of the " -#~ "tables below." +#~ "The :code:`self.bst` is used to keep " +#~ "the Booster objects that remain " +#~ "consistent across rounds, allowing them " +#~ "to store predictions from trees " +#~ "integrated in earlier rounds and " +#~ "maintain other essential data structures " +#~ "for training." #~ msgstr "" -#~ msgid "Defaults to ``flwr/server``." +#~ msgid "" +#~ "In :code:`fit`, at the first round, " +#~ "we call :code:`xgb.train()` to build up" +#~ " the first set of trees. the " +#~ "returned Booster object and config are" +#~ " stored in :code:`self.bst` and " +#~ ":code:`self.config`, respectively. From the " +#~ "second round, we load the global " +#~ "model sent from server to " +#~ ":code:`self.bst`, and then update model " +#~ "weights on local training data with " +#~ "function :code:`local_boost` as follows:" #~ msgstr "" -#~ msgid "``BASE_IMAGE_TAG``" +#~ msgid "" +#~ "Given :code:`num_local_round`, we update trees" +#~ " by calling :code:`self.bst.update` method. " +#~ "After training, the last " +#~ ":code:`N=num_local_round` trees will be " +#~ "extracted to send to the server." #~ msgstr "" -#~ msgid "The image tag of the base image." +#~ msgid "" +#~ "In :code:`evaluate`, we call " +#~ ":code:`self.bst.eval_set` function to conduct " +#~ "evaluation on valid set. The AUC " +#~ "value will be returned." 
#~ msgstr "" -#~ msgid "Defaults to ``py3.11-ubuntu22.04``." +#~ msgid "" +#~ "We use two clients for this " +#~ "example. An :code:`evaluate_metrics_aggregation` " +#~ "function is defined to collect and " +#~ "wighted average the AUC values from " +#~ "clients." #~ msgstr "" #~ msgid "" -#~ "The following example creates a server" -#~ " image with the official Flower base" -#~ " image py3.11-ubuntu22.04 and Flower 1.7.0:" +#~ "Let's now create the Federated Dataset" +#~ " abstraction that from ``flwr-datasets``" +#~ " that partitions the CIFAR-10. We " +#~ "will create small training and test " +#~ "set for each edge device and wrap" +#~ " each of them into a PyTorch " +#~ "``DataLoader``:" #~ msgstr "" +#~ msgid "Implementing a Flower client" +#~ msgstr "Mise en place d'un client Flower" + #~ msgid "" -#~ "The name of image is ``flwr_server`` " -#~ "and the tag ``0.1.0``. Remember that " -#~ "the build arguments as well as the" -#~ " name and tag can be adapted to" -#~ " your needs. These values serve as" -#~ " examples only." +#~ "To implement the Flower client, we " +#~ "create a subclass of " +#~ "``flwr.client.NumPyClient`` and implement the " +#~ "three methods ``get_parameters``, ``fit``, and" +#~ " ``evaluate``:" #~ msgstr "" +#~ "Pour mettre en œuvre le client " +#~ "Flower, nous créons une sous-classe " +#~ "de ``flwr.client.NumPyClient`` et mettons en" +#~ " œuvre les trois méthodes " +#~ "``get_parameters``, ``fit`` et ``evaluate`` :" #~ msgid "" -#~ "If you want to use your own " -#~ "base image instead of the official " -#~ "Flower base image, all you need to" -#~ " do is set the ``BASE_REPOSITORY`` " -#~ "and ``BASE_IMAGE_TAG`` build arguments. The" -#~ " value of ``BASE_REPOSITORY`` must match" -#~ " the name of your image and the" -#~ " value of ``BASE_IMAGE_TAG`` must match " -#~ "the tag of your image." 
+#~ "The function ``start_simulation`` accepts a" +#~ " number of arguments, amongst them " +#~ "the ``client_fn`` used to create " +#~ "``FlowerClient`` instances, the number of " +#~ "clients to simulate (``num_clients``), the " +#~ "number of federated learning rounds " +#~ "(``num_rounds``), and the strategy. The " +#~ "strategy encapsulates the federated learning" +#~ " approach/algorithm, for example, *Federated " +#~ "Averaging* (FedAvg)." #~ msgstr "" +#~ "La fonction ``start_simulation`` accepte un" +#~ " certain nombre d'arguments, parmi lesquels" +#~ " le ``client_fn`` utilisé pour créer " +#~ "les instances ``FlowerClient``, le nombre " +#~ "de clients à simuler (``num_clients``), " +#~ "le nombre de tours d'apprentissage " +#~ "fédéré (``num_rounds``), et la stratégie. " +#~ "La stratégie encapsule l'approche/algorithme " +#~ "d'apprentissage fédéré, par exemple, " +#~ "*Federated Averaging* (FedAvg)." #~ msgid "" -#~ "It is important to follow the " -#~ "instructions described in comments. For " -#~ "instance, in order to not break " -#~ "how our changelog system works, you " -#~ "should read the information above the" -#~ " ``Changelog entry`` section carefully. You" -#~ " can also checkout some examples and" -#~ " details in the :ref:`changelogentry` " -#~ "appendix." 
+#~ "The only thing left to do is " +#~ "to tell the strategy to call this" +#~ " function whenever it receives evaluation" +#~ " metric dictionaries from the clients:" #~ msgstr "" +#~ "La seule chose qui reste à faire" +#~ " est d'indiquer à la stratégie " +#~ "d'appeler cette fonction chaque fois " +#~ "qu'elle reçoit des dictionnaires de " +#~ "métriques d'évaluation de la part des" +#~ " clients :" -#~ msgid "Open a PR (as shown above)" -#~ msgstr "Ouvre un RP (comme indiqué ci-dessus)" +#~ msgid "|93b02017c78049bbbd5ae456dcb2c91b|" +#~ msgstr "" -#~ msgid "" -#~ "Add CI job to deploy the staging" -#~ " system when the ``main`` branch " -#~ "changes" -#~ msgstr "Add CI job to deploy the staging system when the `main` branch changes" +#~ msgid "|01471150fd5144c080a176b43e92a3ff|" +#~ msgstr "" -#~ msgid "Changelog entry" -#~ msgstr "Changelog" +#~ msgid "|9bc21c7dbd17444a8f070c60786e3484|" +#~ msgstr "" -#~ msgid "" -#~ "When opening a new PR, inside its" -#~ " description, there should be a " -#~ "``Changelog entry`` header." +#~ msgid "|3047bbce54b34099ae559963d0420d79|" #~ msgstr "" -#~ msgid "" -#~ "Above this header you should see " -#~ "the following comment that explains how" -#~ " to write your changelog entry:" +#~ msgid "|e9f8ce948593444fb838d2f354c7ec5d|" #~ msgstr "" -#~ msgid "" -#~ "Inside the following 'Changelog entry' " -#~ "section, you should put the description" -#~ " of your changes that will be " -#~ "added to the changelog alongside your" -#~ " PR title." +#~ msgid "|c24c1478b30e4f74839208628a842d1e|" #~ msgstr "" -#~ msgid "" -#~ "If the section is completely empty " -#~ "(without any token) or non-existent, " -#~ "the changelog will just contain the " -#~ "title of the PR for the changelog" -#~ " entry, without any description." +#~ msgid "|1b3613d7a58847b59e1d3180802dbc09|" #~ msgstr "" -#~ msgid "" -#~ "If the section contains some text " -#~ "other than tokens, it will use it" -#~ " to add a description to the " -#~ "change." 
+#~ msgid "|9980b5213db547d0b8024a50992b9e3f|" #~ msgstr "" -#~ msgid "" -#~ "If the section contains one of the" -#~ " following tokens it will ignore any" -#~ " other text and put the PR " -#~ "under the corresponding section of the" -#~ " changelog:" +#~ msgid "|c7afb4c92d154bfaa5e8cb9a150e17f1|" #~ msgstr "" -#~ msgid " is for classifying a PR as a general improvement." +#~ msgid "|032eb6fed6924ac387b9f13854919196|" #~ msgstr "" -#~ msgid " is to not add the PR to the changelog" +#~ msgid "|fbf225add7fd4df5a9bf25a95597d954|" #~ msgstr "" -#~ msgid " is to add a general baselines change to the PR" +#~ msgid "|7efbe3d29d8349b89594e8947e910525|" #~ msgstr "" -#~ msgid " is to add a general examples change to the PR" +#~ msgid "|329fb3c04c744eda83bb51fa444c2266|" #~ msgstr "" -#~ msgid " is to add a general sdk change to the PR" +#~ msgid "|c00bf2750bc24d229737a0fe1395f0fc|" #~ msgstr "" -#~ msgid " is to add a general simulations change to the PR" +#~ msgid "run\\_client\\_app" #~ msgstr "" -#~ msgid "Note that only one token should be used." +#~ msgid "run\\_supernode" +#~ msgstr "flower-superlink" + +#~ msgid "Retrieve the corresponding layout by the string key." #~ msgstr "" #~ msgid "" -#~ "Its content must have a specific " -#~ "format. We will break down what " -#~ "each possibility does:" +#~ "When there isn't an exact match, " +#~ "all the existing keys in the " +#~ "layout map will be treated as a" +#~ " regex and map against the input " +#~ "key again. The first match will be" +#~ " returned, based on the key insertion" +#~ " order. Return None if there isn't" +#~ " any match found." #~ msgstr "" -#~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains nothing or doesn't exist, " -#~ "the following text will be added " -#~ "to the changelog::" +#~ msgid "the string key as the query for the layout." 
#~ msgstr "" -#~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains a description (and no " -#~ "token), the following text will be " -#~ "added to the changelog::" +#~ msgid "Corresponding layout based on the query." #~ msgstr "" -#~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, nothing will change" -#~ " in the changelog." +#~ msgid "run\\_server\\_app" #~ msgstr "" +#~ msgid "run\\_superlink" +#~ msgstr "flower-superlink" + #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following text" -#~ " will be added to the changelog::" +#~ ":py:obj:`start_simulation `\\" +#~ " \\(\\*\\, client\\_fn\\, num\\_clients\\)" #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" +#~ "A function creating `Client` instances. " +#~ "The function must have the signature " +#~ "`client_fn(context: Context). It should return" +#~ " a single client instance of type " +#~ "`Client`. Note that the created client" +#~ " instances are ephemeral and will " +#~ "often be destroyed after a single " +#~ "method invocation. Since client instances " +#~ "are not long-lived, they should " +#~ "not attempt to carry state over " +#~ "method invocations. Any state required " +#~ "by the instance (model, dataset, " +#~ "hyperparameters, ...) should be (re-)created" +#~ " in either the call to `client_fn`" +#~ " or the call to any of the " +#~ "client methods (e.g., load evaluation " +#~ "data in the `evaluate` method itself)." +#~ msgstr "" + +#~ msgid "The total number of clients in this simulation." #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" +#~ "UNSUPPORTED, WILL BE REMOVED. USE " +#~ "`num_clients` INSTEAD. List `client_id`s for" +#~ " each client. 
This is only required" +#~ " if `num_clients` is not set. Setting" +#~ " both `num_clients` and `clients_ids` with" +#~ " `len(clients_ids)` not equal to " +#~ "`num_clients` generates an error. Using " +#~ "this argument will raise an error." #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following text " -#~ "will be added to the changelog::" +#~ "CPU and GPU resources for a single" +#~ " client. Supported keys are `num_cpus` " +#~ "and `num_gpus`. To understand the GPU" +#~ " utilization caused by `num_gpus`, as " +#~ "well as using custom resources, please" +#~ " consult the Ray documentation." #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" +#~ "Optionally specify the type of actor " +#~ "to use. The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "executing a ClientApp wrapping input " +#~ "argument `client_fn`." #~ msgstr "" #~ msgid "" -#~ "Note that only one token must be" -#~ " provided, otherwise, only the first " -#~ "action (in the order listed above), " -#~ "will be performed." +#~ "If you want to create your own " +#~ "Actor classes, you might need to " +#~ "pass some input argument. You can " +#~ "use this dictionary for such purpose." #~ msgstr "" -#~ msgid "Example: MXNet - Run MXNet Federated" -#~ msgstr "Exemple : MXNet - Exécuter MXNet Federated" - #~ msgid "" -#~ "This tutorial will show you how to" -#~ " use Flower to build a federated " -#~ "version of an existing MXNet workload." -#~ " We are using MXNet to train a" -#~ " Sequential model on the MNIST " -#~ "dataset. We will structure the example" -#~ " similar to our `PyTorch - From " -#~ "Centralized To Federated " -#~ "`_ walkthrough. " -#~ "MXNet and PyTorch are very similar " -#~ "and a very good comparison between " -#~ "MXNet and PyTorch is given `here " -#~ "`_. 
First, " -#~ "we build a centralized training approach" -#~ " based on the `Handwritten Digit " -#~ "Recognition " -#~ "`_" -#~ " tutorial. Then, we build upon the" -#~ " centralized training code to run the" -#~ " training in a federated fashion." +#~ "(default: \"DEFAULT\") Optional string " +#~ "(\"DEFAULT\" or \"SPREAD\") for the VCE" +#~ " to choose in which node the " +#~ "actor is placed. If you are an " +#~ "advanced user needed more control you" +#~ " can use lower-level scheduling " +#~ "strategies to pin actors to specific " +#~ "compute nodes (e.g. via " +#~ "NodeAffinitySchedulingStrategy). Please note this" +#~ " is an advanced feature. For all " +#~ "details, please refer to the Ray " +#~ "documentation: https://docs.ray.io/en/latest/ray-" +#~ "core/scheduling/index.html" #~ msgstr "" -#~ "Ce tutoriel te montrera comment utiliser" -#~ " Flower pour construire une version " -#~ "fédérée d'une charge de travail MXNet" -#~ " existante. Nous utilisons MXNet pour " -#~ "former un modèle séquentiel sur " -#~ "l'ensemble de données MNIST. Nous " -#~ "structurerons l'exemple de la même " -#~ "manière que notre présentation `PyTorch " -#~ "- De la centralisation à la " -#~ "fédération `_. " -#~ "MXNet et PyTorch sont très similaires" -#~ " et une très bonne comparaison entre" -#~ " MXNet et PyTorch est donnée ici " -#~ "`_. Tout " -#~ "d'abord, nous construisons une approche " -#~ "de formation centralisée basée sur le" -#~ " tutoriel `Handandwritten Digit Recognition " -#~ "`_." -#~ " Ensuite, nous nous basons sur le " -#~ "code de formation centralisé pour " -#~ "exécuter la formation de manière " -#~ "fédérée." #~ msgid "" -#~ "Before we start setting up our " -#~ "MXNet example, we install the " -#~ ":code:`mxnet` and :code:`flwr` packages:" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with FastAI to train a vision " +#~ "model on CIFAR-10." 
#~ msgstr "" -#~ "Avant de commencer à configurer notre" -#~ " exemple MXNet, nous installons les " -#~ "paquets :code:`mxnet` et :code:`flwr` :" -#~ msgid "MNIST Training with MXNet" -#~ msgstr "Formation MNIST avec MXNet" +#~ msgid "Let's build a federated learning system using fastai and Flower!" +#~ msgstr "" +#~ "Construisons un système d'apprentissage fédéré" +#~ " en utilisant fastai et Flower !" #~ msgid "" -#~ "We begin with a brief description " -#~ "of the centralized training code based" -#~ " on a :code:`Sequential` model. If " -#~ "you want a more in-depth " -#~ "explanation of what's going on then " -#~ "have a look at the official `MXNet" -#~ " tutorial " -#~ "`_." +#~ "Please refer to the `full code " +#~ "example `_ to learn more." #~ msgstr "" -#~ "Nous commençons par une brève " -#~ "description du code d'entraînement centralisé" -#~ " basé sur un modèle :code:`Sequential`. " -#~ "Si tu veux une explication plus " -#~ "approfondie de ce qui se passe, " -#~ "jette un coup d'œil au tutoriel " -#~ "officiel `MXNet " -#~ "`_." +#~ "Réfère-toi à l'exemple de code " +#~ "complet `_ pour en savoir plus." #~ msgid "" -#~ "Let's create a new file " -#~ "called:code:`mxnet_mnist.py` with all the " -#~ "components required for a traditional " -#~ "(centralized) MNIST training. First, the " -#~ "MXNet package :code:`mxnet` needs to be" -#~ " imported. You can see that we " -#~ "do not yet import the :code:`flwr` " -#~ "package for federated learning. This " -#~ "will be done later." +#~ "Check out this Federating Learning " +#~ "quickstart tutorial for using Flower " +#~ "with HuggingFace Transformers in order " +#~ "to fine-tune an LLM." #~ msgstr "" -#~ "Créons un nouveau fichier appelé " -#~ ":code:`mxnet_mnist.py` avec tous les " -#~ "composants requis pour un apprentissage " -#~ "MNIST traditionnel (centralisé). Tout d'abord," -#~ " le package MXNet :code:`mxnet` doit " -#~ "être importé. 
Tu peux voir que " -#~ "nous n'avons pas encore importé le " -#~ "package :code:`flwr` pour l'apprentissage " -#~ "fédéré. Cela sera fait plus tard." #~ msgid "" -#~ "The :code:`load_data()` function loads the " -#~ "MNIST training and test sets." +#~ "Let's build a federated learning system" +#~ " using Hugging Face Transformers and " +#~ "Flower!" #~ msgstr "" -#~ "La fonction :code:`load_data()` charge les " -#~ "ensembles d'entraînement et de test " -#~ "MNIST." +#~ "Construisons un système d'apprentissage fédéré" +#~ " à l'aide des transformateurs Hugging " +#~ "Face et de Flower !" + +#~ msgid "Dependencies" +#~ msgstr "Dépendances" #~ msgid "" -#~ "As already mentioned, we will use " -#~ "the MNIST dataset for this machine " -#~ "learning workload. The model architecture " -#~ "(a very simple :code:`Sequential` model) " -#~ "is defined in :code:`model()`." +#~ "To follow along this tutorial you " +#~ "will need to install the following " +#~ "packages: :code:`datasets`, :code:`evaluate`, " +#~ ":code:`flwr`, :code:`torch`, and " +#~ ":code:`transformers`. This can be done " +#~ "using :code:`pip`:" #~ msgstr "" -#~ "Comme nous l'avons déjà mentionné, nous" -#~ " utiliserons l'ensemble de données MNIST" -#~ " pour cette charge de travail " -#~ "d'apprentissage automatique. L'architecture du " -#~ "modèle (un modèle :code:`Séquentiel` très " -#~ "simple) est définie dans :code:`model()`." +#~ "Pour suivre ce tutoriel, tu devras " +#~ "installer les paquets suivants : " +#~ ":code:`datasets`, :code:`evaluate`, :code:`flwr`, " +#~ ":code:`torch`, et :code:`transformers`. 
Cela " +#~ "peut être fait en utilisant :code:`pip`" +#~ " :" + +#~ msgid "Standard Hugging Face workflow" +#~ msgstr "Flux de travail standard pour le visage" + +#~ msgid "Handling the data" +#~ msgstr "Traitement des données" #~ msgid "" -#~ "We now need to define the training" -#~ " (function :code:`train()`) which loops " -#~ "over the training set and measures " -#~ "the loss for each batch of " -#~ "training examples." +#~ "To fetch the IMDB dataset, we will" +#~ " use Hugging Face's :code:`datasets` " +#~ "library. We then need to tokenize " +#~ "the data and create :code:`PyTorch` " +#~ "dataloaders, this is all done in " +#~ "the :code:`load_data` function:" #~ msgstr "" -#~ "Nous devons maintenant définir la " -#~ "formation (fonction :code:`train()`) qui passe" -#~ " en boucle sur l'ensemble de la " -#~ "formation et mesure la perte pour " -#~ "chaque lot d'exemples de formation." +#~ "Pour récupérer le jeu de données " +#~ "IMDB, nous utiliserons la bibliothèque " +#~ ":code:`datasets` de Hugging Face. Nous " +#~ "devons ensuite tokeniser les données et" +#~ " créer des :code:`PyTorch` dataloaders, ce" +#~ " qui est fait dans la fonction " +#~ ":code:`load_data` :" + +#~ msgid "Training and testing the model" +#~ msgstr "Former et tester le modèle" #~ msgid "" -#~ "The evaluation of the model is " -#~ "defined in function :code:`test()`. The " -#~ "function loops over all test samples " -#~ "and measures the loss and accuracy " -#~ "of the model based on the test " -#~ "dataset." +#~ "Once we have a way of creating " +#~ "our trainloader and testloader, we can" +#~ " take care of the training and " +#~ "testing. This is very similar to " +#~ "any :code:`PyTorch` training or testing " +#~ "loop:" #~ msgstr "" -#~ "L'évaluation du modèle est définie dans" -#~ " la fonction :code:`test()`. 
Cette fonction" -#~ " passe en boucle sur tous les " -#~ "échantillons de test et mesure la " -#~ "perte et la précision du modèle en" -#~ " fonction de l'ensemble des données " -#~ "de test." +#~ "Une fois que nous avons trouvé un" +#~ " moyen de créer notre trainloader et" +#~ " notre testloader, nous pouvons nous " +#~ "occuper de l'entraînement et du test." +#~ " C'est très similaire à n'importe " +#~ "quelle boucle d'entraînement ou de test" +#~ " :code:`PyTorch` :" + +#~ msgid "Creating the model itself" +#~ msgstr "Créer le modèle lui-même" #~ msgid "" -#~ "Having defined the data loading, model" -#~ " architecture, training, and evaluation we" -#~ " can put everything together and " -#~ "train our model on MNIST. Note " -#~ "that the GPU/CPU device for the " -#~ "training and testing is defined within" -#~ " the :code:`ctx` (context)." +#~ "To create the model itself, we " +#~ "will just load the pre-trained " +#~ "distillBERT model using Hugging Face’s " +#~ ":code:`AutoModelForSequenceClassification` :" #~ msgstr "" -#~ "Après avoir défini le chargement des " -#~ "données, l'architecture du modèle, " -#~ "l'entraînement et l'évaluation, nous pouvons" -#~ " tout assembler et entraîner notre " -#~ "modèle sur MNIST. Note que le " -#~ "dispositif GPU/CPU pour l'entraînement et " -#~ "le test est défini dans le " -#~ ":code:`ctx` (contexte)." +#~ "Pour créer le modèle lui-même, " +#~ "nous allons simplement charger le modèle" +#~ " distillBERT pré-entraîné en utilisant le" +#~ " :code:`AutoModelForSequenceClassification` de Hugging" +#~ " Face :" -#~ msgid "You can now run your (centralized) MXNet machine learning workload:" -#~ msgstr "" -#~ "Tu peux maintenant exécuter ta charge" -#~ " de travail (centralisée) d'apprentissage " -#~ "automatique MXNet :" +#~ msgid "Creating the IMDBClient" +#~ msgstr "Création du client IMDBC" #~ msgid "" -#~ "So far this should all look fairly" -#~ " familiar if you've used MXNet (or" -#~ " even PyTorch) before. 
Let's take the" -#~ " next step and use what we've " -#~ "built to create a simple federated " -#~ "learning system consisting of one server" -#~ " and two clients." -#~ msgstr "" -#~ "Jusqu'à présent, tout cela devrait te" -#~ " sembler assez familier si tu as " -#~ "déjà utilisé MXNet (ou même PyTorch)." -#~ " Passons à l'étape suivante et " -#~ "utilisons ce que nous avons construit" -#~ " pour créer un simple système " -#~ "d'apprentissage fédéré composé d'un serveur" -#~ " et de deux clients." +#~ "To federate our example to multiple " +#~ "clients, we first need to write " +#~ "our Flower client class (inheriting from" +#~ " :code:`flwr.client.NumPyClient`). This is very" +#~ " easy, as our model is a " +#~ "standard :code:`PyTorch` model:" +#~ msgstr "" +#~ "Pour fédérer notre exemple à plusieurs" +#~ " clients, nous devons d'abord écrire " +#~ "notre classe de client Flower (héritant" +#~ " de :code:`flwr.client.NumPyClient`). C'est très" +#~ " facile, car notre modèle est un " +#~ "modèle :code:`PyTorch` standard :" + +#~ msgid "" +#~ "The :code:`get_parameters` function lets the" +#~ " server get the client's parameters. " +#~ "Inversely, the :code:`set_parameters` function " +#~ "allows the server to send its " +#~ "parameters to the client. Finally, the" +#~ " :code:`fit` function trains the model " +#~ "locally for the client, and the " +#~ ":code:`evaluate` function tests the model " +#~ "locally and returns the relevant " +#~ "metrics." +#~ msgstr "" +#~ "La fonction :code:`get_parameters` permet au" +#~ " serveur d'obtenir les paramètres du " +#~ "client. Inversement, la fonction " +#~ ":code:`set_parameters` permet au serveur " +#~ "d'envoyer ses paramètres au client. " +#~ "Enfin, la fonction :code:`fit` forme le" +#~ " modèle localement pour le client, et" +#~ " la fonction :code:`evaluate` teste le " +#~ "modèle localement et renvoie les mesures" +#~ " correspondantes." 
+ +#~ msgid "Starting the server" +#~ msgstr "Démarrer le serveur" -#~ msgid "MXNet meets Flower" -#~ msgstr "MXNet rencontre Flower" +#~ msgid "" +#~ "Now that we have a way to " +#~ "instantiate clients, we need to create" +#~ " our server in order to aggregate " +#~ "the results. Using Flower, this can " +#~ "be done very easily by first " +#~ "choosing a strategy (here, we are " +#~ "using :code:`FedAvg`, which will define " +#~ "the global weights as the average " +#~ "of all the clients' weights at " +#~ "each round) and then using the " +#~ ":code:`flwr.server.start_server` function:" +#~ msgstr "" +#~ "Maintenant que nous avons un moyen " +#~ "d'instancier les clients, nous devons " +#~ "créer notre serveur afin d'agréger les" +#~ " résultats. Avec Flower, cela peut " +#~ "être fait très facilement en choisissant" +#~ " d'abord une stratégie (ici, nous " +#~ "utilisons :code:`FedAvg`, qui définira les " +#~ "poids globaux comme la moyenne des " +#~ "poids de tous les clients à chaque" +#~ " tour) et en utilisant ensuite la " +#~ "fonction :code:`flwr.server.start_server` :" + +#~ msgid "" +#~ "The :code:`weighted_average` function is there" +#~ " to provide a way to aggregate " +#~ "the metrics distributed amongst the " +#~ "clients (basically this allows us to " +#~ "display a nice average accuracy and " +#~ "loss for every round)." +#~ msgstr "" +#~ "La fonction :code:`weighted_average` est là" +#~ " pour fournir un moyen d'agréger les" +#~ " mesures réparties entre les clients " +#~ "(en gros, cela nous permet d'afficher" +#~ " une belle moyenne de précision et" +#~ " de perte pour chaque tour)." + +#~ msgid "Putting everything together" +#~ msgstr "Tout assembler" + +#~ msgid "We can now start client instances using:" +#~ msgstr "" +#~ "Nous pouvons maintenant démarrer des " +#~ "instances de clients en utilisant :" + +#~ msgid "" +#~ "And they will be able to connect" +#~ " to the server and start the " +#~ "federated training." 
+#~ msgstr "" +#~ "Et ils pourront se connecter au " +#~ "serveur et démarrer la formation " +#~ "fédérée." #~ msgid "" -#~ "So far, it was not easily possible" -#~ " to use MXNet workloads for federated" -#~ " learning because federated learning is " -#~ "not supported in MXNet. Since Flower " -#~ "is fully agnostic towards the underlying" -#~ " machine learning framework, it can " -#~ "be used to federated arbitrary machine" -#~ " learning workloads. This section will " -#~ "show you how Flower can be used" -#~ " to federate our centralized MXNet " -#~ "workload." +#~ "If you want to check out " +#~ "everything put together, you should " +#~ "check out the `full code example " +#~ "`_ ." #~ msgstr "" -#~ "Jusqu'à présent, il n'était pas facile" -#~ " d'utiliser les charges de travail " -#~ "MXNet pour l'apprentissage fédéré car " -#~ "l'apprentissage fédéré n'est pas pris en" -#~ " charge dans MXNet. Comme Flower est" -#~ " totalement agnostique vis-à-vis du cadre" -#~ " d'apprentissage automatique sous-jacent, " -#~ "il peut être utilisé pour fédérer " -#~ "des charges de travail d'apprentissage " -#~ "automatique arbitraires. Cette section te " -#~ "montrera comment Flower peut être " -#~ "utilisé pour fédérer notre charge de " -#~ "travail MXNet centralisée." +#~ "Si tu veux voir tout ce qui " +#~ "est mis ensemble, tu devrais consulter" +#~ " l'exemple de code complet : " +#~ "[https://github.com/adap/flower/tree/main/examples/quickstart-" +#~ "huggingface](https://github.com/adap/flower/tree/main/examples" +#~ "/quickstart-huggingface)." #~ msgid "" -#~ "The concept to federate an existing " -#~ "workload is always the same and " -#~ "easy to understand. We have to " -#~ "start a *server* and then use the" -#~ " code in :code:`mxnet_mnist.py` for the " -#~ "*clients* that are connected to the " -#~ "*server*. The *server* sends model " -#~ "parameters to the clients. The *clients*" -#~ " run the training and update the " -#~ "parameters. 
The updated parameters are " -#~ "sent back to the *server* which " -#~ "averages all received parameter updates. " -#~ "This describes one round of the " -#~ "federated learning process and we repeat" -#~ " this for multiple rounds." +#~ "Of course, this is a very basic" +#~ " example, and a lot can be " +#~ "added or modified, it was just to" +#~ " showcase how simply we could " +#~ "federate a Hugging Face workflow using" +#~ " Flower." #~ msgstr "" -#~ "Le concept pour fédérer une charge " -#~ "de travail existante est toujours le " -#~ "même et facile à comprendre. Nous " -#~ "devons démarrer un *serveur* et ensuite" -#~ " utiliser le code dans " -#~ ":code:`mxnet_mnist.py` pour les *clients* qui" -#~ " sont connectés au *serveur*. Le " -#~ "*serveur* envoie les paramètres du " -#~ "modèle aux clients. Les *clients* " -#~ "exécutent la formation et mettent à " -#~ "jour les paramètres. Les paramètres mis" -#~ " à jour sont renvoyés au *serveur*" -#~ " qui fait la moyenne de toutes " -#~ "les mises à jour de paramètres " -#~ "reçues. Ceci décrit un tour du " -#~ "processus d'apprentissage fédéré et nous " -#~ "répétons cette opération pour plusieurs " -#~ "tours." +#~ "Bien sûr, c'est un exemple très " +#~ "basique, et beaucoup de choses peuvent" +#~ " être ajoutées ou modifiées, il " +#~ "s'agissait juste de montrer avec quelle" +#~ " simplicité on pouvait fédérer un " +#~ "flux de travail Hugging Face à " +#~ "l'aide de Flower." #~ msgid "" -#~ "Finally, we will define our *client* " -#~ "logic in :code:`client.py` and build " -#~ "upon the previously defined MXNet " -#~ "training in :code:`mxnet_mnist.py`. Our " -#~ "*client* needs to import :code:`flwr`, " -#~ "but also :code:`mxnet` to update the " -#~ "parameters on our MXNet model:" +#~ "Note that in this example we used" +#~ " :code:`PyTorch`, but we could have " +#~ "very well used :code:`TensorFlow`." 
#~ msgstr "" -#~ "Enfin, nous allons définir la logique" -#~ " de notre *client* dans :code:`client.py`" -#~ " et nous appuyer sur l'entraînement " -#~ "MXNet défini précédemment dans " -#~ ":code:`mxnet_mnist.py`. Notre *client* doit " -#~ "importer :code:`flwr`, mais aussi " -#~ ":code:`mxnet` pour mettre à jour les " -#~ "paramètres de notre modèle MXNet :" +#~ "Notez que dans cet exemple, nous " +#~ "avons utilisé :code:`PyTorch`, mais nous " +#~ "aurions très bien pu utiliser " +#~ ":code:`TensorFlow`." #~ msgid "" -#~ "Implementing a Flower *client* basically " -#~ "means implementing a subclass of either" -#~ " :code:`flwr.client.Client` or " -#~ ":code:`flwr.client.NumPyClient`. Our implementation " -#~ "will be based on " -#~ ":code:`flwr.client.NumPyClient` and we'll call " -#~ "it :code:`MNISTClient`. :code:`NumPyClient` is " -#~ "slightly easier to implement than " -#~ ":code:`Client` if you use a framework" -#~ " with good NumPy interoperability (like " -#~ "PyTorch or MXNet) because it avoids " -#~ "some of the boilerplate that would " -#~ "otherwise be necessary. :code:`MNISTClient` " -#~ "needs to implement four methods, two " -#~ "methods for getting/setting model parameters," -#~ " one method for training the model," -#~ " and one method for testing the " -#~ "model:" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with PyTorch Lightning to train an " +#~ "Auto Encoder model on MNIST." #~ msgstr "" -#~ "Implementing a Flower *client* basically " -#~ "means implementing a subclass of either" -#~ " :code:`flwr.client.Client` or " -#~ ":code:`flwr.client.NumPyClient`. Our implementation " -#~ "will be based on " -#~ ":code:`flwr.client.NumPyClient` and we'll call " -#~ "it :code:`MNISTClient`. 
:code:`NumPyClient` is " -#~ "slightly easier to implement than " -#~ ":code:`Client` if you use a framework" -#~ " with good NumPy interoperability (like " -#~ "PyTorch or MXNet) because it avoids " -#~ "some of the boilerplate that would " -#~ "otherwise be necessary. :code:`MNISTClient` " -#~ "needs to implement four methods, two " -#~ "methods for getting/setting model parameters," -#~ " one method for training the model," -#~ " and one method for testing the " -#~ "model:" -#~ msgid "transform MXNet :code:`NDArray`'s to NumPy :code:`ndarray`'s" -#~ msgstr "transforme les :code:`NDArray` du MXNet en :code:`ndarray` de NumPy" +#~ msgid "" +#~ "Let's build a horizontal federated " +#~ "learning system using PyTorch Lightning " +#~ "and Flower!" +#~ msgstr "" +#~ "Construisons un système d'apprentissage fédéré" +#~ " en utilisant PyTorch Lightning et " +#~ "Flower !" #~ msgid "" -#~ "The challenging part is to transform " -#~ "the MXNet parameters from :code:`NDArray` " -#~ "to :code:`NumPy Arrays` to make it " -#~ "readable for Flower." +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." #~ msgstr "" -#~ "La partie la plus difficile est de" -#~ " transformer les paramètres MXNet de " -#~ ":code:`NDArray` en :code:`NumPy Arrays` pour" -#~ " les rendre lisibles pour Flower." +#~ "Réfère-toi à l'exemple de code " +#~ "complet `_ pour en " +#~ "savoir plus." #~ msgid "" -#~ "The two :code:`NumPyClient` methods " -#~ ":code:`fit` and :code:`evaluate` make use " -#~ "of the functions :code:`train()` and " -#~ ":code:`test()` previously defined in " -#~ ":code:`mxnet_mnist.py`. So what we really " -#~ "do here is we tell Flower through" -#~ " our :code:`NumPyClient` subclass which of" -#~ " our already defined functions to " -#~ "call for training and evaluation. We " -#~ "included type annotations to give you" -#~ " a better understanding of the data" -#~ " types that get passed around." 
+#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with TensorFlow to train a MobilNetV2" +#~ " model on CIFAR-10." #~ msgstr "" -#~ "Les deux méthodes :code:`NumPyClient` " -#~ ":code:`fit` et :code:`evaluate` utilisent les" -#~ " fonctions :code:`train()` et :code:`test()` " -#~ "définies précédemment dans :code:`mxnet_mnist.py`." -#~ " Ce que nous faisons vraiment ici," -#~ " c'est que nous indiquons à Flower," -#~ " par le biais de notre sous-" -#~ "classe :code:`NumPyClient`, laquelle de nos" -#~ " fonctions déjà définies doit être " -#~ "appelée pour l'entraînement et l'évaluation." -#~ " Nous avons inclus des annotations de" -#~ " type pour te donner une meilleure" -#~ " compréhension des types de données " -#~ "qui sont transmis." + +#~ msgid "Let's build a federated learning system in less than 20 lines of code!" +#~ msgstr "" +#~ "Construisons un système d'apprentissage fédéré" +#~ " en moins de 20 lignes de code" +#~ " !" + +#~ msgid "Before Flower can be imported we have to install it:" +#~ msgstr "Avant de pouvoir importer une fleur, nous devons l'installer :" + +#~ msgid "" +#~ "Since we want to use the Keras " +#~ "API of TensorFlow (TF), we have to" +#~ " install TF as well:" +#~ msgstr "" +#~ "Comme nous voulons utiliser l'API Keras" +#~ " de TensorFlow (TF), nous devons " +#~ "également installer TF :" + +#~ msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +#~ msgstr "" +#~ "Ensuite, dans un fichier appelé " +#~ ":code:`client.py`, importe Flower et " +#~ "TensorFlow :" + +#~ msgid "" +#~ "We use the Keras utilities of TF" +#~ " to load CIFAR10, a popular colored" +#~ " image classification dataset for machine" +#~ " learning. The call to " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` downloads " +#~ "CIFAR10, caches it locally, and then " +#~ "returns the entire training and test " +#~ "set as NumPy ndarrays." 
+#~ msgstr "" +#~ "Nous utilisons les utilitaires Keras de" +#~ " TF pour charger CIFAR10, un ensemble" +#~ " de données de classification d'images " +#~ "colorées populaire pour l'apprentissage " +#~ "automatique. L'appel à " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` télécharge " +#~ "CIFAR10, le met en cache localement, " +#~ "puis renvoie l'ensemble d'entraînement et " +#~ "de test sous forme de NumPy " +#~ "ndarrays." #~ msgid "" -#~ "Having defined data loading, model " -#~ "architecture, training, and evaluation we " -#~ "can put everything together and train" -#~ " our :code:`Sequential` model on MNIST." +#~ "Next, we need a model. For the " +#~ "purpose of this tutorial, we use " +#~ "MobilNetV2 with 10 output classes:" +#~ msgstr "" +#~ "Ensuite, nous avons besoin d'un modèle." +#~ " Pour les besoins de ce tutoriel, " +#~ "nous utilisons MobilNetV2 avec 10 " +#~ "classes de sortie :" + +#~ msgid "" +#~ "The Flower server interacts with clients" +#~ " through an interface called " +#~ ":code:`Client`. When the server selects " +#~ "a particular client for training, it " +#~ "sends training instructions over the " +#~ "network. The client receives those " +#~ "instructions and calls one of the " +#~ ":code:`Client` methods to run your code" +#~ " (i.e., to train the neural network" +#~ " we defined earlier)." +#~ msgstr "" +#~ "Le serveur Flower interagit avec les " +#~ "clients par le biais d'une interface " +#~ "appelée :code:`Client`. Lorsque le serveur " +#~ "sélectionne un client particulier pour " +#~ "la formation, il envoie des instructions" +#~ " de formation sur le réseau. Le " +#~ "client reçoit ces instructions et " +#~ "appelle l'une des méthodes :code:`Client` " +#~ "pour exécuter ton code (c'est-à-dire " +#~ "pour former le réseau neuronal que " +#~ "nous avons défini plus tôt)." 
+ +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses Keras." +#~ " The :code:`NumPyClient` interface defines " +#~ "three methods which can be implemented" +#~ " in the following way:" #~ msgstr "" -#~ "Après avoir défini le chargement des " -#~ "données, l'architecture du modèle, la " -#~ "formation et l'évaluation, nous pouvons " -#~ "tout rassembler et former notre modèle" -#~ " :code:`Sequential` sur MNIST." +#~ "Flower fournit une classe de commodité" +#~ " appelée :code:`NumPyClient` qui facilite " +#~ "la mise en œuvre de l'interface " +#~ ":code:`Client` lorsque ta charge de " +#~ "travail utilise Keras. L'interface " +#~ ":code:`NumPyClient` définit trois méthodes qui" +#~ " peuvent être mises en œuvre de " +#~ "la manière suivante :" #~ msgid "" -#~ "in each window (make sure that the" -#~ " server is still running before you" -#~ " do so) and see your MXNet " -#~ "project run federated learning across " -#~ "two clients. Congratulations!" +#~ "We can now create an instance of" +#~ " our class :code:`CifarClient` and add " +#~ "one line to actually run this " +#~ "client:" #~ msgstr "" -#~ "dans chaque fenêtre (assure-toi que " -#~ "le serveur est toujours en cours " -#~ "d'exécution avant de le faire) et " -#~ "tu verras ton projet MXNet exécuter " -#~ "l'apprentissage fédéré sur deux clients. " -#~ "Félicitations !" +#~ "Nous pouvons maintenant créer une " +#~ "instance de notre classe :code:`CifarClient`" +#~ " et ajouter une ligne pour exécuter" +#~ " ce client :" #~ msgid "" -#~ "The full source code for this " -#~ "example: `MXNet: From Centralized To " -#~ "Federated (Code) " -#~ "`_. Our " -#~ "example is of course somewhat over-" -#~ "simplified because both clients load the" -#~ " exact same dataset, which isn't " -#~ "realistic. You're now prepared to " -#~ "explore this topic further. 
How about" -#~ " using a CNN or using a " -#~ "different dataset? How about adding more" -#~ " clients?" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. If you implement" +#~ " a client of type :code:`NumPyClient` " +#~ "you'll need to first call its " +#~ ":code:`to_client()` method. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." #~ msgstr "" -#~ "Le code source complet de cet " -#~ "exemple : `MXNet : From Centralized " -#~ "To Federated (Code) " -#~ "`_. Notre " -#~ "exemple est bien sûr un peu trop" -#~ " simplifié parce que les deux clients" -#~ " chargent exactement le même ensemble " -#~ "de données, ce qui n'est pas " -#~ "réaliste. Tu es maintenant prêt à " -#~ "explorer ce sujet plus en profondeur." -#~ " Pourquoi ne pas utiliser un CNN " -#~ "ou un ensemble de données différent " -#~ "? Pourquoi ne pas ajouter d'autres " -#~ "clients ?" +#~ "C'est tout pour le client. Il nous" +#~ " suffit d'implémenter :code:`Client` ou " +#~ ":code:`NumPyClient` et d'appeler " +#~ ":code:`fl.client.start_client()`. La chaîne " +#~ ":code:`\"[: :]:8080\"` indique au client " +#~ "à quel serveur se connecter. Dans " +#~ "notre cas, nous pouvons exécuter le " +#~ "serveur et le client sur la même" +#~ " machine, c'est pourquoi nous utilisons " +#~ ":code:`\"[: :]:8080\"`. 
Si nous exécutons " +#~ "une charge de travail véritablement " +#~ "fédérée avec le serveur et les " +#~ "clients fonctionnant sur des machines " +#~ "différentes, tout ce qui doit changer" +#~ " est l'adresse :code:`server_address` vers " +#~ "laquelle nous dirigeons le client." -#~ msgid "with the following command sequence:" -#~ msgstr "avec la séquence de commandes suivante :" +#~ msgid "Each client will have its own dataset." +#~ msgstr "Chaque client aura son propre ensemble de données." #~ msgid "" -#~ "In case you are a researcher you" -#~ " might be just fine using the " -#~ "self-signed certificates generated using " -#~ "the scripts which are part of this" -#~ " guide." +#~ "You should now see how the " +#~ "training does in the very first " +#~ "terminal (the one that started the " +#~ "server):" #~ msgstr "" -#~ "Si tu es un chercheur, tu peux " -#~ "très bien utiliser les certificats " -#~ "auto-signés générés à l'aide des " -#~ "scripts qui font partie de ce " -#~ "guide." +#~ "Tu devrais maintenant voir comment la" +#~ " formation se déroule dans le tout" +#~ " premier terminal (celui qui a " +#~ "démarré le serveur) :" #~ msgid "" -#~ "We are now going to show how " -#~ "to write a sever which uses the" -#~ " previously generated scripts." +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this can be " +#~ "found in :code:`examples/quickstart-" +#~ "tensorflow/client.py`." #~ msgstr "" -#~ "Nous allons maintenant montrer comment " -#~ "écrire un serveur qui utilise les " -#~ "scripts générés précédemment." +#~ "Félicitations ! Tu as réussi à " +#~ "construire et à faire fonctionner ton" +#~ " premier système d'apprentissage fédéré. Le" +#~ " `code source complet " +#~ "`_ pour cela se trouve" +#~ " dans :code:`examples/quickstart-tensorflow/client.py`." -#~ msgid "" -#~ "When providing certificates, the server " -#~ "expects a tuple of three certificates." 
-#~ " :code:`Path` can be used to easily" -#~ " read the contents of those files " -#~ "into byte strings, which is the " -#~ "data type :code:`start_server` expects." +#~ msgid "|e5918c1c06a4434bbe4bf49235e40059|" +#~ msgstr "" + +#~ msgid "|c0165741bd1944f09ec55ce49032377d|" +#~ msgstr "" + +#~ msgid "|0a0ac9427ac7487b8e52d75ed514f04e|" +#~ msgstr "" + +#~ msgid "|5defee3ea4ca40d99fcd3e4ea045be25|" +#~ msgstr "" + +#~ msgid "|74f26ca701254d3db57d7899bd91eb55|" +#~ msgstr "" + +#~ msgid "|bda79f21f8154258a40e5766b2634ad7|" +#~ msgstr "" + +#~ msgid "|89d30862e62e4f9989e193483a08680a|" +#~ msgstr "" + +#~ msgid "|77e9918671c54b4f86e01369c0785ce8|" +#~ msgstr "" + +#~ msgid "|7e4ccef37cc94148a067107b34eb7447|" +#~ msgstr "" + +#~ msgid "|28e47e4cded14479a0846c8e5f22c872|" +#~ msgstr "" + +#~ msgid "|4b8c5d1afa144294b76ffc76e4658a38|" +#~ msgstr "" + +#~ msgid "|9dbdb3a0f6cb4a129fac863eaa414c17|" +#~ msgstr "" + +#~ msgid "|81749d0ac0834c36a83bd38f433fea31|" +#~ msgstr "" + +#~ msgid "|ed9aae51da70428eab7eef32f21e819e|" +#~ msgstr "" + +#~ msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" +#~ msgstr "" + +#~ msgid "|33cacb7d985c4906b348515c1a5cd993|" +#~ msgstr "" + +#~ msgid "|cc080a555947492fa66131dc3a967603|" +#~ msgstr "" + +#~ msgid "|085c3e0fb8664c6aa06246636524b20b|" +#~ msgstr "" + +#~ msgid "|bfe69c74e48c45d49b50251c38c2a019|" +#~ msgstr "" + +#~ msgid "|ebbecd651f0348d99c6511ea859bf4ca|" +#~ msgstr "" + +#~ msgid "|163117eb654a4273babba413cf8065f5|" #~ msgstr "" -#~ "Lorsqu'il fournit des certificats, le " -#~ "serveur attend un tuple de trois " -#~ "certificats. :code:`Path` peut être utilisé" -#~ " pour lire facilement le contenu de" -#~ " ces fichiers en chaînes d'octets, ce" -#~ " qui est le type de données " -#~ "attendu par :code:`start_server`." -#~ msgid "" -#~ "The simplest way to get started " -#~ "with Flower is by using the " -#~ "pre-made Docker images, which you can" -#~ " find on `Docker Hub " -#~ "`_." 
+#~ msgid "|452ac3ba453b4cd1be27be1ba7560d64|" #~ msgstr "" -#~ msgid "Flower server" -#~ msgstr "Serveur de Flower" +#~ msgid "|f403fcd69e4e44409627e748b404c086|" +#~ msgstr "" -#~ msgid "" -#~ "The command will pull the Docker " -#~ "image with the tag " -#~ "``1.7.0-py3.11-ubuntu22.04`` from Docker Hub. " -#~ "The tag contains the information which" -#~ " Flower, Python and Ubuntu is used." -#~ " In this case, it uses Flower " -#~ "1.7.0, Python 3.11 and Ubuntu 22.04. " -#~ "The ``--rm`` flag tells Docker to " -#~ "remove the container after it exits." +#~ msgid "|4b00fe63870145968f8443619a792a42|" #~ msgstr "" -#~ msgid "" -#~ "By default, the Flower server keeps " -#~ "state in-memory. When using the " -#~ "Docker flag ``--rm``, the state is " -#~ "not persisted between container starts. " -#~ "We will show below how to save " -#~ "the state in a file on your " -#~ "host system." +#~ msgid "|368378731066486fa4397e89bc6b870c|" +#~ msgstr "" + +#~ msgid "|a66aa83d85bf4ffba7ed660b718066da|" +#~ msgstr "" + +#~ msgid "|82324b9af72a4582a81839d55caab767|" +#~ msgstr "" + +#~ msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" #~ msgstr "" #~ msgid "" -#~ "The ``-p :`` flag tells " -#~ "Docker to map the ports " -#~ "``9091``/``9092`` of the host to " -#~ "``9091``/``9092`` of the container, allowing" -#~ " you to access the Driver API " -#~ "on ``http://localhost:9091`` and the Fleet " -#~ "API on ``http://localhost:9092``. Lastly, any" -#~ " flag that comes after the tag " -#~ "is passed to the Flower server. " -#~ "Here, we are passing the flag " -#~ "``--insecure``." +#~ "Install `xz` (to install different " +#~ "Python versions) and `pandoc` to build" +#~ " the docs::" #~ msgstr "" #~ msgid "" -#~ "The ``--insecure`` flag enables insecure " -#~ "communication (using HTTP, not HTTPS) " -#~ "and should only be used for " -#~ "testing purposes. We strongly recommend " -#~ "enabling `SSL `_ when " -#~ "deploying to a production environment." 
+#~ "Ensure you system (Ubuntu 22.04+) is " +#~ "up-to-date, and you have all " +#~ "necessary packages::" #~ msgstr "" #~ msgid "" -#~ "You can use ``--help`` to view all" -#~ " available flags that the server " -#~ "supports:" +#~ "Let's create the Python environment for" +#~ " all-things Flower. If you wish " +#~ "to use :code:`pyenv`, we provide two " +#~ "convenience scripts that you can use." +#~ " If you prefer using something else" +#~ " than :code:`pyenv`, create a new " +#~ "environment, activate and skip to the" +#~ " last point where all packages are" +#~ " installed." #~ msgstr "" #~ msgid "" -#~ "If you want to persist the state" -#~ " of the server on your host " -#~ "system, all you need to do is " -#~ "specify a path where you want to" -#~ " save the file on your host " -#~ "system and a name for the database" -#~ " file. In the example below, we " -#~ "tell Docker via the flag ``-v`` to" -#~ " mount the user's home directory " -#~ "(``~/`` on your host) into the " -#~ "``/app/`` directory of the container. " -#~ "Furthermore, we use the flag " -#~ "``--database`` to specify the name of" -#~ " the database file." +#~ "If in a hurry, bypass the hook " +#~ "using ``--no-verify`` with the ``git " +#~ "commit`` command. ::" #~ msgstr "" #~ msgid "" -#~ "As soon as the server starts, the" -#~ " file ``state.db`` is created in the" -#~ " user's home directory on your host" -#~ " system. If the file already exists," -#~ " the server tries to restore the " -#~ "state from the file. To start the" -#~ " server with an empty database, " -#~ "simply remove the ``state.db`` file." +#~ "Flower's documentation uses `Sphinx " +#~ "`_. There's no " +#~ "convenience script to re-build the " +#~ "documentation yet, but it's pretty " +#~ "easy::" #~ msgstr "" #~ msgid "" -#~ "To enable SSL, you will need a " -#~ "CA certificate, a server certificate and" -#~ " a server private key." 
+#~ "Some quickstart examples may have " +#~ "limitations or requirements that prevent " +#~ "them from running on every environment." +#~ " For more information, please see " +#~ "`Limitations`_." #~ msgstr "" #~ msgid "" -#~ "For testing purposes, you can generate" -#~ " your own self-signed certificates. " -#~ "The `Enable SSL connections " -#~ "`_ page contains " -#~ "a section that will guide you " -#~ "through the process." +#~ "Change the application code. For " +#~ "example, change the ``seed`` in " +#~ "``quickstart_docker/task.py`` to ``43`` and " +#~ "save it:" #~ msgstr "" +#~ msgid ":code:`fit`" +#~ msgstr ":code:`fit`" + #~ msgid "" -#~ "Assuming all files we need are in" -#~ " the local ``certificates`` directory, we" -#~ " can use the flag ``-v`` to " -#~ "mount the local directory into the " -#~ "``/app/`` directory of the container. " -#~ "This allows the server to access " -#~ "the files within the container. Finally," -#~ " we pass the names of the " -#~ "certificates to the server with the " -#~ "``--certificates`` flag." +#~ "\\small\n" +#~ "\\frac{∆ \\times \\sqrt{2 \\times " +#~ "\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" +#~ "\n" #~ msgstr "" -#~ msgid "Using a different Flower or Python version" +#~ msgid "Enable node authentication in :code:`SuperLink`" #~ msgstr "" #~ msgid "" -#~ "If you want to use a different " -#~ "version of Flower or Python, you " -#~ "can do so by changing the tag. " -#~ "All versions we provide are available" -#~ " on `Docker Hub " -#~ "`_." +#~ "To enable node authentication, first you" +#~ " need to configure SSL/TLS connections " +#~ "to secure the SuperLink<>SuperNode " +#~ "communication. You can find the complete" +#~ " guide `here `_. After " +#~ "configuring secure connections, you can " +#~ "enable client authentication in a " +#~ "long-running Flower :code:`SuperLink`. 
Use " +#~ "the following terminal command to start" +#~ " a Flower :code:`SuperNode` that has " +#~ "both secure connections and node " +#~ "authentication enabled:" #~ msgstr "" #~ msgid "" -#~ "The following command returns the " -#~ "current image hash referenced by the " -#~ "``server:1.7.0-py3.11-ubuntu22.04`` tag:" +#~ "The first flag :code:`--auth-list-" +#~ "public-keys` expects a path to a " +#~ "CSV file storing all known node " +#~ "public keys. You need to store all" +#~ " known node public keys that are " +#~ "allowed to participate in a federation" +#~ " in one CSV file (:code:`.csv`)." #~ msgstr "" -#~ msgid "Next, we can pin the hash when running a new server container:" +#~ msgid "" +#~ "The second and third flags :code" +#~ ":`--auth-superlink-private-key` and :code" +#~ ":`--auth-superlink-public-key` expect paths" +#~ " to the server's private and public" +#~ " keys. For development purposes, you " +#~ "can generate a private and public " +#~ "key pair using :code:`ssh-keygen -t " +#~ "ecdsa -b 384`." #~ msgstr "" -#~ msgid "flower-driver-api" -#~ msgstr "flower-driver-api" +#~ msgid "Enable node authentication in :code:`SuperNode`" +#~ msgstr "" -#~ msgid "flower-fleet-api" -#~ msgstr "flower-fleet-api" +#~ msgid "" +#~ "Similar to the long-running Flower " +#~ "server (:code:`SuperLink`), you can easily " +#~ "enable node authentication in the " +#~ "long-running Flower client (:code:`SuperNode`)." 
+#~ " Use the following terminal command " +#~ "to start an authenticated :code:`SuperNode`:" +#~ msgstr "" #~ msgid "" -#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " -#~ "[:py:class:`int`, :py:class:`float`, :py:class:`str`, " -#~ ":py:class:`bytes`, :py:class:`bool`, " -#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`float`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`str`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`bytes`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`bool`]]]" +#~ "The :code:`--auth-supernode-private-key` " +#~ "flag expects a path to the node's" +#~ " private key file and the :code" +#~ ":`--auth-supernode-public-key` flag expects" +#~ " a path to the node's public " +#~ "key file. For development purposes, you" +#~ " can generate a private and public" +#~ " key pair using :code:`ssh-keygen -t" +#~ " ecdsa -b 384`." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`create_error_reply " -#~ "`\\ \\(error\\, " -#~ "ttl\\)" +#~ "You should now have learned how to" +#~ " start a long-running Flower server" +#~ " (:code:`SuperLink`) and client " +#~ "(:code:`SuperNode`) with node authentication " +#~ "enabled. You should also know the " +#~ "significance of the private key and " +#~ "store it safely to minimize security " +#~ "risks." 
#~ msgstr "" #~ msgid "" -#~ ":py:obj:`create_reply `\\ " -#~ "\\(content\\, ttl\\)" +#~ "If you have not added ``conda-" +#~ "forge`` to your channels, you will " +#~ "first need to run the following::" #~ msgstr "" #~ msgid "" -#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " -#~ "[:py:class:`int`, :py:class:`float`, " -#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`float`]]]" +#~ "Once the ``conda-forge`` channel has " +#~ "been enabled, ``flwr`` can be installed" +#~ " with ``conda``::" #~ msgstr "" -#~ msgid "Run Flower server (Driver API and Fleet API)." +#~ msgid "or with ``mamba``::" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`start_driver `\\ " -#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ "For central DP with server-side " +#~ "clipping, there are two :code:`Strategy` " +#~ "classes that act as wrappers around " +#~ "the actual :code:`Strategy` instance (for " +#~ "example, :code:`FedAvg`). The two wrapper " +#~ "classes are " +#~ ":code:`DifferentialPrivacyServerSideFixedClipping` and " +#~ ":code:`DifferentialPrivacyServerSideAdaptiveClipping` for " +#~ "fixed and adaptive clipping." #~ msgstr "" -#~ msgid "Start a Flower Driver API server." -#~ msgstr "Tout d'abord, démarre un serveur Flower :" +#~ msgid "" +#~ "The code sample below enables the " +#~ ":code:`FedAvg` strategy to use server-" +#~ "side fixed clipping using the " +#~ ":code:`DifferentialPrivacyServerSideFixedClipping` wrapper " +#~ "class. The same approach can be " +#~ "used with " +#~ ":code:`DifferentialPrivacyServerSideAdaptiveClipping` by " +#~ "adjusting the corresponding input parameters." +#~ msgstr "" #~ msgid "" -#~ ":py:obj:`Driver `\\ " -#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ "For central DP with client-side " +#~ "clipping, the server sends the clipping" +#~ " value to selected clients on each" +#~ " round. 
Clients can use existing " +#~ "Flower :code:`Mods` to perform the " +#~ "clipping. Two mods are available for " +#~ "fixed and adaptive client-side clipping:" +#~ " :code:`fixedclipping_mod` and " +#~ ":code:`adaptiveclipping_mod` with corresponding " +#~ "server-side wrappers " +#~ ":code:`DifferentialPrivacyClientSideFixedClipping` and " +#~ ":code:`DifferentialPrivacyClientSideAdaptiveClipping`." #~ msgstr "" -#~ "Flower 1.0 : ``start_server(..., " -#~ "config=flwr.server.ServerConfig(num_rounds=3, " -#~ "round_timeout=600.0), ...)``" -#~ msgid "`Driver` class provides an interface to the Driver API." +#~ msgid "" +#~ "The code sample below enables the " +#~ ":code:`FedAvg` strategy to use differential" +#~ " privacy with client-side fixed " +#~ "clipping using both the " +#~ ":code:`DifferentialPrivacyClientSideFixedClipping` wrapper " +#~ "class and, on the client, " +#~ ":code:`fixedclipping_mod`:" #~ msgstr "" #~ msgid "" -#~ "The IPv4 or IPv6 address of the" -#~ " Driver API server. Defaults to " -#~ "`\"[::]:9091\"`." +#~ "In addition to the server-side " +#~ "strategy wrapper, the :code:`ClientApp` needs" +#~ " to configure the matching " +#~ ":code:`fixedclipping_mod` to perform the " +#~ "client-side clipping:" #~ msgstr "" -#~ msgid "Disconnect from the SuperLink if connected." +#~ msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`create_message `\\" -#~ " \\(content\\, message\\_type\\, ...\\)" +#~ "Note that since version :code:`1.11.0`, " +#~ ":code:`flower-server-app` no longer " +#~ "supports passing a reference to a " +#~ "`ServerApp` attribute. Instead, you need " +#~ "to pass the path to Flower app " +#~ "via the argument :code:`--app`. This is" +#~ " the path to a directory containing" +#~ " a `pyproject.toml`. You can create a" +#~ " valid Flower app by executing " +#~ ":code:`flwr new` and following the " +#~ "prompt." 
#~ msgstr "" #~ msgid "" -#~ "Time-to-live for the round trip" -#~ " of this message, i.e., the time " -#~ "from sending this message to receiving" -#~ " a reply. It specifies the duration" -#~ " for which the message and its " -#~ "potential reply are considered valid." +#~ "Since CoreML does not allow the " +#~ "model parameters to be seen before " +#~ "training, and accessing the model " +#~ "parameters during or after the training" +#~ " can only be done by specifying " +#~ "the layer name, we need to know" +#~ " this information beforehand, through " +#~ "looking at the model specification, " +#~ "which are written as proto files. " +#~ "The implementation can be seen in " +#~ ":code:`MLModelInspect`." #~ msgstr "" -#~ msgid "start\\_driver" -#~ msgstr "start_client" +#~ msgid "" +#~ "Prior to local training, we need " +#~ "to load the MNIST dataset, a " +#~ "popular image classification dataset of " +#~ "handwritten digits for machine learning, " +#~ "and partition the dataset for FL. " +#~ "This can be conveniently achieved using" +#~ " `Flower Datasets `_." +#~ " The :code:`FederatedDataset.load_partition()` method" +#~ " loads the partitioned training set " +#~ "for each partition ID defined in " +#~ "the :code:`--partition-id` argument." +#~ msgstr "" #~ msgid "" -#~ "The IPv4 or IPv6 address of the" -#~ " Driver API server. Defaults to " -#~ "`\"[::]:8080\"`." +#~ "In this tutorial we will learn how" +#~ " to train a federated XGBoost model" +#~ " on HIGGS dataset using Flower and" +#~ " :code:`xgboost` package. We use a " +#~ "simple example (`full code xgboost-" +#~ "quickstart `_) with two *clients* " +#~ "and one *server* to demonstrate how " +#~ "federated XGBoost works, and then we " +#~ "dive into a more complex example " +#~ "(`full code xgboost-comprehensive " +#~ "`_) to run various experiments." #~ msgstr "" #~ msgid "" -#~ "A server implementation, either " -#~ "`flwr.server.Server` or a subclass thereof." 
-#~ " If no instance is provided, then " -#~ "`start_driver` will create one." +#~ "In this example, we split the " +#~ "dataset into 30 partitions with uniform" +#~ " distribution (:code:`IidPartitioner(num_partitions=30)`)." +#~ " Then, we load the partition for " +#~ "the given client based on " +#~ ":code:`partition_id`:" #~ msgstr "" #~ msgid "" -#~ "An implementation of the class " -#~ "`flwr.server.ClientManager`. If no implementation" -#~ " is provided, then `start_driver` will " -#~ "use `flwr.server.SimpleClientManager`." +#~ "After that, we do train/test splitting" +#~ " on the given partition (client's " +#~ "local data), and transform data format" +#~ " for :code:`xgboost` package." #~ msgstr "" -#~ msgid "The Driver object to use." +#~ msgid "" +#~ "The functions of :code:`train_test_split` and" +#~ " :code:`transform_dataset_to_dmatrix` are defined " +#~ "as below:" #~ msgstr "" -#~ msgid "Starting a driver that connects to an insecure server:" +#~ msgid "" +#~ "The :code:`num_local_round` represents the " +#~ "number of iterations for local tree " +#~ "boost. We use CPU for the training" +#~ " in default. One can shift it " +#~ "to GPU by setting :code:`tree_method` to" +#~ " :code:`gpu_hist`. We use AUC as " +#~ "evaluation metric." #~ msgstr "" -#~ msgid "Starting a driver that connects to an SSL-enabled server:" +#~ msgid "" +#~ "After loading the dataset we define " +#~ "the Flower client. We follow the " +#~ "general rule to define :code:`XgbClient` " +#~ "class inherited from :code:`fl.client.Client`." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`run_simulation_from_cli " -#~ "`\\ \\(\\)" +#~ "All required parameters defined above " +#~ "are passed to :code:`XgbClient`'s constructor." #~ msgstr "" -#~ msgid "Run Simulation Engine from the CLI." +#~ msgid "" +#~ "Then, we override :code:`get_parameters`, " +#~ ":code:`fit` and :code:`evaluate` methods " +#~ "insides :code:`XgbClient` class as follows." 
#~ msgstr "" -#~ msgid "run\\_simulation\\_from\\_cli" -#~ msgstr "Simulation de moniteur" +#~ msgid "" +#~ "Unlike neural network training, XGBoost " +#~ "trees are not started from a " +#~ "specified random weights. In this case," +#~ " we do not use :code:`get_parameters` " +#~ "and :code:`set_parameters` to initialise model" +#~ " parameters for XGBoost. As a result," +#~ " let's return an empty tensor in " +#~ ":code:`get_parameters` when it is called " +#~ "by the server at the first round." +#~ msgstr "" #~ msgid "" -#~ "Check out this Federated Learning " -#~ "quickstart tutorial for using Flower " -#~ "with MXNet to train a Sequential " -#~ "model on MNIST." +#~ "In :code:`fit`, at the first round, " +#~ "we call :code:`xgb.train()` to build up" +#~ " the first set of trees. From " +#~ "the second round, we load the " +#~ "global model sent from server to " +#~ "new build Booster object, and then " +#~ "update model weights on local training" +#~ " data with function :code:`local_boost` as" +#~ " follows:" #~ msgstr "" -#~ msgid "Quickstart MXNet" -#~ msgstr "Démarrage rapide de MXNet" +#~ msgid "" +#~ "Given :code:`num_local_round`, we update trees" +#~ " by calling :code:`bst_input.update` method. " +#~ "After training, the last " +#~ ":code:`N=num_local_round` trees will be " +#~ "extracted to send to the server." +#~ msgstr "" #~ msgid "" -#~ "MXNet is no longer maintained and " -#~ "has been moved into `Attic " -#~ "`_. As a " -#~ "result, we would encourage you to " -#~ "use other ML frameworks alongside " -#~ "Flower, for example, PyTorch. This " -#~ "tutorial might be removed in future " -#~ "versions of Flower." +#~ "In :code:`evaluate`, after loading the " +#~ "global model, we call :code:`bst.eval_set` " +#~ "function to conduct evaluation on valid" +#~ " set. The AUC value will be " +#~ "returned." 
#~ msgstr "" #~ msgid "" -#~ "In this tutorial, we will learn " -#~ "how to train a :code:`Sequential` model" -#~ " on MNIST using Flower and MXNet." +#~ "We use two clients for this " +#~ "example. An :code:`evaluate_metrics_aggregation` " +#~ "function is defined to collect and " +#~ "wighted average the AUC values from " +#~ "clients. The :code:`config_func` function is" +#~ " to return the current FL round " +#~ "number to client's :code:`fit()` and " +#~ ":code:`evaluate()` methods." #~ msgstr "" -#~ "Dans ce tutoriel, nous allons apprendre" -#~ " à former un modèle :code:`Sequential` " -#~ "sur MNIST à l'aide de Flower et" -#~ " de MXNet." -#~ msgid "Since we want to use MXNet, let's go ahead and install it:" -#~ msgstr "Puisque nous voulons utiliser MXNet, allons-y et installons-le :" +#~ msgid "" +#~ "In file :code:`flwr.server.strategy.fedxgb_bagging.py`," +#~ " we define :code:`FedXgbBagging` inherited " +#~ "from :code:`flwr.server.strategy.FedAvg`. Then, we" +#~ " override the :code:`aggregate_fit`, " +#~ ":code:`aggregate_evaluate` and :code:`evaluate` " +#~ "methods as follows:" +#~ msgstr "" #~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training with two " -#~ "clients and one server. Our training " -#~ "procedure and network architecture are " -#~ "based on MXNet´s `Hand-written Digit " -#~ "Recognition tutorial " -#~ "`_." +#~ "In :code:`aggregate_fit`, we sequentially " +#~ "aggregate the clients' XGBoost trees by" +#~ " calling :code:`aggregate()` function:" +#~ msgstr "" + +#~ msgid "" +#~ "In this function, we first fetch " +#~ "the number of trees and the number" +#~ " of parallel trees for the current" +#~ " and previous model by calling " +#~ ":code:`_get_tree_nums`. Then, the fetched " +#~ "information will be aggregated. After " +#~ "that, the trees (containing model " +#~ "weights) are aggregated to generate a" +#~ " new tree model." 
+#~ msgstr "" + +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated XGBoost " +#~ "system. The AUC values can be " +#~ "checked in :code:`metrics_distributed`. One " +#~ "can see that the average AUC " +#~ "increases over FL rounds." #~ msgstr "" -#~ "Maintenant que toutes nos dépendances " -#~ "sont installées, lançons une formation " -#~ "distribuée simple avec deux clients et" -#~ " un serveur. Notre procédure de " -#~ "formation et l'architecture du réseau " -#~ "sont basées sur le tutoriel de " -#~ "reconnaissance de chiffres écrits à la" -#~ " main du MXNet " -#~ "`_." #~ msgid "" -#~ "In a file called :code:`client.py`, " -#~ "import Flower and MXNet related " -#~ "packages:" +#~ "To do this, we first customise a" +#~ " :code:`ClientManager` in :code:`server_utils.py`:" #~ msgstr "" -#~ "Dans un fichier appelé :code:`client.py`, " -#~ "importe Flower et les paquets liés " -#~ "au MXNet :" -#~ msgid "In addition, define the device allocation in MXNet with:" -#~ msgstr "En outre, définis l'attribution de l'appareil dans MXNet avec :" +#~ msgid "" +#~ "The customised :code:`ClientManager` samples " +#~ "all available clients in each FL " +#~ "round based on the order of " +#~ "connection to the server. Then, we " +#~ "define a new strategy :code:`FedXgbCyclic` " +#~ "in :code:`flwr.server.strategy.fedxgb_cyclic.py`, in " +#~ "order to sequentially select only one" +#~ " client in given round and pass " +#~ "the received model to next client." +#~ msgstr "" #~ msgid "" -#~ "We use MXNet to load MNIST, a " -#~ "popular image classification dataset of " -#~ "handwritten digits for machine learning. " -#~ "The MXNet utility :code:`mx.test_utils.get_mnist()`" -#~ " downloads the training and test " -#~ "data." +#~ "Unlike the original :code:`FedAvg`, we " +#~ "don't perform aggregation here. Instead, " +#~ "we just make a copy of the " +#~ "received client model as global model" +#~ " by overriding :code:`aggregate_fit`." 
#~ msgstr "" -#~ "Nous utilisons MXNet pour charger MNIST," -#~ " un ensemble de données de " -#~ "classification d'images populaire de chiffres" -#~ " manuscrits pour l'apprentissage automatique. " -#~ "L'utilitaire MXNet :code:`mx.test_utils.get_mnist()` " -#~ "télécharge les données d'entraînement et " -#~ "de test." #~ msgid "" -#~ "Define the training and loss with " -#~ "MXNet. We train the model by " -#~ "looping over the dataset, measure the" -#~ " corresponding loss, and optimize it." +#~ "Also, the customised :code:`configure_fit` and" +#~ " :code:`configure_evaluate` methods ensure the" +#~ " clients to be sequentially selected " +#~ "given FL round:" #~ msgstr "" -#~ "Définis l'entraînement et la perte avec" -#~ " MXNet. Nous entraînons le modèle en" -#~ " parcourant en boucle l'ensemble des " -#~ "données, nous mesurons la perte " -#~ "correspondante et nous l'optimisons." #~ msgid "" -#~ "Next, we define the validation of " -#~ "our machine learning model. We loop " -#~ "over the test set and measure both" -#~ " loss and accuracy on the test " -#~ "set." +#~ "In :code:`dataset.py`, we have a " +#~ "function :code:`instantiate_partitioner` to " +#~ "instantiate the data partitioner based " +#~ "on the given :code:`num_partitions` and " +#~ ":code:`partitioner_type`. Currently, we provide " +#~ "four supported partitioner type to " +#~ "simulate the uniformity/non-uniformity in " +#~ "data quantity (uniform, linear, square, " +#~ "exponential)." #~ msgstr "" -#~ "Ensuite, nous définissons la validation " -#~ "de notre modèle d'apprentissage automatique." -#~ " Nous effectuons une boucle sur " -#~ "l'ensemble de test et mesurons à " -#~ "la fois la perte et la précision" -#~ " sur l'ensemble de test." #~ msgid "" -#~ "After defining the training and testing" -#~ " of a MXNet machine learning model," -#~ " we use these functions to implement" -#~ " a Flower client." 
+#~ "To facilitate centralised evaluation, we " +#~ "define a function in :code:`server_utils.py`:" #~ msgstr "" -#~ "Après avoir défini la formation et " -#~ "le test d'un modèle d'apprentissage " -#~ "automatique MXNet, nous utilisons ces " -#~ "fonctions pour mettre en œuvre un " -#~ "client Flower." -#~ msgid "Our Flower clients will use a simple :code:`Sequential` model:" -#~ msgstr "Nos clients Flower utiliseront un modèle simple :code:`Sequential` :" +#~ msgid "" +#~ "This function returns a evaluation " +#~ "function which instantiates a :code:`Booster`" +#~ " object and loads the global model" +#~ " weights to it. The evaluation is " +#~ "conducted by calling :code:`eval_set()` " +#~ "method, and the tested AUC value " +#~ "is reported." +#~ msgstr "" #~ msgid "" -#~ "After loading the dataset with " -#~ ":code:`load_data()` we perform one forward " -#~ "propagation to initialize the model and" -#~ " model parameters with :code:`model(init)`. " -#~ "Next, we implement a Flower client." +#~ "As for distributed evaluation on the " +#~ "clients, it's same as the quick-" +#~ "start example by overriding the " +#~ ":code:`evaluate()` method insides the " +#~ ":code:`XgbClient` class in :code:`client_utils.py`." #~ msgstr "" -#~ "Après avoir chargé l'ensemble de données" -#~ " avec :code:`load_data()`, nous effectuons " -#~ "une propagation vers l'avant pour " -#~ "initialiser le modèle et les paramètres" -#~ " du modèle avec :code:`model(init)`. " -#~ "Ensuite, nous implémentons un client " -#~ "Flower." #~ msgid "" -#~ "Flower provides a convenience class " -#~ "called :code:`NumPyClient` which makes it " -#~ "easier to implement the :code:`Client` " -#~ "interface when your workload uses MXNet." 
-#~ " Implementing :code:`NumPyClient` usually means" -#~ " defining the following methods " -#~ "(:code:`set_parameters` is optional though):" +#~ "We also provide an example code " +#~ "(:code:`sim.py`) to use the simulation " +#~ "capabilities of Flower to simulate " +#~ "federated XGBoost training on either a" +#~ " single machine or a cluster of " +#~ "machines." #~ msgstr "" -#~ "Flower fournit une classe de commodité" -#~ " appelée :code:`NumPyClient` qui facilite " -#~ "l'implémentation de l'interface :code:`Client` " -#~ "lorsque ta charge de travail utilise " -#~ "MXNet. L'implémentation de :code:`NumPyClient` " -#~ "signifie généralement la définition des " -#~ "méthodes suivantes (:code:`set_parameters` est " -#~ "cependant facultatif) :" -#~ msgid "They can be implemented in the following way:" -#~ msgstr "Ils peuvent être mis en œuvre de la manière suivante :" +#~ msgid "" +#~ "After importing all required packages, " +#~ "we define a :code:`main()` function to" +#~ " perform the simulation process:" +#~ msgstr "" #~ msgid "" -#~ "We can now create an instance of" -#~ " our class :code:`MNISTClient` and add " -#~ "one line to actually run this " -#~ "client:" +#~ "We first load the dataset and " +#~ "perform data partitioning, and the " +#~ "pre-processed data is stored in a " +#~ ":code:`list`. After the simulation begins, " +#~ "the clients won't need to pre-" +#~ "process their partitions again." #~ msgstr "" -#~ "Nous pouvons maintenant créer une " -#~ "instance de notre classe :code:`MNISTClient`" -#~ " et ajouter une ligne pour exécuter" -#~ " ce client :" #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()` or " -#~ ":code:`fl.client.start_numpy_client()`. The string " -#~ ":code:`\"0.0.0.0:8080\"` tells the client " -#~ "which server to connect to. 
In our" -#~ " case we can run the server and" -#~ " the client on the same machine, " -#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" -#~ " we run a truly federated workload" -#~ " with the server and clients running" -#~ " on different machines, all that " -#~ "needs to change is the " -#~ ":code:`server_address` we pass to the " -#~ "client." +#~ "After that, we start the simulation " +#~ "by calling :code:`fl.simulation.start_simulation`:" #~ msgstr "" -#~ "C'est tout pour le client. Il nous" -#~ " suffit d'implémenter :code:`Client` ou " -#~ ":code:`NumPyClient` et d'appeler " -#~ ":code:`fl.client.start_client()`. La chaîne " -#~ ":code:`\"0.0.0:8080\"` indique au client à " -#~ "quel serveur se connecter. Dans notre" -#~ " cas, nous pouvons exécuter le " -#~ "serveur et le client sur la même" -#~ " machine, c'est pourquoi nous utilisons " -#~ ":code:`\"0.0.0:8080\"`. Si nous exécutons une" -#~ " charge de travail véritablement fédérée" -#~ " avec le serveur et les clients " -#~ "s'exécutant sur des machines différentes, " -#~ "tout ce qui doit changer est " -#~ ":code:`server_address` que nous transmettons " -#~ "au client." #~ msgid "" -#~ "With both client and server ready, " -#~ "we can now run everything and see" -#~ " federated learning in action. Federated" -#~ " learning systems usually have a " -#~ "server and multiple clients. We " -#~ "therefore have to start the server " -#~ "first:" +#~ "One of key parameters for " +#~ ":code:`start_simulation` is :code:`client_fn` which" +#~ " returns a function to construct a" +#~ " client. We define it as follows:" #~ msgstr "" -#~ "Le client et le serveur étant " -#~ "prêts, nous pouvons maintenant tout " -#~ "exécuter et voir l'apprentissage fédéré " -#~ "en action. Les systèmes d'apprentissage " -#~ "fédéré ont généralement un serveur et" -#~ " plusieurs clients. Nous devons donc " -#~ "commencer par démarrer le serveur :" #~ msgid "" -#~ "Congratulations! 
You've successfully built and" -#~ " run your first federated learning " -#~ "system. The full `source code " -#~ "`_ for this example can " -#~ "be found in :code:`examples/quickstart-mxnet`." +#~ "In :code:`utils.py`, we define the " +#~ "arguments parsers for clients, server " +#~ "and simulation, allowing users to " +#~ "specify different experimental settings. Let's" +#~ " first see the sever side:" #~ msgstr "" -#~ "Félicitations ! Tu as réussi à " -#~ "construire et à faire fonctionner ton" -#~ " premier système d'apprentissage fédéré. Le" -#~ " code source complet " -#~ "`_ de cet exemple se " -#~ "trouve dans :code:`examples/quickstart-mxnet`." -#~ msgid ":code:`load_mnist()`" -#~ msgstr ":code:`load_mnist()`" +#~ msgid "" +#~ "This allows user to specify training " +#~ "strategies / the number of total " +#~ "clients / FL rounds / participating " +#~ "clients / clients for evaluation, and" +#~ " evaluation fashion. Note that with " +#~ ":code:`--centralised-eval`, the sever will " +#~ "do centralised evaluation and all " +#~ "functionalities for client evaluation will " +#~ "be disabled." +#~ msgstr "" -#~ msgid "Loads the MNIST dataset using OpenML" -#~ msgstr "Charge l'ensemble de données MNIST à l'aide d'OpenML" +#~ msgid "" +#~ "This defines various options for client" +#~ " data partitioning. Besides, clients also" +#~ " have an option to conduct evaluation" +#~ " on centralised test set by setting" +#~ " :code:`--centralised-eval`, as well as " +#~ "an option to perform scaled learning " +#~ "rate based on the number of " +#~ "clients by setting :code:`--scaled-lr`." 
+#~ msgstr "" -#~ msgid ":code:`shuffle()`" -#~ msgstr ":code:`shuffle()`" +#~ msgid "|b8714c45b74b4d8fb008e2ebb3bc1d44|" +#~ msgstr "" -#~ msgid "Shuffles data and its label" -#~ msgstr "Mélange les données et leur étiquette" +#~ msgid "|75f1561efcfd422ea67d28d1513120dc|" +#~ msgstr "" -#~ msgid ":code:`partition()`" -#~ msgstr ":code:`partition()`" +#~ msgid "|6a1f51b235304558a9bdaaabfc93b8d2|" +#~ msgstr "" -#~ msgid "Splits datasets into a number of partitions" -#~ msgstr "Divise les ensembles de données en un certain nombre de partitions" +#~ msgid "|35e70dab1fb544af9aa3a9c09c4f9797|" +#~ msgstr "" -#~ msgid "" -#~ "We load the MNIST dataset from " -#~ "`OpenML " -#~ "`_, a" -#~ " popular image classification dataset of" -#~ " handwritten digits for machine learning." -#~ " The utility :code:`utils.load_mnist()` downloads" -#~ " the training and test data. The " -#~ "training set is split afterwards into" -#~ " 10 partitions with :code:`utils.partition()`." +#~ msgid "|d7efb5705dd3467f991ed23746824a07|" #~ msgstr "" -#~ "Nous chargeons l'ensemble de données " -#~ "MNIST de `OpenML `_," -#~ " un ensemble de données de " -#~ "classification d'images populaires de chiffres" -#~ " manuscrits pour l'apprentissage automatique. " -#~ "L'utilitaire :code:`utils.load_mnist()` télécharge " -#~ "les données d'entraînement et de test." -#~ " L'ensemble d'entraînement est ensuite " -#~ "divisé en 10 partitions avec " -#~ ":code:`utils.partition()`." -#~ msgid "Let's get stated!" -#~ msgstr "Allons-y, déclarons-le !" 
+#~ msgid "|94e7b021c7b540bfbedf7f082a41ff87|" +#~ msgstr "" -#~ msgid "|2b5c62c529f6416f840c594cce062fbb|" +#~ msgid "|a80714782dde439ab73936518f91fc3c|" #~ msgstr "" -#~ msgid "|90b334680cb7467d9a04d39b8e8dca9f|" +#~ msgid "|c62080ca6197473da57d191c8225a9d9|" #~ msgstr "" -#~ msgid "|65764ceee89f4335bfd93fd0b115e831|" +#~ msgid "|21a8f1e6a5b14a7bbb8559979d0e8a2b|" #~ msgstr "" -#~ msgid "|d97319ec28bb407ea0ab9705e38f3bcf|" +#~ msgid "|c310f2a22f7b4917bf42775aae7a1c09|" #~ msgstr "" -#~ msgid "|11e95ac83a8548d8b3505b4663187d07|" +#~ msgid "|a0c5b43401194535a8460bcf02e65f9a|" #~ msgstr "" -#~ msgid "|1dab2f3a23674abc8a6731f20fa10730|" +#~ msgid "|aabfdbd5564e41a790f8ea93cc21a444|" #~ msgstr "" -#~ msgid "|7f0ee162da38450788493a21627306f7|" +#~ msgid "|c9cc8f160fa647b09e742fe4dc8edb54|" #~ msgstr "" -#~ msgid "|296a1fb72c514b23b3d8905ff0ff98c6|" +#~ msgid "|7e83aad011cd4907b2f02f907c6922e9|" #~ msgstr "" -#~ msgid "|5b1408eec0d746cdb91162a9107b6089|" +#~ msgid "|4627c2bb6cc443ae9e079f81f33c9dd9|" #~ msgstr "" -#~ msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" +#~ msgid "|131af8322dc5466b827afd24be98f8c0|" #~ msgstr "" -#~ msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" +#~ msgid "|f92920b87f3a40179bf7ddd0b6144c53|" #~ msgstr "" -#~ msgid "|ec1fe880237247e0975f52766775ab84|" +#~ msgid "|d62da263071d45a496f543e41fce3a19|" #~ msgstr "" -#~ msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" +#~ msgid "|ad851971645b4e1fbf8d15bcc0b2ee11|" #~ msgstr "" -#~ msgid "|ff726bc5505e432388ee2fdd6ef420b9|" +#~ msgid "|929e9a6de6b34edb8488e644e2bb5221|" +#~ msgstr "" + +#~ msgid "|404cf9c9e8d64784a55646c0f9479cbc|" +#~ msgstr "" + +#~ msgid "|b021ff9d25814458b1e631f8985a648b|" +#~ msgstr "" + +#~ msgid "|e6ca84e1df244f238288a768352678e5|" +#~ msgstr "" + +#~ msgid "|39c2422082554a21963baffb33a0d057|" +#~ msgstr "" + +#~ msgid "|07ecf5fcd6814e88906accec6fa0fbfb|" +#~ msgstr "" + +#~ msgid "|57e78c0ca8a94ba5a64a04b1f2280e55|" +#~ msgstr "" + +#~ msgid "|9819b40e59ee40a4921e1244e8c99bac|" +#~ msgstr 
"" + +#~ msgid "|797bf279c4894b5ead31dc9b0534ed62|" +#~ msgstr "" + +#~ msgid "|3a7aceef05f0421794726ac54aaf12fd|" +#~ msgstr "" + +#~ msgid "|d741075f8e624331b42c0746f7d258a0|" +#~ msgstr "" + +#~ msgid "|8fc92d668bcb42b8bda55143847f2329|" +#~ msgstr "" + +#~ msgid "|1c705d833a024f22adcaeb8ae3d13b0b|" +#~ msgstr "" + +#~ msgid "|77a037b546a84262b608e04bc82a2c96|" +#~ msgstr "" + +#~ msgid "|f568e24c9fb0435690ac628210a4be96|" +#~ msgstr "" + +#~ msgid "|a7bf029981514e2593aa3a2b48c9d76a|" +#~ msgstr "" + +#~ msgid "|3f645ad807f84be8b1f8f3267173939c|" +#~ msgstr "" + +#~ msgid "|a06a9dbd603f45819afd8e8cfc3c4b8f|" +#~ msgstr "" + +#~ msgid "|edcf9a04d96e42608fd01a333375febe|" +#~ msgstr "" + +#~ msgid "|3dae22fe797043968e2b7aa7073c78bd|" +#~ msgstr "" + +#~ msgid "|ba178f75267d4ad8aa7363f20709195f|" +#~ msgstr "" + +#~ msgid "|c380c750bfd2444abce039a1c6fa8e60|" +#~ msgstr "" + +#~ msgid "|e7cec00a114b48359935c6510595132e|" #~ msgstr "" diff --git a/doc/locales/ko/LC_MESSAGES/framework-docs.po b/doc/locales/ko/LC_MESSAGES/framework-docs.po index 17960b663150..424eaf5f86a2 100644 --- a/doc/locales/ko/LC_MESSAGES/framework-docs.po +++ b/doc/locales/ko/LC_MESSAGES/framework-docs.po @@ -7,9 +7,9 @@ msgid "" msgstr "" "Project-Id-Version: Flower main\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-05-28 11:47+0200\n" -"PO-Revision-Date: 2024-05-14 21:01+0000\n" -"Last-Translator: \"Young D. 
Kwon\" \n" +"POT-Creation-Date: 2024-10-10 00:29+0000\n" +"PO-Revision-Date: 2024-08-23 13:09+0000\n" +"Last-Translator: Seulki Yun \n" "Language: ko\n" "Language-Team: Korean \n" @@ -17,282 +17,482 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.15.0\n" +"Generated-By: Babel 2.16.0\n" -#: ../../source/contributor-explanation-architecture.rst:2 -msgid "Flower Architecture" -msgstr "Flower 아키텍처" - -#: ../../source/contributor-explanation-architecture.rst:5 -msgid "Edge Client Engine" -msgstr "엣지 클라이언트 엔진" +#: ../../source/contributor-explanation-public-and-private-apis.rst:2 +msgid "Public and private APIs" +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:7 +#: ../../source/contributor-explanation-public-and-private-apis.rst:4 msgid "" -"`Flower `_ core framework architecture with Edge " -"Client Engine" -msgstr "`Flower `_의 핵심 프레임워크 아키텍처와 엣지 클라이언트 엔진" - -#: ../../source/contributor-explanation-architecture.rst:13 -msgid "Virtual Client Engine" -msgstr "가상 클라이언트 엔진" +"In Python, everything is public. To enable developers to understand which" +" components can be relied upon, Flower declares a public API. Components " +"that are part of the public API can be relied upon. Changes to the public" +" API are announced in the release notes and are subject to deprecation " +"policies." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:15 +#: ../../source/contributor-explanation-public-and-private-apis.rst:9 msgid "" -"`Flower `_ core framework architecture with Virtual " -"Client Engine" -msgstr "`Flower `_의 핵심 프레임워크 아키텍처와 가상 클라이언트 엔진" - -#: ../../source/contributor-explanation-architecture.rst:21 -msgid "Virtual Client Engine and Edge Client Engine in the same workload" -msgstr "동일 작업에서 가상 클라이언트 엔진과 엣지 클라이언트 엔진" +"Everything that is not part of the public API is part of the private API." 
+" Even though Python allows accessing them, user code should never use " +"those components. Private APIs can change at any time, even in patch " +"releases." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:23 +#: ../../source/contributor-explanation-public-and-private-apis.rst:13 msgid "" -"`Flower `_ core framework architecture with both " -"Virtual Client Engine and Edge Client Engine" -msgstr "`Flower `_의 핵심 프레임워크 아키텍처와 가상 및 엣지 클라이언트 엔진" +"How can you determine whether a component is part of the public API or " +"not? Easy:" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:2 -msgid "How to build Docker Flower images locally" -msgstr "Docker Flower 이미지를 Locally 구축하는 방법" +#: ../../source/contributor-explanation-public-and-private-apis.rst:15 +msgid "`Use the Flower API reference documentation `_" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:4 -msgid "" -"Flower provides pre-made docker images on `Docker Hub " -"`_ that include all necessary dependencies" -" for running the SuperLink. You can also build your own custom docker " -"images from scratch with a different version of Python or Ubuntu if that " -"is what you need. In this guide, we will explain what images exist and " -"how to build them locally." +#: ../../source/contributor-explanation-public-and-private-apis.rst:16 +msgid "`Use the Flower CLI reference documentation `_" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:9 +#: ../../source/contributor-explanation-public-and-private-apis.rst:18 msgid "" -"Before we can start, we need to meet a few prerequisites in our local " -"development environment." +"Everything listed in the reference documentation is part of the public " +"API. This document explains how Flower maintainers define the public API " +"and how you can determine whether a component is part of the public API " +"or not by reading the Flower source code." 
msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:11 -msgid "Clone the flower repository." -msgstr "" +#: ../../source/contributor-explanation-public-and-private-apis.rst:23 +#, fuzzy +msgid "Flower public API" +msgstr "Flower ClientApp." -#: ../../source/contributor-how-to-build-docker-images.rst:17 -#: ../../source/how-to-run-flower-using-docker.rst:144 -msgid "Verify the Docker daemon is running." +#: ../../source/contributor-explanation-public-and-private-apis.rst:25 +msgid "Flower has a well-defined public API. Let's look at this in more detail." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:19 -#: ../../source/how-to-run-flower-using-docker.rst:146 +#: ../../source/contributor-explanation-public-and-private-apis.rst:29 msgid "" -"Please follow the first section on :doc:`Run Flower using Docker ` which covers this step in more detail." +"Every component that is reachable by recursively following " +"``__init__.__all__`` starting from the root package (``flwr``) is part of" +" the public API." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:23 +#: ../../source/contributor-explanation-public-and-private-apis.rst:32 msgid "" -"Currently, Flower provides two images, a ``base`` image and a " -"``superlink`` image. The base image, as the name suggests, contains basic" -" dependencies that the SuperLink needs. This includes system " -"dependencies, Python and Python tools. The SuperLink image is based on " -"the base image, but it additionally installs the SuperLink using ``pip``." +"If you want to determine whether a component " +"(class/function/generator/...) is part of the public API or not, you need" +" to start at the root of the ``flwr`` package. 
Let's use ``tree -L 1 -d " +"src/py/flwr`` to look at the Python sub-packages contained ``flwr``:" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:28 +#: ../../source/contributor-explanation-public-and-private-apis.rst:46 msgid "" -"The build instructions that assemble the images are located in the " -"respective Dockerfiles. You can find them in the subdirectories of " -"``src/docker``." +"Contrast this with the definition of ``__all__`` in the root " +"``src/py/flwr/__init__.py``:" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:31 +#: ../../source/contributor-explanation-public-and-private-apis.rst:59 msgid "" -"Both, base and SuperLink image are configured via build arguments. " -"Through build arguments, we can make our build more flexible. For " -"example, in the base image, we can specify the version of Python to " -"install using the ``PYTHON_VERSION`` build argument. Some of the build " -"arguments have default values, others must be specified when building the" -" image. All available build arguments for each image are listed in one of" -" the tables below." +"You can see that ``flwr`` has six subpackages (``cli``, ``client``, " +"``common``, ``proto``, ``server``, ``simulation``), but only four of them" +" are \"exported\" via ``__all__`` (``client``, ``common``, ``server``, " +"``simulation``)." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:38 -msgid "Building the base image" +#: ../../source/contributor-explanation-public-and-private-apis.rst:63 +msgid "" +"What does this mean? It means that ``client``, ``common``, ``server`` and" +" ``simulation`` are part of the public API, but ``cli`` and ``proto`` are" +" not. The ``flwr`` subpackages ``cli`` and ``proto`` are private APIs. A " +"private API can change completely from one release to the next (even in " +"patch releases). 
It can change in a breaking way, it can be renamed (for " +"example, ``flwr.cli`` could be renamed to ``flwr.command``) and it can " +"even be removed completely." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:44 -#: ../../source/contributor-how-to-build-docker-images.rst:86 -msgid "Build argument" +#: ../../source/contributor-explanation-public-and-private-apis.rst:70 +msgid "Therefore, as a Flower user:" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:45 -#: ../../source/contributor-how-to-build-docker-images.rst:87 -msgid "Description" +#: ../../source/contributor-explanation-public-and-private-apis.rst:72 +msgid "``from flwr import client`` ✅ Ok, you're importing a public API." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:46 -#: ../../source/contributor-how-to-build-docker-images.rst:88 -msgid "Required" +#: ../../source/contributor-explanation-public-and-private-apis.rst:73 +msgid "" +"``from flwr import proto`` ❌ Not recommended, you're importing a private " +"API." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:47 -#: ../../source/contributor-how-to-build-docker-images.rst:89 -msgid "Example" +#: ../../source/contributor-explanation-public-and-private-apis.rst:75 +msgid "" +"What about components that are nested deeper in the hierarchy? Let's look" +" at Flower strategies to see another typical pattern. Flower strategies " +"like ``FedAvg`` are often imported using ``from flwr.server.strategy " +"import FedAvg``. Let's look at " +"``src/py/flwr/server/strategy/__init__.py``:" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:48 -#: ../../source/contributor-how-to-build-docker-images.rst:94 -msgid "``PYTHON_VERSION``" +#: ../../source/contributor-explanation-public-and-private-apis.rst:91 +msgid "" +"What's notable here is that all strategies are implemented in dedicated " +"modules (e.g., ``fedavg.py``). 
In ``__init__.py``, we *import* the " +"components we want to make part of the public API and then *export* them " +"via ``__all__``. Note that we export the component itself (for example, " +"the ``FedAvg`` class), but not the module it is defined in (for example, " +"``fedavg.py``). This allows us to move the definition of ``FedAvg`` into " +"a different module (or even a module in a subpackage) without breaking " +"the public API (as long as we update the import path in ``__init__.py``)." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:49 -msgid "Version of ``python`` to be installed." +#: ../../source/contributor-explanation-public-and-private-apis.rst:99 +msgid "Therefore:" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:50 -#: ../../source/contributor-how-to-build-docker-images.rst:54 -#: ../../source/contributor-how-to-build-docker-images.rst:58 -#: ../../source/contributor-how-to-build-docker-images.rst:108 -msgid "Yes" +#: ../../source/contributor-explanation-public-and-private-apis.rst:101 +msgid "" +"``from flwr.server.strategy import FedAvg`` ✅ Ok, you're importing a " +"class that is part of the public API." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:51 -msgid "``3.11``" +#: ../../source/contributor-explanation-public-and-private-apis.rst:103 +msgid "" +"``from flwr.server.strategy import fedavg`` ❌ Not recommended, you're " +"importing a private module." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:52 -msgid "``PIP_VERSION``" +#: ../../source/contributor-explanation-public-and-private-apis.rst:106 +msgid "" +"This approach is also implemented in the tooling that automatically " +"builds API reference docs." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:53 -msgid "Version of ``pip`` to be installed." 
+#: ../../source/contributor-explanation-public-and-private-apis.rst:110 +msgid "Flower public API of private packages" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:55 -msgid "``23.0.1``" +#: ../../source/contributor-explanation-public-and-private-apis.rst:112 +msgid "" +"We also use this to define the public API of private subpackages. Public," +" in this context, means the API that other ``flwr`` subpackages should " +"use. For example, ``flwr.server.driver`` is a private subpackage (it's " +"not exported via ``src/py/flwr/server/__init__.py``'s ``__all__``)." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:56 -msgid "``SETUPTOOLS_VERSION``" +#: ../../source/contributor-explanation-public-and-private-apis.rst:117 +msgid "" +"Still, the private sub-package ``flwr.server.driver`` defines a " +"\"public\" API using ``__all__`` in " +"``src/py/flwr/server/driver/__init__.py``:" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:57 -msgid "Version of ``setuptools`` to be installed." +#: ../../source/contributor-explanation-public-and-private-apis.rst:132 +msgid "" +"The interesting part is that both ``GrpcDriver`` and ``InMemoryDriver`` " +"are never used by Flower framework users, only by other parts of the " +"Flower framework codebase. Those other parts of the codebase import, for " +"example, ``InMemoryDriver`` using ``from flwr.server.driver import " +"InMemoryDriver`` (i.e., the ``InMemoryDriver`` exported via ``__all__``)," +" not ``from flwr.server.driver.in_memory_driver import InMemoryDriver`` " +"(``in_memory_driver.py`` is the module containing the actual " +"``InMemoryDriver`` class definition)." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:59 -msgid "``69.0.2``" +#: ../../source/contributor-explanation-public-and-private-apis.rst:140 +msgid "" +"This is because ``flwr.server.driver`` defines a public interface for " +"other ``flwr`` subpackages. 
This allows codeowners of " +"``flwr.server.driver`` to refactor the package without breaking other " +"``flwr``-internal users." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:60 -#: ../../source/contributor-how-to-build-docker-images.rst:98 -msgid "``UBUNTU_VERSION``" -msgstr "" +#: ../../source/contributor-how-to-build-docker-images.rst:2 +#, fuzzy +msgid "How to Build Docker Flower Images Locally" +msgstr "Docker Flower 이미지를 Locally 구축하는 방법" -#: ../../source/contributor-how-to-build-docker-images.rst:61 -msgid "Version of the official Ubuntu Docker image." +#: ../../source/contributor-how-to-build-docker-images.rst:4 +msgid "" +"Flower provides pre-made docker images on `Docker Hub " +"`_ that include all necessary dependencies" +" for running the SuperLink, SuperNode or ServerApp. You can also build " +"your own custom docker images from scratch with a different version of " +"Python or Linux distribution (Ubuntu/Alpine) if that is what you need. In" +" this guide, we will explain what images exist and how to build them " +"locally." msgstr "" +"Flower는 'Docker Hub '_에서 미리 만들어진 Docker " +"이미지들을 제공합니다. 해당 이미지들은 SuperLink, ServerNode 또는 ServerApp을 실행하는 데 필요한 모든 " +"dependencies를 포함합니다. 필요한 경우 다른 버전의 Python이나 Linux 배포판(Ubuntu/Alpine)을 사용해" +" 처음부터 사용자 정의 Docker 이미지를 빌드할 수도 있습니다. 이 가이드에서는 존재하는 이미지들과 이들을 로컬에서 빌드하는 " +"방법에 대해 설명하겠습니다." -#: ../../source/contributor-how-to-build-docker-images.rst:62 -msgid "Defaults to ``22.04``." -msgstr "" +#: ../../source/contributor-how-to-build-docker-images.rst:10 +msgid "" +"Before we can start, we need to meet a few prerequisites in our local " +"development environment." +msgstr "시작하기 전에, 로컬 개발 환경에서 몇 가지 전제 조건을 충족해야 합니다." -#: ../../source/contributor-how-to-build-docker-images.rst:65 +#: ../../source/contributor-how-to-build-docker-images.rst:13 +#, fuzzy +msgid "Clone the ``flower`` repository." +msgstr "플라워 레포지토리를 클론합니다." 
+ +#: ../../source/contributor-how-to-build-docker-images.rst:19 +msgid "Verify the Docker daemon is running." +msgstr "Docker 데몬이 실행 중인지 확인하십시오." + +#: ../../source/contributor-how-to-build-docker-images.rst:21 msgid "" -"The following example creates a base image with Python 3.11.0, pip 23.0.1" -" and setuptools 69.0.2:" +"The build instructions that assemble the images are located in the " +"respective Dockerfiles. You can find them in the subdirectories of " +"``src/docker``." msgstr "" +"이미지들을 조합하는 빌드 명령어들은 해당 Dockerfile에 있습니다. \"src/docker\" 의 하위 디렉토리에서 찾을 수 " +"있습니다." -#: ../../source/contributor-how-to-build-docker-images.rst:76 +#: ../../source/contributor-how-to-build-docker-images.rst:24 msgid "" -"The name of image is ``flwr_base`` and the tag ``0.1.0``. Remember that " -"the build arguments as well as the name and tag can be adapted to your " -"needs. These values serve as examples only." +"Flower Docker images are configured via build arguments. Through build " +"arguments, we can make the creation of images more flexible. For example," +" in the base image, we can specify the version of Python to install using" +" the ``PYTHON_VERSION`` build argument. Some of the build arguments have " +"default values, others must be specified when building the image. All " +"available build arguments for each image are listed in one of the tables " +"below." msgstr "" +"Flower Docker는 빌드 전달인자를 통해 구성됩니다. 빌드 argument들을 통해, 이미지를 보다 유연하게 생성할 수 " +"있습니다. 예를 들어, base 이미지에서 \"PYTHON_VERSION\" 빌드 전달인자를 사용하여 Python 버전을 지정할 수" +" 있습니다. 일부 빌드 전달인자들은 기본값이며, 이미지를 빌드할 때 지정해야 합니다. 각 이미지에 사용할 수 있는 모든 빌드 " +"전달인자는 아래 표 중에 있습니다." 
-#: ../../source/contributor-how-to-build-docker-images.rst:80 -msgid "Building the SuperLink image" -msgstr "" +#: ../../source/contributor-how-to-build-docker-images.rst:32 +#, fuzzy +msgid "Building the Base Image" +msgstr "기본 이미지 빌드" -#: ../../source/contributor-how-to-build-docker-images.rst:90 -msgid "``BASE_REPOSITORY``" -msgstr "" +#: ../../source/contributor-how-to-build-docker-images.rst:38 +#: ../../source/contributor-how-to-build-docker-images.rst:104 +msgid "Build argument" +msgstr "빌드 전달인자" -#: ../../source/contributor-how-to-build-docker-images.rst:91 -msgid "The repository name of the base image." -msgstr "" +#: ../../source/contributor-how-to-build-docker-images.rst:39 +#: ../../source/contributor-how-to-build-docker-images.rst:105 +msgid "Description" +msgstr "설명" -#: ../../source/contributor-how-to-build-docker-images.rst:92 -msgid "Defaults to ``flwr/base``." -msgstr "" +#: ../../source/contributor-how-to-build-docker-images.rst:40 +#: ../../source/contributor-how-to-build-docker-images.rst:106 +msgid "Required" +msgstr "필수" -#: ../../source/contributor-how-to-build-docker-images.rst:95 -msgid "The Python version of the base image." -msgstr "" +#: ../../source/contributor-how-to-build-docker-images.rst:41 +#: ../../source/contributor-how-to-build-docker-images.rst:107 +#: ../../source/docker/persist-superlink-state.rst:19 +#: ../../source/docker/pin-version.rst:12 +#: ../../source/docker/set-environment-variables.rst:8 +msgid "Example" +msgstr "예시" + +#: ../../source/contributor-how-to-build-docker-images.rst:42 +msgid "``DISTRO``" +msgstr "``DISTRO``" + +#: ../../source/contributor-how-to-build-docker-images.rst:43 +msgid "The Linux distribution to use as the base image." +msgstr "기본 이미지 사용을 위한 Linux 배포판." 
+ +#: ../../source/contributor-how-to-build-docker-images.rst:44 +#: ../../source/contributor-how-to-build-docker-images.rst:48 +#: ../../source/contributor-how-to-build-docker-images.rst:52 +#: ../../source/contributor-how-to-build-docker-images.rst:68 +#: ../../source/contributor-how-to-build-docker-images.rst:75 +#: ../../source/contributor-how-to-build-docker-images.rst:110 +msgid "No" +msgstr "아니오" + +#: ../../source/contributor-how-to-build-docker-images.rst:45 +msgid "``ubuntu``" +msgstr "``ubuntu``" + +#: ../../source/contributor-how-to-build-docker-images.rst:46 +msgid "``DISTRO_VERSION``" +msgstr "``DISTRO_VERSION``" + +#: ../../source/contributor-how-to-build-docker-images.rst:47 +msgid "Version of the Linux distribution." +msgstr "Linux 배포판 버전." -#: ../../source/contributor-how-to-build-docker-images.rst:96 -msgid "Defaults to ``py3.11``." +#: ../../source/contributor-how-to-build-docker-images.rst:49 +msgid ":substitution-code:`|ubuntu_version|`" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:99 -msgid "The Ubuntu version of the base image." +#: ../../source/contributor-how-to-build-docker-images.rst:50 +msgid "``PYTHON_VERSION``" +msgstr "``PYTHON_VERSION``" + +#: ../../source/contributor-how-to-build-docker-images.rst:51 +msgid "Version of ``python`` to be installed." +msgstr "설치 된 ``python`` 버전." + +#: ../../source/contributor-how-to-build-docker-images.rst:53 +msgid "``3.11`` or ``3.11.1``" +msgstr "``3.11`` 또는 ``3.11.1``" + +#: ../../source/contributor-how-to-build-docker-images.rst:54 +msgid "``PIP_VERSION``" +msgstr "``PIP_VERSION``" + +#: ../../source/contributor-how-to-build-docker-images.rst:55 +msgid "Version of ``pip`` to be installed." +msgstr "설치 된 ``pip`` 버전." 
+ +#: ../../source/contributor-how-to-build-docker-images.rst:56 +#: ../../source/contributor-how-to-build-docker-images.rst:60 +#: ../../source/contributor-how-to-build-docker-images.rst:64 +#: ../../source/contributor-how-to-build-docker-images.rst:114 +msgid "Yes" +msgstr "예" + +#: ../../source/contributor-how-to-build-docker-images.rst:57 +msgid ":substitution-code:`|pip_version|`" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:100 -msgid "Defaults to ``ubuntu22.04``." +#: ../../source/contributor-how-to-build-docker-images.rst:58 +msgid "``SETUPTOOLS_VERSION``" +msgstr "``SETUPTOOLS_VERSION``" + +#: ../../source/contributor-how-to-build-docker-images.rst:59 +msgid "Version of ``setuptools`` to be installed." +msgstr "설치 된 ``setuptools`` 버전." + +#: ../../source/contributor-how-to-build-docker-images.rst:61 +#, fuzzy +msgid ":substitution-code:`|setuptools_version|`" +msgstr "``SETUPTOOLS_VERSION``" + +#: ../../source/contributor-how-to-build-docker-images.rst:62 +msgid "``FLWR_VERSION``" +msgstr "``FLWR_VERSION``" + +#: ../../source/contributor-how-to-build-docker-images.rst:63 +msgid "Version of Flower to be installed." +msgstr "설치 된 Flower 버전." + +#: ../../source/contributor-how-to-build-docker-images.rst:65 +msgid ":substitution-code:`|stable_flwr_version|`" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:102 +#: ../../source/contributor-how-to-build-docker-images.rst:66 msgid "``FLWR_PACKAGE``" -msgstr "" +msgstr "``FLWR_PACKAGE``" -#: ../../source/contributor-how-to-build-docker-images.rst:103 -msgid "The PyPI package to install." -msgstr "" +#: ../../source/contributor-how-to-build-docker-images.rst:67 +msgid "The Flower package to be installed." +msgstr "설치 할 Flower 패키지." -#: ../../source/contributor-how-to-build-docker-images.rst:104 -msgid "Defaults to ``flwr``." 
+#: ../../source/contributor-how-to-build-docker-images.rst:69 +msgid "``flwr`` or ``flwr-nightly``" +msgstr "``flwr`` 또는 ``flwr-nightly``" + +#: ../../source/contributor-how-to-build-docker-images.rst:70 +#, fuzzy +msgid "``FLWR_VERSION_REF``" +msgstr "``FLWR_VERSION``" + +#: ../../source/contributor-how-to-build-docker-images.rst:71 +msgid "" +"A `direct reference " +"`_ without the ``@`` specifier. If both " +"``FLWR_VERSION`` and ``FLWR_VERSION_REF`` are specified, the " +"``FLWR_VERSION_REF`` has precedence." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:106 -msgid "``FLWR_VERSION``" +#: ../../source/contributor-how-to-build-docker-images.rst:76 +#, fuzzy +msgid "`Direct Reference Examples`_" +msgstr "예시 요청" + +#: ../../source/contributor-how-to-build-docker-images.rst:78 +#, fuzzy +msgid "" +"The following example creates a base Ubuntu/Alpine image with Python " +"``3.11.0``, pip :substitution-code:`|pip_version|`, setuptools " +":substitution-code:`|setuptools_version|` and Flower :substitution-" +"code:`|stable_flwr_version|`:" msgstr "" +"다음 예시에서는 Python 3.11.0, pip 23.0.1, setuptools 및 Flower 1.8.0으로 기본 " +"Ubuntu/Alpine 이미지를 만듭니다:" -#: ../../source/contributor-how-to-build-docker-images.rst:107 -msgid "Version of Flower to be installed." +#: ../../source/contributor-how-to-build-docker-images.rst:93 +#, fuzzy +msgid "" +"In this example, we specify our image name as ``flwr_base`` and the tag " +"as ``0.1.0``. Remember that the build arguments as well as the name and " +"tag can be adapted to your needs. These values serve as examples only." msgstr "" +"이미지의 이름은 ``flwr_base``이고 태그는 ``0.1.0``입니다. 필요에 따라 빌드 전달인자들 뿐만 아니라 이름과 태그도" +" 정할 수 있습니다. 이 값들은 예시일 뿐입니다." 
+ +#: ../../source/contributor-how-to-build-docker-images.rst:98 +#, fuzzy +msgid "Building a Flower Binary Image" +msgstr "기본 이미지 빌드" + +#: ../../source/contributor-how-to-build-docker-images.rst:108 +msgid "``BASE_REPOSITORY``" +msgstr "``BASE_REPOSITORY``" #: ../../source/contributor-how-to-build-docker-images.rst:109 -msgid "``1.8.0``" -msgstr "" +msgid "The repository name of the base image." +msgstr "기본 이미지의 레포지토리 이름." + +#: ../../source/contributor-how-to-build-docker-images.rst:111 +msgid "``flwr/base``" +msgstr "``flwr/base``" #: ../../source/contributor-how-to-build-docker-images.rst:112 -msgid "" -"The following example creates a SuperLink image with the official Flower " -"base image py3.11-ubuntu22.04 and Flower 1.8.0:" +msgid "``BASE_IMAGE``" +msgstr "``BASE_IMAGE``" + +#: ../../source/contributor-how-to-build-docker-images.rst:113 +msgid "The Tag of the Flower base image." +msgstr "Flower 기본 이미지의 태그." + +#: ../../source/contributor-how-to-build-docker-images.rst:115 +msgid ":substitution-code:`|stable_flwr_version|-py3.11-ubuntu|ubuntu_version|`" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:122 +#: ../../source/contributor-how-to-build-docker-images.rst:117 msgid "" -"The name of image is ``flwr_superlink`` and the tag ``0.1.0``. Remember " -"that the build arguments as well as the name and tag can be adapted to " -"your needs. These values serve as examples only." +"For example, to build a SuperLink image with the latest Flower version, " +"Python 3.11 and Ubuntu 22.04, run the following:" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:125 +#: ../../source/contributor-how-to-build-docker-images.rst:128 +#, fuzzy msgid "" "If you want to use your own base image instead of the official Flower " -"base image, all you need to do is set the ``BASE_REPOSITORY``, " -"``PYTHON_VERSION`` and ``UBUNTU_VERSION`` build arguments." 
+"base image, all you need to do is set the ``BASE_REPOSITORY`` build " +"argument to ``flwr_base`` (as we've specified above)." msgstr "" +"공식 Flower 기본 이미지 대신 자체 기본 이미지를 사용 하길 원한다면, ``BASE_REPOSITORY`` 빌드 전달인자들을 " +"설정해야 합니다." -#: ../../source/contributor-how-to-build-docker-images.rst:138 +#: ../../source/contributor-how-to-build-docker-images.rst:140 msgid "After creating the image, we can test whether the image is working:" -msgstr "" +msgstr "이미지 생성 후에, 이미지가 작동하는지 테스트할 수 있습니다:" + +#: ../../source/contributor-how-to-build-docker-images.rst:147 +#, fuzzy +msgid "Direct Reference Examples" +msgstr "예시 요청" #: ../../source/contributor-how-to-contribute-translations.rst:2 msgid "Contribute translations" -msgstr "" +msgstr "번역 기여" #: ../../source/contributor-how-to-contribute-translations.rst:4 msgid "" @@ -305,6 +505,11 @@ msgid "" "also be a great opportunity for those wanting to become open source " "contributors with little prerequisites." msgstr "" +"`Flower 1.5 `_ 부터 문서 페이지에 번역을 도입했지만, 아시다시피 번역이 불안전한 " +"경우가 많습니다. 만일 영어 이외의 언어를 사용한다면, 많은 사람들이 연합 학습에 접근할 수 있도록 번역 작업에 기여함으로써 저희의" +" 노력에 도움을 주실 수 있습니다! 이는 전제 조건이 거의 없는 오픈 소스 기여자가 되고자 하는 사람들에게 좋은 기회가 될 수도 " +"있습니다." #: ../../source/contributor-how-to-contribute-translations.rst:13 msgid "" @@ -312,10 +517,12 @@ msgid "" "`_, this " "where most of the work will happen." msgstr "" +"번역 프로젝트는 `Weblate `_에서 공개적으로 진행되며, 대부분의 작업이 이곳에서 이루어집니다." #: ../../source/contributor-how-to-contribute-translations.rst:18 msgid "Contribute to existing languages" -msgstr "" +msgstr "기존 언어에 기여하기" #: ../../source/contributor-how-to-contribute-translations.rst:23 msgid "" @@ -325,34 +532,43 @@ msgid "" " profile settings can be found `here " "`_." msgstr "" +"기여를 하기 위해 가장 먼저 해야 할 일은 해당 `page " +"`_에서 무료 Weblate 계정을 만드는 " +"것입니다. 프로필 설정에 대한 자세한 정보는 `here " +"`_를 참조하세요." 
-#: ../../source/contributor-how-to-contribute-translations.rst:29 +#: ../../source/contributor-how-to-contribute-translations.rst:28 msgid "" "Once you are signed in to Weblate, you can navigate to the `Flower " "Framework project `_. Here, you should see the different existing languages" " that can be found on the website." msgstr "" +"Weblate에 로그인한 후, `Flower Framework project " +"`_로 이동할 수 " +"있습니다. 여기에서 웹사이트에 있는 다양한 기존 언어들을 확인할 수 있습니다." -#: ../../source/contributor-how-to-contribute-translations.rst:34 +#: ../../source/contributor-how-to-contribute-translations.rst:32 msgid "" "Once you have selected the language you want to contribute to, you should" " see a similar interface to this:" -msgstr "" +msgstr "기여하고자 하는 언어를 선택하면, 다음과 같은 인터페이스가 나타납니다:" -#: ../../source/contributor-how-to-contribute-translations.rst:39 +#: ../../source/contributor-how-to-contribute-translations.rst:37 msgid "" "The most straight forward option here is to click on the ``Translate`` " "button on the top right (in the ``Translation status`` section). This " "will automatically bring you to the translation interface for " "untranslated strings." msgstr "" +"여기서 가장 간단한 옵션은 오른쪽 상단(``Translation status`` 부분)에 있는 ``Translate`` 버튼을 " +"클릭하는 것 입니다. 번역되지 않은 문장에 대한 번역 인터페이스로 자동으로 이동합니다." -#: ../../source/contributor-how-to-contribute-translations.rst:43 +#: ../../source/contributor-how-to-contribute-translations.rst:41 msgid "This is what the interface looks like:" -msgstr "" +msgstr "인터페이스는 다음과 같습니다:" -#: ../../source/contributor-how-to-contribute-translations.rst:47 +#: ../../source/contributor-how-to-contribute-translations.rst:45 msgid "" "You input your translation in the text box at the top and then, once you " "are happy with it, you either press ``Save and continue`` (to save the " @@ -361,8 +577,12 @@ msgid "" "your translation to suggestions for other users to view), or ``Skip`` (to" " go to the next untranslated string without saving anything)." 
msgstr "" +"번역문을 상단의 텍스트 상자에 입력한 후, 번역이 만족스러우면 ``Save and continue``(번역을 저장하고 다음 미번역 " +"문장으로 이동), ``Save and stay``(번역을 저장하고 해당 페이지에 머무르기), ``Suggest`` (다른 사용자가 " +"볼 수 있도록 번역을 제안 항목에 추가), ``Skip``(아무것도 저장하지 않고 다음 미번역 문장으로 이동) 중 하나를 선택하면 " +"됩니다." -#: ../../source/contributor-how-to-contribute-translations.rst:54 +#: ../../source/contributor-how-to-contribute-translations.rst:51 msgid "" "In order to help with the translations, you can see on the bottom the " "``Nearby strings``, the ``Comments`` (from other contributors), the " @@ -370,148 +590,41 @@ msgid "" "translations in ``Other languages``, and the ``History`` of translations " "for this string." msgstr "" +"번역에 도움을 주기 위해 하단에서 ``주변 문자열``, ``의견``(다른 기여자의), ``자동 제안``(기계 번역의), ``다른 " +"언어``의 번역 및 해당 문장의 번역 ``히스토리``를 볼 수 있습니다." -#: ../../source/contributor-how-to-contribute-translations.rst:59 +#: ../../source/contributor-how-to-contribute-translations.rst:56 msgid "" "On the right, under the ``String information`` section, you can also " "click the link under ``Source string location`` in order to view the " "source of the doc file containing the string." -msgstr "" +msgstr "오른쪽의 ``문자열 정보``에서 ``원본 문자열 위치``를 클릭하여 해당 문장이 포함된 문서의 파일 소스를 볼 수도 있습니다." -#: ../../source/contributor-how-to-contribute-translations.rst:63 +#: ../../source/contributor-how-to-contribute-translations.rst:60 msgid "" "For more information about translating using Weblate, you can check out " "this `in-depth guide " "`_." msgstr "" +"Weblate를 통한 번역에 대한 자세한 정보는 `in-depth guide " +"`_를 확인하세요." 
-#: ../../source/contributor-how-to-contribute-translations.rst:67 +#: ../../source/contributor-how-to-contribute-translations.rst:64 msgid "Add new languages" -msgstr "" +msgstr "새 언어 추가" -#: ../../source/contributor-how-to-contribute-translations.rst:69 +#: ../../source/contributor-how-to-contribute-translations.rst:66 msgid "" "If you want to add a new language, you will first have to contact us, " "either on `Slack `_, or by opening an issue" " on our `GitHub repo `_." msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:2 -msgid "Creating New Messages" -msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:4 -msgid "" -"This is a simple guide for creating a new type of message between the " -"server and clients in Flower." -msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:6 -msgid "" -"Let's suppose we have the following example functions in " -":code:`server.py` and :code:`numpy_client.py`..." -msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:8 -msgid "Server's side:" -msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:17 -msgid "Client's side:" -msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:26 -msgid "" -"Let's now see what we need to implement in order to get this simple " -"function between the server and client to work!" -msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:30 -msgid "Message Types for Protocol Buffers" -msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:32 -msgid "" -"The first thing we need to do is to define a message type for the RPC " -"system in :code:`transport.proto`. Note that we have to do it for both " -"the request and response messages. For more details on the syntax of " -"proto3, please see the `official documentation `_." 
-msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:35 -msgid "Within the :code:`ServerMessage` block:" -msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:52 -msgid "Within the ClientMessage block:" -msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:70 -msgid "" -"Make sure to also add a field of the newly created message type in " -":code:`oneof msg`." -msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:72 -msgid "Once that is done, we will compile the file with:" -msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:78 -msgid "If it compiles successfully, you should see the following message:" -msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:87 -msgid "Serialization and Deserialization Functions" -msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:89 -msgid "" -"Our next step is to add functions to serialize and deserialize Python " -"datatypes to or from our defined RPC message types. You should add these " -"functions in :code:`serde.py`." -msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:91 -msgid "The four functions:" -msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:112 -msgid "Sending the Message from the Server" -msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:114 -msgid "" -"Now write the request function in your Client Proxy class (e.g., " -":code:`grpc_client_proxy.py`) using the serde functions you just created:" -msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:128 -msgid "Receiving the Message by the Client" -msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:130 -msgid "" -"Last step! Modify the code in :code:`message_handler.py` to check the " -"field of your message and call the :code:`example_response` function. " -"Remember to use the serde functions!" 
-msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:132 -msgid "Within the handle function:" -msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:139 -msgid "And add a new function:" -msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:149 -msgid "Hopefully, when you run your program you will get the intended result!" -msgstr "" +"새 언어를 추가하려면, `Slack `에 문의하거나 `GitHub repo " +"`_에서 issue에 들어가 문의 해야 합니다." #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:2 msgid "Develop in VSCode Dev Containers" -msgstr "" +msgstr "VSCode Dev Container에서 개발" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:4 msgid "" @@ -520,21 +633,29 @@ msgid "" "tests. For this purpose we are using the VSCode Remote Containers " "extension. What is it? Read the following quote:" msgstr "" +"Flower 프레임워크 작업시, 모든 기여자들이 코드 포맷팅이나 테스트 실행을 위해 동일한 개발 환경을 사용하길 원합니다. 이를 " +"위해 VSCode Remote Containers 확장을 사용하고 있습니다. 그것이 무엇인지 알아보기 위해 다음 인용문을 " +"읽어보세요:" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:7 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:8 +#, fuzzy msgid "" "The Visual Studio Code Remote - Containers extension lets you use a " "Docker container as a fully-featured development environment. It allows " "you to open any folder inside (or mounted into) a container and take " "advantage of Visual Studio Code's full feature set. A " -":code:`devcontainer.json` file in your project tells VS Code how to " -"access (or create) a development container with a well-defined tool and " -"runtime stack. This container can be used to run an application or to " -"separate tools, libraries, or runtimes needed for working with a " -"codebase." +"``devcontainer.json`` file in your project tells VS Code how to access " +"(or create) a development container with a well-defined tool and runtime " +"stack. 
This container can be used to run an application or to separate " +"tools, libraries, or runtimes needed for working with a codebase." msgstr "" +"Visual Studio Code Remote - 컨테이너 확장을 사용하면 Docker 컨테이너를 모든 기능을 갖춘 개발 환경으로 " +"사용할 수 있습니다. 이 확장 기능을 사용하면 컨테이너 내부(또는 컨테이너에 마운트된)의 모든 폴더를 열고 Visual Studio" +" Code의 모든 기능을 활용할 수 있습니다. 프로젝트에 있는 :code:`devcontainer.json` 파일은 잘 정의된 " +"도구와 런타임 스택을 사용하여 개발 컨테이너에 액세스(또는 생성)하는 방법을 VS Code에 알려줍니다. 이 컨테이너는 " +"애플리케이션을 실행하거나 코드베이스 작업에 필요한 도구, 라이브러리 또는 런타임을 분리하는 데 사용할 수 있습니다." -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:9 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:16 msgid "" "Workspace files are mounted from the local file system or copied or " "cloned into the container. Extensions are installed and run inside the " @@ -542,29 +663,37 @@ msgid "" "system. This means that you can seamlessly switch your entire development" " environment just by connecting to a different container." msgstr "" +"작업 공간 파일은 로컬 파일 시스템에서 마운트되거나 컨테이너에 복사 또는 클론됩니다. 확장 프로그램은 컨테이너 내부에 설치되고 " +"실행되며, 도구, 플랫폼 및 파일 시스템에 완전한 접근 권한을 갖습니다. 이는 다른 컨테이너에 연결하는 것만으로 전체 개발 환경을 " +"원활하게 전환할 수 있음을 의미합니다." -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:11 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:22 msgid "" "Source: `Official VSCode documentation " "`_" -msgstr "" +msgstr "출처 : 공식 VSCode 문서" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:15 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:26 msgid "Getting started" -msgstr "" +msgstr "시작하기" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:17 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:28 +#, fuzzy msgid "" -"Configuring and setting up the :code:`Dockerfile` as well the " -"configuration for the devcontainer can be a bit more involved. The good " -"thing is you don't have to do it. 
Usually it should be enough to install " -"`Docker `_ on your system and " -"ensure its available on your command line. Additionally, install the " -"`VSCode Containers Extension `_ on your system and ensure its" +" available on your command line. Additionally, install the `VSCode " +"Containers Extension `_." msgstr "" +"`Dockerfile`을 설정하고 구성하는 것과 개발 컨테이너 구성은 약간 복잡할 수 있습니다. 다행히도, 이를 직접 할 필요는 " +"없습니다. 일반적으로 시스템에 `Docker `_를 " +"설치하고 커맨드 라인에서 사용할 수 있는지 확인하는 것으로 충분합니다. 추가로 `VSCode Containers Extension " +"`_을 설치하세요." -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:19 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:35 msgid "" "Now you should be good to go. When starting VSCode, it will ask you to " "run in the container environment and - if you confirm - automatically " @@ -573,37 +702,42 @@ msgid "" "area in the bottom left corner of your VSCode window and select the " "option *(Re)Open Folder in Container*." msgstr "" +"이제 준비가 완료되었습니다. VSCode를 시작하면 컨테이너 환경에서 실행할지를 묻고, 확인하면 자동으로 컨테이너를 빌드하고 사용할" +" 것입니다. VSCode에 수동으로 개발 컨테이너를 사용하도록 지시하려면, 확장을 설치한 후, VSCode 창의 왼쪽 하단에 있는 " +"초록색 부을 클릭하고 *(Re)Open Folder in Container* 옵션을 선택하세요." -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:21 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:41 msgid "" "In some cases your setup might be more involved. For those cases consult " "the following sources:" -msgstr "" +msgstr "경우에 따라 설정이 더 복잡할 수도 있습니다. 
이러한 경우에는 다음 소스를 참조하세요:" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:23 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:44 msgid "" "`Developing inside a Container " "`_" msgstr "" +"`컨테이너 내부 개발`_" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:24 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:46 msgid "" "`Remote development in Containers " "`_" -msgstr "" +msgstr "`컨테이너 원격 개발`_" #: ../../source/contributor-how-to-install-development-versions.rst:2 msgid "Install development versions" -msgstr "" +msgstr "개발 버전 설치하기" #: ../../source/contributor-how-to-install-development-versions.rst:5 msgid "Install development versions of Flower" -msgstr "" +msgstr "Flower 개발 버전 설치하기" #: ../../source/contributor-how-to-install-development-versions.rst:8 msgid "Using Poetry (recommended)" -msgstr "" +msgstr "Poetry 사용하기(권장)" #: ../../source/contributor-how-to-install-development-versions.rst:10 msgid "" @@ -611,190 +745,227 @@ msgid "" "in ``pyproject.toml`` and then reinstall (don't forget to delete " "``poetry.lock`` (``rm poetry.lock``) before running ``poetry install``)." msgstr "" +"PyPI에서 ``flwr`` 사전 릴리스 설치하기: ``pyproject.toml``에서 ``flwr``의 의존성을 업데이트한 " +"다음, 재설치하세요(``poetry 설치``이전에 ``poetry.lock`` (``rm poetry.lock``)를 제거하는 것을" +" 잊지 마세요)." 
-#: ../../source/contributor-how-to-install-development-versions.rst:12 +#: ../../source/contributor-how-to-install-development-versions.rst:14 msgid "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (without " "extras)" -msgstr "" +msgstr "``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (extras 제외)" -#: ../../source/contributor-how-to-install-development-versions.rst:13 +#: ../../source/contributor-how-to-install-development-versions.rst:15 msgid "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true, extras = " "[\"simulation\"] }`` (with extras)" msgstr "" +"``flwr = { version = \"1.0.0a0\", allow-prereleases = true, extras = " +"[\"simulation\"] }`` (extras 포함)" -#: ../../source/contributor-how-to-install-development-versions.rst:15 +#: ../../source/contributor-how-to-install-development-versions.rst:18 msgid "" "Install ``flwr`` from a local copy of the Flower source code via " "``pyproject.toml``:" -msgstr "" +msgstr "``pyproject.toml``을 통해 Flower 소스 코드의 로컬 복사본에서 ``flwr``을 설치하세요:" -#: ../../source/contributor-how-to-install-development-versions.rst:17 +#: ../../source/contributor-how-to-install-development-versions.rst:20 msgid "``flwr = { path = \"../../\", develop = true }`` (without extras)" -msgstr "" +msgstr "``flwr = { path = \"../../\", develop = true }`` (extras 제외)" -#: ../../source/contributor-how-to-install-development-versions.rst:18 +#: ../../source/contributor-how-to-install-development-versions.rst:21 msgid "" "``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] " "}`` (with extras)" msgstr "" +"``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] " +"}`` (extras 포함)" -#: ../../source/contributor-how-to-install-development-versions.rst:20 +#: ../../source/contributor-how-to-install-development-versions.rst:23 msgid "Install ``flwr`` from a local wheel file via ``pyproject.toml``:" -msgstr "" +msgstr "``pyproject.toml``을 통해 로컬 wheel file에서 ``flwr``을 설치하세요:" -#: 
../../source/contributor-how-to-install-development-versions.rst:22 +#: ../../source/contributor-how-to-install-development-versions.rst:25 msgid "" "``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\" }`` (without" " extras)" msgstr "" +"``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\" }`` (extras " +"제외)" -#: ../../source/contributor-how-to-install-development-versions.rst:23 +#: ../../source/contributor-how-to-install-development-versions.rst:26 msgid "" "``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\", extras = " "[\"simulation\"] }`` (with extras)" msgstr "" +"``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\", extras = " +"[\"simulation\"] }`` (extras 포함)" -#: ../../source/contributor-how-to-install-development-versions.rst:25 +#: ../../source/contributor-how-to-install-development-versions.rst:29 msgid "" "Please refer to the Poetry documentation for further details: `Poetry " "Dependency Specification `_" msgstr "" +"자세한 내용은 Poetry 문서를 참고하세요: `Poetry Dependency Specification `_" -#: ../../source/contributor-how-to-install-development-versions.rst:28 +#: ../../source/contributor-how-to-install-development-versions.rst:33 msgid "Using pip (recommended on Colab)" -msgstr "" +msgstr "pip 사용하기(Colab에서 권장)" -#: ../../source/contributor-how-to-install-development-versions.rst:30 +#: ../../source/contributor-how-to-install-development-versions.rst:35 msgid "Install a ``flwr`` pre-release from PyPI:" -msgstr "" +msgstr "PyPI에서 ``flwr`` 사전 릴리즈를 설치하기:" -#: ../../source/contributor-how-to-install-development-versions.rst:32 +#: ../../source/contributor-how-to-install-development-versions.rst:37 msgid "``pip install -U --pre flwr`` (without extras)" -msgstr "" +msgstr "``pip install -U --pre flwr`` (extras 제외)" -#: ../../source/contributor-how-to-install-development-versions.rst:33 -msgid "``pip install -U --pre flwr[simulation]`` (with extras)" -msgstr "" +#: ../../source/contributor-how-to-install-development-versions.rst:38 
+msgid "``pip install -U --pre 'flwr[simulation]'`` (with extras)" +msgstr "``pip install -U --pre 'flwr[simulation]'`` (extras 포함)" -#: ../../source/contributor-how-to-install-development-versions.rst:35 +#: ../../source/contributor-how-to-install-development-versions.rst:40 msgid "" "Python packages can be installed from git repositories. Use one of the " "following commands to install the Flower directly from GitHub." msgstr "" +"Python 패키지는 git 저장소에서 설치할 수 있습니다. 다음 명령어 중 하나를 사용하여 GitHub에서 직접 Flower를 " +"설치하세요." -#: ../../source/contributor-how-to-install-development-versions.rst:37 +#: ../../source/contributor-how-to-install-development-versions.rst:43 msgid "Install ``flwr`` from the default GitHub branch (``main``):" -msgstr "" +msgstr "기본 GitHub branch (``main``)에서 ``flwr`` 를 설치하기:" -#: ../../source/contributor-how-to-install-development-versions.rst:39 +#: ../../source/contributor-how-to-install-development-versions.rst:45 msgid "" "``pip install flwr@git+https://github.com/adap/flower.git`` (without " "extras)" -msgstr "" +msgstr "``pip install flwr@git+https://github.com/adap/flower.git`` (extras 제외)" -#: ../../source/contributor-how-to-install-development-versions.rst:40 +#: ../../source/contributor-how-to-install-development-versions.rst:46 msgid "" -"``pip install flwr[simulation]@git+https://github.com/adap/flower.git`` " -"(with extras)" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'``" +" (with extras)" msgstr "" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'``" +" (extras 포함)" -#: ../../source/contributor-how-to-install-development-versions.rst:42 +#: ../../source/contributor-how-to-install-development-versions.rst:49 msgid "Install ``flwr`` from a specific GitHub branch (``branch-name``):" -msgstr "" +msgstr "특정 GitHub branch (``branch-name``)에서 ``flwr`` 설치하기:" -#: ../../source/contributor-how-to-install-development-versions.rst:44 +#: 
../../source/contributor-how-to-install-development-versions.rst:51 msgid "" "``pip install flwr@git+https://github.com/adap/flower.git@branch-name`` " "(without extras)" msgstr "" +"``pip install flwr@git+https://github.com/adap/flower.git@branch-name`` " +"(extras 제외)" -#: ../../source/contributor-how-to-install-development-versions.rst:45 +#: ../../source/contributor-how-to-install-development-versions.rst:53 msgid "" -"``pip install flwr[simulation]@git+https://github.com/adap/flower.git" -"@branch-name`` (with extras)" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git" +"@branch-name'`` (with extras)" msgstr "" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git" +"@branch-name'`` (extras 포함)" -#: ../../source/contributor-how-to-install-development-versions.rst:49 +#: ../../source/contributor-how-to-install-development-versions.rst:57 msgid "Open Jupyter Notebooks on Google Colab" -msgstr "" +msgstr "Google Colab에서 Jupyter Notebooks 열기" -#: ../../source/contributor-how-to-install-development-versions.rst:51 +#: ../../source/contributor-how-to-install-development-versions.rst:59 msgid "" "Open the notebook ``doc/source/tutorial-series-get-started-with-flower-" "pytorch.ipynb``:" msgstr "" +"``doc/source/tutorial-series-get-started-with-flower-" +"pytorch.ipynb``notebook을 엽니다:" -#: ../../source/contributor-how-to-install-development-versions.rst:53 +#: ../../source/contributor-how-to-install-development-versions.rst:61 msgid "" "https://colab.research.google.com/github/adap/flower/blob/main/doc/source" "/tutorial-series-get-started-with-flower-pytorch.ipynb" msgstr "" +"https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-series-get-started-with-flower-pytorch.ipynb" -#: ../../source/contributor-how-to-install-development-versions.rst:55 +#: ../../source/contributor-how-to-install-development-versions.rst:63 msgid "" "Open a development version of the same notebook from branch 
`branch-name`" " by changing ``main`` to ``branch-name`` (right after ``blob``):" msgstr "" +"``main``을 ``branch-name``(``blob`` 바로 뒤)으로 변경하여 동일한 notebook의 개발 버전을 브랜치 " +"`branch-name`에서 엽니다 :" -#: ../../source/contributor-how-to-install-development-versions.rst:57 +#: ../../source/contributor-how-to-install-development-versions.rst:66 msgid "" "https://colab.research.google.com/github/adap/flower/blob/branch-" "name/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb" msgstr "" +"https://colab.research.google.com/github/adap/flower/blob/branch-" +"name/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb" -#: ../../source/contributor-how-to-install-development-versions.rst:59 +#: ../../source/contributor-how-to-install-development-versions.rst:68 msgid "Install a `whl` on Google Colab:" -msgstr "" +msgstr "Google Colab에서 `whl` 설치하기:" -#: ../../source/contributor-how-to-install-development-versions.rst:61 +#: ../../source/contributor-how-to-install-development-versions.rst:70 msgid "" "In the vertical icon grid on the left hand side, select ``Files`` > " "``Upload to session storage``" -msgstr "" +msgstr "왼쪽의 수직 아이콘 그리드에서 ``Files`` > ``Upload to session storage``를 선택하세요" -#: ../../source/contributor-how-to-install-development-versions.rst:62 +#: ../../source/contributor-how-to-install-development-versions.rst:72 msgid "Upload the whl (e.g., ``flwr-1.8.0-py3-none-any.whl``)" -msgstr "" +msgstr "whl (예:``flwr-1.8.0-py3-none-any.whl``)을 업로드하세요" -#: ../../source/contributor-how-to-install-development-versions.rst:63 +#: ../../source/contributor-how-to-install-development-versions.rst:73 msgid "" "Change ``!pip install -q 'flwr[simulation]' torch torchvision " "matplotlib`` to ``!pip install -q 'flwr-1.8.0-py3-none-" "any.whl[simulation]' torch torchvision matplotlib``" msgstr "" +"``!pip install -q 'flwr[simulation]' torch torchvision matplotlib``를 " +"``!pip install -q 'flwr-1.8.0-py3-none-any.whl[simulation]' torch " +"torchvision 
matplotlib``로 바꾸세요" #: ../../source/contributor-how-to-release-flower.rst:2 msgid "Release Flower" -msgstr "" +msgstr "Flower 릴리즈 하기" #: ../../source/contributor-how-to-release-flower.rst:4 msgid "" "This document describes the current release process. It may or may not " "change in the future." -msgstr "" +msgstr "이 문서는 현재 릴리즈 과정을 설명합니다. 이는 앞으로 변경될 수도 있습니다." -#: ../../source/contributor-how-to-release-flower.rst:7 +#: ../../source/contributor-how-to-release-flower.rst:8 msgid "During the release" -msgstr "" +msgstr "릴리즈 동안에" -#: ../../source/contributor-how-to-release-flower.rst:9 +#: ../../source/contributor-how-to-release-flower.rst:10 msgid "" "The version number of a release is stated in ``pyproject.toml``. To " "release a new version of Flower, the following things need to happen (in " "that order):" msgstr "" +"릴리즈의 버전 번호는 ``pyproject.toml``에 명시되어 있습니다. Flower의 새 버전을 릴리즈하려면 다음 작업이 " +"순서대로 수행되어야 합니다:" -#: ../../source/contributor-how-to-release-flower.rst:11 +#: ../../source/contributor-how-to-release-flower.rst:13 msgid "" "Run ``python3 src/py/flwr_tool/update_changelog.py `` in " "order to add every new change to the changelog (feel free to make manual " "changes to the changelog afterwards until it looks good)." msgstr "" +"모든 새로운 변경 사항을 변경 로그에 추가하기 위해``python3 " +"src/py/flwr_tool/update_changelog.py ``을 실행합니다 (변경 로그가 " +"만족스러워질 때까지 수동으로 변경해도 됩니다)." -#: ../../source/contributor-how-to-release-flower.rst:12 +#: ../../source/contributor-how-to-release-flower.rst:16 msgid "" "Once the changelog has been updated with all the changes, run ``./dev" "/prepare-release-changelog.sh v``, where ```` " @@ -803,8 +974,12 @@ msgid "" "by the version and current date, and it will add a thanking message for " "the contributors. Open a pull request with those changes." msgstr "" +"모든 변경 사항으로 변경 로그가 업데이트되면,``./dev/prepare-release-changelog.sh " +"v``을 실행합니다. 여기서 ````은 ``pyproject.toml``에 명시된 " +"버전 번호입니다 (앞에 ``v``가 추가된 것을 주의하세요). 
이 명령어는 변경 로그의 ``Unreleased``헤더를 해당 버전과" +" 현재 날짜로 교체하고, 기여자들에게 감사 메시지가 추가됩니다. 이러한 변경 사항으로 pull request합니다." -#: ../../source/contributor-how-to-release-flower.rst:13 +#: ../../source/contributor-how-to-release-flower.rst:22 msgid "" "Once the pull request is merged, tag the release commit with the version " "number as soon as the PR is merged: ``git tag v`` (notice " @@ -812,134 +987,144 @@ msgid "" "This will create a draft release on GitHub containing the correct " "artifacts and the relevant part of the changelog." msgstr "" +"pull request가 병합되면, PR이 병합되는 즉시 버전 번호로 릴리즈 커밋에 태그를 지정합니다:``git tag " +"v`` (버전 번호 앞에 ``v``가 추가된 것을 확인), 그 다음 ``git push --tags``. " +"이렇게 하면 올바른 아티팩트와 변경 로그의 관련 부분이 포함된 초안 릴리즈가 GitHub에 생성됩니다." -#: ../../source/contributor-how-to-release-flower.rst:14 +#: ../../source/contributor-how-to-release-flower.rst:26 msgid "Check the draft release on GitHub, and if everything is good, publish it." -msgstr "" +msgstr "GitHub에서 릴리즈 초안을 확인하고, 모든 것이 양호하면 게시하세요." -#: ../../source/contributor-how-to-release-flower.rst:17 +#: ../../source/contributor-how-to-release-flower.rst:29 msgid "After the release" -msgstr "" +msgstr "릴리즈 후에" -#: ../../source/contributor-how-to-release-flower.rst:19 +#: ../../source/contributor-how-to-release-flower.rst:31 msgid "Create a pull request which contains the following changes:" -msgstr "" +msgstr "다음 변경 사항이 포함된 pull request를 만듭니다:" -#: ../../source/contributor-how-to-release-flower.rst:21 +#: ../../source/contributor-how-to-release-flower.rst:33 msgid "Increase the minor version in ``pyproject.toml`` by one." -msgstr "" +msgstr "``pyproject.toml``의 마이너 버전을 하나씩 늘립니다." -#: ../../source/contributor-how-to-release-flower.rst:22 +#: ../../source/contributor-how-to-release-flower.rst:34 msgid "Update all files which contain the current version number if necessary." -msgstr "" +msgstr "필요한 경우 현재 버전 번호가 포함된 모든 파일을 업데이트합니다." 
-#: ../../source/contributor-how-to-release-flower.rst:23 +#: ../../source/contributor-how-to-release-flower.rst:35 msgid "Add a new ``Unreleased`` section in ``changelog.md``." -msgstr "" +msgstr "``changelog.md``에 ``Unreleased`` 섹션을 새로 추가합니다." -#: ../../source/contributor-how-to-release-flower.rst:25 +#: ../../source/contributor-how-to-release-flower.rst:37 msgid "" "Merge the pull request on the same day (i.e., before a new nightly " "release gets published to PyPI)." -msgstr "" +msgstr "pull request를 같은 날(즉, 새로운 nightly 릴리즈가 PyPI에 게시되기 전에) 병합하세요." -#: ../../source/contributor-how-to-release-flower.rst:28 +#: ../../source/contributor-how-to-release-flower.rst:41 msgid "Publishing a pre-release" -msgstr "" +msgstr "사전 릴리즈 게시" -#: ../../source/contributor-how-to-release-flower.rst:31 +#: ../../source/contributor-how-to-release-flower.rst:44 msgid "Pre-release naming" -msgstr "" +msgstr "사전 릴리즈 이름" -#: ../../source/contributor-how-to-release-flower.rst:33 +#: ../../source/contributor-how-to-release-flower.rst:46 msgid "" "PyPI supports pre-releases (alpha, beta, release candidate). Pre-releases" " MUST use one of the following naming patterns:" -msgstr "" +msgstr "PyPI는 사전 릴리즈(알파, 베타, 릴리스 후보)를 지원합니다. 
사전 릴리즈는 반드시 다음 명명 패턴 중 하나를 사용해야 합니다:" -#: ../../source/contributor-how-to-release-flower.rst:35 +#: ../../source/contributor-how-to-release-flower.rst:49 msgid "Alpha: ``MAJOR.MINOR.PATCHaN``" -msgstr "" +msgstr "Alpha: ``MAJOR.MINOR.PATCHaN``" -#: ../../source/contributor-how-to-release-flower.rst:36 +#: ../../source/contributor-how-to-release-flower.rst:50 msgid "Beta: ``MAJOR.MINOR.PATCHbN``" -msgstr "" +msgstr "Beta: ``MAJOR.MINOR.PATCHbN``" -#: ../../source/contributor-how-to-release-flower.rst:37 +#: ../../source/contributor-how-to-release-flower.rst:51 msgid "Release candidate (RC): ``MAJOR.MINOR.PATCHrcN``" -msgstr "" +msgstr "Release candidate (RC): ``MAJOR.MINOR.PATCHrcN``" -#: ../../source/contributor-how-to-release-flower.rst:39 +#: ../../source/contributor-how-to-release-flower.rst:53 msgid "Examples include:" -msgstr "" +msgstr "예시:" -#: ../../source/contributor-how-to-release-flower.rst:41 +#: ../../source/contributor-how-to-release-flower.rst:55 msgid "``1.0.0a0``" -msgstr "" +msgstr "``1.0.0a0``" -#: ../../source/contributor-how-to-release-flower.rst:42 +#: ../../source/contributor-how-to-release-flower.rst:56 msgid "``1.0.0b0``" -msgstr "" +msgstr "``1.0.0b0``" -#: ../../source/contributor-how-to-release-flower.rst:43 +#: ../../source/contributor-how-to-release-flower.rst:57 msgid "``1.0.0rc0``" -msgstr "" +msgstr "``1.0.0rc0``" -#: ../../source/contributor-how-to-release-flower.rst:44 +#: ../../source/contributor-how-to-release-flower.rst:58 msgid "``1.0.0rc1``" -msgstr "" +msgstr "``1.0.0rc1``" -#: ../../source/contributor-how-to-release-flower.rst:46 +#: ../../source/contributor-how-to-release-flower.rst:60 msgid "" "This is in line with PEP-440 and the recommendations from the Python " "Packaging Authority (PyPA):" -msgstr "" +msgstr "이는 PEP-440 및 Python Packaging Authority (PyPA)의 권장 사항과 일치합니다:" -#: ../../source/contributor-how-to-release-flower.rst:49 +#: ../../source/contributor-how-to-release-flower.rst:63 msgid "`PEP-440 `_" -msgstr "" 
+msgstr "`PEP-440 `_" -#: ../../source/contributor-how-to-release-flower.rst:50 +#: ../../source/contributor-how-to-release-flower.rst:64 msgid "" "`PyPA Choosing a versioning scheme " "`_" msgstr "" +"`PyPA 버전 관리 체계 선택하기 `_" -#: ../../source/contributor-how-to-release-flower.rst:52 +#: ../../source/contributor-how-to-release-flower.rst:67 msgid "" "Note that the approach defined by PyPA is not compatible with SemVer " "2.0.0 spec, for details consult the `Semantic Versioning Specification " "`_ (specifically item " "11 on precedence)." msgstr "" +"PyPA에서 정의한 접근 방식은 SemVer 2.0.0 사양과 호환되지 않으며, 자세한 내용은`Semantic Versioning " +"관리 사양 `_ (특히 항목 11이 " +"우선순위)을 참조하세요." -#: ../../source/contributor-how-to-release-flower.rst:55 +#: ../../source/contributor-how-to-release-flower.rst:73 msgid "Pre-release classification" -msgstr "" +msgstr "사전 릴리즈 분류" -#: ../../source/contributor-how-to-release-flower.rst:57 +#: ../../source/contributor-how-to-release-flower.rst:75 msgid "Should the next pre-release be called alpha, beta, or release candidate?" -msgstr "" +msgstr "다음 사전 릴리즈를 알파, 베타 또는 릴리스 후보라고 불러야 하나요?" 
-#: ../../source/contributor-how-to-release-flower.rst:59 +#: ../../source/contributor-how-to-release-flower.rst:77 msgid "" "RC: feature complete, no known issues (apart from issues that are " "classified as \"won't fix\" for the next stable release) - if no issues " "surface this will become the next stable release" msgstr "" +"RC: 기능 완료, 알려진 문제 없음(다음 stable 릴리즈에서 \"수정되지 않음\"으로 분류된 문제 제외) - 문제가 나타나지 " +"않으면 다음 stable 릴리즈가 됩니다" -#: ../../source/contributor-how-to-release-flower.rst:60 +#: ../../source/contributor-how-to-release-flower.rst:80 msgid "Beta: feature complete, allowed to have known issues" -msgstr "" +msgstr "베타: 기능 완료, 알려진 문제 발생 가능" -#: ../../source/contributor-how-to-release-flower.rst:61 +#: ../../source/contributor-how-to-release-flower.rst:81 msgid "Alpha: not feature complete, allowed to have known issues" -msgstr "" +msgstr "알파: 기능 미완성, 알려진 문제가 있을 수 있음" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:2 msgid "Set up a virtual env" -msgstr "" +msgstr "가상 환경 설정" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:4 msgid "" @@ -948,157 +1133,182 @@ msgid "" "environment with pyenv virtualenv, poetry, or Anaconda. You can follow " "the instructions or choose your preferred setup." msgstr "" +"가상 환경 내에서 파이썬 설정을 실행하는 것이 좋습니다. 이 가이드에서는 pyenv virtualenv, poetry 또는 " +"Anaconda를 사용하여 가상 환경을 만드는 세 가지 예제를 보여줍니다. 안내를 따르거나 원하는 설정을 선택할 수 있습니다." -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:9 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:10 msgid "Python Version" -msgstr "" +msgstr "Python 버전" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:11 -#: ../../source/how-to-install-flower.rst:8 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:12 +#: ../../source/how-to-install-flower.rst:7 msgid "" -"Flower requires at least `Python 3.8 `_, " +"Flower requires at least `Python 3.9 `_, " "but `Python 3.10 `_ or above is " "recommended." 
msgstr "" +"Flower는 `Python 3.9 `_이상이 필요하지만, `Python " +"3.10 `_이상을 권장합니다." -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:14 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:17 msgid "" "Due to a known incompatibility with `ray " "`_, we currently recommend utilizing at " "most `Python 3.11 `_ for running Flower " "simulations." msgstr "" +"`Ray `__와 호환되지 않는 것으로 알려져 있으므로, 현재 Flower" +" 시뮬레이션을 실행할 때는 최대 `Python 3.11 `_을 사용하는 것이" +" 좋습니다." -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:19 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:22 msgid "Virtualenv with Pyenv/Virtualenv" -msgstr "" +msgstr "Pyenv/Virtualenv를 사용한 가상 환경" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:21 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:24 msgid "" "One of the recommended virtual environment is `pyenv " "`_/`virtualenv `_. Please see `Flower examples " "`_ for details." msgstr "" +"권장 가상 환경 중 하나는 `pyenv `_/`virtualenv " +"`_입니다. 자세한 내용은 `Flower " +"examples `_를 참조하세요." 
-#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:23 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:29 msgid "" "Once Pyenv is set up, you can use it to install `Python Version 3.10 " "`_ or above:" msgstr "" +"Pyenv가 설정되면 이를 사용하여 'Python 버전 3.10 `_ " +"이상'을 설치할 수 있습니다:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:29 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:36 msgid "Create the virtualenv with:" -msgstr "" +msgstr "가상 환경을 만듭니다:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:36 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:42 msgid "Activate the virtualenv by running the following command:" -msgstr "" +msgstr "다음 명령을 실행하여 가상 환경을 활성화합니다:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:44 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:49 msgid "Virtualenv with Poetry" -msgstr "" +msgstr "Poetry를 사용한 가상 환경" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:46 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:51 msgid "" "The Flower examples are based on `Poetry `_ to manage dependencies. After installing Poetry you " "simply create a virtual environment with:" msgstr "" +"Flower examples은 의존성을 관리하기 위해 `Poetry `_를 기반으로 합니다. 
Poetry를 설치한 후 가상 환경을 생성하기만 하면 됩니다:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:52 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:58 msgid "" "If you open a new terminal you can activate the previously created " "virtual environment with the following command:" -msgstr "" +msgstr "새 터미널을 열면 다음 명령을 사용하여 이전에 생성한 가상 환경을 활성화할 수 있습니다:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:60 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:66 msgid "Virtualenv with Anaconda" -msgstr "" +msgstr "Anaconda를 사용한 가상 환경" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:62 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:68 +#, fuzzy msgid "" "If you prefer to use Anaconda for your virtual environment then install " "and setup the `conda `_ package. After setting it up you can " +"/user-guide/install/index.html>`_ package. After setting it up you can " "create a virtual environment with:" msgstr "" +"가상 환경에서 Anaconda를 사용하려면 `conda " +"`_ 패키지를 설치 및 설정하세요. 설정 후 다음을 사용하여 가상 환경을 만들 수 " +"있습니다:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:68 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:76 msgid "and activate the virtual environment with:" -msgstr "" +msgstr "그 후 가상 환경을 활성화합니다:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:76 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:83 msgid "And then?" -msgstr "" +msgstr "그다음은?" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:78 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:85 msgid "" "As soon as you created your virtual environment you clone one of the " "`Flower examples `_." msgstr "" +"가상 환경을 생성하자마자 'Flower examples " +"`_ 중 하나를 클론합니다." 
#: ../../source/contributor-how-to-write-documentation.rst:2 msgid "Write documentation" -msgstr "" +msgstr "문서 작성" -#: ../../source/contributor-how-to-write-documentation.rst:6 +#: ../../source/contributor-how-to-write-documentation.rst:5 msgid "Project layout" -msgstr "" +msgstr "프로젝트 레이아웃" -#: ../../source/contributor-how-to-write-documentation.rst:8 +#: ../../source/contributor-how-to-write-documentation.rst:7 msgid "" "The Flower documentation lives in the ``doc`` directory. The Sphinx-based" " documentation system supports both reStructuredText (``.rst`` files) and" " Markdown (``.md`` files)." msgstr "" +"Flower 문서는 ``doc`` 디렉토리에 있습니다. Sphinx 기반 문서 시스템은 reStructuredText " +"텍스트(``.rst`` 파일)와 Markdown(``.md`` 파일)을 모두 지원합니다." #: ../../source/contributor-how-to-write-documentation.rst:10 -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:169 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:196 msgid "" "Note that, in order to build the documentation locally (with ``poetry run" " make html``, like described below), `Pandoc " "`_ needs to be installed on the " "system." msgstr "" +"로컬에서 문서를 작성하려면(아래 설명과 같이 ``poetry run make html``로) `Pandoc " +"`_이 시스템에 설치되어 있어야 합니다." 
-#: ../../source/contributor-how-to-write-documentation.rst:14 +#: ../../source/contributor-how-to-write-documentation.rst:15 msgid "Edit an existing page" -msgstr "" +msgstr "기존 페이지 편집" -#: ../../source/contributor-how-to-write-documentation.rst:16 +#: ../../source/contributor-how-to-write-documentation.rst:17 msgid "Edit an existing ``.rst`` (or ``.md``) file under ``doc/source/``" -msgstr "" +msgstr "``doc/source/``에서 기존 ``.rst``(또는 ``.md``) 파일을 편집합니다" -#: ../../source/contributor-how-to-write-documentation.rst:17 +#: ../../source/contributor-how-to-write-documentation.rst:18 #: ../../source/contributor-how-to-write-documentation.rst:27 msgid "Compile the docs: ``cd doc``, then ``poetry run make html``" -msgstr "" +msgstr "문서를 컴파일합니다: ``cd doc``, ``poetry run make html`` 순으로 컴파일합니다" -#: ../../source/contributor-how-to-write-documentation.rst:18 +#: ../../source/contributor-how-to-write-documentation.rst:19 #: ../../source/contributor-how-to-write-documentation.rst:28 msgid "Open ``doc/build/html/index.html`` in the browser to check the result" -msgstr "" +msgstr "브라우저에서 ``doc/build/html/index.html``을 열어 결과를 확인합니다" #: ../../source/contributor-how-to-write-documentation.rst:22 msgid "Create a new page" -msgstr "" +msgstr "새 페이지 만들기" #: ../../source/contributor-how-to-write-documentation.rst:24 msgid "Add new ``.rst`` file under ``doc/source/``" -msgstr "" +msgstr "``doc/source/``에 새 ``.rst`` 파일을 추가합니다" #: ../../source/contributor-how-to-write-documentation.rst:25 msgid "Add content to the new ``.rst`` file" -msgstr "" +msgstr "새 ``.rst`` 파일에 내용을 추가합니다" #: ../../source/contributor-how-to-write-documentation.rst:26 msgid "Link to the new rst from ``index.rst``" -msgstr "" +msgstr "``index.rst``에서 새 rst로 연결합니다" #: ../../source/contributor-ref-good-first-contributions.rst:2 msgid "Good first contributions" -msgstr "" +msgstr "훌륭한 첫 번째 기여" #: ../../source/contributor-ref-good-first-contributions.rst:4 msgid "" @@ -1107,42 +1317,49 @@ msgid "" "where to start to increase your 
chances of getting your PR accepted into " "the Flower codebase." msgstr "" +"Flower에 대한 기여를 환영합니다! 하지만 어디서부터 시작해야 할지 알기란 쉽지 않습니다. 그래서 저희는 여러분의 PR이 " +"Flower 코드베이스에 채택될 가능성을 높이기 위해 어디서부터 시작해야 하는지 몇 가지 권장 사항을 정리해 보았습니다." -#: ../../source/contributor-ref-good-first-contributions.rst:11 +#: ../../source/contributor-ref-good-first-contributions.rst:9 msgid "Where to start" -msgstr "" +msgstr "시작 위치" -#: ../../source/contributor-ref-good-first-contributions.rst:13 +#: ../../source/contributor-ref-good-first-contributions.rst:11 msgid "" "Until the Flower core library matures it will be easier to get PR's " "accepted if they only touch non-core areas of the codebase. Good " "candidates to get started are:" msgstr "" +"Flower 코어 라이브러리가 완성될 때까지는 코드베이스의 비핵심 영역만 건드리는 것이 PR을 승인받기가 더 쉬울 것입니다. " +"시작하기에 좋은 후보자는 다음과 같습니다:" -#: ../../source/contributor-ref-good-first-contributions.rst:17 +#: ../../source/contributor-ref-good-first-contributions.rst:14 msgid "Documentation: What's missing? What could be expressed more clearly?" -msgstr "" +msgstr "문서: 무엇이 누락되었나요? 무엇을 더 명확하게 표현할 수 있을까요?" -#: ../../source/contributor-ref-good-first-contributions.rst:18 +#: ../../source/contributor-ref-good-first-contributions.rst:15 msgid "Baselines: See below." -msgstr "" +msgstr "Baselines: 아래를 참조하세요." -#: ../../source/contributor-ref-good-first-contributions.rst:19 +#: ../../source/contributor-ref-good-first-contributions.rst:16 msgid "Examples: See below." -msgstr "" +msgstr "예시: 아래를 참조하세요." -#: ../../source/contributor-ref-good-first-contributions.rst:23 +#: ../../source/contributor-ref-good-first-contributions.rst:19 msgid "Request for Flower Baselines" -msgstr "" +msgstr "Flower Baselines 요청" -#: ../../source/contributor-ref-good-first-contributions.rst:25 +#: ../../source/contributor-ref-good-first-contributions.rst:21 msgid "" "If you are not familiar with Flower Baselines, you should probably check-" "out our `contributing guide for baselines " "`_." 
msgstr "" +"Flower Baseline에 익숙하지 않다면 ' Baseline 기여 가이드 " +"`_를 " +"확인해보세요." -#: ../../source/contributor-ref-good-first-contributions.rst:27 +#: ../../source/contributor-ref-good-first-contributions.rst:25 msgid "" "You should then check out the open `issues " "`_" @@ -1150,16 +1367,20 @@ msgid "" " and that has no assignees, feel free to assign it to yourself and start " "working on it!" msgstr "" +"그런 다음 오픈 된 `issues " +"`_에서" +" baseline 요청을 확인해야 합니다. 작업하고 싶은 기준선을 찾았지만 담당자가 없는 경우, 자유롭게 자신에게 할당하고 작업을 " +"시작하세요!" -#: ../../source/contributor-ref-good-first-contributions.rst:31 +#: ../../source/contributor-ref-good-first-contributions.rst:30 msgid "" "Otherwise, if you don't find a baseline you'd like to work on, be sure to" " open a new issue with the baseline request template!" -msgstr "" +msgstr "그렇지 않으면 작업하고 싶은 baseline을 찾지 못하면 baseline 요청 템플릿으로 새 이슈를 열어야 합니다!" #: ../../source/contributor-ref-good-first-contributions.rst:34 msgid "Request for examples" -msgstr "" +msgstr "예시 요청" #: ../../source/contributor-ref-good-first-contributions.rst:36 msgid "" @@ -1167,22 +1388,24 @@ msgid "" "help users to get started with building what they want to build. Here are" " a few ideas where we'd be happy to accept a PR:" msgstr "" +"사용 예시는 사용자가 원하는 것을 구축하는 데 도움이 된다고 생각하기 때문에 더 많은 시간을 할애하여 작성할 수 있었으면 합니다. 
" +"다음은 저희가 기꺼이 PR을 수락할 수 있는 몇 가지 아이디어입니다:" #: ../../source/contributor-ref-good-first-contributions.rst:40 msgid "Llama 2 fine-tuning, with Hugging Face Transformers and PyTorch" -msgstr "" +msgstr "Llama 2 미세 조정, Hugging Face Transformer와 파이토치 포함" #: ../../source/contributor-ref-good-first-contributions.rst:41 msgid "XGBoost" -msgstr "" +msgstr "XGBoost" #: ../../source/contributor-ref-good-first-contributions.rst:42 msgid "Android ONNX on-device training" -msgstr "" +msgstr "Android ONNX 온디바이스 훈련" #: ../../source/contributor-ref-secure-aggregation-protocols.rst:2 msgid "Secure Aggregation Protocols" -msgstr "" +msgstr "Secure Aggregation 프로토콜" #: ../../source/contributor-ref-secure-aggregation-protocols.rst:4 msgid "" @@ -1191,60 +1414,69 @@ msgid "" " not be accurate in practice. The SecAgg protocol can be considered as a " "special case of the SecAgg+ protocol." msgstr "" +"SecAgg, SecAgg+, LightSecAgg 프로토콜을 포함합니다. LightSecAgg 프로토콜은 아직 구현되지 않았기 " +"때문에 다이어그램과 추상화가 실제로는 정확하지 않을 수 있습니다. SecAgg 프로토콜은 SecAgg+ 프로토콜의 특수한 경우로 " +"간주할 수 있습니다." -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:8 -msgid "The :code:`SecAgg+` abstraction" -msgstr "" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:9 +#, fuzzy +msgid "The ``SecAgg+`` abstraction" +msgstr "The :code:`SecAgg+` 추상화" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:10 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:11 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:163 msgid "" "In this implementation, each client will be assigned with a unique index " "(int) for secure aggregation, and thus many python dictionaries used have" " keys of int type rather than ClientProxy type." msgstr "" +"구현에서는 각 클라이언트에 secure aggregation를 위한 고유 인덱스(int)가 할당되므로 사용되는 많은 파이썬 " +"dictionaries에는 ClientProxy 타입이 아닌 int 타입의 키가 있습니다." 
-#: ../../source/contributor-ref-secure-aggregation-protocols.rst:65 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:198 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:67 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:204 msgid "" "The Flower server will execute and process received results in the " "following order:" -msgstr "" +msgstr "Flower 서버는 수신된 결과를 다음 순서로 실행하고 처리합니다:" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:159 -msgid "The :code:`LightSecAgg` abstraction" -msgstr "" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 +#, fuzzy +msgid "The ``LightSecAgg`` abstraction" +msgstr "The :code:`LightSecAgg` 추상" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:271 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:277 msgid "Types" -msgstr "" +msgstr "타입" #: ../../source/contributor-tutorial-contribute-on-github.rst:2 msgid "Contribute on GitHub" -msgstr "" +msgstr "GitHub에서 기여하기" #: ../../source/contributor-tutorial-contribute-on-github.rst:4 msgid "" "This guide is for people who want to get involved with Flower, but who " "are not used to contributing to GitHub projects." -msgstr "" +msgstr "이 가이드는 Flower에 참여하고 싶지만 GitHub 프로젝트에 기여하는 데 익숙하지 않은 분들을 위한 것입니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:6 +#: ../../source/contributor-tutorial-contribute-on-github.rst:7 msgid "" "If you're familiar with how contributing on GitHub works, you can " "directly checkout our :doc:`getting started guide for contributors " "`." msgstr "" +"깃허브에서 기여하는 방식에 익숙하다면 :doc:`기여자를 위한 시작 가이드`를 직접 확인하세요." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:10 +#: ../../source/contributor-tutorial-contribute-on-github.rst:12 msgid "Setting up the repository" -msgstr "" +msgstr "레포지토리 설정하기" -#: ../../source/contributor-tutorial-contribute-on-github.rst:21 +#: ../../source/contributor-tutorial-contribute-on-github.rst:29 msgid "**Create a GitHub account and setup Git**" -msgstr "" +msgstr "**GitHub 계정을 만들고 Git을 설정합니다**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:13 +#: ../../source/contributor-tutorial-contribute-on-github.rst:15 msgid "" "Git is a distributed version control tool. This allows for an entire " "codebase's history to be stored and every developer's machine. It is a " @@ -1252,109 +1484,126 @@ msgid "" "follow this `guide `_ to set it up." msgstr "" +"Git은 분산 버전 관리 도구입니다. 이를 통해 전체 코드베이스의 히스토리와 모든 개발자의 컴퓨터를 저장할 수 있습니다. 로컬 " +"컴퓨터에 설치해야 하는 소프트웨어로, 이 `가이드 `_를 따라 설정할 수 있습니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:16 +#: ../../source/contributor-tutorial-contribute-on-github.rst:21 msgid "" "GitHub, itself, is a code hosting platform for version control and " "collaboration. It allows for everyone to collaborate and work from " "anywhere on remote repositories." msgstr "" +"GitHub는 그 자체로 버전 관리 및 협업을 위한 코드 호스팅 플랫폼입니다. 누구나 원격 레포지토리에서 어디서든 협업하고 작업할 " +"수 있습니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:18 +#: ../../source/contributor-tutorial-contribute-on-github.rst:25 msgid "" "If you haven't already, you will need to create an account on `GitHub " "`_." -msgstr "" +msgstr "아직 계정을 만들지 않았다면 `GitHub `_에서 계정을 만들어야 합니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:20 +#: ../../source/contributor-tutorial-contribute-on-github.rst:28 msgid "" "The idea behind the generic Git and GitHub workflow boils down to this: " "you download code from a remote repository on GitHub, make changes " "locally and keep track of them using Git and then you upload your new " "history back to GitHub." msgstr "" +"일반적인 Git 및 GitHub 워크플로우의 기본 개념은 다음과 같이 요약됩니다. GitHub의 원격 레포지토리에서 코드를 " +"다운로드하고 로컬에서 변경한 후 Git을 사용하여 추적한 다음 새 기록을 다시 GitHub에 업로드하는 것입니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:32 +#: ../../source/contributor-tutorial-contribute-on-github.rst:42 msgid "**Forking the Flower repository**" -msgstr "" +msgstr "**Flower 레포지토리 포크하기**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:24 +#: ../../source/contributor-tutorial-contribute-on-github.rst:32 +#, fuzzy msgid "" "A fork is a personal copy of a GitHub repository. To create one for " -"Flower, you must navigate to ``_ (while " +"Flower, you must navigate to https://github.com/adap/flower (while " "connected to your GitHub account) and click the ``Fork`` button situated " "on the top right of the page." msgstr "" +"포크는 GitHub 리포지토리의 개인 복사본입니다. Flower용 포크를 만들려면 " +"``_로 이동하여(GitHub 계정에 연결된 상태에서) 페이지 오른쪽 " +"상단에 있는 ``포크`` 버튼을 클릭해야 합니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:29 +#: ../../source/contributor-tutorial-contribute-on-github.rst:38 msgid "" "You can change the name if you want, but this is not necessary as this " "version of Flower will be yours and will sit inside your own account " "(i.e., in your own list of repositories). Once created, you should see on" " the top left corner that you are looking at your own version of Flower." msgstr "" +"원하는 경우 이름을 변경할 수 있지만, 이 버전의 Flower는 자신의 계정(즉, 자신의 리포지토리 목록)에 위치하게 되므로 변경할" +" 필요는 없습니다. 만들기가 완료되면 왼쪽 상단에Flower 버전이 표시되는 것을 볼 수 있습니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:47 +#: ../../source/contributor-tutorial-contribute-on-github.rst:59 msgid "**Cloning your forked repository**" -msgstr "" +msgstr "**포크된 레포지토리 클론하기**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:35 +#: ../../source/contributor-tutorial-contribute-on-github.rst:45 msgid "" "The next step is to download the forked repository on your machine to be " "able to make changes to it. On your forked repository page, you should " "first click on the ``Code`` button on the right, this will give you the " "ability to copy the HTTPS link of the repository." msgstr "" +"다음 단계는 컴퓨터에서 포크된 레포지토리를 변경할 수 있도록 다운로드하는 것입니다. 포크된 포지토리 페이지에서 먼저 오른쪽의 " +"``Code`` 버튼을 클릭하면 레포지토리의 HTTPS 링크를 복사할 수 있습니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:41 +#: ../../source/contributor-tutorial-contribute-on-github.rst:52 msgid "" "Once you copied the \\, you can open a terminal on your machine, " "navigate to the place you want to download the repository to and type:" -msgstr "" +msgstr "\\를 복사한 후에는 컴퓨터에서 터미널을 열고 레포지토리를 다운로드할 위치로 이동하여 입력하면 됩니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:47 +#: ../../source/contributor-tutorial-contribute-on-github.rst:59 msgid "" "This will create a ``flower/`` (or the name of your fork if you renamed " "it) folder in the current working directory." -msgstr "" +msgstr "현재 작업 디렉터리에``flower/``(또는 포크 이름을 변경한 경우 포크 이름) 폴더가 생성됩니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:66 +#: ../../source/contributor-tutorial-contribute-on-github.rst:78 msgid "**Add origin**" -msgstr "" +msgstr "**origin 추가**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:50 +#: ../../source/contributor-tutorial-contribute-on-github.rst:62 msgid "You can then go into the repository folder:" -msgstr "" +msgstr "그런 다음 레포지토리 폴더로 이동할 수 있습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:56 +#: ../../source/contributor-tutorial-contribute-on-github.rst:68 msgid "" "And here we will need to add an origin to our repository. The origin is " "the \\ of the remote fork repository. To obtain it, we can do as " "previously mentioned by going to our fork repository on our GitHub " "account and copying the link." msgstr "" +"여기에 레포지토리에 origin을 추가해야 합니다. origin은 원격 포크 레포지토리의 \\입니다. origin을 " +"얻으려면 앞서 설명한 대로 GitHub 계정의 포크 레포지토리로 이동하여 링크를 복사하면 됩니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:61 +#: ../../source/contributor-tutorial-contribute-on-github.rst:75 msgid "" "Once the \\ is copied, we can type the following command in our " "terminal:" -msgstr "" +msgstr "\\ 이 복사되면 터미널에 다음 명령을 입력하면 됩니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:90 +#: ../../source/contributor-tutorial-contribute-on-github.rst:102 msgid "**Add upstream**" -msgstr "" +msgstr "**Upstream 추가하기**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:69 +#: ../../source/contributor-tutorial-contribute-on-github.rst:81 msgid "" "Now we will add an upstream address to our repository. Still in the same " "directory, we must run the following command:" -msgstr "" +msgstr "이제 레포지토리에 upstream 주소를 추가하겠습니다. 
여전히 같은 디렉터리에서 다음 명령을 실행해야 합니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:76 +#: ../../source/contributor-tutorial-contribute-on-github.rst:88 msgid "The following diagram visually explains what we did in the previous steps:" -msgstr "" +msgstr "다음 다이어그램은 이전 단계에서 수행한 작업을 시각적으로 설명합니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:80 +#: ../../source/contributor-tutorial-contribute-on-github.rst:92 msgid "" "The upstream is the GitHub remote address of the parent repository (in " "this case Flower), i.e. the one we eventually want to contribute to and " @@ -1362,455 +1611,497 @@ msgid "" "remote address of the forked repository we created, i.e. the copy (fork) " "in our own account." msgstr "" +"upstream은 부모 레포지토리(이 경우 Flower)의 GitHub 원격 주소, 즉 우리가 최종적으로 기여하고 싶고 따라서 최신" +" 기록이 필요한 레포지토리입니다. origin은 우리가 만든 포크된 레포지토리의 GitHub 원격 주소, 즉 우리 계정에 있는 " +"사본(포크)입니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:84 +#: ../../source/contributor-tutorial-contribute-on-github.rst:97 msgid "" "To make sure our local version of the fork is up-to-date with the latest " "changes from the Flower repository, we can execute the following command:" -msgstr "" +msgstr "로컬 버전의 포크가 Flower 레포지토리의 최신 변경 사항으로 최신 상태인지 확인하려면 다음 명령을 실행하면 됩니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:93 +#: ../../source/contributor-tutorial-contribute-on-github.rst:105 msgid "Setting up the coding environment" -msgstr "" +msgstr "코딩 환경 설정" -#: ../../source/contributor-tutorial-contribute-on-github.rst:95 +#: ../../source/contributor-tutorial-contribute-on-github.rst:107 msgid "" "This can be achieved by following this :doc:`getting started guide for " "contributors ` (note " "that you won't need to clone the repository). Once you are able to write " "code and test it, you can finally start making changes!" msgstr "" +":doc:'기여자를 위한 시작 가이드 '를 참조하세요(레포지토리를 복제할 필요는 없습니다). 코드를 작성하고 테스트할 수 있게 되면 드디어" +" 변경을 시작할 수 있습니다!" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:100 +#: ../../source/contributor-tutorial-contribute-on-github.rst:113 msgid "Making changes" -msgstr "" +msgstr "변경하기" -#: ../../source/contributor-tutorial-contribute-on-github.rst:102 +#: ../../source/contributor-tutorial-contribute-on-github.rst:115 msgid "" "Before making any changes make sure you are up-to-date with your " "repository:" -msgstr "" +msgstr "변경하기 전에 레포지토리를 최신 상태로 유지하세요:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:108 +#: ../../source/contributor-tutorial-contribute-on-github.rst:121 msgid "And with Flower's repository:" -msgstr "" +msgstr "Flower의 레포지토리도 있습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:122 +#: ../../source/contributor-tutorial-contribute-on-github.rst:134 msgid "**Create a new branch**" -msgstr "" +msgstr "**새 브랜치 만들기**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:115 +#: ../../source/contributor-tutorial-contribute-on-github.rst:128 msgid "" "To make the history cleaner and easier to work with, it is good practice " "to create a new branch for each feature/project that needs to be " "implemented." -msgstr "" +msgstr "히스토리를 더 깔끔하고 작업하기 쉽게 만들려면 구현해야 하는 각 기능/프로젝트에 대해 새 브랜치를 만드는 것이 좋습니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:118 +#: ../../source/contributor-tutorial-contribute-on-github.rst:131 msgid "" "To do so, just run the following command inside the repository's " "directory:" -msgstr "" +msgstr "이렇게 하려면 레포지토리 디렉토리에서 다음 명령을 실행하면 됩니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:125 +#: ../../source/contributor-tutorial-contribute-on-github.rst:136 msgid "**Make changes**" -msgstr "" +msgstr "**변경하기**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:125 +#: ../../source/contributor-tutorial-contribute-on-github.rst:137 msgid "Write great code and create wonderful changes using your favorite editor!" 
-msgstr "" +msgstr "선호하는 편집기를 사용하여 멋진 코드를 작성하고 훌륭한 변화를 만들어 보세요!" -#: ../../source/contributor-tutorial-contribute-on-github.rst:138 +#: ../../source/contributor-tutorial-contribute-on-github.rst:149 msgid "**Test and format your code**" -msgstr "" +msgstr "**코드 테스트 및 서식 지정**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:128 +#: ../../source/contributor-tutorial-contribute-on-github.rst:139 msgid "" "Don't forget to test and format your code! Otherwise your code won't be " "able to be merged into the Flower repository. This is done so the " "codebase stays consistent and easy to understand." msgstr "" +"코드를 테스트하고 서식을 지정하는 것을 잊지 마세요! 그렇지 않으면 코드를 Flower 레포지토리에 병합할 수 없습니다. 이는 " +"코드베이스가 일관성을 유지하고 이해하기 쉽도록 하기 위한 것입니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:131 +#: ../../source/contributor-tutorial-contribute-on-github.rst:143 msgid "To do so, we have written a few scripts that you can execute:" -msgstr "" +msgstr "이를 위해 실행할 수 있는 몇 가지 스크립트를 작성했습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:150 +#: ../../source/contributor-tutorial-contribute-on-github.rst:162 msgid "**Stage changes**" -msgstr "" +msgstr "**변경사항 스테이징**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:141 +#: ../../source/contributor-tutorial-contribute-on-github.rst:152 msgid "" "Before creating a commit that will update your history, you must specify " "to Git which files it needs to take into account." -msgstr "" +msgstr "기록을 업데이트할 커밋을 만들기 전에 어떤 파일을 고려해야 하는지 Git에 지정해야 합니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:143 +#: ../../source/contributor-tutorial-contribute-on-github.rst:155 msgid "This can be done with:" -msgstr "" +msgstr "이 작업을 수행할 수 있습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:149 +#: ../../source/contributor-tutorial-contribute-on-github.rst:161 +#, fuzzy msgid "" "To check which files have been modified compared to the last version " "(last commit) and to see which files are staged for commit, you can use " -"the :code:`git status` command." +"the ``git status`` command." msgstr "" +"마지막 버전(마지막 커밋)과 비교하여 수정된 파일을 확인하고 커밋을 위해 스테이징된 파일을 확인하려면 :code:`git " +"status` 명령을 사용하면 됩니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:160 +#: ../../source/contributor-tutorial-contribute-on-github.rst:173 msgid "**Commit changes**" -msgstr "" +msgstr "**변경사항 커밋**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:153 +#: ../../source/contributor-tutorial-contribute-on-github.rst:165 +#, fuzzy msgid "" -"Once you have added all the files you wanted to commit using :code:`git " -"add`, you can finally create your commit using this command:" -msgstr "" +"Once you have added all the files you wanted to commit using ``git add``," +" you can finally create your commit using this command:" +msgstr ":code:`git add`를 사용하여 커밋하려는 모든 파일을 추가한 후, 마지막으로 이 명령을 사용하여 커밋을 생성할 수 있습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:159 +#: ../../source/contributor-tutorial-contribute-on-github.rst:172 +#, fuzzy msgid "" "The \\ is there to explain to others what the commit " "does. It should be written in an imperative style and be concise. An " -"example would be :code:`git commit -m \"Add images to README\"`." +"example would be ``git commit -m \"Add images to README\"``." msgstr "" +"커밋의 내용을 다른 사람에게 설명하기 위해 \\가 있습니다. 명령형 스타일로 작성해야 하며 간결해야" +" 합니다. 예를 들면 :code:`git commit -m \"Add images to README\"`." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:171 +#: ../../source/contributor-tutorial-contribute-on-github.rst:185 msgid "**Push the changes to the fork**" -msgstr "" +msgstr "**변경 사항을 포크에 푸시**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:163 +#: ../../source/contributor-tutorial-contribute-on-github.rst:176 msgid "" "Once we have committed our changes, we have effectively updated our local" " history, but GitHub has no way of knowing this unless we push our " "changes to our origin's remote address:" msgstr "" +"변경 사항을 커밋하면 로컬 히스토리를 효과적으로 업데이트한 것이지만, 변경 사항을 원본의 원격 주소로 푸시하지 않는 한 " +"GitHub는 이를 알 방법이 없습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:170 +#: ../../source/contributor-tutorial-contribute-on-github.rst:184 msgid "" "Once this is done, you will see on the GitHub that your forked repo was " "updated with the changes you have made." -msgstr "" +msgstr "이 작업이 완료되면 변경한 내용으로 포크된 레포지토리가 업데이트된 것을 GitHub에서 확인할 수 있습니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:174 +#: ../../source/contributor-tutorial-contribute-on-github.rst:188 msgid "Creating and merging a pull request (PR)" -msgstr "" +msgstr "pull request(PR) 만들기 및 병합하기" -#: ../../source/contributor-tutorial-contribute-on-github.rst:206 +#: ../../source/contributor-tutorial-contribute-on-github.rst:226 msgid "**Create the PR**" -msgstr "" +msgstr "**PR 만들기**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:177 +#: ../../source/contributor-tutorial-contribute-on-github.rst:191 msgid "" "Once you have pushed changes, on the GitHub webpage of your repository " "you should see the following message:" -msgstr "" +msgstr "변경 사항을 푸시하고 나면 레포지토리의 GitHub 웹페이지에 다음 메시지가 표시됩니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:181 +#: ../../source/contributor-tutorial-contribute-on-github.rst:196 msgid "Otherwise you can always find this option in the ``Branches`` page." 
-msgstr "" +msgstr "그렇지 않으면 언제든지 ``Branches`` 페이지에서 이 옵션을 찾을 수 있습니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:183 +#: ../../source/contributor-tutorial-contribute-on-github.rst:198 msgid "" "Once you click the ``Compare & pull request`` button, you should see " "something similar to this:" -msgstr "" +msgstr "``Compare & pull request`` 버튼을 클릭하면 이와 비슷한 화면이 표시됩니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:187 +#: ../../source/contributor-tutorial-contribute-on-github.rst:203 msgid "At the top you have an explanation of which branch will be merged where:" -msgstr "" +msgstr "상단에는 어느 지점이 어디에 병합될 것인지에 대한 설명이 있습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:191 +#: ../../source/contributor-tutorial-contribute-on-github.rst:207 msgid "" "In this example you can see that the request is to merge the branch " "``doc-fixes`` from my forked repository to branch ``main`` from the " "Flower repository." msgstr "" +"이 예제에서는 내 포크된 레포지토리의 ``doc-fixes`` 브랜치를 Flower 레포지토리의 ``main`` 브랜치에 병합하라는" +" 요청을 볼 수 있습니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:193 +#: ../../source/contributor-tutorial-contribute-on-github.rst:210 msgid "" "The title should be changed to adhere to the :ref:`pr_title_format` " "guidelines, otherwise it won't be possible to merge the PR. So in this " "case, a correct title might be ``docs(framework:skip) Fix typos``." msgstr "" +"제목은 :ref:`pr_title_format` 가이드라인을 준수하도록 변경해야 하며, 그렇지 않으면 PR을 병합할 수 없습니다. " +"따라서 이 경우 올바른 제목은 ``docs(framework:skip) Fix typos``이 될 수 있습니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:196 +#: ../../source/contributor-tutorial-contribute-on-github.rst:214 msgid "" "The input box in the middle is there for you to describe what your PR " "does and to link it to existing issues. We have placed comments (that " "won't be rendered once the PR is opened) to guide you through the " "process." 
msgstr "" +"가운데에 있는 입력 상자는 PR의 기능을 설명하고 기존 이슈에 연결할 수 있는 곳입니다. 프로세스를 안내하기 위해 코멘트(PR이 " +"열리면 렌더링되지 않음)를 배치했습니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:199 +#: ../../source/contributor-tutorial-contribute-on-github.rst:218 msgid "It is important to follow the instructions described in comments." -msgstr "" +msgstr "코멘트에 설명된 지침을 따르는 것이 중요합니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:201 +#: ../../source/contributor-tutorial-contribute-on-github.rst:220 msgid "" "At the bottom you will find the button to open the PR. This will notify " "reviewers that a new PR has been opened and that they should look over it" " to merge or to request changes." msgstr "" +"하단에는 PR을 여는 버튼이 있습니다. 이렇게 하면 검토자에게 새 PR이 열렸으며 병합하거나 변경을 요청하기 위해 검토해야 함을 " +"알립니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:204 +#: ../../source/contributor-tutorial-contribute-on-github.rst:224 msgid "" "If your PR is not yet ready for review, and you don't want to notify " "anyone, you have the option to create a draft pull request:" -msgstr "" +msgstr "PR이 아직 검토할 준비가 되지 않았고 다른 사람에게 알리고 싶지 않은 경우 pull request 초안을 만드는 옵션이 있습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:230 msgid "**Making new changes**" -msgstr "" +msgstr "**new changes 만들기**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:229 msgid "" "Once the PR has been opened (as draft or not), you can still push new " "commits to it the same way we did before, by making changes to the branch" " associated with the PR." -msgstr "" +msgstr "PR이 초안으로 열렸든 아니든, PR과 연결된 브랜치를 변경하여 이전과 같은 방식으로 새 커밋을 푸시할 수 있습니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:231 +#: ../../source/contributor-tutorial-contribute-on-github.rst:253 msgid "**Review the PR**" -msgstr "" +msgstr "**PR 검토하기**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:212 +#: ../../source/contributor-tutorial-contribute-on-github.rst:233 msgid "" "Once the PR has been opened or once the draft PR has been marked as " "ready, a review from code owners will be automatically requested:" -msgstr "" +msgstr "PR이 열리거나 초안 PR이 준비됨으로 표시되면 코드 소유자의 검토가 자동으로 요청됩니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:216 +#: ../../source/contributor-tutorial-contribute-on-github.rst:238 msgid "" "Code owners will then look into the code, ask questions, request changes " "or validate the PR." -msgstr "" +msgstr "그러면 코드 소유자는 코드를 살펴보고, 질문하고, 변경을 요청하거나 PR의 유효성을 검사합니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:218 +#: ../../source/contributor-tutorial-contribute-on-github.rst:241 msgid "Merging will be blocked if there are ongoing requested changes." -msgstr "" +msgstr "진행 중인 변경 요청이 있는 경우 병합이 차단됩니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:222 +#: ../../source/contributor-tutorial-contribute-on-github.rst:245 msgid "" "To resolve them, just push the necessary changes to the branch associated" " with the PR:" -msgstr "" +msgstr "이를 해결하려면 PR과 연결된 브랜치에 필요한 변경 사항을 푸시하면 됩니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:226 +#: ../../source/contributor-tutorial-contribute-on-github.rst:250 msgid "And resolve the conversation:" -msgstr "" +msgstr "그리고 소통을 통해 해결하세요:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:230 +#: ../../source/contributor-tutorial-contribute-on-github.rst:254 msgid "" "Once all the conversations have been resolved, you can re-request a " "review." -msgstr "" +msgstr "모든 대화가 해결되면 검토를 다시 요청할 수 있습니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:251 +#: ../../source/contributor-tutorial-contribute-on-github.rst:274 msgid "**Once the PR is merged**" -msgstr "" +msgstr "**PR이 병합되면**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:234 +#: ../../source/contributor-tutorial-contribute-on-github.rst:256 msgid "" "If all the automatic tests have passed and reviewers have no more changes" " to request, they can approve the PR and merge it." -msgstr "" +msgstr "모든 자동 테스트가 통과되고 검토자가 더 이상 요청할 변경 사항이 없는 경우 PR을 승인하고 병합할 수 있습니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:238 +#: ../../source/contributor-tutorial-contribute-on-github.rst:261 msgid "" "Once it is merged, you can delete the branch on GitHub (a button should " "appear to do so) and also delete it locally by doing:" -msgstr "" +msgstr "병합이 완료되면 GitHub에서 브랜치를 삭제할 수 있으며(삭제 버튼이 표시되어야 함), 로컬에서도 삭제할 수 있습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:245 +#: ../../source/contributor-tutorial-contribute-on-github.rst:269 msgid "Then you should update your forked repository by doing:" -msgstr "" +msgstr "그런 다음 다음을 수행하여 포크된 레포지토리를 업데이트해야 합니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:254 +#: ../../source/contributor-tutorial-contribute-on-github.rst:277 msgid "Example of first contribution" -msgstr "" +msgstr "첫 번째 기여의 예" -#: ../../source/contributor-tutorial-contribute-on-github.rst:257 +#: ../../source/contributor-tutorial-contribute-on-github.rst:280 msgid "Problem" -msgstr "" +msgstr "문제" -#: ../../source/contributor-tutorial-contribute-on-github.rst:259 +#: ../../source/contributor-tutorial-contribute-on-github.rst:282 msgid "" "For our documentation, we've started to use the `Diàtaxis framework " "`_." -msgstr "" +msgstr "저희 문서에는 'Diàtaxis 프레임워크 `_'를 사용하기 시작했습니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:261 +#: ../../source/contributor-tutorial-contribute-on-github.rst:285 msgid "" "Our \"How to\" guides should have titles that continue the sentence \"How" " to …\", for example, \"How to upgrade to Flower 1.0\"." msgstr "" +"'How to' 가이드의 제목은 \"How to …\"라는 문장을 이어가는 제목이어야 합니다(예: \"How to upgrade " +"to Flower 1.0\")." -#: ../../source/contributor-tutorial-contribute-on-github.rst:263 +#: ../../source/contributor-tutorial-contribute-on-github.rst:288 msgid "" "Most of our guides do not follow this new format yet, and changing their " "title is (unfortunately) more involved than one might think." -msgstr "" +msgstr "대부분의 가이드는 아직 이 새로운 형식을 따르지 않으며, 안타깝게도 제목을 변경하는 작업은 생각보다 복잡합니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:265 +#: ../../source/contributor-tutorial-contribute-on-github.rst:291 msgid "" "This issue is about changing the title of a doc from present continuous " "to present simple." -msgstr "" +msgstr "이번 이슈는 문서 제목을 현재 연속형에서 현재 단순형으로 변경하는 것에 관한 것입니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:267 +#: ../../source/contributor-tutorial-contribute-on-github.rst:294 msgid "" "Let's take the example of \"Saving Progress\" which we changed to \"Save " "Progress\". Does this pass our check?" msgstr "" +"\"How to saving progress\"을 \"How to save progress\"으로 변경한 예를 들어 보겠습니다. " +"이것이 우리의 점검을 통과했나요?" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:269 +#: ../../source/contributor-tutorial-contribute-on-github.rst:297 msgid "Before: \"How to saving progress\" ❌" -msgstr "" +msgstr "Before: \"How to saving progress\" ❌" -#: ../../source/contributor-tutorial-contribute-on-github.rst:271 +#: ../../source/contributor-tutorial-contribute-on-github.rst:299 msgid "After: \"How to save progress\" ✅" -msgstr "" +msgstr "After: \"How to save progress\" ✅" -#: ../../source/contributor-tutorial-contribute-on-github.rst:274 +#: ../../source/contributor-tutorial-contribute-on-github.rst:302 msgid "Solution" -msgstr "" +msgstr "해결법" -#: ../../source/contributor-tutorial-contribute-on-github.rst:276 +#: ../../source/contributor-tutorial-contribute-on-github.rst:304 msgid "" "This is a tiny change, but it'll allow us to test your end-to-end setup. " "After cloning and setting up the Flower repo, here's what you should do:" msgstr "" +"이것은 사소한 변경이지만 end-to-end 설정을 테스트할 수 있습니다. Flower 레포지토리를 복제하고 설정한 후에는 다음과 " +"같이 하세요:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:278 +#: ../../source/contributor-tutorial-contribute-on-github.rst:307 msgid "Find the source file in ``doc/source``" -msgstr "" +msgstr "``doc/source``에서 소스 파일을 찾습니다" -#: ../../source/contributor-tutorial-contribute-on-github.rst:279 +#: ../../source/contributor-tutorial-contribute-on-github.rst:308 msgid "" "Make the change in the ``.rst`` file (beware, the dashes under the title " "should be the same length as the title itself)" -msgstr "" +msgstr "``.rst`` 파일에서 변경합니다(제목 아래의 대시는 제목 자체의 길이와 같아야 합니다)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:280 +#: ../../source/contributor-tutorial-contribute-on-github.rst:310 msgid "" "Build the docs and `check the result `_" msgstr "" +"문서를 빌드하고 '결과 확인 `_'합니다" -#: ../../source/contributor-tutorial-contribute-on-github.rst:283 +#: ../../source/contributor-tutorial-contribute-on-github.rst:314 msgid "Rename file" -msgstr "" 
+msgstr "파일 이름 바꾸기" -#: ../../source/contributor-tutorial-contribute-on-github.rst:285 +#: ../../source/contributor-tutorial-contribute-on-github.rst:316 msgid "" "You might have noticed that the file name still reflects the old wording." " If we just change the file, then we break all existing links to it - it " "is **very important** to avoid that, breaking links can harm our search " "engine ranking." msgstr "" +"파일 이름에 여전히 이전 문구가 반영되어 있는 것을 보셨을 것입니다. 파일만 변경하면 파일에 대한 기존 링크가 모두 끊어지는데, " +"링크를 끊으면 검색 엔진 순위에 영향을 줄 수 있으므로 이를 방지하는 것이 **매우 중요**합니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:288 +#: ../../source/contributor-tutorial-contribute-on-github.rst:320 msgid "Here's how to change the file name:" -msgstr "" +msgstr "파일 이름을 변경하는 방법은 다음과 같습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:290 +#: ../../source/contributor-tutorial-contribute-on-github.rst:322 msgid "Change the file name to ``save-progress.rst``" -msgstr "" +msgstr "파일 이름을 ``save-progress.rst``로 변경합니다" -#: ../../source/contributor-tutorial-contribute-on-github.rst:291 +#: ../../source/contributor-tutorial-contribute-on-github.rst:323 msgid "Add a redirect rule to ``doc/source/conf.py``" -msgstr "" +msgstr "'doc/source/conf.py'에 리디렉션 규칙을 추가합니다" -#: ../../source/contributor-tutorial-contribute-on-github.rst:293 +#: ../../source/contributor-tutorial-contribute-on-github.rst:325 msgid "" "This will cause a redirect from ``saving-progress.html`` to ``save-" "progress.html``, old links will continue to work." msgstr "" +"이렇게 하면 ``saving-progress.html``에서 ``save-progress.html``로 리디렉션되며, 이전 링크는 " +"계속 작동합니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:296 +#: ../../source/contributor-tutorial-contribute-on-github.rst:329 msgid "Apply changes in the index file" -msgstr "" +msgstr "인덱스 파일에 변경 사항 적용" -#: ../../source/contributor-tutorial-contribute-on-github.rst:298 +#: ../../source/contributor-tutorial-contribute-on-github.rst:331 msgid "" "For the lateral navigation bar to work properly, it is very important to " "update the ``index.rst`` file as well. This is where we define the whole " "arborescence of the navbar." msgstr "" +"횡방향 내비게이션 바가 제대로 작동하려면 ``index.rst`` 파일도 업데이트하는 것이 매우 중요합니다. 이 파일은 탐색 모음의" +" 전체 배열을 정의하는 곳입니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:301 +#: ../../source/contributor-tutorial-contribute-on-github.rst:335 msgid "Find and modify the file name in ``index.rst``" -msgstr "" +msgstr "``index.rst``에서 파일 이름을 찾아 수정합니다" -#: ../../source/contributor-tutorial-contribute-on-github.rst:304 +#: ../../source/contributor-tutorial-contribute-on-github.rst:338 msgid "Open PR" -msgstr "" +msgstr "PR 열기" -#: ../../source/contributor-tutorial-contribute-on-github.rst:306 +#: ../../source/contributor-tutorial-contribute-on-github.rst:340 msgid "" "Commit the changes (commit messages are always imperative: \"Do " "something\", in this case \"Change …\")" -msgstr "" +msgstr "변경 사항을 커밋합니다(커밋 메시지는 항상 명령형입니다: \"Do something\", 이 경우 \"Change …\")" -#: ../../source/contributor-tutorial-contribute-on-github.rst:307 +#: ../../source/contributor-tutorial-contribute-on-github.rst:342 msgid "Push the changes to your fork" -msgstr "" +msgstr "변경 사항을 포크에 푸시합니다" -#: ../../source/contributor-tutorial-contribute-on-github.rst:308 +#: ../../source/contributor-tutorial-contribute-on-github.rst:343 msgid "" "Open a PR (as shown above) with title ``docs(framework) Update how-to " "guide title``" -msgstr "" +msgstr "``docs(framework) Update how-to guide title`` 제목으로 PR(위와 같이)을 엽니다" -#: 
../../source/contributor-tutorial-contribute-on-github.rst:309 +#: ../../source/contributor-tutorial-contribute-on-github.rst:344 msgid "Wait for it to be approved!" -msgstr "" +msgstr "승인될 때까지 기다리세요!" -#: ../../source/contributor-tutorial-contribute-on-github.rst:310 +#: ../../source/contributor-tutorial-contribute-on-github.rst:345 msgid "Congrats! 🥳 You're now officially a Flower contributor!" -msgstr "" +msgstr "축하합니다! 이제 공식적으로 Flower 기여자가 되셨습니다!" -#: ../../source/contributor-tutorial-contribute-on-github.rst:314 -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:548 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:946 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:727 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:713 +#: ../../source/contributor-tutorial-contribute-on-github.rst:348 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:573 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1012 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:811 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:857 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:367 msgid "Next steps" -msgstr "" +msgstr "다음 단계" -#: ../../source/contributor-tutorial-contribute-on-github.rst:316 +#: ../../source/contributor-tutorial-contribute-on-github.rst:350 msgid "" "Once you have made your first PR, and want to contribute more, be sure to" " check out the following :" -msgstr "" +msgstr "첫 번째 PR을 작성하고 더 많은 기여를 하고 싶다면 다음을 확인하세요:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:318 +#: ../../source/contributor-tutorial-contribute-on-github.rst:353 +#, fuzzy msgid "" ":doc:`Good first contributions `, where you should particularly look into the " -":code:`baselines` contributions." +"``baselines`` contributions." 
msgstr "" +":doc:`훌륭한 첫 번째 기여 `, 특히 " +":code:`baselines` 기여를 살펴봐야 합니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:322 +#: ../../source/contributor-tutorial-contribute-on-github.rst:357 #: ../../source/fed/0000-20200102-fed-template.md:60 msgid "Appendix" -msgstr "" +msgstr "부록" -#: ../../source/contributor-tutorial-contribute-on-github.rst:327 +#: ../../source/contributor-tutorial-contribute-on-github.rst:362 msgid "PR title format" -msgstr "" +msgstr "PR 제목 형식" -#: ../../source/contributor-tutorial-contribute-on-github.rst:329 +#: ../../source/contributor-tutorial-contribute-on-github.rst:364 msgid "We enforce the following PR title format:" -msgstr "" +msgstr "다음과 같은 PR 제목 형식을 적용합니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:335 +#: ../../source/contributor-tutorial-contribute-on-github.rst:370 msgid "" "(or ``(:skip) `` to ignore the PR in the " "changelog)" -msgstr "" +msgstr "(또는 ``(:skip) ``를 사용하면 변경 로그에서 PR을 무시합니다.)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:337 +#: ../../source/contributor-tutorial-contribute-on-github.rst:372 msgid "" "Where ```` needs to be in ``{ci, fix, feat, docs, refactor, " "break}``, ```` should be in ``{framework, baselines, datasets, " @@ -1818,19410 +2109,27192 @@ msgid "" "':skip' flag to be used}``, and ```` starts with a capitalised " "verb in the imperative mood." msgstr "" +"여기서 ````은 ``{ci, fix, feat, docs, refactor, break}``, " +"````는 ``{framework, baselines, datasets, examples, or '*' " +"':skip' 플래그를 사용해야 하는 여러 프로젝트를 수정하는 경우}``로 입력해야 하며, ````는 대문자로 " +"시작해야 합니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:341 +#: ../../source/contributor-tutorial-contribute-on-github.rst:377 msgid "Valid examples:" -msgstr "" +msgstr "유효한 예시입니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:343 +#: ../../source/contributor-tutorial-contribute-on-github.rst:379 msgid "``feat(framework) Add flwr build CLI command``" -msgstr "" +msgstr "``feat(framework) Add flwr build CLI command``" -#: ../../source/contributor-tutorial-contribute-on-github.rst:344 +#: ../../source/contributor-tutorial-contribute-on-github.rst:380 msgid "``refactor(examples:skip) Improve quickstart-pytorch logging``" -msgstr "" +msgstr "``refactor(examples:skip) Improve quickstart-pytorch logging``" -#: ../../source/contributor-tutorial-contribute-on-github.rst:345 +#: ../../source/contributor-tutorial-contribute-on-github.rst:381 msgid "``ci(*:skip) Enforce PR title format``" -msgstr "" +msgstr "``ci(*:skip) Enforce PR title format``" -#: ../../source/contributor-tutorial-contribute-on-github.rst:347 +#: ../../source/contributor-tutorial-contribute-on-github.rst:383 msgid "Invalid examples:" -msgstr "" +msgstr "잘못된 예시입니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:349 +#: ../../source/contributor-tutorial-contribute-on-github.rst:385 msgid "``feat(framework): Add flwr build CLI command`` (extra ``:``)" -msgstr "" +msgstr "``feat(framework): Add flwr build CLI command`` ( ``:``제외)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:350 +#: ../../source/contributor-tutorial-contribute-on-github.rst:386 msgid "" "``feat(*) Add flwr build CLI command`` (missing ``skip`` flag along with " "``*``)" -msgstr "" +msgstr "``feat(*) Add flwr build CLI command`` (``skip`` flag와 함께 ``*``누락)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:351 +#: ../../source/contributor-tutorial-contribute-on-github.rst:387 msgid "``feat(skip) Add flwr build CLI command`` (missing ````)" -msgstr "" +msgstr 
"``feat(skip) Add flwr build CLI command`` (````누락)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:352 +#: ../../source/contributor-tutorial-contribute-on-github.rst:388 msgid "``feat(framework) add flwr build CLI command`` (non capitalised verb)" -msgstr "" +msgstr "``feat(framework) add flwr build CLI command`` (대문자로 표기되지 않은 동사)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:353 +#: ../../source/contributor-tutorial-contribute-on-github.rst:389 msgid "``feat(framework) Add flwr build CLI command.`` (dot at the end)" -msgstr "" +msgstr "``feat(framework) Add flwr build CLI command.`` (끝에 마침표)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:354 +#: ../../source/contributor-tutorial-contribute-on-github.rst:390 msgid "``Add flwr build CLI command.`` (missing ``()``)" -msgstr "" +msgstr "``Add flwr build CLI command.`` ( ``()``누락)" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:2 msgid "Get started as a contributor" -msgstr "" +msgstr "기여자로 시작하기" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:5 -#: ../../source/how-to-run-flower-using-docker.rst:132 +#: ../../source/docker/run-as-subprocess.rst:11 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:16 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:13 +#: ../../source/docker/tutorial-quickstart-docker.rst:11 msgid "Prerequisites" -msgstr "" +msgstr "전제 조건" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:7 -msgid "`Python 3.8 `_ or above" -msgstr "" +msgid "`Python 3.9 `_ or above" +msgstr "Python 3.9 `_ 이상" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:8 msgid "`Poetry 1.3 `_ or above" -msgstr "" +msgstr "`Poetry 1.3 `_ _ 이상" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:9 msgid "(Optional) `pyenv `_" -msgstr "" +msgstr "(선택 사항) `pyenv `_" #: 
../../source/contributor-tutorial-get-started-as-a-contributor.rst:10 msgid "(Optional) `pyenv-virtualenv `_" -msgstr "" +msgstr "(선택 사항) `pyenv-virtualenv `_" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:12 +#, fuzzy msgid "" -"Flower uses :code:`pyproject.toml` to manage dependencies and configure " +"Flower uses ``pyproject.toml`` to manage dependencies and configure " "development tools (the ones which support it). Poetry is a build tool " "which supports `PEP 517 `_." msgstr "" +"Flower는 dependencies을 관리하고 개발 도구(이를 지원하는 도구)를 구성하기 위해 " +":code:`pyproject.toml`을 사용합니다. Poetry는 `PEP 517 " +"`_을 지원하는 빌드 도구입니다." -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:18 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:17 msgid "Developer Machine Setup" -msgstr "" +msgstr "개발자 머신 설정" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:21 -msgid "Preliminarities" -msgstr "" +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:20 +#, fuzzy +msgid "Preliminaries" +msgstr "사전 준비" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:22 msgid "Some system-wide dependencies are needed." -msgstr "" +msgstr "일부 시스템 전체에 대한 의존성이 필요합니다." #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:25 msgid "For macOS" -msgstr "" +msgstr "macOS의 경우" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:27 msgid "" "Install `homebrew `_. Don't forget the post-" "installation actions to add `brew` to your PATH." -msgstr "" +msgstr "`homebrew `_를 설치합니다. 설치 후 `brew`를 PATH에 추가하는 작업을 잊지 마세요." 
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:28 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:29 +#, fuzzy msgid "" "Install `xz` (to install different Python versions) and `pandoc` to build" -" the docs::" -msgstr "" +" the docs:" +msgstr "xz`(다른 Python 버전을 설치하려면)와 `pandoc`을 설치하여 문서를 빌드합니다::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:34 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:36 msgid "For Ubuntu" -msgstr "" +msgstr "Ubuntu의 경우" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:35 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:38 +#, fuzzy msgid "" "Ensure you system (Ubuntu 22.04+) is up-to-date, and you have all " -"necessary packages::" -msgstr "" +"necessary packages:" +msgstr "시스템(우분투 22.04 이상)이 최신 상태이고 필요한 패키지가 모두 설치되어 있는지 확인하세요:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:44 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:47 msgid "Create Flower Dev Environment" -msgstr "" +msgstr "Flower 개발 환경 만들기" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:46 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:49 +#, fuzzy msgid "" -"1. Clone the `Flower repository `_ from " -"GitHub::" -msgstr "" +"Clone the `Flower repository `_ from " +"GitHub:" +msgstr "1. GitHub: 에서 ``Flower 레포지토리 `_를 복제합니다::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:52 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:56 +#, fuzzy msgid "" "Let's create the Python environment for all-things Flower. If you wish to" -" use :code:`pyenv`, we provide two convenience scripts that you can use. " -"If you prefer using something else than :code:`pyenv`, create a new " -"environment, activate and skip to the last point where all packages are " -"installed." 
+" use ``pyenv``, we provide two convenience scripts that you can use. If " +"you prefer using something else than ``pyenv``, create a new environment," +" activate and skip to the last point where all packages are installed." msgstr "" +"Flower의 모든 것을 위한 파이썬 환경을 만들어 보겠습니다.:code:`pyenv`를 사용하고자 하는 경우 사용할 수 있는 두 " +"가지 편의 스크립트를 제공합니다.:code:`pyenv`가 아닌 다른 것을 사용하려면 새 환경을 생성하고 활성화한 후 모든 패키지가" +" 설치된 마지막 지점으로 건너뛰세요." -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:54 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:61 +#, fuzzy msgid "" -"If you don't have :code:`pyenv` installed, the following script that will" -" install it, set it up, and create the virtual environment (with " -":code:`Python 3.8.17` by default)::" +"If you don't have ``pyenv`` installed, the following script that will " +"install it, set it up, and create the virtual environment (with " +":substitution-code:`Python |python_full_version|` by default):" msgstr "" +":code:`pyenv`가 설치되어 있지 않은 경우 다음 스크립트를 사용하여 설치, 설정 및 가상 환경을 생성합니다(기본적으로 " +":code:`Python 3.9.20` 사용):" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:58 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69 +#, fuzzy msgid "" -"If you already have :code:`pyenv` installed (along with the :code:`pyenv-" -"virtualenv` plugin), you can use the following convenience script (with " -":code:`Python 3.8.17` by default)::" +"If you already have ``pyenv`` installed (along with the ``pyenv-" +"virtualenv`` plugin), you can use the following convenience script (with " +":substitution-code:`Python |python_full_version|` by default):" msgstr "" +":code:`pyenv`가 이미 설치되어 있는 경우( :code:`pyenv-virtualenv` 플러그인과 함께) 다음과 같은 " +"편의 스크립트를 사용할 수 있습니다(기본적으로 코드:`Python 3.9.20` 사용):" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:62 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:77 +#, fuzzy msgid "" -"3. 
Install the Flower package in development mode (think :code:`pip " -"install -e`) along with all necessary dependencies::" +"3. Install the Flower package in development mode (think ``pip install " +"-e``) along with all necessary dependencies:" msgstr "" +"3. 필요한 모든 dependencies와 함께 개발 모드에서 Flower 패키지를 설치합니다(예:code:`pip install " +"-e`)::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85 msgid "Convenience Scripts" -msgstr "" +msgstr "편의 스크립트" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:71 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:87 +#, fuzzy msgid "" "The Flower repository contains a number of convenience scripts to make " -"recurring development tasks easier and less error-prone. See the " -":code:`/dev` subdirectory for a full list. The following scripts are " -"amongst the most important ones:" +"recurring development tasks easier and less error-prone. See the ``/dev``" +" subdirectory for a full list. The following scripts are amongst the most" +" important ones:" msgstr "" +"Flower 레포지토리에는 반복적인 개발 작업을 더 쉽고 오류를 줄이기 위한 여러 가지 편의 스크립트가 포함되어 있습니다. 전체 " +"목록은 :code:`/dev` 하위 디렉터리를 참조하세요. 
다음 스크립트는 가장 중요한 스크립트 중 하나입니다:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:77 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:92 msgid "Create/Delete Virtual Environment" -msgstr "" +msgstr "가상 환경 생성/삭제" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:101 msgid "Compile ProtoBuf Definitions" -msgstr "" +msgstr "ProtoBuf 정의 컴파일" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:92 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:108 msgid "Auto-Format Code" -msgstr "" +msgstr "자동 포맷 코드" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:99 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:115 msgid "Run Linters and Tests" -msgstr "" +msgstr "린터 및 테스트 실행" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:106 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:122 msgid "Add a pre-commit hook" -msgstr "" +msgstr "사전 커밋 훅 추가" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:108 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:124 msgid "" "Developers may integrate a pre-commit hook into their workflow utilizing " "the `pre-commit `_ library. The pre-" "commit hook is configured to execute two primary operations: " "``./dev/format.sh`` and ``./dev/test.sh`` scripts." msgstr "" +"개발자는 `pre-commit `_ 라이브러리를 사용하여 사전 커밋 훅을" +" 워크플로에 통합할 수 있습니다. 사전 커밋 훅은 두 가지 기본 작업을 실행하도록 구성됩니다:``./dev/format.sh`` 및" +" ``./dev/test.sh`` 스크립트." 
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:110 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:128 msgid "There are multiple ways developers can use this:" -msgstr "" +msgstr "개발자가 이것을 사용할 수 있는 여러가지 방법이 있습니다:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:112 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:130 msgid "Install the pre-commit hook to your local git directory by simply running:" -msgstr "" +msgstr "간단하게 실행하여 로컬 git 디렉터리에 사전 커밋 훅을 설치하세요:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:118 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:136 msgid "" "Each ``git commit`` will trigger the execution of formatting and " "linting/test scripts." -msgstr "" +msgstr "각 ``git 커밋``은 포맷 및 린팅/테스트 스크립트의 실행을 트리거합니다." -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:119 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:138 +#, fuzzy msgid "" "If in a hurry, bypass the hook using ``--no-verify`` with the ``git " -"commit`` command. ::" -msgstr "" +"commit`` command." +msgstr "급한 경우 ``git commit`` 명령과 함께 `--no-verify``를 사용하여 훅을 넘기세요:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:124 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:145 msgid "" "For developers who prefer not to install the hook permanently, it is " "possible to execute a one-time check prior to committing changes by using" " the following command:" -msgstr "" +msgstr "훅을 영구적으로 설치하지 않으려는 개발자의 경우 다음 명령을 사용하여 변경 사항을 커밋하기 전에 일회성 검사를 실행할 수 있습니다:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:130 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:152 msgid "" "This executes the formatting and linting checks/tests on all the files " "without modifying the default behavior of ``git commit``." 
-msgstr "" +msgstr "이렇게 하면 ``git commit``의 기본 동작을 수정하지 않고 모든 파일에 대해 포맷 및 린팅 검사/테스트를 실행합니다." -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:133 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:156 msgid "Run Github Actions (CI) locally" -msgstr "" +msgstr "로컬에서 Github Action(CI) 실행하기" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:135 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:158 +#, fuzzy msgid "" "Developers could run the full set of Github Actions workflows under their" " local environment by using `Act `_. " "Please refer to the installation instructions under the linked repository" -" and run the next command under Flower main cloned repository folder::" +" and run the next command under Flower main cloned repository folder:" msgstr "" +"개발자는 `Act `_를 사용하여 로컬 환경에서 전체 Github " +"Actions 워크플로우 세트를 실행할 수 있습니다. 링크된 레포지토리 아래의 설치 지침을 참조하여 Flower 메인 클론 " +"레포지토리 폴더 아래에서 다음 명령을 실행하세요::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:142 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:167 msgid "" "The Flower default workflow would run by setting up the required Docker " "machines underneath." -msgstr "" +msgstr "Flower 기본 워크플로우는 아래에 필요한 Docker 머신을 설정하여 실행합니다." -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:147 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:171 msgid "Build Release" -msgstr "" +msgstr "릴리즈 빌드" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:149 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:173 +#, fuzzy msgid "" "Flower uses Poetry to build releases. The necessary command is wrapped in" -" a simple script::" -msgstr "" +" a simple script:" +msgstr "Flower는 Poetry를 사용하여 릴리즈를 빌드합니다. 
필요한 명령은 간단한 스크립트로 래핑됩니다::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:154 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:180 +#, fuzzy msgid "" -"The resulting :code:`.whl` and :code:`.tar.gz` releases will be stored in" -" the :code:`/dist` subdirectory." -msgstr "" +"The resulting ``.whl`` and ``.tar.gz`` releases will be stored in the " +"``/dist`` subdirectory." +msgstr "결과물인 :code:`.whl` 및 :code:`.tar.gz` 릴리즈는 :code:`/dist` 하위 디렉터리에 저장됩니다." -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:159 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:184 msgid "Build Documentation" -msgstr "" +msgstr "문서 빌드" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:161 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:186 +#, fuzzy msgid "" "Flower's documentation uses `Sphinx `_. " "There's no convenience script to re-build the documentation yet, but it's" -" pretty easy::" +" pretty easy:" msgstr "" +"Flower의 문서는 `Sphinx `_를 사용합니다. 아직 문서를 다시 작성할" +" 수 있는 편리한 스크립트는 없지만 다음과 같이 쉽게 작성할 수 있습니다:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:167 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:194 msgid "This will generate HTML documentation in ``doc/build/html``." -msgstr "" +msgstr "그러면 ``doc/build/html``에 HTML 문서가 생성됩니다." 
-#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: FedBN in PyTorch - From Centralized To Federated" -msgstr "" +#: ../../source/docker/enable-tls.rst:2 +#, fuzzy +msgid "Enable TLS for Secure Connections" +msgstr "보안 연결을 위한 SSL 사용 설정" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 +#: ../../source/docker/enable-tls.rst:4 msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload with `FedBN " -"`_, a federated training strategy " -"designed for non-iid data. We are using PyTorch to train a Convolutional " -"Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " -"When applying FedBN, only few changes needed compared to :doc:`Example: " -"PyTorch - From Centralized To Federated `." +"When operating in a production environment, it is strongly recommended to" +" enable Transport Layer Security (TLS) for each Flower Component to " +"ensure secure communication." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:9 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:10 -msgid "Centralized Training" -msgstr "" +#: ../../source/docker/enable-tls.rst:7 +#, fuzzy +msgid "" +"To enable TLS, you will need a PEM-encoded root certificate, a PEM-" +"encoded private key and a PEM-encoded certificate chain." +msgstr "SSL을 사용하려면 PEM으로 인코딩된 루트 인증서, PEM으로 인코딩된 개인 키 및 PEM으로 인코딩된 인증서 체인이 필요합니다." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 +#: ../../source/docker/enable-tls.rst:12 msgid "" -"All files are revised based on :doc:`Example: PyTorch - From Centralized " -"To Federated `. The only " -"thing to do is modifying the file called :code:`cifar.py`, revised part " -"is shown below:" +"For testing purposes, you can generate your own self-signed certificates." 
+" The `Enable SSL connections `__ page contains a section that" +" will guide you through the process." msgstr "" +"테스트 목적으로 자체 서명된 인증서를 생성할 수 있습니다. 'SSL 연결 사용 " +"`__ 페이지에 프로세스를 안내하는 섹션이 있습니다." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:13 +#: ../../source/docker/enable-tls.rst:17 +#, fuzzy msgid "" -"The model architecture defined in class Net() is added with Batch " -"Normalization layers accordingly." +"Because Flower containers, by default, run with a non-root user ``app``, " +"the mounted files and directories must have the proper permissions for " +"the user ID ``49999``." msgstr "" +"기본적으로 Flower 컨테이너는 루트가 아닌 사용자 ``app``로 실행되므로 마운트된 파일과 디렉터리에 사용자 ID " +"``49999``에 대한 적절한 권한이 있어야 합니다. 예를 들어, ``certificates/`` 디렉터리에 있는 모든 파일의 " +"사용자 ID를 변경하려면 ``sudo chown -R 49999:49999 certificates/*``를 실행하면 됩니다." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:41 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:157 -msgid "You can now run your machine learning workload:" +#: ../../source/docker/enable-tls.rst:20 +#, fuzzy +msgid "" +"For example, to change the user ID of all files in the ``certificates/`` " +"directory, you can run ``sudo chown -R 49999:49999 certificates/*``." msgstr "" +"기본적으로 Flower 컨테이너는 루트가 아닌 사용자 ``app``로 실행되므로 마운트된 파일과 디렉터리에 사용자 ID " +"``49999``에 대한 적절한 권한이 있어야 합니다. 예를 들어, ``certificates/`` 디렉터리에 있는 모든 파일의 " +"사용자 ID를 변경하려면 ``sudo chown -R 49999:49999 certificates/*``를 실행하면 됩니다." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 +#: ../../source/docker/enable-tls.rst:23 +#: ../../source/docker/persist-superlink-state.rst:15 msgid "" -"So far this should all look fairly familiar if you've used PyTorch " -"before. Let's take the next step and use what we've built to create a " -"federated learning system within FedBN, the system consists of one server" -" and two clients." 
+"If you later want to delete the directory, you can change the user ID " +"back to the current user ID by running ``sudo chown -R $USER:$(id -gn) " +"state``." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:51 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:167 -msgid "Federated Training" -msgstr "" +#: ../../source/docker/enable-tls.rst:27 +#, fuzzy +msgid "SuperLink" +msgstr "flower 초연결" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 +#: ../../source/docker/enable-tls.rst:29 msgid "" -"If you have read :doc:`Example: PyTorch - From Centralized To Federated " -"`, the following parts are" -" easy to follow, only :code:`get_parameters` and :code:`set_parameters` " -"function in :code:`client.py` needed to revise. If not, please read the " -":doc:`Example: PyTorch - From Centralized To Federated `. first." +"Assuming all files we need are in the local ``certificates`` directory, " +"we can use the flag ``--volume`` to mount the local directory into the " +"``/app/certificates/`` directory of the container:" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:56 -msgid "" -"Our example consists of one *server* and two *clients*. In FedBN, " -":code:`server.py` keeps unchanged, we can start the server directly." +#: ../../source/docker/enable-tls.rst +msgid "Understanding the command" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:62 -msgid "" -"Finally, we will revise our *client* logic by changing " -":code:`get_parameters` and :code:`set_parameters` in :code:`client.py`, " -"we will exclude batch normalization parameters from model parameter list " -"when sending to or receiving from the server." 
+#: ../../source/docker/enable-tls.rst:45 ../../source/docker/enable-tls.rst:92 +#: ../../source/docker/enable-tls.rst:125 +#: ../../source/docker/tutorial-quickstart-docker.rst:66 +#: ../../source/docker/tutorial-quickstart-docker.rst:103 +#: ../../source/docker/tutorial-quickstart-docker.rst:217 +#: ../../source/docker/tutorial-quickstart-docker.rst:305 +#, fuzzy +msgid "``docker run``: This tells Docker to run a container from an image." +msgstr "``docker run``: 새 Docker 컨테이너를 실행하는 명령입니다." + +#: ../../source/docker/enable-tls.rst:46 ../../source/docker/enable-tls.rst:93 +#: ../../source/docker/enable-tls.rst:126 +#: ../../source/docker/tutorial-quickstart-docker.rst:67 +#: ../../source/docker/tutorial-quickstart-docker.rst:104 +#: ../../source/docker/tutorial-quickstart-docker.rst:218 +#: ../../source/docker/tutorial-quickstart-docker.rst:306 +msgid "``--rm``: Remove the container once it is stopped or the command exits." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:85 -msgid "Now, you can now open two additional terminal windows and run" +#: ../../source/docker/enable-tls.rst +msgid "" +"``--volume ./certificates/:/app/certificates/:ro``: Mount the " +"``certificates`` directory in" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:91 +#: ../../source/docker/enable-tls.rst msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your (previously centralized) PyTorch project run federated " -"learning with FedBN strategy across two clients. Congratulations!" 
+"the current working directory of the host machine as a read-only volume " +"at the" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:94 -#: ../../source/example-jax-from-centralized-to-federated.rst:277 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:310 -#: ../../source/tutorial-quickstart-jax.rst:283 -msgid "Next Steps" +#: ../../source/docker/enable-tls.rst +msgid "``/app/certificates`` directory inside the container." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:96 +#: ../../source/docker/enable-tls.rst msgid "" -"The full source code for this example can be found `here " -"`_. Our example is of course somewhat over-" -"simplified because both clients load the exact same dataset, which isn't " -"realistic. You're now prepared to explore this topic further. How about " -"using different subsets of CIFAR-10 on each client? How about adding more" -" clients?" +"This allows the container to access the TLS certificates that are stored " +"in the certificates" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:2 -msgid "Example: JAX - Run JAX Federated" +#: ../../source/docker/enable-tls.rst +msgid "directory." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:4 -#: ../../source/tutorial-quickstart-jax.rst:10 +#: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing JAX workload. We are using JAX to train a linear " -"regression model on a scikit-learn dataset. We will structure the example" -" similar to our `PyTorch - From Centralized To Federated " -"`_ walkthrough. First, we build a centralized " -"training approach based on the `Linear Regression with JAX " -"`_" -" tutorial`. Then, we build upon the centralized training code to run the " -"training in a federated fashion." 
+":substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:10 -#: ../../source/tutorial-quickstart-jax.rst:16 +#: ../../source/docker/enable-tls.rst msgid "" -"Before we start building our JAX example, we need install the packages " -":code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`:" -msgstr "" - -#: ../../source/example-jax-from-centralized-to-federated.rst:18 -#: ../../source/tutorial-quickstart-jax.rst:24 -msgid "Linear Regression with JAX" +"tag of the image. The tag :substitution-code:`|stable_flwr_version|` " +"represents a specific version of the image." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:20 -#: ../../source/tutorial-quickstart-jax.rst:26 +#: ../../source/docker/enable-tls.rst msgid "" -"We begin with a brief description of the centralized training code based " -"on a :code:`Linear Regression` model. If you want a more in-depth " -"explanation of what's going on then have a look at the official `JAX " -"documentation `_." +"``--ssl-ca-certfile certificates/ca.crt``: Specify the location of the CA" +" certificate file" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:23 -#: ../../source/tutorial-quickstart-jax.rst:29 +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "inside the container." +msgstr "VSCode Dev Container에서 개발" + +#: ../../source/docker/enable-tls.rst msgid "" -"Let's create a new file called :code:`jax_training.py` with all the " -"components required for a traditional (centralized) linear regression " -"training. First, the JAX packages :code:`jax` and :code:`jaxlib` need to " -"be imported. In addition, we need to import :code:`sklearn` since we use " -":code:`make_regression` for the dataset and :code:`train_test_split` to " -"split the dataset into a training and test set. 
You can see that we do " -"not yet import the :code:`flwr` package for federated learning. This will" -" be done later." +"The ``certificates/ca.crt`` file is a certificate that is used to verify " +"the identity of the" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:37 -#: ../../source/tutorial-quickstart-jax.rst:43 +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "SuperLink." +msgstr "flower 초연결" + +#: ../../source/docker/enable-tls.rst msgid "" -"The :code:`load_data()` function loads the mentioned training and test " -"sets." +"``--ssl-certfile certificates/server.pem``: Specify the location of the " +"SuperLink's" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:47 -#: ../../source/tutorial-quickstart-jax.rst:53 -msgid "" -"The model architecture (a very simple :code:`Linear Regression` model) is" -" defined in :code:`load_model()`." +#: ../../source/docker/enable-tls.rst +msgid "TLS certificate file inside the container." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:59 -#: ../../source/tutorial-quickstart-jax.rst:65 +#: ../../source/docker/enable-tls.rst msgid "" -"We now need to define the training (function :code:`train()`), which " -"loops over the training set and measures the loss (function " -":code:`loss_fn()`) for each batch of training examples. The loss function" -" is separate since JAX takes derivatives with a :code:`grad()` function " -"(defined in the :code:`main()` function and called in :code:`train()`)." +"The ``certificates/server.pem`` file is used to identify the SuperLink " +"and to encrypt the" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:77 -#: ../../source/tutorial-quickstart-jax.rst:83 -msgid "" -"The evaluation of the model is defined in the function " -":code:`evaluation()`. The function takes all test examples and measures " -"the loss of the linear regression model." 
+#: ../../source/docker/enable-tls.rst +msgid "data that is transmitted over the network." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:88 -#: ../../source/tutorial-quickstart-jax.rst:94 +#: ../../source/docker/enable-tls.rst msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our model using JAX. " -"As already mentioned, the :code:`jax.grad()` function is defined in " -":code:`main()` and passed to :code:`train()`." +"``--ssl-keyfile certificates/server.key``: Specify the location of the " +"SuperLink's" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:105 -#: ../../source/tutorial-quickstart-jax.rst:111 -msgid "You can now run your (centralized) JAX linear regression workload:" +#: ../../source/docker/enable-tls.rst +msgid "TLS private key file inside the container." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:111 -#: ../../source/tutorial-quickstart-jax.rst:117 +#: ../../source/docker/enable-tls.rst msgid "" -"So far this should all look fairly familiar if you've used JAX before. " -"Let's take the next step and use what we've built to create a simple " -"federated learning system consisting of one server and two clients." +"The ``certificates/server.key`` file is used to decrypt the data that is " +"transmitted over" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:115 -#: ../../source/tutorial-quickstart-jax.rst:121 -msgid "JAX meets Flower" +#: ../../source/docker/enable-tls.rst +msgid "the network." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:117 -#: ../../source/tutorial-quickstart-jax.rst:123 +#: ../../source/docker/enable-tls.rst:72 +#, fuzzy +msgid "SuperNode" +msgstr "run\\_supernode" + +#: ../../source/docker/enable-tls.rst:74 +#, fuzzy msgid "" -"The concept of federating an existing workload is always the same and " -"easy to understand. 
We have to start a *server* and then use the code in " -":code:`jax_training.py` for the *clients* that are connected to the " -"*server*. The *server* sends model parameters to the clients. The " -"*clients* run the training and update the parameters. The updated " -"parameters are sent back to the *server*, which averages all received " -"parameter updates. This describes one round of the federated learning " -"process, and we repeat this for multiple rounds." +"Assuming that the ``ca.crt`` certificate already exists locally, we can " +"use the flag ``--volume`` to mount the local certificate into the " +"container's ``/app/`` directory." msgstr "" +"인증서가 이미 로컬에 존재한다고 가정하면, ``--volume`` 플래그를 사용하여 로컬 인증서를 컨테이너의 ``/app/`` " +"디렉터리에 마운트할 수 있습니다. 이렇게 하면 SuperNode가 컨테이너 내의 인증서에 액세스할 수 있습니다. 컨테이너를 시작할 " +"때 ``--root-certificates`` 플래그를 사용하세요." -#: ../../source/example-jax-from-centralized-to-federated.rst:123 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:181 -#: ../../source/tutorial-quickstart-jax.rst:129 +#: ../../source/docker/enable-tls.rst:79 msgid "" -"Our example consists of one *server* and two *clients*. Let's set up " -":code:`server.py` first. The *server* needs to import the Flower package " -":code:`flwr`. Next, we use the :code:`start_server` function to start a " -"server and tell it to perform three rounds of federated learning." +"If you're generating self-signed certificates and the ``ca.crt`` " +"certificate doesn't exist on the SuperNode, you can copy it over after " +"the generation step." 
msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:133 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 -#: ../../source/tutorial-quickstart-jax.rst:139 -msgid "We can already start the *server*:" +#: ../../source/docker/enable-tls.rst +msgid "``--volume ./ca.crt:/app/ca.crt/:ro``: Mount the ``ca.crt`` file from the" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:139 -#: ../../source/tutorial-quickstart-jax.rst:145 +#: ../../source/docker/enable-tls.rst msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined JAX training in :code:`jax_training.py`. Our" -" *client* needs to import :code:`flwr`, but also :code:`jax` and " -":code:`jaxlib` to update the parameters on our JAX model:" +"current working directory of the host machine as a read-only volume at " +"the ``/app/ca.crt``" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:154 -#: ../../source/tutorial-quickstart-jax.rst:160 +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "directory inside the container." +msgstr "VSCode Dev Container에서 개발" + +#: ../../source/docker/enable-tls.rst msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`FlowerClient`. :code:`NumPyClient` is slightly " -"easier to implement than :code:`Client` if you use a framework with good " -"NumPy interoperability (like JAX) because it avoids some of the " -"boilerplate that would otherwise be necessary. 
:code:`FlowerClient` needs" -" to implement four methods, two methods for getting/setting model " -"parameters, one method for training the model, and one method for testing" -" the model:" -msgstr "" - -#: ../../source/example-jax-from-centralized-to-federated.rst:161 -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid ":code:`set_parameters (optional)`" +":substitution-code:`flwr/supernode:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:160 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -#: ../../source/tutorial-quickstart-jax.rst:166 +#: ../../source/docker/enable-tls.rst msgid "" -"set the model parameters on the local model that are received from the " -"server" +"``--root-certificates ca.crt``: This specifies the location of the CA " +"certificate file" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:161 -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid "transform parameters to NumPy :code:`ndarray`'s" +#: ../../source/docker/enable-tls.rst +msgid "The ``ca.crt`` file is used to verify the identity of the SuperLink." 
msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:162 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:220 -#: ../../source/tutorial-quickstart-jax.rst:168 -msgid "" -"loop over the list of model parameters received as NumPy " -":code:`ndarray`'s (think list of neural network layers)" +#: ../../source/docker/enable-tls.rst:105 +msgid "SuperExec" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:163 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 -#: ../../source/tutorial-quickstart-jax.rst:169 -#: ../../source/tutorial-quickstart-pytorch.rst:155 -#: ../../source/tutorial-quickstart-scikitlearn.rst:118 -msgid ":code:`get_parameters`" +#: ../../source/docker/enable-tls.rst:107 +msgid "" +"Assuming all files we need are in the local ``certificates`` directory " +"where the SuperExec will be executed from, we can use the flag " +"``--volume`` to mount the local directory into the ``/app/certificates/``" +" directory of the container:" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:164 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:222 -#: ../../source/tutorial-quickstart-jax.rst:170 +#: ../../source/docker/enable-tls.rst msgid "" -"get the model parameters and return them as a list of NumPy " -":code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects)" +":substitution-code:`flwr/superexec:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:167 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 -#: ../../source/tutorial-quickstart-pytorch.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 -msgid ":code:`fit`" +#: ../../source/docker/enable-tls.rst +msgid "SuperExec." 
msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:166 -#: ../../source/example-jax-from-centralized-to-federated.rst:170 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:224 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:228 -#: ../../source/tutorial-quickstart-jax.rst:172 -#: ../../source/tutorial-quickstart-jax.rst:176 +#: ../../source/docker/enable-tls.rst msgid "" -"update the parameters of the local model with the parameters received " -"from the server" +"``--ssl-certfile certificates/server.pem``: Specify the location of the " +"SuperExec's" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:167 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 -msgid "train the model on the local training set" +#: ../../source/docker/enable-tls.rst +msgid "" +"The ``certificates/server.pem`` file is used to identify the SuperExec " +"and to encrypt the" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:168 -#: ../../source/tutorial-quickstart-jax.rst:174 -msgid "get the updated local model parameters and return them to the server" +#: ../../source/docker/enable-tls.rst +msgid "" +"``--ssl-keyfile certificates/server.key``: Specify the location of the " +"SuperExec's" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:172 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 -#: ../../source/tutorial-quickstart-jax.rst:178 -#: ../../source/tutorial-quickstart-pytorch.rst:164 -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 -msgid ":code:`evaluate`" +#: ../../source/docker/enable-tls.rst +msgid "" +"``--executor-config root-" +"certificates=\\\"certificates/superlink_ca.crt\\\"``: Specify the" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:171 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:229 -#: 
../../source/tutorial-quickstart-jax.rst:177 -msgid "evaluate the updated model on the local test set" +#: ../../source/docker/enable-tls.rst +msgid "" +"location of the CA certificate file inside the container that the " +"SuperExec executor" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:172 -#: ../../source/tutorial-quickstart-jax.rst:178 -msgid "return the local loss to the server" +#: ../../source/docker/enable-tls.rst +msgid "should use to verify the SuperLink's identity." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:174 -#: ../../source/tutorial-quickstart-jax.rst:180 +#: ../../source/docker/index.rst:2 +msgid "Run Flower using Docker" +msgstr "Docker를 사용하여 Flower 실행" + +#: ../../source/docker/index.rst:4 msgid "" -"The challenging part is to transform the JAX model parameters from " -":code:`DeviceArray` to :code:`NumPy ndarray` to make them compatible with" -" `NumPyClient`." +"Start your Flower journey with our pre-made Docker images on Docker Hub, " +"supporting ``amd64`` and ``arm64v8`` architectures." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:176 -#: ../../source/tutorial-quickstart-jax.rst:182 +#: ../../source/docker/index.rst:7 msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`evaluate()` previously " -"defined in :code:`jax_training.py`. So what we really do here is we tell " -"Flower through our :code:`NumPyClient` subclass which of our already " -"defined functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +"Our Quickstart guide walks you through containerizing a Flower project " +"and running it end to end using Docker." 
msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:245 -#: ../../source/tutorial-quickstart-jax.rst:251 -msgid "Having defined the federation process, we can run it." +#: ../../source/docker/index.rst:11 +#, fuzzy +msgid "Getting Started" +msgstr "시작하기" + +#: ../../source/docker/index.rst:19 +msgid "Running in Production" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:268 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:301 -#: ../../source/tutorial-quickstart-jax.rst:274 -msgid "And that's it. You can now open two additional terminal windows and run" +#: ../../source/docker/index.rst:28 +#, fuzzy +msgid "Advanced Options" +msgstr "고급 Docker 옵션" + +#: ../../source/docker/index.rst:40 +#, fuzzy +msgid "Run Flower using Docker Compose" +msgstr "Docker를 사용하여 Flower 실행" + +#: ../../source/docker/persist-superlink-state.rst:2 +msgid "Persist the State of the SuperLink" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:274 -#: ../../source/tutorial-quickstart-jax.rst:280 +#: ../../source/docker/persist-superlink-state.rst:4 +#, fuzzy msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your JAX project run federated learning across two clients. " -"Congratulations!" +"By default, the Flower SuperLink keeps its state in-memory. When using " +"the Docker flag ``--rm``, the state is not persisted between container " +"starts." msgstr "" +"기본적으로 Flower SuperLink는 상태를 in-memory에 유지합니다. Docker 플래그 `--rm``을 사용하는 경우" +" 컨테이너 시작 사이에 상태가 유지되지 않습니다. 아래에서 호스트 시스템의 파일에 상태를 저장하는 방법을 보여드리겠습니다." -#: ../../source/example-jax-from-centralized-to-federated.rst:279 -#: ../../source/tutorial-quickstart-jax.rst:285 +#: ../../source/docker/persist-superlink-state.rst:7 msgid "" -"The source code of this example was improved over time and can be found " -"here: `Quickstart JAX `_. 
Our example is somewhat over-simplified because both " -"clients load the same dataset." +"If you want to persist the state of the SuperLink on your host system, " +"all you need to do is specify a directory where you want to save the file" +" on your host system and a name for the database file." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:282 -#: ../../source/tutorial-quickstart-jax.rst:288 +#: ../../source/docker/persist-superlink-state.rst:11 msgid "" -"You're now prepared to explore this topic further. How about using a more" -" sophisticated model or using a different dataset? How about adding more " -"clients?" +"By default, the SuperLink container runs with a non-root user called " +"``app`` with the user ID ``49999``. It is recommended to create a new " +"directory and change the user ID of the directory to ``49999`` to ensure " +"the mounted directory has the proper permissions." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: PyTorch - From Centralized To Federated" +#: ../../source/docker/persist-superlink-state.rst:21 +#, fuzzy +msgid "" +"In the example below, we create a new directory called ``state``, change " +"the user ID and tell Docker via the flag ``--volume`` to mount the local " +"``state`` directory into the ``/app/state`` directory of the container. " +"Lastly, we use the flag ``--database`` to specify the name of the " +"database file." msgstr "" +"아래 예에서는 새 디렉터리를 생성하고, 사용자 ID를 변경하고, 플래그 ``--volume``을 통해 Docker에게 로컬 " +"``state`` 디렉터리를 컨테이너의 ``/app/state`` 디렉터리에 마운트하도록 지시합니다. 또한 " +"``--database`` 플래그를 사용하여 데이터베이스 파일의 이름을 지정합니다." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:4 +#: ../../source/docker/persist-superlink-state.rst:36 +#, fuzzy msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload. 
We are using PyTorch to" -" train a Convolutional Neural Network on the CIFAR-10 dataset. First, we " -"introduce this machine learning task with a centralized training approach" -" based on the `Deep Learning with PyTorch " -"`_ " -"tutorial. Then, we build upon the centralized training code to run the " -"training in a federated fashion." +"As soon as the SuperLink starts, the file ``state.db`` is created in the " +"``state`` directory on your host system. If the file already exists, the " +"SuperLink tries to restore the state from the file. To start the " +"SuperLink with an empty database, ensure that there is no database called" +" ``state.db`` in the ``state`` directory (``rm state.db``) before you " +"execute the ``docker run`` command above." msgstr "" +"SuperLink가 시작되자마자 호스트 시스템의 ``state`` 디렉터리에 ``state.db`` 파일이 생성됩니다. 파일이 이미" +" 존재하는 경우 SuperLink는 파일에서 상태를 복원하려고 시도합니다. 빈 데이터베이스로 SuperLink를 시작하려면 " +"``state.db`` 파일을 제거하면 됩니다." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 +#: ../../source/docker/pin-version.rst:2 +#, fuzzy +msgid "Pin a Docker Image to a Specific Version" +msgstr "특정 버전에 Docker 이미지 고정하기" + +#: ../../source/docker/pin-version.rst:4 +#, fuzzy msgid "" -"We begin with a brief description of the centralized CNN training code. " -"If you want a more in-depth explanation of what's going on then have a " -"look at the official `PyTorch tutorial " -"`_." +"It may happen that we update the images behind the tags. Such updates " +"usually include security updates of system dependencies that should not " +"change the functionality of Flower. However, if you want to ensure that " +"you use a fixed version of the Docker image in your deployments, you can " +"`specify the digest " +"`_ of the image instead of the tag." msgstr "" +"태그 뒤에 있는 이미지가 업데이트될 수 있습니다. 이러한 업데이트에는 일반적으로 Flower의 기능을 변경해서는 안 되는 시스템 " +"의존성에 대한 보안 업데이트가 포함됩니다. 그러나 항상 동일한 이미지를 사용하려면 태그 대신 이미지의 해시를 지정할 수 있습니다." 
-#: ../../source/example-pytorch-from-centralized-to-federated.rst:15 +#: ../../source/docker/pin-version.rst:14 +#, fuzzy msgid "" -"Let's create a new file called :code:`cifar.py` with all the components " -"required for a traditional (centralized) training on CIFAR-10. First, all" -" required packages (such as :code:`torch` and :code:`torchvision`) need " -"to be imported. You can see that we do not import any package for " -"federated learning. You can keep all these imports as they are even when " -"we add the federated learning components at a later point." +"The following command returns the current image digest referenced by the " +":substitution-code:`superlink:|stable_flwr_version|` tag:" +msgstr "다음 명령은 ``superlink:1.8.0`` 태그가 참조하는 현재 이미지 해시를 반환합니다:" + +#: ../../source/docker/pin-version.rst:23 +msgid "This will output" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:32 +#: ../../source/docker/pin-version.rst:30 +#, fuzzy +msgid "Next, we can pin the digest when running a new SuperLink container:" +msgstr "다음으로, 새 SuperLink 컨테이너를 실행할 때 해시를 고정할 수 있습니다:" + +#: ../../source/docker/run-as-root-user.rst:2 +#, fuzzy +msgid "Run with Root User Privileges" +msgstr "루트 사용자 권한으로 실행" + +#: ../../source/docker/run-as-root-user.rst:4 +#, fuzzy msgid "" -"As already mentioned we will use the CIFAR-10 dataset for this machine " -"learning workload. The model architecture (a very simple Convolutional " -"Neural Network) is defined in :code:`class Net()`." +"Flower Docker images, by default, run with a non-root user " +"(username/groupname: ``app``, UID/GID: ``49999``). Using root user is " +"**not recommended** unless it is necessary for specific tasks during the " +"build process." msgstr "" +"기본적으로 Flower Docker 이미지는 루트 사용자가 아닌 사용자(사용자명/그룹명:``app``, UID/GID: " +"``49999``)로 실행됩니다. 빌드 프로세스 중 특정 작업에 필요한 경우가 아니라면 루트 사용자를 사용하지 않는 것이 좋습니다." +" 보안 모범 사례를 유지하려면 항상 프로덕션 환경에서 루트 사용자가 아닌 사용자로 컨테이너를 실행해야 합니다." 
-#: ../../source/example-pytorch-from-centralized-to-federated.rst:56 +#: ../../source/docker/run-as-root-user.rst:8 msgid "" -"The :code:`load_data()` function loads the CIFAR-10 training and test " -"sets. The :code:`transform` normalized the data after loading." +"Always make sure to run the container as a non-root user in production to" +" maintain security best practices." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:74 +#: ../../source/docker/run-as-root-user.rst:12 +#, fuzzy +msgid "Run a Container with Root User Privileges" +msgstr "**루트 사용자 권한으로 컨테이너 실행하기**" + +#: ../../source/docker/run-as-root-user.rst:14 msgid "" -"We now need to define the training (function :code:`train()`) which loops" -" over the training set, measures the loss, backpropagates it, and then " -"takes one optimizer step for each batch of training examples." -msgstr "" +"Run the Docker image with the ``-u`` flag and specify ``root`` as the " +"username:" +msgstr "``-u`` 플래그를 사용하여 Docker 이미지를 실행하고 사용자 이름으로 ``root``를 지정합니다:" + +#: ../../source/docker/run-as-root-user.rst:21 +msgid "This command will run the Docker container with root user privileges." +msgstr "이 명령은 루트 사용자 권한으로 Docker 컨테이너를 실행합니다." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:76 +#: ../../source/docker/run-as-root-user.rst:24 +#, fuzzy +msgid "Run the Build Process with Root User Privileges" +msgstr "**루트 사용자 권한으로 빌드 프로세스를 실행합니다**" + +#: ../../source/docker/run-as-root-user.rst:26 msgid "" -"The evaluation of the model is defined in the function :code:`test()`. " -"The function loops over all test samples and measures the loss of the " -"model based on the test dataset." +"If you want to switch to the root user during the build process of the " +"Docker image to install missing system dependencies, you can use the " +"``USER root`` directive within your Dockerfile." 
msgstr "" +"Docker 이미지 빌드 과정에서 루트 사용자로 전환하여 누락된 시스템 의존성을 설치하려면 Dockerfile 내에서 ``USER " +"root`` 지시어를 사용할 수 있습니다." + +#: ../../source/docker/run-as-root-user.rst:30 +#, fuzzy +msgid "SuperNode Dockerfile" +msgstr "SuperNode Dockerfile 만들기" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:136 +#: ../../source/docker/run-as-subprocess.rst:2 +#, fuzzy +msgid "Run ClientApp as a Subprocess" +msgstr "린터 및 테스트 실행" + +#: ../../source/docker/run-as-subprocess.rst:4 msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our CNN on CIFAR-10." +"In this mode, the ClientApp is executed as a subprocess within the " +"SuperNode Docker container, rather than running in a separate container. " +"This approach reduces the number of running containers, which can be " +"beneficial for environments with limited resources. However, it also " +"means that the ClientApp is no longer isolated from the SuperNode, which " +"may introduce additional security concerns." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:163 +#: ../../source/docker/run-as-subprocess.rst:13 msgid "" -"So far, this should all look fairly familiar if you've used PyTorch " -"before. Let's take the next step and use what we've built to create a " -"simple federated learning system consisting of one server and two " -"clients." +"Before running the ClientApp as a subprocess, ensure that the FAB " +"dependencies have been installed in the SuperNode images. 
This can be " +"done by extending the SuperNode image:" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:169 +#: ../../source/docker/run-as-subprocess.rst:17 +#, fuzzy +msgid "Dockerfile.supernode" +msgstr "Flower SuperNode" + +#: ../../source/docker/run-as-subprocess.rst:31 +#, fuzzy msgid "" -"The simple machine learning project discussed in the previous section " -"trains the model on a single dataset (CIFAR-10), we call this centralized" -" learning. This concept of centralized learning, as shown in the previous" -" section, is probably known to most of you, and many of you have used it " -"previously. Normally, if you'd want to run machine learning workloads in " -"a federated fashion, then you'd have to change most of your code and set " -"everything up from scratch. This can be a considerable effort." +"Next, build the SuperNode Docker image by running the following command " +"in the directory where Dockerfile is located:" msgstr "" +"다음으로, Dockerfile 및 ClientApp 코드가 있는 디렉터리에서 다음 명령을 실행하여 SuperNode Docker " +"이미지를 빌드합니다." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:173 -msgid "" -"However, with Flower you can evolve your pre-existing code into a " -"federated learning setup without the need for a major rewrite." +#: ../../source/docker/run-as-subprocess.rst:39 +msgid "Run the ClientApp as a Subprocess" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:175 +#: ../../source/docker/run-as-subprocess.rst:41 msgid "" -"The concept is easy to understand. We have to start a *server* and then " -"use the code in :code:`cifar.py` for the *clients* that are connected to " -"the *server*. The *server* sends model parameters to the clients. The " -"*clients* run the training and update the parameters. The updated " -"parameters are sent back to the *server* which averages all received " -"parameter updates. 
This describes one round of the federated learning " -"process and we repeat this for multiple rounds." +"Start the SuperNode with the flag ``--isolation subprocess``, which tells" +" the SuperNode to execute the ClientApp as a subprocess:" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:197 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:2 +#, fuzzy +msgid "Run Flower Quickstart Examples with Docker Compose" +msgstr "빠른 시작 튜토리얼" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:4 msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined centralized training in :code:`cifar.py`. " -"Our *client* needs to import :code:`flwr`, but also :code:`torch` to " -"update the parameters on our PyTorch model:" +"Flower provides a set of `quickstart examples " +"`_ to help you get " +"started with the framework. These examples are designed to demonstrate " +"the capabilities of Flower and by default run using the Simulation " +"Engine. This guide demonstrates how to run them using Flower's Deployment" +" Engine via Docker Compose." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:213 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:12 msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier" -" to implement than :code:`Client` if you use a framework with good NumPy " -"interoperability (like PyTorch or TensorFlow/Keras) because it avoids " -"some of the boilerplate that would otherwise be necessary. 
" -":code:`CifarClient` needs to implement four methods, two methods for " -"getting/setting model parameters, one method for training the model, and " -"one method for testing the model:" +"Some quickstart examples may have limitations or requirements that " +"prevent them from running on every environment. For more information, " +"please see Limitations_." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -msgid ":code:`set_parameters`" -msgstr "" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:15 +#: ../../source/docker/tutorial-quickstart-docker.rst:13 +#, fuzzy +msgid "Before you start, make sure that:" +msgstr "시작하기 전에 Docker daemon이 실행 중인지 확인하세요:" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:226 -msgid "get the updated local model weights and return them to the server" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:20 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:22 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:17 +#: ../../source/docker/tutorial-quickstart-docker.rst:15 +msgid "The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 -msgid "return the local loss and accuracy to the server" -msgstr "" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:21 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:18 +#: ../../source/docker/tutorial-quickstart-docker.rst:16 +#, fuzzy +msgid "The Docker daemon is running." +msgstr "Docker 데몬이 실행 중인지 확인하십시오." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:232 -msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`test()` previously " -"defined in :code:`cifar.py`. 
So what we really do here is we tell Flower " -"through our :code:`NumPyClient` subclass which of our already defined " -"functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:22 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:19 +msgid "Docker Compose is `installed `_." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:280 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:25 +#, fuzzy +msgid "Run the Quickstart Example" +msgstr "예시 요청" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:27 msgid "" -"All that's left to do it to define a function that loads both model and " -"data, creates a :code:`CifarClient`, and starts this client. You load " -"your data and model by using :code:`cifar.py`. Start :code:`CifarClient` " -"with the function :code:`fl.client.start_client()` by pointing it at the " -"same IP address we used in :code:`server.py`:" +"Clone the quickstart example you like to run. For example, ``quickstart-" +"pytorch``:" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:307 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:35 msgid "" -"in each window (make sure that the server is running before you do so) " -"and see your (previously centralized) PyTorch project run federated " -"learning across two clients. Congratulations!" 
+"Download the `compose.yml " +"`_" +" file into the example directory:" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:312 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:44 +#, fuzzy +msgid "Build and start the services using the following command:" +msgstr "다음 명령을 실행하여 가상 환경을 활성화합니다:" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:50 +#, fuzzy msgid "" -"The full source code for this example: `PyTorch: From Centralized To " -"Federated (Code) `_. Our example is, of course, " -"somewhat over-simplified because both clients load the exact same " -"dataset, which isn't realistic. You're now prepared to explore this topic" -" further. How about using different subsets of CIFAR-10 on each client? " -"How about adding more clients?" -msgstr "" +"Append the following lines to the end of the ``pyproject.toml`` file and " +"save it:" +msgstr "``pyproject.toml``에 다음 버전 제약 조건을 설정했는지 확인하세요:" -#: ../../source/explanation-differential-privacy.rst:2 -#: ../../source/explanation-differential-privacy.rst:11 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 -msgid "Differential Privacy" -msgstr "" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:52 +#: ../../source/docker/tutorial-quickstart-docker.rst:324 +#, fuzzy +msgid "pyproject.toml" +msgstr "또는 ``pyproject.toml``:" -#: ../../source/explanation-differential-privacy.rst:3 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:61 msgid "" -"The information in datasets like healthcare, financial transactions, user" -" preferences, etc., is valuable and has the potential for scientific " -"breakthroughs and provides important business insights. However, such " -"data is also sensitive and there is a risk of compromising individual " -"privacy." +"You can customize the string that follows ``tool.flwr.federations.`` to " +"fit your needs. However, please note that the string cannot contain a dot" +" (``.``)." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:6 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:64 msgid "" -"Traditional methods like anonymization alone would not work because of " -"attacks like Re-identification and Data Linkage. That's where " -"differential privacy comes in. It provides the possibility of analyzing " -"data while ensuring the privacy of individuals." +"In this example, ``local-deployment`` has been used. Just remember to " +"replace ``local-deployment`` with your chosen name in both the " +"``tool.flwr.federations.`` string and the corresponding ``flwr run .`` " +"command." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:68 +#, fuzzy +msgid "Run the example:" +msgstr "전체 코드 예제" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:74 +msgid "Follow the logs of the SuperExec service:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:12 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:80 msgid "" -"Imagine two datasets that are identical except for a single record (for " -"instance, Alice's data). Differential Privacy (DP) guarantees that any " -"analysis (M), like calculating the average income, will produce nearly " -"identical results for both datasets (O and O' would be similar). This " -"preserves group patterns while obscuring individual details, ensuring the" -" individual's information remains hidden in the crowd." +"That is all it takes! You can monitor the progress of the run through the" +" logs of the SuperExec." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:-1 -msgid "DP Intro" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:84 +msgid "Run a Different Quickstart Example" msgstr "" -#: ../../source/explanation-differential-privacy.rst:22 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:86 msgid "" -"One of the most commonly used mechanisms to achieve DP is adding enough " -"noise to the output of the analysis to mask the contribution of each " -"individual in the data while preserving the overall accuracy of the " -"analysis." +"To run a different quickstart example, such as ``quickstart-tensorflow``," +" first, shut down the Docker Compose services of the current example:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:25 -msgid "Formal Definition" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:93 +msgid "After that, you can repeat the steps above." msgstr "" -#: ../../source/explanation-differential-privacy.rst:26 -msgid "" -"Differential Privacy (DP) provides statistical guarantees against the " -"information an adversary can infer through the output of a randomized " -"algorithm. It provides an unconditional upper bound on the influence of a" -" single individual on the output of the algorithm by adding noise [1]. 
A " -"randomized mechanism M provides (:math:`\\epsilon`, " -":math:`\\delta`)-differential privacy if for any two neighboring " -"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " -"record, and for all possible outputs S ⊆ Range(A):" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:96 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 +#, fuzzy +msgid "Limitations" +msgstr "동기" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 +#, fuzzy +msgid "Quickstart Example" +msgstr "빠른 시작" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 +#, fuzzy +msgid "quickstart-fastai" +msgstr "빠른 시작 튜토리얼" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:104 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:106 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:115 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:121 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:123 +#: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:399 +#: ../../source/ref-changelog.md:676 ../../source/ref-changelog.md:740 +#: ../../source/ref-changelog.md:798 ../../source/ref-changelog.md:867 +#: ../../source/ref-changelog.md:929 +msgid "None" msgstr "" -#: ../../source/explanation-differential-privacy.rst:32 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 +#, fuzzy +msgid "quickstart-huggingface" +msgstr "빠른 시작 튜토리얼" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 +#, fuzzy +msgid "quickstart-jax" +msgstr "빠른 시작" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:108 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:125 msgid "" -"\\small\n" -"P[M(D_{1} \\in A)] \\leq e^{\\delta} 
P[M(D_{2} \\in A)] + \\delta" +"The example has not yet been updated to work with the latest ``flwr`` " +"version." msgstr "" -#: ../../source/explanation-differential-privacy.rst:38 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:109 +#, fuzzy +msgid "quickstart-mlcube" +msgstr "빠른 시작" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:111 +#, fuzzy +msgid "quickstart-mlx" +msgstr "빠른 시작" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 msgid "" -"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " -"metric of privacy loss. It also controls the privacy-utility trade-off; " -"lower :math:`\\epsilon` values indicate higher levels of privacy but are " -"likely to reduce utility as well. The :math:`\\delta` parameter accounts " -"for a small probability on which the upper bound :math:`\\epsilon` does " -"not hold. The amount of noise needed to achieve differential privacy is " -"proportional to the sensitivity of the output, which measures the maximum" -" change in the output due to the inclusion or removal of a single record." +"`Requires to run on macOS with Apple Silicon `_." msgstr "" -#: ../../source/explanation-differential-privacy.rst:45 -msgid "Differential Privacy in Machine Learning" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 +#, fuzzy +msgid "quickstart-monai" +msgstr "빠른 시작" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 +#, fuzzy +msgid "quickstart-pandas" +msgstr "빠른 시작 튜토리얼" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 +msgid "quickstart-pytorch-lightning" msgstr "" -#: ../../source/explanation-differential-privacy.rst:46 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 msgid "" -"DP can be utilized in machine learning to preserve the privacy of the " -"training data. 
Differentially private machine learning algorithms are " -"designed in a way to prevent the algorithm to learn any specific " -"information about any individual data points and subsequently prevent the" -" model from revealing sensitive information. Depending on the stage at " -"which noise is introduced, various methods exist for applying DP to " -"machine learning algorithms. One approach involves adding noise to the " -"training data (either to the features or labels), while another method " -"entails injecting noise into the gradients of the loss function during " -"model training. Additionally, such noise can be incorporated into the " -"model's output." +"Requires an older pip version that is not supported by the Flower Docker " +"images." msgstr "" -#: ../../source/explanation-differential-privacy.rst:53 -msgid "Differential Privacy in Federated Learning" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 +#, fuzzy +msgid "quickstart-pytorch" +msgstr "빠른 시작 튜토리얼" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 +msgid "quickstart-sklearn-tabular" msgstr "" -#: ../../source/explanation-differential-privacy.rst:54 -msgid "" -"Federated learning is a data minimization approach that allows multiple " -"parties to collaboratively train a model without sharing their raw data. " -"However, federated learning also introduces new privacy challenges. The " -"model updates between parties and the central server can leak information" -" about the local data. These leaks can be exploited by attacks such as " -"membership inference and property inference attacks, or model inversion " -"attacks." 
+#: ../../source/docker/run-quickstart-examples-docker-compose.rst:124 +#, fuzzy +msgid "quickstart-tabnet" +msgstr "빠른 시작 튜토리얼" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:126 +#, fuzzy +msgid "quickstart-tensorflow" +msgstr "빠른 시작 튜토리얼" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:127 +msgid "Only runs on AMD64." msgstr "" -#: ../../source/explanation-differential-privacy.rst:58 +#: ../../source/docker/set-environment-variables.rst:2 +#, fuzzy +msgid "Set Environment Variables" +msgstr "환경 변수 설정" + +#: ../../source/docker/set-environment-variables.rst:4 +#, fuzzy msgid "" -"DP can play a crucial role in federated learning to provide privacy for " -"the clients' data." -msgstr "" +"To set a variable inside a Docker container, you can use the ``-e " +"=`` flag. Multiple ``-e`` flags can be used to set multiple " +"environment variables for a container." +msgstr "Docker 컨테이너 내에서 변수를 설정하려면 ``-e =`` 플래그를 사용하면 됩니다." -#: ../../source/explanation-differential-privacy.rst:60 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:2 +#, fuzzy +msgid "Deploy Flower on Multiple Machines with Docker Compose" +msgstr "빠른 시작 튜토리얼" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:4 msgid "" -"Depending on the granularity of privacy provision or the location of " -"noise addition, different forms of DP exist in federated learning. In " -"this explainer, we focus on two approaches of DP utilization in federated" -" learning based on where the noise is added: at the server (also known as" -" the center) or at the client (also known as the local)." +"This guide will help you set up a Flower project on multiple machines " +"using Docker Compose." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:63 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:7 msgid "" -"**Central Differential Privacy**: DP is applied by the server and the " -"goal is to prevent the aggregated model from leaking information about " -"each client's data." +"You will learn how to run the Flower client and server components on two " +"separate machines, with Flower configured to use TLS encryption and " +"persist SuperLink state across restarts. A server consists of a SuperLink" +" and ``SuperExec``. For more details about the Flower architecture, refer" +" to the :doc:`../explanation-flower-architecture` explainer page." msgstr "" -#: ../../source/explanation-differential-privacy.rst:65 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:13 msgid "" -"**Local Differential Privacy**: DP is applied on the client side before " -"sending any information to the server and the goal is to prevent the " -"updates that are sent to the server from leaking any information about " -"the client's data." +"This guide assumes you have completed the :doc:`tutorial-quickstart-" +"docker-compose` tutorial. It is highly recommended that you follow and " +"understand the contents of that tutorial before proceeding with this " +"guide." msgstr "" -#: ../../source/explanation-differential-privacy.rst:-1 -#: ../../source/explanation-differential-privacy.rst:68 -#: ../../source/how-to-use-differential-privacy.rst:11 -msgid "Central Differential Privacy" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:20 +msgid "Before you begin, make sure you have the following prerequisites:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:69 -msgid "" -"In this approach, which is also known as user-level DP, the central " -"server is responsible for adding noise to the globally aggregated " -"parameters. It should be noted that trust in the server is required." 
+#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:23 +msgid "The Docker daemon is running on your local machine and the remote machine." msgstr "" -#: ../../source/explanation-differential-privacy.rst:76 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:24 msgid "" -"While there are various ways to implement central DP in federated " -"learning, we concentrate on the algorithms proposed by [2] and [3]. The " -"overall approach is to clip the model updates sent by the clients and add" -" some amount of noise to the aggregated model. In each iteration, a " -"random set of clients is chosen with a specific probability for training." -" Each client performs local training on its own data. The update of each " -"client is then clipped by some value `S` (sensitivity `S`). This would " -"limit the impact of any individual client which is crucial for privacy " -"and often beneficial for robustness. A common approach to achieve this is" -" by restricting the `L2` norm of the clients' model updates, ensuring " -"that larger updates are scaled down to fit within the norm `S`." -msgstr "" - -#: ../../source/explanation-differential-privacy.rst:-1 -msgid "clipping" +"Docker Compose V2 is installed on both your local machine and the remote " +"machine." msgstr "" -#: ../../source/explanation-differential-privacy.rst:89 -msgid "" -"Afterwards, the Gaussian mechanism is used to add noise in order to " -"distort the sum of all clients' updates. The amount of noise is scaled to" -" the sensitivity value to obtain a privacy guarantee. The Gaussian " -"mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " -"noise_scale * S ) / (number of sampled clients)`." +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:25 +msgid "You can connect to the remote machine from your local machine." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:94 -msgid "Clipping" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:26 +msgid "Ports ``9091`` and ``9093`` are accessible on the remote machine." msgstr "" -#: ../../source/explanation-differential-privacy.rst:96 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:30 msgid "" -"There are two forms of clipping commonly used in Central DP: Fixed " -"Clipping and Adaptive Clipping." +"The guide uses the |quickstart_sklearn_tabular|_ example as an example " +"project." msgstr "" -#: ../../source/explanation-differential-privacy.rst:98 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:32 msgid "" -"**Fixed Clipping** : A predefined fix threshold is set for the magnitude " -"of clients' updates. Any update exceeding this threshold is clipped back " -"to the threshold value." +"If your project has a different name or location, please remember to " +"adjust the commands/paths accordingly." msgstr "" -#: ../../source/explanation-differential-privacy.rst:100 -msgid "" -"**Adaptive Clipping** : The clipping threshold dynamically adjusts based " -"on the observed update distribution [4]. It means that the clipping value" -" is tuned during the rounds with respect to the quantile of the update " -"norm distribution." +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:36 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:22 +#: ../../source/docker/tutorial-quickstart-docker.rst:19 +msgid "Step 1: Set Up" msgstr "" -#: ../../source/explanation-differential-privacy.rst:102 -msgid "" -"The choice between fixed and adaptive clipping depends on various factors" -" such as privacy requirements, data distribution, model complexity, and " -"others." 
+#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:38 +msgid "Clone the Flower repository and change to the ``distributed`` directory:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:-1 -#: ../../source/explanation-differential-privacy.rst:105 -#: ../../source/how-to-use-differential-privacy.rst:96 -msgid "Local Differential Privacy" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:45 +msgid "Get the IP address from the remote machine and save it for later." msgstr "" -#: ../../source/explanation-differential-privacy.rst:107 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:46 msgid "" -"In this approach, each client is responsible for performing DP. Local DP " -"avoids the need for a fully trusted aggregator, but it should be noted " -"that local DP leads to a decrease in accuracy but better privacy in " -"comparison to central DP." +"Use the ``certs.yml`` Compose file to generate your own self-signed " +"certificates. If you have certificates, you can continue with Step 2." msgstr "" -#: ../../source/explanation-differential-privacy.rst:116 -msgid "In this explainer, we focus on two forms of achieving Local DP:" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:51 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:221 +msgid "These certificates should be used only for development purposes." msgstr "" -#: ../../source/explanation-differential-privacy.rst:118 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:53 msgid "" -"Each client adds noise to the local updates before sending them to the " -"server. To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " -"the sensitivity of the local model to be ∆, Gaussian noise is applied " -"with a noise scale of σ where:" +"For production environments, you may have to use dedicated services to " +"obtain your certificates." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:120 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:56 msgid "" -"\\small\n" -"\\frac{∆ \\times \\sqrt{2 \\times " -"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" -"\n" +"First, set the environment variables ``SUPERLINK_IP`` and " +"``SUPEREXEC_IP`` with the IP address from the remote machine. For " +"example, if the IP is ``192.168.2.33``, execute:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:125 -msgid "" -"Each client adds noise to the gradients of the model during the local " -"training (DP-SGD). More specifically, in this approach, gradients are " -"clipped and an amount of calibrated noise is injected into the gradients." +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:65 +msgid "Next, generate the self-signed certificates:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:128 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:72 +msgid "Step 2: Copy the Server Compose Files" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:74 msgid "" -"Please note that these two approaches are providing privacy at different " -"levels." +"Use the method that works best for you to copy the ``server`` directory, " +"the certificates, and your Flower project to the remote machine." msgstr "" -#: ../../source/explanation-differential-privacy.rst:131 -msgid "**References:**" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:77 +msgid "For example, you can use ``scp`` to copy the directories:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:133 -msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." 
+#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:87 +msgid "Step 3: Start the Flower Server Components" msgstr "" -#: ../../source/explanation-differential-privacy.rst:135 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:89 msgid "" -"[2] McMahan et al. Learning Differentially Private Recurrent Language " -"Models." +"Log into the remote machine using ``ssh`` and run the following command " +"to start the SuperLink and SuperExec services:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:137 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:102 msgid "" -"[3] Geyer et al. Differentially Private Federated Learning: A Client " -"Level Perspective." +"The Path of the ``PROJECT_DIR`` should be relative to the location of the" +" ``server`` Docker Compose files." msgstr "" -#: ../../source/explanation-differential-privacy.rst:139 -msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:105 +msgid "Go back to your terminal on your local machine." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:2 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 -msgid "Federated evaluation" -msgstr "" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:108 +#, fuzzy +msgid "Step 4: Start the Flower Client Components" +msgstr "서버(SuperLink)" -#: ../../source/explanation-federated-evaluation.rst:4 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:110 msgid "" -"There are two main approaches to evaluating models in federated learning " -"systems: centralized (or server-side) evaluation and federated (or " -"client-side) evaluation." 
+"On your local machine, run the following command to start the client " +"components:" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:8 -msgid "Centralized Evaluation" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:120 +msgid "" +"The Path of the ``PROJECT_DIR`` should be relative to the location of the" +" ``client`` Docker Compose files." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:11 -msgid "Built-In Strategies" -msgstr "" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:124 +#, fuzzy +msgid "Step 5: Run Your Flower Project" +msgstr "Flower SuperNode를 실행합니다." -#: ../../source/explanation-federated-evaluation.rst:13 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:126 msgid "" -"All built-in strategies support centralized evaluation by providing an " -"evaluation function during initialization. An evaluation function is any " -"function that can take the current global model parameters as input and " -"return evaluation results:" +"Specify the remote SuperExec IP addresses and the path to the root " +"certificate in the ``[tool.flwr.federations.remote-superexec]`` table in " +"the ``pyproject.toml`` file. Here, we have named our remote federation " +"``remote-superexec``:" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:58 -msgid "Custom Strategies" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:130 +msgid "examples/quickstart-sklearn-tabular/pyproject.toml" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:60 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:139 msgid "" -"The :code:`Strategy` abstraction provides a method called " -":code:`evaluate` that can directly be used to evaluate the current global" -" model parameters. The current server implementation calls " -":code:`evaluate` after parameter aggregation and before federated " -"evaluation (see next paragraph)." 
+"The Path of the ``root-certificates`` should be relative to the location " +"of the ``pyproject.toml`` file." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:65 -msgid "Federated Evaluation" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:142 +msgid "To run the project, execute:" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:68 -msgid "Implementing Federated Evaluation" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:148 +msgid "" +"That's it! With these steps, you've set up Flower on two separate " +"machines and are ready to start using it." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:70 -msgid "" -"Client-side evaluation happens in the :code:`Client.evaluate` method and " -"can be configured from the server side." +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:152 +msgid "Step 6: Clean Up" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:101 -msgid "Configuring Federated Evaluation" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:154 +#, fuzzy +msgid "Shut down the Flower client components:" +msgstr "Flower 클라이언트 앱을 실행합니다." + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:161 +msgid "Shut down the Flower server components and delete the SuperLink state:" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:103 +#: ../../source/docker/tutorial-quickstart-docker.rst:2 +#, fuzzy +msgid "Quickstart with Docker" +msgstr "빠른 시작 튜토리얼" + +#: ../../source/docker/tutorial-quickstart-docker.rst:4 msgid "" -"Federated evaluation can be configured from the server side. Built-in " -"strategies support the following arguments:" +"This quickstart aims to guide you through the process of containerizing a" +" Flower project and running it end to end using Docker on your local " +"machine." 
msgstr "" -#: ../../source/explanation-federated-evaluation.rst:105 +#: ../../source/docker/tutorial-quickstart-docker.rst:7 msgid "" -":code:`fraction_evaluate`: a :code:`float` defining the fraction of " -"clients that will be selected for evaluation. If " -":code:`fraction_evaluate` is set to :code:`0.1` and :code:`100` clients " -"are connected to the server, then :code:`10` will be randomly selected " -"for evaluation. If :code:`fraction_evaluate` is set to :code:`0.0`, " -"federated evaluation will be disabled." +"This tutorial does not use production-ready settings, so you can focus on" +" understanding the basic workflow that uses the minimum configurations." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:106 -msgid "" -":code:`min_evaluate_clients`: an :code:`int`: the minimum number of " -"clients to be selected for evaluation. If :code:`fraction_evaluate` is " -"set to :code:`0.1`, :code:`min_evaluate_clients` is set to 20, and " -":code:`100` clients are connected to the server, then :code:`20` clients " -"will be selected for evaluation." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:32 +#: ../../source/docker/tutorial-quickstart-docker.rst:21 +msgid "Create a new Flower project (PyTorch):" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:107 -msgid "" -":code:`min_available_clients`: an :code:`int` that defines the minimum " -"number of clients which need to be connected to the server before a round" -" of federated evaluation can start. If fewer than " -":code:`min_available_clients` are connected to the server, the server " -"will wait until more clients are connected before it continues to sample " -"clients for evaluation." 
+#: ../../source/docker/tutorial-quickstart-docker.rst:39 +msgid "Create a new Docker bridge network called ``flwr-network``:" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:108 +#: ../../source/docker/tutorial-quickstart-docker.rst:45 msgid "" -":code:`on_evaluate_config_fn`: a function that returns a configuration " -"dictionary which will be sent to the selected clients. The function will " -"be called during each round and provides a convenient way to customize " -"client-side evaluation from the server side, for example, to configure " -"the number of validation steps performed." +"User-defined networks, such as ``flwr-network``, enable IP resolution of " +"container names, a feature absent in the default bridge network. This " +"simplifies quickstart example by avoiding the need to determine host IP " +"first." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:135 -msgid "Evaluating Local Model Updates During Training" -msgstr "" +#: ../../source/docker/tutorial-quickstart-docker.rst:50 +#, fuzzy +msgid "Step 2: Start the SuperLink" +msgstr "서버(SuperLink)" -#: ../../source/explanation-federated-evaluation.rst:137 -msgid "" -"Model parameters can also be evaluated during training. 
" -":code:`Client.fit` can return arbitrary evaluation results as a " -"dictionary:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:62 +#: ../../source/docker/tutorial-quickstart-docker.rst:52 +msgid "Open your terminal and run:" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:177 -msgid "Full Code Example" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "Understand the command" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:179 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"For a full code example that uses both centralized and federated " -"evaluation, see the *Advanced TensorFlow Example* (the same approach can " -"be applied to workloads implemented in any other framework): " -"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" +"``-p 9091:9091 -p 9092:9092``: Map port ``9091`` and ``9092`` of the " +"container to the same port of" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:10 -msgid "FED Template" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the host machine, allowing other services to access the Driver API on" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:12 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12 -msgid "Table of Contents" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``http://localhost:9091`` and the Fleet API on ``http://localhost:9092``." 
msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:14 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14 -msgid "[Table of Contents](#table-of-contents)" +#: ../../source/docker/tutorial-quickstart-docker.rst:71 +#: ../../source/docker/tutorial-quickstart-docker.rst:108 +#: ../../source/docker/tutorial-quickstart-docker.rst:219 +#: ../../source/docker/tutorial-quickstart-docker.rst:309 +msgid "" +"``--network flwr-network``: Make the container join the network named " +"``flwr-network``." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:15 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15 -msgid "[Summary](#summary)" +#: ../../source/docker/tutorial-quickstart-docker.rst:72 +msgid "``--name superlink``: Assign the name ``superlink`` to the container." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:16 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16 -msgid "[Motivation](#motivation)" +#: ../../source/docker/tutorial-quickstart-docker.rst:73 +#: ../../source/docker/tutorial-quickstart-docker.rst:110 +#: ../../source/docker/tutorial-quickstart-docker.rst:220 +#: ../../source/docker/tutorial-quickstart-docker.rst:311 +msgid "" +"``--detach``: Run the container in the background, freeing up the " +"terminal." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:17 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 -msgid "[Goals](#goals)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"tag of the image. The tag :substitution-code:`|stable_flwr_version|` " +"represents a :doc:`specific version ` of the image." 
msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:18 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 -msgid "[Non-Goals](#non-goals)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--insecure``: This flag tells the container to operate in an insecure " +"mode, allowing" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:19 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 -msgid "[Proposal](#proposal)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "unencrypted communication." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:20 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 -msgid "[Drawbacks](#drawbacks)" +#: ../../source/docker/tutorial-quickstart-docker.rst:80 +msgid "Step 3: Start the SuperNode" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:21 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 -msgid "[Alternatives Considered](#alternatives-considered)" +#: ../../source/docker/tutorial-quickstart-docker.rst:82 +msgid "Start two SuperNode containers." 
msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:22 -msgid "[Appendix](#appendix)" +#: ../../source/docker/tutorial-quickstart-docker.rst:84 +msgid "Start the first container:" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:24 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 -msgid "Summary" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``-p 9094:9094``: Map port ``9094`` of the container to the same port of" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:26 -msgid "\\[TODO - sentence 1: summary of the problem\\]" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the host machine, allowing other services to access the SuperNode API on" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:28 -msgid "\\[TODO - sentence 2: summary of the solution\\]" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``http://localhost:9094``." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:30 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 -msgid "Motivation" +#: ../../source/docker/tutorial-quickstart-docker.rst:109 +msgid "``--name supernode-1``: Assign the name ``supernode-1`` to the container." 
msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:32 -#: ../../source/fed/0000-20200102-fed-template.md:36 -#: ../../source/fed/0000-20200102-fed-template.md:40 -#: ../../source/fed/0000-20200102-fed-template.md:44 -#: ../../source/fed/0000-20200102-fed-template.md:48 -#: ../../source/fed/0000-20200102-fed-template.md:54 -#: ../../source/fed/0000-20200102-fed-template.md:58 -msgid "\\[TODO\\]" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``flwr/supernode:|stable_flwr_version|``: This is the name of the image " +"to be run and the specific tag" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:34 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:53 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 -msgid "Goals" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "of the image." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:38 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:79 -msgid "Non-Goals" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--superlink superlink:9092``: Connect to the SuperLink's Fleet API at " +"the address" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:42 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 -msgid "Proposal" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``superlink:9092``." 
msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:46 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 -msgid "Drawbacks" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--node-config \"partition-id=0 num-partitions=2\"``: Set the partition " +"ID to ``0`` and the" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:50 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 -msgid "Alternatives Considered" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "number of partitions to ``2`` for the SuperNode configuration." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:52 -msgid "\\[Alternative 1\\]" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--supernode-address 0.0.0.0:9094``: Set the address and port number " +"that the SuperNode" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:56 -msgid "\\[Alternative 2\\]" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "is listening on." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 -msgid "Flower Enhancement Doc" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--isolation process``: Tells the SuperNode that the ClientApp is " +"created by separate" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 -msgid "[Enhancement Doc Template](#enhancement-doc-template)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "independent process. The SuperNode does not attempt to create it." 
msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 -msgid "[Metadata](#metadata)" +#: ../../source/docker/tutorial-quickstart-docker.rst:124 +msgid "Start the second container:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:22 -msgid "[Workflow](#workflow)" +#: ../../source/docker/tutorial-quickstart-docker.rst:142 +msgid "Step 4: Start the ClientApp" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 -msgid "[GitHub Issues](#github-issues)" +#: ../../source/docker/tutorial-quickstart-docker.rst:144 +msgid "" +"The ClientApp Docker image comes with a pre-installed version of Flower " +"and serves as a base for building your own ClientApp image. In order to " +"install the FAB dependencies, you will need to create a Dockerfile that " +"extends the ClientApp image and installs the required dependencies." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:26 -msgid "[Google Docs](#google-docs)" +#: ../../source/docker/tutorial-quickstart-docker.rst:149 +msgid "" +"Create a ClientApp Dockerfile called ``Dockerfile.clientapp`` and paste " +"the following code into it:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:30 -msgid "A Flower Enhancement is a standardized development process to" -msgstr "" +#: ../../source/docker/tutorial-quickstart-docker.rst:152 +#, fuzzy +msgid "Dockerfile.clientapp" +msgstr "flower 클라이언트 앱" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 -msgid "provide a common structure for proposing larger changes" +#: ../../source/docker/tutorial-quickstart-docker.rst +#, fuzzy +msgid "Understand the Dockerfile" +msgstr "SuperNode Dockerfile 만들기" + +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +":substitution-code:`FROM flwr/clientapp:|stable_flwr_version|`: This line" +" specifies that the Docker image" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 -msgid "ensure that the 
motivation for a change is clear" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"to be built from is the ``flwr/clientapp image``, version :substitution-" +"code:`|stable_flwr_version|`." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 -msgid "persist project information in a version control system" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``WORKDIR /app``: Set the working directory for the container to ``/app``." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 -msgid "document the motivation for impactful user-facing changes" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"Any subsequent commands that reference a directory will be relative to " +"this directory." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:36 -msgid "reserve GitHub issues for tracking work in flight" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``COPY pyproject.toml .``: Copy the ``pyproject.toml`` file" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"ensure community participants can successfully drive changes to " -"completion across one or more releases while stakeholders are adequately " -"represented throughout the process" +"from the current working directory into the container's ``/app`` " +"directory." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 -msgid "Hence, an Enhancement Doc combines aspects of" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``RUN sed -i 's/.*flwr\\[simulation\\].*//' pyproject.toml``: Remove the " +"``flwr`` dependency" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 -msgid "a feature, and effort-tracking document" -msgstr "" +#: ../../source/docker/tutorial-quickstart-docker.rst +#, fuzzy +msgid "from the ``pyproject.toml``." 
+msgstr "또는 ``pyproject.toml``:" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 -msgid "a product requirements document" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``python -m pip install -U --no-cache-dir .``: Run the ``pip`` install " +"command to" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 -msgid "a design document" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "install the dependencies defined in the ``pyproject.toml`` file" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"into one file, which is created incrementally in collaboration with the " -"community." +"The ``-U`` flag indicates that any existing packages should be upgraded, " +"and" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"For far-fetching changes or features proposed to Flower, an abstraction " -"beyond a single GitHub issue or pull request is required to understand " -"and communicate upcoming changes to the project." +"``--no-cache-dir`` prevents pip from using the cache to speed up the " +"installation." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"The purpose of this process is to reduce the amount of \"tribal " -"knowledge\" in our community. By moving decisions from Slack threads, " -"video calls, and hallway conversations into a well-tracked artifact, this" -" process aims to enhance communication and discoverability." +"``ENTRYPOINT [\"flwr-clientapp\"]``: Set the command ``flwr-clientapp`` " +"to be" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 -msgid "" -"Roughly any larger, user-facing enhancement should follow the Enhancement" -" process. 
If an enhancement would be described in either written or " -"verbal communication to anyone besides the author or developer, then " -"consider creating an Enhancement Doc." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the default command run when the container is started." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 +#: ../../source/docker/tutorial-quickstart-docker.rst:186 msgid "" -"Similarly, any technical effort (refactoring, major architectural change)" -" that will impact a large section of the development community should " -"also be communicated widely. The Enhancement process is suited for this " -"even if it will have zero impact on the typical user or operator." +"Note that `flwr `__ is already installed " +"in the ``flwr/clientapp`` base image, so only other package dependencies " +"such as ``flwr-datasets``, ``torch``, etc., need to be installed. As a " +"result, the ``flwr`` dependency is removed from the ``pyproject.toml`` " +"after it has been copied into the Docker image (see line 5)." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 +#: ../../source/docker/tutorial-quickstart-docker.rst:192 +#, fuzzy msgid "" -"For small changes and additions, going through the Enhancement process " -"would be time-consuming and unnecessary. This includes, for example, " -"adding new Federated Learning algorithms, as these only add features " -"without changing how Flower works or is used." +"Next, build the ClientApp Docker image by running the following command " +"in the directory where the Dockerfile is located:" msgstr "" +"다음으로, Docker파일과 ServerApp 코드가 있는 디렉터리에서 다음 명령을 실행하여 ServerApp Docker 이미지를" +" 빌드합니다." 
-#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 +#: ../../source/docker/tutorial-quickstart-docker.rst:201 +#, fuzzy msgid "" -"Enhancements are different from feature requests, as they are already " -"providing a laid-out path for implementation and are championed by " -"members of the community." +"The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. " +"Remember that these values are merely examples, and you can customize " +"them according to your requirements." msgstr "" +"이미지에``flwr_serverapp``이라는 이름을 붙이고 ``0.0.1``이라는 태그를 붙였습니다. 여기서 선택한 값은 예시일 " +"뿐이라는 점을 기억하세요. 필요에 따라 변경할 수 있습니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 -msgid "" -"An Enhancement is captured in a Markdown file that follows a defined " -"template and a workflow to review and store enhancement docs for " -"reference — the Enhancement Doc." +#: ../../source/docker/tutorial-quickstart-docker.rst:205 +msgid "Start the first ClientApp container:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 -msgid "Enhancement Doc Template" -msgstr "" +#: ../../source/docker/tutorial-quickstart-docker.rst +#, fuzzy +msgid "" +"``flwr_clientapp:0.0.1``: This is the name of the image to be run and the" +" specific tag" +msgstr "``flwr_serverapp:0.0.1``: 사용할 Docker 이미지의 태그 이름입니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Each enhancement doc is provided as a Markdown file having the following " -"structure" +"``--supernode supernode-1:9094``: Connect to the SuperNode's Fleet API at" +" the address" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 -msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``supernode-1:9094``." 
msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 -msgid "Title (same as in metadata)" +#: ../../source/docker/tutorial-quickstart-docker.rst:226 +msgid "Start the second ClientApp container:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 -msgid "Table of Contents (if needed)" +#: ../../source/docker/tutorial-quickstart-docker.rst:237 +msgid "Step 5: Start the SuperExec" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 -msgid "Notes/Constraints/Caveats (optional)" -msgstr "" +#: ../../source/docker/tutorial-quickstart-docker.rst:239 +#, fuzzy +msgid "" +"The procedure for building and running a SuperExec image is almost " +"identical to the ClientApp image." +msgstr "ServerApp 이미지를 빌드하고 실행하는 절차는 SuperNode 이미지와 거의 동일합니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 -msgid "Design Details (optional)" +#: ../../source/docker/tutorial-quickstart-docker.rst:242 +msgid "" +"Similar to the ClientApp image, you will need to create a Dockerfile that" +" extends the SuperExec image and installs the required FAB dependencies." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 -msgid "Graduation Criteria" +#: ../../source/docker/tutorial-quickstart-docker.rst:245 +msgid "" +"Create a SuperExec Dockerfile called ``Dockerfile.superexec`` and paste " +"the following code in:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 -msgid "Upgrade/Downgrade Strategy (if applicable)" +#: ../../source/docker/tutorial-quickstart-docker.rst:248 +msgid "Dockerfile.superexec" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 -msgid "As a reference, this document follows the above structure." 
+#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +":substitution-code:`FROM flwr/superexec:|stable_flwr_version|`: This line" +" specifies that the Docker image" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 -#: ../../source/ref-api/flwr.common.Metadata.rst:2 -msgid "Metadata" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"to be built from is the ``flwr/superexec image``, version :substitution-" +"code:`|stable_flwr_version|`." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"**fed-number** (Required) The `fed-number` of the last Flower Enhancement" -" Doc + 1. With this number, it becomes easy to reference other proposals." +"``ENTRYPOINT [\"flower-superexec\"``: Set the command ``flower-" +"superexec`` to be" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 -msgid "**title** (Required) The title of the proposal in plain language." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``\"--executor\", \"flwr.superexec.deployment:executor\"]`` Use the" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 -msgid "" -"**status** (Required) The current status of the proposal. See " -"[workflow](#workflow) for the possible states." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``flwr.superexec.deployment:executor`` executor to run the ServerApps." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 +#: ../../source/docker/tutorial-quickstart-docker.rst:283 msgid "" -"**authors** (Required) A list of authors of the proposal. This is simply " -"the GitHub ID." 
+"Afterward, in the directory that holds the Dockerfile, execute this " +"Docker command to build the SuperExec image:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 -msgid "" -"**creation-date** (Required) The date that the proposal was first " -"submitted in a PR." +#: ../../source/docker/tutorial-quickstart-docker.rst:290 +msgid "Start the SuperExec container:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 -msgid "" -"**last-updated** (Optional) The date that the proposal was last changed " -"significantly." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``-p 9093:9093``: Map port ``9093`` of the container to the same port of" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"**see-also** (Optional) A list of other proposals that are relevant to " -"this one." +"the host machine, allowing you to access the SuperExec API on " +"``http://localhost:9093``." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 -msgid "**replaces** (Optional) A list of proposals that this one replaces." +#: ../../source/docker/tutorial-quickstart-docker.rst:310 +msgid "``--name superexec``: Assign the name ``superexec`` to the container." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 -msgid "**superseded-by** (Optional) A list of proposals that this one supersedes." +#: ../../source/docker/tutorial-quickstart-docker.rst +#, fuzzy +msgid "" +"``flwr_superexec:0.0.1``: This is the name of the image to be run and the" +" specific tag" +msgstr "``flwr_supernode:0.0.1``: 사용할 Docker 이미지의 태그 이름입니다." 
+ +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--executor-config superlink=\\\"superlink:9091\\\"``: Configure the " +"SuperExec executor to" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 -msgid "Workflow" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "connect to the SuperLink running on port ``9091``." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 -msgid "" -"The idea forming the enhancement should already have been discussed or " -"pitched in the community. As such, it needs a champion, usually the " -"author, who shepherds the enhancement. This person also has to find " -"committers to Flower willing to review the proposal." +#: ../../source/docker/tutorial-quickstart-docker.rst:320 +msgid "Step 6: Run the Quickstart Project" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 -msgid "" -"New enhancements are checked in with a file name in the form of `NNNN-" -"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement " -"Doc number, to `enhancements`. All enhancements start in `provisional` " -"state as part of a pull request. Discussions are done as part of the pull" -" request review." +#: ../../source/docker/tutorial-quickstart-docker.rst:322 +#, fuzzy +msgid "Add the following lines to the ``pyproject.toml``:" +msgstr "``pyproject.toml``에 다음 버전 제약 조건을 설정했는지 확인하세요:" + +#: ../../source/docker/tutorial-quickstart-docker.rst:331 +msgid "Run the ``quickstart-docker`` project by executing the command:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 -msgid "" -"Once an enhancement has been reviewed and approved, its status is changed" -" to `implementable`. The actual implementation is then done in separate " -"pull requests. These pull requests should mention the respective " -"enhancement as part of their description. 
After the implementation is " -"done, the proposal status is changed to `implemented`." +#: ../../source/docker/tutorial-quickstart-docker.rst:337 +msgid "Follow the SuperExec logs to track the execution of the run:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 -msgid "" -"Under certain conditions, other states are possible. An Enhancement has " -"the following states:" +#: ../../source/docker/tutorial-quickstart-docker.rst:344 +msgid "Step 7: Update the Application" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 +#: ../../source/docker/tutorial-quickstart-docker.rst:346 msgid "" -"`provisional`: The enhancement has been proposed and is actively being " -"defined. This is the starting state while the proposal is being fleshed " -"out and actively defined and discussed." +"Change the application code. For example, change the ``seed`` in " +"``quickstart_docker/task.py`` to ``43`` and save it:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 -msgid "`implementable`: The enhancement has been reviewed and approved." +#: ../../source/docker/tutorial-quickstart-docker.rst:349 +msgid "quickstart_docker/task.py" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 -msgid "" -"`implemented`: The enhancement has been implemented and is no longer " -"actively changed." +#: ../../source/docker/tutorial-quickstart-docker.rst:356 +#, fuzzy +msgid "Stop the current ClientApp containers:" +msgstr "현재 클라이언트 속성입니다." + +#: ../../source/docker/tutorial-quickstart-docker.rst:362 +#, fuzzy +msgid "Rebuild the FAB and ClientApp image:" +msgstr "기본 이미지 빌드" + +#: ../../source/docker/tutorial-quickstart-docker.rst:368 +msgid "Launch two new ClientApp containers based on the newly built image:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 -msgid "`deferred`: The enhancement is proposed but not actively being worked on." 
+#: ../../source/docker/tutorial-quickstart-docker.rst:383 +msgid "Run the updated project:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 -msgid "" -"`rejected`: The authors and reviewers have decided that this enhancement " -"is not moving forward." +#: ../../source/docker/tutorial-quickstart-docker.rst:390 +msgid "Step 8: Clean Up" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 -msgid "`withdrawn`: The authors have withdrawn the enhancement." +#: ../../source/docker/tutorial-quickstart-docker.rst:392 +msgid "Remove the containers and the bridge network:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 -msgid "`replaced`: The enhancement has been replaced by a new enhancement." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:408 +#: ../../source/docker/tutorial-quickstart-docker.rst:404 +#, fuzzy +msgid "Where to Go Next" +msgstr "시작 위치" + +#: ../../source/docker/tutorial-quickstart-docker.rst:406 +msgid ":doc:`enable-tls`" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 -msgid "" -"Adding an additional process to the ones already provided by GitHub " -"(Issues and Pull Requests) adds more complexity and can be a barrier for " -"potential first-time contributors." +#: ../../source/docker/tutorial-quickstart-docker.rst:407 +msgid ":doc:`persist-superlink-state`" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 -msgid "" -"Expanding the proposal template beyond the single-sentence description " -"currently required in the features issue template may be a heavy burden " -"for non-native English speakers." 
+#: ../../source/docker/tutorial-quickstart-docker.rst:408 +msgid ":doc:`tutorial-quickstart-docker-compose`" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 -msgid "GitHub Issues" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:2 +#, fuzzy +msgid "Quickstart with Docker Compose" +msgstr "빠른 시작 튜토리얼" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:4 +msgid "" +"This quickstart shows you how to set up Flower using Docker Compose in a " +"single command, allowing you to focus on developing your application " +"without worrying about the underlying infrastructure." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:8 msgid "" -"Using GitHub Issues for these kinds of enhancements is doable. One could " -"use, for example, tags, to differentiate and filter them from other " -"issues. The main issue is in discussing and reviewing an enhancement: " -"GitHub issues only have a single thread for comments. Enhancements " -"usually have multiple threads of discussion at the same time for various " -"parts of the doc. Managing these multiple discussions can be confusing " -"when using GitHub Issues." +"You will also learn how to easily enable TLS encryption and persist " +"application state locally, giving you the freedom to choose the " +"configuration that best suits your project's needs." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 -msgid "Google Docs" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:24 +msgid "Clone the Docker Compose ``complete`` directory:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:38 msgid "" -"Google Docs allow for multiple threads of discussions. But as Google Docs" -" are hosted outside the project, their discoverability by the community " -"needs to be taken care of. 
A list of links to all proposals has to be " -"managed and made available for the community. Compared to shipping " -"proposals as part of Flower's repository, the potential for missing links" -" is much higher." +"Export the path of the newly created project. The path should be relative" +" to the location of the Docker Compose files:" msgstr "" -#: ../../source/fed/index.md:1 -msgid "FED - Flower Enhancement Doc" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:45 +msgid "" +"Setting the ``PROJECT_DIR`` helps Docker Compose locate the " +"``pyproject.toml`` file, allowing it to install dependencies in the " +"SuperExec and SuperNode images correctly." msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:2 -msgid "Aggregate evaluation results" -msgstr "" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:49 +#, fuzzy +msgid "Step 2: Run Flower in Insecure Mode" +msgstr "Flower SuperNode를 실행합니다." -#: ../../source/how-to-aggregate-evaluation-results.rst:4 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:51 msgid "" -"The Flower server does not prescribe a way to aggregate evaluation " -"results, but it enables the user to fully customize result aggregation." +"To begin, start Flower with the most basic configuration. In this setup, " +"Flower will run without TLS and without persisting the state." msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:8 -msgid "Aggregate Custom Evaluation Results" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:56 +msgid "" +"Without TLS, the data sent between the services remains **unencrypted**. " +"Use it only for development purposes." msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:10 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:59 msgid "" -"The same :code:`Strategy`-customization approach can be used to aggregate" -" custom evaluation results coming from individual clients. 
Clients can " -"return custom metrics to the server by returning a dictionary:" +"For production-oriented use cases, :ref:`enable TLS` for secure data" +" transmission." msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:36 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:70 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:184 +#, fuzzy +msgid "``docker compose``: The Docker command to run the Docker Compose tool." +msgstr "``docker run``: 새 Docker 컨테이너를 실행하는 명령입니다." + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:71 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:185 msgid "" -"The server can then use a customized strategy to aggregate the metrics " -"provided in these dictionaries:" +"``-f compose.yml``: Specify the YAML file that contains the basic Flower " +"service definitions." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:2 -msgid "Authenticate SuperNodes" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:72 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:190 +msgid "" +"``--build``: Rebuild the images for each service if they don't already " +"exist." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:4 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:73 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:191 msgid "" -"Flower has built-in support for authenticated SuperNodes that you can use" -" to verify the identities of each SuperNode connecting to a SuperLink. " -"Flower node authentication works similar to how GitHub SSH authentication" -" works:" +"``-d``: Detach the containers from the terminal and run them in the " +"background." 
msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:7 -msgid "SuperLink (server) stores a list of known (client) node public keys" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:76 +msgid "Step 3: Run the Quickstart Project" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:8 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:78 msgid "" -"Using ECDH, both SuperNode and SuperLink independently derive a shared " -"secret" +"Now that the Flower services have been started via Docker Compose, it is " +"time to run the quickstart example." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:9 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:81 msgid "" -"Shared secret is used to compute the HMAC value of the message sent from " -"SuperNode to SuperLink as a token" +"To ensure the ``flwr`` CLI connects to the SuperExec, you need to specify" +" the SuperExec addresses in the ``pyproject.toml`` file." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:10 -msgid "SuperLink verifies the token" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:84 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:232 +msgid "Add the following lines to the ``quickstart-compose/pyproject.toml``:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:12 -msgid "" -"We recommend you to check out the complete `code example " -"`_ demonstrating federated learning with Flower in an " -"authenticated setting." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:86 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:234 +msgid "quickstart-compose/pyproject.toml" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:15 -msgid "" -"This guide covers a preview feature that might change in future versions " -"of Flower." 
+#: ../../source/docker/tutorial-quickstart-docker-compose.rst:93 +msgid "Execute the command to run the quickstart example:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:18 -msgid "" -"For increased security, node authentication can only be used when " -"encrypted connections (SSL/TLS) are enabled." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:99 +msgid "Monitor the SuperExec logs and wait for the summary to appear:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:21 -msgid "Enable node authentication in :code:`SuperLink`" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:106 +msgid "Step 4: Update the Application" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:23 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:108 +msgid "In the next step, change the application code." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:110 msgid "" -"To enable node authentication, first you need to configure SSL/TLS " -"connections to secure the SuperLink<>SuperNode communication. You can " -"find the complete guide `here `_. After configuring secure connections, you" -" can enable client authentication in a long-running Flower " -":code:`SuperLink`. 
Use the following terminal command to start a Flower " -":code:`SuperNode` that has both secure connections and node " -"authentication enabled:" +"For example, go to the ``task.py`` file in the ``quickstart-" +"compose/quickstart_compose/`` directory and add a ``print`` call in the " +"``get_weights`` function:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:36 -msgid "Let's break down the authentication flags:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:114 +msgid "quickstart-compose/quickstart_compose/task.py" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:38 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:125 +#, fuzzy +msgid "Rebuild and restart the services." +msgstr "이미 *서버*를 시작할 수 있습니다:" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:129 msgid "" -"The first flag :code:`--auth-list-public-keys` expects a path to a CSV " -"file storing all known node public keys. You need to store all known node" -" public keys that are allowed to participate in a federation in one CSV " -"file (:code:`.csv`)." +"If you have modified the dependencies listed in your ``pyproject.toml`` " +"file, it is essential to rebuild images." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:40 -msgid "" -"A valid CSV file storing known node public keys should list the keys in " -"OpenSSH format, separated by commas and without any comments. For an " -"example, refer to our code sample, which contains a CSV file with two " -"known node public keys." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:132 +msgid "If you haven't made any changes, you can skip this step." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:42 -msgid "" -"The second and third flags :code:`--auth-superlink-private-key` and :code" -":`--auth-superlink-public-key` expect paths to the server's private and " -"public keys. 
For development purposes, you can generate a private and " -"public key pair using :code:`ssh-keygen -t ecdsa -b 384`." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:134 +msgid "Run the following command to rebuild and restart the services:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:45 -msgid "" -"In Flower 1.9, there is no support for dynamically removing, editing, or " -"adding known node public keys to the SuperLink. To change the set of " -"known nodes, you need to shut the server down, edit the CSV file, and " -"start the server again. Support for dynamically changing the set of known" -" nodes is on the roadmap to be released in Flower 1.10 (ETA: June)." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:140 +msgid "Run the updated quickstart example:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:51 -msgid "Enable node authentication in :code:`SuperNode`" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:147 +msgid "In the SuperExec logs, you should find the ``Get weights`` line:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:53 -msgid "" -"Similar to the long-running Flower server (:code:`SuperLink`), you can " -"easily enable node authentication in the long-running Flower client " -"(:code:`SuperNode`). Use the following terminal command to start an " -"authenticated :code:`SuperNode`:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:164 +msgid "Step 5: Persisting the SuperLink State" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:64 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:166 msgid "" -"The :code:`--auth-supernode-private-key` flag expects a path to the " -"node's private key file and the :code:`--auth-supernode-public-key` flag " -"expects a path to the node's public key file. For development purposes, " -"you can generate a private and public key pair using :code:`ssh-keygen -t" -" ecdsa -b 384`." 
+"In this step, Flower services are configured to persist the state of the " +"SuperLink service, ensuring that it maintains its state even after a " +"restart." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:68 -msgid "Security notice" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:171 +msgid "" +"When working with Docker Compose on Linux, you may need to create the " +"``state`` directory first and change its ownership to ensure proper " +"access and permissions." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:70 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:174 msgid "" -"The system's security relies on the credentials of the SuperLink and each" -" SuperNode. Therefore, it is imperative to safeguard and safely store the" -" credentials to avoid security risks such as Public Key Infrastructure " -"(PKI) impersonation attacks. The node authentication mechanism also " -"involves human interaction, so please ensure that all of the " -"communication is done in a secure manner, using trusted communication " -"methods." +"For more information, consult the following page: :doc:`persist-" +"superlink-state`." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:75 -#: ../../source/how-to-enable-ssl-connections.rst:65 -#: ../../source/how-to-use-built-in-mods.rst:85 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 -msgid "Conclusion" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:176 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:226 +msgid "Run the command:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:77 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst msgid "" -"You should now have learned how to start a long-running Flower server " -"(:code:`SuperLink`) and client (:code:`SuperNode`) with node " -"authentication enabled. 
You should also know the significance of the " -"private key and store it safely to minimize security risks." +"``-f with-state.yml``: Specifies the path to an additional Docker Compose" +" file that" msgstr "" -#: ../../source/how-to-configure-clients.rst:2 -msgid "Configure clients" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst +msgid "contains the configuration for persisting the SuperLink state." msgstr "" -#: ../../source/how-to-configure-clients.rst:4 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst msgid "" -"Along with model parameters, Flower can send configuration values to " -"clients. Configuration values can be used for various purposes. They are," -" for example, a popular way to control client-side hyperparameters from " -"the server." +"Docker merges Compose files according to `merging rules " +"`_." msgstr "" -#: ../../source/how-to-configure-clients.rst:7 -msgid "Configuration values" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:193 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:247 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:375 +msgid "Rerun the ``quickstart-compose`` project:" msgstr "" -#: ../../source/how-to-configure-clients.rst:9 -msgid "" -"Configuration values are represented as a dictionary with ``str`` keys " -"and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " -"float), ``int``, or ``str`` (or equivalent types in different languages)." 
-" Here is an example of a configuration dictionary in Python:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:199 +msgid "Check the content of the ``state`` directory:" msgstr "" -#: ../../source/how-to-configure-clients.rst:20 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:206 msgid "" -"Flower serializes these configuration dictionaries (or *config dict* for " -"short) to their ProtoBuf representation, transports them to the client " -"using gRPC, and then deserializes them back to Python dictionaries." +"You should see a ``state.db`` file in the ``state`` directory. If you " +"restart the service, the state file will be used to restore the state " +"from the previously saved data. This ensures that the data persists even " +"if the containers are stopped and started again." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:214 +msgid "Step 6: Run Flower with TLS" msgstr "" -#: ../../source/how-to-configure-clients.rst:24 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:216 msgid "" -"Currently, there is no support for directly sending collection types " -"(e.g., ``Set``, ``List``, ``Map``) as values in configuration " -"dictionaries. There are several workarounds to send collections as values" -" by converting them to one of the supported value types (and converting " -"them back on the client-side)." +"To demonstrate how to enable TLS, generate self-signed certificates using" +" the ``certs.yml`` Compose file." msgstr "" -#: ../../source/how-to-configure-clients.rst:26 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:223 msgid "" -"One can, for example, convert a list of floating-point numbers to a JSON " -"string, then send the JSON string using the configuration dictionary, and" -" then convert the JSON string back to a list of floating-point numbers on" -" the client." +"For production environments, use a service like `Let's Encrypt " +"`_ to obtain your certificates." 
msgstr "" -#: ../../source/how-to-configure-clients.rst:30 -msgid "Configuration through built-in strategies" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:241 +msgid "Restart the services with TLS enabled:" msgstr "" -#: ../../source/how-to-configure-clients.rst:32 -msgid "" -"The easiest way to send configuration values to clients is to use a " -"built-in strategy like :code:`FedAvg`. Built-in strategies support so-" -"called configuration functions. A configuration function is a function " -"that the built-in strategy calls to get the configuration dictionary for " -"the current round. It then forwards the configuration dictionary to all " -"the clients selected during that round." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:255 +msgid "Step 7: Add another SuperNode" msgstr "" -#: ../../source/how-to-configure-clients.rst:34 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:257 msgid "" -"Let's start with a simple example. Imagine we want to send (a) the batch " -"size that the client should use, (b) the current global round of " -"federated learning, and (c) the number of epochs to train on the client-" -"side. Our configuration function could look like this:" +"You can add more SuperNodes and ClientApps by duplicating their " +"definitions in the ``compose.yml`` file." msgstr "" -#: ../../source/how-to-configure-clients.rst:47 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:260 msgid "" -"To make the built-in strategies use this function, we can pass it to " -"``FedAvg`` during initialization using the parameter " -":code:`on_fit_config_fn`:" +"Just give each new SuperNode and ClientApp service a unique service name " +"like ``supernode-3``, ``clientapp-3``, etc." 
msgstr "" -#: ../../source/how-to-configure-clients.rst:56 -msgid "One the client side, we receive the configuration dictionary in ``fit``:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:263 +msgid "In ``compose.yml``, add the following:" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:265 +msgid "compose.yml" msgstr "" -#: ../../source/how-to-configure-clients.rst:67 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:316 msgid "" -"There is also an `on_evaluate_config_fn` to configure evaluation, which " -"works the same way. They are separate functions because one might want to" -" send different configuration values to `evaluate` (for example, to use a" -" different batch size)." +"If you also want to enable TLS for the new SuperNodes, duplicate the " +"SuperNode definition for each new SuperNode service in the ``with-" +"tls.yml`` file." msgstr "" -#: ../../source/how-to-configure-clients.rst:69 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:319 msgid "" -"The built-in strategies call this function every round (that is, every " -"time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). " -"Calling `on_evaluate_config_fn` every round allows us to vary/change the " -"config dict over consecutive rounds. If we wanted to implement a " -"hyperparameter schedule, for example, to increase the number of local " -"epochs during later rounds, we could do the following:" +"Make sure that the names of the services match with the one in the " +"``compose.yml`` file." msgstr "" -#: ../../source/how-to-configure-clients.rst:82 -msgid "The :code:`FedAvg` strategy will call this function *every round*." 
+#: ../../source/docker/tutorial-quickstart-docker-compose.rst:321 +msgid "In ``with-tls.yml``, add the following:" msgstr "" -#: ../../source/how-to-configure-clients.rst:85 -msgid "Configuring individual clients" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:323 +msgid "with-tls.yml" msgstr "" -#: ../../source/how-to-configure-clients.rst:87 -msgid "" -"In some cases, it is necessary to send different configuration values to " -"different clients." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:345 +msgid "Step 8: Persisting the SuperLink State and Enabling TLS" msgstr "" -#: ../../source/how-to-configure-clients.rst:89 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:347 msgid "" -"This can be achieved by customizing an existing strategy or by " -":doc:`implementing a custom strategy from scratch `. Here's a nonsensical example that customizes :code:`FedAvg`" -" by adding a custom ``\"hello\": \"world\"`` configuration key/value pair" -" to the config dict of a *single client* (only the first client in the " -"list, the other clients in this round to not receive this \"special\" " -"config value):" +"To run Flower with persisted SuperLink state and enabled TLS, a slight " +"change in the ``with-state.yml`` file is required:" msgstr "" -#: ../../source/how-to-configure-logging.rst:2 -msgid "Configure logging" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:350 +msgid "Comment out the lines 2-4 and uncomment the lines 5-9:" msgstr "" -#: ../../source/how-to-configure-logging.rst:4 -msgid "" -"The Flower logger keeps track of all core events that take place in " -"federated learning workloads. It presents information by default " -"following a standard message format:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:352 +msgid "with-state.yml" msgstr "" -#: ../../source/how-to-configure-logging.rst:13 -msgid "" -"containing relevant information including: log message level (e.g. 
" -":code:`INFO`, :code:`DEBUG`), a timestamp, the line where the logging " -"took place from, as well as the log message itself. In this way, the " -"logger would typically display information on your terminal as follows:" -msgstr "" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 +#, fuzzy +msgid "Restart the services:" +msgstr "이미 *서버*를 시작할 수 있습니다:" -#: ../../source/how-to-configure-logging.rst:34 -msgid "Saving log to file" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:383 +msgid "Step 9: Merge Multiple Compose Files" msgstr "" -#: ../../source/how-to-configure-logging.rst:36 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:385 msgid "" -"By default, the Flower log is outputted to the terminal where you launch " -"your Federated Learning workload from. This applies for both gRPC-based " -"federation (i.e. when you do :code:`fl.server.start_server`) and when " -"using the :code:`VirtualClientEngine` (i.e. when you do " -":code:`fl.simulation.start_simulation`). In some situations you might " -"want to save this log to disk. You can do so by calling the " -"`fl.common.logger.configure() " -"`_" -" function. For example:" +"You can merge multiple Compose files into a single file. For instance, if" +" you wish to combine the basic configuration with the TLS configuration, " +"execute the following command:" msgstr "" -#: ../../source/how-to-configure-logging.rst:53 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:394 msgid "" -"With the above, Flower will record the log you see on your terminal to " -":code:`log.txt`. This file will be created in the same directory as were " -"you are running the code from. If we inspect we see the log above is also" -" recorded but prefixing with :code:`identifier` each line:" +"This will merge the contents of ``compose.yml`` and ``with-tls.yml`` into" +" a new file called ``my_compose.yml``." 
msgstr "" -#: ../../source/how-to-configure-logging.rst:74 -msgid "Log your own messages" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:398 +msgid "Step 10: Clean Up" msgstr "" -#: ../../source/how-to-configure-logging.rst:76 -msgid "" -"You might expand the information shown by default with the Flower logger " -"by adding more messages relevant to your application. You can achieve " -"this easily as follows." -msgstr "" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:400 +#, fuzzy +msgid "Remove all services and volumes:" +msgstr "R에서 모든 항목을 제거합니다." -#: ../../source/how-to-configure-logging.rst:102 -msgid "" -"In this way your logger will show, in addition to the default messages, " -"the ones introduced by the clients as specified above." -msgstr "" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:410 +#, fuzzy +msgid ":doc:`run-quickstart-examples-docker-compose`" +msgstr "빠른 시작 튜토리얼" -#: ../../source/how-to-configure-logging.rst:128 -msgid "Log to a remote service" -msgstr "" +#: ../../source/docker/use-a-different-version.rst:2 +#, fuzzy +msgid "Use a Different Flower Version" +msgstr "다른 Flower 버전 사용" -#: ../../source/how-to-configure-logging.rst:130 +#: ../../source/docker/use-a-different-version.rst:4 msgid "" -"The :code:`fl.common.logger.configure` function, also allows specifying a" -" host to which logs can be pushed (via :code:`POST`) through a native " -"Python :code:`logging.handler.HTTPHandler`. This is a particularly useful" -" feature in :code:`gRPC`-based Federated Learning workloads where " -"otherwise gathering logs from all entities (i.e. the server and the " -"clients) might be cumbersome. Note that in Flower simulation, the server " -"automatically displays all logs. You can still specify a " -":code:`HTTPHandler` should you wish to backup or analyze the logs " -"somewhere else." 
-msgstr "" - -#: ../../source/how-to-enable-ssl-connections.rst:2 -msgid "Enable SSL connections" +"If you want to use a different version of Flower, for example Flower " +"nightly, you can do so by changing the tag. All available versions are on" +" `Docker Hub `__." msgstr "" +"다른 버전의 Flower를 사용하려면 태그를 변경하여 사용할 수 있습니다(예: Flower nightly). 사용 가능한 모든 " +"버전은 `Docker Hub `__에 있습니다." -#: ../../source/how-to-enable-ssl-connections.rst:4 +#: ../../source/docker/use-a-different-version.rst:10 +#, fuzzy msgid "" -"This guide describes how to a SSL-enabled secure Flower server " -"(:code:`SuperLink`) can be started and how a Flower client " -"(:code:`SuperNode`) can establish a secure connections to it." +"When using Flower nightly, the SuperLink nightly image must be paired " +"with the corresponding SuperNode and ServerApp nightly images released on" +" the same day. To ensure the versions are in sync, using the concrete " +"tag, e.g., ``1.10.0.dev20240610`` instead of ``nightly`` is recommended." msgstr "" +"SuperNode Docker 이미지는 현재 1.9.0 야간 릴리스에서만 작동합니다. 안정 버전은 Flower 1.9.0(안정)이 " +"출시되면 사용할 수 있습니다(예상 출시일: 5월). SuperNode 야간 이미지는 같은 날 릴리스된 해당 SuperLink 및 " +"서버앱 야간 이미지와 페어링되어야 합니다. 버전이 동기화되도록 하려면 ``nightly`` 대신 " +"``1.9.0.dev20240501``과 같은 구체적인 태그를 사용하는 것이 좋습니다." -#: ../../source/how-to-enable-ssl-connections.rst:7 -msgid "" -"A complete code example demonstrating a secure connection can be found " -"`here `_." -msgstr "" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 +msgid "Example: FedBN in PyTorch - From Centralized To Federated" +msgstr "예시: PyTorch에서 FedBN - 중앙 집중식에서 연합식으로" -#: ../../source/how-to-enable-ssl-connections.rst:10 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 msgid "" -"The code example comes with a :code:`README.md` file which explains how " -"to start it. Although it is already SSL-enabled, it might be less " -"descriptive on how it does so. 
Stick to this guide for a deeper " -"introduction to the topic." -msgstr "" - -#: ../../source/how-to-enable-ssl-connections.rst:16 -msgid "Certificates" +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload with `FedBN " +"`_, a federated training strategy " +"designed for non-iid data. We are using PyTorch to train a Convolutional " +"Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " +"When applying FedBN, only few changes needed compared to :doc:`Example: " +"PyTorch - From Centralized To Federated `." msgstr "" +"이 튜토리얼에서는 non-iid data를 위해 설계된 federated 훈련 전략인 `FedBN " +"`_으로 기존 머신러닝 워크로드의 federated 버전을 구축하기 " +"위해 Flower를 사용하는 방법을 보여드립니다. 우리는 PyTorch를 사용하여 CIFAR-10 데이터 세트에서 컨볼루션 " +"신경망(일괄 정규화 레이어 포함)을 훈련하고 있습니다. FedBN을 적용할 때, :doc:`예제: 파이토치 -중앙 집중식에서 " +"연합식으로 ` 와 비교했을 때 몇 가지 사항만 " +"변경 하면 됩니다." -#: ../../source/how-to-enable-ssl-connections.rst:18 -msgid "" -"Using SSL-enabled connections requires certificates to be passed to the " -"server and client. For the purpose of this guide we are going to generate" -" self-signed certificates. As this can become quite complex we are going " -"to ask you to run the script in :code:`examples/advanced-" -"tensorflow/certificates/generate.sh` with the following command sequence:" -msgstr "" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:12 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 +msgid "Centralized Training" +msgstr "중앙 집중식 훈련" -#: ../../source/how-to-enable-ssl-connections.rst:29 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:14 +#, fuzzy msgid "" -"This will generate the certificates in :code:`examples/advanced-" -"tensorflow/.cache/certificates`." +"All files are revised based on :doc:`Example: PyTorch - From Centralized " +"To Federated `. 
The only " +"thing to do is modifying the file called ``cifar.py``, revised part is " +"shown below:" msgstr "" +"모든 파일은 :doc:`예제: 파이토치 -중앙 집중식에서 연합식으로 `를 기반으로 수정합니다. :code:`cifar.py`라는 파일을 수정하기만 하면 되며, 수정된 부분은 " +"아래와 같습니다:" -#: ../../source/how-to-enable-ssl-connections.rst:31 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:18 msgid "" -"The approach for generating SSL certificates in the context of this " -"example can serve as an inspiration and starting point, but it should not" -" be used as a reference for production environments. Please refer to " -"other sources regarding the issue of correctly generating certificates " -"for production environments. For non-critical prototyping or research " -"projects, it might be sufficient to use the self-signed certificates " -"generated using the scripts mentioned in this guide." -msgstr "" - -#: ../../source/how-to-enable-ssl-connections.rst:39 -msgid "Server (SuperLink)" -msgstr "" +"The model architecture defined in class Net() is added with Batch " +"Normalization layers accordingly." +msgstr "Net() 클래스에 정의된 모델 아키텍처는 그에 따라 배치 정규화 레이어가 추가됩니다." -#: ../../source/how-to-enable-ssl-connections.rst:41 -msgid "" -"Use the following terminal command to start a sever (SuperLink) that uses" -" the previously generated certificates:" -msgstr "" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:171 +msgid "You can now run your machine learning workload:" +msgstr "이제 머신 러닝 워크로드를 실행할 수 있습니다:" -#: ../../source/how-to-enable-ssl-connections.rst:47 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 msgid "" -"When providing certificates, the server expects a tuple of three " -"certificates paths: CA certificate, server certificate and server private" -" key." +"So far this should all look fairly familiar if you've used PyTorch " +"before. 
Let's take the next step and use what we've built to create a " +"federated learning system within FedBN, the system consists of one server" +" and two clients." msgstr "" +"지금까지는 파이토치를 사용해 본 적이 있다면 상당히 익숙하게 보일 것입니다. 다음 단계로 넘어가서 우리가 구축한 것을 사용하여 " +"FedBN 내에서 하나의 서버와 두 개의 클라이언트로 구성된 연합학습 시스템을 만들어 보겠습니다." -#: ../../source/how-to-enable-ssl-connections.rst:51 -msgid "Client (SuperNode)" -msgstr "" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:58 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:182 +msgid "Federated Training" +msgstr "연합 훈련" -#: ../../source/how-to-enable-ssl-connections.rst:53 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:60 +#, fuzzy msgid "" -"Use the following terminal command to start a client (SuperNode) that " -"uses the previously generated certificates:" +"If you have read :doc:`Example: PyTorch - From Centralized To Federated " +"`, the following parts are" +" easy to follow, only ``get_parameters`` and ``set_parameters`` function " +"in ``client.py`` needed to revise. If not, please read the :doc:`Example:" +" PyTorch - From Centralized To Federated `. first." msgstr "" +":doc:`예제: 파이토치 - 중앙 집중식에서 연합식으로 `를 읽었다면, 다음 부분은 쉽게 따라할 수 있으며 :code:`client.py`의 " +":code:`get_parameters`와 :code:`set_parameters` 함수만 수정해야 합니다. 그렇지 않은 경우 " +":doc:`예제: 파이토치 - 중앙 집중식에서 연합식으로 `를 먼저 읽어보세요." -#: ../../source/how-to-enable-ssl-connections.rst:61 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:66 +#, fuzzy msgid "" -"When setting :code:`root_certificates`, the client expects a file path to" -" PEM-encoded root certificates." +"Our example consists of one *server* and two *clients*. In FedBN, " +"``server.py`` keeps unchanged, we can start the server directly." msgstr "" +"이 예제는 하나의 *서버*와 두 개의 *클라이언트*로 구성됩니다. FedBN에서 :code:`server.py`는 변경되지 않고 " +"그대로 유지되므로 서버를 바로 시작할 수 있습니다." 
-#: ../../source/how-to-enable-ssl-connections.rst:67 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:73 +#, fuzzy msgid "" -"You should now have learned how to generate self-signed certificates " -"using the given script, start an SSL-enabled server and have a client " -"establish a secure connection to it." +"Finally, we will revise our *client* logic by changing ``get_parameters``" +" and ``set_parameters`` in ``client.py``, we will exclude batch " +"normalization parameters from model parameter list when sending to or " +"receiving from the server." msgstr "" +"마지막으로, :code:`client.py`에서 :code:`get_parameters` 및 " +":code:`set_parameters`를 변경하여 *client* 로직을 수정할 것입니다. 서버로 보내거나 서버에서 받을 때 모델" +" 파라미터 목록에서 배치 정규화 파라미터를 제외할 수 있습니다." -#: ../../source/how-to-enable-ssl-connections.rst:72 -msgid "Additional resources" -msgstr "" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:102 +msgid "Now, you can now open two additional terminal windows and run" +msgstr "이제 두 개의 터미널 창을 추가로 열고 다음을 실행할 수 있습니다" -#: ../../source/how-to-enable-ssl-connections.rst:74 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:108 msgid "" -"These additional sources might be relevant if you would like to dive " -"deeper into the topic of certificates:" -msgstr "" - -#: ../../source/how-to-enable-ssl-connections.rst:76 -msgid "`Let's Encrypt `_" -msgstr "" - -#: ../../source/how-to-enable-ssl-connections.rst:77 -msgid "`certbot `_" +"in each window (make sure that the server is still running before you do " +"so) and see your (previously centralized) PyTorch project run federated " +"learning with FedBN strategy across two clients. Congratulations!" msgstr "" +"를 입력하고(클릭하기 전에 서버가 계속 실행 중인지 확인하세요), (이전에 중앙 집중된) PyTorch 프로젝트가 두 클라이언트에서" +" FedBN으로 연합 학습을 실행하는 것을 확인합니다. 축하합니다!" 
-#: ../../source/how-to-implement-strategies.rst:2 -msgid "Implement strategies" -msgstr "" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:113 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:349 +#: ../../source/tutorial-quickstart-jax.rst:319 +msgid "Next Steps" +msgstr "다음 단계" -#: ../../source/how-to-implement-strategies.rst:4 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:115 msgid "" -"The strategy abstraction enables implementation of fully custom " -"strategies. A strategy is basically the federated learning algorithm that" -" runs on the server. Strategies decide how to sample clients, how to " -"configure clients for training, how to aggregate updates, and how to " -"evaluate models. Flower provides a few built-in strategies which are " -"based on the same API described below." +"The full source code for this example can be found `here " +"`_. Our example is of course somewhat over-" +"simplified because both clients load the exact same dataset, which isn't " +"realistic. You're now prepared to explore this topic further. How about " +"using different subsets of CIFAR-10 on each client? How about adding more" +" clients?" msgstr "" +"이 예제의 전체 소스 코드는 '여기 `_'에서 확인할 수 있습니다. 물론 이 예제는 두 " +"클라이언트가 완전히 동일한 데이터 세트를 로드하기 때문에 다소 지나치게 단순화되어 있으며, 이는 현실적이지 않습니다. 이제 이 " +"주제를 더 자세히 살펴볼 준비가 되셨습니다. 각 클라이언트에서 서로 다른 CIFAR-10의 하위 집합을 사용해 보는 것은 어떨까요?" +" 클라이언트를 더 추가하는 것은 어떨까요?" 
-#: ../../source/how-to-implement-strategies.rst:11 -msgid "The :code:`Strategy` abstraction" -msgstr "" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:2 +msgid "Example: PyTorch - From Centralized To Federated" +msgstr "예제: 파이토치 - 중앙 집중식에서 연합식으로" -#: ../../source/how-to-implement-strategies.rst:13 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:4 msgid "" -"All strategy implementation are derived from the abstract base class " -":code:`flwr.server.strategy.Strategy`, both built-in implementations and " -"third party implementations. This means that custom strategy " -"implementations have the exact same capabilities at their disposal as " -"built-in ones." +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload. We are using PyTorch to" +" train a Convolutional Neural Network on the CIFAR-10 dataset. First, we " +"introduce this machine learning task with a centralized training approach" +" based on the `Deep Learning with PyTorch " +"`_ " +"tutorial. Then, we build upon the centralized training code to run the " +"training in a federated fashion." msgstr "" +"이 튜토리얼에서는 Flower를 사용해 기존 머신 러닝 워크로드의 연합 버전을 구축하는 방법을 보여드립니다. 여기서는 " +"PyTorch를 사용해 CIFAR-10 데이터 세트에서 컨볼루션 신경망을 훈련합니다. 먼저, 'PyTorch로 딥 러닝 " +"`_ " +"튜토리얼을 기반으로 centralized 학습 접근 방식을 사용하여 이 머신 러닝 작업을 소개합니다. 그런 다음 " +"centralized 훈련 코드를 기반으로 연합 방식 훈련을 실행합니다." -#: ../../source/how-to-implement-strategies.rst:18 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:14 msgid "" -"The strategy abstraction defines a few abstract methods that need to be " -"implemented:" +"We begin with a brief description of the centralized CNN training code. " +"If you want a more in-depth explanation of what's going on then have a " +"look at the official `PyTorch tutorial " +"`_." msgstr "" +"중앙 집중식 CNN 트레이닝 코드에 대한 간략한 설명부터 시작하겠습니다. 무슨 일이 일어나고 있는지 더 자세히 설명하려면 공식 " +"`PyTorch 튜토리얼 " +"`_을 " +"참조하세요." 
-#: ../../source/how-to-implement-strategies.rst:75 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:18 +#, fuzzy msgid "" -"Creating a new strategy means implementing a new :code:`class` (derived " -"from the abstract base class :code:`Strategy`) that implements for the " -"previously shown abstract methods:" -msgstr "" - -#: ../../source/how-to-implement-strategies.rst:100 -msgid "The Flower server calls these methods in the following order:" -msgstr "" - -#: ../../source/how-to-implement-strategies.rst:177 -msgid "The following sections describe each of those methods in more detail." -msgstr "" - -#: ../../source/how-to-implement-strategies.rst:180 -msgid "The :code:`initialize_parameters` method" +"Let's create a new file called ``cifar.py`` with all the components " +"required for a traditional (centralized) training on CIFAR-10. First, all" +" required packages (such as ``torch`` and ``torchvision``) need to be " +"imported. You can see that we do not import any package for federated " +"learning. You can keep all these imports as they are even when we add the" +" federated learning components at a later point." msgstr "" +"CIFAR-10에 대한 기존 (중앙 집중식) 교육에 필요한 모든 구성 요소가 포함된 :code:`cifar.py`라는 새 파일을 " +"생성해 보겠습니다. 먼저, 필요한 모든 패키지(예: :code:`torch` 및 :code:`torchvision`)를 가져와야 " +"합니다. 연합 학습을 위한 패키지를 가져오지 않는 것을 확인 할 수 있습니. 나중에 연합 학습 구성 요소를 추가할 때에도 이러한 " +"모든 가져오기를 그대로 유지할 수 있습니다." -#: ../../source/how-to-implement-strategies.rst:182 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:36 +#, fuzzy msgid "" -":code:`initialize_parameters` is called only once, at the very beginning " -"of an execution. It is responsible for providing the initial global model" -" parameters in a serialized form (i.e., as a :code:`Parameters` object)." +"As already mentioned we will use the CIFAR-10 dataset for this machine " +"learning workload. The model architecture (a very simple Convolutional " +"Neural Network) is defined in ``class Net()``." 
msgstr "" +"이미 언급했듯이 이 머신 러닝 워크로드에는 CIFAR-10 데이터 세트를 사용합니다. 모델 아키텍처(매우 간단한 컨볼루션 신경망)는" +" :code:`class Net()`에 정의되어 있습니다." -#: ../../source/how-to-implement-strategies.rst:184 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:62 +#, fuzzy msgid "" -"Built-in strategies return user-provided initial parameters. The " -"following example shows how initial parameters can be passed to " -":code:`FedAvg`:" +"The ``load_data()`` function loads the CIFAR-10 training and test sets. " +"The ``transform`` normalized the data after loading." msgstr "" +":code:`load_data()` 함수는 CIFAR-10 훈련 및 테스트 세트를 로드합니다. :code:`transform`은 " +"로드 후 데이터를 정규화합니다." -#: ../../source/how-to-implement-strategies.rst:209 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:84 +#, fuzzy msgid "" -"The Flower server will call :code:`initialize_parameters`, which either " -"returns the parameters that were passed to :code:`initial_parameters`, or" -" :code:`None`. If no parameters are returned from " -":code:`initialize_parameters` (i.e., :code:`None`), the server will " -"randomly select one client and ask it to provide its parameters. This is " -"a convenience feature and not recommended in practice, but it can be " -"useful for prototyping. In practice, it is recommended to always use " -"server-side parameter initialization." +"We now need to define the training (function ``train()``) which loops " +"over the training set, measures the loss, backpropagates it, and then " +"takes one optimizer step for each batch of training examples." msgstr "" +"이제 학습 집합을 반복하고, 손실을 측정하고, 이를 역전파한 다음 각 학습 예제 배치에 대해 하나의 최적화 단계를 수행하는 " +"학습(함수 :code:`train()`)을 정의해야 합니다." -#: ../../source/how-to-implement-strategies.rst:213 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:88 +#, fuzzy msgid "" -"Server-side parameter initialization is a powerful mechanism. It can be " -"used, for example, to resume training from a previously saved checkpoint." 
-" It is also the fundamental capability needed to implement hybrid " -"approaches, for example, to fine-tune a pre-trained model using federated" -" learning." -msgstr "" - -#: ../../source/how-to-implement-strategies.rst:216 -msgid "The :code:`configure_fit` method" +"The evaluation of the model is defined in the function ``test()``. The " +"function loops over all test samples and measures the loss of the model " +"based on the test dataset." msgstr "" +"모델 평가는 :code:`test()` 함수에 정의되어 있습니다. 이 함수는 모든 테스트 샘플을 반복하고 테스트 데이터 세트에 따라" +" 모델의 손실을 측정합니다." -#: ../../source/how-to-implement-strategies.rst:218 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:149 msgid "" -":code:`configure_fit` is responsible for configuring the upcoming round " -"of training. What does *configure* mean in this context? Configuring a " -"round means selecting clients and deciding what instructions to send to " -"these clients. The signature of :code:`configure_fit` makes this clear:" -msgstr "" +"Having defined the data loading, model architecture, training, and " +"evaluation we can put everything together and train our CNN on CIFAR-10." +msgstr "데이터 로딩, 모델 아키텍처, 훈련 및 평가를 정의했으면 모든 것을 종합하여 CIFAR-10에서 CNN을 훈련할 수 있습니다." -#: ../../source/how-to-implement-strategies.rst:231 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:177 msgid "" -"The return value is a list of tuples, each representing the instructions " -"that will be sent to a particular client. Strategy implementations " -"usually perform the following steps in :code:`configure_fit`:" +"So far, this should all look fairly familiar if you've used PyTorch " +"before. Let's take the next step and use what we've built to create a " +"simple federated learning system consisting of one server and two " +"clients." msgstr "" +"지금까지는 파이토치를 사용해 본 적이 있다면 상당히 익숙하게 보일 것입니다. 다음 단계로 넘어가서 구축한 것을 사용하여 하나의 " +"서버와 두 개의 클라이언트로 구성된 간단한 연합 학습 시스템을 만들어 보겠습니다." 
-#: ../../source/how-to-implement-strategies.rst:233 -#: ../../source/how-to-implement-strategies.rst:280 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:184 msgid "" -"Use the :code:`client_manager` to randomly sample all (or a subset of) " -"available clients (each represented as a :code:`ClientProxy` object)" +"The simple machine learning project discussed in the previous section " +"trains the model on a single dataset (CIFAR-10), we call this centralized" +" learning. This concept of centralized learning, as shown in the previous" +" section, is probably known to most of you, and many of you have used it " +"previously. Normally, if you'd want to run machine learning workloads in " +"a federated fashion, then you'd have to change most of your code and set " +"everything up from scratch. This can be a considerable effort." msgstr "" +"이전 섹션에서 설명한 간단한 머신 러닝 프로젝트는 단일 데이터 세트(CIFAR-10)로 모델을 학습시키는데, 이를 중앙 집중식 " +"학습이라고 부릅니다. 이전 섹션에서 설명한 중앙 집중식 학습의 개념은 대부분 알고 계실 것이며, 많은 분들이 이전에 사용해 보셨을 " +"것입니다. 일반적으로 머신 러닝 워크로드를 연합 방식으로 실행하려면 대부분의 코드를 변경하고 모든 것을 처음부터 다시 설정해야 " +"합니다. 이는 상당한 노력이 필요할 수 있습니다." -#: ../../source/how-to-implement-strategies.rst:234 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 msgid "" -"Pair each :code:`ClientProxy` with the same :code:`FitIns` holding the " -"current global model :code:`parameters` and :code:`config` dict" -msgstr "" +"However, with Flower you can evolve your pre-existing code into a " +"federated learning setup without the need for a major rewrite." +msgstr "하지만 Flower를 사용하면 대대적인 재작성 없이도 기존 코드를 연합 학습 설정으로 발전시킬 수 있습니다." -#: ../../source/how-to-implement-strategies.rst:236 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:194 +#, fuzzy msgid "" -"More sophisticated implementations can use :code:`configure_fit` to " -"implement custom client selection logic. 
A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_fit`." +"The concept is easy to understand. We have to start a *server* and then " +"use the code in ``cifar.py`` for the *clients* that are connected to the " +"*server*. The *server* sends model parameters to the clients. The " +"*clients* run the training and update the parameters. The updated " +"parameters are sent back to the *server* which averages all received " +"parameter updates. This describes one round of the federated learning " +"process and we repeat this for multiple rounds." msgstr "" +"개념은 이해하기 쉽습니다. *서버*를 시작한 다음 *서버*에 연결된 *클라이언트*에 대해 :code:`cifar.py`의 코드를 " +"사용해야 합니다. *서버*는 모델 파라미터를 클라이언트로 전송합니다. *클라이언트*는 학습을 실행하고 파라미터를 업데이트합니다. " +"업데이트된 파라미터는 *서버*로 다시 전송되며, *서버*는 수신된 모든 파라미터 업데이트의 평균을 구합니다. 이것은 연합 학습 " +"프로세스의 한 라운드를 설명하며 여러 라운드에 걸쳐 이 과정을 반복합니다." -#: ../../source/how-to-implement-strategies.rst:240 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:201 +#: ../../source/tutorial-quickstart-jax.rst:147 +#, fuzzy msgid "" -"The structure of this return value provides a lot of flexibility to the " -"user. Since instructions are defined on a per-client basis, different " -"instructions can be sent to each client. This enables custom strategies " -"to train, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." +"Our example consists of one *server* and two *clients*. Let's set up " +"``server.py`` first. The *server* needs to import the Flower package " +"``flwr``. Next, we use the ``start_server`` function to start a server " +"and tell it to perform three rounds of federated learning." msgstr "" +"이 예제는 하나의 *서버*와 두 개의 *클라이언트*로 구성됩니다. 먼저 :code:`server.py`를 설정해 보겠습니다. " +"*server*는 Flower 패키지 :code:`flwr`를 가져와야 합니다. 다음으로, :code:`start_server` " +"함수를 사용하여 서버를 시작하고 세 차례의 연합 학습을 수행하도록 지시합니다." 
-#: ../../source/how-to-implement-strategies.rst:243 -msgid "The :code:`aggregate_fit` method" -msgstr "" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:215 +#: ../../source/tutorial-quickstart-jax.rst:161 +msgid "We can already start the *server*:" +msgstr "이미 *서버*를 시작할 수 있습니다:" -#: ../../source/how-to-implement-strategies.rst:245 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 +#, fuzzy msgid "" -":code:`aggregate_fit` is responsible for aggregating the results returned" -" by the clients that were selected and asked to train in " -":code:`configure_fit`." +"Finally, we will define our *client* logic in ``client.py`` and build " +"upon the previously defined centralized training in ``cifar.py``. Our " +"*client* needs to import ``flwr``, but also ``torch`` to update the " +"parameters on our PyTorch model:" msgstr "" +"마지막으로, :code:`client.py`에서 *client* 로직을 정의하고 :code:`cifar.py`에서 이전에 정의한 " +"중앙 집중식 학습을 기반으로 구축합니다. *클라이언트*는 :code:`flwr`을 가져와야 하며, PyTorch 모델의 파라미터를 " +"업데이트하기 위해 :code:`torch`도 가져와야 합니다:" -#: ../../source/how-to-implement-strategies.rst:258 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:238 +#, fuzzy msgid "" -"Of course, failures can happen, so there is no guarantee that the server " -"will get results from all the clients it sent instructions to (via " -":code:`configure_fit`). :code:`aggregate_fit` therefore receives a list " -"of :code:`results`, but also a list of :code:`failures`." -msgstr "" +"Implementing a Flower *client* basically means implementing a subclass of" +" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " +"implementation will be based on ``flwr.client.NumPyClient`` and we'll " +"call it ``CifarClient``. ``NumPyClient`` is slightly easier to implement " +"than ``Client`` if you use a framework with good NumPy interoperability " +"(like PyTorch or TensorFlow/Keras) because it avoids some of the " +"boilerplate that would otherwise be necessary. 
``CifarClient`` needs to " +"implement four methods, two methods for getting/setting model parameters," +" one method for training the model, and one method for testing the model:" +msgstr "" +"Flower *클라이언트*를 구현한다는 것은 기본적으로 :code:`flwr.client.Client` 또는 " +":code:`flwr.client.NumPyClient`의 서브클래스를 구현하는 것을 의미합니다. 우리의 구현은 " +":code:`flwr.client.NumPyClient`를 기반으로 하며, 이를 :code:`CifarClient`라고 부를 " +"것입니다. :code:`NumPyClient`는 파이토치나 텐서플로우/Keras처럼 NumPy 상호운용성이 좋은 프레임워크를 " +"사용하는 경우 필요한 일부 보일러플레이트를 피하기 때문에 :code:`Client`보다 구현하기가 조금 더 쉽습니다. " +":code:`CifarClient`는 모델 파라미터를 가져오거나 설정하는 메서드 2개, 모델 학습을 위한 메서드 1개, 모델 테스트를" +" 위한 메서드 1개 등 네 가지 메서드를 구현해야 합니다:" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 +#, fuzzy +msgid "``set_parameters``" +msgstr ":code:`set_parameters`" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:248 +#: ../../source/tutorial-quickstart-jax.rst:192 +msgid "" +"set the model parameters on the local model that are received from the " +"server" +msgstr "서버에서 수신한 로컬 모델의 모델 파라미터를 설정합니다" -#: ../../source/how-to-implement-strategies.rst:260 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 +#: ../../source/tutorial-quickstart-jax.rst:194 +#, fuzzy msgid "" -":code:`aggregate_fit` returns an optional :code:`Parameters` object and a" -" dictionary of aggregated metrics. The :code:`Parameters` return value is" -" optional because :code:`aggregate_fit` might decide that the results " -"provided are not sufficient for aggregation (e.g., too many failures)."
-msgstr "" +"loop over the list of model parameters received as NumPy ``ndarray``'s " +"(think list of neural network layers)" +msgstr "(신경망 레이어 목록으로 생각하면 됩니다) NumPy :code:`ndarray`로 받은 모델 파라미터 목록에 대해 반복합니다" -#: ../../source/how-to-implement-strategies.rst:263 -msgid "The :code:`configure_evaluate` method" -msgstr "" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 +#: ../../source/tutorial-quickstart-jax.rst:197 +#: ../../source/tutorial-quickstart-scikitlearn.rst:129 +#, fuzzy +msgid "``get_parameters``" +msgstr ":code:`get_parameters`" -#: ../../source/how-to-implement-strategies.rst:265 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 +#: ../../source/tutorial-quickstart-jax.rst:197 +#, fuzzy msgid "" -":code:`configure_evaluate` is responsible for configuring the upcoming " -"round of evaluation. What does *configure* mean in this context? " -"Configuring a round means selecting clients and deciding what " -"instructions to send to these clients. The signature of " -":code:`configure_evaluate` makes this clear:" +"get the model parameters and return them as a list of NumPy ``ndarray``'s" +" (which is what ``flwr.client.NumPyClient`` expects)" msgstr "" +"모델 매개변수를 가져와서 NumPy :code:`ndarray`의 목록으로 반환합니다(이는 " +":code:`flwr.client.NumPyClient`가 기대하는 바와 같습니다)" -#: ../../source/how-to-implement-strategies.rst:278 -msgid "" -"The return value is a list of tuples, each representing the instructions " -"that will be sent to a particular client. 
Strategy implementations " -"usually perform the following steps in :code:`configure_evaluate`:" -msgstr "" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 +#: ../../source/tutorial-quickstart-jax.rst:202 +#: ../../source/tutorial-quickstart-scikitlearn.rst:136 +#, fuzzy +msgid "``fit``" +msgstr "``fit``" -#: ../../source/how-to-implement-strategies.rst:281 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:255 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:260 +#: ../../source/tutorial-quickstart-jax.rst:200 +#: ../../source/tutorial-quickstart-jax.rst:205 msgid "" -"Pair each :code:`ClientProxy` with the same :code:`EvaluateIns` holding " -"the current global model :code:`parameters` and :code:`config` dict" -msgstr "" +"update the parameters of the local model with the parameters received " +"from the server" +msgstr "서버에서 받은 파라미터로 로컬 모델의 파라미터를 업데이트합니다" -#: ../../source/how-to-implement-strategies.rst:283 -msgid "" -"More sophisticated implementations can use :code:`configure_evaluate` to " -"implement custom client selection logic. A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_evaluate`." -msgstr "" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 +#: ../../source/tutorial-quickstart-jax.rst:202 +msgid "train the model on the local training set" +msgstr "로컬 훈련 세트에서 모델을 훈련합니다" -#: ../../source/how-to-implement-strategies.rst:287 -msgid "" -"The structure of this return value provides a lot of flexibility to the " -"user. Since instructions are defined on a per-client basis, different " -"instructions can be sent to each client. This enables custom strategies " -"to evaluate, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." 
-msgstr "" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:258 +msgid "get the updated local model weights and return them to the server" +msgstr "업데이트된 로컬 모델 가중치를 가져와 서버로 반환합니다" -#: ../../source/how-to-implement-strategies.rst:291 -msgid "The :code:`aggregate_evaluate` method" -msgstr "" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 +#: ../../source/tutorial-quickstart-jax.rst:208 +#: ../../source/tutorial-quickstart-scikitlearn.rst:139 +#, fuzzy +msgid "``evaluate``" +msgstr ":code:`evaluate`" -#: ../../source/how-to-implement-strategies.rst:293 -msgid "" -":code:`aggregate_evaluate` is responsible for aggregating the results " -"returned by the clients that were selected and asked to evaluate in " -":code:`configure_evaluate`." -msgstr "" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:262 +#: ../../source/tutorial-quickstart-jax.rst:207 +msgid "evaluate the updated model on the local test set" +msgstr "로컬 테스트 세트에서 업데이트된 모델을 평가합니다" -#: ../../source/how-to-implement-strategies.rst:306 -msgid "" -"Of course, failures can happen, so there is no guarantee that the server " -"will get results from all the clients it sent instructions to (via " -":code:`configure_evaluate`). :code:`aggregate_evaluate` therefore " -"receives a list of :code:`results`, but also a list of :code:`failures`." -msgstr "" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 +msgid "return the local loss and accuracy to the server" +msgstr "로컬 손실 및 정확도를 서버에 반환합니다" -#: ../../source/how-to-implement-strategies.rst:308 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:265 +#, fuzzy msgid "" -":code:`aggregate_evaluate` returns an optional :code:`float` (loss) and a" -" dictionary of aggregated metrics. The :code:`float` return value is " -"optional because :code:`aggregate_evaluate` might decide that the results" -" provided are not sufficient for aggregation (e.g., too many failures)." 
+"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " +"functions ``train()`` and ``test()`` previously defined in ``cifar.py``. " +"So what we really do here is we tell Flower through our ``NumPyClient`` " +"subclass which of our already defined functions to call for training and " +"evaluation. We included type annotations to give you a better " +"understanding of the data types that get passed around." msgstr "" +"두 개의 :code:`NumPyClient` 메서드인 :code:`fit`과 :code:`evaluate`는 이전에 " +":code:`cifar.py`에 정의된 함수인 :code:`train()`과 :code:`test()`를 활용합니다. 따라서 여기서" +" 실제로 하는 일은 :code:`NumPyClient` 서브클래스를 통해 이미 정의된 함수 중 훈련과 평가를 위해 호출할 함수를 " +"Flower에 알려주는 것입니다. 전달되는 데이터 유형을 더 잘 이해할 수 있도록 type annotations을 포함했습니다." -#: ../../source/how-to-implement-strategies.rst:311 -msgid "The :code:`evaluate` method" -msgstr "" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:315 +#, fuzzy +msgid "" +"All that's left to do it to define a function that loads both model and " +"data, creates a ``CifarClient``, and starts this client. You load your " +"data and model by using ``cifar.py``. Start ``CifarClient`` with the " +"function ``fl.client.start_client()`` by pointing it at the same IP " +"address we used in ``server.py``:" +msgstr "" +"이제 모델과 데이터를 모두 로드하는 함수를 정의하고, :code:`CifarClient`를 생성하고, 이 클라이언트를 시작하는 " +"작업만 남았습니다. 코드:`cifar.py`를 사용하여 데이터와 모델을 로드합니다. :code:`server.py`에서 사용한 것과" +" 동일한 IP 주소를 지정하여 :code:`fl.client.start_client()` 함수로 " +":code:`CifarClient`를 시작합니다:" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:338 +#: ../../source/tutorial-quickstart-jax.rst:309 +msgid "And that's it. You can now open two additional terminal windows and run" +msgstr "여기까지입니다. 이제 두 개의 터미널 창을 추가로 열고 다음을 실행할 수 있습니다" -#: ../../source/how-to-implement-strategies.rst:313 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:344 msgid "" -":code:`evaluate` is responsible for evaluating model parameters on the " -"server-side. 
Having :code:`evaluate` in addition to " -":code:`configure_evaluate`/:code:`aggregate_evaluate` enables strategies " -"to perform both servers-side and client-side (federated) evaluation." +"in each window (make sure that the server is running before you do so) " +"and see your (previously centralized) PyTorch project run federated " +"learning across two clients. Congratulations!" msgstr "" +"를 입력하고(그 전에 서버가 실행 중인지 확인하세요) (이전에는 중앙 집중식) PyTorch 프로젝트가 두 클라이언트에서 연합 " +"학습을 실행하는 것을 확인합니다. 축하합니다!" -#: ../../source/how-to-implement-strategies.rst:323 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:351 msgid "" -"The return value is again optional because the strategy might not need to" -" implement server-side evaluation or because the user-defined " -":code:`evaluate` method might not complete successfully (e.g., it might " -"fail to load the server-side evaluation data)." -msgstr "" - -#: ../../source/how-to-install-flower.rst:2 -msgid "Install Flower" -msgstr "" - -#: ../../source/how-to-install-flower.rst:6 -msgid "Python version" +"The full source code for this example: `PyTorch: From Centralized To " +"Federated (Code) `_. Our example is, of course, " +"somewhat over-simplified because both clients load the exact same " +"dataset, which isn't realistic. You're now prepared to explore this topic" +" further. How about using different subsets of CIFAR-10 on each client? " +"How about adding more clients?" msgstr "" +"이 예제의 전체 소스 코드: `파이토치: 중앙 Centralized에서 Federated으로 (코드) " +"`_. 물론 이 예제는 두 클라이언트가 완전히 동일한 데이터 세트를 로드하기 때문에 " +"다소 지나치게 단순화되어 있으며, 이는 현실적이지 않습니다. 이제 이 주제를 더 자세히 살펴볼 준비가 되셨습니다. 각 클라이언트에서" +" 서로 다른 CIFAR-10의 하위 집합을 사용해 보는 것은 어떨까요? 클라이언트를 더 추가하는 것은 어떨까요?" 
-#: ../../source/how-to-install-flower.rst:12 -msgid "Install stable release" -msgstr "" +#: ../../source/explanation-differential-privacy.rst:2 +#: ../../source/explanation-differential-privacy.rst:14 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 +msgid "Differential Privacy" +msgstr "차등 프라이버시" -#: ../../source/how-to-install-flower.rst:15 -#: ../../source/how-to-upgrade-to-flower-next.rst:46 -msgid "Using pip" +#: ../../source/explanation-differential-privacy.rst:4 +msgid "" +"The information in datasets like healthcare, financial transactions, user" +" preferences, etc., is valuable and has the potential for scientific " +"breakthroughs and provides important business insights. However, such " +"data is also sensitive and there is a risk of compromising individual " +"privacy." msgstr "" +"의료, 금융 거래, 사용자 선호도 등과 같은 데이터 세트의 정보는 가치 있고 과학적 혁신의 잠재력을 지니고 있으며 중요한 비즈니스 " +"인사이트를 제공합니다. 그러나 이러한 데이터는 또한 민감한 정보이며 개인의 프라이버시를 침해할 위험이 있습니다." -#: ../../source/how-to-install-flower.rst:17 +#: ../../source/explanation-differential-privacy.rst:9 msgid "" -"Stable releases are available on `PyPI " -"`_::" +"Traditional methods like anonymization alone would not work because of " +"attacks like Re-identification and Data Linkage. That's where " +"differential privacy comes in. It provides the possibility of analyzing " +"data while ensuring the privacy of individuals." msgstr "" +"익명화와 같은 기존 방법만으로는 재식별 및 데이터 연결과 같은 공격으로 인해 효과가 없습니다. 그래서 차등 프라이버시가 " +"등장했습니다. 차등 프라이버시는 개인의 개인 정보 보호를 보장하면서 데이터를 분석할 수 있는 가능성을 제공합니다." -#: ../../source/how-to-install-flower.rst:21 +#: ../../source/explanation-differential-privacy.rst:16 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr`` should be " -"installed with the ``simulation`` extra::" +"Imagine two datasets that are identical except for a single record (for " +"instance, Alice's data). 
Differential Privacy (DP) guarantees that any " +"analysis (M), like calculating the average income, will produce nearly " +"identical results for both datasets (O and O' would be similar). This " +"preserves group patterns while obscuring individual details, ensuring the" +" individual's information remains hidden in the crowd." msgstr "" +"하나의 레코드(예: 앨리스의 데이터)를 제외하고는 동일한 두 개의 데이터 세트가 있다고 상상해 보세요. 차등 프라이버시(DP)는 " +"평균 소득 계산과 같은 모든 분석(M)이 두 데이터 세트에 대해 거의 동일한 결과를 산출하도록 보장합니다(O와 O' 는 비슷할 " +"것입니다). 이렇게 하면 그룹 패턴은 보존하면서 개별 세부 정보는 가려져 개인의 정보가 군중 속에 숨겨집니다." -#: ../../source/how-to-install-flower.rst:27 -msgid "Using conda (or mamba)" -msgstr "" +#: ../../source/explanation-differential-privacy.rst:-1 +msgid "DP Intro" +msgstr "DP 소개" -#: ../../source/how-to-install-flower.rst:29 -msgid "Flower can also be installed from the ``conda-forge`` channel." +#: ../../source/explanation-differential-privacy.rst:27 +msgid "" +"One of the most commonly used mechanisms to achieve DP is adding enough " +"noise to the output of the analysis to mask the contribution of each " +"individual in the data while preserving the overall accuracy of the " +"analysis." msgstr "" +"DP를 달성하기 위해 가장 일반적으로 사용되는 메커니즘 중 하나는 분석의 전반적인 정확도를 유지하면서 데이터에서 각 개인의 기여도를" +" 가릴 수 있도록 분석 결과에 충분한 노이즈를 추가하는 것입니다." + +#: ../../source/explanation-differential-privacy.rst:32 +msgid "Formal Definition" +msgstr "공식 정의" -#: ../../source/how-to-install-flower.rst:31 +#: ../../source/explanation-differential-privacy.rst:34 msgid "" -"If you have not added ``conda-forge`` to your channels, you will first " -"need to run the following::" +"Differential Privacy (DP) provides statistical guarantees against the " +"information an adversary can infer through the output of a randomized " +"algorithm. It provides an unconditional upper bound on the influence of a" +" single individual on the output of the algorithm by adding noise [1]. 
A " +"randomized mechanism M provides (:math:`\\epsilon`, " +":math:`\\delta`)-differential privacy if for any two neighboring " +"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " +"record, and for all possible outputs S ⊆ Range(A):" msgstr "" +"차등 프라이버시(DP)는 공격자가 무작위 알고리즘의 출력을 통해 유추할 수 있는 정보에 대해 통계적 보장을 제공합니다. 이는 " +"노이즈를 추가하여 알고리즘의 출력에 대한 한 개인의 영향력에 대한 무조건적인 상한선을 제공합니다[1]. 무작위 메커니즘 M은 하나의" +" 레코드만 다른 두 개의 인접 데이터베이스인 D:sub:`1`과 D:sub:`2`의 경우, 가능한 모든 출력 S ⊆ " +"Range(A)에 대해 (:math:`\\epsilon`, :math:`\\delta`)-차등 프라이버시를 제공합니다:" -#: ../../source/how-to-install-flower.rst:36 +#: ../../source/explanation-differential-privacy.rst:42 +#, fuzzy msgid "" -"Once the ``conda-forge`` channel has been enabled, ``flwr`` can be " -"installed with ``conda``::" +"\\small\n" +"P[M(D_{1} \\in A)] \\leq e^{\\epsilon} P[M(D_{2} \\in A)] + \\delta" msgstr "" +"\\small\n" +"P[M(D_{1} \\in A)] \\leq e^{\\epsilon} P[M(D_{2} \\in A)] + \\delta" -#: ../../source/how-to-install-flower.rst:40 -msgid "or with ``mamba``::" +#: ../../source/explanation-differential-privacy.rst:47 +msgid "" +"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " +"metric of privacy loss. It also controls the privacy-utility trade-off; " +"lower :math:`\\epsilon` values indicate higher levels of privacy but are " +"likely to reduce utility as well. The :math:`\\delta` parameter accounts " +"for a small probability on which the upper bound :math:`\\epsilon` does " +"not hold. The amount of noise needed to achieve differential privacy is " +"proportional to the sensitivity of the output, which measures the maximum" +" change in the output due to the inclusion or removal of a single record." msgstr "" +"프라이버시 예산이라고도 하는 :math:`\\epsilon` 매개변수는 프라이버시 손실을 측정하는 지표입니다. 이 매개변수는 " +"프라이버시와 효용의 균형을 제어하며, :math:`\\epsilon` 값이 낮을수록 프라이버시 수준이 높지만 효용도 감소할 가능성이" +" 높습니다. :math:`\\delta` 매개변수는 상한값인 :math:`\\epsilon`이 적용되지 않는 작은 확률을 설명합니다."
+" 차등 프라이버시를 달성하는 데 필요한 노이즈의 양은 출력의 감도에 비례하며, 이는 단일 레코드의 포함 또는 제거로 인한 출력의 " +"최대 변화를 측정합니다." -#: ../../source/how-to-install-flower.rst:46 -msgid "Verify installation" -msgstr "" +#: ../../source/explanation-differential-privacy.rst:56 +msgid "Differential Privacy in Machine Learning" +msgstr "머신 러닝의 차등 프라이버시" -#: ../../source/how-to-install-flower.rst:48 +#: ../../source/explanation-differential-privacy.rst:58 msgid "" -"The following command can be used to verify if Flower was successfully " -"installed. If everything worked, it should print the version of Flower to" -" the command line::" +"DP can be utilized in machine learning to preserve the privacy of the " +"training data. Differentially private machine learning algorithms are " +"designed in a way to prevent the algorithm to learn any specific " +"information about any individual data points and subsequently prevent the" +" model from revealing sensitive information. Depending on the stage at " +"which noise is introduced, various methods exist for applying DP to " +"machine learning algorithms. One approach involves adding noise to the " +"training data (either to the features or labels), while another method " +"entails injecting noise into the gradients of the loss function during " +"model training. Additionally, such noise can be incorporated into the " +"model's output." msgstr "" +"머신 러닝에서 DP를 활용하여 학습 데이터의 개인정보를 보호할 수 있습니다. 차등 비공개 머신 러닝 알고리즘은 알고리즘이 개별 " +"데이터 포인트에 대한 특정 정보를 학습하지 못하도록 하여 모델이 민감한 정보를 노출하지 않도록 하는 방식으로 설계되었습니다. " +"노이즈가 도입되는 단계에 따라 머신 러닝 알고리즘에 DP를 적용하는 다양한 방법이 존재합니다. 한 가지 방법은 학습 데이터(특징 " +"또는 레이블)에 노이즈를 추가하는 것이고, 다른 방법은 모델 학습 중에 손실 함수의 기울기에 노이즈를 주입하는 것입니다. 또한 " +"이러한 노이즈를 모델의 출력에 통합할 수도 있습니다." 
-#: ../../source/how-to-install-flower.rst:55 -msgid "Advanced installation options" -msgstr "" +#: ../../source/explanation-differential-privacy.rst:69 +msgid "Differential Privacy in Federated Learning" +msgstr "연합 학습의 차등 프라이버시" -#: ../../source/how-to-install-flower.rst:58 -msgid "Install via Docker" +#: ../../source/explanation-differential-privacy.rst:71 +msgid "" +"Federated learning is a data minimization approach that allows multiple " +"parties to collaboratively train a model without sharing their raw data. " +"However, federated learning also introduces new privacy challenges. The " +"model updates between parties and the central server can leak information" +" about the local data. These leaks can be exploited by attacks such as " +"membership inference and property inference attacks, or model inversion " +"attacks." msgstr "" +"연합 학습은 여러 당사자가 원시 데이터를 공유하지 않고도 공동으로 모델을 학습할 수 있는 데이터 최소화 접근 방식입니다. 그러나 " +"연합 학습은 새로운 개인정보 보호 문제를 야기하기도 합니다. 당사자와 중앙 서버 간의 모델 업데이트는 로컬 데이터에 대한 정보를 " +"유출할 수 있습니다. 이러한 유출은 멤버십 추론 및 속성 추론 공격이나 모델 반전 공격과 같은 공격에 악용될 수 있습니다." -#: ../../source/how-to-install-flower.rst:60 -msgid ":doc:`How to run Flower using Docker `" -msgstr "" +#: ../../source/explanation-differential-privacy.rst:78 +msgid "" +"DP can play a crucial role in federated learning to provide privacy for " +"the clients' data." +msgstr "DP는 연합 학습에서 클라이언트의 데이터에 대한 개인 정보 보호를 제공하는 데 중요한 역할을 할 수 있습니다." -#: ../../source/how-to-install-flower.rst:63 -msgid "Install pre-release" +#: ../../source/explanation-differential-privacy.rst:81 +msgid "" +"Depending on the granularity of privacy provision or the location of " +"noise addition, different forms of DP exist in federated learning. In " +"this explainer, we focus on two approaches of DP utilization in federated" +" learning based on where the noise is added: at the server (also known as" +" the center) or at the client (also known as the local)." msgstr "" +"개인 정보 제공의 세분성 또는 노이즈 추가 위치에 따라 연합 학습에는 다양한 형태의 DP가 존재합니다. 
이 설명에서는 노이즈가 " +"추가되는 위치에 따라 서버(중앙이라고도 함) 또는 클라이언트(로컬이라고도 함)에서의 연합 학습에서 DP를 활용하는 두 가지 접근 " +"방식에 중점을 둡니다." -#: ../../source/how-to-install-flower.rst:65 +#: ../../source/explanation-differential-privacy.rst:86 msgid "" -"New (possibly unstable) versions of Flower are sometimes available as " -"pre-release versions (alpha, beta, release candidate) before the stable " -"release happens::" +"**Central Differential Privacy**: DP is applied by the server and the " +"goal is to prevent the aggregated model from leaking information about " +"each client's data." msgstr "" +"**중앙 차등 프라이버시**: DP는 서버에서 적용되며 집계된 모델이 각 클라이언트의 데이터에 대한 정보를 유출하는 것을 방지하는 " +"것이 목표입니다." -#: ../../source/how-to-install-flower.rst:69 +#: ../../source/explanation-differential-privacy.rst:88 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr`` pre-releases" -" should be installed with the ``simulation`` extra::" +"**Local Differential Privacy**: DP is applied on the client side before " +"sending any information to the server and the goal is to prevent the " +"updates that are sent to the server from leaking any information about " +"the client's data." msgstr "" +"**로컬 차등 프라이버시**: DP는 정보를 서버로 보내기 전에 클라이언트 측에서 적용되며, 서버로 전송되는 업데이트가 클라이언트 " +"데이터에 대한 정보를 유출하는 것을 방지하는 것이 목표입니다." -#: ../../source/how-to-install-flower.rst:74 -msgid "Install nightly release" -msgstr "" +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:93 +#: ../../source/how-to-use-differential-privacy.rst:15 +msgid "Central Differential Privacy" +msgstr "중앙 차등 프라이버시" -#: ../../source/how-to-install-flower.rst:76 +#: ../../source/explanation-differential-privacy.rst:95 msgid "" -"The latest (potentially unstable) changes in Flower are available as " -"nightly releases::" +"In this approach, which is also known as user-level DP, the central " +"server is responsible for adding noise to the globally aggregated " +"parameters. 
It should be noted that trust in the server is required." msgstr "" +"사용자 수준 DP라고도 하는 이 접근 방식에서는 중앙 서버가 전역적으로 집계된 매개변수에 노이즈를 추가하는 역할을 담당합니다. " +"서버에 대한 신뢰가 필요하다는 점에 유의해야 합니다." -#: ../../source/how-to-install-flower.rst:80 +#: ../../source/explanation-differential-privacy.rst:104 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr-nightly`` " -"should be installed with the ``simulation`` extra::" +"While there are various ways to implement central DP in federated " +"learning, we concentrate on the algorithms proposed by [2] and [3]. The " +"overall approach is to clip the model updates sent by the clients and add" +" some amount of noise to the aggregated model. In each iteration, a " +"random set of clients is chosen with a specific probability for training." +" Each client performs local training on its own data. The update of each " +"client is then clipped by some value `S` (sensitivity `S`). This would " +"limit the impact of any individual client which is crucial for privacy " +"and often beneficial for robustness. A common approach to achieve this is" +" by restricting the `L2` norm of the clients' model updates, ensuring " +"that larger updates are scaled down to fit within the norm `S`." msgstr "" +"연합 학습에서 중앙 DP를 구현하는 방법은 여러 가지가 있지만, 여기서는 [2]와 [3]에서 제안한 알고리즘에 집중합니다. 전반적인" +" 접근 방식은 클라이언트가 전송한 모델 업데이트를 잘라내고 집계된 모델에 약간의 노이즈를 추가하는 것입니다. 각 반복에서 특정 " +"확률로 훈련할 무작위 클라이언트 세트가 선택됩니다. 각 클라이언트는 자체 데이터에 대해 로컬 학습을 수행합니다. 그런 다음 각 " +"클라이언트의 업데이트는 특정 값 `S`(민감도 `S`)에 의해 잘립니다. 이렇게 하면 개별 클라이언트의 영향을 제한할 수 있어 " +"개인정보 보호에 중요하고 견고성에 도움이 되는 경우가 많습니다. 이를 달성하기 위한 일반적인 접근 방식은 클라이언트 모델 업데이트의" +" `L2` 규범을 제한하여 더 큰 업데이트가 규범 `S`에 맞도록 축소되도록 하는 것입니다." 
-#: ../../source/how-to-monitor-simulation.rst:2 -msgid "Monitor simulation" -msgstr "" +#: ../../source/explanation-differential-privacy.rst:-1 +msgid "clipping" +msgstr "클리핑" -#: ../../source/how-to-monitor-simulation.rst:4 +#: ../../source/explanation-differential-privacy.rst:120 msgid "" -"Flower allows you to monitor system resources while running your " -"simulation. Moreover, the Flower simulation engine is powerful and " -"enables you to decide how to allocate resources per client manner and " -"constrain the total usage. Insights from resource consumption can help " -"you make smarter decisions and speed up the execution time." +"Afterwards, the Gaussian mechanism is used to add noise in order to " +"distort the sum of all clients' updates. The amount of noise is scaled to" +" the sensitivity value to obtain a privacy guarantee. The Gaussian " +"mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " +"noise_scale * S ) / (number of sampled clients)`." msgstr "" +"그 후 가우시안 메커니즘을 사용하여 모든 클라이언트의 업데이트 합계를 왜곡하기 위해 노이즈를 추가합니다. 노이즈의 양은 감도 값에 " +"따라 조정되어 프라이버시 보장을 얻습니다. 가우시안 메커니즘은 `N (0, σ²)`에서 샘플링된 노이즈와 함께 사용됩니다. 여기서 " +"`σ = (noise_scale * S) / (샘플링된 클라이언트 수)`입니다." -#: ../../source/how-to-monitor-simulation.rst:6 -msgid "" -"The specific instructions assume you are using macOS and have the " -"`Homebrew `_ package manager installed." -msgstr "" +#: ../../source/explanation-differential-privacy.rst:126 +msgid "Clipping" +msgstr "클리핑" -#: ../../source/how-to-monitor-simulation.rst:10 -msgid "Downloads" -msgstr "" +#: ../../source/explanation-differential-privacy.rst:128 +msgid "" +"There are two forms of clipping commonly used in Central DP: Fixed " +"Clipping and Adaptive Clipping." +msgstr "중앙 DP에서 일반적으로 사용되는 클리핑에는 고정 클리핑과 조정 클리핑의 두 가지 형태가 있습니다." 
-#: ../../source/how-to-monitor-simulation.rst:16 +#: ../../source/explanation-differential-privacy.rst:131 msgid "" -"`Prometheus `_ is used for data collection, while" -" `Grafana `_ will enable you to visualize the " -"collected data. They are both well integrated with `Ray " -"`_ which Flower uses under the hood." +"**Fixed Clipping** : A predefined fix threshold is set for the magnitude " +"of clients' updates. Any update exceeding this threshold is clipped back " +"to the threshold value." msgstr "" +"**고정 클리핑** : 클라이언트의 업데이트 크기에 대해 미리 정의된 고정 임계값이 설정됩니다. 이 임계값을 초과하는 모든 " +"업데이트는 임계값으로 다시 클리핑됩니다." -#: ../../source/how-to-monitor-simulation.rst:18 +#: ../../source/explanation-differential-privacy.rst:133 msgid "" -"Overwrite the configuration files (depending on your device, it might be " -"installed on a different path)." +"**Adaptive Clipping** : The clipping threshold dynamically adjusts based " +"on the observed update distribution [4]. It means that the clipping value" +" is tuned during the rounds with respect to the quantile of the update " +"norm distribution." msgstr "" +"**조정 클리핑** : 클리핑 임계값은 관찰된 업데이트 분포에 따라 동적으로 조정됩니다[4]. 즉, 클리핑 값은 업데이트 표준 " +"분포의 사분위수에 따라 라운드가 진행되는 동안 조정됩니다." -#: ../../source/how-to-monitor-simulation.rst:20 -msgid "If you are on an M1 Mac, it should be:" -msgstr "" +#: ../../source/explanation-differential-privacy.rst:137 +msgid "" +"The choice between fixed and adaptive clipping depends on various factors" +" such as privacy requirements, data distribution, model complexity, and " +"others." +msgstr "고정 클리핑과 조정 클리핑 중 선택은 개인정보 보호 요구 사항, 데이터 배포, 모델 복잡성 등 다양한 요인에 따라 달라집니다." 
-#: ../../source/how-to-monitor-simulation.rst:27 -msgid "On the previous generation Intel Mac devices, it should be:" -msgstr "" +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:141 +#: ../../source/how-to-use-differential-privacy.rst:113 +msgid "Local Differential Privacy" +msgstr "로컬 차등 프라이버시" -#: ../../source/how-to-monitor-simulation.rst:34 +#: ../../source/explanation-differential-privacy.rst:143 msgid "" -"Open the respective configuration files and change them. Depending on " -"your device, use one of the two following commands:" +"In this approach, each client is responsible for performing DP. Local DP " +"avoids the need for a fully trusted aggregator, but it should be noted " +"that local DP leads to a decrease in accuracy but better privacy in " +"comparison to central DP." msgstr "" +"이 접근 방식에서는 각 클라이언트가 DP를 수행할 책임이 있습니다. 로컬 DP는 완전히 신뢰할 수 있는 애그리게이터가 필요하지 " +"않지만, 로컬 DP는 중앙 DP에 비해 정확도는 떨어져도 개인 정보 보호는 더 우수하다는 점에 유의해야 합니다." -#: ../../source/how-to-monitor-simulation.rst:44 +#: ../../source/explanation-differential-privacy.rst:152 +msgid "In this explainer, we focus on two forms of achieving Local DP:" +msgstr "이 설명에서는 로컬 DP를 달성하는 두 가지 형태에 중점을 둡니다:" + +#: ../../source/explanation-differential-privacy.rst:154 msgid "" -"and then delete all the text in the file and paste a new Prometheus " -"config you see below. You may adjust the time intervals to your " -"requirements:" +"Each client adds noise to the local updates before sending them to the " +"server. To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " +"the sensitivity of the local model to be ∆, Gaussian noise is applied " +"with a noise scale of σ where:" msgstr "" +"각 클라이언트는 로컬 업데이트를 서버로 보내기 전에 로컬 업데이트에 노이즈를 추가합니다. 
로컬 모델의 감도를 ∆로 간주하여 가우시안" +" 노이즈가 σ의 노이즈 스케일로 적용되어 (:math:`\\epsilon`, :math:`\\delta`)-DP를 달성하기 위해, " +"여기서 σ는 노이즈 스케일입니다:" -#: ../../source/how-to-monitor-simulation.rst:59 +#: ../../source/explanation-differential-privacy.rst:158 +#, fuzzy msgid "" -"Now after you have edited the Prometheus configuration, do the same with " -"the Grafana configuration files. Open those using one of the following " -"commands as before:" +"\\small\n" +"\\frac{∆ \\times \\sqrt{2 \\times " +"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}" msgstr "" +"\\small\n" +"\\frac{∆ \\times \\sqrt{2 \\times " +"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}" -#: ../../source/how-to-monitor-simulation.rst:69 +#: ../../source/explanation-differential-privacy.rst:163 msgid "" -"Your terminal editor should open and allow you to apply the following " -"configuration as before." +"Each client adds noise to the gradients of the model during the local " +"training (DP-SGD). More specifically, in this approach, gradients are " +"clipped and an amount of calibrated noise is injected into the gradients." msgstr "" +"각 클라이언트는 로컬 트레이닝(DP-SGD) 중에 모델의 gradient에 노이즈를 추가합니다. 보다 구체적으로, 이 접근 " +"방식에서는 gradient이 클리핑되고 보정된 노이즈가 gradient에 주입됩니다." -#: ../../source/how-to-monitor-simulation.rst:84 +#: ../../source/explanation-differential-privacy.rst:167 msgid "" -"Congratulations, you just downloaded all the necessary software needed " -"for metrics tracking. Now, let’s start it." -msgstr "" +"Please note that these two approaches are providing privacy at different " +"levels." +msgstr "이 두 가지 접근 방식은 서로 다른 수준의 개인정보 보호 기능을 제공한다는 점에 유의하세요." -#: ../../source/how-to-monitor-simulation.rst:88 -msgid "Tracking metrics" -msgstr "" +#: ../../source/explanation-differential-privacy.rst:169 +msgid "**References:**" +msgstr "**참고:**" + +#: ../../source/explanation-differential-privacy.rst:171 +msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." +msgstr "[1] Dwork 외. 
차등 프라이버시의 알고리즘적 기초." -#: ../../source/how-to-monitor-simulation.rst:90 +#: ../../source/explanation-differential-privacy.rst:173 msgid "" -"Before running your Flower simulation, you have to start the monitoring " -"tools you have just installed and configured." -msgstr "" +"[2] McMahan et al. Learning Differentially Private Recurrent Language " +"Models." +msgstr "[2] McMahan 외. 차등적 개인 반복 언어 모델 학습." -#: ../../source/how-to-monitor-simulation.rst:97 +#: ../../source/explanation-differential-privacy.rst:175 msgid "" -"Please include the following argument in your Python code when starting a" -" simulation." -msgstr "" +"[3] Geyer et al. Differentially Private Federated Learning: A Client " +"Level Perspective." +msgstr "[3] Geyer 외. 차등적 개인 연합 학습: 고객 수준의 관점." -#: ../../source/how-to-monitor-simulation.rst:108 -msgid "Now, you are ready to start your workload." -msgstr "" +#: ../../source/explanation-differential-privacy.rst:177 +msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." +msgstr "[4] Galen 외. 조정형 클리핑을 통한 차등적 개인 학습." -#: ../../source/how-to-monitor-simulation.rst:110 -msgid "" -"Shortly after the simulation starts, you should see the following logs in" -" your terminal:" -msgstr "" - -#: ../../source/how-to-monitor-simulation.rst:117 -msgid "You can look at everything at ``_ ." -msgstr "" - -#: ../../source/how-to-monitor-simulation.rst:119 -msgid "" -"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" -" lowest option)." -msgstr "" - -#: ../../source/how-to-monitor-simulation.rst:121 -msgid "" -"Or alternatively, you can just see them in Grafana by clicking on the " -"right-up corner, “View in Grafana”. Please note that the Ray dashboard is" -" only accessible during the simulation. After the simulation ends, you " -"can only use Grafana to explore the metrics. You can start Grafana by " -"going to ``http://localhost:3000/``." 
-msgstr "" +#: ../../source/explanation-federated-evaluation.rst:2 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 +msgid "Federated evaluation" +msgstr "연합 평가" -#: ../../source/how-to-monitor-simulation.rst:123 +#: ../../source/explanation-federated-evaluation.rst:4 msgid "" -"After you finish the visualization, stop Prometheus and Grafana. This is " -"important as they will otherwise block, for example port :code:`3000` on " -"your machine as long as they are running." +"There are two main approaches to evaluating models in federated learning " +"systems: centralized (or server-side) evaluation and federated (or " +"client-side) evaluation." msgstr "" +"연합 학습 시스템에서 모델을 평가하는 데는 중앙 집중식(또는 서버 측) 평가와 연합(또는 클라이언트 측) 평가라는 두 가지 주요 " +"접근 방식이 있습니다." -#: ../../source/how-to-monitor-simulation.rst:132 -msgid "Resource allocation" -msgstr "" +#: ../../source/explanation-federated-evaluation.rst:8 +msgid "Centralized Evaluation" +msgstr "중앙 집중식 평가" -#: ../../source/how-to-monitor-simulation.rst:134 -msgid "" -"You must understand how the Ray library works to efficiently allocate " -"system resources to simulation clients on your own." -msgstr "" +#: ../../source/explanation-federated-evaluation.rst:11 +msgid "Built-In Strategies" +msgstr "기본 제공 전략" -#: ../../source/how-to-monitor-simulation.rst:136 +#: ../../source/explanation-federated-evaluation.rst:13 msgid "" -"Initially, the simulation (which Ray handles under the hood) starts by " -"default with all the available resources on the system, which it shares " -"among the clients. It doesn't mean it divides it equally among all of " -"them, nor that the model training happens at all of them simultaneously. " -"You will learn more about that in the later part of this blog. You can " -"check the system resources by running the following:" +"All built-in strategies support centralized evaluation by providing an " +"evaluation function during initialization. 
An evaluation function is any " +"function that can take the current global model parameters as input and " +"return evaluation results:" msgstr "" +"모든 기본 제공 전략은 초기화 중에 평가 함수를 제공하여 중앙 집중식 평가를 지원합니다. 평가 함수는 현재 글로벌 모델 파라미터를 " +"입력으로 받아 평가 결과를 반환할 수 있는 모든 함수입니다:" -#: ../../source/how-to-monitor-simulation.rst:143 -msgid "In Google Colab, the result you see might be similar to this:" -msgstr "" +#: ../../source/explanation-federated-evaluation.rst:61 +msgid "Custom Strategies" +msgstr "사용자 정의 전략" -#: ../../source/how-to-monitor-simulation.rst:155 +#: ../../source/explanation-federated-evaluation.rst:63 +#, fuzzy msgid "" -"However, you can overwrite the defaults. When starting a simulation, do " -"the following (you don't need to overwrite all of them):" +"The ``Strategy`` abstraction provides a method called ``evaluate`` that " +"can directly be used to evaluate the current global model parameters. The" +" current server implementation calls ``evaluate`` after parameter " +"aggregation and before federated evaluation (see next paragraph)." msgstr "" +"코드:`전략` 추상화는 현재 전역 모델 파라미터를 평가하는 데 직접 사용할 수 있는 :코드:`평가`라는 메서드를 제공합니다. 현재 " +"서버 구현에서는 매개변수 집계 후와 연합 평가 전에 :code:`evaluate`를 호출합니다(다음 단락 참조)." -#: ../../source/how-to-monitor-simulation.rst:175 -msgid "Let’s also specify the resource for a single client." -msgstr "" +#: ../../source/explanation-federated-evaluation.rst:69 +msgid "Federated Evaluation" +msgstr "연합 평가" -#: ../../source/how-to-monitor-simulation.rst:205 -msgid "" -"Now comes the crucial part. Ray will start a new client only when it has " -"all the required resources (such that they run in parallel) when the " -"resources allow." 
-msgstr "" +#: ../../source/explanation-federated-evaluation.rst:72 +msgid "Implementing Federated Evaluation" +msgstr "연합 평가 구현" -#: ../../source/how-to-monitor-simulation.rst:207 +#: ../../source/explanation-federated-evaluation.rst:74 +#, fuzzy msgid "" -"In the example above, only one client will be run, so your clients won't " -"run concurrently. Setting :code:`client_num_gpus = 0.5` would allow " -"running two clients and therefore enable them to run concurrently. Be " -"careful not to require more resources than available. If you specified " -":code:`client_num_gpus = 2`, the simulation wouldn't start (even if you " -"had 2 GPUs but decided to set 1 in :code:`ray_init_args`)." -msgstr "" - -#: ../../source/how-to-monitor-simulation.rst:212 ../../source/ref-faq.rst:2 -msgid "FAQ" -msgstr "" +"Client-side evaluation happens in the ``Client.evaluate`` method and can " +"be configured from the server side." +msgstr "클라이언트 측 평가는 :code:`Client.evaluate` 메서드에서 이루어지며 서버 측에서 구성할 수 있습니다." -#: ../../source/how-to-monitor-simulation.rst:214 -msgid "Q: I don't see any metrics logged." -msgstr "" +#: ../../source/explanation-federated-evaluation.rst:108 +msgid "Configuring Federated Evaluation" +msgstr "연합 평가 구성" -#: ../../source/how-to-monitor-simulation.rst:216 +#: ../../source/explanation-federated-evaluation.rst:110 msgid "" -"A: The timeframe might not be properly set. The setting is in the top " -"right corner (\"Last 30 minutes\" by default). Please change the " -"timeframe to reflect the period when the simulation was running." -msgstr "" +"Federated evaluation can be configured from the server side. Built-in " +"strategies support the following arguments:" +msgstr "연합 평가는 서버 측에서 구성할 수 있습니다. 기본 제공 전략은 다음 인수를 지원합니다:" -#: ../../source/how-to-monitor-simulation.rst:218 +#: ../../source/explanation-federated-evaluation.rst:113 +#, fuzzy msgid "" -"Q: I see “Grafana server not detected. 
Please make sure the Grafana " -"server is running and refresh this page” after going to the Metrics tab " -"in Ray Dashboard." +"``fraction_evaluate``: a ``float`` defining the fraction of clients that " +"will be selected for evaluation. If ``fraction_evaluate`` is set to " +"``0.1`` and ``100`` clients are connected to the server, then ``10`` will" +" be randomly selected for evaluation. If ``fraction_evaluate`` is set to " +"``0.0``, federated evaluation will be disabled." msgstr "" +":code:`fraction_evaluate`: 평가를 위해 선택될 클라이언트의 비율을 정의하는 :code:`float`입니다. " +"코드:`fraction_evaluate`가 :code:`0.1`로 설정되어 있고 :code:`100` 클라이언트가 서버에 연결되어 " +"있는 경우 :code:`10`이 평가를 위해 무작위로 선택됩니다. code:`fraction_evaluate`가 " +":code:`0.0`으로 설정된 경우 연합 평가가 비활성화됩니다." -#: ../../source/how-to-monitor-simulation.rst:220 +#: ../../source/explanation-federated-evaluation.rst:118 +#, fuzzy msgid "" -"A: You probably don't have Grafana running. Please check the running " -"services" +"``min_evaluate_clients``: an ``int``: the minimum number of clients to be" +" selected for evaluation. If ``fraction_evaluate`` is set to ``0.1``, " +"``min_evaluate_clients`` is set to 20, and ``100`` clients are connected " +"to the server, then ``20`` clients will be selected for evaluation." msgstr "" +":code:`min_evaluate_clients`: 평가를 위해 선택할 최소 클라이언트 수. :code:`int`. " +"코드:`fraction_evaluate`가 :code:`0.1`로 설정되어 있고 :code:`min_evaluate_clients`가 " +"20으로 설정되어 있으며 :code:`100` 클라이언트가 서버에 연결되어 있는 경우 :code:`20` 클라이언트가 평가를 위해 " +"선택됩니다." -#: ../../source/how-to-monitor-simulation.rst:226 +#: ../../source/explanation-federated-evaluation.rst:122 +#, fuzzy msgid "" -"Q: I see \"This site can't be reached\" when going to " -"``_." +"``min_available_clients``: an ``int`` that defines the minimum number of " +"clients which need to be connected to the server before a round of " +"federated evaluation can start. 
If fewer than ``min_available_clients`` " +"are connected to the server, the server will wait until more clients are " +"connected before it continues to sample clients for evaluation." msgstr "" +":code:`min_available_clients`: federated 평가 단계를 시작하기 전에 서버에 연결해야 하는 최소 " +"클라이언트 수를 정의하는 :code:`int`입니다. 서버에 연결된 클라이언트가 " +":code:`min_available_clients`보다 적으면 서버는 더 많은 클라이언트가 연결될 때까지 기다렸다가 평가를 위한 " +"클라이언트 샘플링을 계속합니다." -#: ../../source/how-to-monitor-simulation.rst:228 +#: ../../source/explanation-federated-evaluation.rst:127 +#, fuzzy msgid "" -"A: Either the simulation has already finished, or you still need to start" -" Prometheus." +"``on_evaluate_config_fn``: a function that returns a configuration " +"dictionary which will be sent to the selected clients. The function will " +"be called during each round and provides a convenient way to customize " +"client-side evaluation from the server side, for example, to configure " +"the number of validation steps performed." msgstr "" +"code:`on_evaluate_config_fn`: 선택한 클라이언트로 전송할 구성 사전을 반환하는 함수입니다. 이 함수는 각 " +"단계 중에 호출되며, 서버 측에서 클라이언트 측 평가를 사용자 지정하는 편리한 방법을 제공합니다(예: 수행되는 유효성 검사 단계 수" +" 구성)." -#: ../../source/how-to-monitor-simulation.rst:232 -msgid "Resources" -msgstr "" +#: ../../source/explanation-federated-evaluation.rst:157 +msgid "Evaluating Local Model Updates During Training" +msgstr "훈련 중 로컬 모델 업데이트 평가" -#: ../../source/how-to-monitor-simulation.rst:234 +#: ../../source/explanation-federated-evaluation.rst:159 +#, fuzzy msgid "" -"Ray Dashboard: ``_" -msgstr "" - -#: ../../source/how-to-monitor-simulation.rst:236 -msgid "Ray Metrics: ``_" +"Model parameters can also be evaluated during training. ``Client.fit`` " +"can return arbitrary evaluation results as a dictionary:" msgstr "" +"모델 파라미터는 훈련 중에도 평가할 수 있습니다. 
:code:`Client.fit`은 임의의 평가 결과를 dictionary로 " +"반환할 수 있습니다:" -#: ../../source/how-to-run-flower-using-docker.rst:2 -msgid "Run Flower using Docker" -msgstr "" +#: ../../source/explanation-federated-evaluation.rst:201 +msgid "Full Code Example" +msgstr "전체 코드 예제" -#: ../../source/how-to-run-flower-using-docker.rst:4 +#: ../../source/explanation-federated-evaluation.rst:203 msgid "" -"The simplest way to get started with Flower is by using the pre-made " -"Docker images, which you can find on `Docker Hub " -"`__." -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:7 -msgid "Before you start, make sure that the Docker daemon is running:" +"For a full code example that uses both centralized and federated " +"evaluation, see the *Advanced TensorFlow Example* (the same approach can " +"be applied to workloads implemented in any other framework): " +"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" msgstr "" +"연합 평가와 중앙 집중식 평가를 모두 사용하는 전체 코드 예제는 *고급 텐서플로우 예제*(다른 프레임워크에서 구현된 워크로드에도 " +"동일한 접근 방식을 적용할 수 있음)를 참조하세요: " +"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" -#: ../../source/how-to-run-flower-using-docker.rst:14 +#: ../../source/explanation-flower-architecture.rst:-1 msgid "" -"If you do not see the version of Docker but instead get an error saying " -"that the command was not found, you will need to install Docker first. " -"You can find installation instruction `here `_." +"Explore the federated learning architecture of the Flower framework, " +"featuring multi-run, concurrent execution, and scalable, secure machine " +"learning while preserving data privacy." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:20 -msgid "" -"On Linux, Docker commands require ``sudo`` privilege. If you want to " -"avoid using ``sudo``, you can follow the `Post-installation steps " -"`_ on the " -"official Docker website." 
-msgstr "" +#: ../../source/explanation-flower-architecture.rst:2 +msgid "Flower Architecture" +msgstr "Flower 아키텍처" -#: ../../source/how-to-run-flower-using-docker.rst:26 +#: ../../source/explanation-flower-architecture.rst:4 msgid "" -"To ensure optimal performance and compatibility, the SuperLink, SuperNode" -" and ServerApp image must have the same version when running together. " -"This guarantees seamless integration and avoids potential conflicts or " -"issues that may arise from using different versions." -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:31 -msgid "Flower SuperLink" -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:34 -msgid "Quickstart" -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:36 -msgid "If you're looking to try out Flower, you can use the following command:" +"This page explains the architecture of deployed Flower federated learning" +" system." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:42 +#: ../../source/explanation-flower-architecture.rst:6 msgid "" -"The command pulls the Docker image with the tag ``1.8.0`` from Docker " -"Hub. The tag specifies the Flower version. In this case, Flower 1.8.0. " -"The ``--rm`` flag tells Docker to remove the container after it exits." +"In federated learning (FL), there is typically one server and a number of" +" clients that are connected to the server. This is often called a " +"federation." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:48 +#: ../../source/explanation-flower-architecture.rst:9 msgid "" -"By default, the Flower SuperLink keeps state in-memory. When using the " -"Docker flag ``--rm``, the state is not persisted between container " -"starts. We will show below how to save the state in a file on your host " -"system." +"The role of the server is to coordinate the training process. 
The role of" +" each client is to receive tasks from the server, execute those tasks and" +" return the results back to the server." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:52 -msgid "" -"The ``-p :`` flag tells Docker to map the ports " -"``9091``/``9092`` of the host to ``9091``/``9092`` of the container, " -"allowing you to access the Driver API on ``http://localhost:9091`` and " -"the Fleet API on ``http://localhost:9092``. Lastly, any flag that comes " -"after the tag is passed to the Flower SuperLink. Here, we are passing the" -" flag ``--insecure``." +#: ../../source/explanation-flower-architecture.rst:13 +msgid "This is sometimes called a hub-and-spoke topology:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:59 -#: ../../source/how-to-run-flower-using-docker.rst:238 -#: ../../source/how-to-run-flower-using-docker.rst:354 -msgid "" -"The ``--insecure`` flag enables insecure communication (using HTTP, not " -"HTTPS) and should only be used for testing purposes. We strongly " -"recommend enabling `SSL `__ when " -"deploying to a production environment." -msgstr "" +#: ../../source/explanation-flower-architecture.rst:21 +#, fuzzy +msgid "Hub-and-spoke topology in federated learning" +msgstr "연합 학습이란 무엇입니까?" -#: ../../source/how-to-run-flower-using-docker.rst:64 +#: ../../source/explanation-flower-architecture.rst:21 msgid "" -"You can use ``--help`` to view all available flags that the SuperLink " -"supports:" -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:71 -msgid "Mounting a volume to store the state on the host system" +"Hub-and-spoke topology in federated learning (one server, multiple " +"clients)." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:73 +#: ../../source/explanation-flower-architecture.rst:23 msgid "" -"If you want to persist the state of the SuperLink on your host system, " -"all you need to do is specify a path where you want to save the file on " -"your host system and a name for the database file. In the example below, " -"we tell Docker via the flag ``--volume`` to mount the user's home " -"directory (``~/`` on your host) into the ``/app/`` directory of the " -"container. Furthermore, we use the flag ``--database`` to specify the " -"name of the database file." +"In a real-world deployment, we typically want to run different projects " +"on such a federation. Each project could use different hyperparameters, " +"different model architectures, different aggregation strategies, or even " +"different machine learning frameworks like PyTorch and TensorFlow." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:86 +#: ../../source/explanation-flower-architecture.rst:28 msgid "" -"As soon as the SuperLink starts, the file ``state.db`` is created in the " -"user's home directory on your host system. If the file already exists, " -"the SuperLink tries to restore the state from the file. To start the " -"SuperLink with an empty database, simply remove the ``state.db`` file." -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:91 -#: ../../source/how-to-run-flower-using-docker.rst:260 -#: ../../source/how-to-run-flower-using-docker.rst:375 -msgid "Enabling SSL for secure connections" +"This is why, in Flower, both the server side and the client side are " +"split into two parts. One part is long-lived and responsible for " +"communicating across the network, the other part is short-lived and " +"executes task-specific code." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:93 -msgid "" -"To enable SSL, you will need a PEM-encoded root certificate, a PEM-" -"encoded private key and a PEM-encoded certificate chain." +#: ../../source/explanation-flower-architecture.rst:32 +msgid "A Flower `server` consists of **SuperLink** and ``ServerApp``:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:97 +#: ../../source/explanation-flower-architecture.rst:34 msgid "" -"For testing purposes, you can generate your own self-signed certificates." -" The `Enable SSL connections `__ page contains a section that" -" will guide you through the process." +"**SuperLink**: a long-running process that forwards task instructions to " +"clients (SuperNodes) and receives task results back." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:101 +#: ../../source/explanation-flower-architecture.rst:36 msgid "" -"Assuming all files we need are in the local ``certificates`` directory, " -"we can use the flag ``--volume`` to mount the local directory into the " -"``/app/`` directory of the container. This allows the SuperLink to access" -" the files within the container. Finally, we pass the names of the " -"certificates to the SuperLink with the ``--certificates`` flag." -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:113 -msgid "Flower SuperNode" +"``ServerApp``: a short-lived process with project-spcific code that " +"customizes all server-side aspects of federated learning systems (client " +"selection, client configuration, result aggregation). This is what AI " +"researchers and AI engineers write when they build Flower apps." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:115 -msgid "" -"The SuperNode Docker image comes with a pre-installed version of Flower " -"and serves as a base for building your own SuperNode image." 
+#: ../../source/explanation-flower-architecture.rst:41 +msgid "A Flower `client` consists of **SuperNode** and ``ClientApp``:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:120 +#: ../../source/explanation-flower-architecture.rst:43 msgid "" -"The SuperNode Docker image currently works only with the 1.9.0-nightly " -"release. A stable version will be available when Flower 1.9.0 (stable) " -"gets released (ETA: May). A SuperNode nightly image must be paired with " -"the corresponding SuperLink and ServerApp nightly images released on the " -"same day. To ensure the versions are in sync, using the concrete tag, " -"e.g., ``1.9.0.dev20240501`` instead of ``nightly`` is recommended." +"**SuperNode**: a long-running process that connects to the SuperLink, " +"asks for tasks, executes tasks (for example, \"train this model on your " +"local data\") and returns task results back to the SuperLink." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:126 +#: ../../source/explanation-flower-architecture.rst:46 msgid "" -"We will use the ``quickstart-pytorch`` example, which you can find in the" -" Flower repository, to illustrate how you can dockerize your ClientApp." +"``ClientApp``: a short-lived process with project-specific code that " +"customizes all client-side aspects of federated learning systems (local " +"model training and evaluation, pre- and post-processing). This is what AI" +" researchers and AI engineers write when they build Flower apps." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:134 +#: ../../source/explanation-flower-architecture.rst:51 msgid "" -"Before we can start, we need to meet a few prerequisites in our local " -"development environment. You can skip the first part if you want to run " -"your ClientApp instead of the ``quickstart-pytorch`` example." -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:138 -msgid "Clone the Flower repository." +"Why SuperNode and SuperLink? 
Well, in federated learning, the clients are" +" the actual stars of the show. They hold the training data and they run " +"the actual training. This is why Flower decided to name them " +"**SuperNode**. The **SuperLink** is then responsible for acting as the " +"`missing link` between all those SuperNodes." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:152 -msgid "Creating a SuperNode Dockerfile" -msgstr "" +#: ../../source/explanation-flower-architecture.rst:62 +#, fuzzy +msgid "Basic Flower architecture" +msgstr "Flower 아키텍처" -#: ../../source/how-to-run-flower-using-docker.rst:154 -#: ../../source/how-to-run-flower-using-docker.rst:289 -msgid "Let's assume the following project layout:" +#: ../../source/explanation-flower-architecture.rst:62 +msgid "The basic Flower architecture for federated learning." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:163 +#: ../../source/explanation-flower-architecture.rst:64 msgid "" -"First, we need to create a ``requirements.txt`` file in the directory " -"where the ``ClientApp`` code is located. In the file, we list all the " -"dependencies that the ClientApp requires." +"In a Flower app project, users will typically develop the ``ServerApp`` " +"and the ``ClientApp``. All the network communication between `server` and" +" `clients` is taken care of by the SuperLink and SuperNodes." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:175 +#: ../../source/explanation-flower-architecture.rst:70 msgid "" -"Note that `flwr `__ is already installed " -"in the ``flwr/supernode`` base image, so you only need to include other " -"package dependencies in your ``requirements.txt``, such as ``torch``, " -"``tensorflow``, etc." +"For more details, please refer to the |serverapp_link|_ and " +"|clientapp_link|_ documentation." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:179 +#: ../../source/explanation-flower-architecture.rst:73 msgid "" -"Next, we create a Dockerfile. 
If you use the ``quickstart-pytorch`` " -"example, create a new file called ``Dockerfile.supernode`` in ``examples" -"/quickstart-pytorch``." +"With *multi-run*, multiple ``ServerApp``\\s and ``ClientApp``\\s are now " +"capable of running on the same federation consisting of a single long-" +"running SuperLink and multiple long-running SuperNodes. This is sometimes" +" referred to as `multi-tenancy` or `multi-job`." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:182 +#: ../../source/explanation-flower-architecture.rst:78 msgid "" -"The ``Dockerfile.supernode`` contains the instructions that assemble the " -"SuperNode image." +"As shown in the figure below, two projects, each consisting of a " +"``ServerApp`` and a ``ClientApp``, could share the same SuperLink and " +"SuperNodes." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:196 -msgid "" -"In the first two lines, we instruct Docker to use the SuperNode image " -"tagged ``nightly`` as a base image and set our working directory to " -"``/app``. The following instructions will now be executed in the ``/app``" -" directory. Next, we install the ClientApp dependencies by copying the " -"``requirements.txt`` file into the image and run ``pip install``. In the " -"last two lines, we copy the ``client.py`` module into the image and set " -"the entry point to ``flower-client-app`` with the argument " -"``client:app``. The argument is the object reference of the ClientApp " -"(``:``) that will be run inside the ClientApp." 
+#: ../../source/explanation-flower-architecture.rst:87 +msgid "Multi-tenancy federated learning architecture" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:205 -msgid "Building the SuperNode Docker image" +#: ../../source/explanation-flower-architecture.rst:87 +msgid "Multi-tenancy federated learning architecture with Flower" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:207 +#: ../../source/explanation-flower-architecture.rst:89 msgid "" -"Next, we build the SuperNode Docker image by running the following " -"command in the directory where Dockerfile and ClientApp code are located." +"To illustrate how multi-run works, consider one federated learning " +"training run where a ``ServerApp`` and a ``ClientApp`` are participating " +"in ``[run 1]``. Note that a SuperNode will only run a ``ClientApp`` if it" +" is selected to participate in the training run." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:214 +#: ../../source/explanation-flower-architecture.rst:94 msgid "" -"We gave the image the name ``flwr_supernode``, and the tag ``0.0.1``. " -"Remember that the here chosen values only serve as an example. You can " -"change them to your needs." -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:219 -msgid "Running the SuperNode Docker image" -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:221 -msgid "Now that we have built the SuperNode image, we can finally run it." -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:229 -#: ../../source/how-to-run-flower-using-docker.rst:345 -msgid "Let's break down each part of this command:" +"In ``[run 1]`` below, all the SuperNodes are selected and therefore run " +"their corresponding ``ClientApp``\\s:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:231 -#: ../../source/how-to-run-flower-using-docker.rst:347 -msgid "``docker run``: This is the command to run a new Docker container." 
+#: ../../source/explanation-flower-architecture.rst:103 +msgid "Multi-tenancy federated learning architecture - Run 1" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:232 -#: ../../source/how-to-run-flower-using-docker.rst:348 +#: ../../source/explanation-flower-architecture.rst:103 msgid "" -"``--rm``: This option specifies that the container should be " -"automatically removed when it stops." -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:233 -msgid "``flwr_supernode:0.0.1``: The name the tag of the Docker image to use." -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:234 -#: ../../source/how-to-run-flower-using-docker.rst:350 -msgid "``--insecure``: This option enables insecure communication." +"Run 1 in a multi-run federated learning architecture with Flower. All " +"SuperNodes participate in the training round." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst +#: ../../source/explanation-flower-architecture.rst:106 msgid "" -"``--server 192.168.1.100:9092``: This option specifies the address of the" -" SuperLinks Fleet" +"However, in ``[run 2]``, only the first and third SuperNodes are selected" +" to participate in the training:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst -msgid "API to connect to. Remember to update it with your SuperLink IP." +#: ../../source/explanation-flower-architecture.rst:115 +msgid "Multi-tenancy federated learning architecture - Run 2" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:248 +#: ../../source/explanation-flower-architecture.rst:115 msgid "" -"To test running Flower locally, you can create a `bridge network " -"`__, use the ``--network`` argument and pass the " -"name of the Docker network to run your SuperNodes." +"Run 2 in a multi-run federated learning architecture with Flower. Only " +"the first and third SuperNodes are selected to participate in the " +"training round." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:252 +#: ../../source/explanation-flower-architecture.rst:118 msgid "" -"Any argument that comes after the tag is passed to the Flower SuperNode " -"binary. To see all available flags that the SuperNode supports, run:" +"Therefore, with Flower multi-run, different projects (each consisting of " +"a ``ServerApp`` and ``ClientApp``) can run on different sets of clients." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:262 +#: ../../source/explanation-flower-architecture.rst:121 msgid "" -"To enable SSL, we will need to mount a PEM-encoded root certificate into " -"your SuperNode container." +"To help you start and manage all of the concurrently executing training " +"runs, Flower offers one additional long-running server-side service " +"called **SuperExec**. When you type ``flwr run`` to start a new training " +"run, the ``flwr`` CLI bundles your local project (mainly your " +"``ServerApp`` and ``ClientApp``) and sends it to the **SuperExec**. The " +"**SuperExec** will then take care of starting and managing your " +"``ServerApp``, which in turn selects SuperNodes to execute your " +"``ClientApp``." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:264 +#: ../../source/explanation-flower-architecture.rst:128 msgid "" -"Assuming the certificate already exists locally, we can use the flag " -"``--volume`` to mount the local certificate into the container's " -"``/app/`` directory. This allows the SuperNode to access the certificate " -"within the container. Use the ``--certificates`` flag when starting the " -"container." +"This architecture allows many users to (concurrently) run their projects " +"on the same federation, simply by typing ``flwr run`` on their local " +"developer machine." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:275 -msgid "Flower ServerApp" +#: ../../source/explanation-flower-architecture.rst:137 +msgid "Flower Deployment Engine with SuperExec" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:277 -msgid "" -"The procedure for building and running a ServerApp image is almost " -"identical to the SuperNode image." +#: ../../source/explanation-flower-architecture.rst:137 +msgid "The SuperExec service for managing concurrent training runs in Flower." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:279 +#: ../../source/explanation-flower-architecture.rst:141 msgid "" -"Similar to the SuperNode image, the ServerApp Docker image comes with a " -"pre-installed version of Flower and serves as a base for building your " -"own ServerApp image." +"This explanation covers the Flower Deployment Engine. An explanation " +"covering the Flower Simulation Engine will follow." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:282 +#: ../../source/explanation-flower-architecture.rst:146 +#, fuzzy msgid "" -"We will use the same ``quickstart-pytorch`` example as we do in the " -"Flower SuperNode section. If you have not already done so, please follow " -"the `SuperNode Prerequisites`_ before proceeding." +"As we continue to enhance Flower at a rapid pace, we'll periodically " +"update this explainer document. Feel free to share any feedback with us." msgstr "" +"Flower Next는 빠른 속도로 지속적으로 개선되고 있으므로 이 가이드는 주기적으로 업데이트될 예정입니다. 피드백이 있으면 " +"언제든지 공유해 주세요!" -#: ../../source/how-to-run-flower-using-docker.rst:287 -msgid "Creating a ServerApp Dockerfile" -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:10 +msgid "FED Template" +msgstr "FED 템플릿" -#: ../../source/how-to-run-flower-using-docker.rst:298 -msgid "" -"First, we need to create a Dockerfile in the directory where the " -"``ServerApp`` code is located. 
If you use the ``quickstart-pytorch`` " -"example, create a new file called ``Dockerfile.serverapp`` in ``examples" -"/quickstart-pytorch``." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:12 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12 +msgid "Table of Contents" +msgstr "목차" -#: ../../source/how-to-run-flower-using-docker.rst:302 -msgid "" -"The ``Dockerfile.serverapp`` contains the instructions that assemble the " -"ServerApp image." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:14 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14 +msgid "[Table of Contents](#table-of-contents)" +msgstr "[목차](#목차)" -#: ../../source/how-to-run-flower-using-docker.rst:313 -msgid "" -"In the first two lines, we instruct Docker to use the ServerApp image " -"tagged ``1.8.0`` as a base image and set our working directory to " -"``/app``. The following instructions will now be executed in the ``/app``" -" directory. In the last two lines, we copy the ``server.py`` module into " -"the image and set the entry point to ``flower-server-app`` with the " -"argument ``server:app``. The argument is the object reference of the " -"ServerApp (``:``) that will be run inside the " -"ServerApp container." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:15 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15 +msgid "[Summary](#summary)" +msgstr "[요약](#요약)" -#: ../../source/how-to-run-flower-using-docker.rst:321 -msgid "Building the ServerApp Docker image" -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:16 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16 +msgid "[Motivation](#motivation)" +msgstr "[동기](#동기)" -#: ../../source/how-to-run-flower-using-docker.rst:323 -msgid "" -"Next, we build the ServerApp Docker image by running the following " -"command in the directory where Dockerfile and ServerApp code are located." 
-msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:17 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 +msgid "[Goals](#goals)" +msgstr "[목표](#목표)" -#: ../../source/how-to-run-flower-using-docker.rst:330 -msgid "" -"We gave the image the name ``flwr_serverapp``, and the tag ``0.0.1``. " -"Remember that the here chosen values only serve as an example. You can " -"change them to your needs." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:18 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 +msgid "[Non-Goals](#non-goals)" +msgstr "[비목표](#비목표)" -#: ../../source/how-to-run-flower-using-docker.rst:335 -msgid "Running the ServerApp Docker image" -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:19 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 +msgid "[Proposal](#proposal)" +msgstr "[제안](#제안)" -#: ../../source/how-to-run-flower-using-docker.rst:337 -msgid "Now that we have built the ServerApp image, we can finally run it." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:20 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 +msgid "[Drawbacks](#drawbacks)" +msgstr "[단점](#단점)" -#: ../../source/how-to-run-flower-using-docker.rst:349 -msgid "``flwr_serverapp:0.0.1``: The name the tag of the Docker image to use." 
-msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:21 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 +msgid "[Alternatives Considered](#alternatives-considered)" +msgstr "[고려되는 대안](#고려되는 대안)" -#: ../../source/how-to-run-flower-using-docker.rst -msgid "" -"``--server 192.168.1.100:9091``: This option specifies the address of the" -" SuperLinks Driver" -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:22 +msgid "[Appendix](#appendix)" +msgstr "[부록](#부록)" -#: ../../source/how-to-run-flower-using-docker.rst:363 -msgid "" -"To test running Flower locally, you can create a `bridge network " -"`__, use the ``--network`` argument and pass the " -"name of the Docker network to run your ServerApps." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:24 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 +msgid "Summary" +msgstr "요약" -#: ../../source/how-to-run-flower-using-docker.rst:367 -msgid "" -"Any argument that comes after the tag is passed to the Flower ServerApp " -"binary. To see all available flags that the ServerApp supports, run:" -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:26 +msgid "\\[TODO - sentence 1: summary of the problem\\]" +msgstr "\\[TODO - 문장 1: 문제 요약\\]" -#: ../../source/how-to-run-flower-using-docker.rst:377 -msgid "" -"To enable SSL, we will need to mount a PEM-encoded root certificate into " -"your ServerApp container." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:28 +msgid "\\[TODO - sentence 2: summary of the solution\\]" +msgstr "\\[TODO - 문장 2: 솔루션 요약\\]" -#: ../../source/how-to-run-flower-using-docker.rst:379 -msgid "" -"Assuming the certificate already exists locally, we can use the flag " -"``--volume`` to mount the local certificate into the container's " -"``/app/`` directory. This allows the ServerApp to access the certificate " -"within the container. 
Use the ``--certificates`` flag when starting the " -"container." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:30 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 +msgid "Motivation" +msgstr "동기" -#: ../../source/how-to-run-flower-using-docker.rst:390 -msgid "Advanced Docker options" -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:32 +#: ../../source/fed/0000-20200102-fed-template.md:36 +#: ../../source/fed/0000-20200102-fed-template.md:40 +#: ../../source/fed/0000-20200102-fed-template.md:44 +#: ../../source/fed/0000-20200102-fed-template.md:48 +#: ../../source/fed/0000-20200102-fed-template.md:54 +#: ../../source/fed/0000-20200102-fed-template.md:58 +msgid "\\[TODO\\]" +msgstr "\\[TODO\\]" -#: ../../source/how-to-run-flower-using-docker.rst:393 -msgid "Using a different Flower version" -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:34 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:53 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 +msgid "Goals" +msgstr "목표" -#: ../../source/how-to-run-flower-using-docker.rst:395 -msgid "" -"If you want to use a different version of Flower, for example Flower " -"nightly, you can do so by changing the tag. All available versions are on" -" `Docker Hub `__." 
-msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:38 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:79 +msgid "Non-Goals" +msgstr "목표가 아닌 것" -#: ../../source/how-to-run-flower-using-docker.rst:400 -msgid "Pinning a Docker image to a specific version" -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:42 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 +msgid "Proposal" +msgstr "제안" -#: ../../source/how-to-run-flower-using-docker.rst:402 -msgid "" -"It may happen that we update the images behind the tags. Such updates " -"usually include security updates of system dependencies that should not " -"change the functionality of Flower. However, if you want to ensure that " -"you always use the same image, you can specify the hash of the image " -"instead of the tag." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:46 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 +msgid "Drawbacks" +msgstr "단점" -#: ../../source/how-to-run-flower-using-docker.rst:407 -msgid "" -"The following command returns the current image hash referenced by the " -"``superlink:1.8.0`` tag:" -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:50 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 +msgid "Alternatives Considered" +msgstr "고려되는 대안" -#: ../../source/how-to-run-flower-using-docker.rst:414 -msgid "Next, we can pin the hash when running a new SuperLink container:" -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:52 +msgid "\\[Alternative 1\\]" +msgstr "\\[대안 1\\]" -#: ../../source/how-to-run-flower-using-docker.rst:423 -msgid "Setting environment variables" -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:56 
+msgid "\\[Alternative 2\\]" +msgstr "\\[대안 2\\]" -#: ../../source/how-to-run-flower-using-docker.rst:425 -msgid "" -"To set a variable inside a Docker container, you can use the ``-e " -"=`` flag." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 +msgid "Flower Enhancement Doc" +msgstr "Flower Enhancement Doc" -#: ../../source/how-to-run-simulations.rst:2 -msgid "Run simulations" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 +msgid "[Enhancement Doc Template](#enhancement-doc-template)" +msgstr "[Enhancement Doc 템플릿](#enhancement-doc-템플릿)" -#: ../../source/how-to-run-simulations.rst:8 -msgid "" -"Simulating Federated Learning workloads is useful for a multitude of use-" -"cases: you might want to run your workload on a large cohort of clients " -"but without having to source, configure and mange a large number of " -"physical devices; you might want to run your FL workloads as fast as " -"possible on the compute systems you have access to without having to go " -"through a complex setup process; you might want to validate your " -"algorithm on different scenarios at varying levels of data and system " -"heterogeneity, client availability, privacy budgets, etc. These are among" -" some of the use-cases where simulating FL workloads makes sense. Flower " -"can accommodate these scenarios by means of its `VirtualClientEngine " -"`_ or " -"VCE." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 +msgid "[Metadata](#metadata)" +msgstr "[Metadata](#metadata)" -#: ../../source/how-to-run-simulations.rst:10 -msgid "" -"The :code:`VirtualClientEngine` schedules, launches and manages `virtual`" -" clients. These clients are identical to `non-virtual` clients (i.e. 
the " -"ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " -"creating a class inheriting, for example, from `flwr.client.NumPyClient " -"`_ and therefore behave in an " -"identical way. In addition to that, clients managed by the " -":code:`VirtualClientEngine` are:" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:22 +msgid "[Workflow](#workflow)" +msgstr "[워크플로우](#워크플로우)" -#: ../../source/how-to-run-simulations.rst:12 -msgid "" -"resource-aware: this means that each client gets assigned a portion of " -"the compute and memory on your system. You as a user can control this at " -"the beginning of the simulation and allows you to control the degree of " -"parallelism of your Flower FL simulation. The fewer the resources per " -"client, the more clients can run concurrently on the same hardware." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 +msgid "[GitHub Issues](#github-issues)" +msgstr "[GitHub Issues](#github-issues)" -#: ../../source/how-to-run-simulations.rst:13 -msgid "" -"self-managed: this means that you as a user do not need to launch clients" -" manually, instead this gets delegated to :code:`VirtualClientEngine`'s " -"internals." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:26 +msgid "[Google Docs](#google-docs)" +msgstr "[Google Docs](#google-docs)" -#: ../../source/how-to-run-simulations.rst:14 -msgid "" -"ephemeral: this means that a client is only materialized when it is " -"required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards," -" releasing the resources it was assigned and allowing in this way other " -"clients to participate." 
-msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:30 +msgid "A Flower Enhancement is a standardized development process to" +msgstr "Flower Enhancement는 다음과 같은 표준화된 개발 프로세스입니다" -#: ../../source/how-to-run-simulations.rst:16 -msgid "" -"The :code:`VirtualClientEngine` implements `virtual` clients using `Ray " -"`_, an open-source framework for scalable Python " -"workloads. In particular, Flower's :code:`VirtualClientEngine` makes use " -"of `Actors `_ to " -"spawn `virtual` clients and run their workload." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 +msgid "provide a common structure for proposing larger changes" +msgstr "더 큰 변경 사항을 제안하기 위한 공통 구조를 제공합니다" -#: ../../source/how-to-run-simulations.rst:20 -msgid "Launch your Flower simulation" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 +msgid "ensure that the motivation for a change is clear" +msgstr "변화의 동기가 분명한지 확인합니다" -#: ../../source/how-to-run-simulations.rst:22 -msgid "" -"Running Flower simulations still require you to define your client class," -" a strategy, and utility functions to download and load (and potentially " -"partition) your dataset. 
With that out of the way, launching your " -"simulation is done with `start_simulation `_ and a minimal example looks" -" as follows:" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 +msgid "persist project information in a version control system" +msgstr "버전 관리 시스템에서 프로젝트 정보를 유지합니다" -#: ../../source/how-to-run-simulations.rst:44 -msgid "VirtualClientEngine resources" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 +msgid "document the motivation for impactful user-facing changes" +msgstr "사용자에게 영향력 있는 변화에 대한 동기를 문서화합니다" -#: ../../source/how-to-run-simulations.rst:45 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:36 +msgid "reserve GitHub issues for tracking work in flight" +msgstr "운행 중 작업 추적을 위한 깃허브 이슈를 예약합니다" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 msgid "" -"By default the VCE has access to all system resources (i.e. all CPUs, all" -" GPUs, etc) since that is also the default behavior when starting Ray. " -"However, in some settings you might want to limit how many of your system" -" resources are used for simulation. You can do this via the " -":code:`ray_init_args` input argument to :code:`start_simulation` which " -"the VCE internally passes to Ray's :code:`ray.init` command. For a " -"complete list of settings you can configure check the `ray.init " -"`_" -" documentation. Do not set :code:`ray_init_args` if you want the VCE to " -"use all your system's CPUs and GPUs." 
+"ensure community participants can successfully drive changes to " +"completion across one or more releases while stakeholders are adequately " +"represented throughout the process" msgstr "" +"커뮤니티 참여자가 하나 이상의 릴리즈에서 변경 사항을 성공적으로 완료할 수 있도록 하는 동시에 이해 관계자가 프로세스 전반에 걸쳐 " +"적절히 대표되도록 보장합니다" -#: ../../source/how-to-run-simulations.rst:62 -msgid "Assigning client resources" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 +msgid "Hence, an Enhancement Doc combines aspects of" +msgstr "따라서 Enhancement 문서에는 다음과 같은 측면이 결합되어 있습니다" -#: ../../source/how-to-run-simulations.rst:63 -msgid "" -"By default the :code:`VirtualClientEngine` assigns a single CPU core (and" -" nothing else) to each virtual client. This means that if your system has" -" 10 cores, that many virtual clients can be concurrently running." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 +msgid "a feature, and effort-tracking document" +msgstr "기능 및 effort-tracking 문서" -#: ../../source/how-to-run-simulations.rst:65 -msgid "" -"More often than not, you would probably like to adjust the resources your" -" clients get assigned based on the complexity (i.e. compute and memory " -"footprint) of your FL workload. You can do so when starting your " -"simulation by setting the argument `client_resources` to " -"`start_simulation `_." -" Two keys are internally used by Ray to schedule and spawn workloads (in " -"our case Flower clients):" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 +msgid "a product requirements document" +msgstr "제품 요구 사항 문서" -#: ../../source/how-to-run-simulations.rst:67 -msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." 
-msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 +msgid "a design document" +msgstr "디자인 문서" -#: ../../source/how-to-run-simulations.rst:68 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 msgid "" -":code:`num_gpus` indicates the **ratio** of GPU memory a client gets " -"assigned." -msgstr "" +"into one file, which is created incrementally in collaboration with the " +"community." +msgstr "를 하나의 파일로 통합하여 커뮤니티와 협력해 점진적으로 생성합니다." -#: ../../source/how-to-run-simulations.rst:70 -msgid "Let's see a few examples:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 +msgid "" +"For far-fetching changes or features proposed to Flower, an abstraction " +"beyond a single GitHub issue or pull request is required to understand " +"and communicate upcoming changes to the project." msgstr "" +"Flower에 제안된 변경 사항이나 기능을 멀리 가져오는 경우, 프로젝트의 향후 변경 사항을 이해하고 전달하기 위해 단일 " +"GitHub 이슈 또는 pull request를 넘어서는 abstraction이 필요합니다." -#: ../../source/how-to-run-simulations.rst:89 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 msgid "" -"While the :code:`client_resources` can be used to control the degree of " -"concurrency in your FL simulation, this does not stop you from running " -"dozens, hundreds or even thousands of clients in the same round and " -"having orders of magnitude more `dormant` (i.e. not participating in a " -"round) clients. Let's say you want to have 100 clients per round but your" -" system can only accommodate 8 clients concurrently. The " -":code:`VirtualClientEngine` will schedule 100 jobs to run (each " -"simulating a client sampled by the strategy) and then will execute them " -"in a resource-aware manner in batches of 8." +"The purpose of this process is to reduce the amount of \"tribal " +"knowledge\" in our community. By moving decisions from Slack threads, " +"video calls, and hallway conversations into a well-tracked artifact, this" +" process aims to enhance communication and discoverability." 
msgstr "" +"이 프로세스의 목적은 커뮤니티 내 '부족 지식(tribal knowledge)'의 양을 줄이는 것입니다. 이 프로세스는 Slack 스레드, 영상 통화, 복도 " +"대화에서 나온 의사 결정을 잘 추적된 아티팩트로 옮김으로써 커뮤니케이션과 검색 가능성을 향상시키는 것을 목표로 합니다." -#: ../../source/how-to-run-simulations.rst:91 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 msgid "" -"To understand all the intricate details on how resources are used to " -"schedule FL clients and how to define custom resources, please take a " -"look at the `Ray documentation `_." +"Roughly any larger, user-facing enhancement should follow the Enhancement" +" process. If an enhancement would be described in either written or " +"verbal communication to anyone besides the author or developer, then " +"consider creating an Enhancement Doc." msgstr "" +"대략적으로 사용자를 대상으로 하는 대규모 개선 사항은 개선 프로세스를 따라야 합니다. 개선 사항을 작성자나 개발자 이외의 다른 " +"사람에게 서면 또는 구두로 설명해야 하는 경우에는 개선 문서 작성을 고려하세요." -#: ../../source/how-to-run-simulations.rst:94 -msgid "Simulation examples" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 +msgid "" +"Similarly, any technical effort (refactoring, major architectural change)" +" that will impact a large section of the development community should " +"also be communicated widely. The Enhancement process is suited for this " +"even if it will have zero impact on the typical user or operator." msgstr "" +"마찬가지로 개발 커뮤니티의 많은 부분에 영향을 미치는 기술적 노력(리팩토링, 주요 아키텍처 변경)도 널리 알려야 합니다. 개선 " +"프로세스는 일반 사용자나 운영자에게 전혀 영향을 미치지 않더라도 이를 위해 적합합니다." -#: ../../source/how-to-run-simulations.rst:96 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 msgid "" -"A few ready-to-run complete examples for Flower simulation in " -"Tensorflow/Keras and PyTorch are provided in the `Flower repository " -"`_. You can run them on Google Colab too:" +"For small changes and additions, going through the Enhancement process " +"would be time-consuming and unnecessary. 
This includes, for example, " +"adding new Federated Learning algorithms, as these only add features " +"without changing how Flower works or is used." msgstr "" +"작은 변경 및 추가의 경우, 개선 프로세스를 거치는 것은 시간이 많이 걸리고 불필요합니다. 예를 들어, 새로운 연합 학습 알고리즘을" +" 추가하는 것은 Flower의 작동 방식이나 사용 방식을 변경하지 않고 기능만 추가하는 것이기 때문입니다." -#: ../../source/how-to-run-simulations.rst:98 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 msgid "" -"`Tensorflow/Keras Simulation " -"`_: 100 clients collaboratively train a MLP model on MNIST." -msgstr "" +"Enhancements are different from feature requests, as they are already " +"providing a laid-out path for implementation and are championed by " +"members of the community." +msgstr "기능 개선은 이미 구현할 수 있는 경로가 마련되어 있고 커뮤니티 구성원들이 지지하는 것이므로 기능 요청과는 다릅니다." -#: ../../source/how-to-run-simulations.rst:99 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 msgid "" -"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " -"MNIST." +"An Enhancement is captured in a Markdown file that follows a defined " +"template and a workflow to review and store enhancement docs for " +"reference — the Enhancement Doc." msgstr "" +"개선 사항은 정의된 템플릿과 참조용으로 Enhancement Doc.를 검토하고 저장하는 워크플로우를 따르는 Markdown 파일에" +" 캡처됩니다." -#: ../../source/how-to-run-simulations.rst:104 -msgid "Multi-node Flower simulations" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 +msgid "Enhancement Doc Template" +msgstr "Enhancement Doc 템플릿" -#: ../../source/how-to-run-simulations.rst:106 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 msgid "" -"Flower's :code:`VirtualClientEngine` allows you to run FL simulations " -"across multiple compute nodes. 
Before starting your multi-node simulation" -" ensure that you:" -msgstr "" +"Each enhancement doc is provided as a Markdown file having the following " +"structure" +msgstr "각 개선 사항 문서는 다음과 같은 구조의 Markdown 파일로 제공됩니다" -#: ../../source/how-to-run-simulations.rst:108 -msgid "Have the same Python environment in all nodes." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 +msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" +msgstr "Metadata ([아래 설명](#metadata) YAML preamble 형식)" -#: ../../source/how-to-run-simulations.rst:109 -msgid "Have a copy of your code (e.g. your entire repo) in all nodes." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 +msgid "Title (same as in metadata)" +msgstr "Title (metadata와 같게)" -#: ../../source/how-to-run-simulations.rst:110 -msgid "" -"Have a copy of your dataset in all nodes (more about this in " -":ref:`simulation considerations `)" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 +msgid "Table of Contents (if needed)" +msgstr "Table of Contents (필요시)" -#: ../../source/how-to-run-simulations.rst:111 -msgid "" -"Pass :code:`ray_init_args={\"address\"=\"auto\"}` to `start_simulation " -"`_ so the " -":code:`VirtualClientEngine` attaches to a running Ray instance." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 +msgid "Notes/Constraints/Caveats (optional)" +msgstr "Notes/Constraints/Caveats (선택 사항)" -#: ../../source/how-to-run-simulations.rst:112 -msgid "" -"Start Ray on you head node: on the terminal type :code:`ray start " -"--head`. This command will print a few lines, one of which indicates how " -"to attach other nodes to the head node." 
-msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 +msgid "Design Details (optional)" +msgstr "Design Details (선택 사항)" -#: ../../source/how-to-run-simulations.rst:113 -msgid "" -"Attach other nodes to the head node: copy the command shown after " -"starting the head and execute it on terminal of a new node: for example " -":code:`ray start --address='192.168.1.132:6379'`" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 +msgid "Graduation Criteria" +msgstr "졸업 기준" -#: ../../source/how-to-run-simulations.rst:115 -msgid "" -"With all the above done, you can run your code from the head node as you " -"would if the simulation was running on a single node." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 +msgid "Upgrade/Downgrade Strategy (if applicable)" +msgstr "업그레이드/다운그레이드 전략(해당되는 경우)" -#: ../../source/how-to-run-simulations.rst:117 -msgid "" -"Once your simulation is finished, if you'd like to dismantle your cluster" -" you simply need to run the command :code:`ray stop` in each node's " -"terminal (including the head node)." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 +msgid "As a reference, this document follows the above structure." +msgstr "참고로 이 문서는 위의 구조를 따릅니다." -#: ../../source/how-to-run-simulations.rst:120 -msgid "Multi-node simulation good-to-know" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 +#: ../../source/ref-api/flwr.common.Metadata.rst:2 +msgid "Metadata" +msgstr "Metadata" -#: ../../source/how-to-run-simulations.rst:122 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 msgid "" -"Here we list a few interesting functionality when running multi-node FL " -"simulations:" +"**fed-number** (Required) The `fed-number` of the last Flower Enhancement" +" Doc + 1. With this number, it becomes easy to reference other proposals." msgstr "" +"**fed-number** (필수) 마지막 Flower Enhancement 문서의 `fed-number` + 1. 
이 번호를 사용하면 다른 제안을 " +"쉽게 참조할 수 있습니다." -#: ../../source/how-to-run-simulations.rst:124 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 +msgid "**title** (Required) The title of the proposal in plain language." +msgstr "**제목** (필수) 제안서의 제목을 평이한 언어로 입력합니다." + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 msgid "" -"User :code:`ray status` to check all nodes connected to your head node as" -" well as the total resources available to the " -":code:`VirtualClientEngine`." -msgstr "" +"**status** (Required) The current status of the proposal. See " +"[workflow](#workflow) for the possible states." +msgstr "**상태** (필수) 제안의 현재 상태입니다. 가능한 상태는 [워크플로](#워크플로)를 참조하세요." -#: ../../source/how-to-run-simulations.rst:126 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 msgid "" -"When attaching a new node to the head, all its resources (i.e. all CPUs, " -"all GPUs) will be visible by the head node. This means that the " -":code:`VirtualClientEngine` can schedule as many `virtual` clients as " -"that node can possible run. In some settings you might want to exclude " -"certain resources from the simulation. You can do this by appending " -"`--num-cpus=` and/or `--num-" -"gpus=` in any :code:`ray start` command (including " -"when starting the head)" -msgstr "" +"**authors** (Required) A list of authors of the proposal. This is simply " +"the GitHub ID." +msgstr "**저자** (필수) 제안서의 작성자 목록입니다. 간단히 GitHub ID입니다." -#: ../../source/how-to-run-simulations.rst:132 -msgid "Considerations for simulations" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 +msgid "" +"**creation-date** (Required) The date that the proposal was first " +"submitted in a PR." +msgstr "**생성 날짜** (필수) PR에서 제안서를 처음 제출한 날짜입니다." 
-#: ../../source/how-to-run-simulations.rst:135 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 msgid "" -"We are actively working on these fronts so to make it trivial to run any " -"FL workload with Flower simulation." -msgstr "" +"**last-updated** (Optional) The date that the proposal was last changed " +"significantly." +msgstr "**마지막 업데이트** (선택 사항) 제안서가 마지막으로 크게 변경된 날짜입니다." -#: ../../source/how-to-run-simulations.rst:138 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 msgid "" -"The current VCE allows you to run Federated Learning workloads in " -"simulation mode whether you are prototyping simple scenarios on your " -"personal laptop or you want to train a complex FL pipeline across " -"multiple high-performance GPU nodes. While we add more capabilities to " -"the VCE, the points below highlight some of the considerations to keep in" -" mind when designing your FL pipeline with Flower. We also highlight a " -"couple of current limitations in our implementation." -msgstr "" +"**see-also** (Optional) A list of other proposals that are relevant to " +"this one." +msgstr "**함께 보기** (선택 사항) 이 제안과 관련된 다른 제안 목록입니다." -#: ../../source/how-to-run-simulations.rst:141 -msgid "GPU resources" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 +msgid "**replaces** (Optional) A list of proposals that this one replaces." +msgstr "**대체** (선택 사항) 이 제안이 대체하는 제안 목록입니다." -#: ../../source/how-to-run-simulations.rst:143 -msgid "" -"The VCE assigns a share of GPU memory to a client that specifies the key " -":code:`num_gpus` in :code:`client_resources`. This being said, Ray (used " -"internally by the VCE) is by default:" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 +msgid "**superseded-by** (Optional) A list of proposals that this one supersedes." +msgstr "**대체됨** (선택 사항) 이 제안이 대체하는 제안의 목록입니다." 
+ +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 +msgid "Workflow" +msgstr "워크플로우" -#: ../../source/how-to-run-simulations.rst:146 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 msgid "" -"not aware of the total VRAM available on the GPUs. This means that if you" -" set :code:`num_gpus=0.5` and you have two GPUs in your system with " -"different (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients" -" concurrently." +"The idea forming the enhancement should already have been discussed or " +"pitched in the community. As such, it needs a champion, usually the " +"author, who shepherds the enhancement. This person also has to find " +"committers to Flower willing to review the proposal." msgstr "" +"개선 사항을 구성하는 아이디어는 이미 커뮤니티에서 논의되었거나 제안된 적이 있어야 합니다. 따라서 개선 사항을 주도하는 사람(보통 " +"작성자)이 필요합니다. 이 사람은 또한 제안을 검토할 의향이 있는 Flower 커미터를 찾아야 합니다." -#: ../../source/how-to-run-simulations.rst:147 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 msgid "" -"not aware of other unrelated (i.e. not created by the VCE) workloads are " -"running on the GPU. Two takeaways from this are:" +"New enhancements are checked in with a file name in the form of `NNNN-" +"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement " +"Doc number, to `enhancements`. All enhancements start in `provisional` " +"state as part of a pull request. Discussions are done as part of the pull" +" request review." msgstr "" +"새 개선 사항은 `NNNN-YYYYMMDD-enhancement-title.md` 형식의 파일 이름으로 체크인되며, `NNNN`은 " +"Flower 개선 문서 번호이고 `enhancements`에 해당합니다. 모든 개선 사항은 pull request의 일부로 `잠정`" +" 상태에서 시작됩니다. 토론은 pull request 검토의 일부로 이루어집니다."
-#: ../../source/how-to-run-simulations.rst:149 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 msgid "" -"Your Flower server might need a GPU to evaluate the `global model` after " -"aggregation (by instance when making use of the `evaluate method `_)" +"Once an enhancement has been reviewed and approved, its status is changed" +" to `implementable`. The actual implementation is then done in separate " +"pull requests. These pull requests should mention the respective " +"enhancement as part of their description. After the implementation is " +"done, the proposal status is changed to `implemented`." msgstr "" +"개선 사항이 검토 및 승인되면 상태가 '구현 가능'으로 변경됩니다. 그런 다음 실제 구현은 별도의 pull requests를 통해 " +"이루어집니다. 이러한 pull requests는 설명의 일부로 해당 개선 사항을 언급해야 합니다. 구현이 완료되면 제안 상태는 " +"'구현됨'으로 변경됩니다." -#: ../../source/how-to-run-simulations.rst:150 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 msgid "" -"If you want to run several independent Flower simulations on the same " -"machine you need to mask-out your GPUs with " -":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching your " -"experiment." -msgstr "" +"Under certain conditions, other states are possible. An Enhancement has " +"the following states:" +msgstr "특정 조건에서는 다른 상태도 가능합니다. 개선에는 다음과 같은 상태가 있습니다:" -#: ../../source/how-to-run-simulations.rst:153 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 msgid "" -"In addition, the GPU resource limits passed to :code:`client_resources` " -"are not `enforced` (i.e. they can be exceeded) which can result in the " -"situation of client using more VRAM than the ratio specified when " -"starting the simulation." -msgstr "" +"`provisional`: The enhancement has been proposed and is actively being " +"defined. This is the starting state while the proposal is being fleshed " +"out and actively defined and discussed." +msgstr "'잠정적': 개선 사항이 제안되어 활발히 정의되고 있습니다. 제안이 구체화되고 활발하게 정의 및 논의되는 동안의 시작 단계입니다." 
-#: ../../source/how-to-run-simulations.rst:156 -msgid "TensorFlow with GPUs" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 +msgid "`implementable`: The enhancement has been reviewed and approved." +msgstr "`구현 가능`: 개선 사항이 검토 및 승인되었습니다." -#: ../../source/how-to-run-simulations.rst:158 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 msgid "" -"When `using a GPU with TensorFlow " -"`_ nearly your entire GPU memory of" -" all your GPUs visible to the process will be mapped. This is done by " -"TensorFlow for optimization purposes. However, in settings such as FL " -"simulations where we want to split the GPU into multiple `virtual` " -"clients, this is not a desirable mechanism. Luckily we can disable this " -"default behavior by `enabling memory growth " -"`_." -msgstr "" +"`implemented`: The enhancement has been implemented and is no longer " +"actively changed." +msgstr "`구현됨`: 개선 사항이 구현되었으며 더 이상 활발히 변경되지 않습니다." -#: ../../source/how-to-run-simulations.rst:160 -msgid "" -"This would need to be done in the main process (which is where the server" -" would run) and in each Actor created by the VCE. By means of " -":code:`actor_kwargs` we can pass the reserved key `\"on_actor_init_fn\"` " -"in order to specify a function to be executed upon actor initialization. " -"In this case, to enable GPU growth for TF workloads. It would look as " -"follows:" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 +msgid "`deferred`: The enhancement is proposed but not actively being worked on." +msgstr "'지연됨': 개선 사항이 제안되었지만 아직 활발히 작업 중이 아닙니다." -#: ../../source/how-to-run-simulations.rst:179 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 msgid "" -"This is precisely the mechanism used in `Tensorflow/Keras Simulation " -"`_ example." -msgstr "" +"`rejected`: The authors and reviewers have decided that this enhancement " +"is not moving forward." 
+msgstr "`거부됨`: 작성자와 검토자는 이 개선 사항을 더 이상 진행하지 않기로 결정했습니다." -#: ../../source/how-to-run-simulations.rst:183 -msgid "Multi-node setups" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 +msgid "`withdrawn`: The authors have withdrawn the enhancement." +msgstr "`철회`: 작성자가 개선 사항을 철회했습니다." -#: ../../source/how-to-run-simulations.rst:185 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 +msgid "`replaced`: The enhancement has been replaced by a new enhancement." +msgstr "'대체됨': 개선 사항이 새로운 개선 사항으로 대체되었습니다." + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 msgid "" -"The VCE does not currently offer a way to control on which node a " -"particular `virtual` client is executed. In other words, if more than a " -"single node have the resources needed by a client to run, then any of " -"those nodes could get the client workload scheduled onto. Later in the FL" -" process (i.e. in a different round) the same client could be executed by" -" a different node. Depending on how your clients access their datasets, " -"this might require either having a copy of all dataset partitions on all " -"nodes or a dataset serving mechanism (e.g. using nfs, a database) to " -"circumvent data duplication." +"Adding an additional process to the ones already provided by GitHub " +"(Issues and Pull Requests) adds more complexity and can be a barrier for " +"potential first-time contributors." msgstr "" +"GitHub에서 이미 제공하는 프로세스(이슈 및 Pull Requests)에 추가 프로세스를 추가하면 더 복잡해지고 잠재적인 처음인" +" 기여자에게는 장벽이 될 수 있습니다." -#: ../../source/how-to-run-simulations.rst:187 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 msgid "" -"By definition virtual clients are `stateless` due to their ephemeral " -"nature. A client state can be implemented as part of the Flower client " -"class but users need to ensure this saved to persistent storage (e.g. 
a " -"database, disk) and that can be retrieve later by the same client " -"regardless on which node it is running from. This is related to the point" -" above also since, in some way, the client's dataset could be seen as a " -"type of `state`." +"Expanding the proposal template beyond the single-sentence description " +"currently required in the features issue template may be a heavy burden " +"for non-native English speakers." msgstr "" +"현재 기능 이슈 템플릿에서 요구되는 한 문장 설명 이상으로 제안서 템플릿을 확장하는 것은 영어가 모국어가 아닌 사용자에게는 큰 " +"부담이 될 수 있습니다." -#: ../../source/how-to-save-and-load-model-checkpoints.rst:2 -msgid "Save and load model checkpoints" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 +msgid "GitHub Issues" +msgstr "GitHub 이슈" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:4 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 msgid "" -"Flower does not automatically save model updates on the server-side. This" -" how-to guide describes the steps to save (and load) model checkpoints in" -" Flower." +"Using GitHub Issues for these kinds of enhancements is doable. One could " +"use, for example, tags, to differentiate and filter them from other " +"issues. The main issue is in discussing and reviewing an enhancement: " +"GitHub issues only have a single thread for comments. Enhancements " +"usually have multiple threads of discussion at the same time for various " +"parts of the doc. Managing these multiple discussions can be confusing " +"when using GitHub Issues." msgstr "" +"이러한 종류의 개선을 위해 GitHub 이슈를 사용하면 가능합니다. 예를 들어 태그를 사용하여 다른 이슈와 구별하고 필터링할 수 " +"있습니다. 주요 이슈는 개선 사항에 대해 토론하고 검토하는 것입니다: GitHub 이슈에는 댓글 스레드가 하나만 있습니다. 개선 " +"사항에는 일반적으로 문서의 여러 부분에 대해 동시에 여러 개의 토론 스레드가 있습니다. GitHub 이슈를 사용할 때 이러한 여러 " +"토론을 관리하면 혼란스러울 수 있습니다." 
-#: ../../source/how-to-save-and-load-model-checkpoints.rst:8 -msgid "Model checkpointing" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 +msgid "Google Docs" +msgstr "Google 문서 도구" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:10 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 msgid "" -"Model updates can be persisted on the server-side by customizing " -":code:`Strategy` methods. Implementing custom strategies is always an " -"option, but for many cases it may be more convenient to simply customize " -"an existing strategy. The following code example defines a new " -":code:`SaveModelStrategy` which customized the existing built-in " -":code:`FedAvg` strategy. In particular, it customizes " -":code:`aggregate_fit` by calling :code:`aggregate_fit` in the base class " -"(:code:`FedAvg`). It then continues to save returned (aggregated) weights" -" before it returns those aggregated weights to the caller (i.e., the " -"server):" +"Google Docs allow for multiple threads of discussions. But as Google Docs" +" are hosted outside the project, their discoverability by the community " +"needs to be taken care of. A list of links to all proposals has to be " +"managed and made available for the community. Compared to shipping " +"proposals as part of Flower's repository, the potential for missing links" +" is much higher." msgstr "" +"Google 문서는 여러 스레드의 토론을 허용합니다. 하지만 Google 문서는 프로젝트 외부에서 호스팅되므로 커뮤니티에서 검색할 " +"수 있도록 관리해야 합니다. 모든 제안에 대한 링크 목록을 관리하고 커뮤니티에 제공해야 합니다. Flower 저장소의 일부로 " +"제안서를 보낼 때와 비교하면 링크가 누락될 가능성이 훨씬 더 높습니다." 
-#: ../../source/how-to-save-and-load-model-checkpoints.rst:47 -msgid "Save and load PyTorch checkpoints" -msgstr "" +#: ../../source/fed/index.md:1 +msgid "FED - Flower Enhancement Doc" +msgstr "FED - Flower 개선 문서" + +#: ../../source/how-to-aggregate-evaluation-results.rst:2 +msgid "Aggregate evaluation results" +msgstr "평가 결과 집계" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:49 +#: ../../source/how-to-aggregate-evaluation-results.rst:4 msgid "" -"Similar to the previous example but with a few extra steps, we'll show " -"how to store a PyTorch checkpoint we'll use the ``torch.save`` function. " -"Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be" -" transformed into a list of NumPy ``ndarray``'s, then those are " -"transformed into the PyTorch ``state_dict`` following the ``OrderedDict``" -" class structure." -msgstr "" +"The Flower server does not prescribe a way to aggregate evaluation " +"results, but it enables the user to fully customize result aggregation." +msgstr "Flower 서버는 평가 결과를 집계하는 방법을 규정하고 있지 않지만 사용자가 결과 집계를 완전히 사용자 지정할 수 있습니다." + +#: ../../source/how-to-aggregate-evaluation-results.rst:8 +msgid "Aggregate Custom Evaluation Results" +msgstr "사용자 지정 평가 결과 집계" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:85 +#: ../../source/how-to-aggregate-evaluation-results.rst:10 +#, fuzzy msgid "" -"To load your progress, you simply append the following lines to your " -"code. Note that this will iterate over all saved checkpoints and load the" -" latest one:" +"The same ``Strategy``-customization approach can be used to aggregate " +"custom evaluation results coming from individual clients. Clients can " +"return custom metrics to the server by returning a dictionary:" msgstr "" +"동일한 :code:`Strategy`-사용자 지정 방식을 사용하여 개별 클라이언트로부터 오는 사용자 지정 평가 결과를 집계할 수 " +"있습니다. 
클라이언트는 dictionary를 반환하여 사용자 지정 지표를 서버에 반환할 수 있습니다:" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:97 +#: ../../source/how-to-aggregate-evaluation-results.rst:39 msgid "" -"Return/use this object of type ``Parameters`` wherever necessary, such as" -" in the ``initial_parameters`` when defining a ``Strategy``." -msgstr "" +"The server can then use a customized strategy to aggregate the metrics " +"provided in these dictionaries:" +msgstr "그런 다음 서버는 사용자 지정 전략을 사용하여 이러한 dictionaries에서 제공하는 메트릭을 집계할 수 있습니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:2 -msgid "Upgrade to Flower 1.0" -msgstr "" +#: ../../source/how-to-authenticate-supernodes.rst:2 +msgid "Authenticate SuperNodes" +msgstr "SuperNodes 인증하기" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:4 +#: ../../source/how-to-authenticate-supernodes.rst:4 msgid "" -"Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable" -" foundation for future growth. Compared to Flower 0.19 (and other 0.x " -"series releases), there are a few breaking changes that make it necessary" -" to change the code of existing 0.x-series projects." +"Flower has built-in support for authenticated SuperNodes that you can use" +" to verify the identities of each SuperNode connecting to a SuperLink. " +"Flower node authentication works similar to how GitHub SSH authentication" +" works:" msgstr "" +"Flower는 SuperLink에 연결하는 각 SuperNodes의 신원을 확인하는 데 사용할 수 있는 인증된 SuperNodes에" +" 대한 기본 지원을 제공합니다. 
Flower 노드 인증은 GitHub SSH 인증 방식과 유사하게 작동합니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:8 -#: ../../source/how-to-upgrade-to-flower-next.rst:43 -msgid "Install update" -msgstr "" +#: ../../source/how-to-authenticate-supernodes.rst:8 +msgid "SuperLink (server) stores a list of known (client) node public keys" +msgstr "SuperLink(서버)는 알려진 (클라이언트) 노드 공개키 목록을 저장합니다" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +#: ../../source/how-to-authenticate-supernodes.rst:9 msgid "" -"Here's how to update an existing installation to Flower 1.0 using either " -"pip or Poetry:" -msgstr "" +"Using ECDH, both SuperNode and SuperLink independently derive a shared " +"secret" +msgstr "SuperNode와 SuperLink는 ECDH를 사용하여 독립적으로 공유된 비밀을 도출합니다" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 -msgid "pip: add ``-U`` when installing." -msgstr "" +#: ../../source/how-to-authenticate-supernodes.rst:10 +msgid "" +"Shared secret is used to compute the HMAC value of the message sent from " +"SuperNode to SuperLink as a token" +msgstr "비밀 공유는 SuperNode에서 SuperLink로 토큰으로 전송된 메시지의 HMAC 값을 계산하는 데 사용됩니다" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +#: ../../source/how-to-authenticate-supernodes.rst:12 +msgid "SuperLink verifies the token" +msgstr "SuperLink가 토큰을 확인합니다" + +#: ../../source/how-to-authenticate-supernodes.rst:14 msgid "" -"``python -m pip install -U flwr`` (when using ``start_server`` and " -"``start_client``)" +"We recommend you to check out the complete `code example " +"`_ demonstrating federated learning with Flower in an " +"authenticated setting." msgstr "" +"인증된 환경에서 Flower로 연합 학습을 시연하는 전체 '코드 예제 " +"`_를 확인하는 것이 좋습니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:15 +#: ../../source/how-to-authenticate-supernodes.rst:20 msgid "" -"``python -m pip install -U flwr[simulation]`` (when using " -"``start_simulation``)" -msgstr "" +"This guide covers a preview feature that might change in future versions " +"of Flower." 
+msgstr "이 가이드에서는 향후 버전의 Flower에서 변경될 수 있는 미리보기 기능에 대해 설명합니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 +#: ../../source/how-to-authenticate-supernodes.rst:24 msgid "" -"Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " -"reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " -"before running ``poetry install``)." -msgstr "" +"For increased security, node authentication can only be used when " +"encrypted connections (SSL/TLS) are enabled." +msgstr "보안을 강화하기 위해 노드 인증은 암호화된 연결(SSL/TLS)을 사용하도록 설정한 경우에만 사용할 수 있습니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 -msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" -msgstr "" +#: ../../source/how-to-authenticate-supernodes.rst:28 +#, fuzzy +msgid "Enable node authentication in ``SuperLink``" +msgstr ":code:`SuperLink`에서 노드 인증 활성화" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:20 +#: ../../source/how-to-authenticate-supernodes.rst:30 +#, fuzzy msgid "" -"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " -"using ``start_simulation``)" +"To enable node authentication, first you need to configure SSL/TLS " +"connections to secure the SuperLink<>SuperNode communication. You can " +"find the complete guide `here `_. After configuring secure connections, you" +" can enable client authentication in a long-running Flower ``SuperLink``." +" Use the following terminal command to start a Flower ``SuperNode`` that " +"has both secure connections and node authentication enabled:" msgstr "" +"노드 인증을 활성화하려면 먼저 SuperLink<>SuperNode 통신을 보호하기 위해 SSL/TLS 연결을 구성해야 합니다. " +"전체 가이드는 `여기 `_에서 확인할 수 있습니다. 보안 연결을 구성한 후, 장기 실행하는 Flower " +":code:`SuperLink`에서 클라이언트 인증을 활성화할 수 있습니다. 
다음 터미널 명령을 사용하여 보안 연결과 노드 인증이 " +"모두 활성화된 Flower :code:`SuperNode`를 시작하세요:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 -#: ../../source/how-to-upgrade-to-flower-next.rst:100 -msgid "Required changes" -msgstr "" +#: ../../source/how-to-authenticate-supernodes.rst:47 +msgid "Let's break down the authentication flags:" +msgstr "인증 플래그를 세분화해 보겠습니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:26 -msgid "The following breaking changes require manual updates." +#: ../../source/how-to-authenticate-supernodes.rst:49 +#, fuzzy +msgid "" +"The first flag ``--auth-list-public-keys`` expects a path to a CSV file " +"storing all known node public keys. You need to store all known node " +"public keys that are allowed to participate in a federation in one CSV " +"file (``.csv``)." msgstr "" +"첫 번째 플래그 :code:`--auth-list-public-keys`는 알려진 모든 노드 공개키를 저장하는 CSV 파일의 경로를" +" 기대합니다. federation에 참여하도록 허용된 모든 알려진 노드 공개 키를 하나의 CSV 파일(:code:`.csv`)에 " +"저장해야 합니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:29 -msgid "General" +#: ../../source/how-to-authenticate-supernodes.rst:53 +msgid "" +"A valid CSV file storing known node public keys should list the keys in " +"OpenSSH format, separated by commas and without any comments. For an " +"example, refer to our code sample, which contains a CSV file with two " +"known node public keys." msgstr "" +"알려진 노드 공개키를 저장하는 유효한 CSV 파일은 쉼표로 구분하고 주석 없이 OpenSSH 형식으로 키를 나열해야 합니다. 예를 " +"들어, 두 개의 알려진 노드 공개키가 포함된 CSV 파일이 포함된 코드 샘플을 참조하세요." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:31 +#: ../../source/how-to-authenticate-supernodes.rst:57 +#, fuzzy msgid "" -"Pass all arguments as keyword arguments (not as positional arguments). " -"Here's an example:" +"The second and third flags ``--auth-superlink-private-key`` and ``--auth-" +"superlink-public-key`` expect paths to the server's private and public " +"keys. For development purposes, you can generate a private and public key" +" pair using ``ssh-keygen -t ecdsa -b 384``." 
msgstr "" +"두 번째 및 세 번째 플래그 :code:`--auth-superlink-private-key` 및 :code:`--auth-" +"superlink-public-key`는 서버의 개인 및 공개 키의 경로를 예상합니다. 개발 목적으로 :code:`ssh-" +"keygen -t ecdsa -b 384`를 사용하여 개인 및 공개 키 쌍을 생성할 수 있습니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 +#: ../../source/how-to-authenticate-supernodes.rst:64 msgid "" -"Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " -"FlowerClient())``" +"In Flower 1.9, there is no support for dynamically removing, editing, or " +"adding known node public keys to the SuperLink. To change the set of " +"known nodes, you need to shut the server down, edit the CSV file, and " +"start the server again. Support for dynamically changing the set of known" +" nodes is on the roadmap to be released in Flower 1.10 (ETA: June)." msgstr "" +"Flower 1.9에서는 알려진 노드 공개키를 SuperLink에 동적으로 제거, 편집 또는 추가하는 기능이 지원되지 않습니다. " +"알려진 노드 집합을 변경하려면 서버를 종료하고 CSV 파일을 편집한 다음 서버를 다시 시작해야 합니다. 알려진 노드 집합을 동적으로" +" 변경하는 기능은 Flower 1.10(출시 예정일: 6월)에서 로드맵에 포함되어 있습니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:34 +#: ../../source/how-to-authenticate-supernodes.rst:71 +#, fuzzy +msgid "Enable node authentication in ``SuperNode``" +msgstr ":code:`SuperNode`에서 노드 인증을 활성화합니다" + +#: ../../source/how-to-authenticate-supernodes.rst:73 +#, fuzzy msgid "" -"Flower 1.0 (keyword arguments): " -"``start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())``" +"Similar to the long-running Flower server (``SuperLink``), you can easily" +" enable node authentication in the long-running Flower client " +"(``SuperNode``). Use the following terminal command to start an " +"authenticated ``SuperNode``:" msgstr "" +"장기 실행 중인 Flower 서버(:code:`SuperLink`)와 마찬가지로, 장기 실행 중인 Flower " +"클라이언트(:code:`SuperNode`)에서도 노드 인증을 쉽게 활성화할 수 있습니다. 
다음 터미널 명령을 사용하여 인증된 " +":code:`SuperNode`를 시작하세요:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:37 -#: ../../source/ref-api/flwr.client.Client.rst:2 -msgid "Client" +#: ../../source/how-to-authenticate-supernodes.rst:85 +#, fuzzy +msgid "" +"The ``--auth-supernode-private-key`` flag expects a path to the node's " +"private key file and the ``--auth-supernode-public-key`` flag expects a " +"path to the node's public key file. For development purposes, you can " +"generate a private and public key pair using ``ssh-keygen -t ecdsa -b " +"384``." msgstr "" +":code:`--auth-supernode-private-key` 플래그는 노드의 개인 키 파일 경로를, :code:`--auth-" +"supernode-public-key` 플래그는 노드의 공개 키 파일 경로를 예상합니다. 개발 목적으로 :code:`ssh-" +"keygen -t ecdsa -b 384`를 사용하여 개인 및 공개 키 쌍을 생성할 수 있습니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 +#: ../../source/how-to-authenticate-supernodes.rst:91 +msgid "Security notice" +msgstr "보안 공지" + +#: ../../source/how-to-authenticate-supernodes.rst:93 msgid "" -"Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " -"``def get_parameters(self, config):``" +"The system's security relies on the credentials of the SuperLink and each" +" SuperNode. Therefore, it is imperative to safeguard and safely store the" +" credentials to avoid security risks such as Public Key Infrastructure " +"(PKI) impersonation attacks. The node authentication mechanism also " +"involves human interaction, so please ensure that all of the " +"communication is done in a secure manner, using trusted communication " +"methods." msgstr "" +"시스템의 보안은 SuperLink와 각SuperNode의 자격 증명에 의존합니다. 따라서 공개키 기반구조(PKI) 사칭 공격과 같은" +" 보안 위험을 피하기 위해 자격 증명을 보호하고 안전하게 보관하는 것이 필수적입니다. 노드 인증 메커니즘에는 사람의 상호 작용도 " +"포함되므로 모든 통신이 신뢰할 수 있는 통신 방법을 사용하여 안전한 방식으로 이루어지도록 하세요." 
+ +#: ../../source/how-to-authenticate-supernodes.rst:100 +#: ../../source/how-to-enable-ssl-connections.rst:71 +#: ../../source/how-to-use-built-in-mods.rst:95 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 +msgid "Conclusion" +msgstr "결론" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:40 +#: ../../source/how-to-authenticate-supernodes.rst:102 +#, fuzzy msgid "" -"Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " -"get_parameters(self, ins: GetParametersIns):``" msgstr "" +"이제 노드 인증이 활성화된 상태에서 장기간 실행되는 Flower 서버(:code:`SuperLink`)와 " +"클라이언트(:code:`SuperNode`)를 시작하는 방법을 배웠을 것입니다. 또한 보안 위험을 최소화하기 위해 개인키의 중요성을" +" 알고 안전하게 보관해야 합니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 -msgid "Strategies / ``start_server`` / ``start_simulation``" -msgstr "" +#: ../../source/how-to-configure-clients.rst:2 +msgid "Configure clients" +msgstr "클라이언트 구성" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 +#: ../../source/how-to-configure-clients.rst:4 msgid "" -"Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " -"``start_simulation``. Here's an example:" msgstr "" +"모델 파라미터와 함께 Flower는 구성 값을 클라이언트에 전송할 수 있습니다. 구성 값은 다양한 용도로 사용할 수 있습니다. 예를" +" 들어 서버에서 클라이언트 측 하이퍼파라미터를 제어하는 데 널리 사용되는 방법입니다." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 +#: ../../source/how-to-configure-clients.rst:9 +msgid "Configuration values" +msgstr "구성 값" + +#: ../../source/how-to-configure-clients.rst:11 msgid "" -"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " -"\"round_timeout\": 600.0}, ...)``" +"Configuration values are represented as a dictionary with ``str`` keys " +"and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " +"float), ``int``, or ``str`` (or equivalent types in different languages)." +" Here is an example of a configuration dictionary in Python:" msgstr "" +"구성 값은 ``str`` 키와 ``bool``, ``bytes``, ``double``(64비트 정밀도 정수), ``int`` 또는" +" ``str``(또는 다른 언어의 동등한 유형) 유형의 값으로 구성된 사전으로 표현됩니다. 다음은 Python의 구성 사전 " +"예제입니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:48 +#: ../../source/how-to-configure-clients.rst:25 msgid "" -"Flower 1.0: ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +"Flower serializes these configuration dictionaries (or *config dict* for " +"short) to their ProtoBuf representation, transports them to the client " +"using gRPC, and then deserializes them back to Python dictionaries." msgstr "" +"Flower는 이러한 구성 dictionaries(또는 줄여서 *config dict*)를 ProtoBuf 표현으로 직렬화하고, " +"gRPC를 사용하여 클라이언트로 전송한 다음 다시 Python dictionaries로 역직렬화합니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:50 +#: ../../source/how-to-configure-clients.rst:31 msgid "" -"Replace ``num_rounds=1`` in ``start_simulation`` with the new " -"``config=ServerConfig(...)`` (see previous item)" +"Currently, there is no support for directly sending collection types " +"(e.g., ``Set``, ``List``, ``Map``) as values in configuration " +"dictionaries. There are several workarounds to send collections as values" +" by converting them to one of the supported value types (and converting " +"them back on the client-side)." 
msgstr "" +"현재 구성 사전에서 컬렉션 유형(예: ``Set``, ``List``, ``Map``)을 값으로 직접 전송하는 기능은 지원되지 " +"않습니다. 컬렉션을 지원되는 값 유형 중 하나로 변환한 다음 클라이언트 측에서 다시 변환하여 값으로 보내는 몇 가지 해결 방법이 " +"있습니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +#: ../../source/how-to-configure-clients.rst:36 msgid "" -"Remove ``force_final_distributed_eval`` parameter from calls to " -"``start_server``. Distributed evaluation on all clients can be enabled by" -" configuring the strategy to sample all clients for evaluation after the " -"last round of training." +"One can, for example, convert a list of floating-point numbers to a JSON " +"string, then send the JSON string using the configuration dictionary, and" +" then convert the JSON string back to a list of floating-point numbers on" +" the client." msgstr "" +"예를 들어 부동 소수점 숫자 목록을 JSON 문자열로 변환한 다음 구성 dictionary을 사용하여 JSON 문자열을 전송한 다음" +" 클라이언트에서 다시 부동 소수점 숫자 목록으로 변환할 수 있습니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:52 -msgid "Rename parameter/ndarray conversion functions:" -msgstr "" +#: ../../source/how-to-configure-clients.rst:41 +msgid "Configuration through built-in strategies" +msgstr "기본 제공 전략을 통한 구성" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:54 -msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" +#: ../../source/how-to-configure-clients.rst:43 +#, fuzzy +msgid "" +"The easiest way to send configuration values to clients is to use a " +"built-in strategy like ``FedAvg``. Built-in strategies support so-called " +"configuration functions. A configuration function is a function that the " +"built-in strategy calls to get the configuration dictionary for the " +"current round. It then forwards the configuration dictionary to all the " +"clients selected during that round." msgstr "" +"클라이언트에 구성 값을 보내는 가장 쉬운 방법은 :code:`FedAvg`와 같은 기본 제공 전략을 사용하는 것입니다. 기본 제공 " +"전략은 소위 구성 함수를 지원합니다. 구성 함수는 내장 전략이 현재 단계의 구성 사전을 가져오기 위해 호출하는 함수입니다. 그런 " +"다음 해당 단계 동안 선택된 모든 클라이언트에 구성 사전을 전달합니다." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:55 -msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" +#: ../../source/how-to-configure-clients.rst:49 +msgid "" +"Let's start with a simple example. Imagine we want to send (a) the batch " +"size that the client should use, (b) the current global round of " +"federated learning, and (c) the number of epochs to train on the client-" +"side. Our configuration function could look like this:" msgstr "" +"간단한 예부터 시작하겠습니다. (a) 클라이언트가 사용해야 하는 배치 크기, (b) 현재 글로벌 연합 라운드, (c) 클라이언트 " +"측에서 학습할 에포크 수를 전송하고 싶다고 가정해 보겠습니다. 구성 함수는 다음과 같습니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:57 +#: ../../source/how-to-configure-clients.rst:65 +#, fuzzy msgid "" -"Strategy initialization: if the strategy relies on the default values for" -" ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " -"``fraction_evaluate`` manually to ``0.1``. Projects that do not manually " -"create a strategy (by calling ``start_server`` or ``start_simulation`` " -"without passing a strategy instance) should now manually initialize " -"FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." 
+"To make the built-in strategies use this function, we can pass it to " +"``FedAvg`` during initialization using the parameter " +"``on_fit_config_fn``:" msgstr "" +"기본 제공 전략이 이 함수를 사용하도록 하려면 초기화 중에 매개 변수 :code:`on_fit_config_fn`을 사용하여 " +"``FedAvg``에 이 함수를 전달하면 됩니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 -msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" -msgstr "" +#: ../../source/how-to-configure-clients.rst:75 +msgid "One the client side, we receive the configuration dictionary in ``fit``:" +msgstr "클라이언트 측에서는 ``fit``으로 구성 dictionary을 받습니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:60 -msgid "``fraction_eval`` --> ``fraction_evaluate``" +#: ../../source/how-to-configure-clients.rst:86 +msgid "" +"There is also an `on_evaluate_config_fn` to configure evaluation, which " +"works the same way. They are separate functions because one might want to" +" send different configuration values to `evaluate` (for example, to use a" +" different batch size)." msgstr "" +"평가를 구성하는 `on_evaluate_config_fn`도 있으며, 같은 방식으로 작동합니다. 다른 배치 크기를 사용하기 위해 " +"다른 구성 값을 `evaluate`로 보내려고 할 수 있기 때문에 이 함수는 별도의 함수입니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 -msgid "``min_eval_clients`` --> ``min_evaluate_clients``" +#: ../../source/how-to-configure-clients.rst:90 +msgid "" +"The built-in strategies call this function every round (that is, every " +"time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). " +"Calling `on_evaluate_config_fn` every round allows us to vary/change the " +"config dict over consecutive rounds. If we wanted to implement a " +"hyperparameter schedule, for example, to increase the number of local " +"epochs during later rounds, we could do the following:" msgstr "" +"기본 제공 전략은 매 라운드마다 이 함수를 호출합니다(즉, `Strategy.configure_fit` 또는 " +"`Strategy.configure_evaluate`가 실행될 때마다). 매 라운드마다 `on_evaluate_config_fn`을" +" 호출하면 연속된 라운드에서 config dict를 변경/변경할 수 있습니다. 
예를 들어 이후 라운드에서 로컬 에포크 수를 늘리기 " +"위해 하이퍼파라미터 일정을 구현하려면 다음과 같이 할 수 있습니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:62 -msgid "``eval_fn`` --> ``evaluate_fn``" -msgstr "" +#: ../../source/how-to-configure-clients.rst:107 +#, fuzzy +msgid "The ``FedAvg`` strategy will call this function *every round*." +msgstr ":code:`FedAvg` 전략은 이 함수를 *매 라운드마다* 호출합니다." + +#: ../../source/how-to-configure-clients.rst:110 +msgid "Configuring individual clients" +msgstr "개별 클라이언트 구성" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:64 +#: ../../source/how-to-configure-clients.rst:112 msgid "" -"Rename ``rnd`` to ``server_round``. This impacts multiple methods and " -"functions, for example, ``configure_fit``, ``aggregate_fit``, " -"``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``." -msgstr "" +"In some cases, it is necessary to send different configuration values to " +"different clients." +msgstr "경우에 따라 다른 구성 값을 다른 클라이언트에 보내야 하는 경우도 있습니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:65 -msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" -msgstr "" +#: ../../source/how-to-configure-clients.rst:115 +#, fuzzy +msgid "" +"This can be achieved by customizing an existing strategy or by " +":doc:`implementing a custom strategy from scratch `. Here's a nonsensical example that customizes ``FedAvg`` by " +"adding a custom ``\"hello\": \"world\"`` configuration key/value pair to " +"the config dict of a *single client* (only the first client in the list, " +"the other clients in this round to not receive this \"special\" config " +"value):" +msgstr "" +"이는 기존 전략을 사용자 지정하거나 :doc:`implementing a custom strategy from scratch " +"`를 통해 수행할 수 있습니다. 
다음은 사용자 지정 ``\"hello\"'를 " +"추가하여 :code:`FedAvg`를 사용자 지정하는 무의미한 예입니다: \"world\"`` 구성 키/값 쌍을 *단일 " +"클라이언트*의 config dict에 추가합니다(목록의 첫 번째 클라이언트만, 이 라운드의 다른 클라이언트는 이 \"특별한\" 구성" +" 값을 수신하지 않음):" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:67 +#: ../../source/how-to-configure-logging.rst:2 +msgid "Configure logging" +msgstr "로깅 구성" + +#: ../../source/how-to-configure-logging.rst:4 msgid "" -"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:``" +"The Flower logger keeps track of all core events that take place in " +"federated learning workloads. It presents information by default " +"following a standard message format:" msgstr "" +"Flower 로거는 federated 학습 워크로드에서 발생하는 모든 핵심 이벤트를 추적합니다. 기본적으로 표준 메시지 형식에 따라" +" 정보를 표시합니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +#: ../../source/how-to-configure-logging.rst:13 +#, fuzzy msgid "" -"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " -"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " -"Scalar]]]:``" +"containing relevant information including: log message level (e.g. " +"``INFO``, ``DEBUG``), a timestamp, the line where the logging took place " +"from, as well as the log message itself. In this way, the logger would " +"typically display information on your terminal as follows:" msgstr "" +"로그 메시지 수준(예: :code:`INFO`, :code:`DEBUG`), 타임스탬프, 로깅이 발생한 줄, 로그 메시지 자체 등 " +"관련 정보를 포함합니다. 
이러한 방식으로 로거는 일반적으로 다음과 같은 정보를 터미널에 표시합니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 -msgid "Custom strategies" -msgstr "" +#: ../../source/how-to-configure-logging.rst:35 +msgid "Saving log to file" +msgstr "파일에 로그 저장" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:73 +#: ../../source/how-to-configure-logging.rst:37 +#, fuzzy msgid "" -"The type of parameter ``failures`` has changed from " -"``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " -"BaseException]]`` (in ``aggregate_fit``) and " -"``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in " -"``aggregate_evaluate``)" +"By default, the Flower log is outputted to the terminal where you launch " +"your Federated Learning workload from. This applies for both gRPC-based " +"federation (i.e. when you do ``fl.server.start_server``) and when using " +"the ``VirtualClientEngine`` (i.e. when you do " +"``fl.simulation.start_simulation``). In some situations you might want to" +" save this log to disk. You can do so by calling the " +"`fl.common.logger.configure() " +"`_" +" function. For example:" msgstr "" +"기본적으로 Flower 로그는 Federated 학습 워크로드를 실행하는 터미널에 출력됩니다. 이는 gRPC 기반 " +"페더레이션(즉,:code:`fl.simulation.start_simulation`를 실행하는 경우)과 " +":code:`VirtualClientEngine`을 사용하는 경우(즉, " +":코드:`fl.simulation.start_simulation`을 실행하는 경우) 모두에 적용됩니다. 경우에 따라 이 로그를 " +"디스크에 저장하고 싶을 수도 있습니다. 이 경우 `fl.common.logger.configure() " +"`_" +" 함수를 호출하여 저장할 수 있습니다. 예를 들어:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:74 +#: ../../source/how-to-configure-logging.rst:59 +#, fuzzy msgid "" -"The ``Strategy`` method ``evaluate`` now receives the current round of " -"federated learning/evaluation as the first parameter:" +"With the above, Flower will record the log you see on your terminal to " +"``log.txt``. This file will be created in the same directory as were you " +"are running the code from. 
If we inspect we see the log above is also " +"recorded but prefixing with ``identifier`` each line:" msgstr "" +"위와 같이 하면 Flower는 터미널에 표시되는 로그를 :code:`log.txt`에 기록합니다. 이 파일은 코드를 실행한 " +"디렉터리와 동일한 디렉터리에 생성됩니다. 검사해보면 위의 로그도 기록되지만 각 줄 앞에 :code:`identifier` 접두사가 " +"붙는 것을 확인할 수 있습니다:" + +#: ../../source/how-to-configure-logging.rst:81 +msgid "Log your own messages" +msgstr "나만의 메시지 기록" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:76 +#: ../../source/how-to-configure-logging.rst:83 msgid "" -"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:``" +"You might expand the information shown by default with the Flower logger " +"by adding more messages relevant to your application. You can achieve " +"this easily as follows." msgstr "" +"애플리케이션과 관련된 메시지를 더 추가하여 Flower 로거에 기본적으로 표시되는 정보를 확장할 수 있습니다. 다음과 같이 쉽게 " +"추가할 수 있습니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +#: ../../source/how-to-configure-logging.rst:114 msgid "" -"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " -"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" -msgstr "" +"In this way your logger will show, in addition to the default messages, " +"the ones introduced by the clients as specified above." +msgstr "이렇게 하면 로거에 기본 메시지 외에 위에서 지정한 대로 클라이언트가 소개한 메시지가 표시됩니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 -msgid "Optional improvements" -msgstr "" +#: ../../source/how-to-configure-logging.rst:140 +msgid "Log to a remote service" +msgstr "원격 서비스에 로그인" + +#: ../../source/how-to-configure-logging.rst:142 +#, fuzzy +msgid "" +"The ``fl.common.logger.configure`` function, also allows specifying a " +"host to which logs can be pushed (via ``POST``) through a native Python " +"``logging.handler.HTTPHandler``. This is a particularly useful feature in" +" ``gRPC``-based Federated Learning workloads where otherwise gathering " +"logs from all entities (i.e. the server and the clients) might be " +"cumbersome. 
Note that in Flower simulation, the server automatically " +"displays all logs. You can still specify a ``HTTPHandler`` should you " +"wish to backup or analyze the logs somewhere else." +msgstr "" +"또한 :code:`fl.common.logger.configure` 함수를 사용하면 네이티브 Python " +":code:`logging.handler.HTTPHandler`를 통해 로그를 푸시할 수 있는 호스트를 지정할 수 " +"있습니다(:code:`POST`를 통해). 이는 모든 엔티티(예: 서버 및 클라이언트)에서 로그를 수집하는 것이 번거로울 수 있는 " +":code:`gRPC` 기반 Federated 학습 워크로드에서 특히 유용한 기능입니다. Flower 시뮬레이션에서는 서버가 모든 " +"로그를 자동으로 표시합니다. 로그를 다른 곳에 백업하거나 분석하려는 경우 :code:`HTTPHandler`를 지정할 수 있습니다." + +#: ../../source/how-to-enable-ssl-connections.rst:2 +msgid "Enable SSL connections" +msgstr "SSL 연결 사용" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:82 +#: ../../source/how-to-enable-ssl-connections.rst:4 +#, fuzzy msgid "" -"Along with the necessary changes above, there are a number of potential " -"improvements that just became possible:" +"This guide describes how to a SSL-enabled secure Flower server " +"(``SuperLink``) can be started and how a Flower client (``SuperNode``) " +"can establish a secure connections to it." msgstr "" +"이 가이드에서는 SSL을 지원하는 보안 Flower 서버(:코드:`SuperLink`)를 시작하는 방법과 Flower " +"클라이언트(:코드:`SuperNode`)가 이 서버에 보안 연결을 설정하는 방법을 설명합니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:84 +#: ../../source/how-to-enable-ssl-connections.rst:8 msgid "" -"Remove \"placeholder\" methods from subclasses of ``Client`` or " -"``NumPyClient``. If you, for example, use server-side evaluation, then " -"empty placeholder implementations of ``evaluate`` are no longer " -"necessary." +"A complete code example demonstrating a secure connection can be found " +"`here `_." msgstr "" +"보안 연결을 보여주는 전체 코드 예제는 '여기 " +"`_'에서 확인할 수 있습니다." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:85 +#: ../../source/how-to-enable-ssl-connections.rst:11 +#, fuzzy msgid "" -"Configure the round timeout via ``start_simulation``: " -"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " -"round_timeout=600.0), ...)``" +"The code example comes with a ``README.md`` file which explains how to " +"start it. Although it is already SSL-enabled, it might be less " +"descriptive on how it does so. Stick to this guide for a deeper " +"introduction to the topic." msgstr "" +"코드 예제에는 시작 방법을 설명하는 :code:`README.md` 파일이 함께 제공됩니다. 이미 SSL을 사용하도록 설정되어 " +"있지만 그 방법에 대한 설명이 부족할 수 있습니다. 이 가이드를 참고하여 이 주제에 대해 자세히 알아보세요." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:89 -#: ../../source/how-to-upgrade-to-flower-next.rst:317 -msgid "Further help" -msgstr "" +#: ../../source/how-to-enable-ssl-connections.rst:16 +msgid "Certificates" +msgstr "인증서" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:91 +#: ../../source/how-to-enable-ssl-connections.rst:18 +#, fuzzy msgid "" -"Most official `Flower code examples " -"`_ are already updated" -" to Flower 1.0, they can serve as a reference for using the Flower 1.0 " -"API. If there are further questions, `join the Flower Slack " -"`_ and use the channel ``#questions``." +"Using SSL-enabled connections requires certificates to be passed to the " +"server and client. For the purpose of this guide we are going to generate" +" self-signed certificates. As this can become quite complex we are going " +"to ask you to run the script in ``examples/advanced-" +"tensorflow/certificates/generate.sh`` with the following command " +"sequence:" msgstr "" +"SSL 사용 연결을 사용하려면 서버와 클라이언트에 인증서를 전달해야 합니다. 이 가이드에서는 자체 서명된 인증서를 생성하겠습니다. 
" +"이 과정은 상당히 복잡할 수 있으므로 다음 명령 시퀀스를 사용하여 :code:`examples/advanced-" +"tensorflow/certificates/generate.sh`에서 스크립트를 실행하도록 요청하겠습니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:2 -msgid "Upgrade to Flower Next" +#: ../../source/how-to-enable-ssl-connections.rst:29 +#, fuzzy +msgid "" +"This will generate the certificates in ``examples/advanced-" +"tensorflow/.cache/certificates``." msgstr "" +"이렇게 하면 :code:`examples/advanced-tensorflow/.cache/certificates`에 인증서가 " +"생성됩니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:4 +#: ../../source/how-to-enable-ssl-connections.rst:32 msgid "" -"Welcome to the migration guide for updating Flower to Flower Next! " -"Whether you're a seasoned user or just getting started, this guide will " -"help you smoothly transition your existing setup to take advantage of the" -" latest features and improvements in Flower Next, starting from version " -"1.8." +"The approach for generating SSL certificates in the context of this " +"example can serve as an inspiration and starting point, but it should not" +" be used as a reference for production environments. Please refer to " +"other sources regarding the issue of correctly generating certificates " +"for production environments. For non-critical prototyping or research " +"projects, it might be sufficient to use the self-signed certificates " +"generated using the scripts mentioned in this guide." msgstr "" +"이 예의 맥락에서 SSL 인증서를 생성하는 접근 방식은 영감과 출발점이 될 수 있지만 프로덕션 환경에 대한 참조로 사용해서는 안 " +"됩니다. 프로덕션 환경용 인증서를 올바르게 생성하는 문제에 대해서는 다른 출처를 참조하세요. 중요하지 않은 프로토타이핑 또는 연구 " +"프로젝트의 경우, 이 가이드에 언급된 스크립트를 사용하여 생성한 자체 서명 인증서를 사용하는 것으로 충분할 수 있습니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:9 -msgid "" -"This guide shows how to reuse pre-``1.8`` Flower code with minimum code " -"changes by using the *compatibility layer* in Flower Next. In another " -"guide, we will show how to run Flower Next end-to-end with pure Flower " -"Next APIs." 
-msgstr "" +#: ../../source/how-to-enable-ssl-connections.rst:40 +msgid "Server (SuperLink)" +msgstr "서버(SuperLink)" -#: ../../source/how-to-upgrade-to-flower-next.rst:13 -msgid "Let's dive in!" -msgstr "" +#: ../../source/how-to-enable-ssl-connections.rst:42 +msgid "" +"Use the following terminal command to start a sever (SuperLink) that uses" +" the previously generated certificates:" +msgstr "다음 터미널 명령을 사용하여 이전에 생성한 인증서를 사용하는 서버(SuperLink)를 시작합니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:48 +#: ../../source/how-to-enable-ssl-connections.rst:52 msgid "" -"Here's how to update an existing installation of Flower to Flower Next " -"with ``pip``:" -msgstr "" +"When providing certificates, the server expects a tuple of three " +"certificates paths: CA certificate, server certificate and server private" +" key." +msgstr "인증서를 제공할 때 서버는 세 가지 인증서 경로의 튜플을 기대합니다: CA 인증서, 서버 인증서 및 서버 개인 키입니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:54 -msgid "or if you need Flower Next with simulation:" -msgstr "" +#: ../../source/how-to-enable-ssl-connections.rst:56 +msgid "Client (SuperNode)" +msgstr "클라이언트(SuperNode)" -#: ../../source/how-to-upgrade-to-flower-next.rst:61 +#: ../../source/how-to-enable-ssl-connections.rst:58 msgid "" -"Ensure you set the following version constraint in your " -"``requirements.txt``" -msgstr "" - -#: ../../source/how-to-upgrade-to-flower-next.rst:71 -msgid "or ``pyproject.toml``:" -msgstr "" +"Use the following terminal command to start a client (SuperNode) that " +"uses the previously generated certificates:" +msgstr "다음 터미널 명령을 사용하여 이전에 생성한 인증서를 사용하는 클라이언트(SuperNode)를 시작합니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:82 -msgid "Using Poetry" -msgstr "" +#: ../../source/how-to-enable-ssl-connections.rst:67 +#, fuzzy +msgid "" +"When setting ``root_certificates``, the client expects a file path to " +"PEM-encoded root certificates." +msgstr "코드:`root_certificates`를 설정하면 클라이언트는 PEM 인코딩된 루트 인증서의 파일 경로를 예상합니다." 
-#: ../../source/how-to-upgrade-to-flower-next.rst:84 +#: ../../source/how-to-enable-ssl-connections.rst:73 msgid "" -"Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " -"(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " -"running ``poetry install``)." +"You should now have learned how to generate self-signed certificates " +"using the given script, start an SSL-enabled server and have a client " +"establish a secure connection to it." msgstr "" +"이제 주어진 스크립트를 사용하여 자체 서명 인증서를 생성하고, SSL 사용 서버를 시작하고, 클라이언트가 보안 연결을 설정하는 " +"방법을 배웠을 것입니다." + +#: ../../source/how-to-enable-ssl-connections.rst:78 +msgid "Additional resources" +msgstr "추가 리소스" -#: ../../source/how-to-upgrade-to-flower-next.rst:86 +#: ../../source/how-to-enable-ssl-connections.rst:80 msgid "" -"Ensure you set the following version constraint in your " -"``pyproject.toml``:" -msgstr "" +"These additional sources might be relevant if you would like to dive " +"deeper into the topic of certificates:" +msgstr "인증서에 대해 더 자세히 알아보고 싶다면 이러한 추가 자료를 참고하세요:" + +#: ../../source/how-to-enable-ssl-connections.rst:83 +msgid "`Let's Encrypt `_" +msgstr "`Let's Encrypt `_" + +#: ../../source/how-to-enable-ssl-connections.rst:84 +msgid "`certbot `_" +msgstr "`certbot `_" + +#: ../../source/how-to-implement-strategies.rst:2 +msgid "Implement strategies" +msgstr "전략 구현" -#: ../../source/how-to-upgrade-to-flower-next.rst:102 +#: ../../source/how-to-implement-strategies.rst:4 msgid "" -"In Flower Next, the *infrastructure* and *application layers* have been " -"decoupled. Instead of starting a client in code via ``start_client()``, " -"you create a |clientapp_link|_ and start it via the command line. Instead" -" of starting a server in code via ``start_server()``, you create a " -"|serverapp_link|_ and start it via the command line. The long-running " -"components of server and client are called SuperLink and SuperNode. 
The " -"following non-breaking changes that require manual updates and allow you " -"to run your project both in the traditional way and in the Flower Next " -"way:" +"The strategy abstraction enables implementation of fully custom " +"strategies. A strategy is basically the federated learning algorithm that" +" runs on the server. Strategies decide how to sample clients, how to " +"configure clients for training, how to aggregate updates, and how to " +"evaluate models. Flower provides a few built-in strategies which are " +"based on the same API described below." msgstr "" +"전략 추상화를 통해 완전한 맞춤형 전략을 구현할 수 있습니다. 전략은 기본적으로 서버에서 실행되는 연합 학습 알고리즘입니다. 전략은" +" 클라이언트를 샘플링하는 방법, 학습을 위해 클라이언트를 구성하는 방법, 업데이트를 집계하는 방법, 모델을 평가하는 방법을 " +"결정합니다. Flower는 아래에 설명된 것과 동일한 API를 기반으로 하는 몇 가지 기본 제공 전략을 제공합니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:109 -msgid "|clientapp_link|_" -msgstr "" +#: ../../source/how-to-implement-strategies.rst:11 +#, fuzzy +msgid "The ``Strategy`` abstraction" +msgstr ":code:`Strategy` 추상화" -#: ../../source/how-to-upgrade-to-flower-next.rst:110 +#: ../../source/how-to-implement-strategies.rst:13 +#, fuzzy msgid "" -"Wrap your existing client with |clientapp_link|_ instead of launching it " -"via |startclient_link|_. Here's an example:" +"All strategy implementation are derived from the abstract base class " +"``flwr.server.strategy.Strategy``, both built-in implementations and " +"third party implementations. This means that custom strategy " +"implementations have the exact same capabilities at their disposal as " +"built-in ones." msgstr "" +"모든 전략 구현은 기본 제공 구현과 타사 구현 모두 추상 기본 클래스인 " +":code:`flwr.server.strategy.Strategy`에서 파생됩니다. 즉, 사용자 정의 전략 구현은 기본 제공 구현과" +" 완전히 동일한 기능을 사용할 수 있습니다." 
-#: ../../source/how-to-upgrade-to-flower-next.rst:132 -msgid "|serverapp_link|_" -msgstr "" +#: ../../source/how-to-implement-strategies.rst:18 +msgid "" +"The strategy abstraction defines a few abstract methods that need to be " +"implemented:" +msgstr "전략 추상화에서는 구현해야 하는 몇 가지 추상적인 메서드를 정의합니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:133 +#: ../../source/how-to-implement-strategies.rst:67 +#, fuzzy msgid "" -"Wrap your existing strategy with |serverapp_link|_ instead of starting " -"the server via |startserver_link|_. Here's an example:" +"Creating a new strategy means implementing a new ``class`` (derived from " +"the abstract base class ``Strategy``) that implements for the previously " +"shown abstract methods:" msgstr "" +"새 전략을 생성한다는 것은 이전에 표시된 추상 메서드에 대해 구현하는 새로운 :code:`class`(추상 기본 클래스 " +":code:`Strategy`에서 파생됨)를 구현하는 것을 의미합니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:154 -msgid "Deployment" -msgstr "" +#: ../../source/how-to-implement-strategies.rst:97 +msgid "The Flower server calls these methods in the following order:" +msgstr "Flower 서버는 다음 순서로 이러한 메서드를 호출합니다:" + +#: ../../source/how-to-implement-strategies.rst:174 +msgid "The following sections describe each of those methods in more detail." +msgstr "다음 섹션에서는 이러한 각 방법에 대해 자세히 설명합니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:155 +#: ../../source/how-to-implement-strategies.rst:177 +#, fuzzy +msgid "The ``initialize_parameters`` method" +msgstr ":code:`initialize_parameters` 메서드" + +#: ../../source/how-to-implement-strategies.rst:179 +#, fuzzy msgid "" -"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " -"in sequence, |flowernext_clientapp_link|_ (2x) and " -"|flowernext_serverapp_link|_. There is no need to execute `client.py` and" -" `server.py` as Python scripts." +"``initialize_parameters`` is called only once, at the very beginning of " +"an execution. 
It is responsible for providing the initial global model " +"parameters in a serialized form (i.e., as a ``Parameters`` object)." msgstr "" +"code:`initialize_parameters`는 실행을 처음 시작할 때 한 번만 호출됩니다. 이 함수는 초기 전역 모델 " +"파라미터를 직렬화된 형식(즉, :code:`Parameters` 객체)으로 제공하는 역할을 합니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:158 +#: ../../source/how-to-implement-strategies.rst:183 +#, fuzzy msgid "" -"Here's an example to start the server without HTTPS (only for " -"prototyping):" +"Built-in strategies return user-provided initial parameters. The " +"following example shows how initial parameters can be passed to " +"``FedAvg``:" msgstr "" +"기본 제공 전략은 사용자가 제공한 초기 매개 변수를 반환합니다. 다음 예는 초기 매개 변수를 :code:`FedAvg`에 전달하는 " +"방법을 보여줍니다:" + +#: ../../source/how-to-implement-strategies.rst:209 +#, fuzzy +msgid "" +"The Flower server will call ``initialize_parameters``, which either " +"returns the parameters that were passed to ``initial_parameters``, or " +"``None``. If no parameters are returned from ``initialize_parameters`` " +"(i.e., ``None``), the server will randomly select one client and ask it " +"to provide its parameters. This is a convenience feature and not " +"recommended in practice, but it can be useful for prototyping. In " +"practice, it is recommended to always use server-side parameter " +"initialization." +msgstr "" +"Flower 서버는 :code:`initialize_parameters`를 호출하여 " +":code:`initial_parameters`에 전달된 파라미터를 반환하거나 :code:`None`을 반환합니다. " +":code:`initial_parameters`에서 반환되는 매개변수가 없는 경우(즉, :code:`None`) 서버는 무작위로 " +"클라이언트 하나를 선택하여 해당 클라이언트에 매개변수를 제공하도록 요청합니다. 이는 편의 기능이며 실제로는 권장하지 않지만 " +"프로토타이핑에는 유용할 수 있습니다. 실제로는 항상 서버 측 매개변수 초기화를 사용하는 것이 좋습니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:174 +#: ../../source/how-to-implement-strategies.rst:218 msgid "" -"Here's another example to start with HTTPS. Use the ``--certificates`` " -"command line argument to pass paths to (CA certificate, server " -"certificate, and server private key)." 
+"Server-side parameter initialization is a powerful mechanism. It can be " +"used, for example, to resume training from a previously saved checkpoint." +" It is also the fundamental capability needed to implement hybrid " +"approaches, for example, to fine-tune a pre-trained model using federated" +" learning." msgstr "" +"서버 측 파라미터 초기화는 강력한 메커니즘입니다. 예를 들어 이전에 저장한 체크포인트에서 학습을 재개하는 데 사용할 수 있습니다. " +"또한 연합 학습을 사용하여 사전 학습된 모델을 미세 조정하는 등 하이브리드 접근 방식을 구현하는 데 필요한 기본 기능입니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:201 -msgid "Simulation in CLI" -msgstr "" +#: ../../source/how-to-implement-strategies.rst:224 +#, fuzzy +msgid "The ``configure_fit`` method" +msgstr ":code:`configure_fit` 메서드" -#: ../../source/how-to-upgrade-to-flower-next.rst:202 +#: ../../source/how-to-implement-strategies.rst:226 +#, fuzzy msgid "" -"Wrap your existing client and strategy with |clientapp_link|_ and " -"|serverapp_link|_, respectively. There is no need to use |startsim_link|_" -" anymore. Here's an example:" +"``configure_fit`` is responsible for configuring the upcoming round of " +"training. What does *configure* mean in this context? Configuring a round" +" means selecting clients and deciding what instructions to send to these " +"clients. The signature of ``configure_fit`` makes this clear:" msgstr "" +":code:`configure_fit`은 다가오는 학습 라운드를 구성하는 역할을 합니다. 이 문맥에서 *구성*은 무엇을 의미하나요? " +"라운드를 구성한다는 것은 클라이언트를 선택하고 이 클라이언트에게 어떤 지침을 보낼지 결정하는 것을 의미합니다. " +":code:`configure_fit`의 시그니처를 보면 이를 명확히 알 수 있습니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:232 +#: ../../source/how-to-implement-strategies.rst:239 +#, fuzzy msgid "" -"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " -"``client_app`` object in the code instead of executing the Python script." 
-" Here's an example (assuming the ``server_app`` and ``client_app`` " -"objects are in a ``sim.py`` module):" +"The return value is a list of tuples, each representing the instructions " +"that will be sent to a particular client. Strategy implementations " +"usually perform the following steps in ``configure_fit``:" msgstr "" +"반환 값은 튜플 목록으로, 각 튜플은 특정 클라이언트로 전송될 명령어를 나타냅니다. 전략 구현은 일반적으로 " +":code:`configure_fit`에서 다음 단계를 수행합니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:249 +#: ../../source/how-to-implement-strategies.rst:243 +#: ../../source/how-to-implement-strategies.rst:307 +#, fuzzy msgid "" -"Set default resources for each |clientapp_link|_ using the ``--backend-" -"config`` command line argument instead of setting the " -"``client_resources`` argument in |startsim_link|_. Here's an example:" +"Use the ``client_manager`` to randomly sample all (or a subset of) " +"available clients (each represented as a ``ClientProxy`` object)" msgstr "" +":code:`client_manager`를 사용하여 사용 가능한 모든 클라이언트(또는 그 하위 집합)를 무작위로 샘플링합니다(각각 " +":code:`ClientProxy` 개체로 표시됨)" -#: ../../source/how-to-upgrade-to-flower-next.rst:275 -msgid "Simulation in a Notebook" +#: ../../source/how-to-implement-strategies.rst:245 +#, fuzzy +msgid "" +"Pair each ``ClientProxy`` with the same ``FitIns`` holding the current " +"global model ``parameters`` and ``config`` dict" msgstr "" +"각 :code:`ClientProxy`를 현재 글로벌 모델 :code:`parameters` 및 :code:`config` " +"dict를 보유한 동일한 :code:`FitIns`와 쌍을 이룹니다" -#: ../../source/how-to-upgrade-to-flower-next.rst:276 +#: ../../source/how-to-implement-strategies.rst:248 +#, fuzzy msgid "" -"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's " -"an example:" +"More sophisticated implementations can use ``configure_fit`` to implement" +" custom client selection logic. A client will only participate in a round" +" if the corresponding ``ClientProxy`` is included in the list returned " +"from ``configure_fit``." 
msgstr "" +"보다 정교한 구현은 :code:`configure_fit`을 사용하여 사용자 지정 클라이언트 선택 로직을 구현할 수 있습니다. " +"클라이언트는 :code:`configure_fit`에서 반환된 목록에 해당 :code:`ClientProxy`가 포함된 경우에만 " +"라운드에 참여합니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:319 +#: ../../source/how-to-implement-strategies.rst:254 +#, fuzzy msgid "" -"Some official `Flower code examples `_ " -"are already updated to Flower Next so they can serve as a reference for " -"using the Flower Next API. If there are further questions, `join the " -"Flower Slack `_ and use the channel " -"``#questions``. You can also `participate in Flower Discuss " -"`_ where you can find us answering questions," -" or share and learn from others about migrating to Flower Next." +"The structure of this return value provides a lot of flexibility to the " +"user. Since instructions are defined on a per-client basis, different " +"instructions can be sent to each client. This enables custom strategies " +"to train, for example, different models on different clients, or use " +"different hyperparameters on different clients (via the ``config`` dict)." msgstr "" +"이 반환 값의 구조는 사용자에게 많은 유연성을 제공합니다. instructions은 클라이언트별로 정의되므로 각 클라이언트에 서로 " +"다른 명령어를 전송할 수 있습니다. 이를 통해 예를 들어 클라이언트마다 다른 모델을 학습시키거나 클라이언트마다 다른 하이퍼파라미터를" +" 사용하는 사용자 지정 전략을 사용할 수 있습니다(:code:`config` dict를 통해)." -#: ../../source/how-to-upgrade-to-flower-next.rst:325 -msgid "Important" -msgstr "" +#: ../../source/how-to-implement-strategies.rst:261 +#, fuzzy +msgid "The ``aggregate_fit`` method" +msgstr ":code:`aggregate_fit` 메서드" -#: ../../source/how-to-upgrade-to-flower-next.rst:328 +#: ../../source/how-to-implement-strategies.rst:263 +#, fuzzy msgid "" -"As we continuously enhance Flower Next at a rapid pace, we'll be " -"periodically updating this guide. Please feel free to share any feedback " -"with us!" +"``aggregate_fit`` is responsible for aggregating the results returned by " +"the clients that were selected and asked to train in ``configure_fit``." 
msgstr "" +"code:`aggregate_fit`은 :code:`configure_fit`에서 훈련하도록 선택되고 요청된 클라이언트가 반환한 " +"결과를 집계하는 역할을 담당합니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:334 -msgid "Happy migrating! 🚀" +#: ../../source/how-to-implement-strategies.rst:277 +#, fuzzy +msgid "" +"Of course, failures can happen, so there is no guarantee that the server " +"will get results from all the clients it sent instructions to (via " +"``configure_fit``). ``aggregate_fit`` therefore receives a list of " +"``results``, but also a list of ``failures``." msgstr "" +"물론 실패가 발생할 수 있으므로 서버가 명령을 보낸 모든 클라이언트로부터 결과를 얻을 수 있다는 보장은 " +"없습니다(:code:`configure_fit`을 통해). 따라서 :code:`aggregate_fit`은 " +":code:`results` 목록뿐만 아니라 :code:`failures` 목록도 받습니다." -#: ../../source/how-to-use-built-in-mods.rst:2 -msgid "Use Built-in Mods" +#: ../../source/how-to-implement-strategies.rst:282 +#, fuzzy +msgid "" +"``aggregate_fit`` returns an optional ``Parameters`` object and a " +"dictionary of aggregated metrics. The ``Parameters`` return value is " +"optional because ``aggregate_fit`` might decide that the results provided" +" are not sufficient for aggregation (e.g., too many failures)." msgstr "" +"code:`aggregate_fit`은 선택적 :code:`Parameters` 개체와 집계된 메트릭의 dictionary를 " +"반환합니다. :code:`Parameters` 반환 값은 :code:`aggregate_fit`이 제공된 결과가 집계에 충분하지 " +"않다고 판단할 수 있으므로(예: 실패 수가 너무 많음) 선택 사항입니다." -#: ../../source/how-to-use-built-in-mods.rst:4 +#: ../../source/how-to-implement-strategies.rst:288 +#, fuzzy +msgid "The ``configure_evaluate`` method" +msgstr ":code:`configure_evaluate` 메서드" + +#: ../../source/how-to-implement-strategies.rst:290 +#, fuzzy msgid "" -"**Note: This tutorial covers experimental features. The functionality and" -" interfaces may change in future versions.**" +"``configure_evaluate`` is responsible for configuring the upcoming round " +"of evaluation. What does *configure* mean in this context? 
Configuring a " +"round means selecting clients and deciding what instructions to send to " +"these clients. The signature of ``configure_evaluate`` makes this clear:" msgstr "" +":code:`configure_evaluate`는 다가오는 평가 라운드를 구성하는 역할을 합니다. 이 문맥에서 *구성*은 무엇을 " +"의미하나요? 라운드를 구성한다는 것은 클라이언트를 선택하고 이러한 클라이언트에 전송할 지침을 결정하는 것을 의미합니다. " +":code:`configure_evaluate`의 시그니처를 보면 이를 명확히 알 수 있습니다:" -#: ../../source/how-to-use-built-in-mods.rst:6 +#: ../../source/how-to-implement-strategies.rst:303 +#, fuzzy msgid "" -"In this tutorial, we will learn how to utilize built-in mods to augment " -"the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) " -"allow us to perform operations before and after a task is processed in " -"the ``ClientApp``." +"The return value is a list of tuples, each representing the instructions " +"that will be sent to a particular client. Strategy implementations " +"usually perform the following steps in ``configure_evaluate``:" msgstr "" +"반환 값은 튜플 목록으로, 각 튜플은 특정 클라이언트로 전송될 명령어를 나타냅니다. 전략 구현은 일반적으로 " +":code:`configure_evaluate`에서 다음 단계를 수행합니다:" -#: ../../source/how-to-use-built-in-mods.rst:9 -msgid "What are Mods?" +#: ../../source/how-to-implement-strategies.rst:309 +#, fuzzy +msgid "" +"Pair each ``ClientProxy`` with the same ``EvaluateIns`` holding the " +"current global model ``parameters`` and ``config`` dict" msgstr "" +"각 :code:`ClientProxy`를 현재 글로벌 모델 :code:`parameters` 및 :code:`config` " +"dict를 보유한 동일한 :code:`EvaluateIns`와 쌍을 이룹니다" -#: ../../source/how-to-use-built-in-mods.rst:11 +#: ../../source/how-to-implement-strategies.rst:312 +#, fuzzy msgid "" -"A Mod is a callable that wraps around a ``ClientApp``. It can manipulate " -"or inspect the incoming ``Message`` and the resulting outgoing " -"``Message``. The signature for a ``Mod`` is as follows:" +"More sophisticated implementations can use ``configure_evaluate`` to " +"implement custom client selection logic. 
A client will only participate " +"in a round if the corresponding ``ClientProxy`` is included in the list " +"returned from ``configure_evaluate``." msgstr "" +"보다 정교한 구현은 :code:`configure_evaluate`를 사용하여 사용자 지정 클라이언트 선택 로직을 구현할 수 " +"있습니다. 클라이언트는 :code:`configure_evaluate`에서 반환된 목록에 해당 :code:`ClientProxy`가" +" 포함된 경우에만 라운드에 참여합니다." -#: ../../source/how-to-use-built-in-mods.rst:18 -msgid "A typical mod function might look something like this:" +#: ../../source/how-to-implement-strategies.rst:318 +#, fuzzy +msgid "" +"The structure of this return value provides a lot of flexibility to the " +"user. Since instructions are defined on a per-client basis, different " +"instructions can be sent to each client. This enables custom strategies " +"to evaluate, for example, different models on different clients, or use " +"different hyperparameters on different clients (via the ``config`` dict)." msgstr "" +"이 반환 값의 구조는 사용자에게 많은 유연성을 제공합니다. 명령어는 클라이언트별로 정의되므로 각 클라이언트에 서로 다른 명령어를 " +"전송할 수 있습니다. 이를 통해 사용자 지정 전략을 통해 예를 들어 클라이언트마다 다른 모델을 평가하거나 클라이언트마다 다른 " +"하이퍼파라미터를 사용할 수 있습니다(:code:`config` dict를 통해)." -#: ../../source/how-to-use-built-in-mods.rst:31 -msgid "Using Mods" -msgstr "" +#: ../../source/how-to-implement-strategies.rst:325 +#, fuzzy +msgid "The ``aggregate_evaluate`` method" +msgstr ":code:`aggregate_evaluate` 메서드" -#: ../../source/how-to-use-built-in-mods.rst:33 -msgid "To use mods in your ``ClientApp``, you can follow these steps:" +#: ../../source/how-to-implement-strategies.rst:327 +#, fuzzy +msgid "" +"``aggregate_evaluate`` is responsible for aggregating the results " +"returned by the clients that were selected and asked to evaluate in " +"``configure_evaluate``." msgstr "" +"code:`aggregate_evaluate`는 :code:`configure_evaluate`에서 선택되어 평가를 요청한 " +"클라이언트가 반환한 결과를 집계하는 역할을 담당합니다." -#: ../../source/how-to-use-built-in-mods.rst:36 -msgid "1. 
Import the required mods" +#: ../../source/how-to-implement-strategies.rst:341 +#, fuzzy +msgid "" +"Of course, failures can happen, so there is no guarantee that the server " +"will get results from all the clients it sent instructions to (via " +"``configure_evaluate``). ``aggregate_evaluate`` therefore receives a list" +" of ``results``, but also a list of ``failures``." msgstr "" +"물론 실패가 발생할 수 있으므로 서버가 명령을 보낸 모든 클라이언트로부터 결과를 얻을 수 있다는 보장은 " +"없습니다(:code:`configure_evaluate`를 통해). 따라서 :code:`aggregate_evaluate`는 " +":code:`results` 목록뿐만 아니라 :code:`failures` 목록도 받습니다." -#: ../../source/how-to-use-built-in-mods.rst:38 -msgid "First, import the built-in mod you intend to use:" +#: ../../source/how-to-implement-strategies.rst:346 +#, fuzzy +msgid "" +"``aggregate_evaluate`` returns an optional ``float`` (loss) and a " +"dictionary of aggregated metrics. The ``float`` return value is optional " +"because ``aggregate_evaluate`` might decide that the results provided are" +" not sufficient for aggregation (e.g., too many failures)." msgstr "" +"code:`aggregate_evaluate`는 선택적 :code:`float`(손실)와 집계된 메트릭의 dictionary를 " +"반환합니다. code:`float` 반환 값은 :code:`aggregate_evaluate`가 제공된 결과가 집계에 충분하지 " +"않다고 판단할 수 있으므로(예: 실패 수가 너무 많음) 선택 사항입니다." -#: ../../source/how-to-use-built-in-mods.rst:46 -msgid "2. Define your client function" -msgstr "" +#: ../../source/how-to-implement-strategies.rst:352 +#, fuzzy +msgid "The ``evaluate`` method" +msgstr ":code:`evaluate` 메서드" -#: ../../source/how-to-use-built-in-mods.rst:48 +#: ../../source/how-to-implement-strategies.rst:354 +#, fuzzy msgid "" -"Define your client function (``client_fn``) that will be wrapped by the " -"mod(s):" -msgstr "" - -#: ../../source/how-to-use-built-in-mods.rst:57 -msgid "3. Create the ``ClientApp`` with mods" +"``evaluate`` is responsible for evaluating model parameters on the " +"server-side. 
Having ``evaluate`` in addition to " +"``configure_evaluate``/``aggregate_evaluate`` enables strategies to " +"perform both servers-side and client-side (federated) evaluation." msgstr "" +":code:`evaluate`는 서버 측에서 모델 매개변수를 평가하는 역할을 담당합니다. " +"code:`configure_evaluate`/:code:`aggregate_evaluate`와 함께 " +":code:`evaluate`를 사용하면 서버 측과 클라이언트 측(federated) 평가를 모두 수행할 수 있는 전략을 사용할 수" +" 있습니다." -#: ../../source/how-to-use-built-in-mods.rst:59 +#: ../../source/how-to-implement-strategies.rst:364 +#, fuzzy msgid "" -"Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " -"argument. The order in which you provide the mods matters:" +"The return value is again optional because the strategy might not need to" +" implement server-side evaluation or because the user-defined " +"``evaluate`` method might not complete successfully (e.g., it might fail " +"to load the server-side evaluation data)." msgstr "" +"반환 값은 전략에서 서버 측 평가를 구현할 필요가 없거나 사용자 정의 :code:`evaluate` 메서드가 성공적으로 완료되지 " +"않을 수 있기 때문에(예: 서버 측 평가 데이터를 로드하지 못할 수 있음) 다시 선택 사항으로 설정할 수 있습니다." 
-#: ../../source/how-to-use-built-in-mods.rst:72 -msgid "Order of execution" -msgstr "" +#: ../../source/how-to-install-flower.rst:2 +msgid "Install Flower" +msgstr "Flower 설치" -#: ../../source/how-to-use-built-in-mods.rst:74 -msgid "" -"When the ``ClientApp`` runs, the mods are executed in the order they are " -"provided in the list:" -msgstr "" +#: ../../source/how-to-install-flower.rst:5 +msgid "Python version" +msgstr "Python 버전" -#: ../../source/how-to-use-built-in-mods.rst:76 -msgid "``example_mod_1`` (outermost mod)" -msgstr "" +#: ../../source/how-to-install-flower.rst:11 +msgid "Install stable release" +msgstr "안정적인 릴리즈 설치" -#: ../../source/how-to-use-built-in-mods.rst:77 -msgid "``example_mod_2`` (next mod)" -msgstr "" +#: ../../source/how-to-install-flower.rst:14 +#: ../../source/how-to-upgrade-to-flower-next.rst:66 +msgid "Using pip" +msgstr "pip 사용" -#: ../../source/how-to-use-built-in-mods.rst:78 +#: ../../source/how-to-install-flower.rst:16 +#, fuzzy +msgid "Stable releases are available on `PyPI `_:" +msgstr "안정적인 릴리즈는 `PyPI `_:: 에서 확인할 수 있습니다::" + +#: ../../source/how-to-install-flower.rst:22 +#, fuzzy msgid "" -"Message handler (core function that handles the incoming ``Message`` and " -"returns the outgoing ``Message``)" -msgstr "" +"For simulations that use the Virtual Client Engine, ``flwr`` should be " +"installed with the ``simulation`` extra:" +msgstr "가상 클라이언트 엔진을 사용하는 시뮬레이션의 경우 ``flwr``을 ``simulation``extra와 함께 설치해야 합니다:" -#: ../../source/how-to-use-built-in-mods.rst:79 -msgid "``example_mod_2`` (on the way back)" -msgstr "" +#: ../../source/how-to-install-flower.rst:30 +msgid "Using conda (or mamba)" +msgstr "conda(또는 mamba) 사용" -#: ../../source/how-to-use-built-in-mods.rst:80 -msgid "``example_mod_1`` (outermost mod on the way back)" -msgstr "" +#: ../../source/how-to-install-flower.rst:32 +msgid "Flower can also be installed from the ``conda-forge`` channel." +msgstr "Flower은 'conda-forge' 채널에서도 설치할 수 있습니다." 
-#: ../../source/how-to-use-built-in-mods.rst:82 +#: ../../source/how-to-install-flower.rst:34 +#, fuzzy msgid "" -"Each mod has a chance to inspect and modify the incoming ``Message`` " -"before passing it to the next mod, and likewise with the outgoing " -"``Message`` before returning it up the stack." -msgstr "" +"If you have not added ``conda-forge`` to your channels, you will first " +"need to run the following:" +msgstr "채널에 'conda-forge'를 추가하지 않은 경우 먼저 다음을 실행해야 합니다:" -#: ../../source/how-to-use-built-in-mods.rst:87 +#: ../../source/how-to-install-flower.rst:42 +#, fuzzy msgid "" -"By following this guide, you have learned how to effectively use mods to " -"enhance your ``ClientApp``'s functionality. Remember that the order of " -"mods is crucial and affects how the input and output are processed." -msgstr "" +"Once the ``conda-forge`` channel has been enabled, ``flwr`` can be " +"installed with ``conda``:" +msgstr "conda-forge`` 채널이 활성화되면 ``flwr``을 ``conda``로 설치할 수 있습니다::" -#: ../../source/how-to-use-built-in-mods.rst:89 -msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" -msgstr "" +#: ../../source/how-to-install-flower.rst:49 +#, fuzzy +msgid "or with ``mamba``:" +msgstr "또는 ``mamba``::" -#: ../../source/how-to-use-differential-privacy.rst:2 -msgid "Use Differential Privacy" -msgstr "" +#: ../../source/how-to-install-flower.rst:56 +msgid "Verify installation" +msgstr "설치 확인" -#: ../../source/how-to-use-differential-privacy.rst:3 +#: ../../source/how-to-install-flower.rst:58 +#, fuzzy msgid "" -"This guide explains how you can utilize differential privacy in the " -"Flower framework. If you are not yet familiar with differential privacy, " -"you can refer to :doc:`explanation-differential-privacy`." +"The following command can be used to verify if Flower was successfully " +"installed. If everything worked, it should print the version of Flower to" +" the command line:" msgstr "" +"다음 명령을 사용하여 Flower가 성공적으로 설치되었는지 확인할 수 있습니다. 
모든 것이 정상적으로 작동하면 명령줄에 " +"Flower의 버전이 출력됩니다:" + +#: ../../source/how-to-install-flower.rst:68 +msgid "Advanced installation options" +msgstr "고급 설치 옵션" + +#: ../../source/how-to-install-flower.rst:71 +msgid "Install via Docker" +msgstr "Docker를 통해 설치" + +#: ../../source/how-to-install-flower.rst:73 +#, fuzzy +msgid ":doc:`Run Flower using Docker `" +msgstr ":doc:`Docker를 사용하여 Flower를 실행하는 방법 `" + +#: ../../source/how-to-install-flower.rst:76 +msgid "Install pre-release" +msgstr "사전 릴리즈 설치" -#: ../../source/how-to-use-differential-privacy.rst:7 +#: ../../source/how-to-install-flower.rst:78 +#, fuzzy msgid "" -"Differential Privacy in Flower is in a preview phase. If you plan to use " -"these features in a production environment with sensitive data, feel free" -" contact us to discuss your requirements and to receive guidance on how " -"to best use these features." +"New (possibly unstable) versions of Flower are sometimes available as " +"pre-release versions (alpha, beta, release candidate) before the stable " +"release happens:" msgstr "" +"새(불안정할 수 있는) 버전의 Flower는 안정 버전이 출시되기 전에 사전 릴리즈 버전(알파, 베타, 릴리즈 후보)으로 제공되는 " +"경우가 있습니다:" -#: ../../source/how-to-use-differential-privacy.rst:12 +#: ../../source/how-to-install-flower.rst:85 +#, fuzzy msgid "" -"This approach consists of two seprate phases: clipping of the updates and" -" adding noise to the aggregated model. For the clipping phase, Flower " -"framework has made it possible to decide whether to perform clipping on " -"the server side or the client side." 
+"For simulations that use the Virtual Client Engine, ``flwr`` pre-releases" +" should be installed with the ``simulation`` extra:" msgstr "" +"가상 클라이언트 엔진을 사용하는 시뮬레이션의 경우 ``flwr`` 사전 릴리즈를 ``simulation`` extra와 함께 " +"설치해야 합니다:" -#: ../../source/how-to-use-differential-privacy.rst:15 +#: ../../source/how-to-install-flower.rst:93 +msgid "Install nightly release" +msgstr "야간 릴리즈 설치" + +#: ../../source/how-to-install-flower.rst:95 +#, fuzzy msgid "" -"**Server-side Clipping**: This approach has the advantage of the server " -"enforcing uniform clipping across all clients' updates and reducing the " -"communication overhead for clipping values. However, it also has the " -"disadvantage of increasing the computational load on the server due to " -"the need to perform the clipping operation for all clients." -msgstr "" +"The latest (potentially unstable) changes in Flower are available as " +"nightly releases:" +msgstr "Flower의 최신 (불안정할 수 있는) 변경 사항은 다음과 같이 야간 릴리즈로 제공됩니다:" -#: ../../source/how-to-use-differential-privacy.rst:16 +#: ../../source/how-to-install-flower.rst:101 +#, fuzzy msgid "" -"**Client-side Clipping**: This approach has the advantage of reducing the" -" computational overhead on the server. However, it also has the " -"disadvantage of lacking centralized control, as the server has less " -"control over the clipping process." 
+"For simulations that use the Virtual Client Engine, ``flwr-nightly`` " +"should be installed with the ``simulation`` extra:" msgstr "" +"가상 클라이언트 엔진을 사용하는 시뮬레이션의 경우, ``flwr-nightly``를 ``simulation`` extr와 함께 " +"설치해야 합니다::" -#: ../../source/how-to-use-differential-privacy.rst:21 -msgid "Server-side Clipping" -msgstr "" +#: ../../source/how-to-monitor-simulation.rst:2 +msgid "Monitor simulation" +msgstr "모니터 시뮬레이션" -#: ../../source/how-to-use-differential-privacy.rst:22 +#: ../../source/how-to-monitor-simulation.rst:4 msgid "" -"For central DP with server-side clipping, there are two :code:`Strategy` " -"classes that act as wrappers around the actual :code:`Strategy` instance " -"(for example, :code:`FedAvg`). The two wrapper classes are " -":code:`DifferentialPrivacyServerSideFixedClipping` and " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and " -"adaptive clipping." +"Flower allows you to monitor system resources while running your " +"simulation. Moreover, the Flower simulation engine is powerful and " +"enables you to decide how to allocate resources per client manner and " +"constrain the total usage. Insights from resource consumption can help " +"you make smarter decisions and speed up the execution time." msgstr "" +"Flower를 사용하면 시뮬레이션을 실행하는 동안 시스템 리소스를 모니터링할 수 있습니다. 또한 Flower 시뮬레이션 엔진은 " +"강력하며 클라이언트별 리소스 할당 방법을 결정하고 총 사용량을 제한할 수 있습니다. 리소스 소비에 대한 인사이트를 통해 더 현명한 " +"결정을 내리고 실행 시간을 단축할 수 있습니다." -#: ../../source/how-to-use-differential-privacy.rst:-1 -msgid "server side clipping" +#: ../../source/how-to-monitor-simulation.rst:9 +msgid "" +"The specific instructions assume you are using macOS and have the " +"`Homebrew `_ package manager installed." msgstr "" +"구체적인 지침은 macOS를 사용 중이고 'Homebrew `_ 패키지 관리자가 설치되어 있다고 " +"가정합니다." 
-#: ../../source/how-to-use-differential-privacy.rst:31 +#: ../../source/how-to-monitor-simulation.rst:13 +msgid "Downloads" +msgstr "다운로드" + +#: ../../source/how-to-monitor-simulation.rst:19 msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use server-" -"side fixed clipping using the " -":code:`DifferentialPrivacyServerSideFixedClipping` wrapper class. The " -"same approach can be used with " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " -"corresponding input parameters." +"`Prometheus `_ is used for data collection, while" +" `Grafana `_ will enable you to visualize the " +"collected data. They are both well integrated with `Ray " +"`_ which Flower uses under the hood." msgstr "" +"`Prometheus `_는 데이터 수집에 사용되며, `Grafana " +"`_는 수집된 데이터를 시각화할 수 있게 해줍니다. 이 두 도구는 모두 Flower가 " +"내부적으로 사용하는 `Ray `_와 잘 통합되어 있습니다." -#: ../../source/how-to-use-differential-privacy.rst:52 -msgid "Client-side Clipping" -msgstr "" +#: ../../source/how-to-monitor-simulation.rst:23 +msgid "" +"Overwrite the configuration files (depending on your device, it might be " +"installed on a different path)." +msgstr "구성 파일을 덮어씁니다(장치에 따라 다른 경로에 설치되어 있을 수 있음)." + +#: ../../source/how-to-monitor-simulation.rst:26 +msgid "If you are on an M1 Mac, it should be:" +msgstr "M1 Mac을 사용 중이라면:" -#: ../../source/how-to-use-differential-privacy.rst:53 +#: ../../source/how-to-monitor-simulation.rst:33 +msgid "On the previous generation Intel Mac devices, it should be:" +msgstr "이전 세대 Intel Mac 장치에서는:" + +#: ../../source/how-to-monitor-simulation.rst:40 msgid "" -"For central DP with client-side clipping, the server sends the clipping " -"value to selected clients on each round. Clients can use existing Flower " -":code:`Mods` to perform the clipping. 
Two mods are available for fixed " -"and adaptive client-side clipping: :code:`fixedclipping_mod` and " -":code:`adaptiveclipping_mod` with corresponding server-side wrappers " -":code:`DifferentialPrivacyClientSideFixedClipping` and " -":code:`DifferentialPrivacyClientSideAdaptiveClipping`." -msgstr "" +"Open the respective configuration files and change them. Depending on " +"your device, use one of the two following commands:" +msgstr "각 구성 파일을 열고 변경합니다. 장치에 따라 다음 두 명령 중 하나를 사용합니다:" -#: ../../source/how-to-use-differential-privacy.rst:-1 -msgid "client side clipping" +#: ../../source/how-to-monitor-simulation.rst:51 +msgid "" +"and then delete all the text in the file and paste a new Prometheus " +"config you see below. You may adjust the time intervals to your " +"requirements:" msgstr "" +"를 입력한 다음 파일의 모든 텍스트를 삭제하고 아래에 표시된 새 Prometheus 설정을 붙여넣습니다. 요구 사항에 따라 시간 " +"간격을 조정할 수 있습니다:" -#: ../../source/how-to-use-differential-privacy.rst:63 +#: ../../source/how-to-monitor-simulation.rst:67 msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use " -"differential privacy with client-side fixed clipping using both the " -":code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on " -"the client, :code:`fixedclipping_mod`:" +"Now after you have edited the Prometheus configuration, do the same with " +"the Grafana configuration files. Open those using one of the following " +"commands as before:" msgstr "" +"이제 Prometheus 구성을 편집한 후 Grafana 구성 파일에 대해서도 동일한 작업을 수행합니다. 이전과 마찬가지로 다음 " +"명령 중 하나를 사용하여 파일을 엽니다:" -#: ../../source/how-to-use-differential-privacy.rst:80 +#: ../../source/how-to-monitor-simulation.rst:78 msgid "" -"In addition to the server-side strategy wrapper, the :code:`ClientApp` " -"needs to configure the matching :code:`fixedclipping_mod` to perform the " -"client-side clipping:" -msgstr "" +"Your terminal editor should open and allow you to apply the following " +"configuration as before." 
+msgstr "터미널 편집기가 열리면 이전과 마찬가지로 다음 구성을 적용할 수 있습니다." -#: ../../source/how-to-use-differential-privacy.rst:97 +#: ../../source/how-to-monitor-simulation.rst:94 msgid "" -"To utilize local differential privacy (DP) and add noise to the client " -"model parameters before transmitting them to the server in Flower, you " -"can use the `LocalDpMod`. The following hyperparameters need to be set: " -"clipping norm value, sensitivity, epsilon, and delta." -msgstr "" +"Congratulations, you just downloaded all the necessary software needed " +"for metrics tracking. Now, let’s start it." +msgstr "축하합니다. 매트릭 트레킹에 필요한 모든 소프트웨어를 다운로드하셨습니다. 이제 시작해 보겠습니다." -#: ../../source/how-to-use-differential-privacy.rst:-1 -msgid "local DP mod" -msgstr "" +#: ../../source/how-to-monitor-simulation.rst:98 +msgid "Tracking metrics" +msgstr "매트릭 트래킹" -#: ../../source/how-to-use-differential-privacy.rst:104 -msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" -msgstr "" +#: ../../source/how-to-monitor-simulation.rst:100 +msgid "" +"Before running your Flower simulation, you have to start the monitoring " +"tools you have just installed and configured." +msgstr "Flower 시뮬레이션을 실행하기 전에 방금 설치 및 구성한 모니터링 도구를 시작해야 합니다." -#: ../../source/how-to-use-differential-privacy.rst:122 +#: ../../source/how-to-monitor-simulation.rst:108 msgid "" -"Please note that the order of mods, especially those that modify " -"parameters, is important when using multiple modifiers. Typically, " -"differential privacy (DP) modifiers should be the last to operate on " -"parameters." -msgstr "" +"Please include the following argument in your Python code when starting a" +" simulation." +msgstr "시뮬레이션을 시작할 때 Python 코드에 다음 전달인자를 포함하세요." -#: ../../source/how-to-use-differential-privacy.rst:125 -msgid "Local Training using Privacy Engines" -msgstr "" +#: ../../source/how-to-monitor-simulation.rst:119 +msgid "Now, you are ready to start your workload." +msgstr "이제 워크로드를 시작할 준비가 되었습니다." 
-#: ../../source/how-to-use-differential-privacy.rst:126 +#: ../../source/how-to-monitor-simulation.rst:121 msgid "" -"For ensuring data instance-level privacy during local model training on " -"the client side, consider leveraging privacy engines such as Opacus and " -"TensorFlow Privacy. For examples of using Flower with these engines, " -"please refer to the Flower examples directory (`Opacus " -"`_, `Tensorflow" -" Privacy `_)." -msgstr "" +"Shortly after the simulation starts, you should see the following logs in" +" your terminal:" +msgstr "시뮬레이션이 시작되고 얼마 지나지 않아 터미널에 다음 로그가 표시됩니다:" -#: ../../source/how-to-use-strategies.rst:2 -msgid "Use strategies" -msgstr "" +#: ../../source/how-to-monitor-simulation.rst:127 +#, fuzzy +msgid "You can look at everything at http://127.0.0.1:8265 ." +msgstr "``_ 에서 모든 것을 볼 수 있습니다." -#: ../../source/how-to-use-strategies.rst:4 +#: ../../source/how-to-monitor-simulation.rst:129 msgid "" -"Flower allows full customization of the learning process through the " -":code:`Strategy` abstraction. A number of built-in strategies are " -"provided in the core framework." -msgstr "" +"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" +" lowest option)." +msgstr "Ray 대시보드입니다. 메트릭(왼쪽 패널의 가장 아래 옵션)으로 이동할 수 있습니다." -#: ../../source/how-to-use-strategies.rst:6 +#: ../../source/how-to-monitor-simulation.rst:132 msgid "" -"There are three ways to customize the way Flower orchestrates the " -"learning process on the server side:" +"Or alternatively, you can just see them in Grafana by clicking on the " +"right-up corner, “View in Grafana”. Please note that the Ray dashboard is" +" only accessible during the simulation. After the simulation ends, you " +"can only use Grafana to explore the metrics. You can start Grafana by " +"going to ``http://localhost:3000/``." msgstr "" +"또는 오른쪽 위 모서리인 \"Grafana에서 보기\"를 클릭하여 Grafana에서 바로 확인할 수도 있습니다. Ray 대시보드는 " +"시뮬레이션 중에만 액세스할 수 있다는 점에 유의하세요. 
시뮬레이션이 종료된 후에는 Grafana를 사용하여 메트릭을 탐색할 수만 " +"있습니다. ``http://localhost:3000/``로 이동하여 Grafana를 시작할 수 있습니다." -#: ../../source/how-to-use-strategies.rst:8 -msgid "Use an existing strategy, for example, :code:`FedAvg`" +#: ../../source/how-to-monitor-simulation.rst:137 +#, fuzzy +msgid "" +"After you finish the visualization, stop Prometheus and Grafana. This is " +"important as they will otherwise block, for example port ``3000`` on your" +" machine as long as they are running." msgstr "" +"시각화를 완료한 후에는 Prometheus와 Grafana를 중지합니다. 그렇지 않으면 실행 중인 동안 컴퓨터에서 포트 " +":code:`3000` 등을 차단하므로 이 작업이 중요합니다." -#: ../../source/how-to-use-strategies.rst:9 -#: ../../source/how-to-use-strategies.rst:40 -msgid "Customize an existing strategy with callback functions" -msgstr "" +#: ../../source/how-to-monitor-simulation.rst:147 +msgid "Resource allocation" +msgstr "리소스 할당" -#: ../../source/how-to-use-strategies.rst:10 -#: ../../source/how-to-use-strategies.rst:87 -msgid "Implement a novel strategy" -msgstr "" +#: ../../source/how-to-monitor-simulation.rst:149 +msgid "" +"You must understand how the Ray library works to efficiently allocate " +"system resources to simulation clients on your own." +msgstr "Ray 라이브러리가 어떻게 작동하는지 이해해야 시뮬레이션 클라이언트에 시스템 리소스를 효율적으로 할당할 수 있습니다." -#: ../../source/how-to-use-strategies.rst:14 -msgid "Use an existing strategy" +#: ../../source/how-to-monitor-simulation.rst:152 +msgid "" +"Initially, the simulation (which Ray handles under the hood) starts by " +"default with all the available resources on the system, which it shares " +"among the clients. It doesn't mean it divides it equally among all of " +"them, nor that the model training happens at all of them simultaneously. " +"You will learn more about that in the later part of this blog. You can " +"check the system resources by running the following:" msgstr "" +"처음에 시뮬레이션(Ray가 내부에서 처리하는)은 기본적으로 시스템에서 사용 가능한 모든 리소스를 사용하여 시작되며, 이 리소스는 " +"클라이언트 간에 공유됩니다. 
그렇다고 해서 모든 클라이언트에게 균등하게 분배하거나 모든 클라이언트에서 동시에 모델 학습이 이루어지는" +" 것은 아닙니다. 이에 대한 자세한 내용은 이 블로그의 뒷부분에서 설명합니다. 다음을 실행하여 시스템 리소스를 확인할 수 있습니다:" + +#: ../../source/how-to-monitor-simulation.rst:164 +msgid "In Google Colab, the result you see might be similar to this:" +msgstr "Google Colab에서는 이와 유사한 결과가 표시될 수 있습니다:" -#: ../../source/how-to-use-strategies.rst:16 +#: ../../source/how-to-monitor-simulation.rst:175 msgid "" -"Flower comes with a number of popular federated learning strategies " -"built-in. A built-in strategy can be instantiated as follows:" -msgstr "" +"However, you can overwrite the defaults. When starting a simulation, do " +"the following (you don't need to overwrite all of them):" +msgstr "그러나 기본값을 덮어쓸 수 있습니다. 시뮬레이션을 시작할 때 다음을 수행합니다(모두 덮어쓸 필요는 없음):" + +#: ../../source/how-to-monitor-simulation.rst:195 +msgid "Let’s also specify the resource for a single client." +msgstr "단일 클라이언트에 대한 리소스도 지정해 보겠습니다." -#: ../../source/how-to-use-strategies.rst:25 +#: ../../source/how-to-monitor-simulation.rst:225 msgid "" -"This creates a strategy with all parameters left at their default values " -"and passes it to the :code:`start_server` function. It is usually " -"recommended to adjust a few parameters during instantiation:" +"Now comes the crucial part. Ray will start a new client only when it has " +"all the required resources (such that they run in parallel) when the " +"resources allow." msgstr "" +"이제 중요한 부분이 나옵니다. Ray는 리소스가 허용하는 경우에만 필요한 모든 리소스가 있을 때(병렬로 실행되는 등) 새 " +"클라이언트를 시작합니다." -#: ../../source/how-to-use-strategies.rst:42 +#: ../../source/how-to-monitor-simulation.rst:228 +#, fuzzy msgid "" -"Existing strategies provide several ways to customize their behaviour. " -"Callback functions allow strategies to call user-provided code during " -"execution." -msgstr "" +"In the example above, only one client will be run, so your clients won't " +"run concurrently. 
Setting ``client_num_gpus = 0.5`` would allow running " +"two clients and therefore enable them to run concurrently. Be careful not" +" to require more resources than available. If you specified " +"``client_num_gpus = 2``, the simulation wouldn't start (even if you had 2" +" GPUs but decided to set 1 in ``ray_init_args``)." +msgstr "" +"위의 예에서는 하나의 클라이언트만 실행되므로 클라이언트가 동시에 실행되지 않습니다. :code:`client_num_gpus = " +"0.5` 를 설정하면 두 개의 클라이언트를 실행할 수 있으므로 동시에 실행할 수 있습니다. 사용 가능한 리소스보다 더 많은 리소스를" +" 요구하지 않도록 주의하세요. :code:`client_num_gpus = 2`를 지정하면 시뮬레이션이 시작되지 않습니다(GPU가 " +"2개이지만 :code:`ray_init_args`에서 1개를 설정한 경우에도 마찬가지입니다)." + +#: ../../source/how-to-monitor-simulation.rst:235 ../../source/ref-faq.rst:2 +msgid "FAQ" +msgstr "자주 묻는 질문" -#: ../../source/how-to-use-strategies.rst:45 -msgid "Configuring client fit and client evaluate" -msgstr "" +#: ../../source/how-to-monitor-simulation.rst:237 +msgid "Q: I don't see any metrics logged." +msgstr "질문: 기록된 메트릭이 보이지 않습니다." -#: ../../source/how-to-use-strategies.rst:47 +#: ../../source/how-to-monitor-simulation.rst:239 msgid "" -"The server can pass new configuration values to the client each round by " -"providing a function to :code:`on_fit_config_fn`. The provided function " -"will be called by the strategy and must return a dictionary of " -"configuration key values pairs that will be sent to the client. It must " -"return a dictionary of arbitrary configuration values :code:`client.fit`" -" and :code:`client.evaluate` functions during each round of federated " -"learning." +"A: The timeframe might not be properly set. The setting is in the top " +"right corner (\"Last 30 minutes\" by default). Please change the " +"timeframe to reflect the period when the simulation was running." msgstr "" +"A: 기간이 제대로 설정되지 않았을 수 있습니다. 설정은 오른쪽 상단에 있습니다(기본값은 '지난 30분'). 시뮬레이션이 실행된 " +"기간을 반영하도록 기간을 변경해 주세요." 
-#: ../../source/how-to-use-strategies.rst:75 +#: ../../source/how-to-monitor-simulation.rst:243 msgid "" -"The :code:`on_fit_config_fn` can be used to pass arbitrary configuration " -"values from server to client, and poetentially change these values each " -"round, for example, to adjust the learning rate. The client will receive " -"the dictionary returned by the :code:`on_fit_config_fn` in its own " -":code:`client.fit()` function." +"Q: I see “Grafana server not detected. Please make sure the Grafana " +"server is running and refresh this page” after going to the Metrics tab " +"in Ray Dashboard." msgstr "" +"질문: \"Grafana 서버가 감지되지 않았습니다. Ray 대시보드의 메트릭 탭으로 이동한 후 Grafana 서버가 실행 중인지 " +"확인하고 이 페이지를 새로고침하세요.\"라는 메시지가 표시됩니다." -#: ../../source/how-to-use-strategies.rst:78 +#: ../../source/how-to-monitor-simulation.rst:246 msgid "" -"Similar to :code:`on_fit_config_fn`, there is also " -":code:`on_evaluate_config_fn` to customize the configuration sent to " -":code:`client.evaluate()`" -msgstr "" +"A: You probably don't have Grafana running. Please check the running " +"services" +msgstr "A: Grafana가 실행되고 있지 않을 수 있습니다. 실행 중인 서비스를 확인하세요" -#: ../../source/how-to-use-strategies.rst:81 -msgid "Configuring server-side evaluation" -msgstr "" +#: ../../source/how-to-monitor-simulation.rst:252 +#, fuzzy +msgid "" +"Q: I see \"This site can't be reached\" when going to " +"http://127.0.0.1:8265." +msgstr "Q: ``_로 이동할 때 \"이 사이트에 연결할 수 없습니다.\"라는 메시지가 표시됩니다." -#: ../../source/how-to-use-strategies.rst:83 +#: ../../source/how-to-monitor-simulation.rst:254 msgid "" -"Server-side evaluation can be enabled by passing an evaluation function " -"to :code:`evaluate_fn`." -msgstr "" +"A: Either the simulation has already finished, or you still need to start" +" Prometheus." +msgstr "A: 시뮬레이션이 이미 완료되었거나 아직 Prometheus를 시작해야 합니다." 
-#: ../../source/how-to-use-strategies.rst:89 +#: ../../source/how-to-monitor-simulation.rst:257 +msgid "Resources" +msgstr "리소스" + +#: ../../source/how-to-monitor-simulation.rst:259 +#, fuzzy msgid "" -"Writing a fully custom strategy is a bit more involved, but it provides " -"the most flexibility. Read the `Implementing Strategies `_ guide to learn more." +"Ray Dashboard: https://docs.ray.io/en/latest/ray-observability/getting-" +"started.html" msgstr "" +"Ray 대시보드: ``_" -#: ../../source/index.rst:34 -msgid "Tutorial" -msgstr "" +#: ../../source/how-to-monitor-simulation.rst:261 +#, fuzzy +msgid "Ray Metrics: https://docs.ray.io/en/latest/cluster/metrics.html" +msgstr "Ray 메트릭: ``_" -#: ../../source/index.rst:44 -msgid "Quickstart tutorials" -msgstr "" +#: ../../source/how-to-run-simulations.rst:2 +msgid "Run simulations" +msgstr "시뮬레이션 실행" -#: ../../source/index.rst:74 ../../source/index.rst:78 -msgid "How-to guides" +#: ../../source/how-to-run-simulations.rst:8 +msgid "" +"Simulating Federated Learning workloads is useful for a multitude of use-" +"cases: you might want to run your workload on a large cohort of clients " +"but without having to source, configure and mange a large number of " +"physical devices; you might want to run your FL workloads as fast as " +"possible on the compute systems you have access to without having to go " +"through a complex setup process; you might want to validate your " +"algorithm on different scenarios at varying levels of data and system " +"heterogeneity, client availability, privacy budgets, etc. These are among" +" some of the use-cases where simulating FL workloads makes sense. Flower " +"can accommodate these scenarios by means of its `VirtualClientEngine " +"`_ or " +"VCE." msgstr "" +"Federated 학습 워크로드 시뮬레이션은 다양한 사용 사례에 유용합니다. 
대규모 클라이언트 집단에서 워크로드를 실행하되 많은 " +"수의 물리적 장치를 소싱, 구성 및 관리할 필요가 없는 경우, 복잡한 설정 과정을 거치지 않고도 액세스 가능한 컴퓨팅 시스템에서 " +"최대한 빠르게 FL 워크로드를 실행하려는 경우, 다양한 수준의 데이터 및 시스템 이질성, 클라이언트 가용성, 개인정보 예산 등의 " +"다양한 시나리오에서 알고리즘을 검증하려는 경우 등 여러 가지 사용 사례에 유용합니다. 이러한 사례는 FL 워크로드 시뮬레이션이 " +"적합한 사용 사례 중 일부입니다. Flower는 `VirtualClientEngine `_ 또는 VCE를 통해 이러한 시나리오를 수용할 수 " +"있습니다." -#: ../../source/index.rst:99 -msgid "Legacy example guides" +#: ../../source/how-to-run-simulations.rst:19 +#, fuzzy +msgid "" +"The ``VirtualClientEngine`` schedules, launches and manages `virtual` " +"clients. These clients are identical to `non-virtual` clients (i.e. the " +"ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " +"creating a class inheriting, for example, from `flwr.client.NumPyClient " +"`_ and therefore behave in an " +"identical way. In addition to that, clients managed by the " +"``VirtualClientEngine`` are:" msgstr "" +":code:`VirtualClientEngine`은 `virtual` 클라이언트를 예약, 실행 및 관리합니다. 이러한 클라이언트는 " +"`non-virtual` 클라이언트(예: `flwr.client.start_client `_ 명령을 통해 실행하는 클라이언트)와 동일하며, `flwr.client.NumPyClient `_에서 상속하는 클래스 생성으로 구성될 수 있으므로 동일한 " +"방식으로 동작합니다. 그 외에도 :code:`VirtualClientEngine`에 의해 관리되는 클라이언트는 다음과 같습니다:" -#: ../../source/index.rst:108 ../../source/index.rst:112 -msgid "Explanations" +#: ../../source/how-to-run-simulations.rst:26 +msgid "" +"resource-aware: this means that each client gets assigned a portion of " +"the compute and memory on your system. You as a user can control this at " +"the beginning of the simulation and allows you to control the degree of " +"parallelism of your Flower FL simulation. The fewer the resources per " +"client, the more clients can run concurrently on the same hardware." msgstr "" +"resource-aware: 이는 각 클라이언트가 시스템에서 컴퓨팅 및 메모리의 일부를 할당받는다는 것을 의미합니다. 사용자는 " +"시뮬레이션을 시작할 때 이를 제어할 수 있으며, 이를 통해 Flower FL 시뮬레이션의 병렬 처리 정도를 제어할 수 있습니다. " +"클라이언트당 리소스가 적을수록 동일한 하드웨어에서 더 많은 클라이언트를 동시에 실행할 수 있습니다." 
-#: None:-1 -msgid "API reference" -msgstr "" - -#: ../../source/index.rst:137 -msgid "Reference docs" -msgstr "" - -#: ../../source/index.rst:153 -msgid "Contributor tutorials" +#: ../../source/how-to-run-simulations.rst:31 +#, fuzzy +msgid "" +"self-managed: this means that you as a user do not need to launch clients" +" manually, instead this gets delegated to ``VirtualClientEngine``'s " +"internals." msgstr "" +"self-managed: 이는 사용자가 클라이언트를 수동으로 실행할 필요가 없으며, 대신 " +":code:`VirtualClientEngine`의 내부에 위임된다는 의미입니다." -#: ../../source/index.rst:160 -msgid "Contributor how-to guides" +#: ../../source/how-to-run-simulations.rst:33 +msgid "" +"ephemeral: this means that a client is only materialized when it is " +"required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards," +" releasing the resources it was assigned and allowing in this way other " +"clients to participate." msgstr "" +"ephemeral: 이는 클라이언트가 FL 프로세스에서 필요할 때만 구체화됨을 의미합니다(예: `fit() `_을 수행하기 위해). 객체는 나중에 소멸되어 할당된 리소스를 해제하고" +" 다른 클라이언트가 참여할 수 있도록 허용합니다." -#: ../../source/index.rst:173 -msgid "Contributor explanations" +#: ../../source/how-to-run-simulations.rst:38 +#, fuzzy +msgid "" +"The ``VirtualClientEngine`` implements `virtual` clients using `Ray " +"`_, an open-source framework for scalable Python " +"workloads. In particular, Flower's ``VirtualClientEngine`` makes use of " +"`Actors `_ to spawn " +"`virtual` clients and run their workload." msgstr "" +":code:`VirtualClientEngine`은 확장 가능한 파이썬 워크로드를 위한 오픈 소스 프레임워크인 `Ray " +"`_를 사용하여 `virtual` 클라이언트를 구현합니다. 특히 Flower의 " +":code:`VirtualClientEngine`은 `Actors `_를 사용하여 `virtual` 클라이언트를 생성하고 해당 워크로드를 실행합니다." 
-#: ../../source/index.rst:179 -msgid "Contributor references" -msgstr "" +#: ../../source/how-to-run-simulations.rst:45 +msgid "Launch your Flower simulation" +msgstr "Flower 시뮬레이션 시작" -#: ../../source/index.rst:-1 +#: ../../source/how-to-run-simulations.rst:47 msgid "" -"Check out the documentation of the main Flower Framework enabling easy " -"Python development for Federated Learning." +"Running Flower simulations still require you to define your client class," +" a strategy, and utility functions to download and load (and potentially " +"partition) your dataset. With that out of the way, launching your " +"simulation is done with `start_simulation `_ and a minimal example looks" +" as follows:" msgstr "" +"Flower 시뮬레이션을 실행하려면 여전히 클라이언트 클래스, 전략 및 유틸리티 함수를 정의하여 데이터 세트를 다운로드하고 로드(및" +" 파티션)해야 합니다. 이 작업을 마친 후 시뮬레이션을 시작하려면 `start_simulation `_을 사용하면 되며, 최소한의 예시는 다음과 " +"같습니다:" -#: ../../source/index.rst:2 -msgid "Flower Framework Documentation" -msgstr "" +#: ../../source/how-to-run-simulations.rst:73 +msgid "VirtualClientEngine resources" +msgstr "VirtualClientEngine 리소스" -#: ../../source/index.rst:7 +#: ../../source/how-to-run-simulations.rst:75 +#, fuzzy msgid "" -"Welcome to Flower's documentation. `Flower `_ is a " -"friendly federated learning framework." +"By default the VCE has access to all system resources (i.e. all CPUs, all" +" GPUs, etc) since that is also the default behavior when starting Ray. " +"However, in some settings you might want to limit how many of your system" +" resources are used for simulation. You can do this via the " +"``ray_init_args`` input argument to ``start_simulation`` which the VCE " +"internally passes to Ray's ``ray.init`` command. For a complete list of " +"settings you can configure check the `ray.init " +"`_" +" documentation. Do not set ``ray_init_args`` if you want the VCE to use " +"all your system's CPUs and GPUs." msgstr "" +"기본적으로 VCE는 모든 시스템 리소스(예: 모든 CPU, 모든 GPU 등)에 액세스할 수 있으며, 이는 Ray를 시작할 때의 기본" +" 동작이기도 합니다. 
그러나 일부 설정에서는 시뮬레이션에 사용되는 시스템 리소스의 수를 제한하고 싶을 수 있습니다. 이 설정은 " +"VCE가 내부적으로 Ray의 :code:`ray.init` 명령에 전달하는 :code:`start_simulation`에 대한 " +":code:`ray_init_args` 입력 인수를 통해 수행할 수 있습니다. 구성할 수 있는 전체 설정 목록은 `ray.init " +"`_" +" 설명서를 확인하세요. VCE가 시스템의 모든 CPU와 GPU를 사용하도록 하려면 :code:`ray_init_args`를 설정하지" +" 마세요." -#: ../../source/index.rst:11 -msgid "Join the Flower Community" -msgstr "" +#: ../../source/how-to-run-simulations.rst:97 +msgid "Assigning client resources" +msgstr "클라이언트 리소스 할당" -#: ../../source/index.rst:13 +#: ../../source/how-to-run-simulations.rst:99 +#, fuzzy msgid "" -"The Flower Community is growing quickly - we're a friendly group of " -"researchers, engineers, students, professionals, academics, and other " -"enthusiasts." +"By default the ``VirtualClientEngine`` assigns a single CPU core (and " +"nothing else) to each virtual client. This means that if your system has " +"10 cores, that many virtual clients can be concurrently running." msgstr "" +"기본적으로 :code:`VirtualClientEngine`은 각 가상 클라이언트에 단일 CPU 코어를 할당합니다(그 외에는 " +"아무것도 할당하지 않음). 즉, 시스템에 코어가 10개인 경우 그만큼 많은 가상 클라이언트를 동시에 실행할 수 있습니다." -#: ../../source/index.rst:15 -msgid "Join us on Slack" +#: ../../source/how-to-run-simulations.rst:103 +msgid "" +"More often than not, you would probably like to adjust the resources your" +" clients get assigned based on the complexity (i.e. compute and memory " +"footprint) of your FL workload. You can do so when starting your " +"simulation by setting the argument `client_resources` to " +"`start_simulation `_." +" Two keys are internally used by Ray to schedule and spawn workloads (in " +"our case Flower clients):" msgstr "" +"대부분의 경우 FL 워크로드의 복잡성(즉, 컴퓨팅 및 메모리 사용량)에 따라 클라이언트에 할당되는 리소스를 조정하고 싶을 것입니다." +" 시뮬레이션을 시작할 때 `client_resources` argument를 `start_simulation `_로 설정하여 이를 수행할 수 있습니다. 
Ray는 " +"내부적으로 두 개의 키를 사용하여 워크로드(이 경우 Flower 클라이언트)를 스케줄링하고 스폰합니다:" -#: ../../source/index.rst:23 -msgid "Flower Framework" -msgstr "" +#: ../../source/how-to-run-simulations.rst:110 +#, fuzzy +msgid "``num_cpus`` indicates the number of CPU cores a client would get." +msgstr ":code:`num_cpus`는 클라이언트에서 사용할 수 있는 CPU 코어 수를 나타냅니다." -#: ../../source/index.rst:25 -msgid "" -"The user guide is targeted at researchers and developers who want to use " -"Flower to bring existing machine learning workloads into a federated " -"setting. One of Flower's design goals was to make this simple. Read on to" -" learn more." -msgstr "" +#: ../../source/how-to-run-simulations.rst:111 +#, fuzzy +msgid "``num_gpus`` indicates the **ratio** of GPU memory a client gets assigned." +msgstr ":code:`num_gpus`는 클라이언트에 할당되는 GPU 메모리의 **비율**을 나타냅니다." -#: ../../source/index.rst:30 -msgid "Tutorials" -msgstr "" +#: ../../source/how-to-run-simulations.rst:113 +msgid "Let's see a few examples:" +msgstr "몇 가지 예를 살펴보겠습니다:" -#: ../../source/index.rst:32 +#: ../../source/how-to-run-simulations.rst:132 +#, fuzzy msgid "" -"A learning-oriented series of federated learning tutorials, the best " -"place to start." +"While the ``client_resources`` can be used to control the degree of " +"concurrency in your FL simulation, this does not stop you from running " +"dozens, hundreds or even thousands of clients in the same round and " +"having orders of magnitude more `dormant` (i.e. not participating in a " +"round) clients. Let's say you want to have 100 clients per round but your" +" system can only accommodate 8 clients concurrently. The " +"``VirtualClientEngine`` will schedule 100 jobs to run (each simulating a " +"client sampled by the strategy) and then will execute them in a resource-" +"aware manner in batches of 8." msgstr "" +"code:`client_resources`를 사용하여 FL 시뮬레이션의 동시성 정도를 제어할 수 있지만, 동일한 라운드에서 수십, " +"수백 또는 수천 개의 클라이언트를 실행하고 훨씬 더 많은 '휴면'(즉, 라운드에 참여하지 않는) 클라이언트를 보유하는 것을 막을 " +"수는 없습니다. 
라운드당 100명의 클라이언트를 받고 싶지만 시스템이 동시에 8명의 클라이언트만 수용할 수 있다고 가정해 봅시다. " +"code:`VirtualClientEngine`은 실행할 100개의 작업(각각 전략에서 샘플링한 클라이언트를 시뮬레이션)을 예약한 " +"다음 리소스 인식 방식으로 8개씩 일괄적으로 실행합니다." -#: ../../source/index.rst:61 +#: ../../source/how-to-run-simulations.rst:140 msgid "" -"QUICKSTART TUTORIALS: :doc:`PyTorch ` | " -":doc:`TensorFlow ` | :doc:`🤗 Transformers" -" ` | :doc:`JAX ` | :doc:`Pandas ` | :doc:`fastai " -"` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | :doc:`XGBoost ` | " -":doc:`Android ` | :doc:`iOS `" -msgstr "" - -#: ../../source/index.rst:63 -msgid "We also made video tutorials for PyTorch:" +"To understand all the intricate details on how resources are used to " +"schedule FL clients and how to define custom resources, please take a " +"look at the `Ray documentation `_." msgstr "" +"리소스가 FL 클라이언트를 예약하는 데 사용되는 방법과 사용자 지정 리소스를 정의하는 방법에 대한 모든 복잡한 세부 사항을 " +"이해하려면 'Ray 문서 '를 참조하세요." -#: ../../source/index.rst:68 -msgid "And TensorFlow:" -msgstr "" +#: ../../source/how-to-run-simulations.rst:145 +msgid "Simulation examples" +msgstr "시뮬레이션 예제" -#: ../../source/index.rst:76 +#: ../../source/how-to-run-simulations.rst:147 msgid "" -"Problem-oriented how-to guides show step-by-step how to achieve a " -"specific goal." +"A few ready-to-run complete examples for Flower simulation in " +"Tensorflow/Keras and PyTorch are provided in the `Flower repository " +"`_. You can run them on Google Colab too:" msgstr "" +"Tensorflow/Keras와 파이토치에서 바로 실행할 수 있는 몇 가지 Flower 시뮬레이션 예제는 `Flower 레포지토리 " +"`_에서 제공됩니다. Google Colab에서도 실행할 수 있습니다:" -#: ../../source/index.rst:110 +#: ../../source/how-to-run-simulations.rst:151 msgid "" -"Understanding-oriented concept guides explain and discuss key topics and " -"underlying ideas behind Flower and collaborative AI." +"`Tensorflow/Keras Simulation " +"`_: 100 clients collaboratively train a MLP model on MNIST." msgstr "" +"`Tensorflow/Keras 시뮬레이션 " +"`_: 100개의 클라이언트가 공동으로 MNIST에서 MLP 모델을 훈련합니다." 
-#: ../../source/index.rst:120 -msgid "References" +#: ../../source/how-to-run-simulations.rst:154 +msgid "" +"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " +"MNIST." msgstr "" +"파이토치 시뮬레이션 `_: 100개의 클라이언트가 공동으로 MNIST에서 CNN 모델을 훈련합니다." -#: ../../source/index.rst:122 -msgid "Information-oriented API reference and other reference material." -msgstr "" +#: ../../source/how-to-run-simulations.rst:159 +msgid "Multi-node Flower simulations" +msgstr "멀티 노드 Flower 시뮬레이션" -#: ../../source/index.rst:131::1 -msgid ":py:obj:`flwr `\\" +#: ../../source/how-to-run-simulations.rst:161 +#, fuzzy +msgid "" +"Flower's ``VirtualClientEngine`` allows you to run FL simulations across " +"multiple compute nodes. Before starting your multi-node simulation ensure" +" that you:" msgstr "" +"Flower의 :code:`VirtualClientEngine`을 사용하면 여러 컴퓨팅 노드에서 FL 시뮬레이션을 실행할 수 " +"있습니다. 멀티 노드 시뮬레이션을 시작하기 전에 다음 사항을 확인하세요:" -#: ../../source/index.rst:131::1 flwr:1 of -msgid "Flower main package." -msgstr "" +#: ../../source/how-to-run-simulations.rst:164 +msgid "Have the same Python environment in all nodes." +msgstr "모든 노드에서 동일한 Python 환경을 유지합니다." -#: ../../source/index.rst:148 -msgid "Contributor docs" -msgstr "" +#: ../../source/how-to-run-simulations.rst:165 +msgid "Have a copy of your code (e.g. your entire repo) in all nodes." +msgstr "모든 노드에 코드 사본(예: 전체 레포지토리)을 보관하세요." -#: ../../source/index.rst:150 +#: ../../source/how-to-run-simulations.rst:166 msgid "" -"The Flower community welcomes contributions. The following docs are " -"intended to help along the way." 
+"Have a copy of your dataset in all nodes (more about this in " +":ref:`simulation considerations `)" msgstr "" +"모든 노드에 데이터 세트의 사본을 보유하세요(자세한 내용은 :ref:`simulation considerations " +"`에서 확인하세요)" -#: ../../source/ref-api-cli.rst:2 -msgid "Flower CLI reference" +#: ../../source/how-to-run-simulations.rst:168 +#, fuzzy +msgid "" +"Pass ``ray_init_args={\"address\"=\"auto\"}`` to `start_simulation `_ so the " +"``VirtualClientEngine`` attaches to a running Ray instance." msgstr "" +":code:`ray_init_args={\"address\"=\"auto\"}`를 `start_simulation `_에 전달하여 " +":code:`VirtualClientEngine`이 실행 중인 Ray 인스턴스에 연결되도록 합니다." -#: ../../source/ref-api-cli.rst:7 -msgid "flower-simulation" +#: ../../source/how-to-run-simulations.rst:171 +#, fuzzy +msgid "" +"Start Ray on you head node: on the terminal type ``ray start --head``. " +"This command will print a few lines, one of which indicates how to attach" +" other nodes to the head node." msgstr "" +"헤드 노드에서 Ray 시작: 터미널에서 :code:`ray start --head`를 입력합니다. 이 명령은 몇 줄을 출력하며, 그" +" 중 하나는 다른 노드를 헤드 노드에 연결하는 방법을 나타냅니다." -#: ../../source/ref-api-cli.rst:17 -msgid "flower-superlink" +#: ../../source/how-to-run-simulations.rst:174 +#, fuzzy +msgid "" +"Attach other nodes to the head node: copy the command shown after " +"starting the head and execute it on terminal of a new node: for example " +"``ray start --address='192.168.1.132:6379'``" msgstr "" +"헤드 노드에 다른 노드 연결: 헤드를 시작한 후 표시된 명령어을 복사하여 새 노드의 터미널에서 실행합니다: 예: :code:`ray" +" start --address='192.168.1.132:6379'`" -#: ../../source/ref-api-cli.rst:27 -msgid "flower-client-app" -msgstr "" +#: ../../source/how-to-run-simulations.rst:178 +msgid "" +"With all the above done, you can run your code from the head node as you " +"would if the simulation was running on a single node." +msgstr "위의 모든 작업이 완료되면 단일 노드에서 시뮬레이션을 실행할 때와 마찬가지로 헤드 노드에서 코드를 실행할 수 있습니다." 
-#: ../../source/ref-api-cli.rst:37 -msgid "flower-server-app" +#: ../../source/how-to-run-simulations.rst:181 +#, fuzzy +msgid "" +"Once your simulation is finished, if you'd like to dismantle your cluster" +" you simply need to run the command ``ray stop`` in each node's terminal " +"(including the head node)." msgstr "" +"시뮬레이션이 완료되면 클러스터를 해체하려면 각 노드(헤드 노드 포함)의 터미널에서 :code:`ray stop` 명령을 실행하기만 " +"하면 됩니다." -#: ../../source/ref-api/flwr.rst:2 -msgid "flwr" -msgstr "" +#: ../../source/how-to-run-simulations.rst:185 +msgid "Multi-node simulation good-to-know" +msgstr "멀티 노드 시뮬레이션에 대해 알아두면 좋은 사항" -#: ../../source/ref-api/flwr.rst:25 ../../source/ref-api/flwr.server.rst:51 -msgid "Modules" -msgstr "" +#: ../../source/how-to-run-simulations.rst:187 +msgid "" +"Here we list a few interesting functionality when running multi-node FL " +"simulations:" +msgstr "여기에서는 멀티 노드 FL 시뮬레이션을 실행할 때 흥미로운 몇 가지 기능을 나열합니다:" -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.client `\\" +#: ../../source/how-to-run-simulations.rst:189 +#, fuzzy +msgid "" +"User ``ray status`` to check all nodes connected to your head node as " +"well as the total resources available to the ``VirtualClientEngine``." msgstr "" +"사용자는 :code:`ray status`를 통해 헤드 노드에 연결된 모든 노드와 " +":code:`VirtualClientEngine`에 사용 가능한 총 리소스를 확인할 수 있습니다." -#: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of -msgid "Flower client." -msgstr "" +#: ../../source/how-to-run-simulations.rst:192 +#, fuzzy +msgid "" +"When attaching a new node to the head, all its resources (i.e. all CPUs, " +"all GPUs) will be visible by the head node. This means that the " +"``VirtualClientEngine`` can schedule as many `virtual` clients as that " +"node can possible run. In some settings you might want to exclude certain" +" resources from the simulation. 
You can do this by appending `--num-" +"cpus=` and/or `--num-gpus=` in " +"any ``ray start`` command (including when starting the head)" +msgstr "" +"새 노드를 헤드에 연결하면 해당 노드의 모든 리소스(즉, 모든 CPU, 모든 GPU)가 헤드 노드에 표시됩니다. 즉, " +":code:`VirtualClientEngine`은 해당 노드가 실행할 수 있는 만큼의 `가상` 클라이언트를 예약할 수 있습니다. " +"일부 설정에서는 시뮬레이션에서 특정 리소스를 제외하고 싶을 수 있습니다. 모든 :code:`ray start` 명령(헤드 시작 시 " +"포함)에 `--num-cpus=` 및/또는 `--num-" +"gpus=`를 추가하여 이 작업을 수행하면 됩니다" + +#: ../../source/how-to-run-simulations.rst:202 +msgid "Considerations for simulations" +msgstr "시뮬레이션 시 고려 사항" -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.common `\\" -msgstr "" +#: ../../source/how-to-run-simulations.rst:206 +msgid "" +"We are actively working on these fronts so to make it trivial to run any " +"FL workload with Flower simulation." +msgstr "Flower 시뮬레이션으로 모든 FL 워크로드를 간편하게 실행할 수 있도록 이러한 측면에서 적극적으로 노력하고 있습니다." -#: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of -msgid "Common components shared between server and client." +#: ../../source/how-to-run-simulations.rst:209 +msgid "" +"The current VCE allows you to run Federated Learning workloads in " +"simulation mode whether you are prototyping simple scenarios on your " +"personal laptop or you want to train a complex FL pipeline across " +"multiple high-performance GPU nodes. While we add more capabilities to " +"the VCE, the points below highlight some of the considerations to keep in" +" mind when designing your FL pipeline with Flower. We also highlight a " +"couple of current limitations in our implementation." msgstr "" +"현재 VCE를 사용하면 개인 노트북에서 간단한 시나리오를 프로토타이핑하든, 여러 고성능 GPU 노드에서 복잡한 FL 파이프라인을 " +"훈련하든 상관없이 시뮬레이션 모드에서 Federated 학습 워크로드를 실행할 수 있습니다. VCE에 더 많은 기능을 추가하는 " +"동안, 아래에서는 Flower로 FL 파이프라인을 설계할 때 염두에 두어야 할 몇 가지 사항을 강조합니다. 또한 현재 구현에서 몇 " +"가지 제한 사항을 강조합니다." 
-#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.server `\\" -msgstr "" +#: ../../source/how-to-run-simulations.rst:217 +msgid "GPU resources" +msgstr "GPU 리소스" -#: ../../source/ref-api/flwr.rst:35::1 -#: ../../source/ref-api/flwr.server.rst:40::1 flwr.server:1 -#: flwr.server.server.Server:1 of -msgid "Flower server." +#: ../../source/how-to-run-simulations.rst:219 +#, fuzzy +msgid "" +"The VCE assigns a share of GPU memory to a client that specifies the key " +"``num_gpus`` in ``client_resources``. This being said, Ray (used " +"internally by the VCE) is by default:" msgstr "" +"VCE는 :code:`client_resources`에서 :code:`num_gpus` 키를 지정하는 클라이언트에 GPU 메모리 " +"공유를 할당합니다. 즉, (VCE에서 내부적으로 사용하는) Ray가 기본적으로 사용됩니다:" -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.simulation `\\" +#: ../../source/how-to-run-simulations.rst:222 +#, fuzzy +msgid "" +"not aware of the total VRAM available on the GPUs. This means that if you" +" set ``num_gpus=0.5`` and you have two GPUs in your system with different" +" (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients " +"concurrently." msgstr "" +"GPU에서 사용 가능한 총 VRAM을 인식하지 못합니다. 즉, 시스템에 서로 다른(예: 32GB와 8GB) VRAM 용량을 가진 두" +" 개의 GPU가 있고 :code:`num_gpus=0.5`를 설정하면 둘 다 동시에 2개의 클라이언트를 실행하게 됩니다." -#: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of -msgid "Flower simulation." +#: ../../source/how-to-run-simulations.rst:225 +msgid "" +"not aware of other unrelated (i.e. not created by the VCE) workloads are " +"running on the GPU. Two takeaways from this are:" msgstr "" +"관련 없는(즉, VCE에 의해 생성되지 않은) 다른 워크로드가 GPU에서 실행되고 있는지 알지 못합니다. 
여기서 두 가지 시사점을 " +"얻을 수 있습니다:" -#: ../../source/ref-api/flwr.client.rst:2 -msgid "client" +#: ../../source/how-to-run-simulations.rst:228 +msgid "" +"Your Flower server might need a GPU to evaluate the `global model` after " +"aggregation (by instance when making use of the `evaluate method `_)" msgstr "" +"집계 후 '글로벌 모델'을 평가하려면 Flower 서버에 GPU가 필요할 수 있습니다(예: `evaluate method `_를 사용할 때)" -#: ../../source/ref-api/flwr.client.rst:13 -#: ../../source/ref-api/flwr.common.rst:13 -#: ../../source/ref-api/flwr.server.rst:13 -#: ../../source/ref-api/flwr.simulation.rst:13 -msgid "Functions" +#: ../../source/how-to-run-simulations.rst:231 +#, fuzzy +msgid "" +"If you want to run several independent Flower simulations on the same " +"machine you need to mask-out your GPUs with " +"``CUDA_VISIBLE_DEVICES=\"\"`` when launching your experiment." msgstr "" +"동일한 머신에서 여러 개의 독립적인 Flower 시뮬레이션을 실행하려면, 실험을 시작할 때 " +":code:`CUDA_VISIBLE_DEVICES=\"\"`로 GPU를 마스킹해야 합니다." -#: ../../source/ref-api/flwr.client.rst:25::1 -msgid ":py:obj:`run_client_app `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:235 +#, fuzzy +msgid "" +"In addition, the GPU resource limits passed to ``client_resources`` are " +"not `enforced` (i.e. they can be exceeded) which can result in the " +"situation of client using more VRAM than the ratio specified when " +"starting the simulation." msgstr "" +"또한 :code:`client_resources`에 전달된 GPU 리소스 제한이 '강제'되지 않아(즉, 초과할 수 있음) " +"클라이언트가 시뮬레이션을 시작할 때 지정된 비율보다 더 많은 VRAM을 사용하는 상황이 발생할 수 있습니다." -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.supernode.app.run_client_app:1 of -msgid "Run Flower client app." 
-msgstr "" +#: ../../source/how-to-run-simulations.rst:240 +msgid "TensorFlow with GPUs" +msgstr "GPU를 사용한 TensorFlow" -#: ../../source/ref-api/flwr.client.rst:25::1 -msgid ":py:obj:`run_supernode `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:242 +msgid "" +"When `using a GPU with TensorFlow " +"`_ nearly your entire GPU memory of" +" all your GPUs visible to the process will be mapped. This is done by " +"TensorFlow for optimization purposes. However, in settings such as FL " +"simulations where we want to split the GPU into multiple `virtual` " +"clients, this is not a desirable mechanism. Luckily we can disable this " +"default behavior by `enabling memory growth " +"`_." msgstr "" +"`TensorFlow와 함께 GPU를 사용 `_하면 프로세스에 " +"보이는 모든 GPU의 거의 전체 GPU 메모리가 매핑됩니다. 이는 최적화 목적으로 TensorFlow에서 수행됩니다. 그러나 " +"GPU를 여러 개의 '가상' 클라이언트로 분할하려는 FL 시뮬레이션과 같은 설정에서는 이는 바람직한 메커니즘이 아닙니다. 다행히도 " +"'메모리 증가 활성화 " +"`_'를 통해 " +"이 기본 동작을 비활성화할 수 있습니다." -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.supernode.app.run_supernode:1 of -msgid "Run Flower SuperNode." +#: ../../source/how-to-run-simulations.rst:249 +#, fuzzy +msgid "" +"This would need to be done in the main process (which is where the server" +" would run) and in each Actor created by the VCE. By means of " +"``actor_kwargs`` we can pass the reserved key `\"on_actor_init_fn\"` in " +"order to specify a function to be executed upon actor initialization. In " +"this case, to enable GPU growth for TF workloads. It would look as " +"follows:" msgstr "" +"이 작업은 메인 프로세스(서버가 실행되는 곳)와 VCE에서 생성한 각 액터에서 수행해야 합니다. " +":code:`actor_kwargs`를 통해 예약 키 `\"on_actor_init_fn\"`을 전달하여 액터 초기화 시 실행할 " +"함수를 지정할 수 있습니다. 이 경우 TF 워크로드에 대한 GPU 증가를 활성화합니다. 
다음과 같이 보입니다:" -#: ../../source/ref-api/flwr.client.rst:25::1 +#: ../../source/how-to-run-simulations.rst:272 msgid "" -":py:obj:`start_client `\\ \\(\\*\\, " -"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" +"This is precisely the mechanism used in `Tensorflow/Keras Simulation " +"`_ example." msgstr "" +"이것이 바로`Tensorflow/Keras Simulation " +"`_ 예제에서 사용된 메커니즘입니다." -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.app.start_client:1 of -msgid "Start a Flower client node which connects to a Flower server." -msgstr "" +#: ../../source/how-to-run-simulations.rst:276 +msgid "Multi-node setups" +msgstr "멀티 노드 설정" -#: ../../source/ref-api/flwr.client.rst:25::1 +#: ../../source/how-to-run-simulations.rst:278 msgid "" -":py:obj:`start_numpy_client `\\ \\(\\*\\," -" server\\_address\\, client\\)" +"The VCE does not currently offer a way to control on which node a " +"particular `virtual` client is executed. In other words, if more than a " +"single node have the resources needed by a client to run, then any of " +"those nodes could get the client workload scheduled onto. Later in the FL" +" process (i.e. in a different round) the same client could be executed by" +" a different node. Depending on how your clients access their datasets, " +"this might require either having a copy of all dataset partitions on all " +"nodes or a dataset serving mechanism (e.g. using nfs, a database) to " +"circumvent data duplication." msgstr "" +"VCE는 현재 특정 '가상' 클라이언트를 어느 노드에서 실행할지 제어하는 방법을 제공하지 않습니다. 즉, 클라이언트가 실행하는 데 " +"필요한 리소스가 하나 이상의 노드에 있는 경우 해당 노드 중 어느 노드에나 클라이언트 워크로드가 예약될 수 있습니다. FL 프로세스" +" 후반부(즉, 다른 라운드에서)에는 동일한 클라이언트가 다른 노드에서 실행될 수 있습니다. 클라이언트가 데이터 세트에 액세스하는 " +"방식에 따라 모든 노드에 모든 데이터 세트 파티션의 복사본을 보유하거나 데이터 중복을 피하기 위해 데이터 세트 제공 메커니즘(예: " +"nfs, 데이터베이스 사용)을 사용해야 할 수 있습니다." -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.app.start_numpy_client:1 of -msgid "Start a Flower NumPyClient which connects to a gRPC server." 
+#: ../../source/how-to-run-simulations.rst:286 +msgid "" +"By definition virtual clients are `stateless` due to their ephemeral " +"nature. A client state can be implemented as part of the Flower client " +"class but users need to ensure this saved to persistent storage (e.g. a " +"database, disk) and that can be retrieve later by the same client " +"regardless on which node it is running from. This is related to the point" +" above also since, in some way, the client's dataset could be seen as a " +"type of `state`." msgstr "" +"정의상 가상 클라이언트는 임시적 특성으로 인해 '상태 없음'입니다. 클라이언트 상태는 Flower 클라이언트 클래스의 일부로 구현할" +" 수 있지만, 사용자는 이를 영구 저장소(예: 데이터베이스, 디스크)에 저장하여 나중에 실행 중인 노드와 관계없이 동일한 " +"클라이언트가 검색할 수 있도록 해야 합니다. 이는 어떤 식으로든 클라이언트의 데이터 세트가 일종의 '상태'로 볼 수 있기 때문에 " +"위의 요점과도 관련이 있습니다." -#: ../../source/ref-api/flwr.client.rst:27 -#: ../../source/ref-api/flwr.common.rst:32 -#: ../../source/ref-api/flwr.server.rst:28 -#: ../../source/ref-api/flwr.server.strategy.rst:17 -#: ../../source/ref-api/flwr.server.workflow.rst:17 -msgid "Classes" -msgstr "" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:2 +msgid "Save and load model checkpoints" +msgstr "모델 체크포인트 저장 및 로드" -#: ../../source/ref-api/flwr.client.rst:34::1 -msgid ":py:obj:`Client `\\ \\(\\)" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:4 +msgid "" +"Flower does not automatically save model updates on the server-side. This" +" how-to guide describes the steps to save (and load) model checkpoints in" +" Flower." msgstr "" +"Flower는 서버 측에서 모델 업데이트를 자동으로 저장하지 않습니다. 이 사용법 가이드에서는 Flower에서 모델 체크포인트를 " +"저장(및 로드)하는 단계에 대해 설명합니다." -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.client.Client:1 of -msgid "Abstract base class for Flower clients." 
-msgstr "" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:8 +msgid "Model checkpointing" +msgstr "모델 체크포인트" -#: ../../source/ref-api/flwr.client.rst:34::1 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:10 +#, fuzzy msgid "" -":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " -"mods\\]\\)" -msgstr "" +"Model updates can be persisted on the server-side by customizing " +"``Strategy`` methods. Implementing custom strategies is always an option," +" but for many cases it may be more convenient to simply customize an " +"existing strategy. The following code example defines a new " +"``SaveModelStrategy`` which customized the existing built-in ``FedAvg`` " +"strategy. In particular, it customizes ``aggregate_fit`` by calling " +"``aggregate_fit`` in the base class (``FedAvg``). It then continues to " +"save returned (aggregated) weights before it returns those aggregated " +"weights to the caller (i.e., the server):" +msgstr "" +":code:`Strategy` 메소드를 사용자 지정하여 서버 측에서 모델 업데이트를 지속할 수 있습니다. 사용자 지정 전략을 " +"구현하는 것은 항상 옵션이지만 대부분의 경우 기존 전략을 간단히 사용자 지정하는 것이 더 편리할 수 있습니다. 다음 코드 예시는 " +"기존의 기본 제공 :code:`FedAvg` 전략을 사용자 지정한 새로운 :code:`SaveModelStrategy`를 " +"정의합니다. 특히, 기본 클래스(:code:`FedAvg`)에서 :code:`aggregate_fit`을 호출하여 " +":code:`aggregate_fit`을 사용자 지정합니다. 그런 다음 호출자(즉, 서버)에게 집계된 가중치를 반환하기 전에 " +"반환된(집계된) 가중치를 계속 저장합니다:" + +#: ../../source/how-to-save-and-load-model-checkpoints.rst:53 +msgid "Save and load PyTorch checkpoints" +msgstr "파이토치 체크포인트 저장 및 로드" -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.client_app.ClientApp:1 of -msgid "Flower ClientApp." +#: ../../source/how-to-save-and-load-model-checkpoints.rst:55 +msgid "" +"Similar to the previous example but with a few extra steps, we'll show " +"how to store a PyTorch checkpoint we'll use the ``torch.save`` function. 
" +"Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be" +" transformed into a list of NumPy ``ndarray``'s, then those are " +"transformed into the PyTorch ``state_dict`` following the ``OrderedDict``" +" class structure." msgstr "" +"이전 예제와 비슷하지만 몇 가지 단계가 추가되어 ``torch.save`` 함수를 사용하여 파이토치 체크포인트를 저장하는 방법을 " +"보여드리겠습니다. 먼저, ``aggregate_fit``은 ``Parameters`` 객체를 반환하는데, 이 객체는 NumPy " +"``ndarray``의 목록으로 변환되어야 하며, ``OrderedDict`` 클래스 구조에 따라 파이토치 " +"``state_dict``로 변환됩니다." -#: ../../source/ref-api/flwr.client.rst:34::1 -msgid ":py:obj:`NumPyClient `\\ \\(\\)" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:98 +msgid "" +"To load your progress, you simply append the following lines to your " +"code. Note that this will iterate over all saved checkpoints and load the" +" latest one:" msgstr "" +"진행 상황을 로드하려면 코드에 다음 줄을 추가하기만 하면 됩니다. 이렇게 하면 저장된 모든 체크포인트를 반복하고 최신 체크포인트를 " +"로드합니다:" -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.numpy_client.NumPyClient:1 of -msgid "Abstract base class for Flower clients using NumPy." -msgstr "" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:111 +msgid "" +"Return/use this object of type ``Parameters`` wherever necessary, such as" +" in the ``initial_parameters`` when defining a ``Strategy``." +msgstr "``전략``을 정의할 때 ``초기_파라미터``와 같이 필요한 경우 ``파라미터`` 유형의 이 객체를 반환/사용합니다." 
-#: flwr.client.client.Client:1 flwr.client.numpy_client.NumPyClient:1 -#: flwr.server.client_manager.ClientManager:1 -#: flwr.server.driver.driver.Driver:1 flwr.server.strategy.strategy.Strategy:1 -#: of -msgid "Bases: :py:class:`~abc.ABC`" -msgstr "" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:2 +msgid "Upgrade to Flower 1.0" +msgstr "Flower 1.0으로 업그레이드" -#: ../../source/ref-api/flwr.client.Client.rst:15 -#: ../../source/ref-api/flwr.client.ClientApp.rst:15 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:15 -#: ../../source/ref-api/flwr.common.Array.rst:15 -#: ../../source/ref-api/flwr.common.ClientMessage.rst:15 -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 -#: ../../source/ref-api/flwr.common.Context.rst:15 -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 -#: ../../source/ref-api/flwr.common.Error.rst:15 -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 -#: ../../source/ref-api/flwr.common.EventType.rst:15 -#: ../../source/ref-api/flwr.common.FitIns.rst:15 -#: ../../source/ref-api/flwr.common.FitRes.rst:15 -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:15 -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 -#: ../../source/ref-api/flwr.common.Message.rst:15 -#: ../../source/ref-api/flwr.common.MessageType.rst:15 -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 -#: ../../source/ref-api/flwr.common.Metadata.rst:15 -#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 -#: ../../source/ref-api/flwr.common.Parameters.rst:15 -#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 -#: ../../source/ref-api/flwr.common.RecordSet.rst:15 -#: ../../source/ref-api/flwr.common.ServerMessage.rst:15 -#: ../../source/ref-api/flwr.common.Status.rst:15 -#: 
../../source/ref-api/flwr.server.ClientManager.rst:15 -#: ../../source/ref-api/flwr.server.Driver.rst:15 -#: ../../source/ref-api/flwr.server.History.rst:15 -#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 -#: ../../source/ref-api/flwr.server.Server.rst:15 -#: ../../source/ref-api/flwr.server.ServerApp.rst:15 -#: ../../source/ref-api/flwr.server.ServerConfig.rst:15 -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 -#: 
../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 -#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 -#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 -#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 -msgid "Methods" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:4 +msgid "" +"Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable" +" foundation for future growth. Compared to Flower 0.19 (and other 0.x " +"series releases), there are a few breaking changes that make it necessary" +" to change the code of existing 0.x-series projects." msgstr "" +"Flower 1.0이 출시되었습니다. 새로운 기능과 함께 Flower 1.0은 향후 성장을 위한 안정적인 기반을 제공합니다. " +"Flower 0.19(및 다른 0.x 시리즈 릴리스)와 비교했을 때 기존 0.x 시리즈 프로젝트의 코드를 변경해야 하는 몇 가지 " +"획기적인 변경 사항이 있습니다." -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`evaluate `\\ \\(ins\\)" -msgstr "" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +#: ../../source/how-to-upgrade-to-flower-next.rst:63 +msgid "Install update" +msgstr "업데이트 설치" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.evaluate:1 -#: flwr.client.numpy_client.NumPyClient.evaluate:1 of -msgid "Evaluate the provided parameters using the locally held dataset." -msgstr "" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 +msgid "" +"Here's how to update an existing installation to Flower 1.0 using either " +"pip or Poetry:" +msgstr "다음은 pip 또는 Poetry를 사용하여 기존 설치를 Flower 1.0으로 업데이트하는 방법입니다:" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`fit `\\ \\(ins\\)" -msgstr "" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +msgid "pip: add ``-U`` when installing." +msgstr "pip: 설치할 때 ``-U``를 추가합니다." 
-#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.fit:1 of -msgid "Refine the provided parameters using the locally held dataset." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:16 +msgid "" +"``python -m pip install -U flwr`` (when using ``start_server`` and " +"``start_client``)" msgstr "" +"``python -m pip install -U flwr``(``start_server`` 및 ``start_client``를 " +"사용하는 경우)" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`get_context `\\ \\(\\)" -msgstr "" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 +msgid "" +"``python -m pip install -U 'flwr[simulation]'`` (when using " +"``start_simulation``)" +msgstr "``python -m pip install -U 'flwr[simulation]'``(``start_simulation`` 사용 시)" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.get_context:1 -#: flwr.client.numpy_client.NumPyClient.get_context:1 of -msgid "Get the run context from this client." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 +msgid "" +"Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " +"reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " +"before running ``poetry install``)." msgstr "" +"Poetry: ``pyproject.toml``에서 ``flwr`` dependency를 업데이트한 다음 다시 " +"설치하세요(``poetry install``을 실행하기 전에 ``rm poetry.lock``을 통해 ``poetry.lock``을 삭제하는" +" 것을 잊지 마세요)."
-#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`get_parameters `\\ \\(ins\\)" -msgstr "" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:23 +msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" +msgstr "``flwr = \"^1.0.0\"``(``start_server`` 및 ``start_client`` 사용 시)" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.get_parameters:1 -#: flwr.client.numpy_client.NumPyClient.get_parameters:1 of -msgid "Return the current local model parameters." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 +msgid "" +"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " +"using ``start_simulation``)" msgstr "" +"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` " +"(``start_simulation`` 사용 시)" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`get_properties `\\ \\(ins\\)" -msgstr "" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:28 +#: ../../source/how-to-upgrade-to-flower-next.rst:121 +msgid "Required changes" +msgstr "필수 변경 사항" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.get_properties:1 of -msgid "Return set of client's properties." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:30 +msgid "The following breaking changes require manual updates." +msgstr "다음과 같은 주요 변경 사항에는 수동 업데이트가 필요합니다." + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 +msgid "General" +msgstr "일반" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:35 +msgid "" +"Pass all arguments as keyword arguments (not as positional arguments). " +"Here's an example:" +msgstr "모든 전달인자를 위치 전달인자가 아닌 키워드 전달인자로 전달합니다. 
다음은 예시입니다:" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:38 +msgid "" +"Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " +"FlowerClient())``" msgstr "" +"Flower 0.19 (위치 전달인자): ``start_client(\"127.0.0.1:8080\", " +"FlowerClient())``" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`set_context `\\ \\(context\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 +msgid "" +"Flower 1.0 (keyword arguments): " +"``start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())``" msgstr "" +"Flower 1.0 (키워드 전달인자): ``start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())``" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.set_context:1 -#: flwr.client.numpy_client.NumPyClient.set_context:1 of -msgid "Apply a run context to this client." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 +#: ../../source/ref-api/flwr.client.Client.rst:2 +msgid "Client" +msgstr "클라이언트" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 +msgid "" +"Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " +"``def get_parameters(self, config):``" msgstr "" +"``NumPyClient``의 서브클래스: ``def get_parameters(self):``를 ``def " +"get_parameters(self, config):``로 변경합니다" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`to_client `\\ \\(\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 +msgid "" +"Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " +"get_parameters(self, ins: GetParametersIns):``" msgstr "" +"``클라이언트``의 서브클래스: ``def get_parameters(self):``를 ``def " +"get_parameters(self, ins: GetParametersIns):``로 변경합니다" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.to_client:1 of -msgid "Return client (itself)." 
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +msgid "Strategies / ``start_server`` / ``start_simulation``" +msgstr "전략 / ``start_server`` / ``start_simulation``" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:53 +msgid "" +"Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " +"``start_simulation``. Here's an example:" msgstr "" +"Dictionary 대신 ``ServerConfig``를 ``start_server`` 및 ``start_simulation``에 " +"전달합니다. 다음은 예제입니다:" -#: ../../source/ref-api/flwr.client.Client.rst:46 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:46 -#: ../../source/ref-api/flwr.common.Array.rst:28 -#: ../../source/ref-api/flwr.common.ClientMessage.rst:25 -#: ../../source/ref-api/flwr.common.Code.rst:19 -#: ../../source/ref-api/flwr.common.Context.rst:25 -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 -#: ../../source/ref-api/flwr.common.Error.rst:25 -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 -#: ../../source/ref-api/flwr.common.EventType.rst:165 -#: ../../source/ref-api/flwr.common.FitIns.rst:25 -#: ../../source/ref-api/flwr.common.FitRes.rst:25 -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:25 -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 -#: ../../source/ref-api/flwr.common.Message.rst:37 -#: ../../source/ref-api/flwr.common.MessageType.rst:25 -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 -#: ../../source/ref-api/flwr.common.Metadata.rst:25 -#: ../../source/ref-api/flwr.common.Parameters.rst:25 -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 -#: ../../source/ref-api/flwr.common.RecordSet.rst:25 -#: ../../source/ref-api/flwr.common.ServerMessage.rst:25 -#: ../../source/ref-api/flwr.common.Status.rst:25 -#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 -#: 
../../source/ref-api/flwr.server.ServerConfig.rst:25 -msgid "Attributes" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:56 +msgid "" +"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " +"\"round_timeout\": 600.0}, ...)``" msgstr "" +"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " +"\"round_timeout\": 600.0}, ...)``" -#: flwr.client.client.Client.evaluate:1::1 of -msgid ":py:obj:`context `\\" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 +msgid "" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" msgstr "" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: ../../source/ref-api/flwr.common.Parameters.rst:2 -#: flwr.client.app.start_client flwr.client.app.start_numpy_client -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.numpy_client.NumPyClient.evaluate -#: flwr.client.numpy_client.NumPyClient.fit -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.context.Context flwr.common.message.Error -#: flwr.common.message.Message flwr.common.message.Message.create_error_reply -#: flwr.common.message.Message.create_reply flwr.common.message.Metadata -#: flwr.common.record.parametersrecord.Array flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.ClientManager.unregister -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.unregister -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: 
flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.bulyan.Bulyan -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.fedadagrad.FedAdagrad -#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg -#: flwr.server.strategy.fedavg_android.FedAvgAndroid -#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt -#: flwr.server.strategy.fedprox.FedProx -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg -#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow -#: flwr.simulation.app.start_simulation -#: flwr.simulation.run_simulation.run_simulation of -msgid "Parameters" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 +msgid "" +"Replace ``num_rounds=1`` in ``start_simulation`` with the new " +"``config=ServerConfig(...)`` (see previous item)" msgstr "" +"``start_simulation``의 ``num_rounds=1``을 새로운 ``config=ServerConfig(...)``로" +" 바꿉니다(이전 항목 참조)" -#: flwr.client.client.Client.evaluate:3 of +#: 
../../source/how-to-upgrade-to-flower-1.0.rst:63 msgid "" -"The evaluation instructions containing (global) model parameters received" -" from the server and a dictionary of configuration values used to " -"customize the local evaluation process." +"Remove ``force_final_distributed_eval`` parameter from calls to " +"``start_server``. Distributed evaluation on all clients can be enabled by" +" configuring the strategy to sample all clients for evaluation after the " +"last round of training." msgstr "" +"'start_server`` 호출에서 ``force_final_distributed_eval`` 매개변수를 제거합니다. 모든 " +"클라이언트에 대한 분산 평가는 마지막 훈련 라운드 후 평가를 위해 모든 클라이언트를 샘플링하도록 전략을 구성하여 활성화할 수 " +"있습니다." -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.numpy_client.NumPyClient.evaluate -#: flwr.client.numpy_client.NumPyClient.fit -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.message.Message.create_reply flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.num_available -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.SimpleClientManager.num_available -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: 
flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.simulation.app.start_simulation of -msgid "Returns" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:66 +msgid "Rename parameter/ndarray conversion functions:" +msgstr "매개변수/ndarray 변환 함수의 이름을 바꿉니다:" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" +msgstr "``parameters_to_weights`` --> ``parameters_to_ndarrays``" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:69 +msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" +msgstr "``weights_to_parameters`` --> ``ndarrays_to_parameters``" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 +msgid "" +"Strategy initialization: if the strategy relies on the default values for" +" ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " +"``fraction_evaluate`` manually to ``0.1``. Projects that do not manually " +"create a strategy (by calling ``start_server`` or ``start_simulation`` " +"without passing a strategy instance) should now manually initialize " +"FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." msgstr "" +"전략 초기화: 전략이 ``fraction_fit`` 및 ``fraction_evaluate``의 기본값에 의존하는 경우 " +"``fraction_fit`` 및 ``fraction_evaluate``를 ``0.1``로 수동 설정합니다. 전략을 수동으로 " +"생성하지 않는 프로젝트(전략 인스턴스를 전달하지 않고 ``start_server`` 또는 ``start_simulation``을 " +"호출하여)는 이제 ``fraction_fit`` 및 ``fraction_evaluate``를 ``0.1``로 설정하여 FedAvg를" +" 수동으로 초기화해야 합니다." 
-#: flwr.client.client.Client.evaluate:8 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" +msgstr "기본 제공 전략 매개변수의 이름을 바꿉니다(예: ``FedAvg``):" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:79 +msgid "``fraction_eval`` --> ``fraction_evaluate``" +msgstr "``fraction_eval`` --> ``fraction_evaluate``" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 +msgid "``min_eval_clients`` --> ``min_evaluate_clients``" +msgstr "``min_eval_clients`` --> ``min_evaluate_clients``" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:81 +msgid "``eval_fn`` --> ``evaluate_fn``" +msgstr "``eval_fn`` --> ``evaluate_fn``" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:83 msgid "" -"The evaluation result containing the loss on the local dataset and other " -"details such as the number of local data examples used for evaluation." +"Rename ``rnd`` to ``server_round``. This impacts multiple methods and " +"functions, for example, ``configure_fit``, ``aggregate_fit``, " +"``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``." msgstr "" +"``rnd``의 이름을 ``server_round``로 바꿉니다. 이는 여러 메서드 및 함수(예: ``configure_fit``," +" ``aggregate_fit``, ``configure_evaluate``, ``aggregate_evaluate`` 및 " +"``evaluate_fn``)에 영향을 미칩니다." 
-#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.message.Message.create_reply flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.num_available -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.SimpleClientManager.num_available -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.simulation.app.start_simulation of -msgid "Return type" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:86 +msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" +msgstr "``server_round`` 및 ``config``를 ``evaluate_fn``에 추가합니다:" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:88 +msgid "" +"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" +"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" -#: flwr.client.client.Client.fit:3 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:90 msgid "" -"The training 
instructions containing (global) model parameters received " -"from the server and a dictionary of configuration values used to " -"customize the local training process." +"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " +"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " +"Scalar]]]:``" msgstr "" +"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " +"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " +"Scalar]]]:``" -#: flwr.client.client.Client.fit:8 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:94 +msgid "Custom strategies" +msgstr "사용자 정의 전략" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:96 msgid "" -"The training result containing updated parameters and other details such " -"as the number of local training examples used for training." +"The type of parameter ``failures`` has changed from " +"``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " +"BaseException]]`` (in ``aggregate_fit``) and " +"``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in " +"``aggregate_evaluate``)" +msgstr "" +"매개변수 ``failures``의 유형이 ``List[BaseException]``에서 " +"``List[Union[Tuple[ClientProxy, FitRes], " +"BaseException]]``(``aggregate_fit``에서) 및 ``List[Union[Tuple[ClientProxy,"
+" EvaluateRes], BaseException]]``(``aggregate_evaluate``에서)로 변경되었습니다" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:100 +msgid "" +"The ``Strategy`` method ``evaluate`` now receives the current round of " +"federated learning/evaluation as the first parameter:" +msgstr "이제 ``Strategy`` 메서드 ``evaluate``는 현재 federated 학습/평가 라운드를 첫 번째 파라미터로 받습니다:" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:103 +msgid "" +"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" +msgstr "" +"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" + +#: 
../../source/how-to-upgrade-to-flower-1.0.rst:105 +msgid "" +"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " +"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" +"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " +"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:109 +msgid "Optional improvements" +msgstr "선택적 개선 사항" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:111 +msgid "" +"Along with the necessary changes above, there are a number of potential " +"improvements that just became possible:" +msgstr "위의 필수 변경 사항과 함께 방금 가능한 여러 가지 잠재적 개선 사항이 있습니다:" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:114 +msgid "" +"Remove \"placeholder\" methods from subclasses of ``Client`` or " +"``NumPyClient``. If you, for example, use server-side evaluation, then " +"empty placeholder implementations of ``evaluate`` are no longer " +"necessary." +msgstr "" +"``Client`` 또는 ``NumPyClient``의 서브 클래스에서 \"placeholder\" 메서드를 제거합니다. 예를 들어" +" 서버 측 평가를 사용하는 경우 ``evaluate``의 빈 자리 표시자 구현은 더 이상 필요하지 않습니다." + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:117 +msgid "" +"Configure the round timeout via ``start_simulation``: " +"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " +"round_timeout=600.0), ...)``" +msgstr "" +"``start_simulation``을 통해 라운드 타임아웃을 구성합니다: ``start_simulation(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:121 +#: ../../source/how-to-upgrade-to-flower-next.rst:349 +msgid "Further help" +msgstr "추가 도움말" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:123 +msgid "" +"Most official `Flower code examples " +"`_ are already updated" +" to Flower 1.0, they can serve as a reference for using the Flower 1.0 " +"API. If there are further questions, `join the Flower Slack " +"`_ and use the channel ``#questions``." 
+msgstr "" +"대부분의 공식 ``Flower code 예제 " +"`_는 이미 Flower 1.0으로 " +"업데이트되어 있으며, Flower 1.0 API를 사용하기 위한 참고 자료로 사용할 수 있습니다. 더 궁금한 점이 있다면 ``플라워" +" 슬랙 `_에 가입하여 ``#questions`` 채널을 이용하세요." + +#: ../../source/how-to-upgrade-to-flower-next.rst:2 +msgid "Upgrade to Flower Next" +msgstr "Flower Next 업그레이드" + +#: ../../source/how-to-upgrade-to-flower-next.rst:4 +msgid "" +"Welcome to the migration guide for updating Flower to Flower Next! " +"Whether you're a seasoned user or just getting started, this guide will " +"help you smoothly transition your existing setup to take advantage of the" +" latest features and improvements in Flower Next, starting from version " +"1.8." +msgstr "" +"Flower에서 Flower Next로의 업데이트를 위한 이동 가이드에 오신 것을 환영합니다! 이 가이드는 숙련된 사용자든 이제 막" +" 시작한 사용자든 상관없이 기존 설정을 원활하게 전환하여 버전 1.8부터 Flower Next의 최신 기능 및 개선 사항을 활용할 " +"수 있도록 도와드립니다." + +#: ../../source/how-to-upgrade-to-flower-next.rst:11 +msgid "" +"This guide shows how to reuse pre-``1.8`` Flower code with minimum code " +"changes by using the *compatibility layer* in Flower Next. In another " +"guide, we will show how to run Flower Next end-to-end with pure Flower " +"Next APIs." +msgstr "" +"이 가이드에서는 Flower Next의 *호환성 레이어*를 사용하여 최소한의 코드 변경으로 ``1.8`` 이전의 Flower 코드를" +" 재사용하는 방법을 보여줍니다. 다른 가이드에서는 순수한 Flower Next API로 Flower Next를 end-to-end로" +" 실행하는 방법을 보여드리겠습니다." + +#: ../../source/how-to-upgrade-to-flower-next.rst:15 +msgid "Let's dive in!" +msgstr "자세히 알아봅시다!" 
+ +#: ../../source/how-to-upgrade-to-flower-next.rst:68 +msgid "" +"Here's how to update an existing installation of Flower to Flower Next " +"with ``pip``:" +msgstr "기존에 설치된 Flower to Flower Next를 ``pip``으로 업데이트하는 방법은 다음과 같습니다:" + +#: ../../source/how-to-upgrade-to-flower-next.rst:74 +msgid "or if you need Flower Next with simulation:" +msgstr "또는 시뮬레이션이 포함된 Flower Next가 필요한 경우:" + +#: ../../source/how-to-upgrade-to-flower-next.rst:80 +msgid "" +"Ensure you set the following version constraint in your " +"``requirements.txt``" +msgstr "``requirements.txt``에서 다음 버전 제약 조건을 설정했는지 확인하세요" + +#: ../../source/how-to-upgrade-to-flower-next.rst:90 +msgid "or ``pyproject.toml``:" +msgstr "또는 ``pyproject.toml``:" + +#: ../../source/how-to-upgrade-to-flower-next.rst:101 +msgid "Using Poetry" +msgstr "Poetry 사용" + +#: ../../source/how-to-upgrade-to-flower-next.rst:103 +msgid "" +"Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " +"(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " +"running ``poetry install``)." +msgstr "" +"``pyproject.toml``에서 ``flwr`` 의존성를 업데이트한 다음 다시 설치하세요(``poetry install``을 " +"실행하기 전에 ``rm poetry.lock``을 통해 ``poetry.lock``을 삭제하는 것을 잊지 마세요)." + +#: ../../source/how-to-upgrade-to-flower-next.rst:106 +msgid "" +"Ensure you set the following version constraint in your " +"``pyproject.toml``:" +msgstr "``pyproject.toml``에 다음 버전 제약 조건을 설정했는지 확인하세요:" + +#: ../../source/how-to-upgrade-to-flower-next.rst:123 +msgid "" +"In Flower Next, the *infrastructure* and *application layers* have been " +"decoupled. Instead of starting a client in code via ``start_client()``, " +"you create a |clientapp_link|_ and start it via the command line. Instead" +" of starting a server in code via ``start_server()``, you create a " +"|serverapp_link|_ and start it via the command line. The long-running " +"components of server and client are called SuperLink and SuperNode. 
The " +"following non-breaking changes that require manual updates and allow you " +"to run your project both in the traditional way and in the Flower Next " +"way:" +msgstr "" +"Flower Next에서는 *infrastructure*와 *application layers*가 분리되었습니다. 코드에서 " +"``start_client()``를 통해 클라이언트를 시작하는 대신, 명령줄을 통해 |clientapp_link|_를 생성하여 " +"시작합니다. 코드에서 ``start_server()``를 통해 서버를 시작하는 대신 |serverapp_link|_를 생성하고 " +"명령줄을 통해 서버를 시작합니다. 서버와 클라이언트의 장기 실행 컴포넌트를 SuperLink와 SuperNode라고 합니다. 수동 " +"업데이트가 필요하며 기존 방식과 Flower Next 방식 모두에서 프로젝트를 실행할 수 있는 non-breaking 변경 " +"사항은 다음과 같습니다:" + +#: ../../source/how-to-upgrade-to-flower-next.rst:132 +msgid "|clientapp_link|_" +msgstr "|clientapp_link|_" + +#: ../../source/how-to-upgrade-to-flower-next.rst:134 +msgid "" +"Wrap your existing client with |clientapp_link|_ instead of launching it " +"via |startclient_link|_. Here's an example:" +msgstr "" +"|startclient_link|_를 통해 실행하는 대신 기존 클라이언트를 |clientapp_link|_로 래핑하세요. 다음은 " +"예시입니다:" + +#: ../../source/how-to-upgrade-to-flower-next.rst:157 +msgid "|serverapp_link|_" +msgstr "|serverapp_link|_" + +#: ../../source/how-to-upgrade-to-flower-next.rst:159 +msgid "" +"Wrap your existing strategy with |serverapp_link|_ instead of starting " +"the server via |startserver_link|_. Here's an example:" +msgstr "" +"서버를 시작하려면 |startserver_link|_를 통해 서버를 시작하는 대신 기존 전략을 |serverapp_link|_로 " +"래핑하세요. 다음은 예시입니다:" + +#: ../../source/how-to-upgrade-to-flower-next.rst:180 +msgid "Deployment" +msgstr "배포" + +#: ../../source/how-to-upgrade-to-flower-next.rst:182 +msgid "" +"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " +"in sequence, |flowernext_clientapp_link|_ (2x) and " +"|flowernext_serverapp_link|_. There is no need to execute `client.py` and" +" `server.py` as Python scripts." +msgstr "" +"실행하기 전에 |flowernext_superlink_link|_를 사용하여 ``SuperLink``를 실행한 후 " +"|flowernext_clientapp_link|_(2회) 및 |flowernext_serverapp_link|_를 순서대로 " +"실행합니다. 
'client.py'와 'server.py'를 Python 스크립트로 실행할 필요는 없습니다." + +#: ../../source/how-to-upgrade-to-flower-next.rst:185 +msgid "" +"Here's an example to start the server without HTTPS (only for " +"prototyping):" +msgstr "다음은 HTTPS 없이 서버를 시작하는 예제입니다(프로토타이핑용으로만 사용):" + +#: ../../source/how-to-upgrade-to-flower-next.rst:201 +msgid "" +"Here's another example to start with HTTPS. Use the ``--ssl-ca-" +"certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line " +"options to pass paths to (CA certificate, server certificate, and server " +"private key)." +msgstr "" +"다음은 HTTPS로 시작하는 또 다른 예제입니다. '`--ssl-ca-certfile``, '`--ssl-certfile``, " +"'`--ssl-keyfile`` 명령줄 옵션을 사용하여 (CA 인증서, 서버 인증서 및 서버 개인 키)의 경로를 전달합니다." + +#: ../../source/how-to-upgrade-to-flower-next.rst:229 +msgid "Simulation in CLI" +msgstr "CLI 시뮬레이션" + +#: ../../source/how-to-upgrade-to-flower-next.rst:231 +msgid "" +"Wrap your existing client and strategy with |clientapp_link|_ and " +"|serverapp_link|_, respectively. There is no need to use |startsim_link|_" +" anymore. Here's an example:" +msgstr "" +"기존 클라이언트와 전략을 각각 |clientapp_link|_와 |serverapp_link|_로 래핑하세요. 더 이상 " +"|startsim_link|_를 사용할 필요가 없습니다. 다음은 예시입니다:" + +#: ../../source/how-to-upgrade-to-flower-next.rst:264 +msgid "" +"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " +"``client_app`` object in the code instead of executing the Python script." +" Here's an example (assuming the ``server_app`` and ``client_app`` " +"objects are in a ``sim.py`` module):" +msgstr "" +"CLI에서 |flower_simulation_link|_를 실행하고 Python 스크립트를 실행하는 대신 코드에서 " +"``server_app`` / ``client_app`` 개체를 가리키세요. 다음은 예제입니다(``server_app`` 및 " +"``client_app`` 객체가 ``sim.py`` 모듈에 있다고 가정):" + +#: ../../source/how-to-upgrade-to-flower-next.rst:281 +msgid "" +"Set default resources for each |clientapp_link|_ using the ``--backend-" +"config`` command line argument instead of setting the " +"``client_resources`` argument in |startsim_link|_. 
Here's an example:" +msgstr "" +"|startsim_link|_에서 ``client_resources`` 인수를 설정하는 대신 ``--backend-config`` " +"명령줄 인수를 사용하여 각 |clientapp_link|_에 대한 기본 리소스를 설정하세요. 다음은 예시입니다:" + +#: ../../source/how-to-upgrade-to-flower-next.rst:305 +msgid "Simulation in a Notebook" +msgstr "Notebook에서 시뮬레이션" + +#: ../../source/how-to-upgrade-to-flower-next.rst:307 +msgid "" +"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's " +"an example:" +msgstr "notebook에서 |startsim_link|_ 대신 |runsim_link|_를 실행하세요. 다음은 예시입니다:" + +#: ../../source/how-to-upgrade-to-flower-next.rst:351 +msgid "" +"Some official `Flower code examples `_ " +"are already updated to Flower Next so they can serve as a reference for " +"using the Flower Next API. If there are further questions, `join the " +"Flower Slack `_ and use the channel " +"``#questions``. You can also `participate in Flower Discuss " +"`_ where you can find us answering questions," +" or share and learn from others about migrating to Flower Next." +msgstr "" +"일부 공식 ``Flower 코드 예제 `_는 이미 플라워 넥스트에 " +"업데이트되어 있으므로 플라워 넥스트 API를 사용하는 데 참고할 수 있습니다. 더 궁금한 점이 있다면 ``플라워 슬랙 " +"`_에 가입하고 ``#questions`` 채널을 이용하세요. 또한, " +"``Flower Discuss `_에 참여하여 질문에 대한 답변을 확인하거나 다른" +" 사람들과 Flower Next로의 이동에 대해 공유하고 배울 수 있습니다." + +#: ../../source/how-to-upgrade-to-flower-next.rst:358 +msgid "Important" +msgstr "중요" + +#: ../../source/how-to-upgrade-to-flower-next.rst:360 +msgid "" +"As we continuously enhance Flower Next at a rapid pace, we'll be " +"periodically updating this guide. Please feel free to share any feedback " +"with us!" +msgstr "" +"Flower Next는 빠른 속도로 지속적으로 개선되고 있으므로 이 가이드는 주기적으로 업데이트될 예정입니다. 피드백이 있으면 " +"언제든지 공유해 주세요!" + +#: ../../source/how-to-upgrade-to-flower-next.rst:366 +msgid "Happy migrating! 🚀" +msgstr "행복한 마이그레이션! 
🚀" + +#: ../../source/how-to-use-built-in-mods.rst:2 +msgid "Use Built-in Mods" +msgstr "기본 제공 모드 사용" + +#: ../../source/how-to-use-built-in-mods.rst:4 +msgid "" +"**Note: This tutorial covers experimental features. The functionality and" +" interfaces may change in future versions.**" +msgstr "**참고: 이 튜토리얼은 실험적인 기능을 다룹니다. 기능 및 인터페이스는 향후 버전에서 변경될 수 있습니다.**" + +#: ../../source/how-to-use-built-in-mods.rst:7 +msgid "" +"In this tutorial, we will learn how to utilize built-in mods to augment " +"the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) " +"allow us to perform operations before and after a task is processed in " +"the ``ClientApp``." +msgstr "" +"이 튜토리얼에서는 내장 모드를 활용하여 ``ClientApp``의 동작을 보강하는 방법을 배우겠습니다. " +"Mods(Modifiers라고도 함)를 사용하면 ``ClientApp``에서 작업이 처리되기 전과 후에 작업을 수행할 수 있습니다." + +#: ../../source/how-to-use-built-in-mods.rst:12 +msgid "What are Mods?" +msgstr "Mods란 무엇인가요?" + +#: ../../source/how-to-use-built-in-mods.rst:14 +msgid "" +"A Mod is a callable that wraps around a ``ClientApp``. It can manipulate " +"or inspect the incoming ``Message`` and the resulting outgoing " +"``Message``. The signature for a ``Mod`` is as follows:" +msgstr "" +"Mod는 ``ClientApp``을 감싸는 콜러블입니다. 들어오는 ``Message``와 그 결과로 나가는 ``Message``를 " +"조작하거나 검사할 수 있습니다. ``Mod``의 시그니처는 다음과 같습니다:" + +#: ../../source/how-to-use-built-in-mods.rst:23 +msgid "A typical mod function might look something like this:" +msgstr "일반적인 mod 함수는 다음과 같은 모습일 수 있습니다:" + +#: ../../source/how-to-use-built-in-mods.rst:36 +msgid "Using Mods" +msgstr "Mods 사용" + +#: ../../source/how-to-use-built-in-mods.rst:38 +msgid "To use mods in your ``ClientApp``, you can follow these steps:" +msgstr "``ClientApp``에서 mods를 사용하려면 다음 단계를 따르세요:" + +#: ../../source/how-to-use-built-in-mods.rst:41 +msgid "1. Import the required mods" +msgstr "1. 
필요한 mods를 가져옵니다" + +#: ../../source/how-to-use-built-in-mods.rst:43 +msgid "First, import the built-in mod you intend to use:" +msgstr "먼저 사용하려는 기본 제공 mod를 가져옵니다:" + +#: ../../source/how-to-use-built-in-mods.rst:51 +msgid "2. Define your client function" +msgstr "2. 클라이언트 기능 정의" + +#: ../../source/how-to-use-built-in-mods.rst:53 +msgid "" +"Define your client function (``client_fn``) that will be wrapped by the " +"mod(s):" +msgstr "mod(s)로 래핑할 클라이언트 함수('``client_fn``)를 정의합니다:" + +#: ../../source/how-to-use-built-in-mods.rst:62 +msgid "3. Create the ``ClientApp`` with mods" +msgstr "3. mods로 ``ClientApp``을 생성합니다" + +#: ../../source/how-to-use-built-in-mods.rst:64 +msgid "" +"Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " +"argument. The order in which you provide the mods matters:" +msgstr "" +"``ClientApp``을 생성하고 mods를 ``mods`` argument에 목록으로 전달합니다. mods를 제공하는 순서가 " +"중요합니다:" + +#: ../../source/how-to-use-built-in-mods.rst:78 +msgid "Order of execution" +msgstr "실행 순서" + +#: ../../source/how-to-use-built-in-mods.rst:80 +msgid "" +"When the ``ClientApp`` runs, the mods are executed in the order they are " +"provided in the list:" +msgstr "``ClientApp``이 실행되면 목록에 제공된 순서대로 모드가 실행됩니다:" + +#: ../../source/how-to-use-built-in-mods.rst:83 +msgid "``example_mod_1`` (outermost mod)" +msgstr "``example_mod_1``(가장 바깥쪽 mod)" + +#: ../../source/how-to-use-built-in-mods.rst:84 +msgid "``example_mod_2`` (next mod)" +msgstr "``example_mod_2`` (다음 mod)" + +#: ../../source/how-to-use-built-in-mods.rst:85 +msgid "" +"Message handler (core function that handles the incoming ``Message`` and " +"returns the outgoing ``Message``)" +msgstr "Message handler(들어오는 ``Message``를 처리하고 나가는 ``Message``를 반환하는 핵심 함수)" + +#: ../../source/how-to-use-built-in-mods.rst:87 +msgid "``example_mod_2`` (on the way back)" +msgstr "``example_mod_2``(돌아가는 방법)" + +#: ../../source/how-to-use-built-in-mods.rst:88 +msgid "``example_mod_1`` (outermost mod on the way back)" +msgstr 
"``example_mod_1``(돌아가는 방법에 가장 바깥쪽 모드)" + +#: ../../source/how-to-use-built-in-mods.rst:90 +msgid "" +"Each mod has a chance to inspect and modify the incoming ``Message`` " +"before passing it to the next mod, and likewise with the outgoing " +"``Message`` before returning it up the stack." +msgstr "" +"각 mod는 다음 mod로 전달하기 전에 들어오는 ``Message``를 검사하고 수정할 기회가 있으며, 스택 위로 반환하기 전에 " +"나가는 ``Message``도 마찬가지로 검사하고 수정할 수 있습니다." + +#: ../../source/how-to-use-built-in-mods.rst:97 +msgid "" +"By following this guide, you have learned how to effectively use mods to " +"enhance your ``ClientApp``'s functionality. Remember that the order of " +"mods is crucial and affects how the input and output are processed." +msgstr "" +"이 가이드를 따라 mods를 효과적으로 사용하여 ``ClientApp``의 기능을 향상시키는 방법을 배웠습니다. mods 순서는 " +"매우 중요하며 입력과 출력이 처리되는 방식에 영향을 미친다는 점을 기억하세요." + +#: ../../source/how-to-use-built-in-mods.rst:101 +msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" +msgstr "Mods를 통해 더욱 강력하고 유연한 ``ClientApp``을 구축해 보세요!" + +#: ../../source/how-to-use-differential-privacy.rst:2 +msgid "Use Differential Privacy" +msgstr "차분 개인정보 보호 사용" + +#: ../../source/how-to-use-differential-privacy.rst:4 +msgid "" +"This guide explains how you can utilize differential privacy in the " +"Flower framework. If you are not yet familiar with differential privacy, " +"you can refer to :doc:`explanation-differential-privacy`." +msgstr "" +"이 가이드에서는 Flower 프레임워크에서 차분 개인정보 보호 기능을 활용하는 방법을 설명합니다. 차분 개인정보 보호에 대해 아직 " +"익숙하지 않은 경우 :doc:`explanation-differential-privacy`를 참조하세요." + +#: ../../source/how-to-use-differential-privacy.rst:10 +msgid "" +"Differential Privacy in Flower is in a preview phase. If you plan to use " +"these features in a production environment with sensitive data, feel free" +" contact us to discuss your requirements and to receive guidance on how " +"to best use these features." +msgstr "" +"Flower의 차분 개인정보 보호는 현재 프리뷰 단계에 있습니다. 
민감한 데이터가 있는 프로덕션 환경에서 이러한 기능을 사용할 " +"계획이라면 언제든지 문의하여 요구 사항을 논의하고 이러한 기능을 가장 잘 사용하는 방법에 대한 안내를 받으세요." + +#: ../../source/how-to-use-differential-privacy.rst:17 +#, fuzzy +msgid "" +"This approach consists of two separate phases: clipping of the updates " +"and adding noise to the aggregated model. For the clipping phase, Flower " +"framework has made it possible to decide whether to perform clipping on " +"the server side or the client side." +msgstr "" +"이 접근 방식은 업데이트 클리핑과 집계된 모델에 노이즈 추가라는 두 가지 단계로 구성됩니다. 클리핑 단계의 경우, Flower " +"프레임워크는 클리핑을 서버 측에서 수행할지 클라이언트 측에서 수행할지 결정할 수 있도록 했습니다." + +#: ../../source/how-to-use-differential-privacy.rst:21 +msgid "" +"**Server-side Clipping**: This approach has the advantage of the server " +"enforcing uniform clipping across all clients' updates and reducing the " +"communication overhead for clipping values. However, it also has the " +"disadvantage of increasing the computational load on the server due to " +"the need to perform the clipping operation for all clients." +msgstr "" +"**Server-side Clipping**: 이 방식은 서버가 모든 클라이언트의 업데이트에 대해 균일한 클리핑을 적용하고 클리핑 " +"값에 대한 통신 오버헤드를 줄일 수 있다는 장점이 있습니다. 하지만 모든 클라이언트에 대해 클리핑 작업을 수행해야 하기 때문에 " +"서버의 계산 부하가 증가한다는 단점도 있습니다." + +#: ../../source/how-to-use-differential-privacy.rst:26 +msgid "" +"**Client-side Clipping**: This approach has the advantage of reducing the" +" computational overhead on the server. However, it also has the " +"disadvantage of lacking centralized control, as the server has less " +"control over the clipping process." +msgstr "" +"**Client-side Clipping**: 이 방식은 서버의 계산 오버헤드를 줄일 수 있다는 장점이 있습니다. 하지만 서버가 " +"클리핑 프로세스에 대한 통제력이 떨어지기 때문에 centralized 제어가 부족하다는 단점도 있습니다." 
+ +#: ../../source/how-to-use-differential-privacy.rst:31 +msgid "Server-side Clipping" +msgstr "서버 측 클리핑" + +#: ../../source/how-to-use-differential-privacy.rst:33 +#, fuzzy +msgid "" +"For central DP with server-side clipping, there are two ``Strategy`` " +"classes that act as wrappers around the actual ``Strategy`` instance (for" +" example, ``FedAvg``). The two wrapper classes are " +"``DifferentialPrivacyServerSideFixedClipping`` and " +"``DifferentialPrivacyServerSideAdaptiveClipping`` for fixed and adaptive " +"clipping." +msgstr "" +"서버 측 클리핑이 있는 중앙 DP의 경우, 실제 :code:`Strategy` 인스턴스를 감싸는 래퍼 역할을 하는 두 개의 " +":code:`Strategy` 클래스가 있습니다(예: :code:`FedAvg`). 두 개의 래퍼 클래스는 고정 및 적응형 클리핑을" +" 위한 :code:`DifferentialPrivacyServerSideFixedClipping`과 " +":code:`DifferentialPrivacyServerSideAdaptiveClipping`입니다." + +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "server side clipping" +msgstr "서버 측 클리핑" + +#: ../../source/how-to-use-differential-privacy.rst:43 +#, fuzzy +msgid "" +"The code sample below enables the ``FedAvg`` strategy to use server-side " +"fixed clipping using the ``DifferentialPrivacyServerSideFixedClipping`` " +"wrapper class. The same approach can be used with " +"``DifferentialPrivacyServerSideAdaptiveClipping`` by adjusting the " +"corresponding input parameters." +msgstr "" +"아래 코드 샘플은 :code:`FedAvg` 전략이 " +":code:`DifferentialPrivacyServerSideFixedClipping` 래퍼 클래스를 사용하여 서버 측 고정 " +"클리핑을 사용할 수 있도록 합니다. 해당 입력 매개변수를 조정하여 " +":code:`DifferentialPrivacyServerSideAdaptiveClipping`과 동일한 접근 방식을 사용할 수 " +"있습니다." + +#: ../../source/how-to-use-differential-privacy.rst:64 +msgid "Client-side Clipping" +msgstr "클라이언트 측 클리핑" + +#: ../../source/how-to-use-differential-privacy.rst:66 +#, fuzzy +msgid "" +"For central DP with client-side clipping, the server sends the clipping " +"value to selected clients on each round. Clients can use existing Flower " +"``Mods`` to perform the clipping. 
Two mods are available for fixed and " +"adaptive client-side clipping: ``fixedclipping_mod`` and " +"``adaptiveclipping_mod`` with corresponding server-side wrappers " +"``DifferentialPrivacyClientSideFixedClipping`` and " +"``DifferentialPrivacyClientSideAdaptiveClipping``." +msgstr "" +"클라이언트 측 클리핑이 있는 중앙 DP의 경우 서버는 각 라운드마다 선택한 클라이언트에 클리핑 값을 보냅니다. 클라이언트는 기존 " +"Flower :code:`Mods`를 사용하여 클리핑을 수행할 수 있습니다. 고정 및 적응형 클라이언트 측 클리핑에는 두 가지 " +"모드를 사용할 수 있습니다: :code:`fixedclipping_mod` 및 :code:`adaptiveclipping_mod`와" +" 해당 서버 측 래퍼 :code:`DifferentialPrivacyClientSideFixedClipping` 및 " +":code:`DifferentialPrivacyClientSideAdaptiveClipping`이 있습니다." + +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "client side clipping" +msgstr "클라이언트 측 클리핑" + +#: ../../source/how-to-use-differential-privacy.rst:78 +#, fuzzy +msgid "" +"The code sample below enables the ``FedAvg`` strategy to use differential" +" privacy with client-side fixed clipping using both the " +"``DifferentialPrivacyClientSideFixedClipping`` wrapper class and, on the " +"client, ``fixedclipping_mod``:" +msgstr "" +"아래 코드 샘플은 :code:`FedAvg` 전략이 클라이언트 측 고정 클리핑과 함께 차분 프라이버시를 사용할 수 있도록 " +":code:`DifferentialPrivacyClientSideFixedClipping` 래퍼 클래스와 클라이언트에서 " +":code:`fixedclipping_mod`를 모두 사용하도록 합니다:" + +#: ../../source/how-to-use-differential-privacy.rst:97 +#, fuzzy +msgid "" +"In addition to the server-side strategy wrapper, the ``ClientApp`` needs " +"to configure the matching ``fixedclipping_mod`` to perform the client-" +"side clipping:" +msgstr "" +"서버 측 전략 래퍼 외에도 클라이언트 측 클리핑을 수행하려면 :code:`ClientApp`이 일치하는 " +":code:`fixedclipping_mod`를 구성해야 합니다:" + +#: ../../source/how-to-use-differential-privacy.rst:115 +msgid "" +"To utilize local differential privacy (DP) and add noise to the client " +"model parameters before transmitting them to the server in Flower, you " +"can use the `LocalDpMod`. The following hyperparameters need to be set: " +"clipping norm value, sensitivity, epsilon, and delta." 
+msgstr "" +"로컬 차분 프라이버시(DP)를 활용하고 클라이언트 모델 파라미터를 서버로 전송하기 전에 노이즈를 추가하려면 `LocalDpMod`를" +" 사용하면 됩니다. 클리핑 노멀 값, 감도, 엡실론, 델타 등의 하이퍼파라미터를 설정해야 합니다." + +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "local DP mod" +msgstr "로컬 DP mod" + +#: ../../source/how-to-use-differential-privacy.rst:125 +#, fuzzy +msgid "Below is a code example that shows how to use ``LocalDpMod``:" +msgstr "다음은 :code:`LocalDpMod`를 사용하는 방법을 보여주는 코드 예시입니다:" + +#: ../../source/how-to-use-differential-privacy.rst:140 +msgid "" +"Please note that the order of mods, especially those that modify " +"parameters, is important when using multiple modifiers. Typically, " +"differential privacy (DP) modifiers should be the last to operate on " +"parameters." +msgstr "" +"여러 개의 수정자를 사용할 때는 수정자, 특히 매개변수를 수정하는 수정자의 순서가 중요하다는 점에 유의하세요. 일반적으로 차분 " +"프라이버시(DP) 수정자는 매개변수에서 가장 마지막에 작동해야 합니다." + +#: ../../source/how-to-use-differential-privacy.rst:145 +msgid "Local Training using Privacy Engines" +msgstr "Privacy Engines을 사용한 로컬 훈련" + +#: ../../source/how-to-use-differential-privacy.rst:147 +msgid "" +"For ensuring data instance-level privacy during local model training on " +"the client side, consider leveraging privacy engines such as Opacus and " +"TensorFlow Privacy. For examples of using Flower with these engines, " +"please refer to the Flower examples directory (`Opacus " +"`_, `Tensorflow" +" Privacy `_)." +msgstr "" +"클라이언트 측에서 로컬 모델을 훈련하는 동안 데이터 인스턴스 수준의 개인 정보 보호를 보장하려면 Opacus 및 TensorFlow" +" Privacy와 같은 개인 정보 보호 엔진을 활용하는 것을 고려하세요. 이러한 엔진과 함께 Flower를 사용하는 예제는 " +"Flower examples directory (`Opacus " +"`_, `Tensorflow" +" Privacy `_)를 참조하세요." + +#: ../../source/how-to-use-strategies.rst:2 +msgid "Use strategies" +msgstr "전략 사용하기" + +#: ../../source/how-to-use-strategies.rst:4 +#, fuzzy +msgid "" +"Flower allows full customization of the learning process through the " +"``Strategy`` abstraction. A number of built-in strategies are provided in" +" the core framework." 
+msgstr "" +"Flower는 :code:`Strategy` abstraction를 통해 학습 과정을 완전히 사용자 정의할 수 있습니다. 핵심 " +"프레임워크에는 여러 가지 기본 제공 전략이 제공됩니다." + +#: ../../source/how-to-use-strategies.rst:7 +msgid "" +"There are three ways to customize the way Flower orchestrates the " +"learning process on the server side:" +msgstr "서버 측에서 Flower가 학습 과정을 조율하는 방식을 사용자 지정하는 방법에는 세 가지가 있습니다:" + +#: ../../source/how-to-use-strategies.rst:10 +#, fuzzy +msgid "Use an existing strategy, for example, ``FedAvg``" +msgstr "기존 전략(예: :code:`FedAvg`)을 사용합니다" + +#: ../../source/how-to-use-strategies.rst:11 +#: ../../source/how-to-use-strategies.rst:43 +msgid "Customize an existing strategy with callback functions" +msgstr "콜백 함수로 기존 전략 사용자 지정" + +#: ../../source/how-to-use-strategies.rst:12 +#: ../../source/how-to-use-strategies.rst:99 +msgid "Implement a novel strategy" +msgstr "새로운 전략 구현" + +#: ../../source/how-to-use-strategies.rst:15 +msgid "Use an existing strategy" +msgstr "기존 전략 사용" + +#: ../../source/how-to-use-strategies.rst:17 +msgid "" +"Flower comes with a number of popular federated learning strategies " +"built-in. A built-in strategy can be instantiated as follows:" +msgstr "Flower에는 여러 가지 인기 있는 연합 학습 전략이 기본으로 제공됩니다. 기본 제공 전략은 다음과 같이 인스턴스화할 수 있습니다:" + +#: ../../source/how-to-use-strategies.rst:27 +#, fuzzy +msgid "" +"This creates a strategy with all parameters left at their default values " +"and passes it to the ``start_server`` function. It is usually recommended" +" to adjust a few parameters during instantiation:" +msgstr "" +"이렇게 하면 모든 매개변수가 기본값으로 유지된 전략이 생성되어 :code:`start_server` 함수에 전달됩니다. 일반적으로 " +"인스턴스화 중에 몇 가지 매개변수를 조정하는 것이 좋습니다:" + +#: ../../source/how-to-use-strategies.rst:45 +msgid "" +"Existing strategies provide several ways to customize their behaviour. " +"Callback functions allow strategies to call user-provided code during " +"execution." +msgstr "" +"기존 전략은 동작을 사용자 지정하는 여러 가지 방법을 제공합니다. 콜백 함수를 사용하면 전략이 실행 중에 사용자가 제공한 코드를 " +"호출할 수 있습니다." 
+ +#: ../../source/how-to-use-strategies.rst:49 +msgid "Configuring client fit and client evaluate" +msgstr "클라이언트 적합성 및 클라이언트 평가 구성" + +#: ../../source/how-to-use-strategies.rst:51 +#, fuzzy +msgid "" +"The server can pass new configuration values to the client each round by " +"providing a function to ``on_fit_config_fn``. The provided function will " +"be called by the strategy and must return a dictionary of configuration " +"key values pairs that will be sent to the client. It must return a " +"dictionary of arbitrary configuration values ``client.fit`` and " +"``client.evaluate`` functions during each round of federated learning." +msgstr "" +"서버는 매 라운드마다 새로운 설정 값을 클라이언트에 전달하기 위해 :code:`on_fit_config_fn`에 함수를 제공할 수 " +"있습니다. 제공된 함수는 전략에 의해 호출되며 클라이언트에 전송될 구성 키 값 쌍의 dictionary를 반환해야 합니다. 연합 " +"학습의 각 라운드 동안 임의의 구성 값 dictionary인 :code:`client.fit` 및 " +":code:`client.evaluate` 함수를 반환해야 합니다." + +#: ../../source/how-to-use-strategies.rst:84 +#, fuzzy +msgid "" +"The ``on_fit_config_fn`` can be used to pass arbitrary configuration " +"values from server to client, and potentially change these values each " +"round, for example, to adjust the learning rate. The client will receive " +"the dictionary returned by the ``on_fit_config_fn`` in its own " +"``client.fit()`` function." +msgstr "" +":code:`on_fit_config_fn`은 서버에서 클라이언트로 임의의 구성 값을 전달하고, 예를 들어 학습 속도를 조정하기 " +"위해 매 라운드마다 이 값을 잠재적으로 변경하는 데 사용할 수 있습니다. 클라이언트는 자체 :code:`client.fit()` " +"함수에서 :code:`on_fit_config_fn`이 반환한 dictionary를 받습니다." 
+ +#: ../../source/how-to-use-strategies.rst:89 +#, fuzzy +msgid "" +"Similar to ``on_fit_config_fn``, there is also ``on_evaluate_config_fn`` " +"to customize the configuration sent to ``client.evaluate()``" +msgstr "" +":code:`on_fit_config_fn`과 유사하게, :code:`client.evaluate()`로 전송되는 구성을 사용자 " +"지정하는 :code:`on_evaluate_config_fn`도 있습니다" + +#: ../../source/how-to-use-strategies.rst:93 +msgid "Configuring server-side evaluation" +msgstr "서버 측 평가 구성" + +#: ../../source/how-to-use-strategies.rst:95 +#, fuzzy +msgid "" +"Server-side evaluation can be enabled by passing an evaluation function " +"to ``evaluate_fn``." +msgstr "서버 측 평가는 :code:`evaluate_fn`에 평가 함수를 전달하여 활성화할 수 있습니다." + +#: ../../source/how-to-use-strategies.rst:101 +msgid "" +"Writing a fully custom strategy is a bit more involved, but it provides " +"the most flexibility. Read the `Implementing Strategies `_ guide to learn more." +msgstr "" +"완전한 사용자 지정 전략을 작성하는 것은 조금 더 복잡하지만 유연성이 가장 뛰어납니다. 자세한 내용은 `Implementing " +"Strategies `_ 가이드를 참조하세요." 
+
+#: ../../source/index.rst:34
+msgid "Tutorial"
+msgstr "튜토리얼"
+
+#: ../../source/index.rst:44
+msgid "Quickstart tutorials"
+msgstr "빠른 시작 튜토리얼"
+
+#: ../../source/index.rst:81 ../../source/index.rst:85
+msgid "How-to guides"
+msgstr "사용 방법 가이드"
+
+#: ../../source/index.rst:106
+msgid "Legacy example guides"
+msgstr "레거시 예제 가이드"
+
+#: ../../source/index.rst:114 ../../source/index.rst:119
+msgid "Explanations"
+msgstr "설명"
+
+#: None:-1
+msgid "API reference"
+msgstr "API 참조"
+
+#: ../../source/index.rst:145
+msgid "Reference docs"
+msgstr "참조 문서"
+
+#: ../../source/index.rst:160
+msgid "Contributor tutorials"
+msgstr "기여자 튜토리얼"
+
+#: ../../source/index.rst:167
+msgid "Contributor how-to guides"
+msgstr "기여자 사용법 가이드"
+
+#: ../../source/index.rst:179
+msgid "Contributor explanations"
+msgstr "기여자 설명"
+
+#: ../../source/index.rst:185
+msgid "Contributor references"
+msgstr "기여자 참조"
+
+#: ../../source/index.rst:-1
+msgid ""
+"Check out the documentation of the main Flower Framework enabling easy "
+"Python development for Federated Learning."
+msgstr "연합 학습을 위한 Python 개발을 쉽게 할 수 있는 주요 Flower 프레임워크의 설명서를 확인하세요."
+
+#: ../../source/index.rst:2
+msgid "Flower Framework Documentation"
+msgstr "Flower 프레임워크 문서"
+
+#: ../../source/index.rst:7
+msgid ""
+"Welcome to Flower's documentation. `Flower `_ is a "
+"friendly federated learning framework."
+msgstr "Flower 문서에 오신 것을 환영합니다. `Flower `_는 편한 연합 학습 프레임워크입니다."
+
+#: ../../source/index.rst:11
+msgid "Join the Flower Community"
+msgstr "Flower 커뮤니티 가입하기"
+
+#: ../../source/index.rst:13
+msgid ""
+"The Flower Community is growing quickly - we're a friendly group of "
+"researchers, engineers, students, professionals, academics, and other "
+"enthusiasts."
+msgstr "Flower 커뮤니티는 연구원, 엔지니어, 학생, 전문가, 학자 및 기타 애호가들로 구성된 편한 그룹으로 빠르게 성장하고 있습니다."
+ +#: ../../source/index.rst:16 +msgid "Join us on Slack" +msgstr "Slack에 가입하세요" + +#: ../../source/index.rst:23 +msgid "Flower Framework" +msgstr "Flower 프레임워크" + +#: ../../source/index.rst:25 +msgid "" +"The user guide is targeted at researchers and developers who want to use " +"Flower to bring existing machine learning workloads into a federated " +"setting. One of Flower's design goals was to make this simple. Read on to" +" learn more." +msgstr "" +"이 사용자 가이드는 Flower를 사용해 기존 머신 러닝 워크로드를 연합된 환경으로 가져오고자 하는 연구자와 개발자를 대상으로 " +"합니다. Flower의 설계 목표 중 하나는 이를 간단하게 만드는 것이었습니다. 자세히 알아보려면 계속 읽어보세요." + +#: ../../source/index.rst:30 +msgid "Tutorials" +msgstr "튜토리얼" + +#: ../../source/index.rst:32 +msgid "" +"A learning-oriented series of federated learning tutorials, the best " +"place to start." +msgstr "학습 중심의 연합 학습 튜토리얼 시리즈로, 시작하기에 가장 좋은 곳입니다." + +#: ../../source/index.rst:62 +#, fuzzy +msgid "" +"QUICKSTART TUTORIALS: :doc:`PyTorch ` | " +":doc:`TensorFlow ` | :doc:`MLX ` | :doc:`🤗 Transformers ` | :doc:`JAX ` | :doc:`Pandas " +"` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | " +":doc:`XGBoost ` | :doc:`Android ` | :doc:`iOS `" +msgstr "" +"QUICKSTART TUTORIALS: :doc:`PyTorch ` | " +":doc:`TensorFlow ` | :doc:`🤗 Transformers" +" ` | :doc:`JAX ` | :doc:`Pandas ` | :doc:`fastai " +"` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | :doc:`XGBoost ` | " +":doc:`Android ` | :doc:`iOS `" + +#: ../../source/index.rst:70 +msgid "We also made video tutorials for PyTorch:" +msgstr "파이토치용 동영상 튜토리얼도 만들었습니다:" + +#: ../../source/index.rst:75 +msgid "And TensorFlow:" +msgstr "그리고 TensorFlow도:" + +#: ../../source/index.rst:83 +msgid "" +"Problem-oriented how-to guides show step-by-step how to achieve a " +"specific goal." +msgstr "문제 중심의 방법 가이드는 특정 목표를 달성하는 방법을 단계별로 보여줍니다." + +#: ../../source/index.rst:116 +msgid "" +"Understanding-oriented concept guides explain and discuss key topics and " +"underlying ideas behind Flower and collaborative AI." 
+msgstr "이해 중심의 개념 가이드에서는 Flower와 협업 AI의 주요 주제와 기본 아이디어를 설명하고 토론합니다." + +#: ../../source/index.rst:128 +msgid "References" +msgstr "참조" + +#: ../../source/index.rst:130 +msgid "Information-oriented API reference and other reference material." +msgstr "정보 지향 API 참조 및 기타 참고 자료." + +#: ../../source/index.rst:139::1 +msgid ":py:obj:`flwr `\\" +msgstr ":py:obj:`flwr `\\" + +#: ../../source/index.rst:139::1 flwr:1 of +msgid "Flower main package." +msgstr "Flower 메인 패키지." + +#: ../../source/index.rst:155 +msgid "Contributor docs" +msgstr "기여자 문서" + +#: ../../source/index.rst:157 +msgid "" +"The Flower community welcomes contributions. The following docs are " +"intended to help along the way." +msgstr "Flower 커뮤니티는 여러분의 기여를 환영합니다. 다음 문서는 그 과정에서 도움을 드리기 위한 문서입니다." + +#: ../../source/ref-api-cli.rst:2 +msgid "Flower CLI reference" +msgstr "Flower CLI 참조" + +#: ../../source/ref-api-cli.rst:7 +#, fuzzy +msgid "flwr CLI" +msgstr "Flower 클라이언트." + +#: ../../flwr:1 +#, fuzzy +msgid "flwr is the Flower command line interface." +msgstr "Flower ClientProxy 인스턴스 등록 해제." + +#: ../../source/ref-api-cli.rst +#, fuzzy +msgid "Options" +msgstr "해결법" + +#: ../../flwr:1 +#, fuzzy +msgid "Install completion for the current shell." +msgstr "현재 실행에 대한 식별자입니다." + +#: ../../flwr:1 +msgid "" +"Show completion for the current shell, to copy it or customize the " +"installation." +msgstr "" + +#: ../../flwr build:1 +msgid "Build a Flower App into a Flower App Bundle (FAB)." +msgstr "" + +#: ../../flwr build:1 +msgid "" +"You can run ``flwr build`` without any arguments to bundle the app " +"located in the current directory. Alternatively, you can you can specify " +"a path using the ``--app`` option to bundle an app located at the " +"provided path. For example:" +msgstr "" + +#: ../../flwr build:1 +msgid "``flwr build --app ./apps/flower-hello-world``." 
+msgstr "" + +#: ../../flwr build:1 +msgid "Path of the Flower App to bundle into a FAB" +msgstr "" + +#: ../../flwr install:1 +#, fuzzy +msgid "Install a Flower App Bundle." +msgstr "Flower 설치" + +#: ../../flwr install:1 +msgid "It can be ran with a single FAB file argument:" +msgstr "" + +#: ../../flwr install:1 +msgid "``flwr install ./target_project.fab``" +msgstr "" + +#: ../../flwr install:1 +msgid "The target install directory can be specified with ``--flwr-dir``:" +msgstr "" + +#: ../../flwr install:1 +msgid "``flwr install ./target_project.fab --flwr-dir ./docs/flwr``" +msgstr "" + +#: ../../flwr install:1 +msgid "" +"This will install ``target_project`` to ``./docs/flwr/``. By default, " +"``flwr-dir`` is equal to:" +msgstr "" + +#: ../../flwr install:1 +msgid "``$FLWR_HOME/`` if ``$FLWR_HOME`` is defined" +msgstr "" + +#: ../../flwr install:1 +msgid "``$XDG_DATA_HOME/.flwr/`` if ``$XDG_DATA_HOME`` is defined" +msgstr "" + +#: ../../flwr install:1 +msgid "``$HOME/.flwr/`` in all other cases" +msgstr "" + +#: ../../flwr install:1 +msgid "The desired install path." +msgstr "" + +#: ../../source/ref-api-cli.rst +#, fuzzy +msgid "Arguments" +msgstr "빌드 전달인자" + +#: ../../flwr install:1 log:1 new:1 run:1 +#, fuzzy +msgid "Optional argument" +msgstr "선택적 개선 사항" + +#: ../../flwr install:1 +msgid "The source FAB file to install." +msgstr "" + +#: ../../flwr log:1 +msgid "Get logs from a Flower project run." 
+msgstr "" + +#: ../../flwr log:1 +msgid "Flag to stream or print logs from the Flower run" +msgstr "" + +#: ../../flwr log run +msgid "default" +msgstr "" + +#: ../../flwr log:1 +#, fuzzy +msgid "``True``" +msgstr "``DISTRO``" + +#: ../../flwr log:1 +#, fuzzy +msgid "Required argument" +msgstr "빌드 전달인자" + +#: ../../flwr log:1 +#, fuzzy +msgid "The Flower run ID to query" +msgstr "Flower 커뮤니티 가입하기" + +#: ../../flwr log:1 +msgid "Path of the Flower project to run" +msgstr "" + +#: ../../flwr log:1 +msgid "Name of the federation to run the app on" +msgstr "" + +#: ../../flwr new:1 +#, fuzzy +msgid "Create new Flower App." +msgstr "Flower 서버를 실행하세요." + +#: ../../flwr new:1 +msgid "The ML framework to use" +msgstr "" + +#: ../../flwr new +#, fuzzy +msgid "options" +msgstr "해결법" + +#: ../../flwr new:1 +msgid "" +"PyTorch | TensorFlow | sklearn | HuggingFace | JAX | MLX | NumPy | " +"FlowerTune | Flower Baseline" +msgstr "" + +#: ../../flwr new:1 +msgid "The Flower username of the author" +msgstr "" + +#: ../../flwr new:1 +#, fuzzy +msgid "The name of the Flower App" +msgstr "Flower 기본 이미지의 태그." + +#: ../../flwr run:1 +#, fuzzy +msgid "Run Flower App." +msgstr "Flower 서버를 실행하세요." + +#: ../../flwr run:1 +msgid "Override configuration key-value pairs, should be of the format:" +msgstr "" + +#: ../../flwr run:1 +msgid "" +"`--run-config 'key1=\"value1\" key2=\"value2\"' --run-config " +"'key3=\"value3\"'`" +msgstr "" + +#: ../../flwr run:1 +msgid "" +"Note that `key1`, `key2`, and `key3` in this example need to exist inside" +" the `pyproject.toml` in order to be properly overriden." +msgstr "" + +#: ../../flwr run:1 +msgid "" +"Use `--stream` with `flwr run` to display logs; logs are not streamed by " +"default." +msgstr "" + +#: ../../flwr run:1 +#, fuzzy +msgid "``False``" +msgstr "``flwr/base``" + +#: ../../flwr run:1 +#, fuzzy +msgid "Path of the Flower App to run." +msgstr "Flower 기본 이미지의 태그." + +#: ../../flwr run:1 +msgid "Name of the federation to run the app on." 
+msgstr "" + +#: ../../source/ref-api-cli.rst:16 +msgid "flower-simulation" +msgstr "flower 시뮬레이션" + +#: ../../source/ref-api-cli.rst:26 +msgid "flower-superlink" +msgstr "flower 초연결" + +#: ../../source/ref-api-cli.rst:36 +#, fuzzy +msgid "flower-supernode" +msgstr "Flower SuperNode" + +#: ../../source/ref-api-cli.rst:46 +msgid "flower-server-app" +msgstr "flower 서버 프로그램" + +#: ../../source/ref-api-cli.rst:50 +msgid "" +"Note that since version ``1.11.0``, ``flower-server-app`` no longer " +"supports passing a reference to a `ServerApp` attribute. Instead, you " +"need to pass the path to Flower app via the argument ``--app``. This is " +"the path to a directory containing a `pyproject.toml`. You can create a " +"valid Flower app by executing ``flwr new`` and following the prompt." +msgstr "" + +#: ../../source/ref-api-cli.rst:64 +#, fuzzy +msgid "flower-superexec" +msgstr "flower 초연결" + +#: ../../source/ref-api/flwr.rst:2 +msgid "flwr" +msgstr "flwr" + +#: ../../source/ref-api/flwr.client.rst:43 ../../source/ref-api/flwr.rst:25 +#: ../../source/ref-api/flwr.server.rst:48 +msgid "Modules" +msgstr "Modules" + +#: ../../source/ref-api/flwr.rst:35::1 +#, fuzzy +msgid ":py:obj:`flwr.client `\\" +msgstr ":py:obj:`flwr.client `\\" + +#: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of +msgid "Flower client." +msgstr "Flower 클라이언트." + +#: ../../source/ref-api/flwr.rst:35::1 +#, fuzzy +msgid ":py:obj:`flwr.common `\\" +msgstr ":py:obj:`flwr.common `\\" + +#: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of +msgid "Common components shared between server and client." +msgstr "서버와 클라이언트 간에 공유되는 공통 구성 요소입니다." + +#: ../../source/ref-api/flwr.rst:35::1 +#, fuzzy +msgid ":py:obj:`flwr.server `\\" +msgstr ":py:obj:`flwr.server `\\" + +#: ../../source/ref-api/flwr.rst:35::1 +#: ../../source/ref-api/flwr.server.rst:37::1 flwr.server:1 +#: flwr.server.server.Server:1 of +msgid "Flower server." +msgstr "Flower 서버." 
+ +#: ../../source/ref-api/flwr.rst:35::1 +#, fuzzy +msgid ":py:obj:`flwr.simulation `\\" +msgstr ":py:obj:`flwr.simulation `\\" + +#: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of +msgid "Flower simulation." +msgstr "Flower 시뮬레이션." + +#: ../../source/ref-api/flwr.client.rst:2 +msgid "client" +msgstr "클라이언트" + +#: ../../source/ref-api/flwr.client.mod.rst:13 +#: ../../source/ref-api/flwr.client.rst:13 +#: ../../source/ref-api/flwr.common.rst:13 +#: ../../source/ref-api/flwr.server.rst:13 +#: ../../source/ref-api/flwr.simulation.rst:13 +msgid "Functions" +msgstr "함수" + +#: ../../source/ref-api/flwr.client.rst:23::1 +msgid "" +":py:obj:`start_client `\\ \\(\\*\\, " +"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" +msgstr "" +":py:obj:`start_client `\\ \\(\\*\\, " +"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" + +#: ../../source/ref-api/flwr.client.rst:23::1 +#: flwr.client.app.start_client:1 of +msgid "Start a Flower client node which connects to a Flower server." +msgstr "Flower 서버에 연결되는 Flower 클라이언트 노드를 시작합니다." + +#: ../../source/ref-api/flwr.client.rst:23::1 +msgid "" +":py:obj:`start_numpy_client `\\ \\(\\*\\," +" server\\_address\\, client\\)" +msgstr "" +":py:obj:`start_numpy_client `\\ \\(\\*\\," +" server\\_address\\, client\\)" + +#: ../../source/ref-api/flwr.client.rst:23::1 +#: flwr.client.app.start_numpy_client:1 of +msgid "Start a Flower NumPyClient which connects to a gRPC server." +msgstr "gRPC 서버에 연결되는 Flower NumPyClient를 시작합니다." 
+ +#: ../../source/ref-api/flwr.client.mod.rst:30 +#: ../../source/ref-api/flwr.client.rst:25 +#: ../../source/ref-api/flwr.common.rst:32 +#: ../../source/ref-api/flwr.server.rst:24 +#: ../../source/ref-api/flwr.server.strategy.rst:17 +#: ../../source/ref-api/flwr.server.workflow.rst:17 +msgid "Classes" +msgstr "클래스" + +#: ../../source/ref-api/flwr.client.rst:32::1 +msgid ":py:obj:`Client `\\ \\(\\)" +msgstr ":py:obj:`Client `\\ \\(\\)" + +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.client.Client:1 of +msgid "Abstract base class for Flower clients." +msgstr "Flower 클라이언트를 위한 추상 베이스 클래스입니다." + +#: ../../source/ref-api/flwr.client.rst:32::1 +msgid "" +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " +"mods\\]\\)" +msgstr "" +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " +"mods\\]\\)" + +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.client_app.ClientApp:1 of +msgid "Flower ClientApp." +msgstr "Flower ClientApp." + +#: ../../source/ref-api/flwr.client.rst:32::1 +msgid ":py:obj:`NumPyClient `\\ \\(\\)" +msgstr ":py:obj:`NumPyClient `\\ \\(\\)" + +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.numpy_client.NumPyClient:1 of +msgid "Abstract base class for Flower clients using NumPy." +msgstr "NumPy를 사용하는 Flower 클라이언트를 위한 추상 베이스 클래스입니다." + +#: ../../source/ref-api/flwr.client.rst:50::1 +#, fuzzy +msgid ":py:obj:`flwr.client.mod `\\" +msgstr ":py:obj:`flwr.client.mod `\\" + +#: ../../source/ref-api/flwr.client.rst:50::1 flwr.client.mod:1 of +msgid "Flower Built-in Mods." +msgstr "Flower 내장 모드." 
+ +#: flwr.client.client.Client:1 flwr.client.numpy_client.NumPyClient:1 +#: flwr.server.client_manager.ClientManager:1 +#: flwr.server.driver.driver.Driver:1 flwr.server.strategy.strategy.Strategy:1 +#: of +msgid "Bases: :py:class:`~abc.ABC`" +msgstr "Bases: :py:class:`~abc.ABC`" + +#: ../../source/ref-api/flwr.client.Client.rst:15 +#: ../../source/ref-api/flwr.client.ClientApp.rst:15 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:15 +#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:15 +#: ../../source/ref-api/flwr.common.Array.rst:15 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:15 +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Context.rst:15 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 +#: ../../source/ref-api/flwr.common.Error.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 +#: ../../source/ref-api/flwr.common.EventType.rst:15 +#: ../../source/ref-api/flwr.common.FitIns.rst:15 +#: ../../source/ref-api/flwr.common.FitRes.rst:15 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:15 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 +#: ../../source/ref-api/flwr.common.Message.rst:15 +#: ../../source/ref-api/flwr.common.MessageType.rst:15 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 +#: ../../source/ref-api/flwr.common.Metadata.rst:15 +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Parameters.rst:15 +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 +#: ../../source/ref-api/flwr.common.RecordSet.rst:15 +#: ../../source/ref-api/flwr.common.ServerMessage.rst:15 +#: ../../source/ref-api/flwr.common.Status.rst:15 +#: 
../../source/ref-api/flwr.server.ClientManager.rst:15 +#: ../../source/ref-api/flwr.server.Driver.rst:15 +#: ../../source/ref-api/flwr.server.History.rst:15 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 +#: ../../source/ref-api/flwr.server.Server.rst:15 +#: ../../source/ref-api/flwr.server.ServerApp.rst:15 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:15 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:15 +#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:15 +#: 
../../source/ref-api/flwr.server.strategy.Krum.rst:15 +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 +msgid "Methods" +msgstr "메소드" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`evaluate `\\ \\(ins\\)" +msgstr ":py:obj:`evaluate `\\ \\(ins\\)" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.evaluate:1 +#: flwr.client.numpy_client.NumPyClient.evaluate:1 of +msgid "Evaluate the provided parameters using the locally held dataset." +msgstr "로컬로 보유한 데이터 세트를 사용하여 제공된 매개변수를 평가합니다." + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`fit `\\ \\(ins\\)" +msgstr ":py:obj:`fit `\\ \\(ins\\)" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.fit:1 of +msgid "Refine the provided parameters using the locally held dataset." +msgstr "로컬로 보유한 데이터 세트를 사용하여 제공된 매개변수를 구체화합니다." + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_context `\\ \\(\\)" +msgstr ":py:obj:`get_context `\\ \\(\\)" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_context:1 +#: flwr.client.numpy_client.NumPyClient.get_context:1 of +msgid "Get the run context from this client." +msgstr "이 클라이언트에서 실행 컨텍스트를 가져옵니다." 
+ +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_parameters `\\ \\(ins\\)" +msgstr ":py:obj:`get_parameters `\\ \\(ins\\)" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_parameters:1 +#: flwr.client.numpy_client.NumPyClient.get_parameters:1 of +msgid "Return the current local model parameters." +msgstr "현재 로컬 모델 파라미터를 반환합니다." + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_properties `\\ \\(ins\\)" +msgstr ":py:obj:`get_properties `\\ \\(ins\\)" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.get_properties:1 of +msgid "Return set of client's properties." +msgstr "클라이언트의 속성 집합을 반환합니다." + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`set_context `\\ \\(context\\)" +msgstr ":py:obj:`set_context `\\ \\(context\\)" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.set_context:1 +#: flwr.client.numpy_client.NumPyClient.set_context:1 of +msgid "Apply a run context to this client." +msgstr "이 클라이언트에 실행 컨텍스트를 적용합니다." + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`to_client `\\ \\(\\)" +msgstr ":py:obj:`to_client `\\ \\(\\)" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.to_client:1 of +msgid "Return client (itself)." +msgstr "클라이언트(자체)를 반환합니다." 
+ +#: ../../source/ref-api/flwr.client.Client.rst:46 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:46 +#: ../../source/ref-api/flwr.common.Array.rst:28 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:25 +#: ../../source/ref-api/flwr.common.Code.rst:19 +#: ../../source/ref-api/flwr.common.Context.rst:25 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 +#: ../../source/ref-api/flwr.common.Error.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 +#: ../../source/ref-api/flwr.common.EventType.rst:165 +#: ../../source/ref-api/flwr.common.FitIns.rst:25 +#: ../../source/ref-api/flwr.common.FitRes.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 +#: ../../source/ref-api/flwr.common.Message.rst:37 +#: ../../source/ref-api/flwr.common.MessageType.rst:25 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 +#: ../../source/ref-api/flwr.common.Metadata.rst:25 +#: ../../source/ref-api/flwr.common.Parameters.rst:25 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 +#: ../../source/ref-api/flwr.common.RecordSet.rst:25 +#: ../../source/ref-api/flwr.common.ServerMessage.rst:25 +#: ../../source/ref-api/flwr.common.Status.rst:25 +#: ../../source/ref-api/flwr.server.Driver.rst:40 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:25 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:25 +msgid "Attributes" +msgstr "속성" + +#: flwr.client.Client.context:1::1 of +msgid ":py:obj:`context `\\" +msgstr ":py:obj:`context `\\" + +#: flwr.client.Client.context:1 flwr.client.Client.context:1::1 +#: flwr.client.NumPyClient.context:1 +#: flwr.client.NumPyClient.context:1::1 of +msgid "Getter for `Context` client attribute." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.Parameters.rst:2 +#: flwr.client.app.start_client flwr.client.app.start_numpy_client +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.mod.localdp_mod.LocalDpMod +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.context.Context flwr.common.message.Error +#: flwr.common.message.Message flwr.common.message.Message.create_error_reply +#: flwr.common.message.Message.create_reply flwr.common.message.Metadata +#: flwr.common.record.configsrecord.ConfigsRecord +#: flwr.common.record.metricsrecord.MetricsRecord +#: flwr.common.record.parametersrecord.Array +#: flwr.common.record.parametersrecord.ParametersRecord +#: flwr.common.record.recordset.RecordSet flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.ClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.serverapp_components.ServerAppComponents +#: flwr.server.strategy.bulyan.Bulyan +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping +#: 
flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.fedadagrad.FedAdagrad +#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg +#: flwr.server.strategy.fedavg_android.FedAvgAndroid +#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt +#: flwr.server.strategy.fedprox.FedProx +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg +#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow +#: flwr.simulation.run_simulation.run_simulation of +msgid "Parameters" +msgstr "파라미터" + +#: flwr.client.client.Client.evaluate:3 of +msgid "" +"The evaluation instructions containing (global) model parameters received" +" from the server and a dictionary of configuration values used to " +"customize the local evaluation process." +msgstr "서버에서 받은 (전역) 모델 파라미터와 로컬 평가 프로세스를 사용자 지정하는 데 사용되는 구성 값 사전이 포함된 평가 지침입니다." 
+ +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of +msgid "Returns" +msgstr "반환" + +#: flwr.client.client.Client.evaluate:8 of +msgid "" +"The evaluation result containing the loss on the local dataset and other " +"details such as the number of local data examples used for evaluation." +msgstr "로컬 데이터 세트의 손실 및 평가에 사용된 로컬 데이터 예제 수와 같은 기타 세부 정보가 포함된 평가 결과입니다." 
+ +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of +msgid "Return type" +msgstr "반환 타입" + +#: flwr.client.client.Client.fit:3 of +msgid "" +"The training instructions containing (global) model parameters received " +"from the server and a dictionary of configuration values used to " +"customize the local training process." +msgstr "서버에서 받은 (전역) 모델 파라미터와 로컬 학습 프로세스를 사용자 지정하는 데 사용되는 구성 값 사전이 포함된 학습 지침입니다." + +#: flwr.client.client.Client.fit:8 of +msgid "" +"The training result containing updated parameters and other details such " +"as the number of local training examples used for training." +msgstr "업데이트된 매개변수와 훈련에 사용된 로컬 훈련 예제 수와 같은 기타 세부 정보가 포함된 훈련 결과입니다." 
+ +#: flwr.client.client.Client.get_parameters:3 of +msgid "" +"The get parameters instructions received from the server containing a " +"dictionary of configuration values." +msgstr "구성 값 dictionary이 포함된 서버에서 받은 매개변수 가져오기 명령어입니다." + +#: flwr.client.client.Client.get_parameters:7 of +msgid "The current local model parameters." +msgstr "현재 로컬 모델 파라미터입니다." + +#: flwr.client.client.Client.get_properties:3 of +msgid "" +"The get properties instructions received from the server containing a " +"dictionary of configuration values." +msgstr "구성 값 dictionary이 포함된 서버로부터 받은 속성 가져오기 명령입니다." + +#: flwr.client.client.Client.get_properties:7 of +msgid "The current client properties." +msgstr "현재 클라이언트 속성입니다." + +#: ../../source/ref-api/flwr.client.ClientApp.rst:2 +msgid "ClientApp" +msgstr "클라이언트앱" + +#: flwr.client.client_app.ClientApp:1 flwr.client.mod.localdp_mod.LocalDpMod:1 +#: flwr.common.constant.MessageType:1 flwr.common.constant.MessageTypeLegacy:1 +#: flwr.common.context.Context:1 flwr.common.message.Error:1 +#: flwr.common.message.Message:1 flwr.common.message.Metadata:1 +#: flwr.common.record.parametersrecord.Array:1 +#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 +#: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 +#: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 +#: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 +#: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 +#: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 +#: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 +#: flwr.common.typing.Status:1 flwr.server.history.History:1 +#: flwr.server.server.Server:1 flwr.server.server_app.ServerApp:1 +#: flwr.server.server_config.ServerConfig:1 +#: flwr.server.serverapp_components.ServerAppComponents:1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 +#: 
flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +msgid "Bases: :py:class:`object`" +msgstr "Bases: :py:class:`object`" + +#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 +#: flwr.client.client_app.ClientApp:4 +#: flwr.client.client_app.ClientApp.evaluate:4 +#: flwr.client.client_app.ClientApp.query:4 +#: flwr.client.client_app.ClientApp.train:4 +#: flwr.client.mod.localdp_mod.LocalDpMod:22 +#: flwr.common.record.configsrecord.ConfigsRecord:20 +#: flwr.common.record.metricsrecord.MetricsRecord:19 +#: flwr.common.record.parametersrecord.ParametersRecord:22 +#: flwr.common.record.recordset.RecordSet:23 flwr.server.app.start_server:41 +#: flwr.server.server_app.ServerApp:4 flwr.server.server_app.ServerApp.main:4 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 +#: of +msgid "Examples" +msgstr "예시" + +#: flwr.client.client_app.ClientApp:5 of +msgid "" +"Assuming a typical `Client` implementation named `FlowerClient`, you can " +"wrap it in a `ClientApp` as follows:" +msgstr "" +"일반적인 `Client` 구현의 이름이 `FlowerClient`라고 가정하면, 다음과 같이 `ClientApp`으로 래핑할 수 " +"있습니다:" + +#: flwr.client.client_app.ClientApp:16 of +msgid "" +"If the above code is in a Python module called `client`, it can be " +"started as follows:" +msgstr "위의 코드가 'client'라는 Python 모듈에 있는 경우 다음과 같이 시작할 수 있습니다:" + +#: flwr.client.client_app.ClientApp:21 of +msgid "" +"In this `client:app` example, `client` refers to the Python module " +"`client.py` in which the previous code lives in and `app` refers to the " +"global attribute `app` that points to an object of type `ClientApp`." 
+msgstr "" +"이 `client:app` 예제에서 `client`는 이전 코드가 있는 Python 모듈 `client.py`를 가리키고 " +"`app`는 `ClientApp` 유형의 객체를 가리키는 전역 속성 `app`을 가리킵니다." + +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`evaluate `\\ \\(\\)" +msgstr ":py:obj:`evaluate `\\ \\(\\)" + +#: flwr.client.client_app.ClientApp.evaluate:1 +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid "Return a decorator that registers the evaluate fn with the client app." +msgstr "클라이언트 앱에 평가함수를 등록하는 데코레이터를 반환합니다." + +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`query `\\ \\(\\)" +msgstr ":py:obj:`query `\\ \\(\\)" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.query:1 of +msgid "Return a decorator that registers the query fn with the client app." +msgstr "클라이언트 앱에 query fn을 등록하는 데코레이터를 반환합니다." + +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`train `\\ \\(\\)" +msgstr ":py:obj:`train `\\ \\(\\)" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.train:1 of +msgid "Return a decorator that registers the train fn with the client app." +msgstr "클라이언트 앱에 train fn을 등록하는 데코레이터를 반환합니다." + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:2 +msgid "NumPyClient" +msgstr "NumPyClient" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid "" +":py:obj:`evaluate `\\ \\(parameters\\, " +"config\\)" +msgstr "" +":py:obj:`evaluate `\\ \\(parameters\\, " +"config\\)" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`fit `\\ \\(parameters\\, config\\)" +msgstr ":py:obj:`fit `\\ \\(parameters\\, config\\)" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.fit:1 of +msgid "Train the provided parameters using the locally held dataset." +msgstr "로컬로 보유한 데이터 세트를 사용하여 제공된 파라미터를 학습합니다." 
+ +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`get_context `\\ \\(\\)" +msgstr ":py:obj:`get_context `\\ \\(\\)" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid "" +":py:obj:`get_parameters `\\ " +"\\(config\\)" +msgstr "" +":py:obj:`get_parameters `\\ " +"\\(config\\)" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid "" +":py:obj:`get_properties `\\ " +"\\(config\\)" +msgstr "" +":py:obj:`get_properties `\\ " +"\\(config\\)" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.get_properties:1 of +msgid "Return a client's set of properties." +msgstr "클라이언트의 속성 집합을 반환합니다." + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid "" +":py:obj:`set_context `\\ " +"\\(context\\)" +msgstr "" +":py:obj:`set_context `\\ " +"\\(context\\)" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`to_client `\\ \\(\\)" +msgstr ":py:obj:`to_client `\\ \\(\\)" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.to_client:1 of +msgid "Convert to object to Client type and return it." +msgstr "객체를 클라이언트 유형으로 변환하고 반환합니다." + +#: flwr.client.NumPyClient.context:1::1 of +msgid ":py:obj:`context `\\" +msgstr ":py:obj:`context `\\" + +#: flwr.client.numpy_client.NumPyClient.evaluate:3 +#: flwr.client.numpy_client.NumPyClient.fit:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:5 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:8 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:5 +#: flwr.server.strategy.strategy.Strategy.configure_fit:5 +#: flwr.server.strategy.strategy.Strategy.evaluate:8 of +msgid "The current (global) model parameters." +msgstr "현재(전역) 모델 매개변수입니다." + +#: flwr.client.numpy_client.NumPyClient.evaluate:5 of +msgid "" +"Configuration parameters which allow the server to influence evaluation " +"on the client. 
It can be used to communicate arbitrary values from the " +"server to the client, for example, to influence the number of examples " +"used for evaluation." +msgstr "" +"서버가 클라이언트의 평가에 영향을 줄 수 있는 구성 매개변수입니다. 예를 들어 평가에 사용되는 예제 수에 영향을 주기 위해 서버에서" +" 클라이언트로 임의의 값을 전달하는 데 사용할 수 있습니다." + +#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +msgid "" +"* **loss** (*float*) -- The evaluation loss of the model on the local " +"dataset. * **num_examples** (*int*) -- The number of examples used for " +"evaluation. * **metrics** (*Dict[str, Scalar]*) -- A dictionary mapping " +"arbitrary string keys to values of type bool, bytes, float, int, or " +"str. It can be used to communicate arbitrary values back to the server." +msgstr "" +"* **loss** (*float*) - 로컬 데이터 세트에서 모델의 평가 손실입니다. * **num_examples** " +"(*int*) -- 평가에 사용된 예제 수입니다. * **metrics** (*Dict[str, Scalar]*) -- 임의의 " +"문자열 키를 부울, 바이트, float, int 또는 str 유형의 값에 매핑하는 dictionary입니다. 임의의 값을 서버에 " +"다시 전달하는 데 사용할 수 있습니다." + +#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +msgid "" +"**loss** (*float*) -- The evaluation loss of the model on the local " +"dataset." +msgstr "**loss** (*float*) -- 로컬 데이터 세트에서 모델의 평가 손실입니다." + +#: flwr.client.numpy_client.NumPyClient.evaluate:12 of +msgid "**num_examples** (*int*) -- The number of examples used for evaluation." +msgstr "**num_examples** (*int*) - 평가에 사용된 예제 수입니다." + +#: flwr.client.numpy_client.NumPyClient.evaluate:13 +#: flwr.client.numpy_client.NumPyClient.fit:13 of +msgid "" +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. It can be " +"used to communicate arbitrary values back to the server." +msgstr "" +"**metrics** (*Dict[str, Scalar]*) - 임의의 문자열 키를 bool, bytes, float, int 또는" +" str 타입의 값에 매핑하는 dictionary입니다. 임의의 값을 서버에 다시 전달하는 데 사용할 수 있습니다." 
+ +#: flwr.client.numpy_client.NumPyClient.evaluate:19 of +msgid "" +"The previous return type format (int, float, float) and the extended " +"format (int, float, float, Dict[str, Scalar]) have been deprecated and " +"removed since Flower 0.19." +msgstr "" +"이전 반환 유형 형식(int, float, float)과 확장 형식(int, float, float, Dict[str, " +"Scalar])은 Flower 0.19부터 더 이상 사용되지 않으며 제거되었습니다." + +#: flwr.client.numpy_client.NumPyClient.fit:5 of +msgid "" +"Configuration parameters which allow the server to influence training on " +"the client. It can be used to communicate arbitrary values from the " +"server to the client, for example, to set the number of (local) training " +"epochs." +msgstr "" +"서버가 클라이언트의 훈련에 영향을 줄 수 있는 구성 매개변수입니다. 예를 들어 (로컬) 트레이닝 에포크 수를 설정하는 등 서버에서 " +"클라이언트로 임의의 값을 전달하는 데 사용할 수 있습니다." + +#: flwr.client.numpy_client.NumPyClient.fit:11 of +msgid "" +"* **parameters** (*NDArrays*) -- The locally updated model parameters. * " +"**num_examples** (*int*) -- The number of examples used for training. * " +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. It can " +"be used to communicate arbitrary values back to the server." +msgstr "" +"* **parameters** (*NDArrays*) - 로컬로 업데이트된 모델 파라미터입니다. * **num_examples** " +"(*int*) -- 학습에 사용된 예제 수입니다. * **metrics** (*Dict[str, Scalar]*) - 임의의 문자열" +" 키를 bool, bytes, float, int,또는 str 타입의 값에 매핑하는 dictionary입니다. 임의의 값을 서버에 " +"다시 전달하는 데 사용할 수 있습니다." + +#: flwr.client.numpy_client.NumPyClient.fit:11 of +msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." +msgstr "**parameters** (*NDArrays*) - 로컬로 업데이트된 모델 파라미터입니다." + +#: flwr.client.numpy_client.NumPyClient.fit:12 of +msgid "**num_examples** (*int*) -- The number of examples used for training." +msgstr "**num_examples** (*int*) - 트레이닝에 사용된 예제 수입니다." + +#: flwr.client.numpy_client.NumPyClient.get_parameters:3 of +msgid "" +"Configuration parameters requested by the server. 
This can be used to " +"tell the client which parameters are needed along with some Scalar " +"attributes." +msgstr "" +"서버에서 요청한 구성 매개변수입니다. 이는 일부 스칼라 속성과 함께 어떤 매개변수가 필요한지 클라이언트에게 알려주는 데 사용할 수 " +"있습니다." + +#: flwr.client.numpy_client.NumPyClient.get_parameters:8 of +msgid "**parameters** -- The local model parameters as a list of NumPy ndarrays." +msgstr "**parameters** -- 로컬 모델 파라미터를 NumPy 배열 목록으로 표시합니다." + +#: flwr.client.numpy_client.NumPyClient.get_properties:3 of +msgid "" +"Configuration parameters requested by the server. This can be used to " +"tell the client which properties are needed along with some Scalar " +"attributes." +msgstr "" +"서버에서 요청하는 구성 매개변수입니다. 이는 일부 스칼라 속성과 함께 어떤 속성이 필요한지 클라이언트에게 알려주는 데 사용할 수 " +"있습니다." + +#: flwr.client.numpy_client.NumPyClient.get_properties:8 of +msgid "" +"**properties** -- A dictionary mapping arbitrary string keys to values of" +" type bool, bytes, float, int, or str. It can be used to communicate " +"arbitrary property values back to the server." +msgstr "" +"**properties** -- 임의의 문자열 키를 bool, bytes, float, int 또는 str 타입의 값에 매핑하는 " +"dictionary입니다. 임의의 속성 값을 서버에 다시 전달하는 데 사용할 수 있습니다." + +#: ../../source/ref-api/flwr.client.mod.rst:2 +msgid "mod" +msgstr "mod" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`adaptiveclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" +msgstr "" +":py:obj:`adaptiveclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:1 of +msgid "Client-side adaptive clipping modifier." +msgstr "클라이언트 측 적응형 클리핑 수정자." 
+ +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`fixedclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" +msgstr "" +":py:obj:`fixedclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:1 of +msgid "Client-side fixed clipping modifier." +msgstr "클라이언트 측 고정 클리핑 수정자." + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" +msgstr ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.utils.make_ffn:1 of +msgid "." +msgstr "." + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`message_size_mod `\\ \\(msg\\," +" ctxt\\, call\\_next\\)" +msgstr "" +":py:obj:`message_size_mod `\\ \\(msg\\," +" ctxt\\, call\\_next\\)" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.comms_mods.message_size_mod:1 of +msgid "Message size mod." +msgstr "메시지 크기 수정." + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`parameters_size_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" +msgstr "" +":py:obj:`parameters_size_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.comms_mods.parameters_size_mod:1 of +msgid "Parameters size mod." +msgstr "매개변수 크기 mod." + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`secagg_mod `\\ \\(msg\\, ctxt\\, " +"call\\_next\\)" +msgstr "" +":py:obj:`secagg_mod `\\ \\(msg\\, ctxt\\, " +"call\\_next\\)" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.secure_aggregation.secagg_mod.secagg_mod:1 of +msgid "Handle incoming message and return results, following the SecAgg protocol." +msgstr "SecAgg 프로토콜에 따라 수신 메시지를 처리하고 결과를 반환합니다." 
+ +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`secaggplus_mod `\\ \\(msg\\, " +"ctxt\\, call\\_next\\)" +msgstr "" +":py:obj:`secaggplus_mod `\\ \\(msg\\, " +"ctxt\\, call\\_next\\)" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.secure_aggregation.secaggplus_mod.secaggplus_mod:1 of +msgid "" +"Handle incoming message and return results, following the SecAgg+ " +"protocol." +msgstr "SecAgg+ 프로토콜에 따라 수신 메시지를 처리하고 결과를 반환합니다." + +#: ../../source/ref-api/flwr.client.mod.rst:35::1 +msgid "" +":py:obj:`LocalDpMod `\\ \\(clipping\\_norm\\," +" sensitivity\\, ...\\)" +msgstr "" +":py:obj:`LocalDpMod `\\ \\(clipping\\_norm\\," +" sensitivity\\, ...\\)" + +#: ../../source/ref-api/flwr.client.mod.rst:35::1 +#: flwr.client.mod.localdp_mod.LocalDpMod:1 of +msgid "Modifier for local differential privacy." +msgstr "로컬 차분 프라이버시를 위한 수정자." + +#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:2 +msgid "LocalDpMod" +msgstr "LocalDpMod" + +#: flwr.client.mod.localdp_mod.LocalDpMod:3 of +msgid "" +"This mod clips the client model updates and adds noise to the params " +"before sending them to the server." +msgstr "이 모드는 클라이언트 모델 업데이트를 클립하고 서버로 보내기 전에 파라미터에 노이즈를 추가합니다." + +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:12 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:10 +#: flwr.client.mod.localdp_mod.LocalDpMod:6 of +msgid "It operates on messages of type `MessageType.TRAIN`." +msgstr "이 함수는 `MessageType.TRAIN` 유형의 메시지에 대해 작동합니다." + +#: flwr.client.mod.localdp_mod.LocalDpMod:8 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 +#: of +msgid "The value of the clipping norm." +msgstr "클리핑 기준값입니다." + +#: flwr.client.mod.localdp_mod.LocalDpMod:10 of +msgid "The sensitivity of the client model." +msgstr "클라이언트 모델의 민감도입니다." 
+ +#: flwr.client.mod.localdp_mod.LocalDpMod:12 of +msgid "" +"The privacy budget. Smaller value of epsilon indicates a higher level of " +"privacy protection." +msgstr "개인정보 보호 예산. 엡실론 값이 작을수록 개인정보 보호 수준이 높음을 나타냅니다." + +#: flwr.client.mod.localdp_mod.LocalDpMod:15 of +msgid "" +"The failure probability. The probability that the privacy mechanism fails" +" to provide the desired level of privacy. A smaller value of delta " +"indicates a stricter privacy guarantee." +msgstr "" +"실패 확률입니다. 프라이버시 메커니즘이 원하는 수준의 프라이버시를 제공하지 못할 확률입니다. 델타 값이 작을수록 프라이버시가 더 " +"엄격하게 보장된다는 의미입니다." + +#: flwr.client.mod.localdp_mod.LocalDpMod:23 of +msgid "Create an instance of the local DP mod and add it to the client-side mods:" +msgstr "로컬 DP 모드의 인스턴스를 생성하고 클라이언트 측 모드에 추가합니다:" + +#: ../../source/ref-api/flwr.client.mod.adaptiveclipping_mod.rst:2 +msgid "adaptiveclipping\\_mod" +msgstr "adaptiveclipping\\_mod" + +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:3 of +msgid "" +"This mod needs to be used with the " +"DifferentialPrivacyClientSideAdaptiveClipping server-side strategy " +"wrapper." +msgstr "이 모드는 서버 측 전략 래퍼인 차분 프라이버시 클라이언트 측 적응형 클리핑과 함께 사용해야 합니다." + +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:6 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:6 of +msgid "The wrapper sends the clipping_norm value to the client." +msgstr "래퍼는 클라이언트에 clipping_norm 값을 전송합니다." + +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:8 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:8 of +msgid "This mod clips the client model updates before sending them to the server." +msgstr "이 모드는 클라이언트 모델 업데이트를 서버로 보내기 전에 클립합니다." + +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:10 of +msgid "" +"It also sends KEY_NORM_BIT to the server for computing the new clipping " +"value." +msgstr "또한 새 클리핑 값을 계산하기 위해 서버로 KEY_NORM_BIT을 전송합니다." 
+ +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:15 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:13 +#: flwr.server.driver.driver.Driver.send_and_receive:18 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 +#: of +msgid "Notes" +msgstr "참고" + +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:16 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:14 of +msgid "Consider the order of mods when using multiple." +msgstr "여러 개를 사용할 때는 모드의 순서를 고려하세요." + +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:18 of +msgid "Typically, adaptiveclipping_mod should be the last to operate on params." +msgstr "일반적으로 adaptiveclipping_mod는 매개변수에서 가장 마지막으로 작동해야 합니다." + +#: ../../source/ref-api/flwr.client.mod.fixedclipping_mod.rst:2 +msgid "fixedclipping\\_mod" +msgstr "fixedclipping\\_mod" + +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:3 of +msgid "" +"This mod needs to be used with the " +"DifferentialPrivacyClientSideFixedClipping server-side strategy wrapper." +msgstr "이 모드는 서버 측 전략 래퍼인 DifferentialPrivacyClientSideFixedClipping과 함께 사용해야 합니다." + +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:16 of +msgid "Typically, fixedclipping_mod should be the last to operate on params." +msgstr "일반적으로 fixedclipping_mod는 매개변수에서 가장 마지막으로 작동해야 합니다." + +#: ../../source/ref-api/flwr.client.mod.make_ffn.rst:2 +msgid "make\\_ffn" +msgstr "make\\_ffn" + +#: ../../source/ref-api/flwr.client.mod.message_size_mod.rst:2 +msgid "message\\_size\\_mod" +msgstr "message\\_size\\_mod" + +#: flwr.client.mod.comms_mods.message_size_mod:3 of +msgid "This mod logs the size in bytes of the message being transmited." +msgstr "이 모드는 전송되는 메시지의 크기를 바이트 단위로 기록합니다." 
+ +#: ../../source/ref-api/flwr.client.mod.parameters_size_mod.rst:2 +msgid "parameters\\_size\\_mod" +msgstr "parameters\\_size\\_mod" + +#: flwr.client.mod.comms_mods.parameters_size_mod:3 of +msgid "" +"This mod logs the number of parameters transmitted in the message as well" +" as their size in bytes." +msgstr "이 모드는 메시지에서 전송된 매개변수의 수와 그 크기를 바이트 단위로 기록합니다." + +#: ../../source/ref-api/flwr.client.mod.secagg_mod.rst:2 +msgid "secagg\\_mod" +msgstr "secagg\\_mod" + +#: ../../source/ref-api/flwr.client.mod.secaggplus_mod.rst:2 +msgid "secaggplus\\_mod" +msgstr "secaggplus\\_mod" + +#: ../../source/ref-api/flwr.client.start_client.rst:2 +msgid "start\\_client" +msgstr "start\\_client" + +#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of +msgid "" +"The IPv4 or IPv6 address of the server. If the Flower server runs on the " +"same machine on port 8080, then `server_address` would be " +"`\"[::]:8080\"`." +msgstr "" +"서버의 IPv4 또는 IPv6 주소입니다. Flower 서버가 포트 8080의 동일한 컴퓨터에서 실행되는 경우 `서버_주소`는 " +"`\"[::]:8080\"`이 됩니다." + +#: flwr.client.app.start_client:7 of +msgid "A callable that instantiates a Client. (default: None)" +msgstr "클라이언트를 인스턴스화하는 호출 가능 항목입니다. (기본값: None)" + +#: flwr.client.app.start_client:9 of +msgid "" +"An implementation of the abstract base class `flwr.client.Client` " +"(default: None)" +msgstr "추상 베이스 클래스 `flwr.client.Client`의 구현(기본값: None)" + +#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of +msgid "" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" server. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower server needs to be started with the same value (see " +"`flwr.server.start_server`), otherwise it will not know about the " +"increased limit and block larger messages." +msgstr "" +"Flower 서버와 교환할 수 있는 gRPC 메시지의 최대 길이입니다. 기본값은 대부분의 모델에 충분합니다. 
매우 큰 모델을 " +"훈련하는 사용자는 이 값을 늘려야 할 수도 있습니다. Flower 서버는 동일한 값으로 시작해야 " +"하며(`flwr.server.start_server` 참조), 그렇지 않으면 증가된 제한을 알지 못해 더 큰 메시지를 차단합니다." + +#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 of +msgid "" +"The PEM-encoded root certificates as a byte string or a path string. If " +"provided, a secure connection using the certificates will be established " +"to an SSL-enabled Flower server." +msgstr "" +"바이트 문자열 또는 경로 문자열로 PEM 인코딩된 루트 인증서. 제공하면 인증서를 사용하여 SSL이 활성화된 Flower 서버에 " +"보안 연결이 설정됩니다." + +#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of +msgid "" +"Starts an insecure gRPC connection when True. Enables HTTPS connection " +"when False, using system certificates if `root_certificates` is None." +msgstr "" +"True일 경우 안전하지 않은 gRPC 연결을 시작합니다. root_certificates`가 None인 경우 시스템 인증서를 " +"사용하여 False일 때 HTTPS 연결을 활성화합니다." + +#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of +msgid "" +"Configure the transport layer. Allowed values: - 'grpc-bidi': gRPC, " +"bidirectional streaming - 'grpc-rere': gRPC, request-response " +"(experimental) - 'rest': HTTP (experimental)" +msgstr "" +"전송 계층을 구성합니다. 허용되는 값입니다: - 'grpc-bidi': gRPC, 양방향 스트리밍 - 'grpc-rere': " +"gRPC, 요청-응답(실험적) - 'rest': HTTP(실험적)" + +#: flwr.client.app.start_client:31 of +msgid "" +"The maximum number of times the client will try to connect to the server " +"before giving up in case of a connection error. If set to None, there is " +"no limit to the number of tries." +msgstr "" +"연결 오류 발생 시 클라이언트가 서버 연결을 포기하기 전에 시도하는 최대 횟수입니다. None으로 설정하면 시도 횟수에 제한이 " +"없습니다." + +#: flwr.client.app.start_client:35 of +msgid "" +"The maximum duration before the client stops trying to connect to the " +"server in case of connection error. If set to None, there is no limit to " +"the total time." +msgstr "" +"연결 오류 발생 시 클라이언트가 서버에 대한 연결을 시도하지 않는 최대 기간입니다. None으로 설정하면 총 시간에는 제한이 " +"없습니다." 
+ +#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of +msgid "Starting a gRPC client with an insecure server connection:" +msgstr "안전하지 않은 서버 연결로 gRPC 클라이언트 시작하기:" + +#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of +msgid "Starting an SSL-enabled gRPC client using system certificates:" +msgstr "시스템 인증서를 사용하여 SSL 사용 gRPC 클라이언트를 시작합니다:" + +#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of +msgid "Starting an SSL-enabled gRPC client using provided certificates:" +msgstr "제공된 인증서를 사용하여 SSL 지원 gRPC 클라이언트를 시작합니다:" + +#: ../../source/ref-api/flwr.client.start_numpy_client.rst:2 +msgid "start\\_numpy\\_client" +msgstr "start\\_numpy\\_client" + +#: flwr.client.app.start_numpy_client:5 of +msgid "" +"This function is deprecated since 1.7.0. Use " +":code:`flwr.client.start_client` instead and first convert your " +":code:`NumPyClient` to type :code:`flwr.client.Client` by executing its " +":code:`to_client()` method." +msgstr "" +"이 함수는 1.7.0부터 더 이상 사용되지 않습니다. 대신 :code:`flwr.client.start_client`를 사용하고 " +"먼저 :code:`to_client()` 메서드를 실행하여 :code:`NumPyClient`를 " +":code:`flwr.client.Client` 유형으로 변환합니다." + +#: flwr.client.app.start_numpy_client:13 of +msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." +msgstr "추상 베이스 클래스 `flwr.client.NumPyClient`의 구현입니다." + +#: ../../source/ref-api/flwr.common.rst:2 +msgid "common" +msgstr "공통" + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" +msgstr ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.record.conversion_utils.array_from_numpy:1 of +msgid "Create Array from NumPy ndarray." +msgstr "NumPy에서 배열을 만듭니다." 
+ +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" +msgstr ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.bytes_to_ndarray:1 of +msgid "Deserialize NumPy ndarray from bytes." +msgstr "바이트에서 NumPy를 역직렬화합니다." + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`configure `\\ \\(identifier\\[\\, " +"filename\\, host\\]\\)" +msgstr "" +":py:obj:`configure `\\ \\(identifier\\[\\, " +"filename\\, host\\]\\)" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.logger.configure:1 of +msgid "Configure logging to file and/or remote log server." +msgstr "파일 및/또는 원격 로그 서버에 로깅을 구성합니다." + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`event `\\ \\(event\\_type\\[\\, " +"event\\_details\\]\\)" +msgstr "" +":py:obj:`event `\\ \\(event\\_type\\[\\, " +"event\\_details\\]\\)" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.telemetry.event:1 of +msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." +msgstr "차단을 피하기 위해 create_event를 ThreadPoolExecutor에 제출합니다." + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " +"\\*\\*kwargs\\)" +msgstr "" +":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " +"\\*\\*kwargs\\)" + +#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 +#: of +msgid "Log 'msg % args' with the integer severity 'level'." +msgstr "정수 심각도 'level'과 함께 'msg % args'를 기록합니다." + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" +msgstr ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.ndarray_to_bytes:1 of +msgid "Serialize NumPy ndarray to bytes." +msgstr "NumPy와 배열을 바이트열로 직렬화합니다." 
+ +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`ndarrays_to_parameters `\\ " +"\\(ndarrays\\)" +msgstr "" +":py:obj:`ndarrays_to_parameters `\\ " +"\\(ndarrays\\)" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.ndarrays_to_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 +#: of +msgid "Convert NumPy ndarrays to parameters object." +msgstr "NumPy 배열을 매개변수 객체로 변환합니다." + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`now `\\ \\(\\)" +msgstr ":py:obj:`now `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.date.now:1 of +msgid "Construct a datetime from time.time() with time zone set to UTC." +msgstr "표준 시간대를 UTC로 설정하여 time.time()에서 날짜 시간을 생성합니다." + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`parameters_to_ndarrays `\\ " +"\\(parameters\\)" +msgstr "" +":py:obj:`parameters_to_ndarrays `\\ " +"\\(parameters\\)" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.parameters_to_ndarrays:1 of +msgid "Convert parameters object to NumPy ndarrays." +msgstr "매개변수 객체를 NumPy 배열로 변환합니다." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " +"data\\)" +msgstr "" +":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " +"data\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.parametersrecord.Array:1 of +msgid "Array type." +msgstr "배열 유형." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`ClientMessage `\\ " +"\\(\\[get\\_properties\\_res\\, ...\\]\\)" +msgstr "" +":py:obj:`ClientMessage `\\ " +"\\(\\[get\\_properties\\_res\\, ...\\]\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ClientMessage:1 of +msgid "ClientMessage is a container used to hold one result message." 
+msgstr "ClientMessage는 하나의 결과 메시지를 저장하는 데 사용되는 컨테이너입니다." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`Code `\\ \\(value\\)" +msgstr ":py:obj:`Code `\\ \\(value\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Code:1 of +msgid "Client status codes." +msgstr "클라이언트 상태 코드." + +#: ../../source/ref-api/flwr.common.rst:68::1 +#, fuzzy +msgid ":py:obj:`Config `\\" +msgstr ":py:obj:`config `\\" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#, fuzzy +msgid "" +"alias of :py:class:`dict`\\ [:py:class:`str`, :py:class:`bool` | " +":py:class:`bytes` | :py:class:`float` | :py:class:`int` | " +":py:class:`str`]" +msgstr "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | " +":py:class:`~typing.List`\\ [:py:class:`int`] | :py:class:`~typing.List`\\" +" [:py:class:`float`]]" + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`ConfigsRecord `\\ " +"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" +msgstr "" +":py:obj:`ConfigsRecord `\\ " +"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.configsrecord.ConfigsRecord:1 of +msgid "Configs record." +msgstr "레코드를 설정합니다." + +#: ../../source/ref-api/flwr.common.rst:68::1 +#, fuzzy +msgid "" +":py:obj:`Context `\\ \\(node\\_id\\, " +"node\\_config\\, state\\, run\\_config\\)" +msgstr ":py:obj:`Context `\\ \\(state\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.context.Context:1 of +#, fuzzy +msgid "Context of your run." +msgstr "실행 상태." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" +msgstr ":py:obj:`DisconnectRes `\\ \\(reason\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.DisconnectRes:1 of +msgid "DisconnectRes message from client to server." +msgstr "클라이언트에서 서버로 연결 해제 메시지를 보냅니다." 
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)"
+msgstr ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.message.Error:1 of
+msgid "A dataclass that stores information about an error that occurred."
+msgstr "발생한 오류에 대한 정보를 저장하는 데이터 클래스입니다."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+":py:obj:`EvaluateIns `\\ \\(parameters\\, "
+"config\\)"
+msgstr ""
+":py:obj:`EvaluateIns `\\ \\(parameters\\, "
+"config\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.typing.EvaluateIns:1 of
+msgid "Evaluate instructions for a client."
+msgstr "클라이언트에 대한 평가 지침입니다."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, "
+"num\\_examples\\, metrics\\)"
+msgstr ""
+":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, "
+"num\\_examples\\, metrics\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.typing.EvaluateRes:1 of
+msgid "Evaluate response from a client."
+msgstr "클라이언트의 평가 응답입니다."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ":py:obj:`EventType `\\ \\(value\\)"
+msgstr ":py:obj:`EventType `\\ \\(value\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.telemetry.EventType:1 of
+msgid "Types of telemetry events."
+msgstr "원격 분석 이벤트의 유형."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)"
+msgstr ":py:obj:`FitIns `\\ \\(parameters\\, config\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.typing.FitIns:1 of
+msgid "Fit instructions for a client."
+msgstr "클라이언트에 대한 적합성(fit) 지침입니다."
+ +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`FitRes `\\ \\(status\\, parameters\\, " +"num\\_examples\\, metrics\\)" +msgstr "" +":py:obj:`FitRes `\\ \\(status\\, parameters\\, " +"num\\_examples\\, metrics\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.FitRes:1 of +msgid "Fit response from a client." +msgstr "클라이언트의 적합성 응답." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" +msgstr ":py:obj:`GetParametersIns `\\ \\(config\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetParametersIns:1 of +msgid "Parameters request for a client." +msgstr "클라이언트에 대한 매개변수 요청입니다." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`GetParametersRes `\\ \\(status\\, " +"parameters\\)" +msgstr "" +":py:obj:`GetParametersRes `\\ \\(status\\, " +"parameters\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetParametersRes:1 of +msgid "Response when asked to return parameters." +msgstr "매개변수 반환 요청 시 응답합니다." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" +msgstr ":py:obj:`GetPropertiesIns `\\ \\(config\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetPropertiesIns:1 of +msgid "Properties request for a client." +msgstr "클라이언트에 대한 속성 요청." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`GetPropertiesRes `\\ \\(status\\, " +"properties\\)" +msgstr "" +":py:obj:`GetPropertiesRes `\\ \\(status\\, " +"properties\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetPropertiesRes:1 of +msgid "Properties response from a client." +msgstr "클라이언트의 속성 응답을 확인합니다." 
+ +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " +"error\\]\\)" +msgstr "" +":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " +"error\\]\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.message.Message:1 of +msgid "State of your application from the viewpoint of the entity using it." +msgstr "애플리케이션을 사용하는 엔티티의 관점에서 애플리케이션의 상태입니다." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`MessageType `\\ \\(\\)" +msgstr ":py:obj:`MessageType `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.constant.MessageType:1 of +msgid "Message type." +msgstr "메시지 타입." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" +msgstr ":py:obj:`MessageTypeLegacy `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.constant.MessageTypeLegacy:1 of +msgid "Legacy message type." +msgstr "레거시 메시지 타입." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`Metadata `\\ \\(run\\_id\\, " +"message\\_id\\, src\\_node\\_id\\, ...\\)" +msgstr "" +":py:obj:`Metadata `\\ \\(run\\_id\\, " +"message\\_id\\, src\\_node\\_id\\, ...\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.message.Metadata:1 of +msgid "A dataclass holding metadata associated with the current message." +msgstr "현재 메시지와 관련된 메타데이터를 보유한 데이터 클래스입니다." + +#: ../../source/ref-api/flwr.common.rst:68::1 +#, fuzzy +msgid ":py:obj:`Metrics `\\" +msgstr ":py:obj:`metrics `\\" + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`MetricsRecord `\\ " +"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" +msgstr "" +":py:obj:`MetricsRecord `\\ " +"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.metricsrecord.MetricsRecord:1 of +#, fuzzy +msgid "Metrics recod." +msgstr "메트릭 기록." 
+ +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`NDArray `\\" +msgstr ":py:obj:`NDArray `\\" + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " +":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" +msgstr "" +"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " +":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#, fuzzy +msgid ":py:obj:`NDArrays `\\" +msgstr ":py:obj:`NDArray `\\" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#, fuzzy +msgid "" +"alias of :py:class:`list`\\ [:py:class:`~numpy.ndarray`\\ " +"[:py:obj:`~typing.Any`, :py:class:`~numpy.dtype`\\ " +"[:py:obj:`~typing.Any`]]]" +msgstr "" +"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " +":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`Parameters `\\ \\(tensors\\, " +"tensor\\_type\\)" +msgstr "" +":py:obj:`Parameters `\\ \\(tensors\\, " +"tensor\\_type\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Parameters:1 of +msgid "Model parameters." +msgstr "모델 매개변수." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`ParametersRecord `\\ " +"\\(\\[array\\_dict\\, keep\\_input\\]\\)" +msgstr "" +":py:obj:`ParametersRecord `\\ " +"\\(\\[array\\_dict\\, keep\\_input\\]\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.parametersrecord.ParametersRecord:1 of +msgid "Parameters record." +msgstr "매개변수 기록." 
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#, fuzzy
+msgid ":py:obj:`Properties `\\"
+msgstr ":py:obj:`properties `\\"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)"
+msgstr ":py:obj:`ReconnectIns `\\ \\(seconds\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.typing.ReconnectIns:1 of
+msgid "ReconnectIns message from server to client."
+msgstr "서버에서 클라이언트로 보내는 ReconnectIns 메시지입니다."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+":py:obj:`RecordSet `\\ "
+"\\(\\[parameters\\_records\\, ...\\]\\)"
+msgstr ""
+":py:obj:`RecordSet `\\ "
+"\\(\\[parameters\\_records\\, ...\\]\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.record.recordset.RecordSet:1 of
+msgid "RecordSet stores groups of parameters, metrics and configs."
+msgstr "RecordSet은 매개변수, 메트릭 및 설정 그룹을 저장합니다."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+":py:obj:`ServerMessage `\\ "
+"\\(\\[get\\_properties\\_ins\\, ...\\]\\)"
+msgstr ""
+":py:obj:`ServerMessage `\\ "
+"\\(\\[get\\_properties\\_ins\\, ...\\]\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.typing.ServerMessage:1 of
+msgid "ServerMessage is a container used to hold one instruction message."
+msgstr "ServerMessage는 하나의 instruction 메시지를 저장하는 데 사용되는 컨테이너입니다."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ":py:obj:`Status `\\ \\(code\\, message\\)"
+msgstr ":py:obj:`Status `\\ \\(code\\, message\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.typing.Status:1 of
+msgid "Client status."
+msgstr "클라이언트 상태."
+
+#: ../../source/ref-api/flwr.common.Array.rst:2
+msgid "Array"
+msgstr "배열"
+
+#: flwr.common.record.parametersrecord.Array:3 of
+msgid ""
+"A dataclass containing serialized data from an array-like or tensor-like "
+"object along with some metadata about it."
+msgstr "배열형 또는 텐서형 객체의 직렬화된 데이터와 그에 대한 일부 메타데이터를 포함하는 데이터 클래스입니다."
+ +#: flwr.common.record.parametersrecord.Array:6 of +msgid "" +"A string representing the data type of the serialised object (e.g. " +"`np.float32`)" +msgstr "직렬화된 객체의 데이터 유형을 나타내는 문자열(예: `np.float32`)" + +#: flwr.common.record.parametersrecord.Array:8 of +msgid "" +"A list representing the shape of the unserialized array-like object. This" +" is used to deserialize the data (depending on the serialization method) " +"or simply as a metadata field." +msgstr "" +"직렬화되지 않은 배열과 같은 객체의 모양을 나타내는 목록입니다. 직렬화 방법에 따라 데이터를 역직렬화하는 데 사용되거나 단순히 " +"메타데이터 필드로 사용됩니다." + +#: flwr.common.record.parametersrecord.Array:12 of +msgid "" +"A string indicating the type of serialisation mechanism used to generate " +"the bytes in `data` from an array-like or tensor-like object." +msgstr "배열형 또는 텐서형 객체에서 `데이터`의 바이트를 생성하는 데 사용되는 직렬화 메커니즘의 유형을 나타내는 문자열입니다." + +#: flwr.common.record.parametersrecord.Array:15 of +msgid "A buffer of bytes containing the data." +msgstr "데이터를 포함하는 바이트 버퍼입니다." + +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +msgid ":py:obj:`numpy `\\ \\(\\)" +msgstr ":py:obj:`numpy `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +#: flwr.common.record.parametersrecord.Array.numpy:1 of +msgid "Return the array as a NumPy array." +msgstr "배열을 NumPy 배열로 반환합니다." 
+ +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`dtype `\\" +msgstr ":py:obj:`dtype `\\" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`shape `\\" +msgstr ":py:obj:`shape `\\" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`stype `\\" +msgstr ":py:obj:`stype `\\" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`data `\\" +msgstr ":py:obj:`data `\\" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:2 +msgid "ClientMessage" +msgstr "클라이언트 메시지" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid ":py:obj:`evaluate_res `\\" +msgstr ":py:obj:`evaluate_res `\\" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid ":py:obj:`fit_res `\\" +msgstr ":py:obj:`fit_res `\\" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid "" +":py:obj:`get_parameters_res " +"`\\" +msgstr "" +":py:obj:`get_parameters_res " +"`\\" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid "" +":py:obj:`get_properties_res " +"`\\" +msgstr "" +":py:obj:`get_properties_res " +"`\\" + +#: ../../source/ref-api/flwr.common.Code.rst:2 +msgid "Code" +msgstr "코드" + +#: flwr.common.typing.Code:1 of +msgid "Bases: :py:class:`~enum.Enum`" +msgstr "Bases: :py:class:`~enum.Enum`" + +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid ":py:obj:`OK `\\" +msgstr ":py:obj:`OK `\\" + +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid "" +":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " +"`\\" +msgstr "" +":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " +"`\\" + +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid "" +":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " +"`\\" +msgstr "" +":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " +"`\\" + +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid ":py:obj:`FIT_NOT_IMPLEMENTED `\\" +msgstr ":py:obj:`FIT_NOT_IMPLEMENTED `\\" + +#: 
../../source/ref-api/flwr.common.Code.rst:26::1 +msgid "" +":py:obj:`EVALUATE_NOT_IMPLEMENTED " +"`\\" +msgstr "" +":py:obj:`EVALUATE_NOT_IMPLEMENTED " +"`\\" + +#: ../../source/ref-api/flwr.common.Config.rst:2 +#, fuzzy +msgid "Config" +msgstr "구성" + +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 +msgid "ConfigsRecord" +msgstr "컨피그 레코드" + +#: flwr.common.record.configsrecord.ConfigsRecord:1 of +#, fuzzy +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | :py:class:`str` |" +" :py:class:`bytes` | :py:class:`bool` | :py:class:`list`\\ " +"[:py:class:`int`] | :py:class:`list`\\ [:py:class:`float`] | " +":py:class:`list`\\ [:py:class:`str`] | :py:class:`list`\\ " +"[:py:class:`bytes`] | :py:class:`list`\\ [:py:class:`bool`]]" +msgstr "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | :py:class:`str` |" +" :py:class:`bytes` | :py:class:`bool` | :py:class:`~typing.List`\\ " +"[:py:class:`int`] | :py:class:`~typing.List`\\ [:py:class:`float`] | " +":py:class:`~typing.List`\\ [:py:class:`str`] | :py:class:`~typing.List`\\" +" [:py:class:`bytes`] | :py:class:`~typing.List`\\ [:py:class:`bool`]]" + +#: flwr.common.record.configsrecord.ConfigsRecord:3 of +msgid "" +"A :code:`ConfigsRecord` is a Python dictionary designed to ensure that " +"each key-value pair adheres to specified data types. A " +":code:`ConfigsRecord` is one of the types of records that a " +"`flwr.common.RecordSet `_ supports " +"and can therefore be used to construct :code:`common.Message` objects." +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord:9 of +msgid "" +"A dictionary that stores basic types (i.e. `str`, `int`, `float`, `bytes`" +" as defined in `ConfigsScalar`) and lists of such types (see " +"`ConfigsScalarList`)." 
+msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord:13 of +msgid "" +"A boolean indicating whether config passed should be deleted from the " +"input dictionary immediately after adding them to the record. When set to" +" True, the data is duplicated in memory. If memory is a concern, set it " +"to False." +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord:21 of +msgid "" +"The usage of a :code:`ConfigsRecord` is envisioned for sending " +"configuration values telling the target node how to perform a certain " +"action (e.g. train/evaluate a model ). You can use standard Python built-" +"in types such as :code:`float`, :code:`str` , :code:`bytes`. All types " +"allowed are defined in :code:`flwr.common.ConfigsRecordValues`. While " +"lists are supported, we encourage you to use a :code:`ParametersRecord` " +"instead if these are of high dimensionality." +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord:29 of +msgid "" +"Let's see some examples of how to construct a :code:`ConfigsRecord` from " +"scratch:" +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord:42 of +msgid "" +"Just like the other types of records in a :code:`flwr.common.RecordSet`, " +"types are enforced. If you need to add a custom data structure or object," +" we recommend to serialise it into bytes and save it as such (bytes are " +"allowed in a :code:`ConfigsRecord`)" +msgstr "" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr ":py:obj:`clear `\\ \\(\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr ":py:obj:`count_bytes `\\ \\(\\)" + +#: collections.abc.MutableMapping.clear:1::1 +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 +#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 of +msgid "Return number of Bytes stored in this object." 
+msgstr "이 객체에 저장된 바이트 수를 반환합니다." + +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr ":py:obj:`keys `\\ \\(\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" + +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.pop:1 of +msgid "If key is not found, d is returned if given, otherwise KeyError is raised." +msgstr "키를 찾을 수 없으면 주어진 경우 d가 반환되고, 그렇지 않으면 KeyError가 발생합니다." + +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`popitem `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" + +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.popitem:1 of +msgid "as a 2-tuple; but raise KeyError if D is empty." 
+msgstr "" + +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid "" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" +msgstr "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" + +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.update:1 of +msgid "" +"If E present and has a .keys() method, does: for k in E: D[k] = E[k] " +"If E present and lacks .keys() method, does: for (k, v) in E: D[k] = " +"v In either case, this is followed by: for k, v in F.items(): D[k] = v" +msgstr "" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" +msgstr ":py:obj:`values `\\ \\(\\)" + +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of +msgid "This function counts booleans as occupying 1 Byte." +msgstr "이 함수는 booleans을 1바이트를 차지하는 것으로 계산합니다." + +#: ../../source/ref-api/flwr.common.Context.rst:2 +msgid "Context" +msgstr "컨텍스트" + +#: flwr.common.context.Context:3 of +#, fuzzy +msgid "The ID that identifies the node." +msgstr "오류 식별자입니다." + +#: flwr.common.context.Context:5 of +msgid "" +"A config (key/value mapping) unique to the node and independent of the " +"`run_config`. This config persists across all runs this node participates" +" in." +msgstr "" + +#: flwr.common.context.Context:8 of +msgid "" +"Holds records added by the entity in a given run and that will stay " +"local. This means that the data it holds will never leave the system it's" +" running from. This can be used as an intermediate storage or scratchpad " +"when executing mods. It can also be used as a memory to access at " +"different points during the lifecycle of this entity (e.g. across " +"multiple rounds)" +msgstr "" +"특정 실행에서 엔티티가 추가한 레코드를 보유하며 로컬에 유지됩니다. 즉, 저장된 데이터는 실행 중인 시스템을 벗어나지 않습니다. " +"모드를 실행할 때 중간 저장소나 스크래치 패드로 사용할 수 있습니다. 
또한 이 엔티티의 수명 주기 동안 다른 시점에서 액세스하기 " +"위한 메모리로도 사용할 수 있습니다(예: 여러 라운드에 걸쳐)" + +#: flwr.common.context.Context:15 of +msgid "" +"A config (key/value mapping) held by the entity in a given run and that " +"will stay local. It can be used at any point during the lifecycle of this" +" entity (e.g. across multiple rounds)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#, fuzzy +msgid ":py:obj:`node_id `\\" +msgstr ":py:obj:`src_node_id `\\" + +#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#, fuzzy +msgid ":py:obj:`node_config `\\" +msgstr ":py:obj:`config `\\" + +#: ../../source/ref-api/flwr.common.Context.rst:31::1 +msgid ":py:obj:`state `\\" +msgstr ":py:obj:`state `\\" + +#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#, fuzzy +msgid ":py:obj:`run_config `\\" +msgstr ":py:obj:`config `\\" + +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 +msgid "DisconnectRes" +msgstr "연결 해제" + +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:28::1 +msgid ":py:obj:`reason `\\" +msgstr ":py:obj:`reason `\\" + +#: ../../source/ref-api/flwr.common.Error.rst:2 +msgid "Error" +msgstr "오류" + +#: flwr.common.message.Error:3 of +msgid "An identifier for the error." +msgstr "오류 식별자입니다." + +#: flwr.common.message.Error:5 of +msgid "A reason for why the error arose (e.g. an exception stack-trace)" +msgstr "오류가 발생한 이유(예: 예외 스택 추적)" + +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`code `\\" +msgstr ":py:obj:`code `\\" + +#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of +msgid "Error code." +msgstr "오류 코드." + +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`reason `\\" +msgstr ":py:obj:`reason `\\" + +#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of +msgid "Reason reported about the error." +msgstr "오류에 대해 보고된 사유입니다." 
+ +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 +msgid "EvaluateIns" +msgstr "평가" + +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +msgid ":py:obj:`parameters `\\" +msgstr ":py:obj:`parameters `\\" + +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +msgid ":py:obj:`config `\\" +msgstr ":py:obj:`config `\\" + +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:2 +msgid "EvaluateRes" +msgstr "EvaluateRes" + +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`status `\\" +msgstr ":py:obj:`status `\\" + +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`loss `\\" +msgstr ":py:obj:`loss `\\" + +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`num_examples `\\" +msgstr ":py:obj:`num_examples `\\" + +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`metrics `\\" +msgstr ":py:obj:`metrics `\\" + +#: ../../source/ref-api/flwr.common.EventType.rst:2 +msgid "EventType" +msgstr "이벤트 타입" + +#: flwr.common.telemetry.EventType:1 of +msgid "Bases: :py:class:`str`, :py:class:`~enum.Enum`" +msgstr "Bases: :py:class:`str`, :py:class:`~enum.Enum`" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`encode `\\ \\(\\[encoding\\, " +"errors\\]\\)" +msgstr "" +":py:obj:`encode `\\ \\(\\[encoding\\, " +"errors\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.encode:1 of +msgid "Encode the string using the codec registered for encoding." +msgstr "인코딩용으로 등록된 코덱을 사용하여 문자열을 인코딩합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`replace `\\ \\(old\\, new\\[\\, " +"count\\]\\)" +msgstr "" +":py:obj:`replace `\\ \\(old\\, new\\[\\, " +"count\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.replace:1 of +msgid "Return a copy with all occurrences of substring old replaced by new." 
+msgstr "이전 하위 문자열이 모두 새 하위 문자열로 바뀐 사본을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`split `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" +msgstr "" +":py:obj:`split `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rsplit:1 flwr.common.EventType.split:1 of +msgid "" +"Return a list of the substrings in the string, using sep as the separator" +" string." +msgstr "sep를 구분 문자열로 사용하여 문자열의 하위 문자열 목록을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`rsplit `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" +msgstr "" +":py:obj:`rsplit `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`join `\\ \\(iterable\\, \\/\\)" +msgstr ":py:obj:`join `\\ \\(iterable\\, \\/\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.join:1 of +msgid "Concatenate any number of strings." +msgstr "원하는 수의 문자열을 연결합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`capitalize `\\ \\(\\)" +msgstr ":py:obj:`capitalize `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.capitalize:1 of +msgid "Return a capitalized version of the string." +msgstr "대문자로 된 문자열을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`casefold `\\ \\(\\)" +msgstr ":py:obj:`casefold `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.casefold:1 of +msgid "Return a version of the string suitable for caseless comparisons." +msgstr "대소문자 구분 없는 비교에 적합한 문자열을 반환합니다." 
+ +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`title `\\ \\(\\)" +msgstr ":py:obj:`title `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.title:1 of +msgid "Return a version of the string where each word is titlecased." +msgstr "각 단어의 제목이 대소문자로 구분된 문자열을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`center `\\ \\(width\\[\\, " +"fillchar\\]\\)" +msgstr "" +":py:obj:`center `\\ \\(width\\[\\, " +"fillchar\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.center:1 of +msgid "Return a centered string of length width." +msgstr "길이 너비의 가운데 문자열을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" +msgstr "" +":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +"Return the number of non-overlapping occurrences of substring sub in " +"string S[start:end]." +msgstr "문자열 S[start:end]에서 하위 문자열 sub이 겹치지 않는 횟수를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`expandtabs `\\ " +"\\(\\[tabsize\\]\\)" +msgstr "" +":py:obj:`expandtabs `\\ " +"\\(\\[tabsize\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.expandtabs:1 of +msgid "Return a copy where all tab characters are expanded using spaces." +msgstr "모든 탭 문자가 공백을 사용하여 확장된 사본을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" +msgstr "" +":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +"Return the lowest index in S where substring sub is found, such that sub " +"is contained within S[start:end]." 
+msgstr "하위 문자열 sub이 발견되는 S에서 하위가 S[start:end] 내에 포함되는 가장 낮은 인덱스를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`partition `\\ \\(sep\\, \\/\\)" +msgstr ":py:obj:`partition `\\ \\(sep\\, \\/\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.partition:1 flwr.common.EventType.rpartition:1 of +msgid "Partition the string into three parts using the given separator." +msgstr "지정된 구분 기호를 사용하여 문자열을 세 부분으로 분할합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" +msgstr "" +":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`ljust `\\ \\(width\\[\\, " +"fillchar\\]\\)" +msgstr "" +":py:obj:`ljust `\\ \\(width\\[\\, " +"fillchar\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.ljust:1 of +msgid "Return a left-justified string of length width." +msgstr "왼쪽으로 정렬된 길이의 문자열을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`lower `\\ \\(\\)" +msgstr ":py:obj:`lower `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.lower:1 of +msgid "Return a copy of the string converted to lowercase." +msgstr "소문자로 변환된 문자열 사본을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)" +msgstr ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.lstrip:1 of +msgid "Return a copy of the string with leading whitespace removed." +msgstr "선행 공백이 제거된 문자열의 복사본을 반환합니다." 
+ +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" +msgstr "" +":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +"Return the highest index in S where substring sub is found, such that sub" +" is contained within S[start:end]." +msgstr "부분 문자열 sub이 발견되는 곳에서 sub이 S[start:end] 내에 포함되도록 S에서 가장 높은 인덱스를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`rindex `\\ \\(sub\\[\\, " +"start\\[\\, end\\]\\]\\)" +msgstr "" +":py:obj:`rindex `\\ \\(sub\\[\\, " +"start\\[\\, end\\]\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`rjust `\\ \\(width\\[\\, " +"fillchar\\]\\)" +msgstr "" +":py:obj:`rjust `\\ \\(width\\[\\, " +"fillchar\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rjust:1 of +msgid "Return a right-justified string of length width." +msgstr "길이 너비의 오른쪽 정렬된 문자열을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)" +msgstr ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rstrip:1 of +msgid "Return a copy of the string with trailing whitespace removed." +msgstr "후행 공백이 제거된 문자열의 복사본을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`rpartition `\\ \\(sep\\, \\/\\)" +msgstr ":py:obj:`rpartition `\\ \\(sep\\, \\/\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`splitlines `\\ " +"\\(\\[keepends\\]\\)" +msgstr "" +":py:obj:`splitlines `\\ " +"\\(\\[keepends\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.splitlines:1 of +msgid "Return a list of the lines in the string, breaking at line boundaries." +msgstr "문자열의 줄 목록을 줄 경계에서 구분하여 반환합니다." 
+
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1
+msgid ":py:obj:`strip `\\ \\(\\[chars\\]\\)"
+msgstr ":py:obj:`strip `\\ \\(\\[chars\\]\\)"
+
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1
+#: flwr.common.EventType.strip:1 of
+msgid "Return a copy of the string with leading and trailing whitespace removed."
+msgstr "선행 및 후행 공백이 제거된 문자열 사본을 반환합니다."
+
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1
+msgid ":py:obj:`swapcase `\\ \\(\\)"
+msgstr ":py:obj:`swapcase `\\ \\(\\)"
+
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1
+#: flwr.common.EventType.swapcase:1 of
+msgid ""
+"Convert uppercase characters to lowercase and lowercase characters to "
+"uppercase."
+msgstr "대문자를 소문자로, 소문자를 대문자로 변환합니다."
+
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1
+msgid ":py:obj:`translate `\\ \\(table\\, \\/\\)"
+msgstr ":py:obj:`translate `\\ \\(table\\, \\/\\)"
+
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1
+#: flwr.common.EventType.translate:1 of
+msgid "Replace each character in the string using the given translation table."
+msgstr "주어진 번역 테이블을 사용하여 문자열의 각 문자를 바꿉니다."
+
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1
+msgid ":py:obj:`upper `\\ \\(\\)"
+msgstr ":py:obj:`upper `\\ \\(\\)"
+
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1
+#: flwr.common.EventType.upper:1 of
+msgid "Return a copy of the string converted to uppercase."
+msgstr "대문자로 변환된 문자열 사본을 반환합니다."
+
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1
+msgid ""
+":py:obj:`startswith `\\ \\(prefix\\[\\,"
+" start\\[\\, end\\]\\]\\)"
+msgstr ""
+":py:obj:`startswith `\\ \\(prefix\\[\\,"
+" start\\[\\, end\\]\\]\\)"
+
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1
+msgid "Return True if S starts with the specified prefix, False otherwise."
+msgstr "S가 지정된 접두사로 시작하면 True를 반환하고, 그렇지 않으면 False를 반환합니다."
+ +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`endswith `\\ \\(suffix\\[\\, " +"start\\[\\, end\\]\\]\\)" +msgstr "" +":py:obj:`endswith `\\ \\(suffix\\[\\, " +"start\\[\\, end\\]\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return True if S ends with the specified suffix, False otherwise." +msgstr "S가 지정된 접미사로 끝나면 True를 반환하고 그렇지 않으면 False을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`removeprefix `\\ " +"\\(prefix\\, \\/\\)" +msgstr "" +":py:obj:`removeprefix `\\ " +"\\(prefix\\, \\/\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.removeprefix:1 of +msgid "Return a str with the given prefix string removed if present." +msgstr "주어진 접두사 문자열이 있는 경우 제거된 문자열을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`removesuffix `\\ " +"\\(suffix\\, \\/\\)" +msgstr "" +":py:obj:`removesuffix `\\ " +"\\(suffix\\, \\/\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.removesuffix:1 of +msgid "Return a str with the given suffix string removed if present." +msgstr "주어진 접미사 문자열이 있는 경우 제거된 문자열을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isascii `\\ \\(\\)" +msgstr ":py:obj:`isascii `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isascii:1 of +msgid "Return True if all characters in the string are ASCII, False otherwise." +msgstr "문자열의 모든 문자가 ASCII인 경우 True를 반환하고, 그렇지 않으면 False를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`islower `\\ \\(\\)" +msgstr ":py:obj:`islower `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.islower:1 of +msgid "Return True if the string is a lowercase string, False otherwise." +msgstr "문자열이 소문자 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." 
+ +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isupper `\\ \\(\\)" +msgstr ":py:obj:`isupper `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isupper:1 of +msgid "Return True if the string is an uppercase string, False otherwise." +msgstr "문자열이 대문자 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`istitle `\\ \\(\\)" +msgstr ":py:obj:`istitle `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.istitle:1 of +msgid "Return True if the string is a title-cased string, False otherwise." +msgstr "문자열이 제목 대/소문자가 구분된 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isspace `\\ \\(\\)" +msgstr ":py:obj:`isspace `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isspace:1 of +msgid "Return True if the string is a whitespace string, False otherwise." +msgstr "문자열이 공백 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isdecimal `\\ \\(\\)" +msgstr ":py:obj:`isdecimal `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isdecimal:1 of +msgid "Return True if the string is a decimal string, False otherwise." +msgstr "문자열이 10진수 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isdigit `\\ \\(\\)" +msgstr ":py:obj:`isdigit `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isdigit:1 of +msgid "Return True if the string is a digit string, False otherwise." +msgstr "문자열이 숫자 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." 
+ +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isnumeric `\\ \\(\\)" +msgstr ":py:obj:`isnumeric `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isnumeric:1 of +msgid "Return True if the string is a numeric string, False otherwise." +msgstr "문자열이 숫자 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isalpha `\\ \\(\\)" +msgstr ":py:obj:`isalpha `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isalpha:1 of +msgid "Return True if the string is an alphabetic string, False otherwise." +msgstr "문자열이 알파벳 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isalnum `\\ \\(\\)" +msgstr ":py:obj:`isalnum `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isalnum:1 of +msgid "Return True if the string is an alpha-numeric string, False otherwise." +msgstr "문자열이 영-숫자 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isidentifier `\\ \\(\\)" +msgstr ":py:obj:`isidentifier `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isidentifier:1 of +msgid "Return True if the string is a valid Python identifier, False otherwise." +msgstr "문자열이 유효한 파이썬 식별자인 경우 True를 반환하고, 그렇지 않으면 False를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isprintable `\\ \\(\\)" +msgstr ":py:obj:`isprintable `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isprintable:1 of +msgid "Return True if the string is printable, False otherwise." +msgstr "문자열을 인쇄할 수 있으면 True를 반환하고, 그렇지 않으면 False를 반환합니다." 
+ +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`zfill `\\ \\(width\\, \\/\\)" +msgstr ":py:obj:`zfill `\\ \\(width\\, \\/\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.zfill:1 of +msgid "" +"Pad a numeric string with zeros on the left, to fill a field of the given" +" width." +msgstr "숫자 문자열을 왼쪽에 0으로 채워서 지정된 너비의 필드를 채웁니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`format `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" +msgstr "" +":py:obj:`format `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return a formatted version of S, using substitutions from args and kwargs." +msgstr "args와 kwarg의 치환을 사용하여 형식이 지정된 S를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`format_map `\\ \\(mapping\\)" +msgstr ":py:obj:`format_map `\\ \\(mapping\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return a formatted version of S, using substitutions from mapping." +msgstr "매핑의 치환을 사용하여 형식이 지정된 S를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`maketrans `\\" +msgstr ":py:obj:`maketrans `\\" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.maketrans:1 of +msgid "Return a translation table usable for str.translate()." +msgstr "str.translate()에 사용할 수 있는 번역 테이블을 반환합니다." 
+ +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`PING `\\" +msgstr ":py:obj:`PING `\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_CLIENT_ENTER `\\" +msgstr ":py:obj:`START_CLIENT_ENTER `\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_CLIENT_LEAVE `\\" +msgstr ":py:obj:`START_CLIENT_LEAVE `\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_SERVER_ENTER `\\" +msgstr ":py:obj:`START_SERVER_ENTER `\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_SERVER_LEAVE `\\" +msgstr ":py:obj:`START_SERVER_LEAVE `\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`START_SIMULATION_ENTER " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_ENTER " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPEREXEC_ENTER " +"`\\" +msgstr "" +":py:obj:`RUN_SUPEREXEC_ENTER " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPEREXEC_LEAVE " +"`\\" +msgstr "" +":py:obj:`RUN_SUPEREXEC_LEAVE " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +#, fuzzy +msgid "" +":py:obj:`CLI_FLOWER_SIMULATION_ENTER " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_ENTER " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +#, fuzzy +msgid "" +":py:obj:`CLI_FLOWER_SIMULATION_LEAVE " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +#, fuzzy +msgid "" +":py:obj:`PYTHON_API_RUN_SIMULATION_ENTER " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_ENTER " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +#, fuzzy +msgid "" +":py:obj:`PYTHON_API_RUN_SIMULATION_LEAVE " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERLINK_ENTER " 
+"`\\" +msgstr "" +":py:obj:`RUN_SUPERLINK_ENTER " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERLINK_LEAVE " +"`\\" +msgstr "" +":py:obj:`RUN_SUPERLINK_LEAVE " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERNODE_ENTER " +"`\\" +msgstr "" +":py:obj:`RUN_SUPERNODE_ENTER " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERNODE_LEAVE " +"`\\" +msgstr "" +":py:obj:`RUN_SUPERNODE_LEAVE " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SERVER_APP_ENTER " +"`\\" +msgstr "" +":py:obj:`RUN_SERVER_APP_ENTER " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SERVER_APP_LEAVE " +"`\\" +msgstr "" +":py:obj:`RUN_SERVER_APP_LEAVE " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_CLIENT_APP_ENTER " +"`\\" +msgstr "" +":py:obj:`RUN_CLIENT_APP_ENTER " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_CLIENT_APP_LEAVE " +"`\\" +msgstr "" +":py:obj:`RUN_CLIENT_APP_LEAVE " +"`\\" + +#: flwr.common.EventType.capitalize:3 of +msgid "" +"More specifically, make the first character have upper case and the rest " +"lower case." +msgstr "보다 구체적으로, 첫 번째 문자는 대문자로, 나머지는 소문자로 만듭니다." + +#: flwr.common.EventType.center:3 flwr.common.EventType.ljust:3 +#: flwr.common.EventType.rjust:3 of +msgid "Padding is done using the specified fill character (default is a space)." +msgstr "패딩은 지정된 채우기 문자를 사용하여 수행됩니다(기본값은 공백)." + +#: flwr.common.EventType.count:1 of +msgid "" +"Return the number of non-overlapping occurrences of substring sub in " +"string S[start:end]. Optional arguments start and end are interpreted as" +" in slice notation." +msgstr "" +"문자열 S[start:end]에서 부분 문자열 sub의 겹치지 않는 횟수를 반환합니다. 선택적 인자 start와 end는 슬라이스" +" 표기법과 같이 해석됩니다." 
+
+#: flwr.common.EventType.encode:3 of
+msgid "encoding"
+msgstr "인코딩"
+
+#: flwr.common.EventType.encode:4 of
+msgid "The encoding in which to encode the string."
+msgstr "문자열을 인코딩할 때 사용할 인코딩입니다."
+
+#: flwr.common.EventType.encode:9 of
+msgid "errors"
+msgstr "오류"
+
+#: flwr.common.EventType.encode:6 of
+msgid ""
+"The error handling scheme to use for encoding errors. The default is "
+"'strict' meaning that encoding errors raise a UnicodeEncodeError. Other "
+"possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well "
+"as any other name registered with codecs.register_error that can handle "
+"UnicodeEncodeErrors."
+msgstr ""
+"인코딩 오류에 사용할 오류 처리 방식입니다. 기본값은 'strict'로, 인코딩 오류가 발생하면 UnicodeEncodeError를"
+" 발생시킵니다. 다른 가능한 값으로는 'ignore', 'replace', 'xmlcharrefreplace', 그리고 "
+"UnicodeEncodeError를 처리할 수 있는 codecs.register_error에 등록된 다른 이름도 사용할 수 "
+"있습니다."
+
+#: flwr.common.EventType.endswith:1 of
+msgid ""
+"Return True if S ends with the specified suffix, False otherwise. With "
+"optional start, test S beginning at that position. With optional end, "
+"stop comparing S at that position. suffix can also be a tuple of strings "
+"to try."
+msgstr ""
+"S가 지정된 접미사로 끝나면 True를 반환하고, 그렇지 않으면 False를 반환합니다. 시작 옵션을 사용하면 해당 위치부터 S를 "
+"테스트합니다. end 옵션을 사용하면 해당 위치에서 S 비교를 중지합니다. 접미사는 시도할 문자열의 튜플일 수도 있습니다."
+
+#: flwr.common.EventType.expandtabs:3 of
+msgid "If tabsize is not given, a tab size of 8 characters is assumed."
+msgstr "탭 크기를 지정하지 않으면 크기가 8로 지정됩니다."
+
+#: flwr.common.EventType.find:1 flwr.common.EventType.index:1 of
+msgid ""
+"Return the lowest index in S where substring sub is found, such that sub "
+"is contained within S[start:end]. Optional arguments start and end are "
+"interpreted as in slice notation."
+msgstr ""
+"부분 문자열 sub가 발견되는 곳의 가장 낮은 인덱스를 반환하며, sub는 S[start:end] 내에 포함되어야 합니다. 선택적"
+" 인자 start와 end는 슬라이스 표기법과 같이 해석됩니다."
+
+#: flwr.common.EventType.find:5 flwr.common.EventType.rfind:5 of
+msgid "Return -1 on failure."
+msgstr "실패 시 -1을 반환합니다."
+ +#: flwr.common.EventType.format:1 of +msgid "" +"Return a formatted version of S, using substitutions from args and " +"kwargs. The substitutions are identified by braces ('{' and '}')." +msgstr "args와 kwargs의 치환을 사용하여 형식이 지정된 S를 반환합니다. 치환은 중괄호('{' 및 '}')로 식별됩니다." + +#: flwr.common.EventType.format_map:1 of +msgid "" +"Return a formatted version of S, using substitutions from mapping. The " +"substitutions are identified by braces ('{' and '}')." +msgstr "매핑의 치환을 사용하여 형식이 지정된 S를 반환합니다. 치환은 중괄호('{' 및 '}')로 식별됩니다." + +#: flwr.common.EventType.index:5 flwr.common.EventType.rindex:5 of +msgid "Raises ValueError when the substring is not found." +msgstr "부분 문자열을 찾을 수 없을 때 ValueError를 발생시킵니다." + +#: flwr.common.EventType.isalnum:3 of +msgid "" +"A string is alpha-numeric if all characters in the string are alpha-" +"numeric and there is at least one character in the string." +msgstr "문자열의 모든 문자가 영숫자이고 문자열에 하나 이상의 문자가 있는 경우 문자열은 영-숫자입니다." + +#: flwr.common.EventType.isalpha:3 of +msgid "" +"A string is alphabetic if all characters in the string are alphabetic and" +" there is at least one character in the string." +msgstr "문자열의 모든 문자가 알파벳이고 문자열에 하나 이상의 문자가 있는 경우 문자열은 알파벳입니다." + +#: flwr.common.EventType.isascii:3 of +msgid "" +"ASCII characters have code points in the range U+0000-U+007F. Empty " +"string is ASCII too." +msgstr "ASCII 문자는 U+0000-U+007F 범위의 코드 포인트가 있습니다. 빈 문자열도 ASCII입니다." + +#: flwr.common.EventType.isdecimal:3 of +msgid "" +"A string is a decimal string if all characters in the string are decimal " +"and there is at least one character in the string." +msgstr "문자열의 모든 문자가 10진수이고 문자열에 하나 이상의 문자가 있는 경우 문자열은 10진수 문자열입니다." + +#: flwr.common.EventType.isdigit:3 of +msgid "" +"A string is a digit string if all characters in the string are digits and" +" there is at least one character in the string." +msgstr "문자열의 모든 문자가 숫자이고 문자열에 하나 이상의 문자가 있는 경우 문자열은 숫자 문자열입니다." 
+ +#: flwr.common.EventType.isidentifier:3 of +msgid "" +"Call keyword.iskeyword(s) to test whether string s is a reserved " +"identifier, such as \"def\" or \"class\"." +msgstr "" +"keyword.iskeyword(s)를 호출하여 문자열 s가 \"def\" 또는 \"class\"와 같은 예약 식별자인지 " +"테스트합니다." + +#: flwr.common.EventType.islower:3 of +msgid "" +"A string is lowercase if all cased characters in the string are lowercase" +" and there is at least one cased character in the string." +msgstr "문자열이 모두 소문자이고 문자열에 문자가 하나 이상 있는 경우 문자열은 소문자입니다." + +#: flwr.common.EventType.isnumeric:3 of +msgid "" +"A string is numeric if all characters in the string are numeric and there" +" is at least one character in the string." +msgstr "문자열의 모든 문자가 숫자이고 문자열에 하나 이상의 문자가 있는 경우 문자열은 숫자입니다." + +#: flwr.common.EventType.isprintable:3 of +msgid "" +"A string is printable if all of its characters are considered printable " +"in repr() or if it is empty." +msgstr "문자열은 repr()에서 모든 문자가 인쇄 가능한 것으로 간주되거나 비어 있는 경우 인쇄할 수 있습니다." + +#: flwr.common.EventType.isspace:3 of +msgid "" +"A string is whitespace if all characters in the string are whitespace and" +" there is at least one character in the string." +msgstr "문자열의 모든 문자가 공백이고 문자열에 하나 이상의 문자가 있는 경우 문자열은 공백입니다." + +#: flwr.common.EventType.istitle:3 of +msgid "" +"In a title-cased string, upper- and title-case characters may only follow" +" uncased characters and lowercase characters only cased ones." +msgstr "제목 대/소문자 문자열에서 대문자와 제목 대문자는 대소문자만, 소문자는 대문자만 뒤에 올 수 있습니다." + +#: flwr.common.EventType.isupper:3 of +msgid "" +"A string is uppercase if all cased characters in the string are uppercase" +" and there is at least one cased character in the string." +msgstr "문자열의 모든 문자가 대문자이고 문자열에 문자가 하나 이상 있는 경우 문자열은 대문자입니다." + +#: flwr.common.EventType.join:3 of +msgid "" +"The string whose method is called is inserted in between each given " +"string. The result is returned as a new string." +msgstr "메서드가 호출되는 문자열은 주어진 각 문자열 사이에 삽입됩니다. 결과는 새 문자열로 반환됩니다." 
+ +#: flwr.common.EventType.join:6 of +msgid "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" +msgstr "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" + +#: flwr.common.EventType.lstrip:3 flwr.common.EventType.rstrip:3 +#: flwr.common.EventType.strip:3 of +msgid "If chars is given and not None, remove characters in chars instead." +msgstr "None이 아닌 문자가 지정되면 대신 문자열에서 문자를 제거합니다." + +#: flwr.common.EventType.maketrans:3 of +msgid "" +"If there is only one argument, it must be a dictionary mapping Unicode " +"ordinals (integers) or characters to Unicode ordinals, strings or None. " +"Character keys will be then converted to ordinals. If there are two " +"arguments, they must be strings of equal length, and in the resulting " +"dictionary, each character in x will be mapped to the character at the " +"same position in y. If there is a third argument, it must be a string, " +"whose characters will be mapped to None in the result." +msgstr "" +"argument이 하나만 있는 경우, 유니코드 서수(정수) 또는 문자를 유니코드 서수, 문자열 또는 None에 매핑하는 " +"dictionary이어야 합니다. 그러면 문자 키가 서수로 변환됩니다. 인수가 두 개이면 길이가 같은 문자열이어야 하며, 결과 " +"dictionary에서 x의 각 문자는 y의 같은 위치에 있는 문자에 매핑됩니다. 세 번째 인수가 있으면 문자열이어야 하며, 그 " +"문자는 결과에서 None에 매핑됩니다." + +#: flwr.common.EventType.partition:3 of +msgid "" +"This will search for the separator in the string. If the separator is " +"found, returns a 3-tuple containing the part before the separator, the " +"separator itself, and the part after it." +msgstr "" +"문자열에서 구분 기호를 검색합니다. 구분 기호가 발견되면 구분 기호 앞 부분, 구분 기호 자체, 구분 기호 뒤 부분을 포함하는 " +"3-tuple을 반환합니다." + +#: flwr.common.EventType.partition:7 of +msgid "" +"If the separator is not found, returns a 3-tuple containing the original " +"string and two empty strings." +msgstr "구분 기호를 찾을 수 없으면 원래 문자열과 빈 문자열 2개를 포함하는 3-튜플을 반환합니다." + +#: flwr.common.EventType.removeprefix:3 of +msgid "" +"If the string starts with the prefix string, return string[len(prefix):]." +" Otherwise, return a copy of the original string." 
+msgstr "문자열이 접두사 문자열로 시작하면 문자열[len(prefix):]을 반환합니다. 그렇지 않으면 원본 문자열의 복사본을 반환합니다." + +#: flwr.common.EventType.removesuffix:3 of +msgid "" +"If the string ends with the suffix string and that suffix is not empty, " +"return string[:-len(suffix)]. Otherwise, return a copy of the original " +"string." +msgstr "" +"문자열이 접미사 문자열로 끝나고 해당 접미사가 비어 있지 않으면 문자열[:-len(suffix)]을 반환합니다. 그렇지 않으면 원본" +" 문자열의 복사본을 반환합니다." + +#: flwr.common.EventType.replace:5 of +msgid "count" +msgstr "카운트" + +#: flwr.common.EventType.replace:4 of +msgid "" +"Maximum number of occurrences to replace. -1 (the default value) means " +"replace all occurrences." +msgstr "대체할 최대 발생 횟수입니다. -1(기본값)은 모든 항목을 교체한다는 의미입니다." + +#: flwr.common.EventType.replace:7 of +msgid "" +"If the optional argument count is given, only the first count occurrences" +" are replaced." +msgstr "선택적 argument 개수를 지정하면 첫 번째 개수만 바뀝니다." + +#: flwr.common.EventType.rfind:1 flwr.common.EventType.rindex:1 of +msgid "" +"Return the highest index in S where substring sub is found, such that sub" +" is contained within S[start:end]. Optional arguments start and end are " +"interpreted as in slice notation." +msgstr "" +"부분 문자열 sub가 발견되는 곳의 가장 높은 인덱스를 반환하며, sub는 S[start:end] 내에 포함되어야 합니다. 선택적" +" 인자 start와 end는 슬라이스 표기법과 같이 해석됩니다." + +#: flwr.common.EventType.rpartition:3 of +msgid "" +"This will search for the separator in the string, starting at the end. If" +" the separator is found, returns a 3-tuple containing the part before the" +" separator, the separator itself, and the part after it." +msgstr "" +"그러면 문자열에서 끝 부분부터 시작하여 구분 기호를 검색합니다. 구분 기호가 발견되면 구분 기호 앞 부분, 구분 기호 자체, 구분 " +"기호 뒤 부분을 포함하는 3-tuple을 반환합니다." + +#: flwr.common.EventType.rpartition:7 of +msgid "" +"If the separator is not found, returns a 3-tuple containing two empty " +"strings and the original string." +msgstr "구분 기호를 찾을 수 없는 경우 빈 문자열 2개와 원래 문자열을 포함하는 3-tuple을 반환합니다." 
+ +#: flwr.common.EventType.rsplit:7 flwr.common.EventType.split:7 of +msgid "sep" +msgstr "sep" + +#: flwr.common.EventType.rsplit:4 flwr.common.EventType.split:4 of +msgid "The separator used to split the string." +msgstr "문자열을 분할하는 데 사용되는 구분 기호입니다." + +#: flwr.common.EventType.rsplit:6 flwr.common.EventType.split:6 of +msgid "" +"When set to None (the default value), will split on any whitespace " +"character (including \\\\n \\\\r \\\\t \\\\f and spaces) and will discard" +" empty strings from the result." +msgstr "" +"None(기본값)으로 설정하면 모든 공백 문자(\\\\n \\\\r \\\\t \\\\f 및 공백 포함)를 분할하고 결과에서 빈 " +"문자열을 삭제합니다." + +#: flwr.common.EventType.rsplit:11 flwr.common.EventType.split:11 of +msgid "maxsplit" +msgstr "maxsplit" + +#: flwr.common.EventType.rsplit:10 flwr.common.EventType.split:10 of +msgid "" +"Maximum number of splits (starting from the left). -1 (the default value)" +" means no limit." +msgstr "최대 분할 횟수(왼쪽부터 시작). -1(기본값)은 제한이 없음을 의미합니다." + +#: flwr.common.EventType.rsplit:13 of +msgid "Splitting starts at the end of the string and works to the front." +msgstr "분할은 문자열 끝에서 시작하여 앞쪽으로 진행됩니다." + +#: flwr.common.EventType.split:13 of +msgid "" +"Note, str.split() is mainly useful for data that has been intentionally " +"delimited. With natural text that includes punctuation, consider using " +"the regular expression module." +msgstr "" +"참고로 str.split()은 주로 의도적으로 구분된 데이터에 유용합니다. 구두점이 포함된 자연 텍스트의 경우 정규식 모듈을 " +"사용하는 것이 좋습니다." + +#: flwr.common.EventType.splitlines:3 of +msgid "" +"Line breaks are not included in the resulting list unless keepends is " +"given and true." +msgstr "줄 바꿈은 keepends가 주어지고 참이 아니면 결과 목록에 포함되지 않습니다." + +#: flwr.common.EventType.startswith:1 of +msgid "" +"Return True if S starts with the specified prefix, False otherwise. With " +"optional start, test S beginning at that position. With optional end, " +"stop comparing S at that position. prefix can also be a tuple of strings " +"to try." 
+msgstr "" +"S가 지정된 접두사로 시작하면 True를 반환하고, 그렇지 않으면 False를 반환합니다. 시작 옵션을 사용하면 해당 위치에서 " +"시작되는 S를 테스트합니다. 선택적 end를 사용하면 해당 위치에서 S 비교를 중지합니다. 접두사는 시도할 문자열의 튜플일 수도 " +"있습니다." + +#: flwr.common.EventType.title:3 of +msgid "" +"More specifically, words start with uppercased characters and all " +"remaining cased characters have lower case." +msgstr "보다 구체적으로, 단어는 대문자로 시작하고 나머지 모든 대소문자는 소문자로 표기합니다." + +#: flwr.common.EventType.translate:5 of +msgid "table" +msgstr "table" + +#: flwr.common.EventType.translate:4 of +msgid "" +"Translation table, which must be a mapping of Unicode ordinals to Unicode" +" ordinals, strings, or None." +msgstr "유니코드 서수를 유니코드 서수, 문자열 또는 없음으로 매핑하는 번역 테이블이어야 합니다." + +#: flwr.common.EventType.translate:7 of +msgid "" +"The table must implement lookup/indexing via __getitem__, for instance a " +"dictionary or list. If this operation raises LookupError, the character " +"is left untouched. Characters mapped to None are deleted." +msgstr "" +"테이블은 사전이나 목록과 같이 __getitem__을 통해 조회/색인을 구현해야 합니다. 이 작업에서 LookupError가 " +"발생하면 문자는 그대로 유지됩니다. 없음으로 매핑된 문자는 삭제됩니다." + +#: flwr.common.EventType.zfill:3 of +msgid "The string is never truncated." +msgstr "문자열은 잘리지 않습니다." 
+ +#: ../../source/ref-api/flwr.common.FitIns.rst:2 +msgid "FitIns" +msgstr "FitIns" + +#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +msgid ":py:obj:`parameters `\\" +msgstr ":py:obj:`parameters `\\" + +#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +msgid ":py:obj:`config `\\" +msgstr ":py:obj:`config `\\" + +#: ../../source/ref-api/flwr.common.FitRes.rst:2 +msgid "FitRes" +msgstr "FitRes" + +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`status `\\" +msgstr ":py:obj:`status `\\" + +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`parameters `\\" +msgstr ":py:obj:`parameters `\\" + +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`num_examples `\\" +msgstr ":py:obj:`num_examples `\\" + +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`metrics `\\" +msgstr ":py:obj:`metrics `\\" + +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:2 +msgid "GetParametersIns" +msgstr "GetParametersIns" + +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:28::1 +msgid ":py:obj:`config `\\" +msgstr ":py:obj:`config `\\" + +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:2 +msgid "GetParametersRes" +msgstr "GetParametersRes" + +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +msgid ":py:obj:`status `\\" +msgstr ":py:obj:`status `\\" + +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +msgid ":py:obj:`parameters `\\" +msgstr ":py:obj:`parameters `\\" + +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:2 +msgid "GetPropertiesIns" +msgstr "GetPropertiesIns" + +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:28::1 +msgid ":py:obj:`config `\\" +msgstr ":py:obj:`config `\\" + +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:2 +msgid "GetPropertiesRes" +msgstr "GetPropertiesRes" + +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +msgid ":py:obj:`status `\\" +msgstr ":py:obj:`status `\\" + +#: 
../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +msgid ":py:obj:`properties `\\" +msgstr ":py:obj:`properties `\\" + +#: ../../source/ref-api/flwr.common.Message.rst:2 +msgid "Message" +msgstr "Message" + +#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 +#: flwr.common.message.Message:3 of +msgid "A dataclass including information about the message to be executed." +msgstr "실행할 메시지에 대한 정보를 포함한 데이터 클래스입니다." + +#: flwr.common.message.Message:5 of +msgid "" +"Holds records either sent by another entity (e.g. sent by the server-side" +" logic to a client, or vice-versa) or that will be sent to it." +msgstr "다른 엔터티(예: 서버 측 로직이 클라이언트로 전송하거나 그 반대로 전송하는 등)가 전송했거나 전송할 레코드를 보유합니다." + +#: flwr.common.message.Message:8 of +msgid "" +"A dataclass that captures information about an error that took place when" +" processing another message." +msgstr "다른 메시지를 처리할 때 발생한 오류에 대한 정보를 캡처하는 데이터 클래스입니다." + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid "" +":py:obj:`create_error_reply `\\ " +"\\(error\\[\\, ttl\\]\\)" +msgstr "" +":py:obj:`create_error_reply `\\ " +"\\(error\\[\\, ttl\\]\\)" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_error_reply:1 of +msgid "Construct a reply message indicating an error happened." +msgstr "오류가 발생했음을 나타내는 답장 메시지를 작성합니다." + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid "" +":py:obj:`create_reply `\\ " +"\\(content\\[\\, ttl\\]\\)" +msgstr "" +":py:obj:`create_reply `\\ " +"\\(content\\[\\, ttl\\]\\)" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_reply:1 of +msgid "Create a reply to this message with specified content and TTL." +msgstr "지정된 콘텐츠와 TTL을 사용하여 이 메시지에 대한 답글을 작성합니다." 
+
+#: ../../source/ref-api/flwr.common.Message.rst:35::1
+msgid ":py:obj:`has_content `\\ \\(\\)"
+msgstr ":py:obj:`has_content `\\ \\(\\)"
+
+#: ../../source/ref-api/flwr.common.Message.rst:35::1
+#: flwr.common.message.Message.has_content:1 of
+msgid "Return True if message has content, else False."
+msgstr "메시지에 콘텐츠가 있으면 True을 반환하고, 그렇지 않으면 False을 반환합니다."
+
+#: ../../source/ref-api/flwr.common.Message.rst:35::1
+msgid ":py:obj:`has_error `\\ \\(\\)"
+msgstr ":py:obj:`has_error `\\ \\(\\)"
+
+#: ../../source/ref-api/flwr.common.Message.rst:35::1
+#: flwr.common.message.Message.has_error:1 of
+msgid "Return True if message has an error, else False."
+msgstr "메시지에 오류가 있으면 True을 반환하고, 그렇지 않으면 False을 반환합니다."
+
+#: flwr.common.Message.content:1::1 of
+msgid ":py:obj:`content `\\"
+msgstr ":py:obj:`content `\\"
+
+#: flwr.common.Message.content:1 flwr.common.Message.content:1::1
+#: of
+msgid "The content of this message."
+msgstr "이 메시지의 내용입니다."
+
+#: flwr.common.Message.content:1::1 of
+msgid ":py:obj:`error `\\"
+msgstr ":py:obj:`error `\\"
+
+#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of
+msgid "Error captured by this message."
+msgstr "이 메시지가 캡처한 오류입니다."
+
+#: flwr.common.Message.content:1::1 of
+msgid ":py:obj:`metadata `\\"
+msgstr ":py:obj:`metadata `\\"
+
+#: flwr.common.message.Message.create_error_reply:3 of
+msgid "The error that was encountered."
+msgstr "발생한 오류입니다."
+
+#: flwr.common.message.Message.create_error_reply:5
+#: flwr.common.message.Message.create_reply:9 of
+msgid ""
+"Time-to-live for this message in seconds. If unset, it will be set based "
+"on the remaining time for the received message before it expires. This "
+"follows the equation: ttl = msg.meta.ttl - (reply.meta.created_at - "
+"msg.meta.created_at)"
+msgstr ""
+"이 메시지의 남은 시간(초)입니다. 설정하지 않으면 수신된 메시지가 만료되기 전까지 남은 시간을 기준으로 설정됩니다. 이는 다음과 "
+"같은 공식을 따릅니다: ttl = msg.meta.ttl - (reply.meta.created_at - "
+"msg.meta.created_at)"
+
+#: flwr.common.message.Message.create_error_reply:5
+#: flwr.common.message.Message.create_reply:9 of
+msgid ""
+"Time-to-live for this message in seconds. If unset, it will be set based "
+"on the remaining time for the received message before it expires. This "
+"follows the equation:"
+msgstr ""
+"이 메시지의 남은 시간(초)입니다. 설정하지 않으면 수신된 메시지가 만료되기 전까지 남은 시간을 기준으로 설정됩니다. 이는 다음 "
+"공식을 따릅니다:"
+
+#: flwr.common.message.Message.create_error_reply:9
+#: flwr.common.message.Message.create_reply:13 of
+msgid "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)"
+msgstr "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)"
+
+#: flwr.common.message.Message.create_reply:3 of
+msgid ""
+"The method generates a new `Message` as a reply to this message. It "
+"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from "
+"this message and sets 'reply_to_message' to the ID of this message."
+msgstr ""
+"이 메서드는 이 메시지에 대한 응답으로 새로운 '메시지'를 생성합니다. 이 메시지에서 'run_id', 'src_node_id', "
+"'dst_node_id', 'message_type'을 상속하고 'reply_to_message'를 이 메시지의 ID로 설정합니다."
+
+#: flwr.common.message.Message.create_reply:7 of
+msgid "The content for the reply message."
+msgstr "답장 메시지의 콘텐츠입니다."
+
+#: flwr.common.message.Message.create_reply:16 of
+msgid "A new `Message` instance representing the reply."
+msgstr "답장을 나타내는 새로운 `메시지` 인스턴스입니다."
+ +#: ../../source/ref-api/flwr.common.MessageType.rst:2 +msgid "MessageType" +msgstr "MessageType" + +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`EVALUATE `\\" +msgstr ":py:obj:`EVALUATE `\\" + +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`QUERY `\\" +msgstr ":py:obj:`QUERY `\\" + +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`TRAIN `\\" +msgstr ":py:obj:`TRAIN `\\" + +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 +msgid "MessageTypeLegacy" +msgstr "MessageTypeLegacy" + +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PARAMETERS `\\" +msgstr ":py:obj:`GET_PARAMETERS `\\" + +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PROPERTIES `\\" +msgstr ":py:obj:`GET_PROPERTIES `\\" + +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of +msgid "An identifier for the current run." +msgstr "현재 실행에 대한 식별자입니다." + +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of +msgid "An identifier for the current message." +msgstr "현재 메시지의 식별자입니다." + +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of +msgid "An identifier for the node sending this message." +msgstr "이 메시지를 보내는 노드의 식별자입니다." + +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.dst_node_id:1 flwr.common.message.Metadata:9 of +msgid "An identifier for the node receiving this message." +msgstr "이 메시지를 수신하는 노드의 식별자입니다." -#: flwr.client.client.Client.get_parameters:3 of +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of +msgid "An identifier for the message this message replies to." +msgstr "이 메시지가 회신하는 메시지의 식별자입니다." 
+ +#: flwr.common.message.Metadata:13 of msgid "" -"The get parameters instructions received from the server containing a " -"dictionary of configuration values." -msgstr "" +"An identifier for grouping messages. In some settings, this is used as " +"the FL round." +msgstr "메시지를 그룹화하기 위한 식별자입니다. 일부 설정에서는 FL 라운드로 사용됩니다." -#: flwr.client.client.Client.get_parameters:7 of -msgid "The current local model parameters." -msgstr "" +#: flwr.common.message.Metadata:16 of +msgid "Time-to-live for this message in seconds." +msgstr "이 메시지의 유효 시간(초)입니다." -#: flwr.client.client.Client.get_properties:3 of -msgid "" -"The get properties instructions received from the server containing a " -"dictionary of configuration values." -msgstr "" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of +msgid "A string that encodes the action to be executed on the receiving end." +msgstr "수신 측에서 실행할 작업을 인코딩하는 문자열입니다." -#: flwr.client.client.Client.get_properties:7 of -msgid "The current client properties." -msgstr "" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`created_at `\\" +msgstr ":py:obj:`created_at `\\" -#: ../../source/ref-api/flwr.client.ClientApp.rst:2 -msgid "ClientApp" -msgstr "" +#: flwr.common.Metadata.created_at:1 +#: flwr.common.Metadata.created_at:1::1 of +msgid "Unix timestamp when the message was created." +msgstr "메시지가 생성된 때의 Unix timestamp입니다." 
-#: flwr.client.client_app.ClientApp:1 flwr.common.constant.MessageType:1 -#: flwr.common.constant.MessageTypeLegacy:1 flwr.common.context.Context:1 -#: flwr.common.message.Error:1 flwr.common.message.Message:1 -#: flwr.common.message.Metadata:1 flwr.common.record.parametersrecord.Array:1 -#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 -#: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 -#: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 -#: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 -#: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 -#: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 -#: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 -#: flwr.common.typing.Status:1 flwr.server.history.History:1 -#: flwr.server.server.Server:1 flwr.server.server_app.ServerApp:1 -#: flwr.server.server_config.ServerConfig:1 -#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 -#: of -msgid "Bases: :py:class:`object`" -msgstr "" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`dst_node_id `\\" +msgstr ":py:obj:`dst_node_id `\\" -#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 -#: flwr.client.client_app.ClientApp:4 -#: flwr.client.client_app.ClientApp.evaluate:4 -#: flwr.client.client_app.ClientApp.query:4 -#: flwr.client.client_app.ClientApp.train:4 flwr.server.app.start_server:41 -#: flwr.server.server_app.ServerApp:4 flwr.server.server_app.ServerApp.main:4 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 -#: 
flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 -#: of -msgid "Examples" -msgstr "" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`group_id `\\" +msgstr ":py:obj:`group_id `\\" -#: flwr.client.client_app.ClientApp:5 of -msgid "" -"Assuming a typical `Client` implementation named `FlowerClient`, you can " -"wrap it in a `ClientApp` as follows:" -msgstr "" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.group_id:1 of +msgid "An identifier for grouping messages." +msgstr "메시지를 그룹화하기 위한 식별자입니다." -#: flwr.client.client_app.ClientApp:16 of -msgid "" -"If the above code is in a Python module called `client`, it can be " -"started as follows:" -msgstr "" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`message_id `\\" +msgstr ":py:obj:`message_id `\\" -#: flwr.client.client_app.ClientApp:21 of -msgid "" -"In this `client:app` example, `client` refers to the Python module " -"`client.py` in which the previous code lives in and `app` refers to the " -"global attribute `app` that points to an object of type `ClientApp`." -msgstr "" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`message_type `\\" +msgstr ":py:obj:`message_type `\\" -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid ":py:obj:`evaluate `\\ \\(\\)" -msgstr "" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`reply_to_message `\\" +msgstr ":py:obj:`reply_to_message `\\" -#: flwr.client.client_app.ClientApp.evaluate:1 -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid "Return a decorator that registers the evaluate fn with the client app." 
-msgstr "" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`run_id `\\" +msgstr ":py:obj:`run_id `\\" -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid ":py:obj:`query `\\ \\(\\)" -msgstr "" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`src_node_id `\\" +msgstr ":py:obj:`src_node_id `\\" -#: flwr.client.client_app.ClientApp.evaluate:1::1 -#: flwr.client.client_app.ClientApp.query:1 of -msgid "Return a decorator that registers the query fn with the client app." -msgstr "" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`ttl `\\" +msgstr ":py:obj:`ttl `\\" -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid ":py:obj:`train `\\ \\(\\)" -msgstr "" +#: flwr.common.Metadata.created_at:1::1 flwr.common.Metadata.ttl:1 +#: of +msgid "Time-to-live for this message." +msgstr "이 메시지의 유효 시간입니다." -#: flwr.client.client_app.ClientApp.evaluate:1::1 -#: flwr.client.client_app.ClientApp.train:1 of -msgid "Return a decorator that registers the train fn with the client app." 
-msgstr "" +#: ../../source/ref-api/flwr.common.Metrics.rst:2 +#, fuzzy +msgid "Metrics" +msgstr "MetricsRecord" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:2 -msgid "NumPyClient" -msgstr "" +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 +msgid "MetricsRecord" +msgstr "MetricsRecord" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.common.record.metricsrecord.MetricsRecord:1 of +#, fuzzy msgid "" -":py:obj:`evaluate `\\ \\(parameters\\, " -"config\\)" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | " +":py:class:`list`\\ [:py:class:`int`] | :py:class:`list`\\ " +"[:py:class:`float`]]" msgstr "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | " +":py:class:`~typing.List`\\ [:py:class:`int`] | :py:class:`~typing.List`\\" +" [:py:class:`float`]]" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid ":py:obj:`fit `\\ \\(parameters\\, config\\)" +#: flwr.common.record.metricsrecord.MetricsRecord:3 of +msgid "" +"A :code:`MetricsRecord` is a Python dictionary designed to ensure that " +"each key-value pair adheres to specified data types. A " +":code:`MetricsRecord` is one of the types of records that a " +"`flwr.common.RecordSet `_ supports " +"and can therefore be used to construct :code:`common.Message` objects." msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.fit:1 of -msgid "Train the provided parameters using the locally held dataset." +#: flwr.common.record.metricsrecord.MetricsRecord:9 of +msgid "" +"A dictionary that stores basic types (i.e. `int`, `float` as defined in " +"`MetricsScalar`) and list of such types (see `MetricsScalarList`)." 
msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid ":py:obj:`get_context `\\ \\(\\)" +#: flwr.common.record.metricsrecord.MetricsRecord:12 of +msgid "" +"A boolean indicating whether metrics should be deleted from the input " +"dictionary immediately after adding them to the record. When set to True," +" the data is duplicated in memory. If memory is a concern, set it to " +"False." msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.common.record.metricsrecord.MetricsRecord:20 of msgid "" -":py:obj:`get_parameters `\\ " -"\\(config\\)" +"The usage of a :code:`MetricsRecord` is envisioned for communicating " +"results obtained when a node performs an action. A few typical examples " +"include: communicating the training accuracy after a model is trained " +"locally by a :code:`ClientApp`, reporting the validation loss obtained at" +" a :code:`ClientApp`, or, more generally, the output of executing a query" +" by the :code:`ClientApp`. Common to these examples is that the output " +"can be typically represented by a single scalar (:code:`int`, " +":code:`float`) or list of scalars." msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.common.record.metricsrecord.MetricsRecord:28 of msgid "" -":py:obj:`get_properties `\\ " -"\\(config\\)" +"Let's see some examples of how to construct a :code:`MetricsRecord` from " +"scratch:" msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.get_properties:1 of -msgid "Return a client's set of properties." +#: flwr.common.record.metricsrecord.MetricsRecord:39 of +msgid "" +"Since types are enforced, the types of the objects inserted are checked. " +"For a :code:`MetricsRecord`, value types allowed are those in defined in " +":code:`flwr.common.MetricsRecordValues`. Similarly, only :code:`str` keys" +" are allowed." 
msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.common.record.metricsrecord.MetricsRecord:50 of msgid "" -":py:obj:`set_context `\\ " -"\\(context\\)" +"If you need a more versatily type of record try :code:`ConfigsRecord` or " +":code:`ParametersRecord`." msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid ":py:obj:`to_client `\\ \\(\\)" -msgstr "" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr ":py:obj:`clear `\\ \\(\\)" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.to_client:1 of -msgid "Convert to object to Client type and return it." -msgstr "" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr ":py:obj:`count_bytes `\\ \\(\\)" -#: flwr.client.numpy_client.NumPyClient.evaluate:1::1 of -msgid ":py:obj:`context `\\" -msgstr "" +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -#: flwr.client.numpy_client.NumPyClient.evaluate:3 -#: flwr.client.numpy_client.NumPyClient.fit:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:5 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:8 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:5 -#: flwr.server.strategy.strategy.Strategy.configure_fit:5 -#: flwr.server.strategy.strategy.Strategy.evaluate:8 of -msgid "The current (global) model parameters." -msgstr "" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" -#: flwr.client.numpy_client.NumPyClient.evaluate:5 of -msgid "" -"Configuration parameters which allow the server to influence evaluation " -"on the client. 
It can be used to communicate arbitrary values from the " -"server to the client, for example, to influence the number of examples " -"used for evaluation." -msgstr "" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr ":py:obj:`keys `\\ \\(\\)" -#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`popitem `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy msgid "" -"* **loss** (*float*) -- The evaluation loss of the model on the local " -"dataset. * **num_examples** (*int*) -- The number of examples used for " -"evaluation. * **metrics** (*Dict[str, Scalar]*) -- A dictionary mapping " -"arbitrary string keys to values of type bool, bytes, float, int, or " -"str. It can be used to communicate arbitrary values back to the server." -msgstr "" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +#: collections.abc.MutableMapping.clear:1::1 of msgid "" -"**loss** (*float*) -- The evaluation loss of the model on the local " -"dataset." +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" msgstr "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" -#: flwr.client.numpy_client.NumPyClient.evaluate:12 of -msgid "**num_examples** (*int*) -- The number of examples used for evaluation." 
+#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" msgstr "" +":py:obj:`values `\\ " +"\\(\\)" -#: flwr.client.numpy_client.NumPyClient.evaluate:13 -#: flwr.client.numpy_client.NumPyClient.fit:13 of -msgid "" -"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " -"string keys to values of type bool, bytes, float, int, or str. It can be " -"used to communicate arbitrary values back to the server." -msgstr "" +#: ../../source/ref-api/flwr.common.NDArray.rst:2 +msgid "NDArray" +msgstr "NDArray" -#: flwr.client.numpy_client.NumPyClient.evaluate:19 of -msgid "" -"The previous return type format (int, float, float) and the extended " -"format (int, float, float, Dict[str, Scalar]) have been deprecated and " -"removed since Flower 0.19." -msgstr "" +#: ../../source/ref-api/flwr.common.NDArrays.rst:2 +#, fuzzy +msgid "NDArrays" +msgstr "NDArray" -#: flwr.client.numpy_client.NumPyClient.fit:5 of +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +msgid ":py:obj:`tensors `\\" +msgstr ":py:obj:`tensors `\\" + +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +msgid ":py:obj:`tensor_type `\\" +msgstr ":py:obj:`tensor_type `\\" + +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:2 +msgid "ParametersRecord" +msgstr "ParametersRecord" + +#: flwr.common.record.parametersrecord.ParametersRecord:1 of msgid "" -"Configuration parameters which allow the server to influence training on " -"the client. It can be used to communicate arbitrary values from the " -"server to the client, for example, to set the number of (local) training " -"epochs." 
+"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" msgstr "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" -#: flwr.client.numpy_client.NumPyClient.fit:11 of -msgid "" -"* **parameters** (*NDArrays*) -- The locally updated model parameters. * " -"**num_examples** (*int*) -- The number of examples used for training. * " -"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " -"string keys to values of type bool, bytes, float, int, or str. It can " -"be used to communicate arbitrary values back to the server." +#: flwr.common.record.parametersrecord.ParametersRecord:3 of +#, fuzzy +msgid "" +"A dataclass storing named Arrays in order. This means that it holds " +"entries as an OrderedDict[str, Array]. ParametersRecord objects can be " +"viewed as an equivalent to PyTorch's state_dict, but holding serialised " +"tensors instead. A :code:`ParametersRecord` is one of the types of " +"records that a `flwr.common.RecordSet " +"`_ supports and can therefore be " +"used to construct :code:`common.Message` objects." msgstr "" +"Arrays라는 이름의 데이터 클래스를 순서대로 저장합니다. 즉, OrderedDict[str, Array]로 항목을 보유합니다. " +"ParametersRecord 객체는 파이토치의 state_dict와 동등한 것으로 볼 수 있지만, 대신 직렬화된 텐서를 " +"보유합니다." -#: flwr.client.numpy_client.NumPyClient.fit:11 of -msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." +#: flwr.common.record.parametersrecord.ParametersRecord:10 of +msgid "A dictionary that stores serialized array-like or tensor-like objects." msgstr "" -#: flwr.client.numpy_client.NumPyClient.fit:12 of -msgid "**num_examples** (*int*) -- The number of examples used for training." 
+#: flwr.common.record.parametersrecord.ParametersRecord:12 of +msgid "" +"A boolean indicating whether parameters should be deleted from the input " +"dictionary immediately after adding them to the record. If False, the " +"dictionary passed to `set_parameters()` will be empty once exiting from " +"that function. This is the desired behaviour when working with very large" +" models/tensors/arrays. However, if you plan to continue working with " +"your parameters after adding it to the record, set this flag to True. " +"When set to True, the data is duplicated in memory." msgstr "" -#: flwr.client.numpy_client.NumPyClient.get_parameters:3 of +#: flwr.common.record.parametersrecord.ParametersRecord:23 of msgid "" -"Configuration parameters requested by the server. This can be used to " -"tell the client which parameters are needed along with some Scalar " -"attributes." +"The usage of :code:`ParametersRecord` is envisioned for storing data " +"arrays (e.g. parameters of a machine learning model). These first need to" +" be serialized into a :code:`flwr.common.Array` data structure." msgstr "" -#: flwr.client.numpy_client.NumPyClient.get_parameters:8 of -msgid "**parameters** -- The local model parameters as a list of NumPy ndarrays." -msgstr "" +#: flwr.common.record.parametersrecord.ParametersRecord:27 of +#, fuzzy +msgid "Let's see some examples:" +msgstr "몇 가지 예를 살펴보겠습니다:" -#: flwr.client.numpy_client.NumPyClient.get_properties:3 of +#: flwr.common.record.parametersrecord.ParametersRecord:50 of msgid "" -"Configuration parameters requested by the server. This can be used to " -"tell the client which properties are needed along with some Scalar " -"attributes." +"Now that the NumPy array is embedded into a :code:`ParametersRecord` it " +"could be sent if added as part of a :code:`common.Message` or it could be" +" saved as a persistent state of a :code:`ClientApp` via its context. 
" +"Regardless of the usecase, we will sooner or later want to recover the " +"array in its original NumPy representation. For the example above, where " +"the array was serialized using the built-in utility function, " +"deserialization can be done as follows:" msgstr "" -#: flwr.client.numpy_client.NumPyClient.get_properties:8 of +#: flwr.common.record.parametersrecord.ParametersRecord:65 of msgid "" -"**properties** -- A dictionary mapping arbitrary string keys to values of" -" type bool, bytes, float, int, or str. It can be used to communicate " -"arbitrary property values back to the server." +"If you need finer control on how your arrays are serialized and " +"deserialized, you can construct :code:`Array` objects directly like this:" msgstr "" -#: ../../source/ref-api/flwr.client.run_client_app.rst:2 -msgid "run\\_client\\_app" +#: flwr.common.record.parametersrecord.ParametersRecord:83 of +msgid "" +"Note that different arrays (e.g. from PyTorch, Tensorflow) might require " +"different serialization mechanism. Howerver, they often support a " +"conversion to NumPy, therefore allowing to use the same or similar steps " +"as in the example above." 
msgstr "" -#: ../../source/ref-api/flwr.client.run_supernode.rst:2 -msgid "run\\_supernode" -msgstr "" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr ":py:obj:`clear `\\ \\(\\)" -#: ../../source/ref-api/flwr.client.start_client.rst:2 -msgid "start\\_client" -msgstr "" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr ":py:obj:`count_bytes `\\ \\(\\)" -#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr ":py:obj:`keys `\\ \\(\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`popitem `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy msgid "" -"The IPv4 or IPv6 address of the server. If the Flower server runs on the " -"same machine on port 8080, then `server_address` would be " -"`\"[::]:8080\"`." -msgstr "" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -#: flwr.client.app.start_client:7 of -msgid "A callable that instantiates a Client. 
(default: None)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" msgstr "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" -#: flwr.client.app.start_client:9 of +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" +msgstr ":py:obj:`values `\\ \\(\\)" + +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of msgid "" -"An implementation of the abstract base class `flwr.client.Client` " -"(default: None)" -msgstr "" +"Note that a small amount of Bytes might also be included in this counting" +" that correspond to metadata of the serialized object (e.g. of NumPy " +"array) needed for deseralization." +msgstr "역직렬화에 필요한 직렬화된 객체의 메타데이터(예: NumPy 배열)에 해당하는 소량의 바이트도 이 카운팅에 포함될 수 있습니다." -#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of +#: ../../source/ref-api/flwr.common.Properties.rst:2 +#, fuzzy +msgid "Properties" +msgstr "GetPropertiesRes" + +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 +msgid "ReconnectIns" +msgstr "ReconnectIns" + +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:28::1 +msgid ":py:obj:`seconds `\\" +msgstr ":py:obj:`seconds `\\" + +#: ../../source/ref-api/flwr.common.RecordSet.rst:2 +msgid "RecordSet" +msgstr "RecordSet" + +#: flwr.common.record.recordset.RecordSet:3 of msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower" -" server. The default should be sufficient for most models. Users who " -"train very large models might need to increase this value. Note that the " -"Flower server needs to be started with the same value (see " -"`flwr.server.start_server`), otherwise it will not know about the " -"increased limit and block larger messages." 
+"A :code:`RecordSet` is the unified mechanism by which parameters, metrics" +" and configs can be either stored as part of a `flwr.common.Context " +"`_ in your apps or communicated as part of a " +"`flwr.common.Message `_ between your apps." msgstr "" -#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 of +#: flwr.common.record.recordset.RecordSet:9 of msgid "" -"The PEM-encoded root certificates as a byte string or a path string. If " -"provided, a secure connection using the certificates will be established " -"to an SSL-enabled Flower server." +"A dictionary of :code:`ParametersRecords` that can be used to record and " +"communicate model parameters and high-dimensional arrays." msgstr "" -#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of +#: flwr.common.record.recordset.RecordSet:12 of msgid "" -"Starts an insecure gRPC connection when True. Enables HTTPS connection " -"when False, using system certificates if `root_certificates` is None." +"A dictionary of :code:`MetricsRecord` that can be used to record and " +"communicate scalar-valued metrics that are the result of performing and " +"action, for example, by a :code:`ClientApp`." msgstr "" -#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of +#: flwr.common.record.recordset.RecordSet:16 of msgid "" -"Configure the transport layer. Allowed values: - 'grpc-bidi': gRPC, " -"bidirectional streaming - 'grpc-rere': gRPC, request-response " -"(experimental) - 'rest': HTTP (experimental)" +"A dictionary of :code:`ConfigsRecord` that can be used to record and " +"communicate configuration values to an entity (e.g. to a " +":code:`ClientApp`) for it to adjust how an action is performed." msgstr "" -#: flwr.client.app.start_client:31 of +#: flwr.common.record.recordset.RecordSet:24 of msgid "" -"The maximum number of times the client will try to connect to the server " -"before giving up in case of a connection error. 
If set to None, there is " -"no limit to the number of tries." +"A :code:`RecordSet` can hold three types of records, each designed with " +"an specific purpose. What is common to all of them is that they are " +"Python dictionaries designed to ensure that each key-value pair adheres " +"to specified data types." msgstr "" -#: flwr.client.app.start_client:35 of +#: flwr.common.record.recordset.RecordSet:29 of +#, fuzzy +msgid "Let's see an example." +msgstr "몇 가지 예를 살펴보겠습니다:" + +#: flwr.common.record.recordset.RecordSet:47 of msgid "" -"The maximum duration before the client stops trying to connect to the " -"server in case of connection error. If set to None, there is no limit to " -"the total time." +"Adding a :code:`ParametersRecord` follows the same steps as above but " +"first, the array needs to be serialized and represented as a " +":code:`flwr.common.Array`. If the array is a :code:`NumPy` array, you can" +" use the built-in utility function `array_from_numpy " +"`_. It is often possible to convert an" +" array first to :code:`NumPy` and then use the aforementioned function." msgstr "" -#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of -msgid "Starting a gRPC client with an insecure server connection:" +#: flwr.common.record.recordset.RecordSet:66 of +msgid "" +"For additional examples on how to construct each of the records types " +"shown above, please refer to the documentation for :code:`ConfigsRecord`," +" :code:`MetricsRecord` and :code:`ParametersRecord`." 
msgstr "" -#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of -msgid "Starting an SSL-enabled gRPC client using system certificates:" -msgstr "" +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`configs_records `\\" +msgstr ":py:obj:`configs_records `\\" -#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of -msgid "Starting an SSL-enabled gRPC client using provided certificates:" -msgstr "" +#: flwr.common.RecordSet.configs_records:1 +#: flwr.common.RecordSet.configs_records:1::1 of +msgid "Dictionary holding ConfigsRecord instances." +msgstr "Dictionary holding ConfigsRecord instances." -#: ../../source/ref-api/flwr.client.start_numpy_client.rst:2 -msgid "start\\_numpy\\_client" -msgstr "" +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`metrics_records `\\" +msgstr ":py:obj:`metrics_records `\\" -#: flwr.client.app.start_numpy_client:5 of +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.metrics_records:1 of +msgid "Dictionary holding MetricsRecord instances." +msgstr "Dictionary holding MetricsRecord instances." + +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`parameters_records `\\" +msgstr ":py:obj:`parameters_records `\\" + +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.parameters_records:1 of +msgid "Dictionary holding ParametersRecord instances." +msgstr "Dictionary holding ParametersRecord instances." + +#: ../../source/ref-api/flwr.common.ServerMessage.rst:2 +msgid "ServerMessage" +msgstr "ServerMessage" + +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid ":py:obj:`evaluate_ins `\\" +msgstr ":py:obj:`evaluate_ins `\\" + +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid ":py:obj:`fit_ins `\\" +msgstr ":py:obj:`fit_ins `\\" + +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 msgid "" -"This function is deprecated since 1.7.0. 
Use " -":code:`flwr.client.start_client` instead and first convert your " -":code:`NumPyClient` to type :code:`flwr.client.Client` by executing its " -":code:`to_client()` method." +":py:obj:`get_parameters_ins " +"`\\" msgstr "" +":py:obj:`get_parameters_ins " +"`\\" -#: flwr.client.app.start_numpy_client:13 of -msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid "" +":py:obj:`get_properties_ins " +"`\\" msgstr "" +":py:obj:`get_properties_ins " +"`\\" -#: ../../source/ref-api/flwr.common.rst:2 -msgid "common" -msgstr "" +#: ../../source/ref-api/flwr.common.Status.rst:2 +msgid "Status" +msgstr "Status" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" -msgstr "" +#: ../../source/ref-api/flwr.common.Status.rst:29::1 +msgid ":py:obj:`code `\\" +msgstr ":py:obj:`code `\\" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.record.conversion_utils.array_from_numpy:1 of -msgid "Create Array from NumPy ndarray." -msgstr "" +#: ../../source/ref-api/flwr.common.Status.rst:29::1 +msgid ":py:obj:`message `\\" +msgstr ":py:obj:`message `\\" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" -msgstr "" +#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 +msgid "array\\_from\\_numpy" +msgstr "array\\_from\\_numpy" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.bytes_to_ndarray:1 of -msgid "Deserialize NumPy ndarray from bytes." 
-msgstr "" +#: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 +msgid "bytes\\_to\\_ndarray" +msgstr "bytes\\_to\\_ndarray" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/ref-api/flwr.common.configure.rst:2 +msgid "configure" +msgstr "구성" + +#: ../../source/ref-api/flwr.common.event.rst:2 +msgid "event" +msgstr "이벤트" + +#: ../../source/ref-api/flwr.common.log.rst:2 +msgid "log" +msgstr "로그" + +#: logging.Logger.log:3 of msgid "" -":py:obj:`configure `\\ \\(identifier\\[\\, " -"filename\\, host\\]\\)" -msgstr "" +"To pass exception information, use the keyword argument exc_info with a " +"true value, e.g." +msgstr "예외 정보를 전달하려면 키워드 argument exc_info를 참 값과 함께 사용합니다." -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.logger.configure:1 of -msgid "Configure logging to file and/or remote log server." +#: logging.Logger.log:6 of +#, python-format +msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid "" -":py:obj:`event `\\ \\(event\\_type\\[\\, " -"event\\_details\\]\\)" +#: ../../source/ref-api/flwr.common.ndarray_to_bytes.rst:2 +msgid "ndarray\\_to\\_bytes" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.telemetry.event:1 of -msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." +#: ../../source/ref-api/flwr.common.ndarrays_to_parameters.rst:2 +msgid "ndarrays\\_to\\_parameters" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid "" -":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " -"\\*\\*kwargs\\)" +#: ../../source/ref-api/flwr.common.now.rst:2 +msgid "now" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 -#: of -msgid "Log 'msg % args' with the integer severity 'level'." 
+#: ../../source/ref-api/flwr.common.parameters_to_ndarrays.rst:2 +msgid "parameters\\_to\\_ndarrays" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" +#: ../../source/ref-api/flwr.server.rst:2 +msgid "server" +msgstr "서버" + +#: ../../source/ref-api/flwr.server.rst:22::1 +msgid "" +":py:obj:`start_server `\\ \\(\\*\\[\\, " +"server\\_address\\, server\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.ndarray_to_bytes:1 of -msgid "Serialize NumPy ndarray to bytes." +#: ../../source/ref-api/flwr.server.rst:22::1 +#: flwr.server.app.start_server:1 of +msgid "Start a Flower server using the gRPC transport layer." +msgstr "gRPC transport layer를 사용하여 Flower 서버를 실행하세요." + +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`ClientManager `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`now `\\ \\(\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.client_manager.ClientManager:1 of +msgid "Abstract base class for managing Flower clients." +msgstr "Flower 클라이언트를 관리하기 위한 Abstract base class." + +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`Driver `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.date.now:1 of -msgid "Construct a datetime from time.time() with time zone set to UTC." +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.driver.driver.Driver:1 of +msgid "Abstract base Driver class for the Driver API." +msgstr "Driver API를 위한 Abstract base Driver class." + +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`History `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.history.History:1 of +msgid "History class for training and/or evaluation metrics collection." +msgstr "메트릭 콜렉션을 훈련 및 평가하기 위한 History class." 
+ +#: ../../source/ref-api/flwr.server.rst:37::1 +#, fuzzy msgid "" -":py:obj:`ndarrays_to_parameters `\\ " -"\\(ndarrays\\)" -msgstr "" +":py:obj:`LegacyContext `\\ \\(context\\[\\, " +"config\\, strategy\\, ...\\]\\)" +msgstr ":py:obj:`Context `\\ \\(state\\)" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.ndarrays_to_parameters:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 -#: of -msgid "Convert NumPy ndarrays to parameters object." -msgstr "" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Legacy Context." +msgstr "레거시 콘텍스트." -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/ref-api/flwr.server.rst:37::1 msgid "" -":py:obj:`parameters_to_ndarrays `\\ " -"\\(parameters\\)" +":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " +"strategy\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.parameters_to_ndarrays:1 of -msgid "Convert parameters object to NumPy ndarrays." +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid "" +":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " +"strategy\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.server_app.ServerApp:1 of +msgid "Flower ServerApp." +msgstr "Flower 서버." + +#: ../../source/ref-api/flwr.server.rst:37::1 +#, fuzzy msgid "" -":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " -"data\\)" +":py:obj:`ServerAppComponents `\\ " +"\\(\\[server\\, config\\, ...\\]\\)" msgstr "" +":py:obj:`ServerMessage `\\ " +"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.parametersrecord.Array:1 of -msgid "Array type." 
+#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.serverapp_components.ServerAppComponents:1 of +msgid "Components to construct a ServerApp." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/ref-api/flwr.server.rst:37::1 msgid "" -":py:obj:`ClientMessage `\\ " -"\\(\\[get\\_properties\\_res\\, ...\\]\\)" +":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," +" round\\_timeout\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ClientMessage:1 of -msgid "ClientMessage is a container used to hold one result message." -msgstr "" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.server_config.ServerConfig:1 of +msgid "Flower server config." +msgstr "Flower 서버 설정." -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Code `\\ \\(value\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Code:1 of -msgid "Client status codes." -msgstr "" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.client_manager.SimpleClientManager:1 of +msgid "Provides a pool of available clients." +msgstr "사용 가능한 클라이언트 그룹 제공." -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`ConfigsRecord `\\ " -"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" -msgstr "" +#: ../../source/ref-api/flwr.server.rst:56::1 +#, fuzzy +msgid ":py:obj:`flwr.server.strategy `\\" +msgstr ":py:obj:`state `\\" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.configsrecord.ConfigsRecord:1 of -msgid "Configs record." +#: ../../source/ref-api/flwr.server.rst:56::1 +#: flwr.server.strategy:1 of +msgid "Contains the strategy abstraction and different implementations." 
msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Context `\\ \\(state\\)" -msgstr "" +#: ../../source/ref-api/flwr.server.rst:56::1 +#, fuzzy +msgid ":py:obj:`flwr.server.workflow `\\" +msgstr ":py:obj:`flwr.server `\\" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.context.Context:1 of -msgid "State of your run." +#: ../../source/ref-api/flwr.server.rst:56::1 +#: flwr.server.workflow:1 of +msgid "Workflows." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" +#: ../../source/ref-api/flwr.server.ClientManager.rst:2 +msgid "ClientManager" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.DisconnectRes:1 of -msgid "DisconnectRes message from client to server." +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`all `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`EvaluateIns `\\ \\(parameters\\, " -"config\\)" +#: flwr.server.client_manager.ClientManager.all:1 +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.all:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "Return all available clients." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.EvaluateIns:1 of -msgid "Evaluate instructions for a client." +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`num_available `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " -"num\\_examples\\, metrics\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.num_available:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.num_available:1 of +msgid "Return the number of available clients." 
msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.EvaluateRes:1 of -msgid "Evaluate response from a client." +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`register `\\ \\(client\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`EventType `\\ \\(value\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.register:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.register:1 of +msgid "Register Flower ClientProxy instance." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.telemetry.EventType:1 of -msgid "Types of telemetry events." +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid "" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.sample:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.sample:1 of +msgid "Sample a number of Flower ClientProxy instances." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.FitIns:1 of -msgid "Fit instructions for a client." +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`unregister `\\ \\(client\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.unregister:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.unregister:1 of +msgid "Unregister Flower ClientProxy instance." +msgstr "Flower ClientProxy 인스턴스 등록 해제." 
+ +#: flwr.server.client_manager.ClientManager.all:1::1 of msgid "" -":py:obj:`FitRes `\\ \\(status\\, parameters\\, " -"num\\_examples\\, metrics\\)" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\, timeout\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.FitRes:1 of -msgid "Fit response from a client." -msgstr "" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.wait_for:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.wait_for:1 of +msgid "Wait until at least `num_clients` are available." +msgstr "적어도 1개의 `num_clients` 가 사용 가능해질 때까지 기다리세요." -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" +#: flwr.server.client_manager.ClientManager.num_available:3 +#: flwr.server.client_manager.SimpleClientManager.num_available:3 of +msgid "**num_available** -- The number of currently available clients." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Error:1 of -msgid "A dataclass that stores information about an error that occurred." +#: flwr.server.client_manager.ClientManager.register:6 +#: flwr.server.client_manager.SimpleClientManager.register:6 of +msgid "" +"**success** -- Indicating if registration was successful. False if " +"ClientProxy is already registered or can not be registered for any " +"reason." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" +#: flwr.server.client_manager.ClientManager.unregister:3 +#: flwr.server.client_manager.SimpleClientManager.unregister:3 of +msgid "This method is idempotent." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetParametersIns:1 of -msgid "Parameters request for a client." 
+#: ../../source/ref-api/flwr.server.Driver.rst:2 +msgid "Driver" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 msgid "" -":py:obj:`GetParametersRes `\\ \\(status\\, " -"parameters\\)" +":py:obj:`create_message `\\ " +"\\(content\\, message\\_type\\, ...\\[\\, ttl\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetParametersRes:1 of -msgid "Response when asked to return parameters." +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.create_message:1 of +msgid "Create a new message with specified parameters." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +msgid ":py:obj:`get_node_ids `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetPropertiesIns:1 of -msgid "Properties request for a client." +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.get_node_ids:1 of +msgid "Get node IDs." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 msgid "" -":py:obj:`GetPropertiesRes `\\ \\(status\\, " -"properties\\)" +":py:obj:`pull_messages `\\ " +"\\(message\\_ids\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetPropertiesRes:1 of -msgid "Properties response from a client." +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.pull_messages:1 of +msgid "Pull messages based on message IDs." 
msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 msgid "" -":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " -"error\\]\\)" +":py:obj:`push_messages `\\ " +"\\(messages\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Message:1 of -msgid "State of your application from the viewpoint of the entity using it." +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.push_messages:1 of +msgid "Push messages to specified node IDs." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`MessageType `\\ \\(\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +msgid "" +":py:obj:`send_and_receive `\\ " +"\\(messages\\, \\*\\[\\, timeout\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.constant.MessageType:1 of -msgid "Message type." +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.send_and_receive:1 of +msgid "Push messages to specified node IDs and pull the reply messages." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" +#: flwr.server.driver.driver.Driver.create_message:1::1 of +#, fuzzy +msgid ":py:obj:`run `\\" +msgstr ":py:obj:`flwr.server `\\" + +#: flwr.server.Driver.run:1 +#: flwr.server.driver.driver.Driver.create_message:1::1 of +#, fuzzy +msgid "Run information." +msgstr "시뮬레이션 실행" + +#: flwr.server.driver.driver.Driver.create_message:3 of +msgid "" +"This method constructs a new `Message` with given content and metadata. " +"The `run_id` and `src_node_id` will be set automatically." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.constant.MessageTypeLegacy:1 of -msgid "Legacy message type." +#: flwr.server.driver.driver.Driver.create_message:6 of +msgid "" +"The content for the new message. 
This holds records that are to be sent " +"to the destination node." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.server.driver.driver.Driver.create_message:9 of msgid "" -":py:obj:`Metadata `\\ \\(run\\_id\\, " -"message\\_id\\, src\\_node\\_id\\, ...\\)" +"The type of the message, defining the action to be executed on the " +"receiving end." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Metadata:1 of -msgid "A dataclass holding metadata associated with the current message." +#: flwr.server.driver.driver.Driver.create_message:12 of +msgid "The ID of the destination node to which the message is being sent." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.server.driver.driver.Driver.create_message:14 of msgid "" -":py:obj:`MetricsRecord `\\ " -"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" +"The ID of the group to which this message is associated. In some " +"settings, this is used as the FL round." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.metricsrecord.MetricsRecord:1 of -msgid "Metrics record." +#: flwr.server.driver.driver.Driver.create_message:17 of +msgid "" +"Time-to-live for the round trip of this message, i.e., the time from " +"sending this message to receiving a reply. It specifies in seconds the " +"duration for which the message and its potential reply are considered " +"valid. If unset, the default TTL (i.e., `common.DEFAULT_TTL`) will be " +"used." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`NDArray `\\" +#: flwr.server.driver.driver.Driver.create_message:23 of +msgid "" +"**message** -- A new `Message` instance with the specified content and " +"metadata." 
msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.server.driver.driver.Driver.pull_messages:3 of msgid "" -"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " -":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" +"This method is used to collect messages from the SuperLink that " +"correspond to a set of given message IDs." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`Parameters `\\ \\(tensors\\, " -"tensor\\_type\\)" +#: flwr.server.driver.driver.Driver.pull_messages:6 of +msgid "An iterable of message IDs for which reply messages are to be retrieved." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Parameters:1 of -msgid "Model parameters." +#: flwr.server.driver.driver.Driver.pull_messages:9 of +msgid "**messages** -- An iterable of messages received." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.server.driver.driver.Driver.push_messages:3 of msgid "" -":py:obj:`ParametersRecord `\\ " -"\\(\\[array\\_dict\\, keep\\_input\\]\\)" +"This method takes an iterable of messages and sends each message to the " +"node specified in `dst_node_id`." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.parametersrecord.ParametersRecord:1 of -msgid "Parameters record." +#: flwr.server.driver.driver.Driver.push_messages:6 +#: flwr.server.driver.driver.Driver.send_and_receive:7 of +msgid "An iterable of messages to be sent." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" +#: flwr.server.driver.driver.Driver.push_messages:9 of +msgid "" +"**message_ids** -- An iterable of IDs for the messages that were sent, " +"which can be used to pull replies." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ReconnectIns:1 of -msgid "ReconnectIns message from server to client." 
+#: flwr.server.driver.driver.Driver.send_and_receive:3 of +msgid "" +"This method sends a list of messages to their destination node IDs and " +"then waits for the replies. It continues to pull replies until either all" +" replies are received or the specified timeout duration is exceeded." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.server.driver.driver.Driver.send_and_receive:9 of msgid "" -":py:obj:`RecordSet `\\ " -"\\(\\[parameters\\_records\\, ...\\]\\)" +"The timeout duration in seconds. If specified, the method will wait for " +"replies for this duration. If `None`, there is no time limit and the " +"method will wait until replies for all messages are received." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.recordset.RecordSet:1 of -msgid "RecordSet stores groups of parameters, metrics and configs." +#: flwr.server.driver.driver.Driver.send_and_receive:14 of +msgid "**replies** -- An iterable of reply messages received from the SuperLink." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.server.driver.driver.Driver.send_and_receive:19 of msgid "" -":py:obj:`ServerMessage `\\ " -"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" +"This method uses `push_messages` to send the messages and `pull_messages`" +" to collect the replies. If `timeout` is set, the method may not return " +"replies for all sent messages. A message remains valid until its TTL, " +"which is not affected by `timeout`." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ServerMessage:1 of -msgid "ServerMessage is a container used to hold one instruction message." 
+#: ../../source/ref-api/flwr.server.History.rst:2 +msgid "History" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Status `\\ \\(code\\, message\\)" +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_loss_centralized " +"`\\ \\(server\\_round\\, " +"loss\\)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Status:1 of -msgid "Client status." +#: flwr.server.history.History.add_loss_centralized:1 +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "Add one loss entry (from centralized evaluation)." msgstr "" -#: ../../source/ref-api/flwr.common.Array.rst:2 -msgid "Array" +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_loss_distributed " +"`\\ \\(server\\_round\\, " +"loss\\)" msgstr "" -#: flwr.common.record.parametersrecord.Array:3 of -msgid "" -"A dataclass containing serialized data from an array-like or tensor-like " -"object along with some metadata about it." +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_loss_distributed:1 of +msgid "Add one loss entry (from distributed evaluation)." msgstr "" -#: flwr.common.record.parametersrecord.Array:6 of +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -"A string representing the data type of the serialised object (e.g. " -"`np.float32`)" +":py:obj:`add_metrics_centralized " +"`\\ \\(server\\_round\\, " +"metrics\\)" msgstr "" -#: flwr.common.record.parametersrecord.Array:8 of -msgid "" -"A list representing the shape of the unserialized array-like object. This" -" is used to deserialize the data (depending on the serialization method) " -"or simply as a metadata field." +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_centralized:1 of +msgid "Add metrics entries (from centralized evaluation)." 
msgstr "" -#: flwr.common.record.parametersrecord.Array:12 of +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -"A string indicating the type of serialisation mechanism used to generate " -"the bytes in `data` from an array-like or tensor-like object." +":py:obj:`add_metrics_distributed " +"`\\ \\(server\\_round\\, " +"metrics\\)" msgstr "" -#: flwr.common.record.parametersrecord.Array:15 of -msgid "A buffer of bytes containing the data." +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed:1 of +msgid "Add metrics entries (from distributed evaluation)." msgstr "" -#: ../../source/ref-api/flwr.common.Array.rst:26::1 -msgid ":py:obj:`numpy `\\ \\(\\)" +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_metrics_distributed_fit " +"`\\ \\(server\\_round\\," +" ...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Array.rst:26::1 -#: flwr.common.record.parametersrecord.Array.numpy:1 of -msgid "Return the array as a NumPy array." +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed_fit:1 of +msgid "Add metrics entries (from distributed fit)." 
msgstr "" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`dtype `\\" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 +msgid "LegacyContext" msgstr "" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`shape `\\" +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Bases: :py:class:`~flwr.common.context.Context`" msgstr "" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`stype `\\" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`config `\\" msgstr "" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`data `\\" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`strategy `\\" msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:2 -msgid "ClientMessage" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`client_manager `\\" msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -msgid ":py:obj:`evaluate_res `\\" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`history `\\" msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -msgid ":py:obj:`fit_res `\\" -msgstr "" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#, fuzzy +msgid ":py:obj:`node_id `\\" +msgstr ":py:obj:`src_node_id `\\" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -msgid "" -":py:obj:`get_parameters_res " -"`\\" -msgstr "" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#, fuzzy +msgid ":py:obj:`node_config `\\" +msgstr ":py:obj:`config `\\" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -msgid "" -":py:obj:`get_properties_res " -"`\\" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`state `\\" msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:2 -msgid "Code" -msgstr "" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 
+#, fuzzy +msgid ":py:obj:`run_config `\\" +msgstr ":py:obj:`config `\\" -#: flwr.common.typing.Code:1 of -msgid "Bases: :py:class:`~enum.Enum`" +#: ../../source/ref-api/flwr.server.Server.rst:2 +msgid "Server" msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -msgid ":py:obj:`OK `\\" +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`client_manager `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -msgid "" -":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " -"`\\" +#: flwr.server.server.Server.client_manager:1 +#: flwr.server.server.Server.client_manager:1::1 of +msgid "Return ClientManager." msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#: flwr.server.server.Server.client_manager:1::1 of msgid "" -":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " -"`\\" +":py:obj:`disconnect_all_clients " +"`\\ \\(timeout\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -msgid ":py:obj:`FIT_NOT_IMPLEMENTED `\\" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.disconnect_all_clients:1 of +msgid "Send shutdown signal to all clients." 
msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#: flwr.server.server.Server.client_manager:1::1 of msgid "" -":py:obj:`EVALUATE_NOT_IMPLEMENTED " -"`\\" -msgstr "" - -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 -msgid "ConfigsRecord" +":py:obj:`evaluate_round `\\ " +"\\(server\\_round\\, timeout\\)" msgstr "" -#: flwr.common.record.configsrecord.ConfigsRecord:1 of -msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`int` | :py:class:`float` | :py:class:`str` |" -" :py:class:`bytes` | :py:class:`bool` | :py:class:`~typing.List`\\ " -"[:py:class:`int`] | :py:class:`~typing.List`\\ [:py:class:`float`] | " -":py:class:`~typing.List`\\ [:py:class:`str`] | :py:class:`~typing.List`\\" -" [:py:class:`bytes`] | :py:class:`~typing.List`\\ [:py:class:`bool`]]" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.evaluate_round:1 of +msgid "Validate current global model on a number of clients." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`clear `\\ \\(\\)" +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1 -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid "Remove all items from R." +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit:1 of +msgid "Run federated averaging for a number of rounds." 
msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`count_bytes `\\ \\(\\)" +#: flwr.server.server.Server.client_manager:1::1 of +msgid "" +":py:obj:`fit_round `\\ \\(server\\_round\\," +" timeout\\)" msgstr "" -#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 -#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 -#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid "Return number of Bytes stored in this object." +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit_round:1 of +msgid "Perform a single round of federated averaging." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#: flwr.server.server.Server.client_manager:1::1 of +msgid "" +":py:obj:`set_max_workers `\\ " +"\\(max\\_workers\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.get:1 of -msgid "d defaults to None." +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_max_workers:1 of +msgid "Set the max_workers used by ThreadPoolExecutor." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`items `\\ \\(\\)" +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`set_strategy `\\ \\(strategy\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`keys `\\ \\(\\)" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_strategy:1 of +msgid "Replace server strategy." 
msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +#: ../../source/ref-api/flwr.server.ServerApp.rst:2 +msgid "ServerApp" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.pop:1 of -msgid "If key is not found, d is returned if given, otherwise KeyError is raised." +#: flwr.server.server_app.ServerApp:5 of +msgid "Use the `ServerApp` with an existing `Strategy`:" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" +#: flwr.server.server_app.ServerApp:17 of +msgid "Use the `ServerApp` with a custom main function:" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.update:1 of -msgid "Update R from dict/iterable E and F." +#: flwr.server.server_app.ServerApp.main:1::1 of +msgid ":py:obj:`main `\\ \\(\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`values `\\ \\(\\)" +#: flwr.server.server_app.ServerApp.main:1 +#: flwr.server.server_app.ServerApp.main:1::1 of +msgid "Return a decorator that registers the main fn with the server app." msgstr "" -#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of -msgid "This function counts booleans as occupying 1 Byte." +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:2 +msgid "ServerAppComponents" msgstr "" -#: ../../source/ref-api/flwr.common.Context.rst:2 -msgid "Context" +#: flwr.server.serverapp_components.ServerAppComponents:3 of +msgid "" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, one will be created internally." msgstr "" -#: flwr.common.context.Context:3 of +#: flwr.server.app.start_server:9 +#: flwr.server.serverapp_components.ServerAppComponents:6 of msgid "" -"Holds records added by the entity in a given run and that will stay " -"local. 
This means that the data it holds will never leave the system it's" -" running from. This can be used as an intermediate storage or scratchpad " -"when executing mods. It can also be used as a memory to access at " -"different points during the lifecycle of this entity (e.g. across " -"multiple rounds)" +"Currently supported values are `num_rounds` (int, default: 1) and " +"`round_timeout` in seconds (float, default: None)." msgstr "" -#: ../../source/ref-api/flwr.common.Context.rst:28::1 -msgid ":py:obj:`state `\\" +#: flwr.server.serverapp_components.ServerAppComponents:9 of +msgid "" +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`flwr.server.strategy.FedAvg` will be used." msgstr "" -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 -msgid "DisconnectRes" +#: flwr.server.serverapp_components.ServerAppComponents:13 of +msgid "" +"An implementation of the class `flwr.server.ClientManager`. If no " +"implementation is provided, then `flwr.server.SimpleClientManager` will " +"be used." msgstr "" -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:28::1 -msgid ":py:obj:`reason `\\" +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +#, fuzzy +msgid "" +":py:obj:`client_manager " +"`\\" msgstr "" +":py:obj:`RUN_CLIENT_APP_ENTER " +"`\\" -#: ../../source/ref-api/flwr.common.Error.rst:2 -msgid "Error" -msgstr "" +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +#, fuzzy +msgid ":py:obj:`config `\\" +msgstr ":py:obj:`config `\\" -#: flwr.common.message.Error:3 of -msgid "An identifier for the error." 
+#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +#, fuzzy +msgid ":py:obj:`server `\\" +msgstr ":py:obj:`flwr.server `\\" + +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +#, fuzzy +msgid ":py:obj:`strategy `\\" +msgstr ":py:obj:`state `\\" + +#: ../../source/ref-api/flwr.server.ServerConfig.rst:2 +msgid "ServerConfig" msgstr "" -#: flwr.common.message.Error:5 of -msgid "A reason for why the error arose (e.g. an exception stack-trace)" +#: flwr.server.server_config.ServerConfig:3 of +msgid "" +"All attributes have default values which allows users to configure just " +"the ones they care about." msgstr "" -#: flwr.common.Error.code:1::1 of -msgid ":py:obj:`code `\\" +#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +msgid ":py:obj:`num_rounds `\\" msgstr "" -#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of -msgid "Error code." +#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +msgid ":py:obj:`round_timeout `\\" msgstr "" -#: flwr.common.Error.code:1::1 of -msgid ":py:obj:`reason `\\" +#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:2 +msgid "SimpleClientManager" msgstr "" -#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of -msgid "Reason reported about the error." 
+#: flwr.server.client_manager.SimpleClientManager:1 of +msgid "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 -msgid "EvaluateIns" +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid ":py:obj:`all `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 -msgid ":py:obj:`parameters `\\" +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "" +":py:obj:`num_available `\\" +" \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 -msgid ":py:obj:`config `\\" +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "" +":py:obj:`register `\\ " +"\\(client\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:2 -msgid "EvaluateRes" +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`status `\\" +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "" +":py:obj:`unregister `\\ " +"\\(client\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`loss `\\" +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\[\\, timeout\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`num_examples `\\" +#: flwr.server.client_manager.SimpleClientManager.wait_for:3 of +msgid "" +"Blocks until the requested number of clients is available or until a " +"timeout is reached. Current timeout default: 1 day." msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`metrics `\\" +#: flwr.server.client_manager.SimpleClientManager.wait_for:6 of +msgid "The number of clients to wait for." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:2 -msgid "EventType" +#: flwr.server.client_manager.SimpleClientManager.wait_for:8 of +msgid "The time in seconds to wait for, defaults to 86400 (24h)." msgstr "" -#: flwr.common.telemetry.EventType:1 of -msgid "Bases: :py:class:`str`, :py:class:`~enum.Enum`" +#: flwr.server.client_manager.SimpleClientManager.wait_for:11 of +msgid "**success**" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`encode `\\ \\(\\[encoding\\, " -"errors\\]\\)" +#: ../../source/ref-api/flwr.server.start_server.rst:2 +msgid "start\\_server" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.encode:1 of -msgid "Encode the string using the codec registered for encoding." +#: flwr.server.app.start_server:3 of +msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.app.start_server:5 of msgid "" -":py:obj:`replace `\\ \\(old\\, new\\[\\, " -"count\\]\\)" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, then `start_server` will create one." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.replace:1 of -msgid "Return a copy with all occurrences of substring old replaced by new." +#: flwr.server.app.start_server:12 of +msgid "" +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`start_server` will use `flwr.server.strategy.FedAvg`." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.app.start_server:16 of msgid "" -":py:obj:`split `\\ \\(\\[sep\\, " -"maxsplit\\]\\)" +"An implementation of the abstract base class `flwr.server.ClientManager`." 
+" If no implementation is provided, then `start_server` will use " +"`flwr.server.client_manager.SimpleClientManager`." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rsplit:1 flwr.common.EventType.split:1 of +#: flwr.server.app.start_server:21 of msgid "" -"Return a list of the substrings in the string, using sep as the separator" -" string." +"The maximum length of gRPC messages that can be exchanged with the Flower" +" clients. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower clients need to be started with the same value (see " +"`flwr.client.start_client`), otherwise clients will not know about the " +"increased limit and block larger messages." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.app.start_server:28 of msgid "" -":py:obj:`rsplit `\\ \\(\\[sep\\, " -"maxsplit\\]\\)" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order: * CA certificate. * " +"server certificate. * server private key." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`join `\\ \\(iterable\\, \\/\\)" +#: flwr.server.app.start_server:28 of +msgid "" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order:" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.join:1 of -msgid "Concatenate any number of strings." +#: flwr.server.app.start_server:32 of +msgid "CA certificate." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`capitalize `\\ \\(\\)" +#: flwr.server.app.start_server:33 of +msgid "server certificate." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.capitalize:1 of -msgid "Return a capitalized version of the string." +#: flwr.server.app.start_server:34 of +msgid "server private key." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`casefold `\\ \\(\\)" +#: flwr.server.app.start_server:37 of +msgid "**hist** -- Object containing training and evaluation metrics." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.casefold:1 of -msgid "Return a version of the string suitable for caseless comparisons." +#: flwr.server.app.start_server:42 of +msgid "Starting an insecure server:" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`title `\\ \\(\\)" +#: flwr.server.app.start_server:46 of +msgid "Starting an SSL-enabled server:" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.title:1 of -msgid "Return a version of the string where each word is titlecased." +#: ../../source/ref-api/flwr.server.strategy.rst:2 +msgid "strategy" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`center `\\ \\(width\\[\\, " -"fillchar\\]\\)" +":py:obj:`Bulyan `\\ \\(\\*\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.center:1 of -msgid "Return a centered string of length width." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.bulyan.Bulyan:1 of +msgid "Bulyan strategy." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +":py:obj:`DPFedAvgAdaptive `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -"Return the number of non-overlapping occurrences of substring sub in " -"string S[start:end]." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`expandtabs `\\ " -"\\(\\[tabsize\\]\\)" +":py:obj:`DPFedAvgFixed `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.expandtabs:1 of -msgid "Return a copy where all tab characters are expanded using spaces." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of +msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -"Return the lowest index in S where substring sub is found, such that sub " -"is contained within S[start:end]." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: of +msgid "Strategy wrapper for central DP with client-side adaptive clipping." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`partition `\\ \\(sep\\, \\/\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DifferentialPrivacyClientSideFixedClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.partition:1 flwr.common.EventType.rpartition:1 of -msgid "Partition the string into three parts using the given separator." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: of +msgid "Strategy wrapper for central DP with client-side fixed clipping." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " +"`\\ " +"\\(...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: of +msgid "Strategy wrapper for central DP with server-side adaptive clipping." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`ljust `\\ \\(width\\[\\, " -"fillchar\\]\\)" +":py:obj:`DifferentialPrivacyServerSideFixedClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.ljust:1 of -msgid "Return a left-justified string of length width." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 +#: of +msgid "Strategy wrapper for central DP with server-side fixed clipping." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`lower `\\ \\(\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FaultTolerantFedAvg " +"`\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.lower:1 of -msgid "Return a copy of the string converted to lowercase." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of +msgid "Configurable fault-tolerant FedAvg strategy implementation." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.lstrip:1 of -msgid "Return a copy of the string with leading whitespace removed." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 of +msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +":py:obj:`FedAdam `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -"Return the highest index in S where substring sub is found, such that sub" -" is contained within S[start:end]." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedadam.FedAdam:1 of +msgid "FedAdam - Adaptive Federated Optimization using Adam." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`rindex `\\ \\(sub\\[\\, " -"start\\[\\, end\\]\\]\\)" +":py:obj:`FedAvg `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +msgid "Federated Averaging strategy." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`rjust `\\ \\(width\\[\\, " -"fillchar\\]\\)" +":py:obj:`FedAvgAndroid `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rjust:1 of -msgid "Return a right-justified string of length width." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 of +msgid "Federated Averaging with Momentum strategy." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rstrip:1 of -msgid "Return a copy of the string with trailing whitespace removed." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedMedian `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`rpartition `\\ \\(sep\\, \\/\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedmedian.FedMedian:1 of +msgid "Configurable FedMedian strategy implementation." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`splitlines `\\ " -"\\(\\[keepends\\]\\)" +":py:obj:`FedOpt `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.splitlines:1 of -msgid "Return a list of the lines in the string, breaking at line boundaries." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedopt.FedOpt:1 of +msgid "Federated Optim strategy." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`strip `\\ \\(\\[chars\\]\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedProx `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.strip:1 of -msgid "Return a copy of the string with leading and trailing whitespace removed." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedprox.FedProx:1 of +msgid "Federated Optimization strategy." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`swapcase `\\ \\(\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedTrimmedAvg `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.swapcase:1 of -msgid "" -"Convert uppercase characters to lowercase and lowercase characters to " -"uppercase." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of +msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`translate `\\ \\(table\\, \\/\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedXgbBagging `\\ " +"\\(\\[evaluate\\_function\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.translate:1 of -msgid "Replace each character in the string using the given translation table." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of +msgid "Configurable FedXgbBagging strategy implementation." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`upper `\\ \\(\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedXgbCyclic `\\ " +"\\(\\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.upper:1 of -msgid "Return a copy of the string converted to uppercase." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of +msgid "Configurable FedXgbCyclic strategy implementation." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`startswith `\\ \\(prefix\\[\\," -" start\\[\\, end\\]\\]\\)" +":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return True if S starts with the specified prefix, False otherwise." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of +msgid "Configurable FedXgbNnAvg strategy implementation." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`endswith `\\ \\(suffix\\[\\, " -"start\\[\\, end\\]\\]\\)" +":py:obj:`FedYogi `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return True if S ends with the specified suffix, False otherwise." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "FedYogi [Reddi et al., 2020] strategy." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`removeprefix `\\ " -"\\(prefix\\, \\/\\)" +":py:obj:`Krum `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.removeprefix:1 of -msgid "Return a str with the given prefix string removed if present." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.krum.Krum:1 of +msgid "Krum [Blanchard et al., 2017] strategy." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`removesuffix `\\ " -"\\(suffix\\, \\/\\)" +":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " +"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.removesuffix:1 of -msgid "Return a str with the given suffix string removed if present." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Configurable QFedAvg strategy implementation." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isascii `\\ \\(\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid ":py:obj:`Strategy `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isascii:1 of -msgid "Return True if all characters in the string are ASCII, False otherwise." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.strategy.Strategy:1 of +msgid "Abstract base class for server strategy implementations." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`islower `\\ \\(\\)" +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:2 +msgid "Bulyan" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.islower:1 of -msgid "Return True if the string is a lowercase string, False otherwise." +#: flwr.server.strategy.bulyan.Bulyan:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 +#: flwr.server.strategy.fedmedian.FedMedian:1 +#: flwr.server.strategy.fedopt.FedOpt:1 flwr.server.strategy.fedprox.FedProx:1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 +#: flwr.server.strategy.krum.Krum:1 flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.fedavg.FedAvg`" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isupper `\\ \\(\\)" +#: flwr.server.strategy.bulyan.Bulyan:3 of +msgid "Implementation based on https://arxiv.org/abs/1802.07927." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isupper:1 of -msgid "Return True if the string is an uppercase string, False otherwise." 
+#: flwr.server.strategy.bulyan.Bulyan:5 +#: flwr.server.strategy.fedadagrad.FedAdagrad:5 +#: flwr.server.strategy.fedadam.FedAdam:5 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:5 +#: flwr.server.strategy.fedavgm.FedAvgM:5 flwr.server.strategy.fedopt.FedOpt:5 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:5 +#: flwr.server.strategy.fedyogi.FedYogi:5 flwr.server.strategy.krum.Krum:5 of +msgid "Fraction of clients used during training. Defaults to 1.0." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`istitle `\\ \\(\\)" +#: flwr.server.strategy.bulyan.Bulyan:7 +#: flwr.server.strategy.fedadagrad.FedAdagrad:7 +#: flwr.server.strategy.fedadam.FedAdam:7 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:7 +#: flwr.server.strategy.fedavgm.FedAvgM:7 flwr.server.strategy.fedopt.FedOpt:7 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:7 +#: flwr.server.strategy.fedyogi.FedYogi:7 flwr.server.strategy.krum.Krum:7 of +msgid "Fraction of clients used during validation. Defaults to 1.0." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.istitle:1 of -msgid "Return True if the string is a title-cased string, False otherwise." +#: flwr.server.strategy.bulyan.Bulyan:9 +#: flwr.server.strategy.fedadagrad.FedAdagrad:9 +#: flwr.server.strategy.fedadam.FedAdam:9 flwr.server.strategy.fedavg.FedAvg:13 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:9 +#: flwr.server.strategy.fedavgm.FedAvgM:9 flwr.server.strategy.fedopt.FedOpt:9 +#: flwr.server.strategy.fedprox.FedProx:45 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:9 +#: flwr.server.strategy.fedyogi.FedYogi:9 flwr.server.strategy.krum.Krum:9 of +msgid "Minimum number of clients used during training. Defaults to 2." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isspace `\\ \\(\\)" +#: flwr.server.strategy.bulyan.Bulyan:11 +#: flwr.server.strategy.fedadagrad.FedAdagrad:11 +#: flwr.server.strategy.fedadam.FedAdam:11 +#: flwr.server.strategy.fedavg.FedAvg:15 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:11 +#: flwr.server.strategy.fedavgm.FedAvgM:11 +#: flwr.server.strategy.fedopt.FedOpt:11 +#: flwr.server.strategy.fedprox.FedProx:47 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:11 +#: flwr.server.strategy.fedyogi.FedYogi:11 flwr.server.strategy.krum.Krum:11 of +msgid "Minimum number of clients used during validation. Defaults to 2." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isspace:1 of -msgid "Return True if the string is a whitespace string, False otherwise." +#: flwr.server.strategy.bulyan.Bulyan:13 +#: flwr.server.strategy.fedadagrad.FedAdagrad:13 +#: flwr.server.strategy.fedadam.FedAdam:13 +#: flwr.server.strategy.fedavg.FedAvg:17 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:13 +#: flwr.server.strategy.fedavgm.FedAvgM:13 +#: flwr.server.strategy.fedopt.FedOpt:13 +#: flwr.server.strategy.fedprox.FedProx:49 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:13 +#: flwr.server.strategy.fedyogi.FedYogi:13 flwr.server.strategy.krum.Krum:13 of +msgid "Minimum number of total clients in the system. Defaults to 2." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isdecimal `\\ \\(\\)" +#: flwr.server.strategy.bulyan.Bulyan:15 flwr.server.strategy.krum.Krum:15 of +msgid "Number of malicious clients in the system. Defaults to 0." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isdecimal:1 of -msgid "Return True if the string is a decimal string, False otherwise." 
+#: flwr.server.strategy.bulyan.Bulyan:17 +#: flwr.server.strategy.fedadagrad.FedAdagrad:15 +#: flwr.server.strategy.fedadam.FedAdam:15 +#: flwr.server.strategy.fedavg.FedAvg:19 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:15 +#: flwr.server.strategy.fedavgm.FedAvgM:15 +#: flwr.server.strategy.fedopt.FedOpt:15 +#: flwr.server.strategy.fedprox.FedProx:51 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:15 +#: flwr.server.strategy.fedyogi.FedYogi:17 +#: flwr.server.strategy.fedyogi.FedYogi:18 +#: flwr.server.strategy.fedyogi.FedYogi:19 flwr.server.strategy.krum.Krum:20 of +msgid "Optional function used for validation. Defaults to None." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isdigit `\\ \\(\\)" +#: flwr.server.strategy.bulyan.Bulyan:19 +#: flwr.server.strategy.fedadagrad.FedAdagrad:17 +#: flwr.server.strategy.fedadam.FedAdam:17 +#: flwr.server.strategy.fedavg.FedAvg:21 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:17 +#: flwr.server.strategy.fedavgm.FedAvgM:17 +#: flwr.server.strategy.fedopt.FedOpt:17 +#: flwr.server.strategy.fedprox.FedProx:53 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:17 +#: flwr.server.strategy.fedyogi.FedYogi:20 flwr.server.strategy.krum.Krum:22 of +msgid "Function used to configure training. Defaults to None." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isdigit:1 of -msgid "Return True if the string is a digit string, False otherwise." 
+#: flwr.server.strategy.bulyan.Bulyan:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:19 +#: flwr.server.strategy.fedadam.FedAdam:19 +#: flwr.server.strategy.fedavg.FedAvg:23 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:19 +#: flwr.server.strategy.fedavgm.FedAvgM:19 +#: flwr.server.strategy.fedopt.FedOpt:19 +#: flwr.server.strategy.fedprox.FedProx:55 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:19 +#: flwr.server.strategy.fedyogi.FedYogi:22 flwr.server.strategy.krum.Krum:24 of +msgid "Function used to configure validation. Defaults to None." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isnumeric `\\ \\(\\)" +#: flwr.server.strategy.bulyan.Bulyan:23 +#: flwr.server.strategy.fedadagrad.FedAdagrad:25 +#: flwr.server.strategy.fedadam.FedAdam:21 +#: flwr.server.strategy.fedavg.FedAvg:25 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:21 +#: flwr.server.strategy.fedavgm.FedAvgM:21 +#: flwr.server.strategy.fedopt.FedOpt:21 +#: flwr.server.strategy.fedprox.FedProx:57 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:21 +#: flwr.server.strategy.fedyogi.FedYogi:24 flwr.server.strategy.krum.Krum:26 of +msgid "Whether or not accept rounds containing failures. Defaults to True." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isnumeric:1 of -msgid "Return True if the string is a numeric string, False otherwise." +#: flwr.server.strategy.bulyan.Bulyan:25 +#: flwr.server.strategy.fedadagrad.FedAdagrad:27 +#: flwr.server.strategy.fedadam.FedAdam:23 +#: flwr.server.strategy.fedavg.FedAvg:27 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:24 +#: flwr.server.strategy.fedavgm.FedAvgM:23 +#: flwr.server.strategy.fedopt.FedOpt:23 +#: flwr.server.strategy.fedprox.FedProx:59 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:23 +#: flwr.server.strategy.fedyogi.FedYogi:26 flwr.server.strategy.krum.Krum:28 of +msgid "Initial global model parameters." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isalpha `\\ \\(\\)" +#: flwr.server.strategy.bulyan.Bulyan:27 of +msgid "" +"Byzantine resilient aggregation rule that is used as the first step of " +"the Bulyan (e.g., Krum)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isalpha:1 of -msgid "Return True if the string is an alphabetic string, False otherwise." +#: flwr.server.strategy.bulyan.Bulyan:29 of +msgid "arguments to the first_aggregation rule" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isalnum `\\ \\(\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isalnum:1 of -msgid "Return True if the string is an alpha-numeric string, False otherwise." +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Aggregate evaluation losses using weighted average." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isidentifier `\\ \\(\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isidentifier:1 of -msgid "Return True if the string is a valid Python identifier, False otherwise." +#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "Aggregate fit results using Bulyan." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isprintable `\\ \\(\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isprintable:1 of -msgid "Return True if the string is printable, False otherwise." 
+#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of +msgid "Configure the next round of evaluation." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`zfill `\\ \\(width\\, \\/\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.zfill:1 of -msgid "" -"Pad a numeric string with zeros on the left, to fill a field of the given" -" width." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_fit:1 +#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: 
flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of +msgid "Configure the next round of training." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`format `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return a formatted version of S, using substitutions from args and kwargs." +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Evaluate model parameters using an evaluation function." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`format_map `\\ \\(mapping\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return a formatted version of S, using substitutions from mapping." +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.initialize_parameters:1 +#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Initialize global model parameters." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`maketrans `\\" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.maketrans:1 of -msgid "Return a translation table usable for str.translate()." 
+#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_evaluation_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of +msgid "Use a fraction of available clients for evaluation." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`PING `\\" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_CLIENT_ENTER `\\" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_fit_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of +msgid "Return the sample size and the required number of available clients." 
msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_CLIENT_LEAVE `\\" +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 +msgid "DPFedAvgAdaptive" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_SERVER_ENTER `\\" +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_SERVER_LEAVE `\\" +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:3 of +msgid "This class is deprecated and will be removed in a future release." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`RUN_DRIVER_API_ENTER " -"`\\" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_DRIVER_API_LEAVE " -"`\\" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: 
flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation losses using the given strategy." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`RUN_FLEET_API_ENTER " -"`\\" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_FLEET_API_LEAVE " -"`\\" +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`RUN_SUPERLINK_ENTER " -"`\\" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SUPERLINK_LEAVE " -"`\\" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of +msgid "Configure the next round of evaluation using the specified strategy." 
msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`START_SIMULATION_ENTER " -"`\\" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`START_SIMULATION_LEAVE " -"`\\" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`DRIVER_CONNECT `\\" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of +msgid "Evaluate model parameters using an evaluation function from the strategy." 
msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`DRIVER_DISCONNECT `\\" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_DRIVER_ENTER `\\" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of +msgid "Initialize global model parameters using given strategy." 
msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_DRIVER_LEAVE `\\" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.configure_fit:3 +#: flwr.server.strategy.strategy.Strategy.evaluate:6 of +msgid "The current round of federated learning." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_CLIENT_APP_ENTER " -"`\\" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 +#: flwr.server.strategy.strategy.Strategy.configure_fit:7 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of +msgid "The client manager which holds all currently connected clients." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of msgid "" -":py:obj:`RUN_CLIENT_APP_LEAVE " -"`\\" +"**evaluate_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `EvaluateIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated evaluation." 
msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SERVER_APP_ENTER " -"`\\" +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 +msgid "DPFedAvgFixed" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SERVER_APP_LEAVE " -"`\\" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`RUN_SUPERNODE_ENTER " -"`\\" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`RUN_SUPERNODE_LEAVE " -"`\\" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.common.EventType.capitalize:3 of -msgid "" -"More specifically, make the first character have upper case and the rest " -"lower case." +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of +msgid "Aggregate training results using unweighted aggregation." msgstr "" -#: flwr.common.EventType.center:3 flwr.common.EventType.ljust:3 -#: flwr.common.EventType.rjust:3 of -msgid "Padding is done using the specified fill character (default is a space)." 
+#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.common.EventType.count:1 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"Return the number of non-overlapping occurrences of substring sub in " -"string S[start:end]. Optional arguments start and end are interpreted as" -" in slice notation." +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.common.EventType.encode:3 of -msgid "encoding" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of +msgid "" +"Configure the next round of training incorporating Differential Privacy " +"(DP)." msgstr "" -#: flwr.common.EventType.encode:4 of -msgid "The encoding in which to encode the string." +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.common.EventType.encode:9 of -msgid "errors" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.common.EventType.encode:6 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of msgid "" -"The error handling scheme to use for encoding errors. The default is " -"'strict' meaning that encoding errors raise a UnicodeEncodeError. Other " -"possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well " -"as any other name registered with codecs.register_error that can handle " -"UnicodeEncodeErrors." +"Configuration of the next training round includes information related to " +"DP, such as clip norm and noise stddev." 
msgstr "" -#: flwr.common.EventType.endswith:1 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 +#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of msgid "" -"Return True if S ends with the specified suffix, False otherwise. With " -"optional start, test S beginning at that position. With optional end, " -"stop comparing S at that position. suffix can also be a tuple of strings " -"to try." +"**fit_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `FitIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated learning." msgstr "" -#: flwr.common.EventType.expandtabs:3 of -msgid "If tabsize is not given, a tab size of 8 characters is assumed." +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyClientSideAdaptiveClipping" msgstr "" -#: flwr.common.EventType.find:1 flwr.common.EventType.index:1 of -msgid "" -"Return the lowest index in S where substring sub is found, such that sub " -"is contained within S[start:end]. Optional arguments start and end are " -"interpreted as in slice notation." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 +#: of +msgid "Use `adaptiveclipping_mod` modifier at the client side." msgstr "" -#: flwr.common.EventType.find:5 flwr.common.EventType.rfind:5 of -msgid "Return -1 on failure." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 +#: of +msgid "" +"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " +"happen on the client-side, usually by using the built-in " +"`adaptiveclipping_mod`." 
msgstr "" -#: flwr.common.EventType.format:1 of -msgid "" -"Return a formatted version of S, using substitutions from args and " -"kwargs. The substitutions are identified by braces ('{' and '}')." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 +#: of +msgid "The strategy to which DP functionalities will be added by this wrapper." msgstr "" -#: flwr.common.EventType.format_map:1 of -msgid "" -"Return a formatted version of S, using substitutions from mapping. The " -"substitutions are identified by braces ('{' and '}')." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 +#: of +msgid "The noise multiplier for the Gaussian mechanism for model updates." msgstr "" -#: flwr.common.EventType.index:5 flwr.common.EventType.rindex:5 of -msgid "Raises ValueError when the substring is not found." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 +#: of +msgid "The number of clients that are sampled on each round." 
msgstr "" -#: flwr.common.EventType.isalnum:3 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 +#: of msgid "" -"A string is alpha-numeric if all characters in the string are alpha-" -"numeric and there is at least one character in the string." +"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " +"recommends to set to 0.1." msgstr "" -#: flwr.common.EventType.isalpha:3 of -msgid "" -"A string is alphabetic if all characters in the string are alphabetic and" -" there is at least one character in the string." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 +#: of +msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." msgstr "" -#: flwr.common.EventType.isascii:3 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 +#: of msgid "" -"ASCII characters have code points in the range U+0000-U+007F. Empty " -"string is ASCII too." +"The learning rate for the clipping norm adaptation. Defaults to 0.2. " +"Andrew et al. recommends to set to 0.2." msgstr "" -#: flwr.common.EventType.isdecimal:3 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 +#: of msgid "" -"A string is a decimal string if all characters in the string are decimal " -"and there is at least one character in the string." +"The stddev of the noise added to the count of updates currently below the" +" estimate. Andrew et al. 
recommends to set to `expected_num_records/20`" msgstr "" -#: flwr.common.EventType.isdigit:3 of -msgid "" -"A string is a digit string if all characters in the string are digits and" -" there is at least one character in the string." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 +#: of +msgid "Create a strategy:" msgstr "" -#: flwr.common.EventType.isidentifier:3 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 +#: of msgid "" -"Call keyword.iskeyword(s) to test whether string s is a reserved " -"identifier, such as \"def\" or \"class\"." +"Wrap the strategy with the " +"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" msgstr "" -#: flwr.common.EventType.islower:3 of -msgid "" -"A string is lowercase if all cased characters in the string are lowercase" -" and there is at least one cased character in the string." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 +#: of +msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" msgstr "" -#: flwr.common.EventType.isnumeric:3 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"A string is numeric if all characters in the string are numeric and there" -" is at least one character in the string." 
+":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.common.EventType.isprintable:3 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"A string is printable if all of its characters are considered printable " -"in repr() or if it is empty." +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.common.EventType.isspace:3 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 +#: of +msgid "Aggregate training results and update clip norms." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"A string is whitespace if all characters in the string are whitespace and" -" there is at least one character in the string." +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.common.EventType.istitle:3 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"In a title-cased string, upper- and title-case characters may only follow" -" uncased characters and lowercase characters only cased ones." 
+":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.common.EventType.isupper:3 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"A string is uppercase if all cased characters in the string are uppercase" -" and there is at least one cased character in the string." +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.common.EventType.join:3 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"The string whose method is called is inserted in between each given " -"string. The result is returned as a new string." +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -#: flwr.common.EventType.join:6 of -msgid "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 +msgid "DifferentialPrivacyClientSideFixedClipping" msgstr "" -#: flwr.common.EventType.lstrip:3 flwr.common.EventType.rstrip:3 -#: flwr.common.EventType.strip:3 of -msgid "If chars is given and not None, remove characters in chars instead." +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 +#: of +msgid "Use `fixedclipping_mod` modifier at the client side." msgstr "" -#: flwr.common.EventType.maketrans:3 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 +#: of msgid "" -"If there is only one argument, it must be a dictionary mapping Unicode " -"ordinals (integers) or characters to Unicode ordinals, strings or None. " -"Character keys will be then converted to ordinals. If there are two " -"arguments, they must be strings of equal length, and in the resulting " -"dictionary, each character in x will be mapped to the character at the " -"same position in y. 
If there is a third argument, it must be a string, " -"whose characters will be mapped to None in the result." +"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " +"on the client-side, usually by using the built-in `fixedclipping_mod`." msgstr "" -#: flwr.common.EventType.partition:3 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 +#: of msgid "" -"This will search for the separator in the string. If the separator is " -"found, returns a 3-tuple containing the part before the separator, the " -"separator itself, and the part after it." +"The noise multiplier for the Gaussian mechanism for model updates. A " +"value of 1.0 or higher is recommended for strong privacy." msgstr "" -#: flwr.common.EventType.partition:7 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 +#: of msgid "" -"If the separator is not found, returns a 3-tuple containing the original " -"string and two empty strings." +"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " +"wrapper:" msgstr "" -#: flwr.common.EventType.removeprefix:3 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 +#: of +msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"If the string starts with the prefix string, return string[len(prefix):]." -" Otherwise, return a copy of the original string." 
+":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.common.EventType.removesuffix:3 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"If the string ends with the suffix string and that suffix is not empty, " -"return string[:-len(suffix)]. Otherwise, return a copy of the original " -"string." +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.common.EventType.replace:5 of -msgid "count" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 +#: of +msgid "Add noise to the aggregated parameters." +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.common.EventType.replace:4 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Maximum number of occurrences to replace. -1 (the default value) means " -"replace all occurrences." +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.common.EventType.replace:7 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"If the optional argument count is given, only the first count occurrences" -" are replaced." 
+":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.common.EventType.rfind:1 flwr.common.EventType.rindex:1 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Return the highest index in S where substring sub is found, such that sub" -" is contained within S[start:end]. Optional arguments start and end are " -"interpreted as in slice notation." +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -#: flwr.common.EventType.rpartition:3 of -msgid "" -"This will search for the separator in the string, starting at the end. If" -" the separator is found, returns a 3-tuple containing the part before the" -" separator, the separator itself, and the part after it." +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyServerSideAdaptiveClipping" msgstr "" -#: flwr.common.EventType.rpartition:7 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 +#: of msgid "" -"If the separator is not found, returns a 3-tuple containing two empty " -"strings and the original string." +"The standard deviation of the noise added to the count of updates below " +"the estimate. Andrew et al. recommends to set to " +"`expected_num_records/20`" msgstr "" -#: flwr.common.EventType.rsplit:7 flwr.common.EventType.split:7 of -msgid "sep" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 +#: of +msgid "" +"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " +"wrapper" msgstr "" -#: flwr.common.EventType.rsplit:4 flwr.common.EventType.split:4 of -msgid "The separator used to split the string." 
+#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.common.EventType.rsplit:6 flwr.common.EventType.split:6 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"When set to None (the default value), will split on any whitespace " -"character (including \\\\n \\\\r \\\\t \\\\f and spaces) and will discard" -" empty strings from the result." +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.common.EventType.rsplit:11 flwr.common.EventType.split:11 of -msgid "maxsplit" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.common.EventType.rsplit:10 flwr.common.EventType.split:10 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Maximum number of splits (starting from the left). -1 (the default value)" -" means no limit." +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.common.EventType.rsplit:13 of -msgid "Splitting starts at the end of the string and works to the front." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.common.EventType.split:13 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Note, str.split() is mainly useful for data that has been intentionally " -"delimited. 
With natural text that includes punctuation, consider using " -"the regular expression module." +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -#: flwr.common.EventType.splitlines:3 of -msgid "" -"Line breaks are not included in the resulting list unless keepends is " -"given and true." +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 +msgid "DifferentialPrivacyServerSideFixedClipping" msgstr "" -#: flwr.common.EventType.startswith:1 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 +#: of msgid "" -"Return True if S starts with the specified prefix, False otherwise. With " -"optional start, test S beginning at that position. With optional end, " -"stop comparing S at that position. prefix can also be a tuple of strings " -"to try." +"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping " +"wrapper" msgstr "" -#: flwr.common.EventType.title:3 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"More specifically, words start with uppercased characters and all " -"remaining cased characters have lower case." +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.common.EventType.translate:5 of -msgid "table" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.common.EventType.translate:4 of -msgid "" -"Translation table, which must be a mapping of Unicode ordinals to Unicode" -" ordinals, strings, or None." 
+#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 +#: of +msgid "Compute the updates, clip, and pass them for aggregation." msgstr "" -#: flwr.common.EventType.translate:7 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"The table must implement lookup/indexing via __getitem__, for instance a " -"dictionary or list. If this operation raises LookupError, the character " -"is left untouched. Characters mapped to None are deleted." +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.common.EventType.zfill:3 of -msgid "The string is never truncated." +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.FitIns.rst:2 -msgid "FitIns" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 -msgid ":py:obj:`parameters `\\" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 -msgid ":py:obj:`config `\\" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 +#: of +msgid "Afterward, add noise to the aggregated parameters." 
msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:2 -msgid "FitRes" +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 +msgid "FaultTolerantFedAvg" msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`status `\\" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`parameters `\\" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`num_examples `\\" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 +#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 +#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 +#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using weighted average." 
msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`metrics `\\" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:2 -msgid "GetParametersIns" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:28::1 -msgid ":py:obj:`config `\\" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:2 -msgid "GetParametersRes" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 -msgid ":py:obj:`status `\\" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 -msgid ":py:obj:`parameters `\\" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:2 -msgid "GetPropertiesIns" +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 +#: ../../source/ref-changelog.md:1231 +msgid "FedAdagrad" msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:28::1 -msgid 
":py:obj:`config `\\" +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 +#: flwr.server.strategy.fedadam.FedAdam:1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:2 -msgid "GetPropertiesRes" +#: flwr.server.strategy.fedadagrad.FedAdagrad:3 +#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 +#: flwr.server.strategy.fedyogi.FedYogi:3 of +msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 -msgid ":py:obj:`status `\\" +#: flwr.server.strategy.fedadagrad.FedAdagrad:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:23 +#: flwr.server.strategy.fedadam.FedAdam:25 +#: flwr.server.strategy.fedadam.FedAdam:27 +#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 +#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 +#: flwr.server.strategy.fedprox.FedProx:61 +#: flwr.server.strategy.fedprox.FedProx:63 +#: flwr.server.strategy.fedyogi.FedYogi:28 +#: flwr.server.strategy.fedyogi.FedYogi:30 of +msgid "Metrics aggregation function, optional." msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 -msgid ":py:obj:`properties `\\" +#: flwr.server.strategy.fedadagrad.FedAdagrad:29 +#: flwr.server.strategy.fedadam.FedAdam:29 +#: flwr.server.strategy.fedopt.FedOpt:29 of +msgid "Server-side learning rate. Defaults to 1e-1." msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:2 -msgid "Message" +#: flwr.server.strategy.fedadagrad.FedAdagrad:31 +#: flwr.server.strategy.fedadam.FedAdam:31 +#: flwr.server.strategy.fedopt.FedOpt:31 of +msgid "Client-side learning rate. Defaults to 1e-1." 
msgstr "" -#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 -#: flwr.common.message.Message:3 of -msgid "A dataclass including information about the message to be executed." +#: flwr.server.strategy.fedadagrad.FedAdagrad:33 +#: flwr.server.strategy.fedadam.FedAdam:37 +#: flwr.server.strategy.fedopt.FedOpt:37 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." msgstr "" -#: flwr.common.message.Message:5 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Holds records either sent by another entity (e.g. sent by the server-side" -" logic to a client, or vice-versa) or that will be sent to it." +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.common.message.Message:8 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A dataclass that captures information about an error that took place when" -" processing another message." +":py:obj:`aggregate_fit `\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`create_error_reply `\\ " -"\\(error\\[\\, ttl\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.create_error_reply:1 of -msgid "Construct a reply message indicating an error happened." 
+":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`create_reply `\\ " -"\\(content\\[\\, ttl\\]\\)" +":py:obj:`configure_fit `\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.create_reply:1 of -msgid "Create a reply to this message with specified content and TTL." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -msgid ":py:obj:`has_content `\\ \\(\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.has_content:1 of -msgid "Return True if message has content, else False." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -msgid ":py:obj:`has_error `\\ \\(\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.has_error:1 of -msgid "Return True if message has an error, else False." +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 +msgid "FedAdam" msgstr "" -#: flwr.common.Message.content:1::1 of -msgid ":py:obj:`content `\\" +#: flwr.server.strategy.fedadam.FedAdam:33 +#: flwr.server.strategy.fedyogi.FedYogi:36 of +msgid "Momentum parameter. Defaults to 0.9." 
msgstr "" -#: flwr.common.Message.content:1 flwr.common.Message.content:1::1 -#: of -msgid "The content of this message." +#: flwr.server.strategy.fedadam.FedAdam:35 +#: flwr.server.strategy.fedyogi.FedYogi:38 of +msgid "Second moment parameter. Defaults to 0.99." msgstr "" -#: flwr.common.Message.content:1::1 of -msgid ":py:obj:`error `\\" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of -msgid "Error captured by this message." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.common.Message.content:1::1 of -msgid ":py:obj:`metadata `\\" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: flwr.common.message.Message.create_error_reply:3 of -msgid "The error that was encountered." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.common.message.Message.create_error_reply:5 -#: flwr.common.message.Message.create_reply:9 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Time-to-live for this message in seconds. If unset, it will be set based " -"on the remaining time for the received message before it expires. 
This " -"follows the equation: ttl = msg.meta.ttl - (reply.meta.created_at - " -"msg.meta.created_at)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.common.message.Message.create_error_reply:5 -#: flwr.common.message.Message.create_reply:9 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Time-to-live for this message in seconds. If unset, it will be set based " -"on the remaining time for the received message before it expires. This " -"follows the equation:" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.common.message.Message.create_error_reply:9 -#: flwr.common.message.Message.create_reply:13 of -msgid "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.common.message.Message.create_reply:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The method generates a new `Message` as a reply to this message. It " -"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " -"this message and sets 'reply_to_message' to the ID of this message." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.common.message.Message.create_reply:7 of -msgid "The content for the reply message." +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 +msgid "FedAvg" msgstr "" -#: flwr.common.message.Message.create_reply:16 of -msgid "A new `Message` instance representing the reply." 
+#: flwr.server.strategy.fedavg.FedAvg:3 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of +msgid "Implementation based on https://arxiv.org/abs/1602.05629" msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:2 -msgid "MessageType" +#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 +#: of +msgid "" +"Fraction of clients used during training. In case `min_fit_clients` is " +"larger than `fraction_fit * available_clients`, `min_fit_clients` will " +"still be sampled. Defaults to 1.0." msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 -msgid ":py:obj:`EVALUATE `\\" +#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 +#: of +msgid "" +"Fraction of clients used during validation. In case " +"`min_evaluate_clients` is larger than `fraction_evaluate * " +"available_clients`, `min_evaluate_clients` will still be sampled. " +"Defaults to 1.0." msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 -msgid ":py:obj:`QUERY `\\" +#: flwr.server.strategy.fedavg.FedAvg:33 of +msgid "Enable (True) or disable (False) in-place aggregation of model updates." 
msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 -msgid ":py:obj:`TRAIN `\\" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 -msgid "MessageTypeLegacy" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 -msgid ":py:obj:`GET_PARAMETERS `\\" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 -msgid ":py:obj:`GET_PROPERTIES `\\" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of -msgid "An identifier for the current run." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of -msgid "An identifier for the current message." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of -msgid "An identifier for the node sending this message." 
+#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.dst_node_id:1 flwr.common.message.Metadata:9 of -msgid "An identifier for the node receiving this message." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of -msgid "An identifier for the message this message replies to." +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 +msgid "FedAvgAndroid" msgstr "" -#: flwr.common.message.Metadata:13 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"An identifier for grouping messages. In some settings, this is used as " -"the FL round." +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.common.message.Metadata:16 of -msgid "Time-to-live for this message in seconds." +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of -msgid "A string that encodes the action to be executed on the receiving end." +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`bytes_to_ndarray " +"`\\ \\(tensor\\)" msgstr "" -#: flwr.common.message.Metadata:21 of -msgid "" -"An identifier that can be used when loading a particular data partition " -"for a ClientApp. Making use of this identifier is more relevant when " -"conducting simulations." 
+#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of +msgid "Deserialize NumPy array from bytes." msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`created_at `\\" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.common.Metadata.created_at:1 -#: flwr.common.Metadata.created_at:1::1 of -msgid "Unix timestamp when the message was created." +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`dst_node_id `\\" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`group_id `\\" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.group_id:1 of -msgid "An identifier for grouping messages." +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`ndarray_to_bytes " +"`\\ \\(ndarray\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`message_id `\\" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of +msgid "Serialize NumPy array to bytes." 
msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`message_type `\\" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`ndarrays_to_parameters " +"`\\ " +"\\(ndarrays\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`partition_id `\\" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.partition_id:1 of -msgid "An identifier telling which data partition a ClientApp should use." +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`reply_to_message `\\" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`parameters_to_ndarrays " +"`\\ " +"\\(parameters\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`run_id `\\" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 +#: of +msgid "Convert parameters object to NumPy weights." msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`src_node_id `\\" +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 +msgid "FedAvgM" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`ttl `\\" +#: flwr.server.strategy.fedavgm.FedAvgM:3 of +msgid "Implementation based on https://arxiv.org/abs/1909.06335" msgstr "" -#: flwr.common.Metadata.created_at:1::1 flwr.common.Metadata.ttl:1 -#: of -msgid "Time-to-live for this message." +#: flwr.server.strategy.fedavgm.FedAvgM:25 of +msgid "" +"Server-side learning rate used in server-side optimization. 
Defaults to " +"1.0." msgstr "" -#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 -msgid "MetricsRecord" +#: flwr.server.strategy.fedavgm.FedAvgM:28 of +msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." msgstr "" -#: flwr.common.record.metricsrecord.MetricsRecord:1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`int` | :py:class:`float` | " -":py:class:`~typing.List`\\ [:py:class:`int`] | :py:class:`~typing.List`\\" -" [:py:class:`float`]]" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`clear `\\ \\(\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`count_bytes `\\ \\(\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`items `\\ \\(\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`keys `\\ \\(\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: 
flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`values `\\ \\(\\)" +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 +msgid "FedMedian" msgstr "" -#: ../../source/ref-api/flwr.common.NDArray.rst:2 -msgid "NDArray" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 -msgid ":py:obj:`tensors `\\" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 -msgid ":py:obj:`tensor_type `\\" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of +msgid "Aggregate fit results using median." 
msgstr "" -#: ../../source/ref-api/flwr.common.ParametersRecord.rst:2 -msgid "ParametersRecord" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.common.record.parametersrecord.ParametersRecord:1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.common.record.parametersrecord.ParametersRecord:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A dataclass storing named Arrays in order. This means that it holds " -"entries as an OrderedDict[str, Array]. ParametersRecord objects can be " -"viewed as an equivalent to PyTorch's state_dict, but holding serialised " -"tensors instead." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`clear `\\ \\(\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`count_bytes `\\ \\(\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`items `\\ \\(\\)" +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 +msgid "FedOpt" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`keys `\\ \\(\\)" +#: flwr.server.strategy.fedopt.FedOpt:33 of +msgid "Momentum parameter. Defaults to 0.0." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +#: flwr.server.strategy.fedopt.FedOpt:35 of +msgid "Second moment parameter. Defaults to 0.0." 
msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`values `\\ \\(\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Note that a small amount of Bytes might also be included in this counting" -" that correspond to metadata of the serialized object (e.g. of NumPy " -"array) needed for deseralization." +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 -msgid "ReconnectIns" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:28::1 -msgid ":py:obj:`seconds `\\" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.common.RecordSet.rst:2 -msgid "RecordSet" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 of -msgid ":py:obj:`configs_records `\\" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.common.RecordSet.configs_records:1 -#: 
flwr.common.RecordSet.configs_records:1::1 of -msgid "Dictionary holding ConfigsRecord instances." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 of -msgid ":py:obj:`metrics_records `\\" +#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 +msgid "FedProx" msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 -#: flwr.common.RecordSet.metrics_records:1 of -msgid "Dictionary holding MetricsRecord instances." +#: flwr.server.strategy.fedprox.FedProx:3 of +msgid "Implementation based on https://arxiv.org/abs/1812.06127" msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 of -msgid ":py:obj:`parameters_records `\\" +#: flwr.server.strategy.fedprox.FedProx:5 of +msgid "" +"The strategy in itself will not be different than FedAvg, the client " +"needs to be adjusted. A proximal term needs to be added to the loss " +"function during the training:" msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 -#: flwr.common.RecordSet.parameters_records:1 of -msgid "Dictionary holding ParametersRecord instances." +#: flwr.server.strategy.fedprox.FedProx:9 of +msgid "" +"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" +"\n" msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:2 -msgid "ServerMessage" +#: flwr.server.strategy.fedprox.FedProx:12 of +msgid "" +"Where $w^t$ are the global parameters and $w$ are the local weights the " +"function will be optimized with." 
msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 -msgid ":py:obj:`evaluate_ins `\\" +#: flwr.server.strategy.fedprox.FedProx:15 of +msgid "In PyTorch, for example, the loss would go from:" msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 -msgid ":py:obj:`fit_ins `\\" +#: flwr.server.strategy.fedprox.FedProx:21 of +msgid "To:" msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +#: flwr.server.strategy.fedprox.FedProx:30 of msgid "" -":py:obj:`get_parameters_ins " -"`\\" +"With `global_params` being a copy of the parameters before the training " +"takes place." msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +#: flwr.server.strategy.fedprox.FedProx:65 of msgid "" -":py:obj:`get_properties_ins " -"`\\" -msgstr "" - -#: ../../source/ref-api/flwr.common.Status.rst:2 -msgid "Status" -msgstr "" - -#: ../../source/ref-api/flwr.common.Status.rst:29::1 -msgid ":py:obj:`code `\\" -msgstr "" - -#: ../../source/ref-api/flwr.common.Status.rst:29::1 -msgid ":py:obj:`message `\\" +"The weight of the proximal term used in the optimization. 0.0 makes this " +"strategy equivalent to FedAvg, and the higher the coefficient, the more " +"regularization will be used (that is, the client parameters will need to " +"be closer to the server parameters during training)." 
msgstr "" -#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 -msgid "array\\_from\\_numpy" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 -msgid "bytes\\_to\\_ndarray" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-api/flwr.common.configure.rst:2 -msgid "configure" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.event.rst:2 -msgid "event" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.log.rst:2 -msgid "log" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: logging.Logger.log:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"To pass exception information, use the keyword argument exc_info with a " -"true value, e.g." 
+":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: logging.Logger.log:6 of -#, python-format -msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.common.ndarray_to_bytes.rst:2 -msgid "ndarray\\_to\\_bytes" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.common.ndarrays_to_parameters.rst:2 -msgid "ndarrays\\_to\\_parameters" +#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of +msgid "Sends the proximal factor mu to the clients" msgstr "" -#: ../../source/ref-api/flwr.common.now.rst:2 -msgid "now" +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 +msgid "FedTrimmedAvg" msgstr "" -#: ../../source/ref-api/flwr.common.parameters_to_ndarrays.rst:2 -msgid "parameters\\_to\\_ndarrays" +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of +msgid "Implemented based on: https://arxiv.org/abs/1803.01498" msgstr "" -#: ../../source/ref-api/flwr.server.rst:2 -msgid "server" +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of +msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -msgid ":py:obj:`run_driver_api `\\ \\(\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -#: flwr.server.app.run_driver_api:1 of -msgid "Run Flower server (Driver API)." 
+#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -msgid ":py:obj:`run_fleet_api `\\ \\(\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using trimmed average." msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -#: flwr.server.app.run_fleet_api:1 of -msgid "Run Flower server (Fleet API)." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -msgid ":py:obj:`run_server_app `\\ \\(\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -#: flwr.server.run_serverapp.run_server_app:1 of -msgid "Run Flower server app." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -msgid ":py:obj:`run_superlink `\\ \\(\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -#: flwr.server.app.run_superlink:1 of -msgid "Run Flower SuperLink (Driver API and Fleet API)." 
+#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`start_server `\\ \\(\\*\\[\\, " -"server\\_address\\, server\\, ...\\]\\)" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -#: flwr.server.app.start_server:1 of -msgid "Start a Flower server using the gRPC transport layer." +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 +msgid "FedXgbBagging" msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -msgid ":py:obj:`ClientManager `\\ \\(\\)" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -#: flwr.server.client_manager.ClientManager:1 of -msgid "Abstract base class for managing Flower clients." +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation metrics using average." msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -msgid ":py:obj:`Driver `\\ \\(\\)" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -#: flwr.server.driver.driver.Driver:1 of -msgid "Abstract base Driver class for the Driver API." 
+#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of +msgid "Aggregate fit results using bagging." msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -msgid ":py:obj:`History `\\ \\(\\)" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -#: flwr.server.history.History:1 of -msgid "History class for training and/or evaluation metrics collection." +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`LegacyContext `\\ \\(state\\[\\, " -"config\\, strategy\\, ...\\]\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -#: flwr.server.compat.legacy_context.LegacyContext:1 of -msgid "Legacy Context." 
+#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " -"strategy\\]\\)" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " -"strategy\\, ...\\]\\)" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -#: flwr.server.server_app.ServerApp:1 of -msgid "Flower ServerApp." +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 +msgid "FedXgbCyclic" msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," -" round\\_timeout\\]\\)" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -#: flwr.server.server_config.ServerConfig:1 of -msgid "Flower server config." 
+#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\," +" results\\, failures\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -#: flwr.server.client_manager.SimpleClientManager:1 of -msgid "Provides a pool of available clients." +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:59::1 -msgid ":py:obj:`flwr.server.strategy `\\" +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:59::1 -#: flwr.server.strategy:1 of -msgid "Contains the strategy abstraction and different implementations." +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:59::1 -msgid ":py:obj:`flwr.server.workflow `\\" +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:59::1 -#: flwr.server.workflow:1 of -msgid "Workflows." 
+#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.ClientManager.rst:2 -msgid "ClientManager" +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 +msgid "FedXgbNnAvg" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`all `\\ \\(\\)" +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of +msgid "" +"This strategy is deprecated, but a copy of it is available in Flower " +"Baselines: " +"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." msgstr "" -#: flwr.server.client_manager.ClientManager.all:1 -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.all:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid "Return all available clients." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`num_available `\\ \\(\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\, " +"results\\, failures\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.num_available:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.num_available:1 of -msgid "Return the number of available clients." 
+#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`register `\\ \\(client\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.register:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.register:1 of -msgid "Register Flower ClientProxy instance." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`sample `\\ " -"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.sample:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.sample:1 of -msgid "Sample a number of Flower ClientProxy instances." 
+#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`unregister `\\ \\(client\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.unregister:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.unregister:1 of -msgid "Unregister Flower ClientProxy instance." +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 +msgid "FedYogi" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\, timeout\\)" +#: flwr.server.strategy.fedyogi.FedYogi:32 of +msgid "Server-side learning rate. Defaults to 1e-2." msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.wait_for:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.wait_for:1 of -msgid "Wait until at least `num_clients` are available." +#: flwr.server.strategy.fedyogi.FedYogi:34 of +msgid "Client-side learning rate. Defaults to 0.0316." msgstr "" -#: flwr.server.client_manager.ClientManager.num_available:3 -#: flwr.server.client_manager.SimpleClientManager.num_available:3 of -msgid "**num_available** -- The number of currently available clients." +#: flwr.server.strategy.fedyogi.FedYogi:40 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." 
msgstr "" -#: flwr.server.client_manager.ClientManager.register:6 -#: flwr.server.client_manager.SimpleClientManager.register:6 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**success** -- Indicating if registration was successful. False if " -"ClientProxy is already registered or can not be registered for any " -"reason." +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.unregister:3 -#: flwr.server.client_manager.SimpleClientManager.unregister:3 of -msgid "This method is idempotent." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:2 -msgid "Driver" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`create_message `\\ " -"\\(content\\, message\\_type\\, ...\\[\\, ttl\\]\\)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1 -#: flwr.server.driver.driver.Driver.create_message:1::1 of -msgid "Create a new message with specified parameters." 
+#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of -msgid ":py:obj:`get_node_ids `\\ \\(\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.get_node_ids:1 of -msgid "Get node IDs." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of -msgid "" -":py:obj:`pull_messages `\\ " -"\\(message\\_ids\\)" +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 +msgid "Krum" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.pull_messages:1 of -msgid "Pull messages based on message IDs." +#: flwr.server.strategy.krum.Krum:3 of +msgid "Implementation based on https://arxiv.org/abs/1703.02757" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: flwr.server.strategy.krum.Krum:17 of msgid "" -":py:obj:`push_messages `\\ " -"\\(messages\\)" +"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" +" that case classical Krum is applied." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.push_messages:1 of -msgid "Push messages to specified node IDs." 
+#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`send_and_receive `\\ " -"\\(messages\\, \\*\\[\\, timeout\\]\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.send_and_receive:1 of -msgid "Push messages to specified node IDs and pull the reply messages." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of +msgid "Aggregate fit results using Krum." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"This method constructs a new `Message` with given content and metadata. " -"The `run_id` and `src_node_id` will be set automatically." +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:6 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The content for the new message. This holds records that are to be sent " -"to the destination node." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:9 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The type of the message, defining the action to be executed on the " -"receiving end." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:12 of -msgid "The ID of the destination node to which the message is being sent." 
+#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:14 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The ID of the group to which this message is associated. In some " -"settings, this is used as the FL round." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:17 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Time-to-live for the round trip of this message, i.e., the time from " -"sending this message to receiving a reply. It specifies in seconds the " -"duration for which the message and its potential reply are considered " -"valid. If unset, the default TTL (i.e., `common.DEFAULT_TTL`) will be " -"used." +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:23 of -msgid "" -"**message** -- A new `Message` instance with the specified content and " -"metadata." +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 +msgid "QFedAvg" msgstr "" -#: flwr.server.driver.driver.Driver.pull_messages:3 of +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"This method is used to collect messages from the SuperLink that " -"correspond to a set of given message IDs." +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: flwr.server.driver.driver.Driver.pull_messages:6 of -msgid "An iterable of message IDs for which reply messages are to be retrieved." 
+#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.driver.driver.Driver.pull_messages:9 of -msgid "**messages** -- An iterable of messages received." +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: flwr.server.driver.driver.Driver.push_messages:3 of +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"This method takes an iterable of messages and sends each message to the " -"node specified in `dst_node_id`." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.driver.driver.Driver.push_messages:6 -#: flwr.server.driver.driver.Driver.send_and_receive:7 of -msgid "An iterable of messages to be sent." +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.driver.driver.Driver.push_messages:9 of +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**message_ids** -- An iterable of IDs for the messages that were sent, " -"which can be used to pull replies." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:3 of +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"This method sends a list of messages to their destination node IDs and " -"then waits for the replies. It continues to pull replies until either all" -" replies are received or the specified timeout duration is exceeded." 
+":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:9 of +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"The timeout duration in seconds. If specified, the method will wait for " -"replies for this duration. If `None`, there is no time limit and the " -"method will wait until replies for all messages are received." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:14 of -msgid "**replies** -- An iterable of reply messages received from the SuperLink." +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 +msgid "Strategy" msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:18 -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:53 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:60 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 #: of -msgid "Notes" -msgstr "" - -#: flwr.server.driver.driver.Driver.send_and_receive:19 of msgid "" -"This method uses `push_messages` to send the messages and `pull_messages`" -" to collect the replies. If `timeout` is set, the method may not return " -"replies for all sent messages. A message remains valid until its TTL, " -"which is not affected by `timeout`." +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.History.rst:2 -msgid "History" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation results." 
msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`add_loss_centralized " -"`\\ \\(server\\_round\\, " -"loss\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1 -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "Add one loss entry (from centralized evaluation)." +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of +msgid "Aggregate training results." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`add_loss_distributed " -"`\\ \\(server\\_round\\, " -"loss\\)" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_loss_distributed:1 of -msgid "Add one loss entry (from distributed evaluation)." +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`add_metrics_centralized " -"`\\ \\(server\\_round\\, " -"metrics\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_centralized:1 of -msgid "Add metrics entries (from centralized evaluation)." +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.evaluate:1 of +msgid "Evaluate the current model parameters." 
msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`add_metrics_distributed " -"`\\ \\(server\\_round\\, " -"metrics\\)" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_distributed:1 of -msgid "Add metrics entries (from distributed evaluation)." +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of +msgid "Initialize the (global) model parameters." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of msgid "" -":py:obj:`add_metrics_distributed_fit " -"`\\ \\(server\\_round\\," -" ...\\)" -msgstr "" - -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_distributed_fit:1 of -msgid "Add metrics entries (from distributed fit)." +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " +"one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 -msgid "LegacyContext" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of +msgid "Exceptions that occurred while the server was waiting for client updates." 
msgstr "" -#: flwr.server.compat.legacy_context.LegacyContext:1 of -msgid "Bases: :py:class:`~flwr.common.context.Context`" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of +msgid "" +"**aggregation_result** -- The aggregated evaluation result. Aggregation " +"typically uses some variant of a weighted average." msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`config `\\" +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of +msgid "" +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" +" one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`strategy `\\" +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of +msgid "" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the new global model parameters (i.e., it will replace the " +"previous parameters with the ones returned from this method). If `None` " +"is returned (e.g., because there were only failures and no viable " +"results) then the server will no update the previous model parameters, " +"the updates received in this round are discarded, and the global model " +"parameters remain the same." msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`client_manager `\\" +#: flwr.server.strategy.strategy.Strategy.evaluate:3 of +msgid "" +"This function can be used to perform centralized (i.e., server-side) " +"evaluation of model parameters." 
msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`history `\\" +#: flwr.server.strategy.strategy.Strategy.evaluate:11 of +msgid "" +"**evaluation_result** -- The evaluation result, usually a Tuple " +"containing loss and a dictionary containing task-specific metrics (e.g., " +"accuracy)." msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`state `\\" +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of +msgid "" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the initial global model parameters." msgstr "" -#: ../../source/ref-api/flwr.server.Server.rst:2 -msgid "Server" +#: ../../source/ref-api/flwr.server.workflow.rst:2 +msgid "workflow" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of -msgid ":py:obj:`client_manager `\\ \\(\\)" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +msgid "" +":py:obj:`DefaultWorkflow `\\ " +"\\(\\[fit\\_workflow\\, ...\\]\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1 -#: flwr.server.server.Server.client_manager:1::1 of -msgid "Return ClientManager." +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of +msgid "Default workflow in Flower." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -":py:obj:`disconnect_all_clients " -"`\\ \\(timeout\\)" +":py:obj:`SecAggPlusWorkflow `\\ " +"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.disconnect_all_clients:1 of -msgid "Send shutdown signal to all clients." +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +msgid "The workflow for the SecAgg+ protocol." 
msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -":py:obj:`evaluate_round `\\ " -"\\(server\\_round\\, timeout\\)" +":py:obj:`SecAggWorkflow `\\ " +"\\(reconstruction\\_threshold\\, \\*\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.evaluate_round:1 of -msgid "Validate current global model on a number of clients." +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +msgid "The workflow for the SecAgg protocol." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of -msgid ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 +msgid "DefaultWorkflow" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.fit:1 of -msgid "Run federated averaging for a number of rounds." +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 +msgid "SecAggPlusWorkflow" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 +#: of msgid "" -":py:obj:`fit_round `\\ \\(server\\_round\\," -" timeout\\)" +"The SecAgg+ protocol ensures the secure summation of integer vectors " +"owned by multiple parties, without accessing any individual integer " +"vector. This workflow allows the server to compute the weighted average " +"of model parameters across all clients, ensuring individual contributions" +" remain private. This is achieved by clients sending both, a weighting " +"factor and a weighted version of the locally updated parameters, both of " +"which are masked for privacy. 
Specifically, each client uploads \"[w, w *" +" params]\" with masks, where weighting factor 'w' is the number of " +"examples ('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.fit_round:1 of -msgid "Perform a single round of federated averaging." +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 +#: of +msgid "The protocol involves four main stages:" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:16 +#: of msgid "" -":py:obj:`set_max_workers `\\ " -"\\(max\\_workers\\)" -msgstr "" - -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.set_max_workers:1 of -msgid "Set the max_workers used by ThreadPoolExecutor." -msgstr "" - -#: flwr.server.server.Server.client_manager:1::1 of -msgid ":py:obj:`set_strategy `\\ \\(strategy\\)" +"'setup': Send SecAgg+ configuration to clients and collect their public " +"keys." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.set_strategy:1 of -msgid "Replace server strategy." +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 +#: of +msgid "" +"'share keys': Broadcast public keys among clients and collect encrypted " +"secret key shares." 
msgstr "" -#: ../../source/ref-api/flwr.server.ServerApp.rst:2 -msgid "ServerApp" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:19 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:19 +#: of +msgid "" +"'collect masked vectors': Forward encrypted secret key shares to target " +"clients and collect masked model parameters." msgstr "" -#: flwr.server.server_app.ServerApp:5 of -msgid "Use the `ServerApp` with an existing `Strategy`:" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:21 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:21 +#: of +msgid "" +"'unmask': Collect secret key shares to decrypt and aggregate the model " +"parameters." msgstr "" -#: flwr.server.server_app.ServerApp:15 of -msgid "Use the `ServerApp` with a custom main function:" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:23 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:23 +#: of +msgid "" +"Only the aggregated model parameters are exposed and passed to " +"`Strategy.aggregate_fit`, ensuring individual data privacy." msgstr "" -#: flwr.server.server_app.ServerApp.main:1::1 of -msgid ":py:obj:`main `\\ \\(\\)" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:26 +#: of +msgid "" +"The number of shares into which each client's private key is split under " +"the SecAgg+ protocol. If specified as a float, it represents the " +"proportion of all selected clients, and the number of shares will be set " +"dynamically in the run time. A private key can be reconstructed from " +"these shares, allowing for the secure aggregation of model updates. Each " +"client sends one share to each of its neighbors while retaining one." 
msgstr "" -#: flwr.server.server_app.ServerApp.main:1 -#: flwr.server.server_app.ServerApp.main:1::1 of -msgid "Return a decorator that registers the main fn with the server app." +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:26 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:33 +#: of +msgid "" +"The minimum number of shares required to reconstruct a client's private " +"key, or, if specified as a float, it represents the proportion of the " +"total number of shares needed for reconstruction. This threshold ensures " +"privacy by allowing for the recovery of contributions from dropped " +"clients during aggregation, without compromising individual client data." msgstr "" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:2 -msgid "ServerConfig" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:32 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:39 +#: of +msgid "" +"The maximum value of the weight that can be assigned to any single " +"client's update during the weighted average calculation on the server " +"side, e.g., in the FedAvg algorithm." msgstr "" -#: flwr.server.server_config.ServerConfig:3 of +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:36 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:43 +#: of msgid "" -"All attributes have default values which allows users to configure just " -"the ones they care about." +"The range within which model parameters are clipped before quantization. " +"This parameter ensures each model parameter is bounded within " +"[-clipping_range, clipping_range], facilitating quantization." 
msgstr "" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 -msgid ":py:obj:`num_rounds `\\" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:40 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:47 +#: of +msgid "" +"The size of the range into which floating-point model parameters are " +"quantized, mapping each parameter to an integer in [0, " +"quantization_range-1]. This facilitates cryptographic operations on the " +"model updates." msgstr "" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 -msgid ":py:obj:`round_timeout `\\" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:44 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:51 +#: of +msgid "" +"The range of values from which random mask entries are uniformly sampled " +"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " +"Please use 2**n values for `modulus_range` to prevent overflow issues." msgstr "" -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:2 -msgid "SimpleClientManager" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:48 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:55 +#: of +msgid "" +"The timeout duration in seconds. If specified, the workflow will wait for" +" replies for this duration each time. If `None`, there is no time limit " +"and the workflow will wait until replies for all messages are received." msgstr "" -#: flwr.server.client_manager.SimpleClientManager:1 of -msgid "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:62 +#: of +msgid "" +"Generally, higher `num_shares` means more robust to dropouts while " +"increasing the computational costs; higher `reconstruction_threshold` " +"means better privacy guarantees but less tolerance to dropouts." 
msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid ":py:obj:`all `\\ \\(\\)" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 +#: of +msgid "Too large `max_weight` may compromise the precision of the quantization." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid "" -":py:obj:`num_available `\\" -" \\(\\)" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 +#: of +msgid "`modulus_range` must be 2**n and larger than `quantization_range`." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:67 +#: of msgid "" -":py:obj:`register `\\ " -"\\(client\\)" +"When `num_shares` is a float, it is interpreted as the proportion of all " +"selected clients, and hence the number of shares will be determined in " +"the runtime. This allows for dynamic adjustment based on the total number" +" of participating clients." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:70 +#: of msgid "" -":py:obj:`sample `\\ " -"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +"Similarly, when `reconstruction_threshold` is a float, it is interpreted " +"as the proportion of the number of shares needed for the reconstruction " +"of a private key. This feature enables flexibility in setting the " +"security threshold relative to the number of distributed shares." 
msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:74 +#: of msgid "" -":py:obj:`unregister `\\ " -"\\(client\\)" +"`num_shares`, `reconstruction_threshold`, and the quantization parameters" +" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg+" +" protocol." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\[\\, timeout\\]\\)" +":py:obj:`collect_masked_vectors_stage " +"`\\" +" \\(driver\\, ...\\)" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:3 of +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "Execute the 'collect masked vectors' stage." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"Blocks until the requested number of clients is available or until a " -"timeout is reached. Current timeout default: 1 day." +":py:obj:`setup_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:6 of -msgid "The number of clients to wait for." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 +#: of +msgid "Execute the 'setup' stage." 
msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:8 of -msgid "The time in seconds to wait for, defaults to 86400 (24h)." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`share_keys_stage " +"`\\ " +"\\(driver\\, context\\, state\\)" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:11 of -msgid "**success**" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 +#: of +msgid "Execute the 'share keys' stage." msgstr "" -#: ../../source/ref-api/flwr.server.run_driver_api.rst:2 -msgid "run\\_driver\\_api" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: ../../source/ref-api/flwr.server.run_fleet_api.rst:2 -msgid "run\\_fleet\\_api" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 +#: of +msgid "Execute the 'unmask' stage." 
msgstr "" -#: ../../source/ref-api/flwr.server.run_server_app.rst:2 -msgid "run\\_server\\_app" +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 +msgid "SecAggWorkflow" msgstr "" -#: ../../source/ref-api/flwr.server.run_superlink.rst:2 -msgid "run\\_superlink" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +msgid "" +"Bases: " +":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" msgstr "" -#: ../../source/ref-api/flwr.server.start_server.rst:2 -msgid "start\\_server" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of +msgid "" +"The SecAgg protocol ensures the secure summation of integer vectors owned" +" by multiple parties, without accessing any individual integer vector. " +"This workflow allows the server to compute the weighted average of model " +"parameters across all clients, ensuring individual contributions remain " +"private. This is achieved by clients sending both, a weighting factor and" +" a weighted version of the locally updated parameters, both of which are " +"masked for privacy. Specifically, each client uploads \"[w, w * params]\"" +" with masks, where weighting factor 'w' is the number of examples " +"('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." msgstr "" -#: flwr.server.app.start_server:3 of -msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:16 of +msgid "" +"'setup': Send SecAgg configuration to clients and collect their public " +"keys." msgstr "" -#: flwr.server.app.start_server:5 of +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:55 of msgid "" -"A server implementation, either `flwr.server.Server` or a subclass " -"thereof. 
If no instance is provided, then `start_server` will create one." +"Each client's private key is split into N shares under the SecAgg " +"protocol, where N is the number of selected clients." msgstr "" -#: flwr.server.app.start_server:9 flwr.simulation.app.start_simulation:28 of +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:57 of msgid "" -"Currently supported values are `num_rounds` (int, default: 1) and " -"`round_timeout` in seconds (float, default: None)." +"Generally, higher `reconstruction_threshold` means better privacy " +"guarantees but less tolerance to dropouts." msgstr "" -#: flwr.server.app.start_server:12 of +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:61 of msgid "" -"An implementation of the abstract base class " -"`flwr.server.strategy.Strategy`. If no strategy is provided, then " -"`start_server` will use `flwr.server.strategy.FedAvg`." +"When `reconstruction_threshold` is a float, it is interpreted as the " +"proportion of the number of all selected clients needed for the " +"reconstruction of a private key. This feature enables flexibility in " +"setting the security threshold relative to the number of selected " +"clients." msgstr "" -#: flwr.server.app.start_server:16 of +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:65 of msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_server` will use " -"`flwr.server.client_manager.SimpleClientManager`." +"`reconstruction_threshold`, and the quantization parameters " +"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg " +"protocol." 
msgstr "" -#: flwr.server.app.start_server:21 of +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower" -" clients. The default should be sufficient for most models. Users who " -"train very large models might need to increase this value. Note that the " -"Flower clients need to be started with the same value (see " -"`flwr.client.start_client`), otherwise clients will not know about the " -"increased limit and block larger messages." +":py:obj:`collect_masked_vectors_stage " +"`\\ " +"\\(driver\\, ...\\)" msgstr "" -#: flwr.server.app.start_server:28 of +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order: * CA certificate. * " -"server certificate. * server private key." +":py:obj:`setup_stage `\\" +" \\(driver\\, context\\, state\\)" msgstr "" -#: flwr.server.app.start_server:28 of +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order:" +":py:obj:`share_keys_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: flwr.server.app.start_server:32 of -msgid "CA certificate." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: flwr.server.app.start_server:33 of -msgid "server certificate." 
+#: ../../source/ref-api/flwr.simulation.rst:2 +msgid "simulation" msgstr "" -#: flwr.server.app.start_server:34 of -msgid "server private key." +#: ../../source/ref-api/flwr.simulation.rst:18::1 +msgid "" +":py:obj:`run_simulation `\\ " +"\\(server\\_app\\, client\\_app\\, ...\\)" msgstr "" -#: flwr.server.app.start_server:37 of -msgid "**hist** -- Object containing training and evaluation metrics." +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.simulation.run_simulation.run_simulation:1 of +msgid "Run a Flower App using the Simulation Engine." msgstr "" -#: flwr.server.app.start_server:42 of -msgid "Starting an insecure server:" +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#, fuzzy +msgid "" +":py:obj:`start_simulation `\\ " +"\\(\\*args\\, \\*\\*kwargs\\)" msgstr "" +":py:obj:`start_client `\\ \\(\\*\\, " +"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" -#: flwr.server.app.start_server:46 of -msgid "Starting an SSL-enabled server:" +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.simulation.start_simulation:1 of +msgid "Log error stating that module `ray` could not be imported." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:2 -msgid "strategy" +#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 +msgid "run\\_simulation" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.simulation.run_simulation.run_simulation:3 of msgid "" -":py:obj:`Bulyan `\\ \\(\\*\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" +"The `ServerApp` to be executed. It will send messages to different " +"`ClientApp` instances running on different (virtual) SuperNodes." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.bulyan.Bulyan:1 of -msgid "Bulyan strategy." +#: flwr.simulation.run_simulation.run_simulation:6 of +msgid "" +"The `ClientApp` to be executed by each of the SuperNodes. It will receive" +" messages sent by the `ServerApp`." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.simulation.run_simulation.run_simulation:9 of msgid "" -":py:obj:`DPFedAvgAdaptive `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\)" +"Number of nodes that run a ClientApp. They can be sampled by a Driver in " +"the ServerApp and receive a Message describing what the ClientApp should " +"perform." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of -msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." +#: flwr.simulation.run_simulation.run_simulation:12 of +msgid "A simulation backend that runs `ClientApp`s." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.simulation.run_simulation.run_simulation:14 of msgid "" -":py:obj:`DPFedAvgFixed `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" +"'A dictionary to configure a backend. Separate dictionaries to configure " +"different elements of backend. Supported top-level keys are `init_args` " +"for values parsed to initialisation of backend, `client_resources` to " +"define the resources for clients, and `actor` to define the actor " +"parameters. Values supported in are those included by " +"`flwr.common.typing.ConfigsRecordValues`." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of -msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." +#: flwr.simulation.run_simulation.run_simulation:21 of +msgid "" +"A boolean to indicate whether to enable GPU growth on the main thread. " +"This is desirable if you make use of a TensorFlow model on your " +"`ServerApp` while having your `ClientApp` running on the same GPU. " +"Without enabling this, you might encounter an out-of-memory error because" +" TensorFlow, by default, allocates all GPU memory. 
Read more about how " +"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " +"documentation: https://www.tensorflow.org/api/stable." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.simulation.run_simulation.run_simulation:28 of msgid "" -":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " -"`\\ " -"\\(...\\)" +"When disabled, only INFO, WARNING and ERROR log messages will be shown. " +"If enabled, DEBUG-level logs will be displayed." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 -#: of -msgid "Strategy wrapper for central DP with client-side adaptive clipping." +#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 +msgid "start\\_simulation" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " -"`\\ " -"\\(...\\)" +#: ../../source/ref-changelog.md:1 +msgid "Changelog" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 -#: of -msgid "Strategy wrapper for central DP with server-side adaptive clipping." 
+#: ../../source/ref-changelog.md:3 +msgid "v1.11.1 (2024-09-11)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`DifferentialPrivacyClientSideFixedClipping " -"`\\ " -"\\(...\\)" +#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:37 +#: ../../source/ref-changelog.md:141 ../../source/ref-changelog.md:239 +#: ../../source/ref-changelog.md:339 ../../source/ref-changelog.md:403 +#: ../../source/ref-changelog.md:496 ../../source/ref-changelog.md:596 +#: ../../source/ref-changelog.md:680 ../../source/ref-changelog.md:744 +#: ../../source/ref-changelog.md:802 ../../source/ref-changelog.md:871 +#: ../../source/ref-changelog.md:940 +msgid "Thanks to our contributors" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 -#: of -msgid "Strategy wrapper for central DP with client-side fixed clipping." +#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:39 +#: ../../source/ref-changelog.md:143 ../../source/ref-changelog.md:241 +#: ../../source/ref-changelog.md:341 ../../source/ref-changelog.md:405 +#: ../../source/ref-changelog.md:498 ../../source/ref-changelog.md:598 +#: ../../source/ref-changelog.md:682 ../../source/ref-changelog.md:746 +#: ../../source/ref-changelog.md:804 +msgid "" +"We would like to give our special thanks to all the contributors who made" +" the new version of Flower possible (in `git shortlog` order):" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:9 msgid "" -":py:obj:`DifferentialPrivacyServerSideFixedClipping " -"`\\ " -"\\(...\\)" +"`Charles Beauville`, `Chong Shen Ng`, `Daniel J. 
Beutel`, `Heng Pan`, " +"`Javier`, `Robert Steiner`, `Yan Gao` " msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 -#: of -msgid "Strategy wrapper for central DP with server-side fixed clipping." -msgstr "" +#: ../../source/ref-changelog.md:11 +#, fuzzy +msgid "Improvements" +msgstr "선택적 개선 사항" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:13 msgid "" -":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"**Implement** `keys/values/items` **methods for** `TypedDict` " +"([#4146](https://github.com/adap/flower/pull/4146))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedadagrad.FedAdagrad:1 of -msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." +#: ../../source/ref-changelog.md:15 +msgid "" +"**Fix parsing of** `--executor-config` **if present** " +"([#4125](https://github.com/adap/flower/pull/4125))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:17 msgid "" -":py:obj:`FedAdam `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"**Adjust framework name in templates docstrings** " +"([#4127](https://github.com/adap/flower/pull/4127))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedadam.FedAdam:1 of -msgid "FedAdam - Adaptive Federated Optimization using Adam." 
+#: ../../source/ref-changelog.md:19 +msgid "" +"**Update** `flwr new` **Hugging Face template** " +"([#4169](https://github.com/adap/flower/pull/4169))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:21 msgid "" -":py:obj:`FedAvg `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +"**Fix** `flwr new` **FlowerTune template** " +"([#4123](https://github.com/adap/flower/pull/4123))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedavg.FedAvg:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of -msgid "Federated Averaging strategy." +#: ../../source/ref-changelog.md:23 +msgid "" +"**Add buffer time after** `ServerApp` **thread initialization** " +"([#4119](https://github.com/adap/flower/pull/4119))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:25 msgid "" -":py:obj:`FedAvgAndroid `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +"**Handle unsuitable resources for simulation** " +"([#4143](https://github.com/adap/flower/pull/4143))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:27 msgid "" -":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"**Update example READMEs** " +"([#4117](https://github.com/adap/flower/pull/4117))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedavgm.FedAvgM:1 of -msgid "Federated Averaging with Momentum strategy." 
+#: ../../source/ref-changelog.md:29 +msgid "" +"**Update SuperNode authentication docs** " +"([#4160](https://github.com/adap/flower/pull/4160))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedMedian `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-changelog.md:31 ../../source/ref-changelog.md:111 +#: ../../source/ref-changelog.md:227 ../../source/ref-changelog.md:323 +#: ../../source/ref-changelog.md:397 ../../source/ref-changelog.md:472 +#: ../../source/ref-changelog.md:584 ../../source/ref-changelog.md:674 +#: ../../source/ref-changelog.md:738 ../../source/ref-changelog.md:796 +#: ../../source/ref-changelog.md:865 ../../source/ref-changelog.md:927 +#: ../../source/ref-changelog.md:946 ../../source/ref-changelog.md:1102 +#: ../../source/ref-changelog.md:1173 ../../source/ref-changelog.md:1210 +#: ../../source/ref-changelog.md:1253 +msgid "Incompatible changes" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedmedian.FedMedian:1 of -msgid "Configurable FedMedian strategy implementation." +#: ../../source/ref-changelog.md:35 +msgid "v1.11.0 (2024-08-30)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:41 msgid "" -":py:obj:`FedOpt `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. 
" +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Edoardo Gabrielli`, `Heng Pan`," +" `Javier`, `Meng Yan`, `Michal Danilowski`, `Mohammad Naseri`, `Robert " +"Steiner`, `Steve Laskaridis`, `Taner Topal`, `Yan Gao` " +msgstr "" + +#: ../../source/ref-changelog.md:43 ../../source/ref-changelog.md:147 +#: ../../source/ref-changelog.md:245 ../../source/ref-changelog.md:345 +#: ../../source/ref-changelog.md:409 ../../source/ref-changelog.md:502 +#: ../../source/ref-changelog.md:602 ../../source/ref-changelog.md:686 +#: ../../source/ref-changelog.md:750 ../../source/ref-changelog.md:808 +#: ../../source/ref-changelog.md:877 ../../source/ref-changelog.md:1006 +#: ../../source/ref-changelog.md:1048 ../../source/ref-changelog.md:1115 +#: ../../source/ref-changelog.md:1181 ../../source/ref-changelog.md:1226 +#: ../../source/ref-changelog.md:1265 ../../source/ref-changelog.md:1298 +#: ../../source/ref-changelog.md:1348 +msgid "What's new?" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedopt.FedOpt:1 of -msgid "Federated Optim strategy." 
+#: ../../source/ref-changelog.md:45 +msgid "" +"**Deliver Flower App Bundle (FAB) to SuperLink and SuperNodes** " +"([#4006](https://github.com/adap/flower/pull/4006), " +"[#3945](https://github.com/adap/flower/pull/3945), " +"[#3999](https://github.com/adap/flower/pull/3999), " +"[#4027](https://github.com/adap/flower/pull/4027), " +"[#3851](https://github.com/adap/flower/pull/3851), " +"[#3946](https://github.com/adap/flower/pull/3946), " +"[#4003](https://github.com/adap/flower/pull/4003), " +"[#4029](https://github.com/adap/flower/pull/4029), " +"[#3942](https://github.com/adap/flower/pull/3942), " +"[#3957](https://github.com/adap/flower/pull/3957), " +"[#4020](https://github.com/adap/flower/pull/4020), " +"[#4044](https://github.com/adap/flower/pull/4044), " +"[#3852](https://github.com/adap/flower/pull/3852), " +"[#4019](https://github.com/adap/flower/pull/4019), " +"[#4031](https://github.com/adap/flower/pull/4031), " +"[#4036](https://github.com/adap/flower/pull/4036), " +"[#4049](https://github.com/adap/flower/pull/4049), " +"[#4017](https://github.com/adap/flower/pull/4017), " +"[#3943](https://github.com/adap/flower/pull/3943), " +"[#3944](https://github.com/adap/flower/pull/3944), " +"[#4011](https://github.com/adap/flower/pull/4011), " +"[#3619](https://github.com/adap/flower/pull/3619))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:47 msgid "" -":py:obj:`FedProx `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"Dynamic code updates are here! `flwr run` can now ship and install the " +"latest version of your `ServerApp` and `ClientApp` to an already-running " +"federation (SuperLink and SuperNodes)." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedprox.FedProx:1 of -msgid "Federated Optimization strategy." +#: ../../source/ref-changelog.md:49 +msgid "" +"How does it work? 
`flwr run` bundles your Flower app into a single FAB " +"(Flower App Bundle) file. It then ships this FAB file, via the SuperExec," +" to both the SuperLink and those SuperNodes that need it. This allows you" +" to keep SuperExec, SuperLink and SuperNodes running as permanent " +"infrastructure, and then ship code updates (including completely new " +"projects!) dynamically." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedTrimmedAvg `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-changelog.md:51 +msgid "`flwr run` is all you need." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of -msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." +#: ../../source/ref-changelog.md:53 +msgid "" +"**Introduce isolated** `ClientApp` **execution** " +"([#3970](https://github.com/adap/flower/pull/3970), " +"[#3976](https://github.com/adap/flower/pull/3976), " +"[#4002](https://github.com/adap/flower/pull/4002), " +"[#4001](https://github.com/adap/flower/pull/4001), " +"[#4034](https://github.com/adap/flower/pull/4034), " +"[#4037](https://github.com/adap/flower/pull/4037), " +"[#3977](https://github.com/adap/flower/pull/3977), " +"[#4042](https://github.com/adap/flower/pull/4042), " +"[#3978](https://github.com/adap/flower/pull/3978), " +"[#4039](https://github.com/adap/flower/pull/4039), " +"[#4033](https://github.com/adap/flower/pull/4033), " +"[#3971](https://github.com/adap/flower/pull/3971), " +"[#4035](https://github.com/adap/flower/pull/4035), " +"[#3973](https://github.com/adap/flower/pull/3973), " +"[#4032](https://github.com/adap/flower/pull/4032))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:55 msgid "" -":py:obj:`FedXgbBagging `\\ " -"\\(\\[evaluate\\_function\\]\\)" +"The SuperNode can now run your `ClientApp` in a fully isolated way. 
In an" +" enterprise deployment, this allows you to set strict limits on what the " +"`ClientApp` can and cannot do." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of -msgid "Configurable FedXgbBagging strategy implementation." +#: ../../source/ref-changelog.md:57 +msgid "`flower-supernode` supports three `--isolation` modes:" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:59 msgid "" -":py:obj:`FedXgbCyclic `\\ " -"\\(\\*\\*kwargs\\)" +"Unset: The SuperNode runs the `ClientApp` in the same process (as in " +"previous versions of Flower). This is the default mode." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of -msgid "Configurable FedXgbCyclic strategy implementation." +#: ../../source/ref-changelog.md:60 +msgid "" +"`--isolation=subprocess`: The SuperNode starts a subprocess to run the " +"`ClientApp`." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:61 msgid "" -":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" +"`--isolation=process`: The SuperNode expects an externally-managed " +"process to run the `ClientApp`. This external process is not managed by " +"the SuperNode, so it has to be started beforehand and terminated " +"manually. The common way to use this isolation mode is via the new " +"`flwr/clientapp` Docker image." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of -msgid "Configurable FedXgbNnAvg strategy implementation." 
+#: ../../source/ref-changelog.md:63 +msgid "" +"**Improve Docker support for enterprise deployments** " +"([#4050](https://github.com/adap/flower/pull/4050), " +"[#4090](https://github.com/adap/flower/pull/4090), " +"[#3784](https://github.com/adap/flower/pull/3784), " +"[#3998](https://github.com/adap/flower/pull/3998), " +"[#4094](https://github.com/adap/flower/pull/4094), " +"[#3722](https://github.com/adap/flower/pull/3722))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:65 msgid "" -":py:obj:`FedYogi `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"Flower 1.11 ships many Docker improvements that are especially useful for" +" enterprise deployments:" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "FedYogi [Reddi et al., 2020] strategy." +#: ../../source/ref-changelog.md:67 +msgid "`flwr/supernode` comes with a new Alpine Docker image." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:68 msgid "" -":py:obj:`FaultTolerantFedAvg " -"`\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"`flwr/clientapp` is a new image to be used with the `--isolation=process`" +" option. In this mode, SuperNode and `ClientApp` run in two different " +"Docker containers. `flwr/supernode` (preferably the Alpine version) runs " +"the long-running SuperNode with `--isolation=process`. `flwr/clientapp` " +"runs the `ClientApp`. This is the recommended way to deploy Flower in " +"enterprise settings." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of -msgid "Configurable fault-tolerant FedAvg strategy implementation." +#: ../../source/ref-changelog.md:69 +msgid "" +"New all-in-one Docker Compose enables you to easily start a full Flower " +"Deployment Engine on a single machine." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:70 msgid "" -":py:obj:`Krum `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +"Completely new Docker documentation: " +"https://flower.ai/docs/framework/docker/index.html" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.krum.Krum:1 of -msgid "Krum [Blanchard et al., 2017] strategy." +#: ../../source/ref-changelog.md:72 +msgid "" +"**Improve SuperNode authentication** " +"([#4043](https://github.com/adap/flower/pull/4043), " +"[#4047](https://github.com/adap/flower/pull/4047), " +"[#4074](https://github.com/adap/flower/pull/4074))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:74 msgid "" -":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " -"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" +"SuperNode auth has been improved in several ways, including improved " +"logging, improved testing, and improved error handling." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.qfedavg.QFedAvg:1 of -msgid "Configurable QFedAvg strategy implementation." 
+#: ../../source/ref-changelog.md:76 +msgid "" +"**Update** `flwr new` **templates** " +"([#3933](https://github.com/adap/flower/pull/3933), " +"[#3894](https://github.com/adap/flower/pull/3894), " +"[#3930](https://github.com/adap/flower/pull/3930), " +"[#3931](https://github.com/adap/flower/pull/3931), " +"[#3997](https://github.com/adap/flower/pull/3997), " +"[#3979](https://github.com/adap/flower/pull/3979), " +"[#3965](https://github.com/adap/flower/pull/3965), " +"[#4013](https://github.com/adap/flower/pull/4013), " +"[#4064](https://github.com/adap/flower/pull/4064))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid ":py:obj:`Strategy `\\ \\(\\)" +#: ../../source/ref-changelog.md:78 +msgid "" +"All `flwr new` templates have been updated to show the latest recommended" +" use of Flower APIs." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.strategy.Strategy:1 of -msgid "Abstract base class for server strategy implementations." +#: ../../source/ref-changelog.md:80 +msgid "" +"**Improve Simulation Engine** " +"([#4095](https://github.com/adap/flower/pull/4095), " +"[#3913](https://github.com/adap/flower/pull/3913), " +"[#4059](https://github.com/adap/flower/pull/4059), " +"[#3954](https://github.com/adap/flower/pull/3954), " +"[#4071](https://github.com/adap/flower/pull/4071), " +"[#3985](https://github.com/adap/flower/pull/3985), " +"[#3988](https://github.com/adap/flower/pull/3988))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:2 -msgid "Bulyan" +#: ../../source/ref-changelog.md:82 +msgid "" +"The Flower Simulation Engine comes with several updates, including " +"improved run config support, verbose logging, simulation backend " +"configuration via `flwr run`, and more." 
msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 -#: flwr.server.strategy.fedavgm.FedAvgM:1 -#: flwr.server.strategy.fedmedian.FedMedian:1 -#: flwr.server.strategy.fedopt.FedOpt:1 flwr.server.strategy.fedprox.FedProx:1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 -#: flwr.server.strategy.krum.Krum:1 flwr.server.strategy.qfedavg.QFedAvg:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.fedavg.FedAvg`" +#: ../../source/ref-changelog.md:84 +msgid "" +"**Improve** `RecordSet` " +"([#4052](https://github.com/adap/flower/pull/4052), " +"[#3218](https://github.com/adap/flower/pull/3218), " +"[#4016](https://github.com/adap/flower/pull/4016))" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:3 of -msgid "Implementation based on https://arxiv.org/abs/1802.07927." +#: ../../source/ref-changelog.md:86 +msgid "" +"`RecordSet` is the core object to exchange model parameters, " +"configuration values and metrics between `ClientApp` and `ServerApp`. " +"This release ships several smaller improvements to `RecordSet` and " +"related `*Record` types." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:5 -#: flwr.server.strategy.fedadagrad.FedAdagrad:5 -#: flwr.server.strategy.fedadam.FedAdam:5 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:5 -#: flwr.server.strategy.fedavgm.FedAvgM:5 flwr.server.strategy.fedopt.FedOpt:5 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:5 -#: flwr.server.strategy.fedyogi.FedYogi:5 flwr.server.strategy.krum.Krum:5 of -msgid "Fraction of clients used during training. Defaults to 1.0." 
+#: ../../source/ref-changelog.md:88 +msgid "" +"**Update documentation** " +"([#3972](https://github.com/adap/flower/pull/3972), " +"[#3925](https://github.com/adap/flower/pull/3925), " +"[#4061](https://github.com/adap/flower/pull/4061), " +"[#3984](https://github.com/adap/flower/pull/3984), " +"[#3917](https://github.com/adap/flower/pull/3917), " +"[#3900](https://github.com/adap/flower/pull/3900), " +"[#4066](https://github.com/adap/flower/pull/4066), " +"[#3765](https://github.com/adap/flower/pull/3765), " +"[#4021](https://github.com/adap/flower/pull/4021), " +"[#3906](https://github.com/adap/flower/pull/3906), " +"[#4063](https://github.com/adap/flower/pull/4063), " +"[#4076](https://github.com/adap/flower/pull/4076), " +"[#3920](https://github.com/adap/flower/pull/3920), " +"[#3916](https://github.com/adap/flower/pull/3916))" +msgstr "" + +#: ../../source/ref-changelog.md:90 +msgid "" +"Many parts of the documentation, including the main tutorial, have been " +"migrated to show new Flower APIs and other new Flower features like the " +"improved Docker support." 
+msgstr "" + +#: ../../source/ref-changelog.md:92 +msgid "" +"**Migrate code example to use new Flower APIs** " +"([#3758](https://github.com/adap/flower/pull/3758), " +"[#3701](https://github.com/adap/flower/pull/3701), " +"[#3919](https://github.com/adap/flower/pull/3919), " +"[#3918](https://github.com/adap/flower/pull/3918), " +"[#3934](https://github.com/adap/flower/pull/3934), " +"[#3893](https://github.com/adap/flower/pull/3893), " +"[#3833](https://github.com/adap/flower/pull/3833), " +"[#3922](https://github.com/adap/flower/pull/3922), " +"[#3846](https://github.com/adap/flower/pull/3846), " +"[#3777](https://github.com/adap/flower/pull/3777), " +"[#3874](https://github.com/adap/flower/pull/3874), " +"[#3873](https://github.com/adap/flower/pull/3873), " +"[#3935](https://github.com/adap/flower/pull/3935), " +"[#3754](https://github.com/adap/flower/pull/3754), " +"[#3980](https://github.com/adap/flower/pull/3980), " +"[#4089](https://github.com/adap/flower/pull/4089), " +"[#4046](https://github.com/adap/flower/pull/4046), " +"[#3314](https://github.com/adap/flower/pull/3314), " +"[#3316](https://github.com/adap/flower/pull/3316), " +"[#3295](https://github.com/adap/flower/pull/3295), " +"[#3313](https://github.com/adap/flower/pull/3313))" +msgstr "" + +#: ../../source/ref-changelog.md:94 +msgid "Many code examples have been migrated to use new Flower APIs." 
+msgstr "" + +#: ../../source/ref-changelog.md:96 +msgid "" +"**Update Flower framework, framework internals and quality " +"infrastructure** ([#4018](https://github.com/adap/flower/pull/4018), " +"[#4053](https://github.com/adap/flower/pull/4053), " +"[#4098](https://github.com/adap/flower/pull/4098), " +"[#4067](https://github.com/adap/flower/pull/4067), " +"[#4105](https://github.com/adap/flower/pull/4105), " +"[#4048](https://github.com/adap/flower/pull/4048), " +"[#4107](https://github.com/adap/flower/pull/4107), " +"[#4069](https://github.com/adap/flower/pull/4069), " +"[#3915](https://github.com/adap/flower/pull/3915), " +"[#4101](https://github.com/adap/flower/pull/4101), " +"[#4108](https://github.com/adap/flower/pull/4108), " +"[#3914](https://github.com/adap/flower/pull/3914), " +"[#4068](https://github.com/adap/flower/pull/4068), " +"[#4041](https://github.com/adap/flower/pull/4041), " +"[#4040](https://github.com/adap/flower/pull/4040), " +"[#3986](https://github.com/adap/flower/pull/3986), " +"[#4026](https://github.com/adap/flower/pull/4026), " +"[#3961](https://github.com/adap/flower/pull/3961), " +"[#3975](https://github.com/adap/flower/pull/3975), " +"[#3983](https://github.com/adap/flower/pull/3983), " +"[#4091](https://github.com/adap/flower/pull/4091), " +"[#3982](https://github.com/adap/flower/pull/3982), " +"[#4079](https://github.com/adap/flower/pull/4079), " +"[#4073](https://github.com/adap/flower/pull/4073), " +"[#4060](https://github.com/adap/flower/pull/4060), " +"[#4106](https://github.com/adap/flower/pull/4106), " +"[#4080](https://github.com/adap/flower/pull/4080), " +"[#3974](https://github.com/adap/flower/pull/3974), " +"[#3996](https://github.com/adap/flower/pull/3996), " +"[#3991](https://github.com/adap/flower/pull/3991), " +"[#3981](https://github.com/adap/flower/pull/3981), " +"[#4093](https://github.com/adap/flower/pull/4093), " +"[#4100](https://github.com/adap/flower/pull/4100), " 
+"[#3939](https://github.com/adap/flower/pull/3939), " +"[#3955](https://github.com/adap/flower/pull/3955), " +"[#3940](https://github.com/adap/flower/pull/3940), " +"[#4038](https://github.com/adap/flower/pull/4038))" +msgstr "" + +#: ../../source/ref-changelog.md:98 ../../source/ref-changelog.md:205 +msgid "" +"As always, many parts of the Flower framework and quality infrastructure " +"were improved and updated." +msgstr "" + +#: ../../source/ref-changelog.md:100 ../../source/ref-changelog.md:217 +#: ../../source/ref-changelog.md:309 ../../source/ref-changelog.md:1292 +msgid "Deprecations" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:7 -#: flwr.server.strategy.fedadagrad.FedAdagrad:7 -#: flwr.server.strategy.fedadam.FedAdam:7 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:7 -#: flwr.server.strategy.fedavgm.FedAvgM:7 flwr.server.strategy.fedopt.FedOpt:7 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:7 -#: flwr.server.strategy.fedyogi.FedYogi:7 flwr.server.strategy.krum.Krum:7 of -msgid "Fraction of clients used during validation. Defaults to 1.0." +#: ../../source/ref-changelog.md:102 +msgid "" +"**Deprecate accessing `Context` via `Client.context`** " +"([#3797](https://github.com/adap/flower/pull/3797))" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:9 -#: flwr.server.strategy.fedadagrad.FedAdagrad:9 -#: flwr.server.strategy.fedadam.FedAdam:9 flwr.server.strategy.fedavg.FedAvg:13 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:9 -#: flwr.server.strategy.fedavgm.FedAvgM:9 flwr.server.strategy.fedopt.FedOpt:9 -#: flwr.server.strategy.fedprox.FedProx:45 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:9 -#: flwr.server.strategy.fedyogi.FedYogi:9 flwr.server.strategy.krum.Krum:9 of -msgid "Minimum number of clients used during training. Defaults to 2." +#: ../../source/ref-changelog.md:104 +msgid "" +"Now that both `client_fn` and `server_fn` receive a `Context` object, " +"accessing `Context` via `Client.context` is deprecated. 
`Client.context` " +"will be removed in a future release. If you need to access `Context` in " +"your `Client` implementation, pass it manually when creating the `Client`" +" instance in `client_fn`:" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:11 -#: flwr.server.strategy.fedadagrad.FedAdagrad:11 -#: flwr.server.strategy.fedadam.FedAdam:11 -#: flwr.server.strategy.fedavg.FedAvg:15 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:11 -#: flwr.server.strategy.fedavgm.FedAvgM:11 -#: flwr.server.strategy.fedopt.FedOpt:11 -#: flwr.server.strategy.fedprox.FedProx:47 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:11 -#: flwr.server.strategy.fedyogi.FedYogi:11 flwr.server.strategy.krum.Krum:11 of -msgid "Minimum number of clients used during validation. Defaults to 2." +#: ../../source/ref-changelog.md:113 +msgid "" +"**Update CLIs to accept an app directory instead of** `ClientApp` **and**" +" `ServerApp` ([#3952](https://github.com/adap/flower/pull/3952), " +"[#4077](https://github.com/adap/flower/pull/4077), " +"[#3850](https://github.com/adap/flower/pull/3850))" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:13 -#: flwr.server.strategy.fedadagrad.FedAdagrad:13 -#: flwr.server.strategy.fedadam.FedAdam:13 -#: flwr.server.strategy.fedavg.FedAvg:17 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:13 -#: flwr.server.strategy.fedavgm.FedAvgM:13 -#: flwr.server.strategy.fedopt.FedOpt:13 -#: flwr.server.strategy.fedprox.FedProx:49 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:13 -#: flwr.server.strategy.fedyogi.FedYogi:13 flwr.server.strategy.krum.Krum:13 of -msgid "Minimum number of total clients in the system. Defaults to 2." +#: ../../source/ref-changelog.md:115 +msgid "" +"The CLI commands `flower-supernode` and `flower-server-app` now accept an" +" app directory as argument (instead of references to a `ClientApp` or " +"`ServerApp`). 
An app directory is any directory containing a " +"`pyproject.toml` file (with the appropriate Flower config fields set). " +"The easiest way to generate a compatible project structure is to use " +"`flwr new`." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:15 flwr.server.strategy.krum.Krum:15 of -msgid "Number of malicious clients in the system. Defaults to 0." +#: ../../source/ref-changelog.md:117 +msgid "" +"**Disable** `flower-client-app` **CLI command** " +"([#4022](https://github.com/adap/flower/pull/4022))" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:17 -#: flwr.server.strategy.fedadagrad.FedAdagrad:15 -#: flwr.server.strategy.fedadam.FedAdam:15 -#: flwr.server.strategy.fedavg.FedAvg:19 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:15 -#: flwr.server.strategy.fedavgm.FedAvgM:15 -#: flwr.server.strategy.fedopt.FedOpt:15 -#: flwr.server.strategy.fedprox.FedProx:51 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:15 -#: flwr.server.strategy.fedyogi.FedYogi:17 -#: flwr.server.strategy.fedyogi.FedYogi:18 -#: flwr.server.strategy.fedyogi.FedYogi:19 flwr.server.strategy.krum.Krum:20 of -msgid "Optional function used for validation. Defaults to None." +#: ../../source/ref-changelog.md:119 +msgid "`flower-client-app` has been disabled. Use `flower-supernode` instead." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:19 -#: flwr.server.strategy.fedadagrad.FedAdagrad:17 -#: flwr.server.strategy.fedadam.FedAdam:17 -#: flwr.server.strategy.fedavg.FedAvg:21 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:17 -#: flwr.server.strategy.fedavgm.FedAvgM:17 -#: flwr.server.strategy.fedopt.FedOpt:17 -#: flwr.server.strategy.fedprox.FedProx:53 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:17 -#: flwr.server.strategy.fedyogi.FedYogi:20 flwr.server.strategy.krum.Krum:22 of -msgid "Function used to configure training. Defaults to None." 
+#: ../../source/ref-changelog.md:121 +msgid "" +"**Use spaces instead of commas for separating config args** " +"([#4000](https://github.com/adap/flower/pull/4000))" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:21 -#: flwr.server.strategy.fedadagrad.FedAdagrad:19 -#: flwr.server.strategy.fedadam.FedAdam:19 -#: flwr.server.strategy.fedavg.FedAvg:23 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:19 -#: flwr.server.strategy.fedavgm.FedAvgM:19 -#: flwr.server.strategy.fedopt.FedOpt:19 -#: flwr.server.strategy.fedprox.FedProx:55 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:19 -#: flwr.server.strategy.fedyogi.FedYogi:22 flwr.server.strategy.krum.Krum:24 of -msgid "Function used to configure validation. Defaults to None." +#: ../../source/ref-changelog.md:123 +msgid "" +"When passing configs (run config, node config) to Flower, you now need to" +" separate key-value pairs using spaces instead of commas. For example:" +msgstr "" + +#: ../../source/ref-changelog.md:129 +msgid "Previously, you could pass configs using commas, like this:" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:23 -#: flwr.server.strategy.fedadagrad.FedAdagrad:25 -#: flwr.server.strategy.fedadam.FedAdam:21 -#: flwr.server.strategy.fedavg.FedAvg:25 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:21 -#: flwr.server.strategy.fedavgm.FedAvgM:21 -#: flwr.server.strategy.fedopt.FedOpt:21 -#: flwr.server.strategy.fedprox.FedProx:57 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:21 -#: flwr.server.strategy.fedyogi.FedYogi:24 flwr.server.strategy.krum.Krum:26 of -msgid "Whether or not accept rounds containing failures. Defaults to True." 
+#: ../../source/ref-changelog.md:135 +msgid "" +"**Remove** `flwr example` **CLI command** " +"([#4084](https://github.com/adap/flower/pull/4084))" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:25 -#: flwr.server.strategy.fedadagrad.FedAdagrad:27 -#: flwr.server.strategy.fedadam.FedAdam:23 -#: flwr.server.strategy.fedavg.FedAvg:27 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:24 -#: flwr.server.strategy.fedavgm.FedAvgM:23 -#: flwr.server.strategy.fedopt.FedOpt:23 -#: flwr.server.strategy.fedprox.FedProx:59 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:23 -#: flwr.server.strategy.fedyogi.FedYogi:26 flwr.server.strategy.krum.Krum:28 of -msgid "Initial global model parameters." +#: ../../source/ref-changelog.md:137 +msgid "" +"The experimental `flwr example` CLI command has been removed. Use `flwr " +"new` to generate a project and then run it using `flwr run`." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:27 of +#: ../../source/ref-changelog.md:139 +msgid "v1.10.0 (2024-07-24)" +msgstr "" + +#: ../../source/ref-changelog.md:145 msgid "" -"Byzantine resilient aggregation rule that is used as the first step of " -"the Bulyan (e.g., Krum)" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. 
" +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, " +"`Ikko Eltociear Ashimine`, `Javier`, `Jiahao Tan`, `Mohammad Naseri`, " +"`Robert Steiner`, `Sebastian van der Voort`, `Taner Topal`, `Yan Gao` " msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:29 of -msgid "arguments to the first_aggregation rule" +#: ../../source/ref-changelog.md:149 +msgid "" +"**Introduce** `flwr run` **(beta)** " +"([#3810](https://github.com/adap/flower/pull/3810), " +"[#3826](https://github.com/adap/flower/pull/3826), " +"[#3880](https://github.com/adap/flower/pull/3880), " +"[#3807](https://github.com/adap/flower/pull/3807), " +"[#3800](https://github.com/adap/flower/pull/3800), " +"[#3814](https://github.com/adap/flower/pull/3814), " +"[#3811](https://github.com/adap/flower/pull/3811), " +"[#3809](https://github.com/adap/flower/pull/3809), " +"[#3819](https://github.com/adap/flower/pull/3819))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:151 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +"Flower 1.10 ships the first beta release of the new `flwr run` command. " +"`flwr run` can run different projects using `flwr run path/to/project`, " +"it enables you to easily switch between different federations using `flwr" +" run . federation` and it runs your Flower project using either local " +"simulation or the new (experimental) SuperExec service. This allows " +"Flower to scale federatated learning from fast local simulation to large-" +"scale production deployment, seamlessly. All projects generated with " +"`flwr new` are immediately runnable using `flwr run`. Give it a try: use " +"`flwr new` to generate a project and then run it using `flwr run`." 
msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Aggregate evaluation losses using weighted average." +#: ../../source/ref-changelog.md:153 +msgid "" +"**Introduce run config** " +"([#3751](https://github.com/adap/flower/pull/3751), " +"[#3750](https://github.com/adap/flower/pull/3750), " +"[#3845](https://github.com/adap/flower/pull/3845), " +"[#3824](https://github.com/adap/flower/pull/3824), " +"[#3746](https://github.com/adap/flower/pull/3746), " +"[#3728](https://github.com/adap/flower/pull/3728), " +"[#3730](https://github.com/adap/flower/pull/3730), " +"[#3725](https://github.com/adap/flower/pull/3725), " +"[#3729](https://github.com/adap/flower/pull/3729), " +"[#3580](https://github.com/adap/flower/pull/3580), " +"[#3578](https://github.com/adap/flower/pull/3578), " +"[#3576](https://github.com/adap/flower/pull/3576), " +"[#3798](https://github.com/adap/flower/pull/3798), " +"[#3732](https://github.com/adap/flower/pull/3732), " +"[#3815](https://github.com/adap/flower/pull/3815))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:155 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"The new run config feature allows you to run your Flower project in " +"different configurations without having to change a single line of code. 
" +"You can now build a configurable `ServerApp` and `ClientApp` that read " +"configuration values at runtime. This enables you to specify config " +"values like `learning-rate=0.01` in `pyproject.toml` (under the " +"`[tool.flwr.app.config]` key). These config values can then be easily " +"overridden via `flwr run --run-config learning-rate=0.02`, and read from " +"`Context` using `lr = context.run_config[\"learning-rate\"]`. Create a " +"new project using `flwr new` to see run config in action." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "Aggregate fit results using Bulyan." +#: ../../source/ref-changelog.md:157 +msgid "" +"**Generalize** `client_fn` **signature to** `client_fn(context: Context) " +"-> Client` ([#3779](https://github.com/adap/flower/pull/3779), " +"[#3697](https://github.com/adap/flower/pull/3697), " +"[#3694](https://github.com/adap/flower/pull/3694), " +"[#3696](https://github.com/adap/flower/pull/3696))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:159 msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +"The `client_fn` signature has been generalized to `client_fn(context: " +"Context) -> Client`. It now receives a `Context` object instead of the " +"(now depreacated) `cid: str`. `Context` allows accessing `node_id`, " +"`node_config` and `run_config`, among other things. This enables you to " +"build a configurable `ClientApp` that leverages the new run config " +"system." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of -msgid "Configure the next round of evaluation." +#: ../../source/ref-changelog.md:161 +msgid "" +"The previous signature `client_fn(cid: str)` is now deprecated and " +"support for it will be removed in a future release. 
Use " +"`client_fn(context: Context) -> Client` everywhere." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:163 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Introduce new** `server_fn(context)` " +"([#3773](https://github.com/adap/flower/pull/3773), " +"[#3796](https://github.com/adap/flower/pull/3796), " +"[#3771](https://github.com/adap/flower/pull/3771))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_fit:1 -#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: 
flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_fit:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of -msgid "Configure the next round of training." +#: ../../source/ref-changelog.md:165 +msgid "" +"In addition to the new `client_fn(context:Context)`, a new " +"`server_fn(context: Context) -> ServerAppComponents` can now be passed to" +" `ServerApp` (instead of passing, for example, `Strategy`, directly). " +"This enables you to leverage the full `Context` on the server-side to " +"build a configurable `ServerApp`." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:167 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"**Relaunch all** `flwr new` **templates** " +"([#3877](https://github.com/adap/flower/pull/3877), " +"[#3821](https://github.com/adap/flower/pull/3821), " +"[#3587](https://github.com/adap/flower/pull/3587), " +"[#3795](https://github.com/adap/flower/pull/3795), " +"[#3875](https://github.com/adap/flower/pull/3875), " +"[#3859](https://github.com/adap/flower/pull/3859), " +"[#3760](https://github.com/adap/flower/pull/3760))" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.evaluate:1 -#: 
flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Evaluate model parameters using an evaluation function." +#: ../../source/ref-changelog.md:169 +msgid "" +"All `flwr new` templates have been significantly updated to showcase new " +"Flower features and best practices. This includes using `flwr run` and " +"the new run config feature. You can now easily create a new project using" +" `flwr new` and, after following the instructions to install it, `flwr " +"run` it." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:171 msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"**Introduce** `flower-supernode` **(preview)** " +"([#3353](https://github.com/adap/flower/pull/3353))" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.initialize_parameters:1 -#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Initialize global model parameters." +#: ../../source/ref-changelog.md:173 +msgid "" +"The new `flower-supernode` CLI is here to replace `flower-client-app`. " +"`flower-supernode` brings full multi-app support to the Flower client-" +"side. 
It also allows to pass `--node-config` to the SuperNode, which is " +"accessible in your `ClientApp` via `Context` (using the new " +"`client_fn(context: Context)` signature)." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:175 msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"**Introduce node config** " +"([#3782](https://github.com/adap/flower/pull/3782), " +"[#3780](https://github.com/adap/flower/pull/3780), " +"[#3695](https://github.com/adap/flower/pull/3695), " +"[#3886](https://github.com/adap/flower/pull/3886))" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_evaluation_clients:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of -msgid "Use a fraction of available clients for evaluation." +#: ../../source/ref-changelog.md:177 +msgid "" +"A new node config feature allows you to pass a static configuration to " +"the SuperNode. This configuration is read-only and available to every " +"`ClientApp` running on that SuperNode. A `ClientApp` can access the node " +"config via `Context` (`context.node_config`)." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:179 msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +"**Introduce SuperExec (experimental)** " +"([#3605](https://github.com/adap/flower/pull/3605), " +"[#3723](https://github.com/adap/flower/pull/3723), " +"[#3731](https://github.com/adap/flower/pull/3731), " +"[#3589](https://github.com/adap/flower/pull/3589), " +"[#3604](https://github.com/adap/flower/pull/3604), " +"[#3622](https://github.com/adap/flower/pull/3622), " +"[#3838](https://github.com/adap/flower/pull/3838), " +"[#3720](https://github.com/adap/flower/pull/3720), " +"[#3606](https://github.com/adap/flower/pull/3606), " +"[#3602](https://github.com/adap/flower/pull/3602), " +"[#3603](https://github.com/adap/flower/pull/3603), " +"[#3555](https://github.com/adap/flower/pull/3555), " +"[#3808](https://github.com/adap/flower/pull/3808), " +"[#3724](https://github.com/adap/flower/pull/3724), " +"[#3658](https://github.com/adap/flower/pull/3658), " +"[#3629](https://github.com/adap/flower/pull/3629))" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_fit_clients:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of -msgid "Return the sample size and the required number of available clients." +#: ../../source/ref-changelog.md:181 +msgid "" +"This is the first experimental release of Flower SuperExec, a new service" +" that executes your runs. 
It's not ready for production deployment just " +"yet, but don't hesitate to give it a try if you're interested." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 -msgid "DPFedAvgAdaptive" +#: ../../source/ref-changelog.md:183 +msgid "" +"**Add new federated learning with tabular data example** " +"([#3568](https://github.com/adap/flower/pull/3568))" msgstr "" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" +#: ../../source/ref-changelog.md:185 +msgid "" +"A new code example exemplifies a federated learning setup using the " +"Flower framework on the Adult Census Income tabular dataset." msgstr "" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:3 of -msgid "This class is deprecated and will be removed in a future release." +#: ../../source/ref-changelog.md:187 +msgid "" +"**Create generic adapter layer (preview)** " +"([#3538](https://github.com/adap/flower/pull/3538), " +"[#3536](https://github.com/adap/flower/pull/3536), " +"[#3540](https://github.com/adap/flower/pull/3540))" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:189 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"A new generic gRPC adapter layer allows 3rd-party frameworks to integrate" +" with Flower in a transparent way. This makes Flower more modular and " +"allows for integration into other federated learning solutions and " +"platforms." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "Aggregate evaluation losses using the given strategy." 
+#: ../../source/ref-changelog.md:191 +msgid "" +"**Refactor Flower Simulation Engine** " +"([#3581](https://github.com/adap/flower/pull/3581), " +"[#3471](https://github.com/adap/flower/pull/3471), " +"[#3804](https://github.com/adap/flower/pull/3804), " +"[#3468](https://github.com/adap/flower/pull/3468), " +"[#3839](https://github.com/adap/flower/pull/3839), " +"[#3806](https://github.com/adap/flower/pull/3806), " +"[#3861](https://github.com/adap/flower/pull/3861), " +"[#3543](https://github.com/adap/flower/pull/3543), " +"[#3472](https://github.com/adap/flower/pull/3472), " +"[#3829](https://github.com/adap/flower/pull/3829), " +"[#3469](https://github.com/adap/flower/pull/3469))" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:193 msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"The Simulation Engine was significantly refactored. This results in " +"faster and more stable simulations. It is also the foundation for " +"upcoming changes that aim to provide the next level of performance and " +"configurability in federated learning simulations." msgstr "" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." +#: ../../source/ref-changelog.md:195 +msgid "" +"**Optimize Docker containers** " +"([#3591](https://github.com/adap/flower/pull/3591))" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:197 msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Flower Docker containers were optimized and updated to use that latest " +"Flower framework features." 
msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of -msgid "Configure the next round of evaluation using the specified strategy." +#: ../../source/ref-changelog.md:199 +msgid "" +"**Improve logging** ([#3776](https://github.com/adap/flower/pull/3776), " +"[#3789](https://github.com/adap/flower/pull/3789))" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:201 msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Improved logging aims to be more concise and helpful to show you the " +"details you actually care about." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:203 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"**Refactor framework internals** " +"([#3621](https://github.com/adap/flower/pull/3621), " +"[#3792](https://github.com/adap/flower/pull/3792), " +"[#3772](https://github.com/adap/flower/pull/3772), " +"[#3805](https://github.com/adap/flower/pull/3805), " +"[#3583](https://github.com/adap/flower/pull/3583), " +"[#3825](https://github.com/adap/flower/pull/3825), " +"[#3597](https://github.com/adap/flower/pull/3597), " +"[#3802](https://github.com/adap/flower/pull/3802), " +"[#3569](https://github.com/adap/flower/pull/3569))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 -#: 
flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of -msgid "Evaluate model parameters using an evaluation function from the strategy." +#: ../../source/ref-changelog.md:207 +#, fuzzy +msgid "Documentation improvements" +msgstr "선택적 개선 사항" + +#: ../../source/ref-changelog.md:209 +msgid "" +"**Add 🇰🇷 Korean translations** " +"([#3680](https://github.com/adap/flower/pull/3680))" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:211 msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"**Update translations** " +"([#3586](https://github.com/adap/flower/pull/3586), " +"[#3679](https://github.com/adap/flower/pull/3679), " +"[#3570](https://github.com/adap/flower/pull/3570), " +"[#3681](https://github.com/adap/flower/pull/3681), " +"[#3617](https://github.com/adap/flower/pull/3617), " +"[#3674](https://github.com/adap/flower/pull/3674), " +"[#3671](https://github.com/adap/flower/pull/3671), " +"[#3572](https://github.com/adap/flower/pull/3572), " +"[#3631](https://github.com/adap/flower/pull/3631))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: 
flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of -msgid "Initialize global model parameters using given strategy." +#: ../../source/ref-changelog.md:213 +msgid "" +"**Update documentation** " +"([#3864](https://github.com/adap/flower/pull/3864), " +"[#3688](https://github.com/adap/flower/pull/3688), " +"[#3562](https://github.com/adap/flower/pull/3562), " +"[#3641](https://github.com/adap/flower/pull/3641), " +"[#3384](https://github.com/adap/flower/pull/3384), " +"[#3634](https://github.com/adap/flower/pull/3634), " +"[#3823](https://github.com/adap/flower/pull/3823), " +"[#3793](https://github.com/adap/flower/pull/3793), " +"[#3707](https://github.com/adap/flower/pull/3707))" +msgstr "" + +#: ../../source/ref-changelog.md:215 +msgid "" +"Updated documentation includes new install instructions for different " +"shells, a new Flower Code Examples documentation landing page, new `flwr`" +" CLI docs and an updated federated XGBoost code example." 
msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 -#: flwr.server.strategy.strategy.Strategy.configure_fit:3 -#: flwr.server.strategy.strategy.Strategy.evaluate:6 of -msgid "The current round of federated learning." +#: ../../source/ref-changelog.md:219 +msgid "**Deprecate** `client_fn(cid: str)`" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 -#: flwr.server.strategy.strategy.Strategy.configure_fit:7 -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of -msgid "The client manager which holds all currently connected clients." +#: ../../source/ref-changelog.md:221 +msgid "" +"`client_fn` used to have a signature `client_fn(cid: str) -> Client`. " +"This signature is now deprecated. Use the new signature " +"`client_fn(context: Context) -> Client` instead. The new argument " +"`context` allows accessing `node_id`, `node_config`, `run_config` and " +"other `Context` features. When running using the simulation engine (or " +"using `flower-supernode` with a custom `--node-config partition-id=...`)," +" `context.node_config[\"partition-id\"]` will return an `int` partition " +"ID that can be used with Flower Datasets to load a different partition of" +" the dataset on each simulated or deployed SuperNode." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of +#: ../../source/ref-changelog.md:223 msgid "" -"**evaluate_configuration** -- A list of tuples. 
Each tuple in the list " -"identifies a `ClientProxy` and the `EvaluateIns` for this particular " -"`ClientProxy`. If a particular `ClientProxy` is not included in this " -"list, it means that this `ClientProxy` will not participate in the next " -"round of federated evaluation." +"**Deprecate passing** `Server/ServerConfig/Strategy/ClientManager` **to**" +" `ServerApp` **directly**" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 -msgid "DPFedAvgFixed" +#: ../../source/ref-changelog.md:225 +msgid "" +"Creating `ServerApp` using `ServerApp(config=config, strategy=strategy)` " +"is now deprecated. Instead of passing " +"`Server/ServerConfig/Strategy/ClientManager` to `ServerApp` directly, " +"pass them wrapped in a `server_fn(context: Context) -> " +"ServerAppComponents` function, like this: " +"`ServerApp(server_fn=server_fn)`. `ServerAppComponents` can hold " +"references to `Server/ServerConfig/Strategy/ClientManager`. In addition " +"to that, `server_fn` allows you to access `Context` (for example, to read" +" the `run_config`)." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 -#: flwr.server.strategy.fedavg.FedAvg:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" +#: ../../source/ref-changelog.md:229 +msgid "" +"**Remove support for `client_ids` in `start_simulation`** " +"([#3699](https://github.com/adap/flower/pull/3699))" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:231 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"The (rarely used) feature that allowed passing custom `client_ids` to the" +" `start_simulation` function was removed. This removal is part of a " +"bigger effort to refactor the simulation engine and unify how the Flower " +"internals work in simulation and deployment." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:233 msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**Remove `flower-driver-api` and `flower-fleet-api`** " +"([#3418](https://github.com/adap/flower/pull/3418))" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of -msgid "Aggregate training results using unweighted aggregation." 
+#: ../../source/ref-changelog.md:235 +msgid "" +"The two deprecated CLI commands `flower-driver-api` and `flower-fleet-" +"api` were removed in an effort to streamline the SuperLink developer " +"experience. Use `flower-superlink` instead." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:237 +msgid "v1.9.0 (2024-06-10)" +msgstr "" + +#: ../../source/ref-changelog.md:243 msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Mahdi Beitollahi`," +" `Robert Steiner`, `Taner Topal`, `Yan Gao`, `bapic`, `mohammadnaseri` " msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:247 msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Introduce built-in authentication (preview)** " +"([#2946](https://github.com/adap/flower/pull/2946), " +"[#3388](https://github.com/adap/flower/pull/3388), " +"[#2948](https://github.com/adap/flower/pull/2948), " +"[#2917](https://github.com/adap/flower/pull/2917), " +"[#3386](https://github.com/adap/flower/pull/3386), " +"[#3308](https://github.com/adap/flower/pull/3308), " +"[#3001](https://github.com/adap/flower/pull/3001), " +"[#3409](https://github.com/adap/flower/pull/3409), " +"[#2999](https://github.com/adap/flower/pull/2999), " +"[#2979](https://github.com/adap/flower/pull/2979), " +"[#3389](https://github.com/adap/flower/pull/3389), " +"[#3503](https://github.com/adap/flower/pull/3503), " +"[#3366](https://github.com/adap/flower/pull/3366), " +"[#3357](https://github.com/adap/flower/pull/3357))" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of +#: 
../../source/ref-changelog.md:249 msgid "" -"Configure the next round of training incorporating Differential Privacy " -"(DP)." +"Flower 1.9 introduces the first build-in version of client node " +"authentication. In previous releases, users often wrote glue code to " +"connect Flower to external authentication systems. With this release, the" +" SuperLink can authenticate SuperNodes using a built-in authentication " +"system. A new [how-to guide](https://flower.ai/docs/framework/how-to-" +"authenticate-supernodes.html) and a new [code " +"example](https://github.com/adap/flower/tree/main/examples/flower-" +"authentication) help you to get started." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:251 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"This is the first preview release of the Flower-native authentication " +"system. Many additional features are on the roadmap for upcoming Flower " +"releases - stay tuned." 
msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:253 msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"**Introduce end-to-end Docker support** " +"([#3483](https://github.com/adap/flower/pull/3483), " +"[#3266](https://github.com/adap/flower/pull/3266), " +"[#3390](https://github.com/adap/flower/pull/3390), " +"[#3283](https://github.com/adap/flower/pull/3283), " +"[#3285](https://github.com/adap/flower/pull/3285), " +"[#3391](https://github.com/adap/flower/pull/3391), " +"[#3403](https://github.com/adap/flower/pull/3403), " +"[#3458](https://github.com/adap/flower/pull/3458), " +"[#3533](https://github.com/adap/flower/pull/3533), " +"[#3453](https://github.com/adap/flower/pull/3453), " +"[#3486](https://github.com/adap/flower/pull/3486), " +"[#3290](https://github.com/adap/flower/pull/3290))" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of +#: ../../source/ref-changelog.md:255 msgid "" -"Configuration of the next training round includes information related to " -"DP, such as clip norm and noise stddev." +"Full Flower Next Docker support is here! With the release of Flower 1.9, " +"Flower provides stable Docker images for the Flower SuperLink, the Flower" +" SuperNode, and the Flower `ServerApp`. This set of images enables you to" +" run all Flower components in Docker. Check out the new [how-to " +"guide](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html) to get stated." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 -#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of +#: ../../source/ref-changelog.md:257 msgid "" -"**fit_configuration** -- A list of tuples. Each tuple in the list " -"identifies a `ClientProxy` and the `FitIns` for this particular " -"`ClientProxy`. 
If a particular `ClientProxy` is not included in this " -"list, it means that this `ClientProxy` will not participate in the next " -"round of federated learning." +"**Re-architect Flower Next simulation engine** " +"([#3307](https://github.com/adap/flower/pull/3307), " +"[#3355](https://github.com/adap/flower/pull/3355), " +"[#3272](https://github.com/adap/flower/pull/3272), " +"[#3273](https://github.com/adap/flower/pull/3273), " +"[#3417](https://github.com/adap/flower/pull/3417), " +"[#3281](https://github.com/adap/flower/pull/3281), " +"[#3343](https://github.com/adap/flower/pull/3343), " +"[#3326](https://github.com/adap/flower/pull/3326))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 -msgid "DifferentialPrivacyClientSideAdaptiveClipping" +#: ../../source/ref-changelog.md:259 +msgid "" +"Flower Next simulations now use a new in-memory `Driver` that improves " +"the reliability of simulations, especially in notebook environments. This" +" is a significant step towards a complete overhaul of the Flower Next " +"simulation architecture." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 -#: of -msgid "Use `adaptiveclipping_mod` modifier at the client side." 
+#: ../../source/ref-changelog.md:261 +msgid "" +"**Upgrade simulation engine** " +"([#3354](https://github.com/adap/flower/pull/3354), " +"[#3378](https://github.com/adap/flower/pull/3378), " +"[#3262](https://github.com/adap/flower/pull/3262), " +"[#3435](https://github.com/adap/flower/pull/3435), " +"[#3501](https://github.com/adap/flower/pull/3501), " +"[#3482](https://github.com/adap/flower/pull/3482), " +"[#3494](https://github.com/adap/flower/pull/3494))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 -#: of +#: ../../source/ref-changelog.md:263 msgid "" -"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " -"performs clipping on the server-side, " -"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " -"happen on the client-side, usually by using the built-in " -"`adaptiveclipping_mod`." +"The Flower Next simulation engine comes with improved and configurable " +"logging. The Ray-based simulation backend in Flower 1.9 was updated to " +"use Ray 2.10." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 -#: of -msgid "The strategy to which DP functionalities will be added by this wrapper." +#: ../../source/ref-changelog.md:265 +msgid "" +"**Introduce FedPFT baseline** " +"([#3268](https://github.com/adap/flower/pull/3268))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 -#: of -msgid "The noise multiplier for the Gaussian mechanism for model updates." 
+#: ../../source/ref-changelog.md:267 +msgid "" +"FedPFT allows you to perform one-shot Federated Learning by leveraging " +"widely available foundational models, dramatically reducing communication" +" costs while delivering high performing models. This is work led by Mahdi" +" Beitollahi from Huawei Noah's Ark Lab (Montreal, Canada). Read all the " +"details in their paper: \"Parametric Feature Transfer: One-shot Federated" +" Learning with Foundation Models\" " +"([arxiv](https://arxiv.org/abs/2402.01862))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 -#: of -msgid "The number of clients that are sampled on each round." +#: ../../source/ref-changelog.md:269 +msgid "" +"**Launch additional** `flwr new` **templates for Apple MLX, Hugging Face " +"Transformers, scikit-learn and TensorFlow** " +"([#3291](https://github.com/adap/flower/pull/3291), " +"[#3139](https://github.com/adap/flower/pull/3139), " +"[#3284](https://github.com/adap/flower/pull/3284), " +"[#3251](https://github.com/adap/flower/pull/3251), " +"[#3376](https://github.com/adap/flower/pull/3376), " +"[#3287](https://github.com/adap/flower/pull/3287))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 -#: of +#: ../../source/ref-changelog.md:271 msgid "" -"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " -"recommends to set to 0.1." +"The `flwr` CLI's `flwr new` command is starting to become everone's " +"favorite way of creating new Flower projects. 
This release introduces " +"additional `flwr new` templates for Apple MLX, Hugging Face Transformers," +" scikit-learn and TensorFlow. In addition to that, existing templates " +"also received updates." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 -#: of -msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." +#: ../../source/ref-changelog.md:273 +msgid "" +"**Refine** `RecordSet` **API** " +"([#3209](https://github.com/adap/flower/pull/3209), " +"[#3331](https://github.com/adap/flower/pull/3331), " +"[#3334](https://github.com/adap/flower/pull/3334), " +"[#3335](https://github.com/adap/flower/pull/3335), " +"[#3375](https://github.com/adap/flower/pull/3375), " +"[#3368](https://github.com/adap/flower/pull/3368))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 -#: of +#: ../../source/ref-changelog.md:275 msgid "" -"The learning rate for the clipping norm adaptation. Defaults to 0.2. " -"Andrew et al. recommends to set to 0.2." +"`RecordSet` is part of the Flower Next low-level API preview release. In " +"Flower 1.9, `RecordSet` received a number of usability improvements that " +"make it easier to build `RecordSet`-based `ServerApp`s and `ClientApp`s." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 -#: of +#: ../../source/ref-changelog.md:277 msgid "" -"The stddev of the noise added to the count of updates currently below the" -" estimate. Andrew et al. 
recommends to set to `expected_num_records/20`" +"**Beautify logging** ([#3379](https://github.com/adap/flower/pull/3379), " +"[#3430](https://github.com/adap/flower/pull/3430), " +"[#3461](https://github.com/adap/flower/pull/3461), " +"[#3360](https://github.com/adap/flower/pull/3360), " +"[#3433](https://github.com/adap/flower/pull/3433))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 -#: of -msgid "Create a strategy:" +#: ../../source/ref-changelog.md:279 +msgid "" +"Logs received a substantial update. Not only are logs now much nicer to " +"look at, but they are also more configurable." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 -#: of +#: ../../source/ref-changelog.md:281 msgid "" -"Wrap the strategy with the " -"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" +"**Improve reliability** " +"([#3564](https://github.com/adap/flower/pull/3564), " +"[#3561](https://github.com/adap/flower/pull/3561), " +"[#3566](https://github.com/adap/flower/pull/3566), " +"[#3462](https://github.com/adap/flower/pull/3462), " +"[#3225](https://github.com/adap/flower/pull/3225), " +"[#3514](https://github.com/adap/flower/pull/3514), " +"[#3535](https://github.com/adap/flower/pull/3535), " +"[#3372](https://github.com/adap/flower/pull/3372))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 -#: of -msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" +#: ../../source/ref-changelog.md:283 +msgid "" +"Flower 1.9 includes reliability improvements across many parts of the " +"system. 
One example is a much improved SuperNode shutdown procedure." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:285 msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +"**Update Swift and C++ SDKs** " +"([#3321](https://github.com/adap/flower/pull/3321), " +"[#2763](https://github.com/adap/flower/pull/2763))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:287 msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +"In the C++ SDK, communication-related code is now separate from main " +"client logic. A new abstract class `Communicator` has been introduced " +"alongside a gRPC implementation of it." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 -#: of -msgid "Aggregate training results and update clip norms." 
+#: ../../source/ref-changelog.md:289 +msgid "" +"**Improve testing, tooling and CI/CD infrastructure** " +"([#3294](https://github.com/adap/flower/pull/3294), " +"[#3282](https://github.com/adap/flower/pull/3282), " +"[#3311](https://github.com/adap/flower/pull/3311), " +"[#2878](https://github.com/adap/flower/pull/2878), " +"[#3333](https://github.com/adap/flower/pull/3333), " +"[#3255](https://github.com/adap/flower/pull/3255), " +"[#3349](https://github.com/adap/flower/pull/3349), " +"[#3400](https://github.com/adap/flower/pull/3400), " +"[#3401](https://github.com/adap/flower/pull/3401), " +"[#3399](https://github.com/adap/flower/pull/3399), " +"[#3346](https://github.com/adap/flower/pull/3346), " +"[#3398](https://github.com/adap/flower/pull/3398), " +"[#3397](https://github.com/adap/flower/pull/3397), " +"[#3347](https://github.com/adap/flower/pull/3347), " +"[#3502](https://github.com/adap/flower/pull/3502), " +"[#3387](https://github.com/adap/flower/pull/3387), " +"[#3542](https://github.com/adap/flower/pull/3542), " +"[#3396](https://github.com/adap/flower/pull/3396), " +"[#3496](https://github.com/adap/flower/pull/3496), " +"[#3465](https://github.com/adap/flower/pull/3465), " +"[#3473](https://github.com/adap/flower/pull/3473), " +"[#3484](https://github.com/adap/flower/pull/3484), " +"[#3521](https://github.com/adap/flower/pull/3521), " +"[#3363](https://github.com/adap/flower/pull/3363), " +"[#3497](https://github.com/adap/flower/pull/3497), " +"[#3464](https://github.com/adap/flower/pull/3464), " +"[#3495](https://github.com/adap/flower/pull/3495), " +"[#3478](https://github.com/adap/flower/pull/3478), " +"[#3271](https://github.com/adap/flower/pull/3271))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:291 msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +"As always, the Flower tooling, 
testing, and CI/CD infrastructure has " +"received many updates." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:293 msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +"**Improve documentation** " +"([#3530](https://github.com/adap/flower/pull/3530), " +"[#3539](https://github.com/adap/flower/pull/3539), " +"[#3425](https://github.com/adap/flower/pull/3425), " +"[#3520](https://github.com/adap/flower/pull/3520), " +"[#3286](https://github.com/adap/flower/pull/3286), " +"[#3516](https://github.com/adap/flower/pull/3516), " +"[#3523](https://github.com/adap/flower/pull/3523), " +"[#3545](https://github.com/adap/flower/pull/3545), " +"[#3498](https://github.com/adap/flower/pull/3498), " +"[#3439](https://github.com/adap/flower/pull/3439), " +"[#3440](https://github.com/adap/flower/pull/3440), " +"[#3382](https://github.com/adap/flower/pull/3382), " +"[#3559](https://github.com/adap/flower/pull/3559), " +"[#3432](https://github.com/adap/flower/pull/3432), " +"[#3278](https://github.com/adap/flower/pull/3278), " +"[#3371](https://github.com/adap/flower/pull/3371), " +"[#3519](https://github.com/adap/flower/pull/3519), " +"[#3267](https://github.com/adap/flower/pull/3267), " +"[#3204](https://github.com/adap/flower/pull/3204), " +"[#3274](https://github.com/adap/flower/pull/3274))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:295 msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +"As always, the Flower documentation has received many updates. 
Notable " +"new pages include:" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:297 msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +"[How-to upgrate to Flower Next (Flower Next migration " +"guide)](https://flower.ai/docs/framework/how-to-upgrade-to-flower-" +"next.html)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 -msgid "DifferentialPrivacyClientSideFixedClipping" +#: ../../source/ref-changelog.md:299 +msgid "" +"[How-to run Flower using Docker](https://flower.ai/docs/framework/how-to-" +"run-flower-using-docker.html)" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 -#: of -msgid "Use `fixedclipping_mod` modifier at the client side." +#: ../../source/ref-changelog.md:301 +msgid "" +"[Flower Mods reference](https://flower.ai/docs/framework/ref-" +"api/flwr.client.mod.html#module-flwr.client.mod)" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 -#: of +#: ../../source/ref-changelog.md:303 msgid "" -"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " -"performs clipping on the server-side, " -"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " -"on the client-side, usually by using the built-in `fixedclipping_mod`." 
+"**General updates to Flower Examples** " +"([#3205](https://github.com/adap/flower/pull/3205), " +"[#3226](https://github.com/adap/flower/pull/3226), " +"[#3211](https://github.com/adap/flower/pull/3211), " +"[#3252](https://github.com/adap/flower/pull/3252), " +"[#3427](https://github.com/adap/flower/pull/3427), " +"[#3410](https://github.com/adap/flower/pull/3410), " +"[#3426](https://github.com/adap/flower/pull/3426), " +"[#3228](https://github.com/adap/flower/pull/3228), " +"[#3342](https://github.com/adap/flower/pull/3342), " +"[#3200](https://github.com/adap/flower/pull/3200), " +"[#3202](https://github.com/adap/flower/pull/3202), " +"[#3394](https://github.com/adap/flower/pull/3394), " +"[#3488](https://github.com/adap/flower/pull/3488), " +"[#3329](https://github.com/adap/flower/pull/3329), " +"[#3526](https://github.com/adap/flower/pull/3526), " +"[#3392](https://github.com/adap/flower/pull/3392), " +"[#3474](https://github.com/adap/flower/pull/3474), " +"[#3269](https://github.com/adap/flower/pull/3269))" +msgstr "" + +#: ../../source/ref-changelog.md:305 +msgid "As always, Flower code examples have received many updates." 
+msgstr "" + +#: ../../source/ref-changelog.md:307 +msgid "" +"**General improvements** " +"([#3532](https://github.com/adap/flower/pull/3532), " +"[#3318](https://github.com/adap/flower/pull/3318), " +"[#3565](https://github.com/adap/flower/pull/3565), " +"[#3296](https://github.com/adap/flower/pull/3296), " +"[#3305](https://github.com/adap/flower/pull/3305), " +"[#3246](https://github.com/adap/flower/pull/3246), " +"[#3224](https://github.com/adap/flower/pull/3224), " +"[#3475](https://github.com/adap/flower/pull/3475), " +"[#3297](https://github.com/adap/flower/pull/3297), " +"[#3317](https://github.com/adap/flower/pull/3317), " +"[#3429](https://github.com/adap/flower/pull/3429), " +"[#3196](https://github.com/adap/flower/pull/3196), " +"[#3534](https://github.com/adap/flower/pull/3534), " +"[#3240](https://github.com/adap/flower/pull/3240), " +"[#3365](https://github.com/adap/flower/pull/3365), " +"[#3407](https://github.com/adap/flower/pull/3407), " +"[#3563](https://github.com/adap/flower/pull/3563), " +"[#3344](https://github.com/adap/flower/pull/3344), " +"[#3330](https://github.com/adap/flower/pull/3330), " +"[#3436](https://github.com/adap/flower/pull/3436), " +"[#3300](https://github.com/adap/flower/pull/3300), " +"[#3327](https://github.com/adap/flower/pull/3327), " +"[#3254](https://github.com/adap/flower/pull/3254), " +"[#3253](https://github.com/adap/flower/pull/3253), " +"[#3419](https://github.com/adap/flower/pull/3419), " +"[#3289](https://github.com/adap/flower/pull/3289), " +"[#3208](https://github.com/adap/flower/pull/3208), " +"[#3245](https://github.com/adap/flower/pull/3245), " +"[#3319](https://github.com/adap/flower/pull/3319), " +"[#3203](https://github.com/adap/flower/pull/3203), " +"[#3423](https://github.com/adap/flower/pull/3423), " +"[#3352](https://github.com/adap/flower/pull/3352), " +"[#3292](https://github.com/adap/flower/pull/3292), " +"[#3261](https://github.com/adap/flower/pull/3261))" msgstr "" -#: 
flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 -#: of +#: ../../source/ref-changelog.md:311 +msgid "**Deprecate Python 3.8 support**" +msgstr "" + +#: ../../source/ref-changelog.md:313 msgid "" -"The noise multiplier for the Gaussian mechanism for model updates. A " -"value of 1.0 or higher is recommended for strong privacy." +"Python 3.8 will stop receiving security fixes in [October " +"2024](https://devguide.python.org/versions/). Support for Python 3.8 is " +"now deprecated and will be removed in an upcoming release." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 -#: of -msgid "The value of the clipping norm." +#: ../../source/ref-changelog.md:315 +msgid "" +"**Deprecate (experimental)** `flower-driver-api` **and** `flower-fleet-" +"api` ([#3416](https://github.com/adap/flower/pull/3416), " +"[#3420](https://github.com/adap/flower/pull/3420))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 -#: of +#: ../../source/ref-changelog.md:317 msgid "" -"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " -"wrapper:" +"Flower 1.9 deprecates the two (experimental) commands `flower-driver-api`" +" and `flower-fleet-api`. Both commands will be removed in an upcoming " +"release. Use `flower-superlink` instead." 
msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 -#: of -msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" +#: ../../source/ref-changelog.md:319 +msgid "" +"**Deprecate** `--server` **in favor of** `--superlink` " +"([#3518](https://github.com/adap/flower/pull/3518))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:321 msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +"The commands `flower-server-app` and `flower-client-app` should use " +"`--superlink` instead of the now deprecated `--server`. Support for " +"`--server` will be removed in a future release." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:325 msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +"**Replace** `flower-superlink` **CLI option** `--certificates` **with** " +"`--ssl-ca-certfile` **,** `--ssl-certfile` **and** `--ssl-keyfile` " +"([#3512](https://github.com/adap/flower/pull/3512), " +"[#3408](https://github.com/adap/flower/pull/3408))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 -#: of -msgid "Add noise to the aggregated parameters." +#: ../../source/ref-changelog.md:327 +msgid "" +"SSL-related `flower-superlink` CLI arguments were restructured in an " +"incompatible way. Instead of passing a single `--certificates` flag with " +"three values, you now need to pass three flags (`--ssl-ca-certfile`, " +"`--ssl-certfile` and `--ssl-keyfile`) with one value each. 
Check out the " +"[SSL connections](https://flower.ai/docs/framework/how-to-enable-ssl-" +"connections.html) documentation page for details." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:329 msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +"**Remove SuperLink** `--vce` **option** " +"([#3513](https://github.com/adap/flower/pull/3513))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:331 msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +"Instead of separately starting a SuperLink and a `ServerApp` for " +"simulation, simulations must now be started using the single `flower-" +"simulation` command." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:333 msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +"**Merge** `--grpc-rere` **and** `--rest` **SuperLink options** " +"([#3527](https://github.com/adap/flower/pull/3527))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:335 msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +"To simplify the usage of `flower-superlink`, previously separate sets of " +"CLI options for gRPC and REST were merged into one unified set of " +"options. Consult the [Flower CLI reference " +"documentation](https://flower.ai/docs/framework/ref-api-cli.html) for " +"details." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 -msgid "DifferentialPrivacyServerSideAdaptiveClipping" +#: ../../source/ref-changelog.md:337 +msgid "v1.8.0 (2024-04-03)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 -#: of +#: ../../source/ref-changelog.md:343 msgid "" -"The standard deviation of the noise added to the count of updates below " -"the estimate. Andrew et al. recommends to set to " -"`expected_num_records/20`" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata " +"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " +"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " +"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " +"`tabdar-khan` " msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 -#: of +#: ../../source/ref-changelog.md:347 msgid "" -"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " -"wrapper" +"**Introduce Flower Next high-level API (stable)** " +"([#3002](https://github.com/adap/flower/pull/3002), " +"[#2934](https://github.com/adap/flower/pull/2934), " +"[#2958](https://github.com/adap/flower/pull/2958), " +"[#3173](https://github.com/adap/flower/pull/3173), " +"[#3174](https://github.com/adap/flower/pull/3174), " +"[#2923](https://github.com/adap/flower/pull/2923), " +"[#2691](https://github.com/adap/flower/pull/2691), " +"[#3079](https://github.com/adap/flower/pull/3079), " +"[#2961](https://github.com/adap/flower/pull/2961), " +"[#2924](https://github.com/adap/flower/pull/2924), " +"[#3166](https://github.com/adap/flower/pull/3166), " +"[#3031](https://github.com/adap/flower/pull/3031), " +"[#3057](https://github.com/adap/flower/pull/3057), " +"[#3000](https://github.com/adap/flower/pull/3000), " +"[#3113](https://github.com/adap/flower/pull/3113), " 
+"[#2957](https://github.com/adap/flower/pull/2957), " +"[#3183](https://github.com/adap/flower/pull/3183), " +"[#3180](https://github.com/adap/flower/pull/3180), " +"[#3035](https://github.com/adap/flower/pull/3035), " +"[#3189](https://github.com/adap/flower/pull/3189), " +"[#3185](https://github.com/adap/flower/pull/3185), " +"[#3190](https://github.com/adap/flower/pull/3190), " +"[#3191](https://github.com/adap/flower/pull/3191), " +"[#3195](https://github.com/adap/flower/pull/3195), " +"[#3197](https://github.com/adap/flower/pull/3197))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:349 msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +"The Flower Next high-level API is stable! Flower Next is the future of " +"Flower - all new features (like Flower Mods) will be built on top of it. " +"You can start to migrate your existing projects to Flower Next by using " +"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " +"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." +" Flower Next allows you to run multiple projects concurrently (we call " +"this multi-run) and execute the same project in either simulation " +"environments or deployment environments without having to change a single" +" line of code. The best part? It's fully compatible with existing Flower " +"projects that use `Strategy`, `NumPyClient` & co." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:351 msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +"**Introduce Flower Next low-level API (preview)** " +"([#3062](https://github.com/adap/flower/pull/3062), " +"[#3034](https://github.com/adap/flower/pull/3034), " +"[#3069](https://github.com/adap/flower/pull/3069))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:353 msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +"In addition to the Flower Next *high-level* API that uses `Strategy`, " +"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " +"new Flower Next *low-level* API. The low-level API allows for granular " +"control of every aspect of the learning process by sending/receiving " +"individual messages to/from client nodes. The new `ServerApp` supports " +"registering a custom `main` function that allows writing custom training " +"loops for methods like async FL, cyclic training, or federated analytics." +" The new `ClientApp` supports registering `train`, `evaluate` and `query`" +" functions that can access the raw message received from the `ServerApp`." +" New abstractions like `RecordSet`, `Message` and `Context` further " +"enable sending multiple models, multiple sets of config values and " +"metrics, stateful computations on the client node and implementations of " +"custom SMPC protocols, to name just a few." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:355 msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +"**Introduce Flower Mods (preview)** " +"([#3054](https://github.com/adap/flower/pull/3054), " +"[#2911](https://github.com/adap/flower/pull/2911), " +"[#3083](https://github.com/adap/flower/pull/3083))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:357 msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +"Flower Modifiers (we call them Mods) can intercept messages and analyze, " +"edit or handle them directly. Mods can be used to develop pluggable " +"modules that work across different projects. Flower 1.8 already includes " +"mods to log the size of a message, the number of parameters sent over the" +" network, differential privacy with fixed clipping and adaptive clipping," +" local differential privacy and secure aggregation protocols SecAgg and " +"SecAgg+. The Flower Mods API is released as a preview, but researchers " +"can already use it to experiment with arbirtrary SMPC protocols." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:359 msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +"**Fine-tune LLMs with LLM FlowerTune** " +"([#3029](https://github.com/adap/flower/pull/3029), " +"[#3089](https://github.com/adap/flower/pull/3089), " +"[#3092](https://github.com/adap/flower/pull/3092), " +"[#3100](https://github.com/adap/flower/pull/3100), " +"[#3114](https://github.com/adap/flower/pull/3114), " +"[#3162](https://github.com/adap/flower/pull/3162), " +"[#3172](https://github.com/adap/flower/pull/3172))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 -msgid "DifferentialPrivacyServerSideFixedClipping" +#: ../../source/ref-changelog.md:361 +msgid "" +"We are introducing LLM FlowerTune, an introductory example that " +"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " +"the Alpaca-GPT4 dataset. The example is built to be easily adapted to use" +" different models and/or datasets. Read our blog post [LLM FlowerTune: " +"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" +"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." 
msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 -#: of +#: ../../source/ref-changelog.md:363 msgid "" -"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping " -"wrapper" +"**Introduce built-in Differential Privacy (preview)** " +"([#2798](https://github.com/adap/flower/pull/2798), " +"[#2959](https://github.com/adap/flower/pull/2959), " +"[#3038](https://github.com/adap/flower/pull/3038), " +"[#3147](https://github.com/adap/flower/pull/3147), " +"[#2909](https://github.com/adap/flower/pull/2909), " +"[#2893](https://github.com/adap/flower/pull/2893), " +"[#2892](https://github.com/adap/flower/pull/2892), " +"[#3039](https://github.com/adap/flower/pull/3039), " +"[#3074](https://github.com/adap/flower/pull/3074))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:365 msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +"Built-in Differential Privacy is here! Flower supports both central and " +"local differential privacy (DP). Central DP can be configured with either" +" fixed or adaptive clipping. The clipping can happen either on the " +"server-side or the client-side. Local DP does both clipping and noising " +"on the client-side. A new documentation page [explains Differential " +"Privacy approaches](https://flower.ai/docs/framework/explanation-" +"differential-privacy.html) and a new how-to guide describes [how to use " +"the new Differential Privacy components](https://flower.ai/docs/framework" +"/how-to-use-differential-privacy.html) in Flower." 
msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:367 msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +"**Introduce built-in Secure Aggregation (preview)** " +"([#3120](https://github.com/adap/flower/pull/3120), " +"[#3110](https://github.com/adap/flower/pull/3110), " +"[#3108](https://github.com/adap/flower/pull/3108))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 -#: of -msgid "Compute the updates, clip, and pass them for aggregation." +#: ../../source/ref-changelog.md:369 +msgid "" +"Built-in Secure Aggregation is here! Flower now supports different secure" +" aggregation protocols out-of-the-box. The best part? You can add secure " +"aggregation to your Flower projects with only a few lines of code. In " +"this initial release, we inlcude support for SecAgg and SecAgg+, but more" +" protocols will be implemented shortly. We'll also add detailed docs that" +" explain secure aggregation and how to use it in Flower. You can already " +"check out the new code example that shows how to use Flower to easily " +"combine Federated Learning, Differential Privacy and Secure Aggregation " +"in the same project." 
msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:371 msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +"**Introduce** `flwr` **CLI (preview)** " +"([#2942](https://github.com/adap/flower/pull/2942), " +"[#3055](https://github.com/adap/flower/pull/3055), " +"[#3111](https://github.com/adap/flower/pull/3111), " +"[#3130](https://github.com/adap/flower/pull/3130), " +"[#3136](https://github.com/adap/flower/pull/3136), " +"[#3094](https://github.com/adap/flower/pull/3094), " +"[#3059](https://github.com/adap/flower/pull/3059), " +"[#3049](https://github.com/adap/flower/pull/3049), " +"[#3142](https://github.com/adap/flower/pull/3142))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:373 msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" +" and then running them using the Simulation Engine (`flwr run`)." 
msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:375 msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +"**Introduce Flower Next Simulation Engine** " +"([#3024](https://github.com/adap/flower/pull/3024), " +"[#3061](https://github.com/adap/flower/pull/3061), " +"[#2997](https://github.com/adap/flower/pull/2997), " +"[#2783](https://github.com/adap/flower/pull/2783), " +"[#3184](https://github.com/adap/flower/pull/3184), " +"[#3075](https://github.com/adap/flower/pull/3075), " +"[#3047](https://github.com/adap/flower/pull/3047), " +"[#2998](https://github.com/adap/flower/pull/2998), " +"[#3009](https://github.com/adap/flower/pull/3009), " +"[#3008](https://github.com/adap/flower/pull/3008))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:377 msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +"The Flower Simulation Engine can now run Flower Next projects. For " +"notebook environments, there's also a new `run_simulation` function that " +"can run `ServerApp` and `ClientApp`." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 -#: of -msgid "Afterward, add noise to the aggregated parameters." +#: ../../source/ref-changelog.md:379 +msgid "" +"**Handle SuperNode connection errors** " +"([#2969](https://github.com/adap/flower/pull/2969))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 -msgid "FaultTolerantFedAvg" +#: ../../source/ref-changelog.md:381 +msgid "" +"A SuperNode will now try to reconnect indefinitely to the SuperLink in " +"case of connection errors. The arguments `--max-retries` and `--max-wait-" +"time` can now be passed to the `flower-client-app` command. 
`--max-" +"retries` will define the number of tentatives the client should make " +"before it gives up trying to reconnect to the SuperLink, and, `--max-" +"wait-time` defines the time before the SuperNode gives up trying to " +"reconnect to the SuperLink." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:383 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"**General updates to Flower Baselines** " +"([#2904](https://github.com/adap/flower/pull/2904), " +"[#2482](https://github.com/adap/flower/pull/2482), " +"[#2985](https://github.com/adap/flower/pull/2985), " +"[#2968](https://github.com/adap/flower/pull/2968))" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:385 msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " +"baseline. Several other baselined have been updated as well." 
msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 -#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 -#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 -#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using weighted average." +#: ../../source/ref-changelog.md:387 +msgid "" +"**Improve documentation and translations** " +"([#3050](https://github.com/adap/flower/pull/3050), " +"[#3044](https://github.com/adap/flower/pull/3044), " +"[#3043](https://github.com/adap/flower/pull/3043), " +"[#2986](https://github.com/adap/flower/pull/2986), " +"[#3041](https://github.com/adap/flower/pull/3041), " +"[#3046](https://github.com/adap/flower/pull/3046), " +"[#3042](https://github.com/adap/flower/pull/3042), " +"[#2978](https://github.com/adap/flower/pull/2978), " +"[#2952](https://github.com/adap/flower/pull/2952), " +"[#3167](https://github.com/adap/flower/pull/3167), " +"[#2953](https://github.com/adap/flower/pull/2953), " +"[#3045](https://github.com/adap/flower/pull/3045), " +"[#2654](https://github.com/adap/flower/pull/2654), " +"[#3082](https://github.com/adap/flower/pull/3082), " +"[#2990](https://github.com/adap/flower/pull/2990), " +"[#2989](https://github.com/adap/flower/pull/2989))" msgstr "" -#: 
flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:389 msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"As usual, we merged many smaller and larger improvements to the " +"documentation. A special thank you goes to [Sebastian van der " +"Voort](https://github.com/svdvoort) for landing a big documentation PR!" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:391 msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**General updates to Flower Examples** " +"([3134](https://github.com/adap/flower/pull/3134), " +"[2996](https://github.com/adap/flower/pull/2996), " +"[2930](https://github.com/adap/flower/pull/2930), " +"[2967](https://github.com/adap/flower/pull/2967), " +"[2467](https://github.com/adap/flower/pull/2467), " +"[2910](https://github.com/adap/flower/pull/2910), " +"[#2918](https://github.com/adap/flower/pull/2918), " +"[#2773](https://github.com/adap/flower/pull/2773), " +"[#3063](https://github.com/adap/flower/pull/3063), " +"[#3116](https://github.com/adap/flower/pull/3116), " +"[#3117](https://github.com/adap/flower/pull/3117))" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:393 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"Two new examples show federated training of a Vision Transformer (ViT) " +"and federated learning in a medical context using the popular MONAI " +"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" +" new Flower Next `ServerApp` and `ClientApp`. Many other examples " +"received considerable updates as well." 
msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:395 msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"**General improvements** " +"([#3171](https://github.com/adap/flower/pull/3171), " +"[3099](https://github.com/adap/flower/pull/3099), " +"[3003](https://github.com/adap/flower/pull/3003), " +"[3145](https://github.com/adap/flower/pull/3145), " +"[3017](https://github.com/adap/flower/pull/3017), " +"[3085](https://github.com/adap/flower/pull/3085), " +"[3012](https://github.com/adap/flower/pull/3012), " +"[3119](https://github.com/adap/flower/pull/3119), " +"[2991](https://github.com/adap/flower/pull/2991), " +"[2970](https://github.com/adap/flower/pull/2970), " +"[2980](https://github.com/adap/flower/pull/2980), " +"[3086](https://github.com/adap/flower/pull/3086), " +"[2932](https://github.com/adap/flower/pull/2932), " +"[2928](https://github.com/adap/flower/pull/2928), " +"[2941](https://github.com/adap/flower/pull/2941), " +"[2933](https://github.com/adap/flower/pull/2933), " +"[3181](https://github.com/adap/flower/pull/3181), " +"[2973](https://github.com/adap/flower/pull/2973), " +"[2992](https://github.com/adap/flower/pull/2992), " +"[2915](https://github.com/adap/flower/pull/2915), " +"[3040](https://github.com/adap/flower/pull/3040), " +"[3022](https://github.com/adap/flower/pull/3022), " +"[3032](https://github.com/adap/flower/pull/3032), " +"[2902](https://github.com/adap/flower/pull/2902), " +"[2931](https://github.com/adap/flower/pull/2931), " +"[3005](https://github.com/adap/flower/pull/3005), " +"[3132](https://github.com/adap/flower/pull/3132), " +"[3115](https://github.com/adap/flower/pull/3115), " +"[2944](https://github.com/adap/flower/pull/2944), " +"[3064](https://github.com/adap/flower/pull/3064), " +"[3106](https://github.com/adap/flower/pull/3106), " +"[2974](https://github.com/adap/flower/pull/2974), " 
+"[3178](https://github.com/adap/flower/pull/3178), " +"[2993](https://github.com/adap/flower/pull/2993), " +"[3186](https://github.com/adap/flower/pull/3186), " +"[3091](https://github.com/adap/flower/pull/3091), " +"[3125](https://github.com/adap/flower/pull/3125), " +"[3093](https://github.com/adap/flower/pull/3093), " +"[3013](https://github.com/adap/flower/pull/3013), " +"[3033](https://github.com/adap/flower/pull/3033), " +"[3133](https://github.com/adap/flower/pull/3133), " +"[3068](https://github.com/adap/flower/pull/3068), " +"[2916](https://github.com/adap/flower/pull/2916), " +"[2975](https://github.com/adap/flower/pull/2975), " +"[2984](https://github.com/adap/flower/pull/2984), " +"[2846](https://github.com/adap/flower/pull/2846), " +"[3077](https://github.com/adap/flower/pull/3077), " +"[3143](https://github.com/adap/flower/pull/3143), " +"[2921](https://github.com/adap/flower/pull/2921), " +"[3101](https://github.com/adap/flower/pull/3101), " +"[2927](https://github.com/adap/flower/pull/2927), " +"[2995](https://github.com/adap/flower/pull/2995), " +"[2972](https://github.com/adap/flower/pull/2972), " +"[2912](https://github.com/adap/flower/pull/2912), " +"[3065](https://github.com/adap/flower/pull/3065), " +"[3028](https://github.com/adap/flower/pull/3028), " +"[2922](https://github.com/adap/flower/pull/2922), " +"[2982](https://github.com/adap/flower/pull/2982), " +"[2914](https://github.com/adap/flower/pull/2914), " +"[3179](https://github.com/adap/flower/pull/3179), " +"[3080](https://github.com/adap/flower/pull/3080), " +"[2994](https://github.com/adap/flower/pull/2994), " +"[3187](https://github.com/adap/flower/pull/3187), " +"[2926](https://github.com/adap/flower/pull/2926), " +"[3018](https://github.com/adap/flower/pull/3018), " +"[3144](https://github.com/adap/flower/pull/3144), " +"[3011](https://github.com/adap/flower/pull/3011), " +"[#3152](https://github.com/adap/flower/pull/3152), " +"[#2836](https://github.com/adap/flower/pull/2836), " 
+"[#2929](https://github.com/adap/flower/pull/2929), " +"[#2943](https://github.com/adap/flower/pull/2943), " +"[#2955](https://github.com/adap/flower/pull/2955), " +"[#2954](https://github.com/adap/flower/pull/2954))" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-changelog.md:401 +msgid "v1.7.0 (2024-02-05)" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:407 msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " +"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " +"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " +"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " +"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " +"Shaaban`, `Yan Gao`, `Yasar Abbas` " msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 -#: ../../source/ref-changelog.md:905 -msgid "FedAdagrad" +#: ../../source/ref-changelog.md:411 +msgid "" +"**Introduce stateful clients (experimental)** " +"([#2770](https://github.com/adap/flower/pull/2770), " +"[#2686](https://github.com/adap/flower/pull/2686), " +"[#2696](https://github.com/adap/flower/pull/2696), " +"[#2643](https://github.com/adap/flower/pull/2643), " +"[#2769](https://github.com/adap/flower/pull/2769))" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:1 -#: flwr.server.strategy.fedadam.FedAdam:1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" +#: ../../source/ref-changelog.md:413 +msgid "" +"Subclasses of `Client` and `NumPyClient` can now store local state that " +"remains on the client. 
Let's start with the highlight first: this new " +"feature is compatible with both simulated clients (via " +"`start_simulation`) and networked clients (via `start_client`). It's also" +" the first preview of new abstractions like `Context` and `RecordSet`. " +"Clients can access state of type `RecordSet` via `state: RecordSet = " +"self.context.state`. Changes to this `RecordSet` are preserved across " +"different rounds of execution to enable stateful computations in a " +"unified way across simulation and deployment." msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:3 -#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 -#: flwr.server.strategy.fedyogi.FedYogi:3 of -msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" +#: ../../source/ref-changelog.md:415 +msgid "" +"**Improve performance** " +"([#2293](https://github.com/adap/flower/pull/2293))" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:21 -#: flwr.server.strategy.fedadagrad.FedAdagrad:23 -#: flwr.server.strategy.fedadam.FedAdam:25 -#: flwr.server.strategy.fedadam.FedAdam:27 -#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 -#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 -#: flwr.server.strategy.fedprox.FedProx:61 -#: flwr.server.strategy.fedprox.FedProx:63 -#: flwr.server.strategy.fedyogi.FedYogi:28 -#: flwr.server.strategy.fedyogi.FedYogi:30 of -msgid "Metrics aggregation function, optional." +#: ../../source/ref-changelog.md:417 +msgid "" +"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" +"place aggregation to reduce memory consumption. The Flower client " +"serialization/deserialization has been rewritten from the ground up, " +"which results in significant speedups, especially when the client-side " +"training time is short." 
msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:29 -#: flwr.server.strategy.fedadam.FedAdam:29 -#: flwr.server.strategy.fedopt.FedOpt:29 of -msgid "Server-side learning rate. Defaults to 1e-1." +#: ../../source/ref-changelog.md:419 +msgid "" +"**Support Federated Learning with Apple MLX and Flower** " +"([#2693](https://github.com/adap/flower/pull/2693))" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:31 -#: flwr.server.strategy.fedadam.FedAdam:31 -#: flwr.server.strategy.fedopt.FedOpt:31 of -msgid "Client-side learning rate. Defaults to 1e-1." +#: ../../source/ref-changelog.md:421 +msgid "" +"Flower has official support for federated learning using [Apple " +"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " +"example." msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:33 -#: flwr.server.strategy.fedadam.FedAdam:37 -#: flwr.server.strategy.fedopt.FedOpt:37 of -msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." +#: ../../source/ref-changelog.md:423 +msgid "" +"**Introduce new XGBoost cyclic strategy** " +"([#2666](https://github.com/adap/flower/pull/2666), " +"[#2668](https://github.com/adap/flower/pull/2668))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:425 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"A new strategy called `FedXgbCyclic` supports a client-by-client style of" +" training (often called cyclic). The `xgboost-comprehensive` code example" +" shows how to use it in a full project. In addition to that, `xgboost-" +"comprehensive` now also supports simulation mode. With this, Flower " +"offers best-in-class XGBoost support." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:427 msgid "" -":py:obj:`aggregate_fit `\\" -" \\(server\\_round\\, results\\, failures\\)" +"**Support Python 3.11** " +"([#2394](https://github.com/adap/flower/pull/2394))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:429 msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " +"ensure better support for users using more recent Python versions." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:431 msgid "" -":py:obj:`configure_fit `\\" -" \\(server\\_round\\, parameters\\, ...\\)" +"**Update gRPC and ProtoBuf dependencies** " +"([#2814](https://github.com/adap/flower/pull/2814))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:433 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"The `grpcio` and `protobuf` dependencies were updated to their latest " +"versions for improved security and performance." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:435 msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"**Introduce Docker image for Flower server** " +"([#2700](https://github.com/adap/flower/pull/2700), " +"[#2688](https://github.com/adap/flower/pull/2688), " +"[#2705](https://github.com/adap/flower/pull/2705), " +"[#2695](https://github.com/adap/flower/pull/2695), " +"[#2747](https://github.com/adap/flower/pull/2747), " +"[#2746](https://github.com/adap/flower/pull/2746), " +"[#2680](https://github.com/adap/flower/pull/2680), " +"[#2682](https://github.com/adap/flower/pull/2682), " +"[#2701](https://github.com/adap/flower/pull/2701))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:437 msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"The Flower server can now be run using an official Docker image. A new " +"how-to guide explains [how to run Flower using " +"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html). An official Flower client Docker image will follow." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:439 msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"**Introduce** `flower-via-docker-compose` **example** " +"([#2626](https://github.com/adap/flower/pull/2626))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 -msgid "FedAdam" +#: ../../source/ref-changelog.md:441 +msgid "" +"**Introduce** `quickstart-sklearn-tabular` **example** " +"([#2719](https://github.com/adap/flower/pull/2719))" msgstr "" -#: flwr.server.strategy.fedadam.FedAdam:33 -#: flwr.server.strategy.fedyogi.FedYogi:36 of -msgid "Momentum parameter. Defaults to 0.9." 
+#: ../../source/ref-changelog.md:443 +msgid "" +"**Introduce** `custom-metrics` **example** " +"([#1958](https://github.com/adap/flower/pull/1958))" msgstr "" -#: flwr.server.strategy.fedadam.FedAdam:35 -#: flwr.server.strategy.fedyogi.FedYogi:38 of -msgid "Second moment parameter. Defaults to 0.99." +#: ../../source/ref-changelog.md:445 +msgid "" +"**Update code examples to use Flower Datasets** " +"([#2450](https://github.com/adap/flower/pull/2450), " +"[#2456](https://github.com/adap/flower/pull/2456), " +"[#2318](https://github.com/adap/flower/pull/2318), " +"[#2712](https://github.com/adap/flower/pull/2712))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:447 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +"Several code examples were updated to use [Flower " +"Datasets](https://flower.ai/docs/datasets/)." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:449 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**General updates to Flower Examples** " +"([#2381](https://github.com/adap/flower/pull/2381), " +"[#2805](https://github.com/adap/flower/pull/2805), " +"[#2782](https://github.com/adap/flower/pull/2782), " +"[#2806](https://github.com/adap/flower/pull/2806), " +"[#2829](https://github.com/adap/flower/pull/2829), " +"[#2825](https://github.com/adap/flower/pull/2825), " +"[#2816](https://github.com/adap/flower/pull/2816), " +"[#2726](https://github.com/adap/flower/pull/2726), " +"[#2659](https://github.com/adap/flower/pull/2659), " +"[#2655](https://github.com/adap/flower/pull/2655))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +#: ../../source/ref-changelog.md:451 +msgid "Many Flower code examples received substantial updates." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-changelog.md:453 ../../source/ref-changelog.md:546 +msgid "**Update Flower Baselines**" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:455 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " +"[#2771](https://github.com/adap/flower/pull/2771))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-changelog.md:456 +msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-changelog.md:457 +msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-changelog.md:458 +msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 -msgid "FedAvg" +#: ../../source/ref-changelog.md:459 +msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:3 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of -msgid "Implementation based on https://arxiv.org/abs/1602.05629" +#: ../../source/ref-changelog.md:460 +msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 -#: of +#: ../../source/ref-changelog.md:462 msgid "" 
-"Fraction of clients used during training. In case `min_fit_clients` is " -"larger than `fraction_fit * available_clients`, `min_fit_clients` will " -"still be sampled. Defaults to 1.0." +"**Improve documentation** " +"([#2674](https://github.com/adap/flower/pull/2674), " +"[#2480](https://github.com/adap/flower/pull/2480), " +"[#2826](https://github.com/adap/flower/pull/2826), " +"[#2727](https://github.com/adap/flower/pull/2727), " +"[#2761](https://github.com/adap/flower/pull/2761), " +"[#2900](https://github.com/adap/flower/pull/2900))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 -#: of +#: ../../source/ref-changelog.md:464 msgid "" -"Fraction of clients used during validation. In case " -"`min_evaluate_clients` is larger than `fraction_evaluate * " -"available_clients`, `min_evaluate_clients` will still be sampled. " -"Defaults to 1.0." +"**Improved testing and development infrastructure** " +"([#2797](https://github.com/adap/flower/pull/2797), " +"[#2676](https://github.com/adap/flower/pull/2676), " +"[#2644](https://github.com/adap/flower/pull/2644), " +"[#2656](https://github.com/adap/flower/pull/2656), " +"[#2848](https://github.com/adap/flower/pull/2848), " +"[#2675](https://github.com/adap/flower/pull/2675), " +"[#2735](https://github.com/adap/flower/pull/2735), " +"[#2767](https://github.com/adap/flower/pull/2767), " +"[#2732](https://github.com/adap/flower/pull/2732), " +"[#2744](https://github.com/adap/flower/pull/2744), " +"[#2681](https://github.com/adap/flower/pull/2681), " +"[#2699](https://github.com/adap/flower/pull/2699), " +"[#2745](https://github.com/adap/flower/pull/2745), " +"[#2734](https://github.com/adap/flower/pull/2734), " +"[#2731](https://github.com/adap/flower/pull/2731), " +"[#2652](https://github.com/adap/flower/pull/2652), " +"[#2720](https://github.com/adap/flower/pull/2720), " +"[#2721](https://github.com/adap/flower/pull/2721), " 
+"[#2717](https://github.com/adap/flower/pull/2717), " +"[#2864](https://github.com/adap/flower/pull/2864), " +"[#2694](https://github.com/adap/flower/pull/2694), " +"[#2709](https://github.com/adap/flower/pull/2709), " +"[#2658](https://github.com/adap/flower/pull/2658), " +"[#2796](https://github.com/adap/flower/pull/2796), " +"[#2692](https://github.com/adap/flower/pull/2692), " +"[#2657](https://github.com/adap/flower/pull/2657), " +"[#2813](https://github.com/adap/flower/pull/2813), " +"[#2661](https://github.com/adap/flower/pull/2661), " +"[#2398](https://github.com/adap/flower/pull/2398))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:33 of -msgid "Enable (True) or disable (False) in-place aggregation of model updates." +#: ../../source/ref-changelog.md:466 +msgid "" +"The Flower testing and development infrastructure has received " +"substantial updates. This makes Flower 1.7 the most tested release ever." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:468 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +"**Update dependencies** " +"([#2753](https://github.com/adap/flower/pull/2753), " +"[#2651](https://github.com/adap/flower/pull/2651), " +"[#2739](https://github.com/adap/flower/pull/2739), " +"[#2837](https://github.com/adap/flower/pull/2837), " +"[#2788](https://github.com/adap/flower/pull/2788), " +"[#2811](https://github.com/adap/flower/pull/2811), " +"[#2774](https://github.com/adap/flower/pull/2774), " +"[#2790](https://github.com/adap/flower/pull/2790), " +"[#2751](https://github.com/adap/flower/pull/2751), " +"[#2850](https://github.com/adap/flower/pull/2850), " +"[#2812](https://github.com/adap/flower/pull/2812), " +"[#2872](https://github.com/adap/flower/pull/2872), " +"[#2736](https://github.com/adap/flower/pull/2736), " +"[#2756](https://github.com/adap/flower/pull/2756), " +"[#2857](https://github.com/adap/flower/pull/2857), " 
+"[#2757](https://github.com/adap/flower/pull/2757), " +"[#2810](https://github.com/adap/flower/pull/2810), " +"[#2740](https://github.com/adap/flower/pull/2740), " +"[#2789](https://github.com/adap/flower/pull/2789))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:470 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**General improvements** " +"([#2803](https://github.com/adap/flower/pull/2803), " +"[#2847](https://github.com/adap/flower/pull/2847), " +"[#2877](https://github.com/adap/flower/pull/2877), " +"[#2690](https://github.com/adap/flower/pull/2690), " +"[#2889](https://github.com/adap/flower/pull/2889), " +"[#2874](https://github.com/adap/flower/pull/2874), " +"[#2819](https://github.com/adap/flower/pull/2819), " +"[#2689](https://github.com/adap/flower/pull/2689), " +"[#2457](https://github.com/adap/flower/pull/2457), " +"[#2870](https://github.com/adap/flower/pull/2870), " +"[#2669](https://github.com/adap/flower/pull/2669), " +"[#2876](https://github.com/adap/flower/pull/2876), " +"[#2885](https://github.com/adap/flower/pull/2885), " +"[#2858](https://github.com/adap/flower/pull/2858), " +"[#2867](https://github.com/adap/flower/pull/2867), " +"[#2351](https://github.com/adap/flower/pull/2351), " +"[#2886](https://github.com/adap/flower/pull/2886), " +"[#2860](https://github.com/adap/flower/pull/2860), " +"[#2828](https://github.com/adap/flower/pull/2828), " +"[#2869](https://github.com/adap/flower/pull/2869), " +"[#2875](https://github.com/adap/flower/pull/2875), " +"[#2733](https://github.com/adap/flower/pull/2733), " +"[#2488](https://github.com/adap/flower/pull/2488), " +"[#2646](https://github.com/adap/flower/pull/2646), " +"[#2879](https://github.com/adap/flower/pull/2879), " +"[#2821](https://github.com/adap/flower/pull/2821), " +"[#2855](https://github.com/adap/flower/pull/2855), " +"[#2800](https://github.com/adap/flower/pull/2800), " 
+"[#2807](https://github.com/adap/flower/pull/2807), " +"[#2801](https://github.com/adap/flower/pull/2801), " +"[#2804](https://github.com/adap/flower/pull/2804), " +"[#2851](https://github.com/adap/flower/pull/2851), " +"[#2787](https://github.com/adap/flower/pull/2787), " +"[#2852](https://github.com/adap/flower/pull/2852), " +"[#2672](https://github.com/adap/flower/pull/2672), " +"[#2759](https://github.com/adap/flower/pull/2759))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:474 msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +"**Deprecate** `start_numpy_client` " +"([#2563](https://github.com/adap/flower/pull/2563), " +"[#2718](https://github.com/adap/flower/pull/2718))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:476 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Until now, clients of type `NumPyClient` needed to be started via " +"`start_numpy_client`. In our efforts to consolidate framework APIs, we " +"have introduced changes, and now all client types should start via " +"`start_client`. To continue using `NumPyClient` clients, you simply need " +"to first call the `.to_client()` method and then pass returned `Client` " +"object to `start_client`. The examples and the documentation have been " +"updated accordingly." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:478 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"**Deprecate legacy DP wrappers** " +"([#2749](https://github.com/adap/flower/pull/2749))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:480 msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"Legacy DP wrapper classes are deprecated, but still functional. 
This is " +"in preparation for an all-new pluggable version of differential privacy " +"support in Flower." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:482 msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"**Make optional arg** `--callable` **in** `flower-client` **a required " +"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:484 msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " +"([#2890](https://github.com/adap/flower/pull/2890))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 -msgid "FedAvgAndroid" +#: ../../source/ref-changelog.md:486 +msgid "" +"**Drop experimental** `Task` **fields** " +"([#2866](https://github.com/adap/flower/pull/2866), " +"[#2865](https://github.com/adap/flower/pull/2865))" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:488 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"Experimental fields `sa`, `legacy_server_message` and " +"`legacy_client_message` were removed from `Task` message. The removed " +"fields are superseded by the new `RecordSet` abstraction." 
msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:490 msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**Retire MXNet examples** " +"([#2724](https://github.com/adap/flower/pull/2724))" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:492 msgid "" -":py:obj:`bytes_to_ndarray " -"`\\ \\(tensor\\)" +"The development of the MXNet fremework has ended and the project is now " +"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " +"examples won't receive updates." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of -msgid "Deserialize NumPy array from bytes." +#: ../../source/ref-changelog.md:494 +msgid "v1.6.0 (2023-11-28)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:500 msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " +"`Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Gabriel " +"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," +" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " +"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " +"`cnxdeveloper`, `k3nfalt` " msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:504 msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Add experimental support for Python 3.12** " +"([#2565](https://github.com/adap/flower/pull/2565))" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:506 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"**Add new XGBoost examples** " +"([#2612](https://github.com/adap/flower/pull/2612), " +"[#2554](https://github.com/adap/flower/pull/2554), " +"[#2617](https://github.com/adap/flower/pull/2617), " +"[#2618](https://github.com/adap/flower/pull/2618), " +"[#2619](https://github.com/adap/flower/pull/2619), " +"[#2567](https://github.com/adap/flower/pull/2567))" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:508 msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"We have added a new `xgboost-quickstart` example alongside a new " +"`xgboost-comprehensive` example that goes more in-depth." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:510 msgid "" -":py:obj:`ndarray_to_bytes " -"`\\ \\(ndarray\\)" -msgstr "" - -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of -msgid "Serialize NumPy array to bytes." 
+"**Add Vertical FL example** " +"([#2598](https://github.com/adap/flower/pull/2598))" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:512 msgid "" -":py:obj:`ndarrays_to_parameters " -"`\\ " -"\\(ndarrays\\)" +"We had many questions about Vertical Federated Learning using Flower, so " +"we decided to add an simple example for it on the [Titanic " +"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " +"tutorial (in the README)." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:514 msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"**Support custom** `ClientManager` **in** `start_driver()` " +"([#2292](https://github.com/adap/flower/pull/2292))" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:516 msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"**Update REST API to support create and delete nodes** " +"([#2283](https://github.com/adap/flower/pull/2283))" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:518 msgid "" -":py:obj:`parameters_to_ndarrays " -"`\\ " -"\\(parameters\\)" -msgstr "" - -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 -#: of -msgid "Convert parameters object to NumPy weights." 
-msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 -msgid "FedAvgM" +"**Update the Android SDK** " +"([#2187](https://github.com/adap/flower/pull/2187))" msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM:3 of -msgid "Implementation based on https://arxiv.org/abs/1909.06335" +#: ../../source/ref-changelog.md:520 +msgid "Add gRPC request-response capability to the Android SDK." msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM:25 of +#: ../../source/ref-changelog.md:522 msgid "" -"Server-side learning rate used in server-side optimization. Defaults to " -"1.0." +"**Update the C++ SDK** " +"([#2537](https://github.com/adap/flower/pull/2537), " +"[#2528](https://github.com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM:28 of -msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." +#: ../../source/ref-changelog.md:524 +msgid "Add gRPC request-response capability to the C++ SDK." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:526 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +"**Make HTTPS the new default** " +"([#2591](https://github.com/adap/flower/pull/2591), " +"[#2636](https://github.com/adap/flower/pull/2636))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:528 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"Flower is moving to HTTPS by default. The new `flower-server` requires " +"passing `--certificates`, but users can enable `--insecure` to use HTTP " +"for prototyping. 
The same applies to `flower-client`, which can either " +"use user-provided credentials or gRPC-bundled certificates to connect to " +"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " +"enable insecure HTTP connections." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:530 msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +"For backward compatibility, `start_client()` and `start_numpy_client()` " +"will still start in insecure mode by default. In a future release, " +"insecure connections will require user opt-in by passing `insecure=True`." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:532 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:534 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"Using the `client_fn`, Flower clients can interchangeably run as " +"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" +" `start_simulation`) without requiring changes to how the client class is" +" defined and instantiated. The `to_client()` function is introduced to " +"convert a `NumPyClient` to a `Client`." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:536 msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"**Add new** `Bulyan` **strategy** " +"([#1817](https://github.com/adap/flower/pull/1817), " +"[#1891](https://github.com/adap/flower/pull/1891))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:538 msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " +"2018](https://arxiv.org/abs/1802.07927)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:540 msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 -msgid "FedMedian" +"**Add new** `XGB Bagging` **strategy** " +"([#2611](https://github.com/adap/flower/pull/2611))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:542 ../../source/ref-changelog.md:544 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"**Introduce `WorkloadState`** " +"([#2564](https://github.com/adap/flower/pull/2564), " +"[#2632](https://github.com/adap/flower/pull/2632))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:548 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" - -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of -msgid "Aggregate fit results using median." 
+"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " +"[#2286](https://github.com/adap/flower/pull/2286), " +"[#2509](https://github.com/adap/flower/pull/2509))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:550 msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:552 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " +"[#2507](https://github.com/adap/flower/pull/2507))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:554 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-changelog.md:556 +msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-changelog.md:558 +msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-changelog.md:560 +msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 -msgid "FedOpt" 
+#: ../../source/ref-changelog.md:562 +msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" msgstr "" -#: flwr.server.strategy.fedopt.FedOpt:33 of -msgid "Momentum parameter. Defaults to 0.0." +#: ../../source/ref-changelog.md:564 +msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" msgstr "" -#: flwr.server.strategy.fedopt.FedOpt:35 of -msgid "Second moment parameter. Defaults to 0.0." +#: ../../source/ref-changelog.md:566 +msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +#: ../../source/ref-changelog.md:568 +msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:570 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:572 msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +"**General updates to Flower Examples** " +"([#2384](https://github.com/adap/flower/pull/2384), " +"[#2425](https://github.com/adap/flower/pull/2425), " +"[#2526](https://github.com/adap/flower/pull/2526), " +"[#2302](https://github.com/adap/flower/pull/2302), " +"[#2545](https://github.com/adap/flower/pull/2545))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:574 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**General updates to Flower Baselines** " +"([#2301](https://github.com/adap/flower/pull/2301), " +"[#2305](https://github.com/adap/flower/pull/2305), " 
+"[#2307](https://github.com/adap/flower/pull/2307), " +"[#2327](https://github.com/adap/flower/pull/2327), " +"[#2435](https://github.com/adap/flower/pull/2435), " +"[#2462](https://github.com/adap/flower/pull/2462), " +"[#2463](https://github.com/adap/flower/pull/2463), " +"[#2461](https://github.com/adap/flower/pull/2461), " +"[#2469](https://github.com/adap/flower/pull/2469), " +"[#2466](https://github.com/adap/flower/pull/2466), " +"[#2471](https://github.com/adap/flower/pull/2471), " +"[#2472](https://github.com/adap/flower/pull/2472), " +"[#2470](https://github.com/adap/flower/pull/2470))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:576 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"**General updates to the simulation engine** " +"([#2331](https://github.com/adap/flower/pull/2331), " +"[#2447](https://github.com/adap/flower/pull/2447), " +"[#2448](https://github.com/adap/flower/pull/2448), " +"[#2294](https://github.com/adap/flower/pull/2294))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:578 msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"**General updates to Flower SDKs** " +"([#2288](https://github.com/adap/flower/pull/2288), " +"[#2429](https://github.com/adap/flower/pull/2429), " +"[#2555](https://github.com/adap/flower/pull/2555), " +"[#2543](https://github.com/adap/flower/pull/2543), " +"[#2544](https://github.com/adap/flower/pull/2544), " +"[#2597](https://github.com/adap/flower/pull/2597), " +"[#2623](https://github.com/adap/flower/pull/2623))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:580 msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"**General improvements** " +"([#2309](https://github.com/adap/flower/pull/2309), " 
+"[#2310](https://github.com/adap/flower/pull/2310), " +"[#2313](https://github.com/adap/flower/pull/2313), " +"[#2316](https://github.com/adap/flower/pull/2316), " +"[#2317](https://github.com/adap/flower/pull/2317), " +"[#2349](https://github.com/adap/flower/pull/2349), " +"[#2360](https://github.com/adap/flower/pull/2360), " +"[#2402](https://github.com/adap/flower/pull/2402), " +"[#2446](https://github.com/adap/flower/pull/2446), " +"[#2561](https://github.com/adap/flower/pull/2561), " +"[#2273](https://github.com/adap/flower/pull/2273), " +"[#2267](https://github.com/adap/flower/pull/2267), " +"[#2274](https://github.com/adap/flower/pull/2274), " +"[#2275](https://github.com/adap/flower/pull/2275), " +"[#2432](https://github.com/adap/flower/pull/2432), " +"[#2251](https://github.com/adap/flower/pull/2251), " +"[#2321](https://github.com/adap/flower/pull/2321), " +"[#1936](https://github.com/adap/flower/pull/1936), " +"[#2408](https://github.com/adap/flower/pull/2408), " +"[#2413](https://github.com/adap/flower/pull/2413), " +"[#2401](https://github.com/adap/flower/pull/2401), " +"[#2531](https://github.com/adap/flower/pull/2531), " +"[#2534](https://github.com/adap/flower/pull/2534), " +"[#2535](https://github.com/adap/flower/pull/2535), " +"[#2521](https://github.com/adap/flower/pull/2521), " +"[#2553](https://github.com/adap/flower/pull/2553), " +"[#2596](https://github.com/adap/flower/pull/2596))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +#: ../../source/ref-changelog.md:582 ../../source/ref-changelog.md:672 +#: ../../source/ref-changelog.md:736 ../../source/ref-changelog.md:790 +#: ../../source/ref-changelog.md:857 +msgid "Flower received many improvements under the hood, too many to list here." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 -msgid "FedProx" +#: ../../source/ref-changelog.md:586 +msgid "" +"**Remove support for Python 3.7** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:3 of -msgid "Implementation based on https://arxiv.org/abs/1812.06127" +#: ../../source/ref-changelog.md:588 +msgid "" +"Python 3.7 support was deprecated in Flower 1.5, and this release removes" +" support. Flower now requires Python 3.8." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:5 of +#: ../../source/ref-changelog.md:590 msgid "" -"The strategy in itself will not be different than FedAvg, the client " -"needs to be adjusted. A proximal term needs to be added to the loss " -"function during the training:" +"**Remove experimental argument** `rest` **from** `start_client` " +"([#2324](https://github.com/adap/flower/pull/2324))" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:9 of +#: ../../source/ref-changelog.md:592 msgid "" -"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" -"\n" +"The (still experimental) argument `rest` was removed from `start_client` " +"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " +"experimental REST API instead." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:12 of +#: ../../source/ref-changelog.md:594 +msgid "v1.5.0 (2023-08-31)" +msgstr "" + +#: ../../source/ref-changelog.md:600 msgid "" -"Where $w^t$ are the global parameters and $w$ are the local weights the " -"function will be optimized with." +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " msgstr "" -#: flwr.server.strategy.fedprox.FedProx:15 of -msgid "In PyTorch, for example, the loss would go from:" +#: ../../source/ref-changelog.md:604 +msgid "" +"**Introduce new simulation engine** " +"([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:21 of -msgid "To:" +#: ../../source/ref-changelog.md:606 +msgid "" +"The new simulation engine has been rewritten from the ground up, yet it " +"remains fully backwards compatible. It offers much improved stability and" +" memory handling, especially when working with GPUs. Simulations " +"transparently adapt to different settings to scale simulation in CPU-" +"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:30 of +#: ../../source/ref-changelog.md:608 msgid "" -"With `global_params` being a copy of the parameters before the training " -"takes place." +"Comprehensive documentation includes a new [how-to run " +"simulations](https://flower.ai/docs/framework/how-to-run-" +"simulations.html) guide, new [simulation-" +"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " +"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" +"tensorflow.html) notebooks, and a new [YouTube tutorial " +"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:65 of +#: ../../source/ref-changelog.md:610 msgid "" -"The weight of the proximal term used in the optimization. 
0.0 makes this " -"strategy equivalent to FedAvg, and the higher the coefficient, the more " -"regularization will be used (that is, the client parameters will need to " -"be closer to the server parameters during training)." +"**Restructure Flower Docs** " +"([#1824](https://github.com/adap/flower/pull/1824), " +"[#1865](https://github.com/adap/flower/pull/1865), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1887](https://github.com/adap/flower/pull/1887), " +"[#1919](https://github.com/adap/flower/pull/1919), " +"[#1922](https://github.com/adap/flower/pull/1922), " +"[#1920](https://github.com/adap/flower/pull/1920), " +"[#1923](https://github.com/adap/flower/pull/1923), " +"[#1924](https://github.com/adap/flower/pull/1924), " +"[#1962](https://github.com/adap/flower/pull/1962), " +"[#2006](https://github.com/adap/flower/pull/2006), " +"[#2133](https://github.com/adap/flower/pull/2133), " +"[#2203](https://github.com/adap/flower/pull/2203), " +"[#2215](https://github.com/adap/flower/pull/2215), " +"[#2122](https://github.com/adap/flower/pull/2122), " +"[#2223](https://github.com/adap/flower/pull/2223), " +"[#2219](https://github.com/adap/flower/pull/2219), " +"[#2232](https://github.com/adap/flower/pull/2232), " +"[#2233](https://github.com/adap/flower/pull/2233), " +"[#2234](https://github.com/adap/flower/pull/2234), " +"[#2235](https://github.com/adap/flower/pull/2235), " +"[#2237](https://github.com/adap/flower/pull/2237), " +"[#2238](https://github.com/adap/flower/pull/2238), " +"[#2242](https://github.com/adap/flower/pull/2242), " +"[#2231](https://github.com/adap/flower/pull/2231), " +"[#2243](https://github.com/adap/flower/pull/2243), " +"[#2227](https://github.com/adap/flower/pull/2227))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:612 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +"Much effort went into a completely restructured 
Flower docs experience. " +"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " +"divided into Flower Framework, Flower Baselines, Flower Android SDK, " +"Flower iOS SDK, and code example projects." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:614 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**Introduce Flower Swift SDK** " +"([#1858](https://github.com/adap/flower/pull/1858), " +"[#1897](https://github.com/adap/flower/pull/1897))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:616 msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +"This is the first preview release of the Flower Swift SDK. Flower support" +" on iOS is improving, and alongside the Swift SDK and code example, there" +" is now also an iOS quickstart tutorial." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:618 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Introduce Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:620 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"This is the first preview release of the Flower Kotlin SDK. Flower " +"support on Android is improving, and alongside the Kotlin SDK and code " +"example, there is now also an Android quickstart tutorial." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:622 msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"**Introduce new end-to-end testing infrastructure** " +"([#1842](https://github.com/adap/flower/pull/1842), " +"[#2071](https://github.com/adap/flower/pull/2071), " +"[#2072](https://github.com/adap/flower/pull/2072), " +"[#2068](https://github.com/adap/flower/pull/2068), " +"[#2067](https://github.com/adap/flower/pull/2067), " +"[#2069](https://github.com/adap/flower/pull/2069), " +"[#2073](https://github.com/adap/flower/pull/2073), " +"[#2070](https://github.com/adap/flower/pull/2070), " +"[#2074](https://github.com/adap/flower/pull/2074), " +"[#2082](https://github.com/adap/flower/pull/2082), " +"[#2084](https://github.com/adap/flower/pull/2084), " +"[#2093](https://github.com/adap/flower/pull/2093), " +"[#2109](https://github.com/adap/flower/pull/2109), " +"[#2095](https://github.com/adap/flower/pull/2095), " +"[#2140](https://github.com/adap/flower/pull/2140), " +"[#2137](https://github.com/adap/flower/pull/2137), " +"[#2165](https://github.com/adap/flower/pull/2165))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:624 msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"A new testing infrastructure ensures that new changes stay compatible " +"with existing framework integrations or strategies." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-changelog.md:626 +msgid "**Deprecate Python 3.7**" msgstr "" -#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of -msgid "Sends the proximal factor mu to the clients" +#: ../../source/ref-changelog.md:628 +msgid "" +"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" +" Python 3.7 is now deprecated and will be removed in an upcoming release." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 -msgid "FedTrimmedAvg" +#: ../../source/ref-changelog.md:630 +msgid "" +"**Add new** `FedTrimmedAvg` **strategy** " +"([#1769](https://github.com/adap/flower/pull/1769), " +"[#1853](https://github.com/adap/flower/pull/1853))" msgstr "" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of -msgid "Implemented based on: https://arxiv.org/abs/1803.01498" +#: ../../source/ref-changelog.md:632 +msgid "" +"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " +"2018](https://arxiv.org/abs/1803.01498)." msgstr "" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of -msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." +#: ../../source/ref-changelog.md:634 +msgid "" +"**Introduce start_driver** " +"([#1697](https://github.com/adap/flower/pull/1697))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:636 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"In addition to `start_server` and using the raw Driver API, there is a " +"new `start_driver` function that allows for running `start_server` " +"scripts as a Flower driver with only a single-line code change. Check out" +" the `mt-pytorch` code example to see a working example using " +"`start_driver`." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:638 msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**Add parameter aggregation to** `mt-pytorch` **code example** " +"([#1785](https://github.com/adap/flower/pull/1785))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using trimmed average." +#: ../../source/ref-changelog.md:640 +msgid "" +"The `mt-pytorch` example shows how to aggregate parameters when writing a" +" driver script. The included `driver.py` and `server.py` have been " +"aligned to demonstrate both the low-level way and the high-level way of " +"building server-side logic." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:642 msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Migrate experimental REST API to Starlette** " +"([2171](https://github.com/adap/flower/pull/2171))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:644 msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"The (experimental) REST API used to be implemented in " +"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" +" use [Starlette](https://www.starlette.io/) directly." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:646 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"Please note: The REST request-response API is still experimental and will" +" likely change significantly over time." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:648 msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"**Introduce experimental gRPC request-response API** " +"([#1867](https://github.com/adap/flower/pull/1867), " +"[#1901](https://github.com/adap/flower/pull/1901))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:650 msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"In addition to the existing gRPC API (based on bidirectional streaming) " +"and the experimental REST API, there is now a new gRPC API that uses a " +"request-response model to communicate with client nodes." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:652 msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"Please note: The gRPC request-response API is still experimental and will" +" likely change significantly over time." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 -msgid "FedXgbBagging" +#: ../../source/ref-changelog.md:654 +msgid "" +"**Replace the experimental** `start_client(rest=True)` **with the new** " +"`start_client(transport=\"rest\")` " +"([#1880](https://github.com/adap/flower/pull/1880))" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:656 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"The (experimental) `start_client` argument `rest` was deprecated in " +"favour of a new argument `transport`. `start_client(transport=\"rest\")` " +"will yield the same behaviour as `start_client(rest=True)` did before. " +"All code should migrate to the new argument `transport`. The deprecated " +"argument `rest` will be removed in a future release." 
msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "Aggregate evaluation metrics using average." +#: ../../source/ref-changelog.md:658 +msgid "" +"**Add a new gRPC option** " +"([#2197](https://github.com/adap/flower/pull/2197))" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:660 msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" +" option set to 0 by default. This prevents the clients from sending " +"keepalive pings when there is no outstanding stream." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of -msgid "Aggregate fit results using bagging." +#: ../../source/ref-changelog.md:662 +msgid "" +"**Improve example notebooks** " +"([#2005](https://github.com/adap/flower/pull/2005))" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-changelog.md:664 +msgid "There's a new 30min Federated Learning PyTorch tutorial!" 
msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:666 msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:668 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"Many examples have received significant updates, including simplified " +"advanced-tensorflow and advanced-pytorch examples, improved macOS " +"compatibility of TensorFlow examples, and code examples for simulation. A" +" major upgrade is that all code examples now have a `requirements.txt` " +"(in addition to `pyproject.toml`)." 
msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:670 msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"**General improvements** " +"([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-changelog.md:678 +msgid "v1.4.0 (2023-04-21)" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:684 msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. 
Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 -msgid "FedXgbCyclic" +#: ../../source/ref-changelog.md:688 +msgid "" +"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " +"example)** ([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:690 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"XGBoost is a tree-based ensemble machine learning algorithm that uses " +"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" +" " +"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," +" and a [code example](https://github.com/adap/flower/tree/main/examples" +"/xgboost-quickstart) that demonstrates the usage of this new strategy in " +"an XGBoost project." 
msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:692 msgid "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\," -" results\\, failures\\)" +"**Introduce iOS SDK (preview)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:694 msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"This is a major update for anyone wanting to implement Federated Learning" +" on iOS mobile devices. We now have a swift iOS SDK present under " +"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" +" that will facilitate greatly the app creating process. To showcase its " +"use, the [iOS " +"example](https://github.com/adap/flower/tree/main/examples/ios) has also " +"been updated!" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:696 msgid "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +"**Introduce new \"What is Federated Learning?\" tutorial** " +"([#1657](https://github.com/adap/flower/pull/1657), " +"[#1721](https://github.com/adap/flower/pull/1721))" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:698 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" +"what-is-federated-learning.html) in our documentation explains the basics" +" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" +" Learning to start their journey with Flower. Forward it to anyone who's " +"interested in Federated Learning!" 
msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:700 msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"**Introduce new Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:702 msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"This new baseline replicates the MNIST+CNN task from the paper [Federated" +" Optimization in Heterogeneous Networks (Li et al., " +"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," +" which aims at making convergence more robust in heterogeneous settings." msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:704 msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"**Introduce new Flower Baseline: FedAvg FEMNIST** " +"([#1655](https://github.com/adap/flower/pull/1655))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 -msgid "FedXgbNnAvg" +#: ../../source/ref-changelog.md:706 +msgid "" +"This new baseline replicates an experiment evaluating the performance of " +"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " +"Benchmark for Federated Settings (Caldas et al., " +"2018)](https://arxiv.org/abs/1812.01097)." msgstr "" -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of +#: ../../source/ref-changelog.md:708 msgid "" -"This strategy is deprecated, but a copy of it is available in Flower " -"Baselines: " -"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." 
+"**Introduce (experimental) REST API** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:710 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"A new REST API has been introduced as an alternative to the gRPC-based " +"communication stack. In this initial version, the REST API only supports " +"anonymous clients." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:712 msgid "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\, " -"results\\, failures\\)" +"Please note: The REST API is still experimental and will likely change " +"significantly over time." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:714 msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Improve the (experimental) Driver API** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:716 msgid "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +"The Driver API is still an experimental feature, but this release " +"introduces some major upgrades. One of the main improvements is the " +"introduction of an SQLite database to store server state on disk (instead" +" of in-memory). Another improvement is that tasks (instructions or " +"results) that have been delivered will now be deleted. This greatly " +"improves the memory efficiency of a long-running Flower server." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:718 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"**Fix spilling issues related to Ray during simulations** " +"([#1698](https://github.com/adap/flower/pull/1698))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:720 msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"While running long simulations, `ray` was sometimes spilling huge amounts" +" of data that would make the training unable to continue. This is now " +"fixed! 🎉" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:722 msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"**Add new example using** `TabNet` **and Flower** " +"([#1725](https://github.com/adap/flower/pull/1725))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:724 msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"TabNet is a powerful and flexible framework for training machine learning" +" models on tabular data. We now have a federated example using Flower: " +"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" +"/quickstart-tabnet)." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 -msgid "FedYogi" +#: ../../source/ref-changelog.md:726 +msgid "" +"**Add new how-to guide for monitoring simulations** " +"([#1649](https://github.com/adap/flower/pull/1649))" msgstr "" -#: flwr.server.strategy.fedyogi.FedYogi:32 of -msgid "Server-side learning rate. Defaults to 1e-2." +#: ../../source/ref-changelog.md:728 +msgid "" +"We now have a documentation guide to help users monitor their performance" +" during simulations." 
msgstr "" -#: flwr.server.strategy.fedyogi.FedYogi:34 of -msgid "Client-side learning rate. Defaults to 0.0316." +#: ../../source/ref-changelog.md:730 +msgid "" +"**Add training metrics to** `History` **object during simulations** " +"([#1696](https://github.com/adap/flower/pull/1696))" msgstr "" -#: flwr.server.strategy.fedyogi.FedYogi:40 of -msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." +#: ../../source/ref-changelog.md:732 +msgid "" +"The `fit_metrics_aggregation_fn` can be used to aggregate training " +"metrics, but previous releases did not save the results in the `History` " +"object. This is now the case!" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:734 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +"**General improvements** " +"([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " +"[#1672](https://github.com/adap/flower/pull/1672), " +"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " +"[#1682](https://github.com/adap/flower/pull/1682), " +"[#1685](https://github.com/adap/flower/pull/1685), " +"[#1692](https://github.com/adap/flower/pull/1692), " +"[#1705](https://github.com/adap/flower/pull/1705), " 
+"[#1708](https://github.com/adap/flower/pull/1708), " +"[#1711](https://github.com/adap/flower/pull/1711), " +"[#1713](https://github.com/adap/flower/pull/1713), " +"[#1714](https://github.com/adap/flower/pull/1714), " +"[#1718](https://github.com/adap/flower/pull/1718), " +"[#1716](https://github.com/adap/flower/pull/1716), " +"[#1723](https://github.com/adap/flower/pull/1723), " +"[#1735](https://github.com/adap/flower/pull/1735), " +"[#1678](https://github.com/adap/flower/pull/1678), " +"[#1750](https://github.com/adap/flower/pull/1750), " +"[#1753](https://github.com/adap/flower/pull/1753), " +"[#1736](https://github.com/adap/flower/pull/1736), " +"[#1766](https://github.com/adap/flower/pull/1766), " +"[#1760](https://github.com/adap/flower/pull/1760), " +"[#1775](https://github.com/adap/flower/pull/1775), " +"[#1776](https://github.com/adap/flower/pull/1776), " +"[#1777](https://github.com/adap/flower/pull/1777), " +"[#1779](https://github.com/adap/flower/pull/1779), " +"[#1784](https://github.com/adap/flower/pull/1784), " +"[#1773](https://github.com/adap/flower/pull/1773), " +"[#1755](https://github.com/adap/flower/pull/1755), " +"[#1789](https://github.com/adap/flower/pull/1789), " +"[#1788](https://github.com/adap/flower/pull/1788), " +"[#1798](https://github.com/adap/flower/pull/1798), " +"[#1799](https://github.com/adap/flower/pull/1799), " +"[#1739](https://github.com/adap/flower/pull/1739), " +"[#1800](https://github.com/adap/flower/pull/1800), " +"[#1804](https://github.com/adap/flower/pull/1804), " +"[#1805](https://github.com/adap/flower/pull/1805))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-changelog.md:742 +msgid "v1.3.0 (2023-02-06)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:748 msgid "" -":py:obj:`configure_evaluate " -"`\\ 
\\(server\\_round\\," -" parameters\\, ...\\)" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:752 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Add support for** `workload_id` **and** `group_id` **in Driver API** " +"([#1595](https://github.com/adap/flower/pull/1595))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:754 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"The (experimental) Driver API now supports a `workload_id` that can be " +"used to identify which workload a task belongs to. It also supports a new" +" `group_id` that can be used, for example, to indicate the current " +"training round. Both the `workload_id` and `group_id` enable client nodes" +" to decide whether they want to handle a task or not." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:756 msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"**Make Driver API and Fleet API address configurable** " +"([#1637](https://github.com/adap/flower/pull/1637))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:758 msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"The (experimental) long-running Flower server (Driver API and Fleet API) " +"can now configure the server address of both Driver API (via `--driver-" +"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:760 msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 -msgid "Krum" +"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " +"\"0.0.0.0:8086\"`" msgstr "" -#: flwr.server.strategy.krum.Krum:3 of -msgid "Implementation based on https://arxiv.org/abs/1703.02757" +#: ../../source/ref-changelog.md:762 +msgid "Both IPv4 and IPv6 addresses are supported." msgstr "" -#: flwr.server.strategy.krum.Krum:17 of +#: ../../source/ref-changelog.md:764 msgid "" -"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" -" that case classical Krum is applied." +"**Add new example of Federated Learning using fastai and Flower** " +"([#1598](https://github.com/adap/flower/pull/1598))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:766 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +"A new code example (`quickstart-fastai`) demonstrates federated learning " +"with [fastai](https://www.fast.ai/) and Flower. 
You can find it here: " +"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" +"/quickstart-fastai)." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:768 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" +" versions of Android** " +"([#1603](https://github.com/adap/flower/pull/1603))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of -msgid "Aggregate fit results using Krum." +#: ../../source/ref-changelog.md:770 +msgid "" +"The Android code example has received a substantial update: the project " +"is compatible with Flower 1.0 (and later), the UI received a full " +"refresh, and the project is updated to be compatible with newer Android " +"tooling." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:772 msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +"**Add new `FedProx` strategy** " +"([#1619](https://github.com/adap/flower/pull/1619))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:774 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"This " +"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" +" is almost identical to " +"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," +" but helps users replicate what is described in this " +"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " +"parameter called `proximal_mu` to regularize the local models with " +"respect to the global models." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:776 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"**Add new metrics to telemetry events** " +"([#1640](https://github.com/adap/flower/pull/1640))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:778 msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"An updated event structure allows, for example, the clustering of events " +"within the same workload." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:780 msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"**Add new custom strategy tutorial section** " +"[#1623](https://github.com/adap/flower/pull/1623)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:782 msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +"The Flower tutorial now has a new section that covers implementing a " +"custom strategy from scratch: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 -msgid "QFedAvg" +#: ../../source/ref-changelog.md:784 +msgid "" +"**Add new custom serialization tutorial section** " +"([#1622](https://github.com/adap/flower/pull/1622))" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:786 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +"The Flower tutorial now has a new section that covers custom " +"serialization: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" 
+"/tutorial-customize-the-client-pytorch.ipynb)" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:788 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**General improvements** " +"([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " +"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github.com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " +"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github.com/adap/flower/pull/1590), " +"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/adap/flower/pull/1600), " +"[#1601](https://github.com/adap/flower/pull/1601), " +"[#1597](https://github.com/adap/flower/pull/1597), " +"[#1595](https://github.com/adap/flower/pull/1595), " +"[#1591](https://github.com/adap/flower/pull/1591), " +"[#1588](https://github.com/adap/flower/pull/1588), " +"[#1589](https://github.com/adap/flower/pull/1589), " +"[#1587](https://github.com/adap/flower/pull/1587), " +"[#1573](https://github.com/adap/flower/pull/1573), " +"[#1581](https://github.com/adap/flower/pull/1581), " +"[#1578](https://github.com/adap/flower/pull/1578), " +"[#1574](https://github.com/adap/flower/pull/1574), " 
+"[#1572](https://github.com/adap/flower/pull/1572), " +"[#1586](https://github.com/adap/flower/pull/1586))" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:792 msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +"**Updated documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:794 ../../source/ref-changelog.md:861 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"As usual, the documentation has improved quite a bit. It is another step " +"in our effort to make the Flower documentation the best documentation of " +"any project. Stay tuned and as always, feel free to provide feedback!" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-changelog.md:800 +msgid "v1.2.0 (2023-01-13)" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:806 msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." 
+" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:810 msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"**Introduce new Flower Baseline: FedAvg MNIST** " +"([#1497](https://github.com/adap/flower/pull/1497), " +"[#1552](https://github.com/adap/flower/pull/1552))" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:812 msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"Over the coming weeks, we will be releasing a number of new reference " +"implementations useful especially to FL newcomers. They will typically " +"revisit well known papers from the literature, and be suitable for " +"integration in your own application or for experimentation, in order to " +"deepen your knowledge of FL in general. Today's release is the first in " +"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" +"pack-fedavg-mnist-cnn/)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 -msgid "Strategy" +#: ../../source/ref-changelog.md:814 +msgid "" +"**Improve GPU support in simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:816 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" +" to improve GPU support. The update includes some of the hard-earned " +"lessons from scaling simulations in GPU cluster environments. New " +"defaults make running GPU-based simulations substantially more robust." 
msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of -msgid "Aggregate evaluation results." +#: ../../source/ref-changelog.md:818 +msgid "" +"**Improve GPU support in Jupyter Notebook tutorials** " +"([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:820 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"Some users reported that Jupyter Notebooks have not always been easy to " +"use on GPU instances. We listened and made improvements to all of our " +"Jupyter notebooks! Check out the updated notebooks here:" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of -msgid "Aggregate training results." +#: ../../source/ref-changelog.md:822 +msgid "" +"[An Introduction to Federated Learning](https://flower.ai/docs/framework" +"/tutorial-get-started-with-flower-pytorch.html)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:823 msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"[Strategies in Federated Learning](https://flower.ai/docs/framework" +"/tutorial-use-a-federated-learning-strategy-pytorch.html)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:824 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" +"-strategy-from-scratch-pytorch.html)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:825 msgid "" -":py:obj:`evaluate `\\ " 
-"\\(server\\_round\\, parameters\\)" +"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" +"customize-the-client-pytorch.html)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.evaluate:1 of -msgid "Evaluate the current model parameters." +#: ../../source/ref-changelog.md:827 +msgid "" +"**Introduce optional telemetry** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:829 msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"After a [request for " +"feedback](https://github.com/adap/flower/issues/1534) from the community," +" the Flower open-source project introduces optional collection of " +"*anonymous* usage metrics to make well-informed decisions to improve " +"Flower. Doing this enables the Flower team to understand how Flower is " +"used and what challenges users might face." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of -msgid "Initialize the (global) model parameters." +#: ../../source/ref-changelog.md:831 +msgid "" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users who do not want to share anonymous usage metrics. " +"[Read more.](https://flower.ai/docs/telemetry.html)." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of +#: ../../source/ref-changelog.md:833 msgid "" -"Successful updates from the previously selected and configured clients. " -"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " -"one of the previously selected clients. 
Not that not all previously " -"selected clients are necessarily included in this list: a client might " -"drop out and not submit a result. For each client that did not submit an " -"update, there should be an `Exception` in `failures`." +"**Introduce (experimental) Driver API** " +"([#1520](https://github.com/adap/flower/pull/1520), " +"[#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of -msgid "Exceptions that occurred while the server was waiting for client updates." +#: ../../source/ref-changelog.md:835 +msgid "" +"Flower now has a new (experimental) Driver API which will enable fully " +"programmable, async, and multi-tenant Federated Learning and Federated " +"Analytics applications. Phew, that's a lot! Going forward, the Driver API" +" will be the abstraction that many upcoming features will be built on - " +"and you can start building those things now, too." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of +#: ../../source/ref-changelog.md:837 msgid "" -"**aggregation_result** -- The aggregated evaluation result. Aggregation " -"typically uses some variant of a weighted average." +"The Driver API also enables a new execution mode in which the server runs" +" indefinitely. Multiple individual workloads can run concurrently and " +"start and stop their execution independent of the server. This is " +"especially useful for users who want to deploy Flower in production." 
msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of +#: ../../source/ref-changelog.md:839 msgid "" -"Successful updates from the previously selected and configured clients. " -"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" -" one of the previously selected clients. Not that not all previously " -"selected clients are necessarily included in this list: a client might " -"drop out and not submit a result. For each client that did not submit an " -"update, there should be an `Exception` in `failures`." +"To learn more, check out the `mt-pytorch` code example. We look forward " +"to you feedback!" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of +#: ../../source/ref-changelog.md:841 msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the new global model parameters (i.e., it will replace the " -"previous parameters with the ones returned from this method). If `None` " -"is returned (e.g., because there were only failures and no viable " -"results) then the server will no update the previous model parameters, " -"the updates received in this round are discarded, and the global model " -"parameters remain the same." +"Please note: *The Driver API is still experimental and will likely change" +" significantly over time.*" msgstr "" -#: flwr.server.strategy.strategy.Strategy.evaluate:3 of +#: ../../source/ref-changelog.md:843 msgid "" -"This function can be used to perform centralized (i.e., server-side) " -"evaluation of model parameters." 
+"**Add new Federated Analytics with Pandas example** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" msgstr "" -#: flwr.server.strategy.strategy.Strategy.evaluate:11 of +#: ../../source/ref-changelog.md:845 msgid "" -"**evaluation_result** -- The evaluation result, usually a Tuple " -"containing loss and a dictionary containing task-specific metrics (e.g., " -"accuracy)." +"A new code example (`quickstart-pandas`) demonstrates federated analytics" +" with Pandas and Flower. You can find it here: [quickstart-" +"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" +"pandas)." msgstr "" -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of +#: ../../source/ref-changelog.md:847 msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the initial global model parameters." +"**Add new strategies: Krum and MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:2 -msgid "workflow" +#: ../../source/ref-changelog.md:849 +msgid "" +"Edoardo, a computer science student at the Sapienza University of Rome, " +"contributed a new `Krum` strategy that enables users to easily use Krum " +"and MultiKrum in their workloads." msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: ../../source/ref-changelog.md:851 msgid "" -":py:obj:`DefaultWorkflow `\\ " -"\\(\\[fit\\_workflow\\, ...\\]\\)" +"**Update C++ example to be compatible with Flower v1.2.0** " +"([#1495](https://github.com/adap/flower/pull/1495))" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of -msgid "Default workflow in Flower." +#: ../../source/ref-changelog.md:853 +msgid "" +"The C++ code example has received a substantial update to make it " +"compatible with the latest version of Flower." 
msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: ../../source/ref-changelog.md:855 msgid "" -":py:obj:`SecAggPlusWorkflow `\\ " -"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" +"**General improvements** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 -#: of -msgid "The workflow for the SecAgg+ protocol." 
+#: ../../source/ref-changelog.md:859 +msgid "" +"**Updated documentation** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: ../../source/ref-changelog.md:863 msgid "" -":py:obj:`SecAggWorkflow `\\ " -"\\(reconstruction\\_threshold\\, \\*\\)" +"One highlight is the new [first time contributor " +"guide](https://flower.ai/docs/first-time-contributors.html): if you've " +"never contributed on GitHub before, this is the perfect place to start!" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of -msgid "The workflow for the SecAgg protocol." +#: ../../source/ref-changelog.md:869 +msgid "v1.1.0 (2022-10-31)" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 -msgid "DefaultWorkflow" +#: ../../source/ref-changelog.md:873 +msgid "" +"We would like to give our **special thanks** to all the contributors who " +"made the new version of Flower possible (in `git shortlog` order):" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 -msgid "SecAggPlusWorkflow" +#: ../../source/ref-changelog.md:875 +msgid "" +"`Akis Linardos`, `Christopher S`, `Daniel J. 
Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 -#: of +#: ../../source/ref-changelog.md:879 msgid "" -"The SecAgg+ protocol ensures the secure summation of integer vectors " -"owned by multiple parties, without accessing any individual integer " -"vector. This workflow allows the server to compute the weighted average " -"of model parameters across all clients, ensuring individual contributions" -" remain private. This is achieved by clients sending both, a weighting " -"factor and a weighted version of the locally updated parameters, both of " -"which are masked for privacy. Specifically, each client uploads \"[w, w *" -" params]\" with masks, where weighting factor 'w' is the number of " -"examples ('num_examples') and 'params' represents the model parameters " -"('parameters') from the client's `FitRes`. The server then aggregates " -"these contributions to compute the weighted average of model parameters." +"**Introduce Differential Privacy wrappers (preview)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 -#: of +#: ../../source/ref-changelog.md:881 +msgid "" +"The first (experimental) preview of pluggable Differential Privacy " +"wrappers enables easy configuration and usage of differential privacy " +"(DP). The pluggable DP wrappers enable framework-agnostic **and** " +"strategy-agnostic usage of both client-side DP and server-side DP. Head " +"over to the Flower docs, a new explainer goes into more detail." 
+msgstr "" + +#: ../../source/ref-changelog.md:883 msgid "" -"The protocol involves four main stages: - 'setup': Send SecAgg+ " -"configuration to clients and collect their public keys. - 'share keys': " -"Broadcast public keys among clients and collect encrypted secret" +"**New iOS CoreML code example** " +"([#1289](https://github.com/adap/flower/pull/1289))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 -#: of -msgid "key shares." +#: ../../source/ref-changelog.md:885 +msgid "" +"Flower goes iOS! A massive new code example shows how Flower clients can " +"be built for iOS. The code example contains both Flower iOS SDK " +"components that can be used for many tasks, and one task example running " +"on CoreML." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:18 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:18 -#: of +#: ../../source/ref-changelog.md:887 msgid "" -"'collect masked vectors': Forward encrypted secret key shares to target " -"clients and collect masked model parameters." +"**New FedMedian strategy** " +"([#1461](https://github.com/adap/flower/pull/1461))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:20 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:20 -#: of +#: ../../source/ref-changelog.md:889 msgid "" -"'unmask': Collect secret key shares to decrypt and aggregate the model " -"parameters." +"The new `FedMedian` strategy implements Federated Median (FedMedian) by " +"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:22 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:22 -#: of +#: ../../source/ref-changelog.md:891 msgid "" -"Only the aggregated model parameters are exposed and passed to " -"`Strategy.aggregate_fit`, ensuring individual data privacy." +"**Log** `Client` **exceptions in Virtual Client Engine** " +"([#1493](https://github.com/adap/flower/pull/1493))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:25 -#: of +#: ../../source/ref-changelog.md:893 msgid "" -"The number of shares into which each client's private key is split under " -"the SecAgg+ protocol. If specified as a float, it represents the " -"proportion of all selected clients, and the number of shares will be set " -"dynamically in the run time. A private key can be reconstructed from " -"these shares, allowing for the secure aggregation of model updates. Each " -"client sends one share to each of its neighbors while retaining one." +"All `Client` exceptions happening in the VCE are now logged by default " +"and not just exposed to the configured `Strategy` (via the `failures` " +"argument)." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:25 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:32 -#: of +#: ../../source/ref-changelog.md:895 msgid "" -"The minimum number of shares required to reconstruct a client's private " -"key, or, if specified as a float, it represents the proportion of the " -"total number of shares needed for reconstruction. This threshold ensures " -"privacy by allowing for the recovery of contributions from dropped " -"clients during aggregation, without compromising individual client data." 
+"**Improve Virtual Client Engine internals** " +"([#1401](https://github.com/adap/flower/pull/1401), " +"[#1453](https://github.com/adap/flower/pull/1453))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:31 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:38 -#: of +#: ../../source/ref-changelog.md:897 msgid "" -"The maximum value of the weight that can be assigned to any single " -"client's update during the weighted average calculation on the server " -"side, e.g., in the FedAvg algorithm." +"Some internals of the Virtual Client Engine have been revamped. The VCE " +"now uses Ray 2.0 under the hood, the value type of the `client_resources`" +" dictionary changed to `float` to allow fractions of resources to be " +"allocated." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:35 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:42 -#: of +#: ../../source/ref-changelog.md:899 msgid "" -"The range within which model parameters are clipped before quantization. " -"This parameter ensures each model parameter is bounded within " -"[-clipping_range, clipping_range], facilitating quantization." +"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " +"Client Engine**" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:39 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:46 -#: of +#: ../../source/ref-changelog.md:901 msgid "" -"The size of the range into which floating-point model parameters are " -"quantized, mapping each parameter to an integer in [0, " -"quantization_range-1]. This facilitates cryptographic operations on the " -"model updates." +"The Virtual Client Engine now has full support for optional `Client` (and" +" `NumPyClient`) methods." 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:43 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:50 -#: of +#: ../../source/ref-changelog.md:903 msgid "" -"The range of values from which random mask entries are uniformly sampled " -"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " -"Please use 2**n values for `modulus_range` to prevent overflow issues." +"**Provide type information to packages using** `flwr` " +"([#1377](https://github.com/adap/flower/pull/1377))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:47 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:54 -#: of +#: ../../source/ref-changelog.md:905 msgid "" -"The timeout duration in seconds. If specified, the workflow will wait for" -" replies for this duration each time. If `None`, there is no time limit " -"and the workflow will wait until replies for all messages are received." +"The package `flwr` is now bundled with a `py.typed` file indicating that " +"the package is typed. This enables typing support for projects or " +"packages that use `flwr` by enabling them to improve their code using " +"static type checkers like `mypy`." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 -#: of +#: ../../source/ref-changelog.md:907 msgid "" -"Generally, higher `num_shares` means more robust to dropouts while " -"increasing the computational costs; higher `reconstruction_threshold` " -"means better privacy guarantees but less tolerance to dropouts." 
+"**Updated code example** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:58 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:64 -#: of -msgid "Too large `max_weight` may compromise the precision of the quantization." +#: ../../source/ref-changelog.md:909 +msgid "" +"The code examples covering scikit-learn and PyTorch Lightning have been " +"updated to work with the latest version of Flower." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 -#: of -msgid "`modulus_range` must be 2**n and larger than `quantization_range`." +#: ../../source/ref-changelog.md:911 +msgid "" +"**Updated documentation** " +"([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github.com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 -#: of +#: ../../source/ref-changelog.md:913 msgid "" 
-"When `num_shares` is a float, it is interpreted as the proportion of all " -"selected clients, and hence the number of shares will be determined in " -"the runtime. This allows for dynamic adjustment based on the total number" -" of participating clients." +"There have been so many documentation updates that it doesn't even make " +"sense to list them individually." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:69 -#: of +#: ../../source/ref-changelog.md:915 msgid "" -"Similarly, when `reconstruction_threshold` is a float, it is interpreted " -"as the proportion of the number of shares needed for the reconstruction " -"of a private key. This feature enables flexibility in setting the " -"security threshold relative to the number of distributed shares." +"**Restructured documentation** " +"([#1387](https://github.com/adap/flower/pull/1387))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:73 -#: of +#: ../../source/ref-changelog.md:917 msgid "" -"`num_shares`, `reconstruction_threshold`, and the quantization parameters" -" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " -"roles in balancing privacy, robustness, and efficiency within the SecAgg+" -" protocol." +"The documentation has been restructured to make it easier to navigate. " +"This is just the first step in a larger effort to make the Flower " +"documentation the best documentation of any project ever. Stay tuned!" 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-changelog.md:919 msgid "" -":py:obj:`collect_masked_vectors_stage " -"`\\" -" \\(driver\\, ...\\)" +"**Open in Colab button** " +"([#1389](https://github.com/adap/flower/pull/1389))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "Execute the 'collect masked vectors' stage." +#: ../../source/ref-changelog.md:921 +msgid "" +"The four parts of the Flower Federated Learning Tutorial now come with a " +"new `Open in Colab` button. No need to install anything on your local " +"machine, you can now use and learn about Flower in your browser, it's " +"only a single click away." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-changelog.md:923 msgid "" -":py:obj:`setup_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 -#: of -msgid "Execute the 'setup' stage." 
+#: ../../source/ref-changelog.md:925 +msgid "" +"The Flower Federated Learning Tutorial has two brand-new parts covering " +"custom strategies (still WIP) and the distinction between `Client` and " +"`NumPyClient`. The existing parts one and two have also been improved " +"(many small changes and fixes)." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "" -":py:obj:`share_keys_stage " -"`\\ " -"\\(driver\\, context\\, state\\)" +#: ../../source/ref-changelog.md:931 +msgid "v1.0.0 (2022-07-28)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 -#: of -msgid "Execute the 'share keys' stage." +#: ../../source/ref-changelog.md:933 +msgid "Highlights" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "" -":py:obj:`unmask_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +#: ../../source/ref-changelog.md:935 +msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 -#: of -msgid "Execute the 'unmask' stage." 
+#: ../../source/ref-changelog.md:936 +msgid "All `Client`/`NumPyClient` methods are now optional" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 -msgid "SecAggWorkflow" +#: ../../source/ref-changelog.md:937 +msgid "Configurable `get_parameters`" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +#: ../../source/ref-changelog.md:938 msgid "" -"Bases: " -":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" +"Tons of small API cleanups resulting in a more coherent developer " +"experience" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of +#: ../../source/ref-changelog.md:942 msgid "" -"The SecAgg protocol ensures the secure summation of integer vectors owned" -" by multiple parties, without accessing any individual integer vector. " -"This workflow allows the server to compute the weighted average of model " -"parameters across all clients, ensuring individual contributions remain " -"private. This is achieved by clients sending both, a weighting factor and" -" a weighted version of the locally updated parameters, both of which are " -"masked for privacy. Specifically, each client uploads \"[w, w * params]\"" -" with masks, where weighting factor 'w' is the number of examples " -"('num_examples') and 'params' represents the model parameters " -"('parameters') from the client's `FitRes`. The server then aggregates " -"these contributions to compute the weighted average of model parameters." 
+"We would like to give our **special thanks** to all the contributors who " +"made Flower 1.0 possible (in reverse [GitHub " +"Contributors](https://github.com/adap/flower/graphs/contributors) order):" +msgstr "" + +#: ../../source/ref-changelog.md:944 +msgid "" +"[@rtaiello](https://github.com/rtaiello), " +"[@g-pichler](https://github.com/g-pichler), [@rob-" +"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" +"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " +"[@nfnt](https://github.com/nfnt), " +"[@tatiana-s](https://github.com/tatiana-s), " +"[@TParcollet](https://github.com/TParcollet), " +"[@vballoli](https://github.com/vballoli), " +"[@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " +"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " +"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" +"/Jueun-Park), [@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), " +"[@mrinaald](https://github.com/mrinaald), " +"[@zliel](https://github.com/zliel), " +"[@MeiruiJiang](https://github.com/MeiruiJiang), " +"[@sancarlim](https://github.com/sancarlim), " +"[@gubertoli](https://github.com/gubertoli), " +"[@Vingt100](https://github.com/Vingt100), " +"[@MakGulati](https://github.com/MakGulati), " +"[@cozek](https://github.com/cozek), " +"[@jafermarq](https://github.com/jafermarq), " +"[@sisco0](https://github.com/sisco0), " +"[@akhilmathurs](https://github.com/akhilmathurs), " +"[@CanTuerk](https://github.com/CanTuerk), " +"[@mariaboerner1987](https://github.com/mariaboerner1987), " 
+"[@pedropgusmao](https://github.com/pedropgusmao), " +"[@tanertopal](https://github.com/tanertopal), " +"[@danieljanes](https://github.com/danieljanes)." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 of +#: ../../source/ref-changelog.md:948 msgid "" -"The protocol involves four main stages: - 'setup': Send SecAgg " -"configuration to clients and collect their public keys. - 'share keys': " -"Broadcast public keys among clients and collect encrypted secret" +"**All arguments must be passed as keyword arguments** " +"([#1338](https://github.com/adap/flower/pull/1338))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 of +#: ../../source/ref-changelog.md:950 msgid "" -"Each client's private key is split into N shares under the SecAgg " -"protocol, where N is the number of selected clients." +"Pass all arguments as keyword arguments, positional arguments are not " +"longer supported. Code that uses positional arguments (e.g., " +"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " +"for each positional argument (e.g., " +"`start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())`)." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:56 of +#: ../../source/ref-changelog.md:952 msgid "" -"Generally, higher `reconstruction_threshold` means better privacy " -"guarantees but less tolerance to dropouts." +"**Introduce configuration object** `ServerConfig` **in** `start_server` " +"**and** `start_simulation` " +"([#1317](https://github.com/adap/flower/pull/1317))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 of +#: ../../source/ref-changelog.md:954 msgid "" -"When `reconstruction_threshold` is a float, it is interpreted as the " -"proportion of the number of all selected clients needed for the " -"reconstruction of a private key. 
This feature enables flexibility in " -"setting the security threshold relative to the number of selected " -"clients." +"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " +"600.0}`, `start_server` and `start_simulation` now expect a configuration" +" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" +" arguments that as the previous config dict, but it makes writing type-" +"safe code easier and the default parameters values more transparent." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:64 of +#: ../../source/ref-changelog.md:956 msgid "" -"`reconstruction_threshold`, and the quantization parameters " -"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " -"roles in balancing privacy, robustness, and efficiency within the SecAgg " -"protocol." +"**Rename built-in strategy parameters for clarity** " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-changelog.md:958 msgid "" -":py:obj:`collect_masked_vectors_stage " -"`\\ " -"\\(driver\\, ...\\)" +"The following built-in strategy parameters were renamed to improve " +"readability and consistency with other API's:" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "" -":py:obj:`setup_stage `\\" -" \\(driver\\, context\\, state\\)" +#: ../../source/ref-changelog.md:960 +msgid "`fraction_eval` --> `fraction_evaluate`" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "" -":py:obj:`share_keys_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +#: ../../source/ref-changelog.md:961 +msgid "`min_eval_clients` --> `min_evaluate_clients`" msgstr "" -#: 
flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "" -":py:obj:`unmask_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +#: ../../source/ref-changelog.md:962 +msgid "`eval_fn` --> `evaluate_fn`" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:2 -msgid "simulation" +#: ../../source/ref-changelog.md:964 +msgid "" +"**Update default arguments of built-in strategies** " +"([#1278](https://github.com/adap/flower/pull/1278))" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: ../../source/ref-changelog.md:966 msgid "" -":py:obj:`start_simulation `\\ \\(\\*\\," -" client\\_fn\\[\\, ...\\]\\)" +"All built-in strategies now use `fraction_fit=1.0` and " +"`fraction_evaluate=1.0`, which means they select *all* currently " +"available clients for training and evaluation. Projects that relied on " +"the previous default values can get the previous behaviour by " +"initializing the strategy in the following way:" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 -#: flwr.simulation.app.start_simulation:1 of -msgid "Start a Ray-based Flower simulation server." +#: ../../source/ref-changelog.md:968 +msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: ../../source/ref-changelog.md:970 msgid "" -":py:obj:`run_simulation `\\ " -"\\(server\\_app\\, client\\_app\\, ...\\)" +"**Add** `server_round` **to** `Strategy.evaluate` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 -#: flwr.simulation.run_simulation.run_simulation:1 of -msgid "Run a Flower App using the Simulation Engine." +#: ../../source/ref-changelog.md:972 +msgid "" +"The `Strategy` method `evaluate` now receives the current round of " +"federated learning/evaluation as the first parameter." 
msgstr "" -#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 -msgid "run\\_simulation" +#: ../../source/ref-changelog.md:974 +msgid "" +"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:3 of +#: ../../source/ref-changelog.md:976 msgid "" -"The `ServerApp` to be executed. It will send messages to different " -"`ClientApp` instances running on different (virtual) SuperNodes." +"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " +"three parameters: (1) The current round of federated learning/evaluation " +"(`server_round`), (2) the model parameters to evaluate (`parameters`), " +"and (3) a config dictionary (`config`)." msgstr "" -#: flwr.simulation.run_simulation.run_simulation:6 of +#: ../../source/ref-changelog.md:978 msgid "" -"The `ClientApp` to be executed by each of the SuperNodes. It will receive" -" messages sent by the `ServerApp`." +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:9 of +#: ../../source/ref-changelog.md:980 msgid "" -"Number of nodes that run a ClientApp. They can be sampled by a Driver in " -"the ServerApp and receive a Message describing what the ClientApp should " -"perform." +"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " +"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " +"current round of federated learning/evaluation as their first parameter. " +"To improve reaability and avoid confusion with *random*, this parameter " +"has been renamed from `rnd` to `server_round`." msgstr "" -#: flwr.simulation.run_simulation.run_simulation:13 of -msgid "A simulation backend that runs `ClientApp`s." 
+#: ../../source/ref-changelog.md:982 +msgid "" +"**Move** `flwr.dataset` **to** `flwr_baselines` " +"([#1273](https://github.com/adap/flower/pull/1273))" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:15 of -msgid "" -"'A dictionary, e.g {\"\": , \"\": } to " -"configure a backend. Values supported in are those included by " -"`flwr.common.typing.ConfigsRecordValues`." +#: ../../source/ref-changelog.md:984 +msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." msgstr "" -#: flwr.simulation.run_simulation.run_simulation:19 of +#: ../../source/ref-changelog.md:986 msgid "" -"A boolean to indicate whether to enable GPU growth on the main thread. " -"This is desirable if you make use of a TensorFlow model on your " -"`ServerApp` while having your `ClientApp` running on the same GPU. " -"Without enabling this, you might encounter an out-of-memory error because" -" TensorFlow, by default, allocates all GPU memory. Read more about how " -"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " -"documentation: https://www.tensorflow.org/api/stable." +"**Remove experimental strategies** " +"([#1280](https://github.com/adap/flower/pull/1280))" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:26 of +#: ../../source/ref-changelog.md:988 msgid "" -"When diabled, only INFO, WARNING and ERROR log messages will be shown. If" -" enabled, DEBUG-level logs will be displayed." +"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " +"`FedFSv1`)." msgstr "" -#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 -msgid "start\\_simulation" +#: ../../source/ref-changelog.md:990 +msgid "" +"**Rename** `Weights` **to** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -#: flwr.simulation.app.start_simulation:3 of +#: ../../source/ref-changelog.md:992 msgid "" -"A function creating client instances. 
The function must take a single " -"`str` argument called `cid`. It should return a single client instance of" -" type Client. Note that the created client instances are ephemeral and " -"will often be destroyed after a single method invocation. Since client " -"instances are not long-lived, they should not attempt to carry state over" -" method invocations. Any state required by the instance (model, dataset, " -"hyperparameters, ...) should be (re-)created in either the call to " -"`client_fn` or the call to any of the client methods (e.g., load " -"evaluation data in the `evaluate` method itself)." +"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " +"capture what this type is all about." msgstr "" -#: flwr.simulation.app.start_simulation:13 of +#: ../../source/ref-changelog.md:994 msgid "" -"The total number of clients in this simulation. This must be set if " -"`clients_ids` is not set and vice-versa." +"**Remove antiquated** `force_final_distributed_eval` **from** " +"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -#: flwr.simulation.app.start_simulation:16 of +#: ../../source/ref-changelog.md:996 msgid "" -"List `client_id`s for each client. This is only required if `num_clients`" -" is not set. Setting both `num_clients` and `clients_ids` with " -"`len(clients_ids)` not equal to `num_clients` generates an error." +"The `start_server` parameter `force_final_distributed_eval` has long been" +" a historic artefact, in this release it is finally gone for good." msgstr "" -#: flwr.simulation.app.start_simulation:20 of +#: ../../source/ref-changelog.md:998 msgid "" -"CPU and GPU resources for a single client. Supported keys are `num_cpus` " -"and `num_gpus`. To understand the GPU utilization caused by `num_gpus`, " -"as well as using custom resources, please consult the Ray documentation." 
+"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" msgstr "" -#: flwr.simulation.app.start_simulation:25 of +#: ../../source/ref-changelog.md:1000 msgid "" -"An implementation of the abstract base class `flwr.server.Server`. If no " -"instance is provided, then `start_server` will create one." +"The `get_parameters` method now accepts a configuration dictionary, just " +"like `get_properties`, `fit`, and `evaluate`." msgstr "" -#: flwr.simulation.app.start_simulation:31 of +#: ../../source/ref-changelog.md:1002 msgid "" -"An implementation of the abstract base class `flwr.server.Strategy`. If " -"no strategy is provided, then `start_server` will use " -"`flwr.server.strategy.FedAvg`." +"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " +"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -#: flwr.simulation.app.start_simulation:35 of +#: ../../source/ref-changelog.md:1004 msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_simulation` will use " -"`flwr.server.client_manager.SimpleClientManager`." +"The `start_simulation` function now accepts a configuration dictionary " +"`config` instead of the `num_rounds` integer. This improves the " +"consistency between `start_simulation` and `start_server` and makes " +"transitioning between the two easier." msgstr "" -#: flwr.simulation.app.start_simulation:39 of +#: ../../source/ref-changelog.md:1008 msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args: { \"ignore_reinit_error\": True, " -"\"include_dashboard\": False } An empty dictionary can be used " -"(ray_init_args={}) to prevent any arguments from being passed to " -"ray.init." 
+"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" msgstr "" -#: flwr.simulation.app.start_simulation:39 of +#: ../../source/ref-changelog.md:1010 msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args:" +"The previous Flower release introduced experimental support for Python " +"3.10, this release declares Python 3.10 support as stable." msgstr "" -#: flwr.simulation.app.start_simulation:43 of -msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#: ../../source/ref-changelog.md:1012 +msgid "" +"**Make all** `Client` **and** `NumPyClient` **methods optional** " +"([#1260](https://github.com/adap/flower/pull/1260), " +"[#1277](https://github.com/adap/flower/pull/1277))" msgstr "" -#: flwr.simulation.app.start_simulation:45 of +#: ../../source/ref-changelog.md:1014 msgid "" -"An empty dictionary can be used (ray_init_args={}) to prevent any " -"arguments from being passed to ray.init." +"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " +"`fit`, and `evaluate` are all optional. This enables writing clients that" +" implement, for example, only `fit`, but no other method. No need to " +"implement `evaluate` when using centralized evaluation!" msgstr "" -#: flwr.simulation.app.start_simulation:48 of +#: ../../source/ref-changelog.md:1016 msgid "" -"Set to True to prevent `ray.shutdown()` in case " -"`ray.is_initialized()=True`." +"**Enable passing a** `Server` **instance to** `start_simulation` " +"([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -#: flwr.simulation.app.start_simulation:50 of +#: ../../source/ref-changelog.md:1018 msgid "" -"Optionally specify the type of actor to use. The actor object, which " -"persists throughout the simulation, will be the process in charge of " -"executing a ClientApp wrapping input argument `client_fn`." 
+"Similar to `start_server`, `start_simulation` now accepts a full `Server`" +" instance. This enables users to heavily customize the execution of " +"eperiments and opens the door to running, for example, async FL using the" +" Virtual Client Engine." msgstr "" -#: flwr.simulation.app.start_simulation:54 of +#: ../../source/ref-changelog.md:1020 msgid "" -"If you want to create your own Actor classes, you might need to pass some" -" input argument. You can use this dictionary for such purpose." +"**Update code examples** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" msgstr "" -#: flwr.simulation.app.start_simulation:57 of +#: ../../source/ref-changelog.md:1022 msgid "" -"(default: \"DEFAULT\") Optional string (\"DEFAULT\" or \"SPREAD\") for " -"the VCE to choose in which node the actor is placed. If you are an " -"advanced user needed more control you can use lower-level scheduling " -"strategies to pin actors to specific compute nodes (e.g. via " -"NodeAffinitySchedulingStrategy). Please note this is an advanced feature." -" For all details, please refer to the Ray documentation: " -"https://docs.ray.io/en/latest/ray-core/scheduling/index.html" +"Many code examples received small or even large maintenance updates, " +"among them are" msgstr "" -#: flwr.simulation.app.start_simulation:66 of -msgid "**hist** -- Object containing metrics from training." 
+#: ../../source/ref-changelog.md:1024 +msgid "`scikit-learn`" msgstr "" -#: ../../source/ref-changelog.md:1 -msgid "Changelog" +#: ../../source/ref-changelog.md:1025 +msgid "`simulation_pytorch`" msgstr "" -#: ../../source/ref-changelog.md:3 -msgid "Unreleased" +#: ../../source/ref-changelog.md:1026 +msgid "`quickstart_pytorch`" msgstr "" -#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:19 -#: ../../source/ref-changelog.md:83 ../../source/ref-changelog.md:176 -#: ../../source/ref-changelog.md:276 ../../source/ref-changelog.md:360 -#: ../../source/ref-changelog.md:424 ../../source/ref-changelog.md:482 -#: ../../source/ref-changelog.md:551 ../../source/ref-changelog.md:680 -#: ../../source/ref-changelog.md:722 ../../source/ref-changelog.md:789 -#: ../../source/ref-changelog.md:855 ../../source/ref-changelog.md:900 -#: ../../source/ref-changelog.md:939 ../../source/ref-changelog.md:972 -#: ../../source/ref-changelog.md:1022 -msgid "What's new?" +#: ../../source/ref-changelog.md:1027 +msgid "`quickstart_simulation`" msgstr "" -#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:71 -#: ../../source/ref-changelog.md:146 ../../source/ref-changelog.md:258 -#: ../../source/ref-changelog.md:348 ../../source/ref-changelog.md:412 -#: ../../source/ref-changelog.md:470 ../../source/ref-changelog.md:539 -#: ../../source/ref-changelog.md:601 ../../source/ref-changelog.md:620 -#: ../../source/ref-changelog.md:776 ../../source/ref-changelog.md:847 -#: ../../source/ref-changelog.md:884 ../../source/ref-changelog.md:927 -msgid "Incompatible changes" +#: ../../source/ref-changelog.md:1028 +msgid "`quickstart_tensorflow`" msgstr "" -#: ../../source/ref-changelog.md:9 ../../source/ref-changelog.md:73 -#: ../../source/ref-changelog.md:350 ../../source/ref-changelog.md:414 -#: ../../source/ref-changelog.md:472 ../../source/ref-changelog.md:541 -#: ../../source/ref-changelog.md:603 -msgid "None" +#: ../../source/ref-changelog.md:1029 +msgid 
"`advanced_tensorflow`" msgstr "" -#: ../../source/ref-changelog.md:11 -msgid "v1.8.0 (2024-04-03)" +#: ../../source/ref-changelog.md:1031 +msgid "" +"**Remove the obsolete simulation example** " +"([#1328](https://github.com/adap/flower/pull/1328))" msgstr "" -#: ../../source/ref-changelog.md:13 ../../source/ref-changelog.md:77 -#: ../../source/ref-changelog.md:170 ../../source/ref-changelog.md:270 -#: ../../source/ref-changelog.md:354 ../../source/ref-changelog.md:418 -#: ../../source/ref-changelog.md:476 ../../source/ref-changelog.md:545 -#: ../../source/ref-changelog.md:614 -msgid "Thanks to our contributors" +#: ../../source/ref-changelog.md:1033 +msgid "" +"Removes the obsolete `simulation` example and renames " +"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " +"naming of `simulation_pytorch`" msgstr "" -#: ../../source/ref-changelog.md:15 ../../source/ref-changelog.md:79 -#: ../../source/ref-changelog.md:172 ../../source/ref-changelog.md:272 -#: ../../source/ref-changelog.md:356 ../../source/ref-changelog.md:420 -#: ../../source/ref-changelog.md:478 +#: ../../source/ref-changelog.md:1035 msgid "" -"We would like to give our special thanks to all the contributors who made" -" the new version of Flower possible (in `git shortlog` order):" +"**Update documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" msgstr "" -#: ../../source/ref-changelog.md:17 +#: ../../source/ref-changelog.md:1037 msgid "" -"`Adam Narozniak`, `Charles 
Beauville`, `Daniel J. Beutel`, `Daniel Nata " -"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " -"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " -"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " -"`tabdar-khan` " +"One substantial documentation update fixes multiple smaller rendering " +"issues, makes titles more succinct to improve navigation, removes a " +"deprecated library, updates documentation dependencies, includes the " +"`flwr.common` module in the API reference, includes support for markdown-" +"based documentation, migrates the changelog from `.rst` to `.md`, and " +"fixes a number of smaller details!" +msgstr "" + +#: ../../source/ref-changelog.md:1039 ../../source/ref-changelog.md:1094 +#: ../../source/ref-changelog.md:1163 ../../source/ref-changelog.md:1202 +msgid "**Minor updates**" msgstr "" -#: ../../source/ref-changelog.md:21 +#: ../../source/ref-changelog.md:1041 msgid "" -"**Introduce Flower Next high-level API (stable)** " -"([#3002](https://github.com/adap/flower/pull/3002), " -"[#2934](https://github.com/adap/flower/pull/2934), " -"[#2958](https://github.com/adap/flower/pull/2958), " -"[#3173](https://github.com/adap/flower/pull/3173), " -"[#3174](https://github.com/adap/flower/pull/3174), " -"[#2923](https://github.com/adap/flower/pull/2923), " -"[#2691](https://github.com/adap/flower/pull/2691), " -"[#3079](https://github.com/adap/flower/pull/3079), " -"[#2961](https://github.com/adap/flower/pull/2961), " -"[#2924](https://github.com/adap/flower/pull/2924), " -"[#3166](https://github.com/adap/flower/pull/3166), " -"[#3031](https://github.com/adap/flower/pull/3031), " -"[#3057](https://github.com/adap/flower/pull/3057), " -"[#3000](https://github.com/adap/flower/pull/3000), " -"[#3113](https://github.com/adap/flower/pull/3113), " -"[#2957](https://github.com/adap/flower/pull/2957), " -"[#3183](https://github.com/adap/flower/pull/3183), " 
-"[#3180](https://github.com/adap/flower/pull/3180), " -"[#3035](https://github.com/adap/flower/pull/3035), " -"[#3189](https://github.com/adap/flower/pull/3189), " -"[#3185](https://github.com/adap/flower/pull/3185), " -"[#3190](https://github.com/adap/flower/pull/3190), " -"[#3191](https://github.com/adap/flower/pull/3191), " -"[#3195](https://github.com/adap/flower/pull/3195), " -"[#3197](https://github.com/adap/flower/pull/3197))" +"Add round number to fit and evaluate log messages " +"([#1266](https://github.com/adap/flower/pull/1266))" msgstr "" -#: ../../source/ref-changelog.md:23 +#: ../../source/ref-changelog.md:1042 msgid "" -"The Flower Next high-level API is stable! Flower Next is the future of " -"Flower - all new features (like Flower Mods) will be built on top of it. " -"You can start to migrate your existing projects to Flower Next by using " -"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " -"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." -" Flower Next allows you to run multiple projects concurrently (we call " -"this multi-run) and execute the same project in either simulation " -"environments or deployment environments without having to change a single" -" line of code. The best part? It's fully compatible with existing Flower " -"projects that use `Strategy`, `NumPyClient` & co." 
+"Add secure gRPC connection to the `advanced_tensorflow` code example " +"([#847](https://github.com/adap/flower/pull/847))" msgstr "" -#: ../../source/ref-changelog.md:25 +#: ../../source/ref-changelog.md:1043 msgid "" -"**Introduce Flower Next low-level API (preview)** " -"([#3062](https://github.com/adap/flower/pull/3062), " -"[#3034](https://github.com/adap/flower/pull/3034), " -"[#3069](https://github.com/adap/flower/pull/3069))" +"Update developer tooling " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" msgstr "" -#: ../../source/ref-changelog.md:27 +#: ../../source/ref-changelog.md:1044 msgid "" -"In addition to the Flower Next *high-level* API that uses `Strategy`, " -"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " -"new Flower Next *low-level* API. The low-level API allows for granular " -"control of every aspect of the learning process by sending/receiving " -"individual messages to/from client nodes. The new `ServerApp` supports " -"registering a custom `main` function that allows writing custom training " -"loops for methods like async FL, cyclic training, or federated analytics." -" The new `ClientApp` supports registering `train`, `evaluate` and `query`" -" functions that can access the raw message received from the `ServerApp`." -" New abstractions like `RecordSet`, `Message` and `Context` further " -"enable sending multiple models, multiple sets of config values and " -"metrics, stateful computations on the client node and implementations of " -"custom SMPC protocols, to name just a few." 
+"Rename ProtoBuf messages to improve consistency " +"([#1214](https://github.com/adap/flower/pull/1214), " +"[#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -#: ../../source/ref-changelog.md:29 -msgid "" -"**Introduce Flower Mods (preview)** " -"([#3054](https://github.com/adap/flower/pull/3054), " -"[#2911](https://github.com/adap/flower/pull/2911), " -"[#3083](https://github.com/adap/flower/pull/3083))" +#: ../../source/ref-changelog.md:1046 +msgid "v0.19.0 (2022-05-18)" msgstr "" -#: ../../source/ref-changelog.md:31 +#: ../../source/ref-changelog.md:1050 msgid "" -"Flower Modifiers (we call them Mods) can intercept messages and analyze, " -"edit or handle them directly. Mods can be used to develop pluggable " -"modules that work across different projects. Flower 1.8 already includes " -"mods to log the size of a message, the number of parameters sent over the" -" network, differential privacy with fixed clipping and adaptive clipping," -" local differential privacy and secure aggregation protocols SecAgg and " -"SecAgg+. The Flower Mods API is released as a preview, but researchers " -"can already use it to experiment with arbirtrary SMPC protocols." 
+"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" msgstr "" -#: ../../source/ref-changelog.md:33 +#: ../../source/ref-changelog.md:1052 msgid "" -"**Fine-tune LLMs with LLM FlowerTune** " -"([#3029](https://github.com/adap/flower/pull/3029), " -"[#3089](https://github.com/adap/flower/pull/3089), " -"[#3092](https://github.com/adap/flower/pull/3092), " -"[#3100](https://github.com/adap/flower/pull/3100), " -"[#3114](https://github.com/adap/flower/pull/3114), " -"[#3162](https://github.com/adap/flower/pull/3162), " -"[#3172](https://github.com/adap/flower/pull/3172))" +"The first preview release of Flower Baselines has arrived! We're " +"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " +"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " +"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " +"With this first preview release we're also inviting the community to " +"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" +"contribute-baselines.html)." msgstr "" -#: ../../source/ref-changelog.md:35 +#: ../../source/ref-changelog.md:1054 msgid "" -"We are introducing LLM FlowerTune, an introductory example that " -"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " -"the Alpaca-GPT4 dataset. The example is built to be easily adapted to use" -" different models and/or datasets. Read our blog post [LLM FlowerTune: " -"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" -"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." 
+"**C++ client SDK (preview) and code example** " +"([#1111](https://github.com/adap/flower/pull/1111))" msgstr "" -#: ../../source/ref-changelog.md:37 +#: ../../source/ref-changelog.md:1056 msgid "" -"**Introduce built-in Differential Privacy (preview)** " -"([#2798](https://github.com/adap/flower/pull/2798), " -"[#2959](https://github.com/adap/flower/pull/2959), " -"[#3038](https://github.com/adap/flower/pull/3038), " -"[#3147](https://github.com/adap/flower/pull/3147), " -"[#2909](https://github.com/adap/flower/pull/2909), " -"[#2893](https://github.com/adap/flower/pull/2893), " -"[#2892](https://github.com/adap/flower/pull/2892), " -"[#3039](https://github.com/adap/flower/pull/3039), " -"[#3074](https://github.com/adap/flower/pull/3074))" +"Preview support for Flower clients written in C++. The C++ preview " +"includes a Flower client SDK and a quickstart code example that " +"demonstrates a simple C++ client using the SDK." msgstr "" -#: ../../source/ref-changelog.md:39 +#: ../../source/ref-changelog.md:1058 msgid "" -"Built-in Differential Privacy is here! Flower supports both central and " -"local differential privacy (DP). Central DP can be configured with either" -" fixed or adaptive clipping. The clipping can happen either on the " -"server-side or the client-side. Local DP does both clipping and noising " -"on the client-side. A new documentation page [explains Differential " -"Privacy approaches](https://flower.ai/docs/framework/explanation-" -"differential-privacy.html) and a new how-to guide describes [how to use " -"the new Differential Privacy components](https://flower.ai/docs/framework" -"/how-to-use-differential-privacy.html) in Flower." 
+"**Add experimental support for Python 3.10 and Python 3.11** " +"([#1135](https://github.com/adap/flower/pull/1135))" msgstr "" -#: ../../source/ref-changelog.md:41 +#: ../../source/ref-changelog.md:1060 msgid "" -"**Introduce built-in Secure Aggregation (preview)** " -"([#3120](https://github.com/adap/flower/pull/3120), " -"[#3110](https://github.com/adap/flower/pull/3110), " -"[#3108](https://github.com/adap/flower/pull/3108))" +"Python 3.10 is the latest stable release of Python and Python 3.11 is due" +" to be released in October. This Flower release adds experimental support" +" for both Python versions." msgstr "" -#: ../../source/ref-changelog.md:43 +#: ../../source/ref-changelog.md:1062 msgid "" -"Built-in Secure Aggregation is here! Flower now supports different secure" -" aggregation protocols out-of-the-box. The best part? You can add secure " -"aggregation to your Flower projects with only a few lines of code. In " -"this initial release, we inlcude support for SecAgg and SecAgg+, but more" -" protocols will be implemented shortly. We'll also add detailed docs that" -" explain secure aggregation and how to use it in Flower. You can already " -"check out the new code example that shows how to use Flower to easily " -"combine Federated Learning, Differential Privacy and Secure Aggregation " -"in the same project." 
+"**Aggregate custom metrics through user-provided functions** " +"([#1144](https://github.com/adap/flower/pull/1144))" msgstr "" -#: ../../source/ref-changelog.md:45 +#: ../../source/ref-changelog.md:1064 msgid "" -"**Introduce** `flwr` **CLI (preview)** " -"([#2942](https://github.com/adap/flower/pull/2942), " -"[#3055](https://github.com/adap/flower/pull/3055), " -"[#3111](https://github.com/adap/flower/pull/3111), " -"[#3130](https://github.com/adap/flower/pull/3130), " -"[#3136](https://github.com/adap/flower/pull/3136), " -"[#3094](https://github.com/adap/flower/pull/3094), " -"[#3059](https://github.com/adap/flower/pull/3059), " -"[#3049](https://github.com/adap/flower/pull/3049), " -"[#3142](https://github.com/adap/flower/pull/3142))" +"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" +" customize the strategy. Built-in strategies support two new arguments, " +"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " +"allow passing custom metric aggregation functions." msgstr "" -#: ../../source/ref-changelog.md:47 +#: ../../source/ref-changelog.md:1066 msgid "" -"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" -" and then running them using the Simulation Engine (`flwr run`)." 
+"**User-configurable round timeout** " +"([#1162](https://github.com/adap/flower/pull/1162))" msgstr "" -#: ../../source/ref-changelog.md:49 +#: ../../source/ref-changelog.md:1068 msgid "" -"**Introduce Flower Next Simulation Engine** " -"([#3024](https://github.com/adap/flower/pull/3024), " -"[#3061](https://github.com/adap/flower/pull/3061), " -"[#2997](https://github.com/adap/flower/pull/2997), " -"[#2783](https://github.com/adap/flower/pull/2783), " -"[#3184](https://github.com/adap/flower/pull/3184), " -"[#3075](https://github.com/adap/flower/pull/3075), " -"[#3047](https://github.com/adap/flower/pull/3047), " -"[#2998](https://github.com/adap/flower/pull/2998), " -"[#3009](https://github.com/adap/flower/pull/3009), " -"[#3008](https://github.com/adap/flower/pull/3008))" +"A new configuration value allows the round timeout to be set for " +"`start_server` and `start_simulation`. If the `config` dictionary " +"contains a `round_timeout` key (with a `float` value in seconds), the " +"server will wait *at least* `round_timeout` seconds before it closes the " +"connection." msgstr "" -#: ../../source/ref-changelog.md:51 +#: ../../source/ref-changelog.md:1070 msgid "" -"The Flower Simulation Engine can now run Flower Next projects. For " -"notebook environments, there's also a new `run_simulation` function that " -"can run `ServerApp` and `ClientApp`." +"**Enable both federated evaluation and centralized evaluation to be used " +"at the same time in all built-in strategies** " +"([#1091](https://github.com/adap/flower/pull/1091))" msgstr "" -#: ../../source/ref-changelog.md:53 +#: ../../source/ref-changelog.md:1072 msgid "" -"**Handle SuperNode connection errors** " -"([#2969](https://github.com/adap/flower/pull/2969))" +"Built-in strategies can now perform both federated evaluation (i.e., " +"client-side) and centralized evaluation (i.e., server-side) in the same " +"round. Federated evaluation can be disabled by setting `fraction_eval` to" +" `0.0`." 
msgstr "" -#: ../../source/ref-changelog.md:55 +#: ../../source/ref-changelog.md:1074 msgid "" -"A SuperNode will now try to reconnect indefinitely to the SuperLink in " -"case of connection errors. The arguments `--max-retries` and `--max-wait-" -"time` can now be passed to the `flower-client-app` command. `--max-" -"retries` will define the number of tentatives the client should make " -"before it gives up trying to reconnect to the SuperLink, and, `--max-" -"wait-time` defines the time before the SuperNode gives up trying to " -"reconnect to the SuperLink." +"**Two new Jupyter Notebook tutorials** " +"([#1141](https://github.com/adap/flower/pull/1141))" msgstr "" -#: ../../source/ref-changelog.md:57 +#: ../../source/ref-changelog.md:1076 msgid "" -"**General updates to Flower Baselines** " -"([#2904](https://github.com/adap/flower/pull/2904), " -"[#2482](https://github.com/adap/flower/pull/2482), " -"[#2985](https://github.com/adap/flower/pull/2985), " -"[#2968](https://github.com/adap/flower/pull/2968))" +"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " +"basic and intermediate Flower features:" msgstr "" -#: ../../source/ref-changelog.md:59 +#: ../../source/ref-changelog.md:1078 msgid "" -"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " -"baseline. Several other baselined have been updated as well." 
+"*An Introduction to Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" msgstr "" -#: ../../source/ref-changelog.md:61 +#: ../../source/ref-changelog.md:1080 msgid "" -"**Improve documentation and translations** " -"([#3050](https://github.com/adap/flower/pull/3050), " -"[#3044](https://github.com/adap/flower/pull/3044), " -"[#3043](https://github.com/adap/flower/pull/3043), " -"[#2986](https://github.com/adap/flower/pull/2986), " -"[#3041](https://github.com/adap/flower/pull/3041), " -"[#3046](https://github.com/adap/flower/pull/3046), " -"[#3042](https://github.com/adap/flower/pull/3042), " -"[#2978](https://github.com/adap/flower/pull/2978), " -"[#2952](https://github.com/adap/flower/pull/2952), " -"[#3167](https://github.com/adap/flower/pull/3167), " -"[#2953](https://github.com/adap/flower/pull/2953), " -"[#3045](https://github.com/adap/flower/pull/3045), " -"[#2654](https://github.com/adap/flower/pull/2654), " -"[#3082](https://github.com/adap/flower/pull/3082), " -"[#2990](https://github.com/adap/flower/pull/2990), " -"[#2989](https://github.com/adap/flower/pull/2989))" +"*Using Strategies in Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" msgstr "" -#: ../../source/ref-changelog.md:63 +#: ../../source/ref-changelog.md:1082 msgid "" -"As usual, we merged many smaller and larger improvements to the " -"documentation. A special thank you goes to [Sebastian van der " -"Voort](https://github.com/svdvoort) for landing a big documentation PR!" 
+"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " +"([#1076](https://github.com/adap/flower/pull/1076))" msgstr "" -#: ../../source/ref-changelog.md:65 +#: ../../source/ref-changelog.md:1084 msgid "" -"**General updates to Flower Examples** " -"([3134](https://github.com/adap/flower/pull/3134), " -"[2996](https://github.com/adap/flower/pull/2996), " -"[2930](https://github.com/adap/flower/pull/2930), " -"[2967](https://github.com/adap/flower/pull/2967), " -"[2467](https://github.com/adap/flower/pull/2467), " -"[2910](https://github.com/adap/flower/pull/2910), " -"[#2918](https://github.com/adap/flower/pull/2918), " -"[#2773](https://github.com/adap/flower/pull/2773), " -"[#3063](https://github.com/adap/flower/pull/3063), " -"[#3116](https://github.com/adap/flower/pull/3116), " -"[#3117](https://github.com/adap/flower/pull/3117))" +"The new `FedAvgM` strategy implements Federated Averaging with Server " +"Momentum \\[Hsu et al., 2019\\]." msgstr "" -#: ../../source/ref-changelog.md:67 +#: ../../source/ref-changelog.md:1086 msgid "" -"Two new examples show federated training of a Vision Transformer (ViT) " -"and federated learning in a medical context using the popular MONAI " -"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" -" new Flower Next `ServerApp` and `ClientApp`. Many other examples " -"received considerable updates as well." 
+"**New advanced PyTorch code example** " +"([#1007](https://github.com/adap/flower/pull/1007))" msgstr "" -#: ../../source/ref-changelog.md:69 +#: ../../source/ref-changelog.md:1088 msgid "" -"**General improvements** " -"([#3171](https://github.com/adap/flower/pull/3171), " -"[3099](https://github.com/adap/flower/pull/3099), " -"[3003](https://github.com/adap/flower/pull/3003), " -"[3145](https://github.com/adap/flower/pull/3145), " -"[3017](https://github.com/adap/flower/pull/3017), " -"[3085](https://github.com/adap/flower/pull/3085), " -"[3012](https://github.com/adap/flower/pull/3012), " -"[3119](https://github.com/adap/flower/pull/3119), " -"[2991](https://github.com/adap/flower/pull/2991), " -"[2970](https://github.com/adap/flower/pull/2970), " -"[2980](https://github.com/adap/flower/pull/2980), " -"[3086](https://github.com/adap/flower/pull/3086), " -"[2932](https://github.com/adap/flower/pull/2932), " -"[2928](https://github.com/adap/flower/pull/2928), " -"[2941](https://github.com/adap/flower/pull/2941), " -"[2933](https://github.com/adap/flower/pull/2933), " -"[3181](https://github.com/adap/flower/pull/3181), " -"[2973](https://github.com/adap/flower/pull/2973), " -"[2992](https://github.com/adap/flower/pull/2992), " -"[2915](https://github.com/adap/flower/pull/2915), " -"[3040](https://github.com/adap/flower/pull/3040), " -"[3022](https://github.com/adap/flower/pull/3022), " -"[3032](https://github.com/adap/flower/pull/3032), " -"[2902](https://github.com/adap/flower/pull/2902), " -"[2931](https://github.com/adap/flower/pull/2931), " -"[3005](https://github.com/adap/flower/pull/3005), " -"[3132](https://github.com/adap/flower/pull/3132), " -"[3115](https://github.com/adap/flower/pull/3115), " -"[2944](https://github.com/adap/flower/pull/2944), " -"[3064](https://github.com/adap/flower/pull/3064), " -"[3106](https://github.com/adap/flower/pull/3106), " -"[2974](https://github.com/adap/flower/pull/2974), " 
-"[3178](https://github.com/adap/flower/pull/3178), " -"[2993](https://github.com/adap/flower/pull/2993), " -"[3186](https://github.com/adap/flower/pull/3186), " -"[3091](https://github.com/adap/flower/pull/3091), " -"[3125](https://github.com/adap/flower/pull/3125), " -"[3093](https://github.com/adap/flower/pull/3093), " -"[3013](https://github.com/adap/flower/pull/3013), " -"[3033](https://github.com/adap/flower/pull/3033), " -"[3133](https://github.com/adap/flower/pull/3133), " -"[3068](https://github.com/adap/flower/pull/3068), " -"[2916](https://github.com/adap/flower/pull/2916), " -"[2975](https://github.com/adap/flower/pull/2975), " -"[2984](https://github.com/adap/flower/pull/2984), " -"[2846](https://github.com/adap/flower/pull/2846), " -"[3077](https://github.com/adap/flower/pull/3077), " -"[3143](https://github.com/adap/flower/pull/3143), " -"[2921](https://github.com/adap/flower/pull/2921), " -"[3101](https://github.com/adap/flower/pull/3101), " -"[2927](https://github.com/adap/flower/pull/2927), " -"[2995](https://github.com/adap/flower/pull/2995), " -"[2972](https://github.com/adap/flower/pull/2972), " -"[2912](https://github.com/adap/flower/pull/2912), " -"[3065](https://github.com/adap/flower/pull/3065), " -"[3028](https://github.com/adap/flower/pull/3028), " -"[2922](https://github.com/adap/flower/pull/2922), " -"[2982](https://github.com/adap/flower/pull/2982), " -"[2914](https://github.com/adap/flower/pull/2914), " -"[3179](https://github.com/adap/flower/pull/3179), " -"[3080](https://github.com/adap/flower/pull/3080), " -"[2994](https://github.com/adap/flower/pull/2994), " -"[3187](https://github.com/adap/flower/pull/3187), " -"[2926](https://github.com/adap/flower/pull/2926), " -"[3018](https://github.com/adap/flower/pull/3018), " -"[3144](https://github.com/adap/flower/pull/3144), " -"[3011](https://github.com/adap/flower/pull/3011), " -"[#3152](https://github.com/adap/flower/pull/3152), " -"[#2836](https://github.com/adap/flower/pull/2836), " 
-"[#2929](https://github.com/adap/flower/pull/2929), " -"[#2943](https://github.com/adap/flower/pull/2943), " -"[#2955](https://github.com/adap/flower/pull/2955), " -"[#2954](https://github.com/adap/flower/pull/2954))" +"A new code example (`advanced_pytorch`) demonstrates advanced Flower " +"concepts with PyTorch." msgstr "" -#: ../../source/ref-changelog.md:75 -msgid "v1.7.0 (2024-02-05)" +#: ../../source/ref-changelog.md:1090 +msgid "" +"**New JAX code example** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" msgstr "" -#: ../../source/ref-changelog.md:81 +#: ../../source/ref-changelog.md:1092 msgid "" -"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " -"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " -"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " -"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " -"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " -"Shaaban`, `Yan Gao`, `Yasar Abbas` " +"A new code example (`jax_from_centralized_to_federated`) shows federated " +"learning with JAX and Flower." msgstr "" -#: ../../source/ref-changelog.md:85 +#: ../../source/ref-changelog.md:1096 msgid "" -"**Introduce stateful clients (experimental)** " -"([#2770](https://github.com/adap/flower/pull/2770), " -"[#2686](https://github.com/adap/flower/pull/2686), " -"[#2696](https://github.com/adap/flower/pull/2696), " -"[#2643](https://github.com/adap/flower/pull/2643), " -"[#2769](https://github.com/adap/flower/pull/2769))" +"New option to keep Ray running if Ray was already initialized in " +"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" msgstr "" -#: ../../source/ref-changelog.md:87 +#: ../../source/ref-changelog.md:1097 msgid "" -"Subclasses of `Client` and `NumPyClient` can now store local state that " -"remains on the client. 
Let's start with the highlight first: this new " -"feature is compatible with both simulated clients (via " -"`start_simulation`) and networked clients (via `start_client`). It's also" -" the first preview of new abstractions like `Context` and `RecordSet`. " -"Clients can access state of type `RecordSet` via `state: RecordSet = " -"self.context.state`. Changes to this `RecordSet` are preserved across " -"different rounds of execution to enable stateful computations in a " -"unified way across simulation and deployment." +"Add support for custom `ClientManager` as a `start_simulation` parameter " +"([#1171](https://github.com/adap/flower/pull/1171))" msgstr "" -#: ../../source/ref-changelog.md:89 +#: ../../source/ref-changelog.md:1098 msgid "" -"**Improve performance** " -"([#2293](https://github.com/adap/flower/pull/2293))" +"New documentation for [implementing " +"strategies](https://flower.ai/docs/framework/how-to-implement-" +"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175))" msgstr "" -#: ../../source/ref-changelog.md:91 +#: ../../source/ref-changelog.md:1099 msgid "" -"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" -"place aggregation to reduce memory consumption. The Flower client " -"serialization/deserialization has been rewritten from the ground up, " -"which results in significant speedups, especially when the client-side " -"training time is short." 
+"New mobile-friendly documentation theme " +"([#1174](https://github.com/adap/flower/pull/1174))" msgstr "" -#: ../../source/ref-changelog.md:93 +#: ../../source/ref-changelog.md:1100 msgid "" -"**Support Federated Learning with Apple MLX and Flower** " -"([#2693](https://github.com/adap/flower/pull/2693))" +"Limit version range for (optional) `ray` dependency to include only " +"compatible releases (`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" msgstr "" -#: ../../source/ref-changelog.md:95 +#: ../../source/ref-changelog.md:1104 msgid "" -"Flower has official support for federated learning using [Apple " -"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " -"example." +"**Remove deprecated support for Python 3.6** " +"([#871](https://github.com/adap/flower/pull/871))" msgstr "" -#: ../../source/ref-changelog.md:97 +#: ../../source/ref-changelog.md:1105 msgid "" -"**Introduce new XGBoost cyclic strategy** " -"([#2666](https://github.com/adap/flower/pull/2666), " -"[#2668](https://github.com/adap/flower/pull/2668))" +"**Remove deprecated KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" msgstr "" -#: ../../source/ref-changelog.md:99 +#: ../../source/ref-changelog.md:1106 msgid "" -"A new strategy called `FedXgbCyclic` supports a client-by-client style of" -" training (often called cyclic). The `xgboost-comprehensive` code example" -" shows how to use it in a full project. In addition to that, `xgboost-" -"comprehensive` now also supports simulation mode. With this, Flower " -"offers best-in-class XGBoost support." 
+"**Remove deprecated no-op extra installs** " +"([#973](https://github.com/adap/flower/pull/973))" msgstr "" -#: ../../source/ref-changelog.md:101 +#: ../../source/ref-changelog.md:1107 msgid "" -"**Support Python 3.11** " -"([#2394](https://github.com/adap/flower/pull/2394))" +"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " +"([#869](https://github.com/adap/flower/pull/869))" msgstr "" -#: ../../source/ref-changelog.md:103 +#: ../../source/ref-changelog.md:1108 msgid "" -"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " -"ensure better support for users using more recent Python versions." +"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " +"([#1107](https://github.com/adap/flower/pull/1107))" msgstr "" -#: ../../source/ref-changelog.md:105 +#: ../../source/ref-changelog.md:1109 msgid "" -"**Update gRPC and ProtoBuf dependencies** " -"([#2814](https://github.com/adap/flower/pull/2814))" +"**Remove deprecated DefaultStrategy strategy** " +"([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -#: ../../source/ref-changelog.md:107 +#: ../../source/ref-changelog.md:1110 msgid "" -"The `grpcio` and `protobuf` dependencies were updated to their latest " -"versions for improved security and performance." 
+"**Remove deprecated support for eval_fn accuracy return value** " +"([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -#: ../../source/ref-changelog.md:109 +#: ../../source/ref-changelog.md:1111 msgid "" -"**Introduce Docker image for Flower server** " -"([#2700](https://github.com/adap/flower/pull/2700), " -"[#2688](https://github.com/adap/flower/pull/2688), " -"[#2705](https://github.com/adap/flower/pull/2705), " -"[#2695](https://github.com/adap/flower/pull/2695), " -"[#2747](https://github.com/adap/flower/pull/2747), " -"[#2746](https://github.com/adap/flower/pull/2746), " -"[#2680](https://github.com/adap/flower/pull/2680), " -"[#2682](https://github.com/adap/flower/pull/2682), " -"[#2701](https://github.com/adap/flower/pull/2701))" +"**Remove deprecated support for passing initial parameters as NumPy " +"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" +msgstr "" + +#: ../../source/ref-changelog.md:1113 +msgid "v0.18.0 (2022-02-28)" msgstr "" -#: ../../source/ref-changelog.md:111 +#: ../../source/ref-changelog.md:1117 msgid "" -"The Flower server can now be run using an official Docker image. A new " -"how-to guide explains [how to run Flower using " -"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html). An official Flower client Docker image will follow." +"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " +"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" msgstr "" -#: ../../source/ref-changelog.md:113 +#: ../../source/ref-changelog.md:1119 msgid "" -"**Introduce** `flower-via-docker-compose` **example** " -"([#2626](https://github.com/adap/flower/pull/2626))" +"Simulations (using the Virtual Client Engine through `start_simulation`) " +"now work more smoothly on Jupyter Notebooks (incl. 
Google Colab) after " +"installing Flower with the `simulation` extra (`pip install " +"'flwr[simulation]'`)." msgstr "" -#: ../../source/ref-changelog.md:115 +#: ../../source/ref-changelog.md:1121 msgid "" -"**Introduce** `quickstart-sklearn-tabular` **example** " -"([#2719](https://github.com/adap/flower/pull/2719))" +"**New Jupyter Notebook code example** " +"([#833](https://github.com/adap/flower/pull/833))" msgstr "" -#: ../../source/ref-changelog.md:117 +#: ../../source/ref-changelog.md:1123 msgid "" -"**Introduce** `custom-metrics` **example** " -"([#1958](https://github.com/adap/flower/pull/1958))" +"A new code example (`quickstart_simulation`) demonstrates Flower " +"simulations using the Virtual Client Engine through Jupyter Notebook " +"(incl. Google Colab)." msgstr "" -#: ../../source/ref-changelog.md:119 +#: ../../source/ref-changelog.md:1125 msgid "" -"**Update code examples to use Flower Datasets** " -"([#2450](https://github.com/adap/flower/pull/2450), " -"[#2456](https://github.com/adap/flower/pull/2456), " -"[#2318](https://github.com/adap/flower/pull/2318), " -"[#2712](https://github.com/adap/flower/pull/2712))" +"**Client properties (feature preview)** " +"([#795](https://github.com/adap/flower/pull/795))" msgstr "" -#: ../../source/ref-changelog.md:121 +#: ../../source/ref-changelog.md:1127 msgid "" -"Several code examples were updated to use [Flower " -"Datasets](https://flower.ai/docs/datasets/)." +"Clients can implement a new method `get_properties` to enable server-side" +" strategies to query client properties." +msgstr "" + +#: ../../source/ref-changelog.md:1129 +msgid "" +"**Experimental Android support with TFLite** " +"([#865](https://github.com/adap/flower/pull/865))" +msgstr "" + +#: ../../source/ref-changelog.md:1131 +msgid "" +"Android support has finally arrived in `main`! Flower is both client-" +"agnostic and framework-agnostic by design. 
One can integrate arbitrary " +"client platforms and with this release, using Flower on Android has " +"become a lot easier." msgstr "" -#: ../../source/ref-changelog.md:123 +#: ../../source/ref-changelog.md:1133 msgid "" -"**General updates to Flower Examples** " -"([#2381](https://github.com/adap/flower/pull/2381), " -"[#2805](https://github.com/adap/flower/pull/2805), " -"[#2782](https://github.com/adap/flower/pull/2782), " -"[#2806](https://github.com/adap/flower/pull/2806), " -"[#2829](https://github.com/adap/flower/pull/2829), " -"[#2825](https://github.com/adap/flower/pull/2825), " -"[#2816](https://github.com/adap/flower/pull/2816), " -"[#2726](https://github.com/adap/flower/pull/2726), " -"[#2659](https://github.com/adap/flower/pull/2659), " -"[#2655](https://github.com/adap/flower/pull/2655))" +"The example uses TFLite on the client side, along with a new " +"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " +"still experimental, but they are a first step towards a fully-fledged " +"Android SDK and a unified `FedAvg` implementation that integrated the new" +" functionality from `FedAvgAndroid`." msgstr "" -#: ../../source/ref-changelog.md:125 -msgid "Many Flower code examples received substantial updates." +#: ../../source/ref-changelog.md:1135 +msgid "" +"**Make gRPC keepalive time user-configurable and decrease default " +"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" msgstr "" -#: ../../source/ref-changelog.md:127 ../../source/ref-changelog.md:220 -msgid "**Update Flower Baselines**" +#: ../../source/ref-changelog.md:1137 +msgid "" +"The default gRPC keepalive time has been reduced to increase the " +"compatibility of Flower with more cloud environments (for example, " +"Microsoft Azure). Users can configure the keepalive time to customize the" +" gRPC stack based on specific requirements." 
msgstr "" -#: ../../source/ref-changelog.md:129 +#: ../../source/ref-changelog.md:1139 msgid "" -"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " -"[#2771](https://github.com/adap/flower/pull/2771))" +"**New differential privacy example using Opacus and PyTorch** " +"([#805](https://github.com/adap/flower/pull/805))" msgstr "" -#: ../../source/ref-changelog.md:130 -msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" +#: ../../source/ref-changelog.md:1141 +msgid "" +"A new code example (`opacus`) demonstrates differentially-private " +"federated learning with Opacus, PyTorch, and Flower." msgstr "" -#: ../../source/ref-changelog.md:131 -msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" +#: ../../source/ref-changelog.md:1143 +msgid "" +"**New Hugging Face Transformers code example** " +"([#863](https://github.com/adap/flower/pull/863))" msgstr "" -#: ../../source/ref-changelog.md:132 -msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" +#: ../../source/ref-changelog.md:1145 +msgid "" +"A new code example (`quickstart_huggingface`) demonstrates usage of " +"Hugging Face Transformers with Flower." msgstr "" -#: ../../source/ref-changelog.md:133 -msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" +#: ../../source/ref-changelog.md:1147 +msgid "" +"**New MLCube code example** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" msgstr "" -#: ../../source/ref-changelog.md:134 -msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" +#: ../../source/ref-changelog.md:1149 +msgid "" +"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " +"with Flower." 
msgstr "" -#: ../../source/ref-changelog.md:136 +#: ../../source/ref-changelog.md:1151 msgid "" -"**Improve documentation** " -"([#2674](https://github.com/adap/flower/pull/2674), " -"[#2480](https://github.com/adap/flower/pull/2480), " -"[#2826](https://github.com/adap/flower/pull/2826), " -"[#2727](https://github.com/adap/flower/pull/2727), " -"[#2761](https://github.com/adap/flower/pull/2761), " -"[#2900](https://github.com/adap/flower/pull/2900))" +"**SSL-enabled server and client** " +"([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" msgstr "" -#: ../../source/ref-changelog.md:138 +#: ../../source/ref-changelog.md:1153 msgid "" -"**Improved testing and development infrastructure** " -"([#2797](https://github.com/adap/flower/pull/2797), " -"[#2676](https://github.com/adap/flower/pull/2676), " -"[#2644](https://github.com/adap/flower/pull/2644), " -"[#2656](https://github.com/adap/flower/pull/2656), " -"[#2848](https://github.com/adap/flower/pull/2848), " -"[#2675](https://github.com/adap/flower/pull/2675), " -"[#2735](https://github.com/adap/flower/pull/2735), " -"[#2767](https://github.com/adap/flower/pull/2767), " -"[#2732](https://github.com/adap/flower/pull/2732), " -"[#2744](https://github.com/adap/flower/pull/2744), " -"[#2681](https://github.com/adap/flower/pull/2681), " -"[#2699](https://github.com/adap/flower/pull/2699), " -"[#2745](https://github.com/adap/flower/pull/2745), " -"[#2734](https://github.com/adap/flower/pull/2734), " -"[#2731](https://github.com/adap/flower/pull/2731), " -"[#2652](https://github.com/adap/flower/pull/2652), " -"[#2720](https://github.com/adap/flower/pull/2720), " -"[#2721](https://github.com/adap/flower/pull/2721), " 
-"[#2717](https://github.com/adap/flower/pull/2717), " -"[#2864](https://github.com/adap/flower/pull/2864), " -"[#2694](https://github.com/adap/flower/pull/2694), " -"[#2709](https://github.com/adap/flower/pull/2709), " -"[#2658](https://github.com/adap/flower/pull/2658), " -"[#2796](https://github.com/adap/flower/pull/2796), " -"[#2692](https://github.com/adap/flower/pull/2692), " -"[#2657](https://github.com/adap/flower/pull/2657), " -"[#2813](https://github.com/adap/flower/pull/2813), " -"[#2661](https://github.com/adap/flower/pull/2661), " -"[#2398](https://github.com/adap/flower/pull/2398))" +"SSL enables secure encrypted connections between clients and servers. " +"This release open-sources the Flower secure gRPC implementation to make " +"encrypted communication channels accessible to all Flower users." msgstr "" -#: ../../source/ref-changelog.md:140 +#: ../../source/ref-changelog.md:1155 msgid "" -"The Flower testing and development infrastructure has received " -"substantial updates. This makes Flower 1.7 the most tested release ever." 
+"**Updated** `FedAdam` **and** `FedYogi` **strategies** " +"([#885](https://github.com/adap/flower/pull/885), " +"[#895](https://github.com/adap/flower/pull/895))" msgstr "" -#: ../../source/ref-changelog.md:142 +#: ../../source/ref-changelog.md:1157 msgid "" -"**Update dependencies** " -"([#2753](https://github.com/adap/flower/pull/2753), " -"[#2651](https://github.com/adap/flower/pull/2651), " -"[#2739](https://github.com/adap/flower/pull/2739), " -"[#2837](https://github.com/adap/flower/pull/2837), " -"[#2788](https://github.com/adap/flower/pull/2788), " -"[#2811](https://github.com/adap/flower/pull/2811), " -"[#2774](https://github.com/adap/flower/pull/2774), " -"[#2790](https://github.com/adap/flower/pull/2790), " -"[#2751](https://github.com/adap/flower/pull/2751), " -"[#2850](https://github.com/adap/flower/pull/2850), " -"[#2812](https://github.com/adap/flower/pull/2812), " -"[#2872](https://github.com/adap/flower/pull/2872), " -"[#2736](https://github.com/adap/flower/pull/2736), " -"[#2756](https://github.com/adap/flower/pull/2756), " -"[#2857](https://github.com/adap/flower/pull/2857), " -"[#2757](https://github.com/adap/flower/pull/2757), " -"[#2810](https://github.com/adap/flower/pull/2810), " -"[#2740](https://github.com/adap/flower/pull/2740), " -"[#2789](https://github.com/adap/flower/pull/2789))" +"`FedAdam` and `FedAdam` match the latest version of the Adaptive " +"Federated Optimization paper." 
msgstr "" -#: ../../source/ref-changelog.md:144 +#: ../../source/ref-changelog.md:1159 msgid "" -"**General improvements** " -"([#2803](https://github.com/adap/flower/pull/2803), " -"[#2847](https://github.com/adap/flower/pull/2847), " -"[#2877](https://github.com/adap/flower/pull/2877), " -"[#2690](https://github.com/adap/flower/pull/2690), " -"[#2889](https://github.com/adap/flower/pull/2889), " -"[#2874](https://github.com/adap/flower/pull/2874), " -"[#2819](https://github.com/adap/flower/pull/2819), " -"[#2689](https://github.com/adap/flower/pull/2689), " -"[#2457](https://github.com/adap/flower/pull/2457), " -"[#2870](https://github.com/adap/flower/pull/2870), " -"[#2669](https://github.com/adap/flower/pull/2669), " -"[#2876](https://github.com/adap/flower/pull/2876), " -"[#2885](https://github.com/adap/flower/pull/2885), " -"[#2858](https://github.com/adap/flower/pull/2858), " -"[#2867](https://github.com/adap/flower/pull/2867), " -"[#2351](https://github.com/adap/flower/pull/2351), " -"[#2886](https://github.com/adap/flower/pull/2886), " -"[#2860](https://github.com/adap/flower/pull/2860), " -"[#2828](https://github.com/adap/flower/pull/2828), " -"[#2869](https://github.com/adap/flower/pull/2869), " -"[#2875](https://github.com/adap/flower/pull/2875), " -"[#2733](https://github.com/adap/flower/pull/2733), " -"[#2488](https://github.com/adap/flower/pull/2488), " -"[#2646](https://github.com/adap/flower/pull/2646), " -"[#2879](https://github.com/adap/flower/pull/2879), " -"[#2821](https://github.com/adap/flower/pull/2821), " -"[#2855](https://github.com/adap/flower/pull/2855), " -"[#2800](https://github.com/adap/flower/pull/2800), " -"[#2807](https://github.com/adap/flower/pull/2807), " -"[#2801](https://github.com/adap/flower/pull/2801), " -"[#2804](https://github.com/adap/flower/pull/2804), " -"[#2851](https://github.com/adap/flower/pull/2851), " -"[#2787](https://github.com/adap/flower/pull/2787), " -"[#2852](https://github.com/adap/flower/pull/2852), " 
-"[#2672](https://github.com/adap/flower/pull/2672), " -"[#2759](https://github.com/adap/flower/pull/2759))" +"**Initialize** `start_simulation` **with a list of client IDs** " +"([#860](https://github.com/adap/flower/pull/860))" msgstr "" -#: ../../source/ref-changelog.md:148 +#: ../../source/ref-changelog.md:1161 msgid "" -"**Deprecate** `start_numpy_client` " -"([#2563](https://github.com/adap/flower/pull/2563), " -"[#2718](https://github.com/adap/flower/pull/2718))" +"`start_simulation` can now be called with a list of client IDs " +"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " +"`client_fn` whenever a client needs to be initialized, which can make it " +"easier to load data partitions that are not accessible through `int` " +"identifiers." msgstr "" -#: ../../source/ref-changelog.md:150 +#: ../../source/ref-changelog.md:1165 msgid "" -"Until now, clients of type `NumPyClient` needed to be started via " -"`start_numpy_client`. In our efforts to consolidate framework APIs, we " -"have introduced changes, and now all client types should start via " -"`start_client`. To continue using `NumPyClient` clients, you simply need " -"to first call the `.to_client()` method and then pass returned `Client` " -"object to `start_client`. The examples and the documentation have been " -"updated accordingly." +"Update `num_examples` calculation in PyTorch code examples in " +"([#909](https://github.com/adap/flower/pull/909))" msgstr "" -#: ../../source/ref-changelog.md:152 +#: ../../source/ref-changelog.md:1166 msgid "" -"**Deprecate legacy DP wrappers** " -"([#2749](https://github.com/adap/flower/pull/2749))" +"Expose Flower version through `flwr.__version__` " +"([#952](https://github.com/adap/flower/pull/952))" msgstr "" -#: ../../source/ref-changelog.md:154 +#: ../../source/ref-changelog.md:1167 msgid "" -"Legacy DP wrapper classes are deprecated, but still functional. 
This is " -"in preparation for an all-new pluggable version of differential privacy " -"support in Flower." +"`start_server` in `app.py` now returns a `History` object containing " +"metrics from training ([#974](https://github.com/adap/flower/pull/974))" msgstr "" -#: ../../source/ref-changelog.md:156 +#: ../../source/ref-changelog.md:1168 msgid "" -"**Make optional arg** `--callable` **in** `flower-client` **a required " -"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" +"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " +"([#978](https://github.com/adap/flower/pull/978))" msgstr "" -#: ../../source/ref-changelog.md:158 +#: ../../source/ref-changelog.md:1169 msgid "" -"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " -"([#2890](https://github.com/adap/flower/pull/2890))" +"Increase sleep time after server start to three seconds in all code " +"examples ([#1086](https://github.com/adap/flower/pull/1086))" msgstr "" -#: ../../source/ref-changelog.md:160 +#: ../../source/ref-changelog.md:1170 msgid "" -"**Drop experimental** `Task` **fields** " -"([#2866](https://github.com/adap/flower/pull/2866), " -"[#2865](https://github.com/adap/flower/pull/2865))" +"Added a new FAQ section to the documentation " +"([#948](https://github.com/adap/flower/pull/948))" msgstr "" -#: ../../source/ref-changelog.md:162 +#: ../../source/ref-changelog.md:1171 msgid "" -"Experimental fields `sa`, `legacy_server_message` and " -"`legacy_client_message` were removed from `Task` message. The removed " -"fields are superseded by the new `RecordSet` abstraction." +"And many more under-the-hood changes, library updates, documentation " +"changes, and tooling improvements!" 
msgstr "" -#: ../../source/ref-changelog.md:164 +#: ../../source/ref-changelog.md:1175 msgid "" -"**Retire MXNet examples** " -"([#2724](https://github.com/adap/flower/pull/2724))" +"**Removed** `flwr_example` **and** `flwr_experimental` **from release " +"build** ([#869](https://github.com/adap/flower/pull/869))" msgstr "" -#: ../../source/ref-changelog.md:166 +#: ../../source/ref-changelog.md:1177 msgid "" -"The development of the MXNet fremework has ended and the project is now " -"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " -"examples won't receive updates." +"The packages `flwr_example` and `flwr_experimental` have been deprecated " +"since Flower 0.12.0 and they are not longer included in Flower release " +"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " +"an upcoming release." msgstr "" -#: ../../source/ref-changelog.md:168 -msgid "v1.6.0 (2023-11-28)" +#: ../../source/ref-changelog.md:1179 +msgid "v0.17.0 (2021-09-24)" msgstr "" -#: ../../source/ref-changelog.md:174 +#: ../../source/ref-changelog.md:1183 msgid "" -"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " -"`Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Gabriel " -"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," -" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " -"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " -"`cnxdeveloper`, `k3nfalt` " +"**Experimental virtual client engine** " +"([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" msgstr "" -#: ../../source/ref-changelog.md:178 +#: ../../source/ref-changelog.md:1185 msgid "" -"**Add experimental support for Python 3.12** " -"([#2565](https://github.com/adap/flower/pull/2565))" +"One of Flower's goals is to enable research at scale. This release " +"enables a first (experimental) peek at a major new feature, codenamed the" +" virtual client engine. Virtual clients enable simulations that scale to " +"a (very) large number of clients on a single machine or compute cluster. " +"The easiest way to test the new functionality is to look at the two new " +"code examples called `quickstart_simulation` and `simulation_pytorch`." msgstr "" -#: ../../source/ref-changelog.md:180 +#: ../../source/ref-changelog.md:1187 msgid "" -"**Add new XGBoost examples** " -"([#2612](https://github.com/adap/flower/pull/2612), " -"[#2554](https://github.com/adap/flower/pull/2554), " -"[#2617](https://github.com/adap/flower/pull/2617), " -"[#2618](https://github.com/adap/flower/pull/2618), " -"[#2619](https://github.com/adap/flower/pull/2619), " -"[#2567](https://github.com/adap/flower/pull/2567))" +"The feature is still experimental, so there's no stability guarantee for " +"the API. It's also not quite ready for prime time and comes with a few " +"known caveats. However, those who are curious are encouraged to try it " +"out and share their thoughts." 
msgstr "" -#: ../../source/ref-changelog.md:182 +#: ../../source/ref-changelog.md:1189 msgid "" -"We have added a new `xgboost-quickstart` example alongside a new " -"`xgboost-comprehensive` example that goes more in-depth." +"**New built-in strategies** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" msgstr "" -#: ../../source/ref-changelog.md:184 +#: ../../source/ref-changelog.md:1191 msgid "" -"**Add Vertical FL example** " -"([#2598](https://github.com/adap/flower/pull/2598))" +"FedYogi - Federated learning strategy using Yogi on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" msgstr "" -#: ../../source/ref-changelog.md:186 +#: ../../source/ref-changelog.md:1192 msgid "" -"We had many questions about Vertical Federated Learning using Flower, so " -"we decided to add an simple example for it on the [Titanic " -"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " -"tutorial (in the README)." +"FedAdam - Federated learning strategy using Adam on server-side. 
" +"Implementation based on https://arxiv.org/abs/2003.00295" msgstr "" -#: ../../source/ref-changelog.md:188 +#: ../../source/ref-changelog.md:1194 msgid "" -"**Support custom** `ClientManager` **in** `start_driver()` " -"([#2292](https://github.com/adap/flower/pull/2292))" +"**New PyTorch Lightning code example** " +"([#617](https://github.com/adap/flower/pull/617))" msgstr "" -#: ../../source/ref-changelog.md:190 +#: ../../source/ref-changelog.md:1196 msgid "" -"**Update REST API to support create and delete nodes** " -"([#2283](https://github.com/adap/flower/pull/2283))" +"**New Variational Auto-Encoder code example** " +"([#752](https://github.com/adap/flower/pull/752))" msgstr "" -#: ../../source/ref-changelog.md:192 +#: ../../source/ref-changelog.md:1198 msgid "" -"**Update the Android SDK** " -"([#2187](https://github.com/adap/flower/pull/2187))" +"**New scikit-learn code example** " +"([#748](https://github.com/adap/flower/pull/748))" msgstr "" -#: ../../source/ref-changelog.md:194 -msgid "Add gRPC request-response capability to the Android SDK." +#: ../../source/ref-changelog.md:1200 +msgid "" +"**New experimental TensorBoard strategy** " +"([#789](https://github.com/adap/flower/pull/789))" msgstr "" -#: ../../source/ref-changelog.md:196 +#: ../../source/ref-changelog.md:1204 msgid "" -"**Update the C++ SDK** " -"([#2537](https://github.com/adap/flower/pull/2537), " -"[#2528](https://github.com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" +"Improved advanced TensorFlow code example " +"([#769](https://github.com/adap/flower/pull/769))" msgstr "" -#: ../../source/ref-changelog.md:198 -msgid "Add gRPC request-response capability to the C++ SDK." 
+#: ../../source/ref-changelog.md:1205 +msgid "" +"Warning when `min_available_clients` is misconfigured " +"([#830](https://github.com/adap/flower/pull/830))" msgstr "" -#: ../../source/ref-changelog.md:200 +#: ../../source/ref-changelog.md:1206 msgid "" -"**Make HTTPS the new default** " -"([#2591](https://github.com/adap/flower/pull/2591), " -"[#2636](https://github.com/adap/flower/pull/2636))" +"Improved gRPC server docs " +"([#841](https://github.com/adap/flower/pull/841))" msgstr "" -#: ../../source/ref-changelog.md:202 +#: ../../source/ref-changelog.md:1207 msgid "" -"Flower is moving to HTTPS by default. The new `flower-server` requires " -"passing `--certificates`, but users can enable `--insecure` to use HTTP " -"for prototyping. The same applies to `flower-client`, which can either " -"use user-provided credentials or gRPC-bundled certificates to connect to " -"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " -"enable insecure HTTP connections." +"Improved error message in `NumPyClient` " +"([#851](https://github.com/adap/flower/pull/851))" msgstr "" -#: ../../source/ref-changelog.md:204 +#: ../../source/ref-changelog.md:1208 msgid "" -"For backward compatibility, `start_client()` and `start_numpy_client()` " -"will still start in insecure mode by default. In a future release, " -"insecure connections will require user opt-in by passing `insecure=True`." 
+"Improved PyTorch quickstart code example " +"([#852](https://github.com/adap/flower/pull/852))" msgstr "" -#: ../../source/ref-changelog.md:206 +#: ../../source/ref-changelog.md:1212 msgid "" -"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), " -"[#2493](https://github.com/adap/flower/pull/2493))" +"**Disabled final distributed evaluation** " +"([#800](https://github.com/adap/flower/pull/800))" msgstr "" -#: ../../source/ref-changelog.md:208 +#: ../../source/ref-changelog.md:1214 msgid "" -"Using the `client_fn`, Flower clients can interchangeably run as " -"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" -" `start_simulation`) without requiring changes to how the client class is" -" defined and instantiated. The `to_client()` function is introduced to " -"convert a `NumPyClient` to a `Client`." +"Prior behaviour was to perform a final round of distributed evaluation on" +" all connected clients, which is often not required (e.g., when using " +"server-side evaluation). The prior behaviour can be enabled by passing " +"`force_final_distributed_eval=True` to `start_server`." msgstr "" -#: ../../source/ref-changelog.md:210 +#: ../../source/ref-changelog.md:1216 msgid "" -"**Add new** `Bulyan` **strategy** " -"([#1817](https://github.com/adap/flower/pull/1817), " -"[#1891](https://github.com/adap/flower/pull/1891))" +"**Renamed q-FedAvg strategy** " +"([#802](https://github.com/adap/flower/pull/802))" msgstr "" -#: ../../source/ref-changelog.md:212 +#: ../../source/ref-changelog.md:1218 msgid "" -"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " -"2018](https://arxiv.org/abs/1802.07927)" +"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " +"the notation given in the original paper (q-FFL is the optimization " +"objective, q-FedAvg is the proposed solver). 
Note the original (now " +"deprecated) `QffedAvg` class is still available for compatibility reasons" +" (it will be removed in a future release)." msgstr "" -#: ../../source/ref-changelog.md:214 +#: ../../source/ref-changelog.md:1220 msgid "" -"**Add new** `XGB Bagging` **strategy** " -"([#2611](https://github.com/adap/flower/pull/2611))" +"**Deprecated and renamed code example** `simulation_pytorch` **to** " +"`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" msgstr "" -#: ../../source/ref-changelog.md:216 ../../source/ref-changelog.md:218 +#: ../../source/ref-changelog.md:1222 msgid "" -"**Introduce `WorkloadState`** " -"([#2564](https://github.com/adap/flower/pull/2564), " -"[#2632](https://github.com/adap/flower/pull/2632))" +"This example has been replaced by a new example. The new example is based" +" on the experimental virtual client engine, which will become the new " +"default way of doing most types of large-scale simulations in Flower. The" +" existing example was kept for reference purposes, but it might be " +"removed in the future." 
msgstr "" -#: ../../source/ref-changelog.md:222 -msgid "" -"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " -"[#2286](https://github.com/adap/flower/pull/2286), " -"[#2509](https://github.com/adap/flower/pull/2509))" +#: ../../source/ref-changelog.md:1224 +msgid "v0.16.0 (2021-05-11)" msgstr "" -#: ../../source/ref-changelog.md:224 +#: ../../source/ref-changelog.md:1228 msgid "" -"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " -"[#2400](https://github.com/adap/flower/pull/2400))" +"**New built-in strategies** " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/ref-changelog.md:226 -msgid "" -"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " -"[#2507](https://github.com/adap/flower/pull/2507))" +#: ../../source/ref-changelog.md:1230 +msgid "(abstract) FedOpt" msgstr "" -#: ../../source/ref-changelog.md:228 +#: ../../source/ref-changelog.md:1233 msgid "" -"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " -"[#2508](https://github.com/adap/flower/pull/2508))" +"**Custom metrics for server and strategies** " +"([#717](https://github.com/adap/flower/pull/717))" msgstr "" -#: ../../source/ref-changelog.md:230 -msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" +#: ../../source/ref-changelog.md:1235 +msgid "" +"The Flower server is now fully task-agnostic, all remaining instances of " +"task-specific metrics (such as `accuracy`) have been replaced by custom " +"metrics dictionaries. Flower 0.15 introduced the capability to pass a " +"dictionary containing custom metrics from client to server. As of this " +"release, custom metrics replace task-specific metrics on the server." 
msgstr "" -#: ../../source/ref-changelog.md:232 -msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" +#: ../../source/ref-changelog.md:1237 +msgid "" +"Custom metric dictionaries are now used in two user-facing APIs: they are" +" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " +"they enable evaluation functions passed to built-in strategies (via " +"`eval_fn`) to return more than two evaluation metrics. Strategies can " +"even return *aggregated* metrics dictionaries for the server to keep " +"track of." msgstr "" -#: ../../source/ref-changelog.md:234 -msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" +#: ../../source/ref-changelog.md:1239 +msgid "" +"Strategy implementations should migrate their `aggregate_fit` and " +"`aggregate_evaluate` methods to the new return type (e.g., by simply " +"returning an empty `{}`), server-side evaluation functions should migrate" +" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." msgstr "" -#: ../../source/ref-changelog.md:236 -msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" +#: ../../source/ref-changelog.md:1241 +msgid "" +"Flower 0.15-style return types are deprecated (but still supported), " +"compatibility will be removed in a future release." msgstr "" -#: ../../source/ref-changelog.md:238 -msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +#: ../../source/ref-changelog.md:1243 +msgid "" +"**Migration warnings for deprecated functionality** " +"([#690](https://github.com/adap/flower/pull/690))" msgstr "" -#: ../../source/ref-changelog.md:240 -msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" +#: ../../source/ref-changelog.md:1245 +msgid "" +"Earlier versions of Flower were often migrated to new APIs, while " +"maintaining compatibility with legacy APIs. This release introduces " +"detailed warning messages if usage of deprecated APIs is detected. 
The " +"new warning messages often provide details on how to migrate to more " +"recent APIs, thus easing the transition from one release to another." msgstr "" -#: ../../source/ref-changelog.md:242 -msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" +#: ../../source/ref-changelog.md:1247 +msgid "" +"Improved docs and docstrings " +"([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" msgstr "" -#: ../../source/ref-changelog.md:244 -msgid "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +#: ../../source/ref-changelog.md:1249 +msgid "MXNet example and documentation" msgstr "" -#: ../../source/ref-changelog.md:246 +#: ../../source/ref-changelog.md:1251 msgid "" -"**General updates to Flower Examples** " -"([#2384](https://github.com/adap/flower/pull/2384), " -"[#2425](https://github.com/adap/flower/pull/2425), " -"[#2526](https://github.com/adap/flower/pull/2526), " -"[#2302](https://github.com/adap/flower/pull/2302), " -"[#2545](https://github.com/adap/flower/pull/2545))" +"FedBN implementation in example PyTorch: From Centralized To Federated " +"([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" msgstr "" -#: ../../source/ref-changelog.md:248 +#: ../../source/ref-changelog.md:1255 msgid "" -"**General updates to Flower Baselines** " -"([#2301](https://github.com/adap/flower/pull/2301), " -"[#2305](https://github.com/adap/flower/pull/2305), " -"[#2307](https://github.com/adap/flower/pull/2307), " -"[#2327](https://github.com/adap/flower/pull/2327), " -"[#2435](https://github.com/adap/flower/pull/2435), " -"[#2462](https://github.com/adap/flower/pull/2462), " -"[#2463](https://github.com/adap/flower/pull/2463), " -"[#2461](https://github.com/adap/flower/pull/2461), " 
-"[#2469](https://github.com/adap/flower/pull/2469), " -"[#2466](https://github.com/adap/flower/pull/2466), " -"[#2471](https://github.com/adap/flower/pull/2471), " -"[#2472](https://github.com/adap/flower/pull/2472), " -"[#2470](https://github.com/adap/flower/pull/2470))" +"**Serialization-agnostic server** " +"([#721](https://github.com/adap/flower/pull/721))" msgstr "" -#: ../../source/ref-changelog.md:250 +#: ../../source/ref-changelog.md:1257 msgid "" -"**General updates to the simulation engine** " -"([#2331](https://github.com/adap/flower/pull/2331), " -"[#2447](https://github.com/adap/flower/pull/2447), " -"[#2448](https://github.com/adap/flower/pull/2448), " -"[#2294](https://github.com/adap/flower/pull/2294))" +"The Flower server is now fully serialization-agnostic. Prior usage of " +"class `Weights` (which represents parameters as deserialized NumPy " +"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). " +"`Parameters` objects are fully serialization-agnostic and represents " +"parameters as byte arrays, the `tensor_type` attributes indicates how " +"these byte arrays should be interpreted (e.g., for " +"serialization/deserialization)." msgstr "" -#: ../../source/ref-changelog.md:252 +#: ../../source/ref-changelog.md:1259 msgid "" -"**General updates to Flower SDKs** " -"([#2288](https://github.com/adap/flower/pull/2288), " -"[#2429](https://github.com/adap/flower/pull/2429), " -"[#2555](https://github.com/adap/flower/pull/2555), " -"[#2543](https://github.com/adap/flower/pull/2543), " -"[#2544](https://github.com/adap/flower/pull/2544), " -"[#2597](https://github.com/adap/flower/pull/2597), " -"[#2623](https://github.com/adap/flower/pull/2623))" +"Built-in strategies implement this approach by handling serialization and" +" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " +"implementations should update to the slightly changed Strategy method " +"definitions. 
Strategy authors can consult PR " +"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" +" easily migrate to the new format." msgstr "" -#: ../../source/ref-changelog.md:254 +#: ../../source/ref-changelog.md:1261 msgid "" -"**General improvements** " -"([#2309](https://github.com/adap/flower/pull/2309), " -"[#2310](https://github.com/adap/flower/pull/2310), " -"[#2313](https://github.com/adap/flower/pull/2313), " -"[#2316](https://github.com/adap/flower/pull/2316), " -"[#2317](https://github.com/adap/flower/pull/2317), " -"[#2349](https://github.com/adap/flower/pull/2349), " -"[#2360](https://github.com/adap/flower/pull/2360), " -"[#2402](https://github.com/adap/flower/pull/2402), " -"[#2446](https://github.com/adap/flower/pull/2446), " -"[#2561](https://github.com/adap/flower/pull/2561), " -"[#2273](https://github.com/adap/flower/pull/2273), " -"[#2267](https://github.com/adap/flower/pull/2267), " -"[#2274](https://github.com/adap/flower/pull/2274), " -"[#2275](https://github.com/adap/flower/pull/2275), " -"[#2432](https://github.com/adap/flower/pull/2432), " -"[#2251](https://github.com/adap/flower/pull/2251), " -"[#2321](https://github.com/adap/flower/pull/2321), " -"[#1936](https://github.com/adap/flower/pull/1936), " -"[#2408](https://github.com/adap/flower/pull/2408), " -"[#2413](https://github.com/adap/flower/pull/2413), " -"[#2401](https://github.com/adap/flower/pull/2401), " -"[#2531](https://github.com/adap/flower/pull/2531), " -"[#2534](https://github.com/adap/flower/pull/2534), " -"[#2535](https://github.com/adap/flower/pull/2535), " -"[#2521](https://github.com/adap/flower/pull/2521), " -"[#2553](https://github.com/adap/flower/pull/2553), " -"[#2596](https://github.com/adap/flower/pull/2596))" +"Deprecated `flwr.server.Server.evaluate`, use " +"`flwr.server.Server.evaluate_round` instead " +"([#717](https://github.com/adap/flower/pull/717))" msgstr "" -#: ../../source/ref-changelog.md:256 ../../source/ref-changelog.md:346 -#: 
../../source/ref-changelog.md:410 ../../source/ref-changelog.md:464 -#: ../../source/ref-changelog.md:531 -msgid "Flower received many improvements under the hood, too many to list here." +#: ../../source/ref-changelog.md:1263 +msgid "v0.15.0 (2021-03-12)" msgstr "" -#: ../../source/ref-changelog.md:260 +#: ../../source/ref-changelog.md:1267 msgid "" -"**Remove support for Python 3.7** " -"([#2280](https://github.com/adap/flower/pull/2280), " -"[#2299](https://github.com/adap/flower/pull/2299), " -"[#2304](https://github.com/adap/flower/pull/2304), " -"[#2306](https://github.com/adap/flower/pull/2306), " -"[#2355](https://github.com/adap/flower/pull/2355), " -"[#2356](https://github.com/adap/flower/pull/2356))" +"**Server-side parameter initialization** " +"([#658](https://github.com/adap/flower/pull/658))" msgstr "" -#: ../../source/ref-changelog.md:262 +#: ../../source/ref-changelog.md:1269 msgid "" -"Python 3.7 support was deprecated in Flower 1.5, and this release removes" -" support. Flower now requires Python 3.8." +"Model parameters can now be initialized on the server-side. Server-side " +"parameter initialization works via a new `Strategy` method called " +"`initialize_parameters`." msgstr "" -#: ../../source/ref-changelog.md:264 +#: ../../source/ref-changelog.md:1271 msgid "" -"**Remove experimental argument** `rest` **from** `start_client` " -"([#2324](https://github.com/adap/flower/pull/2324))" +"Built-in strategies support a new constructor argument called " +"`initial_parameters` to set the initial parameters. Built-in strategies " +"will provide these initial parameters to the server on startup and then " +"delete them to free the memory afterwards." msgstr "" -#: ../../source/ref-changelog.md:266 +#: ../../source/ref-changelog.md:1290 msgid "" -"The (still experimental) argument `rest` was removed from `start_client` " -"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " -"experimental REST API instead." 
+"If no initial parameters are provided to the strategy, the server will " +"continue to use the current behaviour (namely, it will ask one of the " +"connected clients for its parameters and use these as the initial global " +"parameters)." msgstr "" -#: ../../source/ref-changelog.md:268 -msgid "v1.5.0 (2023-08-31)" +#: ../../source/ref-changelog.md:1294 +msgid "" +"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " +"`flwr.server.strategy.FedAvg`, which is equivalent)" msgstr "" -#: ../../source/ref-changelog.md:274 -msgid "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +#: ../../source/ref-changelog.md:1296 +msgid "v0.14.0 (2021-02-18)" msgstr "" -#: ../../source/ref-changelog.md:278 +#: ../../source/ref-changelog.md:1300 msgid "" -"**Introduce new simulation engine** " -"([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " +"([#610](https://github.com/adap/flower/pull/610) " +"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" msgstr "" -#: ../../source/ref-changelog.md:280 +#: ../../source/ref-changelog.md:1302 msgid "" -"The new simulation engine has been rewritten from the ground up, yet it " -"remains fully backwards compatible. It offers much improved stability and" -" memory handling, especially when working with GPUs. Simulations " -"transparently adapt to different settings to scale simulation in CPU-" -"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." 
+"Clients can now return an additional dictionary mapping `str` keys to " +"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " +"This means one can return almost arbitrary values from `fit`/`evaluate` " +"and make use of them on the server side!" msgstr "" -#: ../../source/ref-changelog.md:282 +#: ../../source/ref-changelog.md:1304 msgid "" -"Comprehensive documentation includes a new [how-to run " -"simulations](https://flower.ai/docs/framework/how-to-run-" -"simulations.html) guide, new [simulation-" -"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " -"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" -"tensorflow.html) notebooks, and a new [YouTube tutorial " -"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." +"This improvement also allowed for more consistent return types between " +"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " +"dict)` representing the loss, number of examples, and a dictionary " +"holding arbitrary problem-specific values like accuracy." 
msgstr "" -#: ../../source/ref-changelog.md:284 +#: ../../source/ref-changelog.md:1306 msgid "" -"**Restructure Flower Docs** " -"([#1824](https://github.com/adap/flower/pull/1824), " -"[#1865](https://github.com/adap/flower/pull/1865), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1887](https://github.com/adap/flower/pull/1887), " -"[#1919](https://github.com/adap/flower/pull/1919), " -"[#1922](https://github.com/adap/flower/pull/1922), " -"[#1920](https://github.com/adap/flower/pull/1920), " -"[#1923](https://github.com/adap/flower/pull/1923), " -"[#1924](https://github.com/adap/flower/pull/1924), " -"[#1962](https://github.com/adap/flower/pull/1962), " -"[#2006](https://github.com/adap/flower/pull/2006), " -"[#2133](https://github.com/adap/flower/pull/2133), " -"[#2203](https://github.com/adap/flower/pull/2203), " -"[#2215](https://github.com/adap/flower/pull/2215), " -"[#2122](https://github.com/adap/flower/pull/2122), " -"[#2223](https://github.com/adap/flower/pull/2223), " -"[#2219](https://github.com/adap/flower/pull/2219), " -"[#2232](https://github.com/adap/flower/pull/2232), " -"[#2233](https://github.com/adap/flower/pull/2233), " -"[#2234](https://github.com/adap/flower/pull/2234), " -"[#2235](https://github.com/adap/flower/pull/2235), " -"[#2237](https://github.com/adap/flower/pull/2237), " -"[#2238](https://github.com/adap/flower/pull/2238), " -"[#2242](https://github.com/adap/flower/pull/2242), " -"[#2231](https://github.com/adap/flower/pull/2231), " -"[#2243](https://github.com/adap/flower/pull/2243), " -"[#2227](https://github.com/adap/flower/pull/2227))" +"In case you wondered: this feature is compatible with existing projects, " +"the additional dictionary return value is optional. New code should " +"however migrate to the new return types to be compatible with upcoming " +"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " +"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " +"details." 
msgstr "" -#: ../../source/ref-changelog.md:286 +#: ../../source/ref-changelog.md:1308 msgid "" -"Much effort went into a completely restructured Flower docs experience. " -"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " -"divided into Flower Framework, Flower Baselines, Flower Android SDK, " -"Flower iOS SDK, and code example projects." +"*Code example:* note the additional dictionary return values in both " +"`FlwrClient.fit` and `FlwrClient.evaluate`:" msgstr "" -#: ../../source/ref-changelog.md:288 +#: ../../source/ref-changelog.md:1323 msgid "" -"**Introduce Flower Swift SDK** " -"([#1858](https://github.com/adap/flower/pull/1858), " -"[#1897](https://github.com/adap/flower/pull/1897))" +"**Generalized** `config` **argument in** `Client.fit` **and** " +"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" msgstr "" -#: ../../source/ref-changelog.md:290 +#: ../../source/ref-changelog.md:1325 msgid "" -"This is the first preview release of the Flower Swift SDK. Flower support" -" on iOS is improving, and alongside the Swift SDK and code example, there" -" is now also an iOS quickstart tutorial." +"The `config` argument used to be of type `Dict[str, str]`, which means " +"that dictionary values were expected to be strings. The new release " +"generalizes this to enable values of the following types: `bool`, " +"`bytes`, `float`, `int`, `str`." msgstr "" -#: ../../source/ref-changelog.md:292 +#: ../../source/ref-changelog.md:1327 msgid "" -"**Introduce Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +"This means one can now pass almost arbitrary values to `fit`/`evaluate` " +"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" +"side and `int(config[\"epochs\"])` on the client side!" msgstr "" -#: ../../source/ref-changelog.md:294 +#: ../../source/ref-changelog.md:1329 msgid "" -"This is the first preview release of the Flower Kotlin SDK. 
Flower " -"support on Android is improving, and alongside the Kotlin SDK and code " -"example, there is now also an Android quickstart tutorial." +"*Code example:* note that the `config` dictionary now contains non-`str` " +"values in both `Client.fit` and `Client.evaluate`:" msgstr "" -#: ../../source/ref-changelog.md:296 -msgid "" -"**Introduce new end-to-end testing infrastructure** " -"([#1842](https://github.com/adap/flower/pull/1842), " -"[#2071](https://github.com/adap/flower/pull/2071), " -"[#2072](https://github.com/adap/flower/pull/2072), " -"[#2068](https://github.com/adap/flower/pull/2068), " -"[#2067](https://github.com/adap/flower/pull/2067), " -"[#2069](https://github.com/adap/flower/pull/2069), " -"[#2073](https://github.com/adap/flower/pull/2073), " -"[#2070](https://github.com/adap/flower/pull/2070), " -"[#2074](https://github.com/adap/flower/pull/2074), " -"[#2082](https://github.com/adap/flower/pull/2082), " -"[#2084](https://github.com/adap/flower/pull/2084), " -"[#2093](https://github.com/adap/flower/pull/2093), " -"[#2109](https://github.com/adap/flower/pull/2109), " -"[#2095](https://github.com/adap/flower/pull/2095), " -"[#2140](https://github.com/adap/flower/pull/2140), " -"[#2137](https://github.com/adap/flower/pull/2137), " -"[#2165](https://github.com/adap/flower/pull/2165))" +#: ../../source/ref-changelog.md:1346 +msgid "v0.13.0 (2021-01-08)" msgstr "" -#: ../../source/ref-changelog.md:298 +#: ../../source/ref-changelog.md:1350 msgid "" -"A new testing infrastructure ensures that new changes stay compatible " -"with existing framework integrations or strategies." 
+"New example: PyTorch From Centralized To Federated " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/ref-changelog.md:300 -msgid "**Deprecate Python 3.7**" +#: ../../source/ref-changelog.md:1351 +msgid "Improved documentation" msgstr "" -#: ../../source/ref-changelog.md:302 -msgid "" -"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" -" Python 3.7 is now deprecated and will be removed in an upcoming release." +#: ../../source/ref-changelog.md:1352 +msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" msgstr "" -#: ../../source/ref-changelog.md:304 -msgid "" -"**Add new** `FedTrimmedAvg` **strategy** " -"([#1769](https://github.com/adap/flower/pull/1769), " -"[#1853](https://github.com/adap/flower/pull/1853))" +#: ../../source/ref-changelog.md:1353 +msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" msgstr "" -#: ../../source/ref-changelog.md:306 +#: ../../source/ref-changelog.md:1354 msgid "" -"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " -"2018](https://arxiv.org/abs/1803.01498)." +"Updated examples documentation " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/ref-changelog.md:308 +#: ../../source/ref-changelog.md:1355 msgid "" -"**Introduce start_driver** " -"([#1697](https://github.com/adap/flower/pull/1697))" +"Removed obsolete documentation " +"([#548](https://github.com/adap/flower/pull/548))" msgstr "" -#: ../../source/ref-changelog.md:310 -msgid "" -"In addition to `start_server` and using the raw Driver API, there is a " -"new `start_driver` function that allows for running `start_server` " -"scripts as a Flower driver with only a single-line code change. Check out" -" the `mt-pytorch` code example to see a working example using " -"`start_driver`." 
+#: ../../source/ref-changelog.md:1357 +msgid "Bugfix:" msgstr "" -#: ../../source/ref-changelog.md:312 +#: ../../source/ref-changelog.md:1359 msgid "" -"**Add parameter aggregation to** `mt-pytorch` **code example** " -"([#1785](https://github.com/adap/flower/pull/1785))" +"`Server.fit` does not disconnect clients when finished, disconnecting the" +" clients is now handled in `flwr.server.start_server` " +"([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))." msgstr "" -#: ../../source/ref-changelog.md:314 -msgid "" -"The `mt-pytorch` example shows how to aggregate parameters when writing a" -" driver script. The included `driver.py` and `server.py` have been " -"aligned to demonstrate both the low-level way and the high-level way of " -"building server-side logic." +#: ../../source/ref-changelog.md:1361 +msgid "v0.12.0 (2020-12-07)" msgstr "" -#: ../../source/ref-changelog.md:316 -msgid "" -"**Migrate experimental REST API to Starlette** " -"([2171](https://github.com/adap/flower/pull/2171))" +#: ../../source/ref-changelog.md:1363 ../../source/ref-changelog.md:1379 +msgid "Important changes:" msgstr "" -#: ../../source/ref-changelog.md:318 +#: ../../source/ref-changelog.md:1365 msgid "" -"The (experimental) REST API used to be implemented in " -"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" -" use [Starlette](https://www.starlette.io/) directly." +"Added an example for embedded devices " +"([#507](https://github.com/adap/flower/pull/507))" msgstr "" -#: ../../source/ref-changelog.md:320 +#: ../../source/ref-changelog.md:1366 msgid "" -"Please note: The REST request-response API is still experimental and will" -" likely change significantly over time." 
+"Added a new NumPyClient (in addition to the existing KerasClient) " +"([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508))" msgstr "" -#: ../../source/ref-changelog.md:322 +#: ../../source/ref-changelog.md:1367 msgid "" -"**Introduce experimental gRPC request-response API** " -"([#1867](https://github.com/adap/flower/pull/1867), " -"[#1901](https://github.com/adap/flower/pull/1901))" +"Deprecated `flwr_example` package and started to migrate examples into " +"the top-level `examples` directory " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" msgstr "" -#: ../../source/ref-changelog.md:324 -msgid "" -"In addition to the existing gRPC API (based on bidirectional streaming) " -"and the experimental REST API, there is now a new gRPC API that uses a " -"request-response model to communicate with client nodes." +#: ../../source/ref-changelog.md:1369 +msgid "v0.11.0 (2020-11-30)" msgstr "" -#: ../../source/ref-changelog.md:326 -msgid "" -"Please note: The gRPC request-response API is still experimental and will" -" likely change significantly over time." +#: ../../source/ref-changelog.md:1371 +msgid "Incompatible changes:" msgstr "" -#: ../../source/ref-changelog.md:328 +#: ../../source/ref-changelog.md:1373 msgid "" -"**Replace the experimental** `start_client(rest=True)` **with the new** " -"`start_client(transport=\"rest\")` " -"([#1880](https://github.com/adap/flower/pull/1880))" +"Renamed strategy methods " +"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " +"Flower's public APIs. Other public methods/functions (e.g., every method " +"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " +"which is why we're removing it from the four methods in Strategy. 
To " +"migrate rename the following `Strategy` methods accordingly:" msgstr "" -#: ../../source/ref-changelog.md:330 -msgid "" -"The (experimental) `start_client` argument `rest` was deprecated in " -"favour of a new argument `transport`. `start_client(transport=\"rest\")` " -"will yield the same behaviour as `start_client(rest=True)` did before. " -"All code should migrate to the new argument `transport`. The deprecated " -"argument `rest` will be removed in a future release." +#: ../../source/ref-changelog.md:1374 +msgid "`on_configure_evaluate` => `configure_evaluate`" msgstr "" -#: ../../source/ref-changelog.md:332 -msgid "" -"**Add a new gRPC option** " -"([#2197](https://github.com/adap/flower/pull/2197))" +#: ../../source/ref-changelog.md:1375 +msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" msgstr "" -#: ../../source/ref-changelog.md:334 -msgid "" -"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" -" option set to 0 by default. This prevents the clients from sending " -"keepalive pings when there is no outstanding stream." +#: ../../source/ref-changelog.md:1376 +msgid "`on_configure_fit` => `configure_fit`" msgstr "" -#: ../../source/ref-changelog.md:336 -msgid "" -"**Improve example notebooks** " -"([#2005](https://github.com/adap/flower/pull/2005))" +#: ../../source/ref-changelog.md:1377 +msgid "`on_aggregate_fit` => `aggregate_fit`" msgstr "" -#: ../../source/ref-changelog.md:338 -msgid "There's a new 30min Federated Learning PyTorch tutorial!" +#: ../../source/ref-changelog.md:1381 +msgid "" +"Deprecated `DefaultStrategy` " +"([#479](https://github.com/adap/flower/pull/479)). To migrate use " +"`FedAvg` instead." 
msgstr "" -#: ../../source/ref-changelog.md:340 +#: ../../source/ref-changelog.md:1382 msgid "" -"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), " -"[#1981](https://github.com/adap/flower/pull/1981), " -"[#1988](https://github.com/adap/flower/pull/1988), " -"[#1984](https://github.com/adap/flower/pull/1984), " -"[#1982](https://github.com/adap/flower/pull/1982), " -"[#2112](https://github.com/adap/flower/pull/2112), " -"[#2144](https://github.com/adap/flower/pull/2144), " -"[#2174](https://github.com/adap/flower/pull/2174), " -"[#2225](https://github.com/adap/flower/pull/2225), " -"[#2183](https://github.com/adap/flower/pull/2183))" +"Simplified examples and baselines " +"([#484](https://github.com/adap/flower/pull/484))." msgstr "" -#: ../../source/ref-changelog.md:342 +#: ../../source/ref-changelog.md:1383 msgid "" -"Many examples have received significant updates, including simplified " -"advanced-tensorflow and advanced-pytorch examples, improved macOS " -"compatibility of TensorFlow examples, and code examples for simulation. A" -" major upgrade is that all code examples now have a `requirements.txt` " -"(in addition to `pyproject.toml`)." +"Removed presently unused `on_conclude_round` from strategy interface " +"([#483](https://github.com/adap/flower/pull/483))." msgstr "" -#: ../../source/ref-changelog.md:344 +#: ../../source/ref-changelog.md:1384 msgid "" -"**General improvements** " -"([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" +"Set minimal Python version to 3.6.1 instead of 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." 
msgstr "" -#: ../../source/ref-changelog.md:352 -msgid "v1.4.0 (2023-04-21)" +#: ../../source/ref-changelog.md:1385 +msgid "" +"Improved `Strategy` docstrings " +"([#470](https://github.com/adap/flower/pull/470))." msgstr "" -#: ../../source/ref-changelog.md:358 -msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " -"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " -"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " -"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " -"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" +#: ../../source/ref-example-projects.rst:2 +msgid "Example projects" msgstr "" -#: ../../source/ref-changelog.md:362 +#: ../../source/ref-example-projects.rst:4 msgid "" -"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " -"example)** ([#1694](https://github.com/adap/flower/pull/1694), " -"[#1709](https://github.com/adap/flower/pull/1709), " -"[#1715](https://github.com/adap/flower/pull/1715), " -"[#1717](https://github.com/adap/flower/pull/1717), " -"[#1763](https://github.com/adap/flower/pull/1763), " -"[#1795](https://github.com/adap/flower/pull/1795))" +"Flower comes with a number of usage examples. The examples demonstrate " +"how Flower can be used to federate different kinds of existing machine " +"learning pipelines, usually leveraging popular machine learning " +"frameworks such as `PyTorch `_ or `TensorFlow " +"`_." msgstr "" -#: ../../source/ref-changelog.md:364 -msgid "" -"XGBoost is a tree-based ensemble machine learning algorithm that uses " -"gradient boosting to improve model accuracy. 
We added a new `FedXgbNnAvg`" -" " -"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," -" and a [code example](https://github.com/adap/flower/tree/main/examples" -"/xgboost-quickstart) that demonstrates the usage of this new strategy in " -"an XGBoost project." +#: ../../source/ref-example-projects.rst:9 +msgid "The following examples are available as standalone projects." msgstr "" -#: ../../source/ref-changelog.md:366 +#: ../../source/ref-example-projects.rst:12 +#, fuzzy +msgid "Quickstart TensorFlow/Keras" +msgstr "빠른 시작 튜토리얼" + +#: ../../source/ref-example-projects.rst:14 msgid "" -"**Introduce iOS SDK (preview)** " -"([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" +"The TensorFlow/Keras quickstart example shows CIFAR-10 image " +"classification with MobileNetV2:" msgstr "" -#: ../../source/ref-changelog.md:368 +#: ../../source/ref-example-projects.rst:17 msgid "" -"This is a major update for anyone wanting to implement Federated Learning" -" on iOS mobile devices. We now have a swift iOS SDK present under " -"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" -" that will facilitate greatly the app creating process. To showcase its " -"use, the [iOS " -"example](https://github.com/adap/flower/tree/main/examples/ios) has also " -"been updated!" 
+"`Quickstart TensorFlow (Code) " +"`_" msgstr "" -#: ../../source/ref-changelog.md:370 -msgid "" -"**Introduce new \"What is Federated Learning?\" tutorial** " -"([#1657](https://github.com/adap/flower/pull/1657), " -"[#1721](https://github.com/adap/flower/pull/1721))" +#: ../../source/ref-example-projects.rst:19 +msgid ":doc:`Quickstart TensorFlow (Tutorial) `" msgstr "" -#: ../../source/ref-changelog.md:372 +#: ../../source/ref-example-projects.rst:20 msgid "" -"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" -"what-is-federated-learning.html) in our documentation explains the basics" -" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" -" Learning to start their journey with Flower. Forward it to anyone who's " -"interested in Federated Learning!" +"`Quickstart TensorFlow (Blog Post) `_" msgstr "" -#: ../../source/ref-changelog.md:374 -msgid "" -"**Introduce new Flower Baseline: FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679))" +#: ../../source/ref-example-projects.rst:24 +#: ../../source/tutorial-quickstart-pytorch.rst:4 +msgid "Quickstart PyTorch" msgstr "" -#: ../../source/ref-changelog.md:376 +#: ../../source/ref-example-projects.rst:26 msgid "" -"This new baseline replicates the MNIST+CNN task from the paper [Federated" -" Optimization in Heterogeneous Networks (Li et al., " -"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," -" which aims at making convergence more robust in heterogeneous settings." 
+"The PyTorch quickstart example shows CIFAR-10 image classification with a" +" simple Convolutional Neural Network:" msgstr "" -#: ../../source/ref-changelog.md:378 +#: ../../source/ref-example-projects.rst:29 msgid "" -"**Introduce new Flower Baseline: FedAvg FEMNIST** " -"([#1655](https://github.com/adap/flower/pull/1655))" +"`Quickstart PyTorch (Code) " +"`_" msgstr "" -#: ../../source/ref-changelog.md:380 -msgid "" -"This new baseline replicates an experiment evaluating the performance of " -"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " -"Benchmark for Federated Settings (Caldas et al., " -"2018)](https://arxiv.org/abs/1812.01097)." +#: ../../source/ref-example-projects.rst:31 +msgid ":doc:`Quickstart PyTorch (Tutorial) `" msgstr "" -#: ../../source/ref-changelog.md:382 -msgid "" -"**Introduce (experimental) REST API** " -"([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" +#: ../../source/ref-example-projects.rst:34 +msgid "PyTorch: From Centralized To Federated" msgstr "" -#: ../../source/ref-changelog.md:384 +#: ../../source/ref-example-projects.rst:36 msgid "" -"A new REST API has been introduced as an alternative to the gRPC-based " -"communication stack. In this initial version, the REST API only supports " -"anonymous clients." +"This example shows how a regular PyTorch project can be federated using " +"Flower:" msgstr "" -#: ../../source/ref-changelog.md:386 +#: ../../source/ref-example-projects.rst:38 msgid "" -"Please note: The REST API is still experimental and will likely change " -"significantly over time." 
+"`PyTorch: From Centralized To Federated (Code) " +"`_" msgstr "" -#: ../../source/ref-changelog.md:388 +#: ../../source/ref-example-projects.rst:40 msgid "" -"**Improve the (experimental) Driver API** " -"([#1663](https://github.com/adap/flower/pull/1663), " -"[#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" +":doc:`PyTorch: From Centralized To Federated (Tutorial) `" msgstr "" -#: ../../source/ref-changelog.md:390 -msgid "" -"The Driver API is still an experimental feature, but this release " -"introduces some major upgrades. One of the main improvements is the " -"introduction of an SQLite database to store server state on disk (instead" -" of in-memory). Another improvement is that tasks (instructions or " -"results) that have been delivered will now be deleted. This greatly " -"improves the memory efficiency of a long-running Flower server." +#: ../../source/ref-example-projects.rst:44 +msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" msgstr "" -#: ../../source/ref-changelog.md:392 +#: ../../source/ref-example-projects.rst:46 msgid "" -"**Fix spilling issues related to Ray during simulations** " -"([#1698](https://github.com/adap/flower/pull/1698))" +"This example shows how Flower can be used to build a federated learning " +"system that run across Raspberry Pi and Nvidia Jetson:" msgstr "" -#: ../../source/ref-changelog.md:394 +#: ../../source/ref-example-projects.rst:49 msgid "" -"While running long simulations, `ray` was sometimes spilling huge amounts" -" of data that would make the training unable to continue. This is now " -"fixed! 
🎉" +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " +"`_" msgstr "" -#: ../../source/ref-changelog.md:396 +#: ../../source/ref-example-projects.rst:51 msgid "" -"**Add new example using** `TabNet` **and Flower** " -"([#1725](https://github.com/adap/flower/pull/1725))" +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " +"`_" msgstr "" -#: ../../source/ref-changelog.md:398 +#: ../../source/ref-faq.rst:4 msgid "" -"TabNet is a powerful and flexible framework for training machine learning" -" models on tabular data. We now have a federated example using Flower: " -"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" -"/quickstart-tabnet)." +"This page collects answers to commonly asked questions about Federated " +"Learning with Flower." msgstr "" -#: ../../source/ref-changelog.md:400 -msgid "" -"**Add new how-to guide for monitoring simulations** " -"([#1649](https://github.com/adap/flower/pull/1649))" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" msgstr "" -#: ../../source/ref-changelog.md:402 +#: ../../source/ref-faq.rst:9 msgid "" -"We now have a documentation guide to help users monitor their performance" -" during simulations." +"Yes, it can! Flower even comes with a few under-the-hood optimizations to" +" make it work even better on Colab. Here's a quickstart example:" msgstr "" -#: ../../source/ref-changelog.md:404 +#: ../../source/ref-faq.rst:11 msgid "" -"**Add training metrics to** `History` **object during simulations** " -"([#1696](https://github.com/adap/flower/pull/1696))" +"`Flower simulation PyTorch " +"`_" msgstr "" -#: ../../source/ref-changelog.md:406 +#: ../../source/ref-faq.rst:12 msgid "" -"The `fit_metrics_aggregation_fn` can be used to aggregate training " -"metrics, but previous releases did not save the results in the `History` " -"object. This is now the case!" 
+"`Flower simulation TensorFlow/Keras " +"`_" +msgstr "" + +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" msgstr "" -#: ../../source/ref-changelog.md:408 +#: ../../source/ref-faq.rst:16 msgid "" -"**General improvements** " -"([#1659](https://github.com/adap/flower/pull/1659), " -"[#1646](https://github.com/adap/flower/pull/1646), " -"[#1647](https://github.com/adap/flower/pull/1647), " -"[#1471](https://github.com/adap/flower/pull/1471), " -"[#1648](https://github.com/adap/flower/pull/1648), " -"[#1651](https://github.com/adap/flower/pull/1651), " -"[#1652](https://github.com/adap/flower/pull/1652), " -"[#1653](https://github.com/adap/flower/pull/1653), " -"[#1659](https://github.com/adap/flower/pull/1659), " -"[#1665](https://github.com/adap/flower/pull/1665), " -"[#1670](https://github.com/adap/flower/pull/1670), " -"[#1672](https://github.com/adap/flower/pull/1672), " -"[#1677](https://github.com/adap/flower/pull/1677), " -"[#1684](https://github.com/adap/flower/pull/1684), " -"[#1683](https://github.com/adap/flower/pull/1683), " -"[#1686](https://github.com/adap/flower/pull/1686), " -"[#1682](https://github.com/adap/flower/pull/1682), " -"[#1685](https://github.com/adap/flower/pull/1685), " -"[#1692](https://github.com/adap/flower/pull/1692), " -"[#1705](https://github.com/adap/flower/pull/1705), " -"[#1708](https://github.com/adap/flower/pull/1708), " -"[#1711](https://github.com/adap/flower/pull/1711), " -"[#1713](https://github.com/adap/flower/pull/1713), " -"[#1714](https://github.com/adap/flower/pull/1714), " -"[#1718](https://github.com/adap/flower/pull/1718), " -"[#1716](https://github.com/adap/flower/pull/1716), " -"[#1723](https://github.com/adap/flower/pull/1723), " -"[#1735](https://github.com/adap/flower/pull/1735), " -"[#1678](https://github.com/adap/flower/pull/1678), " -"[#1750](https://github.com/adap/flower/pull/1750), " -"[#1753](https://github.com/adap/flower/pull/1753), " 
-"[#1736](https://github.com/adap/flower/pull/1736), " -"[#1766](https://github.com/adap/flower/pull/1766), " -"[#1760](https://github.com/adap/flower/pull/1760), " -"[#1775](https://github.com/adap/flower/pull/1775), " -"[#1776](https://github.com/adap/flower/pull/1776), " -"[#1777](https://github.com/adap/flower/pull/1777), " -"[#1779](https://github.com/adap/flower/pull/1779), " -"[#1784](https://github.com/adap/flower/pull/1784), " -"[#1773](https://github.com/adap/flower/pull/1773), " -"[#1755](https://github.com/adap/flower/pull/1755), " -"[#1789](https://github.com/adap/flower/pull/1789), " -"[#1788](https://github.com/adap/flower/pull/1788), " -"[#1798](https://github.com/adap/flower/pull/1798), " -"[#1799](https://github.com/adap/flower/pull/1799), " -"[#1739](https://github.com/adap/flower/pull/1739), " -"[#1800](https://github.com/adap/flower/pull/1800), " -"[#1804](https://github.com/adap/flower/pull/1804), " -"[#1805](https://github.com/adap/flower/pull/1805))" +"Find the `blog post about federated learning on embedded device here " +"`_" +" and the corresponding `GitHub code example " +"`_." msgstr "" -#: ../../source/ref-changelog.md:416 -msgid "v1.3.0 (2023-02-06)" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" +msgstr "" + +#: ../../source/ref-faq.rst:20 +msgid "" +"Yes, it does. Please take a look at our `blog post " +"`_ or check out the code examples:" msgstr "" -#: ../../source/ref-changelog.md:422 +#: ../../source/ref-faq.rst:22 msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Daniel J. 
Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" +"`Android Kotlin example `_" msgstr "" -#: ../../source/ref-changelog.md:426 -msgid "" -"**Add support for** `workload_id` **and** `group_id` **in Driver API** " -"([#1595](https://github.com/adap/flower/pull/1595))" +#: ../../source/ref-faq.rst:23 +msgid "`Android Java example `_" msgstr "" -#: ../../source/ref-changelog.md:428 -msgid "" -"The (experimental) Driver API now supports a `workload_id` that can be " -"used to identify which workload a task belongs to. It also supports a new" -" `group_id` that can be used, for example, to indicate the current " -"training round. Both the `workload_id` and `group_id` enable client nodes" -" to decide whether they want to handle a task or not." +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" msgstr "" -#: ../../source/ref-changelog.md:430 +#: ../../source/ref-faq.rst:27 msgid "" -"**Make Driver API and Fleet API address configurable** " -"([#1637](https://github.com/adap/flower/pull/1637))" +"Yes, of course. A list of available examples using Flower within a " +"blockchain environment is available here:" msgstr "" -#: ../../source/ref-changelog.md:432 -msgid "" -"The (experimental) long-running Flower server (Driver API and Fleet API) " -"can now configure the server address of both Driver API (via `--driver-" -"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" +#: ../../source/ref-faq.rst:30 +msgid "`FLock: A Decentralised AI Training Platform `_." msgstr "" -#: ../../source/ref-changelog.md:434 -msgid "" -"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " -"\"0.0.0.0:8086\"`" +#: ../../source/ref-faq.rst:30 +msgid "Contribute to on-chain training the model and earn rewards." msgstr "" -#: ../../source/ref-changelog.md:436 -msgid "Both IPv4 and IPv6 addresses are supported." 
+#: ../../source/ref-faq.rst:31 +msgid "Local blockchain with federated learning simulation." msgstr "" -#: ../../source/ref-changelog.md:438 +#: ../../source/ref-faq.rst:32 msgid "" -"**Add new example of Federated Learning using fastai and Flower** " -"([#1598](https://github.com/adap/flower/pull/1598))" +"`Flower meets Nevermined GitHub Repository `_." msgstr "" -#: ../../source/ref-changelog.md:440 +#: ../../source/ref-faq.rst:33 msgid "" -"A new code example (`quickstart-fastai`) demonstrates federated learning " -"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " -"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" -"/quickstart-fastai)." +"`Flower meets Nevermined YouTube video " +"`_." msgstr "" -#: ../../source/ref-changelog.md:442 +#: ../../source/ref-faq.rst:34 msgid "" -"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" -" versions of Android** " -"([#1603](https://github.com/adap/flower/pull/1603))" +"`Flower meets KOSMoS `_." msgstr "" -#: ../../source/ref-changelog.md:444 +#: ../../source/ref-faq.rst:35 msgid "" -"The Android code example has received a substantial update: the project " -"is compatible with Flower 1.0 (and later), the UI received a full " -"refresh, and the project is updated to be compatible with newer Android " -"tooling." +"`Flower meets Talan blog post `_ ." msgstr "" -#: ../../source/ref-changelog.md:446 +#: ../../source/ref-faq.rst:36 msgid "" -"**Add new `FedProx` strategy** " -"([#1619](https://github.com/adap/flower/pull/1619))" +"`Flower meets Talan GitHub Repository " +"`_ ." msgstr "" -#: ../../source/ref-changelog.md:448 -msgid "" -"This " -"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" -" is almost identical to " -"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," -" but helps users replicate what is described in this " -"[paper](https://arxiv.org/abs/1812.06127). 
It essentially adds a " -"parameter called `proximal_mu` to regularize the local models with " -"respect to the global models." +#: ../../source/ref-telemetry.md:1 +msgid "Telemetry" msgstr "" -#: ../../source/ref-changelog.md:450 +#: ../../source/ref-telemetry.md:3 msgid "" -"**Add new metrics to telemetry events** " -"([#1640](https://github.com/adap/flower/pull/1640))" +"The Flower open-source project collects **anonymous** usage metrics to " +"make well-informed decisions to improve Flower. Doing this enables the " +"Flower team to understand how Flower is used and what challenges users " +"might face." msgstr "" -#: ../../source/ref-changelog.md:452 +#: ../../source/ref-telemetry.md:5 msgid "" -"An updated event structure allows, for example, the clustering of events " -"within the same workload." +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users that do not want to share anonymous usage metrics." msgstr "" -#: ../../source/ref-changelog.md:454 -msgid "" -"**Add new custom strategy tutorial section** " -"[#1623](https://github.com/adap/flower/pull/1623)" +#: ../../source/ref-telemetry.md:7 +msgid "Principles" msgstr "" -#: ../../source/ref-changelog.md:456 +#: ../../source/ref-telemetry.md:9 +msgid "We follow strong principles guarding anonymous usage metrics collection:" +msgstr "" + +#: ../../source/ref-telemetry.md:11 msgid "" -"The Flower tutorial now has a new section that covers implementing a " -"custom strategy from scratch: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" +"**Optional:** You will always be able to disable telemetry; read on to " +"learn “[How to opt-out](#how-to-opt-out)”." 
msgstr "" -#: ../../source/ref-changelog.md:458 +#: ../../source/ref-telemetry.md:12 msgid "" -"**Add new custom serialization tutorial section** " -"([#1622](https://github.com/adap/flower/pull/1622))" +"**Anonymous:** The reported usage metrics are anonymous and do not " +"contain any personally identifiable information (PII). See “[Collected " +"metrics](#collected-metrics)” to understand what metrics are being " +"reported." msgstr "" -#: ../../source/ref-changelog.md:460 +#: ../../source/ref-telemetry.md:13 msgid "" -"The Flower tutorial now has a new section that covers custom " -"serialization: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-customize-the-client-pytorch.ipynb)" +"**Transparent:** You can easily inspect what anonymous metrics are being " +"reported; see the section “[How to inspect what is being reported](#how-" +"to-inspect-what-is-being-reported)”" msgstr "" -#: ../../source/ref-changelog.md:462 +#: ../../source/ref-telemetry.md:14 msgid "" -"**General improvements** " -"([#1638](https://github.com/adap/flower/pull/1638), " -"[#1634](https://github.com/adap/flower/pull/1634), " -"[#1636](https://github.com/adap/flower/pull/1636), " -"[#1635](https://github.com/adap/flower/pull/1635), " -"[#1633](https://github.com/adap/flower/pull/1633), " -"[#1632](https://github.com/adap/flower/pull/1632), " -"[#1631](https://github.com/adap/flower/pull/1631), " -"[#1630](https://github.com/adap/flower/pull/1630), " -"[#1627](https://github.com/adap/flower/pull/1627), " -"[#1593](https://github.com/adap/flower/pull/1593), " -"[#1616](https://github.com/adap/flower/pull/1616), " -"[#1615](https://github.com/adap/flower/pull/1615), " -"[#1607](https://github.com/adap/flower/pull/1607), " -"[#1609](https://github.com/adap/flower/pull/1609), " -"[#1608](https://github.com/adap/flower/pull/1608), " -"[#1603](https://github.com/adap/flower/pull/1603), " 
-"[#1590](https://github.com/adap/flower/pull/1590), " -"[#1580](https://github.com/adap/flower/pull/1580), " -"[#1599](https://github.com/adap/flower/pull/1599), " -"[#1600](https://github.com/adap/flower/pull/1600), " -"[#1601](https://github.com/adap/flower/pull/1601), " -"[#1597](https://github.com/adap/flower/pull/1597), " -"[#1595](https://github.com/adap/flower/pull/1595), " -"[#1591](https://github.com/adap/flower/pull/1591), " -"[#1588](https://github.com/adap/flower/pull/1588), " -"[#1589](https://github.com/adap/flower/pull/1589), " -"[#1587](https://github.com/adap/flower/pull/1587), " -"[#1573](https://github.com/adap/flower/pull/1573), " -"[#1581](https://github.com/adap/flower/pull/1581), " -"[#1578](https://github.com/adap/flower/pull/1578), " -"[#1574](https://github.com/adap/flower/pull/1574), " -"[#1572](https://github.com/adap/flower/pull/1572), " -"[#1586](https://github.com/adap/flower/pull/1586))" +"**Open for feedback:** You can always reach out to us if you have " +"feedback; see the section “[How to contact us](#how-to-contact-us)” for " +"details." msgstr "" -#: ../../source/ref-changelog.md:466 +#: ../../source/ref-telemetry.md:16 +msgid "How to opt-out" +msgstr "" + +#: ../../source/ref-telemetry.md:18 msgid "" -"**Updated documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" +"When Flower starts, it will check for an environment variable called " +"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " +"`FLWR_TELEMETRY_ENABLED=0`. 
Assuming you are starting a Flower server or " +"client, simply do so by prepending your command as in:" msgstr "" -#: ../../source/ref-changelog.md:468 ../../source/ref-changelog.md:535 +#: ../../source/ref-telemetry.md:24 msgid "" -"As usual, the documentation has improved quite a bit. It is another step " -"in our effort to make the Flower documentation the best documentation of " -"any project. Stay tuned and as always, feel free to provide feedback!" +"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," +" `.bashrc` (or whatever configuration file applies to your environment) " +"to disable Flower telemetry permanently." msgstr "" -#: ../../source/ref-changelog.md:474 -msgid "v1.2.0 (2023-01-13)" +#: ../../source/ref-telemetry.md:26 +msgid "Collected metrics" msgstr "" -#: ../../source/ref-changelog.md:480 +#: ../../source/ref-telemetry.md:28 +msgid "Flower telemetry collects the following metrics:" +msgstr "" + +#: ../../source/ref-telemetry.md:30 msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." -" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" +"**Flower version.** Understand which versions of Flower are currently " +"being used. This helps us to decide whether we should invest effort into " +"releasing a patch version for an older version of Flower or instead use " +"the bandwidth to build new features." msgstr "" -#: ../../source/ref-changelog.md:484 +#: ../../source/ref-telemetry.md:32 msgid "" -"**Introduce new Flower Baseline: FedAvg MNIST** " -"([#1497](https://github.com/adap/flower/pull/1497), " -"[#1552](https://github.com/adap/flower/pull/1552))" +"**Operating system.** Enables us to answer questions such as: *Should we " +"create more guides for Linux, macOS, or Windows?*" msgstr "" -#: ../../source/ref-changelog.md:486 +#: ../../source/ref-telemetry.md:34 msgid "" -"Over the coming weeks, we will be releasing a number of new reference " -"implementations useful especially to FL newcomers. 
They will typically " -"revisit well known papers from the literature, and be suitable for " -"integration in your own application or for experimentation, in order to " -"deepen your knowledge of FL in general. Today's release is the first in " -"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" -"pack-fedavg-mnist-cnn/)" +"**Python version.** Knowing the Python version helps us, for example, to " +"decide whether we should invest effort into supporting old versions of " +"Python or stop supporting them and start taking advantage of new Python " +"features." msgstr "" -#: ../../source/ref-changelog.md:488 +#: ../../source/ref-telemetry.md:36 msgid "" -"**Improve GPU support in simulations** " -"([#1555](https://github.com/adap/flower/pull/1555))" +"**Hardware properties.** Understanding the hardware environment that " +"Flower is being used in helps to decide whether we should, for example, " +"put more effort into supporting low-resource environments." msgstr "" -#: ../../source/ref-changelog.md:490 +#: ../../source/ref-telemetry.md:38 msgid "" -"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" -" to improve GPU support. The update includes some of the hard-earned " -"lessons from scaling simulations in GPU cluster environments. New " -"defaults make running GPU-based simulations substantially more robust." +"**Execution mode.** Knowing what execution mode Flower starts in enables " +"us to understand how heavily certain features are being used and better " +"prioritize based on that." msgstr "" -#: ../../source/ref-changelog.md:492 +#: ../../source/ref-telemetry.md:40 msgid "" -"**Improve GPU support in Jupyter Notebook tutorials** " -"([#1527](https://github.com/adap/flower/pull/1527), " -"[#1558](https://github.com/adap/flower/pull/1558))" +"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " +"time a Flower workload starts. 
This allows us to understand which device " +"types not only start Flower workloads but also successfully complete " +"them." msgstr "" -#: ../../source/ref-changelog.md:494 +#: ../../source/ref-telemetry.md:42 msgid "" -"Some users reported that Jupyter Notebooks have not always been easy to " -"use on GPU instances. We listened and made improvements to all of our " -"Jupyter notebooks! Check out the updated notebooks here:" +"**Source.** Flower telemetry tries to store a random source ID in " +"`~/.flwr/source` the first time a telemetry event is generated. The " +"source ID is important to identify whether an issue is recurring or " +"whether an issue is triggered by multiple clusters running concurrently " +"(which often happens in simulation). For example, if a device runs " +"multiple workloads at the same time, and this results in an issue, then, " +"in order to reproduce the issue, multiple workloads must be started at " +"the same time." msgstr "" -#: ../../source/ref-changelog.md:496 +#: ../../source/ref-telemetry.md:44 msgid "" -"[An Introduction to Federated Learning](https://flower.ai/docs/framework" -"/tutorial-get-started-with-flower-pytorch.html)" +"You may delete the source ID at any time. If you wish for all events " +"logged under a specific source ID to be deleted, you can send a deletion " +"request mentioning the source ID to `telemetry@flower.ai`. All events " +"related to that source ID will then be permanently deleted." msgstr "" -#: ../../source/ref-changelog.md:497 +#: ../../source/ref-telemetry.md:46 msgid "" -"[Strategies in Federated Learning](https://flower.ai/docs/framework" -"/tutorial-use-a-federated-learning-strategy-pytorch.html)" +"We will not collect any personally identifiable information. If you think" +" any of the metrics collected could be misused in any way, please [get in" +" touch with us](#how-to-contact-us). 
We will update this page to reflect " +"any changes to the metrics collected and publish changes in the " +"changelog." msgstr "" -#: ../../source/ref-changelog.md:498 +#: ../../source/ref-telemetry.md:48 msgid "" -"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" -"-strategy-from-scratch-pytorch.html)" +"If you think other metrics would be helpful for us to better guide our " +"decisions, please let us know! We will carefully review them; if we are " +"confident that they do not compromise user privacy, we may add them." msgstr "" -#: ../../source/ref-changelog.md:499 -msgid "" -"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" -"customize-the-client-pytorch.html)" +#: ../../source/ref-telemetry.md:50 +msgid "How to inspect what is being reported" msgstr "" -#: ../../source/ref-changelog.md:501 +#: ../../source/ref-telemetry.md:52 msgid "" -"**Introduce optional telemetry** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" +"We wanted to make it very easy for you to inspect what anonymous usage " +"metrics are reported. You can view all the reported telemetry information" +" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " +"is disabled by default. You may use logging independently from " +"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " +"without sending any metrics." msgstr "" -#: ../../source/ref-changelog.md:503 +#: ../../source/ref-telemetry.md:58 msgid "" -"After a [request for " -"feedback](https://github.com/adap/flower/issues/1534) from the community," -" the Flower open-source project introduces optional collection of " -"*anonymous* usage metrics to make well-informed decisions to improve " -"Flower. Doing this enables the Flower team to understand how Flower is " -"used and what challenges users might face." 
+"The inspect Flower telemetry without sending any anonymous usage metrics," +" use both environment variables:" msgstr "" -#: ../../source/ref-changelog.md:505 -msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users who do not want to share anonymous usage metrics. " -"[Read more.](https://flower.ai/docs/telemetry.html)." +#: ../../source/ref-telemetry.md:64 +msgid "How to contact us" msgstr "" -#: ../../source/ref-changelog.md:507 +#: ../../source/ref-telemetry.md:66 msgid "" -"**Introduce (experimental) Driver API** " -"([#1520](https://github.com/adap/flower/pull/1520), " -"[#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" +"We want to hear from you. If you have any feedback or ideas on how to " +"improve the way we handle anonymous usage metrics, reach out to us via " +"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " +"(`telemetry@flower.ai`)." msgstr "" -#: ../../source/ref-changelog.md:509 +#: ../../source/tutorial-quickstart-android.rst:-1 msgid "" -"Flower now has a new (experimental) Driver API which will enable fully " -"programmable, async, and multi-tenant Federated Learning and Federated " -"Analytics applications. Phew, that's a lot! Going forward, the Driver API" -" will be the abstraction that many upcoming features will be built on - " -"and you can start building those things now, too." +"Read this Federated Learning quickstart tutorial for creating an Android " +"app using Flower." 
msgstr "" -#: ../../source/ref-changelog.md:511 -msgid "" -"The Driver API also enables a new execution mode in which the server runs" -" indefinitely. Multiple individual workloads can run concurrently and " -"start and stop their execution independent of the server. This is " -"especially useful for users who want to deploy Flower in production." +#: ../../source/tutorial-quickstart-android.rst:4 +msgid "Quickstart Android" msgstr "" -#: ../../source/ref-changelog.md:513 +#: ../../source/tutorial-quickstart-android.rst:9 msgid "" -"To learn more, check out the `mt-pytorch` code example. We look forward " -"to you feedback!" +"Let's build a federated learning system using TFLite and Flower on " +"Android!" msgstr "" -#: ../../source/ref-changelog.md:515 +#: ../../source/tutorial-quickstart-android.rst:11 msgid "" -"Please note: *The Driver API is still experimental and will likely change" -" significantly over time.*" +"Please refer to the `full code example " +"`_ to learn " +"more." msgstr "" -#: ../../source/ref-changelog.md:517 -msgid "" -"**Add new Federated Analytics with Pandas example** " -"([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535))" +#: ../../source/tutorial-quickstart-fastai.rst:4 +msgid "Quickstart fastai" msgstr "" -#: ../../source/ref-changelog.md:519 +#: ../../source/tutorial-quickstart-fastai.rst:6 msgid "" -"A new code example (`quickstart-pandas`) demonstrates federated analytics" -" with Pandas and Flower. You can find it here: [quickstart-" -"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" -"pandas)." +"In this federated learning tutorial we will learn how to train a " +"SqueezeNet model on MNIST using Flower and fastai. It is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `." 
msgstr "" -#: ../../source/ref-changelog.md:521 -msgid "" -"**Add new strategies: Krum and MultiKrum** " -"([#1481](https://github.com/adap/flower/pull/1481))" +#: ../../source/tutorial-quickstart-fastai.rst:10 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:11 +msgid "Then, clone the code example directly from GitHub:" msgstr "" -#: ../../source/ref-changelog.md:523 +#: ../../source/tutorial-quickstart-fastai.rst:18 msgid "" -"Edoardo, a computer science student at the Sapienza University of Rome, " -"contributed a new `Krum` strategy that enables users to easily use Krum " -"and MultiKrum in their workloads." +"This will create a new directory called `quickstart-fastai` containing " +"the following files:" msgstr "" -#: ../../source/ref-changelog.md:525 +#: ../../source/tutorial-quickstart-fastai.rst:31 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:32 +#, fuzzy +msgid "Next, activate your environment, then run:" +msgstr "그 후 가상 환경을 활성화합니다:" + +#: ../../source/tutorial-quickstart-fastai.rst:41 msgid "" -"**Update C++ example to be compatible with Flower v1.2.0** " -"([#1495](https://github.com/adap/flower/pull/1495))" +"This example by default runs the Flower Simulation Engine, creating a " +"federation of 10 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" Let's run the project:" msgstr "" -#: ../../source/ref-changelog.md:527 -msgid "" -"The C++ code example has received a substantial update to make it " -"compatible with the latest version of Flower." 
+#: ../../source/tutorial-quickstart-fastai.rst:54 +#: ../../source/tutorial-quickstart-huggingface.rst:61 +#: ../../source/tutorial-quickstart-mlx.rst:60 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:55 +#: ../../source/tutorial-quickstart-pytorch.rst:62 +#: ../../source/tutorial-quickstart-tensorflow.rst:62 +msgid "With default arguments you will see an output like this one:" msgstr "" -#: ../../source/ref-changelog.md:529 +#: ../../source/tutorial-quickstart-fastai.rst:98 +#: ../../source/tutorial-quickstart-huggingface.rst:112 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:105 +#: ../../source/tutorial-quickstart-pytorch.rst:103 +#: ../../source/tutorial-quickstart-tensorflow.rst:103 msgid "" -"**General improvements** " -"([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github.com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" +"You can also override the parameters defined in the " +"``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this:" msgstr "" -#: ../../source/ref-changelog.md:533 +#: ../../source/tutorial-quickstart-fastai.rst:108 msgid "" -"**Updated documentation** " -"([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " 
-"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" +"Check the `source code `_ of this tutorial in ``examples/quickstart-fasai`` " +"in the Flower GitHub repository." msgstr "" -#: ../../source/ref-changelog.md:537 +#: ../../source/tutorial-quickstart-huggingface.rst:-1 msgid "" -"One highlight is the new [first time contributor " -"guide](https://flower.ai/docs/first-time-contributors.html): if you've " -"never contributed on GitHub before, this is the perfect place to start!" +"Check out this Federating Learning quickstart tutorial for using Flower " +"with 🤗 HuggingFace Transformers in order to fine-tune an LLM." msgstr "" -#: ../../source/ref-changelog.md:543 -msgid "v1.1.0 (2022-10-31)" +#: ../../source/tutorial-quickstart-huggingface.rst:4 +msgid "Quickstart 🤗 Transformers" msgstr "" -#: ../../source/ref-changelog.md:547 +#: ../../source/tutorial-quickstart-huggingface.rst:6 msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made the new version of Flower possible (in `git shortlog` order):" +"In this federated learning tutorial we will learn how to train a large " +"language model (LLM) on the `IMDB " +"`_ dataset using Flower" +" and the 🤗 Hugging Face Transformers library. It is recommended to create" +" a virtual environment and run everything within a :doc:`virtualenv " +"`." msgstr "" -#: ../../source/ref-changelog.md:549 +#: ../../source/tutorial-quickstart-huggingface.rst:12 msgid "" -"`Akis Linardos`, `Christopher S`, `Daniel J. 
Beutel`, `George`, `Jan " -"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " -"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " -"`danielnugraha`, `edogab33`" +"Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face " +"project. It will generate all the files needed to run, by default with " +"the Flower Simulation Engine, a federation of 10 nodes using |fedavg|_ " +"The dataset will be partitioned using |flowerdatasets|_'s " +"|iidpartitioner|_." msgstr "" -#: ../../source/ref-changelog.md:553 +#: ../../source/tutorial-quickstart-huggingface.rst:17 +#: ../../source/tutorial-quickstart-mlx.rst:17 +#: ../../source/tutorial-quickstart-pytorch.rst:18 +#: ../../source/tutorial-quickstart-tensorflow.rst:18 msgid "" -"**Introduce Differential Privacy wrappers (preview)** " -"([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" +"Now that we have a rough idea of what this example is about, let's get " +"started. First, install Flower in your new environment:" msgstr "" -#: ../../source/ref-changelog.md:555 +#: ../../source/tutorial-quickstart-huggingface.rst:25 msgid "" -"The first (experimental) preview of pluggable Differential Privacy " -"wrappers enables easy configuration and usage of differential privacy " -"(DP). The pluggable DP wrappers enable framework-agnostic **and** " -"strategy-agnostic usage of both client-side DP and server-side DP. Head " -"over to the Flower docs, a new explainer goes into more detail." +"Then, run the command below. 
You will be prompted to select one of the " +"available templates (choose ``HuggingFace``), give a name to your " +"project, and type in your developer name:" msgstr "" -#: ../../source/ref-changelog.md:557 +#: ../../source/tutorial-quickstart-huggingface.rst:33 +#: ../../source/tutorial-quickstart-mlx.rst:32 +#: ../../source/tutorial-quickstart-pytorch.rst:34 +#: ../../source/tutorial-quickstart-tensorflow.rst:34 msgid "" -"**New iOS CoreML code example** " -"([#1289](https://github.com/adap/flower/pull/1289))" +"After running it you'll notice a new directory with your project name has" +" been created. It should have the following structure:" msgstr "" -#: ../../source/ref-changelog.md:559 +#: ../../source/tutorial-quickstart-huggingface.rst:47 +#: ../../source/tutorial-quickstart-mlx.rst:46 +#: ../../source/tutorial-quickstart-pytorch.rst:48 +#: ../../source/tutorial-quickstart-tensorflow.rst:48 msgid "" -"Flower goes iOS! A massive new code example shows how Flower clients can " -"be built for iOS. The code example contains both Flower iOS SDK " -"components that can be used for many tasks, and one task example running " -"on CoreML." +"If you haven't yet installed the project and its dependencies, you can do" +" so by:" msgstr "" -#: ../../source/ref-changelog.md:561 -msgid "" -"**New FedMedian strategy** " -"([#1461](https://github.com/adap/flower/pull/1461))" +#: ../../source/tutorial-quickstart-huggingface.rst:54 +#: ../../source/tutorial-quickstart-pytorch.rst:55 +#: ../../source/tutorial-quickstart-tensorflow.rst:55 +msgid "To run the project, do:" msgstr "" -#: ../../source/ref-changelog.md:563 -msgid "" -"The new `FedMedian` strategy implements Federated Median (FedMedian) by " -"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." 
+#: ../../source/tutorial-quickstart-huggingface.rst:102 +msgid "You can also run the project with GPU as follows:" msgstr "" -#: ../../source/ref-changelog.md:565 +#: ../../source/tutorial-quickstart-huggingface.rst:109 msgid "" -"**Log** `Client` **exceptions in Virtual Client Engine** " -"([#1493](https://github.com/adap/flower/pull/1493))" +"This will use the default arguments where each ``ClientApp`` will use 2 " +"CPUs and at most 4 ``ClientApp``\\s will run in a given GPU." msgstr "" -#: ../../source/ref-changelog.md:567 +#: ../../source/tutorial-quickstart-huggingface.rst:120 +#: ../../source/tutorial-quickstart-mlx.rst:110 +#: ../../source/tutorial-quickstart-pytorch.rst:111 msgid "" -"All `Client` exceptions happening in the VCE are now logged by default " -"and not just exposed to the configured `Strategy` (via the `failures` " -"argument)." +"What follows is an explanation of each component in the project you just " +"created: dataset partition, the model, defining the ``ClientApp`` and " +"defining the ``ServerApp``." msgstr "" -#: ../../source/ref-changelog.md:569 -msgid "" -"**Improve Virtual Client Engine internals** " -"([#1401](https://github.com/adap/flower/pull/1401), " -"[#1453](https://github.com/adap/flower/pull/1453))" -msgstr "" +#: ../../source/tutorial-quickstart-huggingface.rst:124 +#: ../../source/tutorial-quickstart-mlx.rst:114 +#: ../../source/tutorial-quickstart-pytorch.rst:115 +#: ../../source/tutorial-quickstart-tensorflow.rst:112 +#, fuzzy +msgid "The Data" +msgstr "Metadata" -#: ../../source/ref-changelog.md:571 +#: ../../source/tutorial-quickstart-huggingface.rst:126 msgid "" -"Some internals of the Virtual Client Engine have been revamped. The VCE " -"now uses Ray 2.0 under the hood, the value type of the `client_resources`" -" dictionary changed to `float` to allow fractions of resources to be " -"allocated." +"This tutorial uses |flowerdatasets|_ to easily download and partition the" +" `IMDB `_ dataset. 
In " +"this example you'll make use of the |iidpartitioner|_ to generate " +"``num_partitions`` partitions. You can choose |otherpartitioners|_ " +"available in Flower Datasets. To tokenize the text, we will also load the" +" tokenizer from the pre-trained Transformer model that we'll use during " +"training - more on that in the next section. Each ``ClientApp`` will call" +" this function to create dataloaders with the data that correspond to " +"their data partition." msgstr "" -#: ../../source/ref-changelog.md:573 -msgid "" -"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " -"Client Engine**" +#: ../../source/tutorial-quickstart-huggingface.rst:171 +#: ../../source/tutorial-quickstart-mlx.rst:155 +#: ../../source/tutorial-quickstart-pytorch.rst:150 +#: ../../source/tutorial-quickstart-tensorflow.rst:139 +msgid "The Model" msgstr "" -#: ../../source/ref-changelog.md:575 +#: ../../source/tutorial-quickstart-huggingface.rst:173 msgid "" -"The Virtual Client Engine now has full support for optional `Client` (and" -" `NumPyClient`) methods." +"We will leverage 🤗 Hugging Face to federate the training of language " +"models over multiple clients using Flower. More specifically, we will " +"fine-tune a pre-trained Transformer model (|berttiny|_) for sequence " +"classification over the dataset of IMDB ratings. The end goal is to " +"detect if a movie rating is positive or negative. If you have access to " +"larger GPUs, feel free to use larger models!" msgstr "" -#: ../../source/ref-changelog.md:577 +#: ../../source/tutorial-quickstart-huggingface.rst:185 msgid "" -"**Provide type information to packages using** `flwr` " -"([#1377](https://github.com/adap/flower/pull/1377))" +"Note that here, ``model_name`` is a string that will be loaded from the " +"``Context`` in the ClientApp and ServerApp." 
msgstr "" -#: ../../source/ref-changelog.md:579 +#: ../../source/tutorial-quickstart-huggingface.rst:188 msgid "" -"The package `flwr` is now bundled with a `py.typed` file indicating that " -"the package is typed. This enables typing support for projects or " -"packages that use `flwr` by enabling them to improve their code using " -"static type checkers like `mypy`." +"In addition to loading the pretrained model weights and architecture, we " +"also include two utility functions to perform both training (i.e. " +"``train()``) and evaluation (i.e. ``test()``) using the above model. " +"These functions should look fairly familiar if you have some prior " +"experience with PyTorch. Note these functions do not have anything " +"specific to Flower. That being said, the training function will normally " +"be called, as we'll see later, from a Flower client passing its own data." +" In summary, your clients can use standard training/testing functions to " +"perform local training or evaluation:" msgstr "" -#: ../../source/ref-changelog.md:581 +#: ../../source/tutorial-quickstart-huggingface.rst:228 +#: ../../source/tutorial-quickstart-mlx.rst:199 +#: ../../source/tutorial-quickstart-pytorch.rst:224 +#: ../../source/tutorial-quickstart-tensorflow.rst:168 +#, fuzzy +msgid "The ClientApp" +msgstr "클라이언트앱" + +#: ../../source/tutorial-quickstart-huggingface.rst:230 msgid "" -"**Updated code example** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" +"The main changes we have to make to use 🤗 Hugging Face with Flower will " +"be found in the ``get_weights()`` and ``set_weights()`` functions. Under " +"the hood, the ``transformers`` library uses PyTorch, which means we can " +"reuse the ``get_weights()`` and ``set_weights()`` code that we defined in" +" the :doc:`Quickstart PyTorch ` tutorial. 
As" +" a reminder, in ``get_weights()``, PyTorch model parameters are extracted" +" and represented as a list of NumPy arrays. The ``set_weights()`` " +"function is the opposite: given a list of NumPy arrays it applies " +"them to an existing PyTorch model. Doing this is fairly easy in PyTorch." msgstr "" -#: ../../source/ref-changelog.md:583 +#: ../../source/tutorial-quickstart-huggingface.rst:241 +#: ../../source/tutorial-quickstart-pytorch.rst:234 msgid "" -"The code examples covering scikit-learn and PyTorch Lightning have been " -"updated to work with the latest version of Flower." +"The specific implementation of ``get_weights()`` and ``set_weights()`` " +"depends on the type of models you use. The ones shown below work for a " +"wide range of PyTorch models but you might need to adjust them if you " +"have more exotic model architectures." msgstr "" -#: ../../source/ref-changelog.md:585 +#: ../../source/tutorial-quickstart-huggingface.rst:257 +#: ../../source/tutorial-quickstart-pytorch.rst:250 msgid "" -"**Updated documentation** " -"([#1355](https://github.com/adap/flower/pull/1355), " -"[#1558](https://github.com/adap/flower/pull/1558), " -"[#1379](https://github.com/adap/flower/pull/1379), " -"[#1380](https://github.com/adap/flower/pull/1380), " -"[#1381](https://github.com/adap/flower/pull/1381), " -"[#1332](https://github.com/adap/flower/pull/1332), " -"[#1391](https://github.com/adap/flower/pull/1391), " -"[#1403](https://github.com/adap/flower/pull/1403), " -"[#1364](https://github.com/adap/flower/pull/1364), " -"[#1409](https://github.com/adap/flower/pull/1409), " -"[#1419](https://github.com/adap/flower/pull/1419), " -"[#1444](https://github.com/adap/flower/pull/1444), " -"[#1448](https://github.com/adap/flower/pull/1448), " -"[#1417](https://github.com/adap/flower/pull/1417), " -"[#1449](https://github.com/adap/flower/pull/1449), " -"[#1465](https://github.com/adap/flower/pull/1465), " -"[#1467](https://github.com/adap/flower/pull/1467))"
+"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset. Similarly, the ``evaluate()`` method is used to evaluate the " +"model received on a held-out validation set that the client might have:" msgstr "" -#: ../../source/ref-changelog.md:587 +#: ../../source/tutorial-quickstart-huggingface.rst:283 msgid "" -"There have been so many documentation updates that it doesn't even make " -"sense to list them individually." +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparameters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"``local-epochs`` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additional hyperparameters in ``pyproject.toml`` and access them here." msgstr "" -#: ../../source/ref-changelog.md:589 +#: ../../source/tutorial-quickstart-huggingface.rst:316 +#: ../../source/tutorial-quickstart-mlx.rst:361 +#: ../../source/tutorial-quickstart-pytorch.rst:307 +#: ../../source/tutorial-quickstart-tensorflow.rst:232 +#, fuzzy +msgid "The ServerApp" +msgstr "Flower 서버앱" + +#: ../../source/tutorial-quickstart-huggingface.rst:318 msgid "" -"**Restructured documentation** " -"([#1387](https://github.com/adap/flower/pull/1387))" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"|serverappcomponents|_ as opposed to a |client|_. In this example we use " +"the `FedAvg` strategy. To it we pass a randomly initialized model that " +"will serve as the global model to be federated. Note that the value of " +"``fraction_fit`` is read from the run config.
You can find the default " +"value defined in the ``pyproject.toml``." msgstr "" -#: ../../source/ref-changelog.md:591 +#: ../../source/tutorial-quickstart-huggingface.rst:356 msgid "" -"The documentation has been restructured to make it easier to navigate. " -"This is just the first step in a larger effort to make the Flower " -"documentation the best documentation of any project ever. Stay tuned!" +"Congratulations! You've successfully built and run your first federated " +"learning system for an LLM." msgstr "" -#: ../../source/ref-changelog.md:593 +#: ../../source/tutorial-quickstart-huggingface.rst:361 msgid "" -"**Open in Colab button** " -"([#1389](https://github.com/adap/flower/pull/1389))" +"Check the source code of the extended version of this tutorial in " +"|quickstart_hf_link|_ in the Flower GitHub repository. For a " +"comprehensive example of a federated fine-tuning of an LLM with Flower, " +"refer to the |flowertune|_ example in the Flower GitHub repository." msgstr "" -#: ../../source/ref-changelog.md:595 +#: ../../source/tutorial-quickstart-ios.rst:-1 msgid "" -"The four parts of the Flower Federated Learning Tutorial now come with a " -"new `Open in Colab` button. No need to install anything on your local " -"machine, you can now use and learn about Flower in your browser, it's " -"only a single click away." +"Read this Federated Learning quickstart tutorial for creating an iOS app " +"using Flower to train a neural network on MNIST." 
msgstr "" -#: ../../source/ref-changelog.md:597 -msgid "" -"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" +#: ../../source/tutorial-quickstart-ios.rst:4 +msgid "Quickstart iOS" msgstr "" -#: ../../source/ref-changelog.md:599 -msgid "" -"The Flower Federated Learning Tutorial has two brand-new parts covering " -"custom strategies (still WIP) and the distinction between `Client` and " -"`NumPyClient`. The existing parts one and two have also been improved " -"(many small changes and fixes)." +#: ../../source/tutorial-quickstart-ios.rst:9 +msgid "" +"In this tutorial we will learn how to train a Neural Network on MNIST " +"using Flower and CoreML on iOS devices." msgstr "" -#: ../../source/ref-changelog.md:605 -msgid "v1.0.0 (2022-07-28)" +#: ../../source/tutorial-quickstart-ios.rst:12 +msgid "" +"First of all, for running the Flower Python server, it is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `. For the Flower client " +"implementation in iOS, it is recommended to use Xcode as our IDE." msgstr "" -#: ../../source/ref-changelog.md:607 -msgid "Highlights" +#: ../../source/tutorial-quickstart-ios.rst:17 +msgid "" +"Our example consists of one Python *server* and two iPhone *clients* that" +" all have the same model." msgstr "" -#: ../../source/ref-changelog.md:609 -msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" +#: ../../source/tutorial-quickstart-ios.rst:20 +msgid "" +"*Clients* are responsible for generating individual weight updates for " +"the model based on their local datasets. These updates are then sent to " +"the *server* which will aggregate them to produce a better model. 
" +"Finally, the *server* sends this improved version of the model back to " +"each *client*. A complete cycle of weight updates is called a *round*." msgstr "" -#: ../../source/ref-changelog.md:610 -msgid "All `Client`/`NumPyClient` methods are now optional" +#: ../../source/tutorial-quickstart-ios.rst:26 +msgid "" +"Now that we have a rough idea of what is going on, let's get started to " +"setup our Flower server environment. We first need to install Flower. You" +" can do this by using pip:" msgstr "" -#: ../../source/ref-changelog.md:611 -msgid "Configurable `get_parameters`" +#: ../../source/tutorial-quickstart-ios.rst:33 +msgid "Or Poetry:" msgstr "" -#: ../../source/ref-changelog.md:612 -msgid "" -"Tons of small API cleanups resulting in a more coherent developer " -"experience" +#: ../../source/tutorial-quickstart-ios.rst:40 +#: ../../source/tutorial-quickstart-scikitlearn.rst:43 +#: ../../source/tutorial-quickstart-xgboost.rst:65 +msgid "Flower Client" msgstr "" -#: ../../source/ref-changelog.md:616 +#: ../../source/tutorial-quickstart-ios.rst:42 msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made Flower 1.0 possible (in reverse [GitHub " -"Contributors](https://github.com/adap/flower/graphs/contributors) order):" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training using CoreML as our local training pipeline and " +"MNIST as our dataset. For simplicity reasons we will use the complete " +"Flower client with CoreML, that has been implemented and stored inside " +"the Swift SDK. 
The client implementation can be seen below:" msgstr "" -#: ../../source/ref-changelog.md:618 +#: ../../source/tutorial-quickstart-ios.rst:80 msgid "" -"[@rtaiello](https://github.com/rtaiello), " -"[@g-pichler](https://github.com/g-pichler), [@rob-" -"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" -"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " -"[@nfnt](https://github.com/nfnt), " -"[@tatiana-s](https://github.com/tatiana-s), " -"[@TParcollet](https://github.com/TParcollet), " -"[@vballoli](https://github.com/vballoli), " -"[@negedng](https://github.com/negedng), " -"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " -"[@hei411](https://github.com/hei411), " -"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " -"[@AmitChaulwar](https://github.com/AmitChaulwar), " -"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" -"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " -"[@lbhm](https://github.com/lbhm), " -"[@sishtiaq](https://github.com/sishtiaq), " -"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" -"/Jueun-Park), [@architjen](https://github.com/architjen), " -"[@PratikGarai](https://github.com/PratikGarai), " -"[@mrinaald](https://github.com/mrinaald), " -"[@zliel](https://github.com/zliel), " -"[@MeiruiJiang](https://github.com/MeiruiJiang), " -"[@sancarlim](https://github.com/sancarlim), " -"[@gubertoli](https://github.com/gubertoli), " -"[@Vingt100](https://github.com/Vingt100), " -"[@MakGulati](https://github.com/MakGulati), " -"[@cozek](https://github.com/cozek), " -"[@jafermarq](https://github.com/jafermarq), " -"[@sisco0](https://github.com/sisco0), " -"[@akhilmathurs](https://github.com/akhilmathurs), " -"[@CanTuerk](https://github.com/CanTuerk), " -"[@mariaboerner1987](https://github.com/mariaboerner1987), " -"[@pedropgusmao](https://github.com/pedropgusmao), " -"[@tanertopal](https://github.com/tanertopal), " 
-"[@danieljanes](https://github.com/danieljanes)." +"Let's create a new application project in Xcode and add ``flwr`` as a " +"dependency in your project. For our application, we will store the logic " +"of our app in ``FLiOSModel.swift`` and the UI elements in " +"``ContentView.swift``. We will focus more on ``FLiOSModel.swift`` in this" +" quickstart. Please refer to the `full code example " +"`_ to learn more " +"about the app." msgstr "" -#: ../../source/ref-changelog.md:622 -msgid "" -"**All arguments must be passed as keyword arguments** " -"([#1338](https://github.com/adap/flower/pull/1338))" +#: ../../source/tutorial-quickstart-ios.rst:86 +msgid "Import Flower and CoreML related packages in ``FLiOSModel.swift``:" msgstr "" -#: ../../source/ref-changelog.md:624 +#: ../../source/tutorial-quickstart-ios.rst:94 msgid "" -"Pass all arguments as keyword arguments, positional arguments are not " -"longer supported. Code that uses positional arguments (e.g., " -"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " -"for each positional argument (e.g., " -"`start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())`)." +"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " +"will be bundled inside the application during deployment to your iOS " +"device. We need to pass the url to access mlmodel and run CoreML machine " +"learning processes, it can be retrieved by calling the function " +"``Bundle.main.url``. For the MNIST dataset, we need to preprocess it into" +" ``MLBatchProvider`` object. The preprocessing is done inside " +"``DataLoader.swift``." 
msgstr "" -#: ../../source/ref-changelog.md:626 +#: ../../source/tutorial-quickstart-ios.rst:112 msgid "" -"**Introduce configuration object** `ServerConfig` **in** `start_server` " -"**and** `start_simulation` " -"([#1317](https://github.com/adap/flower/pull/1317))" +"Since CoreML does not allow the model parameters to be seen before " +"training, and accessing the model parameters during or after the training" +" can only be done by specifying the layer name, we need to know this " +"information beforehand, through looking at the model specification, which" +" are written as proto files. The implementation can be seen in " +"``MLModelInspect``." msgstr "" -#: ../../source/ref-changelog.md:628 +#: ../../source/tutorial-quickstart-ios.rst:118 msgid "" -"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " -"600.0}`, `start_server` and `start_simulation` now expect a configuration" -" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" -" arguments that as the previous config dict, but it makes writing type-" -"safe code easier and the default parameters values more transparent." +"After we have all of the necessary information, let's create our Flower " +"client." msgstr "" -#: ../../source/ref-changelog.md:630 +#: ../../source/tutorial-quickstart-ios.rst:133 msgid "" -"**Rename built-in strategy parameters for clarity** " -"([#1334](https://github.com/adap/flower/pull/1334))" +"Then start the Flower gRPC client and start communicating to the server " +"by passing our Flower client to the function ``startFlwrGRPC``." msgstr "" -#: ../../source/ref-changelog.md:632 +#: ../../source/tutorial-quickstart-ios.rst:141 msgid "" -"The following built-in strategy parameters were renamed to improve " -"readability and consistency with other API's:" +"That's it for the client. We only have to implement ``Client`` or call " +"the provided ``MLFlwrClient`` and call ``startFlwrGRPC()``. 
The attribute" +" ``hostname`` and ``port`` tells the client which server to connect to. " +"This can be done by entering the hostname and port in the application " +"before clicking the start button to start the federated learning process." msgstr "" -#: ../../source/ref-changelog.md:634 -msgid "`fraction_eval` --> `fraction_evaluate`" +#: ../../source/tutorial-quickstart-ios.rst:148 +#: ../../source/tutorial-quickstart-scikitlearn.rst:179 +#: ../../source/tutorial-quickstart-xgboost.rst:358 +msgid "Flower Server" msgstr "" -#: ../../source/ref-changelog.md:635 -msgid "`min_eval_clients` --> `min_evaluate_clients`" +#: ../../source/tutorial-quickstart-ios.rst:150 +msgid "" +"For simple workloads we can start a Flower server and leave all the " +"configuration possibilities at their default values. In a file named " +"``server.py``, import Flower and start the server:" msgstr "" -#: ../../source/ref-changelog.md:636 -msgid "`eval_fn` --> `evaluate_fn`" +#: ../../source/tutorial-quickstart-ios.rst:161 +#: ../../source/tutorial-quickstart-scikitlearn.rst:254 +msgid "Train the model, federated!" msgstr "" -#: ../../source/ref-changelog.md:638 +#: ../../source/tutorial-quickstart-ios.rst:163 +#: ../../source/tutorial-quickstart-xgboost.rst:590 msgid "" -"**Update default arguments of built-in strategies** " -"([#1278](https://github.com/adap/flower/pull/1278))" +"With both client and server ready, we can now run everything and see " +"federated learning in action. FL systems usually have a server and " +"multiple clients. We therefore have to start the server first:" msgstr "" -#: ../../source/ref-changelog.md:640 +#: ../../source/tutorial-quickstart-ios.rst:171 msgid "" -"All built-in strategies now use `fraction_fit=1.0` and " -"`fraction_evaluate=1.0`, which means they select *all* currently " -"available clients for training and evaluation. 
Projects that relied on " -"the previous default values can get the previous behaviour by " -"initializing the strategy in the following way:" -msgstr "" - -#: ../../source/ref-changelog.md:642 -msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" +"Once the server is running we can start the clients in different " +"terminals. Build and run the client through your Xcode, one through Xcode" +" Simulator and the other by deploying it to your iPhone. To see more " +"about how to deploy your app to iPhone or Simulator visit `here " +"`_." msgstr "" -#: ../../source/ref-changelog.md:644 +#: ../../source/tutorial-quickstart-ios.rst:177 msgid "" -"**Add** `server_round` **to** `Strategy.evaluate` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"Congratulations! You've successfully built and run your first federated " +"learning system in your ios device. The full `source code " +"`_ for this " +"example can be found in ``examples/ios``." msgstr "" -#: ../../source/ref-changelog.md:646 +#: ../../source/tutorial-quickstart-jax.rst:-1 msgid "" -"The `Strategy` method `evaluate` now receives the current round of " -"federated learning/evaluation as the first parameter." +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Jax to train a linear regression model on a scikit-learn dataset." msgstr "" -#: ../../source/ref-changelog.md:648 -msgid "" -"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " -"([#1334](https://github.com/adap/flower/pull/1334))" +#: ../../source/tutorial-quickstart-jax.rst:4 +msgid "Quickstart JAX" msgstr "" -#: ../../source/ref-changelog.md:650 +#: ../../source/tutorial-quickstart-jax.rst:9 msgid "" -"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " -"three parameters: (1) The current round of federated learning/evaluation " -"(`server_round`), (2) the model parameters to evaluate (`parameters`), " -"and (3) a config dictionary (`config`)." 
+"This tutorial will show you how to use Flower to build a federated " +"version of an existing JAX workload. We are using JAX to train a linear " +"regression model on a scikit-learn dataset. We will structure the example" +" similar to our `PyTorch - From Centralized To Federated " +"`_ walkthrough. First, we build a centralized " +"training approach based on the `Linear Regression with JAX " +"`_" +" tutorial`. Then, we build upon the centralized training code to run the " +"training in a federated fashion." msgstr "" +"이 튜토리얼에서는 Flower를 사용하여 기존 JAX 워크로드의 연합 버전을 구축하는 방법을 보여드립니다. JAX를 사용해 " +"scikit-learn 데이터 세트에서 선형 회귀 모델을 훈련하고 있습니다. 예제는 '파이토치 - Centralized에서 " +"Federated으로 `_ 워크스루와 유사하게 구성하겠습니다. 먼저, `JAX를 사용한 선형 회귀 " +"`_" +" 튜토리얼`을 기반으로 centralized 학습 접근 방식을 구축합니다. 그런 다음 centralized 트레이닝 코드를 기반으로" +" federated 방식으로 트레이닝을 실행합니다." -#: ../../source/ref-changelog.md:652 +#: ../../source/tutorial-quickstart-jax.rst:20 +#, fuzzy msgid "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +"Before we start building our JAX example, we need install the packages " +"``jax``, ``jaxlib``, ``scikit-learn``, and ``flwr``:" msgstr "" +"JAX 예제 빌드를 시작하기 전에 :code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, " +":code:`flwr` 패키지를 설치해야 합니다:" -#: ../../source/ref-changelog.md:654 +#: ../../source/tutorial-quickstart-jax.rst:28 +msgid "Linear Regression with JAX" +msgstr "JAX를 사용한 선형 회귀" + +#: ../../source/tutorial-quickstart-jax.rst:30 +#, fuzzy msgid "" -"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " -"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " -"current round of federated learning/evaluation as their first parameter. " -"To improve reaability and avoid confusion with *random*, this parameter " -"has been renamed from `rnd` to `server_round`." +"We begin with a brief description of the centralized training code based " +"on a ``Linear Regression`` model. 
If you want a more in-depth explanation" +" of what's going on then have a look at the official `JAX documentation " +"`_." msgstr "" +"먼저 :code:`선형 회귀` 모델을 기반으로 하는 중앙 집중식 훈련 코드에 대한 간략한 설명부터 시작하겠습니다. 더 자세한 설명을" +" 원하시면 공식 `JAX 문서 `_를 참조하세요." -#: ../../source/ref-changelog.md:656 +#: ../../source/tutorial-quickstart-jax.rst:34 +#, fuzzy msgid "" -"**Move** `flwr.dataset` **to** `flwr_baselines` " -"([#1273](https://github.com/adap/flower/pull/1273))" +"Let's create a new file called ``jax_training.py`` with all the " +"components required for a traditional (centralized) linear regression " +"training. First, the JAX packages ``jax`` and ``jaxlib`` need to be " +"imported. In addition, we need to import ``sklearn`` since we use " +"``make_regression`` for the dataset and ``train_test_split`` to split the" +" dataset into a training and test set. You can see that we do not yet " +"import the ``flwr`` package for federated learning. This will be done " +"later." msgstr "" +"전통적인(중앙 집중식) 선형 회귀 훈련에 필요한 모든 구성 요소가 포함된 :code:`jax_training.py`라는 새 파일을 " +"생성해 보겠습니다. 먼저, JAX 패키지인 :code:`jax`와 :code:`jaxlib`를 가져와야 합니다. 또한 데이터 세트에" +" :code:`make_regression`을 사용하고 데이터 세트를 학습 및 테스트 세트로 분할하기 위해 " +":code:`train_test_split`을 사용하므로 :code:`sklearn`을 가져와야 합니다. 연합 학습을 위해 아직 " +":code:`flwr` 패키지를 가져오지 않은 것을 볼 수 있습니다. 이 작업은 나중에 수행됩니다." -#: ../../source/ref-changelog.md:658 -msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." -msgstr "" +#: ../../source/tutorial-quickstart-jax.rst:51 +#, fuzzy +msgid "The ``load_data()`` function loads the mentioned training and test sets." +msgstr "code:`load_data()` 함수는 앞서 언급한 트레이닝 및 테스트 세트를 로드합니다." -#: ../../source/ref-changelog.md:660 +#: ../../source/tutorial-quickstart-jax.rst:63 +#, fuzzy msgid "" -"**Remove experimental strategies** " -"([#1280](https://github.com/adap/flower/pull/1280))" -msgstr "" +"The model architecture (a very simple ``Linear Regression`` model) is " +"defined in ``load_model()``." 
+msgstr "모델 아키텍처(매우 간단한 :code:`선형 회귀` 모델)는 :code:`load_model()`에 정의되어 있습니다." -#: ../../source/ref-changelog.md:662 +#: ../../source/tutorial-quickstart-jax.rst:73 +#, fuzzy msgid "" -"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " -"`FedFSv1`)." +"We now need to define the training (function ``train()``), which loops " +"over the training set and measures the loss (function ``loss_fn()``) for " +"each batch of training examples. The loss function is separate since JAX " +"takes derivatives with a ``grad()`` function (defined in the ``main()`` " +"function and called in ``train()``)." msgstr "" +"이제 훈련 집합을 반복하고 각 훈련 예제 배치에 대해 손실을 측정하는(함수 :code:`loss_fn()`) 훈련(함수 " +":code:`train()`)을 정의해야 합니다. JAX는 :code:`grad()` 함수(:code:`main()` 함수에 " +"정의되고 :code:`train()`에서 호출됨)로 파생물을 취하므로 손실 함수는 분리되어 있습니다." -#: ../../source/ref-changelog.md:664 +#: ../../source/tutorial-quickstart-jax.rst:95 +#, fuzzy msgid "" -"**Rename** `Weights` **to** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"The evaluation of the model is defined in the function ``evaluation()``. " +"The function takes all test examples and measures the loss of the linear " +"regression model." msgstr "" +"모델의 평가는 :code:`evaluation()` 함수에 정의되어 있습니다. 이 함수는 모든 테스트 예제를 가져와 선형 회귀 " +"모델의 손실을 측정합니다." -#: ../../source/ref-changelog.md:666 +#: ../../source/tutorial-quickstart-jax.rst:107 +#, fuzzy msgid "" -"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " -"capture what this type is all about." +"Having defined the data loading, model architecture, training, and " +"evaluation we can put everything together and train our model using JAX. " +"As already mentioned, the ``jax.grad()`` function is defined in " +"``main()`` and passed to ``train()``." msgstr "" +"데이터 로딩, 모델 아키텍처, 훈련 및 평가를 정의했으므로 이제 모든 것을 종합하여 JAX를 사용 모델을 훈련할 수 있습니다. 
이미" +" 언급했듯이 :code:`jax.grad()` 함수는 :code:`main()`에 정의되어 :code:`train()`에 " +"전달됩니다." -#: ../../source/ref-changelog.md:668 +#: ../../source/tutorial-quickstart-jax.rst:126 +msgid "You can now run your (centralized) JAX linear regression workload:" +msgstr "이제 (중앙 집중식) JAX 선형 회귀 워크로드를 실행할 수 있습니다:" + +#: ../../source/tutorial-quickstart-jax.rst:132 msgid "" -"**Remove antiquated** `force_final_distributed_eval` **from** " -"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"So far this should all look fairly familiar if you've used JAX before. " +"Let's take the next step and use what we've built to create a simple " +"federated learning system consisting of one server and two clients." msgstr "" +"지금까지는 JAX를 사용해 본 적이 있다면 이 모든 것이 상당히 익숙해 보일 것입니다. 다음 단계로 넘어가서 우리가 구축한 것을 " +"사용하여 하나의 서버와 두 개의 클라이언트로 구성된 간단한 연합 학습 시스템을 만들어 보겠습니다." -#: ../../source/ref-changelog.md:670 +#: ../../source/tutorial-quickstart-jax.rst:137 +msgid "JAX meets Flower" +msgstr "JAX와 Flower의 만남" + +#: ../../source/tutorial-quickstart-jax.rst:139 +#, fuzzy msgid "" -"The `start_server` parameter `force_final_distributed_eval` has long been" -" a historic artefact, in this release it is finally gone for good." -msgstr "" +"The concept of federating an existing workload is always the same and " +"easy to understand. We have to start a *server* and then use the code in " +"``jax_training.py`` for the *clients* that are connected to the *server*." +" The *server* sends model parameters to the clients. The *clients* run " +"the training and update the parameters. The updated parameters are sent " +"back to the *server*, which averages all received parameter updates. This" +" describes one round of the federated learning process, and we repeat " +"this for multiple rounds." +msgstr "" +"기존 워크로드를 연합하는 개념은 항상 동일하고 이해하기 쉽습니다. 서버*를 시작한 다음 *서버*에 연결된 *클라이언트*에 대해 " +":code:`jax_training.py`의 코드를 사용해야 합니다. *서버*는 모델 파라미터를 클라이언트로 전송합니다. 
" +"클라이언트는 학습을 실행하고 파라미터를 업데이트합니다. 업데이트된 파라미터는 *서버*로 다시 전송되며, 수신된 모든 파라미터 " +"업데이트의 평균을 구합니다. 이는 연합 학습 프로세스의 한 라운드를 설명하며, 이 과정을 여러 라운드에 걸쳐 반복합니다." -#: ../../source/ref-changelog.md:672 +#: ../../source/tutorial-quickstart-jax.rst:167 +#, fuzzy msgid "" -"**Make** `get_parameters` **configurable** " -"([#1242](https://github.com/adap/flower/pull/1242))" +"Finally, we will define our *client* logic in ``client.py`` and build " +"upon the previously defined JAX training in ``jax_training.py``. Our " +"*client* needs to import ``flwr``, but also ``jax`` and ``jaxlib`` to " +"update the parameters on our JAX model:" msgstr "" +"마지막으로, :code:`client.py`에서 *client* 로직을 정의하고 :code:`jax_training.py`에서 " +"이전에 정의한 JAX 교육을 기반으로 빌드합니다. *클라이언트*는 :code:`flwr`을 가져와야 하며, JAX 모델의 파라미터를" +" 업데이트하기 위해 :code:`jax` 및 :code:`jaxlib`도 가져와야 합니다:" -#: ../../source/ref-changelog.md:674 +#: ../../source/tutorial-quickstart-jax.rst:182 +#, fuzzy msgid "" -"The `get_parameters` method now accepts a configuration dictionary, just " -"like `get_properties`, `fit`, and `evaluate`." +"Implementing a Flower *client* basically means implementing a subclass of" +" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " +"implementation will be based on ``flwr.client.NumPyClient`` and we'll " +"call it ``FlowerClient``. ``NumPyClient`` is slightly easier to implement" +" than ``Client`` if you use a framework with good NumPy interoperability " +"(like JAX) because it avoids some of the boilerplate that would otherwise" +" be necessary. ``FlowerClient`` needs to implement four methods, two " +"methods for getting/setting model parameters, one method for training the" +" model, and one method for testing the model:" +msgstr "" +"Flower *클라이언트*를 구현한다는 것은 기본적으로 :code:`flwr.client.Client` 또는 " +":code:`flwr.client.NumPyClient`의 서브클래스를 구현하는 것을 의미합니다. 구현은 " +":code:`flwr.client.NumPyClient`를 기반으로 하며, 이를 :code:`FlowerClient`라고 부를 " +"것입니다. 
:code:`NumPyClient`는 필요한 일부 보일러플레이를 피할 수 있기 때문에 NumPy 상호 운용성이 좋은 " +"프레임워크(예: JAX)를 사용하는 경우 :code:`Client`보다 구현하기가 약간 더 쉽습니다. " +"code:`FlowerClient`는 모델 매개변수를 가져오거나 설정하는 메서드 2개, 모델 학습을 위한 메서드 1개, 모델 " +"테스트를 위한 메서드 1개 등 총 4개의 메서드를 구현해야 합니다:" + +#: ../../source/tutorial-quickstart-jax.rst:194 +#, fuzzy +msgid "``set_parameters (optional)``" +msgstr ":code:`set_parameters (선택사항)`" + +#: ../../source/tutorial-quickstart-jax.rst:193 +#, fuzzy +msgid "transform parameters to NumPy ``ndarray``'s" +msgstr "매개 변수를 NumPy :code:`ndarray`로 변환" + +#: ../../source/tutorial-quickstart-jax.rst:203 +msgid "get the updated local model parameters and return them to the server" +msgstr "업데이트된 로컬 모델 파라미터를 가져와 서버로 반환합니다" + +#: ../../source/tutorial-quickstart-jax.rst:208 +msgid "return the local loss to the server" +msgstr "로컬 손실을 서버로 반환합니다" + +#: ../../source/tutorial-quickstart-jax.rst:210 +#, fuzzy +msgid "" +"The challenging part is to transform the JAX model parameters from " +"``DeviceArray`` to ``NumPy ndarray`` to make them compatible with " +"`NumPyClient`." msgstr "" +"어려운 부분은 JAX 모델 매개변수를 :code:`DeviceArray`에서 :code:`NumPy ndarray`로 변환하여 " +"`NumPyClient`와 호환되도록 하는 것입니다." -#: ../../source/ref-changelog.md:676 +#: ../../source/tutorial-quickstart-jax.rst:213 +#, fuzzy msgid "" -"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " -"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" +"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " +"functions ``train()`` and ``evaluate()`` previously defined in " +"``jax_training.py``. So what we really do here is we tell Flower through " +"our ``NumPyClient`` subclass which of our already defined functions to " +"call for training and evaluation. We included type annotations to give " +"you a better understanding of the data types that get passed around." 
msgstr "" +"두 개의 :code:`NumPyClient` 메서드인 :code:`fit`과 :code:`evaluate`는 이전에 " +":code:`jax_training.py`에 정의된 함수 :code:`train()`과 :code:`evaluate()`를 " +"사용합니다. 따라서 여기서 우리가 실제로 하는 일은 이미 정의된 함수 중 훈련과 평가를 위해 호출할 함수를 " +":code:`NumPyClient` 서브클래스를 통해 Flower에게 알려주는 것입니다. 전달되는 데이터 유형을 더 잘 이해할 수 " +"있도록 유형 type annotation을 포함했습니다." -#: ../../source/ref-changelog.md:678 +#: ../../source/tutorial-quickstart-jax.rst:286 +msgid "Having defined the federation process, we can run it." +msgstr "연합 프로세스를 정의했으면 이제 실행할 수 있습니다." + +#: ../../source/tutorial-quickstart-jax.rst:315 msgid "" -"The `start_simulation` function now accepts a configuration dictionary " -"`config` instead of the `num_rounds` integer. This improves the " -"consistency between `start_simulation` and `start_server` and makes " -"transitioning between the two easier." +"in each window (make sure that the server is still running before you do " +"so) and see your JAX project run federated learning across two clients. " +"Congratulations!" msgstr "" +"를 입력하고(그 전에 서버가 계속 실행 중인지 확인하세요) 두 클라이언트에서 연합 학습을 실행하는 JAX 프로젝트를 확인합니다. " +"축하합니다!" -#: ../../source/ref-changelog.md:682 +#: ../../source/tutorial-quickstart-jax.rst:321 msgid "" -"**Support Python 3.10** " -"([#1320](https://github.com/adap/flower/pull/1320))" +"The source code of this example was improved over time and can be found " +"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " +"clients load the same dataset." msgstr "" +"이 예제의 소스 코드는 시간이 지남에 따라 개선되었으며 여기에서 확인할 수 있습니다: 'Quickstart JAX " +"`_. 두 " +"클라이언트가 동일한 데이터 세트를 로드하기 때문에 이 예제는 다소 단순화되어 있습니다." -#: ../../source/ref-changelog.md:684 +#: ../../source/tutorial-quickstart-jax.rst:325 msgid "" -"The previous Flower release introduced experimental support for Python " -"3.10, this release declares Python 3.10 support as stable." +"You're now prepared to explore this topic further. How about using a more" +" sophisticated model or using a different dataset? 
How about adding more " +"clients?" msgstr "" +"이제 이 주제를 더 자세히 살펴볼 준비가 되었습니다. 더 정교한 모델을 사용하거나 다른 데이터 집합을 사용해 보는 것은 어떨까요? " +"클라이언트를 더 추가하는 것은 어떨까요?" -#: ../../source/ref-changelog.md:686 +#: ../../source/tutorial-quickstart-mlx.rst:4 +#, fuzzy +msgid "Quickstart MLX" +msgstr "빠른 시작" + +#: ../../source/tutorial-quickstart-mlx.rst:6 msgid "" -"**Make all** `Client` **and** `NumPyClient` **methods optional** " -"([#1260](https://github.com/adap/flower/pull/1260), " -"[#1277](https://github.com/adap/flower/pull/1277))" +"In this federated learning tutorial we will learn how to train simple MLP" +" on MNIST using Flower and MLX. It is recommended to create a virtual " +"environment and run everything within a :doc:`virtualenv `." msgstr "" -#: ../../source/ref-changelog.md:688 +#: ../../source/tutorial-quickstart-mlx.rst:10 msgid "" -"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " -"`fit`, and `evaluate` are all optional. This enables writing clients that" -" implement, for example, only `fit`, but no other method. No need to " -"implement `evaluate` when using centralized evaluation!" +"Let's use `flwr new` to create a complete Flower+MLX project. It will " +"generate all the files needed to run, by default with the Simulation " +"Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." msgstr "" -#: ../../source/ref-changelog.md:690 +#: ../../source/tutorial-quickstart-mlx.rst:25 msgid "" -"**Enable passing a** `Server` **instance to** `start_simulation` " -"([#1281](https://github.com/adap/flower/pull/1281))" +"Then, run the command below. You will be prompted to select of the " +"available templates (choose ``MLX``), give a name to your project, and " +"type in your developer name:" msgstr "" -#: ../../source/ref-changelog.md:692 -msgid "" -"Similar to `start_server`, `start_simulation` now accepts a full `Server`" -" instance. 
This enables users to heavily customize the execution of " -"eperiments and opens the door to running, for example, async FL using the" -" Virtual Client Engine." +#: ../../source/tutorial-quickstart-mlx.rst:53 +msgid "To run the project do:" msgstr "" -#: ../../source/ref-changelog.md:694 +#: ../../source/tutorial-quickstart-mlx.rst:102 msgid "" -"**Update code examples** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" +"You can also override the parameters defined in " +"``[tool.flwr.app.config]`` section in the ``pyproject.toml`` like this:" msgstr "" -#: ../../source/ref-changelog.md:696 +#: ../../source/tutorial-quickstart-mlx.rst:116 msgid "" -"Many code examples received small or even large maintenance updates, " -"among them are" +"We will use `Flower Datasets `_ to " +"easily download and partition the `MNIST` dataset. In this example you'll" +" make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets:" msgstr "" -#: ../../source/ref-changelog.md:698 -msgid "`scikit-learn`" +#: ../../source/tutorial-quickstart-mlx.rst:157 +msgid "" +"We define the model as in the `centralized MLX example " +"`_, it's a " +"simple MLP:" msgstr "" -#: ../../source/ref-changelog.md:699 -msgid "`simulation_pytorch`" +#: ../../source/tutorial-quickstart-mlx.rst:180 +msgid "" +"We also define some utility functions to test our model and to iterate " +"over batches." msgstr "" -#: ../../source/ref-changelog.md:700 -msgid "`quickstart_pytorch`" +#: ../../source/tutorial-quickstart-mlx.rst:201 +msgid "" +"The main changes we have to make to use `MLX` with `Flower` will be found" +" in the ``get_params()`` and ``set_params()`` functions. 
Indeed, MLX " +"doesn't provide an easy way to convert the model parameters into a list " +"of ``np.array`` objects (the format we need for the serialization of the " +"messages to work)." msgstr "" -#: ../../source/ref-changelog.md:701 -msgid "`quickstart_simulation`" +#: ../../source/tutorial-quickstart-mlx.rst:206 +msgid "The way MLX stores its parameters is as follows:" msgstr "" -#: ../../source/ref-changelog.md:702 -msgid "`quickstart_tensorflow`" +#: ../../source/tutorial-quickstart-mlx.rst:219 +msgid "" +"Therefore, to get our list of ``np.array`` objects, we need to extract " +"each array and convert them into a NumPy array:" msgstr "" -#: ../../source/ref-changelog.md:703 -msgid "`advanced_tensorflow`" +#: ../../source/tutorial-quickstart-mlx.rst:228 +msgid "" +"For the ``set_params()`` function, we perform the reverse operation. We " +"receive a list of NumPy arrays and want to convert them into MLX " +"parameters. Therefore, we iterate through pairs of parameters and assign " +"them to the `weight` and `bias` keys of each layer dict:" msgstr "" -#: ../../source/ref-changelog.md:705 +#: ../../source/tutorial-quickstart-mlx.rst:243 msgid "" -"**Remove the obsolete simulation example** " -"([#1328](https://github.com/adap/flower/pull/1328))" +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset:" msgstr "" -#: ../../source/ref-changelog.md:707 +#: ../../source/tutorial-quickstart-mlx.rst:259 msgid "" -"Removes the obsolete `simulation` example and renames " -"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " -"naming of `simulation_pytorch`" +"Here, after updating the parameters, we perform the training as in the " +"centralized case, and return the new parameters." 
msgstr "" -#: ../../source/ref-changelog.md:709 -msgid "" -"**Update documentation** " -"([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" +#: ../../source/tutorial-quickstart-mlx.rst:262 +msgid "And for the ``evaluate()`` method of the client:" msgstr "" -#: ../../source/ref-changelog.md:711 +#: ../../source/tutorial-quickstart-mlx.rst:272 msgid "" -"One substantial documentation update fixes multiple smaller rendering " -"issues, makes titles more succinct to improve navigation, removes a " -"deprecated library, updates documentation dependencies, includes the " -"`flwr.common` module in the API reference, includes support for markdown-" -"based documentation, migrates the changelog from `.rst` to `.md`, and " -"fixes a number of smaller details!" +"We also begin by updating the parameters with the ones sent by the " +"server, and then we compute the loss and accuracy using the functions " +"defined above. In the constructor of the ``FlowerClient`` we instantiate " +"the `MLP` model as well as other components such as the optimizer." 
msgstr "" -#: ../../source/ref-changelog.md:713 ../../source/ref-changelog.md:768 -#: ../../source/ref-changelog.md:837 ../../source/ref-changelog.md:876 -msgid "**Minor updates**" +#: ../../source/tutorial-quickstart-mlx.rst:277 +msgid "Putting everything together we have:" msgstr "" -#: ../../source/ref-changelog.md:715 +#: ../../source/tutorial-quickstart-mlx.rst:331 msgid "" -"Add round number to fit and evaluate log messages " -"([#1266](https://github.com/adap/flower/pull/1266))" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that " +"``context`` enables you to get access to hyperparemeters defined in " +"``pyproject.toml`` to configure the run. In this tutorial we access, " +"among other hyperparameters, the ``local-epochs`` setting to control the " +"number of epochs a ``ClientApp`` will perform when running the ``fit()`` " +"method." msgstr "" -#: ../../source/ref-changelog.md:716 +#: ../../source/tutorial-quickstart-mlx.rst:363 msgid "" -"Add secure gRPC connection to the `advanced_tensorflow` code example " -"([#847](https://github.com/adap/flower/pull/847))" +"To construct a ``ServerApp``, we define a ``server_fn()`` callback with " +"an identical signature to that of ``client_fn()``, but the return type is" +" `ServerAppComponents `_ as " +"opposed to `Client `_. In this example we use the " +"``FedAvg`` strategy." msgstr "" -#: ../../source/ref-changelog.md:717 +#: ../../source/tutorial-quickstart-mlx.rst:386 +#: ../../source/tutorial-quickstart-pytorch.rst:344 +#: ../../source/tutorial-quickstart-tensorflow.rst:266 msgid "" -"Update developer tooling " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" +"Congratulations! You've successfully built and run your first federated " +"learning system." 
msgstr "" -#: ../../source/ref-changelog.md:718 +#: ../../source/tutorial-quickstart-mlx.rst:390 msgid "" -"Rename ProtoBuf messages to improve consistency " -"([#1214](https://github.com/adap/flower/pull/1214), " -"[#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"Check the `source code `_ of the extended version of this tutorial in ``examples" +"/quickstart-mlx`` in the Flower GitHub repository." msgstr "" -#: ../../source/ref-changelog.md:720 -msgid "v0.19.0 (2022-05-18)" +#: ../../source/tutorial-quickstart-pandas.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Pandas to perform Federated Analytics." msgstr "" -#: ../../source/ref-changelog.md:724 -msgid "" -"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " -"([#919](https://github.com/adap/flower/pull/919), " -"[#1127](https://github.com/adap/flower/pull/1127), " -"[#914](https://github.com/adap/flower/pull/914))" +#: ../../source/tutorial-quickstart-pandas.rst:4 +msgid "Quickstart Pandas" msgstr "" -#: ../../source/ref-changelog.md:726 -msgid "" -"The first preview release of Flower Baselines has arrived! We're " -"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " -"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " -"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " -"With this first preview release we're also inviting the community to " -"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" -"contribute-baselines.html)." +#: ../../source/tutorial-quickstart-pandas.rst:9 +msgid "Let's build a federated analytics system using Pandas and Flower!" 
msgstr "" -#: ../../source/ref-changelog.md:728 +#: ../../source/tutorial-quickstart-pandas.rst:11 msgid "" -"**C++ client SDK (preview) and code example** " -"([#1111](https://github.com/adap/flower/pull/1111))" +"Please refer to the `full code example " +"`_ " +"to learn more." msgstr "" -#: ../../source/ref-changelog.md:730 +#: ../../source/tutorial-quickstart-pytorch.rst:-1 msgid "" -"Preview support for Flower clients written in C++. The C++ preview " -"includes a Flower client SDK and a quickstart code example that " -"demonstrates a simple C++ client using the SDK." +"Check out this Federated Learning quickstart tutorial for using Flower " +"with PyTorch to train a CNN model on MNIST." msgstr "" -#: ../../source/ref-changelog.md:732 +#: ../../source/tutorial-quickstart-pytorch.rst:6 msgid "" -"**Add experimental support for Python 3.10 and Python 3.11** " -"([#1135](https://github.com/adap/flower/pull/1135))" +"In this federated learning tutorial we will learn how to train a " +"Convolutional Neural Network on CIFAR-10 using Flower and PyTorch. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." msgstr "" -#: ../../source/ref-changelog.md:734 +#: ../../source/tutorial-quickstart-pytorch.rst:11 msgid "" -"Python 3.10 is the latest stable release of Python and Python 3.11 is due" -" to be released in October. This Flower release adds experimental support" -" for both Python versions." +"Let's use `flwr new` to create a complete Flower+PyTorch project. It will" +" generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." 
msgstr "" -#: ../../source/ref-changelog.md:736 +#: ../../source/tutorial-quickstart-pytorch.rst:26 msgid "" -"**Aggregate custom metrics through user-provided functions** " -"([#1144](https://github.com/adap/flower/pull/1144))" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``PyTorch``), give a name to your project, " +"and type in your developer name:" msgstr "" -#: ../../source/ref-changelog.md:738 +#: ../../source/tutorial-quickstart-pytorch.rst:117 msgid "" -"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" -" customize the strategy. Built-in strategies support two new arguments, " -"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " -"allow passing custom metric aggregation functions." +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create dataloaders with the data" +" that correspond to their data partition." msgstr "" -#: ../../source/ref-changelog.md:740 +#: ../../source/tutorial-quickstart-pytorch.rst:152 msgid "" -"**User-configurable round timeout** " -"([#1162](https://github.com/adap/flower/pull/1162))" +"We defined a simple Convolutional Neural Network (CNN), but feel free to " +"replace it with a more sophisticated model if you'd like:" msgstr "" -#: ../../source/ref-changelog.md:742 +#: ../../source/tutorial-quickstart-pytorch.rst:177 msgid "" -"A new configuration value allows the round timeout to be set for " -"`start_server` and `start_simulation`. If the `config` dictionary " -"contains a `round_timeout` key (with a `float` value in seconds), the " -"server will wait *at least* `round_timeout` seconds before it closes the " -"connection." 
+"In addition to defining the model architecture, we also include two " +"utility functions to perform both training (i.e. ``train()``) and " +"evaluation (i.e. ``test()``) using the above model. These functions " +"should look fairly familiar if you have some prior experience with " +"PyTorch. Note these functions do not have anything specific to Flower. " +"That being said, the training function will normally be called, as we'll " +"see later, from a Flower client passing its own data. In summary, your " +"clients can use standard training/testing functions to perform local " +"training or evaluation:" msgstr "" -#: ../../source/ref-changelog.md:744 +#: ../../source/tutorial-quickstart-pytorch.rst:226 msgid "" -"**Enable both federated evaluation and centralized evaluation to be used " -"at the same time in all built-in strategies** " -"([#1091](https://github.com/adap/flower/pull/1091))" +"The main changes we have to make to use `PyTorch` with `Flower` will be " +"found in the ``get_weights()`` and ``set_weights()`` functions. In " +"``get_weights()`` PyTorch model parameters are extracted and represented " +"as a list of NumPy arrays. The ``set_weights()`` function that's the " +"oposite: given a list of NumPy arrays it applies them to an existing " +"PyTorch model. Doing this in fairly easy in PyTorch." msgstr "" -#: ../../source/ref-changelog.md:746 +#: ../../source/tutorial-quickstart-pytorch.rst:282 msgid "" -"Built-in strategies can now perform both federated evaluation (i.e., " -"client-side) and centralized evaluation (i.e., server-side) in the same " -"round. Federated evaluation can be disabled by setting `fraction_eval` to" -" `0.0`." +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. 
In this tutorial we access the " +"`local-epochs` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additioinal hyperparameters in ``pyproject.toml`` and access them here." msgstr "" -#: ../../source/ref-changelog.md:748 +#: ../../source/tutorial-quickstart-pytorch.rst:309 msgid "" -"**Two new Jupyter Notebook tutorials** " -"([#1141](https://github.com/adap/flower/pull/1141))" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will server as " +"the global model to federated. Note that the value of ``fraction_fit`` is" +" read from the run config. You can find the default value defined in the " +"``pyproject.toml``." msgstr "" -#: ../../source/ref-changelog.md:750 +#: ../../source/tutorial-quickstart-pytorch.rst:348 msgid "" -"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " -"basic and intermediate Flower features:" +"Check the `source code `_ of the extended version of this tutorial in " +"``examples/quickstart-pytorch`` in the Flower GitHub repository." msgstr "" -#: ../../source/ref-changelog.md:752 +#: ../../source/tutorial-quickstart-pytorch.rst:354 +#: ../../source/tutorial-quickstart-tensorflow.rst:278 +#, fuzzy +msgid "Video tutorial" +msgstr "튜토리얼" + +#: ../../source/tutorial-quickstart-pytorch.rst:358 msgid "" -"*An Introduction to Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" -"-Intro-to-FL-PyTorch.ipynb)" +"The video shown below shows how to setup a PyTorch + Flower project using" +" our previously recommended APIs. 
A new video tutorial will be released " +"that shows the new APIs (as the content above does)" msgstr "" -#: ../../source/ref-changelog.md:754 -msgid "" -"*Using Strategies in Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" -"-Strategies-in-FL-PyTorch.ipynb)" +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:4 +msgid "Quickstart PyTorch Lightning" msgstr "" -#: ../../source/ref-changelog.md:756 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:6 msgid "" -"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " -"([#1076](https://github.com/adap/flower/pull/1076))" +"In this federated learning tutorial we will learn how to train an " +"AutoEncoder model on MNIST using Flower and PyTorch Lightning. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." msgstr "" -#: ../../source/ref-changelog.md:758 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:19 msgid "" -"The new `FedAvgM` strategy implements Federated Averaging with Server " -"Momentum \\[Hsu et al., 2019\\]." +"This will create a new directory called `quickstart-pytorch-lightning` " +"containing the following files:" msgstr "" -#: ../../source/ref-changelog.md:760 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:42 msgid "" -"**New advanced PyTorch code example** " -"([#1007](https://github.com/adap/flower/pull/1007))" +"By default, Flower Simulation Engine will be started and it will create a" +" federation of 4 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" To run the project, do:" msgstr "" -#: ../../source/ref-changelog.md:762 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:93 msgid "" -"A new code example (`advanced_pytorch`) demonstrates advanced Flower " -"concepts with PyTorch." 
+"Each simulated `ClientApp` (two per round) will also log a summary of " +"their local training process. Expect this output to be similar to:" msgstr "" -#: ../../source/ref-changelog.md:764 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:115 msgid "" -"**New JAX code example** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" +"Check the `source code `_ of this tutorial in ``examples" +"/quickstart-pytorch-lightning`` in the Flower GitHub repository." msgstr "" -#: ../../source/ref-changelog.md:766 +#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 msgid "" -"A new code example (`jax_from_centralized_to_federated`) shows federated " -"learning with JAX and Flower." +"Check out this Federated Learning quickstart tutorial for using Flower " +"with scikit-learn to train a linear regression model." msgstr "" -#: ../../source/ref-changelog.md:770 -msgid "" -"New option to keep Ray running if Ray was already initialized in " -"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" +#: ../../source/tutorial-quickstart-scikitlearn.rst:4 +msgid "Quickstart scikit-learn" msgstr "" -#: ../../source/ref-changelog.md:771 +#: ../../source/tutorial-quickstart-scikitlearn.rst:9 msgid "" -"Add support for custom `ClientManager` as a `start_simulation` parameter " -"([#1171](https://github.com/adap/flower/pull/1171))" +"In this tutorial, we will learn how to train a ``Logistic Regression`` " +"model on MNIST using Flower and scikit-learn." msgstr "" -#: ../../source/ref-changelog.md:772 +#: ../../source/tutorial-quickstart-scikitlearn.rst:12 msgid "" -"New documentation for [implementing " -"strategies](https://flower.ai/docs/framework/how-to-implement-" -"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " -"[#1175](https://github.com/adap/flower/pull/1175))" +"It is recommended to create a virtual environment and run everything " +"within this :doc:`virtualenv `." 
msgstr "" -#: ../../source/ref-changelog.md:773 +#: ../../source/tutorial-quickstart-scikitlearn.rst:15 msgid "" -"New mobile-friendly documentation theme " -"([#1174](https://github.com/adap/flower/pull/1174))" +"Our example consists of one *server* and two *clients* all having the " +"same model." msgstr "" -#: ../../source/ref-changelog.md:774 +#: ../../source/tutorial-quickstart-scikitlearn.rst:17 msgid "" -"Limit version range for (optional) `ray` dependency to include only " -"compatible releases (`>=1.9.2,<1.12.0`) " -"([#1205](https://github.com/adap/flower/pull/1205))" +"*Clients* are responsible for generating individual model parameter " +"updates for the model based on their local datasets. These updates are " +"then sent to the *server* which will aggregate them to produce an updated" +" global model. Finally, the *server* sends this improved version of the " +"model back to each *client*. A complete cycle of parameters updates is " +"called a *round*." msgstr "" -#: ../../source/ref-changelog.md:778 +#: ../../source/tutorial-quickstart-scikitlearn.rst:23 msgid "" -"**Remove deprecated support for Python 3.6** " -"([#871](https://github.com/adap/flower/pull/871))" +"Now that we have a rough idea of what is going on, let's get started. We " +"first need to install Flower. 
You can do this by running:" msgstr "" -#: ../../source/ref-changelog.md:779 -msgid "" -"**Remove deprecated KerasClient** " -"([#857](https://github.com/adap/flower/pull/857))" +#: ../../source/tutorial-quickstart-scikitlearn.rst:30 +msgid "Since we want to use scikit-learn, let's go ahead and install it:" msgstr "" -#: ../../source/ref-changelog.md:780 +#: ../../source/tutorial-quickstart-scikitlearn.rst:36 +msgid "Or simply install all dependencies using Poetry:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:45 msgid "" -"**Remove deprecated no-op extra installs** " -"([#973](https://github.com/adap/flower/pull/973))" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training with two clients and one server. However, before " +"setting up the client and server, we will define all functionalities that" +" we need for our federated learning setup within ``utils.py``. The " +"``utils.py`` contains different functions defining all the machine " +"learning basics:" msgstr "" -#: ../../source/ref-changelog.md:781 -msgid "" -"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " -"([#869](https://github.com/adap/flower/pull/869))" -msgstr "" +#: ../../source/tutorial-quickstart-scikitlearn.rst:51 +#, fuzzy +msgid "``get_model_parameters()``" +msgstr "모델 매개변수." 
-#: ../../source/ref-changelog.md:782 -msgid "" -"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " -"([#1107](https://github.com/adap/flower/pull/1107))" +#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +msgid "Returns the parameters of a ``sklearn`` LogisticRegression model" msgstr "" -#: ../../source/ref-changelog.md:783 -msgid "" -"**Remove deprecated DefaultStrategy strategy** " -"([#1142](https://github.com/adap/flower/pull/1142))" +#: ../../source/tutorial-quickstart-scikitlearn.rst:53 +msgid "``set_model_params()``" msgstr "" -#: ../../source/ref-changelog.md:784 -msgid "" -"**Remove deprecated support for eval_fn accuracy return value** " -"([#1142](https://github.com/adap/flower/pull/1142))" +#: ../../source/tutorial-quickstart-scikitlearn.rst:54 +msgid "Sets the parameters of a ``sklearn`` LogisticRegression model" msgstr "" -#: ../../source/ref-changelog.md:785 -msgid "" -"**Remove deprecated support for passing initial parameters as NumPy " -"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 +msgid "``set_initial_params()``" msgstr "" -#: ../../source/ref-changelog.md:787 -msgid "v0.18.0 (2022-02-28)" +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 +msgid "Initializes the model parameters that the Flower server will ask for" msgstr "" -#: ../../source/ref-changelog.md:791 +#: ../../source/tutorial-quickstart-scikitlearn.rst:58 msgid "" -"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " -"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " -"[#872](https://github.com/adap/flower/pull/872), " -"[#833](https://github.com/adap/flower/pull/833), " -"[#1036](https://github.com/adap/flower/pull/1036))" +"Please check out ``utils.py`` `here " +"`_ for more details. The pre-defined functions are used in" +" the ``client.py`` and imported. 
The ``client.py`` also requires to " +"import several packages such as Flower and scikit-learn:" msgstr "" -#: ../../source/ref-changelog.md:793 +#: ../../source/tutorial-quickstart-scikitlearn.rst:75 msgid "" -"Simulations (using the Virtual Client Engine through `start_simulation`) " -"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " -"installing Flower with the `simulation` extra (`pip install " -"flwr[simulation]`)." +"Prior to local training, we need to load the MNIST dataset, a popular " +"image classification dataset of handwritten digits for machine learning, " +"and partition the dataset for FL. This can be conveniently achieved using" +" `Flower Datasets `_. The " +"``FederatedDataset.load_partition()`` method loads the partitioned " +"training set for each partition ID defined in the ``--partition-id`` " +"argument." msgstr "" -#: ../../source/ref-changelog.md:795 +#: ../../source/tutorial-quickstart-scikitlearn.rst:106 msgid "" -"**New Jupyter Notebook code example** " -"([#833](https://github.com/adap/flower/pull/833))" +"Next, the logistic regression model is defined and initialized with " +"``utils.set_initial_params()``." msgstr "" -#: ../../source/ref-changelog.md:797 +#: ../../source/tutorial-quickstart-scikitlearn.rst:119 msgid "" -"A new code example (`quickstart_simulation`) demonstrates Flower " -"simulations using the Virtual Client Engine through Jupyter Notebook " -"(incl. Google Colab)." +"The Flower server interacts with clients through an interface called " +"``Client``. When the server selects a particular client for training, it " +"sends training instructions over the network. The client receives those " +"instructions and calls one of the ``Client`` methods to run your code " +"(i.e., to fit the logistic regression we defined earlier)." 
msgstr "" -#: ../../source/ref-changelog.md:799 +#: ../../source/tutorial-quickstart-scikitlearn.rst:124 msgid "" -"**Client properties (feature preview)** " -"([#795](https://github.com/adap/flower/pull/795))" +"Flower provides a convenience class called ``NumPyClient`` which makes it" +" easier to implement the ``Client`` interface when your workload uses " +"scikit-learn. Implementing ``NumPyClient`` usually means defining the " +"following methods (``set_parameters`` is optional though):" msgstr "" -#: ../../source/ref-changelog.md:801 -msgid "" -"Clients can implement a new method `get_properties` to enable server-side" -" strategies to query client properties." +#: ../../source/tutorial-quickstart-scikitlearn.rst:130 +msgid "return the model weight as a list of NumPy ndarrays" msgstr "" -#: ../../source/ref-changelog.md:803 +#: ../../source/tutorial-quickstart-scikitlearn.rst:132 +#, fuzzy +msgid "``set_parameters`` (optional)" +msgstr ":code:`set_parameters (선택사항)`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:132 msgid "" -"**Experimental Android support with TFLite** " -"([#865](https://github.com/adap/flower/pull/865))" +"update the local model weights with the parameters received from the " +"server" msgstr "" -#: ../../source/ref-changelog.md:805 -msgid "" -"Android support has finally arrived in `main`! Flower is both client-" -"agnostic and framework-agnostic by design. One can integrate arbitrary " -"client platforms and with this release, using Flower on Android has " -"become a lot easier." +#: ../../source/tutorial-quickstart-scikitlearn.rst:133 +msgid "is directly imported with ``utils.set_model_params()``" msgstr "" -#: ../../source/ref-changelog.md:807 -msgid "" -"The example uses TFLite on the client side, along with a new " -"`FedAvgAndroid` strategy. 
The Android client and `FedAvgAndroid` are " -"still experimental, but they are a first step towards a fully-fledged " -"Android SDK and a unified `FedAvg` implementation that integrated the new" -" functionality from `FedAvgAndroid`." +#: ../../source/tutorial-quickstart-scikitlearn.rst:135 +msgid "set the local model weights" msgstr "" -#: ../../source/ref-changelog.md:809 -msgid "" -"**Make gRPC keepalive time user-configurable and decrease default " -"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" +#: ../../source/tutorial-quickstart-scikitlearn.rst:136 +msgid "train the local model" msgstr "" -#: ../../source/ref-changelog.md:811 -msgid "" -"The default gRPC keepalive time has been reduced to increase the " -"compatibility of Flower with more cloud environments (for example, " -"Microsoft Azure). Users can configure the keepalive time to customize the" -" gRPC stack based on specific requirements." +#: ../../source/tutorial-quickstart-scikitlearn.rst:137 +#, fuzzy +msgid "return the updated local model weights" +msgstr "현재 로컬 모델 파라미터를 반환합니다." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:139 +msgid "test the local model" msgstr "" -#: ../../source/ref-changelog.md:813 -msgid "" -"**New differential privacy example using Opacus and PyTorch** " -"([#805](https://github.com/adap/flower/pull/805))" +#: ../../source/tutorial-quickstart-scikitlearn.rst:141 +msgid "The methods can be implemented in the following way:" msgstr "" -#: ../../source/ref-changelog.md:815 +#: ../../source/tutorial-quickstart-scikitlearn.rst:163 msgid "" -"A new code example (`opacus`) demonstrates differentially-private " -"federated learning with Opacus, PyTorch, and Flower." 
+"We can now create an instance of our class ``MnistClient`` and add one " +"line to actually run this client:" msgstr "" -#: ../../source/ref-changelog.md:817 +#: ../../source/tutorial-quickstart-scikitlearn.rst:170 msgid "" -"**New Hugging Face Transformers code example** " -"([#863](https://github.com/adap/flower/pull/863))" +"That's it for the client. We only have to implement ``Client`` or " +"``NumPyClient`` and call ``fl.client.start_client()``. If you implement a" +" client of type ``NumPyClient`` you'll need to first call its " +"``to_client()`` method. The string ``\"0.0.0.0:8080\"`` tells the client " +"which server to connect to. In our case we can run the server and the " +"client on the same machine, therefore we use ``\"0.0.0.0:8080\"``. If we " +"run a truly federated workload with the server and clients running on " +"different machines, all that needs to change is the ``server_address`` we" +" pass to the client." msgstr "" -#: ../../source/ref-changelog.md:819 +#: ../../source/tutorial-quickstart-scikitlearn.rst:181 msgid "" -"A new code example (`quickstart_huggingface`) demonstrates usage of " -"Hugging Face Transformers with Flower." +"The following Flower server is a little bit more advanced and returns an " +"evaluation function for the server-side evaluation. First, we import " +"again all required libraries such as Flower and scikit-learn." 
msgstr "" -#: ../../source/ref-changelog.md:821 -msgid "" -"**New MLCube code example** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" +#: ../../source/tutorial-quickstart-scikitlearn.rst:185 +msgid "``server.py``, import Flower and start the server:" msgstr "" -#: ../../source/ref-changelog.md:823 +#: ../../source/tutorial-quickstart-scikitlearn.rst:198 msgid "" -"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " -"with Flower." +"The number of federated learning rounds is set in ``fit_round()`` and the" +" evaluation is defined in ``get_evaluate_fn()``. The evaluation function " +"is called after each federated learning round and gives you information " +"about loss and accuracy. Note that we also make use of Flower Datasets " +"here to load the test split of the MNIST dataset for server-side " +"evaluation." msgstr "" -#: ../../source/ref-changelog.md:825 +#: ../../source/tutorial-quickstart-scikitlearn.rst:228 msgid "" -"**SSL-enabled server and client** " -"([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +"The ``main`` contains the server-side parameter initialization " +"``utils.set_initial_params()`` as well as the aggregation strategy " +"``fl.server.strategy:FedAvg()``. The strategy is the default one, " +"federated averaging (or FedAvg), with two clients and evaluation after " +"each federated learning round. The server can be started with the command" +" ``fl.server.start_server(server_address=\"0.0.0.0:8080\", " +"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))``." 
msgstr "" -#: ../../source/ref-changelog.md:827 +#: ../../source/tutorial-quickstart-scikitlearn.rst:256 msgid "" -"SSL enables secure encrypted connections between clients and servers. " -"This release open-sources the Flower secure gRPC implementation to make " -"encrypted communication channels accessible to all Flower users." +"With both client and server ready, we can now run everything and see " +"federated learning in action. Federated learning systems usually have a " +"server and multiple clients. We, therefore, have to start the server " +"first:" msgstr "" -#: ../../source/ref-changelog.md:829 +#: ../../source/tutorial-quickstart-scikitlearn.rst:264 +#: ../../source/tutorial-quickstart-xgboost.rst:598 msgid "" -"**Updated** `FedAdam` **and** `FedYogi` **strategies** " -"([#885](https://github.com/adap/flower/pull/885), " -"[#895](https://github.com/adap/flower/pull/895))" +"Once the server is running we can start the clients in different " +"terminals. Open a new terminal and start the first client:" msgstr "" -#: ../../source/ref-changelog.md:831 -msgid "" -"`FedAdam` and `FedAdam` match the latest version of the Adaptive " -"Federated Optimization paper." +#: ../../source/tutorial-quickstart-scikitlearn.rst:271 +#: ../../source/tutorial-quickstart-xgboost.rst:605 +msgid "Open another terminal and start the second client:" msgstr "" -#: ../../source/ref-changelog.md:833 +#: ../../source/tutorial-quickstart-scikitlearn.rst:277 +#: ../../source/tutorial-quickstart-xgboost.rst:611 msgid "" -"**Initialize** `start_simulation` **with a list of client IDs** " -"([#860](https://github.com/adap/flower/pull/860))" +"Each client will have its own dataset. 
You should now see how the " +"training does in the very first terminal (the one that started the " +"server):" msgstr "" -#: ../../source/ref-changelog.md:835 +#: ../../source/tutorial-quickstart-scikitlearn.rst:311 msgid "" -"`start_simulation` can now be called with a list of client IDs " -"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " -"`client_fn` whenever a client needs to be initialized, which can make it " -"easier to load data partitions that are not accessible through `int` " -"identifiers." +"Congratulations! You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this example can be found in ``examples/sklearn-logreg-" +"mnist``." msgstr "" -#: ../../source/ref-changelog.md:839 +#: ../../source/tutorial-quickstart-tensorflow.rst:-1 msgid "" -"Update `num_examples` calculation in PyTorch code examples in " -"([#909](https://github.com/adap/flower/pull/909))" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with TensorFlow to train a CNN model on CIFAR-10." msgstr "" -#: ../../source/ref-changelog.md:840 -msgid "" -"Expose Flower version through `flwr.__version__` " -"([#952](https://github.com/adap/flower/pull/952))" +#: ../../source/tutorial-quickstart-tensorflow.rst:4 +msgid "Quickstart TensorFlow" msgstr "" -#: ../../source/ref-changelog.md:841 +#: ../../source/tutorial-quickstart-tensorflow.rst:6 msgid "" -"`start_server` in `app.py` now returns a `History` object containing " -"metrics from training ([#974](https://github.com/adap/flower/pull/974))" +"In this tutorial we will learn how to train a Convolutional Neural " +"Network on CIFAR-10 using the Flower framework and TensorFlow. First of " +"all, it is recommended to create a virtual environment and run everything" +" within a :doc:`virtualenv `." 
msgstr "" -#: ../../source/ref-changelog.md:842 +#: ../../source/tutorial-quickstart-tensorflow.rst:11 msgid "" -"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " -"([#978](https://github.com/adap/flower/pull/978))" +"Let's use `flwr new` to create a complete Flower+TensorFlow project. It " +"will generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." msgstr "" -#: ../../source/ref-changelog.md:843 +#: ../../source/tutorial-quickstart-tensorflow.rst:26 msgid "" -"Increase sleep time after server start to three seconds in all code " -"examples ([#1086](https://github.com/adap/flower/pull/1086))" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``TensorFlow``), give a name to your project," +" and type in your developer name:" msgstr "" -#: ../../source/ref-changelog.md:844 +#: ../../source/tutorial-quickstart-tensorflow.rst:114 msgid "" -"Added a new FAQ section to the documentation " -"([#948](https://github.com/adap/flower/pull/948))" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create the ``NumPy`` arrays that" +" correspond to their data partition." msgstr "" -#: ../../source/ref-changelog.md:845 +#: ../../source/tutorial-quickstart-tensorflow.rst:141 msgid "" -"And many more under-the-hood changes, library updates, documentation " -"changes, and tooling improvements!" +"Next, we need a model. 
We defined a simple Convolutional Neural Network " +"(CNN), but feel free to replace it with a more sophisticated model if " +"you'd like:" msgstr "" -#: ../../source/ref-changelog.md:849 +#: ../../source/tutorial-quickstart-tensorflow.rst:170 msgid "" -"**Removed** `flwr_example` **and** `flwr_experimental` **from release " -"build** ([#869](https://github.com/adap/flower/pull/869))" +"With `TensorFlow`, we can use the built-in ``get_weights()`` and " +"``set_weights()`` functions, which simplifies the implementation with " +"`Flower`. The rest of the functionality in the ClientApp is directly " +"inspired by the centralized case. The ``fit()`` method in the client " +"trains the model using the local dataset. Similarly, the ``evaluate()`` " +"method is used to evaluate the model received on a held-out validation " +"set that the client might have:" msgstr "" -#: ../../source/ref-changelog.md:851 +#: ../../source/tutorial-quickstart-tensorflow.rst:203 msgid "" -"The packages `flwr_example` and `flwr_experimental` have been deprecated " -"since Flower 0.12.0 and they are not longer included in Flower release " -"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " -"an upcoming release." +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparameters defined in your " +"``pyproject.toml`` to configure the run. For example, in this tutorial we" +" access the `local-epochs` setting to control the number of epochs a " +"``ClientApp`` will perform when running the ``fit()`` method, in addition" +" to `batch-size`. You could define additional hyperparameters in " +"``pyproject.toml`` and access them here." 
msgstr "" -#: ../../source/ref-changelog.md:853 -msgid "v0.17.0 (2021-09-24)" +#: ../../source/tutorial-quickstart-tensorflow.rst:234 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will serve as " +"the global model to federate." msgstr "" -#: ../../source/ref-changelog.md:857 +#: ../../source/tutorial-quickstart-tensorflow.rst:270 msgid "" -"**Experimental virtual client engine** " -"([#781](https://github.com/adap/flower/pull/781) " -"[#790](https://github.com/adap/flower/pull/790) " -"[#791](https://github.com/adap/flower/pull/791))" +"Check the source code of the extended version of this tutorial in " +"|quickstart_tf_link|_ in the Flower GitHub repository." msgstr "" -#: ../../source/ref-changelog.md:859 +#: ../../source/tutorial-quickstart-tensorflow.rst:282 msgid "" -"One of Flower's goals is to enable research at scale. This release " -"enables a first (experimental) peek at a major new feature, codenamed the" -" virtual client engine. Virtual clients enable simulations that scale to " -"a (very) large number of clients on a single machine or compute cluster. " -"The easiest way to test the new functionality is to look at the two new " -"code examples called `quickstart_simulation` and `simulation_pytorch`." +"The video shown below shows how to setup a TensorFlow + Flower project " +"using our previously recommended APIs. A new video tutorial will be " +"released that shows the new APIs (as the content above does)" msgstr "" -#: ../../source/ref-changelog.md:861 +#: ../../source/tutorial-quickstart-xgboost.rst:-1 msgid "" -"The feature is still experimental, so there's no stability guarantee for " -"the API. It's also not quite ready for prime time and comes with a few " -"known caveats. 
However, those who are curious are encouraged to try it " -"out and share their thoughts." +"Check out this Federated Learning quickstart tutorial for using Flower " +"with XGBoost to train classification models on trees." msgstr "" -#: ../../source/ref-changelog.md:863 -msgid "" -"**New built-in strategies** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" +#: ../../source/tutorial-quickstart-xgboost.rst:4 +msgid "Quickstart XGBoost" msgstr "" -#: ../../source/ref-changelog.md:865 -msgid "" -"FedYogi - Federated learning strategy using Yogi on server-side. " -"Implementation based on https://arxiv.org/abs/2003.00295" +#: ../../source/tutorial-quickstart-xgboost.rst:13 +msgid "Federated XGBoost" msgstr "" -#: ../../source/ref-changelog.md:866 +#: ../../source/tutorial-quickstart-xgboost.rst:15 msgid "" -"FedAdam - Federated learning strategy using Adam on server-side. " -"Implementation based on https://arxiv.org/abs/2003.00295" +"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " +"implementation of gradient-boosted decision tree (**GBDT**), that " +"maximises the computational boundaries for boosted tree methods. It's " +"primarily designed to enhance both the performance and computational " +"speed of machine learning models. In XGBoost, trees are constructed " +"concurrently, unlike the sequential approach taken by GBDT." msgstr "" -#: ../../source/ref-changelog.md:868 +#: ../../source/tutorial-quickstart-xgboost.rst:21 msgid "" -"**New PyTorch Lightning code example** " -"([#617](https://github.com/adap/flower/pull/617))" +"Often, for tabular data on medium-sized datasets with fewer than 10k " +"training examples, XGBoost surpasses the results of deep learning " +"techniques." 
msgstr "" -#: ../../source/ref-changelog.md:870 -msgid "" -"**New Variational Auto-Encoder code example** " -"([#752](https://github.com/adap/flower/pull/752))" +#: ../../source/tutorial-quickstart-xgboost.rst:25 +msgid "Why federated XGBoost?" msgstr "" -#: ../../source/ref-changelog.md:872 +#: ../../source/tutorial-quickstart-xgboost.rst:27 msgid "" -"**New scikit-learn code example** " -"([#748](https://github.com/adap/flower/pull/748))" +"Indeed, as the demand for data privacy and decentralized learning grows, " +"there's an increasing requirement to implement federated XGBoost systems " +"for specialised applications, like survival analysis and financial fraud " +"detection." msgstr "" -#: ../../source/ref-changelog.md:874 +#: ../../source/tutorial-quickstart-xgboost.rst:31 msgid "" -"**New experimental TensorBoard strategy** " -"([#789](https://github.com/adap/flower/pull/789))" +"Federated learning ensures that raw data remains on the local device, " +"making it an attractive approach for sensitive domains where data " +"security and privacy are paramount. Given the robustness and efficiency " +"of XGBoost, combining it with federated learning offers a promising " +"solution for these specific challenges." msgstr "" -#: ../../source/ref-changelog.md:878 +#: ../../source/tutorial-quickstart-xgboost.rst:36 msgid "" -"Improved advanced TensorFlow code example " -"([#769](https://github.com/adap/flower/pull/769))" +"In this tutorial we will learn how to train a federated XGBoost model on " +"HIGGS dataset using Flower and ``xgboost`` package. We use a simple " +"example (`full code xgboost-quickstart " +"`_)" +" with two *clients* and one *server* to demonstrate how federated XGBoost" +" works, and then we dive into a more complex example (`full code xgboost-" +"comprehensive `_) to run various experiments." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:46 +msgid "Environment Setup" msgstr "" -#: ../../source/ref-changelog.md:879 +#: ../../source/tutorial-quickstart-xgboost.rst:48 msgid "" -"Warning when `min_available_clients` is misconfigured " -"([#830](https://github.com/adap/flower/pull/830))" +"First of all, it is recommended to create a virtual environment and run " +"everything within a :doc:`virtualenv `." msgstr "" -#: ../../source/ref-changelog.md:880 +#: ../../source/tutorial-quickstart-xgboost.rst:51 msgid "" -"Improved gRPC server docs " -"([#841](https://github.com/adap/flower/pull/841))" +"We first need to install Flower and Flower Datasets. You can do this by " +"running :" msgstr "" -#: ../../source/ref-changelog.md:881 +#: ../../source/tutorial-quickstart-xgboost.rst:57 msgid "" -"Improved error message in `NumPyClient` " -"([#851](https://github.com/adap/flower/pull/851))" +"Since we want to use ``xgboost`` package to build up XGBoost trees, let's" +" go ahead and install ``xgboost``:" msgstr "" -#: ../../source/ref-changelog.md:882 +#: ../../source/tutorial-quickstart-xgboost.rst:67 msgid "" -"Improved PyTorch quickstart code example " -"([#852](https://github.com/adap/flower/pull/852))" +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. Now that we have all our " +"dependencies installed, let's run a simple distributed training with two " +"clients and one server." 
msgstr "" -#: ../../source/ref-changelog.md:886 +#: ../../source/tutorial-quickstart-xgboost.rst:71 msgid "" -"**Disabled final distributed evaluation** " -"([#800](https://github.com/adap/flower/pull/800))" +"In a file called ``client.py``, import xgboost, Flower, Flower Datasets " +"and other related functions:" msgstr "" -#: ../../source/ref-changelog.md:888 -msgid "" -"Prior behaviour was to perform a final round of distributed evaluation on" -" all connected clients, which is often not required (e.g., when using " -"server-side evaluation). The prior behaviour can be enabled by passing " -"`force_final_distributed_eval=True` to `start_server`." +#: ../../source/tutorial-quickstart-xgboost.rst:99 +msgid "Dataset partition and hyper-parameter selection" msgstr "" -#: ../../source/ref-changelog.md:890 +#: ../../source/tutorial-quickstart-xgboost.rst:101 msgid "" -"**Renamed q-FedAvg strategy** " -"([#802](https://github.com/adap/flower/pull/802))" +"Prior to local training, we require loading the HIGGS dataset from Flower" +" Datasets and conduct data partitioning for FL:" msgstr "" -#: ../../source/ref-changelog.md:892 +#: ../../source/tutorial-quickstart-xgboost.rst:115 msgid "" -"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " -"the notation given in the original paper (q-FFL is the optimization " -"objective, q-FedAvg is the proposed solver). Note the original (now " -"deprecated) `QffedAvg` class is still available for compatibility reasons" -" (it will be removed in a future release)." +"In this example, we split the dataset into 30 partitions with uniform " +"distribution (``IidPartitioner(num_partitions=30)``). 
Then, we load the " +"partition for the given client based on ``partition_id``:" msgstr "" -#: ../../source/ref-changelog.md:894 +#: ../../source/tutorial-quickstart-xgboost.rst:135 msgid "" -"**Deprecated and renamed code example** `simulation_pytorch` **to** " -"`simulation_pytorch_legacy` " -"([#791](https://github.com/adap/flower/pull/791))" +"After that, we do train/test splitting on the given partition (client's " +"local data), and transform data format for ``xgboost`` package." msgstr "" -#: ../../source/ref-changelog.md:896 +#: ../../source/tutorial-quickstart-xgboost.rst:149 msgid "" -"This example has been replaced by a new example. The new example is based" -" on the experimental virtual client engine, which will become the new " -"default way of doing most types of large-scale simulations in Flower. The" -" existing example was kept for reference purposes, but it might be " -"removed in the future." +"The functions of ``train_test_split`` and " +"``transform_dataset_to_dmatrix`` are defined as below:" msgstr "" -#: ../../source/ref-changelog.md:898 -msgid "v0.16.0 (2021-05-11)" +#: ../../source/tutorial-quickstart-xgboost.rst:174 +msgid "Finally, we define the hyper-parameters used for XGBoost training." msgstr "" -#: ../../source/ref-changelog.md:902 +#: ../../source/tutorial-quickstart-xgboost.rst:190 msgid "" -"**New built-in strategies** " -"([#549](https://github.com/adap/flower/pull/549))" +"The ``num_local_round`` represents the number of iterations for local " +"tree boost. We use CPU for the training in default. One can shift it to " +"GPU by setting ``tree_method`` to ``gpu_hist``. We use AUC as evaluation " +"metric." 
msgstr "" -#: ../../source/ref-changelog.md:904 -msgid "(abstract) FedOpt" +#: ../../source/tutorial-quickstart-xgboost.rst:195 +msgid "Flower client definition for XGBoost" msgstr "" -#: ../../source/ref-changelog.md:907 +#: ../../source/tutorial-quickstart-xgboost.rst:197 msgid "" -"**Custom metrics for server and strategies** " -"([#717](https://github.com/adap/flower/pull/717))" +"After loading the dataset we define the Flower client. We follow the " +"general rule to define ``XgbClient`` class inherited from " +"``fl.client.Client``." msgstr "" -#: ../../source/ref-changelog.md:909 +#: ../../source/tutorial-quickstart-xgboost.rst:219 msgid "" -"The Flower server is now fully task-agnostic, all remaining instances of " -"task-specific metrics (such as `accuracy`) have been replaced by custom " -"metrics dictionaries. Flower 0.15 introduced the capability to pass a " -"dictionary containing custom metrics from client to server. As of this " -"release, custom metrics replace task-specific metrics on the server." +"All required parameters defined above are passed to ``XgbClient``'s " +"constructor." msgstr "" -#: ../../source/ref-changelog.md:911 +#: ../../source/tutorial-quickstart-xgboost.rst:221 msgid "" -"Custom metric dictionaries are now used in two user-facing APIs: they are" -" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " -"they enable evaluation functions passed to built-in strategies (via " -"`eval_fn`) to return more than two evaluation metrics. Strategies can " -"even return *aggregated* metrics dictionaries for the server to keep " -"track of." +"Then, we override ``get_parameters``, ``fit`` and ``evaluate`` methods " +"insides ``XgbClient`` class as follows." 
msgstr "" -#: ../../source/ref-changelog.md:913 +#: ../../source/tutorial-quickstart-xgboost.rst:236 msgid "" -"Strategy implementations should migrate their `aggregate_fit` and " -"`aggregate_evaluate` methods to the new return type (e.g., by simply " -"returning an empty `{}`), server-side evaluation functions should migrate" -" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." +"Unlike neural network training, XGBoost trees are not started from a " +"specified random weights. In this case, we do not use ``get_parameters`` " +"and ``set_parameters`` to initialise model parameters for XGBoost. As a " +"result, let's return an empty tensor in ``get_parameters`` when it is " +"called by the server at the first round." msgstr "" -#: ../../source/ref-changelog.md:915 +#: ../../source/tutorial-quickstart-xgboost.rst:278 msgid "" -"Flower 0.15-style return types are deprecated (but still supported), " -"compatibility will be removed in a future release." +"In ``fit``, at the first round, we call ``xgb.train()`` to build up the " +"first set of trees. From the second round, we load the global model sent " +"from server to new build Booster object, and then update model weights on" +" local training data with function ``local_boost`` as follows:" msgstr "" -#: ../../source/ref-changelog.md:917 +#: ../../source/tutorial-quickstart-xgboost.rst:298 msgid "" -"**Migration warnings for deprecated functionality** " -"([#690](https://github.com/adap/flower/pull/690))" +"Given ``num_local_round``, we update trees by calling " +"``bst_input.update`` method. After training, the last " +"``N=num_local_round`` trees will be extracted to send to the server." msgstr "" -#: ../../source/ref-changelog.md:919 +#: ../../source/tutorial-quickstart-xgboost.rst:330 msgid "" -"Earlier versions of Flower were often migrated to new APIs, while " -"maintaining compatibility with legacy APIs. 
This release introduces " -"detailed warning messages if usage of deprecated APIs is detected. The " -"new warning messages often provide details on how to migrate to more " -"recent APIs, thus easing the transition from one release to another." +"In ``evaluate``, after loading the global model, we call ``bst.eval_set``" +" function to conduct evaluation on valid set. The AUC value will be " +"returned." msgstr "" -#: ../../source/ref-changelog.md:921 +#: ../../source/tutorial-quickstart-xgboost.rst:333 msgid "" -"Improved docs and docstrings " -"([#691](https://github.com/adap/flower/pull/691) " -"[#692](https://github.com/adap/flower/pull/692) " -"[#713](https://github.com/adap/flower/pull/713))" -msgstr "" - -#: ../../source/ref-changelog.md:923 -msgid "MXNet example and documentation" +"Now, we can create an instance of our class ``XgbClient`` and add one " +"line to actually run this client:" msgstr "" -#: ../../source/ref-changelog.md:925 +#: ../../source/tutorial-quickstart-xgboost.rst:350 msgid "" -"FedBN implementation in example PyTorch: From Centralized To Federated " -"([#696](https://github.com/adap/flower/pull/696) " -"[#702](https://github.com/adap/flower/pull/702) " -"[#705](https://github.com/adap/flower/pull/705))" +"That's it for the client. We only have to implement ``Client`` and call " +"``fl.client.start_client()``. The string ``\"[::]:8080\"`` tells the " +"client which server to connect to. In our case we can run the server and " +"the client on the same machine, therefore we use ``\"[::]:8080\"``. If we" +" run a truly federated workload with the server and clients running on " +"different machines, all that needs to change is the ``server_address`` we" +" point the client at." 
msgstr "" -#: ../../source/ref-changelog.md:929 +#: ../../source/tutorial-quickstart-xgboost.rst:360 msgid "" -"**Serialization-agnostic server** " -"([#721](https://github.com/adap/flower/pull/721))" +"These updates are then sent to the *server* which will aggregate them to " +"produce a better model. Finally, the *server* sends this improved version" +" of the model back to each *client* to finish a complete FL round." msgstr "" -#: ../../source/ref-changelog.md:931 +#: ../../source/tutorial-quickstart-xgboost.rst:364 msgid "" -"The Flower server is now fully serialization-agnostic. Prior usage of " -"class `Weights` (which represents parameters as deserialized NumPy " -"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). " -"`Parameters` objects are fully serialization-agnostic and represents " -"parameters as byte arrays, the `tensor_type` attributes indicates how " -"these byte arrays should be interpreted (e.g., for " -"serialization/deserialization)." +"In a file named ``server.py``, import Flower and FedXgbBagging from " +"``flwr.server.strategy``." msgstr "" -#: ../../source/ref-changelog.md:933 -msgid "" -"Built-in strategies implement this approach by handling serialization and" -" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " -"implementations should update to the slightly changed Strategy method " -"definitions. Strategy authors can consult PR " -"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" -" easily migrate to the new format." +#: ../../source/tutorial-quickstart-xgboost.rst:367 +msgid "We first define a strategy for XGBoost bagging aggregation." msgstr "" -#: ../../source/ref-changelog.md:935 +#: ../../source/tutorial-quickstart-xgboost.rst:401 msgid "" -"Deprecated `flwr.server.Server.evaluate`, use " -"`flwr.server.Server.evaluate_round` instead " -"([#717](https://github.com/adap/flower/pull/717))" +"We use two clients for this example. 
An ``evaluate_metrics_aggregation`` " +"function is defined to collect and wighted average the AUC values from " +"clients. The ``config_func`` function is to return the current FL round " +"number to client's ``fit()`` and ``evaluate()`` methods." msgstr "" -#: ../../source/ref-changelog.md:937 -msgid "v0.15.0 (2021-03-12)" +#: ../../source/tutorial-quickstart-xgboost.rst:406 +msgid "Then, we start the server:" msgstr "" -#: ../../source/ref-changelog.md:941 -msgid "" -"**Server-side parameter initialization** " -"([#658](https://github.com/adap/flower/pull/658))" +#: ../../source/tutorial-quickstart-xgboost.rst:418 +msgid "Tree-based bagging aggregation" msgstr "" -#: ../../source/ref-changelog.md:943 +#: ../../source/tutorial-quickstart-xgboost.rst:420 msgid "" -"Model parameters can now be initialized on the server-side. Server-side " -"parameter initialization works via a new `Strategy` method called " -"`initialize_parameters`." +"You must be curious about how bagging aggregation works. Let's look into " +"the details." msgstr "" -#: ../../source/ref-changelog.md:945 +#: ../../source/tutorial-quickstart-xgboost.rst:422 msgid "" -"Built-in strategies support a new constructor argument called " -"`initial_parameters` to set the initial parameters. Built-in strategies " -"will provide these initial parameters to the server on startup and then " -"delete them to free the memory afterwards." +"In file ``flwr.server.strategy.fedxgb_bagging.py``, we define " +"``FedXgbBagging`` inherited from ``flwr.server.strategy.FedAvg``. 
Then, " +"we override the ``aggregate_fit``, ``aggregate_evaluate`` and " +"``evaluate`` methods as follows:" msgstr "" -#: ../../source/ref-changelog.md:964 +#: ../../source/tutorial-quickstart-xgboost.rst:519 msgid "" -"If no initial parameters are provided to the strategy, the server will " -"continue to use the current behaviour (namely, it will ask one of the " -"connected clients for its parameters and use these as the initial global " -"parameters)." +"In ``aggregate_fit``, we sequentially aggregate the clients' XGBoost " +"trees by calling ``aggregate()`` function:" msgstr "" -#: ../../source/ref-changelog.md:966 -msgid "Deprecations" +#: ../../source/tutorial-quickstart-xgboost.rst:579 +msgid "" +"In this function, we first fetch the number of trees and the number of " +"parallel trees for the current and previous model by calling " +"``_get_tree_nums``. Then, the fetched information will be aggregated. " +"After that, the trees (containing model weights) are aggregated to " +"generate a new tree model." msgstr "" -#: ../../source/ref-changelog.md:968 +#: ../../source/tutorial-quickstart-xgboost.rst:584 msgid "" -"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " -"`flwr.server.strategy.FedAvg`, which is equivalent)" +"After traversal of all clients' models, a new global model is generated, " +"followed by the serialisation, and sending back to each client." msgstr "" -#: ../../source/ref-changelog.md:970 -msgid "v0.14.0 (2021-02-18)" +#: ../../source/tutorial-quickstart-xgboost.rst:588 +msgid "Launch Federated XGBoost!" msgstr "" -#: ../../source/ref-changelog.md:974 +#: ../../source/tutorial-quickstart-xgboost.rst:664 msgid "" -"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " -"([#610](https://github.com/adap/flower/pull/610) " -"[#572](https://github.com/adap/flower/pull/572) " -"[#633](https://github.com/adap/flower/pull/633))" +"Congratulations! 
You've successfully built and run your first federated " +"XGBoost system. The AUC values can be checked in ``metrics_distributed``." +" One can see that the average AUC increases over FL rounds." msgstr "" -#: ../../source/ref-changelog.md:976 +#: ../../source/tutorial-quickstart-xgboost.rst:668 msgid "" -"Clients can now return an additional dictionary mapping `str` keys to " -"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " -"This means one can return almost arbitrary values from `fit`/`evaluate` " -"and make use of them on the server side!" +"The full `source code `_ for this example can be found in ``examples" +"/xgboost-quickstart``." msgstr "" -#: ../../source/ref-changelog.md:978 -msgid "" -"This improvement also allowed for more consistent return types between " -"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " -"dict)` representing the loss, number of examples, and a dictionary " -"holding arbitrary problem-specific values like accuracy." +#: ../../source/tutorial-quickstart-xgboost.rst:673 +msgid "Comprehensive Federated XGBoost" msgstr "" -#: ../../source/ref-changelog.md:980 +#: ../../source/tutorial-quickstart-xgboost.rst:675 msgid "" -"In case you wondered: this feature is compatible with existing projects, " -"the additional dictionary return value is optional. New code should " -"however migrate to the new return types to be compatible with upcoming " -"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " -"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " -"details." +"Now that you have known how federated XGBoost work with Flower, it's time" +" to run some more comprehensive experiments by customising the " +"experimental settings. In the xgboost-comprehensive example (`full code " +"`_), we provide more options to define various experimental" +" setups, including aggregation strategies, data partitioning and " +"centralised/distributed evaluation. 
We also support :doc:`Flower " +"simulation ` making it easy to simulate large " +"client cohorts in a resource-aware manner. Let's take a look!" msgstr "" -#: ../../source/ref-changelog.md:982 -msgid "" -"*Code example:* note the additional dictionary return values in both " -"`FlwrClient.fit` and `FlwrClient.evaluate`:" +#: ../../source/tutorial-quickstart-xgboost.rst:685 +msgid "Cyclic training" msgstr "" -#: ../../source/ref-changelog.md:997 +#: ../../source/tutorial-quickstart-xgboost.rst:687 msgid "" -"**Generalized** `config` **argument in** `Client.fit` **and** " -"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" +"In addition to bagging aggregation, we offer a cyclic training scheme, " +"which performs FL in a client-by-client fashion. Instead of aggregating " +"multiple clients, there is only one single client participating in the " +"training per round in the cyclic training scenario. The trained local " +"XGBoost trees will be passed to the next client as an initialised model " +"for next round's boosting." msgstr "" -#: ../../source/ref-changelog.md:999 -msgid "" -"The `config` argument used to be of type `Dict[str, str]`, which means " -"that dictionary values were expected to be strings. The new release " -"generalizes this to enable values of the following types: `bool`, " -"`bytes`, `float`, `int`, `str`." +#: ../../source/tutorial-quickstart-xgboost.rst:693 +msgid "To do this, we first customise a ``ClientManager`` in ``server_utils.py``:" msgstr "" -#: ../../source/ref-changelog.md:1001 +#: ../../source/tutorial-quickstart-xgboost.rst:733 msgid "" -"This means one can now pass almost arbitrary values to `fit`/`evaluate` " -"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" -"side and `int(config[\"epochs\"])` on the client side!" +"The customised ``ClientManager`` samples all available clients in each FL" +" round based on the order of connection to the server. 
Then, we define a " +"new strategy ``FedXgbCyclic`` in " +"``flwr.server.strategy.fedxgb_cyclic.py``, in order to sequentially " +"select only one client in given round and pass the received model to next" +" client." msgstr "" -#: ../../source/ref-changelog.md:1003 +#: ../../source/tutorial-quickstart-xgboost.rst:775 msgid "" -"*Code example:* note that the `config` dictionary now contains non-`str` " -"values in both `Client.fit` and `Client.evaluate`:" +"Unlike the original ``FedAvg``, we don't perform aggregation here. " +"Instead, we just make a copy of the received client model as global model" +" by overriding ``aggregate_fit``." msgstr "" -#: ../../source/ref-changelog.md:1020 -msgid "v0.13.0 (2021-01-08)" +#: ../../source/tutorial-quickstart-xgboost.rst:778 +msgid "" +"Also, the customised ``configure_fit`` and ``configure_evaluate`` methods" +" ensure the clients to be sequentially selected given FL round:" msgstr "" -#: ../../source/ref-changelog.md:1024 -msgid "" -"New example: PyTorch From Centralized To Federated " -"([#549](https://github.com/adap/flower/pull/549))" +#: ../../source/tutorial-quickstart-xgboost.rst:840 +msgid "Customised data partitioning" msgstr "" -#: ../../source/ref-changelog.md:1025 -msgid "Improved documentation" +#: ../../source/tutorial-quickstart-xgboost.rst:842 +msgid "" +"In ``dataset.py``, we have a function ``instantiate_partitioner`` to " +"instantiate the data partitioner based on the given ``num_partitions`` " +"and ``partitioner_type``. Currently, we provide four supported " +"partitioner type to simulate the uniformity/non-uniformity in data " +"quantity (uniform, linear, square, exponential)." 
msgstr "" -#: ../../source/ref-changelog.md:1026 -msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" +#: ../../source/tutorial-quickstart-xgboost.rst:873 +msgid "Customised centralised/distributed evaluation" msgstr "" -#: ../../source/ref-changelog.md:1027 -msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" +#: ../../source/tutorial-quickstart-xgboost.rst:875 +msgid "" +"To facilitate centralised evaluation, we define a function in " +"``server_utils.py``:" msgstr "" -#: ../../source/ref-changelog.md:1028 +#: ../../source/tutorial-quickstart-xgboost.rst:907 msgid "" -"Updated examples documentation " -"([#549](https://github.com/adap/flower/pull/549))" +"This function returns a evaluation function which instantiates a " +"``Booster`` object and loads the global model weights to it. The " +"evaluation is conducted by calling ``eval_set()`` method, and the tested " +"AUC value is reported." msgstr "" -#: ../../source/ref-changelog.md:1029 +#: ../../source/tutorial-quickstart-xgboost.rst:911 msgid "" -"Removed obsolete documentation " -"([#548](https://github.com/adap/flower/pull/548))" +"As for distributed evaluation on the clients, it's same as the quick-" +"start example by overriding the ``evaluate()`` method insides the " +"``XgbClient`` class in ``client_utils.py``." msgstr "" -#: ../../source/ref-changelog.md:1031 -msgid "Bugfix:" +#: ../../source/tutorial-quickstart-xgboost.rst:916 +msgid "Flower simulation" msgstr "" -#: ../../source/ref-changelog.md:1033 +#: ../../source/tutorial-quickstart-xgboost.rst:918 msgid "" -"`Server.fit` does not disconnect clients when finished, disconnecting the" -" clients is now handled in `flwr.server.start_server` " -"([#553](https://github.com/adap/flower/pull/553) " -"[#540](https://github.com/adap/flower/issues/540))." 
+"We also provide an example code (``sim.py``) to use the simulation " +"capabilities of Flower to simulate federated XGBoost training on either a" +" single machine or a cluster of machines." msgstr "" -#: ../../source/ref-changelog.md:1035 -msgid "v0.12.0 (2020-12-07)" +#: ../../source/tutorial-quickstart-xgboost.rst:954 +msgid "" +"After importing all required packages, we define a ``main()`` function to" +" perform the simulation process:" msgstr "" -#: ../../source/ref-changelog.md:1037 ../../source/ref-changelog.md:1053 -msgid "Important changes:" +#: ../../source/tutorial-quickstart-xgboost.rst:1010 +msgid "" +"We first load the dataset and perform data partitioning, and the pre-" +"processed data is stored in a ``list``. After the simulation begins, the " +"clients won't need to pre-process their partitions again." msgstr "" -#: ../../source/ref-changelog.md:1039 -msgid "" -"Added an example for embedded devices " -"([#507](https://github.com/adap/flower/pull/507))" +#: ../../source/tutorial-quickstart-xgboost.rst:1014 +msgid "Then, we define the strategies and other hyper-parameters:" msgstr "" -#: ../../source/ref-changelog.md:1040 +#: ../../source/tutorial-quickstart-xgboost.rst:1065 msgid "" -"Added a new NumPyClient (in addition to the existing KerasClient) " -"([#504](https://github.com/adap/flower/pull/504) " -"[#508](https://github.com/adap/flower/pull/508))" +"After that, we start the simulation by calling " +"``fl.simulation.start_simulation``:" msgstr "" -#: ../../source/ref-changelog.md:1041 +#: ../../source/tutorial-quickstart-xgboost.rst:1085 msgid "" -"Deprecated `flwr_example` package and started to migrate examples into " -"the top-level `examples` directory " -"([#494](https://github.com/adap/flower/pull/494) " -"[#512](https://github.com/adap/flower/pull/512))" +"One of key parameters for ``start_simulation`` is ``client_fn`` which " +"returns a function to construct a client. 
We define it as follows:" msgstr "" -#: ../../source/ref-changelog.md:1043 -msgid "v0.11.0 (2020-11-30)" +#: ../../source/tutorial-quickstart-xgboost.rst:1126 +msgid "Arguments parser" msgstr "" -#: ../../source/ref-changelog.md:1045 -msgid "Incompatible changes:" +#: ../../source/tutorial-quickstart-xgboost.rst:1128 +msgid "" +"In ``utils.py``, we define the arguments parsers for clients, server and " +"simulation, allowing users to specify different experimental settings. " +"Let's first see the sever side:" msgstr "" -#: ../../source/ref-changelog.md:1047 +#: ../../source/tutorial-quickstart-xgboost.rst:1175 msgid "" -"Renamed strategy methods " -"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " -"Flower's public APIs. Other public methods/functions (e.g., every method " -"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " -"which is why we're removing it from the four methods in Strategy. To " -"migrate rename the following `Strategy` methods accordingly:" +"This allows user to specify training strategies / the number of total " +"clients / FL rounds / participating clients / clients for evaluation, and" +" evaluation fashion. Note that with ``--centralised-eval``, the sever " +"will do centralised evaluation and all functionalities for client " +"evaluation will be disabled." msgstr "" -#: ../../source/ref-changelog.md:1048 -msgid "`on_configure_evaluate` => `configure_evaluate`" +#: ../../source/tutorial-quickstart-xgboost.rst:1180 +msgid "Then, the argument parser on client side:" msgstr "" -#: ../../source/ref-changelog.md:1049 -msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" +#: ../../source/tutorial-quickstart-xgboost.rst:1234 +msgid "" +"This defines various options for client data partitioning. 
Besides, " +"clients also have an option to conduct evaluation on centralised test set" +" by setting ``--centralised-eval``, as well as an option to perform " +"scaled learning rate based on the number of clients by setting " +"``--scaled-lr``." msgstr "" -#: ../../source/ref-changelog.md:1050 -msgid "`on_configure_fit` => `configure_fit`" +#: ../../source/tutorial-quickstart-xgboost.rst:1239 +msgid "We also have an argument parser for simulation:" msgstr "" -#: ../../source/ref-changelog.md:1051 -msgid "`on_aggregate_fit` => `aggregate_fit`" +#: ../../source/tutorial-quickstart-xgboost.rst:1317 +msgid "This integrates all arguments for both client and server sides." msgstr "" -#: ../../source/ref-changelog.md:1055 -msgid "" -"Deprecated `DefaultStrategy` " -"([#479](https://github.com/adap/flower/pull/479)). To migrate use " -"`FedAvg` instead." +#: ../../source/tutorial-quickstart-xgboost.rst:1320 +msgid "Example commands" msgstr "" -#: ../../source/ref-changelog.md:1056 +#: ../../source/tutorial-quickstart-xgboost.rst:1322 msgid "" -"Simplified examples and baselines " -"([#484](https://github.com/adap/flower/pull/484))." +"To run a centralised evaluated experiment with bagging strategy on 5 " +"clients with exponential distribution for 50 rounds, we first start the " +"server as below:" msgstr "" -#: ../../source/ref-changelog.md:1057 -msgid "" -"Removed presently unused `on_conclude_round` from strategy interface " -"([#483](https://github.com/adap/flower/pull/483))." +#: ../../source/tutorial-quickstart-xgboost.rst:1329 +msgid "Then, on each client terminal, we start the clients:" msgstr "" -#: ../../source/ref-changelog.md:1058 -msgid "" -"Set minimal Python version to 3.6.1 instead of 3.6.9 " -"([#471](https://github.com/adap/flower/pull/471))." 
+#: ../../source/tutorial-quickstart-xgboost.rst:1335 +msgid "To run the same experiment with Flower simulation:" msgstr "" -#: ../../source/ref-changelog.md:1059 +#: ../../source/tutorial-quickstart-xgboost.rst:1341 msgid "" -"Improved `Strategy` docstrings " -"([#470](https://github.com/adap/flower/pull/470))." +"The full `code `_ for this comprehensive example can be found in" +" ``examples/xgboost-comprehensive``." msgstr "" -#: ../../source/ref-example-projects.rst:2 -msgid "Example projects" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 +msgid "Build a strategy from scratch" msgstr "" -#: ../../source/ref-example-projects.rst:4 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 msgid "" -"Flower comes with a number of usage examples. The examples demonstrate " -"how Flower can be used to federate different kinds of existing machine " -"learning pipelines, usually leveraging popular machine learning " -"frameworks such as `PyTorch `_ or `TensorFlow " -"`_." +"Welcome to the third part of the Flower federated learning tutorial. In " +"previous parts of this tutorial, we introduced federated learning with " +"PyTorch and the Flower framework (`part 1 " +"`__) and we learned how strategies can be used to customize " +"the execution on both the server and the clients (`part 2 " +"`__)." msgstr "" -#: ../../source/ref-example-projects.rst:10 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 msgid "" -"The following examples are available as standalone projects. Quickstart " -"TensorFlow/Keras ---------------------------" +"In this notebook, we'll continue to customize the federated learning " +"system we built previously by creating a custom version of FedAvg using " +"the Flower framework, Flower Datasets, and PyTorch." 
msgstr "" -#: ../../source/ref-example-projects.rst:14 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 +#, fuzzy msgid "" -"The TensorFlow/Keras quickstart example shows CIFAR-10 image " -"classification with MobileNetV2:" +"`Star Flower on GitHub `__ ⭐️ and join " +"the Flower community on Flower Discuss and the Flower Slack to connect, " +"ask questions, and get help: - `Join Flower Discuss " +"`__ We'd love to hear from you in the " +"``Introduction`` topic! If anything is unclear, post in ``Flower Help - " +"Beginners``. - `Join Flower Slack `__ We'd " +"love to hear from you in the ``#introductions`` channel! If anything is " +"unclear, head over to the ``#questions`` channel." msgstr "" +"`Star Flower on GitHub `__ ⭐️ Slack의 오픈소스" +" Flower 커뮤니티에 가입하여 소통하고 질문하고 도움을 받을 수 있습니다: `Slack 가입`__ 🌼 ``#introductions``채널에서 당신의 목소리를 듣고 싶습니다! 궁금한 점이 " +"있으시면``#questions`` 채널로 방문해 주시기 바랍니다." -#: ../../source/ref-example-projects.rst:17 -msgid "" -"`Quickstart TensorFlow (Code) " -"`_" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:18 +msgid "Let's build a new ``Strategy`` from scratch! 
🌼" msgstr "" -#: ../../source/ref-example-projects.rst:18 -msgid ":doc:`Quickstart TensorFlow (Tutorial) `" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:30 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:30 +msgid "Preparation" msgstr "" -#: ../../source/ref-example-projects.rst:19 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:32 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:33 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:32 msgid "" -"`Quickstart TensorFlow (Blog Post) `_" +"Before we begin with the actual code, let's make sure that we have " +"everything we need." msgstr "" -#: ../../source/ref-example-projects.rst:23 -#: ../../source/tutorial-quickstart-pytorch.rst:5 -msgid "Quickstart PyTorch" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:44 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:45 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:44 +msgid "Installing dependencies" msgstr "" -#: ../../source/ref-example-projects.rst:25 -msgid "" -"The PyTorch quickstart example shows CIFAR-10 image classification with a" -" simple Convolutional Neural Network:" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:46 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:47 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:46 +msgid "First, we install the necessary packages:" msgstr "" -#: ../../source/ref-example-projects.rst:28 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:66 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:67 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:66 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:66 msgid "" -"`Quickstart PyTorch (Code) 
" -"`_" +"Now that we have all dependencies installed, we can import everything we " +"need for this tutorial:" msgstr "" -#: ../../source/ref-example-projects.rst:29 -msgid ":doc:`Quickstart PyTorch (Tutorial) `" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:106 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:106 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:106 +msgid "" +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " +"GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." msgstr "" -#: ../../source/ref-example-projects.rst:33 -msgid "PyTorch: From Centralized To Federated" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:119 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:119 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:119 +msgid "Data loading" msgstr "" -#: ../../source/ref-example-projects.rst:35 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:121 msgid "" -"This example shows how a regular PyTorch project can be federated using " -"Flower:" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``." 
msgstr "" -#: ../../source/ref-example-projects.rst:37 -msgid "" -"`PyTorch: From Centralized To Federated (Code) " -"`_" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:163 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:163 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 +msgid "Model training/evaluation" msgstr "" -#: ../../source/ref-example-projects.rst:38 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:165 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:165 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:171 msgid "" -":doc:`PyTorch: From Centralized To Federated (Tutorial) `" +"Let's continue with the usual model definition (including " +"``set_parameters`` and ``get_parameters``), training and test functions:" msgstr "" -#: ../../source/ref-example-projects.rst:42 -msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:256 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:262 +msgid "Flower client" msgstr "" -#: ../../source/ref-example-projects.rst:44 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:264 msgid "" -"This example shows how Flower can be used to build a federated learning " -"system that run across Raspberry Pi and Nvidia Jetson:" +"To implement the Flower client, we (again) create a subclass of " +"``flwr.client.NumPyClient`` and implement the three methods " +"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " +"``partition_id`` to the client and use it log additional details. We then" +" create an instance of ``ClientApp`` and pass it the ``client_fn``." 
msgstr "" -#: ../../source/ref-example-projects.rst:46 -msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " -"`_" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:311 +msgid "Let's test what we have so far before we continue:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:357 +msgid "Build a Strategy from scratch" msgstr "" -#: ../../source/ref-example-projects.rst:47 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:359 msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " -"`_" +"Let’s overwrite the ``configure_fit`` method such that it passes a higher" +" learning rate (potentially also other hyperparameters) to the optimizer " +"of a fraction of the clients. We will keep the sampling of the clients as" +" it is in ``FedAvg`` and then change the configuration dictionary (one of" +" the ``FitIns`` attributes)." msgstr "" -#: ../../source/ref-faq.rst:4 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:523 msgid "" -"This page collects answers to commonly asked questions about Federated " -"Learning with Flower." +"The only thing left is to use the newly created custom Strategy " +"``FedCustom`` when starting the experiment:" msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:559 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:998 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:841 +msgid "Recap" msgstr "" -#: ../../source/ref-faq.rst:8 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:561 msgid "" -"Yes, it can! Flower even comes with a few under-the-hood optimizations to" -" make it work even better on Colab. 
Here's a quickstart example:" +"In this notebook, we’ve seen how to implement a custom strategy. A custom" +" strategy enables granular control over client node configuration, result" +" aggregation, and more. To define a custom strategy, you only have to " +"overwrite the abstract methods of the (abstract) base class ``Strategy``." +" To make custom strategies even more powerful, you can pass custom " +"functions to the constructor of your new class (``__init__``) and then " +"call these functions whenever needed." msgstr "" -#: ../../source/ref-faq.rst:10 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:575 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1014 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:813 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:859 msgid "" -"`Flower simulation PyTorch " -"`_" +"Before you continue, make sure to join the Flower community on Flower " +"Discuss (`Join Flower Discuss `__) and on " +"Slack (`Join Slack `__)." msgstr "" -#: ../../source/ref-faq.rst:11 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:577 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1016 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:815 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:861 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 msgid "" -"`Flower simulation TensorFlow/Keras " -"`_" +"There's a dedicated ``#questions`` channel if you need help, but we'd " +"also love to hear who you are in ``#introductions``!" msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" 
+#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:579 +msgid "" +"The `Flower Federated Learning Tutorial - Part 4 " +"`__ introduces ``Client``, the flexible API underlying " +"``NumPyClient``." msgstr "" -#: ../../source/ref-faq.rst:15 -msgid "" -"Find the `blog post about federated learning on embedded device here " -"`_" -" and the corresponding `GitHub code example " -"`_." +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 +msgid "Customize the client" msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 +msgid "" +"Welcome to the fourth part of the Flower federated learning tutorial. In " +"the previous parts of this tutorial, we introduced federated learning " +"with PyTorch and Flower (`part 1 `__), we learned how " +"strategies can be used to customize the execution on both the server and " +"the clients (`part 2 `__), and we built our own " +"custom strategy from scratch (`part 3 `__)." msgstr "" -#: ../../source/ref-faq.rst:19 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 msgid "" -"Yes, it does. Please take a look at our `blog post " -"`_ or check out the code examples:" +"In this notebook, we revisit ``NumPyClient`` and introduce a new " +"baseclass for building clients, simply named ``Client``. In previous " +"parts of this tutorial, we've based our client on ``NumPyClient``, a " +"convenience class which makes it easy to work with machine learning " +"libraries that have good NumPy interoperability. With ``Client``, we gain" +" a lot of flexibility that we didn't have before, but we'll also have to " +"do a few things the we didn't have to do before." 
msgstr "" -#: ../../source/ref-faq.rst:21 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:19 msgid "" -"`Android Kotlin example `_" +"Let's go deeper and see what it takes to move from ``NumPyClient`` to " +"``Client``! 🌼" msgstr "" -#: ../../source/ref-faq.rst:22 -msgid "`Android Java example `_" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:31 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:30 +msgid "Step 0: Preparation" msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:121 +msgid "" +"Let's now define a loading function for the CIFAR-10 training and test " +"set, partition them into ``num_partitions`` smaller datasets (each split " +"into training and validation set), and wrap everything in their own " +"``DataLoader``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:256 +msgid "Step 1: Revisiting NumPyClient" msgstr "" -#: ../../source/ref-faq.rst:26 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:258 msgid "" -"Yes, of course. A list of available examples using Flower within a " -"blockchain environment is available here:" +"So far, we've implemented our client by subclassing " +"``flwr.client.NumPyClient``. The three methods we implemented are " +"``get_parameters``, ``fit``, and ``evaluate``." msgstr "" -#: ../../source/ref-faq.rst:28 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:299 msgid "" -"`Flower meets Nevermined GitHub Repository `_." +"Then, we define the function ``numpyclient_fn`` that is used by Flower to" +" create the ``FlowerNumpyClient`` instances on demand. Finally, we create" +" the ``ClientApp`` and pass the ``numpyclient_fn`` to it." 
msgstr "" -#: ../../source/ref-faq.rst:29 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:328 msgid "" -"`Flower meets Nevermined YouTube video " -"`_." +"We've seen this before, there's nothing new so far. The only *tiny* " +"difference compared to the previous notebook is naming, we've changed " +"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " +"``numpyclient_fn``. Next, we configure the number of federated learning " +"rounds using ``ServerConfig`` and create the ``ServerApp`` with this " +"config:" msgstr "" -#: ../../source/ref-faq.rst:30 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:355 msgid "" -"`Flower meets KOSMoS `_." +"Finally, we specify the resources for each client and run the simulation " +"to see the output we get:" msgstr "" -#: ../../source/ref-faq.rst:31 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:389 msgid "" -"`Flower meets Talan blog post `_ ." +"This works as expected, ten clients are training for three rounds of " +"federated learning." msgstr "" -#: ../../source/ref-faq.rst:32 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:391 msgid "" -"`Flower meets Talan GitHub Repository " -"`_ ." +"Let's dive a little bit deeper and discuss how Flower executes this " +"simulation. Whenever a client is selected to do some work, " +"``run_simulation`` launches the ``ClientApp`` object which in turn calls " +"the function ``numpyclient_fn`` to create an instance of our " +"``FlowerNumPyClient`` (along with loading the model and the data)." msgstr "" -#: ../../source/ref-telemetry.md:1 -msgid "Telemetry" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:393 +msgid "" +"But here's the perhaps surprising part: Flower doesn't actually use the " +"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " +"makes it look like a subclass of ``flwr.client.Client``, not " +"``flwr.client.NumPyClient``. 
In fact, the Flower core framework doesn't " +"know how to handle ``NumPyClient``'s, it only knows how to handle " +"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " +"top of ``Client``." msgstr "" -#: ../../source/ref-telemetry.md:3 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:395 msgid "" -"The Flower open-source project collects **anonymous** usage metrics to " -"make well-informed decisions to improve Flower. Doing this enables the " -"Flower team to understand how Flower is used and what challenges users " -"might face." +"Instead of building on top of ``NumPyClient``, we can directly build on " +"top of ``Client``." msgstr "" -#: ../../source/ref-telemetry.md:5 -msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users that do not want to share anonymous usage metrics." +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:407 +msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" msgstr "" -#: ../../source/ref-telemetry.md:7 -msgid "Principles" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:409 +msgid "" +"Let's try to do the same thing using ``Client`` instead of " +"``NumPyClient``." msgstr "" -#: ../../source/ref-telemetry.md:9 -msgid "We follow strong principles guarding anonymous usage metrics collection:" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:519 +msgid "" +"Before we discuss the code in more detail, let's try to run it! Gotta " +"make sure our new ``Client``-based client works, right?" msgstr "" -#: ../../source/ref-telemetry.md:11 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:545 msgid "" -"**Optional:** You will always be able to disable telemetry; read on to " -"learn “[How to opt-out](#how-to-opt-out)”." +"That's it, we're now using ``Client``. 
It probably looks similar to what " +"we've done with ``NumPyClient``. So what's the difference?" msgstr "" -#: ../../source/ref-telemetry.md:12 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:547 msgid "" -"**Anonymous:** The reported usage metrics are anonymous and do not " -"contain any personally identifiable information (PII). See “[Collected " -"metrics](#collected-metrics)” to understand what metrics are being " -"reported." +"First of all, it's more code. But why? The difference comes from the fact" +" that ``Client`` expects us to take care of parameter serialization and " +"deserialization. For Flower to be able to send parameters over the " +"network, it eventually needs to turn these parameters into ``bytes``. " +"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " +"serialization. Turning raw bytes into something more useful (like NumPy " +"``ndarray``'s) is called deserialization. Flower needs to do both: it " +"needs to serialize parameters on the server-side and send them to the " +"client, the client needs to deserialize them to use them for local " +"training, and then serialize the updated parameters again to send them " +"back to the server, which (finally!) deserializes them again in order to " +"aggregate them with the updates received from other clients." msgstr "" -#: ../../source/ref-telemetry.md:13 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:550 msgid "" -"**Transparent:** You can easily inspect what anonymous metrics are being " -"reported; see the section “[How to inspect what is being reported](#how-" -"to-inspect-what-is-being-reported)”" +"The only *real* difference between Client and NumPyClient is that " +"NumPyClient takes care of serialization and deserialization for you. It " +"can do so because it expects you to return parameters as NumPy ndarray's," +" and it knows how to handle these. 
This makes working with machine " +"learning libraries that have good NumPy support (most of them) a breeze." msgstr "" -#: ../../source/ref-telemetry.md:14 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:552 msgid "" -"**Open for feedback:** You can always reach out to us if you have " -"feedback; see the section “[How to contact us](#how-to-contact-us)” for " -"details." +"In terms of API, there's one major difference: all methods in Client take" +" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " +"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " +"``NumPyClient`` on the other hand have multiple arguments (e.g., " +"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" +" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " +"``NumPyClient.fit``) if there are multiple things to handle. These " +"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " +"values you're used to from ``NumPyClient``." msgstr "" -#: ../../source/ref-telemetry.md:16 -msgid "How to opt-out" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:565 +msgid "Step 3: Custom serialization" msgstr "" -#: ../../source/ref-telemetry.md:18 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:567 msgid "" -"When Flower starts, it will check for an environment variable called " -"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " -"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " -"client, simply do so by prepending your command as in:" +"Here we will explore how to implement custom serialization with a simple " +"example." 
msgstr "" -#: ../../source/ref-telemetry.md:24 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:569 msgid "" -"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," -" `.bashrc` (or whatever configuration file applies to your environment) " -"to disable Flower telemetry permanently." +"But first what is serialization? Serialization is just the process of " +"converting an object into raw bytes, and equally as important, " +"deserialization is the process of converting raw bytes back into an " +"object. This is very useful for network communication. Indeed, without " +"serialization, you could not just a Python object through the internet." msgstr "" -#: ../../source/ref-telemetry.md:26 -msgid "Collected metrics" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:571 +msgid "" +"Federated Learning relies heavily on internet communication for training " +"by sending Python objects back and forth between the clients and the " +"server. This means that serialization is an essential part of Federated " +"Learning." msgstr "" -#: ../../source/ref-telemetry.md:28 -msgid "Flower telemetry collects the following metrics:" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:573 +msgid "" +"In the following section, we will write a basic example where instead of " +"sending a serialized version of our ``ndarray``\\ s containing our " +"parameters, we will first convert the ``ndarray`` into sparse matrices, " +"before sending them. This technique can be used to save bandwidth, as in " +"certain cases where the weights of a model are sparse (containing many 0 " +"entries), converting them to a sparse matrix can greatly improve their " +"bytesize." msgstr "" -#: ../../source/ref-telemetry.md:30 -msgid "" -"**Flower version.** Understand which versions of Flower are currently " -"being used. 
This helps us to decide whether we should invest effort into " -"releasing a patch version for an older version of Flower or instead use " -"the bandwidth to build new features." +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:576 +msgid "Our custom serialization/deserialization functions" msgstr "" -#: ../../source/ref-telemetry.md:32 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:578 msgid "" -"**Operating system.** Enables us to answer questions such as: *Should we " -"create more guides for Linux, macOS, or Windows?*" +"This is where the real serialization/deserialization will happen, " +"especially in ``ndarray_to_sparse_bytes`` for serialization and " +"``sparse_bytes_to_ndarray`` for deserialization." msgstr "" -#: ../../source/ref-telemetry.md:34 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:580 msgid "" -"**Python version.** Knowing the Python version helps us, for example, to " -"decide whether we should invest effort into supporting old versions of " -"Python or stop supporting them and start taking advantage of new Python " -"features." +"Note that we imported the ``scipy.sparse`` library in order to convert " +"our arrays." msgstr "" -#: ../../source/ref-telemetry.md:36 -msgid "" -"**Hardware properties.** Understanding the hardware environment that " -"Flower is being used in helps to decide whether we should, for example, " -"put more effort into supporting low-resource environments." +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:668 +msgid "Client-side" msgstr "" -#: ../../source/ref-telemetry.md:38 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:670 msgid "" -"**Execution mode.** Knowing what execution mode Flower starts in enables " -"us to understand how heavily certain features are being used and better " -"prioritize based on that." 
+"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " +"will just have to call our custom functions in our " +"``flwr.client.Client``." msgstr "" -#: ../../source/ref-telemetry.md:40 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:672 msgid "" -"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " -"time a Flower workload starts. This allows us to understand which device " -"types not only start Flower workloads but also successfully complete " -"them." +"Indeed, in ``get_parameters`` we need to serialize the parameters we got " +"from our network using our custom ``ndarrays_to_sparse_parameters`` " +"defined above." msgstr "" -#: ../../source/ref-telemetry.md:42 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:674 msgid "" -"**Source.** Flower telemetry tries to store a random source ID in " -"`~/.flwr/source` the first time a telemetry event is generated. The " -"source ID is important to identify whether an issue is recurring or " -"whether an issue is triggered by multiple clusters running concurrently " -"(which often happens in simulation). For example, if a device runs " -"multiple workloads at the same time, and this results in an issue, then, " -"in order to reproduce the issue, multiple workloads must be started at " -"the same time." +"In ``fit``, we first need to deserialize the parameters coming from the " +"server using our custom ``sparse_parameters_to_ndarrays`` and then we " +"need to serialize our local results with " +"``ndarrays_to_sparse_parameters``." msgstr "" -#: ../../source/ref-telemetry.md:44 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:676 msgid "" -"You may delete the source ID at any time. If you wish for all events " -"logged under a specific source ID to be deleted, you can send a deletion " -"request mentioning the source ID to `telemetry@flower.ai`. All events " -"related to that source ID will then be permanently deleted." 
+"In ``evaluate``, we will only need to deserialize the global parameters " +"with our custom function." msgstr "" -#: ../../source/ref-telemetry.md:46 -msgid "" -"We will not collect any personally identifiable information. If you think" -" any of the metrics collected could be misused in any way, please [get in" -" touch with us](#how-to-contact-us). We will update this page to reflect " -"any changes to the metrics collected and publish changes in the " -"changelog." +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:781 +msgid "Server-side" msgstr "" -#: ../../source/ref-telemetry.md:48 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:783 msgid "" -"If you think other metrics would be helpful for us to better guide our " -"decisions, please let us know! We will carefully review them; if we are " -"confident that they do not compromise user privacy, we may add them." +"For this example, we will just use ``FedAvg`` as a strategy. To change " +"the serialization and deserialization here, we only need to reimplement " +"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other" +" functions of the strategy will be inherited from the super class " +"``FedAvg``." msgstr "" -#: ../../source/ref-telemetry.md:50 -msgid "How to inspect what is being reported" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:785 +msgid "As you can see only one line as change in ``evaluate``:" msgstr "" -#: ../../source/ref-telemetry.md:52 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:791 msgid "" -"We wanted to make it very easy for you to inspect what anonymous usage " -"metrics are reported. You can view all the reported telemetry information" -" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " -"is disabled by default. You may use logging independently from " -"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " -"without sending any metrics." 
+"And for ``aggregate_fit``, we will first deserialize every result we " +"received:" msgstr "" -#: ../../source/ref-telemetry.md:58 -msgid "" -"The inspect Flower telemetry without sending any anonymous usage metrics," -" use both environment variables:" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:800 +msgid "And then serialize the aggregated result:" msgstr "" -#: ../../source/ref-telemetry.md:64 -msgid "How to contact us" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:959 +msgid "We can now run our custom serialization example!" msgstr "" -#: ../../source/ref-telemetry.md:66 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1000 msgid "" -"We want to hear from you. If you have any feedback or ideas on how to " -"improve the way we handle anonymous usage metrics, reach out to us via " -"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " -"(`telemetry@flower.ai`)." +"In this part of the tutorial, we've seen how we can build clients by " +"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " +"convenience abstraction that makes it easier to work with machine " +"learning libraries that have good NumPy interoperability. ``Client`` is a" +" more flexible abstraction that allows us to do things that are not " +"possible in ``NumPyClient``. In order to do so, it requires us to handle " +"parameter serialization and deserialization ourselves." msgstr "" -#: ../../source/tutorial-quickstart-android.rst:-1 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1018 msgid "" -"Read this Federated Learning quickstart tutorial for creating an Android " -"app using Flower." +"This is the final part of the Flower tutorial (for now!), " +"congratulations! You're now well equipped to understand the rest of the " +"documentation. 
There are many topics we didn't cover in the tutorial, we " +"recommend the following resources:" msgstr "" -#: ../../source/tutorial-quickstart-android.rst:5 -msgid "Quickstart Android" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1020 +msgid "`Read Flower Docs `__" msgstr "" -#: ../../source/tutorial-quickstart-android.rst:10 -msgid "" -"Let's build a federated learning system using TFLite and Flower on " -"Android!" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1021 +msgid "`Check out Flower Code Examples `__" msgstr "" -#: ../../source/tutorial-quickstart-android.rst:12 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1022 msgid "" -"Please refer to the `full code example " -"`_ to learn " -"more." +"`Use Flower Baselines for your research " +"`__" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:-1 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1023 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with FastAI to train a vision model on CIFAR-10." +"`Watch Flower AI Summit 2024 videos `__" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:5 -msgid "Quickstart fastai" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 +msgid "Get started with Flower" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:10 -msgid "Let's build a federated learning system using fastai and Flower!" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 +msgid "Welcome to the Flower federated learning tutorial!" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:12 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." 
+"In this notebook, we'll build a federated learning system using the " +"Flower framework, Flower Datasets and PyTorch. In part 1, we use PyTorch " +"for the model training pipeline and data loading. In part 2, we federate " +"the PyTorch project using Flower." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:-1 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:18 +#, fuzzy +msgid "Let's get started! 🌼" +msgstr "시작하기" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:32 msgid "" -"Check out this Federating Learning quickstart tutorial for using Flower " -"with HuggingFace Transformers in order to fine-tune an LLM." +"Before we begin with any actual code, let's make sure that we have " +"everything we need." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:5 -msgid "Quickstart 🤗 Transformers" -msgstr "" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:44 +#, fuzzy +msgid "Install dependencies" +msgstr "사전 릴리즈 설치" -#: ../../source/tutorial-quickstart-huggingface.rst:10 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:46 msgid "" -"Let's build a federated learning system using Hugging Face Transformers " -"and Flower!" +"Next, we install the necessary packages for PyTorch (``torch`` and " +"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " +"(``flwr``):" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:12 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:109 msgid "" -"We will leverage Hugging Face to federate the training of language models" -" over multiple clients using Flower. More specifically, we will fine-tune" -" a pre-trained Transformer model (distilBERT) for sequence classification" -" over a dataset of IMDB ratings. The end goal is to detect if a movie " -"rating is positive or negative." 
+"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" +" GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:18 -msgid "Dependencies" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:122 +msgid "Load the data" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:20 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:124 msgid "" -"To follow along this tutorial you will need to install the following " -"packages: :code:`datasets`, :code:`evaluate`, :code:`flwr`, " -":code:`torch`, and :code:`transformers`. This can be done using " -":code:`pip`:" -msgstr "" - -#: ../../source/tutorial-quickstart-huggingface.rst:30 -msgid "Standard Hugging Face workflow" +"Federated learning can be applied to many different types of tasks across" +" different domains. In this tutorial, we introduce federated learning by " +"training a simple convolutional neural network (CNN) on the popular " +"CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that " +"distinguish between images from ten different classes: 'airplane', " +"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " +"'truck'." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:33 -msgid "Handling the data" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:135 +msgid "" +"We simulate having multiple datasets from multiple organizations (also " +"called the \"cross-silo\" setting in federated learning) by splitting the" +" original CIFAR-10 dataset into multiple partitions. Each partition will " +"represent the data from a single organization. We're doing this purely " +"for experimentation purposes, in the real world there's no need for data " +"splitting because each organization already has their own data (the data " +"is naturally partitioned)." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:35 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:137 msgid "" -"To fetch the IMDB dataset, we will use Hugging Face's :code:`datasets` " -"library. We then need to tokenize the data and create :code:`PyTorch` " -"dataloaders, this is all done in the :code:`load_data` function:" +"Each organization will act as a client in the federated learning system. " +"Having ten organizations participate in a federation means having ten " +"clients connected to the federated learning server." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:81 -msgid "Training and testing the model" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:148 +msgid "" +"We use the Flower Datasets library (``flwr-datasets``) to partition " +"CIFAR-10 into ten partitions using ``FederatedDataset``. We will create a" +" small training and test set for each of the ten organizations and wrap " +"each of these into a PyTorch ``DataLoader``:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:83 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:196 msgid "" -"Once we have a way of creating our trainloader and testloader, we can " -"take care of the training and testing. 
This is very similar to any " -":code:`PyTorch` training or testing loop:" +"We now have a function that can return a training set and validation set " +"(``trainloader`` and ``valloader``) representing one dataset from one of " +"ten different organizations. Each ``trainloader``/``valloader`` pair " +"contains 4000 training examples and 1000 validation examples. There's " +"also a single ``testloader`` (we did not split the test set). Again, this" +" is only necessary for building research or educational systems, actual " +"federated learning systems have their data naturally distributed across " +"multiple partitions." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:121 -msgid "Creating the model itself" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:199 +msgid "" +"Let's take a look at the first batch of images and labels in the first " +"training set (i.e., ``trainloader`` from ``partition_id=0``) before we " +"move on:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:123 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:241 msgid "" -"To create the model itself, we will just load the pre-trained distillBERT" -" model using Hugging Face’s :code:`AutoModelForSequenceClassification` :" +"The output above shows a random batch of images from the ``trainloader`` " +"from the first of ten partitions. It also prints the labels associated " +"with each image (i.e., one of the ten possible labels we've seen above). " +"If you run the cell again, you should see another batch of images." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:136 -msgid "Federating the example" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:253 +msgid "Step 1: Centralized Training with PyTorch" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:139 -msgid "Creating the IMDBClient" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:264 +msgid "" +"Next, we're going to use PyTorch to define a simple convolutional neural " +"network. This introduction assumes basic familiarity with PyTorch, so it " +"doesn't cover the PyTorch-related aspects in full detail. If you want to " +"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " +"MINUTE BLITZ " +"`__." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:141 -msgid "" -"To federate our example to multiple clients, we first need to write our " -"Flower client class (inheriting from :code:`flwr.client.NumPyClient`). " -"This is very easy, as our model is a standard :code:`PyTorch` model:" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:276 +msgid "Define the model" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:169 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:278 msgid "" -"The :code:`get_parameters` function lets the server get the client's " -"parameters. Inversely, the :code:`set_parameters` function allows the " -"server to send its parameters to the client. Finally, the :code:`fit` " -"function trains the model locally for the client, and the " -":code:`evaluate` function tests the model locally and returns the " -"relevant metrics." 
+"We use the simple CNN described in the `PyTorch tutorial " +"`__:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:175 -msgid "Starting the server" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:315 +msgid "Let's continue with the usual training and test functions:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:177 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:375 +#, fuzzy +msgid "Train the model" +msgstr "릴리즈 동안에" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:377 msgid "" -"Now that we have a way to instantiate clients, we need to create our " -"server in order to aggregate the results. Using Flower, this can be done " -"very easily by first choosing a strategy (here, we are using " -":code:`FedAvg`, which will define the global weights as the average of " -"all the clients' weights at each round) and then using the " -":code:`flwr.server.start_server` function:" +"We now have all the basic building blocks we need: a dataset, a model, a " +"training function, and a test function. Let's put them together to train " +"the model on the dataset of one of our organizations " +"(``partition_id=0``). This simulates the reality of most machine learning" +" projects today: each organization has their own data and trains models " +"only on this internal data:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:205 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 msgid "" -"The :code:`weighted_average` function is there to provide a way to " -"aggregate the metrics distributed amongst the clients (basically this " -"allows us to display a nice average accuracy and loss for every round)." +"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " +"in a test set accuracy of about 41%, which is not good, but at the same " +"time, it doesn't really matter for the purposes of this tutorial. 
The " +"intent was just to show a simple centralized training pipeline that sets " +"the stage for what comes next - federated learning!" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:209 -msgid "Putting everything together" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 +msgid "Step 2: Federated Learning with Flower" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:211 -msgid "We can now start client instances using:" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 +msgid "" +"Step 1 demonstrated a simple centralized training pipeline. All data was " +"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." +" Next, we'll simulate a situation where we have multiple datasets in " +"multiple organizations and where we train a model over these " +"organizations using federated learning." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:221 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 +#, fuzzy +msgid "Update model parameters" +msgstr "모델 매개변수." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 msgid "" -"And they will be able to connect to the server and start the federated " -"training." +"In federated learning, the server sends global model parameters to the " +"client, and the client updates the local model with parameters received " +"from the server. It then trains the model on the local data (which " +"changes the model parameters locally) and sends the updated/changed model" +" parameters back to the server (or, alternatively, it sends just the " +"gradients back to the server, not the full model parameters)." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:223 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 msgid "" -"If you want to check out everything put together, you should check out " -"the `full code example `_ ." 
+"We need two helper functions to update the local model with parameters " +"received from the server and to get the updated model parameters from the" +" local model: ``set_parameters`` and ``get_parameters``. The following " +"two functions do just that for the PyTorch model above." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:226 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 msgid "" -"Of course, this is a very basic example, and a lot can be added or " -"modified, it was just to showcase how simply we could federate a Hugging " -"Face workflow using Flower." +"The details of how this works are not really important here (feel free to" +" consult the PyTorch documentation if you want to learn more). In " +"essence, we use ``state_dict`` to access PyTorch model parameter tensors." +" The parameter tensors are then converted to/from a list of NumPy " +"ndarray's (which the Flower ``NumPyClient`` knows how to " +"serialize/deserialize):" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:229 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 +#, fuzzy +msgid "Define the Flower ClientApp" +msgstr "Flower 클라이언트 앱을 실행합니다." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 msgid "" -"Note that in this example we used :code:`PyTorch`, but we could have very" -" well used :code:`TensorFlow`." +"With that out of the way, let's move on to the interesting part. " +"Federated learning systems consist of a server and multiple clients. In " +"Flower, we create a ``ServerApp`` and a ``ClientApp`` to run the server-" +"side and client-side code, respectively." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:-1 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 msgid "" -"Read this Federated Learning quickstart tutorial for creating an iOS app " -"using Flower to train a neural network on MNIST." 
+"The first step toward creating a ``ClientApp`` is to implement a " +"subclasses of ``flwr.client.Client`` or ``flwr.client.NumPyClient``. We " +"use ``NumPyClient`` in this tutorial because it is easier to implement " +"and requires us to write less boilerplate. To implement ``NumPyClient``, " +"we create a subclass that implements the three methods " +"``get_parameters``, ``fit``, and ``evaluate``:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:5 -msgid "Quickstart iOS" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 +msgid "``get_parameters``: Return the current local model parameters" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:10 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 msgid "" -"In this tutorial we will learn how to train a Neural Network on MNIST " -"using Flower and CoreML on iOS devices." +"``fit``: Receive model parameters from the server, train the model on the" +" local data, and return the updated model parameters to the server" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:12 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 msgid "" -"First of all, for running the Flower Python server, it is recommended to " -"create a virtual environment and run everything within a :doc:`virtualenv" -" `. For the Flower client " -"implementation in iOS, it is recommended to use Xcode as our IDE." +"``evaluate``: Receive model parameters from the server, evaluate the " +"model on the local data, and return the evaluation result to the server" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:15 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 msgid "" -"Our example consists of one Python *server* and two iPhone *clients* that" -" all have the same model." +"We mentioned that our clients will use the previously defined PyTorch " +"components for model training and evaluation. 
Let's see a simple Flower " +"client implementation that brings everything together:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:17 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 msgid "" -"*Clients* are responsible for generating individual weight updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." +"Our class ``FlowerClient`` defines how local training/evaluation will be " +"performed and allows Flower to call the local training/evaluation through" +" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" +" *single client* in our federated learning system. Federated learning " +"systems have multiple clients (otherwise, there's not much to federate), " +"so each client will be represented by its own instance of " +"``FlowerClient``. If we have, for example, three clients in our workload," +" then we'd have three instances of ``FlowerClient`` (one on each of the " +"machines we'd start the client on). Flower calls ``FlowerClient.fit`` on " +"the respective instance when the server selects a particular client for " +"training (and ``FlowerClient.evaluate`` for evaluation)." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:21 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:516 msgid "" -"Now that we have a rough idea of what is going on, let's get started to " -"setup our Flower server environment. We first need to install Flower. You" -" can do this by using pip:" +"In this notebook, we want to simulate a federated learning system with 10" +" clients *on a single machine*. This means that the server and all 10 " +"clients will live on a single machine and share resources such as CPU, " +"GPU, and memory. 
Having 10 clients would mean having 10 instances of " +"``FlowerClient`` in memory. Doing this on a single machine can quickly " +"exhaust the available memory resources, even if only a subset of these " +"clients participates in a single round of federated learning." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:27 -msgid "Or Poetry:" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:518 +msgid "" +"In addition to the regular capabilities where server and clients run on " +"multiple machines, Flower, therefore, provides special simulation " +"capabilities that create ``FlowerClient`` instances only when they are " +"actually necessary for training or evaluation. To enable the Flower " +"framework to create clients when necessary, we need to implement a " +"function that creates a ``FlowerClient`` instance on demand. We typically" +" call this function ``client_fn``. Flower calls ``client_fn`` whenever it" +" needs an instance of one particular client to call ``fit`` or " +"``evaluate`` (those instances are usually discarded after use, so they " +"should not keep any local state). In federated learning experiments using" +" Flower, clients are identified by a partition ID, or ``partition-id``. " +"This ``partition-id`` is used to load different local data partitions for" +" different clients, as can be seen below. The value of ``partition-id`` " +"is retrieved from the ``node_config`` dictionary in the ``Context`` " +"object, which holds the information that persists throughout each " +"training round." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:522 +msgid "" +"With this, we have the class ``FlowerClient`` which defines client-side " +"training/evaluation and ``client_fn`` which allows Flower to create " +"``FlowerClient`` instances whenever it needs to call ``fit`` or " +"``evaluate`` on one particular client. 
Last, but definitely not least, we" +" create an instance of ``ClientApp`` and pass it the ``client_fn``. " +"``ClientApp`` is the entrypoint that a running Flower client uses to call" +" your code (as defined in, for example, ``FlowerClient.fit``)." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:34 -#: ../../source/tutorial-quickstart-pytorch.rst:37 -#: ../../source/tutorial-quickstart-scikitlearn.rst:40 -#: ../../source/tutorial-quickstart-tensorflow.rst:29 -#: ../../source/tutorial-quickstart-xgboost.rst:55 -msgid "Flower Client" -msgstr "" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:563 +#, fuzzy +msgid "Define the Flower ServerApp" +msgstr "Flower 서버앱" -#: ../../source/tutorial-quickstart-ios.rst:36 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:565 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training using CoreML as our local training pipeline and " -"MNIST as our dataset. For simplicity reasons we will use the complete " -"Flower client with CoreML, that has been implemented and stored inside " -"the Swift SDK. The client implementation can be seen below:" +"On the server side, we need to configure a strategy which encapsulates " +"the federated learning approach/algorithm, for example, *Federated " +"Averaging* (FedAvg). Flower has a number of built-in strategies, but we " +"can also use our own strategy implementations to customize nearly all " +"aspects of the federated learning approach. For this example, we use the " +"built-in ``FedAvg`` implementation and customize it using a few basic " +"parameters:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:72 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:592 msgid "" -"Let's create a new application project in Xcode and add :code:`flwr` as a" -" dependency in your project. 
For our application, we will store the logic" -" of our app in :code:`FLiOSModel.swift` and the UI elements in " -":code:`ContentView.swift`. We will focus more on :code:`FLiOSModel.swift`" -" in this quickstart. Please refer to the `full code example " -"`_ to learn more " -"about the app." +"Similar to ``ClientApp``, we create a ``ServerApp`` using a utility " +"function ``server_fn``. In ``server_fn``, we pass an instance of " +"``ServerConfig`` for defining the number of federated learning rounds " +"(``num_rounds``) and we also pass the previously created ``strategy``. " +"The ``server_fn`` returns a ``ServerAppComponents`` object containing the" +" settings that define the ``ServerApp`` behaviour. ``ServerApp`` is the " +"entrypoint that Flower uses to call all your server-side code (for " +"example, the strategy)." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:75 -msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:629 +msgid "Run the training" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:83 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:631 msgid "" -"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " -"will be bundled inside the application during deployment to your iOS " -"device. We need to pass the url to access mlmodel and run CoreML machine " -"learning processes, it can be retrieved by calling the function " -":code:`Bundle.main.url`. For the MNIST dataset, we need to preprocess it " -"into :code:`MLBatchProvider` object. The preprocessing is done inside " -":code:`DataLoader.swift`." +"In simulation, we often want to control the amount of resources each " +"client can use. In the next cell, we specify a ``backend_config`` " +"dictionary with the ``client_resources`` key (required) for defining the " +"amount of CPU and GPU resources each client can access." 
msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:99 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:659 msgid "" -"Since CoreML does not allow the model parameters to be seen before " -"training, and accessing the model parameters during or after the training" -" can only be done by specifying the layer name, we need to know this " -"information beforehand, through looking at the model specification, which" -" are written as proto files. The implementation can be seen in " -":code:`MLModelInspect`." +"The last step is the actual call to ``run_simulation`` which - you " +"guessed it - runs the simulation. ``run_simulation`` accepts a number of " +"arguments: - ``server_app`` and ``client_app``: the previously created " +"``ServerApp`` and ``ClientApp`` objects, respectively - " +"``num_supernodes``: the number of ``SuperNodes`` to simulate which equals" +" the number of clients for Flower simulation - ``backend_config``: the " +"resource allocation used in this simulation" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:102 -msgid "" -"After we have all of the necessary information, let's create our Flower " -"client." +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:686 +msgid "Behind the scenes" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:117 -msgid "" -"Then start the Flower gRPC client and start communicating to the server " -"by passing our Flower client to the function :code:`startFlwrGRPC`." +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:688 +msgid "So how does this work? How does Flower execute this simulation?" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:124 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:690 +#, python-format msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -"call the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. 
" -"The attribute :code:`hostname` and :code:`port` tells the client which " -"server to connect to. This can be done by entering the hostname and port " -"in the application before clicking the start button to start the " -"federated learning process." -msgstr "" - -#: ../../source/tutorial-quickstart-ios.rst:129 -#: ../../source/tutorial-quickstart-pytorch.rst:203 -#: ../../source/tutorial-quickstart-scikitlearn.rst:167 -#: ../../source/tutorial-quickstart-tensorflow.rst:98 -#: ../../source/tutorial-quickstart-xgboost.rst:309 -msgid "Flower Server" +"When we call ``run_simulation``, we tell Flower that there are 10 clients" +" (``num_supernodes=10``, where 1 ``SuperNode`` launches 1 ``ClientApp``)." +" Flower then goes ahead an asks the ``ServerApp`` to issue an " +"instructions to those nodes using the ``FedAvg`` strategy. ``FedAvg`` " +"knows that it should select 100% of the available clients " +"(``fraction_fit=1.0``), so it goes ahead and selects 10 random clients " +"(i.e., 100% of 10)." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:131 -#: ../../source/tutorial-quickstart-pytorch.rst:205 -#: ../../source/tutorial-quickstart-tensorflow.rst:100 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:692 msgid "" -"For simple workloads we can start a Flower server and leave all the " -"configuration possibilities at their default values. In a file named " -":code:`server.py`, import Flower and start the server:" +"Flower then asks the selected 10 clients to train the model. Each of the " +"10 ``ClientApp`` instances receives a message, which causes it to call " +"``client_fn`` to create an instance of ``FlowerClient``. It then calls " +"``.fit()`` on each the ``FlowerClient`` instances and returns the " +"resulting model parameter updates to the ``ServerApp``. When the " +"``ServerApp`` receives the model parameter updates from the clients, it " +"hands those updates over to the strategy (*FedAvg*) for aggregation. 
The " +"strategy aggregates those updates and returns the new global model, which" +" then gets used in the next round of federated learning." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:142 -#: ../../source/tutorial-quickstart-pytorch.rst:216 -#: ../../source/tutorial-quickstart-scikitlearn.rst:230 -#: ../../source/tutorial-quickstart-tensorflow.rst:112 -msgid "Train the model, federated!" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:705 +msgid "Where's the accuracy?" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:144 -#: ../../source/tutorial-quickstart-pytorch.rst:218 -#: ../../source/tutorial-quickstart-tensorflow.rst:114 -#: ../../source/tutorial-quickstart-xgboost.rst:525 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:707 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. FL systems usually have a server and " -"multiple clients. We therefore have to start the server first:" +"You may have noticed that all metrics except for ``losses_distributed`` " +"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:152 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:709 msgid "" -"Once the server is running we can start the clients in different " -"terminals. Build and run the client through your Xcode, one through Xcode" -" Simulator and the other by deploying it to your iPhone. To see more " -"about how to deploy your app to iPhone or Simulator visit `here " -"`_." +"Flower can automatically aggregate losses returned by individual clients," +" but it cannot do the same for metrics in the generic metrics dictionary " +"(the one with the ``accuracy`` key). 
Metrics dictionaries can contain " +"very different kinds of metrics and even key/value pairs that are not " +"metrics at all, so the framework does not (and can not) know how to " +"handle these automatically." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:156 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system in your ios device. The full `source code " -"`_ for this " -"example can be found in :code:`examples/ios`." +"As users, we need to tell the framework how to handle/aggregate these " +"custom metrics, and we do so by passing metric aggregation functions to " +"the strategy. The strategy will then call these functions whenever it " +"receives fit or evaluate metrics from clients. The two possible functions" +" are ``fit_metrics_aggregation_fn`` and " +"``evaluate_metrics_aggregation_fn``." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:-1 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Jax to train a linear regression model on a scikit-learn dataset." -msgstr "" - -#: ../../source/tutorial-quickstart-jax.rst:5 -msgid "Quickstart JAX" +"Let's create a simple weighted averaging function to aggregate the " +"``accuracy`` metric we return from ``evaluate``:" msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:-1 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:781 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Pandas to perform Federated Analytics." -msgstr "" - -#: ../../source/tutorial-quickstart-pandas.rst:5 -msgid "Quickstart Pandas" -msgstr "" - -#: ../../source/tutorial-quickstart-pandas.rst:10 -msgid "Let's build a federated analytics system using Pandas and Flower!" 
+"We now have a full system that performs federated training and federated " +"evaluation. It uses the ``weighted_average`` function to aggregate custom" +" evaluation metrics and calculates a single ``accuracy`` metric across " +"all clients on the server side." msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:12 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:783 msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." +"The other two categories of metrics (``losses_centralized`` and " +"``metrics_centralized``) are still empty because they only apply when " +"centralized evaluation is being used. Part two of the Flower tutorial " +"will cover centralized evaluation." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch to train a CNN model on MNIST." +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:795 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 +msgid "Final remarks" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:13 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:797 msgid "" -"In this tutorial we will learn how to train a Convolutional Neural " -"Network on CIFAR10 using Flower and PyTorch." +"Congratulations, you just trained a convolutional neural network, " +"federated over 10 clients! With that, you understand the basics of " +"federated learning with Flower. The same approach you've seen can be used" +" with other machine learning frameworks (not just PyTorch) and tasks (not" +" just CIFAR-10 images classification), for example NLP with Hugging Face " +"Transformers or speech with SpeechBrain." 
msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:15 -#: ../../source/tutorial-quickstart-xgboost.rst:39 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:799 msgid "" -"First of all, it is recommended to create a virtual environment and run " -"everything within a :doc:`virtualenv `." +"In the next notebook, we're going to cover some more advanced concepts. " +"Want to customize your strategy? Initialize parameters on the server " +"side? Or evaluate the aggregated model on the server side? We'll cover " +"all this and more in the next tutorial." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:17 -#: ../../source/tutorial-quickstart-scikitlearn.rst:14 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:817 msgid "" -"Our example consists of one *server* and two *clients* all having the " -"same model." +"The `Flower Federated Learning Tutorial - Part 2 " +"`__ goes into more depth about strategies and all " +"the advanced things you can build with them." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:19 -msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 +msgid "Use a federated learning strategy" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:23 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. You can do this by running :" +"Welcome to the next part of the federated learning tutorial. 
In previous " +"parts of this tutorial, we introduced federated learning with PyTorch and" +" Flower (`part 1 `__)." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:29 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 msgid "" -"Since we want to use PyTorch to solve a computer vision task, let's go " -"ahead and install PyTorch and the **torchvision** library:" +"In this notebook, we'll begin to customize the federated learning system " +"we built in the introductory notebook again, using the Flower framework, " +"Flower Datasets, and PyTorch." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:39 -msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. Our training " -"procedure and network architecture are based on PyTorch's `Deep Learning " -"with PyTorch " -"`_." +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:18 +msgid "Let's move beyond FedAvg with Flower strategies! 🌼" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:41 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:121 msgid "" -"In a file called :code:`client.py`, import Flower and PyTorch related " -"packages:" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``. We introduce a new parameter" +" ``num_partitions`` which allows us to call ``load_datasets`` with " +"different numbers of partitions." 
msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:56 -msgid "In addition, we define the device allocation in PyTorch with:" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:321 +msgid "Strategy customization" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:62 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 msgid "" -"We use PyTorch to load CIFAR10, a popular colored image classification " -"dataset for machine learning. The PyTorch :code:`DataLoader()` downloads " -"the training and test data that are then normalized." +"So far, everything should look familiar if you've worked through the " +"introductory notebook. With that, we're ready to introduce a number of " +"new features." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:78 -msgid "" -"Define the loss and optimizer with PyTorch. The training of the dataset " -"is done by looping over the dataset, measure the corresponding loss and " -"optimize it." +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:335 +msgid "Server-side parameter **initialization**" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:94 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:337 msgid "" -"Define then the validation of the machine learning network. We loop over" -" the test set and measure the loss and accuracy of the test set." +"Flower, by default, initializes the global model by asking one random " +"client for the initial parameters. In many cases, we want more control " +"over parameter initialization though. Flower therefore allows you to " +"directly pass the initial parameters to the Strategy. 
We create an " +"instance of ``Net()`` and get the paramaters as follows:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:113 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:358 msgid "" -"After defining the training and testing of a PyTorch machine learning " -"model, we use the functions for the Flower clients." +"Next, we create a ``server_fn`` that returns the components needed for " +"the server. Within ``server_fn``, we create a Strategy that uses the " +"initial parameters." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:115 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:393 msgid "" -"The Flower clients will use a simple CNN adapted from 'PyTorch: A 60 " -"Minute Blitz':" +"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" +" from asking one of the clients for the initial parameters. In " +"``server_fn``, we pass this new ``strategy`` and a ``ServerConfig`` for " +"defining the number of federated learning rounds (``num_rounds``)." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:142 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:395 msgid "" -"After loading the data set with :code:`load_data()` we define the Flower " -"interface." +"Similar to the ``ClientApp``, we now create the ``ServerApp`` using the " +"``server_fn``:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:144 -#: ../../source/tutorial-quickstart-tensorflow.rst:54 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:416 msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to train the neural network we defined earlier)." 
+"Last but not least, we specify the resources for each client and run the " +"simulation." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:150 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:448 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses PyTorch. Implementing :code:`NumPyClient` usually means " -"defining the following methods (:code:`set_parameters` is optional " -"though):" -msgstr "" - -#: ../../source/tutorial-quickstart-pytorch.rst:156 -#: ../../source/tutorial-quickstart-scikitlearn.rst:119 -msgid "return the model weight as a list of NumPy ndarrays" +"If we look closely, we can see that the logs do not show any calls to the" +" ``FlowerClient.get_parameters`` method." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:157 -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 -msgid ":code:`set_parameters` (optional)" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:460 +msgid "Starting with a customized strategy" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:158 -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:462 msgid "" -"update the local model weights with the parameters received from the " -"server" -msgstr "" - -#: ../../source/tutorial-quickstart-pytorch.rst:160 -#: ../../source/tutorial-quickstart-scikitlearn.rst:124 -msgid "set the local model weights" +"We've seen the function ``run_simulation`` before. It accepts a number of" +" arguments, amongst them the ``server_app`` which wraps around the " +"strategy and number of training rounds, ``client_app`` which wraps around" +" the ``client_fn`` used to create ``FlowerClient`` instances, and the " +"number of clients to simulate which equals ``num_supernodes``." 
msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 -msgid "train the local model" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:464 +msgid "" +"The strategy encapsulates the federated learning approach/algorithm, for " +"example, ``FedAvg`` or ``FedAdagrad``. Let's try to use a different " +"strategy this time:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:162 -#: ../../source/tutorial-quickstart-scikitlearn.rst:126 -msgid "receive the updated local model weights" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:509 +msgid "Server-side parameter **evaluation**" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:164 -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 -msgid "test the local model" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:511 +msgid "" +"Flower can evaluate the aggregated model on the server-side or on the " +"client-side. Client-side and server-side evaluation are similar in some " +"ways, but different in others." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:166 -msgid "which can be implemented in the following way:" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:513 +msgid "" +"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " +"simple: it works the same way that evaluation in centralized machine " +"learning does. If there is a server-side dataset that can be used for " +"evaluation purposes, then that's great. We can evaluate the newly " +"aggregated model after each round of training without having to send the " +"model to clients. We're also fortunate in the sense that our entire " +"evaluation dataset is available at all times." 
msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:189 -#: ../../source/tutorial-quickstart-tensorflow.rst:82 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:515 msgid "" -"We can now create an instance of our class :code:`CifarClient` and add " -"one line to actually run this client:" +"**Federated Evaluation** (or *client-side evaluation*) is more complex, " +"but also more powerful: it doesn't require a centralized dataset and " +"allows us to evaluate models over a larger set of data, which often " +"yields more realistic evaluation results. In fact, many scenarios require" +" us to use **Federated Evaluation** if we want to get representative " +"evaluation results at all. But this power comes at a cost: once we start " +"to evaluate on the client side, we should be aware that our evaluation " +"dataset can change over consecutive rounds of learning if those clients " +"are not always available. Moreover, the dataset held by each client can " +"also change over consecutive rounds. This can lead to evaluation results " +"that are not stable, so even if we would not change the model, we'd see " +"our evaluation results fluctuate over consecutive rounds." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:196 -#: ../../source/tutorial-quickstart-tensorflow.rst:90 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:518 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"[::]:8080\"` tells " -"the client which server to connect to. In our case we can run the server " -"and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. 
If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." +"We've seen how federated evaluation works on the client side (i.e., by " +"implementing the ``evaluate`` method in ``FlowerClient``). Now let's see " +"how we can evaluate aggregated model parameters on the server-side:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:226 -#: ../../source/tutorial-quickstart-scikitlearn.rst:239 -#: ../../source/tutorial-quickstart-tensorflow.rst:122 -#: ../../source/tutorial-quickstart-xgboost.rst:533 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:549 msgid "" -"Once the server is running we can start the clients in different " -"terminals. Open a new terminal and start the first client:" +"We create a ``FedAvg`` strategy and pass ``evaluate_fn`` to it. Then, we " +"create a ``ServerApp`` that uses this strategy." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:233 -#: ../../source/tutorial-quickstart-scikitlearn.rst:246 -#: ../../source/tutorial-quickstart-tensorflow.rst:129 -#: ../../source/tutorial-quickstart-xgboost.rst:540 -msgid "Open another terminal and start the second client:" -msgstr "" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:586 +#, fuzzy +msgid "Finally, we run the simulation." +msgstr "Flower 시뮬레이션." -#: ../../source/tutorial-quickstart-pytorch.rst:239 -#: ../../source/tutorial-quickstart-scikitlearn.rst:252 -#: ../../source/tutorial-quickstart-xgboost.rst:546 -msgid "" -"Each client will have its own dataset. 
You should now see how the " -"training does in the very first terminal (the one that started the " -"server):" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 +msgid "Sending/receiving arbitrary values to/from clients" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:271 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples" -"/quickstart-pytorch`." +"In some situations, we want to configure client-side execution (training," +" evaluation) from the server-side. One example for that is the server " +"asking the clients to train for a certain number of local epochs. Flower " +"provides a way to send configuration values from the server to the " +"clients using a dictionary. Let's look at an example where the clients " +"receive values from the server through the ``config`` parameter in " +"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " +"method receives the configuration dictionary through the ``config`` " +"parameter and can then read values from this dictionary. In this example," +" it reads ``server_round`` and ``local_epochs`` and uses those values to " +"improve the logging and configure the number of local training epochs:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:-1 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:674 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch Lightning to train an Auto Encoder model on MNIST." -msgstr "" - -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 -msgid "Quickstart PyTorch Lightning" +"So how can we send this config dictionary from server to clients? 
The " +"built-in Flower Strategies provide way to do this, and it works similarly" +" to the way server-side evaluation works. We provide a function to the " +"strategy, and the strategy calls this function for every round of " +"federated learning:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:10 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:704 msgid "" -"Let's build a horizontal federated learning system using PyTorch " -"Lightning and Flower!" +"Next, we'll pass this function to the FedAvg strategy before starting the" +" simulation:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:749 msgid "" -"Please refer to the `full code example " -"`_ to learn more." +"As we can see, the client logs now include the current round of federated" +" learning (which they read from the ``config`` dictionary). We can also " +"configure local training to run for one epoch during the first and second" +" round of federated learning, and then for two epochs during the third " +"round." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:751 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with scikit-learn to train a linear regression model." +"Clients can also return arbitrary values to the server. To do so, they " +"return a dictionary from ``fit`` and/or ``evaluate``. We have seen and " +"used this concept throughout this notebook without mentioning it " +"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" +" key/value pair as the third return value in ``evaluate``." 
msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:5 -msgid "Quickstart scikit-learn" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:763 +msgid "Scaling federated learning" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:10 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:765 msgid "" -"In this tutorial, we will learn how to train a :code:`Logistic " -"Regression` model on MNIST using Flower and scikit-learn." +"As a last step in this notebook, let's see how we can use Flower to " +"experiment with a large number of clients." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:12 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:785 msgid "" -"It is recommended to create a virtual environment and run everything " -"within this :doc:`virtualenv `." +"Note that we can reuse the ``ClientApp`` for different ``num-partitions``" +" since the Context is defined by the ``num_supernodes`` argument in " +"``run_simulation()``." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:16 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:787 +#, python-format msgid "" -"*Clients* are responsible for generating individual model parameter " -"updates for the model based on their local datasets. These updates are " -"then sent to the *server* which will aggregate them to produce an updated" -" global model. Finally, the *server* sends this improved version of the " -"model back to each *client*. A complete cycle of parameters updates is " -"called a *round*." +"We now have 1000 partitions, each holding 45 training and 5 validation " +"examples. Given that the number of training examples on each client is " +"quite small, we should probably train the model a bit longer, so we " +"configure the clients to perform 3 local training epochs. 
We should also " +"adjust the fraction of clients selected for training during each round " +"(we don't want all 1000 clients participating in every round), so we " +"adjust ``fraction_fit`` to ``0.025``, which means that only 2.5% of " +"available clients (so 25 clients) will be selected for training each " +"round:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:20 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:843 msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. You can do this by running:" -msgstr "" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:26 -msgid "Since we want to use scikit-learn, let's go ahead and install it:" +"In this notebook, we've seen how we can gradually enhance our system by " +"customizing the strategy, initializing parameters on the server side, " +"choosing a different strategy, and evaluating models on the server-side. " +"That's quite a bit of flexibility with so little code, right?" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:32 -msgid "Or simply install all dependencies using Poetry:" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:845 +msgid "" +"In the later sections, we've seen how we can communicate arbitrary values" +" between server and clients to fully customize client-side execution. " +"With that capability, we built a large-scale Federated Learning " +"simulation using the Flower Virtual Client Engine and ran an experiment " +"involving 1000 clients in the same workload - all in a Jupyter Notebook!" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:42 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:863 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. 
However, before " -"setting up the client and server, we will define all functionalities that" -" we need for our federated learning setup within :code:`utils.py`. The " -":code:`utils.py` contains different functions defining all the machine " -"learning basics:" +"The `Flower Federated Learning Tutorial - Part 3 " +"`__ shows how to build a fully custom ``Strategy`` from " +"scratch." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:45 -msgid ":code:`get_model_parameters()`" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 +msgid "What is Federated Learning?" +msgstr "연합 학습이란 무엇입니까?" -#: ../../source/tutorial-quickstart-scikitlearn.rst:46 -msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 +msgid "" +"In this tutorial, you will learn what federated learning is, build your " +"first system in Flower, and gradually extend it. If you work through all " +"parts of the tutorial, you will be able to build advanced federated " +"learning systems that approach the current state of the art in the field." msgstr "" +"이 튜토리얼에서 연합 학습이 무엇인지 배우고 Flower로 첫 번째 시스템을 구축하고 점진적으로 확장해 나갈 것입니다. 본 " +"튜토리얼의 모든 부분을 완성할 수 있다면, 당신은 고급 연합 학습 시스템을 구축하여 그 분야의 현재 최고 기술 수준에 접근할 수 " +"있을 것입니다." -#: ../../source/tutorial-quickstart-scikitlearn.rst:47 -msgid ":code:`set_model_params()`" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 +msgid "" +"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " +"federated learning. Only a basic understanding of data science and Python" +" programming is assumed." msgstr "" +"🧑‍🏫이 튜토리얼은 사전 지식을 많이 필요로 하지 않으며 연합 학습에 대해 상세히알 필요는 없습니다. 데이터 과학과 파이썬 " +"프로그래밍에 대한 기본적인 이해만 가정합니다." 
-#: ../../source/tutorial-quickstart-scikitlearn.rst:48 -msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 +msgid "" +"`Star Flower on GitHub `__ ⭐️ and join " +"the open-source Flower community on Slack to connect, ask questions, and " +"get help: `Join Slack `__ 🌼 We'd love to " +"hear from you in the ``#introductions`` channel! And if anything is " +"unclear, head over to the ``#questions`` channel." msgstr "" +"`Star Flower on GitHub `__ ⭐️ Slack의 오픈소스" +" Flower 커뮤니티에 가입하여 소통하고 질문하고 도움을 받을 수 있습니다: `Slack 가입`__ 🌼 ``#introductions``채널에서 당신의 목소리를 듣고 싶습니다! 궁금한 점이 " +"있으시면``#questions`` 채널로 방문해 주시기 바랍니다." -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid ":code:`set_initial_params()`" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 +msgid "Let's get started!" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid "Initializes the model parameters that the Flower server will ask for" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 +msgid "Classic machine learning" +msgstr "전통적인 머신러닝(기계학습)" -#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 msgid "" -"Please check out :code:`utils.py` `here " -"`_ for more details. The pre-defined functions are used in" -" the :code:`client.py` and imported. The :code:`client.py` also requires " -"to import several packages such as Flower and scikit-learn:" -msgstr "" +"Before we begin to discuss federated learning, let us quickly recap how " +"most machine learning works today." +msgstr "연합 학습에 대해 논의하기 전에 현재 대부분의 머신러닝이 어떻게 작동하는지 간략히 요약하겠습니다." 
-#: ../../source/tutorial-quickstart-scikitlearn.rst:67 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 msgid "" -"Prior to local training, we need to load the MNIST dataset, a popular " -"image classification dataset of handwritten digits for machine learning, " -"and partition the dataset for FL. This can be conveniently achieved using" -" `Flower Datasets `_. The " -":code:`FederatedDataset.load_partition()` method loads the partitioned " -"training set for each partition ID defined in the :code:`--partition-id` " -"argument." +"In machine learning, we have a model, and we have data. The model could " +"be a neural network (as depicted here), or something else, like classical" +" linear regression." msgstr "" +"머신러닝에서 우리는 모델과 데이터를 가지고 있습니다. 모델은 신경망(그림과 같이)일 수도 있고 고전적인 선형 회귀와 같은 다른 것일" +" 수도 있습니다." -#: ../../source/tutorial-quickstart-scikitlearn.rst:95 -msgid "" -"Next, the logistic regression model is defined and initialized with " -":code:`utils.set_initial_params()`." +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 +msgid "|ac0a9766e26044d6aea222a829859b20|" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:107 -msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to fit the logistic regression we defined earlier)." 
-msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 +msgid "Model and data" +msgstr "모델과 데이터" -#: ../../source/tutorial-quickstart-scikitlearn.rst:113 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses scikit-learn. Implementing :code:`NumPyClient` usually " -"means defining the following methods (:code:`set_parameters` is optional " -"though):" +"We train the model using the data to perform a useful task. A task could " +"be to detect objects in images, transcribe an audio recording, or play a " +"game like Go." msgstr "" +"우리는 유용한 작업을 수행하기 위해 데이터를 사용하여 모델을 훈련합니다. 작업은 이미지 속 물체를 감지하거나 음성 녹음을 기록하거나" +" 바둑과 같은 게임을 하는 것일 수 있습니다." -#: ../../source/tutorial-quickstart-scikitlearn.rst:122 -msgid "is directly imported with :code:`utils.set_model_params()`" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 +msgid "|36cd6e248b1443ce8a82b5a025bba368|" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:130 -msgid "The methods can be implemented in the following way:" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 +msgid "Train model using data" +msgstr "데이터를 이용한 모델 훈련" -#: ../../source/tutorial-quickstart-scikitlearn.rst:153 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 msgid "" -"We can now create an instance of our class :code:`MnistClient` and add " -"one line to actually run this client:" -msgstr "" +"Now, in practice, the training data we work with doesn't originate on the" +" machine we train the model on. It gets created somewhere else." +msgstr "실제로 우리가 사용하는 훈련 데이터는 모델을 훈련시키는 기계에서 비롯된 것이 아닙니다. 그 데이터는 다른 곳에서 만들어졌습니다." 
-#: ../../source/tutorial-quickstart-scikitlearn.rst:160 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells" -" the client which server to connect to. In our case we can run the server" -" and the client on the same machine, therefore we use " -":code:`\"0.0.0.0:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we pass to the client." +"It originates on a smartphone by the user interacting with an app, a car " +"collecting sensor data, a laptop receiving input via the keyboard, or a " +"smart speaker listening to someone trying to sing a song." msgstr "" +"스마트폰에서 사용자와 앱의 상호 작용, 센서 데이터를 수집하는 자동차, 키보드를 통해 입력을 받는 노트북 또는 누군가 노래를 " +"부르리는 것을 듣는 스마트 스피커에서 비롯됩니다." -#: ../../source/tutorial-quickstart-scikitlearn.rst:169 -msgid "" -"The following Flower server is a little bit more advanced and returns an " -"evaluation function for the server-side evaluation. First, we import " -"again all required libraries such as Flower and scikit-learn." 
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 +msgid "|bf4fb057f4774df39e1dcb5c71fd804a|" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:172 -msgid ":code:`server.py`, import Flower and start the server:" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 +msgid "Data on a phone" +msgstr "핸드폰에 있는 데이터" -#: ../../source/tutorial-quickstart-scikitlearn.rst:185 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 msgid "" -"The number of federated learning rounds is set in :code:`fit_round()` and" -" the evaluation is defined in :code:`get_evaluate_fn()`. The evaluation " -"function is called after each federated learning round and gives you " -"information about loss and accuracy. Note that we also make use of Flower" -" Datasets here to load the test split of the MNIST dataset for server-" -"side evaluation." +"What's also important to mention, this \"somewhere else\" is usually not " +"just one place, it's many places. It could be several devices all running" +" the same app. But it could also be several organizations, all generating" +" data for the same task." msgstr "" +"또한 중요한 것은 이 \"다른 곳\"이 보통 한 곳만 아니라 여러 곳이라는 것입니다. 같은 앱을 실행하는 여러 기기일 수도 " +"있습니다. 하지만 여러 조직이 모두 같은 작업을 위해 데이터를 생성하는 것일 수도 있습니다." -#: ../../source/tutorial-quickstart-scikitlearn.rst:213 -msgid "" -"The :code:`main` contains the server-side parameter initialization " -":code:`utils.set_initial_params()` as well as the aggregation strategy " -":code:`fl.server.strategy:FedAvg()`. The strategy is the default one, " -"federated averaging (or FedAvg), with two clients and evaluation after " -"each federated learning round. The server can be started with the command" -" :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." 
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 +msgid "|71bb9f3c74c04f959b9bc1f02b736c95|" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:232 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 +msgid "Data is on many devices" +msgstr "데이터가 여러 장치에 있습니다" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. Federated learning systems usually have a " -"server and multiple clients. We, therefore, have to start the server " -"first:" +"So to use machine learning, or any kind of data analysis, the approach " +"that has been used in the past was to collect all data on a central " +"server. This server can be somewhere in a data center, or somewhere in " +"the cloud." msgstr "" +"따라서 머신러닝이나 어떤 종류의 데이터 분석을 이용하려면 과거에는 중앙 서버에서 모든 데이터를 수집하는 방법이 사용되었습니다. 이 " +"서버는 데이터 센터 어딘가에 있을 수도 있고 클라우드 어딘가에 있을 수도 있습니다." -#: ../../source/tutorial-quickstart-scikitlearn.rst:286 -msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples/sklearn-logreg-" -"mnist`." +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 +msgid "|7605632e1b0f49599ffacf841491fcfb|" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:-1 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 +msgid "Central data collection" +msgstr "중앙 데이터 수집" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with TensorFlow to train a MobilNetV2 model on CIFAR-10." +"Once all the data is collected in one place, we can finally use machine " +"learning algorithms to train our model on the data. This is the machine " +"learning approach that we've basically always relied on." 
msgstr "" +"모든 데이터가 한 곳에 모이면, 우리는 궁극적으로 머신러닝 알고리즘을 사용하여 데이터에서 모델을 훈련시킬 수 있습니다. 이것이 바로" +" 우리가 기본적으로 사용해 온 머신러닝 방법입니다." -#: ../../source/tutorial-quickstart-tensorflow.rst:5 -msgid "Quickstart TensorFlow" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 +msgid "|91b1b5a7d3484eb7a2350c1923f18307|" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:13 -msgid "Let's build a federated learning system in less than 20 lines of code!" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 +msgid "Central model training" +msgstr "중앙 데이터 훈련" -#: ../../source/tutorial-quickstart-tensorflow.rst:15 -msgid "Before Flower can be imported we have to install it:" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 +msgid "Challenges of classical machine learning" +msgstr "클래식 머신러닝의 어려움" -#: ../../source/tutorial-quickstart-tensorflow.rst:21 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 msgid "" -"Since we want to use the Keras API of TensorFlow (TF), we have to install" -" TF as well:" +"The classic machine learning approach we've just seen can be used in some" +" cases. Great examples include categorizing holiday photos, or analyzing " +"web traffic. Cases, where all the data is naturally available on a " +"centralized server." msgstr "" +"우리가 방금 본 전통적 머신러닝의 접근 방식은 경우에 따라 다르게 사용될 수 있습니다. 좋은 예로는 휴일 사진을 분류하거나 웹 " +"트래픽을 분석하는 것이 있습니다. 이러한 사례에서 모든 데이터는 자연스럽게 중앙 서버에 존재합니다." 
-#: ../../source/tutorial-quickstart-tensorflow.rst:31 -msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 +msgid "|5405ed430e4746e28b083b146fb71731|" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:38 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 +msgid "Centralized possible" +msgstr "집중화 가능" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 msgid "" -"We use the Keras utilities of TF to load CIFAR10, a popular colored image" -" classification dataset for machine learning. The call to " -":code:`tf.keras.datasets.cifar10.load_data()` downloads CIFAR10, caches " -"it locally, and then returns the entire training and test set as NumPy " -"ndarrays." +"But the approach can not be used in many other cases. Cases, where the " +"data is not available on a centralized server, or cases where the data " +"available on one server is not enough to train a good model." msgstr "" +"그러나 이 방법은 다른 많은 경우에 적용되지 않을 수 있습니다. 예를 들어, 중앙 집중식 서버에 데이터가 없거나 서버의 데이터가 " +"좋은 모델을 훈련하기에 충분하지 않을 수 있습니다." -#: ../../source/tutorial-quickstart-tensorflow.rst:47 -msgid "" -"Next, we need a model. For the purpose of this tutorial, we use " -"MobilNetV2 with 10 output classes:" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 +msgid "|a389e87dab394eb48a8949aa2397687b|" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:60 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 +msgid "Centralized impossible" +msgstr "집중화 불가능" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses Keras. 
The :code:`NumPyClient` interface defines three " -"methods which can be implemented in the following way:" +"There are many reasons why the classic centralized machine learning " +"approach does not work for a large number of highly important real-world " +"use cases. Those reasons include:" msgstr "" +"전통적인 중앙 집중식 머신러닝 방법이 현실 세계에서 매우 중요한 수많은 사용 사례를 충족시킬 수 없는 이유가 있습니다. 이유는 " +"다음과 같은 여러 가지가 있습니다:" -#: ../../source/tutorial-quickstart-tensorflow.rst:135 -msgid "Each client will have its own dataset." +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 +msgid "" +"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " +"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " +"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " +"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " +"protect sensitive data from being moved. In fact, those regulations " +"sometimes even prevent single organizations from combining their own " +"users' data for artificial intelligence training because those users live" +" in different parts of the world, and their data is governed by different" +" data protection regulations." msgstr "" +"**규정**: GDPR (유럽), CCPA (캘리포니아), PIPEDA (캐나다), LGPD (브라질), PDPL (아르헨티나), " +"KVKK (터키), POPI (남아프리카공화국), FSS (러시아), CDPR (중국), PDPB (인도), PIPA (한국), " +"APPI (일본), PDP (인도네시아), PDPA (싱가포르), APP (호주)등의 법규로 민감한 데이터가 이동하지 않도록 " +"보호하고 있습니다. 실제로 이러한 규정은 사용자가 세계의 다른 지역에 살고 데이터가 다른 데이터 보호 규정에 의해 통제되기 때문에 " +"단일 조직이 자체 사용자 데이터를 인공 지능 학습에 사용하는 것을 방지하기도 합니다." -#: ../../source/tutorial-quickstart-tensorflow.rst:137 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 msgid "" -"You should now see how the training does in the very first terminal (the " -"one that started the server):" +"**User preference**: In addition to regulation, there are use cases where" +" users just expect that no data leaves their device, ever. 
If you type " +"your passwords and credit card info into the digital keyboard of your " +"phone, you don't expect those passwords to end up on the server of the " +"company that developed that keyboard, do you? In fact, that use case was " +"the reason federated learning was invented in the first place." msgstr "" +"**사용자 선호도**: 규정 외에도 일부 사용 사례에서 사용자는 데이터가 자기 장치를 떠나지 않기를 예상합니다. 휴대폰의 디지털 " +"키보드에 비밀번호와 신용카드 정보를 입력하면 비밀번호가 해당 키보드를 개발한 회사의 서버에 뜨길 원하지는 않겠죠? 사실, 이 사용 " +"사례가 애당초 연합 학습이 발명된 이유였습니다." -#: ../../source/tutorial-quickstart-tensorflow.rst:169 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this can be found in :code:`examples" -"/quickstart-tensorflow/client.py`." +"**Data volume**: Some sensors, like cameras, produce such a high data " +"volume that it is neither feasible nor economic to collect all the data " +"(due to, for example, bandwidth or communication efficiency). Think about" +" a national rail service with hundreds of train stations across the " +"country. If each of these train stations is outfitted with a number of " +"security cameras, the volume of raw on-device data they produce requires " +"incredibly powerful and exceedingly expensive infrastructure to process " +"and store. And most of the data isn't even useful." msgstr "" +"**데이터 볼륨**: 일부 센서(예:카메라)는 너무 많은 데이터 볼륨을 생성하여 모든 데이터를 수집하는 것이 실현 가능하지도 않고 " +"경제적이지도 않습니다(예: 대역폭 또는 통신 효율로 인해). 전국에 수백 개 기차역이 있는 국가 철도 서비스를 생각해 보세요. 각 " +"기차역에 수 많은 보안 카메라가 설치되어 있다면, 그들이 생산하는 대량의 미가공 된 온디바이스 데이터는 처리 및 저장을 위해 " +"엄청나게 강력하고 매우 비싼기반 구조를 필요로 합니다. 그런데 대부분의 데이터는 유용하지도 않습니다." 
-#: ../../source/tutorial-quickstart-xgboost.rst:-1 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 +msgid "Examples where centralized machine learning does not work include:" +msgstr "중앙 집중식 머신러닝이 작동하지 않는 예는 다음과 같습니다:" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with XGBoost to train classification models on trees." -msgstr "" +"Sensitive healthcare records from multiple hospitals to train cancer " +"detection models" +msgstr "여러 병원의 민감한 의료기록으로 암 검진 모델 훈련" -#: ../../source/tutorial-quickstart-xgboost.rst:5 -msgid "Quickstart XGBoost" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 +msgid "" +"Financial information from different organizations to detect financial " +"fraud" +msgstr "금융 사기를 탐지하기 위한 다양한 조직의 금융 정보" -#: ../../source/tutorial-quickstart-xgboost.rst:14 -msgid "Federated XGBoost" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 +msgid "Location data from your electric car to make better range prediction" +msgstr "더 나은 범위 예측을 위해 전기 자동차의 위치 데이터" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 +msgid "End-to-end encrypted messages to train better auto-complete models" +msgstr "더 나은 자동 완성 모델을 훈련시키기 위한 엔드 투 엔드 암호화 된 메시지" -#: ../../source/tutorial-quickstart-xgboost.rst:16 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 msgid "" -"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " -"implementation of gradient-boosted decision tree (**GBDT**), that " -"maximises the computational boundaries for boosted tree methods. It's " -"primarily designed to enhance both the performance and computational " -"speed of machine learning models. In XGBoost, trees are constructed " -"concurrently, unlike the sequential approach taken by GBDT." 
+"The popularity of privacy-enhancing systems like the `Brave " +"`__ browser or the `Signal `__ " +"messenger shows that users care about privacy. In fact, they choose the " +"privacy-enhancing version over other alternatives, if such an alternative" +" exists. But what can we do to apply machine learning and data science to" +" these cases to utilize private data? After all, these are all areas that" +" would benefit significantly from recent advances in AI." msgstr "" +"`Brave `__ 브라우저나 `Signal `__ " +"메신저와 같은 개인 정보 보호 시스템의 인기는 사용자들이 개인 정보 보호에 신경 쓴다는 것을 보여줍니다. 실제로 그러한 대안이 " +"존재하는 경우 다른 대안보다 개인 정보 보호 강화 버전을 선택합니다. 그런데 이러한 사례에 머신러닝 및 데이터 과학을 적용하여 " +"프라이버시 데이터를 활용하려면 어떻게 해야 합니까? 이 모든 분야는 최근 AI의 발전으로 상당한 이익을 얻을 수 있는 분야입니다." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 +msgid "Federated learning" +msgstr "연합 학습" -#: ../../source/tutorial-quickstart-xgboost.rst:20 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 msgid "" -"Often, for tabular data on medium-sized datasets with fewer than 10k " -"training examples, XGBoost surpasses the results of deep learning " -"techniques." +"Federated learning simply reverses this approach. It enables machine " +"learning on distributed data by moving the training to the data, instead " +"of moving the data to the training. Here's the single-sentence " +"explanation:" msgstr "" +"연합 학습은 이 방법을 쉽게 뒤집었습니다. 데이터를 컴퓨팅 센터로 옮기는 대신 컴퓨팅 능력을 데이터가 생성되는 장소로 이동 " +"시킴으로써 분산된 데이터에서 머신러닝을 실현합니다. 요약하자면:" -#: ../../source/tutorial-quickstart-xgboost.rst:23 -msgid "Why federated XGBoost?" 
-msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 +msgid "Central machine learning: move the data to the computation" +msgstr "중앙 집중식 머신러닝: 데이터를 컴퓨팅 센터로 이동" -#: ../../source/tutorial-quickstart-xgboost.rst:25 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 +msgid "Federated (machine) learning: move the computation to the data" +msgstr "연합(기계)학습: 컴퓨팅을 데이터로 옮김" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 msgid "" -"Indeed, as the demand for data privacy and decentralized learning grows, " -"there's an increasing requirement to implement federated XGBoost systems " -"for specialised applications, like survival analysis and financial fraud " -"detection." +"By doing so, it enables us to use machine learning (and other data " +"science approaches) in areas where it wasn't possible before. We can now " +"train excellent medical AI models by enabling different hospitals to work" +" together. We can solve financial fraud by training AI models on the data" +" of different financial institutions. We can build novel privacy-" +"enhancing applications (such as secure messaging) that have better built-" +"in AI than their non-privacy-enhancing alternatives. And those are just a" +" few of the examples that come to mind. As we deploy federated learning, " +"we discover more and more areas that can suddenly be reinvented because " +"they now have access to vast amounts of previously inaccessible data." msgstr "" +"이를 통해 이전에는 불가능했던 분야에서 머신러닝(및 기타 데이터 과학 방법)을 사용할 수 있습니다. 이제 다양한 병원이 협력할 수 " +"있도록 함으로써 우수한 의료 AI 모델을 훈련할 수 있습니다. 다양한 금융 기관의 데이터에 대한 AI 모델을 훈련하여 금융 사기를 " +"해결할 수 있습니다. 개인 정보 보호를 강화하지 않는 대안보다 더 나은 AI가 내장된 새로운 개인 정보 보호 강화 애플리케이션(예:" +" 보안 메시징)을 구축할 수 있습니다. 그것들은 떠오르는 몇 가지 예에 불과합니다. 연합 학습을 구축함에 따라 이전에 액세스할 수 " +"없었던 많은 데이터에 액세스할 수 있게 되었기 때문에 갑자기 재생될 수 있는 영역이 점점 더 많아지고 있습니다." 
-#: ../../source/tutorial-quickstart-xgboost.rst:27 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 msgid "" -"Federated learning ensures that raw data remains on the local device, " -"making it an attractive approach for sensitive domains where data " -"security and privacy are paramount. Given the robustness and efficiency " -"of XGBoost, combining it with federated learning offers a promising " -"solution for these specific challenges." -msgstr "" +"So how does federated learning work, exactly? Let's start with an " +"intuitive explanation." +msgstr "그렇다면 연합 학습은 어떻게 작동합니까? 직관적인 설명부터 시작하겠습니다." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 +msgid "Federated learning in five steps" +msgstr "연합 학습의 5단계" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 +msgid "Step 0: Initialize global model" +msgstr "0단계: 글로벌 모델 초기화" -#: ../../source/tutorial-quickstart-xgboost.rst:30 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 msgid "" -"In this tutorial we will learn how to train a federated XGBoost model on " -"HIGGS dataset using Flower and :code:`xgboost` package. We use a simple " -"example (`full code xgboost-quickstart " -"`_)" -" with two *clients* and one *server* to demonstrate how federated XGBoost" -" works, and then we dive into a more complex example (`full code xgboost-" -"comprehensive `_) to run various experiments." +"We start by initializing the model on the server. This is exactly the " +"same in classic centralized learning: we initialize the model parameters," +" either randomly or from a previously saved checkpoint." msgstr "" +"서버에서 모델을 초기화하는 것으로 시작합니다. 이것은 전통적인 중앙 집중식 학습과도 동일합니다: 임의로 또는 이전에 저장된 " +"체크포인트에서 모델 매개변수를 초기화합니다." 
-#: ../../source/tutorial-quickstart-xgboost.rst:37 -msgid "Environment Setup" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 +msgid "|89c412136a5146ec8dc32c0973729f12|" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:41 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 +msgid "Initialize global model" +msgstr "글로벌 모델 초기화" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 msgid "" -"We first need to install Flower and Flower Datasets. You can do this by " -"running :" -msgstr "" +"Step 1: Send model to a number of connected organizations/devices (client" +" nodes)" +msgstr "1단계: 연결된 여러 조직/장치(클라이언트 노드)에 모델 전송" -#: ../../source/tutorial-quickstart-xgboost.rst:47 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 msgid "" -"Since we want to use :code:`xgboost` package to build up XGBoost trees, " -"let's go ahead and install :code:`xgboost`:" +"Next, we send the parameters of the global model to the connected client " +"nodes (think: edge devices like smartphones or servers belonging to " +"organizations). This is to ensure that each participating node starts " +"their local training using the same model parameters. We often use only a" +" few of the connected nodes instead of all nodes. The reason for this is " +"that selecting more and more client nodes has diminishing returns." msgstr "" +"다음으로 글로벌 모델의 파라미터를 연결된 클라이언트 노드(예: 스마트폰과 같은 에지 디바이스 또는 조직에 속한 서버)로 보냅니다. " +"이것은 각 참여 노드가 동일한 모델 매개변수를 사용하여 로컬 훈련을 시작하도록 하기 위함입니다. 일반적으로 모든 노드가 아닌 몇 " +"개의 연결 노드만 사용합니다. 그 이유는 점점 더 많은 클라이언트 노드를 선택하면 학습의 효율성이 감소하기 때문입니다." -#: ../../source/tutorial-quickstart-xgboost.rst:57 -msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. Now that we have all our " -"dependencies installed, let's run a simple distributed training with two " -"clients and one server." 
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 +msgid "|9503d3dc3a144e8aa295f8800cd8a766|" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:60 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 +msgid "Send global model" +msgstr "글로벌 모델 전송" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 +msgid "" +"Step 2: Train model locally on the data of each organization/device " +"(client node)" +msgstr "2단계: 각 조직/장치(클라이언트 노드)의 데이터에 대해 로컬로 모델 훈련" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 msgid "" -"In a file called :code:`client.py`, import xgboost, Flower, Flower " -"Datasets and other related functions:" +"Now that all (selected) client nodes have the latest version of the " +"global model parameters, they start the local training. They use their " +"own local dataset to train their own local model. They don't train the " +"model until full convergence, but they only train for a little while. " +"This could be as little as one epoch on the local data, or even just a " +"few steps (mini-batches)." msgstr "" +"이제 모든(선택된) 클라이언트 노드에는 최신 버전의 글로벌 모델 파라미터가 있으며 로컬 훈련을 시작합니다. 그들은 자신의 로컬 " +"데이터 세트를 사용하여 자신의 로컬 모델을 훈련합니다. 모델이 완전히 수렴할 때까지 훈련하지 않고 잠시만 훈련합니다. 이는 로컬 " +"데이터에서 한 단계 정도로 짧거나 몇 단계(mini-batches)에 불과할 수 있습니다." 
-#: ../../source/tutorial-quickstart-xgboost.rst:87 -msgid "Dataset partition and hyper-parameter selection" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 +msgid "|aadb59e29b9e445d8e239d9a8a7045cb|" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:89 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 +msgid "Train on local data" +msgstr "로컬 데이터에 대한 훈련" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 +msgid "Step 3: Return model updates back to the server" +msgstr "3단계: 모델 파라미터를 업데이트하여 서버로 되돌리기" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 msgid "" -"Prior to local training, we require loading the HIGGS dataset from Flower" -" Datasets and conduct data partitioning for FL:" +"After local training, each client node has a slightly different version " +"of the model parameters they originally received. The parameters are all " +"different because each client node has different examples in its local " +"dataset. The client nodes then send those model updates back to the " +"server. The model updates they send can either be the full model " +"parameters or just the gradients that were accumulated during local " +"training." msgstr "" +"로컬 훈련 후에는 클라이언트 노드마다 원래 받은 모델 파라미터의 버전이 조금씩 다릅니다. 파라미터가 다른 이유는 각 클라이언트 " +"노드의 로컬 데이터 세트에 다른 데이터가 있기 때문입니다. 그런 다음 클라이언트 노드는 이러한 모델 업데이트를 서버로 다시 " +"보냅니다. 보내는 모델 업데이트는 전체 모델 파라미터거나 로컬 교육 중에 누적된 그레디언트(gradient)일 수 있습니다." -#: ../../source/tutorial-quickstart-xgboost.rst:102 -msgid "" -"In this example, we split the dataset into two partitions with uniform " -"distribution (:code:`IidPartitioner(num_partitions=2)`). 
Then, we load " -"the partition for the given client based on :code:`node_id`:" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 +msgid "|a7579ad7734347508e959d9e14f2f53d|" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:121 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 +msgid "Send model updates" +msgstr "모델 업데이트 전송" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 +msgid "Step 4: Aggregate model updates into a new global model" +msgstr "4단계: 모델 업데이트를 새 글로벌 모델로 집계" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 msgid "" -"After that, we do train/test splitting on the given partition (client's " -"local data), and transform data format for :code:`xgboost` package." +"The server receives model updates from the selected client nodes. If it " +"selected 100 client nodes, it now has 100 slightly different versions of " +"the original global model, each trained on the local data of one client. " +"But didn't we want to have one model that contains the learnings from the" +" data of all 100 client nodes?" msgstr "" +"서버는 선택된 클라이언트 노드들로부터 모델 업데이트들을 수신합니다. 서버가 100개의 클라이언트 노드를 선택했다면 이제 각각 " +"클라이언트의 로컬 데이터를 기반으로 훈련된 100개의 조금씩 다른 원래 글로벌 모델 버전을 갖게 됩니다. 하지만 우리는 100개의 " +"모든 클라이언트 노드의 데이터에서 학습한 내용을 포함하는 모델을 하나만 갖고 싶지 않았습니까?" -#: ../../source/tutorial-quickstart-xgboost.rst:134 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 msgid "" -"The functions of :code:`train_test_split` and " -":code:`transform_dataset_to_dmatrix` are defined as below:" +"In order to get one single model, we have to combine all the model " +"updates we received from the client nodes. This process is called " +"*aggregation*, and there are many different ways to do it. The most basic" +" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " +"`__), often abbreviated as *FedAvg*. " +"*FedAvg* takes the 100 model updates and, as the name suggests, averages " +"them. 
To be more precise, it takes the *weighted average* of the model " +"updates, weighted by the number of examples each client used for " +"training. The weighting is important to make sure that each data example " +"has the same \"influence\" on the resulting global model. If one client " +"has 10 examples, and another client has 100 examples, then - without " +"weighting - each of the 10 examples would influence the global model ten " +"times as much as each of the 100 examples." msgstr "" +"단일 모델 하나를 얻으려면 클라이언트 노드에서 받은 모든 모델 업데이트를 결합해야 합니다. 이 과정을 *집계*라고 하며 여러 가지 " +"방법이 있습니다. 가장 기본적인 방법은 *Federated Averaging* (`McMahan et al., 2016 " +"`__)이라고 하고 보통 줄여서 *FedAvg*로 표기합니다. " +"*FedAvg* 는 100개의 모델 업데이트를 받아 이름에서 알 수 있듯이 모델 업데이트를 평균화합니다. 더 정확히 말하면, 모델 " +"업데이트의 *가중 평균* 을 각 클라이언트가 훈련에 사용한 예제 수에 따라 가중치를 부여합니다. 가중치는 각 데이터 예제가 결과 " +"글로벌 모델에 동일한 \"영향\" 을 미치는지 확인하는 데 중요합니다. 한 클라이언트에 10개의 데이터 포인트가 있고 다른 " +"클라이언트에 100개의 데이터 포인트가 있다면 가중치를 부여하지 않고 10개의 예가 100개의 사례보다 글로벌 모델에 10배 더 " +"많은 영향을 미칩니다." -#: ../../source/tutorial-quickstart-xgboost.rst:158 -msgid "Finally, we define the hyper-parameters used for XGBoost training." +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 +msgid "|73d15dd1d4fc41678b2d54815503fbe8|" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:174 -msgid "" -"The :code:`num_local_round` represents the number of iterations for local" -" tree boost. We use CPU for the training in default. One can shift it to " -"GPU by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as " -"evaluation metric." 
-msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 +msgid "Aggregate model updates" +msgstr "모델 업데이트 집계" -#: ../../source/tutorial-quickstart-xgboost.rst:181 -msgid "Flower client definition for XGBoost" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 +msgid "Step 5: Repeat steps 1 to 4 until the model converges" +msgstr "5단계: 모델이 수렴할 때까지 1~4단계를 반복합니다" -#: ../../source/tutorial-quickstart-xgboost.rst:183 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 msgid "" -"After loading the dataset we define the Flower client. We follow the " -"general rule to define :code:`XgbClient` class inherited from " -":code:`fl.client.Client`." +"Steps 1 to 4 are what we call a single round of federated learning. The " +"global model parameters get sent to the participating client nodes (step " +"1), the client nodes train on their local data (step 2), they send their " +"updated models to the server (step 3), and the server then aggregates the" +" model updates to get a new version of the global model (step 4)." msgstr "" +"단계 1에서 4는 우리가 말하는 단일 라운드 연합 학습입니다. 글로벌 모델 파라미터는 참여하는 클라이언트 노드에 전송되고(1단계)," +" 클라이언트 노드는 로컬 데이터에 대한 훈련을 받고(2단계), 업데이트된 모델을 서버에 전송하고(3단계), 서버는 모델 업데이트를 " +"집계하여 글로벌 모델의 새로운 버전을 얻습니다(4단계)." -#: ../../source/tutorial-quickstart-xgboost.rst:193 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 msgid "" -"The :code:`self.bst` is used to keep the Booster objects that remain " -"consistent across rounds, allowing them to store predictions from trees " -"integrated in earlier rounds and maintain other essential data structures" -" for training." +"During a single round, each client node that participates in that " +"iteration only trains for a little while. This means that after the " +"aggregation step (step 4), we have a model that has been trained on all " +"the data of all participating client nodes, but only for a little while. 
" +"We then have to repeat this training process over and over again to " +"eventually arrive at a fully trained model that performs well across the " +"data of all client nodes." msgstr "" +"한 라운드의 반복에서 해당 반복에 참여하는 각 클라이언트 노드는 짧은 시간 동안만 훈련합니다. 집계 단계(4단계) 이후 우리 모델이" +" 관련된 모든 클라이언트 노드의 모든 데이터에 대해 잠시 동안만 훈련되었음을 의미합니다. 그런 다음 모든 클라이언트 노드의 " +"데이터에서 잘 작동하는 완전히 훈련된 모델에 도달하려면 이 훈련 과정을 계속 반복해야 합니다." -#: ../../source/tutorial-quickstart-xgboost.rst:196 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 msgid "" -"Then, we override :code:`get_parameters`, :code:`fit` and " -":code:`evaluate` methods insides :code:`XgbClient` class as follows." +"Congratulations, you now understand the basics of federated learning. " +"There's a lot more to discuss, of course, but that was federated learning" +" in a nutshell. In later parts of this tutorial, we will go into more " +"detail. Interesting questions include: How can we select the best client " +"nodes that should participate in the next round? What's the best way to " +"aggregate model updates? How can we handle failing client nodes " +"(stragglers)?" msgstr "" +"축하합니다, 이제 연합 학습의 기초에 대해 알게 되었습니다. 물론 아직 논의해야 할 내용이 많지만 이는 연합 학습의 축소판일 " +"뿐입니다. 본 튜토리얼의 후반부에는 좀 더 자세히 설명하겠습니다. 흥미로운 질문은 다음과 같습니다: 다음 라운드에 참여해야 할 가장" +" 좋은 클라이언트 노드를 어떻게 선택할 수 있을까요? 모델 업데이트를 집계하는 가장 좋은 방법은 무엇일까요? 실패한 클라이언트 " +"노드(낙오자)를 어떻게 처리할 수 있을까요?" -#: ../../source/tutorial-quickstart-xgboost.rst:210 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 msgid "" -"Unlike neural network training, XGBoost trees are not started from a " -"specified random weights. In this case, we do not use " -":code:`get_parameters` and :code:`set_parameters` to initialise model " -"parameters for XGBoost. As a result, let's return an empty tensor in " -":code:`get_parameters` when it is called by the server at the first " -"round." 
+"Just like we can train a model on the decentralized data of different " +"client nodes, we can also evaluate the model on that data to receive " +"valuable metrics. This is called federated evaluation, sometimes " +"abbreviated as FE. In fact, federated evaluation is an integral part of " +"most federated learning systems." msgstr "" +"다양한 클라이언트 노드의 분산된 데이터에 대해 모델을 훈련할 수 있는 것처럼 해당 데이터에 대한 모델을 평가하여 가치 있는 " +"메트릭(metrics)을 받을 수도 있습니다. 이를 연합 평가라고 하며 FE라고 약칭하기도 합니다. 사실 연합 평가는 대부분의 연합" +" 학습 시스템에서 필수적인 부분입니다." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 +msgid "Federated analytics" +msgstr "연합 분석" -#: ../../source/tutorial-quickstart-xgboost.rst:251 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 msgid "" -"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build " -"up the first set of trees. the returned Booster object and config are " -"stored in :code:`self.bst` and :code:`self.config`, respectively. From " -"the second round, we load the global model sent from server to " -":code:`self.bst`, and then update model weights on local training data " -"with function :code:`local_boost` as follows:" +"In many cases, machine learning isn't necessary to derive value from " +"data. Data analysis can yield valuable insights, but again, there's often" +" not enough data to get a clear answer. What's the average age at which " +"people develop a certain type of health condition? Federated analytics " +"enables such queries over multiple client nodes. It is usually used in " +"conjunction with other privacy-enhancing technologies like secure " +"aggregation to prevent the server from seeing the results submitted by " +"individual client nodes." msgstr "" +"많은 경우 머신러닝은 데이터로부터 가치를 얻기 위한 필수 조건이 아닙니다. 데이터 분석을 통해 귀중한 통찰력을 얻을 수 있지만, " +"명확한 답변을 얻기에는 데이터가 충분하지 않은 경우가 많습니다. 특정 유형의 건강 상태가 발생하는 평균 연령은 몇 살입니까? 연합 " +"분석을 사용하면 여러 클라이언트 노드에서 이러한 쿼리(query)를 실행할 수 있습니다. 
서버가 단일 클라이언트 노드에서 제출한 " +"결과를 보지 못하도록 보안을 강화한 집합 방식과 같은 다른 프라이버시 향상 기술과 함께 자주 사용됩니다." -#: ../../source/tutorial-quickstart-xgboost.rst:269 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 msgid "" -"Given :code:`num_local_round`, we update trees by calling " -":code:`self.bst.update` method. After training, the last " -":code:`N=num_local_round` trees will be extracted to send to the server." +"Differential privacy (DP) is often mentioned in the context of Federated " +"Learning. It is a privacy-preserving method used when analyzing and " +"sharing statistical data, ensuring the privacy of individual " +"participants. DP achieves this by adding statistical noise to the model " +"updates, ensuring any individual participants’ information cannot be " +"distinguished or re-identified. This technique can be considered an " +"optimization that provides a quantifiable privacy protection measure." msgstr "" +"차분 프라이버시(Differential Privacy)는 연합 학습의 맥락에서 종종 언급됩니다. 통계 데이터를 분석하고 공유할 때 " +"사용하는 프라이버시 보호 방식으로, 참가자 개인의 프라이버시를 보장합니다. 차분 프라이버시는 모델 업데이트에 통계적 " +"잡음(noise)를 추가하여 개별 참가자의 정보를 구별하거나 재식별할 수 없도록 함으로써 이를 달성합니다. 이 기술은 정량적 개인 " +"정보 보호 조치를 제공하는 최적화라고 볼 수 있습니다." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 +msgid "Flower" +msgstr "Flower" -#: ../../source/tutorial-quickstart-xgboost.rst:291 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 msgid "" -"In :code:`evaluate`, we call :code:`self.bst.eval_set` function to " -"conduct evaluation on valid set. The AUC value will be returned." +"Federated learning, federated evaluation, and federated analytics require" +" infrastructure to move machine learning models back and forth, train and" +" evaluate them on local data, and then aggregate the updated models. " +"Flower provides the infrastructure to do exactly that in an easy, " +"scalable, and secure way. In short, Flower presents a unified approach to" +" federated learning, analytics, and evaluation. 
It allows the user to " +"federate any workload, any ML framework, and any programming language." msgstr "" +"연합 학습, 연합 평가 및 연합 분석은 머신러닝 모델을 앞뒤로 이동하고 로컬 데이터에 대해 훈련 및 평가한 다음 업데이트된 모델을 " +"통합하기 위한 기본 프레임워크가 필요합니다. Flower가 제공하는 기반 구조는 간단하고 확장 가능하며 안전한 방식으로 이러한 " +"목표를 달성합니다. 간단히 말해서, Flower는 연합 학습, 분석 및 평가를 위한 통합 접근 방식을 제공합니다. 이를 통해 " +"사용자는 모든 워크로드, 머신러닝 프레임워크 및 모든 프로그래밍 언어를 통합할 수 있습니다." -#: ../../source/tutorial-quickstart-xgboost.rst:294 -msgid "" -"Now, we can create an instance of our class :code:`XgbClient` and add one" -" line to actually run this client:" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 +msgid "|55472eef61274ba1b739408607e109df|" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:300 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 msgid "" -"That's it for the client. We only have to implement :code:`Client`and " -"call :code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` " -"tells the client which server to connect to. In our case we can run the " -"server and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." -msgstr "" +"Flower federated learning server and client nodes (car, scooter, personal" +" computer, roomba, and phone)" +msgstr "Flower 연합 학습 서버 및 클라이언트 노드(자동차, 스쿠터, 개인용 컴퓨터, 룸바, 전화)" -#: ../../source/tutorial-quickstart-xgboost.rst:311 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 msgid "" -"These updates are then sent to the *server* which will aggregate them to " -"produce a better model. Finally, the *server* sends this improved version" -" of the model back to each *client* to finish a complete FL round." 
-msgstr "" +"Congratulations, you just learned the basics of federated learning and " +"how it relates to the classic (centralized) machine learning!" +msgstr "축하합니다, 지금까지 당신은 연합 학습의 기본 지식과 그것이 어떻게 전통적 (중앙 집중식) 머신러닝과 관련되는지 배웠습니다!" -#: ../../source/tutorial-quickstart-xgboost.rst:314 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 msgid "" -"In a file named :code:`server.py`, import Flower and FedXgbBagging from " -":code:`flwr.server.strategy`." -msgstr "" +"In the next part of this tutorial, we are going to build a first " +"federated learning system with Flower." +msgstr "이 튜토리얼의 다음 부분에서는 Flower와 함께 첫 번째 연합 학습 시스템을 구축할 것입니다." -#: ../../source/tutorial-quickstart-xgboost.rst:316 -msgid "We first define a strategy for XGBoost bagging aggregation." +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 +msgid "" +"Before you continue, make sure to join the Flower community on Slack: " +"`Join Slack `__" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:339 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 msgid "" -"We use two clients for this example. An " -":code:`evaluate_metrics_aggregation` function is defined to collect and " -"wighted average the AUC values from clients." +"The `Flower Federated Learning Tutorial - Part 1 " +"`__ shows how to build a simple federated learning system " +"with PyTorch and Flower." msgstr "" +"`Flower 연합 학습 튜토리얼- 1부 `__ PyTorch와 Flower를 사용하여 간단한 연합 학습 시스템을" +" 구축하는 방법을 보여줍니다." -#: ../../source/tutorial-quickstart-xgboost.rst:342 -msgid "Then, we start the server:" -msgstr "" +#~ msgid "" +#~ "Currently, Flower provides two images, a" +#~ " ``base`` image and a ``superlink`` " +#~ "image. The base image, as the name" +#~ " suggests, contains basic dependencies that" +#~ " the SuperLink needs. This includes " +#~ "system dependencies, Python and Python " +#~ "tools. 
The SuperLink image is based " +#~ "on the base image, but it " +#~ "additionally installs the SuperLink using " +#~ "``pip``." +#~ msgstr "" +#~ "현재, Flower는 \"base\" 이미지 그리고 " +#~ "\"superlink\" 이미지를 제공합니다. base 이미지는 이름에서" +#~ " 알 수 있듯이 SuperLink가 필요로 하는 기본" +#~ " dependencies를 포함하고 있습니다. 여기에는 시스템 " +#~ "dependencies, Python 및 Python 도구가 포함됩니다." +#~ " SuperLink 이미지는 base 이미지를 기반으로 하지만" +#~ " \"pip\"을 사용하여 SuperLink를 추가로 설치합니다." -#: ../../source/tutorial-quickstart-xgboost.rst:354 -msgid "Tree-based bagging aggregation" -msgstr "" +#~ msgid "``3.11``" +#~ msgstr "``3.11``" -#: ../../source/tutorial-quickstart-xgboost.rst:356 -msgid "" -"You must be curious about how bagging aggregation works. Let's look into " -"the details." -msgstr "" +#~ msgid "Defaults to ``22.04``." +#~ msgstr "``22.04``이 기본값." -#: ../../source/tutorial-quickstart-xgboost.rst:358 -msgid "" -"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define " -":code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`." -" Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` " -"and :code:`evaluate` methods as follows:" -msgstr "" +#~ msgid "Defaults to ``flwr/base``." +#~ msgstr "``flwr/base``이 기본값." -#: ../../source/tutorial-quickstart-xgboost.rst:454 -msgid "" -"In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " -"trees by calling :code:`aggregate()` function:" -msgstr "" +#~ msgid "The Python version of the base image." +#~ msgstr "base 이미지의 Python 버전." -#: ../../source/tutorial-quickstart-xgboost.rst:513 -msgid "" -"In this function, we first fetch the number of trees and the number of " -"parallel trees for the current and previous model by calling " -":code:`_get_tree_nums`. Then, the fetched information will be aggregated." -" After that, the trees (containing model weights) are aggregated to " -"generate a new tree model." -msgstr "" +#~ msgid "Defaults to ``py3.11``." +#~ msgstr "``py3.11``이 기본값." 
-#: ../../source/tutorial-quickstart-xgboost.rst:518 -msgid "" -"After traversal of all clients' models, a new global model is generated, " -"followed by the serialisation, and sending back to each client." -msgstr "" +#~ msgid "Defaults to ``ubuntu22.04``." +#~ msgstr "``ubuntu22.04``이 기본값." -#: ../../source/tutorial-quickstart-xgboost.rst:523 -msgid "Launch Federated XGBoost!" -msgstr "" +#~ msgid "Defaults to ``flwr``." +#~ msgstr "``flwr``이 기본값." -#: ../../source/tutorial-quickstart-xgboost.rst:585 -msgid "" -"Congratulations! You've successfully built and run your first federated " -"XGBoost system. The AUC values can be checked in " -":code:`metrics_distributed`. One can see that the average AUC increases " -"over FL rounds." -msgstr "" +#~ msgid "" +#~ "The name of image is ``flwr_superlink``" +#~ " and the tag ``0.1.0``. Remember that" +#~ " the build arguments as well as " +#~ "the name and tag can be adapted" +#~ " to your needs. These values serve" +#~ " as examples only." +#~ msgstr "" +#~ "이미지의 이름은 ``flwr_superlink``이고 태그는 " +#~ "``0.1.0``입니다. 필요에 따라 빌드 argument들 뿐만 " +#~ "아니라 이름과 태그도 정할 수 있습니다. 이 값들은" +#~ " 예시일 뿐입니다." -#: ../../source/tutorial-quickstart-xgboost.rst:590 -msgid "" -"The full `source code `_ for this example can be found in :code:`examples" -"/xgboost-quickstart`." -msgstr "" +#~ msgid "Edge Client Engine" +#~ msgstr "엣지 클라이언트 엔진" -#: ../../source/tutorial-quickstart-xgboost.rst:594 -msgid "Comprehensive Federated XGBoost" -msgstr "" +#~ msgid "" +#~ "`Flower `_ core framework " +#~ "architecture with Edge Client Engine" +#~ msgstr "`Flower `_의 핵심 프레임워크 아키텍처와 엣지 클라이언트 엔진" -#: ../../source/tutorial-quickstart-xgboost.rst:596 -msgid "" -"Now that you have known how federated XGBoost work with Flower, it's time" -" to run some more comprehensive experiments by customising the " -"experimental settings. 
In the xgboost-comprehensive example (`full code " -"`_), we provide more options to define various experimental" -" setups, including aggregation strategies, data partitioning and " -"centralised/distributed evaluation. We also support :doc:`Flower " -"simulation ` making it easy to simulate large " -"client cohorts in a resource-aware manner. Let's take a look!" -msgstr "" +#~ msgid "Virtual Client Engine" +#~ msgstr "가상 클라이언트 엔진" -#: ../../source/tutorial-quickstart-xgboost.rst:603 -msgid "Cyclic training" -msgstr "" +#~ msgid "" +#~ "`Flower `_ core framework " +#~ "architecture with Virtual Client Engine" +#~ msgstr "`Flower `_의 핵심 프레임워크 아키텍처와 가상 클라이언트 엔진" -#: ../../source/tutorial-quickstart-xgboost.rst:605 -msgid "" -"In addition to bagging aggregation, we offer a cyclic training scheme, " -"which performs FL in a client-by-client fashion. Instead of aggregating " -"multiple clients, there is only one single client participating in the " -"training per round in the cyclic training scenario. The trained local " -"XGBoost trees will be passed to the next client as an initialised model " -"for next round's boosting." -msgstr "" +#~ msgid "Virtual Client Engine and Edge Client Engine in the same workload" +#~ msgstr "동일 작업에서 가상 클라이언트 엔진과 엣지 클라이언트 엔진" -#: ../../source/tutorial-quickstart-xgboost.rst:609 -msgid "" -"To do this, we first customise a :code:`ClientManager` in " -":code:`server_utils.py`:" -msgstr "" +#~ msgid "" +#~ "`Flower `_ core framework " +#~ "architecture with both Virtual Client " +#~ "Engine and Edge Client Engine" +#~ msgstr "`Flower `_의 핵심 프레임워크 아키텍처와 가상 및 엣지 클라이언트 엔진" -#: ../../source/tutorial-quickstart-xgboost.rst:649 -msgid "" -"The customised :code:`ClientManager` samples all available clients in " -"each FL round based on the order of connection to the server. 
Then, we " -"define a new strategy :code:`FedXgbCyclic` in " -":code:`flwr.server.strategy.fedxgb_cyclic.py`, in order to sequentially " -"select only one client in given round and pass the received model to next" -" client." -msgstr "" +#~ msgid "Clone the flower repository." +#~ msgstr "Flower 레포지토리를 복제합니다." -#: ../../source/tutorial-quickstart-xgboost.rst:690 -msgid "" -"Unlike the original :code:`FedAvg`, we don't perform aggregation here. " -"Instead, we just make a copy of the received client model as global model" -" by overriding :code:`aggregate_fit`." -msgstr "" +#~ msgid "" +#~ "Please follow the first section on " +#~ ":doc:`Run Flower using Docker ` which " +#~ "covers this step in more detail." +#~ msgstr "" +#~ ":doc:Run Flower using Docker 의 첫 번째 섹션을" +#~ " 따라 주십시오. 해당 부분을 더 자세히 설명해 " +#~ "줍니다." -#: ../../source/tutorial-quickstart-xgboost.rst:693 -msgid "" -"Also, the customised :code:`configure_fit` and :code:`configure_evaluate`" -" methods ensure the clients to be sequentially selected given FL round:" -msgstr "" +#~ msgid "``22.04``" +#~ msgstr "``22.04``" -#: ../../source/tutorial-quickstart-xgboost.rst:757 -msgid "Customised data partitioning" -msgstr "" +#~ msgid "``23.0.1``" +#~ msgstr "``23.0.1``" -#: ../../source/tutorial-quickstart-xgboost.rst:759 -msgid "" -"In :code:`dataset.py`, we have a function :code:`instantiate_partitioner`" -" to instantiate the data partitioner based on the given " -":code:`num_partitions` and :code:`partitioner_type`. Currently, we " -"provide four supported partitioner type to simulate the uniformity/non-" -"uniformity in data quantity (uniform, linear, square, exponential)." 
-msgstr "" +#~ msgid "``69.0.2``" +#~ msgstr "``69.0.2``" -#: ../../source/tutorial-quickstart-xgboost.rst:790 -msgid "Customised centralised/distributed evaluation" -msgstr "" +#~ msgid "``1.8.0``" +#~ msgstr "``1.8.0``" -#: ../../source/tutorial-quickstart-xgboost.rst:792 -msgid "" -"To facilitate centralised evaluation, we define a function in " -":code:`server_utils.py`:" -msgstr "" +#~ msgid "Building the SuperLink/SuperNode or ServerApp image" +#~ msgstr "SuperLink/SuperNode 또는 ServerApp 이미지 빌드" -#: ../../source/tutorial-quickstart-xgboost.rst:824 -msgid "" -"This function returns a evaluation function which instantiates a " -":code:`Booster` object and loads the global model weights to it. The " -"evaluation is conducted by calling :code:`eval_set()` method, and the " -"tested AUC value is reported." -msgstr "" +#~ msgid "``1.8.0-py3.10-ubuntu22.04``" +#~ msgstr "``1.8.0-py3.10-ubuntu22.04``" -#: ../../source/tutorial-quickstart-xgboost.rst:827 -msgid "" -"As for distributed evaluation on the clients, it's same as the quick-" -"start example by overriding the :code:`evaluate()` method insides the " -":code:`XgbClient` class in :code:`client_utils.py`." -msgstr "" +#~ msgid "" +#~ "The following example creates a " +#~ "SuperLink/SuperNode or ServerApp image with" +#~ " the official Flower base image:" +#~ msgstr "다음 예시에서는 공식 Flower 기본 이미지로 SuperLink/SuperNode 또는 ServerApp이미지를 만듭니다:" -#: ../../source/tutorial-quickstart-xgboost.rst:831 -msgid "Flower simulation" -msgstr "" +#~ msgid "Trigger the CI for building the Docker images." +#~ msgstr "Docker 이미지 빌드를 위해 CI를 트리거합니다." -#: ../../source/tutorial-quickstart-xgboost.rst:832 -msgid "" -"We also provide an example code (:code:`sim.py`) to use the simulation " -"capabilities of Flower to simulate federated XGBoost training on either a" -" single machine or a cluster of machines." 
-msgstr "" +#~ msgid "" +#~ "To trigger the workflow, a collaborator" +#~ " must create a ``workflow_dispatch`` event" +#~ " in the GitHub CI. This can be" +#~ " done either through the UI or " +#~ "via the GitHub CLI. The event " +#~ "requires only one input, the Flower " +#~ "version, to be released." +#~ msgstr "" +#~ "워크플로우를 트리거하려면 공동 작업자가 GitHub CI에서 " +#~ "``workflow_dispatch``를 생성해야 합니다. 이 작업은 " +#~ "UI 또는 GitHub CLI 를 통해 수행할 수" +#~ " 있습니다. 이벤트는 Flower 버전 한 가지 입력만" +#~ " 필요합니다." -#: ../../source/tutorial-quickstart-xgboost.rst:866 -msgid "" -"After importing all required packages, we define a :code:`main()` " -"function to perform the simulation process:" -msgstr "" +#~ msgid "**Via the UI**" +#~ msgstr "**UI를 통해서**" -#: ../../source/tutorial-quickstart-xgboost.rst:921 -msgid "" -"We first load the dataset and perform data partitioning, and the pre-" -"processed data is stored in a :code:`list`. After the simulation begins, " -"the clients won't need to pre-process their partitions again." -msgstr "" +#~ msgid "" +#~ "Go to the ``Build docker images`` " +#~ "workflow `page " +#~ "`_." +#~ msgstr "" +#~ "``Build docker images`` 워크플로우 `페이지 " +#~ "`_로 이동합니다." + +#~ msgid "" +#~ "Click on the ``Run workflow`` button " +#~ "and type the new version of Flower" +#~ " in the ``Version of Flower`` input" +#~ " field." +#~ msgstr "``Run workflow`` 버튼을 누르고 ``Version of Flower``에 Flower의 새버전을 입력합니다." -#: ../../source/tutorial-quickstart-xgboost.rst:924 -msgid "Then, we define the strategies and other hyper-parameters:" -msgstr "" +#~ msgid "Click on the **green** ``Run workflow`` button." +#~ msgstr "**초록색**의 ``Run workflow``버튼을 클릭합니다." 
-#: ../../source/tutorial-quickstart-xgboost.rst:975 -msgid "" -"After that, we start the simulation by calling " -":code:`fl.simulation.start_simulation`:" -msgstr "" +#~ msgid "**Via the GitHub CI**" +#~ msgstr "**GitHub CI를 통해서**" -#: ../../source/tutorial-quickstart-xgboost.rst:995 -msgid "" -"One of key parameters for :code:`start_simulation` is :code:`client_fn` " -"which returns a function to construct a client. We define it as follows:" -msgstr "" +#~ msgid "" +#~ "Make sure you are logged in via" +#~ " ``gh auth login`` and that the " +#~ "current working directory is the root" +#~ " of the Flower repository." +#~ msgstr "``gh auth login``을 통해 로그인 했는지, 현재 작업 디렉토리가 Flower 리포지토리의 root인지 확인하세요." -#: ../../source/tutorial-quickstart-xgboost.rst:1038 -msgid "Arguments parser" -msgstr "" +#~ msgid "" +#~ "Trigger the workflow via ``gh workflow" +#~ " run docker-images.yml -f flwr-" +#~ "version=``." +#~ msgstr "" +#~ "``gh workflow run docker-images.yml -f" +#~ " flwr-version=``을 통해 워크플로우 를" +#~ " 트리거합니다." -#: ../../source/tutorial-quickstart-xgboost.rst:1040 -msgid "" -"In :code:`utils.py`, we define the arguments parsers for clients, server " -"and simulation, allowing users to specify different experimental " -"settings. Let's first see the sever side:" -msgstr "" +#~ msgid "Example: JAX - Run JAX Federated" +#~ msgstr "예시: JAX - JAX Federated 실행" -#: ../../source/tutorial-quickstart-xgboost.rst:1086 -msgid "" -"This allows user to specify training strategies / the number of total " -"clients / FL rounds / participating clients / clients for evaluation, and" -" evaluation fashion. Note that with :code:`--centralised-eval`, the sever" -" will do centralised evaluation and all functionalities for client " -"evaluation will be disabled." -msgstr "" +#~ msgid "" +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`__. 
Supported " +#~ "architectures include ``amd64`` and " +#~ "``arm64v8``." +#~ msgstr "" +#~ "Flower를 시작하는 가장 간단한 방법은 `Docker " +#~ "Hub `__에서 찾을 수 " +#~ "있는 미리 만들어진 Docker 이미지를 사용하는 것입니다." +#~ " 지원되는 아키텍처는 ``amd64`` 및 ``arm64v8``입니다." -#: ../../source/tutorial-quickstart-xgboost.rst:1090 -msgid "Then, the argument parser on client side:" -msgstr "" +#~ msgid "" +#~ "If you do not see the version " +#~ "of Docker but instead get an error" +#~ " saying that the command was not " +#~ "found, you will need to install " +#~ "Docker first. You can find installation" +#~ " instruction `here `_." +#~ msgstr "" +#~ "Docker 버전이 표시되지 않고 대신 명령을 찾을 수 없다는" +#~ " 오류가 표시되는 경우 먼저 Docker를 설치해야 " +#~ "합니다. `여기 `_에서" +#~ " 설치 지침을 찾을 수 있습니다." -#: ../../source/tutorial-quickstart-xgboost.rst:1144 -msgid "" -"This defines various options for client data partitioning. Besides, " -"clients also have an option to conduct evaluation on centralised test set" -" by setting :code:`--centralised-eval`, as well as an option to perform " -"scaled learning rate based on the number of clients by setting :code" -":`--scaled-lr`." -msgstr "" +#~ msgid "" +#~ "On Linux, Docker commands require " +#~ "``sudo`` privilege. If you want to " +#~ "avoid using ``sudo``, you can follow " +#~ "the `Post-installation steps " +#~ "`_" +#~ " on the official Docker website." +#~ msgstr "" +#~ "Linux에서 Docker 명령을 실행하려면 ``sudo`` 권한이" +#~ " 필요합니다. ``sudo`` 를 사용하지 않으려면 공식 " +#~ "Docker 웹사이트의 `Post-installation steps " +#~ "`_를" +#~ " 따르세요." -#: ../../source/tutorial-quickstart-xgboost.rst:1148 -msgid "We also have an argument parser for simulation:" -msgstr "" +#~ msgid "" +#~ "To ensure optimal performance and " +#~ "compatibility, the SuperLink, SuperNode and" +#~ " ServerApp image must have the same" +#~ " version when running together. This " +#~ "guarantees seamless integration and avoids " +#~ "potential conflicts or issues that may" +#~ " arise from using different versions." 
+#~ msgstr "" +#~ "최적의 성능과 호환성을 보장하려면 SuperLink, SuperNode" +#~ " 및 ServerApp 이미지를 함께 실행할 때 버전이" +#~ " 동일해야 합니다. 이렇게 하면 원활한 통합을 보장하고" +#~ " 서로 다른 버전을 사용할 때 발생할 수 있는" +#~ " 잠재적인 충돌이나 문제를 방지할 수 있습니다." -#: ../../source/tutorial-quickstart-xgboost.rst:1226 -msgid "This integrates all arguments for both client and server sides." -msgstr "" +#~ msgid "Flower SuperLink" +#~ msgstr "Flower SuperLink" -#: ../../source/tutorial-quickstart-xgboost.rst:1229 -msgid "Example commands" -msgstr "" +#~ msgid "If you're looking to try out Flower, you can use the following command:" +#~ msgstr "Flower를 사용해보고 싶다면 다음 명령을 사용하면 됩니다:" -#: ../../source/tutorial-quickstart-xgboost.rst:1231 -msgid "" -"To run a centralised evaluated experiment with bagging strategy on 5 " -"clients with exponential distribution for 50 rounds, we first start the " -"server as below:" -msgstr "" +#~ msgid "" +#~ "The command pulls the Docker image " +#~ "with the tag ``1.8.0`` from Docker " +#~ "Hub. The tag specifies the Flower " +#~ "version. In this case, Flower 1.8.0. " +#~ "The ``--rm`` flag tells Docker to " +#~ "remove the container after it exits." +#~ msgstr "" +#~ "이 명령은 Docker Hub에서 ``1.8.0`` 태그가 " +#~ "있는 Docker 이미지를 가져옵니다. 이 태그는 Flower" +#~ " 버전을 지정합니다. 이 경우, Flower 1.8.0입니다." +#~ " '`--rm`` 플래그는 컨테이너가 종료된 후 컨테이너를 " +#~ "제거하도록 Docker에 지시합니다." -#: ../../source/tutorial-quickstart-xgboost.rst:1238 -msgid "Then, on each client terminal, we start the clients:" -msgstr "" +#~ msgid "" +#~ "The ``-p :`` flag tells " +#~ "Docker to map the ports " +#~ "``9091``/``9092`` of the host to " +#~ "``9091``/``9092`` of the container, allowing" +#~ " you to access the Driver API " +#~ "on ``http://localhost:9091`` and the Fleet " +#~ "API on ``http://localhost:9092``. Lastly, any" +#~ " flag that comes after the tag " +#~ "is passed to the Flower SuperLink. " +#~ "Here, we are passing the flag " +#~ "``--insecure``." 
+#~ msgstr "" +#~ "``-p :`` 플래그는 호스트의 포트 " +#~ "``9091``/``9092``를 컨테이너의 ``9091``/``9092``에 매핑하여 " +#~ "``http://localhost:9091``의 드라이버 API와 " +#~ "``http://localhost:9092``의 Fleet API에 액세스할 수" +#~ " 있도록 Docker에 지시합니다. 마지막으로, 태그 뒤에 " +#~ "오는 모든 플래그는 Flower SuperLink에 전달됩니다. " +#~ "여기서는 ``--insecure``플래그를 전달합니다." -#: ../../source/tutorial-quickstart-xgboost.rst:1244 -msgid "To run the same experiment with Flower simulation:" -msgstr "" +#~ msgid "" +#~ "The ``--insecure`` flag enables insecure " +#~ "communication (using HTTP, not HTTPS) " +#~ "and should only be used for " +#~ "testing purposes. We strongly recommend " +#~ "enabling `SSL `__ when " +#~ "deploying to a production environment." +#~ msgstr "" +#~ "``--insecure`` 플래그는 안전하지 않은 통신(HTTPS가 아닌" +#~ " HTTP 사용)을 활성화하며 테스트 목적으로만 사용해야 " +#~ "합니다. 프로덕션 환경에 배포할 때는 `SSL " +#~ "`__을 활성화할 것을 강력히 권장합니다." -#: ../../source/tutorial-quickstart-xgboost.rst:1250 -msgid "" -"The full `code `_ for this comprehensive example can be found in" -" :code:`examples/xgboost-comprehensive`." -msgstr "" +#~ msgid "" +#~ "You can use ``--help`` to view all" +#~ " available flags that the SuperLink " +#~ "supports:" +#~ msgstr "'`--help``을 사용하면 SuperLink가 지원하는 모든 플래그를 볼 수 있습니다:" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 -msgid "Build a strategy from scratch" -msgstr "" +#~ msgid "Mounting a volume to store the state on the host system" +#~ msgstr "호스트 시스템에 상태를 저장할 볼륨 마운트하기" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 -msgid "" -"Welcome to the third part of the Flower federated learning tutorial. In " -"previous parts of this tutorial, we introduced federated learning with " -"PyTorch and Flower (`part 1 `__) and we learned how strategies " -"can be used to customize the execution on both the server and the clients" -" (`part 2 `__)." 
-msgstr "" +#~ msgid "" +#~ "If you want to persist the state" +#~ " of the SuperLink on your host " +#~ "system, all you need to do is " +#~ "specify a directory where you want " +#~ "to save the file on your host " +#~ "system and a name for the database" +#~ " file. By default, the SuperLink " +#~ "container runs with a non-root " +#~ "user called ``app`` with the user " +#~ "ID ``49999``. It is recommended to " +#~ "create new directory and change the " +#~ "user ID of the directory to " +#~ "``49999`` to ensure the mounted " +#~ "directory has the proper permissions. If" +#~ " you later want to delete the " +#~ "directory, you can change the user " +#~ "ID back to the current user ID " +#~ "by running ``sudo chown -R $USER:$(id" +#~ " -gn) state``." +#~ msgstr "" +#~ "호스트 시스템에서 SuperLink의 상태를 유지하려면 호스트 " +#~ "시스템에서 파일을 저장할 디렉터리와 데이터베이스 파일의 이름을" +#~ " 지정하기만 하면 됩니다. 기본적으로 SuperLink 컨테이너는" +#~ " 사용자 ID가 ``49999``인 ``app``이라는 루트가 아닌" +#~ " 사용자로 실행됩니다. 마운트된 디렉터리에 적절한 권한이 " +#~ "있는지 확인하려면 새 디렉터리를 생성하고 디렉터리의 사용자" +#~ " ID를 ``49999``로 변경하는 것이 좋습니다. 나중에 " +#~ "디렉터리를 삭제하려면 ``sudo chown -R $USER:$(id" +#~ " -gn) state``를 실행하여 사용자 ID를 현재 " +#~ "사용자 ID로 다시 변경할 수 있습니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 -msgid "" -"In this notebook, we'll continue to customize the federated learning " -"system we built previously by creating a custom version of FedAvg (again," -" using `Flower `__ and `PyTorch " -"`__)." -msgstr "" +#~ msgid "" +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``--volume`` to " +#~ "mount the local directory into the " +#~ "``/app/certificates/`` directory of the " +#~ "container. This allows the SuperLink to" +#~ " access the files within the " +#~ "container. The ``ro`` stands for " +#~ "``read-only``. Docker volumes default to" +#~ " ``read-write``; that option tells " +#~ "Docker to make the volume ``read-" +#~ "only`` instead. 
Finally, we pass the " +#~ "names of the certificates and key " +#~ "file to the SuperLink with the " +#~ "``--ssl-ca-certfile``, ``--ssl-certfile`` " +#~ "and ``--ssl-keyfile`` flag." +#~ msgstr "" +#~ "필요한 모든 파일이 로컬``certificates`` 디렉터리에 있다고" +#~ " 가정하면, ``--volume``플래그를 사용하여 로컬 디렉터리를 " +#~ "컨테이너의 ``/app/certificates/`` 디렉터리에 마운트할 수 " +#~ "있습니다. 이렇게 하면 SuperLink 가 컨테이너 내의" +#~ " 파일에 액세스할 수 있습니다. ``ro``는 ``read-" +#~ "only``을 의미합니다. Docker 볼륨은 기본적으로 " +#~ "``read-write``로 설정되어 있는데, 이 옵션을 사용하면" +#~ " 볼륨을 ``read-only``으로 만들 수 있습니다. " +#~ "마지막으로 인증서 및 키 파일의 이름을 ``--ssl-" +#~ "ca-certfile``, ``--ssl-certfile`` 및 " +#~ "``--ssl-keyfile`` 플래그와 함께 SuperLink에 전달합니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 -msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the Flower community on Slack to connect, ask questions, and get help: " -"`Join Slack `__ 🌼 We'd love to hear from " -"you in the ``#introductions`` channel! And if anything is unclear, head " -"over to the ``#questions`` channel." -msgstr "" +#~ msgid "" +#~ "The SuperNode Docker image comes with" +#~ " a pre-installed version of Flower" +#~ " and serves as a base for " +#~ "building your own SuperNode image." +#~ msgstr "" +#~ "SuperNode Docker 이미지는 Flower의 사전 설치된 " +#~ "버전과 함께 제공되며, 자체 SuperNode 이미지를 " +#~ "구축하기 위한 기반 역할을 합니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:17 -msgid "Let's build a new ``Strategy`` from scratch!" -msgstr "" +#~ msgid "" +#~ "We will use the ``quickstart-pytorch``" +#~ " example, which you can find in " +#~ "the Flower repository, to illustrate how" +#~ " you can dockerize your ClientApp." 
+#~ msgstr "" +#~ "Flower 레포지토리에서 찾을 수 있는 ``quickstart-" +#~ "pytorch`` 예제를 사용하여 ClientApp을 도커라이즈하는 " +#~ "방법을 설명하겠습니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:29 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:29 -msgid "Preparation" -msgstr "" +#~ msgid "" +#~ "Before we can start, we need to" +#~ " meet a few prerequisites in our " +#~ "local development environment. You can " +#~ "skip the first part if you want" +#~ " to run your ClientApp instead of " +#~ "the ``quickstart-pytorch`` example." +#~ msgstr "" +#~ "시작하기 전에 로컬 개발 환경에서 몇 가지 전제" +#~ " 조건을 충족해야 합니다. 'quickstart-pytorch' " +#~ "예제 대신 ClientApp을 실행하려는 경우 첫 번째 " +#~ "부분을 건너뛸 수 있습니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:31 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:32 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:31 -msgid "" -"Before we begin with the actual code, let's make sure that we have " -"everything we need." -msgstr "" +#~ msgid "Let's assume the following project layout:" +#~ msgstr "다음과 같은 프로젝트 레이아웃을 가정해 보겠습니다:" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:43 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:44 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:43 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:43 -msgid "Installing dependencies" -msgstr "" +#~ msgid "" +#~ "First, we need to create a " +#~ "``requirements.txt`` file in the directory " +#~ "where the ``ClientApp`` code is located." +#~ " In the file, we list all the" +#~ " dependencies that the ClientApp requires." +#~ msgstr "" +#~ "먼저 ``ClientApp`` 코드가 있는 디렉토리에 " +#~ "``requirements.txt`` 파일을 만들어야 합니다. 이 " +#~ "파일에는 클라이언트 앱에 필요한 모든 의존성을 나열합니다." 
-#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:45 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:46 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:45 -msgid "First, we install the necessary packages:" -msgstr "" +#~ msgid "" +#~ "Note that `flwr `__" +#~ " is already installed in the " +#~ "``flwr/supernode`` base image, so you " +#~ "only need to include other package " +#~ "dependencies in your ``requirements.txt``, " +#~ "such as ``torch``, ``tensorflow``, etc." +#~ msgstr "" +#~ "`flwr `__ 는 이미 " +#~ "``flwr/supernode`` 기본 이미지에 설치되어 있으므로, " +#~ "``torch``, ``tensorflow`` 등과 같은 다른 패키지" +#~ " dependencies만 ``requirements.txt``에 포함시키면 됩니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:65 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:66 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:65 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:65 -msgid "" -"Now that we have all dependencies installed, we can import everything we " -"need for this tutorial:" -msgstr "" +#~ msgid "" +#~ "Next, we create a Dockerfile. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.supernode`` in ``examples/quickstart-" +#~ "pytorch``." +#~ msgstr "" +#~ "다음으로, Dockerfile을 생성합니다.``quickstart-pytorch`` " +#~ "예제를 사용하는 경우 ``examples/quickstart-pytorch``에" +#~ " ``Dockerfile.supernode``라는 새 파일을 생성합니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:101 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:102 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:101 -msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " -"GPU > Save``). 
Note, however, that Google Colab is not always able to " -"offer GPU acceleration. If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." -msgstr "" +#~ msgid "" +#~ "The ``Dockerfile.supernode`` contains the " +#~ "instructions that assemble the SuperNode " +#~ "image." +#~ msgstr "``Dockerfile.supernode``에는 SuperNode 이미지를 조립하는 지침이 포함되어 있습니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:114 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:115 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:114 -msgid "Data loading" -msgstr "" +#~ msgid "" +#~ "In the first two lines, we " +#~ "instruct Docker to use the SuperNode " +#~ "image tagged ``nightly`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. Next, we install the" +#~ " ClientApp dependencies by copying the " +#~ "``requirements.txt`` file into the image " +#~ "and run ``pip install``. In the " +#~ "last two lines, we copy the " +#~ "``client.py`` module into the image and" +#~ " set the entry point to ``flower-" +#~ "client-app`` with the argument " +#~ "``client:app``. The argument is the " +#~ "object reference of the ClientApp " +#~ "(``:``) that will be run" +#~ " inside the ClientApp." +#~ msgstr "" +#~ "처음 두 줄에서는 ``nightly`` 태그가 붙은 " +#~ "SuperNode 이미지를 기본 이미지로 사용하고 작업 " +#~ "디렉터리를 ``/app``로 설정하도록 Docker에 지시합니다. 이제" +#~ " ``/app`` 디렉토리에서 다음 명령이 실행됩니다. 다음으로," +#~ " ``requirements.txt`` 파일을 이미지에 복사하여 " +#~ "ClientApp dependencies 요소를 설치하고 ``pip " +#~ "install``을 실행합니다. 
마지막 두 줄에서 " +#~ "``client.py`` 모듈을 이미지에 복사하고 ``client:app`` " +#~ "인수를 사용하여 진입점을 ``flower-client-app``로 " +#~ "설정합니다. 인수는 클라이언트앱 내부에서 실행될 클라이언트앱의 " +#~ "객체 참조 (``:``) 입니다." + +#~ msgid "Building the SuperNode Docker image" +#~ msgstr "SuperNode Docker 이미지 빌드" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:116 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:116 -msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``. We introduce a new parameter" -" ``num_clients`` which allows us to call ``load_datasets`` with different" -" numbers of clients." -msgstr "" +#~ msgid "" +#~ "We gave the image the name " +#~ "``flwr_supernode``, and the tag ``0.0.1``. " +#~ "Remember that the here chosen values " +#~ "only serve as an example. You can" +#~ " change them to your needs." +#~ msgstr "" +#~ "이미지에 ``flwr_supernode``라는 이름을 붙이고 ``0.0.1``" +#~ " 태그를 붙였습니다. 여기서 선택한 값은 예시일 뿐이라는" +#~ " 점을 기억하세요. 필요에 따라 변경할 수 있습니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:167 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:168 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:167 -msgid "Model training/evaluation" -msgstr "" +#~ msgid "Running the SuperNode Docker image" +#~ msgstr "SuperNode Docker 이미지 실행" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:169 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:170 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 -msgid "" -"Let's continue with the usual model definition (including " -"``set_parameters`` and ``get_parameters``), training and test functions:" -msgstr "" +#~ msgid "Now that we have built the SuperNode image, we can finally run it." 
+#~ msgstr "이제 SuperNode 이미지를 빌드했으니 이제 실행할 수 있습니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:258 -msgid "Flower client" -msgstr "" +#~ msgid "Let's break down each part of this command:" +#~ msgstr "이 명령의 각 부분을 자세히 살펴보겠습니다:" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:260 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:260 -msgid "" -"To implement the Flower client, we (again) create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " -"``cid`` to the client and use it log additional details:" -msgstr "" +#~ msgid "" +#~ "``--rm``: This option specifies that the" +#~ " container should be automatically removed" +#~ " when it stops." +#~ msgstr "``--rm``: 이 옵션은 컨테이너가 중지될 때 자동으로 제거되도록 지정합니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:308 -msgid "Let's test what we have so far before we continue:" -msgstr "" +#~ msgid "``--insecure``: This option enables insecure communication." +#~ msgstr "``--insecure``: 이 옵션은 보안되지 않은 통신을 활성화합니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:339 -msgid "Build a Strategy from scratch" -msgstr "" +#~ msgid "" +#~ "``--superlink 192.168.1.100:9092``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Fleet" +#~ msgstr "``--superlink 192.168.1.100:9092``: 이 옵션은 SuperLinks Fleet의 주소를 지정합니다" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:341 -msgid "" -"Let’s overwrite the ``configure_fit`` method such that it passes a higher" -" learning rate (potentially also other hyperparameters) to the optimizer " -"of a fraction of the clients. 
We will keep the sampling of the clients as" -" it is in ``FedAvg`` and then change the configuration dictionary (one of" -" the ``FitIns`` attributes)." -msgstr "" +#~ msgid "API to connect to. Remember to update it with your SuperLink IP." +#~ msgstr "API에 연결할 수 있습니다. SuperLink IP로 업데이트하는 것을 잊지 마세요." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:507 -msgid "" -"The only thing left is to use the newly created custom Strategy " -"``FedCustom`` when starting the experiment:" -msgstr "" +#~ msgid "" +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your SuperNodes." +#~ msgstr "" +#~ "로컬에서 Flower를 실행하는 것을 테스트하려면 `bridge " +#~ "network `__를 생성하고 ``--network`` argument를 " +#~ "사용하고 SuperNodes를 실행할 Docker 네트워크의 이름을" +#~ " 전달하면 됩니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:534 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:932 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:697 -msgid "Recap" -msgstr "" +#~ msgid "" +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "SuperNode binary. To see all available" +#~ " flags that the SuperNode supports, " +#~ "run:" +#~ msgstr "" +#~ "태그 뒤에 오는 모든 argument는 Flower " +#~ "SuperNode 바이너리에 전달됩니다. SuperNode가 지원하는 " +#~ "사용 가능한 모든 플래그를 보려면 실행하세요:" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:536 -msgid "" -"In this notebook, we’ve seen how to implement a custom strategy. A custom" -" strategy enables granular control over client node configuration, result" -" aggregation, and more. To define a custom strategy, you only have to " -"overwrite the abstract methods of the (abstract) base class ``Strategy``." 
-" To make custom strategies even more powerful, you can pass custom " -"functions to the constructor of your new class (``__init__``) and then " -"call these functions whenever needed." -msgstr "" +#~ msgid "" +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your SuperNode container." +#~ msgstr "SSL을 사용하려면 PEM 인코딩된 루트 인증서를 SuperNode 컨테이너에 마운트해야 합니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:550 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:948 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:729 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:715 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 -msgid "" -"Before you continue, make sure to join the Flower community on Slack: " -"`Join Slack `__" -msgstr "" +#~ msgid "" +#~ "Similar to the SuperNode image, the " +#~ "ServerApp Docker image comes with a " +#~ "pre-installed version of Flower and " +#~ "serves as a base for building your" +#~ " own ServerApp image." +#~ msgstr "" +#~ "SuperNode 이미지와 마찬가지로 ServerApp Docker " +#~ "이미지는 Flower의 사전 설치된 버전과 함께 제공되며," +#~ " 자체 ServerApp 이미지를 구축하기 위한 기본 " +#~ "역할을 합니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:552 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:950 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:731 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:717 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 -msgid "" -"There's a dedicated ``#questions`` channel if you need help, but we'd " -"also love to hear who you are in ``#introductions``!" -msgstr "" +#~ msgid "" +#~ "We will use the same ``quickstart-" +#~ "pytorch`` example as we do in the" +#~ " Flower SuperNode section. 
If you " +#~ "have not already done so, please " +#~ "follow the `SuperNode Prerequisites`_ before" +#~ " proceeding." +#~ msgstr "" +#~ "여기서는 Flower SuperNode 섹션에서와 동일한`quickstart-" +#~ "pytorch`` 예제를 사용하겠습니다. 아직 수행하지 않았다면 " +#~ "계속 진행하기 전에 `SuperNode Prerequisites`_ 을" +#~ " 따르세요." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:554 -msgid "" -"The `Flower Federated Learning Tutorial - Part 4 " -"`__ introduces ``Client``, the flexible API underlying " -"``NumPyClient``." -msgstr "" +#~ msgid "Creating a ServerApp Dockerfile" +#~ msgstr "ServerApp Dockerfile 만들기" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 -msgid "Customize the client" -msgstr "" +#~ msgid "" +#~ "First, we need to create a " +#~ "Dockerfile in the directory where the" +#~ " ``ServerApp`` code is located. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.serverapp`` in ``examples/quickstart-" +#~ "pytorch``." +#~ msgstr "" +#~ "먼저, ``ServerApp`` 코드가 있는 디렉토리에 Docker파일을" +#~ " 생성해야 합니다. ``quickstart-pytorch`` 예제를 " +#~ "사용하는 경우 ``examples/quickstart-pytorch``에 " +#~ "``Dockerfile.serverapp``이라는 새 파일을 생성합니다." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 -msgid "" -"Welcome to the fourth part of the Flower federated learning tutorial. In " -"the previous parts of this tutorial, we introduced federated learning " -"with PyTorch and Flower (`part 1 `__), we learned how " -"strategies can be used to customize the execution on both the server and " -"the clients (`part 2 `__), and we built our own " -"custom strategy from scratch (`part 3 `__)." -msgstr "" +#~ msgid "" +#~ "The ``Dockerfile.serverapp`` contains the " +#~ "instructions that assemble the ServerApp " +#~ "image." +#~ msgstr "``Dockerfile.serverapp``에는 ServerApp 이미지를 합치는 지침이 포함되어 있습니다." 
+ +#~ msgid "" +#~ "In the first two lines, we " +#~ "instruct Docker to use the ServerApp " +#~ "image tagged ``1.8.0`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. In the last two " +#~ "lines, we copy the ``server.py`` module" +#~ " into the image and set the " +#~ "entry point to ``flower-server-app`` " +#~ "with the argument ``server:app``. The " +#~ "argument is the object reference of " +#~ "the ServerApp (``:``) that " +#~ "will be run inside the ServerApp " +#~ "container." +#~ msgstr "" +#~ "처음 두 줄에서는 ``1.8.0`` 태그가 붙은 " +#~ "ServerApp 이미지를 기본 이미지로 사용하고 작업 " +#~ "디렉터리를 ``/app``로 설정하도록 Docker에 지시합니다. 이제" +#~ " ``/app`` 디렉토리에서 다음 명령이 실행됩니다. 마지막" +#~ " 두 줄에서는 ``server.py`` 모듈을 이미지에 복사하고" +#~ " ``server:app`` argument를 사용하여 진입점을 " +#~ "``flower-server-app``로 설정합니다. 인수는 ServerApp" +#~ " 컨테이너 내에서 실행될 ServerApp의 객체 " +#~ "참조(``:``)입니다." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 -msgid "" -"In this notebook, we revisit ``NumPyClient`` and introduce a new " -"baseclass for building clients, simply named ``Client``. In previous " -"parts of this tutorial, we've based our client on ``NumPyClient``, a " -"convenience class which makes it easy to work with machine learning " -"libraries that have good NumPy interoperability. With ``Client``, we gain" -" a lot of flexibility that we didn't have before, but we'll also have to " -"do a few things the we didn't have to do before." -msgstr "" +#~ msgid "Building the ServerApp Docker image" +#~ msgstr "ServerApp Docker 이미지 빌드" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:18 -msgid "" -"Let's go deeper and see what it takes to move from ``NumPyClient`` to " -"``Client``!" 
-msgstr "" +#~ msgid "Running the ServerApp Docker image" +#~ msgstr "ServerApp Docker 이미지 실행" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:30 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:29 -msgid "Step 0: Preparation" -msgstr "" +#~ msgid "Now that we have built the ServerApp image, we can finally run it." +#~ msgstr "이제 ServerApp 이미지를 빌드했으니 이제 실행할 수 있습니다." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:117 -msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``." -msgstr "" +#~ msgid "" +#~ "``--superlink 192.168.1.100:9091``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Driver" +#~ msgstr "``--superlink 192.168.1.100:9091``: 이 옵션은 SuperLinks 드라이버의 주소를 지정합니다" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:259 -msgid "Step 1: Revisiting NumPyClient" -msgstr "" +#~ msgid "" +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your ServerApps." +#~ msgstr "" +#~ "로컬에서 Flower를 실행하는 것을 테스트하려면 `bridge " +#~ "network `__,를 생성하고 ``--network`` argument를 " +#~ "사용하여 ServerApp을 실행할 Docker 네트워크의 이름을 " +#~ "전달하면 됩니다." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:261 -msgid "" -"So far, we've implemented our client by subclassing " -"``flwr.client.NumPyClient``. The three methods we implemented are " -"``get_parameters``, ``fit``, and ``evaluate``. Finally, we wrap the " -"creation of instances of this class in a function called ``client_fn``:" -msgstr "" +#~ msgid "" +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "ServerApp binary. 
To see all available" +#~ " flags that the ServerApp supports, " +#~ "run:" +#~ msgstr "" +#~ "태그 뒤에 오는 모든 argument는 Flower " +#~ "ServerApp 바이너리에 전달됩니다. ServerApp에서 지원하는 " +#~ "사용 가능한 모든 플래그를 보려면 실행하세요:" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:309 -msgid "" -"We've seen this before, there's nothing new so far. The only *tiny* " -"difference compared to the previous notebook is naming, we've changed " -"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " -"``numpyclient_fn``. Let's run it to see the output we get:" -msgstr "" +#~ msgid "" +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your ServerApp container." +#~ msgstr "SSL을 사용하려면 PEM 인코딩된 루트 인증서를 ServerApp 컨테이너에 마운트해야 합니다." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:339 -msgid "" -"This works as expected, two clients are training for three rounds of " -"federated learning." -msgstr "" +#~ msgid "" +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the ServerApp to" +#~ " access the certificate within the " +#~ "container. Use the ``--root-certificates`` " +#~ "flags when starting the container." +#~ msgstr "" +#~ "인증서가 이미 로컬에 존재한다고 가정하면, ``--volume`` " +#~ "플래그를 사용하여 로컬 인증서를 컨테이너의 ``/app/`` " +#~ "디렉터리에 마운트할 수 있습니다. 이렇게 하면 " +#~ "ServerApp이 컨테이너 내의 인증서에 액세스할 수 " +#~ "있습니다. 컨테이너를 시작할 때 ``--root-" +#~ "certificates`` 플래그를 사용하세요." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:341 -msgid "" -"Let's dive a little bit deeper and discuss how Flower executes this " -"simulation. Whenever a client is selected to do some work, " -"``start_simulation`` calls the function ``numpyclient_fn`` to create an " -"instance of our ``FlowerNumPyClient`` (along with loading the model and " -"the data)." 
-msgstr "" +#~ msgid ":py:obj:`run_client_app `\\ \\(\\)" +#~ msgstr ":py:obj:`run_client_app `\\ \\(\\)" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:343 -msgid "" -"But here's the perhaps surprising part: Flower doesn't actually use the " -"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " -"makes it look like a subclass of ``flwr.client.Client``, not " -"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " -"know how to handle ``NumPyClient``'s, it only knows how to handle " -"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " -"top of ``Client``." -msgstr "" +#~ msgid ":py:obj:`run_supernode `\\ \\(\\)" +#~ msgstr ":py:obj:`run_supernode `\\ \\(\\)" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:345 -msgid "" -"Instead of building on top of ``NumPyClient``, we can directly build on " -"top of ``Client``." -msgstr "" +#~ msgid "d defaults to None." +#~ msgstr "d는 기본값이 None입니다." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:357 -msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" -msgstr "" +#~ msgid "Update R from dict/iterable E and F." +#~ msgstr "dict/iterable E 및 F에서 R을 업데이트합니다." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:359 -msgid "" -"Let's try to do the same thing using ``Client`` instead of " -"``NumPyClient``." -msgstr "" +#~ msgid "" +#~ ":py:obj:`RUN_DRIVER_API_ENTER " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_DRIVER_API_ENTER " +#~ "`\\" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:465 -msgid "" -"Before we discuss the code in more detail, let's try to run it! Gotta " -"make sure our new ``Client``-based client works, right?" 
-msgstr "" +#~ msgid "" +#~ ":py:obj:`RUN_DRIVER_API_LEAVE " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_DRIVER_API_LEAVE " +#~ "`\\" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:490 -msgid "" -"That's it, we're now using ``Client``. It probably looks similar to what " -"we've done with ``NumPyClient``. So what's the difference?" -msgstr "" +#~ msgid "" +#~ ":py:obj:`RUN_FLEET_API_ENTER " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_FLEET_API_ENTER " +#~ "`\\" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:492 -msgid "" -"First of all, it's more code. But why? The difference comes from the fact" -" that ``Client`` expects us to take care of parameter serialization and " -"deserialization. For Flower to be able to send parameters over the " -"network, it eventually needs to turn these parameters into ``bytes``. " -"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " -"serialization. Turning raw bytes into something more useful (like NumPy " -"``ndarray``'s) is called deserialization. Flower needs to do both: it " -"needs to serialize parameters on the server-side and send them to the " -"client, the client needs to deserialize them to use them for local " -"training, and then serialize the updated parameters again to send them " -"back to the server, which (finally!) deserializes them again in order to " -"aggregate them with the updates received from other clients." -msgstr "" +#~ msgid "" +#~ ":py:obj:`RUN_FLEET_API_LEAVE " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_FLEET_API_LEAVE " +#~ "`\\" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:495 -msgid "" -"The only *real* difference between Client and NumPyClient is that " -"NumPyClient takes care of serialization and deserialization for you. It " -"can do so because it expects you to return parameters as NumPy ndarray's," -" and it knows how to handle these. 
This makes working with machine " -"learning libraries that have good NumPy support (most of them) a breeze." -msgstr "" +#~ msgid ":py:obj:`DRIVER_CONNECT `\\" +#~ msgstr ":py:obj:`DRIVER_CONNECT `\\" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:497 -msgid "" -"In terms of API, there's one major difference: all methods in Client take" -" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " -"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " -"``NumPyClient`` on the other hand have multiple arguments (e.g., " -"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" -" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " -"``NumPyClient.fit``) if there are multiple things to handle. These " -"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " -"values you're used to from ``NumPyClient``." -msgstr "" +#~ msgid ":py:obj:`DRIVER_DISCONNECT `\\" +#~ msgstr ":py:obj:`DRIVER_DISCONNECT `\\" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:510 -msgid "Step 3: Custom serialization" -msgstr "" +#~ msgid "" +#~ ":py:obj:`START_DRIVER_ENTER " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`START_DRIVER_ENTER " +#~ "`\\" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:512 -msgid "" -"Here we will explore how to implement custom serialization with a simple " -"example." -msgstr "" +#~ msgid "" +#~ ":py:obj:`START_DRIVER_LEAVE " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`START_DRIVER_LEAVE " +#~ "`\\" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:514 -msgid "" -"But first what is serialization? Serialization is just the process of " -"converting an object into raw bytes, and equally as important, " -"deserialization is the process of converting raw bytes back into an " -"object. This is very useful for network communication. Indeed, without " -"serialization, you could not just a Python object through the internet." 
-msgstr "" +#~ msgid "" +#~ "An identifier that can be used " +#~ "when loading a particular data partition" +#~ " for a ClientApp. Making use of " +#~ "this identifier is more relevant when" +#~ " conducting simulations." +#~ msgstr "" +#~ "클라이언트 앱의 특정 데이터 파티션을 로드할 때 " +#~ "사용할 수 있는 식별자입니다. 시뮬레이션을 수행할 때 " +#~ "이 식별자를 사용하는 것이 더 적절합니다." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:516 -msgid "" -"Federated Learning relies heavily on internet communication for training " -"by sending Python objects back and forth between the clients and the " -"server. This means that serialization is an essential part of Federated " -"Learning." -msgstr "" +#~ msgid ":py:obj:`partition_id `\\" +#~ msgstr ":py:obj:`partition_id `\\" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:518 -msgid "" -"In the following section, we will write a basic example where instead of " -"sending a serialized version of our ``ndarray``\\ s containing our " -"parameters, we will first convert the ``ndarray`` into sparse matrices, " -"before sending them. This technique can be used to save bandwidth, as in " -"certain cases where the weights of a model are sparse (containing many 0 " -"entries), converting them to a sparse matrix can greatly improve their " -"bytesize." -msgstr "" +#~ msgid "An identifier telling which data partition a ClientApp should use." +#~ msgstr "클라이언트앱이 사용해야 하는 데이터 파티션을 알려주는 식별자입니다." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:521 -msgid "Our custom serialization/deserialization functions" -msgstr "" +#~ msgid ":py:obj:`run_server_app `\\ \\(\\)" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:523 -msgid "" -"This is where the real serialization/deserialization will happen, " -"especially in ``ndarray_to_sparse_bytes`` for serialization and " -"``sparse_bytes_to_ndarray`` for deserialization." 
-msgstr "" +#~ msgid ":py:obj:`run_superlink `\\ \\(\\)" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:525 -msgid "" -"Note that we imported the ``scipy.sparse`` library in order to convert " -"our arrays." -msgstr "" +#~ msgid "Run Flower SuperLink (Driver API and Fleet API)." +#~ msgstr "Flower SuperLink(Driver API 및 Fleet API)를 실행하세요." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:613 -msgid "Client-side" -msgstr "" +#~ msgid "" +#~ ":py:obj:`LegacyContext `\\ " +#~ "\\(state\\[\\, config\\, strategy\\, ...\\]\\)" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:615 -msgid "" -"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " -"will just have to call our custom functions in our " -"``flwr.client.Client``." -msgstr "" +#~ msgid ":py:obj:`flwr.server.strategy `\\" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:617 -msgid "" -"Indeed, in ``get_parameters`` we need to serialize the parameters we got " -"from our network using our custom ``ndarrays_to_sparse_parameters`` " -"defined above." -msgstr "" +#~ msgid ":py:obj:`flwr.server.workflow `\\" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:619 -msgid "" -"In ``fit``, we first need to deserialize the parameters coming from the " -"server using our custom ``sparse_parameters_to_ndarrays`` and then we " -"need to serialize our local results with " -"``ndarrays_to_sparse_parameters``." -msgstr "" +#~ msgid "run\\_driver\\_api" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:621 -msgid "" -"In ``evaluate``, we will only need to deserialize the global parameters " -"with our custom function." 
-msgstr "" +#~ msgid "run\\_fleet\\_api" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:725 -msgid "Server-side" -msgstr "" +#~ msgid "" +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg+ configuration " +#~ "to clients and collect their public " +#~ "keys. - 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:727 -msgid "" -"For this example, we will just use ``FedAvg`` as a strategy. To change " -"the serialization and deserialization here, we only need to reimplement " -"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other" -" functions of the strategy will be inherited from the super class " -"``FedAvg``." -msgstr "" +#~ msgid "key shares." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:729 -msgid "As you can see only one line as change in ``evaluate``:" -msgstr "" +#~ msgid "" +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg configuration " +#~ "to clients and collect their public " +#~ "keys. - 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:735 -msgid "" -"And for ``aggregate_fit``, we will first deserialize every result we " -"received:" -msgstr "" +#~ msgid "" +#~ ":py:obj:`start_simulation `\\" +#~ " \\(\\*\\, client\\_fn\\[\\, ...\\]\\)" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:744 -msgid "And then serialize the aggregated result:" -msgstr "" +#~ msgid "" +#~ "'A dictionary, e.g {\"\": , " +#~ "\"\": } to configure a " +#~ "backend. Values supported in are" +#~ " those included by " +#~ "`flwr.common.typing.ConfigsRecordValues`." 
+#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:903 -msgid "We can now run our custom serialization example!" -msgstr "" +#~ msgid "" +#~ "When diabled, only INFO, WARNING and " +#~ "ERROR log messages will be shown. " +#~ "If enabled, DEBUG-level logs will " +#~ "be displayed." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:934 -msgid "" -"In this part of the tutorial, we've seen how we can build clients by " -"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " -"convenience abstraction that makes it easier to work with machine " -"learning libraries that have good NumPy interoperability. ``Client`` is a" -" more flexible abstraction that allows us to do things that are not " -"possible in ``NumPyClient``. In order to do so, it requires us to handle " -"parameter serialization and deserialization ourselves." -msgstr "" +#~ msgid "" +#~ "A function creating client instances. " +#~ "The function must take a single " +#~ "`str` argument called `cid`. It should" +#~ " return a single client instance of" +#~ " type Client. Note that the created" +#~ " client instances are ephemeral and " +#~ "will often be destroyed after a " +#~ "single method invocation. Since client " +#~ "instances are not long-lived, they " +#~ "should not attempt to carry state " +#~ "over method invocations. Any state " +#~ "required by the instance (model, " +#~ "dataset, hyperparameters, ...) should be " +#~ "(re-)created in either the call to " +#~ "`client_fn` or the call to any of" +#~ " the client methods (e.g., load " +#~ "evaluation data in the `evaluate` method" +#~ " itself)." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:952 -msgid "" -"This is the final part of the Flower tutorial (for now!), " -"congratulations! You're now well equipped to understand the rest of the " -"documentation. 
There are many topics we didn't cover in the tutorial, we " -"recommend the following resources:" -msgstr "" +#~ msgid "" +#~ "The total number of clients in " +#~ "this simulation. This must be set " +#~ "if `clients_ids` is not set and " +#~ "vice-versa." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:954 -msgid "`Read Flower Docs `__" -msgstr "" +#~ msgid "" +#~ "List `client_id`s for each client. This" +#~ " is only required if `num_clients` is" +#~ " not set. Setting both `num_clients` " +#~ "and `clients_ids` with `len(clients_ids)` not" +#~ " equal to `num_clients` generates an " +#~ "error." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:955 -msgid "" -"`Check out Flower Code Examples " -"`__" -msgstr "" +#~ msgid "" +#~ "In this tutorial we will learn how" +#~ " to train a Convolutional Neural " +#~ "Network on CIFAR10 using Flower and " +#~ "PyTorch." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:956 -msgid "" -"`Use Flower Baselines for your research " -"`__" -msgstr "" +#~ msgid "" +#~ "*Clients* are responsible for generating " +#~ "individual weight-updates for the model" +#~ " based on their local datasets. These" +#~ " updates are then sent to the " +#~ "*server* which will aggregate them to" +#~ " produce a better model. Finally, the" +#~ " *server* sends this improved version " +#~ "of the model back to each " +#~ "*client*. A complete cycle of weight " +#~ "updates is called a *round*." +#~ msgstr "" + +#~ msgid "" +#~ "Now that we have a rough idea " +#~ "of what is going on, let's get " +#~ "started. We first need to install " +#~ "Flower. 
You can do this by running" +#~ " :" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:957 -msgid "" -"`Watch Flower Summit 2023 videos `__" -msgstr "" +#~ msgid "" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead and install PyTorch and " +#~ "the **torchvision** library:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 -msgid "Get started with Flower" -msgstr "" +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Deep Learning with" +#~ " PyTorch " +#~ "`_." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 -msgid "Welcome to the Flower federated learning tutorial!" -msgstr "" +#~ msgid "" +#~ "In a file called :code:`client.py`, " +#~ "import Flower and PyTorch related " +#~ "packages:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 -msgid "" -"In this notebook, we'll build a federated learning system using Flower, " -"`Flower Datasets `__ and PyTorch. In " -"part 1, we use PyTorch for the model training pipeline and data loading. " -"In part 2, we continue to federate the PyTorch-based pipeline using " -"Flower." -msgstr "" +#~ msgid "In addition, we define the device allocation in PyTorch with:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:17 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 -msgid "Let's get started!" -msgstr "" +#~ msgid "" +#~ "We use PyTorch to load CIFAR10, a" +#~ " popular colored image classification " +#~ "dataset for machine learning. 
The " +#~ "PyTorch :code:`DataLoader()` downloads the " +#~ "training and test data that are " +#~ "then normalized." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:31 -msgid "" -"Before we begin with any actual code, let's make sure that we have " -"everything we need." -msgstr "" +#~ msgid "" +#~ "Define the loss and optimizer with " +#~ "PyTorch. The training of the dataset " +#~ "is done by looping over the " +#~ "dataset, measure the corresponding loss " +#~ "and optimize it." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:45 -msgid "" -"Next, we install the necessary packages for PyTorch (``torch`` and " -"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " -"(``flwr``):" -msgstr "" +#~ msgid "" +#~ "Define then the validation of the " +#~ "machine learning network. We loop over" +#~ " the test set and measure the " +#~ "loss and accuracy of the test set." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:105 -msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" -" GPU > Save``). Note, however, that Google Colab is not always able to " -"offer GPU acceleration. If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." -msgstr "" +#~ msgid "" +#~ "After defining the training and testing" +#~ " of a PyTorch machine learning model," +#~ " we use the functions for the " +#~ "Flower clients." 
+#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:118 -msgid "Loading the data" -msgstr "" +#~ msgid "" +#~ "The Flower clients will use a " +#~ "simple CNN adapted from 'PyTorch: A " +#~ "60 Minute Blitz':" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:120 -msgid "" -"Federated learning can be applied to many different types of tasks across" -" different domains. In this tutorial, we introduce federated learning by " -"training a simple convolutional neural network (CNN) on the popular " -"CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that " -"distinguish between images from ten different classes: 'airplane', " -"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " -"'truck'." -msgstr "" +#~ msgid "" +#~ "After loading the data set with " +#~ ":code:`load_data()` we define the Flower " +#~ "interface." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:131 -msgid "" -"We simulate having multiple datasets from multiple organizations (also " -"called the \"cross-silo\" setting in federated learning) by splitting the" -" original CIFAR-10 dataset into multiple partitions. Each partition will " -"represent the data from a single organization. We're doing this purely " -"for experimentation purposes, in the real world there's no need for data " -"splitting because each organization already has their own data (so the " -"data is naturally partitioned)." -msgstr "" +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses " +#~ "PyTorch. 
Implementing :code:`NumPyClient` usually" +#~ " means defining the following methods " +#~ "(:code:`set_parameters` is optional though):" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:133 -msgid "" -"Each organization will act as a client in the federated learning system. " -"So having ten organizations participate in a federation means having ten " -"clients connected to the federated learning server." -msgstr "" +#~ msgid "receive the updated local model weights" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:144 -msgid "" -"Let's now create the Federated Dataset abstraction that from ``flwr-" -"datasets`` that partitions the CIFAR-10. We will create small training " -"and test set for each edge device and wrap each of them into a PyTorch " -"``DataLoader``:" -msgstr "" +#~ msgid "which can be implemented in the following way:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:198 -msgid "" -"We now have a list of ten training sets and ten validation sets " -"(``trainloaders`` and ``valloaders``) representing the data of ten " -"different organizations. Each ``trainloader``/``valloader`` pair contains" -" 4000 training examples and 1000 validation examples. There's also a " -"single ``testloader`` (we did not split the test set). Again, this is " -"only necessary for building research or educational systems, actual " -"federated learning systems have their data naturally distributed across " -"multiple partitions." -msgstr "" +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/quickstart-" +#~ "pytorch`." 
+#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:201 -msgid "" -"Let's take a look at the first batch of images and labels in the first " -"training set (i.e., ``trainloaders[0]``) before we move on:" -msgstr "" +#~ msgid "" +#~ "In this example, we split the " +#~ "dataset into two partitions with uniform" +#~ " distribution (:code:`IidPartitioner(num_partitions=2)`). " +#~ "Then, we load the partition for " +#~ "the given client based on " +#~ ":code:`node_id`:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:240 -msgid "" -"The output above shows a random batch of images from the first " -"``trainloader`` in our list of ten ``trainloaders``. It also prints the " -"labels associated with each image (i.e., one of the ten possible labels " -"we've seen above). If you run the cell again, you should see another " -"batch of images." -msgstr "" +#~ msgid "" +#~ "The :code:`self.bst` is used to keep " +#~ "the Booster objects that remain " +#~ "consistent across rounds, allowing them " +#~ "to store predictions from trees " +#~ "integrated in earlier rounds and " +#~ "maintain other essential data structures " +#~ "for training." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:252 -msgid "Step 1: Centralized Training with PyTorch" -msgstr "" +#~ msgid "" +#~ "In :code:`fit`, at the first round, " +#~ "we call :code:`xgb.train()` to build up" +#~ " the first set of trees. the " +#~ "returned Booster object and config are" +#~ " stored in :code:`self.bst` and " +#~ ":code:`self.config`, respectively. 
From the " +#~ "second round, we load the global " +#~ "model sent from server to " +#~ ":code:`self.bst`, and then update model " +#~ "weights on local training data with " +#~ "function :code:`local_boost` as follows:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:263 -msgid "" -"Next, we're going to use PyTorch to define a simple convolutional neural " -"network. This introduction assumes basic familiarity with PyTorch, so it " -"doesn't cover the PyTorch-related aspects in full detail. If you want to " -"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " -"MINUTE BLITZ " -"`__." -msgstr "" +#~ msgid "" +#~ "Given :code:`num_local_round`, we update trees" +#~ " by calling :code:`self.bst.update` method. " +#~ "After training, the last " +#~ ":code:`N=num_local_round` trees will be " +#~ "extracted to send to the server." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:275 -msgid "Defining the model" -msgstr "" +#~ msgid "" +#~ "In :code:`evaluate`, we call " +#~ ":code:`self.bst.eval_set` function to conduct " +#~ "evaluation on valid set. The AUC " +#~ "value will be returned." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:277 -msgid "" -"We use the simple CNN described in the `PyTorch tutorial " -"`__:" -msgstr "" +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client`and call" +#~ " :code:`fl.client.start_client()`. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." 
+#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:314 -msgid "Let's continue with the usual training and test functions:" -msgstr "" +#~ msgid "" +#~ "We use two clients for this " +#~ "example. An :code:`evaluate_metrics_aggregation` " +#~ "function is defined to collect and " +#~ "wighted average the AUC values from " +#~ "clients." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:374 -msgid "Training the model" -msgstr "" +#~ msgid "" +#~ "Welcome to the third part of the" +#~ " Flower federated learning tutorial. In " +#~ "previous parts of this tutorial, we " +#~ "introduced federated learning with PyTorch " +#~ "and Flower (`part 1 " +#~ "`__) and we " +#~ "learned how strategies can be used " +#~ "to customize the execution on both " +#~ "the server and the clients (`part " +#~ "2 `__)." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:376 -msgid "" -"We now have all the basic building blocks we need: a dataset, a model, a " -"training function, and a test function. Let's put them together to train " -"the model on the dataset of one of our organizations " -"(``trainloaders[0]``). This simulates the reality of most machine " -"learning projects today: each organization has their own data and trains " -"models only on this internal data:" -msgstr "" +#~ msgid "" +#~ "In this notebook, we'll continue to " +#~ "customize the federated learning system " +#~ "we built previously by creating a " +#~ "custom version of FedAvg (again, using" +#~ " `Flower `__ and `PyTorch " +#~ "`__)." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 -msgid "" -"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " -"in a test set accuracy of about 41%, which is not good, but at the same " -"time, it doesn't really matter for the purposes of this tutorial. 
The " -"intent was just to show a simplistic centralized training pipeline that " -"sets the stage for what comes next - federated learning!" -msgstr "" +#~ msgid "" +#~ "`Star Flower on GitHub " +#~ "`__ ⭐️ and join " +#~ "the Flower community on Slack to " +#~ "connect, ask questions, and get help:" +#~ " `Join Slack `__" +#~ " 🌼 We'd love to hear from you" +#~ " in the ``#introductions`` channel! And " +#~ "if anything is unclear, head over " +#~ "to the ``#questions`` channel." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 -msgid "Step 2: Federated Learning with Flower" -msgstr "" +#~ msgid "Let's build a new ``Strategy`` from scratch!" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 -msgid "" -"Step 1 demonstrated a simple centralized training pipeline. All data was " -"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." -" Next, we'll simulate a situation where we have multiple datasets in " -"multiple organizations and where we train a model over these " -"organizations using federated learning." -msgstr "" +#~ msgid "" +#~ "Let's now load the CIFAR-10 training " +#~ "and test set, partition them into " +#~ "ten smaller datasets (each split into" +#~ " training and validation set), and " +#~ "wrap everything in their own " +#~ "``DataLoader``. We introduce a new " +#~ "parameter ``num_clients`` which allows us " +#~ "to call ``load_datasets`` with different " +#~ "numbers of clients." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 -msgid "Updating model parameters" -msgstr "" +#~ msgid "" +#~ "To implement the Flower client, we " +#~ "(again) create a subclass of " +#~ "``flwr.client.NumPyClient`` and implement the " +#~ "three methods ``get_parameters``, ``fit``, and" +#~ " ``evaluate``. 
Here, we also pass the" +#~ " ``cid`` to the client and use " +#~ "it log additional details:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 -msgid "" -"In federated learning, the server sends the global model parameters to " -"the client, and the client updates the local model with the parameters " -"received from the server. It then trains the model on the local data " -"(which changes the model parameters locally) and sends the " -"updated/changed model parameters back to the server (or, alternatively, " -"it sends just the gradients back to the server, not the full model " -"parameters)." -msgstr "" +#~ msgid "" +#~ "Let's go deeper and see what it" +#~ " takes to move from ``NumPyClient`` " +#~ "to ``Client``!" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 -msgid "" -"We need two helper functions to update the local model with parameters " -"received from the server and to get the updated model parameters from the" -" local model: ``set_parameters`` and ``get_parameters``. The following " -"two functions do just that for the PyTorch model above." -msgstr "" +#~ msgid "" +#~ "So far, we've implemented our client " +#~ "by subclassing ``flwr.client.NumPyClient``. The " +#~ "three methods we implemented are " +#~ "``get_parameters``, ``fit``, and ``evaluate``. " +#~ "Finally, we wrap the creation of " +#~ "instances of this class in a " +#~ "function called ``client_fn``:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 -msgid "" -"The details of how this works are not really important here (feel free to" -" consult the PyTorch documentation if you want to learn more). In " -"essence, we use ``state_dict`` to access PyTorch model parameter tensors." 
-" The parameter tensors are then converted to/from a list of NumPy " -"ndarray's (which Flower knows how to serialize/deserialize):" -msgstr "" +#~ msgid "" +#~ "We've seen this before, there's nothing" +#~ " new so far. The only *tiny* " +#~ "difference compared to the previous " +#~ "notebook is naming, we've changed " +#~ "``FlowerClient`` to ``FlowerNumPyClient`` and " +#~ "``client_fn`` to ``numpyclient_fn``. Let's run" +#~ " it to see the output we get:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 -msgid "Implementing a Flower client" -msgstr "" +#~ msgid "" +#~ "This works as expected, two clients " +#~ "are training for three rounds of " +#~ "federated learning." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 -msgid "" -"With that out of the way, let's move on to the interesting part. " -"Federated learning systems consist of a server and multiple clients. In " -"Flower, we create clients by implementing subclasses of " -"``flwr.client.Client`` or ``flwr.client.NumPyClient``. We use " -"``NumPyClient`` in this tutorial because it is easier to implement and " -"requires us to write less boilerplate." -msgstr "" +#~ msgid "" +#~ "Let's dive a little bit deeper and" +#~ " discuss how Flower executes this " +#~ "simulation. Whenever a client is " +#~ "selected to do some work, " +#~ "``start_simulation`` calls the function " +#~ "``numpyclient_fn`` to create an instance " +#~ "of our ``FlowerNumPyClient`` (along with " +#~ "loading the model and the data)." 
+#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 -msgid "" -"To implement the Flower client, we create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``:" -msgstr "" +#~ msgid "" +#~ "`Check out Flower Code Examples " +#~ "`__" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 -msgid "``get_parameters``: Return the current local model parameters" -msgstr "" +#~ msgid "" +#~ "`Watch Flower Summit 2023 videos " +#~ "`__" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 -msgid "" -"``fit``: Receive model parameters from the server, train the model " -"parameters on the local data, and return the (updated) model parameters " -"to the server" -msgstr "" +#~ msgid "" +#~ "In this notebook, we'll build a " +#~ "federated learning system using Flower, " +#~ "`Flower Datasets `__ " +#~ "and PyTorch. In part 1, we use " +#~ "PyTorch for the model training pipeline" +#~ " and data loading. In part 2, " +#~ "we continue to federate the PyTorch-" +#~ "based pipeline using Flower." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 -msgid "" -"``evaluate``: Receive model parameters from the server, evaluate the " -"model parameters on the local data, and return the evaluation result to " -"the server" -msgstr "" +#~ msgid "Loading the data" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 -msgid "" -"We mentioned that our clients will use the previously defined PyTorch " -"components for model training and evaluation. 
Let's see a simple Flower " -"client implementation that brings everything together:" -msgstr "" +#~ msgid "" +#~ "We simulate having multiple datasets " +#~ "from multiple organizations (also called " +#~ "the \"cross-silo\" setting in federated" +#~ " learning) by splitting the original " +#~ "CIFAR-10 dataset into multiple partitions. " +#~ "Each partition will represent the data" +#~ " from a single organization. We're " +#~ "doing this purely for experimentation " +#~ "purposes, in the real world there's " +#~ "no need for data splitting because " +#~ "each organization already has their own" +#~ " data (so the data is naturally " +#~ "partitioned)." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 -msgid "" -"Our class ``FlowerClient`` defines how local training/evaluation will be " -"performed and allows Flower to call the local training/evaluation through" -" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" -" *single client* in our federated learning system. Federated learning " -"systems have multiple clients (otherwise, there's not much to federate), " -"so each client will be represented by its own instance of " -"``FlowerClient``. If we have, for example, three clients in our workload," -" then we'd have three instances of ``FlowerClient``. Flower calls " -"``FlowerClient.fit`` on the respective instance when the server selects a" -" particular client for training (and ``FlowerClient.evaluate`` for " -"evaluation)." -msgstr "" +#~ msgid "" +#~ "Each organization will act as a " +#~ "client in the federated learning system." +#~ " So having ten organizations participate" +#~ " in a federation means having ten " +#~ "clients connected to the federated " +#~ "learning server." 
+#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:517 -msgid "Using the Virtual Client Engine" -msgstr "" +#~ msgid "" +#~ "Let's now create the Federated Dataset" +#~ " abstraction that from ``flwr-datasets``" +#~ " that partitions the CIFAR-10. We " +#~ "will create small training and test " +#~ "set for each edge device and wrap" +#~ " each of them into a PyTorch " +#~ "``DataLoader``:" +#~ msgstr "" + +#~ msgid "" +#~ "We now have a list of ten " +#~ "training sets and ten validation sets" +#~ " (``trainloaders`` and ``valloaders``) " +#~ "representing the data of ten different" +#~ " organizations. Each ``trainloader``/``valloader`` " +#~ "pair contains 4000 training examples and" +#~ " 1000 validation examples. There's also " +#~ "a single ``testloader`` (we did not " +#~ "split the test set). Again, this " +#~ "is only necessary for building research" +#~ " or educational systems, actual federated" +#~ " learning systems have their data " +#~ "naturally distributed across multiple " +#~ "partitions." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:519 -msgid "" -"In this notebook, we want to simulate a federated learning system with 10" -" clients on a single machine. This means that the server and all 10 " -"clients will live on a single machine and share resources such as CPU, " -"GPU, and memory. Having 10 clients would mean having 10 instances of " -"``FlowerClient`` in memory. Doing this on a single machine can quickly " -"exhaust the available memory resources, even if only a subset of these " -"clients participates in a single round of federated learning." 
-msgstr "" +#~ msgid "" +#~ "Let's take a look at the first " +#~ "batch of images and labels in the" +#~ " first training set (i.e., " +#~ "``trainloaders[0]``) before we move on:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:521 -msgid "" -"In addition to the regular capabilities where server and clients run on " -"multiple machines, Flower, therefore, provides special simulation " -"capabilities that create ``FlowerClient`` instances only when they are " -"actually necessary for training or evaluation. To enable the Flower " -"framework to create clients when necessary, we need to implement a " -"function called ``client_fn`` that creates a ``FlowerClient`` instance on" -" demand. Flower calls ``client_fn`` whenever it needs an instance of one " -"particular client to call ``fit`` or ``evaluate`` (those instances are " -"usually discarded after use, so they should not keep any local state). " -"Clients are identified by a client ID, or short ``cid``. The ``cid`` can " -"be used, for example, to load different local data partitions for " -"different clients, as can be seen below:" -msgstr "" +#~ msgid "" +#~ "The output above shows a random " +#~ "batch of images from the first " +#~ "``trainloader`` in our list of ten " +#~ "``trainloaders``. It also prints the " +#~ "labels associated with each image (i.e.," +#~ " one of the ten possible labels " +#~ "we've seen above). If you run the" +#~ " cell again, you should see another" +#~ " batch of images." 
+#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:556 -msgid "Starting the training" -msgstr "" +#~ msgid "Defining the model" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:558 -msgid "" -"We now have the class ``FlowerClient`` which defines client-side " -"training/evaluation and ``client_fn`` which allows Flower to create " -"``FlowerClient`` instances whenever it needs to call ``fit`` or " -"``evaluate`` on one particular client. The last step is to start the " -"actual simulation using ``flwr.simulation.start_simulation``." -msgstr "" +#~ msgid "Training the model" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:560 -msgid "" -"The function ``start_simulation`` accepts a number of arguments, amongst " -"them the ``client_fn`` used to create ``FlowerClient`` instances, the " -"number of clients to simulate (``num_clients``), the number of federated " -"learning rounds (``num_rounds``), and the strategy. The strategy " -"encapsulates the federated learning approach/algorithm, for example, " -"*Federated Averaging* (FedAvg)." -msgstr "" +#~ msgid "" +#~ "We now have all the basic building" +#~ " blocks we need: a dataset, a " +#~ "model, a training function, and a " +#~ "test function. Let's put them together" +#~ " to train the model on the " +#~ "dataset of one of our organizations " +#~ "(``trainloaders[0]``). This simulates the " +#~ "reality of most machine learning " +#~ "projects today: each organization has " +#~ "their own data and trains models " +#~ "only on this internal data:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:562 -msgid "" -"Flower has a number of built-in strategies, but we can also use our own " -"strategy implementations to customize nearly all aspects of the federated" -" learning approach. 
For this example, we use the built-in ``FedAvg`` " -"implementation and customize it using a few basic parameters. The last " -"step is the actual call to ``start_simulation`` which - you guessed it - " -"starts the simulation:" -msgstr "" +#~ msgid "" +#~ "Training the simple CNN on our " +#~ "CIFAR-10 split for 5 epochs should " +#~ "result in a test set accuracy of" +#~ " about 41%, which is not good, " +#~ "but at the same time, it doesn't" +#~ " really matter for the purposes of" +#~ " this tutorial. The intent was just" +#~ " to show a simplistic centralized " +#~ "training pipeline that sets the stage" +#~ " for what comes next - federated " +#~ "learning!" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:608 -msgid "Behind the scenes" -msgstr "" +#~ msgid "Updating model parameters" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:610 -msgid "So how does this work? How does Flower execute this simulation?" -msgstr "" +#~ msgid "" +#~ "In federated learning, the server sends" +#~ " the global model parameters to the" +#~ " client, and the client updates the" +#~ " local model with the parameters " +#~ "received from the server. It then " +#~ "trains the model on the local data" +#~ " (which changes the model parameters " +#~ "locally) and sends the updated/changed " +#~ "model parameters back to the server " +#~ "(or, alternatively, it sends just the" +#~ " gradients back to the server, not" +#~ " the full model parameters)." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:612 -#, python-format -msgid "" -"When we call ``start_simulation``, we tell Flower that there are 10 " -"clients (``num_clients=10``). Flower then goes ahead an asks the " -"``FedAvg`` strategy to select clients. ``FedAvg`` knows that it should " -"select 100% of the available clients (``fraction_fit=1.0``), so it goes " -"ahead and selects 10 random clients (i.e., 100% of 10)." 
-msgstr "" +#~ msgid "" +#~ "The details of how this works are" +#~ " not really important here (feel free" +#~ " to consult the PyTorch documentation " +#~ "if you want to learn more). In " +#~ "essence, we use ``state_dict`` to access" +#~ " PyTorch model parameter tensors. The " +#~ "parameter tensors are then converted " +#~ "to/from a list of NumPy ndarray's " +#~ "(which Flower knows how to " +#~ "serialize/deserialize):" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:614 -msgid "" -"Flower then asks the selected 10 clients to train the model. When the " -"server receives the model parameter updates from the clients, it hands " -"those updates over to the strategy (*FedAvg*) for aggregation. The " -"strategy aggregates those updates and returns the new global model, which" -" then gets used in the next round of federated learning." -msgstr "" +#~ msgid "Implementing a Flower client" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:626 -msgid "Where's the accuracy?" -msgstr "" +#~ msgid "" +#~ "With that out of the way, let's" +#~ " move on to the interesting part. " +#~ "Federated learning systems consist of a" +#~ " server and multiple clients. In " +#~ "Flower, we create clients by " +#~ "implementing subclasses of ``flwr.client.Client``" +#~ " or ``flwr.client.NumPyClient``. We use " +#~ "``NumPyClient`` in this tutorial because " +#~ "it is easier to implement and " +#~ "requires us to write less boilerplate." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:628 -msgid "" -"You may have noticed that all metrics except for ``losses_distributed`` " -"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" 
-msgstr "" +#~ msgid "" +#~ "To implement the Flower client, we " +#~ "create a subclass of " +#~ "``flwr.client.NumPyClient`` and implement the " +#~ "three methods ``get_parameters``, ``fit``, and" +#~ " ``evaluate``:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:630 -msgid "" -"Flower can automatically aggregate losses returned by individual clients," -" but it cannot do the same for metrics in the generic metrics dictionary " -"(the one with the ``accuracy`` key). Metrics dictionaries can contain " -"very different kinds of metrics and even key/value pairs that are not " -"metrics at all, so the framework does not (and can not) know how to " -"handle these automatically." -msgstr "" +#~ msgid "" +#~ "``fit``: Receive model parameters from " +#~ "the server, train the model parameters" +#~ " on the local data, and return " +#~ "the (updated) model parameters to the" +#~ " server" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:632 -msgid "" -"As users, we need to tell the framework how to handle/aggregate these " -"custom metrics, and we do so by passing metric aggregation functions to " -"the strategy. The strategy will then call these functions whenever it " -"receives fit or evaluate metrics from clients. The two possible functions" -" are ``fit_metrics_aggregation_fn`` and " -"``evaluate_metrics_aggregation_fn``." 
-msgstr "" +#~ msgid "" +#~ "``evaluate``: Receive model parameters from" +#~ " the server, evaluate the model " +#~ "parameters on the local data, and " +#~ "return the evaluation result to the " +#~ "server" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:634 -msgid "" -"Let's create a simple weighted averaging function to aggregate the " -"``accuracy`` metric we return from ``evaluate``:" -msgstr "" +#~ msgid "" +#~ "Our class ``FlowerClient`` defines how " +#~ "local training/evaluation will be performed" +#~ " and allows Flower to call the " +#~ "local training/evaluation through ``fit`` and" +#~ " ``evaluate``. Each instance of " +#~ "``FlowerClient`` represents a *single client*" +#~ " in our federated learning system. " +#~ "Federated learning systems have multiple " +#~ "clients (otherwise, there's not much to" +#~ " federate), so each client will be" +#~ " represented by its own instance of" +#~ " ``FlowerClient``. If we have, for " +#~ "example, three clients in our workload," +#~ " then we'd have three instances of" +#~ " ``FlowerClient``. Flower calls " +#~ "``FlowerClient.fit`` on the respective " +#~ "instance when the server selects a " +#~ "particular client for training (and " +#~ "``FlowerClient.evaluate`` for evaluation)." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:660 -msgid "" -"The only thing left to do is to tell the strategy to call this function " -"whenever it receives evaluation metric dictionaries from the clients:" -msgstr "" +#~ msgid "Using the Virtual Client Engine" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:697 -msgid "" -"We now have a full system that performs federated training and federated " -"evaluation. It uses the ``weighted_average`` function to aggregate custom" -" evaluation metrics and calculates a single ``accuracy`` metric across " -"all clients on the server side." 
-msgstr "" +#~ msgid "" +#~ "In this notebook, we want to " +#~ "simulate a federated learning system " +#~ "with 10 clients on a single " +#~ "machine. This means that the server " +#~ "and all 10 clients will live on" +#~ " a single machine and share resources" +#~ " such as CPU, GPU, and memory. " +#~ "Having 10 clients would mean having " +#~ "10 instances of ``FlowerClient`` in " +#~ "memory. Doing this on a single " +#~ "machine can quickly exhaust the " +#~ "available memory resources, even if only" +#~ " a subset of these clients " +#~ "participates in a single round of " +#~ "federated learning." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:699 -msgid "" -"The other two categories of metrics (``losses_centralized`` and " -"``metrics_centralized``) are still empty because they only apply when " -"centralized evaluation is being used. Part two of the Flower tutorial " -"will cover centralized evaluation." -msgstr "" +#~ msgid "" +#~ "In addition to the regular capabilities" +#~ " where server and clients run on " +#~ "multiple machines, Flower, therefore, provides" +#~ " special simulation capabilities that " +#~ "create ``FlowerClient`` instances only when" +#~ " they are actually necessary for " +#~ "training or evaluation. To enable the" +#~ " Flower framework to create clients " +#~ "when necessary, we need to implement " +#~ "a function called ``client_fn`` that " +#~ "creates a ``FlowerClient`` instance on " +#~ "demand. Flower calls ``client_fn`` whenever" +#~ " it needs an instance of one " +#~ "particular client to call ``fit`` or " +#~ "``evaluate`` (those instances are usually " +#~ "discarded after use, so they should " +#~ "not keep any local state). Clients " +#~ "are identified by a client ID, or" +#~ " short ``cid``. 
The ``cid`` can be" +#~ " used, for example, to load different" +#~ " local data partitions for different " +#~ "clients, as can be seen below:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 -msgid "Final remarks" -msgstr "" +#~ msgid "Starting the training" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 -msgid "" -"Congratulations, you just trained a convolutional neural network, " -"federated over 10 clients! With that, you understand the basics of " -"federated learning with Flower. The same approach you've seen can be used" -" with other machine learning frameworks (not just PyTorch) and tasks (not" -" just CIFAR-10 images classification), for example NLP with Hugging Face " -"Transformers or speech with SpeechBrain." -msgstr "" +#~ msgid "" +#~ "We now have the class ``FlowerClient``" +#~ " which defines client-side " +#~ "training/evaluation and ``client_fn`` which " +#~ "allows Flower to create ``FlowerClient`` " +#~ "instances whenever it needs to call " +#~ "``fit`` or ``evaluate`` on one " +#~ "particular client. The last step is " +#~ "to start the actual simulation using " +#~ "``flwr.simulation.start_simulation``." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:715 -msgid "" -"In the next notebook, we're going to cover some more advanced concepts. " -"Want to customize your strategy? Initialize parameters on the server " -"side? Or evaluate the aggregated model on the server side? We'll cover " -"all this and more in the next tutorial." 
-msgstr "" +#~ msgid "" +#~ "The function ``start_simulation`` accepts a" +#~ " number of arguments, amongst them " +#~ "the ``client_fn`` used to create " +#~ "``FlowerClient`` instances, the number of " +#~ "clients to simulate (``num_clients``), the " +#~ "number of federated learning rounds " +#~ "(``num_rounds``), and the strategy. The " +#~ "strategy encapsulates the federated learning" +#~ " approach/algorithm, for example, *Federated " +#~ "Averaging* (FedAvg)." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:733 -msgid "" -"The `Flower Federated Learning Tutorial - Part 2 " -"`__ goes into more depth about strategies and all " -"the advanced things you can build with them." -msgstr "" +#~ msgid "" +#~ "Flower has a number of built-in" +#~ " strategies, but we can also use " +#~ "our own strategy implementations to " +#~ "customize nearly all aspects of the " +#~ "federated learning approach. For this " +#~ "example, we use the built-in " +#~ "``FedAvg`` implementation and customize it " +#~ "using a few basic parameters. The " +#~ "last step is the actual call to" +#~ " ``start_simulation`` which - you guessed" +#~ " it - starts the simulation:" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 -msgid "Use a federated learning strategy" -msgstr "" +#~ msgid "" +#~ "When we call ``start_simulation``, we " +#~ "tell Flower that there are 10 " +#~ "clients (``num_clients=10``). Flower then goes" +#~ " ahead an asks the ``FedAvg`` " +#~ "strategy to select clients. ``FedAvg`` " +#~ "knows that it should select 100% " +#~ "of the available clients " +#~ "(``fraction_fit=1.0``), so it goes ahead " +#~ "and selects 10 random clients (i.e., " +#~ "100% of 10)." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 -msgid "" -"Welcome to the next part of the federated learning tutorial. 
In previous " -"parts of this tutorial, we introduced federated learning with PyTorch and" -" Flower (`part 1 `__)." -msgstr "" +#~ msgid "" +#~ "Flower then asks the selected 10 " +#~ "clients to train the model. When " +#~ "the server receives the model parameter" +#~ " updates from the clients, it hands" +#~ " those updates over to the strategy" +#~ " (*FedAvg*) for aggregation. The strategy" +#~ " aggregates those updates and returns " +#~ "the new global model, which then " +#~ "gets used in the next round of " +#~ "federated learning." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 -msgid "" -"In this notebook, we'll begin to customize the federated learning system " -"we built in the introductory notebook (again, using `Flower " -"`__ and `PyTorch `__)." -msgstr "" +#~ msgid "" +#~ "The only thing left to do is " +#~ "to tell the strategy to call this" +#~ " function whenever it receives evaluation" +#~ " metric dictionaries from the clients:" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:17 -msgid "Let's move beyond FedAvg with Flower strategies!" -msgstr "" +#~ msgid "" +#~ "In this notebook, we'll begin to " +#~ "customize the federated learning system " +#~ "we built in the introductory notebook" +#~ " (again, using `Flower `__" +#~ " and `PyTorch `__)." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:309 -msgid "Strategy customization" -msgstr "" +#~ msgid "Let's move beyond FedAvg with Flower strategies!" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:311 -msgid "" -"So far, everything should look familiar if you've worked through the " -"introductory notebook. With that, we're ready to introduce a number of " -"new features." 
-msgstr "" +#~ msgid "" +#~ "Flower, by default, initializes the " +#~ "global model by asking one random " +#~ "client for the initial parameters. In" +#~ " many cases, we want more control " +#~ "over parameter initialization though. Flower" +#~ " therefore allows you to directly " +#~ "pass the initial parameters to the " +#~ "Strategy:" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 -msgid "Server-side parameter **initialization**" -msgstr "" +#~ msgid "" +#~ "Passing ``initial_parameters`` to the " +#~ "``FedAvg`` strategy prevents Flower from " +#~ "asking one of the clients for the" +#~ " initial parameters. If we look " +#~ "closely, we can see that the logs" +#~ " do not show any calls to the" +#~ " ``FlowerClient.get_parameters`` method." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:325 -msgid "" -"Flower, by default, initializes the global model by asking one random " -"client for the initial parameters. In many cases, we want more control " -"over parameter initialization though. Flower therefore allows you to " -"directly pass the initial parameters to the Strategy:" -msgstr "" +#~ msgid "" +#~ "We've seen the function ``start_simulation``" +#~ " before. It accepts a number of " +#~ "arguments, amongst them the ``client_fn`` " +#~ "used to create ``FlowerClient`` instances, " +#~ "the number of clients to simulate " +#~ "``num_clients``, the number of rounds " +#~ "``num_rounds``, and the strategy." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:370 -msgid "" -"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" -" from asking one of the clients for the initial parameters. If we look " -"closely, we can see that the logs do not show any calls to the " -"``FlowerClient.get_parameters`` method." 
-msgstr "" +#~ msgid "" +#~ "Next, we'll just pass this function " +#~ "to the FedAvg strategy before starting" +#~ " the simulation:" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:382 -msgid "Starting with a customized strategy" -msgstr "" +#~ msgid "" +#~ "We now have 1000 partitions, each " +#~ "holding 45 training and 5 validation " +#~ "examples. Given that the number of " +#~ "training examples on each client is " +#~ "quite small, we should probably train" +#~ " the model a bit longer, so we" +#~ " configure the clients to perform 3" +#~ " local training epochs. We should " +#~ "also adjust the fraction of clients " +#~ "selected for training during each round" +#~ " (we don't want all 1000 clients " +#~ "participating in every round), so we " +#~ "adjust ``fraction_fit`` to ``0.05``, which " +#~ "means that only 5% of available " +#~ "clients (so 50 clients) will be " +#~ "selected for training each round:" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:384 -msgid "" -"We've seen the function ``start_simulation`` before. It accepts a number " -"of arguments, amongst them the ``client_fn`` used to create " -"``FlowerClient`` instances, the number of clients to simulate " -"``num_clients``, the number of rounds ``num_rounds``, and the strategy." -msgstr "" +#~ msgid "|93b02017c78049bbbd5ae456dcb2c91b|" +#~ msgstr "|93b02017c78049bbbd5ae456dcb2c91b|" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:386 -msgid "" -"The strategy encapsulates the federated learning approach/algorithm, for " -"example, ``FedAvg`` or ``FedAdagrad``. 
Let's try to use a different " -"strategy this time:" -msgstr "" +#~ msgid "|01471150fd5144c080a176b43e92a3ff|" +#~ msgstr "|01471150fd5144c080a176b43e92a3ff|" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:424 -msgid "Server-side parameter **evaluation**" -msgstr "" +#~ msgid "|9bc21c7dbd17444a8f070c60786e3484|" +#~ msgstr "|9bc21c7dbd17444a8f070c60786e3484|" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:426 -msgid "" -"Flower can evaluate the aggregated model on the server-side or on the " -"client-side. Client-side and server-side evaluation are similar in some " -"ways, but different in others." -msgstr "" +#~ msgid "|3047bbce54b34099ae559963d0420d79|" +#~ msgstr "|3047bbce54b34099ae559963d0420d79|" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:428 -msgid "" -"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " -"simple: it works the same way that evaluation in centralized machine " -"learning does. If there is a server-side dataset that can be used for " -"evaluation purposes, then that's great. We can evaluate the newly " -"aggregated model after each round of training without having to send the " -"model to clients. We're also fortunate in the sense that our entire " -"evaluation dataset is available at all times." -msgstr "" +#~ msgid "|e9f8ce948593444fb838d2f354c7ec5d|" +#~ msgstr "|e9f8ce948593444fb838d2f354c7ec5d|" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:430 -msgid "" -"**Federated Evaluation** (or *client-side evaluation*) is more complex, " -"but also more powerful: it doesn't require a centralized dataset and " -"allows us to evaluate models over a larger set of data, which often " -"yields more realistic evaluation results. In fact, many scenarios require" -" us to use **Federated Evaluation** if we want to get representative " -"evaluation results at all. 
But this power comes at a cost: once we start " -"to evaluate on the client side, we should be aware that our evaluation " -"dataset can change over consecutive rounds of learning if those clients " -"are not always available. Moreover, the dataset held by each client can " -"also change over consecutive rounds. This can lead to evaluation results " -"that are not stable, so even if we would not change the model, we'd see " -"our evaluation results fluctuate over consecutive rounds." -msgstr "" +#~ msgid "|c24c1478b30e4f74839208628a842d1e|" +#~ msgstr "|c24c1478b30e4f74839208628a842d1e|" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:433 -msgid "" -"We've seen how federated evaluation works on the client side (i.e., by " -"implementing the ``evaluate`` method in ``FlowerClient``). Now let's see " -"how we can evaluate aggregated model parameters on the server-side:" -msgstr "" +#~ msgid "|1b3613d7a58847b59e1d3180802dbc09|" +#~ msgstr "|1b3613d7a58847b59e1d3180802dbc09|" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:490 -msgid "Sending/receiving arbitrary values to/from clients" -msgstr "" +#~ msgid "|9980b5213db547d0b8024a50992b9e3f|" +#~ msgstr "|9980b5213db547d0b8024a50992b9e3f|" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:492 -msgid "" -"In some situations, we want to configure client-side execution (training," -" evaluation) from the server-side. One example for that is the server " -"asking the clients to train for a certain number of local epochs. Flower " -"provides a way to send configuration values from the server to the " -"clients using a dictionary. Let's look at an example where the clients " -"receive values from the server through the ``config`` parameter in " -"``fit`` (``config`` is also available in ``evaluate``). 
The ``fit`` " -"method receives the configuration dictionary through the ``config`` " -"parameter and can then read values from this dictionary. In this example," -" it reads ``server_round`` and ``local_epochs`` and uses those values to " -"improve the logging and configure the number of local training epochs:" -msgstr "" +#~ msgid "|c7afb4c92d154bfaa5e8cb9a150e17f1|" +#~ msgstr "|c7afb4c92d154bfaa5e8cb9a150e17f1|" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:546 -msgid "" -"So how can we send this config dictionary from server to clients? The " -"built-in Flower Strategies provide way to do this, and it works similarly" -" to the way server-side evaluation works. We provide a function to the " -"strategy, and the strategy calls this function for every round of " -"federated learning:" -msgstr "" +#~ msgid "|032eb6fed6924ac387b9f13854919196|" +#~ msgstr "|032eb6fed6924ac387b9f13854919196|" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:576 -msgid "" -"Next, we'll just pass this function to the FedAvg strategy before " -"starting the simulation:" -msgstr "" +#~ msgid "|fbf225add7fd4df5a9bf25a95597d954|" +#~ msgstr "|fbf225add7fd4df5a9bf25a95597d954|" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 -msgid "" -"As we can see, the client logs now include the current round of federated" -" learning (which they read from the ``config`` dictionary). We can also " -"configure local training to run for one epoch during the first and second" -" round of federated learning, and then for two epochs during the third " -"round." -msgstr "" +#~ msgid "|7efbe3d29d8349b89594e8947e910525|" +#~ msgstr "|7efbe3d29d8349b89594e8947e910525|" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 -msgid "" -"Clients can also return arbitrary values to the server. To do so, they " -"return a dictionary from ``fit`` and/or ``evaluate``. 
We have seen and " -"used this concept throughout this notebook without mentioning it " -"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" -" key/value pair as the third return value in ``evaluate``." -msgstr "" +#~ msgid "|329fb3c04c744eda83bb51fa444c2266|" +#~ msgstr "|329fb3c04c744eda83bb51fa444c2266|" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:627 -msgid "Scaling federated learning" -msgstr "" +#~ msgid "|c00bf2750bc24d229737a0fe1395f0fc|" +#~ msgstr "|c00bf2750bc24d229737a0fe1395f0fc|" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:629 -msgid "" -"As a last step in this notebook, let's see how we can use Flower to " -"experiment with a large number of clients." -msgstr "" +#~ msgid "run\\_client\\_app" +#~ msgstr "run\\_client\\_app" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:651 -#, python-format -msgid "" -"We now have 1000 partitions, each holding 45 training and 5 validation " -"examples. Given that the number of training examples on each client is " -"quite small, we should probably train the model a bit longer, so we " -"configure the clients to perform 3 local training epochs. We should also " -"adjust the fraction of clients selected for training during each round " -"(we don't want all 1000 clients participating in every round), so we " -"adjust ``fraction_fit`` to ``0.05``, which means that only 5% of " -"available clients (so 50 clients) will be selected for training each " -"round:" -msgstr "" +#~ msgid "run\\_supernode" +#~ msgstr "run\\_supernode" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:699 -msgid "" -"In this notebook, we've seen how we can gradually enhance our system by " -"customizing the strategy, initializing parameters on the server side, " -"choosing a different strategy, and evaluating models on the server-side. 
" -"That's quite a bit of flexibility with so little code, right?" -msgstr "" +#~ msgid "Retrieve the corresponding layout by the string key." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:701 -msgid "" -"In the later sections, we've seen how we can communicate arbitrary values" -" between server and clients to fully customize client-side execution. " -"With that capability, we built a large-scale Federated Learning " -"simulation using the Flower Virtual Client Engine and ran an experiment " -"involving 1000 clients in the same workload - all in a Jupyter Notebook!" -msgstr "" +#~ msgid "" +#~ "When there isn't an exact match, " +#~ "all the existing keys in the " +#~ "layout map will be treated as a" +#~ " regex and map against the input " +#~ "key again. The first match will be" +#~ " returned, based on the key insertion" +#~ " order. Return None if there isn't" +#~ " any match found." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:719 -msgid "" -"The `Flower Federated Learning Tutorial - Part 3 " -"`__ shows how to build a fully custom ``Strategy`` from " -"scratch." -msgstr "" +#~ msgid "the string key as the query for the layout." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 -msgid "What is Federated Learning?" -msgstr "" +#~ msgid "Corresponding layout based on the query." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 -msgid "" -"In this tutorial, you will learn what federated learning is, build your " -"first system in Flower, and gradually extend it. If you work through all " -"parts of the tutorial, you will be able to build advanced federated " -"learning systems that approach the current state of the art in the field." 
-msgstr "" +#~ msgid "run\\_server\\_app" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 -msgid "" -"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " -"federated learning. Only a basic understanding of data science and Python" -" programming is assumed." -msgstr "" +#~ msgid "run\\_superlink" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 -msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the open-source Flower community on Slack to connect, ask questions, and " -"get help: `Join Slack `__ 🌼 We'd love to " -"hear from you in the ``#introductions`` channel! And if anything is " -"unclear, head over to the ``#questions`` channel." -msgstr "" +#~ msgid "Start a Ray-based Flower simulation server." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 -msgid "Classic machine learning" -msgstr "" +#~ msgid "" +#~ "A function creating `Client` instances. " +#~ "The function must have the signature " +#~ "`client_fn(context: Context). It should return" +#~ " a single client instance of type " +#~ "`Client`. Note that the created client" +#~ " instances are ephemeral and will " +#~ "often be destroyed after a single " +#~ "method invocation. Since client instances " +#~ "are not long-lived, they should " +#~ "not attempt to carry state over " +#~ "method invocations. Any state required " +#~ "by the instance (model, dataset, " +#~ "hyperparameters, ...) should be (re-)created" +#~ " in either the call to `client_fn`" +#~ " or the call to any of the " +#~ "client methods (e.g., load evaluation " +#~ "data in the `evaluate` method itself)." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 -msgid "" -"Before we begin to discuss federated learning, let us quickly recap how " -"most machine learning works today." -msgstr "" +#~ msgid "The total number of clients in this simulation." 
+#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 -msgid "" -"In machine learning, we have a model, and we have data. The model could " -"be a neural network (as depicted here), or something else, like classical" -" linear regression." -msgstr "" +#~ msgid "" +#~ "UNSUPPORTED, WILL BE REMOVED. USE " +#~ "`num_clients` INSTEAD. List `client_id`s for" +#~ " each client. This is only required" +#~ " if `num_clients` is not set. Setting" +#~ " both `num_clients` and `clients_ids` with" +#~ " `len(clients_ids)` not equal to " +#~ "`num_clients` generates an error. Using " +#~ "this argument will raise an error." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|d8bf04f23d9b46d8a23cc6f4887d7873|" -msgstr "" +#~ msgid "" +#~ "CPU and GPU resources for a single" +#~ " client. Supported keys are `num_cpus` " +#~ "and `num_gpus`. To understand the GPU" +#~ " utilization caused by `num_gpus`, as " +#~ "well as using custom resources, please" +#~ " consult the Ray documentation." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 -msgid "Model and data" -msgstr "" +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Server`. If no instance" +#~ " is provided, then `start_server` will " +#~ "create one." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 -msgid "" -"We train the model using the data to perform a useful task. A task could " -"be to detect objects in images, transcribe an audio recording, or play a " -"game like Go." -msgstr "" +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Strategy`. If no " +#~ "strategy is provided, then `start_server` " +#~ "will use `flwr.server.strategy.FedAvg`." 
+#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|5aa1711387d74d0f8b9c499e1a51627e|" -msgstr "" +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.ClientManager`. If no " +#~ "implementation is provided, then " +#~ "`start_simulation` will use " +#~ "`flwr.server.client_manager.SimpleClientManager`." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 -msgid "Train model using data" -msgstr "" +#~ msgid "" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args: { " +#~ "\"ignore_reinit_error\": True, \"include_dashboard\": " +#~ "False } An empty dictionary can " +#~ "be used (ray_init_args={}) to prevent " +#~ "any arguments from being passed to " +#~ "ray.init." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 -msgid "" -"Now, in practice, the training data we work with doesn't originate on the" -" machine we train the model on. It gets created somewhere else." -msgstr "" +#~ msgid "" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 -msgid "" -"It originates on a smartphone by the user interacting with an app, a car " -"collecting sensor data, a laptop receiving input via the keyboard, or a " -"smart speaker listening to someone trying to sing a song." 
-msgstr "" +#~ msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|2bc8e069228d4873804061ff4a95048c|" -msgstr "" +#~ msgid "" +#~ "An empty dictionary can be used " +#~ "(ray_init_args={}) to prevent any arguments" +#~ " from being passed to ray.init." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 -msgid "Data on a phone" -msgstr "" +#~ msgid "" +#~ "Set to True to prevent `ray.shutdown()`" +#~ " in case `ray.is_initialized()=True`." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 -msgid "" -"What's also important to mention, this \"somewhere else\" is usually not " -"just one place, it's many places. It could be several devices all running" -" the same app. But it could also be several organizations, all generating" -" data for the same task." -msgstr "" +#~ msgid "" +#~ "Optionally specify the type of actor " +#~ "to use. The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "executing a ClientApp wrapping input " +#~ "argument `client_fn`." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|c258488766324dc9a6807f0e7c4fd5f4|" -msgstr "" +#~ msgid "" +#~ "If you want to create your own " +#~ "Actor classes, you might need to " +#~ "pass some input argument. You can " +#~ "use this dictionary for such purpose." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 -msgid "Data is on many devices" -msgstr "" +#~ msgid "" +#~ "(default: \"DEFAULT\") Optional string " +#~ "(\"DEFAULT\" or \"SPREAD\") for the VCE" +#~ " to choose in which node the " +#~ "actor is placed. If you are an " +#~ "advanced user needed more control you" +#~ " can use lower-level scheduling " +#~ "strategies to pin actors to specific " +#~ "compute nodes (e.g. 
via " +#~ "NodeAffinitySchedulingStrategy). Please note this" +#~ " is an advanced feature. For all " +#~ "details, please refer to the Ray " +#~ "documentation: https://docs.ray.io/en/latest/ray-" +#~ "core/scheduling/index.html" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 -msgid "" -"So to use machine learning, or any kind of data analysis, the approach " -"that has been used in the past was to collect all data on a central " -"server. This server can be somewhere in a data center, or somewhere in " -"the cloud." -msgstr "" +#~ msgid "**hist** -- Object containing metrics from training." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 -msgid "|d5f962c3f4ec48529efda980868c14b0|" -msgstr "" +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with FastAI to train a vision " +#~ "model on CIFAR-10." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 -msgid "Central data collection" -msgstr "" +#~ msgid "Let's build a federated learning system using fastai and Flower!" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 -msgid "" -"Once all the data is collected in one place, we can finally use machine " -"learning algorithms to train our model on the data. This is the machine " -"learning approach that we've basically always relied on." -msgstr "" +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example `_ to learn more." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|a5eccea18d4c43a68b54b65043cabef8|" -msgstr "" +#~ msgid "" +#~ "Check out this Federating Learning " +#~ "quickstart tutorial for using Flower " +#~ "with HuggingFace Transformers in order " +#~ "to fine-tune an LLM." 
+#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 -msgid "Central model training" -msgstr "" +#~ msgid "" +#~ "Let's build a federated learning system" +#~ " using Hugging Face Transformers and " +#~ "Flower!" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 -msgid "Challenges of classical machine learning" -msgstr "" +#~ msgid "" +#~ "We will leverage Hugging Face to " +#~ "federate the training of language models" +#~ " over multiple clients using Flower. " +#~ "More specifically, we will fine-tune " +#~ "a pre-trained Transformer model " +#~ "(distilBERT) for sequence classification over" +#~ " a dataset of IMDB ratings. The " +#~ "end goal is to detect if a " +#~ "movie rating is positive or negative." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 -msgid "" -"The classic machine learning approach we've just seen can be used in some" -" cases. Great examples include categorizing holiday photos, or analyzing " -"web traffic. Cases, where all the data is naturally available on a " -"centralized server." -msgstr "" +#~ msgid "Dependencies" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|f17662f7df2d42f68cac70a1fdeda8a7|" -msgstr "" +#~ msgid "" +#~ "To follow along this tutorial you " +#~ "will need to install the following " +#~ "packages: :code:`datasets`, :code:`evaluate`, " +#~ ":code:`flwr`, :code:`torch`, and " +#~ ":code:`transformers`. This can be done " +#~ "using :code:`pip`:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 -msgid "Centralized possible" -msgstr "" +#~ msgid "Standard Hugging Face workflow" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 -msgid "" -"But the approach can not be used in many other cases. 
Cases, where the " -"data is not available on a centralized server, or cases where the data " -"available on one server is not enough to train a good model." -msgstr "" +#~ msgid "Handling the data" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 -msgid "|241fc906441a4f038c625a19d30d01b2|" -msgstr "" +#~ msgid "" +#~ "To fetch the IMDB dataset, we will" +#~ " use Hugging Face's :code:`datasets` " +#~ "library. We then need to tokenize " +#~ "the data and create :code:`PyTorch` " +#~ "dataloaders, this is all done in " +#~ "the :code:`load_data` function:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 -msgid "Centralized impossible" -msgstr "" +#~ msgid "Training and testing the model" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 -msgid "" -"There are many reasons why the classic centralized machine learning " -"approach does not work for a large number of highly important real-world " -"use cases. Those reasons include:" -msgstr "" +#~ msgid "" +#~ "Once we have a way of creating " +#~ "our trainloader and testloader, we can" +#~ " take care of the training and " +#~ "testing. 
This is very similar to " +#~ "any :code:`PyTorch` training or testing " +#~ "loop:" +#~ msgstr "" + +#~ msgid "Creating the model itself" +#~ msgstr "" + +#~ msgid "" +#~ "To create the model itself, we " +#~ "will just load the pre-trained " +#~ "distillBERT model using Hugging Face’s " +#~ ":code:`AutoModelForSequenceClassification` :" +#~ msgstr "" + +#~ msgid "Federating the example" +#~ msgstr "" + +#~ msgid "Creating the IMDBClient" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 -msgid "" -"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " -"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " -"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " -"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " -"protect sensitive data from being moved. In fact, those regulations " -"sometimes even prevent single organizations from combining their own " -"users' data for artificial intelligence training because those users live" -" in different parts of the world, and their data is governed by different" -" data protection regulations." -msgstr "" +#~ msgid "" +#~ "To federate our example to multiple " +#~ "clients, we first need to write " +#~ "our Flower client class (inheriting from" +#~ " :code:`flwr.client.NumPyClient`). This is very" +#~ " easy, as our model is a " +#~ "standard :code:`PyTorch` model:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 -msgid "" -"**User preference**: In addition to regulation, there are use cases where" -" users just expect that no data leaves their device, ever. If you type " -"your passwords and credit card info into the digital keyboard of your " -"phone, you don't expect those passwords to end up on the server of the " -"company that developed that keyboard, do you? In fact, that use case was " -"the reason federated learning was invented in the first place." 
-msgstr "" +#~ msgid "" +#~ "The :code:`get_parameters` function lets the" +#~ " server get the client's parameters. " +#~ "Inversely, the :code:`set_parameters` function " +#~ "allows the server to send its " +#~ "parameters to the client. Finally, the" +#~ " :code:`fit` function trains the model " +#~ "locally for the client, and the " +#~ ":code:`evaluate` function tests the model " +#~ "locally and returns the relevant " +#~ "metrics." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 -msgid "" -"**Data volume**: Some sensors, like cameras, produce such a high data " -"volume that it is neither feasible nor economic to collect all the data " -"(due to, for example, bandwidth or communication efficiency). Think about" -" a national rail service with hundreds of train stations across the " -"country. If each of these train stations is outfitted with a number of " -"security cameras, the volume of raw on-device data they produce requires " -"incredibly powerful and exceedingly expensive infrastructure to process " -"and store. And most of the data isn't even useful." -msgstr "" +#~ msgid "Starting the server" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 -msgid "Examples where centralized machine learning does not work include:" -msgstr "" +#~ msgid "" +#~ "Now that we have a way to " +#~ "instantiate clients, we need to create" +#~ " our server in order to aggregate " +#~ "the results. 
Using Flower, this can " +#~ "be done very easily by first " +#~ "choosing a strategy (here, we are " +#~ "using :code:`FedAvg`, which will define " +#~ "the global weights as the average " +#~ "of all the clients' weights at " +#~ "each round) and then using the " +#~ ":code:`flwr.server.start_server` function:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 -msgid "" -"Sensitive healthcare records from multiple hospitals to train cancer " -"detection models" -msgstr "" +#~ msgid "" +#~ "The :code:`weighted_average` function is there" +#~ " to provide a way to aggregate " +#~ "the metrics distributed amongst the " +#~ "clients (basically this allows us to " +#~ "display a nice average accuracy and " +#~ "loss for every round)." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 -msgid "" -"Financial information from different organizations to detect financial " -"fraud" -msgstr "" +#~ msgid "Putting everything together" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 -msgid "Location data from your electric car to make better range prediction" -msgstr "" +#~ msgid "We can now start client instances using:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 -msgid "End-to-end encrypted messages to train better auto-complete models" -msgstr "" +#~ msgid "" +#~ "And they will be able to connect" +#~ " to the server and start the " +#~ "federated training." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 -msgid "" -"The popularity of privacy-enhancing systems like the `Brave " -"`__ browser or the `Signal `__ " -"messenger shows that users care about privacy. In fact, they choose the " -"privacy-enhancing version over other alternatives, if such an alternative" -" exists. But what can we do to apply machine learning and data science to" -" these cases to utilize private data? 
After all, these are all areas that" -" would benefit significantly from recent advances in AI." -msgstr "" +#~ msgid "" +#~ "If you want to check out " +#~ "everything put together, you should " +#~ "check out the `full code example " +#~ "`_ ." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 -msgid "Federated learning" -msgstr "" +#~ msgid "" +#~ "Of course, this is a very basic" +#~ " example, and a lot can be " +#~ "added or modified, it was just to" +#~ " showcase how simply we could " +#~ "federate a Hugging Face workflow using" +#~ " Flower." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 -msgid "" -"Federated learning simply reverses this approach. It enables machine " -"learning on distributed data by moving the training to the data, instead " -"of moving the data to the training. Here's the single-sentence " -"explanation:" -msgstr "" +#~ msgid "" +#~ "Note that in this example we used" +#~ " :code:`PyTorch`, but we could have " +#~ "very well used :code:`TensorFlow`." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 -msgid "Central machine learning: move the data to the computation" -msgstr "" +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with PyTorch Lightning to train an " +#~ "Auto Encoder model on MNIST." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 -msgid "Federated (machine) learning: move the computation to the data" -msgstr "" +#~ msgid "" +#~ "Let's build a horizontal federated " +#~ "learning system using PyTorch Lightning " +#~ "and Flower!" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 -msgid "" -"By doing so, it enables us to use machine learning (and other data " -"science approaches) in areas where it wasn't possible before. 
We can now " -"train excellent medical AI models by enabling different hospitals to work" -" together. We can solve financial fraud by training AI models on the data" -" of different financial institutions. We can build novel privacy-" -"enhancing applications (such as secure messaging) that have better built-" -"in AI than their non-privacy-enhancing alternatives. And those are just a" -" few of the examples that come to mind. As we deploy federated learning, " -"we discover more and more areas that can suddenly be reinvented because " -"they now have access to vast amounts of previously inaccessible data." -msgstr "" +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 -msgid "" -"So how does federated learning work, exactly? Let's start with an " -"intuitive explanation." -msgstr "" +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with TensorFlow to train a MobilNetV2" +#~ " model on CIFAR-10." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 -msgid "Federated learning in five steps" -msgstr "" +#~ msgid "Let's build a federated learning system in less than 20 lines of code!" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 -msgid "Step 0: Initialize global model" -msgstr "" +#~ msgid "Before Flower can be imported we have to install it:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 -msgid "" -"We start by initializing the model on the server. This is exactly the " -"same in classic centralized learning: we initialize the model parameters," -" either randomly or from a previously saved checkpoint." 
-msgstr "" +#~ msgid "" +#~ "Since we want to use the Keras " +#~ "API of TensorFlow (TF), we have to" +#~ " install TF as well:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|0aa5aa05810b44b6a835cecce28f3137|" -msgstr "" +#~ msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 -msgid "Initialize global model" -msgstr "" +#~ msgid "" +#~ "We use the Keras utilities of TF" +#~ " to load CIFAR10, a popular colored" +#~ " image classification dataset for machine" +#~ " learning. The call to " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` downloads " +#~ "CIFAR10, caches it locally, and then " +#~ "returns the entire training and test " +#~ "set as NumPy ndarrays." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 -msgid "" -"Step 1: Send model to a number of connected organizations/devices (client" -" nodes)" -msgstr "" +#~ msgid "" +#~ "Next, we need a model. For the " +#~ "purpose of this tutorial, we use " +#~ "MobilNetV2 with 10 output classes:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 -msgid "" -"Next, we send the parameters of the global model to the connected client " -"nodes (think: edge devices like smartphones or servers belonging to " -"organizations). This is to ensure that each participating node starts " -"their local training using the same model parameters. We often use only a" -" few of the connected nodes instead of all nodes. The reason for this is " -"that selecting more and more client nodes has diminishing returns." -msgstr "" +#~ msgid "" +#~ "The Flower server interacts with clients" +#~ " through an interface called " +#~ ":code:`Client`. When the server selects " +#~ "a particular client for training, it " +#~ "sends training instructions over the " +#~ "network. 
The client receives those " +#~ "instructions and calls one of the " +#~ ":code:`Client` methods to run your code" +#~ " (i.e., to train the neural network" +#~ " we defined earlier)." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|c742940dd4bf4de09d8d0d5e8d179638|" -msgstr "" +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses Keras." +#~ " The :code:`NumPyClient` interface defines " +#~ "three methods which can be implemented" +#~ " in the following way:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 -msgid "Send global model" -msgstr "" +#~ msgid "" +#~ "We can now create an instance of" +#~ " our class :code:`CifarClient` and add " +#~ "one line to actually run this " +#~ "client:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 -msgid "" -"Step 2: Train model locally on the data of each organization/device " -"(client node)" -msgstr "" +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. If you implement" +#~ " a client of type :code:`NumPyClient` " +#~ "you'll need to first call its " +#~ ":code:`to_client()` method. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." 
+#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 -msgid "" -"Now that all (selected) client nodes have the latest version of the " -"global model parameters, they start the local training. They use their " -"own local dataset to train their own local model. They don't train the " -"model until full convergence, but they only train for a little while. " -"This could be as little as one epoch on the local data, or even just a " -"few steps (mini-batches)." -msgstr "" +#~ msgid "Each client will have its own dataset." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|1f169ab4601a47e1a226f1628f4ebddb|" -msgstr "" +#~ msgid "" +#~ "You should now see how the " +#~ "training does in the very first " +#~ "terminal (the one that started the " +#~ "server):" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 -msgid "Train on local data" -msgstr "" +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this can be " +#~ "found in :code:`examples/quickstart-" +#~ "tensorflow/client.py`." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 -msgid "Step 3: Return model updates back to the server" -msgstr "" +#~ msgid "|e5918c1c06a4434bbe4bf49235e40059|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 -msgid "" -"After local training, each client node has a slightly different version " -"of the model parameters they originally received. The parameters are all " -"different because each client node has different examples in its local " -"dataset. The client nodes then send those model updates back to the " -"server. The model updates they send can either be the full model " -"parameters or just the gradients that were accumulated during local " -"training." 
-msgstr "" +#~ msgid "|c0165741bd1944f09ec55ce49032377d|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|12cfa9cde14440ecb8c8f6c1d7185bec|" -msgstr "" +#~ msgid "|0a0ac9427ac7487b8e52d75ed514f04e|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 -msgid "Send model updates" -msgstr "" +#~ msgid "|5defee3ea4ca40d99fcd3e4ea045be25|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 -msgid "Step 4: Aggregate model updates into a new global model" -msgstr "" +#~ msgid "|74f26ca701254d3db57d7899bd91eb55|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 -msgid "" -"The server receives model updates from the selected client nodes. If it " -"selected 100 client nodes, it now has 100 slightly different versions of " -"the original global model, each trained on the local data of one client. " -"But didn't we want to have one model that contains the learnings from the" -" data of all 100 client nodes?" -msgstr "" +#~ msgid "|bda79f21f8154258a40e5766b2634ad7|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 -msgid "" -"In order to get one single model, we have to combine all the model " -"updates we received from the client nodes. This process is called " -"*aggregation*, and there are many different ways to do it. The most basic" -" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " -"`__), often abbreviated as *FedAvg*. " -"*FedAvg* takes the 100 model updates and, as the name suggests, averages " -"them. To be more precise, it takes the *weighted average* of the model " -"updates, weighted by the number of examples each client used for " -"training. The weighting is important to make sure that each data example " -"has the same \"influence\" on the resulting global model. 
If one client " -"has 10 examples, and another client has 100 examples, then - without " -"weighting - each of the 10 examples would influence the global model ten " -"times as much as each of the 100 examples." -msgstr "" +#~ msgid "|89d30862e62e4f9989e193483a08680a|" +#~ msgstr "" + +#~ msgid "|77e9918671c54b4f86e01369c0785ce8|" +#~ msgstr "" + +#~ msgid "|7e4ccef37cc94148a067107b34eb7447|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|72939caf6e294b0986fee6dde96614d7|" -msgstr "" +#~ msgid "|28e47e4cded14479a0846c8e5f22c872|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 -msgid "Aggregate model updates" -msgstr "" +#~ msgid "|4b8c5d1afa144294b76ffc76e4658a38|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 -msgid "Step 5: Repeat steps 1 to 4 until the model converges" -msgstr "" +#~ msgid "|9dbdb3a0f6cb4a129fac863eaa414c17|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 -msgid "" -"Steps 1 to 4 are what we call a single round of federated learning. The " -"global model parameters get sent to the participating client nodes (step " -"1), the client nodes train on their local data (step 2), they send their " -"updated models to the server (step 3), and the server then aggregates the" -" model updates to get a new version of the global model (step 4)." -msgstr "" +#~ msgid "|81749d0ac0834c36a83bd38f433fea31|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 -msgid "" -"During a single round, each client node that participates in that " -"iteration only trains for a little while. This means that after the " -"aggregation step (step 4), we have a model that has been trained on all " -"the data of all participating client nodes, but only for a little while. 
" -"We then have to repeat this training process over and over again to " -"eventually arrive at a fully trained model that performs well across the " -"data of all client nodes." -msgstr "" +#~ msgid "|ed9aae51da70428eab7eef32f21e819e|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 -msgid "" -"Congratulations, you now understand the basics of federated learning. " -"There's a lot more to discuss, of course, but that was federated learning" -" in a nutshell. In later parts of this tutorial, we will go into more " -"detail. Interesting questions include: How can we select the best client " -"nodes that should participate in the next round? What's the best way to " -"aggregate model updates? How can we handle failing client nodes " -"(stragglers)?" -msgstr "" +#~ msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 -msgid "" -"Just like we can train a model on the decentralized data of different " -"client nodes, we can also evaluate the model on that data to receive " -"valuable metrics. This is called federated evaluation, sometimes " -"abbreviated as FE. In fact, federated evaluation is an integral part of " -"most federated learning systems." -msgstr "" +#~ msgid "|33cacb7d985c4906b348515c1a5cd993|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 -msgid "Federated analytics" -msgstr "" +#~ msgid "|cc080a555947492fa66131dc3a967603|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 -msgid "" -"In many cases, machine learning isn't necessary to derive value from " -"data. Data analysis can yield valuable insights, but again, there's often" -" not enough data to get a clear answer. What's the average age at which " -"people develop a certain type of health condition? Federated analytics " -"enables such queries over multiple client nodes. 
It is usually used in " -"conjunction with other privacy-enhancing technologies like secure " -"aggregation to prevent the server from seeing the results submitted by " -"individual client nodes." -msgstr "" +#~ msgid "|085c3e0fb8664c6aa06246636524b20b|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 -msgid "" -"Differential privacy (DP) is often mentioned in the context of Federated " -"Learning. It is a privacy-preserving method used when analyzing and " -"sharing statistical data, ensuring the privacy of individual " -"participants. DP achieves this by adding statistical noise to the model " -"updates, ensuring any individual participants’ information cannot be " -"distinguished or re-identified. This technique can be considered an " -"optimization that provides a quantifiable privacy protection measure." -msgstr "" +#~ msgid "|bfe69c74e48c45d49b50251c38c2a019|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 -msgid "Flower" -msgstr "" +#~ msgid "|ebbecd651f0348d99c6511ea859bf4ca|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 -msgid "" -"Federated learning, federated evaluation, and federated analytics require" -" infrastructure to move machine learning models back and forth, train and" -" evaluate them on local data, and then aggregate the updated models. " -"Flower provides the infrastructure to do exactly that in an easy, " -"scalable, and secure way. In short, Flower presents a unified approach to" -" federated learning, analytics, and evaluation. It allows the user to " -"federate any workload, any ML framework, and any programming language." 
-msgstr "" +#~ msgid "|163117eb654a4273babba413cf8065f5|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|83a8daee45da4a98b8d6f24ae098fc50|" -msgstr "" +#~ msgid "|452ac3ba453b4cd1be27be1ba7560d64|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 -msgid "" -"Flower federated learning server and client nodes (car, scooter, personal" -" computer, roomba, and phone)" -msgstr "" +#~ msgid "|f403fcd69e4e44409627e748b404c086|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 -msgid "" -"Congratulations, you just learned the basics of federated learning and " -"how it relates to the classic (centralized) machine learning!" -msgstr "" +#~ msgid "|4b00fe63870145968f8443619a792a42|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 -msgid "" -"In the next part of this tutorial, we are going to build a first " -"federated learning system with Flower." -msgstr "" +#~ msgid "|368378731066486fa4397e89bc6b870c|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 -msgid "" -"The `Flower Federated Learning Tutorial - Part 1 " -"`__ shows how to build a simple federated learning system " -"with PyTorch and Flower." -msgstr "" +#~ msgid "|a66aa83d85bf4ffba7ed660b718066da|" +#~ msgstr "" -#~ msgid "" -#~ "If you want to use your own " -#~ "base image instead of the official " -#~ "Flower base image, all you need to" -#~ " do is set the ``BASE_REPOSITORY``, " -#~ "``PYTHON_VERSION`` and ``UBUNTU_VERSION`` build " -#~ "arguments. .. 
code-block:: bash" +#~ msgid "|82324b9af72a4582a81839d55caab767|" #~ msgstr "" -#~ msgid "$ cd src/docker/superlink/ $ docker build \\" +#~ msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" #~ msgstr "" #~ msgid "" -#~ "--build-arg BASE_REPOSITORY=flwr_base \\ " -#~ "--build-arg PYTHON_VERSION=3.11 \\ --build-" -#~ "arg UBUNTU_VERSION=ubuntu22.04 \\ --build-arg" -#~ " FLWR_VERSION=1.8.0 \\ -t flwr_superlink:0.1.0" -#~ " ." +#~ "Some quickstart examples may have " +#~ "limitations or requirements that prevent " +#~ "them from running on every environment." +#~ " For more information, please see " +#~ "`Limitations`_." #~ msgstr "" #~ msgid "" -#~ "It is important to follow the " -#~ "instructions described in comments. For " -#~ "instance, in order to not break " -#~ "how our changelog system works, you " -#~ "should read the information above the" -#~ " ``Changelog entry`` section carefully. You" -#~ " can also checkout some examples and" -#~ " details in the :ref:`changelogentry` " -#~ "appendix." +#~ "Change the application code. For " +#~ "example, change the ``seed`` in " +#~ "``quickstart_docker/task.py`` to ``43`` and " +#~ "save it:" #~ msgstr "" -#~ msgid "Open a PR (as shown above)" +#~ msgid ":code:`fit`" +#~ msgstr ":code:`fit`" + +#~ msgid "" +#~ "Note that since version :code:`1.11.0`, " +#~ ":code:`flower-server-app` no longer " +#~ "supports passing a reference to a " +#~ "`ServerApp` attribute. Instead, you need " +#~ "to pass the path to Flower app " +#~ "via the argument :code:`--app`. This is" +#~ " the path to a directory containing" +#~ " a `pyproject.toml`. You can create a" +#~ " valid Flower app by executing " +#~ ":code:`flwr new` and following the " +#~ "prompt." #~ msgstr "" -#~ msgid "How to write a good PR title" +#~ msgid "" +#~ "The following examples are available as" +#~ " standalone projects. 
Quickstart TensorFlow/Keras" +#~ " ---------------------------" #~ msgstr "" #~ msgid "" -#~ "A well-crafted PR title helps team" -#~ " members quickly understand the purpose " -#~ "and scope of the changes being " -#~ "proposed. Here's a guide to help " -#~ "you write a good GitHub PR title:" +#~ "Let's create a new application project" +#~ " in Xcode and add :code:`flwr` as " +#~ "a dependency in your project. For " +#~ "our application, we will store the " +#~ "logic of our app in " +#~ ":code:`FLiOSModel.swift` and the UI elements" +#~ " in :code:`ContentView.swift`. We will " +#~ "focus more on :code:`FLiOSModel.swift` in " +#~ "this quickstart. Please refer to the " +#~ "`full code example " +#~ "`_ to " +#~ "learn more about the app." +#~ msgstr "" + +#~ msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" #~ msgstr "" #~ msgid "" -#~ "1. Be Clear and Concise: Provide a" -#~ " clear summary of the changes in " -#~ "a concise manner. 1. Use Actionable " -#~ "Verbs: Start with verbs like \"Add,\"" -#~ " \"Update,\" or \"Fix\" to indicate " -#~ "the purpose. 1. Include Relevant " -#~ "Information: Mention the affected feature " -#~ "or module for context. 1. Keep it" -#~ " Short: Avoid lengthy titles for easy" -#~ " readability. 1. Use Proper Capitalization" -#~ " and Punctuation: Follow grammar rules " -#~ "for clarity." +#~ "Then add the mlmodel to the " +#~ "project simply by drag-and-drop, " +#~ "the mlmodel will be bundled inside " +#~ "the application during deployment to " +#~ "your iOS device. We need to pass" +#~ " the url to access mlmodel and " +#~ "run CoreML machine learning processes, " +#~ "it can be retrieved by calling the" +#~ " function :code:`Bundle.main.url`. For the " +#~ "MNIST dataset, we need to preprocess " +#~ "it into :code:`MLBatchProvider` object. The" +#~ " preprocessing is done inside " +#~ ":code:`DataLoader.swift`." 
#~ msgstr "" #~ msgid "" -#~ "Let's start with a few examples " -#~ "for titles that should be avoided " -#~ "because they do not provide meaningful" -#~ " information:" +#~ "Since CoreML does not allow the " +#~ "model parameters to be seen before " +#~ "training, and accessing the model " +#~ "parameters during or after the training" +#~ " can only be done by specifying " +#~ "the layer name, we need to know" +#~ " this information beforehand, through " +#~ "looking at the model specification, " +#~ "which are written as proto files. " +#~ "The implementation can be seen in " +#~ ":code:`MLModelInspect`." #~ msgstr "" -#~ msgid "Implement Algorithm" +#~ msgid "" +#~ "Then start the Flower gRPC client " +#~ "and start communicating to the server" +#~ " by passing our Flower client to " +#~ "the function :code:`startFlwrGRPC`." #~ msgstr "" -#~ msgid "Database" +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ "call the provided :code:`MLFlwrClient` and " +#~ "call :code:`startFlwrGRPC()`. The attribute " +#~ ":code:`hostname` and :code:`port` tells the" +#~ " client which server to connect to." +#~ " This can be done by entering " +#~ "the hostname and port in the " +#~ "application before clicking the start " +#~ "button to start the federated learning" +#~ " process." #~ msgstr "" -#~ msgid "Add my_new_file.py to codebase" +#~ msgid "" +#~ "For simple workloads we can start " +#~ "a Flower server and leave all the" +#~ " configuration possibilities at their " +#~ "default values. In a file named " +#~ ":code:`server.py`, import Flower and start " +#~ "the server:" #~ msgstr "" -#~ msgid "Improve code in module" +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system in your ios device. The " +#~ "full `source code " +#~ "`_ for" +#~ " this example can be found in " +#~ ":code:`examples/ios`." 
#~ msgstr "" -#~ msgid "Change SomeModule" +#~ msgid "" +#~ "In this tutorial, we will learn " +#~ "how to train a :code:`Logistic " +#~ "Regression` model on MNIST using Flower" +#~ " and scikit-learn." #~ msgstr "" #~ msgid "" -#~ "Here are a few positive examples " -#~ "which provide helpful information without " -#~ "repeating how they do it, as that" -#~ " is already visible in the \"Files" -#~ " changed\" section of the PR:" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. However, before" +#~ " setting up the client and server," +#~ " we will define all functionalities " +#~ "that we need for our federated " +#~ "learning setup within :code:`utils.py`. The" +#~ " :code:`utils.py` contains different functions" +#~ " defining all the machine learning " +#~ "basics:" #~ msgstr "" -#~ msgid "Update docs banner to mention Flower Summit 2023" +#~ msgid ":code:`get_model_parameters()`" #~ msgstr "" -#~ msgid "Remove unnecessary XGBoost dependency" +#~ msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" #~ msgstr "" -#~ msgid "Remove redundant attributes in strategies subclassing FedAvg" +#~ msgid ":code:`set_model_params()`" #~ msgstr "" -#~ msgid "" -#~ "Add CI job to deploy the staging" -#~ " system when the ``main`` branch " -#~ "changes" +#~ msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" +#~ msgstr "" + +#~ msgid ":code:`set_initial_params()`" #~ msgstr "" #~ msgid "" -#~ "Add new amazing library which will " -#~ "be used to improve the simulation " -#~ "engine" +#~ "Please check out :code:`utils.py` `here " +#~ "`_ for more details. " +#~ "The pre-defined functions are used " +#~ "in the :code:`client.py` and imported. 
" +#~ "The :code:`client.py` also requires to " +#~ "import several packages such as Flower" +#~ " and scikit-learn:" #~ msgstr "" -#~ msgid "Changelog entry" +#~ msgid "" +#~ "Prior to local training, we need " +#~ "to load the MNIST dataset, a " +#~ "popular image classification dataset of " +#~ "handwritten digits for machine learning, " +#~ "and partition the dataset for FL. " +#~ "This can be conveniently achieved using" +#~ " `Flower Datasets `_." +#~ " The :code:`FederatedDataset.load_partition()` method" +#~ " loads the partitioned training set " +#~ "for each partition ID defined in " +#~ "the :code:`--partition-id` argument." #~ msgstr "" #~ msgid "" -#~ "When opening a new PR, inside its" -#~ " description, there should be a " -#~ "``Changelog entry`` header." +#~ "Next, the logistic regression model is" +#~ " defined and initialized with " +#~ ":code:`utils.set_initial_params()`." #~ msgstr "" #~ msgid "" -#~ "Above this header you should see " -#~ "the following comment that explains how" -#~ " to write your changelog entry:" +#~ "The Flower server interacts with clients" +#~ " through an interface called " +#~ ":code:`Client`. When the server selects " +#~ "a particular client for training, it " +#~ "sends training instructions over the " +#~ "network. The client receives those " +#~ "instructions and calls one of the " +#~ ":code:`Client` methods to run your code" +#~ " (i.e., to fit the logistic " +#~ "regression we defined earlier)." #~ msgstr "" #~ msgid "" -#~ "Inside the following 'Changelog entry' " -#~ "section, you should put the description" -#~ " of your changes that will be " -#~ "added to the changelog alongside your" -#~ " PR title." +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses " +#~ "scikit-learn. 
Implementing :code:`NumPyClient` " +#~ "usually means defining the following " +#~ "methods (:code:`set_parameters` is optional " +#~ "though):" +#~ msgstr "" + +#~ msgid ":code:`set_parameters` (optional)" +#~ msgstr "" + +#~ msgid "is directly imported with :code:`utils.set_model_params()`" #~ msgstr "" #~ msgid "" -#~ "If the section is completely empty " -#~ "(without any token) or non-existent, " -#~ "the changelog will just contain the " -#~ "title of the PR for the changelog" -#~ " entry, without any description." +#~ "We can now create an instance of" +#~ " our class :code:`MnistClient` and add " +#~ "one line to actually run this " +#~ "client:" #~ msgstr "" #~ msgid "" -#~ "If the section contains some text " -#~ "other than tokens, it will use it" -#~ " to add a description to the " -#~ "change." +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. If you implement" +#~ " a client of type :code:`NumPyClient` " +#~ "you'll need to first call its " +#~ ":code:`to_client()` method. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." 
+#~ msgstr "" + +#~ msgid ":code:`server.py`, import Flower and start the server:" #~ msgstr "" #~ msgid "" -#~ "If the section contains one of the" -#~ " following tokens it will ignore any" -#~ " other text and put the PR " -#~ "under the corresponding section of the" -#~ " changelog:" +#~ "The number of federated learning rounds" +#~ " is set in :code:`fit_round()` and " +#~ "the evaluation is defined in " +#~ ":code:`get_evaluate_fn()`. The evaluation function" +#~ " is called after each federated " +#~ "learning round and gives you information" +#~ " about loss and accuracy. Note that" +#~ " we also make use of Flower " +#~ "Datasets here to load the test " +#~ "split of the MNIST dataset for " +#~ "server-side evaluation." #~ msgstr "" -#~ msgid " is for classifying a PR as a general improvement." +#~ msgid "" +#~ "The :code:`main` contains the server-" +#~ "side parameter initialization " +#~ ":code:`utils.set_initial_params()` as well as " +#~ "the aggregation strategy " +#~ ":code:`fl.server.strategy:FedAvg()`. The strategy is" +#~ " the default one, federated averaging " +#~ "(or FedAvg), with two clients and " +#~ "evaluation after each federated learning " +#~ "round. The server can be started " +#~ "with the command " +#~ ":code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " +#~ "strategy=strategy, " +#~ "config=fl.server.ServerConfig(num_rounds=3))`." #~ msgstr "" -#~ msgid " is to not add the PR to the changelog" +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/sklearn-logreg-" +#~ "mnist`." #~ msgstr "" -#~ msgid " is to add a general baselines change to the PR" +#~ msgid "" +#~ "In this tutorial we will learn how" +#~ " to train a federated XGBoost model" +#~ " on HIGGS dataset using Flower and" +#~ " :code:`xgboost` package. 
We use a " +#~ "simple example (`full code xgboost-" +#~ "quickstart `_) with two *clients* " +#~ "and one *server* to demonstrate how " +#~ "federated XGBoost works, and then we " +#~ "dive into a more complex example " +#~ "(`full code xgboost-comprehensive " +#~ "`_) to run various experiments." #~ msgstr "" -#~ msgid " is to add a general examples change to the PR" +#~ msgid "" +#~ "Since we want to use :code:`xgboost` " +#~ "package to build up XGBoost trees, " +#~ "let's go ahead and install " +#~ ":code:`xgboost`:" #~ msgstr "" -#~ msgid " is to add a general sdk change to the PR" +#~ msgid "" +#~ "In a file called :code:`client.py`, " +#~ "import xgboost, Flower, Flower Datasets " +#~ "and other related functions:" #~ msgstr "" -#~ msgid " is to add a general simulations change to the PR" +#~ msgid "" +#~ "In this example, we split the " +#~ "dataset into 30 partitions with uniform" +#~ " distribution (:code:`IidPartitioner(num_partitions=30)`)." +#~ " Then, we load the partition for " +#~ "the given client based on " +#~ ":code:`partition_id`:" #~ msgstr "" -#~ msgid "Note that only one token should be used." +#~ msgid "" +#~ "After that, we do train/test splitting" +#~ " on the given partition (client's " +#~ "local data), and transform data format" +#~ " for :code:`xgboost` package." #~ msgstr "" #~ msgid "" -#~ "Its content must have a specific " -#~ "format. We will break down what " -#~ "each possibility does:" +#~ "The functions of :code:`train_test_split` and" +#~ " :code:`transform_dataset_to_dmatrix` are defined " +#~ "as below:" #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains nothing or doesn't exist, " -#~ "the following text will be added " -#~ "to the changelog::" +#~ "The :code:`num_local_round` represents the " +#~ "number of iterations for local tree " +#~ "boost. We use CPU for the training" +#~ " in default. One can shift it " +#~ "to GPU by setting :code:`tree_method` to" +#~ " :code:`gpu_hist`. 
We use AUC as " +#~ "evaluation metric." #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains a description (and no " -#~ "token), the following text will be " -#~ "added to the changelog::" +#~ "After loading the dataset we define " +#~ "the Flower client. We follow the " +#~ "general rule to define :code:`XgbClient` " +#~ "class inherited from :code:`fl.client.Client`." #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, nothing will change" -#~ " in the changelog." +#~ "All required parameters defined above " +#~ "are passed to :code:`XgbClient`'s constructor." #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following text" -#~ " will be added to the changelog::" +#~ "Then, we override :code:`get_parameters`, " +#~ ":code:`fit` and :code:`evaluate` methods " +#~ "insides :code:`XgbClient` class as follows." #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" +#~ "Unlike neural network training, XGBoost " +#~ "trees are not started from a " +#~ "specified random weights. In this case," +#~ " we do not use :code:`get_parameters` " +#~ "and :code:`set_parameters` to initialise model" +#~ " parameters for XGBoost. As a result," +#~ " let's return an empty tensor in " +#~ ":code:`get_parameters` when it is called " +#~ "by the server at the first round." #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" +#~ "In :code:`fit`, at the first round, " +#~ "we call :code:`xgb.train()` to build up" +#~ " the first set of trees. 
From " +#~ "the second round, we load the " +#~ "global model sent from server to " +#~ "new build Booster object, and then " +#~ "update model weights on local training" +#~ " data with function :code:`local_boost` as" +#~ " follows:" #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following text " -#~ "will be added to the changelog::" +#~ "Given :code:`num_local_round`, we update trees" +#~ " by calling :code:`bst_input.update` method. " +#~ "After training, the last " +#~ ":code:`N=num_local_round` trees will be " +#~ "extracted to send to the server." #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" +#~ "In :code:`evaluate`, after loading the " +#~ "global model, we call :code:`bst.eval_set` " +#~ "function to conduct evaluation on valid" +#~ " set. The AUC value will be " +#~ "returned." #~ msgstr "" #~ msgid "" -#~ "Note that only one token must be" -#~ " provided, otherwise, only the first " -#~ "action (in the order listed above), " -#~ "will be performed." +#~ "Now, we can create an instance of" +#~ " our class :code:`XgbClient` and add " +#~ "one line to actually run this " +#~ "client:" #~ msgstr "" #~ msgid "" -#~ "We recommend you to check out the" -#~ " complete `code example " -#~ "`_ demonstrating federated " -#~ "learning with Flower in an authenticated" -#~ " setting." +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` and " +#~ "call :code:`fl.client.start_client()`. The string" +#~ " :code:`\"[::]:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. 
If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." #~ msgstr "" -#~ msgid "Let's break down the :code:`--require-client-authentication` flag:" +#~ msgid "" +#~ "In a file named :code:`server.py`, " +#~ "import Flower and FedXgbBagging from " +#~ ":code:`flwr.server.strategy`." #~ msgstr "" #~ msgid "" -#~ "The first argument is a path to" -#~ " a CSV file storing all known " -#~ "node public keys. You need to " -#~ "store all known node public keys " -#~ "that are allowed to participate in " -#~ "a federation in one CSV file " -#~ "(:code:`.csv`)." +#~ "We use two clients for this " +#~ "example. An :code:`evaluate_metrics_aggregation` " +#~ "function is defined to collect and " +#~ "wighted average the AUC values from " +#~ "clients. The :code:`config_func` function is" +#~ " to return the current FL round " +#~ "number to client's :code:`fit()` and " +#~ ":code:`evaluate()` methods." #~ msgstr "" #~ msgid "" -#~ "The second and third arguments are " -#~ "paths to the server's private and " -#~ "public keys. For development purposes, " -#~ "you can generate a private and " -#~ "public key pair using :code:`ssh-keygen" -#~ " -t ecdsa -b 384`." +#~ "In file :code:`flwr.server.strategy.fedxgb_bagging.py`," +#~ " we define :code:`FedXgbBagging` inherited " +#~ "from :code:`flwr.server.strategy.FedAvg`. Then, we" +#~ " override the :code:`aggregate_fit`, " +#~ ":code:`aggregate_evaluate` and :code:`evaluate` " +#~ "methods as follows:" #~ msgstr "" #~ msgid "" -#~ "The :code:`--authentication-keys` flag expects" -#~ " two arguments: a path to the " -#~ "node's private key file and a path" -#~ " to the node's public key file. " -#~ "For development purposes, you can " -#~ "generate a private and public key " -#~ "pair using :code:`ssh-keygen -t ecdsa" -#~ " -b 384`." 
+#~ "In :code:`aggregate_fit`, we sequentially " +#~ "aggregate the clients' XGBoost trees by" +#~ " calling :code:`aggregate()` function:" #~ msgstr "" #~ msgid "" -#~ "The simplest way to get started " -#~ "with Flower is by using the " -#~ "pre-made Docker images, which you can" -#~ " find on `Docker Hub " -#~ "`_." +#~ "In this function, we first fetch " +#~ "the number of trees and the number" +#~ " of parallel trees for the current" +#~ " and previous model by calling " +#~ ":code:`_get_tree_nums`. Then, the fetched " +#~ "information will be aggregated. After " +#~ "that, the trees (containing model " +#~ "weights) are aggregated to generate a" +#~ " new tree model." #~ msgstr "" #~ msgid "" -#~ "The ``--insecure`` flag enables insecure " -#~ "communication (using HTTP, not HTTPS) " -#~ "and should only be used for " -#~ "testing purposes. We strongly recommend " -#~ "enabling `SSL `_ when " -#~ "deploying to a production environment." +#~ "Congratulations! You've successfully built and" +#~ " run your first federated XGBoost " +#~ "system. The AUC values can be " +#~ "checked in :code:`metrics_distributed`. One " +#~ "can see that the average AUC " +#~ "increases over FL rounds." #~ msgstr "" #~ msgid "" -#~ "If you want to persist the state" -#~ " of the SuperLink on your host " -#~ "system, all you need to do is " -#~ "specify a path where you want to" -#~ " save the file on your host " -#~ "system and a name for the database" -#~ " file. In the example below, we " -#~ "tell Docker via the flag ``-v`` to" -#~ " mount the user's home directory " -#~ "(``~/`` on your host) into the " -#~ "``/app/`` directory of the container. " -#~ "Furthermore, we use the flag " -#~ "``--database`` to specify the name of" -#~ " the database file." +#~ "The full `source code " +#~ "`_ for this example can be" +#~ " found in :code:`examples/xgboost-quickstart`." #~ msgstr "" #~ msgid "" -#~ "For testing purposes, you can generate" -#~ " your own self-signed certificates. 
" -#~ "The `Enable SSL connections " -#~ "`_ page contains " -#~ "a section that will guide you " -#~ "through the process." +#~ "To do this, we first customise a" +#~ " :code:`ClientManager` in :code:`server_utils.py`:" #~ msgstr "" #~ msgid "" -#~ "Assuming all files we need are in" -#~ " the local ``certificates`` directory, we" -#~ " can use the flag ``-v`` to " -#~ "mount the local directory into the " -#~ "``/app/`` directory of the container. " -#~ "This allows the SuperLink to access " -#~ "the files within the container. Finally," -#~ " we pass the names of the " -#~ "certificates to the SuperLink with the" -#~ " ``--certificates`` flag." +#~ "The customised :code:`ClientManager` samples " +#~ "all available clients in each FL " +#~ "round based on the order of " +#~ "connection to the server. Then, we " +#~ "define a new strategy :code:`FedXgbCyclic` " +#~ "in :code:`flwr.server.strategy.fedxgb_cyclic.py`, in " +#~ "order to sequentially select only one" +#~ " client in given round and pass " +#~ "the received model to next client." #~ msgstr "" #~ msgid "" -#~ "The SuperNode Docker image currently " -#~ "works only with the 1.9.0-nightly " -#~ "release. A stable version will be " -#~ "available when Flower 1.9.0 (stable) " -#~ "gets released (ETA: May). A SuperNode" -#~ " nightly image must be paired with" -#~ " the corresponding SuperLink nightly image" -#~ " released on the same day. To " -#~ "ensure the versions are in sync, " -#~ "using the concrete tag, e.g., " -#~ "``1.9.0.dev20240501`` instead of ``nightly`` " -#~ "is recommended." +#~ "Unlike the original :code:`FedAvg`, we " +#~ "don't perform aggregation here. Instead, " +#~ "we just make a copy of the " +#~ "received client model as global model" +#~ " by overriding :code:`aggregate_fit`." #~ msgstr "" #~ msgid "" -#~ "We will use the ``app-pytorch`` " -#~ "example, which you can find in the" -#~ " Flower repository, to illustrate how " -#~ "you can dockerize your client-app." 
+#~ "Also, the customised :code:`configure_fit` and" +#~ " :code:`configure_evaluate` methods ensure the" +#~ " clients to be sequentially selected " +#~ "given FL round:" #~ msgstr "" #~ msgid "" -#~ "Before we can start, we need to" -#~ " meet a few prerequisites in our " -#~ "local development environment. You can " -#~ "skip the first part if you want" -#~ " to run your client-app instead " -#~ "of the ``app-pytorch`` example." +#~ "In :code:`dataset.py`, we have a " +#~ "function :code:`instantiate_partitioner` to " +#~ "instantiate the data partitioner based " +#~ "on the given :code:`num_partitions` and " +#~ ":code:`partitioner_type`. Currently, we provide " +#~ "four supported partitioner type to " +#~ "simulate the uniformity/non-uniformity in " +#~ "data quantity (uniform, linear, square, " +#~ "exponential)." #~ msgstr "" #~ msgid "" -#~ "First, we need to create a " -#~ "Dockerfile in the directory where the" -#~ " ``ClientApp`` code is located. If " -#~ "you use the ``app-pytorch`` example, " -#~ "create a new file called ``Dockerfile``" -#~ " in ``examples/app-pytorch``." +#~ "To facilitate centralised evaluation, we " +#~ "define a function in :code:`server_utils.py`:" #~ msgstr "" #~ msgid "" -#~ "The ``Dockerfile`` contains the instructions" -#~ " that assemble the SuperNode image." +#~ "This function returns a evaluation " +#~ "function which instantiates a :code:`Booster`" +#~ " object and loads the global model" +#~ " weights to it. The evaluation is " +#~ "conducted by calling :code:`eval_set()` " +#~ "method, and the tested AUC value " +#~ "is reported." #~ msgstr "" #~ msgid "" -#~ "In the first two lines, we " -#~ "instruct Docker to use the SuperNode " -#~ "image tagged ``nightly`` as a base " -#~ "image and set our working directory " -#~ "to ``/app``. The following instructions " -#~ "will now be executed in the " -#~ "``/app`` directory. 
Next, we install the" -#~ " ``ClientApp`` dependencies by copying the" -#~ " ``requirements.txt`` file into the image" -#~ " and run ``pip install``. In the " -#~ "last two lines, we copy the " -#~ "``ClientApp`` code (``client.py`` and " -#~ "``task.py``) into the image and set " -#~ "the entry point to ``flower-client-" -#~ "app``." +#~ "As for distributed evaluation on the " +#~ "clients, it's same as the quick-" +#~ "start example by overriding the " +#~ ":code:`evaluate()` method insides the " +#~ ":code:`XgbClient` class in :code:`client_utils.py`." #~ msgstr "" #~ msgid "" -#~ "Next, we build the SuperNode Docker " -#~ "image by running the following command" -#~ " in the directory where Dockerfile " -#~ "and client-app code are located." +#~ "We also provide an example code " +#~ "(:code:`sim.py`) to use the simulation " +#~ "capabilities of Flower to simulate " +#~ "federated XGBoost training on either a" +#~ " single machine or a cluster of " +#~ "machines." #~ msgstr "" #~ msgid "" -#~ "``client:app``: The object reference of " -#~ "the ``ClientApp`` (``:``)." +#~ "After importing all required packages, " +#~ "we define a :code:`main()` function to" +#~ " perform the simulation process:" #~ msgstr "" #~ msgid "" -#~ "It points to the ``ClientApp`` that " -#~ "will be run inside the SuperNode " -#~ "container." +#~ "We first load the dataset and " +#~ "perform data partitioning, and the " +#~ "pre-processed data is stored in a " +#~ ":code:`list`. After the simulation begins, " +#~ "the clients won't need to pre-" +#~ "process their partitions again." #~ msgstr "" #~ msgid "" -#~ "Assuming the certificate already exists " -#~ "locally, we can use the flag " -#~ "``-v`` to mount the local certificate" -#~ " into the container's ``/app/`` directory." -#~ " This allows the SuperNode to access" -#~ " the certificate within the container. " -#~ "Use the ``--certificates`` flag when " -#~ "starting the container." 
+#~ "After that, we start the simulation " +#~ "by calling :code:`fl.simulation.start_simulation`:" #~ msgstr "" #~ msgid "" -#~ "If you want to use a different " -#~ "version of Flower, for example Flower" -#~ " nightly, you can do so by " -#~ "changing the tag. All available versions" -#~ " are on `Docker Hub " -#~ "`_." +#~ "One of key parameters for " +#~ ":code:`start_simulation` is :code:`client_fn` which" +#~ " returns a function to construct a" +#~ " client. We define it as follows:" #~ msgstr "" #~ msgid "" -#~ "Run |runsimcli_link|_ in CLI and point" -#~ " to the ``server_app`` / ``client_app`` " -#~ "object in the code instead of " -#~ "executing the Python script. Here's an" -#~ " example (assuming the ``server_app`` and" -#~ " ``client_app`` objects are in a " -#~ "``sim.py`` module):" +#~ "In :code:`utils.py`, we define the " +#~ "arguments parsers for clients, server " +#~ "and simulation, allowing users to " +#~ "specify different experimental settings. Let's" +#~ " first see the sever side:" #~ msgstr "" -#~ msgid "start\\_driver" +#~ msgid "" +#~ "This allows user to specify training " +#~ "strategies / the number of total " +#~ "clients / FL rounds / participating " +#~ "clients / clients for evaluation, and" +#~ " evaluation fashion. Note that with " +#~ ":code:`--centralised-eval`, the sever will " +#~ "do centralised evaluation and all " +#~ "functionalities for client evaluation will " +#~ "be disabled." #~ msgstr "" -#~ msgid "run\\_simulation\\_from\\_cli" +#~ msgid "" +#~ "This defines various options for client" +#~ " data partitioning. Besides, clients also" +#~ " have an option to conduct evaluation" +#~ " on centralised test set by setting" +#~ " :code:`--centralised-eval`, as well as " +#~ "an option to perform scaled learning " +#~ "rate based on the number of " +#~ "clients by setting :code:`--scaled-lr`." 
#~ msgstr "" #~ msgid "" -#~ "We now have a list of ten " -#~ "training sets and ten validation sets" -#~ " (``trainloaders`` and ``valloaders``) " -#~ "representing the data of ten different" -#~ " organizations. Each ``trainloader``/``valloader`` " -#~ "pair contains 4500 training examples and" -#~ " 500 validation examples. There's also " -#~ "a single ``testloader`` (we did not " -#~ "split the test set). Again, this " -#~ "is only necessary for building research" -#~ " or educational systems, actual federated" -#~ " learning systems have their data " -#~ "naturally distributed across multiple " -#~ "partitions." +#~ "The full `code " +#~ "`_ for this comprehensive " +#~ "example can be found in :code:`examples" +#~ "/xgboost-comprehensive`." +#~ msgstr "" + +#~ msgid "|b8714c45b74b4d8fb008e2ebb3bc1d44|" +#~ msgstr "" + +#~ msgid "|75f1561efcfd422ea67d28d1513120dc|" +#~ msgstr "" + +#~ msgid "|6a1f51b235304558a9bdaaabfc93b8d2|" +#~ msgstr "" + +#~ msgid "|35e70dab1fb544af9aa3a9c09c4f9797|" +#~ msgstr "" + +#~ msgid "|d7efb5705dd3467f991ed23746824a07|" +#~ msgstr "" + +#~ msgid "|94e7b021c7b540bfbedf7f082a41ff87|" +#~ msgstr "" + +#~ msgid "|a80714782dde439ab73936518f91fc3c|" +#~ msgstr "" + +#~ msgid "|c62080ca6197473da57d191c8225a9d9|" +#~ msgstr "" + +#~ msgid "|21a8f1e6a5b14a7bbb8559979d0e8a2b|" +#~ msgstr "" + +#~ msgid "|c310f2a22f7b4917bf42775aae7a1c09|" +#~ msgstr "" + +#~ msgid "|a0c5b43401194535a8460bcf02e65f9a|" +#~ msgstr "" + +#~ msgid "|aabfdbd5564e41a790f8ea93cc21a444|" +#~ msgstr "" + +#~ msgid "|c9cc8f160fa647b09e742fe4dc8edb54|" +#~ msgstr "" + +#~ msgid "|7e83aad011cd4907b2f02f907c6922e9|" +#~ msgstr "" + +#~ msgid "|4627c2bb6cc443ae9e079f81f33c9dd9|" +#~ msgstr "" + +#~ msgid "|131af8322dc5466b827afd24be98f8c0|" +#~ msgstr "" + +#~ msgid "|f92920b87f3a40179bf7ddd0b6144c53|" +#~ msgstr "" + +#~ msgid "|d62da263071d45a496f543e41fce3a19|" +#~ msgstr "" + +#~ msgid "|ad851971645b4e1fbf8d15bcc0b2ee11|" +#~ msgstr "" + +#~ msgid 
"|929e9a6de6b34edb8488e644e2bb5221|" +#~ msgstr "" + +#~ msgid "|404cf9c9e8d64784a55646c0f9479cbc|" +#~ msgstr "" + +#~ msgid "|b021ff9d25814458b1e631f8985a648b|" +#~ msgstr "" + +#~ msgid "|e6ca84e1df244f238288a768352678e5|" +#~ msgstr "" + +#~ msgid "|39c2422082554a21963baffb33a0d057|" +#~ msgstr "" + +#~ msgid "|07ecf5fcd6814e88906accec6fa0fbfb|" +#~ msgstr "" + +#~ msgid "|57e78c0ca8a94ba5a64a04b1f2280e55|" +#~ msgstr "" + +#~ msgid "|9819b40e59ee40a4921e1244e8c99bac|" +#~ msgstr "" + +#~ msgid "|797bf279c4894b5ead31dc9b0534ed62|" #~ msgstr "" -#~ msgid "|191c6b8b5e1d46f99de4872746afa8af|" +#~ msgid "|3a7aceef05f0421794726ac54aaf12fd|" #~ msgstr "" -#~ msgid "|21b83f3feb024a049617190555a13549|" +#~ msgid "|d741075f8e624331b42c0746f7d258a0|" #~ msgstr "" -#~ msgid "|0dd15b4df7e3422f88aaf74cb401bfa7|" +#~ msgid "|8fc92d668bcb42b8bda55143847f2329|" #~ msgstr "" -#~ msgid "|60e16f6be7354ca793444e01aa7adf25|" +#~ msgid "|1c705d833a024f22adcaeb8ae3d13b0b|" #~ msgstr "" -#~ msgid "|a7032acbd65948a8beef8bccbbb9b83a|" +#~ msgid "|77a037b546a84262b608e04bc82a2c96|" #~ msgstr "" -#~ msgid "|dd0e05706e584ee29e07cd39e6af5498|" +#~ msgid "|f568e24c9fb0435690ac628210a4be96|" #~ msgstr "" -#~ msgid "|2a2031018a1c4f81a69ea16df4947bd0|" +#~ msgid "|a7bf029981514e2593aa3a2b48c9d76a|" #~ msgstr "" -#~ msgid "|5e841497933340d3b5c2efbf37e3e6a6|" +#~ msgid "|3f645ad807f84be8b1f8f3267173939c|" #~ msgstr "" -#~ msgid "|19687aecbc3a485da999b66fe2051005|" +#~ msgid "|a06a9dbd603f45819afd8e8cfc3c4b8f|" #~ msgstr "" -#~ msgid "|32ef0bbade4d4500b7be97cf62405661|" +#~ msgid "|edcf9a04d96e42608fd01a333375febe|" #~ msgstr "" -#~ msgid "|9d57ed324b304a698263f5a983a56a6b|" +#~ msgid "|3dae22fe797043968e2b7aa7073c78bd|" #~ msgstr "" -#~ msgid "|d41510e6781c4bf18c234c6bfb8d4937|" +#~ msgid "|ba178f75267d4ad8aa7363f20709195f|" #~ msgstr "" -#~ msgid "|a0198a7ebbfb4b9289e7312711cbc967|" +#~ msgid "|c380c750bfd2444abce039a1c6fa8e60|" #~ msgstr "" -#~ msgid "|2c13f726c8c843fc8aae997bf906125b|" +#~ 
msgid "|e7cec00a114b48359935c6510595132e|" #~ msgstr "" diff --git a/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po b/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po index f0127ad93ed7..393c04bb0b13 100644 --- a/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po +++ b/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: Flower main\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-05-28 11:47+0200\n" +"POT-Creation-Date: 2024-10-10 00:29+0000\n" "PO-Revision-Date: 2024-05-25 11:09+0000\n" "Last-Translator: Gustavo Bertoli \n" "Language: pt_BR\n" @@ -17,52 +17,198 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.15.0\n" +"Generated-By: Babel 2.16.0\n" -#: ../../source/contributor-explanation-architecture.rst:2 -msgid "Flower Architecture" -msgstr "Arquitetura do Flower" +#: ../../source/contributor-explanation-public-and-private-apis.rst:2 +msgid "Public and private APIs" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:4 +msgid "" +"In Python, everything is public. To enable developers to understand which" +" components can be relied upon, Flower declares a public API. Components " +"that are part of the public API can be relied upon. Changes to the public" +" API are announced in the release notes and are subject to deprecation " +"policies." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:9 +msgid "" +"Everything that is not part of the public API is part of the private API." +" Even though Python allows accessing them, user code should never use " +"those components. Private APIs can change at any time, even in patch " +"releases." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:13 +msgid "" +"How can you determine whether a component is part of the public API or " +"not? 
Easy:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:15 +msgid "`Use the Flower API reference documentation `_" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:16 +msgid "`Use the Flower CLI reference documentation `_" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:18 +msgid "" +"Everything listed in the reference documentation is part of the public " +"API. This document explains how Flower maintainers define the public API " +"and how you can determine whether a component is part of the public API " +"or not by reading the Flower source code." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:23 +msgid "Flower public API" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:25 +msgid "Flower has a well-defined public API. Let's look at this in more detail." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:29 +msgid "" +"Every component that is reachable by recursively following " +"``__init__.__all__`` starting from the root package (``flwr``) is part of" +" the public API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:32 +msgid "" +"If you want to determine whether a component " +"(class/function/generator/...) is part of the public API or not, you need" +" to start at the root of the ``flwr`` package. 
Let's use ``tree -L 1 -d " +"src/py/flwr`` to look at the Python sub-packages contained ``flwr``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:46 +msgid "" +"Contrast this with the definition of ``__all__`` in the root " +"``src/py/flwr/__init__.py``:" +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:5 -msgid "Edge Client Engine" -msgstr "Engine do Edge Client" +#: ../../source/contributor-explanation-public-and-private-apis.rst:59 +msgid "" +"You can see that ``flwr`` has six subpackages (``cli``, ``client``, " +"``common``, ``proto``, ``server``, ``simulation``), but only four of them" +" are \"exported\" via ``__all__`` (``client``, ``common``, ``server``, " +"``simulation``)." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:63 +msgid "" +"What does this mean? It means that ``client``, ``common``, ``server`` and" +" ``simulation`` are part of the public API, but ``cli`` and ``proto`` are" +" not. The ``flwr`` subpackages ``cli`` and ``proto`` are private APIs. A " +"private API can change completely from one release to the next (even in " +"patch releases). It can change in a breaking way, it can be renamed (for " +"example, ``flwr.cli`` could be renamed to ``flwr.command``) and it can " +"even be removed completely." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:70 +msgid "Therefore, as a Flower user:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:72 +msgid "``from flwr import client`` ✅ Ok, you're importing a public API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:73 +msgid "" +"``from flwr import proto`` ❌ Not recommended, you're importing a private " +"API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:75 +msgid "" +"What about components that are nested deeper in the hierarchy? 
Let's look" +" at Flower strategies to see another typical pattern. Flower strategies " +"like ``FedAvg`` are often imported using ``from flwr.server.strategy " +"import FedAvg``. Let's look at " +"``src/py/flwr/server/strategy/__init__.py``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:91 +msgid "" +"What's notable here is that all strategies are implemented in dedicated " +"modules (e.g., ``fedavg.py``). In ``__init__.py``, we *import* the " +"components we want to make part of the public API and then *export* them " +"via ``__all__``. Note that we export the component itself (for example, " +"the ``FedAvg`` class), but not the module it is defined in (for example, " +"``fedavg.py``). This allows us to move the definition of ``FedAvg`` into " +"a different module (or even a module in a subpackage) without breaking " +"the public API (as long as we update the import path in ``__init__.py``)." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:99 +msgid "Therefore:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:101 +msgid "" +"``from flwr.server.strategy import FedAvg`` ✅ Ok, you're importing a " +"class that is part of the public API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:103 +msgid "" +"``from flwr.server.strategy import fedavg`` ❌ Not recommended, you're " +"importing a private module." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:7 +#: ../../source/contributor-explanation-public-and-private-apis.rst:106 msgid "" -"`Flower `_ core framework architecture with Edge " -"Client Engine" +"This approach is also implemented in the tooling that automatically " +"builds API reference docs." 
+msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:110 +msgid "Flower public API of private packages" msgstr "" -"`Flower `_ arquitetura principal do framework com " -"Engine do Edge Client" -#: ../../source/contributor-explanation-architecture.rst:13 -msgid "Virtual Client Engine" -msgstr "Engine do Virtual Client" +#: ../../source/contributor-explanation-public-and-private-apis.rst:112 +msgid "" +"We also use this to define the public API of private subpackages. Public," +" in this context, means the API that other ``flwr`` subpackages should " +"use. For example, ``flwr.server.driver`` is a private subpackage (it's " +"not exported via ``src/py/flwr/server/__init__.py``'s ``__all__``)." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:15 +#: ../../source/contributor-explanation-public-and-private-apis.rst:117 msgid "" -"`Flower `_ core framework architecture with Virtual " -"Client Engine" +"Still, the private sub-package ``flwr.server.driver`` defines a " +"\"public\" API using ``__all__`` in " +"``src/py/flwr/server/driver/__init__.py``:" msgstr "" -"`Flower `_ arquitetura principal do framework com " -"Engine do Virtual Client" -#: ../../source/contributor-explanation-architecture.rst:21 -msgid "Virtual Client Engine and Edge Client Engine in the same workload" +#: ../../source/contributor-explanation-public-and-private-apis.rst:132 +msgid "" +"The interesting part is that both ``GrpcDriver`` and ``InMemoryDriver`` " +"are never used by Flower framework users, only by other parts of the " +"Flower framework codebase. Those other parts of the codebase import, for " +"example, ``InMemoryDriver`` using ``from flwr.server.driver import " +"InMemoryDriver`` (i.e., the ``InMemoryDriver`` exported via ``__all__``)," +" not ``from flwr.server.driver.in_memory_driver import InMemoryDriver`` " +"(``in_memory_driver.py`` is the module containing the actual " +"``InMemoryDriver`` class definition)." 
msgstr "" -"Engine do Virtual Client e do Edge Client no mesma carga de trabalho " -"(workload)" -#: ../../source/contributor-explanation-architecture.rst:23 +#: ../../source/contributor-explanation-public-and-private-apis.rst:140 msgid "" -"`Flower `_ core framework architecture with both " -"Virtual Client Engine and Edge Client Engine" +"This is because ``flwr.server.driver`` defines a public interface for " +"other ``flwr`` subpackages. This allows codeowners of " +"``flwr.server.driver`` to refactor the package without breaking other " +"``flwr``-internal users." msgstr "" -"`Flower `_ arquitetura principal do framework com " -"ambas engines do Virtual Client e do Edge Client" #: ../../source/contributor-how-to-build-docker-images.rst:2 -msgid "How to build Docker Flower images locally" +#, fuzzy +msgid "How to Build Docker Flower Images Locally" msgstr "Como construir imagens Docker do Flower localmente" #: ../../source/contributor-how-to-build-docker-images.rst:4 @@ -70,10 +216,11 @@ msgstr "Como construir imagens Docker do Flower localmente" msgid "" "Flower provides pre-made docker images on `Docker Hub " "`_ that include all necessary dependencies" -" for running the SuperLink. You can also build your own custom docker " -"images from scratch with a different version of Python or Ubuntu if that " -"is what you need. In this guide, we will explain what images exist and " -"how to build them locally." +" for running the SuperLink, SuperNode or ServerApp. You can also build " +"your own custom docker images from scratch with a different version of " +"Python or Linux distribution (Ubuntu/Alpine) if that is what you need. In" +" this guide, we will explain what images exist and how to build them " +"locally." msgstr "" "Flower disponibiliza imagens docker em `Docker Hub " "`_ que incluem todas as " @@ -82,7 +229,7 @@ msgstr "" " diferente do Python ou do Ubuntu se isso for o que você precisa. 
Neste " "guia, explicaremos quais imagens existem e como compilar localmente." -#: ../../source/contributor-how-to-build-docker-images.rst:9 +#: ../../source/contributor-how-to-build-docker-images.rst:10 msgid "" "Before we can start, we need to meet a few prerequisites in our local " "development environment." @@ -90,41 +237,16 @@ msgstr "" "Antes de começarmos, precisamos encontrar alguns pré-requisitos em nosso " "ambiente de desenvolvimento local." -#: ../../source/contributor-how-to-build-docker-images.rst:11 -msgid "Clone the flower repository." +#: ../../source/contributor-how-to-build-docker-images.rst:13 +#, fuzzy +msgid "Clone the ``flower`` repository." msgstr "Clone o repositório do flower." -#: ../../source/contributor-how-to-build-docker-images.rst:17 -#: ../../source/how-to-run-flower-using-docker.rst:144 +#: ../../source/contributor-how-to-build-docker-images.rst:19 msgid "Verify the Docker daemon is running." msgstr "Verifique que o serviço Docker está rodando." -#: ../../source/contributor-how-to-build-docker-images.rst:19 -#: ../../source/how-to-run-flower-using-docker.rst:146 -msgid "" -"Please follow the first section on :doc:`Run Flower using Docker ` which covers this step in more detail." -msgstr "" -"Por favor, siga a primeira seção em :doc:`Execute o Flower usando Docker " -"` que cobre este passo em mais detalhes." - -#: ../../source/contributor-how-to-build-docker-images.rst:23 -#, fuzzy -msgid "" -"Currently, Flower provides two images, a ``base`` image and a " -"``superlink`` image. The base image, as the name suggests, contains basic" -" dependencies that the SuperLink needs. This includes system " -"dependencies, Python and Python tools. The SuperLink image is based on " -"the base image, but it additionally installs the SuperLink using ``pip``." -msgstr "" -"Atualmente, Flower fornece duas imagens, uma imagem base e uma imagem de " -"servidor. Também haverá uma imagem de cliente em breve. 
A imagem base, " -"como o nome sugere, contém dependências básicas que tanto o servidor " -"quanto o cliente precisam. Isso inclui dependências do sistema, Python e " -"ferramentas Python. A imagem do servidor é baseada na imagem base, mas " -"também instala o servidor Flower usando ``pip```." - -#: ../../source/contributor-how-to-build-docker-images.rst:28 +#: ../../source/contributor-how-to-build-docker-images.rst:21 msgid "" "The build instructions that assemble the images are located in the " "respective Dockerfiles. You can find them in the subdirectories of " @@ -134,16 +256,16 @@ msgstr "" "respectivos Dockerfiles. Você pode encontrá-los nos subdiretórios " "``src/docker```." -#: ../../source/contributor-how-to-build-docker-images.rst:31 +#: ../../source/contributor-how-to-build-docker-images.rst:24 #, fuzzy msgid "" -"Both, base and SuperLink image are configured via build arguments. " -"Through build arguments, we can make our build more flexible. For " -"example, in the base image, we can specify the version of Python to " -"install using the ``PYTHON_VERSION`` build argument. Some of the build " -"arguments have default values, others must be specified when building the" -" image. All available build arguments for each image are listed in one of" -" the tables below." +"Flower Docker images are configured via build arguments. Through build " +"arguments, we can make the creation of images more flexible. For example," +" in the base image, we can specify the version of Python to install using" +" the ``PYTHON_VERSION`` build argument. Some of the build arguments have " +"default values, others must be specified when building the image. All " +"available build arguments for each image are listed in one of the tables " +"below." msgstr "" "Ambas, imagens base e do servidor são configuradas através dos argumentos" " de compilação. 
Através dos argumentos de compilação, podemos tornar " @@ -154,196 +276,224 @@ msgstr "" "Todos os argumentos de compilação disponíveis para cada imagem estão " "listados em uma das tabelas abaixo." -#: ../../source/contributor-how-to-build-docker-images.rst:38 -msgid "Building the base image" +#: ../../source/contributor-how-to-build-docker-images.rst:32 +#, fuzzy +msgid "Building the Base Image" msgstr "Construindo a imagem base" -#: ../../source/contributor-how-to-build-docker-images.rst:44 -#: ../../source/contributor-how-to-build-docker-images.rst:86 +#: ../../source/contributor-how-to-build-docker-images.rst:38 +#: ../../source/contributor-how-to-build-docker-images.rst:104 msgid "Build argument" msgstr "Argumento de compilação" -#: ../../source/contributor-how-to-build-docker-images.rst:45 -#: ../../source/contributor-how-to-build-docker-images.rst:87 +#: ../../source/contributor-how-to-build-docker-images.rst:39 +#: ../../source/contributor-how-to-build-docker-images.rst:105 msgid "Description" msgstr "Descrição" -#: ../../source/contributor-how-to-build-docker-images.rst:46 -#: ../../source/contributor-how-to-build-docker-images.rst:88 +#: ../../source/contributor-how-to-build-docker-images.rst:40 +#: ../../source/contributor-how-to-build-docker-images.rst:106 msgid "Required" msgstr "Necessário" -#: ../../source/contributor-how-to-build-docker-images.rst:47 -#: ../../source/contributor-how-to-build-docker-images.rst:89 +#: ../../source/contributor-how-to-build-docker-images.rst:41 +#: ../../source/contributor-how-to-build-docker-images.rst:107 +#: ../../source/docker/persist-superlink-state.rst:19 +#: ../../source/docker/pin-version.rst:12 +#: ../../source/docker/set-environment-variables.rst:8 msgid "Example" msgstr "Exemplo" +#: ../../source/contributor-how-to-build-docker-images.rst:42 +msgid "``DISTRO``" +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:43 +#, fuzzy +msgid "The Linux distribution to use as the base image." 
+msgstr "O nome do repositório da imagem base." + +#: ../../source/contributor-how-to-build-docker-images.rst:44 #: ../../source/contributor-how-to-build-docker-images.rst:48 -#: ../../source/contributor-how-to-build-docker-images.rst:94 -msgid "``PYTHON_VERSION``" -msgstr "``PYTHON_VERSION``" +#: ../../source/contributor-how-to-build-docker-images.rst:52 +#: ../../source/contributor-how-to-build-docker-images.rst:68 +#: ../../source/contributor-how-to-build-docker-images.rst:75 +#: ../../source/contributor-how-to-build-docker-images.rst:110 +msgid "No" +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:45 +#, fuzzy +msgid "``ubuntu``" +msgstr "``UBUNTU_VERSION``" + +#: ../../source/contributor-how-to-build-docker-images.rst:46 +#, fuzzy +msgid "``DISTRO_VERSION``" +msgstr "``PIP_VERSION``" + +#: ../../source/contributor-how-to-build-docker-images.rst:47 +msgid "Version of the Linux distribution." +msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:49 -msgid "Version of ``python`` to be installed." -msgstr "Versão do ``python`` a ser instalada." +msgid ":substitution-code:`|ubuntu_version|`" +msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:50 -#: ../../source/contributor-how-to-build-docker-images.rst:54 -#: ../../source/contributor-how-to-build-docker-images.rst:58 -#: ../../source/contributor-how-to-build-docker-images.rst:108 -msgid "Yes" -msgstr "Sim" +msgid "``PYTHON_VERSION``" +msgstr "``PYTHON_VERSION``" #: ../../source/contributor-how-to-build-docker-images.rst:51 -msgid "``3.11``" -msgstr "``3.11``" +msgid "Version of ``python`` to be installed." +msgstr "Versão do ``python`` a ser instalada." 
-#: ../../source/contributor-how-to-build-docker-images.rst:52 +#: ../../source/contributor-how-to-build-docker-images.rst:53 +msgid "``3.11`` or ``3.11.1``" +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:54 msgid "``PIP_VERSION``" msgstr "``PIP_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:53 +#: ../../source/contributor-how-to-build-docker-images.rst:55 msgid "Version of ``pip`` to be installed." msgstr "Versão do ``pip`` a ser instalada." -#: ../../source/contributor-how-to-build-docker-images.rst:55 -msgid "``23.0.1``" -msgstr "``23.0.1``" - #: ../../source/contributor-how-to-build-docker-images.rst:56 +#: ../../source/contributor-how-to-build-docker-images.rst:60 +#: ../../source/contributor-how-to-build-docker-images.rst:64 +#: ../../source/contributor-how-to-build-docker-images.rst:114 +msgid "Yes" +msgstr "Sim" + +#: ../../source/contributor-how-to-build-docker-images.rst:57 +msgid ":substitution-code:`|pip_version|`" +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:58 msgid "``SETUPTOOLS_VERSION``" msgstr "``SETUPTOOLS_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:57 +#: ../../source/contributor-how-to-build-docker-images.rst:59 msgid "Version of ``setuptools`` to be installed." msgstr "Versão do ``setuptools`` a ser instalada." -#: ../../source/contributor-how-to-build-docker-images.rst:59 -msgid "``69.0.2``" -msgstr "``69.0.2``" - -#: ../../source/contributor-how-to-build-docker-images.rst:60 -#: ../../source/contributor-how-to-build-docker-images.rst:98 -msgid "``UBUNTU_VERSION``" -msgstr "``UBUNTU_VERSION``" - #: ../../source/contributor-how-to-build-docker-images.rst:61 -msgid "Version of the official Ubuntu Docker image." -msgstr "Versão da imagem Docker oficial do Ubuntu." 
+#, fuzzy +msgid ":substitution-code:`|setuptools_version|`" +msgstr "``SETUPTOOLS_VERSION``" #: ../../source/contributor-how-to-build-docker-images.rst:62 -msgid "Defaults to ``22.04``." -msgstr "Como padrão ``22.04``." +msgid "``FLWR_VERSION``" +msgstr "``FLWR_VERSION``" + +#: ../../source/contributor-how-to-build-docker-images.rst:63 +msgid "Version of Flower to be installed." +msgstr "Versão do Flower a ser instalada." #: ../../source/contributor-how-to-build-docker-images.rst:65 +msgid ":substitution-code:`|stable_flwr_version|`" +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:66 +#, fuzzy +msgid "``FLWR_PACKAGE``" +msgstr "``FLWR_VERSION``" + +#: ../../source/contributor-how-to-build-docker-images.rst:67 +#, fuzzy +msgid "The Flower package to be installed." +msgstr "Versão do Flower a ser instalada." + +#: ../../source/contributor-how-to-build-docker-images.rst:69 +msgid "``flwr`` or ``flwr-nightly``" +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:70 +#, fuzzy +msgid "``FLWR_VERSION_REF``" +msgstr "``FLWR_VERSION``" + +#: ../../source/contributor-how-to-build-docker-images.rst:71 +msgid "" +"A `direct reference " +"`_ without the ``@`` specifier. If both " +"``FLWR_VERSION`` and ``FLWR_VERSION_REF`` are specified, the " +"``FLWR_VERSION_REF`` has precedence." 
+msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:76 +msgid "`Direct Reference Examples`_" +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:78 +#, fuzzy msgid "" -"The following example creates a base image with Python 3.11.0, pip 23.0.1" -" and setuptools 69.0.2:" +"The following example creates a base Ubuntu/Alpine image with Python " +"``3.11.0``, pip :substitution-code:`|pip_version|`, setuptools " +":substitution-code:`|setuptools_version|` and Flower :substitution-" +"code:`|stable_flwr_version|`:" msgstr "" "O exemplo seguinte cria uma imagem base com Python 3.11.0, pip 23.0.1 e " "setuptools 69.0.2:" -#: ../../source/contributor-how-to-build-docker-images.rst:76 +#: ../../source/contributor-how-to-build-docker-images.rst:93 +#, fuzzy msgid "" -"The name of image is ``flwr_base`` and the tag ``0.1.0``. Remember that " -"the build arguments as well as the name and tag can be adapted to your " -"needs. These values serve as examples only." +"In this example, we specify our image name as ``flwr_base`` and the tag " +"as ``0.1.0``. Remember that the build arguments as well as the name and " +"tag can be adapted to your needs. These values serve as examples only." msgstr "" "O nome da imagem é ``flwr_base`` com a tag ``0.1.0``. Lembre-se que os " "argumentos de construção assim como o nome e a tag podem ser adaptados de" " acordo com suas necessidades. Estes valores servem apenas como exemplo." 
-#: ../../source/contributor-how-to-build-docker-images.rst:80 +#: ../../source/contributor-how-to-build-docker-images.rst:98 #, fuzzy -msgid "Building the SuperLink image" -msgstr "Construindo a imagem do servidor" +msgid "Building a Flower Binary Image" +msgstr "Construindo a imagem base" -#: ../../source/contributor-how-to-build-docker-images.rst:90 +#: ../../source/contributor-how-to-build-docker-images.rst:108 msgid "``BASE_REPOSITORY``" msgstr "``BASE_REPOSITORY``" -#: ../../source/contributor-how-to-build-docker-images.rst:91 +#: ../../source/contributor-how-to-build-docker-images.rst:109 msgid "The repository name of the base image." msgstr "O nome do repositório da imagem base." -#: ../../source/contributor-how-to-build-docker-images.rst:92 -#, fuzzy -msgid "Defaults to ``flwr/base``." -msgstr "Pré-definido para ``flwr/server``." - -#: ../../source/contributor-how-to-build-docker-images.rst:95 -#, fuzzy -msgid "The Python version of the base image." -msgstr "O nome do repositório da imagem base." - -#: ../../source/contributor-how-to-build-docker-images.rst:96 -#, fuzzy -msgid "Defaults to ``py3.11``." -msgstr "Como padrão ``22.04``." - -#: ../../source/contributor-how-to-build-docker-images.rst:99 -#, fuzzy -msgid "The Ubuntu version of the base image." -msgstr "O nome do repositório da imagem base." - -#: ../../source/contributor-how-to-build-docker-images.rst:100 -#, fuzzy -msgid "Defaults to ``ubuntu22.04``." -msgstr "Pré-definido para ``py3.11-ubuntu22.04``." - -#: ../../source/contributor-how-to-build-docker-images.rst:102 +#: ../../source/contributor-how-to-build-docker-images.rst:111 #, fuzzy -msgid "``FLWR_PACKAGE``" +msgid "``flwr/base``" msgstr "``FLWR_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:103 -msgid "The PyPI package to install." -msgstr "" - -#: ../../source/contributor-how-to-build-docker-images.rst:104 +#: ../../source/contributor-how-to-build-docker-images.rst:112 #, fuzzy -msgid "Defaults to ``flwr``." 
-msgstr "Pré-definido para ``flwr/server``." - -#: ../../source/contributor-how-to-build-docker-images.rst:106 -msgid "``FLWR_VERSION``" -msgstr "``FLWR_VERSION``" - -#: ../../source/contributor-how-to-build-docker-images.rst:107 -msgid "Version of Flower to be installed." -msgstr "Versão do Flower a ser instalada." +msgid "``BASE_IMAGE``" +msgstr "``BASE_REPOSITORY``" -#: ../../source/contributor-how-to-build-docker-images.rst:109 +#: ../../source/contributor-how-to-build-docker-images.rst:113 #, fuzzy -msgid "``1.8.0``" -msgstr "``1.7.0``" +msgid "The Tag of the Flower base image." +msgstr "O nome do repositório da imagem base." -#: ../../source/contributor-how-to-build-docker-images.rst:112 -#, fuzzy -msgid "" -"The following example creates a SuperLink image with the official Flower " -"base image py3.11-ubuntu22.04 and Flower 1.8.0:" +#: ../../source/contributor-how-to-build-docker-images.rst:115 +msgid ":substitution-code:`|stable_flwr_version|-py3.11-ubuntu|ubuntu_version|`" msgstr "" -"O exemplo a seguir cria uma imagem de servidor com a imagem base oficial " -"do Flower py3.11-ubuntu22.04 e Flower 1.7.0:" -#: ../../source/contributor-how-to-build-docker-images.rst:122 -#, fuzzy +#: ../../source/contributor-how-to-build-docker-images.rst:117 msgid "" -"The name of image is ``flwr_superlink`` and the tag ``0.1.0``. Remember " -"that the build arguments as well as the name and tag can be adapted to " -"your needs. These values serve as examples only." +"For example, to build a SuperLink image with the latest Flower version, " +"Python 3.11 and Ubuntu 22.04, run the following:" msgstr "" -"O nome da imagem é ``flwr_server`` e a tag ``0.1.0``. Lembre-se que os " -"argumentos de compilação, bem como o nome e a tag podem ser adaptados às " -"suas necessidades. Esses valores servem apenas como exemplos." 
-#: ../../source/contributor-how-to-build-docker-images.rst:125 +#: ../../source/contributor-how-to-build-docker-images.rst:128 #, fuzzy msgid "" "If you want to use your own base image instead of the official Flower " -"base image, all you need to do is set the ``BASE_REPOSITORY``, " -"``PYTHON_VERSION`` and ``UBUNTU_VERSION`` build arguments." +"base image, all you need to do is set the ``BASE_REPOSITORY`` build " +"argument to ``flwr_base`` (as we've specified above)." msgstr "" "Se você quiser usar sua própria imagem base ao invés da imagem oficial " "base do Flower, tudo que você precisa fazer é definir os argumentos " @@ -352,10 +502,14 @@ msgstr "" "sua imagem e o valor de ``BASE_IMAGE_TAG`` deve corresponder à tag da sua" " imagem." -#: ../../source/contributor-how-to-build-docker-images.rst:138 +#: ../../source/contributor-how-to-build-docker-images.rst:140 msgid "After creating the image, we can test whether the image is working:" msgstr "Depois de criar a imagem, podemos testar se a imagem está funcionando:" +#: ../../source/contributor-how-to-build-docker-images.rst:147 +msgid "Direct Reference Examples" +msgstr "" + #: ../../source/contributor-how-to-contribute-translations.rst:2 msgid "Contribute translations" msgstr "Contribua com traduções" @@ -411,7 +565,7 @@ msgstr "" " as configurações de perfil podem ser encontradas `aqui " "`_." -#: ../../source/contributor-how-to-contribute-translations.rst:29 +#: ../../source/contributor-how-to-contribute-translations.rst:28 msgid "" "Once you are signed in to Weblate, you can navigate to the `Flower " "Framework project `_. Aqui, você deve ver os diferentes idiomas existentes " "que podem ser encontrados no site." 
-#: ../../source/contributor-how-to-contribute-translations.rst:34 +#: ../../source/contributor-how-to-contribute-translations.rst:32 msgid "" "Once you have selected the language you want to contribute to, you should" " see a similar interface to this:" @@ -431,7 +585,7 @@ msgstr "" "Uma vez que você tenha selecionado o idioma que deseja contribuir, você " "deve ver uma interface semelhante a esta:" -#: ../../source/contributor-how-to-contribute-translations.rst:39 +#: ../../source/contributor-how-to-contribute-translations.rst:37 msgid "" "The most straight forward option here is to click on the ``Translate`` " "button on the top right (in the ``Translation status`` section). This " @@ -443,12 +597,12 @@ msgstr "" "automaticamente para a interface de tradução de strings ainda não " "traduzidas." -#: ../../source/contributor-how-to-contribute-translations.rst:43 +#: ../../source/contributor-how-to-contribute-translations.rst:41 #, fuzzy msgid "This is what the interface looks like:" msgstr "É assim que a interface se parece:" -#: ../../source/contributor-how-to-contribute-translations.rst:47 +#: ../../source/contributor-how-to-contribute-translations.rst:45 msgid "" "You input your translation in the text box at the top and then, once you " "are happy with it, you either press ``Save and continue`` (to save the " @@ -465,7 +619,7 @@ msgstr "" "ou ``Skip`` (para ir para a próxima string não traduzida sem salvar nada " "na atual)." -#: ../../source/contributor-how-to-contribute-translations.rst:54 +#: ../../source/contributor-how-to-contribute-translations.rst:51 msgid "" "In order to help with the translations, you can see on the bottom the " "``Nearby strings``, the ``Comments`` (from other contributors), the " @@ -480,7 +634,7 @@ msgstr "" "(outras línguas), e o ``History`` (histórico) de traduções para esta " "string." 
-#: ../../source/contributor-how-to-contribute-translations.rst:59 +#: ../../source/contributor-how-to-contribute-translations.rst:56 msgid "" "On the right, under the ``String information`` section, you can also " "click the link under ``Source string location`` in order to view the " @@ -490,7 +644,7 @@ msgstr "" " link sob ``Source string location`` para visualizar a fonte do arquivo " "doc que contém a string." -#: ../../source/contributor-how-to-contribute-translations.rst:63 +#: ../../source/contributor-how-to-contribute-translations.rst:60 msgid "" "For more information about translating using Weblate, you can check out " "this `in-depth guide " @@ -500,12 +654,12 @@ msgstr "" "pode conferir este `guia detalhado " "`_." -#: ../../source/contributor-how-to-contribute-translations.rst:67 +#: ../../source/contributor-how-to-contribute-translations.rst:64 #, fuzzy msgid "Add new languages" msgstr "Adicionar novos idiomas" -#: ../../source/contributor-how-to-contribute-translations.rst:69 +#: ../../source/contributor-how-to-contribute-translations.rst:66 msgid "" "If you want to add a new language, you will first have to contact us, " "either on `Slack `_, or by opening an issue" @@ -516,210 +670,95 @@ msgstr "" "abrindo uma issue no nosso `repositório GitHub " "`_." -#: ../../source/contributor-how-to-create-new-messages.rst:2 -#, fuzzy -msgid "Creating New Messages" -msgstr "Criando novas mensagens" - -#: ../../source/contributor-how-to-create-new-messages.rst:4 -msgid "" -"This is a simple guide for creating a new type of message between the " -"server and clients in Flower." +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:2 +msgid "Develop in VSCode Dev Containers" msgstr "" -#: ../../source/contributor-how-to-create-new-messages.rst:6 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:4 msgid "" -"Let's suppose we have the following example functions in " -":code:`server.py` and :code:`numpy_client.py`..." 
+"When working on the Flower framework we want to ensure that all " +"contributors use the same developer environment to format code or run " +"tests. For this purpose we are using the VSCode Remote Containers " +"extension. What is it? Read the following quote:" msgstr "" -#: ../../source/contributor-how-to-create-new-messages.rst:8 -msgid "Server's side:" +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:8 +msgid "" +"The Visual Studio Code Remote - Containers extension lets you use a " +"Docker container as a fully-featured development environment. It allows " +"you to open any folder inside (or mounted into) a container and take " +"advantage of Visual Studio Code's full feature set. A " +"``devcontainer.json`` file in your project tells VS Code how to access " +"(or create) a development container with a well-defined tool and runtime " +"stack. This container can be used to run an application or to separate " +"tools, libraries, or runtimes needed for working with a codebase." msgstr "" -#: ../../source/contributor-how-to-create-new-messages.rst:17 -msgid "Client's side:" +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:16 +msgid "" +"Workspace files are mounted from the local file system or copied or " +"cloned into the container. Extensions are installed and run inside the " +"container, where they have full access to the tools, platform, and file " +"system. This means that you can seamlessly switch your entire development" +" environment just by connecting to a different container." msgstr "" -#: ../../source/contributor-how-to-create-new-messages.rst:26 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:22 msgid "" -"Let's now see what we need to implement in order to get this simple " -"function between the server and client to work!" 
+"Source: `Official VSCode documentation " +"`_" msgstr "" -#: ../../source/contributor-how-to-create-new-messages.rst:30 -msgid "Message Types for Protocol Buffers" +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:26 +msgid "Getting started" msgstr "" -#: ../../source/contributor-how-to-create-new-messages.rst:32 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:28 msgid "" -"The first thing we need to do is to define a message type for the RPC " -"system in :code:`transport.proto`. Note that we have to do it for both " -"the request and response messages. For more details on the syntax of " -"proto3, please see the `official documentation `_." +"Configuring and setting up the ``Dockerfile`` as well the configuration " +"for the devcontainer can be a bit more involved. The good thing is you " +"don't have to do it. Usually it should be enough to install `Docker " +"`_ on your system and ensure its" +" available on your command line. Additionally, install the `VSCode " +"Containers Extension `_." msgstr "" -#: ../../source/contributor-how-to-create-new-messages.rst:35 -msgid "Within the :code:`ServerMessage` block:" +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:35 +msgid "" +"Now you should be good to go. When starting VSCode, it will ask you to " +"run in the container environment and - if you confirm - automatically " +"build the container and use it. To manually instruct VSCode to use the " +"devcontainer, you can, after installing the extension, click the green " +"area in the bottom left corner of your VSCode window and select the " +"option *(Re)Open Folder in Container*." msgstr "" -#: ../../source/contributor-how-to-create-new-messages.rst:52 -msgid "Within the ClientMessage block:" +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:41 +msgid "" +"In some cases your setup might be more involved. 
For those cases consult " +"the following sources:" msgstr "" -#: ../../source/contributor-how-to-create-new-messages.rst:70 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:44 msgid "" -"Make sure to also add a field of the newly created message type in " -":code:`oneof msg`." +"`Developing inside a Container " +"`_" msgstr "" -#: ../../source/contributor-how-to-create-new-messages.rst:72 -msgid "Once that is done, we will compile the file with:" +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:46 +msgid "" +"`Remote development in Containers " +"`_" msgstr "" -#: ../../source/contributor-how-to-create-new-messages.rst:78 -msgid "If it compiles successfully, you should see the following message:" +#: ../../source/contributor-how-to-install-development-versions.rst:2 +msgid "Install development versions" msgstr "" -#: ../../source/contributor-how-to-create-new-messages.rst:87 -msgid "Serialization and Deserialization Functions" -msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:89 -msgid "" -"Our next step is to add functions to serialize and deserialize Python " -"datatypes to or from our defined RPC message types. You should add these " -"functions in :code:`serde.py`." -msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:91 -msgid "The four functions:" -msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:112 -msgid "Sending the Message from the Server" -msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:114 -msgid "" -"Now write the request function in your Client Proxy class (e.g., " -":code:`grpc_client_proxy.py`) using the serde functions you just created:" -msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:128 -msgid "Receiving the Message by the Client" -msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:130 -msgid "" -"Last step! 
Modify the code in :code:`message_handler.py` to check the " -"field of your message and call the :code:`example_response` function. " -"Remember to use the serde functions!" -msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:132 -msgid "Within the handle function:" -msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:139 -msgid "And add a new function:" -msgstr "" - -#: ../../source/contributor-how-to-create-new-messages.rst:149 -msgid "Hopefully, when you run your program you will get the intended result!" -msgstr "" - -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:2 -msgid "Develop in VSCode Dev Containers" -msgstr "" - -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:4 -msgid "" -"When working on the Flower framework we want to ensure that all " -"contributors use the same developer environment to format code or run " -"tests. For this purpose we are using the VSCode Remote Containers " -"extension. What is it? Read the following quote:" -msgstr "" - -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:7 -msgid "" -"The Visual Studio Code Remote - Containers extension lets you use a " -"Docker container as a fully-featured development environment. It allows " -"you to open any folder inside (or mounted into) a container and take " -"advantage of Visual Studio Code's full feature set. A " -":code:`devcontainer.json` file in your project tells VS Code how to " -"access (or create) a development container with a well-defined tool and " -"runtime stack. This container can be used to run an application or to " -"separate tools, libraries, or runtimes needed for working with a " -"codebase." -msgstr "" - -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:9 -msgid "" -"Workspace files are mounted from the local file system or copied or " -"cloned into the container. 
Extensions are installed and run inside the " -"container, where they have full access to the tools, platform, and file " -"system. This means that you can seamlessly switch your entire development" -" environment just by connecting to a different container." -msgstr "" - -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:11 -msgid "" -"Source: `Official VSCode documentation " -"`_" -msgstr "" - -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:15 -msgid "Getting started" -msgstr "" - -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:17 -msgid "" -"Configuring and setting up the :code:`Dockerfile` as well the " -"configuration for the devcontainer can be a bit more involved. The good " -"thing is you don't have to do it. Usually it should be enough to install " -"`Docker `_ on your system and " -"ensure its available on your command line. Additionally, install the " -"`VSCode Containers Extension `_." -msgstr "" - -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:19 -msgid "" -"Now you should be good to go. When starting VSCode, it will ask you to " -"run in the container environment and - if you confirm - automatically " -"build the container and use it. To manually instruct VSCode to use the " -"devcontainer, you can, after installing the extension, click the green " -"area in the bottom left corner of your VSCode window and select the " -"option *(Re)Open Folder in Container*." -msgstr "" - -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:21 -msgid "" -"In some cases your setup might be more involved. 
For those cases consult " -"the following sources:" -msgstr "" - -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:23 -msgid "" -"`Developing inside a Container " -"`_" -msgstr "" - -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:24 -msgid "" -"`Remote development in Containers " -"`_" -msgstr "" - -#: ../../source/contributor-how-to-install-development-versions.rst:2 -msgid "Install development versions" -msgstr "" - -#: ../../source/contributor-how-to-install-development-versions.rst:5 -msgid "Install development versions of Flower" +#: ../../source/contributor-how-to-install-development-versions.rst:5 +msgid "Install development versions of Flower" msgstr "" #: ../../source/contributor-how-to-install-development-versions.rst:8 @@ -733,154 +772,154 @@ msgid "" "``poetry.lock`` (``rm poetry.lock``) before running ``poetry install``)." msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:12 +#: ../../source/contributor-how-to-install-development-versions.rst:14 msgid "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (without " "extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:13 +#: ../../source/contributor-how-to-install-development-versions.rst:15 msgid "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true, extras = " "[\"simulation\"] }`` (with extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:15 +#: ../../source/contributor-how-to-install-development-versions.rst:18 msgid "" "Install ``flwr`` from a local copy of the Flower source code via " "``pyproject.toml``:" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:17 +#: ../../source/contributor-how-to-install-development-versions.rst:20 msgid "``flwr = { path = \"../../\", develop = true }`` (without extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:18 +#: 
../../source/contributor-how-to-install-development-versions.rst:21 msgid "" "``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] " "}`` (with extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:20 +#: ../../source/contributor-how-to-install-development-versions.rst:23 msgid "Install ``flwr`` from a local wheel file via ``pyproject.toml``:" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:22 +#: ../../source/contributor-how-to-install-development-versions.rst:25 msgid "" "``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\" }`` (without" " extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:23 +#: ../../source/contributor-how-to-install-development-versions.rst:26 msgid "" "``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\", extras = " "[\"simulation\"] }`` (with extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:25 +#: ../../source/contributor-how-to-install-development-versions.rst:29 msgid "" "Please refer to the Poetry documentation for further details: `Poetry " "Dependency Specification `_" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:28 +#: ../../source/contributor-how-to-install-development-versions.rst:33 msgid "Using pip (recommended on Colab)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:30 +#: ../../source/contributor-how-to-install-development-versions.rst:35 msgid "Install a ``flwr`` pre-release from PyPI:" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:32 +#: ../../source/contributor-how-to-install-development-versions.rst:37 msgid "``pip install -U --pre flwr`` (without extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:33 -msgid "``pip install -U --pre flwr[simulation]`` (with extras)" +#: 
../../source/contributor-how-to-install-development-versions.rst:38 +msgid "``pip install -U --pre 'flwr[simulation]'`` (with extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:35 +#: ../../source/contributor-how-to-install-development-versions.rst:40 msgid "" "Python packages can be installed from git repositories. Use one of the " "following commands to install the Flower directly from GitHub." msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:37 +#: ../../source/contributor-how-to-install-development-versions.rst:43 msgid "Install ``flwr`` from the default GitHub branch (``main``):" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:39 +#: ../../source/contributor-how-to-install-development-versions.rst:45 msgid "" "``pip install flwr@git+https://github.com/adap/flower.git`` (without " "extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:40 +#: ../../source/contributor-how-to-install-development-versions.rst:46 msgid "" -"``pip install flwr[simulation]@git+https://github.com/adap/flower.git`` " -"(with extras)" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'``" +" (with extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:42 +#: ../../source/contributor-how-to-install-development-versions.rst:49 msgid "Install ``flwr`` from a specific GitHub branch (``branch-name``):" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:44 +#: ../../source/contributor-how-to-install-development-versions.rst:51 msgid "" "``pip install flwr@git+https://github.com/adap/flower.git@branch-name`` " "(without extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:45 +#: ../../source/contributor-how-to-install-development-versions.rst:53 msgid "" -"``pip install flwr[simulation]@git+https://github.com/adap/flower.git" -"@branch-name`` (with 
extras)" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git" +"@branch-name'`` (with extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:49 +#: ../../source/contributor-how-to-install-development-versions.rst:57 msgid "Open Jupyter Notebooks on Google Colab" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:51 +#: ../../source/contributor-how-to-install-development-versions.rst:59 msgid "" "Open the notebook ``doc/source/tutorial-series-get-started-with-flower-" "pytorch.ipynb``:" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:53 +#: ../../source/contributor-how-to-install-development-versions.rst:61 msgid "" "https://colab.research.google.com/github/adap/flower/blob/main/doc/source" "/tutorial-series-get-started-with-flower-pytorch.ipynb" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:55 +#: ../../source/contributor-how-to-install-development-versions.rst:63 msgid "" "Open a development version of the same notebook from branch `branch-name`" " by changing ``main`` to ``branch-name`` (right after ``blob``):" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:57 +#: ../../source/contributor-how-to-install-development-versions.rst:66 msgid "" "https://colab.research.google.com/github/adap/flower/blob/branch-" "name/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:59 +#: ../../source/contributor-how-to-install-development-versions.rst:68 msgid "Install a `whl` on Google Colab:" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:61 +#: ../../source/contributor-how-to-install-development-versions.rst:70 msgid "" "In the vertical icon grid on the left hand side, select ``Files`` > " "``Upload to session storage``" msgstr "" -#: 
../../source/contributor-how-to-install-development-versions.rst:62 +#: ../../source/contributor-how-to-install-development-versions.rst:72 msgid "Upload the whl (e.g., ``flwr-1.8.0-py3-none-any.whl``)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:63 +#: ../../source/contributor-how-to-install-development-versions.rst:73 msgid "" "Change ``!pip install -q 'flwr[simulation]' torch torchvision " "matplotlib`` to ``!pip install -q 'flwr-1.8.0-py3-none-" @@ -897,25 +936,25 @@ msgid "" "change in the future." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:7 +#: ../../source/contributor-how-to-release-flower.rst:8 msgid "During the release" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:9 +#: ../../source/contributor-how-to-release-flower.rst:10 msgid "" "The version number of a release is stated in ``pyproject.toml``. To " "release a new version of Flower, the following things need to happen (in " "that order):" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:11 +#: ../../source/contributor-how-to-release-flower.rst:13 msgid "" "Run ``python3 src/py/flwr_tool/update_changelog.py `` in " "order to add every new change to the changelog (feel free to make manual " "changes to the changelog afterwards until it looks good)." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:12 +#: ../../source/contributor-how-to-release-flower.rst:16 msgid "" "Once the changelog has been updated with all the changes, run ``./dev" "/prepare-release-changelog.sh v``, where ```` " @@ -925,7 +964,7 @@ msgid "" "the contributors. Open a pull request with those changes." 
msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:13 +#: ../../source/contributor-how-to-release-flower.rst:22 msgid "" "Once the pull request is merged, tag the release commit with the version " "number as soon as the PR is merged: ``git tag v`` (notice " @@ -934,100 +973,100 @@ msgid "" "artifacts and the relevant part of the changelog." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:14 +#: ../../source/contributor-how-to-release-flower.rst:26 msgid "Check the draft release on GitHub, and if everything is good, publish it." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:17 +#: ../../source/contributor-how-to-release-flower.rst:29 msgid "After the release" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:19 +#: ../../source/contributor-how-to-release-flower.rst:31 msgid "Create a pull request which contains the following changes:" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:21 +#: ../../source/contributor-how-to-release-flower.rst:33 msgid "Increase the minor version in ``pyproject.toml`` by one." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:22 +#: ../../source/contributor-how-to-release-flower.rst:34 msgid "Update all files which contain the current version number if necessary." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:23 +#: ../../source/contributor-how-to-release-flower.rst:35 msgid "Add a new ``Unreleased`` section in ``changelog.md``." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:25 +#: ../../source/contributor-how-to-release-flower.rst:37 msgid "" "Merge the pull request on the same day (i.e., before a new nightly " "release gets published to PyPI)." 
msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:28 +#: ../../source/contributor-how-to-release-flower.rst:41 msgid "Publishing a pre-release" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:31 +#: ../../source/contributor-how-to-release-flower.rst:44 msgid "Pre-release naming" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:33 +#: ../../source/contributor-how-to-release-flower.rst:46 msgid "" "PyPI supports pre-releases (alpha, beta, release candidate). Pre-releases" " MUST use one of the following naming patterns:" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:35 +#: ../../source/contributor-how-to-release-flower.rst:49 msgid "Alpha: ``MAJOR.MINOR.PATCHaN``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:36 +#: ../../source/contributor-how-to-release-flower.rst:50 msgid "Beta: ``MAJOR.MINOR.PATCHbN``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:37 +#: ../../source/contributor-how-to-release-flower.rst:51 msgid "Release candidate (RC): ``MAJOR.MINOR.PATCHrcN``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:39 +#: ../../source/contributor-how-to-release-flower.rst:53 msgid "Examples include:" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:41 +#: ../../source/contributor-how-to-release-flower.rst:55 msgid "``1.0.0a0``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:42 +#: ../../source/contributor-how-to-release-flower.rst:56 msgid "``1.0.0b0``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:43 +#: ../../source/contributor-how-to-release-flower.rst:57 msgid "``1.0.0rc0``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:44 +#: ../../source/contributor-how-to-release-flower.rst:58 msgid "``1.0.0rc1``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:46 +#: ../../source/contributor-how-to-release-flower.rst:60 msgid "" "This is in line with PEP-440 and the 
recommendations from the Python " "Packaging Authority (PyPA):" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:49 +#: ../../source/contributor-how-to-release-flower.rst:63 msgid "`PEP-440 `_" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:50 +#: ../../source/contributor-how-to-release-flower.rst:64 msgid "" "`PyPA Choosing a versioning scheme " "`_" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:52 +#: ../../source/contributor-how-to-release-flower.rst:67 msgid "" "Note that the approach defined by PyPA is not compatible with SemVer " "2.0.0 spec, for details consult the `Semantic Versioning Specification " @@ -1035,26 +1074,26 @@ msgid "" "11 on precedence)." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:55 +#: ../../source/contributor-how-to-release-flower.rst:73 msgid "Pre-release classification" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:57 +#: ../../source/contributor-how-to-release-flower.rst:75 msgid "Should the next pre-release be called alpha, beta, or release candidate?" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:59 +#: ../../source/contributor-how-to-release-flower.rst:77 msgid "" "RC: feature complete, no known issues (apart from issues that are " "classified as \"won't fix\" for the next stable release) - if no issues " "surface this will become the next stable release" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:60 +#: ../../source/contributor-how-to-release-flower.rst:80 msgid "Beta: feature complete, allowed to have known issues" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:61 +#: ../../source/contributor-how-to-release-flower.rst:81 msgid "Alpha: not feature complete, allowed to have known issues" msgstr "" @@ -1070,19 +1109,19 @@ msgid "" "the instructions or choose your preferred setup." 
msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:9 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:10 msgid "Python Version" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:11 -#: ../../source/how-to-install-flower.rst:8 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:12 +#: ../../source/how-to-install-flower.rst:7 msgid "" -"Flower requires at least `Python 3.8 `_, " +"Flower requires at least `Python 3.9 `_, " "but `Python 3.10 `_ or above is " "recommended." msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:14 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:17 msgid "" "Due to a known incompatibility with `ray " "`_, we currently recommend utilizing at " @@ -1090,11 +1129,11 @@ msgid "" "simulations." msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:19 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:22 msgid "Virtualenv with Pyenv/Virtualenv" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:21 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:24 msgid "" "One of the recommended virtual environment is `pyenv " "`_/`virtualenv `_ for details." 
msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:23 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:29 msgid "" "Once Pyenv is set up, you can use it to install `Python Version 3.10 " "`_ or above:" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:29 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:36 msgid "Create the virtualenv with:" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:36 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:42 msgid "Activate the virtualenv by running the following command:" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:44 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:49 msgid "Virtualenv with Poetry" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:46 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:51 msgid "" "The Flower examples are based on `Poetry `_ to manage dependencies. After installing Poetry you " "simply create a virtual environment with:" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:52 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:58 msgid "" "If you open a new terminal you can activate the previously created " "virtual environment with the following command:" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:60 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:66 msgid "Virtualenv with Anaconda" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:62 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:68 msgid "" "If you prefer to use Anaconda for your virtual environment then install " "and setup the `conda `_ package. After setting it up you can " +"/user-guide/install/index.html>`_ package. 
After setting it up you can " "create a virtual environment with:" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:68 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:76 msgid "and activate the virtual environment with:" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:76 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:83 msgid "And then?" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:78 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:85 msgid "" "As soon as you created your virtual environment you clone one of the " "`Flower examples `_." @@ -1163,11 +1202,11 @@ msgstr "" msgid "Write documentation" msgstr "" -#: ../../source/contributor-how-to-write-documentation.rst:6 +#: ../../source/contributor-how-to-write-documentation.rst:5 msgid "Project layout" msgstr "" -#: ../../source/contributor-how-to-write-documentation.rst:8 +#: ../../source/contributor-how-to-write-documentation.rst:7 msgid "" "The Flower documentation lives in the ``doc`` directory. The Sphinx-based" " documentation system supports both reStructuredText (``.rst`` files) and" @@ -1175,7 +1214,7 @@ msgid "" msgstr "" #: ../../source/contributor-how-to-write-documentation.rst:10 -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:169 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:196 msgid "" "Note that, in order to build the documentation locally (with ``poetry run" " make html``, like described below), `Pandoc " @@ -1183,20 +1222,20 @@ msgid "" "system." 
msgstr "" -#: ../../source/contributor-how-to-write-documentation.rst:14 +#: ../../source/contributor-how-to-write-documentation.rst:15 msgid "Edit an existing page" msgstr "" -#: ../../source/contributor-how-to-write-documentation.rst:16 +#: ../../source/contributor-how-to-write-documentation.rst:17 msgid "Edit an existing ``.rst`` (or ``.md``) file under ``doc/source/``" msgstr "" -#: ../../source/contributor-how-to-write-documentation.rst:17 +#: ../../source/contributor-how-to-write-documentation.rst:18 #: ../../source/contributor-how-to-write-documentation.rst:27 msgid "Compile the docs: ``cd doc``, then ``poetry run make html``" msgstr "" -#: ../../source/contributor-how-to-write-documentation.rst:18 +#: ../../source/contributor-how-to-write-documentation.rst:19 #: ../../source/contributor-how-to-write-documentation.rst:28 msgid "Open ``doc/build/html/index.html`` in the browser to check the result" msgstr "" @@ -1229,41 +1268,41 @@ msgid "" "the Flower codebase." msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:11 +#: ../../source/contributor-ref-good-first-contributions.rst:9 msgid "Where to start" msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:13 +#: ../../source/contributor-ref-good-first-contributions.rst:11 msgid "" "Until the Flower core library matures it will be easier to get PR's " "accepted if they only touch non-core areas of the codebase. Good " "candidates to get started are:" msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:17 +#: ../../source/contributor-ref-good-first-contributions.rst:14 msgid "Documentation: What's missing? What could be expressed more clearly?" msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:18 +#: ../../source/contributor-ref-good-first-contributions.rst:15 msgid "Baselines: See below." 
msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:19 +#: ../../source/contributor-ref-good-first-contributions.rst:16 msgid "Examples: See below." msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:23 +#: ../../source/contributor-ref-good-first-contributions.rst:19 msgid "Request for Flower Baselines" msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:25 +#: ../../source/contributor-ref-good-first-contributions.rst:21 msgid "" "If you are not familiar with Flower Baselines, you should probably check-" "out our `contributing guide for baselines " "`_." msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:27 +#: ../../source/contributor-ref-good-first-contributions.rst:25 msgid "" "You should then check out the open `issues " "`_" @@ -1272,7 +1311,7 @@ msgid "" "working on it!" msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:31 +#: ../../source/contributor-ref-good-first-contributions.rst:30 msgid "" "Otherwise, if you don't find a baseline you'd like to work on, be sure to" " open a new issue with the baseline request template!" @@ -1313,30 +1352,30 @@ msgid "" "special case of the SecAgg+ protocol." msgstr "" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:8 -msgid "The :code:`SecAgg+` abstraction" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:9 +msgid "The ``SecAgg+`` abstraction" msgstr "" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:10 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:11 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:163 msgid "" "In this implementation, each client will be assigned with a unique index " "(int) for secure aggregation, and thus many python dictionaries used have" " keys of int type rather than ClientProxy type." 
msgstr "" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:65 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:198 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:67 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:204 msgid "" "The Flower server will execute and process received results in the " "following order:" msgstr "" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:159 -msgid "The :code:`LightSecAgg` abstraction" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 +msgid "The ``LightSecAgg`` abstraction" msgstr "" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:271 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:277 msgid "Types" msgstr "" @@ -1350,22 +1389,22 @@ msgid "" "are not used to contributing to GitHub projects." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:6 +#: ../../source/contributor-tutorial-contribute-on-github.rst:7 msgid "" "If you're familiar with how contributing on GitHub works, you can " "directly checkout our :doc:`getting started guide for contributors " "`." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:10 +#: ../../source/contributor-tutorial-contribute-on-github.rst:12 msgid "Setting up the repository" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:21 +#: ../../source/contributor-tutorial-contribute-on-github.rst:29 msgid "**Create a GitHub account and setup Git**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:13 +#: ../../source/contributor-tutorial-contribute-on-github.rst:15 msgid "" "Git is a distributed version control tool. This allows for an entire " "codebase's history to be stored and every developer's machine. It is a " @@ -1374,20 +1413,20 @@ msgid "" "started-with-git/set-up-git>`_ to set it up." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:16 +#: ../../source/contributor-tutorial-contribute-on-github.rst:21 msgid "" "GitHub, itself, is a code hosting platform for version control and " "collaboration. It allows for everyone to collaborate and work from " "anywhere on remote repositories." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:18 +#: ../../source/contributor-tutorial-contribute-on-github.rst:25 msgid "" "If you haven't already, you will need to create an account on `GitHub " "`_." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:20 +#: ../../source/contributor-tutorial-contribute-on-github.rst:28 msgid "" "The idea behind the generic Git and GitHub workflow boils down to this: " "you download code from a remote repository on GitHub, make changes " @@ -1395,19 +1434,19 @@ msgid "" "history back to GitHub." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:32 +#: ../../source/contributor-tutorial-contribute-on-github.rst:42 msgid "**Forking the Flower repository**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:24 +#: ../../source/contributor-tutorial-contribute-on-github.rst:32 msgid "" "A fork is a personal copy of a GitHub repository. To create one for " -"Flower, you must navigate to ``_ (while " +"Flower, you must navigate to https://github.com/adap/flower (while " "connected to your GitHub account) and click the ``Fork`` button situated " "on the top right of the page." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:29 +#: ../../source/contributor-tutorial-contribute-on-github.rst:38 msgid "" "You can change the name if you want, but this is not necessary as this " "version of Flower will be yours and will sit inside your own account " @@ -1415,11 +1454,11 @@ msgid "" " the top left corner that you are looking at your own version of Flower." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:47 +#: ../../source/contributor-tutorial-contribute-on-github.rst:59 msgid "**Cloning your forked repository**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:35 +#: ../../source/contributor-tutorial-contribute-on-github.rst:45 msgid "" "The next step is to download the forked repository on your machine to be " "able to make changes to it. On your forked repository page, you should " @@ -1427,27 +1466,27 @@ msgid "" "ability to copy the HTTPS link of the repository." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:41 +#: ../../source/contributor-tutorial-contribute-on-github.rst:52 msgid "" "Once you copied the \\, you can open a terminal on your machine, " "navigate to the place you want to download the repository to and type:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:47 +#: ../../source/contributor-tutorial-contribute-on-github.rst:59 msgid "" "This will create a ``flower/`` (or the name of your fork if you renamed " "it) folder in the current working directory." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:66 +#: ../../source/contributor-tutorial-contribute-on-github.rst:78 msgid "**Add origin**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:50 +#: ../../source/contributor-tutorial-contribute-on-github.rst:62 msgid "You can then go into the repository folder:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:56 +#: ../../source/contributor-tutorial-contribute-on-github.rst:68 msgid "" "And here we will need to add an origin to our repository. The origin is " "the \\ of the remote fork repository. To obtain it, we can do as " @@ -1455,27 +1494,27 @@ msgid "" "account and copying the link." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:61 +#: ../../source/contributor-tutorial-contribute-on-github.rst:75 msgid "" "Once the \\ is copied, we can type the following command in our " "terminal:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:90 +#: ../../source/contributor-tutorial-contribute-on-github.rst:102 msgid "**Add upstream**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:69 +#: ../../source/contributor-tutorial-contribute-on-github.rst:81 msgid "" "Now we will add an upstream address to our repository. Still in the same " "directory, we must run the following command:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:76 +#: ../../source/contributor-tutorial-contribute-on-github.rst:88 msgid "The following diagram visually explains what we did in the previous steps:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:80 +#: ../../source/contributor-tutorial-contribute-on-github.rst:92 msgid "" "The upstream is the GitHub remote address of the parent repository (in " "this case Flower), i.e. the one we eventually want to contribute to and " @@ -1484,17 +1523,17 @@ msgid "" "in our own account." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:84 +#: ../../source/contributor-tutorial-contribute-on-github.rst:97 msgid "" "To make sure our local version of the fork is up-to-date with the latest " "changes from the Flower repository, we can execute the following command:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:93 +#: ../../source/contributor-tutorial-contribute-on-github.rst:105 msgid "Setting up the coding environment" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:95 +#: ../../source/contributor-tutorial-contribute-on-github.rst:107 msgid "" "This can be achieved by following this :doc:`getting started guide for " "contributors ` (note " @@ -1502,158 +1541,158 @@ msgid "" "code and test it, you can finally start making changes!" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:100 +#: ../../source/contributor-tutorial-contribute-on-github.rst:113 msgid "Making changes" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:102 +#: ../../source/contributor-tutorial-contribute-on-github.rst:115 msgid "" "Before making any changes make sure you are up-to-date with your " "repository:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:108 +#: ../../source/contributor-tutorial-contribute-on-github.rst:121 msgid "And with Flower's repository:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:122 +#: ../../source/contributor-tutorial-contribute-on-github.rst:134 msgid "**Create a new branch**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:115 +#: ../../source/contributor-tutorial-contribute-on-github.rst:128 msgid "" "To make the history cleaner and easier to work with, it is good practice " "to create a new branch for each feature/project that needs to be " "implemented." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:118 +#: ../../source/contributor-tutorial-contribute-on-github.rst:131 msgid "" "To do so, just run the following command inside the repository's " "directory:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:125 +#: ../../source/contributor-tutorial-contribute-on-github.rst:136 msgid "**Make changes**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:125 +#: ../../source/contributor-tutorial-contribute-on-github.rst:137 msgid "Write great code and create wonderful changes using your favorite editor!" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:138 +#: ../../source/contributor-tutorial-contribute-on-github.rst:149 msgid "**Test and format your code**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:128 +#: ../../source/contributor-tutorial-contribute-on-github.rst:139 msgid "" "Don't forget to test and format your code! Otherwise your code won't be " "able to be merged into the Flower repository. This is done so the " "codebase stays consistent and easy to understand." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:131 +#: ../../source/contributor-tutorial-contribute-on-github.rst:143 msgid "To do so, we have written a few scripts that you can execute:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:150 +#: ../../source/contributor-tutorial-contribute-on-github.rst:162 msgid "**Stage changes**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:141 +#: ../../source/contributor-tutorial-contribute-on-github.rst:152 msgid "" "Before creating a commit that will update your history, you must specify " "to Git which files it needs to take into account." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:143 +#: ../../source/contributor-tutorial-contribute-on-github.rst:155 msgid "This can be done with:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:149 +#: ../../source/contributor-tutorial-contribute-on-github.rst:161 msgid "" "To check which files have been modified compared to the last version " "(last commit) and to see which files are staged for commit, you can use " -"the :code:`git status` command." +"the ``git status`` command." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:160 +#: ../../source/contributor-tutorial-contribute-on-github.rst:173 msgid "**Commit changes**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:153 +#: ../../source/contributor-tutorial-contribute-on-github.rst:165 msgid "" -"Once you have added all the files you wanted to commit using :code:`git " -"add`, you can finally create your commit using this command:" +"Once you have added all the files you wanted to commit using ``git add``," +" you can finally create your commit using this command:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:159 +#: ../../source/contributor-tutorial-contribute-on-github.rst:172 msgid "" "The \\ is there to explain to others what the commit " "does. It should be written in an imperative style and be concise. An " -"example would be :code:`git commit -m \"Add images to README\"`." +"example would be ``git commit -m \"Add images to README\"``." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:171 +#: ../../source/contributor-tutorial-contribute-on-github.rst:185 msgid "**Push the changes to the fork**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:163 +#: ../../source/contributor-tutorial-contribute-on-github.rst:176 msgid "" "Once we have committed our changes, we have effectively updated our local" " history, but GitHub has no way of knowing this unless we push our " "changes to our origin's remote address:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:170 +#: ../../source/contributor-tutorial-contribute-on-github.rst:184 msgid "" "Once this is done, you will see on the GitHub that your forked repo was " "updated with the changes you have made." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:174 +#: ../../source/contributor-tutorial-contribute-on-github.rst:188 msgid "Creating and merging a pull request (PR)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:206 +#: ../../source/contributor-tutorial-contribute-on-github.rst:226 msgid "**Create the PR**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:177 +#: ../../source/contributor-tutorial-contribute-on-github.rst:191 msgid "" "Once you have pushed changes, on the GitHub webpage of your repository " "you should see the following message:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:181 +#: ../../source/contributor-tutorial-contribute-on-github.rst:196 msgid "Otherwise you can always find this option in the ``Branches`` page." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:183 +#: ../../source/contributor-tutorial-contribute-on-github.rst:198 msgid "" "Once you click the ``Compare & pull request`` button, you should see " "something similar to this:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:187 +#: ../../source/contributor-tutorial-contribute-on-github.rst:203 msgid "At the top you have an explanation of which branch will be merged where:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:191 +#: ../../source/contributor-tutorial-contribute-on-github.rst:207 msgid "" "In this example you can see that the request is to merge the branch " "``doc-fixes`` from my forked repository to branch ``main`` from the " "Flower repository." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:193 +#: ../../source/contributor-tutorial-contribute-on-github.rst:210 msgid "" "The title should be changed to adhere to the :ref:`pr_title_format` " "guidelines, otherwise it won't be possible to merge the PR. So in this " "case, a correct title might be ``docs(framework:skip) Fix typos``." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:196 +#: ../../source/contributor-tutorial-contribute-on-github.rst:214 msgid "" "The input box in the middle is there for you to describe what your PR " "does and to link it to existing issues. We have placed comments (that " @@ -1661,167 +1700,167 @@ msgid "" "process." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:199 +#: ../../source/contributor-tutorial-contribute-on-github.rst:218 msgid "It is important to follow the instructions described in comments." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:201 +#: ../../source/contributor-tutorial-contribute-on-github.rst:220 msgid "" "At the bottom you will find the button to open the PR. 
This will notify " "reviewers that a new PR has been opened and that they should look over it" " to merge or to request changes." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:204 +#: ../../source/contributor-tutorial-contribute-on-github.rst:224 msgid "" "If your PR is not yet ready for review, and you don't want to notify " "anyone, you have the option to create a draft pull request:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:230 msgid "**Making new changes**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:229 msgid "" "Once the PR has been opened (as draft or not), you can still push new " "commits to it the same way we did before, by making changes to the branch" " associated with the PR." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:231 +#: ../../source/contributor-tutorial-contribute-on-github.rst:253 msgid "**Review the PR**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:212 +#: ../../source/contributor-tutorial-contribute-on-github.rst:233 msgid "" "Once the PR has been opened or once the draft PR has been marked as " "ready, a review from code owners will be automatically requested:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:216 +#: ../../source/contributor-tutorial-contribute-on-github.rst:238 msgid "" "Code owners will then look into the code, ask questions, request changes " "or validate the PR." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:218 +#: ../../source/contributor-tutorial-contribute-on-github.rst:241 msgid "Merging will be blocked if there are ongoing requested changes." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:222 +#: ../../source/contributor-tutorial-contribute-on-github.rst:245 msgid "" "To resolve them, just push the necessary changes to the branch associated" " with the PR:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:226 +#: ../../source/contributor-tutorial-contribute-on-github.rst:250 msgid "And resolve the conversation:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:230 +#: ../../source/contributor-tutorial-contribute-on-github.rst:254 msgid "" "Once all the conversations have been resolved, you can re-request a " "review." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:251 +#: ../../source/contributor-tutorial-contribute-on-github.rst:274 msgid "**Once the PR is merged**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:234 +#: ../../source/contributor-tutorial-contribute-on-github.rst:256 msgid "" "If all the automatic tests have passed and reviewers have no more changes" " to request, they can approve the PR and merge it." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:238 +#: ../../source/contributor-tutorial-contribute-on-github.rst:261 msgid "" "Once it is merged, you can delete the branch on GitHub (a button should " "appear to do so) and also delete it locally by doing:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:245 +#: ../../source/contributor-tutorial-contribute-on-github.rst:269 msgid "Then you should update your forked repository by doing:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:254 +#: ../../source/contributor-tutorial-contribute-on-github.rst:277 msgid "Example of first contribution" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:257 +#: ../../source/contributor-tutorial-contribute-on-github.rst:280 msgid "Problem" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:259 +#: ../../source/contributor-tutorial-contribute-on-github.rst:282 msgid "" "For our documentation, we've started to use the `Diàtaxis framework " "`_." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:261 +#: ../../source/contributor-tutorial-contribute-on-github.rst:285 msgid "" "Our \"How to\" guides should have titles that continue the sentence \"How" " to …\", for example, \"How to upgrade to Flower 1.0\"." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:263 +#: ../../source/contributor-tutorial-contribute-on-github.rst:288 msgid "" "Most of our guides do not follow this new format yet, and changing their " "title is (unfortunately) more involved than one might think." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:265 +#: ../../source/contributor-tutorial-contribute-on-github.rst:291 msgid "" "This issue is about changing the title of a doc from present continuous " "to present simple." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:267 +#: ../../source/contributor-tutorial-contribute-on-github.rst:294 msgid "" "Let's take the example of \"Saving Progress\" which we changed to \"Save " "Progress\". Does this pass our check?" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:269 +#: ../../source/contributor-tutorial-contribute-on-github.rst:297 msgid "Before: \"How to saving progress\" ❌" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:271 +#: ../../source/contributor-tutorial-contribute-on-github.rst:299 msgid "After: \"How to save progress\" ✅" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:274 +#: ../../source/contributor-tutorial-contribute-on-github.rst:302 msgid "Solution" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:276 +#: ../../source/contributor-tutorial-contribute-on-github.rst:304 msgid "" "This is a tiny change, but it'll allow us to test your end-to-end setup. 
" "After cloning and setting up the Flower repo, here's what you should do:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:278 +#: ../../source/contributor-tutorial-contribute-on-github.rst:307 msgid "Find the source file in ``doc/source``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:279 +#: ../../source/contributor-tutorial-contribute-on-github.rst:308 msgid "" "Make the change in the ``.rst`` file (beware, the dashes under the title " "should be the same length as the title itself)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:280 +#: ../../source/contributor-tutorial-contribute-on-github.rst:310 msgid "" "Build the docs and `check the result `_" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:283 +#: ../../source/contributor-tutorial-contribute-on-github.rst:314 msgid "Rename file" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:285 +#: ../../source/contributor-tutorial-contribute-on-github.rst:316 msgid "" "You might have noticed that the file name still reflects the old wording." " If we just change the file, then we break all existing links to it - it " @@ -1829,109 +1868,109 @@ msgid "" "engine ranking." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:288 +#: ../../source/contributor-tutorial-contribute-on-github.rst:320 msgid "Here's how to change the file name:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:290 +#: ../../source/contributor-tutorial-contribute-on-github.rst:322 msgid "Change the file name to ``save-progress.rst``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:291 +#: ../../source/contributor-tutorial-contribute-on-github.rst:323 msgid "Add a redirect rule to ``doc/source/conf.py``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:293 +#: ../../source/contributor-tutorial-contribute-on-github.rst:325 msgid "" "This will cause a redirect from ``saving-progress.html`` to ``save-" "progress.html``, old links will continue to work." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:296 +#: ../../source/contributor-tutorial-contribute-on-github.rst:329 msgid "Apply changes in the index file" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:298 +#: ../../source/contributor-tutorial-contribute-on-github.rst:331 msgid "" "For the lateral navigation bar to work properly, it is very important to " "update the ``index.rst`` file as well. This is where we define the whole " "arborescence of the navbar." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:301 +#: ../../source/contributor-tutorial-contribute-on-github.rst:335 msgid "Find and modify the file name in ``index.rst``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:304 +#: ../../source/contributor-tutorial-contribute-on-github.rst:338 msgid "Open PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:306 +#: ../../source/contributor-tutorial-contribute-on-github.rst:340 msgid "" "Commit the changes (commit messages are always imperative: \"Do " "something\", in this case \"Change …\")" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:307 +#: ../../source/contributor-tutorial-contribute-on-github.rst:342 msgid "Push the changes to your fork" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:308 +#: ../../source/contributor-tutorial-contribute-on-github.rst:343 msgid "" "Open a PR (as shown above) with title ``docs(framework) Update how-to " "guide title``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:309 +#: ../../source/contributor-tutorial-contribute-on-github.rst:344 msgid "Wait for it to be approved!" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:310 +#: ../../source/contributor-tutorial-contribute-on-github.rst:345 msgid "Congrats! 🥳 You're now officially a Flower contributor!" 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:314 -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:548 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:946 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:727 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:713 +#: ../../source/contributor-tutorial-contribute-on-github.rst:348 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:573 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1012 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:811 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:857 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:367 msgid "Next steps" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:316 +#: ../../source/contributor-tutorial-contribute-on-github.rst:350 msgid "" "Once you have made your first PR, and want to contribute more, be sure to" " check out the following :" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:318 +#: ../../source/contributor-tutorial-contribute-on-github.rst:353 msgid "" ":doc:`Good first contributions `, where you should particularly look into the " -":code:`baselines` contributions." +"``baselines`` contributions." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:322 +#: ../../source/contributor-tutorial-contribute-on-github.rst:357 #: ../../source/fed/0000-20200102-fed-template.md:60 msgid "Appendix" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:327 +#: ../../source/contributor-tutorial-contribute-on-github.rst:362 msgid "PR title format" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:329 +#: ../../source/contributor-tutorial-contribute-on-github.rst:364 msgid "We enforce the following PR title format:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:335 +#: ../../source/contributor-tutorial-contribute-on-github.rst:370 msgid "" "(or ``(:skip) `` to ignore the PR in the " "changelog)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:337 +#: ../../source/contributor-tutorial-contribute-on-github.rst:372 msgid "" "Where ```` needs to be in ``{ci, fix, feat, docs, refactor, " "break}``, ```` should be in ``{framework, baselines, datasets, " @@ -1940,50 +1979,50 @@ msgid "" "verb in the imperative mood." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:341 +#: ../../source/contributor-tutorial-contribute-on-github.rst:377 #, fuzzy msgid "Valid examples:" msgstr "Exemplo" -#: ../../source/contributor-tutorial-contribute-on-github.rst:343 +#: ../../source/contributor-tutorial-contribute-on-github.rst:379 msgid "``feat(framework) Add flwr build CLI command``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:344 +#: ../../source/contributor-tutorial-contribute-on-github.rst:380 msgid "``refactor(examples:skip) Improve quickstart-pytorch logging``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:345 +#: ../../source/contributor-tutorial-contribute-on-github.rst:381 msgid "``ci(*:skip) Enforce PR title format``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:347 +#: ../../source/contributor-tutorial-contribute-on-github.rst:383 msgid "Invalid examples:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:349 +#: ../../source/contributor-tutorial-contribute-on-github.rst:385 msgid "``feat(framework): Add flwr build CLI command`` (extra ``:``)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:350 +#: ../../source/contributor-tutorial-contribute-on-github.rst:386 msgid "" "``feat(*) Add flwr build CLI command`` (missing ``skip`` flag along with " "``*``)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:351 +#: ../../source/contributor-tutorial-contribute-on-github.rst:387 msgid "``feat(skip) Add flwr build CLI command`` (missing ````)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:352 +#: ../../source/contributor-tutorial-contribute-on-github.rst:388 msgid "``feat(framework) add flwr build CLI command`` (non capitalised verb)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:353 +#: ../../source/contributor-tutorial-contribute-on-github.rst:389 msgid 
"``feat(framework) Add flwr build CLI command.`` (dot at the end)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:354 +#: ../../source/contributor-tutorial-contribute-on-github.rst:390 msgid "``Add flwr build CLI command.`` (missing ``()``)" msgstr "" @@ -1992,12 +2031,16 @@ msgid "Get started as a contributor" msgstr "" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:5 -#: ../../source/how-to-run-flower-using-docker.rst:132 +#: ../../source/docker/run-as-subprocess.rst:11 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:16 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:13 +#: ../../source/docker/tutorial-quickstart-docker.rst:11 msgid "Prerequisites" msgstr "" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:7 -msgid "`Python 3.8 `_ or above" +msgid "`Python 3.9 `_ or above" msgstr "" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:8 @@ -2014,17 +2057,17 @@ msgstr "" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:12 msgid "" -"Flower uses :code:`pyproject.toml` to manage dependencies and configure " +"Flower uses ``pyproject.toml`` to manage dependencies and configure " "development tools (the ones which support it). Poetry is a build tool " "which supports `PEP 517 `_." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:18 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:17 msgid "Developer Machine Setup" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:21 -msgid "Preliminarities" +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:20 +msgid "Preliminaries" msgstr "" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:22 @@ -2041,94 +2084,93 @@ msgid "" "installation actions to add `brew` to your PATH." 
msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:28 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:29 msgid "" "Install `xz` (to install different Python versions) and `pandoc` to build" -" the docs::" +" the docs:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:34 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:36 msgid "For Ubuntu" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:35 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:38 msgid "" "Ensure you system (Ubuntu 22.04+) is up-to-date, and you have all " -"necessary packages::" +"necessary packages:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:44 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:47 msgid "Create Flower Dev Environment" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:46 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:49 msgid "" -"1. Clone the `Flower repository `_ from " -"GitHub::" +"Clone the `Flower repository `_ from " +"GitHub:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:52 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:56 msgid "" "Let's create the Python environment for all-things Flower. If you wish to" -" use :code:`pyenv`, we provide two convenience scripts that you can use. " -"If you prefer using something else than :code:`pyenv`, create a new " -"environment, activate and skip to the last point where all packages are " -"installed." +" use ``pyenv``, we provide two convenience scripts that you can use. If " +"you prefer using something else than ``pyenv``, create a new environment," +" activate and skip to the last point where all packages are installed." 
msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:54 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:61 msgid "" -"If you don't have :code:`pyenv` installed, the following script that will" -" install it, set it up, and create the virtual environment (with " -":code:`Python 3.8.17` by default)::" +"If you don't have ``pyenv`` installed, the following script that will " +"install it, set it up, and create the virtual environment (with " +":substitution-code:`Python |python_full_version|` by default):" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:58 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69 msgid "" -"If you already have :code:`pyenv` installed (along with the :code:`pyenv-" -"virtualenv` plugin), you can use the following convenience script (with " -":code:`Python 3.8.17` by default)::" +"If you already have ``pyenv`` installed (along with the ``pyenv-" +"virtualenv`` plugin), you can use the following convenience script (with " +":substitution-code:`Python |python_full_version|` by default):" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:62 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:77 msgid "" -"3. Install the Flower package in development mode (think :code:`pip " -"install -e`) along with all necessary dependencies::" +"3. 
Install the Flower package in development mode (think ``pip install " +"-e``) along with all necessary dependencies:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85 msgid "Convenience Scripts" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:71 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:87 msgid "" "The Flower repository contains a number of convenience scripts to make " -"recurring development tasks easier and less error-prone. See the " -":code:`/dev` subdirectory for a full list. The following scripts are " -"amongst the most important ones:" +"recurring development tasks easier and less error-prone. See the ``/dev``" +" subdirectory for a full list. The following scripts are amongst the most" +" important ones:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:77 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:92 msgid "Create/Delete Virtual Environment" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:101 msgid "Compile ProtoBuf Definitions" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:92 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:108 msgid "Auto-Format Code" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:99 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:115 msgid "Run Linters and Tests" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:106 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:122 msgid "Add a pre-commit hook" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:108 +#: 
../../source/contributor-tutorial-get-started-as-a-contributor.rst:124 msgid "" "Developers may integrate a pre-commit hook into their workflow utilizing " "the `pre-commit `_ library. The pre-" @@ -2136,21070 +2178,29887 @@ msgid "" "``./dev/format.sh`` and ``./dev/test.sh`` scripts." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:110 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:128 msgid "There are multiple ways developers can use this:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:112 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:130 msgid "Install the pre-commit hook to your local git directory by simply running:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:118 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:136 msgid "" "Each ``git commit`` will trigger the execution of formatting and " "linting/test scripts." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:119 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:138 msgid "" "If in a hurry, bypass the hook using ``--no-verify`` with the ``git " -"commit`` command. ::" +"commit`` command." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:124 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:145 msgid "" "For developers who prefer not to install the hook permanently, it is " "possible to execute a one-time check prior to committing changes by using" " the following command:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:130 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:152 msgid "" "This executes the formatting and linting checks/tests on all the files " "without modifying the default behavior of ``git commit``." 
msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:133 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:156 msgid "Run Github Actions (CI) locally" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:135 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:158 msgid "" "Developers could run the full set of Github Actions workflows under their" " local environment by using `Act `_. " "Please refer to the installation instructions under the linked repository" -" and run the next command under Flower main cloned repository folder::" +" and run the next command under Flower main cloned repository folder:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:142 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:167 msgid "" "The Flower default workflow would run by setting up the required Docker " "machines underneath." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:147 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:171 msgid "Build Release" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:149 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:173 msgid "" "Flower uses Poetry to build releases. The necessary command is wrapped in" -" a simple script::" +" a simple script:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:154 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:180 msgid "" -"The resulting :code:`.whl` and :code:`.tar.gz` releases will be stored in" -" the :code:`/dist` subdirectory." +"The resulting ``.whl`` and ``.tar.gz`` releases will be stored in the " +"``/dist`` subdirectory." 
msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:159 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:184 msgid "Build Documentation" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:161 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:186 msgid "" "Flower's documentation uses `Sphinx `_. " "There's no convenience script to re-build the documentation yet, but it's" -" pretty easy::" +" pretty easy:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:167 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:194 msgid "This will generate HTML documentation in ``doc/build/html``." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: FedBN in PyTorch - From Centralized To Federated" +#: ../../source/docker/enable-tls.rst:2 +msgid "Enable TLS for Secure Connections" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 +#: ../../source/docker/enable-tls.rst:4 msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload with `FedBN " -"`_, a federated training strategy " -"designed for non-iid data. We are using PyTorch to train a Convolutional " -"Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " -"When applying FedBN, only few changes needed compared to :doc:`Example: " -"PyTorch - From Centralized To Federated `." +"When operating in a production environment, it is strongly recommended to" +" enable Transport Layer Security (TLS) for each Flower Component to " +"ensure secure communication." 
msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:9 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:10 -msgid "Centralized Training" +#: ../../source/docker/enable-tls.rst:7 +msgid "" +"To enable TLS, you will need a PEM-encoded root certificate, a PEM-" +"encoded private key and a PEM-encoded certificate chain." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 +#: ../../source/docker/enable-tls.rst:12 msgid "" -"All files are revised based on :doc:`Example: PyTorch - From Centralized " -"To Federated `. The only " -"thing to do is modifying the file called :code:`cifar.py`, revised part " -"is shown below:" +"For testing purposes, you can generate your own self-signed certificates." +" The `Enable SSL connections `__ page contains a section that" +" will guide you through the process." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:13 +#: ../../source/docker/enable-tls.rst:17 msgid "" -"The model architecture defined in class Net() is added with Batch " -"Normalization layers accordingly." +"Because Flower containers, by default, run with a non-root user ``app``, " +"the mounted files and directories must have the proper permissions for " +"the user ID ``49999``." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:41 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:157 -msgid "You can now run your machine learning workload:" +#: ../../source/docker/enable-tls.rst:20 +msgid "" +"For example, to change the user ID of all files in the ``certificates/`` " +"directory, you can run ``sudo chown -R 49999:49999 certificates/*``." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 +#: ../../source/docker/enable-tls.rst:23 +#: ../../source/docker/persist-superlink-state.rst:15 msgid "" -"So far this should all look fairly familiar if you've used PyTorch " -"before. 
Let's take the next step and use what we've built to create a " -"federated learning system within FedBN, the system consists of one server" -" and two clients." +"If you later want to delete the directory, you can change the user ID " +"back to the current user ID by running ``sudo chown -R $USER:$(id -gn) " +"state``." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:51 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:167 -msgid "Federated Training" +#: ../../source/docker/enable-tls.rst:27 +msgid "SuperLink" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 +#: ../../source/docker/enable-tls.rst:29 msgid "" -"If you have read :doc:`Example: PyTorch - From Centralized To Federated " -"`, the following parts are" -" easy to follow, only :code:`get_parameters` and :code:`set_parameters` " -"function in :code:`client.py` needed to revise. If not, please read the " -":doc:`Example: PyTorch - From Centralized To Federated `. first." +"Assuming all files we need are in the local ``certificates`` directory, " +"we can use the flag ``--volume`` to mount the local directory into the " +"``/app/certificates/`` directory of the container:" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:56 -msgid "" -"Our example consists of one *server* and two *clients*. In FedBN, " -":code:`server.py` keeps unchanged, we can start the server directly." +#: ../../source/docker/enable-tls.rst +msgid "Understanding the command" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:62 -msgid "" -"Finally, we will revise our *client* logic by changing " -":code:`get_parameters` and :code:`set_parameters` in :code:`client.py`, " -"we will exclude batch normalization parameters from model parameter list " -"when sending to or receiving from the server." 
+#: ../../source/docker/enable-tls.rst:45 ../../source/docker/enable-tls.rst:92 +#: ../../source/docker/enable-tls.rst:125 +#: ../../source/docker/tutorial-quickstart-docker.rst:66 +#: ../../source/docker/tutorial-quickstart-docker.rst:103 +#: ../../source/docker/tutorial-quickstart-docker.rst:217 +#: ../../source/docker/tutorial-quickstart-docker.rst:305 +msgid "``docker run``: This tells Docker to run a container from an image." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:85 -msgid "Now, you can now open two additional terminal windows and run" +#: ../../source/docker/enable-tls.rst:46 ../../source/docker/enable-tls.rst:93 +#: ../../source/docker/enable-tls.rst:126 +#: ../../source/docker/tutorial-quickstart-docker.rst:67 +#: ../../source/docker/tutorial-quickstart-docker.rst:104 +#: ../../source/docker/tutorial-quickstart-docker.rst:218 +#: ../../source/docker/tutorial-quickstart-docker.rst:306 +msgid "``--rm``: Remove the container once it is stopped or the command exits." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:91 +#: ../../source/docker/enable-tls.rst msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your (previously centralized) PyTorch project run federated " -"learning with FedBN strategy across two clients. Congratulations!" -msgstr "" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:94 -#: ../../source/example-jax-from-centralized-to-federated.rst:277 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:310 -#: ../../source/tutorial-quickstart-jax.rst:283 -msgid "Next Steps" +"``--volume ./certificates/:/app/certificates/:ro``: Mount the " +"``certificates`` directory in" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:96 +#: ../../source/docker/enable-tls.rst msgid "" -"The full source code for this example can be found `here " -"`_. 
Our example is of course somewhat over-" -"simplified because both clients load the exact same dataset, which isn't " -"realistic. You're now prepared to explore this topic further. How about " -"using different subsets of CIFAR-10 on each client? How about adding more" -" clients?" +"the current working directory of the host machine as a read-only volume " +"at the" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:2 -msgid "Example: JAX - Run JAX Federated" +#: ../../source/docker/enable-tls.rst +msgid "``/app/certificates`` directory inside the container." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:4 -#: ../../source/tutorial-quickstart-jax.rst:10 +#: ../../source/docker/enable-tls.rst msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing JAX workload. We are using JAX to train a linear " -"regression model on a scikit-learn dataset. We will structure the example" -" similar to our `PyTorch - From Centralized To Federated " -"`_ walkthrough. First, we build a centralized " -"training approach based on the `Linear Regression with JAX " -"`_" -" tutorial`. Then, we build upon the centralized training code to run the " -"training in a federated fashion." +"This allows the container to access the TLS certificates that are stored " +"in the certificates" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:10 -#: ../../source/tutorial-quickstart-jax.rst:16 -msgid "" -"Before we start building our JAX example, we need install the packages " -":code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`:" +#: ../../source/docker/enable-tls.rst +msgid "directory." 
msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:18 -#: ../../source/tutorial-quickstart-jax.rst:24 -msgid "Linear Regression with JAX" +#: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +":substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:20 -#: ../../source/tutorial-quickstart-jax.rst:26 +#: ../../source/docker/enable-tls.rst msgid "" -"We begin with a brief description of the centralized training code based " -"on a :code:`Linear Regression` model. If you want a more in-depth " -"explanation of what's going on then have a look at the official `JAX " -"documentation `_." +"tag of the image. The tag :substitution-code:`|stable_flwr_version|` " +"represents a specific version of the image." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:23 -#: ../../source/tutorial-quickstart-jax.rst:29 +#: ../../source/docker/enable-tls.rst msgid "" -"Let's create a new file called :code:`jax_training.py` with all the " -"components required for a traditional (centralized) linear regression " -"training. First, the JAX packages :code:`jax` and :code:`jaxlib` need to " -"be imported. In addition, we need to import :code:`sklearn` since we use " -":code:`make_regression` for the dataset and :code:`train_test_split` to " -"split the dataset into a training and test set. You can see that we do " -"not yet import the :code:`flwr` package for federated learning. This will" -" be done later." +"``--ssl-ca-certfile certificates/ca.crt``: Specify the location of the CA" +" certificate file" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:37 -#: ../../source/tutorial-quickstart-jax.rst:43 -msgid "" -"The :code:`load_data()` function loads the mentioned training and test " -"sets." 
+#: ../../source/docker/enable-tls.rst +msgid "inside the container." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:47 -#: ../../source/tutorial-quickstart-jax.rst:53 +#: ../../source/docker/enable-tls.rst msgid "" -"The model architecture (a very simple :code:`Linear Regression` model) is" -" defined in :code:`load_model()`." +"The ``certificates/ca.crt`` file is a certificate that is used to verify " +"the identity of the" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:59 -#: ../../source/tutorial-quickstart-jax.rst:65 -msgid "" -"We now need to define the training (function :code:`train()`), which " -"loops over the training set and measures the loss (function " -":code:`loss_fn()`) for each batch of training examples. The loss function" -" is separate since JAX takes derivatives with a :code:`grad()` function " -"(defined in the :code:`main()` function and called in :code:`train()`)." +#: ../../source/docker/enable-tls.rst +msgid "SuperLink." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:77 -#: ../../source/tutorial-quickstart-jax.rst:83 +#: ../../source/docker/enable-tls.rst msgid "" -"The evaluation of the model is defined in the function " -":code:`evaluation()`. The function takes all test examples and measures " -"the loss of the linear regression model." +"``--ssl-certfile certificates/server.pem``: Specify the location of the " +"SuperLink's" +msgstr "" + +#: ../../source/docker/enable-tls.rst +msgid "TLS certificate file inside the container." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:88 -#: ../../source/tutorial-quickstart-jax.rst:94 +#: ../../source/docker/enable-tls.rst msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our model using JAX. 
" -"As already mentioned, the :code:`jax.grad()` function is defined in " -":code:`main()` and passed to :code:`train()`." +"The ``certificates/server.pem`` file is used to identify the SuperLink " +"and to encrypt the" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:105 -#: ../../source/tutorial-quickstart-jax.rst:111 -msgid "You can now run your (centralized) JAX linear regression workload:" +#: ../../source/docker/enable-tls.rst +msgid "data that is transmitted over the network." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:111 -#: ../../source/tutorial-quickstart-jax.rst:117 +#: ../../source/docker/enable-tls.rst msgid "" -"So far this should all look fairly familiar if you've used JAX before. " -"Let's take the next step and use what we've built to create a simple " -"federated learning system consisting of one server and two clients." +"``--ssl-keyfile certificates/server.key``: Specify the location of the " +"SuperLink's" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:115 -#: ../../source/tutorial-quickstart-jax.rst:121 -msgid "JAX meets Flower" +#: ../../source/docker/enable-tls.rst +msgid "TLS private key file inside the container." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:117 -#: ../../source/tutorial-quickstart-jax.rst:123 +#: ../../source/docker/enable-tls.rst msgid "" -"The concept of federating an existing workload is always the same and " -"easy to understand. We have to start a *server* and then use the code in " -":code:`jax_training.py` for the *clients* that are connected to the " -"*server*. The *server* sends model parameters to the clients. The " -"*clients* run the training and update the parameters. The updated " -"parameters are sent back to the *server*, which averages all received " -"parameter updates. This describes one round of the federated learning " -"process, and we repeat this for multiple rounds." 
+"The ``certificates/server.key`` file is used to decrypt the data that is " +"transmitted over" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:123 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:181 -#: ../../source/tutorial-quickstart-jax.rst:129 -msgid "" -"Our example consists of one *server* and two *clients*. Let's set up " -":code:`server.py` first. The *server* needs to import the Flower package " -":code:`flwr`. Next, we use the :code:`start_server` function to start a " -"server and tell it to perform three rounds of federated learning." +#: ../../source/docker/enable-tls.rst +msgid "the network." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:133 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 -#: ../../source/tutorial-quickstart-jax.rst:139 -msgid "We can already start the *server*:" +#: ../../source/docker/enable-tls.rst:72 +msgid "SuperNode" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:139 -#: ../../source/tutorial-quickstart-jax.rst:145 +#: ../../source/docker/enable-tls.rst:74 msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined JAX training in :code:`jax_training.py`. Our" -" *client* needs to import :code:`flwr`, but also :code:`jax` and " -":code:`jaxlib` to update the parameters on our JAX model:" +"Assuming that the ``ca.crt`` certificate already exists locally, we can " +"use the flag ``--volume`` to mount the local certificate into the " +"container's ``/app/`` directory." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:154 -#: ../../source/tutorial-quickstart-jax.rst:160 +#: ../../source/docker/enable-tls.rst:79 msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. 
" -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`FlowerClient`. :code:`NumPyClient` is slightly " -"easier to implement than :code:`Client` if you use a framework with good " -"NumPy interoperability (like JAX) because it avoids some of the " -"boilerplate that would otherwise be necessary. :code:`FlowerClient` needs" -" to implement four methods, two methods for getting/setting model " -"parameters, one method for training the model, and one method for testing" -" the model:" -msgstr "" - -#: ../../source/example-jax-from-centralized-to-federated.rst:161 -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid ":code:`set_parameters (optional)`" +"If you're generating self-signed certificates and the ``ca.crt`` " +"certificate doesn't exist on the SuperNode, you can copy it over after " +"the generation step." +msgstr "" + +#: ../../source/docker/enable-tls.rst +msgid "``--volume ./ca.crt:/app/ca.crt/:ro``: Mount the ``ca.crt`` file from the" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:160 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -#: ../../source/tutorial-quickstart-jax.rst:166 +#: ../../source/docker/enable-tls.rst msgid "" -"set the model parameters on the local model that are received from the " -"server" +"current working directory of the host machine as a read-only volume at " +"the ``/app/ca.crt``" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:161 -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid "transform parameters to NumPy :code:`ndarray`'s" +#: ../../source/docker/enable-tls.rst +msgid "directory inside the container." 
msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:162 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:220 -#: ../../source/tutorial-quickstart-jax.rst:168 +#: ../../source/docker/enable-tls.rst msgid "" -"loop over the list of model parameters received as NumPy " -":code:`ndarray`'s (think list of neural network layers)" +":substitution-code:`flwr/supernode:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:163 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 -#: ../../source/tutorial-quickstart-jax.rst:169 -#: ../../source/tutorial-quickstart-pytorch.rst:155 -#: ../../source/tutorial-quickstart-scikitlearn.rst:118 -msgid ":code:`get_parameters`" +#: ../../source/docker/enable-tls.rst +msgid "" +"``--root-certificates ca.crt``: This specifies the location of the CA " +"certificate file" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:164 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:222 -#: ../../source/tutorial-quickstart-jax.rst:170 -msgid "" -"get the model parameters and return them as a list of NumPy " -":code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects)" +#: ../../source/docker/enable-tls.rst +msgid "The ``ca.crt`` file is used to verify the identity of the SuperLink." 
msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:167 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 -#: ../../source/tutorial-quickstart-pytorch.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 -msgid ":code:`fit`" +#: ../../source/docker/enable-tls.rst:105 +msgid "SuperExec" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:166 -#: ../../source/example-jax-from-centralized-to-federated.rst:170 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:224 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:228 -#: ../../source/tutorial-quickstart-jax.rst:172 -#: ../../source/tutorial-quickstart-jax.rst:176 +#: ../../source/docker/enable-tls.rst:107 msgid "" -"update the parameters of the local model with the parameters received " -"from the server" +"Assuming all files we need are in the local ``certificates`` directory " +"where the SuperExec will be executed from, we can use the flag " +"``--volume`` to mount the local directory into the ``/app/certificates/``" +" directory of the container:" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:167 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 -msgid "train the model on the local training set" +#: ../../source/docker/enable-tls.rst +msgid "" +":substitution-code:`flwr/superexec:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:168 -#: ../../source/tutorial-quickstart-jax.rst:174 -msgid "get the updated local model parameters and return them to the server" +#: ../../source/docker/enable-tls.rst +msgid "SuperExec." 
msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:172 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 -#: ../../source/tutorial-quickstart-jax.rst:178 -#: ../../source/tutorial-quickstart-pytorch.rst:164 -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 -msgid ":code:`evaluate`" +#: ../../source/docker/enable-tls.rst +msgid "" +"``--ssl-certfile certificates/server.pem``: Specify the location of the " +"SuperExec's" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:171 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:229 -#: ../../source/tutorial-quickstart-jax.rst:177 -msgid "evaluate the updated model on the local test set" +#: ../../source/docker/enable-tls.rst +msgid "" +"The ``certificates/server.pem`` file is used to identify the SuperExec " +"and to encrypt the" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:172 -#: ../../source/tutorial-quickstart-jax.rst:178 -msgid "return the local loss to the server" +#: ../../source/docker/enable-tls.rst +msgid "" +"``--ssl-keyfile certificates/server.key``: Specify the location of the " +"SuperExec's" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:174 -#: ../../source/tutorial-quickstart-jax.rst:180 +#: ../../source/docker/enable-tls.rst msgid "" -"The challenging part is to transform the JAX model parameters from " -":code:`DeviceArray` to :code:`NumPy ndarray` to make them compatible with" -" `NumPyClient`." +"``--executor-config root-" +"certificates=\\\"certificates/superlink_ca.crt\\\"``: Specify the" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:176 -#: ../../source/tutorial-quickstart-jax.rst:182 +#: ../../source/docker/enable-tls.rst msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`evaluate()` previously " -"defined in :code:`jax_training.py`. 
So what we really do here is we tell " -"Flower through our :code:`NumPyClient` subclass which of our already " -"defined functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +"location of the CA certificate file inside the container that the " +"SuperExec executor" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:245 -#: ../../source/tutorial-quickstart-jax.rst:251 -msgid "Having defined the federation process, we can run it." +#: ../../source/docker/enable-tls.rst +msgid "should use to verify the SuperLink's identity." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:268 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:301 -#: ../../source/tutorial-quickstart-jax.rst:274 -msgid "And that's it. You can now open two additional terminal windows and run" +#: ../../source/docker/index.rst:2 +msgid "Run Flower using Docker" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:274 -#: ../../source/tutorial-quickstart-jax.rst:280 +#: ../../source/docker/index.rst:4 msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your JAX project run federated learning across two clients. " -"Congratulations!" +"Start your Flower journey with our pre-made Docker images on Docker Hub, " +"supporting ``amd64`` and ``arm64v8`` architectures." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:279 -#: ../../source/tutorial-quickstart-jax.rst:285 +#: ../../source/docker/index.rst:7 msgid "" -"The source code of this example was improved over time and can be found " -"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " -"clients load the same dataset." +"Our Quickstart guide walks you through containerizing a Flower project " +"and running it end to end using Docker." 
msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:282 -#: ../../source/tutorial-quickstart-jax.rst:288 -msgid "" -"You're now prepared to explore this topic further. How about using a more" -" sophisticated model or using a different dataset? How about adding more " -"clients?" +#: ../../source/docker/index.rst:11 +msgid "Getting Started" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: PyTorch - From Centralized To Federated" +#: ../../source/docker/index.rst:19 +msgid "Running in Production" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:4 -msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload. We are using PyTorch to" -" train a Convolutional Neural Network on the CIFAR-10 dataset. First, we " -"introduce this machine learning task with a centralized training approach" -" based on the `Deep Learning with PyTorch " -"`_ " -"tutorial. Then, we build upon the centralized training code to run the " -"training in a federated fashion." +#: ../../source/docker/index.rst:28 +msgid "Advanced Options" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 -msgid "" -"We begin with a brief description of the centralized CNN training code. " -"If you want a more in-depth explanation of what's going on then have a " -"look at the official `PyTorch tutorial " -"`_." +#: ../../source/docker/index.rst:40 +msgid "Run Flower using Docker Compose" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:15 -msgid "" -"Let's create a new file called :code:`cifar.py` with all the components " -"required for a traditional (centralized) training on CIFAR-10. First, all" -" required packages (such as :code:`torch` and :code:`torchvision`) need " -"to be imported. You can see that we do not import any package for " -"federated learning. 
You can keep all these imports as they are even when " -"we add the federated learning components at a later point." +#: ../../source/docker/persist-superlink-state.rst:2 +msgid "Persist the State of the SuperLink" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:32 +#: ../../source/docker/persist-superlink-state.rst:4 msgid "" -"As already mentioned we will use the CIFAR-10 dataset for this machine " -"learning workload. The model architecture (a very simple Convolutional " -"Neural Network) is defined in :code:`class Net()`." +"By default, the Flower SuperLink keeps its state in-memory. When using " +"the Docker flag ``--rm``, the state is not persisted between container " +"starts." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:56 +#: ../../source/docker/persist-superlink-state.rst:7 msgid "" -"The :code:`load_data()` function loads the CIFAR-10 training and test " -"sets. The :code:`transform` normalized the data after loading." +"If you want to persist the state of the SuperLink on your host system, " +"all you need to do is specify a directory where you want to save the file" +" on your host system and a name for the database file." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:74 +#: ../../source/docker/persist-superlink-state.rst:11 msgid "" -"We now need to define the training (function :code:`train()`) which loops" -" over the training set, measures the loss, backpropagates it, and then " -"takes one optimizer step for each batch of training examples." +"By default, the SuperLink container runs with a non-root user called " +"``app`` with the user ID ``49999``. It is recommended to create a new " +"directory and change the user ID of the directory to ``49999`` to ensure " +"the mounted directory has the proper permissions." 
msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:76 +#: ../../source/docker/persist-superlink-state.rst:21 msgid "" -"The evaluation of the model is defined in the function :code:`test()`. " -"The function loops over all test samples and measures the loss of the " -"model based on the test dataset." +"In the example below, we create a new directory called ``state``, change " +"the user ID and tell Docker via the flag ``--volume`` to mount the local " +"``state`` directory into the ``/app/state`` directory of the container. " +"Lastly, we use the flag ``--database`` to specify the name of the " +"database file." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:136 +#: ../../source/docker/persist-superlink-state.rst:36 msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our CNN on CIFAR-10." +"As soon as the SuperLink starts, the file ``state.db`` is created in the " +"``state`` directory on your host system. If the file already exists, the " +"SuperLink tries to restore the state from the file. To start the " +"SuperLink with an empty database, ensure that there is no database called" +" ``state.db`` in the ``state`` directory (``rm state.db``) before you " +"execute the ``docker run`` command above." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:163 -msgid "" -"So far, this should all look fairly familiar if you've used PyTorch " -"before. Let's take the next step and use what we've built to create a " -"simple federated learning system consisting of one server and two " -"clients." 
+#: ../../source/docker/pin-version.rst:2 +msgid "Pin a Docker Image to a Specific Version" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:169 +#: ../../source/docker/pin-version.rst:4 msgid "" -"The simple machine learning project discussed in the previous section " -"trains the model on a single dataset (CIFAR-10), we call this centralized" -" learning. This concept of centralized learning, as shown in the previous" -" section, is probably known to most of you, and many of you have used it " -"previously. Normally, if you'd want to run machine learning workloads in " -"a federated fashion, then you'd have to change most of your code and set " -"everything up from scratch. This can be a considerable effort." +"It may happen that we update the images behind the tags. Such updates " +"usually include security updates of system dependencies that should not " +"change the functionality of Flower. However, if you want to ensure that " +"you use a fixed version of the Docker image in your deployments, you can " +"`specify the digest " +"`_ of the image instead of the tag." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:173 +#: ../../source/docker/pin-version.rst:14 msgid "" -"However, with Flower you can evolve your pre-existing code into a " -"federated learning setup without the need for a major rewrite." +"The following command returns the current image digest referenced by the " +":substitution-code:`superlink:|stable_flwr_version|` tag:" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:175 -msgid "" -"The concept is easy to understand. We have to start a *server* and then " -"use the code in :code:`cifar.py` for the *clients* that are connected to " -"the *server*. The *server* sends model parameters to the clients. The " -"*clients* run the training and update the parameters. 
The updated " -"parameters are sent back to the *server* which averages all received " -"parameter updates. This describes one round of the federated learning " -"process and we repeat this for multiple rounds." +#: ../../source/docker/pin-version.rst:23 +msgid "This will output" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:197 -msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined centralized training in :code:`cifar.py`. " -"Our *client* needs to import :code:`flwr`, but also :code:`torch` to " -"update the parameters on our PyTorch model:" +#: ../../source/docker/pin-version.rst:30 +msgid "Next, we can pin the digest when running a new SuperLink container:" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:213 -msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier" -" to implement than :code:`Client` if you use a framework with good NumPy " -"interoperability (like PyTorch or TensorFlow/Keras) because it avoids " -"some of the boilerplate that would otherwise be necessary. " -":code:`CifarClient` needs to implement four methods, two methods for " -"getting/setting model parameters, one method for training the model, and " -"one method for testing the model:" +#: ../../source/docker/run-as-root-user.rst:2 +msgid "Run with Root User Privileges" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -msgid ":code:`set_parameters`" +#: ../../source/docker/run-as-root-user.rst:4 +msgid "" +"Flower Docker images, by default, run with a non-root user " +"(username/groupname: ``app``, UID/GID: ``49999``). 
Using root user is " +"**not recommended** unless it is necessary for specific tasks during the " +"build process." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:226 -msgid "get the updated local model weights and return them to the server" +#: ../../source/docker/run-as-root-user.rst:8 +msgid "" +"Always make sure to run the container as a non-root user in production to" +" maintain security best practices." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 -msgid "return the local loss and accuracy to the server" +#: ../../source/docker/run-as-root-user.rst:12 +msgid "Run a Container with Root User Privileges" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:232 +#: ../../source/docker/run-as-root-user.rst:14 msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`test()` previously " -"defined in :code:`cifar.py`. So what we really do here is we tell Flower " -"through our :code:`NumPyClient` subclass which of our already defined " -"functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +"Run the Docker image with the ``-u`` flag and specify ``root`` as the " +"username:" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:280 -msgid "" -"All that's left to do it to define a function that loads both model and " -"data, creates a :code:`CifarClient`, and starts this client. You load " -"your data and model by using :code:`cifar.py`. Start :code:`CifarClient` " -"with the function :code:`fl.client.start_client()` by pointing it at the " -"same IP address we used in :code:`server.py`:" +#: ../../source/docker/run-as-root-user.rst:21 +msgid "This command will run the Docker container with root user privileges." 
msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:307 -msgid "" -"in each window (make sure that the server is running before you do so) " -"and see your (previously centralized) PyTorch project run federated " -"learning across two clients. Congratulations!" +#: ../../source/docker/run-as-root-user.rst:24 +msgid "Run the Build Process with Root User Privileges" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:312 +#: ../../source/docker/run-as-root-user.rst:26 msgid "" -"The full source code for this example: `PyTorch: From Centralized To " -"Federated (Code) `_. Our example is, of course, " -"somewhat over-simplified because both clients load the exact same " -"dataset, which isn't realistic. You're now prepared to explore this topic" -" further. How about using different subsets of CIFAR-10 on each client? " -"How about adding more clients?" +"If you want to switch to the root user during the build process of the " +"Docker image to install missing system dependencies, you can use the " +"``USER root`` directive within your Dockerfile." msgstr "" -#: ../../source/explanation-differential-privacy.rst:2 -#: ../../source/explanation-differential-privacy.rst:11 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 -msgid "Differential Privacy" +#: ../../source/docker/run-as-root-user.rst:30 +#, fuzzy +msgid "SuperNode Dockerfile" +msgstr "Construindo a imagem do servidor" + +#: ../../source/docker/run-as-subprocess.rst:2 +msgid "Run ClientApp as a Subprocess" msgstr "" -#: ../../source/explanation-differential-privacy.rst:3 +#: ../../source/docker/run-as-subprocess.rst:4 msgid "" -"The information in datasets like healthcare, financial transactions, user" -" preferences, etc., is valuable and has the potential for scientific " -"breakthroughs and provides important business insights. However, such " -"data is also sensitive and there is a risk of compromising individual " -"privacy." 
+"In this mode, the ClientApp is executed as a subprocess within the " +"SuperNode Docker container, rather than running in a separate container. " +"This approach reduces the number of running containers, which can be " +"beneficial for environments with limited resources. However, it also " +"means that the ClientApp is no longer isolated from the SuperNode, which " +"may introduce additional security concerns." msgstr "" -#: ../../source/explanation-differential-privacy.rst:6 +#: ../../source/docker/run-as-subprocess.rst:13 msgid "" -"Traditional methods like anonymization alone would not work because of " -"attacks like Re-identification and Data Linkage. That's where " -"differential privacy comes in. It provides the possibility of analyzing " -"data while ensuring the privacy of individuals." +"Before running the ClientApp as a subprocess, ensure that the FAB " +"dependencies have been installed in the SuperNode images. This can be " +"done by extending the SuperNode image:" +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst:17 +msgid "Dockerfile.supernode" msgstr "" -#: ../../source/explanation-differential-privacy.rst:12 +#: ../../source/docker/run-as-subprocess.rst:31 msgid "" -"Imagine two datasets that are identical except for a single record (for " -"instance, Alice's data). Differential Privacy (DP) guarantees that any " -"analysis (M), like calculating the average income, will produce nearly " -"identical results for both datasets (O and O' would be similar). This " -"preserves group patterns while obscuring individual details, ensuring the" -" individual's information remains hidden in the crowd." 
+"Next, build the SuperNode Docker image by running the following command " +"in the directory where Dockerfile is located:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:-1 -msgid "DP Intro" +#: ../../source/docker/run-as-subprocess.rst:39 +msgid "Run the ClientApp as a Subprocess" msgstr "" -#: ../../source/explanation-differential-privacy.rst:22 +#: ../../source/docker/run-as-subprocess.rst:41 msgid "" -"One of the most commonly used mechanisms to achieve DP is adding enough " -"noise to the output of the analysis to mask the contribution of each " -"individual in the data while preserving the overall accuracy of the " -"analysis." +"Start the SuperNode with the flag ``--isolation subprocess``, which tells" +" the SuperNode to execute the ClientApp as a subprocess:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:25 -msgid "Formal Definition" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:2 +msgid "Run Flower Quickstart Examples with Docker Compose" msgstr "" -#: ../../source/explanation-differential-privacy.rst:26 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:4 msgid "" -"Differential Privacy (DP) provides statistical guarantees against the " -"information an adversary can infer through the output of a randomized " -"algorithm. It provides an unconditional upper bound on the influence of a" -" single individual on the output of the algorithm by adding noise [1]. A " -"randomized mechanism M provides (:math:`\\epsilon`, " -":math:`\\delta`)-differential privacy if for any two neighboring " -"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " -"record, and for all possible outputs S ⊆ Range(A):" +"Flower provides a set of `quickstart examples " +"`_ to help you get " +"started with the framework. These examples are designed to demonstrate " +"the capabilities of Flower and by default run using the Simulation " +"Engine. 
This guide demonstrates how to run them using Flower's Deployment" +" Engine via Docker Compose." msgstr "" -#: ../../source/explanation-differential-privacy.rst:32 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:12 msgid "" -"\\small\n" -"P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" +"Some quickstart examples may have limitations or requirements that " +"prevent them from running on every environment. For more information, " +"please see Limitations_." msgstr "" -#: ../../source/explanation-differential-privacy.rst:38 -msgid "" -"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " -"metric of privacy loss. It also controls the privacy-utility trade-off; " -"lower :math:`\\epsilon` values indicate higher levels of privacy but are " -"likely to reduce utility as well. The :math:`\\delta` parameter accounts " -"for a small probability on which the upper bound :math:`\\epsilon` does " -"not hold. The amount of noise needed to achieve differential privacy is " -"proportional to the sensitivity of the output, which measures the maximum" -" change in the output due to the inclusion or removal of a single record." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:15 +#: ../../source/docker/tutorial-quickstart-docker.rst:13 +msgid "Before you start, make sure that:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:45 -msgid "Differential Privacy in Machine Learning" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:20 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:22 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:17 +#: ../../source/docker/tutorial-quickstart-docker.rst:15 +msgid "The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:46 -msgid "" -"DP can be utilized in machine learning to preserve the privacy of the " -"training data. Differentially private machine learning algorithms are " -"designed in a way to prevent the algorithm to learn any specific " -"information about any individual data points and subsequently prevent the" -" model from revealing sensitive information. Depending on the stage at " -"which noise is introduced, various methods exist for applying DP to " -"machine learning algorithms. One approach involves adding noise to the " -"training data (either to the features or labels), while another method " -"entails injecting noise into the gradients of the loss function during " -"model training. Additionally, such noise can be incorporated into the " -"model's output." -msgstr "" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:21 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:18 +#: ../../source/docker/tutorial-quickstart-docker.rst:16 +#, fuzzy +msgid "The Docker daemon is running." +msgstr "Verifique que o serviço Docker está rodando." -#: ../../source/explanation-differential-privacy.rst:53 -msgid "Differential Privacy in Federated Learning" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:22 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:19 +msgid "Docker Compose is `installed `_." msgstr "" -#: ../../source/explanation-differential-privacy.rst:54 -msgid "" -"Federated learning is a data minimization approach that allows multiple " -"parties to collaboratively train a model without sharing their raw data. " -"However, federated learning also introduces new privacy challenges. The " -"model updates between parties and the central server can leak information" -" about the local data. These leaks can be exploited by attacks such as " -"membership inference and property inference attacks, or model inversion " -"attacks." 
+#: ../../source/docker/run-quickstart-examples-docker-compose.rst:25 +msgid "Run the Quickstart Example" msgstr "" -#: ../../source/explanation-differential-privacy.rst:58 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:27 msgid "" -"DP can play a crucial role in federated learning to provide privacy for " -"the clients' data." +"Clone the quickstart example you like to run. For example, ``quickstart-" +"pytorch``:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:60 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:35 msgid "" -"Depending on the granularity of privacy provision or the location of " -"noise addition, different forms of DP exist in federated learning. In " -"this explainer, we focus on two approaches of DP utilization in federated" -" learning based on where the noise is added: at the server (also known as" -" the center) or at the client (also known as the local)." +"Download the `compose.yml " +"`_" +" file into the example directory:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:63 -msgid "" -"**Central Differential Privacy**: DP is applied by the server and the " -"goal is to prevent the aggregated model from leaking information about " -"each client's data." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:44 +msgid "Build and start the services using the following command:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:65 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:50 msgid "" -"**Local Differential Privacy**: DP is applied on the client side before " -"sending any information to the server and the goal is to prevent the " -"updates that are sent to the server from leaking any information about " -"the client's data." 
+"Append the following lines to the end of the ``pyproject.toml`` file and " +"save it:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:-1 -#: ../../source/explanation-differential-privacy.rst:68 -#: ../../source/how-to-use-differential-privacy.rst:11 -msgid "Central Differential Privacy" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:52 +#: ../../source/docker/tutorial-quickstart-docker.rst:324 +msgid "pyproject.toml" msgstr "" -#: ../../source/explanation-differential-privacy.rst:69 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:61 msgid "" -"In this approach, which is also known as user-level DP, the central " -"server is responsible for adding noise to the globally aggregated " -"parameters. It should be noted that trust in the server is required." +"You can customize the string that follows ``tool.flwr.federations.`` to " +"fit your needs. However, please note that the string cannot contain a dot" +" (``.``)." msgstr "" -#: ../../source/explanation-differential-privacy.rst:76 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:64 msgid "" -"While there are various ways to implement central DP in federated " -"learning, we concentrate on the algorithms proposed by [2] and [3]. The " -"overall approach is to clip the model updates sent by the clients and add" -" some amount of noise to the aggregated model. In each iteration, a " -"random set of clients is chosen with a specific probability for training." -" Each client performs local training on its own data. The update of each " -"client is then clipped by some value `S` (sensitivity `S`). This would " -"limit the impact of any individual client which is crucial for privacy " -"and often beneficial for robustness. A common approach to achieve this is" -" by restricting the `L2` norm of the clients' model updates, ensuring " -"that larger updates are scaled down to fit within the norm `S`." 
+"In this example, ``local-deployment`` has been used. Just remember to " +"replace ``local-deployment`` with your chosen name in both the " +"``tool.flwr.federations.`` string and the corresponding ``flwr run .`` " +"command." msgstr "" -#: ../../source/explanation-differential-privacy.rst:-1 -msgid "clipping" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:68 +#, fuzzy +msgid "Run the example:" +msgstr "Exemplo" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:74 +msgid "Follow the logs of the SuperExec service:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:89 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:80 msgid "" -"Afterwards, the Gaussian mechanism is used to add noise in order to " -"distort the sum of all clients' updates. The amount of noise is scaled to" -" the sensitivity value to obtain a privacy guarantee. The Gaussian " -"mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " -"noise_scale * S ) / (number of sampled clients)`." +"That is all it takes! You can monitor the progress of the run through the" +" logs of the SuperExec." msgstr "" -#: ../../source/explanation-differential-privacy.rst:94 -msgid "Clipping" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:84 +msgid "Run a Different Quickstart Example" msgstr "" -#: ../../source/explanation-differential-privacy.rst:96 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:86 msgid "" -"There are two forms of clipping commonly used in Central DP: Fixed " -"Clipping and Adaptive Clipping." +"To run a different quickstart example, such as ``quickstart-tensorflow``," +" first, shut down the Docker Compose services of the current example:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:98 -msgid "" -"**Fixed Clipping** : A predefined fix threshold is set for the magnitude " -"of clients' updates. 
Any update exceeding this threshold is clipped back " -"to the threshold value." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:93 +msgid "After that, you can repeat the steps above." msgstr "" -#: ../../source/explanation-differential-privacy.rst:100 -msgid "" -"**Adaptive Clipping** : The clipping threshold dynamically adjusts based " -"on the observed update distribution [4]. It means that the clipping value" -" is tuned during the rounds with respect to the quantile of the update " -"norm distribution." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:96 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 +msgid "Limitations" msgstr "" -#: ../../source/explanation-differential-privacy.rst:102 -msgid "" -"The choice between fixed and adaptive clipping depends on various factors" -" such as privacy requirements, data distribution, model complexity, and " -"others." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 +msgid "Quickstart Example" msgstr "" -#: ../../source/explanation-differential-privacy.rst:-1 -#: ../../source/explanation-differential-privacy.rst:105 -#: ../../source/how-to-use-differential-privacy.rst:96 -msgid "Local Differential Privacy" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 +msgid "quickstart-fastai" msgstr "" -#: ../../source/explanation-differential-privacy.rst:107 -msgid "" -"In this approach, each client is responsible for performing DP. Local DP " -"avoids the need for a fully trusted aggregator, but it should be noted " -"that local DP leads to a decrease in accuracy but better privacy in " -"comparison to central DP." 
+#: ../../source/docker/run-quickstart-examples-docker-compose.rst:104 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:106 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:115 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:121 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:123 +#: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:399 +#: ../../source/ref-changelog.md:676 ../../source/ref-changelog.md:740 +#: ../../source/ref-changelog.md:798 ../../source/ref-changelog.md:867 +#: ../../source/ref-changelog.md:929 +msgid "None" msgstr "" -#: ../../source/explanation-differential-privacy.rst:116 -msgid "In this explainer, we focus on two forms of achieving Local DP:" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 +msgid "quickstart-huggingface" msgstr "" -#: ../../source/explanation-differential-privacy.rst:118 -msgid "" -"Each client adds noise to the local updates before sending them to the " -"server. To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " -"the sensitivity of the local model to be ∆, Gaussian noise is applied " -"with a noise scale of σ where:" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 +msgid "quickstart-jax" msgstr "" -#: ../../source/explanation-differential-privacy.rst:120 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:108 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:125 msgid "" -"\\small\n" -"\\frac{∆ \\times \\sqrt{2 \\times " -"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" -"\n" +"The example has not yet been updated to work with the latest ``flwr`` " +"version." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:125 -msgid "" -"Each client adds noise to the gradients of the model during the local " -"training (DP-SGD). More specifically, in this approach, gradients are " -"clipped and an amount of calibrated noise is injected into the gradients." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:109 +msgid "quickstart-mlcube" msgstr "" -#: ../../source/explanation-differential-privacy.rst:128 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:111 +msgid "quickstart-mlx" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 msgid "" -"Please note that these two approaches are providing privacy at different " -"levels." +"`Requires to run on macOS with Apple Silicon `_." msgstr "" -#: ../../source/explanation-differential-privacy.rst:131 -msgid "**References:**" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 +msgid "quickstart-monai" msgstr "" -#: ../../source/explanation-differential-privacy.rst:133 -msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 +msgid "quickstart-pandas" msgstr "" -#: ../../source/explanation-differential-privacy.rst:135 -msgid "" -"[2] McMahan et al. Learning Differentially Private Recurrent Language " -"Models." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 +msgid "quickstart-pytorch-lightning" msgstr "" -#: ../../source/explanation-differential-privacy.rst:137 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 msgid "" -"[3] Geyer et al. Differentially Private Federated Learning: A Client " -"Level Perspective." +"Requires an older pip version that is not supported by the Flower Docker " +"images." msgstr "" -#: ../../source/explanation-differential-privacy.rst:139 -msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." 
+#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 +msgid "quickstart-pytorch" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:2 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 -msgid "Federated evaluation" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 +msgid "quickstart-sklearn-tabular" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:4 -msgid "" -"There are two main approaches to evaluating models in federated learning " -"systems: centralized (or server-side) evaluation and federated (or " -"client-side) evaluation." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:124 +msgid "quickstart-tabnet" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:8 -msgid "Centralized Evaluation" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:126 +msgid "quickstart-tensorflow" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:11 -msgid "Built-In Strategies" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:127 +msgid "Only runs on AMD64." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:13 +#: ../../source/docker/set-environment-variables.rst:2 +msgid "Set Environment Variables" +msgstr "" + +#: ../../source/docker/set-environment-variables.rst:4 msgid "" -"All built-in strategies support centralized evaluation by providing an " -"evaluation function during initialization. An evaluation function is any " -"function that can take the current global model parameters as input and " -"return evaluation results:" +"To set a variable inside a Docker container, you can use the ``-e " +"=`` flag. Multiple ``-e`` flags can be used to set multiple " +"environment variables for a container." 
msgstr "" -#: ../../source/explanation-federated-evaluation.rst:58 -msgid "Custom Strategies" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:2 +msgid "Deploy Flower on Multiple Machines with Docker Compose" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:60 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:4 msgid "" -"The :code:`Strategy` abstraction provides a method called " -":code:`evaluate` that can directly be used to evaluate the current global" -" model parameters. The current server implementation calls " -":code:`evaluate` after parameter aggregation and before federated " -"evaluation (see next paragraph)." +"This guide will help you set up a Flower project on multiple machines " +"using Docker Compose." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:65 -msgid "Federated Evaluation" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:7 +msgid "" +"You will learn how to run the Flower client and server components on two " +"separate machines, with Flower configured to use TLS encryption and " +"persist SuperLink state across restarts. A server consists of a SuperLink" +" and ``SuperExec``. For more details about the Flower architecture, refer" +" to the :doc:`../explanation-flower-architecture` explainer page." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:68 -msgid "Implementing Federated Evaluation" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:13 +msgid "" +"This guide assumes you have completed the :doc:`tutorial-quickstart-" +"docker-compose` tutorial. It is highly recommended that you follow and " +"understand the contents of that tutorial before proceeding with this " +"guide." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:70 -msgid "" -"Client-side evaluation happens in the :code:`Client.evaluate` method and " -"can be configured from the server side." 
+#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:20 +msgid "Before you begin, make sure you have the following prerequisites:" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:101 -msgid "Configuring Federated Evaluation" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:23 +msgid "The Docker daemon is running on your local machine and the remote machine." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:103 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:24 msgid "" -"Federated evaluation can be configured from the server side. Built-in " -"strategies support the following arguments:" +"Docker Compose V2 is installed on both your local machine and the remote " +"machine." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:105 -msgid "" -":code:`fraction_evaluate`: a :code:`float` defining the fraction of " -"clients that will be selected for evaluation. If " -":code:`fraction_evaluate` is set to :code:`0.1` and :code:`100` clients " -"are connected to the server, then :code:`10` will be randomly selected " -"for evaluation. If :code:`fraction_evaluate` is set to :code:`0.0`, " -"federated evaluation will be disabled." +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:25 +msgid "You can connect to the remote machine from your local machine." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:106 -msgid "" -":code:`min_evaluate_clients`: an :code:`int`: the minimum number of " -"clients to be selected for evaluation. If :code:`fraction_evaluate` is " -"set to :code:`0.1`, :code:`min_evaluate_clients` is set to 20, and " -":code:`100` clients are connected to the server, then :code:`20` clients " -"will be selected for evaluation." +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:26 +msgid "Ports ``9091`` and ``9093`` are accessible on the remote machine." 
msgstr "" -#: ../../source/explanation-federated-evaluation.rst:107 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:30 msgid "" -":code:`min_available_clients`: an :code:`int` that defines the minimum " -"number of clients which need to be connected to the server before a round" -" of federated evaluation can start. If fewer than " -":code:`min_available_clients` are connected to the server, the server " -"will wait until more clients are connected before it continues to sample " -"clients for evaluation." +"The guide uses the |quickstart_sklearn_tabular|_ example as an example " +"project." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:108 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:32 msgid "" -":code:`on_evaluate_config_fn`: a function that returns a configuration " -"dictionary which will be sent to the selected clients. The function will " -"be called during each round and provides a convenient way to customize " -"client-side evaluation from the server side, for example, to configure " -"the number of validation steps performed." +"If your project has a different name or location, please remember to " +"adjust the commands/paths accordingly." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:135 -msgid "Evaluating Local Model Updates During Training" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:36 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:22 +#: ../../source/docker/tutorial-quickstart-docker.rst:19 +msgid "Step 1: Set Up" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:137 -msgid "" -"Model parameters can also be evaluated during training. 
" -":code:`Client.fit` can return arbitrary evaluation results as a " -"dictionary:" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:38 +msgid "Clone the Flower repository and change to the ``distributed`` directory:" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:177 -msgid "Full Code Example" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:45 +msgid "Get the IP address from the remote machine and save it for later." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:179 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:46 msgid "" -"For a full code example that uses both centralized and federated " -"evaluation, see the *Advanced TensorFlow Example* (the same approach can " -"be applied to workloads implemented in any other framework): " -"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" +"Use the ``certs.yml`` Compose file to generate your own self-signed " +"certificates. If you have certificates, you can continue with Step 2." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:10 -msgid "FED Template" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:51 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:221 +msgid "These certificates should be used only for development purposes." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:12 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12 -msgid "Table of Contents" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:53 +msgid "" +"For production environments, you may have to use dedicated services to " +"obtain your certificates." 
msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:14 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14 -msgid "[Table of Contents](#table-of-contents)" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:56 +msgid "" +"First, set the environment variables ``SUPERLINK_IP`` and " +"``SUPEREXEC_IP`` with the IP address from the remote machine. For " +"example, if the IP is ``192.168.2.33``, execute:" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:15 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15 -msgid "[Summary](#summary)" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:65 +msgid "Next, generate the self-signed certificates:" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:16 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16 -msgid "[Motivation](#motivation)" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:72 +msgid "Step 2: Copy the Server Compose Files" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:17 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 -msgid "[Goals](#goals)" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:74 +msgid "" +"Use the method that works best for you to copy the ``server`` directory, " +"the certificates, and your Flower project to the remote machine." 
msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:18 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 -msgid "[Non-Goals](#non-goals)" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:77 +msgid "For example, you can use ``scp`` to copy the directories:" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:19 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 -msgid "[Proposal](#proposal)" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:87 +msgid "Step 3: Start the Flower Server Components" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:20 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 -msgid "[Drawbacks](#drawbacks)" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:89 +msgid "" +"Log into the remote machine using ``ssh`` and run the following command " +"to start the SuperLink and SuperExec services:" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:21 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 -msgid "[Alternatives Considered](#alternatives-considered)" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:102 +msgid "" +"The Path of the ``PROJECT_DIR`` should be relative to the location of the" +" ``server`` Docker Compose files." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:22 -msgid "[Appendix](#appendix)" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:105 +msgid "Go back to your terminal on your local machine." 
msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:24 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 -msgid "Summary" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:108 +msgid "Step 4: Start the Flower Client Components" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:26 -msgid "\\[TODO - sentence 1: summary of the problem\\]" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:110 +msgid "" +"On your local machine, run the following command to start the client " +"components:" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:28 -msgid "\\[TODO - sentence 2: summary of the solution\\]" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:120 +msgid "" +"The Path of the ``PROJECT_DIR`` should be relative to the location of the" +" ``client`` Docker Compose files." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:30 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 -msgid "Motivation" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:124 +msgid "Step 5: Run Your Flower Project" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:32 -#: ../../source/fed/0000-20200102-fed-template.md:36 -#: ../../source/fed/0000-20200102-fed-template.md:40 -#: ../../source/fed/0000-20200102-fed-template.md:44 -#: ../../source/fed/0000-20200102-fed-template.md:48 -#: ../../source/fed/0000-20200102-fed-template.md:54 -#: ../../source/fed/0000-20200102-fed-template.md:58 -msgid "\\[TODO\\]" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:126 +msgid "" +"Specify the remote SuperExec IP addresses and the path to the root " +"certificate in the ``[tool.flwr.federations.remote-superexec]`` table in " +"the ``pyproject.toml`` file. 
Here, we have named our remote federation " +"``remote-superexec``:" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:34 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:53 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 -msgid "Goals" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:130 +msgid "examples/quickstart-sklearn-tabular/pyproject.toml" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:38 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:79 -msgid "Non-Goals" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:139 +msgid "" +"The Path of the ``root-certificates`` should be relative to the location " +"of the ``pyproject.toml`` file." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:42 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 -msgid "Proposal" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:142 +msgid "To run the project, execute:" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:46 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 -msgid "Drawbacks" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:148 +msgid "" +"That's it! With these steps, you've set up Flower on two separate " +"machines and are ready to start using it." 
msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:50 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 -msgid "Alternatives Considered" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:152 +msgid "Step 6: Clean Up" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:52 -msgid "\\[Alternative 1\\]" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:154 +msgid "Shut down the Flower client components:" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:56 -msgid "\\[Alternative 2\\]" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:161 +msgid "Shut down the Flower server components and delete the SuperLink state:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 -msgid "Flower Enhancement Doc" +#: ../../source/docker/tutorial-quickstart-docker.rst:2 +msgid "Quickstart with Docker" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 -msgid "[Enhancement Doc Template](#enhancement-doc-template)" +#: ../../source/docker/tutorial-quickstart-docker.rst:4 +msgid "" +"This quickstart aims to guide you through the process of containerizing a" +" Flower project and running it end to end using Docker on your local " +"machine." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 -msgid "[Metadata](#metadata)" +#: ../../source/docker/tutorial-quickstart-docker.rst:7 +msgid "" +"This tutorial does not use production-ready settings, so you can focus on" +" understanding the basic workflow that uses the minimum configurations." 
msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:22 -msgid "[Workflow](#workflow)" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:32 +#: ../../source/docker/tutorial-quickstart-docker.rst:21 +msgid "Create a new Flower project (PyTorch):" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 -msgid "[GitHub Issues](#github-issues)" +#: ../../source/docker/tutorial-quickstart-docker.rst:39 +msgid "Create a new Docker bridge network called ``flwr-network``:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:26 -msgid "[Google Docs](#google-docs)" +#: ../../source/docker/tutorial-quickstart-docker.rst:45 +msgid "" +"User-defined networks, such as ``flwr-network``, enable IP resolution of " +"container names, a feature absent in the default bridge network. This " +"simplifies quickstart example by avoiding the need to determine host IP " +"first." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:30 -msgid "A Flower Enhancement is a standardized development process to" +#: ../../source/docker/tutorial-quickstart-docker.rst:50 +msgid "Step 2: Start the SuperLink" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 -msgid "provide a common structure for proposing larger changes" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:62 +#: ../../source/docker/tutorial-quickstart-docker.rst:52 +msgid "Open your terminal and run:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 -msgid "ensure that the motivation for a change is clear" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "Understand the command" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 -msgid "persist project information in a version control system" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``-p 9091:9091 -p 9092:9092``: Map 
port ``9091`` and ``9092`` of the " +"container to the same port of" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 -msgid "document the motivation for impactful user-facing changes" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the host machine, allowing other services to access the Driver API on" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:36 -msgid "reserve GitHub issues for tracking work in flight" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``http://localhost:9091`` and the Fleet API on ``http://localhost:9092``." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 +#: ../../source/docker/tutorial-quickstart-docker.rst:71 +#: ../../source/docker/tutorial-quickstart-docker.rst:108 +#: ../../source/docker/tutorial-quickstart-docker.rst:219 +#: ../../source/docker/tutorial-quickstart-docker.rst:309 msgid "" -"ensure community participants can successfully drive changes to " -"completion across one or more releases while stakeholders are adequately " -"represented throughout the process" -msgstr "" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 -msgid "Hence, an Enhancement Doc combines aspects of" +"``--network flwr-network``: Make the container join the network named " +"``flwr-network``." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 -msgid "a feature, and effort-tracking document" +#: ../../source/docker/tutorial-quickstart-docker.rst:72 +msgid "``--name superlink``: Assign the name ``superlink`` to the container." 
msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 -msgid "a product requirements document" +#: ../../source/docker/tutorial-quickstart-docker.rst:73 +#: ../../source/docker/tutorial-quickstart-docker.rst:110 +#: ../../source/docker/tutorial-quickstart-docker.rst:220 +#: ../../source/docker/tutorial-quickstart-docker.rst:311 +msgid "" +"``--detach``: Run the container in the background, freeing up the " +"terminal." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 -msgid "a design document" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"tag of the image. The tag :substitution-code:`|stable_flwr_version|` " +"represents a :doc:`specific version ` of the image." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"into one file, which is created incrementally in collaboration with the " -"community." +"``--insecure``: This flag tells the container to operate in an insecure " +"mode, allowing" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 -msgid "" -"For far-fetching changes or features proposed to Flower, an abstraction " -"beyond a single GitHub issue or pull request is required to understand " -"and communicate upcoming changes to the project." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "unencrypted communication." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 -msgid "" -"The purpose of this process is to reduce the amount of \"tribal " -"knowledge\" in our community. By moving decisions from Slack threads, " -"video calls, and hallway conversations into a well-tracked artifact, this" -" process aims to enhance communication and discoverability." 
+#: ../../source/docker/tutorial-quickstart-docker.rst:80 +msgid "Step 3: Start the SuperNode" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 -msgid "" -"Roughly any larger, user-facing enhancement should follow the Enhancement" -" process. If an enhancement would be described in either written or " -"verbal communication to anyone besides the author or developer, then " -"consider creating an Enhancement Doc." +#: ../../source/docker/tutorial-quickstart-docker.rst:82 +msgid "Start two SuperNode containers." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 -msgid "" -"Similarly, any technical effort (refactoring, major architectural change)" -" that will impact a large section of the development community should " -"also be communicated widely. The Enhancement process is suited for this " -"even if it will have zero impact on the typical user or operator." +#: ../../source/docker/tutorial-quickstart-docker.rst:84 +msgid "Start the first container:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 -msgid "" -"For small changes and additions, going through the Enhancement process " -"would be time-consuming and unnecessary. This includes, for example, " -"adding new Federated Learning algorithms, as these only add features " -"without changing how Flower works or is used." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``-p 9094:9094``: Map port ``9094`` of the container to the same port of" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 -msgid "" -"Enhancements are different from feature requests, as they are already " -"providing a laid-out path for implementation and are championed by " -"members of the community." 
+#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the host machine, allowing other services to access the SuperNode API on" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 -msgid "" -"An Enhancement is captured in a Markdown file that follows a defined " -"template and a workflow to review and store enhancement docs for " -"reference — the Enhancement Doc." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``http://localhost:9094``." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 -msgid "Enhancement Doc Template" +#: ../../source/docker/tutorial-quickstart-docker.rst:109 +msgid "``--name supernode-1``: Assign the name ``supernode-1`` to the container." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Each enhancement doc is provided as a Markdown file having the following " -"structure" +"``flwr/supernode:|stable_flwr_version|``: This is the name of the image " +"to be run and the specific tag" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 -msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "of the image." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 -msgid "Title (same as in metadata)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--superlink superlink:9092``: Connect to the SuperLink's Fleet API at " +"the address" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 -msgid "Table of Contents (if needed)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``superlink:9092``." 
msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 -msgid "Notes/Constraints/Caveats (optional)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--node-config \"partition-id=0 num-partitions=2\"``: Set the partition " +"ID to ``0`` and the" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 -msgid "Design Details (optional)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "number of partitions to ``2`` for the SuperNode configuration." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 -msgid "Graduation Criteria" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--supernode-address 0.0.0.0:9094``: Set the address and port number " +"that the SuperNode" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 -msgid "Upgrade/Downgrade Strategy (if applicable)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "is listening on." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 -msgid "As a reference, this document follows the above structure." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--isolation process``: Tells the SuperNode that the ClientApp is " +"created by separate" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 -#: ../../source/ref-api/flwr.common.Metadata.rst:2 -msgid "Metadata" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "independent process. The SuperNode does not attempt to create it." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 -msgid "" -"**fed-number** (Required) The `fed-number` of the last Flower Enhancement" -" Doc + 1. With this number, it becomes easy to reference other proposals." 
+#: ../../source/docker/tutorial-quickstart-docker.rst:124 +msgid "Start the second container:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 -msgid "**title** (Required) The title of the proposal in plain language." +#: ../../source/docker/tutorial-quickstart-docker.rst:142 +msgid "Step 4: Start the ClientApp" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 +#: ../../source/docker/tutorial-quickstart-docker.rst:144 msgid "" -"**status** (Required) The current status of the proposal. See " -"[workflow](#workflow) for the possible states." +"The ClientApp Docker image comes with a pre-installed version of Flower " +"and serves as a base for building your own ClientApp image. In order to " +"install the FAB dependencies, you will need to create a Dockerfile that " +"extends the ClientApp image and installs the required dependencies." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 +#: ../../source/docker/tutorial-quickstart-docker.rst:149 msgid "" -"**authors** (Required) A list of authors of the proposal. This is simply " -"the GitHub ID." +"Create a ClientApp Dockerfile called ``Dockerfile.clientapp`` and paste " +"the following code into it:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 -msgid "" -"**creation-date** (Required) The date that the proposal was first " -"submitted in a PR." +#: ../../source/docker/tutorial-quickstart-docker.rst:152 +msgid "Dockerfile.clientapp" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 -msgid "" -"**last-updated** (Optional) The date that the proposal was last changed " -"significantly." 
+#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "Understand the Dockerfile" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"**see-also** (Optional) A list of other proposals that are relevant to " -"this one." +":substitution-code:`FROM flwr/clientapp:|stable_flwr_version|`: This line" +" specifies that the Docker image" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 -msgid "**replaces** (Optional) A list of proposals that this one replaces." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"to be built from is the ``flwr/clientapp image``, version :substitution-" +"code:`|stable_flwr_version|`." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 -msgid "**superseded-by** (Optional) A list of proposals that this one supersedes." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``WORKDIR /app``: Set the working directory for the container to ``/app``." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 -msgid "Workflow" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"Any subsequent commands that reference a directory will be relative to " +"this directory." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 -msgid "" -"The idea forming the enhancement should already have been discussed or " -"pitched in the community. As such, it needs a champion, usually the " -"author, who shepherds the enhancement. This person also has to find " -"committers to Flower willing to review the proposal." 
+#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``COPY pyproject.toml .``: Copy the ``pyproject.toml`` file" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"New enhancements are checked in with a file name in the form of `NNNN-" -"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement " -"Doc number, to `enhancements`. All enhancements start in `provisional` " -"state as part of a pull request. Discussions are done as part of the pull" -" request review." +"from the current working directory into the container's ``/app`` " +"directory." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Once an enhancement has been reviewed and approved, its status is changed" -" to `implementable`. The actual implementation is then done in separate " -"pull requests. These pull requests should mention the respective " -"enhancement as part of their description. After the implementation is " -"done, the proposal status is changed to `implemented`." +"``RUN sed -i 's/.*flwr\\[simulation\\].*//' pyproject.toml``: Remove the " +"``flwr`` dependency" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 -msgid "" -"Under certain conditions, other states are possible. An Enhancement has " -"the following states:" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "from the ``pyproject.toml``." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"`provisional`: The enhancement has been proposed and is actively being " -"defined. This is the starting state while the proposal is being fleshed " -"out and actively defined and discussed." 
+"``python -m pip install -U --no-cache-dir .``: Run the ``pip`` install " +"command to" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 -msgid "`implementable`: The enhancement has been reviewed and approved." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "install the dependencies defined in the ``pyproject.toml`` file" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"`implemented`: The enhancement has been implemented and is no longer " -"actively changed." +"The ``-U`` flag indicates that any existing packages should be upgraded, " +"and" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 -msgid "`deferred`: The enhancement is proposed but not actively being worked on." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--no-cache-dir`` prevents pip from using the cache to speed up the " +"installation." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"`rejected`: The authors and reviewers have decided that this enhancement " -"is not moving forward." +"``ENTRYPOINT [\"flwr-clientapp\"]``: Set the command ``flwr-clientapp`` " +"to be" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 -msgid "`withdrawn`: The authors have withdrawn the enhancement." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the default command run when the container is started." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 -msgid "`replaced`: The enhancement has been replaced by a new enhancement." +#: ../../source/docker/tutorial-quickstart-docker.rst:186 +msgid "" +"Note that `flwr `__ is already installed " +"in the ``flwr/clientapp`` base image, so only other package dependencies " +"such as ``flwr-datasets``, ``torch``, etc., need to be installed. 
As a " +"result, the ``flwr`` dependency is removed from the ``pyproject.toml`` " +"after it has been copied into the Docker image (see line 5)." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 +#: ../../source/docker/tutorial-quickstart-docker.rst:192 msgid "" -"Adding an additional process to the ones already provided by GitHub " -"(Issues and Pull Requests) adds more complexity and can be a barrier for " -"potential first-time contributors." +"Next, build the ClientApp Docker image by running the following command " +"in the directory where the Dockerfile is located:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 +#: ../../source/docker/tutorial-quickstart-docker.rst:201 msgid "" -"Expanding the proposal template beyond the single-sentence description " -"currently required in the features issue template may be a heavy burden " -"for non-native English speakers." +"The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. " +"Remember that these values are merely examples, and you can customize " +"them according to your requirements." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 -msgid "GitHub Issues" +#: ../../source/docker/tutorial-quickstart-docker.rst:205 +msgid "Start the first ClientApp container:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Using GitHub Issues for these kinds of enhancements is doable. One could " -"use, for example, tags, to differentiate and filter them from other " -"issues. The main issue is in discussing and reviewing an enhancement: " -"GitHub issues only have a single thread for comments. Enhancements " -"usually have multiple threads of discussion at the same time for various " -"parts of the doc. Managing these multiple discussions can be confusing " -"when using GitHub Issues." 
+"``flwr_clientapp:0.0.1``: This is the name of the image to be run and the" +" specific tag" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 -msgid "Google Docs" -msgstr "" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Google Docs allow for multiple threads of discussions. But as Google Docs" -" are hosted outside the project, their discoverability by the community " -"needs to be taken care of. A list of links to all proposals has to be " -"managed and made available for the community. Compared to shipping " -"proposals as part of Flower's repository, the potential for missing links" -" is much higher." +"``--supernode supernode-1:9094``: Connect to the SuperNode's Fleet API at" +" the address" msgstr "" -#: ../../source/fed/index.md:1 -msgid "FED - Flower Enhancement Doc" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``supernode-1:9094``." msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:2 -msgid "Aggregate evaluation results" +#: ../../source/docker/tutorial-quickstart-docker.rst:226 +msgid "Start the second ClientApp container:" msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:4 -msgid "" -"The Flower server does not prescribe a way to aggregate evaluation " -"results, but it enables the user to fully customize result aggregation." +#: ../../source/docker/tutorial-quickstart-docker.rst:237 +msgid "Step 5: Start the SuperExec" msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:8 -msgid "Aggregate Custom Evaluation Results" +#: ../../source/docker/tutorial-quickstart-docker.rst:239 +msgid "" +"The procedure for building and running a SuperExec image is almost " +"identical to the ClientApp image." 
msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:10 +#: ../../source/docker/tutorial-quickstart-docker.rst:242 msgid "" -"The same :code:`Strategy`-customization approach can be used to aggregate" -" custom evaluation results coming from individual clients. Clients can " -"return custom metrics to the server by returning a dictionary:" +"Similar to the ClientApp image, you will need to create a Dockerfile that" +" extends the SuperExec image and installs the required FAB dependencies." msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:36 +#: ../../source/docker/tutorial-quickstart-docker.rst:245 msgid "" -"The server can then use a customized strategy to aggregate the metrics " -"provided in these dictionaries:" +"Create a SuperExec Dockerfile called ``Dockerfile.superexec`` and paste " +"the following code in:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:2 -msgid "Authenticate SuperNodes" +#: ../../source/docker/tutorial-quickstart-docker.rst:248 +msgid "Dockerfile.superexec" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:4 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Flower has built-in support for authenticated SuperNodes that you can use" -" to verify the identities of each SuperNode connecting to a SuperLink. " -"Flower node authentication works similar to how GitHub SSH authentication" -" works:" +":substitution-code:`FROM flwr/superexec:|stable_flwr_version|`: This line" +" specifies that the Docker image" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:7 -msgid "SuperLink (server) stores a list of known (client) node public keys" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"to be built from is the ``flwr/superexec image``, version :substitution-" +"code:`|stable_flwr_version|`." 
msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:8 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Using ECDH, both SuperNode and SuperLink independently derive a shared " -"secret" +"``ENTRYPOINT [\"flower-superexec\"``: Set the command ``flower-" +"superexec`` to be" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:9 -msgid "" -"Shared secret is used to compute the HMAC value of the message sent from " -"SuperNode to SuperLink as a token" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``\"--executor\", \"flwr.superexec.deployment:executor\"]`` Use the" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:10 -msgid "SuperLink verifies the token" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``flwr.superexec.deployment:executor`` executor to run the ServerApps." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:12 +#: ../../source/docker/tutorial-quickstart-docker.rst:283 msgid "" -"We recommend you to check out the complete `code example " -"`_ demonstrating federated learning with Flower in an " -"authenticated setting." +"Afterward, in the directory that holds the Dockerfile, execute this " +"Docker command to build the SuperExec image:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:15 -msgid "" -"This guide covers a preview feature that might change in future versions " -"of Flower." +#: ../../source/docker/tutorial-quickstart-docker.rst:290 +msgid "Start the SuperExec container:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:18 +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``-p 9093:9093``: Map port ``9093`` of the container to the same port of" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"For increased security, node authentication can only be used when " -"encrypted connections (SSL/TLS) are enabled." 
+"the host machine, allowing you to access the SuperExec API on " +"``http://localhost:9093``." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:21 -msgid "Enable node authentication in :code:`SuperLink`" +#: ../../source/docker/tutorial-quickstart-docker.rst:310 +msgid "``--name superexec``: Assign the name ``superexec`` to the container." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:23 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"To enable node authentication, first you need to configure SSL/TLS " -"connections to secure the SuperLink<>SuperNode communication. You can " -"find the complete guide `here `_. After configuring secure connections, you" -" can enable client authentication in a long-running Flower " -":code:`SuperLink`. Use the following terminal command to start a Flower " -":code:`SuperNode` that has both secure connections and node " -"authentication enabled:" +"``flwr_superexec:0.0.1``: This is the name of the image to be run and the" +" specific tag" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:36 -msgid "Let's break down the authentication flags:" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--executor-config superlink=\\\"superlink:9091\\\"``: Configure the " +"SuperExec executor to" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:38 -msgid "" -"The first flag :code:`--auth-list-public-keys` expects a path to a CSV " -"file storing all known node public keys. You need to store all known node" -" public keys that are allowed to participate in a federation in one CSV " -"file (:code:`.csv`)." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "connect to the SuperLink running on port ``9091``." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:40 -msgid "" -"A valid CSV file storing known node public keys should list the keys in " -"OpenSSH format, separated by commas and without any comments. 
For an " -"example, refer to our code sample, which contains a CSV file with two " -"known node public keys." +#: ../../source/docker/tutorial-quickstart-docker.rst:320 +msgid "Step 6: Run the Quickstart Project" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:42 -msgid "" -"The second and third flags :code:`--auth-superlink-private-key` and :code" -":`--auth-superlink-public-key` expect paths to the server's private and " -"public keys. For development purposes, you can generate a private and " -"public key pair using :code:`ssh-keygen -t ecdsa -b 384`." +#: ../../source/docker/tutorial-quickstart-docker.rst:322 +msgid "Add the following lines to the ``pyproject.toml``:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:45 -msgid "" -"In Flower 1.9, there is no support for dynamically removing, editing, or " -"adding known node public keys to the SuperLink. To change the set of " -"known nodes, you need to shut the server down, edit the CSV file, and " -"start the server again. Support for dynamically changing the set of known" -" nodes is on the roadmap to be released in Flower 1.10 (ETA: June)." +#: ../../source/docker/tutorial-quickstart-docker.rst:331 +msgid "Run the ``quickstart-docker`` project by executing the command:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:51 -msgid "Enable node authentication in :code:`SuperNode`" +#: ../../source/docker/tutorial-quickstart-docker.rst:337 +msgid "Follow the SuperExec logs to track the execution of the run:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:53 -msgid "" -"Similar to the long-running Flower server (:code:`SuperLink`), you can " -"easily enable node authentication in the long-running Flower client " -"(:code:`SuperNode`). 
Use the following terminal command to start an " -"authenticated :code:`SuperNode`:" +#: ../../source/docker/tutorial-quickstart-docker.rst:344 +msgid "Step 7: Update the Application" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:64 +#: ../../source/docker/tutorial-quickstart-docker.rst:346 msgid "" -"The :code:`--auth-supernode-private-key` flag expects a path to the " -"node's private key file and the :code:`--auth-supernode-public-key` flag " -"expects a path to the node's public key file. For development purposes, " -"you can generate a private and public key pair using :code:`ssh-keygen -t" -" ecdsa -b 384`." +"Change the application code. For example, change the ``seed`` in " +"``quickstart_docker/task.py`` to ``43`` and save it:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:68 -msgid "Security notice" +#: ../../source/docker/tutorial-quickstart-docker.rst:349 +msgid "quickstart_docker/task.py" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:70 -msgid "" -"The system's security relies on the credentials of the SuperLink and each" -" SuperNode. Therefore, it is imperative to safeguard and safely store the" -" credentials to avoid security risks such as Public Key Infrastructure " -"(PKI) impersonation attacks. The node authentication mechanism also " -"involves human interaction, so please ensure that all of the " -"communication is done in a secure manner, using trusted communication " -"methods." 
+#: ../../source/docker/tutorial-quickstart-docker.rst:356 +msgid "Stop the current ClientApp containers:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:75 -#: ../../source/how-to-enable-ssl-connections.rst:65 -#: ../../source/how-to-use-built-in-mods.rst:85 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 -msgid "Conclusion" +#: ../../source/docker/tutorial-quickstart-docker.rst:362 +#, fuzzy +msgid "Rebuild the FAB and ClientApp image:" +msgstr "Construindo a imagem base" + +#: ../../source/docker/tutorial-quickstart-docker.rst:368 +msgid "Launch two new ClientApp containers based on the newly built image:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:77 -msgid "" -"You should now have learned how to start a long-running Flower server " -"(:code:`SuperLink`) and client (:code:`SuperNode`) with node " -"authentication enabled. You should also know the significance of the " -"private key and store it safely to minimize security risks." +#: ../../source/docker/tutorial-quickstart-docker.rst:383 +msgid "Run the updated project:" msgstr "" -#: ../../source/how-to-configure-clients.rst:2 -msgid "Configure clients" +#: ../../source/docker/tutorial-quickstart-docker.rst:390 +msgid "Step 8: Clean Up" msgstr "" -#: ../../source/how-to-configure-clients.rst:4 -msgid "" -"Along with model parameters, Flower can send configuration values to " -"clients. Configuration values can be used for various purposes. They are," -" for example, a popular way to control client-side hyperparameters from " -"the server." 
+#: ../../source/docker/tutorial-quickstart-docker.rst:392 +msgid "Remove the containers and the bridge network:" msgstr "" -#: ../../source/how-to-configure-clients.rst:7 -msgid "Configuration values" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:408 +#: ../../source/docker/tutorial-quickstart-docker.rst:404 +msgid "Where to Go Next" msgstr "" -#: ../../source/how-to-configure-clients.rst:9 -msgid "" -"Configuration values are represented as a dictionary with ``str`` keys " -"and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " -"float), ``int``, or ``str`` (or equivalent types in different languages)." -" Here is an example of a configuration dictionary in Python:" +#: ../../source/docker/tutorial-quickstart-docker.rst:406 +msgid ":doc:`enable-tls`" msgstr "" -#: ../../source/how-to-configure-clients.rst:20 -msgid "" -"Flower serializes these configuration dictionaries (or *config dict* for " -"short) to their ProtoBuf representation, transports them to the client " -"using gRPC, and then deserializes them back to Python dictionaries." +#: ../../source/docker/tutorial-quickstart-docker.rst:407 +msgid ":doc:`persist-superlink-state`" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:408 +msgid ":doc:`tutorial-quickstart-docker-compose`" msgstr "" -#: ../../source/how-to-configure-clients.rst:24 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:2 +msgid "Quickstart with Docker Compose" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:4 msgid "" -"Currently, there is no support for directly sending collection types " -"(e.g., ``Set``, ``List``, ``Map``) as values in configuration " -"dictionaries. There are several workarounds to send collections as values" -" by converting them to one of the supported value types (and converting " -"them back on the client-side)." 
+"This quickstart shows you how to set up Flower using Docker Compose in a " +"single command, allowing you to focus on developing your application " +"without worrying about the underlying infrastructure." msgstr "" -#: ../../source/how-to-configure-clients.rst:26 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:8 msgid "" -"One can, for example, convert a list of floating-point numbers to a JSON " -"string, then send the JSON string using the configuration dictionary, and" -" then convert the JSON string back to a list of floating-point numbers on" -" the client." +"You will also learn how to easily enable TLS encryption and persist " +"application state locally, giving you the freedom to choose the " +"configuration that best suits your project's needs." msgstr "" -#: ../../source/how-to-configure-clients.rst:30 -msgid "Configuration through built-in strategies" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:24 +msgid "Clone the Docker Compose ``complete`` directory:" msgstr "" -#: ../../source/how-to-configure-clients.rst:32 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:38 msgid "" -"The easiest way to send configuration values to clients is to use a " -"built-in strategy like :code:`FedAvg`. Built-in strategies support so-" -"called configuration functions. A configuration function is a function " -"that the built-in strategy calls to get the configuration dictionary for " -"the current round. It then forwards the configuration dictionary to all " -"the clients selected during that round." +"Export the path of the newly created project. The path should be relative" +" to the location of the Docker Compose files:" msgstr "" -#: ../../source/how-to-configure-clients.rst:34 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:45 msgid "" -"Let's start with a simple example. 
Imagine we want to send (a) the batch " -"size that the client should use, (b) the current global round of " -"federated learning, and (c) the number of epochs to train on the client-" -"side. Our configuration function could look like this:" +"Setting the ``PROJECT_DIR`` helps Docker Compose locate the " +"``pyproject.toml`` file, allowing it to install dependencies in the " +"SuperExec and SuperNode images correctly." msgstr "" -#: ../../source/how-to-configure-clients.rst:47 -msgid "" -"To make the built-in strategies use this function, we can pass it to " -"``FedAvg`` during initialization using the parameter " -":code:`on_fit_config_fn`:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:49 +msgid "Step 2: Run Flower in Insecure Mode" msgstr "" -#: ../../source/how-to-configure-clients.rst:56 -msgid "One the client side, we receive the configuration dictionary in ``fit``:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:51 +msgid "" +"To begin, start Flower with the most basic configuration. In this setup, " +"Flower will run without TLS and without persisting the state." msgstr "" -#: ../../source/how-to-configure-clients.rst:67 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:56 msgid "" -"There is also an `on_evaluate_config_fn` to configure evaluation, which " -"works the same way. They are separate functions because one might want to" -" send different configuration values to `evaluate` (for example, to use a" -" different batch size)." +"Without TLS, the data sent between the services remains **unencrypted**. " +"Use it only for development purposes." msgstr "" -#: ../../source/how-to-configure-clients.rst:69 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:59 msgid "" -"The built-in strategies call this function every round (that is, every " -"time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). 
" -"Calling `on_evaluate_config_fn` every round allows us to vary/change the " -"config dict over consecutive rounds. If we wanted to implement a " -"hyperparameter schedule, for example, to increase the number of local " -"epochs during later rounds, we could do the following:" +"For production-oriented use cases, :ref:`enable TLS` for secure data" +" transmission." msgstr "" -#: ../../source/how-to-configure-clients.rst:82 -msgid "The :code:`FedAvg` strategy will call this function *every round*." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:70 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:184 +msgid "``docker compose``: The Docker command to run the Docker Compose tool." msgstr "" -#: ../../source/how-to-configure-clients.rst:85 -msgid "Configuring individual clients" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:71 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:185 +msgid "" +"``-f compose.yml``: Specify the YAML file that contains the basic Flower " +"service definitions." msgstr "" -#: ../../source/how-to-configure-clients.rst:87 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:72 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:190 msgid "" -"In some cases, it is necessary to send different configuration values to " -"different clients." +"``--build``: Rebuild the images for each service if they don't already " +"exist." msgstr "" -#: ../../source/how-to-configure-clients.rst:89 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:73 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:191 msgid "" -"This can be achieved by customizing an existing strategy or by " -":doc:`implementing a custom strategy from scratch `. 
Here's a nonsensical example that customizes :code:`FedAvg`" -" by adding a custom ``\"hello\": \"world\"`` configuration key/value pair" -" to the config dict of a *single client* (only the first client in the " -"list, the other clients in this round to not receive this \"special\" " -"config value):" +"``-d``: Detach the containers from the terminal and run them in the " +"background." msgstr "" -#: ../../source/how-to-configure-logging.rst:2 -msgid "Configure logging" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:76 +msgid "Step 3: Run the Quickstart Project" msgstr "" -#: ../../source/how-to-configure-logging.rst:4 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:78 msgid "" -"The Flower logger keeps track of all core events that take place in " -"federated learning workloads. It presents information by default " -"following a standard message format:" +"Now that the Flower services have been started via Docker Compose, it is " +"time to run the quickstart example." msgstr "" -#: ../../source/how-to-configure-logging.rst:13 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:81 msgid "" -"containing relevant information including: log message level (e.g. " -":code:`INFO`, :code:`DEBUG`), a timestamp, the line where the logging " -"took place from, as well as the log message itself. In this way, the " -"logger would typically display information on your terminal as follows:" +"To ensure the ``flwr`` CLI connects to the SuperExec, you need to specify" +" the SuperExec addresses in the ``pyproject.toml`` file." 
msgstr "" -#: ../../source/how-to-configure-logging.rst:34 -msgid "Saving log to file" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:84 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:232 +msgid "Add the following lines to the ``quickstart-compose/pyproject.toml``:" msgstr "" -#: ../../source/how-to-configure-logging.rst:36 -msgid "" -"By default, the Flower log is outputted to the terminal where you launch " -"your Federated Learning workload from. This applies for both gRPC-based " -"federation (i.e. when you do :code:`fl.server.start_server`) and when " -"using the :code:`VirtualClientEngine` (i.e. when you do " -":code:`fl.simulation.start_simulation`). In some situations you might " -"want to save this log to disk. You can do so by calling the " -"`fl.common.logger.configure() " -"`_" -" function. For example:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:86 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:234 +msgid "quickstart-compose/pyproject.toml" msgstr "" -#: ../../source/how-to-configure-logging.rst:53 -msgid "" -"With the above, Flower will record the log you see on your terminal to " -":code:`log.txt`. This file will be created in the same directory as were " -"you are running the code from. If we inspect we see the log above is also" -" recorded but prefixing with :code:`identifier` each line:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:93 +msgid "Execute the command to run the quickstart example:" msgstr "" -#: ../../source/how-to-configure-logging.rst:74 -msgid "Log your own messages" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:99 +msgid "Monitor the SuperExec logs and wait for the summary to appear:" msgstr "" -#: ../../source/how-to-configure-logging.rst:76 -msgid "" -"You might expand the information shown by default with the Flower logger " -"by adding more messages relevant to your application. You can achieve " -"this easily as follows." 
+#: ../../source/docker/tutorial-quickstart-docker-compose.rst:106 +msgid "Step 4: Update the Application" msgstr "" -#: ../../source/how-to-configure-logging.rst:102 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:108 +msgid "In the next step, change the application code." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:110 msgid "" -"In this way your logger will show, in addition to the default messages, " -"the ones introduced by the clients as specified above." +"For example, go to the ``task.py`` file in the ``quickstart-" +"compose/quickstart_compose/`` directory and add a ``print`` call in the " +"``get_weights`` function:" msgstr "" -#: ../../source/how-to-configure-logging.rst:128 -msgid "Log to a remote service" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:114 +msgid "quickstart-compose/quickstart_compose/task.py" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:125 +msgid "Rebuild and restart the services." msgstr "" -#: ../../source/how-to-configure-logging.rst:130 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:129 msgid "" -"The :code:`fl.common.logger.configure` function, also allows specifying a" -" host to which logs can be pushed (via :code:`POST`) through a native " -"Python :code:`logging.handler.HTTPHandler`. This is a particularly useful" -" feature in :code:`gRPC`-based Federated Learning workloads where " -"otherwise gathering logs from all entities (i.e. the server and the " -"clients) might be cumbersome. Note that in Flower simulation, the server " -"automatically displays all logs. You can still specify a " -":code:`HTTPHandler` should you wish to backup or analyze the logs " -"somewhere else." +"If you have modified the dependencies listed in your ``pyproject.toml`` " +"file, it is essential to rebuild images." 
msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:2 -msgid "Enable SSL connections" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:132 +msgid "If you haven't made any changes, you can skip this step." msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:4 -msgid "" -"This guide describes how to a SSL-enabled secure Flower server " -"(:code:`SuperLink`) can be started and how a Flower client " -"(:code:`SuperNode`) can establish a secure connections to it." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:134 +msgid "Run the following command to rebuild and restart the services:" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:7 -msgid "" -"A complete code example demonstrating a secure connection can be found " -"`here `_." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:140 +msgid "Run the updated quickstart example:" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:10 -msgid "" -"The code example comes with a :code:`README.md` file which explains how " -"to start it. Although it is already SSL-enabled, it might be less " -"descriptive on how it does so. Stick to this guide for a deeper " -"introduction to the topic." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:147 +msgid "In the SuperExec logs, you should find the ``Get weights`` line:" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:16 -msgid "Certificates" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:164 +msgid "Step 5: Persisting the SuperLink State" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:166 msgid "" -"Using SSL-enabled connections requires certificates to be passed to the " -"server and client. For the purpose of this guide we are going to generate" -" self-signed certificates. 
As this can become quite complex we are going " -"to ask you to run the script in :code:`examples/advanced-" -"tensorflow/certificates/generate.sh` with the following command sequence:" +"In this step, Flower services are configured to persist the state of the " +"SuperLink service, ensuring that it maintains its state even after a " +"restart." msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:29 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:171 msgid "" -"This will generate the certificates in :code:`examples/advanced-" -"tensorflow/.cache/certificates`." +"When working with Docker Compose on Linux, you may need to create the " +"``state`` directory first and change its ownership to ensure proper " +"access and permissions." msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:31 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:174 msgid "" -"The approach for generating SSL certificates in the context of this " -"example can serve as an inspiration and starting point, but it should not" -" be used as a reference for production environments. Please refer to " -"other sources regarding the issue of correctly generating certificates " -"for production environments. For non-critical prototyping or research " -"projects, it might be sufficient to use the self-signed certificates " -"generated using the scripts mentioned in this guide." +"For more information, consult the following page: :doc:`persist-" +"superlink-state`." 
msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:39 -msgid "Server (SuperLink)" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:176 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:226 +msgid "Run the command:" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:41 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst msgid "" -"Use the following terminal command to start a sever (SuperLink) that uses" -" the previously generated certificates:" +"``-f with-state.yml``: Specifies the path to an additional Docker Compose" +" file that" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:47 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst +msgid "contains the configuration for persisting the SuperLink state." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst msgid "" -"When providing certificates, the server expects a tuple of three " -"certificates paths: CA certificate, server certificate and server private" -" key." +"Docker merges Compose files according to `merging rules " +"`_." 
msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:51 -msgid "Client (SuperNode)" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:193 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:247 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:375 +msgid "Rerun the ``quickstart-compose`` project:" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:53 -msgid "" -"Use the following terminal command to start a client (SuperNode) that " -"uses the previously generated certificates:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:199 +msgid "Check the content of the ``state`` directory:" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:61 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:206 msgid "" -"When setting :code:`root_certificates`, the client expects a file path to" -" PEM-encoded root certificates." +"You should see a ``state.db`` file in the ``state`` directory. If you " +"restart the service, the state file will be used to restore the state " +"from the previously saved data. This ensures that the data persists even " +"if the containers are stopped and started again." msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:67 -msgid "" -"You should now have learned how to generate self-signed certificates " -"using the given script, start an SSL-enabled server and have a client " -"establish a secure connection to it." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:214 +msgid "Step 6: Run Flower with TLS" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:72 -msgid "Additional resources" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:216 +msgid "" +"To demonstrate how to enable TLS, generate self-signed certificates using" +" the ``certs.yml`` Compose file." 
msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:74 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:223 msgid "" -"These additional sources might be relevant if you would like to dive " -"deeper into the topic of certificates:" +"For production environments, use a service like `Let's Encrypt " +"`_ to obtain your certificates." msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:76 -msgid "`Let's Encrypt `_" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:241 +msgid "Restart the services with TLS enabled:" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:77 -msgid "`certbot `_" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:255 +msgid "Step 7: Add another SuperNode" msgstr "" -#: ../../source/how-to-implement-strategies.rst:2 -msgid "Implement strategies" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:257 +msgid "" +"You can add more SuperNodes and ClientApps by duplicating their " +"definitions in the ``compose.yml`` file." msgstr "" -#: ../../source/how-to-implement-strategies.rst:4 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:260 msgid "" -"The strategy abstraction enables implementation of fully custom " -"strategies. A strategy is basically the federated learning algorithm that" -" runs on the server. Strategies decide how to sample clients, how to " -"configure clients for training, how to aggregate updates, and how to " -"evaluate models. Flower provides a few built-in strategies which are " -"based on the same API described below." +"Just give each new SuperNode and ClientApp service a unique service name " +"like ``supernode-3``, ``clientapp-3``, etc." 
msgstr "" -#: ../../source/how-to-implement-strategies.rst:11 -msgid "The :code:`Strategy` abstraction" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:263 +msgid "In ``compose.yml``, add the following:" msgstr "" -#: ../../source/how-to-implement-strategies.rst:13 -msgid "" -"All strategy implementation are derived from the abstract base class " -":code:`flwr.server.strategy.Strategy`, both built-in implementations and " -"third party implementations. This means that custom strategy " -"implementations have the exact same capabilities at their disposal as " -"built-in ones." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:265 +msgid "compose.yml" msgstr "" -#: ../../source/how-to-implement-strategies.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:316 msgid "" -"The strategy abstraction defines a few abstract methods that need to be " -"implemented:" +"If you also want to enable TLS for the new SuperNodes, duplicate the " +"SuperNode definition for each new SuperNode service in the ``with-" +"tls.yml`` file." msgstr "" -#: ../../source/how-to-implement-strategies.rst:75 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:319 msgid "" -"Creating a new strategy means implementing a new :code:`class` (derived " -"from the abstract base class :code:`Strategy`) that implements for the " -"previously shown abstract methods:" +"Make sure that the names of the services match with the one in the " +"``compose.yml`` file." msgstr "" -#: ../../source/how-to-implement-strategies.rst:100 -msgid "The Flower server calls these methods in the following order:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:321 +msgid "In ``with-tls.yml``, add the following:" msgstr "" -#: ../../source/how-to-implement-strategies.rst:177 -msgid "The following sections describe each of those methods in more detail." 
+#: ../../source/docker/tutorial-quickstart-docker-compose.rst:323 +msgid "with-tls.yml" msgstr "" -#: ../../source/how-to-implement-strategies.rst:180 -msgid "The :code:`initialize_parameters` method" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:345 +msgid "Step 8: Persisting the SuperLink State and Enabling TLS" msgstr "" -#: ../../source/how-to-implement-strategies.rst:182 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:347 msgid "" -":code:`initialize_parameters` is called only once, at the very beginning " -"of an execution. It is responsible for providing the initial global model" -" parameters in a serialized form (i.e., as a :code:`Parameters` object)." +"To run Flower with persisted SuperLink state and enabled TLS, a slight " +"change in the ``with-state.yml`` file is required:" msgstr "" -#: ../../source/how-to-implement-strategies.rst:184 -msgid "" -"Built-in strategies return user-provided initial parameters. The " -"following example shows how initial parameters can be passed to " -":code:`FedAvg`:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:350 +msgid "Comment out the lines 2-4 and uncomment the lines 5-9:" msgstr "" -#: ../../source/how-to-implement-strategies.rst:209 -msgid "" -"The Flower server will call :code:`initialize_parameters`, which either " -"returns the parameters that were passed to :code:`initial_parameters`, or" -" :code:`None`. If no parameters are returned from " -":code:`initialize_parameters` (i.e., :code:`None`), the server will " -"randomly select one client and ask it to provide its parameters. This is " -"a convenience feature and not recommended in practice, but it can be " -"useful for prototyping. In practice, it is recommended to always use " -"server-side parameter initialization." 
+#: ../../source/docker/tutorial-quickstart-docker-compose.rst:352 +msgid "with-state.yml" msgstr "" -#: ../../source/how-to-implement-strategies.rst:213 -msgid "" -"Server-side parameter initialization is a powerful mechanism. It can be " -"used, for example, to resume training from a previously saved checkpoint." -" It is also the fundamental capability needed to implement hybrid " -"approaches, for example, to fine-tune a pre-trained model using federated" -" learning." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 +msgid "Restart the services:" msgstr "" -#: ../../source/how-to-implement-strategies.rst:216 -msgid "The :code:`configure_fit` method" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:383 +msgid "Step 9: Merge Multiple Compose Files" msgstr "" -#: ../../source/how-to-implement-strategies.rst:218 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:385 msgid "" -":code:`configure_fit` is responsible for configuring the upcoming round " -"of training. What does *configure* mean in this context? Configuring a " -"round means selecting clients and deciding what instructions to send to " -"these clients. The signature of :code:`configure_fit` makes this clear:" +"You can merge multiple Compose files into a single file. For instance, if" +" you wish to combine the basic configuration with the TLS configuration, " +"execute the following command:" msgstr "" -#: ../../source/how-to-implement-strategies.rst:231 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:394 msgid "" -"The return value is a list of tuples, each representing the instructions " -"that will be sent to a particular client. Strategy implementations " -"usually perform the following steps in :code:`configure_fit`:" +"This will merge the contents of ``compose.yml`` and ``with-tls.yml`` into" +" a new file called ``my_compose.yml``." 
msgstr "" -#: ../../source/how-to-implement-strategies.rst:233 -#: ../../source/how-to-implement-strategies.rst:280 -msgid "" -"Use the :code:`client_manager` to randomly sample all (or a subset of) " -"available clients (each represented as a :code:`ClientProxy` object)" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:398 +msgid "Step 10: Clean Up" msgstr "" -#: ../../source/how-to-implement-strategies.rst:234 -msgid "" -"Pair each :code:`ClientProxy` with the same :code:`FitIns` holding the " -"current global model :code:`parameters` and :code:`config` dict" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:400 +msgid "Remove all services and volumes:" msgstr "" -#: ../../source/how-to-implement-strategies.rst:236 -msgid "" -"More sophisticated implementations can use :code:`configure_fit` to " -"implement custom client selection logic. A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_fit`." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:410 +msgid ":doc:`run-quickstart-examples-docker-compose`" msgstr "" -#: ../../source/how-to-implement-strategies.rst:240 -msgid "" -"The structure of this return value provides a lot of flexibility to the " -"user. Since instructions are defined on a per-client basis, different " -"instructions can be sent to each client. This enables custom strategies " -"to train, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." +#: ../../source/docker/use-a-different-version.rst:2 +msgid "Use a Different Flower Version" msgstr "" -#: ../../source/how-to-implement-strategies.rst:243 -msgid "The :code:`aggregate_fit` method" +#: ../../source/docker/use-a-different-version.rst:4 +msgid "" +"If you want to use a different version of Flower, for example Flower " +"nightly, you can do so by changing the tag. 
All available versions are on" +" `Docker Hub `__." msgstr "" -#: ../../source/how-to-implement-strategies.rst:245 +#: ../../source/docker/use-a-different-version.rst:10 msgid "" -":code:`aggregate_fit` is responsible for aggregating the results returned" -" by the clients that were selected and asked to train in " -":code:`configure_fit`." +"When using Flower nightly, the SuperLink nightly image must be paired " +"with the corresponding SuperNode and ServerApp nightly images released on" +" the same day. To ensure the versions are in sync, using the concrete " +"tag, e.g., ``1.10.0.dev20240610`` instead of ``nightly`` is recommended." msgstr "" -#: ../../source/how-to-implement-strategies.rst:258 -msgid "" -"Of course, failures can happen, so there is no guarantee that the server " -"will get results from all the clients it sent instructions to (via " -":code:`configure_fit`). :code:`aggregate_fit` therefore receives a list " -"of :code:`results`, but also a list of :code:`failures`." +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 +msgid "Example: FedBN in PyTorch - From Centralized To Federated" msgstr "" -#: ../../source/how-to-implement-strategies.rst:260 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 msgid "" -":code:`aggregate_fit` returns an optional :code:`Parameters` object and a" -" dictionary of aggregated metrics. The :code:`Parameters` return value is" -" optional because :code:`aggregate_fit` might decide that the results " -"provided are not sufficient for aggregation (e.g., too many failures)." +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload with `FedBN " +"`_, a federated training strategy " +"designed for non-iid data. We are using PyTorch to train a Convolutional " +"Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. 
" +"When applying FedBN, only few changes needed compared to :doc:`Example: " +"PyTorch - From Centralized To Federated `." msgstr "" -#: ../../source/how-to-implement-strategies.rst:263 -msgid "The :code:`configure_evaluate` method" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:12 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 +msgid "Centralized Training" msgstr "" -#: ../../source/how-to-implement-strategies.rst:265 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:14 msgid "" -":code:`configure_evaluate` is responsible for configuring the upcoming " -"round of evaluation. What does *configure* mean in this context? " -"Configuring a round means selecting clients and deciding what " -"instructions to send to these clients. The signature of " -":code:`configure_evaluate` makes this clear:" +"All files are revised based on :doc:`Example: PyTorch - From Centralized " +"To Federated `. The only " +"thing to do is modifying the file called ``cifar.py``, revised part is " +"shown below:" msgstr "" -#: ../../source/how-to-implement-strategies.rst:278 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:18 msgid "" -"The return value is a list of tuples, each representing the instructions " -"that will be sent to a particular client. Strategy implementations " -"usually perform the following steps in :code:`configure_evaluate`:" +"The model architecture defined in class Net() is added with Batch " +"Normalization layers accordingly." 
msgstr "" -#: ../../source/how-to-implement-strategies.rst:281 -msgid "" -"Pair each :code:`ClientProxy` with the same :code:`EvaluateIns` holding " -"the current global model :code:`parameters` and :code:`config` dict" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:171 +msgid "You can now run your machine learning workload:" msgstr "" -#: ../../source/how-to-implement-strategies.rst:283 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 msgid "" -"More sophisticated implementations can use :code:`configure_evaluate` to " -"implement custom client selection logic. A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_evaluate`." -msgstr "" - -#: ../../source/how-to-implement-strategies.rst:287 -msgid "" -"The structure of this return value provides a lot of flexibility to the " -"user. Since instructions are defined on a per-client basis, different " -"instructions can be sent to each client. This enables custom strategies " -"to evaluate, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." +"So far this should all look fairly familiar if you've used PyTorch " +"before. Let's take the next step and use what we've built to create a " +"federated learning system within FedBN, the system consists of one server" +" and two clients." 
msgstr "" -#: ../../source/how-to-implement-strategies.rst:291 -msgid "The :code:`aggregate_evaluate` method" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:58 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:182 +msgid "Federated Training" msgstr "" -#: ../../source/how-to-implement-strategies.rst:293 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:60 msgid "" -":code:`aggregate_evaluate` is responsible for aggregating the results " -"returned by the clients that were selected and asked to evaluate in " -":code:`configure_evaluate`." +"If you have read :doc:`Example: PyTorch - From Centralized To Federated " +"`, the following parts are" +" easy to follow, only ``get_parameters`` and ``set_parameters`` function " +"in ``client.py`` needed to revise. If not, please read the :doc:`Example:" +" PyTorch - From Centralized To Federated `. first." msgstr "" -#: ../../source/how-to-implement-strategies.rst:306 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:66 msgid "" -"Of course, failures can happen, so there is no guarantee that the server " -"will get results from all the clients it sent instructions to (via " -":code:`configure_evaluate`). :code:`aggregate_evaluate` therefore " -"receives a list of :code:`results`, but also a list of :code:`failures`." +"Our example consists of one *server* and two *clients*. In FedBN, " +"``server.py`` keeps unchanged, we can start the server directly." msgstr "" -#: ../../source/how-to-implement-strategies.rst:308 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:73 msgid "" -":code:`aggregate_evaluate` returns an optional :code:`float` (loss) and a" -" dictionary of aggregated metrics. The :code:`float` return value is " -"optional because :code:`aggregate_evaluate` might decide that the results" -" provided are not sufficient for aggregation (e.g., too many failures)." 
-msgstr "" - -#: ../../source/how-to-implement-strategies.rst:311 -msgid "The :code:`evaluate` method" +"Finally, we will revise our *client* logic by changing ``get_parameters``" +" and ``set_parameters`` in ``client.py``, we will exclude batch " +"normalization parameters from model parameter list when sending to or " +"receiving from the server." msgstr "" -#: ../../source/how-to-implement-strategies.rst:313 -msgid "" -":code:`evaluate` is responsible for evaluating model parameters on the " -"server-side. Having :code:`evaluate` in addition to " -":code:`configure_evaluate`/:code:`aggregate_evaluate` enables strategies " -"to perform both servers-side and client-side (federated) evaluation." +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:102 +msgid "Now, you can now open two additional terminal windows and run" msgstr "" -#: ../../source/how-to-implement-strategies.rst:323 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:108 msgid "" -"The return value is again optional because the strategy might not need to" -" implement server-side evaluation or because the user-defined " -":code:`evaluate` method might not complete successfully (e.g., it might " -"fail to load the server-side evaluation data)." +"in each window (make sure that the server is still running before you do " +"so) and see your (previously centralized) PyTorch project run federated " +"learning with FedBN strategy across two clients. Congratulations!" 
msgstr "" -#: ../../source/how-to-install-flower.rst:2 -msgid "Install Flower" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:113 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:349 +#: ../../source/tutorial-quickstart-jax.rst:319 +msgid "Next Steps" msgstr "" -#: ../../source/how-to-install-flower.rst:6 -msgid "Python version" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:115 +msgid "" +"The full source code for this example can be found `here " +"`_. Our example is of course somewhat over-" +"simplified because both clients load the exact same dataset, which isn't " +"realistic. You're now prepared to explore this topic further. How about " +"using different subsets of CIFAR-10 on each client? How about adding more" +" clients?" msgstr "" -#: ../../source/how-to-install-flower.rst:12 -msgid "Install stable release" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:2 +msgid "Example: PyTorch - From Centralized To Federated" msgstr "" -#: ../../source/how-to-install-flower.rst:15 -#: ../../source/how-to-upgrade-to-flower-next.rst:46 -msgid "Using pip" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:4 +msgid "" +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload. We are using PyTorch to" +" train a Convolutional Neural Network on the CIFAR-10 dataset. First, we " +"introduce this machine learning task with a centralized training approach" +" based on the `Deep Learning with PyTorch " +"`_ " +"tutorial. Then, we build upon the centralized training code to run the " +"training in a federated fashion." msgstr "" -#: ../../source/how-to-install-flower.rst:17 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:14 msgid "" -"Stable releases are available on `PyPI " -"`_::" +"We begin with a brief description of the centralized CNN training code. 
" +"If you want a more in-depth explanation of what's going on then have a " +"look at the official `PyTorch tutorial " +"`_." msgstr "" -#: ../../source/how-to-install-flower.rst:21 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:18 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr`` should be " -"installed with the ``simulation`` extra::" +"Let's create a new file called ``cifar.py`` with all the components " +"required for a traditional (centralized) training on CIFAR-10. First, all" +" required packages (such as ``torch`` and ``torchvision``) need to be " +"imported. You can see that we do not import any package for federated " +"learning. You can keep all these imports as they are even when we add the" +" federated learning components at a later point." msgstr "" -#: ../../source/how-to-install-flower.rst:27 -msgid "Using conda (or mamba)" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:36 +msgid "" +"As already mentioned we will use the CIFAR-10 dataset for this machine " +"learning workload. The model architecture (a very simple Convolutional " +"Neural Network) is defined in ``class Net()``." msgstr "" -#: ../../source/how-to-install-flower.rst:29 -msgid "Flower can also be installed from the ``conda-forge`` channel." +#: ../../source/example-pytorch-from-centralized-to-federated.rst:62 +msgid "" +"The ``load_data()`` function loads the CIFAR-10 training and test sets. " +"The ``transform`` normalized the data after loading." msgstr "" -#: ../../source/how-to-install-flower.rst:31 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:84 msgid "" -"If you have not added ``conda-forge`` to your channels, you will first " -"need to run the following::" +"We now need to define the training (function ``train()``) which loops " +"over the training set, measures the loss, backpropagates it, and then " +"takes one optimizer step for each batch of training examples." 
msgstr "" -#: ../../source/how-to-install-flower.rst:36 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:88 msgid "" -"Once the ``conda-forge`` channel has been enabled, ``flwr`` can be " -"installed with ``conda``::" +"The evaluation of the model is defined in the function ``test()``. The " +"function loops over all test samples and measures the loss of the model " +"based on the test dataset." msgstr "" -#: ../../source/how-to-install-flower.rst:40 -msgid "or with ``mamba``::" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:149 +msgid "" +"Having defined the data loading, model architecture, training, and " +"evaluation we can put everything together and train our CNN on CIFAR-10." msgstr "" -#: ../../source/how-to-install-flower.rst:46 -msgid "Verify installation" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:177 +msgid "" +"So far, this should all look fairly familiar if you've used PyTorch " +"before. Let's take the next step and use what we've built to create a " +"simple federated learning system consisting of one server and two " +"clients." msgstr "" -#: ../../source/how-to-install-flower.rst:48 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:184 msgid "" -"The following command can be used to verify if Flower was successfully " -"installed. If everything worked, it should print the version of Flower to" -" the command line::" +"The simple machine learning project discussed in the previous section " +"trains the model on a single dataset (CIFAR-10), we call this centralized" +" learning. This concept of centralized learning, as shown in the previous" +" section, is probably known to most of you, and many of you have used it " +"previously. Normally, if you'd want to run machine learning workloads in " +"a federated fashion, then you'd have to change most of your code and set " +"everything up from scratch. This can be a considerable effort." 
msgstr "" -#: ../../source/how-to-install-flower.rst:55 -msgid "Advanced installation options" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 +msgid "" +"However, with Flower you can evolve your pre-existing code into a " +"federated learning setup without the need for a major rewrite." msgstr "" -#: ../../source/how-to-install-flower.rst:58 -msgid "Install via Docker" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:194 +msgid "" +"The concept is easy to understand. We have to start a *server* and then " +"use the code in ``cifar.py`` for the *clients* that are connected to the " +"*server*. The *server* sends model parameters to the clients. The " +"*clients* run the training and update the parameters. The updated " +"parameters are sent back to the *server* which averages all received " +"parameter updates. This describes one round of the federated learning " +"process and we repeat this for multiple rounds." msgstr "" -#: ../../source/how-to-install-flower.rst:60 -msgid ":doc:`How to run Flower using Docker `" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:201 +#: ../../source/tutorial-quickstart-jax.rst:147 +msgid "" +"Our example consists of one *server* and two *clients*. Let's set up " +"``server.py`` first. The *server* needs to import the Flower package " +"``flwr``. Next, we use the ``start_server`` function to start a server " +"and tell it to perform three rounds of federated learning." 
msgstr "" -#: ../../source/how-to-install-flower.rst:63 -msgid "Install pre-release" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:215 +#: ../../source/tutorial-quickstart-jax.rst:161 +msgid "We can already start the *server*:" msgstr "" -#: ../../source/how-to-install-flower.rst:65 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 msgid "" -"New (possibly unstable) versions of Flower are sometimes available as " -"pre-release versions (alpha, beta, release candidate) before the stable " -"release happens::" +"Finally, we will define our *client* logic in ``client.py`` and build " +"upon the previously defined centralized training in ``cifar.py``. Our " +"*client* needs to import ``flwr``, but also ``torch`` to update the " +"parameters on our PyTorch model:" msgstr "" -#: ../../source/how-to-install-flower.rst:69 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:238 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr`` pre-releases" -" should be installed with the ``simulation`` extra::" +"Implementing a Flower *client* basically means implementing a subclass of" +" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " +"implementation will be based on ``flwr.client.NumPyClient`` and we'll " +"call it ``CifarClient``. ``NumPyClient`` is slightly easier to implement " +"than ``Client`` if you use a framework with good NumPy interoperability " +"(like PyTorch or TensorFlow/Keras) because it avoids some of the " +"boilerplate that would otherwise be necessary. 
``CifarClient`` needs to " +"implement four methods, two methods for getting/setting model parameters," +" one method for training the model, and one method for testing the model:" msgstr "" -#: ../../source/how-to-install-flower.rst:74 -msgid "Install nightly release" -msgstr "" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 +#, fuzzy +msgid "``set_parameters``" +msgstr "``SETUPTOOLS_VERSION``" -#: ../../source/how-to-install-flower.rst:76 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:248 +#: ../../source/tutorial-quickstart-jax.rst:192 msgid "" -"The latest (potentially unstable) changes in Flower are available as " -"nightly releases::" +"set the model parameters on the local model that are received from the " +"server" msgstr "" -#: ../../source/how-to-install-flower.rst:80 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 +#: ../../source/tutorial-quickstart-jax.rst:194 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr-nightly`` " -"should be installed with the ``simulation`` extra::" +"loop over the list of model parameters received as NumPy ``ndarray``'s " +"(think list of neural network layers)" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:2 -msgid "Monitor simulation" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 +#: ../../source/tutorial-quickstart-jax.rst:197 +#: ../../source/tutorial-quickstart-scikitlearn.rst:129 +msgid "``get_parameters``" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:4 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 +#: ../../source/tutorial-quickstart-jax.rst:197 msgid "" -"Flower allows you to monitor system resources while running your " -"simulation. Moreover, the Flower simulation engine is powerful and " -"enables you to decide how to allocate resources per client manner and " -"constrain the total usage. 
Insights from resource consumption can help " -"you make smarter decisions and speed up the execution time." +"get the model parameters and return them as a list of NumPy ``ndarray``'s" +" (which is what ``flwr.client.NumPyClient`` expects)" +msgstr "" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 +#: ../../source/tutorial-quickstart-jax.rst:202 +#: ../../source/tutorial-quickstart-scikitlearn.rst:136 +msgid "``fit``" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:6 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:255 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:260 +#: ../../source/tutorial-quickstart-jax.rst:200 +#: ../../source/tutorial-quickstart-jax.rst:205 msgid "" -"The specific instructions assume you are using macOS and have the " -"`Homebrew `_ package manager installed." +"update the parameters of the local model with the parameters received " +"from the server" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:10 -msgid "Downloads" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 +#: ../../source/tutorial-quickstart-jax.rst:202 +msgid "train the model on the local training set" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:16 -msgid "" -"`Prometheus `_ is used for data collection, while" -" `Grafana `_ will enable you to visualize the " -"collected data. They are both well integrated with `Ray " -"`_ which Flower uses under the hood." +#: ../../source/example-pytorch-from-centralized-to-federated.rst:258 +msgid "get the updated local model weights and return them to the server" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:18 -msgid "" -"Overwrite the configuration files (depending on your device, it might be " -"installed on a different path)." 
+#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 +#: ../../source/tutorial-quickstart-jax.rst:208 +#: ../../source/tutorial-quickstart-scikitlearn.rst:139 +msgid "``evaluate``" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:20 -msgid "If you are on an M1 Mac, it should be:" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:262 +#: ../../source/tutorial-quickstart-jax.rst:207 +msgid "evaluate the updated model on the local test set" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:27 -msgid "On the previous generation Intel Mac devices, it should be:" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 +msgid "return the local loss and accuracy to the server" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:34 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:265 msgid "" -"Open the respective configuration files and change them. Depending on " -"your device, use one of the two following commands:" +"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " +"functions ``train()`` and ``test()`` previously defined in ``cifar.py``. " +"So what we really do here is we tell Flower through our ``NumPyClient`` " +"subclass which of our already defined functions to call for training and " +"evaluation. We included type annotations to give you a better " +"understanding of the data types that get passed around." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:44 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:315 msgid "" -"and then delete all the text in the file and paste a new Prometheus " -"config you see below. You may adjust the time intervals to your " -"requirements:" +"All that's left to do it to define a function that loads both model and " +"data, creates a ``CifarClient``, and starts this client. You load your " +"data and model by using ``cifar.py``. 
Start ``CifarClient`` with the " +"function ``fl.client.start_client()`` by pointing it at the same IP " +"address we used in ``server.py``:" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:59 -msgid "" -"Now after you have edited the Prometheus configuration, do the same with " -"the Grafana configuration files. Open those using one of the following " -"commands as before:" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:338 +#: ../../source/tutorial-quickstart-jax.rst:309 +msgid "And that's it. You can now open two additional terminal windows and run" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:69 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:344 msgid "" -"Your terminal editor should open and allow you to apply the following " -"configuration as before." +"in each window (make sure that the server is running before you do so) " +"and see your (previously centralized) PyTorch project run federated " +"learning across two clients. Congratulations!" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:84 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:351 msgid "" -"Congratulations, you just downloaded all the necessary software needed " -"for metrics tracking. Now, let’s start it." +"The full source code for this example: `PyTorch: From Centralized To " +"Federated (Code) `_. Our example is, of course, " +"somewhat over-simplified because both clients load the exact same " +"dataset, which isn't realistic. You're now prepared to explore this topic" +" further. How about using different subsets of CIFAR-10 on each client? " +"How about adding more clients?" 
msgstr "" -#: ../../source/how-to-monitor-simulation.rst:88 -msgid "Tracking metrics" +#: ../../source/explanation-differential-privacy.rst:2 +#: ../../source/explanation-differential-privacy.rst:14 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 +msgid "Differential Privacy" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:90 +#: ../../source/explanation-differential-privacy.rst:4 msgid "" -"Before running your Flower simulation, you have to start the monitoring " -"tools you have just installed and configured." +"The information in datasets like healthcare, financial transactions, user" +" preferences, etc., is valuable and has the potential for scientific " +"breakthroughs and provides important business insights. However, such " +"data is also sensitive and there is a risk of compromising individual " +"privacy." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:97 +#: ../../source/explanation-differential-privacy.rst:9 msgid "" -"Please include the following argument in your Python code when starting a" -" simulation." -msgstr "" - -#: ../../source/how-to-monitor-simulation.rst:108 -msgid "Now, you are ready to start your workload." +"Traditional methods like anonymization alone would not work because of " +"attacks like Re-identification and Data Linkage. That's where " +"differential privacy comes in. It provides the possibility of analyzing " +"data while ensuring the privacy of individuals." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:110 +#: ../../source/explanation-differential-privacy.rst:16 msgid "" -"Shortly after the simulation starts, you should see the following logs in" -" your terminal:" +"Imagine two datasets that are identical except for a single record (for " +"instance, Alice's data). Differential Privacy (DP) guarantees that any " +"analysis (M), like calculating the average income, will produce nearly " +"identical results for both datasets (O and O' would be similar). 
This " +"preserves group patterns while obscuring individual details, ensuring the" +" individual's information remains hidden in the crowd." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:117 -msgid "You can look at everything at ``_ ." +#: ../../source/explanation-differential-privacy.rst:-1 +msgid "DP Intro" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:119 +#: ../../source/explanation-differential-privacy.rst:27 msgid "" -"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" -" lowest option)." +"One of the most commonly used mechanisms to achieve DP is adding enough " +"noise to the output of the analysis to mask the contribution of each " +"individual in the data while preserving the overall accuracy of the " +"analysis." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:121 -msgid "" -"Or alternatively, you can just see them in Grafana by clicking on the " -"right-up corner, “View in Grafana”. Please note that the Ray dashboard is" -" only accessible during the simulation. After the simulation ends, you " -"can only use Grafana to explore the metrics. You can start Grafana by " -"going to ``http://localhost:3000/``." +#: ../../source/explanation-differential-privacy.rst:32 +msgid "Formal Definition" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:123 +#: ../../source/explanation-differential-privacy.rst:34 msgid "" -"After you finish the visualization, stop Prometheus and Grafana. This is " -"important as they will otherwise block, for example port :code:`3000` on " -"your machine as long as they are running." -msgstr "" - -#: ../../source/how-to-monitor-simulation.rst:132 -msgid "Resource allocation" +"Differential Privacy (DP) provides statistical guarantees against the " +"information an adversary can infer through the output of a randomized " +"algorithm. It provides an unconditional upper bound on the influence of a" +" single individual on the output of the algorithm by adding noise [1]. 
A " +"randomized mechanism M provides (:math:`\\epsilon`, " +":math:`\\delta`)-differential privacy if for any two neighboring " +"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " +"record, and for all possible outputs S ⊆ Range(A):" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:134 +#: ../../source/explanation-differential-privacy.rst:42 msgid "" -"You must understand how the Ray library works to efficiently allocate " -"system resources to simulation clients on your own." +"\\small\n" +"P[M(D_{1} \\in A)] \\leq e^{\\epsilon} P[M(D_{2} \\in A)] + \\delta" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:136 +#: ../../source/explanation-differential-privacy.rst:47 msgid "" -"Initially, the simulation (which Ray handles under the hood) starts by " -"default with all the available resources on the system, which it shares " -"among the clients. It doesn't mean it divides it equally among all of " -"them, nor that the model training happens at all of them simultaneously. " -"You will learn more about that in the later part of this blog. You can " -"check the system resources by running the following:" +"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " +"metric of privacy loss. It also controls the privacy-utility trade-off; " +"lower :math:`\\epsilon` values indicate higher levels of privacy but are " +"likely to reduce utility as well. The :math:`\\delta` parameter accounts " +"for a small probability on which the upper bound :math:`\\epsilon` does " +"not hold. The amount of noise needed to achieve differential privacy is " +"proportional to the sensitivity of the output, which measures the maximum" +" change in the output due to the inclusion or removal of a single record." 
msgstr "" -#: ../../source/how-to-monitor-simulation.rst:143 -msgid "In Google Colab, the result you see might be similar to this:" +#: ../../source/explanation-differential-privacy.rst:56 +msgid "Differential Privacy in Machine Learning" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:155 +#: ../../source/explanation-differential-privacy.rst:58 msgid "" -"However, you can overwrite the defaults. When starting a simulation, do " -"the following (you don't need to overwrite all of them):" -msgstr "" - -#: ../../source/how-to-monitor-simulation.rst:175 -msgid "Let’s also specify the resource for a single client." +"DP can be utilized in machine learning to preserve the privacy of the " +"training data. Differentially private machine learning algorithms are " +"designed in a way to prevent the algorithm to learn any specific " +"information about any individual data points and subsequently prevent the" +" model from revealing sensitive information. Depending on the stage at " +"which noise is introduced, various methods exist for applying DP to " +"machine learning algorithms. One approach involves adding noise to the " +"training data (either to the features or labels), while another method " +"entails injecting noise into the gradients of the loss function during " +"model training. Additionally, such noise can be incorporated into the " +"model's output." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:205 -msgid "" -"Now comes the crucial part. Ray will start a new client only when it has " -"all the required resources (such that they run in parallel) when the " -"resources allow." +#: ../../source/explanation-differential-privacy.rst:69 +msgid "Differential Privacy in Federated Learning" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:207 +#: ../../source/explanation-differential-privacy.rst:71 msgid "" -"In the example above, only one client will be run, so your clients won't " -"run concurrently. 
Setting :code:`client_num_gpus = 0.5` would allow " -"running two clients and therefore enable them to run concurrently. Be " -"careful not to require more resources than available. If you specified " -":code:`client_num_gpus = 2`, the simulation wouldn't start (even if you " -"had 2 GPUs but decided to set 1 in :code:`ray_init_args`)." +"Federated learning is a data minimization approach that allows multiple " +"parties to collaboratively train a model without sharing their raw data. " +"However, federated learning also introduces new privacy challenges. The " +"model updates between parties and the central server can leak information" +" about the local data. These leaks can be exploited by attacks such as " +"membership inference and property inference attacks, or model inversion " +"attacks." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:212 ../../source/ref-faq.rst:2 -msgid "FAQ" +#: ../../source/explanation-differential-privacy.rst:78 +msgid "" +"DP can play a crucial role in federated learning to provide privacy for " +"the clients' data." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:214 -msgid "Q: I don't see any metrics logged." +#: ../../source/explanation-differential-privacy.rst:81 +msgid "" +"Depending on the granularity of privacy provision or the location of " +"noise addition, different forms of DP exist in federated learning. In " +"this explainer, we focus on two approaches of DP utilization in federated" +" learning based on where the noise is added: at the server (also known as" +" the center) or at the client (also known as the local)." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:216 +#: ../../source/explanation-differential-privacy.rst:86 msgid "" -"A: The timeframe might not be properly set. The setting is in the top " -"right corner (\"Last 30 minutes\" by default). Please change the " -"timeframe to reflect the period when the simulation was running." 
+"**Central Differential Privacy**: DP is applied by the server and the " +"goal is to prevent the aggregated model from leaking information about " +"each client's data." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:218 +#: ../../source/explanation-differential-privacy.rst:88 msgid "" -"Q: I see “Grafana server not detected. Please make sure the Grafana " -"server is running and refresh this page” after going to the Metrics tab " -"in Ray Dashboard." +"**Local Differential Privacy**: DP is applied on the client side before " +"sending any information to the server and the goal is to prevent the " +"updates that are sent to the server from leaking any information about " +"the client's data." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:220 -msgid "" -"A: You probably don't have Grafana running. Please check the running " -"services" +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:93 +#: ../../source/how-to-use-differential-privacy.rst:15 +msgid "Central Differential Privacy" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:226 +#: ../../source/explanation-differential-privacy.rst:95 msgid "" -"Q: I see \"This site can't be reached\" when going to " -"``_." +"In this approach, which is also known as user-level DP, the central " +"server is responsible for adding noise to the globally aggregated " +"parameters. It should be noted that trust in the server is required." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:228 +#: ../../source/explanation-differential-privacy.rst:104 msgid "" -"A: Either the simulation has already finished, or you still need to start" -" Prometheus." +"While there are various ways to implement central DP in federated " +"learning, we concentrate on the algorithms proposed by [2] and [3]. The " +"overall approach is to clip the model updates sent by the clients and add" +" some amount of noise to the aggregated model. 
In each iteration, a " +"random set of clients is chosen with a specific probability for training." +" Each client performs local training on its own data. The update of each " +"client is then clipped by some value `S` (sensitivity `S`). This would " +"limit the impact of any individual client which is crucial for privacy " +"and often beneficial for robustness. A common approach to achieve this is" +" by restricting the `L2` norm of the clients' model updates, ensuring " +"that larger updates are scaled down to fit within the norm `S`." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:232 -msgid "Resources" +#: ../../source/explanation-differential-privacy.rst:-1 +msgid "clipping" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:234 +#: ../../source/explanation-differential-privacy.rst:120 msgid "" -"Ray Dashboard: ``_" -msgstr "" - -#: ../../source/how-to-monitor-simulation.rst:236 -msgid "Ray Metrics: ``_" +"Afterwards, the Gaussian mechanism is used to add noise in order to " +"distort the sum of all clients' updates. The amount of noise is scaled to" +" the sensitivity value to obtain a privacy guarantee. The Gaussian " +"mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " +"noise_scale * S ) / (number of sampled clients)`." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:2 -msgid "Run Flower using Docker" +#: ../../source/explanation-differential-privacy.rst:126 +msgid "Clipping" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:4 +#: ../../source/explanation-differential-privacy.rst:128 msgid "" -"The simplest way to get started with Flower is by using the pre-made " -"Docker images, which you can find on `Docker Hub " -"`__." -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:7 -msgid "Before you start, make sure that the Docker daemon is running:" +"There are two forms of clipping commonly used in Central DP: Fixed " +"Clipping and Adaptive Clipping." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:14 +#: ../../source/explanation-differential-privacy.rst:131 msgid "" -"If you do not see the version of Docker but instead get an error saying " -"that the command was not found, you will need to install Docker first. " -"You can find installation instruction `here `_." +"**Fixed Clipping** : A predefined fix threshold is set for the magnitude " +"of clients' updates. Any update exceeding this threshold is clipped back " +"to the threshold value." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:20 +#: ../../source/explanation-differential-privacy.rst:133 msgid "" -"On Linux, Docker commands require ``sudo`` privilege. If you want to " -"avoid using ``sudo``, you can follow the `Post-installation steps " -"`_ on the " -"official Docker website." +"**Adaptive Clipping** : The clipping threshold dynamically adjusts based " +"on the observed update distribution [4]. It means that the clipping value" +" is tuned during the rounds with respect to the quantile of the update " +"norm distribution." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:26 +#: ../../source/explanation-differential-privacy.rst:137 msgid "" -"To ensure optimal performance and compatibility, the SuperLink, SuperNode" -" and ServerApp image must have the same version when running together. " -"This guarantees seamless integration and avoids potential conflicts or " -"issues that may arise from using different versions." +"The choice between fixed and adaptive clipping depends on various factors" +" such as privacy requirements, data distribution, model complexity, and " +"others." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:31 -msgid "Flower SuperLink" +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:141 +#: ../../source/how-to-use-differential-privacy.rst:113 +msgid "Local Differential Privacy" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:34 -msgid "Quickstart" +#: ../../source/explanation-differential-privacy.rst:143 +msgid "" +"In this approach, each client is responsible for performing DP. Local DP " +"avoids the need for a fully trusted aggregator, but it should be noted " +"that local DP leads to a decrease in accuracy but better privacy in " +"comparison to central DP." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:36 -msgid "If you're looking to try out Flower, you can use the following command:" +#: ../../source/explanation-differential-privacy.rst:152 +msgid "In this explainer, we focus on two forms of achieving Local DP:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:42 +#: ../../source/explanation-differential-privacy.rst:154 msgid "" -"The command pulls the Docker image with the tag ``1.8.0`` from Docker " -"Hub. The tag specifies the Flower version. In this case, Flower 1.8.0. " -"The ``--rm`` flag tells Docker to remove the container after it exits." +"Each client adds noise to the local updates before sending them to the " +"server. To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " +"the sensitivity of the local model to be ∆, Gaussian noise is applied " +"with a noise scale of σ where:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:48 +#: ../../source/explanation-differential-privacy.rst:158 msgid "" -"By default, the Flower SuperLink keeps state in-memory. When using the " -"Docker flag ``--rm``, the state is not persisted between container " -"starts. We will show below how to save the state in a file on your host " -"system." 
+"\\small\n" +"\\frac{∆ \\times \\sqrt{2 \\times " +"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:52 +#: ../../source/explanation-differential-privacy.rst:163 msgid "" -"The ``-p :`` flag tells Docker to map the ports " -"``9091``/``9092`` of the host to ``9091``/``9092`` of the container, " -"allowing you to access the Driver API on ``http://localhost:9091`` and " -"the Fleet API on ``http://localhost:9092``. Lastly, any flag that comes " -"after the tag is passed to the Flower SuperLink. Here, we are passing the" -" flag ``--insecure``." +"Each client adds noise to the gradients of the model during the local " +"training (DP-SGD). More specifically, in this approach, gradients are " +"clipped and an amount of calibrated noise is injected into the gradients." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:59 -#: ../../source/how-to-run-flower-using-docker.rst:238 -#: ../../source/how-to-run-flower-using-docker.rst:354 +#: ../../source/explanation-differential-privacy.rst:167 msgid "" -"The ``--insecure`` flag enables insecure communication (using HTTP, not " -"HTTPS) and should only be used for testing purposes. We strongly " -"recommend enabling `SSL `__ when " -"deploying to a production environment." +"Please note that these two approaches are providing privacy at different " +"levels." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:64 -msgid "" -"You can use ``--help`` to view all available flags that the SuperLink " -"supports:" +#: ../../source/explanation-differential-privacy.rst:169 +msgid "**References:**" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:71 -msgid "Mounting a volume to store the state on the host system" +#: ../../source/explanation-differential-privacy.rst:171 +msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:73 +#: ../../source/explanation-differential-privacy.rst:173 msgid "" -"If you want to persist the state of the SuperLink on your host system, " -"all you need to do is specify a path where you want to save the file on " -"your host system and a name for the database file. In the example below, " -"we tell Docker via the flag ``--volume`` to mount the user's home " -"directory (``~/`` on your host) into the ``/app/`` directory of the " -"container. Furthermore, we use the flag ``--database`` to specify the " -"name of the database file." +"[2] McMahan et al. Learning Differentially Private Recurrent Language " +"Models." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:86 +#: ../../source/explanation-differential-privacy.rst:175 msgid "" -"As soon as the SuperLink starts, the file ``state.db`` is created in the " -"user's home directory on your host system. If the file already exists, " -"the SuperLink tries to restore the state from the file. To start the " -"SuperLink with an empty database, simply remove the ``state.db`` file." -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:91 -#: ../../source/how-to-run-flower-using-docker.rst:260 -#: ../../source/how-to-run-flower-using-docker.rst:375 -msgid "Enabling SSL for secure connections" +"[3] Geyer et al. Differentially Private Federated Learning: A Client " +"Level Perspective." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:93 -msgid "" -"To enable SSL, you will need a PEM-encoded root certificate, a PEM-" -"encoded private key and a PEM-encoded certificate chain." +#: ../../source/explanation-differential-privacy.rst:177 +msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:97 -msgid "" -"For testing purposes, you can generate your own self-signed certificates." 
-" The `Enable SSL connections `__ page contains a section that" -" will guide you through the process." +#: ../../source/explanation-federated-evaluation.rst:2 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 +msgid "Federated evaluation" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:101 +#: ../../source/explanation-federated-evaluation.rst:4 msgid "" -"Assuming all files we need are in the local ``certificates`` directory, " -"we can use the flag ``--volume`` to mount the local directory into the " -"``/app/`` directory of the container. This allows the SuperLink to access" -" the files within the container. Finally, we pass the names of the " -"certificates to the SuperLink with the ``--certificates`` flag." +"There are two main approaches to evaluating models in federated learning " +"systems: centralized (or server-side) evaluation and federated (or " +"client-side) evaluation." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:113 -msgid "Flower SuperNode" +#: ../../source/explanation-federated-evaluation.rst:8 +msgid "Centralized Evaluation" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:115 -msgid "" -"The SuperNode Docker image comes with a pre-installed version of Flower " -"and serves as a base for building your own SuperNode image." +#: ../../source/explanation-federated-evaluation.rst:11 +msgid "Built-In Strategies" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:120 +#: ../../source/explanation-federated-evaluation.rst:13 msgid "" -"The SuperNode Docker image currently works only with the 1.9.0-nightly " -"release. A stable version will be available when Flower 1.9.0 (stable) " -"gets released (ETA: May). A SuperNode nightly image must be paired with " -"the corresponding SuperLink and ServerApp nightly images released on the " -"same day. To ensure the versions are in sync, using the concrete tag, " -"e.g., ``1.9.0.dev20240501`` instead of ``nightly`` is recommended." 
+"All built-in strategies support centralized evaluation by providing an " +"evaluation function during initialization. An evaluation function is any " +"function that can take the current global model parameters as input and " +"return evaluation results:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:126 -msgid "" -"We will use the ``quickstart-pytorch`` example, which you can find in the" -" Flower repository, to illustrate how you can dockerize your ClientApp." +#: ../../source/explanation-federated-evaluation.rst:61 +msgid "Custom Strategies" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:134 -#, fuzzy +#: ../../source/explanation-federated-evaluation.rst:63 msgid "" -"Before we can start, we need to meet a few prerequisites in our local " -"development environment. You can skip the first part if you want to run " -"your ClientApp instead of the ``quickstart-pytorch`` example." +"The ``Strategy`` abstraction provides a method called ``evaluate`` that " +"can directly be used to evaluate the current global model parameters. The" +" current server implementation calls ``evaluate`` after parameter " +"aggregation and before federated evaluation (see next paragraph)." msgstr "" -"Antes de começarmos, precisamos encontrar alguns pré-requisitos em nosso " -"ambiente de desenvolvimento local." - -#: ../../source/how-to-run-flower-using-docker.rst:138 -#, fuzzy -msgid "Clone the Flower repository." -msgstr "Clone o repositório do flower." 
-#: ../../source/how-to-run-flower-using-docker.rst:152 -msgid "Creating a SuperNode Dockerfile" +#: ../../source/explanation-federated-evaluation.rst:69 +msgid "Federated Evaluation" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:154 -#: ../../source/how-to-run-flower-using-docker.rst:289 -msgid "Let's assume the following project layout:" +#: ../../source/explanation-federated-evaluation.rst:72 +msgid "Implementing Federated Evaluation" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:163 +#: ../../source/explanation-federated-evaluation.rst:74 msgid "" -"First, we need to create a ``requirements.txt`` file in the directory " -"where the ``ClientApp`` code is located. In the file, we list all the " -"dependencies that the ClientApp requires." +"Client-side evaluation happens in the ``Client.evaluate`` method and can " +"be configured from the server side." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:175 -msgid "" -"Note that `flwr `__ is already installed " -"in the ``flwr/supernode`` base image, so you only need to include other " -"package dependencies in your ``requirements.txt``, such as ``torch``, " -"``tensorflow``, etc." +#: ../../source/explanation-federated-evaluation.rst:108 +msgid "Configuring Federated Evaluation" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:179 +#: ../../source/explanation-federated-evaluation.rst:110 msgid "" -"Next, we create a Dockerfile. If you use the ``quickstart-pytorch`` " -"example, create a new file called ``Dockerfile.supernode`` in ``examples" -"/quickstart-pytorch``." +"Federated evaluation can be configured from the server side. Built-in " +"strategies support the following arguments:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:182 +#: ../../source/explanation-federated-evaluation.rst:113 msgid "" -"The ``Dockerfile.supernode`` contains the instructions that assemble the " -"SuperNode image." 
+"``fraction_evaluate``: a ``float`` defining the fraction of clients that " +"will be selected for evaluation. If ``fraction_evaluate`` is set to " +"``0.1`` and ``100`` clients are connected to the server, then ``10`` will" +" be randomly selected for evaluation. If ``fraction_evaluate`` is set to " +"``0.0``, federated evaluation will be disabled." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:196 +#: ../../source/explanation-federated-evaluation.rst:118 msgid "" -"In the first two lines, we instruct Docker to use the SuperNode image " -"tagged ``nightly`` as a base image and set our working directory to " -"``/app``. The following instructions will now be executed in the ``/app``" -" directory. Next, we install the ClientApp dependencies by copying the " -"``requirements.txt`` file into the image and run ``pip install``. In the " -"last two lines, we copy the ``client.py`` module into the image and set " -"the entry point to ``flower-client-app`` with the argument " -"``client:app``. The argument is the object reference of the ClientApp " -"(``:``) that will be run inside the ClientApp." +"``min_evaluate_clients``: an ``int``: the minimum number of clients to be" +" selected for evaluation. If ``fraction_evaluate`` is set to ``0.1``, " +"``min_evaluate_clients`` is set to 20, and ``100`` clients are connected " +"to the server, then ``20`` clients will be selected for evaluation." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:205 -#, fuzzy -msgid "Building the SuperNode Docker image" -msgstr "Construindo a imagem do servidor" - -#: ../../source/how-to-run-flower-using-docker.rst:207 +#: ../../source/explanation-federated-evaluation.rst:122 msgid "" -"Next, we build the SuperNode Docker image by running the following " -"command in the directory where Dockerfile and ClientApp code are located." 
+"``min_available_clients``: an ``int`` that defines the minimum number of " +"clients which need to be connected to the server before a round of " +"federated evaluation can start. If fewer than ``min_available_clients`` " +"are connected to the server, the server will wait until more clients are " +"connected before it continues to sample clients for evaluation." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:214 +#: ../../source/explanation-federated-evaluation.rst:127 msgid "" -"We gave the image the name ``flwr_supernode``, and the tag ``0.0.1``. " -"Remember that the here chosen values only serve as an example. You can " -"change them to your needs." -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:219 -#, fuzzy -msgid "Running the SuperNode Docker image" -msgstr "Construindo a imagem do servidor" - -#: ../../source/how-to-run-flower-using-docker.rst:221 -msgid "Now that we have built the SuperNode image, we can finally run it." -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:229 -#: ../../source/how-to-run-flower-using-docker.rst:345 -msgid "Let's break down each part of this command:" +"``on_evaluate_config_fn``: a function that returns a configuration " +"dictionary which will be sent to the selected clients. The function will " +"be called during each round and provides a convenient way to customize " +"client-side evaluation from the server side, for example, to configure " +"the number of validation steps performed." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:231 -#: ../../source/how-to-run-flower-using-docker.rst:347 -msgid "``docker run``: This is the command to run a new Docker container." 
+#: ../../source/explanation-federated-evaluation.rst:157 +msgid "Evaluating Local Model Updates During Training" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:232 -#: ../../source/how-to-run-flower-using-docker.rst:348 +#: ../../source/explanation-federated-evaluation.rst:159 msgid "" -"``--rm``: This option specifies that the container should be " -"automatically removed when it stops." +"Model parameters can also be evaluated during training. ``Client.fit`` " +"can return arbitrary evaluation results as a dictionary:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:233 -msgid "``flwr_supernode:0.0.1``: The name the tag of the Docker image to use." +#: ../../source/explanation-federated-evaluation.rst:201 +msgid "Full Code Example" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:234 -#: ../../source/how-to-run-flower-using-docker.rst:350 -msgid "``--insecure``: This option enables insecure communication." +#: ../../source/explanation-federated-evaluation.rst:203 +msgid "" +"For a full code example that uses both centralized and federated " +"evaluation, see the *Advanced TensorFlow Example* (the same approach can " +"be applied to workloads implemented in any other framework): " +"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst +#: ../../source/explanation-flower-architecture.rst:-1 msgid "" -"``--server 192.168.1.100:9092``: This option specifies the address of the" -" SuperLinks Fleet" +"Explore the federated learning architecture of the Flower framework, " +"featuring multi-run, concurrent execution, and scalable, secure machine " +"learning while preserving data privacy." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst -msgid "API to connect to. Remember to update it with your SuperLink IP." 
-msgstr "" +#: ../../source/explanation-flower-architecture.rst:2 +msgid "Flower Architecture" +msgstr "Arquitetura do Flower" -#: ../../source/how-to-run-flower-using-docker.rst:248 +#: ../../source/explanation-flower-architecture.rst:4 msgid "" -"To test running Flower locally, you can create a `bridge network " -"`__, use the ``--network`` argument and pass the " -"name of the Docker network to run your SuperNodes." +"This page explains the architecture of deployed Flower federated learning" +" system." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:252 +#: ../../source/explanation-flower-architecture.rst:6 msgid "" -"Any argument that comes after the tag is passed to the Flower SuperNode " -"binary. To see all available flags that the SuperNode supports, run:" +"In federated learning (FL), there is typically one server and a number of" +" clients that are connected to the server. This is often called a " +"federation." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:262 +#: ../../source/explanation-flower-architecture.rst:9 msgid "" -"To enable SSL, we will need to mount a PEM-encoded root certificate into " -"your SuperNode container." +"The role of the server is to coordinate the training process. The role of" +" each client is to receive tasks from the server, execute those tasks and" +" return the results back to the server." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:264 -msgid "" -"Assuming the certificate already exists locally, we can use the flag " -"``--volume`` to mount the local certificate into the container's " -"``/app/`` directory. This allows the SuperNode to access the certificate " -"within the container. Use the ``--certificates`` flag when starting the " -"container." 
+#: ../../source/explanation-flower-architecture.rst:13 +msgid "This is sometimes called a hub-and-spoke topology:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:275 -msgid "Flower ServerApp" +#: ../../source/explanation-flower-architecture.rst:21 +msgid "Hub-and-spoke topology in federated learning" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:277 +#: ../../source/explanation-flower-architecture.rst:21 msgid "" -"The procedure for building and running a ServerApp image is almost " -"identical to the SuperNode image." +"Hub-and-spoke topology in federated learning (one server, multiple " +"clients)." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:279 +#: ../../source/explanation-flower-architecture.rst:23 msgid "" -"Similar to the SuperNode image, the ServerApp Docker image comes with a " -"pre-installed version of Flower and serves as a base for building your " -"own ServerApp image." +"In a real-world deployment, we typically want to run different projects " +"on such a federation. Each project could use different hyperparameters, " +"different model architectures, different aggregation strategies, or even " +"different machine learning frameworks like PyTorch and TensorFlow." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:282 +#: ../../source/explanation-flower-architecture.rst:28 msgid "" -"We will use the same ``quickstart-pytorch`` example as we do in the " -"Flower SuperNode section. If you have not already done so, please follow " -"the `SuperNode Prerequisites`_ before proceeding." +"This is why, in Flower, both the server side and the client side are " +"split into two parts. One part is long-lived and responsible for " +"communicating across the network, the other part is short-lived and " +"executes task-specific code." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:287 -msgid "Creating a ServerApp Dockerfile" +#: ../../source/explanation-flower-architecture.rst:32 +msgid "A Flower `server` consists of **SuperLink** and ``ServerApp``:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:298 +#: ../../source/explanation-flower-architecture.rst:34 msgid "" -"First, we need to create a Dockerfile in the directory where the " -"``ServerApp`` code is located. If you use the ``quickstart-pytorch`` " -"example, create a new file called ``Dockerfile.serverapp`` in ``examples" -"/quickstart-pytorch``." +"**SuperLink**: a long-running process that forwards task instructions to " +"clients (SuperNodes) and receives task results back." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:302 +#: ../../source/explanation-flower-architecture.rst:36 msgid "" -"The ``Dockerfile.serverapp`` contains the instructions that assemble the " -"ServerApp image." +"``ServerApp``: a short-lived process with project-specific code that " +"customizes all server-side aspects of federated learning systems (client " +"selection, client configuration, result aggregation). This is what AI " +"researchers and AI engineers write when they build Flower apps." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:313 -msgid "" -"In the first two lines, we instruct Docker to use the ServerApp image " -"tagged ``1.8.0`` as a base image and set our working directory to " -"``/app``. The following instructions will now be executed in the ``/app``" -" directory. In the last two lines, we copy the ``server.py`` module into " -"the image and set the entry point to ``flower-server-app`` with the " -"argument ``server:app``. The argument is the object reference of the " -"ServerApp (``:``) that will be run inside the " -"ServerApp container."
+#: ../../source/explanation-flower-architecture.rst:41 +msgid "A Flower `client` consists of **SuperNode** and ``ClientApp``:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:321 -#, fuzzy -msgid "Building the ServerApp Docker image" -msgstr "Construindo a imagem do servidor" +#: ../../source/explanation-flower-architecture.rst:43 +msgid "" +"**SuperNode**: a long-running process that connects to the SuperLink, " +"asks for tasks, executes tasks (for example, \"train this model on your " +"local data\") and returns task results back to the SuperLink." +msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:323 +#: ../../source/explanation-flower-architecture.rst:46 msgid "" -"Next, we build the ServerApp Docker image by running the following " -"command in the directory where Dockerfile and ServerApp code are located." +"``ClientApp``: a short-lived process with project-specific code that " +"customizes all client-side aspects of federated learning systems (local " +"model training and evaluation, pre- and post-processing). This is what AI" +" researchers and AI engineers write when they build Flower apps." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:330 +#: ../../source/explanation-flower-architecture.rst:51 msgid "" -"We gave the image the name ``flwr_serverapp``, and the tag ``0.0.1``. " -"Remember that the here chosen values only serve as an example. You can " -"change them to your needs." +"Why SuperNode and SuperLink? Well, in federated learning, the clients are" +" the actual stars of the show. They hold the training data and they run " +"the actual training. This is why Flower decided to name them " +"**SuperNode**. The **SuperLink** is then responsible for acting as the " +"`missing link` between all those SuperNodes." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:335 +#: ../../source/explanation-flower-architecture.rst:62 #, fuzzy -msgid "Running the ServerApp Docker image" -msgstr "Construindo a imagem do servidor" - -#: ../../source/how-to-run-flower-using-docker.rst:337 -msgid "Now that we have built the ServerApp image, we can finally run it." -msgstr "" +msgid "Basic Flower architecture" +msgstr "Arquitetura do Flower" -#: ../../source/how-to-run-flower-using-docker.rst:349 -msgid "``flwr_serverapp:0.0.1``: The name the tag of the Docker image to use." +#: ../../source/explanation-flower-architecture.rst:62 +msgid "The basic Flower architecture for federated learning." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst +#: ../../source/explanation-flower-architecture.rst:64 msgid "" -"``--server 192.168.1.100:9091``: This option specifies the address of the" -" SuperLinks Driver" +"In a Flower app project, users will typically develop the ``ServerApp`` " +"and the ``ClientApp``. All the network communication between `server` and" +" `clients` is taken care of by the SuperLink and SuperNodes." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:363 +#: ../../source/explanation-flower-architecture.rst:70 msgid "" -"To test running Flower locally, you can create a `bridge network " -"`__, use the ``--network`` argument and pass the " -"name of the Docker network to run your ServerApps." +"For more details, please refer to the |serverapp_link|_ and " +"|clientapp_link|_ documentation." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:367 +#: ../../source/explanation-flower-architecture.rst:73 msgid "" -"Any argument that comes after the tag is passed to the Flower ServerApp " -"binary. 
To see all available flags that the ServerApp supports, run:" +"With *multi-run*, multiple ``ServerApp``\\s and ``ClientApp``\\s are now " +"capable of running on the same federation consisting of a single long-" +"running SuperLink and multiple long-running SuperNodes. This is sometimes" +" referred to as `multi-tenancy` or `multi-job`." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:377 +#: ../../source/explanation-flower-architecture.rst:78 msgid "" -"To enable SSL, we will need to mount a PEM-encoded root certificate into " -"your ServerApp container." +"As shown in the figure below, two projects, each consisting of a " +"``ServerApp`` and a ``ClientApp``, could share the same SuperLink and " +"SuperNodes." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:379 -msgid "" -"Assuming the certificate already exists locally, we can use the flag " -"``--volume`` to mount the local certificate into the container's " -"``/app/`` directory. This allows the ServerApp to access the certificate " -"within the container. Use the ``--certificates`` flag when starting the " -"container." +#: ../../source/explanation-flower-architecture.rst:87 +msgid "Multi-tenancy federated learning architecture" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:390 -msgid "Advanced Docker options" +#: ../../source/explanation-flower-architecture.rst:87 +msgid "Multi-tenancy federated learning architecture with Flower" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:393 -msgid "Using a different Flower version" +#: ../../source/explanation-flower-architecture.rst:89 +msgid "" +"To illustrate how multi-run works, consider one federated learning " +"training run where a ``ServerApp`` and a ``ClientApp`` are participating " +"in ``[run 1]``. Note that a SuperNode will only run a ``ClientApp`` if it" +" is selected to participate in the training run." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:395 +#: ../../source/explanation-flower-architecture.rst:94 msgid "" -"If you want to use a different version of Flower, for example Flower " -"nightly, you can do so by changing the tag. All available versions are on" -" `Docker Hub `__." +"In ``[run 1]`` below, all the SuperNodes are selected and therefore run " +"their corresponding ``ClientApp``\\s:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:400 -msgid "Pinning a Docker image to a specific version" +#: ../../source/explanation-flower-architecture.rst:103 +msgid "Multi-tenancy federated learning architecture - Run 1" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:402 +#: ../../source/explanation-flower-architecture.rst:103 msgid "" -"It may happen that we update the images behind the tags. Such updates " -"usually include security updates of system dependencies that should not " -"change the functionality of Flower. However, if you want to ensure that " -"you always use the same image, you can specify the hash of the image " -"instead of the tag." +"Run 1 in a multi-run federated learning architecture with Flower. All " +"SuperNodes participate in the training round." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:407 +#: ../../source/explanation-flower-architecture.rst:106 msgid "" -"The following command returns the current image hash referenced by the " -"``superlink:1.8.0`` tag:" +"However, in ``[run 2]``, only the first and third SuperNodes are selected" +" to participate in the training:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:414 -msgid "Next, we can pin the hash when running a new SuperLink container:" +#: ../../source/explanation-flower-architecture.rst:115 +msgid "Multi-tenancy federated learning architecture - Run 2" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:423 -msgid "Setting environment variables" +#: ../../source/explanation-flower-architecture.rst:115 +msgid "" +"Run 2 in a multi-run federated learning architecture with Flower. Only " +"the first and third SuperNodes are selected to participate in the " +"training round." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:425 +#: ../../source/explanation-flower-architecture.rst:118 msgid "" -"To set a variable inside a Docker container, you can use the ``-e " -"=`` flag." +"Therefore, with Flower multi-run, different projects (each consisting of " +"a ``ServerApp`` and ``ClientApp``) can run on different sets of clients." msgstr "" -#: ../../source/how-to-run-simulations.rst:2 -msgid "Run simulations" +#: ../../source/explanation-flower-architecture.rst:121 +msgid "" +"To help you start and manage all of the concurrently executing training " +"runs, Flower offers one additional long-running server-side service " +"called **SuperExec**. When you type ``flwr run`` to start a new training " +"run, the ``flwr`` CLI bundles your local project (mainly your " +"``ServerApp`` and ``ClientApp``) and sends it to the **SuperExec**. The " +"**SuperExec** will then take care of starting and managing your " +"``ServerApp``, which in turn selects SuperNodes to execute your " +"``ClientApp``." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:8 +#: ../../source/explanation-flower-architecture.rst:128 msgid "" -"Simulating Federated Learning workloads is useful for a multitude of use-" -"cases: you might want to run your workload on a large cohort of clients " -"but without having to source, configure and mange a large number of " -"physical devices; you might want to run your FL workloads as fast as " -"possible on the compute systems you have access to without having to go " -"through a complex setup process; you might want to validate your " -"algorithm on different scenarios at varying levels of data and system " -"heterogeneity, client availability, privacy budgets, etc. These are among" -" some of the use-cases where simulating FL workloads makes sense. Flower " -"can accommodate these scenarios by means of its `VirtualClientEngine " -"`_ or " -"VCE." +"This architecture allows many users to (concurrently) run their projects " +"on the same federation, simply by typing ``flwr run`` on their local " +"developer machine." msgstr "" -#: ../../source/how-to-run-simulations.rst:10 -msgid "" -"The :code:`VirtualClientEngine` schedules, launches and manages `virtual`" -" clients. These clients are identical to `non-virtual` clients (i.e. the " -"ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " -"creating a class inheriting, for example, from `flwr.client.NumPyClient " -"`_ and therefore behave in an " -"identical way. In addition to that, clients managed by the " -":code:`VirtualClientEngine` are:" +#: ../../source/explanation-flower-architecture.rst:137 +msgid "Flower Deployment Engine with SuperExec" msgstr "" -#: ../../source/how-to-run-simulations.rst:12 -msgid "" -"resource-aware: this means that each client gets assigned a portion of " -"the compute and memory on your system. 
You as a user can control this at " -"the beginning of the simulation and allows you to control the degree of " -"parallelism of your Flower FL simulation. The fewer the resources per " -"client, the more clients can run concurrently on the same hardware." +#: ../../source/explanation-flower-architecture.rst:137 +msgid "The SuperExec service for managing concurrent training runs in Flower." msgstr "" -#: ../../source/how-to-run-simulations.rst:13 +#: ../../source/explanation-flower-architecture.rst:141 msgid "" -"self-managed: this means that you as a user do not need to launch clients" -" manually, instead this gets delegated to :code:`VirtualClientEngine`'s " -"internals." +"This explanation covers the Flower Deployment Engine. An explanation " +"covering the Flower Simulation Engine will follow." msgstr "" -#: ../../source/how-to-run-simulations.rst:14 +#: ../../source/explanation-flower-architecture.rst:146 msgid "" -"ephemeral: this means that a client is only materialized when it is " -"required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards," -" releasing the resources it was assigned and allowing in this way other " -"clients to participate." +"As we continue to enhance Flower at a rapid pace, we'll periodically " +"update this explainer document. Feel free to share any feedback with us." msgstr "" -#: ../../source/how-to-run-simulations.rst:16 -msgid "" -"The :code:`VirtualClientEngine` implements `virtual` clients using `Ray " -"`_, an open-source framework for scalable Python " -"workloads. In particular, Flower's :code:`VirtualClientEngine` makes use " -"of `Actors `_ to " -"spawn `virtual` clients and run their workload." 
+#: ../../source/fed/0000-20200102-fed-template.md:10 +msgid "FED Template" msgstr "" -#: ../../source/how-to-run-simulations.rst:20 -msgid "Launch your Flower simulation" +#: ../../source/fed/0000-20200102-fed-template.md:12 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12 +msgid "Table of Contents" msgstr "" -#: ../../source/how-to-run-simulations.rst:22 -msgid "" -"Running Flower simulations still require you to define your client class," -" a strategy, and utility functions to download and load (and potentially " -"partition) your dataset. With that out of the way, launching your " -"simulation is done with `start_simulation `_ and a minimal example looks" -" as follows:" +#: ../../source/fed/0000-20200102-fed-template.md:14 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14 +msgid "[Table of Contents](#table-of-contents)" msgstr "" -#: ../../source/how-to-run-simulations.rst:44 -msgid "VirtualClientEngine resources" +#: ../../source/fed/0000-20200102-fed-template.md:15 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15 +msgid "[Summary](#summary)" msgstr "" -#: ../../source/how-to-run-simulations.rst:45 -msgid "" -"By default the VCE has access to all system resources (i.e. all CPUs, all" -" GPUs, etc) since that is also the default behavior when starting Ray. " -"However, in some settings you might want to limit how many of your system" -" resources are used for simulation. You can do this via the " -":code:`ray_init_args` input argument to :code:`start_simulation` which " -"the VCE internally passes to Ray's :code:`ray.init` command. For a " -"complete list of settings you can configure check the `ray.init " -"`_" -" documentation. Do not set :code:`ray_init_args` if you want the VCE to " -"use all your system's CPUs and GPUs." 
+#: ../../source/fed/0000-20200102-fed-template.md:16 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16 +msgid "[Motivation](#motivation)" msgstr "" -#: ../../source/how-to-run-simulations.rst:62 -msgid "Assigning client resources" +#: ../../source/fed/0000-20200102-fed-template.md:17 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 +msgid "[Goals](#goals)" msgstr "" -#: ../../source/how-to-run-simulations.rst:63 -msgid "" -"By default the :code:`VirtualClientEngine` assigns a single CPU core (and" -" nothing else) to each virtual client. This means that if your system has" -" 10 cores, that many virtual clients can be concurrently running." +#: ../../source/fed/0000-20200102-fed-template.md:18 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 +msgid "[Non-Goals](#non-goals)" msgstr "" -#: ../../source/how-to-run-simulations.rst:65 -msgid "" -"More often than not, you would probably like to adjust the resources your" -" clients get assigned based on the complexity (i.e. compute and memory " -"footprint) of your FL workload. You can do so when starting your " -"simulation by setting the argument `client_resources` to " -"`start_simulation `_." -" Two keys are internally used by Ray to schedule and spawn workloads (in " -"our case Flower clients):" +#: ../../source/fed/0000-20200102-fed-template.md:19 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 +msgid "[Proposal](#proposal)" msgstr "" -#: ../../source/how-to-run-simulations.rst:67 -msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." +#: ../../source/fed/0000-20200102-fed-template.md:20 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 +msgid "[Drawbacks](#drawbacks)" msgstr "" -#: ../../source/how-to-run-simulations.rst:68 -msgid "" -":code:`num_gpus` indicates the **ratio** of GPU memory a client gets " -"assigned." 
+#: ../../source/fed/0000-20200102-fed-template.md:21 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 +msgid "[Alternatives Considered](#alternatives-considered)" msgstr "" -#: ../../source/how-to-run-simulations.rst:70 -msgid "Let's see a few examples:" +#: ../../source/fed/0000-20200102-fed-template.md:22 +msgid "[Appendix](#appendix)" msgstr "" -#: ../../source/how-to-run-simulations.rst:89 -msgid "" -"While the :code:`client_resources` can be used to control the degree of " -"concurrency in your FL simulation, this does not stop you from running " -"dozens, hundreds or even thousands of clients in the same round and " -"having orders of magnitude more `dormant` (i.e. not participating in a " -"round) clients. Let's say you want to have 100 clients per round but your" -" system can only accommodate 8 clients concurrently. The " -":code:`VirtualClientEngine` will schedule 100 jobs to run (each " -"simulating a client sampled by the strategy) and then will execute them " -"in a resource-aware manner in batches of 8." +#: ../../source/fed/0000-20200102-fed-template.md:24 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 +msgid "Summary" msgstr "" -#: ../../source/how-to-run-simulations.rst:91 -msgid "" -"To understand all the intricate details on how resources are used to " -"schedule FL clients and how to define custom resources, please take a " -"look at the `Ray documentation `_." 
+#: ../../source/fed/0000-20200102-fed-template.md:26 +msgid "\\[TODO - sentence 1: summary of the problem\\]" msgstr "" -#: ../../source/how-to-run-simulations.rst:94 -msgid "Simulation examples" +#: ../../source/fed/0000-20200102-fed-template.md:28 +msgid "\\[TODO - sentence 2: summary of the solution\\]" msgstr "" -#: ../../source/how-to-run-simulations.rst:96 -msgid "" -"A few ready-to-run complete examples for Flower simulation in " -"Tensorflow/Keras and PyTorch are provided in the `Flower repository " -"`_. You can run them on Google Colab too:" +#: ../../source/fed/0000-20200102-fed-template.md:30 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 +msgid "Motivation" msgstr "" -#: ../../source/how-to-run-simulations.rst:98 -msgid "" -"`Tensorflow/Keras Simulation " -"`_: 100 clients collaboratively train a MLP model on MNIST." +#: ../../source/fed/0000-20200102-fed-template.md:32 +#: ../../source/fed/0000-20200102-fed-template.md:36 +#: ../../source/fed/0000-20200102-fed-template.md:40 +#: ../../source/fed/0000-20200102-fed-template.md:44 +#: ../../source/fed/0000-20200102-fed-template.md:48 +#: ../../source/fed/0000-20200102-fed-template.md:54 +#: ../../source/fed/0000-20200102-fed-template.md:58 +msgid "\\[TODO\\]" msgstr "" -#: ../../source/how-to-run-simulations.rst:99 -msgid "" -"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " -"MNIST." 
+#: ../../source/fed/0000-20200102-fed-template.md:34 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:53 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 +msgid "Goals" msgstr "" -#: ../../source/how-to-run-simulations.rst:104 -msgid "Multi-node Flower simulations" +#: ../../source/fed/0000-20200102-fed-template.md:38 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:79 +msgid "Non-Goals" msgstr "" -#: ../../source/how-to-run-simulations.rst:106 -msgid "" -"Flower's :code:`VirtualClientEngine` allows you to run FL simulations " -"across multiple compute nodes. Before starting your multi-node simulation" -" ensure that you:" +#: ../../source/fed/0000-20200102-fed-template.md:42 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 +msgid "Proposal" msgstr "" -#: ../../source/how-to-run-simulations.rst:108 -msgid "Have the same Python environment in all nodes." +#: ../../source/fed/0000-20200102-fed-template.md:46 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 +msgid "Drawbacks" msgstr "" -#: ../../source/how-to-run-simulations.rst:109 -msgid "Have a copy of your code (e.g. your entire repo) in all nodes." 
+#: ../../source/fed/0000-20200102-fed-template.md:50 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 +msgid "Alternatives Considered" msgstr "" -#: ../../source/how-to-run-simulations.rst:110 -msgid "" -"Have a copy of your dataset in all nodes (more about this in " -":ref:`simulation considerations `)" +#: ../../source/fed/0000-20200102-fed-template.md:52 +msgid "\\[Alternative 1\\]" msgstr "" -#: ../../source/how-to-run-simulations.rst:111 -msgid "" -"Pass :code:`ray_init_args={\"address\"=\"auto\"}` to `start_simulation " -"`_ so the " -":code:`VirtualClientEngine` attaches to a running Ray instance." +#: ../../source/fed/0000-20200102-fed-template.md:56 +msgid "\\[Alternative 2\\]" msgstr "" -#: ../../source/how-to-run-simulations.rst:112 -msgid "" -"Start Ray on you head node: on the terminal type :code:`ray start " -"--head`. This command will print a few lines, one of which indicates how " -"to attach other nodes to the head node." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 +msgid "Flower Enhancement Doc" msgstr "" -#: ../../source/how-to-run-simulations.rst:113 -msgid "" -"Attach other nodes to the head node: copy the command shown after " -"starting the head and execute it on terminal of a new node: for example " -":code:`ray start --address='192.168.1.132:6379'`" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 +msgid "[Enhancement Doc Template](#enhancement-doc-template)" msgstr "" -#: ../../source/how-to-run-simulations.rst:115 -msgid "" -"With all the above done, you can run your code from the head node as you " -"would if the simulation was running on a single node." 
+#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 +msgid "[Metadata](#metadata)" msgstr "" -#: ../../source/how-to-run-simulations.rst:117 -msgid "" -"Once your simulation is finished, if you'd like to dismantle your cluster" -" you simply need to run the command :code:`ray stop` in each node's " -"terminal (including the head node)." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:22 +msgid "[Workflow](#workflow)" msgstr "" -#: ../../source/how-to-run-simulations.rst:120 -msgid "Multi-node simulation good-to-know" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 +msgid "[GitHub Issues](#github-issues)" msgstr "" -#: ../../source/how-to-run-simulations.rst:122 -msgid "" -"Here we list a few interesting functionality when running multi-node FL " -"simulations:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:26 +msgid "[Google Docs](#google-docs)" msgstr "" -#: ../../source/how-to-run-simulations.rst:124 -msgid "" -"User :code:`ray status` to check all nodes connected to your head node as" -" well as the total resources available to the " -":code:`VirtualClientEngine`." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:30 +msgid "A Flower Enhancement is a standardized development process to" msgstr "" -#: ../../source/how-to-run-simulations.rst:126 -msgid "" -"When attaching a new node to the head, all its resources (i.e. all CPUs, " -"all GPUs) will be visible by the head node. This means that the " -":code:`VirtualClientEngine` can schedule as many `virtual` clients as " -"that node can possible run. In some settings you might want to exclude " -"certain resources from the simulation. 
You can do this by appending " -"`--num-cpus=` and/or `--num-" -"gpus=` in any :code:`ray start` command (including " -"when starting the head)" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 +msgid "provide a common structure for proposing larger changes" msgstr "" -#: ../../source/how-to-run-simulations.rst:132 -msgid "Considerations for simulations" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 +msgid "ensure that the motivation for a change is clear" msgstr "" -#: ../../source/how-to-run-simulations.rst:135 -msgid "" -"We are actively working on these fronts so to make it trivial to run any " -"FL workload with Flower simulation." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 +msgid "persist project information in a version control system" msgstr "" -#: ../../source/how-to-run-simulations.rst:138 -msgid "" -"The current VCE allows you to run Federated Learning workloads in " -"simulation mode whether you are prototyping simple scenarios on your " -"personal laptop or you want to train a complex FL pipeline across " -"multiple high-performance GPU nodes. While we add more capabilities to " -"the VCE, the points below highlight some of the considerations to keep in" -" mind when designing your FL pipeline with Flower. We also highlight a " -"couple of current limitations in our implementation." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 +msgid "document the motivation for impactful user-facing changes" msgstr "" -#: ../../source/how-to-run-simulations.rst:141 -msgid "GPU resources" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:36 +msgid "reserve GitHub issues for tracking work in flight" msgstr "" -#: ../../source/how-to-run-simulations.rst:143 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 msgid "" -"The VCE assigns a share of GPU memory to a client that specifies the key " -":code:`num_gpus` in :code:`client_resources`. 
This being said, Ray (used " -"internally by the VCE) is by default:" +"ensure community participants can successfully drive changes to " +"completion across one or more releases while stakeholders are adequately " +"represented throughout the process" msgstr "" -#: ../../source/how-to-run-simulations.rst:146 -msgid "" -"not aware of the total VRAM available on the GPUs. This means that if you" -" set :code:`num_gpus=0.5` and you have two GPUs in your system with " -"different (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients" -" concurrently." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 +msgid "Hence, an Enhancement Doc combines aspects of" msgstr "" -#: ../../source/how-to-run-simulations.rst:147 -msgid "" -"not aware of other unrelated (i.e. not created by the VCE) workloads are " -"running on the GPU. Two takeaways from this are:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 +msgid "a feature, and effort-tracking document" msgstr "" -#: ../../source/how-to-run-simulations.rst:149 -msgid "" -"Your Flower server might need a GPU to evaluate the `global model` after " -"aggregation (by instance when making use of the `evaluate method `_)" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 +msgid "a product requirements document" msgstr "" -#: ../../source/how-to-run-simulations.rst:150 -msgid "" -"If you want to run several independent Flower simulations on the same " -"machine you need to mask-out your GPUs with " -":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching your " -"experiment." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 +msgid "a design document" msgstr "" -#: ../../source/how-to-run-simulations.rst:153 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 msgid "" -"In addition, the GPU resource limits passed to :code:`client_resources` " -"are not `enforced` (i.e. 
they can be exceeded) which can result in the " -"situation of client using more VRAM than the ratio specified when " -"starting the simulation." -msgstr "" - -#: ../../source/how-to-run-simulations.rst:156 -msgid "TensorFlow with GPUs" +"into one file, which is created incrementally in collaboration with the " +"community." msgstr "" -#: ../../source/how-to-run-simulations.rst:158 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 msgid "" -"When `using a GPU with TensorFlow " -"`_ nearly your entire GPU memory of" -" all your GPUs visible to the process will be mapped. This is done by " -"TensorFlow for optimization purposes. However, in settings such as FL " -"simulations where we want to split the GPU into multiple `virtual` " -"clients, this is not a desirable mechanism. Luckily we can disable this " -"default behavior by `enabling memory growth " -"`_." +"For far-fetching changes or features proposed to Flower, an abstraction " +"beyond a single GitHub issue or pull request is required to understand " +"and communicate upcoming changes to the project." msgstr "" -#: ../../source/how-to-run-simulations.rst:160 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 msgid "" -"This would need to be done in the main process (which is where the server" -" would run) and in each Actor created by the VCE. By means of " -":code:`actor_kwargs` we can pass the reserved key `\"on_actor_init_fn\"` " -"in order to specify a function to be executed upon actor initialization. " -"In this case, to enable GPU growth for TF workloads. It would look as " -"follows:" +"The purpose of this process is to reduce the amount of \"tribal " +"knowledge\" in our community. By moving decisions from Slack threads, " +"video calls, and hallway conversations into a well-tracked artifact, this" +" process aims to enhance communication and discoverability." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:179 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 msgid "" -"This is precisely the mechanism used in `Tensorflow/Keras Simulation " -"`_ example." +"Roughly any larger, user-facing enhancement should follow the Enhancement" +" process. If an enhancement would be described in either written or " +"verbal communication to anyone besides the author or developer, then " +"consider creating an Enhancement Doc." msgstr "" -#: ../../source/how-to-run-simulations.rst:183 -msgid "Multi-node setups" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 +msgid "" +"Similarly, any technical effort (refactoring, major architectural change)" +" that will impact a large section of the development community should " +"also be communicated widely. The Enhancement process is suited for this " +"even if it will have zero impact on the typical user or operator." msgstr "" -#: ../../source/how-to-run-simulations.rst:185 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 msgid "" -"The VCE does not currently offer a way to control on which node a " -"particular `virtual` client is executed. In other words, if more than a " -"single node have the resources needed by a client to run, then any of " -"those nodes could get the client workload scheduled onto. Later in the FL" -" process (i.e. in a different round) the same client could be executed by" -" a different node. Depending on how your clients access their datasets, " -"this might require either having a copy of all dataset partitions on all " -"nodes or a dataset serving mechanism (e.g. using nfs, a database) to " -"circumvent data duplication." +"For small changes and additions, going through the Enhancement process " +"would be time-consuming and unnecessary. This includes, for example, " +"adding new Federated Learning algorithms, as these only add features " +"without changing how Flower works or is used." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:187 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 msgid "" -"By definition virtual clients are `stateless` due to their ephemeral " -"nature. A client state can be implemented as part of the Flower client " -"class but users need to ensure this saved to persistent storage (e.g. a " -"database, disk) and that can be retrieve later by the same client " -"regardless on which node it is running from. This is related to the point" -" above also since, in some way, the client's dataset could be seen as a " -"type of `state`." +"Enhancements are different from feature requests, as they are already " +"providing a laid-out path for implementation and are championed by " +"members of the community." msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:2 -msgid "Save and load model checkpoints" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 +msgid "" +"An Enhancement is captured in a Markdown file that follows a defined " +"template and a workflow to review and store enhancement docs for " +"reference — the Enhancement Doc." msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:4 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 +msgid "Enhancement Doc Template" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 msgid "" -"Flower does not automatically save model updates on the server-side. This" -" how-to guide describes the steps to save (and load) model checkpoints in" -" Flower." 
+"Each enhancement doc is provided as a Markdown file having the following " +"structure" msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:8 -msgid "Model checkpointing" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 +msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:10 -msgid "" -"Model updates can be persisted on the server-side by customizing " -":code:`Strategy` methods. Implementing custom strategies is always an " -"option, but for many cases it may be more convenient to simply customize " -"an existing strategy. The following code example defines a new " -":code:`SaveModelStrategy` which customized the existing built-in " -":code:`FedAvg` strategy. In particular, it customizes " -":code:`aggregate_fit` by calling :code:`aggregate_fit` in the base class " -"(:code:`FedAvg`). It then continues to save returned (aggregated) weights" -" before it returns those aggregated weights to the caller (i.e., the " -"server):" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 +msgid "Title (same as in metadata)" msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:47 -msgid "Save and load PyTorch checkpoints" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 +msgid "Table of Contents (if needed)" msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:49 -msgid "" -"Similar to the previous example but with a few extra steps, we'll show " -"how to store a PyTorch checkpoint we'll use the ``torch.save`` function. " -"Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be" -" transformed into a list of NumPy ``ndarray``'s, then those are " -"transformed into the PyTorch ``state_dict`` following the ``OrderedDict``" -" class structure." 
+#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 +msgid "Notes/Constraints/Caveats (optional)" msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:85 -msgid "" -"To load your progress, you simply append the following lines to your " -"code. Note that this will iterate over all saved checkpoints and load the" -" latest one:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 +msgid "Design Details (optional)" msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:97 -msgid "" -"Return/use this object of type ``Parameters`` wherever necessary, such as" -" in the ``initial_parameters`` when defining a ``Strategy``." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 +msgid "Graduation Criteria" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:2 -msgid "Upgrade to Flower 1.0" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 +msgid "Upgrade/Downgrade Strategy (if applicable)" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:4 -msgid "" -"Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable" -" foundation for future growth. Compared to Flower 0.19 (and other 0.x " -"series releases), there are a few breaking changes that make it necessary" -" to change the code of existing 0.x-series projects." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 +msgid "As a reference, this document follows the above structure." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:8 -#: ../../source/how-to-upgrade-to-flower-next.rst:43 -msgid "Install update" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 +#: ../../source/ref-api/flwr.common.Metadata.rst:2 +msgid "Metadata" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 msgid "" -"Here's how to update an existing installation to Flower 1.0 using either " -"pip or Poetry:" +"**fed-number** (Required) The `fed-number` of the last Flower Enhancement" +" Doc + 1. With this number, it becomes easy to reference other proposals." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 -msgid "pip: add ``-U`` when installing." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 +msgid "**title** (Required) The title of the proposal in plain language." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 msgid "" -"``python -m pip install -U flwr`` (when using ``start_server`` and " -"``start_client``)" +"**status** (Required) The current status of the proposal. See " +"[workflow](#workflow) for the possible states." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:15 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 msgid "" -"``python -m pip install -U flwr[simulation]`` (when using " -"``start_simulation``)" +"**authors** (Required) A list of authors of the proposal. This is simply " +"the GitHub ID." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 msgid "" -"Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " -"reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " -"before running ``poetry install``)." +"**creation-date** (Required) The date that the proposal was first " +"submitted in a PR." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 -msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 +msgid "" +"**last-updated** (Optional) The date that the proposal was last changed " +"significantly." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:20 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 msgid "" -"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " -"using ``start_simulation``)" +"**see-also** (Optional) A list of other proposals that are relevant to " +"this one." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 -#: ../../source/how-to-upgrade-to-flower-next.rst:100 -msgid "Required changes" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 +msgid "**replaces** (Optional) A list of proposals that this one replaces." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:26 -msgid "The following breaking changes require manual updates." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 +msgid "**superseded-by** (Optional) A list of proposals that this one supersedes." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:29 -msgid "General" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 +msgid "Workflow" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:31 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 msgid "" -"Pass all arguments as keyword arguments (not as positional arguments). " -"Here's an example:" +"The idea forming the enhancement should already have been discussed or " +"pitched in the community. As such, it needs a champion, usually the " +"author, who shepherds the enhancement. This person also has to find " +"committers to Flower willing to review the proposal." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 msgid "" -"Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " -"FlowerClient())``" +"New enhancements are checked in with a file name in the form of `NNNN-" +"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement " +"Doc number, to `enhancements`. All enhancements start in `provisional` " +"state as part of a pull request. Discussions are done as part of the pull" +" request review." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:34 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 msgid "" -"Flower 1.0 (keyword arguments): " -"``start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())``" +"Once an enhancement has been reviewed and approved, its status is changed" +" to `implementable`. The actual implementation is then done in separate " +"pull requests. These pull requests should mention the respective " +"enhancement as part of their description. After the implementation is " +"done, the proposal status is changed to `implemented`." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:37 -#: ../../source/ref-api/flwr.client.Client.rst:2 -msgid "Client" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 +msgid "" +"Under certain conditions, other states are possible. An Enhancement has " +"the following states:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 msgid "" -"Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " -"``def get_parameters(self, config):``" +"`provisional`: The enhancement has been proposed and is actively being " +"defined. This is the starting state while the proposal is being fleshed " +"out and actively defined and discussed." 
+msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 +msgid "`implementable`: The enhancement has been reviewed and approved." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:40 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 msgid "" -"Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " -"get_parameters(self, ins: GetParametersIns):``" +"`implemented`: The enhancement has been implemented and is no longer " +"actively changed." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 -msgid "Strategies / ``start_server`` / ``start_simulation``" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 +msgid "`deferred`: The enhancement is proposed but not actively being worked on." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 msgid "" -"Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " -"``start_simulation``. Here's an example:" +"`rejected`: The authors and reviewers have decided that this enhancement " +"is not moving forward." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 -msgid "" -"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " -"\"round_timeout\": 600.0}, ...)``" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 +msgid "`withdrawn`: The authors have withdrawn the enhancement." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:48 -msgid "" -"Flower 1.0: ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 +msgid "`replaced`: The enhancement has been replaced by a new enhancement." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:50 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 msgid "" -"Replace ``num_rounds=1`` in ``start_simulation`` with the new " -"``config=ServerConfig(...)`` (see previous item)" +"Adding an additional process to the ones already provided by GitHub " +"(Issues and Pull Requests) adds more complexity and can be a barrier for " +"potential first-time contributors." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 msgid "" -"Remove ``force_final_distributed_eval`` parameter from calls to " -"``start_server``. Distributed evaluation on all clients can be enabled by" -" configuring the strategy to sample all clients for evaluation after the " -"last round of training." +"Expanding the proposal template beyond the single-sentence description " +"currently required in the features issue template may be a heavy burden " +"for non-native English speakers." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:52 -msgid "Rename parameter/ndarray conversion functions:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 +msgid "GitHub Issues" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:54 -msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 +msgid "" +"Using GitHub Issues for these kinds of enhancements is doable. One could " +"use, for example, tags, to differentiate and filter them from other " +"issues. The main issue is in discussing and reviewing an enhancement: " +"GitHub issues only have a single thread for comments. Enhancements " +"usually have multiple threads of discussion at the same time for various " +"parts of the doc. Managing these multiple discussions can be confusing " +"when using GitHub Issues." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:55 -msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 +msgid "Google Docs" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:57 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 msgid "" -"Strategy initialization: if the strategy relies on the default values for" -" ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " -"``fraction_evaluate`` manually to ``0.1``. Projects that do not manually " -"create a strategy (by calling ``start_server`` or ``start_simulation`` " -"without passing a strategy instance) should now manually initialize " -"FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." -msgstr "" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 -msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" -msgstr "" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:60 -msgid "``fraction_eval`` --> ``fraction_evaluate``" +"Google Docs allow for multiple threads of discussions. But as Google Docs" +" are hosted outside the project, their discoverability by the community " +"needs to be taken care of. A list of links to all proposals has to be " +"managed and made available for the community. Compared to shipping " +"proposals as part of Flower's repository, the potential for missing links" +" is much higher." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 -msgid "``min_eval_clients`` --> ``min_evaluate_clients``" +#: ../../source/fed/index.md:1 +msgid "FED - Flower Enhancement Doc" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:62 -msgid "``eval_fn`` --> ``evaluate_fn``" +#: ../../source/how-to-aggregate-evaluation-results.rst:2 +msgid "Aggregate evaluation results" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:64 +#: ../../source/how-to-aggregate-evaluation-results.rst:4 msgid "" -"Rename ``rnd`` to ``server_round``. This impacts multiple methods and " -"functions, for example, ``configure_fit``, ``aggregate_fit``, " -"``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``." +"The Flower server does not prescribe a way to aggregate evaluation " +"results, but it enables the user to fully customize result aggregation." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:65 -msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" +#: ../../source/how-to-aggregate-evaluation-results.rst:8 +msgid "Aggregate Custom Evaluation Results" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:67 +#: ../../source/how-to-aggregate-evaluation-results.rst:10 msgid "" -"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:``" +"The same ``Strategy``-customization approach can be used to aggregate " +"custom evaluation results coming from individual clients. 
Clients can " +"return custom metrics to the server by returning a dictionary:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +#: ../../source/how-to-aggregate-evaluation-results.rst:39 msgid "" -"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " -"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " -"Scalar]]]:``" +"The server can then use a customized strategy to aggregate the metrics " +"provided in these dictionaries:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 -msgid "Custom strategies" +#: ../../source/how-to-authenticate-supernodes.rst:2 +msgid "Authenticate SuperNodes" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:73 +#: ../../source/how-to-authenticate-supernodes.rst:4 msgid "" -"The type of parameter ``failures`` has changed from " -"``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " -"BaseException]]`` (in ``aggregate_fit``) and " -"``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in " -"``aggregate_evaluate``)" +"Flower has built-in support for authenticated SuperNodes that you can use" +" to verify the identities of each SuperNode connecting to a SuperLink. 
" +"Flower node authentication works similar to how GitHub SSH authentication" +" works:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:74 -msgid "" -"The ``Strategy`` method ``evaluate`` now receives the current round of " -"federated learning/evaluation as the first parameter:" +#: ../../source/how-to-authenticate-supernodes.rst:8 +msgid "SuperLink (server) stores a list of known (client) node public keys" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:76 +#: ../../source/how-to-authenticate-supernodes.rst:9 msgid "" -"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:``" +"Using ECDH, both SuperNode and SuperLink independently derive a shared " +"secret" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +#: ../../source/how-to-authenticate-supernodes.rst:10 msgid "" -"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " -"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" +"Shared secret is used to compute the HMAC value of the message sent from " +"SuperNode to SuperLink as a token" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 -msgid "Optional improvements" +#: ../../source/how-to-authenticate-supernodes.rst:12 +msgid "SuperLink verifies the token" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:82 +#: ../../source/how-to-authenticate-supernodes.rst:14 msgid "" -"Along with the necessary changes above, there are a number of potential " -"improvements that just became possible:" +"We recommend you to check out the complete `code example " +"`_ demonstrating federated learning with Flower in an " +"authenticated setting." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:84 +#: ../../source/how-to-authenticate-supernodes.rst:20 msgid "" -"Remove \"placeholder\" methods from subclasses of ``Client`` or " -"``NumPyClient``. 
If you, for example, use server-side evaluation, then " -"empty placeholder implementations of ``evaluate`` are no longer " -"necessary." +"This guide covers a preview feature that might change in future versions " +"of Flower." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:85 +#: ../../source/how-to-authenticate-supernodes.rst:24 msgid "" -"Configure the round timeout via ``start_simulation``: " -"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " -"round_timeout=600.0), ...)``" +"For increased security, node authentication can only be used when " +"encrypted connections (SSL/TLS) are enabled." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:89 -#: ../../source/how-to-upgrade-to-flower-next.rst:317 -msgid "Further help" +#: ../../source/how-to-authenticate-supernodes.rst:28 +msgid "Enable node authentication in ``SuperLink``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:91 +#: ../../source/how-to-authenticate-supernodes.rst:30 msgid "" -"Most official `Flower code examples " -"`_ are already updated" -" to Flower 1.0, they can serve as a reference for using the Flower 1.0 " -"API. If there are further questions, `join the Flower Slack " -"`_ and use the channel ``#questions``." +"To enable node authentication, first you need to configure SSL/TLS " +"connections to secure the SuperLink<>SuperNode communication. You can " +"find the complete guide `here `_. After configuring secure connections, you" +" can enable client authentication in a long-running Flower ``SuperLink``." 
+" Use the following terminal command to start a Flower ``SuperNode`` that " +"has both secure connections and node authentication enabled:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:2 -msgid "Upgrade to Flower Next" +#: ../../source/how-to-authenticate-supernodes.rst:47 +msgid "Let's break down the authentication flags:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:4 +#: ../../source/how-to-authenticate-supernodes.rst:49 msgid "" -"Welcome to the migration guide for updating Flower to Flower Next! " -"Whether you're a seasoned user or just getting started, this guide will " -"help you smoothly transition your existing setup to take advantage of the" -" latest features and improvements in Flower Next, starting from version " -"1.8." +"The first flag ``--auth-list-public-keys`` expects a path to a CSV file " +"storing all known node public keys. You need to store all known node " +"public keys that are allowed to participate in a federation in one CSV " +"file (``.csv``)." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:9 +#: ../../source/how-to-authenticate-supernodes.rst:53 msgid "" -"This guide shows how to reuse pre-``1.8`` Flower code with minimum code " -"changes by using the *compatibility layer* in Flower Next. In another " -"guide, we will show how to run Flower Next end-to-end with pure Flower " -"Next APIs." +"A valid CSV file storing known node public keys should list the keys in " +"OpenSSH format, separated by commas and without any comments. For an " +"example, refer to our code sample, which contains a CSV file with two " +"known node public keys." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:13 -msgid "Let's dive in!" +#: ../../source/how-to-authenticate-supernodes.rst:57 +msgid "" +"The second and third flags ``--auth-superlink-private-key`` and ``--auth-" +"superlink-public-key`` expect paths to the server's private and public " +"keys. 
For development purposes, you can generate a private and public key" +" pair using ``ssh-keygen -t ecdsa -b 384``." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:48 +#: ../../source/how-to-authenticate-supernodes.rst:64 msgid "" -"Here's how to update an existing installation of Flower to Flower Next " -"with ``pip``:" +"In Flower 1.9, there is no support for dynamically removing, editing, or " +"adding known node public keys to the SuperLink. To change the set of " +"known nodes, you need to shut the server down, edit the CSV file, and " +"start the server again. Support for dynamically changing the set of known" +" nodes is on the roadmap to be released in Flower 1.10 (ETA: June)." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:54 -msgid "or if you need Flower Next with simulation:" +#: ../../source/how-to-authenticate-supernodes.rst:71 +msgid "Enable node authentication in ``SuperNode``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:61 +#: ../../source/how-to-authenticate-supernodes.rst:73 msgid "" -"Ensure you set the following version constraint in your " -"``requirements.txt``" +"Similar to the long-running Flower server (``SuperLink``), you can easily" +" enable node authentication in the long-running Flower client " +"(``SuperNode``). Use the following terminal command to start an " +"authenticated ``SuperNode``:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:71 -msgid "or ``pyproject.toml``:" +#: ../../source/how-to-authenticate-supernodes.rst:85 +msgid "" +"The ``--auth-supernode-private-key`` flag expects a path to the node's " +"private key file and the ``--auth-supernode-public-key`` flag expects a " +"path to the node's public key file. For development purposes, you can " +"generate a private and public key pair using ``ssh-keygen -t ecdsa -b " +"384``." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:82 -msgid "Using Poetry" +#: ../../source/how-to-authenticate-supernodes.rst:91 +msgid "Security notice" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:84 +#: ../../source/how-to-authenticate-supernodes.rst:93 msgid "" -"Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " -"(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " -"running ``poetry install``)." +"The system's security relies on the credentials of the SuperLink and each" +" SuperNode. Therefore, it is imperative to safeguard and safely store the" +" credentials to avoid security risks such as Public Key Infrastructure " +"(PKI) impersonation attacks. The node authentication mechanism also " +"involves human interaction, so please ensure that all of the " +"communication is done in a secure manner, using trusted communication " +"methods." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:86 -msgid "" -"Ensure you set the following version constraint in your " -"``pyproject.toml``:" +#: ../../source/how-to-authenticate-supernodes.rst:100 +#: ../../source/how-to-enable-ssl-connections.rst:71 +#: ../../source/how-to-use-built-in-mods.rst:95 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 +msgid "Conclusion" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:102 +#: ../../source/how-to-authenticate-supernodes.rst:102 msgid "" -"In Flower Next, the *infrastructure* and *application layers* have been " -"decoupled. Instead of starting a client in code via ``start_client()``, " -"you create a |clientapp_link|_ and start it via the command line. Instead" -" of starting a server in code via ``start_server()``, you create a " -"|serverapp_link|_ and start it via the command line. The long-running " -"components of server and client are called SuperLink and SuperNode. 
The " -"following non-breaking changes that require manual updates and allow you " -"to run your project both in the traditional way and in the Flower Next " -"way:" +"You should now have learned how to start a long-running Flower server " +"(``SuperLink``) and client (``SuperNode``) with node authentication " +"enabled. You should also know the significance of the private key and " +"store it safely to minimize security risks." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:109 -msgid "|clientapp_link|_" +#: ../../source/how-to-configure-clients.rst:2 +msgid "Configure clients" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:110 +#: ../../source/how-to-configure-clients.rst:4 msgid "" -"Wrap your existing client with |clientapp_link|_ instead of launching it " -"via |startclient_link|_. Here's an example:" +"Along with model parameters, Flower can send configuration values to " +"clients. Configuration values can be used for various purposes. They are," +" for example, a popular way to control client-side hyperparameters from " +"the server." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:132 -msgid "|serverapp_link|_" +#: ../../source/how-to-configure-clients.rst:9 +msgid "Configuration values" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:133 +#: ../../source/how-to-configure-clients.rst:11 msgid "" -"Wrap your existing strategy with |serverapp_link|_ instead of starting " -"the server via |startserver_link|_. Here's an example:" -msgstr "" - -#: ../../source/how-to-upgrade-to-flower-next.rst:154 -msgid "Deployment" +"Configuration values are represented as a dictionary with ``str`` keys " +"and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " +"float), ``int``, or ``str`` (or equivalent types in different languages)." 
+" Here is an example of a configuration dictionary in Python:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:155 +#: ../../source/how-to-configure-clients.rst:25 msgid "" -"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " -"in sequence, |flowernext_clientapp_link|_ (2x) and " -"|flowernext_serverapp_link|_. There is no need to execute `client.py` and" -" `server.py` as Python scripts." +"Flower serializes these configuration dictionaries (or *config dict* for " +"short) to their ProtoBuf representation, transports them to the client " +"using gRPC, and then deserializes them back to Python dictionaries." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:158 +#: ../../source/how-to-configure-clients.rst:31 msgid "" -"Here's an example to start the server without HTTPS (only for " -"prototyping):" +"Currently, there is no support for directly sending collection types " +"(e.g., ``Set``, ``List``, ``Map``) as values in configuration " +"dictionaries. There are several workarounds to send collections as values" +" by converting them to one of the supported value types (and converting " +"them back on the client-side)." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:174 +#: ../../source/how-to-configure-clients.rst:36 msgid "" -"Here's another example to start with HTTPS. Use the ``--certificates`` " -"command line argument to pass paths to (CA certificate, server " -"certificate, and server private key)." +"One can, for example, convert a list of floating-point numbers to a JSON " +"string, then send the JSON string using the configuration dictionary, and" +" then convert the JSON string back to a list of floating-point numbers on" +" the client." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:201 -msgid "Simulation in CLI" +#: ../../source/how-to-configure-clients.rst:41 +msgid "Configuration through built-in strategies" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:202 +#: ../../source/how-to-configure-clients.rst:43 msgid "" -"Wrap your existing client and strategy with |clientapp_link|_ and " -"|serverapp_link|_, respectively. There is no need to use |startsim_link|_" -" anymore. Here's an example:" +"The easiest way to send configuration values to clients is to use a " +"built-in strategy like ``FedAvg``. Built-in strategies support so-called " +"configuration functions. A configuration function is a function that the " +"built-in strategy calls to get the configuration dictionary for the " +"current round. It then forwards the configuration dictionary to all the " +"clients selected during that round." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:232 +#: ../../source/how-to-configure-clients.rst:49 msgid "" -"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " -"``client_app`` object in the code instead of executing the Python script." -" Here's an example (assuming the ``server_app`` and ``client_app`` " -"objects are in a ``sim.py`` module):" +"Let's start with a simple example. Imagine we want to send (a) the batch " +"size that the client should use, (b) the current global round of " +"federated learning, and (c) the number of epochs to train on the client-" +"side. Our configuration function could look like this:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:249 +#: ../../source/how-to-configure-clients.rst:65 msgid "" -"Set default resources for each |clientapp_link|_ using the ``--backend-" -"config`` command line argument instead of setting the " -"``client_resources`` argument in |startsim_link|_. 
Here's an example:" +"To make the built-in strategies use this function, we can pass it to " +"``FedAvg`` during initialization using the parameter " +"``on_fit_config_fn``:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:275 -msgid "Simulation in a Notebook" +#: ../../source/how-to-configure-clients.rst:75 +msgid "One the client side, we receive the configuration dictionary in ``fit``:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:276 +#: ../../source/how-to-configure-clients.rst:86 msgid "" -"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's " -"an example:" +"There is also an `on_evaluate_config_fn` to configure evaluation, which " +"works the same way. They are separate functions because one might want to" +" send different configuration values to `evaluate` (for example, to use a" +" different batch size)." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:319 +#: ../../source/how-to-configure-clients.rst:90 msgid "" -"Some official `Flower code examples `_ " -"are already updated to Flower Next so they can serve as a reference for " -"using the Flower Next API. If there are further questions, `join the " -"Flower Slack `_ and use the channel " -"``#questions``. You can also `participate in Flower Discuss " -"`_ where you can find us answering questions," -" or share and learn from others about migrating to Flower Next." +"The built-in strategies call this function every round (that is, every " +"time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). " +"Calling `on_evaluate_config_fn` every round allows us to vary/change the " +"config dict over consecutive rounds. 
If we wanted to implement a " +"hyperparameter schedule, for example, to increase the number of local " +"epochs during later rounds, we could do the following:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:325 -msgid "Important" +#: ../../source/how-to-configure-clients.rst:107 +msgid "The ``FedAvg`` strategy will call this function *every round*." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:328 -msgid "" -"As we continuously enhance Flower Next at a rapid pace, we'll be " -"periodically updating this guide. Please feel free to share any feedback " -"with us!" +#: ../../source/how-to-configure-clients.rst:110 +msgid "Configuring individual clients" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:334 -msgid "Happy migrating! 🚀" -msgstr "" - -#: ../../source/how-to-use-built-in-mods.rst:2 -msgid "Use Built-in Mods" -msgstr "" - -#: ../../source/how-to-use-built-in-mods.rst:4 +#: ../../source/how-to-configure-clients.rst:112 msgid "" -"**Note: This tutorial covers experimental features. The functionality and" -" interfaces may change in future versions.**" +"In some cases, it is necessary to send different configuration values to " +"different clients." msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:6 +#: ../../source/how-to-configure-clients.rst:115 msgid "" -"In this tutorial, we will learn how to utilize built-in mods to augment " -"the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) " -"allow us to perform operations before and after a task is processed in " -"the ``ClientApp``." +"This can be achieved by customizing an existing strategy or by " +":doc:`implementing a custom strategy from scratch `. 
Here's a nonsensical example that customizes ``FedAvg`` by " +"adding a custom ``\"hello\": \"world\"`` configuration key/value pair to " +"the config dict of a *single client* (only the first client in the list, " +"the other clients in this round to not receive this \"special\" config " +"value):" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:9 -msgid "What are Mods?" +#: ../../source/how-to-configure-logging.rst:2 +msgid "Configure logging" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:11 +#: ../../source/how-to-configure-logging.rst:4 msgid "" -"A Mod is a callable that wraps around a ``ClientApp``. It can manipulate " -"or inspect the incoming ``Message`` and the resulting outgoing " -"``Message``. The signature for a ``Mod`` is as follows:" +"The Flower logger keeps track of all core events that take place in " +"federated learning workloads. It presents information by default " +"following a standard message format:" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:18 -msgid "A typical mod function might look something like this:" +#: ../../source/how-to-configure-logging.rst:13 +msgid "" +"containing relevant information including: log message level (e.g. " +"``INFO``, ``DEBUG``), a timestamp, the line where the logging took place " +"from, as well as the log message itself. In this way, the logger would " +"typically display information on your terminal as follows:" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:31 -msgid "Using Mods" +#: ../../source/how-to-configure-logging.rst:35 +msgid "Saving log to file" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:33 -msgid "To use mods in your ``ClientApp``, you can follow these steps:" +#: ../../source/how-to-configure-logging.rst:37 +msgid "" +"By default, the Flower log is outputted to the terminal where you launch " +"your Federated Learning workload from. This applies for both gRPC-based " +"federation (i.e. 
when you do ``fl.server.start_server``) and when using " +"the ``VirtualClientEngine`` (i.e. when you do " +"``fl.simulation.start_simulation``). In some situations you might want to" +" save this log to disk. You can do so by calling the " +"`fl.common.logger.configure() " +"`_" +" function. For example:" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:36 -msgid "1. Import the required mods" +#: ../../source/how-to-configure-logging.rst:59 +msgid "" +"With the above, Flower will record the log you see on your terminal to " +"``log.txt``. This file will be created in the same directory as were you " +"are running the code from. If we inspect we see the log above is also " +"recorded but prefixing with ``identifier`` each line:" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:38 -msgid "First, import the built-in mod you intend to use:" +#: ../../source/how-to-configure-logging.rst:81 +msgid "Log your own messages" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:46 -msgid "2. Define your client function" +#: ../../source/how-to-configure-logging.rst:83 +msgid "" +"You might expand the information shown by default with the Flower logger " +"by adding more messages relevant to your application. You can achieve " +"this easily as follows." msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:48 +#: ../../source/how-to-configure-logging.rst:114 msgid "" -"Define your client function (``client_fn``) that will be wrapped by the " -"mod(s):" +"In this way your logger will show, in addition to the default messages, " +"the ones introduced by the clients as specified above." msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:57 -msgid "3. 
Create the ``ClientApp`` with mods" +#: ../../source/how-to-configure-logging.rst:140 +msgid "Log to a remote service" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:59 +#: ../../source/how-to-configure-logging.rst:142 msgid "" -"Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " -"argument. The order in which you provide the mods matters:" +"The ``fl.common.logger.configure`` function, also allows specifying a " +"host to which logs can be pushed (via ``POST``) through a native Python " +"``logging.handler.HTTPHandler``. This is a particularly useful feature in" +" ``gRPC``-based Federated Learning workloads where otherwise gathering " +"logs from all entities (i.e. the server and the clients) might be " +"cumbersome. Note that in Flower simulation, the server automatically " +"displays all logs. You can still specify a ``HTTPHandler`` should you " +"wish to backup or analyze the logs somewhere else." msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:72 -msgid "Order of execution" +#: ../../source/how-to-enable-ssl-connections.rst:2 +msgid "Enable SSL connections" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:74 +#: ../../source/how-to-enable-ssl-connections.rst:4 msgid "" -"When the ``ClientApp`` runs, the mods are executed in the order they are " -"provided in the list:" -msgstr "" - -#: ../../source/how-to-use-built-in-mods.rst:76 -msgid "``example_mod_1`` (outermost mod)" +"This guide describes how to a SSL-enabled secure Flower server " +"(``SuperLink``) can be started and how a Flower client (``SuperNode``) " +"can establish a secure connections to it." msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:77 -msgid "``example_mod_2`` (next mod)" +#: ../../source/how-to-enable-ssl-connections.rst:8 +msgid "" +"A complete code example demonstrating a secure connection can be found " +"`here `_." 
msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:78 +#: ../../source/how-to-enable-ssl-connections.rst:11 msgid "" -"Message handler (core function that handles the incoming ``Message`` and " -"returns the outgoing ``Message``)" +"The code example comes with a ``README.md`` file which explains how to " +"start it. Although it is already SSL-enabled, it might be less " +"descriptive on how it does so. Stick to this guide for a deeper " +"introduction to the topic." msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:79 -msgid "``example_mod_2`` (on the way back)" +#: ../../source/how-to-enable-ssl-connections.rst:16 +msgid "Certificates" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:80 -msgid "``example_mod_1`` (outermost mod on the way back)" +#: ../../source/how-to-enable-ssl-connections.rst:18 +msgid "" +"Using SSL-enabled connections requires certificates to be passed to the " +"server and client. For the purpose of this guide we are going to generate" +" self-signed certificates. As this can become quite complex we are going " +"to ask you to run the script in ``examples/advanced-" +"tensorflow/certificates/generate.sh`` with the following command " +"sequence:" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:82 +#: ../../source/how-to-enable-ssl-connections.rst:29 msgid "" -"Each mod has a chance to inspect and modify the incoming ``Message`` " -"before passing it to the next mod, and likewise with the outgoing " -"``Message`` before returning it up the stack." +"This will generate the certificates in ``examples/advanced-" +"tensorflow/.cache/certificates``." msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:87 +#: ../../source/how-to-enable-ssl-connections.rst:32 msgid "" -"By following this guide, you have learned how to effectively use mods to " -"enhance your ``ClientApp``'s functionality. Remember that the order of " -"mods is crucial and affects how the input and output are processed." 
+"The approach for generating SSL certificates in the context of this " +"example can serve as an inspiration and starting point, but it should not" +" be used as a reference for production environments. Please refer to " +"other sources regarding the issue of correctly generating certificates " +"for production environments. For non-critical prototyping or research " +"projects, it might be sufficient to use the self-signed certificates " +"generated using the scripts mentioned in this guide." msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:89 -msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" +#: ../../source/how-to-enable-ssl-connections.rst:40 +msgid "Server (SuperLink)" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:2 -msgid "Use Differential Privacy" +#: ../../source/how-to-enable-ssl-connections.rst:42 +msgid "" +"Use the following terminal command to start a sever (SuperLink) that uses" +" the previously generated certificates:" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:3 +#: ../../source/how-to-enable-ssl-connections.rst:52 msgid "" -"This guide explains how you can utilize differential privacy in the " -"Flower framework. If you are not yet familiar with differential privacy, " -"you can refer to :doc:`explanation-differential-privacy`." +"When providing certificates, the server expects a tuple of three " +"certificates paths: CA certificate, server certificate and server private" +" key." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:7 -msgid "" -"Differential Privacy in Flower is in a preview phase. If you plan to use " -"these features in a production environment with sensitive data, feel free" -" contact us to discuss your requirements and to receive guidance on how " -"to best use these features." 
+#: ../../source/how-to-enable-ssl-connections.rst:56 +msgid "Client (SuperNode)" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:12 +#: ../../source/how-to-enable-ssl-connections.rst:58 msgid "" -"This approach consists of two seprate phases: clipping of the updates and" -" adding noise to the aggregated model. For the clipping phase, Flower " -"framework has made it possible to decide whether to perform clipping on " -"the server side or the client side." +"Use the following terminal command to start a client (SuperNode) that " +"uses the previously generated certificates:" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:15 +#: ../../source/how-to-enable-ssl-connections.rst:67 msgid "" -"**Server-side Clipping**: This approach has the advantage of the server " -"enforcing uniform clipping across all clients' updates and reducing the " -"communication overhead for clipping values. However, it also has the " -"disadvantage of increasing the computational load on the server due to " -"the need to perform the clipping operation for all clients." +"When setting ``root_certificates``, the client expects a file path to " +"PEM-encoded root certificates." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:16 +#: ../../source/how-to-enable-ssl-connections.rst:73 msgid "" -"**Client-side Clipping**: This approach has the advantage of reducing the" -" computational overhead on the server. However, it also has the " -"disadvantage of lacking centralized control, as the server has less " -"control over the clipping process." +"You should now have learned how to generate self-signed certificates " +"using the given script, start an SSL-enabled server and have a client " +"establish a secure connection to it." 
msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:21 -msgid "Server-side Clipping" +#: ../../source/how-to-enable-ssl-connections.rst:78 +msgid "Additional resources" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:22 +#: ../../source/how-to-enable-ssl-connections.rst:80 msgid "" -"For central DP with server-side clipping, there are two :code:`Strategy` " -"classes that act as wrappers around the actual :code:`Strategy` instance " -"(for example, :code:`FedAvg`). The two wrapper classes are " -":code:`DifferentialPrivacyServerSideFixedClipping` and " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and " -"adaptive clipping." +"These additional sources might be relevant if you would like to dive " +"deeper into the topic of certificates:" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:-1 -msgid "server side clipping" +#: ../../source/how-to-enable-ssl-connections.rst:83 +msgid "`Let's Encrypt `_" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:31 -msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use server-" -"side fixed clipping using the " -":code:`DifferentialPrivacyServerSideFixedClipping` wrapper class. The " -"same approach can be used with " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " -"corresponding input parameters." +#: ../../source/how-to-enable-ssl-connections.rst:84 +msgid "`certbot `_" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:52 -msgid "Client-side Clipping" +#: ../../source/how-to-implement-strategies.rst:2 +msgid "Implement strategies" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:53 +#: ../../source/how-to-implement-strategies.rst:4 msgid "" -"For central DP with client-side clipping, the server sends the clipping " -"value to selected clients on each round. Clients can use existing Flower " -":code:`Mods` to perform the clipping. 
Two mods are available for fixed " -"and adaptive client-side clipping: :code:`fixedclipping_mod` and " -":code:`adaptiveclipping_mod` with corresponding server-side wrappers " -":code:`DifferentialPrivacyClientSideFixedClipping` and " -":code:`DifferentialPrivacyClientSideAdaptiveClipping`." +"The strategy abstraction enables implementation of fully custom " +"strategies. A strategy is basically the federated learning algorithm that" +" runs on the server. Strategies decide how to sample clients, how to " +"configure clients for training, how to aggregate updates, and how to " +"evaluate models. Flower provides a few built-in strategies which are " +"based on the same API described below." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:-1 -msgid "client side clipping" +#: ../../source/how-to-implement-strategies.rst:11 +msgid "The ``Strategy`` abstraction" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:63 +#: ../../source/how-to-implement-strategies.rst:13 msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use " -"differential privacy with client-side fixed clipping using both the " -":code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on " -"the client, :code:`fixedclipping_mod`:" +"All strategy implementation are derived from the abstract base class " +"``flwr.server.strategy.Strategy``, both built-in implementations and " +"third party implementations. This means that custom strategy " +"implementations have the exact same capabilities at their disposal as " +"built-in ones." 
msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:80 +#: ../../source/how-to-implement-strategies.rst:18 msgid "" -"In addition to the server-side strategy wrapper, the :code:`ClientApp` " -"needs to configure the matching :code:`fixedclipping_mod` to perform the " -"client-side clipping:" +"The strategy abstraction defines a few abstract methods that need to be " +"implemented:" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:97 +#: ../../source/how-to-implement-strategies.rst:67 msgid "" -"To utilize local differential privacy (DP) and add noise to the client " -"model parameters before transmitting them to the server in Flower, you " -"can use the `LocalDpMod`. The following hyperparameters need to be set: " -"clipping norm value, sensitivity, epsilon, and delta." -msgstr "" - -#: ../../source/how-to-use-differential-privacy.rst:-1 -msgid "local DP mod" +"Creating a new strategy means implementing a new ``class`` (derived from " +"the abstract base class ``Strategy``) that implements for the previously " +"shown abstract methods:" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:104 -msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" +#: ../../source/how-to-implement-strategies.rst:97 +msgid "The Flower server calls these methods in the following order:" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:122 -msgid "" -"Please note that the order of mods, especially those that modify " -"parameters, is important when using multiple modifiers. Typically, " -"differential privacy (DP) modifiers should be the last to operate on " -"parameters." +#: ../../source/how-to-implement-strategies.rst:174 +msgid "The following sections describe each of those methods in more detail." 
msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:125 -msgid "Local Training using Privacy Engines" +#: ../../source/how-to-implement-strategies.rst:177 +msgid "The ``initialize_parameters`` method" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:126 +#: ../../source/how-to-implement-strategies.rst:179 msgid "" -"For ensuring data instance-level privacy during local model training on " -"the client side, consider leveraging privacy engines such as Opacus and " -"TensorFlow Privacy. For examples of using Flower with these engines, " -"please refer to the Flower examples directory (`Opacus " -"`_, `Tensorflow" -" Privacy `_)." +"``initialize_parameters`` is called only once, at the very beginning of " +"an execution. It is responsible for providing the initial global model " +"parameters in a serialized form (i.e., as a ``Parameters`` object)." msgstr "" -#: ../../source/how-to-use-strategies.rst:2 -msgid "Use strategies" +#: ../../source/how-to-implement-strategies.rst:183 +msgid "" +"Built-in strategies return user-provided initial parameters. The " +"following example shows how initial parameters can be passed to " +"``FedAvg``:" msgstr "" -#: ../../source/how-to-use-strategies.rst:4 +#: ../../source/how-to-implement-strategies.rst:209 msgid "" -"Flower allows full customization of the learning process through the " -":code:`Strategy` abstraction. A number of built-in strategies are " -"provided in the core framework." +"The Flower server will call ``initialize_parameters``, which either " +"returns the parameters that were passed to ``initial_parameters``, or " +"``None``. If no parameters are returned from ``initialize_parameters`` " +"(i.e., ``None``), the server will randomly select one client and ask it " +"to provide its parameters. This is a convenience feature and not " +"recommended in practice, but it can be useful for prototyping. In " +"practice, it is recommended to always use server-side parameter " +"initialization." 
msgstr "" -#: ../../source/how-to-use-strategies.rst:6 +#: ../../source/how-to-implement-strategies.rst:218 msgid "" -"There are three ways to customize the way Flower orchestrates the " -"learning process on the server side:" +"Server-side parameter initialization is a powerful mechanism. It can be " +"used, for example, to resume training from a previously saved checkpoint." +" It is also the fundamental capability needed to implement hybrid " +"approaches, for example, to fine-tune a pre-trained model using federated" +" learning." msgstr "" -#: ../../source/how-to-use-strategies.rst:8 -msgid "Use an existing strategy, for example, :code:`FedAvg`" +#: ../../source/how-to-implement-strategies.rst:224 +msgid "The ``configure_fit`` method" msgstr "" -#: ../../source/how-to-use-strategies.rst:9 -#: ../../source/how-to-use-strategies.rst:40 -msgid "Customize an existing strategy with callback functions" +#: ../../source/how-to-implement-strategies.rst:226 +msgid "" +"``configure_fit`` is responsible for configuring the upcoming round of " +"training. What does *configure* mean in this context? Configuring a round" +" means selecting clients and deciding what instructions to send to these " +"clients. The signature of ``configure_fit`` makes this clear:" msgstr "" -#: ../../source/how-to-use-strategies.rst:10 -#: ../../source/how-to-use-strategies.rst:87 -msgid "Implement a novel strategy" +#: ../../source/how-to-implement-strategies.rst:239 +msgid "" +"The return value is a list of tuples, each representing the instructions " +"that will be sent to a particular client. 
Strategy implementations " +"usually perform the following steps in ``configure_fit``:" msgstr "" -#: ../../source/how-to-use-strategies.rst:14 -msgid "Use an existing strategy" +#: ../../source/how-to-implement-strategies.rst:243 +#: ../../source/how-to-implement-strategies.rst:307 +msgid "" +"Use the ``client_manager`` to randomly sample all (or a subset of) " +"available clients (each represented as a ``ClientProxy`` object)" msgstr "" -#: ../../source/how-to-use-strategies.rst:16 +#: ../../source/how-to-implement-strategies.rst:245 msgid "" -"Flower comes with a number of popular federated learning strategies " -"built-in. A built-in strategy can be instantiated as follows:" +"Pair each ``ClientProxy`` with the same ``FitIns`` holding the current " +"global model ``parameters`` and ``config`` dict" msgstr "" -#: ../../source/how-to-use-strategies.rst:25 +#: ../../source/how-to-implement-strategies.rst:248 msgid "" -"This creates a strategy with all parameters left at their default values " -"and passes it to the :code:`start_server` function. It is usually " -"recommended to adjust a few parameters during instantiation:" +"More sophisticated implementations can use ``configure_fit`` to implement" +" custom client selection logic. A client will only participate in a round" +" if the corresponding ``ClientProxy`` is included in the list returned " +"from ``configure_fit``." msgstr "" -#: ../../source/how-to-use-strategies.rst:42 +#: ../../source/how-to-implement-strategies.rst:254 msgid "" -"Existing strategies provide several ways to customize their behaviour. " -"Callback functions allow strategies to call user-provided code during " -"execution." +"The structure of this return value provides a lot of flexibility to the " +"user. Since instructions are defined on a per-client basis, different " +"instructions can be sent to each client. 
This enables custom strategies " +"to train, for example, different models on different clients, or use " +"different hyperparameters on different clients (via the ``config`` dict)." msgstr "" -#: ../../source/how-to-use-strategies.rst:45 -msgid "Configuring client fit and client evaluate" +#: ../../source/how-to-implement-strategies.rst:261 +msgid "The ``aggregate_fit`` method" msgstr "" -#: ../../source/how-to-use-strategies.rst:47 +#: ../../source/how-to-implement-strategies.rst:263 msgid "" -"The server can pass new configuration values to the client each round by " -"providing a function to :code:`on_fit_config_fn`. The provided function " -"will be called by the strategy and must return a dictionary of " -"configuration key values pairs that will be sent to the client. It must " -"return a dictionary of arbitrary configuration values :code:`client.fit`" -" and :code:`client.evaluate` functions during each round of federated " -"learning." +"``aggregate_fit`` is responsible for aggregating the results returned by " +"the clients that were selected and asked to train in ``configure_fit``." msgstr "" -#: ../../source/how-to-use-strategies.rst:75 +#: ../../source/how-to-implement-strategies.rst:277 msgid "" -"The :code:`on_fit_config_fn` can be used to pass arbitrary configuration " -"values from server to client, and poetentially change these values each " -"round, for example, to adjust the learning rate. The client will receive " -"the dictionary returned by the :code:`on_fit_config_fn` in its own " -":code:`client.fit()` function." +"Of course, failures can happen, so there is no guarantee that the server " +"will get results from all the clients it sent instructions to (via " +"``configure_fit``). ``aggregate_fit`` therefore receives a list of " +"``results``, but also a list of ``failures``." 
msgstr "" -#: ../../source/how-to-use-strategies.rst:78 +#: ../../source/how-to-implement-strategies.rst:282 msgid "" -"Similar to :code:`on_fit_config_fn`, there is also " -":code:`on_evaluate_config_fn` to customize the configuration sent to " -":code:`client.evaluate()`" +"``aggregate_fit`` returns an optional ``Parameters`` object and a " +"dictionary of aggregated metrics. The ``Parameters`` return value is " +"optional because ``aggregate_fit`` might decide that the results provided" +" are not sufficient for aggregation (e.g., too many failures)." msgstr "" -#: ../../source/how-to-use-strategies.rst:81 -msgid "Configuring server-side evaluation" +#: ../../source/how-to-implement-strategies.rst:288 +msgid "The ``configure_evaluate`` method" msgstr "" -#: ../../source/how-to-use-strategies.rst:83 +#: ../../source/how-to-implement-strategies.rst:290 msgid "" -"Server-side evaluation can be enabled by passing an evaluation function " -"to :code:`evaluate_fn`." +"``configure_evaluate`` is responsible for configuring the upcoming round " +"of evaluation. What does *configure* mean in this context? Configuring a " +"round means selecting clients and deciding what instructions to send to " +"these clients. The signature of ``configure_evaluate`` makes this clear:" msgstr "" -#: ../../source/how-to-use-strategies.rst:89 +#: ../../source/how-to-implement-strategies.rst:303 msgid "" -"Writing a fully custom strategy is a bit more involved, but it provides " -"the most flexibility. Read the `Implementing Strategies `_ guide to learn more." +"The return value is a list of tuples, each representing the instructions " +"that will be sent to a particular client. 
Strategy implementations " +"usually perform the following steps in ``configure_evaluate``:" msgstr "" -#: ../../source/index.rst:34 -msgid "Tutorial" +#: ../../source/how-to-implement-strategies.rst:309 +msgid "" +"Pair each ``ClientProxy`` with the same ``EvaluateIns`` holding the " +"current global model ``parameters`` and ``config`` dict" msgstr "" -#: ../../source/index.rst:44 -msgid "Quickstart tutorials" +#: ../../source/how-to-implement-strategies.rst:312 +msgid "" +"More sophisticated implementations can use ``configure_evaluate`` to " +"implement custom client selection logic. A client will only participate " +"in a round if the corresponding ``ClientProxy`` is included in the list " +"returned from ``configure_evaluate``." msgstr "" -#: ../../source/index.rst:74 ../../source/index.rst:78 -msgid "How-to guides" +#: ../../source/how-to-implement-strategies.rst:318 +msgid "" +"The structure of this return value provides a lot of flexibility to the " +"user. Since instructions are defined on a per-client basis, different " +"instructions can be sent to each client. This enables custom strategies " +"to evaluate, for example, different models on different clients, or use " +"different hyperparameters on different clients (via the ``config`` dict)." msgstr "" -#: ../../source/index.rst:99 -msgid "Legacy example guides" +#: ../../source/how-to-implement-strategies.rst:325 +msgid "The ``aggregate_evaluate`` method" msgstr "" -#: ../../source/index.rst:108 ../../source/index.rst:112 -msgid "Explanations" +#: ../../source/how-to-implement-strategies.rst:327 +msgid "" +"``aggregate_evaluate`` is responsible for aggregating the results " +"returned by the clients that were selected and asked to evaluate in " +"``configure_evaluate``." 
msgstr "" -#: None:-1 -msgid "API reference" +#: ../../source/how-to-implement-strategies.rst:341 +msgid "" +"Of course, failures can happen, so there is no guarantee that the server " +"will get results from all the clients it sent instructions to (via " +"``configure_evaluate``). ``aggregate_evaluate`` therefore receives a list" +" of ``results``, but also a list of ``failures``." msgstr "" -#: ../../source/index.rst:137 -msgid "Reference docs" +#: ../../source/how-to-implement-strategies.rst:346 +msgid "" +"``aggregate_evaluate`` returns an optional ``float`` (loss) and a " +"dictionary of aggregated metrics. The ``float`` return value is optional " +"because ``aggregate_evaluate`` might decide that the results provided are" +" not sufficient for aggregation (e.g., too many failures)." msgstr "" -#: ../../source/index.rst:153 -msgid "Contributor tutorials" +#: ../../source/how-to-implement-strategies.rst:352 +msgid "The ``evaluate`` method" msgstr "" -#: ../../source/index.rst:160 -msgid "Contributor how-to guides" +#: ../../source/how-to-implement-strategies.rst:354 +msgid "" +"``evaluate`` is responsible for evaluating model parameters on the " +"server-side. Having ``evaluate`` in addition to " +"``configure_evaluate``/``aggregate_evaluate`` enables strategies to " +"perform both servers-side and client-side (federated) evaluation." msgstr "" -#: ../../source/index.rst:173 -msgid "Contributor explanations" +#: ../../source/how-to-implement-strategies.rst:364 +msgid "" +"The return value is again optional because the strategy might not need to" +" implement server-side evaluation or because the user-defined " +"``evaluate`` method might not complete successfully (e.g., it might fail " +"to load the server-side evaluation data)." 
msgstr "" -#: ../../source/index.rst:179 -msgid "Contributor references" +#: ../../source/how-to-install-flower.rst:2 +msgid "Install Flower" msgstr "" -#: ../../source/index.rst:-1 -msgid "" -"Check out the documentation of the main Flower Framework enabling easy " -"Python development for Federated Learning." +#: ../../source/how-to-install-flower.rst:5 +msgid "Python version" msgstr "" -#: ../../source/index.rst:2 -msgid "Flower Framework Documentation" +#: ../../source/how-to-install-flower.rst:11 +msgid "Install stable release" msgstr "" -#: ../../source/index.rst:7 -msgid "" -"Welcome to Flower's documentation. `Flower `_ is a " -"friendly federated learning framework." +#: ../../source/how-to-install-flower.rst:14 +#: ../../source/how-to-upgrade-to-flower-next.rst:66 +msgid "Using pip" msgstr "" -#: ../../source/index.rst:11 -msgid "Join the Flower Community" +#: ../../source/how-to-install-flower.rst:16 +msgid "Stable releases are available on `PyPI `_:" msgstr "" -#: ../../source/index.rst:13 +#: ../../source/how-to-install-flower.rst:22 msgid "" -"The Flower Community is growing quickly - we're a friendly group of " -"researchers, engineers, students, professionals, academics, and other " -"enthusiasts." +"For simulations that use the Virtual Client Engine, ``flwr`` should be " +"installed with the ``simulation`` extra:" msgstr "" -#: ../../source/index.rst:15 -msgid "Join us on Slack" +#: ../../source/how-to-install-flower.rst:30 +msgid "Using conda (or mamba)" msgstr "" -#: ../../source/index.rst:23 -msgid "Flower Framework" +#: ../../source/how-to-install-flower.rst:32 +msgid "Flower can also be installed from the ``conda-forge`` channel." msgstr "" -#: ../../source/index.rst:25 +#: ../../source/how-to-install-flower.rst:34 msgid "" -"The user guide is targeted at researchers and developers who want to use " -"Flower to bring existing machine learning workloads into a federated " -"setting. One of Flower's design goals was to make this simple. 
Read on to" -" learn more." +"If you have not added ``conda-forge`` to your channels, you will first " +"need to run the following:" msgstr "" -#: ../../source/index.rst:30 -msgid "Tutorials" +#: ../../source/how-to-install-flower.rst:42 +msgid "" +"Once the ``conda-forge`` channel has been enabled, ``flwr`` can be " +"installed with ``conda``:" msgstr "" -#: ../../source/index.rst:32 -msgid "" -"A learning-oriented series of federated learning tutorials, the best " -"place to start." +#: ../../source/how-to-install-flower.rst:49 +msgid "or with ``mamba``:" msgstr "" -#: ../../source/index.rst:61 -msgid "" -"QUICKSTART TUTORIALS: :doc:`PyTorch ` | " -":doc:`TensorFlow ` | :doc:`🤗 Transformers" -" ` | :doc:`JAX ` | :doc:`Pandas ` | :doc:`fastai " -"` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | :doc:`XGBoost ` | " -":doc:`Android ` | :doc:`iOS `" +#: ../../source/how-to-install-flower.rst:56 +msgid "Verify installation" msgstr "" -#: ../../source/index.rst:63 -msgid "We also made video tutorials for PyTorch:" +#: ../../source/how-to-install-flower.rst:58 +msgid "" +"The following command can be used to verify if Flower was successfully " +"installed. If everything worked, it should print the version of Flower to" +" the command line:" msgstr "" -#: ../../source/index.rst:68 -msgid "And TensorFlow:" +#: ../../source/how-to-install-flower.rst:68 +msgid "Advanced installation options" msgstr "" -#: ../../source/index.rst:76 -msgid "" -"Problem-oriented how-to guides show step-by-step how to achieve a " -"specific goal." +#: ../../source/how-to-install-flower.rst:71 +msgid "Install via Docker" msgstr "" -#: ../../source/index.rst:110 -msgid "" -"Understanding-oriented concept guides explain and discuss key topics and " -"underlying ideas behind Flower and collaborative AI." 
+#: ../../source/how-to-install-flower.rst:73 +msgid ":doc:`Run Flower using Docker `" msgstr "" -#: ../../source/index.rst:120 -msgid "References" +#: ../../source/how-to-install-flower.rst:76 +msgid "Install pre-release" msgstr "" -#: ../../source/index.rst:122 -msgid "Information-oriented API reference and other reference material." +#: ../../source/how-to-install-flower.rst:78 +msgid "" +"New (possibly unstable) versions of Flower are sometimes available as " +"pre-release versions (alpha, beta, release candidate) before the stable " +"release happens:" msgstr "" -#: ../../source/index.rst:131::1 -msgid ":py:obj:`flwr `\\" +#: ../../source/how-to-install-flower.rst:85 +msgid "" +"For simulations that use the Virtual Client Engine, ``flwr`` pre-releases" +" should be installed with the ``simulation`` extra:" msgstr "" -#: ../../source/index.rst:131::1 flwr:1 of -msgid "Flower main package." +#: ../../source/how-to-install-flower.rst:93 +msgid "Install nightly release" msgstr "" -#: ../../source/index.rst:148 -msgid "Contributor docs" +#: ../../source/how-to-install-flower.rst:95 +msgid "" +"The latest (potentially unstable) changes in Flower are available as " +"nightly releases:" msgstr "" -#: ../../source/index.rst:150 +#: ../../source/how-to-install-flower.rst:101 msgid "" -"The Flower community welcomes contributions. The following docs are " -"intended to help along the way." +"For simulations that use the Virtual Client Engine, ``flwr-nightly`` " +"should be installed with the ``simulation`` extra:" msgstr "" -#: ../../source/ref-api-cli.rst:2 -msgid "Flower CLI reference" +#: ../../source/how-to-monitor-simulation.rst:2 +msgid "Monitor simulation" msgstr "" -#: ../../source/ref-api-cli.rst:7 -msgid "flower-simulation" +#: ../../source/how-to-monitor-simulation.rst:4 +msgid "" +"Flower allows you to monitor system resources while running your " +"simulation. 
Moreover, the Flower simulation engine is powerful and " +"enables you to decide how to allocate resources per client manner and " +"constrain the total usage. Insights from resource consumption can help " +"you make smarter decisions and speed up the execution time." msgstr "" -#: ../../source/ref-api-cli.rst:17 -msgid "flower-superlink" +#: ../../source/how-to-monitor-simulation.rst:9 +msgid "" +"The specific instructions assume you are using macOS and have the " +"`Homebrew `_ package manager installed." msgstr "" -#: ../../source/ref-api-cli.rst:27 -msgid "flower-client-app" +#: ../../source/how-to-monitor-simulation.rst:13 +msgid "Downloads" msgstr "" -#: ../../source/ref-api-cli.rst:37 -msgid "flower-server-app" +#: ../../source/how-to-monitor-simulation.rst:19 +msgid "" +"`Prometheus `_ is used for data collection, while" +" `Grafana `_ will enable you to visualize the " +"collected data. They are both well integrated with `Ray " +"`_ which Flower uses under the hood." msgstr "" -#: ../../source/ref-api/flwr.rst:2 -msgid "flwr" +#: ../../source/how-to-monitor-simulation.rst:23 +msgid "" +"Overwrite the configuration files (depending on your device, it might be " +"installed on a different path)." msgstr "" -#: ../../source/ref-api/flwr.rst:25 ../../source/ref-api/flwr.server.rst:51 -msgid "Modules" +#: ../../source/how-to-monitor-simulation.rst:26 +msgid "If you are on an M1 Mac, it should be:" msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.client `\\" +#: ../../source/how-to-monitor-simulation.rst:33 +msgid "On the previous generation Intel Mac devices, it should be:" msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of -msgid "Flower client." +#: ../../source/how-to-monitor-simulation.rst:40 +msgid "" +"Open the respective configuration files and change them. 
Depending on " +"your device, use one of the two following commands:" msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.common `\\" +#: ../../source/how-to-monitor-simulation.rst:51 +msgid "" +"and then delete all the text in the file and paste a new Prometheus " +"config you see below. You may adjust the time intervals to your " +"requirements:" msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of -msgid "Common components shared between server and client." +#: ../../source/how-to-monitor-simulation.rst:67 +msgid "" +"Now after you have edited the Prometheus configuration, do the same with " +"the Grafana configuration files. Open those using one of the following " +"commands as before:" msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.server `\\" +#: ../../source/how-to-monitor-simulation.rst:78 +msgid "" +"Your terminal editor should open and allow you to apply the following " +"configuration as before." msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 -#: ../../source/ref-api/flwr.server.rst:40::1 flwr.server:1 -#: flwr.server.server.Server:1 of -msgid "Flower server." +#: ../../source/how-to-monitor-simulation.rst:94 +msgid "" +"Congratulations, you just downloaded all the necessary software needed " +"for metrics tracking. Now, let’s start it." msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.simulation `\\" +#: ../../source/how-to-monitor-simulation.rst:98 +msgid "Tracking metrics" msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of -msgid "Flower simulation." +#: ../../source/how-to-monitor-simulation.rst:100 +msgid "" +"Before running your Flower simulation, you have to start the monitoring " +"tools you have just installed and configured." 
msgstr "" -#: ../../source/ref-api/flwr.client.rst:2 -msgid "client" +#: ../../source/how-to-monitor-simulation.rst:108 +msgid "" +"Please include the following argument in your Python code when starting a" +" simulation." msgstr "" -#: ../../source/ref-api/flwr.client.rst:13 -#: ../../source/ref-api/flwr.common.rst:13 -#: ../../source/ref-api/flwr.server.rst:13 -#: ../../source/ref-api/flwr.simulation.rst:13 -msgid "Functions" +#: ../../source/how-to-monitor-simulation.rst:119 +msgid "Now, you are ready to start your workload." msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 -msgid ":py:obj:`run_client_app `\\ \\(\\)" +#: ../../source/how-to-monitor-simulation.rst:121 +msgid "" +"Shortly after the simulation starts, you should see the following logs in" +" your terminal:" msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.supernode.app.run_client_app:1 of -msgid "Run Flower client app." +#: ../../source/how-to-monitor-simulation.rst:127 +msgid "You can look at everything at http://127.0.0.1:8265 ." msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 -msgid ":py:obj:`run_supernode `\\ \\(\\)" +#: ../../source/how-to-monitor-simulation.rst:129 +msgid "" +"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" +" lowest option)." msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.supernode.app.run_supernode:1 of -msgid "Run Flower SuperNode." +#: ../../source/how-to-monitor-simulation.rst:132 +msgid "" +"Or alternatively, you can just see them in Grafana by clicking on the " +"right-up corner, “View in Grafana”. Please note that the Ray dashboard is" +" only accessible during the simulation. After the simulation ends, you " +"can only use Grafana to explore the metrics. You can start Grafana by " +"going to ``http://localhost:3000/``." 
msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 +#: ../../source/how-to-monitor-simulation.rst:137 msgid "" -":py:obj:`start_client `\\ \\(\\*\\, " -"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" +"After you finish the visualization, stop Prometheus and Grafana. This is " +"important as they will otherwise block, for example port ``3000`` on your" +" machine as long as they are running." msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.app.start_client:1 of -msgid "Start a Flower client node which connects to a Flower server." +#: ../../source/how-to-monitor-simulation.rst:147 +msgid "Resource allocation" msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 +#: ../../source/how-to-monitor-simulation.rst:149 msgid "" -":py:obj:`start_numpy_client `\\ \\(\\*\\," -" server\\_address\\, client\\)" +"You must understand how the Ray library works to efficiently allocate " +"system resources to simulation clients on your own." msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.app.start_numpy_client:1 of -msgid "Start a Flower NumPyClient which connects to a gRPC server." +#: ../../source/how-to-monitor-simulation.rst:152 +msgid "" +"Initially, the simulation (which Ray handles under the hood) starts by " +"default with all the available resources on the system, which it shares " +"among the clients. It doesn't mean it divides it equally among all of " +"them, nor that the model training happens at all of them simultaneously. " +"You will learn more about that in the later part of this blog. 
You can " +"check the system resources by running the following:" msgstr "" -#: ../../source/ref-api/flwr.client.rst:27 -#: ../../source/ref-api/flwr.common.rst:32 -#: ../../source/ref-api/flwr.server.rst:28 -#: ../../source/ref-api/flwr.server.strategy.rst:17 -#: ../../source/ref-api/flwr.server.workflow.rst:17 -msgid "Classes" +#: ../../source/how-to-monitor-simulation.rst:164 +msgid "In Google Colab, the result you see might be similar to this:" msgstr "" -#: ../../source/ref-api/flwr.client.rst:34::1 -msgid ":py:obj:`Client `\\ \\(\\)" +#: ../../source/how-to-monitor-simulation.rst:175 +msgid "" +"However, you can overwrite the defaults. When starting a simulation, do " +"the following (you don't need to overwrite all of them):" msgstr "" -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.client.Client:1 of -msgid "Abstract base class for Flower clients." +#: ../../source/how-to-monitor-simulation.rst:195 +msgid "Let’s also specify the resource for a single client." msgstr "" -#: ../../source/ref-api/flwr.client.rst:34::1 +#: ../../source/how-to-monitor-simulation.rst:225 msgid "" -":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " -"mods\\]\\)" +"Now comes the crucial part. Ray will start a new client only when it has " +"all the required resources (such that they run in parallel) when the " +"resources allow." msgstr "" -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.client_app.ClientApp:1 of -msgid "Flower ClientApp." +#: ../../source/how-to-monitor-simulation.rst:228 +msgid "" +"In the example above, only one client will be run, so your clients won't " +"run concurrently. Setting ``client_num_gpus = 0.5`` would allow running " +"two clients and therefore enable them to run concurrently. Be careful not" +" to require more resources than available. If you specified " +"``client_num_gpus = 2``, the simulation wouldn't start (even if you had 2" +" GPUs but decided to set 1 in ``ray_init_args``)." 
msgstr "" -#: ../../source/ref-api/flwr.client.rst:34::1 -msgid ":py:obj:`NumPyClient `\\ \\(\\)" +#: ../../source/how-to-monitor-simulation.rst:235 ../../source/ref-faq.rst:2 +msgid "FAQ" msgstr "" -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.numpy_client.NumPyClient:1 of -msgid "Abstract base class for Flower clients using NumPy." +#: ../../source/how-to-monitor-simulation.rst:237 +msgid "Q: I don't see any metrics logged." msgstr "" -#: flwr.client.client.Client:1 flwr.client.numpy_client.NumPyClient:1 -#: flwr.server.client_manager.ClientManager:1 -#: flwr.server.driver.driver.Driver:1 flwr.server.strategy.strategy.Strategy:1 -#: of -msgid "Bases: :py:class:`~abc.ABC`" -msgstr "" - -#: ../../source/ref-api/flwr.client.Client.rst:15 -#: ../../source/ref-api/flwr.client.ClientApp.rst:15 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:15 -#: ../../source/ref-api/flwr.common.Array.rst:15 -#: ../../source/ref-api/flwr.common.ClientMessage.rst:15 -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 -#: ../../source/ref-api/flwr.common.Context.rst:15 -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 -#: ../../source/ref-api/flwr.common.Error.rst:15 -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 -#: ../../source/ref-api/flwr.common.EventType.rst:15 -#: ../../source/ref-api/flwr.common.FitIns.rst:15 -#: ../../source/ref-api/flwr.common.FitRes.rst:15 -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:15 -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 -#: ../../source/ref-api/flwr.common.Message.rst:15 -#: ../../source/ref-api/flwr.common.MessageType.rst:15 -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 -#: ../../source/ref-api/flwr.common.Metadata.rst:15 -#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 -#: 
../../source/ref-api/flwr.common.Parameters.rst:15 -#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 -#: ../../source/ref-api/flwr.common.RecordSet.rst:15 -#: ../../source/ref-api/flwr.common.ServerMessage.rst:15 -#: ../../source/ref-api/flwr.common.Status.rst:15 -#: ../../source/ref-api/flwr.server.ClientManager.rst:15 -#: ../../source/ref-api/flwr.server.Driver.rst:15 -#: ../../source/ref-api/flwr.server.History.rst:15 -#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 -#: ../../source/ref-api/flwr.server.Server.rst:15 -#: ../../source/ref-api/flwr.server.ServerApp.rst:15 -#: ../../source/ref-api/flwr.server.ServerConfig.rst:15 -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:15 -#: 
../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 -#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 -#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 -#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 -#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 -msgid "Methods" +#: ../../source/how-to-monitor-simulation.rst:239 +msgid "" +"A: The timeframe might not be properly set. The setting is in the top " +"right corner (\"Last 30 minutes\" by default). Please change the " +"timeframe to reflect the period when the simulation was running." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`evaluate `\\ \\(ins\\)" +#: ../../source/how-to-monitor-simulation.rst:243 +msgid "" +"Q: I see “Grafana server not detected. Please make sure the Grafana " +"server is running and refresh this page” after going to the Metrics tab " +"in Ray Dashboard." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.evaluate:1 -#: flwr.client.numpy_client.NumPyClient.evaluate:1 of -msgid "Evaluate the provided parameters using the locally held dataset." +#: ../../source/how-to-monitor-simulation.rst:246 +msgid "" +"A: You probably don't have Grafana running. 
Please check the running " +"services" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`fit `\\ \\(ins\\)" +#: ../../source/how-to-monitor-simulation.rst:252 +msgid "" +"Q: I see \"This site can't be reached\" when going to " +"http://127.0.0.1:8265." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.fit:1 of -msgid "Refine the provided parameters using the locally held dataset." +#: ../../source/how-to-monitor-simulation.rst:254 +msgid "" +"A: Either the simulation has already finished, or you still need to start" +" Prometheus." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`get_context `\\ \\(\\)" +#: ../../source/how-to-monitor-simulation.rst:257 +msgid "Resources" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.get_context:1 -#: flwr.client.numpy_client.NumPyClient.get_context:1 of -msgid "Get the run context from this client." +#: ../../source/how-to-monitor-simulation.rst:259 +msgid "" +"Ray Dashboard: https://docs.ray.io/en/latest/ray-observability/getting-" +"started.html" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`get_parameters `\\ \\(ins\\)" +#: ../../source/how-to-monitor-simulation.rst:261 +msgid "Ray Metrics: https://docs.ray.io/en/latest/cluster/metrics.html" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.get_parameters:1 -#: flwr.client.numpy_client.NumPyClient.get_parameters:1 of -msgid "Return the current local model parameters." 
+#: ../../source/how-to-run-simulations.rst:2 +msgid "Run simulations" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`get_properties `\\ \\(ins\\)" +#: ../../source/how-to-run-simulations.rst:8 +msgid "" +"Simulating Federated Learning workloads is useful for a multitude of use-" +"cases: you might want to run your workload on a large cohort of clients " +"but without having to source, configure and mange a large number of " +"physical devices; you might want to run your FL workloads as fast as " +"possible on the compute systems you have access to without having to go " +"through a complex setup process; you might want to validate your " +"algorithm on different scenarios at varying levels of data and system " +"heterogeneity, client availability, privacy budgets, etc. These are among" +" some of the use-cases where simulating FL workloads makes sense. Flower " +"can accommodate these scenarios by means of its `VirtualClientEngine " +"`_ or " +"VCE." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.get_properties:1 of -msgid "Return set of client's properties." +#: ../../source/how-to-run-simulations.rst:19 +msgid "" +"The ``VirtualClientEngine`` schedules, launches and manages `virtual` " +"clients. These clients are identical to `non-virtual` clients (i.e. the " +"ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " +"creating a class inheriting, for example, from `flwr.client.NumPyClient " +"`_ and therefore behave in an " +"identical way. In addition to that, clients managed by the " +"``VirtualClientEngine`` are:" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`set_context `\\ \\(context\\)" +#: ../../source/how-to-run-simulations.rst:26 +msgid "" +"resource-aware: this means that each client gets assigned a portion of " +"the compute and memory on your system. 
You as a user can control this at " +"the beginning of the simulation and allows you to control the degree of " +"parallelism of your Flower FL simulation. The fewer the resources per " +"client, the more clients can run concurrently on the same hardware." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.set_context:1 -#: flwr.client.numpy_client.NumPyClient.set_context:1 of -msgid "Apply a run context to this client." +#: ../../source/how-to-run-simulations.rst:31 +msgid "" +"self-managed: this means that you as a user do not need to launch clients" +" manually, instead this gets delegated to ``VirtualClientEngine``'s " +"internals." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`to_client `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:33 +msgid "" +"ephemeral: this means that a client is only materialized when it is " +"required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards," +" releasing the resources it was assigned and allowing in this way other " +"clients to participate." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.to_client:1 of -msgid "Return client (itself)." +#: ../../source/how-to-run-simulations.rst:38 +msgid "" +"The ``VirtualClientEngine`` implements `virtual` clients using `Ray " +"`_, an open-source framework for scalable Python " +"workloads. In particular, Flower's ``VirtualClientEngine`` makes use of " +"`Actors `_ to spawn " +"`virtual` clients and run their workload." 
msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:46 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:46 -#: ../../source/ref-api/flwr.common.Array.rst:28 -#: ../../source/ref-api/flwr.common.ClientMessage.rst:25 -#: ../../source/ref-api/flwr.common.Code.rst:19 -#: ../../source/ref-api/flwr.common.Context.rst:25 -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 -#: ../../source/ref-api/flwr.common.Error.rst:25 -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 -#: ../../source/ref-api/flwr.common.EventType.rst:165 -#: ../../source/ref-api/flwr.common.FitIns.rst:25 -#: ../../source/ref-api/flwr.common.FitRes.rst:25 -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:25 -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 -#: ../../source/ref-api/flwr.common.Message.rst:37 -#: ../../source/ref-api/flwr.common.MessageType.rst:25 -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 -#: ../../source/ref-api/flwr.common.Metadata.rst:25 -#: ../../source/ref-api/flwr.common.Parameters.rst:25 -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 -#: ../../source/ref-api/flwr.common.RecordSet.rst:25 -#: ../../source/ref-api/flwr.common.ServerMessage.rst:25 -#: ../../source/ref-api/flwr.common.Status.rst:25 -#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 -#: ../../source/ref-api/flwr.server.ServerConfig.rst:25 -msgid "Attributes" +#: ../../source/how-to-run-simulations.rst:45 +msgid "Launch your Flower simulation" msgstr "" -#: flwr.client.client.Client.evaluate:1::1 of -msgid ":py:obj:`context `\\" +#: ../../source/how-to-run-simulations.rst:47 +msgid "" +"Running Flower simulations still require you to define your client class," +" a strategy, and utility functions to download and load (and potentially " +"partition) your dataset. 
With that out of the way, launching your " +"simulation is done with `start_simulation `_ and a minimal example looks" +" as follows:" msgstr "" -#: ../../source/ref-api/flwr.common.Parameters.rst:2 -#: flwr.client.app.start_client flwr.client.app.start_numpy_client -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.numpy_client.NumPyClient.evaluate -#: flwr.client.numpy_client.NumPyClient.fit -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.context.Context flwr.common.message.Error -#: flwr.common.message.Message flwr.common.message.Message.create_error_reply -#: flwr.common.message.Message.create_reply flwr.common.message.Metadata -#: flwr.common.record.parametersrecord.Array flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.ClientManager.unregister -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.unregister -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.bulyan.Bulyan -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: 
flwr.server.strategy.fedadagrad.FedAdagrad -#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg -#: flwr.server.strategy.fedavg_android.FedAvgAndroid -#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt -#: flwr.server.strategy.fedprox.FedProx -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg -#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow -#: flwr.simulation.app.start_simulation -#: flwr.simulation.run_simulation.run_simulation of -msgid "Parameters" +#: ../../source/how-to-run-simulations.rst:73 +msgid "VirtualClientEngine resources" msgstr "" -#: flwr.client.client.Client.evaluate:3 of +#: ../../source/how-to-run-simulations.rst:75 msgid "" -"The evaluation instructions containing (global) model parameters received" -" from the server and a dictionary of configuration values used to " -"customize the local evaluation process." +"By default the VCE has access to all system resources (i.e. all CPUs, all" +" GPUs, etc) since that is also the default behavior when starting Ray. " +"However, in some settings you might want to limit how many of your system" +" resources are used for simulation. You can do this via the " +"``ray_init_args`` input argument to ``start_simulation`` which the VCE " +"internally passes to Ray's ``ray.init`` command. For a complete list of " +"settings you can configure check the `ray.init " +"`_" +" documentation. 
Do not set ``ray_init_args`` if you want the VCE to use " +"all your system's CPUs and GPUs." msgstr "" -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.numpy_client.NumPyClient.evaluate -#: flwr.client.numpy_client.NumPyClient.fit -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.message.Message.create_reply flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.num_available -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.SimpleClientManager.num_available -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.simulation.app.start_simulation of -msgid "Returns" +#: ../../source/how-to-run-simulations.rst:97 +msgid "Assigning client resources" msgstr "" -#: flwr.client.client.Client.evaluate:8 of +#: ../../source/how-to-run-simulations.rst:99 msgid "" -"The evaluation result containing the loss on the local dataset and other " -"details such as the number of local data examples used for evaluation." 
+"By default the ``VirtualClientEngine`` assigns a single CPU core (and " +"nothing else) to each virtual client. This means that if your system has " +"10 cores, that many virtual clients can be concurrently running." msgstr "" -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.message.Message.create_reply flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.num_available -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.SimpleClientManager.num_available -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.simulation.app.start_simulation of -msgid "Return type" +#: ../../source/how-to-run-simulations.rst:103 +msgid "" +"More often than not, you would probably like to adjust the resources your" +" clients get assigned based on the complexity (i.e. compute and memory " +"footprint) of your FL workload. 
You can do so when starting your " +"simulation by setting the argument `client_resources` to " +"`start_simulation `_." +" Two keys are internally used by Ray to schedule and spawn workloads (in " +"our case Flower clients):" msgstr "" -#: flwr.client.client.Client.fit:3 of +#: ../../source/how-to-run-simulations.rst:110 +msgid "``num_cpus`` indicates the number of CPU cores a client would get." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:111 +msgid "``num_gpus`` indicates the **ratio** of GPU memory a client gets assigned." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:113 +msgid "Let's see a few examples:" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:132 msgid "" -"The training instructions containing (global) model parameters received " -"from the server and a dictionary of configuration values used to " -"customize the local training process." +"While the ``client_resources`` can be used to control the degree of " +"concurrency in your FL simulation, this does not stop you from running " +"dozens, hundreds or even thousands of clients in the same round and " +"having orders of magnitude more `dormant` (i.e. not participating in a " +"round) clients. Let's say you want to have 100 clients per round but your" +" system can only accommodate 8 clients concurrently. The " +"``VirtualClientEngine`` will schedule 100 jobs to run (each simulating a " +"client sampled by the strategy) and then will execute them in a resource-" +"aware manner in batches of 8." msgstr "" -#: flwr.client.client.Client.fit:8 of +#: ../../source/how-to-run-simulations.rst:140 msgid "" -"The training result containing updated parameters and other details such " -"as the number of local training examples used for training." +"To understand all the intricate details on how resources are used to " +"schedule FL clients and how to define custom resources, please take a " +"look at the `Ray documentation `_." 
msgstr "" -#: flwr.client.client.Client.get_parameters:3 of +#: ../../source/how-to-run-simulations.rst:145 +msgid "Simulation examples" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:147 msgid "" -"The get parameters instructions received from the server containing a " -"dictionary of configuration values." +"A few ready-to-run complete examples for Flower simulation in " +"Tensorflow/Keras and PyTorch are provided in the `Flower repository " +"`_. You can run them on Google Colab too:" msgstr "" -#: flwr.client.client.Client.get_parameters:7 of -msgid "The current local model parameters." +#: ../../source/how-to-run-simulations.rst:151 +msgid "" +"`Tensorflow/Keras Simulation " +"`_: 100 clients collaboratively train a MLP model on MNIST." msgstr "" -#: flwr.client.client.Client.get_properties:3 of +#: ../../source/how-to-run-simulations.rst:154 msgid "" -"The get properties instructions received from the server containing a " -"dictionary of configuration values." +"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " +"MNIST." msgstr "" -#: flwr.client.client.Client.get_properties:7 of -msgid "The current client properties." +#: ../../source/how-to-run-simulations.rst:159 +msgid "Multi-node Flower simulations" msgstr "" -#: ../../source/ref-api/flwr.client.ClientApp.rst:2 -msgid "ClientApp" +#: ../../source/how-to-run-simulations.rst:161 +msgid "" +"Flower's ``VirtualClientEngine`` allows you to run FL simulations across " +"multiple compute nodes. 
Before starting your multi-node simulation ensure" +" that you:" msgstr "" -#: flwr.client.client_app.ClientApp:1 flwr.common.constant.MessageType:1 -#: flwr.common.constant.MessageTypeLegacy:1 flwr.common.context.Context:1 -#: flwr.common.message.Error:1 flwr.common.message.Message:1 -#: flwr.common.message.Metadata:1 flwr.common.record.parametersrecord.Array:1 -#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 -#: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 -#: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 -#: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 -#: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 -#: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 -#: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 -#: flwr.common.typing.Status:1 flwr.server.history.History:1 -#: flwr.server.server.Server:1 flwr.server.server_app.ServerApp:1 -#: flwr.server.server_config.ServerConfig:1 -#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 -#: of -msgid "Bases: :py:class:`object`" +#: ../../source/how-to-run-simulations.rst:164 +msgid "Have the same Python environment in all nodes." 
msgstr "" -#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 -#: flwr.client.client_app.ClientApp:4 -#: flwr.client.client_app.ClientApp.evaluate:4 -#: flwr.client.client_app.ClientApp.query:4 -#: flwr.client.client_app.ClientApp.train:4 flwr.server.app.start_server:41 -#: flwr.server.server_app.ServerApp:4 flwr.server.server_app.ServerApp.main:4 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 -#: of -msgid "Examples" +#: ../../source/how-to-run-simulations.rst:165 +msgid "Have a copy of your code (e.g. your entire repo) in all nodes." msgstr "" -#: flwr.client.client_app.ClientApp:5 of +#: ../../source/how-to-run-simulations.rst:166 msgid "" -"Assuming a typical `Client` implementation named `FlowerClient`, you can " -"wrap it in a `ClientApp` as follows:" +"Have a copy of your dataset in all nodes (more about this in " +":ref:`simulation considerations `)" msgstr "" -#: flwr.client.client_app.ClientApp:16 of +#: ../../source/how-to-run-simulations.rst:168 msgid "" -"If the above code is in a Python module called `client`, it can be " -"started as follows:" +"Pass ``ray_init_args={\"address\"=\"auto\"}`` to `start_simulation `_ so the " +"``VirtualClientEngine`` attaches to a running Ray instance." msgstr "" -#: flwr.client.client_app.ClientApp:21 of +#: ../../source/how-to-run-simulations.rst:171 msgid "" -"In this `client:app` example, `client` refers to the Python module " -"`client.py` in which the previous code lives in and `app` refers to the " -"global attribute `app` that points to an object of type `ClientApp`." +"Start Ray on you head node: on the terminal type ``ray start --head``. 
" +"This command will print a few lines, one of which indicates how to attach" +" other nodes to the head node." msgstr "" -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid ":py:obj:`evaluate `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:174 +msgid "" +"Attach other nodes to the head node: copy the command shown after " +"starting the head and execute it on terminal of a new node: for example " +"``ray start --address='192.168.1.132:6379'``" msgstr "" -#: flwr.client.client_app.ClientApp.evaluate:1 -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid "Return a decorator that registers the evaluate fn with the client app." +#: ../../source/how-to-run-simulations.rst:178 +msgid "" +"With all the above done, you can run your code from the head node as you " +"would if the simulation was running on a single node." msgstr "" -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid ":py:obj:`query `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:181 +msgid "" +"Once your simulation is finished, if you'd like to dismantle your cluster" +" you simply need to run the command ``ray stop`` in each node's terminal " +"(including the head node)." msgstr "" -#: flwr.client.client_app.ClientApp.evaluate:1::1 -#: flwr.client.client_app.ClientApp.query:1 of -msgid "Return a decorator that registers the query fn with the client app." +#: ../../source/how-to-run-simulations.rst:185 +msgid "Multi-node simulation good-to-know" msgstr "" -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid ":py:obj:`train `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:187 +msgid "" +"Here we list a few interesting functionality when running multi-node FL " +"simulations:" msgstr "" -#: flwr.client.client_app.ClientApp.evaluate:1::1 -#: flwr.client.client_app.ClientApp.train:1 of -msgid "Return a decorator that registers the train fn with the client app." 
+#: ../../source/how-to-run-simulations.rst:189 +msgid "" +"User ``ray status`` to check all nodes connected to your head node as " +"well as the total resources available to the ``VirtualClientEngine``." msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:2 -msgid "NumPyClient" +#: ../../source/how-to-run-simulations.rst:192 +msgid "" +"When attaching a new node to the head, all its resources (i.e. all CPUs, " +"all GPUs) will be visible by the head node. This means that the " +"``VirtualClientEngine`` can schedule as many `virtual` clients as that " +"node can possible run. In some settings you might want to exclude certain" +" resources from the simulation. You can do this by appending `--num-" +"cpus=` and/or `--num-gpus=` in " +"any ``ray start`` command (including when starting the head)" msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-run-simulations.rst:202 +msgid "Considerations for simulations" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:206 msgid "" -":py:obj:`evaluate `\\ \\(parameters\\, " -"config\\)" +"We are actively working on these fronts so to make it trivial to run any " +"FL workload with Flower simulation." msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid ":py:obj:`fit `\\ \\(parameters\\, config\\)" +#: ../../source/how-to-run-simulations.rst:209 +msgid "" +"The current VCE allows you to run Federated Learning workloads in " +"simulation mode whether you are prototyping simple scenarios on your " +"personal laptop or you want to train a complex FL pipeline across " +"multiple high-performance GPU nodes. While we add more capabilities to " +"the VCE, the points below highlight some of the considerations to keep in" +" mind when designing your FL pipeline with Flower. We also highlight a " +"couple of current limitations in our implementation." 
msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.fit:1 of -msgid "Train the provided parameters using the locally held dataset." +#: ../../source/how-to-run-simulations.rst:217 +msgid "GPU resources" msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid ":py:obj:`get_context `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:219 +msgid "" +"The VCE assigns a share of GPU memory to a client that specifies the key " +"``num_gpus`` in ``client_resources``. This being said, Ray (used " +"internally by the VCE) is by default:" msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-run-simulations.rst:222 msgid "" -":py:obj:`get_parameters `\\ " -"\\(config\\)" +"not aware of the total VRAM available on the GPUs. This means that if you" +" set ``num_gpus=0.5`` and you have two GPUs in your system with different" +" (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients " +"concurrently." msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-run-simulations.rst:225 msgid "" -":py:obj:`get_properties `\\ " -"\\(config\\)" +"not aware of other unrelated (i.e. not created by the VCE) workloads are " +"running on the GPU. Two takeaways from this are:" msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.get_properties:1 of -msgid "Return a client's set of properties." 
+#: ../../source/how-to-run-simulations.rst:228 +msgid "" +"Your Flower server might need a GPU to evaluate the `global model` after " +"aggregation (by instance when making use of the `evaluate method `_)" msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-run-simulations.rst:231 msgid "" -":py:obj:`set_context `\\ " -"\\(context\\)" +"If you want to run several independent Flower simulations on the same " +"machine you need to mask-out your GPUs with " +"``CUDA_VISIBLE_DEVICES=\"\"`` when launching your experiment." msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid ":py:obj:`to_client `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:235 +msgid "" +"In addition, the GPU resource limits passed to ``client_resources`` are " +"not `enforced` (i.e. they can be exceeded) which can result in the " +"situation of client using more VRAM than the ratio specified when " +"starting the simulation." msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.to_client:1 of -msgid "Convert to object to Client type and return it." +#: ../../source/how-to-run-simulations.rst:240 +msgid "TensorFlow with GPUs" msgstr "" -#: flwr.client.numpy_client.NumPyClient.evaluate:1::1 of -msgid ":py:obj:`context `\\" -msgstr "" - -#: flwr.client.numpy_client.NumPyClient.evaluate:3 -#: flwr.client.numpy_client.NumPyClient.fit:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:5 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:8 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:5 -#: flwr.server.strategy.strategy.Strategy.configure_fit:5 -#: flwr.server.strategy.strategy.Strategy.evaluate:8 of -msgid "The current (global) model parameters." 
-msgstr "" - -#: flwr.client.numpy_client.NumPyClient.evaluate:5 of +#: ../../source/how-to-run-simulations.rst:242 msgid "" -"Configuration parameters which allow the server to influence evaluation " -"on the client. It can be used to communicate arbitrary values from the " -"server to the client, for example, to influence the number of examples " -"used for evaluation." +"When `using a GPU with TensorFlow " +"`_ nearly your entire GPU memory of" +" all your GPUs visible to the process will be mapped. This is done by " +"TensorFlow for optimization purposes. However, in settings such as FL " +"simulations where we want to split the GPU into multiple `virtual` " +"clients, this is not a desirable mechanism. Luckily we can disable this " +"default behavior by `enabling memory growth " +"`_." msgstr "" -#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +#: ../../source/how-to-run-simulations.rst:249 msgid "" -"* **loss** (*float*) -- The evaluation loss of the model on the local " -"dataset. * **num_examples** (*int*) -- The number of examples used for " -"evaluation. * **metrics** (*Dict[str, Scalar]*) -- A dictionary mapping " -"arbitrary string keys to values of type bool, bytes, float, int, or " -"str. It can be used to communicate arbitrary values back to the server." +"This would need to be done in the main process (which is where the server" +" would run) and in each Actor created by the VCE. By means of " +"``actor_kwargs`` we can pass the reserved key `\"on_actor_init_fn\"` in " +"order to specify a function to be executed upon actor initialization. In " +"this case, to enable GPU growth for TF workloads. It would look as " +"follows:" msgstr "" -#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +#: ../../source/how-to-run-simulations.rst:272 msgid "" -"**loss** (*float*) -- The evaluation loss of the model on the local " -"dataset." +"This is precisely the mechanism used in `Tensorflow/Keras Simulation " +"`_ example." 
msgstr "" -#: flwr.client.numpy_client.NumPyClient.evaluate:12 of -msgid "**num_examples** (*int*) -- The number of examples used for evaluation." +#: ../../source/how-to-run-simulations.rst:276 +msgid "Multi-node setups" msgstr "" -#: flwr.client.numpy_client.NumPyClient.evaluate:13 -#: flwr.client.numpy_client.NumPyClient.fit:13 of +#: ../../source/how-to-run-simulations.rst:278 msgid "" -"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " -"string keys to values of type bool, bytes, float, int, or str. It can be " -"used to communicate arbitrary values back to the server." +"The VCE does not currently offer a way to control on which node a " +"particular `virtual` client is executed. In other words, if more than a " +"single node have the resources needed by a client to run, then any of " +"those nodes could get the client workload scheduled onto. Later in the FL" +" process (i.e. in a different round) the same client could be executed by" +" a different node. Depending on how your clients access their datasets, " +"this might require either having a copy of all dataset partitions on all " +"nodes or a dataset serving mechanism (e.g. using nfs, a database) to " +"circumvent data duplication." msgstr "" -#: flwr.client.numpy_client.NumPyClient.evaluate:19 of +#: ../../source/how-to-run-simulations.rst:286 msgid "" -"The previous return type format (int, float, float) and the extended " -"format (int, float, float, Dict[str, Scalar]) have been deprecated and " -"removed since Flower 0.19." +"By definition virtual clients are `stateless` due to their ephemeral " +"nature. A client state can be implemented as part of the Flower client " +"class but users need to ensure this saved to persistent storage (e.g. a " +"database, disk) and that can be retrieve later by the same client " +"regardless on which node it is running from. 
This is related to the point" +" above also since, in some way, the client's dataset could be seen as a " +"type of `state`." msgstr "" -#: flwr.client.numpy_client.NumPyClient.fit:5 of -msgid "" -"Configuration parameters which allow the server to influence training on " -"the client. It can be used to communicate arbitrary values from the " -"server to the client, for example, to set the number of (local) training " -"epochs." +#: ../../source/how-to-save-and-load-model-checkpoints.rst:2 +msgid "Save and load model checkpoints" msgstr "" -#: flwr.client.numpy_client.NumPyClient.fit:11 of +#: ../../source/how-to-save-and-load-model-checkpoints.rst:4 msgid "" -"* **parameters** (*NDArrays*) -- The locally updated model parameters. * " -"**num_examples** (*int*) -- The number of examples used for training. * " -"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " -"string keys to values of type bool, bytes, float, int, or str. It can " -"be used to communicate arbitrary values back to the server." -msgstr "" - -#: flwr.client.numpy_client.NumPyClient.fit:11 of -msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." +"Flower does not automatically save model updates on the server-side. This" +" how-to guide describes the steps to save (and load) model checkpoints in" +" Flower." msgstr "" -#: flwr.client.numpy_client.NumPyClient.fit:12 of -msgid "**num_examples** (*int*) -- The number of examples used for training." +#: ../../source/how-to-save-and-load-model-checkpoints.rst:8 +msgid "Model checkpointing" msgstr "" -#: flwr.client.numpy_client.NumPyClient.get_parameters:3 of +#: ../../source/how-to-save-and-load-model-checkpoints.rst:10 msgid "" -"Configuration parameters requested by the server. This can be used to " -"tell the client which parameters are needed along with some Scalar " -"attributes." +"Model updates can be persisted on the server-side by customizing " +"``Strategy`` methods. 
Implementing custom strategies is always an option," +" but for many cases it may be more convenient to simply customize an " +"existing strategy. The following code example defines a new " +"``SaveModelStrategy`` which customized the existing built-in ``FedAvg`` " +"strategy. In particular, it customizes ``aggregate_fit`` by calling " +"``aggregate_fit`` in the base class (``FedAvg``). It then continues to " +"save returned (aggregated) weights before it returns those aggregated " +"weights to the caller (i.e., the server):" msgstr "" -#: flwr.client.numpy_client.NumPyClient.get_parameters:8 of -msgid "**parameters** -- The local model parameters as a list of NumPy ndarrays." +#: ../../source/how-to-save-and-load-model-checkpoints.rst:53 +msgid "Save and load PyTorch checkpoints" msgstr "" -#: flwr.client.numpy_client.NumPyClient.get_properties:3 of +#: ../../source/how-to-save-and-load-model-checkpoints.rst:55 msgid "" -"Configuration parameters requested by the server. This can be used to " -"tell the client which properties are needed along with some Scalar " -"attributes." +"Similar to the previous example but with a few extra steps, we'll show " +"how to store a PyTorch checkpoint we'll use the ``torch.save`` function. " +"Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be" +" transformed into a list of NumPy ``ndarray``'s, then those are " +"transformed into the PyTorch ``state_dict`` following the ``OrderedDict``" +" class structure." msgstr "" -#: flwr.client.numpy_client.NumPyClient.get_properties:8 of +#: ../../source/how-to-save-and-load-model-checkpoints.rst:98 msgid "" -"**properties** -- A dictionary mapping arbitrary string keys to values of" -" type bool, bytes, float, int, or str. It can be used to communicate " -"arbitrary property values back to the server." 
-msgstr "" - -#: ../../source/ref-api/flwr.client.run_client_app.rst:2 -msgid "run\\_client\\_app" +"To load your progress, you simply append the following lines to your " +"code. Note that this will iterate over all saved checkpoints and load the" +" latest one:" msgstr "" -#: ../../source/ref-api/flwr.client.run_supernode.rst:2 -msgid "run\\_supernode" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:111 +msgid "" +"Return/use this object of type ``Parameters`` wherever necessary, such as" +" in the ``initial_parameters`` when defining a ``Strategy``." msgstr "" -#: ../../source/ref-api/flwr.client.start_client.rst:2 -msgid "start\\_client" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:2 +msgid "Upgrade to Flower 1.0" msgstr "" -#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:4 msgid "" -"The IPv4 or IPv6 address of the server. If the Flower server runs on the " -"same machine on port 8080, then `server_address` would be " -"`\"[::]:8080\"`." +"Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable" +" foundation for future growth. Compared to Flower 0.19 (and other 0.x " +"series releases), there are a few breaking changes that make it necessary" +" to change the code of existing 0.x-series projects." msgstr "" -#: flwr.client.app.start_client:7 of -msgid "A callable that instantiates a Client. 
(default: None)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +#: ../../source/how-to-upgrade-to-flower-next.rst:63 +msgid "Install update" msgstr "" -#: flwr.client.app.start_client:9 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 msgid "" -"An implementation of the abstract base class `flwr.client.Client` " -"(default: None)" +"Here's how to update an existing installation to Flower 1.0 using either " +"pip or Poetry:" msgstr "" -#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of -msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower" -" server. The default should be sufficient for most models. Users who " -"train very large models might need to increase this value. Note that the " -"Flower server needs to be started with the same value (see " -"`flwr.server.start_server`), otherwise it will not know about the " -"increased limit and block larger messages." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +msgid "pip: add ``-U`` when installing." msgstr "" -#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:16 msgid "" -"The PEM-encoded root certificates as a byte string or a path string. If " -"provided, a secure connection using the certificates will be established " -"to an SSL-enabled Flower server." +"``python -m pip install -U flwr`` (when using ``start_server`` and " +"``start_client``)" msgstr "" -#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 msgid "" -"Starts an insecure gRPC connection when True. Enables HTTPS connection " -"when False, using system certificates if `root_certificates` is None." 
+"``python -m pip install -U 'flwr[simulation]'`` (when using " +"``start_simulation``)" msgstr "" -#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 msgid "" -"Configure the transport layer. Allowed values: - 'grpc-bidi': gRPC, " -"bidirectional streaming - 'grpc-rere': gRPC, request-response " -"(experimental) - 'rest': HTTP (experimental)" +"Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " +"reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " +"before running ``poetry install``)." msgstr "" -#: flwr.client.app.start_client:31 of -msgid "" -"The maximum number of times the client will try to connect to the server " -"before giving up in case of a connection error. If set to None, there is " -"no limit to the number of tries." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:23 +msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" msgstr "" -#: flwr.client.app.start_client:35 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 msgid "" -"The maximum duration before the client stops trying to connect to the " -"server in case of connection error. If set to None, there is no limit to " -"the total time." +"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " +"using ``start_simulation``)" msgstr "" -#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of -msgid "Starting a gRPC client with an insecure server connection:" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:28 +#: ../../source/how-to-upgrade-to-flower-next.rst:121 +msgid "Required changes" msgstr "" -#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of -msgid "Starting an SSL-enabled gRPC client using system certificates:" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:30 +msgid "The following breaking changes require manual updates." 
msgstr "" -#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of -msgid "Starting an SSL-enabled gRPC client using provided certificates:" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 +msgid "General" msgstr "" -#: ../../source/ref-api/flwr.client.start_numpy_client.rst:2 -msgid "start\\_numpy\\_client" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:35 +msgid "" +"Pass all arguments as keyword arguments (not as positional arguments). " +"Here's an example:" msgstr "" -#: flwr.client.app.start_numpy_client:5 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:38 msgid "" -"This function is deprecated since 1.7.0. Use " -":code:`flwr.client.start_client` instead and first convert your " -":code:`NumPyClient` to type :code:`flwr.client.Client` by executing its " -":code:`to_client()` method." +"Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " +"FlowerClient())``" msgstr "" -#: flwr.client.app.start_numpy_client:13 of -msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 +msgid "" +"Flower 1.0 (keyword arguments): " +"``start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())``" msgstr "" -#: ../../source/ref-api/flwr.common.rst:2 -msgid "common" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 +#: ../../source/ref-api/flwr.client.Client.rst:2 +msgid "Client" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 +msgid "" +"Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " +"``def get_parameters(self, config):``" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.record.conversion_utils.array_from_numpy:1 of -msgid "Create Array from NumPy ndarray." 
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 +msgid "" +"Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " +"get_parameters(self, ins: GetParametersIns):``" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +msgid "Strategies / ``start_server`` / ``start_simulation``" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.bytes_to_ndarray:1 of -msgid "Deserialize NumPy ndarray from bytes." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:53 +msgid "" +"Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " +"``start_simulation``. Here's an example:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:56 msgid "" -":py:obj:`configure `\\ \\(identifier\\[\\, " -"filename\\, host\\]\\)" +"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " +"\"round_timeout\": 600.0}, ...)``" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.logger.configure:1 of -msgid "Configure logging to file and/or remote log server." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 +msgid "" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 msgid "" -":py:obj:`event `\\ \\(event\\_type\\[\\, " -"event\\_details\\]\\)" +"Replace ``num_rounds=1`` in ``start_simulation`` with the new " +"``config=ServerConfig(...)`` (see previous item)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.telemetry.event:1 of -msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." 
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:63 +msgid "" +"Remove ``force_final_distributed_eval`` parameter from calls to " +"``start_server``. Distributed evaluation on all clients can be enabled by" +" configuring the strategy to sample all clients for evaluation after the " +"last round of training." msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid "" -":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " -"\\*\\*kwargs\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:66 +msgid "Rename parameter/ndarray conversion functions:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 -#: of -msgid "Log 'msg % args' with the integer severity 'level'." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:69 +msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.ndarray_to_bytes:1 of -msgid "Serialize NumPy ndarray to bytes." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 +msgid "" +"Strategy initialization: if the strategy relies on the default values for" +" ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " +"``fraction_evaluate`` manually to ``0.1``. Projects that do not manually " +"create a strategy (by calling ``start_server`` or ``start_simulation`` " +"without passing a strategy instance) should now manually initialize " +"FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." 
msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`now `\\ \\(\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.date.now:1 of -msgid "Construct a datetime from time.time() with time zone set to UTC." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:79 +msgid "``fraction_eval`` --> ``fraction_evaluate``" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid "" -":py:obj:`ndarrays_to_parameters `\\ " -"\\(ndarrays\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 +msgid "``min_eval_clients`` --> ``min_evaluate_clients``" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.ndarrays_to_parameters:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 -#: of -msgid "Convert NumPy ndarrays to parameters object." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:81 +msgid "``eval_fn`` --> ``evaluate_fn``" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:83 msgid "" -":py:obj:`parameters_to_ndarrays `\\ " -"\\(parameters\\)" +"Rename ``rnd`` to ``server_round``. This impacts multiple methods and " +"functions, for example, ``configure_fit``, ``aggregate_fit``, " +"``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``." msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.parameters_to_ndarrays:1 of -msgid "Convert parameters object to NumPy ndarrays." 
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:86 +msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:88 msgid "" -":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " -"data\\)" +"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.parametersrecord.Array:1 of -msgid "Array type." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:90 +msgid "" +"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " +"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " +"Scalar]]]:``" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`ClientMessage `\\ " -"\\(\\[get\\_properties\\_res\\, ...\\]\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:94 +msgid "Custom strategies" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ClientMessage:1 of -msgid "ClientMessage is a container used to hold one result message." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:96 +msgid "" +"The type of parameter ``failures`` has changed from " +"``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " +"BaseException]]`` (in ``aggregate_fit``) and " +"``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in " +"``aggregate_evaluate``)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Code `\\ \\(value\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:100 +msgid "" +"The ``Strategy`` method ``evaluate`` now receives the current round of " +"federated learning/evaluation as the first parameter:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Code:1 of -msgid "Client status codes." 
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:103 +msgid "" +"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:105 msgid "" -":py:obj:`ConfigsRecord `\\ " -"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" +"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " +"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.configsrecord.ConfigsRecord:1 of -msgid "Configs record." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:109 +msgid "Optional improvements" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Context `\\ \\(state\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:111 +msgid "" +"Along with the necessary changes above, there are a number of potential " +"improvements that just became possible:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.context.Context:1 of -msgid "State of your run." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:114 +msgid "" +"Remove \"placeholder\" methods from subclasses of ``Client`` or " +"``NumPyClient``. If you, for example, use server-side evaluation, then " +"empty placeholder implementations of ``evaluate`` are no longer " +"necessary." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:117 +msgid "" +"Configure the round timeout via ``start_simulation``: " +"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " +"round_timeout=600.0), ...)``" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.DisconnectRes:1 of -msgid "DisconnectRes message from client to server." 
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:121 +#: ../../source/how-to-upgrade-to-flower-next.rst:349 +msgid "Further help" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:123 msgid "" -":py:obj:`EvaluateIns `\\ \\(parameters\\, " -"config\\)" +"Most official `Flower code examples " +"`_ are already updated" +" to Flower 1.0, they can serve as a reference for using the Flower 1.0 " +"API. If there are further questions, `join the Flower Slack " +"`_ and use the channel ``#questions``." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.EvaluateIns:1 of -msgid "Evaluate instructions for a client." +#: ../../source/how-to-upgrade-to-flower-next.rst:2 +msgid "Upgrade to Flower Next" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:4 msgid "" -":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " -"num\\_examples\\, metrics\\)" +"Welcome to the migration guide for updating Flower to Flower Next! " +"Whether you're a seasoned user or just getting started, this guide will " +"help you smoothly transition your existing setup to take advantage of the" +" latest features and improvements in Flower Next, starting from version " +"1.8." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.EvaluateRes:1 of -msgid "Evaluate response from a client." +#: ../../source/how-to-upgrade-to-flower-next.rst:11 +msgid "" +"This guide shows how to reuse pre-``1.8`` Flower code with minimum code " +"changes by using the *compatibility layer* in Flower Next. In another " +"guide, we will show how to run Flower Next end-to-end with pure Flower " +"Next APIs." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`EventType `\\ \\(value\\)" +#: ../../source/how-to-upgrade-to-flower-next.rst:15 +msgid "Let's dive in!" 
msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.telemetry.EventType:1 of -msgid "Types of telemetry events." +#: ../../source/how-to-upgrade-to-flower-next.rst:68 +msgid "" +"Here's how to update an existing installation of Flower to Flower Next " +"with ``pip``:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" +#: ../../source/how-to-upgrade-to-flower-next.rst:74 +msgid "or if you need Flower Next with simulation:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.FitIns:1 of -msgid "Fit instructions for a client." +#: ../../source/how-to-upgrade-to-flower-next.rst:80 +msgid "" +"Ensure you set the following version constraint in your " +"``requirements.txt``" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`FitRes `\\ \\(status\\, parameters\\, " -"num\\_examples\\, metrics\\)" +#: ../../source/how-to-upgrade-to-flower-next.rst:90 +msgid "or ``pyproject.toml``:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.FitRes:1 of -msgid "Fit response from a client." +#: ../../source/how-to-upgrade-to-flower-next.rst:101 +msgid "Using Poetry" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" +#: ../../source/how-to-upgrade-to-flower-next.rst:103 +msgid "" +"Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " +"(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " +"running ``poetry install``)." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Error:1 of -msgid "A dataclass that stores information about an error that occurred." 
+#: ../../source/how-to-upgrade-to-flower-next.rst:106 +msgid "" +"Ensure you set the following version constraint in your " +"``pyproject.toml``:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" +#: ../../source/how-to-upgrade-to-flower-next.rst:123 +msgid "" +"In Flower Next, the *infrastructure* and *application layers* have been " +"decoupled. Instead of starting a client in code via ``start_client()``, " +"you create a |clientapp_link|_ and start it via the command line. Instead" +" of starting a server in code via ``start_server()``, you create a " +"|serverapp_link|_ and start it via the command line. The long-running " +"components of server and client are called SuperLink and SuperNode. The " +"following non-breaking changes that require manual updates and allow you " +"to run your project both in the traditional way and in the Flower Next " +"way:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetParametersIns:1 of -msgid "Parameters request for a client." +#: ../../source/how-to-upgrade-to-flower-next.rst:132 +msgid "|clientapp_link|_" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:134 msgid "" -":py:obj:`GetParametersRes `\\ \\(status\\, " -"parameters\\)" +"Wrap your existing client with |clientapp_link|_ instead of launching it " +"via |startclient_link|_. Here's an example:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetParametersRes:1 of -msgid "Response when asked to return parameters." +#: ../../source/how-to-upgrade-to-flower-next.rst:157 +msgid "|serverapp_link|_" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" +#: ../../source/how-to-upgrade-to-flower-next.rst:159 +msgid "" +"Wrap your existing strategy with |serverapp_link|_ instead of starting " +"the server via |startserver_link|_. 
Here's an example:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetPropertiesIns:1 of -msgid "Properties request for a client." +#: ../../source/how-to-upgrade-to-flower-next.rst:180 +msgid "Deployment" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:182 msgid "" -":py:obj:`GetPropertiesRes `\\ \\(status\\, " -"properties\\)" +"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " +"in sequence, |flowernext_clientapp_link|_ (2x) and " +"|flowernext_serverapp_link|_. There is no need to execute `client.py` and" +" `server.py` as Python scripts." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetPropertiesRes:1 of -msgid "Properties response from a client." +#: ../../source/how-to-upgrade-to-flower-next.rst:185 +msgid "" +"Here's an example to start the server without HTTPS (only for " +"prototyping):" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:201 msgid "" -":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " -"error\\]\\)" +"Here's another example to start with HTTPS. Use the ``--ssl-ca-" +"certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line " +"options to pass paths to (CA certificate, server certificate, and server " +"private key)." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Message:1 of -msgid "State of your application from the viewpoint of the entity using it." +#: ../../source/how-to-upgrade-to-flower-next.rst:229 +msgid "Simulation in CLI" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`MessageType `\\ \\(\\)" +#: ../../source/how-to-upgrade-to-flower-next.rst:231 +msgid "" +"Wrap your existing client and strategy with |clientapp_link|_ and " +"|serverapp_link|_, respectively. There is no need to use |startsim_link|_" +" anymore. 
Here's an example:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.constant.MessageType:1 of -msgid "Message type." +#: ../../source/how-to-upgrade-to-flower-next.rst:264 +msgid "" +"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " +"``client_app`` object in the code instead of executing the Python script." +" Here's an example (assuming the ``server_app`` and ``client_app`` " +"objects are in a ``sim.py`` module):" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" +#: ../../source/how-to-upgrade-to-flower-next.rst:281 +msgid "" +"Set default resources for each |clientapp_link|_ using the ``--backend-" +"config`` command line argument instead of setting the " +"``client_resources`` argument in |startsim_link|_. Here's an example:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.constant.MessageTypeLegacy:1 of -msgid "Legacy message type." +#: ../../source/how-to-upgrade-to-flower-next.rst:305 +msgid "Simulation in a Notebook" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:307 msgid "" -":py:obj:`Metadata `\\ \\(run\\_id\\, " -"message\\_id\\, src\\_node\\_id\\, ...\\)" +"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's " +"an example:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Metadata:1 of -msgid "A dataclass holding metadata associated with the current message." -msgstr "" - -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:351 msgid "" -":py:obj:`MetricsRecord `\\ " -"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.metricsrecord.MetricsRecord:1 of -msgid "Metrics record." 
+"Some official `Flower code examples `_ " +"are already updated to Flower Next so they can serve as a reference for " +"using the Flower Next API. If there are further questions, `join the " +"Flower Slack `_ and use the channel " +"``#questions``. You can also `participate in Flower Discuss " +"`_ where you can find us answering questions," +" or share and learn from others about migrating to Flower Next." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`NDArray `\\" +#: ../../source/how-to-upgrade-to-flower-next.rst:358 +msgid "Important" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:360 msgid "" -"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " -":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" +"As we continuously enhance Flower Next at a rapid pace, we'll be " +"periodically updating this guide. Please feel free to share any feedback " +"with us!" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`Parameters `\\ \\(tensors\\, " -"tensor\\_type\\)" +#: ../../source/how-to-upgrade-to-flower-next.rst:366 +msgid "Happy migrating! 🚀" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Parameters:1 of -msgid "Model parameters." +#: ../../source/how-to-use-built-in-mods.rst:2 +msgid "Use Built-in Mods" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:4 msgid "" -":py:obj:`ParametersRecord `\\ " -"\\(\\[array\\_dict\\, keep\\_input\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.parametersrecord.ParametersRecord:1 of -msgid "Parameters record." +"**Note: This tutorial covers experimental features. 
The functionality and" +" interfaces may change in future versions.**" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" +#: ../../source/how-to-use-built-in-mods.rst:7 +msgid "" +"In this tutorial, we will learn how to utilize built-in mods to augment " +"the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) " +"allow us to perform operations before and after a task is processed in " +"the ``ClientApp``." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ReconnectIns:1 of -msgid "ReconnectIns message from server to client." +#: ../../source/how-to-use-built-in-mods.rst:12 +msgid "What are Mods?" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:14 msgid "" -":py:obj:`RecordSet `\\ " -"\\(\\[parameters\\_records\\, ...\\]\\)" +"A Mod is a callable that wraps around a ``ClientApp``. It can manipulate " +"or inspect the incoming ``Message`` and the resulting outgoing " +"``Message``. The signature for a ``Mod`` is as follows:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.recordset.RecordSet:1 of -msgid "RecordSet stores groups of parameters, metrics and configs." +#: ../../source/how-to-use-built-in-mods.rst:23 +msgid "A typical mod function might look something like this:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`ServerMessage `\\ " -"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" +#: ../../source/how-to-use-built-in-mods.rst:36 +msgid "Using Mods" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ServerMessage:1 of -msgid "ServerMessage is a container used to hold one instruction message." 
+#: ../../source/how-to-use-built-in-mods.rst:38 +msgid "To use mods in your ``ClientApp``, you can follow these steps:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Status `\\ \\(code\\, message\\)" +#: ../../source/how-to-use-built-in-mods.rst:41 +msgid "1. Import the required mods" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Status:1 of -msgid "Client status." +#: ../../source/how-to-use-built-in-mods.rst:43 +msgid "First, import the built-in mod you intend to use:" msgstr "" -#: ../../source/ref-api/flwr.common.Array.rst:2 -msgid "Array" +#: ../../source/how-to-use-built-in-mods.rst:51 +msgid "2. Define your client function" msgstr "" -#: flwr.common.record.parametersrecord.Array:3 of +#: ../../source/how-to-use-built-in-mods.rst:53 msgid "" -"A dataclass containing serialized data from an array-like or tensor-like " -"object along with some metadata about it." +"Define your client function (``client_fn``) that will be wrapped by the " +"mod(s):" msgstr "" -#: flwr.common.record.parametersrecord.Array:6 of -msgid "" -"A string representing the data type of the serialised object (e.g. " -"`np.float32`)" +#: ../../source/how-to-use-built-in-mods.rst:62 +msgid "3. Create the ``ClientApp`` with mods" msgstr "" -#: flwr.common.record.parametersrecord.Array:8 of +#: ../../source/how-to-use-built-in-mods.rst:64 msgid "" -"A list representing the shape of the unserialized array-like object. This" -" is used to deserialize the data (depending on the serialization method) " -"or simply as a metadata field." +"Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " +"argument. The order in which you provide the mods matters:" msgstr "" -#: flwr.common.record.parametersrecord.Array:12 of -msgid "" -"A string indicating the type of serialisation mechanism used to generate " -"the bytes in `data` from an array-like or tensor-like object." 
+#: ../../source/how-to-use-built-in-mods.rst:78 +msgid "Order of execution" msgstr "" -#: flwr.common.record.parametersrecord.Array:15 of -msgid "A buffer of bytes containing the data." +#: ../../source/how-to-use-built-in-mods.rst:80 +msgid "" +"When the ``ClientApp`` runs, the mods are executed in the order they are " +"provided in the list:" msgstr "" -#: ../../source/ref-api/flwr.common.Array.rst:26::1 -msgid ":py:obj:`numpy `\\ \\(\\)" +#: ../../source/how-to-use-built-in-mods.rst:83 +msgid "``example_mod_1`` (outermost mod)" msgstr "" -#: ../../source/ref-api/flwr.common.Array.rst:26::1 -#: flwr.common.record.parametersrecord.Array.numpy:1 of -msgid "Return the array as a NumPy array." +#: ../../source/how-to-use-built-in-mods.rst:84 +msgid "``example_mod_2`` (next mod)" msgstr "" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`dtype `\\" +#: ../../source/how-to-use-built-in-mods.rst:85 +msgid "" +"Message handler (core function that handles the incoming ``Message`` and " +"returns the outgoing ``Message``)" msgstr "" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`shape `\\" +#: ../../source/how-to-use-built-in-mods.rst:87 +msgid "``example_mod_2`` (on the way back)" msgstr "" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`stype `\\" +#: ../../source/how-to-use-built-in-mods.rst:88 +msgid "``example_mod_1`` (outermost mod on the way back)" msgstr "" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`data `\\" +#: ../../source/how-to-use-built-in-mods.rst:90 +msgid "" +"Each mod has a chance to inspect and modify the incoming ``Message`` " +"before passing it to the next mod, and likewise with the outgoing " +"``Message`` before returning it up the stack." 
msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:2 -msgid "ClientMessage" +#: ../../source/how-to-use-built-in-mods.rst:97 +msgid "" +"By following this guide, you have learned how to effectively use mods to " +"enhance your ``ClientApp``'s functionality. Remember that the order of " +"mods is crucial and affects how the input and output are processed." msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -msgid ":py:obj:`evaluate_res `\\" +#: ../../source/how-to-use-built-in-mods.rst:101 +msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -msgid ":py:obj:`fit_res `\\" +#: ../../source/how-to-use-differential-privacy.rst:2 +msgid "Use Differential Privacy" msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +#: ../../source/how-to-use-differential-privacy.rst:4 msgid "" -":py:obj:`get_parameters_res " -"`\\" +"This guide explains how you can utilize differential privacy in the " +"Flower framework. If you are not yet familiar with differential privacy, " +"you can refer to :doc:`explanation-differential-privacy`." msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +#: ../../source/how-to-use-differential-privacy.rst:10 msgid "" -":py:obj:`get_properties_res " -"`\\" +"Differential Privacy in Flower is in a preview phase. If you plan to use " +"these features in a production environment with sensitive data, feel free" +" contact us to discuss your requirements and to receive guidance on how " +"to best use these features." msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:2 -msgid "Code" +#: ../../source/how-to-use-differential-privacy.rst:17 +msgid "" +"This approach consists of two separate phases: clipping of the updates " +"and adding noise to the aggregated model. 
For the clipping phase, Flower " +"framework has made it possible to decide whether to perform clipping on " +"the server side or the client side." msgstr "" -#: flwr.common.typing.Code:1 of -msgid "Bases: :py:class:`~enum.Enum`" +#: ../../source/how-to-use-differential-privacy.rst:21 +msgid "" +"**Server-side Clipping**: This approach has the advantage of the server " +"enforcing uniform clipping across all clients' updates and reducing the " +"communication overhead for clipping values. However, it also has the " +"disadvantage of increasing the computational load on the server due to " +"the need to perform the clipping operation for all clients." msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -msgid ":py:obj:`OK `\\" +#: ../../source/how-to-use-differential-privacy.rst:26 +msgid "" +"**Client-side Clipping**: This approach has the advantage of reducing the" +" computational overhead on the server. However, it also has the " +"disadvantage of lacking centralized control, as the server has less " +"control over the clipping process." msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -msgid "" -":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " -"`\\" +#: ../../source/how-to-use-differential-privacy.rst:31 +msgid "Server-side Clipping" msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#: ../../source/how-to-use-differential-privacy.rst:33 msgid "" -":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " -"`\\" +"For central DP with server-side clipping, there are two ``Strategy`` " +"classes that act as wrappers around the actual ``Strategy`` instance (for" +" example, ``FedAvg``). The two wrapper classes are " +"``DifferentialPrivacyServerSideFixedClipping`` and " +"``DifferentialPrivacyServerSideAdaptiveClipping`` for fixed and adaptive " +"clipping." 
msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -msgid ":py:obj:`FIT_NOT_IMPLEMENTED `\\" +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "server side clipping" msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#: ../../source/how-to-use-differential-privacy.rst:43 msgid "" -":py:obj:`EVALUATE_NOT_IMPLEMENTED " -"`\\" +"The code sample below enables the ``FedAvg`` strategy to use server-side " +"fixed clipping using the ``DifferentialPrivacyServerSideFixedClipping`` " +"wrapper class. The same approach can be used with " +"``DifferentialPrivacyServerSideAdaptiveClipping`` by adjusting the " +"corresponding input parameters." msgstr "" -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 -msgid "ConfigsRecord" +#: ../../source/how-to-use-differential-privacy.rst:64 +msgid "Client-side Clipping" msgstr "" -#: flwr.common.record.configsrecord.ConfigsRecord:1 of +#: ../../source/how-to-use-differential-privacy.rst:66 msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`int` | :py:class:`float` | :py:class:`str` |" -" :py:class:`bytes` | :py:class:`bool` | :py:class:`~typing.List`\\ " -"[:py:class:`int`] | :py:class:`~typing.List`\\ [:py:class:`float`] | " -":py:class:`~typing.List`\\ [:py:class:`str`] | :py:class:`~typing.List`\\" -" [:py:class:`bytes`] | :py:class:`~typing.List`\\ [:py:class:`bool`]]" +"For central DP with client-side clipping, the server sends the clipping " +"value to selected clients on each round. Clients can use existing Flower " +"``Mods`` to perform the clipping. Two mods are available for fixed and " +"adaptive client-side clipping: ``fixedclipping_mod`` and " +"``adaptiveclipping_mod`` with corresponding server-side wrappers " +"``DifferentialPrivacyClientSideFixedClipping`` and " +"``DifferentialPrivacyClientSideAdaptiveClipping``." 
msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`clear `\\ \\(\\)" +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "client side clipping" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1 -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid "Remove all items from R." +#: ../../source/how-to-use-differential-privacy.rst:78 +msgid "" +"The code sample below enables the ``FedAvg`` strategy to use differential" +" privacy with client-side fixed clipping using both the " +"``DifferentialPrivacyClientSideFixedClipping`` wrapper class and, on the " +"client, ``fixedclipping_mod``:" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`count_bytes `\\ \\(\\)" +#: ../../source/how-to-use-differential-privacy.rst:97 +msgid "" +"In addition to the server-side strategy wrapper, the ``ClientApp`` needs " +"to configure the matching ``fixedclipping_mod`` to perform the client-" +"side clipping:" msgstr "" -#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 -#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 -#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid "Return number of Bytes stored in this object." +#: ../../source/how-to-use-differential-privacy.rst:115 +msgid "" +"To utilize local differential privacy (DP) and add noise to the client " +"model parameters before transmitting them to the server in Flower, you " +"can use the `LocalDpMod`. The following hyperparameters need to be set: " +"clipping norm value, sensitivity, epsilon, and delta." 
msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "local DP mod" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.get:1 of -msgid "d defaults to None." +#: ../../source/how-to-use-differential-privacy.rst:125 +msgid "Below is a code example that shows how to use ``LocalDpMod``:" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`items `\\ \\(\\)" +#: ../../source/how-to-use-differential-privacy.rst:140 +msgid "" +"Please note that the order of mods, especially those that modify " +"parameters, is important when using multiple modifiers. Typically, " +"differential privacy (DP) modifiers should be the last to operate on " +"parameters." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`keys `\\ \\(\\)" +#: ../../source/how-to-use-differential-privacy.rst:145 +msgid "Local Training using Privacy Engines" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +#: ../../source/how-to-use-differential-privacy.rst:147 +msgid "" +"For ensuring data instance-level privacy during local model training on " +"the client side, consider leveraging privacy engines such as Opacus and " +"TensorFlow Privacy. For examples of using Flower with these engines, " +"please refer to the Flower examples directory (`Opacus " +"`_, `Tensorflow" +" Privacy `_)." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.pop:1 of -msgid "If key is not found, d is returned if given, otherwise KeyError is raised." 
+#: ../../source/how-to-use-strategies.rst:2 +msgid "Use strategies" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/how-to-use-strategies.rst:4 msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" +"Flower allows full customization of the learning process through the " +"``Strategy`` abstraction. A number of built-in strategies are provided in" +" the core framework." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.update:1 of -msgid "Update R from dict/iterable E and F." +#: ../../source/how-to-use-strategies.rst:7 +msgid "" +"There are three ways to customize the way Flower orchestrates the " +"learning process on the server side:" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`values `\\ \\(\\)" +#: ../../source/how-to-use-strategies.rst:10 +msgid "Use an existing strategy, for example, ``FedAvg``" msgstr "" -#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of -msgid "This function counts booleans as occupying 1 Byte." +#: ../../source/how-to-use-strategies.rst:11 +#: ../../source/how-to-use-strategies.rst:43 +msgid "Customize an existing strategy with callback functions" msgstr "" -#: ../../source/ref-api/flwr.common.Context.rst:2 -msgid "Context" +#: ../../source/how-to-use-strategies.rst:12 +#: ../../source/how-to-use-strategies.rst:99 +msgid "Implement a novel strategy" msgstr "" -#: flwr.common.context.Context:3 of -msgid "" -"Holds records added by the entity in a given run and that will stay " -"local. This means that the data it holds will never leave the system it's" -" running from. This can be used as an intermediate storage or scratchpad " -"when executing mods. It can also be used as a memory to access at " -"different points during the lifecycle of this entity (e.g. 
across " -"multiple rounds)" +#: ../../source/how-to-use-strategies.rst:15 +msgid "Use an existing strategy" msgstr "" -#: ../../source/ref-api/flwr.common.Context.rst:28::1 -msgid ":py:obj:`state `\\" +#: ../../source/how-to-use-strategies.rst:17 +msgid "" +"Flower comes with a number of popular federated learning strategies " +"built-in. A built-in strategy can be instantiated as follows:" msgstr "" -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 -msgid "DisconnectRes" +#: ../../source/how-to-use-strategies.rst:27 +msgid "" +"This creates a strategy with all parameters left at their default values " +"and passes it to the ``start_server`` function. It is usually recommended" +" to adjust a few parameters during instantiation:" msgstr "" -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:28::1 -msgid ":py:obj:`reason `\\" +#: ../../source/how-to-use-strategies.rst:45 +msgid "" +"Existing strategies provide several ways to customize their behaviour. " +"Callback functions allow strategies to call user-provided code during " +"execution." msgstr "" -#: ../../source/ref-api/flwr.common.Error.rst:2 -msgid "Error" +#: ../../source/how-to-use-strategies.rst:49 +msgid "Configuring client fit and client evaluate" msgstr "" -#: flwr.common.message.Error:3 of -msgid "An identifier for the error." +#: ../../source/how-to-use-strategies.rst:51 +msgid "" +"The server can pass new configuration values to the client each round by " +"providing a function to ``on_fit_config_fn``. The provided function will " +"be called by the strategy and must return a dictionary of configuration " +"key values pairs that will be sent to the client. It must return a " +"dictionary of arbitrary configuration values ``client.fit`` and " +"``client.evaluate`` functions during each round of federated learning." msgstr "" -#: flwr.common.message.Error:5 of -msgid "A reason for why the error arose (e.g. 
an exception stack-trace)" +#: ../../source/how-to-use-strategies.rst:84 +msgid "" +"The ``on_fit_config_fn`` can be used to pass arbitrary configuration " +"values from server to client, and potentially change these values each " +"round, for example, to adjust the learning rate. The client will receive " +"the dictionary returned by the ``on_fit_config_fn`` in its own " +"``client.fit()`` function." msgstr "" -#: flwr.common.Error.code:1::1 of -msgid ":py:obj:`code `\\" +#: ../../source/how-to-use-strategies.rst:89 +msgid "" +"Similar to ``on_fit_config_fn``, there is also ``on_evaluate_config_fn`` " +"to customize the configuration sent to ``client.evaluate()``" msgstr "" -#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of -msgid "Error code." +#: ../../source/how-to-use-strategies.rst:93 +msgid "Configuring server-side evaluation" msgstr "" -#: flwr.common.Error.code:1::1 of -msgid ":py:obj:`reason `\\" +#: ../../source/how-to-use-strategies.rst:95 +msgid "" +"Server-side evaluation can be enabled by passing an evaluation function " +"to ``evaluate_fn``." msgstr "" -#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of -msgid "Reason reported about the error." +#: ../../source/how-to-use-strategies.rst:101 +msgid "" +"Writing a fully custom strategy is a bit more involved, but it provides " +"the most flexibility. Read the `Implementing Strategies `_ guide to learn more." 
msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 -msgid "EvaluateIns" +#: ../../source/index.rst:34 +msgid "Tutorial" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 -msgid ":py:obj:`parameters `\\" +#: ../../source/index.rst:44 +msgid "Quickstart tutorials" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 -msgid ":py:obj:`config `\\" +#: ../../source/index.rst:81 ../../source/index.rst:85 +msgid "How-to guides" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:2 -msgid "EvaluateRes" +#: ../../source/index.rst:106 +msgid "Legacy example guides" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`status `\\" +#: ../../source/index.rst:114 ../../source/index.rst:119 +msgid "Explanations" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`loss `\\" +#: None:-1 +msgid "API reference" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`num_examples `\\" +#: ../../source/index.rst:145 +msgid "Reference docs" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`metrics `\\" +#: ../../source/index.rst:160 +msgid "Contributor tutorials" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:2 -msgid "EventType" +#: ../../source/index.rst:167 +msgid "Contributor how-to guides" msgstr "" -#: flwr.common.telemetry.EventType:1 of -msgid "Bases: :py:class:`str`, :py:class:`~enum.Enum`" +#: ../../source/index.rst:179 +msgid "Contributor explanations" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/index.rst:185 +msgid "Contributor references" +msgstr "" + +#: ../../source/index.rst:-1 msgid "" -":py:obj:`encode `\\ \\(\\[encoding\\, " -"errors\\]\\)" +"Check out the documentation of the main Flower Framework enabling easy " +"Python development for Federated Learning." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.encode:1 of -msgid "Encode the string using the codec registered for encoding." +#: ../../source/index.rst:2 +msgid "Flower Framework Documentation" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/index.rst:7 msgid "" -":py:obj:`replace `\\ \\(old\\, new\\[\\, " -"count\\]\\)" +"Welcome to Flower's documentation. `Flower `_ is a " +"friendly federated learning framework." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.replace:1 of -msgid "Return a copy with all occurrences of substring old replaced by new." +#: ../../source/index.rst:11 +msgid "Join the Flower Community" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/index.rst:13 msgid "" -":py:obj:`split `\\ \\(\\[sep\\, " -"maxsplit\\]\\)" +"The Flower Community is growing quickly - we're a friendly group of " +"researchers, engineers, students, professionals, academics, and other " +"enthusiasts." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rsplit:1 flwr.common.EventType.split:1 of -msgid "" -"Return a list of the substrings in the string, using sep as the separator" -" string." +#: ../../source/index.rst:16 +msgid "Join us on Slack" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/index.rst:23 +msgid "Flower Framework" +msgstr "" + +#: ../../source/index.rst:25 msgid "" -":py:obj:`rsplit `\\ \\(\\[sep\\, " -"maxsplit\\]\\)" +"The user guide is targeted at researchers and developers who want to use " +"Flower to bring existing machine learning workloads into a federated " +"setting. One of Flower's design goals was to make this simple. Read on to" +" learn more." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`join `\\ \\(iterable\\, \\/\\)" +#: ../../source/index.rst:30 +msgid "Tutorials" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.join:1 of -msgid "Concatenate any number of strings." +#: ../../source/index.rst:32 +msgid "" +"A learning-oriented series of federated learning tutorials, the best " +"place to start." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`capitalize `\\ \\(\\)" +#: ../../source/index.rst:62 +msgid "" +"QUICKSTART TUTORIALS: :doc:`PyTorch ` | " +":doc:`TensorFlow ` | :doc:`MLX ` | :doc:`🤗 Transformers ` | :doc:`JAX ` | :doc:`Pandas " +"` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | " +":doc:`XGBoost ` | :doc:`Android ` | :doc:`iOS `" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.capitalize:1 of -msgid "Return a capitalized version of the string." +#: ../../source/index.rst:70 +msgid "We also made video tutorials for PyTorch:" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`casefold `\\ \\(\\)" +#: ../../source/index.rst:75 +msgid "And TensorFlow:" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.casefold:1 of -msgid "Return a version of the string suitable for caseless comparisons." +#: ../../source/index.rst:83 +msgid "" +"Problem-oriented how-to guides show step-by-step how to achieve a " +"specific goal." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`title `\\ \\(\\)" +#: ../../source/index.rst:116 +msgid "" +"Understanding-oriented concept guides explain and discuss key topics and " +"underlying ideas behind Flower and collaborative AI." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.title:1 of -msgid "Return a version of the string where each word is titlecased." +#: ../../source/index.rst:128 +msgid "References" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`center `\\ \\(width\\[\\, " -"fillchar\\]\\)" +#: ../../source/index.rst:130 +msgid "Information-oriented API reference and other reference material." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.center:1 of -msgid "Return a centered string of length width." +#: ../../source/index.rst:139::1 +msgid ":py:obj:`flwr `\\" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +#: ../../source/index.rst:139::1 flwr:1 of +msgid "Flower main package." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -"Return the number of non-overlapping occurrences of substring sub in " -"string S[start:end]." +#: ../../source/index.rst:155 +msgid "Contributor docs" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/index.rst:157 msgid "" -":py:obj:`expandtabs `\\ " -"\\(\\[tabsize\\]\\)" +"The Flower community welcomes contributions. The following docs are " +"intended to help along the way." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.expandtabs:1 of -msgid "Return a copy where all tab characters are expanded using spaces." 
+#: ../../source/ref-api-cli.rst:2 +msgid "Flower CLI reference" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +#: ../../source/ref-api-cli.rst:7 +msgid "flwr CLI" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -"Return the lowest index in S where substring sub is found, such that sub " -"is contained within S[start:end]." +#: ../../flwr:1 +msgid "flwr is the Flower command line interface." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`partition `\\ \\(sep\\, \\/\\)" +#: ../../source/ref-api-cli.rst +msgid "Options" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.partition:1 flwr.common.EventType.rpartition:1 of -msgid "Partition the string into three parts using the given separator." +#: ../../flwr:1 +msgid "Install completion for the current shell." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr:1 msgid "" -":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +"Show completion for the current shell, to copy it or customize the " +"installation." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr build:1 +msgid "Build a Flower App into a Flower App Bundle (FAB)." +msgstr "" + +#: ../../flwr build:1 msgid "" -":py:obj:`ljust `\\ \\(width\\[\\, " -"fillchar\\]\\)" +"You can run ``flwr build`` without any arguments to bundle the app " +"located in the current directory. Alternatively, you can you can specify " +"a path using the ``--app`` option to bundle an app located at the " +"provided path. For example:" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.ljust:1 of -msgid "Return a left-justified string of length width." +#: ../../flwr build:1 +msgid "``flwr build --app ./apps/flower-hello-world``." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`lower `\\ \\(\\)" +#: ../../flwr build:1 +msgid "Path of the Flower App to bundle into a FAB" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.lower:1 of -msgid "Return a copy of the string converted to lowercase." +#: ../../flwr install:1 +msgid "Install a Flower App Bundle." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)" +#: ../../flwr install:1 +msgid "It can be ran with a single FAB file argument:" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.lstrip:1 of -msgid "Return a copy of the string with leading whitespace removed." +#: ../../flwr install:1 +msgid "``flwr install ./target_project.fab``" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +#: ../../flwr install:1 +msgid "The target install directory can be specified with ``--flwr-dir``:" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -"Return the highest index in S where substring sub is found, such that sub" -" is contained within S[start:end]." +#: ../../flwr install:1 +msgid "``flwr install ./target_project.fab --flwr-dir ./docs/flwr``" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr install:1 msgid "" -":py:obj:`rindex `\\ \\(sub\\[\\, " -"start\\[\\, end\\]\\]\\)" +"This will install ``target_project`` to ``./docs/flwr/``. 
By default, " +"``flwr-dir`` is equal to:" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`rjust `\\ \\(width\\[\\, " -"fillchar\\]\\)" +#: ../../flwr install:1 +msgid "``$FLWR_HOME/`` if ``$FLWR_HOME`` is defined" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rjust:1 of -msgid "Return a right-justified string of length width." +#: ../../flwr install:1 +msgid "``$XDG_DATA_HOME/.flwr/`` if ``$XDG_DATA_HOME`` is defined" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)" +#: ../../flwr install:1 +msgid "``$HOME/.flwr/`` in all other cases" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rstrip:1 of -msgid "Return a copy of the string with trailing whitespace removed." -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`rpartition `\\ \\(sep\\, \\/\\)" +#: ../../flwr install:1 +msgid "The desired install path." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`splitlines `\\ " -"\\(\\[keepends\\]\\)" -msgstr "" +#: ../../source/ref-api-cli.rst +#, fuzzy +msgid "Arguments" +msgstr "Argumento de compilação" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.splitlines:1 of -msgid "Return a list of the lines in the string, breaking at line boundaries." -msgstr "" +#: ../../flwr install:1 log:1 new:1 run:1 +#, fuzzy +msgid "Optional argument" +msgstr "Argumento de compilação" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`strip `\\ \\(\\[chars\\]\\)" +#: ../../flwr install:1 +msgid "The source FAB file to install." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.strip:1 of -msgid "Return a copy of the string with leading and trailing whitespace removed." 
+#: ../../flwr log:1 +msgid "Get logs from a Flower project run." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`swapcase `\\ \\(\\)" +#: ../../flwr log:1 +msgid "Flag to stream or print logs from the Flower run" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.swapcase:1 of -msgid "" -"Convert uppercase characters to lowercase and lowercase characters to " -"uppercase." +#: ../../flwr log run +msgid "default" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`translate `\\ \\(table\\, \\/\\)" +#: ../../flwr log:1 +msgid "``True``" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.translate:1 of -msgid "Replace each character in the string using the given translation table." -msgstr "" +#: ../../flwr log:1 +#, fuzzy +msgid "Required argument" +msgstr "Argumento de compilação" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`upper `\\ \\(\\)" +#: ../../flwr log:1 +msgid "The Flower run ID to query" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.upper:1 of -msgid "Return a copy of the string converted to uppercase." +#: ../../flwr log:1 +msgid "Path of the Flower project to run" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`startswith `\\ \\(prefix\\[\\," -" start\\[\\, end\\]\\]\\)" +#: ../../flwr log:1 +msgid "Name of the federation to run the app on" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return True if S starts with the specified prefix, False otherwise." +#: ../../flwr new:1 +msgid "Create new Flower App." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`endswith `\\ \\(suffix\\[\\, " -"start\\[\\, end\\]\\]\\)" +#: ../../flwr new:1 +msgid "The ML framework to use" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return True if S ends with the specified suffix, False otherwise." +#: ../../flwr new +msgid "options" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr new:1 msgid "" -":py:obj:`removeprefix `\\ " -"\\(prefix\\, \\/\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.removeprefix:1 of -msgid "Return a str with the given prefix string removed if present." +"PyTorch | TensorFlow | sklearn | HuggingFace | JAX | MLX | NumPy | " +"FlowerTune | Flower Baseline" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`removesuffix `\\ " -"\\(suffix\\, \\/\\)" +#: ../../flwr new:1 +msgid "The Flower username of the author" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.removesuffix:1 of -msgid "Return a str with the given suffix string removed if present." -msgstr "" +#: ../../flwr new:1 +#, fuzzy +msgid "The name of the Flower App" +msgstr "O nome do repositório da imagem base." -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isascii `\\ \\(\\)" +#: ../../flwr run:1 +msgid "Run Flower App." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isascii:1 of -msgid "Return True if all characters in the string are ASCII, False otherwise." 
+#: ../../flwr run:1 +msgid "Override configuration key-value pairs, should be of the format:" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`islower `\\ \\(\\)" +#: ../../flwr run:1 +msgid "" +"`--run-config 'key1=\"value1\" key2=\"value2\"' --run-config " +"'key3=\"value3\"'`" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.islower:1 of -msgid "Return True if the string is a lowercase string, False otherwise." +#: ../../flwr run:1 +msgid "" +"Note that `key1`, `key2`, and `key3` in this example need to exist inside" +" the `pyproject.toml` in order to be properly overriden." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isupper `\\ \\(\\)" +#: ../../flwr run:1 +msgid "" +"Use `--stream` with `flwr run` to display logs; logs are not streamed by " +"default." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isupper:1 of -msgid "Return True if the string is an uppercase string, False otherwise." -msgstr "" +#: ../../flwr run:1 +#, fuzzy +msgid "``False``" +msgstr "``FLWR_VERSION``" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`istitle `\\ \\(\\)" -msgstr "" +#: ../../flwr run:1 +#, fuzzy +msgid "Path of the Flower App to run." +msgstr "O nome do repositório da imagem base." -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.istitle:1 of -msgid "Return True if the string is a title-cased string, False otherwise." +#: ../../flwr run:1 +msgid "Name of the federation to run the app on." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isspace `\\ \\(\\)" +#: ../../source/ref-api-cli.rst:16 +msgid "flower-simulation" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isspace:1 of -msgid "Return True if the string is a whitespace string, False otherwise." 
+#: ../../source/ref-api-cli.rst:26 +msgid "flower-superlink" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isdecimal `\\ \\(\\)" +#: ../../source/ref-api-cli.rst:36 +msgid "flower-supernode" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isdecimal:1 of -msgid "Return True if the string is a decimal string, False otherwise." +#: ../../source/ref-api-cli.rst:46 +msgid "flower-server-app" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isdigit `\\ \\(\\)" +#: ../../source/ref-api-cli.rst:50 +msgid "" +"Note that since version ``1.11.0``, ``flower-server-app`` no longer " +"supports passing a reference to a `ServerApp` attribute. Instead, you " +"need to pass the path to Flower app via the argument ``--app``. This is " +"the path to a directory containing a `pyproject.toml`. You can create a " +"valid Flower app by executing ``flwr new`` and following the prompt." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isdigit:1 of -msgid "Return True if the string is a digit string, False otherwise." +#: ../../source/ref-api-cli.rst:64 +msgid "flower-superexec" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isnumeric `\\ \\(\\)" +#: ../../source/ref-api/flwr.rst:2 +msgid "flwr" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isnumeric:1 of -msgid "Return True if the string is a numeric string, False otherwise." 
+#: ../../source/ref-api/flwr.client.rst:43 ../../source/ref-api/flwr.rst:25 +#: ../../source/ref-api/flwr.server.rst:48 +msgid "Modules" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isalpha `\\ \\(\\)" +#: ../../source/ref-api/flwr.rst:35::1 +msgid ":py:obj:`flwr.client `\\" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isalpha:1 of -msgid "Return True if the string is an alphabetic string, False otherwise." +#: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of +msgid "Flower client." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isalnum `\\ \\(\\)" +#: ../../source/ref-api/flwr.rst:35::1 +msgid ":py:obj:`flwr.common `\\" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isalnum:1 of -msgid "Return True if the string is an alpha-numeric string, False otherwise." +#: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of +msgid "Common components shared between server and client." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isidentifier `\\ \\(\\)" +#: ../../source/ref-api/flwr.rst:35::1 +msgid ":py:obj:`flwr.server `\\" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isidentifier:1 of -msgid "Return True if the string is a valid Python identifier, False otherwise." +#: ../../source/ref-api/flwr.rst:35::1 +#: ../../source/ref-api/flwr.server.rst:37::1 flwr.server:1 +#: flwr.server.server.Server:1 of +msgid "Flower server." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isprintable `\\ \\(\\)" +#: ../../source/ref-api/flwr.rst:35::1 +msgid ":py:obj:`flwr.simulation `\\" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isprintable:1 of -msgid "Return True if the string is printable, False otherwise." 
+#: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of +msgid "Flower simulation." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`zfill `\\ \\(width\\, \\/\\)" +#: ../../source/ref-api/flwr.client.rst:2 +msgid "client" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.zfill:1 of -msgid "" -"Pad a numeric string with zeros on the left, to fill a field of the given" -" width." +#: ../../source/ref-api/flwr.client.mod.rst:13 +#: ../../source/ref-api/flwr.client.rst:13 +#: ../../source/ref-api/flwr.common.rst:13 +#: ../../source/ref-api/flwr.server.rst:13 +#: ../../source/ref-api/flwr.simulation.rst:13 +msgid "Functions" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.rst:23::1 msgid "" -":py:obj:`format `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return a formatted version of S, using substitutions from args and kwargs." -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`format_map `\\ \\(mapping\\)" +":py:obj:`start_client `\\ \\(\\*\\, " +"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return a formatted version of S, using substitutions from mapping." +#: ../../source/ref-api/flwr.client.rst:23::1 +#: flwr.client.app.start_client:1 of +msgid "Start a Flower client node which connects to a Flower server." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`maketrans `\\" +#: ../../source/ref-api/flwr.client.rst:23::1 +msgid "" +":py:obj:`start_numpy_client `\\ \\(\\*\\," +" server\\_address\\, client\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.maketrans:1 of -msgid "Return a translation table usable for str.translate()." 
+#: ../../source/ref-api/flwr.client.rst:23::1 +#: flwr.client.app.start_numpy_client:1 of +msgid "Start a Flower NumPyClient which connects to a gRPC server." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`PING `\\" +#: ../../source/ref-api/flwr.client.mod.rst:30 +#: ../../source/ref-api/flwr.client.rst:25 +#: ../../source/ref-api/flwr.common.rst:32 +#: ../../source/ref-api/flwr.server.rst:24 +#: ../../source/ref-api/flwr.server.strategy.rst:17 +#: ../../source/ref-api/flwr.server.workflow.rst:17 +msgid "Classes" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_CLIENT_ENTER `\\" +#: ../../source/ref-api/flwr.client.rst:32::1 +msgid ":py:obj:`Client `\\ \\(\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_CLIENT_LEAVE `\\" +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.client.Client:1 of +msgid "Abstract base class for Flower clients." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_SERVER_ENTER `\\" +#: ../../source/ref-api/flwr.client.rst:32::1 +msgid "" +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " +"mods\\]\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_SERVER_LEAVE `\\" +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.client_app.ClientApp:1 of +msgid "Flower ClientApp." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_DRIVER_API_ENTER " -"`\\" +#: ../../source/ref-api/flwr.client.rst:32::1 +msgid ":py:obj:`NumPyClient `\\ \\(\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_DRIVER_API_LEAVE " -"`\\" +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.numpy_client.NumPyClient:1 of +msgid "Abstract base class for Flower clients using NumPy." 
msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_FLEET_API_ENTER " -"`\\" +#: ../../source/ref-api/flwr.client.rst:50::1 +msgid ":py:obj:`flwr.client.mod `\\" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_FLEET_API_LEAVE " -"`\\" +#: ../../source/ref-api/flwr.client.rst:50::1 flwr.client.mod:1 of +msgid "Flower Built-in Mods." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SUPERLINK_ENTER " -"`\\" +#: flwr.client.client.Client:1 flwr.client.numpy_client.NumPyClient:1 +#: flwr.server.client_manager.ClientManager:1 +#: flwr.server.driver.driver.Driver:1 flwr.server.strategy.strategy.Strategy:1 +#: of +msgid "Bases: :py:class:`~abc.ABC`" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SUPERLINK_LEAVE " -"`\\" -msgstr "" - -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`START_SIMULATION_ENTER " -"`\\" +#: ../../source/ref-api/flwr.client.Client.rst:15 +#: ../../source/ref-api/flwr.client.ClientApp.rst:15 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:15 +#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:15 +#: ../../source/ref-api/flwr.common.Array.rst:15 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:15 +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Context.rst:15 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 +#: ../../source/ref-api/flwr.common.Error.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 +#: ../../source/ref-api/flwr.common.EventType.rst:15 +#: ../../source/ref-api/flwr.common.FitIns.rst:15 +#: ../../source/ref-api/flwr.common.FitRes.rst:15 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:15 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 +#: 
../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 +#: ../../source/ref-api/flwr.common.Message.rst:15 +#: ../../source/ref-api/flwr.common.MessageType.rst:15 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 +#: ../../source/ref-api/flwr.common.Metadata.rst:15 +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Parameters.rst:15 +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 +#: ../../source/ref-api/flwr.common.RecordSet.rst:15 +#: ../../source/ref-api/flwr.common.ServerMessage.rst:15 +#: ../../source/ref-api/flwr.common.Status.rst:15 +#: ../../source/ref-api/flwr.server.ClientManager.rst:15 +#: ../../source/ref-api/flwr.server.Driver.rst:15 +#: ../../source/ref-api/flwr.server.History.rst:15 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 +#: ../../source/ref-api/flwr.server.Server.rst:15 +#: ../../source/ref-api/flwr.server.ServerApp.rst:15 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:15 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:15 +#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 +#: 
../../source/ref-api/flwr.server.strategy.FedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 +msgid "Methods" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`START_SIMULATION_LEAVE " -"`\\" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`evaluate `\\ \\(ins\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`DRIVER_CONNECT `\\" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.evaluate:1 +#: flwr.client.numpy_client.NumPyClient.evaluate:1 of +msgid "Evaluate the provided parameters using the locally held dataset." 
msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`DRIVER_DISCONNECT `\\" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`fit `\\ \\(ins\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_DRIVER_ENTER `\\" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.fit:1 of +msgid "Refine the provided parameters using the locally held dataset." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_DRIVER_LEAVE `\\" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_context `\\ \\(\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_CLIENT_APP_ENTER " -"`\\" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_context:1 +#: flwr.client.numpy_client.NumPyClient.get_context:1 of +msgid "Get the run context from this client." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_CLIENT_APP_LEAVE " -"`\\" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_parameters `\\ \\(ins\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SERVER_APP_ENTER " -"`\\" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_parameters:1 +#: flwr.client.numpy_client.NumPyClient.get_parameters:1 of +msgid "Return the current local model parameters." 
msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SERVER_APP_LEAVE " -"`\\" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_properties `\\ \\(ins\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SUPERNODE_ENTER " -"`\\" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.get_properties:1 of +msgid "Return set of client's properties." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SUPERNODE_LEAVE " -"`\\" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`set_context `\\ \\(context\\)" msgstr "" -#: flwr.common.EventType.capitalize:3 of -msgid "" -"More specifically, make the first character have upper case and the rest " -"lower case." +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.set_context:1 +#: flwr.client.numpy_client.NumPyClient.set_context:1 of +msgid "Apply a run context to this client." msgstr "" -#: flwr.common.EventType.center:3 flwr.common.EventType.ljust:3 -#: flwr.common.EventType.rjust:3 of -msgid "Padding is done using the specified fill character (default is a space)." +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`to_client `\\ \\(\\)" msgstr "" -#: flwr.common.EventType.count:1 of -msgid "" -"Return the number of non-overlapping occurrences of substring sub in " -"string S[start:end]. Optional arguments start and end are interpreted as" -" in slice notation." +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.to_client:1 of +msgid "Return client (itself)." 
msgstr "" -#: flwr.common.EventType.encode:3 of -msgid "encoding" +#: ../../source/ref-api/flwr.client.Client.rst:46 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:46 +#: ../../source/ref-api/flwr.common.Array.rst:28 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:25 +#: ../../source/ref-api/flwr.common.Code.rst:19 +#: ../../source/ref-api/flwr.common.Context.rst:25 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 +#: ../../source/ref-api/flwr.common.Error.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 +#: ../../source/ref-api/flwr.common.EventType.rst:165 +#: ../../source/ref-api/flwr.common.FitIns.rst:25 +#: ../../source/ref-api/flwr.common.FitRes.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 +#: ../../source/ref-api/flwr.common.Message.rst:37 +#: ../../source/ref-api/flwr.common.MessageType.rst:25 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 +#: ../../source/ref-api/flwr.common.Metadata.rst:25 +#: ../../source/ref-api/flwr.common.Parameters.rst:25 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 +#: ../../source/ref-api/flwr.common.RecordSet.rst:25 +#: ../../source/ref-api/flwr.common.ServerMessage.rst:25 +#: ../../source/ref-api/flwr.common.Status.rst:25 +#: ../../source/ref-api/flwr.server.Driver.rst:40 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:25 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:25 +msgid "Attributes" msgstr "" -#: flwr.common.EventType.encode:4 of -msgid "The encoding in which to encode the string." 
+#: flwr.client.Client.context:1::1 of +msgid ":py:obj:`context `\\" msgstr "" -#: flwr.common.EventType.encode:9 of -msgid "errors" +#: flwr.client.Client.context:1 flwr.client.Client.context:1::1 +#: flwr.client.NumPyClient.context:1 +#: flwr.client.NumPyClient.context:1::1 of +msgid "Getter for `Context` client attribute." msgstr "" -#: flwr.common.EventType.encode:6 of -msgid "" -"The error handling scheme to use for encoding errors. The default is " -"'strict' meaning that encoding errors raise a UnicodeEncodeError. Other " -"possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well " -"as any other name registered with codecs.register_error that can handle " -"UnicodeEncodeErrors." +#: ../../source/ref-api/flwr.common.Parameters.rst:2 +#: flwr.client.app.start_client flwr.client.app.start_numpy_client +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.mod.localdp_mod.LocalDpMod +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.context.Context flwr.common.message.Error +#: flwr.common.message.Message flwr.common.message.Message.create_error_reply +#: flwr.common.message.Message.create_reply flwr.common.message.Metadata +#: flwr.common.record.configsrecord.ConfigsRecord +#: flwr.common.record.metricsrecord.MetricsRecord +#: flwr.common.record.parametersrecord.Array +#: flwr.common.record.parametersrecord.ParametersRecord +#: flwr.common.record.recordset.RecordSet flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.ClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.unregister +#: 
flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.serverapp_components.ServerAppComponents +#: flwr.server.strategy.bulyan.Bulyan +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.fedadagrad.FedAdagrad +#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg +#: flwr.server.strategy.fedavg_android.FedAvgAndroid +#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt +#: flwr.server.strategy.fedprox.FedProx +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg +#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow +#: flwr.simulation.run_simulation.run_simulation of +msgid "Parameters" msgstr "" -#: flwr.common.EventType.endswith:1 of +#: flwr.client.client.Client.evaluate:3 of msgid "" -"Return True if S ends with the 
specified suffix, False otherwise. With " -"optional start, test S beginning at that position. With optional end, " -"stop comparing S at that position. suffix can also be a tuple of strings " -"to try." +"The evaluation instructions containing (global) model parameters received" +" from the server and a dictionary of configuration values used to " +"customize the local evaluation process." msgstr "" -#: flwr.common.EventType.expandtabs:3 of -msgid "If tabsize is not given, a tab size of 8 characters is assumed." +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of +msgid "Returns" msgstr "" -#: 
flwr.common.EventType.find:1 flwr.common.EventType.index:1 of +#: flwr.client.client.Client.evaluate:8 of msgid "" -"Return the lowest index in S where substring sub is found, such that sub " -"is contained within S[start:end]. Optional arguments start and end are " -"interpreted as in slice notation." +"The evaluation result containing the loss on the local dataset and other " +"details such as the number of local data examples used for evaluation." msgstr "" -#: flwr.common.EventType.find:5 flwr.common.EventType.rfind:5 of -msgid "Return -1 on failure." +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of +msgid "Return type" msgstr "" -#: flwr.common.EventType.format:1 of +#: 
flwr.client.client.Client.fit:3 of msgid "" -"Return a formatted version of S, using substitutions from args and " -"kwargs. The substitutions are identified by braces ('{' and '}')." +"The training instructions containing (global) model parameters received " +"from the server and a dictionary of configuration values used to " +"customize the local training process." msgstr "" -#: flwr.common.EventType.format_map:1 of +#: flwr.client.client.Client.fit:8 of msgid "" -"Return a formatted version of S, using substitutions from mapping. The " -"substitutions are identified by braces ('{' and '}')." -msgstr "" - -#: flwr.common.EventType.index:5 flwr.common.EventType.rindex:5 of -msgid "Raises ValueError when the substring is not found." +"The training result containing updated parameters and other details such " +"as the number of local training examples used for training." msgstr "" -#: flwr.common.EventType.isalnum:3 of +#: flwr.client.client.Client.get_parameters:3 of msgid "" -"A string is alpha-numeric if all characters in the string are alpha-" -"numeric and there is at least one character in the string." +"The get parameters instructions received from the server containing a " +"dictionary of configuration values." msgstr "" -#: flwr.common.EventType.isalpha:3 of -msgid "" -"A string is alphabetic if all characters in the string are alphabetic and" -" there is at least one character in the string." +#: flwr.client.client.Client.get_parameters:7 of +msgid "The current local model parameters." msgstr "" -#: flwr.common.EventType.isascii:3 of +#: flwr.client.client.Client.get_properties:3 of msgid "" -"ASCII characters have code points in the range U+0000-U+007F. Empty " -"string is ASCII too." +"The get properties instructions received from the server containing a " +"dictionary of configuration values." 
msgstr "" -#: flwr.common.EventType.isdecimal:3 of -msgid "" -"A string is a decimal string if all characters in the string are decimal " -"and there is at least one character in the string." +#: flwr.client.client.Client.get_properties:7 of +msgid "The current client properties." msgstr "" -#: flwr.common.EventType.isdigit:3 of -msgid "" -"A string is a digit string if all characters in the string are digits and" -" there is at least one character in the string." +#: ../../source/ref-api/flwr.client.ClientApp.rst:2 +msgid "ClientApp" msgstr "" -#: flwr.common.EventType.isidentifier:3 of -msgid "" -"Call keyword.iskeyword(s) to test whether string s is a reserved " -"identifier, such as \"def\" or \"class\"." +#: flwr.client.client_app.ClientApp:1 flwr.client.mod.localdp_mod.LocalDpMod:1 +#: flwr.common.constant.MessageType:1 flwr.common.constant.MessageTypeLegacy:1 +#: flwr.common.context.Context:1 flwr.common.message.Error:1 +#: flwr.common.message.Message:1 flwr.common.message.Metadata:1 +#: flwr.common.record.parametersrecord.Array:1 +#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 +#: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 +#: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 +#: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 +#: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 +#: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 +#: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 +#: flwr.common.typing.Status:1 flwr.server.history.History:1 +#: flwr.server.server.Server:1 flwr.server.server_app.ServerApp:1 +#: flwr.server.server_config.ServerConfig:1 +#: flwr.server.serverapp_components.ServerAppComponents:1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +msgid "Bases: :py:class:`object`" msgstr "" 
-#: flwr.common.EventType.islower:3 of -msgid "" -"A string is lowercase if all cased characters in the string are lowercase" -" and there is at least one cased character in the string." +#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 +#: flwr.client.client_app.ClientApp:4 +#: flwr.client.client_app.ClientApp.evaluate:4 +#: flwr.client.client_app.ClientApp.query:4 +#: flwr.client.client_app.ClientApp.train:4 +#: flwr.client.mod.localdp_mod.LocalDpMod:22 +#: flwr.common.record.configsrecord.ConfigsRecord:20 +#: flwr.common.record.metricsrecord.MetricsRecord:19 +#: flwr.common.record.parametersrecord.ParametersRecord:22 +#: flwr.common.record.recordset.RecordSet:23 flwr.server.app.start_server:41 +#: flwr.server.server_app.ServerApp:4 flwr.server.server_app.ServerApp.main:4 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 +#: of +msgid "Examples" msgstr "" -#: flwr.common.EventType.isnumeric:3 of +#: flwr.client.client_app.ClientApp:5 of msgid "" -"A string is numeric if all characters in the string are numeric and there" -" is at least one character in the string." +"Assuming a typical `Client` implementation named `FlowerClient`, you can " +"wrap it in a `ClientApp` as follows:" msgstr "" -#: flwr.common.EventType.isprintable:3 of +#: flwr.client.client_app.ClientApp:16 of msgid "" -"A string is printable if all of its characters are considered printable " -"in repr() or if it is empty." 
+"If the above code is in a Python module called `client`, it can be " +"started as follows:" msgstr "" -#: flwr.common.EventType.isspace:3 of +#: flwr.client.client_app.ClientApp:21 of msgid "" -"A string is whitespace if all characters in the string are whitespace and" -" there is at least one character in the string." +"In this `client:app` example, `client` refers to the Python module " +"`client.py` in which the previous code lives in and `app` refers to the " +"global attribute `app` that points to an object of type `ClientApp`." msgstr "" -#: flwr.common.EventType.istitle:3 of -msgid "" -"In a title-cased string, upper- and title-case characters may only follow" -" uncased characters and lowercase characters only cased ones." +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`evaluate `\\ \\(\\)" msgstr "" -#: flwr.common.EventType.isupper:3 of -msgid "" -"A string is uppercase if all cased characters in the string are uppercase" -" and there is at least one cased character in the string." +#: flwr.client.client_app.ClientApp.evaluate:1 +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid "Return a decorator that registers the evaluate fn with the client app." msgstr "" -#: flwr.common.EventType.join:3 of -msgid "" -"The string whose method is called is inserted in between each given " -"string. The result is returned as a new string." +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`query `\\ \\(\\)" msgstr "" -#: flwr.common.EventType.join:6 of -msgid "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.query:1 of +msgid "Return a decorator that registers the query fn with the client app." msgstr "" -#: flwr.common.EventType.lstrip:3 flwr.common.EventType.rstrip:3 -#: flwr.common.EventType.strip:3 of -msgid "If chars is given and not None, remove characters in chars instead." 
+#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`train `\\ \\(\\)" msgstr "" -#: flwr.common.EventType.maketrans:3 of -msgid "" -"If there is only one argument, it must be a dictionary mapping Unicode " -"ordinals (integers) or characters to Unicode ordinals, strings or None. " -"Character keys will be then converted to ordinals. If there are two " -"arguments, they must be strings of equal length, and in the resulting " -"dictionary, each character in x will be mapped to the character at the " -"same position in y. If there is a third argument, it must be a string, " -"whose characters will be mapped to None in the result." +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.train:1 of +msgid "Return a decorator that registers the train fn with the client app." msgstr "" -#: flwr.common.EventType.partition:3 of -msgid "" -"This will search for the separator in the string. If the separator is " -"found, returns a 3-tuple containing the part before the separator, the " -"separator itself, and the part after it." +#: ../../source/ref-api/flwr.client.NumPyClient.rst:2 +msgid "NumPyClient" msgstr "" -#: flwr.common.EventType.partition:7 of +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 msgid "" -"If the separator is not found, returns a 3-tuple containing the original " -"string and two empty strings." +":py:obj:`evaluate `\\ \\(parameters\\, " +"config\\)" msgstr "" -#: flwr.common.EventType.removeprefix:3 of -msgid "" -"If the string starts with the prefix string, return string[len(prefix):]." -" Otherwise, return a copy of the original string." +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`fit `\\ \\(parameters\\, config\\)" msgstr "" -#: flwr.common.EventType.removesuffix:3 of -msgid "" -"If the string ends with the suffix string and that suffix is not empty, " -"return string[:-len(suffix)]. Otherwise, return a copy of the original " -"string." 
+#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.fit:1 of +msgid "Train the provided parameters using the locally held dataset." msgstr "" -#: flwr.common.EventType.replace:5 of -msgid "count" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`get_context `\\ \\(\\)" msgstr "" -#: flwr.common.EventType.replace:4 of +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 msgid "" -"Maximum number of occurrences to replace. -1 (the default value) means " -"replace all occurrences." +":py:obj:`get_parameters `\\ " +"\\(config\\)" msgstr "" -#: flwr.common.EventType.replace:7 of +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 msgid "" -"If the optional argument count is given, only the first count occurrences" -" are replaced." +":py:obj:`get_properties `\\ " +"\\(config\\)" msgstr "" -#: flwr.common.EventType.rfind:1 flwr.common.EventType.rindex:1 of -msgid "" -"Return the highest index in S where substring sub is found, such that sub" -" is contained within S[start:end]. Optional arguments start and end are " -"interpreted as in slice notation." +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.get_properties:1 of +msgid "Return a client's set of properties." msgstr "" -#: flwr.common.EventType.rpartition:3 of +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 msgid "" -"This will search for the separator in the string, starting at the end. If" -" the separator is found, returns a 3-tuple containing the part before the" -" separator, the separator itself, and the part after it." +":py:obj:`set_context `\\ " +"\\(context\\)" msgstr "" -#: flwr.common.EventType.rpartition:7 of -msgid "" -"If the separator is not found, returns a 3-tuple containing two empty " -"strings and the original string." 
+#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`to_client `\\ \\(\\)" msgstr "" -#: flwr.common.EventType.rsplit:7 flwr.common.EventType.split:7 of -msgid "sep" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.to_client:1 of +msgid "Convert to object to Client type and return it." msgstr "" -#: flwr.common.EventType.rsplit:4 flwr.common.EventType.split:4 of -msgid "The separator used to split the string." +#: flwr.client.NumPyClient.context:1::1 of +msgid ":py:obj:`context `\\" msgstr "" -#: flwr.common.EventType.rsplit:6 flwr.common.EventType.split:6 of +#: flwr.client.numpy_client.NumPyClient.evaluate:3 +#: flwr.client.numpy_client.NumPyClient.fit:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:5 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:8 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:5 +#: flwr.server.strategy.strategy.Strategy.configure_fit:5 +#: flwr.server.strategy.strategy.Strategy.evaluate:8 of +msgid "The current (global) model parameters." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.evaluate:5 of msgid "" -"When set to None (the default value), will split on any whitespace " -"character (including \\\\n \\\\r \\\\t \\\\f and spaces) and will discard" -" empty strings from the result." +"Configuration parameters which allow the server to influence evaluation " +"on the client. It can be used to communicate arbitrary values from the " +"server to the client, for example, to influence the number of examples " +"used for evaluation." msgstr "" -#: flwr.common.EventType.rsplit:11 flwr.common.EventType.split:11 of -msgid "maxsplit" +#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +msgid "" +"* **loss** (*float*) -- The evaluation loss of the model on the local " +"dataset. * **num_examples** (*int*) -- The number of examples used for " +"evaluation. 
* **metrics** (*Dict[str, Scalar]*) -- A dictionary mapping " +"arbitrary string keys to values of type bool, bytes, float, int, or " +"str. It can be used to communicate arbitrary values back to the server." msgstr "" -#: flwr.common.EventType.rsplit:10 flwr.common.EventType.split:10 of +#: flwr.client.numpy_client.NumPyClient.evaluate:11 of msgid "" -"Maximum number of splits (starting from the left). -1 (the default value)" -" means no limit." +"**loss** (*float*) -- The evaluation loss of the model on the local " +"dataset." msgstr "" -#: flwr.common.EventType.rsplit:13 of -msgid "Splitting starts at the end of the string and works to the front." +#: flwr.client.numpy_client.NumPyClient.evaluate:12 of +msgid "**num_examples** (*int*) -- The number of examples used for evaluation." msgstr "" -#: flwr.common.EventType.split:13 of +#: flwr.client.numpy_client.NumPyClient.evaluate:13 +#: flwr.client.numpy_client.NumPyClient.fit:13 of msgid "" -"Note, str.split() is mainly useful for data that has been intentionally " -"delimited. With natural text that includes punctuation, consider using " -"the regular expression module." +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. It can be " +"used to communicate arbitrary values back to the server." msgstr "" -#: flwr.common.EventType.splitlines:3 of +#: flwr.client.numpy_client.NumPyClient.evaluate:19 of msgid "" -"Line breaks are not included in the resulting list unless keepends is " -"given and true." +"The previous return type format (int, float, float) and the extended " +"format (int, float, float, Dict[str, Scalar]) have been deprecated and " +"removed since Flower 0.19." msgstr "" -#: flwr.common.EventType.startswith:1 of +#: flwr.client.numpy_client.NumPyClient.fit:5 of msgid "" -"Return True if S starts with the specified prefix, False otherwise. With " -"optional start, test S beginning at that position. 
With optional end, " -"stop comparing S at that position. prefix can also be a tuple of strings " -"to try." +"Configuration parameters which allow the server to influence training on " +"the client. It can be used to communicate arbitrary values from the " +"server to the client, for example, to set the number of (local) training " +"epochs." msgstr "" -#: flwr.common.EventType.title:3 of +#: flwr.client.numpy_client.NumPyClient.fit:11 of msgid "" -"More specifically, words start with uppercased characters and all " -"remaining cased characters have lower case." +"* **parameters** (*NDArrays*) -- The locally updated model parameters. * " +"**num_examples** (*int*) -- The number of examples used for training. * " +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. It can " +"be used to communicate arbitrary values back to the server." msgstr "" -#: flwr.common.EventType.translate:5 of -msgid "table" +#: flwr.client.numpy_client.NumPyClient.fit:11 of +msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." msgstr "" -#: flwr.common.EventType.translate:4 of -msgid "" -"Translation table, which must be a mapping of Unicode ordinals to Unicode" -" ordinals, strings, or None." +#: flwr.client.numpy_client.NumPyClient.fit:12 of +msgid "**num_examples** (*int*) -- The number of examples used for training." msgstr "" -#: flwr.common.EventType.translate:7 of +#: flwr.client.numpy_client.NumPyClient.get_parameters:3 of msgid "" -"The table must implement lookup/indexing via __getitem__, for instance a " -"dictionary or list. If this operation raises LookupError, the character " -"is left untouched. Characters mapped to None are deleted." +"Configuration parameters requested by the server. This can be used to " +"tell the client which parameters are needed along with some Scalar " +"attributes." 
msgstr "" -#: flwr.common.EventType.zfill:3 of -msgid "The string is never truncated." +#: flwr.client.numpy_client.NumPyClient.get_parameters:8 of +msgid "**parameters** -- The local model parameters as a list of NumPy ndarrays." msgstr "" -#: ../../source/ref-api/flwr.common.FitIns.rst:2 -msgid "FitIns" +#: flwr.client.numpy_client.NumPyClient.get_properties:3 of +msgid "" +"Configuration parameters requested by the server. This can be used to " +"tell the client which properties are needed along with some Scalar " +"attributes." msgstr "" -#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 -msgid ":py:obj:`parameters `\\" +#: flwr.client.numpy_client.NumPyClient.get_properties:8 of +msgid "" +"**properties** -- A dictionary mapping arbitrary string keys to values of" +" type bool, bytes, float, int, or str. It can be used to communicate " +"arbitrary property values back to the server." msgstr "" -#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 -msgid ":py:obj:`config `\\" +#: ../../source/ref-api/flwr.client.mod.rst:2 +msgid "mod" msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:2 -msgid "FitRes" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`adaptiveclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`status `\\" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:1 of +msgid "Client-side adaptive clipping modifier." 
msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`parameters `\\" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`fixedclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`num_examples `\\" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:1 of +msgid "Client-side fixed clipping modifier." msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`metrics `\\" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:2 -msgid "GetParametersIns" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.utils.make_ffn:1 of +msgid "." msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:28::1 -msgid ":py:obj:`config `\\" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`message_size_mod `\\ \\(msg\\," +" ctxt\\, call\\_next\\)" msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:2 -msgid "GetParametersRes" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.comms_mods.message_size_mod:1 of +msgid "Message size mod." msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 -msgid ":py:obj:`status `\\" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`parameters_size_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 -msgid ":py:obj:`parameters `\\" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.comms_mods.parameters_size_mod:1 of +msgid "Parameters size mod." 
msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:2 -msgid "GetPropertiesIns" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`secagg_mod `\\ \\(msg\\, ctxt\\, " +"call\\_next\\)" msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:28::1 -msgid ":py:obj:`config `\\" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.secure_aggregation.secagg_mod.secagg_mod:1 of +msgid "Handle incoming message and return results, following the SecAgg protocol." msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:2 -msgid "GetPropertiesRes" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`secaggplus_mod `\\ \\(msg\\, " +"ctxt\\, call\\_next\\)" msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 -msgid ":py:obj:`status `\\" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.secure_aggregation.secaggplus_mod.secaggplus_mod:1 of +msgid "" +"Handle incoming message and return results, following the SecAgg+ " +"protocol." msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 -msgid ":py:obj:`properties `\\" +#: ../../source/ref-api/flwr.client.mod.rst:35::1 +msgid "" +":py:obj:`LocalDpMod `\\ \\(clipping\\_norm\\," +" sensitivity\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:2 -msgid "Message" +#: ../../source/ref-api/flwr.client.mod.rst:35::1 +#: flwr.client.mod.localdp_mod.LocalDpMod:1 of +msgid "Modifier for local differential privacy." msgstr "" -#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 -#: flwr.common.message.Message:3 of -msgid "A dataclass including information about the message to be executed." +#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:2 +msgid "LocalDpMod" msgstr "" -#: flwr.common.message.Message:5 of +#: flwr.client.mod.localdp_mod.LocalDpMod:3 of msgid "" -"Holds records either sent by another entity (e.g. 
sent by the server-side" -" logic to a client, or vice-versa) or that will be sent to it." +"This mod clips the client model updates and adds noise to the params " +"before sending them to the server." msgstr "" -#: flwr.common.message.Message:8 of -msgid "" -"A dataclass that captures information about an error that took place when" -" processing another message." +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:12 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:10 +#: flwr.client.mod.localdp_mod.LocalDpMod:6 of +msgid "It operates on messages of type `MessageType.TRAIN`." msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -msgid "" -":py:obj:`create_error_reply `\\ " -"\\(error\\[\\, ttl\\]\\)" +#: flwr.client.mod.localdp_mod.LocalDpMod:8 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 +#: of +msgid "The value of the clipping norm." msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.create_error_reply:1 of -msgid "Construct a reply message indicating an error happened." +#: flwr.client.mod.localdp_mod.LocalDpMod:10 of +msgid "The sensitivity of the client model." msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.client.mod.localdp_mod.LocalDpMod:12 of msgid "" -":py:obj:`create_reply `\\ " -"\\(content\\[\\, ttl\\]\\)" +"The privacy budget. Smaller value of epsilon indicates a higher level of " +"privacy protection." msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.create_reply:1 of -msgid "Create a reply to this message with specified content and TTL." +#: flwr.client.mod.localdp_mod.LocalDpMod:15 of +msgid "" +"The failure probability. The probability that the privacy mechanism fails" +" to provide the desired level of privacy. 
A smaller value of delta " +"indicates a stricter privacy guarantee." msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -msgid ":py:obj:`has_content `\\ \\(\\)" +#: flwr.client.mod.localdp_mod.LocalDpMod:23 of +msgid "Create an instance of the local DP mod and add it to the client-side mods:" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.has_content:1 of -msgid "Return True if message has content, else False." +#: ../../source/ref-api/flwr.client.mod.adaptiveclipping_mod.rst:2 +msgid "adaptiveclipping\\_mod" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -msgid ":py:obj:`has_error `\\ \\(\\)" +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:3 of +msgid "" +"This mod needs to be used with the " +"DifferentialPrivacyClientSideAdaptiveClipping server-side strategy " +"wrapper." msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.has_error:1 of -msgid "Return True if message has an error, else False." +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:6 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:6 of +msgid "The wrapper sends the clipping_norm value to the client." msgstr "" -#: flwr.common.Message.content:1::1 of -msgid ":py:obj:`content `\\" +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:8 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:8 of +msgid "This mod clips the client model updates before sending them to the server." msgstr "" -#: flwr.common.Message.content:1 flwr.common.Message.content:1::1 -#: of -msgid "The content of this message." +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:10 of +msgid "" +"It also sends KEY_NORM_BIT to the server for computing the new clipping " +"value." 
msgstr "" -#: flwr.common.Message.content:1::1 of -msgid ":py:obj:`error `\\" +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:15 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:13 +#: flwr.server.driver.driver.Driver.send_and_receive:18 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 +#: of +msgid "Notes" msgstr "" -#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of -msgid "Error captured by this message." +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:16 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:14 of +msgid "Consider the order of mods when using multiple." msgstr "" -#: flwr.common.Message.content:1::1 of -msgid ":py:obj:`metadata `\\" +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:18 of +msgid "Typically, adaptiveclipping_mod should be the last to operate on params." msgstr "" -#: flwr.common.message.Message.create_error_reply:3 of -msgid "The error that was encountered." +#: ../../source/ref-api/flwr.client.mod.fixedclipping_mod.rst:2 +msgid "fixedclipping\\_mod" msgstr "" -#: flwr.common.message.Message.create_error_reply:5 -#: flwr.common.message.Message.create_reply:9 of +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:3 of msgid "" -"Time-to-live for this message in seconds. If unset, it will be set based " -"on the remaining time for the received message before it expires. This " -"follows the equation: ttl = msg.meta.ttl - (reply.meta.created_at - " -"msg.meta.created_at)" +"This mod needs to be used with the " +"DifferentialPrivacyClientSideFixedClipping server-side strategy wrapper." msgstr "" -#: flwr.common.message.Message.create_error_reply:5 -#: flwr.common.message.Message.create_reply:9 of -msgid "" -"Time-to-live for this message in seconds. If unset, it will be set based " -"on the remaining time for the received message before it expires. 
This " -"follows the equation:" +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:16 of +msgid "Typically, fixedclipping_mod should be the last to operate on params." msgstr "" -#: flwr.common.message.Message.create_error_reply:9 -#: flwr.common.message.Message.create_reply:13 of -msgid "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" +#: ../../source/ref-api/flwr.client.mod.make_ffn.rst:2 +msgid "make\\_ffn" msgstr "" -#: flwr.common.message.Message.create_reply:3 of -msgid "" -"The method generates a new `Message` as a reply to this message. It " -"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " -"this message and sets 'reply_to_message' to the ID of this message." +#: ../../source/ref-api/flwr.client.mod.message_size_mod.rst:2 +msgid "message\\_size\\_mod" msgstr "" -#: flwr.common.message.Message.create_reply:7 of -msgid "The content for the reply message." +#: flwr.client.mod.comms_mods.message_size_mod:3 of +msgid "This mod logs the size in bytes of the message being transmited." msgstr "" -#: flwr.common.message.Message.create_reply:16 of -msgid "A new `Message` instance representing the reply." +#: ../../source/ref-api/flwr.client.mod.parameters_size_mod.rst:2 +msgid "parameters\\_size\\_mod" msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:2 -msgid "MessageType" +#: flwr.client.mod.comms_mods.parameters_size_mod:3 of +msgid "" +"This mod logs the number of parameters transmitted in the message as well" +" as their size in bytes." 
msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 -msgid ":py:obj:`EVALUATE `\\" +#: ../../source/ref-api/flwr.client.mod.secagg_mod.rst:2 +msgid "secagg\\_mod" msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 -msgid ":py:obj:`QUERY `\\" +#: ../../source/ref-api/flwr.client.mod.secaggplus_mod.rst:2 +msgid "secaggplus\\_mod" msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 -msgid ":py:obj:`TRAIN `\\" -msgstr "" +#: ../../source/ref-api/flwr.client.start_client.rst:2 +msgid "start\\_client" +msgstr "" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 -msgid "MessageTypeLegacy" +#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of +msgid "" +"The IPv4 or IPv6 address of the server. If the Flower server runs on the " +"same machine on port 8080, then `server_address` would be " +"`\"[::]:8080\"`." msgstr "" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 -msgid ":py:obj:`GET_PARAMETERS `\\" +#: flwr.client.app.start_client:7 of +msgid "A callable that instantiates a Client. (default: None)" msgstr "" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 -msgid ":py:obj:`GET_PROPERTIES `\\" +#: flwr.client.app.start_client:9 of +msgid "" +"An implementation of the abstract base class `flwr.client.Client` " +"(default: None)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of -msgid "An identifier for the current run." +#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of +msgid "" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" server. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. 
Note that the " +"Flower server needs to be started with the same value (see " +"`flwr.server.start_server`), otherwise it will not know about the " +"increased limit and block larger messages." msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of -msgid "An identifier for the current message." +#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 of +msgid "" +"The PEM-encoded root certificates as a byte string or a path string. If " +"provided, a secure connection using the certificates will be established " +"to an SSL-enabled Flower server." msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of -msgid "An identifier for the node sending this message." +#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of +msgid "" +"Starts an insecure gRPC connection when True. Enables HTTPS connection " +"when False, using system certificates if `root_certificates` is None." msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.dst_node_id:1 flwr.common.message.Metadata:9 of -msgid "An identifier for the node receiving this message." +#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of +msgid "" +"Configure the transport layer. Allowed values: - 'grpc-bidi': gRPC, " +"bidirectional streaming - 'grpc-rere': gRPC, request-response " +"(experimental) - 'rest': HTTP (experimental)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of -msgid "An identifier for the message this message replies to." +#: flwr.client.app.start_client:31 of +msgid "" +"The maximum number of times the client will try to connect to the server " +"before giving up in case of a connection error. If set to None, there is " +"no limit to the number of tries." 
msgstr "" -#: flwr.common.message.Metadata:13 of +#: flwr.client.app.start_client:35 of msgid "" -"An identifier for grouping messages. In some settings, this is used as " -"the FL round." +"The maximum duration before the client stops trying to connect to the " +"server in case of connection error. If set to None, there is no limit to " +"the total time." msgstr "" -#: flwr.common.message.Metadata:16 of -msgid "Time-to-live for this message in seconds." +#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of +msgid "Starting a gRPC client with an insecure server connection:" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of -msgid "A string that encodes the action to be executed on the receiving end." +#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of +msgid "Starting an SSL-enabled gRPC client using system certificates:" msgstr "" -#: flwr.common.message.Metadata:21 of -msgid "" -"An identifier that can be used when loading a particular data partition " -"for a ClientApp. Making use of this identifier is more relevant when " -"conducting simulations." +#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of +msgid "Starting an SSL-enabled gRPC client using provided certificates:" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`created_at `\\" +#: ../../source/ref-api/flwr.client.start_numpy_client.rst:2 +msgid "start\\_numpy\\_client" msgstr "" -#: flwr.common.Metadata.created_at:1 -#: flwr.common.Metadata.created_at:1::1 of -msgid "Unix timestamp when the message was created." +#: flwr.client.app.start_numpy_client:5 of +msgid "" +"This function is deprecated since 1.7.0. Use " +":code:`flwr.client.start_client` instead and first convert your " +":code:`NumPyClient` to type :code:`flwr.client.Client` by executing its " +":code:`to_client()` method." 
msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`dst_node_id `\\" +#: flwr.client.app.start_numpy_client:13 of +msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`group_id `\\" +#: ../../source/ref-api/flwr.common.rst:2 +msgid "common" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.group_id:1 of -msgid "An identifier for grouping messages." +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`message_id `\\" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.record.conversion_utils.array_from_numpy:1 of +msgid "Create Array from NumPy ndarray." msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`message_type `\\" +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`partition_id `\\" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.bytes_to_ndarray:1 of +msgid "Deserialize NumPy ndarray from bytes." msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.partition_id:1 of -msgid "An identifier telling which data partition a ClientApp should use." +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`configure `\\ \\(identifier\\[\\, " +"filename\\, host\\]\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`reply_to_message `\\" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.logger.configure:1 of +msgid "Configure logging to file and/or remote log server." 
msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`run_id `\\" +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`event `\\ \\(event\\_type\\[\\, " +"event\\_details\\]\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`src_node_id `\\" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.telemetry.event:1 of +msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`ttl `\\" +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 flwr.common.Metadata.ttl:1 +#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 #: of -msgid "Time-to-live for this message." +msgid "Log 'msg % args' with the integer severity 'level'." msgstr "" -#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 -msgid "MetricsRecord" +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" msgstr "" -#: flwr.common.record.metricsrecord.MetricsRecord:1 of -msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`int` | :py:class:`float` | " -":py:class:`~typing.List`\\ [:py:class:`int`] | :py:class:`~typing.List`\\" -" [:py:class:`float`]]" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.ndarray_to_bytes:1 of +msgid "Serialize NumPy ndarray to bytes." 
msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`clear `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`ndarrays_to_parameters `\\ " +"\\(ndarrays\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`count_bytes `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.ndarrays_to_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 +#: of +msgid "Convert NumPy ndarrays to parameters object." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`now `\\ \\(\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`items `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.date.now:1 of +msgid "Construct a datetime from time.time() with time zone set to UTC." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`keys `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`parameters_to_ndarrays `\\ " +"\\(parameters\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.parameters_to_ndarrays:1 of +msgid "Convert parameters object to NumPy ndarrays." 
msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" +":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " +"data\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`values `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.parametersrecord.Array:1 of +msgid "Array type." msgstr "" -#: ../../source/ref-api/flwr.common.NDArray.rst:2 -msgid "NDArray" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`ClientMessage `\\ " +"\\(\\[get\\_properties\\_res\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 -msgid ":py:obj:`tensors `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ClientMessage:1 of +msgid "ClientMessage is a container used to hold one result message." msgstr "" -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 -msgid ":py:obj:`tensor_type `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`Code `\\ \\(value\\)" msgstr "" -#: ../../source/ref-api/flwr.common.ParametersRecord.rst:2 -msgid "ParametersRecord" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Code:1 of +msgid "Client status codes." msgstr "" -#: flwr.common.record.parametersrecord.ParametersRecord:1 of -msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`Config `\\" msgstr "" -#: flwr.common.record.parametersrecord.ParametersRecord:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"A dataclass storing named Arrays in order. This means that it holds " -"entries as an OrderedDict[str, Array]. ParametersRecord objects can be " -"viewed as an equivalent to PyTorch's state_dict, but holding serialised " -"tensors instead." 
+"alias of :py:class:`dict`\\ [:py:class:`str`, :py:class:`bool` | " +":py:class:`bytes` | :py:class:`float` | :py:class:`int` | " +":py:class:`str`]" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`clear `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`ConfigsRecord `\\ " +"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`count_bytes `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.configsrecord.ConfigsRecord:1 of +msgid "Configs record." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`Context `\\ \\(node\\_id\\, " +"node\\_config\\, state\\, run\\_config\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`items `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.context.Context:1 of +msgid "Context of your run." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`keys `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.DisconnectRes:1 of +msgid "DisconnectRes message from client to server." 
msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`values `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.message.Error:1 of +msgid "A dataclass that stores information about an error that occurred." msgstr "" -#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"Note that a small amount of Bytes might also be included in this counting" -" that correspond to metadata of the serialized object (e.g. of NumPy " -"array) needed for deseralization." +":py:obj:`EvaluateIns `\\ \\(parameters\\, " +"config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 -msgid "ReconnectIns" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.EvaluateIns:1 of +msgid "Evaluate instructions for a client." msgstr "" -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:28::1 -msgid ":py:obj:`seconds `\\" -msgstr "" - -#: ../../source/ref-api/flwr.common.RecordSet.rst:2 -msgid "RecordSet" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " +"num\\_examples\\, metrics\\)" msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 of -msgid ":py:obj:`configs_records `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.EvaluateRes:1 of +msgid "Evaluate response from a client." msgstr "" -#: flwr.common.RecordSet.configs_records:1 -#: flwr.common.RecordSet.configs_records:1::1 of -msgid "Dictionary holding ConfigsRecord instances." 
+#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`EventType `\\ \\(value\\)" msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 of -msgid ":py:obj:`metrics_records `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.telemetry.EventType:1 of +msgid "Types of telemetry events." msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 -#: flwr.common.RecordSet.metrics_records:1 of -msgid "Dictionary holding MetricsRecord instances." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 of -msgid ":py:obj:`parameters_records `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.FitIns:1 of +msgid "Fit instructions for a client." msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 -#: flwr.common.RecordSet.parameters_records:1 of -msgid "Dictionary holding ParametersRecord instances." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`FitRes `\\ \\(status\\, parameters\\, " +"num\\_examples\\, metrics\\)" msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:2 -msgid "ServerMessage" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.FitRes:1 of +msgid "Fit response from a client." msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 -msgid ":py:obj:`evaluate_ins `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 -msgid ":py:obj:`fit_ins `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetParametersIns:1 of +msgid "Parameters request for a client." 
msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -":py:obj:`get_parameters_ins " -"`\\" +":py:obj:`GetParametersRes `\\ \\(status\\, " +"parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 -msgid "" -":py:obj:`get_properties_ins " -"`\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetParametersRes:1 of +msgid "Response when asked to return parameters." msgstr "" -#: ../../source/ref-api/flwr.common.Status.rst:2 -msgid "Status" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Status.rst:29::1 -msgid ":py:obj:`code `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetPropertiesIns:1 of +msgid "Properties request for a client." msgstr "" -#: ../../source/ref-api/flwr.common.Status.rst:29::1 -msgid ":py:obj:`message `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`GetPropertiesRes `\\ \\(status\\, " +"properties\\)" msgstr "" -#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 -msgid "array\\_from\\_numpy" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetPropertiesRes:1 of +msgid "Properties response from a client." msgstr "" -#: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 -msgid "bytes\\_to\\_ndarray" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " +"error\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.configure.rst:2 -msgid "configure" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.message.Message:1 of +msgid "State of your application from the viewpoint of the entity using it." 
msgstr "" -#: ../../source/ref-api/flwr.common.event.rst:2 -msgid "event" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`MessageType `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.log.rst:2 -msgid "log" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.constant.MessageType:1 of +msgid "Message type." msgstr "" -#: logging.Logger.log:3 of -msgid "" -"To pass exception information, use the keyword argument exc_info with a " -"true value, e.g." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" msgstr "" -#: logging.Logger.log:6 of -#, python-format -msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.constant.MessageTypeLegacy:1 of +msgid "Legacy message type." msgstr "" -#: ../../source/ref-api/flwr.common.ndarray_to_bytes.rst:2 -msgid "ndarray\\_to\\_bytes" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`Metadata `\\ \\(run\\_id\\, " +"message\\_id\\, src\\_node\\_id\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.ndarrays_to_parameters.rst:2 -msgid "ndarrays\\_to\\_parameters" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.message.Metadata:1 of +msgid "A dataclass holding metadata associated with the current message." msgstr "" -#: ../../source/ref-api/flwr.common.now.rst:2 -msgid "now" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`Metrics `\\" msgstr "" -#: ../../source/ref-api/flwr.common.parameters_to_ndarrays.rst:2 -msgid "parameters\\_to\\_ndarrays" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`MetricsRecord `\\ " +"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:2 -msgid "server" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.metricsrecord.MetricsRecord:1 of +msgid "Metrics recod." 
msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -msgid ":py:obj:`run_driver_api `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`NDArray `\\" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -#: flwr.server.app.run_driver_api:1 of -msgid "Run Flower server (Driver API)." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " +":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -msgid ":py:obj:`run_fleet_api `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`NDArrays `\\" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -#: flwr.server.app.run_fleet_api:1 of -msgid "Run Flower server (Fleet API)." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +"alias of :py:class:`list`\\ [:py:class:`~numpy.ndarray`\\ " +"[:py:obj:`~typing.Any`, :py:class:`~numpy.dtype`\\ " +"[:py:obj:`~typing.Any`]]]" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -msgid ":py:obj:`run_server_app `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`Parameters `\\ \\(tensors\\, " +"tensor\\_type\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -#: flwr.server.run_serverapp.run_server_app:1 of -msgid "Run Flower server app." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Parameters:1 of +msgid "Model parameters." msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -msgid ":py:obj:`run_superlink `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`ParametersRecord `\\ " +"\\(\\[array\\_dict\\, keep\\_input\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -#: flwr.server.app.run_superlink:1 of -msgid "Run Flower SuperLink (Driver API and Fleet API)." 
+#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.parametersrecord.ParametersRecord:1 of +msgid "Parameters record." msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -msgid "" -":py:obj:`start_server `\\ \\(\\*\\[\\, " -"server\\_address\\, server\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`Properties `\\" msgstr "" -#: ../../source/ref-api/flwr.server.rst:26::1 -#: flwr.server.app.start_server:1 of -msgid "Start a Flower server using the gRPC transport layer." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -msgid ":py:obj:`ClientManager `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ReconnectIns:1 of +msgid "ReconnectIns message from server to client." msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -#: flwr.server.client_manager.ClientManager:1 of -msgid "Abstract base class for managing Flower clients." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`RecordSet `\\ " +"\\(\\[parameters\\_records\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -msgid ":py:obj:`Driver `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.recordset.RecordSet:1 of +msgid "RecordSet stores groups of parameters, metrics and configs." msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -#: flwr.server.driver.driver.Driver:1 of -msgid "Abstract base Driver class for the Driver API." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`ServerMessage `\\ " +"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -msgid ":py:obj:`History `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ServerMessage:1 of +msgid "ServerMessage is a container used to hold one instruction message." 
msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -#: flwr.server.history.History:1 of -msgid "History class for training and/or evaluation metrics collection." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`Status `\\ \\(code\\, message\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -msgid "" -":py:obj:`LegacyContext `\\ \\(state\\[\\, " -"config\\, strategy\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Status:1 of +msgid "Client status." msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -#: flwr.server.compat.legacy_context.LegacyContext:1 of -msgid "Legacy Context." +#: ../../source/ref-api/flwr.common.Array.rst:2 +msgid "Array" msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 +#: flwr.common.record.parametersrecord.Array:3 of msgid "" -":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " -"strategy\\]\\)" +"A dataclass containing serialized data from an array-like or tensor-like " +"object along with some metadata about it." msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 +#: flwr.common.record.parametersrecord.Array:6 of msgid "" -":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " -"strategy\\, ...\\]\\)" +"A string representing the data type of the serialised object (e.g. " +"`np.float32`)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -#: flwr.server.server_app.ServerApp:1 of -msgid "Flower ServerApp." +#: flwr.common.record.parametersrecord.Array:8 of +msgid "" +"A list representing the shape of the unserialized array-like object. This" +" is used to deserialize the data (depending on the serialization method) " +"or simply as a metadata field." 
msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 +#: flwr.common.record.parametersrecord.Array:12 of msgid "" -":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," -" round\\_timeout\\]\\)" +"A string indicating the type of serialisation mechanism used to generate " +"the bytes in `data` from an array-like or tensor-like object." msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -#: flwr.server.server_config.ServerConfig:1 of -msgid "Flower server config." +#: flwr.common.record.parametersrecord.Array:15 of +msgid "A buffer of bytes containing the data." msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +msgid ":py:obj:`numpy `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:40::1 -#: flwr.server.client_manager.SimpleClientManager:1 of -msgid "Provides a pool of available clients." +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +#: flwr.common.record.parametersrecord.Array.numpy:1 of +msgid "Return the array as a NumPy array." msgstr "" -#: ../../source/ref-api/flwr.server.rst:59::1 -msgid ":py:obj:`flwr.server.strategy `\\" +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`dtype `\\" msgstr "" -#: ../../source/ref-api/flwr.server.rst:59::1 -#: flwr.server.strategy:1 of -msgid "Contains the strategy abstraction and different implementations." +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`shape `\\" msgstr "" -#: ../../source/ref-api/flwr.server.rst:59::1 -msgid ":py:obj:`flwr.server.workflow `\\" +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`stype `\\" msgstr "" -#: ../../source/ref-api/flwr.server.rst:59::1 -#: flwr.server.workflow:1 of -msgid "Workflows." 
+#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`data `\\" msgstr "" -#: ../../source/ref-api/flwr.server.ClientManager.rst:2 -msgid "ClientManager" +#: ../../source/ref-api/flwr.common.ClientMessage.rst:2 +msgid "ClientMessage" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`all `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid ":py:obj:`evaluate_res `\\" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1 -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.all:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid "Return all available clients." +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid ":py:obj:`fit_res `\\" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`num_available `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid "" +":py:obj:`get_parameters_res " +"`\\" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.num_available:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.num_available:1 of -msgid "Return the number of available clients." +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid "" +":py:obj:`get_properties_res " +"`\\" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`register `\\ \\(client\\)" +#: ../../source/ref-api/flwr.common.Code.rst:2 +msgid "Code" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.register:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.register:1 of -msgid "Register Flower ClientProxy instance." 
+#: flwr.common.typing.Code:1 of +msgid "Bases: :py:class:`~enum.Enum`" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid "" -":py:obj:`sample `\\ " -"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid ":py:obj:`OK `\\" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.sample:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.sample:1 of -msgid "Sample a number of Flower ClientProxy instances." +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid "" +":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " +"`\\" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`unregister `\\ \\(client\\)" +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid "" +":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " +"`\\" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.unregister:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.unregister:1 of -msgid "Unregister Flower ClientProxy instance." +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid ":py:obj:`FIT_NOT_IMPLEMENTED `\\" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.Code.rst:26::1 msgid "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\, timeout\\)" +":py:obj:`EVALUATE_NOT_IMPLEMENTED " +"`\\" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.wait_for:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.wait_for:1 of -msgid "Wait until at least `num_clients` are available." 
+#: ../../source/ref-api/flwr.common.Config.rst:2 +msgid "Config" msgstr "" -#: flwr.server.client_manager.ClientManager.num_available:3 -#: flwr.server.client_manager.SimpleClientManager.num_available:3 of -msgid "**num_available** -- The number of currently available clients." +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 +msgid "ConfigsRecord" msgstr "" -#: flwr.server.client_manager.ClientManager.register:6 -#: flwr.server.client_manager.SimpleClientManager.register:6 of +#: flwr.common.record.configsrecord.ConfigsRecord:1 of msgid "" -"**success** -- Indicating if registration was successful. False if " -"ClientProxy is already registered or can not be registered for any " -"reason." -msgstr "" - -#: flwr.server.client_manager.ClientManager.unregister:3 -#: flwr.server.client_manager.SimpleClientManager.unregister:3 of -msgid "This method is idempotent." +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | :py:class:`str` |" +" :py:class:`bytes` | :py:class:`bool` | :py:class:`list`\\ " +"[:py:class:`int`] | :py:class:`list`\\ [:py:class:`float`] | " +":py:class:`list`\\ [:py:class:`str`] | :py:class:`list`\\ " +"[:py:class:`bytes`] | :py:class:`list`\\ [:py:class:`bool`]]" msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:2 -msgid "Driver" +#: flwr.common.record.configsrecord.ConfigsRecord:3 of +msgid "" +"A :code:`ConfigsRecord` is a Python dictionary designed to ensure that " +"each key-value pair adheres to specified data types. A " +":code:`ConfigsRecord` is one of the types of records that a " +"`flwr.common.RecordSet `_ supports " +"and can therefore be used to construct :code:`common.Message` objects." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: flwr.common.record.configsrecord.ConfigsRecord:9 of msgid "" -":py:obj:`create_message `\\ " -"\\(content\\, message\\_type\\, ...\\[\\, ttl\\]\\)" +"A dictionary that stores basic types (i.e. 
`str`, `int`, `float`, `bytes`" +" as defined in `ConfigsScalar`) and lists of such types (see " +"`ConfigsScalarList`)." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1 -#: flwr.server.driver.driver.Driver.create_message:1::1 of -msgid "Create a new message with specified parameters." +#: flwr.common.record.configsrecord.ConfigsRecord:13 of +msgid "" +"A boolean indicating whether config passed should be deleted from the " +"input dictionary immediately after adding them to the record. When set to" +" True, the data is duplicated in memory. If memory is a concern, set it " +"to False." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of -msgid ":py:obj:`get_node_ids `\\ \\(\\)" +#: flwr.common.record.configsrecord.ConfigsRecord:21 of +msgid "" +"The usage of a :code:`ConfigsRecord` is envisioned for sending " +"configuration values telling the target node how to perform a certain " +"action (e.g. train/evaluate a model ). You can use standard Python built-" +"in types such as :code:`float`, :code:`str` , :code:`bytes`. All types " +"allowed are defined in :code:`flwr.common.ConfigsRecordValues`. While " +"lists are supported, we encourage you to use a :code:`ParametersRecord` " +"instead if these are of high dimensionality." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.get_node_ids:1 of -msgid "Get node IDs." +#: flwr.common.record.configsrecord.ConfigsRecord:29 of +msgid "" +"Let's see some examples of how to construct a :code:`ConfigsRecord` from " +"scratch:" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: flwr.common.record.configsrecord.ConfigsRecord:42 of msgid "" -":py:obj:`pull_messages `\\ " -"\\(message\\_ids\\)" +"Just like the other types of records in a :code:`flwr.common.RecordSet`, " +"types are enforced. 
If you need to add a custom data structure or object," +" we recommend to serialise it into bytes and save it as such (bytes are " +"allowed in a :code:`ConfigsRecord`)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.pull_messages:1 of -msgid "Pull messages based on message IDs." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of -msgid "" -":py:obj:`push_messages `\\ " -"\\(messages\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.push_messages:1 of -msgid "Push messages to specified node IDs." +#: collections.abc.MutableMapping.clear:1::1 +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 +#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 of +msgid "Return number of Bytes stored in this object." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of -msgid "" -":py:obj:`send_and_receive `\\ " -"\\(messages\\, \\*\\[\\, timeout\\]\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.send_and_receive:1 of -msgid "Push messages to specified node IDs and pull the reply messages." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:3 of -msgid "" -"This method constructs a new `Message` with given content and metadata. " -"The `run_id` and `src_node_id` will be set automatically." 
+#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:6 of -msgid "" -"The content for the new message. This holds records that are to be sent " -"to the destination node." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:9 of -msgid "" -"The type of the message, defining the action to be executed on the " -"receiving end." +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.pop:1 of +msgid "If key is not found, d is returned if given, otherwise KeyError is raised." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:12 of -msgid "The ID of the destination node to which the message is being sent." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`popitem `\\ \\(\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:14 of -msgid "" -"The ID of the group to which this message is associated. In some " -"settings, this is used as the FL round." +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.popitem:1 of +msgid "as a 2-tuple; but raise KeyError if D is empty." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:17 of +#: collections.abc.MutableMapping.clear:1::1 of msgid "" -"Time-to-live for the round trip of this message, i.e., the time from " -"sending this message to receiving a reply. It specifies in seconds the " -"duration for which the message and its potential reply are considered " -"valid. If unset, the default TTL (i.e., `common.DEFAULT_TTL`) will be " -"used." +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:23 of +#: collections.abc.MutableMapping.clear:1::1 of msgid "" -"**message** -- A new `Message` instance with the specified content and " -"metadata." 
+":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" msgstr "" -#: flwr.server.driver.driver.Driver.pull_messages:3 of +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.update:1 of msgid "" -"This method is used to collect messages from the SuperLink that " -"correspond to a set of given message IDs." +"If E present and has a .keys() method, does: for k in E: D[k] = E[k] " +"If E present and lacks .keys() method, does: for (k, v) in E: D[k] = " +"v In either case, this is followed by: for k, v in F.items(): D[k] = v" msgstr "" -#: flwr.server.driver.driver.Driver.pull_messages:6 of -msgid "An iterable of message IDs for which reply messages are to be retrieved." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" msgstr "" -#: flwr.server.driver.driver.Driver.pull_messages:9 of -msgid "**messages** -- An iterable of messages received." +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of +msgid "This function counts booleans as occupying 1 Byte." msgstr "" -#: flwr.server.driver.driver.Driver.push_messages:3 of -msgid "" -"This method takes an iterable of messages and sends each message to the " -"node specified in `dst_node_id`." +#: ../../source/ref-api/flwr.common.Context.rst:2 +msgid "Context" msgstr "" -#: flwr.server.driver.driver.Driver.push_messages:6 -#: flwr.server.driver.driver.Driver.send_and_receive:7 of -msgid "An iterable of messages to be sent." +#: flwr.common.context.Context:3 of +msgid "The ID that identifies the node." msgstr "" -#: flwr.server.driver.driver.Driver.push_messages:9 of +#: flwr.common.context.Context:5 of msgid "" -"**message_ids** -- An iterable of IDs for the messages that were sent, " -"which can be used to pull replies." +"A config (key/value mapping) unique to the node and independent of the " +"`run_config`. This config persists across all runs this node participates" +" in." 
msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:3 of +#: flwr.common.context.Context:8 of msgid "" -"This method sends a list of messages to their destination node IDs and " -"then waits for the replies. It continues to pull replies until either all" -" replies are received or the specified timeout duration is exceeded." +"Holds records added by the entity in a given run and that will stay " +"local. This means that the data it holds will never leave the system it's" +" running from. This can be used as an intermediate storage or scratchpad " +"when executing mods. It can also be used as a memory to access at " +"different points during the lifecycle of this entity (e.g. across " +"multiple rounds)" msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:9 of +#: flwr.common.context.Context:15 of msgid "" -"The timeout duration in seconds. If specified, the method will wait for " -"replies for this duration. If `None`, there is no time limit and the " -"method will wait until replies for all messages are received." +"A config (key/value mapping) held by the entity in a given run and that " +"will stay local. It can be used at any point during the lifecycle of this" +" entity (e.g. across multiple rounds)" msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:14 of -msgid "**replies** -- An iterable of reply messages received from the SuperLink." 
+#: ../../source/ref-api/flwr.common.Context.rst:31::1 +msgid ":py:obj:`node_id `\\" msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:18 -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:53 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:60 -#: of -msgid "Notes" +#: ../../source/ref-api/flwr.common.Context.rst:31::1 +msgid ":py:obj:`node_config `\\" msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:19 of -msgid "" -"This method uses `push_messages` to send the messages and `pull_messages`" -" to collect the replies. If `timeout` is set, the method may not return " -"replies for all sent messages. A message remains valid until its TTL, " -"which is not affected by `timeout`." +#: ../../source/ref-api/flwr.common.Context.rst:31::1 +msgid ":py:obj:`state `\\" msgstr "" -#: ../../source/ref-api/flwr.server.History.rst:2 -msgid "History" +#: ../../source/ref-api/flwr.common.Context.rst:31::1 +msgid ":py:obj:`run_config `\\" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "" -":py:obj:`add_loss_centralized " -"`\\ \\(server\\_round\\, " -"loss\\)" +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 +msgid "DisconnectRes" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1 -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "Add one loss entry (from centralized evaluation)." +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:28::1 +msgid ":py:obj:`reason `\\" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "" -":py:obj:`add_loss_distributed " -"`\\ \\(server\\_round\\, " -"loss\\)" +#: ../../source/ref-api/flwr.common.Error.rst:2 +msgid "Error" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_loss_distributed:1 of -msgid "Add one loss entry (from distributed evaluation)." 
+#: flwr.common.message.Error:3 of +msgid "An identifier for the error." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "" -":py:obj:`add_metrics_centralized " -"`\\ \\(server\\_round\\, " -"metrics\\)" +#: flwr.common.message.Error:5 of +msgid "A reason for why the error arose (e.g. an exception stack-trace)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_centralized:1 of -msgid "Add metrics entries (from centralized evaluation)." +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`code `\\" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "" -":py:obj:`add_metrics_distributed " -"`\\ \\(server\\_round\\, " -"metrics\\)" +#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of +msgid "Error code." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_distributed:1 of -msgid "Add metrics entries (from distributed evaluation)." +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`reason `\\" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "" -":py:obj:`add_metrics_distributed_fit " -"`\\ \\(server\\_round\\," -" ...\\)" +#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of +msgid "Reason reported about the error." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_distributed_fit:1 of -msgid "Add metrics entries (from distributed fit)." 
+#: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 +msgid "EvaluateIns" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 -msgid "LegacyContext" +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +msgid ":py:obj:`parameters `\\" msgstr "" -#: flwr.server.compat.legacy_context.LegacyContext:1 of -msgid "Bases: :py:class:`~flwr.common.context.Context`" +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +msgid ":py:obj:`config `\\" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`config `\\" +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:2 +msgid "EvaluateRes" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`strategy `\\" +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`status `\\" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`client_manager `\\" +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`loss `\\" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`history `\\" -msgstr "" - -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`state `\\" +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`num_examples `\\" msgstr "" -#: ../../source/ref-api/flwr.server.Server.rst:2 -msgid "Server" +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`metrics `\\" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of -msgid ":py:obj:`client_manager `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:2 +msgid "EventType" msgstr "" -#: flwr.server.server.Server.client_manager:1 -#: flwr.server.server.Server.client_manager:1::1 of -msgid "Return ClientManager." 
+#: flwr.common.telemetry.EventType:1 of +msgid "Bases: :py:class:`str`, :py:class:`~enum.Enum`" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`disconnect_all_clients " -"`\\ \\(timeout\\)" +":py:obj:`encode `\\ \\(\\[encoding\\, " +"errors\\]\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.disconnect_all_clients:1 of -msgid "Send shutdown signal to all clients." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.encode:1 of +msgid "Encode the string using the codec registered for encoding." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`evaluate_round `\\ " -"\\(server\\_round\\, timeout\\)" +":py:obj:`replace `\\ \\(old\\, new\\[\\, " +"count\\]\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.evaluate_round:1 of -msgid "Validate current global model on a number of clients." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.replace:1 of +msgid "Return a copy with all occurrences of substring old replaced by new." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of -msgid ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`split `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.fit:1 of -msgid "Run federated averaging for a number of rounds." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rsplit:1 flwr.common.EventType.split:1 of +msgid "" +"Return a list of the substrings in the string, using sep as the separator" +" string." 
msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`fit_round `\\ \\(server\\_round\\," -" timeout\\)" +":py:obj:`rsplit `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.fit_round:1 of -msgid "Perform a single round of federated averaging." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`join `\\ \\(iterable\\, \\/\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of -msgid "" -":py:obj:`set_max_workers `\\ " -"\\(max\\_workers\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.join:1 of +msgid "Concatenate any number of strings." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.set_max_workers:1 of -msgid "Set the max_workers used by ThreadPoolExecutor." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`capitalize `\\ \\(\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of -msgid ":py:obj:`set_strategy `\\ \\(strategy\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.capitalize:1 of +msgid "Return a capitalized version of the string." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.set_strategy:1 of -msgid "Replace server strategy." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`casefold `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.ServerApp.rst:2 -msgid "ServerApp" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.casefold:1 of +msgid "Return a version of the string suitable for caseless comparisons." 
msgstr "" -#: flwr.server.server_app.ServerApp:5 of -msgid "Use the `ServerApp` with an existing `Strategy`:" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`title `\\ \\(\\)" msgstr "" -#: flwr.server.server_app.ServerApp:15 of -msgid "Use the `ServerApp` with a custom main function:" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.title:1 of +msgid "Return a version of the string where each word is titlecased." msgstr "" -#: flwr.server.server_app.ServerApp.main:1::1 of -msgid ":py:obj:`main `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`center `\\ \\(width\\[\\, " +"fillchar\\]\\)" msgstr "" -#: flwr.server.server_app.ServerApp.main:1 -#: flwr.server.server_app.ServerApp.main:1::1 of -msgid "Return a decorator that registers the main fn with the server app." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.center:1 of +msgid "Return a centered string of length width." msgstr "" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:2 -msgid "ServerConfig" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" msgstr "" -#: flwr.server.server_config.ServerConfig:3 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -"All attributes have default values which allows users to configure just " -"the ones they care about." +"Return the number of non-overlapping occurrences of substring sub in " +"string S[start:end]." 
msgstr "" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 -msgid ":py:obj:`num_rounds `\\" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`expandtabs `\\ " +"\\(\\[tabsize\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 -msgid ":py:obj:`round_timeout `\\" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.expandtabs:1 of +msgid "Return a copy where all tab characters are expanded using spaces." msgstr "" -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:2 -msgid "SimpleClientManager" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" msgstr "" -#: flwr.server.client_manager.SimpleClientManager:1 of -msgid "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +"Return the lowest index in S where substring sub is found, such that sub " +"is contained within S[start:end]." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid ":py:obj:`all `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`partition `\\ \\(sep\\, \\/\\)" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid "" -":py:obj:`num_available `\\" -" \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.partition:1 flwr.common.EventType.rpartition:1 of +msgid "Partition the string into three parts using the given separator." 
msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`register `\\ " -"\\(client\\)" +":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`sample `\\ " -"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +":py:obj:`ljust `\\ \\(width\\[\\, " +"fillchar\\]\\)" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid "" -":py:obj:`unregister `\\ " -"\\(client\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.ljust:1 of +msgid "Return a left-justified string of length width." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\[\\, timeout\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`lower `\\ \\(\\)" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:3 of -msgid "" -"Blocks until the requested number of clients is available or until a " -"timeout is reached. Current timeout default: 1 day." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.lower:1 of +msgid "Return a copy of the string converted to lowercase." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:6 of -msgid "The number of clients to wait for." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:8 of -msgid "The time in seconds to wait for, defaults to 86400 (24h)." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.lstrip:1 of +msgid "Return a copy of the string with leading whitespace removed." 
msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:11 of -msgid "**success**" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.run_driver_api.rst:2 -msgid "run\\_driver\\_api" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +"Return the highest index in S where substring sub is found, such that sub" +" is contained within S[start:end]." msgstr "" -#: ../../source/ref-api/flwr.server.run_fleet_api.rst:2 -msgid "run\\_fleet\\_api" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`rindex `\\ \\(sub\\[\\, " +"start\\[\\, end\\]\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.run_server_app.rst:2 -msgid "run\\_server\\_app" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`rjust `\\ \\(width\\[\\, " +"fillchar\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.run_superlink.rst:2 -msgid "run\\_superlink" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rjust:1 of +msgid "Return a right-justified string of length width." msgstr "" -#: ../../source/ref-api/flwr.server.start_server.rst:2 -msgid "start\\_server" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)" msgstr "" -#: flwr.server.app.start_server:3 of -msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rstrip:1 of +msgid "Return a copy of the string with trailing whitespace removed." msgstr "" -#: flwr.server.app.start_server:5 of -msgid "" -"A server implementation, either `flwr.server.Server` or a subclass " -"thereof. If no instance is provided, then `start_server` will create one." 
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`rpartition `\\ \\(sep\\, \\/\\)" msgstr "" -#: flwr.server.app.start_server:9 flwr.simulation.app.start_simulation:28 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -"Currently supported values are `num_rounds` (int, default: 1) and " -"`round_timeout` in seconds (float, default: None)." +":py:obj:`splitlines `\\ " +"\\(\\[keepends\\]\\)" msgstr "" -#: flwr.server.app.start_server:12 of -msgid "" -"An implementation of the abstract base class " -"`flwr.server.strategy.Strategy`. If no strategy is provided, then " -"`start_server` will use `flwr.server.strategy.FedAvg`." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.splitlines:1 of +msgid "Return a list of the lines in the string, breaking at line boundaries." msgstr "" -#: flwr.server.app.start_server:16 of -msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_server` will use " -"`flwr.server.client_manager.SimpleClientManager`." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`strip `\\ \\(\\[chars\\]\\)" msgstr "" -#: flwr.server.app.start_server:21 of -msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower" -" clients. The default should be sufficient for most models. Users who " -"train very large models might need to increase this value. Note that the " -"Flower clients need to be started with the same value (see " -"`flwr.client.start_client`), otherwise clients will not know about the " -"increased limit and block larger messages." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.strip:1 of +msgid "Return a copy of the string with leading and trailing whitespace removed." 
msgstr "" -#: flwr.server.app.start_server:28 of -msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order: * CA certificate. * " -"server certificate. * server private key." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`swapcase `\\ \\(\\)" msgstr "" -#: flwr.server.app.start_server:28 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.swapcase:1 of msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order:" -msgstr "" - -#: flwr.server.app.start_server:32 of -msgid "CA certificate." +"Convert uppercase characters to lowercase and lowercase characters to " +"uppercase." msgstr "" -#: flwr.server.app.start_server:33 of -msgid "server certificate." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`translate `\\ \\(table\\, \\/\\)" msgstr "" -#: flwr.server.app.start_server:34 of -msgid "server private key." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.translate:1 of +msgid "Replace each character in the string using the given translation table." msgstr "" -#: flwr.server.app.start_server:37 of -msgid "**hist** -- Object containing training and evaluation metrics." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`upper `\\ \\(\\)" msgstr "" -#: flwr.server.app.start_server:42 of -msgid "Starting an insecure server:" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.upper:1 of +msgid "Return a copy of the string converted to uppercase." 
msgstr "" -#: flwr.server.app.start_server:46 of -msgid "Starting an SSL-enabled server:" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`startswith `\\ \\(prefix\\[\\," +" start\\[\\, end\\]\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:2 -msgid "strategy" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return True if S starts with the specified prefix, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`Bulyan `\\ \\(\\*\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" +":py:obj:`endswith `\\ \\(suffix\\[\\, " +"start\\[\\, end\\]\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.bulyan.Bulyan:1 of -msgid "Bulyan strategy." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return True if S ends with the specified suffix, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`DPFedAvgAdaptive `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\)" +":py:obj:`removeprefix `\\ " +"\\(prefix\\, \\/\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of -msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.removeprefix:1 of +msgid "Return a str with the given prefix string removed if present." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`DPFedAvgFixed `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" +":py:obj:`removesuffix `\\ " +"\\(suffix\\, \\/\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of -msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.removesuffix:1 of +msgid "Return a str with the given suffix string removed if present." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " -"`\\ " -"\\(...\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isascii `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 -#: of -msgid "Strategy wrapper for central DP with client-side adaptive clipping." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isascii:1 of +msgid "Return True if all characters in the string are ASCII, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " -"`\\ " -"\\(...\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`islower `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 -#: of -msgid "Strategy wrapper for central DP with server-side adaptive clipping." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.islower:1 of +msgid "Return True if the string is a lowercase string, False otherwise." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`DifferentialPrivacyClientSideFixedClipping " -"`\\ " -"\\(...\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isupper `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 -#: of -msgid "Strategy wrapper for central DP with client-side fixed clipping." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isupper:1 of +msgid "Return True if the string is an uppercase string, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`DifferentialPrivacyServerSideFixedClipping " -"`\\ " -"\\(...\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`istitle `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 -#: of -msgid "Strategy wrapper for central DP with server-side fixed clipping." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.istitle:1 of +msgid "Return True if the string is a title-cased string, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isspace `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedadagrad.FedAdagrad:1 of -msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isspace:1 of +msgid "Return True if the string is a whitespace string, False otherwise." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedAdam `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isdecimal `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedadam.FedAdam:1 of -msgid "FedAdam - Adaptive Federated Optimization using Adam." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isdecimal:1 of +msgid "Return True if the string is a decimal string, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedAvg `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isdigit `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedavg.FedAvg:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of -msgid "Federated Averaging strategy." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isdigit:1 of +msgid "Return True if the string is a digit string, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedAvgAndroid `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isnumeric `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isnumeric:1 of +msgid "Return True if the string is a numeric string, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedavgm.FedAvgM:1 of -msgid "Federated Averaging with Momentum strategy." 
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isalpha `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedMedian `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isalpha:1 of +msgid "Return True if the string is an alphabetic string, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedmedian.FedMedian:1 of -msgid "Configurable FedMedian strategy implementation." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isalnum `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedOpt `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isalnum:1 of +msgid "Return True if the string is an alpha-numeric string, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedopt.FedOpt:1 of -msgid "Federated Optim strategy." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isidentifier `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedProx `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isidentifier:1 of +msgid "Return True if the string is a valid Python identifier, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedprox.FedProx:1 of -msgid "Federated Optimization strategy." 
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isprintable `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedTrimmedAvg `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isprintable:1 of +msgid "Return True if the string is printable, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of -msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`zfill `\\ \\(width\\, \\/\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.zfill:1 of msgid "" -":py:obj:`FedXgbBagging `\\ " -"\\(\\[evaluate\\_function\\]\\)" +"Pad a numeric string with zeros on the left, to fill a field of the given" +" width." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of -msgid "Configurable FedXgbBagging strategy implementation." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`format `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedXgbCyclic `\\ " -"\\(\\*\\*kwargs\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return a formatted version of S, using substitutions from args and kwargs." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of -msgid "Configurable FedXgbCyclic strategy implementation." 
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`format_map `\\ \\(mapping\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return a formatted version of S, using substitutions from mapping." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of -msgid "Configurable FedXgbNnAvg strategy implementation." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`maketrans `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedYogi `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.maketrans:1 of +msgid "Return a translation table usable for str.translate()." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "FedYogi [Reddi et al., 2020] strategy." +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`PING `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FaultTolerantFedAvg " -"`\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_CLIENT_ENTER `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of -msgid "Configurable fault-tolerant FedAvg strategy implementation." 
+#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_CLIENT_LEAVE `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_SERVER_ENTER `\\" +msgstr "" + +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_SERVER_LEAVE `\\" +msgstr "" + +#: flwr.common.EventType.capitalize:1::1 of msgid "" -":py:obj:`Krum `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +":py:obj:`START_SIMULATION_ENTER " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.krum.Krum:1 of -msgid "Krum [Blanchard et al., 2017] strategy." +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.capitalize:1::1 of msgid "" -":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " -"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" +":py:obj:`RUN_SUPEREXEC_ENTER " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.qfedavg.QFedAvg:1 of -msgid "Configurable QFedAvg strategy implementation." +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPEREXEC_LEAVE " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid ":py:obj:`Strategy `\\ \\(\\)" +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`CLI_FLOWER_SIMULATION_ENTER " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.strategy.Strategy:1 of -msgid "Abstract base class for server strategy implementations." 
+#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`CLI_FLOWER_SIMULATION_LEAVE " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:2 -msgid "Bulyan" +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`PYTHON_API_RUN_SIMULATION_ENTER " +"`\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 -#: flwr.server.strategy.fedavgm.FedAvgM:1 -#: flwr.server.strategy.fedmedian.FedMedian:1 -#: flwr.server.strategy.fedopt.FedOpt:1 flwr.server.strategy.fedprox.FedProx:1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 -#: flwr.server.strategy.krum.Krum:1 flwr.server.strategy.qfedavg.QFedAvg:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.fedavg.FedAvg`" +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`PYTHON_API_RUN_SIMULATION_LEAVE " +"`\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:3 of -msgid "Implementation based on https://arxiv.org/abs/1802.07927." +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERLINK_ENTER " +"`\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:5 -#: flwr.server.strategy.fedadagrad.FedAdagrad:5 -#: flwr.server.strategy.fedadam.FedAdam:5 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:5 -#: flwr.server.strategy.fedavgm.FedAvgM:5 flwr.server.strategy.fedopt.FedOpt:5 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:5 -#: flwr.server.strategy.fedyogi.FedYogi:5 flwr.server.strategy.krum.Krum:5 of -msgid "Fraction of clients used during training. Defaults to 1.0." 
+#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERLINK_LEAVE " +"`\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:7 -#: flwr.server.strategy.fedadagrad.FedAdagrad:7 -#: flwr.server.strategy.fedadam.FedAdam:7 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:7 -#: flwr.server.strategy.fedavgm.FedAvgM:7 flwr.server.strategy.fedopt.FedOpt:7 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:7 -#: flwr.server.strategy.fedyogi.FedYogi:7 flwr.server.strategy.krum.Krum:7 of -msgid "Fraction of clients used during validation. Defaults to 1.0." +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERNODE_ENTER " +"`\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:9 -#: flwr.server.strategy.fedadagrad.FedAdagrad:9 -#: flwr.server.strategy.fedadam.FedAdam:9 flwr.server.strategy.fedavg.FedAvg:13 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:9 -#: flwr.server.strategy.fedavgm.FedAvgM:9 flwr.server.strategy.fedopt.FedOpt:9 -#: flwr.server.strategy.fedprox.FedProx:45 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:9 -#: flwr.server.strategy.fedyogi.FedYogi:9 flwr.server.strategy.krum.Krum:9 of -msgid "Minimum number of clients used during training. Defaults to 2." +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERNODE_LEAVE " +"`\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:11 -#: flwr.server.strategy.fedadagrad.FedAdagrad:11 -#: flwr.server.strategy.fedadam.FedAdam:11 -#: flwr.server.strategy.fedavg.FedAvg:15 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:11 -#: flwr.server.strategy.fedavgm.FedAvgM:11 -#: flwr.server.strategy.fedopt.FedOpt:11 -#: flwr.server.strategy.fedprox.FedProx:47 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:11 -#: flwr.server.strategy.fedyogi.FedYogi:11 flwr.server.strategy.krum.Krum:11 of -msgid "Minimum number of clients used during validation. Defaults to 2." 
+#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SERVER_APP_ENTER " +"`\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:13 -#: flwr.server.strategy.fedadagrad.FedAdagrad:13 -#: flwr.server.strategy.fedadam.FedAdam:13 -#: flwr.server.strategy.fedavg.FedAvg:17 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:13 -#: flwr.server.strategy.fedavgm.FedAvgM:13 -#: flwr.server.strategy.fedopt.FedOpt:13 -#: flwr.server.strategy.fedprox.FedProx:49 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:13 -#: flwr.server.strategy.fedyogi.FedYogi:13 flwr.server.strategy.krum.Krum:13 of -msgid "Minimum number of total clients in the system. Defaults to 2." +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SERVER_APP_LEAVE " +"`\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:15 flwr.server.strategy.krum.Krum:15 of -msgid "Number of malicious clients in the system. Defaults to 0." +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_CLIENT_APP_ENTER " +"`\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:17 -#: flwr.server.strategy.fedadagrad.FedAdagrad:15 -#: flwr.server.strategy.fedadam.FedAdam:15 -#: flwr.server.strategy.fedavg.FedAvg:19 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:15 -#: flwr.server.strategy.fedavgm.FedAvgM:15 -#: flwr.server.strategy.fedopt.FedOpt:15 -#: flwr.server.strategy.fedprox.FedProx:51 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:15 -#: flwr.server.strategy.fedyogi.FedYogi:17 -#: flwr.server.strategy.fedyogi.FedYogi:18 -#: flwr.server.strategy.fedyogi.FedYogi:19 flwr.server.strategy.krum.Krum:20 of -msgid "Optional function used for validation. Defaults to None." 
+#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_CLIENT_APP_LEAVE " +"`\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:19 -#: flwr.server.strategy.fedadagrad.FedAdagrad:17 -#: flwr.server.strategy.fedadam.FedAdam:17 -#: flwr.server.strategy.fedavg.FedAvg:21 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:17 -#: flwr.server.strategy.fedavgm.FedAvgM:17 -#: flwr.server.strategy.fedopt.FedOpt:17 -#: flwr.server.strategy.fedprox.FedProx:53 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:17 -#: flwr.server.strategy.fedyogi.FedYogi:20 flwr.server.strategy.krum.Krum:22 of -msgid "Function used to configure training. Defaults to None." +#: flwr.common.EventType.capitalize:3 of +msgid "" +"More specifically, make the first character have upper case and the rest " +"lower case." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:21 -#: flwr.server.strategy.fedadagrad.FedAdagrad:19 -#: flwr.server.strategy.fedadam.FedAdam:19 -#: flwr.server.strategy.fedavg.FedAvg:23 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:19 -#: flwr.server.strategy.fedavgm.FedAvgM:19 -#: flwr.server.strategy.fedopt.FedOpt:19 -#: flwr.server.strategy.fedprox.FedProx:55 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:19 -#: flwr.server.strategy.fedyogi.FedYogi:22 flwr.server.strategy.krum.Krum:24 of -msgid "Function used to configure validation. Defaults to None." +#: flwr.common.EventType.center:3 flwr.common.EventType.ljust:3 +#: flwr.common.EventType.rjust:3 of +msgid "Padding is done using the specified fill character (default is a space)." 
msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:23 -#: flwr.server.strategy.fedadagrad.FedAdagrad:25 -#: flwr.server.strategy.fedadam.FedAdam:21 -#: flwr.server.strategy.fedavg.FedAvg:25 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:21 -#: flwr.server.strategy.fedavgm.FedAvgM:21 -#: flwr.server.strategy.fedopt.FedOpt:21 -#: flwr.server.strategy.fedprox.FedProx:57 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:21 -#: flwr.server.strategy.fedyogi.FedYogi:24 flwr.server.strategy.krum.Krum:26 of -msgid "Whether or not accept rounds containing failures. Defaults to True." +#: flwr.common.EventType.count:1 of +msgid "" +"Return the number of non-overlapping occurrences of substring sub in " +"string S[start:end]. Optional arguments start and end are interpreted as" +" in slice notation." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:25 -#: flwr.server.strategy.fedadagrad.FedAdagrad:27 -#: flwr.server.strategy.fedadam.FedAdam:23 -#: flwr.server.strategy.fedavg.FedAvg:27 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:24 -#: flwr.server.strategy.fedavgm.FedAvgM:23 -#: flwr.server.strategy.fedopt.FedOpt:23 -#: flwr.server.strategy.fedprox.FedProx:59 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:23 -#: flwr.server.strategy.fedyogi.FedYogi:26 flwr.server.strategy.krum.Krum:28 of -msgid "Initial global model parameters." +#: flwr.common.EventType.encode:3 of +msgid "encoding" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:27 of -msgid "" -"Byzantine resilient aggregation rule that is used as the first step of " -"the Bulyan (e.g., Krum)" +#: flwr.common.EventType.encode:4 of +msgid "The encoding in which to encode the string." 
msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:29 of -msgid "arguments to the first_aggregation rule" +#: flwr.common.EventType.encode:9 of +msgid "errors" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.EventType.encode:6 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" -msgstr "" - -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Aggregate evaluation losses using weighted average." +"The error handling scheme to use for encoding errors. The default is " +"'strict' meaning that encoding errors raise a UnicodeEncodeError. Other " +"possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well " +"as any other name registered with codecs.register_error that can handle " +"UnicodeEncodeErrors." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.EventType.endswith:1 of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"Return True if S ends with the specified suffix, False otherwise. With " +"optional start, test S beginning at that position. With optional end, " +"stop comparing S at that position. suffix can also be a tuple of strings " +"to try." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "Aggregate fit results using Bulyan." 
+#: flwr.common.EventType.expandtabs:3 of +msgid "If tabsize is not given, a tab size of 8 characters is assumed." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.EventType.find:1 flwr.common.EventType.index:1 of msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +"Return the lowest index in S where substring sub is found, such that sub " +"is contained within S[start:end]. Optional arguments start and end are " +"interpreted as in slice notation." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: 
flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of -msgid "Configure the next round of evaluation." +#: flwr.common.EventType.find:5 flwr.common.EventType.rfind:5 of +msgid "Return -1 on failure." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.EventType.format:1 of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Return a formatted version of S, using substitutions from args and " +"kwargs. The substitutions are identified by braces ('{' and '}')." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: 
flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_fit:1 -#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_fit:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of -msgid "Configure the next round of training." +#: flwr.common.EventType.format_map:1 of +msgid "" +"Return a formatted version of S, using substitutions from mapping. The " +"substitutions are identified by braces ('{' and '}')." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.common.EventType.index:5 flwr.common.EventType.rindex:5 of +msgid "Raises ValueError when the substring is not found." 
msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.evaluate:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Evaluate model parameters using an evaluation function." +#: flwr.common.EventType.isalnum:3 of +msgid "" +"A string is alpha-numeric if all characters in the string are alpha-" +"numeric and there is at least one character in the string." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.EventType.isalpha:3 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"A string is alphabetic if all characters in the string are alphabetic and" +" there is at least one character in the string." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.initialize_parameters:1 -#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Initialize global model parameters." 
+#: flwr.common.EventType.isascii:3 of +msgid "" +"ASCII characters have code points in the range U+0000-U+007F. Empty " +"string is ASCII too." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.EventType.isdecimal:3 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"A string is a decimal string if all characters in the string are decimal " +"and there is at least one character in the string." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_evaluation_clients:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of -msgid "Use a fraction of available clients for evaluation." +#: flwr.common.EventType.isdigit:3 of +msgid "" +"A string is a digit string if all characters in the string are digits and" +" there is at least one character in the string." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.EventType.isidentifier:3 of msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +"Call keyword.iskeyword(s) to test whether string s is a reserved " +"identifier, such as \"def\" or \"class\"." 
msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_fit_clients:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of -msgid "Return the sample size and the required number of available clients." +#: flwr.common.EventType.islower:3 of +msgid "" +"A string is lowercase if all cased characters in the string are lowercase" +" and there is at least one cased character in the string." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 -msgid "DPFedAvgAdaptive" +#: flwr.common.EventType.isnumeric:3 of +msgid "" +"A string is numeric if all characters in the string are numeric and there" +" is at least one character in the string." msgstr "" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" +#: flwr.common.EventType.isprintable:3 of +msgid "" +"A string is printable if all of its characters are considered printable " +"in repr() or if it is empty." msgstr "" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:3 of -msgid "This class is deprecated and will be removed in a future release." +#: flwr.common.EventType.isspace:3 of +msgid "" +"A string is whitespace if all characters in the string are whitespace and" +" there is at least one character in the string." 
msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.EventType.istitle:3 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"In a title-cased string, upper- and title-case characters may only follow" +" uncased characters and lowercase characters only cased ones." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "Aggregate evaluation losses using the given strategy." +#: flwr.common.EventType.isupper:3 of +msgid "" +"A string is uppercase if all cased characters in the string are uppercase" +" and there is at least one cased character in the string." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.EventType.join:3 of msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"The string whose method is called is inserted in between each given " +"string. 
The result is returned as a new string." msgstr "" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." +#: flwr.common.EventType.join:6 of +msgid "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.EventType.lstrip:3 flwr.common.EventType.rstrip:3 +#: flwr.common.EventType.strip:3 of +msgid "If chars is given and not None, remove characters in chars instead." +msgstr "" + +#: flwr.common.EventType.maketrans:3 of msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"If there is only one argument, it must be a dictionary mapping Unicode " +"ordinals (integers) or characters to Unicode ordinals, strings or None. " +"Character keys will be then converted to ordinals. If there are two " +"arguments, they must be strings of equal length, and in the resulting " +"dictionary, each character in x will be mapped to the character at the " +"same position in y. If there is a third argument, it must be a string, " +"whose characters will be mapped to None in the result." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of -msgid "Configure the next round of evaluation using the specified strategy." +#: flwr.common.EventType.partition:3 of +msgid "" +"This will search for the separator in the string. If the separator is " +"found, returns a 3-tuple containing the part before the separator, the " +"separator itself, and the part after it." 
msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.EventType.partition:7 of msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"If the separator is not found, returns a 3-tuple containing the original " +"string and two empty strings." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.EventType.removeprefix:3 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"If the string starts with the prefix string, return string[len(prefix):]." +" Otherwise, return a copy of the original string." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of -msgid "Evaluate model parameters using an evaluation function from the strategy." +#: flwr.common.EventType.removesuffix:3 of +msgid "" +"If the string ends with the suffix string and that suffix is not empty, " +"return string[:-len(suffix)]. Otherwise, return a copy of the original " +"string." 
msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.EventType.replace:5 of +msgid "count" +msgstr "" + +#: flwr.common.EventType.replace:4 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"Maximum number of occurrences to replace. -1 (the default value) means " +"replace all occurrences." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of -msgid "Initialize global model parameters using given strategy." +#: flwr.common.EventType.replace:7 of +msgid "" +"If the optional argument count is given, only the first count occurrences" +" are replaced." 
msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 -#: flwr.server.strategy.strategy.Strategy.configure_fit:3 -#: flwr.server.strategy.strategy.Strategy.evaluate:6 of -msgid "The current round of federated learning." +#: flwr.common.EventType.rfind:1 flwr.common.EventType.rindex:1 of +msgid "" +"Return the highest index in S where substring sub is found, such that sub" +" is contained within S[start:end]. Optional arguments start and end are " +"interpreted as in slice notation." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 -#: flwr.server.strategy.strategy.Strategy.configure_fit:7 -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of -msgid "The client manager which holds all currently connected clients." +#: flwr.common.EventType.rpartition:3 of +msgid "" +"This will search for the separator in the string, starting at the end. If" +" the separator is found, returns a 3-tuple containing the part before the" +" separator, the separator itself, and the part after it." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of +#: flwr.common.EventType.rpartition:7 of msgid "" -"**evaluate_configuration** -- A list of tuples. Each tuple in the list " -"identifies a `ClientProxy` and the `EvaluateIns` for this particular " -"`ClientProxy`. If a particular `ClientProxy` is not included in this " -"list, it means that this `ClientProxy` will not participate in the next " -"round of federated evaluation." 
+"If the separator is not found, returns a 3-tuple containing two empty " +"strings and the original string." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 -msgid "DPFedAvgFixed" +#: flwr.common.EventType.rsplit:7 flwr.common.EventType.split:7 of +msgid "sep" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 -#: flwr.server.strategy.fedavg.FedAvg:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" +#: flwr.common.EventType.rsplit:4 flwr.common.EventType.split:4 of +msgid "The separator used to split the string." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.EventType.rsplit:6 flwr.common.EventType.split:6 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"When set to None (the default value), will split on any whitespace " +"character (including \\\\n \\\\r \\\\t \\\\f and spaces) and will discard" +" empty strings from the result." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.EventType.rsplit:11 flwr.common.EventType.split:11 of +msgid "maxsplit" +msgstr "" + +#: flwr.common.EventType.rsplit:10 flwr.common.EventType.split:10 of msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"Maximum number of splits (starting from the left). -1 (the default value)" +" means no limit." 
msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of -msgid "Aggregate training results using unweighted aggregation." +#: flwr.common.EventType.rsplit:13 of +msgid "Splitting starts at the end of the string and works to the front." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.EventType.split:13 of msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Note, str.split() is mainly useful for data that has been intentionally " +"delimited. With natural text that includes punctuation, consider using " +"the regular expression module." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.EventType.splitlines:3 of msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Line breaks are not included in the resulting list unless keepends is " +"given and true." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of +#: flwr.common.EventType.startswith:1 of msgid "" -"Configure the next round of training incorporating Differential Privacy " -"(DP)." +"Return True if S starts with the specified prefix, False otherwise. With " +"optional start, test S beginning at that position. With optional end, " +"stop comparing S at that position. prefix can also be a tuple of strings " +"to try." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.EventType.title:3 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"More specifically, words start with uppercased characters and all " +"remaining cased characters have lower case." 
msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.common.EventType.translate:5 of +msgid "table" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of +#: flwr.common.EventType.translate:4 of msgid "" -"Configuration of the next training round includes information related to " -"DP, such as clip norm and noise stddev." +"Translation table, which must be a mapping of Unicode ordinals to Unicode" +" ordinals, strings, or None." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 -#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of +#: flwr.common.EventType.translate:7 of msgid "" -"**fit_configuration** -- A list of tuples. Each tuple in the list " -"identifies a `ClientProxy` and the `FitIns` for this particular " -"`ClientProxy`. If a particular `ClientProxy` is not included in this " -"list, it means that this `ClientProxy` will not participate in the next " -"round of federated learning." +"The table must implement lookup/indexing via __getitem__, for instance a " +"dictionary or list. If this operation raises LookupError, the character " +"is left untouched. Characters mapped to None are deleted." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 -msgid "DifferentialPrivacyClientSideAdaptiveClipping" +#: flwr.common.EventType.zfill:3 of +msgid "The string is never truncated." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 -#: of -msgid "Use `adaptiveclipping_mod` modifier at the client side." 
+#: ../../source/ref-api/flwr.common.FitIns.rst:2 +msgid "FitIns" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 -#: of -msgid "" -"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " -"performs clipping on the server-side, " -"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " -"happen on the client-side, usually by using the built-in " -"`adaptiveclipping_mod`." +#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +msgid ":py:obj:`parameters `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 -#: of -msgid "The strategy to which DP functionalities will be added by this wrapper." +#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +msgid ":py:obj:`config `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 -#: of -msgid "The noise multiplier for the Gaussian mechanism for model updates." +#: ../../source/ref-api/flwr.common.FitRes.rst:2 +msgid "FitRes" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 -#: of -msgid "The number of clients that are sampled on each round." 
+#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`status `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 -#: of -msgid "" -"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " -"recommends to set to 0.1." +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`parameters `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 -#: of -msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`num_examples `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 -#: of -msgid "" -"The learning rate for the clipping norm adaptation. Defaults to 0.2. " -"Andrew et al. recommends to set to 0.2." +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`metrics `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 -#: of -msgid "" -"The stddev of the noise added to the count of updates currently below the" -" estimate. Andrew et al. 
recommends to set to `expected_num_records/20`" +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:2 +msgid "GetParametersIns" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 -#: of -msgid "Create a strategy:" +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:28::1 +msgid ":py:obj:`config `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 -#: of -msgid "" -"Wrap the strategy with the " -"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:2 +msgid "GetParametersRes" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 -#: of -msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +msgid ":py:obj:`status `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +msgid ":py:obj:`parameters `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:2 +msgid "GetPropertiesIns" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: 
flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 -#: of -msgid "Aggregate training results and update clip norms." +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:28::1 +msgid ":py:obj:`config `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:2 +msgid "GetPropertiesRes" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +msgid ":py:obj:`status `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +msgid ":py:obj:`properties `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +#: ../../source/ref-api/flwr.common.Message.rst:2 +msgid "Message" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 -msgid "DifferentialPrivacyClientSideFixedClipping" +#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 +#: flwr.common.message.Message:3 of +msgid "A dataclass including 
information about the message to be executed." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 -#: of -msgid "Use `fixedclipping_mod` modifier at the client side." +#: flwr.common.message.Message:5 of +msgid "" +"Holds records either sent by another entity (e.g. sent by the server-side" +" logic to a client, or vice-versa) or that will be sent to it." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 -#: of +#: flwr.common.message.Message:8 of msgid "" -"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " -"performs clipping on the server-side, " -"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " -"on the client-side, usually by using the built-in `fixedclipping_mod`." +"A dataclass that captures information about an error that took place when" +" processing another message." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 -#: of +#: ../../source/ref-api/flwr.common.Message.rst:35::1 msgid "" -"The noise multiplier for the Gaussian mechanism for model updates. A " -"value of 1.0 or higher is recommended for strong privacy." +":py:obj:`create_error_reply `\\ " +"\\(error\\[\\, ttl\\]\\)" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 -#: of -msgid "The value of the clipping norm." +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_error_reply:1 of +msgid "Construct a reply message indicating an error happened." 
msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 -#: of +#: ../../source/ref-api/flwr.common.Message.rst:35::1 msgid "" -"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " -"wrapper:" +":py:obj:`create_reply `\\ " +"\\(content\\[\\, ttl\\]\\)" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 -#: of -msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_reply:1 of +msgid "Create a reply to this message with specified content and TTL." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid ":py:obj:`has_content `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_content:1 of +msgid "Return True if message has content, else False." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 -#: of -msgid "Add noise to the aggregated parameters." 
+#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid ":py:obj:`has_error `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_error:1 of +msgid "Return True if message has an error, else False." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`content `\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.common.Message.content:1 flwr.common.Message.content:1::1 #: of -msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +msgid "The content of this message." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`error `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 -msgid "DifferentialPrivacyServerSideAdaptiveClipping" +#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of +msgid "Error captured by this message." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 -#: of -msgid "" -"The standard deviation of the noise added to the count of updates below " -"the estimate. Andrew et al. 
recommends to set to " -"`expected_num_records/20`" +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`metadata `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 -#: of -msgid "" -"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " -"wrapper" +#: flwr.common.message.Message.create_error_reply:3 of +msgid "The error that was encountered." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.message.Message.create_error_reply:5 +#: flwr.common.message.Message.create_reply:9 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +"Time-to-live for this message in seconds. If unset, it will be set based " +"on the remaining time for the received message before it expires. This " +"follows the equation: ttl = msg.meta.ttl - (reply.meta.created_at - " +"msg.meta.created_at)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.message.Message.create_error_reply:5 +#: flwr.common.message.Message.create_reply:9 of msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +"Time-to-live for this message in seconds. If unset, it will be set based " +"on the remaining time for the received message before it expires. 
This " +"follows the equation:" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: flwr.common.message.Message.create_error_reply:9 +#: flwr.common.message.Message.create_reply:13 of +msgid "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.message.Message.create_reply:3 of msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +"The method generates a new `Message` as a reply to this message. It " +"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " +"this message and sets 'reply_to_message' to the ID of this message." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +#: flwr.common.message.Message.create_reply:7 of +msgid "The content for the reply message." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +#: flwr.common.message.Message.create_reply:16 of +msgid "A new `Message` instance representing the reply." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 -msgid "DifferentialPrivacyServerSideFixedClipping" +#: ../../source/ref-api/flwr.common.MessageType.rst:2 +msgid "MessageType" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 -#: of -msgid "" -"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping " -"wrapper" +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`EVALUATE `\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`QUERY `\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`TRAIN `\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 -#: of -msgid "Compute the updates, clip, and pass them for aggregation." 
+#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 +msgid "MessageTypeLegacy" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PARAMETERS `\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PROPERTIES `\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of +msgid "An identifier for the current run." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of +msgid "An identifier for the current message." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 -#: of -msgid "Afterward, add noise to the aggregated parameters." +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of +msgid "An identifier for the node sending this message." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 -msgid "FaultTolerantFedAvg" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.dst_node_id:1 flwr.common.message.Metadata:9 of +msgid "An identifier for the node receiving this message." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of +msgid "An identifier for the message this message replies to." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: flwr.common.message.Metadata:13 of msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"An identifier for grouping messages. In some settings, this is used as " +"the FL round." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 -#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 -#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 -#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using weighted average." 
+#: flwr.common.message.Metadata:16 of +msgid "Time-to-live for this message in seconds." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of +msgid "A string that encodes the action to be executed on the receiving end." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`created_at `\\" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.common.Metadata.created_at:1 +#: flwr.common.Metadata.created_at:1::1 of +msgid "Unix timestamp when the message was created." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`dst_node_id `\\" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`group_id `\\" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.group_id:1 of +msgid "An identifier for grouping messages." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 -#: ../../source/ref-changelog.md:905 -msgid "FedAdagrad" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`message_id `\\" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:1 -#: flwr.server.strategy.fedadam.FedAdam:1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`message_type `\\" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:3 -#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 -#: flwr.server.strategy.fedyogi.FedYogi:3 of -msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`reply_to_message `\\" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:21 -#: flwr.server.strategy.fedadagrad.FedAdagrad:23 -#: flwr.server.strategy.fedadam.FedAdam:25 -#: flwr.server.strategy.fedadam.FedAdam:27 -#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 -#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 -#: flwr.server.strategy.fedprox.FedProx:61 -#: flwr.server.strategy.fedprox.FedProx:63 -#: flwr.server.strategy.fedyogi.FedYogi:28 -#: flwr.server.strategy.fedyogi.FedYogi:30 of -msgid "Metrics aggregation function, optional." +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`run_id `\\" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:29 -#: flwr.server.strategy.fedadam.FedAdam:29 -#: flwr.server.strategy.fedopt.FedOpt:29 of -msgid "Server-side learning rate. Defaults to 1e-1." +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`src_node_id `\\" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:31 -#: flwr.server.strategy.fedadam.FedAdam:31 -#: flwr.server.strategy.fedopt.FedOpt:31 of -msgid "Client-side learning rate. Defaults to 1e-1." 
+#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`ttl `\\" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:33 -#: flwr.server.strategy.fedadam.FedAdam:37 -#: flwr.server.strategy.fedopt.FedOpt:37 of -msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." +#: flwr.common.Metadata.created_at:1::1 flwr.common.Metadata.ttl:1 +#: of +msgid "Time-to-live for this message." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: ../../source/ref-api/flwr.common.Metrics.rst:2 +msgid "Metrics" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\" -" \\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 +msgid "MetricsRecord" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.metricsrecord.MetricsRecord:1 of msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | " +":py:class:`list`\\ [:py:class:`int`] | :py:class:`list`\\ " +"[:py:class:`float`]]" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.metricsrecord.MetricsRecord:3 of msgid "" -":py:obj:`configure_fit `\\" -" \\(server\\_round\\, parameters\\, ...\\)" +"A :code:`MetricsRecord` is a Python dictionary designed to ensure that " +"each key-value pair adheres to specified data types. A " +":code:`MetricsRecord` is one of the types of records that a " +"`flwr.common.RecordSet `_ supports " +"and can therefore be used to construct :code:`common.Message` objects." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.metricsrecord.MetricsRecord:9 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"A dictionary that stores basic types (i.e. `int`, `float` as defined in " +"`MetricsScalar`) and list of such types (see `MetricsScalarList`)." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.metricsrecord.MetricsRecord:12 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"A boolean indicating whether metrics should be deleted from the input " +"dictionary immediately after adding them to the record. When set to True," +" the data is duplicated in memory. If memory is a concern, set it to " +"False." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.metricsrecord.MetricsRecord:20 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"The usage of a :code:`MetricsRecord` is envisioned for communicating " +"results obtained when a node performs an action. A few typical examples " +"include: communicating the training accuracy after a model is trained " +"locally by a :code:`ClientApp`, reporting the validation loss obtained at" +" a :code:`ClientApp`, or, more generally, the output of executing a query" +" by the :code:`ClientApp`. Common to these examples is that the output " +"can be typically represented by a single scalar (:code:`int`, " +":code:`float`) or list of scalars." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.metricsrecord.MetricsRecord:28 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"Let's see some examples of how to construct a :code:`MetricsRecord` from " +"scratch:" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 -msgid "FedAdam" +#: flwr.common.record.metricsrecord.MetricsRecord:39 of +msgid "" +"Since types are enforced, the types of the objects inserted are checked. " +"For a :code:`MetricsRecord`, value types allowed are those in defined in " +":code:`flwr.common.MetricsRecordValues`. Similarly, only :code:`str` keys" +" are allowed." msgstr "" -#: flwr.server.strategy.fedadam.FedAdam:33 -#: flwr.server.strategy.fedyogi.FedYogi:36 of -msgid "Momentum parameter. Defaults to 0.9." +#: flwr.common.record.metricsrecord.MetricsRecord:50 of +msgid "" +"If you need a more versatily type of record try :code:`ConfigsRecord` or " +":code:`ParametersRecord`." msgstr "" -#: flwr.server.strategy.fedadam.FedAdam:35 -#: flwr.server.strategy.fedyogi.FedYogi:38 of -msgid "Second moment parameter. Defaults to 0.99." 
+#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`popitem `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: collections.abc.MutableMapping.clear:1::1 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: collections.abc.MutableMapping.clear:1::1 of msgid "" 
-":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 -msgid "FedAvg" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:3 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of -msgid "Implementation based on https://arxiv.org/abs/1602.05629" +#: ../../source/ref-api/flwr.common.NDArray.rst:2 +msgid "NDArray" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 -#: of -msgid "" -"Fraction of clients used during training. In case `min_fit_clients` is " -"larger than `fraction_fit * available_clients`, `min_fit_clients` will " -"still be sampled. Defaults to 1.0." +#: ../../source/ref-api/flwr.common.NDArrays.rst:2 +msgid "NDArrays" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 -#: of -msgid "" -"Fraction of clients used during validation. In case " -"`min_evaluate_clients` is larger than `fraction_evaluate * " -"available_clients`, `min_evaluate_clients` will still be sampled. " -"Defaults to 1.0." +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +msgid ":py:obj:`tensors `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:33 of -msgid "Enable (True) or disable (False) in-place aggregation of model updates." 
+#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +msgid ":py:obj:`tensor_type `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:2 +msgid "ParametersRecord" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.parametersrecord.ParametersRecord:1 of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.parametersrecord.ParametersRecord:3 of msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +"A dataclass storing named Arrays in order. This means that it holds " +"entries as an OrderedDict[str, Array]. ParametersRecord objects can be " +"viewed as an equivalent to PyTorch's state_dict, but holding serialised " +"tensors instead. A :code:`ParametersRecord` is one of the types of " +"records that a `flwr.common.RecordSet " +"`_ supports and can therefore be " +"used to construct :code:`common.Message` objects." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.parametersrecord.ParametersRecord:10 of +msgid "A dictionary that stores serialized array-like or tensor-like objects." +msgstr "" + +#: flwr.common.record.parametersrecord.ParametersRecord:12 of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"A boolean indicating whether parameters should be deleted from the input " +"dictionary immediately after adding them to the record. If False, the " +"dictionary passed to `set_parameters()` will be empty once exiting from " +"that function. 
This is the desired behaviour when working with very large" +" models/tensors/arrays. However, if you plan to continue working with " +"your parameters after adding it to the record, set this flag to True. " +"When set to True, the data is duplicated in memory." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.parametersrecord.ParametersRecord:23 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"The usage of :code:`ParametersRecord` is envisioned for storing data " +"arrays (e.g. parameters of a machine learning model). These first need to" +" be serialized into a :code:`flwr.common.Array` data structure." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.parametersrecord.ParametersRecord:27 of +msgid "Let's see some examples:" +msgstr "" + +#: flwr.common.record.parametersrecord.ParametersRecord:50 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"Now that the NumPy array is embedded into a :code:`ParametersRecord` it " +"could be sent if added as part of a :code:`common.Message` or it could be" +" saved as a persistent state of a :code:`ClientApp` via its context. " +"Regardless of the usecase, we will sooner or later want to recover the " +"array in its original NumPy representation. 
For the example above, where " +"the array was serialized using the built-in utility function, " +"deserialization can be done as follows:" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.parametersrecord.ParametersRecord:65 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"If you need finer control on how your arrays are serialized and " +"deserialized, you can construct :code:`Array` objects directly like this:" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.parametersrecord.ParametersRecord:83 of msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +"Note that different arrays (e.g. from PyTorch, Tensorflow) might require " +"different serialization mechanism. Howerver, they often support a " +"conversion to NumPy, therefore allowing to use the same or similar steps " +"as in the example above." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 -msgid "FedAvgAndroid" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`bytes_to_ndarray " -"`\\ \\(tensor\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" msgstr "" -#: 
flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of -msgid "Deserialize NumPy array from bytes." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`popitem `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: collections.abc.MutableMapping.clear:1::1 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: collections.abc.MutableMapping.clear:1::1 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" +msgstr "" + +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of msgid "" -":py:obj:`ndarray_to_bytes " -"`\\ \\(ndarray\\)" +"Note that a small amount of Bytes might also be included in this counting" +" that correspond to metadata of the serialized object (e.g. of NumPy " +"array) needed for deseralization." 
msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of -msgid "Serialize NumPy array to bytes." +#: ../../source/ref-api/flwr.common.Properties.rst:2 +msgid "Properties" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 +msgid "ReconnectIns" +msgstr "" + +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:28::1 +msgid ":py:obj:`seconds `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.RecordSet.rst:2 +msgid "RecordSet" +msgstr "" + +#: flwr.common.record.recordset.RecordSet:3 of msgid "" -":py:obj:`ndarrays_to_parameters " -"`\\ " -"\\(ndarrays\\)" +"A :code:`RecordSet` is the unified mechanism by which parameters, metrics" +" and configs can be either stored as part of a `flwr.common.Context " +"`_ in your apps or communicated as part of a " +"`flwr.common.Message `_ between your apps." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.recordset.RecordSet:9 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"A dictionary of :code:`ParametersRecords` that can be used to record and " +"communicate model parameters and high-dimensional arrays." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.recordset.RecordSet:12 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"A dictionary of :code:`MetricsRecord` that can be used to record and " +"communicate scalar-valued metrics that are the result of performing and " +"action, for example, by a :code:`ClientApp`." 
msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.recordset.RecordSet:16 of msgid "" -":py:obj:`parameters_to_ndarrays " -"`\\ " -"\\(parameters\\)" +"A dictionary of :code:`ConfigsRecord` that can be used to record and " +"communicate configuration values to an entity (e.g. to a " +":code:`ClientApp`) for it to adjust how an action is performed." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 -#: of -msgid "Convert parameters object to NumPy weights." +#: flwr.common.record.recordset.RecordSet:24 of +msgid "" +"A :code:`RecordSet` can hold three types of records, each designed with " +"an specific purpose. What is common to all of them is that they are " +"Python dictionaries designed to ensure that each key-value pair adheres " +"to specified data types." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 -msgid "FedAvgM" +#: flwr.common.record.recordset.RecordSet:29 of +msgid "Let's see an example." msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM:3 of -msgid "Implementation based on https://arxiv.org/abs/1909.06335" +#: flwr.common.record.recordset.RecordSet:47 of +msgid "" +"Adding a :code:`ParametersRecord` follows the same steps as above but " +"first, the array needs to be serialized and represented as a " +":code:`flwr.common.Array`. If the array is a :code:`NumPy` array, you can" +" use the built-in utility function `array_from_numpy " +"`_. It is often possible to convert an" +" array first to :code:`NumPy` and then use the aforementioned function." msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM:25 of +#: flwr.common.record.recordset.RecordSet:66 of msgid "" -"Server-side learning rate used in server-side optimization. Defaults to " -"1.0." 
+"For additional examples on how to construct each of the records types " +"shown above, please refer to the documentation for :code:`ConfigsRecord`," +" :code:`MetricsRecord` and :code:`ParametersRecord`." msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM:28 of -msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`configs_records `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +#: flwr.common.RecordSet.configs_records:1 +#: flwr.common.RecordSet.configs_records:1::1 of +msgid "Dictionary holding ConfigsRecord instances." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`metrics_records `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.metrics_records:1 of +msgid "Dictionary holding MetricsRecord instances." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`parameters_records `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.parameters_records:1 of +msgid "Dictionary holding ParametersRecord instances." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.common.ServerMessage.rst:2 +msgid "ServerMessage" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid ":py:obj:`evaluate_ins `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid ":py:obj:`fit_ins `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 -msgid "FedMedian" +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid "" +":py:obj:`get_parameters_ins " +"`\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`get_properties_ins " +"`\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.common.Status.rst:2 +msgid "Status" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of -msgid "Aggregate fit results using median." 
+#: ../../source/ref-api/flwr.common.Status.rst:29::1 +msgid ":py:obj:`code `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.Status.rst:29::1 +msgid ":py:obj:`message `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 +msgid "array\\_from\\_numpy" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 +msgid "bytes\\_to\\_ndarray" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.common.configure.rst:2 +msgid "configure" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.common.event.rst:2 +msgid "event" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.common.log.rst:2 +msgid "log" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 -msgid "FedOpt" +#: logging.Logger.log:3 of +msgid "" +"To pass exception information, use the keyword argument exc_info with a " +"true value, e.g." msgstr "" -#: flwr.server.strategy.fedopt.FedOpt:33 of -msgid "Momentum parameter. Defaults to 0.0." 
+#: logging.Logger.log:6 of +#, python-format +msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" msgstr "" -#: flwr.server.strategy.fedopt.FedOpt:35 of -msgid "Second moment parameter. Defaults to 0.0." +#: ../../source/ref-api/flwr.common.ndarray_to_bytes.rst:2 +msgid "ndarray\\_to\\_bytes" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +#: ../../source/ref-api/flwr.common.ndarrays_to_parameters.rst:2 +msgid "ndarrays\\_to\\_parameters" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.common.now.rst:2 +msgid "now" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.parameters_to_ndarrays.rst:2 +msgid "parameters\\_to\\_ndarrays" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.rst:2 +msgid "server" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:22::1 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`start_server `\\ \\(\\*\\[\\, " +"server\\_address\\, server\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.server.rst:22::1 +#: flwr.server.app.start_server:1 of +msgid "Start a Flower server using the gRPC transport layer." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`ClientManager `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.client_manager.ClientManager:1 of +msgid "Abstract base class for managing Flower clients." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 -msgid "FedProx" +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`Driver `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:3 of -msgid "Implementation based on https://arxiv.org/abs/1812.06127" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.driver.driver.Driver:1 of +msgid "Abstract base Driver class for the Driver API." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:5 of -msgid "" -"The strategy in itself will not be different than FedAvg, the client " -"needs to be adjusted. A proximal term needs to be added to the loss " -"function during the training:" +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`History `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:9 of -msgid "" -"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" -"\n" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.history.History:1 of +msgid "History class for training and/or evaluation metrics collection." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:12 of +#: ../../source/ref-api/flwr.server.rst:37::1 msgid "" -"Where $w^t$ are the global parameters and $w$ are the local weights the " -"function will be optimized with." 
+":py:obj:`LegacyContext `\\ \\(context\\[\\, " +"config\\, strategy\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:15 of -msgid "In PyTorch, for example, the loss would go from:" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Legacy Context." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:21 of -msgid "To:" +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid "" +":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " +"strategy\\]\\)" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:30 of +#: ../../source/ref-api/flwr.server.rst:37::1 msgid "" -"With `global_params` being a copy of the parameters before the training " -"takes place." +":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " +"strategy\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:65 of -msgid "" -"The weight of the proximal term used in the optimization. 0.0 makes this " -"strategy equivalent to FedAvg, and the higher the coefficient, the more " -"regularization will be used (that is, the client parameters will need to " -"be closer to the server parameters during training)." +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.server_app.ServerApp:1 of +msgid "Flower ServerApp." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:37::1 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +":py:obj:`ServerAppComponents `\\ " +"\\(\\[server\\, config\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.serverapp_components.ServerAppComponents:1 of +msgid "Components to construct a ServerApp." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:37::1 msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," +" round\\_timeout\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.server_config.ServerConfig:1 of +msgid "Flower server config." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.client_manager.SimpleClientManager:1 of +msgid "Provides a pool of available clients." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.rst:56::1 +msgid ":py:obj:`flwr.server.strategy `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.rst:56::1 +#: flwr.server.strategy:1 of +msgid "Contains the strategy abstraction and different implementations." 
msgstr "" -#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of -msgid "Sends the proximal factor mu to the clients" +#: ../../source/ref-api/flwr.server.rst:56::1 +msgid ":py:obj:`flwr.server.workflow `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 -msgid "FedTrimmedAvg" +#: ../../source/ref-api/flwr.server.rst:56::1 +#: flwr.server.workflow:1 of +msgid "Workflows." msgstr "" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of -msgid "Implemented based on: https://arxiv.org/abs/1803.01498" +#: ../../source/ref-api/flwr.server.ClientManager.rst:2 +msgid "ClientManager" msgstr "" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of -msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`all `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: flwr.server.client_manager.ClientManager.all:1 +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.all:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "Return all available clients." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`num_available `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using trimmed average." 
+#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.num_available:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.num_available:1 of +msgid "Return the number of available clients." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`register `\\ \\(client\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.register:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.register:1 of +msgid "Register Flower ClientProxy instance." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.ClientManager.all:1::1 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.sample:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.sample:1 of +msgid "Sample a number of Flower ClientProxy instances." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`unregister `\\ \\(client\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.unregister:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.unregister:1 of +msgid "Unregister Flower ClientProxy instance." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 -msgid "FedXgbBagging" +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid "" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\, timeout\\)" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.wait_for:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.wait_for:1 of +msgid "Wait until at least `num_clients` are available." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "Aggregate evaluation metrics using average." +#: flwr.server.client_manager.ClientManager.num_available:3 +#: flwr.server.client_manager.SimpleClientManager.num_available:3 of +msgid "**num_available** -- The number of currently available clients." 
msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.client_manager.ClientManager.register:6 +#: flwr.server.client_manager.SimpleClientManager.register:6 of msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**success** -- Indicating if registration was successful. False if " +"ClientProxy is already registered or can not be registered for any " +"reason." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of -msgid "Aggregate fit results using bagging." +#: flwr.server.client_manager.ClientManager.unregister:3 +#: flwr.server.client_manager.SimpleClientManager.unregister:3 of +msgid "This method is idempotent." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:2 +msgid "Driver" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`create_message `\\ " +"\\(content\\, message\\_type\\, ...\\[\\, ttl\\]\\)" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.create_message:1 of +msgid "Create a new message with specified parameters." 
msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +msgid ":py:obj:`get_node_ids `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.get_node_ids:1 of +msgid "Get node IDs." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`pull_messages `\\ " +"\\(message\\_ids\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 -msgid "FedXgbCyclic" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.pull_messages:1 of +msgid "Pull messages based on message IDs." msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`push_messages `\\ " +"\\(messages\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\," -" results\\, failures\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.push_messages:1 of +msgid "Push messages to specified node IDs." 
msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`send_and_receive `\\ " +"\\(messages\\, \\*\\[\\, timeout\\]\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.send_and_receive:1 of +msgid "Push messages to specified node IDs and pull the reply messages." msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.server.driver.driver.Driver.create_message:1::1 of +msgid ":py:obj:`run `\\" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.server.Driver.run:1 +#: flwr.server.driver.driver.Driver.create_message:1::1 of +msgid "Run information." msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.create_message:3 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"This method constructs a new `Message` with given content and metadata. " +"The `run_id` and `src_node_id` will be set automatically." msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.create_message:6 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"The content for the new message. This holds records that are to be sent " +"to the destination node." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 -msgid "FedXgbNnAvg" +#: flwr.server.driver.driver.Driver.create_message:9 of +msgid "" +"The type of the message, defining the action to be executed on the " +"receiving end." msgstr "" -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of -msgid "" -"This strategy is deprecated, but a copy of it is available in Flower " -"Baselines: " -"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." +#: flwr.server.driver.driver.Driver.create_message:12 of +msgid "The ID of the destination node to which the message is being sent." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.create_message:14 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"The ID of the group to which this message is associated. In some " +"settings, this is used as the FL round." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.create_message:17 of msgid "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\, " -"results\\, failures\\)" +"Time-to-live for the round trip of this message, i.e., the time from " +"sending this message to receiving a reply. It specifies in seconds the " +"duration for which the message and its potential reply are considered " +"valid. If unset, the default TTL (i.e., `common.DEFAULT_TTL`) will be " +"used." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.create_message:23 of msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**message** -- A new `Message` instance with the specified content and " +"metadata." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.pull_messages:3 of msgid "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +"This method is used to collect messages from the SuperLink that " +"correspond to a set of given message IDs." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.server.driver.driver.Driver.pull_messages:6 of +msgid "An iterable of message IDs for which reply messages are to be retrieved." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.pull_messages:9 of +msgid "**messages** -- An iterable of messages received." +msgstr "" + +#: flwr.server.driver.driver.Driver.push_messages:3 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"This method takes an iterable of messages and sends each message to the " +"node specified in `dst_node_id`." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.push_messages:6 +#: flwr.server.driver.driver.Driver.send_and_receive:7 of +msgid "An iterable of messages to be sent." +msgstr "" + +#: flwr.server.driver.driver.Driver.push_messages:9 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"**message_ids** -- An iterable of IDs for the messages that were sent, " +"which can be used to pull replies." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.send_and_receive:3 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"This method sends a list of messages to their destination node IDs and " +"then waits for the replies. 
It continues to pull replies until either all" +" replies are received or the specified timeout duration is exceeded." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 -msgid "FedYogi" +#: flwr.server.driver.driver.Driver.send_and_receive:9 of +msgid "" +"The timeout duration in seconds. If specified, the method will wait for " +"replies for this duration. If `None`, there is no time limit and the " +"method will wait until replies for all messages are received." msgstr "" -#: flwr.server.strategy.fedyogi.FedYogi:32 of -msgid "Server-side learning rate. Defaults to 1e-2." +#: flwr.server.driver.driver.Driver.send_and_receive:14 of +msgid "**replies** -- An iterable of reply messages received from the SuperLink." msgstr "" -#: flwr.server.strategy.fedyogi.FedYogi:34 of -msgid "Client-side learning rate. Defaults to 0.0316." +#: flwr.server.driver.driver.Driver.send_and_receive:19 of +msgid "" +"This method uses `push_messages` to send the messages and `pull_messages`" +" to collect the replies. If `timeout` is set, the method may not return " +"replies for all sent messages. A message remains valid until its TTL, " +"which is not affected by `timeout`." msgstr "" -#: flwr.server.strategy.fedyogi.FedYogi:40 of -msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." 
+#: ../../source/ref-api/flwr.server.History.rst:2 +msgid "History" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +":py:obj:`add_loss_centralized " +"`\\ \\(server\\_round\\, " +"loss\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: flwr.server.history.History.add_loss_centralized:1 +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "Add one loss entry (from centralized evaluation)." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`add_loss_distributed " +"`\\ \\(server\\_round\\, " +"loss\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_loss_distributed:1 of +msgid "Add one loss entry (from distributed evaluation)." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`add_metrics_centralized " +"`\\ \\(server\\_round\\, " +"metrics\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_centralized:1 of +msgid "Add metrics entries (from centralized evaluation)." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`add_metrics_distributed " +"`\\ \\(server\\_round\\, " +"metrics\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed:1 of +msgid "Add metrics entries (from distributed evaluation)." +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`add_metrics_distributed_fit " +"`\\ \\(server\\_round\\," +" ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 -msgid "Krum" +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed_fit:1 of +msgid "Add metrics entries (from distributed fit)." msgstr "" -#: flwr.server.strategy.krum.Krum:3 of -msgid "Implementation based on https://arxiv.org/abs/1703.02757" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 +msgid "LegacyContext" msgstr "" -#: flwr.server.strategy.krum.Krum:17 of -msgid "" -"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" -" that case classical Krum is applied." 
+#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Bases: :py:class:`~flwr.common.context.Context`" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`config `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`strategy `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of -msgid "Aggregate fit results using Krum." +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`client_manager `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`history `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`node_id `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`node_config `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`state `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" 
-":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`run_config `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.Server.rst:2 +msgid "Server" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 -msgid "QFedAvg" +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`client_manager `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +#: flwr.server.server.Server.client_manager:1 +#: flwr.server.server.Server.client_manager:1::1 of +msgid "Return ClientManager." msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`disconnect_all_clients " +"`\\ \\(timeout\\)" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" -msgstr "" - -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" - -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.disconnect_all_clients:1 of +msgid "Send shutdown signal to all clients." 
msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`evaluate_round `\\ " +"\\(server\\_round\\, timeout\\)" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.evaluate_round:1 of +msgid "Validate current global model on a number of clients." msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 -msgid "Strategy" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit:1 of +msgid "Run federated averaging for a number of rounds." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: flwr.server.server.Server.client_manager:1::1 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`fit_round `\\ \\(server\\_round\\," +" timeout\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of -msgid "Aggregate evaluation results." +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit_round:1 of +msgid "Perform a single round of federated averaging." 
msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: flwr.server.server.Server.client_manager:1::1 of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`set_max_workers `\\ " +"\\(max\\_workers\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of -msgid "Aggregate training results." +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_max_workers:1 of +msgid "Set the max_workers used by ThreadPoolExecutor." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`set_strategy `\\ \\(strategy\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_strategy:1 of +msgid "Replace server strategy." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.server.ServerApp.rst:2 +msgid "ServerApp" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.evaluate:1 of -msgid "Evaluate the current model parameters." 
+#: flwr.server.server_app.ServerApp:5 of +msgid "Use the `ServerApp` with an existing `Strategy`:" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.server.server_app.ServerApp:17 of +msgid "Use the `ServerApp` with a custom main function:" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of -msgid "Initialize the (global) model parameters." +#: flwr.server.server_app.ServerApp.main:1::1 of +msgid ":py:obj:`main `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of -msgid "" -"Successful updates from the previously selected and configured clients. " -"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " -"one of the previously selected clients. Not that not all previously " -"selected clients are necessarily included in this list: a client might " -"drop out and not submit a result. For each client that did not submit an " -"update, there should be an `Exception` in `failures`." +#: flwr.server.server_app.ServerApp.main:1 +#: flwr.server.server_app.ServerApp.main:1::1 of +msgid "Return a decorator that registers the main fn with the server app." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of -msgid "Exceptions that occurred while the server was waiting for client updates." +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:2 +msgid "ServerAppComponents" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of +#: flwr.server.serverapp_components.ServerAppComponents:3 of msgid "" -"**aggregation_result** -- The aggregated evaluation result. Aggregation " -"typically uses some variant of a weighted average." 
+"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, one will be created internally." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of +#: flwr.server.app.start_server:9 +#: flwr.server.serverapp_components.ServerAppComponents:6 of msgid "" -"Successful updates from the previously selected and configured clients. " -"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" -" one of the previously selected clients. Not that not all previously " -"selected clients are necessarily included in this list: a client might " -"drop out and not submit a result. For each client that did not submit an " -"update, there should be an `Exception` in `failures`." +"Currently supported values are `num_rounds` (int, default: 1) and " +"`round_timeout` in seconds (float, default: None)." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of +#: flwr.server.serverapp_components.ServerAppComponents:9 of msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the new global model parameters (i.e., it will replace the " -"previous parameters with the ones returned from this method). If `None` " -"is returned (e.g., because there were only failures and no viable " -"results) then the server will no update the previous model parameters, " -"the updates received in this round are discarded, and the global model " -"parameters remain the same." +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`flwr.server.strategy.FedAvg` will be used." msgstr "" -#: flwr.server.strategy.strategy.Strategy.evaluate:3 of +#: flwr.server.serverapp_components.ServerAppComponents:13 of msgid "" -"This function can be used to perform centralized (i.e., server-side) " -"evaluation of model parameters." +"An implementation of the class `flwr.server.ClientManager`. 
If no " +"implementation is provided, then `flwr.server.SimpleClientManager` will " +"be used." msgstr "" -#: flwr.server.strategy.strategy.Strategy.evaluate:11 of +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 msgid "" -"**evaluation_result** -- The evaluation result, usually a Tuple " -"containing loss and a dictionary containing task-specific metrics (e.g., " -"accuracy)." +":py:obj:`client_manager " +"`\\" msgstr "" -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of -msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the initial global model parameters." +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +msgid ":py:obj:`config `\\" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:2 -msgid "workflow" +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +msgid ":py:obj:`server `\\" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -msgid "" -":py:obj:`DefaultWorkflow `\\ " -"\\(\\[fit\\_workflow\\, ...\\]\\)" +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +msgid ":py:obj:`strategy `\\" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of -msgid "Default workflow in Flower." +#: ../../source/ref-api/flwr.server.ServerConfig.rst:2 +msgid "ServerConfig" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.server_config.ServerConfig:3 of msgid "" -":py:obj:`SecAggPlusWorkflow `\\ " -"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" +"All attributes have default values which allows users to configure just " +"the ones they care about." msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 -#: of -msgid "The workflow for the SecAgg+ protocol." 
+#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +msgid ":py:obj:`num_rounds `\\" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -msgid "" -":py:obj:`SecAggWorkflow `\\ " -"\\(reconstruction\\_threshold\\, \\*\\)" +#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +msgid ":py:obj:`round_timeout `\\" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of -msgid "The workflow for the SecAgg protocol." +#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:2 +msgid "SimpleClientManager" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 -msgid "DefaultWorkflow" +#: flwr.server.client_manager.SimpleClientManager:1 of +msgid "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 -msgid "SecAggPlusWorkflow" +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid ":py:obj:`all `\\ \\(\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 -#: of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -"The SecAgg+ protocol ensures the secure summation of integer vectors " -"owned by multiple parties, without accessing any individual integer " -"vector. This workflow allows the server to compute the weighted average " -"of model parameters across all clients, ensuring individual contributions" -" remain private. This is achieved by clients sending both, a weighting " -"factor and a weighted version of the locally updated parameters, both of " -"which are masked for privacy. Specifically, each client uploads \"[w, w *" -" params]\" with masks, where weighting factor 'w' is the number of " -"examples ('num_examples') and 'params' represents the model parameters " -"('parameters') from the client's `FitRes`. 
The server then aggregates " -"these contributions to compute the weighted average of model parameters." +":py:obj:`num_available `\\" +" \\(\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 -#: of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -"The protocol involves four main stages: - 'setup': Send SecAgg+ " -"configuration to clients and collect their public keys. - 'share keys': " -"Broadcast public keys among clients and collect encrypted secret" +":py:obj:`register `\\ " +"\\(client\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 -#: of -msgid "key shares." +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:18 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:18 -#: of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -"'collect masked vectors': Forward encrypted secret key shares to target " -"clients and collect masked model parameters." +":py:obj:`unregister `\\ " +"\\(client\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:20 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:20 -#: of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -"'unmask': Collect secret key shares to decrypt and aggregate the model " -"parameters." 
+":py:obj:`wait_for `\\ " +"\\(num\\_clients\\[\\, timeout\\]\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:22 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:22 -#: of +#: flwr.server.client_manager.SimpleClientManager.wait_for:3 of msgid "" -"Only the aggregated model parameters are exposed and passed to " -"`Strategy.aggregate_fit`, ensuring individual data privacy." +"Blocks until the requested number of clients is available or until a " +"timeout is reached. Current timeout default: 1 day." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:25 -#: of -msgid "" -"The number of shares into which each client's private key is split under " -"the SecAgg+ protocol. If specified as a float, it represents the " -"proportion of all selected clients, and the number of shares will be set " -"dynamically in the run time. A private key can be reconstructed from " -"these shares, allowing for the secure aggregation of model updates. Each " -"client sends one share to each of its neighbors while retaining one." +#: flwr.server.client_manager.SimpleClientManager.wait_for:6 of +msgid "The number of clients to wait for." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:25 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:32 -#: of -msgid "" -"The minimum number of shares required to reconstruct a client's private " -"key, or, if specified as a float, it represents the proportion of the " -"total number of shares needed for reconstruction. This threshold ensures " -"privacy by allowing for the recovery of contributions from dropped " -"clients during aggregation, without compromising individual client data." +#: flwr.server.client_manager.SimpleClientManager.wait_for:8 of +msgid "The time in seconds to wait for, defaults to 86400 (24h)." 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:31 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:38 -#: of -msgid "" -"The maximum value of the weight that can be assigned to any single " -"client's update during the weighted average calculation on the server " -"side, e.g., in the FedAvg algorithm." +#: flwr.server.client_manager.SimpleClientManager.wait_for:11 of +msgid "**success**" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:35 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:42 -#: of -msgid "" -"The range within which model parameters are clipped before quantization. " -"This parameter ensures each model parameter is bounded within " -"[-clipping_range, clipping_range], facilitating quantization." +#: ../../source/ref-api/flwr.server.start_server.rst:2 +msgid "start\\_server" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:39 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:46 -#: of -msgid "" -"The size of the range into which floating-point model parameters are " -"quantized, mapping each parameter to an integer in [0, " -"quantization_range-1]. This facilitates cryptographic operations on the " -"model updates." +#: flwr.server.app.start_server:3 of +msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:43 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:50 -#: of +#: flwr.server.app.start_server:5 of msgid "" -"The range of values from which random mask entries are uniformly sampled " -"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " -"Please use 2**n values for `modulus_range` to prevent overflow issues." 
+"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, then `start_server` will create one." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:47 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:54 -#: of +#: flwr.server.app.start_server:12 of msgid "" -"The timeout duration in seconds. If specified, the workflow will wait for" -" replies for this duration each time. If `None`, there is no time limit " -"and the workflow will wait until replies for all messages are received." +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`start_server` will use `flwr.server.strategy.FedAvg`." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 -#: of +#: flwr.server.app.start_server:16 of msgid "" -"Generally, higher `num_shares` means more robust to dropouts while " -"increasing the computational costs; higher `reconstruction_threshold` " -"means better privacy guarantees but less tolerance to dropouts." -msgstr "" - -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:58 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:64 -#: of -msgid "Too large `max_weight` may compromise the precision of the quantization." -msgstr "" - -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 -#: of -msgid "`modulus_range` must be 2**n and larger than `quantization_range`." +"An implementation of the abstract base class `flwr.server.ClientManager`." +" If no implementation is provided, then `start_server` will use " +"`flwr.server.client_manager.SimpleClientManager`." 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 -#: of +#: flwr.server.app.start_server:21 of msgid "" -"When `num_shares` is a float, it is interpreted as the proportion of all " -"selected clients, and hence the number of shares will be determined in " -"the runtime. This allows for dynamic adjustment based on the total number" -" of participating clients." +"The maximum length of gRPC messages that can be exchanged with the Flower" +" clients. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower clients need to be started with the same value (see " +"`flwr.client.start_client`), otherwise clients will not know about the " +"increased limit and block larger messages." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:69 -#: of +#: flwr.server.app.start_server:28 of msgid "" -"Similarly, when `reconstruction_threshold` is a float, it is interpreted " -"as the proportion of the number of shares needed for the reconstruction " -"of a private key. This feature enables flexibility in setting the " -"security threshold relative to the number of distributed shares." +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order: * CA certificate. * " +"server certificate. * server private key." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:73 -#: of +#: flwr.server.app.start_server:28 of msgid "" -"`num_shares`, `reconstruction_threshold`, and the quantization parameters" -" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " -"roles in balancing privacy, robustness, and efficiency within the SecAgg+" -" protocol." 
+"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order:" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "" -":py:obj:`collect_masked_vectors_stage " -"`\\" -" \\(driver\\, ...\\)" +#: flwr.server.app.start_server:32 of +msgid "CA certificate." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "Execute the 'collect masked vectors' stage." +#: flwr.server.app.start_server:33 of +msgid "server certificate." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "" -":py:obj:`setup_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +#: flwr.server.app.start_server:34 of +msgid "server private key." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 -#: of -msgid "Execute the 'setup' stage." +#: flwr.server.app.start_server:37 of +msgid "**hist** -- Object containing training and evaluation metrics." 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "" -":py:obj:`share_keys_stage " -"`\\ " -"\\(driver\\, context\\, state\\)" +#: flwr.server.app.start_server:42 of +msgid "Starting an insecure server:" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 -#: of -msgid "Execute the 'share keys' stage." +#: flwr.server.app.start_server:46 of +msgid "Starting an SSL-enabled server:" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "" -":py:obj:`unmask_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:2 +msgid "strategy" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 -#: of -msgid "Execute the 'unmask' stage." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`Bulyan `\\ \\(\\*\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 -msgid "SecAggWorkflow" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.bulyan.Bulyan:1 of +msgid "Bulyan strategy." 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"Bases: " -":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" +":py:obj:`DPFedAvgAdaptive `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of -msgid "" -"The SecAgg protocol ensures the secure summation of integer vectors owned" -" by multiple parties, without accessing any individual integer vector. " -"This workflow allows the server to compute the weighted average of model " -"parameters across all clients, ensuring individual contributions remain " -"private. This is achieved by clients sending both, a weighting factor and" -" a weighted version of the locally updated parameters, both of which are " -"masked for privacy. Specifically, each client uploads \"[w, w * params]\"" -" with masks, where weighting factor 'w' is the number of examples " -"('num_examples') and 'params' represents the model parameters " -"('parameters') from the client's `FitRes`. The server then aggregates " -"these contributions to compute the weighted average of model parameters." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"The protocol involves four main stages: - 'setup': Send SecAgg " -"configuration to clients and collect their public keys. 
- 'share keys': " -"Broadcast public keys among clients and collect encrypted secret" +":py:obj:`DPFedAvgFixed `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 of -msgid "" -"Each client's private key is split into N shares under the SecAgg " -"protocol, where N is the number of selected clients." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of +msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:56 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"Generally, higher `reconstruction_threshold` means better privacy " -"guarantees but less tolerance to dropouts." +":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 of -msgid "" -"When `reconstruction_threshold` is a float, it is interpreted as the " -"proportion of the number of all selected clients needed for the " -"reconstruction of a private key. This feature enables flexibility in " -"setting the security threshold relative to the number of selected " -"clients." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: of +msgid "Strategy wrapper for central DP with client-side adaptive clipping." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:64 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"`reconstruction_threshold`, and the quantization parameters " -"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " -"roles in balancing privacy, robustness, and efficiency within the SecAgg " -"protocol." 
+":py:obj:`DifferentialPrivacyClientSideFixedClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 #: of -msgid "" -":py:obj:`collect_masked_vectors_stage " -"`\\ " -"\\(driver\\, ...\\)" +msgid "Strategy wrapper for central DP with client-side fixed clipping." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`setup_stage `\\" -" \\(driver\\, context\\, state\\)" +":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 #: of -msgid "" -":py:obj:`share_keys_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +msgid "Strategy wrapper for central DP with server-side adaptive clipping." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`unmask_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`DifferentialPrivacyServerSideFixedClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:2 -msgid "simulation" -msgstr "" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 +#: of +msgid "Strategy wrapper for central DP with server-side fixed clipping." 
+msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`start_simulation `\\ \\(\\*\\," -" client\\_fn\\[\\, ...\\]\\)" +":py:obj:`FaultTolerantFedAvg " +"`\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 -#: flwr.simulation.app.start_simulation:1 of -msgid "Start a Ray-based Flower simulation server." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of +msgid "Configurable fault-tolerant FedAvg strategy implementation." msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`run_simulation `\\ " -"\\(server\\_app\\, client\\_app\\, ...\\)" -msgstr "" - -#: ../../source/ref-api/flwr.simulation.rst:18::1 -#: flwr.simulation.run_simulation.run_simulation:1 of -msgid "Run a Flower App using the Simulation Engine." +":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 -msgid "run\\_simulation" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 of +msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." msgstr "" -#: flwr.simulation.run_simulation.run_simulation:3 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"The `ServerApp` to be executed. It will send messages to different " -"`ClientApp` instances running on different (virtual) SuperNodes." +":py:obj:`FedAdam `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:6 of -msgid "" -"The `ClientApp` to be executed by each of the SuperNodes. It will receive" -" messages sent by the `ServerApp`." 
+#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedadam.FedAdam:1 of +msgid "FedAdam - Adaptive Federated Optimization using Adam." msgstr "" -#: flwr.simulation.run_simulation.run_simulation:9 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"Number of nodes that run a ClientApp. They can be sampled by a Driver in " -"the ServerApp and receive a Message describing what the ClientApp should " -"perform." +":py:obj:`FedAvg `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:13 of -msgid "A simulation backend that runs `ClientApp`s." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +msgid "Federated Averaging strategy." msgstr "" -#: flwr.simulation.run_simulation.run_simulation:15 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"'A dictionary, e.g {\"\": , \"\": } to " -"configure a backend. Values supported in are those included by " -"`flwr.common.typing.ConfigsRecordValues`." +":py:obj:`FedAvgAndroid `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:19 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"A boolean to indicate whether to enable GPU growth on the main thread. " -"This is desirable if you make use of a TensorFlow model on your " -"`ServerApp` while having your `ClientApp` running on the same GPU. " -"Without enabling this, you might encounter an out-of-memory error because" -" TensorFlow, by default, allocates all GPU memory. Read more about how " -"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " -"documentation: https://www.tensorflow.org/api/stable." 
+":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 of +msgid "Federated Averaging with Momentum strategy." msgstr "" -#: flwr.simulation.run_simulation.run_simulation:26 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"When diabled, only INFO, WARNING and ERROR log messages will be shown. If" -" enabled, DEBUG-level logs will be displayed." +":py:obj:`FedMedian `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 -msgid "start\\_simulation" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedmedian.FedMedian:1 of +msgid "Configurable FedMedian strategy implementation." msgstr "" -#: flwr.simulation.app.start_simulation:3 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"A function creating client instances. The function must take a single " -"`str` argument called `cid`. It should return a single client instance of" -" type Client. Note that the created client instances are ephemeral and " -"will often be destroyed after a single method invocation. Since client " -"instances are not long-lived, they should not attempt to carry state over" -" method invocations. Any state required by the instance (model, dataset, " -"hyperparameters, ...) should be (re-)created in either the call to " -"`client_fn` or the call to any of the client methods (e.g., load " -"evaluation data in the `evaluate` method itself)." +":py:obj:`FedOpt `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: flwr.simulation.app.start_simulation:13 of -msgid "" -"The total number of clients in this simulation. This must be set if " -"`clients_ids` is not set and vice-versa." 
+#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedopt.FedOpt:1 of +msgid "Federated Optim strategy." msgstr "" -#: flwr.simulation.app.start_simulation:16 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"List `client_id`s for each client. This is only required if `num_clients`" -" is not set. Setting both `num_clients` and `clients_ids` with " -"`len(clients_ids)` not equal to `num_clients` generates an error." +":py:obj:`FedProx `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.simulation.app.start_simulation:20 of -msgid "" -"CPU and GPU resources for a single client. Supported keys are `num_cpus` " -"and `num_gpus`. To understand the GPU utilization caused by `num_gpus`, " -"as well as using custom resources, please consult the Ray documentation." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedprox.FedProx:1 of +msgid "Federated Optimization strategy." msgstr "" -#: flwr.simulation.app.start_simulation:25 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"An implementation of the abstract base class `flwr.server.Server`. If no " -"instance is provided, then `start_server` will create one." +":py:obj:`FedTrimmedAvg `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.simulation.app.start_simulation:31 of -msgid "" -"An implementation of the abstract base class `flwr.server.Strategy`. If " -"no strategy is provided, then `start_server` will use " -"`flwr.server.strategy.FedAvg`." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of +msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." msgstr "" -#: flwr.simulation.app.start_simulation:35 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." 
-" If no implementation is provided, then `start_simulation` will use " -"`flwr.server.client_manager.SimpleClientManager`." +":py:obj:`FedXgbBagging `\\ " +"\\(\\[evaluate\\_function\\]\\)" msgstr "" -#: flwr.simulation.app.start_simulation:39 of -msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args: { \"ignore_reinit_error\": True, " -"\"include_dashboard\": False } An empty dictionary can be used " -"(ray_init_args={}) to prevent any arguments from being passed to " -"ray.init." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of +msgid "Configurable FedXgbBagging strategy implementation." msgstr "" -#: flwr.simulation.app.start_simulation:39 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args:" +":py:obj:`FedXgbCyclic `\\ " +"\\(\\*\\*kwargs\\)" msgstr "" -#: flwr.simulation.app.start_simulation:43 of -msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of +msgid "Configurable FedXgbCyclic strategy implementation." msgstr "" -#: flwr.simulation.app.start_simulation:45 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"An empty dictionary can be used (ray_init_args={}) to prevent any " -"arguments from being passed to ray.init." +":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -#: flwr.simulation.app.start_simulation:48 of -msgid "" -"Set to True to prevent `ray.shutdown()` in case " -"`ray.is_initialized()=True`." 
+#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of +msgid "Configurable FedXgbNnAvg strategy implementation." msgstr "" -#: flwr.simulation.app.start_simulation:50 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"Optionally specify the type of actor to use. The actor object, which " -"persists throughout the simulation, will be the process in charge of " -"executing a ClientApp wrapping input argument `client_fn`." +":py:obj:`FedYogi `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.simulation.app.start_simulation:54 of -msgid "" -"If you want to create your own Actor classes, you might need to pass some" -" input argument. You can use this dictionary for such purpose." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "FedYogi [Reddi et al., 2020] strategy." msgstr "" -#: flwr.simulation.app.start_simulation:57 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"(default: \"DEFAULT\") Optional string (\"DEFAULT\" or \"SPREAD\") for " -"the VCE to choose in which node the actor is placed. If you are an " -"advanced user needed more control you can use lower-level scheduling " -"strategies to pin actors to specific compute nodes (e.g. via " -"NodeAffinitySchedulingStrategy). Please note this is an advanced feature." -" For all details, please refer to the Ray documentation: " -"https://docs.ray.io/en/latest/ray-core/scheduling/index.html" -msgstr "" - -#: flwr.simulation.app.start_simulation:66 of -msgid "**hist** -- Object containing metrics from training." +":py:obj:`Krum `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-changelog.md:1 -msgid "Changelog" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.krum.Krum:1 of +msgid "Krum [Blanchard et al., 2017] strategy." 
msgstr "" -#: ../../source/ref-changelog.md:3 -msgid "Unreleased" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " +"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:19 -#: ../../source/ref-changelog.md:83 ../../source/ref-changelog.md:176 -#: ../../source/ref-changelog.md:276 ../../source/ref-changelog.md:360 -#: ../../source/ref-changelog.md:424 ../../source/ref-changelog.md:482 -#: ../../source/ref-changelog.md:551 ../../source/ref-changelog.md:680 -#: ../../source/ref-changelog.md:722 ../../source/ref-changelog.md:789 -#: ../../source/ref-changelog.md:855 ../../source/ref-changelog.md:900 -#: ../../source/ref-changelog.md:939 ../../source/ref-changelog.md:972 -#: ../../source/ref-changelog.md:1022 -msgid "What's new?" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Configurable QFedAvg strategy implementation." 
msgstr "" -#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:71 -#: ../../source/ref-changelog.md:146 ../../source/ref-changelog.md:258 -#: ../../source/ref-changelog.md:348 ../../source/ref-changelog.md:412 -#: ../../source/ref-changelog.md:470 ../../source/ref-changelog.md:539 -#: ../../source/ref-changelog.md:601 ../../source/ref-changelog.md:620 -#: ../../source/ref-changelog.md:776 ../../source/ref-changelog.md:847 -#: ../../source/ref-changelog.md:884 ../../source/ref-changelog.md:927 -msgid "Incompatible changes" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid ":py:obj:`Strategy `\\ \\(\\)" msgstr "" -#: ../../source/ref-changelog.md:9 ../../source/ref-changelog.md:73 -#: ../../source/ref-changelog.md:350 ../../source/ref-changelog.md:414 -#: ../../source/ref-changelog.md:472 ../../source/ref-changelog.md:541 -#: ../../source/ref-changelog.md:603 -msgid "None" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.strategy.Strategy:1 of +msgid "Abstract base class for server strategy implementations." 
msgstr "" -#: ../../source/ref-changelog.md:11 -msgid "v1.8.0 (2024-04-03)" +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:2 +msgid "Bulyan" msgstr "" -#: ../../source/ref-changelog.md:13 ../../source/ref-changelog.md:77 -#: ../../source/ref-changelog.md:170 ../../source/ref-changelog.md:270 -#: ../../source/ref-changelog.md:354 ../../source/ref-changelog.md:418 -#: ../../source/ref-changelog.md:476 ../../source/ref-changelog.md:545 -#: ../../source/ref-changelog.md:614 -msgid "Thanks to our contributors" +#: flwr.server.strategy.bulyan.Bulyan:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 +#: flwr.server.strategy.fedmedian.FedMedian:1 +#: flwr.server.strategy.fedopt.FedOpt:1 flwr.server.strategy.fedprox.FedProx:1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 +#: flwr.server.strategy.krum.Krum:1 flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.fedavg.FedAvg`" msgstr "" -#: ../../source/ref-changelog.md:15 ../../source/ref-changelog.md:79 -#: ../../source/ref-changelog.md:172 ../../source/ref-changelog.md:272 -#: ../../source/ref-changelog.md:356 ../../source/ref-changelog.md:420 -#: ../../source/ref-changelog.md:478 -msgid "" -"We would like to give our special thanks to all the contributors who made" -" the new version of Flower possible (in `git shortlog` order):" +#: flwr.server.strategy.bulyan.Bulyan:3 of +msgid "Implementation based on https://arxiv.org/abs/1802.07927." msgstr "" -#: ../../source/ref-changelog.md:17 -msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata " -"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " -"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " -"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " -"`tabdar-khan` " +#: flwr.server.strategy.bulyan.Bulyan:5 +#: flwr.server.strategy.fedadagrad.FedAdagrad:5 +#: flwr.server.strategy.fedadam.FedAdam:5 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:5 +#: flwr.server.strategy.fedavgm.FedAvgM:5 flwr.server.strategy.fedopt.FedOpt:5 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:5 +#: flwr.server.strategy.fedyogi.FedYogi:5 flwr.server.strategy.krum.Krum:5 of +msgid "Fraction of clients used during training. Defaults to 1.0." msgstr "" -#: ../../source/ref-changelog.md:21 -msgid "" -"**Introduce Flower Next high-level API (stable)** " -"([#3002](https://github.com/adap/flower/pull/3002), " -"[#2934](https://github.com/adap/flower/pull/2934), " -"[#2958](https://github.com/adap/flower/pull/2958), " -"[#3173](https://github.com/adap/flower/pull/3173), " -"[#3174](https://github.com/adap/flower/pull/3174), " -"[#2923](https://github.com/adap/flower/pull/2923), " -"[#2691](https://github.com/adap/flower/pull/2691), " -"[#3079](https://github.com/adap/flower/pull/3079), " -"[#2961](https://github.com/adap/flower/pull/2961), " -"[#2924](https://github.com/adap/flower/pull/2924), " -"[#3166](https://github.com/adap/flower/pull/3166), " -"[#3031](https://github.com/adap/flower/pull/3031), " -"[#3057](https://github.com/adap/flower/pull/3057), " -"[#3000](https://github.com/adap/flower/pull/3000), " -"[#3113](https://github.com/adap/flower/pull/3113), " -"[#2957](https://github.com/adap/flower/pull/2957), " -"[#3183](https://github.com/adap/flower/pull/3183), " -"[#3180](https://github.com/adap/flower/pull/3180), " -"[#3035](https://github.com/adap/flower/pull/3035), " -"[#3189](https://github.com/adap/flower/pull/3189), " -"[#3185](https://github.com/adap/flower/pull/3185), " 
-"[#3190](https://github.com/adap/flower/pull/3190), " -"[#3191](https://github.com/adap/flower/pull/3191), " -"[#3195](https://github.com/adap/flower/pull/3195), " -"[#3197](https://github.com/adap/flower/pull/3197))" +#: flwr.server.strategy.bulyan.Bulyan:7 +#: flwr.server.strategy.fedadagrad.FedAdagrad:7 +#: flwr.server.strategy.fedadam.FedAdam:7 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:7 +#: flwr.server.strategy.fedavgm.FedAvgM:7 flwr.server.strategy.fedopt.FedOpt:7 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:7 +#: flwr.server.strategy.fedyogi.FedYogi:7 flwr.server.strategy.krum.Krum:7 of +msgid "Fraction of clients used during validation. Defaults to 1.0." msgstr "" -#: ../../source/ref-changelog.md:23 -msgid "" -"The Flower Next high-level API is stable! Flower Next is the future of " -"Flower - all new features (like Flower Mods) will be built on top of it. " -"You can start to migrate your existing projects to Flower Next by using " -"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " -"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." -" Flower Next allows you to run multiple projects concurrently (we call " -"this multi-run) and execute the same project in either simulation " -"environments or deployment environments without having to change a single" -" line of code. The best part? It's fully compatible with existing Flower " -"projects that use `Strategy`, `NumPyClient` & co." 
+#: flwr.server.strategy.bulyan.Bulyan:9 +#: flwr.server.strategy.fedadagrad.FedAdagrad:9 +#: flwr.server.strategy.fedadam.FedAdam:9 flwr.server.strategy.fedavg.FedAvg:13 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:9 +#: flwr.server.strategy.fedavgm.FedAvgM:9 flwr.server.strategy.fedopt.FedOpt:9 +#: flwr.server.strategy.fedprox.FedProx:45 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:9 +#: flwr.server.strategy.fedyogi.FedYogi:9 flwr.server.strategy.krum.Krum:9 of +msgid "Minimum number of clients used during training. Defaults to 2." msgstr "" -#: ../../source/ref-changelog.md:25 -msgid "" -"**Introduce Flower Next low-level API (preview)** " -"([#3062](https://github.com/adap/flower/pull/3062), " -"[#3034](https://github.com/adap/flower/pull/3034), " -"[#3069](https://github.com/adap/flower/pull/3069))" +#: flwr.server.strategy.bulyan.Bulyan:11 +#: flwr.server.strategy.fedadagrad.FedAdagrad:11 +#: flwr.server.strategy.fedadam.FedAdam:11 +#: flwr.server.strategy.fedavg.FedAvg:15 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:11 +#: flwr.server.strategy.fedavgm.FedAvgM:11 +#: flwr.server.strategy.fedopt.FedOpt:11 +#: flwr.server.strategy.fedprox.FedProx:47 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:11 +#: flwr.server.strategy.fedyogi.FedYogi:11 flwr.server.strategy.krum.Krum:11 of +msgid "Minimum number of clients used during validation. Defaults to 2." msgstr "" -#: ../../source/ref-changelog.md:27 -msgid "" -"In addition to the Flower Next *high-level* API that uses `Strategy`, " -"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " -"new Flower Next *low-level* API. The low-level API allows for granular " -"control of every aspect of the learning process by sending/receiving " -"individual messages to/from client nodes. The new `ServerApp` supports " -"registering a custom `main` function that allows writing custom training " -"loops for methods like async FL, cyclic training, or federated analytics." 
-" The new `ClientApp` supports registering `train`, `evaluate` and `query`" -" functions that can access the raw message received from the `ServerApp`." -" New abstractions like `RecordSet`, `Message` and `Context` further " -"enable sending multiple models, multiple sets of config values and " -"metrics, stateful computations on the client node and implementations of " -"custom SMPC protocols, to name just a few." +#: flwr.server.strategy.bulyan.Bulyan:13 +#: flwr.server.strategy.fedadagrad.FedAdagrad:13 +#: flwr.server.strategy.fedadam.FedAdam:13 +#: flwr.server.strategy.fedavg.FedAvg:17 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:13 +#: flwr.server.strategy.fedavgm.FedAvgM:13 +#: flwr.server.strategy.fedopt.FedOpt:13 +#: flwr.server.strategy.fedprox.FedProx:49 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:13 +#: flwr.server.strategy.fedyogi.FedYogi:13 flwr.server.strategy.krum.Krum:13 of +msgid "Minimum number of total clients in the system. Defaults to 2." msgstr "" -#: ../../source/ref-changelog.md:29 -msgid "" -"**Introduce Flower Mods (preview)** " -"([#3054](https://github.com/adap/flower/pull/3054), " -"[#2911](https://github.com/adap/flower/pull/2911), " -"[#3083](https://github.com/adap/flower/pull/3083))" +#: flwr.server.strategy.bulyan.Bulyan:15 flwr.server.strategy.krum.Krum:15 of +msgid "Number of malicious clients in the system. Defaults to 0." msgstr "" -#: ../../source/ref-changelog.md:31 -msgid "" -"Flower Modifiers (we call them Mods) can intercept messages and analyze, " -"edit or handle them directly. Mods can be used to develop pluggable " -"modules that work across different projects. Flower 1.8 already includes " -"mods to log the size of a message, the number of parameters sent over the" -" network, differential privacy with fixed clipping and adaptive clipping," -" local differential privacy and secure aggregation protocols SecAgg and " -"SecAgg+. 
The Flower Mods API is released as a preview, but researchers " -"can already use it to experiment with arbirtrary SMPC protocols." +#: flwr.server.strategy.bulyan.Bulyan:17 +#: flwr.server.strategy.fedadagrad.FedAdagrad:15 +#: flwr.server.strategy.fedadam.FedAdam:15 +#: flwr.server.strategy.fedavg.FedAvg:19 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:15 +#: flwr.server.strategy.fedavgm.FedAvgM:15 +#: flwr.server.strategy.fedopt.FedOpt:15 +#: flwr.server.strategy.fedprox.FedProx:51 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:15 +#: flwr.server.strategy.fedyogi.FedYogi:17 +#: flwr.server.strategy.fedyogi.FedYogi:18 +#: flwr.server.strategy.fedyogi.FedYogi:19 flwr.server.strategy.krum.Krum:20 of +msgid "Optional function used for validation. Defaults to None." msgstr "" -#: ../../source/ref-changelog.md:33 -msgid "" -"**Fine-tune LLMs with LLM FlowerTune** " -"([#3029](https://github.com/adap/flower/pull/3029), " -"[#3089](https://github.com/adap/flower/pull/3089), " -"[#3092](https://github.com/adap/flower/pull/3092), " -"[#3100](https://github.com/adap/flower/pull/3100), " -"[#3114](https://github.com/adap/flower/pull/3114), " -"[#3162](https://github.com/adap/flower/pull/3162), " -"[#3172](https://github.com/adap/flower/pull/3172))" +#: flwr.server.strategy.bulyan.Bulyan:19 +#: flwr.server.strategy.fedadagrad.FedAdagrad:17 +#: flwr.server.strategy.fedadam.FedAdam:17 +#: flwr.server.strategy.fedavg.FedAvg:21 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:17 +#: flwr.server.strategy.fedavgm.FedAvgM:17 +#: flwr.server.strategy.fedopt.FedOpt:17 +#: flwr.server.strategy.fedprox.FedProx:53 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:17 +#: flwr.server.strategy.fedyogi.FedYogi:20 flwr.server.strategy.krum.Krum:22 of +msgid "Function used to configure training. Defaults to None." 
msgstr "" -#: ../../source/ref-changelog.md:35 -msgid "" -"We are introducing LLM FlowerTune, an introductory example that " -"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " -"the Alpaca-GPT4 dataset. The example is built to be easily adapted to use" -" different models and/or datasets. Read our blog post [LLM FlowerTune: " -"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" -"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." +#: flwr.server.strategy.bulyan.Bulyan:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:19 +#: flwr.server.strategy.fedadam.FedAdam:19 +#: flwr.server.strategy.fedavg.FedAvg:23 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:19 +#: flwr.server.strategy.fedavgm.FedAvgM:19 +#: flwr.server.strategy.fedopt.FedOpt:19 +#: flwr.server.strategy.fedprox.FedProx:55 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:19 +#: flwr.server.strategy.fedyogi.FedYogi:22 flwr.server.strategy.krum.Krum:24 of +msgid "Function used to configure validation. Defaults to None." 
msgstr "" -#: ../../source/ref-changelog.md:37 -msgid "" -"**Introduce built-in Differential Privacy (preview)** " -"([#2798](https://github.com/adap/flower/pull/2798), " -"[#2959](https://github.com/adap/flower/pull/2959), " -"[#3038](https://github.com/adap/flower/pull/3038), " -"[#3147](https://github.com/adap/flower/pull/3147), " -"[#2909](https://github.com/adap/flower/pull/2909), " -"[#2893](https://github.com/adap/flower/pull/2893), " -"[#2892](https://github.com/adap/flower/pull/2892), " -"[#3039](https://github.com/adap/flower/pull/3039), " -"[#3074](https://github.com/adap/flower/pull/3074))" +#: flwr.server.strategy.bulyan.Bulyan:23 +#: flwr.server.strategy.fedadagrad.FedAdagrad:25 +#: flwr.server.strategy.fedadam.FedAdam:21 +#: flwr.server.strategy.fedavg.FedAvg:25 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:21 +#: flwr.server.strategy.fedavgm.FedAvgM:21 +#: flwr.server.strategy.fedopt.FedOpt:21 +#: flwr.server.strategy.fedprox.FedProx:57 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:21 +#: flwr.server.strategy.fedyogi.FedYogi:24 flwr.server.strategy.krum.Krum:26 of +msgid "Whether or not accept rounds containing failures. Defaults to True." msgstr "" -#: ../../source/ref-changelog.md:39 -msgid "" -"Built-in Differential Privacy is here! Flower supports both central and " -"local differential privacy (DP). Central DP can be configured with either" -" fixed or adaptive clipping. The clipping can happen either on the " -"server-side or the client-side. Local DP does both clipping and noising " -"on the client-side. A new documentation page [explains Differential " -"Privacy approaches](https://flower.ai/docs/framework/explanation-" -"differential-privacy.html) and a new how-to guide describes [how to use " -"the new Differential Privacy components](https://flower.ai/docs/framework" -"/how-to-use-differential-privacy.html) in Flower." 
+#: flwr.server.strategy.bulyan.Bulyan:25 +#: flwr.server.strategy.fedadagrad.FedAdagrad:27 +#: flwr.server.strategy.fedadam.FedAdam:23 +#: flwr.server.strategy.fedavg.FedAvg:27 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:24 +#: flwr.server.strategy.fedavgm.FedAvgM:23 +#: flwr.server.strategy.fedopt.FedOpt:23 +#: flwr.server.strategy.fedprox.FedProx:59 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:23 +#: flwr.server.strategy.fedyogi.FedYogi:26 flwr.server.strategy.krum.Krum:28 of +msgid "Initial global model parameters." msgstr "" -#: ../../source/ref-changelog.md:41 +#: flwr.server.strategy.bulyan.Bulyan:27 of msgid "" -"**Introduce built-in Secure Aggregation (preview)** " -"([#3120](https://github.com/adap/flower/pull/3120), " -"[#3110](https://github.com/adap/flower/pull/3110), " -"[#3108](https://github.com/adap/flower/pull/3108))" +"Byzantine resilient aggregation rule that is used as the first step of " +"the Bulyan (e.g., Krum)" msgstr "" -#: ../../source/ref-changelog.md:43 -msgid "" -"Built-in Secure Aggregation is here! Flower now supports different secure" -" aggregation protocols out-of-the-box. The best part? You can add secure " -"aggregation to your Flower projects with only a few lines of code. In " -"this initial release, we inlcude support for SecAgg and SecAgg+, but more" -" protocols will be implemented shortly. We'll also add detailed docs that" -" explain secure aggregation and how to use it in Flower. You can already " -"check out the new code example that shows how to use Flower to easily " -"combine Federated Learning, Differential Privacy and Secure Aggregation " -"in the same project." 
+#: flwr.server.strategy.bulyan.Bulyan:29 of +msgid "arguments to the first_aggregation rule" msgstr "" -#: ../../source/ref-changelog.md:45 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce** `flwr` **CLI (preview)** " -"([#2942](https://github.com/adap/flower/pull/2942), " -"[#3055](https://github.com/adap/flower/pull/3055), " -"[#3111](https://github.com/adap/flower/pull/3111), " -"[#3130](https://github.com/adap/flower/pull/3130), " -"[#3136](https://github.com/adap/flower/pull/3136), " -"[#3094](https://github.com/adap/flower/pull/3094), " -"[#3059](https://github.com/adap/flower/pull/3059), " -"[#3049](https://github.com/adap/flower/pull/3049), " -"[#3142](https://github.com/adap/flower/pull/3142))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:47 -msgid "" -"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" -" and then running them using the Simulation Engine (`flwr run`)." +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Aggregate evaluation losses using weighted average." 
msgstr "" -#: ../../source/ref-changelog.md:49 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce Flower Next Simulation Engine** " -"([#3024](https://github.com/adap/flower/pull/3024), " -"[#3061](https://github.com/adap/flower/pull/3061), " -"[#2997](https://github.com/adap/flower/pull/2997), " -"[#2783](https://github.com/adap/flower/pull/2783), " -"[#3184](https://github.com/adap/flower/pull/3184), " -"[#3075](https://github.com/adap/flower/pull/3075), " -"[#3047](https://github.com/adap/flower/pull/3047), " -"[#2998](https://github.com/adap/flower/pull/2998), " -"[#3009](https://github.com/adap/flower/pull/3009), " -"[#3008](https://github.com/adap/flower/pull/3008))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:51 -msgid "" -"The Flower Simulation Engine can now run Flower Next projects. For " -"notebook environments, there's also a new `run_simulation` function that " -"can run `ServerApp` and `ClientApp`." +#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "Aggregate fit results using Bulyan." 
msgstr "" -#: ../../source/ref-changelog.md:53 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Handle SuperNode connection errors** " -"([#2969](https://github.com/adap/flower/pull/2969))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:55 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 +#: 
flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of +msgid "Configure the next round of evaluation." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A SuperNode will now try to reconnect indefinitely to the SuperLink in " -"case of connection errors. The arguments `--max-retries` and `--max-wait-" -"time` can now be passed to the `flower-client-app` command. `--max-" -"retries` will define the number of tentatives the client should make " -"before it gives up trying to reconnect to the SuperLink, and, `--max-" -"wait-time` defines the time before the SuperNode gives up trying to " -"reconnect to the SuperLink." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:57 -msgid "" -"**General updates to Flower Baselines** " -"([#2904](https://github.com/adap/flower/pull/2904), " -"[#2482](https://github.com/adap/flower/pull/2482), " -"[#2985](https://github.com/adap/flower/pull/2985), " -"[#2968](https://github.com/adap/flower/pull/2968))" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: 
flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_fit:1 +#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of +msgid "Configure the next round of training." msgstr "" -#: ../../source/ref-changelog.md:59 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " -"baseline. Several other baselined have been updated as well." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:61 -msgid "" -"**Improve documentation and translations** " -"([#3050](https://github.com/adap/flower/pull/3050), " -"[#3044](https://github.com/adap/flower/pull/3044), " -"[#3043](https://github.com/adap/flower/pull/3043), " -"[#2986](https://github.com/adap/flower/pull/2986), " -"[#3041](https://github.com/adap/flower/pull/3041), " -"[#3046](https://github.com/adap/flower/pull/3046), " -"[#3042](https://github.com/adap/flower/pull/3042), " -"[#2978](https://github.com/adap/flower/pull/2978), " -"[#2952](https://github.com/adap/flower/pull/2952), " -"[#3167](https://github.com/adap/flower/pull/3167), " -"[#2953](https://github.com/adap/flower/pull/2953), " -"[#3045](https://github.com/adap/flower/pull/3045), " -"[#2654](https://github.com/adap/flower/pull/2654), " -"[#3082](https://github.com/adap/flower/pull/3082), " -"[#2990](https://github.com/adap/flower/pull/2990), " -"[#2989](https://github.com/adap/flower/pull/2989))" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Evaluate model parameters using an evaluation function." 
msgstr "" -#: ../../source/ref-changelog.md:63 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"As usual, we merged many smaller and larger improvements to the " -"documentation. A special thank you goes to [Sebastian van der " -"Voort](https://github.com/svdvoort) for landing a big documentation PR!" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:65 -msgid "" -"**General updates to Flower Examples** " -"([3134](https://github.com/adap/flower/pull/3134), " -"[2996](https://github.com/adap/flower/pull/2996), " -"[2930](https://github.com/adap/flower/pull/2930), " -"[2967](https://github.com/adap/flower/pull/2967), " -"[2467](https://github.com/adap/flower/pull/2467), " -"[2910](https://github.com/adap/flower/pull/2910), " -"[#2918](https://github.com/adap/flower/pull/2918), " -"[#2773](https://github.com/adap/flower/pull/2773), " -"[#3063](https://github.com/adap/flower/pull/3063), " -"[#3116](https://github.com/adap/flower/pull/3116), " -"[#3117](https://github.com/adap/flower/pull/3117))" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.initialize_parameters:1 +#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Initialize global model parameters." 
msgstr "" -#: ../../source/ref-changelog.md:67 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Two new examples show federated training of a Vision Transformer (ViT) " -"and federated learning in a medical context using the popular MONAI " -"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" -" new Flower Next `ServerApp` and `ClientApp`. Many other examples " -"received considerable updates as well." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:69 -msgid "" -"**General improvements** " -"([#3171](https://github.com/adap/flower/pull/3171), " -"[3099](https://github.com/adap/flower/pull/3099), " -"[3003](https://github.com/adap/flower/pull/3003), " -"[3145](https://github.com/adap/flower/pull/3145), " -"[3017](https://github.com/adap/flower/pull/3017), " -"[3085](https://github.com/adap/flower/pull/3085), " -"[3012](https://github.com/adap/flower/pull/3012), " -"[3119](https://github.com/adap/flower/pull/3119), " -"[2991](https://github.com/adap/flower/pull/2991), " -"[2970](https://github.com/adap/flower/pull/2970), " -"[2980](https://github.com/adap/flower/pull/2980), " -"[3086](https://github.com/adap/flower/pull/3086), " -"[2932](https://github.com/adap/flower/pull/2932), " -"[2928](https://github.com/adap/flower/pull/2928), " -"[2941](https://github.com/adap/flower/pull/2941), " -"[2933](https://github.com/adap/flower/pull/2933), " -"[3181](https://github.com/adap/flower/pull/3181), " -"[2973](https://github.com/adap/flower/pull/2973), " -"[2992](https://github.com/adap/flower/pull/2992), " -"[2915](https://github.com/adap/flower/pull/2915), " -"[3040](https://github.com/adap/flower/pull/3040), " -"[3022](https://github.com/adap/flower/pull/3022), " -"[3032](https://github.com/adap/flower/pull/3032), " -"[2902](https://github.com/adap/flower/pull/2902), " -"[2931](https://github.com/adap/flower/pull/2931), " 
-"[3005](https://github.com/adap/flower/pull/3005), " -"[3132](https://github.com/adap/flower/pull/3132), " -"[3115](https://github.com/adap/flower/pull/3115), " -"[2944](https://github.com/adap/flower/pull/2944), " -"[3064](https://github.com/adap/flower/pull/3064), " -"[3106](https://github.com/adap/flower/pull/3106), " -"[2974](https://github.com/adap/flower/pull/2974), " -"[3178](https://github.com/adap/flower/pull/3178), " -"[2993](https://github.com/adap/flower/pull/2993), " -"[3186](https://github.com/adap/flower/pull/3186), " -"[3091](https://github.com/adap/flower/pull/3091), " -"[3125](https://github.com/adap/flower/pull/3125), " -"[3093](https://github.com/adap/flower/pull/3093), " -"[3013](https://github.com/adap/flower/pull/3013), " -"[3033](https://github.com/adap/flower/pull/3033), " -"[3133](https://github.com/adap/flower/pull/3133), " -"[3068](https://github.com/adap/flower/pull/3068), " -"[2916](https://github.com/adap/flower/pull/2916), " -"[2975](https://github.com/adap/flower/pull/2975), " -"[2984](https://github.com/adap/flower/pull/2984), " -"[2846](https://github.com/adap/flower/pull/2846), " -"[3077](https://github.com/adap/flower/pull/3077), " -"[3143](https://github.com/adap/flower/pull/3143), " -"[2921](https://github.com/adap/flower/pull/2921), " -"[3101](https://github.com/adap/flower/pull/3101), " -"[2927](https://github.com/adap/flower/pull/2927), " -"[2995](https://github.com/adap/flower/pull/2995), " -"[2972](https://github.com/adap/flower/pull/2972), " -"[2912](https://github.com/adap/flower/pull/2912), " -"[3065](https://github.com/adap/flower/pull/3065), " -"[3028](https://github.com/adap/flower/pull/3028), " -"[2922](https://github.com/adap/flower/pull/2922), " -"[2982](https://github.com/adap/flower/pull/2982), " -"[2914](https://github.com/adap/flower/pull/2914), " -"[3179](https://github.com/adap/flower/pull/3179), " -"[3080](https://github.com/adap/flower/pull/3080), " -"[2994](https://github.com/adap/flower/pull/2994), " 
-"[3187](https://github.com/adap/flower/pull/3187), " -"[2926](https://github.com/adap/flower/pull/2926), " -"[3018](https://github.com/adap/flower/pull/3018), " -"[3144](https://github.com/adap/flower/pull/3144), " -"[3011](https://github.com/adap/flower/pull/3011), " -"[#3152](https://github.com/adap/flower/pull/3152), " -"[#2836](https://github.com/adap/flower/pull/2836), " -"[#2929](https://github.com/adap/flower/pull/2929), " -"[#2943](https://github.com/adap/flower/pull/2943), " -"[#2955](https://github.com/adap/flower/pull/2955), " -"[#2954](https://github.com/adap/flower/pull/2954))" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_evaluation_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of +msgid "Use a fraction of available clients for evaluation." msgstr "" -#: ../../source/ref-changelog.md:75 -msgid "v1.7.0 (2024-02-05)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:81 -msgid "" -"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " -"Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo " -"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " -"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " -"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " -"Shaaban`, `Yan Gao`, `Yasar Abbas` " +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_fit_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of +msgid "Return the sample size and the required number of available clients." msgstr "" -#: ../../source/ref-changelog.md:85 -msgid "" -"**Introduce stateful clients (experimental)** " -"([#2770](https://github.com/adap/flower/pull/2770), " -"[#2686](https://github.com/adap/flower/pull/2686), " -"[#2696](https://github.com/adap/flower/pull/2696), " -"[#2643](https://github.com/adap/flower/pull/2643), " -"[#2769](https://github.com/adap/flower/pull/2769))" +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 +msgid "DPFedAvgAdaptive" msgstr "" -#: ../../source/ref-changelog.md:87 -msgid "" -"Subclasses of `Client` and `NumPyClient` can now store local state that " -"remains on the client. Let's start with the highlight first: this new " -"feature is compatible with both simulated clients (via " -"`start_simulation`) and networked clients (via `start_client`). It's also" -" the first preview of new abstractions like `Context` and `RecordSet`. 
" -"Clients can access state of type `RecordSet` via `state: RecordSet = " -"self.context.state`. Changes to this `RecordSet` are preserved across " -"different rounds of execution to enable stateful computations in a " -"unified way across simulation and deployment." +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" msgstr "" -#: ../../source/ref-changelog.md:89 -msgid "" -"**Improve performance** " -"([#2293](https://github.com/adap/flower/pull/2293))" +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:3 of +msgid "This class is deprecated and will be removed in a future release." msgstr "" -#: ../../source/ref-changelog.md:91 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" -"place aggregation to reduce memory consumption. The Flower client " -"serialization/deserialization has been rewritten from the ground up, " -"which results in significant speedups, especially when the client-side " -"training time is short." 
+":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:93 -msgid "" -"**Support Federated Learning with Apple MLX and Flower** " -"([#2693](https://github.com/adap/flower/pull/2693))" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation losses using the given strategy." msgstr "" -#: ../../source/ref-changelog.md:95 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"Flower has official support for federated learning using [Apple " -"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " -"example." 
+":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:97 -msgid "" -"**Introduce new XGBoost cyclic strategy** " -"([#2666](https://github.com/adap/flower/pull/2666), " -"[#2668](https://github.com/adap/flower/pull/2668))" +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." msgstr "" -#: ../../source/ref-changelog.md:99 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"A new strategy called `FedXgbCyclic` supports a client-by-client style of" -" training (often called cyclic). The `xgboost-comprehensive` code example" -" shows how to use it in a full project. In addition to that, `xgboost-" -"comprehensive` now also supports simulation mode. With this, Flower " -"offers best-in-class XGBoost support." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:101 -msgid "" -"**Support Python 3.11** " -"([#2394](https://github.com/adap/flower/pull/2394))" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of +msgid "Configure the next round of evaluation using the specified strategy." msgstr "" -#: ../../source/ref-changelog.md:103 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " -"ensure better support for users using more recent Python versions." 
+":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:105 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"**Update gRPC and ProtoBuf dependencies** " -"([#2814](https://github.com/adap/flower/pull/2814))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:107 -msgid "" -"The `grpcio` and `protobuf` dependencies were updated to their latest " -"versions for improved security and performance." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of +msgid "Evaluate model parameters using an evaluation function from the strategy." 
msgstr "" -#: ../../source/ref-changelog.md:109 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce Docker image for Flower server** " -"([#2700](https://github.com/adap/flower/pull/2700), " -"[#2688](https://github.com/adap/flower/pull/2688), " -"[#2705](https://github.com/adap/flower/pull/2705), " -"[#2695](https://github.com/adap/flower/pull/2695), " -"[#2747](https://github.com/adap/flower/pull/2747), " -"[#2746](https://github.com/adap/flower/pull/2746), " -"[#2680](https://github.com/adap/flower/pull/2680), " -"[#2682](https://github.com/adap/flower/pull/2682), " -"[#2701](https://github.com/adap/flower/pull/2701))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:111 -msgid "" -"The Flower server can now be run using an official Docker image. A new " -"how-to guide explains [how to run Flower using " -"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html). An official Flower client Docker image will follow." 
+#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of +msgid "Initialize global model parameters using given strategy." msgstr "" -#: ../../source/ref-changelog.md:113 -msgid "" -"**Introduce** `flower-via-docker-compose` **example** " -"([#2626](https://github.com/adap/flower/pull/2626))" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.configure_fit:3 +#: flwr.server.strategy.strategy.Strategy.evaluate:6 of +msgid "The current round of federated learning." 
msgstr "" -#: ../../source/ref-changelog.md:115 -msgid "" -"**Introduce** `quickstart-sklearn-tabular` **example** " -"([#2719](https://github.com/adap/flower/pull/2719))" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 +#: flwr.server.strategy.strategy.Strategy.configure_fit:7 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of +msgid "The client manager which holds all currently connected clients." msgstr "" -#: ../../source/ref-changelog.md:117 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of msgid "" -"**Introduce** `custom-metrics` **example** " -"([#1958](https://github.com/adap/flower/pull/1958))" +"**evaluate_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `EvaluateIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated evaluation." msgstr "" -#: ../../source/ref-changelog.md:119 -msgid "" -"**Update code examples to use Flower Datasets** " -"([#2450](https://github.com/adap/flower/pull/2450), " -"[#2456](https://github.com/adap/flower/pull/2456), " -"[#2318](https://github.com/adap/flower/pull/2318), " -"[#2712](https://github.com/adap/flower/pull/2712))" +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 +msgid "DPFedAvgFixed" msgstr "" -#: ../../source/ref-changelog.md:121 -msgid "" -"Several code examples were updated to use [Flower " -"Datasets](https://flower.ai/docs/datasets/)." 
+#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" msgstr "" -#: ../../source/ref-changelog.md:123 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"**General updates to Flower Examples** " -"([#2381](https://github.com/adap/flower/pull/2381), " -"[#2805](https://github.com/adap/flower/pull/2805), " -"[#2782](https://github.com/adap/flower/pull/2782), " -"[#2806](https://github.com/adap/flower/pull/2806), " -"[#2829](https://github.com/adap/flower/pull/2829), " -"[#2825](https://github.com/adap/flower/pull/2825), " -"[#2816](https://github.com/adap/flower/pull/2816), " -"[#2726](https://github.com/adap/flower/pull/2726), " -"[#2659](https://github.com/adap/flower/pull/2659), " -"[#2655](https://github.com/adap/flower/pull/2655))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:125 -msgid "Many Flower code examples received substantial updates." 
+#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:127 ../../source/ref-changelog.md:220 -msgid "**Update Flower Baselines**" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of +msgid "Aggregate training results using unweighted aggregation." msgstr "" -#: ../../source/ref-changelog.md:129 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " -"[#2771](https://github.com/adap/flower/pull/2771))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:130 -msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:131 -msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of +msgid "" +"Configure the next round of training incorporating Differential Privacy " +"(DP)." 
msgstr "" -#: ../../source/ref-changelog.md:132 -msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:133 -msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:134 -msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of +msgid "" +"Configuration of the next training round includes information related to " +"DP, such as clip norm and noise stddev." msgstr "" -#: ../../source/ref-changelog.md:136 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 +#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of msgid "" -"**Improve documentation** " -"([#2674](https://github.com/adap/flower/pull/2674), " -"[#2480](https://github.com/adap/flower/pull/2480), " -"[#2826](https://github.com/adap/flower/pull/2826), " -"[#2727](https://github.com/adap/flower/pull/2727), " -"[#2761](https://github.com/adap/flower/pull/2761), " -"[#2900](https://github.com/adap/flower/pull/2900))" +"**fit_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `FitIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated learning." 
msgstr "" -#: ../../source/ref-changelog.md:138 -msgid "" -"**Improved testing and development infrastructure** " -"([#2797](https://github.com/adap/flower/pull/2797), " -"[#2676](https://github.com/adap/flower/pull/2676), " -"[#2644](https://github.com/adap/flower/pull/2644), " -"[#2656](https://github.com/adap/flower/pull/2656), " -"[#2848](https://github.com/adap/flower/pull/2848), " -"[#2675](https://github.com/adap/flower/pull/2675), " -"[#2735](https://github.com/adap/flower/pull/2735), " -"[#2767](https://github.com/adap/flower/pull/2767), " -"[#2732](https://github.com/adap/flower/pull/2732), " -"[#2744](https://github.com/adap/flower/pull/2744), " -"[#2681](https://github.com/adap/flower/pull/2681), " -"[#2699](https://github.com/adap/flower/pull/2699), " -"[#2745](https://github.com/adap/flower/pull/2745), " -"[#2734](https://github.com/adap/flower/pull/2734), " -"[#2731](https://github.com/adap/flower/pull/2731), " -"[#2652](https://github.com/adap/flower/pull/2652), " -"[#2720](https://github.com/adap/flower/pull/2720), " -"[#2721](https://github.com/adap/flower/pull/2721), " -"[#2717](https://github.com/adap/flower/pull/2717), " -"[#2864](https://github.com/adap/flower/pull/2864), " -"[#2694](https://github.com/adap/flower/pull/2694), " -"[#2709](https://github.com/adap/flower/pull/2709), " -"[#2658](https://github.com/adap/flower/pull/2658), " -"[#2796](https://github.com/adap/flower/pull/2796), " -"[#2692](https://github.com/adap/flower/pull/2692), " -"[#2657](https://github.com/adap/flower/pull/2657), " -"[#2813](https://github.com/adap/flower/pull/2813), " -"[#2661](https://github.com/adap/flower/pull/2661), " -"[#2398](https://github.com/adap/flower/pull/2398))" +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyClientSideAdaptiveClipping" msgstr "" -#: ../../source/ref-changelog.md:140 -msgid "" -"The Flower testing and development infrastructure has received " 
-"substantial updates. This makes Flower 1.7 the most tested release ever." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 +#: of +msgid "Use `adaptiveclipping_mod` modifier at the client side." msgstr "" -#: ../../source/ref-changelog.md:142 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 +#: of msgid "" -"**Update dependencies** " -"([#2753](https://github.com/adap/flower/pull/2753), " -"[#2651](https://github.com/adap/flower/pull/2651), " -"[#2739](https://github.com/adap/flower/pull/2739), " -"[#2837](https://github.com/adap/flower/pull/2837), " -"[#2788](https://github.com/adap/flower/pull/2788), " -"[#2811](https://github.com/adap/flower/pull/2811), " -"[#2774](https://github.com/adap/flower/pull/2774), " -"[#2790](https://github.com/adap/flower/pull/2790), " -"[#2751](https://github.com/adap/flower/pull/2751), " -"[#2850](https://github.com/adap/flower/pull/2850), " -"[#2812](https://github.com/adap/flower/pull/2812), " -"[#2872](https://github.com/adap/flower/pull/2872), " -"[#2736](https://github.com/adap/flower/pull/2736), " -"[#2756](https://github.com/adap/flower/pull/2756), " -"[#2857](https://github.com/adap/flower/pull/2857), " -"[#2757](https://github.com/adap/flower/pull/2757), " -"[#2810](https://github.com/adap/flower/pull/2810), " -"[#2740](https://github.com/adap/flower/pull/2740), " -"[#2789](https://github.com/adap/flower/pull/2789))" +"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " +"happen on the client-side, usually by using the built-in " +"`adaptiveclipping_mod`." 
msgstr "" -#: ../../source/ref-changelog.md:144 -msgid "" -"**General improvements** " -"([#2803](https://github.com/adap/flower/pull/2803), " -"[#2847](https://github.com/adap/flower/pull/2847), " -"[#2877](https://github.com/adap/flower/pull/2877), " -"[#2690](https://github.com/adap/flower/pull/2690), " -"[#2889](https://github.com/adap/flower/pull/2889), " -"[#2874](https://github.com/adap/flower/pull/2874), " -"[#2819](https://github.com/adap/flower/pull/2819), " -"[#2689](https://github.com/adap/flower/pull/2689), " -"[#2457](https://github.com/adap/flower/pull/2457), " -"[#2870](https://github.com/adap/flower/pull/2870), " -"[#2669](https://github.com/adap/flower/pull/2669), " -"[#2876](https://github.com/adap/flower/pull/2876), " -"[#2885](https://github.com/adap/flower/pull/2885), " -"[#2858](https://github.com/adap/flower/pull/2858), " -"[#2867](https://github.com/adap/flower/pull/2867), " -"[#2351](https://github.com/adap/flower/pull/2351), " -"[#2886](https://github.com/adap/flower/pull/2886), " -"[#2860](https://github.com/adap/flower/pull/2860), " -"[#2828](https://github.com/adap/flower/pull/2828), " -"[#2869](https://github.com/adap/flower/pull/2869), " -"[#2875](https://github.com/adap/flower/pull/2875), " -"[#2733](https://github.com/adap/flower/pull/2733), " -"[#2488](https://github.com/adap/flower/pull/2488), " -"[#2646](https://github.com/adap/flower/pull/2646), " -"[#2879](https://github.com/adap/flower/pull/2879), " -"[#2821](https://github.com/adap/flower/pull/2821), " -"[#2855](https://github.com/adap/flower/pull/2855), " -"[#2800](https://github.com/adap/flower/pull/2800), " -"[#2807](https://github.com/adap/flower/pull/2807), " -"[#2801](https://github.com/adap/flower/pull/2801), " -"[#2804](https://github.com/adap/flower/pull/2804), " -"[#2851](https://github.com/adap/flower/pull/2851), " -"[#2787](https://github.com/adap/flower/pull/2787), " -"[#2852](https://github.com/adap/flower/pull/2852), " 
-"[#2672](https://github.com/adap/flower/pull/2672), " -"[#2759](https://github.com/adap/flower/pull/2759))" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 +#: of +msgid "The strategy to which DP functionalities will be added by this wrapper." msgstr "" -#: ../../source/ref-changelog.md:148 -msgid "" -"**Deprecate** `start_numpy_client` " -"([#2563](https://github.com/adap/flower/pull/2563), " -"[#2718](https://github.com/adap/flower/pull/2718))" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 +#: of +msgid "The noise multiplier for the Gaussian mechanism for model updates." msgstr "" -#: ../../source/ref-changelog.md:150 -msgid "" -"Until now, clients of type `NumPyClient` needed to be started via " -"`start_numpy_client`. In our efforts to consolidate framework APIs, we " -"have introduced changes, and now all client types should start via " -"`start_client`. To continue using `NumPyClient` clients, you simply need " -"to first call the `.to_client()` method and then pass returned `Client` " -"object to `start_client`. The examples and the documentation have been " -"updated accordingly." 
+#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 +#: of +msgid "The number of clients that are sampled on each round." msgstr "" -#: ../../source/ref-changelog.md:152 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 +#: of msgid "" -"**Deprecate legacy DP wrappers** " -"([#2749](https://github.com/adap/flower/pull/2749))" +"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " +"recommends to set to 0.1." msgstr "" -#: ../../source/ref-changelog.md:154 -msgid "" -"Legacy DP wrapper classes are deprecated, but still functional. This is " -"in preparation for an all-new pluggable version of differential privacy " -"support in Flower." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 +#: of +msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." msgstr "" -#: ../../source/ref-changelog.md:156 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 +#: of msgid "" -"**Make optional arg** `--callable` **in** `flower-client` **a required " -"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" +"The learning rate for the clipping norm adaptation. Defaults to 0.2. " +"Andrew et al. recommends to set to 0.2." 
msgstr "" -#: ../../source/ref-changelog.md:158 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 +#: of msgid "" -"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " -"([#2890](https://github.com/adap/flower/pull/2890))" +"The stddev of the noise added to the count of updates currently below the" +" estimate. Andrew et al. recommends to set to `expected_num_records/20`" msgstr "" -#: ../../source/ref-changelog.md:160 -msgid "" -"**Drop experimental** `Task` **fields** " -"([#2866](https://github.com/adap/flower/pull/2866), " -"[#2865](https://github.com/adap/flower/pull/2865))" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 +#: of +msgid "Create a strategy:" msgstr "" -#: ../../source/ref-changelog.md:162 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 +#: of msgid "" -"Experimental fields `sa`, `legacy_server_message` and " -"`legacy_client_message` were removed from `Task` message. The removed " -"fields are superseded by the new `RecordSet` abstraction." 
+"Wrap the strategy with the " +"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" msgstr "" -#: ../../source/ref-changelog.md:164 -msgid "" -"**Retire MXNet examples** " -"([#2724](https://github.com/adap/flower/pull/2724))" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 +#: of +msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" msgstr "" -#: ../../source/ref-changelog.md:166 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"The development of the MXNet fremework has ended and the project is now " -"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " -"examples won't receive updates." +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:168 -msgid "v1.6.0 (2023-11-28)" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:174 -msgid "" -"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " -"`Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Gabriel " -"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," -" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " -"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " -"`cnxdeveloper`, `k3nfalt` " +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 +#: of +msgid "Aggregate training results and update clip norms." msgstr "" -#: ../../source/ref-changelog.md:178 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Add experimental support for Python 3.12** " -"([#2565](https://github.com/adap/flower/pull/2565))" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:180 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Add new XGBoost examples** " -"([#2612](https://github.com/adap/flower/pull/2612), " -"[#2554](https://github.com/adap/flower/pull/2554), " -"[#2617](https://github.com/adap/flower/pull/2617), " -"[#2618](https://github.com/adap/flower/pull/2618), " -"[#2619](https://github.com/adap/flower/pull/2619), " -"[#2567](https://github.com/adap/flower/pull/2567))" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:182 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"We have added a 
new `xgboost-quickstart` example alongside a new " -"`xgboost-comprehensive` example that goes more in-depth." +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:184 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Add Vertical FL example** " -"([#2598](https://github.com/adap/flower/pull/2598))" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:186 -msgid "" -"We had many questions about Vertical Federated Learning using Flower, so " -"we decided to add an simple example for it on the [Titanic " -"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " -"tutorial (in the README)." +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 +msgid "DifferentialPrivacyClientSideFixedClipping" msgstr "" -#: ../../source/ref-changelog.md:188 -msgid "" -"**Support custom** `ClientManager` **in** `start_driver()` " -"([#2292](https://github.com/adap/flower/pull/2292))" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 +#: of +msgid "Use `fixedclipping_mod` modifier at the client side." msgstr "" -#: ../../source/ref-changelog.md:190 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 +#: of msgid "" -"**Update REST API to support create and delete nodes** " -"([#2283](https://github.com/adap/flower/pull/2283))" +"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " +"on the client-side, usually by using the built-in `fixedclipping_mod`." 
msgstr "" -#: ../../source/ref-changelog.md:192 -msgid "" -"**Update the Android SDK** " -"([#2187](https://github.com/adap/flower/pull/2187))" -msgstr "" - -#: ../../source/ref-changelog.md:194 -msgid "Add gRPC request-response capability to the Android SDK." +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 +#: of +msgid "" +"The noise multiplier for the Gaussian mechanism for model updates. A " +"value of 1.0 or higher is recommended for strong privacy." msgstr "" -#: ../../source/ref-changelog.md:196 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 +#: of msgid "" -"**Update the C++ SDK** " -"([#2537](https://github.com/adap/flower/pull/2537), " -"[#2528](https://github.com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" +"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " +"wrapper:" msgstr "" -#: ../../source/ref-changelog.md:198 -msgid "Add gRPC request-response capability to the C++ SDK." +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 +#: of +msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" msgstr "" -#: ../../source/ref-changelog.md:200 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Make HTTPS the new default** " -"([#2591](https://github.com/adap/flower/pull/2591), " -"[#2636](https://github.com/adap/flower/pull/2636))" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:202 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Flower is moving to HTTPS by default. 
The new `flower-server` requires " -"passing `--certificates`, but users can enable `--insecure` to use HTTP " -"for prototyping. The same applies to `flower-client`, which can either " -"use user-provided credentials or gRPC-bundled certificates to connect to " -"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " -"enable insecure HTTP connections." +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:204 -msgid "" -"For backward compatibility, `start_client()` and `start_numpy_client()` " -"will still start in insecure mode by default. In a future release, " -"insecure connections will require user opt-in by passing `insecure=True`." +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 +#: of +msgid "Add noise to the aggregated parameters." msgstr "" -#: ../../source/ref-changelog.md:206 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), " -"[#2493](https://github.com/adap/flower/pull/2493))" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:208 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Using the `client_fn`, Flower clients can interchangeably run as " -"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" -" `start_simulation`) without requiring changes to how the client class is" -" defined and instantiated. The `to_client()` function is introduced to " -"convert a `NumPyClient` to a `Client`." 
+":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:210 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Add new** `Bulyan` **strategy** " -"([#1817](https://github.com/adap/flower/pull/1817), " -"[#1891](https://github.com/adap/flower/pull/1891))" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:212 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " -"2018](https://arxiv.org/abs/1802.07927)" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyServerSideAdaptiveClipping" msgstr "" -#: ../../source/ref-changelog.md:214 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 +#: of msgid "" -"**Add new** `XGB Bagging` **strategy** " -"([#2611](https://github.com/adap/flower/pull/2611))" +"The standard deviation of the noise added to the count of updates below " +"the estimate. Andrew et al. 
recommends to set to " +"`expected_num_records/20`" msgstr "" -#: ../../source/ref-changelog.md:216 ../../source/ref-changelog.md:218 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 +#: of msgid "" -"**Introduce `WorkloadState`** " -"([#2564](https://github.com/adap/flower/pull/2564), " -"[#2632](https://github.com/adap/flower/pull/2632))" +"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " +"wrapper" msgstr "" -#: ../../source/ref-changelog.md:222 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " -"[#2286](https://github.com/adap/flower/pull/2286), " -"[#2509](https://github.com/adap/flower/pull/2509))" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:224 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " -"[#2400](https://github.com/adap/flower/pull/2400))" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:226 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " -"[#2507](https://github.com/adap/flower/pull/2507))" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:228 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " -"[#2508](https://github.com/adap/flower/pull/2508))" 
+":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:230 -msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:232 -msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:234 -msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 +msgid "DifferentialPrivacyServerSideFixedClipping" msgstr "" -#: ../../source/ref-changelog.md:236 -msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 +#: of +msgid "" +"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping " +"wrapper" msgstr "" -#: ../../source/ref-changelog.md:238 -msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:240 -msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:242 -msgid "niid-Bench 
[#2428](https://github.com/adap/flower/pull/2428)" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 +#: of +msgid "Compute the updates, clip, and pass them for aggregation." msgstr "" -#: ../../source/ref-changelog.md:244 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:246 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**General updates to Flower Examples** " -"([#2384](https://github.com/adap/flower/pull/2384), " -"[#2425](https://github.com/adap/flower/pull/2425), " -"[#2526](https://github.com/adap/flower/pull/2526), " -"[#2302](https://github.com/adap/flower/pull/2302), " -"[#2545](https://github.com/adap/flower/pull/2545))" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:248 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**General updates to Flower Baselines** " -"([#2301](https://github.com/adap/flower/pull/2301), " -"[#2305](https://github.com/adap/flower/pull/2305), " -"[#2307](https://github.com/adap/flower/pull/2307), " -"[#2327](https://github.com/adap/flower/pull/2327), " -"[#2435](https://github.com/adap/flower/pull/2435), " -"[#2462](https://github.com/adap/flower/pull/2462), " -"[#2463](https://github.com/adap/flower/pull/2463), " -"[#2461](https://github.com/adap/flower/pull/2461), " -"[#2469](https://github.com/adap/flower/pull/2469), " 
-"[#2466](https://github.com/adap/flower/pull/2466), " -"[#2471](https://github.com/adap/flower/pull/2471), " -"[#2472](https://github.com/adap/flower/pull/2472), " -"[#2470](https://github.com/adap/flower/pull/2470))" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:250 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**General updates to the simulation engine** " -"([#2331](https://github.com/adap/flower/pull/2331), " -"[#2447](https://github.com/adap/flower/pull/2447), " -"[#2448](https://github.com/adap/flower/pull/2448), " -"[#2294](https://github.com/adap/flower/pull/2294))" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 +#: of +msgid "Afterward, add noise to the aggregated parameters." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 +msgid "FaultTolerantFedAvg" msgstr "" -#: ../../source/ref-changelog.md:252 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"**General updates to Flower SDKs** " -"([#2288](https://github.com/adap/flower/pull/2288), " -"[#2429](https://github.com/adap/flower/pull/2429), " -"[#2555](https://github.com/adap/flower/pull/2555), " -"[#2543](https://github.com/adap/flower/pull/2543), " -"[#2544](https://github.com/adap/flower/pull/2544), " -"[#2597](https://github.com/adap/flower/pull/2597), " -"[#2623](https://github.com/adap/flower/pull/2623))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:254 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"**General improvements** " -"([#2309](https://github.com/adap/flower/pull/2309), " 
-"[#2310](https://github.com/adap/flower/pull/2310), " -"[#2313](https://github.com/adap/flower/pull/2313), " -"[#2316](https://github.com/adap/flower/pull/2316), " -"[#2317](https://github.com/adap/flower/pull/2317), " -"[#2349](https://github.com/adap/flower/pull/2349), " -"[#2360](https://github.com/adap/flower/pull/2360), " -"[#2402](https://github.com/adap/flower/pull/2402), " -"[#2446](https://github.com/adap/flower/pull/2446), " -"[#2561](https://github.com/adap/flower/pull/2561), " -"[#2273](https://github.com/adap/flower/pull/2273), " -"[#2267](https://github.com/adap/flower/pull/2267), " -"[#2274](https://github.com/adap/flower/pull/2274), " -"[#2275](https://github.com/adap/flower/pull/2275), " -"[#2432](https://github.com/adap/flower/pull/2432), " -"[#2251](https://github.com/adap/flower/pull/2251), " -"[#2321](https://github.com/adap/flower/pull/2321), " -"[#1936](https://github.com/adap/flower/pull/1936), " -"[#2408](https://github.com/adap/flower/pull/2408), " -"[#2413](https://github.com/adap/flower/pull/2413), " -"[#2401](https://github.com/adap/flower/pull/2401), " -"[#2531](https://github.com/adap/flower/pull/2531), " -"[#2534](https://github.com/adap/flower/pull/2534), " -"[#2535](https://github.com/adap/flower/pull/2535), " -"[#2521](https://github.com/adap/flower/pull/2521), " -"[#2553](https://github.com/adap/flower/pull/2553), " -"[#2596](https://github.com/adap/flower/pull/2596))" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:256 ../../source/ref-changelog.md:346 -#: ../../source/ref-changelog.md:410 ../../source/ref-changelog.md:464 -#: ../../source/ref-changelog.md:531 -msgid "Flower received many improvements under the hood, too many to list here." 
+#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 +#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 +#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 +#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using weighted average." msgstr "" -#: ../../source/ref-changelog.md:260 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"**Remove support for Python 3.7** " -"([#2280](https://github.com/adap/flower/pull/2280), " -"[#2299](https://github.com/adap/flower/pull/2299), " -"[#2304](https://github.com/adap/flower/pull/2304), " -"[#2306](https://github.com/adap/flower/pull/2306), " -"[#2355](https://github.com/adap/flower/pull/2355), " -"[#2356](https://github.com/adap/flower/pull/2356))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:262 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"Python 3.7 support was deprecated in Flower 1.5, and this release removes" -" support. Flower now requires Python 3.8." 
+":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:264 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"**Remove experimental argument** `rest` **from** `start_client` " -"([#2324](https://github.com/adap/flower/pull/2324))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:266 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"The (still experimental) argument `rest` was removed from `start_client` " -"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " -"experimental REST API instead." -msgstr "" - -#: ../../source/ref-changelog.md:268 -msgid "v1.5.0 (2023-08-31)" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:274 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:278 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce new simulation engine** " -"([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:280 -msgid "" -"The new simulation engine has been rewritten from the ground up, yet it " -"remains fully backwards compatible. It offers much improved stability and" -" memory handling, especially when working with GPUs. Simulations " -"transparently adapt to different settings to scale simulation in CPU-" -"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 +#: ../../source/ref-changelog.md:1231 +msgid "FedAdagrad" msgstr "" -#: ../../source/ref-changelog.md:282 -msgid "" -"Comprehensive documentation includes a new [how-to run " -"simulations](https://flower.ai/docs/framework/how-to-run-" -"simulations.html) guide, new [simulation-" -"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " -"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" -"tensorflow.html) notebooks, and a new [YouTube tutorial " -"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." 
+#: flwr.server.strategy.fedadagrad.FedAdagrad:1 +#: flwr.server.strategy.fedadam.FedAdam:1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" msgstr "" -#: ../../source/ref-changelog.md:284 -msgid "" -"**Restructure Flower Docs** " -"([#1824](https://github.com/adap/flower/pull/1824), " -"[#1865](https://github.com/adap/flower/pull/1865), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1887](https://github.com/adap/flower/pull/1887), " -"[#1919](https://github.com/adap/flower/pull/1919), " -"[#1922](https://github.com/adap/flower/pull/1922), " -"[#1920](https://github.com/adap/flower/pull/1920), " -"[#1923](https://github.com/adap/flower/pull/1923), " -"[#1924](https://github.com/adap/flower/pull/1924), " -"[#1962](https://github.com/adap/flower/pull/1962), " -"[#2006](https://github.com/adap/flower/pull/2006), " -"[#2133](https://github.com/adap/flower/pull/2133), " -"[#2203](https://github.com/adap/flower/pull/2203), " -"[#2215](https://github.com/adap/flower/pull/2215), " -"[#2122](https://github.com/adap/flower/pull/2122), " -"[#2223](https://github.com/adap/flower/pull/2223), " -"[#2219](https://github.com/adap/flower/pull/2219), " -"[#2232](https://github.com/adap/flower/pull/2232), " -"[#2233](https://github.com/adap/flower/pull/2233), " -"[#2234](https://github.com/adap/flower/pull/2234), " -"[#2235](https://github.com/adap/flower/pull/2235), " -"[#2237](https://github.com/adap/flower/pull/2237), " -"[#2238](https://github.com/adap/flower/pull/2238), " -"[#2242](https://github.com/adap/flower/pull/2242), " -"[#2231](https://github.com/adap/flower/pull/2231), " -"[#2243](https://github.com/adap/flower/pull/2243), " -"[#2227](https://github.com/adap/flower/pull/2227))" +#: flwr.server.strategy.fedadagrad.FedAdagrad:3 +#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 +#: flwr.server.strategy.fedyogi.FedYogi:3 of +msgid "Implementation based on 
https://arxiv.org/abs/2003.00295v5" msgstr "" -#: ../../source/ref-changelog.md:286 -msgid "" -"Much effort went into a completely restructured Flower docs experience. " -"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " -"divided into Flower Framework, Flower Baselines, Flower Android SDK, " -"Flower iOS SDK, and code example projects." +#: flwr.server.strategy.fedadagrad.FedAdagrad:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:23 +#: flwr.server.strategy.fedadam.FedAdam:25 +#: flwr.server.strategy.fedadam.FedAdam:27 +#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 +#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 +#: flwr.server.strategy.fedprox.FedProx:61 +#: flwr.server.strategy.fedprox.FedProx:63 +#: flwr.server.strategy.fedyogi.FedYogi:28 +#: flwr.server.strategy.fedyogi.FedYogi:30 of +msgid "Metrics aggregation function, optional." msgstr "" -#: ../../source/ref-changelog.md:288 -msgid "" -"**Introduce Flower Swift SDK** " -"([#1858](https://github.com/adap/flower/pull/1858), " -"[#1897](https://github.com/adap/flower/pull/1897))" +#: flwr.server.strategy.fedadagrad.FedAdagrad:29 +#: flwr.server.strategy.fedadam.FedAdam:29 +#: flwr.server.strategy.fedopt.FedOpt:29 of +msgid "Server-side learning rate. Defaults to 1e-1." msgstr "" -#: ../../source/ref-changelog.md:290 -msgid "" -"This is the first preview release of the Flower Swift SDK. Flower support" -" on iOS is improving, and alongside the Swift SDK and code example, there" -" is now also an iOS quickstart tutorial." +#: flwr.server.strategy.fedadagrad.FedAdagrad:31 +#: flwr.server.strategy.fedadam.FedAdam:31 +#: flwr.server.strategy.fedopt.FedOpt:31 of +msgid "Client-side learning rate. Defaults to 1e-1." 
msgstr "" -#: ../../source/ref-changelog.md:292 -msgid "" -"**Introduce Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +#: flwr.server.strategy.fedadagrad.FedAdagrad:33 +#: flwr.server.strategy.fedadam.FedAdam:37 +#: flwr.server.strategy.fedopt.FedOpt:37 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." msgstr "" -#: ../../source/ref-changelog.md:294 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"This is the first preview release of the Flower Kotlin SDK. Flower " -"support on Android is improving, and alongside the Kotlin SDK and code " -"example, there is now also an Android quickstart tutorial." +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:296 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce new end-to-end testing infrastructure** " -"([#1842](https://github.com/adap/flower/pull/1842), " -"[#2071](https://github.com/adap/flower/pull/2071), " -"[#2072](https://github.com/adap/flower/pull/2072), " -"[#2068](https://github.com/adap/flower/pull/2068), " -"[#2067](https://github.com/adap/flower/pull/2067), " -"[#2069](https://github.com/adap/flower/pull/2069), " -"[#2073](https://github.com/adap/flower/pull/2073), " -"[#2070](https://github.com/adap/flower/pull/2070), " -"[#2074](https://github.com/adap/flower/pull/2074), " -"[#2082](https://github.com/adap/flower/pull/2082), " -"[#2084](https://github.com/adap/flower/pull/2084), " -"[#2093](https://github.com/adap/flower/pull/2093), " -"[#2109](https://github.com/adap/flower/pull/2109), " -"[#2095](https://github.com/adap/flower/pull/2095), " -"[#2140](https://github.com/adap/flower/pull/2140), " -"[#2137](https://github.com/adap/flower/pull/2137), " -"[#2165](https://github.com/adap/flower/pull/2165))" +":py:obj:`aggregate_fit `\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: 
../../source/ref-changelog.md:298 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A new testing infrastructure ensures that new changes stay compatible " -"with existing framework integrations or strategies." -msgstr "" - -#: ../../source/ref-changelog.md:300 -msgid "**Deprecate Python 3.7**" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:302 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" -" Python 3.7 is now deprecated and will be removed in an upcoming release." +":py:obj:`configure_fit `\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:304 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add new** `FedTrimmedAvg` **strategy** " -"([#1769](https://github.com/adap/flower/pull/1769), " -"[#1853](https://github.com/adap/flower/pull/1853))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:306 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " -"2018](https://arxiv.org/abs/1803.01498)." 
+":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:308 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce start_driver** " -"([#1697](https://github.com/adap/flower/pull/1697))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:310 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"In addition to `start_server` and using the raw Driver API, there is a " -"new `start_driver` function that allows for running `start_server` " -"scripts as a Flower driver with only a single-line code change. Check out" -" the `mt-pytorch` code example to see a working example using " -"`start_driver`." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:312 -msgid "" -"**Add parameter aggregation to** `mt-pytorch` **code example** " -"([#1785](https://github.com/adap/flower/pull/1785))" +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 +msgid "FedAdam" msgstr "" -#: ../../source/ref-changelog.md:314 -msgid "" -"The `mt-pytorch` example shows how to aggregate parameters when writing a" -" driver script. The included `driver.py` and `server.py` have been " -"aligned to demonstrate both the low-level way and the high-level way of " -"building server-side logic." +#: flwr.server.strategy.fedadam.FedAdam:33 +#: flwr.server.strategy.fedyogi.FedYogi:36 of +msgid "Momentum parameter. Defaults to 0.9." msgstr "" -#: ../../source/ref-changelog.md:316 -msgid "" -"**Migrate experimental REST API to Starlette** " -"([2171](https://github.com/adap/flower/pull/2171))" +#: flwr.server.strategy.fedadam.FedAdam:35 +#: flwr.server.strategy.fedyogi.FedYogi:38 of +msgid "Second moment parameter. Defaults to 0.99." 
msgstr "" -#: ../../source/ref-changelog.md:318 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The (experimental) REST API used to be implemented in " -"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" -" use [Starlette](https://www.starlette.io/) directly." +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:320 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Please note: The REST request-response API is still experimental and will" -" likely change significantly over time." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:322 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce experimental gRPC request-response API** " -"([#1867](https://github.com/adap/flower/pull/1867), " -"[#1901](https://github.com/adap/flower/pull/1901))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:324 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"In addition to the existing gRPC API (based on bidirectional streaming) " -"and the experimental REST API, there is now a new gRPC API that uses a " -"request-response model to communicate with client nodes." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:326 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Please note: The gRPC request-response API is still experimental and will" -" likely change significantly over time." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:328 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Replace the experimental** `start_client(rest=True)` **with the new** " -"`start_client(transport=\"rest\")` " -"([#1880](https://github.com/adap/flower/pull/1880))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:330 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The (experimental) `start_client` argument `rest` was deprecated in " -"favour of a new argument `transport`. `start_client(transport=\"rest\")` " -"will yield the same behaviour as `start_client(rest=True)` did before. " -"All code should migrate to the new argument `transport`. The deprecated " -"argument `rest` will be removed in a future release." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:332 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add a new gRPC option** " -"([#2197](https://github.com/adap/flower/pull/2197))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 +msgid "FedAvg" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg:3 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of +msgid "Implementation based on https://arxiv.org/abs/1602.05629" msgstr "" -#: ../../source/ref-changelog.md:334 +#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 +#: of msgid "" -"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" -" option set to 0 by default. This prevents the clients from sending " -"keepalive pings when there is no outstanding stream." +"Fraction of clients used during training. 
In case `min_fit_clients` is " +"larger than `fraction_fit * available_clients`, `min_fit_clients` will " +"still be sampled. Defaults to 1.0." msgstr "" -#: ../../source/ref-changelog.md:336 +#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 +#: of msgid "" -"**Improve example notebooks** " -"([#2005](https://github.com/adap/flower/pull/2005))" +"Fraction of clients used during validation. In case " +"`min_evaluate_clients` is larger than `fraction_evaluate * " +"available_clients`, `min_evaluate_clients` will still be sampled. " +"Defaults to 1.0." msgstr "" -#: ../../source/ref-changelog.md:338 -msgid "There's a new 30min Federated Learning PyTorch tutorial!" +#: flwr.server.strategy.fedavg.FedAvg:33 of +msgid "Enable (True) or disable (False) in-place aggregation of model updates." msgstr "" -#: ../../source/ref-changelog.md:340 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), " -"[#1981](https://github.com/adap/flower/pull/1981), " -"[#1988](https://github.com/adap/flower/pull/1988), " -"[#1984](https://github.com/adap/flower/pull/1984), " -"[#1982](https://github.com/adap/flower/pull/1982), " -"[#2112](https://github.com/adap/flower/pull/2112), " -"[#2144](https://github.com/adap/flower/pull/2144), " -"[#2174](https://github.com/adap/flower/pull/2174), " -"[#2225](https://github.com/adap/flower/pull/2225), " -"[#2183](https://github.com/adap/flower/pull/2183))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:342 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Many examples have received significant updates, including simplified " -"advanced-tensorflow and advanced-pytorch examples, improved macOS " -"compatibility of TensorFlow examples, and code examples for simulation. 
A" -" major upgrade is that all code examples now have a `requirements.txt` " -"(in addition to `pyproject.toml`)." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:344 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General improvements** " -"([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:352 -msgid "v1.4.0 (2023-04-21)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:358 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " -"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " -"Sarkhel`, `L. 
Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " -"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " -"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:362 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " -"example)** ([#1694](https://github.com/adap/flower/pull/1694), " -"[#1709](https://github.com/adap/flower/pull/1709), " -"[#1715](https://github.com/adap/flower/pull/1715), " -"[#1717](https://github.com/adap/flower/pull/1717), " -"[#1763](https://github.com/adap/flower/pull/1763), " -"[#1795](https://github.com/adap/flower/pull/1795))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:364 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"XGBoost is a tree-based ensemble machine learning algorithm that uses " -"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" -" " -"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," -" and a [code example](https://github.com/adap/flower/tree/main/examples" -"/xgboost-quickstart) that demonstrates the usage of this new strategy in " -"an XGBoost project." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:366 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce iOS SDK (preview)** " -"([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:368 -msgid "" -"This is a major update for anyone wanting to implement Federated Learning" -" on iOS mobile devices. 
We now have a swift iOS SDK present under " -"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" -" that will facilitate greatly the app creating process. To showcase its " -"use, the [iOS " -"example](https://github.com/adap/flower/tree/main/examples/ios) has also " -"been updated!" +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 +msgid "FedAvgAndroid" msgstr "" -#: ../../source/ref-changelog.md:370 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce new \"What is Federated Learning?\" tutorial** " -"([#1657](https://github.com/adap/flower/pull/1657), " -"[#1721](https://github.com/adap/flower/pull/1721))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:372 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" -"what-is-federated-learning.html) in our documentation explains the basics" -" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" -" Learning to start their journey with Flower. Forward it to anyone who's " -"interested in Federated Learning!" 
+":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:374 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce new Flower Baseline: FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679))" +":py:obj:`bytes_to_ndarray " +"`\\ \\(tensor\\)" msgstr "" -#: ../../source/ref-changelog.md:376 -msgid "" -"This new baseline replicates the MNIST+CNN task from the paper [Federated" -" Optimization in Heterogeneous Networks (Li et al., " -"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," -" which aims at making convergence more robust in heterogeneous settings." +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of +msgid "Deserialize NumPy array from bytes." msgstr "" -#: ../../source/ref-changelog.md:378 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce new Flower Baseline: FedAvg FEMNIST** " -"([#1655](https://github.com/adap/flower/pull/1655))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:380 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"This new baseline replicates an experiment evaluating the performance of " -"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " -"Benchmark for Federated Settings (Caldas et al., " -"2018)](https://arxiv.org/abs/1812.01097)." 
+":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:382 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce (experimental) REST API** " -"([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:384 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"A new REST API has been introduced as an alternative to the gRPC-based " -"communication stack. In this initial version, the REST API only supports " -"anonymous clients." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:386 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"Please note: The REST API is still experimental and will likely change " -"significantly over time." 
+":py:obj:`ndarray_to_bytes " +"`\\ \\(ndarray\\)" msgstr "" -#: ../../source/ref-changelog.md:388 -msgid "" -"**Improve the (experimental) Driver API** " -"([#1663](https://github.com/adap/flower/pull/1663), " -"[#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of +msgid "Serialize NumPy array to bytes." msgstr "" -#: ../../source/ref-changelog.md:390 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"The Driver API is still an experimental feature, but this release " -"introduces some major upgrades. One of the main improvements is the " -"introduction of an SQLite database to store server state on disk (instead" -" of in-memory). Another improvement is that tasks (instructions or " -"results) that have been delivered will now be deleted. This greatly " -"improves the memory efficiency of a long-running Flower server." 
+":py:obj:`ndarrays_to_parameters " +"`\\ " +"\\(ndarrays\\)" msgstr "" -#: ../../source/ref-changelog.md:392 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Fix spilling issues related to Ray during simulations** " -"([#1698](https://github.com/adap/flower/pull/1698))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:394 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"While running long simulations, `ray` was sometimes spilling huge amounts" -" of data that would make the training unable to continue. This is now " -"fixed! 🎉" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:396 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Add new example using** `TabNet` **and Flower** " -"([#1725](https://github.com/adap/flower/pull/1725))" +":py:obj:`parameters_to_ndarrays " +"`\\ " +"\\(parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:398 -msgid "" -"TabNet is a powerful and flexible framework for training machine learning" -" models on tabular data. We now have a federated example using Flower: " -"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" -"/quickstart-tabnet)." +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 +#: of +msgid "Convert parameters object to NumPy weights." 
msgstr "" -#: ../../source/ref-changelog.md:400 -msgid "" -"**Add new how-to guide for monitoring simulations** " -"([#1649](https://github.com/adap/flower/pull/1649))" +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 +msgid "FedAvgM" msgstr "" -#: ../../source/ref-changelog.md:402 -msgid "" -"We now have a documentation guide to help users monitor their performance" -" during simulations." +#: flwr.server.strategy.fedavgm.FedAvgM:3 of +msgid "Implementation based on https://arxiv.org/abs/1909.06335" msgstr "" -#: ../../source/ref-changelog.md:404 +#: flwr.server.strategy.fedavgm.FedAvgM:25 of msgid "" -"**Add training metrics to** `History` **object during simulations** " -"([#1696](https://github.com/adap/flower/pull/1696))" +"Server-side learning rate used in server-side optimization. Defaults to " +"1.0." msgstr "" -#: ../../source/ref-changelog.md:406 -msgid "" -"The `fit_metrics_aggregation_fn` can be used to aggregate training " -"metrics, but previous releases did not save the results in the `History` " -"object. This is now the case!" +#: flwr.server.strategy.fedavgm.FedAvgM:28 of +msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." 
msgstr "" -#: ../../source/ref-changelog.md:408 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General improvements** " -"([#1659](https://github.com/adap/flower/pull/1659), " -"[#1646](https://github.com/adap/flower/pull/1646), " -"[#1647](https://github.com/adap/flower/pull/1647), " -"[#1471](https://github.com/adap/flower/pull/1471), " -"[#1648](https://github.com/adap/flower/pull/1648), " -"[#1651](https://github.com/adap/flower/pull/1651), " -"[#1652](https://github.com/adap/flower/pull/1652), " -"[#1653](https://github.com/adap/flower/pull/1653), " -"[#1659](https://github.com/adap/flower/pull/1659), " -"[#1665](https://github.com/adap/flower/pull/1665), " -"[#1670](https://github.com/adap/flower/pull/1670), " -"[#1672](https://github.com/adap/flower/pull/1672), " -"[#1677](https://github.com/adap/flower/pull/1677), " -"[#1684](https://github.com/adap/flower/pull/1684), " -"[#1683](https://github.com/adap/flower/pull/1683), " -"[#1686](https://github.com/adap/flower/pull/1686), " -"[#1682](https://github.com/adap/flower/pull/1682), " -"[#1685](https://github.com/adap/flower/pull/1685), " -"[#1692](https://github.com/adap/flower/pull/1692), " -"[#1705](https://github.com/adap/flower/pull/1705), " -"[#1708](https://github.com/adap/flower/pull/1708), " -"[#1711](https://github.com/adap/flower/pull/1711), " -"[#1713](https://github.com/adap/flower/pull/1713), " -"[#1714](https://github.com/adap/flower/pull/1714), " -"[#1718](https://github.com/adap/flower/pull/1718), " -"[#1716](https://github.com/adap/flower/pull/1716), " -"[#1723](https://github.com/adap/flower/pull/1723), " -"[#1735](https://github.com/adap/flower/pull/1735), " -"[#1678](https://github.com/adap/flower/pull/1678), " -"[#1750](https://github.com/adap/flower/pull/1750), " -"[#1753](https://github.com/adap/flower/pull/1753), " -"[#1736](https://github.com/adap/flower/pull/1736), " -"[#1766](https://github.com/adap/flower/pull/1766), " 
-"[#1760](https://github.com/adap/flower/pull/1760), " -"[#1775](https://github.com/adap/flower/pull/1775), " -"[#1776](https://github.com/adap/flower/pull/1776), " -"[#1777](https://github.com/adap/flower/pull/1777), " -"[#1779](https://github.com/adap/flower/pull/1779), " -"[#1784](https://github.com/adap/flower/pull/1784), " -"[#1773](https://github.com/adap/flower/pull/1773), " -"[#1755](https://github.com/adap/flower/pull/1755), " -"[#1789](https://github.com/adap/flower/pull/1789), " -"[#1788](https://github.com/adap/flower/pull/1788), " -"[#1798](https://github.com/adap/flower/pull/1798), " -"[#1799](https://github.com/adap/flower/pull/1799), " -"[#1739](https://github.com/adap/flower/pull/1739), " -"[#1800](https://github.com/adap/flower/pull/1800), " -"[#1804](https://github.com/adap/flower/pull/1804), " -"[#1805](https://github.com/adap/flower/pull/1805))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:416 -msgid "v1.3.0 (2023-02-06)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:422 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Daniel J. 
Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:426 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add support for** `workload_id` **and** `group_id` **in Driver API** " -"([#1595](https://github.com/adap/flower/pull/1595))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:428 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The (experimental) Driver API now supports a `workload_id` that can be " -"used to identify which workload a task belongs to. It also supports a new" -" `group_id` that can be used, for example, to indicate the current " -"training round. Both the `workload_id` and `group_id` enable client nodes" -" to decide whether they want to handle a task or not." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:430 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Make Driver API and Fleet API address configurable** " -"([#1637](https://github.com/adap/flower/pull/1637))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:432 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The (experimental) long-running Flower server (Driver API and Fleet API) " -"can now configure the server address of both Driver API (via `--driver-" -"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:434 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " -"\"0.0.0.0:8086\"`" +":py:obj:`num_fit_clients " 
+"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:436 -msgid "Both IPv4 and IPv6 addresses are supported." +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 +msgid "FedMedian" msgstr "" -#: ../../source/ref-changelog.md:438 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add new example of Federated Learning using fastai and Flower** " -"([#1598](https://github.com/adap/flower/pull/1598))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:440 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A new code example (`quickstart-fastai`) demonstrates federated learning " -"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " -"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" -"/quickstart-fastai)." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:442 -msgid "" -"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" -" versions of Android** " -"([#1603](https://github.com/adap/flower/pull/1603))" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of +msgid "Aggregate fit results using median." msgstr "" -#: ../../source/ref-changelog.md:444 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The Android code example has received a substantial update: the project " -"is compatible with Flower 1.0 (and later), the UI received a full " -"refresh, and the project is updated to be compatible with newer Android " -"tooling." 
+":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:446 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add new `FedProx` strategy** " -"([#1619](https://github.com/adap/flower/pull/1619))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:448 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"This " -"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" -" is almost identical to " -"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," -" but helps users replicate what is described in this " -"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " -"parameter called `proximal_mu` to regularize the local models with " -"respect to the global models." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:450 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add new metrics to telemetry events** " -"([#1640](https://github.com/adap/flower/pull/1640))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:452 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"An updated event structure allows, for example, the clustering of events " -"within the same workload." 
+":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:454 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add new custom strategy tutorial section** " -"[#1623](https://github.com/adap/flower/pull/1623)" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:456 -msgid "" -"The Flower tutorial now has a new section that covers implementing a " -"custom strategy from scratch: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 +msgid "FedOpt" msgstr "" -#: ../../source/ref-changelog.md:458 -msgid "" -"**Add new custom serialization tutorial section** " -"([#1622](https://github.com/adap/flower/pull/1622))" +#: flwr.server.strategy.fedopt.FedOpt:33 of +msgid "Momentum parameter. Defaults to 0.0." msgstr "" -#: ../../source/ref-changelog.md:460 -msgid "" -"The Flower tutorial now has a new section that covers custom " -"serialization: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-customize-the-client-pytorch.ipynb)" +#: flwr.server.strategy.fedopt.FedOpt:35 of +msgid "Second moment parameter. Defaults to 0.0." 
msgstr "" -#: ../../source/ref-changelog.md:462 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General improvements** " -"([#1638](https://github.com/adap/flower/pull/1638), " -"[#1634](https://github.com/adap/flower/pull/1634), " -"[#1636](https://github.com/adap/flower/pull/1636), " -"[#1635](https://github.com/adap/flower/pull/1635), " -"[#1633](https://github.com/adap/flower/pull/1633), " -"[#1632](https://github.com/adap/flower/pull/1632), " -"[#1631](https://github.com/adap/flower/pull/1631), " -"[#1630](https://github.com/adap/flower/pull/1630), " -"[#1627](https://github.com/adap/flower/pull/1627), " -"[#1593](https://github.com/adap/flower/pull/1593), " -"[#1616](https://github.com/adap/flower/pull/1616), " -"[#1615](https://github.com/adap/flower/pull/1615), " -"[#1607](https://github.com/adap/flower/pull/1607), " -"[#1609](https://github.com/adap/flower/pull/1609), " -"[#1608](https://github.com/adap/flower/pull/1608), " -"[#1603](https://github.com/adap/flower/pull/1603), " -"[#1590](https://github.com/adap/flower/pull/1590), " -"[#1580](https://github.com/adap/flower/pull/1580), " -"[#1599](https://github.com/adap/flower/pull/1599), " -"[#1600](https://github.com/adap/flower/pull/1600), " -"[#1601](https://github.com/adap/flower/pull/1601), " -"[#1597](https://github.com/adap/flower/pull/1597), " -"[#1595](https://github.com/adap/flower/pull/1595), " -"[#1591](https://github.com/adap/flower/pull/1591), " -"[#1588](https://github.com/adap/flower/pull/1588), " -"[#1589](https://github.com/adap/flower/pull/1589), " -"[#1587](https://github.com/adap/flower/pull/1587), " -"[#1573](https://github.com/adap/flower/pull/1573), " -"[#1581](https://github.com/adap/flower/pull/1581), " -"[#1578](https://github.com/adap/flower/pull/1578), " -"[#1574](https://github.com/adap/flower/pull/1574), " -"[#1572](https://github.com/adap/flower/pull/1572), " -"[#1586](https://github.com/adap/flower/pull/1586))" +":py:obj:`aggregate_evaluate " 
+"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:466 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Updated documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:468 ../../source/ref-changelog.md:535 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"As usual, the documentation has improved quite a bit. It is another step " -"in our effort to make the Flower documentation the best documentation of " -"any project. Stay tuned and as always, feel free to provide feedback!" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:474 -msgid "v1.2.0 (2023-01-13)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:480 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." 
-" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:484 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce new Flower Baseline: FedAvg MNIST** " -"([#1497](https://github.com/adap/flower/pull/1497), " -"[#1552](https://github.com/adap/flower/pull/1552))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:486 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Over the coming weeks, we will be releasing a number of new reference " -"implementations useful especially to FL newcomers. They will typically " -"revisit well known papers from the literature, and be suitable for " -"integration in your own application or for experimentation, in order to " -"deepen your knowledge of FL in general. Today's release is the first in " -"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" -"pack-fedavg-mnist-cnn/)" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:488 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Improve GPU support in simulations** " -"([#1555](https://github.com/adap/flower/pull/1555))" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:490 -msgid "" -"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" -" to improve GPU support. The update includes some of the hard-earned " -"lessons from scaling simulations in GPU cluster environments. New " -"defaults make running GPU-based simulations substantially more robust." 
+#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 +msgid "FedProx" msgstr "" -#: ../../source/ref-changelog.md:492 -msgid "" -"**Improve GPU support in Jupyter Notebook tutorials** " -"([#1527](https://github.com/adap/flower/pull/1527), " -"[#1558](https://github.com/adap/flower/pull/1558))" +#: flwr.server.strategy.fedprox.FedProx:3 of +msgid "Implementation based on https://arxiv.org/abs/1812.06127" msgstr "" -#: ../../source/ref-changelog.md:494 +#: flwr.server.strategy.fedprox.FedProx:5 of msgid "" -"Some users reported that Jupyter Notebooks have not always been easy to " -"use on GPU instances. We listened and made improvements to all of our " -"Jupyter notebooks! Check out the updated notebooks here:" +"The strategy in itself will not be different than FedAvg, the client " +"needs to be adjusted. A proximal term needs to be added to the loss " +"function during the training:" msgstr "" -#: ../../source/ref-changelog.md:496 +#: flwr.server.strategy.fedprox.FedProx:9 of msgid "" -"[An Introduction to Federated Learning](https://flower.ai/docs/framework" -"/tutorial-get-started-with-flower-pytorch.html)" +"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" +"\n" msgstr "" -#: ../../source/ref-changelog.md:497 +#: flwr.server.strategy.fedprox.FedProx:12 of msgid "" -"[Strategies in Federated Learning](https://flower.ai/docs/framework" -"/tutorial-use-a-federated-learning-strategy-pytorch.html)" +"Where $w^t$ are the global parameters and $w$ are the local weights the " +"function will be optimized with." 
msgstr "" -#: ../../source/ref-changelog.md:498 -msgid "" -"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" -"-strategy-from-scratch-pytorch.html)" +#: flwr.server.strategy.fedprox.FedProx:15 of +msgid "In PyTorch, for example, the loss would go from:" msgstr "" -#: ../../source/ref-changelog.md:499 -msgid "" -"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" -"customize-the-client-pytorch.html)" +#: flwr.server.strategy.fedprox.FedProx:21 of +msgid "To:" msgstr "" -#: ../../source/ref-changelog.md:501 +#: flwr.server.strategy.fedprox.FedProx:30 of msgid "" -"**Introduce optional telemetry** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" +"With `global_params` being a copy of the parameters before the training " +"takes place." msgstr "" -#: ../../source/ref-changelog.md:503 +#: flwr.server.strategy.fedprox.FedProx:65 of msgid "" -"After a [request for " -"feedback](https://github.com/adap/flower/issues/1534) from the community," -" the Flower open-source project introduces optional collection of " -"*anonymous* usage metrics to make well-informed decisions to improve " -"Flower. Doing this enables the Flower team to understand how Flower is " -"used and what challenges users might face." +"The weight of the proximal term used in the optimization. 0.0 makes this " +"strategy equivalent to FedAvg, and the higher the coefficient, the more " +"regularization will be used (that is, the client parameters will need to " +"be closer to the server parameters during training)." msgstr "" -#: ../../source/ref-changelog.md:505 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users who do not want to share anonymous usage metrics. 
" -"[Read more.](https://flower.ai/docs/telemetry.html)." +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:507 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce (experimental) Driver API** " -"([#1520](https://github.com/adap/flower/pull/1520), " -"[#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:509 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Flower now has a new (experimental) Driver API which will enable fully " -"programmable, async, and multi-tenant Federated Learning and Federated " -"Analytics applications. Phew, that's a lot! Going forward, the Driver API" -" will be the abstraction that many upcoming features will be built on - " -"and you can start building those things now, too." +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:511 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The Driver API also enables a new execution mode in which the server runs" -" indefinitely. Multiple individual workloads can run concurrently and " -"start and stop their execution independent of the server. This is " -"especially useful for users who want to deploy Flower in production." 
+":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:513 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"To learn more, check out the `mt-pytorch` code example. We look forward " -"to you feedback!" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:515 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Please note: *The Driver API is still experimental and will likely change" -" significantly over time.*" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:517 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add new Federated Analytics with Pandas example** " -"([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:519 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A new code example (`quickstart-pandas`) demonstrates federated analytics" -" with Pandas and Flower. You can find it here: [quickstart-" -"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" -"pandas)." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:521 -msgid "" -"**Add new strategies: Krum and MultiKrum** " -"([#1481](https://github.com/adap/flower/pull/1481))" +#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of +msgid "Sends the proximal factor mu to the clients" msgstr "" -#: ../../source/ref-changelog.md:523 -msgid "" -"Edoardo, a computer science student at the Sapienza University of Rome, " -"contributed a new `Krum` strategy that enables users to easily use Krum " -"and MultiKrum in their workloads." 
+#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 +msgid "FedTrimmedAvg" msgstr "" -#: ../../source/ref-changelog.md:525 -msgid "" -"**Update C++ example to be compatible with Flower v1.2.0** " -"([#1495](https://github.com/adap/flower/pull/1495))" +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of +msgid "Implemented based on: https://arxiv.org/abs/1803.01498" +msgstr "" + +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of +msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." msgstr "" -#: ../../source/ref-changelog.md:527 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The C++ code example has received a substantial update to make it " -"compatible with the latest version of Flower." +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:529 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General improvements** " -"([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github.com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:533 -msgid "" -"**Updated documentation** " -"([#1494](https://github.com/adap/flower/pull/1494), " 
-"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using trimmed average." msgstr "" -#: ../../source/ref-changelog.md:537 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"One highlight is the new [first time contributor " -"guide](https://flower.ai/docs/first-time-contributors.html): if you've " -"never contributed on GitHub before, this is the perfect place to start!" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:543 -msgid "v1.1.0 (2022-10-31)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:547 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made the new version of Flower possible (in `git shortlog` order):" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:549 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"`Akis Linardos`, `Christopher S`, `Daniel J. 
Beutel`, `George`, `Jan " -"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " -"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " -"`danielnugraha`, `edogab33`" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:553 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce Differential Privacy wrappers (preview)** " -"([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:555 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The first (experimental) preview of pluggable Differential Privacy " -"wrappers enables easy configuration and usage of differential privacy " -"(DP). The pluggable DP wrappers enable framework-agnostic **and** " -"strategy-agnostic usage of both client-side DP and server-side DP. Head " -"over to the Flower docs, a new explainer goes into more detail." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:557 -msgid "" -"**New iOS CoreML code example** " -"([#1289](https://github.com/adap/flower/pull/1289))" +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 +msgid "FedXgbBagging" msgstr "" -#: ../../source/ref-changelog.md:559 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"Flower goes iOS! A massive new code example shows how Flower clients can " -"be built for iOS. The code example contains both Flower iOS SDK " -"components that can be used for many tasks, and one task example running " -"on CoreML." 
+":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:561 -msgid "" -"**New FedMedian strategy** " -"([#1461](https://github.com/adap/flower/pull/1461))" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation metrics using average." msgstr "" -#: ../../source/ref-changelog.md:563 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"The new `FedMedian` strategy implements Federated Median (FedMedian) by " -"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:565 -msgid "" -"**Log** `Client` **exceptions in Virtual Client Engine** " -"([#1493](https://github.com/adap/flower/pull/1493))" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of +msgid "Aggregate fit results using bagging." msgstr "" -#: ../../source/ref-changelog.md:567 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"All `Client` exceptions happening in the VCE are now logged by default " -"and not just exposed to the configured `Strategy` (via the `failures` " -"argument)." 
+":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:569 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"**Improve Virtual Client Engine internals** " -"([#1401](https://github.com/adap/flower/pull/1401), " -"[#1453](https://github.com/adap/flower/pull/1453))" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:571 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"Some internals of the Virtual Client Engine have been revamped. The VCE " -"now uses Ray 2.0 under the hood, the value type of the `client_resources`" -" dictionary changed to `float` to allow fractions of resources to be " -"allocated." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:573 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " -"Client Engine**" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:575 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"The Virtual Client Engine now has full support for optional `Client` (and" -" `NumPyClient`) methods." 
+":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:577 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"**Provide type information to packages using** `flwr` " -"([#1377](https://github.com/adap/flower/pull/1377))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:579 -msgid "" -"The package `flwr` is now bundled with a `py.typed` file indicating that " -"the package is typed. This enables typing support for projects or " -"packages that use `flwr` by enabling them to improve their code using " -"static type checkers like `mypy`." +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 +msgid "FedXgbCyclic" msgstr "" -#: ../../source/ref-changelog.md:581 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"**Updated code example** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:583 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"The code examples covering scikit-learn and PyTorch Lightning have been " -"updated to work with the latest version of Flower." 
+":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\," +" results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:585 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"**Updated documentation** " -"([#1355](https://github.com/adap/flower/pull/1355), " -"[#1558](https://github.com/adap/flower/pull/1558), " -"[#1379](https://github.com/adap/flower/pull/1379), " -"[#1380](https://github.com/adap/flower/pull/1380), " -"[#1381](https://github.com/adap/flower/pull/1381), " -"[#1332](https://github.com/adap/flower/pull/1332), " -"[#1391](https://github.com/adap/flower/pull/1391), " -"[#1403](https://github.com/adap/flower/pull/1403), " -"[#1364](https://github.com/adap/flower/pull/1364), " -"[#1409](https://github.com/adap/flower/pull/1409), " -"[#1419](https://github.com/adap/flower/pull/1419), " -"[#1444](https://github.com/adap/flower/pull/1444), " -"[#1448](https://github.com/adap/flower/pull/1448), " -"[#1417](https://github.com/adap/flower/pull/1417), " -"[#1449](https://github.com/adap/flower/pull/1449), " -"[#1465](https://github.com/adap/flower/pull/1465), " -"[#1467](https://github.com/adap/flower/pull/1467))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:587 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"There have been so many documentation updates that it doesn't even make " -"sense to list them individually." 
+":py:obj:`configure_fit " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:589 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"**Restructured documentation** " -"([#1387](https://github.com/adap/flower/pull/1387))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:591 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"The documentation has been restructured to make it easier to navigate. " -"This is just the first step in a larger effort to make the Flower " -"documentation the best documentation of any project ever. Stay tuned!" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:593 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"**Open in Colab button** " -"([#1389](https://github.com/adap/flower/pull/1389))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:595 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"The four parts of the Flower Federated Learning Tutorial now come with a " -"new `Open in Colab` button. No need to install anything on your local " -"machine, you can now use and learn about Flower in your browser, it's " -"only a single click away." 
+":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:597 -msgid "" -"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 +msgid "FedXgbNnAvg" msgstr "" -#: ../../source/ref-changelog.md:599 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of msgid "" -"The Flower Federated Learning Tutorial has two brand-new parts covering " -"custom strategies (still WIP) and the distinction between `Client` and " -"`NumPyClient`. The existing parts one and two have also been improved " -"(many small changes and fixes)." +"This strategy is deprecated, but a copy of it is available in Flower " +"Baselines: " +"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." 
msgstr "" -#: ../../source/ref-changelog.md:605 -msgid "v1.0.0 (2022-07-28)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:607 -msgid "Highlights" -msgstr "" - -#: ../../source/ref-changelog.md:609 -msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" -msgstr "" - -#: ../../source/ref-changelog.md:610 -msgid "All `Client`/`NumPyClient` methods are now optional" -msgstr "" - -#: ../../source/ref-changelog.md:611 -msgid "Configurable `get_parameters`" -msgstr "" - -#: ../../source/ref-changelog.md:612 -msgid "" -"Tons of small API cleanups resulting in a more coherent developer " -"experience" -msgstr "" - -#: ../../source/ref-changelog.md:616 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made Flower 1.0 possible (in reverse [GitHub " -"Contributors](https://github.com/adap/flower/graphs/contributors) order):" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\, " +"results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:618 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"[@rtaiello](https://github.com/rtaiello), " -"[@g-pichler](https://github.com/g-pichler), [@rob-" -"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" -"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " -"[@nfnt](https://github.com/nfnt), " -"[@tatiana-s](https://github.com/tatiana-s), " -"[@TParcollet](https://github.com/TParcollet), " -"[@vballoli](https://github.com/vballoli), " -"[@negedng](https://github.com/negedng), " -"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " -"[@hei411](https://github.com/hei411), " -"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " -"[@AmitChaulwar](https://github.com/AmitChaulwar), " 
-"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" -"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " -"[@lbhm](https://github.com/lbhm), " -"[@sishtiaq](https://github.com/sishtiaq), " -"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" -"/Jueun-Park), [@architjen](https://github.com/architjen), " -"[@PratikGarai](https://github.com/PratikGarai), " -"[@mrinaald](https://github.com/mrinaald), " -"[@zliel](https://github.com/zliel), " -"[@MeiruiJiang](https://github.com/MeiruiJiang), " -"[@sancarlim](https://github.com/sancarlim), " -"[@gubertoli](https://github.com/gubertoli), " -"[@Vingt100](https://github.com/Vingt100), " -"[@MakGulati](https://github.com/MakGulati), " -"[@cozek](https://github.com/cozek), " -"[@jafermarq](https://github.com/jafermarq), " -"[@sisco0](https://github.com/sisco0), " -"[@akhilmathurs](https://github.com/akhilmathurs), " -"[@CanTuerk](https://github.com/CanTuerk), " -"[@mariaboerner1987](https://github.com/mariaboerner1987), " -"[@pedropgusmao](https://github.com/pedropgusmao), " -"[@tanertopal](https://github.com/tanertopal), " -"[@danieljanes](https://github.com/danieljanes)." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:622 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**All arguments must be passed as keyword arguments** " -"([#1338](https://github.com/adap/flower/pull/1338))" +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:624 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Pass all arguments as keyword arguments, positional arguments are not " -"longer supported. 
Code that uses positional arguments (e.g., " -"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " -"for each positional argument (e.g., " -"`start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())`)." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:626 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce configuration object** `ServerConfig` **in** `start_server` " -"**and** `start_simulation` " -"([#1317](https://github.com/adap/flower/pull/1317))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:628 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " -"600.0}`, `start_server` and `start_simulation` now expect a configuration" -" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" -" arguments that as the previous config dict, but it makes writing type-" -"safe code easier and the default parameters values more transparent." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:630 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Rename built-in strategy parameters for clarity** " -"([#1334](https://github.com/adap/flower/pull/1334))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:632 -msgid "" -"The following built-in strategy parameters were renamed to improve " -"readability and consistency with other API's:" +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 +msgid "FedYogi" msgstr "" -#: ../../source/ref-changelog.md:634 -msgid "`fraction_eval` --> `fraction_evaluate`" +#: flwr.server.strategy.fedyogi.FedYogi:32 of +msgid "Server-side learning rate. Defaults to 1e-2." 
msgstr "" -#: ../../source/ref-changelog.md:635 -msgid "`min_eval_clients` --> `min_evaluate_clients`" +#: flwr.server.strategy.fedyogi.FedYogi:34 of +msgid "Client-side learning rate. Defaults to 0.0316." msgstr "" -#: ../../source/ref-changelog.md:636 -msgid "`eval_fn` --> `evaluate_fn`" +#: flwr.server.strategy.fedyogi.FedYogi:40 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." msgstr "" -#: ../../source/ref-changelog.md:638 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Update default arguments of built-in strategies** " -"([#1278](https://github.com/adap/flower/pull/1278))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:640 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"All built-in strategies now use `fraction_fit=1.0` and " -"`fraction_evaluate=1.0`, which means they select *all* currently " -"available clients for training and evaluation. Projects that relied on " -"the previous default values can get the previous behaviour by " -"initializing the strategy in the following way:" -msgstr "" - -#: ../../source/ref-changelog.md:642 -msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:644 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add** `server_round` **to** `Strategy.evaluate` " -"([#1334](https://github.com/adap/flower/pull/1334))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:646 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The `Strategy` method `evaluate` now receives the current round of " -"federated learning/evaluation as the first parameter." 
+":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:648 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " -"([#1334](https://github.com/adap/flower/pull/1334))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:650 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " -"three parameters: (1) The current round of federated learning/evaluation " -"(`server_round`), (2) the model parameters to evaluate (`parameters`), " -"and (3) a config dictionary (`config`)." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:652 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:654 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " -"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " -"current round of federated learning/evaluation as their first parameter. " -"To improve reaability and avoid confusion with *random*, this parameter " -"has been renamed from `rnd` to `server_round`." 
+":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:656 -msgid "" -"**Move** `flwr.dataset` **to** `flwr_baselines` " -"([#1273](https://github.com/adap/flower/pull/1273))" +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 +msgid "Krum" msgstr "" -#: ../../source/ref-changelog.md:658 -msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." +#: flwr.server.strategy.krum.Krum:3 of +msgid "Implementation based on https://arxiv.org/abs/1703.02757" msgstr "" -#: ../../source/ref-changelog.md:660 +#: flwr.server.strategy.krum.Krum:17 of msgid "" -"**Remove experimental strategies** " -"([#1280](https://github.com/adap/flower/pull/1280))" +"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" +" that case classical Krum is applied." msgstr "" -#: ../../source/ref-changelog.md:662 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " -"`FedFSv1`)." +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:664 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Rename** `Weights` **to** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:666 -msgid "" -"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " -"capture what this type is all about." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of +msgid "Aggregate fit results using Krum." 
msgstr "" -#: ../../source/ref-changelog.md:668 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Remove antiquated** `force_final_distributed_eval` **from** " -"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:670 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The `start_server` parameter `force_final_distributed_eval` has long been" -" a historic artefact, in this release it is finally gone for good." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:672 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Make** `get_parameters` **configurable** " -"([#1242](https://github.com/adap/flower/pull/1242))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:674 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The `get_parameters` method now accepts a configuration dictionary, just " -"like `get_properties`, `fit`, and `evaluate`." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:676 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " -"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:678 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The `start_simulation` function now accepts a configuration dictionary " -"`config` instead of the `num_rounds` integer. 
This improves the " -"consistency between `start_simulation` and `start_server` and makes " -"transitioning between the two easier." +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:682 -msgid "" -"**Support Python 3.10** " -"([#1320](https://github.com/adap/flower/pull/1320))" +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 +msgid "QFedAvg" msgstr "" -#: ../../source/ref-changelog.md:684 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"The previous Flower release introduced experimental support for Python " -"3.10, this release declares Python 3.10 support as stable." +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:686 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**Make all** `Client` **and** `NumPyClient` **methods optional** " -"([#1260](https://github.com/adap/flower/pull/1260), " -"[#1277](https://github.com/adap/flower/pull/1277))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:688 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " -"`fit`, and `evaluate` are all optional. This enables writing clients that" -" implement, for example, only `fit`, but no other method. No need to " -"implement `evaluate` when using centralized evaluation!" 
+":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:690 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**Enable passing a** `Server` **instance to** `start_simulation` " -"([#1281](https://github.com/adap/flower/pull/1281))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:692 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"Similar to `start_server`, `start_simulation` now accepts a full `Server`" -" instance. This enables users to heavily customize the execution of " -"eperiments and opens the door to running, for example, async FL using the" -" Virtual Client Engine." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:694 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**Update code examples** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:696 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"Many code examples received small or even large maintenance updates, " -"among them are" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:698 -msgid "`scikit-learn`" +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:699 -msgid "`simulation_pytorch`" +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 +msgid "Strategy" msgstr "" -#: ../../source/ref-changelog.md:700 -msgid "`quickstart_pytorch`" +#: 
flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:701 -msgid "`quickstart_simulation`" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation results." msgstr "" -#: ../../source/ref-changelog.md:702 -msgid "`quickstart_tensorflow`" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:703 -msgid "`advanced_tensorflow`" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of +msgid "Aggregate training results." msgstr "" -#: ../../source/ref-changelog.md:705 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"**Remove the obsolete simulation example** " -"([#1328](https://github.com/adap/flower/pull/1328))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:707 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"Removes the obsolete `simulation` example and renames " -"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " -"naming of `simulation_pytorch`" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:709 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"**Update documentation** " -"([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " 
-"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:711 -msgid "" -"One substantial documentation update fixes multiple smaller rendering " -"issues, makes titles more succinct to improve navigation, removes a " -"deprecated library, updates documentation dependencies, includes the " -"`flwr.common` module in the API reference, includes support for markdown-" -"based documentation, migrates the changelog from `.rst` to `.md`, and " -"fixes a number of smaller details!" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.evaluate:1 of +msgid "Evaluate the current model parameters." msgstr "" -#: ../../source/ref-changelog.md:713 ../../source/ref-changelog.md:768 -#: ../../source/ref-changelog.md:837 ../../source/ref-changelog.md:876 -msgid "**Minor updates**" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:715 -msgid "" -"Add round number to fit and evaluate log messages " -"([#1266](https://github.com/adap/flower/pull/1266))" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of +msgid "Initialize the (global) model parameters." 
msgstr "" -#: ../../source/ref-changelog.md:716 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of msgid "" -"Add secure gRPC connection to the `advanced_tensorflow` code example " -"([#847](https://github.com/adap/flower/pull/847))" +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " +"one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." msgstr "" -#: ../../source/ref-changelog.md:717 -msgid "" -"Update developer tooling " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of +msgid "Exceptions that occurred while the server was waiting for client updates." msgstr "" -#: ../../source/ref-changelog.md:718 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of msgid "" -"Rename ProtoBuf messages to improve consistency " -"([#1214](https://github.com/adap/flower/pull/1214), " -"[#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"**aggregation_result** -- The aggregated evaluation result. Aggregation " +"typically uses some variant of a weighted average." msgstr "" -#: ../../source/ref-changelog.md:720 -msgid "v0.19.0 (2022-05-18)" +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of +msgid "" +"Successful updates from the previously selected and configured clients. 
" +"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" +" one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." msgstr "" -#: ../../source/ref-changelog.md:724 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of msgid "" -"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " -"([#919](https://github.com/adap/flower/pull/919), " -"[#1127](https://github.com/adap/flower/pull/1127), " -"[#914](https://github.com/adap/flower/pull/914))" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the new global model parameters (i.e., it will replace the " +"previous parameters with the ones returned from this method). If `None` " +"is returned (e.g., because there were only failures and no viable " +"results) then the server will no update the previous model parameters, " +"the updates received in this round are discarded, and the global model " +"parameters remain the same." msgstr "" -#: ../../source/ref-changelog.md:726 +#: flwr.server.strategy.strategy.Strategy.evaluate:3 of msgid "" -"The first preview release of Flower Baselines has arrived! We're " -"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " -"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " -"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " -"With this first preview release we're also inviting the community to " -"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" -"contribute-baselines.html)." +"This function can be used to perform centralized (i.e., server-side) " +"evaluation of model parameters." 
msgstr "" -#: ../../source/ref-changelog.md:728 +#: flwr.server.strategy.strategy.Strategy.evaluate:11 of msgid "" -"**C++ client SDK (preview) and code example** " -"([#1111](https://github.com/adap/flower/pull/1111))" +"**evaluation_result** -- The evaluation result, usually a Tuple " +"containing loss and a dictionary containing task-specific metrics (e.g., " +"accuracy)." msgstr "" -#: ../../source/ref-changelog.md:730 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of msgid "" -"Preview support for Flower clients written in C++. The C++ preview " -"includes a Flower client SDK and a quickstart code example that " -"demonstrates a simple C++ client using the SDK." +"**parameters** -- If parameters are returned, then the server will treat " +"these as the initial global model parameters." msgstr "" -#: ../../source/ref-changelog.md:732 -msgid "" -"**Add experimental support for Python 3.10 and Python 3.11** " -"([#1135](https://github.com/adap/flower/pull/1135))" +#: ../../source/ref-api/flwr.server.workflow.rst:2 +msgid "workflow" msgstr "" -#: ../../source/ref-changelog.md:734 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -"Python 3.10 is the latest stable release of Python and Python 3.11 is due" -" to be released in October. This Flower release adds experimental support" -" for both Python versions." +":py:obj:`DefaultWorkflow `\\ " +"\\(\\[fit\\_workflow\\, ...\\]\\)" msgstr "" -#: ../../source/ref-changelog.md:736 -msgid "" -"**Aggregate custom metrics through user-provided functions** " -"([#1144](https://github.com/adap/flower/pull/1144))" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of +msgid "Default workflow in Flower." msgstr "" -#: ../../source/ref-changelog.md:738 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" -" customize the strategy. 
Built-in strategies support two new arguments, " -"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " -"allow passing custom metric aggregation functions." +":py:obj:`SecAggPlusWorkflow `\\ " +"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" msgstr "" -#: ../../source/ref-changelog.md:740 -msgid "" -"**User-configurable round timeout** " -"([#1162](https://github.com/adap/flower/pull/1162))" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +msgid "The workflow for the SecAgg+ protocol." msgstr "" -#: ../../source/ref-changelog.md:742 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -"A new configuration value allows the round timeout to be set for " -"`start_server` and `start_simulation`. If the `config` dictionary " -"contains a `round_timeout` key (with a `float` value in seconds), the " -"server will wait *at least* `round_timeout` seconds before it closes the " -"connection." +":py:obj:`SecAggWorkflow `\\ " +"\\(reconstruction\\_threshold\\, \\*\\)" msgstr "" -#: ../../source/ref-changelog.md:744 -msgid "" -"**Enable both federated evaluation and centralized evaluation to be used " -"at the same time in all built-in strategies** " -"([#1091](https://github.com/adap/flower/pull/1091))" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +msgid "The workflow for the SecAgg protocol." msgstr "" -#: ../../source/ref-changelog.md:746 -msgid "" -"Built-in strategies can now perform both federated evaluation (i.e., " -"client-side) and centralized evaluation (i.e., server-side) in the same " -"round. Federated evaluation can be disabled by setting `fraction_eval` to" -" `0.0`." 
+#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 +msgid "DefaultWorkflow" msgstr "" -#: ../../source/ref-changelog.md:748 -msgid "" -"**Two new Jupyter Notebook tutorials** " -"([#1141](https://github.com/adap/flower/pull/1141))" +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 +msgid "SecAggPlusWorkflow" msgstr "" -#: ../../source/ref-changelog.md:750 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 +#: of msgid "" -"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " -"basic and intermediate Flower features:" +"The SecAgg+ protocol ensures the secure summation of integer vectors " +"owned by multiple parties, without accessing any individual integer " +"vector. This workflow allows the server to compute the weighted average " +"of model parameters across all clients, ensuring individual contributions" +" remain private. This is achieved by clients sending both, a weighting " +"factor and a weighted version of the locally updated parameters, both of " +"which are masked for privacy. Specifically, each client uploads \"[w, w *" +" params]\" with masks, where weighting factor 'w' is the number of " +"examples ('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." 
msgstr "" -#: ../../source/ref-changelog.md:752 -msgid "" -"*An Introduction to Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" -"-Intro-to-FL-PyTorch.ipynb)" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 +#: of +msgid "The protocol involves four main stages:" msgstr "" -#: ../../source/ref-changelog.md:754 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:16 +#: of msgid "" -"*Using Strategies in Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" -"-Strategies-in-FL-PyTorch.ipynb)" +"'setup': Send SecAgg+ configuration to clients and collect their public " +"keys." msgstr "" -#: ../../source/ref-changelog.md:756 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 +#: of msgid "" -"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " -"([#1076](https://github.com/adap/flower/pull/1076))" +"'share keys': Broadcast public keys among clients and collect encrypted " +"secret key shares." msgstr "" -#: ../../source/ref-changelog.md:758 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:19 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:19 +#: of msgid "" -"The new `FedAvgM` strategy implements Federated Averaging with Server " -"Momentum \\[Hsu et al., 2019\\]." +"'collect masked vectors': Forward encrypted secret key shares to target " +"clients and collect masked model parameters." 
msgstr "" -#: ../../source/ref-changelog.md:760 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:21 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:21 +#: of msgid "" -"**New advanced PyTorch code example** " -"([#1007](https://github.com/adap/flower/pull/1007))" +"'unmask': Collect secret key shares to decrypt and aggregate the model " +"parameters." msgstr "" -#: ../../source/ref-changelog.md:762 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:23 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:23 +#: of msgid "" -"A new code example (`advanced_pytorch`) demonstrates advanced Flower " -"concepts with PyTorch." +"Only the aggregated model parameters are exposed and passed to " +"`Strategy.aggregate_fit`, ensuring individual data privacy." msgstr "" -#: ../../source/ref-changelog.md:764 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:26 +#: of msgid "" -"**New JAX code example** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" +"The number of shares into which each client's private key is split under " +"the SecAgg+ protocol. If specified as a float, it represents the " +"proportion of all selected clients, and the number of shares will be set " +"dynamically in the run time. A private key can be reconstructed from " +"these shares, allowing for the secure aggregation of model updates. Each " +"client sends one share to each of its neighbors while retaining one." msgstr "" -#: ../../source/ref-changelog.md:766 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:26 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:33 +#: of msgid "" -"A new code example (`jax_from_centralized_to_federated`) shows federated " -"learning with JAX and Flower." 
+"The minimum number of shares required to reconstruct a client's private " +"key, or, if specified as a float, it represents the proportion of the " +"total number of shares needed for reconstruction. This threshold ensures " +"privacy by allowing for the recovery of contributions from dropped " +"clients during aggregation, without compromising individual client data." msgstr "" -#: ../../source/ref-changelog.md:770 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:32 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:39 +#: of msgid "" -"New option to keep Ray running if Ray was already initialized in " -"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" +"The maximum value of the weight that can be assigned to any single " +"client's update during the weighted average calculation on the server " +"side, e.g., in the FedAvg algorithm." msgstr "" -#: ../../source/ref-changelog.md:771 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:36 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:43 +#: of msgid "" -"Add support for custom `ClientManager` as a `start_simulation` parameter " -"([#1171](https://github.com/adap/flower/pull/1171))" +"The range within which model parameters are clipped before quantization. " +"This parameter ensures each model parameter is bounded within " +"[-clipping_range, clipping_range], facilitating quantization." 
msgstr "" -#: ../../source/ref-changelog.md:772 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:40 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:47 +#: of msgid "" -"New documentation for [implementing " -"strategies](https://flower.ai/docs/framework/how-to-implement-" -"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " -"[#1175](https://github.com/adap/flower/pull/1175))" +"The size of the range into which floating-point model parameters are " +"quantized, mapping each parameter to an integer in [0, " +"quantization_range-1]. This facilitates cryptographic operations on the " +"model updates." msgstr "" -#: ../../source/ref-changelog.md:773 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:44 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:51 +#: of msgid "" -"New mobile-friendly documentation theme " -"([#1174](https://github.com/adap/flower/pull/1174))" +"The range of values from which random mask entries are uniformly sampled " +"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " +"Please use 2**n values for `modulus_range` to prevent overflow issues." msgstr "" -#: ../../source/ref-changelog.md:774 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:48 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:55 +#: of msgid "" -"Limit version range for (optional) `ray` dependency to include only " -"compatible releases (`>=1.9.2,<1.12.0`) " -"([#1205](https://github.com/adap/flower/pull/1205))" +"The timeout duration in seconds. If specified, the workflow will wait for" +" replies for this duration each time. If `None`, there is no time limit " +"and the workflow will wait until replies for all messages are received." 
msgstr "" -#: ../../source/ref-changelog.md:778 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:62 +#: of msgid "" -"**Remove deprecated support for Python 3.6** " -"([#871](https://github.com/adap/flower/pull/871))" +"Generally, higher `num_shares` means more robust to dropouts while " +"increasing the computational costs; higher `reconstruction_threshold` " +"means better privacy guarantees but less tolerance to dropouts." msgstr "" -#: ../../source/ref-changelog.md:779 -msgid "" -"**Remove deprecated KerasClient** " -"([#857](https://github.com/adap/flower/pull/857))" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 +#: of +msgid "Too large `max_weight` may compromise the precision of the quantization." msgstr "" -#: ../../source/ref-changelog.md:780 -msgid "" -"**Remove deprecated no-op extra installs** " -"([#973](https://github.com/adap/flower/pull/973))" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 +#: of +msgid "`modulus_range` must be 2**n and larger than `quantization_range`." msgstr "" -#: ../../source/ref-changelog.md:781 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:67 +#: of msgid "" -"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " -"([#869](https://github.com/adap/flower/pull/869))" +"When `num_shares` is a float, it is interpreted as the proportion of all " +"selected clients, and hence the number of shares will be determined in " +"the runtime. This allows for dynamic adjustment based on the total number" +" of participating clients." 
msgstr "" -#: ../../source/ref-changelog.md:782 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:70 +#: of msgid "" -"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " -"([#1107](https://github.com/adap/flower/pull/1107))" +"Similarly, when `reconstruction_threshold` is a float, it is interpreted " +"as the proportion of the number of shares needed for the reconstruction " +"of a private key. This feature enables flexibility in setting the " +"security threshold relative to the number of distributed shares." msgstr "" -#: ../../source/ref-changelog.md:783 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:74 +#: of msgid "" -"**Remove deprecated DefaultStrategy strategy** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"`num_shares`, `reconstruction_threshold`, and the quantization parameters" +" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg+" +" protocol." msgstr "" -#: ../../source/ref-changelog.md:784 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**Remove deprecated support for eval_fn accuracy return value** " -"([#1142](https://github.com/adap/flower/pull/1142))" +":py:obj:`collect_masked_vectors_stage " +"`\\" +" \\(driver\\, ...\\)" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "Execute the 'collect masked vectors' stage." 
msgstr "" -#: ../../source/ref-changelog.md:785 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**Remove deprecated support for passing initial parameters as NumPy " -"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" +":py:obj:`setup_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:787 -msgid "v0.18.0 (2022-02-28)" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 +#: of +msgid "Execute the 'setup' stage." msgstr "" -#: ../../source/ref-changelog.md:791 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " -"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " -"[#872](https://github.com/adap/flower/pull/872), " -"[#833](https://github.com/adap/flower/pull/833), " -"[#1036](https://github.com/adap/flower/pull/1036))" +":py:obj:`share_keys_stage " +"`\\ " +"\\(driver\\, context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:793 -msgid "" -"Simulations (using the Virtual Client Engine through `start_simulation`) " -"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " -"installing Flower with the `simulation` extra (`pip install " -"flwr[simulation]`)." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 +#: of +msgid "Execute the 'share keys' stage." 
msgstr "" -#: ../../source/ref-changelog.md:795 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**New Jupyter Notebook code example** " -"([#833](https://github.com/adap/flower/pull/833))" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:797 -msgid "" -"A new code example (`quickstart_simulation`) demonstrates Flower " -"simulations using the Virtual Client Engine through Jupyter Notebook " -"(incl. Google Colab)." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 +#: of +msgid "Execute the 'unmask' stage." msgstr "" -#: ../../source/ref-changelog.md:799 -msgid "" -"**Client properties (feature preview)** " -"([#795](https://github.com/adap/flower/pull/795))" +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 +msgid "SecAggWorkflow" msgstr "" -#: ../../source/ref-changelog.md:801 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of msgid "" -"Clients can implement a new method `get_properties` to enable server-side" -" strategies to query client properties." +"Bases: " +":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" msgstr "" -#: ../../source/ref-changelog.md:803 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of msgid "" -"**Experimental Android support with TFLite** " -"([#865](https://github.com/adap/flower/pull/865))" +"The SecAgg protocol ensures the secure summation of integer vectors owned" +" by multiple parties, without accessing any individual integer vector. " +"This workflow allows the server to compute the weighted average of model " +"parameters across all clients, ensuring individual contributions remain " +"private. 
This is achieved by clients sending both, a weighting factor and" +" a weighted version of the locally updated parameters, both of which are " +"masked for privacy. Specifically, each client uploads \"[w, w * params]\"" +" with masks, where weighting factor 'w' is the number of examples " +"('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." msgstr "" -#: ../../source/ref-changelog.md:805 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:16 of msgid "" -"Android support has finally arrived in `main`! Flower is both client-" -"agnostic and framework-agnostic by design. One can integrate arbitrary " -"client platforms and with this release, using Flower on Android has " -"become a lot easier." +"'setup': Send SecAgg configuration to clients and collect their public " +"keys." msgstr "" -#: ../../source/ref-changelog.md:807 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:55 of msgid "" -"The example uses TFLite on the client side, along with a new " -"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " -"still experimental, but they are a first step towards a fully-fledged " -"Android SDK and a unified `FedAvg` implementation that integrated the new" -" functionality from `FedAvgAndroid`." +"Each client's private key is split into N shares under the SecAgg " +"protocol, where N is the number of selected clients." msgstr "" -#: ../../source/ref-changelog.md:809 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:57 of msgid "" -"**Make gRPC keepalive time user-configurable and decrease default " -"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" +"Generally, higher `reconstruction_threshold` means better privacy " +"guarantees but less tolerance to dropouts." 
msgstr "" -#: ../../source/ref-changelog.md:811 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:61 of msgid "" -"The default gRPC keepalive time has been reduced to increase the " -"compatibility of Flower with more cloud environments (for example, " -"Microsoft Azure). Users can configure the keepalive time to customize the" -" gRPC stack based on specific requirements." +"When `reconstruction_threshold` is a float, it is interpreted as the " +"proportion of the number of all selected clients needed for the " +"reconstruction of a private key. This feature enables flexibility in " +"setting the security threshold relative to the number of selected " +"clients." msgstr "" -#: ../../source/ref-changelog.md:813 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:65 of msgid "" -"**New differential privacy example using Opacus and PyTorch** " -"([#805](https://github.com/adap/flower/pull/805))" +"`reconstruction_threshold`, and the quantization parameters " +"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg " +"protocol." msgstr "" -#: ../../source/ref-changelog.md:815 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"A new code example (`opacus`) demonstrates differentially-private " -"federated learning with Opacus, PyTorch, and Flower." 
+":py:obj:`collect_masked_vectors_stage " +"`\\ " +"\\(driver\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:817 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**New Hugging Face Transformers code example** " -"([#863](https://github.com/adap/flower/pull/863))" +":py:obj:`setup_stage `\\" +" \\(driver\\, context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:819 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"A new code example (`quickstart_huggingface`) demonstrates usage of " -"Hugging Face Transformers with Flower." +":py:obj:`share_keys_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:821 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**New MLCube code example** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:823 -msgid "" -"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " -"with Flower." 
+#: ../../source/ref-api/flwr.simulation.rst:2 +msgid "simulation" msgstr "" -#: ../../source/ref-changelog.md:825 +#: ../../source/ref-api/flwr.simulation.rst:18::1 msgid "" -"**SSL-enabled server and client** " -"([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +":py:obj:`run_simulation `\\ " +"\\(server\\_app\\, client\\_app\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:827 -msgid "" -"SSL enables secure encrypted connections between clients and servers. " -"This release open-sources the Flower secure gRPC implementation to make " -"encrypted communication channels accessible to all Flower users." +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.simulation.run_simulation.run_simulation:1 of +msgid "Run a Flower App using the Simulation Engine." msgstr "" -#: ../../source/ref-changelog.md:829 +#: ../../source/ref-api/flwr.simulation.rst:18::1 msgid "" -"**Updated** `FedAdam` **and** `FedYogi` **strategies** " -"([#885](https://github.com/adap/flower/pull/885), " -"[#895](https://github.com/adap/flower/pull/895))" +":py:obj:`start_simulation `\\ " +"\\(\\*args\\, \\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-changelog.md:831 -msgid "" -"`FedAdam` and `FedAdam` match the latest version of the Adaptive " -"Federated Optimization paper." +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.simulation.start_simulation:1 of +msgid "Log error stating that module `ray` could not be imported." 
msgstr "" -#: ../../source/ref-changelog.md:833 -msgid "" -"**Initialize** `start_simulation` **with a list of client IDs** " -"([#860](https://github.com/adap/flower/pull/860))" +#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 +msgid "run\\_simulation" msgstr "" -#: ../../source/ref-changelog.md:835 +#: flwr.simulation.run_simulation.run_simulation:3 of msgid "" -"`start_simulation` can now be called with a list of client IDs " -"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " -"`client_fn` whenever a client needs to be initialized, which can make it " -"easier to load data partitions that are not accessible through `int` " -"identifiers." +"The `ServerApp` to be executed. It will send messages to different " +"`ClientApp` instances running on different (virtual) SuperNodes." msgstr "" -#: ../../source/ref-changelog.md:839 +#: flwr.simulation.run_simulation.run_simulation:6 of msgid "" -"Update `num_examples` calculation in PyTorch code examples in " -"([#909](https://github.com/adap/flower/pull/909))" +"The `ClientApp` to be executed by each of the SuperNodes. It will receive" +" messages sent by the `ServerApp`." msgstr "" -#: ../../source/ref-changelog.md:840 +#: flwr.simulation.run_simulation.run_simulation:9 of msgid "" -"Expose Flower version through `flwr.__version__` " -"([#952](https://github.com/adap/flower/pull/952))" +"Number of nodes that run a ClientApp. They can be sampled by a Driver in " +"the ServerApp and receive a Message describing what the ClientApp should " +"perform." msgstr "" -#: ../../source/ref-changelog.md:841 -msgid "" -"`start_server` in `app.py` now returns a `History` object containing " -"metrics from training ([#974](https://github.com/adap/flower/pull/974))" +#: flwr.simulation.run_simulation.run_simulation:12 of +msgid "A simulation backend that runs `ClientApp`s." 
msgstr "" -#: ../../source/ref-changelog.md:842 +#: flwr.simulation.run_simulation.run_simulation:14 of msgid "" -"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " -"([#978](https://github.com/adap/flower/pull/978))" +"'A dictionary to configure a backend. Separate dictionaries to configure " +"different elements of backend. Supported top-level keys are `init_args` " +"for values parsed to initialisation of backend, `client_resources` to " +"define the resources for clients, and `actor` to define the actor " +"parameters. Values supported in are those included by " +"`flwr.common.typing.ConfigsRecordValues`." msgstr "" -#: ../../source/ref-changelog.md:843 +#: flwr.simulation.run_simulation.run_simulation:21 of msgid "" -"Increase sleep time after server start to three seconds in all code " -"examples ([#1086](https://github.com/adap/flower/pull/1086))" +"A boolean to indicate whether to enable GPU growth on the main thread. " +"This is desirable if you make use of a TensorFlow model on your " +"`ServerApp` while having your `ClientApp` running on the same GPU. " +"Without enabling this, you might encounter an out-of-memory error because" +" TensorFlow, by default, allocates all GPU memory. Read more about how " +"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " +"documentation: https://www.tensorflow.org/api/stable." msgstr "" -#: ../../source/ref-changelog.md:844 +#: flwr.simulation.run_simulation.run_simulation:28 of msgid "" -"Added a new FAQ section to the documentation " -"([#948](https://github.com/adap/flower/pull/948))" +"When disabled, only INFO, WARNING and ERROR log messages will be shown. " +"If enabled, DEBUG-level logs will be displayed." msgstr "" -#: ../../source/ref-changelog.md:845 -msgid "" -"And many more under-the-hood changes, library updates, documentation " -"changes, and tooling improvements!" 
+#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 +msgid "start\\_simulation" msgstr "" -#: ../../source/ref-changelog.md:849 -msgid "" -"**Removed** `flwr_example` **and** `flwr_experimental` **from release " -"build** ([#869](https://github.com/adap/flower/pull/869))" +#: ../../source/ref-changelog.md:1 +msgid "Changelog" msgstr "" -#: ../../source/ref-changelog.md:851 -msgid "" -"The packages `flwr_example` and `flwr_experimental` have been deprecated " -"since Flower 0.12.0 and they are not longer included in Flower release " -"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " -"an upcoming release." +#: ../../source/ref-changelog.md:3 +msgid "v1.11.1 (2024-09-11)" msgstr "" -#: ../../source/ref-changelog.md:853 -msgid "v0.17.0 (2021-09-24)" +#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:37 +#: ../../source/ref-changelog.md:141 ../../source/ref-changelog.md:239 +#: ../../source/ref-changelog.md:339 ../../source/ref-changelog.md:403 +#: ../../source/ref-changelog.md:496 ../../source/ref-changelog.md:596 +#: ../../source/ref-changelog.md:680 ../../source/ref-changelog.md:744 +#: ../../source/ref-changelog.md:802 ../../source/ref-changelog.md:871 +#: ../../source/ref-changelog.md:940 +msgid "Thanks to our contributors" msgstr "" -#: ../../source/ref-changelog.md:857 +#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:39 +#: ../../source/ref-changelog.md:143 ../../source/ref-changelog.md:241 +#: ../../source/ref-changelog.md:341 ../../source/ref-changelog.md:405 +#: ../../source/ref-changelog.md:498 ../../source/ref-changelog.md:598 +#: ../../source/ref-changelog.md:682 ../../source/ref-changelog.md:746 +#: ../../source/ref-changelog.md:804 msgid "" -"**Experimental virtual client engine** " -"([#781](https://github.com/adap/flower/pull/781) " -"[#790](https://github.com/adap/flower/pull/790) " 
-"[#791](https://github.com/adap/flower/pull/791))" +"We would like to give our special thanks to all the contributors who made" +" the new version of Flower possible (in `git shortlog` order):" msgstr "" -#: ../../source/ref-changelog.md:859 +#: ../../source/ref-changelog.md:9 msgid "" -"One of Flower's goals is to enable research at scale. This release " -"enables a first (experimental) peek at a major new feature, codenamed the" -" virtual client engine. Virtual clients enable simulations that scale to " -"a (very) large number of clients on a single machine or compute cluster. " -"The easiest way to test the new functionality is to look at the two new " -"code examples called `quickstart_simulation` and `simulation_pytorch`." +"`Charles Beauville`, `Chong Shen Ng`, `Daniel J. Beutel`, `Heng Pan`, " +"`Javier`, `Robert Steiner`, `Yan Gao` " msgstr "" -#: ../../source/ref-changelog.md:861 -msgid "" -"The feature is still experimental, so there's no stability guarantee for " -"the API. It's also not quite ready for prime time and comes with a few " -"known caveats. However, those who are curious are encouraged to try it " -"out and share their thoughts." +#: ../../source/ref-changelog.md:11 +msgid "Improvements" msgstr "" -#: ../../source/ref-changelog.md:863 +#: ../../source/ref-changelog.md:13 msgid "" -"**New built-in strategies** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" +"**Implement** `keys/values/items` **methods for** `TypedDict` " +"([#4146](https://github.com/adap/flower/pull/4146))" msgstr "" -#: ../../source/ref-changelog.md:865 +#: ../../source/ref-changelog.md:15 msgid "" -"FedYogi - Federated learning strategy using Yogi on server-side. 
" -"Implementation based on https://arxiv.org/abs/2003.00295" +"**Fix parsing of** `--executor-config` **if present** " +"([#4125](https://github.com/adap/flower/pull/4125))" msgstr "" -#: ../../source/ref-changelog.md:866 +#: ../../source/ref-changelog.md:17 msgid "" -"FedAdam - Federated learning strategy using Adam on server-side. " -"Implementation based on https://arxiv.org/abs/2003.00295" +"**Adjust framework name in templates docstrings** " +"([#4127](https://github.com/adap/flower/pull/4127))" msgstr "" -#: ../../source/ref-changelog.md:868 +#: ../../source/ref-changelog.md:19 msgid "" -"**New PyTorch Lightning code example** " -"([#617](https://github.com/adap/flower/pull/617))" +"**Update** `flwr new` **Hugging Face template** " +"([#4169](https://github.com/adap/flower/pull/4169))" msgstr "" -#: ../../source/ref-changelog.md:870 +#: ../../source/ref-changelog.md:21 msgid "" -"**New Variational Auto-Encoder code example** " -"([#752](https://github.com/adap/flower/pull/752))" +"**Fix** `flwr new` **FlowerTune template** " +"([#4123](https://github.com/adap/flower/pull/4123))" msgstr "" -#: ../../source/ref-changelog.md:872 +#: ../../source/ref-changelog.md:23 msgid "" -"**New scikit-learn code example** " -"([#748](https://github.com/adap/flower/pull/748))" +"**Add buffer time after** `ServerApp` **thread initialization** " +"([#4119](https://github.com/adap/flower/pull/4119))" msgstr "" -#: ../../source/ref-changelog.md:874 +#: ../../source/ref-changelog.md:25 msgid "" -"**New experimental TensorBoard strategy** " -"([#789](https://github.com/adap/flower/pull/789))" +"**Handle unsuitable resources for simulation** " +"([#4143](https://github.com/adap/flower/pull/4143))" msgstr "" -#: ../../source/ref-changelog.md:878 +#: ../../source/ref-changelog.md:27 msgid "" -"Improved advanced TensorFlow code example " -"([#769](https://github.com/adap/flower/pull/769))" +"**Update example READMEs** " +"([#4117](https://github.com/adap/flower/pull/4117))" msgstr "" 
-#: ../../source/ref-changelog.md:879 +#: ../../source/ref-changelog.md:29 msgid "" -"Warning when `min_available_clients` is misconfigured " -"([#830](https://github.com/adap/flower/pull/830))" +"**Update SuperNode authentication docs** " +"([#4160](https://github.com/adap/flower/pull/4160))" msgstr "" -#: ../../source/ref-changelog.md:880 -msgid "" -"Improved gRPC server docs " -"([#841](https://github.com/adap/flower/pull/841))" +#: ../../source/ref-changelog.md:31 ../../source/ref-changelog.md:111 +#: ../../source/ref-changelog.md:227 ../../source/ref-changelog.md:323 +#: ../../source/ref-changelog.md:397 ../../source/ref-changelog.md:472 +#: ../../source/ref-changelog.md:584 ../../source/ref-changelog.md:674 +#: ../../source/ref-changelog.md:738 ../../source/ref-changelog.md:796 +#: ../../source/ref-changelog.md:865 ../../source/ref-changelog.md:927 +#: ../../source/ref-changelog.md:946 ../../source/ref-changelog.md:1102 +#: ../../source/ref-changelog.md:1173 ../../source/ref-changelog.md:1210 +#: ../../source/ref-changelog.md:1253 +msgid "Incompatible changes" msgstr "" -#: ../../source/ref-changelog.md:881 -msgid "" -"Improved error message in `NumPyClient` " -"([#851](https://github.com/adap/flower/pull/851))" +#: ../../source/ref-changelog.md:35 +msgid "v1.11.0 (2024-08-30)" msgstr "" -#: ../../source/ref-changelog.md:882 +#: ../../source/ref-changelog.md:41 msgid "" -"Improved PyTorch quickstart code example " -"([#852](https://github.com/adap/flower/pull/852))" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. 
" +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Edoardo Gabrielli`, `Heng Pan`," +" `Javier`, `Meng Yan`, `Michal Danilowski`, `Mohammad Naseri`, `Robert " +"Steiner`, `Steve Laskaridis`, `Taner Topal`, `Yan Gao` " +msgstr "" + +#: ../../source/ref-changelog.md:43 ../../source/ref-changelog.md:147 +#: ../../source/ref-changelog.md:245 ../../source/ref-changelog.md:345 +#: ../../source/ref-changelog.md:409 ../../source/ref-changelog.md:502 +#: ../../source/ref-changelog.md:602 ../../source/ref-changelog.md:686 +#: ../../source/ref-changelog.md:750 ../../source/ref-changelog.md:808 +#: ../../source/ref-changelog.md:877 ../../source/ref-changelog.md:1006 +#: ../../source/ref-changelog.md:1048 ../../source/ref-changelog.md:1115 +#: ../../source/ref-changelog.md:1181 ../../source/ref-changelog.md:1226 +#: ../../source/ref-changelog.md:1265 ../../source/ref-changelog.md:1298 +#: ../../source/ref-changelog.md:1348 +msgid "What's new?" msgstr "" -#: ../../source/ref-changelog.md:886 +#: ../../source/ref-changelog.md:45 msgid "" -"**Disabled final distributed evaluation** " -"([#800](https://github.com/adap/flower/pull/800))" +"**Deliver Flower App Bundle (FAB) to SuperLink and SuperNodes** " +"([#4006](https://github.com/adap/flower/pull/4006), " +"[#3945](https://github.com/adap/flower/pull/3945), " +"[#3999](https://github.com/adap/flower/pull/3999), " +"[#4027](https://github.com/adap/flower/pull/4027), " +"[#3851](https://github.com/adap/flower/pull/3851), " +"[#3946](https://github.com/adap/flower/pull/3946), " +"[#4003](https://github.com/adap/flower/pull/4003), " +"[#4029](https://github.com/adap/flower/pull/4029), " +"[#3942](https://github.com/adap/flower/pull/3942), " +"[#3957](https://github.com/adap/flower/pull/3957), " +"[#4020](https://github.com/adap/flower/pull/4020), " +"[#4044](https://github.com/adap/flower/pull/4044), " +"[#3852](https://github.com/adap/flower/pull/3852), " +"[#4019](https://github.com/adap/flower/pull/4019), " 
+"[#4031](https://github.com/adap/flower/pull/4031), " +"[#4036](https://github.com/adap/flower/pull/4036), " +"[#4049](https://github.com/adap/flower/pull/4049), " +"[#4017](https://github.com/adap/flower/pull/4017), " +"[#3943](https://github.com/adap/flower/pull/3943), " +"[#3944](https://github.com/adap/flower/pull/3944), " +"[#4011](https://github.com/adap/flower/pull/4011), " +"[#3619](https://github.com/adap/flower/pull/3619))" msgstr "" -#: ../../source/ref-changelog.md:888 +#: ../../source/ref-changelog.md:47 msgid "" -"Prior behaviour was to perform a final round of distributed evaluation on" -" all connected clients, which is often not required (e.g., when using " -"server-side evaluation). The prior behaviour can be enabled by passing " -"`force_final_distributed_eval=True` to `start_server`." +"Dynamic code updates are here! `flwr run` can now ship and install the " +"latest version of your `ServerApp` and `ClientApp` to an already-running " +"federation (SuperLink and SuperNodes)." msgstr "" -#: ../../source/ref-changelog.md:890 +#: ../../source/ref-changelog.md:49 msgid "" -"**Renamed q-FedAvg strategy** " -"([#802](https://github.com/adap/flower/pull/802))" +"How does it work? `flwr run` bundles your Flower app into a single FAB " +"(Flower App Bundle) file. It then ships this FAB file, via the SuperExec," +" to both the SuperLink and those SuperNodes that need it. This allows you" +" to keep SuperExec, SuperLink and SuperNodes running as permanent " +"infrastructure, and then ship code updates (including completely new " +"projects!) dynamically." msgstr "" -#: ../../source/ref-changelog.md:892 -msgid "" -"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " -"the notation given in the original paper (q-FFL is the optimization " -"objective, q-FedAvg is the proposed solver). Note the original (now " -"deprecated) `QffedAvg` class is still available for compatibility reasons" -" (it will be removed in a future release)." 
+#: ../../source/ref-changelog.md:51 +msgid "`flwr run` is all you need." msgstr "" -#: ../../source/ref-changelog.md:894 +#: ../../source/ref-changelog.md:53 msgid "" -"**Deprecated and renamed code example** `simulation_pytorch` **to** " -"`simulation_pytorch_legacy` " -"([#791](https://github.com/adap/flower/pull/791))" +"**Introduce isolated** `ClientApp` **execution** " +"([#3970](https://github.com/adap/flower/pull/3970), " +"[#3976](https://github.com/adap/flower/pull/3976), " +"[#4002](https://github.com/adap/flower/pull/4002), " +"[#4001](https://github.com/adap/flower/pull/4001), " +"[#4034](https://github.com/adap/flower/pull/4034), " +"[#4037](https://github.com/adap/flower/pull/4037), " +"[#3977](https://github.com/adap/flower/pull/3977), " +"[#4042](https://github.com/adap/flower/pull/4042), " +"[#3978](https://github.com/adap/flower/pull/3978), " +"[#4039](https://github.com/adap/flower/pull/4039), " +"[#4033](https://github.com/adap/flower/pull/4033), " +"[#3971](https://github.com/adap/flower/pull/3971), " +"[#4035](https://github.com/adap/flower/pull/4035), " +"[#3973](https://github.com/adap/flower/pull/3973), " +"[#4032](https://github.com/adap/flower/pull/4032))" msgstr "" -#: ../../source/ref-changelog.md:896 +#: ../../source/ref-changelog.md:55 msgid "" -"This example has been replaced by a new example. The new example is based" -" on the experimental virtual client engine, which will become the new " -"default way of doing most types of large-scale simulations in Flower. The" -" existing example was kept for reference purposes, but it might be " -"removed in the future." +"The SuperNode can now run your `ClientApp` in a fully isolated way. In an" +" enterprise deployment, this allows you to set strict limits on what the " +"`ClientApp` can and cannot do." 
msgstr "" -#: ../../source/ref-changelog.md:898 -msgid "v0.16.0 (2021-05-11)" +#: ../../source/ref-changelog.md:57 +msgid "`flower-supernode` supports three `--isolation` modes:" msgstr "" -#: ../../source/ref-changelog.md:902 +#: ../../source/ref-changelog.md:59 msgid "" -"**New built-in strategies** " -"([#549](https://github.com/adap/flower/pull/549))" +"Unset: The SuperNode runs the `ClientApp` in the same process (as in " +"previous versions of Flower). This is the default mode." msgstr "" -#: ../../source/ref-changelog.md:904 -msgid "(abstract) FedOpt" +#: ../../source/ref-changelog.md:60 +msgid "" +"`--isolation=subprocess`: The SuperNode starts a subprocess to run the " +"`ClientApp`." msgstr "" -#: ../../source/ref-changelog.md:907 +#: ../../source/ref-changelog.md:61 msgid "" -"**Custom metrics for server and strategies** " -"([#717](https://github.com/adap/flower/pull/717))" +"`--isolation=process`: The SuperNode expects an externally-managed " +"process to run the `ClientApp`. This external process is not managed by " +"the SuperNode, so it has to be started beforehand and terminated " +"manually. The common way to use this isolation mode is via the new " +"`flwr/clientapp` Docker image." msgstr "" -#: ../../source/ref-changelog.md:909 +#: ../../source/ref-changelog.md:63 msgid "" -"The Flower server is now fully task-agnostic, all remaining instances of " -"task-specific metrics (such as `accuracy`) have been replaced by custom " -"metrics dictionaries. Flower 0.15 introduced the capability to pass a " -"dictionary containing custom metrics from client to server. As of this " -"release, custom metrics replace task-specific metrics on the server." 
+"**Improve Docker support for enterprise deployments** " +"([#4050](https://github.com/adap/flower/pull/4050), " +"[#4090](https://github.com/adap/flower/pull/4090), " +"[#3784](https://github.com/adap/flower/pull/3784), " +"[#3998](https://github.com/adap/flower/pull/3998), " +"[#4094](https://github.com/adap/flower/pull/4094), " +"[#3722](https://github.com/adap/flower/pull/3722))" msgstr "" -#: ../../source/ref-changelog.md:911 +#: ../../source/ref-changelog.md:65 msgid "" -"Custom metric dictionaries are now used in two user-facing APIs: they are" -" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " -"they enable evaluation functions passed to built-in strategies (via " -"`eval_fn`) to return more than two evaluation metrics. Strategies can " -"even return *aggregated* metrics dictionaries for the server to keep " -"track of." +"Flower 1.11 ships many Docker improvements that are especially useful for" +" enterprise deployments:" msgstr "" -#: ../../source/ref-changelog.md:913 -msgid "" -"Strategy implementations should migrate their `aggregate_fit` and " -"`aggregate_evaluate` methods to the new return type (e.g., by simply " -"returning an empty `{}`), server-side evaluation functions should migrate" -" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." +#: ../../source/ref-changelog.md:67 +msgid "`flwr/supernode` comes with a new Alpine Docker image." msgstr "" -#: ../../source/ref-changelog.md:915 +#: ../../source/ref-changelog.md:68 msgid "" -"Flower 0.15-style return types are deprecated (but still supported), " -"compatibility will be removed in a future release." +"`flwr/clientapp` is a new image to be used with the `--isolation=process`" +" option. In this mode, SuperNode and `ClientApp` run in two different " +"Docker containers. `flwr/supernode` (preferably the Alpine version) runs " +"the long-running SuperNode with `--isolation=process`. `flwr/clientapp` " +"runs the `ClientApp`. 
This is the recommended way to deploy Flower in " +"enterprise settings." msgstr "" -#: ../../source/ref-changelog.md:917 +#: ../../source/ref-changelog.md:69 msgid "" -"**Migration warnings for deprecated functionality** " -"([#690](https://github.com/adap/flower/pull/690))" +"New all-in-one Docker Compose enables you to easily start a full Flower " +"Deployment Engine on a single machine." msgstr "" -#: ../../source/ref-changelog.md:919 +#: ../../source/ref-changelog.md:70 msgid "" -"Earlier versions of Flower were often migrated to new APIs, while " -"maintaining compatibility with legacy APIs. This release introduces " -"detailed warning messages if usage of deprecated APIs is detected. The " -"new warning messages often provide details on how to migrate to more " -"recent APIs, thus easing the transition from one release to another." +"Completely new Docker documentation: " +"https://flower.ai/docs/framework/docker/index.html" msgstr "" -#: ../../source/ref-changelog.md:921 +#: ../../source/ref-changelog.md:72 msgid "" -"Improved docs and docstrings " -"([#691](https://github.com/adap/flower/pull/691) " -"[#692](https://github.com/adap/flower/pull/692) " -"[#713](https://github.com/adap/flower/pull/713))" +"**Improve SuperNode authentication** " +"([#4043](https://github.com/adap/flower/pull/4043), " +"[#4047](https://github.com/adap/flower/pull/4047), " +"[#4074](https://github.com/adap/flower/pull/4074))" msgstr "" -#: ../../source/ref-changelog.md:923 -msgid "MXNet example and documentation" +#: ../../source/ref-changelog.md:74 +msgid "" +"SuperNode auth has been improved in several ways, including improved " +"logging, improved testing, and improved error handling." 
msgstr "" -#: ../../source/ref-changelog.md:925 +#: ../../source/ref-changelog.md:76 msgid "" -"FedBN implementation in example PyTorch: From Centralized To Federated " -"([#696](https://github.com/adap/flower/pull/696) " -"[#702](https://github.com/adap/flower/pull/702) " -"[#705](https://github.com/adap/flower/pull/705))" +"**Update** `flwr new` **templates** " +"([#3933](https://github.com/adap/flower/pull/3933), " +"[#3894](https://github.com/adap/flower/pull/3894), " +"[#3930](https://github.com/adap/flower/pull/3930), " +"[#3931](https://github.com/adap/flower/pull/3931), " +"[#3997](https://github.com/adap/flower/pull/3997), " +"[#3979](https://github.com/adap/flower/pull/3979), " +"[#3965](https://github.com/adap/flower/pull/3965), " +"[#4013](https://github.com/adap/flower/pull/4013), " +"[#4064](https://github.com/adap/flower/pull/4064))" msgstr "" -#: ../../source/ref-changelog.md:929 +#: ../../source/ref-changelog.md:78 msgid "" -"**Serialization-agnostic server** " -"([#721](https://github.com/adap/flower/pull/721))" +"All `flwr new` templates have been updated to show the latest recommended" +" use of Flower APIs." msgstr "" -#: ../../source/ref-changelog.md:931 +#: ../../source/ref-changelog.md:80 msgid "" -"The Flower server is now fully serialization-agnostic. Prior usage of " -"class `Weights` (which represents parameters as deserialized NumPy " -"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). " -"`Parameters` objects are fully serialization-agnostic and represents " -"parameters as byte arrays, the `tensor_type` attributes indicates how " -"these byte arrays should be interpreted (e.g., for " -"serialization/deserialization)." 
+"**Improve Simulation Engine** " +"([#4095](https://github.com/adap/flower/pull/4095), " +"[#3913](https://github.com/adap/flower/pull/3913), " +"[#4059](https://github.com/adap/flower/pull/4059), " +"[#3954](https://github.com/adap/flower/pull/3954), " +"[#4071](https://github.com/adap/flower/pull/4071), " +"[#3985](https://github.com/adap/flower/pull/3985), " +"[#3988](https://github.com/adap/flower/pull/3988))" msgstr "" -#: ../../source/ref-changelog.md:933 +#: ../../source/ref-changelog.md:82 msgid "" -"Built-in strategies implement this approach by handling serialization and" -" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " -"implementations should update to the slightly changed Strategy method " -"definitions. Strategy authors can consult PR " -"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" -" easily migrate to the new format." +"The Flower Simulation Engine comes with several updates, including " +"improved run config support, verbose logging, simulation backend " +"configuration via `flwr run`, and more." msgstr "" -#: ../../source/ref-changelog.md:935 +#: ../../source/ref-changelog.md:84 msgid "" -"Deprecated `flwr.server.Server.evaluate`, use " -"`flwr.server.Server.evaluate_round` instead " -"([#717](https://github.com/adap/flower/pull/717))" +"**Improve** `RecordSet` " +"([#4052](https://github.com/adap/flower/pull/4052), " +"[#3218](https://github.com/adap/flower/pull/3218), " +"[#4016](https://github.com/adap/flower/pull/4016))" msgstr "" -#: ../../source/ref-changelog.md:937 -msgid "v0.15.0 (2021-03-12)" +#: ../../source/ref-changelog.md:86 +msgid "" +"`RecordSet` is the core object to exchange model parameters, " +"configuration values and metrics between `ClientApp` and `ServerApp`. " +"This release ships several smaller improvements to `RecordSet` and " +"related `*Record` types." 
msgstr "" -#: ../../source/ref-changelog.md:941 +#: ../../source/ref-changelog.md:88 msgid "" -"**Server-side parameter initialization** " -"([#658](https://github.com/adap/flower/pull/658))" +"**Update documentation** " +"([#3972](https://github.com/adap/flower/pull/3972), " +"[#3925](https://github.com/adap/flower/pull/3925), " +"[#4061](https://github.com/adap/flower/pull/4061), " +"[#3984](https://github.com/adap/flower/pull/3984), " +"[#3917](https://github.com/adap/flower/pull/3917), " +"[#3900](https://github.com/adap/flower/pull/3900), " +"[#4066](https://github.com/adap/flower/pull/4066), " +"[#3765](https://github.com/adap/flower/pull/3765), " +"[#4021](https://github.com/adap/flower/pull/4021), " +"[#3906](https://github.com/adap/flower/pull/3906), " +"[#4063](https://github.com/adap/flower/pull/4063), " +"[#4076](https://github.com/adap/flower/pull/4076), " +"[#3920](https://github.com/adap/flower/pull/3920), " +"[#3916](https://github.com/adap/flower/pull/3916))" +msgstr "" + +#: ../../source/ref-changelog.md:90 +msgid "" +"Many parts of the documentation, including the main tutorial, have been " +"migrated to show new Flower APIs and other new Flower features like the " +"improved Docker support." 
+msgstr "" + +#: ../../source/ref-changelog.md:92 +msgid "" +"**Migrate code example to use new Flower APIs** " +"([#3758](https://github.com/adap/flower/pull/3758), " +"[#3701](https://github.com/adap/flower/pull/3701), " +"[#3919](https://github.com/adap/flower/pull/3919), " +"[#3918](https://github.com/adap/flower/pull/3918), " +"[#3934](https://github.com/adap/flower/pull/3934), " +"[#3893](https://github.com/adap/flower/pull/3893), " +"[#3833](https://github.com/adap/flower/pull/3833), " +"[#3922](https://github.com/adap/flower/pull/3922), " +"[#3846](https://github.com/adap/flower/pull/3846), " +"[#3777](https://github.com/adap/flower/pull/3777), " +"[#3874](https://github.com/adap/flower/pull/3874), " +"[#3873](https://github.com/adap/flower/pull/3873), " +"[#3935](https://github.com/adap/flower/pull/3935), " +"[#3754](https://github.com/adap/flower/pull/3754), " +"[#3980](https://github.com/adap/flower/pull/3980), " +"[#4089](https://github.com/adap/flower/pull/4089), " +"[#4046](https://github.com/adap/flower/pull/4046), " +"[#3314](https://github.com/adap/flower/pull/3314), " +"[#3316](https://github.com/adap/flower/pull/3316), " +"[#3295](https://github.com/adap/flower/pull/3295), " +"[#3313](https://github.com/adap/flower/pull/3313))" +msgstr "" + +#: ../../source/ref-changelog.md:94 +msgid "Many code examples have been migrated to use new Flower APIs." 
+msgstr "" + +#: ../../source/ref-changelog.md:96 +msgid "" +"**Update Flower framework, framework internals and quality " +"infrastructure** ([#4018](https://github.com/adap/flower/pull/4018), " +"[#4053](https://github.com/adap/flower/pull/4053), " +"[#4098](https://github.com/adap/flower/pull/4098), " +"[#4067](https://github.com/adap/flower/pull/4067), " +"[#4105](https://github.com/adap/flower/pull/4105), " +"[#4048](https://github.com/adap/flower/pull/4048), " +"[#4107](https://github.com/adap/flower/pull/4107), " +"[#4069](https://github.com/adap/flower/pull/4069), " +"[#3915](https://github.com/adap/flower/pull/3915), " +"[#4101](https://github.com/adap/flower/pull/4101), " +"[#4108](https://github.com/adap/flower/pull/4108), " +"[#3914](https://github.com/adap/flower/pull/3914), " +"[#4068](https://github.com/adap/flower/pull/4068), " +"[#4041](https://github.com/adap/flower/pull/4041), " +"[#4040](https://github.com/adap/flower/pull/4040), " +"[#3986](https://github.com/adap/flower/pull/3986), " +"[#4026](https://github.com/adap/flower/pull/4026), " +"[#3961](https://github.com/adap/flower/pull/3961), " +"[#3975](https://github.com/adap/flower/pull/3975), " +"[#3983](https://github.com/adap/flower/pull/3983), " +"[#4091](https://github.com/adap/flower/pull/4091), " +"[#3982](https://github.com/adap/flower/pull/3982), " +"[#4079](https://github.com/adap/flower/pull/4079), " +"[#4073](https://github.com/adap/flower/pull/4073), " +"[#4060](https://github.com/adap/flower/pull/4060), " +"[#4106](https://github.com/adap/flower/pull/4106), " +"[#4080](https://github.com/adap/flower/pull/4080), " +"[#3974](https://github.com/adap/flower/pull/3974), " +"[#3996](https://github.com/adap/flower/pull/3996), " +"[#3991](https://github.com/adap/flower/pull/3991), " +"[#3981](https://github.com/adap/flower/pull/3981), " +"[#4093](https://github.com/adap/flower/pull/4093), " +"[#4100](https://github.com/adap/flower/pull/4100), " 
+"[#3939](https://github.com/adap/flower/pull/3939), " +"[#3955](https://github.com/adap/flower/pull/3955), " +"[#3940](https://github.com/adap/flower/pull/3940), " +"[#4038](https://github.com/adap/flower/pull/4038))" +msgstr "" + +#: ../../source/ref-changelog.md:98 ../../source/ref-changelog.md:205 +msgid "" +"As always, many parts of the Flower framework and quality infrastructure " +"were improved and updated." +msgstr "" + +#: ../../source/ref-changelog.md:100 ../../source/ref-changelog.md:217 +#: ../../source/ref-changelog.md:309 ../../source/ref-changelog.md:1292 +msgid "Deprecations" msgstr "" -#: ../../source/ref-changelog.md:943 +#: ../../source/ref-changelog.md:102 msgid "" -"Model parameters can now be initialized on the server-side. Server-side " -"parameter initialization works via a new `Strategy` method called " -"`initialize_parameters`." +"**Deprecate accessing `Context` via `Client.context`** " +"([#3797](https://github.com/adap/flower/pull/3797))" msgstr "" -#: ../../source/ref-changelog.md:945 +#: ../../source/ref-changelog.md:104 msgid "" -"Built-in strategies support a new constructor argument called " -"`initial_parameters` to set the initial parameters. Built-in strategies " -"will provide these initial parameters to the server on startup and then " -"delete them to free the memory afterwards." +"Now that both `client_fn` and `server_fn` receive a `Context` object, " +"accessing `Context` via `Client.context` is deprecated. `Client.context` " +"will be removed in a future release. 
If you need to access `Context` in " +"your `Client` implementation, pass it manually when creating the `Client`" +" instance in `client_fn`:" msgstr "" -#: ../../source/ref-changelog.md:964 +#: ../../source/ref-changelog.md:113 msgid "" -"If no initial parameters are provided to the strategy, the server will " -"continue to use the current behaviour (namely, it will ask one of the " -"connected clients for its parameters and use these as the initial global " -"parameters)." +"**Update CLIs to accept an app directory instead of** `ClientApp` **and**" +" `ServerApp` ([#3952](https://github.com/adap/flower/pull/3952), " +"[#4077](https://github.com/adap/flower/pull/4077), " +"[#3850](https://github.com/adap/flower/pull/3850))" msgstr "" -#: ../../source/ref-changelog.md:966 -msgid "Deprecations" +#: ../../source/ref-changelog.md:115 +msgid "" +"The CLI commands `flower-supernode` and `flower-server-app` now accept an" +" app directory as argument (instead of references to a `ClientApp` or " +"`ServerApp`). An app directory is any directory containing a " +"`pyproject.toml` file (with the appropriate Flower config fields set). " +"The easiest way to generate a compatible project structure is to use " +"`flwr new`." msgstr "" -#: ../../source/ref-changelog.md:968 +#: ../../source/ref-changelog.md:117 msgid "" -"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " -"`flwr.server.strategy.FedAvg`, which is equivalent)" +"**Disable** `flower-client-app` **CLI command** " +"([#4022](https://github.com/adap/flower/pull/4022))" msgstr "" -#: ../../source/ref-changelog.md:970 -msgid "v0.14.0 (2021-02-18)" +#: ../../source/ref-changelog.md:119 +msgid "`flower-client-app` has been disabled. Use `flower-supernode` instead." 
msgstr "" -#: ../../source/ref-changelog.md:974 +#: ../../source/ref-changelog.md:121 msgid "" -"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " -"([#610](https://github.com/adap/flower/pull/610) " -"[#572](https://github.com/adap/flower/pull/572) " -"[#633](https://github.com/adap/flower/pull/633))" +"**Use spaces instead of commas for separating config args** " +"([#4000](https://github.com/adap/flower/pull/4000))" msgstr "" -#: ../../source/ref-changelog.md:976 +#: ../../source/ref-changelog.md:123 msgid "" -"Clients can now return an additional dictionary mapping `str` keys to " -"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " -"This means one can return almost arbitrary values from `fit`/`evaluate` " -"and make use of them on the server side!" +"When passing configs (run config, node config) to Flower, you now need to" +" separate key-value pairs using spaces instead of commas. For example:" msgstr "" -#: ../../source/ref-changelog.md:978 -msgid "" -"This improvement also allowed for more consistent return types between " -"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " -"dict)` representing the loss, number of examples, and a dictionary " -"holding arbitrary problem-specific values like accuracy." +#: ../../source/ref-changelog.md:129 +msgid "Previously, you could pass configs using commas, like this:" msgstr "" -#: ../../source/ref-changelog.md:980 +#: ../../source/ref-changelog.md:135 msgid "" -"In case you wondered: this feature is compatible with existing projects, " -"the additional dictionary return value is optional. New code should " -"however migrate to the new return types to be compatible with upcoming " -"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " -"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " -"details." 
+"**Remove** `flwr example` **CLI command** " +"([#4084](https://github.com/adap/flower/pull/4084))" msgstr "" -#: ../../source/ref-changelog.md:982 +#: ../../source/ref-changelog.md:137 msgid "" -"*Code example:* note the additional dictionary return values in both " -"`FlwrClient.fit` and `FlwrClient.evaluate`:" +"The experimental `flwr example` CLI command has been removed. Use `flwr " +"new` to generate a project and then run it using `flwr run`." msgstr "" -#: ../../source/ref-changelog.md:997 -msgid "" -"**Generalized** `config` **argument in** `Client.fit` **and** " -"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" +#: ../../source/ref-changelog.md:139 +msgid "v1.10.0 (2024-07-24)" msgstr "" -#: ../../source/ref-changelog.md:999 +#: ../../source/ref-changelog.md:145 msgid "" -"The `config` argument used to be of type `Dict[str, str]`, which means " -"that dictionary values were expected to be strings. The new release " -"generalizes this to enable values of the following types: `bool`, " -"`bytes`, `float`, `int`, `str`." +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, " +"`Ikko Eltociear Ashimine`, `Javier`, `Jiahao Tan`, `Mohammad Naseri`, " +"`Robert Steiner`, `Sebastian van der Voort`, `Taner Topal`, `Yan Gao` " msgstr "" -#: ../../source/ref-changelog.md:1001 +#: ../../source/ref-changelog.md:149 msgid "" -"This means one can now pass almost arbitrary values to `fit`/`evaluate` " -"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" -"side and `int(config[\"epochs\"])` on the client side!" 
+"**Introduce** `flwr run` **(beta)** " +"([#3810](https://github.com/adap/flower/pull/3810), " +"[#3826](https://github.com/adap/flower/pull/3826), " +"[#3880](https://github.com/adap/flower/pull/3880), " +"[#3807](https://github.com/adap/flower/pull/3807), " +"[#3800](https://github.com/adap/flower/pull/3800), " +"[#3814](https://github.com/adap/flower/pull/3814), " +"[#3811](https://github.com/adap/flower/pull/3811), " +"[#3809](https://github.com/adap/flower/pull/3809), " +"[#3819](https://github.com/adap/flower/pull/3819))" msgstr "" -#: ../../source/ref-changelog.md:1003 +#: ../../source/ref-changelog.md:151 msgid "" -"*Code example:* note that the `config` dictionary now contains non-`str` " -"values in both `Client.fit` and `Client.evaluate`:" +"Flower 1.10 ships the first beta release of the new `flwr run` command. " +"`flwr run` can run different projects using `flwr run path/to/project`, " +"it enables you to easily switch between different federations using `flwr" +" run . federation` and it runs your Flower project using either local " +"simulation or the new (experimental) SuperExec service. This allows " +"Flower to scale federatated learning from fast local simulation to large-" +"scale production deployment, seamlessly. All projects generated with " +"`flwr new` are immediately runnable using `flwr run`. Give it a try: use " +"`flwr new` to generate a project and then run it using `flwr run`." 
msgstr "" -#: ../../source/ref-changelog.md:1020 -msgid "v0.13.0 (2021-01-08)" +#: ../../source/ref-changelog.md:153 +msgid "" +"**Introduce run config** " +"([#3751](https://github.com/adap/flower/pull/3751), " +"[#3750](https://github.com/adap/flower/pull/3750), " +"[#3845](https://github.com/adap/flower/pull/3845), " +"[#3824](https://github.com/adap/flower/pull/3824), " +"[#3746](https://github.com/adap/flower/pull/3746), " +"[#3728](https://github.com/adap/flower/pull/3728), " +"[#3730](https://github.com/adap/flower/pull/3730), " +"[#3725](https://github.com/adap/flower/pull/3725), " +"[#3729](https://github.com/adap/flower/pull/3729), " +"[#3580](https://github.com/adap/flower/pull/3580), " +"[#3578](https://github.com/adap/flower/pull/3578), " +"[#3576](https://github.com/adap/flower/pull/3576), " +"[#3798](https://github.com/adap/flower/pull/3798), " +"[#3732](https://github.com/adap/flower/pull/3732), " +"[#3815](https://github.com/adap/flower/pull/3815))" msgstr "" -#: ../../source/ref-changelog.md:1024 +#: ../../source/ref-changelog.md:155 msgid "" -"New example: PyTorch From Centralized To Federated " -"([#549](https://github.com/adap/flower/pull/549))" +"The new run config feature allows you to run your Flower project in " +"different configurations without having to change a single line of code. " +"You can now build a configurable `ServerApp` and `ClientApp` that read " +"configuration values at runtime. This enables you to specify config " +"values like `learning-rate=0.01` in `pyproject.toml` (under the " +"`[tool.flwr.app.config]` key). These config values can then be easily " +"overridden via `flwr run --run-config learning-rate=0.02`, and read from " +"`Context` using `lr = context.run_config[\"learning-rate\"]`. Create a " +"new project using `flwr new` to see run config in action." 
msgstr "" -#: ../../source/ref-changelog.md:1025 -msgid "Improved documentation" +#: ../../source/ref-changelog.md:157 +msgid "" +"**Generalize** `client_fn` **signature to** `client_fn(context: Context) " +"-> Client` ([#3779](https://github.com/adap/flower/pull/3779), " +"[#3697](https://github.com/adap/flower/pull/3697), " +"[#3694](https://github.com/adap/flower/pull/3694), " +"[#3696](https://github.com/adap/flower/pull/3696))" msgstr "" -#: ../../source/ref-changelog.md:1026 -msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" +#: ../../source/ref-changelog.md:159 +msgid "" +"The `client_fn` signature has been generalized to `client_fn(context: " +"Context) -> Client`. It now receives a `Context` object instead of the " +"(now depreacated) `cid: str`. `Context` allows accessing `node_id`, " +"`node_config` and `run_config`, among other things. This enables you to " +"build a configurable `ClientApp` that leverages the new run config " +"system." msgstr "" -#: ../../source/ref-changelog.md:1027 -msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" +#: ../../source/ref-changelog.md:161 +msgid "" +"The previous signature `client_fn(cid: str)` is now deprecated and " +"support for it will be removed in a future release. Use " +"`client_fn(context: Context) -> Client` everywhere." 
msgstr "" -#: ../../source/ref-changelog.md:1028 +#: ../../source/ref-changelog.md:163 msgid "" -"Updated examples documentation " -"([#549](https://github.com/adap/flower/pull/549))" +"**Introduce new** `server_fn(context)` " +"([#3773](https://github.com/adap/flower/pull/3773), " +"[#3796](https://github.com/adap/flower/pull/3796), " +"[#3771](https://github.com/adap/flower/pull/3771))" msgstr "" -#: ../../source/ref-changelog.md:1029 +#: ../../source/ref-changelog.md:165 msgid "" -"Removed obsolete documentation " -"([#548](https://github.com/adap/flower/pull/548))" +"In addition to the new `client_fn(context:Context)`, a new " +"`server_fn(context: Context) -> ServerAppComponents` can now be passed to" +" `ServerApp` (instead of passing, for example, `Strategy`, directly). " +"This enables you to leverage the full `Context` on the server-side to " +"build a configurable `ServerApp`." msgstr "" -#: ../../source/ref-changelog.md:1031 -msgid "Bugfix:" +#: ../../source/ref-changelog.md:167 +msgid "" +"**Relaunch all** `flwr new` **templates** " +"([#3877](https://github.com/adap/flower/pull/3877), " +"[#3821](https://github.com/adap/flower/pull/3821), " +"[#3587](https://github.com/adap/flower/pull/3587), " +"[#3795](https://github.com/adap/flower/pull/3795), " +"[#3875](https://github.com/adap/flower/pull/3875), " +"[#3859](https://github.com/adap/flower/pull/3859), " +"[#3760](https://github.com/adap/flower/pull/3760))" msgstr "" -#: ../../source/ref-changelog.md:1033 +#: ../../source/ref-changelog.md:169 msgid "" -"`Server.fit` does not disconnect clients when finished, disconnecting the" -" clients is now handled in `flwr.server.start_server` " -"([#553](https://github.com/adap/flower/pull/553) " -"[#540](https://github.com/adap/flower/issues/540))." +"All `flwr new` templates have been significantly updated to showcase new " +"Flower features and best practices. This includes using `flwr run` and " +"the new run config feature. 
You can now easily create a new project using" +" `flwr new` and, after following the instructions to install it, `flwr " +"run` it." msgstr "" -#: ../../source/ref-changelog.md:1035 -msgid "v0.12.0 (2020-12-07)" +#: ../../source/ref-changelog.md:171 +msgid "" +"**Introduce** `flower-supernode` **(preview)** " +"([#3353](https://github.com/adap/flower/pull/3353))" msgstr "" -#: ../../source/ref-changelog.md:1037 ../../source/ref-changelog.md:1053 -msgid "Important changes:" +#: ../../source/ref-changelog.md:173 +msgid "" +"The new `flower-supernode` CLI is here to replace `flower-client-app`. " +"`flower-supernode` brings full multi-app support to the Flower client-" +"side. It also allows to pass `--node-config` to the SuperNode, which is " +"accessible in your `ClientApp` via `Context` (using the new " +"`client_fn(context: Context)` signature)." msgstr "" -#: ../../source/ref-changelog.md:1039 +#: ../../source/ref-changelog.md:175 msgid "" -"Added an example for embedded devices " -"([#507](https://github.com/adap/flower/pull/507))" +"**Introduce node config** " +"([#3782](https://github.com/adap/flower/pull/3782), " +"[#3780](https://github.com/adap/flower/pull/3780), " +"[#3695](https://github.com/adap/flower/pull/3695), " +"[#3886](https://github.com/adap/flower/pull/3886))" msgstr "" -#: ../../source/ref-changelog.md:1040 +#: ../../source/ref-changelog.md:177 msgid "" -"Added a new NumPyClient (in addition to the existing KerasClient) " -"([#504](https://github.com/adap/flower/pull/504) " -"[#508](https://github.com/adap/flower/pull/508))" +"A new node config feature allows you to pass a static configuration to " +"the SuperNode. This configuration is read-only and available to every " +"`ClientApp` running on that SuperNode. A `ClientApp` can access the node " +"config via `Context` (`context.node_config`)." 
msgstr "" -#: ../../source/ref-changelog.md:1041 +#: ../../source/ref-changelog.md:179 msgid "" -"Deprecated `flwr_example` package and started to migrate examples into " -"the top-level `examples` directory " -"([#494](https://github.com/adap/flower/pull/494) " -"[#512](https://github.com/adap/flower/pull/512))" +"**Introduce SuperExec (experimental)** " +"([#3605](https://github.com/adap/flower/pull/3605), " +"[#3723](https://github.com/adap/flower/pull/3723), " +"[#3731](https://github.com/adap/flower/pull/3731), " +"[#3589](https://github.com/adap/flower/pull/3589), " +"[#3604](https://github.com/adap/flower/pull/3604), " +"[#3622](https://github.com/adap/flower/pull/3622), " +"[#3838](https://github.com/adap/flower/pull/3838), " +"[#3720](https://github.com/adap/flower/pull/3720), " +"[#3606](https://github.com/adap/flower/pull/3606), " +"[#3602](https://github.com/adap/flower/pull/3602), " +"[#3603](https://github.com/adap/flower/pull/3603), " +"[#3555](https://github.com/adap/flower/pull/3555), " +"[#3808](https://github.com/adap/flower/pull/3808), " +"[#3724](https://github.com/adap/flower/pull/3724), " +"[#3658](https://github.com/adap/flower/pull/3658), " +"[#3629](https://github.com/adap/flower/pull/3629))" msgstr "" -#: ../../source/ref-changelog.md:1043 -msgid "v0.11.0 (2020-11-30)" +#: ../../source/ref-changelog.md:181 +msgid "" +"This is the first experimental release of Flower SuperExec, a new service" +" that executes your runs. It's not ready for production deployment just " +"yet, but don't hesitate to give it a try if you're interested." 
msgstr "" -#: ../../source/ref-changelog.md:1045 -msgid "Incompatible changes:" +#: ../../source/ref-changelog.md:183 +msgid "" +"**Add new federated learning with tabular data example** " +"([#3568](https://github.com/adap/flower/pull/3568))" msgstr "" -#: ../../source/ref-changelog.md:1047 +#: ../../source/ref-changelog.md:185 msgid "" -"Renamed strategy methods " -"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " -"Flower's public APIs. Other public methods/functions (e.g., every method " -"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " -"which is why we're removing it from the four methods in Strategy. To " -"migrate rename the following `Strategy` methods accordingly:" +"A new code example exemplifies a federated learning setup using the " +"Flower framework on the Adult Census Income tabular dataset." msgstr "" -#: ../../source/ref-changelog.md:1048 -msgid "`on_configure_evaluate` => `configure_evaluate`" +#: ../../source/ref-changelog.md:187 +msgid "" +"**Create generic adapter layer (preview)** " +"([#3538](https://github.com/adap/flower/pull/3538), " +"[#3536](https://github.com/adap/flower/pull/3536), " +"[#3540](https://github.com/adap/flower/pull/3540))" msgstr "" -#: ../../source/ref-changelog.md:1049 -msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" +#: ../../source/ref-changelog.md:189 +msgid "" +"A new generic gRPC adapter layer allows 3rd-party frameworks to integrate" +" with Flower in a transparent way. This makes Flower more modular and " +"allows for integration into other federated learning solutions and " +"platforms." 
msgstr "" -#: ../../source/ref-changelog.md:1050 -msgid "`on_configure_fit` => `configure_fit`" +#: ../../source/ref-changelog.md:191 +msgid "" +"**Refactor Flower Simulation Engine** " +"([#3581](https://github.com/adap/flower/pull/3581), " +"[#3471](https://github.com/adap/flower/pull/3471), " +"[#3804](https://github.com/adap/flower/pull/3804), " +"[#3468](https://github.com/adap/flower/pull/3468), " +"[#3839](https://github.com/adap/flower/pull/3839), " +"[#3806](https://github.com/adap/flower/pull/3806), " +"[#3861](https://github.com/adap/flower/pull/3861), " +"[#3543](https://github.com/adap/flower/pull/3543), " +"[#3472](https://github.com/adap/flower/pull/3472), " +"[#3829](https://github.com/adap/flower/pull/3829), " +"[#3469](https://github.com/adap/flower/pull/3469))" msgstr "" -#: ../../source/ref-changelog.md:1051 -msgid "`on_aggregate_fit` => `aggregate_fit`" +#: ../../source/ref-changelog.md:193 +msgid "" +"The Simulation Engine was significantly refactored. This results in " +"faster and more stable simulations. It is also the foundation for " +"upcoming changes that aim to provide the next level of performance and " +"configurability in federated learning simulations." msgstr "" -#: ../../source/ref-changelog.md:1055 +#: ../../source/ref-changelog.md:195 msgid "" -"Deprecated `DefaultStrategy` " -"([#479](https://github.com/adap/flower/pull/479)). To migrate use " -"`FedAvg` instead." +"**Optimize Docker containers** " +"([#3591](https://github.com/adap/flower/pull/3591))" msgstr "" -#: ../../source/ref-changelog.md:1056 +#: ../../source/ref-changelog.md:197 msgid "" -"Simplified examples and baselines " -"([#484](https://github.com/adap/flower/pull/484))." +"Flower Docker containers were optimized and updated to use that latest " +"Flower framework features." 
msgstr "" -#: ../../source/ref-changelog.md:1057 +#: ../../source/ref-changelog.md:199 msgid "" -"Removed presently unused `on_conclude_round` from strategy interface " -"([#483](https://github.com/adap/flower/pull/483))." +"**Improve logging** ([#3776](https://github.com/adap/flower/pull/3776), " +"[#3789](https://github.com/adap/flower/pull/3789))" msgstr "" -#: ../../source/ref-changelog.md:1058 +#: ../../source/ref-changelog.md:201 msgid "" -"Set minimal Python version to 3.6.1 instead of 3.6.9 " -"([#471](https://github.com/adap/flower/pull/471))." +"Improved logging aims to be more concise and helpful to show you the " +"details you actually care about." msgstr "" -#: ../../source/ref-changelog.md:1059 +#: ../../source/ref-changelog.md:203 msgid "" -"Improved `Strategy` docstrings " -"([#470](https://github.com/adap/flower/pull/470))." +"**Refactor framework internals** " +"([#3621](https://github.com/adap/flower/pull/3621), " +"[#3792](https://github.com/adap/flower/pull/3792), " +"[#3772](https://github.com/adap/flower/pull/3772), " +"[#3805](https://github.com/adap/flower/pull/3805), " +"[#3583](https://github.com/adap/flower/pull/3583), " +"[#3825](https://github.com/adap/flower/pull/3825), " +"[#3597](https://github.com/adap/flower/pull/3597), " +"[#3802](https://github.com/adap/flower/pull/3802), " +"[#3569](https://github.com/adap/flower/pull/3569))" msgstr "" -#: ../../source/ref-example-projects.rst:2 -msgid "Example projects" +#: ../../source/ref-changelog.md:207 +msgid "Documentation improvements" msgstr "" -#: ../../source/ref-example-projects.rst:4 +#: ../../source/ref-changelog.md:209 msgid "" -"Flower comes with a number of usage examples. The examples demonstrate " -"how Flower can be used to federate different kinds of existing machine " -"learning pipelines, usually leveraging popular machine learning " -"frameworks such as `PyTorch `_ or `TensorFlow " -"`_." 
+"**Add 🇰🇷 Korean translations** " +"([#3680](https://github.com/adap/flower/pull/3680))" msgstr "" -#: ../../source/ref-example-projects.rst:10 +#: ../../source/ref-changelog.md:211 msgid "" -"The following examples are available as standalone projects. Quickstart " -"TensorFlow/Keras ---------------------------" +"**Update translations** " +"([#3586](https://github.com/adap/flower/pull/3586), " +"[#3679](https://github.com/adap/flower/pull/3679), " +"[#3570](https://github.com/adap/flower/pull/3570), " +"[#3681](https://github.com/adap/flower/pull/3681), " +"[#3617](https://github.com/adap/flower/pull/3617), " +"[#3674](https://github.com/adap/flower/pull/3674), " +"[#3671](https://github.com/adap/flower/pull/3671), " +"[#3572](https://github.com/adap/flower/pull/3572), " +"[#3631](https://github.com/adap/flower/pull/3631))" msgstr "" -#: ../../source/ref-example-projects.rst:14 +#: ../../source/ref-changelog.md:213 msgid "" -"The TensorFlow/Keras quickstart example shows CIFAR-10 image " -"classification with MobileNetV2:" +"**Update documentation** " +"([#3864](https://github.com/adap/flower/pull/3864), " +"[#3688](https://github.com/adap/flower/pull/3688), " +"[#3562](https://github.com/adap/flower/pull/3562), " +"[#3641](https://github.com/adap/flower/pull/3641), " +"[#3384](https://github.com/adap/flower/pull/3384), " +"[#3634](https://github.com/adap/flower/pull/3634), " +"[#3823](https://github.com/adap/flower/pull/3823), " +"[#3793](https://github.com/adap/flower/pull/3793), " +"[#3707](https://github.com/adap/flower/pull/3707))" msgstr "" -#: ../../source/ref-example-projects.rst:17 +#: ../../source/ref-changelog.md:215 msgid "" -"`Quickstart TensorFlow (Code) " -"`_" +"Updated documentation includes new install instructions for different " +"shells, a new Flower Code Examples documentation landing page, new `flwr`" +" CLI docs and an updated federated XGBoost code example." 
msgstr "" -#: ../../source/ref-example-projects.rst:18 -msgid ":doc:`Quickstart TensorFlow (Tutorial) `" +#: ../../source/ref-changelog.md:219 +msgid "**Deprecate** `client_fn(cid: str)`" msgstr "" -#: ../../source/ref-example-projects.rst:19 +#: ../../source/ref-changelog.md:221 msgid "" -"`Quickstart TensorFlow (Blog Post) `_" -msgstr "" - -#: ../../source/ref-example-projects.rst:23 -#: ../../source/tutorial-quickstart-pytorch.rst:5 -msgid "Quickstart PyTorch" +"`client_fn` used to have a signature `client_fn(cid: str) -> Client`. " +"This signature is now deprecated. Use the new signature " +"`client_fn(context: Context) -> Client` instead. The new argument " +"`context` allows accessing `node_id`, `node_config`, `run_config` and " +"other `Context` features. When running using the simulation engine (or " +"using `flower-supernode` with a custom `--node-config partition-id=...`)," +" `context.node_config[\"partition-id\"]` will return an `int` partition " +"ID that can be used with Flower Datasets to load a different partition of" +" the dataset on each simulated or deployed SuperNode." msgstr "" -#: ../../source/ref-example-projects.rst:25 +#: ../../source/ref-changelog.md:223 msgid "" -"The PyTorch quickstart example shows CIFAR-10 image classification with a" -" simple Convolutional Neural Network:" +"**Deprecate passing** `Server/ServerConfig/Strategy/ClientManager` **to**" +" `ServerApp` **directly**" msgstr "" -#: ../../source/ref-example-projects.rst:28 +#: ../../source/ref-changelog.md:225 msgid "" -"`Quickstart PyTorch (Code) " -"`_" -msgstr "" - -#: ../../source/ref-example-projects.rst:29 -msgid ":doc:`Quickstart PyTorch (Tutorial) `" +"Creating `ServerApp` using `ServerApp(config=config, strategy=strategy)` " +"is now deprecated. 
Instead of passing " +"`Server/ServerConfig/Strategy/ClientManager` to `ServerApp` directly, " +"pass them wrapped in a `server_fn(context: Context) -> " +"ServerAppComponents` function, like this: " +"`ServerApp(server_fn=server_fn)`. `ServerAppComponents` can hold " +"references to `Server/ServerConfig/Strategy/ClientManager`. In addition " +"to that, `server_fn` allows you to access `Context` (for example, to read" +" the `run_config`)." msgstr "" -#: ../../source/ref-example-projects.rst:33 -msgid "PyTorch: From Centralized To Federated" +#: ../../source/ref-changelog.md:229 +msgid "" +"**Remove support for `client_ids` in `start_simulation`** " +"([#3699](https://github.com/adap/flower/pull/3699))" msgstr "" -#: ../../source/ref-example-projects.rst:35 +#: ../../source/ref-changelog.md:231 msgid "" -"This example shows how a regular PyTorch project can be federated using " -"Flower:" +"The (rarely used) feature that allowed passing custom `client_ids` to the" +" `start_simulation` function was removed. This removal is part of a " +"bigger effort to refactor the simulation engine and unify how the Flower " +"internals work in simulation and deployment." msgstr "" -#: ../../source/ref-example-projects.rst:37 +#: ../../source/ref-changelog.md:233 msgid "" -"`PyTorch: From Centralized To Federated (Code) " -"`_" +"**Remove `flower-driver-api` and `flower-fleet-api`** " +"([#3418](https://github.com/adap/flower/pull/3418))" msgstr "" -#: ../../source/ref-example-projects.rst:38 +#: ../../source/ref-changelog.md:235 msgid "" -":doc:`PyTorch: From Centralized To Federated (Tutorial) `" +"The two deprecated CLI commands `flower-driver-api` and `flower-fleet-" +"api` were removed in an effort to streamline the SuperLink developer " +"experience. Use `flower-superlink` instead." 
msgstr "" -#: ../../source/ref-example-projects.rst:42 -msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" +#: ../../source/ref-changelog.md:237 +msgid "v1.9.0 (2024-06-10)" msgstr "" -#: ../../source/ref-example-projects.rst:44 +#: ../../source/ref-changelog.md:243 msgid "" -"This example shows how Flower can be used to build a federated learning " -"system that run across Raspberry Pi and Nvidia Jetson:" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Mahdi Beitollahi`," +" `Robert Steiner`, `Taner Topal`, `Yan Gao`, `bapic`, `mohammadnaseri` " msgstr "" -#: ../../source/ref-example-projects.rst:46 +#: ../../source/ref-changelog.md:247 msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " -"`_" +"**Introduce built-in authentication (preview)** " +"([#2946](https://github.com/adap/flower/pull/2946), " +"[#3388](https://github.com/adap/flower/pull/3388), " +"[#2948](https://github.com/adap/flower/pull/2948), " +"[#2917](https://github.com/adap/flower/pull/2917), " +"[#3386](https://github.com/adap/flower/pull/3386), " +"[#3308](https://github.com/adap/flower/pull/3308), " +"[#3001](https://github.com/adap/flower/pull/3001), " +"[#3409](https://github.com/adap/flower/pull/3409), " +"[#2999](https://github.com/adap/flower/pull/2999), " +"[#2979](https://github.com/adap/flower/pull/2979), " +"[#3389](https://github.com/adap/flower/pull/3389), " +"[#3503](https://github.com/adap/flower/pull/3503), " +"[#3366](https://github.com/adap/flower/pull/3366), " +"[#3357](https://github.com/adap/flower/pull/3357))" msgstr "" -#: ../../source/ref-example-projects.rst:47 +#: ../../source/ref-changelog.md:249 msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " -"`_" +"Flower 1.9 introduces the first build-in version of client node " +"authentication. 
In previous releases, users often wrote glue code to " +"connect Flower to external authentication systems. With this release, the" +" SuperLink can authenticate SuperNodes using a built-in authentication " +"system. A new [how-to guide](https://flower.ai/docs/framework/how-to-" +"authenticate-supernodes.html) and a new [code " +"example](https://github.com/adap/flower/tree/main/examples/flower-" +"authentication) help you to get started." msgstr "" -#: ../../source/ref-faq.rst:4 +#: ../../source/ref-changelog.md:251 msgid "" -"This page collects answers to commonly asked questions about Federated " -"Learning with Flower." -msgstr "" - -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" +"This is the first preview release of the Flower-native authentication " +"system. Many additional features are on the roadmap for upcoming Flower " +"releases - stay tuned." msgstr "" -#: ../../source/ref-faq.rst:8 +#: ../../source/ref-changelog.md:253 msgid "" -"Yes, it can! Flower even comes with a few under-the-hood optimizations to" -" make it work even better on Colab. 
Here's a quickstart example:" +"**Introduce end-to-end Docker support** " +"([#3483](https://github.com/adap/flower/pull/3483), " +"[#3266](https://github.com/adap/flower/pull/3266), " +"[#3390](https://github.com/adap/flower/pull/3390), " +"[#3283](https://github.com/adap/flower/pull/3283), " +"[#3285](https://github.com/adap/flower/pull/3285), " +"[#3391](https://github.com/adap/flower/pull/3391), " +"[#3403](https://github.com/adap/flower/pull/3403), " +"[#3458](https://github.com/adap/flower/pull/3458), " +"[#3533](https://github.com/adap/flower/pull/3533), " +"[#3453](https://github.com/adap/flower/pull/3453), " +"[#3486](https://github.com/adap/flower/pull/3486), " +"[#3290](https://github.com/adap/flower/pull/3290))" msgstr "" -#: ../../source/ref-faq.rst:10 +#: ../../source/ref-changelog.md:255 msgid "" -"`Flower simulation PyTorch " -"`_" +"Full Flower Next Docker support is here! With the release of Flower 1.9, " +"Flower provides stable Docker images for the Flower SuperLink, the Flower" +" SuperNode, and the Flower `ServerApp`. This set of images enables you to" +" run all Flower components in Docker. Check out the new [how-to " +"guide](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html) to get stated." msgstr "" -#: ../../source/ref-faq.rst:11 +#: ../../source/ref-changelog.md:257 msgid "" -"`Flower simulation TensorFlow/Keras " -"`_" -msgstr "" - -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" 
+"**Re-architect Flower Next simulation engine** " +"([#3307](https://github.com/adap/flower/pull/3307), " +"[#3355](https://github.com/adap/flower/pull/3355), " +"[#3272](https://github.com/adap/flower/pull/3272), " +"[#3273](https://github.com/adap/flower/pull/3273), " +"[#3417](https://github.com/adap/flower/pull/3417), " +"[#3281](https://github.com/adap/flower/pull/3281), " +"[#3343](https://github.com/adap/flower/pull/3343), " +"[#3326](https://github.com/adap/flower/pull/3326))" msgstr "" -#: ../../source/ref-faq.rst:15 +#: ../../source/ref-changelog.md:259 msgid "" -"Find the `blog post about federated learning on embedded device here " -"`_" -" and the corresponding `GitHub code example " -"`_." -msgstr "" - -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" +"Flower Next simulations now use a new in-memory `Driver` that improves " +"the reliability of simulations, especially in notebook environments. This" +" is a significant step towards a complete overhaul of the Flower Next " +"simulation architecture." msgstr "" -#: ../../source/ref-faq.rst:19 +#: ../../source/ref-changelog.md:261 msgid "" -"Yes, it does. Please take a look at our `blog post " -"`_ or check out the code examples:" +"**Upgrade simulation engine** " +"([#3354](https://github.com/adap/flower/pull/3354), " +"[#3378](https://github.com/adap/flower/pull/3378), " +"[#3262](https://github.com/adap/flower/pull/3262), " +"[#3435](https://github.com/adap/flower/pull/3435), " +"[#3501](https://github.com/adap/flower/pull/3501), " +"[#3482](https://github.com/adap/flower/pull/3482), " +"[#3494](https://github.com/adap/flower/pull/3494))" msgstr "" -#: ../../source/ref-faq.rst:21 +#: ../../source/ref-changelog.md:263 msgid "" -"`Android Kotlin example `_" -msgstr "" - -#: ../../source/ref-faq.rst:22 -msgid "`Android Java example `_" +"The Flower Next simulation engine comes with improved and configurable " +"logging. 
The Ray-based simulation backend in Flower 1.9 was updated to " +"use Ray 2.10." msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" -msgstr "" - -#: ../../source/ref-faq.rst:26 +#: ../../source/ref-changelog.md:265 msgid "" -"Yes, of course. A list of available examples using Flower within a " -"blockchain environment is available here:" +"**Introduce FedPFT baseline** " +"([#3268](https://github.com/adap/flower/pull/3268))" msgstr "" -#: ../../source/ref-faq.rst:28 +#: ../../source/ref-changelog.md:267 msgid "" -"`Flower meets Nevermined GitHub Repository `_." +"FedPFT allows you to perform one-shot Federated Learning by leveraging " +"widely available foundational models, dramatically reducing communication" +" costs while delivering high performing models. This is work led by Mahdi" +" Beitollahi from Huawei Noah's Ark Lab (Montreal, Canada). Read all the " +"details in their paper: \"Parametric Feature Transfer: One-shot Federated" +" Learning with Foundation Models\" " +"([arxiv](https://arxiv.org/abs/2402.01862))" msgstr "" -#: ../../source/ref-faq.rst:29 +#: ../../source/ref-changelog.md:269 msgid "" -"`Flower meets Nevermined YouTube video " -"`_." +"**Launch additional** `flwr new` **templates for Apple MLX, Hugging Face " +"Transformers, scikit-learn and TensorFlow** " +"([#3291](https://github.com/adap/flower/pull/3291), " +"[#3139](https://github.com/adap/flower/pull/3139), " +"[#3284](https://github.com/adap/flower/pull/3284), " +"[#3251](https://github.com/adap/flower/pull/3251), " +"[#3376](https://github.com/adap/flower/pull/3376), " +"[#3287](https://github.com/adap/flower/pull/3287))" msgstr "" -#: ../../source/ref-faq.rst:30 +#: ../../source/ref-changelog.md:271 msgid "" -"`Flower meets KOSMoS `_." +"The `flwr` CLI's `flwr new` command is starting to become everyone's " +"favorite way of creating new Flower projects. 
This release introduces " +"additional `flwr new` templates for Apple MLX, Hugging Face Transformers," +" scikit-learn and TensorFlow. In addition to that, existing templates " +"also received updates." msgstr "" -#: ../../source/ref-faq.rst:31 +#: ../../source/ref-changelog.md:273 msgid "" -"`Flower meets Talan blog post `_ ." +"**Refine** `RecordSet` **API** " +"([#3209](https://github.com/adap/flower/pull/3209), " +"[#3331](https://github.com/adap/flower/pull/3331), " +"[#3334](https://github.com/adap/flower/pull/3334), " +"[#3335](https://github.com/adap/flower/pull/3335), " +"[#3375](https://github.com/adap/flower/pull/3375), " +"[#3368](https://github.com/adap/flower/pull/3368))" msgstr "" -#: ../../source/ref-faq.rst:32 +#: ../../source/ref-changelog.md:275 msgid "" -"`Flower meets Talan GitHub Repository " -"`_ ." -msgstr "" - -#: ../../source/ref-telemetry.md:1 -msgid "Telemetry" +"`RecordSet` is part of the Flower Next low-level API preview release. In " +"Flower 1.9, `RecordSet` received a number of usability improvements that " +"make it easier to build `RecordSet`-based `ServerApp`s and `ClientApp`s." msgstr "" -#: ../../source/ref-telemetry.md:3 +#: ../../source/ref-changelog.md:277 msgid "" -"The Flower open-source project collects **anonymous** usage metrics to " -"make well-informed decisions to improve Flower. Doing this enables the " -"Flower team to understand how Flower is used and what challenges users " -"might face." 
+"**Beautify logging** ([#3379](https://github.com/adap/flower/pull/3379), " +"[#3430](https://github.com/adap/flower/pull/3430), " +"[#3461](https://github.com/adap/flower/pull/3461), " +"[#3360](https://github.com/adap/flower/pull/3360), " +"[#3433](https://github.com/adap/flower/pull/3433))" msgstr "" -#: ../../source/ref-telemetry.md:5 +#: ../../source/ref-changelog.md:279 msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users that do not want to share anonymous usage metrics." -msgstr "" - -#: ../../source/ref-telemetry.md:7 -msgid "Principles" -msgstr "" - -#: ../../source/ref-telemetry.md:9 -msgid "We follow strong principles guarding anonymous usage metrics collection:" +"Logs received a substantial update. Not only are logs now much nicer to " +"look at, but they are also more configurable." msgstr "" -#: ../../source/ref-telemetry.md:11 +#: ../../source/ref-changelog.md:281 msgid "" -"**Optional:** You will always be able to disable telemetry; read on to " -"learn “[How to opt-out](#how-to-opt-out)”." +"**Improve reliability** " +"([#3564](https://github.com/adap/flower/pull/3564), " +"[#3561](https://github.com/adap/flower/pull/3561), " +"[#3566](https://github.com/adap/flower/pull/3566), " +"[#3462](https://github.com/adap/flower/pull/3462), " +"[#3225](https://github.com/adap/flower/pull/3225), " +"[#3514](https://github.com/adap/flower/pull/3514), " +"[#3535](https://github.com/adap/flower/pull/3535), " +"[#3372](https://github.com/adap/flower/pull/3372))" msgstr "" -#: ../../source/ref-telemetry.md:12 +#: ../../source/ref-changelog.md:283 msgid "" -"**Anonymous:** The reported usage metrics are anonymous and do not " -"contain any personally identifiable information (PII). See “[Collected " -"metrics](#collected-metrics)” to understand what metrics are being " -"reported." 
+"Flower 1.9 includes reliability improvements across many parts of the " +"system. One example is a much improved SuperNode shutdown procedure." msgstr "" -#: ../../source/ref-telemetry.md:13 +#: ../../source/ref-changelog.md:285 msgid "" -"**Transparent:** You can easily inspect what anonymous metrics are being " -"reported; see the section “[How to inspect what is being reported](#how-" -"to-inspect-what-is-being-reported)”" +"**Update Swift and C++ SDKs** " +"([#3321](https://github.com/adap/flower/pull/3321), " +"[#2763](https://github.com/adap/flower/pull/2763))" msgstr "" -#: ../../source/ref-telemetry.md:14 +#: ../../source/ref-changelog.md:287 msgid "" -"**Open for feedback:** You can always reach out to us if you have " -"feedback; see the section “[How to contact us](#how-to-contact-us)” for " -"details." -msgstr "" - -#: ../../source/ref-telemetry.md:16 -msgid "How to opt-out" +"In the C++ SDK, communication-related code is now separate from main " +"client logic. A new abstract class `Communicator` has been introduced " +"alongside a gRPC implementation of it." msgstr "" -#: ../../source/ref-telemetry.md:18 +#: ../../source/ref-changelog.md:289 msgid "" -"When Flower starts, it will check for an environment variable called " -"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " -"`FLWR_TELEMETRY_ENABLED=0`. 
Assuming you are starting a Flower server or " -"client, simply do so by prepending your command as in:" +"**Improve testing, tooling and CI/CD infrastructure** " +"([#3294](https://github.com/adap/flower/pull/3294), " +"[#3282](https://github.com/adap/flower/pull/3282), " +"[#3311](https://github.com/adap/flower/pull/3311), " +"[#2878](https://github.com/adap/flower/pull/2878), " +"[#3333](https://github.com/adap/flower/pull/3333), " +"[#3255](https://github.com/adap/flower/pull/3255), " +"[#3349](https://github.com/adap/flower/pull/3349), " +"[#3400](https://github.com/adap/flower/pull/3400), " +"[#3401](https://github.com/adap/flower/pull/3401), " +"[#3399](https://github.com/adap/flower/pull/3399), " +"[#3346](https://github.com/adap/flower/pull/3346), " +"[#3398](https://github.com/adap/flower/pull/3398), " +"[#3397](https://github.com/adap/flower/pull/3397), " +"[#3347](https://github.com/adap/flower/pull/3347), " +"[#3502](https://github.com/adap/flower/pull/3502), " +"[#3387](https://github.com/adap/flower/pull/3387), " +"[#3542](https://github.com/adap/flower/pull/3542), " +"[#3396](https://github.com/adap/flower/pull/3396), " +"[#3496](https://github.com/adap/flower/pull/3496), " +"[#3465](https://github.com/adap/flower/pull/3465), " +"[#3473](https://github.com/adap/flower/pull/3473), " +"[#3484](https://github.com/adap/flower/pull/3484), " +"[#3521](https://github.com/adap/flower/pull/3521), " +"[#3363](https://github.com/adap/flower/pull/3363), " +"[#3497](https://github.com/adap/flower/pull/3497), " +"[#3464](https://github.com/adap/flower/pull/3464), " +"[#3495](https://github.com/adap/flower/pull/3495), " +"[#3478](https://github.com/adap/flower/pull/3478), " +"[#3271](https://github.com/adap/flower/pull/3271))" msgstr "" -#: ../../source/ref-telemetry.md:24 +#: ../../source/ref-changelog.md:291 msgid "" -"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," -" `.bashrc` (or whatever configuration file applies to your 
environment) " -"to disable Flower telemetry permanently." -msgstr "" - -#: ../../source/ref-telemetry.md:26 -msgid "Collected metrics" -msgstr "" - -#: ../../source/ref-telemetry.md:28 -msgid "Flower telemetry collects the following metrics:" +"As always, the Flower tooling, testing, and CI/CD infrastructure has " +"received many updates." msgstr "" -#: ../../source/ref-telemetry.md:30 +#: ../../source/ref-changelog.md:293 msgid "" -"**Flower version.** Understand which versions of Flower are currently " -"being used. This helps us to decide whether we should invest effort into " -"releasing a patch version for an older version of Flower or instead use " -"the bandwidth to build new features." +"**Improve documentation** " +"([#3530](https://github.com/adap/flower/pull/3530), " +"[#3539](https://github.com/adap/flower/pull/3539), " +"[#3425](https://github.com/adap/flower/pull/3425), " +"[#3520](https://github.com/adap/flower/pull/3520), " +"[#3286](https://github.com/adap/flower/pull/3286), " +"[#3516](https://github.com/adap/flower/pull/3516), " +"[#3523](https://github.com/adap/flower/pull/3523), " +"[#3545](https://github.com/adap/flower/pull/3545), " +"[#3498](https://github.com/adap/flower/pull/3498), " +"[#3439](https://github.com/adap/flower/pull/3439), " +"[#3440](https://github.com/adap/flower/pull/3440), " +"[#3382](https://github.com/adap/flower/pull/3382), " +"[#3559](https://github.com/adap/flower/pull/3559), " +"[#3432](https://github.com/adap/flower/pull/3432), " +"[#3278](https://github.com/adap/flower/pull/3278), " +"[#3371](https://github.com/adap/flower/pull/3371), " +"[#3519](https://github.com/adap/flower/pull/3519), " +"[#3267](https://github.com/adap/flower/pull/3267), " +"[#3204](https://github.com/adap/flower/pull/3204), " +"[#3274](https://github.com/adap/flower/pull/3274))" msgstr "" -#: ../../source/ref-telemetry.md:32 +#: ../../source/ref-changelog.md:295 msgid "" -"**Operating system.** Enables us to answer questions such as: *Should 
we " -"create more guides for Linux, macOS, or Windows?*" +"As always, the Flower documentation has received many updates. Notable " +"new pages include:" msgstr "" -#: ../../source/ref-telemetry.md:34 +#: ../../source/ref-changelog.md:297 msgid "" -"**Python version.** Knowing the Python version helps us, for example, to " -"decide whether we should invest effort into supporting old versions of " -"Python or stop supporting them and start taking advantage of new Python " -"features." +"[How-to upgrade to Flower Next (Flower Next migration " +"guide)](https://flower.ai/docs/framework/how-to-upgrade-to-flower-" +"next.html)" msgstr "" -#: ../../source/ref-telemetry.md:36 +#: ../../source/ref-changelog.md:299 msgid "" -"**Hardware properties.** Understanding the hardware environment that " -"Flower is being used in helps to decide whether we should, for example, " -"put more effort into supporting low-resource environments." +"[How-to run Flower using Docker](https://flower.ai/docs/framework/how-to-" +"run-flower-using-docker.html)" msgstr "" -#: ../../source/ref-telemetry.md:38 +#: ../../source/ref-changelog.md:301 msgid "" -"**Execution mode.** Knowing what execution mode Flower starts in enables " -"us to understand how heavily certain features are being used and better " -"prioritize based on that." +"[Flower Mods reference](https://flower.ai/docs/framework/ref-" +"api/flwr.client.mod.html#module-flwr.client.mod)" msgstr "" -#: ../../source/ref-telemetry.md:40 +#: ../../source/ref-changelog.md:303 msgid "" -"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " -"time a Flower workload starts. This allows us to understand which device " -"types not only start Flower workloads but also successfully complete " -"them." 
-msgstr "" - -#: ../../source/ref-telemetry.md:42 +"**General updates to Flower Examples** " +"([#3205](https://github.com/adap/flower/pull/3205), " +"[#3226](https://github.com/adap/flower/pull/3226), " +"[#3211](https://github.com/adap/flower/pull/3211), " +"[#3252](https://github.com/adap/flower/pull/3252), " +"[#3427](https://github.com/adap/flower/pull/3427), " +"[#3410](https://github.com/adap/flower/pull/3410), " +"[#3426](https://github.com/adap/flower/pull/3426), " +"[#3228](https://github.com/adap/flower/pull/3228), " +"[#3342](https://github.com/adap/flower/pull/3342), " +"[#3200](https://github.com/adap/flower/pull/3200), " +"[#3202](https://github.com/adap/flower/pull/3202), " +"[#3394](https://github.com/adap/flower/pull/3394), " +"[#3488](https://github.com/adap/flower/pull/3488), " +"[#3329](https://github.com/adap/flower/pull/3329), " +"[#3526](https://github.com/adap/flower/pull/3526), " +"[#3392](https://github.com/adap/flower/pull/3392), " +"[#3474](https://github.com/adap/flower/pull/3474), " +"[#3269](https://github.com/adap/flower/pull/3269))" +msgstr "" + +#: ../../source/ref-changelog.md:305 +msgid "As always, Flower code examples have received many updates." +msgstr "" + +#: ../../source/ref-changelog.md:307 msgid "" -"**Source.** Flower telemetry tries to store a random source ID in " -"`~/.flwr/source` the first time a telemetry event is generated. The " -"source ID is important to identify whether an issue is recurring or " -"whether an issue is triggered by multiple clusters running concurrently " -"(which often happens in simulation). For example, if a device runs " -"multiple workloads at the same time, and this results in an issue, then, " -"in order to reproduce the issue, multiple workloads must be started at " -"the same time." 
+"**General improvements** " +"([#3532](https://github.com/adap/flower/pull/3532), " +"[#3318](https://github.com/adap/flower/pull/3318), " +"[#3565](https://github.com/adap/flower/pull/3565), " +"[#3296](https://github.com/adap/flower/pull/3296), " +"[#3305](https://github.com/adap/flower/pull/3305), " +"[#3246](https://github.com/adap/flower/pull/3246), " +"[#3224](https://github.com/adap/flower/pull/3224), " +"[#3475](https://github.com/adap/flower/pull/3475), " +"[#3297](https://github.com/adap/flower/pull/3297), " +"[#3317](https://github.com/adap/flower/pull/3317), " +"[#3429](https://github.com/adap/flower/pull/3429), " +"[#3196](https://github.com/adap/flower/pull/3196), " +"[#3534](https://github.com/adap/flower/pull/3534), " +"[#3240](https://github.com/adap/flower/pull/3240), " +"[#3365](https://github.com/adap/flower/pull/3365), " +"[#3407](https://github.com/adap/flower/pull/3407), " +"[#3563](https://github.com/adap/flower/pull/3563), " +"[#3344](https://github.com/adap/flower/pull/3344), " +"[#3330](https://github.com/adap/flower/pull/3330), " +"[#3436](https://github.com/adap/flower/pull/3436), " +"[#3300](https://github.com/adap/flower/pull/3300), " +"[#3327](https://github.com/adap/flower/pull/3327), " +"[#3254](https://github.com/adap/flower/pull/3254), " +"[#3253](https://github.com/adap/flower/pull/3253), " +"[#3419](https://github.com/adap/flower/pull/3419), " +"[#3289](https://github.com/adap/flower/pull/3289), " +"[#3208](https://github.com/adap/flower/pull/3208), " +"[#3245](https://github.com/adap/flower/pull/3245), " +"[#3319](https://github.com/adap/flower/pull/3319), " +"[#3203](https://github.com/adap/flower/pull/3203), " +"[#3423](https://github.com/adap/flower/pull/3423), " +"[#3352](https://github.com/adap/flower/pull/3352), " +"[#3292](https://github.com/adap/flower/pull/3292), " +"[#3261](https://github.com/adap/flower/pull/3261))" msgstr "" -#: ../../source/ref-telemetry.md:44 -msgid "" -"You may delete the source ID at any time. 
If you wish for all events " -"logged under a specific source ID to be deleted, you can send a deletion " -"request mentioning the source ID to `telemetry@flower.ai`. All events " -"related to that source ID will then be permanently deleted." +#: ../../source/ref-changelog.md:311 +msgid "**Deprecate Python 3.8 support**" msgstr "" -#: ../../source/ref-telemetry.md:46 +#: ../../source/ref-changelog.md:313 msgid "" -"We will not collect any personally identifiable information. If you think" -" any of the metrics collected could be misused in any way, please [get in" -" touch with us](#how-to-contact-us). We will update this page to reflect " -"any changes to the metrics collected and publish changes in the " -"changelog." +"Python 3.8 will stop receiving security fixes in [October " +"2024](https://devguide.python.org/versions/). Support for Python 3.8 is " +"now deprecated and will be removed in an upcoming release." msgstr "" -#: ../../source/ref-telemetry.md:48 +#: ../../source/ref-changelog.md:315 msgid "" -"If you think other metrics would be helpful for us to better guide our " -"decisions, please let us know! We will carefully review them; if we are " -"confident that they do not compromise user privacy, we may add them." -msgstr "" - -#: ../../source/ref-telemetry.md:50 -msgid "How to inspect what is being reported" +"**Deprecate (experimental)** `flower-driver-api` **and** `flower-fleet-" +"api` ([#3416](https://github.com/adap/flower/pull/3416), " +"[#3420](https://github.com/adap/flower/pull/3420))" msgstr "" -#: ../../source/ref-telemetry.md:52 +#: ../../source/ref-changelog.md:317 msgid "" -"We wanted to make it very easy for you to inspect what anonymous usage " -"metrics are reported. You can view all the reported telemetry information" -" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " -"is disabled by default. 
You may use logging independently from " -"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " -"without sending any metrics." +"Flower 1.9 deprecates the two (experimental) commands `flower-driver-api`" +" and `flower-fleet-api`. Both commands will be removed in an upcoming " +"release. Use `flower-superlink` instead." msgstr "" -#: ../../source/ref-telemetry.md:58 +#: ../../source/ref-changelog.md:319 msgid "" -"The inspect Flower telemetry without sending any anonymous usage metrics," -" use both environment variables:" -msgstr "" - -#: ../../source/ref-telemetry.md:64 -msgid "How to contact us" +"**Deprecate** `--server` **in favor of** `--superlink` " +"([#3518](https://github.com/adap/flower/pull/3518))" msgstr "" -#: ../../source/ref-telemetry.md:66 +#: ../../source/ref-changelog.md:321 msgid "" -"We want to hear from you. If you have any feedback or ideas on how to " -"improve the way we handle anonymous usage metrics, reach out to us via " -"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " -"(`telemetry@flower.ai`)." +"The commands `flower-server-app` and `flower-client-app` should use " +"`--superlink` instead of the now deprecated `--server`. Support for " +"`--server` will be removed in a future release." msgstr "" -#: ../../source/tutorial-quickstart-android.rst:-1 +#: ../../source/ref-changelog.md:325 msgid "" -"Read this Federated Learning quickstart tutorial for creating an Android " -"app using Flower." +"**Replace** `flower-superlink` **CLI option** `--certificates` **with** " +"`--ssl-ca-certfile` **,** `--ssl-certfile` **and** `--ssl-keyfile` " +"([#3512](https://github.com/adap/flower/pull/3512), " +"[#3408](https://github.com/adap/flower/pull/3408))" msgstr "" -#: ../../source/tutorial-quickstart-android.rst:5 -msgid "Quickstart Android" +#: ../../source/ref-changelog.md:327 +msgid "" +"SSL-related `flower-superlink` CLI arguments were restructured in an " +"incompatible way. 
Instead of passing a single `--certificates` flag with " +"three values, you now need to pass three flags (`--ssl-ca-certfile`, " +"`--ssl-certfile` and `--ssl-keyfile`) with one value each. Check out the " +"[SSL connections](https://flower.ai/docs/framework/how-to-enable-ssl-" +"connections.html) documentation page for details." msgstr "" -#: ../../source/tutorial-quickstart-android.rst:10 +#: ../../source/ref-changelog.md:329 msgid "" -"Let's build a federated learning system using TFLite and Flower on " -"Android!" +"**Remove SuperLink** `--vce` **option** " +"([#3513](https://github.com/adap/flower/pull/3513))" msgstr "" -#: ../../source/tutorial-quickstart-android.rst:12 +#: ../../source/ref-changelog.md:331 msgid "" -"Please refer to the `full code example " -"`_ to learn " -"more." +"Instead of separately starting a SuperLink and a `ServerApp` for " +"simulation, simulations must now be started using the single `flower-" +"simulation` command." msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:-1 +#: ../../source/ref-changelog.md:333 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with FastAI to train a vision model on CIFAR-10." +"**Merge** `--grpc-rere` **and** `--rest` **SuperLink options** " +"([#3527](https://github.com/adap/flower/pull/3527))" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:5 -msgid "Quickstart fastai" +#: ../../source/ref-changelog.md:335 +msgid "" +"To simplify the usage of `flower-superlink`, previously separate sets of " +"CLI options for gRPC and REST were merged into one unified set of " +"options. Consult the [Flower CLI reference " +"documentation](https://flower.ai/docs/framework/ref-api-cli.html) for " +"details." msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:10 -msgid "Let's build a federated learning system using fastai and Flower!" 
+#: ../../source/ref-changelog.md:337 +msgid "v1.8.0 (2024-04-03)" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:12 +#: ../../source/ref-changelog.md:343 msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata " +"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " +"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " +"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " +"`tabdar-khan` " msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:-1 +#: ../../source/ref-changelog.md:347 msgid "" -"Check out this Federating Learning quickstart tutorial for using Flower " -"with HuggingFace Transformers in order to fine-tune an LLM." -msgstr "" - -#: ../../source/tutorial-quickstart-huggingface.rst:5 -msgid "Quickstart 🤗 Transformers" +"**Introduce Flower Next high-level API (stable)** " +"([#3002](https://github.com/adap/flower/pull/3002), " +"[#2934](https://github.com/adap/flower/pull/2934), " +"[#2958](https://github.com/adap/flower/pull/2958), " +"[#3173](https://github.com/adap/flower/pull/3173), " +"[#3174](https://github.com/adap/flower/pull/3174), " +"[#2923](https://github.com/adap/flower/pull/2923), " +"[#2691](https://github.com/adap/flower/pull/2691), " +"[#3079](https://github.com/adap/flower/pull/3079), " +"[#2961](https://github.com/adap/flower/pull/2961), " +"[#2924](https://github.com/adap/flower/pull/2924), " +"[#3166](https://github.com/adap/flower/pull/3166), " +"[#3031](https://github.com/adap/flower/pull/3031), " +"[#3057](https://github.com/adap/flower/pull/3057), " +"[#3000](https://github.com/adap/flower/pull/3000), " +"[#3113](https://github.com/adap/flower/pull/3113), " +"[#2957](https://github.com/adap/flower/pull/2957), " +"[#3183](https://github.com/adap/flower/pull/3183), " +"[#3180](https://github.com/adap/flower/pull/3180), " 
+"[#3035](https://github.com/adap/flower/pull/3035), " +"[#3189](https://github.com/adap/flower/pull/3189), " +"[#3185](https://github.com/adap/flower/pull/3185), " +"[#3190](https://github.com/adap/flower/pull/3190), " +"[#3191](https://github.com/adap/flower/pull/3191), " +"[#3195](https://github.com/adap/flower/pull/3195), " +"[#3197](https://github.com/adap/flower/pull/3197))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:10 +#: ../../source/ref-changelog.md:349 msgid "" -"Let's build a federated learning system using Hugging Face Transformers " -"and Flower!" +"The Flower Next high-level API is stable! Flower Next is the future of " +"Flower - all new features (like Flower Mods) will be built on top of it. " +"You can start to migrate your existing projects to Flower Next by using " +"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " +"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." +" Flower Next allows you to run multiple projects concurrently (we call " +"this multi-run) and execute the same project in either simulation " +"environments or deployment environments without having to change a single" +" line of code. The best part? It's fully compatible with existing Flower " +"projects that use `Strategy`, `NumPyClient` & co." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:12 +#: ../../source/ref-changelog.md:351 msgid "" -"We will leverage Hugging Face to federate the training of language models" -" over multiple clients using Flower. More specifically, we will fine-tune" -" a pre-trained Transformer model (distilBERT) for sequence classification" -" over a dataset of IMDB ratings. The end goal is to detect if a movie " -"rating is positive or negative." 
-msgstr "" - -#: ../../source/tutorial-quickstart-huggingface.rst:18 -msgid "Dependencies" +"**Introduce Flower Next low-level API (preview)** " +"([#3062](https://github.com/adap/flower/pull/3062), " +"[#3034](https://github.com/adap/flower/pull/3034), " +"[#3069](https://github.com/adap/flower/pull/3069))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:20 +#: ../../source/ref-changelog.md:353 msgid "" -"To follow along this tutorial you will need to install the following " -"packages: :code:`datasets`, :code:`evaluate`, :code:`flwr`, " -":code:`torch`, and :code:`transformers`. This can be done using " -":code:`pip`:" -msgstr "" - -#: ../../source/tutorial-quickstart-huggingface.rst:30 -msgid "Standard Hugging Face workflow" -msgstr "" - -#: ../../source/tutorial-quickstart-huggingface.rst:33 -msgid "Handling the data" +"In addition to the Flower Next *high-level* API that uses `Strategy`, " +"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " +"new Flower Next *low-level* API. The low-level API allows for granular " +"control of every aspect of the learning process by sending/receiving " +"individual messages to/from client nodes. The new `ServerApp` supports " +"registering a custom `main` function that allows writing custom training " +"loops for methods like async FL, cyclic training, or federated analytics." +" The new `ClientApp` supports registering `train`, `evaluate` and `query`" +" functions that can access the raw message received from the `ServerApp`." +" New abstractions like `RecordSet`, `Message` and `Context` further " +"enable sending multiple models, multiple sets of config values and " +"metrics, stateful computations on the client node and implementations of " +"custom SMPC protocols, to name just a few." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:35 +#: ../../source/ref-changelog.md:355 msgid "" -"To fetch the IMDB dataset, we will use Hugging Face's :code:`datasets` " -"library. 
We then need to tokenize the data and create :code:`PyTorch` " -"dataloaders, this is all done in the :code:`load_data` function:" -msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:81 -msgid "Training and testing the model" +"**Introduce Flower Mods (preview)** " +"([#3054](https://github.com/adap/flower/pull/3054), " +"[#2911](https://github.com/adap/flower/pull/2911), " +"[#3083](https://github.com/adap/flower/pull/3083))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:83 +#: ../../source/ref-changelog.md:357 msgid "" -"Once we have a way of creating our trainloader and testloader, we can " -"take care of the training and testing. This is very similar to any " -":code:`PyTorch` training or testing loop:" +"Flower Modifiers (we call them Mods) can intercept messages and analyze, " +"edit or handle them directly. Mods can be used to develop pluggable " +"modules that work across different projects. Flower 1.8 already includes " +"mods to log the size of a message, the number of parameters sent over the" +" network, differential privacy with fixed clipping and adaptive clipping," +" local differential privacy and secure aggregation protocols SecAgg and " +"SecAgg+. The Flower Mods API is released as a preview, but researchers " +"can already use it to experiment with arbitrary SMPC protocols." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:123 +#: ../../source/ref-changelog.md:359 msgid "" -"To create the model itself, we will just load the pre-trained distillBERT" -" model using Hugging Face’s :code:`AutoModelForSequenceClassification` :" -msgstr "" - -#: ../../source/tutorial-quickstart-huggingface.rst:136 -msgid "Federating the example" -msgstr "" - -#: ../../source/tutorial-quickstart-huggingface.rst:139 -msgid "Creating the IMDBClient" +"**Fine-tune LLMs with LLM FlowerTune** " +"([#3029](https://github.com/adap/flower/pull/3029), " +"[#3089](https://github.com/adap/flower/pull/3089), " +"[#3092](https://github.com/adap/flower/pull/3092), " +"[#3100](https://github.com/adap/flower/pull/3100), " +"[#3114](https://github.com/adap/flower/pull/3114), " +"[#3162](https://github.com/adap/flower/pull/3162), " +"[#3172](https://github.com/adap/flower/pull/3172))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:141 +#: ../../source/ref-changelog.md:361 msgid "" -"To federate our example to multiple clients, we first need to write our " -"Flower client class (inheriting from :code:`flwr.client.NumPyClient`). " -"This is very easy, as our model is a standard :code:`PyTorch` model:" +"We are introducing LLM FlowerTune, an introductory example that " +"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " +"the Alpaca-GPT4 dataset. The example is built to be easily adapted to use" +" different models and/or datasets. Read our blog post [LLM FlowerTune: " +"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" +"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:169 +#: ../../source/ref-changelog.md:363 msgid "" -"The :code:`get_parameters` function lets the server get the client's " -"parameters. Inversely, the :code:`set_parameters` function allows the " -"server to send its parameters to the client. 
Finally, the :code:`fit` " -"function trains the model locally for the client, and the " -":code:`evaluate` function tests the model locally and returns the " -"relevant metrics." -msgstr "" - -#: ../../source/tutorial-quickstart-huggingface.rst:175 -msgid "Starting the server" +"**Introduce built-in Differential Privacy (preview)** " +"([#2798](https://github.com/adap/flower/pull/2798), " +"[#2959](https://github.com/adap/flower/pull/2959), " +"[#3038](https://github.com/adap/flower/pull/3038), " +"[#3147](https://github.com/adap/flower/pull/3147), " +"[#2909](https://github.com/adap/flower/pull/2909), " +"[#2893](https://github.com/adap/flower/pull/2893), " +"[#2892](https://github.com/adap/flower/pull/2892), " +"[#3039](https://github.com/adap/flower/pull/3039), " +"[#3074](https://github.com/adap/flower/pull/3074))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:177 +#: ../../source/ref-changelog.md:365 msgid "" -"Now that we have a way to instantiate clients, we need to create our " -"server in order to aggregate the results. Using Flower, this can be done " -"very easily by first choosing a strategy (here, we are using " -":code:`FedAvg`, which will define the global weights as the average of " -"all the clients' weights at each round) and then using the " -":code:`flwr.server.start_server` function:" +"Built-in Differential Privacy is here! Flower supports both central and " +"local differential privacy (DP). Central DP can be configured with either" +" fixed or adaptive clipping. The clipping can happen either on the " +"server-side or the client-side. Local DP does both clipping and noising " +"on the client-side. A new documentation page [explains Differential " +"Privacy approaches](https://flower.ai/docs/framework/explanation-" +"differential-privacy.html) and a new how-to guide describes [how to use " +"the new Differential Privacy components](https://flower.ai/docs/framework" +"/how-to-use-differential-privacy.html) in Flower." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:205 +#: ../../source/ref-changelog.md:367 msgid "" -"The :code:`weighted_average` function is there to provide a way to " -"aggregate the metrics distributed amongst the clients (basically this " -"allows us to display a nice average accuracy and loss for every round)." -msgstr "" - -#: ../../source/tutorial-quickstart-huggingface.rst:209 -msgid "Putting everything together" -msgstr "" - -#: ../../source/tutorial-quickstart-huggingface.rst:211 -msgid "We can now start client instances using:" +"**Introduce built-in Secure Aggregation (preview)** " +"([#3120](https://github.com/adap/flower/pull/3120), " +"[#3110](https://github.com/adap/flower/pull/3110), " +"[#3108](https://github.com/adap/flower/pull/3108))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:221 +#: ../../source/ref-changelog.md:369 msgid "" -"And they will be able to connect to the server and start the federated " -"training." +"Built-in Secure Aggregation is here! Flower now supports different secure" +" aggregation protocols out-of-the-box. The best part? You can add secure " +"aggregation to your Flower projects with only a few lines of code. In " +"this initial release, we inlcude support for SecAgg and SecAgg+, but more" +" protocols will be implemented shortly. We'll also add detailed docs that" +" explain secure aggregation and how to use it in Flower. You can already " +"check out the new code example that shows how to use Flower to easily " +"combine Federated Learning, Differential Privacy and Secure Aggregation " +"in the same project." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:223 +#: ../../source/ref-changelog.md:371 msgid "" -"If you want to check out everything put together, you should check out " -"the `full code example `_ ." 
+"**Introduce** `flwr` **CLI (preview)** " +"([#2942](https://github.com/adap/flower/pull/2942), " +"[#3055](https://github.com/adap/flower/pull/3055), " +"[#3111](https://github.com/adap/flower/pull/3111), " +"[#3130](https://github.com/adap/flower/pull/3130), " +"[#3136](https://github.com/adap/flower/pull/3136), " +"[#3094](https://github.com/adap/flower/pull/3094), " +"[#3059](https://github.com/adap/flower/pull/3059), " +"[#3049](https://github.com/adap/flower/pull/3049), " +"[#3142](https://github.com/adap/flower/pull/3142))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:226 +#: ../../source/ref-changelog.md:373 msgid "" -"Of course, this is a very basic example, and a lot can be added or " -"modified, it was just to showcase how simply we could federate a Hugging " -"Face workflow using Flower." +"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" +" and then running them using the Simulation Engine (`flwr run`)." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:229 +#: ../../source/ref-changelog.md:375 msgid "" -"Note that in this example we used :code:`PyTorch`, but we could have very" -" well used :code:`TensorFlow`." 
+"**Introduce Flower Next Simulation Engine** " +"([#3024](https://github.com/adap/flower/pull/3024), " +"[#3061](https://github.com/adap/flower/pull/3061), " +"[#2997](https://github.com/adap/flower/pull/2997), " +"[#2783](https://github.com/adap/flower/pull/2783), " +"[#3184](https://github.com/adap/flower/pull/3184), " +"[#3075](https://github.com/adap/flower/pull/3075), " +"[#3047](https://github.com/adap/flower/pull/3047), " +"[#2998](https://github.com/adap/flower/pull/2998), " +"[#3009](https://github.com/adap/flower/pull/3009), " +"[#3008](https://github.com/adap/flower/pull/3008))" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:-1 +#: ../../source/ref-changelog.md:377 msgid "" -"Read this Federated Learning quickstart tutorial for creating an iOS app " -"using Flower to train a neural network on MNIST." -msgstr "" - -#: ../../source/tutorial-quickstart-ios.rst:5 -msgid "Quickstart iOS" +"The Flower Simulation Engine can now run Flower Next projects. For " +"notebook environments, there's also a new `run_simulation` function that " +"can run `ServerApp` and `ClientApp`." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:10 +#: ../../source/ref-changelog.md:379 msgid "" -"In this tutorial we will learn how to train a Neural Network on MNIST " -"using Flower and CoreML on iOS devices." +"**Handle SuperNode connection errors** " +"([#2969](https://github.com/adap/flower/pull/2969))" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:12 +#: ../../source/ref-changelog.md:381 msgid "" -"First of all, for running the Flower Python server, it is recommended to " -"create a virtual environment and run everything within a :doc:`virtualenv" -" `. For the Flower client " -"implementation in iOS, it is recommended to use Xcode as our IDE." +"A SuperNode will now try to reconnect indefinitely to the SuperLink in " +"case of connection errors. The arguments `--max-retries` and `--max-wait-" +"time` can now be passed to the `flower-client-app` command. 
`--max-" +"retries` will define the number of tentatives the client should make " +"before it gives up trying to reconnect to the SuperLink, and, `--max-" +"wait-time` defines the time before the SuperNode gives up trying to " +"reconnect to the SuperLink." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:15 +#: ../../source/ref-changelog.md:383 msgid "" -"Our example consists of one Python *server* and two iPhone *clients* that" -" all have the same model." +"**General updates to Flower Baselines** " +"([#2904](https://github.com/adap/flower/pull/2904), " +"[#2482](https://github.com/adap/flower/pull/2482), " +"[#2985](https://github.com/adap/flower/pull/2985), " +"[#2968](https://github.com/adap/flower/pull/2968))" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:17 +#: ../../source/ref-changelog.md:385 msgid "" -"*Clients* are responsible for generating individual weight updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." +"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " +"baseline. Several other baselined have been updated as well." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:21 +#: ../../source/ref-changelog.md:387 msgid "" -"Now that we have a rough idea of what is going on, let's get started to " -"setup our Flower server environment. We first need to install Flower. 
You" -" can do this by using pip:" -msgstr "" - -#: ../../source/tutorial-quickstart-ios.rst:27 -msgid "Or Poetry:" -msgstr "" - -#: ../../source/tutorial-quickstart-ios.rst:34 -#: ../../source/tutorial-quickstart-pytorch.rst:37 -#: ../../source/tutorial-quickstart-scikitlearn.rst:40 -#: ../../source/tutorial-quickstart-tensorflow.rst:29 -#: ../../source/tutorial-quickstart-xgboost.rst:55 -msgid "Flower Client" +"**Improve documentation and translations** " +"([#3050](https://github.com/adap/flower/pull/3050), " +"[#3044](https://github.com/adap/flower/pull/3044), " +"[#3043](https://github.com/adap/flower/pull/3043), " +"[#2986](https://github.com/adap/flower/pull/2986), " +"[#3041](https://github.com/adap/flower/pull/3041), " +"[#3046](https://github.com/adap/flower/pull/3046), " +"[#3042](https://github.com/adap/flower/pull/3042), " +"[#2978](https://github.com/adap/flower/pull/2978), " +"[#2952](https://github.com/adap/flower/pull/2952), " +"[#3167](https://github.com/adap/flower/pull/3167), " +"[#2953](https://github.com/adap/flower/pull/2953), " +"[#3045](https://github.com/adap/flower/pull/3045), " +"[#2654](https://github.com/adap/flower/pull/2654), " +"[#3082](https://github.com/adap/flower/pull/3082), " +"[#2990](https://github.com/adap/flower/pull/2990), " +"[#2989](https://github.com/adap/flower/pull/2989))" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:36 +#: ../../source/ref-changelog.md:389 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training using CoreML as our local training pipeline and " -"MNIST as our dataset. For simplicity reasons we will use the complete " -"Flower client with CoreML, that has been implemented and stored inside " -"the Swift SDK. The client implementation can be seen below:" +"As usual, we merged many smaller and larger improvements to the " +"documentation. 
A special thank you goes to [Sebastian van der " +"Voort](https://github.com/svdvoort) for landing a big documentation PR!" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:72 +#: ../../source/ref-changelog.md:391 msgid "" -"Let's create a new application project in Xcode and add :code:`flwr` as a" -" dependency in your project. For our application, we will store the logic" -" of our app in :code:`FLiOSModel.swift` and the UI elements in " -":code:`ContentView.swift`. We will focus more on :code:`FLiOSModel.swift`" -" in this quickstart. Please refer to the `full code example " -"`_ to learn more " -"about the app." +"**General updates to Flower Examples** " +"([3134](https://github.com/adap/flower/pull/3134), " +"[2996](https://github.com/adap/flower/pull/2996), " +"[2930](https://github.com/adap/flower/pull/2930), " +"[2967](https://github.com/adap/flower/pull/2967), " +"[2467](https://github.com/adap/flower/pull/2467), " +"[2910](https://github.com/adap/flower/pull/2910), " +"[#2918](https://github.com/adap/flower/pull/2918), " +"[#2773](https://github.com/adap/flower/pull/2773), " +"[#3063](https://github.com/adap/flower/pull/3063), " +"[#3116](https://github.com/adap/flower/pull/3116), " +"[#3117](https://github.com/adap/flower/pull/3117))" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:75 -msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" +#: ../../source/ref-changelog.md:393 +msgid "" +"Two new examples show federated training of a Vision Transformer (ViT) " +"and federated learning in a medical context using the popular MONAI " +"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" +" new Flower Next `ServerApp` and `ClientApp`. Many other examples " +"received considerable updates as well." 
msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:83 +#: ../../source/ref-changelog.md:395 msgid "" -"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " -"will be bundled inside the application during deployment to your iOS " -"device. We need to pass the url to access mlmodel and run CoreML machine " -"learning processes, it can be retrieved by calling the function " -":code:`Bundle.main.url`. For the MNIST dataset, we need to preprocess it " -"into :code:`MLBatchProvider` object. The preprocessing is done inside " -":code:`DataLoader.swift`." +"**General improvements** " +"([#3171](https://github.com/adap/flower/pull/3171), " +"[3099](https://github.com/adap/flower/pull/3099), " +"[3003](https://github.com/adap/flower/pull/3003), " +"[3145](https://github.com/adap/flower/pull/3145), " +"[3017](https://github.com/adap/flower/pull/3017), " +"[3085](https://github.com/adap/flower/pull/3085), " +"[3012](https://github.com/adap/flower/pull/3012), " +"[3119](https://github.com/adap/flower/pull/3119), " +"[2991](https://github.com/adap/flower/pull/2991), " +"[2970](https://github.com/adap/flower/pull/2970), " +"[2980](https://github.com/adap/flower/pull/2980), " +"[3086](https://github.com/adap/flower/pull/3086), " +"[2932](https://github.com/adap/flower/pull/2932), " +"[2928](https://github.com/adap/flower/pull/2928), " +"[2941](https://github.com/adap/flower/pull/2941), " +"[2933](https://github.com/adap/flower/pull/2933), " +"[3181](https://github.com/adap/flower/pull/3181), " +"[2973](https://github.com/adap/flower/pull/2973), " +"[2992](https://github.com/adap/flower/pull/2992), " +"[2915](https://github.com/adap/flower/pull/2915), " +"[3040](https://github.com/adap/flower/pull/3040), " +"[3022](https://github.com/adap/flower/pull/3022), " +"[3032](https://github.com/adap/flower/pull/3032), " +"[2902](https://github.com/adap/flower/pull/2902), " +"[2931](https://github.com/adap/flower/pull/2931), " 
+"[3005](https://github.com/adap/flower/pull/3005), " +"[3132](https://github.com/adap/flower/pull/3132), " +"[3115](https://github.com/adap/flower/pull/3115), " +"[2944](https://github.com/adap/flower/pull/2944), " +"[3064](https://github.com/adap/flower/pull/3064), " +"[3106](https://github.com/adap/flower/pull/3106), " +"[2974](https://github.com/adap/flower/pull/2974), " +"[3178](https://github.com/adap/flower/pull/3178), " +"[2993](https://github.com/adap/flower/pull/2993), " +"[3186](https://github.com/adap/flower/pull/3186), " +"[3091](https://github.com/adap/flower/pull/3091), " +"[3125](https://github.com/adap/flower/pull/3125), " +"[3093](https://github.com/adap/flower/pull/3093), " +"[3013](https://github.com/adap/flower/pull/3013), " +"[3033](https://github.com/adap/flower/pull/3033), " +"[3133](https://github.com/adap/flower/pull/3133), " +"[3068](https://github.com/adap/flower/pull/3068), " +"[2916](https://github.com/adap/flower/pull/2916), " +"[2975](https://github.com/adap/flower/pull/2975), " +"[2984](https://github.com/adap/flower/pull/2984), " +"[2846](https://github.com/adap/flower/pull/2846), " +"[3077](https://github.com/adap/flower/pull/3077), " +"[3143](https://github.com/adap/flower/pull/3143), " +"[2921](https://github.com/adap/flower/pull/2921), " +"[3101](https://github.com/adap/flower/pull/3101), " +"[2927](https://github.com/adap/flower/pull/2927), " +"[2995](https://github.com/adap/flower/pull/2995), " +"[2972](https://github.com/adap/flower/pull/2972), " +"[2912](https://github.com/adap/flower/pull/2912), " +"[3065](https://github.com/adap/flower/pull/3065), " +"[3028](https://github.com/adap/flower/pull/3028), " +"[2922](https://github.com/adap/flower/pull/2922), " +"[2982](https://github.com/adap/flower/pull/2982), " +"[2914](https://github.com/adap/flower/pull/2914), " +"[3179](https://github.com/adap/flower/pull/3179), " +"[3080](https://github.com/adap/flower/pull/3080), " +"[2994](https://github.com/adap/flower/pull/2994), " 
+"[3187](https://github.com/adap/flower/pull/3187), " +"[2926](https://github.com/adap/flower/pull/2926), " +"[3018](https://github.com/adap/flower/pull/3018), " +"[3144](https://github.com/adap/flower/pull/3144), " +"[3011](https://github.com/adap/flower/pull/3011), " +"[#3152](https://github.com/adap/flower/pull/3152), " +"[#2836](https://github.com/adap/flower/pull/2836), " +"[#2929](https://github.com/adap/flower/pull/2929), " +"[#2943](https://github.com/adap/flower/pull/2943), " +"[#2955](https://github.com/adap/flower/pull/2955), " +"[#2954](https://github.com/adap/flower/pull/2954))" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:99 -msgid "" -"Since CoreML does not allow the model parameters to be seen before " -"training, and accessing the model parameters during or after the training" -" can only be done by specifying the layer name, we need to know this " -"information beforehand, through looking at the model specification, which" -" are written as proto files. The implementation can be seen in " -":code:`MLModelInspect`." +#: ../../source/ref-changelog.md:401 +msgid "v1.7.0 (2024-02-05)" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:102 +#: ../../source/ref-changelog.md:407 msgid "" -"After we have all of the necessary information, let's create our Flower " -"client." +"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " +"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " +"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " +"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " +"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " +"Shaaban`, `Yan Gao`, `Yasar Abbas` " msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:117 +#: ../../source/ref-changelog.md:411 msgid "" -"Then start the Flower gRPC client and start communicating to the server " -"by passing our Flower client to the function :code:`startFlwrGRPC`." 
+"**Introduce stateful clients (experimental)** " +"([#2770](https://github.com/adap/flower/pull/2770), " +"[#2686](https://github.com/adap/flower/pull/2686), " +"[#2696](https://github.com/adap/flower/pull/2696), " +"[#2643](https://github.com/adap/flower/pull/2643), " +"[#2769](https://github.com/adap/flower/pull/2769))" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:124 +#: ../../source/ref-changelog.md:413 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -"call the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. " -"The attribute :code:`hostname` and :code:`port` tells the client which " -"server to connect to. This can be done by entering the hostname and port " -"in the application before clicking the start button to start the " -"federated learning process." -msgstr "" - -#: ../../source/tutorial-quickstart-ios.rst:129 -#: ../../source/tutorial-quickstart-pytorch.rst:203 -#: ../../source/tutorial-quickstart-scikitlearn.rst:167 -#: ../../source/tutorial-quickstart-tensorflow.rst:98 -#: ../../source/tutorial-quickstart-xgboost.rst:309 -msgid "Flower Server" +"Subclasses of `Client` and `NumPyClient` can now store local state that " +"remains on the client. Let's start with the highlight first: this new " +"feature is compatible with both simulated clients (via " +"`start_simulation`) and networked clients (via `start_client`). It's also" +" the first preview of new abstractions like `Context` and `RecordSet`. " +"Clients can access state of type `RecordSet` via `state: RecordSet = " +"self.context.state`. Changes to this `RecordSet` are preserved across " +"different rounds of execution to enable stateful computations in a " +"unified way across simulation and deployment." 
msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:131 -#: ../../source/tutorial-quickstart-pytorch.rst:205 -#: ../../source/tutorial-quickstart-tensorflow.rst:100 +#: ../../source/ref-changelog.md:415 msgid "" -"For simple workloads we can start a Flower server and leave all the " -"configuration possibilities at their default values. In a file named " -":code:`server.py`, import Flower and start the server:" -msgstr "" - -#: ../../source/tutorial-quickstart-ios.rst:142 -#: ../../source/tutorial-quickstart-pytorch.rst:216 -#: ../../source/tutorial-quickstart-scikitlearn.rst:230 -#: ../../source/tutorial-quickstart-tensorflow.rst:112 -msgid "Train the model, federated!" +"**Improve performance** " +"([#2293](https://github.com/adap/flower/pull/2293))" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:144 -#: ../../source/tutorial-quickstart-pytorch.rst:218 -#: ../../source/tutorial-quickstart-tensorflow.rst:114 -#: ../../source/tutorial-quickstart-xgboost.rst:525 +#: ../../source/ref-changelog.md:417 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. FL systems usually have a server and " -"multiple clients. We therefore have to start the server first:" +"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" +"place aggregation to reduce memory consumption. The Flower client " +"serialization/deserialization has been rewritten from the ground up, " +"which results in significant speedups, especially when the client-side " +"training time is short." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:152 +#: ../../source/ref-changelog.md:419 msgid "" -"Once the server is running we can start the clients in different " -"terminals. Build and run the client through your Xcode, one through Xcode" -" Simulator and the other by deploying it to your iPhone. To see more " -"about how to deploy your app to iPhone or Simulator visit `here " -"`_." 
+"**Support Federated Learning with Apple MLX and Flower** " +"([#2693](https://github.com/adap/flower/pull/2693))" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:156 +#: ../../source/ref-changelog.md:421 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system in your ios device. The full `source code " -"`_ for this " -"example can be found in :code:`examples/ios`." +"Flower has official support for federated learning using [Apple " +"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " +"example." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:-1 +#: ../../source/ref-changelog.md:423 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Jax to train a linear regression model on a scikit-learn dataset." -msgstr "" - -#: ../../source/tutorial-quickstart-jax.rst:5 -msgid "Quickstart JAX" +"**Introduce new XGBoost cyclic strategy** " +"([#2666](https://github.com/adap/flower/pull/2666), " +"[#2668](https://github.com/adap/flower/pull/2668))" msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:-1 +#: ../../source/ref-changelog.md:425 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Pandas to perform Federated Analytics." -msgstr "" - -#: ../../source/tutorial-quickstart-pandas.rst:5 -msgid "Quickstart Pandas" -msgstr "" - -#: ../../source/tutorial-quickstart-pandas.rst:10 -msgid "Let's build a federated analytics system using Pandas and Flower!" +"A new strategy called `FedXgbCyclic` supports a client-by-client style of" +" training (often called cyclic). The `xgboost-comprehensive` code example" +" shows how to use it in a full project. In addition to that, `xgboost-" +"comprehensive` now also supports simulation mode. With this, Flower " +"offers best-in-class XGBoost support." 
msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:12 +#: ../../source/ref-changelog.md:427 msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." +"**Support Python 3.11** " +"([#2394](https://github.com/adap/flower/pull/2394))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:-1 +#: ../../source/ref-changelog.md:429 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch to train a CNN model on MNIST." +"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " +"ensure better support for users using more recent Python versions." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:13 +#: ../../source/ref-changelog.md:431 msgid "" -"In this tutorial we will learn how to train a Convolutional Neural " -"Network on CIFAR10 using Flower and PyTorch." +"**Update gRPC and ProtoBuf dependencies** " +"([#2814](https://github.com/adap/flower/pull/2814))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:15 -#: ../../source/tutorial-quickstart-xgboost.rst:39 +#: ../../source/ref-changelog.md:433 msgid "" -"First of all, it is recommended to create a virtual environment and run " -"everything within a :doc:`virtualenv `." +"The `grpcio` and `protobuf` dependencies were updated to their latest " +"versions for improved security and performance." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:17 -#: ../../source/tutorial-quickstart-scikitlearn.rst:14 +#: ../../source/ref-changelog.md:435 msgid "" -"Our example consists of one *server* and two *clients* all having the " -"same model." 
+"**Introduce Docker image for Flower server** " +"([#2700](https://github.com/adap/flower/pull/2700), " +"[#2688](https://github.com/adap/flower/pull/2688), " +"[#2705](https://github.com/adap/flower/pull/2705), " +"[#2695](https://github.com/adap/flower/pull/2695), " +"[#2747](https://github.com/adap/flower/pull/2747), " +"[#2746](https://github.com/adap/flower/pull/2746), " +"[#2680](https://github.com/adap/flower/pull/2680), " +"[#2682](https://github.com/adap/flower/pull/2682), " +"[#2701](https://github.com/adap/flower/pull/2701))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:19 +#: ../../source/ref-changelog.md:437 msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." +"The Flower server can now be run using an official Docker image. A new " +"how-to guide explains [how to run Flower using " +"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html). An official Flower client Docker image will follow." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:23 +#: ../../source/ref-changelog.md:439 msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. 
You can do this by running :" +"**Introduce** `flower-via-docker-compose` **example** " +"([#2626](https://github.com/adap/flower/pull/2626))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:29 +#: ../../source/ref-changelog.md:441 msgid "" -"Since we want to use PyTorch to solve a computer vision task, let's go " -"ahead and install PyTorch and the **torchvision** library:" +"**Introduce** `quickstart-sklearn-tabular` **example** " +"([#2719](https://github.com/adap/flower/pull/2719))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:39 +#: ../../source/ref-changelog.md:443 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. Our training " -"procedure and network architecture are based on PyTorch's `Deep Learning " -"with PyTorch " -"`_." +"**Introduce** `custom-metrics` **example** " +"([#1958](https://github.com/adap/flower/pull/1958))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:41 +#: ../../source/ref-changelog.md:445 msgid "" -"In a file called :code:`client.py`, import Flower and PyTorch related " -"packages:" -msgstr "" - -#: ../../source/tutorial-quickstart-pytorch.rst:56 -msgid "In addition, we define the device allocation in PyTorch with:" +"**Update code examples to use Flower Datasets** " +"([#2450](https://github.com/adap/flower/pull/2450), " +"[#2456](https://github.com/adap/flower/pull/2456), " +"[#2318](https://github.com/adap/flower/pull/2318), " +"[#2712](https://github.com/adap/flower/pull/2712))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:62 +#: ../../source/ref-changelog.md:447 msgid "" -"We use PyTorch to load CIFAR10, a popular colored image classification " -"dataset for machine learning. The PyTorch :code:`DataLoader()` downloads " -"the training and test data that are then normalized." +"Several code examples were updated to use [Flower " +"Datasets](https://flower.ai/docs/datasets/)." 
msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:78 +#: ../../source/ref-changelog.md:449 msgid "" -"Define the loss and optimizer with PyTorch. The training of the dataset " -"is done by looping over the dataset, measure the corresponding loss and " -"optimize it." +"**General updates to Flower Examples** " +"([#2381](https://github.com/adap/flower/pull/2381), " +"[#2805](https://github.com/adap/flower/pull/2805), " +"[#2782](https://github.com/adap/flower/pull/2782), " +"[#2806](https://github.com/adap/flower/pull/2806), " +"[#2829](https://github.com/adap/flower/pull/2829), " +"[#2825](https://github.com/adap/flower/pull/2825), " +"[#2816](https://github.com/adap/flower/pull/2816), " +"[#2726](https://github.com/adap/flower/pull/2726), " +"[#2659](https://github.com/adap/flower/pull/2659), " +"[#2655](https://github.com/adap/flower/pull/2655))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:94 -msgid "" -"Define then the validation of the machine learning network. We loop over" -" the test set and measure the loss and accuracy of the test set." +#: ../../source/ref-changelog.md:451 +msgid "Many Flower code examples received substantial updates." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:113 -msgid "" -"After defining the training and testing of a PyTorch machine learning " -"model, we use the functions for the Flower clients." +#: ../../source/ref-changelog.md:453 ../../source/ref-changelog.md:546 +msgid "**Update Flower Baselines**" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:115 +#: ../../source/ref-changelog.md:455 msgid "" -"The Flower clients will use a simple CNN adapted from 'PyTorch: A 60 " -"Minute Blitz':" +"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " +"[#2771](https://github.com/adap/flower/pull/2771))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:142 -msgid "" -"After loading the data set with :code:`load_data()` we define the Flower " -"interface." 
+#: ../../source/ref-changelog.md:456 +msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:144 -#: ../../source/tutorial-quickstart-tensorflow.rst:54 -msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to train the neural network we defined earlier)." +#: ../../source/ref-changelog.md:457 +msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:150 -msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses PyTorch. Implementing :code:`NumPyClient` usually means " -"defining the following methods (:code:`set_parameters` is optional " -"though):" +#: ../../source/ref-changelog.md:458 +msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:156 -#: ../../source/tutorial-quickstart-scikitlearn.rst:119 -msgid "return the model weight as a list of NumPy ndarrays" +#: ../../source/ref-changelog.md:459 +msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:157 -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 -msgid ":code:`set_parameters` (optional)" +#: ../../source/ref-changelog.md:460 +msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:158 -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 +#: ../../source/ref-changelog.md:462 msgid "" -"update the local model weights with the parameters received from the " -"server" 
-msgstr "" - -#: ../../source/tutorial-quickstart-pytorch.rst:160 -#: ../../source/tutorial-quickstart-scikitlearn.rst:124 -msgid "set the local model weights" -msgstr "" - -#: ../../source/tutorial-quickstart-pytorch.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 -msgid "train the local model" +"**Improve documentation** " +"([#2674](https://github.com/adap/flower/pull/2674), " +"[#2480](https://github.com/adap/flower/pull/2480), " +"[#2826](https://github.com/adap/flower/pull/2826), " +"[#2727](https://github.com/adap/flower/pull/2727), " +"[#2761](https://github.com/adap/flower/pull/2761), " +"[#2900](https://github.com/adap/flower/pull/2900))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:162 -#: ../../source/tutorial-quickstart-scikitlearn.rst:126 -msgid "receive the updated local model weights" +#: ../../source/ref-changelog.md:464 +msgid "" +"**Improved testing and development infrastructure** " +"([#2797](https://github.com/adap/flower/pull/2797), " +"[#2676](https://github.com/adap/flower/pull/2676), " +"[#2644](https://github.com/adap/flower/pull/2644), " +"[#2656](https://github.com/adap/flower/pull/2656), " +"[#2848](https://github.com/adap/flower/pull/2848), " +"[#2675](https://github.com/adap/flower/pull/2675), " +"[#2735](https://github.com/adap/flower/pull/2735), " +"[#2767](https://github.com/adap/flower/pull/2767), " +"[#2732](https://github.com/adap/flower/pull/2732), " +"[#2744](https://github.com/adap/flower/pull/2744), " +"[#2681](https://github.com/adap/flower/pull/2681), " +"[#2699](https://github.com/adap/flower/pull/2699), " +"[#2745](https://github.com/adap/flower/pull/2745), " +"[#2734](https://github.com/adap/flower/pull/2734), " +"[#2731](https://github.com/adap/flower/pull/2731), " +"[#2652](https://github.com/adap/flower/pull/2652), " +"[#2720](https://github.com/adap/flower/pull/2720), " +"[#2721](https://github.com/adap/flower/pull/2721), " +"[#2717](https://github.com/adap/flower/pull/2717), " 
+"[#2864](https://github.com/adap/flower/pull/2864), " +"[#2694](https://github.com/adap/flower/pull/2694), " +"[#2709](https://github.com/adap/flower/pull/2709), " +"[#2658](https://github.com/adap/flower/pull/2658), " +"[#2796](https://github.com/adap/flower/pull/2796), " +"[#2692](https://github.com/adap/flower/pull/2692), " +"[#2657](https://github.com/adap/flower/pull/2657), " +"[#2813](https://github.com/adap/flower/pull/2813), " +"[#2661](https://github.com/adap/flower/pull/2661), " +"[#2398](https://github.com/adap/flower/pull/2398))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:164 -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 -msgid "test the local model" +#: ../../source/ref-changelog.md:466 +msgid "" +"The Flower testing and development infrastructure has received " +"substantial updates. This makes Flower 1.7 the most tested release ever." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:166 -msgid "which can be implemented in the following way:" +#: ../../source/ref-changelog.md:468 +msgid "" +"**Update dependencies** " +"([#2753](https://github.com/adap/flower/pull/2753), " +"[#2651](https://github.com/adap/flower/pull/2651), " +"[#2739](https://github.com/adap/flower/pull/2739), " +"[#2837](https://github.com/adap/flower/pull/2837), " +"[#2788](https://github.com/adap/flower/pull/2788), " +"[#2811](https://github.com/adap/flower/pull/2811), " +"[#2774](https://github.com/adap/flower/pull/2774), " +"[#2790](https://github.com/adap/flower/pull/2790), " +"[#2751](https://github.com/adap/flower/pull/2751), " +"[#2850](https://github.com/adap/flower/pull/2850), " +"[#2812](https://github.com/adap/flower/pull/2812), " +"[#2872](https://github.com/adap/flower/pull/2872), " +"[#2736](https://github.com/adap/flower/pull/2736), " +"[#2756](https://github.com/adap/flower/pull/2756), " +"[#2857](https://github.com/adap/flower/pull/2857), " +"[#2757](https://github.com/adap/flower/pull/2757), " 
+"[#2810](https://github.com/adap/flower/pull/2810), " +"[#2740](https://github.com/adap/flower/pull/2740), " +"[#2789](https://github.com/adap/flower/pull/2789))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:189 -#: ../../source/tutorial-quickstart-tensorflow.rst:82 +#: ../../source/ref-changelog.md:470 msgid "" -"We can now create an instance of our class :code:`CifarClient` and add " -"one line to actually run this client:" +"**General improvements** " +"([#2803](https://github.com/adap/flower/pull/2803), " +"[#2847](https://github.com/adap/flower/pull/2847), " +"[#2877](https://github.com/adap/flower/pull/2877), " +"[#2690](https://github.com/adap/flower/pull/2690), " +"[#2889](https://github.com/adap/flower/pull/2889), " +"[#2874](https://github.com/adap/flower/pull/2874), " +"[#2819](https://github.com/adap/flower/pull/2819), " +"[#2689](https://github.com/adap/flower/pull/2689), " +"[#2457](https://github.com/adap/flower/pull/2457), " +"[#2870](https://github.com/adap/flower/pull/2870), " +"[#2669](https://github.com/adap/flower/pull/2669), " +"[#2876](https://github.com/adap/flower/pull/2876), " +"[#2885](https://github.com/adap/flower/pull/2885), " +"[#2858](https://github.com/adap/flower/pull/2858), " +"[#2867](https://github.com/adap/flower/pull/2867), " +"[#2351](https://github.com/adap/flower/pull/2351), " +"[#2886](https://github.com/adap/flower/pull/2886), " +"[#2860](https://github.com/adap/flower/pull/2860), " +"[#2828](https://github.com/adap/flower/pull/2828), " +"[#2869](https://github.com/adap/flower/pull/2869), " +"[#2875](https://github.com/adap/flower/pull/2875), " +"[#2733](https://github.com/adap/flower/pull/2733), " +"[#2488](https://github.com/adap/flower/pull/2488), " +"[#2646](https://github.com/adap/flower/pull/2646), " +"[#2879](https://github.com/adap/flower/pull/2879), " +"[#2821](https://github.com/adap/flower/pull/2821), " +"[#2855](https://github.com/adap/flower/pull/2855), " 
+"[#2800](https://github.com/adap/flower/pull/2800), " +"[#2807](https://github.com/adap/flower/pull/2807), " +"[#2801](https://github.com/adap/flower/pull/2801), " +"[#2804](https://github.com/adap/flower/pull/2804), " +"[#2851](https://github.com/adap/flower/pull/2851), " +"[#2787](https://github.com/adap/flower/pull/2787), " +"[#2852](https://github.com/adap/flower/pull/2852), " +"[#2672](https://github.com/adap/flower/pull/2672), " +"[#2759](https://github.com/adap/flower/pull/2759))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:196 -#: ../../source/tutorial-quickstart-tensorflow.rst:90 +#: ../../source/ref-changelog.md:474 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"[::]:8080\"` tells " -"the client which server to connect to. In our case we can run the server " -"and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." +"**Deprecate** `start_numpy_client` " +"([#2563](https://github.com/adap/flower/pull/2563), " +"[#2718](https://github.com/adap/flower/pull/2718))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:226 -#: ../../source/tutorial-quickstart-scikitlearn.rst:239 -#: ../../source/tutorial-quickstart-tensorflow.rst:122 -#: ../../source/tutorial-quickstart-xgboost.rst:533 +#: ../../source/ref-changelog.md:476 msgid "" -"Once the server is running we can start the clients in different " -"terminals. Open a new terminal and start the first client:" +"Until now, clients of type `NumPyClient` needed to be started via " +"`start_numpy_client`. 
In our efforts to consolidate framework APIs, we " +"have introduced changes, and now all client types should start via " +"`start_client`. To continue using `NumPyClient` clients, you simply need " +"to first call the `.to_client()` method and then pass returned `Client` " +"object to `start_client`. The examples and the documentation have been " +"updated accordingly." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:233 -#: ../../source/tutorial-quickstart-scikitlearn.rst:246 -#: ../../source/tutorial-quickstart-tensorflow.rst:129 -#: ../../source/tutorial-quickstart-xgboost.rst:540 -msgid "Open another terminal and start the second client:" +#: ../../source/ref-changelog.md:478 +msgid "" +"**Deprecate legacy DP wrappers** " +"([#2749](https://github.com/adap/flower/pull/2749))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:239 -#: ../../source/tutorial-quickstart-scikitlearn.rst:252 -#: ../../source/tutorial-quickstart-xgboost.rst:546 +#: ../../source/ref-changelog.md:480 msgid "" -"Each client will have its own dataset. You should now see how the " -"training does in the very first terminal (the one that started the " -"server):" +"Legacy DP wrapper classes are deprecated, but still functional. This is " +"in preparation for an all-new pluggable version of differential privacy " +"support in Flower." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:271 +#: ../../source/ref-changelog.md:482 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples" -"/quickstart-pytorch`." 
+"**Make optional arg** `--callable` **in** `flower-client` **a required " +"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:-1 +#: ../../source/ref-changelog.md:484 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch Lightning to train an Auto Encoder model on MNIST." +"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " +"([#2890](https://github.com/adap/flower/pull/2890))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 -msgid "Quickstart PyTorch Lightning" +#: ../../source/ref-changelog.md:486 +msgid "" +"**Drop experimental** `Task` **fields** " +"([#2866](https://github.com/adap/flower/pull/2866), " +"[#2865](https://github.com/adap/flower/pull/2865))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:10 +#: ../../source/ref-changelog.md:488 msgid "" -"Let's build a horizontal federated learning system using PyTorch " -"Lightning and Flower!" +"Experimental fields `sa`, `legacy_server_message` and " +"`legacy_client_message` were removed from `Task` message. The removed " +"fields are superseded by the new `RecordSet` abstraction." msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 +#: ../../source/ref-changelog.md:490 msgid "" -"Please refer to the `full code example " -"`_ to learn more." +"**Retire MXNet examples** " +"([#2724](https://github.com/adap/flower/pull/2724))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 +#: ../../source/ref-changelog.md:492 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with scikit-learn to train a linear regression model." +"The development of the MXNet fremework has ended and the project is now " +"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " +"examples won't receive updates." 
msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:5 -msgid "Quickstart scikit-learn" +#: ../../source/ref-changelog.md:494 +msgid "v1.6.0 (2023-11-28)" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:10 +#: ../../source/ref-changelog.md:500 msgid "" -"In this tutorial, we will learn how to train a :code:`Logistic " -"Regression` model on MNIST using Flower and scikit-learn." +"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " +"`Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Gabriel " +"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," +" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " +"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " +"`cnxdeveloper`, `k3nfalt` " msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:12 +#: ../../source/ref-changelog.md:504 msgid "" -"It is recommended to create a virtual environment and run everything " -"within this :doc:`virtualenv `." +"**Add experimental support for Python 3.12** " +"([#2565](https://github.com/adap/flower/pull/2565))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:16 +#: ../../source/ref-changelog.md:506 msgid "" -"*Clients* are responsible for generating individual model parameter " -"updates for the model based on their local datasets. These updates are " -"then sent to the *server* which will aggregate them to produce an updated" -" global model. Finally, the *server* sends this improved version of the " -"model back to each *client*. A complete cycle of parameters updates is " -"called a *round*." 
+"**Add new XGBoost examples** " +"([#2612](https://github.com/adap/flower/pull/2612), " +"[#2554](https://github.com/adap/flower/pull/2554), " +"[#2617](https://github.com/adap/flower/pull/2617), " +"[#2618](https://github.com/adap/flower/pull/2618), " +"[#2619](https://github.com/adap/flower/pull/2619), " +"[#2567](https://github.com/adap/flower/pull/2567))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:20 +#: ../../source/ref-changelog.md:508 msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. You can do this by running:" -msgstr "" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:26 -msgid "Since we want to use scikit-learn, let's go ahead and install it:" +"We have added a new `xgboost-quickstart` example alongside a new " +"`xgboost-comprehensive` example that goes more in-depth." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:32 -msgid "Or simply install all dependencies using Poetry:" +#: ../../source/ref-changelog.md:510 +msgid "" +"**Add Vertical FL example** " +"([#2598](https://github.com/adap/flower/pull/2598))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:42 +#: ../../source/ref-changelog.md:512 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. However, before " -"setting up the client and server, we will define all functionalities that" -" we need for our federated learning setup within :code:`utils.py`. The " -":code:`utils.py` contains different functions defining all the machine " -"learning basics:" +"We had many questions about Vertical Federated Learning using Flower, so " +"we decided to add an simple example for it on the [Titanic " +"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " +"tutorial (in the README)." 
msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:45 -msgid ":code:`get_model_parameters()`" +#: ../../source/ref-changelog.md:514 +msgid "" +"**Support custom** `ClientManager` **in** `start_driver()` " +"([#2292](https://github.com/adap/flower/pull/2292))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:46 -msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" +#: ../../source/ref-changelog.md:516 +msgid "" +"**Update REST API to support create and delete nodes** " +"([#2283](https://github.com/adap/flower/pull/2283))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:47 -msgid ":code:`set_model_params()`" +#: ../../source/ref-changelog.md:518 +msgid "" +"**Update the Android SDK** " +"([#2187](https://github.com/adap/flower/pull/2187))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:48 -msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" +#: ../../source/ref-changelog.md:520 +msgid "Add gRPC request-response capability to the Android SDK." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid ":code:`set_initial_params()`" +#: ../../source/ref-changelog.md:522 +msgid "" +"**Update the C++ SDK** " +"([#2537](https://github.com/adap/flower/pull/2537), " +"[#2528](https://github.com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid "Initializes the model parameters that the Flower server will ask for" +#: ../../source/ref-changelog.md:524 +msgid "Add gRPC request-response capability to the C++ SDK." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +#: ../../source/ref-changelog.md:526 msgid "" -"Please check out :code:`utils.py` `here " -"`_ for more details. The pre-defined functions are used in" -" the :code:`client.py` and imported. 
The :code:`client.py` also requires " -"to import several packages such as Flower and scikit-learn:" +"**Make HTTPS the new default** " +"([#2591](https://github.com/adap/flower/pull/2591), " +"[#2636](https://github.com/adap/flower/pull/2636))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:67 +#: ../../source/ref-changelog.md:528 msgid "" -"Prior to local training, we need to load the MNIST dataset, a popular " -"image classification dataset of handwritten digits for machine learning, " -"and partition the dataset for FL. This can be conveniently achieved using" -" `Flower Datasets `_. The " -":code:`FederatedDataset.load_partition()` method loads the partitioned " -"training set for each partition ID defined in the :code:`--partition-id` " -"argument." +"Flower is moving to HTTPS by default. The new `flower-server` requires " +"passing `--certificates`, but users can enable `--insecure` to use HTTP " +"for prototyping. The same applies to `flower-client`, which can either " +"use user-provided credentials or gRPC-bundled certificates to connect to " +"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " +"enable insecure HTTP connections." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:95 +#: ../../source/ref-changelog.md:530 msgid "" -"Next, the logistic regression model is defined and initialized with " -":code:`utils.set_initial_params()`." +"For backward compatibility, `start_client()` and `start_numpy_client()` " +"will still start in insecure mode by default. In a future release, " +"insecure connections will require user opt-in by passing `insecure=True`." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:107 +#: ../../source/ref-changelog.md:532 msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. 
The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to fit the logistic regression we defined earlier)." +"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:113 +#: ../../source/ref-changelog.md:534 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses scikit-learn. Implementing :code:`NumPyClient` usually " -"means defining the following methods (:code:`set_parameters` is optional " -"though):" -msgstr "" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:122 -msgid "is directly imported with :code:`utils.set_model_params()`" -msgstr "" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:130 -msgid "The methods can be implemented in the following way:" +"Using the `client_fn`, Flower clients can interchangeably run as " +"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" +" `start_simulation`) without requiring changes to how the client class is" +" defined and instantiated. The `to_client()` function is introduced to " +"convert a `NumPyClient` to a `Client`." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:153 +#: ../../source/ref-changelog.md:536 msgid "" -"We can now create an instance of our class :code:`MnistClient` and add " -"one line to actually run this client:" +"**Add new** `Bulyan` **strategy** " +"([#1817](https://github.com/adap/flower/pull/1817), " +"[#1891](https://github.com/adap/flower/pull/1891))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:160 +#: ../../source/ref-changelog.md:538 msgid "" -"That's it for the client. 
We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells" -" the client which server to connect to. In our case we can run the server" -" and the client on the same machine, therefore we use " -":code:`\"0.0.0.0:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we pass to the client." +"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " +"2018](https://arxiv.org/abs/1802.07927)" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:169 +#: ../../source/ref-changelog.md:540 msgid "" -"The following Flower server is a little bit more advanced and returns an " -"evaluation function for the server-side evaluation. First, we import " -"again all required libraries such as Flower and scikit-learn." +"**Add new** `XGB Bagging` **strategy** " +"([#2611](https://github.com/adap/flower/pull/2611))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:172 -msgid ":code:`server.py`, import Flower and start the server:" +#: ../../source/ref-changelog.md:542 ../../source/ref-changelog.md:544 +msgid "" +"**Introduce `WorkloadState`** " +"([#2564](https://github.com/adap/flower/pull/2564), " +"[#2632](https://github.com/adap/flower/pull/2632))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:185 +#: ../../source/ref-changelog.md:548 msgid "" -"The number of federated learning rounds is set in :code:`fit_round()` and" -" the evaluation is defined in :code:`get_evaluate_fn()`. The evaluation " -"function is called after each federated learning round and gives you " -"information about loss and accuracy. 
Note that we also make use of Flower" -" Datasets here to load the test split of the MNIST dataset for server-" -"side evaluation." +"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " +"[#2286](https://github.com/adap/flower/pull/2286), " +"[#2509](https://github.com/adap/flower/pull/2509))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:213 +#: ../../source/ref-changelog.md:550 msgid "" -"The :code:`main` contains the server-side parameter initialization " -":code:`utils.set_initial_params()` as well as the aggregation strategy " -":code:`fl.server.strategy:FedAvg()`. The strategy is the default one, " -"federated averaging (or FedAvg), with two clients and evaluation after " -"each federated learning round. The server can be started with the command" -" :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." +"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:232 +#: ../../source/ref-changelog.md:552 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. Federated learning systems usually have a " -"server and multiple clients. We, therefore, have to start the server " -"first:" +"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " +"[#2507](https://github.com/adap/flower/pull/2507))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:286 +#: ../../source/ref-changelog.md:554 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples/sklearn-logreg-" -"mnist`." 
+"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with TensorFlow to train a MobilNetV2 model on CIFAR-10." +#: ../../source/ref-changelog.md:556 +msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:5 -msgid "Quickstart TensorFlow" +#: ../../source/ref-changelog.md:558 +msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:13 -msgid "Let's build a federated learning system in less than 20 lines of code!" +#: ../../source/ref-changelog.md:560 +msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:15 -msgid "Before Flower can be imported we have to install it:" +#: ../../source/ref-changelog.md:562 +msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:21 -msgid "" -"Since we want to use the Keras API of TensorFlow (TF), we have to install" -" TF as well:" +#: ../../source/ref-changelog.md:564 +msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:31 -msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +#: ../../source/ref-changelog.md:566 +msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:38 -msgid "" -"We use the Keras utilities of TF to load CIFAR10, a popular colored image" -" classification dataset for machine learning. 
The call to " -":code:`tf.keras.datasets.cifar10.load_data()` downloads CIFAR10, caches " -"it locally, and then returns the entire training and test set as NumPy " -"ndarrays." +#: ../../source/ref-changelog.md:568 +msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:47 +#: ../../source/ref-changelog.md:570 msgid "" -"Next, we need a model. For the purpose of this tutorial, we use " -"MobilNetV2 with 10 output classes:" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:60 +#: ../../source/ref-changelog.md:572 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses Keras. The :code:`NumPyClient` interface defines three " -"methods which can be implemented in the following way:" +"**General updates to Flower Examples** " +"([#2384](https://github.com/adap/flower/pull/2384), " +"[#2425](https://github.com/adap/flower/pull/2425), " +"[#2526](https://github.com/adap/flower/pull/2526), " +"[#2302](https://github.com/adap/flower/pull/2302), " +"[#2545](https://github.com/adap/flower/pull/2545))" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:135 -msgid "Each client will have its own dataset." 
+#: ../../source/ref-changelog.md:574 +msgid "" +"**General updates to Flower Baselines** " +"([#2301](https://github.com/adap/flower/pull/2301), " +"[#2305](https://github.com/adap/flower/pull/2305), " +"[#2307](https://github.com/adap/flower/pull/2307), " +"[#2327](https://github.com/adap/flower/pull/2327), " +"[#2435](https://github.com/adap/flower/pull/2435), " +"[#2462](https://github.com/adap/flower/pull/2462), " +"[#2463](https://github.com/adap/flower/pull/2463), " +"[#2461](https://github.com/adap/flower/pull/2461), " +"[#2469](https://github.com/adap/flower/pull/2469), " +"[#2466](https://github.com/adap/flower/pull/2466), " +"[#2471](https://github.com/adap/flower/pull/2471), " +"[#2472](https://github.com/adap/flower/pull/2472), " +"[#2470](https://github.com/adap/flower/pull/2470))" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:137 +#: ../../source/ref-changelog.md:576 msgid "" -"You should now see how the training does in the very first terminal (the " -"one that started the server):" +"**General updates to the simulation engine** " +"([#2331](https://github.com/adap/flower/pull/2331), " +"[#2447](https://github.com/adap/flower/pull/2447), " +"[#2448](https://github.com/adap/flower/pull/2448), " +"[#2294](https://github.com/adap/flower/pull/2294))" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:169 +#: ../../source/ref-changelog.md:578 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this can be found in :code:`examples" -"/quickstart-tensorflow/client.py`." 
+"**General updates to Flower SDKs** " +"([#2288](https://github.com/adap/flower/pull/2288), " +"[#2429](https://github.com/adap/flower/pull/2429), " +"[#2555](https://github.com/adap/flower/pull/2555), " +"[#2543](https://github.com/adap/flower/pull/2543), " +"[#2544](https://github.com/adap/flower/pull/2544), " +"[#2597](https://github.com/adap/flower/pull/2597), " +"[#2623](https://github.com/adap/flower/pull/2623))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:-1 +#: ../../source/ref-changelog.md:580 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with XGBoost to train classification models on trees." +"**General improvements** " +"([#2309](https://github.com/adap/flower/pull/2309), " +"[#2310](https://github.com/adap/flower/pull/2310), " +"[#2313](https://github.com/adap/flower/pull/2313), " +"[#2316](https://github.com/adap/flower/pull/2316), " +"[#2317](https://github.com/adap/flower/pull/2317), " +"[#2349](https://github.com/adap/flower/pull/2349), " +"[#2360](https://github.com/adap/flower/pull/2360), " +"[#2402](https://github.com/adap/flower/pull/2402), " +"[#2446](https://github.com/adap/flower/pull/2446), " +"[#2561](https://github.com/adap/flower/pull/2561), " +"[#2273](https://github.com/adap/flower/pull/2273), " +"[#2267](https://github.com/adap/flower/pull/2267), " +"[#2274](https://github.com/adap/flower/pull/2274), " +"[#2275](https://github.com/adap/flower/pull/2275), " +"[#2432](https://github.com/adap/flower/pull/2432), " +"[#2251](https://github.com/adap/flower/pull/2251), " +"[#2321](https://github.com/adap/flower/pull/2321), " +"[#1936](https://github.com/adap/flower/pull/1936), " +"[#2408](https://github.com/adap/flower/pull/2408), " +"[#2413](https://github.com/adap/flower/pull/2413), " +"[#2401](https://github.com/adap/flower/pull/2401), " +"[#2531](https://github.com/adap/flower/pull/2531), " +"[#2534](https://github.com/adap/flower/pull/2534), " 
+"[#2535](https://github.com/adap/flower/pull/2535), " +"[#2521](https://github.com/adap/flower/pull/2521), " +"[#2553](https://github.com/adap/flower/pull/2553), " +"[#2596](https://github.com/adap/flower/pull/2596))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:5 -msgid "Quickstart XGBoost" +#: ../../source/ref-changelog.md:582 ../../source/ref-changelog.md:672 +#: ../../source/ref-changelog.md:736 ../../source/ref-changelog.md:790 +#: ../../source/ref-changelog.md:857 +msgid "Flower received many improvements under the hood, too many to list here." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:14 -msgid "Federated XGBoost" +#: ../../source/ref-changelog.md:586 +msgid "" +"**Remove support for Python 3.7** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:16 +#: ../../source/ref-changelog.md:588 msgid "" -"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " -"implementation of gradient-boosted decision tree (**GBDT**), that " -"maximises the computational boundaries for boosted tree methods. It's " -"primarily designed to enhance both the performance and computational " -"speed of machine learning models. In XGBoost, trees are constructed " -"concurrently, unlike the sequential approach taken by GBDT." +"Python 3.7 support was deprecated in Flower 1.5, and this release removes" +" support. Flower now requires Python 3.8." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:20 +#: ../../source/ref-changelog.md:590 msgid "" -"Often, for tabular data on medium-sized datasets with fewer than 10k " -"training examples, XGBoost surpasses the results of deep learning " -"techniques." 
+"**Remove experimental argument** `rest` **from** `start_client` " +"([#2324](https://github.com/adap/flower/pull/2324))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:23 -msgid "Why federated XGBoost?" +#: ../../source/ref-changelog.md:592 +msgid "" +"The (still experimental) argument `rest` was removed from `start_client` " +"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " +"experimental REST API instead." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:25 -msgid "" -"Indeed, as the demand for data privacy and decentralized learning grows, " -"there's an increasing requirement to implement federated XGBoost systems " -"for specialised applications, like survival analysis and financial fraud " -"detection." +#: ../../source/ref-changelog.md:594 +msgid "v1.5.0 (2023-08-31)" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:27 +#: ../../source/ref-changelog.md:600 msgid "" -"Federated learning ensures that raw data remains on the local device, " -"making it an attractive approach for sensitive domains where data " -"security and privacy are paramount. Given the robustness and efficiency " -"of XGBoost, combining it with federated learning offers a promising " -"solution for these specific challenges." +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:30 +#: ../../source/ref-changelog.md:604 msgid "" -"In this tutorial we will learn how to train a federated XGBoost model on " -"HIGGS dataset using Flower and :code:`xgboost` package. 
We use a simple " -"example (`full code xgboost-quickstart " -"`_)" -" with two *clients* and one *server* to demonstrate how federated XGBoost" -" works, and then we dive into a more complex example (`full code xgboost-" -"comprehensive `_) to run various experiments." +"**Introduce new simulation engine** " +"([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:37 -msgid "Environment Setup" +#: ../../source/ref-changelog.md:606 +msgid "" +"The new simulation engine has been rewritten from the ground up, yet it " +"remains fully backwards compatible. It offers much improved stability and" +" memory handling, especially when working with GPUs. Simulations " +"transparently adapt to different settings to scale simulation in CPU-" +"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:41 +#: ../../source/ref-changelog.md:608 msgid "" -"We first need to install Flower and Flower Datasets. You can do this by " -"running :" +"Comprehensive documentation includes a new [how-to run " +"simulations](https://flower.ai/docs/framework/how-to-run-" +"simulations.html) guide, new [simulation-" +"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " +"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" +"tensorflow.html) notebooks, and a new [YouTube tutorial " +"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:47 +#: ../../source/ref-changelog.md:610 msgid "" -"Since we want to use :code:`xgboost` package to build up XGBoost trees, " -"let's go ahead and install :code:`xgboost`:" +"**Restructure Flower Docs** " +"([#1824](https://github.com/adap/flower/pull/1824), " +"[#1865](https://github.com/adap/flower/pull/1865), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1887](https://github.com/adap/flower/pull/1887), " +"[#1919](https://github.com/adap/flower/pull/1919), " +"[#1922](https://github.com/adap/flower/pull/1922), " +"[#1920](https://github.com/adap/flower/pull/1920), " +"[#1923](https://github.com/adap/flower/pull/1923), " +"[#1924](https://github.com/adap/flower/pull/1924), " +"[#1962](https://github.com/adap/flower/pull/1962), " +"[#2006](https://github.com/adap/flower/pull/2006), " +"[#2133](https://github.com/adap/flower/pull/2133), " +"[#2203](https://github.com/adap/flower/pull/2203), " +"[#2215](https://github.com/adap/flower/pull/2215), " +"[#2122](https://github.com/adap/flower/pull/2122), " +"[#2223](https://github.com/adap/flower/pull/2223), " +"[#2219](https://github.com/adap/flower/pull/2219), " +"[#2232](https://github.com/adap/flower/pull/2232), " +"[#2233](https://github.com/adap/flower/pull/2233), " +"[#2234](https://github.com/adap/flower/pull/2234), " +"[#2235](https://github.com/adap/flower/pull/2235), " +"[#2237](https://github.com/adap/flower/pull/2237), " +"[#2238](https://github.com/adap/flower/pull/2238), " +"[#2242](https://github.com/adap/flower/pull/2242), " +"[#2231](https://github.com/adap/flower/pull/2231), " +"[#2243](https://github.com/adap/flower/pull/2243), " +"[#2227](https://github.com/adap/flower/pull/2227))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:57 +#: ../../source/ref-changelog.md:612 msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. 
Now that we have all our " -"dependencies installed, let's run a simple distributed training with two " -"clients and one server." +"Much effort went into a completely restructured Flower docs experience. " +"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " +"divided into Flower Framework, Flower Baselines, Flower Android SDK, " +"Flower iOS SDK, and code example projects." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:60 +#: ../../source/ref-changelog.md:614 msgid "" -"In a file called :code:`client.py`, import xgboost, Flower, Flower " -"Datasets and other related functions:" +"**Introduce Flower Swift SDK** " +"([#1858](https://github.com/adap/flower/pull/1858), " +"[#1897](https://github.com/adap/flower/pull/1897))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:87 -msgid "Dataset partition and hyper-parameter selection" +#: ../../source/ref-changelog.md:616 +msgid "" +"This is the first preview release of the Flower Swift SDK. Flower support" +" on iOS is improving, and alongside the Swift SDK and code example, there" +" is now also an iOS quickstart tutorial." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:89 +#: ../../source/ref-changelog.md:618 msgid "" -"Prior to local training, we require loading the HIGGS dataset from Flower" -" Datasets and conduct data partitioning for FL:" +"**Introduce Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:102 +#: ../../source/ref-changelog.md:620 msgid "" -"In this example, we split the dataset into two partitions with uniform " -"distribution (:code:`IidPartitioner(num_partitions=2)`). Then, we load " -"the partition for the given client based on :code:`node_id`:" +"This is the first preview release of the Flower Kotlin SDK. Flower " +"support on Android is improving, and alongside the Kotlin SDK and code " +"example, there is now also an Android quickstart tutorial." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:121 +#: ../../source/ref-changelog.md:622 msgid "" -"After that, we do train/test splitting on the given partition (client's " -"local data), and transform data format for :code:`xgboost` package." +"**Introduce new end-to-end testing infrastructure** " +"([#1842](https://github.com/adap/flower/pull/1842), " +"[#2071](https://github.com/adap/flower/pull/2071), " +"[#2072](https://github.com/adap/flower/pull/2072), " +"[#2068](https://github.com/adap/flower/pull/2068), " +"[#2067](https://github.com/adap/flower/pull/2067), " +"[#2069](https://github.com/adap/flower/pull/2069), " +"[#2073](https://github.com/adap/flower/pull/2073), " +"[#2070](https://github.com/adap/flower/pull/2070), " +"[#2074](https://github.com/adap/flower/pull/2074), " +"[#2082](https://github.com/adap/flower/pull/2082), " +"[#2084](https://github.com/adap/flower/pull/2084), " +"[#2093](https://github.com/adap/flower/pull/2093), " +"[#2109](https://github.com/adap/flower/pull/2109), " +"[#2095](https://github.com/adap/flower/pull/2095), " +"[#2140](https://github.com/adap/flower/pull/2140), " +"[#2137](https://github.com/adap/flower/pull/2137), " +"[#2165](https://github.com/adap/flower/pull/2165))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:134 +#: ../../source/ref-changelog.md:624 msgid "" -"The functions of :code:`train_test_split` and " -":code:`transform_dataset_to_dmatrix` are defined as below:" +"A new testing infrastructure ensures that new changes stay compatible " +"with existing framework integrations or strategies." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:158 -msgid "Finally, we define the hyper-parameters used for XGBoost training." 
+#: ../../source/ref-changelog.md:626 +msgid "**Deprecate Python 3.7**" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:174 +#: ../../source/ref-changelog.md:628 msgid "" -"The :code:`num_local_round` represents the number of iterations for local" -" tree boost. We use CPU for the training in default. One can shift it to " -"GPU by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as " -"evaluation metric." +"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" +" Python 3.7 is now deprecated and will be removed in an upcoming release." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:181 -msgid "Flower client definition for XGBoost" +#: ../../source/ref-changelog.md:630 +msgid "" +"**Add new** `FedTrimmedAvg` **strategy** " +"([#1769](https://github.com/adap/flower/pull/1769), " +"[#1853](https://github.com/adap/flower/pull/1853))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:183 +#: ../../source/ref-changelog.md:632 msgid "" -"After loading the dataset we define the Flower client. We follow the " -"general rule to define :code:`XgbClient` class inherited from " -":code:`fl.client.Client`." +"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " +"2018](https://arxiv.org/abs/1803.01498)." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:193 +#: ../../source/ref-changelog.md:634 msgid "" -"The :code:`self.bst` is used to keep the Booster objects that remain " -"consistent across rounds, allowing them to store predictions from trees " -"integrated in earlier rounds and maintain other essential data structures" -" for training." +"**Introduce start_driver** " +"([#1697](https://github.com/adap/flower/pull/1697))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:196 +#: ../../source/ref-changelog.md:636 msgid "" -"Then, we override :code:`get_parameters`, :code:`fit` and " -":code:`evaluate` methods insides :code:`XgbClient` class as follows." 
+"In addition to `start_server` and using the raw Driver API, there is a " +"new `start_driver` function that allows for running `start_server` " +"scripts as a Flower driver with only a single-line code change. Check out" +" the `mt-pytorch` code example to see a working example using " +"`start_driver`." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:210 +#: ../../source/ref-changelog.md:638 msgid "" -"Unlike neural network training, XGBoost trees are not started from a " -"specified random weights. In this case, we do not use " -":code:`get_parameters` and :code:`set_parameters` to initialise model " -"parameters for XGBoost. As a result, let's return an empty tensor in " -":code:`get_parameters` when it is called by the server at the first " -"round." +"**Add parameter aggregation to** `mt-pytorch` **code example** " +"([#1785](https://github.com/adap/flower/pull/1785))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:251 +#: ../../source/ref-changelog.md:640 msgid "" -"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build " -"up the first set of trees. the returned Booster object and config are " -"stored in :code:`self.bst` and :code:`self.config`, respectively. From " -"the second round, we load the global model sent from server to " -":code:`self.bst`, and then update model weights on local training data " -"with function :code:`local_boost` as follows:" +"The `mt-pytorch` example shows how to aggregate parameters when writing a" +" driver script. The included `driver.py` and `server.py` have been " +"aligned to demonstrate both the low-level way and the high-level way of " +"building server-side logic." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:269 +#: ../../source/ref-changelog.md:642 msgid "" -"Given :code:`num_local_round`, we update trees by calling " -":code:`self.bst.update` method. After training, the last " -":code:`N=num_local_round` trees will be extracted to send to the server." 
+"**Migrate experimental REST API to Starlette** " +"([2171](https://github.com/adap/flower/pull/2171))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:291 +#: ../../source/ref-changelog.md:644 msgid "" -"In :code:`evaluate`, we call :code:`self.bst.eval_set` function to " -"conduct evaluation on valid set. The AUC value will be returned." +"The (experimental) REST API used to be implemented in " +"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" +" use [Starlette](https://www.starlette.io/) directly." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:294 +#: ../../source/ref-changelog.md:646 msgid "" -"Now, we can create an instance of our class :code:`XgbClient` and add one" -" line to actually run this client:" +"Please note: The REST request-response API is still experimental and will" +" likely change significantly over time." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:300 +#: ../../source/ref-changelog.md:648 msgid "" -"That's it for the client. We only have to implement :code:`Client`and " -"call :code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` " -"tells the client which server to connect to. In our case we can run the " -"server and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." +"**Introduce experimental gRPC request-response API** " +"([#1867](https://github.com/adap/flower/pull/1867), " +"[#1901](https://github.com/adap/flower/pull/1901))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:311 +#: ../../source/ref-changelog.md:650 msgid "" -"These updates are then sent to the *server* which will aggregate them to " -"produce a better model. 
Finally, the *server* sends this improved version" -" of the model back to each *client* to finish a complete FL round." +"In addition to the existing gRPC API (based on bidirectional streaming) " +"and the experimental REST API, there is now a new gRPC API that uses a " +"request-response model to communicate with client nodes." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:314 +#: ../../source/ref-changelog.md:652 msgid "" -"In a file named :code:`server.py`, import Flower and FedXgbBagging from " -":code:`flwr.server.strategy`." +"Please note: The gRPC request-response API is still experimental and will" +" likely change significantly over time." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:316 -msgid "We first define a strategy for XGBoost bagging aggregation." +#: ../../source/ref-changelog.md:654 +msgid "" +"**Replace the experimental** `start_client(rest=True)` **with the new** " +"`start_client(transport=\"rest\")` " +"([#1880](https://github.com/adap/flower/pull/1880))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:339 +#: ../../source/ref-changelog.md:656 msgid "" -"We use two clients for this example. An " -":code:`evaluate_metrics_aggregation` function is defined to collect and " -"wighted average the AUC values from clients." +"The (experimental) `start_client` argument `rest` was deprecated in " +"favour of a new argument `transport`. `start_client(transport=\"rest\")` " +"will yield the same behaviour as `start_client(rest=True)` did before. " +"All code should migrate to the new argument `transport`. The deprecated " +"argument `rest` will be removed in a future release." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:342 -msgid "Then, we start the server:" +#: ../../source/ref-changelog.md:658 +msgid "" +"**Add a new gRPC option** " +"([#2197](https://github.com/adap/flower/pull/2197))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:354 -msgid "Tree-based bagging aggregation" +#: ../../source/ref-changelog.md:660 +msgid "" +"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" +" option set to 0 by default. This prevents the clients from sending " +"keepalive pings when there is no outstanding stream." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:356 +#: ../../source/ref-changelog.md:662 msgid "" -"You must be curious about how bagging aggregation works. Let's look into " -"the details." +"**Improve example notebooks** " +"([#2005](https://github.com/adap/flower/pull/2005))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:358 -msgid "" -"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define " -":code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`." -" Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` " -"and :code:`evaluate` methods as follows:" +#: ../../source/ref-changelog.md:664 +msgid "There's a new 30min Federated Learning PyTorch tutorial!" 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:454 +#: ../../source/ref-changelog.md:666 msgid "" -"In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " -"trees by calling :code:`aggregate()` function:" +"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:513 +#: ../../source/ref-changelog.md:668 msgid "" -"In this function, we first fetch the number of trees and the number of " -"parallel trees for the current and previous model by calling " -":code:`_get_tree_nums`. Then, the fetched information will be aggregated." -" After that, the trees (containing model weights) are aggregated to " -"generate a new tree model." +"Many examples have received significant updates, including simplified " +"advanced-tensorflow and advanced-pytorch examples, improved macOS " +"compatibility of TensorFlow examples, and code examples for simulation. A" +" major upgrade is that all code examples now have a `requirements.txt` " +"(in addition to `pyproject.toml`)." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:518 +#: ../../source/ref-changelog.md:670 msgid "" -"After traversal of all clients' models, a new global model is generated, " -"followed by the serialisation, and sending back to each client." 
+"**General improvements** " +"([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:523 -msgid "Launch Federated XGBoost!" +#: ../../source/ref-changelog.md:678 +msgid "v1.4.0 (2023-04-21)" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:585 +#: ../../source/ref-changelog.md:684 msgid "" -"Congratulations! You've successfully built and run your first federated " -"XGBoost system. The AUC values can be checked in " -":code:`metrics_distributed`. One can see that the average AUC increases " -"over FL rounds." +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:590 +#: ../../source/ref-changelog.md:688 msgid "" -"The full `source code `_ for this example can be found in :code:`examples" -"/xgboost-quickstart`." 
+"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " +"example)** ([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:594 -msgid "Comprehensive Federated XGBoost" +#: ../../source/ref-changelog.md:690 +msgid "" +"XGBoost is a tree-based ensemble machine learning algorithm that uses " +"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" +" " +"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," +" and a [code example](https://github.com/adap/flower/tree/main/examples" +"/xgboost-quickstart) that demonstrates the usage of this new strategy in " +"an XGBoost project." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:596 +#: ../../source/ref-changelog.md:692 msgid "" -"Now that you have known how federated XGBoost work with Flower, it's time" -" to run some more comprehensive experiments by customising the " -"experimental settings. In the xgboost-comprehensive example (`full code " -"`_), we provide more options to define various experimental" -" setups, including aggregation strategies, data partitioning and " -"centralised/distributed evaluation. We also support :doc:`Flower " -"simulation ` making it easy to simulate large " -"client cohorts in a resource-aware manner. Let's take a look!" 
+"**Introduce iOS SDK (preview)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:603 -msgid "Cyclic training" +#: ../../source/ref-changelog.md:694 +msgid "" +"This is a major update for anyone wanting to implement Federated Learning" +" on iOS mobile devices. We now have a swift iOS SDK present under " +"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" +" that will facilitate greatly the app creating process. To showcase its " +"use, the [iOS " +"example](https://github.com/adap/flower/tree/main/examples/ios) has also " +"been updated!" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:605 +#: ../../source/ref-changelog.md:696 msgid "" -"In addition to bagging aggregation, we offer a cyclic training scheme, " -"which performs FL in a client-by-client fashion. Instead of aggregating " -"multiple clients, there is only one single client participating in the " -"training per round in the cyclic training scenario. The trained local " -"XGBoost trees will be passed to the next client as an initialised model " -"for next round's boosting." +"**Introduce new \"What is Federated Learning?\" tutorial** " +"([#1657](https://github.com/adap/flower/pull/1657), " +"[#1721](https://github.com/adap/flower/pull/1721))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:609 +#: ../../source/ref-changelog.md:698 msgid "" -"To do this, we first customise a :code:`ClientManager` in " -":code:`server_utils.py`:" +"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" +"what-is-federated-learning.html) in our documentation explains the basics" +" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" +" Learning to start their journey with Flower. Forward it to anyone who's " +"interested in Federated Learning!" 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:649 +#: ../../source/ref-changelog.md:700 msgid "" -"The customised :code:`ClientManager` samples all available clients in " -"each FL round based on the order of connection to the server. Then, we " -"define a new strategy :code:`FedXgbCyclic` in " -":code:`flwr.server.strategy.fedxgb_cyclic.py`, in order to sequentially " -"select only one client in given round and pass the received model to next" -" client." +"**Introduce new Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:690 +#: ../../source/ref-changelog.md:702 msgid "" -"Unlike the original :code:`FedAvg`, we don't perform aggregation here. " -"Instead, we just make a copy of the received client model as global model" -" by overriding :code:`aggregate_fit`." +"This new baseline replicates the MNIST+CNN task from the paper [Federated" +" Optimization in Heterogeneous Networks (Li et al., " +"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," +" which aims at making convergence more robust in heterogeneous settings." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:693 +#: ../../source/ref-changelog.md:704 msgid "" -"Also, the customised :code:`configure_fit` and :code:`configure_evaluate`" -" methods ensure the clients to be sequentially selected given FL round:" +"**Introduce new Flower Baseline: FedAvg FEMNIST** " +"([#1655](https://github.com/adap/flower/pull/1655))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:757 -msgid "Customised data partitioning" +#: ../../source/ref-changelog.md:706 +msgid "" +"This new baseline replicates an experiment evaluating the performance of " +"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " +"Benchmark for Federated Settings (Caldas et al., " +"2018)](https://arxiv.org/abs/1812.01097)." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:759 +#: ../../source/ref-changelog.md:708 msgid "" -"In :code:`dataset.py`, we have a function :code:`instantiate_partitioner`" -" to instantiate the data partitioner based on the given " -":code:`num_partitions` and :code:`partitioner_type`. Currently, we " -"provide four supported partitioner type to simulate the uniformity/non-" -"uniformity in data quantity (uniform, linear, square, exponential)." +"**Introduce (experimental) REST API** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:790 -msgid "Customised centralised/distributed evaluation" +#: ../../source/ref-changelog.md:710 +msgid "" +"A new REST API has been introduced as an alternative to the gRPC-based " +"communication stack. In this initial version, the REST API only supports " +"anonymous clients." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:792 +#: ../../source/ref-changelog.md:712 msgid "" -"To facilitate centralised evaluation, we define a function in " -":code:`server_utils.py`:" +"Please note: The REST API is still experimental and will likely change " +"significantly over time." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:824 +#: ../../source/ref-changelog.md:714 msgid "" -"This function returns a evaluation function which instantiates a " -":code:`Booster` object and loads the global model weights to it. The " -"evaluation is conducted by calling :code:`eval_set()` method, and the " -"tested AUC value is reported." +"**Improve the (experimental) Driver API** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:827 +#: ../../source/ref-changelog.md:716 msgid "" -"As for distributed evaluation on the clients, it's same as the quick-" -"start example by overriding the :code:`evaluate()` method insides the " -":code:`XgbClient` class in :code:`client_utils.py`." -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:831 -msgid "Flower simulation" +"The Driver API is still an experimental feature, but this release " +"introduces some major upgrades. One of the main improvements is the " +"introduction of an SQLite database to store server state on disk (instead" +" of in-memory). Another improvement is that tasks (instructions or " +"results) that have been delivered will now be deleted. 
This greatly " +"improves the memory efficiency of a long-running Flower server." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:832 +#: ../../source/ref-changelog.md:718 msgid "" -"We also provide an example code (:code:`sim.py`) to use the simulation " -"capabilities of Flower to simulate federated XGBoost training on either a" -" single machine or a cluster of machines." +"**Fix spilling issues related to Ray during simulations** " +"([#1698](https://github.com/adap/flower/pull/1698))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:866 +#: ../../source/ref-changelog.md:720 msgid "" -"After importing all required packages, we define a :code:`main()` " -"function to perform the simulation process:" +"While running long simulations, `ray` was sometimes spilling huge amounts" +" of data that would make the training unable to continue. This is now " +"fixed! 🎉" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:921 +#: ../../source/ref-changelog.md:722 msgid "" -"We first load the dataset and perform data partitioning, and the pre-" -"processed data is stored in a :code:`list`. After the simulation begins, " -"the clients won't need to pre-process their partitions again." -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:924 -msgid "Then, we define the strategies and other hyper-parameters:" +"**Add new example using** `TabNet` **and Flower** " +"([#1725](https://github.com/adap/flower/pull/1725))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:975 +#: ../../source/ref-changelog.md:724 msgid "" -"After that, we start the simulation by calling " -":code:`fl.simulation.start_simulation`:" +"TabNet is a powerful and flexible framework for training machine learning" +" models on tabular data. We now have a federated example using Flower: " +"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" +"/quickstart-tabnet)." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:995 +#: ../../source/ref-changelog.md:726 msgid "" -"One of key parameters for :code:`start_simulation` is :code:`client_fn` " -"which returns a function to construct a client. We define it as follows:" -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:1038 -msgid "Arguments parser" +"**Add new how-to guide for monitoring simulations** " +"([#1649](https://github.com/adap/flower/pull/1649))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1040 +#: ../../source/ref-changelog.md:728 msgid "" -"In :code:`utils.py`, we define the arguments parsers for clients, server " -"and simulation, allowing users to specify different experimental " -"settings. Let's first see the sever side:" +"We now have a documentation guide to help users monitor their performance" +" during simulations." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1086 +#: ../../source/ref-changelog.md:730 msgid "" -"This allows user to specify training strategies / the number of total " -"clients / FL rounds / participating clients / clients for evaluation, and" -" evaluation fashion. Note that with :code:`--centralised-eval`, the sever" -" will do centralised evaluation and all functionalities for client " -"evaluation will be disabled." -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:1090 -msgid "Then, the argument parser on client side:" +"**Add training metrics to** `History` **object during simulations** " +"([#1696](https://github.com/adap/flower/pull/1696))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1144 +#: ../../source/ref-changelog.md:732 msgid "" -"This defines various options for client data partitioning. Besides, " -"clients also have an option to conduct evaluation on centralised test set" -" by setting :code:`--centralised-eval`, as well as an option to perform " -"scaled learning rate based on the number of clients by setting :code" -":`--scaled-lr`." 
-msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:1148 -msgid "We also have an argument parser for simulation:" -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:1226 -msgid "This integrates all arguments for both client and server sides." -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:1229 -msgid "Example commands" +"The `fit_metrics_aggregation_fn` can be used to aggregate training " +"metrics, but previous releases did not save the results in the `History` " +"object. This is now the case!" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1231 +#: ../../source/ref-changelog.md:734 msgid "" -"To run a centralised evaluated experiment with bagging strategy on 5 " -"clients with exponential distribution for 50 rounds, we first start the " -"server as below:" +"**General improvements** " +"([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " +"[#1672](https://github.com/adap/flower/pull/1672), " +"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " +"[#1682](https://github.com/adap/flower/pull/1682), " +"[#1685](https://github.com/adap/flower/pull/1685), " +"[#1692](https://github.com/adap/flower/pull/1692), " +"[#1705](https://github.com/adap/flower/pull/1705), " 
+"[#1708](https://github.com/adap/flower/pull/1708), " +"[#1711](https://github.com/adap/flower/pull/1711), " +"[#1713](https://github.com/adap/flower/pull/1713), " +"[#1714](https://github.com/adap/flower/pull/1714), " +"[#1718](https://github.com/adap/flower/pull/1718), " +"[#1716](https://github.com/adap/flower/pull/1716), " +"[#1723](https://github.com/adap/flower/pull/1723), " +"[#1735](https://github.com/adap/flower/pull/1735), " +"[#1678](https://github.com/adap/flower/pull/1678), " +"[#1750](https://github.com/adap/flower/pull/1750), " +"[#1753](https://github.com/adap/flower/pull/1753), " +"[#1736](https://github.com/adap/flower/pull/1736), " +"[#1766](https://github.com/adap/flower/pull/1766), " +"[#1760](https://github.com/adap/flower/pull/1760), " +"[#1775](https://github.com/adap/flower/pull/1775), " +"[#1776](https://github.com/adap/flower/pull/1776), " +"[#1777](https://github.com/adap/flower/pull/1777), " +"[#1779](https://github.com/adap/flower/pull/1779), " +"[#1784](https://github.com/adap/flower/pull/1784), " +"[#1773](https://github.com/adap/flower/pull/1773), " +"[#1755](https://github.com/adap/flower/pull/1755), " +"[#1789](https://github.com/adap/flower/pull/1789), " +"[#1788](https://github.com/adap/flower/pull/1788), " +"[#1798](https://github.com/adap/flower/pull/1798), " +"[#1799](https://github.com/adap/flower/pull/1799), " +"[#1739](https://github.com/adap/flower/pull/1739), " +"[#1800](https://github.com/adap/flower/pull/1800), " +"[#1804](https://github.com/adap/flower/pull/1804), " +"[#1805](https://github.com/adap/flower/pull/1805))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1238 -msgid "Then, on each client terminal, we start the clients:" +#: ../../source/ref-changelog.md:742 +msgid "v1.3.0 (2023-02-06)" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1244 -msgid "To run the same experiment with Flower simulation:" +#: ../../source/ref-changelog.md:748 +msgid "" +"`Adam Narozniak`, `Alexander Viala 
Bellander`, `Charles Beauville`, " +"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1250 +#: ../../source/ref-changelog.md:752 msgid "" -"The full `code `_ for this comprehensive example can be found in" -" :code:`examples/xgboost-comprehensive`." +"**Add support for** `workload_id` **and** `group_id` **in Driver API** " +"([#1595](https://github.com/adap/flower/pull/1595))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 -msgid "Build a strategy from scratch" +#: ../../source/ref-changelog.md:754 +msgid "" +"The (experimental) Driver API now supports a `workload_id` that can be " +"used to identify which workload a task belongs to. It also supports a new" +" `group_id` that can be used, for example, to indicate the current " +"training round. Both the `workload_id` and `group_id` enable client nodes" +" to decide whether they want to handle a task or not." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 +#: ../../source/ref-changelog.md:756 msgid "" -"Welcome to the third part of the Flower federated learning tutorial. In " -"previous parts of this tutorial, we introduced federated learning with " -"PyTorch and Flower (`part 1 `__) and we learned how strategies " -"can be used to customize the execution on both the server and the clients" -" (`part 2 `__)." +"**Make Driver API and Fleet API address configurable** " +"([#1637](https://github.com/adap/flower/pull/1637))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +#: ../../source/ref-changelog.md:758 msgid "" -"In this notebook, we'll continue to customize the federated learning " -"system we built previously by creating a custom version of FedAvg (again," -" using `Flower `__ and `PyTorch " -"`__)." 
+"The (experimental) long-running Flower server (Driver API and Fleet API) " +"can now configure the server address of both Driver API (via `--driver-" +"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 +#: ../../source/ref-changelog.md:760 msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the Flower community on Slack to connect, ask questions, and get help: " -"`Join Slack `__ 🌼 We'd love to hear from " -"you in the ``#introductions`` channel! And if anything is unclear, head " -"over to the ``#questions`` channel." +"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " +"\"0.0.0.0:8086\"`" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:17 -msgid "Let's build a new ``Strategy`` from scratch!" +#: ../../source/ref-changelog.md:762 +msgid "Both IPv4 and IPv6 addresses are supported." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:29 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:29 -msgid "Preparation" +#: ../../source/ref-changelog.md:764 +msgid "" +"**Add new example of Federated Learning using fastai and Flower** " +"([#1598](https://github.com/adap/flower/pull/1598))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:31 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:32 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:31 +#: ../../source/ref-changelog.md:766 msgid "" -"Before we begin with the actual code, let's make sure that we have " -"everything we need." 
+"A new code example (`quickstart-fastai`) demonstrates federated learning " +"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " +"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" +"/quickstart-fastai)." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:43 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:44 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:43 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:43 -msgid "Installing dependencies" +#: ../../source/ref-changelog.md:768 +msgid "" +"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" +" versions of Android** " +"([#1603](https://github.com/adap/flower/pull/1603))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:45 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:46 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:45 -msgid "First, we install the necessary packages:" +#: ../../source/ref-changelog.md:770 +msgid "" +"The Android code example has received a substantial update: the project " +"is compatible with Flower 1.0 (and later), the UI received a full " +"refresh, and the project is updated to be compatible with newer Android " +"tooling." 
msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:65 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:66 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:65 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:65 +#: ../../source/ref-changelog.md:772 msgid "" -"Now that we have all dependencies installed, we can import everything we " -"need for this tutorial:" +"**Add new `FedProx` strategy** " +"([#1619](https://github.com/adap/flower/pull/1619))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:101 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:102 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:101 +#: ../../source/ref-changelog.md:774 msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " -"GPU > Save``). Note, however, that Google Colab is not always able to " -"offer GPU acceleration. If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." +"This " +"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" +" is almost identical to " +"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," +" but helps users replicate what is described in this " +"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " +"parameter called `proximal_mu` to regularize the local models with " +"respect to the global models." 
msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:114 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:115 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:114 -msgid "Data loading" +#: ../../source/ref-changelog.md:776 +msgid "" +"**Add new metrics to telemetry events** " +"([#1640](https://github.com/adap/flower/pull/1640))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:116 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:116 +#: ../../source/ref-changelog.md:778 msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``. We introduce a new parameter" -" ``num_clients`` which allows us to call ``load_datasets`` with different" -" numbers of clients." +"An updated event structure allows, for example, the clustering of events " +"within the same workload." 
msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:167 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:168 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:167 -msgid "Model training/evaluation" +#: ../../source/ref-changelog.md:780 +msgid "" +"**Add new custom strategy tutorial section** " +"[#1623](https://github.com/adap/flower/pull/1623)" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:169 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:170 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 +#: ../../source/ref-changelog.md:782 msgid "" -"Let's continue with the usual model definition (including " -"``set_parameters`` and ``get_parameters``), training and test functions:" +"The Flower tutorial now has a new section that covers implementing a " +"custom strategy from scratch: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:258 -msgid "Flower client" +#: ../../source/ref-changelog.md:784 +msgid "" +"**Add new custom serialization tutorial section** " +"([#1622](https://github.com/adap/flower/pull/1622))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:260 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:260 +#: ../../source/ref-changelog.md:786 msgid "" -"To implement the Flower client, we (again) create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``. 
Here, we also pass the " -"``cid`` to the client and use it log additional details:" +"The Flower tutorial now has a new section that covers custom " +"serialization: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-customize-the-client-pytorch.ipynb)" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:308 -msgid "Let's test what we have so far before we continue:" +#: ../../source/ref-changelog.md:788 +msgid "" +"**General improvements** " +"([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " +"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github.com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " +"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github.com/adap/flower/pull/1590), " +"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/adap/flower/pull/1600), " +"[#1601](https://github.com/adap/flower/pull/1601), " +"[#1597](https://github.com/adap/flower/pull/1597), " +"[#1595](https://github.com/adap/flower/pull/1595), " +"[#1591](https://github.com/adap/flower/pull/1591), " +"[#1588](https://github.com/adap/flower/pull/1588), " +"[#1589](https://github.com/adap/flower/pull/1589), " 
+"[#1587](https://github.com/adap/flower/pull/1587), " +"[#1573](https://github.com/adap/flower/pull/1573), " +"[#1581](https://github.com/adap/flower/pull/1581), " +"[#1578](https://github.com/adap/flower/pull/1578), " +"[#1574](https://github.com/adap/flower/pull/1574), " +"[#1572](https://github.com/adap/flower/pull/1572), " +"[#1586](https://github.com/adap/flower/pull/1586))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:339 -msgid "Build a Strategy from scratch" +#: ../../source/ref-changelog.md:792 +msgid "" +"**Updated documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:341 +#: ../../source/ref-changelog.md:794 ../../source/ref-changelog.md:861 msgid "" -"Let’s overwrite the ``configure_fit`` method such that it passes a higher" -" learning rate (potentially also other hyperparameters) to the optimizer " -"of a fraction of the clients. We will keep the sampling of the clients as" -" it is in ``FedAvg`` and then change the configuration dictionary (one of" -" the ``FitIns`` attributes)." +"As usual, the documentation has improved quite a bit. It is another step " +"in our effort to make the Flower documentation the best documentation of " +"any project. Stay tuned and as always, feel free to provide feedback!" 
+msgstr "" + +#: ../../source/ref-changelog.md:800 +msgid "v1.2.0 (2023-01-13)" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:507 +#: ../../source/ref-changelog.md:806 msgid "" -"The only thing left is to use the newly created custom Strategy " -"``FedCustom`` when starting the experiment:" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." +" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:534 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:932 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:697 -msgid "Recap" +#: ../../source/ref-changelog.md:810 +msgid "" +"**Introduce new Flower Baseline: FedAvg MNIST** " +"([#1497](https://github.com/adap/flower/pull/1497), " +"[#1552](https://github.com/adap/flower/pull/1552))" +msgstr "" + +#: ../../source/ref-changelog.md:812 +msgid "" +"Over the coming weeks, we will be releasing a number of new reference " +"implementations useful especially to FL newcomers. They will typically " +"revisit well known papers from the literature, and be suitable for " +"integration in your own application or for experimentation, in order to " +"deepen your knowledge of FL in general. Today's release is the first in " +"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" +"pack-fedavg-mnist-cnn/)" +msgstr "" + +#: ../../source/ref-changelog.md:814 +msgid "" +"**Improve GPU support in simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" +msgstr "" + +#: ../../source/ref-changelog.md:816 +msgid "" +"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" +" to improve GPU support. The update includes some of the hard-earned " +"lessons from scaling simulations in GPU cluster environments. New " +"defaults make running GPU-based simulations substantially more robust." 
+msgstr "" + +#: ../../source/ref-changelog.md:818 +msgid "" +"**Improve GPU support in Jupyter Notebook tutorials** " +"([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" +msgstr "" + +#: ../../source/ref-changelog.md:820 +msgid "" +"Some users reported that Jupyter Notebooks have not always been easy to " +"use on GPU instances. We listened and made improvements to all of our " +"Jupyter notebooks! Check out the updated notebooks here:" +msgstr "" + +#: ../../source/ref-changelog.md:822 +msgid "" +"[An Introduction to Federated Learning](https://flower.ai/docs/framework" +"/tutorial-get-started-with-flower-pytorch.html)" +msgstr "" + +#: ../../source/ref-changelog.md:823 +msgid "" +"[Strategies in Federated Learning](https://flower.ai/docs/framework" +"/tutorial-use-a-federated-learning-strategy-pytorch.html)" +msgstr "" + +#: ../../source/ref-changelog.md:824 +msgid "" +"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" +"-strategy-from-scratch-pytorch.html)" +msgstr "" + +#: ../../source/ref-changelog.md:825 +msgid "" +"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" +"customize-the-client-pytorch.html)" +msgstr "" + +#: ../../source/ref-changelog.md:827 +msgid "" +"**Introduce optional telemetry** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" +msgstr "" + +#: ../../source/ref-changelog.md:829 +msgid "" +"After a [request for " +"feedback](https://github.com/adap/flower/issues/1534) from the community," +" the Flower open-source project introduces optional collection of " +"*anonymous* usage metrics to make well-informed decisions to improve " +"Flower. Doing this enables the Flower team to understand how Flower is " +"used and what challenges users might face." 
+msgstr "" + +#: ../../source/ref-changelog.md:831 +msgid "" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users who do not want to share anonymous usage metrics. " +"[Read more.](https://flower.ai/docs/telemetry.html)." +msgstr "" + +#: ../../source/ref-changelog.md:833 +msgid "" +"**Introduce (experimental) Driver API** " +"([#1520](https://github.com/adap/flower/pull/1520), " +"[#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" +msgstr "" + +#: ../../source/ref-changelog.md:835 +msgid "" +"Flower now has a new (experimental) Driver API which will enable fully " +"programmable, async, and multi-tenant Federated Learning and Federated " +"Analytics applications. Phew, that's a lot! Going forward, the Driver API" +" will be the abstraction that many upcoming features will be built on - " +"and you can start building those things now, too." +msgstr "" + +#: ../../source/ref-changelog.md:837 +msgid "" +"The Driver API also enables a new execution mode in which the server runs" +" indefinitely. Multiple individual workloads can run concurrently and " +"start and stop their execution independent of the server. This is " +"especially useful for users who want to deploy Flower in production." +msgstr "" + +#: ../../source/ref-changelog.md:839 +msgid "" +"To learn more, check out the `mt-pytorch` code example. We look forward " +"to you feedback!" 
+msgstr "" + +#: ../../source/ref-changelog.md:841 +msgid "" +"Please note: *The Driver API is still experimental and will likely change" +" significantly over time.*" +msgstr "" + +#: ../../source/ref-changelog.md:843 +msgid "" +"**Add new Federated Analytics with Pandas example** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" +msgstr "" + +#: ../../source/ref-changelog.md:845 +msgid "" +"A new code example (`quickstart-pandas`) demonstrates federated analytics" +" with Pandas and Flower. You can find it here: [quickstart-" +"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" +"pandas)." +msgstr "" + +#: ../../source/ref-changelog.md:847 +msgid "" +"**Add new strategies: Krum and MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" +msgstr "" + +#: ../../source/ref-changelog.md:849 +msgid "" +"Edoardo, a computer science student at the Sapienza University of Rome, " +"contributed a new `Krum` strategy that enables users to easily use Krum " +"and MultiKrum in their workloads." +msgstr "" + +#: ../../source/ref-changelog.md:851 +msgid "" +"**Update C++ example to be compatible with Flower v1.2.0** " +"([#1495](https://github.com/adap/flower/pull/1495))" +msgstr "" + +#: ../../source/ref-changelog.md:853 +msgid "" +"The C++ code example has received a substantial update to make it " +"compatible with the latest version of Flower." 
+msgstr "" + +#: ../../source/ref-changelog.md:855 +msgid "" +"**General improvements** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:536 -msgid "" -"In this notebook, we’ve seen how to implement a custom strategy. A custom" -" strategy enables granular control over client node configuration, result" -" aggregation, and more. To define a custom strategy, you only have to " -"overwrite the abstract methods of the (abstract) base class ``Strategy``." -" To make custom strategies even more powerful, you can pass custom " -"functions to the constructor of your new class (``__init__``) and then " -"call these functions whenever needed." 
-msgstr "" +#: ../../source/ref-changelog.md:859 +msgid "" +"**Updated documentation** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" +msgstr "" + +#: ../../source/ref-changelog.md:863 +msgid "" +"One highlight is the new [first time contributor " +"guide](https://flower.ai/docs/first-time-contributors.html): if you've " +"never contributed on GitHub before, this is the perfect place to start!" +msgstr "" + +#: ../../source/ref-changelog.md:869 +msgid "v1.1.0 (2022-10-31)" +msgstr "" + +#: ../../source/ref-changelog.md:873 +msgid "" +"We would like to give our **special thanks** to all the contributors who " +"made the new version of Flower possible (in `git shortlog` order):" +msgstr "" + +#: ../../source/ref-changelog.md:875 +msgid "" +"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" +msgstr "" + +#: ../../source/ref-changelog.md:879 +msgid "" +"**Introduce Differential Privacy wrappers (preview)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" +msgstr "" + +#: ../../source/ref-changelog.md:881 +msgid "" +"The first (experimental) preview of pluggable Differential Privacy " +"wrappers enables easy configuration and usage of differential privacy " +"(DP). 
The pluggable DP wrappers enable framework-agnostic **and** " +"strategy-agnostic usage of both client-side DP and server-side DP. Head " +"over to the Flower docs, a new explainer goes into more detail." +msgstr "" + +#: ../../source/ref-changelog.md:883 +msgid "" +"**New iOS CoreML code example** " +"([#1289](https://github.com/adap/flower/pull/1289))" +msgstr "" + +#: ../../source/ref-changelog.md:885 +msgid "" +"Flower goes iOS! A massive new code example shows how Flower clients can " +"be built for iOS. The code example contains both Flower iOS SDK " +"components that can be used for many tasks, and one task example running " +"on CoreML." +msgstr "" + +#: ../../source/ref-changelog.md:887 +msgid "" +"**New FedMedian strategy** " +"([#1461](https://github.com/adap/flower/pull/1461))" +msgstr "" + +#: ../../source/ref-changelog.md:889 +msgid "" +"The new `FedMedian` strategy implements Federated Median (FedMedian) by " +"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." +msgstr "" + +#: ../../source/ref-changelog.md:891 +msgid "" +"**Log** `Client` **exceptions in Virtual Client Engine** " +"([#1493](https://github.com/adap/flower/pull/1493))" +msgstr "" + +#: ../../source/ref-changelog.md:893 +msgid "" +"All `Client` exceptions happening in the VCE are now logged by default " +"and not just exposed to the configured `Strategy` (via the `failures` " +"argument)." +msgstr "" + +#: ../../source/ref-changelog.md:895 +msgid "" +"**Improve Virtual Client Engine internals** " +"([#1401](https://github.com/adap/flower/pull/1401), " +"[#1453](https://github.com/adap/flower/pull/1453))" +msgstr "" + +#: ../../source/ref-changelog.md:897 +msgid "" +"Some internals of the Virtual Client Engine have been revamped. The VCE " +"now uses Ray 2.0 under the hood, the value type of the `client_resources`" +" dictionary changed to `float` to allow fractions of resources to be " +"allocated." 
+msgstr "" + +#: ../../source/ref-changelog.md:899 +msgid "" +"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " +"Client Engine**" +msgstr "" + +#: ../../source/ref-changelog.md:901 +msgid "" +"The Virtual Client Engine now has full support for optional `Client` (and" +" `NumPyClient`) methods." +msgstr "" + +#: ../../source/ref-changelog.md:903 +msgid "" +"**Provide type information to packages using** `flwr` " +"([#1377](https://github.com/adap/flower/pull/1377))" +msgstr "" + +#: ../../source/ref-changelog.md:905 +msgid "" +"The package `flwr` is now bundled with a `py.typed` file indicating that " +"the package is typed. This enables typing support for projects or " +"packages that use `flwr` by enabling them to improve their code using " +"static type checkers like `mypy`." +msgstr "" + +#: ../../source/ref-changelog.md:907 +msgid "" +"**Updated code example** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" +msgstr "" + +#: ../../source/ref-changelog.md:909 +msgid "" +"The code examples covering scikit-learn and PyTorch Lightning have been " +"updated to work with the latest version of Flower." 
+msgstr "" + +#: ../../source/ref-changelog.md:911 +msgid "" +"**Updated documentation** " +"([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github.com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" +msgstr "" + +#: ../../source/ref-changelog.md:913 +msgid "" +"There have been so many documentation updates that it doesn't even make " +"sense to list them individually." +msgstr "" + +#: ../../source/ref-changelog.md:915 +msgid "" +"**Restructured documentation** " +"([#1387](https://github.com/adap/flower/pull/1387))" +msgstr "" + +#: ../../source/ref-changelog.md:917 +msgid "" +"The documentation has been restructured to make it easier to navigate. " +"This is just the first step in a larger effort to make the Flower " +"documentation the best documentation of any project ever. Stay tuned!" +msgstr "" + +#: ../../source/ref-changelog.md:919 +msgid "" +"**Open in Colab button** " +"([#1389](https://github.com/adap/flower/pull/1389))" +msgstr "" + +#: ../../source/ref-changelog.md:921 +msgid "" +"The four parts of the Flower Federated Learning Tutorial now come with a " +"new `Open in Colab` button. 
No need to install anything on your local " +"machine, you can now use and learn about Flower in your browser, it's " +"only a single click away." +msgstr "" + +#: ../../source/ref-changelog.md:923 +msgid "" +"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" +msgstr "" + +#: ../../source/ref-changelog.md:925 +msgid "" +"The Flower Federated Learning Tutorial has two brand-new parts covering " +"custom strategies (still WIP) and the distinction between `Client` and " +"`NumPyClient`. The existing parts one and two have also been improved " +"(many small changes and fixes)." +msgstr "" + +#: ../../source/ref-changelog.md:931 +msgid "v1.0.0 (2022-07-28)" +msgstr "" + +#: ../../source/ref-changelog.md:933 +msgid "Highlights" +msgstr "" + +#: ../../source/ref-changelog.md:935 +msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" +msgstr "" + +#: ../../source/ref-changelog.md:936 +msgid "All `Client`/`NumPyClient` methods are now optional" +msgstr "" + +#: ../../source/ref-changelog.md:937 +msgid "Configurable `get_parameters`" +msgstr "" + +#: ../../source/ref-changelog.md:938 +msgid "" +"Tons of small API cleanups resulting in a more coherent developer " +"experience" +msgstr "" + +#: ../../source/ref-changelog.md:942 +msgid "" +"We would like to give our **special thanks** to all the contributors who " +"made Flower 1.0 possible (in reverse [GitHub " +"Contributors](https://github.com/adap/flower/graphs/contributors) order):" +msgstr "" + +#: ../../source/ref-changelog.md:944 +msgid "" +"[@rtaiello](https://github.com/rtaiello), " +"[@g-pichler](https://github.com/g-pichler), [@rob-" +"luke](https://github.com/rob-luke), 
[@andreea-zaharia](https://github.com" +"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " +"[@nfnt](https://github.com/nfnt), " +"[@tatiana-s](https://github.com/tatiana-s), " +"[@TParcollet](https://github.com/TParcollet), " +"[@vballoli](https://github.com/vballoli), " +"[@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " +"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " +"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" +"/Jueun-Park), [@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), " +"[@mrinaald](https://github.com/mrinaald), " +"[@zliel](https://github.com/zliel), " +"[@MeiruiJiang](https://github.com/MeiruiJiang), " +"[@sancarlim](https://github.com/sancarlim), " +"[@gubertoli](https://github.com/gubertoli), " +"[@Vingt100](https://github.com/Vingt100), " +"[@MakGulati](https://github.com/MakGulati), " +"[@cozek](https://github.com/cozek), " +"[@jafermarq](https://github.com/jafermarq), " +"[@sisco0](https://github.com/sisco0), " +"[@akhilmathurs](https://github.com/akhilmathurs), " +"[@CanTuerk](https://github.com/CanTuerk), " +"[@mariaboerner1987](https://github.com/mariaboerner1987), " +"[@pedropgusmao](https://github.com/pedropgusmao), " +"[@tanertopal](https://github.com/tanertopal), " +"[@danieljanes](https://github.com/danieljanes)." 
+msgstr "" + +#: ../../source/ref-changelog.md:948 +msgid "" +"**All arguments must be passed as keyword arguments** " +"([#1338](https://github.com/adap/flower/pull/1338))" +msgstr "" + +#: ../../source/ref-changelog.md:950 +msgid "" +"Pass all arguments as keyword arguments, positional arguments are not " +"longer supported. Code that uses positional arguments (e.g., " +"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " +"for each positional argument (e.g., " +"`start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())`)." +msgstr "" + +#: ../../source/ref-changelog.md:952 +msgid "" +"**Introduce configuration object** `ServerConfig` **in** `start_server` " +"**and** `start_simulation` " +"([#1317](https://github.com/adap/flower/pull/1317))" +msgstr "" + +#: ../../source/ref-changelog.md:954 +msgid "" +"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " +"600.0}`, `start_server` and `start_simulation` now expect a configuration" +" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" +" arguments that as the previous config dict, but it makes writing type-" +"safe code easier and the default parameters values more transparent." 
+msgstr "" + +#: ../../source/ref-changelog.md:956 +msgid "" +"**Rename built-in strategy parameters for clarity** " +"([#1334](https://github.com/adap/flower/pull/1334))" +msgstr "" + +#: ../../source/ref-changelog.md:958 +msgid "" +"The following built-in strategy parameters were renamed to improve " +"readability and consistency with other API's:" +msgstr "" + +#: ../../source/ref-changelog.md:960 +msgid "`fraction_eval` --> `fraction_evaluate`" +msgstr "" + +#: ../../source/ref-changelog.md:961 +msgid "`min_eval_clients` --> `min_evaluate_clients`" +msgstr "" + +#: ../../source/ref-changelog.md:962 +msgid "`eval_fn` --> `evaluate_fn`" +msgstr "" + +#: ../../source/ref-changelog.md:964 +msgid "" +"**Update default arguments of built-in strategies** " +"([#1278](https://github.com/adap/flower/pull/1278))" +msgstr "" + +#: ../../source/ref-changelog.md:966 +msgid "" +"All built-in strategies now use `fraction_fit=1.0` and " +"`fraction_evaluate=1.0`, which means they select *all* currently " +"available clients for training and evaluation. Projects that relied on " +"the previous default values can get the previous behaviour by " +"initializing the strategy in the following way:" +msgstr "" + +#: ../../source/ref-changelog.md:968 +msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" +msgstr "" + +#: ../../source/ref-changelog.md:970 +msgid "" +"**Add** `server_round` **to** `Strategy.evaluate` " +"([#1334](https://github.com/adap/flower/pull/1334))" +msgstr "" + +#: ../../source/ref-changelog.md:972 +msgid "" +"The `Strategy` method `evaluate` now receives the current round of " +"federated learning/evaluation as the first parameter." 
+msgstr "" + +#: ../../source/ref-changelog.md:974 +msgid "" +"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " +"([#1334](https://github.com/adap/flower/pull/1334))" +msgstr "" + +#: ../../source/ref-changelog.md:976 +msgid "" +"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " +"three parameters: (1) The current round of federated learning/evaluation " +"(`server_round`), (2) the model parameters to evaluate (`parameters`), " +"and (3) a config dictionary (`config`)." +msgstr "" + +#: ../../source/ref-changelog.md:978 +msgid "" +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" +msgstr "" + +#: ../../source/ref-changelog.md:980 +msgid "" +"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " +"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " +"current round of federated learning/evaluation as their first parameter. " +"To improve reaability and avoid confusion with *random*, this parameter " +"has been renamed from `rnd` to `server_round`." +msgstr "" + +#: ../../source/ref-changelog.md:982 +msgid "" +"**Move** `flwr.dataset` **to** `flwr_baselines` " +"([#1273](https://github.com/adap/flower/pull/1273))" +msgstr "" + +#: ../../source/ref-changelog.md:984 +msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." +msgstr "" + +#: ../../source/ref-changelog.md:986 +msgid "" +"**Remove experimental strategies** " +"([#1280](https://github.com/adap/flower/pull/1280))" +msgstr "" + +#: ../../source/ref-changelog.md:988 +msgid "" +"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " +"`FedFSv1`)." 
+msgstr "" + +#: ../../source/ref-changelog.md:990 +msgid "" +"**Rename** `Weights` **to** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" +msgstr "" + +#: ../../source/ref-changelog.md:992 +msgid "" +"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " +"capture what this type is all about." +msgstr "" + +#: ../../source/ref-changelog.md:994 +msgid "" +"**Remove antiquated** `force_final_distributed_eval` **from** " +"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" +msgstr "" + +#: ../../source/ref-changelog.md:996 +msgid "" +"The `start_server` parameter `force_final_distributed_eval` has long been" +" a historic artefact, in this release it is finally gone for good." +msgstr "" + +#: ../../source/ref-changelog.md:998 +msgid "" +"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" +msgstr "" + +#: ../../source/ref-changelog.md:1000 +msgid "" +"The `get_parameters` method now accepts a configuration dictionary, just " +"like `get_properties`, `fit`, and `evaluate`." +msgstr "" + +#: ../../source/ref-changelog.md:1002 +msgid "" +"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " +"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" +msgstr "" + +#: ../../source/ref-changelog.md:1004 +msgid "" +"The `start_simulation` function now accepts a configuration dictionary " +"`config` instead of the `num_rounds` integer. This improves the " +"consistency between `start_simulation` and `start_server` and makes " +"transitioning between the two easier." 
+msgstr "" + +#: ../../source/ref-changelog.md:1008 +msgid "" +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" +msgstr "" + +#: ../../source/ref-changelog.md:1010 +msgid "" +"The previous Flower release introduced experimental support for Python " +"3.10, this release declares Python 3.10 support as stable." +msgstr "" + +#: ../../source/ref-changelog.md:1012 +msgid "" +"**Make all** `Client` **and** `NumPyClient` **methods optional** " +"([#1260](https://github.com/adap/flower/pull/1260), " +"[#1277](https://github.com/adap/flower/pull/1277))" +msgstr "" + +#: ../../source/ref-changelog.md:1014 +msgid "" +"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " +"`fit`, and `evaluate` are all optional. This enables writing clients that" +" implement, for example, only `fit`, but no other method. No need to " +"implement `evaluate` when using centralized evaluation!" +msgstr "" + +#: ../../source/ref-changelog.md:1016 +msgid "" +"**Enable passing a** `Server` **instance to** `start_simulation` " +"([#1281](https://github.com/adap/flower/pull/1281))" +msgstr "" + +#: ../../source/ref-changelog.md:1018 +msgid "" +"Similar to `start_server`, `start_simulation` now accepts a full `Server`" +" instance. This enables users to heavily customize the execution of " +"eperiments and opens the door to running, for example, async FL using the" +" Virtual Client Engine." 
+msgstr "" + +#: ../../source/ref-changelog.md:1020 +msgid "" +"**Update code examples** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" +msgstr "" + +#: ../../source/ref-changelog.md:1022 +msgid "" +"Many code examples received small or even large maintenance updates, " +"among them are" +msgstr "" + +#: ../../source/ref-changelog.md:1024 +msgid "`scikit-learn`" +msgstr "" + +#: ../../source/ref-changelog.md:1025 +msgid "`simulation_pytorch`" +msgstr "" + +#: ../../source/ref-changelog.md:1026 +msgid "`quickstart_pytorch`" +msgstr "" + +#: ../../source/ref-changelog.md:1027 +msgid "`quickstart_simulation`" +msgstr "" + +#: ../../source/ref-changelog.md:1028 +msgid "`quickstart_tensorflow`" +msgstr "" + +#: ../../source/ref-changelog.md:1029 +msgid "`advanced_tensorflow`" +msgstr "" + +#: ../../source/ref-changelog.md:1031 +msgid "" +"**Remove the obsolete simulation example** " +"([#1328](https://github.com/adap/flower/pull/1328))" +msgstr "" + +#: ../../source/ref-changelog.md:1033 +msgid "" +"Removes the obsolete `simulation` example and renames " +"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " +"naming of `simulation_pytorch`" +msgstr "" + +#: ../../source/ref-changelog.md:1035 +msgid "" +"**Update documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" +msgstr "" + +#: ../../source/ref-changelog.md:1037 +msgid "" 
+"One substantial documentation update fixes multiple smaller rendering " +"issues, makes titles more succinct to improve navigation, removes a " +"deprecated library, updates documentation dependencies, includes the " +"`flwr.common` module in the API reference, includes support for markdown-" +"based documentation, migrates the changelog from `.rst` to `.md`, and " +"fixes a number of smaller details!" +msgstr "" + +#: ../../source/ref-changelog.md:1039 ../../source/ref-changelog.md:1094 +#: ../../source/ref-changelog.md:1163 ../../source/ref-changelog.md:1202 +msgid "**Minor updates**" +msgstr "" + +#: ../../source/ref-changelog.md:1041 +msgid "" +"Add round number to fit and evaluate log messages " +"([#1266](https://github.com/adap/flower/pull/1266))" +msgstr "" + +#: ../../source/ref-changelog.md:1042 +msgid "" +"Add secure gRPC connection to the `advanced_tensorflow` code example " +"([#847](https://github.com/adap/flower/pull/847))" +msgstr "" + +#: ../../source/ref-changelog.md:1043 +msgid "" +"Update developer tooling " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" +msgstr "" + +#: ../../source/ref-changelog.md:1044 +msgid "" +"Rename ProtoBuf messages to improve consistency " +"([#1214](https://github.com/adap/flower/pull/1214), " +"[#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" +msgstr "" + +#: ../../source/ref-changelog.md:1046 +msgid "v0.19.0 (2022-05-18)" +msgstr "" + +#: ../../source/ref-changelog.md:1050 +msgid "" +"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" +msgstr "" + +#: ../../source/ref-changelog.md:1052 +msgid "" +"The first preview release 
of Flower Baselines has arrived! We're " +"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " +"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " +"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " +"With this first preview release we're also inviting the community to " +"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" +"contribute-baselines.html)." +msgstr "" + +#: ../../source/ref-changelog.md:1054 +msgid "" +"**C++ client SDK (preview) and code example** " +"([#1111](https://github.com/adap/flower/pull/1111))" +msgstr "" + +#: ../../source/ref-changelog.md:1056 +msgid "" +"Preview support for Flower clients written in C++. The C++ preview " +"includes a Flower client SDK and a quickstart code example that " +"demonstrates a simple C++ client using the SDK." +msgstr "" + +#: ../../source/ref-changelog.md:1058 +msgid "" +"**Add experimental support for Python 3.10 and Python 3.11** " +"([#1135](https://github.com/adap/flower/pull/1135))" +msgstr "" + +#: ../../source/ref-changelog.md:1060 +msgid "" +"Python 3.10 is the latest stable release of Python and Python 3.11 is due" +" to be released in October. This Flower release adds experimental support" +" for both Python versions." +msgstr "" + +#: ../../source/ref-changelog.md:1062 +msgid "" +"**Aggregate custom metrics through user-provided functions** " +"([#1144](https://github.com/adap/flower/pull/1144))" +msgstr "" + +#: ../../source/ref-changelog.md:1064 +msgid "" +"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" +" customize the strategy. Built-in strategies support two new arguments, " +"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " +"allow passing custom metric aggregation functions." 
+msgstr "" + +#: ../../source/ref-changelog.md:1066 +msgid "" +"**User-configurable round timeout** " +"([#1162](https://github.com/adap/flower/pull/1162))" +msgstr "" + +#: ../../source/ref-changelog.md:1068 +msgid "" +"A new configuration value allows the round timeout to be set for " +"`start_server` and `start_simulation`. If the `config` dictionary " +"contains a `round_timeout` key (with a `float` value in seconds), the " +"server will wait *at least* `round_timeout` seconds before it closes the " +"connection." +msgstr "" + +#: ../../source/ref-changelog.md:1070 +msgid "" +"**Enable both federated evaluation and centralized evaluation to be used " +"at the same time in all built-in strategies** " +"([#1091](https://github.com/adap/flower/pull/1091))" +msgstr "" + +#: ../../source/ref-changelog.md:1072 +msgid "" +"Built-in strategies can now perform both federated evaluation (i.e., " +"client-side) and centralized evaluation (i.e., server-side) in the same " +"round. Federated evaluation can be disabled by setting `fraction_eval` to" +" `0.0`." 
+msgstr "" + +#: ../../source/ref-changelog.md:1074 +msgid "" +"**Two new Jupyter Notebook tutorials** " +"([#1141](https://github.com/adap/flower/pull/1141))" +msgstr "" + +#: ../../source/ref-changelog.md:1076 +msgid "" +"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " +"basic and intermediate Flower features:" +msgstr "" + +#: ../../source/ref-changelog.md:1078 +msgid "" +"*An Introduction to Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" +msgstr "" + +#: ../../source/ref-changelog.md:1080 +msgid "" +"*Using Strategies in Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" +msgstr "" + +#: ../../source/ref-changelog.md:1082 +msgid "" +"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " +"([#1076](https://github.com/adap/flower/pull/1076))" +msgstr "" + +#: ../../source/ref-changelog.md:1084 +msgid "" +"The new `FedAvgM` strategy implements Federated Averaging with Server " +"Momentum \\[Hsu et al., 2019\\]." +msgstr "" + +#: ../../source/ref-changelog.md:1086 +msgid "" +"**New advanced PyTorch code example** " +"([#1007](https://github.com/adap/flower/pull/1007))" +msgstr "" + +#: ../../source/ref-changelog.md:1088 +msgid "" +"A new code example (`advanced_pytorch`) demonstrates advanced Flower " +"concepts with PyTorch." +msgstr "" + +#: ../../source/ref-changelog.md:1090 +msgid "" +"**New JAX code example** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" +msgstr "" + +#: ../../source/ref-changelog.md:1092 +msgid "" +"A new code example (`jax_from_centralized_to_federated`) shows federated " +"learning with JAX and Flower." 
+msgstr "" + +#: ../../source/ref-changelog.md:1096 +msgid "" +"New option to keep Ray running if Ray was already initialized in " +"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" +msgstr "" + +#: ../../source/ref-changelog.md:1097 +msgid "" +"Add support for custom `ClientManager` as a `start_simulation` parameter " +"([#1171](https://github.com/adap/flower/pull/1171))" +msgstr "" + +#: ../../source/ref-changelog.md:1098 +msgid "" +"New documentation for [implementing " +"strategies](https://flower.ai/docs/framework/how-to-implement-" +"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175))" +msgstr "" + +#: ../../source/ref-changelog.md:1099 +msgid "" +"New mobile-friendly documentation theme " +"([#1174](https://github.com/adap/flower/pull/1174))" +msgstr "" + +#: ../../source/ref-changelog.md:1100 +msgid "" +"Limit version range for (optional) `ray` dependency to include only " +"compatible releases (`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" +msgstr "" + +#: ../../source/ref-changelog.md:1104 +msgid "" +"**Remove deprecated support for Python 3.6** " +"([#871](https://github.com/adap/flower/pull/871))" +msgstr "" + +#: ../../source/ref-changelog.md:1105 +msgid "" +"**Remove deprecated KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" +msgstr "" + +#: ../../source/ref-changelog.md:1106 +msgid "" +"**Remove deprecated no-op extra installs** " +"([#973](https://github.com/adap/flower/pull/973))" +msgstr "" + +#: ../../source/ref-changelog.md:1107 +msgid "" +"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " +"([#869](https://github.com/adap/flower/pull/869))" +msgstr "" + +#: ../../source/ref-changelog.md:1108 +msgid "" +"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " +"([#1107](https://github.com/adap/flower/pull/1107))" +msgstr "" + +#: ../../source/ref-changelog.md:1109 +msgid 
"" +"**Remove deprecated DefaultStrategy strategy** " +"([#1142](https://github.com/adap/flower/pull/1142))" +msgstr "" + +#: ../../source/ref-changelog.md:1110 +msgid "" +"**Remove deprecated support for eval_fn accuracy return value** " +"([#1142](https://github.com/adap/flower/pull/1142))" +msgstr "" + +#: ../../source/ref-changelog.md:1111 +msgid "" +"**Remove deprecated support for passing initial parameters as NumPy " +"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" +msgstr "" + +#: ../../source/ref-changelog.md:1113 +msgid "v0.18.0 (2022-02-28)" +msgstr "" + +#: ../../source/ref-changelog.md:1117 +msgid "" +"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " +"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" +msgstr "" + +#: ../../source/ref-changelog.md:1119 +msgid "" +"Simulations (using the Virtual Client Engine through `start_simulation`) " +"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " +"installing Flower with the `simulation` extra (`pip install " +"'flwr[simulation]'`)." +msgstr "" + +#: ../../source/ref-changelog.md:1121 +msgid "" +"**New Jupyter Notebook code example** " +"([#833](https://github.com/adap/flower/pull/833))" +msgstr "" + +#: ../../source/ref-changelog.md:1123 +msgid "" +"A new code example (`quickstart_simulation`) demonstrates Flower " +"simulations using the Virtual Client Engine through Jupyter Notebook " +"(incl. Google Colab)." +msgstr "" + +#: ../../source/ref-changelog.md:1125 +msgid "" +"**Client properties (feature preview)** " +"([#795](https://github.com/adap/flower/pull/795))" +msgstr "" + +#: ../../source/ref-changelog.md:1127 +msgid "" +"Clients can implement a new method `get_properties` to enable server-side" +" strategies to query client properties." 
+msgstr "" + +#: ../../source/ref-changelog.md:1129 +msgid "" +"**Experimental Android support with TFLite** " +"([#865](https://github.com/adap/flower/pull/865))" +msgstr "" + +#: ../../source/ref-changelog.md:1131 +msgid "" +"Android support has finally arrived in `main`! Flower is both client-" +"agnostic and framework-agnostic by design. One can integrate arbitrary " +"client platforms and with this release, using Flower on Android has " +"become a lot easier." +msgstr "" + +#: ../../source/ref-changelog.md:1133 +msgid "" +"The example uses TFLite on the client side, along with a new " +"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " +"still experimental, but they are a first step towards a fully-fledged " +"Android SDK and a unified `FedAvg` implementation that integrated the new" +" functionality from `FedAvgAndroid`." +msgstr "" + +#: ../../source/ref-changelog.md:1135 +msgid "" +"**Make gRPC keepalive time user-configurable and decrease default " +"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" +msgstr "" + +#: ../../source/ref-changelog.md:1137 +msgid "" +"The default gRPC keepalive time has been reduced to increase the " +"compatibility of Flower with more cloud environments (for example, " +"Microsoft Azure). Users can configure the keepalive time to customize the" +" gRPC stack based on specific requirements." +msgstr "" + +#: ../../source/ref-changelog.md:1139 +msgid "" +"**New differential privacy example using Opacus and PyTorch** " +"([#805](https://github.com/adap/flower/pull/805))" +msgstr "" + +#: ../../source/ref-changelog.md:1141 +msgid "" +"A new code example (`opacus`) demonstrates differentially-private " +"federated learning with Opacus, PyTorch, and Flower." 
+msgstr "" + +#: ../../source/ref-changelog.md:1143 +msgid "" +"**New Hugging Face Transformers code example** " +"([#863](https://github.com/adap/flower/pull/863))" +msgstr "" + +#: ../../source/ref-changelog.md:1145 +msgid "" +"A new code example (`quickstart_huggingface`) demonstrates usage of " +"Hugging Face Transformers with Flower." +msgstr "" + +#: ../../source/ref-changelog.md:1147 +msgid "" +"**New MLCube code example** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" +msgstr "" + +#: ../../source/ref-changelog.md:1149 +msgid "" +"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " +"with Flower." +msgstr "" + +#: ../../source/ref-changelog.md:1151 +msgid "" +"**SSL-enabled server and client** " +"([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" +msgstr "" + +#: ../../source/ref-changelog.md:1153 +msgid "" +"SSL enables secure encrypted connections between clients and servers. " +"This release open-sources the Flower secure gRPC implementation to make " +"encrypted communication channels accessible to all Flower users." +msgstr "" + +#: ../../source/ref-changelog.md:1155 +msgid "" +"**Updated** `FedAdam` **and** `FedYogi` **strategies** " +"([#885](https://github.com/adap/flower/pull/885), " +"[#895](https://github.com/adap/flower/pull/895))" +msgstr "" + +#: ../../source/ref-changelog.md:1157 +msgid "" +"`FedAdam` and `FedAdam` match the latest version of the Adaptive " +"Federated Optimization paper." 
+msgstr "" + +#: ../../source/ref-changelog.md:1159 +msgid "" +"**Initialize** `start_simulation` **with a list of client IDs** " +"([#860](https://github.com/adap/flower/pull/860))" +msgstr "" + +#: ../../source/ref-changelog.md:1161 +msgid "" +"`start_simulation` can now be called with a list of client IDs " +"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " +"`client_fn` whenever a client needs to be initialized, which can make it " +"easier to load data partitions that are not accessible through `int` " +"identifiers." +msgstr "" + +#: ../../source/ref-changelog.md:1165 +msgid "" +"Update `num_examples` calculation in PyTorch code examples in " +"([#909](https://github.com/adap/flower/pull/909))" +msgstr "" + +#: ../../source/ref-changelog.md:1166 +msgid "" +"Expose Flower version through `flwr.__version__` " +"([#952](https://github.com/adap/flower/pull/952))" +msgstr "" + +#: ../../source/ref-changelog.md:1167 +msgid "" +"`start_server` in `app.py` now returns a `History` object containing " +"metrics from training ([#974](https://github.com/adap/flower/pull/974))" +msgstr "" + +#: ../../source/ref-changelog.md:1168 +msgid "" +"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " +"([#978](https://github.com/adap/flower/pull/978))" +msgstr "" + +#: ../../source/ref-changelog.md:1169 +msgid "" +"Increase sleep time after server start to three seconds in all code " +"examples ([#1086](https://github.com/adap/flower/pull/1086))" +msgstr "" + +#: ../../source/ref-changelog.md:1170 +msgid "" +"Added a new FAQ section to the documentation " +"([#948](https://github.com/adap/flower/pull/948))" +msgstr "" + +#: ../../source/ref-changelog.md:1171 +msgid "" +"And many more under-the-hood changes, library updates, documentation " +"changes, and tooling improvements!" 
+msgstr "" + +#: ../../source/ref-changelog.md:1175 +msgid "" +"**Removed** `flwr_example` **and** `flwr_experimental` **from release " +"build** ([#869](https://github.com/adap/flower/pull/869))" +msgstr "" + +#: ../../source/ref-changelog.md:1177 +msgid "" +"The packages `flwr_example` and `flwr_experimental` have been deprecated " +"since Flower 0.12.0 and they are not longer included in Flower release " +"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " +"an upcoming release." +msgstr "" + +#: ../../source/ref-changelog.md:1179 +msgid "v0.17.0 (2021-09-24)" +msgstr "" + +#: ../../source/ref-changelog.md:1183 +msgid "" +"**Experimental virtual client engine** " +"([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" +msgstr "" + +#: ../../source/ref-changelog.md:1185 +msgid "" +"One of Flower's goals is to enable research at scale. This release " +"enables a first (experimental) peek at a major new feature, codenamed the" +" virtual client engine. Virtual clients enable simulations that scale to " +"a (very) large number of clients on a single machine or compute cluster. " +"The easiest way to test the new functionality is to look at the two new " +"code examples called `quickstart_simulation` and `simulation_pytorch`." +msgstr "" + +#: ../../source/ref-changelog.md:1187 +msgid "" +"The feature is still experimental, so there's no stability guarantee for " +"the API. It's also not quite ready for prime time and comes with a few " +"known caveats. However, those who are curious are encouraged to try it " +"out and share their thoughts." 
+msgstr "" + +#: ../../source/ref-changelog.md:1189 +msgid "" +"**New built-in strategies** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" +msgstr "" + +#: ../../source/ref-changelog.md:1191 +msgid "" +"FedYogi - Federated learning strategy using Yogi on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" +msgstr "" + +#: ../../source/ref-changelog.md:1192 +msgid "" +"FedAdam - Federated learning strategy using Adam on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" +msgstr "" + +#: ../../source/ref-changelog.md:1194 +msgid "" +"**New PyTorch Lightning code example** " +"([#617](https://github.com/adap/flower/pull/617))" +msgstr "" + +#: ../../source/ref-changelog.md:1196 +msgid "" +"**New Variational Auto-Encoder code example** " +"([#752](https://github.com/adap/flower/pull/752))" +msgstr "" + +#: ../../source/ref-changelog.md:1198 +msgid "" +"**New scikit-learn code example** " +"([#748](https://github.com/adap/flower/pull/748))" +msgstr "" + +#: ../../source/ref-changelog.md:1200 +msgid "" +"**New experimental TensorBoard strategy** " +"([#789](https://github.com/adap/flower/pull/789))" +msgstr "" + +#: ../../source/ref-changelog.md:1204 +msgid "" +"Improved advanced TensorFlow code example " +"([#769](https://github.com/adap/flower/pull/769))" +msgstr "" + +#: ../../source/ref-changelog.md:1205 +msgid "" +"Warning when `min_available_clients` is misconfigured " +"([#830](https://github.com/adap/flower/pull/830))" +msgstr "" + +#: ../../source/ref-changelog.md:1206 +msgid "" +"Improved gRPC server docs " +"([#841](https://github.com/adap/flower/pull/841))" +msgstr "" + +#: ../../source/ref-changelog.md:1207 +msgid "" +"Improved error message in `NumPyClient` " +"([#851](https://github.com/adap/flower/pull/851))" +msgstr "" + +#: ../../source/ref-changelog.md:1208 +msgid "" +"Improved PyTorch quickstart code example " 
+"([#852](https://github.com/adap/flower/pull/852))" +msgstr "" + +#: ../../source/ref-changelog.md:1212 +msgid "" +"**Disabled final distributed evaluation** " +"([#800](https://github.com/adap/flower/pull/800))" +msgstr "" + +#: ../../source/ref-changelog.md:1214 +msgid "" +"Prior behaviour was to perform a final round of distributed evaluation on" +" all connected clients, which is often not required (e.g., when using " +"server-side evaluation). The prior behaviour can be enabled by passing " +"`force_final_distributed_eval=True` to `start_server`." +msgstr "" + +#: ../../source/ref-changelog.md:1216 +msgid "" +"**Renamed q-FedAvg strategy** " +"([#802](https://github.com/adap/flower/pull/802))" +msgstr "" + +#: ../../source/ref-changelog.md:1218 +msgid "" +"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " +"the notation given in the original paper (q-FFL is the optimization " +"objective, q-FedAvg is the proposed solver). Note the original (now " +"deprecated) `QffedAvg` class is still available for compatibility reasons" +" (it will be removed in a future release)." +msgstr "" + +#: ../../source/ref-changelog.md:1220 +msgid "" +"**Deprecated and renamed code example** `simulation_pytorch` **to** " +"`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" +msgstr "" + +#: ../../source/ref-changelog.md:1222 +msgid "" +"This example has been replaced by a new example. The new example is based" +" on the experimental virtual client engine, which will become the new " +"default way of doing most types of large-scale simulations in Flower. The" +" existing example was kept for reference purposes, but it might be " +"removed in the future." 
+msgstr "" + +#: ../../source/ref-changelog.md:1224 +msgid "v0.16.0 (2021-05-11)" +msgstr "" + +#: ../../source/ref-changelog.md:1228 +msgid "" +"**New built-in strategies** " +"([#549](https://github.com/adap/flower/pull/549))" +msgstr "" + +#: ../../source/ref-changelog.md:1230 +msgid "(abstract) FedOpt" +msgstr "" + +#: ../../source/ref-changelog.md:1233 +msgid "" +"**Custom metrics for server and strategies** " +"([#717](https://github.com/adap/flower/pull/717))" +msgstr "" + +#: ../../source/ref-changelog.md:1235 +msgid "" +"The Flower server is now fully task-agnostic, all remaining instances of " +"task-specific metrics (such as `accuracy`) have been replaced by custom " +"metrics dictionaries. Flower 0.15 introduced the capability to pass a " +"dictionary containing custom metrics from client to server. As of this " +"release, custom metrics replace task-specific metrics on the server." +msgstr "" + +#: ../../source/ref-changelog.md:1237 +msgid "" +"Custom metric dictionaries are now used in two user-facing APIs: they are" +" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " +"they enable evaluation functions passed to built-in strategies (via " +"`eval_fn`) to return more than two evaluation metrics. Strategies can " +"even return *aggregated* metrics dictionaries for the server to keep " +"track of." +msgstr "" + +#: ../../source/ref-changelog.md:1239 +msgid "" +"Strategy implementations should migrate their `aggregate_fit` and " +"`aggregate_evaluate` methods to the new return type (e.g., by simply " +"returning an empty `{}`), server-side evaluation functions should migrate" +" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." +msgstr "" + +#: ../../source/ref-changelog.md:1241 +msgid "" +"Flower 0.15-style return types are deprecated (but still supported), " +"compatibility will be removed in a future release." 
+msgstr "" + +#: ../../source/ref-changelog.md:1243 +msgid "" +"**Migration warnings for deprecated functionality** " +"([#690](https://github.com/adap/flower/pull/690))" +msgstr "" + +#: ../../source/ref-changelog.md:1245 +msgid "" +"Earlier versions of Flower were often migrated to new APIs, while " +"maintaining compatibility with legacy APIs. This release introduces " +"detailed warning messages if usage of deprecated APIs is detected. The " +"new warning messages often provide details on how to migrate to more " +"recent APIs, thus easing the transition from one release to another." +msgstr "" + +#: ../../source/ref-changelog.md:1247 +msgid "" +"Improved docs and docstrings " +"([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" +msgstr "" + +#: ../../source/ref-changelog.md:1249 +msgid "MXNet example and documentation" +msgstr "" + +#: ../../source/ref-changelog.md:1251 +msgid "" +"FedBN implementation in example PyTorch: From Centralized To Federated " +"([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" +msgstr "" + +#: ../../source/ref-changelog.md:1255 +msgid "" +"**Serialization-agnostic server** " +"([#721](https://github.com/adap/flower/pull/721))" +msgstr "" + +#: ../../source/ref-changelog.md:1257 +msgid "" +"The Flower server is now fully serialization-agnostic. Prior usage of " +"class `Weights` (which represents parameters as deserialized NumPy " +"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). " +"`Parameters` objects are fully serialization-agnostic and represents " +"parameters as byte arrays, the `tensor_type` attributes indicates how " +"these byte arrays should be interpreted (e.g., for " +"serialization/deserialization)." 
+msgstr "" + +#: ../../source/ref-changelog.md:1259 +msgid "" +"Built-in strategies implement this approach by handling serialization and" +" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " +"implementations should update to the slightly changed Strategy method " +"definitions. Strategy authors can consult PR " +"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" +" easily migrate to the new format." +msgstr "" + +#: ../../source/ref-changelog.md:1261 +msgid "" +"Deprecated `flwr.server.Server.evaluate`, use " +"`flwr.server.Server.evaluate_round` instead " +"([#717](https://github.com/adap/flower/pull/717))" +msgstr "" + +#: ../../source/ref-changelog.md:1263 +msgid "v0.15.0 (2021-03-12)" +msgstr "" + +#: ../../source/ref-changelog.md:1267 +msgid "" +"**Server-side parameter initialization** " +"([#658](https://github.com/adap/flower/pull/658))" +msgstr "" + +#: ../../source/ref-changelog.md:1269 +msgid "" +"Model parameters can now be initialized on the server-side. Server-side " +"parameter initialization works via a new `Strategy` method called " +"`initialize_parameters`." +msgstr "" + +#: ../../source/ref-changelog.md:1271 +msgid "" +"Built-in strategies support a new constructor argument called " +"`initial_parameters` to set the initial parameters. Built-in strategies " +"will provide these initial parameters to the server on startup and then " +"delete them to free the memory afterwards." +msgstr "" + +#: ../../source/ref-changelog.md:1290 +msgid "" +"If no initial parameters are provided to the strategy, the server will " +"continue to use the current behaviour (namely, it will ask one of the " +"connected clients for its parameters and use these as the initial global " +"parameters)." 
+msgstr "" + +#: ../../source/ref-changelog.md:1294 +msgid "" +"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " +"`flwr.server.strategy.FedAvg`, which is equivalent)" +msgstr "" + +#: ../../source/ref-changelog.md:1296 +msgid "v0.14.0 (2021-02-18)" +msgstr "" + +#: ../../source/ref-changelog.md:1300 +msgid "" +"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " +"([#610](https://github.com/adap/flower/pull/610) " +"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" +msgstr "" + +#: ../../source/ref-changelog.md:1302 +msgid "" +"Clients can now return an additional dictionary mapping `str` keys to " +"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " +"This means one can return almost arbitrary values from `fit`/`evaluate` " +"and make use of them on the server side!" +msgstr "" + +#: ../../source/ref-changelog.md:1304 +msgid "" +"This improvement also allowed for more consistent return types between " +"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " +"dict)` representing the loss, number of examples, and a dictionary " +"holding arbitrary problem-specific values like accuracy." +msgstr "" + +#: ../../source/ref-changelog.md:1306 +msgid "" +"In case you wondered: this feature is compatible with existing projects, " +"the additional dictionary return value is optional. New code should " +"however migrate to the new return types to be compatible with upcoming " +"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " +"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " +"details." 
+msgstr "" + +#: ../../source/ref-changelog.md:1308 +msgid "" +"*Code example:* note the additional dictionary return values in both " +"`FlwrClient.fit` and `FlwrClient.evaluate`:" +msgstr "" + +#: ../../source/ref-changelog.md:1323 +msgid "" +"**Generalized** `config` **argument in** `Client.fit` **and** " +"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" +msgstr "" + +#: ../../source/ref-changelog.md:1325 +msgid "" +"The `config` argument used to be of type `Dict[str, str]`, which means " +"that dictionary values were expected to be strings. The new release " +"generalizes this to enable values of the following types: `bool`, " +"`bytes`, `float`, `int`, `str`." +msgstr "" + +#: ../../source/ref-changelog.md:1327 +msgid "" +"This means one can now pass almost arbitrary values to `fit`/`evaluate` " +"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" +"side and `int(config[\"epochs\"])` on the client side!" +msgstr "" + +#: ../../source/ref-changelog.md:1329 +msgid "" +"*Code example:* note that the `config` dictionary now contains non-`str` " +"values in both `Client.fit` and `Client.evaluate`:" +msgstr "" + +#: ../../source/ref-changelog.md:1346 +msgid "v0.13.0 (2021-01-08)" +msgstr "" + +#: ../../source/ref-changelog.md:1350 +msgid "" +"New example: PyTorch From Centralized To Federated " +"([#549](https://github.com/adap/flower/pull/549))" +msgstr "" + +#: ../../source/ref-changelog.md:1351 +msgid "Improved documentation" +msgstr "" + +#: ../../source/ref-changelog.md:1352 +msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" +msgstr "" + +#: ../../source/ref-changelog.md:1353 +msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" +msgstr "" + +#: ../../source/ref-changelog.md:1354 +msgid "" +"Updated examples documentation " +"([#549](https://github.com/adap/flower/pull/549))" +msgstr "" + +#: ../../source/ref-changelog.md:1355 +msgid "" +"Removed obsolete 
documentation " +"([#548](https://github.com/adap/flower/pull/548))" +msgstr "" + +#: ../../source/ref-changelog.md:1357 +msgid "Bugfix:" +msgstr "" + +#: ../../source/ref-changelog.md:1359 +msgid "" +"`Server.fit` does not disconnect clients when finished, disconnecting the" +" clients is now handled in `flwr.server.start_server` " +"([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))." +msgstr "" + +#: ../../source/ref-changelog.md:1361 +msgid "v0.12.0 (2020-12-07)" +msgstr "" + +#: ../../source/ref-changelog.md:1363 ../../source/ref-changelog.md:1379 +msgid "Important changes:" +msgstr "" + +#: ../../source/ref-changelog.md:1365 +msgid "" +"Added an example for embedded devices " +"([#507](https://github.com/adap/flower/pull/507))" +msgstr "" + +#: ../../source/ref-changelog.md:1366 +msgid "" +"Added a new NumPyClient (in addition to the existing KerasClient) " +"([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508))" +msgstr "" + +#: ../../source/ref-changelog.md:1367 +msgid "" +"Deprecated `flwr_example` package and started to migrate examples into " +"the top-level `examples` directory " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" +msgstr "" + +#: ../../source/ref-changelog.md:1369 +msgid "v0.11.0 (2020-11-30)" +msgstr "" + +#: ../../source/ref-changelog.md:1371 +msgid "Incompatible changes:" +msgstr "" + +#: ../../source/ref-changelog.md:1373 +msgid "" +"Renamed strategy methods " +"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " +"Flower's public APIs. Other public methods/functions (e.g., every method " +"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " +"which is why we're removing it from the four methods in Strategy. 
To " +"migrate rename the following `Strategy` methods accordingly:" +msgstr "" + +#: ../../source/ref-changelog.md:1374 +msgid "`on_configure_evaluate` => `configure_evaluate`" +msgstr "" + +#: ../../source/ref-changelog.md:1375 +msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" +msgstr "" + +#: ../../source/ref-changelog.md:1376 +msgid "`on_configure_fit` => `configure_fit`" +msgstr "" + +#: ../../source/ref-changelog.md:1377 +msgid "`on_aggregate_fit` => `aggregate_fit`" +msgstr "" + +#: ../../source/ref-changelog.md:1381 +msgid "" +"Deprecated `DefaultStrategy` " +"([#479](https://github.com/adap/flower/pull/479)). To migrate use " +"`FedAvg` instead." +msgstr "" + +#: ../../source/ref-changelog.md:1382 +msgid "" +"Simplified examples and baselines " +"([#484](https://github.com/adap/flower/pull/484))." +msgstr "" + +#: ../../source/ref-changelog.md:1383 +msgid "" +"Removed presently unused `on_conclude_round` from strategy interface " +"([#483](https://github.com/adap/flower/pull/483))." +msgstr "" + +#: ../../source/ref-changelog.md:1384 +msgid "" +"Set minimal Python version to 3.6.1 instead of 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." +msgstr "" + +#: ../../source/ref-changelog.md:1385 +msgid "" +"Improved `Strategy` docstrings " +"([#470](https://github.com/adap/flower/pull/470))." +msgstr "" + +#: ../../source/ref-example-projects.rst:2 +msgid "Example projects" +msgstr "" + +#: ../../source/ref-example-projects.rst:4 +msgid "" +"Flower comes with a number of usage examples. The examples demonstrate " +"how Flower can be used to federate different kinds of existing machine " +"learning pipelines, usually leveraging popular machine learning " +"frameworks such as `PyTorch `_ or `TensorFlow " +"`_." +msgstr "" + +#: ../../source/ref-example-projects.rst:9 +msgid "The following examples are available as standalone projects." 
+msgstr "" + +#: ../../source/ref-example-projects.rst:12 +msgid "Quickstart TensorFlow/Keras" +msgstr "" + +#: ../../source/ref-example-projects.rst:14 +msgid "" +"The TensorFlow/Keras quickstart example shows CIFAR-10 image " +"classification with MobileNetV2:" +msgstr "" + +#: ../../source/ref-example-projects.rst:17 +msgid "" +"`Quickstart TensorFlow (Code) " +"`_" +msgstr "" + +#: ../../source/ref-example-projects.rst:19 +msgid ":doc:`Quickstart TensorFlow (Tutorial) `" +msgstr "" + +#: ../../source/ref-example-projects.rst:20 +msgid "" +"`Quickstart TensorFlow (Blog Post) `_" +msgstr "" + +#: ../../source/ref-example-projects.rst:24 +#: ../../source/tutorial-quickstart-pytorch.rst:4 +msgid "Quickstart PyTorch" +msgstr "" + +#: ../../source/ref-example-projects.rst:26 +msgid "" +"The PyTorch quickstart example shows CIFAR-10 image classification with a" +" simple Convolutional Neural Network:" +msgstr "" + +#: ../../source/ref-example-projects.rst:29 +msgid "" +"`Quickstart PyTorch (Code) " +"`_" +msgstr "" + +#: ../../source/ref-example-projects.rst:31 +msgid ":doc:`Quickstart PyTorch (Tutorial) `" +msgstr "" + +#: ../../source/ref-example-projects.rst:34 +msgid "PyTorch: From Centralized To Federated" +msgstr "" + +#: ../../source/ref-example-projects.rst:36 +msgid "" +"This example shows how a regular PyTorch project can be federated using " +"Flower:" +msgstr "" + +#: ../../source/ref-example-projects.rst:38 +msgid "" +"`PyTorch: From Centralized To Federated (Code) " +"`_" +msgstr "" + +#: ../../source/ref-example-projects.rst:40 +msgid "" +":doc:`PyTorch: From Centralized To Federated (Tutorial) `" +msgstr "" + +#: ../../source/ref-example-projects.rst:44 +msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" +msgstr "" + +#: ../../source/ref-example-projects.rst:46 +msgid "" +"This example shows how Flower can be used to build a federated learning " +"system that run across Raspberry Pi and Nvidia Jetson:" +msgstr "" + +#: 
../../source/ref-example-projects.rst:49 +msgid "" +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " +"`_" +msgstr "" + +#: ../../source/ref-example-projects.rst:51 +msgid "" +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " +"`_" +msgstr "" + +#: ../../source/ref-faq.rst:4 +msgid "" +"This page collects answers to commonly asked questions about Federated " +"Learning with Flower." +msgstr "" + +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" +msgstr "" + +#: ../../source/ref-faq.rst:9 +msgid "" +"Yes, it can! Flower even comes with a few under-the-hood optimizations to" +" make it work even better on Colab. Here's a quickstart example:" +msgstr "" + +#: ../../source/ref-faq.rst:11 +msgid "" +"`Flower simulation PyTorch " +"`_" +msgstr "" + +#: ../../source/ref-faq.rst:12 +msgid "" +"`Flower simulation TensorFlow/Keras " +"`_" +msgstr "" + +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" +msgstr "" + +#: ../../source/ref-faq.rst:16 +msgid "" +"Find the `blog post about federated learning on embedded device here " +"`_" +" and the corresponding `GitHub code example " +"`_." +msgstr "" + +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" +msgstr "" + +#: ../../source/ref-faq.rst:20 +msgid "" +"Yes, it does. Please take a look at our `blog post " +"`_ or check out the code examples:" +msgstr "" + +#: ../../source/ref-faq.rst:22 +msgid "" +"`Android Kotlin example `_" +msgstr "" + +#: ../../source/ref-faq.rst:23 +msgid "`Android Java example `_" +msgstr "" + +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" +msgstr "" + +#: ../../source/ref-faq.rst:27 +msgid "" +"Yes, of course. 
A list of available examples using Flower within a " +"blockchain environment is available here:" +msgstr "" + +#: ../../source/ref-faq.rst:30 +msgid "`FLock: A Decentralised AI Training Platform `_." +msgstr "" + +#: ../../source/ref-faq.rst:30 +msgid "Contribute to on-chain training the model and earn rewards." +msgstr "" + +#: ../../source/ref-faq.rst:31 +msgid "Local blockchain with federated learning simulation." +msgstr "" + +#: ../../source/ref-faq.rst:32 +msgid "" +"`Flower meets Nevermined GitHub Repository `_." +msgstr "" + +#: ../../source/ref-faq.rst:33 +msgid "" +"`Flower meets Nevermined YouTube video " +"`_." +msgstr "" + +#: ../../source/ref-faq.rst:34 +msgid "" +"`Flower meets KOSMoS `_." +msgstr "" + +#: ../../source/ref-faq.rst:35 +msgid "" +"`Flower meets Talan blog post `_ ." +msgstr "" + +#: ../../source/ref-faq.rst:36 +msgid "" +"`Flower meets Talan GitHub Repository " +"`_ ." +msgstr "" + +#: ../../source/ref-telemetry.md:1 +msgid "Telemetry" +msgstr "" + +#: ../../source/ref-telemetry.md:3 +msgid "" +"The Flower open-source project collects **anonymous** usage metrics to " +"make well-informed decisions to improve Flower. Doing this enables the " +"Flower team to understand how Flower is used and what challenges users " +"might face." +msgstr "" + +#: ../../source/ref-telemetry.md:5 +msgid "" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users that do not want to share anonymous usage metrics." +msgstr "" + +#: ../../source/ref-telemetry.md:7 +msgid "Principles" +msgstr "" + +#: ../../source/ref-telemetry.md:9 +msgid "We follow strong principles guarding anonymous usage metrics collection:" +msgstr "" + +#: ../../source/ref-telemetry.md:11 +msgid "" +"**Optional:** You will always be able to disable telemetry; read on to " +"learn “[How to opt-out](#how-to-opt-out)”." 
+msgstr "" + +#: ../../source/ref-telemetry.md:12 +msgid "" +"**Anonymous:** The reported usage metrics are anonymous and do not " +"contain any personally identifiable information (PII). See “[Collected " +"metrics](#collected-metrics)” to understand what metrics are being " +"reported." +msgstr "" + +#: ../../source/ref-telemetry.md:13 +msgid "" +"**Transparent:** You can easily inspect what anonymous metrics are being " +"reported; see the section “[How to inspect what is being reported](#how-" +"to-inspect-what-is-being-reported)”" +msgstr "" + +#: ../../source/ref-telemetry.md:14 +msgid "" +"**Open for feedback:** You can always reach out to us if you have " +"feedback; see the section “[How to contact us](#how-to-contact-us)” for " +"details." +msgstr "" + +#: ../../source/ref-telemetry.md:16 +msgid "How to opt-out" +msgstr "" + +#: ../../source/ref-telemetry.md:18 +msgid "" +"When Flower starts, it will check for an environment variable called " +"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " +"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " +"client, simply do so by prepending your command as in:" +msgstr "" + +#: ../../source/ref-telemetry.md:24 +msgid "" +"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," +" `.bashrc` (or whatever configuration file applies to your environment) " +"to disable Flower telemetry permanently." +msgstr "" + +#: ../../source/ref-telemetry.md:26 +msgid "Collected metrics" +msgstr "" + +#: ../../source/ref-telemetry.md:28 +msgid "Flower telemetry collects the following metrics:" +msgstr "" + +#: ../../source/ref-telemetry.md:30 +msgid "" +"**Flower version.** Understand which versions of Flower are currently " +"being used. This helps us to decide whether we should invest effort into " +"releasing a patch version for an older version of Flower or instead use " +"the bandwidth to build new features." 
+msgstr "" + +#: ../../source/ref-telemetry.md:32 +msgid "" +"**Operating system.** Enables us to answer questions such as: *Should we " +"create more guides for Linux, macOS, or Windows?*" +msgstr "" + +#: ../../source/ref-telemetry.md:34 +msgid "" +"**Python version.** Knowing the Python version helps us, for example, to " +"decide whether we should invest effort into supporting old versions of " +"Python or stop supporting them and start taking advantage of new Python " +"features." +msgstr "" + +#: ../../source/ref-telemetry.md:36 +msgid "" +"**Hardware properties.** Understanding the hardware environment that " +"Flower is being used in helps to decide whether we should, for example, " +"put more effort into supporting low-resource environments." +msgstr "" + +#: ../../source/ref-telemetry.md:38 +msgid "" +"**Execution mode.** Knowing what execution mode Flower starts in enables " +"us to understand how heavily certain features are being used and better " +"prioritize based on that." +msgstr "" + +#: ../../source/ref-telemetry.md:40 +msgid "" +"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " +"time a Flower workload starts. This allows us to understand which device " +"types not only start Flower workloads but also successfully complete " +"them." +msgstr "" + +#: ../../source/ref-telemetry.md:42 +msgid "" +"**Source.** Flower telemetry tries to store a random source ID in " +"`~/.flwr/source` the first time a telemetry event is generated. The " +"source ID is important to identify whether an issue is recurring or " +"whether an issue is triggered by multiple clusters running concurrently " +"(which often happens in simulation). For example, if a device runs " +"multiple workloads at the same time, and this results in an issue, then, " +"in order to reproduce the issue, multiple workloads must be started at " +"the same time." +msgstr "" + +#: ../../source/ref-telemetry.md:44 +msgid "" +"You may delete the source ID at any time. 
If you wish for all events " +"logged under a specific source ID to be deleted, you can send a deletion " +"request mentioning the source ID to `telemetry@flower.ai`. All events " +"related to that source ID will then be permanently deleted." +msgstr "" + +#: ../../source/ref-telemetry.md:46 +msgid "" +"We will not collect any personally identifiable information. If you think" +" any of the metrics collected could be misused in any way, please [get in" +" touch with us](#how-to-contact-us). We will update this page to reflect " +"any changes to the metrics collected and publish changes in the " +"changelog." +msgstr "" + +#: ../../source/ref-telemetry.md:48 +msgid "" +"If you think other metrics would be helpful for us to better guide our " +"decisions, please let us know! We will carefully review them; if we are " +"confident that they do not compromise user privacy, we may add them." +msgstr "" + +#: ../../source/ref-telemetry.md:50 +msgid "How to inspect what is being reported" +msgstr "" + +#: ../../source/ref-telemetry.md:52 +msgid "" +"We wanted to make it very easy for you to inspect what anonymous usage " +"metrics are reported. You can view all the reported telemetry information" +" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " +"is disabled by default. You may use logging independently from " +"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " +"without sending any metrics." +msgstr "" + +#: ../../source/ref-telemetry.md:58 +msgid "" +"The inspect Flower telemetry without sending any anonymous usage metrics," +" use both environment variables:" +msgstr "" + +#: ../../source/ref-telemetry.md:64 +msgid "How to contact us" +msgstr "" + +#: ../../source/ref-telemetry.md:66 +msgid "" +"We want to hear from you. 
If you have any feedback or ideas on how to " +"improve the way we handle anonymous usage metrics, reach out to us via " +"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " +"(`telemetry@flower.ai`)." +msgstr "" + +#: ../../source/tutorial-quickstart-android.rst:-1 +msgid "" +"Read this Federated Learning quickstart tutorial for creating an Android " +"app using Flower." +msgstr "" + +#: ../../source/tutorial-quickstart-android.rst:4 +msgid "Quickstart Android" +msgstr "" + +#: ../../source/tutorial-quickstart-android.rst:9 +msgid "" +"Let's build a federated learning system using TFLite and Flower on " +"Android!" +msgstr "" + +#: ../../source/tutorial-quickstart-android.rst:11 +msgid "" +"Please refer to the `full code example " +"`_ to learn " +"more." +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:4 +msgid "Quickstart fastai" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:6 +msgid "" +"In this federated learning tutorial we will learn how to train a " +"SqueezeNet model on MNIST using Flower and fastai. It is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `." +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:10 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:11 +msgid "Then, clone the code example directly from GitHub:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:18 +msgid "" +"This will create a new directory called `quickstart-fastai` containing " +"the following files:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:31 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:32 +msgid "Next, activate your environment, then run:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:41 +msgid "" +"This example by default runs the Flower Simulation Engine, creating a " +"federation of 10 nodes using `FedAvg `_ " +"as the aggregation strategy. 
The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" Let's run the project:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:54 +#: ../../source/tutorial-quickstart-huggingface.rst:61 +#: ../../source/tutorial-quickstart-mlx.rst:60 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:55 +#: ../../source/tutorial-quickstart-pytorch.rst:62 +#: ../../source/tutorial-quickstart-tensorflow.rst:62 +msgid "With default arguments you will see an output like this one:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:98 +#: ../../source/tutorial-quickstart-huggingface.rst:112 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:105 +#: ../../source/tutorial-quickstart-pytorch.rst:103 +#: ../../source/tutorial-quickstart-tensorflow.rst:103 +msgid "" +"You can also override the parameters defined in the " +"``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:108 +msgid "" +"Check the `source code `_ of this tutorial in ``examples/quickstart-fasai`` " +"in the Flower GitHub repository." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:-1 +msgid "" +"Check out this Federating Learning quickstart tutorial for using Flower " +"with 🤗 HuggingFace Transformers in order to fine-tune an LLM." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:4 +msgid "Quickstart 🤗 Transformers" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:6 +msgid "" +"In this federated learning tutorial we will learn how to train a large " +"language model (LLM) on the `IMDB " +"`_ dataset using Flower" +" and the 🤗 Hugging Face Transformers library. It is recommended to create" +" a virtual environment and run everything within a :doc:`virtualenv " +"`." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:12 +msgid "" +"Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face " +"project. 
It will generate all the files needed to run, by default with " +"the Flower Simulation Engine, a federation of 10 nodes using |fedavg|_ " +"The dataset will be partitioned using |flowerdatasets|_'s " +"|iidpartitioner|_." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:17 +#: ../../source/tutorial-quickstart-mlx.rst:17 +#: ../../source/tutorial-quickstart-pytorch.rst:18 +#: ../../source/tutorial-quickstart-tensorflow.rst:18 +msgid "" +"Now that we have a rough idea of what this example is about, let's get " +"started. First, install Flower in your new environment:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:25 +msgid "" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``HuggingFace``), give a name to your " +"project, and type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:33 +#: ../../source/tutorial-quickstart-mlx.rst:32 +#: ../../source/tutorial-quickstart-pytorch.rst:34 +#: ../../source/tutorial-quickstart-tensorflow.rst:34 +msgid "" +"After running it you'll notice a new directory with your project name has" +" been created. 
It should have the following structure:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:47 +#: ../../source/tutorial-quickstart-mlx.rst:46 +#: ../../source/tutorial-quickstart-pytorch.rst:48 +#: ../../source/tutorial-quickstart-tensorflow.rst:48 +msgid "" +"If you haven't yet installed the project and its dependencies, you can do" +" so by:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:54 +#: ../../source/tutorial-quickstart-pytorch.rst:55 +#: ../../source/tutorial-quickstart-tensorflow.rst:55 +msgid "To run the project, do:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:102 +msgid "You can also run the project with GPU as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:109 +msgid "" +"This will use the default arguments where each ``ClientApp`` will use 2 " +"CPUs and at most 4 ``ClientApp``\\s will run in a given GPU." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:120 +#: ../../source/tutorial-quickstart-mlx.rst:110 +#: ../../source/tutorial-quickstart-pytorch.rst:111 +msgid "" +"What follows is an explanation of each component in the project you just " +"created: dataset partition, the model, defining the ``ClientApp`` and " +"defining the ``ServerApp``." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:124 +#: ../../source/tutorial-quickstart-mlx.rst:114 +#: ../../source/tutorial-quickstart-pytorch.rst:115 +#: ../../source/tutorial-quickstart-tensorflow.rst:112 +msgid "The Data" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:126 +msgid "" +"This tutorial uses |flowerdatasets|_ to easily download and partition the" +" `IMDB `_ dataset. In " +"this example you'll make use of the |iidpartitioner|_ to generate " +"``num_partitions`` partitions. You can choose |otherpartitioners|_ " +"available in Flower Datasets. 
To tokenize the text, we will also load the" +" tokenizer from the pre-trained Transformer model that we'll use during " +"training - more on that in the next section. Each ``ClientApp`` will call" +" this function to create dataloaders with the data that correspond to " +"their data partition." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:171 +#: ../../source/tutorial-quickstart-mlx.rst:155 +#: ../../source/tutorial-quickstart-pytorch.rst:150 +#: ../../source/tutorial-quickstart-tensorflow.rst:139 +msgid "The Model" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:173 +msgid "" +"We will leverage 🤗 Hugging Face to federate the training of language " +"models over multiple clients using Flower. More specifically, we will " +"fine-tune a pre-trained Transformer model (|berttiny|_) for sequence " +"classification over the dataset of IMDB ratings. The end goal is to " +"detect if a movie rating is positive or negative. If you have access to " +"larger GPUs, feel free to use larger models!" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:185 +msgid "" +"Note that here, ``model_name`` is a string that will be loaded from the " +"``Context`` in the ClientApp and ServerApp." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:188 +msgid "" +"In addition to loading the pretrained model weights and architecture, we " +"also include two utility functions to perform both training (i.e. " +"``train()``) and evaluation (i.e. ``test()``) using the above model. " +"These functions should look fairly familiar if you have some prior " +"experience with PyTorch. Note these functions do not have anything " +"specific to Flower. That being said, the training function will normally " +"be called, as we'll see later, from a Flower client passing its own data." 
+" In summary, your clients can use standard training/testing functions to " +"perform local training or evaluation:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:228 +#: ../../source/tutorial-quickstart-mlx.rst:199 +#: ../../source/tutorial-quickstart-pytorch.rst:224 +#: ../../source/tutorial-quickstart-tensorflow.rst:168 +msgid "The ClientApp" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:230 +msgid "" +"The main changes we have to make to use 🤗 Hugging Face with Flower will " +"be found in the ``get_weights()`` and ``set_weights()`` functions. Under " +"the hood, the ``transformers`` library uses PyTorch, which means we can " +"reuse the ``get_weights()`` and ``set_weights()`` code that we defined in" +" the :doc:`Quickstart PyTorch ` tutorial. As" +" a reminder, in ``get_weights()``, PyTorch model parameters are extracted" +" and represented as a list of NumPy arrays. The ``set_weights()`` " +"function that's the opposite: given a list of NumPy arrays it applies " +"them to an existing PyTorch model. Doing this in fairly easy in PyTorch." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:241 +#: ../../source/tutorial-quickstart-pytorch.rst:234 +msgid "" +"The specific implementation of ``get_weights()`` and ``set_weights()`` " +"depends on the type of models you use. The ones shown below work for a " +"wide range of PyTorch models but you might need to adjust them if you " +"have more exotic model architectures." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:257 +#: ../../source/tutorial-quickstart-pytorch.rst:250 +msgid "" +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset. 
Similarly, the ``evaluate()`` method is used to evaluate the " +"model received on a held-out validation set that the client might have:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:283 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparameters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"``local-epochs`` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additional hyperparameters in ``pyproject.toml`` and access them here." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:316 +#: ../../source/tutorial-quickstart-mlx.rst:361 +#: ../../source/tutorial-quickstart-pytorch.rst:307 +#: ../../source/tutorial-quickstart-tensorflow.rst:232 +msgid "The ServerApp" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:318 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"|serverappcomponents|_ as opposed to a |client|_. In this example we use " +"the `FedAvg` strategy. To it we pass a randomly initialized model that " +"will serve as the global model to be federated. Note that the value of " +"``fraction_fit`` is read from the run config. You can find the default " +"value defined in the ``pyproject.toml``." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:356 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system for an LLM." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:361 +msgid "" +"Check the source code of the extended version of this tutorial in " +"|quickstart_hf_link|_ in the Flower GitHub repository.
For a " +"comprehensive example of a federated fine-tuning of an LLM with Flower, " +"refer to the |flowertune|_ example in the Flower GitHub repository." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:-1 +msgid "" +"Read this Federated Learning quickstart tutorial for creating an iOS app " +"using Flower to train a neural network on MNIST." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:4 +msgid "Quickstart iOS" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:9 +msgid "" +"In this tutorial we will learn how to train a Neural Network on MNIST " +"using Flower and CoreML on iOS devices." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:12 +msgid "" +"First of all, for running the Flower Python server, it is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `. For the Flower client " +"implementation in iOS, it is recommended to use Xcode as our IDE." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:17 +msgid "" +"Our example consists of one Python *server* and two iPhone *clients* that" +" all have the same model." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:20 +msgid "" +"*Clients* are responsible for generating individual weight updates for " +"the model based on their local datasets. These updates are then sent to " +"the *server* which will aggregate them to produce a better model. " +"Finally, the *server* sends this improved version of the model back to " +"each *client*. A complete cycle of weight updates is called a *round*." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:26 +msgid "" +"Now that we have a rough idea of what is going on, let's get started to " +"setup our Flower server environment. We first need to install Flower. 
You" +" can do this by using pip:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:33 +msgid "Or Poetry:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:40 +#: ../../source/tutorial-quickstart-scikitlearn.rst:43 +#: ../../source/tutorial-quickstart-xgboost.rst:65 +msgid "Flower Client" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:42 +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training using CoreML as our local training pipeline and " +"MNIST as our dataset. For simplicity reasons we will use the complete " +"Flower client with CoreML, that has been implemented and stored inside " +"the Swift SDK. The client implementation can be seen below:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:80 +msgid "" +"Let's create a new application project in Xcode and add ``flwr`` as a " +"dependency in your project. For our application, we will store the logic " +"of our app in ``FLiOSModel.swift`` and the UI elements in " +"``ContentView.swift``. We will focus more on ``FLiOSModel.swift`` in this" +" quickstart. Please refer to the `full code example " +"`_ to learn more " +"about the app." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:86 +msgid "Import Flower and CoreML related packages in ``FLiOSModel.swift``:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:94 +msgid "" +"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " +"will be bundled inside the application during deployment to your iOS " +"device. We need to pass the url to access mlmodel and run CoreML machine " +"learning processes, it can be retrieved by calling the function " +"``Bundle.main.url``. For the MNIST dataset, we need to preprocess it into" +" ``MLBatchProvider`` object. The preprocessing is done inside " +"``DataLoader.swift``." 
+msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:112 +msgid "" +"Since CoreML does not allow the model parameters to be seen before " +"training, and accessing the model parameters during or after the training" +" can only be done by specifying the layer name, we need to know this " +"information beforehand, through looking at the model specification, which" +" are written as proto files. The implementation can be seen in " +"``MLModelInspect``." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:118 +msgid "" +"After we have all of the necessary information, let's create our Flower " +"client." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:133 +msgid "" +"Then start the Flower gRPC client and start communicating to the server " +"by passing our Flower client to the function ``startFlwrGRPC``." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:141 +msgid "" +"That's it for the client. We only have to implement ``Client`` or call " +"the provided ``MLFlwrClient`` and call ``startFlwrGRPC()``. The attribute" +" ``hostname`` and ``port`` tells the client which server to connect to. " +"This can be done by entering the hostname and port in the application " +"before clicking the start button to start the federated learning process." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:148 +#: ../../source/tutorial-quickstart-scikitlearn.rst:179 +#: ../../source/tutorial-quickstart-xgboost.rst:358 +msgid "Flower Server" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:150 +msgid "" +"For simple workloads we can start a Flower server and leave all the " +"configuration possibilities at their default values. In a file named " +"``server.py``, import Flower and start the server:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:161 +#: ../../source/tutorial-quickstart-scikitlearn.rst:254 +msgid "Train the model, federated!" 
+msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:163 +#: ../../source/tutorial-quickstart-xgboost.rst:590 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. FL systems usually have a server and " +"multiple clients. We therefore have to start the server first:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:171 +msgid "" +"Once the server is running we can start the clients in different " +"terminals. Build and run the client through your Xcode, one through Xcode" +" Simulator and the other by deploying it to your iPhone. To see more " +"about how to deploy your app to iPhone or Simulator visit `here " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:177 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system in your ios device. The full `source code " +"`_ for this " +"example can be found in ``examples/ios``." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Jax to train a linear regression model on a scikit-learn dataset." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:4 +msgid "Quickstart JAX" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:9 +msgid "" +"This tutorial will show you how to use Flower to build a federated " +"version of an existing JAX workload. We are using JAX to train a linear " +"regression model on a scikit-learn dataset. We will structure the example" +" similar to our `PyTorch - From Centralized To Federated " +"`_ walkthrough. First, we build a centralized " +"training approach based on the `Linear Regression with JAX " +"`_" +" tutorial`. Then, we build upon the centralized training code to run the " +"training in a federated fashion." 
+msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:20 +msgid "" +"Before we start building our JAX example, we need install the packages " +"``jax``, ``jaxlib``, ``scikit-learn``, and ``flwr``:" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:28 +msgid "Linear Regression with JAX" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:30 +msgid "" +"We begin with a brief description of the centralized training code based " +"on a ``Linear Regression`` model. If you want a more in-depth explanation" +" of what's going on then have a look at the official `JAX documentation " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:34 +msgid "" +"Let's create a new file called ``jax_training.py`` with all the " +"components required for a traditional (centralized) linear regression " +"training. First, the JAX packages ``jax`` and ``jaxlib`` need to be " +"imported. In addition, we need to import ``sklearn`` since we use " +"``make_regression`` for the dataset and ``train_test_split`` to split the" +" dataset into a training and test set. You can see that we do not yet " +"import the ``flwr`` package for federated learning. This will be done " +"later." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:51 +msgid "The ``load_data()`` function loads the mentioned training and test sets." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:63 +msgid "" +"The model architecture (a very simple ``Linear Regression`` model) is " +"defined in ``load_model()``." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:73 +msgid "" +"We now need to define the training (function ``train()``), which loops " +"over the training set and measures the loss (function ``loss_fn()``) for " +"each batch of training examples. The loss function is separate since JAX " +"takes derivatives with a ``grad()`` function (defined in the ``main()`` " +"function and called in ``train()``)." 
+msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:95 +msgid "" +"The evaluation of the model is defined in the function ``evaluation()``. " +"The function takes all test examples and measures the loss of the linear " +"regression model." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:107 +msgid "" +"Having defined the data loading, model architecture, training, and " +"evaluation we can put everything together and train our model using JAX. " +"As already mentioned, the ``jax.grad()`` function is defined in " +"``main()`` and passed to ``train()``." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:126 +msgid "You can now run your (centralized) JAX linear regression workload:" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:132 +msgid "" +"So far this should all look fairly familiar if you've used JAX before. " +"Let's take the next step and use what we've built to create a simple " +"federated learning system consisting of one server and two clients." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:137 +msgid "JAX meets Flower" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:139 +msgid "" +"The concept of federating an existing workload is always the same and " +"easy to understand. We have to start a *server* and then use the code in " +"``jax_training.py`` for the *clients* that are connected to the *server*." +" The *server* sends model parameters to the clients. The *clients* run " +"the training and update the parameters. The updated parameters are sent " +"back to the *server*, which averages all received parameter updates. This" +" describes one round of the federated learning process, and we repeat " +"this for multiple rounds." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:167 +msgid "" +"Finally, we will define our *client* logic in ``client.py`` and build " +"upon the previously defined JAX training in ``jax_training.py``. 
Our " +"*client* needs to import ``flwr``, but also ``jax`` and ``jaxlib`` to " +"update the parameters on our JAX model:" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:182 +msgid "" +"Implementing a Flower *client* basically means implementing a subclass of" +" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " +"implementation will be based on ``flwr.client.NumPyClient`` and we'll " +"call it ``FlowerClient``. ``NumPyClient`` is slightly easier to implement" +" than ``Client`` if you use a framework with good NumPy interoperability " +"(like JAX) because it avoids some of the boilerplate that would otherwise" +" be necessary. ``FlowerClient`` needs to implement four methods, two " +"methods for getting/setting model parameters, one method for training the" +" model, and one method for testing the model:" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:194 +msgid "``set_parameters (optional)``" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:193 +msgid "transform parameters to NumPy ``ndarray``'s" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:203 +msgid "get the updated local model parameters and return them to the server" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:208 +msgid "return the local loss to the server" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:210 +msgid "" +"The challenging part is to transform the JAX model parameters from " +"``DeviceArray`` to ``NumPy ndarray`` to make them compatible with " +"`NumPyClient`." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:213 +msgid "" +"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " +"functions ``train()`` and ``evaluate()`` previously defined in " +"``jax_training.py``. So what we really do here is we tell Flower through " +"our ``NumPyClient`` subclass which of our already defined functions to " +"call for training and evaluation. 
We included type annotations to give " +"you a better understanding of the data types that get passed around." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:286 +msgid "Having defined the federation process, we can run it." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:315 +msgid "" +"in each window (make sure that the server is still running before you do " +"so) and see your JAX project run federated learning across two clients. " +"Congratulations!" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:321 +msgid "" +"The source code of this example was improved over time and can be found " +"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " +"clients load the same dataset." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:325 +msgid "" +"You're now prepared to explore this topic further. How about using a more" +" sophisticated model or using a different dataset? How about adding more " +"clients?" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:4 +msgid "Quickstart MLX" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:6 +msgid "" +"In this federated learning tutorial we will learn how to train simple MLP" +" on MNIST using Flower and MLX. It is recommended to create a virtual " +"environment and run everything within a :doc:`virtualenv `." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:10 +msgid "" +"Let's use `flwr new` to create a complete Flower+MLX project. It will " +"generate all the files needed to run, by default with the Simulation " +"Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:25 +msgid "" +"Then, run the command below. 
You will be prompted to select of the " +"available templates (choose ``MLX``), give a name to your project, and " +"type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:53 +msgid "To run the project do:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:102 +msgid "" +"You can also override the parameters defined in " +"``[tool.flwr.app.config]`` section in the ``pyproject.toml`` like this:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:116 +msgid "" +"We will use `Flower Datasets `_ to " +"easily download and partition the `MNIST` dataset. In this example you'll" +" make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:157 +msgid "" +"We define the model as in the `centralized MLX example " +"`_, it's a " +"simple MLP:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:180 +msgid "" +"We also define some utility functions to test our model and to iterate " +"over batches." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:201 +msgid "" +"The main changes we have to make to use `MLX` with `Flower` will be found" +" in the ``get_params()`` and ``set_params()`` functions. Indeed, MLX " +"doesn't provide an easy way to convert the model parameters into a list " +"of ``np.array`` objects (the format we need for the serialization of the " +"messages to work)." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:206 +msgid "The way MLX stores its parameters is as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:219 +msgid "" +"Therefore, to get our list of ``np.array`` objects, we need to extract " +"each array and convert them into a NumPy array:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:228 +msgid "" +"For the ``set_params()`` function, we perform the reverse operation. 
We " +"receive a list of NumPy arrays and want to convert them into MLX " +"parameters. Therefore, we iterate through pairs of parameters and assign " +"them to the `weight` and `bias` keys of each layer dict:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:243 +msgid "" +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:259 +msgid "" +"Here, after updating the parameters, we perform the training as in the " +"centralized case, and return the new parameters." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:262 +msgid "And for the ``evaluate()`` method of the client:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:272 +msgid "" +"We also begin by updating the parameters with the ones sent by the " +"server, and then we compute the loss and accuracy using the functions " +"defined above. In the constructor of the ``FlowerClient`` we instantiate " +"the `MLP` model as well as other components such as the optimizer." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:277 +msgid "Putting everything together we have:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:331 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that " +"``context`` enables you to get access to hyperparemeters defined in " +"``pyproject.toml`` to configure the run. In this tutorial we access, " +"among other hyperparameters, the ``local-epochs`` setting to control the " +"number of epochs a ``ClientApp`` will perform when running the ``fit()`` " +"method." 
+msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:363 +msgid "" +"To construct a ``ServerApp``, we define a ``server_fn()`` callback with " +"an identical signature to that of ``client_fn()``, but the return type is" +" `ServerAppComponents `_ as " +"opposed to `Client `_. In this example we use the " +"``FedAvg`` strategy." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:386 +#: ../../source/tutorial-quickstart-pytorch.rst:344 +#: ../../source/tutorial-quickstart-tensorflow.rst:266 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:390 +msgid "" +"Check the `source code `_ of the extended version of this tutorial in ``examples" +"/quickstart-mlx`` in the Flower GitHub repository." +msgstr "" + +#: ../../source/tutorial-quickstart-pandas.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Pandas to perform Federated Analytics." +msgstr "" + +#: ../../source/tutorial-quickstart-pandas.rst:4 +msgid "Quickstart Pandas" +msgstr "" + +#: ../../source/tutorial-quickstart-pandas.rst:9 +msgid "Let's build a federated analytics system using Pandas and Flower!" +msgstr "" + +#: ../../source/tutorial-quickstart-pandas.rst:11 +msgid "" +"Please refer to the `full code example " +"`_ " +"to learn more." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with PyTorch to train a CNN model on MNIST." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:6 +msgid "" +"In this federated learning tutorial we will learn how to train a " +"Convolutional Neural Network on CIFAR-10 using Flower and PyTorch. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." 
+msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:11 +msgid "" +"Let's use `flwr new` to create a complete Flower+PyTorch project. It will" +" generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:26 +msgid "" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``PyTorch``), give a name to your project, " +"and type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:117 +msgid "" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create dataloaders with the data" +" that correspond to their data partition." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:152 +msgid "" +"We defined a simple Convolutional Neural Network (CNN), but feel free to " +"replace it with a more sophisticated model if you'd like:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:177 +msgid "" +"In addition to defining the model architecture, we also include two " +"utility functions to perform both training (i.e. ``train()``) and " +"evaluation (i.e. ``test()``) using the above model. These functions " +"should look fairly familiar if you have some prior experience with " +"PyTorch. Note these functions do not have anything specific to Flower. " +"That being said, the training function will normally be called, as we'll " +"see later, from a Flower client passing its own data. 
In summary, your " +"clients can use standard training/testing functions to perform local " +"training or evaluation:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:226 +msgid "" +"The main changes we have to make to use `PyTorch` with `Flower` will be " +"found in the ``get_weights()`` and ``set_weights()`` functions. In " +"``get_weights()`` PyTorch model parameters are extracted and represented " +"as a list of NumPy arrays. The ``set_weights()`` function does the " +"opposite: given a list of NumPy arrays it applies them to an existing " +"PyTorch model. Doing this is fairly easy in PyTorch." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:282 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparameters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"`local-epochs` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additional hyperparameters in ``pyproject.toml`` and access them here." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:309 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will serve as " +"the global model to be federated. Note that the value of ``fraction_fit`` is" +" read from the run config. You can find the default value defined in the " +"``pyproject.toml``." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:348 +msgid "" +"Check the `source code `_ of the extended version of this tutorial in " +"``examples/quickstart-pytorch`` in the Flower GitHub repository.
+msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:354 +#: ../../source/tutorial-quickstart-tensorflow.rst:278 +msgid "Video tutorial" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:358 +msgid "" +"The video shown below shows how to setup a PyTorch + Flower project using" +" our previously recommended APIs. A new video tutorial will be released " +"that shows the new APIs (as the content above does)" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:4 +msgid "Quickstart PyTorch Lightning" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:6 +msgid "" +"In this federated learning tutorial we will learn how to train an " +"AutoEncoder model on MNIST using Flower and PyTorch Lightning. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:19 +msgid "" +"This will create a new directory called `quickstart-pytorch-lightning` " +"containing the following files:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:42 +msgid "" +"By default, Flower Simulation Engine will be started and it will create a" +" federation of 4 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" To run the project, do:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:93 +msgid "" +"Each simulated `ClientApp` (two per round) will also log a summary of " +"their local training process. Expect this output to be similar to:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:115 +msgid "" +"Check the `source code `_ of this tutorial in ``examples" +"/quickstart-pytorch-lightning`` in the Flower GitHub repository." 
+msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with scikit-learn to train a linear regression model." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:4 +msgid "Quickstart scikit-learn" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:9 +msgid "" +"In this tutorial, we will learn how to train a ``Logistic Regression`` " +"model on MNIST using Flower and scikit-learn." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:12 +msgid "" +"It is recommended to create a virtual environment and run everything " +"within this :doc:`virtualenv `." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:15 +msgid "" +"Our example consists of one *server* and two *clients* all having the " +"same model." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:17 +msgid "" +"*Clients* are responsible for generating individual model parameter " +"updates for the model based on their local datasets. These updates are " +"then sent to the *server* which will aggregate them to produce an updated" +" global model. Finally, the *server* sends this improved version of the " +"model back to each *client*. A complete cycle of parameters updates is " +"called a *round*." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:23 +msgid "" +"Now that we have a rough idea of what is going on, let's get started. We " +"first need to install Flower. 
You can do this by running:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:30 +msgid "Since we want to use scikit-learn, let's go ahead and install it:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:36 +msgid "Or simply install all dependencies using Poetry:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:45 +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training with two clients and one server. However, before " +"setting up the client and server, we will define all functionalities that" +" we need for our federated learning setup within ``utils.py``. The " +"``utils.py`` contains different functions defining all the machine " +"learning basics:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:51 +msgid "``get_model_parameters()``" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +msgid "Returns the parameters of a ``sklearn`` LogisticRegression model" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:53 +msgid "``set_model_params()``" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:54 +msgid "Sets the parameters of a ``sklearn`` LogisticRegression model" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 +msgid "``set_initial_params()``" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 +msgid "Initializes the model parameters that the Flower server will ask for" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:58 +msgid "" +"Please check out ``utils.py`` `here " +"`_ for more details. The pre-defined functions are used in" +" the ``client.py`` and imported. 
The ``client.py`` also requires to " +"import several packages such as Flower and scikit-learn:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:75 +msgid "" +"Prior to local training, we need to load the MNIST dataset, a popular " +"image classification dataset of handwritten digits for machine learning, " +"and partition the dataset for FL. This can be conveniently achieved using" +" `Flower Datasets `_. The " +"``FederatedDataset.load_partition()`` method loads the partitioned " +"training set for each partition ID defined in the ``--partition-id`` " +"argument." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:106 +msgid "" +"Next, the logistic regression model is defined and initialized with " +"``utils.set_initial_params()``." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:119 +msgid "" +"The Flower server interacts with clients through an interface called " +"``Client``. When the server selects a particular client for training, it " +"sends training instructions over the network. The client receives those " +"instructions and calls one of the ``Client`` methods to run your code " +"(i.e., to fit the logistic regression we defined earlier)." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:124 +msgid "" +"Flower provides a convenience class called ``NumPyClient`` which makes it" +" easier to implement the ``Client`` interface when your workload uses " +"scikit-learn. 
Implementing ``NumPyClient`` usually means defining the " +"following methods (``set_parameters`` is optional though):" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:130 +msgid "return the model weight as a list of NumPy ndarrays" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:132 +msgid "``set_parameters`` (optional)" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:132 +msgid "" +"update the local model weights with the parameters received from the " +"server" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:133 +msgid "is directly imported with ``utils.set_model_params()``" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:135 +msgid "set the local model weights" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:136 +msgid "train the local model" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:137 +msgid "return the updated local model weights" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:139 +msgid "test the local model" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:141 +msgid "The methods can be implemented in the following way:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:163 +msgid "" +"We can now create an instance of our class ``MnistClient`` and add one " +"line to actually run this client:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:170 +msgid "" +"That's it for the client. We only have to implement ``Client`` or " +"``NumPyClient`` and call ``fl.client.start_client()``. If you implement a" +" client of type ``NumPyClient`` you'll need to first call its " +"``to_client()`` method. The string ``\"0.0.0.0:8080\"`` tells the client " +"which server to connect to. In our case we can run the server and the " +"client on the same machine, therefore we use ``\"0.0.0.0:8080\"``. 
If we " +"run a truly federated workload with the server and clients running on " +"different machines, all that needs to change is the ``server_address`` we" +" pass to the client." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:181 +msgid "" +"The following Flower server is a little bit more advanced and returns an " +"evaluation function for the server-side evaluation. First, we import " +"again all required libraries such as Flower and scikit-learn." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:185 +msgid "``server.py``, import Flower and start the server:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:198 +msgid "" +"The number of federated learning rounds is set in ``fit_round()`` and the" +" evaluation is defined in ``get_evaluate_fn()``. The evaluation function " +"is called after each federated learning round and gives you information " +"about loss and accuracy. Note that we also make use of Flower Datasets " +"here to load the test split of the MNIST dataset for server-side " +"evaluation." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:228 +msgid "" +"The ``main`` contains the server-side parameter initialization " +"``utils.set_initial_params()`` as well as the aggregation strategy " +"``fl.server.strategy:FedAvg()``. The strategy is the default one, " +"federated averaging (or FedAvg), with two clients and evaluation after " +"each federated learning round. The server can be started with the command" +" ``fl.server.start_server(server_address=\"0.0.0.0:8080\", " +"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))``." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:256 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. Federated learning systems usually have a " +"server and multiple clients. 
We, therefore, have to start the server " +"first:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:264 +#: ../../source/tutorial-quickstart-xgboost.rst:598 +msgid "" +"Once the server is running we can start the clients in different " +"terminals. Open a new terminal and start the first client:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:271 +#: ../../source/tutorial-quickstart-xgboost.rst:605 +msgid "Open another terminal and start the second client:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:277 +#: ../../source/tutorial-quickstart-xgboost.rst:611 +msgid "" +"Each client will have its own dataset. You should now see how the " +"training does in the very first terminal (the one that started the " +"server):" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:311 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this example can be found in ``examples/sklearn-logreg-" +"mnist``." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with TensorFlow to train a CNN model on CIFAR-10." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:4 +msgid "Quickstart TensorFlow" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:6 +msgid "" +"In this tutorial we will learn how to train a Convolutional Neural " +"Network on CIFAR-10 using the Flower framework and TensorFlow. First of " +"all, it is recommended to create a virtual environment and run everything" +" within a :doc:`virtualenv `." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:11 +msgid "" +"Let's use `flwr new` to create a complete Flower+TensorFlow project. It " +"will generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. 
The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:26 +msgid "" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``TensorFlow``), give a name to your project," +" and type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:114 +msgid "" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create the ``NumPy`` arrays that" +" correspond to their data partition." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:141 +msgid "" +"Next, we need a model. We defined a simple Convolutional Neural Network " +"(CNN), but feel free to replace it with a more sophisticated model if " +"you'd like:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:170 +msgid "" +"With `TensorFlow`, we can use the built-in ``get_weights()`` and " +"``set_weights()`` functions, which simplifies the implementation with " +"`Flower`. The rest of the functionality in the ClientApp is directly " +"inspired by the centralized case. The ``fit()`` method in the client " +"trains the model using the local dataset. Similarly, the ``evaluate()`` " +"method is used to evaluate the model received on a held-out validation " +"set that the client might have:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:203 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparameters defined in your " +"``pyproject.toml`` to configure the run. 
For example, in this tutorial we" +" access the `local-epochs` setting to control the number of epochs a " +"``ClientApp`` will perform when running the ``fit()`` method, in addition" +" to `batch-size`. You could define additional hyperparameters in " +"``pyproject.toml`` and access them here." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:234 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will serve as " +"the global model to federate." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:270 +msgid "" +"Check the source code of the extended version of this tutorial in " +"|quickstart_tf_link|_ in the Flower GitHub repository." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:282 +msgid "" +"The video shown below shows how to setup a TensorFlow + Flower project " +"using our previously recommended APIs. A new video tutorial will be " +"released that shows the new APIs (as the content above does)" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with XGBoost to train classification models on trees." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:4 +msgid "Quickstart XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:13 +msgid "Federated XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:15 +msgid "" +"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " +"implementation of gradient-boosted decision tree (**GBDT**), that " +"maximises the computational boundaries for boosted tree methods. 
It's " +"primarily designed to enhance both the performance and computational " +"speed of machine learning models. In XGBoost, trees are constructed " +"concurrently, unlike the sequential approach taken by GBDT." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:21 +msgid "" +"Often, for tabular data on medium-sized datasets with fewer than 10k " +"training examples, XGBoost surpasses the results of deep learning " +"techniques." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:25 +msgid "Why federated XGBoost?" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:27 +msgid "" +"Indeed, as the demand for data privacy and decentralized learning grows, " +"there's an increasing requirement to implement federated XGBoost systems " +"for specialised applications, like survival analysis and financial fraud " +"detection." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:31 +msgid "" +"Federated learning ensures that raw data remains on the local device, " +"making it an attractive approach for sensitive domains where data " +"security and privacy are paramount. Given the robustness and efficiency " +"of XGBoost, combining it with federated learning offers a promising " +"solution for these specific challenges." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:36 +msgid "" +"In this tutorial we will learn how to train a federated XGBoost model on " +"HIGGS dataset using Flower and ``xgboost`` package. We use a simple " +"example (`full code xgboost-quickstart " +"`_)" +" with two *clients* and one *server* to demonstrate how federated XGBoost" +" works, and then we dive into a more complex example (`full code xgboost-" +"comprehensive `_) to run various experiments." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:46 +msgid "Environment Setup" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:48 +msgid "" +"First of all, it is recommended to create a virtual environment and run " +"everything within a :doc:`virtualenv `." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:51 +msgid "" +"We first need to install Flower and Flower Datasets. You can do this by " +"running :" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:57 +msgid "" +"Since we want to use ``xgboost`` package to build up XGBoost trees, let's" +" go ahead and install ``xgboost``:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:67 +msgid "" +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. Now that we have all our " +"dependencies installed, let's run a simple distributed training with two " +"clients and one server." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:71 +msgid "" +"In a file called ``client.py``, import xgboost, Flower, Flower Datasets " +"and other related functions:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:99 +msgid "Dataset partition and hyper-parameter selection" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:101 +msgid "" +"Prior to local training, we require loading the HIGGS dataset from Flower" +" Datasets and conduct data partitioning for FL:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:115 +msgid "" +"In this example, we split the dataset into 30 partitions with uniform " +"distribution (``IidPartitioner(num_partitions=30)``). Then, we load the " +"partition for the given client based on ``partition_id``:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:135 +msgid "" +"After that, we do train/test splitting on the given partition (client's " +"local data), and transform data format for ``xgboost`` package." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:149 +msgid "" +"The functions of ``train_test_split`` and " +"``transform_dataset_to_dmatrix`` are defined as below:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:174 +msgid "Finally, we define the hyper-parameters used for XGBoost training." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:190 +msgid "" +"The ``num_local_round`` represents the number of iterations for local " +"tree boost. We use CPU for the training by default. One can shift it to " +"GPU by setting ``tree_method`` to ``gpu_hist``. We use AUC as evaluation " +"metric." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:195 +msgid "Flower client definition for XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:197 +msgid "" +"After loading the dataset we define the Flower client. We follow the " +"general rule to define ``XgbClient`` class inherited from " +"``fl.client.Client``." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:219 +msgid "" +"All required parameters defined above are passed to ``XgbClient``'s " +"constructor." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:221 +msgid "" +"Then, we override ``get_parameters``, ``fit`` and ``evaluate`` methods " +"inside the ``XgbClient`` class as follows." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:236 +msgid "" +"Unlike neural network training, XGBoost trees are not started from " +"specified random weights. In this case, we do not use ``get_parameters`` " +"and ``set_parameters`` to initialise model parameters for XGBoost. As a " +"result, let's return an empty tensor in ``get_parameters`` when it is " +"called by the server at the first round." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:278 +msgid "" +"In ``fit``, at the first round, we call ``xgb.train()`` to build up the " +"first set of trees.
From the second round, we load the global model sent " +"from server to new build Booster object, and then update model weights on" +" local training data with function ``local_boost`` as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:298 +msgid "" +"Given ``num_local_round``, we update trees by calling " +"``bst_input.update`` method. After training, the last " +"``N=num_local_round`` trees will be extracted to send to the server." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:330 +msgid "" +"In ``evaluate``, after loading the global model, we call ``bst.eval_set``" +" function to conduct evaluation on valid set. The AUC value will be " +"returned." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:333 +msgid "" +"Now, we can create an instance of our class ``XgbClient`` and add one " +"line to actually run this client:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:350 +msgid "" +"That's it for the client. We only have to implement ``Client`` and call " +"``fl.client.start_client()``. The string ``\"[::]:8080\"`` tells the " +"client which server to connect to. In our case we can run the server and " +"the client on the same machine, therefore we use ``\"[::]:8080\"``. If we" +" run a truly federated workload with the server and clients running on " +"different machines, all that needs to change is the ``server_address`` we" +" point the client at." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:360 +msgid "" +"These updates are then sent to the *server* which will aggregate them to " +"produce a better model. Finally, the *server* sends this improved version" +" of the model back to each *client* to finish a complete FL round." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:364 +msgid "" +"In a file named ``server.py``, import Flower and FedXgbBagging from " +"``flwr.server.strategy``." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:367 +msgid "We first define a strategy for XGBoost bagging aggregation." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:401 +msgid "" +"We use two clients for this example. An ``evaluate_metrics_aggregation`` " +"function is defined to collect and compute the weighted average of the " +"AUC values from clients. The ``config_func`` function is used to return " +"the current FL round number to client's ``fit()`` and ``evaluate()`` " +"methods." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:406 +msgid "Then, we start the server:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:418 +msgid "Tree-based bagging aggregation" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:420 +msgid "" +"You must be curious about how bagging aggregation works. Let's look into " +"the details." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:422 +msgid "" +"In file ``flwr.server.strategy.fedxgb_bagging.py``, we define " +"``FedXgbBagging`` inherited from ``flwr.server.strategy.FedAvg``. Then, " +"we override the ``aggregate_fit``, ``aggregate_evaluate`` and " +"``evaluate`` methods as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:519 +msgid "" +"In ``aggregate_fit``, we sequentially aggregate the clients' XGBoost " +"trees by calling ``aggregate()`` function:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:579 +msgid "" +"In this function, we first fetch the number of trees and the number of " +"parallel trees for the current and previous model by calling " +"``_get_tree_nums``. Then, the fetched information will be aggregated. " +"After that, the trees (containing model weights) are aggregated to " +"generate a new tree model."
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:584 +msgid "" +"After traversal of all clients' models, a new global model is generated, " +"followed by serialisation, and sent back to each client." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:588 +msgid "Launch Federated XGBoost!" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:664 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"XGBoost system. The AUC values can be checked in ``metrics_distributed``." +" One can see that the average AUC increases over FL rounds." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:668 +msgid "" +"The full `source code `_ for this example can be found in ``examples" +"/xgboost-quickstart``." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:673 +msgid "Comprehensive Federated XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:675 +msgid "" +"Now that you know how federated XGBoost works with Flower, it's time" +" to run some more comprehensive experiments by customising the " +"experimental settings. In the xgboost-comprehensive example (`full code " +"`_), we provide more options to define various experimental" +" setups, including aggregation strategies, data partitioning and " +"centralised/distributed evaluation. We also support :doc:`Flower " +"simulation ` making it easy to simulate large " +"client cohorts in a resource-aware manner. Let's take a look!" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:685 +msgid "Cyclic training" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:687 +msgid "" +"In addition to bagging aggregation, we offer a cyclic training scheme, " +"which performs FL in a client-by-client fashion. Instead of aggregating " +"multiple clients, there is only one single client participating in the " +"training per round in the cyclic training scenario. 
The trained local " +"XGBoost trees will be passed to the next client as an initialised model " +"for next round's boosting." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:693 +msgid "To do this, we first customise a ``ClientManager`` in ``server_utils.py``:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:733 +msgid "" +"The customised ``ClientManager`` samples all available clients in each FL" +" round based on the order of connection to the server. Then, we define a " +"new strategy ``FedXgbCyclic`` in " +"``flwr.server.strategy.fedxgb_cyclic.py``, in order to sequentially " +"select only one client in a given round and pass the received model to " +"the next client." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:775 +msgid "" +"Unlike the original ``FedAvg``, we don't perform aggregation here. " +"Instead, we just make a copy of the received client model as global model" +" by overriding ``aggregate_fit``." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:778 +msgid "" +"Also, the customised ``configure_fit`` and ``configure_evaluate`` methods" +" ensure that the clients are sequentially selected in a given FL round:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:840 +msgid "Customised data partitioning" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:842 +msgid "" +"In ``dataset.py``, we have a function ``instantiate_partitioner`` to " +"instantiate the data partitioner based on the given ``num_partitions`` " +"and ``partitioner_type``. Currently, we provide four supported " +"partitioner types to simulate the uniformity/non-uniformity in data " +"quantity (uniform, linear, square, exponential)."
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:873 +msgid "Customised centralised/distributed evaluation" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:875 +msgid "" +"To facilitate centralised evaluation, we define a function in " +"``server_utils.py``:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:907 +msgid "" +"This function returns an evaluation function which instantiates a " +"``Booster`` object and loads the global model weights to it. The " +"evaluation is conducted by calling the ``eval_set()`` method, and the " +"tested AUC value is reported." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:911 +msgid "" +"As for distributed evaluation on the clients, it's the same as the quick-" +"start example by overriding the ``evaluate()`` method inside the " +"``XgbClient`` class in ``client_utils.py``." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:916 +msgid "Flower simulation" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:918 +msgid "" +"We also provide an example code (``sim.py``) to use the simulation " +"capabilities of Flower to simulate federated XGBoost training on either a" +" single machine or a cluster of machines." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:954 +msgid "" +"After importing all required packages, we define a ``main()`` function to" +" perform the simulation process:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1010 +msgid "" +"We first load the dataset and perform data partitioning, and the pre-" +"processed data is stored in a ``list``. After the simulation begins, the " +"clients won't need to pre-process their partitions again."
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1014 +msgid "Then, we define the strategies and other hyper-parameters:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1065 +msgid "" +"After that, we start the simulation by calling " +"``fl.simulation.start_simulation``:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1085 +msgid "" +"One of the key parameters for ``start_simulation`` is ``client_fn`` which " +"returns a function to construct a client. We define it as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1126 +msgid "Arguments parser" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1128 +msgid "" +"In ``utils.py``, we define the arguments parsers for clients, server and " +"simulation, allowing users to specify different experimental settings. " +"Let's first see the server side:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1175 +msgid "" +"This allows users to specify training strategies / the number of total " +"clients / FL rounds / participating clients / clients for evaluation, and" +" evaluation fashion. Note that with ``--centralised-eval``, the server " +"will do centralised evaluation and all functionalities for client " +"evaluation will be disabled." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1180 +msgid "Then, the argument parser on client side:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1234 +msgid "" +"This defines various options for client data partitioning. Besides, " +"clients also have an option to conduct evaluation on centralised test set" +" by setting ``--centralised-eval``, as well as an option to perform " +"scaled learning rate based on the number of clients by setting " +"``--scaled-lr``." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1239 +msgid "We also have an argument parser for simulation:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1317 +msgid "This integrates all arguments for both client and server sides." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1320 +msgid "Example commands" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1322 +msgid "" +"To run a centralised evaluated experiment with bagging strategy on 5 " +"clients with exponential distribution for 50 rounds, we first start the " +"server as below:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1329 +msgid "Then, on each client terminal, we start the clients:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1335 +msgid "To run the same experiment with Flower simulation:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1341 +msgid "" +"The full `code `_ for this comprehensive example can be found in" +" ``examples/xgboost-comprehensive``." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 +msgid "Build a strategy from scratch" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 +msgid "" +"Welcome to the third part of the Flower federated learning tutorial. In " +"previous parts of this tutorial, we introduced federated learning with " +"PyTorch and the Flower framework (`part 1 " +"`__) and we learned how strategies can be used to customize " +"the execution on both the server and the clients (`part 2 " +"`__)." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +msgid "" +"In this notebook, we'll continue to customize the federated learning " +"system we built previously by creating a custom version of FedAvg using " +"the Flower framework, Flower Datasets, and PyTorch." 
+msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 +msgid "" +"`Star Flower on GitHub `__ ⭐️ and join " +"the Flower community on Flower Discuss and the Flower Slack to connect, " +"ask questions, and get help: - `Join Flower Discuss " +"`__ We'd love to hear from you in the " +"``Introduction`` topic! If anything is unclear, post in ``Flower Help - " +"Beginners``. - `Join Flower Slack `__ We'd " +"love to hear from you in the ``#introductions`` channel! If anything is " +"unclear, head over to the ``#questions`` channel." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:18 +msgid "Let's build a new ``Strategy`` from scratch! 🌼" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:30 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:30 +msgid "Preparation" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:32 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:33 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:32 +msgid "" +"Before we begin with the actual code, let's make sure that we have " +"everything we need." 
+msgstr ""
+
+#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:44
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:45
+#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:44
+msgid "Installing dependencies"
+msgstr ""
+
+#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:46
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:47
+#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:46
+msgid "First, we install the necessary packages:"
+msgstr ""
+
+#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:66
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:67
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:66
+#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:66
+msgid ""
+"Now that we have all dependencies installed, we can import everything we "
+"need for this tutorial:"
+msgstr ""
+
+#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:106
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:106
+#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:106
+msgid ""
+"It is possible to switch to a runtime that has GPU acceleration enabled "
+"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator: "
+"GPU > Save``). Note, however, that Google Colab is not always able to "
+"offer GPU acceleration. If you see an error related to GPU availability "
+"in one of the following sections, consider switching back to CPU-based "
+"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime "
+"has GPU acceleration enabled, you should see the output ``Training on "
+"cuda``, otherwise it'll say ``Training on cpu``."
+msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:119 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:119 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:119 +msgid "Data loading" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:121 +msgid "" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:163 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:163 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 +msgid "Model training/evaluation" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:165 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:165 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:171 +msgid "" +"Let's continue with the usual model definition (including " +"``set_parameters`` and ``get_parameters``), training and test functions:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:256 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:262 +msgid "Flower client" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:264 +msgid "" +"To implement the Flower client, we (again) create a subclass of " +"``flwr.client.NumPyClient`` and implement the three methods " +"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " +"``partition_id`` to the client and use it log additional details. 
We then" +" create an instance of ``ClientApp`` and pass it the ``client_fn``." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:311 +msgid "Let's test what we have so far before we continue:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:357 +msgid "Build a Strategy from scratch" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:359 +msgid "" +"Let’s overwrite the ``configure_fit`` method such that it passes a higher" +" learning rate (potentially also other hyperparameters) to the optimizer " +"of a fraction of the clients. We will keep the sampling of the clients as" +" it is in ``FedAvg`` and then change the configuration dictionary (one of" +" the ``FitIns`` attributes)." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:523 +msgid "" +"The only thing left is to use the newly created custom Strategy " +"``FedCustom`` when starting the experiment:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:559 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:998 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:841 +msgid "Recap" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:561 +msgid "" +"In this notebook, we’ve seen how to implement a custom strategy. A custom" +" strategy enables granular control over client node configuration, result" +" aggregation, and more. To define a custom strategy, you only have to " +"overwrite the abstract methods of the (abstract) base class ``Strategy``." +" To make custom strategies even more powerful, you can pass custom " +"functions to the constructor of your new class (``__init__``) and then " +"call these functions whenever needed." 
+msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:575 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1014 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:813 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:859 +msgid "" +"Before you continue, make sure to join the Flower community on Flower " +"Discuss (`Join Flower Discuss `__) and on " +"Slack (`Join Slack `__)." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:577 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1016 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:815 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:861 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 +msgid "" +"There's a dedicated ``#questions`` channel if you need help, but we'd " +"also love to hear who you are in ``#introductions``!" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:579 +msgid "" +"The `Flower Federated Learning Tutorial - Part 4 " +"`__ introduces ``Client``, the flexible API underlying " +"``NumPyClient``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 +msgid "Customize the client" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 +msgid "" +"Welcome to the fourth part of the Flower federated learning tutorial. In " +"the previous parts of this tutorial, we introduced federated learning " +"with PyTorch and Flower (`part 1 `__), we learned how " +"strategies can be used to customize the execution on both the server and " +"the clients (`part 2 `__), and we built our own " +"custom strategy from scratch (`part 3 `__)." 
+msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 +msgid "" +"In this notebook, we revisit ``NumPyClient`` and introduce a new " +"baseclass for building clients, simply named ``Client``. In previous " +"parts of this tutorial, we've based our client on ``NumPyClient``, a " +"convenience class which makes it easy to work with machine learning " +"libraries that have good NumPy interoperability. With ``Client``, we gain" +" a lot of flexibility that we didn't have before, but we'll also have to " +"do a few things the we didn't have to do before." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:19 +msgid "" +"Let's go deeper and see what it takes to move from ``NumPyClient`` to " +"``Client``! 🌼" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:31 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:30 +msgid "Step 0: Preparation" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:121 +msgid "" +"Let's now define a loading function for the CIFAR-10 training and test " +"set, partition them into ``num_partitions`` smaller datasets (each split " +"into training and validation set), and wrap everything in their own " +"``DataLoader``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:256 +msgid "Step 1: Revisiting NumPyClient" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:258 +msgid "" +"So far, we've implemented our client by subclassing " +"``flwr.client.NumPyClient``. The three methods we implemented are " +"``get_parameters``, ``fit``, and ``evaluate``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:299 +msgid "" +"Then, we define the function ``numpyclient_fn`` that is used by Flower to" +" create the ``FlowerNumpyClient`` instances on demand. 
Finally, we create"
+" the ``ClientApp`` and pass the ``numpyclient_fn`` to it."
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:328
+msgid ""
+"We've seen this before, there's nothing new so far. The only *tiny* "
+"difference compared to the previous notebook is naming, we've changed "
+"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to "
+"``numpyclient_fn``. Next, we configure the number of federated learning "
+"rounds using ``ServerConfig`` and create the ``ServerApp`` with this "
+"config:"
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:355
+msgid ""
+"Finally, we specify the resources for each client and run the simulation "
+"to see the output we get:"
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:389
+msgid ""
+"This works as expected, ten clients are training for three rounds of "
+"federated learning."
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:391
+msgid ""
+"Let's dive a little bit deeper and discuss how Flower executes this "
+"simulation. Whenever a client is selected to do some work, "
+"``run_simulation`` launches the ``ClientApp`` object which in turn calls "
+"the function ``numpyclient_fn`` to create an instance of our "
+"``FlowerNumPyClient`` (along with loading the model and the data)."
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:393
+msgid ""
+"But here's the perhaps surprising part: Flower doesn't actually use the "
+"``FlowerNumPyClient`` object directly. Instead, it wraps the object to "
+"make it look like a subclass of ``flwr.client.Client``, not "
+"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't "
+"know how to handle ``NumPyClient``'s, it only knows how to handle "
+"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on "
+"top of ``Client``."
+msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:395 +msgid "" +"Instead of building on top of ``NumPyClient``, we can directly build on " +"top of ``Client``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:407 +msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:409 +msgid "" +"Let's try to do the same thing using ``Client`` instead of " +"``NumPyClient``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:519 +msgid "" +"Before we discuss the code in more detail, let's try to run it! Gotta " +"make sure our new ``Client``-based client works, right?" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:545 +msgid "" +"That's it, we're now using ``Client``. It probably looks similar to what " +"we've done with ``NumPyClient``. So what's the difference?" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:547 +msgid "" +"First of all, it's more code. But why? The difference comes from the fact" +" that ``Client`` expects us to take care of parameter serialization and " +"deserialization. For Flower to be able to send parameters over the " +"network, it eventually needs to turn these parameters into ``bytes``. " +"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " +"serialization. Turning raw bytes into something more useful (like NumPy " +"``ndarray``'s) is called deserialization. Flower needs to do both: it " +"needs to serialize parameters on the server-side and send them to the " +"client, the client needs to deserialize them to use them for local " +"training, and then serialize the updated parameters again to send them " +"back to the server, which (finally!) deserializes them again in order to " +"aggregate them with the updates received from other clients." 
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:550
+msgid ""
+"The only *real* difference between Client and NumPyClient is that "
+"NumPyClient takes care of serialization and deserialization for you. It "
+"can do so because it expects you to return parameters as NumPy ndarray's,"
+" and it knows how to handle these. This makes working with machine "
+"learning libraries that have good NumPy support (most of them) a breeze."
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:552
+msgid ""
+"In terms of API, there's one major difference: all methods in Client take"
+" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return "
+"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in "
+"``NumPyClient`` on the other hand have multiple arguments (e.g., "
+"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return"
+" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in "
+"``NumPyClient.fit``) if there are multiple things to handle. These "
+"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual "
+"values you're used to from ``NumPyClient``."
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:565
+msgid "Step 3: Custom serialization"
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:567
+msgid ""
+"Here we will explore how to implement custom serialization with a simple "
+"example."
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:569
+msgid ""
+"But first what is serialization? Serialization is just the process of "
+"converting an object into raw bytes, and equally as important, "
+"deserialization is the process of converting raw bytes back into an "
+"object. This is very useful for network communication. Indeed, without "
+"serialization, you could not just send a Python object through the internet."
+msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:571 +msgid "" +"Federated Learning relies heavily on internet communication for training " +"by sending Python objects back and forth between the clients and the " +"server. This means that serialization is an essential part of Federated " +"Learning." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:573 +msgid "" +"In the following section, we will write a basic example where instead of " +"sending a serialized version of our ``ndarray``\\ s containing our " +"parameters, we will first convert the ``ndarray`` into sparse matrices, " +"before sending them. This technique can be used to save bandwidth, as in " +"certain cases where the weights of a model are sparse (containing many 0 " +"entries), converting them to a sparse matrix can greatly improve their " +"bytesize." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:576 +msgid "Our custom serialization/deserialization functions" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:578 +msgid "" +"This is where the real serialization/deserialization will happen, " +"especially in ``ndarray_to_sparse_bytes`` for serialization and " +"``sparse_bytes_to_ndarray`` for deserialization." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:580 +msgid "" +"Note that we imported the ``scipy.sparse`` library in order to convert " +"our arrays." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:668 +msgid "Client-side" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:670 +msgid "" +"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " +"will just have to call our custom functions in our " +"``flwr.client.Client``." 
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:672
+msgid ""
+"Indeed, in ``get_parameters`` we need to serialize the parameters we got "
+"from our network using our custom ``ndarrays_to_sparse_parameters`` "
+"defined above."
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:674
+msgid ""
+"In ``fit``, we first need to deserialize the parameters coming from the "
+"server using our custom ``sparse_parameters_to_ndarrays`` and then we "
+"need to serialize our local results with "
+"``ndarrays_to_sparse_parameters``."
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:676
+msgid ""
+"In ``evaluate``, we will only need to deserialize the global parameters "
+"with our custom function."
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:781
+msgid "Server-side"
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:783
+msgid ""
+"For this example, we will just use ``FedAvg`` as a strategy. To change "
+"the serialization and deserialization here, we only need to reimplement "
+"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other"
+" functions of the strategy will be inherited from the super class "
+"``FedAvg``."
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:785
+msgid "As you can see only one line has changed in ``evaluate``:"
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:791
+msgid ""
+"And for ``aggregate_fit``, we will first deserialize every result we "
+"received:"
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:800
+msgid "And then serialize the aggregated result:"
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:959
+msgid "We can now run our custom serialization example!"
+msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1000 +msgid "" +"In this part of the tutorial, we've seen how we can build clients by " +"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " +"convenience abstraction that makes it easier to work with machine " +"learning libraries that have good NumPy interoperability. ``Client`` is a" +" more flexible abstraction that allows us to do things that are not " +"possible in ``NumPyClient``. In order to do so, it requires us to handle " +"parameter serialization and deserialization ourselves." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1018 +msgid "" +"This is the final part of the Flower tutorial (for now!), " +"congratulations! You're now well equipped to understand the rest of the " +"documentation. There are many topics we didn't cover in the tutorial, we " +"recommend the following resources:" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1020 +msgid "`Read Flower Docs `__" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1021 +msgid "`Check out Flower Code Examples `__" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1022 +msgid "" +"`Use Flower Baselines for your research " +"`__" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1023 +msgid "" +"`Watch Flower AI Summit 2024 videos `__" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 +msgid "Get started with Flower" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 +msgid "Welcome to the Flower federated learning tutorial!" 
+msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 +msgid "" +"In this notebook, we'll build a federated learning system using the " +"Flower framework, Flower Datasets and PyTorch. In part 1, we use PyTorch " +"for the model training pipeline and data loading. In part 2, we federate " +"the PyTorch project using Flower." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:18 +msgid "Let's get started! 🌼" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:32 +msgid "" +"Before we begin with any actual code, let's make sure that we have " +"everything we need." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:44 +msgid "Install dependencies" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:46 +msgid "" +"Next, we install the necessary packages for PyTorch (``torch`` and " +"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " +"(``flwr``):" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:109 +msgid "" +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" +" GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." 
+msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:122 +msgid "Load the data" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:124 +msgid "" +"Federated learning can be applied to many different types of tasks across" +" different domains. In this tutorial, we introduce federated learning by " +"training a simple convolutional neural network (CNN) on the popular " +"CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that " +"distinguish between images from ten different classes: 'airplane', " +"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " +"'truck'." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:135 +msgid "" +"We simulate having multiple datasets from multiple organizations (also " +"called the \"cross-silo\" setting in federated learning) by splitting the" +" original CIFAR-10 dataset into multiple partitions. Each partition will " +"represent the data from a single organization. We're doing this purely " +"for experimentation purposes, in the real world there's no need for data " +"splitting because each organization already has their own data (the data " +"is naturally partitioned)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:137 +msgid "" +"Each organization will act as a client in the federated learning system. " +"Having ten organizations participate in a federation means having ten " +"clients connected to the federated learning server." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:148 +msgid "" +"We use the Flower Datasets library (``flwr-datasets``) to partition " +"CIFAR-10 into ten partitions using ``FederatedDataset``. 
We will create a" +" small training and test set for each of the ten organizations and wrap " +"each of these into a PyTorch ``DataLoader``:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:196 +msgid "" +"We now have a function that can return a training set and validation set " +"(``trainloader`` and ``valloader``) representing one dataset from one of " +"ten different organizations. Each ``trainloader``/``valloader`` pair " +"contains 4000 training examples and 1000 validation examples. There's " +"also a single ``testloader`` (we did not split the test set). Again, this" +" is only necessary for building research or educational systems, actual " +"federated learning systems have their data naturally distributed across " +"multiple partitions." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:199 +msgid "" +"Let's take a look at the first batch of images and labels in the first " +"training set (i.e., ``trainloader`` from ``partition_id=0``) before we " +"move on:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:241 +msgid "" +"The output above shows a random batch of images from the ``trainloader`` " +"from the first of ten partitions. It also prints the labels associated " +"with each image (i.e., one of the ten possible labels we've seen above). " +"If you run the cell again, you should see another batch of images." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:253 +msgid "Step 1: Centralized Training with PyTorch" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:264 +msgid "" +"Next, we're going to use PyTorch to define a simple convolutional neural " +"network. This introduction assumes basic familiarity with PyTorch, so it " +"doesn't cover the PyTorch-related aspects in full detail. 
If you want to " +"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " +"MINUTE BLITZ " +"`__." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:276 +msgid "Define the model" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:278 +msgid "" +"We use the simple CNN described in the `PyTorch tutorial " +"`__:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:315 +msgid "Let's continue with the usual training and test functions:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:375 +msgid "Train the model" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:377 +msgid "" +"We now have all the basic building blocks we need: a dataset, a model, a " +"training function, and a test function. Let's put them together to train " +"the model on the dataset of one of our organizations " +"(``partition_id=0``). This simulates the reality of most machine learning" +" projects today: each organization has their own data and trains models " +"only on this internal data:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 +msgid "" +"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " +"in a test set accuracy of about 41%, which is not good, but at the same " +"time, it doesn't really matter for the purposes of this tutorial. The " +"intent was just to show a simple centralized training pipeline that sets " +"the stage for what comes next - federated learning!" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 +msgid "Step 2: Federated Learning with Flower" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 +msgid "" +"Step 1 demonstrated a simple centralized training pipeline. 
All data was " +"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." +" Next, we'll simulate a situation where we have multiple datasets in " +"multiple organizations and where we train a model over these " +"organizations using federated learning." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 +msgid "Update model parameters" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 +msgid "" +"In federated learning, the server sends global model parameters to the " +"client, and the client updates the local model with parameters received " +"from the server. It then trains the model on the local data (which " +"changes the model parameters locally) and sends the updated/changed model" +" parameters back to the server (or, alternatively, it sends just the " +"gradients back to the server, not the full model parameters)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 +msgid "" +"We need two helper functions to update the local model with parameters " +"received from the server and to get the updated model parameters from the" +" local model: ``set_parameters`` and ``get_parameters``. The following " +"two functions do just that for the PyTorch model above." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 +msgid "" +"The details of how this works are not really important here (feel free to" +" consult the PyTorch documentation if you want to learn more). In " +"essence, we use ``state_dict`` to access PyTorch model parameter tensors." 
+" The parameter tensors are then converted to/from a list of NumPy " +"ndarray's (which the Flower ``NumPyClient`` knows how to " +"serialize/deserialize):" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 +msgid "Define the Flower ClientApp" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 +msgid "" +"With that out of the way, let's move on to the interesting part. " +"Federated learning systems consist of a server and multiple clients. In " +"Flower, we create a ``ServerApp`` and a ``ClientApp`` to run the server-" +"side and client-side code, respectively." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 +msgid "" +"The first step toward creating a ``ClientApp`` is to implement a " +"subclasses of ``flwr.client.Client`` or ``flwr.client.NumPyClient``. We " +"use ``NumPyClient`` in this tutorial because it is easier to implement " +"and requires us to write less boilerplate. 
To implement ``NumPyClient``, " +"we create a subclass that implements the three methods " +"``get_parameters``, ``fit``, and ``evaluate``:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 +msgid "``get_parameters``: Return the current local model parameters" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 +msgid "" +"``fit``: Receive model parameters from the server, train the model on the" +" local data, and return the updated model parameters to the server" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 +msgid "" +"``evaluate``: Receive model parameters from the server, evaluate the " +"model on the local data, and return the evaluation result to the server" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 +msgid "" +"We mentioned that our clients will use the previously defined PyTorch " +"components for model training and evaluation. Let's see a simple Flower " +"client implementation that brings everything together:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 +msgid "" +"Our class ``FlowerClient`` defines how local training/evaluation will be " +"performed and allows Flower to call the local training/evaluation through" +" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" +" *single client* in our federated learning system. Federated learning " +"systems have multiple clients (otherwise, there's not much to federate), " +"so each client will be represented by its own instance of " +"``FlowerClient``. If we have, for example, three clients in our workload," +" then we'd have three instances of ``FlowerClient`` (one on each of the " +"machines we'd start the client on). 
Flower calls ``FlowerClient.fit`` on " +"the respective instance when the server selects a particular client for " +"training (and ``FlowerClient.evaluate`` for evaluation)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:516 +msgid "" +"In this notebook, we want to simulate a federated learning system with 10" +" clients *on a single machine*. This means that the server and all 10 " +"clients will live on a single machine and share resources such as CPU, " +"GPU, and memory. Having 10 clients would mean having 10 instances of " +"``FlowerClient`` in memory. Doing this on a single machine can quickly " +"exhaust the available memory resources, even if only a subset of these " +"clients participates in a single round of federated learning." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:518 +msgid "" +"In addition to the regular capabilities where server and clients run on " +"multiple machines, Flower, therefore, provides special simulation " +"capabilities that create ``FlowerClient`` instances only when they are " +"actually necessary for training or evaluation. To enable the Flower " +"framework to create clients when necessary, we need to implement a " +"function that creates a ``FlowerClient`` instance on demand. We typically" +" call this function ``client_fn``. Flower calls ``client_fn`` whenever it" +" needs an instance of one particular client to call ``fit`` or " +"``evaluate`` (those instances are usually discarded after use, so they " +"should not keep any local state). In federated learning experiments using" +" Flower, clients are identified by a partition ID, or ``partition-id``. " +"This ``partition-id`` is used to load different local data partitions for" +" different clients, as can be seen below. 
The value of ``partition-id`` " +"is retrieved from the ``node_config`` dictionary in the ``Context`` " +"object, which holds the information that persists throughout each " +"training round." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:522 +msgid "" +"With this, we have the class ``FlowerClient`` which defines client-side " +"training/evaluation and ``client_fn`` which allows Flower to create " +"``FlowerClient`` instances whenever it needs to call ``fit`` or " +"``evaluate`` on one particular client. Last, but definitely not least, we" +" create an instance of ``ClientApp`` and pass it the ``client_fn``. " +"``ClientApp`` is the entrypoint that a running Flower client uses to call" +" your code (as defined in, for example, ``FlowerClient.fit``)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:563 +#, fuzzy +msgid "Define the Flower ServerApp" +msgstr "Clone o repositório do flower." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:565 +msgid "" +"On the server side, we need to configure a strategy which encapsulates " +"the federated learning approach/algorithm, for example, *Federated " +"Averaging* (FedAvg). Flower has a number of built-in strategies, but we " +"can also use our own strategy implementations to customize nearly all " +"aspects of the federated learning approach. For this example, we use the " +"built-in ``FedAvg`` implementation and customize it using a few basic " +"parameters:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:592 +msgid "" +"Similar to ``ClientApp``, we create a ``ServerApp`` using a utility " +"function ``server_fn``. In ``server_fn``, we pass an instance of " +"``ServerConfig`` for defining the number of federated learning rounds " +"(``num_rounds``) and we also pass the previously created ``strategy``. 
" +"The ``server_fn`` returns a ``ServerAppComponents`` object containing the" +" settings that define the ``ServerApp`` behaviour. ``ServerApp`` is the " +"entrypoint that Flower uses to call all your server-side code (for " +"example, the strategy)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:629 +msgid "Run the training" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:631 +msgid "" +"In simulation, we often want to control the amount of resources each " +"client can use. In the next cell, we specify a ``backend_config`` " +"dictionary with the ``client_resources`` key (required) for defining the " +"amount of CPU and GPU resources each client can access." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:659 +msgid "" +"The last step is the actual call to ``run_simulation`` which - you " +"guessed it - runs the simulation. ``run_simulation`` accepts a number of " +"arguments: - ``server_app`` and ``client_app``: the previously created " +"``ServerApp`` and ``ClientApp`` objects, respectively - " +"``num_supernodes``: the number of ``SuperNodes`` to simulate which equals" +" the number of clients for Flower simulation - ``backend_config``: the " +"resource allocation used in this simulation" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:686 +msgid "Behind the scenes" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:688 +msgid "So how does this work? How does Flower execute this simulation?" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:690 +#, python-format +msgid "" +"When we call ``run_simulation``, we tell Flower that there are 10 clients" +" (``num_supernodes=10``, where 1 ``SuperNode`` launches 1 ``ClientApp``)." +" Flower then goes ahead an asks the ``ServerApp`` to issue an " +"instructions to those nodes using the ``FedAvg`` strategy. 
``FedAvg`` " +"knows that it should select 100% of the available clients " +"(``fraction_fit=1.0``), so it goes ahead and selects 10 random clients " +"(i.e., 100% of 10)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:692 +msgid "" +"Flower then asks the selected 10 clients to train the model. Each of the " +"10 ``ClientApp`` instances receives a message, which causes it to call " +"``client_fn`` to create an instance of ``FlowerClient``. It then calls " +"``.fit()`` on each the ``FlowerClient`` instances and returns the " +"resulting model parameter updates to the ``ServerApp``. When the " +"``ServerApp`` receives the model parameter updates from the clients, it " +"hands those updates over to the strategy (*FedAvg*) for aggregation. The " +"strategy aggregates those updates and returns the new global model, which" +" then gets used in the next round of federated learning." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:705 +msgid "Where's the accuracy?" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:707 +msgid "" +"You may have noticed that all metrics except for ``losses_distributed`` " +"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:709 +msgid "" +"Flower can automatically aggregate losses returned by individual clients," +" but it cannot do the same for metrics in the generic metrics dictionary " +"(the one with the ``accuracy`` key). Metrics dictionaries can contain " +"very different kinds of metrics and even key/value pairs that are not " +"metrics at all, so the framework does not (and can not) know how to " +"handle these automatically." 
+msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 +msgid "" +"As users, we need to tell the framework how to handle/aggregate these " +"custom metrics, and we do so by passing metric aggregation functions to " +"the strategy. The strategy will then call these functions whenever it " +"receives fit or evaluate metrics from clients. The two possible functions" +" are ``fit_metrics_aggregation_fn`` and " +"``evaluate_metrics_aggregation_fn``." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 +msgid "" +"Let's create a simple weighted averaging function to aggregate the " +"``accuracy`` metric we return from ``evaluate``:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:781 +msgid "" +"We now have a full system that performs federated training and federated " +"evaluation. It uses the ``weighted_average`` function to aggregate custom" +" evaluation metrics and calculates a single ``accuracy`` metric across " +"all clients on the server side." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:783 +msgid "" +"The other two categories of metrics (``losses_centralized`` and " +"``metrics_centralized``) are still empty because they only apply when " +"centralized evaluation is being used. Part two of the Flower tutorial " +"will cover centralized evaluation." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:795 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 +msgid "Final remarks" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:797 +msgid "" +"Congratulations, you just trained a convolutional neural network, " +"federated over 10 clients! With that, you understand the basics of " +"federated learning with Flower. 
The same approach you've seen can be used" +" with other machine learning frameworks (not just PyTorch) and tasks (not" +" just CIFAR-10 images classification), for example NLP with Hugging Face " +"Transformers or speech with SpeechBrain." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:799 +msgid "" +"In the next notebook, we're going to cover some more advanced concepts. " +"Want to customize your strategy? Initialize parameters on the server " +"side? Or evaluate the aggregated model on the server side? We'll cover " +"all this and more in the next tutorial." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:817 +msgid "" +"The `Flower Federated Learning Tutorial - Part 2 " +"`__ goes into more depth about strategies and all " +"the advanced things you can build with them." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 +msgid "Use a federated learning strategy" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 +msgid "" +"Welcome to the next part of the federated learning tutorial. In previous " +"parts of this tutorial, we introduced federated learning with PyTorch and" +" Flower (`part 1 `__)." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 +msgid "" +"In this notebook, we'll begin to customize the federated learning system " +"we built in the introductory notebook again, using the Flower framework, " +"Flower Datasets, and PyTorch." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:18 +msgid "Let's move beyond FedAvg with Flower strategies! 
🌼" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:121 +msgid "" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``. We introduce a new parameter" +" ``num_partitions`` which allows us to call ``load_datasets`` with " +"different numbers of partitions." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:321 +msgid "Strategy customization" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 +msgid "" +"So far, everything should look familiar if you've worked through the " +"introductory notebook. With that, we're ready to introduce a number of " +"new features." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:335 +msgid "Server-side parameter **initialization**" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:337 +msgid "" +"Flower, by default, initializes the global model by asking one random " +"client for the initial parameters. In many cases, we want more control " +"over parameter initialization though. Flower therefore allows you to " +"directly pass the initial parameters to the Strategy. We create an " +"instance of ``Net()`` and get the paramaters as follows:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:358 +msgid "" +"Next, we create a ``server_fn`` that returns the components needed for " +"the server. Within ``server_fn``, we create a Strategy that uses the " +"initial parameters." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:393 +msgid "" +"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" +" from asking one of the clients for the initial parameters. 
In " +"``server_fn``, we pass this new ``strategy`` and a ``ServerConfig`` for " +"defining the number of federated learning rounds (``num_rounds``)." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:395 +msgid "" +"Similar to the ``ClientApp``, we now create the ``ServerApp`` using the " +"``server_fn``:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:416 +msgid "" +"Last but not least, we specify the resources for each client and run the " +"simulation." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:448 +msgid "" +"If we look closely, we can see that the logs do not show any calls to the" +" ``FlowerClient.get_parameters`` method." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:460 +msgid "Starting with a customized strategy" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:462 +msgid "" +"We've seen the function ``run_simulation`` before. It accepts a number of" +" arguments, amongst them the ``server_app`` which wraps around the " +"strategy and number of training rounds, ``client_app`` which wraps around" +" the ``client_fn`` used to create ``FlowerClient`` instances, and the " +"number of clients to simulate which equals ``num_supernodes``." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:464 +msgid "" +"The strategy encapsulates the federated learning approach/algorithm, for " +"example, ``FedAvg`` or ``FedAdagrad``. 
Let's try to use a different " +"strategy this time:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:509 +msgid "Server-side parameter **evaluation**" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:511 +msgid "" +"Flower can evaluate the aggregated model on the server-side or on the " +"client-side. Client-side and server-side evaluation are similar in some " +"ways, but different in others." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:513 +msgid "" +"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " +"simple: it works the same way that evaluation in centralized machine " +"learning does. If there is a server-side dataset that can be used for " +"evaluation purposes, then that's great. We can evaluate the newly " +"aggregated model after each round of training without having to send the " +"model to clients. We're also fortunate in the sense that our entire " +"evaluation dataset is available at all times." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:515 +msgid "" +"**Federated Evaluation** (or *client-side evaluation*) is more complex, " +"but also more powerful: it doesn't require a centralized dataset and " +"allows us to evaluate models over a larger set of data, which often " +"yields more realistic evaluation results. In fact, many scenarios require" +" us to use **Federated Evaluation** if we want to get representative " +"evaluation results at all. But this power comes at a cost: once we start " +"to evaluate on the client side, we should be aware that our evaluation " +"dataset can change over consecutive rounds of learning if those clients " +"are not always available. Moreover, the dataset held by each client can " +"also change over consecutive rounds. 
This can lead to evaluation results " +"that are not stable, so even if we would not change the model, we'd see " +"our evaluation results fluctuate over consecutive rounds." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:518 +msgid "" +"We've seen how federated evaluation works on the client side (i.e., by " +"implementing the ``evaluate`` method in ``FlowerClient``). Now let's see " +"how we can evaluate aggregated model parameters on the server-side:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:549 +msgid "" +"We create a ``FedAvg`` strategy and pass ``evaluate_fn`` to it. Then, we " +"create a ``ServerApp`` that uses this strategy." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:586 +msgid "Finally, we run the simulation." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 +msgid "Sending/receiving arbitrary values to/from clients" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 +msgid "" +"In some situations, we want to configure client-side execution (training," +" evaluation) from the server-side. One example for that is the server " +"asking the clients to train for a certain number of local epochs. Flower " +"provides a way to send configuration values from the server to the " +"clients using a dictionary. Let's look at an example where the clients " +"receive values from the server through the ``config`` parameter in " +"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " +"method receives the configuration dictionary through the ``config`` " +"parameter and can then read values from this dictionary. 
In this example," +" it reads ``server_round`` and ``local_epochs`` and uses those values to " +"improve the logging and configure the number of local training epochs:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:674 +msgid "" +"So how can we send this config dictionary from server to clients? The " +"built-in Flower Strategies provide way to do this, and it works similarly" +" to the way server-side evaluation works. We provide a function to the " +"strategy, and the strategy calls this function for every round of " +"federated learning:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:704 +msgid "" +"Next, we'll pass this function to the FedAvg strategy before starting the" +" simulation:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:749 +msgid "" +"As we can see, the client logs now include the current round of federated" +" learning (which they read from the ``config`` dictionary). We can also " +"configure local training to run for one epoch during the first and second" +" round of federated learning, and then for two epochs during the third " +"round." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:751 +msgid "" +"Clients can also return arbitrary values to the server. To do so, they " +"return a dictionary from ``fit`` and/or ``evaluate``. We have seen and " +"used this concept throughout this notebook without mentioning it " +"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" +" key/value pair as the third return value in ``evaluate``." 
+msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:763 +msgid "Scaling federated learning" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:765 +msgid "" +"As a last step in this notebook, let's see how we can use Flower to " +"experiment with a large number of clients." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:785 +msgid "" +"Note that we can reuse the ``ClientApp`` for different ``num-partitions``" +" since the Context is defined by the ``num_supernodes`` argument in " +"``run_simulation()``." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:787 +#, python-format +msgid "" +"We now have 1000 partitions, each holding 45 training and 5 validation " +"examples. Given that the number of training examples on each client is " +"quite small, we should probably train the model a bit longer, so we " +"configure the clients to perform 3 local training epochs. We should also " +"adjust the fraction of clients selected for training during each round " +"(we don't want all 1000 clients participating in every round), so we " +"adjust ``fraction_fit`` to ``0.025``, which means that only 2.5% of " +"available clients (so 25 clients) will be selected for training each " +"round:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:843 +msgid "" +"In this notebook, we've seen how we can gradually enhance our system by " +"customizing the strategy, initializing parameters on the server side, " +"choosing a different strategy, and evaluating models on the server-side. " +"That's quite a bit of flexibility with so little code, right?" 
+msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:845 +msgid "" +"In the later sections, we've seen how we can communicate arbitrary values" +" between server and clients to fully customize client-side execution. " +"With that capability, we built a large-scale Federated Learning " +"simulation using the Flower Virtual Client Engine and ran an experiment " +"involving 1000 clients in the same workload - all in a Jupyter Notebook!" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:863 +msgid "" +"The `Flower Federated Learning Tutorial - Part 3 " +"`__ shows how to build a fully custom ``Strategy`` from " +"scratch." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 +msgid "What is Federated Learning?" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 +msgid "" +"In this tutorial, you will learn what federated learning is, build your " +"first system in Flower, and gradually extend it. If you work through all " +"parts of the tutorial, you will be able to build advanced federated " +"learning systems that approach the current state of the art in the field." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 +msgid "" +"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " +"federated learning. Only a basic understanding of data science and Python" +" programming is assumed." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 +msgid "" +"`Star Flower on GitHub `__ ⭐️ and join " +"the open-source Flower community on Slack to connect, ask questions, and " +"get help: `Join Slack `__ 🌼 We'd love to " +"hear from you in the ``#introductions`` channel! And if anything is " +"unclear, head over to the ``#questions`` channel." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 +msgid "Let's get started!" 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 +msgid "Classic machine learning" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 +msgid "" +"Before we begin to discuss federated learning, let us quickly recap how " +"most machine learning works today." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 +msgid "" +"In machine learning, we have a model, and we have data. The model could " +"be a neural network (as depicted here), or something else, like classical" +" linear regression." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 +msgid "|ac0a9766e26044d6aea222a829859b20|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 +msgid "Model and data" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 +msgid "" +"We train the model using the data to perform a useful task. A task could " +"be to detect objects in images, transcribe an audio recording, or play a " +"game like Go." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 +msgid "|36cd6e248b1443ce8a82b5a025bba368|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 +msgid "Train model using data" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 +msgid "" +"Now, in practice, the training data we work with doesn't originate on the" +" machine we train the model on. It gets created somewhere else." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 +msgid "" +"It originates on a smartphone by the user interacting with an app, a car " +"collecting sensor data, a laptop receiving input via the keyboard, or a " +"smart speaker listening to someone trying to sing a song." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 +msgid "|bf4fb057f4774df39e1dcb5c71fd804a|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 +msgid "Data on a phone" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 +msgid "" +"What's also important to mention, this \"somewhere else\" is usually not " +"just one place, it's many places. It could be several devices all running" +" the same app. But it could also be several organizations, all generating" +" data for the same task." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 +msgid "|71bb9f3c74c04f959b9bc1f02b736c95|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 +msgid "Data is on many devices" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 +msgid "" +"So to use machine learning, or any kind of data analysis, the approach " +"that has been used in the past was to collect all data on a central " +"server. This server can be somewhere in a data center, or somewhere in " +"the cloud." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 +msgid "|7605632e1b0f49599ffacf841491fcfb|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 +msgid "Central data collection" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 +msgid "" +"Once all the data is collected in one place, we can finally use machine " +"learning algorithms to train our model on the data. This is the machine " +"learning approach that we've basically always relied on." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 +msgid "|91b1b5a7d3484eb7a2350c1923f18307|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 +msgid "Central model training" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 +msgid "Challenges of classical machine learning" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 +msgid "" +"The classic machine learning approach we've just seen can be used in some" +" cases. Great examples include categorizing holiday photos, or analyzing " +"web traffic. Cases, where all the data is naturally available on a " +"centralized server." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 +msgid "|5405ed430e4746e28b083b146fb71731|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 +msgid "Centralized possible" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 +msgid "" +"But the approach can not be used in many other cases. Cases, where the " +"data is not available on a centralized server, or cases where the data " +"available on one server is not enough to train a good model." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 +msgid "|a389e87dab394eb48a8949aa2397687b|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 +msgid "Centralized impossible" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 +msgid "" +"There are many reasons why the classic centralized machine learning " +"approach does not work for a large number of highly important real-world " +"use cases. 
Those reasons include:" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 +msgid "" +"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " +"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " +"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " +"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " +"protect sensitive data from being moved. In fact, those regulations " +"sometimes even prevent single organizations from combining their own " +"users' data for artificial intelligence training because those users live" +" in different parts of the world, and their data is governed by different" +" data protection regulations." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 +msgid "" +"**User preference**: In addition to regulation, there are use cases where" +" users just expect that no data leaves their device, ever. If you type " +"your passwords and credit card info into the digital keyboard of your " +"phone, you don't expect those passwords to end up on the server of the " +"company that developed that keyboard, do you? In fact, that use case was " +"the reason federated learning was invented in the first place." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 +msgid "" +"**Data volume**: Some sensors, like cameras, produce such a high data " +"volume that it is neither feasible nor economic to collect all the data " +"(due to, for example, bandwidth or communication efficiency). Think about" +" a national rail service with hundreds of train stations across the " +"country. If each of these train stations is outfitted with a number of " +"security cameras, the volume of raw on-device data they produce requires " +"incredibly powerful and exceedingly expensive infrastructure to process " +"and store. And most of the data isn't even useful." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 +msgid "Examples where centralized machine learning does not work include:" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 +msgid "" +"Sensitive healthcare records from multiple hospitals to train cancer " +"detection models" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 +msgid "" +"Financial information from different organizations to detect financial " +"fraud" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 +msgid "Location data from your electric car to make better range prediction" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 +msgid "End-to-end encrypted messages to train better auto-complete models" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 +msgid "" +"The popularity of privacy-enhancing systems like the `Brave " +"`__ browser or the `Signal `__ " +"messenger shows that users care about privacy. In fact, they choose the " +"privacy-enhancing version over other alternatives, if such an alternative" +" exists. But what can we do to apply machine learning and data science to" +" these cases to utilize private data? After all, these are all areas that" +" would benefit significantly from recent advances in AI." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 +msgid "Federated learning" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 +msgid "" +"Federated learning simply reverses this approach. It enables machine " +"learning on distributed data by moving the training to the data, instead " +"of moving the data to the training. 
Here's the single-sentence " +"explanation:" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 +msgid "Central machine learning: move the data to the computation" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 +msgid "Federated (machine) learning: move the computation to the data" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 +msgid "" +"By doing so, it enables us to use machine learning (and other data " +"science approaches) in areas where it wasn't possible before. We can now " +"train excellent medical AI models by enabling different hospitals to work" +" together. We can solve financial fraud by training AI models on the data" +" of different financial institutions. We can build novel privacy-" +"enhancing applications (such as secure messaging) that have better built-" +"in AI than their non-privacy-enhancing alternatives. And those are just a" +" few of the examples that come to mind. As we deploy federated learning, " +"we discover more and more areas that can suddenly be reinvented because " +"they now have access to vast amounts of previously inaccessible data." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 +msgid "" +"So how does federated learning work, exactly? Let's start with an " +"intuitive explanation." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 +msgid "Federated learning in five steps" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 +msgid "Step 0: Initialize global model" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 +msgid "" +"We start by initializing the model on the server. This is exactly the " +"same in classic centralized learning: we initialize the model parameters," +" either randomly or from a previously saved checkpoint." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 +msgid "|89c412136a5146ec8dc32c0973729f12|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 +msgid "Initialize global model" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 +msgid "" +"Step 1: Send model to a number of connected organizations/devices (client" +" nodes)" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 +msgid "" +"Next, we send the parameters of the global model to the connected client " +"nodes (think: edge devices like smartphones or servers belonging to " +"organizations). This is to ensure that each participating node starts " +"their local training using the same model parameters. We often use only a" +" few of the connected nodes instead of all nodes. The reason for this is " +"that selecting more and more client nodes has diminishing returns." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 +msgid "|9503d3dc3a144e8aa295f8800cd8a766|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 +msgid "Send global model" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 +msgid "" +"Step 2: Train model locally on the data of each organization/device " +"(client node)" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 +msgid "" +"Now that all (selected) client nodes have the latest version of the " +"global model parameters, they start the local training. They use their " +"own local dataset to train their own local model. They don't train the " +"model until full convergence, but they only train for a little while. " +"This could be as little as one epoch on the local data, or even just a " +"few steps (mini-batches)." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 +msgid "|aadb59e29b9e445d8e239d9a8a7045cb|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 +msgid "Train on local data" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 +msgid "Step 3: Return model updates back to the server" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 +msgid "" +"After local training, each client node has a slightly different version " +"of the model parameters they originally received. The parameters are all " +"different because each client node has different examples in its local " +"dataset. The client nodes then send those model updates back to the " +"server. The model updates they send can either be the full model " +"parameters or just the gradients that were accumulated during local " +"training." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 +msgid "|a7579ad7734347508e959d9e14f2f53d|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 +msgid "Send model updates" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 +msgid "Step 4: Aggregate model updates into a new global model" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 +msgid "" +"The server receives model updates from the selected client nodes. If it " +"selected 100 client nodes, it now has 100 slightly different versions of " +"the original global model, each trained on the local data of one client. " +"But didn't we want to have one model that contains the learnings from the" +" data of all 100 client nodes?" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 +msgid "" +"In order to get one single model, we have to combine all the model " +"updates we received from the client nodes. 
This process is called " +"*aggregation*, and there are many different ways to do it. The most basic" +" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " +"`__), often abbreviated as *FedAvg*. " +"*FedAvg* takes the 100 model updates and, as the name suggests, averages " +"them. To be more precise, it takes the *weighted average* of the model " +"updates, weighted by the number of examples each client used for " +"training. The weighting is important to make sure that each data example " +"has the same \"influence\" on the resulting global model. If one client " +"has 10 examples, and another client has 100 examples, then - without " +"weighting - each of the 10 examples would influence the global model ten " +"times as much as each of the 100 examples." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 +msgid "|73d15dd1d4fc41678b2d54815503fbe8|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 +msgid "Aggregate model updates" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 +msgid "Step 5: Repeat steps 1 to 4 until the model converges" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 +msgid "" +"Steps 1 to 4 are what we call a single round of federated learning. The " +"global model parameters get sent to the participating client nodes (step " +"1), the client nodes train on their local data (step 2), they send their " +"updated models to the server (step 3), and the server then aggregates the" +" model updates to get a new version of the global model (step 4)." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 +msgid "" +"During a single round, each client node that participates in that " +"iteration only trains for a little while. 
This means that after the " +"aggregation step (step 4), we have a model that has been trained on all " +"the data of all participating client nodes, but only for a little while. " +"We then have to repeat this training process over and over again to " +"eventually arrive at a fully trained model that performs well across the " +"data of all client nodes." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 +msgid "" +"Congratulations, you now understand the basics of federated learning. " +"There's a lot more to discuss, of course, but that was federated learning" +" in a nutshell. In later parts of this tutorial, we will go into more " +"detail. Interesting questions include: How can we select the best client " +"nodes that should participate in the next round? What's the best way to " +"aggregate model updates? How can we handle failing client nodes " +"(stragglers)?" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 +msgid "" +"Just like we can train a model on the decentralized data of different " +"client nodes, we can also evaluate the model on that data to receive " +"valuable metrics. This is called federated evaluation, sometimes " +"abbreviated as FE. In fact, federated evaluation is an integral part of " +"most federated learning systems." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 +msgid "Federated analytics" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 +msgid "" +"In many cases, machine learning isn't necessary to derive value from " +"data. Data analysis can yield valuable insights, but again, there's often" +" not enough data to get a clear answer. What's the average age at which " +"people develop a certain type of health condition? Federated analytics " +"enables such queries over multiple client nodes. 
It is usually used in " +"conjunction with other privacy-enhancing technologies like secure " +"aggregation to prevent the server from seeing the results submitted by " +"individual client nodes." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 +msgid "" +"Differential privacy (DP) is often mentioned in the context of Federated " +"Learning. It is a privacy-preserving method used when analyzing and " +"sharing statistical data, ensuring the privacy of individual " +"participants. DP achieves this by adding statistical noise to the model " +"updates, ensuring any individual participants’ information cannot be " +"distinguished or re-identified. This technique can be considered an " +"optimization that provides a quantifiable privacy protection measure." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 +msgid "Flower" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 +msgid "" +"Federated learning, federated evaluation, and federated analytics require" +" infrastructure to move machine learning models back and forth, train and" +" evaluate them on local data, and then aggregate the updated models. " +"Flower provides the infrastructure to do exactly that in an easy, " +"scalable, and secure way. In short, Flower presents a unified approach to" +" federated learning, analytics, and evaluation. It allows the user to " +"federate any workload, any ML framework, and any programming language." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 +msgid "|55472eef61274ba1b739408607e109df|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 +msgid "" +"Flower federated learning server and client nodes (car, scooter, personal" +" computer, roomba, and phone)" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 +msgid "" +"Congratulations, you just learned the basics of federated learning and " +"how it relates to the classic (centralized) machine learning!" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 +msgid "" +"In the next part of this tutorial, we are going to build a first " +"federated learning system with Flower." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 +msgid "" +"Before you continue, make sure to join the Flower community on Slack: " +"`Join Slack `__" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 +msgid "" +"The `Flower Federated Learning Tutorial - Part 1 " +"`__ shows how to build a simple federated learning system " +"with PyTorch and Flower." +msgstr "" + +#~ msgid "" +#~ "Configuring and setting up the " +#~ ":code:`Dockerfile` as well the configuration" +#~ " for the devcontainer can be a " +#~ "bit more involved. The good thing " +#~ "is you want have to do it. " +#~ "Usually it should be enough to " +#~ "install Docker on your system and " +#~ "ensure its available on your command " +#~ "line. Additionally, install the `VSCode " +#~ "Containers Extension `_." 
+#~ msgstr "" + +#~ msgid "" +#~ "``flwr = { path = " +#~ "\"../../dist/flwr-1.0.0-py3-none-any.whl\" }`` " +#~ "(without extras)" +#~ msgstr "" + +#~ msgid "" +#~ "``flwr = { path = " +#~ "\"../../dist/flwr-1.0.0-py3-none-any.whl\", extras =" +#~ " [\"simulation\"] }`` (with extras)" +#~ msgstr "" + +#~ msgid "Upload the whl (e.g., ``flwr-1.7.0-py3-none-any.whl``)" +#~ msgstr "" + +#~ msgid "" +#~ "Change ``!pip install -q 'flwr[simulation]'" +#~ " torch torchvision matplotlib`` to ``!pip" +#~ " install -q 'flwr-1.7.0-py3-none-" +#~ "any.whl[simulation]' torch torchvision matplotlib``" +#~ msgstr "" + +#~ msgid "Before the release" +#~ msgstr "" + +#~ msgid "" +#~ "Update the changelog (``changelog.md``) with" +#~ " all relevant changes that happened " +#~ "after the last release. If the " +#~ "last release was tagged ``v1.2.0``, you" +#~ " can use the following URL to " +#~ "see all commits that got merged " +#~ "into ``main`` since then:" +#~ msgstr "" + +#~ msgid "" +#~ "`GitHub: Compare v1.2.0...main " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "Thank the authors who contributed since" +#~ " the last release. This can be " +#~ "done by running the ``./dev/add-" +#~ "shortlog.sh`` convenience script (it can " +#~ "be ran multiple times and will " +#~ "update the names in the list if" +#~ " new contributors were added in the" +#~ " meantime)." +#~ msgstr "" + +#~ msgid "" +#~ "Update the ``changelog.md`` section header " +#~ "``Unreleased`` to contain the version " +#~ "number and date for the release " +#~ "you are building. Create a pull " +#~ "request with the change." +#~ msgstr "" + +#~ msgid "" +#~ "Tag the release commit with the " +#~ "version number as soon as the PR" +#~ " is merged: ``git tag v0.12.3``, then" +#~ " ``git push --tags``. This will " +#~ "create a draft release on GitHub " +#~ "containing the correct artifacts and the" +#~ " relevant part of the changelog." 
+#~ msgstr "" + +#~ msgid "" +#~ "Note that, in order to build the" +#~ " documentation locally (with ``poetry run" +#~ " make html``, like described below), " +#~ "`Pandoc _` needs " +#~ "to be installed on the system." +#~ msgstr "" + +#~ msgid "" +#~ "If you're familiar with how contributing" +#~ " on GitHub works, you can directly" +#~ " checkout our `getting started guide " +#~ "for contributors `_ and examples " +#~ "of `good first contributions " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "This will create a `flower/` (or " +#~ "the name of your fork if you " +#~ "renamed it) folder in the current " +#~ "working directory." +#~ msgstr "" + +#~ msgid "Otherwise you can always find this option in the `Branches` page." +#~ msgstr "" + +#~ msgid "" +#~ "Once you click the `Compare & pull" +#~ " request` button, you should see " +#~ "something similar to this:" +#~ msgstr "" + +#~ msgid "Find the source file in `doc/source`" +#~ msgstr "" + +#~ msgid "" +#~ "Make the change in the `.rst` file" +#~ " (beware, the dashes under the title" +#~ " should be the same length as " +#~ "the title itself)" +#~ msgstr "" + +#~ msgid "Change the file name to `save-progress.rst`" +#~ msgstr "" + +#~ msgid "Add a redirect rule to `doc/source/conf.py`" +#~ msgstr "" + +#~ msgid "" +#~ "This will cause a redirect from " +#~ "`saving-progress.html` to `save-progress.html`," +#~ " old links will continue to work." +#~ msgstr "" + +#~ msgid "" +#~ "For the lateral navigation bar to " +#~ "work properly, it is very important " +#~ "to update the `index.rst` file as " +#~ "well. This is where we define the" +#~ " whole arborescence of the navbar." 
+#~ msgstr "" + +#~ msgid "Find and modify the file name in `index.rst`" +#~ msgstr "" + +#~ msgid "Add CI job to deploy the staging system when the `main` branch changes" +#~ msgstr "" + +#~ msgid "`Python 3.7 `_ or above" +#~ msgstr "" + +#~ msgid "" +#~ "First, clone the `Flower repository " +#~ "`_ from GitHub::" +#~ msgstr "" + +#~ msgid "" +#~ "Second, create a virtual environment " +#~ "(and activate it). If you chose to" +#~ " use :code:`pyenv` (with the :code" +#~ ":`pyenv-virtualenv` plugin) and already " +#~ "have it installed , you can use" +#~ " the following convenience script (by " +#~ "default it will use :code:`Python " +#~ "3.8.17`, but you can change it by" +#~ " providing a specific :code:``)::" +#~ msgstr "" + +#~ msgid "" +#~ "If you don't have :code:`pyenv` " +#~ "installed, you can use the following " +#~ "script that will install pyenv, set " +#~ "it up and create the virtual " +#~ "environment (with :code:`Python 3.8.17` by " +#~ "default)::" +#~ msgstr "" + +#~ msgid "" +#~ "Third, install the Flower package in " +#~ "development mode (think :code:`pip install " +#~ "-e`) along with all necessary " +#~ "dependencies::" +#~ msgstr "" + +#~ msgid "" +#~ "Developers could run the full set " +#~ "of Github Actions workflows under their" +#~ " local environment by using `Act " +#~ "_`. Please refer to" +#~ " the installation instructions under the" +#~ " linked repository and run the next" +#~ " command under Flower main cloned " +#~ "repository folder::" +#~ msgstr "" + +#~ msgid "" +#~ "Please note that these components are" +#~ " still experimental, the correct " +#~ "configuration of DP for a specific " +#~ "task is still an unsolved problem." +#~ msgstr "" + +#~ msgid "" +#~ "The distribution of the update norm " +#~ "has been shown to vary from " +#~ "task-to-task and to evolve as " +#~ "training progresses. 
Therefore, we use " +#~ "an adaptive approach [andrew]_ that " +#~ "continuously adjusts the clipping threshold" +#~ " to track a prespecified quantile of" +#~ " the update norm distribution." +#~ msgstr "" + +#~ msgid "" +#~ "We make (and attempt to enforce) a" +#~ " number of assumptions that must be" +#~ " satisfied to ensure that the " +#~ "training process actually realises the " +#~ ":math:`(\\epsilon, \\delta)` guarantees the " +#~ "user has in mind when configuring " +#~ "the setup." +#~ msgstr "" + +#~ msgid "" +#~ "The first two are useful for " +#~ "eliminating a multitude of complications " +#~ "associated with calibrating the noise to" +#~ " the clipping threshold while the " +#~ "third one is required to comply " +#~ "with the assumptions of the privacy " +#~ "analysis." +#~ msgstr "" + +#~ msgid "" +#~ "The first version of our solution " +#~ "was to define a decorator whose " +#~ "constructor accepted, among other things, " +#~ "a boolean valued variable indicating " +#~ "whether adaptive clipping was to be " +#~ "enabled or not. We quickly realized " +#~ "that this would clutter its " +#~ ":code:`__init__()` function with variables " +#~ "corresponding to hyperparameters of adaptive" +#~ " clipping that would remain unused " +#~ "when it was disabled. A cleaner " +#~ "implementation could be achieved by " +#~ "splitting the functionality into two " +#~ "decorators, :code:`DPFedAvgFixed` and " +#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" +#~ " classing the former. The constructors " +#~ "for both classes accept a boolean " +#~ "parameter :code:`server_side_noising`, which, as " +#~ "the name suggests, determines where " +#~ "noising is to be performed." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`aggregate_fit()`: We check whether any" +#~ " of the sampled clients dropped out" +#~ " or failed to upload an update " +#~ "before the round timed out. 
In " +#~ "that case, we need to abort the" +#~ " current round, discarding any successful" +#~ " updates that were received, and move" +#~ " on to the next one. On the " +#~ "other hand, if all clients responded " +#~ "successfully, we must force the " +#~ "averaging of the updates to happen " +#~ "in an unweighted manner by intercepting" +#~ " the :code:`parameters` field of " +#~ ":code:`FitRes` for each received update " +#~ "and setting it to 1. Furthermore, " +#~ "if :code:`server_side_noising=true`, each update " +#~ "is perturbed with an amount of " +#~ "noise equal to what it would have" +#~ " been subjected to had client-side" +#~ " noising being enabled. This entails " +#~ "*pre*-processing of the arguments to " +#~ "this method before passing them on " +#~ "to the wrappee's implementation of " +#~ ":code:`aggregate_fit()`." +#~ msgstr "" + +#~ msgid "" +#~ "McMahan, H. Brendan, et al. \"Learning" +#~ " differentially private recurrent language " +#~ "models.\" arXiv preprint arXiv:1710.06963 " +#~ "(2017)." +#~ msgstr "" + +#~ msgid "" +#~ "Andrew, Galen, et al. \"Differentially " +#~ "private learning with adaptive clipping.\" " +#~ "Advances in Neural Information Processing " +#~ "Systems 34 (2021): 17455-17466." +#~ msgstr "" + +#~ msgid "" +#~ "The following command can be used " +#~ "to verfiy if Flower was successfully " +#~ "installed. 
If everything worked, it " +#~ "should print the version of Flower " +#~ "to the command line::" +#~ msgstr "" + +#~ msgid "flwr (Python API reference)" +#~ msgstr "" + +#~ msgid "start_client" +#~ msgstr "" + +#~ msgid "start_numpy_client" +#~ msgstr "" + +#~ msgid "start_simulation" +#~ msgstr "" + +#~ msgid "server.start_server" +#~ msgstr "" + +#~ msgid "server.strategy" +#~ msgstr "" + +#~ msgid "server.strategy.Strategy" +#~ msgstr "" + +#~ msgid "server.strategy.FedAvg" +#~ msgstr "" + +#~ msgid "server.strategy.FedAvgM" +#~ msgstr "" + +#~ msgid "server.strategy.FedMedian" +#~ msgstr "" + +#~ msgid "server.strategy.QFedAvg" +#~ msgstr "" + +#~ msgid "server.strategy.FaultTolerantFedAvg" +#~ msgstr "" + +#~ msgid "server.strategy.FedOpt" +#~ msgstr "" + +#~ msgid "server.strategy.FedProx" +#~ msgstr "" + +#~ msgid "server.strategy.FedAdagrad" +#~ msgstr "" + +#~ msgid "server.strategy.FedAdam" +#~ msgstr "" + +#~ msgid "server.strategy.FedYogi" +#~ msgstr "" + +#~ msgid "server.strategy.FedTrimmedAvg" +#~ msgstr "" + +#~ msgid "server.strategy.Krum" +#~ msgstr "" + +#~ msgid "server.strategy.FedXgbNnAvg" +#~ msgstr "" + +#~ msgid "server.strategy.DPFedAvgAdaptive" +#~ msgstr "" + +#~ msgid "server.strategy.DPFedAvgFixed" +#~ msgstr "" + +#~ msgid "" +#~ "**Fix the incorrect return types of " +#~ "Strategy** " +#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" +#~ msgstr "" + +#~ msgid "" +#~ "The types of the return values in" +#~ " the docstrings in two methods " +#~ "(`aggregate_fit` and `aggregate_evaluate`) now " +#~ "match the hint types in the code." +#~ msgstr "" + +#~ msgid "" +#~ "Using the `client_fn`, Flower clients " +#~ "can interchangeably run as standalone " +#~ "processes (i.e. via `start_client`) or " +#~ "in simulation (i.e. via `start_simulation`)" +#~ " without requiring changes to how the" +#~ " client class is defined and " +#~ "instantiated. Calling `start_numpy_client` is " +#~ "now deprecated." 
+#~ msgstr "" + +#~ msgid "" +#~ "**Update Flower Examples** " +#~ "([#2384](https://github.com/adap/flower/pull/2384)), " +#~ "([#2425](https://github.com/adap/flower/pull/2425))" +#~ msgstr "" + +#~ msgid "" +#~ "**General updates to baselines** " +#~ "([#2301](https://github.com/adap/flower/pull/2301), " +#~ "[#2305](https://github.com/adap/flower/pull/2305), " +#~ "[#2307](https://github.com/adap/flower/pull/2307), " +#~ "[#2327](https://github.com/adap/flower/pull/2327), " +#~ "[#2435](https://github.com/adap/flower/pull/2435))" +#~ msgstr "" + +#~ msgid "" +#~ "**General updates to the simulation " +#~ "engine** ([#2331](https://github.com/adap/flower/pull/2331), " +#~ "[#2447](https://github.com/adap/flower/pull/2447), " +#~ "[#2448](https://github.com/adap/flower/pull/2448))" +#~ msgstr "" + +#~ msgid "" +#~ "**General improvements** " +#~ "([#2309](https://github.com/adap/flower/pull/2309), " +#~ "[#2310](https://github.com/adap/flower/pull/2310), " +#~ "[2313](https://github.com/adap/flower/pull/2313), " +#~ "[#2316](https://github.com/adap/flower/pull/2316), " +#~ "[2317](https://github.com/adap/flower/pull/2317),[#2349](https://github.com/adap/flower/pull/2349)," +#~ " [#2360](https://github.com/adap/flower/pull/2360), " +#~ "[#2402](https://github.com/adap/flower/pull/2402), " +#~ "[#2446](https://github.com/adap/flower/pull/2446))" +#~ msgstr "" + +#~ msgid "" +#~ "`flower-superlink --driver-api-address " +#~ "\"0.0.0.0:8081\" --fleet-api-address " +#~ "\"0.0.0.0:8086\"`" +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. 
If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." +#~ msgstr "" + +#~ msgid "" +#~ "Let's build a horizontal federated " +#~ "learning system using XGBoost and " +#~ "Flower!" +#~ msgstr "" + +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." +#~ msgstr "" + +#~ msgid "" +#~ "In this notebook, we'll build a " +#~ "federated learning system using Flower " +#~ "and PyTorch. In part 1, we use " +#~ "PyTorch for the model training pipeline" +#~ " and data loading. In part 2, " +#~ "we continue to federate the PyTorch-" +#~ "based pipeline using Flower." +#~ msgstr "" + +#~ msgid "" +#~ "Next, we install the necessary packages" +#~ " for PyTorch (``torch`` and " +#~ "``torchvision``) and Flower (``flwr``):" +#~ msgstr "" + +#~ msgid "" +#~ "Federated learning can be applied to " +#~ "many different types of tasks across " +#~ "different domains. In this tutorial, we" +#~ " introduce federated learning by training" +#~ " a simple convolutional neural network " +#~ "(CNN) on the popular CIFAR-10 dataset." 
+#~ " CIFAR-10 can be used to train " +#~ "image classifiers that distinguish between " +#~ "images from ten different classes:" +#~ msgstr "" + +#~ msgid "" +#~ "Each organization will act as a " +#~ "client in the federated learning system." +#~ " So having ten organizations participate" +#~ " in a federation means having ten " +#~ "clients connected to the federated " +#~ "learning server:" +#~ msgstr "" + +#~ msgid "" +#~ "Let's now load the CIFAR-10 training " +#~ "and test set, partition them into " +#~ "ten smaller datasets (each split into" +#~ " training and validation set), and " +#~ "wrap the resulting partitions by " +#~ "creating a PyTorch ``DataLoader`` for " +#~ "each of them:" +#~ msgstr "" + +#~ msgid "|ed6498a023f2477a9ccd57ee4514bda4|" +#~ msgstr "" + +#~ msgid "|5a4f742489ac4f819afefdd4dc9ab272|" +#~ msgstr "" + +#~ msgid "|3331c80cd05045f6a56524d8e3e76d0c|" +#~ msgstr "" + +#~ msgid "|4987b26884ec4b2c8f06c1264bcebe60|" +#~ msgstr "" + +#~ msgid "|ec8ae2d778aa493a986eb2fa29c220e5|" +#~ msgstr "" + +#~ msgid "|b8949d0669fe4f8eadc9a4932f4e9c57|" +#~ msgstr "" + +#~ msgid "|94ff30bdcd09443e8488b5f29932a541|" +#~ msgstr "" + +#~ msgid "|48dccf1d6d0544bba8917d2783a47719|" +#~ msgstr "" + +#~ msgid "|0366618db96b4f329f0d4372d1150fde|" +#~ msgstr "" + +#~ msgid "|ac80eddc76e6478081b1ca35eed029c0|" +#~ msgstr "" + +#~ msgid "|1ac94140c317450e89678db133c7f3c2|" +#~ msgstr "" + +#~ msgid "|f8850c6e96fc4430b55e53bba237a7c0|" +#~ msgstr "" + +#~ msgid "|4a368fdd3fc34adabd20a46752a68582|" +#~ msgstr "" + +#~ msgid "|40f69c17bb444652a7c8dfe577cd120e|" +#~ msgstr "" + +#~ msgid "" +#~ "Please follow the first section on " +#~ "`Run Flower using Docker " +#~ "`_ which covers this" +#~ " step in more detail." +#~ msgstr "" + +#~ msgid "" +#~ "Since `Flower 1.5 `_ we have " +#~ "introduced translations to our doc " +#~ "pages, but, as you might have " +#~ "noticed, the translations are often " +#~ "imperfect. 
If you speak languages other" +#~ " than English, you might be able " +#~ "to help us in our effort to " +#~ "make Federated Learning accessible to as" +#~ " many people as possible by " +#~ "contributing to those translations! This " +#~ "might also be a great opportunity " +#~ "for those wanting to become open " +#~ "source contributors with little prerequistes." +#~ msgstr "" + +#~ msgid "" +#~ "You input your translation in the " +#~ "textbox at the top and then, once" +#~ " you are happy with it, you " +#~ "either press ``Save and continue`` (to" +#~ " save the translation and go to " +#~ "the next untranslated string), ``Save " +#~ "and stay`` (to save the translation " +#~ "and stay on the same page), " +#~ "``Suggest`` (to add your translation to" +#~ " suggestions for other users to " +#~ "view), or ``Skip`` (to go to the" +#~ " next untranslated string without saving" +#~ " anything)." +#~ msgstr "" + +#~ msgid "" +#~ "The first thing we need to do " +#~ "is to define a message type for" +#~ " the RPC system in :code:`transport.proto`." +#~ " Note that we have to do it " +#~ "for both the request and response " +#~ "messages. For more details on the " +#~ "syntax of proto3, please see the " +#~ "`official documentation `_." +#~ msgstr "" + +#~ msgid "" +#~ "Source: `Official VSCode documentation " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "`Developing inside a Container " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "`Remote development in Containers " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "If you are not familiar with " +#~ "Flower Baselines, you should probably " +#~ "check-out our `contributing guide for " +#~ "baselines `_." +#~ msgstr "" + +#~ msgid "" +#~ "You should then check out the open" +#~ " `issues " +#~ "`_" +#~ " for baseline requests. If you find" +#~ " a baseline that you'd like to " +#~ "work on and that has no assignes," +#~ " feel free to assign it to " +#~ "yourself and start working on it!" 
+#~ msgstr "" + +#~ msgid "" +#~ "If you're familiar with how contributing" +#~ " on GitHub works, you can directly" +#~ " checkout our `getting started guide " +#~ "for contributors `_." +#~ msgstr "" + +#~ msgid "" +#~ "Git is a distributed version control " +#~ "tool. This allows for an entire " +#~ "codebase's history to be stored and " +#~ "every developer's machine. It is a " +#~ "software that will need to be " +#~ "installed on your local machine, you " +#~ "can follow this `guide " +#~ "`_ to set it up." +#~ msgstr "" + +#~ msgid "" +#~ "A fork is a personal copy of " +#~ "a GitHub repository. To create one " +#~ "for Flower, you must navigate to " +#~ "https://github.com/adap/flower (while connected to" +#~ " your GitHub account) and click the" +#~ " ``Fork`` button situated on the top" +#~ " right of the page." +#~ msgstr "" + +#~ msgid "" +#~ "Now we will add an upstream " +#~ "address to our repository. Still in " +#~ "the same directroy, we must run " +#~ "the following command:" +#~ msgstr "" + +#~ msgid "" +#~ "This can be achieved by following " +#~ "this `getting started guide for " +#~ "contributors`_ (note that you won't need" +#~ " to clone the repository). Once you" +#~ " are able to write code and " +#~ "test it, you can finally start " +#~ "making changes!" +#~ msgstr "" + +#~ msgid "" +#~ "For our documentation, we’ve started to" +#~ " use the `Diàtaxis framework " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "Our “How to” guides should have " +#~ "titles that continue the sencence “How" +#~ " to …”, for example, “How to " +#~ "upgrade to Flower 1.0”." +#~ msgstr "" + +#~ msgid "" +#~ "This issue is about changing the " +#~ "title of a doc from present " +#~ "continious to present simple." +#~ msgstr "" + +#~ msgid "" +#~ "Let's take the example of “Saving " +#~ "Progress” which we changed to “Save " +#~ "Progress”. Does this pass our check?" 
+#~ msgstr "" + +#~ msgid "Before: ”How to saving progress” ❌" +#~ msgstr "" + +#~ msgid "After: ”How to save progress” ✅" +#~ msgstr "" + +#~ msgid "" +#~ "This is a tiny change, but it’ll" +#~ " allow us to test your end-" +#~ "to-end setup. After cloning and " +#~ "setting up the Flower repo, here’s " +#~ "what you should do:" +#~ msgstr "" + +#~ msgid "" +#~ "Build the docs and check the " +#~ "result: ``_" +#~ msgstr "" + +#~ msgid "Here’s how to change the file name:" +#~ msgstr "" + +#~ msgid "" +#~ "Commit the changes (commit messages are" +#~ " always imperative: “Do something”, in " +#~ "this case “Change …”)" +#~ msgstr "" + +#~ msgid "" +#~ "`Good first contributions " +#~ "`_, where you should" +#~ " particularly look into the " +#~ ":code:`baselines` contributions." +#~ msgstr "" + +#~ msgid "" +#~ "If the section is completely empty " +#~ "(without any token) or non-existant, " +#~ "the changelog will just contain the " +#~ "title of the PR for the changelog" +#~ " entry, without any description." +#~ msgstr "" + +#~ msgid "" +#~ "Flower uses :code:`pyproject.toml` to manage" +#~ " dependencies and configure development " +#~ "tools (the ones which support it). " +#~ "Poetry is a build tool which " +#~ "supports `PEP 517 " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "This tutorial will show you how to" +#~ " use Flower to build a federated " +#~ "version of an existing machine learning" +#~ " workload with `FedBN `_, a federated training strategy" +#~ " designed for non-iid data. We " +#~ "are using PyTorch to train a " +#~ "Convolutional Neural Network(with Batch " +#~ "Normalization layers) on the CIFAR-10 " +#~ "dataset. When applying FedBN, only few" +#~ " changes needed compared to `Example: " +#~ "PyTorch - From Centralized To Federated" +#~ " `_." +#~ msgstr "" + +#~ msgid "" +#~ "All files are revised based on " +#~ "`Example: PyTorch - From Centralized To" +#~ " Federated `_. 
The " +#~ "only thing to do is modifying the" +#~ " file called :code:`cifar.py`, revised part" +#~ " is shown below:" +#~ msgstr "" + +#~ msgid "" +#~ "So far this should all look fairly" +#~ " familiar if you've used PyTorch " +#~ "before. Let's take the next step " +#~ "and use what we've built to create" +#~ " a federated learning system within " +#~ "FedBN, the sytstem consists of one " +#~ "server and two clients." +#~ msgstr "" + +#~ msgid "" +#~ "If you have read `Example: PyTorch " +#~ "- From Centralized To Federated " +#~ "`_, the following" +#~ " parts are easy to follow, onyl " +#~ ":code:`get_parameters` and :code:`set_parameters` " +#~ "function in :code:`client.py` needed to " +#~ "revise. If not, please read the " +#~ "`Example: PyTorch - From Centralized To" +#~ " Federated `_. first." +#~ msgstr "" + +#~ msgid "Example: Walk-Through PyTorch & MNIST" +#~ msgstr "" + +#~ msgid "" +#~ "In this tutorial we will learn, " +#~ "how to train a Convolutional Neural " +#~ "Network on MNIST using Flower and " +#~ "PyTorch." +#~ msgstr "" + +#~ msgid "" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead an install PyTorch and " +#~ "the **torchvision** library:" +#~ msgstr "" + +#~ msgid "Ready... Set... Train!" +#~ msgstr "" + +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Basic MNIST Example" +#~ " `_. " +#~ "This will allow you see how easy" +#~ " it is to wrap your code with" +#~ " Flower and begin training in a " +#~ "federated way. We provide you with " +#~ "two helper scripts, namely *run-" +#~ "server.sh*, and *run-clients.sh*. Don't " +#~ "be afraid to look inside, they are" +#~ " simple enough =)." 
+#~ msgstr "" + +#~ msgid "" +#~ "Go ahead and launch on a terminal" +#~ " the *run-server.sh* script first as" +#~ " follows:" +#~ msgstr "" + +#~ msgid "Now that the server is up and running, go ahead and launch the clients." +#~ msgstr "" + +#~ msgid "" +#~ "Et voilà! You should be seeing the" +#~ " training procedure and, after a few" +#~ " iterations, the test accuracy for " +#~ "each client." +#~ msgstr "" + +#~ msgid "Now, let's see what is really happening inside." +#~ msgstr "" + +#~ msgid "" +#~ "Inside the server helper script *run-" +#~ "server.sh* you will find the following" +#~ " code that basically runs the " +#~ ":code:`server.py`" +#~ msgstr "" + +#~ msgid "" +#~ "We can go a bit deeper and " +#~ "see that :code:`server.py` simply launches " +#~ "a server that will coordinate three " +#~ "rounds of training. Flower Servers are" +#~ " very customizable, but for simple " +#~ "workloads, we can start a server " +#~ "using the :ref:`start_server ` function and leave " +#~ "all the configuration possibilities at " +#~ "their default values, as seen below." +#~ msgstr "" + +#~ msgid "" +#~ "Next, let's take a look at the " +#~ "*run-clients.sh* file. You will see " +#~ "that it contains the main loop " +#~ "that starts a set of *clients*." +#~ msgstr "" + +#~ msgid "" +#~ "**cid**: is the client ID. It is" +#~ " an integer that uniquely identifies " +#~ "client identifier." +#~ msgstr "" + +#~ msgid "**sever_address**: String that identifies IP and port of the server." +#~ msgstr "" + +#~ msgid "" +#~ "**nb_clients**: This defines the number " +#~ "of clients being created. This piece " +#~ "of information is not required by " +#~ "the client, but it helps us " +#~ "partition the original MNIST dataset to" +#~ " make sure that every client is " +#~ "working on unique subsets of both " +#~ "*training* and *test* sets." 
+#~ msgstr "" + +#~ msgid "" +#~ "Again, we can go deeper and look" +#~ " inside :code:`flwr_example/quickstart-" +#~ "pytorch/client.py`. After going through the" +#~ " argument parsing code at the " +#~ "beginning of our :code:`main` function, " +#~ "you will find a call to " +#~ ":code:`mnist.load_data`. This function is " +#~ "responsible for partitioning the original " +#~ "MNIST datasets (*training* and *test*) " +#~ "and returning a :code:`torch.utils.data.DataLoader`" +#~ " s for each of them. We then" +#~ " instantiate a :code:`PytorchMNISTClient` object" +#~ " with our client ID, our DataLoaders," +#~ " the number of epochs in each " +#~ "round, and which device we want to" +#~ " use for training (CPU or GPU)." +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`PytorchMNISTClient` object when " +#~ "finally passed to :code:`fl.client.start_client` " +#~ "along with the server's address as " +#~ "the training process begins." +#~ msgstr "" + +#~ msgid "A Closer Look" +#~ msgstr "" + +#~ msgid "" +#~ "Now, let's look closely into the " +#~ ":code:`PytorchMNISTClient` inside :code:`flwr_example" +#~ ".quickstart-pytorch.mnist` and see what it" +#~ " is doing:" +#~ msgstr "" + +#~ msgid "" +#~ "The first thing to notice is that" +#~ " :code:`PytorchMNISTClient` instantiates a CNN" +#~ " model inside its constructor" +#~ msgstr "" + +#~ msgid "" +#~ "The code for the CNN is available" +#~ " under :code:`quickstart-pytorch.mnist` and " +#~ "it is reproduced below. It is the" +#~ " same network found in `Basic MNIST" +#~ " Example " +#~ "`_." 
+#~ msgstr "" + +#~ msgid "" +#~ "The second thing to notice is that" +#~ " :code:`PytorchMNISTClient` class inherits from" +#~ " the :code:`fl.client.Client`, and hence it" +#~ " must implement the following methods:" +#~ msgstr "" + +#~ msgid "" +#~ "When comparing the abstract class to " +#~ "its derived class :code:`PytorchMNISTClient` " +#~ "you will notice that :code:`fit` calls" +#~ " a :code:`train` function and that " +#~ ":code:`evaluate` calls a :code:`test`: " +#~ "function." +#~ msgstr "" + +#~ msgid "" +#~ "These functions can both be found " +#~ "inside the same :code:`quickstart-" +#~ "pytorch.mnist` module:" +#~ msgstr "" + +#~ msgid "" +#~ "Observe that these functions encapsulate " +#~ "regular training and test loops and " +#~ "provide :code:`fit` and :code:`evaluate` with" +#~ " final statistics for each round. You" +#~ " could substitute them with your " +#~ "custom train and test loops and " +#~ "change the network architecture, and the" +#~ " entire example would still work " +#~ "flawlessly. As a matter of fact, " +#~ "why not try and modify the code" +#~ " to an example of your liking?" +#~ msgstr "" + +#~ msgid "Give It a Try" +#~ msgstr "" + +#~ msgid "" +#~ "Looking through the quickstart code " +#~ "description above will have given a " +#~ "good understanding of how *clients* and" +#~ " *servers* work in Flower, how to " +#~ "run a simple experiment, and the " +#~ "internals of a client wrapper. Here " +#~ "are a few things you could try " +#~ "on your own and get more " +#~ "experience with Flower:" +#~ msgstr "" + +#~ msgid "" +#~ "Try and change :code:`PytorchMNISTClient` so" +#~ " it can accept different architectures." +#~ msgstr "" + +#~ msgid "" +#~ "Modify the :code:`train` function so " +#~ "that it accepts different optimizers" +#~ msgstr "" + +#~ msgid "" +#~ "Modify the :code:`test` function so that" +#~ " it proves not only the top-1 " +#~ "(regular accuracy) but also the top-5" +#~ " accuracy?" 
+#~ msgstr "" + +#~ msgid "" +#~ "Go larger! Try to adapt the code" +#~ " to larger images and datasets. Why" +#~ " not try training on ImageNet with" +#~ " a ResNet-50?" +#~ msgstr "" + +#~ msgid "You are ready now. Enjoy learning in a federated way!" +#~ msgstr "" + +#~ msgid "Differential privacy" +#~ msgstr "" + +#~ msgid "" +#~ "Flower provides differential privacy (DP) " +#~ "wrapper classes for the easy integration" +#~ " of the central DP guarantees " +#~ "provided by DP-FedAvg into training " +#~ "pipelines defined in any of the " +#~ "various ML frameworks that Flower is " +#~ "compatible with." +#~ msgstr "" + +#~ msgid "" +#~ "Please note that these components are" +#~ " still experimental; the correct " +#~ "configuration of DP for a specific " +#~ "task is still an unsolved problem." +#~ msgstr "" + +#~ msgid "" +#~ "The name DP-FedAvg is misleading " +#~ "since it can be applied on top " +#~ "of any FL algorithm that conforms " +#~ "to the general structure prescribed by" +#~ " the FedOpt family of algorithms." +#~ msgstr "" + +#~ msgid "DP-FedAvg" +#~ msgstr "" + +#~ msgid "" +#~ "DP-FedAvg, originally proposed by " +#~ "McMahan et al. [mcmahan]_ and extended" +#~ " by Andrew et al. [andrew]_, is " +#~ "essentially FedAvg with the following " +#~ "modifications." +#~ msgstr "" + +#~ msgid "" +#~ "**Clipping** : The influence of each " +#~ "client's update is bounded by clipping" +#~ " it. This is achieved by enforcing" +#~ " a cap on the L2 norm of " +#~ "the update, scaling it down if " +#~ "needed." +#~ msgstr "" + +#~ msgid "" +#~ "**Noising** : Gaussian noise, calibrated " +#~ "to the clipping threshold, is added " +#~ "to the average computed at the " +#~ "server." +#~ msgstr "" + +#~ msgid "" +#~ "The distribution of the update norm " +#~ "has been shown to vary from " +#~ "task-to-task and to evolve as " +#~ "training progresses. 
This variability is " +#~ "crucial in understanding its impact on" +#~ " differential privacy guarantees, emphasizing " +#~ "the need for an adaptive approach " +#~ "[andrew]_ that continuously adjusts the " +#~ "clipping threshold to track a " +#~ "prespecified quantile of the update norm" +#~ " distribution." +#~ msgstr "" + +#~ msgid "Simplifying Assumptions" +#~ msgstr "" + +#~ msgid "" +#~ "We make (and attempt to enforce) a" +#~ " number of assumptions that must be" +#~ " satisfied to ensure that the " +#~ "training process actually realizes the " +#~ ":math:`(\\epsilon, \\delta)` guarantees the " +#~ "user has in mind when configuring " +#~ "the setup." +#~ msgstr "" + +#~ msgid "" +#~ "**Fixed-size subsampling** :Fixed-size " +#~ "subsamples of the clients must be " +#~ "taken at each round, as opposed to" +#~ " variable-sized Poisson subsamples." +#~ msgstr "" + +#~ msgid "" +#~ "**Unweighted averaging** : The contributions" +#~ " from all the clients must weighted" +#~ " equally in the aggregate to " +#~ "eliminate the requirement for the server" +#~ " to know in advance the sum of" +#~ " the weights of all clients available" +#~ " for selection." +#~ msgstr "" + +#~ msgid "" +#~ "**No client failures** : The set " +#~ "of available clients must stay constant" +#~ " across all rounds of training. In" +#~ " other words, clients cannot drop out" +#~ " or fail." +#~ msgstr "" + +#~ msgid "" +#~ "The first two are useful for " +#~ "eliminating a multitude of complications " +#~ "associated with calibrating the noise to" +#~ " the clipping threshold, while the " +#~ "third one is required to comply " +#~ "with the assumptions of the privacy " +#~ "analysis." +#~ msgstr "" + +#~ msgid "" +#~ "These restrictions are in line with " +#~ "constraints imposed by Andrew et al. " +#~ "[andrew]_." 
+#~ msgstr "" + +#~ msgid "Customizable Responsibility for Noise injection" +#~ msgstr "" + +#~ msgid "" +#~ "In contrast to other implementations " +#~ "where the addition of noise is " +#~ "performed at the server, you can " +#~ "configure the site of noise injection" +#~ " to better match your threat model." +#~ " We provide users with the " +#~ "flexibility to set up the training " +#~ "such that each client independently adds" +#~ " a small amount of noise to the" +#~ " clipped update, with the result that" +#~ " simply aggregating the noisy updates " +#~ "is equivalent to the explicit addition" +#~ " of noise to the non-noisy " +#~ "aggregate at the server." +#~ msgstr "" + +#~ msgid "" +#~ "To be precise, if we let :math:`m`" +#~ " be the number of clients sampled " +#~ "each round and :math:`\\sigma_\\Delta` be " +#~ "the scale of the total Gaussian " +#~ "noise that needs to be added to" +#~ " the sum of the model updates, " +#~ "we can use simple maths to show" +#~ " that this is equivalent to each " +#~ "client adding noise with scale " +#~ ":math:`\\sigma_\\Delta/\\sqrt{m}`." +#~ msgstr "" + +#~ msgid "Wrapper-based approach" +#~ msgstr "" + +#~ msgid "" +#~ "Introducing DP to an existing workload" +#~ " can be thought of as adding an" +#~ " extra layer of security around it." +#~ " This inspired us to provide the " +#~ "additional server and client-side logic" +#~ " needed to make the training process" +#~ " differentially private as wrappers for " +#~ "instances of the :code:`Strategy` and " +#~ ":code:`NumPyClient` abstract classes respectively." +#~ " This wrapper-based approach has the" +#~ " advantage of being easily composable " +#~ "with other wrappers that someone might" +#~ " contribute to the Flower library in" +#~ " the future, e.g., for secure " +#~ "aggregation. 
Using Inheritance instead can " +#~ "be tedious because that would require" +#~ " the creation of new sub- classes " +#~ "every time a new class implementing " +#~ ":code:`Strategy` or :code:`NumPyClient` is " +#~ "defined." +#~ msgstr "" + +#~ msgid "Server-side logic" +#~ msgstr "" + +#~ msgid "" +#~ "The first version of our solution " +#~ "was to define a decorator whose " +#~ "constructor accepted, among other things, " +#~ "a boolean-valued variable indicating " +#~ "whether adaptive clipping was to be " +#~ "enabled or not. We quickly realized " +#~ "that this would clutter its " +#~ ":code:`__init__()` function with variables " +#~ "corresponding to hyperparameters of adaptive" +#~ " clipping that would remain unused " +#~ "when it was disabled. A cleaner " +#~ "implementation could be achieved by " +#~ "splitting the functionality into two " +#~ "decorators, :code:`DPFedAvgFixed` and " +#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" +#~ " classing the former. The constructors " +#~ "for both classes accept a boolean " +#~ "parameter :code:`server_side_noising`, which, as " +#~ "the name suggests, determines where " +#~ "noising is to be performed." +#~ msgstr "" + +#~ msgid "" +#~ "The server-side capabilities required " +#~ "for the original version of DP-" +#~ "FedAvg, i.e., the one which performed" +#~ " fixed clipping, can be completely " +#~ "captured with the help of wrapper " +#~ "logic for just the following two " +#~ "methods of the :code:`Strategy` abstract " +#~ "class." 
+#~ msgstr "" + +#~ msgid "" +#~ ":code:`configure_fit()` : The config " +#~ "dictionary being sent by the wrapped " +#~ ":code:`Strategy` to each client needs to" +#~ " be augmented with an additional " +#~ "value equal to the clipping threshold" +#~ " (keyed under :code:`dpfedavg_clip_norm`) and," +#~ " if :code:`server_side_noising=true`, another one" +#~ " equal to the scale of the " +#~ "Gaussian noise that needs to be " +#~ "added at the client (keyed under " +#~ ":code:`dpfedavg_noise_stddev`). This entails " +#~ "*post*-processing of the results returned " +#~ "by the wrappee's implementation of " +#~ ":code:`configure_fit()`." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`aggregate_fit()`: We check whether any" +#~ " of the sampled clients dropped out" +#~ " or failed to upload an update " +#~ "before the round timed out. In " +#~ "that case, we need to abort the" +#~ " current round, discarding any successful" +#~ " updates that were received, and move" +#~ " on to the next one. On the " +#~ "other hand, if all clients responded " +#~ "successfully, we must force the " +#~ "averaging of the updates to happen " +#~ "in an unweighted manner by intercepting" +#~ " the :code:`parameters` field of " +#~ ":code:`FitRes` for each received update " +#~ "and setting it to 1. Furthermore, " +#~ "if :code:`server_side_noising=true`, each update " +#~ "is perturbed with an amount of " +#~ "noise equal to what it would have" +#~ " been subjected to had client-side" +#~ " noising being enabled. This entails " +#~ "*pre*-processing of the arguments to " +#~ "this method before passing them on " +#~ "to the wrappee's implementation of " +#~ ":code:`aggregate_fit()`." +#~ msgstr "" + +#~ msgid "" +#~ "We can't directly change the aggregation" +#~ " function of the wrapped strategy to" +#~ " force it to add noise to the" +#~ " aggregate, hence we simulate client-" +#~ "side noising to implement server-side" +#~ " noising." 
+#~ msgstr "" + +#~ msgid "" +#~ "These changes have been put together " +#~ "into a class called :code:`DPFedAvgFixed`, " +#~ "whose constructor accepts the strategy " +#~ "being decorated, the clipping threshold " +#~ "and the number of clients sampled " +#~ "every round as compulsory arguments. The" +#~ " user is expected to specify the " +#~ "clipping threshold since the order of" +#~ " magnitude of the update norms is " +#~ "highly dependent on the model being " +#~ "trained and providing a default value" +#~ " would be misleading. The number of" +#~ " clients sampled at every round is" +#~ " required to calculate the amount of" +#~ " noise that must be added to " +#~ "each individual update, either by the" +#~ " server or the clients." +#~ msgstr "" + +#~ msgid "" +#~ "The additional functionality required to " +#~ "facilitate adaptive clipping has been " +#~ "provided in :code:`DPFedAvgAdaptive`, a " +#~ "subclass of :code:`DPFedAvgFixed`. It " +#~ "overrides the above-mentioned methods to" +#~ " do the following." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`configure_fit()` : It intercepts the" +#~ " config dict returned by " +#~ ":code:`super.configure_fit()` to add the " +#~ "key-value pair " +#~ ":code:`dpfedavg_adaptive_clip_enabled:True` to it, " +#~ "which the client interprets as an " +#~ "instruction to include an indicator bit" +#~ " (1 if update norm <= clipping " +#~ "threshold, 0 otherwise) in the results" +#~ " returned by it." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`aggregate_fit()` : It follows a " +#~ "call to :code:`super.aggregate_fit()` with one" +#~ " to :code:`__update_clip_norm__()`, a procedure" +#~ " which adjusts the clipping threshold " +#~ "on the basis of the indicator bits" +#~ " received from the sampled clients." 
+#~ msgstr "" + +#~ msgid "Client-side logic" +#~ msgstr "" + +#~ msgid "" +#~ "The client-side capabilities required " +#~ "can be completely captured through " +#~ "wrapper logic for just the :code:`fit()`" +#~ " method of the :code:`NumPyClient` abstract" +#~ " class. To be precise, we need " +#~ "to *post-process* the update computed" +#~ " by the wrapped client to clip " +#~ "it, if necessary, to the threshold " +#~ "value supplied by the server as " +#~ "part of the config dictionary. In " +#~ "addition to this, it may need to" +#~ " perform some extra work if either" +#~ " (or both) of the following keys " +#~ "are also present in the dict." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`dpfedavg_noise_stddev` : Generate and " +#~ "add the specified amount of noise " +#~ "to the clipped update." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the" +#~ " metrics dict in the :code:`FitRes` " +#~ "object being returned to the server " +#~ "with an indicator bit, calculated as " +#~ "described earlier." +#~ msgstr "" + +#~ msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" +#~ msgstr "" + +#~ msgid "" +#~ "Assume you have trained for :math:`n`" +#~ " rounds with sampling fraction :math:`q`" +#~ " and noise multiplier :math:`z`. In " +#~ "order to calculate the :math:`\\epsilon` " +#~ "value this would result in for a" +#~ " particular :math:`\\delta`, the following " +#~ "script may be used." +#~ msgstr "" + +#~ msgid "" +#~ "McMahan et al. \"Learning Differentially " +#~ "Private Recurrent Language Models.\" " +#~ "International Conference on Learning " +#~ "Representations (ICLR), 2017." +#~ msgstr "" + +#~ msgid "" +#~ "Andrew, Galen, et al. \"Differentially " +#~ "Private Learning with Adaptive Clipping.\" " +#~ "Advances in Neural Information Processing " +#~ "Systems (NeurIPS), 2021." 
+#~ msgstr "" + +#~ msgid "" +#~ "This can be achieved by customizing " +#~ "an existing strategy or by `implementing" +#~ " a custom strategy from scratch " +#~ "`_. Here's a nonsensical " +#~ "example that customizes :code:`FedAvg` by " +#~ "adding a custom ``\"hello\": \"world\"`` " +#~ "configuration key/value pair to the " +#~ "config dict of a *single client* " +#~ "(only the first client in the " +#~ "list, the other clients in this " +#~ "round to not receive this \"special\"" +#~ " config value):" +#~ msgstr "" + +#~ msgid "" +#~ "More sophisticated implementations can use " +#~ ":code:`configure_fit` to implement custom " +#~ "client selection logic. A client will" +#~ " only participate in a round if " +#~ "the corresponding :code:`ClientProxy` is " +#~ "included in the the list returned " +#~ "from :code:`configure_fit`." +#~ msgstr "" + +#~ msgid "" +#~ "More sophisticated implementations can use " +#~ ":code:`configure_evaluate` to implement custom " +#~ "client selection logic. A client will" +#~ " only participate in a round if " +#~ "the corresponding :code:`ClientProxy` is " +#~ "included in the the list returned " +#~ "from :code:`configure_evaluate`." +#~ msgstr "" + +#~ msgid "" +#~ "`How to run Flower using Docker " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "Ray Dashboard: ``_" +#~ msgstr "" + +#~ msgid "" +#~ "Ray Metrics: ``_" +#~ msgstr "" + +#~ msgid "Enjoy building more robust and flexible ``ClientApp``s with mods!" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`ClientApp `\\ " +#~ "\\(client\\_fn\\[\\, mods\\]\\)" +#~ msgstr "" + +#~ msgid ":py:obj:`flwr.server.driver `\\" +#~ msgstr "" + +#~ msgid "Flower driver SDK." 
+#~ msgstr "" + +#~ msgid "driver" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`start_driver `\\ " +#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`Driver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`GrpcDriver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "`GrpcDriver` provides access to the gRPC Driver API/service." +#~ msgstr "" + +#~ msgid ":py:obj:`get_nodes `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`pull_task_res " +#~ "`\\ \\(task\\_ids\\)" +#~ msgstr "" + +#~ msgid "Get task results." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`push_task_ins " +#~ "`\\ " +#~ "\\(task\\_ins\\_list\\)" +#~ msgstr "" + +#~ msgid "Schedule tasks." +#~ msgstr "" + +#~ msgid "GrpcDriver" +#~ msgstr "" + +#~ msgid ":py:obj:`connect `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Connect to the Driver API." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`create_run " +#~ "`\\ \\(req\\)" +#~ msgstr "" + +#~ msgid "Request for run ID." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`disconnect " +#~ "`\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Disconnect from the Driver API." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`get_nodes `\\" +#~ " \\(req\\)" +#~ msgstr "" + +#~ msgid "Get client IDs." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`pull_task_res " +#~ "`\\ \\(req\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`push_task_ins " +#~ "`\\ \\(req\\)" +#~ msgstr "" + +#~ msgid "" +#~ "Optionally specify the type of actor " +#~ "to use. The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "running the clients' jobs (i.e. their" +#~ " `fit()` method)." +#~ msgstr "" + +#~ msgid "" +#~ "Much effort went into a completely " +#~ "restructured Flower docs experience. 
The " +#~ "documentation on [flower.ai/docs](flower.ai/docs) is" +#~ " now divided into Flower Framework, " +#~ "Flower Baselines, Flower Android SDK, " +#~ "Flower iOS SDK, and code example " +#~ "projects." +#~ msgstr "" + +#~ msgid "" +#~ "The first preview release of Flower " +#~ "Baselines has arrived! We're kickstarting " +#~ "Flower Baselines with implementations of " +#~ "FedOpt (FedYogi, FedAdam, FedAdagrad), FedBN," +#~ " and FedAvgM. Check the documentation " +#~ "on how to use [Flower " +#~ "Baselines](https://flower.ai/docs/using-baselines.html). " +#~ "With this first preview release we're" +#~ " also inviting the community to " +#~ "[contribute their own " +#~ "baselines](https://flower.ai/docs/contributing-baselines.html)." +#~ msgstr "" + +#~ msgid "" +#~ "Flower usage examples used to be " +#~ "bundled with Flower in a package " +#~ "called ``flwr_example``. We are migrating " +#~ "those examples to standalone projects to" +#~ " make them easier to use. All " +#~ "new examples are based in the " +#~ "directory `examples " +#~ "`_." +#~ msgstr "" + +#~ msgid "The following examples are available as standalone projects." +#~ msgstr "" + +#~ msgid "Quickstart TensorFlow/Keras" +#~ msgstr "" + +#~ msgid "" +#~ "`Quickstart TensorFlow (Tutorial) " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "`Quickstart PyTorch (Tutorial) " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "`PyTorch: From Centralized To Federated " +#~ "(Tutorial) `_" +#~ msgstr "" + +#~ msgid "Legacy Examples (`flwr_example`)" +#~ msgstr "" + +#~ msgid "" +#~ "The useage examples in `flwr_example` " +#~ "are deprecated and will be removed " +#~ "in the future. New examples are " +#~ "provided as standalone projects in " +#~ "`examples `_." +#~ msgstr "" + +#~ msgid "Extra Dependencies" +#~ msgstr "" + +#~ msgid "" +#~ "The core Flower framework keeps a " +#~ "minimal set of dependencies. 
The " +#~ "examples demonstrate Flower in the " +#~ "context of different machine learning " +#~ "frameworks, so additional dependencies need" +#~ " to be installed before an example" +#~ " can be run." +#~ msgstr "" + +#~ msgid "For PyTorch examples::" +#~ msgstr "" + +#~ msgid "For TensorFlow examples::" +#~ msgstr "" + +#~ msgid "For both PyTorch and TensorFlow examples::" +#~ msgstr "" + +#~ msgid "" +#~ "Please consult :code:`pyproject.toml` for a" +#~ " full list of possible extras " +#~ "(section :code:`[tool.poetry.extras]`)." +#~ msgstr "" + +#~ msgid "PyTorch Examples" +#~ msgstr "" + +#~ msgid "" +#~ "Our PyTorch examples are based on " +#~ "PyTorch 1.7. They should work with " +#~ "other releases as well. So far, we" +#~ " provide the following examples." +#~ msgstr "" + +#~ msgid "CIFAR-10 Image Classification" +#~ msgstr "" + +#~ msgid "" +#~ "`CIFAR-10 and CIFAR-100 " +#~ "`_ are " +#~ "popular RGB image datasets. The Flower" +#~ " CIFAR-10 example uses PyTorch to " +#~ "train a simple CNN classifier in a" +#~ " federated learning setup with two " +#~ "clients." +#~ msgstr "" + +#~ msgid "First, start a Flower server:" +#~ msgstr "" + +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +#~ msgstr "" + +#~ msgid "Then, start the two clients in a new terminal window:" +#~ msgstr "" + +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +#~ msgstr "" + +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." +#~ msgstr "" + +#~ msgid "ImageNet-2012 Image Classification" +#~ msgstr "" + +#~ msgid "" +#~ "`ImageNet-2012 `_ is " +#~ "one of the major computer vision " +#~ "datasets. The Flower ImageNet example " +#~ "uses PyTorch to train a ResNet-18 " +#~ "classifier in a federated learning setup" +#~ " with ten clients." 
+#~ msgstr "" + +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" +#~ msgstr "" + +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +#~ msgstr "" + +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." +#~ msgstr "" + +#~ msgid "TensorFlow Examples" +#~ msgstr "" + +#~ msgid "" +#~ "Our TensorFlow examples are based on " +#~ "TensorFlow 2.0 or newer. So far, " +#~ "we provide the following examples." +#~ msgstr "" + +#~ msgid "Fashion-MNIST Image Classification" +#~ msgstr "" + +#~ msgid "" +#~ "`Fashion-MNIST `_ is often used as " +#~ "the \"Hello, world!\" of machine " +#~ "learning. We follow this tradition and" +#~ " provide an example which samples " +#~ "random local datasets from Fashion-MNIST" +#~ " and trains a simple image " +#~ "classification model over those partitions." +#~ msgstr "" + +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" +#~ msgstr "" + +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +#~ msgstr "" + +#~ msgid "" +#~ "For more details, see " +#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." +#~ msgstr "" + +#~ msgid ":fa:`eye,mr-1` Can Flower run on Juptyter Notebooks / Google Colab?" +#~ msgstr "" + +#~ msgid "" +#~ "`Flower meets KOSMoS `_." +#~ msgstr "" + +#~ msgid "" +#~ "If you want to check out " +#~ "everything put together, you should " +#~ "check out the full code example: " +#~ "[https://github.com/adap/flower/tree/main/examples/quickstart-" +#~ "huggingface](https://github.com/adap/flower/tree/main/examples" +#~ "/quickstart-huggingface)." +#~ msgstr "" + +#~ msgid "" +#~ "First of all, for running the " +#~ "Flower Python server, it is recommended" +#~ " to create a virtual environment and" +#~ " run everything within a `virtualenv " +#~ "`_. " +#~ "For the Flower client implementation in" +#~ " iOS, it is recommended to use " +#~ "Xcode as our IDE." 
+#~ msgstr "" + +#~ msgid "" +#~ "Since CoreML does not allow the " +#~ "model parameters to be seen before " +#~ "training, and accessing the model " +#~ "parameters during or after the training" +#~ " can only be done by specifying " +#~ "the layer name, we need to know" +#~ " this informations beforehand, through " +#~ "looking at the model specification, " +#~ "which are written as proto files. " +#~ "The implementation can be seen in " +#~ ":code:`MLModelInspect`." +#~ msgstr "" + +#~ msgid "" +#~ "After we have all of the necessary" +#~ " informations, let's create our Flower " +#~ "client." +#~ msgstr "" + +#~ msgid "" +#~ "MXNet is no longer maintained and " +#~ "has been moved into `Attic " +#~ "`_. As a " +#~ "result, we would encourage you to " +#~ "use other ML frameworks alongise Flower," +#~ " for example, PyTorch. This tutorial " +#~ "might be removed in future versions " +#~ "of Flower." +#~ msgstr "" + +#~ msgid "" +#~ "It is recommended to create a " +#~ "virtual environment and run everything " +#~ "within this `virtualenv `_." +#~ msgstr "" + +#~ msgid "" +#~ "First of all, it is recommended to" +#~ " create a virtual environment and run" +#~ " everything within a `virtualenv " +#~ "`_." +#~ msgstr "" + +#~ msgid "Since we want to use scikt-learn, let's go ahead and install it:" +#~ msgstr "" + +#~ msgid "" +#~ "We load the MNIST dataset from " +#~ "`OpenML `_, a popular" +#~ " image classification dataset of " +#~ "handwritten digits for machine learning. " +#~ "The utility :code:`utils.load_mnist()` downloads " +#~ "the training and test data. The " +#~ "training set is split afterwards into" +#~ " 10 partitions with :code:`utils.partition()`." +#~ msgstr "" + +#~ msgid "" +#~ "Now that you have known how " +#~ "federated XGBoost work with Flower, it's" +#~ " time to run some more comprehensive" +#~ " experiments by customising the " +#~ "experimental settings. 
In the xgboost-" +#~ "comprehensive example (`full code " +#~ "`_), we provide more options " +#~ "to define various experimental setups, " +#~ "including aggregation strategies, data " +#~ "partitioning and centralised/distributed evaluation." +#~ " We also support `Flower simulation " +#~ "`_ making it easy to " +#~ "simulate large client cohorts in a " +#~ "resource-aware manner. Let's take a " +#~ "look!" +#~ msgstr "" + +#~ msgid "|31e4b1afa87c4b968327bbeafbf184d4|" +#~ msgstr "" + +#~ msgid "|c9d935b4284e4c389a33d86b33e07c0a|" +#~ msgstr "" + +#~ msgid "|00727b5faffb468f84dd1b03ded88638|" +#~ msgstr "" + +#~ msgid "|daf0cf0ff4c24fd29439af78416cf47b|" +#~ msgstr "" + +#~ msgid "|9f093007080d471d94ca90d3e9fde9b6|" +#~ msgstr "" + +#~ msgid "|46a26e6150e0479fbd3dfd655f36eb13|" +#~ msgstr "" + +#~ msgid "|3daba297595c4c7fb845d90404a6179a|" +#~ msgstr "" + +#~ msgid "|5769874fa9c4455b80b2efda850d39d7|" +#~ msgstr "" + +#~ msgid "|ba47ffb421814b0f8f9fa5719093d839|" +#~ msgstr "" + +#~ msgid "|aeac5bf79cbf497082e979834717e01b|" +#~ msgstr "" + +#~ msgid "|ce27ed4bbe95459dba016afc42486ba2|" +#~ msgstr "" + +#~ msgid "|ae94a7f71dda443cbec2385751427d41|" +#~ msgstr "" + +#~ msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" +#~ msgstr "" + +#~ msgid "|08cb60859b07461588fe44e55810b050|" +#~ msgstr "" + +#~ msgid "``BASE_IMAGE_TAG``" +#~ msgstr "``BASE_IMAGE_TAG``" + +#~ msgid "The image tag of the base image." +#~ msgstr "A tag da imagem da imagem base." 
+ +#~ msgid "" +#~ "Open the notebook ``doc/source/tutorial-" +#~ "get-started-with-flower-pytorch.ipynb``:" +#~ msgstr "" + +#~ msgid "" +#~ "https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +#~ "/tutorial-get-started-with-flower-" +#~ "pytorch.ipynb" +#~ msgstr "" + +#~ msgid "" +#~ "https://colab.research.google.com/github/adap/flower/blob/branch-" +#~ "name/doc/source/tutorial-get-started-with-" +#~ "flower-pytorch.ipynb" +#~ msgstr "" + +#~ msgid "Virutualenv with Pyenv/Virtualenv" +#~ msgstr "" + +#~ msgid "" +#~ "It is important to follow the " +#~ "instructions described in comments. For " +#~ "instance, in order to not break " +#~ "how our changelog system works, you " +#~ "should read the information above the" +#~ " ``Changelog entry`` section carefully. You" +#~ " can also checkout some examples and" +#~ " details in the :ref:`changelogentry` " +#~ "appendix." +#~ msgstr "" + +#~ msgid "Open a PR (as shown above)" +#~ msgstr "" + +#~ msgid "How to write a good PR title" +#~ msgstr "" + +#~ msgid "" +#~ "A well-crafted PR title helps team" +#~ " members quickly understand the purpose " +#~ "and scope of the changes being " +#~ "proposed. Here's a guide to help " +#~ "you write a good GitHub PR title:" +#~ msgstr "" + +#~ msgid "" +#~ "1. Be Clear and Concise: Provide a" +#~ " clear summary of the changes in " +#~ "a concise manner. 1. Use Actionable " +#~ "Verbs: Start with verbs like \"Add,\"" +#~ " \"Update,\" or \"Fix\" to indicate " +#~ "the purpose. 1. Include Relevant " +#~ "Information: Mention the affected feature " +#~ "or module for context. 1. Keep it" +#~ " Short: Avoid lengthy titles for easy" +#~ " readability. 1. Use Proper Capitalization" +#~ " and Punctuation: Follow grammar rules " +#~ "for clarity." 
+#~ msgstr "" + +#~ msgid "" +#~ "Let's start with a few examples " +#~ "for titles that should be avoided " +#~ "because they do not provide meaningful" +#~ " information:" +#~ msgstr "" + +#~ msgid "Implement Algorithm" +#~ msgstr "" + +#~ msgid "Database" +#~ msgstr "" + +#~ msgid "Add my_new_file.py to codebase" +#~ msgstr "" + +#~ msgid "Improve code in module" +#~ msgstr "" + +#~ msgid "Change SomeModule" +#~ msgstr "" + +#~ msgid "" +#~ "Here are a few positive examples " +#~ "which provide helpful information without " +#~ "repeating how they do it, as that" +#~ " is already visible in the \"Files" +#~ " changed\" section of the PR:" +#~ msgstr "" + +#~ msgid "Update docs banner to mention Flower Summit 2023" +#~ msgstr "" + +#~ msgid "Remove unnecessary XGBoost dependency" +#~ msgstr "" + +#~ msgid "Remove redundant attributes in strategies subclassing FedAvg" +#~ msgstr "" + +#~ msgid "" +#~ "Add CI job to deploy the staging" +#~ " system when the ``main`` branch " +#~ "changes" +#~ msgstr "" + +#~ msgid "" +#~ "Add new amazing library which will " +#~ "be used to improve the simulation " +#~ "engine" +#~ msgstr "" + +#~ msgid "Changelog entry" +#~ msgstr "" + +#~ msgid "" +#~ "When opening a new PR, inside its" +#~ " description, there should be a " +#~ "``Changelog entry`` header." +#~ msgstr "" + +#~ msgid "" +#~ "Above this header you should see " +#~ "the following comment that explains how" +#~ " to write your changelog entry:" +#~ msgstr "" + +#~ msgid "" +#~ "Inside the following 'Changelog entry' " +#~ "section, you should put the description" +#~ " of your changes that will be " +#~ "added to the changelog alongside your" +#~ " PR title." +#~ msgstr "" + +#~ msgid "" +#~ "If the section is completely empty " +#~ "(without any token) or non-existent, " +#~ "the changelog will just contain the " +#~ "title of the PR for the changelog" +#~ " entry, without any description." 
+#~ msgstr "" + +#~ msgid "" +#~ "If the section contains some text " +#~ "other than tokens, it will use it" +#~ " to add a description to the " +#~ "change." +#~ msgstr "" + +#~ msgid "" +#~ "If the section contains one of the" +#~ " following tokens it will ignore any" +#~ " other text and put the PR " +#~ "under the corresponding section of the" +#~ " changelog:" +#~ msgstr "" + +#~ msgid " is for classifying a PR as a general improvement." +#~ msgstr "" + +#~ msgid " is to not add the PR to the changelog" +#~ msgstr "" + +#~ msgid " is to add a general baselines change to the PR" +#~ msgstr "" + +#~ msgid " is to add a general examples change to the PR" +#~ msgstr "" + +#~ msgid " is to add a general sdk change to the PR" +#~ msgstr "" + +#~ msgid " is to add a general simulations change to the PR" +#~ msgstr "" + +#~ msgid "Note that only one token should be used." +#~ msgstr "" + +#~ msgid "" +#~ "Its content must have a specific " +#~ "format. We will break down what " +#~ "each possibility does:" +#~ msgstr "" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains nothing or doesn't exist, " +#~ "the following text will be added " +#~ "to the changelog::" +#~ msgstr "" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains a description (and no " +#~ "token), the following text will be " +#~ "added to the changelog::" +#~ msgstr "" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, nothing will change" +#~ " in the changelog." 
+#~ msgstr "" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following text" +#~ " will be added to the changelog::" +#~ msgstr "" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" +#~ msgstr "" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" +#~ msgstr "" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following text " +#~ "will be added to the changelog::" +#~ msgstr "" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" +#~ msgstr "" + +#~ msgid "" +#~ "Note that only one token must be" +#~ " provided, otherwise, only the first " +#~ "action (in the order listed above), " +#~ "will be performed." +#~ msgstr "" + +#~ msgid "Example: MXNet - Run MXNet Federated" +#~ msgstr "" + +#~ msgid "" +#~ "This tutorial will show you how to" +#~ " use Flower to build a federated " +#~ "version of an existing MXNet workload." +#~ " We are using MXNet to train a" +#~ " Sequential model on the MNIST " +#~ "dataset. We will structure the example" +#~ " similar to our `PyTorch - From " +#~ "Centralized To Federated " +#~ "`_ walkthrough. " +#~ "MXNet and PyTorch are very similar " +#~ "and a very good comparison between " +#~ "MXNet and PyTorch is given `here " +#~ "`_. First, " +#~ "we build a centralized training approach" +#~ " based on the `Handwritten Digit " +#~ "Recognition " +#~ "`_" +#~ " tutorial. Then, we build upon the" +#~ " centralized training code to run the" +#~ " training in a federated fashion." 
+#~ msgstr "" + +#~ msgid "" +#~ "Before we start setting up our " +#~ "MXNet example, we install the " +#~ ":code:`mxnet` and :code:`flwr` packages:" +#~ msgstr "" + +#~ msgid "MNIST Training with MXNet" +#~ msgstr "" + +#~ msgid "" +#~ "We begin with a brief description " +#~ "of the centralized training code based" +#~ " on a :code:`Sequential` model. If " +#~ "you want a more in-depth " +#~ "explanation of what's going on then " +#~ "have a look at the official `MXNet" +#~ " tutorial " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "Let's create a new file " +#~ "called:code:`mxnet_mnist.py` with all the " +#~ "components required for a traditional " +#~ "(centralized) MNIST training. First, the " +#~ "MXNet package :code:`mxnet` needs to be" +#~ " imported. You can see that we " +#~ "do not yet import the :code:`flwr` " +#~ "package for federated learning. This " +#~ "will be done later." +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`load_data()` function loads the " +#~ "MNIST training and test sets." +#~ msgstr "" + +#~ msgid "" +#~ "As already mentioned, we will use " +#~ "the MNIST dataset for this machine " +#~ "learning workload. The model architecture " +#~ "(a very simple :code:`Sequential` model) " +#~ "is defined in :code:`model()`." +#~ msgstr "" + +#~ msgid "" +#~ "We now need to define the training" +#~ " (function :code:`train()`) which loops " +#~ "over the training set and measures " +#~ "the loss for each batch of " +#~ "training examples." +#~ msgstr "" + +#~ msgid "" +#~ "The evaluation of the model is " +#~ "defined in function :code:`test()`. The " +#~ "function loops over all test samples " +#~ "and measures the loss and accuracy " +#~ "of the model based on the test " +#~ "dataset." +#~ msgstr "" + +#~ msgid "" +#~ "Having defined the data loading, model" +#~ " architecture, training, and evaluation we" +#~ " can put everything together and " +#~ "train our model on MNIST. 
Note " +#~ "that the GPU/CPU device for the " +#~ "training and testing is defined within" +#~ " the :code:`ctx` (context)." +#~ msgstr "" + +#~ msgid "You can now run your (centralized) MXNet machine learning workload:" +#~ msgstr "" + +#~ msgid "" +#~ "So far this should all look fairly" +#~ " familiar if you've used MXNet (or" +#~ " even PyTorch) before. Let's take the" +#~ " next step and use what we've " +#~ "built to create a simple federated " +#~ "learning system consisting of one server" +#~ " and two clients." +#~ msgstr "" + +#~ msgid "MXNet meets Flower" +#~ msgstr "" + +#~ msgid "" +#~ "So far, it was not easily possible" +#~ " to use MXNet workloads for federated" +#~ " learning because federated learning is " +#~ "not supported in MXNet. Since Flower " +#~ "is fully agnostic towards the underlying" +#~ " machine learning framework, it can " +#~ "be used to federated arbitrary machine" +#~ " learning workloads. This section will " +#~ "show you how Flower can be used" +#~ " to federate our centralized MXNet " +#~ "workload." +#~ msgstr "" + +#~ msgid "" +#~ "The concept to federate an existing " +#~ "workload is always the same and " +#~ "easy to understand. We have to " +#~ "start a *server* and then use the" +#~ " code in :code:`mxnet_mnist.py` for the " +#~ "*clients* that are connected to the " +#~ "*server*. The *server* sends model " +#~ "parameters to the clients. The *clients*" +#~ " run the training and update the " +#~ "parameters. The updated parameters are " +#~ "sent back to the *server* which " +#~ "averages all received parameter updates. " +#~ "This describes one round of the " +#~ "federated learning process and we repeat" +#~ " this for multiple rounds." +#~ msgstr "" + +#~ msgid "" +#~ "Finally, we will define our *client* " +#~ "logic in :code:`client.py` and build " +#~ "upon the previously defined MXNet " +#~ "training in :code:`mxnet_mnist.py`. 
Our " +#~ "*client* needs to import :code:`flwr`, " +#~ "but also :code:`mxnet` to update the " +#~ "parameters on our MXNet model:" +#~ msgstr "" + +#~ msgid "" +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " :code:`flwr.client.Client` or " +#~ ":code:`flwr.client.NumPyClient`. Our implementation " +#~ "will be based on " +#~ ":code:`flwr.client.NumPyClient` and we'll call " +#~ "it :code:`MNISTClient`. :code:`NumPyClient` is " +#~ "slightly easier to implement than " +#~ ":code:`Client` if you use a framework" +#~ " with good NumPy interoperability (like " +#~ "PyTorch or MXNet) because it avoids " +#~ "some of the boilerplate that would " +#~ "otherwise be necessary. :code:`MNISTClient` " +#~ "needs to implement four methods, two " +#~ "methods for getting/setting model parameters," +#~ " one method for training the model," +#~ " and one method for testing the " +#~ "model:" +#~ msgstr "" + +#~ msgid "transform MXNet :code:`NDArray`'s to NumPy :code:`ndarray`'s" +#~ msgstr "" + +#~ msgid "" +#~ "The challenging part is to transform " +#~ "the MXNet parameters from :code:`NDArray` " +#~ "to :code:`NumPy Arrays` to make it " +#~ "readable for Flower." +#~ msgstr "" + +#~ msgid "" +#~ "The two :code:`NumPyClient` methods " +#~ ":code:`fit` and :code:`evaluate` make use " +#~ "of the functions :code:`train()` and " +#~ ":code:`test()` previously defined in " +#~ ":code:`mxnet_mnist.py`. So what we really " +#~ "do here is we tell Flower through" +#~ " our :code:`NumPyClient` subclass which of" +#~ " our already defined functions to " +#~ "call for training and evaluation. We " +#~ "included type annotations to give you" +#~ " a better understanding of the data" +#~ " types that get passed around." +#~ msgstr "" + +#~ msgid "" +#~ "Having defined data loading, model " +#~ "architecture, training, and evaluation we " +#~ "can put everything together and train" +#~ " our :code:`Sequential` model on MNIST." 
+#~ msgstr "" + +#~ msgid "" +#~ "in each window (make sure that the" +#~ " server is still running before you" +#~ " do so) and see your MXNet " +#~ "project run federated learning across " +#~ "two clients. Congratulations!" +#~ msgstr "" + +#~ msgid "" +#~ "The full source code for this " +#~ "example: `MXNet: From Centralized To " +#~ "Federated (Code) " +#~ "`_. Our " +#~ "example is of course somewhat over-" +#~ "simplified because both clients load the" +#~ " exact same dataset, which isn't " +#~ "realistic. You're now prepared to " +#~ "explore this topic further. How about" +#~ " using a CNN or using a " +#~ "different dataset? How about adding more" +#~ " clients?" +#~ msgstr "" + +#~ msgid "" +#~ "This guide describes how to a " +#~ "SSL-enabled secure Flower server can " +#~ "be started and how a Flower client" +#~ " can establish a secure connections " +#~ "to it." +#~ msgstr "" + +#~ msgid "" +#~ "The code example comes with a " +#~ "README.md file which will explain how" +#~ " to start it. Although it is " +#~ "already SSL-enabled, it might be " +#~ "less descriptive on how. Stick to " +#~ "this guide for a deeper introduction " +#~ "to the topic." +#~ msgstr "" + +#~ msgid "" +#~ "Using SSL-enabled connections requires " +#~ "certificates to be passed to the " +#~ "server and client. For the purpose " +#~ "of this guide we are going to " +#~ "generate self-signed certificates. As " +#~ "this can become quite complex we " +#~ "are going to ask you to run " +#~ "the script in :code:`examples/advanced-" +#~ "tensorflow/certificates/generate.sh`" +#~ msgstr "" + +#~ msgid "with the following command sequence:" +#~ msgstr "" + +#~ msgid "" +#~ "The approach how the SSL certificates" +#~ " are generated in this example can" +#~ " serve as an inspiration and starting" +#~ " point but should not be taken " +#~ "as complete for production environments. 
" +#~ "Please refer to other sources regarding" +#~ " the issue of correctly generating " +#~ "certificates for production environments." +#~ msgstr "" + +#~ msgid "" +#~ "In case you are a researcher you" +#~ " might be just fine using the " +#~ "self-signed certificates generated using " +#~ "the scripts which are part of this" +#~ " guide." +#~ msgstr "" + +#~ msgid "" +#~ "We are now going to show how " +#~ "to write a sever which uses the" +#~ " previously generated scripts." +#~ msgstr "" + +#~ msgid "" +#~ "When providing certificates, the server " +#~ "expects a tuple of three certificates." +#~ " :code:`Path` can be used to easily" +#~ " read the contents of those files " +#~ "into byte strings, which is the " +#~ "data type :code:`start_server` expects." +#~ msgstr "" + +#~ msgid "" +#~ "We are now going to show how " +#~ "to write a client which uses the" +#~ " previously generated scripts:" +#~ msgstr "" + +#~ msgid "" +#~ "When setting :code:`root_certificates`, the " +#~ "client expects the PEM-encoded root " +#~ "certificates as a byte string. We " +#~ "are again using :code:`Path` to simplify" +#~ " reading those as byte strings." +#~ msgstr "" + +#~ msgid "" +#~ "You should now have learned how to" +#~ " generate self-signed certificates using" +#~ " the given script, start a SSL-" +#~ "enabled server, and have a client " +#~ "establish a secure connection to it." +#~ msgstr "" + +#~ msgid "" +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`_." +#~ msgstr "" + +#~ msgid "Flower server" +#~ msgstr "" + +#~ msgid "" +#~ "The command will pull the Docker " +#~ "image with the tag " +#~ "``1.7.0-py3.11-ubuntu22.04`` from Docker Hub. " +#~ "The tag contains the information which" +#~ " Flower, Python and Ubuntu is used." +#~ " In this case, it uses Flower " +#~ "1.7.0, Python 3.11 and Ubuntu 22.04. 
" +#~ "The ``--rm`` flag tells Docker to " +#~ "remove the container after it exits." +#~ msgstr "" + +#~ msgid "" +#~ "By default, the Flower server keeps " +#~ "state in-memory. When using the " +#~ "Docker flag ``--rm``, the state is " +#~ "not persisted between container starts. " +#~ "We will show below how to save " +#~ "the state in a file on your " +#~ "host system." +#~ msgstr "" + +#~ msgid "" +#~ "The ``-p :`` flag tells " +#~ "Docker to map the ports " +#~ "``9091``/``9092`` of the host to " +#~ "``9091``/``9092`` of the container, allowing" +#~ " you to access the Driver API " +#~ "on ``http://localhost:9091`` and the Fleet " +#~ "API on ``http://localhost:9092``. Lastly, any" +#~ " flag that comes after the tag " +#~ "is passed to the Flower server. " +#~ "Here, we are passing the flag " +#~ "``--insecure``." +#~ msgstr "" + +#~ msgid "" +#~ "The ``--insecure`` flag enables insecure " +#~ "communication (using HTTP, not HTTPS) " +#~ "and should only be used for " +#~ "testing purposes. We strongly recommend " +#~ "enabling `SSL `_ when " +#~ "deploying to a production environment." +#~ msgstr "" + +#~ msgid "" +#~ "You can use ``--help`` to view all" +#~ " available flags that the server " +#~ "supports:" +#~ msgstr "" + +#~ msgid "" +#~ "If you want to persist the state" +#~ " of the server on your host " +#~ "system, all you need to do is " +#~ "specify a path where you want to" +#~ " save the file on your host " +#~ "system and a name for the database" +#~ " file. In the example below, we " +#~ "tell Docker via the flag ``-v`` to" +#~ " mount the user's home directory " +#~ "(``~/`` on your host) into the " +#~ "``/app/`` directory of the container. " +#~ "Furthermore, we use the flag " +#~ "``--database`` to specify the name of" +#~ " the database file." +#~ msgstr "" + +#~ msgid "" +#~ "As soon as the server starts, the" +#~ " file ``state.db`` is created in the" +#~ " user's home directory on your host" +#~ " system. 
If the file already exists," +#~ " the server tries to restore the " +#~ "state from the file. To start the" +#~ " server with an empty database, " +#~ "simply remove the ``state.db`` file." +#~ msgstr "" + +#~ msgid "" +#~ "To enable SSL, you will need a " +#~ "CA certificate, a server certificate and" +#~ " a server private key." +#~ msgstr "" + +#~ msgid "" +#~ "For testing purposes, you can generate" +#~ " your own self-signed certificates. " +#~ "The `Enable SSL connections " +#~ "`_ page contains " +#~ "a section that will guide you " +#~ "through the process." +#~ msgstr "" + +#~ msgid "" +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``-v`` to " +#~ "mount the local directory into the " +#~ "``/app/`` directory of the container. " +#~ "This allows the server to access " +#~ "the files within the container. Finally," +#~ " we pass the names of the " +#~ "certificates to the server with the " +#~ "``--certificates`` flag." +#~ msgstr "" + +#~ msgid "Using a different Flower or Python version" +#~ msgstr "" + +#~ msgid "" +#~ "If you want to use a different " +#~ "version of Flower or Python, you " +#~ "can do so by changing the tag. " +#~ "All versions we provide are available" +#~ " on `Docker Hub " +#~ "`_." 
+#~ msgstr "" + +#~ msgid "" +#~ "The following command returns the " +#~ "current image hash referenced by the " +#~ "``server:1.7.0-py3.11-ubuntu22.04`` tag:" +#~ msgstr "" + +#~ msgid "Next, we can pin the hash when running a new server container:" +#~ msgstr "" + +#~ msgid "" +#~ "QUICKSTART TUTORIALS: :doc:`PyTorch ` | :doc:`TensorFlow " +#~ "` | :doc:`🤗 " +#~ "Transformers ` " +#~ "| :doc:`JAX ` |" +#~ " :doc:`Pandas ` " +#~ "| :doc:`fastai `" +#~ " | :doc:`PyTorch Lightning ` | :doc:`MXNet " +#~ "` | :doc" +#~ ":`scikit-learn `" +#~ " | :doc:`XGBoost ` | :doc:`Android ` | :doc:`iOS `" +#~ msgstr "" + +#~ msgid "flower-driver-api" +#~ msgstr "" + +#~ msgid "flower-fleet-api" +#~ msgstr "" + +#~ msgid "" +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " +#~ "[:py:class:`int`, :py:class:`float`, :py:class:`str`, " +#~ ":py:class:`bytes`, :py:class:`bool`, " +#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`float`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`str`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`bytes`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`bool`]]]" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`create_error_reply " +#~ "`\\ \\(error\\, " +#~ "ttl\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`create_reply `\\ " +#~ "\\(content\\, ttl\\)" +#~ msgstr "" + +#~ msgid "" +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " +#~ "[:py:class:`int`, :py:class:`float`, " +#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`float`]]]" +#~ msgstr "" + +#~ msgid "Run Flower server (Driver API and Fleet API)." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`start_driver `\\ " +#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "Start a Flower Driver API server." 
+#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`Driver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "`Driver` class provides an interface to the Driver API." +#~ msgstr "" + +#~ msgid "" +#~ "The IPv4 or IPv6 address of the" +#~ " Driver API server. Defaults to " +#~ "`\"[::]:9091\"`." +#~ msgstr "" + +#~ msgid ":py:obj:`close `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Disconnect from the SuperLink if connected." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`create_message `\\" +#~ " \\(content\\, message\\_type\\, ...\\)" +#~ msgstr "" + +#~ msgid "" +#~ "Time-to-live for the round trip" +#~ " of this message, i.e., the time " +#~ "from sending this message to receiving" +#~ " a reply. It specifies the duration" +#~ " for which the message and its " +#~ "potential reply are considered valid." +#~ msgstr "" + +#~ msgid "start\\_driver" +#~ msgstr "" + +#~ msgid "" +#~ "The IPv4 or IPv6 address of the" +#~ " Driver API server. Defaults to " +#~ "`\"[::]:8080\"`." +#~ msgstr "" + +#~ msgid "" +#~ "A server implementation, either " +#~ "`flwr.server.Server` or a subclass thereof." +#~ " If no instance is provided, then " +#~ "`start_driver` will create one." +#~ msgstr "" + +#~ msgid "" +#~ "An implementation of the class " +#~ "`flwr.server.ClientManager`. If no implementation" +#~ " is provided, then `start_driver` will " +#~ "use `flwr.server.SimpleClientManager`." +#~ msgstr "" + +#~ msgid "The Driver object to use." +#~ msgstr "" + +#~ msgid "Starting a driver that connects to an insecure server:" +#~ msgstr "" + +#~ msgid "Starting a driver that connects to an SSL-enabled server:" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`run_simulation_from_cli " +#~ "`\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Run Simulation Engine from the CLI." 
+#~ msgstr "" + +#~ msgid "run\\_simulation\\_from\\_cli" +#~ msgstr "" + +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with MXNet to train a Sequential " +#~ "model on MNIST." +#~ msgstr "" + +#~ msgid "Quickstart MXNet" +#~ msgstr "" + +#~ msgid "" +#~ "MXNet is no longer maintained and " +#~ "has been moved into `Attic " +#~ "`_. As a " +#~ "result, we would encourage you to " +#~ "use other ML frameworks alongside " +#~ "Flower, for example, PyTorch. This " +#~ "tutorial might be removed in future " +#~ "versions of Flower." +#~ msgstr "" + +#~ msgid "" +#~ "In this tutorial, we will learn " +#~ "how to train a :code:`Sequential` model" +#~ " on MNIST using Flower and MXNet." +#~ msgstr "" + +#~ msgid "Since we want to use MXNet, let's go ahead and install it:" +#~ msgstr "" + +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on MXNet´s `Hand-written Digit " +#~ "Recognition tutorial " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "In a file called :code:`client.py`, " +#~ "import Flower and MXNet related " +#~ "packages:" +#~ msgstr "" + +#~ msgid "In addition, define the device allocation in MXNet with:" +#~ msgstr "" + +#~ msgid "" +#~ "We use MXNet to load MNIST, a " +#~ "popular image classification dataset of " +#~ "handwritten digits for machine learning. " +#~ "The MXNet utility :code:`mx.test_utils.get_mnist()`" +#~ " downloads the training and test " +#~ "data." +#~ msgstr "" + +#~ msgid "" +#~ "Define the training and loss with " +#~ "MXNet. We train the model by " +#~ "looping over the dataset, measure the" +#~ " corresponding loss, and optimize it." +#~ msgstr "" + +#~ msgid "" +#~ "Next, we define the validation of " +#~ "our machine learning model. 
We loop " +#~ "over the test set and measure both" +#~ " loss and accuracy on the test " +#~ "set." +#~ msgstr "" + +#~ msgid "" +#~ "After defining the training and testing" +#~ " of a MXNet machine learning model," +#~ " we use these functions to implement" +#~ " a Flower client." +#~ msgstr "" + +#~ msgid "Our Flower clients will use a simple :code:`Sequential` model:" +#~ msgstr "" + +#~ msgid "" +#~ "After loading the dataset with " +#~ ":code:`load_data()` we perform one forward " +#~ "propagation to initialize the model and" +#~ " model parameters with :code:`model(init)`. " +#~ "Next, we implement a Flower client." +#~ msgstr "" + +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses MXNet." +#~ " Implementing :code:`NumPyClient` usually means" +#~ " defining the following methods " +#~ "(:code:`set_parameters` is optional though):" +#~ msgstr "" + +#~ msgid "They can be implemented in the following way:" +#~ msgstr "" + +#~ msgid "" +#~ "We can now create an instance of" +#~ " our class :code:`MNISTClient` and add " +#~ "one line to actually run this " +#~ "client:" +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()` or " +#~ ":code:`fl.client.start_numpy_client()`. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." 
+#~ msgstr "" + +#~ msgid "" +#~ "With both client and server ready, " +#~ "we can now run everything and see" +#~ " federated learning in action. Federated" +#~ " learning systems usually have a " +#~ "server and multiple clients. We " +#~ "therefore have to start the server " +#~ "first:" +#~ msgstr "" + +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/quickstart-mxnet`." +#~ msgstr "" + +#~ msgid "Sets the parameters of a :code:`sklean` LogisticRegression model" +#~ msgstr "" + +#~ msgid ":code:`load_mnist()`" +#~ msgstr "" + +#~ msgid "Loads the MNIST dataset using OpenML" +#~ msgstr "" + +#~ msgid ":code:`shuffle()`" +#~ msgstr "" + +#~ msgid "Shuffles data and its label" +#~ msgstr "" + +#~ msgid ":code:`partition()`" +#~ msgstr "" + +#~ msgid "Splits datasets into a number of partitions" +#~ msgstr "" + +#~ msgid "" +#~ "We load the MNIST dataset from " +#~ "`OpenML " +#~ "`_, a" +#~ " popular image classification dataset of" +#~ " handwritten digits for machine learning." +#~ " The utility :code:`utils.load_mnist()` downloads" +#~ " the training and test data. The " +#~ "training set is split afterwards into" +#~ " 10 partitions with :code:`utils.partition()`." +#~ msgstr "" + +#~ msgid "" +#~ "The number of federated learning rounds" +#~ " is set in :code:`fit_round()` and " +#~ "the evaluation is defined in " +#~ ":code:`get_evaluate_fn()`. The evaluation function" +#~ " is called after each federated " +#~ "learning round and gives you information" +#~ " about loss and accuracy." +#~ msgstr "" + +#~ msgid "Let's get stated!" +#~ msgstr "" + +#~ msgid "" +#~ "We now have a list of ten " +#~ "training sets and ten validation sets" +#~ " (``trainloaders`` and ``valloaders``) " +#~ "representing the data of ten different" +#~ " organizations. 
Each ``trainloader``/``valloader`` " +#~ "pair contains 4500 training examples and" +#~ " 500 validation examples. There's also " +#~ "a single ``testloader`` (we did not " +#~ "split the test set). Again, this " +#~ "is only necessary for building research" +#~ " or educational systems, actual federated" +#~ " learning systems have their data " +#~ "naturally distributed across multiple " +#~ "partitions." +#~ msgstr "" + +#~ msgid "|2b5c62c529f6416f840c594cce062fbb|" +#~ msgstr "" + +#~ msgid "|90b334680cb7467d9a04d39b8e8dca9f|" +#~ msgstr "" + +#~ msgid "|65764ceee89f4335bfd93fd0b115e831|" +#~ msgstr "" + +#~ msgid "|d97319ec28bb407ea0ab9705e38f3bcf|" +#~ msgstr "" + +#~ msgid "|11e95ac83a8548d8b3505b4663187d07|" +#~ msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:550 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:948 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:729 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:715 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 -msgid "" -"Before you continue, make sure to join the Flower community on Slack: " -"`Join Slack `__" -msgstr "" +#~ msgid "|1dab2f3a23674abc8a6731f20fa10730|" +#~ msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:552 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:950 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:731 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:717 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 -msgid "" -"There's a dedicated ``#questions`` channel if you need help, but we'd " -"also love to hear who you are in ``#introductions``!" 
-msgstr "" +#~ msgid "|7f0ee162da38450788493a21627306f7|" +#~ msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:554 -msgid "" -"The `Flower Federated Learning Tutorial - Part 4 " -"`__ introduces ``Client``, the flexible API underlying " -"``NumPyClient``." -msgstr "" +#~ msgid "|296a1fb72c514b23b3d8905ff0ff98c6|" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 -msgid "Customize the client" -msgstr "" +#~ msgid "|5b1408eec0d746cdb91162a9107b6089|" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 -msgid "" -"Welcome to the fourth part of the Flower federated learning tutorial. In " -"the previous parts of this tutorial, we introduced federated learning " -"with PyTorch and Flower (`part 1 `__), we learned how " -"strategies can be used to customize the execution on both the server and " -"the clients (`part 2 `__), and we built our own " -"custom strategy from scratch (`part 3 `__)." -msgstr "" +#~ msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 -msgid "" -"In this notebook, we revisit ``NumPyClient`` and introduce a new " -"baseclass for building clients, simply named ``Client``. In previous " -"parts of this tutorial, we've based our client on ``NumPyClient``, a " -"convenience class which makes it easy to work with machine learning " -"libraries that have good NumPy interoperability. With ``Client``, we gain" -" a lot of flexibility that we didn't have before, but we'll also have to " -"do a few things the we didn't have to do before." -msgstr "" +#~ msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:18 -msgid "" -"Let's go deeper and see what it takes to move from ``NumPyClient`` to " -"``Client``!" 
-msgstr "" +#~ msgid "|ec1fe880237247e0975f52766775ab84|" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:30 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:29 -msgid "Step 0: Preparation" -msgstr "" +#~ msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:117 -msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``." -msgstr "" +#~ msgid "|ff726bc5505e432388ee2fdd6ef420b9|" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:259 -msgid "Step 1: Revisiting NumPyClient" -msgstr "" +#~ msgid "" +#~ "Currently, Flower provides two images, a" +#~ " ``base`` image and a ``superlink`` " +#~ "image. The base image, as the name" +#~ " suggests, contains basic dependencies that" +#~ " the SuperLink needs. This includes " +#~ "system dependencies, Python and Python " +#~ "tools. The SuperLink image is based " +#~ "on the base image, but it " +#~ "additionally installs the SuperLink using " +#~ "``pip``." +#~ msgstr "" +#~ "Atualmente, Flower fornece duas imagens, " +#~ "uma imagem base e uma imagem de" +#~ " servidor. Também haverá uma imagem " +#~ "de cliente em breve. A imagem " +#~ "base, como o nome sugere, contém " +#~ "dependências básicas que tanto o " +#~ "servidor quanto o cliente precisam. Isso" +#~ " inclui dependências do sistema, Python " +#~ "e ferramentas Python. A imagem do " +#~ "servidor é baseada na imagem base, " +#~ "mas também instala o servidor Flower " +#~ "usando ``pip```." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:261 -msgid "" -"So far, we've implemented our client by subclassing " -"``flwr.client.NumPyClient``. The three methods we implemented are " -"``get_parameters``, ``fit``, and ``evaluate``. 
Finally, we wrap the " -"creation of instances of this class in a function called ``client_fn``:" -msgstr "" +#~ msgid "``3.11``" +#~ msgstr "``3.11``" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:309 -msgid "" -"We've seen this before, there's nothing new so far. The only *tiny* " -"difference compared to the previous notebook is naming, we've changed " -"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " -"``numpyclient_fn``. Let's run it to see the output we get:" -msgstr "" +#~ msgid "Defaults to ``22.04``." +#~ msgstr "Como padrão ``22.04``." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:339 -msgid "" -"This works as expected, two clients are training for three rounds of " -"federated learning." -msgstr "" +#~ msgid "Building the SuperLink image" +#~ msgstr "Construindo a imagem do servidor" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:341 -msgid "" -"Let's dive a little bit deeper and discuss how Flower executes this " -"simulation. Whenever a client is selected to do some work, " -"``start_simulation`` calls the function ``numpyclient_fn`` to create an " -"instance of our ``FlowerNumPyClient`` (along with loading the model and " -"the data)." -msgstr "" +#~ msgid "Defaults to ``flwr/base``." +#~ msgstr "Pré-definido para ``flwr/server``." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:343 -msgid "" -"But here's the perhaps surprising part: Flower doesn't actually use the " -"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " -"makes it look like a subclass of ``flwr.client.Client``, not " -"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " -"know how to handle ``NumPyClient``'s, it only knows how to handle " -"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " -"top of ``Client``." -msgstr "" +#~ msgid "The Python version of the base image." 
+#~ msgstr "O nome do repositório da imagem base." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:345 -msgid "" -"Instead of building on top of ``NumPyClient``, we can directly build on " -"top of ``Client``." -msgstr "" +#~ msgid "Defaults to ``py3.11``." +#~ msgstr "Como padrão ``22.04``." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:357 -msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" -msgstr "" +#~ msgid "Defaults to ``ubuntu22.04``." +#~ msgstr "Pré-definido para ``py3.11-ubuntu22.04``." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:359 -msgid "" -"Let's try to do the same thing using ``Client`` instead of " -"``NumPyClient``." -msgstr "" +#~ msgid "The PyPI package to install." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:465 -msgid "" -"Before we discuss the code in more detail, let's try to run it! Gotta " -"make sure our new ``Client``-based client works, right?" -msgstr "" +#~ msgid "Defaults to ``flwr``." +#~ msgstr "Pré-definido para ``flwr/server``." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:490 -msgid "" -"That's it, we're now using ``Client``. It probably looks similar to what " -"we've done with ``NumPyClient``. So what's the difference?" -msgstr "" +#~ msgid "" +#~ "The name of image is ``flwr_superlink``" +#~ " and the tag ``0.1.0``. Remember that" +#~ " the build arguments as well as " +#~ "the name and tag can be adapted" +#~ " to your needs. These values serve" +#~ " as examples only." +#~ msgstr "" +#~ "O nome da imagem é ``flwr_server`` " +#~ "e a tag ``0.1.0``. Lembre-se que" +#~ " os argumentos de compilação, bem " +#~ "como o nome e a tag podem " +#~ "ser adaptados às suas necessidades. " +#~ "Esses valores servem apenas como " +#~ "exemplos." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:492 -msgid "" -"First of all, it's more code. But why? 
The difference comes from the fact" -" that ``Client`` expects us to take care of parameter serialization and " -"deserialization. For Flower to be able to send parameters over the " -"network, it eventually needs to turn these parameters into ``bytes``. " -"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " -"serialization. Turning raw bytes into something more useful (like NumPy " -"``ndarray``'s) is called deserialization. Flower needs to do both: it " -"needs to serialize parameters on the server-side and send them to the " -"client, the client needs to deserialize them to use them for local " -"training, and then serialize the updated parameters again to send them " -"back to the server, which (finally!) deserializes them again in order to " -"aggregate them with the updates received from other clients." -msgstr "" +#~ msgid "Creating New Messages" +#~ msgstr "Criando novas mensagens" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:495 -msgid "" -"The only *real* difference between Client and NumPyClient is that " -"NumPyClient takes care of serialization and deserialization for you. It " -"can do so because it expects you to return parameters as NumPy ndarray's," -" and it knows how to handle these. This makes working with machine " -"learning libraries that have good NumPy support (most of them) a breeze." -msgstr "" +#~ msgid "" +#~ "This is a simple guide for " +#~ "creating a new type of message " +#~ "between the server and clients in " +#~ "Flower." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:497 -msgid "" -"In terms of API, there's one major difference: all methods in Client take" -" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " -"exactly one value (e.g., ``FitRes`` in ``Client.fit``). 
The methods in " -"``NumPyClient`` on the other hand have multiple arguments (e.g., " -"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" -" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " -"``NumPyClient.fit``) if there are multiple things to handle. These " -"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " -"values you're used to from ``NumPyClient``." -msgstr "" +#~ msgid "" +#~ "Let's suppose we have the following " +#~ "example functions in :code:`server.py` and " +#~ ":code:`numpy_client.py`..." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:510 -msgid "Step 3: Custom serialization" -msgstr "" +#~ msgid "Server's side:" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:512 -msgid "" -"Here we will explore how to implement custom serialization with a simple " -"example." -msgstr "" +#~ msgid "Client's side:" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:514 -msgid "" -"But first what is serialization? Serialization is just the process of " -"converting an object into raw bytes, and equally as important, " -"deserialization is the process of converting raw bytes back into an " -"object. This is very useful for network communication. Indeed, without " -"serialization, you could not just a Python object through the internet." -msgstr "" +#~ msgid "" +#~ "Let's now see what we need to " +#~ "implement in order to get this " +#~ "simple function between the server and" +#~ " client to work!" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:516 -msgid "" -"Federated Learning relies heavily on internet communication for training " -"by sending Python objects back and forth between the clients and the " -"server. This means that serialization is an essential part of Federated " -"Learning." 
-msgstr "" +#~ msgid "Message Types for Protocol Buffers" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:518 -msgid "" -"In the following section, we will write a basic example where instead of " -"sending a serialized version of our ``ndarray``\\ s containing our " -"parameters, we will first convert the ``ndarray`` into sparse matrices, " -"before sending them. This technique can be used to save bandwidth, as in " -"certain cases where the weights of a model are sparse (containing many 0 " -"entries), converting them to a sparse matrix can greatly improve their " -"bytesize." -msgstr "" +#~ msgid "" +#~ "The first thing we need to do " +#~ "is to define a message type for" +#~ " the RPC system in :code:`transport.proto`." +#~ " Note that we have to do it " +#~ "for both the request and response " +#~ "messages. For more details on the " +#~ "syntax of proto3, please see the " +#~ "`official documentation `_." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:521 -msgid "Our custom serialization/deserialization functions" -msgstr "" +#~ msgid "Within the :code:`ServerMessage` block:" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:523 -msgid "" -"This is where the real serialization/deserialization will happen, " -"especially in ``ndarray_to_sparse_bytes`` for serialization and " -"``sparse_bytes_to_ndarray`` for deserialization." -msgstr "" +#~ msgid "Within the ClientMessage block:" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:525 -msgid "" -"Note that we imported the ``scipy.sparse`` library in order to convert " -"our arrays." -msgstr "" +#~ msgid "" +#~ "Make sure to also add a field " +#~ "of the newly created message type " +#~ "in :code:`oneof msg`." 
+#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:613 -msgid "Client-side" -msgstr "" +#~ msgid "Once that is done, we will compile the file with:" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:615 -msgid "" -"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " -"will just have to call our custom functions in our " -"``flwr.client.Client``." -msgstr "" +#~ msgid "If it compiles successfully, you should see the following message:" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:617 -msgid "" -"Indeed, in ``get_parameters`` we need to serialize the parameters we got " -"from our network using our custom ``ndarrays_to_sparse_parameters`` " -"defined above." -msgstr "" +#~ msgid "Serialization and Deserialization Functions" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:619 -msgid "" -"In ``fit``, we first need to deserialize the parameters coming from the " -"server using our custom ``sparse_parameters_to_ndarrays`` and then we " -"need to serialize our local results with " -"``ndarrays_to_sparse_parameters``." -msgstr "" +#~ msgid "" +#~ "Our next step is to add functions" +#~ " to serialize and deserialize Python " +#~ "datatypes to or from our defined " +#~ "RPC message types. You should add " +#~ "these functions in :code:`serde.py`." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:621 -msgid "" -"In ``evaluate``, we will only need to deserialize the global parameters " -"with our custom function." 
-msgstr "" +#~ msgid "The four functions:" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:725 -msgid "Server-side" -msgstr "" +#~ msgid "Sending the Message from the Server" +#~ msgstr "" + +#~ msgid "" +#~ "Now write the request function in " +#~ "your Client Proxy class (e.g., " +#~ ":code:`grpc_client_proxy.py`) using the serde " +#~ "functions you just created:" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:727 -msgid "" -"For this example, we will just use ``FedAvg`` as a strategy. To change " -"the serialization and deserialization here, we only need to reimplement " -"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other" -" functions of the strategy will be inherited from the super class " -"``FedAvg``." -msgstr "" +#~ msgid "Receiving the Message by the Client" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:729 -msgid "As you can see only one line as change in ``evaluate``:" -msgstr "" +#~ msgid "" +#~ "Last step! Modify the code in " +#~ ":code:`message_handler.py` to check the field" +#~ " of your message and call the " +#~ ":code:`example_response` function. Remember to " +#~ "use the serde functions!" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:735 -msgid "" -"And for ``aggregate_fit``, we will first deserialize every result we " -"received:" -msgstr "" +#~ msgid "Within the handle function:" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:744 -msgid "And then serialize the aggregated result:" -msgstr "" +#~ msgid "And add a new function:" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:903 -msgid "We can now run our custom serialization example!" -msgstr "" +#~ msgid "Hopefully, when you run your program you will get the intended result!" 
+#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:934 -msgid "" -"In this part of the tutorial, we've seen how we can build clients by " -"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " -"convenience abstraction that makes it easier to work with machine " -"learning libraries that have good NumPy interoperability. ``Client`` is a" -" more flexible abstraction that allows us to do things that are not " -"possible in ``NumPyClient``. In order to do so, it requires us to handle " -"parameter serialization and deserialization ourselves." -msgstr "" +#~ msgid "" +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`__." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:952 -msgid "" -"This is the final part of the Flower tutorial (for now!), " -"congratulations! You're now well equipped to understand the rest of the " -"documentation. There are many topics we didn't cover in the tutorial, we " -"recommend the following resources:" -msgstr "" +#~ msgid "" +#~ "If you want to persist the state" +#~ " of the SuperLink on your host " +#~ "system, all you need to do is " +#~ "specify a path where you want to" +#~ " save the file on your host " +#~ "system and a name for the database" +#~ " file. In the example below, we " +#~ "tell Docker via the flag ``--volume``" +#~ " to mount the user's home directory" +#~ " (``~/`` on your host) into the " +#~ "``/app/`` directory of the container. " +#~ "Furthermore, we use the flag " +#~ "``--database`` to specify the name of" +#~ " the database file." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:954 -msgid "`Read Flower Docs `__" -msgstr "" +#~ msgid "" +#~ "As soon as the SuperLink starts, " +#~ "the file ``state.db`` is created in " +#~ "the user's home directory on your " +#~ "host system. 
If the file already " +#~ "exists, the SuperLink tries to restore" +#~ " the state from the file. To " +#~ "start the SuperLink with an empty " +#~ "database, simply remove the ``state.db`` " +#~ "file." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:955 -msgid "" -"`Check out Flower Code Examples " -"`__" -msgstr "" +#~ msgid "" +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``--volume`` to " +#~ "mount the local directory into the " +#~ "``/app/`` directory of the container. " +#~ "This allows the SuperLink to access " +#~ "the files within the container. Finally," +#~ " we pass the names of the " +#~ "certificates to the SuperLink with the" +#~ " ``--certificates`` flag." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:956 -msgid "" -"`Use Flower Baselines for your research " -"`__" -msgstr "" +#~ msgid "" +#~ "``--server 192.168.1.100:9092``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Fleet" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:957 -msgid "" -"`Watch Flower Summit 2023 videos `__" -msgstr "" +#~ msgid "" +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the SuperNode to" +#~ " access the certificate within the " +#~ "container. Use the ``--certificates`` flag " +#~ "when starting the container." 
+#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 -msgid "Get started with Flower" -msgstr "" +#~ msgid "" +#~ "``--server 192.168.1.100:9091``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Driver" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 -msgid "Welcome to the Flower federated learning tutorial!" -msgstr "" +#~ msgid "" +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the ServerApp to" +#~ " access the certificate within the " +#~ "container. Use the ``--certificates`` flag " +#~ "when starting the container." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 -msgid "" -"In this notebook, we'll build a federated learning system using Flower, " -"`Flower Datasets `__ and PyTorch. In " -"part 1, we use PyTorch for the model training pipeline and data loading. " -"In part 2, we continue to federate the PyTorch-based pipeline using " -"Flower." -msgstr "" +#~ msgid "" +#~ "If you want to use a different " +#~ "version of Flower, for example Flower" +#~ " nightly, you can do so by " +#~ "changing the tag. All available versions" +#~ " are on `Docker Hub " +#~ "`__." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:17 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 -msgid "Let's get started!" -msgstr "" +#~ msgid "" +#~ "Here's another example to start with " +#~ "HTTPS. Use the ``--certificates`` command " +#~ "line argument to pass paths to (CA" +#~ " certificate, server certificate, and " +#~ "server private key)." 
+#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:31 -msgid "" -"Before we begin with any actual code, let's make sure that we have " -"everything we need." -msgstr "" +#~ msgid ":py:obj:`run_driver_api `\\ \\(\\)" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:45 -msgid "" -"Next, we install the necessary packages for PyTorch (``torch`` and " -"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " -"(``flwr``):" -msgstr "" +#~ msgid "Run Flower server (Driver API)." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:105 -msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" -" GPU > Save``). Note, however, that Google Colab is not always able to " -"offer GPU acceleration. If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." -msgstr "" +#~ msgid ":py:obj:`run_fleet_api `\\ \\(\\)" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:118 -msgid "Loading the data" -msgstr "" +#~ msgid "Run Flower server (Fleet API)." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:120 -msgid "" -"Federated learning can be applied to many different types of tasks across" -" different domains. In this tutorial, we introduce federated learning by " -"training a simple convolutional neural network (CNN) on the popular " -"CIFAR-10 dataset. 
CIFAR-10 can be used to train image classifiers that " -"distinguish between images from ten different classes: 'airplane', " -"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " -"'truck'." -msgstr "" +#~ msgid "Unreleased" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:131 -msgid "" -"We simulate having multiple datasets from multiple organizations (also " -"called the \"cross-silo\" setting in federated learning) by splitting the" -" original CIFAR-10 dataset into multiple partitions. Each partition will " -"represent the data from a single organization. We're doing this purely " -"for experimentation purposes, in the real world there's no need for data " -"splitting because each organization already has their own data (so the " -"data is naturally partitioned)." -msgstr "" +#~ msgid "|d8bf04f23d9b46d8a23cc6f4887d7873|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:133 -msgid "" -"Each organization will act as a client in the federated learning system. " -"So having ten organizations participate in a federation means having ten " -"clients connected to the federated learning server." -msgstr "" +#~ msgid "|5aa1711387d74d0f8b9c499e1a51627e|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:144 -msgid "" -"Let's now create the Federated Dataset abstraction that from ``flwr-" -"datasets`` that partitions the CIFAR-10. We will create small training " -"and test set for each edge device and wrap each of them into a PyTorch " -"``DataLoader``:" -msgstr "" +#~ msgid "|2bc8e069228d4873804061ff4a95048c|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:198 -msgid "" -"We now have a list of ten training sets and ten validation sets " -"(``trainloaders`` and ``valloaders``) representing the data of ten " -"different organizations. 
Each ``trainloader``/``valloader`` pair contains" -" 4000 training examples and 1000 validation examples. There's also a " -"single ``testloader`` (we did not split the test set). Again, this is " -"only necessary for building research or educational systems, actual " -"federated learning systems have their data naturally distributed across " -"multiple partitions." -msgstr "" +#~ msgid "|c258488766324dc9a6807f0e7c4fd5f4|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:201 -msgid "" -"Let's take a look at the first batch of images and labels in the first " -"training set (i.e., ``trainloaders[0]``) before we move on:" -msgstr "" +#~ msgid "|d5f962c3f4ec48529efda980868c14b0|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:240 -msgid "" -"The output above shows a random batch of images from the first " -"``trainloader`` in our list of ten ``trainloaders``. It also prints the " -"labels associated with each image (i.e., one of the ten possible labels " -"we've seen above). If you run the cell again, you should see another " -"batch of images." -msgstr "" +#~ msgid "|a5eccea18d4c43a68b54b65043cabef8|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:252 -msgid "Step 1: Centralized Training with PyTorch" -msgstr "" +#~ msgid "|f17662f7df2d42f68cac70a1fdeda8a7|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:263 -msgid "" -"Next, we're going to use PyTorch to define a simple convolutional neural " -"network. This introduction assumes basic familiarity with PyTorch, so it " -"doesn't cover the PyTorch-related aspects in full detail. If you want to " -"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " -"MINUTE BLITZ " -"`__." 
-msgstr "" +#~ msgid "|241fc906441a4f038c625a19d30d01b2|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:275 -msgid "Defining the model" -msgstr "" +#~ msgid "|0aa5aa05810b44b6a835cecce28f3137|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:277 -msgid "" -"We use the simple CNN described in the `PyTorch tutorial " -"`__:" -msgstr "" +#~ msgid "|c742940dd4bf4de09d8d0d5e8d179638|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:314 -msgid "Let's continue with the usual training and test functions:" -msgstr "" +#~ msgid "|1f169ab4601a47e1a226f1628f4ebddb|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:374 -msgid "Training the model" -msgstr "" +#~ msgid "|12cfa9cde14440ecb8c8f6c1d7185bec|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:376 -msgid "" -"We now have all the basic building blocks we need: a dataset, a model, a " -"training function, and a test function. Let's put them together to train " -"the model on the dataset of one of our organizations " -"(``trainloaders[0]``). This simulates the reality of most machine " -"learning projects today: each organization has their own data and trains " -"models only on this internal data:" -msgstr "" +#~ msgid "|72939caf6e294b0986fee6dde96614d7|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 -msgid "" -"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " -"in a test set accuracy of about 41%, which is not good, but at the same " -"time, it doesn't really matter for the purposes of this tutorial. The " -"intent was just to show a simplistic centralized training pipeline that " -"sets the stage for what comes next - federated learning!" 
-msgstr "" +#~ msgid "|83a8daee45da4a98b8d6f24ae098fc50|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 -msgid "Step 2: Federated Learning with Flower" -msgstr "" +#~ msgid "Edge Client Engine" +#~ msgstr "Engine do Edge Client" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 -msgid "" -"Step 1 demonstrated a simple centralized training pipeline. All data was " -"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." -" Next, we'll simulate a situation where we have multiple datasets in " -"multiple organizations and where we train a model over these " -"organizations using federated learning." -msgstr "" +#~ msgid "" +#~ "`Flower `_ core framework " +#~ "architecture with Edge Client Engine" +#~ msgstr "" +#~ "`Flower `_ arquitetura principal" +#~ " do framework com Engine do Edge " +#~ "Client" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 -msgid "Updating model parameters" -msgstr "" +#~ msgid "Virtual Client Engine" +#~ msgstr "Engine do Virtual Client" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 -msgid "" -"In federated learning, the server sends the global model parameters to " -"the client, and the client updates the local model with the parameters " -"received from the server. It then trains the model on the local data " -"(which changes the model parameters locally) and sends the " -"updated/changed model parameters back to the server (or, alternatively, " -"it sends just the gradients back to the server, not the full model " -"parameters)." 
-msgstr "" +#~ msgid "" +#~ "`Flower `_ core framework " +#~ "architecture with Virtual Client Engine" +#~ msgstr "" +#~ "`Flower `_ arquitetura principal" +#~ " do framework com Engine do Virtual" +#~ " Client" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 -msgid "" -"We need two helper functions to update the local model with parameters " -"received from the server and to get the updated model parameters from the" -" local model: ``set_parameters`` and ``get_parameters``. The following " -"two functions do just that for the PyTorch model above." -msgstr "" +#~ msgid "Virtual Client Engine and Edge Client Engine in the same workload" +#~ msgstr "" +#~ "Engine do Virtual Client e do Edge" +#~ " Client no mesma carga de trabalho" +#~ " (workload)" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 -msgid "" -"The details of how this works are not really important here (feel free to" -" consult the PyTorch documentation if you want to learn more). In " -"essence, we use ``state_dict`` to access PyTorch model parameter tensors." -" The parameter tensors are then converted to/from a list of NumPy " -"ndarray's (which Flower knows how to serialize/deserialize):" -msgstr "" +#~ msgid "" +#~ "`Flower `_ core framework " +#~ "architecture with both Virtual Client " +#~ "Engine and Edge Client Engine" +#~ msgstr "" +#~ "`Flower `_ arquitetura principal" +#~ " do framework com ambas engines do" +#~ " Virtual Client e do Edge Client" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 -msgid "Implementing a Flower client" -msgstr "" +#~ msgid "Clone the flower repository." +#~ msgstr "Clone o repositório do flower." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 -msgid "" -"With that out of the way, let's move on to the interesting part. " -"Federated learning systems consist of a server and multiple clients. 
In " -"Flower, we create clients by implementing subclasses of " -"``flwr.client.Client`` or ``flwr.client.NumPyClient``. We use " -"``NumPyClient`` in this tutorial because it is easier to implement and " -"requires us to write less boilerplate." -msgstr "" +#~ msgid "" +#~ "Please follow the first section on " +#~ ":doc:`Run Flower using Docker ` which " +#~ "covers this step in more detail." +#~ msgstr "" +#~ "Por favor, siga a primeira seção " +#~ "em :doc:`Execute o Flower usando Docker" +#~ " `" +#~ " que cobre este passo em mais " +#~ "detalhes." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 -msgid "" -"To implement the Flower client, we create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``:" -msgstr "" +#~ msgid "``22.04``" +#~ msgstr "``23.0.1``" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 -msgid "``get_parameters``: Return the current local model parameters" -msgstr "" +#~ msgid "``23.0.1``" +#~ msgstr "``23.0.1``" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 -msgid "" -"``fit``: Receive model parameters from the server, train the model " -"parameters on the local data, and return the (updated) model parameters " -"to the server" -msgstr "" +#~ msgid "``69.0.2``" +#~ msgstr "``69.0.2``" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 -msgid "" -"``evaluate``: Receive model parameters from the server, evaluate the " -"model parameters on the local data, and return the evaluation result to " -"the server" -msgstr "" +#~ msgid "``1.8.0``" +#~ msgstr "``1.7.0``" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 -msgid "" -"We mentioned that our clients will use the previously defined PyTorch " -"components for model training and evaluation. 
Let's see a simple Flower " -"client implementation that brings everything together:" -msgstr "" +#~ msgid "Building the SuperLink/SuperNode or ServerApp image" +#~ msgstr "Construindo a imagem do servidor" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 -msgid "" -"Our class ``FlowerClient`` defines how local training/evaluation will be " -"performed and allows Flower to call the local training/evaluation through" -" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" -" *single client* in our federated learning system. Federated learning " -"systems have multiple clients (otherwise, there's not much to federate), " -"so each client will be represented by its own instance of " -"``FlowerClient``. If we have, for example, three clients in our workload," -" then we'd have three instances of ``FlowerClient``. Flower calls " -"``FlowerClient.fit`` on the respective instance when the server selects a" -" particular client for training (and ``FlowerClient.evaluate`` for " -"evaluation)." -msgstr "" +#~ msgid "``1.8.0-py3.10-ubuntu22.04``" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:517 -msgid "Using the Virtual Client Engine" -msgstr "" +#~ msgid "" +#~ "The following example creates a " +#~ "SuperLink/SuperNode or ServerApp image with" +#~ " the official Flower base image:" +#~ msgstr "" +#~ "O exemplo a seguir cria uma imagem" +#~ " de servidor com a imagem base " +#~ "oficial do Flower py3.11-ubuntu22.04 e " +#~ "Flower 1.7.0:" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:519 -msgid "" -"In this notebook, we want to simulate a federated learning system with 10" -" clients on a single machine. This means that the server and all 10 " -"clients will live on a single machine and share resources such as CPU, " -"GPU, and memory. Having 10 clients would mean having 10 instances of " -"``FlowerClient`` in memory. 
Doing this on a single machine can quickly " -"exhaust the available memory resources, even if only a subset of these " -"clients participates in a single round of federated learning." -msgstr "" +#~ msgid "Trigger the CI for building the Docker images." +#~ msgstr "Versão da imagem Docker oficial do Ubuntu." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:521 -msgid "" -"In addition to the regular capabilities where server and clients run on " -"multiple machines, Flower, therefore, provides special simulation " -"capabilities that create ``FlowerClient`` instances only when they are " -"actually necessary for training or evaluation. To enable the Flower " -"framework to create clients when necessary, we need to implement a " -"function called ``client_fn`` that creates a ``FlowerClient`` instance on" -" demand. Flower calls ``client_fn`` whenever it needs an instance of one " -"particular client to call ``fit`` or ``evaluate`` (those instances are " -"usually discarded after use, so they should not keep any local state). " -"Clients are identified by a client ID, or short ``cid``. The ``cid`` can " -"be used, for example, to load different local data partitions for " -"different clients, as can be seen below:" -msgstr "" +#~ msgid "" +#~ "To trigger the workflow, a collaborator" +#~ " must create a ``workflow_dispatch`` event" +#~ " in the GitHub CI. This can be" +#~ " done either through the UI or " +#~ "via the GitHub CLI. The event " +#~ "requires only one input, the Flower " +#~ "version, to be released." 
+#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:556 -msgid "Starting the training" -msgstr "" +#~ msgid "**Via the UI**" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:558 -msgid "" -"We now have the class ``FlowerClient`` which defines client-side " -"training/evaluation and ``client_fn`` which allows Flower to create " -"``FlowerClient`` instances whenever it needs to call ``fit`` or " -"``evaluate`` on one particular client. The last step is to start the " -"actual simulation using ``flwr.simulation.start_simulation``." -msgstr "" +#~ msgid "" +#~ "Go to the ``Build docker images`` " +#~ "workflow `page " +#~ "`_." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:560 -msgid "" -"The function ``start_simulation`` accepts a number of arguments, amongst " -"them the ``client_fn`` used to create ``FlowerClient`` instances, the " -"number of clients to simulate (``num_clients``), the number of federated " -"learning rounds (``num_rounds``), and the strategy. The strategy " -"encapsulates the federated learning approach/algorithm, for example, " -"*Federated Averaging* (FedAvg)." -msgstr "" +#~ msgid "" +#~ "Click on the ``Run workflow`` button " +#~ "and type the new version of Flower" +#~ " in the ``Version of Flower`` input" +#~ " field." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:562 -msgid "" -"Flower has a number of built-in strategies, but we can also use our own " -"strategy implementations to customize nearly all aspects of the federated" -" learning approach. For this example, we use the built-in ``FedAvg`` " -"implementation and customize it using a few basic parameters. The last " -"step is the actual call to ``start_simulation`` which - you guessed it - " -"starts the simulation:" -msgstr "" +#~ msgid "Click on the **green** ``Run workflow`` button." 
+#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:608 -msgid "Behind the scenes" -msgstr "" +#~ msgid "**Via the GitHub CI**" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:610 -msgid "So how does this work? How does Flower execute this simulation?" -msgstr "" +#~ msgid "" +#~ "Make sure you are logged in via" +#~ " ``gh auth login`` and that the " +#~ "current working directory is the root" +#~ " of the Flower repository." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:612 -#, python-format -msgid "" -"When we call ``start_simulation``, we tell Flower that there are 10 " -"clients (``num_clients=10``). Flower then goes ahead an asks the " -"``FedAvg`` strategy to select clients. ``FedAvg`` knows that it should " -"select 100% of the available clients (``fraction_fit=1.0``), so it goes " -"ahead and selects 10 random clients (i.e., 100% of 10)." -msgstr "" +#~ msgid "" +#~ "Trigger the workflow via ``gh workflow" +#~ " run docker-images.yml -f flwr-" +#~ "version=``." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:614 -msgid "" -"Flower then asks the selected 10 clients to train the model. When the " -"server receives the model parameter updates from the clients, it hands " -"those updates over to the strategy (*FedAvg*) for aggregation. The " -"strategy aggregates those updates and returns the new global model, which" -" then gets used in the next round of federated learning." -msgstr "" +#~ msgid "Preliminarities" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:626 -msgid "Where's the accuracy?" -msgstr "" +#~ msgid "Example: JAX - Run JAX Federated" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:628 -msgid "" -"You may have noticed that all metrics except for ``losses_distributed`` " -"are empty. 
Where did the ``{\"accuracy\": float(accuracy)}`` go?" -msgstr "" +#~ msgid "" +#~ "\\small\n" +#~ "P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:630 -msgid "" -"Flower can automatically aggregate losses returned by individual clients," -" but it cannot do the same for metrics in the generic metrics dictionary " -"(the one with the ``accuracy`` key). Metrics dictionaries can contain " -"very different kinds of metrics and even key/value pairs that are not " -"metrics at all, so the framework does not (and can not) know how to " -"handle these automatically." -msgstr "" +#~ msgid "" +#~ "The following command can be used " +#~ "to verify if Flower was successfully " +#~ "installed. If everything worked, it " +#~ "should print the version of Flower " +#~ "to the command line::" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:632 -msgid "" -"As users, we need to tell the framework how to handle/aggregate these " -"custom metrics, and we do so by passing metric aggregation functions to " -"the strategy. The strategy will then call these functions whenever it " -"receives fit or evaluate metrics from clients. The two possible functions" -" are ``fit_metrics_aggregation_fn`` and " -"``evaluate_metrics_aggregation_fn``." -msgstr "" +#~ msgid ":doc:`How to run Flower using Docker `" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:634 -msgid "" -"Let's create a simple weighted averaging function to aggregate the " -"``accuracy`` metric we return from ``evaluate``:" -msgstr "" +#~ msgid "" +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`__. Supported " +#~ "architectures include ``amd64`` and " +#~ "``arm64v8``." 
+#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:660 -msgid "" -"The only thing left to do is to tell the strategy to call this function " -"whenever it receives evaluation metric dictionaries from the clients:" -msgstr "" +#~ msgid "Before you start, make sure that the Docker daemon is running:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:697 -msgid "" -"We now have a full system that performs federated training and federated " -"evaluation. It uses the ``weighted_average`` function to aggregate custom" -" evaluation metrics and calculates a single ``accuracy`` metric across " -"all clients on the server side." -msgstr "" +#~ msgid "" +#~ "If you do not see the version " +#~ "of Docker but instead get an error" +#~ " saying that the command was not " +#~ "found, you will need to install " +#~ "Docker first. You can find installation" +#~ " instruction `here `_." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:699 -msgid "" -"The other two categories of metrics (``losses_centralized`` and " -"``metrics_centralized``) are still empty because they only apply when " -"centralized evaluation is being used. Part two of the Flower tutorial " -"will cover centralized evaluation." -msgstr "" +#~ msgid "" +#~ "On Linux, Docker commands require " +#~ "``sudo`` privilege. If you want to " +#~ "avoid using ``sudo``, you can follow " +#~ "the `Post-installation steps " +#~ "`_" +#~ " on the official Docker website." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 -msgid "Final remarks" -msgstr "" +#~ msgid "" +#~ "To ensure optimal performance and " +#~ "compatibility, the SuperLink, SuperNode and" +#~ " ServerApp image must have the same" +#~ " version when running together. 
This " +#~ "guarantees seamless integration and avoids " +#~ "potential conflicts or issues that may" +#~ " arise from using different versions." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 -msgid "" -"Congratulations, you just trained a convolutional neural network, " -"federated over 10 clients! With that, you understand the basics of " -"federated learning with Flower. The same approach you've seen can be used" -" with other machine learning frameworks (not just PyTorch) and tasks (not" -" just CIFAR-10 images classification), for example NLP with Hugging Face " -"Transformers or speech with SpeechBrain." -msgstr "" +#~ msgid "Flower SuperLink" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:715 -msgid "" -"In the next notebook, we're going to cover some more advanced concepts. " -"Want to customize your strategy? Initialize parameters on the server " -"side? Or evaluate the aggregated model on the server side? We'll cover " -"all this and more in the next tutorial." -msgstr "" +#~ msgid "Quickstart" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:733 -msgid "" -"The `Flower Federated Learning Tutorial - Part 2 " -"`__ goes into more depth about strategies and all " -"the advanced things you can build with them." -msgstr "" +#~ msgid "If you're looking to try out Flower, you can use the following command:" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 -msgid "Use a federated learning strategy" -msgstr "" +#~ msgid "" +#~ "The command pulls the Docker image " +#~ "with the tag ``1.8.0`` from Docker " +#~ "Hub. The tag specifies the Flower " +#~ "version. In this case, Flower 1.8.0. " +#~ "The ``--rm`` flag tells Docker to " +#~ "remove the container after it exits." 
+#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 -msgid "" -"Welcome to the next part of the federated learning tutorial. In previous " -"parts of this tutorial, we introduced federated learning with PyTorch and" -" Flower (`part 1 `__)." -msgstr "" +#~ msgid "" +#~ "By default, the Flower SuperLink keeps" +#~ " state in-memory. When using the " +#~ "Docker flag ``--rm``, the state is " +#~ "not persisted between container starts. " +#~ "We will show below how to save " +#~ "the state in a file on your " +#~ "host system." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 -msgid "" -"In this notebook, we'll begin to customize the federated learning system " -"we built in the introductory notebook (again, using `Flower " -"`__ and `PyTorch `__)." -msgstr "" +#~ msgid "" +#~ "The ``-p :`` flag tells " +#~ "Docker to map the ports " +#~ "``9091``/``9092`` of the host to " +#~ "``9091``/``9092`` of the container, allowing" +#~ " you to access the Driver API " +#~ "on ``http://localhost:9091`` and the Fleet " +#~ "API on ``http://localhost:9092``. Lastly, any" +#~ " flag that comes after the tag " +#~ "is passed to the Flower SuperLink. " +#~ "Here, we are passing the flag " +#~ "``--insecure``." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:17 -msgid "Let's move beyond FedAvg with Flower strategies!" -msgstr "" +#~ msgid "" +#~ "The ``--insecure`` flag enables insecure " +#~ "communication (using HTTP, not HTTPS) " +#~ "and should only be used for " +#~ "testing purposes. We strongly recommend " +#~ "enabling `SSL `__ when " +#~ "deploying to a production environment." 
+#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:309 -msgid "Strategy customization" -msgstr "" +#~ msgid "" +#~ "You can use ``--help`` to view all" +#~ " available flags that the SuperLink " +#~ "supports:" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:311 -msgid "" -"So far, everything should look familiar if you've worked through the " -"introductory notebook. With that, we're ready to introduce a number of " -"new features." -msgstr "" +#~ msgid "Mounting a volume to store the state on the host system" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 -msgid "Server-side parameter **initialization**" -msgstr "" +#~ msgid "" +#~ "If you want to persist the state" +#~ " of the SuperLink on your host " +#~ "system, all you need to do is " +#~ "specify a directory where you want " +#~ "to save the file on your host " +#~ "system and a name for the database" +#~ " file. By default, the SuperLink " +#~ "container runs with a non-root " +#~ "user called ``app`` with the user " +#~ "ID ``49999``. It is recommended to " +#~ "create new directory and change the " +#~ "user ID of the directory to " +#~ "``49999`` to ensure the mounted " +#~ "directory has the proper permissions. If" +#~ " you later want to delete the " +#~ "directory, you can change the user " +#~ "ID back to the current user ID " +#~ "by running ``sudo chown -R $USER:$(id" +#~ " -gn) state``." +#~ msgstr "" + +#~ msgid "" +#~ "In the example below, we create a" +#~ " new directory, change the user ID" +#~ " and tell Docker via the flag " +#~ "``--volume`` to mount the local " +#~ "``state`` directory into the ``/app/state``" +#~ " directory of the container. Furthermore," +#~ " we use the flag ``--database`` to" +#~ " specify the name of the database " +#~ "file." 
+#~ msgstr "" + +#~ msgid "" +#~ "As soon as the SuperLink starts, " +#~ "the file ``state.db`` is created in " +#~ "the ``state`` directory on your host " +#~ "system. If the file already exists, " +#~ "the SuperLink tries to restore the " +#~ "state from the file. To start the" +#~ " SuperLink with an empty database, " +#~ "simply remove the ``state.db`` file." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:325 -msgid "" -"Flower, by default, initializes the global model by asking one random " -"client for the initial parameters. In many cases, we want more control " -"over parameter initialization though. Flower therefore allows you to " -"directly pass the initial parameters to the Strategy:" -msgstr "" +#~ msgid "Enabling SSL for secure connections" +#~ msgstr "" + +#~ msgid "" +#~ "To enable SSL, you will need a " +#~ "PEM-encoded root certificate, a PEM-" +#~ "encoded private key and a PEM-" +#~ "encoded certificate chain." +#~ msgstr "" + +#~ msgid "" +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``--volume`` to " +#~ "mount the local directory into the " +#~ "``/app/certificates/`` directory of the " +#~ "container. This allows the SuperLink to" +#~ " access the files within the " +#~ "container. The ``ro`` stands for " +#~ "``read-only``. Docker volumes default to" +#~ " ``read-write``; that option tells " +#~ "Docker to make the volume ``read-" +#~ "only`` instead. Finally, we pass the " +#~ "names of the certificates and key " +#~ "file to the SuperLink with the " +#~ "``--ssl-ca-certfile``, ``--ssl-certfile`` " +#~ "and ``--ssl-keyfile`` flag." +#~ msgstr "" + +#~ msgid "" +#~ "Because Flower containers, by default, " +#~ "run with a non-root user ``app``," +#~ " the mounted files and directories " +#~ "must have the proper permissions for " +#~ "the user ID ``49999``. 
For example, " +#~ "to change the user ID of all " +#~ "files in the ``certificates/`` directory, " +#~ "you can run ``sudo chown -R " +#~ "49999:49999 certificates/*``." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:370 -msgid "" -"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" -" from asking one of the clients for the initial parameters. If we look " -"closely, we can see that the logs do not show any calls to the " -"``FlowerClient.get_parameters`` method." -msgstr "" +#~ msgid "Flower SuperNode" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:382 -msgid "Starting with a customized strategy" -msgstr "" +#~ msgid "" +#~ "The SuperNode Docker image comes with" +#~ " a pre-installed version of Flower" +#~ " and serves as a base for " +#~ "building your own SuperNode image." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:384 -msgid "" -"We've seen the function ``start_simulation`` before. It accepts a number " -"of arguments, amongst them the ``client_fn`` used to create " -"``FlowerClient`` instances, the number of clients to simulate " -"``num_clients``, the number of rounds ``num_rounds``, and the strategy." -msgstr "" +#~ msgid "" +#~ "The SuperNode Docker image currently " +#~ "works only with the 1.9.0-nightly " +#~ "release. A stable version will be " +#~ "available when Flower 1.9.0 (stable) " +#~ "gets released (ETA: May). A SuperNode" +#~ " nightly image must be paired with" +#~ " the corresponding SuperLink and ServerApp" +#~ " nightly images released on the same" +#~ " day. To ensure the versions are " +#~ "in sync, using the concrete tag, " +#~ "e.g., ``1.9.0.dev20240501`` instead of " +#~ "``nightly`` is recommended." 
+#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:386 -msgid "" -"The strategy encapsulates the federated learning approach/algorithm, for " -"example, ``FedAvg`` or ``FedAdagrad``. Let's try to use a different " -"strategy this time:" -msgstr "" +#~ msgid "" +#~ "We will use the ``quickstart-pytorch``" +#~ " example, which you can find in " +#~ "the Flower repository, to illustrate how" +#~ " you can dockerize your ClientApp." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:424 -msgid "Server-side parameter **evaluation**" -msgstr "" +#~ msgid "" +#~ "Before we can start, we need to" +#~ " meet a few prerequisites in our " +#~ "local development environment. You can " +#~ "skip the first part if you want" +#~ " to run your ClientApp instead of " +#~ "the ``quickstart-pytorch`` example." +#~ msgstr "" +#~ "Antes de começarmos, precisamos encontrar " +#~ "alguns pré-requisitos em nosso ambiente " +#~ "de desenvolvimento local." -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:426 -msgid "" -"Flower can evaluate the aggregated model on the server-side or on the " -"client-side. Client-side and server-side evaluation are similar in some " -"ways, but different in others." -msgstr "" +#~ msgid "Creating a SuperNode Dockerfile" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:428 -msgid "" -"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " -"simple: it works the same way that evaluation in centralized machine " -"learning does. If there is a server-side dataset that can be used for " -"evaluation purposes, then that's great. We can evaluate the newly " -"aggregated model after each round of training without having to send the " -"model to clients. We're also fortunate in the sense that our entire " -"evaluation dataset is available at all times." 
-msgstr "" +#~ msgid "Let's assume the following project layout:" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:430 -msgid "" -"**Federated Evaluation** (or *client-side evaluation*) is more complex, " -"but also more powerful: it doesn't require a centralized dataset and " -"allows us to evaluate models over a larger set of data, which often " -"yields more realistic evaluation results. In fact, many scenarios require" -" us to use **Federated Evaluation** if we want to get representative " -"evaluation results at all. But this power comes at a cost: once we start " -"to evaluate on the client side, we should be aware that our evaluation " -"dataset can change over consecutive rounds of learning if those clients " -"are not always available. Moreover, the dataset held by each client can " -"also change over consecutive rounds. This can lead to evaluation results " -"that are not stable, so even if we would not change the model, we'd see " -"our evaluation results fluctuate over consecutive rounds." -msgstr "" +#~ msgid "" +#~ "First, we need to create a " +#~ "``requirements.txt`` file in the directory " +#~ "where the ``ClientApp`` code is located." +#~ " In the file, we list all the" +#~ " dependencies that the ClientApp requires." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:433 -msgid "" -"We've seen how federated evaluation works on the client side (i.e., by " -"implementing the ``evaluate`` method in ``FlowerClient``). Now let's see " -"how we can evaluate aggregated model parameters on the server-side:" -msgstr "" +#~ msgid "" +#~ "Note that `flwr `__" +#~ " is already installed in the " +#~ "``flwr/supernode`` base image, so you " +#~ "only need to include other package " +#~ "dependencies in your ``requirements.txt``, " +#~ "such as ``torch``, ``tensorflow``, etc." 
+#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:490 -msgid "Sending/receiving arbitrary values to/from clients" -msgstr "" +#~ msgid "" +#~ "Next, we create a Dockerfile. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.supernode`` in ``examples/quickstart-" +#~ "pytorch``." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:492 -msgid "" -"In some situations, we want to configure client-side execution (training," -" evaluation) from the server-side. One example for that is the server " -"asking the clients to train for a certain number of local epochs. Flower " -"provides a way to send configuration values from the server to the " -"clients using a dictionary. Let's look at an example where the clients " -"receive values from the server through the ``config`` parameter in " -"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " -"method receives the configuration dictionary through the ``config`` " -"parameter and can then read values from this dictionary. In this example," -" it reads ``server_round`` and ``local_epochs`` and uses those values to " -"improve the logging and configure the number of local training epochs:" -msgstr "" +#~ msgid "" +#~ "The ``Dockerfile.supernode`` contains the " +#~ "instructions that assemble the SuperNode " +#~ "image." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:546 -msgid "" -"So how can we send this config dictionary from server to clients? The " -"built-in Flower Strategies provide way to do this, and it works similarly" -" to the way server-side evaluation works. 
We provide a function to the " -"strategy, and the strategy calls this function for every round of " -"federated learning:" -msgstr "" +#~ msgid "" +#~ "In the first two lines, we " +#~ "instruct Docker to use the SuperNode " +#~ "image tagged ``nightly`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. Next, we install the" +#~ " ClientApp dependencies by copying the " +#~ "``requirements.txt`` file into the image " +#~ "and run ``pip install``. In the " +#~ "last two lines, we copy the " +#~ "``client.py`` module into the image and" +#~ " set the entry point to ``flower-" +#~ "client-app`` with the argument " +#~ "``client:app``. The argument is the " +#~ "object reference of the ClientApp " +#~ "(``:``) that will be run" +#~ " inside the ClientApp." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:576 -msgid "" -"Next, we'll just pass this function to the FedAvg strategy before " -"starting the simulation:" -msgstr "" +#~ msgid "Building the SuperNode Docker image" +#~ msgstr "Construindo a imagem do servidor" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 -msgid "" -"As we can see, the client logs now include the current round of federated" -" learning (which they read from the ``config`` dictionary). We can also " -"configure local training to run for one epoch during the first and second" -" round of federated learning, and then for two epochs during the third " -"round." -msgstr "" +#~ msgid "" +#~ "Next, we build the SuperNode Docker " +#~ "image by running the following command" +#~ " in the directory where Dockerfile " +#~ "and ClientApp code are located." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 -msgid "" -"Clients can also return arbitrary values to the server. 
To do so, they " -"return a dictionary from ``fit`` and/or ``evaluate``. We have seen and " -"used this concept throughout this notebook without mentioning it " -"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" -" key/value pair as the third return value in ``evaluate``." -msgstr "" +#~ msgid "" +#~ "We gave the image the name " +#~ "``flwr_supernode``, and the tag ``0.0.1``. " +#~ "Remember that the here chosen values " +#~ "only serve as an example. You can" +#~ " change them to your needs." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:627 -msgid "Scaling federated learning" -msgstr "" +#~ msgid "Now that we have built the SuperNode image, we can finally run it." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:629 -msgid "" -"As a last step in this notebook, let's see how we can use Flower to " -"experiment with a large number of clients." -msgstr "" +#~ msgid "Let's break down each part of this command:" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:651 -#, python-format -msgid "" -"We now have 1000 partitions, each holding 45 training and 5 validation " -"examples. Given that the number of training examples on each client is " -"quite small, we should probably train the model a bit longer, so we " -"configure the clients to perform 3 local training epochs. We should also " -"adjust the fraction of clients selected for training during each round " -"(we don't want all 1000 clients participating in every round), so we " -"adjust ``fraction_fit`` to ``0.05``, which means that only 5% of " -"available clients (so 50 clients) will be selected for training each " -"round:" -msgstr "" +#~ msgid "``docker run``: This is the command to run a new Docker container." 
+#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:699 -msgid "" -"In this notebook, we've seen how we can gradually enhance our system by " -"customizing the strategy, initializing parameters on the server side, " -"choosing a different strategy, and evaluating models on the server-side. " -"That's quite a bit of flexibility with so little code, right?" -msgstr "" +#~ msgid "" +#~ "``--rm``: This option specifies that the" +#~ " container should be automatically removed" +#~ " when it stops." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:701 -msgid "" -"In the later sections, we've seen how we can communicate arbitrary values" -" between server and clients to fully customize client-side execution. " -"With that capability, we built a large-scale Federated Learning " -"simulation using the Flower Virtual Client Engine and ran an experiment " -"involving 1000 clients in the same workload - all in a Jupyter Notebook!" -msgstr "" +#~ msgid "``flwr_supernode:0.0.1``: The name the tag of the Docker image to use." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:719 -msgid "" -"The `Flower Federated Learning Tutorial - Part 3 " -"`__ shows how to build a fully custom ``Strategy`` from " -"scratch." -msgstr "" +#~ msgid "``--insecure``: This option enables insecure communication." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 -msgid "What is Federated Learning?" -msgstr "" +#~ msgid "" +#~ "``--superlink 192.168.1.100:9092``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Fleet" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 -msgid "" -"In this tutorial, you will learn what federated learning is, build your " -"first system in Flower, and gradually extend it. 
If you work through all " -"parts of the tutorial, you will be able to build advanced federated " -"learning systems that approach the current state of the art in the field." -msgstr "" +#~ msgid "API to connect to. Remember to update it with your SuperLink IP." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 -msgid "" -"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " -"federated learning. Only a basic understanding of data science and Python" -" programming is assumed." -msgstr "" +#~ msgid "" +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your SuperNodes." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 -msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the open-source Flower community on Slack to connect, ask questions, and " -"get help: `Join Slack `__ 🌼 We'd love to " -"hear from you in the ``#introductions`` channel! And if anything is " -"unclear, head over to the ``#questions`` channel." -msgstr "" +#~ msgid "" +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "SuperNode binary. To see all available" +#~ " flags that the SuperNode supports, " +#~ "run:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 -msgid "Classic machine learning" -msgstr "" +#~ msgid "" +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your SuperNode container." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 -msgid "" -"Before we begin to discuss federated learning, let us quickly recap how " -"most machine learning works today." 
-msgstr "" +#~ msgid "" +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the SuperNode to" +#~ " access the certificate within the " +#~ "container. Use the ``--root-certificates`` " +#~ "flag when starting the container." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 -msgid "" -"In machine learning, we have a model, and we have data. The model could " -"be a neural network (as depicted here), or something else, like classical" -" linear regression." -msgstr "" +#~ msgid "Flower ServerApp" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|d8bf04f23d9b46d8a23cc6f4887d7873|" -msgstr "" +#~ msgid "" +#~ "The procedure for building and running" +#~ " a ServerApp image is almost " +#~ "identical to the SuperNode image." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 -msgid "Model and data" -msgstr "" +#~ msgid "" +#~ "Similar to the SuperNode image, the " +#~ "ServerApp Docker image comes with a " +#~ "pre-installed version of Flower and " +#~ "serves as a base for building your" +#~ " own ServerApp image." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 -msgid "" -"We train the model using the data to perform a useful task. A task could " -"be to detect objects in images, transcribe an audio recording, or play a " -"game like Go." -msgstr "" +#~ msgid "" +#~ "We will use the same ``quickstart-" +#~ "pytorch`` example as we do in the" +#~ " Flower SuperNode section. If you " +#~ "have not already done so, please " +#~ "follow the `SuperNode Prerequisites`_ before" +#~ " proceeding." 
+#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|5aa1711387d74d0f8b9c499e1a51627e|" -msgstr "" +#~ msgid "Creating a ServerApp Dockerfile" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 -msgid "Train model using data" -msgstr "" +#~ msgid "" +#~ "First, we need to create a " +#~ "Dockerfile in the directory where the" +#~ " ``ServerApp`` code is located. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.serverapp`` in ``examples/quickstart-" +#~ "pytorch``." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 -msgid "" -"Now, in practice, the training data we work with doesn't originate on the" -" machine we train the model on. It gets created somewhere else." -msgstr "" +#~ msgid "" +#~ "The ``Dockerfile.serverapp`` contains the " +#~ "instructions that assemble the ServerApp " +#~ "image." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 -msgid "" -"It originates on a smartphone by the user interacting with an app, a car " -"collecting sensor data, a laptop receiving input via the keyboard, or a " -"smart speaker listening to someone trying to sing a song." -msgstr "" +#~ msgid "" +#~ "In the first two lines, we " +#~ "instruct Docker to use the ServerApp " +#~ "image tagged ``1.8.0`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. In the last two " +#~ "lines, we copy the ``server.py`` module" +#~ " into the image and set the " +#~ "entry point to ``flower-server-app`` " +#~ "with the argument ``server:app``. The " +#~ "argument is the object reference of " +#~ "the ServerApp (``:``) that " +#~ "will be run inside the ServerApp " +#~ "container." 
+#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|2bc8e069228d4873804061ff4a95048c|" -msgstr "" +#~ msgid "Building the ServerApp Docker image" +#~ msgstr "Construindo a imagem do servidor" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 -msgid "Data on a phone" -msgstr "" +#~ msgid "" +#~ "Next, we build the ServerApp Docker " +#~ "image by running the following command" +#~ " in the directory where Dockerfile " +#~ "and ServerApp code are located." +#~ msgstr "" + +#~ msgid "" +#~ "We gave the image the name " +#~ "``flwr_serverapp``, and the tag ``0.0.1``. " +#~ "Remember that the here chosen values " +#~ "only serve as an example. You can" +#~ " change them to your needs." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 -msgid "" -"What's also important to mention, this \"somewhere else\" is usually not " -"just one place, it's many places. It could be several devices all running" -" the same app. But it could also be several organizations, all generating" -" data for the same task." -msgstr "" +#~ msgid "Running the ServerApp Docker image" +#~ msgstr "Construindo a imagem do servidor" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|c258488766324dc9a6807f0e7c4fd5f4|" -msgstr "" +#~ msgid "Now that we have built the ServerApp image, we can finally run it." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 -msgid "Data is on many devices" -msgstr "" +#~ msgid "``flwr_serverapp:0.0.1``: The name the tag of the Docker image to use." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 -msgid "" -"So to use machine learning, or any kind of data analysis, the approach " -"that has been used in the past was to collect all data on a central " -"server. This server can be somewhere in a data center, or somewhere in " -"the cloud." 
-msgstr "" +#~ msgid "" +#~ "``--superlink 192.168.1.100:9091``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Driver" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 -msgid "|d5f962c3f4ec48529efda980868c14b0|" -msgstr "" +#~ msgid "" +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your ServerApps." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 -msgid "Central data collection" -msgstr "" +#~ msgid "" +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "ServerApp binary. To see all available" +#~ " flags that the ServerApp supports, " +#~ "run:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 -msgid "" -"Once all the data is collected in one place, we can finally use machine " -"learning algorithms to train our model on the data. This is the machine " -"learning approach that we've basically always relied on." -msgstr "" +#~ msgid "" +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your ServerApp container." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|a5eccea18d4c43a68b54b65043cabef8|" -msgstr "" +#~ msgid "" +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the ServerApp to" +#~ " access the certificate within the " +#~ "container. Use the ``--root-certificates`` " +#~ "flags when starting the container." 
+#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 -msgid "Central model training" -msgstr "" +#~ msgid "Advanced Docker options" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 -msgid "Challenges of classical machine learning" -msgstr "" +#~ msgid "Run with root user privileges" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 -msgid "" -"The classic machine learning approach we've just seen can be used in some" -" cases. Great examples include categorizing holiday photos, or analyzing " -"web traffic. Cases, where all the data is naturally available on a " -"centralized server." -msgstr "" +#~ msgid "" +#~ "Flower Docker images, by default, run" +#~ " with a non-root user " +#~ "(username/groupname: ``app``, UID/GID: ``49999``)." +#~ " Using root user is not recommended" +#~ " unless it is necessary for specific" +#~ " tasks during the build process. " +#~ "Always make sure to run the " +#~ "container as a non-root user in" +#~ " production to maintain security best " +#~ "practices." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|f17662f7df2d42f68cac70a1fdeda8a7|" -msgstr "" +#~ msgid "**Run a container with root user privileges**" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 -msgid "Centralized possible" -msgstr "" +#~ msgid "**Run the build process with root user privileges**" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 -msgid "" -"But the approach can not be used in many other cases. Cases, where the " -"data is not available on a centralized server, or cases where the data " -"available on one server is not enough to train a good model." 
-msgstr "" +#~ msgid "Using a different Flower version" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 -msgid "|241fc906441a4f038c625a19d30d01b2|" -msgstr "" +#~ msgid "Pinning a Docker image to a specific version" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 -msgid "Centralized impossible" -msgstr "" +#~ msgid "" +#~ "It may happen that we update the" +#~ " images behind the tags. Such updates" +#~ " usually include security updates of " +#~ "system dependencies that should not " +#~ "change the functionality of Flower. " +#~ "However, if you want to ensure " +#~ "that you always use the same " +#~ "image, you can specify the hash of" +#~ " the image instead of the tag." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 -msgid "" -"There are many reasons why the classic centralized machine learning " -"approach does not work for a large number of highly important real-world " -"use cases. Those reasons include:" -msgstr "" +#~ msgid "" +#~ "The following command returns the " +#~ "current image hash referenced by the " +#~ "``superlink:1.8.0`` tag:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 -msgid "" -"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " -"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " -"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " -"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " -"protect sensitive data from being moved. In fact, those regulations " -"sometimes even prevent single organizations from combining their own " -"users' data for artificial intelligence training because those users live" -" in different parts of the world, and their data is governed by different" -" data protection regulations." 
-msgstr "" +#~ msgid "Next, we can pin the hash when running a new SuperLink container:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 -msgid "" -"**User preference**: In addition to regulation, there are use cases where" -" users just expect that no data leaves their device, ever. If you type " -"your passwords and credit card info into the digital keyboard of your " -"phone, you don't expect those passwords to end up on the server of the " -"company that developed that keyboard, do you? In fact, that use case was " -"the reason federated learning was invented in the first place." -msgstr "" +#~ msgid "Setting environment variables" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 -msgid "" -"**Data volume**: Some sensors, like cameras, produce such a high data " -"volume that it is neither feasible nor economic to collect all the data " -"(due to, for example, bandwidth or communication efficiency). Think about" -" a national rail service with hundreds of train stations across the " -"country. If each of these train stations is outfitted with a number of " -"security cameras, the volume of raw on-device data they produce requires " -"incredibly powerful and exceedingly expensive infrastructure to process " -"and store. And most of the data isn't even useful." -msgstr "" +#~ msgid "" +#~ "To set a variable inside a Docker" +#~ " container, you can use the ``-e " +#~ "=`` flag." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 -msgid "Examples where centralized machine learning does not work include:" -msgstr "" +#~ msgid "" +#~ "This approach consists of two seprate" +#~ " phases: clipping of the updates and" +#~ " adding noise to the aggregated " +#~ "model. For the clipping phase, Flower" +#~ " framework has made it possible to" +#~ " decide whether to perform clipping " +#~ "on the server side or the client" +#~ " side." 
+#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 -msgid "" -"Sensitive healthcare records from multiple hospitals to train cancer " -"detection models" -msgstr "" +#~ msgid "" +#~ "The :code:`on_fit_config_fn` can be used " +#~ "to pass arbitrary configuration values " +#~ "from server to client, and poetentially" +#~ " change these values each round, for" +#~ " example, to adjust the learning " +#~ "rate. The client will receive the " +#~ "dictionary returned by the " +#~ ":code:`on_fit_config_fn` in its own " +#~ ":code:`client.fit()` function." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 -msgid "" -"Financial information from different organizations to detect financial " -"fraud" -msgstr "" +#~ msgid "" +#~ "QUICKSTART TUTORIALS: :doc:`PyTorch ` | :doc:`TensorFlow " +#~ "` | :doc:`🤗 " +#~ "Transformers ` " +#~ "| :doc:`JAX ` |" +#~ " :doc:`Pandas ` " +#~ "| :doc:`fastai `" +#~ " | :doc:`PyTorch Lightning ` | :doc:`scikit-" +#~ "learn ` | " +#~ ":doc:`XGBoost ` |" +#~ " :doc:`Android ` " +#~ "| :doc:`iOS `" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 -msgid "Location data from your electric car to make better range prediction" -msgstr "" +#~ msgid "flower-client-app" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 -msgid "End-to-end encrypted messages to train better auto-complete models" -msgstr "" +#~ msgid ":py:obj:`flwr.client `\\" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 -msgid "" -"The popularity of privacy-enhancing systems like the `Brave " -"`__ browser or the `Signal `__ " -"messenger shows that users care about privacy. In fact, they choose the " -"privacy-enhancing version over other alternatives, if such an alternative" -" exists. But what can we do to apply machine learning and data science to" -" these cases to utilize private data? 
After all, these are all areas that" -" would benefit significantly from recent advances in AI." -msgstr "" +#~ msgid ":py:obj:`flwr.common `\\" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 -msgid "Federated learning" -msgstr "" +#~ msgid ":py:obj:`flwr.server `\\" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 -msgid "" -"Federated learning simply reverses this approach. It enables machine " -"learning on distributed data by moving the training to the data, instead " -"of moving the data to the training. Here's the single-sentence " -"explanation:" -msgstr "" +#~ msgid ":py:obj:`flwr.simulation `\\" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 -msgid "Central machine learning: move the data to the computation" -msgstr "" +#~ msgid ":py:obj:`run_client_app `\\ \\(\\)" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 -msgid "Federated (machine) learning: move the computation to the data" -msgstr "" +#~ msgid "Run Flower client app." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 -msgid "" -"By doing so, it enables us to use machine learning (and other data " -"science approaches) in areas where it wasn't possible before. We can now " -"train excellent medical AI models by enabling different hospitals to work" -" together. We can solve financial fraud by training AI models on the data" -" of different financial institutions. We can build novel privacy-" -"enhancing applications (such as secure messaging) that have better built-" -"in AI than their non-privacy-enhancing alternatives. And those are just a" -" few of the examples that come to mind. As we deploy federated learning, " -"we discover more and more areas that can suddenly be reinvented because " -"they now have access to vast amounts of previously inaccessible data." 
-msgstr "" +#~ msgid ":py:obj:`run_supernode `\\ \\(\\)" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 -msgid "" -"So how does federated learning work, exactly? Let's start with an " -"intuitive explanation." -msgstr "" +#~ msgid "Run Flower SuperNode." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 -msgid "Federated learning in five steps" -msgstr "" +#~ msgid ":py:obj:`flwr.client.mod `\\" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 -msgid "Step 0: Initialize global model" -msgstr "" +#~ msgid ":py:obj:`Context `\\ \\(state\\)" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 -msgid "" -"We start by initializing the model on the server. This is exactly the " -"same in classic centralized learning: we initialize the model parameters," -" either randomly or from a previously saved checkpoint." -msgstr "" +#~ msgid "State of your run." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|0aa5aa05810b44b6a835cecce28f3137|" -msgstr "" +#~ msgid "Metrics record." 
+#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 -msgid "Initialize global model" -msgstr "" +#~ msgid "" +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:class:`int` | " +#~ ":py:class:`float` | :py:class:`str` | " +#~ ":py:class:`bytes` | :py:class:`bool` | " +#~ ":py:class:`~typing.List`\\ [:py:class:`int`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`float`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`str`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`bytes`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`bool`]]" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 -msgid "" -"Step 1: Send model to a number of connected organizations/devices (client" -" nodes)" -msgstr "" +#~ msgid "Remove all items from R." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 -msgid "" -"Next, we send the parameters of the global model to the connected client " -"nodes (think: edge devices like smartphones or servers belonging to " -"organizations). This is to ensure that each participating node starts " -"their local training using the same model parameters. We often use only a" -" few of the connected nodes instead of all nodes. The reason for this is " -"that selecting more and more client nodes has diminishing returns." -msgstr "" +#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#~ msgstr "" + +#~ msgid "d defaults to None." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|c742940dd4bf4de09d8d0d5e8d179638|" -msgstr "" +#~ msgid "Update R from dict/iterable E and F." 
+#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 -msgid "Send global model" -msgstr "" +#~ msgid "" +#~ ":py:obj:`RUN_DRIVER_API_ENTER " +#~ "`\\" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 -msgid "" -"Step 2: Train model locally on the data of each organization/device " -"(client node)" -msgstr "" +#~ msgid "" +#~ ":py:obj:`RUN_DRIVER_API_LEAVE " +#~ "`\\" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 -msgid "" -"Now that all (selected) client nodes have the latest version of the " -"global model parameters, they start the local training. They use their " -"own local dataset to train their own local model. They don't train the " -"model until full convergence, but they only train for a little while. " -"This could be as little as one epoch on the local data, or even just a " -"few steps (mini-batches)." -msgstr "" +#~ msgid "" +#~ ":py:obj:`RUN_FLEET_API_ENTER " +#~ "`\\" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|1f169ab4601a47e1a226f1628f4ebddb|" -msgstr "" +#~ msgid "" +#~ ":py:obj:`RUN_FLEET_API_LEAVE " +#~ "`\\" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 -msgid "Train on local data" -msgstr "" +#~ msgid ":py:obj:`DRIVER_CONNECT `\\" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 -msgid "Step 3: Return model updates back to the server" -msgstr "" +#~ msgid ":py:obj:`DRIVER_DISCONNECT `\\" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 -msgid "" -"After local training, each client node has a slightly different version " -"of the model parameters they originally received. The parameters are all " -"different because each client node has different examples in its local " -"dataset. The client nodes then send those model updates back to the " -"server. 
The model updates they send can either be the full model " -"parameters or just the gradients that were accumulated during local " -"training." -msgstr "" +#~ msgid "" +#~ ":py:obj:`START_DRIVER_ENTER " +#~ "`\\" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|12cfa9cde14440ecb8c8f6c1d7185bec|" -msgstr "" +#~ msgid "" +#~ ":py:obj:`START_DRIVER_LEAVE " +#~ "`\\" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 -msgid "Send model updates" -msgstr "" +#~ msgid "" +#~ "An identifier that can be used " +#~ "when loading a particular data partition" +#~ " for a ClientApp. Making use of " +#~ "this identifier is more relevant when" +#~ " conducting simulations." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 -msgid "Step 4: Aggregate model updates into a new global model" -msgstr "" +#~ msgid ":py:obj:`partition_id `\\" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 -msgid "" -"The server receives model updates from the selected client nodes. If it " -"selected 100 client nodes, it now has 100 slightly different versions of " -"the original global model, each trained on the local data of one client. " -"But didn't we want to have one model that contains the learnings from the" -" data of all 100 client nodes?" -msgstr "" +#~ msgid "An identifier telling which data partition a ClientApp should use." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 -msgid "" -"In order to get one single model, we have to combine all the model " -"updates we received from the client nodes. This process is called " -"*aggregation*, and there are many different ways to do it. The most basic" -" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " -"`__), often abbreviated as *FedAvg*. " -"*FedAvg* takes the 100 model updates and, as the name suggests, averages " -"them. 
To be more precise, it takes the *weighted average* of the model " -"updates, weighted by the number of examples each client used for " -"training. The weighting is important to make sure that each data example " -"has the same \"influence\" on the resulting global model. If one client " -"has 10 examples, and another client has 100 examples, then - without " -"weighting - each of the 10 examples would influence the global model ten " -"times as much as each of the 100 examples." -msgstr "" +#~ msgid "" +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:class:`int` | " +#~ ":py:class:`float` | :py:class:`~typing.List`\\ " +#~ "[:py:class:`int`] | :py:class:`~typing.List`\\ " +#~ "[:py:class:`float`]]" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|72939caf6e294b0986fee6dde96614d7|" -msgstr "" +#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 -msgid "Aggregate model updates" -msgstr "" +#~ msgid "" +#~ "A dataclass storing named Arrays in " +#~ "order. This means that it holds " +#~ "entries as an OrderedDict[str, Array]. " +#~ "ParametersRecord objects can be viewed " +#~ "as an equivalent to PyTorch's " +#~ "state_dict, but holding serialised tensors " +#~ "instead." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 -msgid "Step 5: Repeat steps 1 to 4 until the model converges" -msgstr "" +#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 -msgid "" -"Steps 1 to 4 are what we call a single round of federated learning. 
The " -"global model parameters get sent to the participating client nodes (step " -"1), the client nodes train on their local data (step 2), they send their " -"updated models to the server (step 3), and the server then aggregates the" -" model updates to get a new version of the global model (step 4)." -msgstr "" +#~ msgid ":py:obj:`run_server_app `\\ \\(\\)" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 -msgid "" -"During a single round, each client node that participates in that " -"iteration only trains for a little while. This means that after the " -"aggregation step (step 4), we have a model that has been trained on all " -"the data of all participating client nodes, but only for a little while. " -"We then have to repeat this training process over and over again to " -"eventually arrive at a fully trained model that performs well across the " -"data of all client nodes." -msgstr "" +#~ msgid "Run Flower server app." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 -msgid "" -"Congratulations, you now understand the basics of federated learning. " -"There's a lot more to discuss, of course, but that was federated learning" -" in a nutshell. In later parts of this tutorial, we will go into more " -"detail. Interesting questions include: How can we select the best client " -"nodes that should participate in the next round? What's the best way to " -"aggregate model updates? How can we handle failing client nodes " -"(stragglers)?" -msgstr "" +#~ msgid ":py:obj:`run_superlink `\\ \\(\\)" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 -msgid "" -"Just like we can train a model on the decentralized data of different " -"client nodes, we can also evaluate the model on that data to receive " -"valuable metrics. This is called federated evaluation, sometimes " -"abbreviated as FE. 
In fact, federated evaluation is an integral part of " -"most federated learning systems." -msgstr "" +#~ msgid "Run Flower SuperLink (Driver API and Fleet API)." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 -msgid "Federated analytics" -msgstr "" +#~ msgid "" +#~ ":py:obj:`LegacyContext `\\ " +#~ "\\(state\\[\\, config\\, strategy\\, ...\\]\\)" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 -msgid "" -"In many cases, machine learning isn't necessary to derive value from " -"data. Data analysis can yield valuable insights, but again, there's often" -" not enough data to get a clear answer. What's the average age at which " -"people develop a certain type of health condition? Federated analytics " -"enables such queries over multiple client nodes. It is usually used in " -"conjunction with other privacy-enhancing technologies like secure " -"aggregation to prevent the server from seeing the results submitted by " -"individual client nodes." -msgstr "" +#~ msgid ":py:obj:`flwr.server.strategy `\\" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 -msgid "" -"Differential privacy (DP) is often mentioned in the context of Federated " -"Learning. It is a privacy-preserving method used when analyzing and " -"sharing statistical data, ensuring the privacy of individual " -"participants. DP achieves this by adding statistical noise to the model " -"updates, ensuring any individual participants’ information cannot be " -"distinguished or re-identified. This technique can be considered an " -"optimization that provides a quantifiable privacy protection measure." 
-msgstr "" +#~ msgid ":py:obj:`flwr.server.workflow `\\" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 -msgid "Flower" -msgstr "" +#~ msgid "run\\_driver\\_api" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 -msgid "" -"Federated learning, federated evaluation, and federated analytics require" -" infrastructure to move machine learning models back and forth, train and" -" evaluate them on local data, and then aggregate the updated models. " -"Flower provides the infrastructure to do exactly that in an easy, " -"scalable, and secure way. In short, Flower presents a unified approach to" -" federated learning, analytics, and evaluation. It allows the user to " -"federate any workload, any ML framework, and any programming language." -msgstr "" +#~ msgid "run\\_fleet\\_api" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|83a8daee45da4a98b8d6f24ae098fc50|" -msgstr "" +#~ msgid "" +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg+ configuration " +#~ "to clients and collect their public " +#~ "keys. - 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 -msgid "" -"Flower federated learning server and client nodes (car, scooter, personal" -" computer, roomba, and phone)" -msgstr "" +#~ msgid "key shares." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 -msgid "" -"Congratulations, you just learned the basics of federated learning and " -"how it relates to the classic (centralized) machine learning!" -msgstr "" +#~ msgid "" +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg configuration " +#~ "to clients and collect their public " +#~ "keys. 
- 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 -msgid "" -"In the next part of this tutorial, we are going to build a first " -"federated learning system with Flower." -msgstr "" +#~ msgid "" +#~ ":py:obj:`start_simulation `\\" +#~ " \\(\\*\\, client\\_fn\\[\\, ...\\]\\)" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 -msgid "" -"The `Flower Federated Learning Tutorial - Part 1 " -"`__ shows how to build a simple federated learning system " -"with PyTorch and Flower." -msgstr "" +#~ msgid "" +#~ "'A dictionary, e.g {\"\": , " +#~ "\"\": } to configure a " +#~ "backend. Values supported in are" +#~ " those included by " +#~ "`flwr.common.typing.ConfigsRecordValues`." +#~ msgstr "" #~ msgid "" -#~ "Configuring and setting up the " -#~ ":code:`Dockerfile` as well the configuration" -#~ " for the devcontainer can be a " -#~ "bit more involved. The good thing " -#~ "is you want have to do it. " -#~ "Usually it should be enough to " -#~ "install Docker on your system and " -#~ "ensure its available on your command " -#~ "line. Additionally, install the `VSCode " -#~ "Containers Extension `_." +#~ "When diabled, only INFO, WARNING and " +#~ "ERROR log messages will be shown. " +#~ "If enabled, DEBUG-level logs will " +#~ "be displayed." #~ msgstr "" #~ msgid "" -#~ "``flwr = { path = " -#~ "\"../../dist/flwr-1.0.0-py3-none-any.whl\" }`` " -#~ "(without extras)" +#~ "A function creating client instances. " +#~ "The function must take a single " +#~ "`str` argument called `cid`. It should" +#~ " return a single client instance of" +#~ " type Client. Note that the created" +#~ " client instances are ephemeral and " +#~ "will often be destroyed after a " +#~ "single method invocation. 
Since client " +#~ "instances are not long-lived, they " +#~ "should not attempt to carry state " +#~ "over method invocations. Any state " +#~ "required by the instance (model, " +#~ "dataset, hyperparameters, ...) should be " +#~ "(re-)created in either the call to " +#~ "`client_fn` or the call to any of" +#~ " the client methods (e.g., load " +#~ "evaluation data in the `evaluate` method" +#~ " itself)." #~ msgstr "" #~ msgid "" -#~ "``flwr = { path = " -#~ "\"../../dist/flwr-1.0.0-py3-none-any.whl\", extras =" -#~ " [\"simulation\"] }`` (with extras)" +#~ "The total number of clients in " +#~ "this simulation. This must be set " +#~ "if `clients_ids` is not set and " +#~ "vice-versa." #~ msgstr "" -#~ msgid "Upload the whl (e.g., ``flwr-1.7.0-py3-none-any.whl``)" +#~ msgid "" +#~ "List `client_id`s for each client. This" +#~ " is only required if `num_clients` is" +#~ " not set. Setting both `num_clients` " +#~ "and `clients_ids` with `len(clients_ids)` not" +#~ " equal to `num_clients` generates an " +#~ "error." #~ msgstr "" #~ msgid "" -#~ "Change ``!pip install -q 'flwr[simulation]'" -#~ " torch torchvision matplotlib`` to ``!pip" -#~ " install -q 'flwr-1.7.0-py3-none-" -#~ "any.whl[simulation]' torch torchvision matplotlib``" +#~ "In this tutorial we will learn how" +#~ " to train a Convolutional Neural " +#~ "Network on CIFAR10 using Flower and " +#~ "PyTorch." +#~ msgstr "" + +#~ msgid "" +#~ "*Clients* are responsible for generating " +#~ "individual weight-updates for the model" +#~ " based on their local datasets. These" +#~ " updates are then sent to the " +#~ "*server* which will aggregate them to" +#~ " produce a better model. Finally, the" +#~ " *server* sends this improved version " +#~ "of the model back to each " +#~ "*client*. A complete cycle of weight " +#~ "updates is called a *round*." +#~ msgstr "" + +#~ msgid "" +#~ "Now that we have a rough idea " +#~ "of what is going on, let's get " +#~ "started. 
We first need to install " +#~ "Flower. You can do this by running" +#~ " :" +#~ msgstr "" + +#~ msgid "" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead and install PyTorch and " +#~ "the **torchvision** library:" #~ msgstr "" -#~ msgid "Before the release" +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Deep Learning with" +#~ " PyTorch " +#~ "`_." #~ msgstr "" #~ msgid "" -#~ "Update the changelog (``changelog.md``) with" -#~ " all relevant changes that happened " -#~ "after the last release. If the " -#~ "last release was tagged ``v1.2.0``, you" -#~ " can use the following URL to " -#~ "see all commits that got merged " -#~ "into ``main`` since then:" +#~ "In a file called :code:`client.py`, " +#~ "import Flower and PyTorch related " +#~ "packages:" #~ msgstr "" -#~ msgid "" -#~ "`GitHub: Compare v1.2.0...main " -#~ "`_" +#~ msgid "In addition, we define the device allocation in PyTorch with:" #~ msgstr "" #~ msgid "" -#~ "Thank the authors who contributed since" -#~ " the last release. This can be " -#~ "done by running the ``./dev/add-" -#~ "shortlog.sh`` convenience script (it can " -#~ "be ran multiple times and will " -#~ "update the names in the list if" -#~ " new contributors were added in the" -#~ " meantime)." +#~ "We use PyTorch to load CIFAR10, a" +#~ " popular colored image classification " +#~ "dataset for machine learning. The " +#~ "PyTorch :code:`DataLoader()` downloads the " +#~ "training and test data that are " +#~ "then normalized." #~ msgstr "" #~ msgid "" -#~ "Update the ``changelog.md`` section header " -#~ "``Unreleased`` to contain the version " -#~ "number and date for the release " -#~ "you are building. Create a pull " -#~ "request with the change." 
+#~ "Define the loss and optimizer with " +#~ "PyTorch. The training of the dataset " +#~ "is done by looping over the " +#~ "dataset, measure the corresponding loss " +#~ "and optimize it." #~ msgstr "" #~ msgid "" -#~ "Tag the release commit with the " -#~ "version number as soon as the PR" -#~ " is merged: ``git tag v0.12.3``, then" -#~ " ``git push --tags``. This will " -#~ "create a draft release on GitHub " -#~ "containing the correct artifacts and the" -#~ " relevant part of the changelog." +#~ "Define then the validation of the " +#~ "machine learning network. We loop over" +#~ " the test set and measure the " +#~ "loss and accuracy of the test set." #~ msgstr "" #~ msgid "" -#~ "Note that, in order to build the" -#~ " documentation locally (with ``poetry run" -#~ " make html``, like described below), " -#~ "`Pandoc _` needs " -#~ "to be installed on the system." +#~ "After defining the training and testing" +#~ " of a PyTorch machine learning model," +#~ " we use the functions for the " +#~ "Flower clients." #~ msgstr "" #~ msgid "" -#~ "If you're familiar with how contributing" -#~ " on GitHub works, you can directly" -#~ " checkout our `getting started guide " -#~ "for contributors `_ and examples " -#~ "of `good first contributions " -#~ "`_." +#~ "The Flower clients will use a " +#~ "simple CNN adapted from 'PyTorch: A " +#~ "60 Minute Blitz':" #~ msgstr "" #~ msgid "" -#~ "This will create a `flower/` (or " -#~ "the name of your fork if you " -#~ "renamed it) folder in the current " -#~ "working directory." +#~ "After loading the data set with " +#~ ":code:`load_data()` we define the Flower " +#~ "interface." #~ msgstr "" -#~ msgid "Otherwise you can always find this option in the `Branches` page." +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses " +#~ "PyTorch. 
Implementing :code:`NumPyClient` usually" +#~ " means defining the following methods " +#~ "(:code:`set_parameters` is optional though):" #~ msgstr "" -#~ msgid "" -#~ "Once you click the `Compare & pull" -#~ " request` button, you should see " -#~ "something similar to this:" +#~ msgid "receive the updated local model weights" #~ msgstr "" -#~ msgid "Find the source file in `doc/source`" +#~ msgid "which can be implemented in the following way:" #~ msgstr "" #~ msgid "" -#~ "Make the change in the `.rst` file" -#~ " (beware, the dashes under the title" -#~ " should be the same length as " -#~ "the title itself)" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/quickstart-" +#~ "pytorch`." #~ msgstr "" -#~ msgid "Change the file name to `save-progress.rst`" +#~ msgid "" +#~ "In this example, we split the " +#~ "dataset into two partitions with uniform" +#~ " distribution (:code:`IidPartitioner(num_partitions=2)`). " +#~ "Then, we load the partition for " +#~ "the given client based on " +#~ ":code:`node_id`:" #~ msgstr "" -#~ msgid "Add a redirect rule to `doc/source/conf.py`" +#~ msgid "" +#~ "The :code:`self.bst` is used to keep " +#~ "the Booster objects that remain " +#~ "consistent across rounds, allowing them " +#~ "to store predictions from trees " +#~ "integrated in earlier rounds and " +#~ "maintain other essential data structures " +#~ "for training." #~ msgstr "" #~ msgid "" -#~ "This will cause a redirect from " -#~ "`saving-progress.html` to `save-progress.html`," -#~ " old links will continue to work." +#~ "In :code:`fit`, at the first round, " +#~ "we call :code:`xgb.train()` to build up" +#~ " the first set of trees. the " +#~ "returned Booster object and config are" +#~ " stored in :code:`self.bst` and " +#~ ":code:`self.config`, respectively. 
From the " +#~ "second round, we load the global " +#~ "model sent from server to " +#~ ":code:`self.bst`, and then update model " +#~ "weights on local training data with " +#~ "function :code:`local_boost` as follows:" #~ msgstr "" #~ msgid "" -#~ "For the lateral navigation bar to " -#~ "work properly, it is very important " -#~ "to update the `index.rst` file as " -#~ "well. This is where we define the" -#~ " whole arborescence of the navbar." +#~ "Given :code:`num_local_round`, we update trees" +#~ " by calling :code:`self.bst.update` method. " +#~ "After training, the last " +#~ ":code:`N=num_local_round` trees will be " +#~ "extracted to send to the server." #~ msgstr "" -#~ msgid "Find and modify the file name in `index.rst`" +#~ msgid "" +#~ "In :code:`evaluate`, we call " +#~ ":code:`self.bst.eval_set` function to conduct " +#~ "evaluation on valid set. The AUC " +#~ "value will be returned." #~ msgstr "" -#~ msgid "Add CI job to deploy the staging system when the `main` branch changes" +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client`and call" +#~ " :code:`fl.client.start_client()`. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." #~ msgstr "" -#~ msgid "`Python 3.7 `_ or above" +#~ msgid "" +#~ "We use two clients for this " +#~ "example. An :code:`evaluate_metrics_aggregation` " +#~ "function is defined to collect and " +#~ "wighted average the AUC values from " +#~ "clients." 
#~ msgstr "" #~ msgid "" -#~ "First, clone the `Flower repository " -#~ "`_ from GitHub::" +#~ "Welcome to the third part of the" +#~ " Flower federated learning tutorial. In " +#~ "previous parts of this tutorial, we " +#~ "introduced federated learning with PyTorch " +#~ "and Flower (`part 1 " +#~ "`__) and we " +#~ "learned how strategies can be used " +#~ "to customize the execution on both " +#~ "the server and the clients (`part " +#~ "2 `__)." #~ msgstr "" #~ msgid "" -#~ "Second, create a virtual environment " -#~ "(and activate it). If you chose to" -#~ " use :code:`pyenv` (with the :code" -#~ ":`pyenv-virtualenv` plugin) and already " -#~ "have it installed , you can use" -#~ " the following convenience script (by " -#~ "default it will use :code:`Python " -#~ "3.8.17`, but you can change it by" -#~ " providing a specific :code:``)::" +#~ "In this notebook, we'll continue to " +#~ "customize the federated learning system " +#~ "we built previously by creating a " +#~ "custom version of FedAvg (again, using" +#~ " `Flower `__ and `PyTorch " +#~ "`__)." #~ msgstr "" #~ msgid "" -#~ "If you don't have :code:`pyenv` " -#~ "installed, you can use the following " -#~ "script that will install pyenv, set " -#~ "it up and create the virtual " -#~ "environment (with :code:`Python 3.8.17` by " -#~ "default)::" +#~ "`Star Flower on GitHub " +#~ "`__ ⭐️ and join " +#~ "the Flower community on Slack to " +#~ "connect, ask questions, and get help:" +#~ " `Join Slack `__" +#~ " 🌼 We'd love to hear from you" +#~ " in the ``#introductions`` channel! And " +#~ "if anything is unclear, head over " +#~ "to the ``#questions`` channel." #~ msgstr "" -#~ msgid "" -#~ "Third, install the Flower package in " -#~ "development mode (think :code:`pip install " -#~ "-e`) along with all necessary " -#~ "dependencies::" +#~ msgid "Let's build a new ``Strategy`` from scratch!" 
#~ msgstr "" #~ msgid "" -#~ "Developers could run the full set " -#~ "of Github Actions workflows under their" -#~ " local environment by using `Act " -#~ "_`. Please refer to" -#~ " the installation instructions under the" -#~ " linked repository and run the next" -#~ " command under Flower main cloned " -#~ "repository folder::" +#~ "Let's now load the CIFAR-10 training " +#~ "and test set, partition them into " +#~ "ten smaller datasets (each split into" +#~ " training and validation set), and " +#~ "wrap everything in their own " +#~ "``DataLoader``. We introduce a new " +#~ "parameter ``num_clients`` which allows us " +#~ "to call ``load_datasets`` with different " +#~ "numbers of clients." #~ msgstr "" #~ msgid "" -#~ "Please note that these components are" -#~ " still experimental, the correct " -#~ "configuration of DP for a specific " -#~ "task is still an unsolved problem." +#~ "To implement the Flower client, we " +#~ "(again) create a subclass of " +#~ "``flwr.client.NumPyClient`` and implement the " +#~ "three methods ``get_parameters``, ``fit``, and" +#~ " ``evaluate``. Here, we also pass the" +#~ " ``cid`` to the client and use " +#~ "it log additional details:" #~ msgstr "" #~ msgid "" -#~ "The distribution of the update norm " -#~ "has been shown to vary from " -#~ "task-to-task and to evolve as " -#~ "training progresses. Therefore, we use " -#~ "an adaptive approach [andrew]_ that " -#~ "continuously adjusts the clipping threshold" -#~ " to track a prespecified quantile of" -#~ " the update norm distribution." +#~ "Let's go deeper and see what it" +#~ " takes to move from ``NumPyClient`` " +#~ "to ``Client``!" #~ msgstr "" #~ msgid "" -#~ "We make (and attempt to enforce) a" -#~ " number of assumptions that must be" -#~ " satisfied to ensure that the " -#~ "training process actually realises the " -#~ ":math:`(\\epsilon, \\delta)` guarantees the " -#~ "user has in mind when configuring " -#~ "the setup." 
+#~ "So far, we've implemented our client " +#~ "by subclassing ``flwr.client.NumPyClient``. The " +#~ "three methods we implemented are " +#~ "``get_parameters``, ``fit``, and ``evaluate``. " +#~ "Finally, we wrap the creation of " +#~ "instances of this class in a " +#~ "function called ``client_fn``:" #~ msgstr "" #~ msgid "" -#~ "The first two are useful for " -#~ "eliminating a multitude of complications " -#~ "associated with calibrating the noise to" -#~ " the clipping threshold while the " -#~ "third one is required to comply " -#~ "with the assumptions of the privacy " -#~ "analysis." +#~ "We've seen this before, there's nothing" +#~ " new so far. The only *tiny* " +#~ "difference compared to the previous " +#~ "notebook is naming, we've changed " +#~ "``FlowerClient`` to ``FlowerNumPyClient`` and " +#~ "``client_fn`` to ``numpyclient_fn``. Let's run" +#~ " it to see the output we get:" #~ msgstr "" #~ msgid "" -#~ "The first version of our solution " -#~ "was to define a decorator whose " -#~ "constructor accepted, among other things, " -#~ "a boolean valued variable indicating " -#~ "whether adaptive clipping was to be " -#~ "enabled or not. We quickly realized " -#~ "that this would clutter its " -#~ ":code:`__init__()` function with variables " -#~ "corresponding to hyperparameters of adaptive" -#~ " clipping that would remain unused " -#~ "when it was disabled. A cleaner " -#~ "implementation could be achieved by " -#~ "splitting the functionality into two " -#~ "decorators, :code:`DPFedAvgFixed` and " -#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" -#~ " classing the former. The constructors " -#~ "for both classes accept a boolean " -#~ "parameter :code:`server_side_noising`, which, as " -#~ "the name suggests, determines where " -#~ "noising is to be performed." +#~ "This works as expected, two clients " +#~ "are training for three rounds of " +#~ "federated learning." 
#~ msgstr "" #~ msgid "" -#~ ":code:`aggregate_fit()`: We check whether any" -#~ " of the sampled clients dropped out" -#~ " or failed to upload an update " -#~ "before the round timed out. In " -#~ "that case, we need to abort the" -#~ " current round, discarding any successful" -#~ " updates that were received, and move" -#~ " on to the next one. On the " -#~ "other hand, if all clients responded " -#~ "successfully, we must force the " -#~ "averaging of the updates to happen " -#~ "in an unweighted manner by intercepting" -#~ " the :code:`parameters` field of " -#~ ":code:`FitRes` for each received update " -#~ "and setting it to 1. Furthermore, " -#~ "if :code:`server_side_noising=true`, each update " -#~ "is perturbed with an amount of " -#~ "noise equal to what it would have" -#~ " been subjected to had client-side" -#~ " noising being enabled. This entails " -#~ "*pre*-processing of the arguments to " -#~ "this method before passing them on " -#~ "to the wrappee's implementation of " -#~ ":code:`aggregate_fit()`." +#~ "Let's dive a little bit deeper and" +#~ " discuss how Flower executes this " +#~ "simulation. Whenever a client is " +#~ "selected to do some work, " +#~ "``start_simulation`` calls the function " +#~ "``numpyclient_fn`` to create an instance " +#~ "of our ``FlowerNumPyClient`` (along with " +#~ "loading the model and the data)." #~ msgstr "" #~ msgid "" -#~ "McMahan, H. Brendan, et al. \"Learning" -#~ " differentially private recurrent language " -#~ "models.\" arXiv preprint arXiv:1710.06963 " -#~ "(2017)." +#~ "`Check out Flower Code Examples " +#~ "`__" #~ msgstr "" #~ msgid "" -#~ "Andrew, Galen, et al. \"Differentially " -#~ "private learning with adaptive clipping.\" " -#~ "Advances in Neural Information Processing " -#~ "Systems 34 (2021): 17455-17466." +#~ "`Watch Flower Summit 2023 videos " +#~ "`__" #~ msgstr "" #~ msgid "" -#~ "The following command can be used " -#~ "to verfiy if Flower was successfully " -#~ "installed. 
If everything worked, it " -#~ "should print the version of Flower " -#~ "to the command line::" +#~ "In this notebook, we'll build a " +#~ "federated learning system using Flower, " +#~ "`Flower Datasets `__ " +#~ "and PyTorch. In part 1, we use " +#~ "PyTorch for the model training pipeline" +#~ " and data loading. In part 2, " +#~ "we continue to federate the PyTorch-" +#~ "based pipeline using Flower." #~ msgstr "" -#~ msgid "flwr (Python API reference)" +#~ msgid "Loading the data" #~ msgstr "" -#~ msgid "start_client" +#~ msgid "" +#~ "We simulate having multiple datasets " +#~ "from multiple organizations (also called " +#~ "the \"cross-silo\" setting in federated" +#~ " learning) by splitting the original " +#~ "CIFAR-10 dataset into multiple partitions. " +#~ "Each partition will represent the data" +#~ " from a single organization. We're " +#~ "doing this purely for experimentation " +#~ "purposes, in the real world there's " +#~ "no need for data splitting because " +#~ "each organization already has their own" +#~ " data (so the data is naturally " +#~ "partitioned)." #~ msgstr "" -#~ msgid "start_numpy_client" +#~ msgid "" +#~ "Each organization will act as a " +#~ "client in the federated learning system." +#~ " So having ten organizations participate" +#~ " in a federation means having ten " +#~ "clients connected to the federated " +#~ "learning server." #~ msgstr "" -#~ msgid "start_simulation" +#~ msgid "" +#~ "Let's now create the Federated Dataset" +#~ " abstraction that from ``flwr-datasets``" +#~ " that partitions the CIFAR-10. We " +#~ "will create small training and test " +#~ "set for each edge device and wrap" +#~ " each of them into a PyTorch " +#~ "``DataLoader``:" #~ msgstr "" -#~ msgid "server.start_server" +#~ msgid "" +#~ "We now have a list of ten " +#~ "training sets and ten validation sets" +#~ " (``trainloaders`` and ``valloaders``) " +#~ "representing the data of ten different" +#~ " organizations. 
Each ``trainloader``/``valloader`` " +#~ "pair contains 4000 training examples and" +#~ " 1000 validation examples. There's also " +#~ "a single ``testloader`` (we did not " +#~ "split the test set). Again, this " +#~ "is only necessary for building research" +#~ " or educational systems, actual federated" +#~ " learning systems have their data " +#~ "naturally distributed across multiple " +#~ "partitions." #~ msgstr "" -#~ msgid "server.strategy" +#~ msgid "" +#~ "Let's take a look at the first " +#~ "batch of images and labels in the" +#~ " first training set (i.e., " +#~ "``trainloaders[0]``) before we move on:" #~ msgstr "" -#~ msgid "server.strategy.Strategy" +#~ msgid "" +#~ "The output above shows a random " +#~ "batch of images from the first " +#~ "``trainloader`` in our list of ten " +#~ "``trainloaders``. It also prints the " +#~ "labels associated with each image (i.e.," +#~ " one of the ten possible labels " +#~ "we've seen above). If you run the" +#~ " cell again, you should see another" +#~ " batch of images." #~ msgstr "" -#~ msgid "server.strategy.FedAvg" +#~ msgid "Defining the model" #~ msgstr "" -#~ msgid "server.strategy.FedAvgM" +#~ msgid "Training the model" #~ msgstr "" -#~ msgid "server.strategy.FedMedian" +#~ msgid "" +#~ "We now have all the basic building" +#~ " blocks we need: a dataset, a " +#~ "model, a training function, and a " +#~ "test function. Let's put them together" +#~ " to train the model on the " +#~ "dataset of one of our organizations " +#~ "(``trainloaders[0]``). 
This simulates the " +#~ "reality of most machine learning " +#~ "projects today: each organization has " +#~ "their own data and trains models " +#~ "only on this internal data:" #~ msgstr "" -#~ msgid "server.strategy.QFedAvg" +#~ msgid "" +#~ "Training the simple CNN on our " +#~ "CIFAR-10 split for 5 epochs should " +#~ "result in a test set accuracy of" +#~ " about 41%, which is not good, " +#~ "but at the same time, it doesn't" +#~ " really matter for the purposes of" +#~ " this tutorial. The intent was just" +#~ " to show a simplistic centralized " +#~ "training pipeline that sets the stage" +#~ " for what comes next - federated " +#~ "learning!" #~ msgstr "" -#~ msgid "server.strategy.FaultTolerantFedAvg" +#~ msgid "Updating model parameters" #~ msgstr "" -#~ msgid "server.strategy.FedOpt" +#~ msgid "" +#~ "In federated learning, the server sends" +#~ " the global model parameters to the" +#~ " client, and the client updates the" +#~ " local model with the parameters " +#~ "received from the server. It then " +#~ "trains the model on the local data" +#~ " (which changes the model parameters " +#~ "locally) and sends the updated/changed " +#~ "model parameters back to the server " +#~ "(or, alternatively, it sends just the" +#~ " gradients back to the server, not" +#~ " the full model parameters)." #~ msgstr "" -#~ msgid "server.strategy.FedProx" +#~ msgid "" +#~ "The details of how this works are" +#~ " not really important here (feel free" +#~ " to consult the PyTorch documentation " +#~ "if you want to learn more). In " +#~ "essence, we use ``state_dict`` to access" +#~ " PyTorch model parameter tensors. 
The " +#~ "parameter tensors are then converted " +#~ "to/from a list of NumPy ndarray's " +#~ "(which Flower knows how to " +#~ "serialize/deserialize):" #~ msgstr "" -#~ msgid "server.strategy.FedAdagrad" +#~ msgid "Implementing a Flower client" #~ msgstr "" -#~ msgid "server.strategy.FedAdam" +#~ msgid "" +#~ "With that out of the way, let's" +#~ " move on to the interesting part. " +#~ "Federated learning systems consist of a" +#~ " server and multiple clients. In " +#~ "Flower, we create clients by " +#~ "implementing subclasses of ``flwr.client.Client``" +#~ " or ``flwr.client.NumPyClient``. We use " +#~ "``NumPyClient`` in this tutorial because " +#~ "it is easier to implement and " +#~ "requires us to write less boilerplate." #~ msgstr "" -#~ msgid "server.strategy.FedYogi" +#~ msgid "" +#~ "To implement the Flower client, we " +#~ "create a subclass of " +#~ "``flwr.client.NumPyClient`` and implement the " +#~ "three methods ``get_parameters``, ``fit``, and" +#~ " ``evaluate``:" #~ msgstr "" -#~ msgid "server.strategy.FedTrimmedAvg" +#~ msgid "" +#~ "``fit``: Receive model parameters from " +#~ "the server, train the model parameters" +#~ " on the local data, and return " +#~ "the (updated) model parameters to the" +#~ " server" #~ msgstr "" -#~ msgid "server.strategy.Krum" +#~ msgid "" +#~ "``evaluate``: Receive model parameters from" +#~ " the server, evaluate the model " +#~ "parameters on the local data, and " +#~ "return the evaluation result to the " +#~ "server" #~ msgstr "" -#~ msgid "server.strategy.FedXgbNnAvg" +#~ msgid "" +#~ "Our class ``FlowerClient`` defines how " +#~ "local training/evaluation will be performed" +#~ " and allows Flower to call the " +#~ "local training/evaluation through ``fit`` and" +#~ " ``evaluate``. Each instance of " +#~ "``FlowerClient`` represents a *single client*" +#~ " in our federated learning system. 
" +#~ "Federated learning systems have multiple " +#~ "clients (otherwise, there's not much to" +#~ " federate), so each client will be" +#~ " represented by its own instance of" +#~ " ``FlowerClient``. If we have, for " +#~ "example, three clients in our workload," +#~ " then we'd have three instances of" +#~ " ``FlowerClient``. Flower calls " +#~ "``FlowerClient.fit`` on the respective " +#~ "instance when the server selects a " +#~ "particular client for training (and " +#~ "``FlowerClient.evaluate`` for evaluation)." +#~ msgstr "" + +#~ msgid "Using the Virtual Client Engine" #~ msgstr "" -#~ msgid "server.strategy.DPFedAvgAdaptive" +#~ msgid "" +#~ "In this notebook, we want to " +#~ "simulate a federated learning system " +#~ "with 10 clients on a single " +#~ "machine. This means that the server " +#~ "and all 10 clients will live on" +#~ " a single machine and share resources" +#~ " such as CPU, GPU, and memory. " +#~ "Having 10 clients would mean having " +#~ "10 instances of ``FlowerClient`` in " +#~ "memory. Doing this on a single " +#~ "machine can quickly exhaust the " +#~ "available memory resources, even if only" +#~ " a subset of these clients " +#~ "participates in a single round of " +#~ "federated learning." +#~ msgstr "" + +#~ msgid "" +#~ "In addition to the regular capabilities" +#~ " where server and clients run on " +#~ "multiple machines, Flower, therefore, provides" +#~ " special simulation capabilities that " +#~ "create ``FlowerClient`` instances only when" +#~ " they are actually necessary for " +#~ "training or evaluation. To enable the" +#~ " Flower framework to create clients " +#~ "when necessary, we need to implement " +#~ "a function called ``client_fn`` that " +#~ "creates a ``FlowerClient`` instance on " +#~ "demand. 
Flower calls ``client_fn`` whenever" +#~ " it needs an instance of one " +#~ "particular client to call ``fit`` or " +#~ "``evaluate`` (those instances are usually " +#~ "discarded after use, so they should " +#~ "not keep any local state). Clients " +#~ "are identified by a client ID, or" +#~ " short ``cid``. The ``cid`` can be" +#~ " used, for example, to load different" +#~ " local data partitions for different " +#~ "clients, as can be seen below:" #~ msgstr "" -#~ msgid "server.strategy.DPFedAvgFixed" +#~ msgid "Starting the training" #~ msgstr "" #~ msgid "" -#~ "**Fix the incorrect return types of " -#~ "Strategy** " -#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" +#~ "We now have the class ``FlowerClient``" +#~ " which defines client-side " +#~ "training/evaluation and ``client_fn`` which " +#~ "allows Flower to create ``FlowerClient`` " +#~ "instances whenever it needs to call " +#~ "``fit`` or ``evaluate`` on one " +#~ "particular client. The last step is " +#~ "to start the actual simulation using " +#~ "``flwr.simulation.start_simulation``." #~ msgstr "" #~ msgid "" -#~ "The types of the return values in" -#~ " the docstrings in two methods " -#~ "(`aggregate_fit` and `aggregate_evaluate`) now " -#~ "match the hint types in the code." +#~ "The function ``start_simulation`` accepts a" +#~ " number of arguments, amongst them " +#~ "the ``client_fn`` used to create " +#~ "``FlowerClient`` instances, the number of " +#~ "clients to simulate (``num_clients``), the " +#~ "number of federated learning rounds " +#~ "(``num_rounds``), and the strategy. The " +#~ "strategy encapsulates the federated learning" +#~ " approach/algorithm, for example, *Federated " +#~ "Averaging* (FedAvg)." #~ msgstr "" #~ msgid "" -#~ "Using the `client_fn`, Flower clients " -#~ "can interchangeably run as standalone " -#~ "processes (i.e. via `start_client`) or " -#~ "in simulation (i.e. 
via `start_simulation`)" -#~ " without requiring changes to how the" -#~ " client class is defined and " -#~ "instantiated. Calling `start_numpy_client` is " -#~ "now deprecated." -#~ msgstr "" +#~ "Flower has a number of built-in" +#~ " strategies, but we can also use " +#~ "our own strategy implementations to " +#~ "customize nearly all aspects of the " +#~ "federated learning approach. For this " +#~ "example, we use the built-in " +#~ "``FedAvg`` implementation and customize it " +#~ "using a few basic parameters. The " +#~ "last step is the actual call to" +#~ " ``start_simulation`` which - you guessed" +#~ " it - starts the simulation:" +#~ msgstr "" #~ msgid "" -#~ "**Update Flower Examples** " -#~ "([#2384](https://github.com/adap/flower/pull/2384)), " -#~ "([#2425](https://github.com/adap/flower/pull/2425))" +#~ "When we call ``start_simulation``, we " +#~ "tell Flower that there are 10 " +#~ "clients (``num_clients=10``). Flower then goes" +#~ " ahead an asks the ``FedAvg`` " +#~ "strategy to select clients. ``FedAvg`` " +#~ "knows that it should select 100% " +#~ "of the available clients " +#~ "(``fraction_fit=1.0``), so it goes ahead " +#~ "and selects 10 random clients (i.e., " +#~ "100% of 10)." #~ msgstr "" #~ msgid "" -#~ "**General updates to baselines** " -#~ "([#2301](https://github.com/adap/flower/pull/2301), " -#~ "[#2305](https://github.com/adap/flower/pull/2305), " -#~ "[#2307](https://github.com/adap/flower/pull/2307), " -#~ "[#2327](https://github.com/adap/flower/pull/2327), " -#~ "[#2435](https://github.com/adap/flower/pull/2435))" +#~ "Flower then asks the selected 10 " +#~ "clients to train the model. When " +#~ "the server receives the model parameter" +#~ " updates from the clients, it hands" +#~ " those updates over to the strategy" +#~ " (*FedAvg*) for aggregation. The strategy" +#~ " aggregates those updates and returns " +#~ "the new global model, which then " +#~ "gets used in the next round of " +#~ "federated learning." 
#~ msgstr "" #~ msgid "" -#~ "**General updates to the simulation " -#~ "engine** ([#2331](https://github.com/adap/flower/pull/2331), " -#~ "[#2447](https://github.com/adap/flower/pull/2447), " -#~ "[#2448](https://github.com/adap/flower/pull/2448))" +#~ "The only thing left to do is " +#~ "to tell the strategy to call this" +#~ " function whenever it receives evaluation" +#~ " metric dictionaries from the clients:" #~ msgstr "" #~ msgid "" -#~ "**General improvements** " -#~ "([#2309](https://github.com/adap/flower/pull/2309), " -#~ "[#2310](https://github.com/adap/flower/pull/2310), " -#~ "[2313](https://github.com/adap/flower/pull/2313), " -#~ "[#2316](https://github.com/adap/flower/pull/2316), " -#~ "[2317](https://github.com/adap/flower/pull/2317),[#2349](https://github.com/adap/flower/pull/2349)," -#~ " [#2360](https://github.com/adap/flower/pull/2360), " -#~ "[#2402](https://github.com/adap/flower/pull/2402), " -#~ "[#2446](https://github.com/adap/flower/pull/2446))" +#~ "In this notebook, we'll begin to " +#~ "customize the federated learning system " +#~ "we built in the introductory notebook" +#~ " (again, using `Flower `__" +#~ " and `PyTorch `__)." #~ msgstr "" -#~ msgid "" -#~ "`flower-superlink --driver-api-address " -#~ "\"0.0.0.0:8081\" --fleet-api-address " -#~ "\"0.0.0.0:8086\"`" +#~ msgid "Let's move beyond FedAvg with Flower strategies!" #~ msgstr "" #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()`. The string " -#~ ":code:`\"0.0.0.0:8080\"` tells the client " -#~ "which server to connect to. In our" -#~ " case we can run the server and" -#~ " the client on the same machine, " -#~ "therefore we use :code:`\"0.0.0.0:8080\"`. 
If" -#~ " we run a truly federated workload" -#~ " with the server and clients running" -#~ " on different machines, all that " -#~ "needs to change is the " -#~ ":code:`server_address` we pass to the " -#~ "client." +#~ "Flower, by default, initializes the " +#~ "global model by asking one random " +#~ "client for the initial parameters. In" +#~ " many cases, we want more control " +#~ "over parameter initialization though. Flower" +#~ " therefore allows you to directly " +#~ "pass the initial parameters to the " +#~ "Strategy:" #~ msgstr "" #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()`. The string " -#~ ":code:`\"[::]:8080\"` tells the client which" -#~ " server to connect to. In our " -#~ "case we can run the server and " -#~ "the client on the same machine, " -#~ "therefore we use :code:`\"[::]:8080\"`. If " -#~ "we run a truly federated workload " -#~ "with the server and clients running " -#~ "on different machines, all that needs" -#~ " to change is the :code:`server_address`" -#~ " we point the client at." +#~ "Passing ``initial_parameters`` to the " +#~ "``FedAvg`` strategy prevents Flower from " +#~ "asking one of the clients for the" +#~ " initial parameters. If we look " +#~ "closely, we can see that the logs" +#~ " do not show any calls to the" +#~ " ``FlowerClient.get_parameters`` method." #~ msgstr "" #~ msgid "" -#~ "Let's build a horizontal federated " -#~ "learning system using XGBoost and " -#~ "Flower!" +#~ "We've seen the function ``start_simulation``" +#~ " before. It accepts a number of " +#~ "arguments, amongst them the ``client_fn`` " +#~ "used to create ``FlowerClient`` instances, " +#~ "the number of clients to simulate " +#~ "``num_clients``, the number of rounds " +#~ "``num_rounds``, and the strategy." #~ msgstr "" #~ msgid "" -#~ "Please refer to the `full code " -#~ "example `_ to learn " -#~ "more." 
+#~ "Next, we'll just pass this function " +#~ "to the FedAvg strategy before starting" +#~ " the simulation:" #~ msgstr "" #~ msgid "" -#~ "In this notebook, we'll build a " -#~ "federated learning system using Flower " -#~ "and PyTorch. In part 1, we use " -#~ "PyTorch for the model training pipeline" -#~ " and data loading. In part 2, " -#~ "we continue to federate the PyTorch-" -#~ "based pipeline using Flower." +#~ "We now have 1000 partitions, each " +#~ "holding 45 training and 5 validation " +#~ "examples. Given that the number of " +#~ "training examples on each client is " +#~ "quite small, we should probably train" +#~ " the model a bit longer, so we" +#~ " configure the clients to perform 3" +#~ " local training epochs. We should " +#~ "also adjust the fraction of clients " +#~ "selected for training during each round" +#~ " (we don't want all 1000 clients " +#~ "participating in every round), so we " +#~ "adjust ``fraction_fit`` to ``0.05``, which " +#~ "means that only 5% of available " +#~ "clients (so 50 clients) will be " +#~ "selected for training each round:" #~ msgstr "" -#~ msgid "" -#~ "Next, we install the necessary packages" -#~ " for PyTorch (``torch`` and " -#~ "``torchvision``) and Flower (``flwr``):" +#~ msgid "|93b02017c78049bbbd5ae456dcb2c91b|" #~ msgstr "" -#~ msgid "" -#~ "Federated learning can be applied to " -#~ "many different types of tasks across " -#~ "different domains. In this tutorial, we" -#~ " introduce federated learning by training" -#~ " a simple convolutional neural network " -#~ "(CNN) on the popular CIFAR-10 dataset." -#~ " CIFAR-10 can be used to train " -#~ "image classifiers that distinguish between " -#~ "images from ten different classes:" +#~ msgid "|01471150fd5144c080a176b43e92a3ff|" #~ msgstr "" -#~ msgid "" -#~ "Each organization will act as a " -#~ "client in the federated learning system." 
-#~ " So having ten organizations participate" -#~ " in a federation means having ten " -#~ "clients connected to the federated " -#~ "learning server:" +#~ msgid "|9bc21c7dbd17444a8f070c60786e3484|" #~ msgstr "" -#~ msgid "" -#~ "Let's now load the CIFAR-10 training " -#~ "and test set, partition them into " -#~ "ten smaller datasets (each split into" -#~ " training and validation set), and " -#~ "wrap the resulting partitions by " -#~ "creating a PyTorch ``DataLoader`` for " -#~ "each of them:" +#~ msgid "|3047bbce54b34099ae559963d0420d79|" #~ msgstr "" -#~ msgid "|ed6498a023f2477a9ccd57ee4514bda4|" +#~ msgid "|e9f8ce948593444fb838d2f354c7ec5d|" #~ msgstr "" -#~ msgid "|5a4f742489ac4f819afefdd4dc9ab272|" +#~ msgid "|c24c1478b30e4f74839208628a842d1e|" #~ msgstr "" -#~ msgid "|3331c80cd05045f6a56524d8e3e76d0c|" +#~ msgid "|1b3613d7a58847b59e1d3180802dbc09|" #~ msgstr "" -#~ msgid "|4987b26884ec4b2c8f06c1264bcebe60|" +#~ msgid "|9980b5213db547d0b8024a50992b9e3f|" #~ msgstr "" -#~ msgid "|ec8ae2d778aa493a986eb2fa29c220e5|" +#~ msgid "|c7afb4c92d154bfaa5e8cb9a150e17f1|" #~ msgstr "" -#~ msgid "|b8949d0669fe4f8eadc9a4932f4e9c57|" +#~ msgid "|032eb6fed6924ac387b9f13854919196|" #~ msgstr "" -#~ msgid "|94ff30bdcd09443e8488b5f29932a541|" +#~ msgid "|fbf225add7fd4df5a9bf25a95597d954|" #~ msgstr "" -#~ msgid "|48dccf1d6d0544bba8917d2783a47719|" +#~ msgid "|7efbe3d29d8349b89594e8947e910525|" #~ msgstr "" -#~ msgid "|0366618db96b4f329f0d4372d1150fde|" +#~ msgid "|329fb3c04c744eda83bb51fa444c2266|" #~ msgstr "" -#~ msgid "|ac80eddc76e6478081b1ca35eed029c0|" +#~ msgid "|c00bf2750bc24d229737a0fe1395f0fc|" #~ msgstr "" -#~ msgid "|1ac94140c317450e89678db133c7f3c2|" +#~ msgid ":py:obj:`client `\\" #~ msgstr "" -#~ msgid "|f8850c6e96fc4430b55e53bba237a7c0|" +#~ msgid ":py:obj:`common `\\" #~ msgstr "" -#~ msgid "|4a368fdd3fc34adabd20a46752a68582|" +#~ msgid ":py:obj:`server `\\" #~ msgstr "" -#~ msgid "|40f69c17bb444652a7c8dfe577cd120e|" +#~ msgid ":py:obj:`simulation `\\" #~ 
msgstr "" -#~ msgid "" -#~ "Please follow the first section on " -#~ "`Run Flower using Docker " -#~ "`_ which covers this" -#~ " step in more detail." +#~ msgid ":py:obj:`mod `\\" #~ msgstr "" -#~ msgid "" -#~ "Since `Flower 1.5 `_ we have " -#~ "introduced translations to our doc " -#~ "pages, but, as you might have " -#~ "noticed, the translations are often " -#~ "imperfect. If you speak languages other" -#~ " than English, you might be able " -#~ "to help us in our effort to " -#~ "make Federated Learning accessible to as" -#~ " many people as possible by " -#~ "contributing to those translations! This " -#~ "might also be a great opportunity " -#~ "for those wanting to become open " -#~ "source contributors with little prerequistes." +#~ msgid "run\\_client\\_app" #~ msgstr "" -#~ msgid "" -#~ "You input your translation in the " -#~ "textbox at the top and then, once" -#~ " you are happy with it, you " -#~ "either press ``Save and continue`` (to" -#~ " save the translation and go to " -#~ "the next untranslated string), ``Save " -#~ "and stay`` (to save the translation " -#~ "and stay on the same page), " -#~ "``Suggest`` (to add your translation to" -#~ " suggestions for other users to " -#~ "view), or ``Skip`` (to go to the" -#~ " next untranslated string without saving" -#~ " anything)." +#~ msgid "run\\_supernode" #~ msgstr "" #~ msgid "" -#~ "The first thing we need to do " -#~ "is to define a message type for" -#~ " the RPC system in :code:`transport.proto`." -#~ " Note that we have to do it " -#~ "for both the request and response " -#~ "messages. For more details on the " -#~ "syntax of proto3, please see the " -#~ "`official documentation `_." +#~ ":py:obj:`get `\\ " +#~ "\\(key\\[\\, default\\]\\)" #~ msgstr "" -#~ msgid "" -#~ "Source: `Official VSCode documentation " -#~ "`_" +#~ msgid "Retrieve the corresponding layout by the string key." 
#~ msgstr "" #~ msgid "" -#~ "`Developing inside a Container " -#~ "`_" +#~ "When there isn't an exact match, " +#~ "all the existing keys in the " +#~ "layout map will be treated as a" +#~ " regex and map against the input " +#~ "key again. The first match will be" +#~ " returned, based on the key insertion" +#~ " order. Return None if there isn't" +#~ " any match found." #~ msgstr "" -#~ msgid "" -#~ "`Remote development in Containers " -#~ "`_" +#~ msgid "the string key as the query for the layout." #~ msgstr "" -#~ msgid "" -#~ "If you are not familiar with " -#~ "Flower Baselines, you should probably " -#~ "check-out our `contributing guide for " -#~ "baselines `_." +#~ msgid "Corresponding layout based on the query." #~ msgstr "" #~ msgid "" -#~ "You should then check out the open" -#~ " `issues " -#~ "`_" -#~ " for baseline requests. If you find" -#~ " a baseline that you'd like to " -#~ "work on and that has no assignes," -#~ " feel free to assign it to " -#~ "yourself and start working on it!" +#~ ":py:obj:`get `\\ " +#~ "\\(key\\[\\, default\\]\\)" #~ msgstr "" #~ msgid "" -#~ "If you're familiar with how contributing" -#~ " on GitHub works, you can directly" -#~ " checkout our `getting started guide " -#~ "for contributors `_." +#~ ":py:obj:`get `\\ " +#~ "\\(key\\[\\, default\\]\\)" #~ msgstr "" -#~ msgid "" -#~ "Git is a distributed version control " -#~ "tool. This allows for an entire " -#~ "codebase's history to be stored and " -#~ "every developer's machine. It is a " -#~ "software that will need to be " -#~ "installed on your local machine, you " -#~ "can follow this `guide " -#~ "`_ to set it up." +#~ msgid ":py:obj:`strategy `\\" +#~ msgstr "" + +#~ msgid ":py:obj:`workflow `\\" +#~ msgstr "" + +#~ msgid "run\\_server\\_app" +#~ msgstr "" + +#~ msgid "run\\_superlink" #~ msgstr "" #~ msgid "" -#~ "A fork is a personal copy of " -#~ "a GitHub repository. 
To create one " -#~ "for Flower, you must navigate to " -#~ "https://github.com/adap/flower (while connected to" -#~ " your GitHub account) and click the" -#~ " ``Fork`` button situated on the top" -#~ " right of the page." +#~ ":py:obj:`start_simulation `\\" +#~ " \\(\\*\\, client\\_fn\\, num\\_clients\\)" +#~ msgstr "" + +#~ msgid "Start a Ray-based Flower simulation server." +#~ msgstr "" + +#~ msgid "" +#~ "A function creating `Client` instances. " +#~ "The function must have the signature " +#~ "`client_fn(context: Context). It should return" +#~ " a single client instance of type " +#~ "`Client`. Note that the created client" +#~ " instances are ephemeral and will " +#~ "often be destroyed after a single " +#~ "method invocation. Since client instances " +#~ "are not long-lived, they should " +#~ "not attempt to carry state over " +#~ "method invocations. Any state required " +#~ "by the instance (model, dataset, " +#~ "hyperparameters, ...) should be (re-)created" +#~ " in either the call to `client_fn`" +#~ " or the call to any of the " +#~ "client methods (e.g., load evaluation " +#~ "data in the `evaluate` method itself)." +#~ msgstr "" + +#~ msgid "The total number of clients in this simulation." #~ msgstr "" #~ msgid "" -#~ "Now we will add an upstream " -#~ "address to our repository. Still in " -#~ "the same directroy, we must run " -#~ "the following command:" +#~ "UNSUPPORTED, WILL BE REMOVED. USE " +#~ "`num_clients` INSTEAD. List `client_id`s for" +#~ " each client. This is only required" +#~ " if `num_clients` is not set. Setting" +#~ " both `num_clients` and `clients_ids` with" +#~ " `len(clients_ids)` not equal to " +#~ "`num_clients` generates an error. Using " +#~ "this argument will raise an error." #~ msgstr "" #~ msgid "" -#~ "This can be achieved by following " -#~ "this `getting started guide for " -#~ "contributors`_ (note that you won't need" -#~ " to clone the repository). 
Once you" -#~ " are able to write code and " -#~ "test it, you can finally start " -#~ "making changes!" +#~ "CPU and GPU resources for a single" +#~ " client. Supported keys are `num_cpus` " +#~ "and `num_gpus`. To understand the GPU" +#~ " utilization caused by `num_gpus`, as " +#~ "well as using custom resources, please" +#~ " consult the Ray documentation." #~ msgstr "" #~ msgid "" -#~ "For our documentation, we’ve started to" -#~ " use the `Diàtaxis framework " -#~ "`_." +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Server`. If no instance" +#~ " is provided, then `start_server` will " +#~ "create one." #~ msgstr "" #~ msgid "" -#~ "Our “How to” guides should have " -#~ "titles that continue the sencence “How" -#~ " to …”, for example, “How to " -#~ "upgrade to Flower 1.0”." +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Strategy`. If no " +#~ "strategy is provided, then `start_server` " +#~ "will use `flwr.server.strategy.FedAvg`." #~ msgstr "" #~ msgid "" -#~ "This issue is about changing the " -#~ "title of a doc from present " -#~ "continious to present simple." +#~ "An implementation of the abstract base" +#~ " class `flwr.server.ClientManager`. If no " +#~ "implementation is provided, then " +#~ "`start_simulation` will use " +#~ "`flwr.server.client_manager.SimpleClientManager`." #~ msgstr "" #~ msgid "" -#~ "Let's take the example of “Saving " -#~ "Progress” which we changed to “Save " -#~ "Progress”. Does this pass our check?" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args: { " +#~ "\"ignore_reinit_error\": True, \"include_dashboard\": " +#~ "False } An empty dictionary can " +#~ "be used (ray_init_args={}) to prevent " +#~ "any arguments from being passed to " +#~ "ray.init." 
#~ msgstr "" -#~ msgid "Before: ”How to saving progress” ❌" +#~ msgid "" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args:" #~ msgstr "" -#~ msgid "After: ”How to save progress” ✅" +#~ msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" #~ msgstr "" #~ msgid "" -#~ "This is a tiny change, but it’ll" -#~ " allow us to test your end-" -#~ "to-end setup. After cloning and " -#~ "setting up the Flower repo, here’s " -#~ "what you should do:" +#~ "An empty dictionary can be used " +#~ "(ray_init_args={}) to prevent any arguments" +#~ " from being passed to ray.init." #~ msgstr "" #~ msgid "" -#~ "Build the docs and check the " -#~ "result: ``_" +#~ "Set to True to prevent `ray.shutdown()`" +#~ " in case `ray.is_initialized()=True`." #~ msgstr "" -#~ msgid "Here’s how to change the file name:" +#~ msgid "" +#~ "Optionally specify the type of actor " +#~ "to use. The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "executing a ClientApp wrapping input " +#~ "argument `client_fn`." #~ msgstr "" #~ msgid "" -#~ "Commit the changes (commit messages are" -#~ " always imperative: “Do something”, in " -#~ "this case “Change …”)" +#~ "If you want to create your own " +#~ "Actor classes, you might need to " +#~ "pass some input argument. You can " +#~ "use this dictionary for such purpose." #~ msgstr "" #~ msgid "" -#~ "`Good first contributions " -#~ "`_, where you should" -#~ " particularly look into the " -#~ ":code:`baselines` contributions." +#~ "(default: \"DEFAULT\") Optional string " +#~ "(\"DEFAULT\" or \"SPREAD\") for the VCE" +#~ " to choose in which node the " +#~ "actor is placed. 
If you are an " +#~ "advanced user needed more control you" +#~ " can use lower-level scheduling " +#~ "strategies to pin actors to specific " +#~ "compute nodes (e.g. via " +#~ "NodeAffinitySchedulingStrategy). Please note this" +#~ " is an advanced feature. For all " +#~ "details, please refer to the Ray " +#~ "documentation: https://docs.ray.io/en/latest/ray-" +#~ "core/scheduling/index.html" #~ msgstr "" -#~ msgid "" -#~ "If the section is completely empty " -#~ "(without any token) or non-existant, " -#~ "the changelog will just contain the " -#~ "title of the PR for the changelog" -#~ " entry, without any description." +#~ msgid "**hist** -- Object containing metrics from training." #~ msgstr "" #~ msgid "" -#~ "Flower uses :code:`pyproject.toml` to manage" -#~ " dependencies and configure development " -#~ "tools (the ones which support it). " -#~ "Poetry is a build tool which " -#~ "supports `PEP 517 " -#~ "`_." +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with FastAI to train a vision " +#~ "model on CIFAR-10." +#~ msgstr "" + +#~ msgid "Let's build a federated learning system using fastai and Flower!" #~ msgstr "" #~ msgid "" -#~ "This tutorial will show you how to" -#~ " use Flower to build a federated " -#~ "version of an existing machine learning" -#~ " workload with `FedBN `_, a federated training strategy" -#~ " designed for non-iid data. We " -#~ "are using PyTorch to train a " -#~ "Convolutional Neural Network(with Batch " -#~ "Normalization layers) on the CIFAR-10 " -#~ "dataset. When applying FedBN, only few" -#~ " changes needed compared to `Example: " -#~ "PyTorch - From Centralized To Federated" -#~ " `_." +#~ "Please refer to the `full code " +#~ "example `_ to learn more." #~ msgstr "" #~ msgid "" -#~ "All files are revised based on " -#~ "`Example: PyTorch - From Centralized To" -#~ " Federated `_. 
The " -#~ "only thing to do is modifying the" -#~ " file called :code:`cifar.py`, revised part" -#~ " is shown below:" +#~ "Check out this Federating Learning " +#~ "quickstart tutorial for using Flower " +#~ "with HuggingFace Transformers in order " +#~ "to fine-tune an LLM." #~ msgstr "" #~ msgid "" -#~ "So far this should all look fairly" -#~ " familiar if you've used PyTorch " -#~ "before. Let's take the next step " -#~ "and use what we've built to create" -#~ " a federated learning system within " -#~ "FedBN, the sytstem consists of one " -#~ "server and two clients." +#~ "Let's build a federated learning system" +#~ " using Hugging Face Transformers and " +#~ "Flower!" #~ msgstr "" #~ msgid "" -#~ "If you have read `Example: PyTorch " -#~ "- From Centralized To Federated " -#~ "`_, the following" -#~ " parts are easy to follow, onyl " -#~ ":code:`get_parameters` and :code:`set_parameters` " -#~ "function in :code:`client.py` needed to " -#~ "revise. If not, please read the " -#~ "`Example: PyTorch - From Centralized To" -#~ " Federated `_. first." +#~ "We will leverage Hugging Face to " +#~ "federate the training of language models" +#~ " over multiple clients using Flower. " +#~ "More specifically, we will fine-tune " +#~ "a pre-trained Transformer model " +#~ "(distilBERT) for sequence classification over" +#~ " a dataset of IMDB ratings. The " +#~ "end goal is to detect if a " +#~ "movie rating is positive or negative." #~ msgstr "" -#~ msgid "Example: Walk-Through PyTorch & MNIST" +#~ msgid "Dependencies" #~ msgstr "" #~ msgid "" -#~ "In this tutorial we will learn, " -#~ "how to train a Convolutional Neural " -#~ "Network on MNIST using Flower and " -#~ "PyTorch." +#~ "To follow along this tutorial you " +#~ "will need to install the following " +#~ "packages: :code:`datasets`, :code:`evaluate`, " +#~ ":code:`flwr`, :code:`torch`, and " +#~ ":code:`transformers`. 
This can be done " +#~ "using :code:`pip`:" #~ msgstr "" -#~ msgid "" -#~ "Since we want to use PyTorch to" -#~ " solve a computer vision task, let's" -#~ " go ahead an install PyTorch and " -#~ "the **torchvision** library:" +#~ msgid "Standard Hugging Face workflow" #~ msgstr "" -#~ msgid "Ready... Set... Train!" +#~ msgid "Handling the data" #~ msgstr "" #~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training with two " -#~ "clients and one server. Our training " -#~ "procedure and network architecture are " -#~ "based on PyTorch's `Basic MNIST Example" -#~ " `_. " -#~ "This will allow you see how easy" -#~ " it is to wrap your code with" -#~ " Flower and begin training in a " -#~ "federated way. We provide you with " -#~ "two helper scripts, namely *run-" -#~ "server.sh*, and *run-clients.sh*. Don't " -#~ "be afraid to look inside, they are" -#~ " simple enough =)." +#~ "To fetch the IMDB dataset, we will" +#~ " use Hugging Face's :code:`datasets` " +#~ "library. We then need to tokenize " +#~ "the data and create :code:`PyTorch` " +#~ "dataloaders, this is all done in " +#~ "the :code:`load_data` function:" +#~ msgstr "" + +#~ msgid "Training and testing the model" #~ msgstr "" #~ msgid "" -#~ "Go ahead and launch on a terminal" -#~ " the *run-server.sh* script first as" -#~ " follows:" +#~ "Once we have a way of creating " +#~ "our trainloader and testloader, we can" +#~ " take care of the training and " +#~ "testing. This is very similar to " +#~ "any :code:`PyTorch` training or testing " +#~ "loop:" #~ msgstr "" -#~ msgid "Now that the server is up and running, go ahead and launch the clients." +#~ msgid "Creating the model itself" #~ msgstr "" #~ msgid "" -#~ "Et voilà! You should be seeing the" -#~ " training procedure and, after a few" -#~ " iterations, the test accuracy for " -#~ "each client." 
+#~ "To create the model itself, we " +#~ "will just load the pre-trained " +#~ "distillBERT model using Hugging Face’s " +#~ ":code:`AutoModelForSequenceClassification` :" #~ msgstr "" -#~ msgid "Now, let's see what is really happening inside." +#~ msgid "Federating the example" +#~ msgstr "" + +#~ msgid "Creating the IMDBClient" #~ msgstr "" #~ msgid "" -#~ "Inside the server helper script *run-" -#~ "server.sh* you will find the following" -#~ " code that basically runs the " -#~ ":code:`server.py`" +#~ "To federate our example to multiple " +#~ "clients, we first need to write " +#~ "our Flower client class (inheriting from" +#~ " :code:`flwr.client.NumPyClient`). This is very" +#~ " easy, as our model is a " +#~ "standard :code:`PyTorch` model:" #~ msgstr "" #~ msgid "" -#~ "We can go a bit deeper and " -#~ "see that :code:`server.py` simply launches " -#~ "a server that will coordinate three " -#~ "rounds of training. Flower Servers are" -#~ " very customizable, but for simple " -#~ "workloads, we can start a server " -#~ "using the :ref:`start_server ` function and leave " -#~ "all the configuration possibilities at " -#~ "their default values, as seen below." +#~ "The :code:`get_parameters` function lets the" +#~ " server get the client's parameters. " +#~ "Inversely, the :code:`set_parameters` function " +#~ "allows the server to send its " +#~ "parameters to the client. Finally, the" +#~ " :code:`fit` function trains the model " +#~ "locally for the client, and the " +#~ ":code:`evaluate` function tests the model " +#~ "locally and returns the relevant " +#~ "metrics." +#~ msgstr "" + +#~ msgid "Starting the server" #~ msgstr "" #~ msgid "" -#~ "Next, let's take a look at the " -#~ "*run-clients.sh* file. You will see " -#~ "that it contains the main loop " -#~ "that starts a set of *clients*." +#~ "Now that we have a way to " +#~ "instantiate clients, we need to create" +#~ " our server in order to aggregate " +#~ "the results. 
Using Flower, this can " +#~ "be done very easily by first " +#~ "choosing a strategy (here, we are " +#~ "using :code:`FedAvg`, which will define " +#~ "the global weights as the average " +#~ "of all the clients' weights at " +#~ "each round) and then using the " +#~ ":code:`flwr.server.start_server` function:" #~ msgstr "" #~ msgid "" -#~ "**cid**: is the client ID. It is" -#~ " an integer that uniquely identifies " -#~ "client identifier." +#~ "The :code:`weighted_average` function is there" +#~ " to provide a way to aggregate " +#~ "the metrics distributed amongst the " +#~ "clients (basically this allows us to " +#~ "display a nice average accuracy and " +#~ "loss for every round)." #~ msgstr "" -#~ msgid "**sever_address**: String that identifies IP and port of the server." +#~ msgid "Putting everything together" #~ msgstr "" -#~ msgid "" -#~ "**nb_clients**: This defines the number " -#~ "of clients being created. This piece " -#~ "of information is not required by " -#~ "the client, but it helps us " -#~ "partition the original MNIST dataset to" -#~ " make sure that every client is " -#~ "working on unique subsets of both " -#~ "*training* and *test* sets." +#~ msgid "We can now start client instances using:" #~ msgstr "" #~ msgid "" -#~ "Again, we can go deeper and look" -#~ " inside :code:`flwr_example/quickstart-" -#~ "pytorch/client.py`. After going through the" -#~ " argument parsing code at the " -#~ "beginning of our :code:`main` function, " -#~ "you will find a call to " -#~ ":code:`mnist.load_data`. This function is " -#~ "responsible for partitioning the original " -#~ "MNIST datasets (*training* and *test*) " -#~ "and returning a :code:`torch.utils.data.DataLoader`" -#~ " s for each of them. We then" -#~ " instantiate a :code:`PytorchMNISTClient` object" -#~ " with our client ID, our DataLoaders," -#~ " the number of epochs in each " -#~ "round, and which device we want to" -#~ " use for training (CPU or GPU)." 
+#~ "And they will be able to connect" +#~ " to the server and start the " +#~ "federated training." #~ msgstr "" #~ msgid "" -#~ "The :code:`PytorchMNISTClient` object when " -#~ "finally passed to :code:`fl.client.start_client` " -#~ "along with the server's address as " -#~ "the training process begins." +#~ "If you want to check out " +#~ "everything put together, you should " +#~ "check out the `full code example " +#~ "`_ ." #~ msgstr "" -#~ msgid "A Closer Look" +#~ msgid "" +#~ "Of course, this is a very basic" +#~ " example, and a lot can be " +#~ "added or modified, it was just to" +#~ " showcase how simply we could " +#~ "federate a Hugging Face workflow using" +#~ " Flower." #~ msgstr "" #~ msgid "" -#~ "Now, let's look closely into the " -#~ ":code:`PytorchMNISTClient` inside :code:`flwr_example" -#~ ".quickstart-pytorch.mnist` and see what it" -#~ " is doing:" +#~ "Note that in this example we used" +#~ " :code:`PyTorch`, but we could have " +#~ "very well used :code:`TensorFlow`." #~ msgstr "" #~ msgid "" -#~ "The first thing to notice is that" -#~ " :code:`PytorchMNISTClient` instantiates a CNN" -#~ " model inside its constructor" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with PyTorch Lightning to train an " +#~ "Auto Encoder model on MNIST." #~ msgstr "" #~ msgid "" -#~ "The code for the CNN is available" -#~ " under :code:`quickstart-pytorch.mnist` and " -#~ "it is reproduced below. It is the" -#~ " same network found in `Basic MNIST" -#~ " Example " -#~ "`_." +#~ "Let's build a horizontal federated " +#~ "learning system using PyTorch Lightning " +#~ "and Flower!" #~ msgstr "" #~ msgid "" -#~ "The second thing to notice is that" -#~ " :code:`PytorchMNISTClient` class inherits from" -#~ " the :code:`fl.client.Client`, and hence it" -#~ " must implement the following methods:" +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." 
#~ msgstr "" #~ msgid "" -#~ "When comparing the abstract class to " -#~ "its derived class :code:`PytorchMNISTClient` " -#~ "you will notice that :code:`fit` calls" -#~ " a :code:`train` function and that " -#~ ":code:`evaluate` calls a :code:`test`: " -#~ "function." +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with TensorFlow to train a MobilNetV2" +#~ " model on CIFAR-10." #~ msgstr "" -#~ msgid "" -#~ "These functions can both be found " -#~ "inside the same :code:`quickstart-" -#~ "pytorch.mnist` module:" +#~ msgid "Let's build a federated learning system in less than 20 lines of code!" +#~ msgstr "" + +#~ msgid "Before Flower can be imported we have to install it:" #~ msgstr "" #~ msgid "" -#~ "Observe that these functions encapsulate " -#~ "regular training and test loops and " -#~ "provide :code:`fit` and :code:`evaluate` with" -#~ " final statistics for each round. You" -#~ " could substitute them with your " -#~ "custom train and test loops and " -#~ "change the network architecture, and the" -#~ " entire example would still work " -#~ "flawlessly. As a matter of fact, " -#~ "why not try and modify the code" -#~ " to an example of your liking?" +#~ "Since we want to use the Keras " +#~ "API of TensorFlow (TF), we have to" +#~ " install TF as well:" #~ msgstr "" -#~ msgid "Give It a Try" +#~ msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" #~ msgstr "" #~ msgid "" -#~ "Looking through the quickstart code " -#~ "description above will have given a " -#~ "good understanding of how *clients* and" -#~ " *servers* work in Flower, how to " -#~ "run a simple experiment, and the " -#~ "internals of a client wrapper. Here " -#~ "are a few things you could try " -#~ "on your own and get more " -#~ "experience with Flower:" +#~ "We use the Keras utilities of TF" +#~ " to load CIFAR10, a popular colored" +#~ " image classification dataset for machine" +#~ " learning. 
The call to " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` downloads " +#~ "CIFAR10, caches it locally, and then " +#~ "returns the entire training and test " +#~ "set as NumPy ndarrays." #~ msgstr "" #~ msgid "" -#~ "Try and change :code:`PytorchMNISTClient` so" -#~ " it can accept different architectures." +#~ "Next, we need a model. For the " +#~ "purpose of this tutorial, we use " +#~ "MobilNetV2 with 10 output classes:" #~ msgstr "" #~ msgid "" -#~ "Modify the :code:`train` function so " -#~ "that it accepts different optimizers" +#~ "The Flower server interacts with clients" +#~ " through an interface called " +#~ ":code:`Client`. When the server selects " +#~ "a particular client for training, it " +#~ "sends training instructions over the " +#~ "network. The client receives those " +#~ "instructions and calls one of the " +#~ ":code:`Client` methods to run your code" +#~ " (i.e., to train the neural network" +#~ " we defined earlier)." #~ msgstr "" #~ msgid "" -#~ "Modify the :code:`test` function so that" -#~ " it proves not only the top-1 " -#~ "(regular accuracy) but also the top-5" -#~ " accuracy?" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses Keras." +#~ " The :code:`NumPyClient` interface defines " +#~ "three methods which can be implemented" +#~ " in the following way:" #~ msgstr "" #~ msgid "" -#~ "Go larger! Try to adapt the code" -#~ " to larger images and datasets. Why" -#~ " not try training on ImageNet with" -#~ " a ResNet-50?" +#~ "We can now create an instance of" +#~ " our class :code:`CifarClient` and add " +#~ "one line to actually run this " +#~ "client:" #~ msgstr "" -#~ msgid "You are ready now. Enjoy learning in a federated way!" +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. 
If you implement" +#~ " a client of type :code:`NumPyClient` " +#~ "you'll need to first call its " +#~ ":code:`to_client()` method. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." #~ msgstr "" -#~ msgid "Differential privacy" +#~ msgid "Each client will have its own dataset." #~ msgstr "" #~ msgid "" -#~ "Flower provides differential privacy (DP) " -#~ "wrapper classes for the easy integration" -#~ " of the central DP guarantees " -#~ "provided by DP-FedAvg into training " -#~ "pipelines defined in any of the " -#~ "various ML frameworks that Flower is " -#~ "compatible with." +#~ "You should now see how the " +#~ "training does in the very first " +#~ "terminal (the one that started the " +#~ "server):" #~ msgstr "" #~ msgid "" -#~ "Please note that these components are" -#~ " still experimental; the correct " -#~ "configuration of DP for a specific " -#~ "task is still an unsolved problem." +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this can be " +#~ "found in :code:`examples/quickstart-" +#~ "tensorflow/client.py`." #~ msgstr "" -#~ msgid "" -#~ "The name DP-FedAvg is misleading " -#~ "since it can be applied on top " -#~ "of any FL algorithm that conforms " -#~ "to the general structure prescribed by" -#~ " the FedOpt family of algorithms." +#~ msgid "|e5918c1c06a4434bbe4bf49235e40059|" #~ msgstr "" -#~ msgid "DP-FedAvg" +#~ msgid "|c0165741bd1944f09ec55ce49032377d|" #~ msgstr "" -#~ msgid "" -#~ "DP-FedAvg, originally proposed by " -#~ "McMahan et al. 
[mcmahan]_ and extended" -#~ " by Andrew et al. [andrew]_, is " -#~ "essentially FedAvg with the following " -#~ "modifications." +#~ msgid "|0a0ac9427ac7487b8e52d75ed514f04e|" #~ msgstr "" -#~ msgid "" -#~ "**Clipping** : The influence of each " -#~ "client's update is bounded by clipping" -#~ " it. This is achieved by enforcing" -#~ " a cap on the L2 norm of " -#~ "the update, scaling it down if " -#~ "needed." +#~ msgid "|5defee3ea4ca40d99fcd3e4ea045be25|" #~ msgstr "" -#~ msgid "" -#~ "**Noising** : Gaussian noise, calibrated " -#~ "to the clipping threshold, is added " -#~ "to the average computed at the " -#~ "server." +#~ msgid "|74f26ca701254d3db57d7899bd91eb55|" #~ msgstr "" -#~ msgid "" -#~ "The distribution of the update norm " -#~ "has been shown to vary from " -#~ "task-to-task and to evolve as " -#~ "training progresses. This variability is " -#~ "crucial in understanding its impact on" -#~ " differential privacy guarantees, emphasizing " -#~ "the need for an adaptive approach " -#~ "[andrew]_ that continuously adjusts the " -#~ "clipping threshold to track a " -#~ "prespecified quantile of the update norm" -#~ " distribution." +#~ msgid "|bda79f21f8154258a40e5766b2634ad7|" #~ msgstr "" -#~ msgid "Simplifying Assumptions" +#~ msgid "|89d30862e62e4f9989e193483a08680a|" #~ msgstr "" -#~ msgid "" -#~ "We make (and attempt to enforce) a" -#~ " number of assumptions that must be" -#~ " satisfied to ensure that the " -#~ "training process actually realizes the " -#~ ":math:`(\\epsilon, \\delta)` guarantees the " -#~ "user has in mind when configuring " -#~ "the setup." +#~ msgid "|77e9918671c54b4f86e01369c0785ce8|" #~ msgstr "" -#~ msgid "" -#~ "**Fixed-size subsampling** :Fixed-size " -#~ "subsamples of the clients must be " -#~ "taken at each round, as opposed to" -#~ " variable-sized Poisson subsamples." 
+#~ msgid "|7e4ccef37cc94148a067107b34eb7447|" #~ msgstr "" -#~ msgid "" -#~ "**Unweighted averaging** : The contributions" -#~ " from all the clients must weighted" -#~ " equally in the aggregate to " -#~ "eliminate the requirement for the server" -#~ " to know in advance the sum of" -#~ " the weights of all clients available" -#~ " for selection." +#~ msgid "|28e47e4cded14479a0846c8e5f22c872|" #~ msgstr "" -#~ msgid "" -#~ "**No client failures** : The set " -#~ "of available clients must stay constant" -#~ " across all rounds of training. In" -#~ " other words, clients cannot drop out" -#~ " or fail." +#~ msgid "|4b8c5d1afa144294b76ffc76e4658a38|" #~ msgstr "" -#~ msgid "" -#~ "The first two are useful for " -#~ "eliminating a multitude of complications " -#~ "associated with calibrating the noise to" -#~ " the clipping threshold, while the " -#~ "third one is required to comply " -#~ "with the assumptions of the privacy " -#~ "analysis." +#~ msgid "|9dbdb3a0f6cb4a129fac863eaa414c17|" #~ msgstr "" -#~ msgid "" -#~ "These restrictions are in line with " -#~ "constraints imposed by Andrew et al. " -#~ "[andrew]_." +#~ msgid "|81749d0ac0834c36a83bd38f433fea31|" #~ msgstr "" -#~ msgid "Customizable Responsibility for Noise injection" +#~ msgid "|ed9aae51da70428eab7eef32f21e819e|" #~ msgstr "" -#~ msgid "" -#~ "In contrast to other implementations " -#~ "where the addition of noise is " -#~ "performed at the server, you can " -#~ "configure the site of noise injection" -#~ " to better match your threat model." -#~ " We provide users with the " -#~ "flexibility to set up the training " -#~ "such that each client independently adds" -#~ " a small amount of noise to the" -#~ " clipped update, with the result that" -#~ " simply aggregating the noisy updates " -#~ "is equivalent to the explicit addition" -#~ " of noise to the non-noisy " -#~ "aggregate at the server." 
+#~ msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" #~ msgstr "" -#~ msgid "" -#~ "To be precise, if we let :math:`m`" -#~ " be the number of clients sampled " -#~ "each round and :math:`\\sigma_\\Delta` be " -#~ "the scale of the total Gaussian " -#~ "noise that needs to be added to" -#~ " the sum of the model updates, " -#~ "we can use simple maths to show" -#~ " that this is equivalent to each " -#~ "client adding noise with scale " -#~ ":math:`\\sigma_\\Delta/\\sqrt{m}`." +#~ msgid "|33cacb7d985c4906b348515c1a5cd993|" #~ msgstr "" -#~ msgid "Wrapper-based approach" +#~ msgid "|cc080a555947492fa66131dc3a967603|" #~ msgstr "" -#~ msgid "" -#~ "Introducing DP to an existing workload" -#~ " can be thought of as adding an" -#~ " extra layer of security around it." -#~ " This inspired us to provide the " -#~ "additional server and client-side logic" -#~ " needed to make the training process" -#~ " differentially private as wrappers for " -#~ "instances of the :code:`Strategy` and " -#~ ":code:`NumPyClient` abstract classes respectively." -#~ " This wrapper-based approach has the" -#~ " advantage of being easily composable " -#~ "with other wrappers that someone might" -#~ " contribute to the Flower library in" -#~ " the future, e.g., for secure " -#~ "aggregation. Using Inheritance instead can " -#~ "be tedious because that would require" -#~ " the creation of new sub- classes " -#~ "every time a new class implementing " -#~ ":code:`Strategy` or :code:`NumPyClient` is " -#~ "defined." +#~ msgid "|085c3e0fb8664c6aa06246636524b20b|" #~ msgstr "" -#~ msgid "Server-side logic" +#~ msgid "|bfe69c74e48c45d49b50251c38c2a019|" #~ msgstr "" -#~ msgid "" -#~ "The first version of our solution " -#~ "was to define a decorator whose " -#~ "constructor accepted, among other things, " -#~ "a boolean-valued variable indicating " -#~ "whether adaptive clipping was to be " -#~ "enabled or not. 
We quickly realized " -#~ "that this would clutter its " -#~ ":code:`__init__()` function with variables " -#~ "corresponding to hyperparameters of adaptive" -#~ " clipping that would remain unused " -#~ "when it was disabled. A cleaner " -#~ "implementation could be achieved by " -#~ "splitting the functionality into two " -#~ "decorators, :code:`DPFedAvgFixed` and " -#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" -#~ " classing the former. The constructors " -#~ "for both classes accept a boolean " -#~ "parameter :code:`server_side_noising`, which, as " -#~ "the name suggests, determines where " -#~ "noising is to be performed." +#~ msgid "|ebbecd651f0348d99c6511ea859bf4ca|" #~ msgstr "" -#~ msgid "" -#~ "The server-side capabilities required " -#~ "for the original version of DP-" -#~ "FedAvg, i.e., the one which performed" -#~ " fixed clipping, can be completely " -#~ "captured with the help of wrapper " -#~ "logic for just the following two " -#~ "methods of the :code:`Strategy` abstract " -#~ "class." +#~ msgid "|163117eb654a4273babba413cf8065f5|" #~ msgstr "" -#~ msgid "" -#~ ":code:`configure_fit()` : The config " -#~ "dictionary being sent by the wrapped " -#~ ":code:`Strategy` to each client needs to" -#~ " be augmented with an additional " -#~ "value equal to the clipping threshold" -#~ " (keyed under :code:`dpfedavg_clip_norm`) and," -#~ " if :code:`server_side_noising=true`, another one" -#~ " equal to the scale of the " -#~ "Gaussian noise that needs to be " -#~ "added at the client (keyed under " -#~ ":code:`dpfedavg_noise_stddev`). This entails " -#~ "*post*-processing of the results returned " -#~ "by the wrappee's implementation of " -#~ ":code:`configure_fit()`." +#~ msgid "|452ac3ba453b4cd1be27be1ba7560d64|" #~ msgstr "" -#~ msgid "" -#~ ":code:`aggregate_fit()`: We check whether any" -#~ " of the sampled clients dropped out" -#~ " or failed to upload an update " -#~ "before the round timed out. 
In " -#~ "that case, we need to abort the" -#~ " current round, discarding any successful" -#~ " updates that were received, and move" -#~ " on to the next one. On the " -#~ "other hand, if all clients responded " -#~ "successfully, we must force the " -#~ "averaging of the updates to happen " -#~ "in an unweighted manner by intercepting" -#~ " the :code:`parameters` field of " -#~ ":code:`FitRes` for each received update " -#~ "and setting it to 1. Furthermore, " -#~ "if :code:`server_side_noising=true`, each update " -#~ "is perturbed with an amount of " -#~ "noise equal to what it would have" -#~ " been subjected to had client-side" -#~ " noising being enabled. This entails " -#~ "*pre*-processing of the arguments to " -#~ "this method before passing them on " -#~ "to the wrappee's implementation of " -#~ ":code:`aggregate_fit()`." +#~ msgid "|f403fcd69e4e44409627e748b404c086|" +#~ msgstr "" + +#~ msgid "|4b00fe63870145968f8443619a792a42|" #~ msgstr "" -#~ msgid "" -#~ "We can't directly change the aggregation" -#~ " function of the wrapped strategy to" -#~ " force it to add noise to the" -#~ " aggregate, hence we simulate client-" -#~ "side noising to implement server-side" -#~ " noising." +#~ msgid "|368378731066486fa4397e89bc6b870c|" #~ msgstr "" -#~ msgid "" -#~ "These changes have been put together " -#~ "into a class called :code:`DPFedAvgFixed`, " -#~ "whose constructor accepts the strategy " -#~ "being decorated, the clipping threshold " -#~ "and the number of clients sampled " -#~ "every round as compulsory arguments. The" -#~ " user is expected to specify the " -#~ "clipping threshold since the order of" -#~ " magnitude of the update norms is " -#~ "highly dependent on the model being " -#~ "trained and providing a default value" -#~ " would be misleading. 
The number of" -#~ " clients sampled at every round is" -#~ " required to calculate the amount of" -#~ " noise that must be added to " -#~ "each individual update, either by the" -#~ " server or the clients." +#~ msgid "|a66aa83d85bf4ffba7ed660b718066da|" +#~ msgstr "" + +#~ msgid "|82324b9af72a4582a81839d55caab767|" +#~ msgstr "" + +#~ msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" #~ msgstr "" #~ msgid "" -#~ "The additional functionality required to " -#~ "facilitate adaptive clipping has been " -#~ "provided in :code:`DPFedAvgAdaptive`, a " -#~ "subclass of :code:`DPFedAvgFixed`. It " -#~ "overrides the above-mentioned methods to" -#~ " do the following." +#~ "The Visual Studio Code Remote - " +#~ "Containers extension lets you use a " +#~ "Docker container as a fully-featured " +#~ "development environment. It allows you " +#~ "to open any folder inside (or " +#~ "mounted into) a container and take " +#~ "advantage of Visual Studio Code's full" +#~ " feature set. A :code:`devcontainer.json` " +#~ "file in your project tells VS Code" +#~ " how to access (or create) a " +#~ "development container with a well-" +#~ "defined tool and runtime stack. This " +#~ "container can be used to run an" +#~ " application or to separate tools, " +#~ "libraries, or runtimes needed for " +#~ "working with a codebase." #~ msgstr "" #~ msgid "" -#~ ":code:`configure_fit()` : It intercepts the" -#~ " config dict returned by " -#~ ":code:`super.configure_fit()` to add the " -#~ "key-value pair " -#~ ":code:`dpfedavg_adaptive_clip_enabled:True` to it, " -#~ "which the client interprets as an " -#~ "instruction to include an indicator bit" -#~ " (1 if update norm <= clipping " -#~ "threshold, 0 otherwise) in the results" -#~ " returned by it." +#~ "Configuring and setting up the " +#~ ":code:`Dockerfile` as well the configuration" +#~ " for the devcontainer can be a " +#~ "bit more involved. The good thing " +#~ "is you don't have to do it. 
" +#~ "Usually it should be enough to " +#~ "install `Docker " +#~ "`_ on your " +#~ "system and ensure its available on " +#~ "your command line. Additionally, install " +#~ "the `VSCode Containers Extension " +#~ "`_." #~ msgstr "" #~ msgid "" -#~ ":code:`aggregate_fit()` : It follows a " -#~ "call to :code:`super.aggregate_fit()` with one" -#~ " to :code:`__update_clip_norm__()`, a procedure" -#~ " which adjusts the clipping threshold " -#~ "on the basis of the indicator bits" -#~ " received from the sampled clients." +#~ "If you prefer to use Anaconda for" +#~ " your virtual environment then install " +#~ "and setup the `conda " +#~ "`_ package. After setting" +#~ " it up you can create a virtual" +#~ " environment with:" #~ msgstr "" -#~ msgid "Client-side logic" +#~ msgid "The :code:`SecAgg+` abstraction" +#~ msgstr "" + +#~ msgid "The :code:`LightSecAgg` abstraction" #~ msgstr "" #~ msgid "" -#~ "The client-side capabilities required " -#~ "can be completely captured through " -#~ "wrapper logic for just the :code:`fit()`" -#~ " method of the :code:`NumPyClient` abstract" -#~ " class. To be precise, we need " -#~ "to *post-process* the update computed" -#~ " by the wrapped client to clip " -#~ "it, if necessary, to the threshold " -#~ "value supplied by the server as " -#~ "part of the config dictionary. In " -#~ "addition to this, it may need to" -#~ " perform some extra work if either" -#~ " (or both) of the following keys " -#~ "are also present in the dict." +#~ "A fork is a personal copy of " +#~ "a GitHub repository. To create one " +#~ "for Flower, you must navigate to " +#~ "``_ (while connected " +#~ "to your GitHub account) and click " +#~ "the ``Fork`` button situated on the " +#~ "top right of the page." #~ msgstr "" #~ msgid "" -#~ ":code:`dpfedavg_noise_stddev` : Generate and " -#~ "add the specified amount of noise " -#~ "to the clipped update." 
+#~ "To check which files have been " +#~ "modified compared to the last version" +#~ " (last commit) and to see which " +#~ "files are staged for commit, you " +#~ "can use the :code:`git status` command." #~ msgstr "" #~ msgid "" -#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the" -#~ " metrics dict in the :code:`FitRes` " -#~ "object being returned to the server " -#~ "with an indicator bit, calculated as " -#~ "described earlier." +#~ "Once you have added all the files" +#~ " you wanted to commit using " +#~ ":code:`git add`, you can finally create" +#~ " your commit using this command:" #~ msgstr "" -#~ msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" +#~ msgid "" +#~ "The \\ is there to " +#~ "explain to others what the commit " +#~ "does. It should be written in an" +#~ " imperative style and be concise. An" +#~ " example would be :code:`git commit " +#~ "-m \"Add images to README\"`." #~ msgstr "" #~ msgid "" -#~ "Assume you have trained for :math:`n`" -#~ " rounds with sampling fraction :math:`q`" -#~ " and noise multiplier :math:`z`. In " -#~ "order to calculate the :math:`\\epsilon` " -#~ "value this would result in for a" -#~ " particular :math:`\\delta`, the following " -#~ "script may be used." +#~ ":doc:`Good first contributions `, where you" +#~ " should particularly look into the " +#~ ":code:`baselines` contributions." #~ msgstr "" #~ msgid "" -#~ "McMahan et al. \"Learning Differentially " -#~ "Private Recurrent Language Models.\" " -#~ "International Conference on Learning " -#~ "Representations (ICLR), 2017." +#~ "Flower uses :code:`pyproject.toml` to manage" +#~ " dependencies and configure development " +#~ "tools (the ones which support it). " +#~ "Poetry is a build tool which " +#~ "supports `PEP 517 " +#~ "`_." #~ msgstr "" #~ msgid "" -#~ "Andrew, Galen, et al. \"Differentially " -#~ "Private Learning with Adaptive Clipping.\" " -#~ "Advances in Neural Information Processing " -#~ "Systems (NeurIPS), 2021." 
+#~ "Install `xz` (to install different " +#~ "Python versions) and `pandoc` to build" +#~ " the docs::" #~ msgstr "" #~ msgid "" -#~ "This can be achieved by customizing " -#~ "an existing strategy or by `implementing" -#~ " a custom strategy from scratch " -#~ "`_. Here's a nonsensical " -#~ "example that customizes :code:`FedAvg` by " -#~ "adding a custom ``\"hello\": \"world\"`` " -#~ "configuration key/value pair to the " -#~ "config dict of a *single client* " -#~ "(only the first client in the " -#~ "list, the other clients in this " -#~ "round to not receive this \"special\"" -#~ " config value):" +#~ "Ensure you system (Ubuntu 22.04+) is " +#~ "up-to-date, and you have all " +#~ "necessary packages::" #~ msgstr "" #~ msgid "" -#~ "More sophisticated implementations can use " -#~ ":code:`configure_fit` to implement custom " -#~ "client selection logic. A client will" -#~ " only participate in a round if " -#~ "the corresponding :code:`ClientProxy` is " -#~ "included in the the list returned " -#~ "from :code:`configure_fit`." +#~ "1. Clone the `Flower repository " +#~ "`_ from GitHub::" #~ msgstr "" #~ msgid "" -#~ "More sophisticated implementations can use " -#~ ":code:`configure_evaluate` to implement custom " -#~ "client selection logic. A client will" -#~ " only participate in a round if " -#~ "the corresponding :code:`ClientProxy` is " -#~ "included in the the list returned " -#~ "from :code:`configure_evaluate`." +#~ "Let's create the Python environment for" +#~ " all-things Flower. If you wish " +#~ "to use :code:`pyenv`, we provide two " +#~ "convenience scripts that you can use." +#~ " If you prefer using something else" +#~ " than :code:`pyenv`, create a new " +#~ "environment, activate and skip to the" +#~ " last point where all packages are" +#~ " installed." 
#~ msgstr "" #~ msgid "" -#~ "`How to run Flower using Docker " -#~ "`_" +#~ "If you don't have :code:`pyenv` " +#~ "installed, the following script that " +#~ "will install it, set it up, and" +#~ " create the virtual environment (with " +#~ ":code:`Python 3.9.20` by default)::" #~ msgstr "" #~ msgid "" -#~ "Ray Dashboard: ``_" +#~ "If you already have :code:`pyenv` " +#~ "installed (along with the :code:`pyenv-" +#~ "virtualenv` plugin), you can use the " +#~ "following convenience script (with " +#~ ":code:`Python 3.9.20` by default)::" #~ msgstr "" #~ msgid "" -#~ "Ray Metrics: ``_" +#~ "3. Install the Flower package in " +#~ "development mode (think :code:`pip install " +#~ "-e`) along with all necessary " +#~ "dependencies::" #~ msgstr "" -#~ msgid "Enjoy building more robust and flexible ``ClientApp``s with mods!" +#~ msgid "" +#~ "The Flower repository contains a number" +#~ " of convenience scripts to make " +#~ "recurring development tasks easier and " +#~ "less error-prone. See the :code:`/dev`" +#~ " subdirectory for a full list. The" +#~ " following scripts are amongst the " +#~ "most important ones:" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`ClientApp `\\ " -#~ "\\(client\\_fn\\[\\, mods\\]\\)" +#~ "If in a hurry, bypass the hook " +#~ "using ``--no-verify`` with the ``git " +#~ "commit`` command. ::" #~ msgstr "" -#~ msgid ":py:obj:`flwr.server.driver `\\" +#~ msgid "" +#~ "Developers could run the full set " +#~ "of Github Actions workflows under their" +#~ " local environment by using `Act " +#~ "`_. Please refer to" +#~ " the installation instructions under the" +#~ " linked repository and run the next" +#~ " command under Flower main cloned " +#~ "repository folder::" #~ msgstr "" -#~ msgid "Flower driver SDK." +#~ msgid "" +#~ "Flower uses Poetry to build releases." 
+#~ " The necessary command is wrapped in" +#~ " a simple script::" #~ msgstr "" -#~ msgid "driver" +#~ msgid "" +#~ "The resulting :code:`.whl` and :code:`.tar.gz`" +#~ " releases will be stored in the " +#~ ":code:`/dist` subdirectory." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`start_driver `\\ " -#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ "Flower's documentation uses `Sphinx " +#~ "`_. There's no " +#~ "convenience script to re-build the " +#~ "documentation yet, but it's pretty " +#~ "easy::" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`Driver `\\ " -#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ "Some quickstart examples may have " +#~ "limitations or requirements that prevent " +#~ "them from running on every environment." +#~ " For more information, please see " +#~ "`Limitations`_." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`GrpcDriver `\\ " -#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ "Change the application code. For " +#~ "example, change the ``seed`` in " +#~ "``quickstart_docker/task.py`` to ``43`` and " +#~ "save it:" #~ msgstr "" -#~ msgid "`GrpcDriver` provides access to the gRPC Driver API/service." +#~ msgid "" +#~ "All files are revised based on " +#~ ":doc:`Example: PyTorch - From Centralized " +#~ "To Federated `. The only thing" +#~ " to do is modifying the file " +#~ "called :code:`cifar.py`, revised part is " +#~ "shown below:" +#~ msgstr "" + +#~ msgid "" +#~ "If you have read :doc:`Example: PyTorch" +#~ " - From Centralized To Federated " +#~ "`, the following parts are " +#~ "easy to follow, only :code:`get_parameters`" +#~ " and :code:`set_parameters` function in " +#~ ":code:`client.py` needed to revise. If " +#~ "not, please read the :doc:`Example: " +#~ "PyTorch - From Centralized To Federated" +#~ " `. first." #~ msgstr "" -#~ msgid ":py:obj:`get_nodes `\\ \\(\\)" +#~ msgid "" +#~ "Our example consists of one *server* " +#~ "and two *clients*. 
In FedBN, " +#~ ":code:`server.py` keeps unchanged, we can " +#~ "start the server directly." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`pull_task_res " -#~ "`\\ \\(task\\_ids\\)" +#~ "Finally, we will revise our *client* " +#~ "logic by changing :code:`get_parameters` and" +#~ " :code:`set_parameters` in :code:`client.py`, we" +#~ " will exclude batch normalization " +#~ "parameters from model parameter list " +#~ "when sending to or receiving from " +#~ "the server." #~ msgstr "" -#~ msgid "Get task results." +#~ msgid "" +#~ "Let's create a new file called " +#~ ":code:`cifar.py` with all the components " +#~ "required for a traditional (centralized) " +#~ "training on CIFAR-10. First, all " +#~ "required packages (such as :code:`torch` " +#~ "and :code:`torchvision`) need to be " +#~ "imported. You can see that we do" +#~ " not import any package for federated" +#~ " learning. You can keep all these " +#~ "imports as they are even when we" +#~ " add the federated learning components " +#~ "at a later point." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`push_task_ins " -#~ "`\\ " -#~ "\\(task\\_ins\\_list\\)" +#~ "As already mentioned we will use " +#~ "the CIFAR-10 dataset for this machine" +#~ " learning workload. The model architecture" +#~ " (a very simple Convolutional Neural " +#~ "Network) is defined in :code:`class " +#~ "Net()`." #~ msgstr "" -#~ msgid "Schedule tasks." +#~ msgid "" +#~ "The :code:`load_data()` function loads the " +#~ "CIFAR-10 training and test sets. The " +#~ ":code:`transform` normalized the data after" +#~ " loading." #~ msgstr "" -#~ msgid "GrpcDriver" +#~ msgid "" +#~ "We now need to define the training" +#~ " (function :code:`train()`) which loops " +#~ "over the training set, measures the " +#~ "loss, backpropagates it, and then takes" +#~ " one optimizer step for each batch" +#~ " of training examples." 
#~ msgstr "" -#~ msgid ":py:obj:`connect `\\ \\(\\)" +#~ msgid "" +#~ "The evaluation of the model is " +#~ "defined in the function :code:`test()`. " +#~ "The function loops over all test " +#~ "samples and measures the loss of " +#~ "the model based on the test " +#~ "dataset." #~ msgstr "" -#~ msgid "Connect to the Driver API." +#~ msgid "" +#~ "The concept is easy to understand. " +#~ "We have to start a *server* and" +#~ " then use the code in " +#~ ":code:`cifar.py` for the *clients* that " +#~ "are connected to the *server*. The " +#~ "*server* sends model parameters to the" +#~ " clients. The *clients* run the " +#~ "training and update the parameters. The" +#~ " updated parameters are sent back to" +#~ " the *server* which averages all " +#~ "received parameter updates. This describes " +#~ "one round of the federated learning " +#~ "process and we repeat this for " +#~ "multiple rounds." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`create_run " -#~ "`\\ \\(req\\)" +#~ "Our example consists of one *server* " +#~ "and two *clients*. Let's set up " +#~ ":code:`server.py` first. The *server* needs" +#~ " to import the Flower package " +#~ ":code:`flwr`. Next, we use the " +#~ ":code:`start_server` function to start a " +#~ "server and tell it to perform " +#~ "three rounds of federated learning." #~ msgstr "" -#~ msgid "Request for run ID." +#~ msgid "" +#~ "Finally, we will define our *client* " +#~ "logic in :code:`client.py` and build " +#~ "upon the previously defined centralized " +#~ "training in :code:`cifar.py`. Our *client* " +#~ "needs to import :code:`flwr`, but also" +#~ " :code:`torch` to update the parameters " +#~ "on our PyTorch model:" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`disconnect " -#~ "`\\ \\(\\)" +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " :code:`flwr.client.Client` or " +#~ ":code:`flwr.client.NumPyClient`. 
Our implementation " +#~ "will be based on " +#~ ":code:`flwr.client.NumPyClient` and we'll call " +#~ "it :code:`CifarClient`. :code:`NumPyClient` is " +#~ "slightly easier to implement than " +#~ ":code:`Client` if you use a framework" +#~ " with good NumPy interoperability (like " +#~ "PyTorch or TensorFlow/Keras) because it " +#~ "avoids some of the boilerplate that " +#~ "would otherwise be necessary. " +#~ ":code:`CifarClient` needs to implement four" +#~ " methods, two methods for getting/setting" +#~ " model parameters, one method for " +#~ "training the model, and one method " +#~ "for testing the model:" #~ msgstr "" -#~ msgid "Disconnect from the Driver API." +#~ msgid ":code:`set_parameters`" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`get_nodes `\\" -#~ " \\(req\\)" +#~ "loop over the list of model " +#~ "parameters received as NumPy :code:`ndarray`'s" +#~ " (think list of neural network " +#~ "layers)" #~ msgstr "" -#~ msgid "Get client IDs." +#~ msgid ":code:`get_parameters`" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`pull_task_res " -#~ "`\\ \\(req\\)" +#~ "get the model parameters and return " +#~ "them as a list of NumPy " +#~ ":code:`ndarray`'s (which is what " +#~ ":code:`flwr.client.NumPyClient` expects)" #~ msgstr "" -#~ msgid "" -#~ ":py:obj:`push_task_ins " -#~ "`\\ \\(req\\)" +#~ msgid ":code:`fit`" #~ msgstr "" -#~ msgid "" -#~ "Optionally specify the type of actor " -#~ "to use. The actor object, which " -#~ "persists throughout the simulation, will " -#~ "be the process in charge of " -#~ "running the clients' jobs (i.e. their" -#~ " `fit()` method)." +#~ msgid ":code:`evaluate`" #~ msgstr "" #~ msgid "" -#~ "Much effort went into a completely " -#~ "restructured Flower docs experience. The " -#~ "documentation on [flower.ai/docs](flower.ai/docs) is" -#~ " now divided into Flower Framework, " -#~ "Flower Baselines, Flower Android SDK, " -#~ "Flower iOS SDK, and code example " -#~ "projects." 
+#~ "The two :code:`NumPyClient` methods " +#~ ":code:`fit` and :code:`evaluate` make use " +#~ "of the functions :code:`train()` and " +#~ ":code:`test()` previously defined in " +#~ ":code:`cifar.py`. So what we really do" +#~ " here is we tell Flower through " +#~ "our :code:`NumPyClient` subclass which of " +#~ "our already defined functions to call" +#~ " for training and evaluation. We " +#~ "included type annotations to give you" +#~ " a better understanding of the data" +#~ " types that get passed around." #~ msgstr "" - -#~ msgid "" -#~ "The first preview release of Flower " -#~ "Baselines has arrived! We're kickstarting " -#~ "Flower Baselines with implementations of " -#~ "FedOpt (FedYogi, FedAdam, FedAdagrad), FedBN," -#~ " and FedAvgM. Check the documentation " -#~ "on how to use [Flower " -#~ "Baselines](https://flower.ai/docs/using-baselines.html). " -#~ "With this first preview release we're" -#~ " also inviting the community to " -#~ "[contribute their own " -#~ "baselines](https://flower.ai/docs/contributing-baselines.html)." + +#~ msgid "" +#~ "All that's left to do it to " +#~ "define a function that loads both " +#~ "model and data, creates a " +#~ ":code:`CifarClient`, and starts this client." +#~ " You load your data and model " +#~ "by using :code:`cifar.py`. Start " +#~ ":code:`CifarClient` with the function " +#~ ":code:`fl.client.start_client()` by pointing it " +#~ "at the same IP address we used " +#~ "in :code:`server.py`:" #~ msgstr "" #~ msgid "" -#~ "Flower usage examples used to be " -#~ "bundled with Flower in a package " -#~ "called ``flwr_example``. We are migrating " -#~ "those examples to standalone projects to" -#~ " make them easier to use. All " -#~ "new examples are based in the " -#~ "directory `examples " -#~ "`_." 
+#~ "\\small\n" +#~ "\\frac{∆ \\times \\sqrt{2 \\times " +#~ "\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" +#~ "\n" #~ msgstr "" -#~ msgid "The following examples are available as standalone projects." +#~ msgid "" +#~ "The :code:`Strategy` abstraction provides a" +#~ " method called :code:`evaluate` that can" +#~ " directly be used to evaluate the " +#~ "current global model parameters. The " +#~ "current server implementation calls " +#~ ":code:`evaluate` after parameter aggregation " +#~ "and before federated evaluation (see " +#~ "next paragraph)." #~ msgstr "" -#~ msgid "Quickstart TensorFlow/Keras" +#~ msgid "" +#~ "Client-side evaluation happens in the" +#~ " :code:`Client.evaluate` method and can be" +#~ " configured from the server side." #~ msgstr "" #~ msgid "" -#~ "`Quickstart TensorFlow (Tutorial) " -#~ "`_" +#~ ":code:`fraction_evaluate`: a :code:`float` defining" +#~ " the fraction of clients that will" +#~ " be selected for evaluation. If " +#~ ":code:`fraction_evaluate` is set to " +#~ ":code:`0.1` and :code:`100` clients are " +#~ "connected to the server, then :code:`10`" +#~ " will be randomly selected for " +#~ "evaluation. If :code:`fraction_evaluate` is " +#~ "set to :code:`0.0`, federated evaluation " +#~ "will be disabled." #~ msgstr "" #~ msgid "" -#~ "`Quickstart PyTorch (Tutorial) " -#~ "`_" +#~ ":code:`min_evaluate_clients`: an :code:`int`: the" +#~ " minimum number of clients to be " +#~ "selected for evaluation. If " +#~ ":code:`fraction_evaluate` is set to " +#~ ":code:`0.1`, :code:`min_evaluate_clients` is set " +#~ "to 20, and :code:`100` clients are " +#~ "connected to the server, then :code:`20`" +#~ " clients will be selected for " +#~ "evaluation." 
#~ msgstr "" #~ msgid "" -#~ "`PyTorch: From Centralized To Federated " -#~ "(Tutorial) `_" +#~ ":code:`min_available_clients`: an :code:`int` that" +#~ " defines the minimum number of " +#~ "clients which need to be connected " +#~ "to the server before a round of" +#~ " federated evaluation can start. If " +#~ "fewer than :code:`min_available_clients` are " +#~ "connected to the server, the server " +#~ "will wait until more clients are " +#~ "connected before it continues to sample" +#~ " clients for evaluation." #~ msgstr "" -#~ msgid "Legacy Examples (`flwr_example`)" +#~ msgid "" +#~ ":code:`on_evaluate_config_fn`: a function that " +#~ "returns a configuration dictionary which " +#~ "will be sent to the selected " +#~ "clients. The function will be called " +#~ "during each round and provides a " +#~ "convenient way to customize client-side" +#~ " evaluation from the server side, for" +#~ " example, to configure the number of" +#~ " validation steps performed." #~ msgstr "" #~ msgid "" -#~ "The useage examples in `flwr_example` " -#~ "are deprecated and will be removed " -#~ "in the future. New examples are " -#~ "provided as standalone projects in " -#~ "`examples `_." +#~ "Model parameters can also be evaluated" +#~ " during training. :code:`Client.fit` can " +#~ "return arbitrary evaluation results as a" +#~ " dictionary:" #~ msgstr "" -#~ msgid "Extra Dependencies" +#~ msgid "" +#~ "The same :code:`Strategy`-customization approach " +#~ "can be used to aggregate custom " +#~ "evaluation results coming from individual " +#~ "clients. Clients can return custom " +#~ "metrics to the server by returning " +#~ "a dictionary:" +#~ msgstr "" + +#~ msgid "Enable node authentication in :code:`SuperLink`" #~ msgstr "" #~ msgid "" -#~ "The core Flower framework keeps a " -#~ "minimal set of dependencies. 
The " -#~ "examples demonstrate Flower in the " -#~ "context of different machine learning " -#~ "frameworks, so additional dependencies need" -#~ " to be installed before an example" -#~ " can be run." +#~ "To enable node authentication, first you" +#~ " need to configure SSL/TLS connections " +#~ "to secure the SuperLink<>SuperNode " +#~ "communication. You can find the complete" +#~ " guide `here `_. After " +#~ "configuring secure connections, you can " +#~ "enable client authentication in a " +#~ "long-running Flower :code:`SuperLink`. Use " +#~ "the following terminal command to start" +#~ " a Flower :code:`SuperNode` that has " +#~ "both secure connections and node " +#~ "authentication enabled:" #~ msgstr "" -#~ msgid "For PyTorch examples::" +#~ msgid "" +#~ "The first flag :code:`--auth-list-" +#~ "public-keys` expects a path to a " +#~ "CSV file storing all known node " +#~ "public keys. You need to store all" +#~ " known node public keys that are " +#~ "allowed to participate in a federation" +#~ " in one CSV file (:code:`.csv`)." #~ msgstr "" -#~ msgid "For TensorFlow examples::" +#~ msgid "" +#~ "The second and third flags :code" +#~ ":`--auth-superlink-private-key` and :code" +#~ ":`--auth-superlink-public-key` expect paths" +#~ " to the server's private and public" +#~ " keys. For development purposes, you " +#~ "can generate a private and public " +#~ "key pair using :code:`ssh-keygen -t " +#~ "ecdsa -b 384`." #~ msgstr "" -#~ msgid "For both PyTorch and TensorFlow examples::" +#~ msgid "Enable node authentication in :code:`SuperNode`" #~ msgstr "" #~ msgid "" -#~ "Please consult :code:`pyproject.toml` for a" -#~ " full list of possible extras " -#~ "(section :code:`[tool.poetry.extras]`)." +#~ "Similar to the long-running Flower " +#~ "server (:code:`SuperLink`), you can easily " +#~ "enable node authentication in the " +#~ "long-running Flower client (:code:`SuperNode`)." 
+#~ " Use the following terminal command " +#~ "to start an authenticated :code:`SuperNode`:" #~ msgstr "" -#~ msgid "PyTorch Examples" +#~ msgid "" +#~ "The :code:`--auth-supernode-private-key` " +#~ "flag expects a path to the node's" +#~ " private key file and the :code" +#~ ":`--auth-supernode-public-key` flag expects" +#~ " a path to the node's public " +#~ "key file. For development purposes, you" +#~ " can generate a private and public" +#~ " key pair using :code:`ssh-keygen -t" +#~ " ecdsa -b 384`." #~ msgstr "" #~ msgid "" -#~ "Our PyTorch examples are based on " -#~ "PyTorch 1.7. They should work with " -#~ "other releases as well. So far, we" -#~ " provide the following examples." +#~ "You should now have learned how to" +#~ " start a long-running Flower server" +#~ " (:code:`SuperLink`) and client " +#~ "(:code:`SuperNode`) with node authentication " +#~ "enabled. You should also know the " +#~ "significance of the private key and " +#~ "store it safely to minimize security " +#~ "risks." #~ msgstr "" -#~ msgid "CIFAR-10 Image Classification" +#~ msgid "" +#~ "The easiest way to send configuration" +#~ " values to clients is to use a" +#~ " built-in strategy like :code:`FedAvg`. " +#~ "Built-in strategies support so-called" +#~ " configuration functions. A configuration " +#~ "function is a function that the " +#~ "built-in strategy calls to get the" +#~ " configuration dictionary for the current" +#~ " round. It then forwards the " +#~ "configuration dictionary to all the " +#~ "clients selected during that round." #~ msgstr "" #~ msgid "" -#~ "`CIFAR-10 and CIFAR-100 " -#~ "`_ are " -#~ "popular RGB image datasets. The Flower" -#~ " CIFAR-10 example uses PyTorch to " -#~ "train a simple CNN classifier in a" -#~ " federated learning setup with two " -#~ "clients." 
+#~ "To make the built-in strategies " +#~ "use this function, we can pass it" +#~ " to ``FedAvg`` during initialization using" +#~ " the parameter :code:`on_fit_config_fn`:" #~ msgstr "" -#~ msgid "First, start a Flower server:" +#~ msgid "The :code:`FedAvg` strategy will call this function *every round*." #~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +#~ msgid "" +#~ "This can be achieved by customizing " +#~ "an existing strategy or by " +#~ ":doc:`implementing a custom strategy from " +#~ "scratch `. " +#~ "Here's a nonsensical example that " +#~ "customizes :code:`FedAvg` by adding a " +#~ "custom ``\"hello\": \"world\"`` configuration " +#~ "key/value pair to the config dict " +#~ "of a *single client* (only the " +#~ "first client in the list, the " +#~ "other clients in this round to not" +#~ " receive this \"special\" config value):" +#~ msgstr "" + +#~ msgid "" +#~ "containing relevant information including: log" +#~ " message level (e.g. :code:`INFO`, " +#~ ":code:`DEBUG`), a timestamp, the line " +#~ "where the logging took place from, " +#~ "as well as the log message itself." +#~ " In this way, the logger would " +#~ "typically display information on your " +#~ "terminal as follows:" +#~ msgstr "" + +#~ msgid "" +#~ "By default, the Flower log is " +#~ "outputted to the terminal where you " +#~ "launch your Federated Learning workload " +#~ "from. This applies for both gRPC-" +#~ "based federation (i.e. when you do " +#~ ":code:`fl.server.start_server`) and when using " +#~ "the :code:`VirtualClientEngine` (i.e. when you" +#~ " do :code:`fl.simulation.start_simulation`). In " +#~ "some situations you might want to " +#~ "save this log to disk. You can " +#~ "do so by calling the " +#~ "`fl.common.logger.configure() " +#~ "`_" +#~ " function. For example:" +#~ msgstr "" + +#~ msgid "" +#~ "With the above, Flower will record " +#~ "the log you see on your terminal" +#~ " to :code:`log.txt`. 
This file will " +#~ "be created in the same directory " +#~ "as were you are running the code" +#~ " from. If we inspect we see the" +#~ " log above is also recorded but " +#~ "prefixing with :code:`identifier` each line:" +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`fl.common.logger.configure` function, " +#~ "also allows specifying a host to " +#~ "which logs can be pushed (via " +#~ ":code:`POST`) through a native Python " +#~ ":code:`logging.handler.HTTPHandler`. This is a " +#~ "particularly useful feature in " +#~ ":code:`gRPC`-based Federated Learning workloads " +#~ "where otherwise gathering logs from all" +#~ " entities (i.e. the server and the" +#~ " clients) might be cumbersome. Note " +#~ "that in Flower simulation, the server" +#~ " automatically displays all logs. You " +#~ "can still specify a :code:`HTTPHandler` " +#~ "should you wish to backup or " +#~ "analyze the logs somewhere else." #~ msgstr "" -#~ msgid "Then, start the two clients in a new terminal window:" +#~ msgid "" +#~ "This guide describes how to a " +#~ "SSL-enabled secure Flower server " +#~ "(:code:`SuperLink`) can be started and " +#~ "how a Flower client (:code:`SuperNode`) " +#~ "can establish a secure connections to" +#~ " it." #~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +#~ msgid "" +#~ "The code example comes with a " +#~ ":code:`README.md` file which explains how " +#~ "to start it. Although it is " +#~ "already SSL-enabled, it might be " +#~ "less descriptive on how it does " +#~ "so. Stick to this guide for a " +#~ "deeper introduction to the topic." #~ msgstr "" -#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." +#~ msgid "" +#~ "Using SSL-enabled connections requires " +#~ "certificates to be passed to the " +#~ "server and client. For the purpose " +#~ "of this guide we are going to " +#~ "generate self-signed certificates. 
As " +#~ "this can become quite complex we " +#~ "are going to ask you to run " +#~ "the script in :code:`examples/advanced-" +#~ "tensorflow/certificates/generate.sh` with the " +#~ "following command sequence:" #~ msgstr "" -#~ msgid "ImageNet-2012 Image Classification" +#~ msgid "" +#~ "This will generate the certificates in" +#~ " :code:`examples/advanced-tensorflow/.cache/certificates`." #~ msgstr "" #~ msgid "" -#~ "`ImageNet-2012 `_ is " -#~ "one of the major computer vision " -#~ "datasets. The Flower ImageNet example " -#~ "uses PyTorch to train a ResNet-18 " -#~ "classifier in a federated learning setup" -#~ " with ten clients." +#~ "When setting :code:`root_certificates`, the " +#~ "client expects a file path to " +#~ "PEM-encoded root certificates." #~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" +#~ msgid "The :code:`Strategy` abstraction" #~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +#~ msgid "" +#~ "All strategy implementation are derived " +#~ "from the abstract base class " +#~ ":code:`flwr.server.strategy.Strategy`, both built-in" +#~ " implementations and third party " +#~ "implementations. This means that custom " +#~ "strategy implementations have the exact " +#~ "same capabilities at their disposal as" +#~ " built-in ones." #~ msgstr "" -#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." +#~ msgid "" +#~ "Creating a new strategy means " +#~ "implementing a new :code:`class` (derived " +#~ "from the abstract base class " +#~ ":code:`Strategy`) that implements for the " +#~ "previously shown abstract methods:" #~ msgstr "" -#~ msgid "TensorFlow Examples" +#~ msgid "The :code:`initialize_parameters` method" #~ msgstr "" #~ msgid "" -#~ "Our TensorFlow examples are based on " -#~ "TensorFlow 2.0 or newer. So far, " -#~ "we provide the following examples." 
+#~ ":code:`initialize_parameters` is called only " +#~ "once, at the very beginning of an" +#~ " execution. It is responsible for " +#~ "providing the initial global model " +#~ "parameters in a serialized form (i.e.," +#~ " as a :code:`Parameters` object)." #~ msgstr "" -#~ msgid "Fashion-MNIST Image Classification" +#~ msgid "" +#~ "Built-in strategies return user-provided" +#~ " initial parameters. The following example" +#~ " shows how initial parameters can be" +#~ " passed to :code:`FedAvg`:" #~ msgstr "" #~ msgid "" -#~ "`Fashion-MNIST `_ is often used as " -#~ "the \"Hello, world!\" of machine " -#~ "learning. We follow this tradition and" -#~ " provide an example which samples " -#~ "random local datasets from Fashion-MNIST" -#~ " and trains a simple image " -#~ "classification model over those partitions." +#~ "The Flower server will call " +#~ ":code:`initialize_parameters`, which either returns" +#~ " the parameters that were passed to" +#~ " :code:`initial_parameters`, or :code:`None`. If" +#~ " no parameters are returned from " +#~ ":code:`initialize_parameters` (i.e., :code:`None`), " +#~ "the server will randomly select one " +#~ "client and ask it to provide its" +#~ " parameters. This is a convenience " +#~ "feature and not recommended in practice," +#~ " but it can be useful for " +#~ "prototyping. In practice, it is " +#~ "recommended to always use server-side" +#~ " parameter initialization." #~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" +#~ msgid "The :code:`configure_fit` method" #~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +#~ msgid "" +#~ ":code:`configure_fit` is responsible for " +#~ "configuring the upcoming round of " +#~ "training. What does *configure* mean in" +#~ " this context? Configuring a round " +#~ "means selecting clients and deciding " +#~ "what instructions to send to these " +#~ "clients. 
The signature of " +#~ ":code:`configure_fit` makes this clear:" #~ msgstr "" #~ msgid "" -#~ "For more details, see " -#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." +#~ "The return value is a list of " +#~ "tuples, each representing the instructions " +#~ "that will be sent to a particular" +#~ " client. Strategy implementations usually " +#~ "perform the following steps in " +#~ ":code:`configure_fit`:" #~ msgstr "" -#~ msgid ":fa:`eye,mr-1` Can Flower run on Juptyter Notebooks / Google Colab?" +#~ msgid "" +#~ "Use the :code:`client_manager` to randomly " +#~ "sample all (or a subset of) " +#~ "available clients (each represented as a" +#~ " :code:`ClientProxy` object)" #~ msgstr "" #~ msgid "" -#~ "`Flower meets KOSMoS `_." +#~ "Pair each :code:`ClientProxy` with the " +#~ "same :code:`FitIns` holding the current " +#~ "global model :code:`parameters` and " +#~ ":code:`config` dict" #~ msgstr "" #~ msgid "" -#~ "If you want to check out " -#~ "everything put together, you should " -#~ "check out the full code example: " -#~ "[https://github.com/adap/flower/tree/main/examples/quickstart-" -#~ "huggingface](https://github.com/adap/flower/tree/main/examples" -#~ "/quickstart-huggingface)." +#~ "More sophisticated implementations can use " +#~ ":code:`configure_fit` to implement custom " +#~ "client selection logic. A client will" +#~ " only participate in a round if " +#~ "the corresponding :code:`ClientProxy` is " +#~ "included in the list returned from " +#~ ":code:`configure_fit`." #~ msgstr "" #~ msgid "" -#~ "First of all, for running the " -#~ "Flower Python server, it is recommended" -#~ " to create a virtual environment and" -#~ " run everything within a `virtualenv " -#~ "`_. " -#~ "For the Flower client implementation in" -#~ " iOS, it is recommended to use " -#~ "Xcode as our IDE." +#~ "The structure of this return value " +#~ "provides a lot of flexibility to " +#~ "the user. 
Since instructions are defined" +#~ " on a per-client basis, different " +#~ "instructions can be sent to each " +#~ "client. This enables custom strategies " +#~ "to train, for example, different models" +#~ " on different clients, or use " +#~ "different hyperparameters on different clients" +#~ " (via the :code:`config` dict)." +#~ msgstr "" + +#~ msgid "The :code:`aggregate_fit` method" #~ msgstr "" #~ msgid "" -#~ "Since CoreML does not allow the " -#~ "model parameters to be seen before " -#~ "training, and accessing the model " -#~ "parameters during or after the training" -#~ " can only be done by specifying " -#~ "the layer name, we need to know" -#~ " this informations beforehand, through " -#~ "looking at the model specification, " -#~ "which are written as proto files. " -#~ "The implementation can be seen in " -#~ ":code:`MLModelInspect`." +#~ ":code:`aggregate_fit` is responsible for " +#~ "aggregating the results returned by the" +#~ " clients that were selected and asked" +#~ " to train in :code:`configure_fit`." #~ msgstr "" #~ msgid "" -#~ "After we have all of the necessary" -#~ " informations, let's create our Flower " -#~ "client." +#~ "Of course, failures can happen, so " +#~ "there is no guarantee that the " +#~ "server will get results from all " +#~ "the clients it sent instructions to " +#~ "(via :code:`configure_fit`). :code:`aggregate_fit` " +#~ "therefore receives a list of " +#~ ":code:`results`, but also a list of " +#~ ":code:`failures`." #~ msgstr "" #~ msgid "" -#~ "MXNet is no longer maintained and " -#~ "has been moved into `Attic " -#~ "`_. As a " -#~ "result, we would encourage you to " -#~ "use other ML frameworks alongise Flower," -#~ " for example, PyTorch. This tutorial " -#~ "might be removed in future versions " -#~ "of Flower." +#~ ":code:`aggregate_fit` returns an optional " +#~ ":code:`Parameters` object and a dictionary " +#~ "of aggregated metrics. 
The :code:`Parameters`" +#~ " return value is optional because " +#~ ":code:`aggregate_fit` might decide that the" +#~ " results provided are not sufficient " +#~ "for aggregation (e.g., too many " +#~ "failures)." +#~ msgstr "" + +#~ msgid "The :code:`configure_evaluate` method" #~ msgstr "" #~ msgid "" -#~ "It is recommended to create a " -#~ "virtual environment and run everything " -#~ "within this `virtualenv `_." +#~ ":code:`configure_evaluate` is responsible for " +#~ "configuring the upcoming round of " +#~ "evaluation. What does *configure* mean " +#~ "in this context? Configuring a round " +#~ "means selecting clients and deciding " +#~ "what instructions to send to these " +#~ "clients. The signature of " +#~ ":code:`configure_evaluate` makes this clear:" #~ msgstr "" #~ msgid "" -#~ "First of all, it is recommended to" -#~ " create a virtual environment and run" -#~ " everything within a `virtualenv " -#~ "`_." +#~ "The return value is a list of " +#~ "tuples, each representing the instructions " +#~ "that will be sent to a particular" +#~ " client. Strategy implementations usually " +#~ "perform the following steps in " +#~ ":code:`configure_evaluate`:" #~ msgstr "" -#~ msgid "Since we want to use scikt-learn, let's go ahead and install it:" +#~ msgid "" +#~ "Pair each :code:`ClientProxy` with the " +#~ "same :code:`EvaluateIns` holding the current" +#~ " global model :code:`parameters` and " +#~ ":code:`config` dict" #~ msgstr "" #~ msgid "" -#~ "We load the MNIST dataset from " -#~ "`OpenML `_, a popular" -#~ " image classification dataset of " -#~ "handwritten digits for machine learning. " -#~ "The utility :code:`utils.load_mnist()` downloads " -#~ "the training and test data. The " -#~ "training set is split afterwards into" -#~ " 10 partitions with :code:`utils.partition()`." +#~ "More sophisticated implementations can use " +#~ ":code:`configure_evaluate` to implement custom " +#~ "client selection logic. 
A client will" +#~ " only participate in a round if " +#~ "the corresponding :code:`ClientProxy` is " +#~ "included in the list returned from " +#~ ":code:`configure_evaluate`." #~ msgstr "" #~ msgid "" -#~ "Now that you have known how " -#~ "federated XGBoost work with Flower, it's" -#~ " time to run some more comprehensive" -#~ " experiments by customising the " -#~ "experimental settings. In the xgboost-" -#~ "comprehensive example (`full code " -#~ "`_), we provide more options " -#~ "to define various experimental setups, " -#~ "including aggregation strategies, data " -#~ "partitioning and centralised/distributed evaluation." -#~ " We also support `Flower simulation " -#~ "`_ making it easy to " -#~ "simulate large client cohorts in a " -#~ "resource-aware manner. Let's take a " -#~ "look!" +#~ "The structure of this return value " +#~ "provides a lot of flexibility to " +#~ "the user. Since instructions are defined" +#~ " on a per-client basis, different " +#~ "instructions can be sent to each " +#~ "client. This enables custom strategies " +#~ "to evaluate, for example, different " +#~ "models on different clients, or use " +#~ "different hyperparameters on different clients" +#~ " (via the :code:`config` dict)." #~ msgstr "" -#~ msgid "|31e4b1afa87c4b968327bbeafbf184d4|" +#~ msgid "The :code:`aggregate_evaluate` method" #~ msgstr "" -#~ msgid "|c9d935b4284e4c389a33d86b33e07c0a|" +#~ msgid "" +#~ ":code:`aggregate_evaluate` is responsible for " +#~ "aggregating the results returned by the" +#~ " clients that were selected and asked" +#~ " to evaluate in :code:`configure_evaluate`." #~ msgstr "" -#~ msgid "|00727b5faffb468f84dd1b03ded88638|" +#~ msgid "" +#~ "Of course, failures can happen, so " +#~ "there is no guarantee that the " +#~ "server will get results from all " +#~ "the clients it sent instructions to " +#~ "(via :code:`configure_evaluate`). 
" +#~ ":code:`aggregate_evaluate` therefore receives a " +#~ "list of :code:`results`, but also a " +#~ "list of :code:`failures`." #~ msgstr "" -#~ msgid "|daf0cf0ff4c24fd29439af78416cf47b|" +#~ msgid "" +#~ ":code:`aggregate_evaluate` returns an optional " +#~ ":code:`float` (loss) and a dictionary of" +#~ " aggregated metrics. The :code:`float` " +#~ "return value is optional because " +#~ ":code:`aggregate_evaluate` might decide that " +#~ "the results provided are not sufficient" +#~ " for aggregation (e.g., too many " +#~ "failures)." #~ msgstr "" -#~ msgid "|9f093007080d471d94ca90d3e9fde9b6|" +#~ msgid "The :code:`evaluate` method" #~ msgstr "" -#~ msgid "|46a26e6150e0479fbd3dfd655f36eb13|" +#~ msgid "" +#~ ":code:`evaluate` is responsible for evaluating" +#~ " model parameters on the server-side." +#~ " Having :code:`evaluate` in addition to " +#~ ":code:`configure_evaluate`/:code:`aggregate_evaluate` enables" +#~ " strategies to perform both servers-" +#~ "side and client-side (federated) " +#~ "evaluation." #~ msgstr "" -#~ msgid "|3daba297595c4c7fb845d90404a6179a|" +#~ msgid "" +#~ "The return value is again optional " +#~ "because the strategy might not need " +#~ "to implement server-side evaluation or" +#~ " because the user-defined :code:`evaluate`" +#~ " method might not complete successfully " +#~ "(e.g., it might fail to load the" +#~ " server-side evaluation data)." 
#~ msgstr "" -#~ msgid "|5769874fa9c4455b80b2efda850d39d7|" +#~ msgid "" +#~ "Stable releases are available on `PyPI" +#~ " `_::" #~ msgstr "" -#~ msgid "|ba47ffb421814b0f8f9fa5719093d839|" +#~ msgid "" +#~ "For simulations that use the Virtual " +#~ "Client Engine, ``flwr`` should be " +#~ "installed with the ``simulation`` extra::" #~ msgstr "" -#~ msgid "|aeac5bf79cbf497082e979834717e01b|" +#~ msgid "" +#~ "If you have not added ``conda-" +#~ "forge`` to your channels, you will " +#~ "first need to run the following::" #~ msgstr "" -#~ msgid "|ce27ed4bbe95459dba016afc42486ba2|" +#~ msgid "" +#~ "Once the ``conda-forge`` channel has " +#~ "been enabled, ``flwr`` can be installed" +#~ " with ``conda``::" #~ msgstr "" -#~ msgid "|ae94a7f71dda443cbec2385751427d41|" +#~ msgid "or with ``mamba``::" #~ msgstr "" -#~ msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" +#~ msgid "" +#~ "New (possibly unstable) versions of " +#~ "Flower are sometimes available as " +#~ "pre-release versions (alpha, beta, release" +#~ " candidate) before the stable release " +#~ "happens::" #~ msgstr "" -#~ msgid "|08cb60859b07461588fe44e55810b050|" +#~ msgid "" +#~ "For simulations that use the Virtual " +#~ "Client Engine, ``flwr`` pre-releases " +#~ "should be installed with the " +#~ "``simulation`` extra::" #~ msgstr "" -#~ msgid "``BASE_IMAGE_TAG``" -#~ msgstr "``BASE_IMAGE_TAG``" +#~ msgid "" +#~ "The latest (potentially unstable) changes " +#~ "in Flower are available as nightly " +#~ "releases::" +#~ msgstr "" -#~ msgid "The image tag of the base image." -#~ msgstr "A tag da imagem da imagem base." +#~ msgid "" +#~ "For simulations that use the Virtual " +#~ "Client Engine, ``flwr-nightly`` should " +#~ "be installed with the ``simulation`` " +#~ "extra::" +#~ msgstr "" + +#~ msgid "You can look at everything at ``_ ." 
+#~ msgstr "" #~ msgid "" -#~ "Open the notebook ``doc/source/tutorial-" -#~ "get-started-with-flower-pytorch.ipynb``:" +#~ "After you finish the visualization, stop" +#~ " Prometheus and Grafana. This is " +#~ "important as they will otherwise block," +#~ " for example port :code:`3000` on " +#~ "your machine as long as they are" +#~ " running." #~ msgstr "" #~ msgid "" -#~ "https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -#~ "/tutorial-get-started-with-flower-" -#~ "pytorch.ipynb" +#~ "In the example above, only one " +#~ "client will be run, so your " +#~ "clients won't run concurrently. Setting " +#~ ":code:`client_num_gpus = 0.5` would allow " +#~ "running two clients and therefore enable" +#~ " them to run concurrently. Be careful" +#~ " not to require more resources than" +#~ " available. If you specified " +#~ ":code:`client_num_gpus = 2`, the simulation" +#~ " wouldn't start (even if you had " +#~ "2 GPUs but decided to set 1 " +#~ "in :code:`ray_init_args`)." #~ msgstr "" #~ msgid "" -#~ "https://colab.research.google.com/github/adap/flower/blob/branch-" -#~ "name/doc/source/tutorial-get-started-with-" -#~ "flower-pytorch.ipynb" +#~ "Q: I see \"This site can't be " +#~ "reached\" when going to " +#~ "``_." +#~ msgstr "" + +#~ msgid "" +#~ "Ray Dashboard: ``_" #~ msgstr "" -#~ msgid "Virutualenv with Pyenv/Virtualenv" +#~ msgid "Ray Metrics: ``_" #~ msgstr "" #~ msgid "" -#~ "It is important to follow the " -#~ "instructions described in comments. For " -#~ "instance, in order to not break " -#~ "how our changelog system works, you " -#~ "should read the information above the" -#~ " ``Changelog entry`` section carefully. You" -#~ " can also checkout some examples and" -#~ " details in the :ref:`changelogentry` " -#~ "appendix." -#~ msgstr "" - -#~ msgid "Open a PR (as shown above)" +#~ "The :code:`VirtualClientEngine` schedules, launches" +#~ " and manages `virtual` clients. 
These " +#~ "clients are identical to `non-virtual`" +#~ " clients (i.e. the ones you launch" +#~ " via the command `flwr.client.start_client " +#~ "`_) in the" +#~ " sense that they can be configure " +#~ "by creating a class inheriting, for " +#~ "example, from `flwr.client.NumPyClient `_ and therefore" +#~ " behave in an identical way. In " +#~ "addition to that, clients managed by " +#~ "the :code:`VirtualClientEngine` are:" #~ msgstr "" -#~ msgid "How to write a good PR title" +#~ msgid "" +#~ "self-managed: this means that you " +#~ "as a user do not need to " +#~ "launch clients manually, instead this " +#~ "gets delegated to :code:`VirtualClientEngine`'s " +#~ "internals." #~ msgstr "" #~ msgid "" -#~ "A well-crafted PR title helps team" -#~ " members quickly understand the purpose " -#~ "and scope of the changes being " -#~ "proposed. Here's a guide to help " -#~ "you write a good GitHub PR title:" +#~ "The :code:`VirtualClientEngine` implements `virtual`" +#~ " clients using `Ray `_, " +#~ "an open-source framework for scalable" +#~ " Python workloads. In particular, Flower's" +#~ " :code:`VirtualClientEngine` makes use of " +#~ "`Actors `_ to spawn `virtual` clients" +#~ " and run their workload." #~ msgstr "" #~ msgid "" -#~ "1. Be Clear and Concise: Provide a" -#~ " clear summary of the changes in " -#~ "a concise manner. 1. Use Actionable " -#~ "Verbs: Start with verbs like \"Add,\"" -#~ " \"Update,\" or \"Fix\" to indicate " -#~ "the purpose. 1. Include Relevant " -#~ "Information: Mention the affected feature " -#~ "or module for context. 1. Keep it" -#~ " Short: Avoid lengthy titles for easy" -#~ " readability. 1. Use Proper Capitalization" -#~ " and Punctuation: Follow grammar rules " -#~ "for clarity." +#~ "By default the VCE has access to" +#~ " all system resources (i.e. all CPUs," +#~ " all GPUs, etc) since that is " +#~ "also the default behavior when starting" +#~ " Ray. 
However, in some settings you" +#~ " might want to limit how many " +#~ "of your system resources are used " +#~ "for simulation. You can do this " +#~ "via the :code:`ray_init_args` input argument" +#~ " to :code:`start_simulation` which the VCE" +#~ " internally passes to Ray's " +#~ ":code:`ray.init` command. For a complete " +#~ "list of settings you can configure " +#~ "check the `ray.init `_ " +#~ "documentation. Do not set " +#~ ":code:`ray_init_args` if you want the " +#~ "VCE to use all your system's CPUs" +#~ " and GPUs." #~ msgstr "" #~ msgid "" -#~ "Let's start with a few examples " -#~ "for titles that should be avoided " -#~ "because they do not provide meaningful" -#~ " information:" +#~ "By default the :code:`VirtualClientEngine` " +#~ "assigns a single CPU core (and " +#~ "nothing else) to each virtual client." +#~ " This means that if your system " +#~ "has 10 cores, that many virtual " +#~ "clients can be concurrently running." #~ msgstr "" -#~ msgid "Implement Algorithm" +#~ msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." #~ msgstr "" -#~ msgid "Database" +#~ msgid "" +#~ ":code:`num_gpus` indicates the **ratio** of" +#~ " GPU memory a client gets assigned." #~ msgstr "" -#~ msgid "Add my_new_file.py to codebase" +#~ msgid "" +#~ "While the :code:`client_resources` can be " +#~ "used to control the degree of " +#~ "concurrency in your FL simulation, this" +#~ " does not stop you from running " +#~ "dozens, hundreds or even thousands of" +#~ " clients in the same round and " +#~ "having orders of magnitude more " +#~ "`dormant` (i.e. not participating in a" +#~ " round) clients. Let's say you want" +#~ " to have 100 clients per round " +#~ "but your system can only accommodate " +#~ "8 clients concurrently. 
The " +#~ ":code:`VirtualClientEngine` will schedule 100 " +#~ "jobs to run (each simulating a " +#~ "client sampled by the strategy) and " +#~ "then will execute them in a " +#~ "resource-aware manner in batches of " +#~ "8." #~ msgstr "" -#~ msgid "Improve code in module" +#~ msgid "" +#~ "Flower's :code:`VirtualClientEngine` allows you " +#~ "to run FL simulations across multiple" +#~ " compute nodes. Before starting your " +#~ "multi-node simulation ensure that you:" #~ msgstr "" -#~ msgid "Change SomeModule" +#~ msgid "" +#~ "Pass :code:`ray_init_args={\"address\"=\"auto\"}` to " +#~ "`start_simulation `_ so the " +#~ ":code:`VirtualClientEngine` attaches to a " +#~ "running Ray instance." #~ msgstr "" #~ msgid "" -#~ "Here are a few positive examples " -#~ "which provide helpful information without " -#~ "repeating how they do it, as that" -#~ " is already visible in the \"Files" -#~ " changed\" section of the PR:" +#~ "Start Ray on you head node: on " +#~ "the terminal type :code:`ray start " +#~ "--head`. This command will print a " +#~ "few lines, one of which indicates " +#~ "how to attach other nodes to the" +#~ " head node." #~ msgstr "" -#~ msgid "Update docs banner to mention Flower Summit 2023" +#~ msgid "" +#~ "Attach other nodes to the head " +#~ "node: copy the command shown after " +#~ "starting the head and execute it " +#~ "on terminal of a new node: for " +#~ "example :code:`ray start " +#~ "--address='192.168.1.132:6379'`" #~ msgstr "" -#~ msgid "Remove unnecessary XGBoost dependency" +#~ msgid "" +#~ "Once your simulation is finished, if " +#~ "you'd like to dismantle your cluster " +#~ "you simply need to run the command" +#~ " :code:`ray stop` in each node's " +#~ "terminal (including the head node)." 
#~ msgstr "" -#~ msgid "Remove redundant attributes in strategies subclassing FedAvg" +#~ msgid "" +#~ "User :code:`ray status` to check all " +#~ "nodes connected to your head node " +#~ "as well as the total resources " +#~ "available to the :code:`VirtualClientEngine`." #~ msgstr "" #~ msgid "" -#~ "Add CI job to deploy the staging" -#~ " system when the ``main`` branch " -#~ "changes" +#~ "When attaching a new node to the" +#~ " head, all its resources (i.e. all" +#~ " CPUs, all GPUs) will be visible " +#~ "by the head node. This means that" +#~ " the :code:`VirtualClientEngine` can schedule " +#~ "as many `virtual` clients as that " +#~ "node can possible run. In some " +#~ "settings you might want to exclude " +#~ "certain resources from the simulation. " +#~ "You can do this by appending " +#~ "`--num-cpus=` and/or `--num-" +#~ "gpus=` in any :code:`ray " +#~ "start` command (including when starting " +#~ "the head)" #~ msgstr "" #~ msgid "" -#~ "Add new amazing library which will " -#~ "be used to improve the simulation " -#~ "engine" +#~ "The VCE assigns a share of GPU " +#~ "memory to a client that specifies " +#~ "the key :code:`num_gpus` in " +#~ ":code:`client_resources`. This being said, Ray" +#~ " (used internally by the VCE) is " +#~ "by default:" #~ msgstr "" -#~ msgid "Changelog entry" +#~ msgid "" +#~ "not aware of the total VRAM " +#~ "available on the GPUs. This means " +#~ "that if you set :code:`num_gpus=0.5` and" +#~ " you have two GPUs in your " +#~ "system with different (e.g. 32GB and " +#~ "8GB) VRAM amounts, they both would " +#~ "run 2 clients concurrently." #~ msgstr "" #~ msgid "" -#~ "When opening a new PR, inside its" -#~ " description, there should be a " -#~ "``Changelog entry`` header." +#~ "If you want to run several " +#~ "independent Flower simulations on the " +#~ "same machine you need to mask-out" +#~ " your GPUs with " +#~ ":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching" +#~ " your experiment." 
#~ msgstr "" #~ msgid "" -#~ "Above this header you should see " -#~ "the following comment that explains how" -#~ " to write your changelog entry:" +#~ "In addition, the GPU resource limits " +#~ "passed to :code:`client_resources` are not " +#~ "`enforced` (i.e. they can be exceeded)" +#~ " which can result in the situation" +#~ " of client using more VRAM than " +#~ "the ratio specified when starting the" +#~ " simulation." #~ msgstr "" #~ msgid "" -#~ "Inside the following 'Changelog entry' " -#~ "section, you should put the description" -#~ " of your changes that will be " -#~ "added to the changelog alongside your" -#~ " PR title." +#~ "This would need to be done in " +#~ "the main process (which is where " +#~ "the server would run) and in each" +#~ " Actor created by the VCE. By " +#~ "means of :code:`actor_kwargs` we can " +#~ "pass the reserved key `\"on_actor_init_fn\"`" +#~ " in order to specify a function " +#~ "to be executed upon actor " +#~ "initialization. In this case, to enable" +#~ " GPU growth for TF workloads. It " +#~ "would look as follows:" #~ msgstr "" #~ msgid "" -#~ "If the section is completely empty " -#~ "(without any token) or non-existent, " -#~ "the changelog will just contain the " -#~ "title of the PR for the changelog" -#~ " entry, without any description." +#~ "Model updates can be persisted on " +#~ "the server-side by customizing " +#~ ":code:`Strategy` methods. Implementing custom " +#~ "strategies is always an option, but " +#~ "for many cases it may be more " +#~ "convenient to simply customize an " +#~ "existing strategy. The following code " +#~ "example defines a new " +#~ ":code:`SaveModelStrategy` which customized the " +#~ "existing built-in :code:`FedAvg` strategy. " +#~ "In particular, it customizes " +#~ ":code:`aggregate_fit` by calling " +#~ ":code:`aggregate_fit` in the base class " +#~ "(:code:`FedAvg`). 
It then continues to " +#~ "save returned (aggregated) weights before " +#~ "it returns those aggregated weights to" +#~ " the caller (i.e., the server):" #~ msgstr "" #~ msgid "" -#~ "If the section contains some text " -#~ "other than tokens, it will use it" -#~ " to add a description to the " -#~ "change." +#~ "For central DP with server-side " +#~ "clipping, there are two :code:`Strategy` " +#~ "classes that act as wrappers around " +#~ "the actual :code:`Strategy` instance (for " +#~ "example, :code:`FedAvg`). The two wrapper " +#~ "classes are " +#~ ":code:`DifferentialPrivacyServerSideFixedClipping` and " +#~ ":code:`DifferentialPrivacyServerSideAdaptiveClipping` for " +#~ "fixed and adaptive clipping." #~ msgstr "" #~ msgid "" -#~ "If the section contains one of the" -#~ " following tokens it will ignore any" -#~ " other text and put the PR " -#~ "under the corresponding section of the" -#~ " changelog:" +#~ "The code sample below enables the " +#~ ":code:`FedAvg` strategy to use server-" +#~ "side fixed clipping using the " +#~ ":code:`DifferentialPrivacyServerSideFixedClipping` wrapper " +#~ "class. The same approach can be " +#~ "used with " +#~ ":code:`DifferentialPrivacyServerSideAdaptiveClipping` by " +#~ "adjusting the corresponding input parameters." #~ msgstr "" -#~ msgid " is for classifying a PR as a general improvement." +#~ msgid "" +#~ "For central DP with client-side " +#~ "clipping, the server sends the clipping" +#~ " value to selected clients on each" +#~ " round. Clients can use existing " +#~ "Flower :code:`Mods` to perform the " +#~ "clipping. Two mods are available for " +#~ "fixed and adaptive client-side clipping:" +#~ " :code:`fixedclipping_mod` and " +#~ ":code:`adaptiveclipping_mod` with corresponding " +#~ "server-side wrappers " +#~ ":code:`DifferentialPrivacyClientSideFixedClipping` and " +#~ ":code:`DifferentialPrivacyClientSideAdaptiveClipping`." 
#~ msgstr "" -#~ msgid " is to not add the PR to the changelog" +#~ msgid "" +#~ "The code sample below enables the " +#~ ":code:`FedAvg` strategy to use differential" +#~ " privacy with client-side fixed " +#~ "clipping using both the " +#~ ":code:`DifferentialPrivacyClientSideFixedClipping` wrapper " +#~ "class and, on the client, " +#~ ":code:`fixedclipping_mod`:" #~ msgstr "" -#~ msgid " is to add a general baselines change to the PR" +#~ msgid "" +#~ "In addition to the server-side " +#~ "strategy wrapper, the :code:`ClientApp` needs" +#~ " to configure the matching " +#~ ":code:`fixedclipping_mod` to perform the " +#~ "client-side clipping:" #~ msgstr "" -#~ msgid " is to add a general examples change to the PR" +#~ msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" #~ msgstr "" -#~ msgid " is to add a general sdk change to the PR" +#~ msgid "" +#~ "Flower allows full customization of the" +#~ " learning process through the " +#~ ":code:`Strategy` abstraction. A number of " +#~ "built-in strategies are provided in " +#~ "the core framework." #~ msgstr "" -#~ msgid " is to add a general simulations change to the PR" +#~ msgid "Use an existing strategy, for example, :code:`FedAvg`" #~ msgstr "" -#~ msgid "Note that only one token should be used." +#~ msgid "" +#~ "This creates a strategy with all " +#~ "parameters left at their default values" +#~ " and passes it to the " +#~ ":code:`start_server` function. It is usually" +#~ " recommended to adjust a few " +#~ "parameters during instantiation:" #~ msgstr "" #~ msgid "" -#~ "Its content must have a specific " -#~ "format. We will break down what " -#~ "each possibility does:" +#~ "The server can pass new configuration" +#~ " values to the client each round " +#~ "by providing a function to " +#~ ":code:`on_fit_config_fn`. 
The provided function " +#~ "will be called by the strategy and" +#~ " must return a dictionary of " +#~ "configuration key values pairs that will" +#~ " be sent to the client. It must" +#~ " return a dictionary of arbitrary " +#~ "configuration values :code:`client.fit` and " +#~ ":code:`client.evaluate` functions during each " +#~ "round of federated learning." #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains nothing or doesn't exist, " -#~ "the following text will be added " -#~ "to the changelog::" +#~ "The :code:`on_fit_config_fn` can be used " +#~ "to pass arbitrary configuration values " +#~ "from server to client, and potentially" +#~ " change these values each round, for" +#~ " example, to adjust the learning " +#~ "rate. The client will receive the " +#~ "dictionary returned by the " +#~ ":code:`on_fit_config_fn` in its own " +#~ ":code:`client.fit()` function." #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains a description (and no " -#~ "token), the following text will be " -#~ "added to the changelog::" +#~ "Similar to :code:`on_fit_config_fn`, there is" +#~ " also :code:`on_evaluate_config_fn` to customize" +#~ " the configuration sent to " +#~ ":code:`client.evaluate()`" #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, nothing will change" -#~ " in the changelog." +#~ "Server-side evaluation can be enabled" +#~ " by passing an evaluation function to" +#~ " :code:`evaluate_fn`." #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following text" -#~ " will be added to the changelog::" +#~ "Note that since version :code:`1.11.0`, " +#~ ":code:`flower-server-app` no longer " +#~ "supports passing a reference to a " +#~ "`ServerApp` attribute. Instead, you need " +#~ "to pass the path to Flower app " +#~ "via the argument :code:`--app`. 
This is" +#~ " the path to a directory containing" +#~ " a `pyproject.toml`. You can create a" +#~ " valid Flower app by executing " +#~ ":code:`flwr new` and following the " +#~ "prompt." #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" +#~ "The following examples are available as" +#~ " standalone projects. Quickstart TensorFlow/Keras" +#~ " ---------------------------" #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" +#~ "Let's create a new application project" +#~ " in Xcode and add :code:`flwr` as " +#~ "a dependency in your project. For " +#~ "our application, we will store the " +#~ "logic of our app in " +#~ ":code:`FLiOSModel.swift` and the UI elements" +#~ " in :code:`ContentView.swift`. We will " +#~ "focus more on :code:`FLiOSModel.swift` in " +#~ "this quickstart. Please refer to the " +#~ "`full code example " +#~ "`_ to " +#~ "learn more about the app." +#~ msgstr "" + +#~ msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following text " -#~ "will be added to the changelog::" +#~ "Then add the mlmodel to the " +#~ "project simply by drag-and-drop, " +#~ "the mlmodel will be bundled inside " +#~ "the application during deployment to " +#~ "your iOS device. We need to pass" +#~ " the url to access mlmodel and " +#~ "run CoreML machine learning processes, " +#~ "it can be retrieved by calling the" +#~ " function :code:`Bundle.main.url`. For the " +#~ "MNIST dataset, we need to preprocess " +#~ "it into :code:`MLBatchProvider` object. The" +#~ " preprocessing is done inside " +#~ ":code:`DataLoader.swift`." 
#~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" +#~ "Since CoreML does not allow the " +#~ "model parameters to be seen before " +#~ "training, and accessing the model " +#~ "parameters during or after the training" +#~ " can only be done by specifying " +#~ "the layer name, we need to know" +#~ " this information beforehand, through " +#~ "looking at the model specification, " +#~ "which are written as proto files. " +#~ "The implementation can be seen in " +#~ ":code:`MLModelInspect`." #~ msgstr "" #~ msgid "" -#~ "Note that only one token must be" -#~ " provided, otherwise, only the first " -#~ "action (in the order listed above), " -#~ "will be performed." +#~ "Then start the Flower gRPC client " +#~ "and start communicating to the server" +#~ " by passing our Flower client to " +#~ "the function :code:`startFlwrGRPC`." #~ msgstr "" -#~ msgid "Example: MXNet - Run MXNet Federated" +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ "call the provided :code:`MLFlwrClient` and " +#~ "call :code:`startFlwrGRPC()`. The attribute " +#~ ":code:`hostname` and :code:`port` tells the" +#~ " client which server to connect to." +#~ " This can be done by entering " +#~ "the hostname and port in the " +#~ "application before clicking the start " +#~ "button to start the federated learning" +#~ " process." #~ msgstr "" #~ msgid "" -#~ "This tutorial will show you how to" -#~ " use Flower to build a federated " -#~ "version of an existing MXNet workload." -#~ " We are using MXNet to train a" -#~ " Sequential model on the MNIST " -#~ "dataset. We will structure the example" -#~ " similar to our `PyTorch - From " -#~ "Centralized To Federated " -#~ "`_ walkthrough. " -#~ "MXNet and PyTorch are very similar " -#~ "and a very good comparison between " -#~ "MXNet and PyTorch is given `here " -#~ "`_. 
First, " -#~ "we build a centralized training approach" -#~ " based on the `Handwritten Digit " -#~ "Recognition " -#~ "`_" -#~ " tutorial. Then, we build upon the" -#~ " centralized training code to run the" -#~ " training in a federated fashion." +#~ "For simple workloads we can start " +#~ "a Flower server and leave all the" +#~ " configuration possibilities at their " +#~ "default values. In a file named " +#~ ":code:`server.py`, import Flower and start " +#~ "the server:" #~ msgstr "" #~ msgid "" -#~ "Before we start setting up our " -#~ "MXNet example, we install the " -#~ ":code:`mxnet` and :code:`flwr` packages:" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system in your ios device. The " +#~ "full `source code " +#~ "`_ for" +#~ " this example can be found in " +#~ ":code:`examples/ios`." #~ msgstr "" -#~ msgid "MNIST Training with MXNet" +#~ msgid "" +#~ "Before we start building our JAX " +#~ "example, we need install the packages" +#~ " :code:`jax`, :code:`jaxlib`, :code:`scikit-" +#~ "learn`, and :code:`flwr`:" #~ msgstr "" #~ msgid "" #~ "We begin with a brief description " #~ "of the centralized training code based" -#~ " on a :code:`Sequential` model. If " -#~ "you want a more in-depth " +#~ " on a :code:`Linear Regression` model. " +#~ "If you want a more in-depth " #~ "explanation of what's going on then " -#~ "have a look at the official `MXNet" -#~ " tutorial " -#~ "`_." +#~ "have a look at the official `JAX" +#~ " documentation `_." #~ msgstr "" #~ msgid "" -#~ "Let's create a new file " -#~ "called:code:`mxnet_mnist.py` with all the " +#~ "Let's create a new file called " +#~ ":code:`jax_training.py` with all the " #~ "components required for a traditional " -#~ "(centralized) MNIST training. First, the " -#~ "MXNet package :code:`mxnet` needs to be" -#~ " imported. You can see that we " -#~ "do not yet import the :code:`flwr` " -#~ "package for federated learning. 
This " -#~ "will be done later." +#~ "(centralized) linear regression training. " +#~ "First, the JAX packages :code:`jax` and" +#~ " :code:`jaxlib` need to be imported. " +#~ "In addition, we need to import " +#~ ":code:`sklearn` since we use " +#~ ":code:`make_regression` for the dataset and" +#~ " :code:`train_test_split` to split the " +#~ "dataset into a training and test " +#~ "set. You can see that we do " +#~ "not yet import the :code:`flwr` package" +#~ " for federated learning. This will be" +#~ " done later." #~ msgstr "" #~ msgid "" #~ "The :code:`load_data()` function loads the " -#~ "MNIST training and test sets." +#~ "mentioned training and test sets." #~ msgstr "" #~ msgid "" -#~ "As already mentioned, we will use " -#~ "the MNIST dataset for this machine " -#~ "learning workload. The model architecture " -#~ "(a very simple :code:`Sequential` model) " -#~ "is defined in :code:`model()`." +#~ "The model architecture (a very simple" +#~ " :code:`Linear Regression` model) is " +#~ "defined in :code:`load_model()`." #~ msgstr "" #~ msgid "" #~ "We now need to define the training" -#~ " (function :code:`train()`) which loops " +#~ " (function :code:`train()`), which loops " #~ "over the training set and measures " -#~ "the loss for each batch of " -#~ "training examples." +#~ "the loss (function :code:`loss_fn()`) for " +#~ "each batch of training examples. The " +#~ "loss function is separate since JAX " +#~ "takes derivatives with a :code:`grad()` " +#~ "function (defined in the :code:`main()` " +#~ "function and called in :code:`train()`)." #~ msgstr "" #~ msgid "" #~ "The evaluation of the model is " -#~ "defined in function :code:`test()`. The " -#~ "function loops over all test samples " -#~ "and measures the loss and accuracy " -#~ "of the model based on the test " -#~ "dataset." +#~ "defined in the function :code:`evaluation()`." +#~ " The function takes all test examples" +#~ " and measures the loss of the " +#~ "linear regression model." 
#~ msgstr "" #~ msgid "" #~ "Having defined the data loading, model" #~ " architecture, training, and evaluation we" #~ " can put everything together and " -#~ "train our model on MNIST. Note " -#~ "that the GPU/CPU device for the " -#~ "training and testing is defined within" -#~ " the :code:`ctx` (context)." -#~ msgstr "" - -#~ msgid "You can now run your (centralized) MXNet machine learning workload:" -#~ msgstr "" - -#~ msgid "" -#~ "So far this should all look fairly" -#~ " familiar if you've used MXNet (or" -#~ " even PyTorch) before. Let's take the" -#~ " next step and use what we've " -#~ "built to create a simple federated " -#~ "learning system consisting of one server" -#~ " and two clients." -#~ msgstr "" - -#~ msgid "MXNet meets Flower" -#~ msgstr "" - -#~ msgid "" -#~ "So far, it was not easily possible" -#~ " to use MXNet workloads for federated" -#~ " learning because federated learning is " -#~ "not supported in MXNet. Since Flower " -#~ "is fully agnostic towards the underlying" -#~ " machine learning framework, it can " -#~ "be used to federated arbitrary machine" -#~ " learning workloads. This section will " -#~ "show you how Flower can be used" -#~ " to federate our centralized MXNet " -#~ "workload." +#~ "train our model using JAX. As " +#~ "already mentioned, the :code:`jax.grad()` " +#~ "function is defined in :code:`main()` " +#~ "and passed to :code:`train()`." #~ msgstr "" #~ msgid "" -#~ "The concept to federate an existing " -#~ "workload is always the same and " +#~ "The concept of federating an existing" +#~ " workload is always the same and " #~ "easy to understand. We have to " #~ "start a *server* and then use the" -#~ " code in :code:`mxnet_mnist.py` for the " -#~ "*clients* that are connected to the " -#~ "*server*. The *server* sends model " +#~ " code in :code:`jax_training.py` for the" +#~ " *clients* that are connected to the" +#~ " *server*. The *server* sends model " #~ "parameters to the clients. 
The *clients*" #~ " run the training and update the " #~ "parameters. The updated parameters are " -#~ "sent back to the *server* which " +#~ "sent back to the *server*, which " #~ "averages all received parameter updates. " #~ "This describes one round of the " -#~ "federated learning process and we repeat" -#~ " this for multiple rounds." +#~ "federated learning process, and we " +#~ "repeat this for multiple rounds." #~ msgstr "" #~ msgid "" #~ "Finally, we will define our *client* " #~ "logic in :code:`client.py` and build " -#~ "upon the previously defined MXNet " -#~ "training in :code:`mxnet_mnist.py`. Our " -#~ "*client* needs to import :code:`flwr`, " -#~ "but also :code:`mxnet` to update the " -#~ "parameters on our MXNet model:" +#~ "upon the previously defined JAX training" +#~ " in :code:`jax_training.py`. Our *client* " +#~ "needs to import :code:`flwr`, but also" +#~ " :code:`jax` and :code:`jaxlib` to update" +#~ " the parameters on our JAX model:" #~ msgstr "" #~ msgid "" @@ -23209,36 +32068,39 @@ msgstr "" #~ ":code:`flwr.client.NumPyClient`. Our implementation " #~ "will be based on " #~ ":code:`flwr.client.NumPyClient` and we'll call " -#~ "it :code:`MNISTClient`. :code:`NumPyClient` is " +#~ "it :code:`FlowerClient`. :code:`NumPyClient` is " #~ "slightly easier to implement than " #~ ":code:`Client` if you use a framework" #~ " with good NumPy interoperability (like " -#~ "PyTorch or MXNet) because it avoids " -#~ "some of the boilerplate that would " -#~ "otherwise be necessary. :code:`MNISTClient` " -#~ "needs to implement four methods, two " -#~ "methods for getting/setting model parameters," -#~ " one method for training the model," -#~ " and one method for testing the " -#~ "model:" +#~ "JAX) because it avoids some of the" +#~ " boilerplate that would otherwise be " +#~ "necessary. 
:code:`FlowerClient` needs to " +#~ "implement four methods, two methods for" +#~ " getting/setting model parameters, one " +#~ "method for training the model, and " +#~ "one method for testing the model:" #~ msgstr "" -#~ msgid "transform MXNet :code:`NDArray`'s to NumPy :code:`ndarray`'s" +#~ msgid ":code:`set_parameters (optional)`" +#~ msgstr "" + +#~ msgid "transform parameters to NumPy :code:`ndarray`'s" #~ msgstr "" #~ msgid "" #~ "The challenging part is to transform " -#~ "the MXNet parameters from :code:`NDArray` " -#~ "to :code:`NumPy Arrays` to make it " -#~ "readable for Flower." +#~ "the JAX model parameters from " +#~ ":code:`DeviceArray` to :code:`NumPy ndarray` " +#~ "to make them compatible with " +#~ "`NumPyClient`." #~ msgstr "" #~ msgid "" #~ "The two :code:`NumPyClient` methods " #~ ":code:`fit` and :code:`evaluate` make use " #~ "of the functions :code:`train()` and " -#~ ":code:`test()` previously defined in " -#~ ":code:`mxnet_mnist.py`. So what we really " +#~ ":code:`evaluate()` previously defined in " +#~ ":code:`jax_training.py`. So what we really " #~ "do here is we tell Flower through" #~ " our :code:`NumPyClient` subclass which of" #~ " our already defined functions to " @@ -23249,662 +32111,656 @@ msgstr "" #~ msgstr "" #~ msgid "" -#~ "Having defined data loading, model " -#~ "architecture, training, and evaluation we " -#~ "can put everything together and train" -#~ " our :code:`Sequential` model on MNIST." +#~ "In this tutorial, we will learn " +#~ "how to train a :code:`Logistic " +#~ "Regression` model on MNIST using Flower" +#~ " and scikit-learn." #~ msgstr "" #~ msgid "" -#~ "in each window (make sure that the" -#~ " server is still running before you" -#~ " do so) and see your MXNet " -#~ "project run federated learning across " -#~ "two clients. Congratulations!" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. 
However, before" +#~ " setting up the client and server," +#~ " we will define all functionalities " +#~ "that we need for our federated " +#~ "learning setup within :code:`utils.py`. The" +#~ " :code:`utils.py` contains different functions" +#~ " defining all the machine learning " +#~ "basics:" #~ msgstr "" -#~ msgid "" -#~ "The full source code for this " -#~ "example: `MXNet: From Centralized To " -#~ "Federated (Code) " -#~ "`_. Our " -#~ "example is of course somewhat over-" -#~ "simplified because both clients load the" -#~ " exact same dataset, which isn't " -#~ "realistic. You're now prepared to " -#~ "explore this topic further. How about" -#~ " using a CNN or using a " -#~ "different dataset? How about adding more" -#~ " clients?" +#~ msgid ":code:`get_model_parameters()`" #~ msgstr "" -#~ msgid "" -#~ "This guide describes how to a " -#~ "SSL-enabled secure Flower server can " -#~ "be started and how a Flower client" -#~ " can establish a secure connections " -#~ "to it." +#~ msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" #~ msgstr "" -#~ msgid "" -#~ "The code example comes with a " -#~ "README.md file which will explain how" -#~ " to start it. Although it is " -#~ "already SSL-enabled, it might be " -#~ "less descriptive on how. Stick to " -#~ "this guide for a deeper introduction " -#~ "to the topic." +#~ msgid ":code:`set_model_params()`" #~ msgstr "" -#~ msgid "" -#~ "Using SSL-enabled connections requires " -#~ "certificates to be passed to the " -#~ "server and client. For the purpose " -#~ "of this guide we are going to " -#~ "generate self-signed certificates. 
As " -#~ "this can become quite complex we " -#~ "are going to ask you to run " -#~ "the script in :code:`examples/advanced-" -#~ "tensorflow/certificates/generate.sh`" +#~ msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" #~ msgstr "" -#~ msgid "with the following command sequence:" +#~ msgid ":code:`set_initial_params()`" #~ msgstr "" #~ msgid "" -#~ "The approach how the SSL certificates" -#~ " are generated in this example can" -#~ " serve as an inspiration and starting" -#~ " point but should not be taken " -#~ "as complete for production environments. " -#~ "Please refer to other sources regarding" -#~ " the issue of correctly generating " -#~ "certificates for production environments." +#~ "Please check out :code:`utils.py` `here " +#~ "`_ for more details. " +#~ "The pre-defined functions are used " +#~ "in the :code:`client.py` and imported. " +#~ "The :code:`client.py` also requires to " +#~ "import several packages such as Flower" +#~ " and scikit-learn:" #~ msgstr "" #~ msgid "" -#~ "In case you are a researcher you" -#~ " might be just fine using the " -#~ "self-signed certificates generated using " -#~ "the scripts which are part of this" -#~ " guide." +#~ "Prior to local training, we need " +#~ "to load the MNIST dataset, a " +#~ "popular image classification dataset of " +#~ "handwritten digits for machine learning, " +#~ "and partition the dataset for FL. " +#~ "This can be conveniently achieved using" +#~ " `Flower Datasets `_." +#~ " The :code:`FederatedDataset.load_partition()` method" +#~ " loads the partitioned training set " +#~ "for each partition ID defined in " +#~ "the :code:`--partition-id` argument." #~ msgstr "" #~ msgid "" -#~ "We are now going to show how " -#~ "to write a sever which uses the" -#~ " previously generated scripts." +#~ "Next, the logistic regression model is" +#~ " defined and initialized with " +#~ ":code:`utils.set_initial_params()`." 
#~ msgstr "" #~ msgid "" -#~ "When providing certificates, the server " -#~ "expects a tuple of three certificates." -#~ " :code:`Path` can be used to easily" -#~ " read the contents of those files " -#~ "into byte strings, which is the " -#~ "data type :code:`start_server` expects." +#~ "The Flower server interacts with clients" +#~ " through an interface called " +#~ ":code:`Client`. When the server selects " +#~ "a particular client for training, it " +#~ "sends training instructions over the " +#~ "network. The client receives those " +#~ "instructions and calls one of the " +#~ ":code:`Client` methods to run your code" +#~ " (i.e., to fit the logistic " +#~ "regression we defined earlier)." #~ msgstr "" #~ msgid "" -#~ "We are now going to show how " -#~ "to write a client which uses the" -#~ " previously generated scripts:" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses " +#~ "scikit-learn. Implementing :code:`NumPyClient` " +#~ "usually means defining the following " +#~ "methods (:code:`set_parameters` is optional " +#~ "though):" #~ msgstr "" -#~ msgid "" -#~ "When setting :code:`root_certificates`, the " -#~ "client expects the PEM-encoded root " -#~ "certificates as a byte string. We " -#~ "are again using :code:`Path` to simplify" -#~ " reading those as byte strings." +#~ msgid ":code:`set_parameters` (optional)" #~ msgstr "" -#~ msgid "" -#~ "You should now have learned how to" -#~ " generate self-signed certificates using" -#~ " the given script, start a SSL-" -#~ "enabled server, and have a client " -#~ "establish a secure connection to it." +#~ msgid "is directly imported with :code:`utils.set_model_params()`" #~ msgstr "" #~ msgid "" -#~ "The simplest way to get started " -#~ "with Flower is by using the " -#~ "pre-made Docker images, which you can" -#~ " find on `Docker Hub " -#~ "`_." 
-#~ msgstr "" - -#~ msgid "Flower server" +#~ "We can now create an instance of" +#~ " our class :code:`MnistClient` and add " +#~ "one line to actually run this " +#~ "client:" #~ msgstr "" #~ msgid "" -#~ "The command will pull the Docker " -#~ "image with the tag " -#~ "``1.7.0-py3.11-ubuntu22.04`` from Docker Hub. " -#~ "The tag contains the information which" -#~ " Flower, Python and Ubuntu is used." -#~ " In this case, it uses Flower " -#~ "1.7.0, Python 3.11 and Ubuntu 22.04. " -#~ "The ``--rm`` flag tells Docker to " -#~ "remove the container after it exits." +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. If you implement" +#~ " a client of type :code:`NumPyClient` " +#~ "you'll need to first call its " +#~ ":code:`to_client()` method. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." #~ msgstr "" -#~ msgid "" -#~ "By default, the Flower server keeps " -#~ "state in-memory. When using the " -#~ "Docker flag ``--rm``, the state is " -#~ "not persisted between container starts. " -#~ "We will show below how to save " -#~ "the state in a file on your " -#~ "host system." +#~ msgid ":code:`server.py`, import Flower and start the server:" #~ msgstr "" #~ msgid "" -#~ "The ``-p :`` flag tells " -#~ "Docker to map the ports " -#~ "``9091``/``9092`` of the host to " -#~ "``9091``/``9092`` of the container, allowing" -#~ " you to access the Driver API " -#~ "on ``http://localhost:9091`` and the Fleet " -#~ "API on ``http://localhost:9092``. 
Lastly, any" -#~ " flag that comes after the tag " -#~ "is passed to the Flower server. " -#~ "Here, we are passing the flag " -#~ "``--insecure``." +#~ "The number of federated learning rounds" +#~ " is set in :code:`fit_round()` and " +#~ "the evaluation is defined in " +#~ ":code:`get_evaluate_fn()`. The evaluation function" +#~ " is called after each federated " +#~ "learning round and gives you information" +#~ " about loss and accuracy. Note that" +#~ " we also make use of Flower " +#~ "Datasets here to load the test " +#~ "split of the MNIST dataset for " +#~ "server-side evaluation." #~ msgstr "" #~ msgid "" -#~ "The ``--insecure`` flag enables insecure " -#~ "communication (using HTTP, not HTTPS) " -#~ "and should only be used for " -#~ "testing purposes. We strongly recommend " -#~ "enabling `SSL `_ when " -#~ "deploying to a production environment." +#~ "The :code:`main` contains the server-" +#~ "side parameter initialization " +#~ ":code:`utils.set_initial_params()` as well as " +#~ "the aggregation strategy " +#~ ":code:`fl.server.strategy:FedAvg()`. The strategy is" +#~ " the default one, federated averaging " +#~ "(or FedAvg), with two clients and " +#~ "evaluation after each federated learning " +#~ "round. The server can be started " +#~ "with the command " +#~ ":code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " +#~ "strategy=strategy, " +#~ "config=fl.server.ServerConfig(num_rounds=3))`." #~ msgstr "" #~ msgid "" -#~ "You can use ``--help`` to view all" -#~ " available flags that the server " -#~ "supports:" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/sklearn-logreg-" +#~ "mnist`." +#~ msgstr "" + +#~ msgid "" +#~ "In this tutorial we will learn how" +#~ " to train a federated XGBoost model" +#~ " on HIGGS dataset using Flower and" +#~ " :code:`xgboost` package. 
We use a " +#~ "simple example (`full code xgboost-" +#~ "quickstart `_) with two *clients* " +#~ "and one *server* to demonstrate how " +#~ "federated XGBoost works, and then we " +#~ "dive into a more complex example " +#~ "(`full code xgboost-comprehensive " +#~ "`_) to run various experiments." #~ msgstr "" #~ msgid "" -#~ "If you want to persist the state" -#~ " of the server on your host " -#~ "system, all you need to do is " -#~ "specify a path where you want to" -#~ " save the file on your host " -#~ "system and a name for the database" -#~ " file. In the example below, we " -#~ "tell Docker via the flag ``-v`` to" -#~ " mount the user's home directory " -#~ "(``~/`` on your host) into the " -#~ "``/app/`` directory of the container. " -#~ "Furthermore, we use the flag " -#~ "``--database`` to specify the name of" -#~ " the database file." +#~ "Since we want to use :code:`xgboost` " +#~ "package to build up XGBoost trees, " +#~ "let's go ahead and install " +#~ ":code:`xgboost`:" #~ msgstr "" #~ msgid "" -#~ "As soon as the server starts, the" -#~ " file ``state.db`` is created in the" -#~ " user's home directory on your host" -#~ " system. If the file already exists," -#~ " the server tries to restore the " -#~ "state from the file. To start the" -#~ " server with an empty database, " -#~ "simply remove the ``state.db`` file." +#~ "In a file called :code:`client.py`, " +#~ "import xgboost, Flower, Flower Datasets " +#~ "and other related functions:" #~ msgstr "" #~ msgid "" -#~ "To enable SSL, you will need a " -#~ "CA certificate, a server certificate and" -#~ " a server private key." +#~ "In this example, we split the " +#~ "dataset into 30 partitions with uniform" +#~ " distribution (:code:`IidPartitioner(num_partitions=30)`)." +#~ " Then, we load the partition for " +#~ "the given client based on " +#~ ":code:`partition_id`:" #~ msgstr "" #~ msgid "" -#~ "For testing purposes, you can generate" -#~ " your own self-signed certificates. 
" -#~ "The `Enable SSL connections " -#~ "`_ page contains " -#~ "a section that will guide you " -#~ "through the process." +#~ "After that, we do train/test splitting" +#~ " on the given partition (client's " +#~ "local data), and transform data format" +#~ " for :code:`xgboost` package." #~ msgstr "" #~ msgid "" -#~ "Assuming all files we need are in" -#~ " the local ``certificates`` directory, we" -#~ " can use the flag ``-v`` to " -#~ "mount the local directory into the " -#~ "``/app/`` directory of the container. " -#~ "This allows the server to access " -#~ "the files within the container. Finally," -#~ " we pass the names of the " -#~ "certificates to the server with the " -#~ "``--certificates`` flag." -#~ msgstr "" - -#~ msgid "Using a different Flower or Python version" +#~ "The functions of :code:`train_test_split` and" +#~ " :code:`transform_dataset_to_dmatrix` are defined " +#~ "as below:" #~ msgstr "" #~ msgid "" -#~ "If you want to use a different " -#~ "version of Flower or Python, you " -#~ "can do so by changing the tag. " -#~ "All versions we provide are available" -#~ " on `Docker Hub " -#~ "`_." +#~ "The :code:`num_local_round` represents the " +#~ "number of iterations for local tree " +#~ "boost. We use CPU for the training" +#~ " in default. One can shift it " +#~ "to GPU by setting :code:`tree_method` to" +#~ " :code:`gpu_hist`. We use AUC as " +#~ "evaluation metric." #~ msgstr "" #~ msgid "" -#~ "The following command returns the " -#~ "current image hash referenced by the " -#~ "``server:1.7.0-py3.11-ubuntu22.04`` tag:" +#~ "After loading the dataset we define " +#~ "the Flower client. We follow the " +#~ "general rule to define :code:`XgbClient` " +#~ "class inherited from :code:`fl.client.Client`." #~ msgstr "" -#~ msgid "Next, we can pin the hash when running a new server container:" +#~ msgid "" +#~ "All required parameters defined above " +#~ "are passed to :code:`XgbClient`'s constructor." 
#~ msgstr "" #~ msgid "" -#~ "QUICKSTART TUTORIALS: :doc:`PyTorch ` | :doc:`TensorFlow " -#~ "` | :doc:`🤗 " -#~ "Transformers ` " -#~ "| :doc:`JAX ` |" -#~ " :doc:`Pandas ` " -#~ "| :doc:`fastai `" -#~ " | :doc:`PyTorch Lightning ` | :doc:`MXNet " -#~ "` | :doc" -#~ ":`scikit-learn `" -#~ " | :doc:`XGBoost ` | :doc:`Android ` | :doc:`iOS `" +#~ "Then, we override :code:`get_parameters`, " +#~ ":code:`fit` and :code:`evaluate` methods " +#~ "insides :code:`XgbClient` class as follows." #~ msgstr "" -#~ msgid "flower-driver-api" +#~ msgid "" +#~ "Unlike neural network training, XGBoost " +#~ "trees are not started from a " +#~ "specified random weights. In this case," +#~ " we do not use :code:`get_parameters` " +#~ "and :code:`set_parameters` to initialise model" +#~ " parameters for XGBoost. As a result," +#~ " let's return an empty tensor in " +#~ ":code:`get_parameters` when it is called " +#~ "by the server at the first round." #~ msgstr "" -#~ msgid "flower-fleet-api" +#~ msgid "" +#~ "In :code:`fit`, at the first round, " +#~ "we call :code:`xgb.train()` to build up" +#~ " the first set of trees. From " +#~ "the second round, we load the " +#~ "global model sent from server to " +#~ "new build Booster object, and then " +#~ "update model weights on local training" +#~ " data with function :code:`local_boost` as" +#~ " follows:" #~ msgstr "" #~ msgid "" -#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " -#~ "[:py:class:`int`, :py:class:`float`, :py:class:`str`, " -#~ ":py:class:`bytes`, :py:class:`bool`, " -#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`float`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`str`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`bytes`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`bool`]]]" +#~ "Given :code:`num_local_round`, we update trees" +#~ " by calling :code:`bst_input.update` method. 
" +#~ "After training, the last " +#~ ":code:`N=num_local_round` trees will be " +#~ "extracted to send to the server." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`create_error_reply " -#~ "`\\ \\(error\\, " -#~ "ttl\\)" +#~ "In :code:`evaluate`, after loading the " +#~ "global model, we call :code:`bst.eval_set` " +#~ "function to conduct evaluation on valid" +#~ " set. The AUC value will be " +#~ "returned." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`create_reply `\\ " -#~ "\\(content\\, ttl\\)" +#~ "Now, we can create an instance of" +#~ " our class :code:`XgbClient` and add " +#~ "one line to actually run this " +#~ "client:" #~ msgstr "" #~ msgid "" -#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " -#~ "[:py:class:`int`, :py:class:`float`, " -#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`float`]]]" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` and " +#~ "call :code:`fl.client.start_client()`. The string" +#~ " :code:`\"[::]:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." #~ msgstr "" -#~ msgid "Run Flower server (Driver API and Fleet API)." +#~ msgid "" +#~ "In a file named :code:`server.py`, " +#~ "import Flower and FedXgbBagging from " +#~ ":code:`flwr.server.strategy`." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`start_driver `\\ " -#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ "We use two clients for this " +#~ "example. 
An :code:`evaluate_metrics_aggregation` " +#~ "function is defined to collect and " +#~ "wighted average the AUC values from " +#~ "clients. The :code:`config_func` function is" +#~ " to return the current FL round " +#~ "number to client's :code:`fit()` and " +#~ ":code:`evaluate()` methods." #~ msgstr "" -#~ msgid "Start a Flower Driver API server." +#~ msgid "" +#~ "In file :code:`flwr.server.strategy.fedxgb_bagging.py`," +#~ " we define :code:`FedXgbBagging` inherited " +#~ "from :code:`flwr.server.strategy.FedAvg`. Then, we" +#~ " override the :code:`aggregate_fit`, " +#~ ":code:`aggregate_evaluate` and :code:`evaluate` " +#~ "methods as follows:" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`Driver `\\ " -#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ "In :code:`aggregate_fit`, we sequentially " +#~ "aggregate the clients' XGBoost trees by" +#~ " calling :code:`aggregate()` function:" #~ msgstr "" -#~ msgid "`Driver` class provides an interface to the Driver API." +#~ msgid "" +#~ "In this function, we first fetch " +#~ "the number of trees and the number" +#~ " of parallel trees for the current" +#~ " and previous model by calling " +#~ ":code:`_get_tree_nums`. Then, the fetched " +#~ "information will be aggregated. After " +#~ "that, the trees (containing model " +#~ "weights) are aggregated to generate a" +#~ " new tree model." #~ msgstr "" #~ msgid "" -#~ "The IPv4 or IPv6 address of the" -#~ " Driver API server. Defaults to " -#~ "`\"[::]:9091\"`." +#~ "Congratulations! You've successfully built and" +#~ " run your first federated XGBoost " +#~ "system. The AUC values can be " +#~ "checked in :code:`metrics_distributed`. One " +#~ "can see that the average AUC " +#~ "increases over FL rounds." #~ msgstr "" -#~ msgid ":py:obj:`close `\\ \\(\\)" +#~ msgid "" +#~ "The full `source code " +#~ "`_ for this example can be" +#~ " found in :code:`examples/xgboost-quickstart`." #~ msgstr "" -#~ msgid "Disconnect from the SuperLink if connected." 
+#~ msgid "" +#~ "To do this, we first customise a" +#~ " :code:`ClientManager` in :code:`server_utils.py`:" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`create_message `\\" -#~ " \\(content\\, message\\_type\\, ...\\)" +#~ "The customised :code:`ClientManager` samples " +#~ "all available clients in each FL " +#~ "round based on the order of " +#~ "connection to the server. Then, we " +#~ "define a new strategy :code:`FedXgbCyclic` " +#~ "in :code:`flwr.server.strategy.fedxgb_cyclic.py`, in " +#~ "order to sequentially select only one" +#~ " client in given round and pass " +#~ "the received model to next client." #~ msgstr "" #~ msgid "" -#~ "Time-to-live for the round trip" -#~ " of this message, i.e., the time " -#~ "from sending this message to receiving" -#~ " a reply. It specifies the duration" -#~ " for which the message and its " -#~ "potential reply are considered valid." +#~ "Unlike the original :code:`FedAvg`, we " +#~ "don't perform aggregation here. Instead, " +#~ "we just make a copy of the " +#~ "received client model as global model" +#~ " by overriding :code:`aggregate_fit`." #~ msgstr "" -#~ msgid "start\\_driver" +#~ msgid "" +#~ "Also, the customised :code:`configure_fit` and" +#~ " :code:`configure_evaluate` methods ensure the" +#~ " clients to be sequentially selected " +#~ "given FL round:" #~ msgstr "" #~ msgid "" -#~ "The IPv4 or IPv6 address of the" -#~ " Driver API server. Defaults to " -#~ "`\"[::]:8080\"`." +#~ "In :code:`dataset.py`, we have a " +#~ "function :code:`instantiate_partitioner` to " +#~ "instantiate the data partitioner based " +#~ "on the given :code:`num_partitions` and " +#~ ":code:`partitioner_type`. Currently, we provide " +#~ "four supported partitioner type to " +#~ "simulate the uniformity/non-uniformity in " +#~ "data quantity (uniform, linear, square, " +#~ "exponential)." #~ msgstr "" #~ msgid "" -#~ "A server implementation, either " -#~ "`flwr.server.Server` or a subclass thereof." 
-#~ " If no instance is provided, then " -#~ "`start_driver` will create one." +#~ "To facilitate centralised evaluation, we " +#~ "define a function in :code:`server_utils.py`:" #~ msgstr "" #~ msgid "" -#~ "An implementation of the class " -#~ "`flwr.server.ClientManager`. If no implementation" -#~ " is provided, then `start_driver` will " -#~ "use `flwr.server.SimpleClientManager`." +#~ "This function returns a evaluation " +#~ "function which instantiates a :code:`Booster`" +#~ " object and loads the global model" +#~ " weights to it. The evaluation is " +#~ "conducted by calling :code:`eval_set()` " +#~ "method, and the tested AUC value " +#~ "is reported." #~ msgstr "" -#~ msgid "The Driver object to use." +#~ msgid "" +#~ "As for distributed evaluation on the " +#~ "clients, it's same as the quick-" +#~ "start example by overriding the " +#~ ":code:`evaluate()` method insides the " +#~ ":code:`XgbClient` class in :code:`client_utils.py`." #~ msgstr "" -#~ msgid "Starting a driver that connects to an insecure server:" +#~ msgid "" +#~ "We also provide an example code " +#~ "(:code:`sim.py`) to use the simulation " +#~ "capabilities of Flower to simulate " +#~ "federated XGBoost training on either a" +#~ " single machine or a cluster of " +#~ "machines." #~ msgstr "" -#~ msgid "Starting a driver that connects to an SSL-enabled server:" +#~ msgid "" +#~ "After importing all required packages, " +#~ "we define a :code:`main()` function to" +#~ " perform the simulation process:" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`run_simulation_from_cli " -#~ "`\\ \\(\\)" +#~ "We first load the dataset and " +#~ "perform data partitioning, and the " +#~ "pre-processed data is stored in a " +#~ ":code:`list`. After the simulation begins, " +#~ "the clients won't need to pre-" +#~ "process their partitions again." #~ msgstr "" -#~ msgid "Run Simulation Engine from the CLI." 
+#~ msgid "" +#~ "After that, we start the simulation " +#~ "by calling :code:`fl.simulation.start_simulation`:" #~ msgstr "" -#~ msgid "run\\_simulation\\_from\\_cli" +#~ msgid "" +#~ "One of key parameters for " +#~ ":code:`start_simulation` is :code:`client_fn` which" +#~ " returns a function to construct a" +#~ " client. We define it as follows:" #~ msgstr "" #~ msgid "" -#~ "Check out this Federated Learning " -#~ "quickstart tutorial for using Flower " -#~ "with MXNet to train a Sequential " -#~ "model on MNIST." +#~ "In :code:`utils.py`, we define the " +#~ "arguments parsers for clients, server " +#~ "and simulation, allowing users to " +#~ "specify different experimental settings. Let's" +#~ " first see the sever side:" #~ msgstr "" -#~ msgid "Quickstart MXNet" +#~ msgid "" +#~ "This allows user to specify training " +#~ "strategies / the number of total " +#~ "clients / FL rounds / participating " +#~ "clients / clients for evaluation, and" +#~ " evaluation fashion. Note that with " +#~ ":code:`--centralised-eval`, the sever will " +#~ "do centralised evaluation and all " +#~ "functionalities for client evaluation will " +#~ "be disabled." #~ msgstr "" #~ msgid "" -#~ "MXNet is no longer maintained and " -#~ "has been moved into `Attic " -#~ "`_. As a " -#~ "result, we would encourage you to " -#~ "use other ML frameworks alongside " -#~ "Flower, for example, PyTorch. This " -#~ "tutorial might be removed in future " -#~ "versions of Flower." +#~ "This defines various options for client" +#~ " data partitioning. Besides, clients also" +#~ " have an option to conduct evaluation" +#~ " on centralised test set by setting" +#~ " :code:`--centralised-eval`, as well as " +#~ "an option to perform scaled learning " +#~ "rate based on the number of " +#~ "clients by setting :code:`--scaled-lr`." #~ msgstr "" #~ msgid "" -#~ "In this tutorial, we will learn " -#~ "how to train a :code:`Sequential` model" -#~ " on MNIST using Flower and MXNet." 
+#~ "The full `code " +#~ "`_ for this comprehensive " +#~ "example can be found in :code:`examples" +#~ "/xgboost-comprehensive`." #~ msgstr "" -#~ msgid "Since we want to use MXNet, let's go ahead and install it:" +#~ msgid "|b8714c45b74b4d8fb008e2ebb3bc1d44|" #~ msgstr "" -#~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training with two " -#~ "clients and one server. Our training " -#~ "procedure and network architecture are " -#~ "based on MXNet´s `Hand-written Digit " -#~ "Recognition tutorial " -#~ "`_." +#~ msgid "|75f1561efcfd422ea67d28d1513120dc|" #~ msgstr "" -#~ msgid "" -#~ "In a file called :code:`client.py`, " -#~ "import Flower and MXNet related " -#~ "packages:" +#~ msgid "|6a1f51b235304558a9bdaaabfc93b8d2|" #~ msgstr "" -#~ msgid "In addition, define the device allocation in MXNet with:" +#~ msgid "|35e70dab1fb544af9aa3a9c09c4f9797|" #~ msgstr "" -#~ msgid "" -#~ "We use MXNet to load MNIST, a " -#~ "popular image classification dataset of " -#~ "handwritten digits for machine learning. " -#~ "The MXNet utility :code:`mx.test_utils.get_mnist()`" -#~ " downloads the training and test " -#~ "data." +#~ msgid "|d7efb5705dd3467f991ed23746824a07|" #~ msgstr "" -#~ msgid "" -#~ "Define the training and loss with " -#~ "MXNet. We train the model by " -#~ "looping over the dataset, measure the" -#~ " corresponding loss, and optimize it." +#~ msgid "|94e7b021c7b540bfbedf7f082a41ff87|" #~ msgstr "" -#~ msgid "" -#~ "Next, we define the validation of " -#~ "our machine learning model. We loop " -#~ "over the test set and measure both" -#~ " loss and accuracy on the test " -#~ "set." +#~ msgid "|a80714782dde439ab73936518f91fc3c|" #~ msgstr "" -#~ msgid "" -#~ "After defining the training and testing" -#~ " of a MXNet machine learning model," -#~ " we use these functions to implement" -#~ " a Flower client." 
+#~ msgid "|c62080ca6197473da57d191c8225a9d9|" #~ msgstr "" -#~ msgid "Our Flower clients will use a simple :code:`Sequential` model:" +#~ msgid "|21a8f1e6a5b14a7bbb8559979d0e8a2b|" #~ msgstr "" -#~ msgid "" -#~ "After loading the dataset with " -#~ ":code:`load_data()` we perform one forward " -#~ "propagation to initialize the model and" -#~ " model parameters with :code:`model(init)`. " -#~ "Next, we implement a Flower client." +#~ msgid "|c310f2a22f7b4917bf42775aae7a1c09|" #~ msgstr "" -#~ msgid "" -#~ "Flower provides a convenience class " -#~ "called :code:`NumPyClient` which makes it " -#~ "easier to implement the :code:`Client` " -#~ "interface when your workload uses MXNet." -#~ " Implementing :code:`NumPyClient` usually means" -#~ " defining the following methods " -#~ "(:code:`set_parameters` is optional though):" +#~ msgid "|a0c5b43401194535a8460bcf02e65f9a|" #~ msgstr "" -#~ msgid "They can be implemented in the following way:" +#~ msgid "|aabfdbd5564e41a790f8ea93cc21a444|" #~ msgstr "" -#~ msgid "" -#~ "We can now create an instance of" -#~ " our class :code:`MNISTClient` and add " -#~ "one line to actually run this " -#~ "client:" +#~ msgid "|c9cc8f160fa647b09e742fe4dc8edb54|" #~ msgstr "" -#~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()` or " -#~ ":code:`fl.client.start_numpy_client()`. The string " -#~ ":code:`\"0.0.0.0:8080\"` tells the client " -#~ "which server to connect to. In our" -#~ " case we can run the server and" -#~ " the client on the same machine, " -#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" -#~ " we run a truly federated workload" -#~ " with the server and clients running" -#~ " on different machines, all that " -#~ "needs to change is the " -#~ ":code:`server_address` we pass to the " -#~ "client." 
+#~ msgid "|7e83aad011cd4907b2f02f907c6922e9|" #~ msgstr "" -#~ msgid "" -#~ "With both client and server ready, " -#~ "we can now run everything and see" -#~ " federated learning in action. Federated" -#~ " learning systems usually have a " -#~ "server and multiple clients. We " -#~ "therefore have to start the server " -#~ "first:" +#~ msgid "|4627c2bb6cc443ae9e079f81f33c9dd9|" #~ msgstr "" -#~ msgid "" -#~ "Congratulations! You've successfully built and" -#~ " run your first federated learning " -#~ "system. The full `source code " -#~ "`_ for this example can " -#~ "be found in :code:`examples/quickstart-mxnet`." +#~ msgid "|131af8322dc5466b827afd24be98f8c0|" #~ msgstr "" -#~ msgid "Sets the parameters of a :code:`sklean` LogisticRegression model" +#~ msgid "|f92920b87f3a40179bf7ddd0b6144c53|" #~ msgstr "" -#~ msgid ":code:`load_mnist()`" +#~ msgid "|d62da263071d45a496f543e41fce3a19|" #~ msgstr "" -#~ msgid "Loads the MNIST dataset using OpenML" +#~ msgid "|ad851971645b4e1fbf8d15bcc0b2ee11|" #~ msgstr "" -#~ msgid ":code:`shuffle()`" +#~ msgid "|929e9a6de6b34edb8488e644e2bb5221|" #~ msgstr "" -#~ msgid "Shuffles data and its label" +#~ msgid "|404cf9c9e8d64784a55646c0f9479cbc|" #~ msgstr "" -#~ msgid ":code:`partition()`" +#~ msgid "|b021ff9d25814458b1e631f8985a648b|" #~ msgstr "" -#~ msgid "Splits datasets into a number of partitions" +#~ msgid "|e6ca84e1df244f238288a768352678e5|" #~ msgstr "" -#~ msgid "" -#~ "We load the MNIST dataset from " -#~ "`OpenML " -#~ "`_, a" -#~ " popular image classification dataset of" -#~ " handwritten digits for machine learning." -#~ " The utility :code:`utils.load_mnist()` downloads" -#~ " the training and test data. The " -#~ "training set is split afterwards into" -#~ " 10 partitions with :code:`utils.partition()`." 
+#~ msgid "|39c2422082554a21963baffb33a0d057|" #~ msgstr "" -#~ msgid "" -#~ "The number of federated learning rounds" -#~ " is set in :code:`fit_round()` and " -#~ "the evaluation is defined in " -#~ ":code:`get_evaluate_fn()`. The evaluation function" -#~ " is called after each federated " -#~ "learning round and gives you information" -#~ " about loss and accuracy." +#~ msgid "|07ecf5fcd6814e88906accec6fa0fbfb|" #~ msgstr "" -#~ msgid "Let's get stated!" +#~ msgid "|57e78c0ca8a94ba5a64a04b1f2280e55|" +#~ msgstr "" + +#~ msgid "|9819b40e59ee40a4921e1244e8c99bac|" +#~ msgstr "" + +#~ msgid "|797bf279c4894b5ead31dc9b0534ed62|" #~ msgstr "" #~ msgid "" -#~ "We now have a list of ten " -#~ "training sets and ten validation sets" -#~ " (``trainloaders`` and ``valloaders``) " -#~ "representing the data of ten different" -#~ " organizations. Each ``trainloader``/``valloader`` " -#~ "pair contains 4500 training examples and" -#~ " 500 validation examples. There's also " -#~ "a single ``testloader`` (we did not " -#~ "split the test set). Again, this " -#~ "is only necessary for building research" -#~ " or educational systems, actual federated" -#~ " learning systems have their data " -#~ "naturally distributed across multiple " -#~ "partitions." 
+#~ "If you don't have ``pyenv`` installed," +#~ " the following script that will " +#~ "install it, set it up, and create" +#~ " the virtual environment (with ``Python " +#~ "3.9.20`` by default):" #~ msgstr "" -#~ msgid "|2b5c62c529f6416f840c594cce062fbb|" +#~ msgid "" +#~ "If you already have ``pyenv`` installed" +#~ " (along with the ``pyenv-virtualenv`` " +#~ "plugin), you can use the following " +#~ "convenience script (with ``Python 3.9.20`` " +#~ "by default):" #~ msgstr "" -#~ msgid "|90b334680cb7467d9a04d39b8e8dca9f|" +#~ msgid "|3a7aceef05f0421794726ac54aaf12fd|" #~ msgstr "" -#~ msgid "|65764ceee89f4335bfd93fd0b115e831|" +#~ msgid "|d741075f8e624331b42c0746f7d258a0|" #~ msgstr "" -#~ msgid "|d97319ec28bb407ea0ab9705e38f3bcf|" +#~ msgid "|8fc92d668bcb42b8bda55143847f2329|" #~ msgstr "" -#~ msgid "|11e95ac83a8548d8b3505b4663187d07|" +#~ msgid "|1c705d833a024f22adcaeb8ae3d13b0b|" #~ msgstr "" -#~ msgid "|1dab2f3a23674abc8a6731f20fa10730|" +#~ msgid "|77a037b546a84262b608e04bc82a2c96|" #~ msgstr "" -#~ msgid "|7f0ee162da38450788493a21627306f7|" +#~ msgid "|f568e24c9fb0435690ac628210a4be96|" #~ msgstr "" -#~ msgid "|296a1fb72c514b23b3d8905ff0ff98c6|" +#~ msgid "|a7bf029981514e2593aa3a2b48c9d76a|" #~ msgstr "" -#~ msgid "|5b1408eec0d746cdb91162a9107b6089|" +#~ msgid "|3f645ad807f84be8b1f8f3267173939c|" #~ msgstr "" -#~ msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" +#~ msgid "|a06a9dbd603f45819afd8e8cfc3c4b8f|" #~ msgstr "" -#~ msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" +#~ msgid "|edcf9a04d96e42608fd01a333375febe|" #~ msgstr "" -#~ msgid "|ec1fe880237247e0975f52766775ab84|" +#~ msgid "|3dae22fe797043968e2b7aa7073c78bd|" #~ msgstr "" -#~ msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" +#~ msgid "|ba178f75267d4ad8aa7363f20709195f|" #~ msgstr "" -#~ msgid "|ff726bc5505e432388ee2fdd6ef420b9|" +#~ msgid "|c380c750bfd2444abce039a1c6fa8e60|" +#~ msgstr "" + +#~ msgid "|e7cec00a114b48359935c6510595132e|" #~ msgstr "" diff --git 
a/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po b/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po index 47be2dfda762..a1598faa0ee4 100644 --- a/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po +++ b/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po @@ -7,8 +7,8 @@ msgid "" msgstr "" "Project-Id-Version: Flower main\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-05-28 11:47+0200\n" -"PO-Revision-Date: 2024-05-10 06:59+0000\n" +"POT-Creation-Date: 2024-10-10 00:29+0000\n" +"PO-Revision-Date: 2024-06-12 10:09+0000\n" "Last-Translator: Yan Gao \n" "Language: zh_Hans\n" "Language-Team: Chinese (Simplified) `_" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:16 +msgid "`Use the Flower CLI reference documentation `_" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:18 +msgid "" +"Everything listed in the reference documentation is part of the public " +"API. This document explains how Flower maintainers define the public API " +"and how you can determine whether a component is part of the public API " +"or not by reading the Flower source code." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:23 +#, fuzzy +msgid "Flower public API" +msgstr "Flower 客户端。" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:25 +msgid "Flower has a well-defined public API. Let's look at this in more detail." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:29 +msgid "" +"Every component that is reachable by recursively following " +"``__init__.__all__`` starting from the root package (``flwr``) is part of" +" the public API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:32 +msgid "" +"If you want to determine whether a component " +"(class/function/generator/...) is part of the public API or not, you need" +" to start at the root of the ``flwr`` package. 
Let's use ``tree -L 1 -d " +"src/py/flwr`` to look at the Python sub-packages contained ``flwr``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:46 +msgid "" +"Contrast this with the definition of ``__all__`` in the root " +"``src/py/flwr/__init__.py``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:59 +msgid "" +"You can see that ``flwr`` has six subpackages (``cli``, ``client``, " +"``common``, ``proto``, ``server``, ``simulation``), but only four of them" +" are \"exported\" via ``__all__`` (``client``, ``common``, ``server``, " +"``simulation``)." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:63 +msgid "" +"What does this mean? It means that ``client``, ``common``, ``server`` and" +" ``simulation`` are part of the public API, but ``cli`` and ``proto`` are" +" not. The ``flwr`` subpackages ``cli`` and ``proto`` are private APIs. A " +"private API can change completely from one release to the next (even in " +"patch releases). It can change in a breaking way, it can be renamed (for " +"example, ``flwr.cli`` could be renamed to ``flwr.command``) and it can " +"even be removed completely." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:70 +msgid "Therefore, as a Flower user:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:72 +msgid "``from flwr import client`` ✅ Ok, you're importing a public API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:73 +msgid "" +"``from flwr import proto`` ❌ Not recommended, you're importing a private " +"API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:75 +msgid "" +"What about components that are nested deeper in the hierarchy? Let's look" +" at Flower strategies to see another typical pattern. 
Flower strategies " +"like ``FedAvg`` are often imported using ``from flwr.server.strategy " +"import FedAvg``. Let's look at " +"``src/py/flwr/server/strategy/__init__.py``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:91 +msgid "" +"What's notable here is that all strategies are implemented in dedicated " +"modules (e.g., ``fedavg.py``). In ``__init__.py``, we *import* the " +"components we want to make part of the public API and then *export* them " +"via ``__all__``. Note that we export the component itself (for example, " +"the ``FedAvg`` class), but not the module it is defined in (for example, " +"``fedavg.py``). This allows us to move the definition of ``FedAvg`` into " +"a different module (or even a module in a subpackage) without breaking " +"the public API (as long as we update the import path in ``__init__.py``)." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:99 +msgid "Therefore:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:101 +msgid "" +"``from flwr.server.strategy import FedAvg`` ✅ Ok, you're importing a " +"class that is part of the public API." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:5 -msgid "Edge Client Engine" -msgstr "边缘客户端引擎" +#: ../../source/contributor-explanation-public-and-private-apis.rst:103 +msgid "" +"``from flwr.server.strategy import fedavg`` ❌ Not recommended, you're " +"importing a private module." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:7 +#: ../../source/contributor-explanation-public-and-private-apis.rst:106 msgid "" -"`Flower `_ core framework architecture with Edge " -"Client Engine" -msgstr "具有边缘客户端引擎的`Flower `核心架构" +"This approach is also implemented in the tooling that automatically " +"builds API reference docs." 
+msgstr "" -#: ../../source/contributor-explanation-architecture.rst:13 -msgid "Virtual Client Engine" -msgstr "虚拟客户端引擎" +#: ../../source/contributor-explanation-public-and-private-apis.rst:110 +msgid "Flower public API of private packages" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:112 +msgid "" +"We also use this to define the public API of private subpackages. Public," +" in this context, means the API that other ``flwr`` subpackages should " +"use. For example, ``flwr.server.driver`` is a private subpackage (it's " +"not exported via ``src/py/flwr/server/__init__.py``'s ``__all__``)." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:15 +#: ../../source/contributor-explanation-public-and-private-apis.rst:117 msgid "" -"`Flower `_ core framework architecture with Virtual " -"Client Engine" -msgstr "具有虚拟客户端引擎的`Flower `核心架构" +"Still, the private sub-package ``flwr.server.driver`` defines a " +"\"public\" API using ``__all__`` in " +"``src/py/flwr/server/driver/__init__.py``:" +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:21 -msgid "Virtual Client Engine and Edge Client Engine in the same workload" -msgstr "可同步进行的虚拟客户端引擎和边缘客户端引擎" +#: ../../source/contributor-explanation-public-and-private-apis.rst:132 +msgid "" +"The interesting part is that both ``GrpcDriver`` and ``InMemoryDriver`` " +"are never used by Flower framework users, only by other parts of the " +"Flower framework codebase. Those other parts of the codebase import, for " +"example, ``InMemoryDriver`` using ``from flwr.server.driver import " +"InMemoryDriver`` (i.e., the ``InMemoryDriver`` exported via ``__all__``)," +" not ``from flwr.server.driver.in_memory_driver import InMemoryDriver`` " +"(``in_memory_driver.py`` is the module containing the actual " +"``InMemoryDriver`` class definition)." 
+msgstr "" -#: ../../source/contributor-explanation-architecture.rst:23 +#: ../../source/contributor-explanation-public-and-private-apis.rst:140 msgid "" -"`Flower `_ core framework architecture with both " -"Virtual Client Engine and Edge Client Engine" -msgstr "具有虚拟客户端引擎和边缘客户端引擎的`Flower `核心架构" +"This is because ``flwr.server.driver`` defines a public interface for " +"other ``flwr`` subpackages. This allows codeowners of " +"``flwr.server.driver`` to refactor the package without breaking other " +"``flwr``-internal users." +msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:2 -msgid "How to build Docker Flower images locally" +#, fuzzy +msgid "How to Build Docker Flower Images Locally" msgstr "如何在本地搭建Docker Flower images" #: ../../source/contributor-how-to-build-docker-images.rst:4 @@ -62,57 +217,34 @@ msgstr "如何在本地搭建Docker Flower images" msgid "" "Flower provides pre-made docker images on `Docker Hub " "`_ that include all necessary dependencies" -" for running the SuperLink. You can also build your own custom docker " -"images from scratch with a different version of Python or Ubuntu if that " -"is what you need. In this guide, we will explain what images exist and " -"how to build them locally." +" for running the SuperLink, SuperNode or ServerApp. You can also build " +"your own custom docker images from scratch with a different version of " +"Python or Linux distribution (Ubuntu/Alpine) if that is what you need. In" +" this guide, we will explain what images exist and how to build them " +"locally." msgstr "" "Flower 在 `Docker Hub `_ " "上提供了预制的 docker 镜像,其中包括运行服务器所需的所有依赖项。如果你需要,也可以使用不同版本的 Python 或 Ubuntu " "从头开始构建自己的定制 docker 镜像。在本指南中,我们将介绍有哪些镜像,以及如何在本地构建它们。" -#: ../../source/contributor-how-to-build-docker-images.rst:9 +#: ../../source/contributor-how-to-build-docker-images.rst:10 #, fuzzy msgid "" "Before we can start, we need to meet a few prerequisites in our local " "development environment." 
msgstr "在开始之前,我们需要在本地开发环境中满足一些先决条件。" -#: ../../source/contributor-how-to-build-docker-images.rst:11 +#: ../../source/contributor-how-to-build-docker-images.rst:13 #, fuzzy -msgid "Clone the flower repository." +msgid "Clone the ``flower`` repository." msgstr "**叉花仓库**" -#: ../../source/contributor-how-to-build-docker-images.rst:17 -#: ../../source/how-to-run-flower-using-docker.rst:144 +#: ../../source/contributor-how-to-build-docker-images.rst:19 #, fuzzy msgid "Verify the Docker daemon is running." msgstr "验证 Docker 守护进程是否正在运行。" -#: ../../source/contributor-how-to-build-docker-images.rst:19 -#: ../../source/how-to-run-flower-using-docker.rst:146 -#, fuzzy -msgid "" -"Please follow the first section on :doc:`Run Flower using Docker ` which covers this step in more detail." -msgstr "" -"请阅读 :doc:`Run Flower using Docker ` " -"的第一节,其中更详细地介绍了这一步骤。" - -#: ../../source/contributor-how-to-build-docker-images.rst:23 -#, fuzzy -msgid "" -"Currently, Flower provides two images, a ``base`` image and a " -"``superlink`` image. The base image, as the name suggests, contains basic" -" dependencies that the SuperLink needs. This includes system " -"dependencies, Python and Python tools. The SuperLink image is based on " -"the base image, but it additionally installs the SuperLink using ``pip``." -msgstr "" -"目前,Flower " -"提供两个镜像,一个基础镜像和一个服务器镜像。不久还将推出客户端镜像。基础镜像,顾名思义,包含服务器和客户端都需要的基本依赖项。其中包括系统依赖项、Python" -" 和 Python 工具。服务器镜像基于基础镜像,但它会使用 ``pip`` 额外安装 Flower 服务器。" - -#: ../../source/contributor-how-to-build-docker-images.rst:28 +#: ../../source/contributor-how-to-build-docker-images.rst:21 #, fuzzy msgid "" "The build instructions that assemble the images are located in the " @@ -120,235 +252,266 @@ msgid "" "``src/docker``." 
msgstr "组装镜像的构建说明位于各自的 Dockerfile 中。你可以在 ``src/docker`` 的子目录中找到它们。" -#: ../../source/contributor-how-to-build-docker-images.rst:31 +#: ../../source/contributor-how-to-build-docker-images.rst:24 #, fuzzy msgid "" -"Both, base and SuperLink image are configured via build arguments. " -"Through build arguments, we can make our build more flexible. For " -"example, in the base image, we can specify the version of Python to " -"install using the ``PYTHON_VERSION`` build argument. Some of the build " -"arguments have default values, others must be specified when building the" -" image. All available build arguments for each image are listed in one of" -" the tables below." +"Flower Docker images are configured via build arguments. Through build " +"arguments, we can make the creation of images more flexible. For example," +" in the base image, we can specify the version of Python to install using" +" the ``PYTHON_VERSION`` build argument. Some of the build arguments have " +"default values, others must be specified when building the image. All " +"available build arguments for each image are listed in one of the tables " +"below." 
msgstr "" "基础镜像和服务器镜像都是通过构建参数配置的。通过联编参数,我们可以使联编更加灵活。例如,在基础镜像中,我们可以使用 " "``PYTHON_VERSION`` 联编参数指定要安装的 Python " "版本。有些联编参数有默认值,有些则必须在联编映像时指定。每个映像的所有可用联编参数都列在下表中。" -#: ../../source/contributor-how-to-build-docker-images.rst:38 +#: ../../source/contributor-how-to-build-docker-images.rst:32 #, fuzzy -msgid "Building the base image" +msgid "Building the Base Image" msgstr "加载数据" -#: ../../source/contributor-how-to-build-docker-images.rst:44 -#: ../../source/contributor-how-to-build-docker-images.rst:86 +#: ../../source/contributor-how-to-build-docker-images.rst:38 +#: ../../source/contributor-how-to-build-docker-images.rst:104 #, fuzzy msgid "Build argument" msgstr "构建文档" -#: ../../source/contributor-how-to-build-docker-images.rst:45 -#: ../../source/contributor-how-to-build-docker-images.rst:87 +#: ../../source/contributor-how-to-build-docker-images.rst:39 +#: ../../source/contributor-how-to-build-docker-images.rst:105 #, fuzzy msgid "Description" msgstr "停用" -#: ../../source/contributor-how-to-build-docker-images.rst:46 -#: ../../source/contributor-how-to-build-docker-images.rst:88 +#: ../../source/contributor-how-to-build-docker-images.rst:40 +#: ../../source/contributor-how-to-build-docker-images.rst:106 #, fuzzy msgid "Required" msgstr "所需变更" -#: ../../source/contributor-how-to-build-docker-images.rst:47 -#: ../../source/contributor-how-to-build-docker-images.rst:89 +#: ../../source/contributor-how-to-build-docker-images.rst:41 +#: ../../source/contributor-how-to-build-docker-images.rst:107 +#: ../../source/docker/persist-superlink-state.rst:19 +#: ../../source/docker/pin-version.rst:12 +#: ../../source/docker/set-environment-variables.rst:8 #, fuzzy msgid "Example" msgstr "实例" +#: ../../source/contributor-how-to-build-docker-images.rst:42 +msgid "``DISTRO``" +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:43 +#, fuzzy +msgid "The Linux distribution to use as the base image." 
+msgstr "基础镜像的存储库名称。" + +#: ../../source/contributor-how-to-build-docker-images.rst:44 #: ../../source/contributor-how-to-build-docker-images.rst:48 -#: ../../source/contributor-how-to-build-docker-images.rst:94 +#: ../../source/contributor-how-to-build-docker-images.rst:52 +#: ../../source/contributor-how-to-build-docker-images.rst:68 +#: ../../source/contributor-how-to-build-docker-images.rst:75 +#: ../../source/contributor-how-to-build-docker-images.rst:110 #, fuzzy -msgid "``PYTHON_VERSION``" -msgstr "Python 版本" +msgid "No" +msgstr "现在" -#: ../../source/contributor-how-to-build-docker-images.rst:49 +#: ../../source/contributor-how-to-build-docker-images.rst:45 #, fuzzy -msgid "Version of ``python`` to be installed." -msgstr "要安装的 ``python`` 版本。" +msgid "``ubuntu``" +msgstr "``UBUNTU_VERSION``" + +#: ../../source/contributor-how-to-build-docker-images.rst:46 +#, fuzzy +msgid "``DISTRO_VERSION``" +msgstr "``PIP_VERSION``" + +#: ../../source/contributor-how-to-build-docker-images.rst:47 +msgid "Version of the Linux distribution." +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:49 +msgid ":substitution-code:`|ubuntu_version|`" +msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:50 -#: ../../source/contributor-how-to-build-docker-images.rst:54 -#: ../../source/contributor-how-to-build-docker-images.rst:58 -#: ../../source/contributor-how-to-build-docker-images.rst:108 #, fuzzy -msgid "Yes" -msgstr "类型" +msgid "``PYTHON_VERSION``" +msgstr "Python 版本" #: ../../source/contributor-how-to-build-docker-images.rst:51 #, fuzzy -msgid "``3.11``" -msgstr "``1.0.0rc1``" +msgid "Version of ``python`` to be installed." 
+msgstr "要安装的 ``python`` 版本。" -#: ../../source/contributor-how-to-build-docker-images.rst:52 +#: ../../source/contributor-how-to-build-docker-images.rst:53 +msgid "``3.11`` or ``3.11.1``" +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:54 #, fuzzy msgid "``PIP_VERSION``" msgstr "``PIP_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:53 +#: ../../source/contributor-how-to-build-docker-images.rst:55 #, fuzzy msgid "Version of ``pip`` to be installed." msgstr "要安装的 ``pip` 版本。" -#: ../../source/contributor-how-to-build-docker-images.rst:55 +#: ../../source/contributor-how-to-build-docker-images.rst:56 +#: ../../source/contributor-how-to-build-docker-images.rst:60 +#: ../../source/contributor-how-to-build-docker-images.rst:64 +#: ../../source/contributor-how-to-build-docker-images.rst:114 #, fuzzy -msgid "``23.0.1``" -msgstr "``1.0.0rc1``" +msgid "Yes" +msgstr "类型" -#: ../../source/contributor-how-to-build-docker-images.rst:56 +#: ../../source/contributor-how-to-build-docker-images.rst:57 +msgid ":substitution-code:`|pip_version|`" +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:58 #, fuzzy msgid "``SETUPTOOLS_VERSION``" msgstr "设置工具版本" -#: ../../source/contributor-how-to-build-docker-images.rst:57 +#: ../../source/contributor-how-to-build-docker-images.rst:59 #, fuzzy msgid "Version of ``setuptools`` to be installed." msgstr "要安装的 `setuptools`` 版本。" -#: ../../source/contributor-how-to-build-docker-images.rst:59 -#, fuzzy -msgid "``69.0.2``" -msgstr "``1.0.0b0``" - -#: ../../source/contributor-how-to-build-docker-images.rst:60 -#: ../../source/contributor-how-to-build-docker-images.rst:98 -#, fuzzy -msgid "``UBUNTU_VERSION``" -msgstr "``UBUNTU_VERSION``" - #: ../../source/contributor-how-to-build-docker-images.rst:61 #, fuzzy -msgid "Version of the official Ubuntu Docker image." 
-msgstr "官方 Ubuntu Docker 映像的版本。" +msgid ":substitution-code:`|setuptools_version|`" +msgstr "设置工具版本" #: ../../source/contributor-how-to-build-docker-images.rst:62 #, fuzzy -msgid "Defaults to ``22.04``." -msgstr "默认为 ``22.04``。" +msgid "``FLWR_VERSION``" +msgstr "``FLWR_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:65 +#: ../../source/contributor-how-to-build-docker-images.rst:63 #, fuzzy -msgid "" -"The following example creates a base image with Python 3.11.0, pip 23.0.1" -" and setuptools 69.0.2:" -msgstr "下面的示例使用 Python 3.11.0、pip 23.0.1 和 setuptools 69.0.2 创建了基本映像:" +msgid "Version of Flower to be installed." +msgstr "要安装的 Flower 版本。" -#: ../../source/contributor-how-to-build-docker-images.rst:76 -#, fuzzy -msgid "" -"The name of image is ``flwr_base`` and the tag ``0.1.0``. Remember that " -"the build arguments as well as the name and tag can be adapted to your " -"needs. These values serve as examples only." -msgstr "图像名称为 ``flwr_base``,标记为 ``0.1.0``。请记住,编译参数以及名称和标记都可以根据需要进行调整。这些值仅供参考。" +#: ../../source/contributor-how-to-build-docker-images.rst:65 +msgid ":substitution-code:`|stable_flwr_version|`" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:80 +#: ../../source/contributor-how-to-build-docker-images.rst:66 #, fuzzy -msgid "Building the SuperLink image" -msgstr "启动服务器" +msgid "``FLWR_PACKAGE``" +msgstr "``FLWR_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:90 +#: ../../source/contributor-how-to-build-docker-images.rst:67 #, fuzzy -msgid "``BASE_REPOSITORY``" -msgstr "基础存储库" +msgid "The Flower package to be installed." +msgstr "要安装的 PyPI 软件包。" -#: ../../source/contributor-how-to-build-docker-images.rst:91 -#, fuzzy -msgid "The repository name of the base image." 
-msgstr "基础镜像的存储库名称。" +#: ../../source/contributor-how-to-build-docker-images.rst:69 +msgid "``flwr`` or ``flwr-nightly``" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:92 +#: ../../source/contributor-how-to-build-docker-images.rst:70 #, fuzzy -msgid "Defaults to ``flwr/base``." -msgstr "默认为 ``flwr/server``。" +msgid "``FLWR_VERSION_REF``" +msgstr "``FLWR_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:95 -#, fuzzy -msgid "The Python version of the base image." -msgstr "基础镜像的存储库名称。" +#: ../../source/contributor-how-to-build-docker-images.rst:71 +msgid "" +"A `direct reference " +"`_ without the ``@`` specifier. If both " +"``FLWR_VERSION`` and ``FLWR_VERSION_REF`` are specified, the " +"``FLWR_VERSION_REF`` has precedence." +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:96 +#: ../../source/contributor-how-to-build-docker-images.rst:76 #, fuzzy -msgid "Defaults to ``py3.11``." -msgstr "默认为 ``22.04``。" +msgid "`Direct Reference Examples`_" +msgstr "示例请求" -#: ../../source/contributor-how-to-build-docker-images.rst:99 +#: ../../source/contributor-how-to-build-docker-images.rst:78 #, fuzzy -msgid "The Ubuntu version of the base image." -msgstr "基础镜像的存储库名称。" +msgid "" +"The following example creates a base Ubuntu/Alpine image with Python " +"``3.11.0``, pip :substitution-code:`|pip_version|`, setuptools " +":substitution-code:`|setuptools_version|` and Flower :substitution-" +"code:`|stable_flwr_version|`:" +msgstr "下面的示例使用 Python 3.11.0、pip 23.0.1 和 setuptools 69.0.2 创建了基本映像:" -#: ../../source/contributor-how-to-build-docker-images.rst:100 +#: ../../source/contributor-how-to-build-docker-images.rst:93 #, fuzzy -msgid "Defaults to ``ubuntu22.04``." -msgstr "默认为 ``py3.11-ubuntu22.04``。" +msgid "" +"In this example, we specify our image name as ``flwr_base`` and the tag " +"as ``0.1.0``. Remember that the build arguments as well as the name and " +"tag can be adapted to your needs. 
These values serve as examples only." +msgstr "图像名称为 ``flwr_base``,标记为 ``0.1.0``。请记住,编译参数以及名称和标记都可以根据需要进行调整。这些值仅供参考。" -#: ../../source/contributor-how-to-build-docker-images.rst:102 +#: ../../source/contributor-how-to-build-docker-images.rst:98 #, fuzzy -msgid "``FLWR_PACKAGE``" -msgstr "``FLWR_VERSION``" +msgid "Building a Flower Binary Image" +msgstr "加载数据" -#: ../../source/contributor-how-to-build-docker-images.rst:103 -msgid "The PyPI package to install." -msgstr "" +#: ../../source/contributor-how-to-build-docker-images.rst:108 +#, fuzzy +msgid "``BASE_REPOSITORY``" +msgstr "基础存储库" -#: ../../source/contributor-how-to-build-docker-images.rst:104 +#: ../../source/contributor-how-to-build-docker-images.rst:109 #, fuzzy -msgid "Defaults to ``flwr``." -msgstr "默认为 ``flwr/server``。" +msgid "The repository name of the base image." +msgstr "基础镜像的存储库名称。" -#: ../../source/contributor-how-to-build-docker-images.rst:106 +#: ../../source/contributor-how-to-build-docker-images.rst:111 #, fuzzy -msgid "``FLWR_VERSION``" +msgid "``flwr/base``" msgstr "``FLWR_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:107 +#: ../../source/contributor-how-to-build-docker-images.rst:112 #, fuzzy -msgid "Version of Flower to be installed." -msgstr "要安装的 Flower 版本。" +msgid "``BASE_IMAGE``" +msgstr "基础存储库" -#: ../../source/contributor-how-to-build-docker-images.rst:109 +#: ../../source/contributor-how-to-build-docker-images.rst:113 #, fuzzy -msgid "``1.8.0``" -msgstr "``1.0.0b0``" +msgid "The Tag of the Flower base image." 
+msgstr "基础镜像的存储库名称。" -#: ../../source/contributor-how-to-build-docker-images.rst:112 -#, fuzzy -msgid "" -"The following example creates a SuperLink image with the official Flower " -"base image py3.11-ubuntu22.04 and Flower 1.8.0:" -msgstr "下面的示例使用官方的 Flower 基本镜像 py3.11-ubuntu22.04 和 Flower 1.7.0 创建了一个服务器镜像:" +#: ../../source/contributor-how-to-build-docker-images.rst:115 +msgid ":substitution-code:`|stable_flwr_version|-py3.11-ubuntu|ubuntu_version|`" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:122 -#, fuzzy +#: ../../source/contributor-how-to-build-docker-images.rst:117 msgid "" -"The name of image is ``flwr_superlink`` and the tag ``0.1.0``. Remember " -"that the build arguments as well as the name and tag can be adapted to " -"your needs. These values serve as examples only." -msgstr "图像名称为 ``flwr_server``,标记为 ``0.1.0``。请记住,编译参数以及名称和标记都可以根据需要进行调整。这些值仅供参考。" +"For example, to build a SuperLink image with the latest Flower version, " +"Python 3.11 and Ubuntu 22.04, run the following:" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:125 +#: ../../source/contributor-how-to-build-docker-images.rst:128 #, fuzzy msgid "" "If you want to use your own base image instead of the official Flower " -"base image, all you need to do is set the ``BASE_REPOSITORY``, " -"``PYTHON_VERSION`` and ``UBUNTU_VERSION`` build arguments." +"base image, all you need to do is set the ``BASE_REPOSITORY`` build " +"argument to ``flwr_base`` (as we've specified above)." 
msgstr "" "如果您想使用自己的基础图片而不是 Flower 官方的基础图片,只需设置 ``BASE_REPOSITORY`` 和 " "``BASE_IMAGE_TAG`` " "联编参数即可。`BASE_REPOSITORY``的值必须与您的图像名称一致,`BASE_IMAGE_TAG``的值必须与您的图像标签一致。" -#: ../../source/contributor-how-to-build-docker-images.rst:138 +#: ../../source/contributor-how-to-build-docker-images.rst:140 #, fuzzy msgid "After creating the image, we can test whether the image is working:" msgstr "创建图像后,我们可以测试图像是否正常工作:" +#: ../../source/contributor-how-to-build-docker-images.rst:147 +#, fuzzy +msgid "Direct Reference Examples" +msgstr "示例请求" + #: ../../source/contributor-how-to-contribute-translations.rst:2 msgid "Contribute translations" msgstr "贡献译文" @@ -394,7 +557,7 @@ msgstr "" "您需要做的第一件事就是在本`网页`_上创建一个免费的Weblate帐户。有关个人资料设置的更多信息,请参阅`这里" " `_。" -#: ../../source/contributor-how-to-contribute-translations.rst:29 +#: ../../source/contributor-how-to-contribute-translations.rst:28 msgid "" "Once you are signed in to Weblate, you can navigate to the `Flower " "Framework project `_。在这里,您可以看到网站上现有的各种语言。" -#: ../../source/contributor-how-to-contribute-translations.rst:34 +#: ../../source/contributor-how-to-contribute-translations.rst:32 msgid "" "Once you have selected the language you want to contribute to, you should" " see a similar interface to this:" msgstr "选择您要贡献的语言后,您应该会看到与此类似的界面:" -#: ../../source/contributor-how-to-contribute-translations.rst:39 +#: ../../source/contributor-how-to-contribute-translations.rst:37 msgid "" "The most straight forward option here is to click on the ``Translate`` " "button on the top right (in the ``Translation status`` section). This " @@ -419,11 +582,11 @@ msgid "" "untranslated strings." 
msgstr "最简单的方法是点击右上角(\"翻译状态 \"部分)的 \"翻译 \"按钮。这将自动带您进入未翻译字符串的翻译界面。" -#: ../../source/contributor-how-to-contribute-translations.rst:43 +#: ../../source/contributor-how-to-contribute-translations.rst:41 msgid "This is what the interface looks like:" msgstr "这就是界面的样子:" -#: ../../source/contributor-how-to-contribute-translations.rst:47 +#: ../../source/contributor-how-to-contribute-translations.rst:45 #, fuzzy msgid "" "You input your translation in the text box at the top and then, once you " @@ -437,7 +600,7 @@ msgstr "" "\"保存并继续\"(保存翻译内容并转到下一个未翻译的字符串)、\"保存并停留\"(保存翻译内容并停留在同一页面)、\"建议\"(将您的翻译添加到建议中供其他用户查看)或" " \"跳过\"(转到下一个未翻译的字符串而不保存任何内容)。" -#: ../../source/contributor-how-to-contribute-translations.rst:54 +#: ../../source/contributor-how-to-contribute-translations.rst:51 msgid "" "In order to help with the translations, you can see on the bottom the " "``Nearby strings``, the ``Comments`` (from other contributors), the " @@ -448,14 +611,14 @@ msgstr "" "为了帮助翻译,您可以在底部看到 \"邻近字符串\"、\"评论\"(来自其他贡献者)、\"自动建议\"(来自机器翻译引擎)、\"其他语言 " "\"中的翻译以及该字符串的 \"历史翻译\"。" -#: ../../source/contributor-how-to-contribute-translations.rst:59 +#: ../../source/contributor-how-to-contribute-translations.rst:56 msgid "" "On the right, under the ``String information`` section, you can also " "click the link under ``Source string location`` in order to view the " "source of the doc file containing the string." 
msgstr "在右侧的 \"字符串信息 \"部分,您还可以单击 \"源字符串位置 \"下的链接,以查看包含字符串的 doc 文件的源文件。" -#: ../../source/contributor-how-to-contribute-translations.rst:63 +#: ../../source/contributor-how-to-contribute-translations.rst:60 msgid "" "For more information about translating using Weblate, you can check out " "this `in-depth guide " @@ -464,11 +627,11 @@ msgstr "" "有关使用 Weblate 进行翻译的更多信息,您可以查看本 \"深入指南 " "`_\"。" -#: ../../source/contributor-how-to-contribute-translations.rst:67 +#: ../../source/contributor-how-to-contribute-translations.rst:64 msgid "Add new languages" msgstr "添加新语言" -#: ../../source/contributor-how-to-contribute-translations.rst:69 +#: ../../source/contributor-how-to-contribute-translations.rst:66 msgid "" "If you want to add a new language, you will first have to contact us, " "either on `Slack `_, or by opening an issue" @@ -477,127 +640,6 @@ msgstr "" "如果您想添加新语言,请先联系我们,可以在 `Slack `_ 上联系,也可以在我们的 " "`GitHub repo `_ 上提交问题。" -#: ../../source/contributor-how-to-create-new-messages.rst:2 -msgid "Creating New Messages" -msgstr "创建新信息" - -#: ../../source/contributor-how-to-create-new-messages.rst:4 -msgid "" -"This is a simple guide for creating a new type of message between the " -"server and clients in Flower." -msgstr "这是一个如何用Flower在服务器和客户端之间创建新类型的信息的简要指导。" - -#: ../../source/contributor-how-to-create-new-messages.rst:6 -msgid "" -"Let's suppose we have the following example functions in " -":code:`server.py` and :code:`numpy_client.py`..." -msgstr "假设我们在脚本code:`server.py`和code:`numpy_client.py`中有以下的示例函数..." - -#: ../../source/contributor-how-to-create-new-messages.rst:8 -msgid "Server's side:" -msgstr "在服务器端:" - -#: ../../source/contributor-how-to-create-new-messages.rst:17 -msgid "Client's side:" -msgstr "在客户端:" - -#: ../../source/contributor-how-to-create-new-messages.rst:26 -msgid "" -"Let's now see what we need to implement in order to get this simple " -"function between the server and client to work!" -msgstr "现在让我们来看看,为了让服务器和客户端之间的这个简单的函数正常工作,我们需要实现哪些功能!" 
- -#: ../../source/contributor-how-to-create-new-messages.rst:30 -msgid "Message Types for Protocol Buffers" -msgstr "协议缓冲区的信息类型" - -#: ../../source/contributor-how-to-create-new-messages.rst:32 -#, fuzzy -msgid "" -"The first thing we need to do is to define a message type for the RPC " -"system in :code:`transport.proto`. Note that we have to do it for both " -"the request and response messages. For more details on the syntax of " -"proto3, please see the `official documentation `_." -msgstr "" -"我们需要做的第一件事是在脚本code:`transport.proto`中定义 RPC " -"系统的消息类型。请注意,我们必须对请求信息和响应信息都这样做。有关 proto3 语法的更多详情,请参阅官方文档 " -"`_。" - -#: ../../source/contributor-how-to-create-new-messages.rst:35 -msgid "Within the :code:`ServerMessage` block:" -msgstr "在 :code:`ServerMessage` 代码块中:" - -#: ../../source/contributor-how-to-create-new-messages.rst:52 -msgid "Within the ClientMessage block:" -msgstr "在 ClientMessage 代码块中:" - -#: ../../source/contributor-how-to-create-new-messages.rst:70 -msgid "" -"Make sure to also add a field of the newly created message type in " -":code:`oneof msg`." -msgstr "确保在 :code:`oneof msg` 中也添加一个新创建的消息类型字段。" - -#: ../../source/contributor-how-to-create-new-messages.rst:72 -msgid "Once that is done, we will compile the file with:" -msgstr "完成后,我们将使用:" - -#: ../../source/contributor-how-to-create-new-messages.rst:78 -msgid "If it compiles successfully, you should see the following message:" -msgstr "如果编译成功,你应该会看到以下信息:" - -#: ../../source/contributor-how-to-create-new-messages.rst:87 -msgid "Serialization and Deserialization Functions" -msgstr "序列化和反序列化函数" - -#: ../../source/contributor-how-to-create-new-messages.rst:89 -msgid "" -"Our next step is to add functions to serialize and deserialize Python " -"datatypes to or from our defined RPC message types. You should add these " -"functions in :code:`serde.py`." 
-msgstr "" -"下一步是添加函数,以便将 Python 数据类型序列化和反序列化为我们定义的 RPC 消息类型或从我们定义的 RPC 消息类型反序列化和反序列化 " -"Python 数据类型。您应该在 :code:`serde.py` 中添加这些函数。" - -#: ../../source/contributor-how-to-create-new-messages.rst:91 -msgid "The four functions:" -msgstr "四种函数:" - -#: ../../source/contributor-how-to-create-new-messages.rst:112 -msgid "Sending the Message from the Server" -msgstr "从服务器发送信息" - -#: ../../source/contributor-how-to-create-new-messages.rst:114 -msgid "" -"Now write the request function in your Client Proxy class (e.g., " -":code:`grpc_client_proxy.py`) using the serde functions you just created:" -msgstr "现在,在客户端代理类(例如 :code:`grpc_client_proxy.py`)中使用刚才创建的 serde 函数编写请求函数:" - -#: ../../source/contributor-how-to-create-new-messages.rst:128 -msgid "Receiving the Message by the Client" -msgstr "由客户端接收信息" - -#: ../../source/contributor-how-to-create-new-messages.rst:130 -msgid "" -"Last step! Modify the code in :code:`message_handler.py` to check the " -"field of your message and call the :code:`example_response` function. " -"Remember to use the serde functions!" -msgstr "" -"最后一步 修改 :code:`message_handler.py` 中的代码,检查信息的字段并调用 " -":code:`example_response` 函数。记住使用 serde 函数!" - -#: ../../source/contributor-how-to-create-new-messages.rst:132 -msgid "Within the handle function:" -msgstr "在句柄函数内:" - -#: ../../source/contributor-how-to-create-new-messages.rst:139 -msgid "And add a new function:" -msgstr "并增加一个新函数:" - -#: ../../source/contributor-how-to-create-new-messages.rst:149 -msgid "Hopefully, when you run your program you will get the intended result!" -msgstr "希望您在运行程序时能得到预期的结果!" 
- #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:2 msgid "Develop in VSCode Dev Containers" msgstr "使用 VSCode Dev Containers 进行开发" @@ -612,24 +654,24 @@ msgstr "" "在开发 Flower 框架时,我们希望确保所有贡献者使用相同的开发环境来格式化代码或运行测试。为此,我们使用了 VSCode " "远程容器扩展。这是什么?请阅读下面这段话:" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:7 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:8 +#, fuzzy msgid "" "The Visual Studio Code Remote - Containers extension lets you use a " "Docker container as a fully-featured development environment. It allows " "you to open any folder inside (or mounted into) a container and take " "advantage of Visual Studio Code's full feature set. A " -":code:`devcontainer.json` file in your project tells VS Code how to " -"access (or create) a development container with a well-defined tool and " -"runtime stack. This container can be used to run an application or to " -"separate tools, libraries, or runtimes needed for working with a " -"codebase." +"``devcontainer.json`` file in your project tells VS Code how to access " +"(or create) a development container with a well-defined tool and runtime " +"stack. This container can be used to run an application or to separate " +"tools, libraries, or runtimes needed for working with a codebase." msgstr "" "Visual Studio Code Remote - " "Containers扩展可让你将Docker容器用作功能齐全的开发环境。它允许你打开容器内(或挂载到容器内)的任何文件夹,并利用 Visual " "Studio Code 的全部功能集。项目中的 :code:`devcontainer.json` 文件会告诉 VS Code " "如何访问(或创建)一个带有定义明确的工具和运行时栈的开发容器。该容器可用于运行应用程序,也可用于分离处理代码库所需的工具、库或运行时。" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:9 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:16 msgid "" "Workspace files are mounted from the local file system or copied or " "cloned into the container. Extensions are installed and run inside the " @@ -638,33 +680,33 @@ msgid "" " environment just by connecting to a different container." 
msgstr "工作区文件从本地文件系统加载,或复制或克隆到容器中。扩展在容器内安装和运行,在容器内它们可以完全访问工具、平台和文件系统。这意味着,只需连接到不同的容器,就能无缝切换整个开发环境。" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:11 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:22 #, fuzzy msgid "" "Source: `Official VSCode documentation " "`_" msgstr "来源:`VSCode 官方文档 `_" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:15 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:26 msgid "Getting started" msgstr "开始" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:17 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:28 #, fuzzy msgid "" -"Configuring and setting up the :code:`Dockerfile` as well the " -"configuration for the devcontainer can be a bit more involved. The good " -"thing is you don't have to do it. Usually it should be enough to install " -"`Docker `_ on your system and " -"ensure its available on your command line. Additionally, install the " -"`VSCode Containers Extension `_ on your system and ensure its" +" available on your command line. Additionally, install the `VSCode " +"Containers Extension `_." msgstr "" "配置和设置 :code:`Dockerfile` 以及 devcontainer 的配置可能比较复杂。好在你想做就得做。通常只需在系统中安装 " "Docker 并确保其在命令行中可用即可。此外,请安装 `VSCode Containers Extension " "`_。" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:19 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:35 msgid "" "Now you should be good to go. 
When starting VSCode, it will ask you to " "run in the container environment and - if you confirm - automatically " @@ -676,13 +718,13 @@ msgstr "" "现在你应该可以开始了。启动 VSCode 时,它会要求你在容器环境中运行,如果你确认,它会自动构建容器并使用它。要手动指示 VSCode 使用 " "devcontainer,可以在安装扩展后,点击 VSCode 窗口左下角的绿色区域,然后选择 \"*(重新)在容器中打开文件夹*\"选项。" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:21 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:41 msgid "" "In some cases your setup might be more involved. For those cases consult " "the following sources:" msgstr "在某些情况下,您的设置可能更复杂。有关这些情况,请参考以下资料:" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:23 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:44 #, fuzzy msgid "" "`Developing inside a Container " @@ -692,7 +734,7 @@ msgstr "" "在容器内开发 `_" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:24 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:46 #, fuzzy msgid "" "`Remote development in Containers " @@ -721,13 +763,13 @@ msgstr "" "依赖关系,然后重新安装(运行 ``poetry install` 前,别忘了删除 ``poetry.lock` (``rm " "poetry.lock`))。" -#: ../../source/contributor-how-to-install-development-versions.rst:12 +#: ../../source/contributor-how-to-install-development-versions.rst:14 msgid "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (without " "extras)" msgstr "``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (不含额外内容)" -#: ../../source/contributor-how-to-install-development-versions.rst:13 +#: ../../source/contributor-how-to-install-development-versions.rst:15 msgid "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true, extras = " "[\"simulation\"] }`` (with extras)" @@ -735,17 +777,17 @@ msgstr "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true, extras = " "[\"simulation\"] }`` (包含额外内容)" -#: ../../source/contributor-how-to-install-development-versions.rst:15 +#: 
../../source/contributor-how-to-install-development-versions.rst:18 msgid "" "Install ``flwr`` from a local copy of the Flower source code via " "``pyproject.toml``:" msgstr "通过 ``pyproject.toml`` 从 Flower 源代码的本地副本安装 ``flwr``:" -#: ../../source/contributor-how-to-install-development-versions.rst:17 +#: ../../source/contributor-how-to-install-development-versions.rst:20 msgid "``flwr = { path = \"../../\", develop = true }`` (without extras)" msgstr "``flwr = { path = \"../../\", develop = true }`` (不含额外内容)" -#: ../../source/contributor-how-to-install-development-versions.rst:18 +#: ../../source/contributor-how-to-install-development-versions.rst:21 msgid "" "``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] " "}`` (with extras)" @@ -753,18 +795,18 @@ msgstr "" "``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] " "}`` (包含额外内容)" -#: ../../source/contributor-how-to-install-development-versions.rst:20 +#: ../../source/contributor-how-to-install-development-versions.rst:23 msgid "Install ``flwr`` from a local wheel file via ``pyproject.toml``:" msgstr "通过 ``pyproject.toml`` 从本地轮子文件安装 ``flwr``:" -#: ../../source/contributor-how-to-install-development-versions.rst:22 +#: ../../source/contributor-how-to-install-development-versions.rst:25 #, fuzzy msgid "" "``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\" }`` (without" " extras)" msgstr "``flwr = { path = \"../../dist/flwr-1.0.0-py3-none-any.whl\" }``(无额外内容)" -#: ../../source/contributor-how-to-install-development-versions.rst:23 +#: ../../source/contributor-how-to-install-development-versions.rst:26 #, fuzzy msgid "" "``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\", extras = " @@ -773,7 +815,7 @@ msgstr "" "``flwr = { path = \"../../dist/flwr-1.0.0-py3-none-any.whl\", extras = " "[\"simulation\"] }`` (包含额外内容)" -#: ../../source/contributor-how-to-install-development-versions.rst:25 +#: ../../source/contributor-how-to-install-development-versions.rst:29 
msgid "" "Please refer to the Poetry documentation for further details: `Poetry " "Dependency Specification `_" -#: ../../source/contributor-how-to-install-development-versions.rst:28 +#: ../../source/contributor-how-to-install-development-versions.rst:33 msgid "Using pip (recommended on Colab)" msgstr "使用 pip(建议在 Colab 上使用)" -#: ../../source/contributor-how-to-install-development-versions.rst:30 +#: ../../source/contributor-how-to-install-development-versions.rst:35 msgid "Install a ``flwr`` pre-release from PyPI:" msgstr "从 PyPI 安装 ``flwr`` 预发行版:" -#: ../../source/contributor-how-to-install-development-versions.rst:32 +#: ../../source/contributor-how-to-install-development-versions.rst:37 msgid "``pip install -U --pre flwr`` (without extras)" -msgstr "`pip install -U -pre flwr``(不含额外功能)" +msgstr "``pip install -U --pre flwr``(不含额外功能)" -#: ../../source/contributor-how-to-install-development-versions.rst:33 -msgid "``pip install -U --pre flwr[simulation]`` (with extras)" -msgstr "`pip install -U -pre flwr[simulation]``(包含额外功能)" +#: ../../source/contributor-how-to-install-development-versions.rst:38 +msgid "``pip install -U --pre 'flwr[simulation]'`` (with extras)" +msgstr "``pip install -U --pre 'flwr[simulation]'``(包含额外功能)" -#: ../../source/contributor-how-to-install-development-versions.rst:35 +#: ../../source/contributor-how-to-install-development-versions.rst:40 msgid "" "Python packages can be installed from git repositories. Use one of the " "following commands to install the Flower directly from GitHub." 
msgstr "Python 软件包可以从 git 仓库安装。使用以下命令之一直接从 GitHub 安装 Flower。" -#: ../../source/contributor-how-to-install-development-versions.rst:37 +#: ../../source/contributor-how-to-install-development-versions.rst:43 msgid "Install ``flwr`` from the default GitHub branch (``main``):" msgstr "从 GitHub 的默认分支 (``main`) 安装 ``flwr``:" -#: ../../source/contributor-how-to-install-development-versions.rst:39 +#: ../../source/contributor-how-to-install-development-versions.rst:45 msgid "" "``pip install flwr@git+https://github.com/adap/flower.git`` (without " "extras)" -msgstr "`pip install flwr@git+https://github.com/adap/flower.git`` (不含额外功能)" +msgstr "``pip install flwr@git+https://github.com/adap/flower.git`` (不含额外功能)" -#: ../../source/contributor-how-to-install-development-versions.rst:40 +#: ../../source/contributor-how-to-install-development-versions.rst:46 msgid "" -"``pip install flwr[simulation]@git+https://github.com/adap/flower.git`` " -"(with extras)" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'``" +" (with extras)" msgstr "" -"`pip install " -"flwr[simulation]@git+https://github.com/adap/flower.git``(带附加功能)" +"``pip install " +"'flwr[simulation]@git+https://github.com/adap/flower.git'``(带附加功能)" -#: ../../source/contributor-how-to-install-development-versions.rst:42 +#: ../../source/contributor-how-to-install-development-versions.rst:49 msgid "Install ``flwr`` from a specific GitHub branch (``branch-name``):" msgstr "从特定的 GitHub 分支 (`分支名`) 安装 ``flwr``:" -#: ../../source/contributor-how-to-install-development-versions.rst:44 +#: ../../source/contributor-how-to-install-development-versions.rst:51 msgid "" "``pip install flwr@git+https://github.com/adap/flower.git@branch-name`` " "(without extras)" msgstr "" -"`pip install flwr@git+https://github.com/adap/flower.git@branch-name`` " +"``pip install flwr@git+https://github.com/adap/flower.git@branch-name`` " "(不含附加功能)" -#: ../../source/contributor-how-to-install-development-versions.rst:45 +#: 
../../source/contributor-how-to-install-development-versions.rst:53 msgid "" -"``pip install flwr[simulation]@git+https://github.com/adap/flower.git" -"@branch-name`` (with extras)" -msgstr "`pip安装flwr[模拟]@git+https://github.com/adap/flower.git@分支名``(带附加功能)" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git" +"@branch-name'`` (with extras)" +msgstr "" +"``pip install " +"'flwr[simulation]@git+https://github.com/adap/flower.git@分支名'``(带附加功能)" -#: ../../source/contributor-how-to-install-development-versions.rst:49 +#: ../../source/contributor-how-to-install-development-versions.rst:57 msgid "Open Jupyter Notebooks on Google Colab" msgstr "在谷歌 Colab 上打开 Jupyter 笔记本" -#: ../../source/contributor-how-to-install-development-versions.rst:51 +#: ../../source/contributor-how-to-install-development-versions.rst:59 #, fuzzy msgid "" "Open the notebook ``doc/source/tutorial-series-get-started-with-flower-" "pytorch.ipynb``:" msgstr "打开笔记本 ``doc/source/tutorial-get-started-with-flower-pytorch.ipynb``:" -#: ../../source/contributor-how-to-install-development-versions.rst:53 +#: ../../source/contributor-how-to-install-development-versions.rst:61 #, fuzzy msgid "" "https://colab.research.google.com/github/adap/flower/blob/main/doc/source" @@ -860,7 +904,7 @@ msgstr "" "https://colab.research.google.com/github/adap/flower/blob/main/doc/source" "/tutorial-get-started-with-flower-pytorch.ipynb" -#: ../../source/contributor-how-to-install-development-versions.rst:55 +#: ../../source/contributor-how-to-install-development-versions.rst:63 msgid "" "Open a development version of the same notebook from branch `branch-name`" " by changing ``main`` to ``branch-name`` (right after ``blob``):" @@ -868,7 +912,7 @@ msgstr "" "将 ``main`` 改为 ``branch-name``(紧跟在 ``blob``之后),从分支 `branch-name` " "打开同一笔记本的开发版本:" -#: ../../source/contributor-how-to-install-development-versions.rst:57 +#: ../../source/contributor-how-to-install-development-versions.rst:66 #, fuzzy msgid "" 
"https://colab.research.google.com/github/adap/flower/blob/branch-" @@ -877,22 +921,22 @@ msgstr "" "https://colab.research.google.com/github/adap/flower/blob/branch-" "name/doc/source/tutorial-get-started-with-flower-pytorch.ipynb" -#: ../../source/contributor-how-to-install-development-versions.rst:59 +#: ../../source/contributor-how-to-install-development-versions.rst:68 msgid "Install a `whl` on Google Colab:" msgstr "在 Google Colab 上安装 `whl`:" -#: ../../source/contributor-how-to-install-development-versions.rst:61 +#: ../../source/contributor-how-to-install-development-versions.rst:70 msgid "" "In the vertical icon grid on the left hand side, select ``Files`` > " "``Upload to session storage``" msgstr "在左侧的垂直图标网格中,选择 \"文件\">\"上传到会话存储\"" -#: ../../source/contributor-how-to-install-development-versions.rst:62 +#: ../../source/contributor-how-to-install-development-versions.rst:72 #, fuzzy msgid "Upload the whl (e.g., ``flwr-1.8.0-py3-none-any.whl``)" msgstr "更新 whl (e.g., ``flwr-1.7.0-py3-none-any.whl``)" -#: ../../source/contributor-how-to-install-development-versions.rst:63 +#: ../../source/contributor-how-to-install-development-versions.rst:73 #, fuzzy msgid "" "Change ``!pip install -q 'flwr[simulation]' torch torchvision " @@ -913,18 +957,18 @@ msgid "" "change in the future." msgstr "本文件描述了当前的发布流程。今后可能会有变化,也可能不会有变化。" -#: ../../source/contributor-how-to-release-flower.rst:7 +#: ../../source/contributor-how-to-release-flower.rst:8 msgid "During the release" msgstr "在发布期间" -#: ../../source/contributor-how-to-release-flower.rst:9 +#: ../../source/contributor-how-to-release-flower.rst:10 msgid "" "The version number of a release is stated in ``pyproject.toml``. 
To " "release a new version of Flower, the following things need to happen (in " "that order):" msgstr "版本号在 ``pyproject.toml`` 中说明。要发布 Flower 的新版本,需要完成以下工作(按顺序排列):" -#: ../../source/contributor-how-to-release-flower.rst:11 +#: ../../source/contributor-how-to-release-flower.rst:13 #, fuzzy msgid "" "Run ``python3 src/py/flwr_tool/update_changelog.py `` in " @@ -934,7 +978,7 @@ msgstr "" "运行 ``python3 src/py/flwr_tool/update_changelog.py `` " "以将每项新更改添加到更新日志中(之后可对更新日志进行手动更改,直到看起来不错为止)。" -#: ../../source/contributor-how-to-release-flower.rst:12 +#: ../../source/contributor-how-to-release-flower.rst:16 #, fuzzy msgid "" "Once the changelog has been updated with all the changes, run ``./dev" @@ -948,7 +992,7 @@ msgstr "" "v``,其中````是``pyproject.toml``中的版本(注意前面的``v``)。这将用版本和当前日期替换更新日志中的" " ``Unreleased`` 标头,并为贡献者添加一条感谢信息。打开一个包含这些更改的拉取请求。" -#: ../../source/contributor-how-to-release-flower.rst:13 +#: ../../source/contributor-how-to-release-flower.rst:22 #, fuzzy msgid "" "Once the pull request is merged, tag the release commit with the version " @@ -960,93 +1004,93 @@ msgstr "" "在 PR 合并后立即用版本号标记发布提交:``git tag v0.12.3``,然后``git push --tags``。这将在 GitHub" " 上创建一个包含正确工件和更新日志相关部分的发布草案。" -#: ../../source/contributor-how-to-release-flower.rst:14 +#: ../../source/contributor-how-to-release-flower.rst:26 msgid "Check the draft release on GitHub, and if everything is good, publish it." msgstr "检查 GitHub 上的发布稿,如果一切正常,就发布它。" -#: ../../source/contributor-how-to-release-flower.rst:17 +#: ../../source/contributor-how-to-release-flower.rst:29 msgid "After the release" msgstr "发布后" -#: ../../source/contributor-how-to-release-flower.rst:19 +#: ../../source/contributor-how-to-release-flower.rst:31 msgid "Create a pull request which contains the following changes:" msgstr "创建包含以下更改的拉取请求:" -#: ../../source/contributor-how-to-release-flower.rst:21 +#: ../../source/contributor-how-to-release-flower.rst:33 msgid "Increase the minor version in ``pyproject.toml`` by one." 
msgstr "将 ``pyproject.toml`` 中的次要版本增加一个。" -#: ../../source/contributor-how-to-release-flower.rst:22 +#: ../../source/contributor-how-to-release-flower.rst:34 msgid "Update all files which contain the current version number if necessary." msgstr "如有必要,更新包含当前版本号的所有文件。" -#: ../../source/contributor-how-to-release-flower.rst:23 +#: ../../source/contributor-how-to-release-flower.rst:35 msgid "Add a new ``Unreleased`` section in ``changelog.md``." msgstr "在 ``changelog.md`` 中添加新的 ``Unreleased`` 部分。" -#: ../../source/contributor-how-to-release-flower.rst:25 +#: ../../source/contributor-how-to-release-flower.rst:37 msgid "" "Merge the pull request on the same day (i.e., before a new nightly " "release gets published to PyPI)." msgstr "在同一天合并拉取请求(即在新版本发布到 PyPI 之前)。" -#: ../../source/contributor-how-to-release-flower.rst:28 +#: ../../source/contributor-how-to-release-flower.rst:41 msgid "Publishing a pre-release" msgstr "发布预发布版本" -#: ../../source/contributor-how-to-release-flower.rst:31 +#: ../../source/contributor-how-to-release-flower.rst:44 msgid "Pre-release naming" msgstr "释放前命名" -#: ../../source/contributor-how-to-release-flower.rst:33 +#: ../../source/contributor-how-to-release-flower.rst:46 msgid "" "PyPI supports pre-releases (alpha, beta, release candidate). 
Pre-releases" " MUST use one of the following naming patterns:" msgstr "PyPI 支持预发布版本(alpha、beta、release candidate)。预发布版本必须使用以下命名模式之一:" -#: ../../source/contributor-how-to-release-flower.rst:35 +#: ../../source/contributor-how-to-release-flower.rst:49 msgid "Alpha: ``MAJOR.MINOR.PATCHaN``" msgstr "阿尔法 ``MAJOR.MINOR.PATCHaN``" -#: ../../source/contributor-how-to-release-flower.rst:36 +#: ../../source/contributor-how-to-release-flower.rst:50 msgid "Beta: ``MAJOR.MINOR.PATCHbN``" msgstr "贝塔: ``MAJOR.MINOR.PATCHbN``" -#: ../../source/contributor-how-to-release-flower.rst:37 +#: ../../source/contributor-how-to-release-flower.rst:51 msgid "Release candidate (RC): ``MAJOR.MINOR.PATCHrcN``" msgstr "版本代号 (RC): ``MAJOR.MINOR.PATCHrcN``" -#: ../../source/contributor-how-to-release-flower.rst:39 +#: ../../source/contributor-how-to-release-flower.rst:53 msgid "Examples include:" msgstr "例子包括:" -#: ../../source/contributor-how-to-release-flower.rst:41 +#: ../../source/contributor-how-to-release-flower.rst:55 msgid "``1.0.0a0``" msgstr "``1.0.0a0``" -#: ../../source/contributor-how-to-release-flower.rst:42 +#: ../../source/contributor-how-to-release-flower.rst:56 msgid "``1.0.0b0``" msgstr "``1.0.0b0``" -#: ../../source/contributor-how-to-release-flower.rst:43 +#: ../../source/contributor-how-to-release-flower.rst:57 msgid "``1.0.0rc0``" msgstr "``1.0.0rc0``" -#: ../../source/contributor-how-to-release-flower.rst:44 +#: ../../source/contributor-how-to-release-flower.rst:58 msgid "``1.0.0rc1``" msgstr "``1.0.0rc1``" -#: ../../source/contributor-how-to-release-flower.rst:46 +#: ../../source/contributor-how-to-release-flower.rst:60 msgid "" "This is in line with PEP-440 and the recommendations from the Python " "Packaging Authority (PyPA):" msgstr "这符合 PEP-440 和 Python 包装管理局 (PyPA) 的建议:" -#: ../../source/contributor-how-to-release-flower.rst:49 +#: ../../source/contributor-how-to-release-flower.rst:63 msgid "`PEP-440 `_" msgstr "`PEP-440 `_" -#: 
../../source/contributor-how-to-release-flower.rst:50 +#: ../../source/contributor-how-to-release-flower.rst:64 msgid "" "`PyPA Choosing a versioning scheme " "`_" -#: ../../source/contributor-how-to-release-flower.rst:52 +#: ../../source/contributor-how-to-release-flower.rst:67 msgid "" "Note that the approach defined by PyPA is not compatible with SemVer " "2.0.0 spec, for details consult the `Semantic Versioning Specification " @@ -1066,26 +1110,26 @@ msgstr "" "规范不兼容,详情请查阅《语义版本规范》`_(特别是关于优先级的第 11 项)。" -#: ../../source/contributor-how-to-release-flower.rst:55 +#: ../../source/contributor-how-to-release-flower.rst:73 msgid "Pre-release classification" msgstr "发布前分类" -#: ../../source/contributor-how-to-release-flower.rst:57 +#: ../../source/contributor-how-to-release-flower.rst:75 msgid "Should the next pre-release be called alpha, beta, or release candidate?" msgstr "下一个预发布版应该叫阿尔法版、贝塔版还是候选发布版?" -#: ../../source/contributor-how-to-release-flower.rst:59 +#: ../../source/contributor-how-to-release-flower.rst:77 msgid "" "RC: feature complete, no known issues (apart from issues that are " "classified as \"won't fix\" for the next stable release) - if no issues " "surface this will become the next stable release" msgstr "RC:功能完整,无已知问题(除了下一个稳定版中被列为 \"不会修复 \"的问题)--如果没有问题出现,这将成为下一个稳定版" -#: ../../source/contributor-how-to-release-flower.rst:60 +#: ../../source/contributor-how-to-release-flower.rst:80 msgid "Beta: feature complete, allowed to have known issues" msgstr "贝塔版:功能完整,允许存在已知问题" -#: ../../source/contributor-how-to-release-flower.rst:61 +#: ../../source/contributor-how-to-release-flower.rst:81 msgid "Alpha: not feature complete, allowed to have known issues" msgstr "阿尔法版:功能不完整,允许存在已知问题" @@ -1103,34 +1147,37 @@ msgstr "" "建议在虚拟环境中运行 Python 设置。本指南展示了如何使用 pyenv virtualenv、poes 或 Anaconda " "创建虚拟环境的三个不同示例。您可以按照说明或选择您喜欢的设置。" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:9 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:10 msgid "Python 
Version" msgstr "Python 版本" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:11 -#: ../../source/how-to-install-flower.rst:8 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:12 +#: ../../source/how-to-install-flower.rst:7 msgid "" -"Flower requires at least `Python 3.8 `_, " +"Flower requires at least `Python 3.9 `_, " "but `Python 3.10 `_ or above is " "recommended." msgstr "" -"Flower 至少需要 `Python 3.8 `_,但建议使用 `Python " +"Flower 至少需要 `Python 3.9 `_,但建议使用 `Python " "3.10 `_或更高版本。" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:14 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:17 +#, fuzzy msgid "" "Due to a known incompatibility with `ray " "`_, we currently recommend utilizing at " "most `Python 3.11 `_ for running Flower " "simulations." msgstr "" +"由于已知与 `ray `_ 不兼容,我们目前建议最多使用 `Python 3.11" +" `_ 运行 Flower 仿真。" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:19 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:22 #, fuzzy msgid "Virtualenv with Pyenv/Virtualenv" msgstr "Virutualenv 和 Pyenv/Virtualenv" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:21 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:24 msgid "" "One of the recommended virtual environment is `pyenv " "`_/`virtualenv `_。详情请参见 `Flower 示例 " "`_。" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:23 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:29 msgid "" "Once Pyenv is set up, you can use it to install `Python Version 3.10 " "`_ or above:" msgstr "一旦设置好 Pyenv,就可以用它来安装 `Python 3.10 `_ 或更高版本:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:29 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:36 msgid "Create the virtualenv with:" msgstr "创建虚拟环境:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:36 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:42 msgid "Activate the virtualenv by running the following command:" msgstr 
"运行以下命令激活 virtualenv:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:44 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:49 msgid "Virtualenv with Poetry" msgstr "有诗意的 Virtualenv" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:46 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:51 msgid "" "The Flower examples are based on `Poetry `_ to manage dependencies. After installing Poetry you " @@ -1168,36 +1215,37 @@ msgstr "" "Flower 示例基于 `Poetry `_ 来管理依赖关系。安装 Poetry" " 后,只需创建一个虚拟环境即可:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:52 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:58 msgid "" "If you open a new terminal you can activate the previously created " "virtual environment with the following command:" msgstr "如果打开一个新终端,可以使用以下命令激活之前创建的虚拟环境:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:60 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:66 msgid "Virtualenv with Anaconda" msgstr "使用 Anaconda 的 Virtualenv" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:62 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:68 +#, fuzzy msgid "" "If you prefer to use Anaconda for your virtual environment then install " "and setup the `conda `_ package. After setting it up you can " +"/user-guide/install/index.html>`_ package. After setting it up you can " "create a virtual environment with:" msgstr "" "如果你更喜欢在虚拟环境中使用 Anaconda,那么请安装并设置 `conda " "`_ 软件包。设置完成后,您就可以使用以下工具创建虚拟环境:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:68 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:76 msgid "and activate the virtual environment with:" msgstr "并激活虚拟环境:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:76 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:83 msgid "And then?" msgstr "然后呢?" 
-#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:78 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:85 msgid "" "As soon as you created your virtual environment you clone one of the " "`Flower examples `_." @@ -1209,11 +1257,11 @@ msgstr "" msgid "Write documentation" msgstr "编写文件" -#: ../../source/contributor-how-to-write-documentation.rst:6 +#: ../../source/contributor-how-to-write-documentation.rst:5 msgid "Project layout" msgstr "项目布局" -#: ../../source/contributor-how-to-write-documentation.rst:8 +#: ../../source/contributor-how-to-write-documentation.rst:7 msgid "" "The Flower documentation lives in the ``doc`` directory. The Sphinx-based" " documentation system supports both reStructuredText (``.rst`` files) and" @@ -1223,7 +1271,7 @@ msgstr "" "Markdown(``.md`` 文件)。" #: ../../source/contributor-how-to-write-documentation.rst:10 -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:169 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:196 #, fuzzy msgid "" "Note that, in order to build the documentation locally (with ``poetry run" @@ -1234,20 +1282,20 @@ msgstr "" "请注意,要在本地构建文档(使用 ``poetry run make html``,如下所述),系统上必须安装 ``Pandoc " "_`。" -#: ../../source/contributor-how-to-write-documentation.rst:14 +#: ../../source/contributor-how-to-write-documentation.rst:15 msgid "Edit an existing page" msgstr "编辑现有页面" -#: ../../source/contributor-how-to-write-documentation.rst:16 +#: ../../source/contributor-how-to-write-documentation.rst:17 msgid "Edit an existing ``.rst`` (or ``.md``) file under ``doc/source/``" msgstr "编辑 ``doc/source/`` 下现有的 ``.rst`` (或 ``.md``) 文件" -#: ../../source/contributor-how-to-write-documentation.rst:17 +#: ../../source/contributor-how-to-write-documentation.rst:18 #: ../../source/contributor-how-to-write-documentation.rst:27 msgid "Compile the docs: ``cd doc``, then ``poetry run make html``" msgstr "编译文档: cd doc``,然后 ``poetry run make html``" -#: 
../../source/contributor-how-to-write-documentation.rst:18 +#: ../../source/contributor-how-to-write-documentation.rst:19 #: ../../source/contributor-how-to-write-documentation.rst:28 msgid "Open ``doc/build/html/index.html`` in the browser to check the result" msgstr "在浏览器中打开 ``doc/build/html/index.html`` 查看结果" @@ -1282,34 +1330,34 @@ msgstr "" "我们欢迎为Flower做出代码贡献!然而,要知道从哪里开始并非易事。因此,我们提出了一些建议,告诉您从哪里开始,以增加您的 PR 被 Flower" " 代码库接受的机会。" -#: ../../source/contributor-ref-good-first-contributions.rst:11 +#: ../../source/contributor-ref-good-first-contributions.rst:9 msgid "Where to start" msgstr "从哪里开始" -#: ../../source/contributor-ref-good-first-contributions.rst:13 +#: ../../source/contributor-ref-good-first-contributions.rst:11 msgid "" "Until the Flower core library matures it will be easier to get PR's " "accepted if they only touch non-core areas of the codebase. Good " "candidates to get started are:" msgstr "在 Flower 核心库成熟之前,如果 PR 只涉及代码库中的非核心区域,则会更容易被接受。可以从以下方面入手:" -#: ../../source/contributor-ref-good-first-contributions.rst:17 +#: ../../source/contributor-ref-good-first-contributions.rst:14 msgid "Documentation: What's missing? What could be expressed more clearly?" msgstr "文档: 缺少什么?哪些内容可以表达得更清楚?" -#: ../../source/contributor-ref-good-first-contributions.rst:18 +#: ../../source/contributor-ref-good-first-contributions.rst:15 msgid "Baselines: See below." msgstr "Baselines: 见下文。" -#: ../../source/contributor-ref-good-first-contributions.rst:19 +#: ../../source/contributor-ref-good-first-contributions.rst:16 msgid "Examples: See below." 
msgstr "示例: 见下文。" -#: ../../source/contributor-ref-good-first-contributions.rst:23 +#: ../../source/contributor-ref-good-first-contributions.rst:19 msgid "Request for Flower Baselines" msgstr "Flower Baselines的申请" -#: ../../source/contributor-ref-good-first-contributions.rst:25 +#: ../../source/contributor-ref-good-first-contributions.rst:21 #, fuzzy msgid "" "If you are not familiar with Flower Baselines, you should probably check-" @@ -1319,7 +1367,7 @@ msgstr "" "如果您对 Flower Baselines 还不熟悉,也许可以看看我们的 `Baselines贡献指南 " "`_。" -#: ../../source/contributor-ref-good-first-contributions.rst:27 +#: ../../source/contributor-ref-good-first-contributions.rst:25 #, fuzzy msgid "" "You should then check out the open `issues " @@ -1332,7 +1380,7 @@ msgstr "" "`_" " baseline请求。如果您发现了自己想做的baseline,而它还没有被分配,请随时把它分配给自己,然后开始工作!" -#: ../../source/contributor-ref-good-first-contributions.rst:31 +#: ../../source/contributor-ref-good-first-contributions.rst:30 msgid "" "Otherwise, if you don't find a baseline you'd like to work on, be sure to" " open a new issue with the baseline request template!" 
@@ -1375,12 +1423,13 @@ msgstr "" "包括 SecAgg、SecAgg+ 和 LightSecAgg 协议。LightSecAgg " "协议尚未实施,因此其图表和抽象在实践中可能并不准确。SecAgg 协议可视为 SecAgg+ 协议的特例。" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:8 -msgid "The :code:`SecAgg+` abstraction" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:9 +#, fuzzy +msgid "The ``SecAgg+`` abstraction" msgstr "代码:`SecAgg+` 抽象" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:10 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:11 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:163 msgid "" "In this implementation, each client will be assigned with a unique index " "(int) for secure aggregation, and thus many python dictionaries used have" @@ -1389,18 +1438,19 @@ msgstr "" "在此实现中,将为每个客户端分配一个唯一索引(int),以确保聚合的安全性,因此使用的许多 python 字典的键都是 int 类型,而不是 " "ClientProxy 类型。" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:65 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:198 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:67 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:204 msgid "" "The Flower server will execute and process received results in the " "following order:" msgstr "Flower 服务器将按以下顺序执行和处理收到的结果:" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:159 -msgid "The :code:`LightSecAgg` abstraction" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 +#, fuzzy +msgid "The ``LightSecAgg`` abstraction" msgstr "代码:`LightSecAgg` 抽象" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:271 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:277 msgid "Types" msgstr "类型" @@ -1414,7 +1464,7 @@ msgid "" "are not used to contributing to GitHub projects." 
msgstr "本指南适用于想参与 Flower,但不习惯为 GitHub 项目贡献的人。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:6 +#: ../../source/contributor-tutorial-contribute-on-github.rst:7 #, fuzzy msgid "" "If you're familiar with how contributing on GitHub works, you can " @@ -1425,15 +1475,15 @@ msgstr "" "/getting-started-for-contributors.html>`_ 和 \"优秀的首次贡献示例\" " "`_。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:10 +#: ../../source/contributor-tutorial-contribute-on-github.rst:12 msgid "Setting up the repository" msgstr "建立资源库" -#: ../../source/contributor-tutorial-contribute-on-github.rst:21 +#: ../../source/contributor-tutorial-contribute-on-github.rst:29 msgid "**Create a GitHub account and setup Git**" msgstr "**创建 GitHub 账户并设置 Git**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:13 +#: ../../source/contributor-tutorial-contribute-on-github.rst:15 #, fuzzy msgid "" "Git is a distributed version control tool. This allows for an entire " @@ -1445,20 +1495,20 @@ msgstr "" "Git 是一种分布式版本控制工具。它可以将整个代码库的历史记录保存在每个开发人员的机器上。您需要在本地计算机上安装该软件,可以按照本指南 " "`_ 进行设置。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:16 +#: ../../source/contributor-tutorial-contribute-on-github.rst:21 msgid "" "GitHub, itself, is a code hosting platform for version control and " "collaboration. It allows for everyone to collaborate and work from " "anywhere on remote repositories." msgstr "GitHub 本身是一个用于版本控制和协作的代码托管平台。它允许每个人在任何地方对远程仓库进行协作和工作。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:18 +#: ../../source/contributor-tutorial-contribute-on-github.rst:25 msgid "" "If you haven't already, you will need to create an account on `GitHub " "`_." 
msgstr "如果还没有,您需要在 `GitHub `_ 上创建一个账户。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:20 +#: ../../source/contributor-tutorial-contribute-on-github.rst:28 msgid "" "The idea behind the generic Git and GitHub workflow boils down to this: " "you download code from a remote repository on GitHub, make changes " @@ -1468,22 +1518,22 @@ msgstr "" "通用的 Git 和 GitHub 工作流程背后的理念可以归结为:从 GitHub 上的远程仓库下载代码,在本地进行修改并使用 Git " "进行跟踪,然后将新的历史记录上传回 GitHub。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:32 +#: ../../source/contributor-tutorial-contribute-on-github.rst:42 msgid "**Forking the Flower repository**" msgstr "**叉花仓库**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:24 +#: ../../source/contributor-tutorial-contribute-on-github.rst:32 #, fuzzy msgid "" "A fork is a personal copy of a GitHub repository. To create one for " -"Flower, you must navigate to ``_ (while " +"Flower, you must navigate to https://github.com/adap/flower (while " "connected to your GitHub account) and click the ``Fork`` button situated " "on the top right of the page." 
msgstr "" "fork 是 GitHub 仓库的个人副本。要为 Flower 创建一个 fork,您必须导航到 " "https://github.com/adap/flower(同时连接到您的 GitHub 账户),然后点击页面右上方的 ``Fork`` 按钮。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:29 +#: ../../source/contributor-tutorial-contribute-on-github.rst:38 msgid "" "You can change the name if you want, but this is not necessary as this " "version of Flower will be yours and will sit inside your own account " @@ -1493,11 +1543,11 @@ msgstr "" "您可以更改名称,但没有必要,因为这个版本的 Flower " "将是您自己的,并位于您自己的账户中(即,在您自己的版本库列表中)。创建完成后,您会在左上角看到自己的 Flower 版本。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:47 +#: ../../source/contributor-tutorial-contribute-on-github.rst:59 msgid "**Cloning your forked repository**" msgstr "**克隆你的分叉仓库**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:35 +#: ../../source/contributor-tutorial-contribute-on-github.rst:45 msgid "" "The next step is to download the forked repository on your machine to be " "able to make changes to it. On your forked repository page, you should " @@ -1507,28 +1557,28 @@ msgstr "" "下一步是在你的机器上下载分叉版本库,以便对其进行修改。在分叉版本库页面上,首先点击右侧的 \"代码 \"按钮,这样就能复制版本库的 HTTPS " "链接。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:41 +#: ../../source/contributor-tutorial-contribute-on-github.rst:52 msgid "" "Once you copied the \\, you can open a terminal on your machine, " "navigate to the place you want to download the repository to and type:" msgstr "一旦复制了 (),你就可以在你的机器上打开一个终端,导航到你想下载软件源的地方,然后键入:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:47 +#: ../../source/contributor-tutorial-contribute-on-github.rst:59 #, fuzzy msgid "" "This will create a ``flower/`` (or the name of your fork if you renamed " "it) folder in the current working directory." 
msgstr "这将在当前工作目录下创建一个 `flower/`(如果重命名了,则使用 fork 的名称)文件夹。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:66 +#: ../../source/contributor-tutorial-contribute-on-github.rst:78 msgid "**Add origin**" msgstr "**添加原产地**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:50 +#: ../../source/contributor-tutorial-contribute-on-github.rst:62 msgid "You can then go into the repository folder:" msgstr "然后,您就可以进入存储库文件夹:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:56 +#: ../../source/contributor-tutorial-contribute-on-github.rst:68 msgid "" "And here we will need to add an origin to our repository. The origin is " "the \\ of the remote fork repository. To obtain it, we can do as " @@ -1538,28 +1588,28 @@ msgstr "" "在这里,我们需要为我们的版本库添加一个 origin。origin 是远程 fork 仓库的 " "\\。要获得它,我们可以像前面提到的那样,访问 GitHub 账户上的分叉仓库并复制链接。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:61 +#: ../../source/contributor-tutorial-contribute-on-github.rst:75 msgid "" "Once the \\ is copied, we can type the following command in our " "terminal:" msgstr "一旦复制了 \\ ,我们就可以在终端中键入以下命令:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:90 +#: ../../source/contributor-tutorial-contribute-on-github.rst:102 msgid "**Add upstream**" msgstr "**增加上游**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:69 +#: ../../source/contributor-tutorial-contribute-on-github.rst:81 #, fuzzy msgid "" "Now we will add an upstream address to our repository. 
Still in the same " "directory, we must run the following command:" msgstr "现在,我们要为版本库添加一个上游地址。还是在同一目录下,我们必须运行以下命令:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:76 +#: ../../source/contributor-tutorial-contribute-on-github.rst:88 msgid "The following diagram visually explains what we did in the previous steps:" msgstr "下图直观地解释了我们在前面步骤中的操作:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:80 +#: ../../source/contributor-tutorial-contribute-on-github.rst:92 msgid "" "The upstream is the GitHub remote address of the parent repository (in " "this case Flower), i.e. the one we eventually want to contribute to and " @@ -1570,17 +1620,17 @@ msgstr "" "上游是父版本库(这里是 Flower)的 GitHub 远程地址,即我们最终要贡献的版本库,因此需要最新的历史记录。origin " "只是我们创建的分叉仓库的 GitHub 远程地址,即我们自己账户中的副本(分叉)。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:84 +#: ../../source/contributor-tutorial-contribute-on-github.rst:97 msgid "" "To make sure our local version of the fork is up-to-date with the latest " "changes from the Flower repository, we can execute the following command:" msgstr "为了确保本地版本的分叉程序与 Flower 代码库的最新更改保持一致,我们可以执行以下命令:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:93 +#: ../../source/contributor-tutorial-contribute-on-github.rst:105 msgid "Setting up the coding environment" msgstr "设置编码环境" -#: ../../source/contributor-tutorial-contribute-on-github.rst:95 +#: ../../source/contributor-tutorial-contribute-on-github.rst:107 #, fuzzy msgid "" "This can be achieved by following this :doc:`getting started guide for " @@ -1589,162 +1639,168 @@ msgid "" "code and test it, you can finally start making changes!" msgstr "您可以按照这份 \"贡献者入门指南\"__(注意,您不需要克隆版本库)来实现这一点。一旦您能够编写代码并进行测试,您就可以开始修改了!" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:100 +#: ../../source/contributor-tutorial-contribute-on-github.rst:113 msgid "Making changes" msgstr "做出改变" -#: ../../source/contributor-tutorial-contribute-on-github.rst:102 +#: ../../source/contributor-tutorial-contribute-on-github.rst:115 msgid "" "Before making any changes make sure you are up-to-date with your " "repository:" msgstr "在进行任何更改之前,请确保您的版本库是最新的:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:108 +#: ../../source/contributor-tutorial-contribute-on-github.rst:121 msgid "And with Flower's repository:" msgstr "还有Flower的存储库:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:122 +#: ../../source/contributor-tutorial-contribute-on-github.rst:134 msgid "**Create a new branch**" msgstr "**创建一个新分支**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:115 +#: ../../source/contributor-tutorial-contribute-on-github.rst:128 msgid "" "To make the history cleaner and easier to work with, it is good practice " "to create a new branch for each feature/project that needs to be " "implemented." msgstr "为了使历史记录更简洁、更易于操作,为每个需要实现的功能/项目创建一个新分支是个不错的做法。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:118 +#: ../../source/contributor-tutorial-contribute-on-github.rst:131 msgid "" "To do so, just run the following command inside the repository's " "directory:" msgstr "为此,只需在版本库目录下运行以下命令即可:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:125 +#: ../../source/contributor-tutorial-contribute-on-github.rst:136 msgid "**Make changes**" msgstr "**进行修改**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:125 +#: ../../source/contributor-tutorial-contribute-on-github.rst:137 msgid "Write great code and create wonderful changes using your favorite editor!" msgstr "使用您最喜欢的编辑器编写优秀的代码并创建精彩的更改!" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:138 +#: ../../source/contributor-tutorial-contribute-on-github.rst:149 msgid "**Test and format your code**" msgstr "**测试并格式化您的代码**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:128 +#: ../../source/contributor-tutorial-contribute-on-github.rst:139 msgid "" "Don't forget to test and format your code! Otherwise your code won't be " "able to be merged into the Flower repository. This is done so the " "codebase stays consistent and easy to understand." msgstr "不要忘记测试和格式化您的代码!否则您的代码将无法并入 Flower 代码库。这样做是为了使代码库保持一致并易于理解。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:131 +#: ../../source/contributor-tutorial-contribute-on-github.rst:143 msgid "To do so, we have written a few scripts that you can execute:" msgstr "为此,我们编写了一些脚本供您执行:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:150 +#: ../../source/contributor-tutorial-contribute-on-github.rst:162 msgid "**Stage changes**" msgstr "**暂存更改**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:141 +#: ../../source/contributor-tutorial-contribute-on-github.rst:152 msgid "" "Before creating a commit that will update your history, you must specify " "to Git which files it needs to take into account." msgstr "在创建更新历史记录的提交之前,必须向 Git 说明需要考虑哪些文件。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:143 +#: ../../source/contributor-tutorial-contribute-on-github.rst:155 msgid "This can be done with:" msgstr "这可以通过:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:149 +#: ../../source/contributor-tutorial-contribute-on-github.rst:161 +#, fuzzy msgid "" "To check which files have been modified compared to the last version " "(last commit) and to see which files are staged for commit, you can use " -"the :code:`git status` command." +"the ``git status`` command." 
msgstr "要查看与上一版本(上次提交)相比哪些文件已被修改,以及哪些文件处于提交阶段,可以使用 :code:`git status` 命令。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:160 +#: ../../source/contributor-tutorial-contribute-on-github.rst:173 msgid "**Commit changes**" msgstr "**提交更改**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:153 +#: ../../source/contributor-tutorial-contribute-on-github.rst:165 +#, fuzzy msgid "" -"Once you have added all the files you wanted to commit using :code:`git " -"add`, you can finally create your commit using this command:" +"Once you have added all the files you wanted to commit using ``git add``," +" you can finally create your commit using this command:" msgstr "使用 :code:`git add` 添加完所有要提交的文件后,就可以使用此命令创建提交了:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:159 +#: ../../source/contributor-tutorial-contribute-on-github.rst:172 +#, fuzzy msgid "" "The \\ is there to explain to others what the commit " "does. It should be written in an imperative style and be concise. An " -"example would be :code:`git commit -m \"Add images to README\"`." +"example would be ``git commit -m \"Add images to README\"``." 
msgstr "" " 用于向他人解释提交的作用。它应该以命令式风格书写,并且简明扼要。例如 :code:`git commit " "-m \"Add images to README\"`。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:171 +#: ../../source/contributor-tutorial-contribute-on-github.rst:185 msgid "**Push the changes to the fork**" msgstr "**将更改推送到分叉**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:163 +#: ../../source/contributor-tutorial-contribute-on-github.rst:176 msgid "" "Once we have committed our changes, we have effectively updated our local" " history, but GitHub has no way of knowing this unless we push our " "changes to our origin's remote address:" msgstr "一旦提交了修改,我们就有效地更新了本地历史记录,但除非我们将修改推送到原点的远程地址,否则 GitHub 无法得知:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:170 +#: ../../source/contributor-tutorial-contribute-on-github.rst:184 msgid "" "Once this is done, you will see on the GitHub that your forked repo was " "updated with the changes you have made." msgstr "完成此操作后,您将在 GitHub 上看到您的分叉仓库已根据您所做的更改进行了更新。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:174 +#: ../../source/contributor-tutorial-contribute-on-github.rst:188 msgid "Creating and merging a pull request (PR)" msgstr "创建和合并拉取请求 (PR)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:206 +#: ../../source/contributor-tutorial-contribute-on-github.rst:226 msgid "**Create the PR**" msgstr "**创建 PR**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:177 +#: ../../source/contributor-tutorial-contribute-on-github.rst:191 msgid "" "Once you have pushed changes, on the GitHub webpage of your repository " "you should see the following message:" msgstr "推送更改后,在仓库的 GitHub 网页上应该会看到以下信息:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:181 +#: ../../source/contributor-tutorial-contribute-on-github.rst:196 #, fuzzy msgid "Otherwise you can always find this option in the ``Branches`` page." 
msgstr "否则,您可以在 \"分支 \"页面找到该选项。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:183 +#: ../../source/contributor-tutorial-contribute-on-github.rst:198 #, fuzzy msgid "" "Once you click the ``Compare & pull request`` button, you should see " "something similar to this:" msgstr "点击 \"比较和拉取请求 \"按钮后,您应该会看到类似下面的内容:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:187 +#: ../../source/contributor-tutorial-contribute-on-github.rst:203 msgid "At the top you have an explanation of which branch will be merged where:" msgstr "在顶部,你可以看到关于哪个分支将被合并的说明:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:191 +#: ../../source/contributor-tutorial-contribute-on-github.rst:207 msgid "" "In this example you can see that the request is to merge the branch " "``doc-fixes`` from my forked repository to branch ``main`` from the " "Flower repository." msgstr "在这个例子中,你可以看到请求将我分叉的版本库中的分支 ``doc-fixes`` 合并到 Flower 版本库中的分支 ``main``。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:193 +#: ../../source/contributor-tutorial-contribute-on-github.rst:210 +#, fuzzy msgid "" "The title should be changed to adhere to the :ref:`pr_title_format` " "guidelines, otherwise it won't be possible to merge the PR. So in this " "case, a correct title might be ``docs(framework:skip) Fix typos``." msgstr "" +"应该修改标题以符合 :ref:`pr_title_format` 准则,否则将无法合并 PR。因此,在这种情况下,正确的标题可能是 " +"``docs(framework:skip)修复错字``。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:196 +#: ../../source/contributor-tutorial-contribute-on-github.rst:214 msgid "" "The input box in the middle is there for you to describe what your PR " "does and to link it to existing issues. We have placed comments (that " @@ -1752,166 +1808,167 @@ msgid "" "process." 
msgstr "中间的输入框供您描述 PR 的作用,并将其与现有问题联系起来。我们在此放置了注释(一旦 PR 打开,注释将不会显示),以指导您完成整个过程。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:199 +#: ../../source/contributor-tutorial-contribute-on-github.rst:218 +#, fuzzy msgid "It is important to follow the instructions described in comments." -msgstr "" +msgstr "请务必遵守注释中的说明。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:201 +#: ../../source/contributor-tutorial-contribute-on-github.rst:220 msgid "" "At the bottom you will find the button to open the PR. This will notify " "reviewers that a new PR has been opened and that they should look over it" " to merge or to request changes." msgstr "在底部,您可以找到打开 PR 的按钮。这将通知审核人员新的 PR 已经打开,他们应该查看该 PR 以进行合并或要求修改。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:204 +#: ../../source/contributor-tutorial-contribute-on-github.rst:224 msgid "" "If your PR is not yet ready for review, and you don't want to notify " "anyone, you have the option to create a draft pull request:" msgstr "如果您的 PR 尚未准备好接受审核,而且您不想通知任何人,您可以选择创建一个草案拉取请求:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:230 msgid "**Making new changes**" msgstr "**作出新的改变**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:229 msgid "" "Once the PR has been opened (as draft or not), you can still push new " "commits to it the same way we did before, by making changes to the branch" " associated with the PR." 
msgstr "一旦 PR 被打开(无论是否作为草案),你仍然可以像以前一样,通过修改与 PR 关联的分支来推送新的提交。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:231 +#: ../../source/contributor-tutorial-contribute-on-github.rst:253 msgid "**Review the PR**" msgstr "**审查 PR**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:212 +#: ../../source/contributor-tutorial-contribute-on-github.rst:233 msgid "" "Once the PR has been opened or once the draft PR has been marked as " "ready, a review from code owners will be automatically requested:" msgstr "一旦 PR 被打开或 PR 草案被标记为就绪,就会自动要求代码所有者进行审核:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:216 +#: ../../source/contributor-tutorial-contribute-on-github.rst:238 msgid "" "Code owners will then look into the code, ask questions, request changes " "or validate the PR." msgstr "然后,代码所有者会查看代码、提出问题、要求修改或验证 PR。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:218 +#: ../../source/contributor-tutorial-contribute-on-github.rst:241 msgid "Merging will be blocked if there are ongoing requested changes." msgstr "如果有正在进行的更改请求,合并将被阻止。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:222 +#: ../../source/contributor-tutorial-contribute-on-github.rst:245 msgid "" "To resolve them, just push the necessary changes to the branch associated" " with the PR:" msgstr "要解决这些问题,只需将必要的更改推送到与 PR 关联的分支即可:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:226 +#: ../../source/contributor-tutorial-contribute-on-github.rst:250 msgid "And resolve the conversation:" msgstr "并解决对话:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:230 +#: ../../source/contributor-tutorial-contribute-on-github.rst:254 msgid "" "Once all the conversations have been resolved, you can re-request a " "review." 
msgstr "一旦所有对话都得到解决,您就可以重新申请审核。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:251 +#: ../../source/contributor-tutorial-contribute-on-github.rst:274 msgid "**Once the PR is merged**" msgstr "**一旦 PR 被合并**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:234 +#: ../../source/contributor-tutorial-contribute-on-github.rst:256 msgid "" "If all the automatic tests have passed and reviewers have no more changes" " to request, they can approve the PR and merge it." msgstr "如果所有自动测试都已通过,且审核员不再需要修改,他们就可以批准 PR 并将其合并。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:238 +#: ../../source/contributor-tutorial-contribute-on-github.rst:261 msgid "" "Once it is merged, you can delete the branch on GitHub (a button should " "appear to do so) and also delete it locally by doing:" msgstr "合并后,您可以在 GitHub 上删除该分支(会出现一个删除按钮),也可以在本地删除该分支:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:245 +#: ../../source/contributor-tutorial-contribute-on-github.rst:269 msgid "Then you should update your forked repository by doing:" msgstr "然后,你应该更新你的分叉仓库:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:254 +#: ../../source/contributor-tutorial-contribute-on-github.rst:277 msgid "Example of first contribution" msgstr "首次贡献示例" -#: ../../source/contributor-tutorial-contribute-on-github.rst:257 +#: ../../source/contributor-tutorial-contribute-on-github.rst:280 msgid "Problem" msgstr "问题" -#: ../../source/contributor-tutorial-contribute-on-github.rst:259 +#: ../../source/contributor-tutorial-contribute-on-github.rst:282 #, fuzzy msgid "" "For our documentation, we've started to use the `Diàtaxis framework " "`_." 
msgstr "对于我们的文档,我们已经开始使用 \"Diàtaxis 框架 `_\"。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:261 +#: ../../source/contributor-tutorial-contribute-on-github.rst:285 #, fuzzy msgid "" "Our \"How to\" guides should have titles that continue the sentence \"How" " to …\", for example, \"How to upgrade to Flower 1.0\"." msgstr "我们的 \"如何 \"指南的标题应延续 \"如何...... \"的句式,例如 \"如何升级到 Flower 1.0\"。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:263 +#: ../../source/contributor-tutorial-contribute-on-github.rst:288 msgid "" "Most of our guides do not follow this new format yet, and changing their " "title is (unfortunately) more involved than one might think." msgstr "我们的大多数指南还没有采用这种新格式,而更改其标题(不幸的是)比人们想象的要复杂得多。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:265 +#: ../../source/contributor-tutorial-contribute-on-github.rst:291 #, fuzzy msgid "" "This issue is about changing the title of a doc from present continuous " "to present simple." msgstr "这个问题是关于将文档标题从现在进行时改为一般现在时。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:267 +#: ../../source/contributor-tutorial-contribute-on-github.rst:294 #, fuzzy msgid "" "Let's take the example of \"Saving Progress\" which we changed to \"Save " "Progress\". Does this pass our check?" msgstr "以 \"保存进度 \"为例,我们将其改为 \"保存进度\"。这是否通过了我们的检查?" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:269 +#: ../../source/contributor-tutorial-contribute-on-github.rst:297 #, fuzzy msgid "Before: \"How to saving progress\" ❌" msgstr "之前: \"如何保存进度\" ❌" -#: ../../source/contributor-tutorial-contribute-on-github.rst:271 +#: ../../source/contributor-tutorial-contribute-on-github.rst:299 #, fuzzy msgid "After: \"How to save progress\" ✅" msgstr "之后: \"如何保存进度\"✅" -#: ../../source/contributor-tutorial-contribute-on-github.rst:274 +#: ../../source/contributor-tutorial-contribute-on-github.rst:302 msgid "Solution" msgstr "解决方案" -#: ../../source/contributor-tutorial-contribute-on-github.rst:276 +#: ../../source/contributor-tutorial-contribute-on-github.rst:304 #, fuzzy msgid "" "This is a tiny change, but it'll allow us to test your end-to-end setup. " "After cloning and setting up the Flower repo, here's what you should do:" msgstr "这只是一个很小的改动,但可以让我们测试你的端到端设置。克隆并设置好 Flower repo 后,你应该这样做:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:278 +#: ../../source/contributor-tutorial-contribute-on-github.rst:307 #, fuzzy msgid "Find the source file in ``doc/source``" msgstr "在 `doc/source` 中查找源文件" -#: ../../source/contributor-tutorial-contribute-on-github.rst:279 +#: ../../source/contributor-tutorial-contribute-on-github.rst:308 #, fuzzy msgid "" "Make the change in the ``.rst`` file (beware, the dashes under the title " "should be the same length as the title itself)" msgstr "在 `.rst` 文件中进行修改(注意,标题下的破折号应与标题本身的长度相同)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:280 +#: ../../source/contributor-tutorial-contribute-on-github.rst:310 #, fuzzy msgid "" "Build the docs and `check the result `_" -#: ../../source/contributor-tutorial-contribute-on-github.rst:283 +#: ../../source/contributor-tutorial-contribute-on-github.rst:314 msgid "Rename file" msgstr "重命名文件" -#: ../../source/contributor-tutorial-contribute-on-github.rst:285 +#: 
../../source/contributor-tutorial-contribute-on-github.rst:316 msgid "" "You might have noticed that the file name still reflects the old wording." " If we just change the file, then we break all existing links to it - it " @@ -1934,33 +1991,33 @@ msgstr "" "您可能已经注意到,文件名仍然反映了旧的措辞。如果我们只是更改文件,那么就会破坏与该文件的所有现有链接--" "避免这种情况是***重要的,破坏链接会损害我们的搜索引擎排名。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:288 +#: ../../source/contributor-tutorial-contribute-on-github.rst:320 #, fuzzy msgid "Here's how to change the file name:" msgstr "下面是更改文件名的方法:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:290 +#: ../../source/contributor-tutorial-contribute-on-github.rst:322 #, fuzzy msgid "Change the file name to ``save-progress.rst``" msgstr "将文件名改为`save-progress.rst`" -#: ../../source/contributor-tutorial-contribute-on-github.rst:291 +#: ../../source/contributor-tutorial-contribute-on-github.rst:323 #, fuzzy msgid "Add a redirect rule to ``doc/source/conf.py``" msgstr "在 `doc/source/conf.py` 中添加重定向规则" -#: ../../source/contributor-tutorial-contribute-on-github.rst:293 +#: ../../source/contributor-tutorial-contribute-on-github.rst:325 #, fuzzy msgid "" "This will cause a redirect from ``saving-progress.html`` to ``save-" "progress.html``, old links will continue to work." msgstr "这将导致从 `saving-progress.html` 重定向到 `save-progress.html`,旧链接将继续工作。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:296 +#: ../../source/contributor-tutorial-contribute-on-github.rst:329 msgid "Apply changes in the index file" msgstr "应用索引文件中的更改" -#: ../../source/contributor-tutorial-contribute-on-github.rst:298 +#: ../../source/contributor-tutorial-contribute-on-github.rst:331 #, fuzzy msgid "" "For the lateral navigation bar to work properly, it is very important to " @@ -1968,85 +2025,90 @@ msgid "" "arborescence of the navbar." 
msgstr "要使横向导航栏正常工作,更新 `index.rst` 文件也非常重要。我们就是在这里定义整个导航栏的结构。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:301 +#: ../../source/contributor-tutorial-contribute-on-github.rst:335 #, fuzzy msgid "Find and modify the file name in ``index.rst``" msgstr "查找并修改 `index.rst` 中的文件名" -#: ../../source/contributor-tutorial-contribute-on-github.rst:304 +#: ../../source/contributor-tutorial-contribute-on-github.rst:338 msgid "Open PR" msgstr "开放式 PR" -#: ../../source/contributor-tutorial-contribute-on-github.rst:306 +#: ../../source/contributor-tutorial-contribute-on-github.rst:340 #, fuzzy msgid "" "Commit the changes (commit messages are always imperative: \"Do " "something\", in this case \"Change …\")" msgstr "提交更改(提交信息总是命令式的:\"做某事\",这里是 \"更改......\")" -#: ../../source/contributor-tutorial-contribute-on-github.rst:307 +#: ../../source/contributor-tutorial-contribute-on-github.rst:342 msgid "Push the changes to your fork" msgstr "将更改推送到分叉" -#: ../../source/contributor-tutorial-contribute-on-github.rst:308 +#: ../../source/contributor-tutorial-contribute-on-github.rst:343 +#, fuzzy msgid "" "Open a PR (as shown above) with title ``docs(framework) Update how-to " "guide title``" -msgstr "" +msgstr "打开一个 PR(如上图所示),标题为\"`docs(framework) Update how-to guide title```\"。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:309 +#: ../../source/contributor-tutorial-contribute-on-github.rst:344 msgid "Wait for it to be approved!" msgstr "等待审批!" -#: ../../source/contributor-tutorial-contribute-on-github.rst:310 +#: ../../source/contributor-tutorial-contribute-on-github.rst:345 msgid "Congrats! 🥳 You're now officially a Flower contributor!" msgstr "祝贺你 🥳 您现在正式成为 \"Flower \"贡献者!" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:314 -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:548 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:946 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:727 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:713 +#: ../../source/contributor-tutorial-contribute-on-github.rst:348 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:573 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1012 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:811 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:857 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:367 msgid "Next steps" msgstr "接下来的步骤" -#: ../../source/contributor-tutorial-contribute-on-github.rst:316 +#: ../../source/contributor-tutorial-contribute-on-github.rst:350 msgid "" "Once you have made your first PR, and want to contribute more, be sure to" " check out the following :" msgstr "一旦您完成了第一份 PR,并希望做出更多贡献,请务必查看以下内容:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:318 +#: ../../source/contributor-tutorial-contribute-on-github.rst:353 #, fuzzy msgid "" ":doc:`Good first contributions `, where you should particularly look into the " -":code:`baselines` contributions." +"``baselines`` contributions." 
msgstr "" "`优秀的首次贡献 `_,在这里你应该特别看看 :code:`baselines` 的贡献。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:322 +#: ../../source/contributor-tutorial-contribute-on-github.rst:357 #: ../../source/fed/0000-20200102-fed-template.md:60 msgid "Appendix" msgstr "附录" -#: ../../source/contributor-tutorial-contribute-on-github.rst:327 +#: ../../source/contributor-tutorial-contribute-on-github.rst:362 +#, fuzzy msgid "PR title format" -msgstr "" +msgstr "PR 标题格式" -#: ../../source/contributor-tutorial-contribute-on-github.rst:329 +#: ../../source/contributor-tutorial-contribute-on-github.rst:364 +#, fuzzy msgid "We enforce the following PR title format:" -msgstr "" +msgstr "我们执行以下 PR 标题格式:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:335 +#: ../../source/contributor-tutorial-contribute-on-github.rst:370 +#, fuzzy msgid "" "(or ``(:skip) `` to ignore the PR in the " "changelog)" -msgstr "" +msgstr "(或 ``(:skip) `` 忽略更新日志中的 PR)。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:337 +#: ../../source/contributor-tutorial-contribute-on-github.rst:372 +#, fuzzy msgid "" "Where ```` needs to be in ``{ci, fix, feat, docs, refactor, " "break}``, ```` should be in ``{framework, baselines, datasets, " @@ -2054,68 +2116,86 @@ msgid "" "':skip' flag to be used}``, and ```` starts with a capitalised " "verb in the imperative mood." 
msgstr "" +"其中 ```` 需要使用 ``{ci, fix, feat, docs, refactor, break}``, " +"```` 应该使用 ``{framework, baselines, datasets, examples, 或者 '*' " +"当修改多个项目时需要使用 ':skip'标记}``, 并且 ```` 应该以一个大写的动词开始。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:341 +#: ../../source/contributor-tutorial-contribute-on-github.rst:377 #, fuzzy msgid "Valid examples:" msgstr "实例" -#: ../../source/contributor-tutorial-contribute-on-github.rst:343 +#: ../../source/contributor-tutorial-contribute-on-github.rst:379 +#, fuzzy msgid "``feat(framework) Add flwr build CLI command``" -msgstr "" +msgstr "`feat(框架) 添加 flwr build CLI 命令```" -#: ../../source/contributor-tutorial-contribute-on-github.rst:344 +#: ../../source/contributor-tutorial-contribute-on-github.rst:380 +#, fuzzy msgid "``refactor(examples:skip) Improve quickstart-pytorch logging``" -msgstr "" +msgstr "``refactor(examples:skip) Improve quickstart-pytorch logging``." -#: ../../source/contributor-tutorial-contribute-on-github.rst:345 +#: ../../source/contributor-tutorial-contribute-on-github.rst:381 +#, fuzzy msgid "``ci(*:skip) Enforce PR title format``" -msgstr "" +msgstr "`ci(*:skip)执行 PR 标题格式``。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:347 +#: ../../source/contributor-tutorial-contribute-on-github.rst:383 #, fuzzy msgid "Invalid examples:" msgstr "模拟示例" -#: ../../source/contributor-tutorial-contribute-on-github.rst:349 +#: ../../source/contributor-tutorial-contribute-on-github.rst:385 +#, fuzzy msgid "``feat(framework): Add flwr build CLI command`` (extra ``:``)" -msgstr "" +msgstr "`feat(框架): 添加 flwr build CLI 命令``(额外的``:``)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:350 +#: ../../source/contributor-tutorial-contribute-on-github.rst:386 +#, fuzzy msgid "" "``feat(*) Add flwr build CLI command`` (missing ``skip`` flag along with " "``*``)" -msgstr "" +msgstr "`feat(*)添加flwr构建CLI命令``(缺少``skip``标志和``*``)。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:351 +#: 
../../source/contributor-tutorial-contribute-on-github.rst:387 +#, fuzzy msgid "``feat(skip) Add flwr build CLI command`` (missing ````)" -msgstr "" +msgstr "`feat(skip)添加flwr构建CLI命令``(缺少```)。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:352 +#: ../../source/contributor-tutorial-contribute-on-github.rst:388 +#, fuzzy msgid "``feat(framework) add flwr build CLI command`` (non capitalised verb)" -msgstr "" +msgstr "`feat(framework)添加 flwr 构建 CLI 命令``(非大写动词)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:353 +#: ../../source/contributor-tutorial-contribute-on-github.rst:389 +#, fuzzy msgid "``feat(framework) Add flwr build CLI command.`` (dot at the end)" -msgstr "" +msgstr "feat(框架) 添加 flwr 构建 CLI 命令。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:354 +#: ../../source/contributor-tutorial-contribute-on-github.rst:390 +#, fuzzy msgid "``Add flwr build CLI command.`` (missing ``()``)" msgstr "" +"``添加 flwr build CLI 命令.``(缺少``()``) ``Add flwr build CLI " +"command.`` (missing ``()``)" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:2 msgid "Get started as a contributor" msgstr "成为贡献者" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:5 -#: ../../source/how-to-run-flower-using-docker.rst:132 +#: ../../source/docker/run-as-subprocess.rst:11 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:16 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:13 +#: ../../source/docker/tutorial-quickstart-docker.rst:11 msgid "Prerequisites" msgstr "先决条件" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:7 #, fuzzy -msgid "`Python 3.8 `_ or above" -msgstr "Python 3.7 `_ 或更高版本" +msgid "`Python 3.9 `_ or above" +msgstr "Python 3.9 `_ 或更高版本" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:8 msgid "`Poetry 1.3 `_ or above" @@ -2132,20 +2212,20 @@ msgstr "(可选) 
`pyenv-virtualenv #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:12 #, fuzzy msgid "" -"Flower uses :code:`pyproject.toml` to manage dependencies and configure " +"Flower uses ``pyproject.toml`` to manage dependencies and configure " "development tools (the ones which support it). Poetry is a build tool " "which supports `PEP 517 `_." msgstr "" "Flower 使用 :code:`pyproject.toml` 来管理依赖关系和配置开发工具(支持它的)。Poetry 是一种支持 `PEP " "517 `_ 的构建工具。" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:18 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:17 msgid "Developer Machine Setup" msgstr "开发者机器设置" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:21 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:20 #, fuzzy -msgid "Preliminarities" +msgid "Preliminaries" msgstr "前言" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:22 @@ -2165,24764 +2245,31702 @@ msgid "" "installation actions to add `brew` to your PATH." 
msgstr "安装 `homebrew `_。别忘了安装后的操作,将 `brew` 添加到你的 PATH。" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:28 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:29 #, fuzzy msgid "" "Install `xz` (to install different Python versions) and `pandoc` to build" -" the docs::" +" the docs:" msgstr "安装 `xz`(用于安装不同的 Python 版本)和 `pandoc` 以构建文档::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:34 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:36 #, fuzzy msgid "For Ubuntu" msgstr "针对 Ubuntu" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:35 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:38 #, fuzzy msgid "" "Ensure you system (Ubuntu 22.04+) is up-to-date, and you have all " -"necessary packages::" +"necessary packages:" msgstr "确保您的系统(Ubuntu 22.04+)为最新版本,并安装了所有必要的软件包::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:44 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:47 #, fuzzy msgid "Create Flower Dev Environment" msgstr "创建/删除虚拟环境" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:46 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:49 #, fuzzy msgid "" -"1. Clone the `Flower repository `_ from " -"GitHub::" +"Clone the `Flower repository `_ from " +"GitHub:" msgstr "首先,从 GitHub 克隆 \"Flower 存储库 `_\":" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:52 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:56 #, fuzzy msgid "" "Let's create the Python environment for all-things Flower. If you wish to" -" use :code:`pyenv`, we provide two convenience scripts that you can use. " -"If you prefer using something else than :code:`pyenv`, create a new " -"environment, activate and skip to the last point where all packages are " -"installed." +" use ``pyenv``, we provide two convenience scripts that you can use. 
If " +"you prefer using something else than ``pyenv``, create a new environment," +" activate and skip to the last point where all packages are installed." msgstr "" "让我们为 Flower 创建一个 Python 环境。如果您想使用 :code:`pyenv`,我们提供了两个方便的脚本供您使用。如果你不喜欢使用" " :code:`pyenv`,请创建一个新环境,激活并跳到最后一点,即安装所有软件包。" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:54 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:61 #, fuzzy msgid "" -"If you don't have :code:`pyenv` installed, the following script that will" -" install it, set it up, and create the virtual environment (with " -":code:`Python 3.8.17` by default)::" +"If you don't have ``pyenv`` installed, the following script that will " +"install it, set it up, and create the virtual environment (with " +":substitution-code:`Python |python_full_version|` by default):" msgstr "" -"如果没有安装 :code:`pyenv`,可以使用以下脚本安装 pyenv、设置并创建虚拟环境(默认使用 " -":code:`Python3.8.17)::" +"如果没有安装 :code:`pyenv`,可以使用以下脚本安装 pyenv、设置并创建虚拟环境(默认使用 :code:`Python " +"3.9.20)::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:58 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69 #, fuzzy msgid "" -"If you already have :code:`pyenv` installed (along with the :code:`pyenv-" -"virtualenv` plugin), you can use the following convenience script (with " -":code:`Python 3.8.17` by default)::" +"If you already have ``pyenv`` installed (along with the ``pyenv-" +"virtualenv`` plugin), you can use the following convenience script (with " +":substitution-code:`Python |python_full_version|` by default):" msgstr "" -"如果没有安装 :code:`pyenv`,可以使用以下脚本安装 pyenv、设置并创建虚拟环境(默认使用 " -":code:`Python3.8.17)::" +"如果没有安装 :code:`pyenv`,可以使用以下脚本安装 pyenv、设置并创建虚拟环境(默认使用 :code:`Python " +"3.9.20)::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:62 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:77 #, fuzzy msgid "" -"3. 
Install the Flower package in development mode (think :code:`pip " -"install -e`) along with all necessary dependencies::" +"3. Install the Flower package in development mode (think ``pip install " +"-e``) along with all necessary dependencies:" msgstr "第三,在开发模式下安装 Flower 软件包(想想 :code:`pip install -e`)以及所有必要的依赖项::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85 msgid "Convenience Scripts" msgstr "便捷脚本" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:71 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:87 +#, fuzzy msgid "" "The Flower repository contains a number of convenience scripts to make " -"recurring development tasks easier and less error-prone. See the " -":code:`/dev` subdirectory for a full list. The following scripts are " -"amongst the most important ones:" +"recurring development tasks easier and less error-prone. See the ``/dev``" +" subdirectory for a full list. 
The following scripts are amongst the most" +" important ones:" msgstr "Flower 软件仓库包含大量便捷脚本,可使重复性开发任务更轻松、更不易出错。完整列表请参见 :code:`/dev` 子目录。以下是最重要的脚本:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:77 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:92 msgid "Create/Delete Virtual Environment" msgstr "创建/删除虚拟环境" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:101 msgid "Compile ProtoBuf Definitions" msgstr "编译 ProtoBuf 定义" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:92 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:108 msgid "Auto-Format Code" msgstr "自动格式化代码" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:99 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:115 msgid "Run Linters and Tests" msgstr "运行分类器和测试" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:106 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:122 +#, fuzzy msgid "Add a pre-commit hook" -msgstr "" +msgstr "添加预先提交钩子" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:108 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:124 +#, fuzzy msgid "" "Developers may integrate a pre-commit hook into their workflow utilizing " "the `pre-commit `_ library. The pre-" "commit hook is configured to execute two primary operations: " "``./dev/format.sh`` and ``./dev/test.sh`` scripts." 
msgstr "" +"开发人员可利用 `pre-commit `_ " +"库将预提交钩子集成到工作流程中。预提交钩子被配置为执行两个主要操作: `./dev/format.sh`` 和 ``./dev/test.sh``" +" 脚本。" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:110 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:128 +#, fuzzy msgid "There are multiple ways developers can use this:" -msgstr "" +msgstr "开发人员可以通过多种方式使用它:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:112 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:130 +#, fuzzy msgid "Install the pre-commit hook to your local git directory by simply running:" -msgstr "" +msgstr "在本地 git 目录中安装预提交钩子,只需运行" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:118 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:136 +#, fuzzy msgid "" "Each ``git commit`` will trigger the execution of formatting and " "linting/test scripts." -msgstr "" +msgstr "每次 \"git 提交 \"都会触发格式化和内核/测试脚本的执行。" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:119 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:138 +#, fuzzy msgid "" "If in a hurry, bypass the hook using ``--no-verify`` with the ``git " -"commit`` command. ::" -msgstr "" +"commit`` command." 
+msgstr "如果赶时间,可使用 ``--no-verify`` 和 ``git commit` 命令绕过钩子:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:124 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:145 +#, fuzzy msgid "" "For developers who prefer not to install the hook permanently, it is " "possible to execute a one-time check prior to committing changes by using" " the following command:" -msgstr "" +msgstr "对于不想永久安装钩子的开发人员,可以使用以下命令在提交更改之前执行一次性检查:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:130 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:152 +#, fuzzy msgid "" "This executes the formatting and linting checks/tests on all the files " "without modifying the default behavior of ``git commit``." -msgstr "" +msgstr "这将在不修改 ``git commit`` 默认行为的情况下对所有文件执行格式化和词排检查/测试。" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:133 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:156 msgid "Run Github Actions (CI) locally" msgstr "在本地运行 Github 操作 (CI)" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:135 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:158 #, fuzzy msgid "" "Developers could run the full set of Github Actions workflows under their" " local environment by using `Act `_. " "Please refer to the installation instructions under the linked repository" -" and run the next command under Flower main cloned repository folder::" +" and run the next command under Flower main cloned repository folder:" msgstr "" "开发人员可以使用 `Act _` 在本地环境下运行全套 Github Actions" " 工作流程。请参考链接仓库下的安装说明,并在 Flower 主克隆仓库文件夹下运行下一条命令::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:142 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:167 msgid "" "The Flower default workflow would run by setting up the required Docker " "machines underneath." 
msgstr "Flower 默认工作流程将通过在下面设置所需的 Docker 机器来运行。" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:147 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:171 msgid "Build Release" msgstr "版本发布" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:149 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:173 +#, fuzzy msgid "" "Flower uses Poetry to build releases. The necessary command is wrapped in" -" a simple script::" +" a simple script:" msgstr "Flower 使用 Poetry 创建发布版本。必要的命令封装在一个简单的脚本中::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:154 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:180 +#, fuzzy msgid "" -"The resulting :code:`.whl` and :code:`.tar.gz` releases will be stored in" -" the :code:`/dist` subdirectory." +"The resulting ``.whl`` and ``.tar.gz`` releases will be stored in the " +"``/dist`` subdirectory." msgstr "生成的 :code:`.whl` 和 :code:`.tar.gz` 版本将存储在 :code:`/dist` 子目录中。" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:159 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:184 msgid "Build Documentation" msgstr "构建文档" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:161 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:186 +#, fuzzy msgid "" "Flower's documentation uses `Sphinx `_. " "There's no convenience script to re-build the documentation yet, but it's" -" pretty easy::" +" pretty easy:" msgstr "" "Flower 的文档使用 `Sphinx `_。目前还没有很方便的脚本来重新构建文档,不过这很容易::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:167 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:194 msgid "This will generate HTML documentation in ``doc/build/html``." 
msgstr "这将在 ``doc/build/html`` 中生成 HTML 文档。" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: FedBN in PyTorch - From Centralized To Federated" -msgstr "示例: PyTorch 中的 FedBN - 从集中式到联邦式" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 +#: ../../source/docker/enable-tls.rst:2 #, fuzzy +msgid "Enable TLS for Secure Connections" +msgstr "启用 SSL 连接" + +#: ../../source/docker/enable-tls.rst:4 msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload with `FedBN " -"`_, a federated training strategy " -"designed for non-iid data. We are using PyTorch to train a Convolutional " -"Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " -"When applying FedBN, only few changes needed compared to :doc:`Example: " -"PyTorch - From Centralized To Federated `." +"When operating in a production environment, it is strongly recommended to" +" enable Transport Layer Security (TLS) for each Flower Component to " +"ensure secure communication." msgstr "" -"本教程将向您展示如何使用 Flower 为现有的机器学习框架构建一个联邦学习的版本,并使用 \"FedBN `_\"(一种针对非 iid 数据设计的联邦训练策略)。我们使用 PyTorch 在 CIFAR-10 " -"数据集上训练一个卷积神经网络(带有Batch Normalization层)。在应用 FedBN 时,只需对 `示例: PyTorch - " -"从集中式到联邦式 `_ 做少量改动。" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:9 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:10 -msgid "Centralized Training" -msgstr "集中式训练" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 +#: ../../source/docker/enable-tls.rst:7 #, fuzzy msgid "" -"All files are revised based on :doc:`Example: PyTorch - From Centralized " -"To Federated `. The only " -"thing to do is modifying the file called :code:`cifar.py`, revised part " -"is shown below:" +"To enable TLS, you will need a PEM-encoded root certificate, a PEM-" +"encoded private key and a PEM-encoded certificate chain." 
+msgstr "要启用 SSL,需要 CA 证书、服务器证书和服务器私钥。" + +#: ../../source/docker/enable-tls.rst:12 +#, fuzzy +msgid "" +"For testing purposes, you can generate your own self-signed certificates." +" The `Enable SSL connections `__ page contains a section that" +" will guide you through the process." msgstr "" -"所有文件均根据 `示例: PyTorch -从集中式到联邦式 `_。唯一要做的就是修改名为 :code:`cifar.py` " -"的文件,修改部分如下所示:" +"出于测试目的,你可以生成自己的自签名证书。启用 SSL 连接 `_ 页面中有一个部分将指导你完成这一过程。" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:13 +#: ../../source/docker/enable-tls.rst:17 msgid "" -"The model architecture defined in class Net() is added with Batch " -"Normalization layers accordingly." -msgstr "类 Net() 中定义的模型架构会相应添加Batch Normalization层。" +"Because Flower containers, by default, run with a non-root user ``app``, " +"the mounted files and directories must have the proper permissions for " +"the user ID ``49999``." +msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:41 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:157 -msgid "You can now run your machine learning workload:" -msgstr "现在,您可以运行您的机器学习工作了:" +#: ../../source/docker/enable-tls.rst:20 +msgid "" +"For example, to change the user ID of all files in the ``certificates/`` " +"directory, you can run ``sudo chown -R 49999:49999 certificates/*``." +msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 +#: ../../source/docker/enable-tls.rst:23 +#: ../../source/docker/persist-superlink-state.rst:15 +msgid "" +"If you later want to delete the directory, you can change the user ID " +"back to the current user ID by running ``sudo chown -R $USER:$(id -gn) " +"state``." +msgstr "" + +#: ../../source/docker/enable-tls.rst:27 #, fuzzy +msgid "SuperLink" +msgstr "flower-superlink" + +#: ../../source/docker/enable-tls.rst:29 msgid "" -"So far this should all look fairly familiar if you've used PyTorch " -"before. 
Let's take the next step and use what we've built to create a " -"federated learning system within FedBN, the system consists of one server" -" and two clients." +"Assuming all files we need are in the local ``certificates`` directory, " +"we can use the flag ``--volume`` to mount the local directory into the " +"``/app/certificates/`` directory of the container:" msgstr "" -"到目前为止,如果您以前使用过 PyTorch,这一切看起来应该相当熟悉。让我们进行下一步,使用我们所构建的内容在 FedBN " -"中创建一个联邦学习系统,该系统由一个服务器和两个客户端组成。" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:51 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:167 -msgid "Federated Training" -msgstr "联邦培训" +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "Understanding the command" +msgstr "训练模型" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 +#: ../../source/docker/enable-tls.rst:45 ../../source/docker/enable-tls.rst:92 +#: ../../source/docker/enable-tls.rst:125 +#: ../../source/docker/tutorial-quickstart-docker.rst:66 +#: ../../source/docker/tutorial-quickstart-docker.rst:103 +#: ../../source/docker/tutorial-quickstart-docker.rst:217 +#: ../../source/docker/tutorial-quickstart-docker.rst:305 #, fuzzy -msgid "" -"If you have read :doc:`Example: PyTorch - From Centralized To Federated " -"`, the following parts are" -" easy to follow, only :code:`get_parameters` and :code:`set_parameters` " -"function in :code:`client.py` needed to revise. If not, please read the " -":doc:`Example: PyTorch - From Centralized To Federated `. first." +msgid "``docker run``: This tells Docker to run a container from an image." 
+msgstr "`docker run``: 这是运行新 Docker 容器的命令。" + +#: ../../source/docker/enable-tls.rst:46 ../../source/docker/enable-tls.rst:93 +#: ../../source/docker/enable-tls.rst:126 +#: ../../source/docker/tutorial-quickstart-docker.rst:67 +#: ../../source/docker/tutorial-quickstart-docker.rst:104 +#: ../../source/docker/tutorial-quickstart-docker.rst:218 +#: ../../source/docker/tutorial-quickstart-docker.rst:306 +msgid "``--rm``: Remove the container once it is stopped or the command exits." msgstr "" -"如果你读过 `示例: PyTorch - 从集中式到联邦式 `_,下面的部分就很容易理解了,只需要修改 " -":code:`get_parameters` 和 :code:`set_parameters` 中的 :code:`client.py` " -"函数。如果没有,请阅读 `示例: PyTorch - 从集中式到联邦式 `_。" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:56 +#: ../../source/docker/enable-tls.rst msgid "" -"Our example consists of one *server* and two *clients*. In FedBN, " -":code:`server.py` keeps unchanged, we can start the server directly." -msgstr "我们的示例包括一个*服务器*和两个*客户端*。在 FedBN 中,:code:`server.py` 保持不变,我们可以直接启动服务器。" +"``--volume ./certificates/:/app/certificates/:ro``: Mount the " +"``certificates`` directory in" +msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:62 +#: ../../source/docker/enable-tls.rst msgid "" -"Finally, we will revise our *client* logic by changing " -":code:`get_parameters` and :code:`set_parameters` in :code:`client.py`, " -"we will exclude batch normalization parameters from model parameter list " -"when sending to or receiving from the server." 
+"the current working directory of the host machine as a read-only volume " +"at the" msgstr "" -"最后,我们将修改 *client* 的逻辑,修改 :code:`client.py` 中的 :code:`get_parameters` 和 " -":code:`set_parameters`,在向服务器发送或从服务器接收时,我们将从模型参数列表中排除batch " -"normalization层的参数。" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:85 -msgid "Now, you can now open two additional terminal windows and run" -msgstr "现在,您可以打开另外两个终端窗口并运行程序" +#: ../../source/docker/enable-tls.rst +msgid "``/app/certificates`` directory inside the container." +msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:91 +#: ../../source/docker/enable-tls.rst msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your (previously centralized) PyTorch project run federated " -"learning with FedBN strategy across two clients. Congratulations!" -msgstr "确保服务器仍在运行后,然后您就能看到您的 PyTorch 项目(之前是集中式的)通过 FedBN 策略在两个客户端上运行联合学习。祝贺!" +"This allows the container to access the TLS certificates that are stored " +"in the certificates" +msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:94 -#: ../../source/example-jax-from-centralized-to-federated.rst:277 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:310 -#: ../../source/tutorial-quickstart-jax.rst:283 -msgid "Next Steps" -msgstr "下一步工作" +#: ../../source/docker/enable-tls.rst +msgid "directory." +msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:96 +#: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"The full source code for this example can be found `here " -"`_. Our example is of course somewhat over-" -"simplified because both clients load the exact same dataset, which isn't " -"realistic. You're now prepared to explore this topic further. How about " -"using different subsets of CIFAR-10 on each client? How about adding more" -" clients?" 
+":substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -"本示例的完整源代码可在 `_ " -"找到。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。让我们准备好进一步探讨这一主题。如在每个客户端使用不同的 " -"CIFAR-10 子集,或者增加客户端的数量。" -#: ../../source/example-jax-from-centralized-to-federated.rst:2 -msgid "Example: JAX - Run JAX Federated" -msgstr "示例: JAX - 运行联邦式 JAX" - -#: ../../source/example-jax-from-centralized-to-federated.rst:4 -#: ../../source/tutorial-quickstart-jax.rst:10 +#: ../../source/docker/enable-tls.rst msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing JAX workload. We are using JAX to train a linear " -"regression model on a scikit-learn dataset. We will structure the example" -" similar to our `PyTorch - From Centralized To Federated " -"`_ walkthrough. First, we build a centralized " -"training approach based on the `Linear Regression with JAX " -"`_" -" tutorial`. Then, we build upon the centralized training code to run the " -"training in a federated fashion." +"tag of the image. The tag :substitution-code:`|stable_flwr_version|` " +"represents a specific version of the image." 
msgstr "" -"本教程将向您展示如何使用 Flower 构建现有 JAX 的联邦学习版本。我们将使用 JAX 在 scikit-learn " -"数据集上训练线性回归模型。我们将采用与 `PyTorch - 从集中式到联邦式 " -"`_ 教程中类似的示例结构。首先,我们根据 `JAX 的线性回归 " -"`_" -" 教程构建集中式训练方法。然后,我们在集中式训练代码的基础上以联邦方式运行训练。" -#: ../../source/example-jax-from-centralized-to-federated.rst:10 -#: ../../source/tutorial-quickstart-jax.rst:16 +#: ../../source/docker/enable-tls.rst msgid "" -"Before we start building our JAX example, we need install the packages " -":code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`:" +"``--ssl-ca-certfile certificates/ca.crt``: Specify the location of the CA" +" certificate file" msgstr "" -"在开始构建 JAX 示例之前,我们需要安装软件包 :code:`jax`、:code:`jaxlib`、:code:`scikit-learn` " -"和 :code:`flwr`:" -#: ../../source/example-jax-from-centralized-to-federated.rst:18 -#: ../../source/tutorial-quickstart-jax.rst:24 -msgid "Linear Regression with JAX" -msgstr "使用 JAX 进行线性回归" +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "inside the container." +msgstr "使用 VSCode Dev Containers 进行开发" -#: ../../source/example-jax-from-centralized-to-federated.rst:20 -#: ../../source/tutorial-quickstart-jax.rst:26 +#: ../../source/docker/enable-tls.rst msgid "" -"We begin with a brief description of the centralized training code based " -"on a :code:`Linear Regression` model. If you want a more in-depth " -"explanation of what's going on then have a look at the official `JAX " -"documentation `_." +"The ``certificates/ca.crt`` file is a certificate that is used to verify " +"the identity of the" msgstr "" -"首先,我们将简要介绍基于 :code:`Linear Regression` 模型的集中式训练代码。如果您想获得更深入的解释,请参阅官方的 " -"`JAX 文档 `_。" -#: ../../source/example-jax-from-centralized-to-federated.rst:23 -#: ../../source/tutorial-quickstart-jax.rst:29 +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "SuperLink." 
+msgstr "flower-superlink" + +#: ../../source/docker/enable-tls.rst msgid "" -"Let's create a new file called :code:`jax_training.py` with all the " -"components required for a traditional (centralized) linear regression " -"training. First, the JAX packages :code:`jax` and :code:`jaxlib` need to " -"be imported. In addition, we need to import :code:`sklearn` since we use " -":code:`make_regression` for the dataset and :code:`train_test_split` to " -"split the dataset into a training and test set. You can see that we do " -"not yet import the :code:`flwr` package for federated learning. This will" -" be done later." +"``--ssl-certfile certificates/server.pem``: Specify the location of the " +"SuperLink's" msgstr "" -"让我们创建一个名为 :code:`jax_training.py` 的新文件,其中包含传统(集中式)线性回归训练所需的所有组件。首先,需要导入 " -"JAX 包 :code:`jax` 和 :code:`jaxlib`。此外,我们还需要导入 :code:`sklearn`,因为我们使用 " -":code:`make_regression` 创建数据集,并使用 :code:`train_test_split` " -"将数据集拆分成训练集和测试集。您可以看到,我们还没有导入用于联邦学习的 :code:`flwr` 软件包,这将在稍后完成。" -#: ../../source/example-jax-from-centralized-to-federated.rst:37 -#: ../../source/tutorial-quickstart-jax.rst:43 -msgid "" -"The :code:`load_data()` function loads the mentioned training and test " -"sets." -msgstr ":code:`load_data()` 函数会加载上述训练集和测试集。" +#: ../../source/docker/enable-tls.rst +msgid "TLS certificate file inside the container." +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:47 -#: ../../source/tutorial-quickstart-jax.rst:53 +#: ../../source/docker/enable-tls.rst msgid "" -"The model architecture (a very simple :code:`Linear Regression` model) is" -" defined in :code:`load_model()`." 
-msgstr "模型结构(一个非常简单的 :code:`Linear Regression` 线性回归模型)在 :code:`load_model()` 中定义。" +"The ``certificates/server.pem`` file is used to identify the SuperLink " +"and to encrypt the" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:59 -#: ../../source/tutorial-quickstart-jax.rst:65 -msgid "" -"We now need to define the training (function :code:`train()`), which " -"loops over the training set and measures the loss (function " -":code:`loss_fn()`) for each batch of training examples. The loss function" -" is separate since JAX takes derivatives with a :code:`grad()` function " -"(defined in the :code:`main()` function and called in :code:`train()`)." +#: ../../source/docker/enable-tls.rst +msgid "data that is transmitted over the network." msgstr "" -"现在,我们需要定义训练函数( :code:`train()`)。它循环遍历训练集,并计算每批训练数据的损失值(函数 " -":code:`loss_fn()`)。由于 JAX 使用 :code:`grad()` 函数提取导数(在 :code:`main()` " -"函数中定义,并在 :code:`train()` 中调用),因此损失函数是独立的。" -#: ../../source/example-jax-from-centralized-to-federated.rst:77 -#: ../../source/tutorial-quickstart-jax.rst:83 +#: ../../source/docker/enable-tls.rst msgid "" -"The evaluation of the model is defined in the function " -":code:`evaluation()`. The function takes all test examples and measures " -"the loss of the linear regression model." -msgstr "模型的评估在函数 :code:`evaluation()` 中定义。该函数获取所有测试数据,并计算线性回归模型的损失值。" +"``--ssl-keyfile certificates/server.key``: Specify the location of the " +"SuperLink's" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:88 -#: ../../source/tutorial-quickstart-jax.rst:94 +#: ../../source/docker/enable-tls.rst +msgid "TLS private key file inside the container." +msgstr "" + +#: ../../source/docker/enable-tls.rst msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our model using JAX. 
" -"As already mentioned, the :code:`jax.grad()` function is defined in " -":code:`main()` and passed to :code:`train()`." +"The ``certificates/server.key`` file is used to decrypt the data that is " +"transmitted over" msgstr "" -"在定义了数据加载、模型架构、训练和评估之后,我们就可以把这些放在一起,使用 JAX " -"训练我们的模型了。如前所述,:code:`jax.grad()` 函数在 :code:`main()` 中定义,并传递给 " -":code:`train()`。" -#: ../../source/example-jax-from-centralized-to-federated.rst:105 -#: ../../source/tutorial-quickstart-jax.rst:111 -msgid "You can now run your (centralized) JAX linear regression workload:" -msgstr "现在您可以运行(集中式)JAX 线性回归工作了:" +#: ../../source/docker/enable-tls.rst +msgid "the network." +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:111 -#: ../../source/tutorial-quickstart-jax.rst:117 -msgid "" -"So far this should all look fairly familiar if you've used JAX before. " -"Let's take the next step and use what we've built to create a simple " -"federated learning system consisting of one server and two clients." -msgstr "到目前为止,如果你以前使用过 JAX,就会对这一切感到很熟悉。下一步,让我们利用已构建的代码创建一个简单的联邦学习系统(一个服务器和两个客户端)。" +#: ../../source/docker/enable-tls.rst:72 +#, fuzzy +msgid "SuperNode" +msgstr "flower-superlink" -#: ../../source/example-jax-from-centralized-to-federated.rst:115 -#: ../../source/tutorial-quickstart-jax.rst:121 -msgid "JAX meets Flower" -msgstr "JAX 结合 Flower" +#: ../../source/docker/enable-tls.rst:74 +#, fuzzy +msgid "" +"Assuming that the ``ca.crt`` certificate already exists locally, we can " +"use the flag ``--volume`` to mount the local certificate into the " +"container's ``/app/`` directory." 
+msgstr "" +"假设我们需要的所有文件都在本地的 ``certificates`` 目录中,我们可以使用标记 ``-v`` 将本地目录挂载到容器的 " +"``/app/`` 目录中。这样,服务器就可以访问容器内的文件。最后,我们使用 ``--certificates`` 标志将证书名称传递给服务器。" -#: ../../source/example-jax-from-centralized-to-federated.rst:117 -#: ../../source/tutorial-quickstart-jax.rst:123 +#: ../../source/docker/enable-tls.rst:79 msgid "" -"The concept of federating an existing workload is always the same and " -"easy to understand. We have to start a *server* and then use the code in " -":code:`jax_training.py` for the *clients* that are connected to the " -"*server*. The *server* sends model parameters to the clients. The " -"*clients* run the training and update the parameters. The updated " -"parameters are sent back to the *server*, which averages all received " -"parameter updates. This describes one round of the federated learning " -"process, and we repeat this for multiple rounds." +"If you're generating self-signed certificates and the ``ca.crt`` " +"certificate doesn't exist on the SuperNode, you can copy it over after " +"the generation step." +msgstr "" + +#: ../../source/docker/enable-tls.rst +msgid "``--volume ./ca.crt:/app/ca.crt/:ro``: Mount the ``ca.crt`` file from the" msgstr "" -"把现有工作联邦化的概念始终是相同的,也很容易理解。我们要启动一个*服务器*,然后对连接到*服务器*的*客户端*运行 " -":code:`jax_training.py`中的代码。*服务器*向客户端发送模型参数,*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后服务器对所有收到的参数进行平均聚合。以上的描述构成了一轮联邦学习,我们将重复进行多轮学习。" -#: ../../source/example-jax-from-centralized-to-federated.rst:123 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:181 -#: ../../source/tutorial-quickstart-jax.rst:129 +#: ../../source/docker/enable-tls.rst msgid "" -"Our example consists of one *server* and two *clients*. Let's set up " -":code:`server.py` first. The *server* needs to import the Flower package " -":code:`flwr`. Next, we use the :code:`start_server` function to start a " -"server and tell it to perform three rounds of federated learning." 
+"current working directory of the host machine as a read-only volume at " +"the ``/app/ca.crt``" msgstr "" -"我们的示例包括一个*服务器*和两个*客户端*。让我们先设置 :code:`server.py`。*服务器*需要导入 Flower 软件包 " -":code:`flwr`。接下来,我们使用 :code:`start_server` 函数启动服务器,并让它执行三轮联邦学习。" -#: ../../source/example-jax-from-centralized-to-federated.rst:133 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 -#: ../../source/tutorial-quickstart-jax.rst:139 -msgid "We can already start the *server*:" -msgstr "我们已经可以启动*服务器*了:" +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "directory inside the container." +msgstr "使用 VSCode Dev Containers 进行开发" -#: ../../source/example-jax-from-centralized-to-federated.rst:139 -#: ../../source/tutorial-quickstart-jax.rst:145 +#: ../../source/docker/enable-tls.rst msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined JAX training in :code:`jax_training.py`. Our" -" *client* needs to import :code:`flwr`, but also :code:`jax` and " -":code:`jaxlib` to update the parameters on our JAX model:" +":substitution-code:`flwr/supernode:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -"最后,我们将在 :code:`client.py` 中定义我们的 *client* 逻辑,并以之前在 " -":code:`jax_training.py` 中定义的 JAX 训练为基础。我们的 *client* 需要导入 " -":code:`flwr`,还需要导入 :code:`jax` 和 :code:`jaxlib` 以更新 JAX 模型的参数:" -#: ../../source/example-jax-from-centralized-to-federated.rst:154 -#: ../../source/tutorial-quickstart-jax.rst:160 +#: ../../source/docker/enable-tls.rst msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`FlowerClient`. 
:code:`NumPyClient` is slightly " -"easier to implement than :code:`Client` if you use a framework with good " -"NumPy interoperability (like JAX) because it avoids some of the " -"boilerplate that would otherwise be necessary. :code:`FlowerClient` needs" -" to implement four methods, two methods for getting/setting model " -"parameters, one method for training the model, and one method for testing" -" the model:" +"``--root-certificates ca.crt``: This specifies the location of the CA " +"certificate file" msgstr "" -"实现一个 Flower *client*基本上意味着去实现一个 :code:`flwr.client.Client` 或 " -":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " -":code:`flwr.client.NumPyClient`,并将其命名为 :code:`FlowerClient`。如果使用具有良好 " -"NumPy 互操作性的框架(如 JAX),:code:`NumPyClient` 比 " -":code:`Client`更容易实现,因为它避免了一些不必要的操作。:code:`FlowerClient` " -"需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" -#: ../../source/example-jax-from-centralized-to-federated.rst:161 -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid ":code:`set_parameters (optional)`" -msgstr ":code:`set_parameters (可选)`" +#: ../../source/docker/enable-tls.rst +msgid "The ``ca.crt`` file is used to verify the identity of the SuperLink." 
+msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:160 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -#: ../../source/tutorial-quickstart-jax.rst:166 -msgid "" -"set the model parameters on the local model that are received from the " -"server" -msgstr "在本地模型上设置从服务器接收的模型参数" +#: ../../source/docker/enable-tls.rst:105 +msgid "SuperExec" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:161 -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid "transform parameters to NumPy :code:`ndarray`'s" -msgstr "将参数转换为 NumPy :code:`ndarray`格式" +#: ../../source/docker/enable-tls.rst:107 +msgid "" +"Assuming all files we need are in the local ``certificates`` directory " +"where the SuperExec will be executed from, we can use the flag " +"``--volume`` to mount the local directory into the ``/app/certificates/``" +" directory of the container:" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:162 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:220 -#: ../../source/tutorial-quickstart-jax.rst:168 +#: ../../source/docker/enable-tls.rst msgid "" -"loop over the list of model parameters received as NumPy " -":code:`ndarray`'s (think list of neural network layers)" -msgstr "循环遍历以 NumPy :code:`ndarray` 形式接收的模型参数列表(可以看作神经网络的列表)" +":substitution-code:`flwr/superexec:|stable_flwr_version|`: The name of " +"the image to be run and the specific" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:163 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 -#: ../../source/tutorial-quickstart-jax.rst:169 -#: ../../source/tutorial-quickstart-pytorch.rst:155 -#: ../../source/tutorial-quickstart-scikitlearn.rst:118 -msgid ":code:`get_parameters`" -msgstr ":code:`get_parameters`" +#: ../../source/docker/enable-tls.rst +msgid "SuperExec." 
+msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:164 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:222 -#: ../../source/tutorial-quickstart-jax.rst:170 +#: ../../source/docker/enable-tls.rst msgid "" -"get the model parameters and return them as a list of NumPy " -":code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects)" +"``--ssl-certfile certificates/server.pem``: Specify the location of the " +"SuperExec's" msgstr "" -"获取模型参数,并以 NumPy :code:`ndarray`的列表形式返回(这正是 " -":code:`flwr.client.NumPyClient`所匹配的格式)" -#: ../../source/example-jax-from-centralized-to-federated.rst:167 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 -#: ../../source/tutorial-quickstart-pytorch.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 -msgid ":code:`fit`" -msgstr ":code:`fit`" - -#: ../../source/example-jax-from-centralized-to-federated.rst:166 -#: ../../source/example-jax-from-centralized-to-federated.rst:170 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:224 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:228 -#: ../../source/tutorial-quickstart-jax.rst:172 -#: ../../source/tutorial-quickstart-jax.rst:176 +#: ../../source/docker/enable-tls.rst msgid "" -"update the parameters of the local model with the parameters received " -"from the server" -msgstr "用从服务器接收到的参数更新本地模型的参数" +"The ``certificates/server.pem`` file is used to identify the SuperExec " +"and to encrypt the" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:167 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 -msgid "train the model on the local training set" -msgstr "在本地训练集上训练模型" +#: ../../source/docker/enable-tls.rst +msgid "" +"``--ssl-keyfile certificates/server.key``: Specify the location of the " +"SuperExec's" +msgstr "" -#: 
../../source/example-jax-from-centralized-to-federated.rst:168 -#: ../../source/tutorial-quickstart-jax.rst:174 -msgid "get the updated local model parameters and return them to the server" -msgstr "获取更新后的本地模型参数并返回服务器" +#: ../../source/docker/enable-tls.rst +msgid "" +"``--executor-config root-" +"certificates=\\\"certificates/superlink_ca.crt\\\"``: Specify the" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:172 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 -#: ../../source/tutorial-quickstart-jax.rst:178 -#: ../../source/tutorial-quickstart-pytorch.rst:164 -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 -msgid ":code:`evaluate`" -msgstr ":code:`evaluate`" +#: ../../source/docker/enable-tls.rst +msgid "" +"location of the CA certificate file inside the container that the " +"SuperExec executor" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:171 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:229 -#: ../../source/tutorial-quickstart-jax.rst:177 -msgid "evaluate the updated model on the local test set" -msgstr "在本地测试集上评估更新后的模型" +#: ../../source/docker/enable-tls.rst +msgid "should use to verify the SuperLink's identity." +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:172 -#: ../../source/tutorial-quickstart-jax.rst:178 -msgid "return the local loss to the server" -msgstr "向服务器返回本地损失值" +#: ../../source/docker/index.rst:2 +#, fuzzy +msgid "Run Flower using Docker" +msgstr "使用 Docker 运行 Flower" -#: ../../source/example-jax-from-centralized-to-federated.rst:174 -#: ../../source/tutorial-quickstart-jax.rst:180 +#: ../../source/docker/index.rst:4 msgid "" -"The challenging part is to transform the JAX model parameters from " -":code:`DeviceArray` to :code:`NumPy ndarray` to make them compatible with" -" `NumPyClient`." 
+"Start your Flower journey with our pre-made Docker images on Docker Hub, " +"supporting ``amd64`` and ``arm64v8`` architectures." msgstr "" -"具有挑战性的部分是将 JAX 模型参数从 :code:`DeviceArray` 转换为 :code:`NumPy ndarray`,使其与 " -"`NumPyClient` 兼容。" -#: ../../source/example-jax-from-centralized-to-federated.rst:176 -#: ../../source/tutorial-quickstart-jax.rst:182 +#: ../../source/docker/index.rst:7 msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`evaluate()` previously " -"defined in :code:`jax_training.py`. So what we really do here is we tell " -"Flower through our :code:`NumPyClient` subclass which of our already " -"defined functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +"Our Quickstart guide walks you through containerizing a Flower project " +"and running it end to end using Docker." msgstr "" -"这两个 :code:`NumPyClient` 方法 :code:`fit` 和 :code:`evaluate` 使用了之前在 " -":code:`jax_training.py` 中定义的函数 :code:`train()` 和 " -":code:`evaluate()`。因此,我们在这里要做的就是通过 :code:`NumPyClient` 子类告知 Flower " -"在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让您更好地理解传递的数据类型。" -#: ../../source/example-jax-from-centralized-to-federated.rst:245 -#: ../../source/tutorial-quickstart-jax.rst:251 -msgid "Having defined the federation process, we can run it." -msgstr "定义了联邦进程后,我们就可以运行它了。" +#: ../../source/docker/index.rst:11 +#, fuzzy +msgid "Getting Started" +msgstr "开始" -#: ../../source/example-jax-from-centralized-to-federated.rst:268 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:301 -#: ../../source/tutorial-quickstart-jax.rst:274 -msgid "And that's it. 
You can now open two additional terminal windows and run" -msgstr "就是这样,现在你可以打开另外两个终端窗口,然后运行" +#: ../../source/docker/index.rst:19 +msgid "Running in Production" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:274 -#: ../../source/tutorial-quickstart-jax.rst:280 -msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your JAX project run federated learning across two clients. " -"Congratulations!" -msgstr "确保服务器仍在运行,然后在每个客户端窗口就能看到你的 JAX 项目在两个客户端上运行联邦学习了。祝贺!" +#: ../../source/docker/index.rst:28 +#, fuzzy +msgid "Advanced Options" +msgstr "高级安装选项" -#: ../../source/example-jax-from-centralized-to-federated.rst:279 -#: ../../source/tutorial-quickstart-jax.rst:285 +#: ../../source/docker/index.rst:40 +#, fuzzy +msgid "Run Flower using Docker Compose" +msgstr "使用 Docker 运行 Flower" + +#: ../../source/docker/persist-superlink-state.rst:2 +msgid "Persist the State of the SuperLink" +msgstr "" + +#: ../../source/docker/persist-superlink-state.rst:4 +#, fuzzy msgid "" -"The source code of this example was improved over time and can be found " -"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " -"clients load the same dataset." +"By default, the Flower SuperLink keeps its state in-memory. When using " +"the Docker flag ``--rm``, the state is not persisted between container " +"starts." msgstr "" -"此示例的源代码经过长期改进,可在此处找到: `Quickstart JAX " -"`_。我们的示例有些过于简单,因为两个客户端都加载了相同的数据集。" +"默认情况下,Flower 服务器会将状态保存在内存中。使用 Docker 标志 ``--rm`` " +"时,状态不会在容器启动之间持久化。下面我们将展示如何将状态保存到主机系统上的文件中。" -#: ../../source/example-jax-from-centralized-to-federated.rst:282 -#: ../../source/tutorial-quickstart-jax.rst:288 +#: ../../source/docker/persist-superlink-state.rst:7 msgid "" -"You're now prepared to explore this topic further. How about using a more" -" sophisticated model or using a different dataset? How about adding more " -"clients?" -msgstr "现在,您已准备好进行更深一步探索了。例如使用更复杂的模型或使用不同的数据集会如何?增加更多客户端会如何?" 
- -#: ../../source/example-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: PyTorch - From Centralized To Federated" -msgstr "实例: PyTorch - 从集中式到联邦式" +"If you want to persist the state of the SuperLink on your host system, " +"all you need to do is specify a directory where you want to save the file" +" on your host system and a name for the database file." +msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:4 +#: ../../source/docker/persist-superlink-state.rst:11 msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload. We are using PyTorch to" -" train a Convolutional Neural Network on the CIFAR-10 dataset. First, we " -"introduce this machine learning task with a centralized training approach" -" based on the `Deep Learning with PyTorch " -"`_ " -"tutorial. Then, we build upon the centralized training code to run the " -"training in a federated fashion." +"By default, the SuperLink container runs with a non-root user called " +"``app`` with the user ID ``49999``. It is recommended to create a new " +"directory and change the user ID of the directory to ``49999`` to ensure " +"the mounted directory has the proper permissions." msgstr "" -"本教程将向您展示如何使用 Flower 构建现有机器学习工作的联邦版本。我们使用 PyTorch 在 CIFAR-10 " -"数据集上训练一个卷积神经网络。首先,我们基于 \"Deep Learning with PyTorch " -"`_\"教程,采用集中式训练方法介绍了这项机器学习任务。然后,我们在集中式训练代码的基础上以联邦方式运行训练。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 +#: ../../source/docker/persist-superlink-state.rst:21 +#, fuzzy msgid "" -"We begin with a brief description of the centralized CNN training code. " -"If you want a more in-depth explanation of what's going on then have a " -"look at the official `PyTorch tutorial " -"`_." 
+"In the example below, we create a new directory called ``state``, change " +"the user ID and tell Docker via the flag ``--volume`` to mount the local " +"``state`` directory into the ``/app/state`` directory of the container. " +"Lastly, we use the flag ``--database`` to specify the name of the " +"database file." msgstr "" -"我们首先简要介绍一下集中式 CNN 训练代码。如果您想获得更深入的解释,请参阅 PyTorch 官方教程`PyTorch tutorial " -"`_。" +"如果想在主机系统上持久保存服务器的状态,只需在主机系统上指定保存文件的路径和数据库文件的名称即可。在下面的示例中,我们通过标志 ``-v`` 告诉" +" Docker 将用户的主目录(主机上的 ``~/``)挂载到容器的 ``/app/`` 目录中。此外,我们使用标志 ``--database``" +" 来指定数据库文件的名称。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:15 +#: ../../source/docker/persist-superlink-state.rst:36 +#, fuzzy msgid "" -"Let's create a new file called :code:`cifar.py` with all the components " -"required for a traditional (centralized) training on CIFAR-10. First, all" -" required packages (such as :code:`torch` and :code:`torchvision`) need " -"to be imported. You can see that we do not import any package for " -"federated learning. You can keep all these imports as they are even when " -"we add the federated learning components at a later point." +"As soon as the SuperLink starts, the file ``state.db`` is created in the " +"``state`` directory on your host system. If the file already exists, the " +"SuperLink tries to restore the state from the file. To start the " +"SuperLink with an empty database, ensure that there is no database called" +" ``state.db`` in the ``state`` directory (``rm state.db``) before you " +"execute the ``docker run`` command above." 
msgstr "" -"让我们创建一个名为 :code:`cifar.py` 的新文件,其中包含 CIFAR-10 " -"传统(集中)培训所需的所有组件。首先,需要导入所有必需的软件包(如 :code:`torch` 和 " -":code:`torchvision`)。您可以看到,我们没有导入任何用于联邦学习的软件包。即使在以后添加联邦学习组件时,也可以保留所有这些导入。" +"服务器一启动,就会在主机系统的用户主目录下创建文件 " +"``state.db``。如果该文件已经存在,服务器会尝试从该文件恢复状态。要以空数据库启动服务器,只需删除 ``state.db`` 文件即可。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:32 +#: ../../source/docker/pin-version.rst:2 +#, fuzzy +msgid "Pin a Docker Image to a Specific Version" +msgstr "将 Docker 映像固定到特定版本" + +#: ../../source/docker/pin-version.rst:4 +#, fuzzy msgid "" -"As already mentioned we will use the CIFAR-10 dataset for this machine " -"learning workload. The model architecture (a very simple Convolutional " -"Neural Network) is defined in :code:`class Net()`." +"It may happen that we update the images behind the tags. Such updates " +"usually include security updates of system dependencies that should not " +"change the functionality of Flower. However, if you want to ensure that " +"you use a fixed version of the Docker image in your deployments, you can " +"`specify the digest " +"`_ of the image instead of the tag." msgstr "" -"如前所述,我们将使用 CIFAR-10 数据集进行机器学习。模型架构(一个非常简单的卷积神经网络)在 :code:`class Net()` " -"中定义。" +"我们可能会更新标签后面的图像。此类更新通常包括系统依赖项的安全更新,不会改变 Flower " +"的功能。不过,如果您想确保始终使用同一张图片,可以指定图片的哈希值而不是标签。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:56 +#: ../../source/docker/pin-version.rst:14 +#, fuzzy msgid "" -"The :code:`load_data()` function loads the CIFAR-10 training and test " -"sets. The :code:`transform` normalized the data after loading." 
+"The following command returns the current image digest referenced by the " +":substitution-code:`superlink:|stable_flwr_version|` tag:" +msgstr "下面的命令将返回由 ``server:1.7.0-py3.11-ubuntu22.04`` 标记引用的当前图像哈希值:" + +#: ../../source/docker/pin-version.rst:23 +msgid "This will output" msgstr "" -":code:`load_data()` 函数加载 CIFAR-10 " -"训练集和测试集。加载数据后,:code:`transform`函数对数据进行了归一化处理。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:74 -msgid "" -"We now need to define the training (function :code:`train()`) which loops" -" over the training set, measures the loss, backpropagates it, and then " -"takes one optimizer step for each batch of training examples." -msgstr "现在,我们需要定义训练函数(:code:`train()`),该函数在训练集上循环训练,计算损失值并反向传播,然后为每批训练数据在优化器上执行一个优化步骤。" +#: ../../source/docker/pin-version.rst:30 +#, fuzzy +msgid "Next, we can pin the digest when running a new SuperLink container:" +msgstr "接下来,我们可以在运行新服务器容器时将哈希值固定下来:" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:76 -msgid "" -"The evaluation of the model is defined in the function :code:`test()`. " -"The function loops over all test samples and measures the loss of the " -"model based on the test dataset." -msgstr "模型的评估在函数 :code:`test()` 中定义。该函数循环遍历所有测试样本,并计算测试数据集的模型损失值。" +#: ../../source/docker/run-as-root-user.rst:2 +msgid "Run with Root User Privileges" +msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:136 +#: ../../source/docker/run-as-root-user.rst:4 msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our CNN on CIFAR-10." -msgstr "在确定了数据加载、模型架构、训练和评估之后,我们就可以将所有整合在一起,在 CIFAR-10 上训练我们的 CNN。" +"Flower Docker images, by default, run with a non-root user " +"(username/groupname: ``app``, UID/GID: ``49999``). Using root user is " +"**not recommended** unless it is necessary for specific tasks during the " +"build process." 
+msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:163 +#: ../../source/docker/run-as-root-user.rst:8 msgid "" -"So far, this should all look fairly familiar if you've used PyTorch " -"before. Let's take the next step and use what we've built to create a " -"simple federated learning system consisting of one server and two " -"clients." +"Always make sure to run the container as a non-root user in production to" +" maintain security best practices." msgstr "" -"到目前为止,如果你以前用过 " -"PyTorch,这一切看起来应该相当熟悉。让我们进行下一步,利用我们所构建的内容创建一个简单联邦学习系统(由一个服务器和两个客户端组成)。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:169 -msgid "" -"The simple machine learning project discussed in the previous section " -"trains the model on a single dataset (CIFAR-10), we call this centralized" -" learning. This concept of centralized learning, as shown in the previous" -" section, is probably known to most of you, and many of you have used it " -"previously. Normally, if you'd want to run machine learning workloads in " -"a federated fashion, then you'd have to change most of your code and set " -"everything up from scratch. This can be a considerable effort." -msgstr "上一节讨论的简单机器学习项目在单一数据集(CIFAR-10)上训练模型,我们称之为集中学习。如上一节所示,集中学习的概念可能为大多数人所熟知,而且很多人以前都使用过。通常情况下,如果要以联邦方式运行机器学习工作,就必须更改大部分代码,并从头开始设置一切。这可能是一个相当大的工作量。" +#: ../../source/docker/run-as-root-user.rst:12 +msgid "Run a Container with Root User Privileges" +msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:173 +#: ../../source/docker/run-as-root-user.rst:14 msgid "" -"However, with Flower you can evolve your pre-existing code into a " -"federated learning setup without the need for a major rewrite." -msgstr "不过,有了 Flower,您可以轻松地将已有的代码转变成联邦学习的模式,无需进行大量重写。" +"Run the Docker image with the ``-u`` flag and specify ``root`` as the " +"username:" +msgstr "" + +#: ../../source/docker/run-as-root-user.rst:21 +msgid "This command will run the Docker container with root user privileges." 
+msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:175 +#: ../../source/docker/run-as-root-user.rst:24 +msgid "Run the Build Process with Root User Privileges" +msgstr "" + +#: ../../source/docker/run-as-root-user.rst:26 msgid "" -"The concept is easy to understand. We have to start a *server* and then " -"use the code in :code:`cifar.py` for the *clients* that are connected to " -"the *server*. The *server* sends model parameters to the clients. The " -"*clients* run the training and update the parameters. The updated " -"parameters are sent back to the *server* which averages all received " -"parameter updates. This describes one round of the federated learning " -"process and we repeat this for multiple rounds." +"If you want to switch to the root user during the build process of the " +"Docker image to install missing system dependencies, you can use the " +"``USER root`` directive within your Dockerfile." msgstr "" -"这个概念很容易理解。我们必须启动一个*服务器*,然后对连接到*服务器*的*客户端*使用 " -":code:`cifar.py`中的代码。*服务器*向客户端发送模型参数,*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后会对所有收到的参数更新进行平均聚合。以上描述的是一轮联邦学习过程,我们将重复进行多轮学习。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:197 +#: ../../source/docker/run-as-root-user.rst:30 +#, fuzzy +msgid "SuperNode Dockerfile" +msgstr "创建超级节点 Dockerfile" + +#: ../../source/docker/run-as-subprocess.rst:2 +#, fuzzy +msgid "Run ClientApp as a Subprocess" +msgstr "运行分类器和测试" + +#: ../../source/docker/run-as-subprocess.rst:4 msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined centralized training in :code:`cifar.py`. " -"Our *client* needs to import :code:`flwr`, but also :code:`torch` to " -"update the parameters on our PyTorch model:" +"In this mode, the ClientApp is executed as a subprocess within the " +"SuperNode Docker container, rather than running in a separate container. 
" +"This approach reduces the number of running containers, which can be " +"beneficial for environments with limited resources. However, it also " +"means that the ClientApp is no longer isolated from the SuperNode, which " +"may introduce additional security concerns." msgstr "" -"最后,我们将在 :code:`client.py` 中定义我们的 *client* 逻辑,并以之前在 :code:`cifar.py` " -"中定义的集中式训练为基础。我们的 *client* 不仅需要导入 :code:`flwr`,还需要导入 :code:`torch`,以更新 " -"PyTorch 模型的参数:" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:213 +#: ../../source/docker/run-as-subprocess.rst:13 msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier" -" to implement than :code:`Client` if you use a framework with good NumPy " -"interoperability (like PyTorch or TensorFlow/Keras) because it avoids " -"some of the boilerplate that would otherwise be necessary. " -":code:`CifarClient` needs to implement four methods, two methods for " -"getting/setting model parameters, one method for training the model, and " -"one method for testing the model:" +"Before running the ClientApp as a subprocess, ensure that the FAB " +"dependencies have been installed in the SuperNode images. 
This can be " +"done by extending the SuperNode image:" msgstr "" -"实现 Flower *client*基本上意味着实现 :code:`flwr.client.Client` 或 " -":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " -":code:`flwr.client.NumPyClient`,并将其命名为 :code:`CifarClient`。如果使用具有良好 NumPy" -" 互操作性的框架(如 PyTorch 或 TensorFlow/Keras),:code:`NumPyClient`的实现比 " -":code:`Client`略微容易一些,因为它避免了一些不必要的操作。:code:`CifarClient` " -"需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -msgid ":code:`set_parameters`" -msgstr ":code:`set_parameters`" +#: ../../source/docker/run-as-subprocess.rst:17 +#, fuzzy +msgid "Dockerfile.supernode" +msgstr "Flower 服务器" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:226 -msgid "get the updated local model weights and return them to the server" -msgstr "获取更新后的本地模型参数并发送回服务器" +#: ../../source/docker/run-as-subprocess.rst:31 +#, fuzzy +msgid "" +"Next, build the SuperNode Docker image by running the following command " +"in the directory where Dockerfile is located:" +msgstr "接下来,我们在 Dockerfile 和 ClientApp 代码所在的目录下运行以下命令,构建 SuperNode Docker 映像。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 -msgid "return the local loss and accuracy to the server" -msgstr "向服务器返回本地损失值和精确度" +#: ../../source/docker/run-as-subprocess.rst:39 +msgid "Run the ClientApp as a Subprocess" +msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:232 +#: ../../source/docker/run-as-subprocess.rst:41 msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`test()` previously " -"defined in :code:`cifar.py`. So what we really do here is we tell Flower " -"through our :code:`NumPyClient` subclass which of our already defined " -"functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." 
+"Start the SuperNode with the flag ``--isolation subprocess``, which tells" +" the SuperNode to execute the ClientApp as a subprocess:" msgstr "" -"这两个 :code:`NumPyClient` 中的方法 :code:`fit` 和 :code:`evaluate` 使用了之前在 " -":code:`cifar.py` 中定义的函数 :code:`train()` 和 :code:`test()`。因此,我们在这里要做的就是通过 " -":code:`NumPyClient` 子类告知 Flower " -"在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让你更好地理解传递的数据类型。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:280 -msgid "" -"All that's left to do it to define a function that loads both model and " -"data, creates a :code:`CifarClient`, and starts this client. You load " -"your data and model by using :code:`cifar.py`. Start :code:`CifarClient` " -"with the function :code:`fl.client.start_client()` by pointing it at the " -"same IP address we used in :code:`server.py`:" -msgstr "剩下的就是定义模型和数据加载函数了。创建一个:code:`CifarClient`类,并运行这个客服端。您将通过:code:`cifar.py`加载数据和模型。另外,通过:code:`fl.client.start_client()`函数来运行客户端:code:`CifarClient`,需要保证IP地址和:code:`server.py`中所使用的一致:" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:2 +#, fuzzy +msgid "Run Flower Quickstart Examples with Docker Compose" +msgstr "快速入门 iOS" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:307 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:4 msgid "" -"in each window (make sure that the server is running before you do so) " -"and see your (previously centralized) PyTorch project run federated " -"learning across two clients. Congratulations!" -msgstr "确保服务器正在运行后,您就能看到您的 PyTorch 项目(之前是集中式的)在两个客户端上运行联邦学习了。祝贺!" +"Flower provides a set of `quickstart examples " +"`_ to help you get " +"started with the framework. These examples are designed to demonstrate " +"the capabilities of Flower and by default run using the Simulation " +"Engine. This guide demonstrates how to run them using Flower's Deployment" +" Engine via Docker Compose." 
+msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:312 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:12 msgid "" -"The full source code for this example: `PyTorch: From Centralized To " -"Federated (Code) `_. Our example is, of course, " -"somewhat over-simplified because both clients load the exact same " -"dataset, which isn't realistic. You're now prepared to explore this topic" -" further. How about using different subsets of CIFAR-10 on each client? " -"How about adding more clients?" +"Some quickstart examples may have limitations or requirements that " +"prevent them from running on every environment. For more information, " +"please see Limitations_." msgstr "" -"本示例的完整源代码为:`PyTorch: 从集中式到联合式 " -"`_。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。现在,您已经准备好进一步探讨这一主题了。比如在每个客户端使用不同的" -" CIFAR-10 子集会如何?增加更多客户端会如何?" -#: ../../source/explanation-differential-privacy.rst:2 -#: ../../source/explanation-differential-privacy.rst:11 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 -msgid "Differential Privacy" -msgstr "差分隐私" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:15 +#: ../../source/docker/tutorial-quickstart-docker.rst:13 +#, fuzzy +msgid "Before you start, make sure that:" +msgstr "开始之前,请确保 Docker 守护进程正在运行:" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:20 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:22 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:17 +#: ../../source/docker/tutorial-quickstart-docker.rst:15 +msgid "The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally." 
+msgstr "" -#: ../../source/explanation-differential-privacy.rst:3 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:21 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:18 +#: ../../source/docker/tutorial-quickstart-docker.rst:16 #, fuzzy -msgid "" -"The information in datasets like healthcare, financial transactions, user" -" preferences, etc., is valuable and has the potential for scientific " -"breakthroughs and provides important business insights. However, such " -"data is also sensitive and there is a risk of compromising individual " -"privacy." -msgstr "医疗保健、金融交易、用户偏好等数据集中的信息非常宝贵,有可能带来科学突破并提供重要的商业见解。然而,这些数据也是敏感数据,存在泄露个人隐私的风险。" +msgid "The Docker daemon is running." +msgstr "验证 Docker 守护进程是否正在运行。" -#: ../../source/explanation-differential-privacy.rst:6 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:22 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:19 +msgid "Docker Compose is `installed `_." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:25 #, fuzzy +msgid "Run the Quickstart Example" +msgstr "示例请求" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:27 msgid "" -"Traditional methods like anonymization alone would not work because of " -"attacks like Re-identification and Data Linkage. That's where " -"differential privacy comes in. It provides the possibility of analyzing " -"data while ensuring the privacy of individuals." -msgstr "单靠匿名等传统方法是行不通的,因为会受到重新识别和数据链接等攻击。这就是差异化隐私的用武之地。它提供了在分析数据的同时确保个人隐私的可能性。" +"Clone the quickstart example you like to run. For example, ``quickstart-" +"pytorch``:" +msgstr "" -#: ../../source/explanation-differential-privacy.rst:12 -#, fuzzy +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:35 msgid "" -"Imagine two datasets that are identical except for a single record (for " -"instance, Alice's data). 
Differential Privacy (DP) guarantees that any " -"analysis (M), like calculating the average income, will produce nearly " -"identical results for both datasets (O and O' would be similar). This " -"preserves group patterns while obscuring individual details, ensuring the" -" individual's information remains hidden in the crowd." +"Download the `compose.yml " +"`_" +" file into the example directory:" msgstr "" -"试想一下,两个数据集除了一条记录(例如 Alice " -"的数据)之外完全相同。差分隐私(DP)可以保证任何分析(M),比如计算平均收入,对两个数据集都会产生几乎相同的结果(O 和 O' " -"将是相似的)。这既保留了群体模式,又掩盖了个人细节,确保个人的信息隐藏在人群中。" -#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:44 #, fuzzy -msgid "DP Intro" -msgstr "DP 介绍" +msgid "Build and start the services using the following command:" +msgstr "运行以下命令激活 virtualenv:" -#: ../../source/explanation-differential-privacy.rst:22 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:50 #, fuzzy msgid "" -"One of the most commonly used mechanisms to achieve DP is adding enough " -"noise to the output of the analysis to mask the contribution of each " -"individual in the data while preserving the overall accuracy of the " -"analysis." 
-msgstr "实现 DP 的最常用机制之一是在分析输出中加入足够的噪音,以掩盖数据中每个个体的贡献,同时保持分析的整体准确性。" +"Append the following lines to the end of the ``pyproject.toml`` file and " +"save it:" +msgstr "将 ``pyproject.toml`` 中的次要版本增加一个。" -#: ../../source/explanation-differential-privacy.rst:25 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:52 +#: ../../source/docker/tutorial-quickstart-docker.rst:324 #, fuzzy -msgid "Formal Definition" -msgstr "编译 ProtoBuf 定义" +msgid "pyproject.toml" +msgstr "或 ``pyproject.toml```:" -#: ../../source/explanation-differential-privacy.rst:26 -#, fuzzy +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:61 msgid "" -"Differential Privacy (DP) provides statistical guarantees against the " -"information an adversary can infer through the output of a randomized " -"algorithm. It provides an unconditional upper bound on the influence of a" -" single individual on the output of the algorithm by adding noise [1]. A " -"randomized mechanism M provides (:math:`\\epsilon`, " -":math:`\\delta`)-differential privacy if for any two neighboring " -"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " -"record, and for all possible outputs S ⊆ Range(A):" +"You can customize the string that follows ``tool.flwr.federations.`` to " +"fit your needs. However, please note that the string cannot contain a dot" +" (``.``)." msgstr "" -"差分隐私(Differential " -"Privacy,DP)针对对手通过随机算法的输出所能推断出的信息提供统计保证。它为单个个体通过添加噪声对算法输出的影响提供了一个无条件的上限[1]。如果任意两个相邻的数据库D" -" :sub:`1`和D :sub:`2`只有一条记录不同,并且对于所有可能的输出S ⊆ " -"Range(A),随机化机制M提供(:math:`epsilon`,:math:`\\delta`)差异隐私:" -#: ../../source/explanation-differential-privacy.rst:32 -#, fuzzy +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:64 msgid "" -"\\small\n" -"P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" +"In this example, ``local-deployment`` has been used. 
Just remember to " +"replace ``local-deployment`` with your chosen name in both the " +"``tool.flwr.federations.`` string and the corresponding ``flwr run .`` " +"command." msgstr "" -"\\small\n" -"P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" -#: ../../source/explanation-differential-privacy.rst:38 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:68 #, fuzzy +msgid "Run the example:" +msgstr "将示例联邦化" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:74 +msgid "Follow the logs of the SuperExec service:" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:80 msgid "" -"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " -"metric of privacy loss. It also controls the privacy-utility trade-off; " -"lower :math:`\\epsilon` values indicate higher levels of privacy but are " -"likely to reduce utility as well. The :math:`\\delta` parameter accounts " -"for a small probability on which the upper bound :math:`\\epsilon` does " -"not hold. The amount of noise needed to achieve differential privacy is " -"proportional to the sensitivity of the output, which measures the maximum" -" change in the output due to the inclusion or removal of a single record." +"That is all it takes! You can monitor the progress of the run through the" +" logs of the SuperExec." 
msgstr "" -":math:`\\epsilon`参数也称为隐私预算,是衡量隐私损失的指标。较低的 :math:`\\epsilon` " -"值表示较高的隐私级别,但也可能降低效用。:math:`\\delta`参数考虑了:math:`\\epsilon`上限不成立的小概率。实现差异化隐私所需的噪声量与输出的灵敏度成正比,而输出的灵敏度是指由于包含或删除一条记录而导致的输出的最大变化。" -#: ../../source/explanation-differential-privacy.rst:45 -#, fuzzy -msgid "Differential Privacy in Machine Learning" -msgstr "差分隐私" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:84 +msgid "Run a Different Quickstart Example" +msgstr "" -#: ../../source/explanation-differential-privacy.rst:46 -#, fuzzy +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:86 msgid "" -"DP can be utilized in machine learning to preserve the privacy of the " -"training data. Differentially private machine learning algorithms are " -"designed in a way to prevent the algorithm to learn any specific " -"information about any individual data points and subsequently prevent the" -" model from revealing sensitive information. Depending on the stage at " -"which noise is introduced, various methods exist for applying DP to " -"machine learning algorithms. One approach involves adding noise to the " -"training data (either to the features or labels), while another method " -"entails injecting noise into the gradients of the loss function during " -"model training. Additionally, such noise can be incorporated into the " -"model's output." +"To run a different quickstart example, such as ``quickstart-tensorflow``," +" first, shut down the Docker Compose services of the current example:" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:93 +msgid "After that, you can repeat the steps above." 
msgstr "" -"机器学习中可以利用 DP " -"来保护训练数据的隐私。差分保密机器学习算法的设计方式是防止算法学习到任何单个数据点的任何特定信息,从而防止模型泄露敏感信息。根据引入噪声的阶段,有多种方法可将" -" DP " -"应用于机器学习算法。一种方法是在训练数据(特征或标签)中添加噪声,另一种方法是在模型训练过程中向损失函数的梯度注入噪声。此外,这种噪声还可以被纳入模型的输出中。" -#: ../../source/explanation-differential-privacy.rst:53 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:96 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 #, fuzzy -msgid "Differential Privacy in Federated Learning" -msgstr "扩大联邦学习的规模" +msgid "Limitations" +msgstr "运行模拟" -#: ../../source/explanation-differential-privacy.rst:54 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 #, fuzzy -msgid "" -"Federated learning is a data minimization approach that allows multiple " -"parties to collaboratively train a model without sharing their raw data. " -"However, federated learning also introduces new privacy challenges. The " -"model updates between parties and the central server can leak information" -" about the local data. These leaks can be exploited by attacks such as " -"membership inference and property inference attacks, or model inversion " -"attacks." -msgstr "联合学习是一种数据最小化方法,允许多方在不共享原始数据的情况下合作训练一个模型。然而,联合学习也带来了新的隐私挑战。各方与中央服务器之间的模型更新可能会泄露本地数据信息。这些泄漏信息可能会被攻击利用,如成员推断攻击、属性推断攻击或模型反转攻击。" +msgid "Quickstart Example" +msgstr "快速入门 JAX" -#: ../../source/explanation-differential-privacy.rst:58 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 #, fuzzy -msgid "" -"DP can play a crucial role in federated learning to provide privacy for " -"the clients' data." 
-msgstr "DP 可以在联合学习中发挥重要作用,为客户数据提供隐私保护。" +msgid "quickstart-fastai" +msgstr "快速入门 fastai" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:104 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:106 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:115 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:121 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:123 +#: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:399 +#: ../../source/ref-changelog.md:676 ../../source/ref-changelog.md:740 +#: ../../source/ref-changelog.md:798 ../../source/ref-changelog.md:867 +#: ../../source/ref-changelog.md:929 +msgid "None" +msgstr "无" -#: ../../source/explanation-differential-privacy.rst:60 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 #, fuzzy -msgid "" -"Depending on the granularity of privacy provision or the location of " -"noise addition, different forms of DP exist in federated learning. In " -"this explainer, we focus on two approaches of DP utilization in federated" -" learning based on where the noise is added: at the server (also known as" -" the center) or at the client (also known as the local)." -msgstr "" -"根据提供隐私的粒度或添加噪声的位置,联合学习中存在不同形式的 DP。在本说明中,我们将根据添加噪声的位置,重点介绍联合学习中利用 DP " -"的两种方法:在服务器(也称为中心)或客户端(也称为本地)。" +msgid "quickstart-huggingface" +msgstr "快速入门教程" -#: ../../source/explanation-differential-privacy.rst:63 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 #, fuzzy -msgid "" -"**Central Differential Privacy**: DP is applied by the server and the " -"goal is to prevent the aggregated model from leaking information about " -"each client's data." 
-msgstr "**中央差分隐私**: DP 由服务器应用,目标是防止聚合模型泄露每个客户的数据信息。" +msgid "quickstart-jax" +msgstr "快速入门 JAX" -#: ../../source/explanation-differential-privacy.rst:65 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:108 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:125 #, fuzzy msgid "" -"**Local Differential Privacy**: DP is applied on the client side before " -"sending any information to the server and the goal is to prevent the " -"updates that are sent to the server from leaking any information about " -"the client's data." -msgstr "**本地差分隐私**: 在向服务器发送任何信息之前,在客户端应用 DP,目的是防止向服务器发送的更新泄露任何有关客户端数据的信息。" +"The example has not yet been updated to work with the latest ``flwr`` " +"version." +msgstr "涵盖 scikit-learn 和 PyTorch Lightning 的代码示例已更新,以便与最新版本的 Flower 配合使用。" -#: ../../source/explanation-differential-privacy.rst:-1 -#: ../../source/explanation-differential-privacy.rst:68 -#: ../../source/how-to-use-differential-privacy.rst:11 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:109 #, fuzzy -msgid "Central Differential Privacy" -msgstr "差分隐私" +msgid "quickstart-mlcube" +msgstr "快速入门 JAX" -#: ../../source/explanation-differential-privacy.rst:69 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:111 #, fuzzy -msgid "" -"In this approach, which is also known as user-level DP, the central " -"server is responsible for adding noise to the globally aggregated " -"parameters. It should be noted that trust in the server is required." -msgstr "在这种方法(也称为用户级 DP)中,中央服务器负责在全局汇总参数中添加噪声。需要注意的是,这需要对服务器的信任。" +msgid "quickstart-mlx" +msgstr "快速入门 JAX" -#: ../../source/explanation-differential-privacy.rst:76 -#, fuzzy +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 msgid "" -"While there are various ways to implement central DP in federated " -"learning, we concentrate on the algorithms proposed by [2] and [3]. 
The " -"overall approach is to clip the model updates sent by the clients and add" -" some amount of noise to the aggregated model. In each iteration, a " -"random set of clients is chosen with a specific probability for training." -" Each client performs local training on its own data. The update of each " -"client is then clipped by some value `S` (sensitivity `S`). This would " -"limit the impact of any individual client which is crucial for privacy " -"and often beneficial for robustness. A common approach to achieve this is" -" by restricting the `L2` norm of the clients' model updates, ensuring " -"that larger updates are scaled down to fit within the norm `S`." +"`Requires to run on macOS with Apple Silicon `_." msgstr "" -"虽然在联合学习中实现中央数据处理的方法有很多种,但我们将重点放在[2]和[3]提出的算法上。总体方法是剪辑客户端发送的模型更新,并在聚合模型中添加一定量的噪声。在每次迭代中,以特定概率随机选择一组客户端进行训练。每个客户端对自己的数据进行局部训练。然后,每个客户端的更新会被某个值`S`(灵敏度`S`)剪切。这将限制任何单个客户端的影响,这对隐私至关重要,通常也有利于稳健性。实现这一点的常用方法是限制客户机模型更新的" -" `L2` 准则,确保较大的更新被缩减以适应 `S` 准则。" -#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 #, fuzzy -msgid "clipping" -msgstr "剪贴" +msgid "quickstart-monai" +msgstr "快速入门 JAX" -#: ../../source/explanation-differential-privacy.rst:89 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 #, fuzzy -msgid "" -"Afterwards, the Gaussian mechanism is used to add noise in order to " -"distort the sum of all clients' updates. The amount of noise is scaled to" -" the sensitivity value to obtain a privacy guarantee. The Gaussian " -"mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " -"noise_scale * S ) / (number of sampled clients)`." 
-msgstr "" -"然后,使用高斯机制添加噪声,以扭曲所有客户端的更新总和。噪声量与灵敏度值成正比,以获得隐私保证。高斯机制的噪声采样范围为 `N (0, σ²)` " -",其中 σ = ( 噪声规模 * S ) / (采样客户数)`。" +msgid "quickstart-pandas" +msgstr "快速入门Pandas" -#: ../../source/explanation-differential-privacy.rst:94 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 #, fuzzy -msgid "Clipping" -msgstr "剪贴" +msgid "quickstart-pytorch-lightning" +msgstr "快速入门 PyTorch Lightning" -#: ../../source/explanation-differential-privacy.rst:96 -#, fuzzy +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 msgid "" -"There are two forms of clipping commonly used in Central DP: Fixed " -"Clipping and Adaptive Clipping." -msgstr "中央处理器常用的剪切有两种形式:固定剪切和自适应剪切。" +"Requires an older pip version that is not supported by the Flower Docker " +"images." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:98 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 #, fuzzy -msgid "" -"**Fixed Clipping** : A predefined fix threshold is set for the magnitude " -"of clients' updates. Any update exceeding this threshold is clipped back " -"to the threshold value." -msgstr "** 固定削波** : 为客户端更新的大小设置了一个预定义的固定阈值。任何超过该阈值的更新都会被剪切回阈值。" +msgid "quickstart-pytorch" +msgstr "PyTorch快速入门" -#: ../../source/explanation-differential-privacy.rst:100 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 #, fuzzy -msgid "" -"**Adaptive Clipping** : The clipping threshold dynamically adjusts based " -"on the observed update distribution [4]. It means that the clipping value" -" is tuned during the rounds with respect to the quantile of the update " -"norm distribution." 
-msgstr "** 自适应削波** : 削波阈值根据观察到的更新分布动态调整[4]。这意味着,在各轮中,会根据更新规范分布的量化值调整削波值。" +msgid "quickstart-sklearn-tabular" +msgstr "scikit-learn快速入门" -#: ../../source/explanation-differential-privacy.rst:102 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:124 #, fuzzy -msgid "" -"The choice between fixed and adaptive clipping depends on various factors" -" such as privacy requirements, data distribution, model complexity, and " -"others." -msgstr "在固定剪切和自适应剪切之间做出选择取决于各种因素,如隐私要求、数据分布、模型复杂性等。" +msgid "quickstart-tabnet" +msgstr "快速入门 JAX" -#: ../../source/explanation-differential-privacy.rst:-1 -#: ../../source/explanation-differential-privacy.rst:105 -#: ../../source/how-to-use-differential-privacy.rst:96 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:126 #, fuzzy -msgid "Local Differential Privacy" -msgstr "差分隐私" +msgid "quickstart-tensorflow" +msgstr "快速入门 TensorFlow" -#: ../../source/explanation-differential-privacy.rst:107 -#, fuzzy -msgid "" -"In this approach, each client is responsible for performing DP. Local DP " -"avoids the need for a fully trusted aggregator, but it should be noted " -"that local DP leads to a decrease in accuracy but better privacy in " -"comparison to central DP." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:127 +msgid "Only runs on AMD64." msgstr "" -"在这种方法中,每个客户端都负责执行 DP。本地 DP 避免了对完全可信的聚合器的需求,但需要注意的是,与中央 DP 相比,本地 DP " -"会降低准确性,但却能更好地保护隐私。" -#: ../../source/explanation-differential-privacy.rst:116 +#: ../../source/docker/set-environment-variables.rst:2 #, fuzzy -msgid "In this explainer, we focus on two forms of achieving Local DP:" -msgstr "在本说明中,我们将重点介绍实现本地 DP 的两种形式:" +msgid "Set Environment Variables" +msgstr "设置编码环境" -#: ../../source/explanation-differential-privacy.rst:118 +#: ../../source/docker/set-environment-variables.rst:4 #, fuzzy msgid "" -"Each client adds noise to the local updates before sending them to the " -"server. 
To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " -"the sensitivity of the local model to be ∆, Gaussian noise is applied " -"with a noise scale of σ where:" -msgstr "" -"每个客户端在向服务器发送本地更新之前,都会在本地更新中加入噪声。为了实现(:math:`\\epsilon`, " -":math:`\\delta`)-DP,考虑到本地模型的灵敏度为 ∆,应用了高斯噪声,噪声尺度为 σ,其中:" +"To set a variable inside a Docker container, you can use the ``-e " +"=`` flag. Multiple ``-e`` flags can be used to set multiple " +"environment variables for a container." +msgstr "要在 Docker 容器内设置变量,可以使用 ``-e =`` 标志。" -#: ../../source/explanation-differential-privacy.rst:120 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:2 #, fuzzy +msgid "Deploy Flower on Multiple Machines with Docker Compose" +msgstr "快速入门 iOS" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:4 msgid "" -"\\small\n" -"\\frac{∆ \\times \\sqrt{2 \\times " -"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" -"\n" +"This guide will help you set up a Flower project on multiple machines " +"using Docker Compose." msgstr "" -"\\small\n" -"\\frac{∆ \\times \\sqrt{2 \\times " -"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" -"\n" -#: ../../source/explanation-differential-privacy.rst:125 -#, fuzzy +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:7 msgid "" -"Each client adds noise to the gradients of the model during the local " -"training (DP-SGD). More specifically, in this approach, gradients are " -"clipped and an amount of calibrated noise is injected into the gradients." -msgstr "在局部训练过程中,每个客户端都会向模型的梯度添加噪声(DP-SGD)。更具体地说,在这种方法中,梯度会被剪切,并在梯度中注入一定量的校准噪声。" +"You will learn how to run the Flower client and server components on two " +"separate machines, with Flower configured to use TLS encryption and " +"persist SuperLink state across restarts. A server consists of a SuperLink" +" and ``SuperExec``. For more details about the Flower architecture, refer" +" to the :doc:`../explanation-flower-architecture` explainer page." 
+msgstr "" -#: ../../source/explanation-differential-privacy.rst:128 -#, fuzzy +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:13 msgid "" -"Please note that these two approaches are providing privacy at different " -"levels." -msgstr "请注意,这两种方法提供了不同层次的隐私。" +"This guide assumes you have completed the :doc:`tutorial-quickstart-" +"docker-compose` tutorial. It is highly recommended that you follow and " +"understand the contents of that tutorial before proceeding with this " +"guide." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:131 -#, fuzzy -msgid "**References:**" -msgstr "参考资料" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:20 +msgid "Before you begin, make sure you have the following prerequisites:" +msgstr "" -#: ../../source/explanation-differential-privacy.rst:133 -#, fuzzy -msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." -msgstr "[1] Dwork 等:《差分隐私的算法基础》。" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:23 +msgid "The Docker daemon is running on your local machine and the remote machine." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:135 -#, fuzzy +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:24 msgid "" -"[2] McMahan et al. Learning Differentially Private Recurrent Language " -"Models." +"Docker Compose V2 is installed on both your local machine and the remote " +"machine." msgstr "" -"McMahan, H. Brendan等. \"Learning differentially private recurrent " -"language models.\" arXiv preprint arXiv:1710.06963 (2017)." -#: ../../source/explanation-differential-privacy.rst:137 -#, fuzzy -msgid "" -"[3] Geyer et al. Differentially Private Federated Learning: A Client " -"Level Perspective." -msgstr "[3] Geyer 等人。差异化化私人联合学习:客户层面的视角。" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:25 +msgid "You can connect to the remote machine from your local machine." 
+msgstr "" -#: ../../source/explanation-differential-privacy.rst:139 -#, fuzzy -msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:26 +msgid "Ports ``9091`` and ``9093`` are accessible on the remote machine." msgstr "" -"Andrew, Galen等. \"Differentially private learning with adaptive " -"clipping.\" Advances in Neural Information Processing Systems 34 (2021): " -"17455-17466." -#: ../../source/explanation-federated-evaluation.rst:2 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 -msgid "Federated evaluation" -msgstr "联邦学习评估" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:30 +msgid "" +"The guide uses the |quickstart_sklearn_tabular|_ example as an example " +"project." +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:4 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:32 msgid "" -"There are two main approaches to evaluating models in federated learning " -"systems: centralized (or server-side) evaluation and federated (or " -"client-side) evaluation." -msgstr "评估联合学习系统中的模型主要有两种方法:集中(或服务器端)评估和联邦(或客户端)评估。" +"If your project has a different name or location, please remember to " +"adjust the commands/paths accordingly." 
+msgstr "" -#: ../../source/explanation-federated-evaluation.rst:8 -msgid "Centralized Evaluation" -msgstr "集中评估" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:36 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:22 +#: ../../source/docker/tutorial-quickstart-docker.rst:19 +msgid "Step 1: Set Up" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:11 -msgid "Built-In Strategies" -msgstr "内置策略" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:38 +msgid "Clone the Flower repository and change to the ``distributed`` directory:" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:13 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:45 +msgid "Get the IP address from the remote machine and save it for later." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:46 msgid "" -"All built-in strategies support centralized evaluation by providing an " -"evaluation function during initialization. An evaluation function is any " -"function that can take the current global model parameters as input and " -"return evaluation results:" -msgstr "所有内置策略都通过在初始化过程中提供一个评估函数来支持集中评估。评估函数是任何可以将当前全局模型参数作为输入并返回评估结果的函数:" +"Use the ``certs.yml`` Compose file to generate your own self-signed " +"certificates. If you have certificates, you can continue with Step 2." +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:58 -msgid "Custom Strategies" -msgstr "定制策略" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:51 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:221 +msgid "These certificates should be used only for development purposes." +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:60 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:53 msgid "" -"The :code:`Strategy` abstraction provides a method called " -":code:`evaluate` that can directly be used to evaluate the current global" -" model parameters. 
The current server implementation calls " -":code:`evaluate` after parameter aggregation and before federated " -"evaluation (see next paragraph)." +"For production environments, you may have to use dedicated services to " +"obtain your certificates." msgstr "" -":code:`Strategy` 抽象提供了一个名为 :code:`evaluate` " -"的方法,可直接用于评估当前的全局模型参数。服务器会在参数聚合后和联邦评估前调用 :code:`evaluate`(见下段)。" -#: ../../source/explanation-federated-evaluation.rst:65 -msgid "Federated Evaluation" -msgstr "联邦评估" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:56 +msgid "" +"First, set the environment variables ``SUPERLINK_IP`` and " +"``SUPEREXEC_IP`` with the IP address from the remote machine. For " +"example, if the IP is ``192.168.2.33``, execute:" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:68 -msgid "Implementing Federated Evaluation" -msgstr "实现联邦评估" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:65 +msgid "Next, generate the self-signed certificates:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:72 +msgid "Step 2: Copy the Server Compose Files" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:70 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:74 msgid "" -"Client-side evaluation happens in the :code:`Client.evaluate` method and " -"can be configured from the server side." -msgstr "客户端评估在 :code:`Client.evaluate` 方法中进行,并可从服务器端进行配置。" +"Use the method that works best for you to copy the ``server`` directory, " +"the certificates, and your Flower project to the remote machine." +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:101 -msgid "Configuring Federated Evaluation" -msgstr "配置联邦评估" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:77 +msgid "For example, you can use ``scp`` to copy the directories:" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:103 -msgid "" -"Federated evaluation can be configured from the server side. 
Built-in " -"strategies support the following arguments:" -msgstr "联邦评估可从服务器端进行配置。内置策略支持以下参数:" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:87 +#, fuzzy +msgid "Step 3: Start the Flower Server Components" +msgstr "然后,我们启动服务器:" -#: ../../source/explanation-federated-evaluation.rst:105 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:89 msgid "" -":code:`fraction_evaluate`: a :code:`float` defining the fraction of " -"clients that will be selected for evaluation. If " -":code:`fraction_evaluate` is set to :code:`0.1` and :code:`100` clients " -"are connected to the server, then :code:`10` will be randomly selected " -"for evaluation. If :code:`fraction_evaluate` is set to :code:`0.0`, " -"federated evaluation will be disabled." +"Log into the remote machine using ``ssh`` and run the following command " +"to start the SuperLink and SuperExec services:" msgstr "" -":code:`fraction_evaluate`: :code:`float`,定义了被选中进行评估的客户端的比例。如果 " -":code:`fraction_evaluate` 设置为 :code:`0.1`,并且 :code:`100` 个客户端连接到服务器,那么 " -":code:`10` 个客户端将被随机选中进行评估。如果 :code:`fraction_evaluate` 设置为 " -":code:`0.0`,联邦评估将被禁用。" -#: ../../source/explanation-federated-evaluation.rst:106 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:102 msgid "" -":code:`min_evaluate_clients`: an :code:`int`: the minimum number of " -"clients to be selected for evaluation. If :code:`fraction_evaluate` is " -"set to :code:`0.1`, :code:`min_evaluate_clients` is set to 20, and " -":code:`100` clients are connected to the server, then :code:`20` clients " -"will be selected for evaluation." +"The Path of the ``PROJECT_DIR`` should be relative to the location of the" +" ``server`` Docker Compose files." 
msgstr "" -":code:`min_evaluate_clients`:一个 :code:`int`,需要评估的客户的最小数量。如果 " -":code:`fraction_evaluate` 设置为 :code:`0.1`,:code:`min_evaluate_clients` " -"设置为 20,并且有 :code:`100` 个客户端已连接到服务器,那么 :code:`20` 个客户端将被选中进行评估。" -#: ../../source/explanation-federated-evaluation.rst:107 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:105 +msgid "Go back to your terminal on your local machine." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:108 +#, fuzzy +msgid "Step 4: Start the Flower Client Components" +msgstr "然后,我们启动服务器:" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:110 msgid "" -":code:`min_available_clients`: an :code:`int` that defines the minimum " -"number of clients which need to be connected to the server before a round" -" of federated evaluation can start. If fewer than " -":code:`min_available_clients` are connected to the server, the server " -"will wait until more clients are connected before it continues to sample " -"clients for evaluation." +"On your local machine, run the following command to start the client " +"components:" msgstr "" -":code:`min_available_clients`: " -":code:`int`,定义了在一轮联邦评估开始之前,需要连接到服务器的最小客户端数量。如果连接到服务器的客户端数量少于 " -":code:`min_available_clients`,服务器将等待更多客户端连接后,才继续采样客户端进行评估。" -#: ../../source/explanation-federated-evaluation.rst:108 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:120 msgid "" -":code:`on_evaluate_config_fn`: a function that returns a configuration " -"dictionary which will be sent to the selected clients. The function will " -"be called during each round and provides a convenient way to customize " -"client-side evaluation from the server side, for example, to configure " -"the number of validation steps performed." 
-msgstr "code:`on_evaluate_config_fn`:返回配置字典的函数,该字典将发送给选定的客户端。该函数将在每一轮中被调用,并提供了一种方便的方法来从服务器端自定义客户端评估,例如,配置执行的验证步骤数。" +"The Path of the ``PROJECT_DIR`` should be relative to the location of the" +" ``client`` Docker Compose files." +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:135 -msgid "Evaluating Local Model Updates During Training" -msgstr "评估训练期间的本地模型更新" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:124 +#, fuzzy +msgid "Step 5: Run Your Flower Project" +msgstr "Flower 服务器。" -#: ../../source/explanation-federated-evaluation.rst:137 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:126 msgid "" -"Model parameters can also be evaluated during training. " -":code:`Client.fit` can return arbitrary evaluation results as a " -"dictionary:" -msgstr "模型参数也可在训练过程中进行评估。 :code:`Client.fit`可以字典形式返回任意评估结果:" +"Specify the remote SuperExec IP addresses and the path to the root " +"certificate in the ``[tool.flwr.federations.remote-superexec]`` table in " +"the ``pyproject.toml`` file. Here, we have named our remote federation " +"``remote-superexec``:" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:177 -msgid "Full Code Example" -msgstr "完整代码示例" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:130 +#, fuzzy +msgid "examples/quickstart-sklearn-tabular/pyproject.toml" +msgstr "scikit-learn快速入门" -#: ../../source/explanation-federated-evaluation.rst:179 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:139 msgid "" -"For a full code example that uses both centralized and federated " -"evaluation, see the *Advanced TensorFlow Example* (the same approach can " -"be applied to workloads implemented in any other framework): " -"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" +"The Path of the ``root-certificates`` should be relative to the location " +"of the ``pyproject.toml`` file." 
msgstr "" -"有关同时使用集中评估和联邦评估的完整代码示例,请参阅 *Advanced TensorFlow " -"Example*(同样的方法也可应用于任何其他框架中): " -"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" -#: ../../source/fed/0000-20200102-fed-template.md:10 -msgid "FED Template" -msgstr "FED 模板" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:142 +msgid "To run the project, execute:" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:12 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12 -msgid "Table of Contents" -msgstr "目录" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:148 +msgid "" +"That's it! With these steps, you've set up Flower on two separate " +"machines and are ready to start using it." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:14 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14 -msgid "[Table of Contents](#table-of-contents)" -msgstr "[目录](#table-of-contents)" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:152 +msgid "Step 6: Clean Up" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:15 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15 -msgid "[Summary](#summary)" -msgstr "[总结](#summary)" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:154 +#, fuzzy +msgid "Shut down the Flower client components:" +msgstr "Flower 客户端。" -#: ../../source/fed/0000-20200102-fed-template.md:16 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16 -msgid "[Motivation](#motivation)" -msgstr "[动机](#motivation)" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:161 +msgid "Shut down the Flower server components and delete the SuperLink state:" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:17 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 -msgid "[Goals](#goals)" -msgstr "[目标](#goals)" +#: ../../source/docker/tutorial-quickstart-docker.rst:2 +#, fuzzy +msgid "Quickstart with Docker" +msgstr "快速入门 iOS" 
-#: ../../source/fed/0000-20200102-fed-template.md:18 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 -msgid "[Non-Goals](#non-goals)" -msgstr "[非目标](#non-goals)" +#: ../../source/docker/tutorial-quickstart-docker.rst:4 +msgid "" +"This quickstart aims to guide you through the process of containerizing a" +" Flower project and running it end to end using Docker on your local " +"machine." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:19 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 -msgid "[Proposal](#proposal)" -msgstr "[计划](#proposal)" +#: ../../source/docker/tutorial-quickstart-docker.rst:7 +msgid "" +"This tutorial does not use production-ready settings, so you can focus on" +" understanding the basic workflow that uses the minimum configurations." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:20 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 -msgid "[Drawbacks](#drawbacks)" -msgstr "[缺点](#drawbacks)" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:32 +#: ../../source/docker/tutorial-quickstart-docker.rst:21 +msgid "Create a new Flower project (PyTorch):" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:21 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 -msgid "[Alternatives Considered](#alternatives-considered)" -msgstr "[备选方案](#alternatives-considered)" +#: ../../source/docker/tutorial-quickstart-docker.rst:39 +msgid "Create a new Docker bridge network called ``flwr-network``:" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:22 -msgid "[Appendix](#appendix)" -msgstr "[附录](#appendix)" +#: ../../source/docker/tutorial-quickstart-docker.rst:45 +msgid "" +"User-defined networks, such as ``flwr-network``, enable IP resolution of " +"container names, a feature absent in the default bridge network. This " +"simplifies quickstart example by avoiding the need to determine host IP " +"first." 
+msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:24 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 -msgid "Summary" -msgstr "总结" +#: ../../source/docker/tutorial-quickstart-docker.rst:50 +#, fuzzy +msgid "Step 2: Start the SuperLink" +msgstr "然后,我们启动服务器:" -#: ../../source/fed/0000-20200102-fed-template.md:26 -msgid "\\[TODO - sentence 1: summary of the problem\\]" -msgstr "\\[TODO - 句子 1: 问题概括\\]" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:62 +#: ../../source/docker/tutorial-quickstart-docker.rst:52 +#, fuzzy +msgid "Open your terminal and run:" +msgstr "打开另一台终端,启动第二个客户端:" -#: ../../source/fed/0000-20200102-fed-template.md:28 -msgid "\\[TODO - sentence 2: summary of the solution\\]" -msgstr "\\[TODO - 句子 2: 解决方案概括\\]" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "Understand the command" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:30 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 -msgid "Motivation" -msgstr "动机" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``-p 9091:9091 -p 9092:9092``: Map port ``9091`` and ``9092`` of the " +"container to the same port of" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:32 -#: ../../source/fed/0000-20200102-fed-template.md:36 -#: ../../source/fed/0000-20200102-fed-template.md:40 -#: ../../source/fed/0000-20200102-fed-template.md:44 -#: ../../source/fed/0000-20200102-fed-template.md:48 -#: ../../source/fed/0000-20200102-fed-template.md:54 -#: ../../source/fed/0000-20200102-fed-template.md:58 -msgid "\\[TODO\\]" -msgstr "\\[TODO\\]" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the host machine, allowing other services to access the Driver API on" +msgstr "" -#: 
../../source/fed/0000-20200102-fed-template.md:34 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:53 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 -msgid "Goals" -msgstr "目标" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``http://localhost:9091`` and the Fleet API on ``http://localhost:9092``." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:38 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:79 -msgid "Non-Goals" -msgstr "非目标" +#: ../../source/docker/tutorial-quickstart-docker.rst:71 +#: ../../source/docker/tutorial-quickstart-docker.rst:108 +#: ../../source/docker/tutorial-quickstart-docker.rst:219 +#: ../../source/docker/tutorial-quickstart-docker.rst:309 +msgid "" +"``--network flwr-network``: Make the container join the network named " +"``flwr-network``." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:42 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 -msgid "Proposal" -msgstr "提案" +#: ../../source/docker/tutorial-quickstart-docker.rst:72 +msgid "``--name superlink``: Assign the name ``superlink`` to the container." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:46 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 -msgid "Drawbacks" -msgstr "缺点" +#: ../../source/docker/tutorial-quickstart-docker.rst:73 +#: ../../source/docker/tutorial-quickstart-docker.rst:110 +#: ../../source/docker/tutorial-quickstart-docker.rst:220 +#: ../../source/docker/tutorial-quickstart-docker.rst:311 +msgid "" +"``--detach``: Run the container in the background, freeing up the " +"terminal." 
+msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:50 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 -msgid "Alternatives Considered" -msgstr "备选方案" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"tag of the image. The tag :substitution-code:`|stable_flwr_version|` " +"represents a :doc:`specific version ` of the image." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:52 -msgid "\\[Alternative 1\\]" -msgstr "\\[备选 1\\]" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--insecure``: This flag tells the container to operate in an insecure " +"mode, allowing" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:56 -msgid "\\[Alternative 2\\]" -msgstr "\\[备选 2\\]" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "unencrypted communication." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 -msgid "Flower Enhancement Doc" -msgstr "Flower 改善文档" +#: ../../source/docker/tutorial-quickstart-docker.rst:80 +#, fuzzy +msgid "Step 3: Start the SuperNode" +msgstr "然后,我们启动服务器:" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 -msgid "[Enhancement Doc Template](#enhancement-doc-template)" -msgstr "[增强文档模版](#enhancement-doc-template)" +#: ../../source/docker/tutorial-quickstart-docker.rst:82 +msgid "Start two SuperNode containers." 
+msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 -msgid "[Metadata](#metadata)" -msgstr "[描述数据](#metadata)" +#: ../../source/docker/tutorial-quickstart-docker.rst:84 +msgid "Start the first container:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:22 -msgid "[Workflow](#workflow)" -msgstr "[工作流程](#workflow)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``-p 9094:9094``: Map port ``9094`` of the container to the same port of" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 -msgid "[GitHub Issues](#github-issues)" -msgstr "[GitHub 问题](#github-issues)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the host machine, allowing other services to access the SuperNode API on" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:26 -msgid "[Google Docs](#google-docs)" -msgstr "[谷歌文档](#google-docs)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``http://localhost:9094``." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:30 -msgid "A Flower Enhancement is a standardized development process to" -msgstr "改善 Flower 功能是一个标准化的开发流程,目的是" +#: ../../source/docker/tutorial-quickstart-docker.rst:109 +msgid "``--name supernode-1``: Assign the name ``supernode-1`` to the container." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 -msgid "provide a common structure for proposing larger changes" -msgstr "为提出更大规模的改动提供一个共同的结构" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``flwr/supernode:|stable_flwr_version|``: This is the name of the image " +"to be run and the specific tag" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 -msgid "ensure that the motivation for a change is clear" -msgstr "确保改动的动机明确" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "of the image." 
+msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 -msgid "persist project information in a version control system" -msgstr "将项目信息保存在版本控制系统中" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--superlink superlink:9092``: Connect to the SuperLink's Fleet API at " +"the address" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 -msgid "document the motivation for impactful user-facing changes" -msgstr "记录面向用户的具有影响力的改动的动机" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``superlink:9092``." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:36 -msgid "reserve GitHub issues for tracking work in flight" -msgstr "保留 GitHub 问题,用于跟踪进行中的工作" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--node-config \"partition-id=0 num-partitions=2\"``: Set the partition " +"ID to ``0`` and the" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "number of partitions to ``2`` for the SuperNode configuration." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"ensure community participants can successfully drive changes to " -"completion across one or more releases while stakeholders are adequately " -"represented throughout the process" -msgstr "确保社区参与者能够成功推动改动,完成一个或多个版本,同时利益相关者在整个过程中得到充分展现" +"``--supernode-address 0.0.0.0:9094``: Set the address and port number " +"that the SuperNode" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 -msgid "Hence, an Enhancement Doc combines aspects of" -msgstr "因此,\"增强文件\"将以下方面结合起来" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "is listening on." 
+msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 -msgid "a feature, and effort-tracking document" -msgstr "一个功能和效力跟踪文档" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--isolation process``: Tells the SuperNode that the ClientApp is " +"created by separate" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 -msgid "a product requirements document" -msgstr "一个产品需要文档" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "independent process. The SuperNode does not attempt to create it." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 -msgid "a design document" -msgstr "一个设计文档" +#: ../../source/docker/tutorial-quickstart-docker.rst:124 +#, fuzzy +msgid "Start the second container:" +msgstr "启动服务器" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 -msgid "" -"into one file, which is created incrementally in collaboration with the " -"community." -msgstr "该文件是与社区合作逐步创建的。" +#: ../../source/docker/tutorial-quickstart-docker.rst:142 +msgid "Step 4: Start the ClientApp" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 +#: ../../source/docker/tutorial-quickstart-docker.rst:144 msgid "" -"For far-fetching changes or features proposed to Flower, an abstraction " -"beyond a single GitHub issue or pull request is required to understand " -"and communicate upcoming changes to the project." +"The ClientApp Docker image comes with a pre-installed version of Flower " +"and serves as a base for building your own ClientApp image. In order to " +"install the FAB dependencies, you will need to create a Dockerfile that " +"extends the ClientApp image and installs the required dependencies." 
msgstr "" -"对于向 Flower 提出的远期变更或功能,需要一个超越单个 GitHub 问题或拉取请求(pull " -"request)的抽象概念,以了解和沟通项目即将发生的变更。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 +#: ../../source/docker/tutorial-quickstart-docker.rst:149 msgid "" -"The purpose of this process is to reduce the amount of \"tribal " -"knowledge\" in our community. By moving decisions from Slack threads, " -"video calls, and hallway conversations into a well-tracked artifact, this" -" process aims to enhance communication and discoverability." +"Create a ClientApp Dockerfile called ``Dockerfile.clientapp`` and paste " +"the following code into it:" msgstr "" -"这一流程的目的是减少我们社区中 \"部落知识 \"的数量。通过将决策从 Slack " -"线程、视频通话和走廊对话转移到一个跟踪良好的工作环境中,该流程旨在加强沟通和可发现性。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 -msgid "" -"Roughly any larger, user-facing enhancement should follow the Enhancement" -" process. If an enhancement would be described in either written or " -"verbal communication to anyone besides the author or developer, then " -"consider creating an Enhancement Doc." -msgstr "任何较大的、面向用户的增强都应遵循增强流程。如果要以书面或口头形式向作者或开发人员以外的任何人描述增强功能,则应考虑创建改善文档。" +#: ../../source/docker/tutorial-quickstart-docker.rst:152 +#, fuzzy +msgid "Dockerfile.clientapp" +msgstr "Flower 客户端。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 -msgid "" -"Similarly, any technical effort (refactoring, major architectural change)" -" that will impact a large section of the development community should " -"also be communicated widely. The Enhancement process is suited for this " -"even if it will have zero impact on the typical user or operator." 
-msgstr "同样,任何会对开发社区的大部分人产生影响的技术工作(重构、重大架构变更)也应广泛传播。即使对典型用户或操作员的影响为零,改进流程也适用于这种情况。" +#: ../../source/docker/tutorial-quickstart-docker.rst +#, fuzzy +msgid "Understand the Dockerfile" +msgstr "创建超级节点 Dockerfile" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"For small changes and additions, going through the Enhancement process " -"would be time-consuming and unnecessary. This includes, for example, " -"adding new Federated Learning algorithms, as these only add features " -"without changing how Flower works or is used." +":substitution-code:`FROM flwr/clientapp:|stable_flwr_version|`: This line" +" specifies that the Docker image" msgstr "" -"对于小的改动和添加,通过 \"改善\"程序既耗时又没有必要。例如,这包括添加新的联邦学习算法,因为这只会增加功能,而不会改变 \"Flower " -"\"的工作或使用方式。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 -msgid "" -"Enhancements are different from feature requests, as they are already " -"providing a laid-out path for implementation and are championed by " -"members of the community." -msgstr "增强功能与功能请求不同,因为它们已经提供了实施路径,并得到了社区成员的支持。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"An Enhancement is captured in a Markdown file that follows a defined " -"template and a workflow to review and store enhancement docs for " -"reference — the Enhancement Doc." -msgstr "增强功能被记录在一个 Markdown 文件中,该文件遵循已定义的模板和工作流程,用于审查和存储增强功能文档(即增强功能文档)以供参考。" +"to be built from is the ``flwr/clientapp image``, version :substitution-" +"code:`|stable_flwr_version|`." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 -msgid "Enhancement Doc Template" -msgstr "增强文档模板" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``WORKDIR /app``: Set the working directory for the container to ``/app``." 
+msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Each enhancement doc is provided as a Markdown file having the following " -"structure" -msgstr "每个增强文档都以 Markdown 文件的形式提供,其结构如下" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 -msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" -msgstr "描述数据([如下所述](#metadata) 以 YAML 前言的形式出现)" +"Any subsequent commands that reference a directory will be relative to " +"this directory." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 -msgid "Title (same as in metadata)" -msgstr "标题(与描述数据中的标题相同)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``COPY pyproject.toml .``: Copy the ``pyproject.toml`` file" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 -msgid "Table of Contents (if needed)" -msgstr "目录(如有需要)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"from the current working directory into the container's ``/app`` " +"directory." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 -msgid "Notes/Constraints/Caveats (optional)" -msgstr "注意事项/限制/警告(可选)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``RUN sed -i 's/.*flwr\\[simulation\\].*//' pyproject.toml``: Remove the " +"``flwr`` dependency" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 -msgid "Design Details (optional)" -msgstr "设计细节(可选)" +#: ../../source/docker/tutorial-quickstart-docker.rst +#, fuzzy +msgid "from the ``pyproject.toml``." 
+msgstr "或 ``pyproject.toml```:" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 -msgid "Graduation Criteria" -msgstr "毕业标准" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``python -m pip install -U --no-cache-dir .``: Run the ``pip`` install " +"command to" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 -msgid "Upgrade/Downgrade Strategy (if applicable)" -msgstr "升级/降级策略(如适用)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "install the dependencies defined in the ``pyproject.toml`` file" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 -msgid "As a reference, this document follows the above structure." -msgstr "作为参考,本文件采用上述结构。" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"The ``-U`` flag indicates that any existing packages should be upgraded, " +"and" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 -#: ../../source/ref-api/flwr.common.Metadata.rst:2 -msgid "Metadata" -msgstr "描述数据" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--no-cache-dir`` prevents pip from using the cache to speed up the " +"installation." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"**fed-number** (Required) The `fed-number` of the last Flower Enhancement" -" Doc + 1. With this number, it becomes easy to reference other proposals." -msgstr "**fed-number**(必填)上一个Flower增强文件的 \"fed-number \"+1。有了这个编号,就很容易参考其他提案。" +"``ENTRYPOINT [\"flwr-clientapp\"]``: Set the command ``flwr-clientapp`` " +"to be" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 -msgid "**title** (Required) The title of the proposal in plain language." -msgstr "**标题** (必填)用简明语言写出提案的标题。" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the default command run when the container is started." 
+msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 +#: ../../source/docker/tutorial-quickstart-docker.rst:186 msgid "" -"**status** (Required) The current status of the proposal. See " -"[workflow](#workflow) for the possible states." -msgstr "**status** (必填)提案的当前状态。有关可能的状态,请参阅 [工作流程](#workflow)。" +"Note that `flwr `__ is already installed " +"in the ``flwr/clientapp`` base image, so only other package dependencies " +"such as ``flwr-datasets``, ``torch``, etc., need to be installed. As a " +"result, the ``flwr`` dependency is removed from the ``pyproject.toml`` " +"after it has been copied into the Docker image (see line 5)." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 +#: ../../source/docker/tutorial-quickstart-docker.rst:192 +#, fuzzy msgid "" -"**authors** (Required) A list of authors of the proposal. This is simply " -"the GitHub ID." -msgstr "**作者**(必填) 提案的作者列表。这只是 GitHub ID。" +"Next, build the ClientApp Docker image by running the following command " +"in the directory where the Dockerfile is located:" +msgstr "接下来,我们在 Dockerfile 和 ServerApp 代码所在的目录下运行以下命令,构建 ServerApp Docker 镜像。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 +#: ../../source/docker/tutorial-quickstart-docker.rst:201 +#, fuzzy msgid "" -"**creation-date** (Required) The date that the proposal was first " -"submitted in a PR." -msgstr "**创建日期**(必填) 建议书在 PR 中首次提交的日期。" +"The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. " +"Remember that these values are merely examples, and you can customize " +"them according to your requirements." 
+msgstr "我们给图片命名为 ``flwr_serverapp``,标签为 ``0.0.1``。请记住,这里选择的值只是一个示例。您可以根据自己的需要进行更改。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 +#: ../../source/docker/tutorial-quickstart-docker.rst:205 +#, fuzzy +msgid "Start the first ClientApp container:" +msgstr "使用虚拟客户端引擎" + +#: ../../source/docker/tutorial-quickstart-docker.rst +#, fuzzy msgid "" -"**last-updated** (Optional) The date that the proposal was last changed " -"significantly." -msgstr "**最后更新** (可选)提案最后一次重大修改的日期。" +"``flwr_clientapp:0.0.1``: This is the name of the image to be run and the" +" specific tag" +msgstr "flwr_serverapp:0.0.1``: 要使用的 Docker 映像的名称和标记。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"**see-also** (Optional) A list of other proposals that are relevant to " -"this one." -msgstr "**另见** (可选)与本提案相关的其他提案清单。" +"``--supernode supernode-1:9094``: Connect to the SuperNode's Fleet API at" +" the address" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 -msgid "**replaces** (Optional) A list of proposals that this one replaces." -msgstr "**取代**(可选) 这份提案所取代的提案列表。" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``supernode-1:9094``." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 -msgid "**superseded-by** (Optional) A list of proposals that this one supersedes." 
-msgstr "**被取代者** (可选) 此提案取代的提案列表。" +#: ../../source/docker/tutorial-quickstart-docker.rst:226 +msgid "Start the second ClientApp container:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 -msgid "Workflow" -msgstr "工作流程" +#: ../../source/docker/tutorial-quickstart-docker.rst:237 +#, fuzzy +msgid "Step 5: Start the SuperExec" +msgstr "然后,我们启动服务器:" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 +#: ../../source/docker/tutorial-quickstart-docker.rst:239 +#, fuzzy msgid "" -"The idea forming the enhancement should already have been discussed or " -"pitched in the community. As such, it needs a champion, usually the " -"author, who shepherds the enhancement. This person also has to find " -"committers to Flower willing to review the proposal." -msgstr "形成增强功能的想法应该已经在社区中讨论过或提出过。因此,它需要一个支持者(通常是作者)来引导增强。这个人还必须找到愿意审核提案的提交者。" +"The procedure for building and running a SuperExec image is almost " +"identical to the ClientApp image." +msgstr "构建和运行 ServerApp 映像的程序与 SuperNode 映像几乎完全相同。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 +#: ../../source/docker/tutorial-quickstart-docker.rst:242 msgid "" -"New enhancements are checked in with a file name in the form of `NNNN-" -"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement " -"Doc number, to `enhancements`. All enhancements start in `provisional` " -"state as part of a pull request. Discussions are done as part of the pull" -" request review." +"Similar to the ClientApp image, you will need to create a Dockerfile that" +" extends the SuperExec image and installs the required FAB dependencies." 
msgstr "" -"新的增强功能以 `NNNN-YYYYMMDD-enhancement-title.md` 的文件名签入,其中 `NNNN` " -"是花朵增强文档的编号,并将其转入 `enhancements`。作为拉取请求(pull request)的一部分,所有增强功能都从 " -"`provisional` 状态开始。讨论是作为拉取请求审查的一部分进行的。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 +#: ../../source/docker/tutorial-quickstart-docker.rst:245 msgid "" -"Once an enhancement has been reviewed and approved, its status is changed" -" to `implementable`. The actual implementation is then done in separate " -"pull requests. These pull requests should mention the respective " -"enhancement as part of their description. After the implementation is " -"done, the proposal status is changed to `implemented`." +"Create a SuperExec Dockerfile called ``Dockerfile.superexec`` and paste " +"the following code in:" msgstr "" -"一旦增强功能通过审核和批准,其状态就会变为 " -"`可实施`。实际的实施工作将在单独的拉取请求中完成。这些拉取请求应在其描述中提及相应的增强功能。实施完成后,提案状态将更改为 `已实施`。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 -msgid "" -"Under certain conditions, other states are possible. An Enhancement has " -"the following states:" -msgstr "在某些条件下,还可能出现其他状态。增强提案具有以下状态:" +#: ../../source/docker/tutorial-quickstart-docker.rst:248 +msgid "Dockerfile.superexec" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"`provisional`: The enhancement has been proposed and is actively being " -"defined. This is the starting state while the proposal is being fleshed " -"out and actively defined and discussed." -msgstr "`暂定`: 已提出改进建议并正在积极定义。这是在提案得到充实、积极定义和讨论时的起始状态。" +":substitution-code:`FROM flwr/superexec:|stable_flwr_version|`: This line" +" specifies that the Docker image" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 -msgid "`implementable`: The enhancement has been reviewed and approved." 
-msgstr "`可实施`: 增强功能已审核通过。" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"to be built from is the ``flwr/superexec image``, version :substitution-" +"code:`|stable_flwr_version|`." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"`implemented`: The enhancement has been implemented and is no longer " -"actively changed." -msgstr "`已实施`: 增强功能已实施,不再主动更改。" +"``ENTRYPOINT [\"flower-superexec\"``: Set the command ``flower-" +"superexec`` to be" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 -msgid "`deferred`: The enhancement is proposed but not actively being worked on." -msgstr "`推迟`: 已提出改进建议,但尚未积极开展工作。" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``\"--executor\", \"flwr.superexec.deployment:executor\"]`` Use the" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``flwr.superexec.deployment:executor`` executor to run the ServerApps." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:283 msgid "" -"`rejected`: The authors and reviewers have decided that this enhancement " -"is not moving forward." -msgstr "`拒绝`: 作者和审稿人已决定不再推进该增强功能。" +"Afterward, in the directory that holds the Dockerfile, execute this " +"Docker command to build the SuperExec image:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 -msgid "`withdrawn`: The authors have withdrawn the enhancement." -msgstr "`撤回`: 作者已撤回增强功能。" +#: ../../source/docker/tutorial-quickstart-docker.rst:290 +#, fuzzy +msgid "Start the SuperExec container:" +msgstr "启动服务器" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 -msgid "`replaced`: The enhancement has been replaced by a new enhancement." 
-msgstr "`已替换`: 增强功能已被新的增强功能取代。" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``-p 9093:9093``: Map port ``9093`` of the container to the same port of" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Adding an additional process to the ones already provided by GitHub " -"(Issues and Pull Requests) adds more complexity and can be a barrier for " -"potential first-time contributors." -msgstr "在 GitHub 已提供的流程(问题和拉取请求)之外再增加一个流程,会增加复杂性,并可能成为潜在首次贡献者的障碍。" +"the host machine, allowing you to access the SuperExec API on " +"``http://localhost:9093``." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 -msgid "" -"Expanding the proposal template beyond the single-sentence description " -"currently required in the features issue template may be a heavy burden " -"for non-native English speakers." -msgstr "对于英语非母语者来说,将提案模板扩展到目前要求的单句描述之外可能是一个沉重的负担。" +#: ../../source/docker/tutorial-quickstart-docker.rst:310 +msgid "``--name superexec``: Assign the name ``superexec`` to the container." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 -msgid "GitHub Issues" -msgstr "GitHub 问题" +#: ../../source/docker/tutorial-quickstart-docker.rst +#, fuzzy +msgid "" +"``flwr_superexec:0.0.1``: This is the name of the image to be run and the" +" specific tag" +msgstr "flwr_supernode:0.0.1``: 要使用的 Docker 映像的名称和标记。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Using GitHub Issues for these kinds of enhancements is doable. One could " -"use, for example, tags, to differentiate and filter them from other " -"issues. The main issue is in discussing and reviewing an enhancement: " -"GitHub issues only have a single thread for comments. Enhancements " -"usually have multiple threads of discussion at the same time for various " -"parts of the doc. 
Managing these multiple discussions can be confusing " -"when using GitHub Issues." +"``--executor-config superlink=\\\"superlink:9091\\\"``: Configure the " +"SuperExec executor to" msgstr "" -"使用 GitHub Issues 进行此类改进是可行的。例如,我们可以使用标签来区分和过滤这些问题。主要的问题在于讨论和审查增强功能: " -"GitHub 问题只有一个评论线程。而增强功能通常会同时有多个讨论线程,针对文档的不同部分。在使用 GitHub " -"问题时,管理这些多重讨论会很混乱。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 -msgid "Google Docs" -msgstr "谷歌文档" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "connect to the SuperLink running on port ``9091``." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 -msgid "" -"Google Docs allow for multiple threads of discussions. But as Google Docs" -" are hosted outside the project, their discoverability by the community " -"needs to be taken care of. A list of links to all proposals has to be " -"managed and made available for the community. Compared to shipping " -"proposals as part of Flower's repository, the potential for missing links" -" is much higher." 
+#: ../../source/docker/tutorial-quickstart-docker.rst:320 +msgid "Step 6: Run the Quickstart Project" msgstr "" -"谷歌文档允许多线程讨论。但是,由于谷歌文档是在项目之外托管的,因此需要注意它们是否能被社区发现。我们必须管理所有提案的链接列表,并提供给社区使用。与作为" -" Flower 资源库一部分的提案相比,丢失链接的可能性要大得多。" -#: ../../source/fed/index.md:1 -msgid "FED - Flower Enhancement Doc" -msgstr "FED - Flower 增强文件" +#: ../../source/docker/tutorial-quickstart-docker.rst:322 +#, fuzzy +msgid "Add the following lines to the ``pyproject.toml``:" +msgstr "将 ``pyproject.toml`` 中的次要版本增加一个。" -#: ../../source/how-to-aggregate-evaluation-results.rst:2 -msgid "Aggregate evaluation results" -msgstr "整合评估结果" +#: ../../source/docker/tutorial-quickstart-docker.rst:331 +msgid "Run the ``quickstart-docker`` project by executing the command:" +msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:4 -msgid "" -"The Flower server does not prescribe a way to aggregate evaluation " -"results, but it enables the user to fully customize result aggregation." -msgstr "Flower 服务器没有规定整合评估结果的方法,但用户可以完全自定义如何整合。" +#: ../../source/docker/tutorial-quickstart-docker.rst:337 +msgid "Follow the SuperExec logs to track the execution of the run:" +msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:8 -msgid "Aggregate Custom Evaluation Results" -msgstr "自定义整合评估结果" +#: ../../source/docker/tutorial-quickstart-docker.rst:344 +#, fuzzy +msgid "Step 7: Update the Application" +msgstr "步骤 3:自定义序列化" -#: ../../source/how-to-aggregate-evaluation-results.rst:10 +#: ../../source/docker/tutorial-quickstart-docker.rst:346 msgid "" -"The same :code:`Strategy`-customization approach can be used to aggregate" -" custom evaluation results coming from individual clients. Clients can " -"return custom metrics to the server by returning a dictionary:" -msgstr "同样的 :code:`Strategy` 定制方法也可用于汇总来自单个客户端的自定义评估结果。客户端可以通过返回字典的方式向服务器返回自定义指标:" +"Change the application code. 
For example, change the ``seed`` in " +"``quickstart_docker/task.py`` to ``43`` and save it:" +msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:36 -msgid "" -"The server can then use a customized strategy to aggregate the metrics " -"provided in these dictionaries:" -msgstr "然后,服务器可以使用定制的策略来汇总这些字典中提供的指标:" +#: ../../source/docker/tutorial-quickstart-docker.rst:349 +#, fuzzy +msgid "quickstart_docker/task.py" +msgstr "快速入门Pandas" -#: ../../source/how-to-authenticate-supernodes.rst:2 -msgid "Authenticate SuperNodes" +#: ../../source/docker/tutorial-quickstart-docker.rst:356 +#, fuzzy +msgid "Stop the current ClientApp containers:" +msgstr "当前客户端属性。" + +#: ../../source/docker/tutorial-quickstart-docker.rst:362 +#, fuzzy +msgid "Rebuild the FAB and ClientApp image:" +msgstr "加载数据" + +#: ../../source/docker/tutorial-quickstart-docker.rst:368 +msgid "Launch two new ClientApp containers based on the newly built image:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:4 -msgid "" -"Flower has built-in support for authenticated SuperNodes that you can use" -" to verify the identities of each SuperNode connecting to a SuperLink. 
" -"Flower node authentication works similar to how GitHub SSH authentication" -" works:" +#: ../../source/docker/tutorial-quickstart-docker.rst:383 +msgid "Run the updated project:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:7 -msgid "SuperLink (server) stores a list of known (client) node public keys" +#: ../../source/docker/tutorial-quickstart-docker.rst:390 +msgid "Step 8: Clean Up" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:8 -msgid "" -"Using ECDH, both SuperNode and SuperLink independently derive a shared " -"secret" +#: ../../source/docker/tutorial-quickstart-docker.rst:392 +msgid "Remove the containers and the bridge network:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:9 -msgid "" -"Shared secret is used to compute the HMAC value of the message sent from " -"SuperNode to SuperLink as a token" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:408 +#: ../../source/docker/tutorial-quickstart-docker.rst:404 +#, fuzzy +msgid "Where to Go Next" +msgstr "从哪里开始" + +#: ../../source/docker/tutorial-quickstart-docker.rst:406 +msgid ":doc:`enable-tls`" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:10 -msgid "SuperLink verifies the token" +#: ../../source/docker/tutorial-quickstart-docker.rst:407 +msgid ":doc:`persist-superlink-state`" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:12 +#: ../../source/docker/tutorial-quickstart-docker.rst:408 +msgid ":doc:`tutorial-quickstart-docker-compose`" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:2 #, fuzzy +msgid "Quickstart with Docker Compose" +msgstr "快速入门 iOS" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:4 msgid "" -"We recommend you to check out the complete `code example " -"`_ demonstrating federated learning with Flower in an " -"authenticated setting." 
+"This quickstart shows you how to set up Flower using Docker Compose in a " +"single command, allowing you to focus on developing your application " +"without worrying about the underlying infrastructure." msgstr "" -"请参阅`完整代码示例 " -"`_了解更多信息。" -#: ../../source/how-to-authenticate-supernodes.rst:15 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:8 msgid "" -"This guide covers a preview feature that might change in future versions " -"of Flower." +"You will also learn how to easily enable TLS encryption and persist " +"application state locally, giving you the freedom to choose the " +"configuration that best suits your project's needs." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:24 +msgid "Clone the Docker Compose ``complete`` directory:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:38 msgid "" -"For increased security, node authentication can only be used when " -"encrypted connections (SSL/TLS) are enabled." +"Export the path of the newly created project. The path should be relative" +" to the location of the Docker Compose files:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:21 -msgid "Enable node authentication in :code:`SuperLink`" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:45 +msgid "" +"Setting the ``PROJECT_DIR`` helps Docker Compose locate the " +"``pyproject.toml`` file, allowing it to install dependencies in the " +"SuperExec and SuperNode images correctly." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:23 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:49 +#, fuzzy +msgid "Step 2: Run Flower in Insecure Mode" +msgstr "Flower 服务器。" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:51 msgid "" -"To enable node authentication, first you need to configure SSL/TLS " -"connections to secure the SuperLink<>SuperNode communication. 
You can " -"find the complete guide `here `_. After configuring secure connections, you" -" can enable client authentication in a long-running Flower " -":code:`SuperLink`. Use the following terminal command to start a Flower " -":code:`SuperNode` that has both secure connections and node " -"authentication enabled:" +"To begin, start Flower with the most basic configuration. In this setup, " +"Flower will run without TLS and without persisting the state." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:36 -msgid "Let's break down the authentication flags:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:56 +msgid "" +"Without TLS, the data sent between the services remains **unencrypted**. " +"Use it only for development purposes." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:38 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:59 msgid "" -"The first flag :code:`--auth-list-public-keys` expects a path to a CSV " -"file storing all known node public keys. You need to store all known node" -" public keys that are allowed to participate in a federation in one CSV " -"file (:code:`.csv`)." +"For production-oriented use cases, :ref:`enable TLS` for secure data" +" transmission." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:40 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:70 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:184 +#, fuzzy +msgid "``docker compose``: The Docker command to run the Docker Compose tool." +msgstr "`docker run``: 这是运行新 Docker 容器的命令。" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:71 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:185 msgid "" -"A valid CSV file storing known node public keys should list the keys in " -"OpenSSH format, separated by commas and without any comments. For an " -"example, refer to our code sample, which contains a CSV file with two " -"known node public keys." 
+"``-f compose.yml``: Specify the YAML file that contains the basic Flower " +"service definitions." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:42 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:72 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:190 msgid "" -"The second and third flags :code:`--auth-superlink-private-key` and :code" -":`--auth-superlink-public-key` expect paths to the server's private and " -"public keys. For development purposes, you can generate a private and " -"public key pair using :code:`ssh-keygen -t ecdsa -b 384`." +"``--build``: Rebuild the images for each service if they don't already " +"exist." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:45 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:73 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:191 msgid "" -"In Flower 1.9, there is no support for dynamically removing, editing, or " -"adding known node public keys to the SuperLink. To change the set of " -"known nodes, you need to shut the server down, edit the CSV file, and " -"start the server again. Support for dynamically changing the set of known" -" nodes is on the roadmap to be released in Flower 1.10 (ETA: June)." +"``-d``: Detach the containers from the terminal and run them in the " +"background." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:51 -msgid "Enable node authentication in :code:`SuperNode`" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:76 +msgid "Step 3: Run the Quickstart Project" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:53 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:78 msgid "" -"Similar to the long-running Flower server (:code:`SuperLink`), you can " -"easily enable node authentication in the long-running Flower client " -"(:code:`SuperNode`). 
Use the following terminal command to start an " -"authenticated :code:`SuperNode`:" +"Now that the Flower services have been started via Docker Compose, it is " +"time to run the quickstart example." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:64 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:81 msgid "" -"The :code:`--auth-supernode-private-key` flag expects a path to the " -"node's private key file and the :code:`--auth-supernode-public-key` flag " -"expects a path to the node's public key file. For development purposes, " -"you can generate a private and public key pair using :code:`ssh-keygen -t" -" ecdsa -b 384`." +"To ensure the ``flwr`` CLI connects to the SuperExec, you need to specify" +" the SuperExec addresses in the ``pyproject.toml`` file." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:68 -msgid "Security notice" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:84 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:232 +msgid "Add the following lines to the ``quickstart-compose/pyproject.toml``:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:70 -msgid "" -"The system's security relies on the credentials of the SuperLink and each" -" SuperNode. Therefore, it is imperative to safeguard and safely store the" -" credentials to avoid security risks such as Public Key Infrastructure " -"(PKI) impersonation attacks. The node authentication mechanism also " -"involves human interaction, so please ensure that all of the " -"communication is done in a secure manner, using trusted communication " -"methods." 
+#: ../../source/docker/tutorial-quickstart-docker-compose.rst:86 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:234 +msgid "quickstart-compose/pyproject.toml" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:75 -#: ../../source/how-to-enable-ssl-connections.rst:65 -#: ../../source/how-to-use-built-in-mods.rst:85 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 -msgid "Conclusion" -msgstr "总结" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:93 +msgid "Execute the command to run the quickstart example:" +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:77 -msgid "" -"You should now have learned how to start a long-running Flower server " -"(:code:`SuperLink`) and client (:code:`SuperNode`) with node " -"authentication enabled. You should also know the significance of the " -"private key and store it safely to minimize security risks." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:99 +msgid "Monitor the SuperExec logs and wait for the summary to appear:" msgstr "" -#: ../../source/how-to-configure-clients.rst:2 -msgid "Configure clients" -msgstr "配置客户端" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:106 +#, fuzzy +msgid "Step 4: Update the Application" +msgstr "步骤 3:自定义序列化" -#: ../../source/how-to-configure-clients.rst:4 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:108 +msgid "In the next step, change the application code." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:110 msgid "" -"Along with model parameters, Flower can send configuration values to " -"clients. Configuration values can be used for various purposes. They are," -" for example, a popular way to control client-side hyperparameters from " -"the server." 
-msgstr "除了模型参数,Flower 还可以向客户端发送配置值。配置值有多种用途。它们是一种从服务器控制客户端超参数的常用方法。" +"For example, go to the ``task.py`` file in the ``quickstart-" +"compose/quickstart_compose/`` directory and add a ``print`` call in the " +"``get_weights`` function:" +msgstr "" -#: ../../source/how-to-configure-clients.rst:7 -msgid "Configuration values" -msgstr "配置值" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:114 +msgid "quickstart-compose/quickstart_compose/task.py" +msgstr "" -#: ../../source/how-to-configure-clients.rst:9 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:125 +#, fuzzy +msgid "Rebuild and restart the services." +msgstr "我们已经可以启动*服务器*了:" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:129 msgid "" -"Configuration values are represented as a dictionary with ``str`` keys " -"and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " -"float), ``int``, or ``str`` (or equivalent types in different languages)." -" Here is an example of a configuration dictionary in Python:" +"If you have modified the dependencies listed in your ``pyproject.toml`` " +"file, it is essential to rebuild images." msgstr "" -"配置值以字典的形式表示,字典的键为 ``str``,值的类型为 ``bool``、``bytes``、``double``(64 " -"位精度浮点型)、``int``或 ``str`(或不同语言中的等效类型)。下面是一个 Python 配置字典的示例:" -#: ../../source/how-to-configure-clients.rst:20 -msgid "" -"Flower serializes these configuration dictionaries (or *config dict* for " -"short) to their ProtoBuf representation, transports them to the client " -"using gRPC, and then deserializes them back to Python dictionaries." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:132 +msgid "If you haven't made any changes, you can skip this step." 
msgstr "" -"Flower 将这些配置字典(简称 *config dict*)序列化为 ProtoBuf 表示形式,使用 gRPC " -"将其传输到客户端,然后再反序列化为 Python 字典。" -#: ../../source/how-to-configure-clients.rst:24 -msgid "" -"Currently, there is no support for directly sending collection types " -"(e.g., ``Set``, ``List``, ``Map``) as values in configuration " -"dictionaries. There are several workarounds to send collections as values" -" by converting them to one of the supported value types (and converting " -"them back on the client-side)." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:134 +msgid "Run the following command to rebuild and restart the services:" msgstr "" -"目前,还不支持在配置字典中直接发送作为值的集合类型(例如,`Set``, `List`, " -"`Map``)。有几种变通方法可将集合转换为支持的值类型之一(并在客户端将其转换回),从而将集合作为值发送。" -#: ../../source/how-to-configure-clients.rst:26 -msgid "" -"One can, for example, convert a list of floating-point numbers to a JSON " -"string, then send the JSON string using the configuration dictionary, and" -" then convert the JSON string back to a list of floating-point numbers on" -" the client." -msgstr "例如,可以将浮点数列表转换为 JSON 字符串,然后使用配置字典发送 JSON 字符串,再在客户端将 JSON 字符串转换回浮点数列表。" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:140 +msgid "Run the updated quickstart example:" +msgstr "" -#: ../../source/how-to-configure-clients.rst:30 -msgid "Configuration through built-in strategies" -msgstr "通过内置策略进行配置" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:147 +msgid "In the SuperExec logs, you should find the ``Get weights`` line:" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:164 +msgid "Step 5: Persisting the SuperLink State" +msgstr "" -#: ../../source/how-to-configure-clients.rst:32 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:166 msgid "" -"The easiest way to send configuration values to clients is to use a " -"built-in strategy like :code:`FedAvg`. Built-in strategies support so-" -"called configuration functions. 
A configuration function is a function " -"that the built-in strategy calls to get the configuration dictionary for " -"the current round. It then forwards the configuration dictionary to all " -"the clients selected during that round." +"In this step, Flower services are configured to persist the state of the " +"SuperLink service, ensuring that it maintains its state even after a " +"restart." msgstr "" -"向客户端发送配置值的最简单方法是使用内置策略,如 " -":code:`FedAvg`。内置策略支持所谓的配置函数。配置函数是内置策略调用的函数,用于获取当前轮的配置字典。然后,它会将配置字典转发给该轮中选择的所有客户端。" -#: ../../source/how-to-configure-clients.rst:34 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:171 msgid "" -"Let's start with a simple example. Imagine we want to send (a) the batch " -"size that the client should use, (b) the current global round of " -"federated learning, and (c) the number of epochs to train on the client-" -"side. Our configuration function could look like this:" -msgstr "让我们从一个简单的例子开始。想象一下,我们想要发送给客户端(a)应该使用的批次大小,(b)当前联邦学习的全局轮次,以及(c)客户端训练的遍历数。我们的配置函数可以是这样的:" +"When working with Docker Compose on Linux, you may need to create the " +"``state`` directory first and change its ownership to ensure proper " +"access and permissions." +msgstr "" -#: ../../source/how-to-configure-clients.rst:47 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:174 msgid "" -"To make the built-in strategies use this function, we can pass it to " -"``FedAvg`` during initialization using the parameter " -":code:`on_fit_config_fn`:" -msgstr "为了让内置策略使用这个函数,我们可以在初始化时使用参数 :code:`on_fit_config_fn` 将它传递给 ``FedAvg`` :" +"For more information, consult the following page: :doc:`persist-" +"superlink-state`." 
+msgstr "" -#: ../../source/how-to-configure-clients.rst:56 -msgid "One the client side, we receive the configuration dictionary in ``fit``:" -msgstr "在客户端,我们在 ``fit`` 中接收配置字典:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:176 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:226 +msgid "Run the command:" +msgstr "" -#: ../../source/how-to-configure-clients.rst:67 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst msgid "" -"There is also an `on_evaluate_config_fn` to configure evaluation, which " -"works the same way. They are separate functions because one might want to" -" send different configuration values to `evaluate` (for example, to use a" -" different batch size)." +"``-f with-state.yml``: Specifies the path to an additional Docker Compose" +" file that" msgstr "" -"还有一个 `on_evaluate_config_fn` 用于配置评估,其工作方式相同。它们是不同的函数,因为可能需要向 `evaluate` " -"发送不同的配置值(例如,使用不同的批量大小)。" -#: ../../source/how-to-configure-clients.rst:69 -msgid "" -"The built-in strategies call this function every round (that is, every " -"time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). " -"Calling `on_evaluate_config_fn` every round allows us to vary/change the " -"config dict over consecutive rounds. If we wanted to implement a " -"hyperparameter schedule, for example, to increase the number of local " -"epochs during later rounds, we could do the following:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst +msgid "contains the configuration for persisting the SuperLink state." msgstr "" -"内置策略每轮都会调用此函数(即每次运行 `Strategy.configure_fit` 或 " -"`Strategy.configure_evaluate` 时)。每轮调用 `on_evaluate_config_fn` " -"允许我们在连续几轮中改变配置指令。例如,如果我们想实现一个超参数时间表,以增加后几轮的本地遍历次数,我们可以这样做:" -#: ../../source/how-to-configure-clients.rst:82 -msgid "The :code:`FedAvg` strategy will call this function *every round*." 
-msgstr "代码:`FedAvg`策略*每轮*都会调用该函数。" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst +msgid "" +"Docker merges Compose files according to `merging rules " +"`_." +msgstr "" -#: ../../source/how-to-configure-clients.rst:85 -msgid "Configuring individual clients" -msgstr "配置个别客户端" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:193 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:247 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:375 +msgid "Rerun the ``quickstart-compose`` project:" +msgstr "" -#: ../../source/how-to-configure-clients.rst:87 -msgid "" -"In some cases, it is necessary to send different configuration values to " -"different clients." -msgstr "在某些情况下,有必要向不同的客户端发送不同的配置值。" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:199 +msgid "Check the content of the ``state`` directory:" +msgstr "" -#: ../../source/how-to-configure-clients.rst:89 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:206 msgid "" -"This can be achieved by customizing an existing strategy or by " -":doc:`implementing a custom strategy from scratch `. Here's a nonsensical example that customizes :code:`FedAvg`" -" by adding a custom ``\"hello\": \"world\"`` configuration key/value pair" -" to the config dict of a *single client* (only the first client in the " -"list, the other clients in this round to not receive this \"special\" " -"config value):" +"You should see a ``state.db`` file in the ``state`` directory. If you " +"restart the service, the state file will be used to restore the state " +"from the previously saved data. This ensures that the data persists even " +"if the containers are stopped and started again." 
msgstr "" -"这可以通过定制现有策略或 `从头开始实施一个定制策略 `_来实现。下面是一个无厘头的例子,`FedAvg`通过在*单个客户端*的配置指令(config " -"dict)中添加自定义的``\"hello\": \"world\"``配置键/值对添加到此的配置 dict " -"中(仅列表中的第一个客户端,本轮中的其他客户端不会收到此 \"特殊 \"配置值):" -#: ../../source/how-to-configure-logging.rst:2 -msgid "Configure logging" -msgstr "配置日志记录" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:214 +msgid "Step 6: Run Flower with TLS" +msgstr "" -#: ../../source/how-to-configure-logging.rst:4 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:216 msgid "" -"The Flower logger keeps track of all core events that take place in " -"federated learning workloads. It presents information by default " -"following a standard message format:" -msgstr "Flower 日志记录器会跟踪联邦学习工作负载中发生的所有核心事件。它默认按照标准信息格式提供信息:" +"To demonstrate how to enable TLS, generate self-signed certificates using" +" the ``certs.yml`` Compose file." +msgstr "" -#: ../../source/how-to-configure-logging.rst:13 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:223 msgid "" -"containing relevant information including: log message level (e.g. " -":code:`INFO`, :code:`DEBUG`), a timestamp, the line where the logging " -"took place from, as well as the log message itself. In this way, the " -"logger would typically display information on your terminal as follows:" +"For production environments, use a service like `Let's Encrypt " +"`_ to obtain your certificates." 
msgstr "" -"相关信息包括:日志信息级别(例如 " -":code:`INFO`、:code:`DEBUG`)、时间戳、日志记录的行以及日志信息本身。这样,日志记录器通常会在终端上显示如下信息:" -#: ../../source/how-to-configure-logging.rst:34 -msgid "Saving log to file" -msgstr "将日志保存到文件" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:241 +msgid "Restart the services with TLS enabled:" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:255 +msgid "Step 7: Add another SuperNode" +msgstr "" -#: ../../source/how-to-configure-logging.rst:36 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:257 msgid "" -"By default, the Flower log is outputted to the terminal where you launch " -"your Federated Learning workload from. This applies for both gRPC-based " -"federation (i.e. when you do :code:`fl.server.start_server`) and when " -"using the :code:`VirtualClientEngine` (i.e. when you do " -":code:`fl.simulation.start_simulation`). In some situations you might " -"want to save this log to disk. You can do so by calling the " -"`fl.common.logger.configure() " -"`_" -" function. For example:" +"You can add more SuperNodes and ClientApps by duplicating their " +"definitions in the ``compose.yml`` file." msgstr "" -"默认情况下,Flower 日志会输出到启动联邦学习工作负载的终端。这既适用于基于 gRPC 的联邦学习(即执行 " -":code:`fl.server.start_server` 时),也适用于使用 :code:`VirtualClientEngine` " -"时(即执行 :code:`fl.simulation.start_simulation` " -"时)。在某些情况下,您可能希望将此日志保存到磁盘。为此,您可以调用 `fl.common.logger.configure() " -"`_" -" 函数。例如:" -#: ../../source/how-to-configure-logging.rst:53 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:260 msgid "" -"With the above, Flower will record the log you see on your terminal to " -":code:`log.txt`. This file will be created in the same directory as were " -"you are running the code from. If we inspect we see the log above is also" -" recorded but prefixing with :code:`identifier` each line:" +"Just give each new SuperNode and ClientApp service a unique service name " +"like ``supernode-3``, ``clientapp-3``, etc." 
msgstr "" -"通过上述操作,Flower 会将您在终端上看到的日志记录到 " -":code:`log.txt`。该文件将创建在运行代码的同一目录下。如果我们检查一下,就会发现上面的日志也被记录了下来,但每一行都以 " -":code:`identifier` 作为前缀:" -#: ../../source/how-to-configure-logging.rst:74 -msgid "Log your own messages" -msgstr "记录自己的信息" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:263 +msgid "In ``compose.yml``, add the following:" +msgstr "" -#: ../../source/how-to-configure-logging.rst:76 -msgid "" -"You might expand the information shown by default with the Flower logger " -"by adding more messages relevant to your application. You can achieve " -"this easily as follows." -msgstr "您可以通过添加更多与应用程序相关的信息来扩展 Flower 日志记录器默认显示的信息。您可以通过以下方法轻松实现这一目标。" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:265 +msgid "compose.yml" +msgstr "" -#: ../../source/how-to-configure-logging.rst:102 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:316 msgid "" -"In this way your logger will show, in addition to the default messages, " -"the ones introduced by the clients as specified above." -msgstr "这样,除默认信息外,您的日志记录器还将显示由客户引入的信息,如上文所述。" - -#: ../../source/how-to-configure-logging.rst:128 -msgid "Log to a remote service" -msgstr "登录远程服务" +"If you also want to enable TLS for the new SuperNodes, duplicate the " +"SuperNode definition for each new SuperNode service in the ``with-" +"tls.yml`` file." +msgstr "" -#: ../../source/how-to-configure-logging.rst:130 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:319 msgid "" -"The :code:`fl.common.logger.configure` function, also allows specifying a" -" host to which logs can be pushed (via :code:`POST`) through a native " -"Python :code:`logging.handler.HTTPHandler`. This is a particularly useful" -" feature in :code:`gRPC`-based Federated Learning workloads where " -"otherwise gathering logs from all entities (i.e. the server and the " -"clients) might be cumbersome. Note that in Flower simulation, the server " -"automatically displays all logs. 
You can still specify a " -":code:`HTTPHandler` should you wish to backup or analyze the logs " -"somewhere else." +"Make sure that the names of the services match with the one in the " +"``compose.yml`` file." msgstr "" -"此外,:code:`fl.common.logger.configure`函数还允许指定主机,通过本地 Python " -":code:`logging.handler.HTTPHandler`,向该主机推送日志(通过 :code:`POST`)。在基于 " -":code:`gRPC` 的联邦学习工作负载中,这是一个特别有用的功能,否则从所有实体(即服务器和客户端)收集日志可能会很麻烦。请注意,在 " -"Flower 模拟器中,服务器会自动显示所有日志。如果希望在其他地方备份或分析日志,仍可指定 :code:`HTTPHandler`。" -#: ../../source/how-to-enable-ssl-connections.rst:2 -msgid "Enable SSL connections" -msgstr "启用 SSL 连接" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:321 +msgid "In ``with-tls.yml``, add the following:" +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:4 -#, fuzzy -msgid "" -"This guide describes how to a SSL-enabled secure Flower server " -"(:code:`SuperLink`) can be started and how a Flower client " -"(:code:`SuperNode`) can establish a secure connections to it." -msgstr "本指南介绍如何启动启用 SSL 的安全 Flower 服务器,以及 Flower 客户端如何与其建立安全连接。" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:323 +msgid "with-tls.yml" +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:7 -msgid "" -"A complete code example demonstrating a secure connection can be found " -"`here `_." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:345 +msgid "Step 8: Persisting the SuperLink State and Enabling TLS" msgstr "" -"有关安全连接的完整代码示例,请参见 `_ 。" -#: ../../source/how-to-enable-ssl-connections.rst:10 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:347 msgid "" -"The code example comes with a :code:`README.md` file which explains how " -"to start it. Although it is already SSL-enabled, it might be less " -"descriptive on how it does so. Stick to this guide for a deeper " -"introduction to the topic." 
-msgstr "代码示例附带的 README.md 文件将解释如何启动它。虽然它已经启用了 SSL,但对如何启用可能描述较少。请参考本指南,了解更深入的相关介绍。" +"To run Flower with persisted SuperLink state and enabled TLS, a slight " +"change in the ``with-state.yml`` file is required:" +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:16 -msgid "Certificates" -msgstr "证书" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:350 +msgid "Comment out the lines 2-4 and uncomment the lines 5-9:" +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:352 +msgid "with-state.yml" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 #, fuzzy -msgid "" -"Using SSL-enabled connections requires certificates to be passed to the " -"server and client. For the purpose of this guide we are going to generate" -" self-signed certificates. As this can become quite complex we are going " -"to ask you to run the script in :code:`examples/advanced-" -"tensorflow/certificates/generate.sh` with the following command sequence:" +msgid "Restart the services:" +msgstr "启动服务器" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:383 +msgid "Step 9: Merge Multiple Compose Files" msgstr "" -"使用支持 SSL 的连接需要向服务器和客户端传递证书。在本指南中,我们将生成自签名证书。由于这可能会变得相当复杂,我们将要求你运行 " -":code:`examples/advanced-tensorflow/certificates/generate.sh` 中的脚本" -#: ../../source/how-to-enable-ssl-connections.rst:29 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:385 msgid "" -"This will generate the certificates in :code:`examples/advanced-" -"tensorflow/.cache/certificates`." -msgstr "这将在 :code:`examples/advanced-tensorflow/.cache/certificates` 中生成证书。" +"You can merge multiple Compose files into a single file. 
For instance, if" +" you wish to combine the basic configuration with the TLS configuration, " +"execute the following command:" +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:31 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:394 msgid "" -"The approach for generating SSL certificates in the context of this " -"example can serve as an inspiration and starting point, but it should not" -" be used as a reference for production environments. Please refer to " -"other sources regarding the issue of correctly generating certificates " -"for production environments. For non-critical prototyping or research " -"projects, it might be sufficient to use the self-signed certificates " -"generated using the scripts mentioned in this guide." -msgstr "本示例中生成 SSL 证书的方法可作为启发和起点,但不应被视为生产环境的完整方法。有关在生产环境中正确生成证书的问题,请参考其他资料。" +"This will merge the contents of ``compose.yml`` and ``with-tls.yml`` into" +" a new file called ``my_compose.yml``." +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:39 -#, fuzzy -msgid "Server (SuperLink)" -msgstr "flower-superlink" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:398 +msgid "Step 10: Clean Up" +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:41 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:400 #, fuzzy -msgid "" -"Use the following terminal command to start a sever (SuperLink) that uses" -" the previously generated certificates:" -msgstr "现在我们将演示如何编写一个客户端,使用之前生成的脚本:" +msgid "Remove all services and volumes:" +msgstr "从 R 中删除所有项目。" -#: ../../source/how-to-enable-ssl-connections.rst:47 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:410 #, fuzzy -msgid "" -"When providing certificates, the server expects a tuple of three " -"certificates paths: CA certificate, server certificate and server private" -" key." 
-msgstr "要启用 SSL,需要 CA 证书、服务器证书和服务器私钥。" +msgid ":doc:`run-quickstart-examples-docker-compose`" +msgstr "快速入门 iOS" -#: ../../source/how-to-enable-ssl-connections.rst:51 +#: ../../source/docker/use-a-different-version.rst:2 #, fuzzy -msgid "Client (SuperNode)" -msgstr "客户端状态代码。" +msgid "Use a Different Flower Version" +msgstr "使用不同的 Flower 或 Python 版本" -#: ../../source/how-to-enable-ssl-connections.rst:53 +#: ../../source/docker/use-a-different-version.rst:4 #, fuzzy msgid "" -"Use the following terminal command to start a client (SuperNode) that " -"uses the previously generated certificates:" -msgstr "现在我们将演示如何编写一个客户端,使用之前生成的脚本:" +"If you want to use a different version of Flower, for example Flower " +"nightly, you can do so by changing the tag. All available versions are on" +" `Docker Hub `__." +msgstr "" +"如果您想使用不同版本的 Flower 或 Python,可以通过更改标签来实现。我们提供的所有版本都可以在 `Docker Hub " +"`_ 上找到。" -#: ../../source/how-to-enable-ssl-connections.rst:61 +#: ../../source/docker/use-a-different-version.rst:10 #, fuzzy msgid "" -"When setting :code:`root_certificates`, the client expects a file path to" -" PEM-encoded root certificates." +"When using Flower nightly, the SuperLink nightly image must be paired " +"with the corresponding SuperNode and ServerApp nightly images released on" +" the same day. To ensure the versions are in sync, using the concrete " +"tag, e.g., ``1.10.0.dev20240610`` instead of ``nightly`` is recommended." 
msgstr "" -"当设置 :code:`root_certificates` 时,客户端希望 PEM 编码的根证书是字节字符串。我们再次使用 " -":code:`Path` 来简化以字节字符串形式读取证书的过程。" +"超级节点 Docker 映像目前仅适用于 1.9.0-nightly 版本。稳定版将在 Flower " +"1.9.0(稳定版)发布时推出(预计发布时间:5 " +"月)。超级节点夜间镜像必须与同一天发布的相应超级链接和服务器应用程序夜间镜像配对。为确保版本同步,建议使用具体标签,例如``1.9.0.dev20240501``,而不是``nightly``。" -#: ../../source/how-to-enable-ssl-connections.rst:67 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 +msgid "Example: FedBN in PyTorch - From Centralized To Federated" +msgstr "示例: PyTorch 中的 FedBN - 从集中式到联邦式" + +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 #, fuzzy msgid "" -"You should now have learned how to generate self-signed certificates " -"using the given script, start an SSL-enabled server and have a client " -"establish a secure connection to it." -msgstr "现在,你应该已经学会了如何使用给定的脚本生成自签名证书、启动启用 SSL 的服务器并让客户端与其建立安全连接。" +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload with `FedBN " +"`_, a federated training strategy " +"designed for non-iid data. We are using PyTorch to train a Convolutional " +"Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " +"When applying FedBN, only few changes needed compared to :doc:`Example: " +"PyTorch - From Centralized To Federated `." 
+msgstr "" +"本教程将向您展示如何使用 Flower 为现有的机器学习框架构建一个联邦学习的版本,并使用 \"FedBN `_\"(一种针对非 iid 数据设计的联邦训练策略)。我们使用 PyTorch 在 CIFAR-10 " +"数据集上训练一个卷积神经网络(带有Batch Normalization层)。在应用 FedBN 时,只需对 `示例: PyTorch - " +"从集中式到联邦式 `_ 做少量改动。" -#: ../../source/how-to-enable-ssl-connections.rst:72 -msgid "Additional resources" -msgstr "补充资源" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:12 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 +msgid "Centralized Training" +msgstr "集中式训练" -#: ../../source/how-to-enable-ssl-connections.rst:74 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:14 +#, fuzzy msgid "" -"These additional sources might be relevant if you would like to dive " -"deeper into the topic of certificates:" -msgstr "如果您想更深入地了解证书主题,这些额外的资料来源可能有帮助:" - -#: ../../source/how-to-enable-ssl-connections.rst:76 -msgid "`Let's Encrypt `_" -msgstr "`让我们加密 `_" +"All files are revised based on :doc:`Example: PyTorch - From Centralized " +"To Federated `. The only " +"thing to do is modifying the file called ``cifar.py``, revised part is " +"shown below:" +msgstr "" +"所有文件均根据 `示例: PyTorch -从集中式到联邦式 `_。唯一要做的就是修改名为 :code:`cifar.py` " +"的文件,修改部分如下所示:" -#: ../../source/how-to-enable-ssl-connections.rst:77 -msgid "`certbot `_" -msgstr "`certbot `_" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:18 +msgid "" +"The model architecture defined in class Net() is added with Batch " +"Normalization layers accordingly." 
+msgstr "类 Net() 中定义的模型架构会相应添加Batch Normalization层。" -#: ../../source/how-to-implement-strategies.rst:2 -msgid "Implement strategies" -msgstr "实施策略" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:171 +msgid "You can now run your machine learning workload:" +msgstr "现在,您可以运行您的机器学习工作了:" -#: ../../source/how-to-implement-strategies.rst:4 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 +#, fuzzy msgid "" -"The strategy abstraction enables implementation of fully custom " -"strategies. A strategy is basically the federated learning algorithm that" -" runs on the server. Strategies decide how to sample clients, how to " -"configure clients for training, how to aggregate updates, and how to " -"evaluate models. Flower provides a few built-in strategies which are " -"based on the same API described below." +"So far this should all look fairly familiar if you've used PyTorch " +"before. Let's take the next step and use what we've built to create a " +"federated learning system within FedBN, the system consists of one server" +" and two clients." 
msgstr "" -"策略抽象类可以实现完全定制的策略。策略基本上就是在服务器上运行的联邦学习算法。策略决定如何对客户端进行采样、如何配置客户端进行训练、如何聚合参数更新以及如何评估模型。Flower" -" 提供了一些内置策略,这些策略基于下文所述的相同 API。" +"到目前为止,如果您以前使用过 PyTorch,这一切看起来应该相当熟悉。让我们进行下一步,使用我们所构建的内容在 FedBN " +"中创建一个联邦学习系统,该系统由一个服务器和两个客户端组成。" -#: ../../source/how-to-implement-strategies.rst:11 -msgid "The :code:`Strategy` abstraction" -msgstr ":code:`策略 ` 抽象类" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:58 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:182 +msgid "Federated Training" +msgstr "联邦培训" -#: ../../source/how-to-implement-strategies.rst:13 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:60 +#, fuzzy msgid "" -"All strategy implementation are derived from the abstract base class " -":code:`flwr.server.strategy.Strategy`, both built-in implementations and " -"third party implementations. This means that custom strategy " -"implementations have the exact same capabilities at their disposal as " -"built-in ones." +"If you have read :doc:`Example: PyTorch - From Centralized To Federated " +"`, the following parts are" +" easy to follow, only ``get_parameters`` and ``set_parameters`` function " +"in ``client.py`` needed to revise. If not, please read the :doc:`Example:" +" PyTorch - From Centralized To Federated `. first." msgstr "" -"所有策略实现均源自抽象基类 " -":code:`flwr.server.strategy.Strategy`,包括内置实现和第三方实现。这意味着自定义策略实现与内置实现具有完全相同的功能。" +"如果你读过 `示例: PyTorch - 从集中式到联邦式 `_,下面的部分就很容易理解了,只需要修改 " +":code:`get_parameters` 和 :code:`set_parameters` 中的 :code:`client.py` " +"函数。如果没有,请阅读 `示例: PyTorch - 从集中式到联邦式 `_。" -#: ../../source/how-to-implement-strategies.rst:18 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:66 +#, fuzzy msgid "" -"The strategy abstraction defines a few abstract methods that need to be " -"implemented:" -msgstr "策略抽象定义了一些需要实现的抽象方法:" +"Our example consists of one *server* and two *clients*. 
In FedBN, " +"``server.py`` keeps unchanged, we can start the server directly." +msgstr "我们的示例包括一个*服务器*和两个*客户端*。在 FedBN 中,:code:`server.py` 保持不变,我们可以直接启动服务器。" -#: ../../source/how-to-implement-strategies.rst:75 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:73 +#, fuzzy msgid "" -"Creating a new strategy means implementing a new :code:`class` (derived " -"from the abstract base class :code:`Strategy`) that implements for the " -"previously shown abstract methods:" -msgstr "创建一个新策略意味着要实现一个新的 :code:`class`(从抽象基类 :code:`Strategy` 派生),该类要实现前面显示的抽象方法:" +"Finally, we will revise our *client* logic by changing ``get_parameters``" +" and ``set_parameters`` in ``client.py``, we will exclude batch " +"normalization parameters from model parameter list when sending to or " +"receiving from the server." +msgstr "" +"最后,我们将修改 *client* 的逻辑,修改 :code:`client.py` 中的 :code:`get_parameters` 和 " +":code:`set_parameters`,在向服务器发送或从服务器接收时,我们将从模型参数列表中排除batch " +"normalization层的参数。" -#: ../../source/how-to-implement-strategies.rst:100 -msgid "The Flower server calls these methods in the following order:" -msgstr "Flower 服务器按以下顺序调用这些方法:" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:102 +msgid "Now, you can now open two additional terminal windows and run" +msgstr "现在,您可以打开另外两个终端窗口并运行程序" -#: ../../source/how-to-implement-strategies.rst:177 -msgid "The following sections describe each of those methods in more detail." -msgstr "下文将详细介绍每种方法。" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:108 +msgid "" +"in each window (make sure that the server is still running before you do " +"so) and see your (previously centralized) PyTorch project run federated " +"learning with FedBN strategy across two clients. Congratulations!" +msgstr "确保服务器仍在运行后,然后您就能看到您的 PyTorch 项目(之前是集中式的)通过 FedBN 策略在两个客户端上运行联邦学习。祝贺!"
-#: ../../source/how-to-implement-strategies.rst:180 -msgid "The :code:`initialize_parameters` method" -msgstr ":code:`初始化参数` 方法" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:113 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:349 +#: ../../source/tutorial-quickstart-jax.rst:319 +msgid "Next Steps" +msgstr "下一步工作" -#: ../../source/how-to-implement-strategies.rst:182 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:115 msgid "" -":code:`initialize_parameters` is called only once, at the very beginning " -"of an execution. It is responsible for providing the initial global model" -" parameters in a serialized form (i.e., as a :code:`Parameters` object)." +"The full source code for this example can be found `here " +"`_. Our example is of course somewhat over-" +"simplified because both clients load the exact same dataset, which isn't " +"realistic. You're now prepared to explore this topic further. How about " +"using different subsets of CIFAR-10 on each client? How about adding more" +" clients?" msgstr "" -":code:`initialize_parameters` 只调用一次,即在执行开始时。它负责以序列化形式(即 " -":code:`Parameters` 对象)提供初始全局模型参数。" +"本示例的完整源代码可在 `_ " +"找到。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。让我们准备好进一步探讨这一主题。如在每个客户端使用不同的 " +"CIFAR-10 子集,或者增加客户端的数量。" -#: ../../source/how-to-implement-strategies.rst:184 -msgid "" -"Built-in strategies return user-provided initial parameters. 
The " -"following example shows how initial parameters can be passed to " -":code:`FedAvg`:" -msgstr "内置策略会返回用户提供的初始参数。下面的示例展示了如何将初始参数传递给 :code:`FedAvg`:" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:2 +msgid "Example: PyTorch - From Centralized To Federated" +msgstr "实例: PyTorch - 从集中式到联邦式" -#: ../../source/how-to-implement-strategies.rst:209 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:4 msgid "" -"The Flower server will call :code:`initialize_parameters`, which either " -"returns the parameters that were passed to :code:`initial_parameters`, or" -" :code:`None`. If no parameters are returned from " -":code:`initialize_parameters` (i.e., :code:`None`), the server will " -"randomly select one client and ask it to provide its parameters. This is " -"a convenience feature and not recommended in practice, but it can be " -"useful for prototyping. In practice, it is recommended to always use " -"server-side parameter initialization." +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload. We are using PyTorch to" +" train a Convolutional Neural Network on the CIFAR-10 dataset. First, we " +"introduce this machine learning task with a centralized training approach" +" based on the `Deep Learning with PyTorch " +"`_ " +"tutorial. Then, we build upon the centralized training code to run the " +"training in a federated fashion." 
msgstr "" -"Flower 服务器将调用 :code:`initialize_parameters`,返回传给 " -":code:`initial_parameters` 的参数或 :code:`None`。如果 " -":code:`initialize_parameters` 没有返回任何参数(即 " -":code:`None`),服务器将随机选择一个客户端并要求其提供参数。这只是一个便捷的功能,在实际应用中并不推荐使用,但在原型开发中可能很有用。在实践中,建议始终使用服务器端参数初始化。" +"本教程将向您展示如何使用 Flower 构建现有机器学习工作的联邦版本。我们使用 PyTorch 在 CIFAR-10 " +"数据集上训练一个卷积神经网络。首先,我们基于 \"Deep Learning with PyTorch " +"`_\"教程,采用集中式训练方法介绍了这项机器学习任务。然后,我们在集中式训练代码的基础上以联邦方式运行训练。" -#: ../../source/how-to-implement-strategies.rst:213 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:14 msgid "" -"Server-side parameter initialization is a powerful mechanism. It can be " -"used, for example, to resume training from a previously saved checkpoint." -" It is also the fundamental capability needed to implement hybrid " -"approaches, for example, to fine-tune a pre-trained model using federated" -" learning." -msgstr "服务器端参数初始化是一种强大的机制。例如,它可以用来从先前保存的检查点恢复训练。它也是实现混合方法所需的基本能力,例如,使用联邦学习对预先训练好的模型进行微调。" - -#: ../../source/how-to-implement-strategies.rst:216 -msgid "The :code:`configure_fit` method" -msgstr ":code:`configure_fit`方法" +"We begin with a brief description of the centralized CNN training code. " +"If you want a more in-depth explanation of what's going on then have a " +"look at the official `PyTorch tutorial " +"`_." +msgstr "" +"我们首先简要介绍一下集中式 CNN 训练代码。如果您想获得更深入的解释,请参阅 PyTorch 官方教程`PyTorch tutorial " +"`_。" -#: ../../source/how-to-implement-strategies.rst:218 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:18 +#, fuzzy msgid "" -":code:`configure_fit` is responsible for configuring the upcoming round " -"of training. What does *configure* mean in this context? Configuring a " -"round means selecting clients and deciding what instructions to send to " -"these clients. The signature of :code:`configure_fit` makes this clear:" +"Let's create a new file called ``cifar.py`` with all the components " +"required for a traditional (centralized) training on CIFAR-10. 
First, all" +" required packages (such as ``torch`` and ``torchvision``) need to be " +"imported. You can see that we do not import any package for federated " +"learning. You can keep all these imports as they are even when we add the" +" federated learning components at a later point." msgstr "" -":code:`configure_fit` " -"负责配置即将开始的一轮训练。*配置*在这里是什么意思?配置一轮训练意味着选择客户并决定向这些客户发送什么指令。:code:`configure_fit`" -" 说明了这一点:" - -#: ../../source/how-to-implement-strategies.rst:231 -msgid "" -"The return value is a list of tuples, each representing the instructions " -"that will be sent to a particular client. Strategy implementations " -"usually perform the following steps in :code:`configure_fit`:" -msgstr "返回值是一个元组列表,每个元组代表将发送到特定客户端的指令。策略实现通常在 :code:`configure_fit` 中执行以下步骤:" - -#: ../../source/how-to-implement-strategies.rst:233 -#: ../../source/how-to-implement-strategies.rst:280 -msgid "" -"Use the :code:`client_manager` to randomly sample all (or a subset of) " -"available clients (each represented as a :code:`ClientProxy` object)" -msgstr "" -"使用 :code:`client_manager` 随机抽样所有(或部分)可用客户端(每个客户端都表示为 :code:`ClientProxy` " -"对象)" +"让我们创建一个名为 :code:`cifar.py` 的新文件,其中包含 CIFAR-10 " +"传统(集中)培训所需的所有组件。首先,需要导入所有必需的软件包(如 :code:`torch` 和 " +":code:`torchvision`)。您可以看到,我们没有导入任何用于联邦学习的软件包。即使在以后添加联邦学习组件时,也可以保留所有这些导入。" -#: ../../source/how-to-implement-strategies.rst:234 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:36 +#, fuzzy msgid "" -"Pair each :code:`ClientProxy` with the same :code:`FitIns` holding the " -"current global model :code:`parameters` and :code:`config` dict" +"As already mentioned we will use the CIFAR-10 dataset for this machine " +"learning workload. The model architecture (a very simple Convolutional " +"Neural Network) is defined in ``class Net()``." 
msgstr "" -"将每个 :code:`ClientProxy` 与持有当前全局模型 :code:`parameters` 和 :code:`config` " -"dict 的 :code:`FitIns` 配对" +"如前所述,我们将使用 CIFAR-10 数据集进行机器学习。模型架构(一个非常简单的卷积神经网络)在 :code:`class Net()` " +"中定义。" -#: ../../source/how-to-implement-strategies.rst:236 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:62 #, fuzzy msgid "" -"More sophisticated implementations can use :code:`configure_fit` to " -"implement custom client selection logic. A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_fit`." +"The ``load_data()`` function loads the CIFAR-10 training and test sets. " +"The ``transform`` normalized the data after loading." msgstr "" -"更复杂的实现可以使用 :code:`configure_fit` 来实现自定义的客户端选择逻辑。只有当相应的 " -":code:`ClientProxy` 包含在 :code:`configure_fit` 返回的列表中时,客户端才会参与进来。" +":code:`load_data()` 函数加载 CIFAR-10 " +"训练集和测试集。加载数据后,:code:`transform`函数对数据进行了归一化处理。" -#: ../../source/how-to-implement-strategies.rst:240 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:84 +#, fuzzy msgid "" -"The structure of this return value provides a lot of flexibility to the " -"user. Since instructions are defined on a per-client basis, different " -"instructions can be sent to each client. This enables custom strategies " -"to train, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." -msgstr "" -"该返回值的结构为用户提供了很大的灵活性。由于指令是按客户端定义的,因此可以向每个客户端发送不同的指令。这使得自定义策略成为可能,例如在不同的客户端上训练不同的模型,或在不同的客户端上使用不同的超参数(通过" -" :code:`config` dict)。" - -#: ../../source/how-to-implement-strategies.rst:243 -msgid "The :code:`aggregate_fit` method" -msgstr ":code:`aggregate_fit` 方法" +"We now need to define the training (function ``train()``) which loops " +"over the training set, measures the loss, backpropagates it, and then " +"takes one optimizer step for each batch of training examples." 
+msgstr "现在,我们需要定义训练函数(:code:`train()`),该函数在训练集上循环训练,计算损失值并反向传播,然后为每批训练数据在优化器上执行一个优化步骤。" -#: ../../source/how-to-implement-strategies.rst:245 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:88 +#, fuzzy msgid "" -":code:`aggregate_fit` is responsible for aggregating the results returned" -" by the clients that were selected and asked to train in " -":code:`configure_fit`." -msgstr ":code:`aggregate_fit` 负责汇总在 :code:`configure_fit` 中选择并要求训练的客户端所返回的结果。" +"The evaluation of the model is defined in the function ``test()``. The " +"function loops over all test samples and measures the loss of the model " +"based on the test dataset." +msgstr "模型的评估在函数 :code:`test()` 中定义。该函数循环遍历所有测试样本,并计算测试数据集的模型损失值。" -#: ../../source/how-to-implement-strategies.rst:258 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:149 msgid "" -"Of course, failures can happen, so there is no guarantee that the server " -"will get results from all the clients it sent instructions to (via " -":code:`configure_fit`). :code:`aggregate_fit` therefore receives a list " -"of :code:`results`, but also a list of :code:`failures`." -msgstr "" -"当然,失败是有可能发生的,因此无法保证服务器会从它发送指令(通过 :code:`configure_fit`)的所有客户端获得结果。因此 " -":code:`aggregate_fit` 会收到 :code:`results` 的列表,但也会收到 :code:`failures` 的列表。" +"Having defined the data loading, model architecture, training, and " +"evaluation we can put everything together and train our CNN on CIFAR-10." +msgstr "在确定了数据加载、模型架构、训练和评估之后,我们就可以将所有整合在一起,在 CIFAR-10 上训练我们的 CNN。" -#: ../../source/how-to-implement-strategies.rst:260 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:177 msgid "" -":code:`aggregate_fit` returns an optional :code:`Parameters` object and a" -" dictionary of aggregated metrics. The :code:`Parameters` return value is" -" optional because :code:`aggregate_fit` might decide that the results " -"provided are not sufficient for aggregation (e.g., too many failures)." 
+"So far, this should all look fairly familiar if you've used PyTorch " +"before. Let's take the next step and use what we've built to create a " +"simple federated learning system consisting of one server and two " +"clients." msgstr "" -":code:`aggregate_fit` 返回一个可选的 :code:`Parameters` " -"对象和一个聚合度量的字典。:code:`Parameters` 返回值是可选的,因为 :code:`aggregate_fit` " -"可能会认为所提供的结果不足以进行聚合(例如,失败次数过多)。" - -#: ../../source/how-to-implement-strategies.rst:263 -msgid "The :code:`configure_evaluate` method" -msgstr ":code:`configure_evaluate`方法" +"到目前为止,如果你以前用过 " +"PyTorch,这一切看起来应该相当熟悉。让我们进行下一步,利用我们所构建的内容创建一个简单联邦学习系统(由一个服务器和两个客户端组成)。" -#: ../../source/how-to-implement-strategies.rst:265 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:184 msgid "" -":code:`configure_evaluate` is responsible for configuring the upcoming " -"round of evaluation. What does *configure* mean in this context? " -"Configuring a round means selecting clients and deciding what " -"instructions to send to these clients. The signature of " -":code:`configure_evaluate` makes this clear:" -msgstr "" -":code:`configure_evaluate` " -"负责配置下一轮评估。*配置*在这里是什么意思?配置一轮评估意味着选择客户端并决定向这些客户端发送什么指令。:code:`configure_evaluate`" -" 说明了这一点:" +"The simple machine learning project discussed in the previous section " +"trains the model on a single dataset (CIFAR-10), we call this centralized" +" learning. This concept of centralized learning, as shown in the previous" +" section, is probably known to most of you, and many of you have used it " +"previously. Normally, if you'd want to run machine learning workloads in " +"a federated fashion, then you'd have to change most of your code and set " +"everything up from scratch. This can be a considerable effort." 
+msgstr "上一节讨论的简单机器学习项目在单一数据集(CIFAR-10)上训练模型,我们称之为集中学习。如上一节所示,集中学习的概念可能为大多数人所熟知,而且很多人以前都使用过。通常情况下,如果要以联邦方式运行机器学习工作,就必须更改大部分代码,并从头开始设置一切。这可能是一个相当大的工作量。" -#: ../../source/how-to-implement-strategies.rst:278 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 msgid "" -"The return value is a list of tuples, each representing the instructions " -"that will be sent to a particular client. Strategy implementations " -"usually perform the following steps in :code:`configure_evaluate`:" -msgstr "返回值是一个元组列表,每个元组代表将发送到特定客户端的指令。策略实现通常在 :code:`configure_evaluate` 中执行以下步骤:" +"However, with Flower you can evolve your pre-existing code into a " +"federated learning setup without the need for a major rewrite." +msgstr "不过,有了 Flower,您可以轻松地将已有的代码转变成联邦学习的模式,无需进行大量重写。" -#: ../../source/how-to-implement-strategies.rst:281 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:194 +#, fuzzy msgid "" -"Pair each :code:`ClientProxy` with the same :code:`EvaluateIns` holding " -"the current global model :code:`parameters` and :code:`config` dict" +"The concept is easy to understand. We have to start a *server* and then " +"use the code in ``cifar.py`` for the *clients* that are connected to the " +"*server*. The *server* sends model parameters to the clients. The " +"*clients* run the training and update the parameters. The updated " +"parameters are sent back to the *server* which averages all received " +"parameter updates. This describes one round of the federated learning " +"process and we repeat this for multiple rounds." 
msgstr "" -"将每个 :code:`ClientProxy` 与持有当前全局模型 :code:`parameters` 和 :code:`config` " -"dict 的 :code:`EvaluateIns` 配对" +"这个概念很容易理解。我们必须启动一个*服务器*,然后对连接到*服务器*的*客户端*使用 " +":code:`cifar.py`中的代码。*服务器*向客户端发送模型参数,*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后会对所有收到的参数更新进行平均聚合。以上描述的是一轮联邦学习过程,我们将重复进行多轮学习。" -#: ../../source/how-to-implement-strategies.rst:283 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:201 +#: ../../source/tutorial-quickstart-jax.rst:147 #, fuzzy msgid "" -"More sophisticated implementations can use :code:`configure_evaluate` to " -"implement custom client selection logic. A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_evaluate`." +"Our example consists of one *server* and two *clients*. Let's set up " +"``server.py`` first. The *server* needs to import the Flower package " +"``flwr``. Next, we use the ``start_server`` function to start a server " +"and tell it to perform three rounds of federated learning." msgstr "" -"更复杂的实现可以使用 :code:`configure_evaluate` 来实现自定义的客户端选择逻辑。只有当相应的 " -":code:`ClientProxy` 包含在 :code:`configure_evaluate` 返回的列表中时,客户端才会参与进来。" +"我们的示例包括一个*服务器*和两个*客户端*。让我们先设置 :code:`server.py`。*服务器*需要导入 Flower 软件包 " +":code:`flwr`。接下来,我们使用 :code:`start_server` 函数启动服务器,并让它执行三轮联邦学习。" -#: ../../source/how-to-implement-strategies.rst:287 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:215 +#: ../../source/tutorial-quickstart-jax.rst:161 +msgid "We can already start the *server*:" +msgstr "我们已经可以启动*服务器*了:" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 +#, fuzzy msgid "" -"The structure of this return value provides a lot of flexibility to the " -"user. Since instructions are defined on a per-client basis, different " -"instructions can be sent to each client. 
This enables custom strategies " -"to evaluate, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." +"Finally, we will define our *client* logic in ``client.py`` and build " +"upon the previously defined centralized training in ``cifar.py``. Our " +"*client* needs to import ``flwr``, but also ``torch`` to update the " +"parameters on our PyTorch model:" msgstr "" -"该返回值的结构为用户提供了很大的灵活性。由于指令是按客户端定义的,因此可以向每个客户端发送不同的指令。这使得自定义策略可以在不同客户端上评估不同的模型,或在不同客户端上使用不同的超参数(通过" -" :code:`config` dict)。" - -#: ../../source/how-to-implement-strategies.rst:291 -msgid "The :code:`aggregate_evaluate` method" -msgstr ":code:`aggregate_evaluate` 方法" +"最后,我们将在 :code:`client.py` 中定义我们的 *client* 逻辑,并以之前在 :code:`cifar.py` " +"中定义的集中式训练为基础。我们的 *client* 不仅需要导入 :code:`flwr`,还需要导入 :code:`torch`,以更新 " +"PyTorch 模型的参数:" -#: ../../source/how-to-implement-strategies.rst:293 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:238 +#, fuzzy msgid "" -":code:`aggregate_evaluate` is responsible for aggregating the results " -"returned by the clients that were selected and asked to evaluate in " -":code:`configure_evaluate`." +"Implementing a Flower *client* basically means implementing a subclass of" +" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " +"implementation will be based on ``flwr.client.NumPyClient`` and we'll " +"call it ``CifarClient``. ``NumPyClient`` is slightly easier to implement " +"than ``Client`` if you use a framework with good NumPy interoperability " +"(like PyTorch or TensorFlow/Keras) because it avoids some of the " +"boilerplate that would otherwise be necessary. 
``CifarClient`` needs to " +"implement four methods, two methods for getting/setting model parameters," +" one method for training the model, and one method for testing the model:" msgstr "" -":code:`aggregate_evaluate` 负责汇总在 :code:`configure_evaluate` " -"中选择并要求评估的客户端返回的结果。" +"实现 Flower *client*基本上意味着实现 :code:`flwr.client.Client` 或 " +":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " +":code:`flwr.client.NumPyClient`,并将其命名为 :code:`CifarClient`。如果使用具有良好 NumPy" +" 互操作性的框架(如 PyTorch 或 TensorFlow/Keras),:code:`NumPyClient`的实现比 " +":code:`Client`略微容易一些,因为它避免了一些不必要的操作。:code:`CifarClient` " +"需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 +#, fuzzy +msgid "``set_parameters``" +msgstr ":code:`set_parameters`" -#: ../../source/how-to-implement-strategies.rst:306 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:248 +#: ../../source/tutorial-quickstart-jax.rst:192 msgid "" -"Of course, failures can happen, so there is no guarantee that the server " -"will get results from all the clients it sent instructions to (via " -":code:`configure_evaluate`). :code:`aggregate_evaluate` therefore " -"receives a list of :code:`results`, but also a list of :code:`failures`." -msgstr "" -"当然,失败是有可能发生的,因此无法保证服务器会从它发送指令(通过 " -":code:`configure_evaluate`)的所有客户端获得结果。因此, :code:`aggregate_evaluate` 会接收 " -":code:`results` 的列表,但也会接收 :code:`failures` 的列表。" +"set the model parameters on the local model that are received from the " +"server" +msgstr "在本地模型上设置从服务器接收的模型参数" -#: ../../source/how-to-implement-strategies.rst:308 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 +#: ../../source/tutorial-quickstart-jax.rst:194 +#, fuzzy msgid "" -":code:`aggregate_evaluate` returns an optional :code:`float` (loss) and a" -" dictionary of aggregated metrics. 
The :code:`float` return value is " -"optional because :code:`aggregate_evaluate` might decide that the results" -" provided are not sufficient for aggregation (e.g., too many failures)." -msgstr "" -":code:`aggregate_evaluate` 返回一个可选的 " -":code:`float`(损失值)和一个聚合指标字典。:code:`float` 返回值是可选的,因为 " -":code:`aggregate_evaluate` 可能会认为所提供的结果不足以进行聚合(例如,失败次数过多)。" +"loop over the list of model parameters received as NumPy ``ndarray``'s " +"(think list of neural network layers)" +msgstr "循环遍历以 NumPy :code:`ndarray` 形式接收的模型参数列表(可以看作神经网络的列表)" -#: ../../source/how-to-implement-strategies.rst:311 -msgid "The :code:`evaluate` method" -msgstr ":code:`evaluate`方法" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 +#: ../../source/tutorial-quickstart-jax.rst:197 +#: ../../source/tutorial-quickstart-scikitlearn.rst:129 +#, fuzzy +msgid "``get_parameters``" +msgstr ":code:`get_parameters`" -#: ../../source/how-to-implement-strategies.rst:313 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 +#: ../../source/tutorial-quickstart-jax.rst:197 +#, fuzzy msgid "" -":code:`evaluate` is responsible for evaluating model parameters on the " -"server-side. Having :code:`evaluate` in addition to " -":code:`configure_evaluate`/:code:`aggregate_evaluate` enables strategies " -"to perform both servers-side and client-side (federated) evaluation." 
+"get the model parameters and return them as a list of NumPy ``ndarray``'s" +" (which is what ``flwr.client.NumPyClient`` expects)" msgstr "" -":code:`evaluate` 负责在服务器端评估模型参数。除了 " -":code:`configure_evaluate`/:code:`aggregate_evaluate` 之外,:code:`evaluate`" -" 可以使策略同时执行服务器端和客户端(联邦)评估。" +"获取模型参数,并以 NumPy :code:`ndarray`的列表形式返回(这正是 " +":code:`flwr.client.NumPyClient`所匹配的格式)" -#: ../../source/how-to-implement-strategies.rst:323 -msgid "" -"The return value is again optional because the strategy might not need to" -" implement server-side evaluation or because the user-defined " -":code:`evaluate` method might not complete successfully (e.g., it might " -"fail to load the server-side evaluation data)." +#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 +#: ../../source/tutorial-quickstart-jax.rst:202 +#: ../../source/tutorial-quickstart-scikitlearn.rst:136 +msgid "``fit``" msgstr "" -"返回值也是可选的,因为策略可能不需要执行服务器端评估,或者因为用户定义的 :code:`evaluate` " -"方法可能无法成功完成(例如,它可能无法加载服务器端评估数据)。" -#: ../../source/how-to-install-flower.rst:2 -msgid "Install Flower" -msgstr "安装Flower" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:255 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:260 +#: ../../source/tutorial-quickstart-jax.rst:200 +#: ../../source/tutorial-quickstart-jax.rst:205 +msgid "" +"update the parameters of the local model with the parameters received " +"from the server" +msgstr "用从服务器接收到的参数更新本地模型的参数" -#: ../../source/how-to-install-flower.rst:6 -msgid "Python version" -msgstr "Python 版本" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 +#: ../../source/tutorial-quickstart-jax.rst:202 +msgid "train the model on the local training set" +msgstr "在本地训练集上训练模型" -#: ../../source/how-to-install-flower.rst:12 -msgid "Install stable release" -msgstr "安装稳定版" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:258 +msgid "get the updated local model weights and return them to the server" +msgstr 
"获取更新后的本地模型参数并发送回服务器" -#: ../../source/how-to-install-flower.rst:15 -#: ../../source/how-to-upgrade-to-flower-next.rst:46 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 +#: ../../source/tutorial-quickstart-jax.rst:208 +#: ../../source/tutorial-quickstart-scikitlearn.rst:139 #, fuzzy -msgid "Using pip" -msgstr "使用 pip" - -#: ../../source/how-to-install-flower.rst:17 -msgid "" -"Stable releases are available on `PyPI " -"`_::" -msgstr "稳定版本可在 `PyPI `_::" - -#: ../../source/how-to-install-flower.rst:21 -msgid "" -"For simulations that use the Virtual Client Engine, ``flwr`` should be " -"installed with the ``simulation`` extra::" -msgstr "对于使用虚拟客户端引擎的模拟,`flwr`` 应与`simulation`` 一起安装:" +msgid "``evaluate``" +msgstr ":code:`evaluate`" -#: ../../source/how-to-install-flower.rst:27 -#, fuzzy -msgid "Using conda (or mamba)" -msgstr "使用 conda(或 mamba)" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:262 +#: ../../source/tutorial-quickstart-jax.rst:207 +msgid "evaluate the updated model on the local test set" +msgstr "在本地测试集上评估更新后的模型" -#: ../../source/how-to-install-flower.rst:29 -#, fuzzy -msgid "Flower can also be installed from the ``conda-forge`` channel." -msgstr "Flower 也可以从 ``conda-forge`` 频道安装。" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 +msgid "return the local loss and accuracy to the server" +msgstr "向服务器返回本地损失值和精确度" -#: ../../source/how-to-install-flower.rst:31 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:265 #, fuzzy msgid "" -"If you have not added ``conda-forge`` to your channels, you will first " -"need to run the following::" -msgstr "如果您尚未在频道中添加 ``conda-forge``,则首先需要运行以下程序::" +"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " +"functions ``train()`` and ``test()`` previously defined in ``cifar.py``. 
" +"So what we really do here is we tell Flower through our ``NumPyClient`` " +"subclass which of our already defined functions to call for training and " +"evaluation. We included type annotations to give you a better " +"understanding of the data types that get passed around." +msgstr "" +"这两个 :code:`NumPyClient` 中的方法 :code:`fit` 和 :code:`evaluate` 使用了之前在 " +":code:`cifar.py` 中定义的函数 :code:`train()` 和 :code:`test()`。因此,我们在这里要做的就是通过 " +":code:`NumPyClient` 子类告知 Flower " +"在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让你更好地理解传递的数据类型。" -#: ../../source/how-to-install-flower.rst:36 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:315 #, fuzzy msgid "" -"Once the ``conda-forge`` channel has been enabled, ``flwr`` can be " -"installed with ``conda``::" -msgstr "一旦启用了 ``conda-forge`` 频道,就可以使用 ``conda``: 安装 ``flwr``:" +"All that's left to do it to define a function that loads both model and " +"data, creates a ``CifarClient``, and starts this client. You load your " +"data and model by using ``cifar.py``. Start ``CifarClient`` with the " +"function ``fl.client.start_client()`` by pointing it at the same IP " +"address we used in ``server.py``:" +msgstr "剩下的就是定义模型和数据加载函数了。创建一个:code:`CifarClient`类,并运行这个客服端。您将通过:code:`cifar.py`加载数据和模型。另外,通过:code:`fl.client.start_client()`函数来运行客户端:code:`CifarClient`,需要保证IP地址和:code:`server.py`中所使用的一致:" -#: ../../source/how-to-install-flower.rst:40 -#, fuzzy -msgid "or with ``mamba``::" -msgstr "或用 ``mamba`` ::" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:338 +#: ../../source/tutorial-quickstart-jax.rst:309 +msgid "And that's it. 
You can now open two additional terminal windows and run" +msgstr "就是这样,现在你可以打开另外两个终端窗口,然后运行" -#: ../../source/how-to-install-flower.rst:46 -msgid "Verify installation" -msgstr "验证安装" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:344 +msgid "" +"in each window (make sure that the server is running before you do so) " +"and see your (previously centralized) PyTorch project run federated " +"learning across two clients. Congratulations!" +msgstr "确保服务器正在运行后,您就能看到您的 PyTorch 项目(之前是集中式的)在两个客户端上运行联邦学习了。祝贺!" -#: ../../source/how-to-install-flower.rst:48 -#, fuzzy +#: ../../source/example-pytorch-from-centralized-to-federated.rst:351 msgid "" -"The following command can be used to verify if Flower was successfully " -"installed. If everything worked, it should print the version of Flower to" -" the command line::" -msgstr "可以使用以下命令来验证 Flower 是否安装成功。如果一切正常,它将在命令行中打印 Flower 的版本::" +"The full source code for this example: `PyTorch: From Centralized To " +"Federated (Code) `_. Our example is, of course, " +"somewhat over-simplified because both clients load the exact same " +"dataset, which isn't realistic. You're now prepared to explore this topic" +" further. How about using different subsets of CIFAR-10 on each client? " +"How about adding more clients?" +msgstr "" +"本示例的完整源代码为:`PyTorch: 从集中式到联合式 " +"`_。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。现在,您已经准备好进一步探讨这一主题了。比如在每个客户端使用不同的" +" CIFAR-10 子集会如何?增加更多客户端会如何?" 
-#: ../../source/how-to-install-flower.rst:55 -msgid "Advanced installation options" -msgstr "高级安装选项" +#: ../../source/explanation-differential-privacy.rst:2 +#: ../../source/explanation-differential-privacy.rst:14 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 +msgid "Differential Privacy" +msgstr "差分隐私" -#: ../../source/how-to-install-flower.rst:58 +#: ../../source/explanation-differential-privacy.rst:4 #, fuzzy -msgid "Install via Docker" -msgstr "安装Flower" +msgid "" +"The information in datasets like healthcare, financial transactions, user" +" preferences, etc., is valuable and has the potential for scientific " +"breakthroughs and provides important business insights. However, such " +"data is also sensitive and there is a risk of compromising individual " +"privacy." +msgstr "医疗保健、金融交易、用户偏好等数据集中的信息非常宝贵,有可能带来科学突破并提供重要的商业见解。然而,这些数据也是敏感数据,存在泄露个人隐私的风险。" -#: ../../source/how-to-install-flower.rst:60 +#: ../../source/explanation-differential-privacy.rst:9 #, fuzzy -msgid ":doc:`How to run Flower using Docker `" -msgstr "" -"`TensorFlow快速入门 (教程) `_" - -#: ../../source/how-to-install-flower.rst:63 -msgid "Install pre-release" -msgstr "安装预发布版本" - -#: ../../source/how-to-install-flower.rst:65 msgid "" -"New (possibly unstable) versions of Flower are sometimes available as " -"pre-release versions (alpha, beta, release candidate) before the stable " -"release happens::" -msgstr "在稳定版发布之前,Flower 的新版本(可能是不稳定版)有时会作为预发布版本(alpha、beta、候选发布版本)提供::" +"Traditional methods like anonymization alone would not work because of " +"attacks like Re-identification and Data Linkage. That's where " +"differential privacy comes in. It provides the possibility of analyzing " +"data while ensuring the privacy of individuals." 
+msgstr "单靠匿名等传统方法是行不通的,因为会受到重新识别和数据链接等攻击。这就是差异化隐私的用武之地。它提供了在分析数据的同时确保个人隐私的可能性。" -#: ../../source/how-to-install-flower.rst:69 +#: ../../source/explanation-differential-privacy.rst:16 +#, fuzzy msgid "" -"For simulations that use the Virtual Client Engine, ``flwr`` pre-releases" -" should be installed with the ``simulation`` extra::" -msgstr "对于使用虚拟客户端引擎的模拟,`flwr``预发行版应与`simulation``一起安装:" - -#: ../../source/how-to-install-flower.rst:74 -msgid "Install nightly release" -msgstr "安装隔夜版本" +"Imagine two datasets that are identical except for a single record (for " +"instance, Alice's data). Differential Privacy (DP) guarantees that any " +"analysis (M), like calculating the average income, will produce nearly " +"identical results for both datasets (O and O' would be similar). This " +"preserves group patterns while obscuring individual details, ensuring the" +" individual's information remains hidden in the crowd." +msgstr "" +"试想一下,两个数据集除了一条记录(例如 Alice " +"的数据)之外完全相同。差分隐私(DP)可以保证任何分析(M),比如计算平均收入,对两个数据集都会产生几乎相同的结果(O 和 O' " +"将是相似的)。这既保留了群体模式,又掩盖了个人细节,确保个人的信息隐藏在人群中。" -#: ../../source/how-to-install-flower.rst:76 -msgid "" -"The latest (potentially unstable) changes in Flower are available as " -"nightly releases::" -msgstr "Flower 中最新(可能不稳定)的更改以隔夜发布的形式提供::" +#: ../../source/explanation-differential-privacy.rst:-1 +#, fuzzy +msgid "DP Intro" +msgstr "DP 介绍" -#: ../../source/how-to-install-flower.rst:80 +#: ../../source/explanation-differential-privacy.rst:27 +#, fuzzy msgid "" -"For simulations that use the Virtual Client Engine, ``flwr-nightly`` " -"should be installed with the ``simulation`` extra::" -msgstr "对于使用虚拟客户端引擎的模拟,`flwr-nightly`应与`simulation`一起安装:" +"One of the most commonly used mechanisms to achieve DP is adding enough " +"noise to the output of the analysis to mask the contribution of each " +"individual in the data while preserving the overall accuracy of the " +"analysis." 
+msgstr "实现 DP 的最常用机制之一是在分析输出中加入足够的噪音,以掩盖数据中每个个体的贡献,同时保持分析的整体准确性。" -#: ../../source/how-to-monitor-simulation.rst:2 -msgid "Monitor simulation" -msgstr "监控模拟" +#: ../../source/explanation-differential-privacy.rst:32 +#, fuzzy +msgid "Formal Definition" +msgstr "编译 ProtoBuf 定义" -#: ../../source/how-to-monitor-simulation.rst:4 +#: ../../source/explanation-differential-privacy.rst:34 +#, fuzzy msgid "" -"Flower allows you to monitor system resources while running your " -"simulation. Moreover, the Flower simulation engine is powerful and " -"enables you to decide how to allocate resources per client manner and " -"constrain the total usage. Insights from resource consumption can help " -"you make smarter decisions and speed up the execution time." +"Differential Privacy (DP) provides statistical guarantees against the " +"information an adversary can infer through the output of a randomized " +"algorithm. It provides an unconditional upper bound on the influence of a" +" single individual on the output of the algorithm by adding noise [1]. A " +"randomized mechanism M provides (:math:`\\epsilon`, " +":math:`\\delta`)-differential privacy if for any two neighboring " +"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " +"record, and for all possible outputs S ⊆ Range(A):" msgstr "" -"Flower 允许您在运行模拟时监控系统资源。此外,Flower " -"仿真引擎功能强大,能让您决定如何按客户端方式分配资源并限制总使用量。从资源消耗中获得的观察可以帮助您做出更明智的决策,并加快执行时间。" +"差分隐私(Differential " +"Privacy,DP)针对对手通过随机算法的输出所能推断出的信息提供统计保证。它为单个个体通过添加噪声对算法输出的影响提供了一个无条件的上限[1]。如果任意两个相邻的数据库D" +" :sub:`1`和D :sub:`2`只有一条记录不同,并且对于所有可能的输出S ⊆ " +"Range(A),随机化机制M提供(:math:`epsilon`,:math:`\\delta`)差异隐私:" -#: ../../source/how-to-monitor-simulation.rst:6 +#: ../../source/explanation-differential-privacy.rst:42 +#, fuzzy msgid "" -"The specific instructions assume you are using macOS and have the " -"`Homebrew `_ package manager installed." 
-msgstr "具体说明假定你使用的是 macOS,并且安装了 `Homebrew `_ 软件包管理器。" - -#: ../../source/how-to-monitor-simulation.rst:10 -msgid "Downloads" -msgstr "下载" +"\\small\n" +"P[M(D_{1} \\in A)] \\leq e^{\\epsilon} P[M(D_{2} \\in A)] + \\delta" +msgstr "" +"\\small\n" +"P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" -#: ../../source/how-to-monitor-simulation.rst:16 +#: ../../source/explanation-differential-privacy.rst:47 +#, fuzzy msgid "" -"`Prometheus `_ is used for data collection, while" -" `Grafana `_ will enable you to visualize the " -"collected data. They are both well integrated with `Ray " -"`_ which Flower uses under the hood." +"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " +"metric of privacy loss. It also controls the privacy-utility trade-off; " +"lower :math:`\\epsilon` values indicate higher levels of privacy but are " +"likely to reduce utility as well. The :math:`\\delta` parameter accounts " +"for a small probability on which the upper bound :math:`\\epsilon` does " +"not hold. The amount of noise needed to achieve differential privacy is " +"proportional to the sensitivity of the output, which measures the maximum" +" change in the output due to the inclusion or removal of a single record." msgstr "" -"`Prometheus `_ 用于收集数据,而 `Grafana " -"`_ 则能让你将收集到的数据可视化。它们都与 Flower 在引擎下使用的 `Ray " -"`_ 紧密集成。" +":math:`\\epsilon`参数也称为隐私预算,是衡量隐私损失的指标。较低的 :math:`\\epsilon` " +"值表示较高的隐私级别,但也可能降低效用。:math:`\\delta`参数考虑了:math:`\\epsilon`上限不成立的小概率。实现差异化隐私所需的噪声量与输出的灵敏度成正比,而输出的灵敏度是指由于包含或删除一条记录而导致的输出的最大变化。" -#: ../../source/how-to-monitor-simulation.rst:18 -msgid "" -"Overwrite the configuration files (depending on your device, it might be " -"installed on a different path)." 
-msgstr "重写配置文件(根据设备的不同,可能安装在不同的路径上)。" +#: ../../source/explanation-differential-privacy.rst:56 +#, fuzzy +msgid "Differential Privacy in Machine Learning" +msgstr "差分隐私" -#: ../../source/how-to-monitor-simulation.rst:20 -msgid "If you are on an M1 Mac, it should be:" -msgstr "如果你使用的是 M1 Mac,应该是这样:" +#: ../../source/explanation-differential-privacy.rst:58 +#, fuzzy +msgid "" +"DP can be utilized in machine learning to preserve the privacy of the " +"training data. Differentially private machine learning algorithms are " +"designed in a way to prevent the algorithm to learn any specific " +"information about any individual data points and subsequently prevent the" +" model from revealing sensitive information. Depending on the stage at " +"which noise is introduced, various methods exist for applying DP to " +"machine learning algorithms. One approach involves adding noise to the " +"training data (either to the features or labels), while another method " +"entails injecting noise into the gradients of the loss function during " +"model training. Additionally, such noise can be incorporated into the " +"model's output." +msgstr "" +"机器学习中可以利用 DP " +"来保护训练数据的隐私。差分保密机器学习算法的设计方式是防止算法学习到任何单个数据点的任何特定信息,从而防止模型泄露敏感信息。根据引入噪声的阶段,有多种方法可将" +" DP " +"应用于机器学习算法。一种方法是在训练数据(特征或标签)中添加噪声,另一种方法是在模型训练过程中向损失函数的梯度注入噪声。此外,这种噪声还可以被纳入模型的输出中。" -#: ../../source/how-to-monitor-simulation.rst:27 -msgid "On the previous generation Intel Mac devices, it should be:" -msgstr "在上一代英特尔 Mac 设备上,应该是这样:" +#: ../../source/explanation-differential-privacy.rst:69 +#, fuzzy +msgid "Differential Privacy in Federated Learning" +msgstr "扩大联邦学习的规模" -#: ../../source/how-to-monitor-simulation.rst:34 +#: ../../source/explanation-differential-privacy.rst:71 +#, fuzzy msgid "" -"Open the respective configuration files and change them. 
Depending on " -"your device, use one of the two following commands:" -msgstr "打开相应的配置文件并修改它们。根据设备情况,使用以下两个命令之一:" +"Federated learning is a data minimization approach that allows multiple " +"parties to collaboratively train a model without sharing their raw data. " +"However, federated learning also introduces new privacy challenges. The " +"model updates between parties and the central server can leak information" +" about the local data. These leaks can be exploited by attacks such as " +"membership inference and property inference attacks, or model inversion " +"attacks." +msgstr "联合学习是一种数据最小化方法,允许多方在不共享原始数据的情况下合作训练一个模型。然而,联合学习也带来了新的隐私挑战。各方与中央服务器之间的模型更新可能会泄露本地数据信息。这些泄漏信息可能会被攻击利用,如成员推断攻击、属性推断攻击或模型反转攻击。" -#: ../../source/how-to-monitor-simulation.rst:44 +#: ../../source/explanation-differential-privacy.rst:78 +#, fuzzy msgid "" -"and then delete all the text in the file and paste a new Prometheus " -"config you see below. You may adjust the time intervals to your " -"requirements:" -msgstr "然后删除文件中的所有文本,粘贴一个新的 Prometheus 配置文件,如下所示。您可以根据需要调整时间间隔:" +"DP can play a crucial role in federated learning to provide privacy for " +"the clients' data." +msgstr "DP 可以在联合学习中发挥重要作用,为客户数据提供隐私保护。" -#: ../../source/how-to-monitor-simulation.rst:59 +#: ../../source/explanation-differential-privacy.rst:81 +#, fuzzy msgid "" -"Now after you have edited the Prometheus configuration, do the same with " -"the Grafana configuration files. Open those using one of the following " -"commands as before:" -msgstr "编辑完 Prometheus 配置后,请对 Grafana 配置文件执行同样的操作。与之前一样,使用以下命令之一打开这些文件:" +"Depending on the granularity of privacy provision or the location of " +"noise addition, different forms of DP exist in federated learning. In " +"this explainer, we focus on two approaches of DP utilization in federated" +" learning based on where the noise is added: at the server (also known as" +" the center) or at the client (also known as the local)." 
+msgstr "" +"根据提供隐私的粒度或添加噪声的位置,联合学习中存在不同形式的 DP。在本说明中,我们将根据添加噪声的位置,重点介绍联合学习中利用 DP " +"的两种方法:在服务器(也称为中心)或客户端(也称为本地)。" -#: ../../source/how-to-monitor-simulation.rst:69 +#: ../../source/explanation-differential-privacy.rst:86 +#, fuzzy msgid "" -"Your terminal editor should open and allow you to apply the following " -"configuration as before." -msgstr "您的终端编辑器应该会打开,并允许您像之前一样应用以下配置。" +"**Central Differential Privacy**: DP is applied by the server and the " +"goal is to prevent the aggregated model from leaking information about " +"each client's data." +msgstr "**中央差分隐私**: DP 由服务器应用,目标是防止聚合模型泄露每个客户的数据信息。" -#: ../../source/how-to-monitor-simulation.rst:84 +#: ../../source/explanation-differential-privacy.rst:88 +#, fuzzy msgid "" -"Congratulations, you just downloaded all the necessary software needed " -"for metrics tracking. Now, let’s start it." -msgstr "恭喜您,您刚刚下载了指标跟踪所需的所有软件。现在,让我们开始吧。" +"**Local Differential Privacy**: DP is applied on the client side before " +"sending any information to the server and the goal is to prevent the " +"updates that are sent to the server from leaking any information about " +"the client's data." +msgstr "**本地差分隐私**: 在向服务器发送任何信息之前,在客户端应用 DP,目的是防止向服务器发送的更新泄露任何有关客户端数据的信息。" -#: ../../source/how-to-monitor-simulation.rst:88 -msgid "Tracking metrics" -msgstr "跟踪指标" +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:93 +#: ../../source/how-to-use-differential-privacy.rst:15 +#, fuzzy +msgid "Central Differential Privacy" +msgstr "差分隐私" -#: ../../source/how-to-monitor-simulation.rst:90 +#: ../../source/explanation-differential-privacy.rst:95 +#, fuzzy msgid "" -"Before running your Flower simulation, you have to start the monitoring " -"tools you have just installed and configured." -msgstr "在运行 Flower 模拟之前,您必须启动刚刚安装和配置的监控工具。" +"In this approach, which is also known as user-level DP, the central " +"server is responsible for adding noise to the globally aggregated " +"parameters. 
It should be noted that trust in the server is required." +msgstr "在这种方法(也称为用户级 DP)中,中央服务器负责在全局汇总参数中添加噪声。需要注意的是,这需要对服务器的信任。" -#: ../../source/how-to-monitor-simulation.rst:97 +#: ../../source/explanation-differential-privacy.rst:104 +#, fuzzy msgid "" -"Please include the following argument in your Python code when starting a" -" simulation." -msgstr "开始模拟时,请在 Python 代码中加入以下参数。" +"While there are various ways to implement central DP in federated " +"learning, we concentrate on the algorithms proposed by [2] and [3]. The " +"overall approach is to clip the model updates sent by the clients and add" +" some amount of noise to the aggregated model. In each iteration, a " +"random set of clients is chosen with a specific probability for training." +" Each client performs local training on its own data. The update of each " +"client is then clipped by some value `S` (sensitivity `S`). This would " +"limit the impact of any individual client which is crucial for privacy " +"and often beneficial for robustness. A common approach to achieve this is" +" by restricting the `L2` norm of the clients' model updates, ensuring " +"that larger updates are scaled down to fit within the norm `S`." +msgstr "" +"虽然在联合学习中实现中央数据处理的方法有很多种,但我们将重点放在[2]和[3]提出的算法上。总体方法是剪辑客户端发送的模型更新,并在聚合模型中添加一定量的噪声。在每次迭代中,以特定概率随机选择一组客户端进行训练。每个客户端对自己的数据进行局部训练。然后,每个客户端的更新会被某个值`S`(灵敏度`S`)剪切。这将限制任何单个客户端的影响,这对隐私至关重要,通常也有利于稳健性。实现这一点的常用方法是限制客户机模型更新的" +" `L2` 准则,确保较大的更新被缩减以适应 `S` 准则。" -#: ../../source/how-to-monitor-simulation.rst:108 -msgid "Now, you are ready to start your workload." 
-msgstr "现在,您可以开始工作了。" +#: ../../source/explanation-differential-privacy.rst:-1 +#, fuzzy +msgid "clipping" +msgstr "剪贴" -#: ../../source/how-to-monitor-simulation.rst:110 +#: ../../source/explanation-differential-privacy.rst:120 +#, fuzzy msgid "" -"Shortly after the simulation starts, you should see the following logs in" -" your terminal:" -msgstr "模拟启动后不久,您就会在终端中看到以下日志:" +"Afterwards, the Gaussian mechanism is used to add noise in order to " +"distort the sum of all clients' updates. The amount of noise is scaled to" +" the sensitivity value to obtain a privacy guarantee. The Gaussian " +"mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " +"noise_scale * S ) / (number of sampled clients)`." +msgstr "" +"然后,使用高斯机制添加噪声,以扭曲所有客户端的更新总和。噪声量与灵敏度值成正比,以获得隐私保证。高斯机制的噪声采样范围为 `N (0, σ²)` " +",其中 σ = ( 噪声规模 * S ) / (采样客户数)`。" -#: ../../source/how-to-monitor-simulation.rst:117 -msgid "You can look at everything at ``_ ." -msgstr "您可以在 ``_ 查看所有内容。" +#: ../../source/explanation-differential-privacy.rst:126 +#, fuzzy +msgid "Clipping" +msgstr "剪贴" -#: ../../source/how-to-monitor-simulation.rst:119 +#: ../../source/explanation-differential-privacy.rst:128 +#, fuzzy msgid "" -"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" -" lowest option)." -msgstr "这是一个 Ray Dashboard。您可以导航到 \"度量标准\"(左侧面板,最低选项)。" +"There are two forms of clipping commonly used in Central DP: Fixed " +"Clipping and Adaptive Clipping." +msgstr "中央处理器常用的剪切有两种形式:固定剪切和自适应剪切。" -#: ../../source/how-to-monitor-simulation.rst:121 +#: ../../source/explanation-differential-privacy.rst:131 +#, fuzzy msgid "" -"Or alternatively, you can just see them in Grafana by clicking on the " -"right-up corner, “View in Grafana”. Please note that the Ray dashboard is" -" only accessible during the simulation. After the simulation ends, you " -"can only use Grafana to explore the metrics. You can start Grafana by " -"going to ``http://localhost:3000/``." 
-msgstr "" -"或者,您也可以点击右上角的 \"在 Grafana 中查看\",在 Grafana 中查看它们。请注意,Ray " -"仪表盘只能在模拟期间访问。模拟结束后,您只能使用 Grafana 浏览指标。您可以访问 ``http://localhost:3000/``启动 " -"Grafana。" +"**Fixed Clipping** : A predefined fix threshold is set for the magnitude " +"of clients' updates. Any update exceeding this threshold is clipped back " +"to the threshold value." +msgstr "** 固定削波** : 为客户端更新的大小设置了一个预定义的固定阈值。任何超过该阈值的更新都会被剪切回阈值。" -#: ../../source/how-to-monitor-simulation.rst:123 +#: ../../source/explanation-differential-privacy.rst:133 +#, fuzzy msgid "" -"After you finish the visualization, stop Prometheus and Grafana. This is " -"important as they will otherwise block, for example port :code:`3000` on " -"your machine as long as they are running." -msgstr "完成可视化后,请停止 Prometheus 和 Grafana。这一点很重要,否则只要它们在运行,就会阻塞机器上的端口 :code:`3000`。" - -#: ../../source/how-to-monitor-simulation.rst:132 -msgid "Resource allocation" -msgstr "资源分配" +"**Adaptive Clipping** : The clipping threshold dynamically adjusts based " +"on the observed update distribution [4]. It means that the clipping value" +" is tuned during the rounds with respect to the quantile of the update " +"norm distribution." +msgstr "** 自适应削波** : 削波阈值根据观察到的更新分布动态调整[4]。这意味着,在各轮中,会根据更新规范分布的量化值调整削波值。" -#: ../../source/how-to-monitor-simulation.rst:134 +#: ../../source/explanation-differential-privacy.rst:137 +#, fuzzy msgid "" -"You must understand how the Ray library works to efficiently allocate " -"system resources to simulation clients on your own." -msgstr "您必须了解 Ray 库是如何工作的,才能有效地为自己的仿真客户端分配系统资源。" +"The choice between fixed and adaptive clipping depends on various factors" +" such as privacy requirements, data distribution, model complexity, and " +"others." 
+msgstr "在固定剪切和自适应剪切之间做出选择取决于各种因素,如隐私要求、数据分布、模型复杂性等。" + +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:141 +#: ../../source/how-to-use-differential-privacy.rst:113 +#, fuzzy +msgid "Local Differential Privacy" +msgstr "差分隐私" -#: ../../source/how-to-monitor-simulation.rst:136 +#: ../../source/explanation-differential-privacy.rst:143 +#, fuzzy msgid "" -"Initially, the simulation (which Ray handles under the hood) starts by " -"default with all the available resources on the system, which it shares " -"among the clients. It doesn't mean it divides it equally among all of " -"them, nor that the model training happens at all of them simultaneously. " -"You will learn more about that in the later part of this blog. You can " -"check the system resources by running the following:" +"In this approach, each client is responsible for performing DP. Local DP " +"avoids the need for a fully trusted aggregator, but it should be noted " +"that local DP leads to a decrease in accuracy but better privacy in " +"comparison to central DP." msgstr "" -"最初,模拟(由 Ray " -"在引擎下处理)默认使用系统上的所有可用资源启动,并在客户端之间共享。但这并不意味着它会将资源平均分配给所有客户端,也不意味着模型训练会在所有客户端同时进行。您将在本博客的后半部分了解到更多相关信息。您可以运行以下命令检查系统资源:" +"在这种方法中,每个客户端都负责执行 DP。本地 DP 避免了对完全可信的聚合器的需求,但需要注意的是,与中央 DP 相比,本地 DP " +"会降低准确性,但却能更好地保护隐私。" -#: ../../source/how-to-monitor-simulation.rst:143 -msgid "In Google Colab, the result you see might be similar to this:" -msgstr "在 Google Colab 中,您看到的结果可能与此类似:" +#: ../../source/explanation-differential-privacy.rst:152 +#, fuzzy +msgid "In this explainer, we focus on two forms of achieving Local DP:" +msgstr "在本说明中,我们将重点介绍实现本地 DP 的两种形式:" -#: ../../source/how-to-monitor-simulation.rst:155 +#: ../../source/explanation-differential-privacy.rst:154 +#, fuzzy msgid "" -"However, you can overwrite the defaults. 
When starting a simulation, do " -"the following (you don't need to overwrite all of them):" -msgstr "不过,您可以覆盖默认值。开始模拟时,请执行以下操作(不必全部覆盖):" +"Each client adds noise to the local updates before sending them to the " +"server. To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " +"the sensitivity of the local model to be ∆, Gaussian noise is applied " +"with a noise scale of σ where:" +msgstr "" +"每个客户端在向服务器发送本地更新之前,都会在本地更新中加入噪声。为了实现(:math:`\\epsilon`, " +":math:`\\delta`)-DP,考虑到本地模型的灵敏度为 ∆,应用了高斯噪声,噪声尺度为 σ,其中:" -#: ../../source/how-to-monitor-simulation.rst:175 -msgid "Let’s also specify the resource for a single client." -msgstr "我们还可以为单个客户指定资源。" +#: ../../source/explanation-differential-privacy.rst:158 +#, fuzzy +msgid "" +"\\small\n" +"\\frac{∆ \\times \\sqrt{2 \\times " +"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}" +msgstr "" +"\\small\n" +"\\frac{∆ \\times \\sqrt{2 \\times " +"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" +"\n" -#: ../../source/how-to-monitor-simulation.rst:205 +#: ../../source/explanation-differential-privacy.rst:163 +#, fuzzy msgid "" -"Now comes the crucial part. Ray will start a new client only when it has " -"all the required resources (such that they run in parallel) when the " -"resources allow." -msgstr "现在到了关键部分。只有在资源允许的情况下,Ray 才会在拥有所有所需资源(如并行运行)时启动新客户端。" +"Each client adds noise to the gradients of the model during the local " +"training (DP-SGD). More specifically, in this approach, gradients are " +"clipped and an amount of calibrated noise is injected into the gradients." +msgstr "在局部训练过程中,每个客户端都会向模型的梯度添加噪声(DP-SGD)。更具体地说,在这种方法中,梯度会被剪切,并在梯度中注入一定量的校准噪声。" -#: ../../source/how-to-monitor-simulation.rst:207 +#: ../../source/explanation-differential-privacy.rst:167 +#, fuzzy msgid "" -"In the example above, only one client will be run, so your clients won't " -"run concurrently. Setting :code:`client_num_gpus = 0.5` would allow " -"running two clients and therefore enable them to run concurrently. 
Be " -"careful not to require more resources than available. If you specified " -":code:`client_num_gpus = 2`, the simulation wouldn't start (even if you " -"had 2 GPUs but decided to set 1 in :code:`ray_init_args`)." -msgstr "" -"在上面的示例中,将只运行一个客户端,因此您的客户端不会并发运行。设置 :code:`client_num_gpus = 0.5` " -"将允许运行两个客户端,从而使它们能够并发运行。请注意,所需的资源不要超过可用资源。如果您指定 :code:`client_num_gpus = " -"2`,模拟将无法启动(即使您有 2 个 GPU,但决定在 :code:`ray_init_args` 中设置为 1)。" +"Please note that these two approaches are providing privacy at different " +"levels." +msgstr "请注意,这两种方法提供了不同层次的隐私。" -#: ../../source/how-to-monitor-simulation.rst:212 ../../source/ref-faq.rst:2 -msgid "FAQ" -msgstr "常见问题" +#: ../../source/explanation-differential-privacy.rst:169 +#, fuzzy +msgid "**References:**" +msgstr "参考资料" -#: ../../source/how-to-monitor-simulation.rst:214 -msgid "Q: I don't see any metrics logged." -msgstr "问:我没有看到任何指标记录。" +#: ../../source/explanation-differential-privacy.rst:171 +#, fuzzy +msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." +msgstr "[1] Dwork 等:《差分隐私的算法基础》。" -#: ../../source/how-to-monitor-simulation.rst:216 +#: ../../source/explanation-differential-privacy.rst:173 +#, fuzzy msgid "" -"A: The timeframe might not be properly set. The setting is in the top " -"right corner (\"Last 30 minutes\" by default). Please change the " -"timeframe to reflect the period when the simulation was running." -msgstr "答:时间范围可能没有正确设置。设置在右上角(默认为 \"最后 30 分钟\")。请更改时间框架,以反映模拟运行的时间段。" +"[2] McMahan et al. Learning Differentially Private Recurrent Language " +"Models." +msgstr "" +"McMahan, H. Brendan等. \"Learning differentially private recurrent " +"language models.\" arXiv preprint arXiv:1710.06963 (2017)." -#: ../../source/how-to-monitor-simulation.rst:218 +#: ../../source/explanation-differential-privacy.rst:175 +#, fuzzy msgid "" -"Q: I see “Grafana server not detected. 
Please make sure the Grafana " -"server is running and refresh this page” after going to the Metrics tab " -"in Ray Dashboard." -msgstr "问:我看到 \"未检测到 Grafana 服务器。请确保 Grafana 服务器正在运行并刷新此页面\"。" +"[3] Geyer et al. Differentially Private Federated Learning: A Client " +"Level Perspective." +msgstr "[3] Geyer 等人。差异化化私人联合学习:客户层面的视角。" -#: ../../source/how-to-monitor-simulation.rst:220 -msgid "" -"A: You probably don't have Grafana running. Please check the running " -"services" -msgstr "答:您可能没有运行 Grafana。请检查正在运行的服务" +#: ../../source/explanation-differential-privacy.rst:177 +#, fuzzy +msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." +msgstr "" +"Andrew, Galen等. \"Differentially private learning with adaptive " +"clipping.\" Advances in Neural Information Processing Systems 34 (2021): " +"17455-17466." -#: ../../source/how-to-monitor-simulation.rst:226 -msgid "" -"Q: I see \"This site can't be reached\" when going to " -"``_." -msgstr "问:在访问 ``_时,我看到 \"无法访问该网站\"。" +#: ../../source/explanation-federated-evaluation.rst:2 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 +msgid "Federated evaluation" +msgstr "联邦学习评估" -#: ../../source/how-to-monitor-simulation.rst:228 +#: ../../source/explanation-federated-evaluation.rst:4 msgid "" -"A: Either the simulation has already finished, or you still need to start" -" Prometheus." -msgstr "答:要么模拟已经完成,要么您还需要启动Prometheus。" +"There are two main approaches to evaluating models in federated learning " +"systems: centralized (or server-side) evaluation and federated (or " +"client-side) evaluation." 
+msgstr "评估联合学习系统中的模型主要有两种方法:集中(或服务器端)评估和联邦(或客户端)评估。" -#: ../../source/how-to-monitor-simulation.rst:232 -msgid "Resources" -msgstr "资源" +#: ../../source/explanation-federated-evaluation.rst:8 +msgid "Centralized Evaluation" +msgstr "集中评估" -#: ../../source/how-to-monitor-simulation.rst:234 -#, fuzzy +#: ../../source/explanation-federated-evaluation.rst:11 +msgid "Built-In Strategies" +msgstr "内置策略" + +#: ../../source/explanation-federated-evaluation.rst:13 msgid "" -"Ray Dashboard: ``_" -msgstr "Ray 仪表盘: ``_" +"All built-in strategies support centralized evaluation by providing an " +"evaluation function during initialization. An evaluation function is any " +"function that can take the current global model parameters as input and " +"return evaluation results:" +msgstr "所有内置策略都通过在初始化过程中提供一个评估函数来支持集中评估。评估函数是任何可以将当前全局模型参数作为输入并返回评估结果的函数:" + +#: ../../source/explanation-federated-evaluation.rst:61 +msgid "Custom Strategies" +msgstr "定制策略" -#: ../../source/how-to-monitor-simulation.rst:236 +#: ../../source/explanation-federated-evaluation.rst:63 #, fuzzy -msgid "Ray Metrics: ``_" +msgid "" +"The ``Strategy`` abstraction provides a method called ``evaluate`` that " +"can directly be used to evaluate the current global model parameters. The" +" current server implementation calls ``evaluate`` after parameter " +"aggregation and before federated evaluation (see next paragraph)." 
msgstr "" -"Ray 指标: ``_" +":code:`Strategy` 抽象提供了一个名为 :code:`evaluate` " +"的方法,可直接用于评估当前的全局模型参数。服务器会在参数聚合后和联邦评估前调用 :code:`evaluate`(见下段)。" -#: ../../source/how-to-run-flower-using-docker.rst:2 -#, fuzzy -msgid "Run Flower using Docker" -msgstr "使用 Docker 运行 Flower" +#: ../../source/explanation-federated-evaluation.rst:69 +msgid "Federated Evaluation" +msgstr "联邦评估" + +#: ../../source/explanation-federated-evaluation.rst:72 +msgid "Implementing Federated Evaluation" +msgstr "实现联邦评估" -#: ../../source/how-to-run-flower-using-docker.rst:4 +#: ../../source/explanation-federated-evaluation.rst:74 #, fuzzy msgid "" -"The simplest way to get started with Flower is by using the pre-made " -"Docker images, which you can find on `Docker Hub " -"`__." -msgstr "" -"开始使用 Flower 的最简单方法是使用预制的 Docker 镜像,您可以在 `Docker Hub " -"`_ 上找到这些镜像。" +"Client-side evaluation happens in the ``Client.evaluate`` method and can " +"be configured from the server side." +msgstr "客户端评估在 :code:`Client.evaluate` 方法中进行,并可从服务器端进行配置。" -#: ../../source/how-to-run-flower-using-docker.rst:7 -#, fuzzy -msgid "Before you start, make sure that the Docker daemon is running:" -msgstr "开始之前,请确保 Docker 守护进程正在运行:" +#: ../../source/explanation-federated-evaluation.rst:108 +msgid "Configuring Federated Evaluation" +msgstr "配置联邦评估" + +#: ../../source/explanation-federated-evaluation.rst:110 +msgid "" +"Federated evaluation can be configured from the server side. Built-in " +"strategies support the following arguments:" +msgstr "联邦评估可从服务器端进行配置。内置策略支持以下参数:" -#: ../../source/how-to-run-flower-using-docker.rst:14 +#: ../../source/explanation-federated-evaluation.rst:113 #, fuzzy msgid "" -"If you do not see the version of Docker but instead get an error saying " -"that the command was not found, you will need to install Docker first. " -"You can find installation instruction `here `_." +"``fraction_evaluate``: a ``float`` defining the fraction of clients that " +"will be selected for evaluation. 
If ``fraction_evaluate`` is set to " +"``0.1`` and ``100`` clients are connected to the server, then ``10`` will" +" be randomly selected for evaluation. If ``fraction_evaluate`` is set to " +"``0.0``, federated evaluation will be disabled." msgstr "" -"如果没有看到 Docker 的版本,而是出现找不到命令的错误,则需要先安装 Docker。你可以在 " -"`_ 找到安装说明。" +":code:`fraction_evaluate`: :code:`float`,定义了被选中进行评估的客户端的比例。如果 " +":code:`fraction_evaluate` 设置为 :code:`0.1`,并且 :code:`100` 个客户端连接到服务器,那么 " +":code:`10` 个客户端将被随机选中进行评估。如果 :code:`fraction_evaluate` 设置为 " +":code:`0.0`,联邦评估将被禁用。" -#: ../../source/how-to-run-flower-using-docker.rst:20 +#: ../../source/explanation-federated-evaluation.rst:118 #, fuzzy msgid "" -"On Linux, Docker commands require ``sudo`` privilege. If you want to " -"avoid using ``sudo``, you can follow the `Post-installation steps " -"`_ on the " -"official Docker website." +"``min_evaluate_clients``: an ``int``: the minimum number of clients to be" +" selected for evaluation. If ``fraction_evaluate`` is set to ``0.1``, " +"``min_evaluate_clients`` is set to 20, and ``100`` clients are connected " +"to the server, then ``20`` clients will be selected for evaluation." msgstr "" -"在 Linux 上,Docker 命令需要 ``sudo`` 权限。如果你想避免使用 ``sudo``,可以按照 Docker 官方网站上的 " -"`安装后步骤 `_进行操作。" +":code:`min_evaluate_clients`:一个 :code:`int`,需要评估的客户的最小数量。如果 " +":code:`fraction_evaluate` 设置为 :code:`0.1`,:code:`min_evaluate_clients` " +"设置为 20,并且有 :code:`100` 个客户端已连接到服务器,那么 :code:`20` 个客户端将被选中进行评估。" -#: ../../source/how-to-run-flower-using-docker.rst:26 +#: ../../source/explanation-federated-evaluation.rst:122 +#, fuzzy msgid "" -"To ensure optimal performance and compatibility, the SuperLink, SuperNode" -" and ServerApp image must have the same version when running together. " -"This guarantees seamless integration and avoids potential conflicts or " -"issues that may arise from using different versions." 
+"``min_available_clients``: an ``int`` that defines the minimum number of " +"clients which need to be connected to the server before a round of " +"federated evaluation can start. If fewer than ``min_available_clients`` " +"are connected to the server, the server will wait until more clients are " +"connected before it continues to sample clients for evaluation." msgstr "" +":code:`min_available_clients`: " +":code:`int`,定义了在一轮联邦评估开始之前,需要连接到服务器的最小客户端数量。如果连接到服务器的客户端数量少于 " +":code:`min_available_clients`,服务器将等待更多客户端连接后,才继续采样客户端进行评估。" -#: ../../source/how-to-run-flower-using-docker.rst:31 +#: ../../source/explanation-federated-evaluation.rst:127 #, fuzzy -msgid "Flower SuperLink" -msgstr "flower-superlink" +msgid "" +"``on_evaluate_config_fn``: a function that returns a configuration " +"dictionary which will be sent to the selected clients. The function will " +"be called during each round and provides a convenient way to customize " +"client-side evaluation from the server side, for example, to configure " +"the number of validation steps performed." +msgstr "code:`on_evaluate_config_fn`:返回配置字典的函数,该字典将发送给选定的客户端。该函数将在每一轮中被调用,并提供了一种方便的方法来从服务器端自定义客户端评估,例如,配置执行的验证步骤数。" -#: ../../source/how-to-run-flower-using-docker.rst:34 -#, fuzzy -msgid "Quickstart" -msgstr "快速入门 JAX" +#: ../../source/explanation-federated-evaluation.rst:157 +msgid "Evaluating Local Model Updates During Training" +msgstr "评估训练期间的本地模型更新" -#: ../../source/how-to-run-flower-using-docker.rst:36 +#: ../../source/explanation-federated-evaluation.rst:159 #, fuzzy -msgid "If you're looking to try out Flower, you can use the following command:" -msgstr "如果您想试用 Flower,可以使用以下命令:" +msgid "" +"Model parameters can also be evaluated during training. 
``Client.fit`` " +"can return arbitrary evaluation results as a dictionary:" +msgstr "模型参数也可在训练过程中进行评估。 :code:`Client.fit`可以字典形式返回任意评估结果:" -#: ../../source/how-to-run-flower-using-docker.rst:42 -#, fuzzy +#: ../../source/explanation-federated-evaluation.rst:201 +msgid "Full Code Example" +msgstr "完整代码示例" + +#: ../../source/explanation-federated-evaluation.rst:203 msgid "" -"The command pulls the Docker image with the tag ``1.8.0`` from Docker " -"Hub. The tag specifies the Flower version. In this case, Flower 1.8.0. " -"The ``--rm`` flag tells Docker to remove the container after it exits." +"For a full code example that uses both centralized and federated " +"evaluation, see the *Advanced TensorFlow Example* (the same approach can " +"be applied to workloads implemented in any other framework): " +"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" msgstr "" -"该命令将从 Docker Hub 提取标签为``1.7.0-py3.11-ubuntu22.04``的 Docker 镜像。标签包含使用 " -"Flower、Python 和 Ubuntu 的信息。在本例中,它使用了 Flower 1.7.0、Python 3.11 和 Ubuntu " -"22.04。rm \"标记告诉 Docker 在退出后移除容器。" +"有关同时使用集中评估和联邦评估的完整代码示例,请参阅 *Advanced TensorFlow " +"Example*(同样的方法也可应用于任何其他框架中): " +"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" -#: ../../source/how-to-run-flower-using-docker.rst:48 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:-1 msgid "" -"By default, the Flower SuperLink keeps state in-memory. When using the " -"Docker flag ``--rm``, the state is not persisted between container " -"starts. We will show below how to save the state in a file on your host " -"system." +"Explore the federated learning architecture of the Flower framework, " +"featuring multi-run, concurrent execution, and scalable, secure machine " +"learning while preserving data privacy." 
msgstr "" -"默认情况下,Flower 服务器会将状态保存在内存中。使用 Docker 标志 ``--rm`` " -"时,状态不会在容器启动之间持久化。下面我们将展示如何将状态保存到主机系统上的文件中。" -#: ../../source/how-to-run-flower-using-docker.rst:52 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:2 +msgid "Flower Architecture" +msgstr "Flower的架构" + +#: ../../source/explanation-flower-architecture.rst:4 msgid "" -"The ``-p :`` flag tells Docker to map the ports " -"``9091``/``9092`` of the host to ``9091``/``9092`` of the container, " -"allowing you to access the Driver API on ``http://localhost:9091`` and " -"the Fleet API on ``http://localhost:9092``. Lastly, any flag that comes " -"after the tag is passed to the Flower SuperLink. Here, we are passing the" -" flag ``--insecure``." +"This page explains the architecture of deployed Flower federated learning" +" system." msgstr "" -"``-p :`` 标记会告诉 Docker 将主机的端口 ``9091``/``9092`` 映射到容器的端口 " -"``9091``/`9092``,这样你就可以在 ``http://localhost:9091`` 上访问 Driver API,在 " -"``http://localhost:9092`` 上访问 Fleet API。最后,标签后面的任何标志都会传递给 Flower " -"服务器。在这里,我们传递的标志是 ``--insecure`` 。" -#: ../../source/how-to-run-flower-using-docker.rst:59 -#: ../../source/how-to-run-flower-using-docker.rst:238 -#: ../../source/how-to-run-flower-using-docker.rst:354 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:6 msgid "" -"The ``--insecure`` flag enables insecure communication (using HTTP, not " -"HTTPS) and should only be used for testing purposes. We strongly " -"recommend enabling `SSL `__ when " -"deploying to a production environment." +"In federated learning (FL), there is typically one server and a number of" +" clients that are connected to the server. This is often called a " +"federation." 
msgstr "" -"不安全 \"标志启用不安全通信(使用 HTTP,而非 HTTPS),只能用于测试目的。我们强烈建议在部署到生产环境时启用 `SSL " -"`_。" -#: ../../source/how-to-run-flower-using-docker.rst:64 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:9 msgid "" -"You can use ``--help`` to view all available flags that the SuperLink " -"supports:" -msgstr "您可以使用 ``--help`` 查看服务器支持的所有可用标记:" +"The role of the server is to coordinate the training process. The role of" +" each client is to receive tasks from the server, execute those tasks and" +" return the results back to the server." +msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:71 -#, fuzzy -msgid "Mounting a volume to store the state on the host system" -msgstr "在主机系统上挂载卷以存储状态" +#: ../../source/explanation-flower-architecture.rst:13 +msgid "This is sometimes called a hub-and-spoke topology:" +msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:73 +#: ../../source/explanation-flower-architecture.rst:21 #, fuzzy +msgid "Hub-and-spoke topology in federated learning" +msgstr "什么是联邦学习?" + +#: ../../source/explanation-flower-architecture.rst:21 msgid "" -"If you want to persist the state of the SuperLink on your host system, " -"all you need to do is specify a path where you want to save the file on " -"your host system and a name for the database file. In the example below, " -"we tell Docker via the flag ``--volume`` to mount the user's home " -"directory (``~/`` on your host) into the ``/app/`` directory of the " -"container. Furthermore, we use the flag ``--database`` to specify the " -"name of the database file." +"Hub-and-spoke topology in federated learning (one server, multiple " +"clients)." 
msgstr "" -"如果想在主机系统上持久保存服务器的状态,只需在主机系统上指定保存文件的路径和数据库文件的名称即可。在下面的示例中,我们通过标志 ``-v`` 告诉" -" Docker 将用户的主目录(主机上的 ``~/``)挂载到容器的 ``/app/`` 目录中。此外,我们使用标志 ``--database``" -" 来指定数据库文件的名称。" -#: ../../source/how-to-run-flower-using-docker.rst:86 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:23 msgid "" -"As soon as the SuperLink starts, the file ``state.db`` is created in the " -"user's home directory on your host system. If the file already exists, " -"the SuperLink tries to restore the state from the file. To start the " -"SuperLink with an empty database, simply remove the ``state.db`` file." +"In a real-world deployment, we typically want to run different projects " +"on such a federation. Each project could use different hyperparameters, " +"different model architectures, different aggregation strategies, or even " +"different machine learning frameworks like PyTorch and TensorFlow." msgstr "" -"服务器一启动,就会在主机系统的用户主目录下创建文件 " -"``state.db``。如果该文件已经存在,服务器会尝试从该文件恢复状态。要以空数据库启动服务器,只需删除 ``state.db`` 文件即可。" -#: ../../source/how-to-run-flower-using-docker.rst:91 -#: ../../source/how-to-run-flower-using-docker.rst:260 -#: ../../source/how-to-run-flower-using-docker.rst:375 -#, fuzzy -msgid "Enabling SSL for secure connections" -msgstr "启用 SSL 连接" - -#: ../../source/how-to-run-flower-using-docker.rst:93 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:28 msgid "" -"To enable SSL, you will need a PEM-encoded root certificate, a PEM-" -"encoded private key and a PEM-encoded certificate chain." -msgstr "要启用 SSL,需要 CA 证书、服务器证书和服务器私钥。" +"This is why, in Flower, both the server side and the client side are " +"split into two parts. One part is long-lived and responsible for " +"communicating across the network, the other part is short-lived and " +"executes task-specific code." 
+msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:97 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:32 +msgid "A Flower `server` consists of **SuperLink** and ``ServerApp``:" +msgstr "" + +#: ../../source/explanation-flower-architecture.rst:34 msgid "" -"For testing purposes, you can generate your own self-signed certificates." -" The `Enable SSL connections `__ page contains a section that" -" will guide you through the process." +"**SuperLink**: a long-running process that forwards task instructions to " +"clients (SuperNodes) and receives task results back." msgstr "" -"出于测试目的,你可以生成自己的自签名证书。启用 SSL 连接 `_ 页面中有一个部分将指导你完成这一过程。" -#: ../../source/how-to-run-flower-using-docker.rst:101 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:36 msgid "" -"Assuming all files we need are in the local ``certificates`` directory, " -"we can use the flag ``--volume`` to mount the local directory into the " -"``/app/`` directory of the container. This allows the SuperLink to access" -" the files within the container. Finally, we pass the names of the " -"certificates to the SuperLink with the ``--certificates`` flag." +"``ServerApp``: a short-lived process with project-spcific code that " +"customizes all server-side aspects of federated learning systems (client " +"selection, client configuration, result aggregation). This is what AI " +"researchers and AI engineers write when they build Flower apps." 
msgstr "" -"假设我们需要的所有文件都在本地的 ``certificates`` 目录中,我们可以使用标记 ``-v`` 将本地目录挂载到容器的 " -"``/app/`` 目录中。这样,服务器就可以访问容器内的文件。最后,我们使用 ``--certificates`` 标志将证书名称传递给服务器。" -#: ../../source/how-to-run-flower-using-docker.rst:113 -#, fuzzy -msgid "Flower SuperNode" -msgstr "Flower 服务器" +#: ../../source/explanation-flower-architecture.rst:41 +msgid "A Flower `client` consists of **SuperNode** and ``ClientApp``:" +msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:115 +#: ../../source/explanation-flower-architecture.rst:43 msgid "" -"The SuperNode Docker image comes with a pre-installed version of Flower " -"and serves as a base for building your own SuperNode image." +"**SuperNode**: a long-running process that connects to the SuperLink, " +"asks for tasks, executes tasks (for example, \"train this model on your " +"local data\") and returns task results back to the SuperLink." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:120 +#: ../../source/explanation-flower-architecture.rst:46 msgid "" -"The SuperNode Docker image currently works only with the 1.9.0-nightly " -"release. A stable version will be available when Flower 1.9.0 (stable) " -"gets released (ETA: May). A SuperNode nightly image must be paired with " -"the corresponding SuperLink and ServerApp nightly images released on the " -"same day. To ensure the versions are in sync, using the concrete tag, " -"e.g., ``1.9.0.dev20240501`` instead of ``nightly`` is recommended." +"``ClientApp``: a short-lived process with project-specific code that " +"customizes all client-side aspects of federated learning systems (local " +"model training and evaluation, pre- and post-processing). This is what AI" +" researchers and AI engineers write when they build Flower apps." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:126 +#: ../../source/explanation-flower-architecture.rst:51 msgid "" -"We will use the ``quickstart-pytorch`` example, which you can find in the" -" Flower repository, to illustrate how you can dockerize your ClientApp." +"Why SuperNode and SuperLink? Well, in federated learning, the clients are" +" the actual stars of the show. They hold the training data and they run " +"the actual training. This is why Flower decided to name them " +"**SuperNode**. The **SuperLink** is then responsible for acting as the " +"`missing link` between all those SuperNodes." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:134 +#: ../../source/explanation-flower-architecture.rst:62 #, fuzzy -msgid "" -"Before we can start, we need to meet a few prerequisites in our local " -"development environment. You can skip the first part if you want to run " -"your ClientApp instead of the ``quickstart-pytorch`` example." -msgstr "在开始之前,我们需要在本地开发环境中满足一些先决条件。" +msgid "Basic Flower architecture" +msgstr "Flower的架构" -#: ../../source/how-to-run-flower-using-docker.rst:138 +#: ../../source/explanation-flower-architecture.rst:62 #, fuzzy -msgid "Clone the Flower repository." -msgstr "**叉花仓库**" - -#: ../../source/how-to-run-flower-using-docker.rst:152 -msgid "Creating a SuperNode Dockerfile" -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:154 -#: ../../source/how-to-run-flower-using-docker.rst:289 -msgid "Let's assume the following project layout:" -msgstr "" +msgid "The basic Flower architecture for federated learning." +msgstr "本轮联邦学习。" -#: ../../source/how-to-run-flower-using-docker.rst:163 +#: ../../source/explanation-flower-architecture.rst:64 msgid "" -"First, we need to create a ``requirements.txt`` file in the directory " -"where the ``ClientApp`` code is located. In the file, we list all the " -"dependencies that the ClientApp requires." 
+"In a Flower app project, users will typically develop the ``ServerApp`` " +"and the ``ClientApp``. All the network communication between `server` and" +" `clients` is taken care of by the SuperLink and SuperNodes." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:175 +#: ../../source/explanation-flower-architecture.rst:70 msgid "" -"Note that `flwr `__ is already installed " -"in the ``flwr/supernode`` base image, so you only need to include other " -"package dependencies in your ``requirements.txt``, such as ``torch``, " -"``tensorflow``, etc." +"For more details, please refer to the |serverapp_link|_ and " +"|clientapp_link|_ documentation." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:179 +#: ../../source/explanation-flower-architecture.rst:73 msgid "" -"Next, we create a Dockerfile. If you use the ``quickstart-pytorch`` " -"example, create a new file called ``Dockerfile.supernode`` in ``examples" -"/quickstart-pytorch``." +"With *multi-run*, multiple ``ServerApp``\\s and ``ClientApp``\\s are now " +"capable of running on the same federation consisting of a single long-" +"running SuperLink and multiple long-running SuperNodes. This is sometimes" +" referred to as `multi-tenancy` or `multi-job`." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:182 +#: ../../source/explanation-flower-architecture.rst:78 msgid "" -"The ``Dockerfile.supernode`` contains the instructions that assemble the " -"SuperNode image." +"As shown in the figure below, two projects, each consisting of a " +"``ServerApp`` and a ``ClientApp``, could share the same SuperLink and " +"SuperNodes." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:196 -msgid "" -"In the first two lines, we instruct Docker to use the SuperNode image " -"tagged ``nightly`` as a base image and set our working directory to " -"``/app``. The following instructions will now be executed in the ``/app``" -" directory. 
Next, we install the ClientApp dependencies by copying the " -"``requirements.txt`` file into the image and run ``pip install``. In the " -"last two lines, we copy the ``client.py`` module into the image and set " -"the entry point to ``flower-client-app`` with the argument " -"``client:app``. The argument is the object reference of the ClientApp " -"(``:``) that will be run inside the ClientApp." -msgstr "" +#: ../../source/explanation-flower-architecture.rst:87 +#, fuzzy +msgid "Multi-tenancy federated learning architecture" +msgstr "使用联邦学习策略" -#: ../../source/how-to-run-flower-using-docker.rst:205 +#: ../../source/explanation-flower-architecture.rst:87 #, fuzzy -msgid "Building the SuperNode Docker image" -msgstr "启动服务器" +msgid "Multi-tenancy federated learning architecture with Flower" +msgstr "步骤 2:使用 Flower 联邦学习" -#: ../../source/how-to-run-flower-using-docker.rst:207 +#: ../../source/explanation-flower-architecture.rst:89 msgid "" -"Next, we build the SuperNode Docker image by running the following " -"command in the directory where Dockerfile and ClientApp code are located." +"To illustrate how multi-run works, consider one federated learning " +"training run where a ``ServerApp`` and a ``ClientApp`` are participating " +"in ``[run 1]``. Note that a SuperNode will only run a ``ClientApp`` if it" +" is selected to participate in the training run." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:214 +#: ../../source/explanation-flower-architecture.rst:94 msgid "" -"We gave the image the name ``flwr_supernode``, and the tag ``0.0.1``. " -"Remember that the here chosen values only serve as an example. You can " -"change them to your needs." 
+"In ``[run 1]`` below, all the SuperNodes are selected and therefore run " +"their corresponding ``ClientApp``\\s:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:219 +#: ../../source/explanation-flower-architecture.rst:103 #, fuzzy -msgid "Running the SuperNode Docker image" -msgstr "启动服务器" +msgid "Multi-tenancy federated learning architecture - Run 1" +msgstr "使用联邦学习策略" -#: ../../source/how-to-run-flower-using-docker.rst:221 -msgid "Now that we have built the SuperNode image, we can finally run it." +#: ../../source/explanation-flower-architecture.rst:103 +msgid "" +"Run 1 in a multi-run federated learning architecture with Flower. All " +"SuperNodes participate in the training round." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:229 -#: ../../source/how-to-run-flower-using-docker.rst:345 -msgid "Let's break down each part of this command:" +#: ../../source/explanation-flower-architecture.rst:106 +msgid "" +"However, in ``[run 2]``, only the first and third SuperNodes are selected" +" to participate in the training:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:231 -#: ../../source/how-to-run-flower-using-docker.rst:347 -msgid "``docker run``: This is the command to run a new Docker container." -msgstr "" +#: ../../source/explanation-flower-architecture.rst:115 +#, fuzzy +msgid "Multi-tenancy federated learning architecture - Run 2" +msgstr "使用联邦学习策略" -#: ../../source/how-to-run-flower-using-docker.rst:232 -#: ../../source/how-to-run-flower-using-docker.rst:348 +#: ../../source/explanation-flower-architecture.rst:115 msgid "" -"``--rm``: This option specifies that the container should be " -"automatically removed when it stops." +"Run 2 in a multi-run federated learning architecture with Flower. Only " +"the first and third SuperNodes are selected to participate in the " +"training round." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:233 -msgid "``flwr_supernode:0.0.1``: The name the tag of the Docker image to use." -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:234 -#: ../../source/how-to-run-flower-using-docker.rst:350 -msgid "``--insecure``: This option enables insecure communication." +#: ../../source/explanation-flower-architecture.rst:118 +msgid "" +"Therefore, with Flower multi-run, different projects (each consisting of " +"a ``ServerApp`` and ``ClientApp``) can run on different sets of clients." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst +#: ../../source/explanation-flower-architecture.rst:121 msgid "" -"``--server 192.168.1.100:9092``: This option specifies the address of the" -" SuperLinks Fleet" +"To help you start and manage all of the concurrently executing training " +"runs, Flower offers one additional long-running server-side service " +"called **SuperExec**. When you type ``flwr run`` to start a new training " +"run, the ``flwr`` CLI bundles your local project (mainly your " +"``ServerApp`` and ``ClientApp``) and sends it to the **SuperExec**. The " +"**SuperExec** will then take care of starting and managing your " +"``ServerApp``, which in turn selects SuperNodes to execute your " +"``ClientApp``." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst -msgid "API to connect to. Remember to update it with your SuperLink IP." +#: ../../source/explanation-flower-architecture.rst:128 +msgid "" +"This architecture allows many users to (concurrently) run their projects " +"on the same federation, simply by typing ``flwr run`` on their local " +"developer machine." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:248 -msgid "" -"To test running Flower locally, you can create a `bridge network " -"`__, use the ``--network`` argument and pass the " -"name of the Docker network to run your SuperNodes." 
+#: ../../source/explanation-flower-architecture.rst:137 +msgid "Flower Deployment Engine with SuperExec" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:252 -msgid "" -"Any argument that comes after the tag is passed to the Flower SuperNode " -"binary. To see all available flags that the SuperNode supports, run:" +#: ../../source/explanation-flower-architecture.rst:137 +msgid "The SuperExec service for managing concurrent training runs in Flower." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:262 +#: ../../source/explanation-flower-architecture.rst:141 msgid "" -"To enable SSL, we will need to mount a PEM-encoded root certificate into " -"your SuperNode container." +"This explanation covers the Flower Deployment Engine. An explanation " +"covering the Flower Simulation Engine will follow." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:264 +#: ../../source/explanation-flower-architecture.rst:146 #, fuzzy msgid "" -"Assuming the certificate already exists locally, we can use the flag " -"``--volume`` to mount the local certificate into the container's " -"``/app/`` directory. This allows the SuperNode to access the certificate " -"within the container. Use the ``--certificates`` flag when starting the " -"container." -msgstr "" -"假设我们需要的所有文件都在本地的 ``certificates`` 目录中,我们可以使用标记 ``-v`` 将本地目录挂载到容器的 " -"``/app/`` 目录中。这样,服务器就可以访问容器内的文件。最后,我们使用 ``--certificates`` 标志将证书名称传递给服务器。" +"As we continue to enhance Flower at a rapid pace, we'll periodically " +"update this explainer document. Feel free to share any feedback with us." +msgstr "随着 Flower Next 的不断快速改进,我们将定期更新本指南。如有任何反馈,请随时与我们分享!" 
-#: ../../source/how-to-run-flower-using-docker.rst:275 -#, fuzzy -msgid "Flower ServerApp" -msgstr "Flower 服务器。" +#: ../../source/fed/0000-20200102-fed-template.md:10 +msgid "FED Template" +msgstr "FED 模板" -#: ../../source/how-to-run-flower-using-docker.rst:277 -msgid "" -"The procedure for building and running a ServerApp image is almost " -"identical to the SuperNode image." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:12 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12 +msgid "Table of Contents" +msgstr "目录" -#: ../../source/how-to-run-flower-using-docker.rst:279 -msgid "" -"Similar to the SuperNode image, the ServerApp Docker image comes with a " -"pre-installed version of Flower and serves as a base for building your " -"own ServerApp image." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:14 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14 +msgid "[Table of Contents](#table-of-contents)" +msgstr "[目录](#table-of-contents)" -#: ../../source/how-to-run-flower-using-docker.rst:282 -msgid "" -"We will use the same ``quickstart-pytorch`` example as we do in the " -"Flower SuperNode section. If you have not already done so, please follow " -"the `SuperNode Prerequisites`_ before proceeding." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:15 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15 +msgid "[Summary](#summary)" +msgstr "[总结](#summary)" -#: ../../source/how-to-run-flower-using-docker.rst:287 -msgid "Creating a ServerApp Dockerfile" -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:16 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16 +msgid "[Motivation](#motivation)" +msgstr "[动机](#motivation)" -#: ../../source/how-to-run-flower-using-docker.rst:298 -msgid "" -"First, we need to create a Dockerfile in the directory where the " -"``ServerApp`` code is located. 
If you use the ``quickstart-pytorch`` " -"example, create a new file called ``Dockerfile.serverapp`` in ``examples" -"/quickstart-pytorch``." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:17 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 +msgid "[Goals](#goals)" +msgstr "[目标](#goals)" -#: ../../source/how-to-run-flower-using-docker.rst:302 -msgid "" -"The ``Dockerfile.serverapp`` contains the instructions that assemble the " -"ServerApp image." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:18 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 +msgid "[Non-Goals](#non-goals)" +msgstr "[非目标](#non-goals)" -#: ../../source/how-to-run-flower-using-docker.rst:313 -msgid "" -"In the first two lines, we instruct Docker to use the ServerApp image " -"tagged ``1.8.0`` as a base image and set our working directory to " -"``/app``. The following instructions will now be executed in the ``/app``" -" directory. In the last two lines, we copy the ``server.py`` module into " -"the image and set the entry point to ``flower-server-app`` with the " -"argument ``server:app``. The argument is the object reference of the " -"ServerApp (``:``) that will be run inside the " -"ServerApp container." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:19 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 +msgid "[Proposal](#proposal)" +msgstr "[计划](#proposal)" -#: ../../source/how-to-run-flower-using-docker.rst:321 -#, fuzzy -msgid "Building the ServerApp Docker image" -msgstr "启动服务器" +#: ../../source/fed/0000-20200102-fed-template.md:20 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 +msgid "[Drawbacks](#drawbacks)" +msgstr "[缺点](#drawbacks)" -#: ../../source/how-to-run-flower-using-docker.rst:323 -msgid "" -"Next, we build the ServerApp Docker image by running the following " -"command in the directory where Dockerfile and ServerApp code are located." 
-msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:21 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 +msgid "[Alternatives Considered](#alternatives-considered)" +msgstr "[备选方案](#alternatives-considered)" -#: ../../source/how-to-run-flower-using-docker.rst:330 -msgid "" -"We gave the image the name ``flwr_serverapp``, and the tag ``0.0.1``. " -"Remember that the here chosen values only serve as an example. You can " -"change them to your needs." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:22 +msgid "[Appendix](#appendix)" +msgstr "[附录](#appendix)" -#: ../../source/how-to-run-flower-using-docker.rst:335 -#, fuzzy -msgid "Running the ServerApp Docker image" -msgstr "启动服务器" +#: ../../source/fed/0000-20200102-fed-template.md:24 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 +msgid "Summary" +msgstr "总结" -#: ../../source/how-to-run-flower-using-docker.rst:337 -msgid "Now that we have built the ServerApp image, we can finally run it." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:26 +msgid "\\[TODO - sentence 1: summary of the problem\\]" +msgstr "\\[TODO - 句子 1: 问题概括\\]" -#: ../../source/how-to-run-flower-using-docker.rst:349 -msgid "``flwr_serverapp:0.0.1``: The name the tag of the Docker image to use." 
-msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:28 +msgid "\\[TODO - sentence 2: summary of the solution\\]" +msgstr "\\[TODO - 句子 2: 解决方案概括\\]" -#: ../../source/how-to-run-flower-using-docker.rst -msgid "" -"``--server 192.168.1.100:9091``: This option specifies the address of the" -" SuperLinks Driver" -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:30 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 +msgid "Motivation" +msgstr "动机" -#: ../../source/how-to-run-flower-using-docker.rst:363 -msgid "" -"To test running Flower locally, you can create a `bridge network " -"`__, use the ``--network`` argument and pass the " -"name of the Docker network to run your ServerApps." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:32 +#: ../../source/fed/0000-20200102-fed-template.md:36 +#: ../../source/fed/0000-20200102-fed-template.md:40 +#: ../../source/fed/0000-20200102-fed-template.md:44 +#: ../../source/fed/0000-20200102-fed-template.md:48 +#: ../../source/fed/0000-20200102-fed-template.md:54 +#: ../../source/fed/0000-20200102-fed-template.md:58 +msgid "\\[TODO\\]" +msgstr "\\[TODO\\]" -#: ../../source/how-to-run-flower-using-docker.rst:367 -msgid "" -"Any argument that comes after the tag is passed to the Flower ServerApp " -"binary. To see all available flags that the ServerApp supports, run:" -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:34 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:53 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 +msgid "Goals" +msgstr "目标" -#: ../../source/how-to-run-flower-using-docker.rst:377 -#, fuzzy -msgid "" -"To enable SSL, we will need to mount a PEM-encoded root certificate into " -"your ServerApp container." 
-msgstr "要启用 SSL,需要 CA 证书、服务器证书和服务器私钥。" +#: ../../source/fed/0000-20200102-fed-template.md:38 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:79 +msgid "Non-Goals" +msgstr "非目标" -#: ../../source/how-to-run-flower-using-docker.rst:379 -#, fuzzy -msgid "" -"Assuming the certificate already exists locally, we can use the flag " -"``--volume`` to mount the local certificate into the container's " -"``/app/`` directory. This allows the ServerApp to access the certificate " -"within the container. Use the ``--certificates`` flag when starting the " -"container." -msgstr "" -"假设我们需要的所有文件都在本地的 ``certificates`` 目录中,我们可以使用标记 ``-v`` 将本地目录挂载到容器的 " -"``/app/`` 目录中。这样,服务器就可以访问容器内的文件。最后,我们使用 ``--certificates`` 标志将证书名称传递给服务器。" +#: ../../source/fed/0000-20200102-fed-template.md:42 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 +msgid "Proposal" +msgstr "提案" -#: ../../source/how-to-run-flower-using-docker.rst:390 -#, fuzzy -msgid "Advanced Docker options" -msgstr "高级安装选项" +#: ../../source/fed/0000-20200102-fed-template.md:46 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 +msgid "Drawbacks" +msgstr "缺点" -#: ../../source/how-to-run-flower-using-docker.rst:393 -#, fuzzy -msgid "Using a different Flower version" -msgstr "使用不同的 Flower 或 Python 版本" +#: ../../source/fed/0000-20200102-fed-template.md:50 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 +msgid "Alternatives Considered" +msgstr "备选方案" -#: ../../source/how-to-run-flower-using-docker.rst:395 -#, fuzzy -msgid "" -"If you want to use a different version of Flower, for example Flower " -"nightly, you can do so by changing the tag. All available versions are on" -" `Docker Hub `__." 
-msgstr "" -"如果您想使用不同版本的 Flower 或 Python,可以通过更改标签来实现。我们提供的所有版本都可以在 `Docker Hub " -"`_ 上找到。" +#: ../../source/fed/0000-20200102-fed-template.md:52 +msgid "\\[Alternative 1\\]" +msgstr "\\[备选 1\\]" -#: ../../source/how-to-run-flower-using-docker.rst:400 -#, fuzzy -msgid "Pinning a Docker image to a specific version" -msgstr "将 Docker 映像固定到特定版本" +#: ../../source/fed/0000-20200102-fed-template.md:56 +msgid "\\[Alternative 2\\]" +msgstr "\\[备选 2\\]" -#: ../../source/how-to-run-flower-using-docker.rst:402 -#, fuzzy -msgid "" -"It may happen that we update the images behind the tags. Such updates " -"usually include security updates of system dependencies that should not " -"change the functionality of Flower. However, if you want to ensure that " -"you always use the same image, you can specify the hash of the image " -"instead of the tag." -msgstr "" -"我们可能会更新标签后面的图像。此类更新通常包括系统依赖项的安全更新,不会改变 Flower " -"的功能。不过,如果您想确保始终使用同一张图片,可以指定图片的哈希值而不是标签。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 +msgid "Flower Enhancement Doc" +msgstr "Flower 改善文档" -#: ../../source/how-to-run-flower-using-docker.rst:407 -#, fuzzy -msgid "" -"The following command returns the current image hash referenced by the " -"``superlink:1.8.0`` tag:" -msgstr "下面的命令将返回由 ``server:1.7.0-py3.11-ubuntu22.04`` 标记引用的当前图像哈希值:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 +msgid "[Enhancement Doc Template](#enhancement-doc-template)" +msgstr "[增强文档模版](#enhancement-doc-template)" -#: ../../source/how-to-run-flower-using-docker.rst:414 -#, fuzzy -msgid "Next, we can pin the hash when running a new SuperLink container:" -msgstr "接下来,我们可以在运行新服务器容器时将哈希值固定下来:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 +msgid "[Metadata](#metadata)" +msgstr "[描述数据](#metadata)" -#: ../../source/how-to-run-flower-using-docker.rst:423 -#, fuzzy -msgid "Setting environment variables" -msgstr "设置编码环境" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:22 +msgid 
"[Workflow](#workflow)" +msgstr "[工作流程](#workflow)" -#: ../../source/how-to-run-flower-using-docker.rst:425 -#, fuzzy -msgid "" -"To set a variable inside a Docker container, you can use the ``-e " -"=`` flag." -msgstr "要在 Docker 容器内设置变量,可以使用 ``-e =`` 标志。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 +msgid "[GitHub Issues](#github-issues)" +msgstr "[GitHub 问题](#github-issues)" -#: ../../source/how-to-run-simulations.rst:2 -msgid "Run simulations" -msgstr "运行模拟" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:26 +msgid "[Google Docs](#google-docs)" +msgstr "[谷歌文档](#google-docs)" -#: ../../source/how-to-run-simulations.rst:8 -msgid "" -"Simulating Federated Learning workloads is useful for a multitude of use-" -"cases: you might want to run your workload on a large cohort of clients " -"but without having to source, configure and mange a large number of " -"physical devices; you might want to run your FL workloads as fast as " -"possible on the compute systems you have access to without having to go " -"through a complex setup process; you might want to validate your " -"algorithm on different scenarios at varying levels of data and system " -"heterogeneity, client availability, privacy budgets, etc. These are among" -" some of the use-cases where simulating FL workloads makes sense. Flower " -"can accommodate these scenarios by means of its `VirtualClientEngine " -"`_ or " -"VCE." 
-msgstr "" -"模拟联邦学习工作负载可用于多种案例:您可能希望在大量客户端上运行您的工作负载,但无需采购、配置和管理大量物理设备;您可能希望在您可以访问的计算系统上尽可能快地运行您的" -" FL 工作负载,而无需经过复杂的设置过程;您可能希望在不同数据和系统异构性、客户端可用性、隐私预算等不同水平的场景中验证您的算法。这些都是模拟 " -"FL 工作负载的一些案例。Flower 可以通过其 \"虚拟客户端引擎\"(VirtualClientEngine)_或 VCE 来匹配这些情况。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:30 +msgid "A Flower Enhancement is a standardized development process to" +msgstr "改善 Flower 功能是一个标准化的开发流程,目的是" -#: ../../source/how-to-run-simulations.rst:10 -msgid "" -"The :code:`VirtualClientEngine` schedules, launches and manages `virtual`" -" clients. These clients are identical to `non-virtual` clients (i.e. the " -"ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " -"creating a class inheriting, for example, from `flwr.client.NumPyClient " -"`_ and therefore behave in an " -"identical way. In addition to that, clients managed by the " -":code:`VirtualClientEngine` are:" -msgstr "" -":code:`VirtualClientEngine`用来规划,启动和管理`虚拟`客户端。这些客户端跟`非虚拟`客户端是一样的(即为您通过`flwr.client.start_client" -" `_启动的客户端),因为它们可以通过创建一个继承自 " -"`flwr.client.NumPyClient `_ " -"的类进行配置,因此其行为方式相同。另外,由 `VirtualClientEngine` 管理的客户端有:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 +msgid "provide a common structure for proposing larger changes" +msgstr "为提出更大规模的改动提供一个共同的结构" -#: ../../source/how-to-run-simulations.rst:12 -msgid "" -"resource-aware: this means that each client gets assigned a portion of " -"the compute and memory on your system. You as a user can control this at " -"the beginning of the simulation and allows you to control the degree of " -"parallelism of your Flower FL simulation. The fewer the resources per " -"client, the more clients can run concurrently on the same hardware." 
-msgstr "" -"资源感知:这意味着每个客户端都会分配到系统中的一部分计算和内存。作为用户,您可以在模拟开始时对其进行控制,从而控制 Flower FL " -"模拟的并行程度。每个客户端的资源越少,在同一硬件上并发运行的客户端就越多。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 +msgid "ensure that the motivation for a change is clear" +msgstr "确保改动的动机明确" -#: ../../source/how-to-run-simulations.rst:13 -msgid "" -"self-managed: this means that you as a user do not need to launch clients" -" manually, instead this gets delegated to :code:`VirtualClientEngine`'s " -"internals." -msgstr "自管理:这意味着用户无需手动启动客户端,而是由 :code:`VirtualClientEngine` 负责。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 +msgid "persist project information in a version control system" +msgstr "将项目信息保存在版本控制系统中" -#: ../../source/how-to-run-simulations.rst:14 -msgid "" -"ephemeral: this means that a client is only materialized when it is " -"required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards," -" releasing the resources it was assigned and allowing in this way other " -"clients to participate." -msgstr "" -"即时性:这意味着客户端只有在 FL 进程中需要它时才会被实体化(例如执行 `fit() `_ " -")。之后该对象将被销毁,释放分配给它的资源,并允许其他客户端以这种方式参与。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 +msgid "document the motivation for impactful user-facing changes" +msgstr "记录面向用户的具有影响力的改动的动机" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:36 +msgid "reserve GitHub issues for tracking work in flight" +msgstr "保留 GitHub 问题,用于跟踪进行中的工作" -#: ../../source/how-to-run-simulations.rst:16 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 msgid "" -"The :code:`VirtualClientEngine` implements `virtual` clients using `Ray " -"`_, an open-source framework for scalable Python " -"workloads. In particular, Flower's :code:`VirtualClientEngine` makes use " -"of `Actors `_ to " -"spawn `virtual` clients and run their workload." 
-msgstr "" -":code:`VirtualClientEngine`使用`Ray " -"`_来实现`虚拟`客户端,这是一个用于可扩展 Python 工作负载的开源框架。特别地,Flower 的" -" :code:`VirtualClientEngine` 使用 `Actors `_ 来生成 `virtual` 客户端并运行它们的工作负载。" +"ensure community participants can successfully drive changes to " +"completion across one or more releases while stakeholders are adequately " +"represented throughout the process" +msgstr "确保社区参与者能够成功推动改动,完成一个或多个版本,同时利益相关者在整个过程中得到充分展现" -#: ../../source/how-to-run-simulations.rst:20 -msgid "Launch your Flower simulation" -msgstr "启动 Flower 模拟" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 +msgid "Hence, an Enhancement Doc combines aspects of" +msgstr "因此,\"增强文件\"将以下方面结合起来" -#: ../../source/how-to-run-simulations.rst:22 -msgid "" -"Running Flower simulations still require you to define your client class," -" a strategy, and utility functions to download and load (and potentially " -"partition) your dataset. With that out of the way, launching your " -"simulation is done with `start_simulation `_ and a minimal example looks" -" as follows:" -msgstr "" -"运行 Flower 模拟器仍然需要定义客户端类、策略以及下载和加载(可能还需要分割)数据集的实用程序。在完成这些工作后,就可以使用 " -"\"start_simulation `_\" 来启动模拟了,一个最简单的示例如下:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 +msgid "a feature, and effort-tracking document" +msgstr "一个功能和效力跟踪文档" -#: ../../source/how-to-run-simulations.rst:44 -msgid "VirtualClientEngine resources" -msgstr "虚拟客户端引擎资源" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 +msgid "a product requirements document" +msgstr "一个产品需要文档" -#: ../../source/how-to-run-simulations.rst:45 -msgid "" -"By default the VCE has access to all system resources (i.e. all CPUs, all" -" GPUs, etc) since that is also the default behavior when starting Ray. " -"However, in some settings you might want to limit how many of your system" -" resources are used for simulation. 
You can do this via the " -":code:`ray_init_args` input argument to :code:`start_simulation` which " -"the VCE internally passes to Ray's :code:`ray.init` command. For a " -"complete list of settings you can configure check the `ray.init " -"`_" -" documentation. Do not set :code:`ray_init_args` if you want the VCE to " -"use all your system's CPUs and GPUs." -msgstr "" -"默认情况下,VCE 可以访问所有系统资源(即所有 CPU、所有 GPU 等),因为这也是启动 Ray " -"时的默认行为。不过,在某些设置中,您可能希望限制有多少系统资源用于模拟。您可以通过 :code:`ray_init_args` 输入到 " -":code:`start_simulation` 的参数来做到这一点,VCE 会在内部将该参数传递给 Ray 的 :code:`ray.init`" -" 命令。有关您可以配置的设置的完整列表,请查看 `ray.init `_ 文档。如果希望 VCE 使用系统中所有的 CPU 和 " -"GPU,请不要设置 :code:`ray_init_args`。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 +msgid "a design document" +msgstr "一个设计文档" -#: ../../source/how-to-run-simulations.rst:62 -msgid "Assigning client resources" -msgstr "分配客户端资源" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 +msgid "" +"into one file, which is created incrementally in collaboration with the " +"community." +msgstr "该文件是与社区合作逐步创建的。" -#: ../../source/how-to-run-simulations.rst:63 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 msgid "" -"By default the :code:`VirtualClientEngine` assigns a single CPU core (and" -" nothing else) to each virtual client. This means that if your system has" -" 10 cores, that many virtual clients can be concurrently running." +"For far-fetching changes or features proposed to Flower, an abstraction " +"beyond a single GitHub issue or pull request is required to understand " +"and communicate upcoming changes to the project." 
msgstr "" -"默认情况下,:code:`VirtualClientEngine` 会为每个虚拟客户端分配一个 CPU " -"内核(不分配其他任何内核)。这意味着,如果系统有 10 个内核,那么可以同时运行这么多虚拟客户端。" +"对于向 Flower 提出的远期变更或功能,需要一个超越单个 GitHub 问题或拉取请求(pull " +"request)的抽象概念,以了解和沟通项目即将发生的变更。" -#: ../../source/how-to-run-simulations.rst:65 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 msgid "" -"More often than not, you would probably like to adjust the resources your" -" clients get assigned based on the complexity (i.e. compute and memory " -"footprint) of your FL workload. You can do so when starting your " -"simulation by setting the argument `client_resources` to " -"`start_simulation `_." -" Two keys are internally used by Ray to schedule and spawn workloads (in " -"our case Flower clients):" +"The purpose of this process is to reduce the amount of \"tribal " +"knowledge\" in our community. By moving decisions from Slack threads, " +"video calls, and hallway conversations into a well-tracked artifact, this" +" process aims to enhance communication and discoverability." msgstr "" -"通常情况下,您可能希望根据 FL 工作负载的复杂性(即计算和内存占用)来调整分配给客户端的资源。您可以在启动模拟时将参数 " -"`client_resources` 设置为 `start_simulation `_ 。Ray " -"内部使用两个键来调度和生成工作负载(在我们的例子中是 Flower 客户端):" - -#: ../../source/how-to-run-simulations.rst:67 -msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." -msgstr ":code:`num_cpus` 表示客户端将获得的 CPU 内核数量。" +"这一流程的目的是减少我们社区中 \"部落知识 \"的数量。通过将决策从 Slack " +"线程、视频通话和走廊对话转移到一个跟踪良好的工作环境中,该流程旨在加强沟通和可发现性。" -#: ../../source/how-to-run-simulations.rst:68 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 msgid "" -":code:`num_gpus` indicates the **ratio** of GPU memory a client gets " -"assigned." -msgstr ":code:`num_gpus` 表示分配给客户端的 GPU 内存的**比例**。" - -#: ../../source/how-to-run-simulations.rst:70 -msgid "Let's see a few examples:" -msgstr "让我们来看几个例子:" +"Roughly any larger, user-facing enhancement should follow the Enhancement" +" process. 
If an enhancement would be described in either written or " +"verbal communication to anyone besides the author or developer, then " +"consider creating an Enhancement Doc." +msgstr "任何较大的、面向用户的增强都应遵循增强流程。如果要以书面或口头形式向作者或开发人员以外的任何人描述增强功能,则应考虑创建改善文档。" -#: ../../source/how-to-run-simulations.rst:89 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 msgid "" -"While the :code:`client_resources` can be used to control the degree of " -"concurrency in your FL simulation, this does not stop you from running " -"dozens, hundreds or even thousands of clients in the same round and " -"having orders of magnitude more `dormant` (i.e. not participating in a " -"round) clients. Let's say you want to have 100 clients per round but your" -" system can only accommodate 8 clients concurrently. The " -":code:`VirtualClientEngine` will schedule 100 jobs to run (each " -"simulating a client sampled by the strategy) and then will execute them " -"in a resource-aware manner in batches of 8." -msgstr "" -"虽然 :code:`client_resources` 可用来控制 FL " -"模拟的并发程度,但这并不能阻止您在同一轮模拟中运行几十、几百甚至上千个客户端,并拥有数量级更多的 " -"\"休眠\"(即不参与一轮模拟)客户端。比方说,您希望每轮有 100 个客户端,但您的系统只能同时容纳 8 " -"个客户端。:code:`VirtualClientEngine` 将安排运行 100 " -"个工作(每个工作模拟策略采样的一个客户端),然后以资源感知的方式分批执行。" +"Similarly, any technical effort (refactoring, major architectural change)" +" that will impact a large section of the development community should " +"also be communicated widely. The Enhancement process is suited for this " +"even if it will have zero impact on the typical user or operator." +msgstr "同样,任何会对开发社区的大部分人产生影响的技术工作(重构、重大架构变更)也应广泛传播。即使对典型用户或操作员的影响为零,改进流程也适用于这种情况。" -#: ../../source/how-to-run-simulations.rst:91 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 msgid "" -"To understand all the intricate details on how resources are used to " -"schedule FL clients and how to define custom resources, please take a " -"look at the `Ray documentation `_." 
+"For small changes and additions, going through the Enhancement process " +"would be time-consuming and unnecessary. This includes, for example, " +"adding new Federated Learning algorithms, as these only add features " +"without changing how Flower works or is used." msgstr "" -"要了解资源如何用于调度 FL 客户端以及如何定义自定义资源的所有复杂细节,请查看 `Ray 文档 " -"`_。" - -#: ../../source/how-to-run-simulations.rst:94 -msgid "Simulation examples" -msgstr "模拟示例" +"对于小的改动和添加,通过 \"改善\"程序既耗时又没有必要。例如,这包括添加新的联邦学习算法,因为这只会增加功能,而不会改变 \"Flower " +"\"的工作或使用方式。" -#: ../../source/how-to-run-simulations.rst:96 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 msgid "" -"A few ready-to-run complete examples for Flower simulation in " -"Tensorflow/Keras and PyTorch are provided in the `Flower repository " -"`_. You can run them on Google Colab too:" -msgstr "" -"在 Tensorflow/Keras 和 PyTorch 中进行 Flower 模拟的几个可随时运行的完整示例已在 `Flower 库 " -"`_ 中提供。您也可以在 Google Colab 上运行它们:" +"Enhancements are different from feature requests, as they are already " +"providing a laid-out path for implementation and are championed by " +"members of the community." +msgstr "增强功能与功能请求不同,因为它们已经提供了实施路径,并得到了社区成员的支持。" -#: ../../source/how-to-run-simulations.rst:98 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 msgid "" -"`Tensorflow/Keras Simulation " -"`_: 100 clients collaboratively train a MLP model on MNIST." -msgstr "" -"Tensorflow/Keras模拟 `_:100个客户端在MNIST上协作训练一个MLP模型。" +"An Enhancement is captured in a Markdown file that follows a defined " +"template and a workflow to review and store enhancement docs for " +"reference — the Enhancement Doc." 
+msgstr "增强功能被记录在一个 Markdown 文件中,该文件遵循已定义的模板和工作流程,用于审查和存储增强功能文档(即增强功能文档)以供参考。" -#: ../../source/how-to-run-simulations.rst:99 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 +msgid "Enhancement Doc Template" +msgstr "增强文档模板" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 msgid "" -"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " -"MNIST." -msgstr "" -"PyTorch 模拟 `_:100 个客户端在 MNIST 上协作训练一个 CNN 模型。" +"Each enhancement doc is provided as a Markdown file having the following " +"structure" +msgstr "每个增强文档都以 Markdown 文件的形式提供,其结构如下" -#: ../../source/how-to-run-simulations.rst:104 -msgid "Multi-node Flower simulations" -msgstr "多节点 Flower 模拟" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 +msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" +msgstr "描述数据([如下所述](#metadata) 以 YAML 前言的形式出现)" -#: ../../source/how-to-run-simulations.rst:106 -msgid "" -"Flower's :code:`VirtualClientEngine` allows you to run FL simulations " -"across multiple compute nodes. Before starting your multi-node simulation" -" ensure that you:" -msgstr "Flower 的 :code:`VirtualClientEngine` 允许您在多个计算节点上运行 FL 模拟。在开始多节点模拟之前,请确保:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 +msgid "Title (same as in metadata)" +msgstr "标题(与描述数据中的标题相同)" -#: ../../source/how-to-run-simulations.rst:108 -msgid "Have the same Python environment in all nodes." -msgstr "所有节点都有相同的 Python 环境。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 +msgid "Table of Contents (if needed)" +msgstr "目录(如有需要)" -#: ../../source/how-to-run-simulations.rst:109 -msgid "Have a copy of your code (e.g. your entire repo) in all nodes." 
-msgstr "在所有节点上都有一份代码副本(例如整个软件包)。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 +msgid "Notes/Constraints/Caveats (optional)" +msgstr "注意事项/限制/警告(可选)" -#: ../../source/how-to-run-simulations.rst:110 -msgid "" -"Have a copy of your dataset in all nodes (more about this in " -":ref:`simulation considerations `)" -msgstr "在所有节点中都有一份数据集副本(更多相关信息请参阅 :ref:`模拟注意事项`)" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 +msgid "Design Details (optional)" +msgstr "设计细节(可选)" -#: ../../source/how-to-run-simulations.rst:111 -msgid "" -"Pass :code:`ray_init_args={\"address\"=\"auto\"}` to `start_simulation " -"`_ so the " -":code:`VirtualClientEngine` attaches to a running Ray instance." -msgstr "" -"将 :code:`ray_init_args={\"address\"=\"auto\"}`传递给 `start_simulation `_ ,这样 " -":code:`VirtualClientEngine`就会连接到正在运行的 Ray 实例。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 +msgid "Graduation Criteria" +msgstr "毕业标准" -#: ../../source/how-to-run-simulations.rst:112 -msgid "" -"Start Ray on you head node: on the terminal type :code:`ray start " -"--head`. This command will print a few lines, one of which indicates how " -"to attach other nodes to the head node." -msgstr "" -"在头部节点上启动 Ray:在终端上输入 :code:`raystart--" -"head`。该命令将打印几行输出,其中一行说明如何将其他节点连接到头部节点。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 +msgid "Upgrade/Downgrade Strategy (if applicable)" +msgstr "升级/降级策略(如适用)" -#: ../../source/how-to-run-simulations.rst:113 -msgid "" -"Attach other nodes to the head node: copy the command shown after " -"starting the head and execute it on terminal of a new node: for example " -":code:`ray start --address='192.168.1.132:6379'`" -msgstr "" -"将其他节点附加到头部节点:复制启动头部后显示的命令,并在新节点的终端上执行:例如 :code:`ray start " -"--address='192.168.1.132:6379'`" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 +msgid "As a reference, this document follows the above structure." 
+msgstr "作为参考,本文件采用上述结构。" -#: ../../source/how-to-run-simulations.rst:115 -msgid "" -"With all the above done, you can run your code from the head node as you " -"would if the simulation was running on a single node." -msgstr "完成上述所有操作后,您就可以在头部节点上运行代码了,就像在单个节点上运行模拟一样。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 +#: ../../source/ref-api/flwr.common.Metadata.rst:2 +msgid "Metadata" +msgstr "描述数据" -#: ../../source/how-to-run-simulations.rst:117 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 msgid "" -"Once your simulation is finished, if you'd like to dismantle your cluster" -" you simply need to run the command :code:`ray stop` in each node's " -"terminal (including the head node)." -msgstr "模拟结束后,如果要拆除集群,只需在每个节点(包括头部节点)的终端运行 :code:`ray stop` 命令即可。" +"**fed-number** (Required) The `fed-number` of the last Flower Enhancement" +" Doc + 1. With this number, it becomes easy to reference other proposals." +msgstr "**fed-number**(必填)上一个Flower增强文件的 \"fed-number \"+1。有了这个编号,就很容易参考其他提案。" -#: ../../source/how-to-run-simulations.rst:120 -msgid "Multi-node simulation good-to-know" -msgstr "了解多节点模拟" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 +msgid "**title** (Required) The title of the proposal in plain language." +msgstr "**标题** (必填)用简明语言写出提案的标题。" -#: ../../source/how-to-run-simulations.rst:122 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 msgid "" -"Here we list a few interesting functionality when running multi-node FL " -"simulations:" -msgstr "在此,我们列举了运行多节点 FL 模拟时的一些有趣功能:" +"**status** (Required) The current status of the proposal. See " +"[workflow](#workflow) for the possible states." 
+msgstr "**status** (必填)提案的当前状态。有关可能的状态,请参阅 [工作流程](#workflow)。" -#: ../../source/how-to-run-simulations.rst:124 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 msgid "" -"User :code:`ray status` to check all nodes connected to your head node as" -" well as the total resources available to the " -":code:`VirtualClientEngine`." -msgstr "" -"使用 :code:`ray status` 查看连接到头部节点的所有节点,以及 :code:`VirtualClientEngine` " -"可用的总资源。" +"**authors** (Required) A list of authors of the proposal. This is simply " +"the GitHub ID." +msgstr "**作者**(必填) 提案的作者列表。这只是 GitHub ID。" -#: ../../source/how-to-run-simulations.rst:126 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 msgid "" -"When attaching a new node to the head, all its resources (i.e. all CPUs, " -"all GPUs) will be visible by the head node. This means that the " -":code:`VirtualClientEngine` can schedule as many `virtual` clients as " -"that node can possible run. In some settings you might want to exclude " -"certain resources from the simulation. You can do this by appending " -"`--num-cpus=` and/or `--num-" -"gpus=` in any :code:`ray start` command (including " -"when starting the head)" -msgstr "" -"将新节点附加到头部节点时,头部节点将可见其所有资源(即所有 CPU 和 GPU)。这意味着 :code:`VirtualClientEngine`" -" 可以调度尽可能多的 \"虚拟 \"客户端来运行该节点。在某些设置中,您可能希望将某些资源排除在模拟之外。为此,您可以在任何 :code:`ray" -" start` 命令(包括启动头部时)中添加 `--num-cpus=`和/或 `--num-" -"gpus=`" - -#: ../../source/how-to-run-simulations.rst:132 -msgid "Considerations for simulations" -msgstr "模拟的注意事项" +"**creation-date** (Required) The date that the proposal was first " +"submitted in a PR." +msgstr "**创建日期**(必填) 建议书在 PR 中首次提交的日期。" -#: ../../source/how-to-run-simulations.rst:135 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 msgid "" -"We are actively working on these fronts so to make it trivial to run any " -"FL workload with Flower simulation." 
-msgstr "我们正在积极开展这些方面的工作,以便使 FL 工作负载与 Flower 模拟的运行变得轻而易举。" +"**last-updated** (Optional) The date that the proposal was last changed " +"significantly." +msgstr "**最后更新** (可选)提案最后一次重大修改的日期。" -#: ../../source/how-to-run-simulations.rst:138 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 msgid "" -"The current VCE allows you to run Federated Learning workloads in " -"simulation mode whether you are prototyping simple scenarios on your " -"personal laptop or you want to train a complex FL pipeline across " -"multiple high-performance GPU nodes. While we add more capabilities to " -"the VCE, the points below highlight some of the considerations to keep in" -" mind when designing your FL pipeline with Flower. We also highlight a " -"couple of current limitations in our implementation." -msgstr "" -"当前的 VCE 允许您在模拟模式下运行联邦学习工作负载,无论您是在个人笔记本电脑上建立简单的场景原型,还是要在多个高性能 GPU 节点上训练复杂的" -" FL情景。虽然我们为 VCE 增加了更多的功能,但以下几点强调了在使用 Flower 设计 FL " -"时需要注意的一些事项。我们还强调了我们的实现中目前存在的一些局限性。" +"**see-also** (Optional) A list of other proposals that are relevant to " +"this one." +msgstr "**另见** (可选)与本提案相关的其他提案清单。" -#: ../../source/how-to-run-simulations.rst:141 -msgid "GPU resources" -msgstr "GPU 资源" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 +msgid "**replaces** (Optional) A list of proposals that this one replaces." +msgstr "**取代**(可选) 这份提案所取代的提案列表。" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 +msgid "**superseded-by** (Optional) A list of proposals that this one supersedes." +msgstr "**被取代者** (可选) 此提案取代的提案列表。" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 +msgid "Workflow" +msgstr "工作流程" -#: ../../source/how-to-run-simulations.rst:143 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 msgid "" -"The VCE assigns a share of GPU memory to a client that specifies the key " -":code:`num_gpus` in :code:`client_resources`. 
This being said, Ray (used " -"internally by the VCE) is by default:" +"The idea forming the enhancement should already have been discussed or " +"pitched in the community. As such, it needs a champion, usually the " +"author, who shepherds the enhancement. This person also has to find " +"committers to Flower willing to review the proposal." +msgstr "形成增强功能的想法应该已经在社区中讨论过或提出过。因此,它需要一个支持者(通常是作者)来引导增强。这个人还必须找到愿意审核提案的提交者。" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 +msgid "" +"New enhancements are checked in with a file name in the form of `NNNN-" +"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement " +"Doc number, to `enhancements`. All enhancements start in `provisional` " +"state as part of a pull request. Discussions are done as part of the pull" +" request review." msgstr "" -"VCE 会为指定 :code:`client_resources` 中 :code:`num_gpus` 关键字的客户端分配 GPU " -"内存份额。也就是说,Ray(VCE 内部使用)是默认的:" +"新的增强功能以 `NNNN-YYYYMMDD-enhancement-title.md` 的文件名签入,其中 `NNNN` " +"是花朵增强文档的编号,并将其转入 `enhancements`。作为拉取请求(pull request)的一部分,所有增强功能都从 " +"`provisional` 状态开始。讨论是作为拉取请求审查的一部分进行的。" -#: ../../source/how-to-run-simulations.rst:146 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 msgid "" -"not aware of the total VRAM available on the GPUs. This means that if you" -" set :code:`num_gpus=0.5` and you have two GPUs in your system with " -"different (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients" -" concurrently." +"Once an enhancement has been reviewed and approved, its status is changed" +" to `implementable`. The actual implementation is then done in separate " +"pull requests. These pull requests should mention the respective " +"enhancement as part of their description. After the implementation is " +"done, the proposal status is changed to `implemented`." 
msgstr "" -"不知道 GPU 上可用的总 VRAM。这意味着,如果您设置 :code:`num_gpus=0.5`,而系统中有两个不同(如 32GB 和 " -"8GB)VRAM 的 GPU,它们都将同时运行 2 个客户端。" +"一旦增强功能通过审核和批准,其状态就会变为 " +"`可实施`。实际的实施工作将在单独的拉取请求中完成。这些拉取请求应在其描述中提及相应的增强功能。实施完成后,提案状态将更改为 `已实施`。" -#: ../../source/how-to-run-simulations.rst:147 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 msgid "" -"not aware of other unrelated (i.e. not created by the VCE) workloads are " -"running on the GPU. Two takeaways from this are:" -msgstr "不知道 GPU 上正在运行其他无关(即不是由 VCE 创建)的工作负载。从中可以得到以下两点启示:" +"Under certain conditions, other states are possible. An Enhancement has " +"the following states:" +msgstr "在某些条件下,还可能出现其他状态。增强提案具有以下状态:" -#: ../../source/how-to-run-simulations.rst:149 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 msgid "" -"Your Flower server might need a GPU to evaluate the `global model` after " -"aggregation (by instance when making use of the `evaluate method `_)" -msgstr "" -"您的 Flower 服务器可能需要 GPU 来评估聚合后的 \"全局模型\"(例如在使用 \"评估方法\"`_时)" +"`provisional`: The enhancement has been proposed and is actively being " +"defined. This is the starting state while the proposal is being fleshed " +"out and actively defined and discussed." +msgstr "`暂定`: 已提出改进建议并正在积极定义。这是在提案得到充实、积极定义和讨论时的起始状态。" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 +msgid "`implementable`: The enhancement has been reviewed and approved." +msgstr "`可实施`: 增强功能已审核通过。" -#: ../../source/how-to-run-simulations.rst:150 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 msgid "" -"If you want to run several independent Flower simulations on the same " -"machine you need to mask-out your GPUs with " -":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching your " -"experiment." -msgstr "" -"如果您想在同一台机器上运行多个独立的 Flower 模拟,则需要在启动实验时使用 " -":code:`CUDA_VISIBLE_DEVICES=\"\"` 屏蔽 GPU。" +"`implemented`: The enhancement has been implemented and is no longer " +"actively changed." 
+msgstr "`已实施`: 增强功能已实施,不再主动更改。" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 +msgid "`deferred`: The enhancement is proposed but not actively being worked on." +msgstr "`推迟`: 已提出改进建议,但尚未积极开展工作。" -#: ../../source/how-to-run-simulations.rst:153 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 msgid "" -"In addition, the GPU resource limits passed to :code:`client_resources` " -"are not `enforced` (i.e. they can be exceeded) which can result in the " -"situation of client using more VRAM than the ratio specified when " -"starting the simulation." -msgstr "" -"此外,传递给 :code:`client_resources` 的 GPU 资源限制并不是 \"强制 \"的(即可以超出),这可能导致客户端使用的" -" VRAM 超过启动模拟时指定的比例。" +"`rejected`: The authors and reviewers have decided that this enhancement " +"is not moving forward." +msgstr "`拒绝`: 作者和审稿人已决定不再推进该增强功能。" -#: ../../source/how-to-run-simulations.rst:156 -msgid "TensorFlow with GPUs" -msgstr "使用 GPU 的 TensorFlow" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 +msgid "`withdrawn`: The authors have withdrawn the enhancement." +msgstr "`撤回`: 作者已撤回增强功能。" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 +msgid "`replaced`: The enhancement has been replaced by a new enhancement." +msgstr "`已替换`: 增强功能已被新的增强功能取代。" -#: ../../source/how-to-run-simulations.rst:158 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 msgid "" -"When `using a GPU with TensorFlow " -"`_ nearly your entire GPU memory of" -" all your GPUs visible to the process will be mapped. This is done by " -"TensorFlow for optimization purposes. However, in settings such as FL " -"simulations where we want to split the GPU into multiple `virtual` " -"clients, this is not a desirable mechanism. Luckily we can disable this " -"default behavior by `enabling memory growth " -"`_." 
-msgstr "" -"在 TensorFlow `_ 中使用 GPU 时,几乎所有进程可见的" -" GPU 内存都将被映射。TensorFlow 这样做是出于优化目的。然而,在 FL 模拟等设置中,我们希望将 GPU 分割成多个 \"虚拟 " -"\"客户端,这并不是一个理想的机制。幸运的是,我们可以通过 `启用内存增长 " -"`_来禁用这一默认行为。" +"Adding an additional process to the ones already provided by GitHub " +"(Issues and Pull Requests) adds more complexity and can be a barrier for " +"potential first-time contributors." +msgstr "在 GitHub 已提供的流程(问题和拉取请求)之外再增加一个流程,会增加复杂性,并可能成为潜在首次贡献者的障碍。" -#: ../../source/how-to-run-simulations.rst:160 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 msgid "" -"This would need to be done in the main process (which is where the server" -" would run) and in each Actor created by the VCE. By means of " -":code:`actor_kwargs` we can pass the reserved key `\"on_actor_init_fn\"` " -"in order to specify a function to be executed upon actor initialization. " -"In this case, to enable GPU growth for TF workloads. It would look as " -"follows:" -msgstr "" -"这需要在主进程(也就是服务器运行的地方)和 VCE 创建的每个角色中完成。通过 " -":code:`actor_kwargs`,我们可以传递保留关键字`\"on_actor_init_fn\"`,以指定在角色初始化时执行的函数。在本例中,为了使" -" TF 工作负载的 GPU 增长,它看起来如下:" +"Expanding the proposal template beyond the single-sentence description " +"currently required in the features issue template may be a heavy burden " +"for non-native English speakers." +msgstr "对于英语非母语者来说,将提案模板扩展到目前要求的单句描述之外可能是一个沉重的负担。" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 +msgid "GitHub Issues" +msgstr "GitHub 问题" -#: ../../source/how-to-run-simulations.rst:179 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 msgid "" -"This is precisely the mechanism used in `Tensorflow/Keras Simulation " -"`_ example." +"Using GitHub Issues for these kinds of enhancements is doable. One could " +"use, for example, tags, to differentiate and filter them from other " +"issues. The main issue is in discussing and reviewing an enhancement: " +"GitHub issues only have a single thread for comments. 
Enhancements " +"usually have multiple threads of discussion at the same time for various " +"parts of the doc. Managing these multiple discussions can be confusing " +"when using GitHub Issues." msgstr "" -"这正是 \"Tensorflow/Keras 模拟 " -"`_\"示例中使用的机制。" +"使用 GitHub Issues 进行此类改进是可行的。例如,我们可以使用标签来区分和过滤这些问题。主要的问题在于讨论和审查增强功能: " +"GitHub 问题只有一个评论线程。而增强功能通常会同时有多个讨论线程,针对文档的不同部分。在使用 GitHub " +"问题时,管理这些多重讨论会很混乱。" -#: ../../source/how-to-run-simulations.rst:183 -msgid "Multi-node setups" -msgstr "多节点设置" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 +msgid "Google Docs" +msgstr "谷歌文档" -#: ../../source/how-to-run-simulations.rst:185 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 msgid "" -"The VCE does not currently offer a way to control on which node a " -"particular `virtual` client is executed. In other words, if more than a " -"single node have the resources needed by a client to run, then any of " -"those nodes could get the client workload scheduled onto. Later in the FL" -" process (i.e. in a different round) the same client could be executed by" -" a different node. Depending on how your clients access their datasets, " -"this might require either having a copy of all dataset partitions on all " -"nodes or a dataset serving mechanism (e.g. using nfs, a database) to " -"circumvent data duplication." +"Google Docs allow for multiple threads of discussions. But as Google Docs" +" are hosted outside the project, their discoverability by the community " +"needs to be taken care of. A list of links to all proposals has to be " +"managed and made available for the community. Compared to shipping " +"proposals as part of Flower's repository, the potential for missing links" +" is much higher." 
msgstr "" -"VCE 目前不提供控制特定 \"虚拟 " -"\"客户端在哪个节点上执行的方法。换句话说,如果不止一个节点拥有客户端运行所需的资源,那么这些节点中的任何一个都可能被调度到客户端工作负载上。在 " -"FL " -"进程的稍后阶段(即在另一轮中),同一客户端可以由不同的节点执行。根据客户访问数据集的方式,这可能需要在所有节点上复制所有数据集分区,或采用数据集服务机制(如使用" -" nfs 或数据库)来避免数据重复。" +"谷歌文档允许多线程讨论。但是,由于谷歌文档是在项目之外托管的,因此需要注意它们是否能被社区发现。我们必须管理所有提案的链接列表,并提供给社区使用。与作为" +" Flower 资源库一部分的提案相比,丢失链接的可能性要大得多。" -#: ../../source/how-to-run-simulations.rst:187 -msgid "" -"By definition virtual clients are `stateless` due to their ephemeral " -"nature. A client state can be implemented as part of the Flower client " -"class but users need to ensure this saved to persistent storage (e.g. a " -"database, disk) and that can be retrieve later by the same client " -"regardless on which node it is running from. This is related to the point" -" above also since, in some way, the client's dataset could be seen as a " -"type of `state`." -msgstr "" -"根据定义,虚拟客户端是 \"无状态 \"的,因为它们具有即时性。客户机状态可以作为 Flower " -"客户机类的一部分来实现,但用户需要确保将其保存到持久存储(如数据库、磁盘)中,而且无论客户机在哪个节点上运行,都能在以后检索到。这也与上述观点有关,因为在某种程度上,客户端的数据集可以被视为一种" -" \"状态\"。" +#: ../../source/fed/index.md:1 +msgid "FED - Flower Enhancement Doc" +msgstr "FED - Flower 增强文件" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:2 -msgid "Save and load model checkpoints" -msgstr "保存和加载模型检查点" +#: ../../source/how-to-aggregate-evaluation-results.rst:2 +msgid "Aggregate evaluation results" +msgstr "整合评估结果" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:4 +#: ../../source/how-to-aggregate-evaluation-results.rst:4 msgid "" -"Flower does not automatically save model updates on the server-side. This" -" how-to guide describes the steps to save (and load) model checkpoints in" -" Flower." -msgstr "Flower 不会在服务器端自动保存模型更新。本指南将介绍在 Flower 中保存(和加载)模型检查点的步骤。" +"The Flower server does not prescribe a way to aggregate evaluation " +"results, but it enables the user to fully customize result aggregation." 
+msgstr "Flower 服务器没有规定整合评估结果的方法,但用户可以完全自定义如何整合。" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:8 -msgid "Model checkpointing" -msgstr "模型检查点" +#: ../../source/how-to-aggregate-evaluation-results.rst:8 +msgid "Aggregate Custom Evaluation Results" +msgstr "自定义整合评估结果" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:10 +#: ../../source/how-to-aggregate-evaluation-results.rst:10 +#, fuzzy msgid "" -"Model updates can be persisted on the server-side by customizing " -":code:`Strategy` methods. Implementing custom strategies is always an " -"option, but for many cases it may be more convenient to simply customize " -"an existing strategy. The following code example defines a new " -":code:`SaveModelStrategy` which customized the existing built-in " -":code:`FedAvg` strategy. In particular, it customizes " -":code:`aggregate_fit` by calling :code:`aggregate_fit` in the base class " -"(:code:`FedAvg`). It then continues to save returned (aggregated) weights" -" before it returns those aggregated weights to the caller (i.e., the " -"server):" -msgstr "" -"模型更新可通过自定义 :code:`Strategy` " -"方法在服务器端持久化。实现自定义策略始终是一种选择,但在许多情况下,简单地自定义现有策略可能更方便。下面的代码示例定义了一个新的 " -":code:`SaveModelStrategy`,它自定义了现有的内置 :code:`FedAvg` " -"策略。特别是,它通过调用基类(:code:`FedAvg`)中的 :code:`aggregate_fit` 来定制 " -":code:`aggregate_fit`。然后继续保存返回的(聚合)参数,然后再将这些聚合参数返回给调用者(即服务器):" - -#: ../../source/how-to-save-and-load-model-checkpoints.rst:47 -msgid "Save and load PyTorch checkpoints" -msgstr "保存和加载 PyTorch 检查点" +"The same ``Strategy``-customization approach can be used to aggregate " +"custom evaluation results coming from individual clients. 
Clients can " +"return custom metrics to the server by returning a dictionary:" +msgstr "同样的 :code:`Strategy` 定制方法也可用于汇总来自单个客户端的自定义评估结果。客户端可以通过返回字典的方式向服务器返回自定义指标:" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:49 +#: ../../source/how-to-aggregate-evaluation-results.rst:39 msgid "" -"Similar to the previous example but with a few extra steps, we'll show " -"how to store a PyTorch checkpoint we'll use the ``torch.save`` function. " -"Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be" -" transformed into a list of NumPy ``ndarray``'s, then those are " -"transformed into the PyTorch ``state_dict`` following the ``OrderedDict``" -" class structure." -msgstr "" -"与前面的例子类似,但多了几个步骤,我们将展示如何存储一个 PyTorch 检查点,我们将使用 ``torch.save`` " -"函数。首先,``aggregate_fit`` 返回一个 ``Parameters`` 对象,它必须被转换成一个 NumPy " -"``ndarray`` 的列表,然后这些对象按照 ``OrderedDict`` 类结构被转换成 PyTorch `state_dict` 对象。" +"The server can then use a customized strategy to aggregate the metrics " +"provided in these dictionaries:" +msgstr "然后,服务器可以使用定制的策略来汇总这些字典中提供的指标:" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:85 -msgid "" -"To load your progress, you simply append the following lines to your " -"code. Note that this will iterate over all saved checkpoints and load the" -" latest one:" -msgstr "要加载进度,只需在代码中添加以下几行。请注意,这将遍历所有已保存的检查点,并加载最新的检查点:" +#: ../../source/how-to-authenticate-supernodes.rst:2 +#, fuzzy +msgid "Authenticate SuperNodes" +msgstr "验证超级节点" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:97 +#: ../../source/how-to-authenticate-supernodes.rst:4 #, fuzzy msgid "" -"Return/use this object of type ``Parameters`` wherever necessary, such as" -" in the ``initial_parameters`` when defining a ``Strategy``." +"Flower has built-in support for authenticated SuperNodes that you can use" +" to verify the identities of each SuperNode connecting to a SuperLink. 
" +"Flower node authentication works similar to how GitHub SSH authentication" +" works:" msgstr "" -"在必要时返回/使用此 ``Parameters`` 类型的对象,例如在定义 ``Strategy` 时的 " -"``initial_parameters` 中。" +"Flower 内置了对经过身份验证的超级节点的支持,您可以用它来验证连接到超级链接的每个超级节点的身份。Flower 节点身份验证的工作方式与 " +"GitHub SSH 身份验证的工作方式类似:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:2 -msgid "Upgrade to Flower 1.0" -msgstr "升级至 Flower 1.0" +#: ../../source/how-to-authenticate-supernodes.rst:8 +#, fuzzy +msgid "SuperLink (server) stores a list of known (client) node public keys" +msgstr "超级链接(服务器)存储已知(客户端)节点公钥列表" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:4 +#: ../../source/how-to-authenticate-supernodes.rst:9 +#, fuzzy msgid "" -"Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable" -" foundation for future growth. Compared to Flower 0.19 (and other 0.x " -"series releases), there are a few breaking changes that make it necessary" -" to change the code of existing 0.x-series projects." -msgstr "" -"Flower 1.0 正式发布。除了新功能,Flower 1.0 还为未来的发展奠定了稳定的基础。与 Flower 0.19(以及其他 0.x " -"系列版本)相比,有一些破坏性改动需要修改现有 0.x 系列项目的代码。" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:8 -#: ../../source/how-to-upgrade-to-flower-next.rst:43 -msgid "Install update" -msgstr "安装更新" +"Using ECDH, both SuperNode and SuperLink independently derive a shared " +"secret" +msgstr "使用 ECDH,超级节点和超级链路可独立生成共享秘密" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +#: ../../source/how-to-authenticate-supernodes.rst:10 +#, fuzzy msgid "" -"Here's how to update an existing installation to Flower 1.0 using either " -"pip or Poetry:" -msgstr "下面介绍如何使用 pip 或 Poetry 将现有安装更新到 Flower 1.0:" +"Shared secret is used to compute the HMAC value of the message sent from " +"SuperNode to SuperLink as a token" +msgstr "共享秘密用于计算作为令牌从超级节点发送到超级链接的信息的 HMAC 值" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 -msgid "pip: add ``-U`` when installing." -msgstr "pip: 安装时添加 ``-U``." 
+#: ../../source/how-to-authenticate-supernodes.rst:12 +#, fuzzy +msgid "SuperLink verifies the token" +msgstr "超级链接验证令牌" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +#: ../../source/how-to-authenticate-supernodes.rst:14 +#, fuzzy msgid "" -"``python -m pip install -U flwr`` (when using ``start_server`` and " -"``start_client``)" -msgstr "`python -m pip install -U flwr``(当使用`start_server`和`start_client`时)" +"We recommend you to check out the complete `code example " +"`_ demonstrating federated learning with Flower in an " +"authenticated setting." +msgstr "" +"请参阅`完整代码示例 " +"`_了解更多信息。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:15 +#: ../../source/how-to-authenticate-supernodes.rst:20 +#, fuzzy msgid "" -"``python -m pip install -U flwr[simulation]`` (when using " -"``start_simulation``)" -msgstr "`python -m pip install -U flwr[simulation]``(当使用`start_simulation``时)" +"This guide covers a preview feature that might change in future versions " +"of Flower." +msgstr "本指南涵盖的预览功能可能会在 Flower 的未来版本中有所改变。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 +#: ../../source/how-to-authenticate-supernodes.rst:24 +#, fuzzy msgid "" -"Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " -"reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " -"before running ``poetry install``)." -msgstr "" -"Poetry:更新 ``pyproject.toml`` 中的 ``flwr`` 依赖包,然后重新安装(运行 ``poetry install``" -" 前,别忘了通过 ``rm poetry.lock` 删除 ``poetry.lock`)。" +"For increased security, node authentication can only be used when " +"encrypted connections (SSL/TLS) are enabled." 
+msgstr "为提高安全性,只有启用加密连接(SSL/TLS)时才能使用节点验证。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 -msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" -msgstr "``flwr = \"^1.0.0\"`` (当使用 ``start_server` 和 ``start_client` 时)" +#: ../../source/how-to-authenticate-supernodes.rst:28 +#, fuzzy +msgid "Enable node authentication in ``SuperLink``" +msgstr "在 :code:`SuperLink` 中启用节点验证" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:20 +#: ../../source/how-to-authenticate-supernodes.rst:30 +#, fuzzy msgid "" -"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " -"using ``start_simulation``)" +"To enable node authentication, first you need to configure SSL/TLS " +"connections to secure the SuperLink<>SuperNode communication. You can " +"find the complete guide `here `_. After configuring secure connections, you" +" can enable client authentication in a long-running Flower ``SuperLink``." +" Use the following terminal command to start a Flower ``SuperNode`` that " +"has both secure connections and node authentication enabled:" msgstr "" -"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] " -"}``(当使用``start_simulation``时)" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 -#: ../../source/how-to-upgrade-to-flower-next.rst:100 -msgid "Required changes" -msgstr "所需变更" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:26 -msgid "The following breaking changes require manual updates." -msgstr "以下更改需要手动更新。" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:29 -msgid "General" -msgstr "一般情况" +"要启用节点验证,首先需要配置 SSL/TLS 连接,以确保 SuperLink<>SuperNode 通信的安全。您可以在 " +"`_ " +"找到完整的指南。配置安全连接后,您就可以在长期运行的 Flower " +":code:`SuperLink`中启用客户端身份验证。使用以下终端命令启动一个同时启用了安全连接和节点验证的 Flower " +":code:`SuperNode`:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:31 -msgid "" -"Pass all arguments as keyword arguments (not as positional arguments). 
" -"Here's an example:" -msgstr "将所有参数作为关键字参数传递(而不是位置参数)。下面是一个例子:" +#: ../../source/how-to-authenticate-supernodes.rst:47 +#, fuzzy +msgid "Let's break down the authentication flags:" +msgstr "让我们来分析一下身份验证标志:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 +#: ../../source/how-to-authenticate-supernodes.rst:49 +#, fuzzy msgid "" -"Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " -"FlowerClient())``" -msgstr "Flower 0.19 (位置参数): ``start_client(\"127.0.0.1:8080\", FlowerClient())``" +"The first flag ``--auth-list-public-keys`` expects a path to a CSV file " +"storing all known node public keys. You need to store all known node " +"public keys that are allowed to participate in a federation in one CSV " +"file (``.csv``)." +msgstr "" +"第一个标志 :code:`--auth-list-public-keys`(密码:`--auth-list-public-keys`)需要一个 " +"CSV 文件路径,该文件存储了所有已知节点的公钥。您需要在一个 CSV 文件(:code:`.csv`)中存储所有允许参与联盟的已知节点公钥。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:34 +#: ../../source/how-to-authenticate-supernodes.rst:53 +#, fuzzy msgid "" -"Flower 1.0 (keyword arguments): " -"``start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())``" +"A valid CSV file storing known node public keys should list the keys in " +"OpenSSH format, separated by commas and without any comments. For an " +"example, refer to our code sample, which contains a CSV file with two " +"known node public keys." 
msgstr "" -"Flower 1.0(关键字参数): ``start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:37 -#: ../../source/ref-api/flwr.client.Client.rst:2 -msgid "Client" -msgstr "客户端" +"存储已知节点公开密钥的有效 CSV 文件应以 OpenSSH " +"格式列出密钥,以逗号分隔,不含任何注释。有关示例,请参阅我们的代码示例,其中包含一个包含两个已知节点公钥的 CSV 文件。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 +#: ../../source/how-to-authenticate-supernodes.rst:57 +#, fuzzy msgid "" -"Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " -"``def get_parameters(self, config):``" +"The second and third flags ``--auth-superlink-private-key`` and ``--auth-" +"superlink-public-key`` expect paths to the server's private and public " +"keys. For development purposes, you can generate a private and public key" +" pair using ``ssh-keygen -t ecdsa -b 384``." msgstr "" -"NumPyClient的子类:将``def get_parameters(self):```改为``def " -"get_parameters(self,config):``" +"第二和第三个标记 :code:`--auth-superlink-private-key` 和 :code:`--auth-superlink-" +"public-key` 希望指向服务器私钥和公钥的路径。出于开发目的,您可以使用 :code:`ssh-keygen -t ecdsa -b " +"384` 生成一对私钥和公钥。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:40 +#: ../../source/how-to-authenticate-supernodes.rst:64 +#, fuzzy msgid "" -"Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " -"get_parameters(self, ins: GetParametersIns):``" +"In Flower 1.9, there is no support for dynamically removing, editing, or " +"adding known node public keys to the SuperLink. To change the set of " +"known nodes, you need to shut the server down, edit the CSV file, and " +"start the server again. Support for dynamically changing the set of known" +" nodes is on the roadmap to be released in Flower 1.10 (ETA: June)." 
msgstr "" -"客户端 \"的子类:将 \"get_parameters(self): \"改为 \"get_parameters(self, ins: " -"GetParametersIns):\"" +"在 Flower 1.9 中,超级链接不支持动态删除、编辑或添加已知节点公钥。要更改已知节点集,您需要关闭服务器,编辑 CSV " +"文件,然后重新启动服务器。动态更改已知节点集的支持已列入 Flower 1.10(预计发布时间:6 月)的路线图。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 -msgid "Strategies / ``start_server`` / ``start_simulation``" -msgstr "策略 / ``start_server`` / ``start_simulation``" +#: ../../source/how-to-authenticate-supernodes.rst:71 +#, fuzzy +msgid "Enable node authentication in ``SuperNode``" +msgstr "在 :code:`SuperNode` 中启用节点验证" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 +#: ../../source/how-to-authenticate-supernodes.rst:73 +#, fuzzy msgid "" -"Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " -"``start_simulation``. Here's an example:" +"Similar to the long-running Flower server (``SuperLink``), you can easily" +" enable node authentication in the long-running Flower client " +"(``SuperNode``). Use the following terminal command to start an " +"authenticated ``SuperNode``:" msgstr "" -"向 ``start_server`` 和 ``start_simulation` 传递 ``ServerConfig``(而不是 " -"dictionary)。下面是一个例子:" +"与长期运行的 Flower 服务器(:code:`SuperLink`)类似,您也可以在长期运行的 Flower " +"客户端(:code:`SuperNode`)中轻松启用节点身份验证。使用以下终端命令启动已验证的 :code:`SuperNode`:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 +#: ../../source/how-to-authenticate-supernodes.rst:85 +#, fuzzy msgid "" -"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " -"\"round_timeout\": 600.0}, ...)``" +"The ``--auth-supernode-private-key`` flag expects a path to the node's " +"private key file and the ``--auth-supernode-public-key`` flag expects a " +"path to the node's public key file. For development purposes, you can " +"generate a private and public key pair using ``ssh-keygen -t ecdsa -b " +"384``." 
msgstr "" -"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " -"\"round_timeout\": 600.0}, ...)``" +":code:`--auth-supernode-private-key`标志需要节点私钥文件的路径,:code:`-auth-supernode-" +"public-key`标志需要节点公钥文件的路径。出于开发目的,可以使用 :code:`ssh-keygen -t ecdsa -b 384` " +"生成一对私钥和公钥。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:48 -msgid "" -"Flower 1.0: ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" -msgstr "" -"Flower 1.0: ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +#: ../../source/how-to-authenticate-supernodes.rst:91 +#, fuzzy +msgid "Security notice" +msgstr "安全通知" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:50 +#: ../../source/how-to-authenticate-supernodes.rst:93 +#, fuzzy msgid "" -"Replace ``num_rounds=1`` in ``start_simulation`` with the new " -"``config=ServerConfig(...)`` (see previous item)" -msgstr "将`start_simulation``中的`num_rounds=1``替换为新的`config=ServerConfig(...)`(参见前一项)" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 -msgid "" -"Remove ``force_final_distributed_eval`` parameter from calls to " -"``start_server``. Distributed evaluation on all clients can be enabled by" -" configuring the strategy to sample all clients for evaluation after the " -"last round of training." +"The system's security relies on the credentials of the SuperLink and each" +" SuperNode. Therefore, it is imperative to safeguard and safely store the" +" credentials to avoid security risks such as Public Key Infrastructure " +"(PKI) impersonation attacks. The node authentication mechanism also " +"involves human interaction, so please ensure that all of the " +"communication is done in a secure manner, using trusted communication " +"methods." 
msgstr "" -"删除调用 ``start_server`` 时的 ``force_final_distributed_eval` " -"参数。可以通过配置策略,在最后一轮训练后对所有客户端进行抽样评估,从而启用对所有客户端的分布式评估。" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:52 -msgid "Rename parameter/ndarray conversion functions:" -msgstr "重命名参数/数组转换函数:" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:54 -msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" -msgstr "``parameters_to_weights`` --> ``parameters_to_ndarrays``" +"系统的安全性依赖于超级链接和每个超级节点的凭证。因此,必须保护和安全存储凭证,以避免公钥基础设施 (PKI) " +"假冒攻击等安全风险。节点验证机制还涉及人机交互,因此请确保使用可信的通信方法,以安全的方式进行所有通信。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:55 -msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" -msgstr "``weights_to_parameters`` --> ``ndarrays_to_parameters``" +#: ../../source/how-to-authenticate-supernodes.rst:100 +#: ../../source/how-to-enable-ssl-connections.rst:71 +#: ../../source/how-to-use-built-in-mods.rst:95 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 +msgid "Conclusion" +msgstr "总结" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:57 +#: ../../source/how-to-authenticate-supernodes.rst:102 +#, fuzzy msgid "" -"Strategy initialization: if the strategy relies on the default values for" -" ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " -"``fraction_evaluate`` manually to ``0.1``. Projects that do not manually " -"create a strategy (by calling ``start_server`` or ``start_simulation`` " -"without passing a strategy instance) should now manually initialize " -"FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." +"You should now have learned how to start a long-running Flower server " +"(``SuperLink``) and client (``SuperNode``) with node authentication " +"enabled. You should also know the significance of the private key and " +"store it safely to minimize security risks." 
msgstr "" -"策略初始化:如果策略依赖于 ``fraction_fit`` 和 ``fraction_evaluate`` 的默认值,请手动将 " -"``fraction_fit`` 和 ``fraction_evaluate`` 设置为 ``0.1``。未手动创建策略的项目(调用 " -"``start_server` 或 ``start_simulation` 时未传递策略实例)现在应手动初始化 FedAvg,并将 " -"`fraction_fit` 和 `fraction_evaluate` 设为 `0.1``。" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 -msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" -msgstr "重命名内置策略参数(例如,`FedAvg``):" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:60 -msgid "``fraction_eval`` --> ``fraction_evaluate``" -msgstr "``fraction_eval`` --> ``fraction_evaluate``" +"现在,您应该已经学会了如何启动长期运行的 Flower " +"服务器(:code:`SuperLink`)和客户端(:code:`SuperNode`)并启用节点身份验证。您还应该知道私钥的重要性,并将其安全存储,以尽量减少安全风险。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 -msgid "``min_eval_clients`` --> ``min_evaluate_clients``" -msgstr "``min_eval_clients`` --> ``min_evaluate_clients``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:62 -msgid "``eval_fn`` --> ``evaluate_fn``" -msgstr "``eval_fn`` --> ``evaluate_fn``" +#: ../../source/how-to-configure-clients.rst:2 +msgid "Configure clients" +msgstr "配置客户端" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:64 +#: ../../source/how-to-configure-clients.rst:4 msgid "" -"Rename ``rnd`` to ``server_round``. This impacts multiple methods and " -"functions, for example, ``configure_fit``, ``aggregate_fit``, " -"``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``." -msgstr "" -"将 `rnd` 更名为 `server_round`。这会影响多个方法和函数,例如 " -"``configure_fit``、``aggregate_fit``、``configure_evaluate``、`aggregate_evaluate``" -" 和 ``evaluate_fn``。" +"Along with model parameters, Flower can send configuration values to " +"clients. Configuration values can be used for various purposes. They are," +" for example, a popular way to control client-side hyperparameters from " +"the server." 
+msgstr "除了模型参数,Flower 还可以向客户端发送配置值。配置值有多种用途。它们是一种从服务器控制客户端超参数的常用方法。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:65 -msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" -msgstr "在 ``evaluate_fn` 中添加 ``server_round` 和 ``config`:" +#: ../../source/how-to-configure-clients.rst:9 +msgid "Configuration values" +msgstr "配置值" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:67 +#: ../../source/how-to-configure-clients.rst:11 msgid "" -"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:``" +"Configuration values are represented as a dictionary with ``str`` keys " +"and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " +"float), ``int``, or ``str`` (or equivalent types in different languages)." +" Here is an example of a configuration dictionary in Python:" msgstr "" -"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:``" +"配置值以字典的形式表示,字典的键为 ``str``,值的类型为 ``bool``、``bytes``、``double``(64 " +"位精度浮点型)、``int``或 ``str`(或不同语言中的等效类型)。下面是一个 Python 配置字典的示例:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +#: ../../source/how-to-configure-clients.rst:25 msgid "" -"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " -"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " -"Scalar]]]:``" +"Flower serializes these configuration dictionaries (or *config dict* for " +"short) to their ProtoBuf representation, transports them to the client " +"using gRPC, and then deserializes them back to Python dictionaries." 
msgstr "" -"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " -"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " -"Scalar]]]:``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 -msgid "Custom strategies" -msgstr "定制策略" +"Flower 将这些配置字典(简称 *config dict*)序列化为 ProtoBuf 表示形式,使用 gRPC " +"将其传输到客户端,然后再反序列化为 Python 字典。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:73 +#: ../../source/how-to-configure-clients.rst:31 msgid "" -"The type of parameter ``failures`` has changed from " -"``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " -"BaseException]]`` (in ``aggregate_fit``) and " -"``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in " -"``aggregate_evaluate``)" +"Currently, there is no support for directly sending collection types " +"(e.g., ``Set``, ``List``, ``Map``) as values in configuration " +"dictionaries. There are several workarounds to send collections as values" +" by converting them to one of the supported value types (and converting " +"them back on the client-side)." msgstr "" -"参数``failures``的类型已从``List[BaseException]``变为``List[Union[Tuple[ClientProxy," -" FitRes], " -"BaseException]]``(在``agregate_fit``中)和``List[Union[Tuple[ClientProxy, " -"EvaluateRes], BaseException]]``(在``agregate_evaluate``中)" +"目前,还不支持在配置字典中直接发送作为值的集合类型(例如,`Set``, `List`, " +"`Map``)。有几种变通方法可将集合转换为支持的值类型之一(并在客户端将其转换回),从而将集合作为值发送。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:74 +#: ../../source/how-to-configure-clients.rst:36 msgid "" -"The ``Strategy`` method ``evaluate`` now receives the current round of " -"federated learning/evaluation as the first parameter:" -msgstr "``Strategy``方法 的``evaluate``现在会接收当前一轮联邦学习/评估作为第一个参数:" +"One can, for example, convert a list of floating-point numbers to a JSON " +"string, then send the JSON string using the configuration dictionary, and" +" then convert the JSON string back to a list of floating-point numbers on" +" the client." 
+msgstr "例如,可以将浮点数列表转换为 JSON 字符串,然后使用配置字典发送 JSON 字符串,再在客户端将 JSON 字符串转换回浮点数列表。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:76 -msgid "" -"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:``" -msgstr "" -"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:```" +#: ../../source/how-to-configure-clients.rst:41 +msgid "Configuration through built-in strategies" +msgstr "通过内置策略进行配置" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +#: ../../source/how-to-configure-clients.rst:43 +#, fuzzy msgid "" -"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " -"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" +"The easiest way to send configuration values to clients is to use a " +"built-in strategy like ``FedAvg``. Built-in strategies support so-called " +"configuration functions. A configuration function is a function that the " +"built-in strategy calls to get the configuration dictionary for the " +"current round. It then forwards the configuration dictionary to all the " +"clients selected during that round." msgstr "" -"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " -"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 -msgid "Optional improvements" -msgstr "可选的改进措施" +"向客户端发送配置值的最简单方法是使用内置策略,如 " +":code:`FedAvg`。内置策略支持所谓的配置函数。配置函数是内置策略调用的函数,用于获取当前轮的配置字典。然后,它会将配置字典转发给该轮中选择的所有客户端。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:82 +#: ../../source/how-to-configure-clients.rst:49 msgid "" -"Along with the necessary changes above, there are a number of potential " -"improvements that just became possible:" -msgstr "除了上述必要的改动之外,还有一些潜在的改进措施:" +"Let's start with a simple example. 
Imagine we want to send (a) the batch " +"size that the client should use, (b) the current global round of " +"federated learning, and (c) the number of epochs to train on the client-" +"side. Our configuration function could look like this:" +msgstr "让我们从一个简单的例子开始。想象一下,我们想要发送给客户端(a)应该使用的批次大小,(b)当前联邦学习的全局轮次,以及(c)客户端训练的遍历数。我们的配置函数可以是这样的:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:84 +#: ../../source/how-to-configure-clients.rst:65 +#, fuzzy msgid "" -"Remove \"placeholder\" methods from subclasses of ``Client`` or " -"``NumPyClient``. If you, for example, use server-side evaluation, then " -"empty placeholder implementations of ``evaluate`` are no longer " -"necessary." -msgstr "" -"删除 ``Client`` 或 ``NumPyClient`` 子类中的 \"占位符 " -"\"方法。例如,如果你使用服务器端评估,那么就不再需要``evaluate``的 \"空占位符 \"实现。" +"To make the built-in strategies use this function, we can pass it to " +"``FedAvg`` during initialization using the parameter " +"``on_fit_config_fn``:" +msgstr "为了让内置策略使用这个函数,我们可以在初始化时使用参数 :code:`on_fit_config_fn` 将它传递给 ``FedAvg`` :" + +#: ../../source/how-to-configure-clients.rst:75 +msgid "One the client side, we receive the configuration dictionary in ``fit``:" +msgstr "在客户端,我们在 ``fit`` 中接收配置字典:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:85 +#: ../../source/how-to-configure-clients.rst:86 msgid "" -"Configure the round timeout via ``start_simulation``: " -"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " -"round_timeout=600.0), ...)``" +"There is also an `on_evaluate_config_fn` to configure evaluation, which " +"works the same way. They are separate functions because one might want to" +" send different configuration values to `evaluate` (for example, to use a" +" different batch size)." 
msgstr "" -"通过 ``start_simulation`` 配置循环超时: ``start_simulation(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:89 -#: ../../source/how-to-upgrade-to-flower-next.rst:317 -msgid "Further help" -msgstr "更多帮助" +"还有一个 `on_evaluate_config_fn` 用于配置评估,其工作方式相同。它们是不同的函数,因为可能需要向 `evaluate` " +"发送不同的配置值(例如,使用不同的批量大小)。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:91 +#: ../../source/how-to-configure-clients.rst:90 msgid "" -"Most official `Flower code examples " -"`_ are already updated" -" to Flower 1.0, they can serve as a reference for using the Flower 1.0 " -"API. If there are further questions, `join the Flower Slack " -"`_ and use the channel ``#questions``." +"The built-in strategies call this function every round (that is, every " +"time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). " +"Calling `on_evaluate_config_fn` every round allows us to vary/change the " +"config dict over consecutive rounds. If we wanted to implement a " +"hyperparameter schedule, for example, to increase the number of local " +"epochs during later rounds, we could do the following:" msgstr "" -"大多数官方的 `Flower 代码示例 `_" -" 已经更新到 Flower 1.0,它们可以作为使用 Flower 1.0 API 的参考。如果还有其他问题,请加入 Flower Slack " -"`_ 并使用 \"#questions``\"。" +"内置策略每轮都会调用此函数(即每次运行 `Strategy.configure_fit` 或 " +"`Strategy.configure_evaluate` 时)。每轮调用 `on_evaluate_config_fn` " +"允许我们在连续几轮中改变配置指令。例如,如果我们想实现一个超参数时间表,以增加后几轮的本地遍历次数,我们可以这样做:" -#: ../../source/how-to-upgrade-to-flower-next.rst:2 +#: ../../source/how-to-configure-clients.rst:107 #, fuzzy -msgid "Upgrade to Flower Next" -msgstr "升级至 Flower 1.0" +msgid "The ``FedAvg`` strategy will call this function *every round*." 
+msgstr "代码:`FedAvg`策略*每轮*都会调用该函数。" -#: ../../source/how-to-upgrade-to-flower-next.rst:4 +#: ../../source/how-to-configure-clients.rst:110 +msgid "Configuring individual clients" +msgstr "配置个别客户端" + +#: ../../source/how-to-configure-clients.rst:112 msgid "" -"Welcome to the migration guide for updating Flower to Flower Next! " -"Whether you're a seasoned user or just getting started, this guide will " -"help you smoothly transition your existing setup to take advantage of the" -" latest features and improvements in Flower Next, starting from version " -"1.8." -msgstr "" +"In some cases, it is necessary to send different configuration values to " +"different clients." +msgstr "在某些情况下,有必要向不同的客户端发送不同的配置值。" -#: ../../source/how-to-upgrade-to-flower-next.rst:9 +#: ../../source/how-to-configure-clients.rst:115 +#, fuzzy msgid "" -"This guide shows how to reuse pre-``1.8`` Flower code with minimum code " -"changes by using the *compatibility layer* in Flower Next. In another " -"guide, we will show how to run Flower Next end-to-end with pure Flower " -"Next APIs." +"This can be achieved by customizing an existing strategy or by " +":doc:`implementing a custom strategy from scratch `. Here's a nonsensical example that customizes ``FedAvg`` by " +"adding a custom ``\"hello\": \"world\"`` configuration key/value pair to " +"the config dict of a *single client* (only the first client in the list, " +"the other clients in this round to not receive this \"special\" config " +"value):" msgstr "" +"这可以通过定制现有策略或 `从头开始实施一个定制策略 `_来实现。下面是一个无厘头的例子,`FedAvg`通过在*单个客户端*的配置指令(config " +"dict)中添加自定义的``\"hello\": \"world\"``配置键/值对添加到此的配置 dict " +"中(仅列表中的第一个客户端,本轮中的其他客户端不会收到此 \"特殊 \"配置值):" -#: ../../source/how-to-upgrade-to-flower-next.rst:13 -msgid "Let's dive in!" 
-msgstr "" +#: ../../source/how-to-configure-logging.rst:2 +msgid "Configure logging" +msgstr "配置日志记录" -#: ../../source/how-to-upgrade-to-flower-next.rst:48 -#, fuzzy +#: ../../source/how-to-configure-logging.rst:4 msgid "" -"Here's how to update an existing installation of Flower to Flower Next " -"with ``pip``:" -msgstr "下面介绍如何使用 pip 或 Poetry 将现有安装更新到 Flower 1.0:" +"The Flower logger keeps track of all core events that take place in " +"federated learning workloads. It presents information by default " +"following a standard message format:" +msgstr "Flower 日志记录器会跟踪联邦学习工作负载中发生的所有核心事件。它默认按照标准信息格式提供信息:" -#: ../../source/how-to-upgrade-to-flower-next.rst:54 +#: ../../source/how-to-configure-logging.rst:13 #, fuzzy -msgid "or if you need Flower Next with simulation:" -msgstr "启动 Flower 模拟" - -#: ../../source/how-to-upgrade-to-flower-next.rst:61 msgid "" -"Ensure you set the following version constraint in your " -"``requirements.txt``" +"containing relevant information including: log message level (e.g. " +"``INFO``, ``DEBUG``), a timestamp, the line where the logging took place " +"from, as well as the log message itself. In this way, the logger would " +"typically display information on your terminal as follows:" msgstr "" +"相关信息包括:日志信息级别(例如 " +":code:`INFO`、:code:`DEBUG`)、时间戳、日志记录的行以及日志信息本身。这样,日志记录器通常会在终端上显示如下信息:" -#: ../../source/how-to-upgrade-to-flower-next.rst:71 -msgid "or ``pyproject.toml``:" -msgstr "" +#: ../../source/how-to-configure-logging.rst:35 +msgid "Saving log to file" +msgstr "将日志保存到文件" -#: ../../source/how-to-upgrade-to-flower-next.rst:82 -#, fuzzy -msgid "Using Poetry" -msgstr "使用 pip" - -#: ../../source/how-to-upgrade-to-flower-next.rst:84 +#: ../../source/how-to-configure-logging.rst:37 #, fuzzy msgid "" -"Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " -"(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " -"running ``poetry install``)." 
+"By default, the Flower log is outputted to the terminal where you launch " +"your Federated Learning workload from. This applies for both gRPC-based " +"federation (i.e. when you do ``fl.server.start_server``) and when using " +"the ``VirtualClientEngine`` (i.e. when you do " +"``fl.simulation.start_simulation``). In some situations you might want to" +" save this log to disk. You can do so by calling the " +"`fl.common.logger.configure() " +"`_" +" function. For example:" msgstr "" -"Poetry:更新 ``pyproject.toml`` 中的 ``flwr`` 依赖包,然后重新安装(运行 ``poetry install``" -" 前,别忘了通过 ``rm poetry.lock` 删除 ``poetry.lock`)。" +"默认情况下,Flower 日志会输出到启动联邦学习工作负载的终端。这既适用于基于 gRPC 的联邦学习(即执行 " +":code:`fl.server.start_server` 时),也适用于使用 :code:`VirtualClientEngine` " +"时(即执行 :code:`fl.simulation.start_simulation` " +"时)。在某些情况下,您可能希望将此日志保存到磁盘。为此,您可以调用 `fl.common.logger.configure() " +"`_" +" 函数。例如:" -#: ../../source/how-to-upgrade-to-flower-next.rst:86 +#: ../../source/how-to-configure-logging.rst:59 #, fuzzy msgid "" -"Ensure you set the following version constraint in your " -"``pyproject.toml``:" -msgstr "将 ``pyproject.toml`` 中的次要版本增加一个。" - -#: ../../source/how-to-upgrade-to-flower-next.rst:102 -msgid "" -"In Flower Next, the *infrastructure* and *application layers* have been " -"decoupled. Instead of starting a client in code via ``start_client()``, " -"you create a |clientapp_link|_ and start it via the command line. Instead" -" of starting a server in code via ``start_server()``, you create a " -"|serverapp_link|_ and start it via the command line. The long-running " -"components of server and client are called SuperLink and SuperNode. The " -"following non-breaking changes that require manual updates and allow you " -"to run your project both in the traditional way and in the Flower Next " -"way:" +"With the above, Flower will record the log you see on your terminal to " +"``log.txt``. This file will be created in the same directory as were you " +"are running the code from. 
If we inspect we see the log above is also " +"recorded but prefixing with ``identifier`` each line:" msgstr "" +"通过上述操作,Flower 会将您在终端上看到的日志记录到 " +":code:`log.txt`。该文件将创建在运行代码的同一目录下。如果我们检查一下,就会发现上面的日志也被记录了下来,但每一行都以 " +":code:`identifier` 作为前缀:" -#: ../../source/how-to-upgrade-to-flower-next.rst:109 -#, fuzzy -msgid "|clientapp_link|_" -msgstr "客户端" +#: ../../source/how-to-configure-logging.rst:81 +msgid "Log your own messages" +msgstr "记录自己的信息" -#: ../../source/how-to-upgrade-to-flower-next.rst:110 +#: ../../source/how-to-configure-logging.rst:83 msgid "" -"Wrap your existing client with |clientapp_link|_ instead of launching it " -"via |startclient_link|_. Here's an example:" -msgstr "" - -#: ../../source/how-to-upgrade-to-flower-next.rst:132 -#, fuzzy -msgid "|serverapp_link|_" -msgstr "服务器" +"You might expand the information shown by default with the Flower logger " +"by adding more messages relevant to your application. You can achieve " +"this easily as follows." +msgstr "您可以通过添加更多与应用程序相关的信息来扩展 Flower 日志记录器默认显示的信息。您可以通过以下方法轻松实现这一目标。" -#: ../../source/how-to-upgrade-to-flower-next.rst:133 +#: ../../source/how-to-configure-logging.rst:114 msgid "" -"Wrap your existing strategy with |serverapp_link|_ instead of starting " -"the server via |startserver_link|_. Here's an example:" -msgstr "" - -#: ../../source/how-to-upgrade-to-flower-next.rst:154 -msgid "Deployment" -msgstr "" +"In this way your logger will show, in addition to the default messages, " +"the ones introduced by the clients as specified above." +msgstr "这样,除默认信息外,您的日志记录器还将显示由客户引入的信息,如上文所述。" -#: ../../source/how-to-upgrade-to-flower-next.rst:155 -msgid "" -"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " -"in sequence, |flowernext_clientapp_link|_ (2x) and " -"|flowernext_serverapp_link|_. There is no need to execute `client.py` and" -" `server.py` as Python scripts." 
-msgstr "" +#: ../../source/how-to-configure-logging.rst:140 +msgid "Log to a remote service" +msgstr "登录远程服务" -#: ../../source/how-to-upgrade-to-flower-next.rst:158 +#: ../../source/how-to-configure-logging.rst:142 +#, fuzzy msgid "" -"Here's an example to start the server without HTTPS (only for " -"prototyping):" +"The ``fl.common.logger.configure`` function, also allows specifying a " +"host to which logs can be pushed (via ``POST``) through a native Python " +"``logging.handler.HTTPHandler``. This is a particularly useful feature in" +" ``gRPC``-based Federated Learning workloads where otherwise gathering " +"logs from all entities (i.e. the server and the clients) might be " +"cumbersome. Note that in Flower simulation, the server automatically " +"displays all logs. You can still specify a ``HTTPHandler`` should you " +"wish to backup or analyze the logs somewhere else." msgstr "" +"此外,:code:`fl.common.logger.configure`函数还允许指定主机,通过本地 Python " +":code:`logging.handler.HTTPHandler`,向该主机推送日志(通过 :code:`POST`)。在基于 " +":code:`gRPC` 的联邦学习工作负载中,这是一个特别有用的功能,否则从所有实体(即服务器和客户端)收集日志可能会很麻烦。请注意,在 " +"Flower 模拟器中,服务器会自动显示所有日志。如果希望在其他地方备份或分析日志,仍可指定 :code:`HTTPHandler`。" -#: ../../source/how-to-upgrade-to-flower-next.rst:174 -msgid "" -"Here's another example to start with HTTPS. Use the ``--certificates`` " -"command line argument to pass paths to (CA certificate, server " -"certificate, and server private key)." -msgstr "" +#: ../../source/how-to-enable-ssl-connections.rst:2 +msgid "Enable SSL connections" +msgstr "启用 SSL 连接" -#: ../../source/how-to-upgrade-to-flower-next.rst:201 +#: ../../source/how-to-enable-ssl-connections.rst:4 #, fuzzy -msgid "Simulation in CLI" -msgstr "运行模拟" - -#: ../../source/how-to-upgrade-to-flower-next.rst:202 msgid "" -"Wrap your existing client and strategy with |clientapp_link|_ and " -"|serverapp_link|_, respectively. There is no need to use |startsim_link|_" -" anymore. 
Here's an example:" -msgstr "" +"This guide describes how to a SSL-enabled secure Flower server " +"(``SuperLink``) can be started and how a Flower client (``SuperNode``) " +"can establish a secure connections to it." +msgstr "本指南介绍如何启动启用 SSL 的安全 Flower 服务器,以及 Flower 客户端如何与其建立安全连接。" -#: ../../source/how-to-upgrade-to-flower-next.rst:232 +#: ../../source/how-to-enable-ssl-connections.rst:8 msgid "" -"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " -"``client_app`` object in the code instead of executing the Python script." -" Here's an example (assuming the ``server_app`` and ``client_app`` " -"objects are in a ``sim.py`` module):" +"A complete code example demonstrating a secure connection can be found " +"`here `_." msgstr "" +"有关安全连接的完整代码示例,请参见 `_ 。" -#: ../../source/how-to-upgrade-to-flower-next.rst:249 +#: ../../source/how-to-enable-ssl-connections.rst:11 +#, fuzzy msgid "" -"Set default resources for each |clientapp_link|_ using the ``--backend-" -"config`` command line argument instead of setting the " -"``client_resources`` argument in |startsim_link|_. Here's an example:" -msgstr "" +"The code example comes with a ``README.md`` file which explains how to " +"start it. Although it is already SSL-enabled, it might be less " +"descriptive on how it does so. Stick to this guide for a deeper " +"introduction to the topic." +msgstr "代码示例附带的 README.md 文件将解释如何启动它。虽然它已经启用了 SSL,但对如何启用可能描述较少。请参考本指南,了解更深入的相关介绍。" -#: ../../source/how-to-upgrade-to-flower-next.rst:275 -msgid "Simulation in a Notebook" -msgstr "" +#: ../../source/how-to-enable-ssl-connections.rst:16 +msgid "Certificates" +msgstr "证书" -#: ../../source/how-to-upgrade-to-flower-next.rst:276 +#: ../../source/how-to-enable-ssl-connections.rst:18 +#, fuzzy msgid "" -"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's " -"an example:" +"Using SSL-enabled connections requires certificates to be passed to the " +"server and client. 
For the purpose of this guide we are going to generate" +" self-signed certificates. As this can become quite complex we are going " +"to ask you to run the script in ``examples/advanced-" +"tensorflow/certificates/generate.sh`` with the following command " +"sequence:" msgstr "" +"使用支持 SSL 的连接需要向服务器和客户端传递证书。在本指南中,我们将生成自签名证书。由于这可能会变得相当复杂,我们将要求你运行 " +":code:`examples/advanced-tensorflow/certificates/generate.sh` 中的脚本" -#: ../../source/how-to-upgrade-to-flower-next.rst:319 +#: ../../source/how-to-enable-ssl-connections.rst:29 #, fuzzy msgid "" -"Some official `Flower code examples `_ " -"are already updated to Flower Next so they can serve as a reference for " -"using the Flower Next API. If there are further questions, `join the " -"Flower Slack `_ and use the channel " -"``#questions``. You can also `participate in Flower Discuss " -"`_ where you can find us answering questions," -" or share and learn from others about migrating to Flower Next." -msgstr "" -"大多数官方的 `Flower 代码示例 `_" -" 已经更新到 Flower 1.0,它们可以作为使用 Flower 1.0 API 的参考。如果还有其他问题,请加入 Flower Slack " -"`_ 并使用 \"#questions``\"。" +"This will generate the certificates in ``examples/advanced-" +"tensorflow/.cache/certificates``." +msgstr "这将在 :code:`examples/advanced-tensorflow/.cache/certificates` 中生成证书。" -#: ../../source/how-to-upgrade-to-flower-next.rst:325 +#: ../../source/how-to-enable-ssl-connections.rst:32 #, fuzzy -msgid "Important" -msgstr "重要变更:" - -#: ../../source/how-to-upgrade-to-flower-next.rst:328 msgid "" -"As we continuously enhance Flower Next at a rapid pace, we'll be " -"periodically updating this guide. Please feel free to share any feedback " -"with us!" -msgstr "" - -#: ../../source/how-to-upgrade-to-flower-next.rst:334 -msgid "Happy migrating! 🚀" -msgstr "" +"The approach for generating SSL certificates in the context of this " +"example can serve as an inspiration and starting point, but it should not" +" be used as a reference for production environments. 
Please refer to " +"other sources regarding the issue of correctly generating certificates " +"for production environments. For non-critical prototyping or research " +"projects, it might be sufficient to use the self-signed certificates " +"generated using the scripts mentioned in this guide." +msgstr "本示例中生成 SSL 证书的方法可作为启发和起点,但不应被视为生产环境的完整方法。有关在生产环境中正确生成证书的问题,请参考其他资料。" -#: ../../source/how-to-use-built-in-mods.rst:2 +#: ../../source/how-to-enable-ssl-connections.rst:40 #, fuzzy -msgid "Use Built-in Mods" -msgstr "使用内置调制器" +msgid "Server (SuperLink)" +msgstr "flower-superlink" -#: ../../source/how-to-use-built-in-mods.rst:4 +#: ../../source/how-to-enable-ssl-connections.rst:42 #, fuzzy msgid "" -"**Note: This tutorial covers experimental features. The functionality and" -" interfaces may change in future versions.**" -msgstr "**注:本教程涵盖实验性功能。功能和界面可能会在未来版本中发生变化。" +"Use the following terminal command to start a sever (SuperLink) that uses" +" the previously generated certificates:" +msgstr "现在我们将演示如何编写一个客户端,使用之前生成的脚本:" -#: ../../source/how-to-use-built-in-mods.rst:6 +#: ../../source/how-to-enable-ssl-connections.rst:52 #, fuzzy msgid "" -"In this tutorial, we will learn how to utilize built-in mods to augment " -"the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) " -"allow us to perform operations before and after a task is processed in " -"the ``ClientApp``." -msgstr "" -"在本教程中,我们将学习如何利用内置模块来增强 ``ClientApp`` 的行为。修改器(有时也称为修改器)允许我们在 ``ClientApp``" -" 处理任务之前和之后执行操作。" +"When providing certificates, the server expects a tuple of three " +"certificates paths: CA certificate, server certificate and server private" +" key." +msgstr "要启用 SSL,需要 CA 证书、服务器证书和服务器私钥。" -#: ../../source/how-to-use-built-in-mods.rst:9 +#: ../../source/how-to-enable-ssl-connections.rst:56 #, fuzzy -msgid "What are Mods?" -msgstr "什么是 Mods?" 
+msgid "Client (SuperNode)" +msgstr "客户端状态代码。" -#: ../../source/how-to-use-built-in-mods.rst:11 +#: ../../source/how-to-enable-ssl-connections.rst:58 #, fuzzy msgid "" -"A Mod is a callable that wraps around a ``ClientApp``. It can manipulate " -"or inspect the incoming ``Message`` and the resulting outgoing " -"``Message``. The signature for a ``Mod`` is as follows:" -msgstr "" -"Mod 是包裹在 ``ClientApp`` 周围的可调用程序。它可以操作或检查传入的 ``Message`` 和由此产生的传出的 " -"``Message`` 。一个 ``Mod`` 的签名如下:" +"Use the following terminal command to start a client (SuperNode) that " +"uses the previously generated certificates:" +msgstr "现在我们将演示如何编写一个客户端,使用之前生成的脚本:" -#: ../../source/how-to-use-built-in-mods.rst:18 +#: ../../source/how-to-enable-ssl-connections.rst:67 #, fuzzy -msgid "A typical mod function might look something like this:" -msgstr "一个典型的修改函数可能是这样的:" +msgid "" +"When setting ``root_certificates``, the client expects a file path to " +"PEM-encoded root certificates." +msgstr "" +"当设置 :code:`root_certificates` 时,客户端希望 PEM 编码的根证书是字节字符串。我们再次使用 " +":code:`Path` 来简化以字节字符串形式读取证书的过程。" -#: ../../source/how-to-use-built-in-mods.rst:31 +#: ../../source/how-to-enable-ssl-connections.rst:73 #, fuzzy -msgid "Using Mods" -msgstr "使用修改器" +msgid "" +"You should now have learned how to generate self-signed certificates " +"using the given script, start an SSL-enabled server and have a client " +"establish a secure connection to it." +msgstr "现在,你应该已经学会了如何使用给定的脚本生成自签名证书、启动启用 SSL 的服务器并让客户端与其建立安全连接。" -#: ../../source/how-to-use-built-in-mods.rst:33 -#, fuzzy -msgid "To use mods in your ``ClientApp``, you can follow these steps:" -msgstr "要在您的 ``ClientApp`` 中使用 mod,可以按照以下步骤操作:" +#: ../../source/how-to-enable-ssl-connections.rst:78 +msgid "Additional resources" +msgstr "补充资源" -#: ../../source/how-to-use-built-in-mods.rst:36 -#, fuzzy -msgid "1. Import the required mods" -msgstr "1. 
导入所需修改" +#: ../../source/how-to-enable-ssl-connections.rst:80 +msgid "" +"These additional sources might be relevant if you would like to dive " +"deeper into the topic of certificates:" +msgstr "如果您想更深入地了解证书主题,这些额外的资料来源可能有帮助:" -#: ../../source/how-to-use-built-in-mods.rst:38 -#, fuzzy -msgid "First, import the built-in mod you intend to use:" -msgstr "首先,导入您打算使用的内置模式:" +#: ../../source/how-to-enable-ssl-connections.rst:83 +msgid "`Let's Encrypt `_" +msgstr "`让我们加密 `_" -#: ../../source/how-to-use-built-in-mods.rst:46 -#, fuzzy -msgid "2. Define your client function" -msgstr "2. 定义客户功能" +#: ../../source/how-to-enable-ssl-connections.rst:84 +msgid "`certbot `_" +msgstr "`certbot `_" -#: ../../source/how-to-use-built-in-mods.rst:48 -#, fuzzy +#: ../../source/how-to-implement-strategies.rst:2 +msgid "Implement strategies" +msgstr "实施策略" + +#: ../../source/how-to-implement-strategies.rst:4 msgid "" -"Define your client function (``client_fn``) that will be wrapped by the " -"mod(s):" -msgstr "定义将被 mod 封装的客户端函数(``client_fn``):" +"The strategy abstraction enables implementation of fully custom " +"strategies. A strategy is basically the federated learning algorithm that" +" runs on the server. Strategies decide how to sample clients, how to " +"configure clients for training, how to aggregate updates, and how to " +"evaluate models. Flower provides a few built-in strategies which are " +"based on the same API described below." +msgstr "" +"策略抽象类可以实现完全定制的策略。策略基本上就是在服务器上运行的联邦学习算法。策略决定如何对客户端进行采样、如何配置客户端进行训练、如何聚合参数更新以及如何评估模型。Flower" +" 提供了一些内置策略,这些策略基于下文所述的相同 API。" -#: ../../source/how-to-use-built-in-mods.rst:57 +#: ../../source/how-to-implement-strategies.rst:11 #, fuzzy -msgid "3. Create the ``ClientApp`` with mods" -msgstr "3. 
用模块创建 ``ClientApp``" +msgid "The ``Strategy`` abstraction" +msgstr ":code:`策略 ` 抽象类" -#: ../../source/how-to-use-built-in-mods.rst:59 +#: ../../source/how-to-implement-strategies.rst:13 #, fuzzy msgid "" -"Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " -"argument. The order in which you provide the mods matters:" -msgstr "创建您的 ``ClientApp`` 并将 mods 作为列表传递给 ``mods`` 参数。提供 mod 的顺序很重要:" +"All strategy implementation are derived from the abstract base class " +"``flwr.server.strategy.Strategy``, both built-in implementations and " +"third party implementations. This means that custom strategy " +"implementations have the exact same capabilities at their disposal as " +"built-in ones." +msgstr "" +"所有策略实现均源自抽象基类 " +":code:`flwr.server.strategy.Strategy`,包括内置实现和第三方实现。这意味着自定义策略实现与内置实现具有完全相同的功能。" -#: ../../source/how-to-use-built-in-mods.rst:72 -#, fuzzy -msgid "Order of execution" -msgstr "停用" +#: ../../source/how-to-implement-strategies.rst:18 +msgid "" +"The strategy abstraction defines a few abstract methods that need to be " +"implemented:" +msgstr "策略抽象定义了一些需要实现的抽象方法:" -#: ../../source/how-to-use-built-in-mods.rst:74 +#: ../../source/how-to-implement-strategies.rst:67 #, fuzzy msgid "" -"When the ``ClientApp`` runs, the mods are executed in the order they are " -"provided in the list:" -msgstr "当运行 ``ClientApp`` 时,会按照列表中提供的顺序执行模块:" +"Creating a new strategy means implementing a new ``class`` (derived from " +"the abstract base class ``Strategy``) that implements for the previously " +"shown abstract methods:" +msgstr "创建一个新策略意味着要实现一个新的 :code:`class`(从抽象基类 :code:`Strategy` 派生),该类要实现前面显示的抽象方法:" + +#: ../../source/how-to-implement-strategies.rst:97 +msgid "The Flower server calls these methods in the following order:" +msgstr "Flower 服务器按以下顺序调用这些方法:" + +#: ../../source/how-to-implement-strategies.rst:174 +msgid "The following sections describe each of those methods in more detail." 
+msgstr "下文将详细介绍每种方法。" -#: ../../source/how-to-use-built-in-mods.rst:76 +#: ../../source/how-to-implement-strategies.rst:177 #, fuzzy -msgid "``example_mod_1`` (outermost mod)" -msgstr "``example_mod_1`` (最外层模块)" +msgid "The ``initialize_parameters`` method" +msgstr ":code:`初始化参数` 方法" -#: ../../source/how-to-use-built-in-mods.rst:77 +#: ../../source/how-to-implement-strategies.rst:179 #, fuzzy -msgid "``example_mod_2`` (next mod)" -msgstr "示例模式 2(下一个模式)" +msgid "" +"``initialize_parameters`` is called only once, at the very beginning of " +"an execution. It is responsible for providing the initial global model " +"parameters in a serialized form (i.e., as a ``Parameters`` object)." +msgstr "" +":code:`initialize_parameters` 只调用一次,即在执行开始时。它负责以序列化形式(即 " +":code:`Parameters` 对象)提供初始全局模型参数。" -#: ../../source/how-to-use-built-in-mods.rst:78 +#: ../../source/how-to-implement-strategies.rst:183 #, fuzzy msgid "" -"Message handler (core function that handles the incoming ``Message`` and " -"returns the outgoing ``Message``)" -msgstr "消息处理程序(处理传入的 \"消息 \"并返回传出的 \"消息 \"的核心函数)" +"Built-in strategies return user-provided initial parameters. The " +"following example shows how initial parameters can be passed to " +"``FedAvg``:" +msgstr "内置策略会返回用户提供的初始参数。下面的示例展示了如何将初始参数传递给 :code:`FedAvg`:" -#: ../../source/how-to-use-built-in-mods.rst:79 +#: ../../source/how-to-implement-strategies.rst:209 #, fuzzy -msgid "``example_mod_2`` (on the way back)" -msgstr "``example_mod_2`` (返回途中)" +msgid "" +"The Flower server will call ``initialize_parameters``, which either " +"returns the parameters that were passed to ``initial_parameters``, or " +"``None``. If no parameters are returned from ``initialize_parameters`` " +"(i.e., ``None``), the server will randomly select one client and ask it " +"to provide its parameters. This is a convenience feature and not " +"recommended in practice, but it can be useful for prototyping. 
In " +"practice, it is recommended to always use server-side parameter " +"initialization." +msgstr "" +"Flower 服务器将调用 :code:`initialize_parameters`,返回传给 " +":code:`initial_parameters` 的参数或 :code:`None`。如果 " +":code:`initialize_parameters` 没有返回任何参数(即 " +":code:`None`),服务器将随机选择一个客户端并要求其提供参数。这只是一个便捷的功能,在实际应用中并不推荐使用,但在原型开发中可能很有用。在实践中,建议始终使用服务器端参数初始化。" -#: ../../source/how-to-use-built-in-mods.rst:80 +#: ../../source/how-to-implement-strategies.rst:218 +msgid "" +"Server-side parameter initialization is a powerful mechanism. It can be " +"used, for example, to resume training from a previously saved checkpoint." +" It is also the fundamental capability needed to implement hybrid " +"approaches, for example, to fine-tune a pre-trained model using federated" +" learning." +msgstr "服务器端参数初始化是一种强大的机制。例如,它可以用来从先前保存的检查点恢复训练。它也是实现混合方法所需的基本能力,例如,使用联邦学习对预先训练好的模型进行微调。" + +#: ../../source/how-to-implement-strategies.rst:224 #, fuzzy -msgid "``example_mod_1`` (outermost mod on the way back)" -msgstr "``example_mod_1`` (返回途中最外层的模式)" +msgid "The ``configure_fit`` method" +msgstr ":code:`configure_fit`方法" -#: ../../source/how-to-use-built-in-mods.rst:82 +#: ../../source/how-to-implement-strategies.rst:226 #, fuzzy msgid "" -"Each mod has a chance to inspect and modify the incoming ``Message`` " -"before passing it to the next mod, and likewise with the outgoing " -"``Message`` before returning it up the stack." -msgstr "每个模块都有机会检查和修改传入的 \"信息\",然后再将其传递给下一个模块,同样,也有机会检查和修改传出的 \"信息\",然后再将其返回堆栈。" +"``configure_fit`` is responsible for configuring the upcoming round of " +"training. What does *configure* mean in this context? Configuring a round" +" means selecting clients and deciding what instructions to send to these " +"clients. 
The signature of ``configure_fit`` makes this clear:" +msgstr "" +":code:`configure_fit` " +"负责配置即将开始的一轮训练。*配置*在这里是什么意思?配置一轮训练意味着选择客户并决定向这些客户发送什么指令。:code:`configure_fit`" +" 说明了这一点:" -#: ../../source/how-to-use-built-in-mods.rst:87 +#: ../../source/how-to-implement-strategies.rst:239 #, fuzzy msgid "" -"By following this guide, you have learned how to effectively use mods to " -"enhance your ``ClientApp``'s functionality. Remember that the order of " -"mods is crucial and affects how the input and output are processed." -msgstr "" -"通过本指南,您已学会如何有效地使用 mod 来增强您的 ``ClientApp`` 的功能。请记住,mod " -"的顺序至关重要,它会影响输入和输出的处理方式。" +"The return value is a list of tuples, each representing the instructions " +"that will be sent to a particular client. Strategy implementations " +"usually perform the following steps in ``configure_fit``:" +msgstr "返回值是一个元组列表,每个元组代表将发送到特定客户端的指令。策略实现通常在 :code:`configure_fit` 中执行以下步骤:" -#: ../../source/how-to-use-built-in-mods.rst:89 +#: ../../source/how-to-implement-strategies.rst:243 +#: ../../source/how-to-implement-strategies.rst:307 #, fuzzy -msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" -msgstr "使用 mods 构建更强大、更灵活的 \"客户端应用程序\"!" 
+msgid "" +"Use the ``client_manager`` to randomly sample all (or a subset of) " +"available clients (each represented as a ``ClientProxy`` object)" +msgstr "" +"使用 :code:`client_manager` 随机抽样所有(或部分)可用客户端(每个客户端都表示为 :code:`ClientProxy` " +"对象)" -#: ../../source/how-to-use-differential-privacy.rst:2 +#: ../../source/how-to-implement-strategies.rst:245 #, fuzzy -msgid "Use Differential Privacy" -msgstr "差分隐私" +msgid "" +"Pair each ``ClientProxy`` with the same ``FitIns`` holding the current " +"global model ``parameters`` and ``config`` dict" +msgstr "" +"将每个 :code:`ClientProxy` 与持有当前全局模型 :code:`parameters` 和 :code:`config` " +"dict 的 :code:`FitIns` 配对" -#: ../../source/how-to-use-differential-privacy.rst:3 +#: ../../source/how-to-implement-strategies.rst:248 #, fuzzy msgid "" -"This guide explains how you can utilize differential privacy in the " -"Flower framework. If you are not yet familiar with differential privacy, " -"you can refer to :doc:`explanation-differential-privacy`." +"More sophisticated implementations can use ``configure_fit`` to implement" +" custom client selection logic. A client will only participate in a round" +" if the corresponding ``ClientProxy`` is included in the list returned " +"from ``configure_fit``." msgstr "" -"本指南解释了如何在 Flower 框架中使用差分隐私。如果您还不熟悉差分隐私,可以参考 :doc:`explanation-" -"differential-privacy` 。" +"更复杂的实现可以使用 :code:`configure_fit` 来实现自定义的客户端选择逻辑。只有当相应的 " +":code:`ClientProxy` 包含在 :code:`configure_fit` 返回的列表中时,客户端才会参与进来。" -#: ../../source/how-to-use-differential-privacy.rst:7 +#: ../../source/how-to-implement-strategies.rst:254 #, fuzzy msgid "" -"Differential Privacy in Flower is in a preview phase. If you plan to use " -"these features in a production environment with sensitive data, feel free" -" contact us to discuss your requirements and to receive guidance on how " -"to best use these features." +"The structure of this return value provides a lot of flexibility to the " +"user. 
Since instructions are defined on a per-client basis, different " +"instructions can be sent to each client. This enables custom strategies " +"to train, for example, different models on different clients, or use " +"different hyperparameters on different clients (via the ``config`` dict)." msgstr "" -"Flower " -"中的差异隐私处于预览阶段。如果您计划在生产环境中使用这些敏感数据功能,请随时联系我们,讨论您的需求,并获得如何最好地使用这些功能的指导。" +"该返回值的结构为用户提供了很大的灵活性。由于指令是按客户端定义的,因此可以向每个客户端发送不同的指令。这使得自定义策略成为可能,例如在不同的客户端上训练不同的模型,或在不同的客户端上使用不同的超参数(通过" +" :code:`config` dict)。" + +#: ../../source/how-to-implement-strategies.rst:261 +#, fuzzy +msgid "The ``aggregate_fit`` method" +msgstr ":code:`aggregate_fit` 方法" -#: ../../source/how-to-use-differential-privacy.rst:12 +#: ../../source/how-to-implement-strategies.rst:263 #, fuzzy msgid "" -"This approach consists of two seprate phases: clipping of the updates and" -" adding noise to the aggregated model. For the clipping phase, Flower " -"framework has made it possible to decide whether to perform clipping on " -"the server side or the client side." -msgstr "这种方法包括两个独立的阶段:对更新进行剪切和在聚合模型中添加噪声。在剪切阶段,Flower 框架可以决定是在服务器端还是在客户端执行剪切。" +"``aggregate_fit`` is responsible for aggregating the results returned by " +"the clients that were selected and asked to train in ``configure_fit``." +msgstr ":code:`aggregate_fit` 负责汇总在 :code:`configure_fit` 中选择并要求训练的客户端所返回的结果。" -#: ../../source/how-to-use-differential-privacy.rst:15 +#: ../../source/how-to-implement-strategies.rst:277 #, fuzzy msgid "" -"**Server-side Clipping**: This approach has the advantage of the server " -"enforcing uniform clipping across all clients' updates and reducing the " -"communication overhead for clipping values. However, it also has the " -"disadvantage of increasing the computational load on the server due to " -"the need to perform the clipping operation for all clients." 
+"Of course, failures can happen, so there is no guarantee that the server " +"will get results from all the clients it sent instructions to (via " +"``configure_fit``). ``aggregate_fit`` therefore receives a list of " +"``results``, but also a list of ``failures``." msgstr "" -"** 服务器端剪切**: " -"这种方法的优点是服务器可对所有客户端的更新执行统一的剪切,并减少剪切值的通信开销。不过,这种方法也有缺点,那就是需要为所有客户端执行剪切操作,从而增加了服务器的计算负荷。" +"当然,失败是有可能发生的,因此无法保证服务器会从它发送指令(通过 :code:`configure_fit`)的所有客户端获得结果。因此 " +":code:`aggregate_fit` 会收到 :code:`results` 的列表,但也会收到 :code:`failures` 的列表。" -#: ../../source/how-to-use-differential-privacy.rst:16 +#: ../../source/how-to-implement-strategies.rst:282 #, fuzzy msgid "" -"**Client-side Clipping**: This approach has the advantage of reducing the" -" computational overhead on the server. However, it also has the " -"disadvantage of lacking centralized control, as the server has less " -"control over the clipping process." -msgstr "**客户端剪切**: 这种方法的优点是可以减少服务器的计算开销。不过,它也有缺乏集中控制的缺点,因为服务器对剪切过程的控制较少。" +"``aggregate_fit`` returns an optional ``Parameters`` object and a " +"dictionary of aggregated metrics. The ``Parameters`` return value is " +"optional because ``aggregate_fit`` might decide that the results provided" +" are not sufficient for aggregation (e.g., too many failures)." 
+msgstr "" +":code:`aggregate_fit` 返回一个可选的 :code:`Parameters` " +"对象和一个聚合度量的字典。:code:`Parameters` 返回值是可选的,因为 :code:`aggregate_fit` " +"可能会认为所提供的结果不足以进行聚合(例如,失败次数过多)。" -#: ../../source/how-to-use-differential-privacy.rst:21 +#: ../../source/how-to-implement-strategies.rst:288 #, fuzzy -msgid "Server-side Clipping" -msgstr "服务器端逻辑" +msgid "The ``configure_evaluate`` method" +msgstr ":code:`configure_evaluate`方法" -#: ../../source/how-to-use-differential-privacy.rst:22 +#: ../../source/how-to-implement-strategies.rst:290 #, fuzzy msgid "" -"For central DP with server-side clipping, there are two :code:`Strategy` " -"classes that act as wrappers around the actual :code:`Strategy` instance " -"(for example, :code:`FedAvg`). The two wrapper classes are " -":code:`DifferentialPrivacyServerSideFixedClipping` and " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and " -"adaptive clipping." +"``configure_evaluate`` is responsible for configuring the upcoming round " +"of evaluation. What does *configure* mean in this context? Configuring a " +"round means selecting clients and deciding what instructions to send to " +"these clients. The signature of ``configure_evaluate`` makes this clear:" msgstr "" -"对于具有服务器端剪裁功能的中央 DP,有两个 :code:`Strategy` 类作为实际 :code:`Strategy` 实例(例如 " -":code:`FedAvg`)的包装器。这两个封装类分别是 " -":code:`DifferentialPrivacyServerSideFixedClipping` 和 " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` ,用于固定剪辑和自适应剪辑。" +":code:`configure_evaluate` " +"负责配置下一轮评估。*配置*在这里是什么意思?配置一轮评估意味着选择客户端并决定向这些客户端发送什么指令。:code:`configure_evaluate`" +" 说明了这一点:" -#: ../../source/how-to-use-differential-privacy.rst:-1 +#: ../../source/how-to-implement-strategies.rst:303 #, fuzzy -msgid "server side clipping" -msgstr "服务器端逻辑" +msgid "" +"The return value is a list of tuples, each representing the instructions " +"that will be sent to a particular client. 
Strategy implementations " +"usually perform the following steps in ``configure_evaluate``:" +msgstr "返回值是一个元组列表,每个元组代表将发送到特定客户端的指令。策略实现通常在 :code:`configure_evaluate` 中执行以下步骤:" -#: ../../source/how-to-use-differential-privacy.rst:31 +#: ../../source/how-to-implement-strategies.rst:309 #, fuzzy msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use server-" -"side fixed clipping using the " -":code:`DifferentialPrivacyServerSideFixedClipping` wrapper class. The " -"same approach can be used with " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " -"corresponding input parameters." +"Pair each ``ClientProxy`` with the same ``EvaluateIns`` holding the " +"current global model ``parameters`` and ``config`` dict" msgstr "" -"下面的代码示例使用 :code:`DifferentialPrivacyServerSideFixedClipping` 封装类使 " -":code:`FedAvg` 策略使用服务器端固定剪辑。通过调整相应的输入参数,同样的方法也可用于 " -":code:`DifferentialPrivacyServerSideAdaptiveClipping`。" +"将每个 :code:`ClientProxy` 与持有当前全局模型 :code:`parameters` 和 :code:`config` " +"dict 的 :code:`EvaluateIns` 配对" -#: ../../source/how-to-use-differential-privacy.rst:52 +#: ../../source/how-to-implement-strategies.rst:312 #, fuzzy -msgid "Client-side Clipping" -msgstr "客户端逻辑" +msgid "" +"More sophisticated implementations can use ``configure_evaluate`` to " +"implement custom client selection logic. A client will only participate " +"in a round if the corresponding ``ClientProxy`` is included in the list " +"returned from ``configure_evaluate``." +msgstr "" +"更复杂的实现可以使用 :code:`configure_evaluate` 来实现自定义的客户端选择逻辑。只有当相应的 " +":code:`ClientProxy` 包含在 :code:`configure_evaluate` 返回的列表中时,客户端才会参与进来。" -#: ../../source/how-to-use-differential-privacy.rst:53 +#: ../../source/how-to-implement-strategies.rst:318 #, fuzzy msgid "" -"For central DP with client-side clipping, the server sends the clipping " -"value to selected clients on each round. Clients can use existing Flower " -":code:`Mods` to perform the clipping. 
Two mods are available for fixed " -"and adaptive client-side clipping: :code:`fixedclipping_mod` and " -":code:`adaptiveclipping_mod` with corresponding server-side wrappers " -":code:`DifferentialPrivacyClientSideFixedClipping` and " -":code:`DifferentialPrivacyClientSideAdaptiveClipping`." +"The structure of this return value provides a lot of flexibility to the " +"user. Since instructions are defined on a per-client basis, different " +"instructions can be sent to each client. This enables custom strategies " +"to evaluate, for example, different models on different clients, or use " +"different hyperparameters on different clients (via the ``config`` dict)." msgstr "" -"对于带有客户端剪裁功能的中央 DP,服务器会在每一轮向选定的客户端发送剪裁值。客户端可以使用现有的 Flower " -":code:`Mods`来执行剪裁。有两种模式可用于固定和自适应客户端剪辑::code:`fixedclipping_mod` 和 " -":code:`adaptiveclipping_mod`,以及相应的服务器端封装 " -":code:`DifferentialPrivacyClientSideFixedClipping` 和 " -":code:`DifferentialPrivacyClientSideAdaptiveClipping`。" +"该返回值的结构为用户提供了很大的灵活性。由于指令是按客户端定义的,因此可以向每个客户端发送不同的指令。这使得自定义策略可以在不同客户端上评估不同的模型,或在不同客户端上使用不同的超参数(通过" +" :code:`config` dict)。" -#: ../../source/how-to-use-differential-privacy.rst:-1 +#: ../../source/how-to-implement-strategies.rst:325 #, fuzzy -msgid "client side clipping" -msgstr "客户端逻辑" +msgid "The ``aggregate_evaluate`` method" +msgstr ":code:`aggregate_evaluate` 方法" -#: ../../source/how-to-use-differential-privacy.rst:63 +#: ../../source/how-to-implement-strategies.rst:327 #, fuzzy msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use " -"differential privacy with client-side fixed clipping using both the " -":code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on " -"the client, :code:`fixedclipping_mod`:" +"``aggregate_evaluate`` is responsible for aggregating the results " +"returned by the clients that were selected and asked to evaluate in " +"``configure_evaluate``." 
msgstr "" -"下面的代码示例使用 :code:`DifferentialPrivacyClientSideFixedClipping` 封装类和客户端的 " -":code:`fixedclipping_mod` 使 :code:`FedAvg` 策略在客户端固定剪辑的情况下使用差分隐私:" +":code:`aggregate_evaluate` 负责汇总在 :code:`configure_evaluate` " +"中选择并要求评估的客户端返回的结果。" -#: ../../source/how-to-use-differential-privacy.rst:80 +#: ../../source/how-to-implement-strategies.rst:341 #, fuzzy msgid "" -"In addition to the server-side strategy wrapper, the :code:`ClientApp` " -"needs to configure the matching :code:`fixedclipping_mod` to perform the " -"client-side clipping:" +"Of course, failures can happen, so there is no guarantee that the server " +"will get results from all the clients it sent instructions to (via " +"``configure_evaluate``). ``aggregate_evaluate`` therefore receives a list" +" of ``results``, but also a list of ``failures``." msgstr "" -"除了服务器端策略包装器外,:code:`ClientApp` 还需要配置匹配的 :code:`fixedclipping_mod` " -"以执行客户端剪切:" +"当然,失败是有可能发生的,因此无法保证服务器会从它发送指令(通过 " +":code:`configure_evaluate`)的所有客户端获得结果。因此, :code:`aggregate_evaluate` 会接收 " +":code:`results` 的列表,但也会接收 :code:`failures` 的列表。" -#: ../../source/how-to-use-differential-privacy.rst:97 +#: ../../source/how-to-implement-strategies.rst:346 #, fuzzy msgid "" -"To utilize local differential privacy (DP) and add noise to the client " -"model parameters before transmitting them to the server in Flower, you " -"can use the `LocalDpMod`. The following hyperparameters need to be set: " -"clipping norm value, sensitivity, epsilon, and delta." +"``aggregate_evaluate`` returns an optional ``float`` (loss) and a " +"dictionary of aggregated metrics. The ``float`` return value is optional " +"because ``aggregate_evaluate`` might decide that the results provided are" +" not sufficient for aggregation (e.g., too many failures)." 
msgstr "" -"要利用本地差分隐私(DP)并在将客户端模型参数传输到 Flower 服务器之前为其添加噪声,可以使用 " -"`LocalDpMod`。需要设置以下超参数:剪切规范值、灵敏度、ε 和 delta。" +":code:`aggregate_evaluate` 返回一个可选的 " +":code:`float`(损失值)和一个聚合指标字典。:code:`float` 返回值是可选的,因为 " +":code:`aggregate_evaluate` 可能会认为所提供的结果不足以进行聚合(例如,失败次数过多)。" -#: ../../source/how-to-use-differential-privacy.rst:-1 +#: ../../source/how-to-implement-strategies.rst:352 #, fuzzy -msgid "local DP mod" -msgstr "本地 DP 模式" +msgid "The ``evaluate`` method" +msgstr ":code:`evaluate`方法" -#: ../../source/how-to-use-differential-privacy.rst:104 +#: ../../source/how-to-implement-strategies.rst:354 #, fuzzy -msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" -msgstr "下面的代码示例展示了如何使用 :code:`LocalDpMod`:" +msgid "" +"``evaluate`` is responsible for evaluating model parameters on the " +"server-side. Having ``evaluate`` in addition to " +"``configure_evaluate``/``aggregate_evaluate`` enables strategies to " +"perform both servers-side and client-side (federated) evaluation." +msgstr "" +":code:`evaluate` 负责在服务器端评估模型参数。除了 " +":code:`configure_evaluate`/:code:`aggregate_evaluate` 之外,:code:`evaluate`" +" 可以使策略同时执行服务器端和客户端(联邦)评估。" -#: ../../source/how-to-use-differential-privacy.rst:122 +#: ../../source/how-to-implement-strategies.rst:364 #, fuzzy msgid "" -"Please note that the order of mods, especially those that modify " -"parameters, is important when using multiple modifiers. Typically, " -"differential privacy (DP) modifiers should be the last to operate on " -"parameters." -msgstr "请注意,在使用多个修改器时,修改器(尤其是修改参数的修改器)的顺序非常重要。通常情况下,差分隐私 (DP) 修改器应最后对参数进行操作。" +"The return value is again optional because the strategy might not need to" +" implement server-side evaluation or because the user-defined " +"``evaluate`` method might not complete successfully (e.g., it might fail " +"to load the server-side evaluation data)." 
+msgstr "" +"返回值也是可选的,因为策略可能不需要执行服务器端评估,或者因为用户定义的 :code:`evaluate` " +"方法可能无法成功完成(例如,它可能无法加载服务器端评估数据)。" -#: ../../source/how-to-use-differential-privacy.rst:125 +#: ../../source/how-to-install-flower.rst:2 +msgid "Install Flower" +msgstr "安装Flower" + +#: ../../source/how-to-install-flower.rst:5 +msgid "Python version" +msgstr "Python 版本" + +#: ../../source/how-to-install-flower.rst:11 +msgid "Install stable release" +msgstr "安装稳定版" + +#: ../../source/how-to-install-flower.rst:14 +#: ../../source/how-to-upgrade-to-flower-next.rst:66 #, fuzzy -msgid "Local Training using Privacy Engines" -msgstr "使用隐私引擎进行本地培训" +msgid "Using pip" +msgstr "使用 pip" + +#: ../../source/how-to-install-flower.rst:16 +#, fuzzy +msgid "Stable releases are available on `PyPI `_:" +msgstr "稳定版本可在 `PyPI `_::" -#: ../../source/how-to-use-differential-privacy.rst:126 +#: ../../source/how-to-install-flower.rst:22 #, fuzzy msgid "" -"For ensuring data instance-level privacy during local model training on " -"the client side, consider leveraging privacy engines such as Opacus and " -"TensorFlow Privacy. For examples of using Flower with these engines, " -"please refer to the Flower examples directory (`Opacus " -"`_, `Tensorflow" -" Privacy `_)." -msgstr "" -"要在客户端本地模型训练期间确保数据实例级隐私,可考虑利用 Opacus 和 TensorFlow Privacy 等隐私引擎。有关将 Flower" -" 与这些引擎结合使用的示例,请参阅 Flower 示例目录(`Opacus " -"`_, `Tensorflow" -" Privacy `_)。" +"For simulations that use the Virtual Client Engine, ``flwr`` should be " +"installed with the ``simulation`` extra:" +msgstr "对于使用虚拟客户端引擎的模拟,`flwr`` 应与`simulation`` 一起安装:" -#: ../../source/how-to-use-strategies.rst:2 -msgid "Use strategies" -msgstr "使用策略" +#: ../../source/how-to-install-flower.rst:30 +#, fuzzy +msgid "Using conda (or mamba)" +msgstr "使用 conda(或 mamba)" -#: ../../source/how-to-use-strategies.rst:4 +#: ../../source/how-to-install-flower.rst:32 +#, fuzzy +msgid "Flower can also be installed from the ``conda-forge`` channel." 
+msgstr "Flower 也可以从 ``conda-forge`` 频道安装。" + +#: ../../source/how-to-install-flower.rst:34 +#, fuzzy msgid "" -"Flower allows full customization of the learning process through the " -":code:`Strategy` abstraction. A number of built-in strategies are " -"provided in the core framework." -msgstr "Flower 允许通过 :code:`Strategy` 抽象类对学习过程进行完全定制。核心框架中提供了许多内置策略。" +"If you have not added ``conda-forge`` to your channels, you will first " +"need to run the following:" +msgstr "如果您尚未在频道中添加 ``conda-forge``,则首先需要运行以下程序::" -#: ../../source/how-to-use-strategies.rst:6 +#: ../../source/how-to-install-flower.rst:42 +#, fuzzy msgid "" -"There are three ways to customize the way Flower orchestrates the " -"learning process on the server side:" -msgstr "有三种方法可以自定义 Flower 在服务器端协调学习过程的方式:" +"Once the ``conda-forge`` channel has been enabled, ``flwr`` can be " +"installed with ``conda``:" +msgstr "一旦启用了 ``conda-forge`` 频道,就可以使用 ``conda``: 安装 ``flwr``:" -#: ../../source/how-to-use-strategies.rst:8 -msgid "Use an existing strategy, for example, :code:`FedAvg`" -msgstr "使用现有策略,例如 :code:`FedAvg`" +#: ../../source/how-to-install-flower.rst:49 +#, fuzzy +msgid "or with ``mamba``:" +msgstr "或用 ``mamba`` ::" -#: ../../source/how-to-use-strategies.rst:9 -#: ../../source/how-to-use-strategies.rst:40 -msgid "Customize an existing strategy with callback functions" -msgstr "使用回调函数定制现有策略" +#: ../../source/how-to-install-flower.rst:56 +msgid "Verify installation" +msgstr "验证安装" -#: ../../source/how-to-use-strategies.rst:10 -#: ../../source/how-to-use-strategies.rst:87 -msgid "Implement a novel strategy" -msgstr "实施新策略" +#: ../../source/how-to-install-flower.rst:58 +#, fuzzy +msgid "" +"The following command can be used to verify if Flower was successfully " +"installed. 
If everything worked, it should print the version of Flower to" +" the command line:" +msgstr "可以使用以下命令来验证 Flower 是否安装成功。如果一切正常,它将在命令行中打印 Flower 的版本::" -#: ../../source/how-to-use-strategies.rst:14 -msgid "Use an existing strategy" -msgstr "使用现有策略" +#: ../../source/how-to-install-flower.rst:68 +msgid "Advanced installation options" +msgstr "高级安装选项" -#: ../../source/how-to-use-strategies.rst:16 -msgid "" -"Flower comes with a number of popular federated learning strategies " -"built-in. A built-in strategy can be instantiated as follows:" -msgstr "Flower 内置了许多流行的联邦学习策略。内置策略的实例化方法如下:" +#: ../../source/how-to-install-flower.rst:71 +#, fuzzy +msgid "Install via Docker" +msgstr "安装Flower" -#: ../../source/how-to-use-strategies.rst:25 +#: ../../source/how-to-install-flower.rst:73 +#, fuzzy +msgid ":doc:`Run Flower using Docker `" +msgstr "" +"`TensorFlow快速入门 (教程) `_" + +#: ../../source/how-to-install-flower.rst:76 +msgid "Install pre-release" +msgstr "安装预发布版本" + +#: ../../source/how-to-install-flower.rst:78 +#, fuzzy msgid "" -"This creates a strategy with all parameters left at their default values " -"and passes it to the :code:`start_server` function. It is usually " -"recommended to adjust a few parameters during instantiation:" -msgstr "这会创建一个所有参数都保持默认值的策略,并将其传递给 :code:`start_server` 函数。通常建议在实例化过程中调整一些参数:" +"New (possibly unstable) versions of Flower are sometimes available as " +"pre-release versions (alpha, beta, release candidate) before the stable " +"release happens:" +msgstr "在稳定版发布之前,Flower 的新版本(可能是不稳定版)有时会作为预发布版本(alpha、beta、候选发布版本)提供::" -#: ../../source/how-to-use-strategies.rst:42 +#: ../../source/how-to-install-flower.rst:85 +#, fuzzy msgid "" -"Existing strategies provide several ways to customize their behaviour. " -"Callback functions allow strategies to call user-provided code during " -"execution." 
-msgstr "现有的策略提供了多种自定义行为的方法。回调函数允许策略在执行过程中调用用户提供的代码。" +"For simulations that use the Virtual Client Engine, ``flwr`` pre-releases" +" should be installed with the ``simulation`` extra:" +msgstr "对于使用虚拟客户端引擎的模拟,`flwr``预发行版应与`simulation``一起安装:" -#: ../../source/how-to-use-strategies.rst:45 -msgid "Configuring client fit and client evaluate" -msgstr "配置客户匹配和客户评估" +#: ../../source/how-to-install-flower.rst:93 +msgid "Install nightly release" +msgstr "安装隔夜版本" -#: ../../source/how-to-use-strategies.rst:47 +#: ../../source/how-to-install-flower.rst:95 +#, fuzzy msgid "" -"The server can pass new configuration values to the client each round by " -"providing a function to :code:`on_fit_config_fn`. The provided function " -"will be called by the strategy and must return a dictionary of " -"configuration key values pairs that will be sent to the client. It must " -"return a dictionary of arbitrary configuration values :code:`client.fit`" -" and :code:`client.evaluate` functions during each round of federated " -"learning." -msgstr "" -"服务器可以通过向 :code:`on_fit_config_fn` " -"提供一个函数,在每一轮向客户端传递新的配置值。提供的函数将被策略调用,并且必须返回一个配置键值对的字典,该字典将被发送到客户端。在每一轮联邦学习期间,它必须返回一个任意配置值" -" dictionary :code:`client.fit`和 :code:`client.evaluate`函数。" +"The latest (potentially unstable) changes in Flower are available as " +"nightly releases:" +msgstr "Flower 中最新(可能不稳定)的更改以隔夜发布的形式提供::" -#: ../../source/how-to-use-strategies.rst:75 +#: ../../source/how-to-install-flower.rst:101 +#, fuzzy msgid "" -"The :code:`on_fit_config_fn` can be used to pass arbitrary configuration " -"values from server to client, and poetentially change these values each " -"round, for example, to adjust the learning rate. The client will receive " -"the dictionary returned by the :code:`on_fit_config_fn` in its own " -":code:`client.fit()` function." 
-msgstr "" -":code:`on_fit_config_fn`可用于将任意配置值从服务器传递到客户端,并在每一轮改变这些值,例如,调整学习率。客户端将在自己的 " -":code:`client.fit()` 函数中接收 :code:`on_fit_config_fn` 返回的字典。" +"For simulations that use the Virtual Client Engine, ``flwr-nightly`` " +"should be installed with the ``simulation`` extra:" +msgstr "对于使用虚拟客户端引擎的模拟,`flwr-nightly`应与`simulation`一起安装:" + +#: ../../source/how-to-monitor-simulation.rst:2 +msgid "Monitor simulation" +msgstr "监控模拟" -#: ../../source/how-to-use-strategies.rst:78 +#: ../../source/how-to-monitor-simulation.rst:4 msgid "" -"Similar to :code:`on_fit_config_fn`, there is also " -":code:`on_evaluate_config_fn` to customize the configuration sent to " -":code:`client.evaluate()`" +"Flower allows you to monitor system resources while running your " +"simulation. Moreover, the Flower simulation engine is powerful and " +"enables you to decide how to allocate resources per client manner and " +"constrain the total usage. Insights from resource consumption can help " +"you make smarter decisions and speed up the execution time." msgstr "" -"与 :code:`on_fit_config_fn` 类似,还有 :code:`on_evaluate_config_fn` 用于定制发送到 " -":code:`client.evaluate()` 的配置" - -#: ../../source/how-to-use-strategies.rst:81 -msgid "Configuring server-side evaluation" -msgstr "配置服务器端评估" +"Flower 允许您在运行模拟时监控系统资源。此外,Flower " +"仿真引擎功能强大,能让您决定如何按客户端方式分配资源并限制总使用量。从资源消耗中获得的观察可以帮助您做出更明智的决策,并加快执行时间。" -#: ../../source/how-to-use-strategies.rst:83 +#: ../../source/how-to-monitor-simulation.rst:9 msgid "" -"Server-side evaluation can be enabled by passing an evaluation function " -"to :code:`evaluate_fn`." -msgstr "服务器端评估可通过向 :code:`evaluate_fn` 传递评估函数来启用。" +"The specific instructions assume you are using macOS and have the " +"`Homebrew `_ package manager installed." 
+msgstr "具体说明假定你使用的是 macOS,并且安装了 `Homebrew `_ 软件包管理器。" -#: ../../source/how-to-use-strategies.rst:89 +#: ../../source/how-to-monitor-simulation.rst:13 +msgid "Downloads" +msgstr "下载" + +#: ../../source/how-to-monitor-simulation.rst:19 msgid "" -"Writing a fully custom strategy is a bit more involved, but it provides " -"the most flexibility. Read the `Implementing Strategies `_ guide to learn more." +"`Prometheus `_ is used for data collection, while" +" `Grafana `_ will enable you to visualize the " +"collected data. They are both well integrated with `Ray " +"`_ which Flower uses under the hood." msgstr "" -"编写完全自定义的策略涉及的内容较多,但灵活性最高。阅读 `实施策略 _ " -"指南,了解更多信息。" - -#: ../../source/index.rst:34 -msgid "Tutorial" -msgstr "教程" - -#: ../../source/index.rst:44 -msgid "Quickstart tutorials" -msgstr "快速入门教程" +"`Prometheus `_ 用于收集数据,而 `Grafana " +"`_ 则能让你将收集到的数据可视化。它们都与 Flower 在引擎下使用的 `Ray " +"`_ 紧密集成。" -#: ../../source/index.rst:74 ../../source/index.rst:78 -msgid "How-to guides" -msgstr "操作指南" +#: ../../source/how-to-monitor-simulation.rst:23 +msgid "" +"Overwrite the configuration files (depending on your device, it might be " +"installed on a different path)." +msgstr "重写配置文件(根据设备的不同,可能安装在不同的路径上)。" -#: ../../source/index.rst:99 -msgid "Legacy example guides" -msgstr "旧版指南范例" +#: ../../source/how-to-monitor-simulation.rst:26 +msgid "If you are on an M1 Mac, it should be:" +msgstr "如果你使用的是 M1 Mac,应该是这样:" -#: ../../source/index.rst:108 ../../source/index.rst:112 -msgid "Explanations" -msgstr "说明" +#: ../../source/how-to-monitor-simulation.rst:33 +msgid "On the previous generation Intel Mac devices, it should be:" +msgstr "在上一代英特尔 Mac 设备上,应该是这样:" -#: None:-1 -msgid "API reference" -msgstr "应用程序接口参考" +#: ../../source/how-to-monitor-simulation.rst:40 +msgid "" +"Open the respective configuration files and change them. 
Depending on " +"your device, use one of the two following commands:" +msgstr "打开相应的配置文件并修改它们。根据设备情况,使用以下两个命令之一:" -#: ../../source/index.rst:137 -msgid "Reference docs" -msgstr "参考文档" +#: ../../source/how-to-monitor-simulation.rst:51 +msgid "" +"and then delete all the text in the file and paste a new Prometheus " +"config you see below. You may adjust the time intervals to your " +"requirements:" +msgstr "然后删除文件中的所有文本,粘贴一个新的 Prometheus 配置文件,如下所示。您可以根据需要调整时间间隔:" -#: ../../source/index.rst:153 -msgid "Contributor tutorials" -msgstr "贡献者教程" +#: ../../source/how-to-monitor-simulation.rst:67 +msgid "" +"Now after you have edited the Prometheus configuration, do the same with " +"the Grafana configuration files. Open those using one of the following " +"commands as before:" +msgstr "编辑完 Prometheus 配置后,请对 Grafana 配置文件执行同样的操作。与之前一样,使用以下命令之一打开这些文件:" -#: ../../source/index.rst:160 -msgid "Contributor how-to guides" -msgstr "投稿指南" +#: ../../source/how-to-monitor-simulation.rst:78 +msgid "" +"Your terminal editor should open and allow you to apply the following " +"configuration as before." +msgstr "您的终端编辑器应该会打开,并允许您像之前一样应用以下配置。" -#: ../../source/index.rst:173 -msgid "Contributor explanations" -msgstr "贡献者解释" +#: ../../source/how-to-monitor-simulation.rst:94 +msgid "" +"Congratulations, you just downloaded all the necessary software needed " +"for metrics tracking. Now, let’s start it." +msgstr "恭喜您,您刚刚下载了指标跟踪所需的所有软件。现在,让我们开始吧。" -#: ../../source/index.rst:179 -msgid "Contributor references" -msgstr "贡献者参考资料" +#: ../../source/how-to-monitor-simulation.rst:98 +msgid "Tracking metrics" +msgstr "跟踪指标" -#: ../../source/index.rst:-1 +#: ../../source/how-to-monitor-simulation.rst:100 msgid "" -"Check out the documentation of the main Flower Framework enabling easy " -"Python development for Federated Learning." 
-msgstr "查看主 Flower Framework 的文档,轻松实现联邦学习的 Python 开发。" - -#: ../../source/index.rst:2 -msgid "Flower Framework Documentation" -msgstr "Flower 框架文档" +"Before running your Flower simulation, you have to start the monitoring " +"tools you have just installed and configured." +msgstr "在运行 Flower 模拟之前,您必须启动刚刚安装和配置的监控工具。" -#: ../../source/index.rst:7 +#: ../../source/how-to-monitor-simulation.rst:108 msgid "" -"Welcome to Flower's documentation. `Flower `_ is a " -"friendly federated learning framework." -msgstr "欢迎访问 Flower 文档。`Flower `_ 是一个友好的联邦学习框架。" +"Please include the following argument in your Python code when starting a" +" simulation." +msgstr "开始模拟时,请在 Python 代码中加入以下参数。" -#: ../../source/index.rst:11 -msgid "Join the Flower Community" -msgstr "加入 Flower 社区" +#: ../../source/how-to-monitor-simulation.rst:119 +msgid "Now, you are ready to start your workload." +msgstr "现在,您可以开始工作了。" -#: ../../source/index.rst:13 +#: ../../source/how-to-monitor-simulation.rst:121 msgid "" -"The Flower Community is growing quickly - we're a friendly group of " -"researchers, engineers, students, professionals, academics, and other " -"enthusiasts." -msgstr "Flower 社区发展迅速--我们是一个由研究人员、工程师、学生、专业人士、学者和其他爱好者组成的友好团体。" +"Shortly after the simulation starts, you should see the following logs in" +" your terminal:" +msgstr "模拟启动后不久,您就会在终端中看到以下日志:" -#: ../../source/index.rst:15 -msgid "Join us on Slack" -msgstr "在 Slack 上加入我们" +#: ../../source/how-to-monitor-simulation.rst:127 +#, fuzzy +msgid "You can look at everything at http://127.0.0.1:8265 ." +msgstr "您可以在 ``_ 查看所有内容。" -#: ../../source/index.rst:23 -msgid "Flower Framework" -msgstr "Flower 框架" +#: ../../source/how-to-monitor-simulation.rst:129 +msgid "" +"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" +" lowest option)." 
+msgstr "这是一个 Ray Dashboard。您可以导航到 \"度量标准\"(左侧面板,最低选项)。" -#: ../../source/index.rst:25 +#: ../../source/how-to-monitor-simulation.rst:132 msgid "" -"The user guide is targeted at researchers and developers who want to use " -"Flower to bring existing machine learning workloads into a federated " -"setting. One of Flower's design goals was to make this simple. Read on to" -" learn more." +"Or alternatively, you can just see them in Grafana by clicking on the " +"right-up corner, “View in Grafana”. Please note that the Ray dashboard is" +" only accessible during the simulation. After the simulation ends, you " +"can only use Grafana to explore the metrics. You can start Grafana by " +"going to ``http://localhost:3000/``." msgstr "" -"该用户指南面向希望使用 Flower 将现有机器学习工作负载引入联邦环境的研究人员和开发人员。Flower " -"的设计目标之一就是让这一切变得简单。请继续阅读,了解更多信息。" - -#: ../../source/index.rst:30 -msgid "Tutorials" -msgstr "教程" - -#: ../../source/index.rst:32 -msgid "" -"A learning-oriented series of federated learning tutorials, the best " -"place to start." 
-msgstr "以学习为导向的联邦学习教程系列,最好的起点。" +"或者,您也可以点击右上角的 \"在 Grafana 中查看\",在 Grafana 中查看它们。请注意,Ray " +"仪表盘只能在模拟期间访问。模拟结束后,您只能使用 Grafana 浏览指标。您可以访问 ``http://localhost:3000/``启动 " +"Grafana。" -#: ../../source/index.rst:61 +#: ../../source/how-to-monitor-simulation.rst:137 #, fuzzy msgid "" -"QUICKSTART TUTORIALS: :doc:`PyTorch ` | " -":doc:`TensorFlow ` | :doc:`🤗 Transformers" -" ` | :doc:`JAX ` | :doc:`Pandas ` | :doc:`fastai " -"` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | :doc:`XGBoost ` | " -":doc:`Android ` | :doc:`iOS `" -msgstr "" -"快速入门教程: :doc:`PyTorch ` | :doc:`TensorFlow " -"` | :doc:`🤗 Transformers ` | :doc:`JAX ` | " -":doc:`Pandas ` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`MXNet ` | :doc" -":`scikit-learn ` | :doc:`XGBoost " -"` | :doc:`Android ` | :doc:`iOS `" - -#: ../../source/index.rst:63 -msgid "We also made video tutorials for PyTorch:" -msgstr "我们还为 PyTorch 制作了视频教程:" +"After you finish the visualization, stop Prometheus and Grafana. This is " +"important as they will otherwise block, for example port ``3000`` on your" +" machine as long as they are running." +msgstr "完成可视化后,请停止 Prometheus 和 Grafana。这一点很重要,否则只要它们在运行,就会阻塞机器上的端口 :code:`3000`。" -#: ../../source/index.rst:68 -msgid "And TensorFlow:" -msgstr "还有 TensorFlow:" +#: ../../source/how-to-monitor-simulation.rst:147 +msgid "Resource allocation" +msgstr "资源分配" -#: ../../source/index.rst:76 +#: ../../source/how-to-monitor-simulation.rst:149 msgid "" -"Problem-oriented how-to guides show step-by-step how to achieve a " -"specific goal." -msgstr "以问题为导向的 \"如何做 \"指南逐步展示如何实现特定目标。" +"You must understand how the Ray library works to efficiently allocate " +"system resources to simulation clients on your own." +msgstr "您必须了解 Ray 库是如何工作的,才能有效地为自己的仿真客户端分配系统资源。" -#: ../../source/index.rst:110 +#: ../../source/how-to-monitor-simulation.rst:152 msgid "" -"Understanding-oriented concept guides explain and discuss key topics and " -"underlying ideas behind Flower and collaborative AI." 
-msgstr "以理解为导向的概念指南解释并讨论了Flower和协作式人工智能背后的关键主题和基本思想。" - -#: ../../source/index.rst:120 -msgid "References" -msgstr "参考资料" - -#: ../../source/index.rst:122 -msgid "Information-oriented API reference and other reference material." -msgstr "以信息为导向的 API 参考资料和其他参考资料。" +"Initially, the simulation (which Ray handles under the hood) starts by " +"default with all the available resources on the system, which it shares " +"among the clients. It doesn't mean it divides it equally among all of " +"them, nor that the model training happens at all of them simultaneously. " +"You will learn more about that in the later part of this blog. You can " +"check the system resources by running the following:" +msgstr "" +"最初,模拟(由 Ray " +"在引擎下处理)默认使用系统上的所有可用资源启动,并在客户端之间共享。但这并不意味着它会将资源平均分配给所有客户端,也不意味着模型训练会在所有客户端同时进行。您将在本博客的后半部分了解到更多相关信息。您可以运行以下命令检查系统资源:" -#: ../../source/index.rst:131::1 -#, fuzzy -msgid ":py:obj:`flwr `\\" -msgstr ":py:obj:`flwr `\\" +#: ../../source/how-to-monitor-simulation.rst:164 +msgid "In Google Colab, the result you see might be similar to this:" +msgstr "在 Google Colab 中,您看到的结果可能与此类似:" -#: ../../source/index.rst:131::1 flwr:1 of -#, fuzzy -msgid "Flower main package." -msgstr "Flower 主包装。" +#: ../../source/how-to-monitor-simulation.rst:175 +msgid "" +"However, you can overwrite the defaults. When starting a simulation, do " +"the following (you don't need to overwrite all of them):" +msgstr "不过,您可以覆盖默认值。开始模拟时,请执行以下操作(不必全部覆盖):" -#: ../../source/index.rst:148 -msgid "Contributor docs" -msgstr "贡献者文档" +#: ../../source/how-to-monitor-simulation.rst:195 +msgid "Let’s also specify the resource for a single client." +msgstr "我们还可以为单个客户指定资源。" -#: ../../source/index.rst:150 +#: ../../source/how-to-monitor-simulation.rst:225 msgid "" -"The Flower community welcomes contributions. The following docs are " -"intended to help along the way." 
-msgstr "Flower 社区欢迎您的贡献。以下文档旨在为您提供帮助。" - -#: ../../source/ref-api-cli.rst:2 -msgid "Flower CLI reference" -msgstr "Flower CLI 参考" +"Now comes the crucial part. Ray will start a new client only when it has " +"all the required resources (such that they run in parallel) when the " +"resources allow." +msgstr "现在到了关键部分。只有在资源允许的情况下,Ray 才会在拥有所有所需资源(如并行运行)时启动新客户端。" -#: ../../source/ref-api-cli.rst:7 +#: ../../source/how-to-monitor-simulation.rst:228 #, fuzzy -msgid "flower-simulation" -msgstr "运行模拟" +msgid "" +"In the example above, only one client will be run, so your clients won't " +"run concurrently. Setting ``client_num_gpus = 0.5`` would allow running " +"two clients and therefore enable them to run concurrently. Be careful not" +" to require more resources than available. If you specified " +"``client_num_gpus = 2``, the simulation wouldn't start (even if you had 2" +" GPUs but decided to set 1 in ``ray_init_args``)." +msgstr "" +"在上面的示例中,将只运行一个客户端,因此您的客户端不会并发运行。设置 :code:`client_num_gpus = 0.5` " +"将允许运行两个客户端,从而使它们能够并发运行。请注意,所需的资源不要超过可用资源。如果您指定 :code:`client_num_gpus = " +"2`,模拟将无法启动(即使您有 2 个 GPU,但决定在 :code:`ray_init_args` 中设置为 1)。" -#: ../../source/ref-api-cli.rst:17 -msgid "flower-superlink" -msgstr "flower-superlink" +#: ../../source/how-to-monitor-simulation.rst:235 ../../source/ref-faq.rst:2 +msgid "FAQ" +msgstr "常见问题" -#: ../../source/ref-api-cli.rst:27 -#, fuzzy -msgid "flower-client-app" -msgstr "Flower 客户端。" +#: ../../source/how-to-monitor-simulation.rst:237 +msgid "Q: I don't see any metrics logged." +msgstr "问:我没有看到任何指标记录。" -#: ../../source/ref-api-cli.rst:37 -#, fuzzy -msgid "flower-server-app" -msgstr "flower-driver-api" +#: ../../source/how-to-monitor-simulation.rst:239 +msgid "" +"A: The timeframe might not be properly set. The setting is in the top " +"right corner (\"Last 30 minutes\" by default). Please change the " +"timeframe to reflect the period when the simulation was running." 
+msgstr "答:时间范围可能没有正确设置。设置在右上角(默认为 \"最后 30 分钟\")。请更改时间框架,以反映模拟运行的时间段。" -#: ../../source/ref-api/flwr.rst:2 -#, fuzzy -msgid "flwr" -msgstr "Flower" +#: ../../source/how-to-monitor-simulation.rst:243 +msgid "" +"Q: I see “Grafana server not detected. Please make sure the Grafana " +"server is running and refresh this page” after going to the Metrics tab " +"in Ray Dashboard." +msgstr "问:我看到 \"未检测到 Grafana 服务器。请确保 Grafana 服务器正在运行并刷新此页面\"。" -#: ../../source/ref-api/flwr.rst:25 ../../source/ref-api/flwr.server.rst:51 -#, fuzzy -msgid "Modules" -msgstr "模块" +#: ../../source/how-to-monitor-simulation.rst:246 +msgid "" +"A: You probably don't have Grafana running. Please check the running " +"services" +msgstr "答:您可能没有运行 Grafana。请检查正在运行的服务" -#: ../../source/ref-api/flwr.rst:35::1 +#: ../../source/how-to-monitor-simulation.rst:252 #, fuzzy -msgid ":py:obj:`flwr.client `\\" -msgstr ":py:obj:`flwr.client `\\" - -#: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of -msgid "Flower client." -msgstr "Flower 客户端。" +msgid "" +"Q: I see \"This site can't be reached\" when going to " +"http://127.0.0.1:8265." +msgstr "问:在访问 ``_时,我看到 \"无法访问该网站\"。" -#: ../../source/ref-api/flwr.rst:35::1 -#, fuzzy -msgid ":py:obj:`flwr.common `\\" -msgstr ":py:obj:`flwr.common `\\" +#: ../../source/how-to-monitor-simulation.rst:254 +msgid "" +"A: Either the simulation has already finished, or you still need to start" +" Prometheus." +msgstr "答:要么模拟已经完成,要么您还需要启动Prometheus。" -#: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of -msgid "Common components shared between server and client." 
-msgstr "服务器和客户端共享的通用组件。" +#: ../../source/how-to-monitor-simulation.rst:257 +msgid "Resources" +msgstr "资源" -#: ../../source/ref-api/flwr.rst:35::1 +#: ../../source/how-to-monitor-simulation.rst:259 #, fuzzy -msgid ":py:obj:`flwr.server `\\" -msgstr ":py:obj:`flwr.server `\\" - -#: ../../source/ref-api/flwr.rst:35::1 -#: ../../source/ref-api/flwr.server.rst:40::1 flwr.server:1 -#: flwr.server.server.Server:1 of -msgid "Flower server." -msgstr "Flower 服务器。" +msgid "" +"Ray Dashboard: https://docs.ray.io/en/latest/ray-observability/getting-" +"started.html" +msgstr "Ray 仪表盘: ``_" -#: ../../source/ref-api/flwr.rst:35::1 +#: ../../source/how-to-monitor-simulation.rst:261 #, fuzzy -msgid ":py:obj:`flwr.simulation `\\" -msgstr ":py:obj:`flwr.simulation `\\" +msgid "Ray Metrics: https://docs.ray.io/en/latest/cluster/metrics.html" +msgstr "" +"Ray 指标: ``_" -#: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of -#, fuzzy -msgid "Flower simulation." +#: ../../source/how-to-run-simulations.rst:2 +msgid "Run simulations" msgstr "运行模拟" -#: ../../source/ref-api/flwr.client.rst:2 -msgid "client" -msgstr "客户端" - -#: ../../source/ref-api/flwr.client.rst:13 -#: ../../source/ref-api/flwr.common.rst:13 -#: ../../source/ref-api/flwr.server.rst:13 -#: ../../source/ref-api/flwr.simulation.rst:13 -#, fuzzy -msgid "Functions" -msgstr "四种函数:" - -#: ../../source/ref-api/flwr.client.rst:25::1 -#, fuzzy -msgid ":py:obj:`run_client_app `\\ \\(\\)" -msgstr ":py:obj:`run_client_app `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:8 +msgid "" +"Simulating Federated Learning workloads is useful for a multitude of use-" +"cases: you might want to run your workload on a large cohort of clients " +"but without having to source, configure and mange a large number of " +"physical devices; you might want to run your FL workloads as fast as " +"possible on the compute systems you have access to without having to go " +"through a complex setup process; you might want to validate your " 
+"algorithm on different scenarios at varying levels of data and system " +"heterogeneity, client availability, privacy budgets, etc. These are among" +" some of the use-cases where simulating FL workloads makes sense. Flower " +"can accommodate these scenarios by means of its `VirtualClientEngine " +"`_ or " +"VCE." +msgstr "" +"模拟联邦学习工作负载可用于多种案例:您可能希望在大量客户端上运行您的工作负载,但无需采购、配置和管理大量物理设备;您可能希望在您可以访问的计算系统上尽可能快地运行您的" +" FL 工作负载,而无需经过复杂的设置过程;您可能希望在不同数据和系统异构性、客户端可用性、隐私预算等不同水平的场景中验证您的算法。这些都是模拟 " +"FL 工作负载的一些案例。Flower 可以通过其 \"虚拟客户端引擎\"(VirtualClientEngine)_或 VCE 来匹配这些情况。" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.supernode.app.run_client_app:1 of +#: ../../source/how-to-run-simulations.rst:19 #, fuzzy -msgid "Run Flower client app." -msgstr "Flower 客户端。" +msgid "" +"The ``VirtualClientEngine`` schedules, launches and manages `virtual` " +"clients. These clients are identical to `non-virtual` clients (i.e. the " +"ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " +"creating a class inheriting, for example, from `flwr.client.NumPyClient " +"`_ and therefore behave in an " +"identical way. In addition to that, clients managed by the " +"``VirtualClientEngine`` are:" +msgstr "" +":code:`VirtualClientEngine`用来规划,启动和管理`虚拟`客户端。这些客户端跟`非虚拟`客户端是一样的(即为您通过`flwr.client.start_client" +" `_启动的客户端),因为它们可以通过创建一个继承自 " +"`flwr.client.NumPyClient `_ " +"的类进行配置,因此其行为方式相同。另外,由 `VirtualClientEngine` 管理的客户端有:" -#: ../../source/ref-api/flwr.client.rst:25::1 -#, fuzzy -msgid ":py:obj:`run_supernode `\\ \\(\\)" -msgstr ":py:obj:`run_superlink `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:26 +msgid "" +"resource-aware: this means that each client gets assigned a portion of " +"the compute and memory on your system. You as a user can control this at " +"the beginning of the simulation and allows you to control the degree of " +"parallelism of your Flower FL simulation. 
The fewer the resources per " +"client, the more clients can run concurrently on the same hardware." +msgstr "" +"资源感知:这意味着每个客户端都会分配到系统中的一部分计算和内存。作为用户,您可以在模拟开始时对其进行控制,从而控制 Flower FL " +"模拟的并行程度。每个客户端的资源越少,在同一硬件上并发运行的客户端就越多。" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.supernode.app.run_supernode:1 of +#: ../../source/how-to-run-simulations.rst:31 #, fuzzy -msgid "Run Flower SuperNode." -msgstr "Flower 服务器。" +msgid "" +"self-managed: this means that you as a user do not need to launch clients" +" manually, instead this gets delegated to ``VirtualClientEngine``'s " +"internals." +msgstr "自管理:这意味着用户无需手动启动客户端,而是由 :code:`VirtualClientEngine` 负责。" -#: ../../source/ref-api/flwr.client.rst:25::1 -#, fuzzy +#: ../../source/how-to-run-simulations.rst:33 msgid "" -":py:obj:`start_client `\\ \\(\\*\\, " -"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" +"ephemeral: this means that a client is only materialized when it is " +"required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards," +" releasing the resources it was assigned and allowing in this way other " +"clients to participate." msgstr "" -":py:obj:`start_client `\\ \\(\\*\\, " -"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" - -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.app.start_client:1 of -msgid "Start a Flower client node which connects to a Flower server." -msgstr "启动一个 Flower 客户节点,连接到 Flower 服务器。" +"即时性:这意味着客户端只有在 FL 进程中需要它时才会被实体化(例如执行 `fit() `_ " +")。之后该对象将被销毁,释放分配给它的资源,并允许其他客户端以这种方式参与。" -#: ../../source/ref-api/flwr.client.rst:25::1 +#: ../../source/how-to-run-simulations.rst:38 #, fuzzy msgid "" -":py:obj:`start_numpy_client `\\ \\(\\*\\," -" server\\_address\\, client\\)" +"The ``VirtualClientEngine`` implements `virtual` clients using `Ray " +"`_, an open-source framework for scalable Python " +"workloads. In particular, Flower's ``VirtualClientEngine`` makes use of " +"`Actors `_ to spawn " +"`virtual` clients and run their workload." 
msgstr "" -":py:obj:`start_numpy_client `\\ \\(\\*\\," -" server\\_address\\, client\\)" +":code:`VirtualClientEngine`使用`Ray " +"`_来实现`虚拟`客户端,这是一个用于可扩展 Python 工作负载的开源框架。特别地,Flower 的" +" :code:`VirtualClientEngine` 使用 `Actors `_ 来生成 `virtual` 客户端并运行它们的工作负载。" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.app.start_numpy_client:1 of -msgid "Start a Flower NumPyClient which connects to a gRPC server." -msgstr "启动 Flower NumPyClient,连接到 gRPC 服务器。" +#: ../../source/how-to-run-simulations.rst:45 +msgid "Launch your Flower simulation" +msgstr "启动 Flower 模拟" -#: ../../source/ref-api/flwr.client.rst:27 -#: ../../source/ref-api/flwr.common.rst:32 -#: ../../source/ref-api/flwr.server.rst:28 -#: ../../source/ref-api/flwr.server.strategy.rst:17 -#: ../../source/ref-api/flwr.server.workflow.rst:17 -#, fuzzy -msgid "Classes" -msgstr "类别" +#: ../../source/how-to-run-simulations.rst:47 +msgid "" +"Running Flower simulations still require you to define your client class," +" a strategy, and utility functions to download and load (and potentially " +"partition) your dataset. With that out of the way, launching your " +"simulation is done with `start_simulation `_ and a minimal example looks" +" as follows:" +msgstr "" +"运行 Flower 模拟器仍然需要定义客户端类、策略以及下载和加载(可能还需要分割)数据集的实用程序。在完成这些工作后,就可以使用 " +"\"start_simulation `_\" 来启动模拟了,一个最简单的示例如下:" + +#: ../../source/how-to-run-simulations.rst:73 +msgid "VirtualClientEngine resources" +msgstr "虚拟客户端引擎资源" -#: ../../source/ref-api/flwr.client.rst:34::1 +#: ../../source/how-to-run-simulations.rst:75 #, fuzzy -msgid ":py:obj:`Client `\\ \\(\\)" -msgstr ":py:obj:`Client `\\ \\(\\)" +msgid "" +"By default the VCE has access to all system resources (i.e. all CPUs, all" +" GPUs, etc) since that is also the default behavior when starting Ray. " +"However, in some settings you might want to limit how many of your system" +" resources are used for simulation. 
You can do this via the " +"``ray_init_args`` input argument to ``start_simulation`` which the VCE " +"internally passes to Ray's ``ray.init`` command. For a complete list of " +"settings you can configure check the `ray.init " +"`_" +" documentation. Do not set ``ray_init_args`` if you want the VCE to use " +"all your system's CPUs and GPUs." +msgstr "" +"默认情况下,VCE 可以访问所有系统资源(即所有 CPU、所有 GPU 等),因为这也是启动 Ray " +"时的默认行为。不过,在某些设置中,您可能希望限制有多少系统资源用于模拟。您可以通过 :code:`ray_init_args` 输入到 " +":code:`start_simulation` 的参数来做到这一点,VCE 会在内部将该参数传递给 Ray 的 :code:`ray.init`" +" 命令。有关您可以配置的设置的完整列表,请查看 `ray.init `_ 文档。如果希望 VCE 使用系统中所有的 CPU 和 " +"GPU,请不要设置 :code:`ray_init_args`。" -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.client.Client:1 of -msgid "Abstract base class for Flower clients." -msgstr "Flower 客户端的抽象基类。" +#: ../../source/how-to-run-simulations.rst:97 +msgid "Assigning client resources" +msgstr "分配客户端资源" -#: ../../source/ref-api/flwr.client.rst:34::1 +#: ../../source/how-to-run-simulations.rst:99 #, fuzzy msgid "" -":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " -"mods\\]\\)" +"By default the ``VirtualClientEngine`` assigns a single CPU core (and " +"nothing else) to each virtual client. This means that if your system has " +"10 cores, that many virtual clients can be concurrently running." msgstr "" -":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " -"mods\\]\\)" +"默认情况下,:code:`VirtualClientEngine` 会为每个虚拟客户端分配一个 CPU " +"内核(不分配其他任何内核)。这意味着,如果系统有 10 个内核,那么可以同时运行这么多虚拟客户端。" -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.client_app.ClientApp:1 of +#: ../../source/how-to-run-simulations.rst:103 +msgid "" +"More often than not, you would probably like to adjust the resources your" +" clients get assigned based on the complexity (i.e. compute and memory " +"footprint) of your FL workload. You can do so when starting your " +"simulation by setting the argument `client_resources` to " +"`start_simulation `_." 
+" Two keys are internally used by Ray to schedule and spawn workloads (in " +"our case Flower clients):" +msgstr "" +"通常情况下,您可能希望根据 FL 工作负载的复杂性(即计算和内存占用)来调整分配给客户端的资源。您可以在启动模拟时将参数 " +"`client_resources` 设置为 `start_simulation `_ 。Ray " +"内部使用两个键来调度和生成工作负载(在我们的例子中是 Flower 客户端):" + +#: ../../source/how-to-run-simulations.rst:110 #, fuzzy -msgid "Flower ClientApp." -msgstr "Flower 客户端。" +msgid "``num_cpus`` indicates the number of CPU cores a client would get." +msgstr ":code:`num_cpus` 表示客户端将获得的 CPU 内核数量。" -#: ../../source/ref-api/flwr.client.rst:34::1 +#: ../../source/how-to-run-simulations.rst:111 #, fuzzy -msgid ":py:obj:`NumPyClient `\\ \\(\\)" -msgstr ":py:obj:`NumPyClient `\\ \\(\\)" +msgid "``num_gpus`` indicates the **ratio** of GPU memory a client gets assigned." +msgstr ":code:`num_gpus` 表示分配给客户端的 GPU 内存的**比例**。" -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.numpy_client.NumPyClient:1 of -msgid "Abstract base class for Flower clients using NumPy." -msgstr "使用 NumPy 的 Flower 客户端的抽象基类。" +#: ../../source/how-to-run-simulations.rst:113 +msgid "Let's see a few examples:" +msgstr "让我们来看几个例子:" -#: flwr.client.client.Client:1 flwr.client.numpy_client.NumPyClient:1 -#: flwr.server.client_manager.ClientManager:1 -#: flwr.server.driver.driver.Driver:1 flwr.server.strategy.strategy.Strategy:1 -#: of +#: ../../source/how-to-run-simulations.rst:132 #, fuzzy -msgid "Bases: :py:class:`~abc.ABC`" -msgstr "Bases: :py:class:`~abc.ABC`" +msgid "" +"While the ``client_resources`` can be used to control the degree of " +"concurrency in your FL simulation, this does not stop you from running " +"dozens, hundreds or even thousands of clients in the same round and " +"having orders of magnitude more `dormant` (i.e. not participating in a " +"round) clients. Let's say you want to have 100 clients per round but your" +" system can only accommodate 8 clients concurrently. 
The " +"``VirtualClientEngine`` will schedule 100 jobs to run (each simulating a " +"client sampled by the strategy) and then will execute them in a resource-" +"aware manner in batches of 8." +msgstr "" +"虽然 :code:`client_resources` 可用来控制 FL " +"模拟的并发程度,但这并不能阻止您在同一轮模拟中运行几十、几百甚至上千个客户端,并拥有数量级更多的 " +"\"休眠\"(即不参与一轮模拟)客户端。比方说,您希望每轮有 100 个客户端,但您的系统只能同时容纳 8 " +"个客户端。:code:`VirtualClientEngine` 将安排运行 100 " +"个工作(每个工作模拟策略采样的一个客户端),然后以资源感知的方式分批执行。" -#: ../../source/ref-api/flwr.client.Client.rst:15 -#: ../../source/ref-api/flwr.client.ClientApp.rst:15 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:15 -#: ../../source/ref-api/flwr.common.Array.rst:15 -#: ../../source/ref-api/flwr.common.ClientMessage.rst:15 -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 -#: ../../source/ref-api/flwr.common.Context.rst:15 -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 -#: ../../source/ref-api/flwr.common.Error.rst:15 -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 -#: ../../source/ref-api/flwr.common.EventType.rst:15 -#: ../../source/ref-api/flwr.common.FitIns.rst:15 -#: ../../source/ref-api/flwr.common.FitRes.rst:15 -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:15 -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 -#: ../../source/ref-api/flwr.common.Message.rst:15 -#: ../../source/ref-api/flwr.common.MessageType.rst:15 -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 -#: ../../source/ref-api/flwr.common.Metadata.rst:15 -#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 -#: ../../source/ref-api/flwr.common.Parameters.rst:15 -#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 -#: ../../source/ref-api/flwr.common.RecordSet.rst:15 -#: 
../../source/ref-api/flwr.common.ServerMessage.rst:15 -#: ../../source/ref-api/flwr.common.Status.rst:15 -#: ../../source/ref-api/flwr.server.ClientManager.rst:15 -#: ../../source/ref-api/flwr.server.Driver.rst:15 -#: ../../source/ref-api/flwr.server.History.rst:15 -#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 -#: ../../source/ref-api/flwr.server.Server.rst:15 -#: ../../source/ref-api/flwr.server.ServerApp.rst:15 -#: ../../source/ref-api/flwr.server.ServerConfig.rst:15 -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:15 -#: 
../../source/ref-api/flwr.server.strategy.FedYogi.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 -#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 -#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 -#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 -#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 -#, fuzzy -msgid "Methods" -msgstr "方法" +#: ../../source/how-to-run-simulations.rst:140 +msgid "" +"To understand all the intricate details on how resources are used to " +"schedule FL clients and how to define custom resources, please take a " +"look at the `Ray documentation `_." +msgstr "" +"要了解资源如何用于调度 FL 客户端以及如何定义自定义资源的所有复杂细节,请查看 `Ray 文档 " +"`_。" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#, fuzzy -msgid ":py:obj:`evaluate `\\ \\(ins\\)" -msgstr ":py:obj:`evaluate `\\ \\(ins\\)" +#: ../../source/how-to-run-simulations.rst:145 +msgid "Simulation examples" +msgstr "模拟示例" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.evaluate:1 -#: flwr.client.numpy_client.NumPyClient.evaluate:1 of -msgid "Evaluate the provided parameters using the locally held dataset." -msgstr "使用本地数据集评估所提供的参数。" +#: ../../source/how-to-run-simulations.rst:147 +msgid "" +"A few ready-to-run complete examples for Flower simulation in " +"Tensorflow/Keras and PyTorch are provided in the `Flower repository " +"`_. 
You can run them on Google Colab too:" +msgstr "" +"在 Tensorflow/Keras 和 PyTorch 中进行 Flower 模拟的几个可随时运行的完整示例已在 `Flower 库 " +"`_ 中提供。您也可以在 Google Colab 上运行它们:" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#, fuzzy -msgid ":py:obj:`fit `\\ \\(ins\\)" -msgstr ":py:obj:`fit `\\ \\(ins\\)" +#: ../../source/how-to-run-simulations.rst:151 +msgid "" +"`Tensorflow/Keras Simulation " +"`_: 100 clients collaboratively train a MLP model on MNIST." +msgstr "" +"Tensorflow/Keras模拟 `_:100个客户端在MNIST上协作训练一个MLP模型。" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.fit:1 of -msgid "Refine the provided parameters using the locally held dataset." -msgstr "利用本地数据集完善所提供的参数。" +#: ../../source/how-to-run-simulations.rst:154 +msgid "" +"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " +"MNIST." +msgstr "" +"PyTorch 模拟 `_:100 个客户端在 MNIST 上协作训练一个 CNN 模型。" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#, fuzzy -msgid ":py:obj:`get_context `\\ \\(\\)" -msgstr ":py:obj:`get_context `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:159 +msgid "Multi-node Flower simulations" +msgstr "多节点 Flower 模拟" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.get_context:1 -#: flwr.client.numpy_client.NumPyClient.get_context:1 of +#: ../../source/how-to-run-simulations.rst:161 #, fuzzy -msgid "Get the run context from this client." -msgstr "评估客户端的反应。" +msgid "" +"Flower's ``VirtualClientEngine`` allows you to run FL simulations across " +"multiple compute nodes. 
Before starting your multi-node simulation ensure" +" that you:" +msgstr "Flower 的 :code:`VirtualClientEngine` 允许您在多个计算节点上运行 FL 模拟。在开始多节点模拟之前,请确保:" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#, fuzzy -msgid ":py:obj:`get_parameters `\\ \\(ins\\)" -msgstr ":py:obj:`get_parameters `\\ \\(ins\\)" +#: ../../source/how-to-run-simulations.rst:164 +msgid "Have the same Python environment in all nodes." +msgstr "所有节点都有相同的 Python 环境。" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.get_parameters:1 -#: flwr.client.numpy_client.NumPyClient.get_parameters:1 of -msgid "Return the current local model parameters." -msgstr "返回当前本地模型参数。" +#: ../../source/how-to-run-simulations.rst:165 +msgid "Have a copy of your code (e.g. your entire repo) in all nodes." +msgstr "在所有节点上都有一份代码副本(例如整个软件包)。" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#, fuzzy -msgid ":py:obj:`get_properties `\\ \\(ins\\)" -msgstr ":py:obj:`get_properties `\\ \\(ins\\)" +#: ../../source/how-to-run-simulations.rst:166 +msgid "" +"Have a copy of your dataset in all nodes (more about this in " +":ref:`simulation considerations `)" +msgstr "在所有节点中都有一份数据集副本(更多相关信息请参阅 :ref:`模拟注意事项`)" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.get_properties:1 of -msgid "Return set of client's properties." -msgstr "返回客户端的属性集。" +#: ../../source/how-to-run-simulations.rst:168 +#, fuzzy +msgid "" +"Pass ``ray_init_args={\"address\"=\"auto\"}`` to `start_simulation `_ so the " +"``VirtualClientEngine`` attaches to a running Ray instance." 
+msgstr "" +"将 :code:`ray_init_args={\"address\"=\"auto\"}`传递给 `start_simulation `_ ,这样 " +":code:`VirtualClientEngine`就会连接到正在运行的 Ray 实例。" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/how-to-run-simulations.rst:171 #, fuzzy -msgid ":py:obj:`set_context `\\ \\(context\\)" -msgstr ":py:obj:`set_context `\\ \\(context\\)" +msgid "" +"Start Ray on you head node: on the terminal type ``ray start --head``. " +"This command will print a few lines, one of which indicates how to attach" +" other nodes to the head node." +msgstr "" +"在头部节点上启动 Ray:在终端上输入 :code:`raystart--" +"head`。该命令将打印几行输出,其中一行说明如何将其他节点连接到头部节点。" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.set_context:1 -#: flwr.client.numpy_client.NumPyClient.set_context:1 of +#: ../../source/how-to-run-simulations.rst:174 #, fuzzy -msgid "Apply a run context to this client." -msgstr "将运行上下文应用于该客户端。" +msgid "" +"Attach other nodes to the head node: copy the command shown after " +"starting the head and execute it on terminal of a new node: for example " +"``ray start --address='192.168.1.132:6379'``" +msgstr "" +"将其他节点附加到头部节点:复制启动头部后显示的命令,并在新节点的终端上执行:例如 :code:`ray start " +"--address='192.168.1.132:6379'`" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/how-to-run-simulations.rst:178 +msgid "" +"With all the above done, you can run your code from the head node as you " +"would if the simulation was running on a single node." +msgstr "完成上述所有操作后,您就可以在头部节点上运行代码了,就像在单个节点上运行模拟一样。" + +#: ../../source/how-to-run-simulations.rst:181 #, fuzzy -msgid ":py:obj:`to_client `\\ \\(\\)" -msgstr ":py:obj:`to_client `\\ \\(\\)" +msgid "" +"Once your simulation is finished, if you'd like to dismantle your cluster" +" you simply need to run the command ``ray stop`` in each node's terminal " +"(including the head node)." 
+msgstr "模拟结束后,如果要拆除集群,只需在每个节点(包括头部节点)的终端运行 :code:`ray stop` 命令即可。" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.to_client:1 of -msgid "Return client (itself)." -msgstr "返回客户端(本身)。" +#: ../../source/how-to-run-simulations.rst:185 +msgid "Multi-node simulation good-to-know" +msgstr "了解多节点模拟" -#: ../../source/ref-api/flwr.client.Client.rst:46 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:46 -#: ../../source/ref-api/flwr.common.Array.rst:28 -#: ../../source/ref-api/flwr.common.ClientMessage.rst:25 -#: ../../source/ref-api/flwr.common.Code.rst:19 -#: ../../source/ref-api/flwr.common.Context.rst:25 -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 -#: ../../source/ref-api/flwr.common.Error.rst:25 -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 -#: ../../source/ref-api/flwr.common.EventType.rst:165 -#: ../../source/ref-api/flwr.common.FitIns.rst:25 -#: ../../source/ref-api/flwr.common.FitRes.rst:25 -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:25 -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 -#: ../../source/ref-api/flwr.common.Message.rst:37 -#: ../../source/ref-api/flwr.common.MessageType.rst:25 -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 -#: ../../source/ref-api/flwr.common.Metadata.rst:25 -#: ../../source/ref-api/flwr.common.Parameters.rst:25 -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 -#: ../../source/ref-api/flwr.common.RecordSet.rst:25 -#: ../../source/ref-api/flwr.common.ServerMessage.rst:25 -#: ../../source/ref-api/flwr.common.Status.rst:25 -#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 -#: ../../source/ref-api/flwr.server.ServerConfig.rst:25 +#: ../../source/how-to-run-simulations.rst:187 +msgid "" +"Here we list a few interesting functionality when running 
multi-node FL " +"simulations:" +msgstr "在此,我们列举了运行多节点 FL 模拟时的一些有趣功能:" + +#: ../../source/how-to-run-simulations.rst:189 #, fuzzy -msgid "Attributes" -msgstr "属性" +msgid "" +"User ``ray status`` to check all nodes connected to your head node as " +"well as the total resources available to the ``VirtualClientEngine``." +msgstr "" +"使用 :code:`ray status` 查看连接到头部节点的所有节点,以及 :code:`VirtualClientEngine` " +"可用的总资源。" -#: flwr.client.client.Client.evaluate:1::1 of +#: ../../source/how-to-run-simulations.rst:192 #, fuzzy -msgid ":py:obj:`context `\\" -msgstr ":py:obj:`context `\\" +msgid "" +"When attaching a new node to the head, all its resources (i.e. all CPUs, " +"all GPUs) will be visible by the head node. This means that the " +"``VirtualClientEngine`` can schedule as many `virtual` clients as that " +"node can possible run. In some settings you might want to exclude certain" +" resources from the simulation. You can do this by appending `--num-" +"cpus=` and/or `--num-gpus=` in " +"any ``ray start`` command (including when starting the head)" +msgstr "" +"将新节点附加到头部节点时,头部节点将可见其所有资源(即所有 CPU 和 GPU)。这意味着 :code:`VirtualClientEngine`" +" 可以调度尽可能多的 \"虚拟 \"客户端来运行该节点。在某些设置中,您可能希望将某些资源排除在模拟之外。为此,您可以在任何 :code:`ray" +" start` 命令(包括启动头部时)中添加 `--num-cpus=`和/或 `--num-" +"gpus=`" -#: ../../source/ref-api/flwr.common.Parameters.rst:2 -#: flwr.client.app.start_client flwr.client.app.start_numpy_client -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.numpy_client.NumPyClient.evaluate -#: flwr.client.numpy_client.NumPyClient.fit -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.context.Context flwr.common.message.Error -#: flwr.common.message.Message flwr.common.message.Message.create_error_reply -#: flwr.common.message.Message.create_reply flwr.common.message.Metadata -#: 
flwr.common.record.parametersrecord.Array flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.ClientManager.unregister -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.unregister -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.bulyan.Bulyan -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.fedadagrad.FedAdagrad -#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg -#: flwr.server.strategy.fedavg_android.FedAvgAndroid -#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt -#: flwr.server.strategy.fedprox.FedProx -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg -#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow -#: 
flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow -#: flwr.simulation.app.start_simulation -#: flwr.simulation.run_simulation.run_simulation of -msgid "Parameters" -msgstr "参数" +#: ../../source/how-to-run-simulations.rst:202 +msgid "Considerations for simulations" +msgstr "模拟的注意事项" -#: flwr.client.client.Client.evaluate:3 of +#: ../../source/how-to-run-simulations.rst:206 msgid "" -"The evaluation instructions containing (global) model parameters received" -" from the server and a dictionary of configuration values used to " -"customize the local evaluation process." -msgstr "评估指令包含从服务器接收的(全局)模型参数,以及用于定制本地评估流程的配置值字典。" - -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.numpy_client.NumPyClient.evaluate -#: flwr.client.numpy_client.NumPyClient.fit -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.message.Message.create_reply flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.num_available -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.SimpleClientManager.num_available -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: 
flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.simulation.app.start_simulation of -msgid "Returns" -msgstr "返回" +"We are actively working on these fronts so to make it trivial to run any " +"FL workload with Flower simulation." +msgstr "我们正在积极开展这些方面的工作,以便使 FL 工作负载与 Flower 模拟的运行变得轻而易举。" -#: flwr.client.client.Client.evaluate:8 of +#: ../../source/how-to-run-simulations.rst:209 msgid "" -"The evaluation result containing the loss on the local dataset and other " -"details such as the number of local data examples used for evaluation." -msgstr "评估结果包含本地数据集上的损失值和其他详细信息,如用于评估的本地数据的数量。" +"The current VCE allows you to run Federated Learning workloads in " +"simulation mode whether you are prototyping simple scenarios on your " +"personal laptop or you want to train a complex FL pipeline across " +"multiple high-performance GPU nodes. While we add more capabilities to " +"the VCE, the points below highlight some of the considerations to keep in" +" mind when designing your FL pipeline with Flower. We also highlight a " +"couple of current limitations in our implementation." 
+msgstr "" +"当前的 VCE 允许您在模拟模式下运行联邦学习工作负载,无论您是在个人笔记本电脑上建立简单的场景原型,还是要在多个高性能 GPU 节点上训练复杂的" +" FL情景。虽然我们为 VCE 增加了更多的功能,但以下几点强调了在使用 Flower 设计 FL " +"时需要注意的一些事项。我们还强调了我们的实现中目前存在的一些局限性。" -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.message.Message.create_reply flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.num_available -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.SimpleClientManager.num_available -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.simulation.app.start_simulation of -msgid "Return type" -msgstr "返回类型" +#: ../../source/how-to-run-simulations.rst:217 +msgid "GPU resources" +msgstr "GPU 资源" -#: flwr.client.client.Client.fit:3 of +#: ../../source/how-to-run-simulations.rst:219 +#, fuzzy msgid "" -"The training instructions containing (global) model parameters received " -"from the server and a dictionary of configuration values used to " -"customize the local training process." 
-msgstr "训练指令,包含从服务器接收的(全局)模型参数,以及用于定制本地训练过程的配置值字典。" +"The VCE assigns a share of GPU memory to a client that specifies the key " +"``num_gpus`` in ``client_resources``. This being said, Ray (used " +"internally by the VCE) is by default:" +msgstr "" +"VCE 会为指定 :code:`client_resources` 中 :code:`num_gpus` 关键字的客户端分配 GPU " +"内存份额。也就是说,Ray(VCE 内部使用)是默认的:" -#: flwr.client.client.Client.fit:8 of +#: ../../source/how-to-run-simulations.rst:222 +#, fuzzy msgid "" -"The training result containing updated parameters and other details such " -"as the number of local training examples used for training." -msgstr "训练结果包含更新的参数和其他详细信息,如用于训练的本地训练示例的数量。" +"not aware of the total VRAM available on the GPUs. This means that if you" +" set ``num_gpus=0.5`` and you have two GPUs in your system with different" +" (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients " +"concurrently." +msgstr "" +"不知道 GPU 上可用的总 VRAM。这意味着,如果您设置 :code:`num_gpus=0.5`,而系统中有两个不同(如 32GB 和 " +"8GB)VRAM 的 GPU,它们都将同时运行 2 个客户端。" -#: flwr.client.client.Client.get_parameters:3 of +#: ../../source/how-to-run-simulations.rst:225 msgid "" -"The get parameters instructions received from the server containing a " -"dictionary of configuration values." -msgstr "从服务器接收的获取参数指令包含配置值字典。" +"not aware of other unrelated (i.e. not created by the VCE) workloads are " +"running on the GPU. Two takeaways from this are:" +msgstr "不知道 GPU 上正在运行其他无关(即不是由 VCE 创建)的工作负载。从中可以得到以下两点启示:" -#: flwr.client.client.Client.get_parameters:7 of -msgid "The current local model parameters." -msgstr "当前的本地模型参数。" - -#: flwr.client.client.Client.get_properties:3 of +#: ../../source/how-to-run-simulations.rst:228 msgid "" -"The get properties instructions received from the server containing a " -"dictionary of configuration values." -msgstr "从服务器接收的获取属性指令包含配置值字典。" - -#: flwr.client.client.Client.get_properties:7 of -msgid "The current client properties." 
-msgstr "当前客户端属性。" +"Your Flower server might need a GPU to evaluate the `global model` after " +"aggregation (by instance when making use of the `evaluate method `_)" +msgstr "" +"您的 Flower 服务器可能需要 GPU 来评估聚合后的 \"全局模型\"(例如在使用 \"评估方法\"`_时)" -#: ../../source/ref-api/flwr.client.ClientApp.rst:2 +#: ../../source/how-to-run-simulations.rst:231 #, fuzzy -msgid "ClientApp" -msgstr "客户端" +msgid "" +"If you want to run several independent Flower simulations on the same " +"machine you need to mask-out your GPUs with " +"``CUDA_VISIBLE_DEVICES=\"\"`` when launching your experiment." +msgstr "" +"如果您想在同一台机器上运行多个独立的 Flower 模拟,则需要在启动实验时使用 " +":code:`CUDA_VISIBLE_DEVICES=\"\"` 屏蔽 GPU。" -#: flwr.client.client_app.ClientApp:1 flwr.common.constant.MessageType:1 -#: flwr.common.constant.MessageTypeLegacy:1 flwr.common.context.Context:1 -#: flwr.common.message.Error:1 flwr.common.message.Message:1 -#: flwr.common.message.Metadata:1 flwr.common.record.parametersrecord.Array:1 -#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 -#: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 -#: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 -#: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 -#: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 -#: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 -#: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 -#: flwr.common.typing.Status:1 flwr.server.history.History:1 -#: flwr.server.server.Server:1 flwr.server.server_app.ServerApp:1 -#: flwr.server.server_config.ServerConfig:1 -#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 -#: of +#: ../../source/how-to-run-simulations.rst:235 #, fuzzy -msgid "Bases: :py:class:`object`" -msgstr "Bases: :py:class:`object`" +msgid "" +"In addition, the GPU resource limits passed to 
``client_resources`` are " +"not `enforced` (i.e. they can be exceeded) which can result in the " +"situation of client using more VRAM than the ratio specified when " +"starting the simulation." +msgstr "" +"此外,传递给 :code:`client_resources` 的 GPU 资源限制并不是 \"强制 \"的(即可以超出),这可能导致客户端使用的" +" VRAM 超过启动模拟时指定的比例。" -#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 -#: flwr.client.client_app.ClientApp:4 -#: flwr.client.client_app.ClientApp.evaluate:4 -#: flwr.client.client_app.ClientApp.query:4 -#: flwr.client.client_app.ClientApp.train:4 flwr.server.app.start_server:41 -#: flwr.server.server_app.ServerApp:4 flwr.server.server_app.ServerApp.main:4 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 -#: of -msgid "Examples" -msgstr "实例" +#: ../../source/how-to-run-simulations.rst:240 +msgid "TensorFlow with GPUs" +msgstr "使用 GPU 的 TensorFlow" -#: flwr.client.client_app.ClientApp:5 of -#, fuzzy +#: ../../source/how-to-run-simulations.rst:242 msgid "" -"Assuming a typical `Client` implementation named `FlowerClient`, you can " -"wrap it in a `ClientApp` as follows:" -msgstr "假定有一个名为 `FlowerClient` 的典型 `Client` 实现,可以将其封装在一个 `ClientApp` 中,如下所示:" +"When `using a GPU with TensorFlow " +"`_ nearly your entire GPU memory of" +" all your GPUs visible to the process will be mapped. This is done by " +"TensorFlow for optimization purposes. However, in settings such as FL " +"simulations where we want to split the GPU into multiple `virtual` " +"clients, this is not a desirable mechanism. Luckily we can disable this " +"default behavior by `enabling memory growth " +"`_." 
+msgstr "" +"在 TensorFlow `_ 中使用 GPU 时,几乎所有进程可见的" +" GPU 内存都将被映射。TensorFlow 这样做是出于优化目的。然而,在 FL 模拟等设置中,我们希望将 GPU 分割成多个 \"虚拟 " +"\"客户端,这并不是一个理想的机制。幸运的是,我们可以通过 `启用内存增长 " +"`_来禁用这一默认行为。" -#: flwr.client.client_app.ClientApp:16 of +#: ../../source/how-to-run-simulations.rst:249 #, fuzzy msgid "" -"If the above code is in a Python module called `client`, it can be " -"started as follows:" -msgstr "如果上述代码位于一个名为 \"客户端 \"的 Python 模块中,则可以按如下方式启动它:" +"This would need to be done in the main process (which is where the server" +" would run) and in each Actor created by the VCE. By means of " +"``actor_kwargs`` we can pass the reserved key `\"on_actor_init_fn\"` in " +"order to specify a function to be executed upon actor initialization. In " +"this case, to enable GPU growth for TF workloads. It would look as " +"follows:" +msgstr "" +"这需要在主进程(也就是服务器运行的地方)和 VCE 创建的每个角色中完成。通过 " +":code:`actor_kwargs`,我们可以传递保留关键字`\"on_actor_init_fn\"`,以指定在角色初始化时执行的函数。在本例中,为了使" +" TF 工作负载的 GPU 增长,它看起来如下:" -#: flwr.client.client_app.ClientApp:21 of -#, fuzzy +#: ../../source/how-to-run-simulations.rst:272 msgid "" -"In this `client:app` example, `client` refers to the Python module " -"`client.py` in which the previous code lives in and `app` refers to the " -"global attribute `app` that points to an object of type `ClientApp`." +"This is precisely the mechanism used in `Tensorflow/Keras Simulation " +"`_ example." msgstr "" -"在这个 `client:app` 例子中,`client` 指的是前面代码所在的 Python 模块 `client.py`,而 `app` " -"指的是指向 `ClientApp` 类型对象的全局属性 `app` 。" - -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -#, fuzzy -msgid ":py:obj:`evaluate `\\ \\(\\)" -msgstr ":py:obj:`evaluate `\\ \\(\\)" +"这正是 \"Tensorflow/Keras 模拟 " +"`_\"示例中使用的机制。" -#: flwr.client.client_app.ClientApp.evaluate:1 -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -#, fuzzy -msgid "Return a decorator that registers the evaluate fn with the client app." 
-msgstr "返回一个装饰器,用于向客户端程序注册评估 fn。" +#: ../../source/how-to-run-simulations.rst:276 +msgid "Multi-node setups" +msgstr "多节点设置" -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -#, fuzzy -msgid ":py:obj:`query `\\ \\(\\)" -msgstr ":py:obj:`query `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:278 +msgid "" +"The VCE does not currently offer a way to control on which node a " +"particular `virtual` client is executed. In other words, if more than a " +"single node have the resources needed by a client to run, then any of " +"those nodes could get the client workload scheduled onto. Later in the FL" +" process (i.e. in a different round) the same client could be executed by" +" a different node. Depending on how your clients access their datasets, " +"this might require either having a copy of all dataset partitions on all " +"nodes or a dataset serving mechanism (e.g. using nfs, a database) to " +"circumvent data duplication." +msgstr "" +"VCE 目前不提供控制特定 \"虚拟 " +"\"客户端在哪个节点上执行的方法。换句话说,如果不止一个节点拥有客户端运行所需的资源,那么这些节点中的任何一个都可能被调度到客户端工作负载上。在 " +"FL " +"进程的稍后阶段(即在另一轮中),同一客户端可以由不同的节点执行。根据客户访问数据集的方式,这可能需要在所有节点上复制所有数据集分区,或采用数据集服务机制(如使用" +" nfs 或数据库)来避免数据重复。" -#: flwr.client.client_app.ClientApp.evaluate:1::1 -#: flwr.client.client_app.ClientApp.query:1 of -#, fuzzy -msgid "Return a decorator that registers the query fn with the client app." -msgstr "返回一个向客户端应用程序注册查询 fn 的装饰器。" +#: ../../source/how-to-run-simulations.rst:286 +msgid "" +"By definition virtual clients are `stateless` due to their ephemeral " +"nature. A client state can be implemented as part of the Flower client " +"class but users need to ensure this saved to persistent storage (e.g. a " +"database, disk) and that can be retrieve later by the same client " +"regardless on which node it is running from. This is related to the point" +" above also since, in some way, the client's dataset could be seen as a " +"type of `state`." 
+msgstr "" +"根据定义,虚拟客户端是 \"无状态 \"的,因为它们具有即时性。客户机状态可以作为 Flower " +"客户机类的一部分来实现,但用户需要确保将其保存到持久存储(如数据库、磁盘)中,而且无论客户机在哪个节点上运行,都能在以后检索到。这也与上述观点有关,因为在某种程度上,客户端的数据集可以被视为一种" +" \"状态\"。" -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -#, fuzzy -msgid ":py:obj:`train `\\ \\(\\)" -msgstr "server.strategy.Strategy" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:2 +msgid "Save and load model checkpoints" +msgstr "保存和加载模型检查点" -#: flwr.client.client_app.ClientApp.evaluate:1::1 -#: flwr.client.client_app.ClientApp.train:1 of -#, fuzzy -msgid "Return a decorator that registers the train fn with the client app." -msgstr "返回一个装饰器,用于在客户端应用程序中注册火车 fn。" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:4 +msgid "" +"Flower does not automatically save model updates on the server-side. This" +" how-to guide describes the steps to save (and load) model checkpoints in" +" Flower." +msgstr "Flower 不会在服务器端自动保存模型更新。本指南将介绍在 Flower 中保存(和加载)模型检查点的步骤。" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:2 -msgid "NumPyClient" -msgstr "NumPyClient" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:8 +msgid "Model checkpointing" +msgstr "模型检查点" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:10 #, fuzzy msgid "" -":py:obj:`evaluate `\\ \\(parameters\\, " -"config\\)" +"Model updates can be persisted on the server-side by customizing " +"``Strategy`` methods. Implementing custom strategies is always an option," +" but for many cases it may be more convenient to simply customize an " +"existing strategy. The following code example defines a new " +"``SaveModelStrategy`` which customized the existing built-in ``FedAvg`` " +"strategy. In particular, it customizes ``aggregate_fit`` by calling " +"``aggregate_fit`` in the base class (``FedAvg``). 
It then continues to " +"save returned (aggregated) weights before it returns those aggregated " +"weights to the caller (i.e., the server):" msgstr "" -":py:obj:`evaluate `\\ \\(parameters\\, " -"config\\)" - -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#, fuzzy -msgid ":py:obj:`fit `\\ \\(parameters\\, config\\)" -msgstr ":py:obj:`fit `\\ \\(parameters\\, config\\)" - -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.fit:1 of -msgid "Train the provided parameters using the locally held dataset." -msgstr "使用本地数据集训练所提供的参数。" +"模型更新可通过自定义 :code:`Strategy` " +"方法在服务器端持久化。实现自定义策略始终是一种选择,但在许多情况下,简单地自定义现有策略可能更方便。下面的代码示例定义了一个新的 " +":code:`SaveModelStrategy`,它自定义了现有的内置 :code:`FedAvg` " +"策略。特别是,它通过调用基类(:code:`FedAvg`)中的 :code:`aggregate_fit` 来定制 " +":code:`aggregate_fit`。然后继续保存返回的(聚合)参数,然后再将这些聚合参数返回给调用者(即服务器):" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#, fuzzy -msgid ":py:obj:`get_context `\\ \\(\\)" -msgstr ":py:obj:`get_context `\\ \\(\\)" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:53 +msgid "Save and load PyTorch checkpoints" +msgstr "保存和加载 PyTorch 检查点" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#, fuzzy +#: ../../source/how-to-save-and-load-model-checkpoints.rst:55 msgid "" -":py:obj:`get_parameters `\\ " -"\\(config\\)" +"Similar to the previous example but with a few extra steps, we'll show " +"how to store a PyTorch checkpoint we'll use the ``torch.save`` function. " +"Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be" +" transformed into a list of NumPy ``ndarray``'s, then those are " +"transformed into the PyTorch ``state_dict`` following the ``OrderedDict``" +" class structure." 
msgstr "" -":py:obj:`get_parameters `\\ " -"\\(config\\)" +"与前面的例子类似,但多了几个步骤,我们将展示如何存储一个 PyTorch 检查点,我们将使用 ``torch.save`` " +"函数。首先,``aggregate_fit`` 返回一个 ``Parameters`` 对象,它必须被转换成一个 NumPy " +"``ndarray`` 的列表,然后这些对象按照 ``OrderedDict`` 类结构被转换成 PyTorch `state_dict` 对象。" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:98 +msgid "" +"To load your progress, you simply append the following lines to your " +"code. Note that this will iterate over all saved checkpoints and load the" +" latest one:" +msgstr "要加载进度,只需在代码中添加以下几行。请注意,这将遍历所有已保存的检查点,并加载最新的检查点:" + +#: ../../source/how-to-save-and-load-model-checkpoints.rst:111 #, fuzzy msgid "" -":py:obj:`get_properties `\\ " -"\\(config\\)" +"Return/use this object of type ``Parameters`` wherever necessary, such as" +" in the ``initial_parameters`` when defining a ``Strategy``." msgstr "" -":py:obj:`get_properties `\\ " -"\\(config\\)" +"在必要时返回/使用此 ``Parameters`` 类型的对象,例如在定义 ``Strategy` 时的 " +"``initial_parameters` 中。" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.get_properties:1 of -msgid "Return a client's set of properties." -msgstr "返回客户端的属性集。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:2 +msgid "Upgrade to Flower 1.0" +msgstr "升级至 Flower 1.0" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#, fuzzy +#: ../../source/how-to-upgrade-to-flower-1.0.rst:4 msgid "" -":py:obj:`set_context `\\ " -"\\(context\\)" +"Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable" +" foundation for future growth. Compared to Flower 0.19 (and other 0.x " +"series releases), there are a few breaking changes that make it necessary" +" to change the code of existing 0.x-series projects." 
msgstr "" -":py:obj:`set_context `\\ " -"\\(context\\)" +"Flower 1.0 正式发布。除了新功能,Flower 1.0 还为未来的发展奠定了稳定的基础。与 Flower 0.19(以及其他 0.x " +"系列版本)相比,有一些破坏性改动需要修改现有 0.x 系列项目的代码。" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#, fuzzy -msgid ":py:obj:`to_client `\\ \\(\\)" -msgstr ":py:obj:`to_client `\\ \\(\\)" - -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.to_client:1 of -msgid "Convert to object to Client type and return it." -msgstr "将对象转换为客户类型并返回。" - -#: flwr.client.numpy_client.NumPyClient.evaluate:1::1 of -#, fuzzy -msgid ":py:obj:`context `\\" -msgstr ":py:obj:`context `\\" - -#: flwr.client.numpy_client.NumPyClient.evaluate:3 -#: flwr.client.numpy_client.NumPyClient.fit:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:5 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:8 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:5 -#: flwr.server.strategy.strategy.Strategy.configure_fit:5 -#: flwr.server.strategy.strategy.Strategy.evaluate:8 of -msgid "The current (global) model parameters." -msgstr "当前(全局)模型参数。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +#: ../../source/how-to-upgrade-to-flower-next.rst:63 +msgid "Install update" +msgstr "安装更新" -#: flwr.client.numpy_client.NumPyClient.evaluate:5 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 msgid "" -"Configuration parameters which allow the server to influence evaluation " -"on the client. It can be used to communicate arbitrary values from the " -"server to the client, for example, to influence the number of examples " -"used for evaluation." 
-msgstr "允许服务器影响客户端评估的配置参数。它可用于将任意值从服务器传送到客户端,例如,影响用于评估的示例数量。" +"Here's how to update an existing installation to Flower 1.0 using either " +"pip or Poetry:" +msgstr "下面介绍如何使用 pip 或 Poetry 将现有安装更新到 Flower 1.0:" -#: flwr.client.numpy_client.NumPyClient.evaluate:11 of -msgid "" -"* **loss** (*float*) -- The evaluation loss of the model on the local " -"dataset. * **num_examples** (*int*) -- The number of examples used for " -"evaluation. * **metrics** (*Dict[str, Scalar]*) -- A dictionary mapping " -"arbitrary string keys to values of type bool, bytes, float, int, or " -"str. It can be used to communicate arbitrary values back to the server." -msgstr "" -"**loss** (*float*) -- 模型在本地数据集上的评估损失值。**num_examples** (*int*) -- " -"用于评估的示例数量。**metrics** (*Dict[str, Scalar]*) -- 将任意字符串键映射到 " -"bool、bytes、float、int 或 str 类型值的字典。它可用于将任意值传回服务器。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +msgid "pip: add ``-U`` when installing." +msgstr "pip: 安装时添加 ``-U``." -#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:16 msgid "" -"**loss** (*float*) -- The evaluation loss of the model on the local " -"dataset." -msgstr "**loss** (*float*) -- 模型在本地数据集上的评估损失值。" - -#: flwr.client.numpy_client.NumPyClient.evaluate:12 of -msgid "**num_examples** (*int*) -- The number of examples used for evaluation." -msgstr "**num_examples** (*int*) -- 用于评估的示例数量。" +"``python -m pip install -U flwr`` (when using ``start_server`` and " +"``start_client``)" +msgstr "`python -m pip install -U flwr``(当使用`start_server`和`start_client`时)" -#: flwr.client.numpy_client.NumPyClient.evaluate:13 -#: flwr.client.numpy_client.NumPyClient.fit:13 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 msgid "" -"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " -"string keys to values of type bool, bytes, float, int, or str. It can be " -"used to communicate arbitrary values back to the server." 
-msgstr "" -"**metrics** (*Dict[str, Scalar]*) -- 将任意字符串键映射到 bool、bytes、float、int 或 " -"str 类型值的字典。它可用于将任意值传回服务器。" +"``python -m pip install -U 'flwr[simulation]'`` (when using " +"``start_simulation``)" +msgstr "``python -m pip install -U 'flwr[simulation]'``(当使用`start_simulation``时)" -#: flwr.client.numpy_client.NumPyClient.evaluate:19 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 msgid "" -"The previous return type format (int, float, float) and the extended " -"format (int, float, float, Dict[str, Scalar]) have been deprecated and " -"removed since Flower 0.19." +"Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " +"reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " +"before running ``poetry install``)." msgstr "" -"自 Flower 0.19 起,之前的返回类型格式(int、float、float)和扩展格式(int、float、float、Dict[str," -" Scalar])已被弃用和移除。" +"Poetry:更新 ``pyproject.toml`` 中的 ``flwr`` 依赖包,然后重新安装(运行 ``poetry install``" +" 前,别忘了通过 ``rm poetry.lock` 删除 ``poetry.lock`)。" -#: flwr.client.numpy_client.NumPyClient.fit:5 of -msgid "" -"Configuration parameters which allow the server to influence training on " -"the client. It can be used to communicate arbitrary values from the " -"server to the client, for example, to set the number of (local) training " -"epochs." -msgstr "允许服务器影响客户端训练的配置参数。它可用于将任意值从服务器传送到客户端,例如设置(本地)训练遍历数。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:23 +msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" +msgstr "``flwr = \"^1.0.0\"`` (当使用 ``start_server` 和 ``start_client` 时)" -#: flwr.client.numpy_client.NumPyClient.fit:11 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 msgid "" -"* **parameters** (*NDArrays*) -- The locally updated model parameters. * " -"**num_examples** (*int*) -- The number of examples used for training. * " -"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " -"string keys to values of type bool, bytes, float, int, or str. 
It can " -"be used to communicate arbitrary values back to the server." +"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " +"using ``start_simulation``)" msgstr "" -"**parameters** (*NDArrays*) -- 本地更新的模型参数。**num_examples** (*int*) -- " -"用于训练的示例数量。**metrics** (*Dict[str, Scalar]*) -- 将任意字符串键映射到 " -"bool、bytes、float、int 或 str 类型值的字典。它可用于将任意值传回服务器。" +"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] " +"}``(当使用``start_simulation``时)" -#: flwr.client.numpy_client.NumPyClient.fit:11 of -msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." -msgstr "**parameters** (*NDArrays*) -- 本地更新的模型参数。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:28 +#: ../../source/how-to-upgrade-to-flower-next.rst:121 +msgid "Required changes" +msgstr "所需变更" -#: flwr.client.numpy_client.NumPyClient.fit:12 of -msgid "**num_examples** (*int*) -- The number of examples used for training." -msgstr "**num_examples** (*int*) -- 用于训练的数据数量。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:30 +msgid "The following breaking changes require manual updates." +msgstr "以下更改需要手动更新。" -#: flwr.client.numpy_client.NumPyClient.get_parameters:3 of -msgid "" -"Configuration parameters requested by the server. This can be used to " -"tell the client which parameters are needed along with some Scalar " -"attributes." -msgstr "服务器请求的配置参数。这可以用来告诉客户端需要哪些参数以及一些标量属性。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 +msgid "General" +msgstr "一般情况" -#: flwr.client.numpy_client.NumPyClient.get_parameters:8 of -msgid "**parameters** -- The local model parameters as a list of NumPy ndarrays." -msgstr "**parameters** -- NumPy ndarrays 的本地模型参数列表。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:35 +msgid "" +"Pass all arguments as keyword arguments (not as positional arguments). 
" +"Here's an example:" +msgstr "将所有参数作为关键字参数传递(而不是位置参数)。下面是一个例子:" -#: flwr.client.numpy_client.NumPyClient.get_properties:3 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:38 msgid "" -"Configuration parameters requested by the server. This can be used to " -"tell the client which properties are needed along with some Scalar " -"attributes." -msgstr "服务器请求的配置参数。这可以用来告诉客户端需要哪些属性以及一些标量属性。" +"Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " +"FlowerClient())``" +msgstr "Flower 0.19 (位置参数): ``start_client(\"127.0.0.1:8080\", FlowerClient())``" -#: flwr.client.numpy_client.NumPyClient.get_properties:8 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 msgid "" -"**properties** -- A dictionary mapping arbitrary string keys to values of" -" type bool, bytes, float, int, or str. It can be used to communicate " -"arbitrary property values back to the server." +"Flower 1.0 (keyword arguments): " +"``start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())``" msgstr "" -"**properties** -- 将任意字符串键映射到 bool、bytes、float、int 或 str " -"类型值的字典。它可用于将任意属性值传回服务器。" - -#: ../../source/ref-api/flwr.client.run_client_app.rst:2 -#, fuzzy -msgid "run\\_client\\_app" -msgstr "run\\_client\\_app" +"Flower 1.0(关键字参数): ``start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())``" -#: ../../source/ref-api/flwr.client.run_supernode.rst:2 -#, fuzzy -msgid "run\\_supernode" -msgstr "flower-superlink" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 +#: ../../source/ref-api/flwr.client.Client.rst:2 +msgid "Client" +msgstr "客户端" -#: ../../source/ref-api/flwr.client.start_client.rst:2 -#, fuzzy -msgid "start\\_client" -msgstr "启动客户端" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 +msgid "" +"Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " +"``def get_parameters(self, config):``" +msgstr "" +"NumPyClient的子类:将``def get_parameters(self):```改为``def " +"get_parameters(self,config):``" -#: 
flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 msgid "" -"The IPv4 or IPv6 address of the server. If the Flower server runs on the " -"same machine on port 8080, then `server_address` would be " -"`\"[::]:8080\"`." +"Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " +"get_parameters(self, ins: GetParametersIns):``" msgstr "" -"服务器的 IPv4 或 IPv6 地址:如果 Flower 服务器在同一台机器上运行,端口为 " -"8080,则`server_address`应为`\"[::]:8080\"`。" +"客户端 \"的子类:将 \"get_parameters(self): \"改为 \"get_parameters(self, ins: " +"GetParametersIns):\"" -#: flwr.client.app.start_client:7 of -msgid "A callable that instantiates a Client. (default: None)" -msgstr "用于实例化客户端的可调用程序。(默认值:无)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +msgid "Strategies / ``start_server`` / ``start_simulation``" +msgstr "策略 / ``start_server`` / ``start_simulation``" -#: flwr.client.app.start_client:9 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:53 msgid "" -"An implementation of the abstract base class `flwr.client.Client` " -"(default: None)" -msgstr "抽象基类 `flwr.client.Client` 的实现(默认值:无)" +"Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " +"``start_simulation``. Here's an example:" +msgstr "" +"向 ``start_server`` 和 ``start_simulation` 传递 ``ServerConfig``(而不是 " +"dictionary)。下面是一个例子:" -#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:56 msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower" -" server. The default should be sufficient for most models. Users who " -"train very large models might need to increase this value. Note that the " -"Flower server needs to be started with the same value (see " -"`flwr.server.start_server`), otherwise it will not know about the " -"increased limit and block larger messages." 
+"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " +"\"round_timeout\": 600.0}, ...)``" msgstr "" -"可与 Flower 服务器交换的 gRPC 信息的最大长度:默认值对大多数模型都足够了。训练超大模型的用户可能需要增加该值。请注意,Flower " -"服务器需要以相同的值启动(请参阅 `flwr.server.start_server`),否则它将不知道增加的限制并阻止更大的消息。" +"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " +"\"round_timeout\": 600.0}, ...)``" -#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 msgid "" -"The PEM-encoded root certificates as a byte string or a path string. If " -"provided, a secure connection using the certificates will be established " -"to an SSL-enabled Flower server." -msgstr "字节字符串或路径字符串形式的 PEM 编码根证书。如果提供,将使用这些证书与启用 SSL 的 Flower 服务器建立安全连接。" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" +msgstr "" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of -#, fuzzy +#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 msgid "" -"Starts an insecure gRPC connection when True. Enables HTTPS connection " -"when False, using system certificates if `root_certificates` is None." -msgstr "" -"为 True 时启动不安全的 gRPC 连接。False 时启用 HTTPS 连接,如果 `root_certificates` 为 " -"None,则使用系统证书。" +"Replace ``num_rounds=1`` in ``start_simulation`` with the new " +"``config=ServerConfig(...)`` (see previous item)" +msgstr "将`start_simulation``中的`num_rounds=1``替换为新的`config=ServerConfig(...)`(参见前一项)" -#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:63 msgid "" -"Configure the transport layer. 
Allowed values: - 'grpc-bidi': gRPC, " -"bidirectional streaming - 'grpc-rere': gRPC, request-response " -"(experimental) - 'rest': HTTP (experimental)" +"Remove ``force_final_distributed_eval`` parameter from calls to " +"``start_server``. Distributed evaluation on all clients can be enabled by" +" configuring the strategy to sample all clients for evaluation after the " +"last round of training." msgstr "" -"配置传输层:允许的值包括 - 'grpc-bidi': gRPC,双向流 - 'grpc-rere': gRPC,请求-响应(实验性) - " -"'rest': HTTP(实验性)" +"删除调用 ``start_server`` 时的 ``force_final_distributed_eval` " +"参数。可以通过配置策略,在最后一轮训练后对所有客户端进行抽样评估,从而启用对所有客户端的分布式评估。" -#: flwr.client.app.start_client:31 of -#, fuzzy -msgid "" -"The maximum number of times the client will try to connect to the server " -"before giving up in case of a connection error. If set to None, there is " -"no limit to the number of tries." -msgstr "客户端在出现连接错误时放弃连接服务器的最大尝试次数。如果设置为 \"无\",则不限制尝试次数。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:66 +msgid "Rename parameter/ndarray conversion functions:" +msgstr "重命名参数/数组转换函数:" -#: flwr.client.app.start_client:35 of -#, fuzzy -msgid "" -"The maximum duration before the client stops trying to connect to the " -"server in case of connection error. If set to None, there is no limit to " -"the total time." 
-msgstr "在出现连接错误时,客户端停止尝试连接服务器之前的最长持续时间。如果设置为 \"无\",则总时间没有限制。" - -#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of -msgid "Starting a gRPC client with an insecure server connection:" -msgstr "使用不安全的服务器连接启动 gRPC 客户端:" - -#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of -#, fuzzy -msgid "Starting an SSL-enabled gRPC client using system certificates:" -msgstr "启动支持 SSL 的 gRPC 客户端:" - -#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of -#, fuzzy -msgid "Starting an SSL-enabled gRPC client using provided certificates:" -msgstr "启动支持 SSL 的 gRPC 客户端:" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" +msgstr "``parameters_to_weights`` --> ``parameters_to_ndarrays``" -#: ../../source/ref-api/flwr.client.start_numpy_client.rst:2 -#, fuzzy -msgid "start\\_numpy\\_client" -msgstr "start_numpy_client" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:69 +msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" +msgstr "``weights_to_parameters`` --> ``ndarrays_to_parameters``" -#: flwr.client.app.start_numpy_client:5 of -#, fuzzy +#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 msgid "" -"This function is deprecated since 1.7.0. Use " -":code:`flwr.client.start_client` instead and first convert your " -":code:`NumPyClient` to type :code:`flwr.client.Client` by executing its " -":code:`to_client()` method." +"Strategy initialization: if the strategy relies on the default values for" +" ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " +"``fraction_evaluate`` manually to ``0.1``. Projects that do not manually " +"create a strategy (by calling ``start_server`` or ``start_simulation`` " +"without passing a strategy instance) should now manually initialize " +"FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." 
msgstr "" -"自 1.7.0 起该函数已被弃用。请使用 :code:`flwr.client.start_client`,并首先通过执行 " -":code:`to_client()`方法将 :code:`NumPyClient`转换为 :code:`flwr.client.Client`。" +"策略初始化:如果策略依赖于 ``fraction_fit`` 和 ``fraction_evaluate`` 的默认值,请手动将 " +"``fraction_fit`` 和 ``fraction_evaluate`` 设置为 ``0.1``。未手动创建策略的项目(调用 " +"``start_server` 或 ``start_simulation` 时未传递策略实例)现在应手动初始化 FedAvg,并将 " +"`fraction_fit` 和 `fraction_evaluate` 设为 `0.1``。" -#: flwr.client.app.start_numpy_client:13 of -msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." -msgstr "抽象基类 `flwr.client.NumPyClient` 的实现。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" +msgstr "重命名内置策略参数(例如,`FedAvg``):" -#: ../../source/ref-api/flwr.common.rst:2 -msgid "common" -msgstr "常见" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:79 +msgid "``fraction_eval`` --> ``fraction_evaluate``" +msgstr "``fraction_eval`` --> ``fraction_evaluate``" -#: ../../source/ref-api/flwr.common.rst:30::1 -#, fuzzy -msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" -msgstr ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 +msgid "``min_eval_clients`` --> ``min_evaluate_clients``" +msgstr "``min_eval_clients`` --> ``min_evaluate_clients``" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.record.conversion_utils.array_from_numpy:1 of -#, fuzzy -msgid "Create Array from NumPy ndarray." -msgstr "将参数对象转换为 NumPy ndarrays。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:81 +msgid "``eval_fn`` --> ``evaluate_fn``" +msgstr "``eval_fn`` --> ``evaluate_fn``" -#: ../../source/ref-api/flwr.common.rst:30::1 -#, fuzzy -msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" -msgstr ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:83 +msgid "" +"Rename ``rnd`` to ``server_round``. 
This impacts multiple methods and " +"functions, for example, ``configure_fit``, ``aggregate_fit``, " +"``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``." +msgstr "" +"将 `rnd` 更名为 `server_round`。这会影响多个方法和函数,例如 " +"``configure_fit``、``aggregate_fit``、``configure_evaluate``、`aggregate_evaluate``" +" 和 ``evaluate_fn``。" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.bytes_to_ndarray:1 of -msgid "Deserialize NumPy ndarray from bytes." -msgstr "从字节反序列化 NumPy ndarray。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:86 +msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" +msgstr "在 ``evaluate_fn` 中添加 ``server_round` 和 ``config`:" -#: ../../source/ref-api/flwr.common.rst:30::1 -#, fuzzy +#: ../../source/how-to-upgrade-to-flower-1.0.rst:88 msgid "" -":py:obj:`configure `\\ \\(identifier\\[\\, " -"filename\\, host\\]\\)" +"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" -":py:obj:`configure `\\ \\(identifier\\[\\, " -"filename\\, host\\]\\)" - -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.logger.configure:1 of -msgid "Configure logging to file and/or remote log server." 
-msgstr "配置将日志记录到文件和/或远程日志服务器。" +"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" -#: ../../source/ref-api/flwr.common.rst:30::1 -#, fuzzy +#: ../../source/how-to-upgrade-to-flower-1.0.rst:90 msgid "" -":py:obj:`event `\\ \\(event\\_type\\[\\, " -"event\\_details\\]\\)" +"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " +"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " +"Scalar]]]:``" msgstr "" -":py:obj:`event `\\ \\(event\\_type\\[\\, " -"event\\_details\\]\\)" +"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " +"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " +"Scalar]]]:``" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.telemetry.event:1 of -#, fuzzy -msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." -msgstr "将 create_event 提交给 ThreadPoolExecutor 以避免阻塞。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:94 +msgid "Custom strategies" +msgstr "定制策略" -#: ../../source/ref-api/flwr.common.rst:30::1 -#, fuzzy +#: ../../source/how-to-upgrade-to-flower-1.0.rst:96 msgid "" -":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " -"\\*\\*kwargs\\)" +"The type of parameter ``failures`` has changed from " +"``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " +"BaseException]]`` (in ``aggregate_fit``) and " +"``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in " +"``aggregate_evaluate``)" msgstr "" -":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " -"\\*\\*kwargs\\)" +"参数``failures``的类型已从``List[BaseException]``变为``List[Union[Tuple[ClientProxy," +" FitRes], " +"BaseException]]``(在``agregate_fit``中)和``List[Union[Tuple[ClientProxy, " +"EvaluateRes], BaseException]]``(在``agregate_evaluate``中)" -#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 -#: of -msgid "Log 'msg % args' with the integer severity 'level'." 
-msgstr "以整数严重性 \"级别 \"记录 \"msg % args\"。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:100 +msgid "" +"The ``Strategy`` method ``evaluate`` now receives the current round of " +"federated learning/evaluation as the first parameter:" +msgstr "``Strategy``方法 的``evaluate``现在会接收当前一轮联邦学习/评估作为第一个参数:" -#: ../../source/ref-api/flwr.common.rst:30::1 -#, fuzzy -msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" -msgstr ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:103 +msgid "" +"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" +msgstr "" +"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:```" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.ndarray_to_bytes:1 of -msgid "Serialize NumPy ndarray to bytes." -msgstr "将 NumPy ndarray 序列化为字节。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:105 +msgid "" +"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " +"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" +msgstr "" +"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " +"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" -#: ../../source/ref-api/flwr.common.rst:30::1 -#, fuzzy -msgid ":py:obj:`now `\\ \\(\\)" -msgstr ":py:obj:`now `\\ \\(\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:109 +msgid "Optional improvements" +msgstr "可选的改进措施" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.date.now:1 of -msgid "Construct a datetime from time.time() with time zone set to UTC." 
-msgstr "从 time.time() 生成日期时间,时区设置为 UTC。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:111 +msgid "" +"Along with the necessary changes above, there are a number of potential " +"improvements that just became possible:" +msgstr "除了上述必要的改动之外,还有一些潜在的改进措施:" -#: ../../source/ref-api/flwr.common.rst:30::1 -#, fuzzy +#: ../../source/how-to-upgrade-to-flower-1.0.rst:114 msgid "" -":py:obj:`ndarrays_to_parameters `\\ " -"\\(ndarrays\\)" +"Remove \"placeholder\" methods from subclasses of ``Client`` or " +"``NumPyClient``. If you, for example, use server-side evaluation, then " +"empty placeholder implementations of ``evaluate`` are no longer " +"necessary." msgstr "" -":py:obj:`ndarrays_to_parameters `\\ " -"\\(ndarrays\\)" - -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.ndarrays_to_parameters:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 -#: of -msgid "Convert NumPy ndarrays to parameters object." -msgstr "将 NumPy ndarrays 转换为参数对象。" +"删除 ``Client`` 或 ``NumPyClient`` 子类中的 \"占位符 " +"\"方法。例如,如果你使用服务器端评估,那么就不再需要``evaluate``的 \"空占位符 \"实现。" -#: ../../source/ref-api/flwr.common.rst:30::1 -#, fuzzy +#: ../../source/how-to-upgrade-to-flower-1.0.rst:117 msgid "" -":py:obj:`parameters_to_ndarrays `\\ " -"\\(parameters\\)" +"Configure the round timeout via ``start_simulation``: " +"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " +"round_timeout=600.0), ...)``" msgstr "" -":py:obj:`parameters_to_ndarrays `\\ " -"\\(parameters\\)" +"通过 ``start_simulation`` 配置循环超时: ``start_simulation(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.parameters_to_ndarrays:1 of -msgid "Convert parameters object to NumPy ndarrays." 
-msgstr "将参数对象转换为 NumPy ndarrays。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:121 +#: ../../source/how-to-upgrade-to-flower-next.rst:349 +msgid "Further help" +msgstr "更多帮助" -#: ../../source/ref-api/flwr.common.rst:64::1 -#, fuzzy +#: ../../source/how-to-upgrade-to-flower-1.0.rst:123 msgid "" -":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " -"data\\)" +"Most official `Flower code examples " +"`_ are already updated" +" to Flower 1.0, they can serve as a reference for using the Flower 1.0 " +"API. If there are further questions, `join the Flower Slack " +"`_ and use the channel ``#questions``." msgstr "" -":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " -"data\\)" +"大多数官方的 `Flower 代码示例 `_" +" 已经更新到 Flower 1.0,它们可以作为使用 Flower 1.0 API 的参考。如果还有其他问题,请加入 Flower Slack " +"`_ 并使用 \"#questions``\"。" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.parametersrecord.Array:1 of +#: ../../source/how-to-upgrade-to-flower-next.rst:2 #, fuzzy -msgid "Array type." -msgstr "返回类型" +msgid "Upgrade to Flower Next" +msgstr "升级至 Flower 1.0" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:4 #, fuzzy msgid "" -":py:obj:`ClientMessage `\\ " -"\\(\\[get\\_properties\\_res\\, ...\\]\\)" +"Welcome to the migration guide for updating Flower to Flower Next! " +"Whether you're a seasoned user or just getting started, this guide will " +"help you smoothly transition your existing setup to take advantage of the" +" latest features and improvements in Flower Next, starting from version " +"1.8." msgstr "" -":py:obj:`ClientMessage `\\ " -"\\(\\[get\\_properties\\_res\\, ...\\]\\)" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ClientMessage:1 of -msgid "ClientMessage is a container used to hold one result message." 
-msgstr "ClientMessage 是用于容纳一条结果信息的容器。" +"欢迎阅读从 Flower 升级到 Flower Next 的迁移指南!无论您是经验丰富的用户还是刚刚开始使用 " +"Flower,本指南都将帮助您顺利过渡现有设置,以利用 Flower Next 从 1.8 版开始的最新功能和改进。" -#: ../../source/ref-api/flwr.common.rst:64::1 -#, fuzzy -msgid ":py:obj:`Code `\\ \\(value\\)" -msgstr ":py:obj:`Code `\\ \\(value\\)" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Code:1 of -msgid "Client status codes." -msgstr "客户端状态代码。" - -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:11 #, fuzzy msgid "" -":py:obj:`ConfigsRecord `\\ " -"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" +"This guide shows how to reuse pre-``1.8`` Flower code with minimum code " +"changes by using the *compatibility layer* in Flower Next. In another " +"guide, we will show how to run Flower Next end-to-end with pure Flower " +"Next APIs." msgstr "" -"Flower 1.0: ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +"本指南展示了如何通过使用 Flower Next 中的*可兼容层*,以最小的代码改动重用```1.8```前的 Flower " +"代码。在另一个指南中,我们将介绍如何使用纯 Flower Next API 端到端运行 Flower Next。" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.configsrecord.ConfigsRecord:1 of +#: ../../source/how-to-upgrade-to-flower-next.rst:15 #, fuzzy -msgid "Configs record." -msgstr "配置日志记录" +msgid "Let's dive in!" +msgstr "让我们深入了解一下!" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:68 #, fuzzy -msgid ":py:obj:`Context `\\ \\(state\\)" -msgstr ":py:obj:`Context `\\ \\(state\\)" +msgid "" +"Here's how to update an existing installation of Flower to Flower Next " +"with ``pip``:" +msgstr "下面介绍如何使用 pip 或 Poetry 将现有安装更新到 Flower 1.0:" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.context.Context:1 of +#: ../../source/how-to-upgrade-to-flower-next.rst:74 #, fuzzy -msgid "State of your run." 
-msgstr "您的运行状态。" +msgid "or if you need Flower Next with simulation:" +msgstr "启动 Flower 模拟" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:80 #, fuzzy -msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" -msgstr ":py:obj:`DisconnectRes `\\ \\(reason\\)" +msgid "" +"Ensure you set the following version constraint in your " +"``requirements.txt``" +msgstr "确保在 ``requirements.txt`` 中设置了以下版本限制" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.DisconnectRes:1 of -msgid "DisconnectRes message from client to server." -msgstr "客户端向服务器发送 DisconnectRes 信息。" +#: ../../source/how-to-upgrade-to-flower-next.rst:90 +#, fuzzy +msgid "or ``pyproject.toml``:" +msgstr "或 ``pyproject.toml```:" + +#: ../../source/how-to-upgrade-to-flower-next.rst:101 +#, fuzzy +msgid "Using Poetry" +msgstr "使用 pip" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:103 #, fuzzy msgid "" -":py:obj:`EvaluateIns `\\ \\(parameters\\, " -"config\\)" +"Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " +"(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " +"running ``poetry install``)." msgstr "" -":py:obj:`EvaluateIns `\\ \\(parameters\\, " -"config\\)" +"Poetry:更新 ``pyproject.toml`` 中的 ``flwr`` 依赖包,然后重新安装(运行 ``poetry install``" +" 前,别忘了通过 ``rm poetry.lock` 删除 ``poetry.lock`)。" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.EvaluateIns:1 of -msgid "Evaluate instructions for a client." 
-msgstr "评估客户端的指示。" +#: ../../source/how-to-upgrade-to-flower-next.rst:106 +#, fuzzy +msgid "" +"Ensure you set the following version constraint in your " +"``pyproject.toml``:" +msgstr "将 ``pyproject.toml`` 中的次要版本增加一个。" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:123 #, fuzzy msgid "" -":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " -"num\\_examples\\, metrics\\)" +"In Flower Next, the *infrastructure* and *application layers* have been " +"decoupled. Instead of starting a client in code via ``start_client()``, " +"you create a |clientapp_link|_ and start it via the command line. Instead" +" of starting a server in code via ``start_server()``, you create a " +"|serverapp_link|_ and start it via the command line. The long-running " +"components of server and client are called SuperLink and SuperNode. The " +"following non-breaking changes that require manual updates and allow you " +"to run your project both in the traditional way and in the Flower Next " +"way:" msgstr "" -":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " -"num\\_examples\\, metrics\\)" +"在 Flower Next " +"中,*基础架构层*和*应用层*已经解耦。你不再需要在代码中通过``start_client()``启动客户端,而是创建一个|clientapp_link|_,然后通过命令行启动它。无需通过``start_server()``在代码中启动服务器,而是创建一个" +" |serverapp_link|_ " +"并通过命令行启动它。服务器和客户端的长期运行组件被称为超级链接(SuperLink)和超级节点(SuperNode)。以下是无需手动更新的非破坏性更改,可让您以传统方式和" +" Flower Next 方式运行项目:" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.EvaluateRes:1 of -msgid "Evaluate response from a client." -msgstr "评估客户端的反应。" +#: ../../source/how-to-upgrade-to-flower-next.rst:132 +#, fuzzy +msgid "|clientapp_link|_" +msgstr "客户端" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:134 #, fuzzy -msgid ":py:obj:`EventType `\\ \\(value\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid "" +"Wrap your existing client with |clientapp_link|_ instead of launching it " +"via |startclient_link|_. 
Here's an example:" +msgstr "用 |clientapp_link|_ 封装现有客户端,而不是通过 |startclient_link|_ 启动。下面是一个例子:" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.telemetry.EventType:1 of -msgid "Types of telemetry events." -msgstr "遥测事件类型。" +#: ../../source/how-to-upgrade-to-flower-next.rst:157 +#, fuzzy +msgid "|serverapp_link|_" +msgstr "服务器" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:159 #, fuzzy -msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" -msgstr ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" +msgid "" +"Wrap your existing strategy with |serverapp_link|_ instead of starting " +"the server via |startserver_link|_. Here's an example:" +msgstr "用 |serverapp_link|_ 包住现有策略,而不是通过 |startserver_link|_ 启动服务器。下面是一个例子:" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.FitIns:1 of -msgid "Fit instructions for a client." -msgstr "为客户提供安装说明。" +#: ../../source/how-to-upgrade-to-flower-next.rst:180 +#, fuzzy +msgid "Deployment" +msgstr "调配" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:182 #, fuzzy msgid "" -":py:obj:`FitRes `\\ \\(status\\, parameters\\, " -"num\\_examples\\, metrics\\)" +"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " +"in sequence, |flowernext_clientapp_link|_ (2x) and " +"|flowernext_serverapp_link|_. There is no need to execute `client.py` and" +" `server.py` as Python scripts." msgstr "" -":py:obj:`FitRes `\\ \\(status\\, parameters\\, " -"num\\_examples\\, metrics\\)" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.FitRes:1 of -msgid "Fit response from a client." 
-msgstr "来自客户端的合适回复。" +"在依次运行 |flowernext_clientapp_link|_ (2x) 和 |flowernext_serverapp_link|_ " +"之前,使用 |flowernext_superlink_link|_ 运行 ``SuperLink`` 。无需将 |client.py` 和 " +"`server.py` 作为 Python 脚本执行。" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:185 #, fuzzy -msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" -msgstr ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" +msgid "" +"Here's an example to start the server without HTTPS (only for " +"prototyping):" +msgstr "下面是一个在不使用 HTTPS 的情况下启动服务器的示例(仅用于原型开发):" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Error:1 of +#: ../../source/how-to-upgrade-to-flower-next.rst:201 #, fuzzy -msgid "A dataclass that stores information about an error that occurred." -msgstr "数据类,用于存储所发生错误的相关信息。" +msgid "" +"Here's another example to start with HTTPS. Use the ``--ssl-ca-" +"certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line " +"options to pass paths to (CA certificate, server certificate, and server " +"private key)." +msgstr "下面是另一个使用 HTTPS 的示例。使用 ``--certificates`` 命令行参数传递路径(CA 证书、服务器证书和服务器私钥)。" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:229 #, fuzzy -msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" -msgstr ":py:obj:`GetParametersIns `\\ \\(config\\)" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetParametersIns:1 of -msgid "Parameters request for a client." -msgstr "客户端的参数请求。" +msgid "Simulation in CLI" +msgstr "运行模拟" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:231 #, fuzzy msgid "" -":py:obj:`GetParametersRes `\\ \\(status\\, " -"parameters\\)" +"Wrap your existing client and strategy with |clientapp_link|_ and " +"|serverapp_link|_, respectively. There is no need to use |startsim_link|_" +" anymore. 
Here's an example:" msgstr "" -":py:obj:`GetParametersRes `\\ \\(status\\, " -"parameters\\)" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetParametersRes:1 of -msgid "Response when asked to return parameters." -msgstr "要求返回参数时的响应。" +"分别用 |clientapp_link|_ 和 |serverapp_link|_ 封装现有的客户端和策略。无需再使用 " +"|startsim_link|_。下面是一个示例:" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:264 #, fuzzy -msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" -msgstr ":py:obj:`GetPropertiesIns `\\ \\(config\\)" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetPropertiesIns:1 of -msgid "Properties request for a client." -msgstr "客户端的属性请求。" +msgid "" +"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " +"``client_app`` object in the code instead of executing the Python script." +" Here's an example (assuming the ``server_app`` and ``client_app`` " +"objects are in a ``sim.py`` module):" +msgstr "" +"在 CLI 中运行 |flower_simulation_link|_ 并指向代码中的 ``server_app`` " +"/``client_app`` 对象,而不是执行 Python 脚本。下面是一个示例(假定 `server_app`` 和 " +"`client_app`` 对象位于 `sim.py`` 模块中):" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:281 #, fuzzy msgid "" -":py:obj:`GetPropertiesRes `\\ \\(status\\, " -"properties\\)" +"Set default resources for each |clientapp_link|_ using the ``--backend-" +"config`` command line argument instead of setting the " +"``client_resources`` argument in |startsim_link|_. Here's an example:" msgstr "" -":py:obj:`GetPropertiesRes `\\ \\(status\\, " -"properties\\)" +"使用 ``--backend-config`` 命令行参数为每个 |clientapp_link|_ 设置默认资源,而不是在 " +"|startsim_link|_ 中设置 ``client_resources`` 参数。下面是一个例子:" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetPropertiesRes:1 of -msgid "Properties response from a client." 
-msgstr "来自客户端的属性响应。" +#: ../../source/how-to-upgrade-to-flower-next.rst:305 +#, fuzzy +msgid "Simulation in a Notebook" +msgstr "笔记本中的模拟" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:307 #, fuzzy msgid "" -":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " -"error\\]\\)" +"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's " +"an example:" +msgstr "在笔记本中运行 |runsim_link|_,而不是 |startsim_link|_。下面是一个例子:" + +#: ../../source/how-to-upgrade-to-flower-next.rst:351 +#, fuzzy +msgid "" +"Some official `Flower code examples `_ " +"are already updated to Flower Next so they can serve as a reference for " +"using the Flower Next API. If there are further questions, `join the " +"Flower Slack `_ and use the channel " +"``#questions``. You can also `participate in Flower Discuss " +"`_ where you can find us answering questions," +" or share and learn from others about migrating to Flower Next." msgstr "" -":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " -"error\\]\\)" +"大多数官方的 `Flower 代码示例 `_" +" 已经更新到 Flower 1.0,它们可以作为使用 Flower 1.0 API 的参考。如果还有其他问题,请加入 Flower Slack " +"`_ 并使用 \"#questions``\"。" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Message:1 of +#: ../../source/how-to-upgrade-to-flower-next.rst:358 #, fuzzy -msgid "State of your application from the viewpoint of the entity using it." -msgstr "从使用实体的角度看应用程序的状态。" +msgid "Important" +msgstr "重要变更:" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:360 #, fuzzy -msgid ":py:obj:`MessageType `\\ \\(\\)" -msgstr ":py:obj:`MessageType `\\ \\(\\)" +msgid "" +"As we continuously enhance Flower Next at a rapid pace, we'll be " +"periodically updating this guide. Please feel free to share any feedback " +"with us!" +msgstr "随着 Flower Next 的不断快速改进,我们将定期更新本指南。如有任何反馈,请随时与我们分享!" 
-#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.constant.MessageType:1 of +#: ../../source/how-to-upgrade-to-flower-next.rst:366 #, fuzzy -msgid "Message type." -msgstr "信息类型。" +msgid "Happy migrating! 🚀" +msgstr "移民愉快!🚀" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:2 #, fuzzy -msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" -msgstr ":py:obj:`MessageTypeLegacy `\\ \\(\\)" +msgid "Use Built-in Mods" +msgstr "使用内置调制器" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.constant.MessageTypeLegacy:1 of +#: ../../source/how-to-use-built-in-mods.rst:4 #, fuzzy -msgid "Legacy message type." -msgstr "传统信息类型。" +msgid "" +"**Note: This tutorial covers experimental features. The functionality and" +" interfaces may change in future versions.**" +msgstr "**注:本教程涵盖实验性功能。功能和界面可能会在未来版本中发生变化。" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:7 #, fuzzy msgid "" -":py:obj:`Metadata `\\ \\(run\\_id\\, " -"message\\_id\\, src\\_node\\_id\\, ...\\)" +"In this tutorial, we will learn how to utilize built-in mods to augment " +"the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) " +"allow us to perform operations before and after a task is processed in " +"the ``ClientApp``." msgstr "" -":py:obj:`Metadata `\\ \\(run\\_id\\, " -"message\\_id\\, src\\_node\\_id\\, ...\\)" +"在本教程中,我们将学习如何利用内置模块来增强 ``ClientApp`` 的行为。修改器(有时也称为修改器)允许我们在 ``ClientApp``" +" 处理任务之前和之后执行操作。" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Metadata:1 of +#: ../../source/how-to-use-built-in-mods.rst:12 #, fuzzy -msgid "A dataclass holding metadata associated with the current message." -msgstr "数据类型,包含与当前报文相关的元数据。" +msgid "What are Mods?" +msgstr "什么是 Mods?" 
-#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:14 #, fuzzy msgid "" -":py:obj:`MetricsRecord `\\ " -"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" +"A Mod is a callable that wraps around a ``ClientApp``. It can manipulate " +"or inspect the incoming ``Message`` and the resulting outgoing " +"``Message``. The signature for a ``Mod`` is as follows:" msgstr "" -":py:obj:`MetricsRecord `\\ " -"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" +"Mod 是包裹在 ``ClientApp`` 周围的可调用程序。它可以操作或检查传入的 ``Message`` 和由此产生的传出的 " +"``Message`` 。一个 ``Mod`` 的签名如下:" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.metricsrecord.MetricsRecord:1 of +#: ../../source/how-to-use-built-in-mods.rst:23 #, fuzzy -msgid "Metrics record." -msgstr "指标记录。" +msgid "A typical mod function might look something like this:" +msgstr "一个典型的修改函数可能是这样的:" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:36 #, fuzzy -msgid ":py:obj:`NDArray `\\" -msgstr ":py:obj:`NDArray `\\" +msgid "Using Mods" +msgstr "使用修改器" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:38 #, fuzzy -msgid "" -"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " -":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" -msgstr "" -"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " -":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" +msgid "To use mods in your ``ClientApp``, you can follow these steps:" +msgstr "要在您的 ``ClientApp`` 中使用 mod,可以按照以下步骤操作:" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:41 #, fuzzy -msgid "" -":py:obj:`Parameters `\\ \\(tensors\\, " -"tensor\\_type\\)" -msgstr "" -":py:obj:`Parameters `\\ \\(tensors\\, " -"tensor\\_type\\)" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Parameters:1 of -msgid "Model parameters." -msgstr "模型参数。" +msgid "1. Import the required mods" +msgstr "1. 
导入所需修改" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:43 #, fuzzy -msgid "" -":py:obj:`ParametersRecord `\\ " -"\\(\\[array\\_dict\\, keep\\_input\\]\\)" -msgstr "" -":py:obj:`ParametersRecord `\\ " -"\\(\\[array\\_dict\\, keep\\_input\\]\\)" +msgid "First, import the built-in mod you intend to use:" +msgstr "首先,导入您打算使用的内置模式:" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.parametersrecord.ParametersRecord:1 of +#: ../../source/how-to-use-built-in-mods.rst:51 #, fuzzy -msgid "Parameters record." -msgstr "参数" +msgid "2. Define your client function" +msgstr "2. 定义客户功能" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:53 #, fuzzy -msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" -msgstr ":py:obj:`ReconnectIns `\\ \\(seconds\\)" +msgid "" +"Define your client function (``client_fn``) that will be wrapped by the " +"mod(s):" +msgstr "定义将被 mod 封装的客户端函数(``client_fn``):" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ReconnectIns:1 of -msgid "ReconnectIns message from server to client." -msgstr "服务器发送给客户端的重新连接信息。" +#: ../../source/how-to-use-built-in-mods.rst:62 +#, fuzzy +msgid "3. Create the ``ClientApp`` with mods" +msgstr "3. 用模块创建 ``ClientApp``" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:64 #, fuzzy msgid "" -":py:obj:`RecordSet `\\ " -"\\(\\[parameters\\_records\\, ...\\]\\)" -msgstr "" -":py:obj:`RecordSet `\\ " -"\\(\\[parameters\\_records\\, ...\\]\\)" +"Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " +"argument. The order in which you provide the mods matters:" +msgstr "创建您的 ``ClientApp`` 并将 mods 作为列表传递给 ``mods`` 参数。提供 mod 的顺序很重要:" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.recordset.RecordSet:1 of +#: ../../source/how-to-use-built-in-mods.rst:78 #, fuzzy -msgid "RecordSet stores groups of parameters, metrics and configs." 
-msgstr "RecordSet 可存储参数、指标和配置组。" +msgid "Order of execution" +msgstr "停用" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:80 #, fuzzy msgid "" -":py:obj:`ServerMessage `\\ " -"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" -msgstr "" -":py:obj:`ServerMessage `\\ " -"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ServerMessage:1 of -msgid "ServerMessage is a container used to hold one instruction message." -msgstr "ServerMessage 是用于容纳一条指令信息的容器。" +"When the ``ClientApp`` runs, the mods are executed in the order they are " +"provided in the list:" +msgstr "当运行 ``ClientApp`` 时,会按照列表中提供的顺序执行模块:" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:83 #, fuzzy -msgid ":py:obj:`Status `\\ \\(code\\, message\\)" -msgstr ":py:obj:`Status `\\ \\(code\\, message\\)" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Status:1 of -msgid "Client status." -msgstr "客户端状态。" +msgid "``example_mod_1`` (outermost mod)" +msgstr "``example_mod_1`` (最外层模块)" -#: ../../source/ref-api/flwr.common.Array.rst:2 +#: ../../source/how-to-use-built-in-mods.rst:84 #, fuzzy -msgid "Array" -msgstr "数组" +msgid "``example_mod_2`` (next mod)" +msgstr "示例模式 2(下一个模式)" -#: flwr.common.record.parametersrecord.Array:3 of +#: ../../source/how-to-use-built-in-mods.rst:85 #, fuzzy msgid "" -"A dataclass containing serialized data from an array-like or tensor-like " -"object along with some metadata about it." -msgstr "数据类,包含数组类或张量类对象的序列化数据以及相关元数据。" +"Message handler (core function that handles the incoming ``Message`` and " +"returns the outgoing ``Message``)" +msgstr "消息处理程序(处理传入的 \"消息 \"并返回传出的 \"消息 \"的核心函数)" -#: flwr.common.record.parametersrecord.Array:6 of +#: ../../source/how-to-use-built-in-mods.rst:87 #, fuzzy -msgid "" -"A string representing the data type of the serialised object (e.g. 
" -"`np.float32`)" -msgstr "表示序列化对象数据类型的字符串(例如 `np.float32`)" +msgid "``example_mod_2`` (on the way back)" +msgstr "``example_mod_2`` (返回途中)" -#: flwr.common.record.parametersrecord.Array:8 of +#: ../../source/how-to-use-built-in-mods.rst:88 #, fuzzy -msgid "" -"A list representing the shape of the unserialized array-like object. This" -" is used to deserialize the data (depending on the serialization method) " -"or simply as a metadata field." -msgstr "代表未序列化数组对象形状的列表。它可用于反序列化数据(取决于序列化方法),或仅作为元数据字段使用。" +msgid "``example_mod_1`` (outermost mod on the way back)" +msgstr "``example_mod_1`` (返回途中最外层的模式)" -#: flwr.common.record.parametersrecord.Array:12 of +#: ../../source/how-to-use-built-in-mods.rst:90 #, fuzzy msgid "" -"A string indicating the type of serialisation mechanism used to generate " -"the bytes in `data` from an array-like or tensor-like object." -msgstr "表示序列化机制类型的字符串,用于从类似数组或类似张量的对象中生成 `data` 中的字节。" - -#: flwr.common.record.parametersrecord.Array:15 of -#, fuzzy -msgid "A buffer of bytes containing the data." -msgstr "包含数据的字节缓冲区。" +"Each mod has a chance to inspect and modify the incoming ``Message`` " +"before passing it to the next mod, and likewise with the outgoing " +"``Message`` before returning it up the stack." +msgstr "每个模块都有机会检查和修改传入的 \"信息\",然后再将其传递给下一个模块,同样,也有机会检查和修改传出的 \"信息\",然后再将其返回堆栈。" -#: ../../source/ref-api/flwr.common.Array.rst:26::1 +#: ../../source/how-to-use-built-in-mods.rst:97 #, fuzzy -msgid ":py:obj:`numpy `\\ \\(\\)" -msgstr "server.strategy.Strategy" +msgid "" +"By following this guide, you have learned how to effectively use mods to " +"enhance your ``ClientApp``'s functionality. Remember that the order of " +"mods is crucial and affects how the input and output are processed." 
+msgstr "" +"通过本指南,您已学会如何有效地使用 mod 来增强您的 ``ClientApp`` 的功能。请记住,mod " +"的顺序至关重要,它会影响输入和输出的处理方式。" -#: ../../source/ref-api/flwr.common.Array.rst:26::1 -#: flwr.common.record.parametersrecord.Array.numpy:1 of +#: ../../source/how-to-use-built-in-mods.rst:101 #, fuzzy -msgid "Return the array as a NumPy array." -msgstr "以 NumPy ndarrays 列表形式返回模型参数" +msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" +msgstr "使用 mods 构建更强大、更灵活的 \"客户端应用程序\"!" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +#: ../../source/how-to-use-differential-privacy.rst:2 #, fuzzy -msgid ":py:obj:`dtype `\\" -msgstr ":py:obj:`dtype `\\" +msgid "Use Differential Privacy" +msgstr "差分隐私" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +#: ../../source/how-to-use-differential-privacy.rst:4 #, fuzzy -msgid ":py:obj:`shape `\\" -msgstr "server.strategy.Strategy" +msgid "" +"This guide explains how you can utilize differential privacy in the " +"Flower framework. If you are not yet familiar with differential privacy, " +"you can refer to :doc:`explanation-differential-privacy`." +msgstr "" +"本指南解释了如何在 Flower 框架中使用差分隐私。如果您还不熟悉差分隐私,可以参考 :doc:`explanation-" +"differential-privacy` 。" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +#: ../../source/how-to-use-differential-privacy.rst:10 #, fuzzy -msgid ":py:obj:`stype `\\" -msgstr "server.strategy.Strategy" +msgid "" +"Differential Privacy in Flower is in a preview phase. If you plan to use " +"these features in a production environment with sensitive data, feel free" +" contact us to discuss your requirements and to receive guidance on how " +"to best use these features." 
+msgstr "" +"Flower " +"中的差异隐私处于预览阶段。如果您计划在生产环境中使用这些敏感数据功能,请随时联系我们,讨论您的需求,并获得如何最好地使用这些功能的指导。" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +#: ../../source/how-to-use-differential-privacy.rst:17 #, fuzzy -msgid ":py:obj:`data `\\" -msgstr ":py:obj:`data `\\" +msgid "" +"This approach consists of two separate phases: clipping of the updates " +"and adding noise to the aggregated model. For the clipping phase, Flower " +"framework has made it possible to decide whether to perform clipping on " +"the server side or the client side." +msgstr "这种方法包括两个独立的阶段:对更新进行剪切和在聚合模型中添加噪声。在剪切阶段,Flower 框架可以决定是在服务器端还是在客户端执行剪切。" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:2 +#: ../../source/how-to-use-differential-privacy.rst:21 #, fuzzy -msgid "ClientMessage" -msgstr "客户端" +msgid "" +"**Server-side Clipping**: This approach has the advantage of the server " +"enforcing uniform clipping across all clients' updates and reducing the " +"communication overhead for clipping values. However, it also has the " +"disadvantage of increasing the computational load on the server due to " +"the need to perform the clipping operation for all clients." +msgstr "" +"** 服务器端剪切**: " +"这种方法的优点是服务器可对所有客户端的更新执行统一的剪切,并减少剪切值的通信开销。不过,这种方法也有缺点,那就是需要为所有客户端执行剪切操作,从而增加了服务器的计算负荷。" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +#: ../../source/how-to-use-differential-privacy.rst:26 #, fuzzy -msgid ":py:obj:`evaluate_res `\\" -msgstr ":py:obj:`evaluate_res `\\" +msgid "" +"**Client-side Clipping**: This approach has the advantage of reducing the" +" computational overhead on the server. However, it also has the " +"disadvantage of lacking centralized control, as the server has less " +"control over the clipping process." 
+msgstr "**客户端剪切**: 这种方法的优点是可以减少服务器的计算开销。不过,它也有缺乏集中控制的缺点,因为服务器对剪切过程的控制较少。" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +#: ../../source/how-to-use-differential-privacy.rst:31 #, fuzzy -msgid ":py:obj:`fit_res `\\" -msgstr ":py:obj:`fit_res `\\" +msgid "Server-side Clipping" +msgstr "服务器端逻辑" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +#: ../../source/how-to-use-differential-privacy.rst:33 #, fuzzy msgid "" -":py:obj:`get_parameters_res " -"`\\" +"For central DP with server-side clipping, there are two ``Strategy`` " +"classes that act as wrappers around the actual ``Strategy`` instance (for" +" example, ``FedAvg``). The two wrapper classes are " +"``DifferentialPrivacyServerSideFixedClipping`` and " +"``DifferentialPrivacyServerSideAdaptiveClipping`` for fixed and adaptive " +"clipping." msgstr "" -":py:obj:`get_parameters_res " -"`\\" +"对于具有服务器端剪裁功能的中央 DP,有两个 :code:`Strategy` 类作为实际 :code:`Strategy` 实例(例如 " +":code:`FedAvg`)的包装器。这两个封装类分别是 " +":code:`DifferentialPrivacyServerSideFixedClipping` 和 " +":code:`DifferentialPrivacyServerSideAdaptiveClipping` ,用于固定剪辑和自适应剪辑。" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +#: ../../source/how-to-use-differential-privacy.rst:-1 +#, fuzzy +msgid "server side clipping" +msgstr "服务器端逻辑" + +#: ../../source/how-to-use-differential-privacy.rst:43 #, fuzzy msgid "" -":py:obj:`get_properties_res " -"`\\" +"The code sample below enables the ``FedAvg`` strategy to use server-side " +"fixed clipping using the ``DifferentialPrivacyServerSideFixedClipping`` " +"wrapper class. The same approach can be used with " +"``DifferentialPrivacyServerSideAdaptiveClipping`` by adjusting the " +"corresponding input parameters." 
msgstr "" -":py:obj:`get_properties_res " -"`\\" +"下面的代码示例使用 :code:`DifferentialPrivacyServerSideFixedClipping` 封装类使 " +":code:`FedAvg` 策略使用服务器端固定剪辑。通过调整相应的输入参数,同样的方法也可用于 " +":code:`DifferentialPrivacyServerSideAdaptiveClipping`。" -#: ../../source/ref-api/flwr.common.Code.rst:2 +#: ../../source/how-to-use-differential-privacy.rst:64 #, fuzzy -msgid "Code" -msgstr "代码" +msgid "Client-side Clipping" +msgstr "客户端逻辑" -#: flwr.common.typing.Code:1 of +#: ../../source/how-to-use-differential-privacy.rst:66 #, fuzzy -msgid "Bases: :py:class:`~enum.Enum`" -msgstr "Bases: :py:class:`~enum.Enum`" +msgid "" +"For central DP with client-side clipping, the server sends the clipping " +"value to selected clients on each round. Clients can use existing Flower " +"``Mods`` to perform the clipping. Two mods are available for fixed and " +"adaptive client-side clipping: ``fixedclipping_mod`` and " +"``adaptiveclipping_mod`` with corresponding server-side wrappers " +"``DifferentialPrivacyClientSideFixedClipping`` and " +"``DifferentialPrivacyClientSideAdaptiveClipping``." 
+msgstr "" +"对于带有客户端剪裁功能的中央 DP,服务器会在每一轮向选定的客户端发送剪裁值。客户端可以使用现有的 Flower " +":code:`Mods`来执行剪裁。有两种模式可用于固定和自适应客户端剪辑::code:`fixedclipping_mod` 和 " +":code:`adaptiveclipping_mod`,以及相应的服务器端封装 " +":code:`DifferentialPrivacyClientSideFixedClipping` 和 " +":code:`DifferentialPrivacyClientSideAdaptiveClipping`。" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#: ../../source/how-to-use-differential-privacy.rst:-1 #, fuzzy -msgid ":py:obj:`OK `\\" -msgstr ":py:obj:`OK `\\" +msgid "client side clipping" +msgstr "客户端逻辑" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#: ../../source/how-to-use-differential-privacy.rst:78 #, fuzzy msgid "" -":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " -"`\\" +"The code sample below enables the ``FedAvg`` strategy to use differential" +" privacy with client-side fixed clipping using both the " +"``DifferentialPrivacyClientSideFixedClipping`` wrapper class and, on the " +"client, ``fixedclipping_mod``:" msgstr "" -":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " -"`\\" +"下面的代码示例使用 :code:`DifferentialPrivacyClientSideFixedClipping` 封装类和客户端的 " +":code:`fixedclipping_mod` 使 :code:`FedAvg` 策略在客户端固定剪辑的情况下使用差分隐私:" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#: ../../source/how-to-use-differential-privacy.rst:97 #, fuzzy msgid "" -":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " -"`\\" +"In addition to the server-side strategy wrapper, the ``ClientApp`` needs " +"to configure the matching ``fixedclipping_mod`` to perform the client-" +"side clipping:" msgstr "" -":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " -"`\\" - -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -#, fuzzy -msgid ":py:obj:`FIT_NOT_IMPLEMENTED `\\" -msgstr ":py:obj:`FIT_NOT_IMPLEMENTED `\\" +"除了服务器端策略包装器外,:code:`ClientApp` 还需要配置匹配的 :code:`fixedclipping_mod` " +"以执行客户端剪切:" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#: ../../source/how-to-use-differential-privacy.rst:115 #, fuzzy msgid "" -":py:obj:`EVALUATE_NOT_IMPLEMENTED " -"`\\" +"To utilize local differential privacy (DP) 
and add noise to the client " +"model parameters before transmitting them to the server in Flower, you " +"can use the `LocalDpMod`. The following hyperparameters need to be set: " +"clipping norm value, sensitivity, epsilon, and delta." msgstr "" -":py:obj:`EVALUATE_NOT_IMPLEMENTED " -"`\\" +"要利用本地差分隐私(DP)并在将客户端模型参数传输到 Flower 服务器之前为其添加噪声,可以使用 " +"`LocalDpMod`。需要设置以下超参数:剪切规范值、灵敏度、ε 和 delta。" -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 +#: ../../source/how-to-use-differential-privacy.rst:-1 #, fuzzy -msgid "ConfigsRecord" -msgstr "配置日志记录" +msgid "local DP mod" +msgstr "本地 DP 模式" -#: flwr.common.record.configsrecord.ConfigsRecord:1 of +#: ../../source/how-to-use-differential-privacy.rst:125 #, fuzzy -msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`int` | :py:class:`float` | :py:class:`str` |" -" :py:class:`bytes` | :py:class:`bool` | :py:class:`~typing.List`\\ " -"[:py:class:`int`] | :py:class:`~typing.List`\\ [:py:class:`float`] | " -":py:class:`~typing.List`\\ [:py:class:`str`] | :py:class:`~typing.List`\\" -" [:py:class:`bytes`] | :py:class:`~typing.List`\\ [:py:class:`bool`]]" -msgstr "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " -":py:class:`float`, :py:class:`str`, :py:class:`bytes`, :py:class:`bool`, " -":py:class:`~typing.List`\\ [:py:class:`int`], :py:class:`~typing.List`\\ " -"[:py:class:`float`], :py:class:`~typing.List`\\ [:py:class:`str`], " -":py:class:`~typing.List`\\ [:py:class:`bytes`], " -":py:class:`~typing.List`\\ [:py:class:`bool`]]]" +msgid "Below is a code example that shows how to use ``LocalDpMod``:" +msgstr "下面的代码示例展示了如何使用 :code:`LocalDpMod`:" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/how-to-use-differential-privacy.rst:140 #, fuzzy -msgid ":py:obj:`clear `\\ \\(\\)" -msgstr ":py:obj:`clear `\\ \\(\\)" +msgid "" +"Please note that the order of mods, 
especially those that modify " +"parameters, is important when using multiple modifiers. Typically, " +"differential privacy (DP) modifiers should be the last to operate on " +"parameters." +msgstr "请注意,在使用多个修改器时,修改器(尤其是修改参数的修改器)的顺序非常重要。通常情况下,差分隐私 (DP) 修改器应最后对参数进行操作。" -#: flwr.common.record.typeddict.TypedDict.clear:1 -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/how-to-use-differential-privacy.rst:145 #, fuzzy -msgid "Remove all items from R." -msgstr "从 R 中删除所有项目。" +msgid "Local Training using Privacy Engines" +msgstr "使用隐私引擎进行本地培训" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/how-to-use-differential-privacy.rst:147 #, fuzzy -msgid ":py:obj:`count_bytes `\\ \\(\\)" -msgstr ":py:obj:`count_bytes `\\ \\(\\)" +msgid "" +"For ensuring data instance-level privacy during local model training on " +"the client side, consider leveraging privacy engines such as Opacus and " +"TensorFlow Privacy. For examples of using Flower with these engines, " +"please refer to the Flower examples directory (`Opacus " +"`_, `Tensorflow" +" Privacy `_)." +msgstr "" +"要在客户端本地模型训练期间确保数据实例级隐私,可考虑利用 Opacus 和 TensorFlow Privacy 等隐私引擎。有关将 Flower" +" 与这些引擎结合使用的示例,请参阅 Flower 示例目录(`Opacus " +"`_, `Tensorflow" +" Privacy `_)。" -#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 -#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 -#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -#, fuzzy -msgid "Return number of Bytes stored in this object." 
-msgstr "返回存储在此对象中的字节数。" +#: ../../source/how-to-use-strategies.rst:2 +msgid "Use strategies" +msgstr "使用策略" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/how-to-use-strategies.rst:4 #, fuzzy -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgid "" +"Flower allows full customization of the learning process through the " +"``Strategy`` abstraction. A number of built-in strategies are provided in" +" the core framework." +msgstr "Flower 允许通过 :code:`Strategy` 抽象类对学习过程进行完全定制。核心框架中提供了许多内置策略。" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.get:1 of -#, fuzzy -msgid "d defaults to None." -msgstr "d 默认为 \"无\"。" +#: ../../source/how-to-use-strategies.rst:7 +msgid "" +"There are three ways to customize the way Flower orchestrates the " +"learning process on the server side:" +msgstr "有三种方法可以自定义 Flower 在服务器端协调学习过程的方式:" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/how-to-use-strategies.rst:10 #, fuzzy -msgid ":py:obj:`items `\\ \\(\\)" -msgstr ":py:obj:`items `\\ \\(\\)" +msgid "Use an existing strategy, for example, ``FedAvg``" +msgstr "使用现有策略,例如 :code:`FedAvg`" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -#, fuzzy -msgid ":py:obj:`keys `\\ \\(\\)" -msgstr ":py:obj:`keys `\\ \\(\\)" +#: ../../source/how-to-use-strategies.rst:11 +#: ../../source/how-to-use-strategies.rst:43 +msgid "Customize an existing strategy with callback functions" +msgstr "使用回调函数定制现有策略" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -#, fuzzy -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +#: ../../source/how-to-use-strategies.rst:12 +#: ../../source/how-to-use-strategies.rst:99 +msgid "Implement a novel strategy" +msgstr "实施新策略" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.pop:1 of -#, fuzzy -msgid "If key is not found, d is returned if 
given, otherwise KeyError is raised." -msgstr "如果未找到 key,则返回 d(如果给定),否则引发 KeyError。" +#: ../../source/how-to-use-strategies.rst:15 +msgid "Use an existing strategy" +msgstr "使用现有策略" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -#, fuzzy +#: ../../source/how-to-use-strategies.rst:17 msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" -msgstr "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" - -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.update:1 of -#, fuzzy -msgid "Update R from dict/iterable E and F." -msgstr "根据二进制/可迭代 E 和 F 更新 R。" +"Flower comes with a number of popular federated learning strategies " +"built-in. A built-in strategy can be instantiated as follows:" +msgstr "Flower 内置了许多流行的联邦学习策略。内置策略的实例化方法如下:" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/how-to-use-strategies.rst:27 #, fuzzy -msgid ":py:obj:`values `\\ \\(\\)" -msgstr ":py:obj:`values `\\ \\(\\)" +msgid "" +"This creates a strategy with all parameters left at their default values " +"and passes it to the ``start_server`` function. It is usually recommended" +" to adjust a few parameters during instantiation:" +msgstr "这会创建一个所有参数都保持默认值的策略,并将其传递给 :code:`start_server` 函数。通常建议在实例化过程中调整一些参数:" -#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of -#, fuzzy -msgid "This function counts booleans as occupying 1 Byte." -msgstr "该函数将布尔值计算为占用 1 个字节。" +#: ../../source/how-to-use-strategies.rst:45 +msgid "" +"Existing strategies provide several ways to customize their behaviour. " +"Callback functions allow strategies to call user-provided code during " +"execution." 
+msgstr "现有的策略提供了多种自定义行为的方法。回调函数允许策略在执行过程中调用用户提供的代码。" -#: ../../source/ref-api/flwr.common.Context.rst:2 -#, fuzzy -msgid "Context" -msgstr "背景" +#: ../../source/how-to-use-strategies.rst:49 +msgid "Configuring client fit and client evaluate" +msgstr "配置客户匹配和客户评估" -#: flwr.common.context.Context:3 of +#: ../../source/how-to-use-strategies.rst:51 #, fuzzy msgid "" -"Holds records added by the entity in a given run and that will stay " -"local. This means that the data it holds will never leave the system it's" -" running from. This can be used as an intermediate storage or scratchpad " -"when executing mods. It can also be used as a memory to access at " -"different points during the lifecycle of this entity (e.g. across " -"multiple rounds)" -msgstr "保存实体在给定运行中添加的记录,这些记录将保留在本地。这意味着它保存的数据永远不会离开运行的系统。在执行模式时,它可用作中间存储或抓取板。它还可以作为存储器,在实体生命周期的不同阶段(如多轮)进行访问。" - -#: ../../source/ref-api/flwr.common.Context.rst:28::1 -#, fuzzy -msgid ":py:obj:`state `\\" -msgstr "server.strategy.Strategy" +"The server can pass new configuration values to the client each round by " +"providing a function to ``on_fit_config_fn``. The provided function will " +"be called by the strategy and must return a dictionary of configuration " +"key values pairs that will be sent to the client. It must return a " +"dictionary of arbitrary configuration values ``client.fit`` and " +"``client.evaluate`` functions during each round of federated learning." +msgstr "" +"服务器可以通过向 :code:`on_fit_config_fn` " +"提供一个函数,在每一轮向客户端传递新的配置值。提供的函数将被策略调用,并且必须返回一个配置键值对的字典,该字典将被发送到客户端。在每一轮联邦学习期间,它必须返回一个任意配置值" +" dictionary :code:`client.fit`和 :code:`client.evaluate`函数。" -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 +#: ../../source/how-to-use-strategies.rst:84 #, fuzzy -msgid "DisconnectRes" -msgstr "断开Res" +msgid "" +"The ``on_fit_config_fn`` can be used to pass arbitrary configuration " +"values from server to client, and potentially change these values each " +"round, for example, to adjust the learning rate. 
The client will receive " +"the dictionary returned by the ``on_fit_config_fn`` in its own " +"``client.fit()`` function." +msgstr "" +":code:`on_fit_config_fn`可用于将任意配置值从服务器传递到客户端,并在每一轮改变这些值,例如,调整学习率。客户端将在自己的 " +":code:`client.fit()` 函数中接收 :code:`on_fit_config_fn` 返回的字典。" -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:28::1 +#: ../../source/how-to-use-strategies.rst:89 #, fuzzy -msgid ":py:obj:`reason `\\" -msgstr ":py:obj:`reason `\\" +msgid "" +"Similar to ``on_fit_config_fn``, there is also ``on_evaluate_config_fn`` " +"to customize the configuration sent to ``client.evaluate()``" +msgstr "" +"与 :code:`on_fit_config_fn` 类似,还有 :code:`on_evaluate_config_fn` 用于定制发送到 " +":code:`client.evaluate()` 的配置" -#: ../../source/ref-api/flwr.common.Error.rst:2 -#, fuzzy -msgid "Error" -msgstr "错误" +#: ../../source/how-to-use-strategies.rst:93 +msgid "Configuring server-side evaluation" +msgstr "配置服务器端评估" -#: flwr.common.message.Error:3 of +#: ../../source/how-to-use-strategies.rst:95 #, fuzzy -msgid "An identifier for the error." -msgstr "错误的标识符。" +msgid "" +"Server-side evaluation can be enabled by passing an evaluation function " +"to ``evaluate_fn``." +msgstr "服务器端评估可通过向 :code:`evaluate_fn` 传递评估函数来启用。" -#: flwr.common.message.Error:5 of -#, fuzzy -msgid "A reason for why the error arose (e.g. an exception stack-trace)" -msgstr "出错原因(如异常堆栈跟踪)" +#: ../../source/how-to-use-strategies.rst:101 +msgid "" +"Writing a fully custom strategy is a bit more involved, but it provides " +"the most flexibility. Read the `Implementing Strategies `_ guide to learn more." +msgstr "" +"编写完全自定义的策略涉及的内容较多,但灵活性最高。阅读 `实施策略 _ " +"指南,了解更多信息。" -#: flwr.common.Error.code:1::1 of -#, fuzzy -msgid ":py:obj:`code `\\" -msgstr ":py:obj:`code `\\" +#: ../../source/index.rst:34 +msgid "Tutorial" +msgstr "教程" -#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of -#, fuzzy -msgid "Error code." 
-msgstr "错误代码。" +#: ../../source/index.rst:44 +msgid "Quickstart tutorials" +msgstr "快速入门教程" -#: flwr.common.Error.code:1::1 of -#, fuzzy -msgid ":py:obj:`reason `\\" -msgstr ":py:obj:`reason `\\" +#: ../../source/index.rst:81 ../../source/index.rst:85 +msgid "How-to guides" +msgstr "操作指南" -#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of -#, fuzzy -msgid "Reason reported about the error." -msgstr "报告的错误原因。" +#: ../../source/index.rst:106 +msgid "Legacy example guides" +msgstr "旧版指南范例" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 -#, fuzzy -msgid "EvaluateIns" +#: ../../source/index.rst:114 ../../source/index.rst:119 +msgid "Explanations" msgstr "说明" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 -#, fuzzy -msgid ":py:obj:`parameters `\\" -msgstr ":py:obj:`parameters `\\" - -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 -#, fuzzy -msgid ":py:obj:`config `\\" -msgstr ":py:obj:`config `\\" +#: None:-1 +msgid "API reference" +msgstr "应用程序接口参考" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:2 -#, fuzzy -msgid "EvaluateRes" -msgstr "评估Res" +#: ../../source/index.rst:145 +msgid "Reference docs" +msgstr "参考文档" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -#, fuzzy -msgid ":py:obj:`status `\\" -msgstr ":py:obj:`status `\\" +#: ../../source/index.rst:160 +msgid "Contributor tutorials" +msgstr "贡献者教程" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -#, fuzzy -msgid ":py:obj:`loss `\\" -msgstr ":py:obj:`loss `\\" +#: ../../source/index.rst:167 +msgid "Contributor how-to guides" +msgstr "投稿指南" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -#, fuzzy -msgid ":py:obj:`num_examples `\\" -msgstr ":py:obj:`num_examples `\\" +#: ../../source/index.rst:179 +msgid "Contributor explanations" +msgstr "贡献者解释" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -#, fuzzy -msgid ":py:obj:`metrics `\\" -msgstr ":py:obj:`metrics `\\" +#: ../../source/index.rst:185 +msgid "Contributor references" 
+msgstr "贡献者参考资料" -#: ../../source/ref-api/flwr.common.EventType.rst:2 -#, fuzzy -msgid "EventType" -msgstr "返回类型" +#: ../../source/index.rst:-1 +msgid "" +"Check out the documentation of the main Flower Framework enabling easy " +"Python development for Federated Learning." +msgstr "查看主 Flower Framework 的文档,轻松实现联邦学习的 Python 开发。" -#: flwr.common.telemetry.EventType:1 of -#, fuzzy -msgid "Bases: :py:class:`str`, :py:class:`~enum.Enum`" -msgstr "Bases: :py:class:`str`, :py:class:`~enum.Enum`" +#: ../../source/index.rst:2 +msgid "Flower Framework Documentation" +msgstr "Flower 框架文档" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy +#: ../../source/index.rst:7 msgid "" -":py:obj:`encode `\\ \\(\\[encoding\\, " -"errors\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +"Welcome to Flower's documentation. `Flower `_ is a " +"friendly federated learning framework." +msgstr "欢迎访问 Flower 文档。`Flower `_ 是一个友好的联邦学习框架。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.encode:1 of -msgid "Encode the string using the codec registered for encoding." -msgstr "" +#: ../../source/index.rst:11 +msgid "Join the Flower Community" +msgstr "加入 Flower 社区" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy +#: ../../source/index.rst:13 msgid "" -":py:obj:`replace `\\ \\(old\\, new\\[\\, " -"count\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +"The Flower Community is growing quickly - we're a friendly group of " +"researchers, engineers, students, professionals, academics, and other " +"enthusiasts." +msgstr "Flower 社区发展迅速--我们是一个由研究人员、工程师、学生、专业人士、学者和其他爱好者组成的友好团体。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.replace:1 of -msgid "Return a copy with all occurrences of substring old replaced by new." 
+#: ../../source/index.rst:16 +msgid "Join us on Slack" +msgstr "在 Slack 上加入我们" + +#: ../../source/index.rst:23 +msgid "Flower Framework" +msgstr "Flower 框架" + +#: ../../source/index.rst:25 +msgid "" +"The user guide is targeted at researchers and developers who want to use " +"Flower to bring existing machine learning workloads into a federated " +"setting. One of Flower's design goals was to make this simple. Read on to" +" learn more." msgstr "" +"该用户指南面向希望使用 Flower 将现有机器学习工作负载引入联邦环境的研究人员和开发人员。Flower " +"的设计目标之一就是让这一切变得简单。请继续阅读,了解更多信息。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy +#: ../../source/index.rst:30 +msgid "Tutorials" +msgstr "教程" + +#: ../../source/index.rst:32 msgid "" -":py:obj:`split `\\ \\(\\[sep\\, " -"maxsplit\\]\\)" -msgstr ":py:obj:`PING `\\" +"A learning-oriented series of federated learning tutorials, the best " +"place to start." +msgstr "以学习为导向的联邦学习教程系列,最好的起点。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rsplit:1 flwr.common.EventType.split:1 of +#: ../../source/index.rst:62 +#, fuzzy msgid "" -"Return a list of the substrings in the string, using sep as the separator" -" string." 
+"QUICKSTART TUTORIALS: :doc:`PyTorch ` | " +":doc:`TensorFlow ` | :doc:`MLX ` | :doc:`🤗 Transformers ` | :doc:`JAX ` | :doc:`Pandas " +"` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | " +":doc:`XGBoost ` | :doc:`Android ` | :doc:`iOS `" msgstr "" +"快速入门教程: :doc:`PyTorch ` | :doc:`TensorFlow " +"` | :doc:`🤗 Transformers ` | :doc:`JAX ` | " +":doc:`Pandas ` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`MXNet ` | :doc" +":`scikit-learn ` | :doc:`XGBoost " +"` | :doc:`Android ` | :doc:`iOS `" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy +#: ../../source/index.rst:70 +msgid "We also made video tutorials for PyTorch:" +msgstr "我们还为 PyTorch 制作了视频教程:" + +#: ../../source/index.rst:75 +msgid "And TensorFlow:" +msgstr "还有 TensorFlow:" + +#: ../../source/index.rst:83 msgid "" -":py:obj:`rsplit `\\ \\(\\[sep\\, " -"maxsplit\\]\\)" -msgstr ":py:obj:`PING `\\" +"Problem-oriented how-to guides show step-by-step how to achieve a " +"specific goal." +msgstr "以问题为导向的 \"如何做 \"指南逐步展示如何实现特定目标。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`join `\\ \\(iterable\\, \\/\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +#: ../../source/index.rst:116 +msgid "" +"Understanding-oriented concept guides explain and discuss key topics and " +"underlying ideas behind Flower and collaborative AI." +msgstr "以理解为导向的概念指南解释并讨论了Flower和协作式人工智能背后的关键主题和基本思想。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.join:1 of -msgid "Concatenate any number of strings." -msgstr "" +#: ../../source/index.rst:128 +msgid "References" +msgstr "参考资料" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`capitalize `\\ \\(\\)" -msgstr ":py:obj:`PING `\\" +#: ../../source/index.rst:130 +msgid "Information-oriented API reference and other reference material." 
+msgstr "以信息为导向的 API 参考资料和其他参考资料。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.capitalize:1 of -msgid "Return a capitalized version of the string." -msgstr "" +#: ../../source/index.rst:139::1 +#, fuzzy +msgid ":py:obj:`flwr `\\" +msgstr ":py:obj:`flwr `\\" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/index.rst:139::1 flwr:1 of #, fuzzy -msgid ":py:obj:`casefold `\\ \\(\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid "Flower main package." +msgstr "Flower 主包装。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.casefold:1 of -msgid "Return a version of the string suitable for caseless comparisons." -msgstr "" +#: ../../source/index.rst:155 +msgid "Contributor docs" +msgstr "贡献者文档" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`title `\\ \\(\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +#: ../../source/index.rst:157 +msgid "" +"The Flower community welcomes contributions. The following docs are " +"intended to help along the way." +msgstr "Flower 社区欢迎您的贡献。以下文档旨在为您提供帮助。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.title:1 of -msgid "Return a version of the string where each word is titlecased." -msgstr "" +#: ../../source/ref-api-cli.rst:2 +msgid "Flower CLI reference" +msgstr "Flower CLI 参考" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api-cli.rst:7 #, fuzzy -msgid "" -":py:obj:`center `\\ \\(width\\[\\, " -"fillchar\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid "flwr CLI" +msgstr "Flower 客户端" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.center:1 of +#: ../../flwr:1 #, fuzzy -msgid "Return a centered string of length width." -msgstr "返回客户端的属性集。" +msgid "flwr is the Flower command line interface." 
+msgstr "注册 Flower ClientProxy 实例。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api-cli.rst #, fuzzy -msgid "" -":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" -msgstr ":py:obj:`Context `\\ \\(state\\)" +msgid "Options" +msgstr "解决方案" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr:1 +#, fuzzy +msgid "Install completion for the current shell." +msgstr "当前运行的标识符。" + +#: ../../flwr:1 msgid "" -"Return the number of non-overlapping occurrences of substring sub in " -"string S[start:end]." +"Show completion for the current shell, to copy it or customize the " +"installation." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy +#: ../../flwr build:1 +msgid "Build a Flower App into a Flower App Bundle (FAB)." +msgstr "" + +#: ../../flwr build:1 msgid "" -":py:obj:`expandtabs `\\ " -"\\(\\[tabsize\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +"You can run ``flwr build`` without any arguments to bundle the app " +"located in the current directory. Alternatively, you can you can specify " +"a path using the ``--app`` option to bundle an app located at the " +"provided path. For example:" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.expandtabs:1 of -msgid "Return a copy where all tab characters are expanded using spaces." +#: ../../flwr build:1 +msgid "``flwr build --app ./apps/flower-hello-world``." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr build:1 +msgid "Path of the Flower App to bundle into a FAB" +msgstr "" + +#: ../../flwr install:1 #, fuzzy -msgid "" -":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" -msgstr ":py:obj:`PING `\\" +msgid "Install a Flower App Bundle." 
+msgstr "安装Flower" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr install:1 +msgid "It can be ran with a single FAB file argument:" +msgstr "" + +#: ../../flwr install:1 +msgid "``flwr install ./target_project.fab``" +msgstr "" + +#: ../../flwr install:1 +msgid "The target install directory can be specified with ``--flwr-dir``:" +msgstr "" + +#: ../../flwr install:1 +msgid "``flwr install ./target_project.fab --flwr-dir ./docs/flwr``" +msgstr "" + +#: ../../flwr install:1 msgid "" -"Return the lowest index in S where substring sub is found, such that sub " -"is contained within S[start:end]." +"This will install ``target_project`` to ``./docs/flwr/``. By default, " +"``flwr-dir`` is equal to:" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`partition `\\ \\(sep\\, \\/\\)" -msgstr ":py:obj:`partition_id `\\" +#: ../../flwr install:1 +msgid "``$FLWR_HOME/`` if ``$FLWR_HOME`` is defined" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.partition:1 flwr.common.EventType.rpartition:1 of -msgid "Partition the string into three parts using the given separator." +#: ../../flwr install:1 +msgid "``$XDG_DATA_HOME/.flwr/`` if ``$XDG_DATA_HOME`` is defined" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr install:1 +msgid "``$HOME/.flwr/`` in all other cases" +msgstr "" + +#: ../../flwr install:1 +msgid "The desired install path." 
+msgstr "" + +#: ../../source/ref-api-cli.rst #, fuzzy -msgid "" -":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" -msgstr ":py:obj:`Context `\\ \\(state\\)" +msgid "Arguments" +msgstr "参数解析器" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr install:1 log:1 new:1 run:1 #, fuzzy -msgid "" -":py:obj:`ljust `\\ \\(width\\[\\, " -"fillchar\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid "Optional argument" +msgstr "可选的改进措施" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.ljust:1 of -msgid "Return a left-justified string of length width." +#: ../../flwr install:1 +msgid "The source FAB file to install." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`lower `\\ \\(\\)" -msgstr ":py:obj:`now `\\ \\(\\)" +#: ../../flwr log:1 +msgid "Get logs from a Flower project run." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.lower:1 of -msgid "Return a copy of the string converted to lowercase." +#: ../../flwr log:1 +msgid "Flag to stream or print logs from the Flower run" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr log run #, fuzzy -msgid ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid "default" +msgstr "工作流程" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.lstrip:1 of -msgid "Return a copy of the string with leading whitespace removed." 
+#: ../../flwr log:1 +msgid "``True``" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr log:1 #, fuzzy -msgid "" -":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" -msgstr ":py:obj:`PING `\\" +msgid "Required argument" +msgstr "构建文档" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -"Return the highest index in S where substring sub is found, such that sub" -" is contained within S[start:end]." -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid "" -":py:obj:`rindex `\\ \\(sub\\[\\, " -"start\\[\\, end\\]\\]\\)" -msgstr ":py:obj:`Context `\\ \\(state\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid "" -":py:obj:`rjust `\\ \\(width\\[\\, " -"fillchar\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rjust:1 of -msgid "Return a right-justified string of length width." -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr log:1 #, fuzzy -msgid ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid "The Flower run ID to query" +msgstr "加入 Flower 社区" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rstrip:1 of -msgid "Return a copy of the string with trailing whitespace removed." +#: ../../flwr log:1 +msgid "Path of the Flower project to run" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`rpartition `\\ \\(sep\\, \\/\\)" -msgstr ":py:obj:`partition_id `\\" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid "" -":py:obj:`splitlines `\\ " -"\\(\\[keepends\\]\\)" -msgstr ":py:obj:`PING `\\" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.splitlines:1 of -msgid "Return a list of the lines in the string, breaking at line boundaries." 
+#: ../../flwr log:1 +msgid "Name of the federation to run the app on" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr new:1 #, fuzzy -msgid ":py:obj:`strip `\\ \\(\\[chars\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid "Create new Flower App." +msgstr "Flower 服务器。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.strip:1 of -msgid "Return a copy of the string with leading and trailing whitespace removed." +#: ../../flwr new:1 +msgid "The ML framework to use" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr new #, fuzzy -msgid ":py:obj:`swapcase `\\ \\(\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid "options" +msgstr "解决方案" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.swapcase:1 of +#: ../../flwr new:1 msgid "" -"Convert uppercase characters to lowercase and lowercase characters to " -"uppercase." +"PyTorch | TensorFlow | sklearn | HuggingFace | JAX | MLX | NumPy | " +"FlowerTune | Flower Baseline" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`translate `\\ \\(table\\, \\/\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.translate:1 of -msgid "Replace each character in the string using the given translation table." +#: ../../flwr new:1 +msgid "The Flower username of the author" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr new:1 #, fuzzy -msgid ":py:obj:`upper `\\ \\(\\)" -msgstr ":py:obj:`PING `\\" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.upper:1 of -msgid "Return a copy of the string converted to uppercase." 
-msgstr "" +msgid "The name of the Flower App" +msgstr "基础镜像的存储库名称。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr run:1 #, fuzzy -msgid "" -":py:obj:`startswith `\\ \\(prefix\\[\\," -" start\\[\\, end\\]\\]\\)" -msgstr ":py:obj:`Status `\\ \\(code\\, message\\)" +msgid "Run Flower App." +msgstr "Flower 服务器。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return True if S starts with the specified prefix, False otherwise." +#: ../../flwr run:1 +msgid "Override configuration key-value pairs, should be of the format:" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy +#: ../../flwr run:1 msgid "" -":py:obj:`endswith `\\ \\(suffix\\[\\, " -"start\\[\\, end\\]\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return True if S ends with the specified suffix, False otherwise." +"`--run-config 'key1=\"value1\" key2=\"value2\"' --run-config " +"'key3=\"value3\"'`" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy +#: ../../flwr run:1 msgid "" -":py:obj:`removeprefix `\\ " -"\\(prefix\\, \\/\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.removeprefix:1 of -msgid "Return a str with the given prefix string removed if present." +"Note that `key1`, `key2`, and `key3` in this example need to exist inside" +" the `pyproject.toml` in order to be properly overriden." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy +#: ../../flwr run:1 msgid "" -":py:obj:`removesuffix `\\ " -"\\(suffix\\, \\/\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.removesuffix:1 of -msgid "Return a str with the given suffix string removed if present." 
+"Use `--stream` with `flwr run` to display logs; logs are not streamed by " +"default." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr run:1 #, fuzzy -msgid ":py:obj:`isascii `\\ \\(\\)" -msgstr ":py:obj:`PING `\\" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isascii:1 of -msgid "Return True if all characters in the string are ASCII, False otherwise." -msgstr "" +msgid "``False``" +msgstr "``FLWR_VERSION``" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr run:1 #, fuzzy -msgid ":py:obj:`islower `\\ \\(\\)" -msgstr ":py:obj:`now `\\ \\(\\)" +msgid "Path of the Flower App to run." +msgstr "基础镜像的存储库名称。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.islower:1 of -msgid "Return True if the string is a lowercase string, False otherwise." +#: ../../flwr run:1 +msgid "Name of the federation to run the app on." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api-cli.rst:16 #, fuzzy -msgid ":py:obj:`isupper `\\ \\(\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid "flower-simulation" +msgstr "运行模拟" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isupper:1 of -msgid "Return True if the string is an uppercase string, False otherwise." -msgstr "" +#: ../../source/ref-api-cli.rst:26 +msgid "flower-superlink" +msgstr "flower-superlink" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api-cli.rst:36 #, fuzzy -msgid ":py:obj:`istitle `\\ \\(\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.istitle:1 of -msgid "Return True if the string is a title-cased string, False otherwise." 
-msgstr "" +msgid "flower-supernode" +msgstr "Flower 服务器" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api-cli.rst:46 #, fuzzy -msgid ":py:obj:`isspace `\\ \\(\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid "flower-server-app" +msgstr "flower-driver-api" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isspace:1 of -msgid "Return True if the string is a whitespace string, False otherwise." +#: ../../source/ref-api-cli.rst:50 +msgid "" +"Note that since version ``1.11.0``, ``flower-server-app`` no longer " +"supports passing a reference to a `ServerApp` attribute. Instead, you " +"need to pass the path to Flower app via the argument ``--app``. This is " +"the path to a directory containing a `pyproject.toml`. You can create a " +"valid Flower app by executing ``flwr new`` and following the prompt." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api-cli.rst:64 #, fuzzy -msgid ":py:obj:`isdecimal `\\ \\(\\)" -msgstr ":py:obj:`PING `\\" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isdecimal:1 of -msgid "Return True if the string is a decimal string, False otherwise." -msgstr "" +msgid "flower-superexec" +msgstr "flower-superlink" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.rst:2 #, fuzzy -msgid ":py:obj:`isdigit `\\ \\(\\)" -msgstr ":py:obj:`PING `\\" +msgid "flwr" +msgstr "Flower" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isdigit:1 of -msgid "Return True if the string is a digit string, False otherwise." 
-msgstr "" +#: ../../source/ref-api/flwr.client.rst:43 ../../source/ref-api/flwr.rst:25 +#: ../../source/ref-api/flwr.server.rst:48 +#, fuzzy +msgid "Modules" +msgstr "模块" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid ":py:obj:`isnumeric `\\ \\(\\)" -msgstr ":py:obj:`PING `\\" +msgid ":py:obj:`flwr.client `\\" +msgstr ":py:obj:`flwr.client `\\" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isnumeric:1 of -msgid "Return True if the string is a numeric string, False otherwise." -msgstr "" +#: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of +msgid "Flower client." +msgstr "Flower 客户端。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid ":py:obj:`isalpha `\\ \\(\\)" -msgstr ":py:obj:`PING `\\" +msgid ":py:obj:`flwr.common `\\" +msgstr ":py:obj:`flwr.common `\\" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isalpha:1 of -msgid "Return True if the string is an alphabetic string, False otherwise." -msgstr "" +#: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of +msgid "Common components shared between server and client." +msgstr "服务器和客户端共享的通用组件。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid ":py:obj:`isalnum `\\ \\(\\)" -msgstr ":py:obj:`PING `\\" +msgid ":py:obj:`flwr.server `\\" +msgstr ":py:obj:`flwr.server `\\" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isalnum:1 of -msgid "Return True if the string is an alpha-numeric string, False otherwise." -msgstr "" +#: ../../source/ref-api/flwr.rst:35::1 +#: ../../source/ref-api/flwr.server.rst:37::1 flwr.server:1 +#: flwr.server.server.Server:1 of +msgid "Flower server." 
+msgstr "Flower 服务器。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid ":py:obj:`isidentifier `\\ \\(\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isidentifier:1 of -msgid "Return True if the string is a valid Python identifier, False otherwise." -msgstr "" +msgid ":py:obj:`flwr.simulation `\\" +msgstr ":py:obj:`flwr.simulation `\\" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of #, fuzzy -msgid ":py:obj:`isprintable `\\ \\(\\)" -msgstr ":py:obj:`PING `\\" +msgid "Flower simulation." +msgstr "运行模拟" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isprintable:1 of -msgid "Return True if the string is printable, False otherwise." -msgstr "" +#: ../../source/ref-api/flwr.client.rst:2 +msgid "client" +msgstr "客户端" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.mod.rst:13 +#: ../../source/ref-api/flwr.client.rst:13 +#: ../../source/ref-api/flwr.common.rst:13 +#: ../../source/ref-api/flwr.server.rst:13 +#: ../../source/ref-api/flwr.simulation.rst:13 #, fuzzy -msgid ":py:obj:`zfill `\\ \\(width\\, \\/\\)" -msgstr ":py:obj:`PING `\\" +msgid "Functions" +msgstr "四种函数:" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.zfill:1 of +#: ../../source/ref-api/flwr.client.rst:23::1 +#, fuzzy msgid "" -"Pad a numeric string with zeros on the left, to fill a field of the given" -" width." 
+":py:obj:`start_client `\\ \\(\\*\\, " +"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" msgstr "" +":py:obj:`start_client `\\ \\(\\*\\, " +"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.rst:23::1 +#: flwr.client.app.start_client:1 of +msgid "Start a Flower client node which connects to a Flower server." +msgstr "启动一个 Flower 客户节点,连接到 Flower 服务器。" + +#: ../../source/ref-api/flwr.client.rst:23::1 #, fuzzy msgid "" -":py:obj:`format `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" +":py:obj:`start_numpy_client `\\ \\(\\*\\," +" server\\_address\\, client\\)" msgstr "" -":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " -"\\*\\*kwargs\\)" +":py:obj:`start_numpy_client `\\ \\(\\*\\," +" server\\_address\\, client\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return a formatted version of S, using substitutions from args and kwargs." -msgstr "" +#: ../../source/ref-api/flwr.client.rst:23::1 +#: flwr.client.app.start_numpy_client:1 of +msgid "Start a Flower NumPyClient which connects to a gRPC server." +msgstr "启动 Flower NumPyClient,连接到 gRPC 服务器。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.mod.rst:30 +#: ../../source/ref-api/flwr.client.rst:25 +#: ../../source/ref-api/flwr.common.rst:32 +#: ../../source/ref-api/flwr.server.rst:24 +#: ../../source/ref-api/flwr.server.strategy.rst:17 +#: ../../source/ref-api/flwr.server.workflow.rst:17 #, fuzzy -msgid ":py:obj:`format_map `\\ \\(mapping\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return a formatted version of S, using substitutions from mapping." 
-msgstr "" +msgid "Classes" +msgstr "类别" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.rst:32::1 #, fuzzy -msgid ":py:obj:`maketrans `\\" -msgstr ":py:obj:`TRAIN `\\" +msgid ":py:obj:`Client `\\ \\(\\)" +msgstr ":py:obj:`Client `\\ \\(\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.maketrans:1 of -msgid "Return a translation table usable for str.translate()." -msgstr "" +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.client.Client:1 of +msgid "Abstract base class for Flower clients." +msgstr "Flower 客户端的抽象基类。" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.rst:32::1 #, fuzzy -msgid ":py:obj:`PING `\\" -msgstr ":py:obj:`PING `\\" +msgid "" +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " +"mods\\]\\)" +msgstr "" +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " +"mods\\]\\)" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.client_app.ClientApp:1 of #, fuzzy -msgid ":py:obj:`START_CLIENT_ENTER `\\" -msgstr ":py:obj:`START_CLIENT_ENTER `\\" +msgid "Flower ClientApp." +msgstr "Flower 客户端。" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.rst:32::1 #, fuzzy -msgid ":py:obj:`START_CLIENT_LEAVE `\\" -msgstr ":py:obj:`START_CLIENT_LEAVE `\\" +msgid ":py:obj:`NumPyClient `\\ \\(\\)" +msgstr ":py:obj:`NumPyClient `\\ \\(\\)" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy -msgid ":py:obj:`START_SERVER_ENTER `\\" -msgstr ":py:obj:`START_SERVER_ENTER `\\" +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.numpy_client.NumPyClient:1 of +msgid "Abstract base class for Flower clients using NumPy." 
+msgstr "使用 NumPy 的 Flower 客户端的抽象基类。" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.rst:50::1 #, fuzzy -msgid ":py:obj:`START_SERVER_LEAVE `\\" -msgstr ":py:obj:`START_SERVER_LEAVE `\\" +msgid ":py:obj:`flwr.client.mod `\\" +msgstr ":py:obj:`flwr.client `\\" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.rst:50::1 flwr.client.mod:1 of #, fuzzy -msgid "" -":py:obj:`RUN_DRIVER_API_ENTER " -"`\\" -msgstr "" -":py:obj:`RUN_DRIVER_API_ENTER " -"`\\" +msgid "Flower Built-in Mods." +msgstr "使用内置调制器" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.client.client.Client:1 flwr.client.numpy_client.NumPyClient:1 +#: flwr.server.client_manager.ClientManager:1 +#: flwr.server.driver.driver.Driver:1 flwr.server.strategy.strategy.Strategy:1 +#: of #, fuzzy -msgid "" -":py:obj:`RUN_DRIVER_API_LEAVE " -"`\\" -msgstr "" -":py:obj:`RUN_DRIVER_API_LEAVE " -"`\\" +msgid "Bases: :py:class:`~abc.ABC`" +msgstr "Bases: :py:class:`~abc.ABC`" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.Client.rst:15 +#: ../../source/ref-api/flwr.client.ClientApp.rst:15 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:15 +#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:15 +#: ../../source/ref-api/flwr.common.Array.rst:15 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:15 +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Context.rst:15 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 +#: ../../source/ref-api/flwr.common.Error.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 +#: ../../source/ref-api/flwr.common.EventType.rst:15 +#: ../../source/ref-api/flwr.common.FitIns.rst:15 +#: ../../source/ref-api/flwr.common.FitRes.rst:15 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:15 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 +#: 
../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 +#: ../../source/ref-api/flwr.common.Message.rst:15 +#: ../../source/ref-api/flwr.common.MessageType.rst:15 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 +#: ../../source/ref-api/flwr.common.Metadata.rst:15 +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Parameters.rst:15 +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 +#: ../../source/ref-api/flwr.common.RecordSet.rst:15 +#: ../../source/ref-api/flwr.common.ServerMessage.rst:15 +#: ../../source/ref-api/flwr.common.Status.rst:15 +#: ../../source/ref-api/flwr.server.ClientManager.rst:15 +#: ../../source/ref-api/flwr.server.Driver.rst:15 +#: ../../source/ref-api/flwr.server.History.rst:15 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 +#: ../../source/ref-api/flwr.server.Server.rst:15 +#: ../../source/ref-api/flwr.server.ServerApp.rst:15 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:15 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:15 +#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 +#: 
../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 #, fuzzy -msgid "" -":py:obj:`RUN_FLEET_API_ENTER " -"`\\" -msgstr "" -":py:obj:`RUN_FLEET_API_ENTER " -"`\\" +msgid "Methods" +msgstr "方法" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.Client.rst:44::1 #, fuzzy -msgid "" -":py:obj:`RUN_FLEET_API_LEAVE " -"`\\" -msgstr "" -":py:obj:`RUN_FLEET_API_LEAVE " -"`\\" +msgid ":py:obj:`evaluate `\\ \\(ins\\)" +msgstr ":py:obj:`evaluate `\\ \\(ins\\)" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy -msgid "" -":py:obj:`RUN_SUPERLINK_ENTER " -"`\\" -msgstr "" -":py:obj:`RUN_SUPERLINK_ENTER " -"`\\" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.evaluate:1 +#: flwr.client.numpy_client.NumPyClient.evaluate:1 of +msgid "Evaluate the provided parameters using the 
locally held dataset." +msgstr "使用本地数据集评估所提供的参数。" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.Client.rst:44::1 #, fuzzy -msgid "" -":py:obj:`RUN_SUPERLINK_LEAVE " -"`\\" -msgstr "" -":py:obj:`RUN_SUPERLINK_LEAVE " -"`\\" +msgid ":py:obj:`fit `\\ \\(ins\\)" +msgstr ":py:obj:`fit `\\ \\(ins\\)" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.fit:1 of +msgid "Refine the provided parameters using the locally held dataset." +msgstr "利用本地数据集完善所提供的参数。" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 #, fuzzy -msgid "" -":py:obj:`START_SIMULATION_ENTER " -"`\\" -msgstr "" -":py:obj:`START_SIMULATION_ENTER " -"`\\" +msgid ":py:obj:`get_context `\\ \\(\\)" +msgstr ":py:obj:`get_context `\\ \\(\\)" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_context:1 +#: flwr.client.numpy_client.NumPyClient.get_context:1 of #, fuzzy -msgid "" -":py:obj:`START_SIMULATION_LEAVE " -"`\\" -msgstr "" -":py:obj:`START_SIMULATION_LEAVE " -"`\\" +msgid "Get the run context from this client." +msgstr "评估客户端的反应。" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.Client.rst:44::1 #, fuzzy -msgid ":py:obj:`DRIVER_CONNECT `\\" -msgstr ":py:obj:`DRIVER_CONNECT `\\" +msgid ":py:obj:`get_parameters `\\ \\(ins\\)" +msgstr ":py:obj:`get_parameters `\\ \\(ins\\)" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_parameters:1 +#: flwr.client.numpy_client.NumPyClient.get_parameters:1 of +msgid "Return the current local model parameters." 
+msgstr "返回当前本地模型参数。" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 #, fuzzy -msgid ":py:obj:`DRIVER_DISCONNECT `\\" -msgstr ":py:obj:`DRIVER_DISCONNECT `\\" +msgid ":py:obj:`get_properties `\\ \\(ins\\)" +msgstr ":py:obj:`get_properties `\\ \\(ins\\)" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.get_properties:1 of +msgid "Return set of client's properties." +msgstr "返回客户端的属性集。" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 #, fuzzy -msgid ":py:obj:`START_DRIVER_ENTER `\\" -msgstr ":py:obj:`START_DRIVER_ENTER `\\" +msgid ":py:obj:`set_context `\\ \\(context\\)" +msgstr ":py:obj:`set_context `\\ \\(context\\)" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.set_context:1 +#: flwr.client.numpy_client.NumPyClient.set_context:1 of #, fuzzy -msgid ":py:obj:`START_DRIVER_LEAVE `\\" -msgstr ":py:obj:`START_DRIVER_LEAVE `\\" +msgid "Apply a run context to this client." +msgstr "将运行上下文应用于该客户端。" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.Client.rst:44::1 #, fuzzy -msgid "" -":py:obj:`RUN_CLIENT_APP_ENTER " -"`\\" -msgstr "" -":py:obj:`RUN_CLIENT_APP_ENTER " -"`\\" +msgid ":py:obj:`to_client `\\ \\(\\)" +msgstr ":py:obj:`to_client `\\ \\(\\)" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy -msgid "" -":py:obj:`RUN_CLIENT_APP_LEAVE " -"`\\" -msgstr "" -":py:obj:`RUN_CLIENT_APP_LEAVE " -"`\\" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.to_client:1 of +msgid "Return client (itself)." 
+msgstr "返回客户端(本身)。" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.Client.rst:46 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:46 +#: ../../source/ref-api/flwr.common.Array.rst:28 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:25 +#: ../../source/ref-api/flwr.common.Code.rst:19 +#: ../../source/ref-api/flwr.common.Context.rst:25 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 +#: ../../source/ref-api/flwr.common.Error.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 +#: ../../source/ref-api/flwr.common.EventType.rst:165 +#: ../../source/ref-api/flwr.common.FitIns.rst:25 +#: ../../source/ref-api/flwr.common.FitRes.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 +#: ../../source/ref-api/flwr.common.Message.rst:37 +#: ../../source/ref-api/flwr.common.MessageType.rst:25 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 +#: ../../source/ref-api/flwr.common.Metadata.rst:25 +#: ../../source/ref-api/flwr.common.Parameters.rst:25 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 +#: ../../source/ref-api/flwr.common.RecordSet.rst:25 +#: ../../source/ref-api/flwr.common.ServerMessage.rst:25 +#: ../../source/ref-api/flwr.common.Status.rst:25 +#: ../../source/ref-api/flwr.server.Driver.rst:40 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:25 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:25 #, fuzzy -msgid "" -":py:obj:`RUN_SERVER_APP_ENTER " -"`\\" -msgstr "" -":py:obj:`RUN_SERVER_APP_ENTER " -"`\\" +msgid "Attributes" +msgstr "属性" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.client.Client.context:1::1 of #, fuzzy -msgid "" 
-":py:obj:`RUN_SERVER_APP_LEAVE " -"`\\" -msgstr "" -":py:obj:`RUN_SERVER_APP_LEAVE " -"`\\" +msgid ":py:obj:`context `\\" +msgstr ":py:obj:`context `\\" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy -msgid "" -":py:obj:`RUN_SUPERNODE_ENTER " -"`\\" +#: flwr.client.Client.context:1 flwr.client.Client.context:1::1 +#: flwr.client.NumPyClient.context:1 +#: flwr.client.NumPyClient.context:1::1 of +msgid "Getter for `Context` client attribute." msgstr "" -":py:obj:`RUN_SUPERLINK_ENTER " -"`\\" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy -msgid "" -":py:obj:`RUN_SUPERNODE_LEAVE " -"`\\" -msgstr "" -":py:obj:`RUN_SUPERLINK_LEAVE " -"`\\" +#: ../../source/ref-api/flwr.common.Parameters.rst:2 +#: flwr.client.app.start_client flwr.client.app.start_numpy_client +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.mod.localdp_mod.LocalDpMod +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.context.Context flwr.common.message.Error +#: flwr.common.message.Message flwr.common.message.Message.create_error_reply +#: flwr.common.message.Message.create_reply flwr.common.message.Metadata +#: flwr.common.record.configsrecord.ConfigsRecord +#: flwr.common.record.metricsrecord.MetricsRecord +#: flwr.common.record.parametersrecord.Array +#: flwr.common.record.parametersrecord.ParametersRecord +#: flwr.common.record.recordset.RecordSet flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.ClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: 
flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.serverapp_components.ServerAppComponents +#: flwr.server.strategy.bulyan.Bulyan +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.fedadagrad.FedAdagrad +#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg +#: flwr.server.strategy.fedavg_android.FedAvgAndroid +#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt +#: flwr.server.strategy.fedprox.FedProx +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg +#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow +#: flwr.simulation.run_simulation.run_simulation of +msgid "Parameters" +msgstr "参数" -#: flwr.common.EventType.capitalize:3 of +#: flwr.client.client.Client.evaluate:3 of msgid "" -"More specifically, make the first character have upper case and the rest " -"lower 
case." -msgstr "" +"The evaluation instructions containing (global) model parameters received" +" from the server and a dictionary of configuration values used to " +"customize the local evaluation process." +msgstr "评估指令包含从服务器接收的(全局)模型参数,以及用于定制本地评估流程的配置值字典。" -#: flwr.common.EventType.center:3 flwr.common.EventType.ljust:3 -#: flwr.common.EventType.rjust:3 of -msgid "Padding is done using the specified fill character (default is a space)." -msgstr "" +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of +msgid "Returns" +msgstr "返回" -#: flwr.common.EventType.count:1 of +#: 
flwr.client.client.Client.evaluate:8 of msgid "" -"Return the number of non-overlapping occurrences of substring sub in " -"string S[start:end]. Optional arguments start and end are interpreted as" -" in slice notation." -msgstr "" - -#: flwr.common.EventType.encode:3 of -msgid "encoding" -msgstr "" +"The evaluation result containing the loss on the local dataset and other " +"details such as the number of local data examples used for evaluation." +msgstr "评估结果包含本地数据集上的损失值和其他详细信息,如用于评估的本地数据的数量。" -#: flwr.common.EventType.encode:4 of -msgid "The encoding in which to encode the string." -msgstr "" +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of +msgid "Return type" +msgstr "返回类型" -#: 
flwr.common.EventType.encode:9 of -#, fuzzy -msgid "errors" -msgstr "错误" +#: flwr.client.client.Client.fit:3 of +msgid "" +"The training instructions containing (global) model parameters received " +"from the server and a dictionary of configuration values used to " +"customize the local training process." +msgstr "训练指令,包含从服务器接收的(全局)模型参数,以及用于定制本地训练过程的配置值字典。" -#: flwr.common.EventType.encode:6 of +#: flwr.client.client.Client.fit:8 of msgid "" -"The error handling scheme to use for encoding errors. The default is " -"'strict' meaning that encoding errors raise a UnicodeEncodeError. Other " -"possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well " -"as any other name registered with codecs.register_error that can handle " -"UnicodeEncodeErrors." -msgstr "" +"The training result containing updated parameters and other details such " +"as the number of local training examples used for training." +msgstr "训练结果包含更新的参数和其他详细信息,如用于训练的本地训练示例的数量。" -#: flwr.common.EventType.endswith:1 of +#: flwr.client.client.Client.get_parameters:3 of msgid "" -"Return True if S ends with the specified suffix, False otherwise. With " -"optional start, test S beginning at that position. With optional end, " -"stop comparing S at that position. suffix can also be a tuple of strings " -"to try." -msgstr "" +"The get parameters instructions received from the server containing a " +"dictionary of configuration values." +msgstr "从服务器接收的获取参数指令包含配置值字典。" -#: flwr.common.EventType.expandtabs:3 of -msgid "If tabsize is not given, a tab size of 8 characters is assumed." -msgstr "" +#: flwr.client.client.Client.get_parameters:7 of +msgid "The current local model parameters." +msgstr "当前的本地模型参数。" -#: flwr.common.EventType.find:1 flwr.common.EventType.index:1 of +#: flwr.client.client.Client.get_properties:3 of msgid "" -"Return the lowest index in S where substring sub is found, such that sub " -"is contained within S[start:end]. 
Optional arguments start and end are " -"interpreted as in slice notation." -msgstr "" +"The get properties instructions received from the server containing a " +"dictionary of configuration values." +msgstr "从服务器接收的获取属性指令包含配置值字典。" -#: flwr.common.EventType.find:5 flwr.common.EventType.rfind:5 of -msgid "Return -1 on failure." -msgstr "" +#: flwr.client.client.Client.get_properties:7 of +msgid "The current client properties." +msgstr "当前客户端属性。" -#: flwr.common.EventType.format:1 of -msgid "" -"Return a formatted version of S, using substitutions from args and " -"kwargs. The substitutions are identified by braces ('{' and '}')." -msgstr "" - -#: flwr.common.EventType.format_map:1 of -msgid "" -"Return a formatted version of S, using substitutions from mapping. The " -"substitutions are identified by braces ('{' and '}')." -msgstr "" +#: ../../source/ref-api/flwr.client.ClientApp.rst:2 +#, fuzzy +msgid "ClientApp" +msgstr "客户端" -#: flwr.common.EventType.index:5 flwr.common.EventType.rindex:5 of -msgid "Raises ValueError when the substring is not found." 
-msgstr "" +#: flwr.client.client_app.ClientApp:1 flwr.client.mod.localdp_mod.LocalDpMod:1 +#: flwr.common.constant.MessageType:1 flwr.common.constant.MessageTypeLegacy:1 +#: flwr.common.context.Context:1 flwr.common.message.Error:1 +#: flwr.common.message.Message:1 flwr.common.message.Metadata:1 +#: flwr.common.record.parametersrecord.Array:1 +#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 +#: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 +#: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 +#: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 +#: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 +#: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 +#: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 +#: flwr.common.typing.Status:1 flwr.server.history.History:1 +#: flwr.server.server.Server:1 flwr.server.server_app.ServerApp:1 +#: flwr.server.server_config.ServerConfig:1 +#: flwr.server.serverapp_components.ServerAppComponents:1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +#, fuzzy +msgid "Bases: :py:class:`object`" +msgstr "Bases: :py:class:`object`" -#: flwr.common.EventType.isalnum:3 of -msgid "" -"A string is alpha-numeric if all characters in the string are alpha-" -"numeric and there is at least one character in the string." 
-msgstr "" +#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 +#: flwr.client.client_app.ClientApp:4 +#: flwr.client.client_app.ClientApp.evaluate:4 +#: flwr.client.client_app.ClientApp.query:4 +#: flwr.client.client_app.ClientApp.train:4 +#: flwr.client.mod.localdp_mod.LocalDpMod:22 +#: flwr.common.record.configsrecord.ConfigsRecord:20 +#: flwr.common.record.metricsrecord.MetricsRecord:19 +#: flwr.common.record.parametersrecord.ParametersRecord:22 +#: flwr.common.record.recordset.RecordSet:23 flwr.server.app.start_server:41 +#: flwr.server.server_app.ServerApp:4 flwr.server.server_app.ServerApp.main:4 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 +#: of +msgid "Examples" +msgstr "实例" -#: flwr.common.EventType.isalpha:3 of +#: flwr.client.client_app.ClientApp:5 of +#, fuzzy msgid "" -"A string is alphabetic if all characters in the string are alphabetic and" -" there is at least one character in the string." -msgstr "" +"Assuming a typical `Client` implementation named `FlowerClient`, you can " +"wrap it in a `ClientApp` as follows:" +msgstr "假定有一个名为 `FlowerClient` 的典型 `Client` 实现,可以将其封装在一个 `ClientApp` 中,如下所示:" -#: flwr.common.EventType.isascii:3 of +#: flwr.client.client_app.ClientApp:16 of +#, fuzzy msgid "" -"ASCII characters have code points in the range U+0000-U+007F. Empty " -"string is ASCII too." 
-msgstr "" +"If the above code is in a Python module called `client`, it can be " +"started as follows:" +msgstr "如果上述代码位于一个名为 \"客户端 \"的 Python 模块中,则可以按如下方式启动它:" -#: flwr.common.EventType.isdecimal:3 of +#: flwr.client.client_app.ClientApp:21 of +#, fuzzy msgid "" -"A string is a decimal string if all characters in the string are decimal " -"and there is at least one character in the string." +"In this `client:app` example, `client` refers to the Python module " +"`client.py` in which the previous code lives in and `app` refers to the " +"global attribute `app` that points to an object of type `ClientApp`." msgstr "" +"在这个 `client:app` 例子中,`client` 指的是前面代码所在的 Python 模块 `client.py`,而 `app` " +"指的是指向 `ClientApp` 类型对象的全局属性 `app` 。" -#: flwr.common.EventType.isdigit:3 of -msgid "" -"A string is a digit string if all characters in the string are digits and" -" there is at least one character in the string." -msgstr "" +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +#, fuzzy +msgid ":py:obj:`evaluate `\\ \\(\\)" +msgstr ":py:obj:`evaluate `\\ \\(\\)" -#: flwr.common.EventType.isidentifier:3 of -msgid "" -"Call keyword.iskeyword(s) to test whether string s is a reserved " -"identifier, such as \"def\" or \"class\"." -msgstr "" +#: flwr.client.client_app.ClientApp.evaluate:1 +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +#, fuzzy +msgid "Return a decorator that registers the evaluate fn with the client app." +msgstr "返回一个装饰器,用于向客户端程序注册评估 fn。" -#: flwr.common.EventType.islower:3 of -msgid "" -"A string is lowercase if all cased characters in the string are lowercase" -" and there is at least one cased character in the string." -msgstr "" +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +#, fuzzy +msgid ":py:obj:`query `\\ \\(\\)" +msgstr ":py:obj:`query `\\ \\(\\)" -#: flwr.common.EventType.isnumeric:3 of -msgid "" -"A string is numeric if all characters in the string are numeric and there" -" is at least one character in the string." 
-msgstr "" +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.query:1 of +#, fuzzy +msgid "Return a decorator that registers the query fn with the client app." +msgstr "返回一个向客户端应用程序注册查询 fn 的装饰器。" -#: flwr.common.EventType.isprintable:3 of -msgid "" -"A string is printable if all of its characters are considered printable " -"in repr() or if it is empty." -msgstr "" +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +#, fuzzy +msgid ":py:obj:`train `\\ \\(\\)" +msgstr "server.strategy.Strategy" -#: flwr.common.EventType.isspace:3 of -msgid "" -"A string is whitespace if all characters in the string are whitespace and" -" there is at least one character in the string." -msgstr "" +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.train:1 of +#, fuzzy +msgid "Return a decorator that registers the train fn with the client app." +msgstr "返回一个装饰器,用于在客户端应用程序中注册火车 fn。" -#: flwr.common.EventType.istitle:3 of -msgid "" -"In a title-cased string, upper- and title-case characters may only follow" -" uncased characters and lowercase characters only cased ones." -msgstr "" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:2 +msgid "NumPyClient" +msgstr "NumPyClient" -#: flwr.common.EventType.isupper:3 of +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#, fuzzy msgid "" -"A string is uppercase if all cased characters in the string are uppercase" -" and there is at least one cased character in the string." +":py:obj:`evaluate `\\ \\(parameters\\, " +"config\\)" msgstr "" +":py:obj:`evaluate `\\ \\(parameters\\, " +"config\\)" -#: flwr.common.EventType.join:3 of -msgid "" -"The string whose method is called is inserted in between each given " -"string. The result is returned as a new string." 
-msgstr "" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#, fuzzy +msgid ":py:obj:`fit `\\ \\(parameters\\, config\\)" +msgstr ":py:obj:`fit `\\ \\(parameters\\, config\\)" -#: flwr.common.EventType.join:6 of -msgid "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" -msgstr "" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.fit:1 of +msgid "Train the provided parameters using the locally held dataset." +msgstr "使用本地数据集训练所提供的参数。" -#: flwr.common.EventType.lstrip:3 flwr.common.EventType.rstrip:3 -#: flwr.common.EventType.strip:3 of -msgid "If chars is given and not None, remove characters in chars instead." -msgstr "" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#, fuzzy +msgid ":py:obj:`get_context `\\ \\(\\)" +msgstr ":py:obj:`get_context `\\ \\(\\)" -#: flwr.common.EventType.maketrans:3 of +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#, fuzzy msgid "" -"If there is only one argument, it must be a dictionary mapping Unicode " -"ordinals (integers) or characters to Unicode ordinals, strings or None. " -"Character keys will be then converted to ordinals. If there are two " -"arguments, they must be strings of equal length, and in the resulting " -"dictionary, each character in x will be mapped to the character at the " -"same position in y. If there is a third argument, it must be a string, " -"whose characters will be mapped to None in the result." +":py:obj:`get_parameters `\\ " +"\\(config\\)" msgstr "" +":py:obj:`get_parameters `\\ " +"\\(config\\)" -#: flwr.common.EventType.partition:3 of +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#, fuzzy msgid "" -"This will search for the separator in the string. If the separator is " -"found, returns a 3-tuple containing the part before the separator, the " -"separator itself, and the part after it." 
+":py:obj:`get_properties `\\ " +"\\(config\\)" msgstr "" +":py:obj:`get_properties `\\ " +"\\(config\\)" -#: flwr.common.EventType.partition:7 of -msgid "" -"If the separator is not found, returns a 3-tuple containing the original " -"string and two empty strings." -msgstr "" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.get_properties:1 of +msgid "Return a client's set of properties." +msgstr "返回客户端的属性集。" -#: flwr.common.EventType.removeprefix:3 of +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#, fuzzy msgid "" -"If the string starts with the prefix string, return string[len(prefix):]." -" Otherwise, return a copy of the original string." +":py:obj:`set_context `\\ " +"\\(context\\)" msgstr "" +":py:obj:`set_context `\\ " +"\\(context\\)" -#: flwr.common.EventType.removesuffix:3 of -msgid "" -"If the string ends with the suffix string and that suffix is not empty, " -"return string[:-len(suffix)]. Otherwise, return a copy of the original " -"string." -msgstr "" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#, fuzzy +msgid ":py:obj:`to_client `\\ \\(\\)" +msgstr ":py:obj:`to_client `\\ \\(\\)" -#: flwr.common.EventType.replace:5 of +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.to_client:1 of +msgid "Convert to object to Client type and return it." +msgstr "将对象转换为客户类型并返回。" + +#: flwr.client.NumPyClient.context:1::1 of #, fuzzy -msgid "count" -msgstr "背景" +msgid ":py:obj:`context `\\" +msgstr ":py:obj:`context `\\" -#: flwr.common.EventType.replace:4 of -msgid "" -"Maximum number of occurrences to replace. -1 (the default value) means " -"replace all occurrences." 
-msgstr "" +#: flwr.client.numpy_client.NumPyClient.evaluate:3 +#: flwr.client.numpy_client.NumPyClient.fit:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:5 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:8 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:5 +#: flwr.server.strategy.strategy.Strategy.configure_fit:5 +#: flwr.server.strategy.strategy.Strategy.evaluate:8 of +msgid "The current (global) model parameters." +msgstr "当前(全局)模型参数。" -#: flwr.common.EventType.replace:7 of +#: flwr.client.numpy_client.NumPyClient.evaluate:5 of msgid "" -"If the optional argument count is given, only the first count occurrences" -" are replaced." -msgstr "" +"Configuration parameters which allow the server to influence evaluation " +"on the client. It can be used to communicate arbitrary values from the " +"server to the client, for example, to influence the number of examples " +"used for evaluation." +msgstr "允许服务器影响客户端评估的配置参数。它可用于将任意值从服务器传送到客户端,例如,影响用于评估的示例数量。" -#: flwr.common.EventType.rfind:1 flwr.common.EventType.rindex:1 of +#: flwr.client.numpy_client.NumPyClient.evaluate:11 of msgid "" -"Return the highest index in S where substring sub is found, such that sub" -" is contained within S[start:end]. Optional arguments start and end are " -"interpreted as in slice notation." +"* **loss** (*float*) -- The evaluation loss of the model on the local " +"dataset. * **num_examples** (*int*) -- The number of examples used for " +"evaluation. * **metrics** (*Dict[str, Scalar]*) -- A dictionary mapping " +"arbitrary string keys to values of type bool, bytes, float, int, or " +"str. It can be used to communicate arbitrary values back to the server." 
msgstr "" +"**loss** (*float*) -- 模型在本地数据集上的评估损失值。**num_examples** (*int*) -- " +"用于评估的示例数量。**metrics** (*Dict[str, Scalar]*) -- 将任意字符串键映射到 " +"bool、bytes、float、int 或 str 类型值的字典。它可用于将任意值传回服务器。" -#: flwr.common.EventType.rpartition:3 of +#: flwr.client.numpy_client.NumPyClient.evaluate:11 of msgid "" -"This will search for the separator in the string, starting at the end. If" -" the separator is found, returns a 3-tuple containing the part before the" -" separator, the separator itself, and the part after it." -msgstr "" +"**loss** (*float*) -- The evaluation loss of the model on the local " +"dataset." +msgstr "**loss** (*float*) -- 模型在本地数据集上的评估损失值。" -#: flwr.common.EventType.rpartition:7 of +#: flwr.client.numpy_client.NumPyClient.evaluate:12 of +msgid "**num_examples** (*int*) -- The number of examples used for evaluation." +msgstr "**num_examples** (*int*) -- 用于评估的示例数量。" + +#: flwr.client.numpy_client.NumPyClient.evaluate:13 +#: flwr.client.numpy_client.NumPyClient.fit:13 of msgid "" -"If the separator is not found, returns a 3-tuple containing two empty " -"strings and the original string." +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. It can be " +"used to communicate arbitrary values back to the server." msgstr "" +"**metrics** (*Dict[str, Scalar]*) -- 将任意字符串键映射到 bool、bytes、float、int 或 " +"str 类型值的字典。它可用于将任意值传回服务器。" -#: flwr.common.EventType.rsplit:7 flwr.common.EventType.split:7 of -msgid "sep" +#: flwr.client.numpy_client.NumPyClient.evaluate:19 of +msgid "" +"The previous return type format (int, float, float) and the extended " +"format (int, float, float, Dict[str, Scalar]) have been deprecated and " +"removed since Flower 0.19." msgstr "" +"自 Flower 0.19 起,之前的返回类型格式(int、float、float)和扩展格式(int、float、float、Dict[str," +" Scalar])已被弃用和移除。" -#: flwr.common.EventType.rsplit:4 flwr.common.EventType.split:4 of -msgid "The separator used to split the string." 
-msgstr "" +#: flwr.client.numpy_client.NumPyClient.fit:5 of +msgid "" +"Configuration parameters which allow the server to influence training on " +"the client. It can be used to communicate arbitrary values from the " +"server to the client, for example, to set the number of (local) training " +"epochs." +msgstr "允许服务器影响客户端训练的配置参数。它可用于将任意值从服务器传送到客户端,例如设置(本地)训练遍历数。" -#: flwr.common.EventType.rsplit:6 flwr.common.EventType.split:6 of +#: flwr.client.numpy_client.NumPyClient.fit:11 of msgid "" -"When set to None (the default value), will split on any whitespace " -"character (including \\\\n \\\\r \\\\t \\\\f and spaces) and will discard" -" empty strings from the result." +"* **parameters** (*NDArrays*) -- The locally updated model parameters. * " +"**num_examples** (*int*) -- The number of examples used for training. * " +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. It can " +"be used to communicate arbitrary values back to the server." msgstr "" +"**parameters** (*NDArrays*) -- 本地更新的模型参数。**num_examples** (*int*) -- " +"用于训练的示例数量。**metrics** (*Dict[str, Scalar]*) -- 将任意字符串键映射到 " +"bool、bytes、float、int 或 str 类型值的字典。它可用于将任意值传回服务器。" -#: flwr.common.EventType.rsplit:11 flwr.common.EventType.split:11 of -msgid "maxsplit" -msgstr "" +#: flwr.client.numpy_client.NumPyClient.fit:11 of +msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." +msgstr "**parameters** (*NDArrays*) -- 本地更新的模型参数。" -#: flwr.common.EventType.rsplit:10 flwr.common.EventType.split:10 of +#: flwr.client.numpy_client.NumPyClient.fit:12 of +msgid "**num_examples** (*int*) -- The number of examples used for training." +msgstr "**num_examples** (*int*) -- 用于训练的数据数量。" + +#: flwr.client.numpy_client.NumPyClient.get_parameters:3 of msgid "" -"Maximum number of splits (starting from the left). -1 (the default value)" -" means no limit." 
-msgstr "" +"Configuration parameters requested by the server. This can be used to " +"tell the client which parameters are needed along with some Scalar " +"attributes." +msgstr "服务器请求的配置参数。这可以用来告诉客户端需要哪些参数以及一些标量属性。" -#: flwr.common.EventType.rsplit:13 of -msgid "Splitting starts at the end of the string and works to the front." -msgstr "" +#: flwr.client.numpy_client.NumPyClient.get_parameters:8 of +msgid "**parameters** -- The local model parameters as a list of NumPy ndarrays." +msgstr "**parameters** -- NumPy ndarrays 的本地模型参数列表。" -#: flwr.common.EventType.split:13 of +#: flwr.client.numpy_client.NumPyClient.get_properties:3 of msgid "" -"Note, str.split() is mainly useful for data that has been intentionally " -"delimited. With natural text that includes punctuation, consider using " -"the regular expression module." -msgstr "" +"Configuration parameters requested by the server. This can be used to " +"tell the client which properties are needed along with some Scalar " +"attributes." +msgstr "服务器请求的配置参数。这可以用来告诉客户端需要哪些属性以及一些标量属性。" -#: flwr.common.EventType.splitlines:3 of +#: flwr.client.numpy_client.NumPyClient.get_properties:8 of msgid "" -"Line breaks are not included in the resulting list unless keepends is " -"given and true." +"**properties** -- A dictionary mapping arbitrary string keys to values of" +" type bool, bytes, float, int, or str. It can be used to communicate " +"arbitrary property values back to the server." msgstr "" +"**properties** -- 将任意字符串键映射到 bool、bytes、float、int 或 str " +"类型值的字典。它可用于将任意属性值传回服务器。" -#: flwr.common.EventType.startswith:1 of -msgid "" -"Return True if S starts with the specified prefix, False otherwise. With " -"optional start, test S beginning at that position. With optional end, " -"stop comparing S at that position. prefix can also be a tuple of strings " -"to try." 
-msgstr "" +#: ../../source/ref-api/flwr.client.mod.rst:2 +#, fuzzy +msgid "mod" +msgstr "模块" -#: flwr.common.EventType.title:3 of +#: ../../source/ref-api/flwr.client.mod.rst:28::1 msgid "" -"More specifically, words start with uppercased characters and all " -"remaining cased characters have lower case." +":py:obj:`adaptiveclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" msgstr "" -#: flwr.common.EventType.translate:5 of +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:1 of #, fuzzy -msgid "table" -msgstr "数据库" +msgid "Client-side adaptive clipping modifier." +msgstr "客户端逻辑" -#: flwr.common.EventType.translate:4 of +#: ../../source/ref-api/flwr.client.mod.rst:28::1 msgid "" -"Translation table, which must be a mapping of Unicode ordinals to Unicode" -" ordinals, strings, or None." +":py:obj:`fixedclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" msgstr "" -#: flwr.common.EventType.translate:7 of -msgid "" -"The table must implement lookup/indexing via __getitem__, for instance a " -"dictionary or list. If this operation raises LookupError, the character " -"is left untouched. Characters mapped to None are deleted." -msgstr "" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:1 of +#, fuzzy +msgid "Client-side fixed clipping modifier." +msgstr "客户端逻辑" -#: flwr.common.EventType.zfill:3 of -msgid "The string is never truncated." +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#, fuzzy +msgid ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" +msgstr ":py:obj:`Client `\\ \\(\\)" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.utils.make_ffn:1 of +msgid "." 
msgstr "" -#: ../../source/ref-api/flwr.common.FitIns.rst:2 +#: ../../source/ref-api/flwr.client.mod.rst:28::1 #, fuzzy -msgid "FitIns" -msgstr "FitIns" +msgid "" +":py:obj:`message_size_mod `\\ \\(msg\\," +" ctxt\\, call\\_next\\)" +msgstr "" +":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " +"error\\]\\)" -#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.comms_mods.message_size_mod:1 of #, fuzzy -msgid ":py:obj:`parameters `\\" -msgstr ":py:obj:`parameters `\\" +msgid "Message size mod." +msgstr "信息类型。" -#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +#: ../../source/ref-api/flwr.client.mod.rst:28::1 #, fuzzy -msgid ":py:obj:`config `\\" -msgstr ":py:obj:`config `\\" +msgid "" +":py:obj:`parameters_size_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" +msgstr "" +":py:obj:`ParametersRecord `\\ " +"\\(\\[array\\_dict\\, keep\\_input\\]\\)" -#: ../../source/ref-api/flwr.common.FitRes.rst:2 +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.comms_mods.parameters_size_mod:1 of #, fuzzy -msgid "FitRes" -msgstr "FitRes" +msgid "Parameters size mod." +msgstr "参数" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +#: ../../source/ref-api/flwr.client.mod.rst:28::1 #, fuzzy -msgid ":py:obj:`status `\\" -msgstr ":py:obj:`status `\\" +msgid "" +":py:obj:`secagg_mod `\\ \\(msg\\, ctxt\\, " +"call\\_next\\)" +msgstr ":py:obj:`set_context `\\ \\(context\\)" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -#, fuzzy -msgid ":py:obj:`parameters `\\" -msgstr ":py:obj:`parameters `\\" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.secure_aggregation.secagg_mod.secagg_mod:1 of +msgid "Handle incoming message and return results, following the SecAgg protocol." 
+msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +#: ../../source/ref-api/flwr.client.mod.rst:28::1 #, fuzzy -msgid ":py:obj:`num_examples `\\" -msgstr ":py:obj:`num_examples `\\" +msgid "" +":py:obj:`secaggplus_mod `\\ \\(msg\\, " +"ctxt\\, call\\_next\\)" +msgstr "" +":py:obj:`SecAggPlusWorkflow `\\ " +"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -#, fuzzy -msgid ":py:obj:`metrics `\\" -msgstr ":py:obj:`metrics `\\" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.secure_aggregation.secaggplus_mod.secaggplus_mod:1 of +msgid "" +"Handle incoming message and return results, following the SecAgg+ " +"protocol." +msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:2 +#: ../../source/ref-api/flwr.client.mod.rst:35::1 #, fuzzy -msgid "GetParametersIns" -msgstr "参数" +msgid "" +":py:obj:`LocalDpMod `\\ \\(clipping\\_norm\\," +" sensitivity\\, ...\\)" +msgstr "" +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " +"mods\\]\\)" -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:28::1 +#: ../../source/ref-api/flwr.client.mod.rst:35::1 +#: flwr.client.mod.localdp_mod.LocalDpMod:1 of #, fuzzy -msgid ":py:obj:`config `\\" -msgstr ":py:obj:`config `\\" +msgid "Modifier for local differential privacy." +msgstr "差分隐私" -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:2 +#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:2 #, fuzzy -msgid "GetParametersRes" -msgstr "参数" +msgid "LocalDpMod" +msgstr "本地 DP 模式" -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 -#, fuzzy -msgid ":py:obj:`status `\\" -msgstr ":py:obj:`status `\\" +#: flwr.client.mod.localdp_mod.LocalDpMod:3 of +msgid "" +"This mod clips the client model updates and adds noise to the params " +"before sending them to the server." 
+msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 -#, fuzzy -msgid ":py:obj:`parameters `\\" -msgstr ":py:obj:`parameters `\\" +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:12 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:10 +#: flwr.client.mod.localdp_mod.LocalDpMod:6 of +msgid "It operates on messages of type `MessageType.TRAIN`." +msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:2 +#: flwr.client.mod.localdp_mod.LocalDpMod:8 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 +#: of #, fuzzy -msgid "GetPropertiesIns" -msgstr "GetPropertiesIns" +msgid "The value of the clipping norm." +msgstr "削波法线的值。" -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:28::1 -#, fuzzy -msgid ":py:obj:`config `\\" -msgstr ":py:obj:`config `\\" +#: flwr.client.mod.localdp_mod.LocalDpMod:10 of +msgid "The sensitivity of the client model." +msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:2 -#, fuzzy -msgid "GetPropertiesRes" -msgstr "GetPropertiesRes" +#: flwr.client.mod.localdp_mod.LocalDpMod:12 of +msgid "" +"The privacy budget. Smaller value of epsilon indicates a higher level of " +"privacy protection." +msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 -#, fuzzy -msgid ":py:obj:`status `\\" -msgstr ":py:obj:`status `\\" +#: flwr.client.mod.localdp_mod.LocalDpMod:15 of +msgid "" +"The failure probability. The probability that the privacy mechanism fails" +" to provide the desired level of privacy. A smaller value of delta " +"indicates a stricter privacy guarantee." 
+msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 -#, fuzzy -msgid ":py:obj:`properties `\\" -msgstr ":py:obj:`properties `\\" +#: flwr.client.mod.localdp_mod.LocalDpMod:23 of +msgid "Create an instance of the local DP mod and add it to the client-side mods:" +msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:2 +#: ../../source/ref-api/flwr.client.mod.adaptiveclipping_mod.rst:2 +msgid "adaptiveclipping\\_mod" +msgstr "" + +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:3 of #, fuzzy -msgid "Message" -msgstr "服务器端" +msgid "" +"This mod needs to be used with the " +"DifferentialPrivacyClientSideAdaptiveClipping server-side strategy " +"wrapper." +msgstr "用 \"DifferentialPrivacyClientSideAdaptiveClipping \"包装器对策略进行包装:" -#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 -#: flwr.common.message.Message:3 of +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:6 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:6 of #, fuzzy -msgid "A dataclass including information about the message to be executed." -msgstr "数据类型,包括要执行的信息的相关信息。" +msgid "The wrapper sends the clipping_norm value to the client." +msgstr "向客户发送近端因子mu" -#: flwr.common.message.Message:5 of -#, fuzzy -msgid "" -"Holds records either sent by another entity (e.g. sent by the server-side" -" logic to a client, or vice-versa) or that will be sent to it." -msgstr "保存由其他实体发送的记录(如由服务器端逻辑发送到客户端,反之亦然)或将发送到该实体的记录。" +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:8 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:8 of +msgid "This mod clips the client model updates before sending them to the server." +msgstr "" -#: flwr.common.message.Message:8 of -#, fuzzy +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:10 of msgid "" -"A dataclass that captures information about an error that took place when" -" processing another message." 
-msgstr "数据类,用于捕捉处理其他报文时发生的错误信息。" +"It also sends KEY_NORM_BIT to the server for computing the new clipping " +"value." +msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:15 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:13 +#: flwr.server.driver.driver.Driver.send_and_receive:18 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 +#: of #, fuzzy -msgid "" -":py:obj:`create_error_reply `\\ " -"\\(error\\[\\, ttl\\]\\)" +msgid "Notes" +msgstr "无" + +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:16 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:14 of +msgid "Consider the order of mods when using multiple." msgstr "" -":py:obj:`create_error_reply `\\ " -"\\(error\\, ttl\\)" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.create_error_reply:1 of +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:18 of +msgid "Typically, adaptiveclipping_mod should be the last to operate on params." +msgstr "" + +#: ../../source/ref-api/flwr.client.mod.fixedclipping_mod.rst:2 #, fuzzy -msgid "Construct a reply message indicating an error happened." -msgstr "构建一条回复信息,说明发生了错误。" +msgid "fixedclipping\\_mod" +msgstr "剪贴" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:3 of #, fuzzy msgid "" -":py:obj:`create_reply `\\ " -"\\(content\\[\\, ttl\\]\\)" +"This mod needs to be used with the " +"DifferentialPrivacyClientSideFixedClipping server-side strategy wrapper." +msgstr "用 \"DifferentialPrivacyClientSideFixedClipping \"包装器包装策略:" + +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:16 of +msgid "Typically, fixedclipping_mod should be the last to operate on params." 
msgstr "" -":py:obj:`create_reply `\\ \\(content\\," -" ttl\\)" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.create_reply:1 of -#, fuzzy -msgid "Create a reply to this message with specified content and TTL." -msgstr "以指定的内容和 TTL 创建对该信息的回复。" +#: ../../source/ref-api/flwr.client.mod.make_ffn.rst:2 +msgid "make\\_ffn" +msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#, fuzzy -msgid ":py:obj:`has_content `\\ \\(\\)" -msgstr ":py:obj:`has_content `\\ \\(\\)" +#: ../../source/ref-api/flwr.client.mod.message_size_mod.rst:2 +msgid "message\\_size\\_mod" +msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.has_content:1 of -#, fuzzy -msgid "Return True if message has content, else False." -msgstr "如果信息有内容,则返回 True,否则返回 False。" +#: flwr.client.mod.comms_mods.message_size_mod:3 of +msgid "This mod logs the size in bytes of the message being transmited." +msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: ../../source/ref-api/flwr.client.mod.parameters_size_mod.rst:2 #, fuzzy -msgid ":py:obj:`has_error `\\ \\(\\)" -msgstr ":py:obj:`has_error `\\ \\(\\)" +msgid "parameters\\_size\\_mod" +msgstr "参数" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.has_error:1 of -#, fuzzy -msgid "Return True if message has an error, else False." -msgstr "如果信息有错误,则返回 True,否则返回 False。" +#: flwr.client.mod.comms_mods.parameters_size_mod:3 of +msgid "" +"This mod logs the number of parameters transmitted in the message as well" +" as their size in bytes." 
+msgstr "" -#: flwr.common.Message.content:1::1 of -#, fuzzy -msgid ":py:obj:`content `\\" -msgstr ":py:obj:`content `\\" +#: ../../source/ref-api/flwr.client.mod.secagg_mod.rst:2 +msgid "secagg\\_mod" +msgstr "" -#: flwr.common.Message.content:1 flwr.common.Message.content:1::1 -#: of +#: ../../source/ref-api/flwr.client.mod.secaggplus_mod.rst:2 #, fuzzy -msgid "The content of this message." -msgstr "评估客户端的反应。" +msgid "secaggplus\\_mod" +msgstr "工作流程" -#: flwr.common.Message.content:1::1 of +#: ../../source/ref-api/flwr.client.start_client.rst:2 #, fuzzy -msgid ":py:obj:`error `\\" -msgstr ":py:obj:`error `\\" +msgid "start\\_client" +msgstr "启动客户端" -#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of -#, fuzzy -msgid "Error captured by this message." -msgstr "该信息捕捉到的错误。" +#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of +msgid "" +"The IPv4 or IPv6 address of the server. If the Flower server runs on the " +"same machine on port 8080, then `server_address` would be " +"`\"[::]:8080\"`." +msgstr "" +"服务器的 IPv4 或 IPv6 地址:如果 Flower 服务器在同一台机器上运行,端口为 " +"8080,则`server_address`应为`\"[::]:8080\"`。" -#: flwr.common.Message.content:1::1 of -#, fuzzy -msgid ":py:obj:`metadata `\\" -msgstr ":py:obj:`metadata `\\" +#: flwr.client.app.start_client:7 of +msgid "A callable that instantiates a Client. (default: None)" +msgstr "用于实例化客户端的可调用程序。(默认值:无)" -#: flwr.common.message.Message.create_error_reply:3 of -#, fuzzy -msgid "The error that was encountered." -msgstr "遇到的错误。" +#: flwr.client.app.start_client:9 of +msgid "" +"An implementation of the abstract base class `flwr.client.Client` " +"(default: None)" +msgstr "抽象基类 `flwr.client.Client` 的实现(默认值:无)" -#: flwr.common.message.Message.create_error_reply:5 -#: flwr.common.message.Message.create_reply:9 of +#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of msgid "" -"Time-to-live for this message in seconds. 
If unset, it will be set based " -"on the remaining time for the received message before it expires. This " -"follows the equation: ttl = msg.meta.ttl - (reply.meta.created_at - " -"msg.meta.created_at)" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" server. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower server needs to be started with the same value (see " +"`flwr.server.start_server`), otherwise it will not know about the " +"increased limit and block larger messages." msgstr "" +"可与 Flower 服务器交换的 gRPC 信息的最大长度:默认值对大多数模型都足够了。训练超大模型的用户可能需要增加该值。请注意,Flower " +"服务器需要以相同的值启动(请参阅 `flwr.server.start_server`),否则它将不知道增加的限制并阻止更大的消息。" -#: flwr.common.message.Message.create_error_reply:5 -#: flwr.common.message.Message.create_reply:9 of +#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 of msgid "" -"Time-to-live for this message in seconds. If unset, it will be set based " -"on the remaining time for the received message before it expires. This " -"follows the equation:" -msgstr "" +"The PEM-encoded root certificates as a byte string or a path string. If " +"provided, a secure connection using the certificates will be established " +"to an SSL-enabled Flower server." +msgstr "字节字符串或路径字符串形式的 PEM 编码根证书。如果提供,将使用这些证书与启用 SSL 的 Flower 服务器建立安全连接。" -#: flwr.common.message.Message.create_error_reply:9 -#: flwr.common.message.Message.create_reply:13 of -msgid "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" +#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of +#, fuzzy +msgid "" +"Starts an insecure gRPC connection when True. Enables HTTPS connection " +"when False, using system certificates if `root_certificates` is None." 
msgstr "" +"为 True 时启动不安全的 gRPC 连接。False 时启用 HTTPS 连接,如果 `root_certificates` 为 " +"None,则使用系统证书。" -#: flwr.common.message.Message.create_reply:3 of -#, fuzzy +#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of msgid "" -"The method generates a new `Message` as a reply to this message. It " -"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " -"this message and sets 'reply_to_message' to the ID of this message." +"Configure the transport layer. Allowed values: - 'grpc-bidi': gRPC, " +"bidirectional streaming - 'grpc-rere': gRPC, request-response " +"(experimental) - 'rest': HTTP (experimental)" msgstr "" -"该方法会生成一条新的 \"信息\",作为对该信息的回复。该方法继承了该消息的 " -"\"run_id\"、\"src_node_id\"、\"dst_node_id \"和 \"message_type\",并将 " -"\"reply_to_message \"设置为该消息的 ID。" +"配置传输层:允许的值包括 - 'grpc-bidi': gRPC,双向流 - 'grpc-rere': gRPC,请求-响应(实验性) - " +"'rest': HTTP(实验性)" -#: flwr.common.message.Message.create_reply:7 of +#: flwr.client.app.start_client:31 of #, fuzzy -msgid "The content for the reply message." -msgstr "回复信息的内容。" +msgid "" +"The maximum number of times the client will try to connect to the server " +"before giving up in case of a connection error. If set to None, there is " +"no limit to the number of tries." +msgstr "客户端在出现连接错误时放弃连接服务器的最大尝试次数。如果设置为 \"无\",则不限制尝试次数。" -#: flwr.common.message.Message.create_reply:16 of +#: flwr.client.app.start_client:35 of #, fuzzy -msgid "A new `Message` instance representing the reply." -msgstr "代表回复的新的 `Message` 实例。" +msgid "" +"The maximum duration before the client stops trying to connect to the " +"server in case of connection error. If set to None, there is no limit to " +"the total time." 
+msgstr "在出现连接错误时,客户端停止尝试连接服务器之前的最长持续时间。如果设置为 \"无\",则总时间没有限制。" -#: ../../source/ref-api/flwr.common.MessageType.rst:2 -#, fuzzy -msgid "MessageType" -msgstr "返回类型" +#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of +msgid "Starting a gRPC client with an insecure server connection:" +msgstr "使用不安全的服务器连接启动 gRPC 客户端:" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of #, fuzzy -msgid ":py:obj:`EVALUATE `\\" -msgstr ":py:obj:`EVALUATE `\\" +msgid "Starting an SSL-enabled gRPC client using system certificates:" +msgstr "启动支持 SSL 的 gRPC 客户端:" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of #, fuzzy -msgid ":py:obj:`QUERY `\\" -msgstr ":py:obj:`QUERY `\\" +msgid "Starting an SSL-enabled gRPC client using provided certificates:" +msgstr "启动支持 SSL 的 gRPC 客户端:" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +#: ../../source/ref-api/flwr.client.start_numpy_client.rst:2 #, fuzzy -msgid ":py:obj:`TRAIN `\\" -msgstr ":py:obj:`TRAIN `\\" +msgid "start\\_numpy\\_client" +msgstr "start_numpy_client" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 +#: flwr.client.app.start_numpy_client:5 of #, fuzzy -msgid "MessageTypeLegacy" -msgstr "MessageTypeLegacy" +msgid "" +"This function is deprecated since 1.7.0. Use " +":code:`flwr.client.start_client` instead and first convert your " +":code:`NumPyClient` to type :code:`flwr.client.Client` by executing its " +":code:`to_client()` method." 
+msgstr "" +"自 1.7.0 起该函数已被弃用。请使用 :code:`flwr.client.start_client`,并首先通过执行 " +":code:`to_client()`方法将 :code:`NumPyClient`转换为 :code:`flwr.client.Client`。" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 -#, fuzzy -msgid ":py:obj:`GET_PARAMETERS `\\" -msgstr ":py:obj:`GET_PARAMETERS `\\" +#: flwr.client.app.start_numpy_client:13 of +msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." +msgstr "抽象基类 `flwr.client.NumPyClient` 的实现。" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 -#, fuzzy -msgid ":py:obj:`GET_PROPERTIES `\\" -msgstr ":py:obj:`GET_PROPERTIES `\\" +#: ../../source/ref-api/flwr.common.rst:2 +msgid "common" +msgstr "常见" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of +#: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy -msgid "An identifier for the current run." -msgstr "当前运行的标识符。" +msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" +msgstr ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.record.conversion_utils.array_from_numpy:1 of #, fuzzy -msgid "An identifier for the current message." -msgstr "当前信息的标识符。" +msgid "Create Array from NumPy ndarray." +msgstr "将参数对象转换为 NumPy ndarrays。" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of +#: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy -msgid "An identifier for the node sending this message." -msgstr "发送此信息的节点的标识符。" - -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.dst_node_id:1 flwr.common.message.Metadata:9 of -#, fuzzy -msgid "An identifier for the node receiving this message." 
-msgstr "接收此信息的节点的标识符。" +msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" +msgstr ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of -#, fuzzy -msgid "An identifier for the message this message replies to." -msgstr "该信息回复的信息的标识符。" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.bytes_to_ndarray:1 of +msgid "Deserialize NumPy ndarray from bytes." +msgstr "从字节反序列化 NumPy ndarray。" -#: flwr.common.message.Metadata:13 of +#: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy msgid "" -"An identifier for grouping messages. In some settings, this is used as " -"the FL round." -msgstr "用于分组报文的标识符。在某些设置中,它被用作 FL 轮。" +":py:obj:`configure `\\ \\(identifier\\[\\, " +"filename\\, host\\]\\)" +msgstr "" +":py:obj:`configure `\\ \\(identifier\\[\\, " +"filename\\, host\\]\\)" -#: flwr.common.message.Metadata:16 of +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.logger.configure:1 of +msgid "Configure logging to file and/or remote log server." +msgstr "配置将日志记录到文件和/或远程日志服务器。" + +#: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy -msgid "Time-to-live for this message in seconds." -msgstr "该信息的有效时间。" +msgid "" +":py:obj:`event `\\ \\(event\\_type\\[\\, " +"event\\_details\\]\\)" +msgstr "" +":py:obj:`event `\\ \\(event\\_type\\[\\, " +"event\\_details\\]\\)" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.telemetry.event:1 of #, fuzzy -msgid "A string that encodes the action to be executed on the receiving end." -msgstr "编码接收端要执行的操作的字符串。" +msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." 
+msgstr "将 create_event 提交给 ThreadPoolExecutor 以避免阻塞。" -#: flwr.common.message.Metadata:21 of +#: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy msgid "" -"An identifier that can be used when loading a particular data partition " -"for a ClientApp. Making use of this identifier is more relevant when " -"conducting simulations." -msgstr "为 ClientApp 加载特定数据分区时可使用的标识符。在进行模拟时,使用该标识符更有意义。" +":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " +"\\*\\*kwargs\\)" +msgstr "" +":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " +"\\*\\*kwargs\\)" -#: flwr.common.Metadata.created_at:1::1 of +#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 +#: of +msgid "Log 'msg % args' with the integer severity 'level'." +msgstr "以整数严重性 \"级别 \"记录 \"msg % args\"。" + +#: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy -msgid ":py:obj:`created_at `\\" -msgstr ":py:obj:`ttl `\\" +msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" +msgstr ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" -#: flwr.common.Metadata.created_at:1 -#: flwr.common.Metadata.created_at:1::1 of -msgid "Unix timestamp when the message was created." -msgstr "" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.ndarray_to_bytes:1 of +msgid "Serialize NumPy ndarray to bytes." 
+msgstr "将 NumPy ndarray 序列化为字节。" -#: flwr.common.Metadata.created_at:1::1 of +#: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy -msgid ":py:obj:`dst_node_id `\\" -msgstr ":py:obj:`dst_node_id `\\" +msgid "" +":py:obj:`ndarrays_to_parameters `\\ " +"\\(ndarrays\\)" +msgstr "" +":py:obj:`ndarrays_to_parameters `\\ " +"\\(ndarrays\\)" -#: flwr.common.Metadata.created_at:1::1 of -#, fuzzy -msgid ":py:obj:`group_id `\\" -msgstr ":py:obj:`group_id `\\" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.ndarrays_to_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 +#: of +msgid "Convert NumPy ndarrays to parameters object." +msgstr "将 NumPy ndarrays 转换为参数对象。" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.group_id:1 of +#: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy -msgid "An identifier for grouping messages." -msgstr "用于分组信息的标识符。" +msgid ":py:obj:`now `\\ \\(\\)" +msgstr ":py:obj:`now `\\ \\(\\)" -#: flwr.common.Metadata.created_at:1::1 of -#, fuzzy -msgid ":py:obj:`message_id `\\" -msgstr ":py:obj:`message_id `\\" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.date.now:1 of +msgid "Construct a datetime from time.time() with time zone set to UTC." +msgstr "从 time.time() 生成日期时间,时区设置为 UTC。" -#: flwr.common.Metadata.created_at:1::1 of +#: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy -msgid ":py:obj:`message_type `\\" -msgstr ":py:obj:`message_type `\\" +msgid "" +":py:obj:`parameters_to_ndarrays `\\ " +"\\(parameters\\)" +msgstr "" +":py:obj:`parameters_to_ndarrays `\\ " +"\\(parameters\\)" -#: flwr.common.Metadata.created_at:1::1 of -#, fuzzy -msgid ":py:obj:`partition_id `\\" -msgstr ":py:obj:`partition_id `\\" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.parameters_to_ndarrays:1 of +msgid "Convert parameters object to NumPy ndarrays." 
+msgstr "将参数对象转换为 NumPy ndarrays。" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.partition_id:1 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "An identifier telling which data partition a ClientApp should use." -msgstr "告诉 ClientApp 应使用哪个数据分区的标识符。" +msgid "" +":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " +"data\\)" +msgstr "" +":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " +"data\\)" -#: flwr.common.Metadata.created_at:1::1 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.parametersrecord.Array:1 of #, fuzzy -msgid ":py:obj:`reply_to_message `\\" -msgstr ":py:obj:`reply_to_message `\\" +msgid "Array type." +msgstr "返回类型" -#: flwr.common.Metadata.created_at:1::1 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid ":py:obj:`run_id `\\" -msgstr ":py:obj:`run_id `\\" +msgid "" +":py:obj:`ClientMessage `\\ " +"\\(\\[get\\_properties\\_res\\, ...\\]\\)" +msgstr "" +":py:obj:`ClientMessage `\\ " +"\\(\\[get\\_properties\\_res\\, ...\\]\\)" -#: flwr.common.Metadata.created_at:1::1 of -#, fuzzy -msgid ":py:obj:`src_node_id `\\" -msgstr ":py:obj:`src_node_id `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ClientMessage:1 of +msgid "ClientMessage is a container used to hold one result message." +msgstr "ClientMessage 是用于容纳一条结果信息的容器。" -#: flwr.common.Metadata.created_at:1::1 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid ":py:obj:`ttl `\\" -msgstr ":py:obj:`ttl `\\" +msgid ":py:obj:`Code `\\ \\(value\\)" +msgstr ":py:obj:`Code `\\ \\(value\\)" -#: flwr.common.Metadata.created_at:1::1 flwr.common.Metadata.ttl:1 -#: of -#, fuzzy -msgid "Time-to-live for this message." -msgstr "该信息的有效时间。" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Code:1 of +msgid "Client status codes." 
+msgstr "客户端状态代码。" -#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "MetricsRecord" -msgstr "MetricsRecord" +msgid ":py:obj:`Config `\\" +msgstr ":py:obj:`config `\\" -#: flwr.common.record.metricsrecord.MetricsRecord:1 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`int` | :py:class:`float` | " -":py:class:`~typing.List`\\ [:py:class:`int`] | :py:class:`~typing.List`\\" -" [:py:class:`float`]]" +"alias of :py:class:`dict`\\ [:py:class:`str`, :py:class:`bool` | " +":py:class:`bytes` | :py:class:`float` | :py:class:`int` | " +":py:class:`str`]" msgstr "" "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " "[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " ":py:class:`float`, :py:class:`~typing.List`\\ [:py:class:`int`], " ":py:class:`~typing.List`\\ [:py:class:`float`]]]" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -#, fuzzy -msgid ":py:obj:`clear `\\ \\(\\)" -msgstr ":py:obj:`clear `\\ \\(\\)" - -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -#, fuzzy -msgid ":py:obj:`count_bytes `\\ \\(\\)" -msgstr ":py:obj:`count_bytes `\\ \\(\\)" - -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -#, fuzzy -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" - -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -#, fuzzy -msgid ":py:obj:`items `\\ \\(\\)" -msgstr ":py:obj:`items `\\ \\(\\)" - -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid ":py:obj:`keys `\\ \\(\\)" -msgstr ":py:obj:`keys `\\ \\(\\)" +msgid "" +":py:obj:`ConfigsRecord `\\ " +"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" +msgstr "" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: 
flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.configsrecord.ConfigsRecord:1 of #, fuzzy -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgid "Configs record." +msgstr "配置日志记录" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" -msgstr "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" +":py:obj:`Context `\\ \\(node\\_id\\, " +"node\\_config\\, state\\, run\\_config\\)" +msgstr ":py:obj:`Context `\\ \\(state\\)" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.context.Context:1 of #, fuzzy -msgid ":py:obj:`values `\\ \\(\\)" -msgstr ":py:obj:`values `\\ \\(\\)" +msgid "Context of your run." +msgstr "您的运行状态。" -#: ../../source/ref-api/flwr.common.NDArray.rst:2 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "NDArray" -msgstr "NDArray" +msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" +msgstr ":py:obj:`DisconnectRes `\\ \\(reason\\)" -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 -#, fuzzy -msgid ":py:obj:`tensors `\\" -msgstr ":py:obj:`tensors `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.DisconnectRes:1 of +msgid "DisconnectRes message from client to server." 
+msgstr "客户端向服务器发送 DisconnectRes 信息。" -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid ":py:obj:`tensor_type `\\" -msgstr ":py:obj:`tensor_type `\\" +msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" +msgstr ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" -#: ../../source/ref-api/flwr.common.ParametersRecord.rst:2 +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.message.Error:1 of #, fuzzy -msgid "ParametersRecord" -msgstr "参数" +msgid "A dataclass that stores information about an error that occurred." +msgstr "数据类,用于存储所发生错误的相关信息。" -#: flwr.common.record.parametersrecord.ParametersRecord:1 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" +":py:obj:`EvaluateIns `\\ \\(parameters\\, " +"config\\)" msgstr "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" +":py:obj:`EvaluateIns `\\ \\(parameters\\, " +"config\\)" -#: flwr.common.record.parametersrecord.ParametersRecord:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.EvaluateIns:1 of +msgid "Evaluate instructions for a client." +msgstr "评估客户端的指示。" + +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -"A dataclass storing named Arrays in order. This means that it holds " -"entries as an OrderedDict[str, Array]. ParametersRecord objects can be " -"viewed as an equivalent to PyTorch's state_dict, but holding serialised " -"tensors instead." 
+":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " +"num\\_examples\\, metrics\\)" msgstr "" -"按顺序存储命名数组的数据类。这意味着它以 OrderedDict[str, Array] 的形式保存条目。ParametersRecord " -"对象相当于 PyTorch 的 state_dict,但它保存的是序列化的张量。" +":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " +"num\\_examples\\, metrics\\)" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -#, fuzzy -msgid ":py:obj:`clear `\\ \\(\\)" -msgstr ":py:obj:`clear `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.EvaluateRes:1 of +msgid "Evaluate response from a client." +msgstr "评估客户端的反应。" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid ":py:obj:`count_bytes `\\ \\(\\)" -msgstr ":py:obj:`count_bytes `\\ \\(\\)" +msgid ":py:obj:`EventType `\\ \\(value\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -#, fuzzy -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.telemetry.EventType:1 of +msgid "Types of telemetry events." +msgstr "遥测事件类型。" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid ":py:obj:`items `\\ \\(\\)" -msgstr ":py:obj:`items `\\ \\(\\)" +msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" +msgstr ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.FitIns:1 of +msgid "Fit instructions for a client." 
+msgstr "为客户提供安装说明。" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid ":py:obj:`keys `\\ \\(\\)" -msgstr ":py:obj:`keys `\\ \\(\\)" +msgid "" +":py:obj:`FitRes `\\ \\(status\\, parameters\\, " +"num\\_examples\\, metrics\\)" +msgstr "" +":py:obj:`FitRes `\\ \\(status\\, parameters\\, " +"num\\_examples\\, metrics\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.FitRes:1 of +msgid "Fit response from a client." +msgstr "来自客户端的合适回复。" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" +msgstr ":py:obj:`GetParametersIns `\\ \\(config\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetParametersIns:1 of +msgid "Parameters request for a client." +msgstr "客户端的参数请求。" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" +":py:obj:`GetParametersRes `\\ \\(status\\, " +"parameters\\)" msgstr "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" +":py:obj:`GetParametersRes `\\ \\(status\\, " +"parameters\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetParametersRes:1 of +msgid "Response when asked to return parameters." 
+msgstr "要求返回参数时的响应。" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid ":py:obj:`values `\\ \\(\\)" -msgstr ":py:obj:`values `\\ \\(\\)" +msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" +msgstr ":py:obj:`GetPropertiesIns `\\ \\(config\\)" -#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetPropertiesIns:1 of +msgid "Properties request for a client." +msgstr "客户端的属性请求。" + +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -"Note that a small amount of Bytes might also be included in this counting" -" that correspond to metadata of the serialized object (e.g. of NumPy " -"array) needed for deseralization." -msgstr "请注意,该计数中还可能包含少量字节,这些字节与序列化对象(如 NumPy 数组)的元数据相对应,需要进行去eralization。" +":py:obj:`GetPropertiesRes `\\ \\(status\\, " +"properties\\)" +msgstr "" +":py:obj:`GetPropertiesRes `\\ \\(status\\, " +"properties\\)" -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetPropertiesRes:1 of +msgid "Properties response from a client." +msgstr "来自客户端的属性响应。" + +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "ReconnectIns" -msgstr "启用 SSL 连接" +msgid "" +":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " +"error\\]\\)" +msgstr "" +":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " +"error\\]\\)" -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:28::1 +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.message.Message:1 of #, fuzzy -msgid ":py:obj:`seconds `\\" -msgstr ":py:obj:`seconds `\\" +msgid "State of your application from the viewpoint of the entity using it." 
+msgstr "从使用实体的角度看应用程序的状态。" -#: ../../source/ref-api/flwr.common.RecordSet.rst:2 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "RecordSet" -msgstr "RecordSet" +msgid ":py:obj:`MessageType `\\ \\(\\)" +msgstr ":py:obj:`MessageType `\\ \\(\\)" -#: flwr.common.RecordSet.configs_records:1::1 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.constant.MessageType:1 of #, fuzzy -msgid ":py:obj:`configs_records `\\" -msgstr ":py:obj:`configs_records `\\" +msgid "Message type." +msgstr "信息类型。" -#: flwr.common.RecordSet.configs_records:1 -#: flwr.common.RecordSet.configs_records:1::1 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "Dictionary holding ConfigsRecord instances." -msgstr "包含 ConfigsRecord 实例的字典。" +msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" +msgstr ":py:obj:`MessageTypeLegacy `\\ \\(\\)" -#: flwr.common.RecordSet.configs_records:1::1 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.constant.MessageTypeLegacy:1 of #, fuzzy -msgid ":py:obj:`metrics_records `\\" -msgstr ":py:obj:`metrics_records `\\" +msgid "Legacy message type." +msgstr "传统信息类型。" -#: flwr.common.RecordSet.configs_records:1::1 -#: flwr.common.RecordSet.metrics_records:1 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "Dictionary holding MetricsRecord instances." -msgstr "保存 MetricsRecord 实例的字典。" +msgid "" +":py:obj:`Metadata `\\ \\(run\\_id\\, " +"message\\_id\\, src\\_node\\_id\\, ...\\)" +msgstr "" +":py:obj:`Metadata `\\ \\(run\\_id\\, " +"message\\_id\\, src\\_node\\_id\\, ...\\)" -#: flwr.common.RecordSet.configs_records:1::1 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.message.Metadata:1 of #, fuzzy -msgid ":py:obj:`parameters_records `\\" -msgstr ":py:obj:`parameters_records `\\" +msgid "A dataclass holding metadata associated with the current message." 
+msgstr "数据类型,包含与当前报文相关的元数据。" -#: flwr.common.RecordSet.configs_records:1::1 -#: flwr.common.RecordSet.parameters_records:1 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "Dictionary holding ParametersRecord instances." -msgstr "存放 ParametersRecord 实例的字典。" +msgid ":py:obj:`Metrics `\\" +msgstr ":py:obj:`metrics `\\" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:2 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "ServerMessage" -msgstr "服务器端" +msgid "" +":py:obj:`MetricsRecord `\\ " +"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" +msgstr "" +":py:obj:`MetricsRecord `\\ " +"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.metricsrecord.MetricsRecord:1 of #, fuzzy -msgid ":py:obj:`evaluate_ins `\\" -msgstr ":py:obj:`evaluate_ins `\\" +msgid "Metrics recod." +msgstr "指标记录。" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid ":py:obj:`fit_ins `\\" -msgstr ":py:obj:`fit_ins `\\" +msgid ":py:obj:`NDArray `\\" +msgstr ":py:obj:`NDArray `\\" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -":py:obj:`get_parameters_ins " -"`\\" +"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " +":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" msgstr "" -":py:obj:`get_parameters_ins " -"`\\" +"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " +":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +#: ../../source/ref-api/flwr.common.rst:68::1 +#, fuzzy +msgid ":py:obj:`NDArrays `\\" +msgstr ":py:obj:`NDArray `\\" + +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -":py:obj:`get_properties_ins " -"`\\" +"alias of :py:class:`list`\\ [:py:class:`~numpy.ndarray`\\ " 
+"[:py:obj:`~typing.Any`, :py:class:`~numpy.dtype`\\ " +"[:py:obj:`~typing.Any`]]]" msgstr "" -":py:obj:`get_properties_ins " -"`\\" +"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " +":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" -#: ../../source/ref-api/flwr.common.Status.rst:2 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "Status" -msgstr "客户端状态。" +msgid "" +":py:obj:`Parameters `\\ \\(tensors\\, " +"tensor\\_type\\)" +msgstr "" +":py:obj:`Parameters `\\ \\(tensors\\, " +"tensor\\_type\\)" -#: ../../source/ref-api/flwr.common.Status.rst:29::1 -#, fuzzy -msgid ":py:obj:`code `\\" -msgstr ":py:obj:`code `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Parameters:1 of +msgid "Model parameters." +msgstr "模型参数。" -#: ../../source/ref-api/flwr.common.Status.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid ":py:obj:`message `\\" -msgstr ":py:obj:`message `\\" +msgid "" +":py:obj:`ParametersRecord `\\ " +"\\(\\[array\\_dict\\, keep\\_input\\]\\)" +msgstr "" +":py:obj:`ParametersRecord `\\ " +"\\(\\[array\\_dict\\, keep\\_input\\]\\)" -#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.parametersrecord.ParametersRecord:1 of #, fuzzy -msgid "array\\_from\\_numpy" -msgstr "array\\_from\\_numpy" +msgid "Parameters record." 
+msgstr "参数" -#: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "bytes\\_to\\_ndarray" -msgstr "bytes\\_to\\_ndarray" +msgid ":py:obj:`Properties `\\" +msgstr ":py:obj:`properties `\\" -#: ../../source/ref-api/flwr.common.configure.rst:2 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "configure" -msgstr "配置日志记录" +msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" +msgstr ":py:obj:`ReconnectIns `\\ \\(seconds\\)" -#: ../../source/ref-api/flwr.common.event.rst:2 -#, fuzzy -msgid "event" -msgstr "事件" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ReconnectIns:1 of +msgid "ReconnectIns message from server to client." +msgstr "服务器发送给客户端的重新连接信息。" -#: ../../source/ref-api/flwr.common.log.rst:2 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "log" -msgstr "登录" - -#: logging.Logger.log:3 of msgid "" -"To pass exception information, use the keyword argument exc_info with a " -"true value, e.g." -msgstr "要传递异常信息,请使用带 true 值的关键字参数 exc_info,例如。" - -#: logging.Logger.log:6 of -#, python-format -msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" -msgstr "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" +":py:obj:`RecordSet `\\ " +"\\(\\[parameters\\_records\\, ...\\]\\)" +msgstr "" +":py:obj:`RecordSet `\\ " +"\\(\\[parameters\\_records\\, ...\\]\\)" -#: ../../source/ref-api/flwr.common.ndarray_to_bytes.rst:2 +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.recordset.RecordSet:1 of #, fuzzy -msgid "ndarray\\_to\\_bytes" -msgstr "ndarray\\_to\\_bytes" +msgid "RecordSet stores groups of parameters, metrics and configs." 
+msgstr "RecordSet 可存储参数、指标和配置组。" -#: ../../source/ref-api/flwr.common.ndarrays_to_parameters.rst:2 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "ndarrays\\_to\\_parameters" -msgstr "ndarrays\\_to\\_parameters" +msgid "" +":py:obj:`ServerMessage `\\ " +"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" +msgstr "" +":py:obj:`ServerMessage `\\ " +"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" -#: ../../source/ref-api/flwr.common.now.rst:2 -#, fuzzy -msgid "now" -msgstr "现在" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ServerMessage:1 of +msgid "ServerMessage is a container used to hold one instruction message." +msgstr "ServerMessage 是用于容纳一条指令信息的容器。" -#: ../../source/ref-api/flwr.common.parameters_to_ndarrays.rst:2 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "parameters\\_to\\_ndarrays" -msgstr "parameters\\_to\\_ndarrays" +msgid ":py:obj:`Status `\\ \\(code\\, message\\)" +msgstr ":py:obj:`Status `\\ \\(code\\, message\\)" -#: ../../source/ref-api/flwr.server.rst:2 -msgid "server" -msgstr "服务器" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Status:1 of +msgid "Client status." +msgstr "客户端状态。" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.common.Array.rst:2 #, fuzzy -msgid ":py:obj:`run_driver_api `\\ \\(\\)" -msgstr ":py:obj:`run_driver_api `\\ \\(\\)" +msgid "Array" +msgstr "数组" -#: ../../source/ref-api/flwr.server.rst:26::1 -#: flwr.server.app.run_driver_api:1 of +#: flwr.common.record.parametersrecord.Array:3 of #, fuzzy -msgid "Run Flower server (Driver API)." -msgstr "flower-driver-api" +msgid "" +"A dataclass containing serialized data from an array-like or tensor-like " +"object along with some metadata about it." 
+msgstr "数据类,包含数组类或张量类对象的序列化数据以及相关元数据。" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: flwr.common.record.parametersrecord.Array:6 of #, fuzzy -msgid ":py:obj:`run_fleet_api `\\ \\(\\)" -msgstr ":py:obj:`run_fleet_api `\\ \\(\\)" +msgid "" +"A string representing the data type of the serialised object (e.g. " +"`np.float32`)" +msgstr "表示序列化对象数据类型的字符串(例如 `np.float32`)" -#: ../../source/ref-api/flwr.server.rst:26::1 -#: flwr.server.app.run_fleet_api:1 of +#: flwr.common.record.parametersrecord.Array:8 of #, fuzzy -msgid "Run Flower server (Fleet API)." -msgstr "Flower 服务器。" +msgid "" +"A list representing the shape of the unserialized array-like object. This" +" is used to deserialize the data (depending on the serialization method) " +"or simply as a metadata field." +msgstr "代表未序列化数组对象形状的列表。它可用于反序列化数据(取决于序列化方法),或仅作为元数据字段使用。" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: flwr.common.record.parametersrecord.Array:12 of #, fuzzy -msgid ":py:obj:`run_server_app `\\ \\(\\)" -msgstr ":py:obj:`run_server_app `\\ \\(\\)" +msgid "" +"A string indicating the type of serialisation mechanism used to generate " +"the bytes in `data` from an array-like or tensor-like object." +msgstr "表示序列化机制类型的字符串,用于从类似数组或类似张量的对象中生成 `data` 中的字节。" -#: ../../source/ref-api/flwr.server.rst:26::1 -#: flwr.server.run_serverapp.run_server_app:1 of +#: flwr.common.record.parametersrecord.Array:15 of #, fuzzy -msgid "Run Flower server app." -msgstr "Flower 服务器。" +msgid "A buffer of bytes containing the data." 
+msgstr "包含数据的字节缓冲区。" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: ../../source/ref-api/flwr.common.Array.rst:26::1 #, fuzzy -msgid ":py:obj:`run_superlink `\\ \\(\\)" -msgstr ":py:obj:`run_superlink `\\ \\(\\)" +msgid ":py:obj:`numpy `\\ \\(\\)" +msgstr "server.strategy.Strategy" -#: ../../source/ref-api/flwr.server.rst:26::1 -#: flwr.server.app.run_superlink:1 of +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +#: flwr.common.record.parametersrecord.Array.numpy:1 of #, fuzzy -msgid "Run Flower SuperLink (Driver API and Fleet API)." -msgstr "运行 Flower 服务器(Driver API 和 Fleet API)。" +msgid "Return the array as a NumPy array." +msgstr "以 NumPy ndarrays 列表形式返回模型参数" -#: ../../source/ref-api/flwr.server.rst:26::1 +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of #, fuzzy -msgid "" -":py:obj:`start_server `\\ \\(\\*\\[\\, " -"server\\_address\\, server\\, ...\\]\\)" -msgstr "" -":py:obj:`start_server `\\ \\(\\*\\[\\, " -"server\\_address\\, server\\, ...\\]\\)" +msgid ":py:obj:`dtype `\\" +msgstr ":py:obj:`dtype `\\" -#: ../../source/ref-api/flwr.server.rst:26::1 -#: flwr.server.app.start_server:1 of -msgid "Start a Flower server using the gRPC transport layer." -msgstr "使用 gRPC 传输层启动 Flower 服务器。" +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +#, fuzzy +msgid ":py:obj:`shape `\\" +msgstr "server.strategy.Strategy" -#: ../../source/ref-api/flwr.server.rst:40::1 +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of #, fuzzy -msgid ":py:obj:`ClientManager `\\ \\(\\)" -msgstr ":py:obj:`ClientManager `\\ \\(\\)" +msgid ":py:obj:`stype `\\" +msgstr "server.strategy.Strategy" -#: ../../source/ref-api/flwr.server.rst:40::1 -#: flwr.server.client_manager.ClientManager:1 of +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of #, fuzzy -msgid "Abstract base class for managing Flower clients." 
-msgstr "Flower 客户端的抽象基类。" +msgid ":py:obj:`data `\\" +msgstr ":py:obj:`data `\\" -#: ../../source/ref-api/flwr.server.rst:40::1 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:2 #, fuzzy -msgid ":py:obj:`Driver `\\ \\(\\)" -msgstr ":py:obj:`run_driver_api `\\ \\(\\)" +msgid "ClientMessage" +msgstr "客户端" -#: ../../source/ref-api/flwr.server.rst:40::1 -#: flwr.server.driver.driver.Driver:1 of +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 #, fuzzy -msgid "Abstract base Driver class for the Driver API." -msgstr "Flower 客户端的抽象基类。" +msgid ":py:obj:`evaluate_res `\\" +msgstr ":py:obj:`evaluate_res `\\" -#: ../../source/ref-api/flwr.server.rst:40::1 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 #, fuzzy -msgid ":py:obj:`History `\\ \\(\\)" -msgstr ":py:obj:`History `\\ \\(\\)" +msgid ":py:obj:`fit_res `\\" +msgstr ":py:obj:`fit_res `\\" -#: ../../source/ref-api/flwr.server.rst:40::1 -#: flwr.server.history.History:1 of +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 #, fuzzy -msgid "History class for training and/or evaluation metrics collection." -msgstr "**hist** -- 包含训练和评估指标的对象。" +msgid "" +":py:obj:`get_parameters_res " +"`\\" +msgstr "" +":py:obj:`get_parameters_res " +"`\\" -#: ../../source/ref-api/flwr.server.rst:40::1 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 #, fuzzy msgid "" -":py:obj:`LegacyContext `\\ \\(state\\[\\, " -"config\\, strategy\\, ...\\]\\)" +":py:obj:`get_properties_res " +"`\\" msgstr "" -":py:obj:`LegacyContext `\\ \\(state\\[\\, " -"config\\, strategy\\, ...\\]\\)" +":py:obj:`get_properties_res " +"`\\" -#: ../../source/ref-api/flwr.server.rst:40::1 -#: flwr.server.compat.legacy_context.LegacyContext:1 of +#: ../../source/ref-api/flwr.common.Code.rst:2 #, fuzzy -msgid "Legacy Context." 
-msgstr "传承背景。" +msgid "Code" +msgstr "代码" + +#: flwr.common.typing.Code:1 of +#, fuzzy +msgid "Bases: :py:class:`~enum.Enum`" +msgstr "Bases: :py:class:`~enum.Enum`" + +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#, fuzzy +msgid ":py:obj:`OK `\\" +msgstr ":py:obj:`OK `\\" -#: ../../source/ref-api/flwr.server.rst:40::1 +#: ../../source/ref-api/flwr.common.Code.rst:26::1 #, fuzzy msgid "" -":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " -"strategy\\]\\)" +":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " +"`\\" msgstr "" -":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " -"strategy\\]\\)" +":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " +"`\\" -#: ../../source/ref-api/flwr.server.rst:40::1 +#: ../../source/ref-api/flwr.common.Code.rst:26::1 #, fuzzy msgid "" -":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " -"strategy\\, ...\\]\\)" -msgstr "server.strategy.Strategy" +":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " +"`\\" +msgstr "" +":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " +"`\\" -#: ../../source/ref-api/flwr.server.rst:40::1 -#: flwr.server.server_app.ServerApp:1 of +#: ../../source/ref-api/flwr.common.Code.rst:26::1 #, fuzzy -msgid "Flower ServerApp." -msgstr "Flower 服务器。" +msgid ":py:obj:`FIT_NOT_IMPLEMENTED `\\" +msgstr ":py:obj:`FIT_NOT_IMPLEMENTED `\\" -#: ../../source/ref-api/flwr.server.rst:40::1 +#: ../../source/ref-api/flwr.common.Code.rst:26::1 #, fuzzy msgid "" -":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," -" round\\_timeout\\]\\)" +":py:obj:`EVALUATE_NOT_IMPLEMENTED " +"`\\" msgstr "" -"Flower 1.0: ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +":py:obj:`EVALUATE_NOT_IMPLEMENTED " +"`\\" -#: ../../source/ref-api/flwr.server.rst:40::1 -#: flwr.server.server_config.ServerConfig:1 of +#: ../../source/ref-api/flwr.common.Config.rst:2 #, fuzzy -msgid "Flower server config." 
-msgstr "Flower 服务器。" +msgid "Config" +msgstr "配置日志记录" -#: ../../source/ref-api/flwr.server.rst:40::1 +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 #, fuzzy -msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" -msgstr ":py:obj:`SimpleClientManager `\\ \\(\\)" +msgid "ConfigsRecord" +msgstr "配置日志记录" -#: ../../source/ref-api/flwr.server.rst:40::1 -#: flwr.server.client_manager.SimpleClientManager:1 of +#: flwr.common.record.configsrecord.ConfigsRecord:1 of #, fuzzy -msgid "Provides a pool of available clients." -msgstr "使用部分可用客户进行评估。" +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | :py:class:`str` |" +" :py:class:`bytes` | :py:class:`bool` | :py:class:`list`\\ " +"[:py:class:`int`] | :py:class:`list`\\ [:py:class:`float`] | " +":py:class:`list`\\ [:py:class:`str`] | :py:class:`list`\\ " +"[:py:class:`bytes`] | :py:class:`list`\\ [:py:class:`bool`]]" +msgstr "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " +":py:class:`float`, :py:class:`str`, :py:class:`bytes`, :py:class:`bool`, " +":py:class:`~typing.List`\\ [:py:class:`int`], :py:class:`~typing.List`\\ " +"[:py:class:`float`], :py:class:`~typing.List`\\ [:py:class:`str`], " +":py:class:`~typing.List`\\ [:py:class:`bytes`], " +":py:class:`~typing.List`\\ [:py:class:`bool`]]]" -#: ../../source/ref-api/flwr.server.rst:59::1 -#, fuzzy -msgid ":py:obj:`flwr.server.strategy `\\" -msgstr "server.strategy.Strategy" +#: flwr.common.record.configsrecord.ConfigsRecord:3 of +msgid "" +"A :code:`ConfigsRecord` is a Python dictionary designed to ensure that " +"each key-value pair adheres to specified data types. A " +":code:`ConfigsRecord` is one of the types of records that a " +"`flwr.common.RecordSet `_ supports " +"and can therefore be used to construct :code:`common.Message` objects." 
+msgstr "" -#: ../../source/ref-api/flwr.server.rst:59::1 -#: flwr.server.strategy:1 of -msgid "Contains the strategy abstraction and different implementations." -msgstr "包含策略抽象和不同的实现方法。" +#: flwr.common.record.configsrecord.ConfigsRecord:9 of +msgid "" +"A dictionary that stores basic types (i.e. `str`, `int`, `float`, `bytes`" +" as defined in `ConfigsScalar`) and lists of such types (see " +"`ConfigsScalarList`)." +msgstr "" -#: ../../source/ref-api/flwr.server.rst:59::1 -#, fuzzy -msgid ":py:obj:`flwr.server.workflow `\\" -msgstr "server.strategy.Strategy" +#: flwr.common.record.configsrecord.ConfigsRecord:13 of +msgid "" +"A boolean indicating whether config passed should be deleted from the " +"input dictionary immediately after adding them to the record. When set to" +" True, the data is duplicated in memory. If memory is a concern, set it " +"to False." +msgstr "" -#: ../../source/ref-api/flwr.server.rst:59::1 -#: flwr.server.workflow:1 of +#: flwr.common.record.configsrecord.ConfigsRecord:21 of +msgid "" +"The usage of a :code:`ConfigsRecord` is envisioned for sending " +"configuration values telling the target node how to perform a certain " +"action (e.g. train/evaluate a model ). You can use standard Python built-" +"in types such as :code:`float`, :code:`str` , :code:`bytes`. All types " +"allowed are defined in :code:`flwr.common.ConfigsRecordValues`. While " +"lists are supported, we encourage you to use a :code:`ParametersRecord` " +"instead if these are of high dimensionality." +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord:29 of +msgid "" +"Let's see some examples of how to construct a :code:`ConfigsRecord` from " +"scratch:" +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord:42 of +msgid "" +"Just like the other types of records in a :code:`flwr.common.RecordSet`, " +"types are enforced. 
If you need to add a custom data structure or object," +" we recommend to serialise it into bytes and save it as such (bytes are " +"allowed in a :code:`ConfigsRecord`)" +msgstr "" + +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "Workflows." -msgstr "工作流程" +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr ":py:obj:`clear `\\ \\(\\)" -#: ../../source/ref-api/flwr.server.ClientManager.rst:2 +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "ClientManager" -msgstr "客户端" +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr ":py:obj:`count_bytes `\\ \\(\\)" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: collections.abc.MutableMapping.clear:1::1 +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 +#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 of #, fuzzy -msgid ":py:obj:`all `\\ \\(\\)" -msgstr ":py:obj:`all `\\ \\(\\)" +msgid "Return number of Bytes stored in this object." +msgstr "返回存储在此对象中的字节数。" -#: flwr.server.client_manager.ClientManager.all:1 -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.all:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "Return all available clients." 
-msgstr "返回所有可用客户。" +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid ":py:obj:`num_available `\\ \\(\\)" -msgstr ":py:obj:`num_available `\\ \\(\\)" +msgid ":py:obj:`items `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.num_available:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.num_available:1 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "Return the number of available clients." -msgstr "返回样本大小和所需的可用客户数量。" +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr ":py:obj:`keys `\\ \\(\\)" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid ":py:obj:`register `\\ \\(client\\)" -msgstr ":py:obj:`register `\\ \\(client\\)" +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.register:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.register:1 of +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.pop:1 of #, fuzzy -msgid "Register Flower ClientProxy instance." -msgstr "注册 Flower ClientProxy 实例。" +msgid "If key is not found, d is returned if given, otherwise KeyError is raised." 
+msgstr "如果未找到 key,则返回 d(如果给定),否则引发 KeyError。" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "" -":py:obj:`sample `\\ " -"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" -msgstr "" -":py:obj:`sample `\\ " -"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +msgid ":py:obj:`popitem `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.sample:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.sample:1 of -#, fuzzy -msgid "Sample a number of Flower ClientProxy instances." -msgstr "取样若干 Flower ClientProxy 实例。" +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.popitem:1 of +msgid "as a 2-tuple; but raise KeyError if D is empty." +msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid ":py:obj:`unregister `\\ \\(client\\)" -msgstr ":py:obj:`unregister `\\ \\(client\\)" +msgid "" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.unregister:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.unregister:1 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "Unregister Flower ClientProxy instance." 
-msgstr "取消注册 Flower ClientProxy 实例。" +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" +msgstr "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" -#: flwr.server.client_manager.ClientManager.all:1::1 of -#, fuzzy +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.update:1 of msgid "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\, timeout\\)" +"If E present and has a .keys() method, does: for k in E: D[k] = E[k] " +"If E present and lacks .keys() method, does: for (k, v) in E: D[k] = " +"v In either case, this is followed by: for k, v in F.items(): D[k] = v" msgstr "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\, timeout\\)" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.wait_for:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.wait_for:1 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "Wait until at least `num_clients` are available." -msgstr "等待至少 `num_clients` 可用。" +msgid ":py:obj:`values `\\ \\(\\)" +msgstr ":py:obj:`values `\\ \\(\\)" -#: flwr.server.client_manager.ClientManager.num_available:3 -#: flwr.server.client_manager.SimpleClientManager.num_available:3 of +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of #, fuzzy -msgid "**num_available** -- The number of currently available clients." -msgstr "**num_available** -- 当前可用客户端的数量。" +msgid "This function counts booleans as occupying 1 Byte." +msgstr "该函数将布尔值计算为占用 1 个字节。" -#: flwr.server.client_manager.ClientManager.register:6 -#: flwr.server.client_manager.SimpleClientManager.register:6 of +#: ../../source/ref-api/flwr.common.Context.rst:2 #, fuzzy -msgid "" -"**success** -- Indicating if registration was successful. False if " -"ClientProxy is already registered or can not be registered for any " -"reason." 
-msgstr "**success** -- 表示注册是否成功。如果 ClientProxy 已注册或因故无法注册,则为 False。" +msgid "Context" +msgstr "背景" -#: flwr.server.client_manager.ClientManager.unregister:3 -#: flwr.server.client_manager.SimpleClientManager.unregister:3 of +#: flwr.common.context.Context:3 of #, fuzzy -msgid "This method is idempotent." -msgstr "这种方法是幂等的。" +msgid "The ID that identifies the node." +msgstr "错误的标识符。" -#: ../../source/ref-api/flwr.server.Driver.rst:2 -#, fuzzy -msgid "Driver" -msgstr "服务器" +#: flwr.common.context.Context:5 of +msgid "" +"A config (key/value mapping) unique to the node and independent of the " +"`run_config`. This config persists across all runs this node participates" +" in." +msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: flwr.common.context.Context:8 of #, fuzzy msgid "" -":py:obj:`create_message `\\ " -"\\(content\\, message\\_type\\, ...\\[\\, ttl\\]\\)" +"Holds records added by the entity in a given run and that will stay " +"local. This means that the data it holds will never leave the system it's" +" running from. This can be used as an intermediate storage or scratchpad " +"when executing mods. It can also be used as a memory to access at " +"different points during the lifecycle of this entity (e.g. across " +"multiple rounds)" +msgstr "保存实体在给定运行中添加的记录,这些记录将保留在本地。这意味着它保存的数据永远不会离开运行的系统。在执行模式时,它可用作中间存储或抓取板。它还可以作为存储器,在实体生命周期的不同阶段(如多轮)进行访问。" + +#: flwr.common.context.Context:15 of +msgid "" +"A config (key/value mapping) held by the entity in a given run and that " +"will stay local. It can be used at any point during the lifecycle of this" +" entity (e.g. across multiple rounds)" msgstr "" -":py:obj:`create_message `\\ " -"\\(content\\, message\\_type\\, ...\\)" -#: flwr.server.driver.driver.Driver.create_message:1 -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: ../../source/ref-api/flwr.common.Context.rst:31::1 #, fuzzy -msgid "Create a new message with specified parameters." 
-msgstr "使用指定参数创建新信息。" +msgid ":py:obj:`node_id `\\" +msgstr ":py:obj:`src_node_id `\\" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: ../../source/ref-api/flwr.common.Context.rst:31::1 #, fuzzy -msgid ":py:obj:`get_node_ids `\\ \\(\\)" -msgstr ":py:obj:`get_node_ids `\\ \\(\\)" +msgid ":py:obj:`node_config `\\" +msgstr ":py:obj:`config `\\" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.get_node_ids:1 of +#: ../../source/ref-api/flwr.common.Context.rst:31::1 #, fuzzy -msgid "Get node IDs." -msgstr "获取节点 ID。" +msgid ":py:obj:`state `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: ../../source/ref-api/flwr.common.Context.rst:31::1 #, fuzzy -msgid "" -":py:obj:`pull_messages `\\ " -"\\(message\\_ids\\)" -msgstr "" -":py:obj:`pull_messages `\\ " -"\\(message\\_ids\\)" +msgid ":py:obj:`run_config `\\" +msgstr ":py:obj:`config `\\" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.pull_messages:1 of +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 #, fuzzy -msgid "Pull messages based on message IDs." -msgstr "根据信息 ID 提取信息。" +msgid "DisconnectRes" +msgstr "断开Res" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:28::1 #, fuzzy -msgid "" -":py:obj:`push_messages `\\ " -"\\(messages\\)" -msgstr "" -":py:obj:`push_messages `\\ " -"\\(messages\\)" +msgid ":py:obj:`reason `\\" +msgstr ":py:obj:`reason `\\" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.push_messages:1 of +#: ../../source/ref-api/flwr.common.Error.rst:2 #, fuzzy -msgid "Push messages to specified node IDs." 
-msgstr "向指定的节点 ID 推送信息。" +msgid "Error" +msgstr "错误" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: flwr.common.message.Error:3 of #, fuzzy -msgid "" -":py:obj:`send_and_receive `\\ " -"\\(messages\\, \\*\\[\\, timeout\\]\\)" -msgstr "" -"Flower 1.0: ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +msgid "An identifier for the error." +msgstr "错误的标识符。" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.send_and_receive:1 of +#: flwr.common.message.Error:5 of #, fuzzy -msgid "Push messages to specified node IDs and pull the reply messages." -msgstr "向指定的节点 ID 推送信息并提取回复信息。" +msgid "A reason for why the error arose (e.g. an exception stack-trace)" +msgstr "出错原因(如异常堆栈跟踪)" -#: flwr.server.driver.driver.Driver.create_message:3 of +#: flwr.common.Error.code:1::1 of #, fuzzy -msgid "" -"This method constructs a new `Message` with given content and metadata. " -"The `run_id` and `src_node_id` will be set automatically." -msgstr "本方法使用给定的内容和元数据构建新的 `Message` 。run_id \"和 \"src_node_id \"将自动设置。" +msgid ":py:obj:`code `\\" +msgstr ":py:obj:`code `\\" -#: flwr.server.driver.driver.Driver.create_message:6 of +#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of #, fuzzy -msgid "" -"The content for the new message. This holds records that are to be sent " -"to the destination node." -msgstr "新信息的内容。其中包含要发送到目的节点的记录。" +msgid "Error code." +msgstr "错误代码。" -#: flwr.server.driver.driver.Driver.create_message:9 of +#: flwr.common.Error.code:1::1 of #, fuzzy -msgid "" -"The type of the message, defining the action to be executed on the " -"receiving end." -msgstr "信息类型,定义接收端要执行的操作。" +msgid ":py:obj:`reason `\\" +msgstr ":py:obj:`reason `\\" -#: flwr.server.driver.driver.Driver.create_message:12 of +#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of #, fuzzy -msgid "The ID of the destination node to which the message is being sent." 
-msgstr "信息发送目的地节点的 ID。" +msgid "Reason reported about the error." +msgstr "报告的错误原因。" -#: flwr.server.driver.driver.Driver.create_message:14 of +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 #, fuzzy -msgid "" -"The ID of the group to which this message is associated. In some " -"settings, this is used as the FL round." -msgstr "与该信息相关联的组的 ID。在某些设置中,它被用作 FL 轮。" +msgid "EvaluateIns" +msgstr "说明" -#: flwr.server.driver.driver.Driver.create_message:17 of +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 #, fuzzy -msgid "" -"Time-to-live for the round trip of this message, i.e., the time from " -"sending this message to receiving a reply. It specifies in seconds the " -"duration for which the message and its potential reply are considered " -"valid. If unset, the default TTL (i.e., `common.DEFAULT_TTL`) will be " -"used." -msgstr "此报文往返的有效时间,即从发送此报文到收到回复的时间。它规定了信息及其潜在回复被视为有效的持续时间。" +msgid ":py:obj:`parameters `\\" +msgstr ":py:obj:`parameters `\\" -#: flwr.server.driver.driver.Driver.create_message:23 of +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 #, fuzzy -msgid "" -"**message** -- A new `Message` instance with the specified content and " -"metadata." -msgstr "**message** -- 具有指定内容和元数据的新 \"信息 \"实例。" +msgid ":py:obj:`config `\\" +msgstr ":py:obj:`config `\\" -#: flwr.server.driver.driver.Driver.pull_messages:3 of +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:2 #, fuzzy -msgid "" -"This method is used to collect messages from the SuperLink that " -"correspond to a set of given message IDs." -msgstr "该方法用于从超级链接中收集与一组给定消息 ID 相对应的消息。" +msgid "EvaluateRes" +msgstr "评估Res" -#: flwr.server.driver.driver.Driver.pull_messages:6 of +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 #, fuzzy -msgid "An iterable of message IDs for which reply messages are to be retrieved." 
-msgstr "要检索回复信息的信息 ID 的可迭代项。" +msgid ":py:obj:`status `\\" +msgstr ":py:obj:`status `\\" -#: flwr.server.driver.driver.Driver.pull_messages:9 of +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 #, fuzzy -msgid "**messages** -- An iterable of messages received." -msgstr "**messages** -- 收到的信息迭代。" +msgid ":py:obj:`loss `\\" +msgstr ":py:obj:`loss `\\" -#: flwr.server.driver.driver.Driver.push_messages:3 of +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 #, fuzzy -msgid "" -"This method takes an iterable of messages and sends each message to the " -"node specified in `dst_node_id`." -msgstr "该方法接收一个可迭代的消息,并将每条消息发送到 `dst_node_id` 中指定的节点。" +msgid ":py:obj:`num_examples `\\" +msgstr ":py:obj:`num_examples `\\" -#: flwr.server.driver.driver.Driver.push_messages:6 -#: flwr.server.driver.driver.Driver.send_and_receive:7 of +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 #, fuzzy -msgid "An iterable of messages to be sent." -msgstr "要发送的信息迭代。" +msgid ":py:obj:`metrics `\\" +msgstr ":py:obj:`metrics `\\" -#: flwr.server.driver.driver.Driver.push_messages:9 of +#: ../../source/ref-api/flwr.common.EventType.rst:2 #, fuzzy -msgid "" -"**message_ids** -- An iterable of IDs for the messages that were sent, " -"which can be used to pull replies." -msgstr "**message_ids** -- 已发送信息的可迭代 ID,可用于提取回复信息。" +msgid "EventType" +msgstr "返回类型" -#: flwr.server.driver.driver.Driver.send_and_receive:3 of +#: flwr.common.telemetry.EventType:1 of #, fuzzy -msgid "" -"This method sends a list of messages to their destination node IDs and " -"then waits for the replies. It continues to pull replies until either all" -" replies are received or the specified timeout duration is exceeded." 
-msgstr "该方法会向目标节点 ID 发送信息列表,然后等待回复。它会继续提取回复,直到收到所有回复或超过指定的超时时间。" +msgid "Bases: :py:class:`str`, :py:class:`~enum.Enum`" +msgstr "Bases: :py:class:`str`, :py:class:`~enum.Enum`" -#: flwr.server.driver.driver.Driver.send_and_receive:9 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy msgid "" -"The timeout duration in seconds. If specified, the method will wait for " -"replies for this duration. If `None`, there is no time limit and the " -"method will wait until replies for all messages are received." -msgstr "超时时间(秒)。如果指定,该方法将在此期限内等待回复。如果指定为 \"无\",则没有时间限制,该方法将等待直到收到所有信息的回复。" - -#: flwr.server.driver.driver.Driver.send_and_receive:14 of -#, fuzzy -msgid "**replies** -- An iterable of reply messages received from the SuperLink." -msgstr "**replies** -- 从超级链接收到的回复信息的迭代。" +":py:obj:`encode `\\ \\(\\[encoding\\, " +"errors\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.server.driver.driver.Driver.send_and_receive:18 -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:53 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:60 -#: of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.encode:1 of #, fuzzy -msgid "Notes" -msgstr "无" +msgid "Encode the string using the codec registered for encoding." +msgstr "使用注册的编码解码器对字符串进行编码。" -#: flwr.server.driver.driver.Driver.send_and_receive:19 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy msgid "" -"This method uses `push_messages` to send the messages and `pull_messages`" -" to collect the replies. If `timeout` is set, the method may not return " -"replies for all sent messages. A message remains valid until its TTL, " -"which is not affected by `timeout`." 
-msgstr "" -"该方法使用 `push_messages` 发送信息,并使用 `pull_messages` 收集回复。如果设置了 " -"`timeout`,该方法可能不会返回所有已发送消息的回复。消息在其 TTL 之前一直有效,不受 `timeout` 影响。" +":py:obj:`replace `\\ \\(old\\, new\\[\\, " +"count\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.server.History.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.replace:1 of #, fuzzy -msgid "History" -msgstr "历史" +msgid "Return a copy with all occurrences of substring old replaced by new." +msgstr "返回用 new 替换子串 old 的所有出现次数的副本。" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy msgid "" -":py:obj:`add_loss_centralized " -"`\\ \\(server\\_round\\, " -"loss\\)" -msgstr "" -":py:obj:`add_loss_centralized " -"`\\ \\(server\\_round\\, " -"loss\\)" +":py:obj:`split `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.server.history.History.add_loss_centralized:1 -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rsplit:1 flwr.common.EventType.split:1 of #, fuzzy -msgid "Add one loss entry (from centralized evaluation)." -msgstr "集中评估" +msgid "" +"Return a list of the substrings in the string, using sep as the separator" +" string." 
+msgstr "使用 sep 作为分隔符,返回字符串中的子字符串列表。" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy msgid "" -":py:obj:`add_loss_distributed " -"`\\ \\(server\\_round\\, " -"loss\\)" -msgstr "" -":py:obj:`add_loss_distributed " -"`\\ \\(server\\_round\\, " -"loss\\)" +":py:obj:`rsplit `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_loss_distributed:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Add one loss entry (from distributed evaluation)." -msgstr "增加一个损失条目(来自分布式评估)。" +msgid ":py:obj:`join `\\ \\(iterable\\, \\/\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.join:1 of #, fuzzy -msgid "" -":py:obj:`add_metrics_centralized " -"`\\ \\(server\\_round\\, " -"metrics\\)" -msgstr "" -":py:obj:`add_metrics_centralized " -"`\\ \\(server\\_round\\, " -"metrics\\)" +msgid "Concatenate any number of strings." +msgstr "连接任意数量的字符串。" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_centralized:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Add metrics entries (from centralized evaluation)." -msgstr "集中评估" +msgid ":py:obj:`capitalize `\\ \\(\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.capitalize:1 of #, fuzzy -msgid "" -":py:obj:`add_metrics_distributed " -"`\\ \\(server\\_round\\, " -"metrics\\)" -msgstr "" -":py:obj:`add_metrics_distributed " -"`\\ \\(server\\_round\\, " -"metrics\\)" +msgid "Return a capitalized version of the string." 
+msgstr "返回字符串的大写版本。" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_distributed:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Add metrics entries (from distributed evaluation)." -msgstr "定制的集中/分布式评估" +msgid ":py:obj:`casefold `\\ \\(\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.casefold:1 of #, fuzzy -msgid "" -":py:obj:`add_metrics_distributed_fit " -"`\\ \\(server\\_round\\," -" ...\\)" -msgstr "" -":py:obj:`add_metrics_distributed_fit " -"`\\ \\(server\\_round\\," -" ...\\)" +msgid "Return a version of the string suitable for caseless comparisons." +msgstr "返回适合无例比较的字符串版本。" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_distributed_fit:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Add metrics entries (from distributed fit)." -msgstr "添加度量条目(来自分布式拟合)。" +msgid ":py:obj:`title `\\ \\(\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.title:1 of #, fuzzy -msgid "LegacyContext" -msgstr "遗留上下文" +msgid "Return a version of the string where each word is titlecased." 
+msgstr "返回字符串的版本,其中每个单词都使用了标题大小写。" -#: flwr.server.compat.legacy_context.LegacyContext:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Bases: :py:class:`~flwr.common.context.Context`" -msgstr "Bases: :py:class:`~flwr.common.context.Context`" +msgid "" +":py:obj:`center `\\ \\(width\\[\\, " +"fillchar\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.center:1 of #, fuzzy -msgid ":py:obj:`config `\\" -msgstr "server.strategy.Strategy" +msgid "Return a centered string of length width." +msgstr "返回客户端的属性集。" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`strategy `\\" -msgstr "server.strategy.Strategy" +msgid "" +":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" +msgstr ":py:obj:`Context `\\ \\(state\\)" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`client_manager `\\" -msgstr ":py:obj:`client_manager `\\" +msgid "" +"Return the number of non-overlapping occurrences of substring sub in " +"string S[start:end]." 
+msgstr "返回子字符串 sub 在字符串 S[start:end] 中非重叠出现的次数。" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`history `\\" -msgstr "server.strategy.Strategy" +msgid "" +":py:obj:`expandtabs `\\ " +"\\(\\[tabsize\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.expandtabs:1 of #, fuzzy -msgid ":py:obj:`state `\\" -msgstr "server.strategy.Strategy" - -#: ../../source/ref-api/flwr.server.Server.rst:2 -msgid "Server" -msgstr "服务器" +msgid "Return a copy where all tab characters are expanded using spaces." +msgstr "返回使用空格扩展所有制表符的副本。" -#: flwr.server.server.Server.client_manager:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`client_manager `\\ \\(\\)" -msgstr ":py:obj:`client_manager `\\ \\(\\)" +msgid "" +":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.server.server.Server.client_manager:1 -#: flwr.server.server.Server.client_manager:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Return ClientManager." -msgstr "返回客户端(本身)。" +msgid "" +"Return the lowest index in S where substring sub is found, such that sub " +"is contained within S[start:end]." 
+msgstr "返回在 S 中找到子串 sub 的最低索引,且 sub 包含在 S[start:end] 中。" -#: flwr.server.server.Server.client_manager:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "" -":py:obj:`disconnect_all_clients " -"`\\ \\(timeout\\)" -msgstr "" -":py:obj:`disconnect_all_clients " -"`\\ \\(timeout\\)" +msgid ":py:obj:`partition `\\ \\(sep\\, \\/\\)" +msgstr ":py:obj:`partition_id `\\" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.disconnect_all_clients:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.partition:1 flwr.common.EventType.rpartition:1 of #, fuzzy -msgid "Send shutdown signal to all clients." -msgstr "向所有客户端发送关闭信号。" +msgid "Partition the string into three parts using the given separator." +msgstr "使用给定的分隔符将字符串分为三部分。" -#: flwr.server.server.Server.client_manager:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy msgid "" -":py:obj:`evaluate_round `\\ " -"\\(server\\_round\\, timeout\\)" -msgstr "" -":py:obj:`evaluate_round `\\ " -"\\(server\\_round\\, timeout\\)" +":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" +msgstr ":py:obj:`Context `\\ \\(state\\)" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.evaluate_round:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Validate current global model on a number of clients." -msgstr "当前(全局)模型参数。" +msgid "" +":py:obj:`ljust `\\ \\(width\\[\\, " +"fillchar\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.server.server.Server.client_manager:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.ljust:1 of #, fuzzy -msgid ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" -msgstr ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" +msgid "Return a left-justified string of length width." 
+msgstr "返回长度为 width 的左对齐字符串。" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.fit:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Run federated averaging for a number of rounds." -msgstr "联邦平均动量策略。" +msgid ":py:obj:`lower `\\ \\(\\)" +msgstr ":py:obj:`now `\\ \\(\\)" -#: flwr.server.server.Server.client_manager:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.lower:1 of +#, fuzzy +msgid "Return a copy of the string converted to lowercase." +msgstr "返回转换为小写的字符串副本。" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.lstrip:1 of +#, fuzzy +msgid "Return a copy of the string with leading whitespace removed." +msgstr "返回去掉前导空白的字符串副本。" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy msgid "" -":py:obj:`fit_round `\\ \\(server\\_round\\," -" timeout\\)" -msgstr "" -":py:obj:`fit_round `\\ \\(server\\_round\\," -" timeout\\)" +":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.fit_round:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Perform a single round of federated averaging." -msgstr "本轮联邦学习。" +msgid "" +"Return the highest index in S where substring sub is found, such that sub" +" is contained within S[start:end]." 
+msgstr "返回在 S 中找到子串 sub 的最高索引,且 sub 包含在 S[start:end] 中。" -#: flwr.server.server.Server.client_manager:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy msgid "" -":py:obj:`set_max_workers `\\ " -"\\(max\\_workers\\)" -msgstr "" -":py:obj:`set_max_workers `\\ " -"\\(max\\_workers\\)" +":py:obj:`rindex `\\ \\(sub\\[\\, " +"start\\[\\, end\\]\\]\\)" +msgstr ":py:obj:`Context `\\ \\(state\\)" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.set_max_workers:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Set the max_workers used by ThreadPoolExecutor." -msgstr "设置 ThreadPoolExecutor 使用的最大工作器数。" +msgid "" +":py:obj:`rjust `\\ \\(width\\[\\, " +"fillchar\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.server.server.Server.client_manager:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rjust:1 of #, fuzzy -msgid ":py:obj:`set_strategy `\\ \\(strategy\\)" -msgstr ":py:obj:`set_strategy `\\ \\(strategy\\)" +msgid "Return a right-justified string of length width." +msgstr "返回长度为 width 的右对齐字符串。" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.set_strategy:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Replace server strategy." -msgstr "server.strategy" +msgid ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.server.ServerApp.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rstrip:1 of #, fuzzy -msgid "ServerApp" -msgstr "服务器" +msgid "Return a copy of the string with trailing whitespace removed." 
+msgstr "返回去掉尾部空白的字符串副本。" -#: flwr.server.server_app.ServerApp:5 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Use the `ServerApp` with an existing `Strategy`:" -msgstr "使用现有策略" +msgid ":py:obj:`rpartition `\\ \\(sep\\, \\/\\)" +msgstr ":py:obj:`partition_id `\\" -#: flwr.server.server_app.ServerApp:15 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Use the `ServerApp` with a custom main function:" -msgstr "使用带有自定义主函数的 `ServerApp`:" +msgid "" +":py:obj:`splitlines `\\ " +"\\(\\[keepends\\]\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.server.server_app.ServerApp.main:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.splitlines:1 of #, fuzzy -msgid ":py:obj:`main `\\ \\(\\)" -msgstr "server.strategy.Strategy" +msgid "Return a list of the lines in the string, breaking at line boundaries." +msgstr "返回字符串中的行列表,以行为分界线。" -#: flwr.server.server_app.ServerApp.main:1 -#: flwr.server.server_app.ServerApp.main:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Return a decorator that registers the main fn with the server app." -msgstr "返回向服务器应用程序注册 main fn 的装饰器。" +msgid ":py:obj:`strip `\\ \\(\\[chars\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.strip:1 of #, fuzzy -msgid "ServerConfig" -msgstr "服务器" +msgid "Return a copy of the string with leading and trailing whitespace removed." +msgstr "返回去掉前导和尾部空白的字符串副本。" -#: flwr.server.server_config.ServerConfig:3 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "" -"All attributes have default values which allows users to configure just " -"the ones they care about." 
-msgstr "所有属性都有默认值,用户只需配置自己关心的属性即可。" +msgid ":py:obj:`swapcase `\\ \\(\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.swapcase:1 of #, fuzzy -msgid ":py:obj:`num_rounds `\\" -msgstr ":py:obj:`num_rounds `\\" +msgid "" +"Convert uppercase characters to lowercase and lowercase characters to " +"uppercase." +msgstr "将大写字母转换为小写字母,将小写字母转换为大写字母。" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`round_timeout `\\" -msgstr ":py:obj:`round_timeout `\\" +msgid ":py:obj:`translate `\\ \\(table\\, \\/\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.translate:1 of #, fuzzy -msgid "SimpleClientManager" -msgstr "SimpleClientManager" +msgid "Replace each character in the string using the given translation table." +msgstr "使用给定的翻译表替换字符串中的每个字符。" -#: flwr.server.client_manager.SimpleClientManager:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" -msgstr "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" +msgid ":py:obj:`upper `\\ \\(\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.upper:1 of #, fuzzy -msgid ":py:obj:`all `\\ \\(\\)" -msgstr ":py:obj:`all `\\ \\(\\)" +msgid "Return a copy of the string converted to uppercase." 
+msgstr "返回转换为大写字符串的副本。" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy msgid "" -":py:obj:`num_available `\\" -" \\(\\)" -msgstr "" -":py:obj:`num_available `\\" -" \\(\\)" +":py:obj:`startswith `\\ \\(prefix\\[\\," +" start\\[\\, end\\]\\]\\)" +msgstr ":py:obj:`Status `\\ \\(code\\, message\\)" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "" -":py:obj:`register `\\ " -"\\(client\\)" -msgstr "" -":py:obj:`register `\\ " -"\\(client\\)" +msgid "Return True if S starts with the specified prefix, False otherwise." +msgstr "如果 S 以指定前缀开头,则返回 True,否则返回 False。" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy msgid "" -":py:obj:`sample `\\ " -"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" -msgstr "" -":py:obj:`sample `\\ " -"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +":py:obj:`endswith `\\ \\(suffix\\[\\, " +"start\\[\\, end\\]\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "" -":py:obj:`unregister `\\ " -"\\(client\\)" -msgstr "" -":py:obj:`unregister `\\ " -"\\(client\\)" +msgid "Return True if S ends with the specified suffix, False otherwise." 
+msgstr "如果 S 以指定后缀结束,则返回 True,否则返回 False。" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy msgid "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\[\\, timeout\\]\\)" -msgstr "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\[\\, timeout\\]\\)" +":py:obj:`removeprefix `\\ " +"\\(prefix\\, \\/\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.server.client_manager.SimpleClientManager.wait_for:3 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.removeprefix:1 of #, fuzzy -msgid "" -"Blocks until the requested number of clients is available or until a " -"timeout is reached. Current timeout default: 1 day." -msgstr "阻塞,直到请求的客户端数量可用或达到超时为止。当前超时默认值:1 天。" +msgid "Return a str with the given prefix string removed if present." +msgstr "返回一个字符串,如果存在,则去掉给定的前缀字符串。" -#: flwr.server.client_manager.SimpleClientManager.wait_for:6 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "The number of clients to wait for." -msgstr "需要等待的客户数量。" +msgid "" +":py:obj:`removesuffix `\\ " +"\\(suffix\\, \\/\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.server.client_manager.SimpleClientManager.wait_for:8 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.removesuffix:1 of #, fuzzy -msgid "The time in seconds to wait for, defaults to 86400 (24h)." -msgstr "以秒为单位的等待时间,默认为 86400(24 小时)。" +msgid "Return a str with the given suffix string removed if present." 
+msgstr "返回一个字符串,如果存在给定的后缀字符串,则将其删除。" -#: flwr.server.client_manager.SimpleClientManager.wait_for:11 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "**success**" -msgstr "**success**" +msgid ":py:obj:`isascii `\\ \\(\\)" +msgstr ":py:obj:`PING `\\" -#: ../../source/ref-api/flwr.server.run_driver_api.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isascii:1 of #, fuzzy -msgid "run\\_driver\\_api" -msgstr "flower-driver-api" +msgid "Return True if all characters in the string are ASCII, False otherwise." +msgstr "如果字符串中的所有字符都是 ASCII 码,则返回 True,否则返回 False。" -#: ../../source/ref-api/flwr.server.run_fleet_api.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "run\\_fleet\\_api" -msgstr "run\\_fleet\\_api" +msgid ":py:obj:`islower `\\ \\(\\)" +msgstr ":py:obj:`now `\\ \\(\\)" -#: ../../source/ref-api/flwr.server.run_server_app.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.islower:1 of #, fuzzy -msgid "run\\_server\\_app" -msgstr "run\\_server\\_app" +msgid "Return True if the string is a lowercase string, False otherwise." +msgstr "如果字符串是小写字符串,则返回 True,否则返回 False。" -#: ../../source/ref-api/flwr.server.run_superlink.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "run\\_superlink" -msgstr "flower-superlink" +msgid ":py:obj:`isupper `\\ \\(\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.server.start_server.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isupper:1 of #, fuzzy -msgid "start\\_server" -msgstr "server.start_server" +msgid "Return True if the string is an uppercase string, False otherwise." +msgstr "如果字符串是大写字符串,则返回 True,否则返回 False。" -#: flwr.server.app.start_server:3 of -msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." 
-msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`istitle `\\ \\(\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.server.app.start_server:5 of -msgid "" -"A server implementation, either `flwr.server.Server` or a subclass " -"thereof. If no instance is provided, then `start_server` will create one." -msgstr "服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例,`start_server` 将创建一个。" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.istitle:1 of +#, fuzzy +msgid "Return True if the string is a title-cased string, False otherwise." +msgstr "如果字符串是带标题的字符串,则返回 True,否则返回 False。" -#: flwr.server.app.start_server:9 flwr.simulation.app.start_simulation:28 of -msgid "" -"Currently supported values are `num_rounds` (int, default: 1) and " -"`round_timeout` in seconds (float, default: None)." -msgstr "目前支持的值有:`num_rounds`(int,默认值:1)和以秒为单位的`round_timeout`(float,默认值:无)。" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`isspace `\\ \\(\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.server.app.start_server:12 of -msgid "" -"An implementation of the abstract base class " -"`flwr.server.strategy.Strategy`. If no strategy is provided, then " -"`start_server` will use `flwr.server.strategy.FedAvg`." -msgstr "" -"抽象基类 `flwr.server.strategy.Strategy` 的实现。如果没有提供策略,`start_server` 将使用 " -"`flwr.server.strategy.FedAvg`。" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isspace:1 of +#, fuzzy +msgid "Return True if the string is a whitespace string, False otherwise." +msgstr "如果字符串是空白字符串,则返回 True,否则返回 False。" -#: flwr.server.app.start_server:16 of -msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_server` will use " -"`flwr.server.client_manager.SimpleClientManager`." 
-msgstr "" -"抽象基类 `flwr.server.ClientManager` 的实现。如果没有提供实现,`start_server` 将使用 " -"`flwr.server.client_manager.SimpleClientManager`。" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`isdecimal `\\ \\(\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.server.app.start_server:21 of -msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower" -" clients. The default should be sufficient for most models. Users who " -"train very large models might need to increase this value. Note that the " -"Flower clients need to be started with the same value (see " -"`flwr.client.start_client`), otherwise clients will not know about the " -"increased limit and block larger messages." -msgstr "" -"可与 Flower 客户端交换的 gRPC 消息的最大长度:默认值对大多数模型都足够了。训练超大模型的用户可能需要增加该值。请注意,Flower " -"客户端需要以相同的值启动(请参阅 `flwr.client.start_client`),否则客户端将不知道已增加的限制并阻止更大的消息。" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isdecimal:1 of +#, fuzzy +msgid "Return True if the string is a decimal string, False otherwise." +msgstr "如果字符串是十进制字符串,则返回 True,否则返回 False。" -#: flwr.server.app.start_server:28 of -msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order: * CA certificate. * " -"server certificate. * server private key." -msgstr "" -"包含根证书、服务器证书和私钥的元组,用于启动启用 SSL 的安全服务器。元组应按以下顺序包含三个字节元素: * CA 证书,* 服务器证书, * " -"服务器私钥。" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`isdigit `\\ \\(\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.server.app.start_server:28 of -msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. 
The tuple is expected to have three " -"bytes elements in the following order:" -msgstr "包含根证书、服务器证书和私钥的元组,用于启动启用 SSL 的安全服务器。元组应按以下顺序包含三个字节元素:" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isdigit:1 of +#, fuzzy +msgid "Return True if the string is a digit string, False otherwise." +msgstr "如果字符串是数字字符串,则返回 True,否则返回 False。" -#: flwr.server.app.start_server:32 of -msgid "CA certificate." -msgstr "CA 证书。" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`isnumeric `\\ \\(\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.server.app.start_server:33 of -msgid "server certificate." -msgstr "服务器证书。" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isnumeric:1 of +#, fuzzy +msgid "Return True if the string is a numeric string, False otherwise." +msgstr "如果字符串是数字字符串,则返回 True,否则返回 False。" -#: flwr.server.app.start_server:34 of -msgid "server private key." -msgstr "服务器私人密钥。" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`isalpha `\\ \\(\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.server.app.start_server:37 of -msgid "**hist** -- Object containing training and evaluation metrics." -msgstr "**hist** -- 包含训练和评估指标的对象。" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isalpha:1 of +#, fuzzy +msgid "Return True if the string is an alphabetic string, False otherwise." 
+msgstr "如果字符串是字母字符串,则返回 True,否则返回 False。" -#: flwr.server.app.start_server:42 of -msgid "Starting an insecure server:" -msgstr "启动不安全的服务器:" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`isalnum `\\ \\(\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.server.app.start_server:46 of -msgid "Starting an SSL-enabled server:" -msgstr "启动支持 SSL 的服务器:" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isalnum:1 of +#, fuzzy +msgid "Return True if the string is an alpha-numeric string, False otherwise." +msgstr "如果字符串是字母数字字符串,则返回 True,否则返回 False。" -#: ../../source/ref-api/flwr.server.strategy.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "strategy" -msgstr "Krum 策略。" +msgid ":py:obj:`isidentifier `\\ \\(\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isidentifier:1 of #, fuzzy -msgid "" -":py:obj:`Bulyan `\\ \\(\\*\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" -msgstr "" -":py:obj:`Bulyan `\\ \\(\\*\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" +msgid "Return True if the string is a valid Python identifier, False otherwise." +msgstr "如果字符串是有效的 Python 标识符,则返回 True,否则返回 False。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.bulyan.Bulyan:1 of -msgid "Bulyan strategy." 
-msgstr "Bulyan 策略。" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`isprintable `\\ \\(\\)" +msgstr ":py:obj:`PING `\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isprintable:1 of #, fuzzy -msgid "" -":py:obj:`DPFedAvgAdaptive `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\)" -msgstr "" -":py:obj:`DPFedAvgAdaptive `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\)" +msgid "Return True if the string is printable, False otherwise." +msgstr "如果字符串可打印,则返回 True,否则返回 False。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of -msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." -msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`zfill `\\ \\(width\\, \\/\\)" +msgstr ":py:obj:`PING `\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.zfill:1 of #, fuzzy msgid "" -":py:obj:`DPFedAvgFixed `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" -msgstr "" -":py:obj:`DPFedAvgFixed `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of -msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." -msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" +"Pad a numeric string with zeros on the left, to fill a field of the given" +" width." 
+msgstr "在数字字符串左侧填充零,以填满给定宽度的字段。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy msgid "" -":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " -"`\\ " -"\\(...\\)" +":py:obj:`format `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " -"`\\ " -"\\(...\\)" +":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " +"\\*\\*kwargs\\)" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 -#: of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Strategy wrapper for central DP with client-side adaptive clipping." -msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" +msgid "Return a formatted version of S, using substitutions from args and kwargs." +msgstr "使用 args 和 kwargs 的替换,返回 S 的格式化版本。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "" -":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " -"`\\ " -"\\(...\\)" -msgstr "" -":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " -"`\\ " -"\\(...\\)" +msgid ":py:obj:`format_map `\\ \\(mapping\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 -#: of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Strategy wrapper for central DP with server-side adaptive clipping." -msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" +msgid "Return a formatted version of S, using substitutions from mapping." 
+msgstr "使用映射中的替换,返回 S 的格式化版本。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "" -":py:obj:`DifferentialPrivacyClientSideFixedClipping " -"`\\ " -"\\(...\\)" -msgstr "" -":py:obj:`DifferentialPrivacyClientSideFixedClipping " -"`\\ " -"\\(...\\)" +msgid ":py:obj:`maketrans `\\" +msgstr ":py:obj:`TRAIN `\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 -#: of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.maketrans:1 of #, fuzzy -msgid "Strategy wrapper for central DP with client-side fixed clipping." -msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" +msgid "Return a translation table usable for str.translate()." +msgstr "返回可用于 str.translate() 的翻译表。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "" -":py:obj:`DifferentialPrivacyServerSideFixedClipping " -"`\\ " -"\\(...\\)" -msgstr "" -":py:obj:`DifferentialPrivacyServerSideFixedClipping " -"`\\ " -"\\(...\\)" +msgid ":py:obj:`PING `\\" +msgstr ":py:obj:`PING `\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 -#: of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "Strategy wrapper for central DP with server-side fixed clipping." 
-msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" +msgid ":py:obj:`START_CLIENT_ENTER `\\" +msgstr ":py:obj:`START_CLIENT_ENTER `\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "" -":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" -msgstr "" -":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedadagrad.FedAdagrad:1 of -msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." -msgstr "FedAdagrad 策略 - 使用 Adagrad 进行自适应联合优化。" +msgid ":py:obj:`START_CLIENT_LEAVE `\\" +msgstr ":py:obj:`START_CLIENT_LEAVE `\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "" -":py:obj:`FedAdam `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" -msgstr "" -":py:obj:`FedAdam `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +msgid ":py:obj:`START_SERVER_ENTER `\\" +msgstr ":py:obj:`START_SERVER_ENTER `\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedadam.FedAdam:1 of -msgid "FedAdam - Adaptive Federated Optimization using Adam." -msgstr "FedAdam - 使用 Adam 进行自适应联合优化。" +#: flwr.common.EventType.capitalize:1::1 of +#, fuzzy +msgid ":py:obj:`START_SERVER_LEAVE `\\" +msgstr ":py:obj:`START_SERVER_LEAVE `\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -":py:obj:`FedAvg `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +":py:obj:`START_SIMULATION_ENTER " +"`\\" msgstr "" -":py:obj:`FedAvg `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedavg.FedAvg:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of -msgid "Federated Averaging strategy." 
-msgstr "联邦平均策略。" +":py:obj:`START_SIMULATION_ENTER " +"`\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -":py:obj:`FedAvgAndroid `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" msgstr "" -":py:obj:`FedAvgAndroid `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +":py:obj:`RUN_SUPEREXEC_ENTER " +"`\\" msgstr "" -":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedavgm.FedAvgM:1 of -msgid "Federated Averaging with Momentum strategy." -msgstr "联邦平均动量策略。" +":py:obj:`RUN_SUPERLINK_ENTER " +"`\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -":py:obj:`FedMedian `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +":py:obj:`RUN_SUPEREXEC_LEAVE " +"`\\" msgstr "" -":py:obj:`FedMedian `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +":py:obj:`RUN_SUPERLINK_LEAVE " +"`\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedmedian.FedMedian:1 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "Configurable FedMedian strategy implementation." 
-msgstr "可配置的 FedAvg 策略实施。" +msgid "" +":py:obj:`CLI_FLOWER_SIMULATION_ENTER " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_ENTER " +"`\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -":py:obj:`FedOpt `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +":py:obj:`CLI_FLOWER_SIMULATION_LEAVE " +"`\\" msgstr "" -":py:obj:`FedOpt `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedopt.FedOpt:1 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "Federated Optim strategy." -msgstr "联邦优化策略。" +msgid "" +":py:obj:`PYTHON_API_RUN_SIMULATION_ENTER " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_ENTER " +"`\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -":py:obj:`FedProx `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +":py:obj:`PYTHON_API_RUN_SIMULATION_LEAVE " +"`\\" msgstr "" -":py:obj:`FedProx `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedprox.FedProx:1 of -msgid "Federated Optimization strategy." -msgstr "联邦优化策略。" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -":py:obj:`FedTrimmedAvg `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +":py:obj:`RUN_SUPERLINK_ENTER " +"`\\" msgstr "" -":py:obj:`FedTrimmedAvg `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of -msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." 
-msgstr "带修剪均值的联邦平均法[Dong Yin 等,2021]。" +":py:obj:`RUN_SUPERLINK_ENTER " +"`\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -":py:obj:`FedXgbBagging `\\ " -"\\(\\[evaluate\\_function\\]\\)" +":py:obj:`RUN_SUPERLINK_LEAVE " +"`\\" msgstr "" -":py:obj:`FedXgbBagging `\\ " -"\\(\\[evaluate\\_function\\]\\)" +":py:obj:`RUN_SUPERLINK_LEAVE " +"`\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "Configurable FedXgbBagging strategy implementation." -msgstr "可配置的 FedXgbNAvg 策略实施。" +msgid "" +":py:obj:`RUN_SUPERNODE_ENTER " +"`\\" +msgstr "" +":py:obj:`RUN_SUPERLINK_ENTER " +"`\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -":py:obj:`FedXgbCyclic `\\ " -"\\(\\*\\*kwargs\\)" +":py:obj:`RUN_SUPERNODE_LEAVE " +"`\\" msgstr "" -":py:obj:`FedXgbCyclic `\\ " -"\\(\\*\\*kwargs\\)" +":py:obj:`RUN_SUPERLINK_LEAVE " +"`\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "Configurable FedXgbCyclic strategy implementation." -msgstr "可配置的 FedAvg 策略实施。" +msgid "" +":py:obj:`RUN_SERVER_APP_ENTER " +"`\\" +msgstr "" +":py:obj:`RUN_SERVER_APP_ENTER " +"`\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" +":py:obj:`RUN_SERVER_APP_LEAVE " +"`\\" msgstr "" -":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of -msgid "Configurable FedXgbNnAvg strategy implementation." 
-msgstr "可配置的 FedXgbNAvg 策略实施。" +":py:obj:`RUN_SERVER_APP_LEAVE " +"`\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -":py:obj:`FedYogi `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +":py:obj:`RUN_CLIENT_APP_ENTER " +"`\\" msgstr "" -":py:obj:`FedYogi `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "FedYogi [Reddi et al., 2020] strategy." -msgstr "FedYogi [Reddi 等人,2020] 策略。" +":py:obj:`RUN_CLIENT_APP_ENTER " +"`\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -":py:obj:`FaultTolerantFedAvg " -"`\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +":py:obj:`RUN_CLIENT_APP_LEAVE " +"`\\" msgstr "" -":py:obj:`FaultTolerantFedAvg " -"`\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of -msgid "Configurable fault-tolerant FedAvg strategy implementation." -msgstr "可配置的容错 FedAvg 策略实施。" +":py:obj:`RUN_CLIENT_APP_LEAVE " +"`\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.capitalize:3 of #, fuzzy msgid "" -":py:obj:`Krum `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" -msgstr "" -":py:obj:`Krum `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +"More specifically, make the first character have upper case and the rest " +"lower case." +msgstr "更具体地说,让第一个字符大写,其余字符小写。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.krum.Krum:1 of +#: flwr.common.EventType.center:3 flwr.common.EventType.ljust:3 +#: flwr.common.EventType.rjust:3 of #, fuzzy -msgid "Krum [Blanchard et al., 2017] strategy." 
-msgstr "FedYogi [Reddi 等人,2020] 策略。" +msgid "Padding is done using the specified fill character (default is a space)." +msgstr "使用指定的填充字符(默认为空格)进行填充。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.count:1 of #, fuzzy msgid "" -":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " -"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" -msgstr "" -":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " -"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" +"Return the number of non-overlapping occurrences of substring sub in " +"string S[start:end]. Optional arguments start and end are interpreted as" +" in slice notation." +msgstr "返回子串 sub 在字符串 S[start:end] 中非重叠出现的次数。 可选参数 start 和 end 按切分符号解释。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.qfedavg.QFedAvg:1 of -msgid "Configurable QFedAvg strategy implementation." -msgstr "可配置的 QFedAvg 策略实施。" +#: flwr.common.EventType.encode:3 of +#, fuzzy +msgid "encoding" +msgstr "编码" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.encode:4 of #, fuzzy -msgid ":py:obj:`Strategy `\\ \\(\\)" -msgstr "server.strategy.Strategy" +msgid "The encoding in which to encode the string." +msgstr "字符串的编码。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.strategy.Strategy:1 of -msgid "Abstract base class for server strategy implementations." -msgstr "服务器策略实现的抽象基类。" +#: flwr.common.EventType.encode:9 of +#, fuzzy +msgid "errors" +msgstr "错误" -#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:2 +#: flwr.common.EventType.encode:6 of #, fuzzy -msgid "Bulyan" -msgstr "Bulyan" +msgid "" +"The error handling scheme to use for encoding errors. The default is " +"'strict' meaning that encoding errors raise a UnicodeEncodeError. Other " +"possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well " +"as any other name registered with codecs.register_error that can handle " +"UnicodeEncodeErrors." 
+msgstr "" +"编码错误的错误处理方案。默认值为 \"strict\",即编码错误会引发 UnicodeEncodeError。 其他可能的值包括 " +"\"ignore\"、\"replace \"和 \"xmlcharrefreplace\",以及通过 codecs.register_error" +" 注册的、可处理 UnicodeEncodeError 的其他名称。" -#: flwr.server.strategy.bulyan.Bulyan:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 -#: flwr.server.strategy.fedavgm.FedAvgM:1 -#: flwr.server.strategy.fedmedian.FedMedian:1 -#: flwr.server.strategy.fedopt.FedOpt:1 flwr.server.strategy.fedprox.FedProx:1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 -#: flwr.server.strategy.krum.Krum:1 flwr.server.strategy.qfedavg.QFedAvg:1 of +#: flwr.common.EventType.endswith:1 of #, fuzzy -msgid "Bases: :py:class:`~flwr.server.strategy.fedavg.FedAvg`" -msgstr "server.strategy.DPFedAvgFixed" +msgid "" +"Return True if S ends with the specified suffix, False otherwise. With " +"optional start, test S beginning at that position. With optional end, " +"stop comparing S at that position. suffix can also be a tuple of strings " +"to try." +msgstr "" +"如果 S 以指定后缀结束,则返回 True,否则返回 False。如果起始位置可选,则从该位置开始测试 S。如果使用可选的 " +"end,则在该位置停止比较 S。后缀也可以是要尝试的字符串元组。" -#: flwr.server.strategy.bulyan.Bulyan:3 of -msgid "Implementation based on https://arxiv.org/abs/1802.07927." -msgstr "实施基于 https://arxiv.org/abs/1802.07927。" +#: flwr.common.EventType.expandtabs:3 of +#, fuzzy +msgid "If tabsize is not given, a tab size of 8 characters is assumed."
+msgstr "如果未给出制表符大小,则假定制表符大小为 8 个字符。" -#: flwr.server.strategy.bulyan.Bulyan:5 -#: flwr.server.strategy.fedadagrad.FedAdagrad:5 -#: flwr.server.strategy.fedadam.FedAdam:5 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:5 -#: flwr.server.strategy.fedavgm.FedAvgM:5 flwr.server.strategy.fedopt.FedOpt:5 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:5 -#: flwr.server.strategy.fedyogi.FedYogi:5 flwr.server.strategy.krum.Krum:5 of -msgid "Fraction of clients used during training. Defaults to 1.0." -msgstr "训练期间使用客户的比例。默认为 1.0。" +#: flwr.common.EventType.find:1 flwr.common.EventType.index:1 of +#, fuzzy +msgid "" +"Return the lowest index in S where substring sub is found, such that sub " +"is contained within S[start:end]. Optional arguments start and end are " +"interpreted as in slice notation." +msgstr "返回在 S 中找到子串 sub 的最低索引,即 sub 包含在 S[start:end] 中。 可选参数 start 和 end 按切分符号解释。" -#: flwr.server.strategy.bulyan.Bulyan:7 -#: flwr.server.strategy.fedadagrad.FedAdagrad:7 -#: flwr.server.strategy.fedadam.FedAdam:7 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:7 -#: flwr.server.strategy.fedavgm.FedAvgM:7 flwr.server.strategy.fedopt.FedOpt:7 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:7 -#: flwr.server.strategy.fedyogi.FedYogi:7 flwr.server.strategy.krum.Krum:7 of -msgid "Fraction of clients used during validation. Defaults to 1.0." -msgstr "验证过程中使用的客户端比例。默认为 1.0。" +#: flwr.common.EventType.find:5 flwr.common.EventType.rfind:5 of +#, fuzzy +msgid "Return -1 on failure." 
+msgstr "失败时返回-1。" -#: flwr.server.strategy.bulyan.Bulyan:9 -#: flwr.server.strategy.fedadagrad.FedAdagrad:9 -#: flwr.server.strategy.fedadam.FedAdam:9 flwr.server.strategy.fedavg.FedAvg:13 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:9 -#: flwr.server.strategy.fedavgm.FedAvgM:9 flwr.server.strategy.fedopt.FedOpt:9 -#: flwr.server.strategy.fedprox.FedProx:45 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:9 -#: flwr.server.strategy.fedyogi.FedYogi:9 flwr.server.strategy.krum.Krum:9 of -msgid "Minimum number of clients used during training. Defaults to 2." -msgstr "训练期间使用的最少客户数。默认为 2。" +#: flwr.common.EventType.format:1 of +#, fuzzy +msgid "" +"Return a formatted version of S, using substitutions from args and " +"kwargs. The substitutions are identified by braces ('{' and '}')." +msgstr "使用来自 args 和 kwargs 的替换,返回 S 的格式化版本。替换用大括号('{'和'}')标识。" -#: flwr.server.strategy.bulyan.Bulyan:11 -#: flwr.server.strategy.fedadagrad.FedAdagrad:11 -#: flwr.server.strategy.fedadam.FedAdam:11 -#: flwr.server.strategy.fedavg.FedAvg:15 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:11 -#: flwr.server.strategy.fedavgm.FedAvgM:11 -#: flwr.server.strategy.fedopt.FedOpt:11 -#: flwr.server.strategy.fedprox.FedProx:47 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:11 -#: flwr.server.strategy.fedyogi.FedYogi:11 flwr.server.strategy.krum.Krum:11 of -msgid "Minimum number of clients used during validation. Defaults to 2." -msgstr "验证过程中使用的最少客户端数量。默认为 2。" +#: flwr.common.EventType.format_map:1 of +#, fuzzy +msgid "" +"Return a formatted version of S, using substitutions from mapping. The " +"substitutions are identified by braces ('{' and '}')." 
+msgstr "使用映射中的替换,返回 S 的格式化版本。替换用大括号('{'和'}')标识。" -#: flwr.server.strategy.bulyan.Bulyan:13 -#: flwr.server.strategy.fedadagrad.FedAdagrad:13 -#: flwr.server.strategy.fedadam.FedAdam:13 -#: flwr.server.strategy.fedavg.FedAvg:17 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:13 -#: flwr.server.strategy.fedavgm.FedAvgM:13 -#: flwr.server.strategy.fedopt.FedOpt:13 -#: flwr.server.strategy.fedprox.FedProx:49 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:13 -#: flwr.server.strategy.fedyogi.FedYogi:13 flwr.server.strategy.krum.Krum:13 of -msgid "Minimum number of total clients in the system. Defaults to 2." -msgstr "系统中客户总数的最小值。默认为 2。" +#: flwr.common.EventType.index:5 flwr.common.EventType.rindex:5 of +#, fuzzy +msgid "Raises ValueError when the substring is not found." +msgstr "如果未找到子串,则引发 ValueError。" -#: flwr.server.strategy.bulyan.Bulyan:15 flwr.server.strategy.krum.Krum:15 of -msgid "Number of malicious clients in the system. Defaults to 0." -msgstr "系统中恶意客户端的数量。默认为 0。" +#: flwr.common.EventType.isalnum:3 of +#, fuzzy +msgid "" +"A string is alpha-numeric if all characters in the string are alpha-" +"numeric and there is at least one character in the string." +msgstr "如果字符串中的所有字符都是字母数字,且字符串中至少有一个字符,则该字符串为字母数字字符串。" -#: flwr.server.strategy.bulyan.Bulyan:17 -#: flwr.server.strategy.fedadagrad.FedAdagrad:15 -#: flwr.server.strategy.fedadam.FedAdam:15 -#: flwr.server.strategy.fedavg.FedAvg:19 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:15 -#: flwr.server.strategy.fedavgm.FedAvgM:15 -#: flwr.server.strategy.fedopt.FedOpt:15 -#: flwr.server.strategy.fedprox.FedProx:51 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:15 -#: flwr.server.strategy.fedyogi.FedYogi:17 -#: flwr.server.strategy.fedyogi.FedYogi:18 -#: flwr.server.strategy.fedyogi.FedYogi:19 flwr.server.strategy.krum.Krum:20 of -msgid "Optional function used for validation. Defaults to None." 
-msgstr "用于验证的可选函数。默认为 \"无\"。" +#: flwr.common.EventType.isalpha:3 of +#, fuzzy +msgid "" +"A string is alphabetic if all characters in the string are alphabetic and" +" there is at least one character in the string." +msgstr "如果字符串中的所有字符都是字母,并且字符串中至少有一个字符,那么该字符串就是字母字符串。" -#: flwr.server.strategy.bulyan.Bulyan:19 -#: flwr.server.strategy.fedadagrad.FedAdagrad:17 -#: flwr.server.strategy.fedadam.FedAdam:17 -#: flwr.server.strategy.fedavg.FedAvg:21 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:17 -#: flwr.server.strategy.fedavgm.FedAvgM:17 -#: flwr.server.strategy.fedopt.FedOpt:17 -#: flwr.server.strategy.fedprox.FedProx:53 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:17 -#: flwr.server.strategy.fedyogi.FedYogi:20 flwr.server.strategy.krum.Krum:22 of -msgid "Function used to configure training. Defaults to None." -msgstr "用于配置训练的功能。默认为 \"无\"。" +#: flwr.common.EventType.isascii:3 of +#, fuzzy +msgid "" +"ASCII characters have code points in the range U+0000-U+007F. Empty " +"string is ASCII too." +msgstr "ASCII 字符的码位范围为 U+0000-U+007F。空字符串也是 ASCII 字符。" -#: flwr.server.strategy.bulyan.Bulyan:21 -#: flwr.server.strategy.fedadagrad.FedAdagrad:19 -#: flwr.server.strategy.fedadam.FedAdam:19 -#: flwr.server.strategy.fedavg.FedAvg:23 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:19 -#: flwr.server.strategy.fedavgm.FedAvgM:19 -#: flwr.server.strategy.fedopt.FedOpt:19 -#: flwr.server.strategy.fedprox.FedProx:55 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:19 -#: flwr.server.strategy.fedyogi.FedYogi:22 flwr.server.strategy.krum.Krum:24 of -msgid "Function used to configure validation. Defaults to None." -msgstr "用于配置验证的函数。默认为 \"无\"。" +#: flwr.common.EventType.isdecimal:3 of +#, fuzzy +msgid "" +"A string is a decimal string if all characters in the string are decimal " +"and there is at least one character in the string." 
+msgstr "如果字符串中的所有字符都是十进制,并且字符串中至少有一个字符是十进制,那么该字符串就是十进制字符串。" -#: flwr.server.strategy.bulyan.Bulyan:23 -#: flwr.server.strategy.fedadagrad.FedAdagrad:25 -#: flwr.server.strategy.fedadam.FedAdam:21 -#: flwr.server.strategy.fedavg.FedAvg:25 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:21 -#: flwr.server.strategy.fedavgm.FedAvgM:21 -#: flwr.server.strategy.fedopt.FedOpt:21 -#: flwr.server.strategy.fedprox.FedProx:57 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:21 -#: flwr.server.strategy.fedyogi.FedYogi:24 flwr.server.strategy.krum.Krum:26 of -msgid "Whether or not accept rounds containing failures. Defaults to True." -msgstr "是否接受包含失败的轮。默认为 True。" +#: flwr.common.EventType.isdigit:3 of +#, fuzzy +msgid "" +"A string is a digit string if all characters in the string are digits and" +" there is at least one character in the string." +msgstr "如果字符串中的所有字符都是数字,并且字符串中至少有一个字符,那么该字符串就是数字字符串。" -#: flwr.server.strategy.bulyan.Bulyan:25 -#: flwr.server.strategy.fedadagrad.FedAdagrad:27 -#: flwr.server.strategy.fedadam.FedAdam:23 -#: flwr.server.strategy.fedavg.FedAvg:27 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:24 -#: flwr.server.strategy.fedavgm.FedAvgM:23 -#: flwr.server.strategy.fedopt.FedOpt:23 -#: flwr.server.strategy.fedprox.FedProx:59 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:23 -#: flwr.server.strategy.fedyogi.FedYogi:26 flwr.server.strategy.krum.Krum:28 of -msgid "Initial global model parameters." -msgstr "初始全局模型参数。" +#: flwr.common.EventType.isidentifier:3 of +#, fuzzy +msgid "" +"Call keyword.iskeyword(s) to test whether string s is a reserved " +"identifier, such as \"def\" or \"class\"." 
+msgstr "调用 keyword.iskeyword(s) 测试字符串 s 是否为保留标识符,如 \"def \"或 \"class\"。" -#: flwr.server.strategy.bulyan.Bulyan:27 of +#: flwr.common.EventType.islower:3 of +#, fuzzy msgid "" -"Byzantine resilient aggregation rule that is used as the first step of " -"the Bulyan (e.g., Krum)" -msgstr "Byzantine弹性聚合规则,用作 Bulyan 的第一步(如 Krum)" +"A string is lowercase if all cased characters in the string are lowercase" +" and there is at least one cased character in the string." +msgstr "如果字符串中的所有大小写字符都是小写,且字符串中至少有一个大小写字符,则该字符串为小写字符串。" -#: flwr.server.strategy.bulyan.Bulyan:29 of -msgid "arguments to the first_aggregation rule" -msgstr "第一聚类规则的参数" +#: flwr.common.EventType.isnumeric:3 of +#, fuzzy +msgid "" +"A string is numeric if all characters in the string are numeric and there" +" is at least one character in the string." +msgstr "如果字符串中的所有字符都是数字,且字符串中至少有一个字符,则该字符串为数字字符串。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.EventType.isprintable:3 of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +"A string is printable if all of its characters are considered printable " +"in repr() or if it is empty." +msgstr "如果字符串的所有字符在 repr() 中都被认为是可打印的,或者字符串为空,那么该字符串就是可打印的。" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Aggregate evaluation losses using weighted average." 
-msgstr "采用加权平均法计算评估损失总额。" +#: flwr.common.EventType.isspace:3 of +#, fuzzy +msgid "" +"A string is whitespace if all characters in the string are whitespace and" +" there is at least one character in the string." +msgstr "如果字符串中的所有字符都是空格,且字符串中至少有一个字符,则该字符串为空格。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.EventType.istitle:3 of #, fuzzy msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"In a title-cased string, upper- and title-case characters may only follow" +" uncased characters and lowercase characters only cased ones." +msgstr "在标题大小写字符串中,大写和标题大小写字符只能跟在无大小写字符之后,小写字符只能跟在有大小写字符之后。" -#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "Aggregate fit results using Bulyan." -msgstr "使用 Bulyan 技术汇总拟合结果。" +#: flwr.common.EventType.isupper:3 of +#, fuzzy +msgid "" +"A string is uppercase if all cased characters in the string are uppercase" +" and there is at least one cased character in the string." +msgstr "如果字符串中所有带大小写的字符都是大写,并且字符串中至少有一个带大小写的字符,则该字符串为大写字符串。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.EventType.join:3 of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +"The string whose method is called is inserted in between each given " +"string. The result is returned as a new string." 
+msgstr "方法被调用的字符串会被插入每个给定的字符串之间。结果将以新字符串的形式返回。" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of -msgid "Configure the next round of evaluation." 
-msgstr "配置下一轮评估。" +#: flwr.common.EventType.join:6 of +#, fuzzy +msgid "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" +msgstr "示例:'.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.EventType.lstrip:3 flwr.common.EventType.rstrip:3 +#: flwr.common.EventType.strip:3 of +#, fuzzy +msgid "If chars is given and not None, remove characters in chars instead." +msgstr "如果给定的是 chars 而不是 None,则删除 chars 中的字符。" + +#: flwr.common.EventType.maketrans:3 of #, fuzzy msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"If there is only one argument, it must be a dictionary mapping Unicode " +"ordinals (integers) or characters to Unicode ordinals, strings or None. " +"Character keys will be then converted to ordinals. If there are two " +"arguments, they must be strings of equal length, and in the resulting " +"dictionary, each character in x will be mapped to the character at the " +"same position in y. If there is a third argument, it must be a string, " +"whose characters will be mapped to None in the result." 
msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"如果只有一个参数,则必须是一个将 Unicode 序号(整数)或字符映射到 Unicode 序号、字符串或 None " +"的字典。字符键将被转换为序号。如果有两个参数,它们必须是长度相等的字符串,在生成的字典中,x 中的每个字符将被映射到 y 中相同位置的字符。" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_fit:1 -#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_fit:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: 
flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of -msgid "Configure the next round of training." -msgstr "配置下一轮训练。" +#: flwr.common.EventType.partition:3 of +#, fuzzy +msgid "" +"This will search for the separator in the string. If the separator is " +"found, returns a 3-tuple containing the part before the separator, the " +"separator itself, and the part after it." +msgstr "它会在字符串中搜索分隔符。 如果找到分隔符,则返回一个包含分隔符前部分、分隔符本身和分隔符后部分的 3 元组。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.EventType.partition:7 of #, fuzzy msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"If the separator is not found, returns a 3-tuple containing the original " +"string and two empty strings." +msgstr "如果找不到分隔符,则返回一个包含原始字符串和两个空字符串的 3 元组。" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.evaluate:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Evaluate model parameters using an evaluation function." -msgstr "使用评估函数评估模型参数。" +#: flwr.common.EventType.removeprefix:3 of +#, fuzzy +msgid "" +"If the string starts with the prefix string, return string[len(prefix):]." +" Otherwise, return a copy of the original string." 
+msgstr "如果字符串以前缀字符串开始,则返回 string[len(prefix):]。否则,返回原始字符串的副本。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.EventType.removesuffix:3 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"If the string ends with the suffix string and that suffix is not empty, " +"return string[:-len(suffix)]. Otherwise, return a copy of the original " +"string." +msgstr "如果字符串以后缀字符串结尾,且后缀不为空,则返回 string[:-len(suffix)]。否则,返回原始字符串的副本。" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.initialize_parameters:1 -#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Initialize global model parameters." -msgstr "初始化全局模型参数。" +#: flwr.common.EventType.replace:5 of +#, fuzzy +msgid "count" +msgstr "计数" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.EventType.replace:4 of #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"Maximum number of occurrences to replace. -1 (the default value) means " +"replace all occurrences."
+msgstr "要替换的最大出现次数。-1(默认值)表示替换所有出现次数。" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_evaluation_clients:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of -msgid "Use a fraction of available clients for evaluation." -msgstr "使用部分可用客户进行评估。" +#: flwr.common.EventType.replace:7 of +#, fuzzy +msgid "" +"If the optional argument count is given, only the first count occurrences" +" are replaced." +msgstr "如果给出可选参数 count,则只替换第一个计数出现的次数。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.EventType.rfind:1 flwr.common.EventType.rindex:1 of #, fuzzy msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +"Return the highest index in S where substring sub is found, such that sub" +" is contained within S[start:end]. Optional arguments start and end are " +"interpreted as in slice notation." 
+msgstr "返回在 S 中找到子串 sub 且 sub 包含在 S[start:end] 中的最高索引。 可选参数 start 和 end 按切分符号解释。" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_fit_clients:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of -msgid "Return the sample size and the required number of available clients." -msgstr "返回样本大小和所需的可用客户数量。" +#: flwr.common.EventType.rpartition:3 of +#, fuzzy +msgid "" +"This will search for the separator in the string, starting at the end. If" +" the separator is found, returns a 3-tuple containing the part before the" +" separator, the separator itself, and the part after it." +msgstr "它会从字符串的末尾开始搜索分隔符。如果找到分隔符,则返回一个包含分隔符前部分、分隔符本身和分隔符后部分的 3 元组。" -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 -msgid "DPFedAvgAdaptive" -msgstr "DPFedAvgAdaptive" +#: flwr.common.EventType.rpartition:7 of +#, fuzzy +msgid "" +"If the separator is not found, returns a 3-tuple containing two empty " +"strings and the original string." 
+msgstr "如果找不到分隔符,则返回一个包含两个空字符串和原始字符串的 3 元组。" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +#: flwr.common.EventType.rsplit:7 flwr.common.EventType.split:7 of #, fuzzy -msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" -msgstr "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" +msgid "sep" +msgstr "sep" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:3 of +#: flwr.common.EventType.rsplit:4 flwr.common.EventType.split:4 of #, fuzzy -msgid "This class is deprecated and will be removed in a future release." -msgstr "该类已被弃用,将在以后的版本中删除。" +msgid "The separator used to split the string." +msgstr "用于分割字符串的分隔符。" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.EventType.rsplit:6 flwr.common.EventType.split:6 of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"When set to None (the default value), will split on any whitespace " +"character (including \\\\n \\\\r \\\\t \\\\f and spaces) and will discard" +" empty strings from the result." 
+msgstr "当设置为 \"无\"(默认值)时,将对任何空白字符(包括 \\n \\r \\t \\f 和空格)进行分割,并从结果中剔除空字符串。" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "Aggregate evaluation losses using the given strategy." -msgstr "使用给定的策略汇总评估损失。" +#: flwr.common.EventType.rsplit:11 flwr.common.EventType.split:11 of +#, fuzzy +msgid "maxsplit" +msgstr "最大分割" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.EventType.rsplit:10 flwr.common.EventType.split:10 of #, fuzzy msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"Maximum number of splits (starting from the left). -1 (the default value)" +" means no limit." 
+msgstr "最大分割次数(从左边开始)。-1(默认值)表示没有限制。" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." -msgstr "汇总 DPFedAvgFixed 中的训练结果并更新片段标准。" +#: flwr.common.EventType.rsplit:13 of +#, fuzzy +msgid "Splitting starts at the end of the string and works to the front." +msgstr "从琴弦末端开始分弦,一直到琴弦前端。" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.EventType.split:13 of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" - -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of -msgid "Configure the next round of evaluation using the specified strategy." -msgstr "使用指定策略配置下一轮评估。" +"Note, str.split() is mainly useful for data that has been intentionally " +"delimited. With natural text that includes punctuation, consider using " +"the regular expression module." +msgstr "注意,str.split() 主要适用于有意分隔的数据。 对于包含标点符号的自然文本,可以考虑使用正则表达式模块。" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.EventType.splitlines:3 of #, fuzzy msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Line breaks are not included in the resulting list unless keepends is " +"given and true." 
+msgstr "除非指定 keepends 为 true,否则换行符不会包含在生成的列表中。" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.EventType.startswith:1 of #, fuzzy msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"Return True if S starts with the specified prefix, False otherwise. With " +"optional start, test S beginning at that position. With optional end, " +"stop comparing S at that position. prefix can also be a tuple of strings " +"to try." msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" - -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of -msgid "Evaluate model parameters using an evaluation function from the strategy." 
-msgstr "使用策略中的评估函数评估模型参数。" +"如果 S 以指定的前缀开始,则返回 True,否则返回 False。如果选择 start,则从该位置开始测试 S。如果使用可选的 " +"end,则在该位置停止比较 S。" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.EventType.title:3 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" - -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of -msgid "Initialize global model parameters using given strategy." -msgstr "使用给定的策略初始化全局模型参数。" +"More specifically, words start with uppercased characters and all " +"remaining cased characters have lower case." 
+msgstr "更具体地说,单词以大写字母开头,其余所有大小写字符均为小写。" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 -#: flwr.server.strategy.strategy.Strategy.configure_fit:3 -#: flwr.server.strategy.strategy.Strategy.evaluate:6 of -msgid "The current round of federated learning." -msgstr "本轮联邦学习。" +#: flwr.common.EventType.translate:5 of +#, fuzzy +msgid "table" +msgstr "数据库" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 -#: flwr.server.strategy.strategy.Strategy.configure_fit:7 -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of -msgid "The client manager which holds all currently connected clients." -msgstr "客户端管理器,用于管理当前连接的所有客户端。" +#: flwr.common.EventType.translate:4 of +#, fuzzy +msgid "" +"Translation table, which must be a mapping of Unicode ordinals to Unicode" +" ordinals, strings, or None." +msgstr "翻译表,必须是 Unicode 序号到 Unicode 序号、字符串或无的映射。" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of +#: flwr.common.EventType.translate:7 of +#, fuzzy msgid "" -"**evaluate_configuration** -- A list of tuples. Each tuple in the list " -"identifies a `ClientProxy` and the `EvaluateIns` for this particular " -"`ClientProxy`. If a particular `ClientProxy` is not included in this " -"list, it means that this `ClientProxy` will not participate in the next " -"round of federated evaluation." +"The table must implement lookup/indexing via __getitem__, for instance a " +"dictionary or list. 
If this operation raises LookupError, the character " +"is left untouched. Characters mapped to None are deleted." msgstr "" -"**evaluate_configuration** -- " -"一个元组列表。列表中的每个元组都标识了一个`ClientProxy`和该特定`ClientProxy`的`EvaluateIns`。如果某个特定的" -" `ClientProxy` 未包含在此列表中,则表示该 `ClientProxy` 将不参与下一轮联合评估。" +"表必须通过 __getitem__ 实现查找/索引,例如字典或列表。 如果该操作引发 LookupError,该字符将保持不变。 映射为 None" +" 的字符将被删除。" -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 -msgid "DPFedAvgFixed" -msgstr "DPFedAvgFixed" +#: flwr.common.EventType.zfill:3 of +#, fuzzy +msgid "The string is never truncated." +msgstr "字符串不会被截断。" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 -#: flwr.server.strategy.fedavg.FedAvg:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +#: ../../source/ref-api/flwr.common.FitIns.rst:2 #, fuzzy -msgid "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" -msgstr "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" +msgid "FitIns" +msgstr "FitIns" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +msgid ":py:obj:`parameters `\\" +msgstr ":py:obj:`parameters `\\" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 #, fuzzy -msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" 
-msgstr "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +msgid ":py:obj:`config `\\" +msgstr ":py:obj:`config `\\" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of -msgid "Aggregate training results using unweighted aggregation." -msgstr "使用非加权汇总法汇总训练结果。" +#: ../../source/ref-api/flwr.common.FitRes.rst:2 +#, fuzzy +msgid "FitRes" +msgstr "FitRes" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid ":py:obj:`status `\\" +msgstr ":py:obj:`status `\\" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 #, fuzzy -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid ":py:obj:`parameters `\\" +msgstr ":py:obj:`parameters `\\" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of -msgid "" -"Configure the next round of training incorporating Differential Privacy " -"(DP)." 
-msgstr "配置包含差分隐私 (DP) 的下一轮训练。" +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +#, fuzzy +msgid ":py:obj:`num_examples `\\" +msgstr ":py:obj:`num_examples `\\" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 #, fuzzy -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +msgid ":py:obj:`metrics `\\" +msgstr ":py:obj:`metrics `\\" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:2 #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" - -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of -msgid "" -"Configuration of the next training round includes information related to " -"DP, such as clip norm and noise stddev." -msgstr "下一轮训练的配置包括与 DP 相关的信息,如片段规范和噪声 stddev。" - -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 -#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of -msgid "" -"**fit_configuration** -- A list of tuples. Each tuple in the list " -"identifies a `ClientProxy` and the `FitIns` for this particular " -"`ClientProxy`. If a particular `ClientProxy` is not included in this " -"list, it means that this `ClientProxy` will not participate in the next " -"round of federated learning." 
-msgstr "" -"**fit_configuration** -- " -"一个元组列表。列表中的每个元组都标识了一个`ClientProxy`和该特定`ClientProxy`的`FitIns'。如果某个特定的`ClientProxy`不在此列表中,则表示该`ClientProxy`将不参加下一轮联合学习。" +msgid "GetParametersIns" +msgstr "参数" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:28::1 #, fuzzy -msgid "DifferentialPrivacyClientSideAdaptiveClipping" -msgstr "DifferentialPrivacyClientSideAdaptiveClipping" +msgid ":py:obj:`config `\\" +msgstr ":py:obj:`config `\\" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 -#: of +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:2 #, fuzzy -msgid "Use `adaptiveclipping_mod` modifier at the client side." -msgstr "在客户端使用 \"adaptiveclipping_mod \"修改器。" +msgid "GetParametersRes" +msgstr "参数" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 -#: of +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 #, fuzzy -msgid "" -"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " -"performs clipping on the server-side, " -"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " -"happen on the client-side, usually by using the built-in " -"`adaptiveclipping_mod`." 
-msgstr "" -"与在服务器端执行剪切的 `DifferentialPrivacyServerSideAdaptiveClipping` " -"相比,`DifferentialPrivacyClientSideAdaptiveClipping` 希望在客户端进行剪切,通常使用内置的 " -"`adaptiveclipping_mod`。" +msgid ":py:obj:`status `\\" +msgstr ":py:obj:`status `\\" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 -#: of +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 #, fuzzy -msgid "The strategy to which DP functionalities will be added by this wrapper." -msgstr "该包装器将添加 DP 功能的策略。" +msgid ":py:obj:`parameters `\\" +msgstr ":py:obj:`parameters `\\" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 -#: of +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:2 #, fuzzy -msgid "The noise multiplier for the Gaussian mechanism for model updates." -msgstr "用于模型更新的高斯机制的噪声乘数。" +msgid "GetPropertiesIns" +msgstr "GetPropertiesIns" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 -#: of +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:28::1 #, fuzzy -msgid "The number of clients that are sampled on each round." 
-msgstr "每轮取样的客户数。" +msgid ":py:obj:`config `\\" +msgstr ":py:obj:`config `\\" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 -#: of +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:2 #, fuzzy -msgid "" -"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " -"recommends to set to 0.1." -msgstr "剪切规范的初始值。默认为 0.1。安德鲁等人建议设置为 0.1。" +msgid "GetPropertiesRes" +msgstr "GetPropertiesRes" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 -#: of +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 #, fuzzy -msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." -msgstr "需要剪切的更新量化值。默认为 0.5。" +msgid ":py:obj:`status `\\" +msgstr ":py:obj:`status `\\" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 -#: of +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 #, fuzzy -msgid "" -"The learning rate for the clipping norm adaptation. Defaults to 0.2. " -"Andrew et al. recommends to set to 0.2." -msgstr "剪切规范适应的学习率。默认为 0.2。安德鲁等人建议设置为 0.2。" +msgid ":py:obj:`properties `\\" +msgstr ":py:obj:`properties `\\" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 -#: of +#: ../../source/ref-api/flwr.common.Message.rst:2 #, fuzzy -msgid "" -"The stddev of the noise added to the count of updates currently below the" -" estimate. Andrew et al. 
recommends to set to `expected_num_records/20`" -msgstr "添加到当前低于估计值的更新计数中的噪声的 stddev。安德鲁等人建议设置为 \"expected_num_records/20" +msgid "Message" +msgstr "服务器端" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 -#: of +#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 +#: flwr.common.message.Message:3 of #, fuzzy -msgid "Create a strategy:" -msgstr "server.strategy" +msgid "A dataclass including information about the message to be executed." +msgstr "数据类型,包括要执行的信息的相关信息。" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 -#: of +#: flwr.common.message.Message:5 of #, fuzzy msgid "" -"Wrap the strategy with the " -"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" -msgstr "用 \"DifferentialPrivacyClientSideAdaptiveClipping \"包装器对策略进行包装:" - -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 -#: of -#, fuzzy -msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" -msgstr "在客户端,将 \"adaptiveclipping_mod \"添加到客户端模块中:" +"Holds records either sent by another entity (e.g. sent by the server-side" +" logic to a client, or vice-versa) or that will be sent to it." 
+msgstr "保存由其他实体发送的记录(如由服务器端逻辑发送到客户端,反之亦然)或将发送到该实体的记录。" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.message.Message:8 of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +"A dataclass that captures information about an error that took place when" +" processing another message." +msgstr "数据类,用于捕捉处理其他报文时发生的错误信息。" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.Message.rst:35::1 #, fuzzy msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +":py:obj:`create_error_reply `\\ " +"\\(error\\[\\, ttl\\]\\)" msgstr "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +":py:obj:`create_error_reply `\\ " +"\\(error\\, ttl\\)" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 -#: of +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_error_reply:1 of #, fuzzy -msgid "Aggregate training results and update clip norms." -msgstr "汇总 DPFedAvgFixed 中的训练结果并更新片段标准。" +msgid "Construct a reply message indicating an error happened." 
+msgstr "构建一条回复信息,说明发生了错误。" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.Message.rst:35::1 #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`create_reply `\\ " +"\\(content\\[\\, ttl\\]\\)" msgstr "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`create_reply `\\ \\(content\\," +" ttl\\)" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_reply:1 of #, fuzzy -msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +msgid "Create a reply to this message with specified content and TTL." +msgstr "以指定的内容和 TTL 创建对该信息的回复。" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.Message.rst:35::1 #, fuzzy -msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +msgid ":py:obj:`has_content `\\ \\(\\)" +msgstr ":py:obj:`has_content `\\ \\(\\)" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_content:1 of #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +msgid "Return True if message has content, else False." 
+msgstr "如果信息有内容,则返回 True,否则返回 False。" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 +#: ../../source/ref-api/flwr.common.Message.rst:35::1 #, fuzzy -msgid "DifferentialPrivacyClientSideFixedClipping" -msgstr "差分隐私" +msgid ":py:obj:`has_error `\\ \\(\\)" +msgstr ":py:obj:`has_error `\\ \\(\\)" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 -#: of +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_error:1 of #, fuzzy -msgid "Use `fixedclipping_mod` modifier at the client side." -msgstr "在客户端使用 `fixedclipping_mod` 修改器。" +msgid "Return True if message has an error, else False." +msgstr "如果信息有错误,则返回 True,否则返回 False。" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 -#: of +#: flwr.common.Message.content:1::1 of #, fuzzy -msgid "" -"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " -"performs clipping on the server-side, " -"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " -"on the client-side, usually by using the built-in `fixedclipping_mod`." -msgstr "" -"与在服务器端执行剪切的 \"DifferentialPrivacyServerSideFixedClipping " -"\"相比,\"DifferentialPrivacyClientSideFixedClipping \"希望在客户端进行剪切,通常是使用内置的 " -"\"fixedclipping_mod\"。" +msgid ":py:obj:`content `\\" +msgstr ":py:obj:`content `\\" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 +#: flwr.common.Message.content:1 flwr.common.Message.content:1::1 #: of #, fuzzy -msgid "" -"The noise multiplier for the Gaussian mechanism for model updates. A " -"value of 1.0 or higher is recommended for strong privacy." -msgstr "模型更新高斯机制的噪声乘数。建议使用 1.0 或更高的值,以获得较强的隐私性。" +msgid "The content of this message." 
+msgstr "评估客户端的反应。" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 -#: of +#: flwr.common.Message.content:1::1 of #, fuzzy -msgid "The value of the clipping norm." -msgstr "削波法线的值。" +msgid ":py:obj:`error `\\" +msgstr ":py:obj:`error `\\" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 -#: of +#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of #, fuzzy -msgid "" -"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " -"wrapper:" -msgstr "用 \"DifferentialPrivacyClientSideFixedClipping \"包装器包装策略:" +msgid "Error captured by this message." +msgstr "该信息捕捉到的错误。" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 -#: of +#: flwr.common.Message.content:1::1 of #, fuzzy -msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" -msgstr "在客户端,将 \"fixedclipping_mod \"添加到客户端模块中:" +msgid ":py:obj:`metadata `\\" +msgstr ":py:obj:`metadata `\\" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.message.Message.create_error_reply:3 of #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +msgid "The error that was encountered." +msgstr "遇到的错误。" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.message.Message.create_error_reply:5 +#: flwr.common.message.Message.create_reply:9 of #, fuzzy msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +"Time-to-live for this message in seconds. If unset, it will be set based " +"on the remaining time for the received message before it expires. 
This " +"follows the equation: ttl = msg.meta.ttl - (reply.meta.created_at - " +"msg.meta.created_at)" msgstr "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" - -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 -#: of -#, fuzzy -msgid "Add noise to the aggregated parameters." -msgstr "然后将汇总结果序列化:" +"该信息的有效时间(秒)。如果未设置,则将根据收到的信息过期前的剩余时间来设置。其计算公式为:ttl = msg.meta.ttl - " +"(reply.meta.created_at - msg.meta.created_at)" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.message.Message.create_error_reply:5 +#: flwr.common.message.Message.create_reply:9 of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +"Time-to-live for this message in seconds. If unset, it will be set based " +"on the remaining time for the received message before it expires. 
This " +"follows the equation:" +msgstr "该信息的有效时间(秒)。如果未设置,则将根据接收到的信息过期前的剩余时间来设置。其计算公式如下" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.message.Message.create_error_reply:9 +#: flwr.common.message.Message.create_reply:13 of #, fuzzy -msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +msgid "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" +msgstr "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.message.Message.create_reply:3 of #, fuzzy msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +"The method generates a new `Message` as a reply to this message. It " +"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " +"this message and sets 'reply_to_message' to the ID of this message." msgstr "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +"该方法会生成一条新的 \"信息\",作为对该信息的回复。该方法继承了该消息的 " +"\"run_id\"、\"src_node_id\"、\"dst_node_id \"和 \"message_type\",并将 " +"\"reply_to_message \"设置为该消息的 ID。" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.message.Message.create_reply:7 of #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +msgid "The content for the reply message." 
+msgstr "回复信息的内容。" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 +#: flwr.common.message.Message.create_reply:16 of #, fuzzy -msgid "DifferentialPrivacyServerSideAdaptiveClipping" -msgstr "DifferentialPrivacyServerSideAdaptiveClipping" +msgid "A new `Message` instance representing the reply." +msgstr "代表回复的新的 `Message` 实例。" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 -#: of +#: ../../source/ref-api/flwr.common.MessageType.rst:2 #, fuzzy -msgid "" -"The standard deviation of the noise added to the count of updates below " -"the estimate. Andrew et al. recommends to set to " -"`expected_num_records/20`" -msgstr "添加到低于估计值的更新计数中的噪声标准偏差。安德鲁等人建议设置为 \"expected_num_records/20" +msgid "MessageType" +msgstr "返回类型" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 -#: of +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 #, fuzzy -msgid "" -"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " -"wrapper" -msgstr "用 DifferentialPrivacyServerSideAdaptiveClipping 封装器封装策略" +msgid ":py:obj:`EVALUATE `\\" +msgstr ":py:obj:`EVALUATE `\\" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +msgid ":py:obj:`QUERY `\\" +msgstr ":py:obj:`QUERY `\\" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 #, fuzzy -msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, 
results\\, failures\\)" +msgid ":py:obj:`TRAIN `\\" +msgstr ":py:obj:`TRAIN `\\" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +msgid "MessageTypeLegacy" +msgstr "MessageTypeLegacy" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 #, fuzzy -msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +msgid ":py:obj:`GET_PARAMETERS `\\" +msgstr ":py:obj:`GET_PARAMETERS `\\" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 #, fuzzy -msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +msgid ":py:obj:`GET_PROPERTIES `\\" +msgstr ":py:obj:`GET_PROPERTIES `\\" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +msgid "An identifier for the current run." 
+msgstr "当前运行的标识符。" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of #, fuzzy -msgid "DifferentialPrivacyServerSideFixedClipping" -msgstr "差分隐私" +msgid "An identifier for the current message." +msgstr "当前信息的标识符。" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 -#: of +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of #, fuzzy -msgid "" -"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping " -"wrapper" -msgstr "用 DifferentialPrivacyServerSideFixedClipping 封装器封装策略" +msgid "An identifier for the node sending this message." +msgstr "发送此信息的节点的标识符。" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.dst_node_id:1 flwr.common.message.Metadata:9 of #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +msgid "An identifier for the node receiving this message." +msgstr "接收此信息的节点的标识符。" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of +#, fuzzy +msgid "An identifier for the message this message replies to." +msgstr "该信息回复的信息的标识符。" + +#: flwr.common.message.Metadata:13 of #, fuzzy msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +"An identifier for grouping messages. 
In some settings, this is used as " +"the FL round." +msgstr "用于分组报文的标识符。在某些设置中,它被用作 FL 轮。" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 -#: of +#: flwr.common.message.Metadata:16 of #, fuzzy -msgid "Compute the updates, clip, and pass them for aggregation." -msgstr "计算更新、剪辑并将其传递给聚合。" +msgid "Time-to-live for this message in seconds." +msgstr "该信息的有效时间。" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +msgid "A string that encodes the action to be executed on the receiving end." +msgstr "编码接收端要执行的操作的字符串。" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.Metadata.created_at:1::1 of #, fuzzy -msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +msgid ":py:obj:`created_at `\\" +msgstr ":py:obj:`ttl `\\" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.Metadata.created_at:1 +#: flwr.common.Metadata.created_at:1::1 of #, fuzzy -msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +msgid "Unix timestamp when the message was created." 
+msgstr "创建信息时的 Unix 时间戳。" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.Metadata.created_at:1::1 of #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +msgid ":py:obj:`dst_node_id `\\" +msgstr ":py:obj:`dst_node_id `\\" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 -#: of +#: flwr.common.Metadata.created_at:1::1 of #, fuzzy -msgid "Afterward, add noise to the aggregated parameters." -msgstr "然后,在汇总参数中添加噪声。" +msgid ":py:obj:`group_id `\\" +msgstr ":py:obj:`group_id `\\" -#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.group_id:1 of #, fuzzy -msgid "FaultTolerantFedAvg" -msgstr "server.strategy.FaultTolerantFedAvg" +msgid "An identifier for grouping messages." 
+msgstr "用于分组信息的标识符。" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: flwr.common.Metadata.created_at:1::1 of #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +msgid ":py:obj:`message_id `\\" +msgstr ":py:obj:`message_id `\\" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.common.Metadata.created_at:1::1 of +#, fuzzy +msgid ":py:obj:`message_type `\\" +msgstr ":py:obj:`message_type `\\" + +#: flwr.common.Metadata.created_at:1::1 of +#, fuzzy +msgid ":py:obj:`reply_to_message `\\" +msgstr ":py:obj:`reply_to_message `\\" + +#: flwr.common.Metadata.created_at:1::1 of +#, fuzzy +msgid ":py:obj:`run_id `\\" +msgstr ":py:obj:`run_id `\\" + +#: flwr.common.Metadata.created_at:1::1 of +#, fuzzy +msgid ":py:obj:`src_node_id `\\" +msgstr ":py:obj:`src_node_id `\\" + +#: flwr.common.Metadata.created_at:1::1 of +#, fuzzy +msgid ":py:obj:`ttl `\\" +msgstr ":py:obj:`ttl `\\" + +#: flwr.common.Metadata.created_at:1::1 flwr.common.Metadata.ttl:1 #: of #, fuzzy +msgid "Time-to-live for this message." 
+msgstr "该信息的有效时间。" + +#: ../../source/ref-api/flwr.common.Metrics.rst:2 +#, fuzzy +msgid "Metrics" +msgstr "MetricsRecord" + +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 +#, fuzzy +msgid "MetricsRecord" +msgstr "MetricsRecord" + +#: flwr.common.record.metricsrecord.MetricsRecord:1 of +#, fuzzy msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | " +":py:class:`list`\\ [:py:class:`int`] | :py:class:`list`\\ " +"[:py:class:`float`]]" msgstr "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " +":py:class:`float`, :py:class:`~typing.List`\\ [:py:class:`int`], " +":py:class:`~typing.List`\\ [:py:class:`float`]]]" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 -#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 -#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 -#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using weighted average." 
-msgstr "使用加权平均法汇总拟合结果。" +#: flwr.common.record.metricsrecord.MetricsRecord:3 of +msgid "" +"A :code:`MetricsRecord` is a Python dictionary designed to ensure that " +"each key-value pair adheres to specified data types. A " +":code:`MetricsRecord` is one of the types of records that a " +"`flwr.common.RecordSet `_ supports " +"and can therefore be used to construct :code:`common.Message` objects." +msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -#, fuzzy +#: flwr.common.record.metricsrecord.MetricsRecord:9 of msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"A dictionary that stores basic types (i.e. `int`, `float` as defined in " +"`MetricsScalar`) and list of such types (see `MetricsScalarList`)." msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -#, fuzzy +#: flwr.common.record.metricsrecord.MetricsRecord:12 of msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"A boolean indicating whether metrics should be deleted from the input " +"dictionary immediately after adding them to the record. When set to True," +" the data is duplicated in memory. If memory is a concern, set it to " +"False." msgstr "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -#, fuzzy +#: flwr.common.record.metricsrecord.MetricsRecord:20 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"The usage of a :code:`MetricsRecord` is envisioned for communicating " +"results obtained when a node performs an action. 
A few typical examples " +"include: communicating the training accuracy after a model is trained " +"locally by a :code:`ClientApp`, reporting the validation loss obtained at" +" a :code:`ClientApp`, or, more generally, the output of executing a query" +" by the :code:`ClientApp`. Common to these examples is that the output " +"can be typically represented by a single scalar (:code:`int`, " +":code:`float`) or list of scalars." msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -#, fuzzy +#: flwr.common.record.metricsrecord.MetricsRecord:28 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"Let's see some examples of how to construct a :code:`MetricsRecord` from " +"scratch:" msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -#, fuzzy +#: flwr.common.record.metricsrecord.MetricsRecord:39 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"Since types are enforced, the types of the objects inserted are checked. " +"For a :code:`MetricsRecord`, value types allowed are those in defined in " +":code:`flwr.common.MetricsRecordValues`. Similarly, only :code:`str` keys" +" are allowed." msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -#, fuzzy +#: flwr.common.record.metricsrecord.MetricsRecord:50 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"If you need a more versatily type of record try :code:`ConfigsRecord` or " +":code:`ParametersRecord`." 
msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 -#: ../../source/ref-changelog.md:905 -msgid "FedAdagrad" -msgstr "FedAdagrad" +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr ":py:obj:`clear `\\ \\(\\)" -#: flwr.server.strategy.fedadagrad.FedAdagrad:1 -#: flwr.server.strategy.fedadam.FedAdam:1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" -msgstr "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr ":py:obj:`count_bytes `\\ \\(\\)" -#: flwr.server.strategy.fedadagrad.FedAdagrad:3 -#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 -#: flwr.server.strategy.fedyogi.FedYogi:3 of -msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" -msgstr "实施基于 https://arxiv.org/abs/2003.00295v5" - -#: flwr.server.strategy.fedadagrad.FedAdagrad:21 -#: flwr.server.strategy.fedadagrad.FedAdagrad:23 -#: flwr.server.strategy.fedadam.FedAdam:25 -#: flwr.server.strategy.fedadam.FedAdam:27 -#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 -#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 -#: flwr.server.strategy.fedprox.FedProx:61 -#: flwr.server.strategy.fedprox.FedProx:63 -#: flwr.server.strategy.fedyogi.FedYogi:28 -#: flwr.server.strategy.fedyogi.FedYogi:30 of -msgid "Metrics aggregation function, optional." -msgstr "指标汇总功能,可选。" - -#: flwr.server.strategy.fedadagrad.FedAdagrad:29 -#: flwr.server.strategy.fedadam.FedAdam:29 -#: flwr.server.strategy.fedopt.FedOpt:29 of -msgid "Server-side learning rate. Defaults to 1e-1." 
-msgstr "服务器端学习率。默认为 1e-1。" - -#: flwr.server.strategy.fedadagrad.FedAdagrad:31 -#: flwr.server.strategy.fedadam.FedAdam:31 -#: flwr.server.strategy.fedopt.FedOpt:31 of -msgid "Client-side learning rate. Defaults to 1e-1." -msgstr "客户端学习率。默认为 1e-1。" +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -#: flwr.server.strategy.fedadagrad.FedAdagrad:33 -#: flwr.server.strategy.fedadam.FedAdam:37 -#: flwr.server.strategy.fedopt.FedOpt:37 of -msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." -msgstr "控制算法的适应度。默认为 1e-9。" +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`items `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr ":py:obj:`keys `\\ \\(\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "" -":py:obj:`aggregate_fit `\\" -" \\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit `\\" -" \\(server\\_round\\, results\\, failures\\)" +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid ":py:obj:`popitem `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: 
collections.abc.MutableMapping.clear:1::1 of #, fuzzy msgid "" -":py:obj:`configure_fit `\\" -" \\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit `\\" -" \\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +msgid ":py:obj:`values `\\ \\(\\)" +msgstr ":py:obj:`values `\\ \\(\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.NDArray.rst:2 #, fuzzy -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid "NDArray" +msgstr "NDArray" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.NDArrays.rst:2 #, fuzzy -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid "NDArrays" +msgstr "NDArray" -#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 #, fuzzy -msgid "FedAdam" -msgstr "FedAdagrad" +msgid ":py:obj:`tensors `\\" +msgstr ":py:obj:`tensors `\\" -#: flwr.server.strategy.fedadam.FedAdam:33 -#: 
flwr.server.strategy.fedyogi.FedYogi:36 of -msgid "Momentum parameter. Defaults to 0.9." -msgstr "动量参数。默认为 0.9。" +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +#, fuzzy +msgid ":py:obj:`tensor_type `\\" +msgstr ":py:obj:`tensor_type `\\" -#: flwr.server.strategy.fedadam.FedAdam:35 -#: flwr.server.strategy.fedyogi.FedYogi:38 of -msgid "Second moment parameter. Defaults to 0.99." -msgstr "第二动量参数。默认为 0.99。" +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:2 +#, fuzzy +msgid "ParametersRecord" +msgstr "参数" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.parametersrecord.ParametersRecord:1 of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.parametersrecord.ParametersRecord:3 of #, fuzzy msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"A dataclass storing named Arrays in order. This means that it holds " +"entries as an OrderedDict[str, Array]. ParametersRecord objects can be " +"viewed as an equivalent to PyTorch's state_dict, but holding serialised " +"tensors instead. A :code:`ParametersRecord` is one of the types of " +"records that a `flwr.common.RecordSet " +"`_ supports and can therefore be " +"used to construct :code:`common.Message` objects." 
msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"按顺序存储命名数组的数据类。这意味着它以 OrderedDict[str, Array] 的形式保存条目。ParametersRecord " +"对象相当于 PyTorch 的 state_dict,但它保存的是序列化的张量。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +#: flwr.common.record.parametersrecord.ParametersRecord:10 of +msgid "A dictionary that stores serialized array-like or tensor-like objects." msgstr "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy +#: flwr.common.record.parametersrecord.ParametersRecord:12 of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"A boolean indicating whether parameters should be deleted from the input " +"dictionary immediately after adding them to the record. If False, the " +"dictionary passed to `set_parameters()` will be empty once exiting from " +"that function. This is the desired behaviour when working with very large" +" models/tensors/arrays. However, if you plan to continue working with " +"your parameters after adding it to the record, set this flag to True. " +"When set to True, the data is duplicated in memory." msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy +#: flwr.common.record.parametersrecord.ParametersRecord:23 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"The usage of :code:`ParametersRecord` is envisioned for storing data " +"arrays (e.g. parameters of a machine learning model). These first need to" +" be serialized into a :code:`flwr.common.Array` data structure." 
msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.parametersrecord.ParametersRecord:27 of #, fuzzy +msgid "Let's see some examples:" +msgstr "让我们来看几个例子:" + +#: flwr.common.record.parametersrecord.ParametersRecord:50 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"Now that the NumPy array is embedded into a :code:`ParametersRecord` it " +"could be sent if added as part of a :code:`common.Message` or it could be" +" saved as a persistent state of a :code:`ClientApp` via its context. " +"Regardless of the usecase, we will sooner or later want to recover the " +"array in its original NumPy representation. For the example above, where " +"the array was serialized using the built-in utility function, " +"deserialization can be done as follows:" msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy +#: flwr.common.record.parametersrecord.ParametersRecord:65 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"If you need finer control on how your arrays are serialized and " +"deserialized, you can construct :code:`Array` objects directly like this:" msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy +#: flwr.common.record.parametersrecord.ParametersRecord:83 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"Note that different arrays (e.g. from PyTorch, Tensorflow) might require " +"different serialization mechanism. Howerver, they often support a " +"conversion to NumPy, therefore allowing to use the same or similar steps " +"as in the example above." 
msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "FedAvg" -msgstr "DP-FedAvg" +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr ":py:obj:`clear `\\ \\(\\)" -#: flwr.server.strategy.fedavg.FedAvg:3 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of -msgid "Implementation based on https://arxiv.org/abs/1602.05629" -msgstr "实施基于 https://arxiv.org/abs/1602.05629" +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr ":py:obj:`count_bytes `\\ \\(\\)" -#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 -#: of -msgid "" -"Fraction of clients used during training. In case `min_fit_clients` is " -"larger than `fraction_fit * available_clients`, `min_fit_clients` will " -"still be sampled. Defaults to 1.0." -msgstr "" -"训练过程中使用的客户端比例。如果 `min_fit_clients` 大于 `fraction_fit * " -"available_clients`,则仍会对 `min_fit_clients` 进行采样。默认为 1.0。" +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 -#: of -msgid "" -"Fraction of clients used during validation. In case " -"`min_evaluate_clients` is larger than `fraction_evaluate * " -"available_clients`, `min_evaluate_clients` will still be sampled. " -"Defaults to 1.0." -msgstr "" -"验证过程中使用的客户端的比例。如果 `min_evaluate_clients` 大于 `fraction_evaluate * " -"available_clients`,则仍会对 `min_evaluate_clients` 进行采样。默认为 1.0。" +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`items `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" -#: flwr.server.strategy.fedavg.FedAvg:33 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "Enable (True) or disable (False) in-place aggregation of model updates." 
-msgstr "启用(真)或禁用(假)模型更新的就地聚合。" +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr ":py:obj:`keys `\\ \\(\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`popitem `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" + +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +msgid ":py:obj:`values `\\ \\(\\)" +msgstr ":py:obj:`values `\\ \\(\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of #, fuzzy msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Note that a small amount of Bytes might also be included in this counting" +" that correspond to 
metadata of the serialized object (e.g. of NumPy " +"array) needed for deseralization." +msgstr "请注意,该计数中还可能包含少量字节,这些字节与序列化对象(如 NumPy 数组)的元数据相对应,需要进行去eralization。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.Properties.rst:2 #, fuzzy -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +msgid "Properties" +msgstr "GetPropertiesRes" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 +#, fuzzy +msgid "ReconnectIns" +msgstr "启用 SSL 连接" + +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:28::1 +#, fuzzy +msgid ":py:obj:`seconds `\\" +msgstr ":py:obj:`seconds `\\" + +#: ../../source/ref-api/flwr.common.RecordSet.rst:2 #, fuzzy +msgid "RecordSet" +msgstr "RecordSet" + +#: flwr.common.record.recordset.RecordSet:3 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"A :code:`RecordSet` is the unified mechanism by which parameters, metrics" +" and configs can be either stored as part of a `flwr.common.Context " +"`_ in your apps or communicated as part of a " +"`flwr.common.Message `_ between your apps." msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy +#: flwr.common.record.recordset.RecordSet:9 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"A dictionary of :code:`ParametersRecords` that can be used to record and " +"communicate model parameters and high-dimensional arrays." 
msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy +#: flwr.common.record.recordset.RecordSet:12 of msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +"A dictionary of :code:`MetricsRecord` that can be used to record and " +"communicate scalar-valued metrics that are the result of performing and " +"action, for example, by a :code:`ClientApp`." msgstr "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 -#, fuzzy -msgid "FedAvgAndroid" -msgstr "DPFedAvgAdaptive" +#: flwr.common.record.recordset.RecordSet:16 of +msgid "" +"A dictionary of :code:`ConfigsRecord` that can be used to record and " +"communicate configuration values to an entity (e.g. to a " +":code:`ClientApp`) for it to adjust how an action is performed." +msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -#, fuzzy +#: flwr.common.record.recordset.RecordSet:24 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"A :code:`RecordSet` can hold three types of records, each designed with " +"an specific purpose. What is common to all of them is that they are " +"Python dictionaries designed to ensure that each key-value pair adheres " +"to specified data types." msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.recordset.RecordSet:29 of #, fuzzy +msgid "Let's see an example." 
+msgstr "让我们来看几个例子:" + +#: flwr.common.record.recordset.RecordSet:47 of msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"Adding a :code:`ParametersRecord` follows the same steps as above but " +"first, the array needs to be serialized and represented as a " +":code:`flwr.common.Array`. If the array is a :code:`NumPy` array, you can" +" use the built-in utility function `array_from_numpy " +"`_. It is often possible to convert an" +" array first to :code:`NumPy` and then use the aforementioned function." msgstr "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -#, fuzzy +#: flwr.common.record.recordset.RecordSet:66 of msgid "" -":py:obj:`bytes_to_ndarray " -"`\\ \\(tensor\\)" +"For additional examples on how to construct each of the records types " +"shown above, please refer to the documentation for :code:`ConfigsRecord`," +" :code:`MetricsRecord` and :code:`ParametersRecord`." msgstr "" -":py:obj:`bytes_to_ndarray " -"`\\ \\(tensor\\)" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of +#: flwr.common.RecordSet.configs_records:1::1 of #, fuzzy -msgid "Deserialize NumPy array from bytes." -msgstr "从字节反序列化 NumPy ndarray。" +msgid ":py:obj:`configs_records `\\" +msgstr ":py:obj:`configs_records `\\" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.common.RecordSet.configs_records:1 +#: flwr.common.RecordSet.configs_records:1::1 of #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "Dictionary holding ConfigsRecord instances." 
+msgstr "包含 ConfigsRecord 实例的字典。" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.common.RecordSet.configs_records:1::1 of #, fuzzy -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid ":py:obj:`metrics_records `\\" +msgstr ":py:obj:`metrics_records `\\" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.metrics_records:1 of #, fuzzy -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +msgid "Dictionary holding MetricsRecord instances." +msgstr "保存 MetricsRecord 实例的字典。" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.common.RecordSet.configs_records:1::1 of #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +msgid ":py:obj:`parameters_records `\\" +msgstr ":py:obj:`parameters_records `\\" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.parameters_records:1 of #, fuzzy -msgid "" -":py:obj:`ndarray_to_bytes " -"`\\ \\(ndarray\\)" -msgstr "" -":py:obj:`ndarray_to_bytes " -"`\\ \\(ndarray\\)" +msgid "Dictionary holding ParametersRecord instances." +msgstr "存放 ParametersRecord 实例的字典。" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of +#: ../../source/ref-api/flwr.common.ServerMessage.rst:2 #, fuzzy -msgid "Serialize NumPy array to bytes." 
-msgstr "将 NumPy ndarray 序列化为字节。" +msgid "ServerMessage" +msgstr "服务器端" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 #, fuzzy -msgid "" -":py:obj:`ndarrays_to_parameters " -"`\\ " -"\\(ndarrays\\)" -msgstr "" -":py:obj:`ndarrays_to_parameters " -"`\\ " -"\\(ndarrays\\)" +msgid ":py:obj:`evaluate_ins `\\" +msgstr ":py:obj:`evaluate_ins `\\" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 #, fuzzy -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid ":py:obj:`fit_ins `\\" +msgstr ":py:obj:`fit_ins `\\" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`get_parameters_ins " +"`\\" msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`get_parameters_ins " +"`\\" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 #, fuzzy msgid "" -":py:obj:`parameters_to_ndarrays " -"`\\ " -"\\(parameters\\)" +":py:obj:`get_properties_ins " +"`\\" msgstr "" -":py:obj:`parameters_to_ndarrays " -"`\\ " -"\\(parameters\\)" +":py:obj:`get_properties_ins " +"`\\" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 -#: of +#: ../../source/ref-api/flwr.common.Status.rst:2 #, fuzzy -msgid "Convert parameters object to NumPy weights." 
-msgstr "将参数对象转换为 NumPy ndarrays。" +msgid "Status" +msgstr "客户端状态。" -#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 +#: ../../source/ref-api/flwr.common.Status.rst:29::1 #, fuzzy -msgid "FedAvgM" -msgstr "DP-FedAvg" +msgid ":py:obj:`code `\\" +msgstr ":py:obj:`code `\\" -#: flwr.server.strategy.fedavgm.FedAvgM:3 of +#: ../../source/ref-api/flwr.common.Status.rst:29::1 #, fuzzy -msgid "Implementation based on https://arxiv.org/abs/1909.06335" -msgstr "实施基于 https://arxiv.org/pdf/1909.06335.pdf" +msgid ":py:obj:`message `\\" +msgstr ":py:obj:`message `\\" -#: flwr.server.strategy.fedavgm.FedAvgM:25 of -msgid "" -"Server-side learning rate used in server-side optimization. Defaults to " -"1.0." -msgstr "服务器端优化中使用的服务器端学习率。默认为 1.0。" +#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 +#, fuzzy +msgid "array\\_from\\_numpy" +msgstr "array\\_from\\_numpy" -#: flwr.server.strategy.fedavgm.FedAvgM:28 of -msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." -msgstr "用于 FedAvgM 的服务器端动量因子。默认为 0.0。" +#: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 +#, fuzzy +msgid "bytes\\_to\\_ndarray" +msgstr "bytes\\_to\\_ndarray" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.configure.rst:2 #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +msgid "configure" +msgstr "配置日志记录" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.event.rst:2 #, fuzzy -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +msgid "event" +msgstr "事件" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.log.rst:2 #, fuzzy +msgid "log" +msgstr "登录" + 
+#: logging.Logger.log:3 of msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +"To pass exception information, use the keyword argument exc_info with a " +"true value, e.g." +msgstr "要传递异常信息,请使用带 true 值的关键字参数 exc_info,例如。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: logging.Logger.log:6 of +#, python-format +msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" +msgstr "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" + +#: ../../source/ref-api/flwr.common.ndarray_to_bytes.rst:2 #, fuzzy -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "ndarray\\_to\\_bytes" +msgstr "ndarray\\_to\\_bytes" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.ndarrays_to_parameters.rst:2 #, fuzzy -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +msgid "ndarrays\\_to\\_parameters" +msgstr "ndarrays\\_to\\_parameters" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.now.rst:2 #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +msgid "now" +msgstr "现在" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.parameters_to_ndarrays.rst:2 #, fuzzy -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid "parameters\\_to\\_ndarrays" +msgstr 
"parameters\\_to\\_ndarrays" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:2 +msgid "server" +msgstr "服务器" + +#: ../../source/ref-api/flwr.server.rst:22::1 #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`start_server `\\ \\(\\*\\[\\, " +"server\\_address\\, server\\, ...\\]\\)" msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`start_server `\\ \\(\\*\\[\\, " +"server\\_address\\, server\\, ...\\]\\)" -#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 +#: ../../source/ref-api/flwr.server.rst:22::1 +#: flwr.server.app.start_server:1 of +msgid "Start a Flower server using the gRPC transport layer." +msgstr "使用 gRPC 传输层启动 Flower 服务器。" + +#: ../../source/ref-api/flwr.server.rst:37::1 #, fuzzy -msgid "FedMedian" -msgstr "联邦医保" +msgid ":py:obj:`ClientManager `\\ \\(\\)" +msgstr ":py:obj:`ClientManager `\\ \\(\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.client_manager.ClientManager:1 of #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +msgid "Abstract base class for managing Flower clients." +msgstr "Flower 客户端的抽象基类。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:37::1 #, fuzzy -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +msgid ":py:obj:`Driver `\\ \\(\\)" +msgstr ":py:obj:`run_driver_api `\\ \\(\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of -msgid "Aggregate fit results using median." 
-msgstr "使用中位数汇总拟合结果。" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.driver.driver.Driver:1 of +#, fuzzy +msgid "Abstract base Driver class for the Driver API." +msgstr "Flower 客户端的抽象基类。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:37::1 #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid ":py:obj:`History `\\ \\(\\)" +msgstr ":py:obj:`History `\\ \\(\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.history.History:1 of #, fuzzy -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "History class for training and/or evaluation metrics collection." +msgstr "**hist** -- 包含训练和评估指标的对象。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:37::1 #, fuzzy msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`LegacyContext `\\ \\(context\\[\\, " +"config\\, strategy\\, ...\\]\\)" msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`LegacyContext `\\ \\(state\\[\\, " +"config\\, strategy\\, ...\\]\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.compat.legacy_context.LegacyContext:1 of #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +msgid "Legacy Context." 
+msgstr "传承背景。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:37::1 #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " +"strategy\\]\\)" msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " +"strategy\\]\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:37::1 #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " +"strategy\\, ...\\]\\)" +msgstr "server.strategy.Strategy" -#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.server_app.ServerApp:1 of #, fuzzy -msgid "FedOpt" -msgstr "FedOpt" - -#: flwr.server.strategy.fedopt.FedOpt:33 of -msgid "Momentum parameter. Defaults to 0.0." -msgstr "动量参数。默认为 0.0。" - -#: flwr.server.strategy.fedopt.FedOpt:35 of -msgid "Second moment parameter. Defaults to 0.0." -msgstr "第二动量参数。默认为 0.0。" +msgid "Flower ServerApp." +msgstr "Flower 服务器。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:37::1 #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +":py:obj:`ServerAppComponents `\\ " +"\\(\\[server\\, config\\, ...\\]\\)" +msgstr "server.strategy.Strategy" + +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.serverapp_components.ServerAppComponents:1 of +msgid "Components to construct a ServerApp." 
msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:37::1 #, fuzzy msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," +" round\\_timeout\\]\\)" msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.server_config.ServerConfig:1 of #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +msgid "Flower server config." +msgstr "Flower 服务器。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:37::1 #, fuzzy -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" +msgstr ":py:obj:`SimpleClientManager `\\ \\(\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.client_manager.SimpleClientManager:1 of #, fuzzy -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +msgid "Provides a pool of available clients." 
+msgstr "使用部分可用客户进行评估。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:56::1 #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +msgid ":py:obj:`flwr.server.strategy `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.rst:56::1 +#: flwr.server.strategy:1 of +msgid "Contains the strategy abstraction and different implementations." +msgstr "包含策略抽象和不同的实现方法。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:56::1 #, fuzzy -msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +msgid ":py:obj:`flwr.server.workflow `\\" +msgstr "server.strategy.Strategy" -#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 +#: ../../source/ref-api/flwr.server.rst:56::1 +#: flwr.server.workflow:1 of #, fuzzy -msgid "FedProx" -msgstr "FedProx" - -#: flwr.server.strategy.fedprox.FedProx:3 of -msgid "Implementation based on https://arxiv.org/abs/1812.06127" -msgstr "实施基于 https://arxiv.org/abs/1812.06127" +msgid "Workflows." +msgstr "工作流程" -#: flwr.server.strategy.fedprox.FedProx:5 of -msgid "" -"The strategy in itself will not be different than FedAvg, the client " -"needs to be adjusted. 
A proximal term needs to be added to the loss " -"function during the training:" -msgstr "策略本身与 FedAvg 并无不同,客户端需要进行调整。在训练过程中,需要在损失函数中添加一个近端项:" +#: ../../source/ref-api/flwr.server.ClientManager.rst:2 +#, fuzzy +msgid "ClientManager" +msgstr "客户端" -#: flwr.server.strategy.fedprox.FedProx:9 of -msgid "" -"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" -"\n" -msgstr "" -"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" -"\n" +#: flwr.server.client_manager.ClientManager.all:1::1 of +#, fuzzy +msgid ":py:obj:`all `\\ \\(\\)" +msgstr ":py:obj:`all `\\ \\(\\)" -#: flwr.server.strategy.fedprox.FedProx:12 of -msgid "" -"Where $w^t$ are the global parameters and $w$ are the local weights the " -"function will be optimized with." -msgstr "其中,$w^t$ 是全局参数,$w$ 是优化函数的局部权重。" +#: flwr.server.client_manager.ClientManager.all:1 +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.all:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#, fuzzy +msgid "Return all available clients." +msgstr "返回所有可用客户。" -#: flwr.server.strategy.fedprox.FedProx:15 of -msgid "In PyTorch, for example, the loss would go from:" -msgstr "例如,在 PyTorch 中,损失将从:" +#: flwr.server.client_manager.ClientManager.all:1::1 of +#, fuzzy +msgid ":py:obj:`num_available `\\ \\(\\)" +msgstr ":py:obj:`num_available `\\ \\(\\)" -#: flwr.server.strategy.fedprox.FedProx:21 of -msgid "To:" -msgstr "致:" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.num_available:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.num_available:1 of +#, fuzzy +msgid "Return the number of available clients." +msgstr "返回样本大小和所需的可用客户数量。" -#: flwr.server.strategy.fedprox.FedProx:30 of -msgid "" -"With `global_params` being a copy of the parameters before the training " -"takes place." 
-msgstr "其中,\"global_params \"是训练前的参数副本。" +#: flwr.server.client_manager.ClientManager.all:1::1 of +#, fuzzy +msgid ":py:obj:`register `\\ \\(client\\)" +msgstr ":py:obj:`register `\\ \\(client\\)" -#: flwr.server.strategy.fedprox.FedProx:65 of -msgid "" -"The weight of the proximal term used in the optimization. 0.0 makes this " -"strategy equivalent to FedAvg, and the higher the coefficient, the more " -"regularization will be used (that is, the client parameters will need to " -"be closer to the server parameters during training)." -msgstr "" -"优化中使用的近端项权重。0.0 使该策略等同于 " -"FedAvg,系数越大,使用的正则化就越多(也就是说,在训练过程中,客户端参数需要更接近服务器参数)。" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.register:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.register:1 of +#, fuzzy +msgid "Register Flower ClientProxy instance." +msgstr "注册 Flower ClientProxy 实例。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.ClientManager.all:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.sample:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.sample:1 of #, fuzzy -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +msgid "Sample a number of Flower ClientProxy instances." 
+msgstr "取样若干 Flower ClientProxy 实例。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.ClientManager.all:1::1 of #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +msgid ":py:obj:`unregister `\\ \\(client\\)" +msgstr ":py:obj:`unregister `\\ \\(client\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.unregister:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.unregister:1 of #, fuzzy -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "Unregister Flower ClientProxy instance." +msgstr "取消注册 Flower ClientProxy 实例。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.ClientManager.all:1::1 of #, fuzzy msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\, timeout\\)" msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\, timeout\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.wait_for:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.wait_for:1 of #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +msgid "Wait until at least `num_clients` are available." 
+msgstr "等待至少 `num_clients` 可用。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.ClientManager.num_available:3 +#: flwr.server.client_manager.SimpleClientManager.num_available:3 of #, fuzzy -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid "**num_available** -- The number of currently available clients." +msgstr "**num_available** -- 当前可用客户端的数量。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.ClientManager.register:6 +#: flwr.server.client_manager.SimpleClientManager.register:6 of #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"**success** -- Indicating if registration was successful. False if " +"ClientProxy is already registered or can not be registered for any " +"reason." +msgstr "**success** -- 表示注册是否成功。如果 ClientProxy 已注册或因故无法注册,则为 False。" -#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of -msgid "Sends the proximal factor mu to the clients" -msgstr "向客户发送近端因子mu" - -#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 +#: flwr.server.client_manager.ClientManager.unregister:3 +#: flwr.server.client_manager.SimpleClientManager.unregister:3 of #, fuzzy -msgid "FedTrimmedAvg" -msgstr "server.strategy.FedTrimmedAvg" +msgid "This method is idempotent." +msgstr "这种方法是幂等的。" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of +#: ../../source/ref-api/flwr.server.Driver.rst:2 #, fuzzy -msgid "Implemented based on: https://arxiv.org/abs/1803.01498" -msgstr "实施基于 https://arxiv.org/abs/1802.07927。" - -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of -msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." 
-msgstr "截取分布两个尾部的分数。默认为 0.2。" +msgid "Driver" +msgstr "服务器" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`create_message `\\ " +"\\(content\\, message\\_type\\, ...\\[\\, ttl\\]\\)" msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`create_message `\\ " +"\\(content\\, message\\_type\\, ...\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.create_message:1 of #, fuzzy -msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +msgid "Create a new message with specified parameters." +msgstr "使用指定参数创建新信息。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using trimmed average." -msgstr "使用修剪平均值汇总拟合结果。" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#, fuzzy +msgid ":py:obj:`get_node_ids `\\ \\(\\)" +msgstr ":py:obj:`get_node_ids `\\ \\(\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.get_node_ids:1 of #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "Get node IDs." 
+msgstr "获取节点 ID。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 #, fuzzy msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`pull_messages `\\ " +"\\(message\\_ids\\)" msgstr "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`pull_messages `\\ " +"\\(message\\_ids\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.pull_messages:1 of #, fuzzy -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +msgid "Pull messages based on message IDs." +msgstr "根据信息 ID 提取信息。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`push_messages `\\ " +"\\(messages\\)" msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`push_messages `\\ " +"\\(messages\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.push_messages:1 of #, fuzzy -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid "Push messages to specified node IDs." 
+msgstr "向指定的节点 ID 推送信息。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`send_and_receive `\\ " +"\\(messages\\, \\*\\[\\, timeout\\]\\)" msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.send_and_receive:1 of #, fuzzy -msgid "FedXgbBagging" -msgstr "FedXgbBagging" +msgid "Push messages to specified node IDs and pull the reply messages." +msgstr "向指定的节点 ID 推送信息并提取回复信息。" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.create_message:1::1 of #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +msgid ":py:obj:`run `\\" +msgstr ":py:obj:`run_driver_api `\\ \\(\\)" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.Driver.run:1 +#: flwr.server.driver.driver.Driver.create_message:1::1 of #, fuzzy -msgid "Aggregate evaluation metrics using average." -msgstr "采用加权平均法计算评估损失总额。" +msgid "Run information." 
+msgstr "运行模拟" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.create_message:3 of #, fuzzy msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"This method constructs a new `Message` with given content and metadata. " +"The `run_id` and `src_node_id` will be set automatically." +msgstr "本方法使用给定的内容和元数据构建新的 `Message` 。run_id \"和 \"src_node_id \"将自动设置。" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of +#: flwr.server.driver.driver.Driver.create_message:6 of #, fuzzy -msgid "Aggregate fit results using bagging." -msgstr "使用 Bulyan 技术汇总拟合结果。" +msgid "" +"The content for the new message. This holds records that are to be sent " +"to the destination node." +msgstr "新信息的内容。其中包含要发送到目的节点的记录。" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.create_message:9 of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"The type of the message, defining the action to be executed on the " +"receiving end." 
+msgstr "信息类型,定义接收端要执行的操作。" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.create_message:12 of #, fuzzy -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "The ID of the destination node to which the message is being sent." +msgstr "信息发送目的地节点的 ID。" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.create_message:14 of #, fuzzy msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"The ID of the group to which this message is associated. In some " +"settings, this is used as the FL round." +msgstr "与该信息相关联的组的 ID。在某些设置中,它被用作 FL 轮。" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.create_message:17 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"Time-to-live for the round trip of this message, i.e., the time from " +"sending this message to receiving a reply. It specifies in seconds the " +"duration for which the message and its potential reply are considered " +"valid. If unset, the default TTL (i.e., `common.DEFAULT_TTL`) will be " +"used." 
+msgstr "此报文往返的有效时间,即从发送此报文到收到回复的时间。它规定了信息及其潜在回复被视为有效的持续时间。" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.create_message:23 of #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"**message** -- A new `Message` instance with the specified content and " +"metadata." +msgstr "**message** -- 具有指定内容和元数据的新 \"信息 \"实例。" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.pull_messages:3 of #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"This method is used to collect messages from the SuperLink that " +"correspond to a set of given message IDs." +msgstr "该方法用于从超级链接中收集与一组给定消息 ID 相对应的消息。" -#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 +#: flwr.server.driver.driver.Driver.pull_messages:6 of #, fuzzy -msgid "FedXgbCyclic" -msgstr "FedXgbCyclic" +msgid "An iterable of message IDs for which reply messages are to be retrieved." +msgstr "要检索回复信息的信息 ID 的可迭代项。" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.pull_messages:9 of #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +msgid "**messages** -- An iterable of messages received." 
+msgstr "**messages** -- 收到的信息迭代。" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.push_messages:3 of #, fuzzy msgid "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\," -" results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\," -" results\\, failures\\)" +"This method takes an iterable of messages and sends each message to the " +"node specified in `dst_node_id`." +msgstr "该方法接收一个可迭代的消息,并将每条消息发送到 `dst_node_id` 中指定的节点。" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.push_messages:6 +#: flwr.server.driver.driver.Driver.send_and_receive:7 of #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "An iterable of messages to be sent." +msgstr "要发送的信息迭代。" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.push_messages:9 of #, fuzzy msgid "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +"**message_ids** -- An iterable of IDs for the messages that were sent, " +"which can be used to pull replies." +msgstr "**message_ids** -- 已发送信息的可迭代 ID,可用于提取回复信息。" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.send_and_receive:3 of #, fuzzy msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"This method sends a list of messages to their destination node IDs and " +"then waits for the replies. 
It continues to pull replies until either all" +" replies are received or the specified timeout duration is exceeded." +msgstr "该方法会向目标节点 ID 发送信息列表,然后等待回复。它会继续提取回复,直到收到所有回复或超过指定的超时时间。" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.send_and_receive:9 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"The timeout duration in seconds. If specified, the method will wait for " +"replies for this duration. If `None`, there is no time limit and the " +"method will wait until replies for all messages are received." +msgstr "超时时间(秒)。如果指定,该方法将在此期限内等待回复。如果指定为 \"无\",则没有时间限制,该方法将等待直到收到所有信息的回复。" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.send_and_receive:14 of #, fuzzy -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid "**replies** -- An iterable of reply messages received from the SuperLink." +msgstr "**replies** -- 从超级链接收到的回复信息的迭代。" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.send_and_receive:19 of #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"This method uses `push_messages` to send the messages and `pull_messages`" +" to collect the replies. If `timeout` is set, the method may not return " +"replies for all sent messages. A message remains valid until its TTL, " +"which is not affected by `timeout`." 
msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"该方法使用 `push_messages` 发送信息,并使用 `pull_messages` 收集回复。如果设置了 " +"`timeout`,该方法可能不会返回所有已发送消息的回复。消息在其 TTL 之前一直有效,不受 `timeout` 影响。" -#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 +#: ../../source/ref-api/flwr.server.History.rst:2 #, fuzzy -msgid "FedXgbNnAvg" -msgstr "DP-FedAvg" +msgid "History" +msgstr "历史" -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of +#: flwr.server.history.History.add_loss_centralized:1::1 of #, fuzzy msgid "" -"This strategy is deprecated, but a copy of it is available in Flower " -"Baselines: " -"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." +":py:obj:`add_loss_centralized " +"`\\ \\(server\\_round\\, " +"loss\\)" msgstr "" -"该策略已被弃用,但在 Flower Baselines: " -"https://github.com/adap/flower/tree/main/baselines/hfedxgboost 中有其副本。" +":py:obj:`add_loss_centralized " +"`\\ \\(server\\_round\\, " +"loss\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1 +#: flwr.server.history.History.add_loss_centralized:1::1 of #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +msgid "Add one loss entry (from centralized evaluation)." 
+msgstr "集中评估" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\, " -"results\\, failures\\)" +":py:obj:`add_loss_distributed " +"`\\ \\(server\\_round\\, " +"loss\\)" msgstr "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\, " -"results\\, failures\\)" +":py:obj:`add_loss_distributed " +"`\\ \\(server\\_round\\, " +"loss\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_loss_distributed:1 of #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "Add one loss entry (from distributed evaluation)." +msgstr "增加一个损失条目(来自分布式评估)。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 of #, fuzzy msgid "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +":py:obj:`add_metrics_centralized " +"`\\ \\(server\\_round\\, " +"metrics\\)" msgstr "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +":py:obj:`add_metrics_centralized " +"`\\ \\(server\\_round\\, " +"metrics\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_centralized:1 of #, fuzzy -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +msgid "Add metrics entries (from centralized evaluation)." 
+msgstr "集中评估" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`add_metrics_distributed " +"`\\ \\(server\\_round\\, " +"metrics\\)" msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`add_metrics_distributed " +"`\\ \\(server\\_round\\, " +"metrics\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed:1 of #, fuzzy -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid "Add metrics entries (from distributed evaluation)." +msgstr "定制的集中/分布式评估" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 of #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`add_metrics_distributed_fit " +"`\\ \\(server\\_round\\," +" ...\\)" msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`add_metrics_distributed_fit " +"`\\ \\(server\\_round\\," +" ...\\)" -#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed_fit:1 of #, fuzzy -msgid "FedYogi" -msgstr "FedYogi" +msgid "Add metrics entries (from distributed fit)." +msgstr "添加度量条目(来自分布式拟合)。" -#: flwr.server.strategy.fedyogi.FedYogi:32 of +#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 #, fuzzy -msgid "Server-side learning rate. Defaults to 1e-2." 
-msgstr "服务器端学习率。默认为 1e-1。" +msgid "LegacyContext" +msgstr "遗留上下文" -#: flwr.server.strategy.fedyogi.FedYogi:34 of +#: flwr.server.compat.legacy_context.LegacyContext:1 of #, fuzzy -msgid "Client-side learning rate. Defaults to 0.0316." -msgstr "客户端学习率。默认为 1e-1。" +msgid "Bases: :py:class:`~flwr.common.context.Context`" +msgstr "Bases: :py:class:`~flwr.common.context.Context`" -#: flwr.server.strategy.fedyogi.FedYogi:40 of +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 #, fuzzy -msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." -msgstr "控制算法的适应度。默认为 1e-9。" +msgid ":py:obj:`config `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +msgid ":py:obj:`strategy `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 #, fuzzy -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +msgid ":py:obj:`client_manager `\\" +msgstr ":py:obj:`client_manager `\\" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +msgid ":py:obj:`history `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 #, fuzzy -msgid "" 
-":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid ":py:obj:`node_id `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 #, fuzzy -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +msgid ":py:obj:`node_config `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +msgid ":py:obj:`state `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 #, fuzzy -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid ":py:obj:`run_config `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.Server.rst:2 +msgid "Server" +msgstr "服务器" -#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 +#: flwr.server.server.Server.client_manager:1::1 of #, fuzzy -msgid "Krum" -msgstr "Krum" +msgid ":py:obj:`client_manager `\\ \\(\\)" +msgstr ":py:obj:`client_manager `\\ \\(\\)" -#: flwr.server.strategy.krum.Krum:3 of +#: flwr.server.server.Server.client_manager:1 +#: 
flwr.server.server.Server.client_manager:1::1 of #, fuzzy -msgid "Implementation based on https://arxiv.org/abs/1703.02757" -msgstr "实施基于 https://arxiv.org/abs/2304.07537。" - -#: flwr.server.strategy.krum.Krum:17 of -msgid "" -"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" -" that case classical Krum is applied." -msgstr "求平均值前保留的客户端数量(MultiKrum)。默认值为 0,在这种情况下会应用经典 Krum。" +msgid "Return ClientManager." +msgstr "返回客户端(本身)。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +":py:obj:`disconnect_all_clients " +"`\\ \\(timeout\\)" msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +":py:obj:`disconnect_all_clients " +"`\\ \\(timeout\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.disconnect_all_clients:1 of +#, fuzzy +msgid "Send shutdown signal to all clients." +msgstr "向所有客户端发送关闭信号。" + +#: flwr.server.server.Server.client_manager:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`evaluate_round `\\ " +"\\(server\\_round\\, timeout\\)" msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`evaluate_round `\\ " +"\\(server\\_round\\, timeout\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of -msgid "Aggregate fit results using Krum." -msgstr "使用 Krum 汇总拟合结果。" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.evaluate_round:1 of +#, fuzzy +msgid "Validate current global model on a number of clients." 
+msgstr "当前(全局)模型参数。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 of #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +msgid ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" +msgstr ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit:1 of #, fuzzy -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "Run federated averaging for a number of rounds." +msgstr "联邦平均动量策略。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 of #, fuzzy msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`fit_round `\\ \\(server\\_round\\," +" timeout\\)" msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`fit_round `\\ \\(server\\_round\\," +" timeout\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit_round:1 of #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +msgid "Perform a single round of federated averaging." 
+msgstr "本轮联邦学习。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 of #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`set_max_workers `\\ " +"\\(max\\_workers\\)" msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`set_max_workers `\\ " +"\\(max\\_workers\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_max_workers:1 of #, fuzzy -msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +msgid "Set the max_workers used by ThreadPoolExecutor." +msgstr "设置 ThreadPoolExecutor 使用的最大工作器数。" -#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 +#: flwr.server.server.Server.client_manager:1::1 of #, fuzzy -msgid "QFedAvg" -msgstr "DP-FedAvg" +msgid ":py:obj:`set_strategy `\\ \\(strategy\\)" +msgstr ":py:obj:`set_strategy `\\ \\(strategy\\)" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_strategy:1 of #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +msgid "Replace server strategy." 
+msgstr "server.strategy" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.ServerApp.rst:2 #, fuzzy -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +msgid "ServerApp" +msgstr "服务器" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server_app.ServerApp:5 of #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +msgid "Use the `ServerApp` with an existing `Strategy`:" +msgstr "使用现有策略" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server_app.ServerApp:17 of #, fuzzy -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "Use the `ServerApp` with a custom main function:" +msgstr "使用带有自定义主函数的 `ServerApp`:" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server_app.ServerApp.main:1::1 of #, fuzzy -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +msgid ":py:obj:`main `\\ \\(\\)" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server_app.ServerApp.main:1 +#: flwr.server.server_app.ServerApp.main:1::1 of +#, fuzzy +msgid "Return a decorator that registers the main fn with the server app." 
+msgstr "返回向服务器应用程序注册 main fn 的装饰器。" + +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:2 +#, fuzzy +msgid "ServerAppComponents" +msgstr "服务器" + +#: flwr.server.serverapp_components.ServerAppComponents:3 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, one will be created internally." +msgstr "服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例,`start_server` 将创建一个。" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.app.start_server:9 +#: flwr.server.serverapp_components.ServerAppComponents:6 of +msgid "" +"Currently supported values are `num_rounds` (int, default: 1) and " +"`round_timeout` in seconds (float, default: None)." +msgstr "目前支持的值有:`num_rounds`(int,默认值:1)和以秒为单位的`round_timeout`(float,默认值:无)。" + +#: flwr.server.serverapp_components.ServerAppComponents:9 of #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`flwr.server.strategy.FedAvg` will be used." msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"抽象基类 `flwr.server.strategy.Strategy` 的实现。如果没有提供策略,`start_server` 将使用 " +"`flwr.server.strategy.FedAvg`。" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.serverapp_components.ServerAppComponents:13 of #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"An implementation of the class `flwr.server.ClientManager`. If no " +"implementation is provided, then `flwr.server.SimpleClientManager` will " +"be used." 
msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"抽象基类 `flwr.server.ClientManager` 的实现。如果没有提供实现,`start_server` 将使用 " +"`flwr.server.client_manager.SimpleClientManager`。" -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 #, fuzzy -msgid "Strategy" -msgstr "Krum 策略。" +msgid "" +":py:obj:`client_manager " +"`\\" +msgstr ":py:obj:`client_manager `\\ \\(\\)" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +msgid ":py:obj:`config `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of -msgid "Aggregate evaluation results." -msgstr "聚合评估结果。" +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +#, fuzzy +msgid ":py:obj:`server `\\" +msgstr ":py:obj:`run_server_app `\\ \\(\\)" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 #, fuzzy -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +msgid ":py:obj:`strategy `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of -msgid "Aggregate training results." 
-msgstr "汇总训练结果。" +#: ../../source/ref-api/flwr.server.ServerConfig.rst:2 +#, fuzzy +msgid "ServerConfig" +msgstr "服务器" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: flwr.server.server_config.ServerConfig:3 of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"All attributes have default values which allows users to configure just " +"the ones they care about." +msgstr "所有属性都有默认值,用户只需配置自己关心的属性即可。" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 #, fuzzy -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid ":py:obj:`num_rounds `\\" +msgstr ":py:obj:`num_rounds `\\" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 #, fuzzy -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +msgid ":py:obj:`round_timeout `\\" +msgstr ":py:obj:`round_timeout `\\" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.evaluate:1 of -msgid "Evaluate the current model parameters." 
-msgstr "评估当前的模型参数。" +#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:2 +#, fuzzy +msgid "SimpleClientManager" +msgstr "SimpleClientManager" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: flwr.server.client_manager.SimpleClientManager:1 of #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +msgid "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" +msgstr "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of -msgid "Initialize the (global) model parameters." -msgstr "初始化(全局)模型参数。" +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#, fuzzy +msgid ":py:obj:`all `\\ \\(\\)" +msgstr ":py:obj:`all `\\ \\(\\)" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#, fuzzy msgid "" -"Successful updates from the previously selected and configured clients. " -"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " -"one of the previously selected clients. Not that not all previously " -"selected clients are necessarily included in this list: a client might " -"drop out and not submit a result. For each client that did not submit an " -"update, there should be an `Exception` in `failures`." +":py:obj:`num_available `\\" +" \\(\\)" msgstr "" -"从先前选定和配置的客户端进行的成功更新。每一对`(ClientProxy, " -"FitRes)`都是来自先前选定客户端的一次成功更新。但并非所有先前选定的客户机都一定包含在此列表中:客户机可能会退出,不提交结果。对于每个没有提交更新的客户端,`failures`中都应该有一个`Exception`。" - -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of -msgid "Exceptions that occurred while the server was waiting for client updates." 
-msgstr "服务器等待客户端更新时发生的异常。" +":py:obj:`num_available `\\" +" \\(\\)" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#, fuzzy msgid "" -"**aggregation_result** -- The aggregated evaluation result. Aggregation " -"typically uses some variant of a weighted average." -msgstr "**aggregation_result** -- 汇总的评估结果。聚合通常使用某种加权平均值。" +":py:obj:`register `\\ " +"\\(client\\)" +msgstr "" +":py:obj:`register `\\ " +"\\(client\\)" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#, fuzzy msgid "" -"Successful updates from the previously selected and configured clients. " -"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" -" one of the previously selected clients. Not that not all previously " -"selected clients are necessarily included in this list: a client might " -"drop out and not submit a result. For each client that did not submit an " -"update, there should be an `Exception` in `failures`." +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" msgstr "" -"来自先前选定和配置的客户端的成功更新。每一对`(ClientProxy, " -"FitRes)`都构成先前选定的客户端之一的一次成功更新。但并非所有先前选定的客户机都一定包含在此列表中:客户机可能会退出,不提交结果。对于每个没有提交更新的客户端,\"失败" -" \"中都应该有一个 \"异常\"。" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#, fuzzy msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the new global model parameters (i.e., it will replace the " -"previous parameters with the ones returned from this method). 
If `None` " -"is returned (e.g., because there were only failures and no viable " -"results) then the server will no update the previous model parameters, " -"the updates received in this round are discarded, and the global model " -"parameters remain the same." +":py:obj:`unregister `\\ " +"\\(client\\)" msgstr "" -"**parameters** -- 如果返回参数,那么服务器将把这些参数作为新的全局模型参数(即用本方法返回的参数替换之前的参数)。如果返回 " -"\"无\"(例如,因为只有失败而没有可行的结果),那么服务器将不再更新之前的模型参数,本轮收到的更新将被丢弃,全局模型参数保持不变。" +":py:obj:`unregister `\\ " +"\\(client\\)" -#: flwr.server.strategy.strategy.Strategy.evaluate:3 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#, fuzzy msgid "" -"This function can be used to perform centralized (i.e., server-side) " -"evaluation of model parameters." -msgstr "该函数可用于对模型参数进行集中(即服务器端)评估。" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\[\\, timeout\\]\\)" +msgstr "" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\[\\, timeout\\]\\)" -#: flwr.server.strategy.strategy.Strategy.evaluate:11 of +#: flwr.server.client_manager.SimpleClientManager.wait_for:3 of +#, fuzzy msgid "" -"**evaluation_result** -- The evaluation result, usually a Tuple " -"containing loss and a dictionary containing task-specific metrics (e.g., " -"accuracy)." -msgstr "**evaluation_result** -- 评估结果,通常是一个元组,包含损失值和一个字典,字典中包含特定任务的指标(如准确率)。" +"Blocks until the requested number of clients is available or until a " +"timeout is reached. Current timeout default: 1 day." +msgstr "阻塞,直到请求的客户端数量可用或达到超时为止。当前超时默认值:1 天。" -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of -msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the initial global model parameters." -msgstr "**parameters** -- 如果返回参数,服务器将把这些参数视为初始全局模型参数。" +#: flwr.server.client_manager.SimpleClientManager.wait_for:6 of +#, fuzzy +msgid "The number of clients to wait for." 
+msgstr "需要等待的客户数量。" -#: ../../source/ref-api/flwr.server.workflow.rst:2 +#: flwr.server.client_manager.SimpleClientManager.wait_for:8 of #, fuzzy -msgid "workflow" -msgstr "工作流程" +msgid "The time in seconds to wait for, defaults to 86400 (24h)." +msgstr "以秒为单位的等待时间,默认为 86400(24 小时)。" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.client_manager.SimpleClientManager.wait_for:11 of #, fuzzy -msgid "" -":py:obj:`DefaultWorkflow `\\ " -"\\(\\[fit\\_workflow\\, ...\\]\\)" -msgstr "" -":py:obj:`DefaultWorkflow `\\ " -"\\(\\[fit\\_workflow\\, ...\\]\\)" +msgid "**success**" +msgstr "**success**" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of +#: ../../source/ref-api/flwr.server.start_server.rst:2 #, fuzzy -msgid "Default workflow in Flower." -msgstr "Flower 中的默认工作流程。" +msgid "start\\_server" +msgstr "server.start_server" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#, fuzzy +#: flwr.server.app.start_server:3 of +msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." +msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" + +#: flwr.server.app.start_server:5 of msgid "" -":py:obj:`SecAggPlusWorkflow `\\ " -"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, then `start_server` will create one." +msgstr "服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例,`start_server` 将创建一个。" + +#: flwr.server.app.start_server:12 of +msgid "" +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`start_server` will use `flwr.server.strategy.FedAvg`." 
msgstr "" -":py:obj:`SecAggPlusWorkflow `\\ " -"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" +"抽象基类 `flwr.server.strategy.Strategy` 的实现。如果没有提供策略,`start_server` 将使用 " +"`flwr.server.strategy.FedAvg`。" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 -#: of -#, fuzzy -msgid "The workflow for the SecAgg+ protocol." -msgstr "SecAgg+ 协议的工作流程。" +#: flwr.server.app.start_server:16 of +msgid "" +"An implementation of the abstract base class `flwr.server.ClientManager`." +" If no implementation is provided, then `start_server` will use " +"`flwr.server.client_manager.SimpleClientManager`." +msgstr "" +"抽象基类 `flwr.server.ClientManager` 的实现。如果没有提供实现,`start_server` 将使用 " +"`flwr.server.client_manager.SimpleClientManager`。" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#, fuzzy +#: flwr.server.app.start_server:21 of msgid "" -":py:obj:`SecAggWorkflow `\\ " -"\\(reconstruction\\_threshold\\, \\*\\)" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" clients. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower clients need to be started with the same value (see " +"`flwr.client.start_client`), otherwise clients will not know about the " +"increased limit and block larger messages." msgstr "" -":py:obj:`SecAggWorkflow `\\ " -"\\(reconstruction\\_threshold\\, \\*\\)" +"可与 Flower 客户端交换的 gRPC 消息的最大长度:默认值对大多数模型都足够了。训练超大模型的用户可能需要增加该值。请注意,Flower " +"客户端需要以相同的值启动(请参阅 `flwr.client.start_client`),否则客户端将不知道已增加的限制并阻止更大的消息。" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of -#, fuzzy -msgid "The workflow for the SecAgg protocol." 
-msgstr "SecAgg 协议的工作流程。" +#: flwr.server.app.start_server:28 of +msgid "" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order: * CA certificate. * " +"server certificate. * server private key." +msgstr "" +"包含根证书、服务器证书和私钥的元组,用于启动启用 SSL 的安全服务器。元组应按以下顺序包含三个字节元素: * CA 证书,* 服务器证书, * " +"服务器私钥。" -#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 -#, fuzzy -msgid "DefaultWorkflow" -msgstr "工作流程" +#: flwr.server.app.start_server:28 of +msgid "" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order:" +msgstr "包含根证书、服务器证书和私钥的元组,用于启动启用 SSL 的安全服务器。元组应按以下顺序包含三个字节元素:" -#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 +#: flwr.server.app.start_server:32 of +msgid "CA certificate." +msgstr "CA 证书。" + +#: flwr.server.app.start_server:33 of +msgid "server certificate." +msgstr "服务器证书。" + +#: flwr.server.app.start_server:34 of +msgid "server private key." +msgstr "服务器私人密钥。" + +#: flwr.server.app.start_server:37 of +msgid "**hist** -- Object containing training and evaluation metrics." 
+msgstr "**hist** -- 包含训练和评估指标的对象。" + +#: flwr.server.app.start_server:42 of +msgid "Starting an insecure server:" +msgstr "启动不安全的服务器:" + +#: flwr.server.app.start_server:46 of +msgid "Starting an SSL-enabled server:" +msgstr "启动支持 SSL 的服务器:" + +#: ../../source/ref-api/flwr.server.strategy.rst:2 #, fuzzy -msgid "SecAggPlusWorkflow" -msgstr "工作流程" +msgid "strategy" +msgstr "Krum 策略。" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -"The SecAgg+ protocol ensures the secure summation of integer vectors " -"owned by multiple parties, without accessing any individual integer " -"vector. This workflow allows the server to compute the weighted average " -"of model parameters across all clients, ensuring individual contributions" -" remain private. This is achieved by clients sending both, a weighting " -"factor and a weighted version of the locally updated parameters, both of " -"which are masked for privacy. Specifically, each client uploads \"[w, w *" -" params]\" with masks, where weighting factor 'w' is the number of " -"examples ('num_examples') and 'params' represents the model parameters " -"('parameters') from the client's `FitRes`. The server then aggregates " -"these contributions to compute the weighted average of model parameters." 
+":py:obj:`Bulyan `\\ \\(\\*\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" msgstr "" -"SecAgg+ " -"协议可确保对多方拥有的整数向量进行安全求和,而不会访问任何单个整数向量。该工作流程允许服务器计算所有客户端模型参数的加权平均值,确保个人贡献保持私密。这可以通过客户端同时发送加权因子和本地更新参数的加权版本来实现,为了保护隐私,两者都会被屏蔽。具体来说,每个客户端都会上传带掩码的\"[w," -" w * params]\",其中加权因子 \"w \"是示例数(\"num_examples\"),\"params \"代表客户端 " -"\"FitRes \"中的模型参数(\"parameters\")。然后,服务器会汇总这些贡献,计算模型参数的加权平均值。" +":py:obj:`Bulyan `\\ \\(\\*\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.bulyan.Bulyan:1 of +msgid "Bulyan strategy." +msgstr "Bulyan 策略。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -"The protocol involves four main stages: - 'setup': Send SecAgg+ " -"configuration to clients and collect their public keys. - 'share keys': " -"Broadcast public keys among clients and collect encrypted secret" -msgstr "协议包括四个主要阶段: - 设置\": 向客户端发送 SecAgg+ 配置并收集其公钥。- 共享密钥\": 在客户端之间广播公钥,并收集加密密钥。" +":py:obj:`DPFedAvgAdaptive `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\)" +msgstr "" +":py:obj:`DPFedAvgAdaptive `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\)" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 -#: of -#, fuzzy -msgid "key shares." -msgstr "关键股份。" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." 
+msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:18 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:18 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -"'collect masked vectors': Forward encrypted secret key shares to target " -"clients and collect masked model parameters." -msgstr "收集屏蔽向量\": 向目标客户端转发加密密钥共享,并收集屏蔽模型参数。" +":py:obj:`DPFedAvgFixed `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" +msgstr "" +":py:obj:`DPFedAvgFixed `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:20 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:20 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of +msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." +msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -"'unmask': Collect secret key shares to decrypt and aggregate the model " -"parameters." -msgstr "解密\": 收集密钥共享,解密并汇总模型参数。" +":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " +"`\\ " +"\\(...\\)" +msgstr "" +":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " +"`\\ " +"\\(...\\)" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:22 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:22 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 #: of #, fuzzy -msgid "" -"Only the aggregated model parameters are exposed and passed to " -"`Strategy.aggregate_fit`, ensuring individual data privacy." 
-msgstr "只有聚合模型参数才会公开并传递给 `Strategy.aggregate_fit`,从而确保个人数据隐私。" +msgid "Strategy wrapper for central DP with client-side adaptive clipping." +msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:25 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -"The number of shares into which each client's private key is split under " -"the SecAgg+ protocol. If specified as a float, it represents the " -"proportion of all selected clients, and the number of shares will be set " -"dynamically in the run time. A private key can be reconstructed from " -"these shares, allowing for the secure aggregation of model updates. Each " -"client sends one share to each of its neighbors while retaining one." +":py:obj:`DifferentialPrivacyClientSideFixedClipping " +"`\\ " +"\\(...\\)" msgstr "" -"在 SecAgg+ " -"协议下,每个客户的私钥被分成的份数。如果指定为浮点数,则代表所有选定客户的比例,份额数将在运行时动态设置。私钥可以从这些份额中重建,从而实现模型更新的安全聚合。每个客户端向其每个邻居发送一份,同时保留一份。" +":py:obj:`DifferentialPrivacyClientSideFixedClipping " +"`\\ " +"\\(...\\)" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:25 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:32 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 #: of #, fuzzy -msgid "" -"The minimum number of shares required to reconstruct a client's private " -"key, or, if specified as a float, it represents the proportion of the " -"total number of shares needed for reconstruction. This threshold ensures " -"privacy by allowing for the recovery of contributions from dropped " -"clients during aggregation, without compromising individual client data." -msgstr "重建客户私钥所需的最小份数,如果指定为浮动,则表示重建所需的份数占总份数的比例。这个阈值允许在聚合过程中恢复掉线客户的贡献,从而确保隐私,而不会泄露单个客户的数据。" +msgid "Strategy wrapper for central DP with client-side fixed clipping." 
+msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:31 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:38 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -"The maximum value of the weight that can be assigned to any single " -"client's update during the weighted average calculation on the server " -"side, e.g., in the FedAvg algorithm." -msgstr "在服务器端进行加权平均计算(如 FedAvg 算法)时,可分配给任何单个客户端更新的权重的最大值。" +":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " +"`\\ " +"\\(...\\)" +msgstr "" +":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " +"`\\ " +"\\(...\\)" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:35 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:42 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 #: of #, fuzzy -msgid "" -"The range within which model parameters are clipped before quantization. " -"This parameter ensures each model parameter is bounded within " -"[-clipping_range, clipping_range], facilitating quantization." -msgstr "量化前模型参数的裁剪范围。该参数可确保每个模型参数都在 [-clipping_range, clipping_range] 范围内,便于量化。" +msgid "Strategy wrapper for central DP with server-side adaptive clipping." +msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:39 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:46 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -"The size of the range into which floating-point model parameters are " -"quantized, mapping each parameter to an integer in [0, " -"quantization_range-1]. This facilitates cryptographic operations on the " -"model updates." 
-msgstr "浮点模型参数量化范围的大小,将每个参数映射为 [0, quantization_range-1] 中的整数。这有助于对模型更新进行加密操作。" - -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:43 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:50 -#: of -#, fuzzy -msgid "" -"The range of values from which random mask entries are uniformly sampled " -"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " -"Please use 2**n values for `modulus_range` to prevent overflow issues." +":py:obj:`DifferentialPrivacyServerSideFixedClipping " +"`\\ " +"\\(...\\)" msgstr "" -"对随机掩码条目进行均匀采样的数值范围([0, modulus_range-1])。modulus_range \"必须小于 " -"4294967296。为防止出现溢出问题,请为 `modulus_range` 使用 2**n 的值。" +":py:obj:`DifferentialPrivacyServerSideFixedClipping " +"`\\ " +"\\(...\\)" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:47 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:54 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 #: of #, fuzzy -msgid "" -"The timeout duration in seconds. If specified, the workflow will wait for" -" replies for this duration each time. If `None`, there is no time limit " -"and the workflow will wait until replies for all messages are received." -msgstr "超时时间(秒)。如果指定,工作流将在每次等待回复的时间内等待回复。如果指定为 \"无\",则没有时间限制,工作流程将一直等待到收到所有信息的回复。" +msgid "Strategy wrapper for central DP with server-side fixed clipping." +msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -"Generally, higher `num_shares` means more robust to dropouts while " -"increasing the computational costs; higher `reconstruction_threshold` " -"means better privacy guarantees but less tolerance to dropouts." 
-msgstr "一般来说,\"份额数 \"越高,意味着对丢弃的鲁棒性越强,同时计算成本也会增加;\"重构阈值 \"越高,意味着隐私保证越好,但对丢弃的容忍度越低。" +":py:obj:`FaultTolerantFedAvg " +"`\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" +":py:obj:`FaultTolerantFedAvg " +"`\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:58 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:64 -#: of -#, fuzzy -msgid "Too large `max_weight` may compromise the precision of the quantization." -msgstr "过大的 `max_weight` 可能会影响量化的精度。" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of +msgid "Configurable fault-tolerant FedAvg strategy implementation." +msgstr "可配置的容错 FedAvg 策略实施。" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy -msgid "`modulus_range` must be 2**n and larger than `quantization_range`." -msgstr "modulus_range \"必须为 2**n,且大于 \"quantization_range\"。" +msgid "" +":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" +":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 of +msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." +msgstr "FedAdagrad 策略 - 使用 Adagrad 进行自适应联合优化。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -"When `num_shares` is a float, it is interpreted as the proportion of all " -"selected clients, and hence the number of shares will be determined in " -"the runtime. This allows for dynamic adjustment based on the total number" -" of participating clients." 
-msgstr "当 `num_shares` 为浮点数时,它被解释为所有选定客户端的比例,因此份额数将在运行时确定。这样就可以根据参与客户端的总数进行动态调整。" +":py:obj:`FedAdam `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" +":py:obj:`FedAdam `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:69 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedadam.FedAdam:1 of +msgid "FedAdam - Adaptive Federated Optimization using Adam." +msgstr "FedAdam - 使用 Adam 进行自适应联合优化。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -"Similarly, when `reconstruction_threshold` is a float, it is interpreted " -"as the proportion of the number of shares needed for the reconstruction " -"of a private key. This feature enables flexibility in setting the " -"security threshold relative to the number of distributed shares." +":py:obj:`FedAvg `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -"同样,当 `reconstruction_threshold` " -"为浮点数时,它被解释为重建私钥所需的份额数比例。这一功能使我们可以根据分发的份额数灵活设置安全阈值。" +":py:obj:`FedAvg `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:73 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +msgid "Federated Averaging strategy." +msgstr "联邦平均策略。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -"`num_shares`, `reconstruction_threshold`, and the quantization parameters" -" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " -"roles in balancing privacy, robustness, and efficiency within the SecAgg+" -" protocol." 
+":py:obj:`FedAvgAndroid `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -"份额数\"、\"重建阈值 \"和量化参数(\"裁剪范围\"、\"量化范围\"、\"模数范围\")在平衡 SecAgg+ " -"协议的隐私性、稳健性和效率方面发挥着关键作用。" +":py:obj:`FedAvgAndroid `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`collect_masked_vectors_stage " -"`\\" -" \\(driver\\, ...\\)" +":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -":py:obj:`collect_masked_vectors_stage " -"`\\" -" \\(driver\\, ...\\)" +":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -#, fuzzy -msgid "Execute the 'collect masked vectors' stage." -msgstr "执行 \"收集屏蔽向量 \"阶段。" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 of +msgid "Federated Averaging with Momentum strategy." 
+msgstr "联邦平均动量策略。" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`setup_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`FedMedian `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -":py:obj:`setup_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`FedMedian `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedmedian.FedMedian:1 of #, fuzzy -msgid "Execute the 'setup' stage." -msgstr "执行 \"设置 \"阶段。" +msgid "Configurable FedMedian strategy implementation." +msgstr "可配置的 FedAvg 策略实施。" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`share_keys_stage " -"`\\ " -"\\(driver\\, context\\, state\\)" +":py:obj:`FedOpt `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -":py:obj:`share_keys_stage " -"`\\ " -"\\(driver\\, context\\, state\\)" +":py:obj:`FedOpt `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedopt.FedOpt:1 of #, fuzzy -msgid "Execute the 'share keys' stage." -msgstr "执行 \"共享密钥 \"阶段。" +msgid "Federated Optim strategy." 
+msgstr "联邦优化策略。" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`unmask_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`FedProx `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -":py:obj:`unmask_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`FedProx `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 -#: of -msgid "Execute the 'unmask' stage." -msgstr "" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedprox.FedProx:1 of +msgid "Federated Optimization strategy." +msgstr "联邦优化策略。" -#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy -msgid "SecAggWorkflow" -msgstr "工作流程" - -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of msgid "" -"Bases: " -":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" +":py:obj:`FedTrimmedAvg `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" +":py:obj:`FedTrimmedAvg `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of -msgid "" -"The SecAgg protocol ensures the secure summation of integer vectors owned" -" by multiple parties, without accessing any individual integer vector. " -"This workflow allows the server to compute the weighted average of model " -"parameters across all clients, ensuring individual contributions remain " -"private. 
This is achieved by clients sending both, a weighting factor and" -" a weighted version of the locally updated parameters, both of which are " -"masked for privacy. Specifically, each client uploads \"[w, w * params]\"" -" with masks, where weighting factor 'w' is the number of examples " -"('num_examples') and 'params' represents the model parameters " -"('parameters') from the client's `FitRes`. The server then aggregates " -"these contributions to compute the weighted average of model parameters." -msgstr "" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of +msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." +msgstr "带修剪均值的联邦平均法[Dong Yin 等,2021]。" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" -"The protocol involves four main stages: - 'setup': Send SecAgg " -"configuration to clients and collect their public keys. - 'share keys': " -"Broadcast public keys among clients and collect encrypted secret" +":py:obj:`FedXgbBagging `\\ " +"\\(\\[evaluate\\_function\\]\\)" msgstr "" +":py:obj:`FedXgbBagging `\\ " +"\\(\\[evaluate\\_function\\]\\)" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 of -msgid "" -"Each client's private key is split into N shares under the SecAgg " -"protocol, where N is the number of selected clients." -msgstr "" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of +#, fuzzy +msgid "Configurable FedXgbBagging strategy implementation." +msgstr "可配置的 FedXgbNAvg 策略实施。" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:56 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" -"Generally, higher `reconstruction_threshold` means better privacy " -"guarantees but less tolerance to dropouts." 
+":py:obj:`FedXgbCyclic `\\ " +"\\(\\*\\*kwargs\\)" msgstr "" +":py:obj:`FedXgbCyclic `\\ " +"\\(\\*\\*kwargs\\)" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 of -msgid "" -"When `reconstruction_threshold` is a float, it is interpreted as the " -"proportion of the number of all selected clients needed for the " -"reconstruction of a private key. This feature enables flexibility in " -"setting the security threshold relative to the number of selected " -"clients." -msgstr "" - -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:64 of -msgid "" -"`reconstruction_threshold`, and the quantization parameters " -"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " -"roles in balancing privacy, robustness, and efficiency within the SecAgg " -"protocol." -msgstr "" - -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "" -":py:obj:`collect_masked_vectors_stage " -"`\\ " -"\\(driver\\, ...\\)" -msgstr "" - -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "" -":py:obj:`setup_stage `\\" -" \\(driver\\, context\\, state\\)" -msgstr "" - -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "" -":py:obj:`share_keys_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" -msgstr "" - -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "" -":py:obj:`unmask_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" -msgstr "" - -#: ../../source/ref-api/flwr.simulation.rst:2 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of #, fuzzy -msgid "simulation" -msgstr "运行模拟" - -#: ../../source/ref-api/flwr.simulation.rst:18::1 -msgid "" 
-":py:obj:`start_simulation `\\ \\(\\*\\," -" client\\_fn\\[\\, ...\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.simulation.rst:18::1 -#: flwr.simulation.app.start_simulation:1 of -msgid "Start a Ray-based Flower simulation server." -msgstr "启动基于 Ray 的Flower模拟服务器。" +msgid "Configurable FedXgbCyclic strategy implementation." +msgstr "可配置的 FedAvg 策略实施。" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" -":py:obj:`run_simulation `\\ " -"\\(server\\_app\\, client\\_app\\, ...\\)" +":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" msgstr "" +":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" -#: ../../source/ref-api/flwr.simulation.rst:18::1 -#: flwr.simulation.run_simulation.run_simulation:1 of -msgid "Run a Flower App using the Simulation Engine." -msgstr "" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of +msgid "Configurable FedXgbNnAvg strategy implementation." +msgstr "可配置的 FedXgbNAvg 策略实施。" -#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy -msgid "run\\_simulation" -msgstr "运行模拟" - -#: flwr.simulation.run_simulation.run_simulation:3 of msgid "" -"The `ServerApp` to be executed. It will send messages to different " -"`ClientApp` instances running on different (virtual) SuperNodes." +":py:obj:`FedYogi `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" +":py:obj:`FedYogi `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" -#: flwr.simulation.run_simulation.run_simulation:6 of -msgid "" -"The `ClientApp` to be executed by each of the SuperNodes. It will receive" -" messages sent by the `ServerApp`." -msgstr "" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "FedYogi [Reddi et al., 2020] strategy." 
+msgstr "FedYogi [Reddi 等人,2020] 策略。" -#: flwr.simulation.run_simulation.run_simulation:9 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" -"Number of nodes that run a ClientApp. They can be sampled by a Driver in " -"the ServerApp and receive a Message describing what the ClientApp should " -"perform." -msgstr "" - -#: flwr.simulation.run_simulation.run_simulation:13 of -msgid "A simulation backend that runs `ClientApp`s." +":py:obj:`Krum `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" +":py:obj:`Krum `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" -#: flwr.simulation.run_simulation.run_simulation:15 of -msgid "" -"'A dictionary, e.g {\"\": , \"\": } to " -"configure a backend. Values supported in are those included by " -"`flwr.common.typing.ConfigsRecordValues`." -msgstr "" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.krum.Krum:1 of +#, fuzzy +msgid "Krum [Blanchard et al., 2017] strategy." +msgstr "FedYogi [Reddi 等人,2020] 策略。" -#: flwr.simulation.run_simulation.run_simulation:19 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" -"A boolean to indicate whether to enable GPU growth on the main thread. " -"This is desirable if you make use of a TensorFlow model on your " -"`ServerApp` while having your `ClientApp` running on the same GPU. " -"Without enabling this, you might encounter an out-of-memory error because" -" TensorFlow, by default, allocates all GPU memory. Read more about how " -"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " -"documentation: https://www.tensorflow.org/api/stable." 
+":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " +"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" msgstr "" +":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " +"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" -#: flwr.simulation.run_simulation.run_simulation:26 of -msgid "" -"When diabled, only INFO, WARNING and ERROR log messages will be shown. If" -" enabled, DEBUG-level logs will be displayed." -msgstr "" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Configurable QFedAvg strategy implementation." +msgstr "可配置的 QFedAvg 策略实施。" -#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy -msgid "start\\_simulation" -msgstr "start_simulation" - -#: flwr.simulation.app.start_simulation:3 of -msgid "" -"A function creating client instances. The function must take a single " -"`str` argument called `cid`. It should return a single client instance of" -" type Client. Note that the created client instances are ephemeral and " -"will often be destroyed after a single method invocation. Since client " -"instances are not long-lived, they should not attempt to carry state over" -" method invocations. Any state required by the instance (model, dataset, " -"hyperparameters, ...) should be (re-)created in either the call to " -"`client_fn` or the call to any of the client methods (e.g., load " -"evaluation data in the `evaluate` method itself)." -msgstr "" -"创建客户端实例的函数。该函数必须接受一个名为 `cid` 的 `str` 参数。它应返回一个 Client " -"类型的客户端实例。请注意,创建的客户端实例是短暂的,通常在调用一个方法后就会被销毁。由于客户机实例不是长期存在的,它们不应试图在方法调用时携带状态数据。实例所需的任何状态数据(模型、数据集、超参数......)都应在调用" -" `client_fn` 或任何客户端方法(例如,在 `evaluate` 方法中加载评估数据)时(重新)创建。" +msgid ":py:obj:`Strategy `\\ \\(\\)" +msgstr "server.strategy.Strategy" -#: flwr.simulation.app.start_simulation:13 of -msgid "" -"The total number of clients in this simulation. This must be set if " -"`clients_ids` is not set and vice-versa." 
-msgstr "本次模拟的客户总数。如果未设置 `clients_ids`,则必须设置该参数,反之亦然。" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.strategy.Strategy:1 of +msgid "Abstract base class for server strategy implementations." +msgstr "服务器策略实现的抽象基类。" -#: flwr.simulation.app.start_simulation:16 of -msgid "" -"List `client_id`s for each client. This is only required if `num_clients`" -" is not set. Setting both `num_clients` and `clients_ids` with " -"`len(clients_ids)` not equal to `num_clients` generates an error." -msgstr "" -"列出每个客户的 `client_id`。只有在未设置 `num_clients` " -"时才需要这样做。同时设置`num_clients`和`clients_ids`,且`len(clients_ids)`不等于`num_clients`,会产生错误。" +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:2 +#, fuzzy +msgid "Bulyan" +msgstr "Bulyan" -#: flwr.simulation.app.start_simulation:20 of +#: flwr.server.strategy.bulyan.Bulyan:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 +#: flwr.server.strategy.fedmedian.FedMedian:1 +#: flwr.server.strategy.fedopt.FedOpt:1 flwr.server.strategy.fedprox.FedProx:1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 +#: flwr.server.strategy.krum.Krum:1 flwr.server.strategy.qfedavg.QFedAvg:1 of #, fuzzy -msgid "" -"CPU and GPU resources for a single client. Supported keys are `num_cpus` " -"and `num_gpus`. To understand the GPU utilization caused by `num_gpus`, " -"as well as using custom resources, please consult the Ray documentation." 
-msgstr "" -"\"num_gpus\": 0.0` 单个客户端的 CPU 和 GPU 资源。支持的键值为 `num_cpus` 和 `num_gpus`。要了解" -" `num_gpus` 所导致的 GPU 利用率,以及使用自定义资源的情况,请查阅 Ray 文档。" +msgid "Bases: :py:class:`~flwr.server.strategy.fedavg.FedAvg`" +msgstr "server.strategy.DPFedAvgFixed" -#: flwr.simulation.app.start_simulation:25 of -msgid "" -"An implementation of the abstract base class `flwr.server.Server`. If no " -"instance is provided, then `start_server` will create one." -msgstr "抽象基类 `flwr.server.Server`的实现。如果没有提供实例,`start_server` 将创建一个。" +#: flwr.server.strategy.bulyan.Bulyan:3 of +msgid "Implementation based on https://arxiv.org/abs/1802.07927." +msgstr "实施基于 https://arxiv.org/abs/1802.07927。" -#: flwr.simulation.app.start_simulation:31 of -msgid "" -"An implementation of the abstract base class `flwr.server.Strategy`. If " -"no strategy is provided, then `start_server` will use " -"`flwr.server.strategy.FedAvg`." -msgstr "" -"抽象基类 `flwr.server.strategy` 的实现。如果没有提供策略,`start_server` 将使用 " -"`flwr.server.strategy.FedAvg`。" +#: flwr.server.strategy.bulyan.Bulyan:5 +#: flwr.server.strategy.fedadagrad.FedAdagrad:5 +#: flwr.server.strategy.fedadam.FedAdam:5 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:5 +#: flwr.server.strategy.fedavgm.FedAvgM:5 flwr.server.strategy.fedopt.FedOpt:5 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:5 +#: flwr.server.strategy.fedyogi.FedYogi:5 flwr.server.strategy.krum.Krum:5 of +msgid "Fraction of clients used during training. Defaults to 1.0." +msgstr "训练期间使用客户的比例。默认为 1.0。" -#: flwr.simulation.app.start_simulation:35 of -msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_simulation` will use " -"`flwr.server.client_manager.SimpleClientManager`." 
-msgstr "" -"抽象基类 `flwr.server.ClientManager` 的实现。如果没有提供实现,`start_simulation` 将使用 " -"`flwr.server.client_manager.SimpleClientManager`。" +#: flwr.server.strategy.bulyan.Bulyan:7 +#: flwr.server.strategy.fedadagrad.FedAdagrad:7 +#: flwr.server.strategy.fedadam.FedAdam:7 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:7 +#: flwr.server.strategy.fedavgm.FedAvgM:7 flwr.server.strategy.fedopt.FedOpt:7 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:7 +#: flwr.server.strategy.fedyogi.FedYogi:7 flwr.server.strategy.krum.Krum:7 of +msgid "Fraction of clients used during validation. Defaults to 1.0." +msgstr "验证过程中使用的客户端比例。默认为 1.0。" -#: flwr.simulation.app.start_simulation:39 of -msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args: { \"ignore_reinit_error\": True, " -"\"include_dashboard\": False } An empty dictionary can be used " -"(ray_init_args={}) to prevent any arguments from being passed to " -"ray.init." -msgstr "" -"可选字典,包含调用 `ray.init` 时的参数。如果 ray_init_args 为 None(默认值),则将使用以下默认参数初始化 Ray:" -" { \"ignore_reinit_error\": True, \"include_dashboard\": False } " -"可以使用空字典(ray_init_args={})来防止向 ray.init 传递任何参数。" +#: flwr.server.strategy.bulyan.Bulyan:9 +#: flwr.server.strategy.fedadagrad.FedAdagrad:9 +#: flwr.server.strategy.fedadam.FedAdam:9 flwr.server.strategy.fedavg.FedAvg:13 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:9 +#: flwr.server.strategy.fedavgm.FedAvgM:9 flwr.server.strategy.fedopt.FedOpt:9 +#: flwr.server.strategy.fedprox.FedProx:45 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:9 +#: flwr.server.strategy.fedyogi.FedYogi:9 flwr.server.strategy.krum.Krum:9 of +msgid "Minimum number of clients used during training. Defaults to 2." +msgstr "训练期间使用的最少客户数。默认为 2。" -#: flwr.simulation.app.start_simulation:39 of -msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. 
If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args:" -msgstr "可选字典,包含调用 `ray.init` 时的参数。如果 ray_init_args 为 None(默认值),则将使用以下默认参数初始化 Ray:" +#: flwr.server.strategy.bulyan.Bulyan:11 +#: flwr.server.strategy.fedadagrad.FedAdagrad:11 +#: flwr.server.strategy.fedadam.FedAdam:11 +#: flwr.server.strategy.fedavg.FedAvg:15 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:11 +#: flwr.server.strategy.fedavgm.FedAvgM:11 +#: flwr.server.strategy.fedopt.FedOpt:11 +#: flwr.server.strategy.fedprox.FedProx:47 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:11 +#: flwr.server.strategy.fedyogi.FedYogi:11 flwr.server.strategy.krum.Krum:11 of +msgid "Minimum number of clients used during validation. Defaults to 2." +msgstr "验证过程中使用的最少客户端数量。默认为 2。" -#: flwr.simulation.app.start_simulation:43 of -msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" -msgstr "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#: flwr.server.strategy.bulyan.Bulyan:13 +#: flwr.server.strategy.fedadagrad.FedAdagrad:13 +#: flwr.server.strategy.fedadam.FedAdam:13 +#: flwr.server.strategy.fedavg.FedAvg:17 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:13 +#: flwr.server.strategy.fedavgm.FedAvgM:13 +#: flwr.server.strategy.fedopt.FedOpt:13 +#: flwr.server.strategy.fedprox.FedProx:49 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:13 +#: flwr.server.strategy.fedyogi.FedYogi:13 flwr.server.strategy.krum.Krum:13 of +msgid "Minimum number of total clients in the system. Defaults to 2." +msgstr "系统中客户总数的最小值。默认为 2。" -#: flwr.simulation.app.start_simulation:45 of -msgid "" -"An empty dictionary can be used (ray_init_args={}) to prevent any " -"arguments from being passed to ray.init." -msgstr "可以使用空字典 (ray_init_args={}) 来防止向 ray.init 传递任何参数。" +#: flwr.server.strategy.bulyan.Bulyan:15 flwr.server.strategy.krum.Krum:15 of +msgid "Number of malicious clients in the system. Defaults to 0." 
+msgstr "系统中恶意客户端的数量。默认为 0。" -#: flwr.simulation.app.start_simulation:48 of -msgid "" -"Set to True to prevent `ray.shutdown()` in case " -"`ray.is_initialized()=True`." -msgstr "设为 True 可在 `ray.is_initialized()=True` 情况下阻止 `ray.shutdown()` 。" +#: flwr.server.strategy.bulyan.Bulyan:17 +#: flwr.server.strategy.fedadagrad.FedAdagrad:15 +#: flwr.server.strategy.fedadam.FedAdam:15 +#: flwr.server.strategy.fedavg.FedAvg:19 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:15 +#: flwr.server.strategy.fedavgm.FedAvgM:15 +#: flwr.server.strategy.fedopt.FedOpt:15 +#: flwr.server.strategy.fedprox.FedProx:51 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:15 +#: flwr.server.strategy.fedyogi.FedYogi:17 +#: flwr.server.strategy.fedyogi.FedYogi:18 +#: flwr.server.strategy.fedyogi.FedYogi:19 flwr.server.strategy.krum.Krum:20 of +msgid "Optional function used for validation. Defaults to None." +msgstr "用于验证的可选函数。默认为 \"无\"。" -#: flwr.simulation.app.start_simulation:50 of -#, fuzzy -msgid "" -"Optionally specify the type of actor to use. The actor object, which " -"persists throughout the simulation, will be the process in charge of " -"executing a ClientApp wrapping input argument `client_fn`." -msgstr "可选择指定要使用的actor类型。actor对象将在整个模拟过程中持续存在,它将是负责运行客户端作业(即其 `fit()`方法)的进程。" - -#: flwr.simulation.app.start_simulation:54 of -msgid "" -"If you want to create your own Actor classes, you might need to pass some" -" input argument. You can use this dictionary for such purpose." -msgstr "如果您想创建自己的 Actor 类,可能需要传递一些输入参数。为此,您可以使用本字典。" - -#: flwr.simulation.app.start_simulation:57 of -msgid "" -"(default: \"DEFAULT\") Optional string (\"DEFAULT\" or \"SPREAD\") for " -"the VCE to choose in which node the actor is placed. If you are an " -"advanced user needed more control you can use lower-level scheduling " -"strategies to pin actors to specific compute nodes (e.g. via " -"NodeAffinitySchedulingStrategy). Please note this is an advanced feature." 
-" For all details, please refer to the Ray documentation: " -"https://docs.ray.io/en/latest/ray-core/scheduling/index.html" -msgstr "" -"(默认:\"DEFAULT\")可选字符串(\"DEFAULT \"或 \"SPREAD\"),供 VCE " -"选择将行为体放置在哪个节点上。如果你是需要更多控制权的高级用户,可以使用低级调度策略将actor固定到特定计算节点(例如,通过 " -"NodeAffinitySchedulingStrategy)。请注意,这是一项高级功能。有关详细信息,请参阅 Ray " -"文档:https://docs.ray.io/en/latest/ray-core/scheduling/index.html" +#: flwr.server.strategy.bulyan.Bulyan:19 +#: flwr.server.strategy.fedadagrad.FedAdagrad:17 +#: flwr.server.strategy.fedadam.FedAdam:17 +#: flwr.server.strategy.fedavg.FedAvg:21 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:17 +#: flwr.server.strategy.fedavgm.FedAvgM:17 +#: flwr.server.strategy.fedopt.FedOpt:17 +#: flwr.server.strategy.fedprox.FedProx:53 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:17 +#: flwr.server.strategy.fedyogi.FedYogi:20 flwr.server.strategy.krum.Krum:22 of +msgid "Function used to configure training. Defaults to None." +msgstr "用于配置训练的功能。默认为 \"无\"。" -#: flwr.simulation.app.start_simulation:66 of -msgid "**hist** -- Object containing metrics from training." -msgstr "**hist** -- 包含训练指标的对象。" +#: flwr.server.strategy.bulyan.Bulyan:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:19 +#: flwr.server.strategy.fedadam.FedAdam:19 +#: flwr.server.strategy.fedavg.FedAvg:23 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:19 +#: flwr.server.strategy.fedavgm.FedAvgM:19 +#: flwr.server.strategy.fedopt.FedOpt:19 +#: flwr.server.strategy.fedprox.FedProx:55 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:19 +#: flwr.server.strategy.fedyogi.FedYogi:22 flwr.server.strategy.krum.Krum:24 of +msgid "Function used to configure validation. Defaults to None." 
+msgstr "用于配置验证的函数。默认为 \"无\"。" -#: ../../source/ref-changelog.md:1 -msgid "Changelog" -msgstr "更新日志" +#: flwr.server.strategy.bulyan.Bulyan:23 +#: flwr.server.strategy.fedadagrad.FedAdagrad:25 +#: flwr.server.strategy.fedadam.FedAdam:21 +#: flwr.server.strategy.fedavg.FedAvg:25 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:21 +#: flwr.server.strategy.fedavgm.FedAvgM:21 +#: flwr.server.strategy.fedopt.FedOpt:21 +#: flwr.server.strategy.fedprox.FedProx:57 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:21 +#: flwr.server.strategy.fedyogi.FedYogi:24 flwr.server.strategy.krum.Krum:26 of +msgid "Whether or not accept rounds containing failures. Defaults to True." +msgstr "是否接受包含失败的轮。默认为 True。" -#: ../../source/ref-changelog.md:3 -msgid "Unreleased" -msgstr "尚未发布" - -#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:19 -#: ../../source/ref-changelog.md:83 ../../source/ref-changelog.md:176 -#: ../../source/ref-changelog.md:276 ../../source/ref-changelog.md:360 -#: ../../source/ref-changelog.md:424 ../../source/ref-changelog.md:482 -#: ../../source/ref-changelog.md:551 ../../source/ref-changelog.md:680 -#: ../../source/ref-changelog.md:722 ../../source/ref-changelog.md:789 -#: ../../source/ref-changelog.md:855 ../../source/ref-changelog.md:900 -#: ../../source/ref-changelog.md:939 ../../source/ref-changelog.md:972 -#: ../../source/ref-changelog.md:1022 -msgid "What's new?" -msgstr "有什么新内容?" +#: flwr.server.strategy.bulyan.Bulyan:25 +#: flwr.server.strategy.fedadagrad.FedAdagrad:27 +#: flwr.server.strategy.fedadam.FedAdam:23 +#: flwr.server.strategy.fedavg.FedAvg:27 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:24 +#: flwr.server.strategy.fedavgm.FedAvgM:23 +#: flwr.server.strategy.fedopt.FedOpt:23 +#: flwr.server.strategy.fedprox.FedProx:59 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:23 +#: flwr.server.strategy.fedyogi.FedYogi:26 flwr.server.strategy.krum.Krum:28 of +msgid "Initial global model parameters." 
+msgstr "初始全局模型参数。" -#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:71 -#: ../../source/ref-changelog.md:146 ../../source/ref-changelog.md:258 -#: ../../source/ref-changelog.md:348 ../../source/ref-changelog.md:412 -#: ../../source/ref-changelog.md:470 ../../source/ref-changelog.md:539 -#: ../../source/ref-changelog.md:601 ../../source/ref-changelog.md:620 -#: ../../source/ref-changelog.md:776 ../../source/ref-changelog.md:847 -#: ../../source/ref-changelog.md:884 ../../source/ref-changelog.md:927 -msgid "Incompatible changes" -msgstr "不兼容的更改" +#: flwr.server.strategy.bulyan.Bulyan:27 of +msgid "" +"Byzantine resilient aggregation rule that is used as the first step of " +"the Bulyan (e.g., Krum)" +msgstr "Byzantine弹性聚合规则,用作 Bulyan 的第一步(如 Krum)" -#: ../../source/ref-changelog.md:9 ../../source/ref-changelog.md:73 -#: ../../source/ref-changelog.md:350 ../../source/ref-changelog.md:414 -#: ../../source/ref-changelog.md:472 ../../source/ref-changelog.md:541 -#: ../../source/ref-changelog.md:603 -msgid "None" -msgstr "无" +#: flwr.server.strategy.bulyan.Bulyan:29 of +msgid "arguments to the first_aggregation rule" +msgstr "第一聚类规则的参数" -#: ../../source/ref-changelog.md:11 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy -msgid "v1.8.0 (2024-04-03)" -msgstr "v1.3.0 (2023-02-06)" - -#: ../../source/ref-changelog.md:13 ../../source/ref-changelog.md:77 -#: ../../source/ref-changelog.md:170 ../../source/ref-changelog.md:270 -#: ../../source/ref-changelog.md:354 ../../source/ref-changelog.md:418 -#: ../../source/ref-changelog.md:476 ../../source/ref-changelog.md:545 -#: ../../source/ref-changelog.md:614 -msgid "Thanks to our contributors" -msgstr "感谢我们的贡献者" - -#: ../../source/ref-changelog.md:15 ../../source/ref-changelog.md:79 -#: ../../source/ref-changelog.md:172 ../../source/ref-changelog.md:272 -#: ../../source/ref-changelog.md:356 ../../source/ref-changelog.md:420 -#: ../../source/ref-changelog.md:478 msgid "" -"We would like to 
give our special thanks to all the contributors who made" -" the new version of Flower possible (in `git shortlog` order):" -msgstr "在此,我们要特别感谢所有为 Flower 的新版本做出贡献的人员(按 `git shortlog` 顺序排列):" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" +msgstr "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" -#: ../../source/ref-changelog.md:17 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Aggregate evaluation losses using weighted average." +msgstr "采用加权平均法计算评估损失总额。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata " -"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " -"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " -"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " -"`tabdar-khan` " +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:21 -msgid "" -"**Introduce Flower Next high-level API (stable)** " -"([#3002](https://github.com/adap/flower/pull/3002), " -"[#2934](https://github.com/adap/flower/pull/2934), " -"[#2958](https://github.com/adap/flower/pull/2958), " -"[#3173](https://github.com/adap/flower/pull/3173), " -"[#3174](https://github.com/adap/flower/pull/3174), " -"[#2923](https://github.com/adap/flower/pull/2923), " -"[#2691](https://github.com/adap/flower/pull/2691), " -"[#3079](https://github.com/adap/flower/pull/3079), " -"[#2961](https://github.com/adap/flower/pull/2961), " -"[#2924](https://github.com/adap/flower/pull/2924), " -"[#3166](https://github.com/adap/flower/pull/3166), " -"[#3031](https://github.com/adap/flower/pull/3031), " -"[#3057](https://github.com/adap/flower/pull/3057), " -"[#3000](https://github.com/adap/flower/pull/3000), " -"[#3113](https://github.com/adap/flower/pull/3113), " -"[#2957](https://github.com/adap/flower/pull/2957), " -"[#3183](https://github.com/adap/flower/pull/3183), " -"[#3180](https://github.com/adap/flower/pull/3180), " -"[#3035](https://github.com/adap/flower/pull/3035), " -"[#3189](https://github.com/adap/flower/pull/3189), " -"[#3185](https://github.com/adap/flower/pull/3185), " -"[#3190](https://github.com/adap/flower/pull/3190), " -"[#3191](https://github.com/adap/flower/pull/3191), " -"[#3195](https://github.com/adap/flower/pull/3195), " -"[#3197](https://github.com/adap/flower/pull/3197))" -msgstr "" +#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "Aggregate fit results using Bulyan." 
+msgstr "使用 Bulyan 技术汇总拟合结果。" -#: ../../source/ref-changelog.md:23 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"The Flower Next high-level API is stable! Flower Next is the future of " -"Flower - all new features (like Flower Mods) will be built on top of it. " -"You can start to migrate your existing projects to Flower Next by using " -"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " -"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." -" Flower Next allows you to run multiple projects concurrently (we call " -"this multi-run) and execute the same project in either simulation " -"environments or deployment environments without having to change a single" -" line of code. The best part? It's fully compatible with existing Flower " -"projects that use `Strategy`, `NumPyClient` & co." +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" -#: ../../source/ref-changelog.md:25 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 +#: 
flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of +msgid "Configure the next round of evaluation." +msgstr "配置下一轮评估。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Introduce Flower Next low-level API (preview)** " -"([#3062](https://github.com/adap/flower/pull/3062), " -"[#3034](https://github.com/adap/flower/pull/3034), " -"[#3069](https://github.com/adap/flower/pull/3069))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), " -"[#2493](https://github.com/adap/flower/pull/2493))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:27 -msgid "" -"In addition to the Flower Next *high-level* API that uses `Strategy`, " -"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " -"new Flower Next *low-level* API. The low-level API allows for granular " -"control of every aspect of the learning process by sending/receiving " -"individual messages to/from client nodes. 
The new `ServerApp` supports " -"registering a custom `main` function that allows writing custom training " -"loops for methods like async FL, cyclic training, or federated analytics." -" The new `ClientApp` supports registering `train`, `evaluate` and `query`" -" functions that can access the raw message received from the `ServerApp`." -" New abstractions like `RecordSet`, `Message` and `Context` further " -"enable sending multiple models, multiple sets of config values and " -"metrics, stateful computations on the client node and implementations of " -"custom SMPC protocols, to name just a few." -msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_fit:1 
+#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of +msgid "Configure the next round of training." +msgstr "配置下一轮训练。" -#: ../../source/ref-changelog.md:29 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Introduce Flower Mods (preview)** " -"([#3054](https://github.com/adap/flower/pull/3054), " -"[#2911](https://github.com/adap/flower/pull/2911), " -"[#3083](https://github.com/adap/flower/pull/3083))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:31 -msgid "" -"Flower Modifiers (we call them Mods) can intercept messages and analyze, " -"edit or handle them directly. Mods can be used to develop pluggable " -"modules that work across different projects. Flower 1.8 already includes " -"mods to log the size of a message, the number of parameters sent over the" -" network, differential privacy with fixed clipping and adaptive clipping," -" local differential privacy and secure aggregation protocols SecAgg and " -"SecAgg+. The Flower Mods API is released as a preview, but researchers " -"can already use it to experiment with arbirtrary SMPC protocols." 
-msgstr "" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Evaluate model parameters using an evaluation function." +msgstr "使用评估函数评估模型参数。" -#: ../../source/ref-changelog.md:33 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Fine-tune LLMs with LLM FlowerTune** " -"([#3029](https://github.com/adap/flower/pull/3029), " -"[#3089](https://github.com/adap/flower/pull/3089), " -"[#3092](https://github.com/adap/flower/pull/3092), " -"[#3100](https://github.com/adap/flower/pull/3100), " -"[#3114](https://github.com/adap/flower/pull/3114), " -"[#3162](https://github.com/adap/flower/pull/3162), " -"[#3172](https://github.com/adap/flower/pull/3172))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " -"[#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475)))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:35 -msgid "" -"We are introducing LLM FlowerTune, an introductory example that " -"demonstrates federated LLM fine-tuning of 
pre-trained Llama2 models on " -"the Alpaca-GPT4 dataset. The example is built to be easily adapted to use" -" different models and/or datasets. Read our blog post [LLM FlowerTune: " -"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" -"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." -msgstr "" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.initialize_parameters:1 +#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Initialize global model parameters." 
+msgstr "初始化全局模型参数。" -#: ../../source/ref-changelog.md:37 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Introduce built-in Differential Privacy (preview)** " -"([#2798](https://github.com/adap/flower/pull/2798), " -"[#2959](https://github.com/adap/flower/pull/2959), " -"[#3038](https://github.com/adap/flower/pull/3038), " -"[#3147](https://github.com/adap/flower/pull/3147), " -"[#2909](https://github.com/adap/flower/pull/2909), " -"[#2893](https://github.com/adap/flower/pull/2893), " -"[#2892](https://github.com/adap/flower/pull/2892), " -"[#3039](https://github.com/adap/flower/pull/3039), " -"[#3074](https://github.com/adap/flower/pull/3074))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"** 支持 SSL 的服务器和客户端** ([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_evaluation_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of +msgid "Use a fraction of available clients for evaluation." 
+msgstr "使用部分可用客户进行评估。" -#: ../../source/ref-changelog.md:39 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"Built-in Differential Privacy is here! Flower supports both central and " -"local differential privacy (DP). Central DP can be configured with either" -" fixed or adaptive clipping. The clipping can happen either on the " -"server-side or the client-side. Local DP does both clipping and noising " -"on the client-side. A new documentation page [explains Differential " -"Privacy approaches](https://flower.ai/docs/framework/explanation-" -"differential-privacy.html) and a new how-to guide describes [how to use " -"the new Differential Privacy components](https://flower.ai/docs/framework" -"/how-to-use-differential-privacy.html) in Flower." +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:41 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_fit_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of +msgid "Return the sample size and the required number of available clients." 
+msgstr "返回样本大小和所需的可用客户数量。" + +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 +msgid "DPFedAvgAdaptive" +msgstr "DPFedAvgAdaptive" + +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +#, fuzzy +msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" +msgstr "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" + +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:3 of +#, fuzzy +msgid "This class is deprecated and will be removed in a future release." +msgstr "该类已被弃用,将在以后的版本中删除。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Introduce built-in Secure Aggregation (preview)** " -"([#3120](https://github.com/adap/flower/pull/3120), " -"[#3110](https://github.com/adap/flower/pull/3110), " -"[#3108](https://github.com/adap/flower/pull/3108))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: 
flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation losses using the given strategy." +msgstr "使用给定的策略汇总评估损失。" -#: ../../source/ref-changelog.md:43 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"Built-in Secure Aggregation is here! Flower now supports different secure" -" aggregation protocols out-of-the-box. The best part? You can add secure " -"aggregation to your Flower projects with only a few lines of code. In " -"this initial release, we inlcude support for SecAgg and SecAgg+, but more" -" protocols will be implemented shortly. We'll also add detailed docs that" -" explain secure aggregation and how to use it in Flower. You can already " -"check out the new code example that shows how to use Flower to easily " -"combine Federated Learning, Differential Privacy and Secure Aggregation " -"in the same project." +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:45 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." 
+msgstr "汇总 DPFedAvgFixed 中的训练结果并更新片段标准。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Introduce** `flwr` **CLI (preview)** " -"([#2942](https://github.com/adap/flower/pull/2942), " -"[#3055](https://github.com/adap/flower/pull/3055), " -"[#3111](https://github.com/adap/flower/pull/3111), " -"[#3130](https://github.com/adap/flower/pull/3130), " -"[#3136](https://github.com/adap/flower/pull/3136), " -"[#3094](https://github.com/adap/flower/pull/3094), " -"[#3059](https://github.com/adap/flower/pull/3059), " -"[#3049](https://github.com/adap/flower/pull/3049), " -"[#3142](https://github.com/adap/flower/pull/3142))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:47 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of +msgid "Configure the next round of evaluation using the specified strategy." +msgstr "使用指定策略配置下一轮评估。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" -" and then running them using the Simulation Engine (`flwr run`)." 
+":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:49 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Introduce Flower Next Simulation Engine** " -"([#3024](https://github.com/adap/flower/pull/3024), " -"[#3061](https://github.com/adap/flower/pull/3061), " -"[#2997](https://github.com/adap/flower/pull/2997), " -"[#2783](https://github.com/adap/flower/pull/2783), " -"[#3184](https://github.com/adap/flower/pull/3184), " -"[#3075](https://github.com/adap/flower/pull/3075), " -"[#3047](https://github.com/adap/flower/pull/3047), " -"[#2998](https://github.com/adap/flower/pull/2998), " -"[#3009](https://github.com/adap/flower/pull/3009), " -"[#3008](https://github.com/adap/flower/pull/3008))" -msgstr "" -"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" - -#: ../../source/ref-changelog.md:51 -msgid "" -"The Flower Simulation Engine can now run Flower Next projects. For " -"notebook environments, there's also a new `run_simulation` function that " -"can run `ServerApp` and `ClientApp`." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:53 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of +msgid "Evaluate model parameters using an evaluation function from the strategy." 
+msgstr "使用策略中的评估函数评估模型参数。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Handle SuperNode connection errors** " -"([#2969](https://github.com/adap/flower/pull/2969))" -msgstr "** 添加一个新的 gRPC 选项**([#2197](https://github.com/adap/flower/pull/2197))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:55 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of +msgid "Initialize global model parameters using given strategy." 
+msgstr "使用给定的策略初始化全局模型参数。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.configure_fit:3 +#: flwr.server.strategy.strategy.Strategy.evaluate:6 of +msgid "The current round of federated learning." +msgstr "本轮联邦学习。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 +#: flwr.server.strategy.strategy.Strategy.configure_fit:7 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of +msgid "The client manager which holds all currently connected clients." +msgstr "客户端管理器,用于管理当前连接的所有客户端。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of msgid "" -"A SuperNode will now try to reconnect indefinitely to the SuperLink in " -"case of connection errors. The arguments `--max-retries` and `--max-wait-" -"time` can now be passed to the `flower-client-app` command. `--max-" -"retries` will define the number of tentatives the client should make " -"before it gives up trying to reconnect to the SuperLink, and, `--max-" -"wait-time` defines the time before the SuperNode gives up trying to " -"reconnect to the SuperLink." +"**evaluate_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `EvaluateIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated evaluation." 
msgstr "" +"**evaluate_configuration** -- " +"一个元组列表。列表中的每个元组都标识了一个`ClientProxy`和该特定`ClientProxy`的`EvaluateIns`。如果某个特定的" +" `ClientProxy` 未包含在此列表中,则表示该 `ClientProxy` 将不参与下一轮联合评估。" -#: ../../source/ref-changelog.md:57 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 +msgid "DPFedAvgFixed" +msgstr "DPFedAvgFixed" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +#, fuzzy +msgid "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" +msgstr "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**General updates to Flower Baselines** " -"([#2904](https://github.com/adap/flower/pull/2904), " -"[#2482](https://github.com/adap/flower/pull/2482), " -"[#2985](https://github.com/adap/flower/pull/2985), " -"[#2968](https://github.com/adap/flower/pull/2968))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**引入新的 Flower Baseline: FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679)" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:59 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"There's a new 
[FedStar](https://flower.ai/docs/baselines/fedstar.html) " -"baseline. Several other baselined have been updated as well." +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:61 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of +msgid "Aggregate training results using unweighted aggregation." +msgstr "使用非加权汇总法汇总训练结果。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**Improve documentation and translations** " -"([#3050](https://github.com/adap/flower/pull/3050), " -"[#3044](https://github.com/adap/flower/pull/3044), " -"[#3043](https://github.com/adap/flower/pull/3043), " -"[#2986](https://github.com/adap/flower/pull/2986), " -"[#3041](https://github.com/adap/flower/pull/3041), " -"[#3046](https://github.com/adap/flower/pull/3046), " -"[#3042](https://github.com/adap/flower/pull/3042), " -"[#2978](https://github.com/adap/flower/pull/2978), " -"[#2952](https://github.com/adap/flower/pull/2952), " -"[#3167](https://github.com/adap/flower/pull/3167), " -"[#2953](https://github.com/adap/flower/pull/2953), " -"[#3045](https://github.com/adap/flower/pull/3045), " -"[#2654](https://github.com/adap/flower/pull/2654), " -"[#3082](https://github.com/adap/flower/pull/3082), " -"[#2990](https://github.com/adap/flower/pull/2990), " -"[#2989](https://github.com/adap/flower/pull/2989))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:63 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"As usual, we merged many smaller and larger improvements to the " 
-"documentation. A special thank you goes to [Sebastian van der " -"Voort](https://github.com/svdvoort) for landing a big documentation PR!" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:65 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of +msgid "" +"Configure the next round of training incorporating Differential Privacy " +"(DP)." +msgstr "配置包含差分隐私 (DP) 的下一轮训练。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**General updates to Flower Examples** " -"([3134](https://github.com/adap/flower/pull/3134), " -"[2996](https://github.com/adap/flower/pull/2996), " -"[2930](https://github.com/adap/flower/pull/2930), " -"[2967](https://github.com/adap/flower/pull/2967), " -"[2467](https://github.com/adap/flower/pull/2467), " -"[2910](https://github.com/adap/flower/pull/2910), " -"[#2918](https://github.com/adap/flower/pull/2918), " -"[#2773](https://github.com/adap/flower/pull/2773), " -"[#3063](https://github.com/adap/flower/pull/3063), " -"[#3116](https://github.com/adap/flower/pull/3116), " -"[#3117](https://github.com/adap/flower/pull/3117))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"** 更新文档** ([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" +":py:obj:`evaluate `\\ " 
+"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:67 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"Two new examples show federated training of a Vision Transformer (ViT) " -"and federated learning in a medical context using the popular MONAI " -"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" -" new Flower Next `ServerApp` and `ClientApp`. Many other examples " -"received considerable updates as well." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:69 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of msgid "" -"**General improvements** " -"([#3171](https://github.com/adap/flower/pull/3171), " -"[3099](https://github.com/adap/flower/pull/3099), " -"[3003](https://github.com/adap/flower/pull/3003), " -"[3145](https://github.com/adap/flower/pull/3145), " -"[3017](https://github.com/adap/flower/pull/3017), " -"[3085](https://github.com/adap/flower/pull/3085), " -"[3012](https://github.com/adap/flower/pull/3012), " -"[3119](https://github.com/adap/flower/pull/3119), " -"[2991](https://github.com/adap/flower/pull/2991), " -"[2970](https://github.com/adap/flower/pull/2970), " -"[2980](https://github.com/adap/flower/pull/2980), " -"[3086](https://github.com/adap/flower/pull/3086), " -"[2932](https://github.com/adap/flower/pull/2932), " -"[2928](https://github.com/adap/flower/pull/2928), " -"[2941](https://github.com/adap/flower/pull/2941), " -"[2933](https://github.com/adap/flower/pull/2933), " -"[3181](https://github.com/adap/flower/pull/3181), " -"[2973](https://github.com/adap/flower/pull/2973), " -"[2992](https://github.com/adap/flower/pull/2992), " -"[2915](https://github.com/adap/flower/pull/2915), " -"[3040](https://github.com/adap/flower/pull/3040), " -"[3022](https://github.com/adap/flower/pull/3022), 
" -"[3032](https://github.com/adap/flower/pull/3032), " -"[2902](https://github.com/adap/flower/pull/2902), " -"[2931](https://github.com/adap/flower/pull/2931), " -"[3005](https://github.com/adap/flower/pull/3005), " -"[3132](https://github.com/adap/flower/pull/3132), " -"[3115](https://github.com/adap/flower/pull/3115), " -"[2944](https://github.com/adap/flower/pull/2944), " -"[3064](https://github.com/adap/flower/pull/3064), " -"[3106](https://github.com/adap/flower/pull/3106), " -"[2974](https://github.com/adap/flower/pull/2974), " -"[3178](https://github.com/adap/flower/pull/3178), " -"[2993](https://github.com/adap/flower/pull/2993), " -"[3186](https://github.com/adap/flower/pull/3186), " -"[3091](https://github.com/adap/flower/pull/3091), " -"[3125](https://github.com/adap/flower/pull/3125), " -"[3093](https://github.com/adap/flower/pull/3093), " -"[3013](https://github.com/adap/flower/pull/3013), " -"[3033](https://github.com/adap/flower/pull/3033), " -"[3133](https://github.com/adap/flower/pull/3133), " -"[3068](https://github.com/adap/flower/pull/3068), " -"[2916](https://github.com/adap/flower/pull/2916), " -"[2975](https://github.com/adap/flower/pull/2975), " -"[2984](https://github.com/adap/flower/pull/2984), " -"[2846](https://github.com/adap/flower/pull/2846), " -"[3077](https://github.com/adap/flower/pull/3077), " -"[3143](https://github.com/adap/flower/pull/3143), " -"[2921](https://github.com/adap/flower/pull/2921), " -"[3101](https://github.com/adap/flower/pull/3101), " -"[2927](https://github.com/adap/flower/pull/2927), " -"[2995](https://github.com/adap/flower/pull/2995), " -"[2972](https://github.com/adap/flower/pull/2972), " -"[2912](https://github.com/adap/flower/pull/2912), " -"[3065](https://github.com/adap/flower/pull/3065), " -"[3028](https://github.com/adap/flower/pull/3028), " -"[2922](https://github.com/adap/flower/pull/2922), " -"[2982](https://github.com/adap/flower/pull/2982), " -"[2914](https://github.com/adap/flower/pull/2914), " 
-"[3179](https://github.com/adap/flower/pull/3179), " -"[3080](https://github.com/adap/flower/pull/3080), " -"[2994](https://github.com/adap/flower/pull/2994), " -"[3187](https://github.com/adap/flower/pull/3187), " -"[2926](https://github.com/adap/flower/pull/2926), " -"[3018](https://github.com/adap/flower/pull/3018), " -"[3144](https://github.com/adap/flower/pull/3144), " -"[3011](https://github.com/adap/flower/pull/3011), " -"[#3152](https://github.com/adap/flower/pull/3152), " -"[#2836](https://github.com/adap/flower/pull/2836), " -"[#2929](https://github.com/adap/flower/pull/2929), " -"[#2943](https://github.com/adap/flower/pull/2943), " -"[#2955](https://github.com/adap/flower/pull/2955), " -"[#2954](https://github.com/adap/flower/pull/2954))" +"Configuration of the next training round includes information related to " +"DP, such as clip norm and noise stddev." +msgstr "下一轮训练的配置包括与 DP 相关的信息,如片段规范和噪声 stddev。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 +#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of +msgid "" +"**fit_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `FitIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated learning." 
msgstr "" +"**fit_configuration** -- " +"一个元组列表。列表中的每个元组都标识了一个`ClientProxy`和该特定`ClientProxy`的`FitIns'。如果某个特定的`ClientProxy`不在此列表中,则表示该`ClientProxy`将不参加下一轮联合学习。" -#: ../../source/ref-changelog.md:75 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 #, fuzzy -msgid "v1.7.0 (2024-02-05)" -msgstr "v1.3.0 (2023-02-06)" +msgid "DifferentialPrivacyClientSideAdaptiveClipping" +msgstr "DifferentialPrivacyClientSideAdaptiveClipping" -#: ../../source/ref-changelog.md:81 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 +#: of #, fuzzy -msgid "" -"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " -"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " -"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " -"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " -"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " -"Shaaban`, `Yan Gao`, `Yasar Abbas` " -msgstr "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +msgid "Use `adaptiveclipping_mod` modifier at the client side." 
+msgstr "在客户端使用 \"adaptiveclipping_mod \"修改器。" -#: ../../source/ref-changelog.md:85 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 +#: of #, fuzzy msgid "" -"**Introduce stateful clients (experimental)** " -"([#2770](https://github.com/adap/flower/pull/2770), " -"[#2686](https://github.com/adap/flower/pull/2686), " -"[#2696](https://github.com/adap/flower/pull/2696), " -"[#2643](https://github.com/adap/flower/pull/2643), " -"[#2769](https://github.com/adap/flower/pull/2769))" +"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " +"happen on the client-side, usually by using the built-in " +"`adaptiveclipping_mod`." msgstr "" -"** baselines的普通更新** ([#2301](https://github.com/adap/flower/pull/2301), " -"[#2305](https://github.com/adap/flower/pull/2305), " -"[#2307](https://github.com/adap/flower/pull/2307), " -"[#2327](https://github.com/adap/flower/pull/2327), " -"[#2435](https://github.com/adap/flower/pull/2435))" +"与在服务器端执行剪切的 `DifferentialPrivacyServerSideAdaptiveClipping` " +"相比,`DifferentialPrivacyClientSideAdaptiveClipping` 希望在客户端进行剪切,通常使用内置的 " +"`adaptiveclipping_mod`。" -#: ../../source/ref-changelog.md:87 -msgid "" -"Subclasses of `Client` and `NumPyClient` can now store local state that " -"remains on the client. Let's start with the highlight first: this new " -"feature is compatible with both simulated clients (via " -"`start_simulation`) and networked clients (via `start_client`). It's also" -" the first preview of new abstractions like `Context` and `RecordSet`. " -"Clients can access state of type `RecordSet` via `state: RecordSet = " -"self.context.state`. Changes to this `RecordSet` are preserved across " -"different rounds of execution to enable stateful computations in a " -"unified way across simulation and deployment." 
-msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 +#: of +#, fuzzy +msgid "The strategy to which DP functionalities will be added by this wrapper." +msgstr "该包装器将添加 DP 功能的策略。" -#: ../../source/ref-changelog.md:89 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 +#: of #, fuzzy -msgid "" -"**Improve performance** " -"([#2293](https://github.com/adap/flower/pull/2293))" -msgstr "**改进示例笔记** ([#2005](https://github.com/adap/flower/pull/2005))" +msgid "The noise multiplier for the Gaussian mechanism for model updates." +msgstr "用于模型更新的高斯机制的噪声乘数。" -#: ../../source/ref-changelog.md:91 -msgid "" -"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" -"place aggregation to reduce memory consumption. The Flower client " -"serialization/deserialization has been rewritten from the ground up, " -"which results in significant speedups, especially when the client-side " -"training time is short." -msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 +#: of +#, fuzzy +msgid "The number of clients that are sampled on each round." 
+msgstr "每轮取样的客户数。" -#: ../../source/ref-changelog.md:93 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 +#: of #, fuzzy msgid "" -"**Support Federated Learning with Apple MLX and Flower** " -"([#2693](https://github.com/adap/flower/pull/2693))" -msgstr "" -"** 添加使用 fastai 和 Flower 进行联邦学习的新示例** " -"([#1598](https://github.com/adap/flower/pull/1598))" +"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " +"recommends to set to 0.1." +msgstr "剪切规范的初始值。默认为 0.1。安德鲁等人建议设置为 0.1。" -#: ../../source/ref-changelog.md:95 -msgid "" -"Flower has official support for federated learning using [Apple " -"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " -"example." -msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 +#: of +#, fuzzy +msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." +msgstr "需要剪切的更新量化值。默认为 0.5。" -#: ../../source/ref-changelog.md:97 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 +#: of #, fuzzy msgid "" -"**Introduce new XGBoost cyclic strategy** " -"([#2666](https://github.com/adap/flower/pull/2666), " -"[#2668](https://github.com/adap/flower/pull/2668))" -msgstr "" -"**介绍 iOS SDK(预览版)** ([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" +"The learning rate for the clipping norm adaptation. Defaults to 0.2. " +"Andrew et al. recommends to set to 0.2." 
+msgstr "剪切规范适应的学习率。默认为 0.2。安德鲁等人建议设置为 0.2。" -#: ../../source/ref-changelog.md:99 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 +#: of +#, fuzzy msgid "" -"A new strategy called `FedXgbCyclic` supports a client-by-client style of" -" training (often called cyclic). The `xgboost-comprehensive` code example" -" shows how to use it in a full project. In addition to that, `xgboost-" -"comprehensive` now also supports simulation mode. With this, Flower " -"offers best-in-class XGBoost support." -msgstr "" +"The stddev of the noise added to the count of updates currently below the" +" estimate. Andrew et al. recommends to set to `expected_num_records/20`" +msgstr "添加到当前低于估计值的更新计数中的噪声的 stddev。安德鲁等人建议设置为 \"expected_num_records/20" -#: ../../source/ref-changelog.md:101 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 +#: of #, fuzzy -msgid "" -"**Support Python 3.11** " -"([#2394](https://github.com/adap/flower/pull/2394))" -msgstr "** 支持 Python 3.10** ([#1320](https://github.com/adap/flower/pull/1320))" +msgid "Create a strategy:" +msgstr "server.strategy" -#: ../../source/ref-changelog.md:103 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 +#: of +#, fuzzy msgid "" -"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " -"ensure better support for users using more recent Python versions." 
-msgstr "" +"Wrap the strategy with the " +"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" +msgstr "用 \"DifferentialPrivacyClientSideAdaptiveClipping \"包装器对策略进行包装:" -#: ../../source/ref-changelog.md:105 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 +#: of #, fuzzy -msgid "" -"**Update gRPC and ProtoBuf dependencies** " -"([#2814](https://github.com/adap/flower/pull/2814))" -msgstr "" -"**更新 REST API 以支持创建和删除节点** " -"([#2283](https://github.com/adap/flower/pull/2283))" +msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" +msgstr "在客户端,将 \"adaptiveclipping_mod \"添加到客户端模块中:" -#: ../../source/ref-changelog.md:107 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"The `grpcio` and `protobuf` dependencies were updated to their latest " -"versions for improved security and performance." +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:109 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Introduce Docker image for Flower server** " -"([#2700](https://github.com/adap/flower/pull/2700), " -"[#2688](https://github.com/adap/flower/pull/2688), " -"[#2705](https://github.com/adap/flower/pull/2705), " -"[#2695](https://github.com/adap/flower/pull/2695), " -"[#2747](https://github.com/adap/flower/pull/2747), " -"[#2746](https://github.com/adap/flower/pull/2746), " -"[#2680](https://github.com/adap/flower/pull/2680), " -"[#2682](https://github.com/adap/flower/pull/2682), " -"[#2701](https://github.com/adap/flower/pull/2701))" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -"** 支持 SSL 的服务器和客户端** 
([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 +#: of +#, fuzzy +msgid "Aggregate training results and update clip norms." +msgstr "汇总 DPFedAvgFixed 中的训练结果并更新片段标准。" -#: ../../source/ref-changelog.md:111 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"The Flower server can now be run using an official Docker image. A new " -"how-to guide explains [how to run Flower using " -"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html). An official Flower client Docker image will follow." 
+":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:113 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Introduce** `flower-via-docker-compose` **example** " -"([#2626](https://github.com/adap/flower/pull/2626))" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**介绍Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:115 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Introduce** `quickstart-sklearn-tabular` **example** " -"([#2719](https://github.com/adap/flower/pull/2719))" -msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" +msgstr "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:117 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Introduce** `custom-metrics` **example** " -"([#1958](https://github.com/adap/flower/pull/1958))" -msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" -#: ../../source/ref-changelog.md:119 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 +#, fuzzy +msgid "DifferentialPrivacyClientSideFixedClipping" +msgstr "差分隐私" + +#: 
flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 +#: of +#, fuzzy +msgid "Use `fixedclipping_mod` modifier at the client side." +msgstr "在客户端使用 `fixedclipping_mod` 修改器。" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 +#: of #, fuzzy msgid "" -"**Update code examples to use Flower Datasets** " -"([#2450](https://github.com/adap/flower/pull/2450), " -"[#2456](https://github.com/adap/flower/pull/2456), " -"[#2318](https://github.com/adap/flower/pull/2318), " -"[#2712](https://github.com/adap/flower/pull/2712))" +"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " +"on the client-side, usually by using the built-in `fixedclipping_mod`." msgstr "" -"更新开发人员工具([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310)" +"与在服务器端执行剪切的 \"DifferentialPrivacyServerSideFixedClipping " +"\"相比,\"DifferentialPrivacyClientSideFixedClipping \"希望在客户端进行剪切,通常是使用内置的 " +"\"fixedclipping_mod\"。" -#: ../../source/ref-changelog.md:121 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 +#: of +#, fuzzy msgid "" -"Several code examples were updated to use [Flower " -"Datasets](https://flower.ai/docs/datasets/)." -msgstr "" +"The noise multiplier for the Gaussian mechanism for model updates. A " +"value of 1.0 or higher is recommended for strong privacy." 
+msgstr "模型更新高斯机制的噪声乘数。建议使用 1.0 或更高的值,以获得较强的隐私性。" -#: ../../source/ref-changelog.md:123 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 +#: of #, fuzzy msgid "" -"**General updates to Flower Examples** " -"([#2381](https://github.com/adap/flower/pull/2381), " -"[#2805](https://github.com/adap/flower/pull/2805), " -"[#2782](https://github.com/adap/flower/pull/2782), " -"[#2806](https://github.com/adap/flower/pull/2806), " -"[#2829](https://github.com/adap/flower/pull/2829), " -"[#2825](https://github.com/adap/flower/pull/2825), " -"[#2816](https://github.com/adap/flower/pull/2816), " -"[#2726](https://github.com/adap/flower/pull/2726), " -"[#2659](https://github.com/adap/flower/pull/2659), " -"[#2655](https://github.com/adap/flower/pull/2655))" +"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " +"wrapper:" +msgstr "用 \"DifferentialPrivacyClientSideFixedClipping \"包装器包装策略:" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 +#: of +#, fuzzy +msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" +msgstr "在客户端,将 \"fixedclipping_mod \"添加到客户端模块中:" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +#, fuzzy +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -"**改进(试验性)驱动程序应用程序接口** ([#1663](https://github.com/adap/flower/pull/1663)," -" [#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, 
results\\, ...\\)" -#: ../../source/ref-changelog.md:125 -msgid "Many Flower code examples received substantial updates." +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +#, fuzzy +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:127 ../../source/ref-changelog.md:220 -msgid "**Update Flower Baselines**" -msgstr "**更新 Flower Baselines**" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 +#: of +#, fuzzy +msgid "Add noise to the aggregated parameters." +msgstr "然后将汇总结果序列化:" -#: ../../source/ref-changelog.md:129 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " -"[#2771](https://github.com/adap/flower/pull/2771))" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:130 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of #, fuzzy -msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" -msgstr "FjORD [#2431](https://github.com/adap/flower/pull/2431)" +msgid "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:131 +#: 
flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of #, fuzzy -msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" -msgstr "FjORD [#2431](https://github.com/adap/flower/pull/2431)" +msgid "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" +msgstr "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:132 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of #, fuzzy -msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" -msgstr "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" -#: ../../source/ref-changelog.md:133 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 #, fuzzy -msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" -msgstr "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +msgid "DifferentialPrivacyServerSideAdaptiveClipping" +msgstr "DifferentialPrivacyServerSideAdaptiveClipping" -#: ../../source/ref-changelog.md:134 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 +#: of #, fuzzy -msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" -msgstr "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +msgid "" +"The standard deviation of the noise added to the count of updates below " +"the estimate. Andrew et al. 
recommends to set to " +"`expected_num_records/20`" +msgstr "添加到低于估计值的更新计数中的噪声标准偏差。安德鲁等人建议设置为 \"expected_num_records/20" -#: ../../source/ref-changelog.md:136 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 +#: of #, fuzzy msgid "" -"**Improve documentation** " -"([#2674](https://github.com/adap/flower/pull/2674), " -"[#2480](https://github.com/adap/flower/pull/2480), " -"[#2826](https://github.com/adap/flower/pull/2826), " -"[#2727](https://github.com/adap/flower/pull/2727), " -"[#2761](https://github.com/adap/flower/pull/2761), " -"[#2900](https://github.com/adap/flower/pull/2900))" -msgstr "" -"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614)))" +"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " +"wrapper" +msgstr "用 DifferentialPrivacyServerSideAdaptiveClipping 封装器封装策略" -#: ../../source/ref-changelog.md:138 -msgid "" -"**Improved testing and development infrastructure** " -"([#2797](https://github.com/adap/flower/pull/2797), " -"[#2676](https://github.com/adap/flower/pull/2676), " -"[#2644](https://github.com/adap/flower/pull/2644), " -"[#2656](https://github.com/adap/flower/pull/2656), " -"[#2848](https://github.com/adap/flower/pull/2848), " -"[#2675](https://github.com/adap/flower/pull/2675), " -"[#2735](https://github.com/adap/flower/pull/2735), " -"[#2767](https://github.com/adap/flower/pull/2767), " -"[#2732](https://github.com/adap/flower/pull/2732), " -"[#2744](https://github.com/adap/flower/pull/2744), " -"[#2681](https://github.com/adap/flower/pull/2681), " -"[#2699](https://github.com/adap/flower/pull/2699), " 
-"[#2745](https://github.com/adap/flower/pull/2745), " -"[#2734](https://github.com/adap/flower/pull/2734), " -"[#2731](https://github.com/adap/flower/pull/2731), " -"[#2652](https://github.com/adap/flower/pull/2652), " -"[#2720](https://github.com/adap/flower/pull/2720), " -"[#2721](https://github.com/adap/flower/pull/2721), " -"[#2717](https://github.com/adap/flower/pull/2717), " -"[#2864](https://github.com/adap/flower/pull/2864), " -"[#2694](https://github.com/adap/flower/pull/2694), " -"[#2709](https://github.com/adap/flower/pull/2709), " -"[#2658](https://github.com/adap/flower/pull/2658), " -"[#2796](https://github.com/adap/flower/pull/2796), " -"[#2692](https://github.com/adap/flower/pull/2692), " -"[#2657](https://github.com/adap/flower/pull/2657), " -"[#2813](https://github.com/adap/flower/pull/2813), " -"[#2661](https://github.com/adap/flower/pull/2661), " -"[#2398](https://github.com/adap/flower/pull/2398))" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +#, fuzzy +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:140 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"The Flower testing and development infrastructure has received " -"substantial updates. This makes Flower 1.7 the most tested release ever." 
+":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:142 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Update dependencies** " -"([#2753](https://github.com/adap/flower/pull/2753), " -"[#2651](https://github.com/adap/flower/pull/2651), " -"[#2739](https://github.com/adap/flower/pull/2739), " -"[#2837](https://github.com/adap/flower/pull/2837), " -"[#2788](https://github.com/adap/flower/pull/2788), " -"[#2811](https://github.com/adap/flower/pull/2811), " -"[#2774](https://github.com/adap/flower/pull/2774), " -"[#2790](https://github.com/adap/flower/pull/2790), " -"[#2751](https://github.com/adap/flower/pull/2751), " -"[#2850](https://github.com/adap/flower/pull/2850), " -"[#2812](https://github.com/adap/flower/pull/2812), " -"[#2872](https://github.com/adap/flower/pull/2872), " -"[#2736](https://github.com/adap/flower/pull/2736), " -"[#2756](https://github.com/adap/flower/pull/2756), " -"[#2857](https://github.com/adap/flower/pull/2857), " -"[#2757](https://github.com/adap/flower/pull/2757), " -"[#2810](https://github.com/adap/flower/pull/2810), " -"[#2740](https://github.com/adap/flower/pull/2740), " -"[#2789](https://github.com/adap/flower/pull/2789))" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**更新Example** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), " -"[#1981](https://github.com/adap/flower/pull/1981), " -"[#1988](https://github.com/adap/flower/pull/1988), " -"[#1984](https://github.com/adap/flower/pull/1984), " -"[#1982](https://github.com/adap/flower/pull/1982), " -"[#2112](https://github.com/adap/flower/pull/2112), " -"[#2144](https://github.com/adap/flower/pull/2144), " 
-"[#2174](https://github.com/adap/flower/pull/2174), " -"[#2225](https://github.com/adap/flower/pull/2225), " -"[#2183](https://github.com/adap/flower/pull/2183))" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:144 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**General improvements** " -"([#2803](https://github.com/adap/flower/pull/2803), " -"[#2847](https://github.com/adap/flower/pull/2847), " -"[#2877](https://github.com/adap/flower/pull/2877), " -"[#2690](https://github.com/adap/flower/pull/2690), " -"[#2889](https://github.com/adap/flower/pull/2889), " -"[#2874](https://github.com/adap/flower/pull/2874), " -"[#2819](https://github.com/adap/flower/pull/2819), " -"[#2689](https://github.com/adap/flower/pull/2689), " -"[#2457](https://github.com/adap/flower/pull/2457), " -"[#2870](https://github.com/adap/flower/pull/2870), " -"[#2669](https://github.com/adap/flower/pull/2669), " -"[#2876](https://github.com/adap/flower/pull/2876), " -"[#2885](https://github.com/adap/flower/pull/2885), " -"[#2858](https://github.com/adap/flower/pull/2858), " -"[#2867](https://github.com/adap/flower/pull/2867), " -"[#2351](https://github.com/adap/flower/pull/2351), " -"[#2886](https://github.com/adap/flower/pull/2886), " -"[#2860](https://github.com/adap/flower/pull/2860), " -"[#2828](https://github.com/adap/flower/pull/2828), " -"[#2869](https://github.com/adap/flower/pull/2869), " -"[#2875](https://github.com/adap/flower/pull/2875), " -"[#2733](https://github.com/adap/flower/pull/2733), " -"[#2488](https://github.com/adap/flower/pull/2488), " -"[#2646](https://github.com/adap/flower/pull/2646), " -"[#2879](https://github.com/adap/flower/pull/2879), " -"[#2821](https://github.com/adap/flower/pull/2821), " -"[#2855](https://github.com/adap/flower/pull/2855), " -"[#2800](https://github.com/adap/flower/pull/2800), " 
-"[#2807](https://github.com/adap/flower/pull/2807), " -"[#2801](https://github.com/adap/flower/pull/2801), " -"[#2804](https://github.com/adap/flower/pull/2804), " -"[#2851](https://github.com/adap/flower/pull/2851), " -"[#2787](https://github.com/adap/flower/pull/2787), " -"[#2852](https://github.com/adap/flower/pull/2852), " -"[#2672](https://github.com/adap/flower/pull/2672), " -"[#2759](https://github.com/adap/flower/pull/2759))" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:148 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Deprecate** `start_numpy_client` " -"([#2563](https://github.com/adap/flower/pull/2563), " -"[#2718](https://github.com/adap/flower/pull/2718))" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " -"[#2508](https://github.com/adap/flower/pull/2508))" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:150 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"Until now, clients of type `NumPyClient` needed to be started via " -"`start_numpy_client`. In our efforts to consolidate framework APIs, we " -"have introduced changes, and now all client types should start via " -"`start_client`. To continue using `NumPyClient` clients, you simply need " -"to first call the `.to_client()` method and then pass returned `Client` " -"object to `start_client`. The examples and the documentation have been " -"updated accordingly." 
+":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" -#: ../../source/ref-changelog.md:152 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 #, fuzzy -msgid "" -"**Deprecate legacy DP wrappers** " -"([#2749](https://github.com/adap/flower/pull/2749))" -msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" +msgid "DifferentialPrivacyServerSideFixedClipping" +msgstr "差分隐私" -#: ../../source/ref-changelog.md:154 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 +#: of +#, fuzzy msgid "" -"Legacy DP wrapper classes are deprecated, but still functional. This is " -"in preparation for an all-new pluggable version of differential privacy " -"support in Flower." -msgstr "" +"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping " +"wrapper" +msgstr "用 DifferentialPrivacyServerSideFixedClipping 封装器封装策略" -#: ../../source/ref-changelog.md:156 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Make optional arg** `--callable` **in** `flower-client` **a required " -"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -"**从** `start_client` 中移除** `rest` **实验参数 " -"([#2324](https://github.com/adap/flower/pull/2324))" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:158 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " -"([#2890](https://github.com/adap/flower/pull/2890))" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, 
failures\\)" msgstr "" -"**重新命名** `rnd` ** to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:160 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 +#: of #, fuzzy -msgid "" -"**Drop experimental** `Task` **fields** " -"([#2866](https://github.com/adap/flower/pull/2866), " -"[#2865](https://github.com/adap/flower/pull/2865))" -msgstr "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +msgid "Compute the updates, clip, and pass them for aggregation." +msgstr "计算更新、剪辑并将其传递给聚合。" -#: ../../source/ref-changelog.md:162 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"Experimental fields `sa`, `legacy_server_message` and " -"`legacy_client_message` were removed from `Task` message. The removed " -"fields are superseded by the new `RecordSet` abstraction." 
+":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:164 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Retire MXNet examples** " -"([#2724](https://github.com/adap/flower/pull/2724))" -msgstr "**新的 scikit-learn 代码示例** ([#748](https://github.com/adap/flower/pull/748))" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:166 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"The development of the MXNet fremework has ended and the project is now " -"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " -"examples won't receive updates." +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:168 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of #, fuzzy -msgid "v1.6.0 (2023-11-28)" -msgstr "v1.4.0 (2023-04-21)" - -#: ../../source/ref-changelog.md:174 msgid "" -"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " -"`Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Gabriel " -"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," -" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " -"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " -"`cnxdeveloper`, `k3nfalt` " +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 +#: of +#, fuzzy +msgid "Afterward, add noise to the aggregated parameters." +msgstr "然后,在汇总参数中添加噪声。" + +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 +#, fuzzy +msgid "FaultTolerantFedAvg" +msgstr "server.strategy.FaultTolerantFedAvg" -#: ../../source/ref-changelog.md:178 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**Add experimental support for Python 3.12** " -"([#2565](https://github.com/adap/flower/pull/2565))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"** 增加对 Python 3.12 的实验支持** " -"([#2565](https://github.com/adap/flower/pull/2565))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:180 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Add new XGBoost examples** " -"([#2612](https://github.com/adap/flower/pull/2612), " -"[#2554](https://github.com/adap/flower/pull/2554), " -"[#2617](https://github.com/adap/flower/pull/2617), " -"[#2618](https://github.com/adap/flower/pull/2618), " -"[#2619](https://github.com/adap/flower/pull/2619), " -"[#2567](https://github.com/adap/flower/pull/2567))" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**引入(试验性)Driver API** 
([#1520](https://github.com/adap/flower/pull/1520)," -" [#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 +#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 +#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 +#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using weighted average." +msgstr "使用加权平均法汇总拟合结果。" -#: ../../source/ref-changelog.md:182 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"We have added a new `xgboost-quickstart` example alongside a new " -"`xgboost-comprehensive` example that goes more in-depth." 
+":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:184 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Add Vertical FL example** " -"([#2598](https://github.com/adap/flower/pull/2598))" -msgstr "**新的 iOS CoreML 代码示例**([#1289](https://github.com/adap/flower/pull/1289))" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:186 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"We had many questions about Vertical Federated Learning using Flower, so " -"we decided to add an simple example for it on the [Titanic " -"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " -"tutorial (in the README)." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:188 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**Support custom** `ClientManager` **in** `start_driver()` " -"([#2292](https://github.com/adap/flower/pull/2292))" -msgstr "**在***`start_driver()`中支持自定义***`ClientManager([#2292](https://github.com/adap/flower/pull/2292))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:190 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**Update REST API to support create and delete nodes** " -"([#2283](https://github.com/adap/flower/pull/2283))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**更新 REST API 以支持创建和删除节点** " -"([#2283](https://github.com/adap/flower/pull/2283))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:192 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Update the Android SDK** " -"([#2187](https://github.com/adap/flower/pull/2187))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**介绍Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" + +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 +#: ../../source/ref-changelog.md:1231 +msgid "FedAdagrad" +msgstr "FedAdagrad" -#: ../../source/ref-changelog.md:194 +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 +#: flwr.server.strategy.fedadam.FedAdam:1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of #, fuzzy 
-msgid "Add gRPC request-response capability to the Android SDK." -msgstr "为 C++ SDK 添加 gRPC 请求-响应功能。" +msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" +msgstr "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:3 +#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 +#: flwr.server.strategy.fedyogi.FedYogi:3 of +msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" +msgstr "实施基于 https://arxiv.org/abs/2003.00295v5" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:23 +#: flwr.server.strategy.fedadam.FedAdam:25 +#: flwr.server.strategy.fedadam.FedAdam:27 +#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 +#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 +#: flwr.server.strategy.fedprox.FedProx:61 +#: flwr.server.strategy.fedprox.FedProx:63 +#: flwr.server.strategy.fedyogi.FedYogi:28 +#: flwr.server.strategy.fedyogi.FedYogi:30 of +msgid "Metrics aggregation function, optional." +msgstr "指标汇总功能,可选。" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:29 +#: flwr.server.strategy.fedadam.FedAdam:29 +#: flwr.server.strategy.fedopt.FedOpt:29 of +msgid "Server-side learning rate. Defaults to 1e-1." +msgstr "服务器端学习率。默认为 1e-1。" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:31 +#: flwr.server.strategy.fedadam.FedAdam:31 +#: flwr.server.strategy.fedopt.FedOpt:31 of +msgid "Client-side learning rate. Defaults to 1e-1." +msgstr "客户端学习率。默认为 1e-1。" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:33 +#: flwr.server.strategy.fedadam.FedAdam:37 +#: flwr.server.strategy.fedopt.FedOpt:37 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." 
+msgstr "控制算法的适应度。默认为 1e-9。" -#: ../../source/ref-changelog.md:196 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Update the C++ SDK** " -"([#2537](https://github.com/adap/flower/pull/2537), " -"[#2528](https://github.com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " -"[#2528](https://github/com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" - -#: ../../source/ref-changelog.md:198 -msgid "Add gRPC request-response capability to the C++ SDK." -msgstr "为 C++ SDK 添加 gRPC 请求-响应功能。" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:200 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Make HTTPS the new default** " -"([#2591](https://github.com/adap/flower/pull/2591), " -"[#2636](https://github.com/adap/flower/pull/2636))" +":py:obj:`aggregate_fit `\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -"Baselines文档([#2290](https://github.com/adap/flower/pull/2290), " -"[#2400](https://github.com/adap/flower/pull/2400)" +":py:obj:`aggregate_fit `\\" +" \\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:202 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"Flower is moving to HTTPS by default. The new `flower-server` requires " -"passing `--certificates`, but users can enable `--insecure` to use HTTP " -"for prototyping. 
The same applies to `flower-client`, which can either " -"use user-provided credentials or gRPC-bundled certificates to connect to " -"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " -"enable insecure HTTP connections." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:204 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"For backward compatibility, `start_client()` and `start_numpy_client()` " -"will still start in insecure mode by default. In a future release, " -"insecure connections will require user opt-in by passing `insecure=True`." +":py:obj:`configure_fit `\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit `\\" +" \\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:206 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), " -"[#2493](https://github.com/adap/flower/pull/2493))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), " -"[#2493](https://github.com/adap/flower/pull/2493))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:208 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"Using the `client_fn`, Flower clients can interchangeably run as " -"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" -" `start_simulation`) without requiring changes to how the client class is" -" defined and instantiated. 
The `to_client()` function is introduced to " -"convert a `NumPyClient` to a `Client`." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"使用 `client_fn`,Flower 客户端可以作为独立进程(即通过 `start_client`)或在模拟中(即通过 " -"`start_simulation`)交替运行,而无需更改客户端类的定义和实例化方式。调用 `start_numpy_client` 现已过时。" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:210 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Add new** `Bulyan` **strategy** " -"([#1817](https://github.com/adap/flower/pull/1817), " -"[#1891](https://github.com/adap/flower/pull/1891))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**添加新**\"Bulyan " -"\"**策略**([#1817](https://github.com/adap/flower/pull/1817), " -"[#1891](https://github.com/adap/flower/pull/1891)" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:212 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " -"2018](https://arxiv.org/abs/1802.07927)" -msgstr "新的 \"Bulyan\"策略通过[El Mhamdi 等人,2018](https://arxiv.org/abs/1802.07927)实现" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:214 +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 #, fuzzy -msgid "" -"**Add new** `XGB Bagging` **strategy** " -"([#2611](https://github.com/adap/flower/pull/2611))" -msgstr "**添加新的`FedProx`策略** ([#1619](https://github.com/adap/flower/pull/1619))" +msgid "FedAdam" +msgstr "FedAdagrad" + +#: flwr.server.strategy.fedadam.FedAdam:33 +#: flwr.server.strategy.fedyogi.FedYogi:36 of +msgid "Momentum parameter. Defaults to 0.9." 
+msgstr "动量参数。默认为 0.9。" + +#: flwr.server.strategy.fedadam.FedAdam:35 +#: flwr.server.strategy.fedyogi.FedYogi:38 of +msgid "Second moment parameter. Defaults to 0.99." +msgstr "第二动量参数。默认为 0.99。" -#: ../../source/ref-changelog.md:216 ../../source/ref-changelog.md:218 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Introduce `WorkloadState`** " -"([#2564](https://github.com/adap/flower/pull/2564), " -"[#2632](https://github.com/adap/flower/pull/2632))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -"**新的内置策略**([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822)" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" -#: ../../source/ref-changelog.md:222 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " -"[#2286](https://github.com/adap/flower/pull/2286), " -"[#2509](https://github.com/adap/flower/pull/2509))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " -"[#2286](https://github.com/adap/flower/pull/2286), " -"[#2509](https://github.com/adap/flower/pull/2509))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:224 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " -"[#2400](https://github.com/adap/flower/pull/2400))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -"Baselines文档([#2290](https://github.com/adap/flower/pull/2290), " -"[#2400](https://github.com/adap/flower/pull/2400)" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" -#: 
../../source/ref-changelog.md:226 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " -"[#2507](https://github.com/adap/flower/pull/2507))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " -"[#2507](https://github.com/adap/flower/pull/2507))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:228 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " -"[#2508](https://github.com/adap/flower/pull/2508))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " -"[#2508](https://github.com/adap/flower/pull/2508))" - -#: ../../source/ref-changelog.md:230 -msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" -msgstr "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" - -#: ../../source/ref-changelog.md:232 -msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" -msgstr "FjORD [#2431](https://github.com/adap/flower/pull/2431)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:234 -msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" -msgstr "MOON [#2421](https://github.com/adap/flower/pull/2421)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:236 -msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" -msgstr "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of 
+#, fuzzy +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:238 -msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" -msgstr "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:240 -msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" -msgstr "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 +#, fuzzy +msgid "FedAvg" +msgstr "DP-FedAvg" -#: ../../source/ref-changelog.md:242 -msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" -msgstr "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" +#: flwr.server.strategy.fedavg.FedAvg:3 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of +msgid "Implementation based on https://arxiv.org/abs/1602.05629" +msgstr "实施基于 https://arxiv.org/abs/1602.05629" -#: ../../source/ref-changelog.md:244 +#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 +#: of msgid "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +"Fraction of clients used during training. In case `min_fit_clients` is " +"larger than `fraction_fit * available_clients`, `min_fit_clients` will " +"still be sampled. Defaults to 1.0." 
msgstr "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +"训练过程中使用的客户端比例。如果 `min_fit_clients` 大于 `fraction_fit * " +"available_clients`,则仍会对 `min_fit_clients` 进行采样。默认为 1.0。" -#: ../../source/ref-changelog.md:246 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 +#: of msgid "" -"**General updates to Flower Examples** " -"([#2384](https://github.com/adap/flower/pull/2384), " -"[#2425](https://github.com/adap/flower/pull/2425), " -"[#2526](https://github.com/adap/flower/pull/2526), " -"[#2302](https://github.com/adap/flower/pull/2302), " -"[#2545](https://github.com/adap/flower/pull/2545))" +"Fraction of clients used during validation. In case " +"`min_evaluate_clients` is larger than `fraction_evaluate * " +"available_clients`, `min_evaluate_clients` will still be sampled. " +"Defaults to 1.0." msgstr "" -"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " -"[#2528](https://github/com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" +"验证过程中使用的客户端的比例。如果 `min_evaluate_clients` 大于 `fraction_evaluate * " +"available_clients`,则仍会对 `min_evaluate_clients` 进行采样。默认为 1.0。" + +#: flwr.server.strategy.fedavg.FedAvg:33 of +#, fuzzy +msgid "Enable (True) or disable (False) in-place aggregation of model updates." 
+msgstr "启用(真)或禁用(假)模型更新的就地聚合。" -#: ../../source/ref-changelog.md:248 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**General updates to Flower Baselines** " -"([#2301](https://github.com/adap/flower/pull/2301), " -"[#2305](https://github.com/adap/flower/pull/2305), " -"[#2307](https://github.com/adap/flower/pull/2307), " -"[#2327](https://github.com/adap/flower/pull/2327), " -"[#2435](https://github.com/adap/flower/pull/2435), " -"[#2462](https://github.com/adap/flower/pull/2462), " -"[#2463](https://github.com/adap/flower/pull/2463), " -"[#2461](https://github.com/adap/flower/pull/2461), " -"[#2469](https://github.com/adap/flower/pull/2469), " -"[#2466](https://github.com/adap/flower/pull/2466), " -"[#2471](https://github.com/adap/flower/pull/2471), " -"[#2472](https://github.com/adap/flower/pull/2472), " -"[#2470](https://github.com/adap/flower/pull/2470))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -"**普通改进**([#2309](https://github.com/adap/flower/pull/2309), " -"[#2310](https://github.com/adap/flower/pull/2310), " -"[2313](https://github.com/adap/flower/pull/2313), " -"[#2316](https://github.com/adap/flower/pull/2316), " -"[2317](https://github.com/adap/flower/pull/2317),[#2349](https://github.com/adap/flower/pull/2349)," -" [#2360](https://github.com/adap/flower/pull/2360), " -"[#2402](https://github.com/adap/flower/pull/2402), " -"[#2446](https://github.com/adap/flower/pull/2446) " -"[#2561](https://github.com/adap/flower/pull/2561))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" -#: ../../source/ref-changelog.md:250 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**General updates to the simulation engine** " -"([#2331](https://github.com/adap/flower/pull/2331), " -"[#2447](https://github.com/adap/flower/pull/2447), " -"[#2448](https://github.com/adap/flower/pull/2448), " 
-"[#2294](https://github.com/adap/flower/pull/2294))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**模拟引擎的普通更新** ([#2331](https://github.com/adap/flower/pull/2331), " -"[#2447](https://github.com/adap/flower/pull/2447), " -"[#2448](https://github.com/adap/flower/pull/2448))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:252 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**General updates to Flower SDKs** " -"([#2288](https://github.com/adap/flower/pull/2288), " -"[#2429](https://github.com/adap/flower/pull/2429), " -"[#2555](https://github.com/adap/flower/pull/2555), " -"[#2543](https://github.com/adap/flower/pull/2543), " -"[#2544](https://github.com/adap/flower/pull/2544), " -"[#2597](https://github.com/adap/flower/pull/2597), " -"[#2623](https://github.com/adap/flower/pull/2623))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " -"[#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475)))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" -#: ../../source/ref-changelog.md:254 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**General improvements** " -"([#2309](https://github.com/adap/flower/pull/2309), " -"[#2310](https://github.com/adap/flower/pull/2310), " -"[#2313](https://github.com/adap/flower/pull/2313), " -"[#2316](https://github.com/adap/flower/pull/2316), " -"[#2317](https://github.com/adap/flower/pull/2317), " -"[#2349](https://github.com/adap/flower/pull/2349), " 
-"[#2360](https://github.com/adap/flower/pull/2360), " -"[#2402](https://github.com/adap/flower/pull/2402), " -"[#2446](https://github.com/adap/flower/pull/2446), " -"[#2561](https://github.com/adap/flower/pull/2561), " -"[#2273](https://github.com/adap/flower/pull/2273), " -"[#2267](https://github.com/adap/flower/pull/2267), " -"[#2274](https://github.com/adap/flower/pull/2274), " -"[#2275](https://github.com/adap/flower/pull/2275), " -"[#2432](https://github.com/adap/flower/pull/2432), " -"[#2251](https://github.com/adap/flower/pull/2251), " -"[#2321](https://github.com/adap/flower/pull/2321), " -"[#1936](https://github.com/adap/flower/pull/1936), " -"[#2408](https://github.com/adap/flower/pull/2408), " -"[#2413](https://github.com/adap/flower/pull/2413), " -"[#2401](https://github.com/adap/flower/pull/2401), " -"[#2531](https://github.com/adap/flower/pull/2531), " -"[#2534](https://github.com/adap/flower/pull/2534), " -"[#2535](https://github.com/adap/flower/pull/2535), " -"[#2521](https://github.com/adap/flower/pull/2521), " -"[#2553](https://github.com/adap/flower/pull/2553), " -"[#2596](https://github.com/adap/flower/pull/2596))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:256 ../../source/ref-changelog.md:346 -#: ../../source/ref-changelog.md:410 ../../source/ref-changelog.md:464 -#: ../../source/ref-changelog.md:531 -msgid "Flower received many improvements under the hood, too many to list here." 
-msgstr "Flower 进行了许多改进,这里就不一一列举了。" - -#: ../../source/ref-changelog.md:260 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Remove support for Python 3.7** " -"([#2280](https://github.com/adap/flower/pull/2280), " -"[#2299](https://github.com/adap/flower/pull/2299), " -"[#2304](https://github.com/adap/flower/pull/2304), " -"[#2306](https://github.com/adap/flower/pull/2306), " -"[#2355](https://github.com/adap/flower/pull/2355), " -"[#2356](https://github.com/adap/flower/pull/2356))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**移除对 Python 3.7 的支持** " -"([#2280](https://github.com/adap/flower/pull/2280), " -"[#2299](https://github.com/adap/flower/pull/2299), " -"[#2304](https://github.com/adap/flower/pull/2304), " -"[#2306](https://github.com/adap/flower/pull/2306), " -"[#2355](https://github.com/adap/flower/pull/2355), " -"[#2356](https://github.com/adap/flower/pull/2356))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:262 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"Python 3.7 support was deprecated in Flower 1.5, and this release removes" -" support. Flower now requires Python 3.8." 
-msgstr "在 Flower 1.5 中,Python 3.7 支持已被弃用,本版本将删除该支持。Flower 现在需要 Python 3.8。" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:264 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Remove experimental argument** `rest` **from** `start_client` " -"([#2324](https://github.com/adap/flower/pull/2324))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**从** `start_client` 中移除** `rest` **实验参数 " -"([#2324](https://github.com/adap/flower/pull/2324))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:266 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"The (still experimental) argument `rest` was removed from `start_client` " -"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " -"experimental REST API instead." +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -"删除了 `start_client` 和 `start_numpy_client` 中的参数 `rest`(仍属试验性质)。请使用 " -"`transport=\"rest\"` 来选择使用试验性 REST API。" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:268 -msgid "v1.5.0 (2023-08-31)" -msgstr "v1.5.0 (2023-08-31)" +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 +#, fuzzy +msgid "FedAvgAndroid" +msgstr "DPFedAvgAdaptive" -#: ../../source/ref-changelog.md:274 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:278 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**Introduce new simulation engine** " -"([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:280 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"The new simulation engine has been rewritten from the ground up, yet it " -"remains fully backwards compatible. It offers much improved stability and" -" memory handling, especially when working with GPUs. Simulations " -"transparently adapt to different settings to scale simulation in CPU-" -"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." 
+":py:obj:`bytes_to_ndarray " +"`\\ \\(tensor\\)" msgstr "" -"新的模拟引擎从头开始重新编写,但仍完全向后兼容。它的稳定性和内存处理能力大大提高,尤其是在使用 GPU 时。仿真可透明地适应不同的设置,以在仅 " -"CPU、CPU+GPU、多 GPU 或多节点多 GPU 环境中扩展模拟。" +":py:obj:`bytes_to_ndarray " +"`\\ \\(tensor\\)" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of +#, fuzzy +msgid "Deserialize NumPy array from bytes." +msgstr "从字节反序列化 NumPy ndarray。" -#: ../../source/ref-changelog.md:282 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"Comprehensive documentation includes a new [how-to run " -"simulations](https://flower.ai/docs/framework/how-to-run-" -"simulations.html) guide, new [simulation-" -"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " -"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" -"tensorflow.html) notebooks, and a new [YouTube tutorial " -"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." 
+":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"综合文档包括新的[how-to run simulations](https://flower.ai/docs/framework/how-to-" -"run-simulations.html) guide, new [simulation-" -"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " -"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" -"tensorflow.html) notebooks, and a new [YouTube tutorial " -"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)。" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:284 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**Restructure Flower Docs** " -"([#1824](https://github.com/adap/flower/pull/1824), " -"[#1865](https://github.com/adap/flower/pull/1865), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1887](https://github.com/adap/flower/pull/1887), " -"[#1919](https://github.com/adap/flower/pull/1919), " -"[#1922](https://github.com/adap/flower/pull/1922), " -"[#1920](https://github.com/adap/flower/pull/1920), " -"[#1923](https://github.com/adap/flower/pull/1923), " -"[#1924](https://github.com/adap/flower/pull/1924), " -"[#1962](https://github.com/adap/flower/pull/1962), " -"[#2006](https://github.com/adap/flower/pull/2006), " -"[#2133](https://github.com/adap/flower/pull/2133), " -"[#2203](https://github.com/adap/flower/pull/2203), " -"[#2215](https://github.com/adap/flower/pull/2215), " -"[#2122](https://github.com/adap/flower/pull/2122), " -"[#2223](https://github.com/adap/flower/pull/2223), " -"[#2219](https://github.com/adap/flower/pull/2219), " -"[#2232](https://github.com/adap/flower/pull/2232), " -"[#2233](https://github.com/adap/flower/pull/2233), " -"[#2234](https://github.com/adap/flower/pull/2234), " -"[#2235](https://github.com/adap/flower/pull/2235), " -"[#2237](https://github.com/adap/flower/pull/2237), " 
-"[#2238](https://github.com/adap/flower/pull/2238), " -"[#2242](https://github.com/adap/flower/pull/2242), " -"[#2231](https://github.com/adap/flower/pull/2231), " -"[#2243](https://github.com/adap/flower/pull/2243), " -"[#2227](https://github.com/adap/flower/pull/2227))" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**重构 Flower 文档** ([#1824](https://github.com/adap/flower/pull/1824), " -"[#1865](https://github.com/adap/flower/pull/1865), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1887](https://github.com/adap/flower/pull/1887), " -"[#1919](https://github.com/adap/flower/pull/1919), " -"[#1922](https://github.com/adap/flower/pull/1922), " -"[#1920](https://github.com/adap/flower/pull/1920), " -"[#1923](https://github.com/adap/flower/pull/1923), " -"[#1924](https://github.com/adap/flower/pull/1924), " -"[#1962](https://github.com/adap/flower/pull/1962), " -"[#2006](https://github.com/adap/flower/pull/2006), " -"[#2133](https://github.com/adap/flower/pull/2133), " -"[#2203](https://github.com/adap/flower/pull/2203), " -"[#2215](https://github.com/adap/flower/pull/2215), " -"[#2122](https://github.com/adap/flower/pull/2122), " -"[#2223](https://github.com/adap/flower/pull/2223), " -"[#2219](https://github.com/adap/flower/pull/2219), " -"[#2232](https://github.com/adap/flower/pull/2232), " -"[#2233](https://github.com/adap/flower/pull/2233), " -"[#2234](https://github.com/adap/flower/pull/2234), " -"[#2235](https://github.com/adap/flower/pull/2235), " -"[#2237](https://github.com/adap/flower/pull/2237), " -"[#2238](https://github.com/adap/flower/pull/2238), " -"[#2242](https://github.com/adap/flower/pull/2242), " -"[#2231](https://github.com/adap/flower/pull/2231), " -"[#2243](https://github.com/adap/flower/pull/2243), " -"[#2227](https://github.com/adap/flower/pull/2227))" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:286 +#: 
flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"Much effort went into a completely restructured Flower docs experience. " -"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " -"divided into Flower Framework, Flower Baselines, Flower Android SDK, " -"Flower iOS SDK, and code example projects." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"Flower 文档体验的全面重构耗费了大量精力。现在,[flower.ai/docs](flower.ai/docs)上的文档分为 Flower " -"Framework、Flower Baselines、Flower Android SDK、Flower iOS SDK 和代码示例项目。" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:288 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**Introduce Flower Swift SDK** " -"([#1858](https://github.com/adap/flower/pull/1858), " -"[#1897](https://github.com/adap/flower/pull/1897))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**介绍 Flower Swift SDK** " -"([#1858](https://github.com/adap/flower/pull/1858), " -"[#1897](https://github.com/adap/flower/pull/1897))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:290 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"This is the first preview release of the Flower Swift SDK. Flower support" -" on iOS is improving, and alongside the Swift SDK and code example, there" -" is now also an iOS quickstart tutorial." +":py:obj:`ndarray_to_bytes " +"`\\ \\(ndarray\\)" msgstr "" -"这是 Flower Swift SDK 的首个预览版。Flower 对 iOS 的支持正在不断改进,除了 Swift SDK " -"和代码示例外,现在还有 iOS 快速入门教程。" +":py:obj:`ndarray_to_bytes " +"`\\ \\(ndarray\\)" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of +#, fuzzy +msgid "Serialize NumPy array to bytes." 
+msgstr "将 NumPy ndarray 序列化为字节。" -#: ../../source/ref-changelog.md:292 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**Introduce Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +":py:obj:`ndarrays_to_parameters " +"`\\ " +"\\(ndarrays\\)" msgstr "" -"**介绍Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +":py:obj:`ndarrays_to_parameters " +"`\\ " +"\\(ndarrays\\)" -#: ../../source/ref-changelog.md:294 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"This is the first preview release of the Flower Kotlin SDK. Flower " -"support on Android is improving, and alongside the Kotlin SDK and code " -"example, there is now also an Android quickstart tutorial." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"这是 Flower Kotlin SDK 的首个预览版。Flower 对 Android 的支持正在不断改进,除了 Kotlin SDK " -"和代码示例,现在还有 Android 快速入门教程。" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:296 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**Introduce new end-to-end testing infrastructure** " -"([#1842](https://github.com/adap/flower/pull/1842), " -"[#2071](https://github.com/adap/flower/pull/2071), " -"[#2072](https://github.com/adap/flower/pull/2072), " -"[#2068](https://github.com/adap/flower/pull/2068), " -"[#2067](https://github.com/adap/flower/pull/2067), " -"[#2069](https://github.com/adap/flower/pull/2069), " -"[#2073](https://github.com/adap/flower/pull/2073), " -"[#2070](https://github.com/adap/flower/pull/2070), " -"[#2074](https://github.com/adap/flower/pull/2074), " -"[#2082](https://github.com/adap/flower/pull/2082), " -"[#2084](https://github.com/adap/flower/pull/2084), " -"[#2093](https://github.com/adap/flower/pull/2093), " 
-"[#2109](https://github.com/adap/flower/pull/2109), " -"[#2095](https://github.com/adap/flower/pull/2095), " -"[#2140](https://github.com/adap/flower/pull/2140), " -"[#2137](https://github.com/adap/flower/pull/2137), " -"[#2165](https://github.com/adap/flower/pull/2165))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"*介绍新的端到端测试** ([#1842](https://github.com/adap/flower/pull/1842), " -"[#2071](https://github.com/adap/flower/pull/2071), " -"[#2072](https://github.com/adap/flower/pull/2072), " -"[#2068](https://github.com/adap/flower/pull/2068), " -"[#2067](https://github.com/adap/flower/pull/2067), " -"[#2069](https://github.com/adap/flower/pull/2069), " -"[#2073](https://github.com/adap/flower/pull/2073), " -"[#2070](https://github.com/adap/flower/pull/2070), " -"[#2074](https://github.com/adap/flower/pull/2074), " -"[#2082](https://github.com/adap/flower/pull/2082), " -"[#2084](https://github.com/adap/flower/pull/2084), " -"[#2093](https://github.com/adap/flower/pull/2093), " -"[#2109](https://github.com/adap/flower/pull/2109), " -"[#2095](https://github.com/adap/flower/pull/2095), " -"[#2140](https://github.com/adap/flower/pull/2140), " -"[#2137](https://github.com/adap/flower/pull/2137), " -"[#2165](https://github.com/adap/flower/pull/2165))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:298 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"A new testing infrastructure ensures that new changes stay compatible " -"with existing framework integrations or strategies." 
-msgstr "新的测试设施可确保新的变更与现有的框架集成或策略保持兼容。" +":py:obj:`parameters_to_ndarrays " +"`\\ " +"\\(parameters\\)" +msgstr "" +":py:obj:`parameters_to_ndarrays " +"`\\ " +"\\(parameters\\)" -#: ../../source/ref-changelog.md:300 -msgid "**Deprecate Python 3.7**" -msgstr "** 过时的 Python 3.7**" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 +#: of +#, fuzzy +msgid "Convert parameters object to NumPy weights." +msgstr "将参数对象转换为 NumPy ndarrays。" + +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 +#, fuzzy +msgid "FedAvgM" +msgstr "DP-FedAvg" + +#: flwr.server.strategy.fedavgm.FedAvgM:3 of +#, fuzzy +msgid "Implementation based on https://arxiv.org/abs/1909.06335" +msgstr "实施基于 https://arxiv.org/pdf/1909.06335.pdf" -#: ../../source/ref-changelog.md:302 +#: flwr.server.strategy.fedavgm.FedAvgM:25 of msgid "" -"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" -" Python 3.7 is now deprecated and will be removed in an upcoming release." -msgstr "由于 Python 3.7 已于 2023-06-27 弃用 (EOL),对 Python 3.7 的支持现已废弃,并将在即将发布的版本中移除。" +"Server-side learning rate used in server-side optimization. Defaults to " +"1.0." +msgstr "服务器端优化中使用的服务器端学习率。默认为 1.0。" -#: ../../source/ref-changelog.md:304 +#: flwr.server.strategy.fedavgm.FedAvgM:28 of +msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." 
+msgstr "用于 FedAvgM 的服务器端动量因子。默认为 0.0。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Add new** `FedTrimmedAvg` **strategy** " -"([#1769](https://github.com/adap/flower/pull/1769), " -"[#1853](https://github.com/adap/flower/pull/1853))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -"**添加新的**`FedTrimmedAvg`**策略**([#1769](https://github.com/adap/flower/pull/1769)," -" [#1853](https://github.com/adap/flower/pull/1853)" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" -#: ../../source/ref-changelog.md:306 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " -"2018](https://arxiv.org/abs/1803.01498)." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"新的 \"FedTrimmedAvg \"策略实现了[Dong Yin, " -"2018](https://arxiv.org/abs/1803.01498)的 \"Trimmed Mean\"。" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:308 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Introduce start_driver** " -"([#1697](https://github.com/adap/flower/pull/1697))" -msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" -#: ../../source/ref-changelog.md:310 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"In addition to `start_server` and using the raw Driver API, there is a " -"new `start_driver` function that allows for running `start_server` " -"scripts as a Flower driver with only a single-line code change. 
Check out" -" the `mt-pytorch` code example to see a working example using " -"`start_driver`." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"除了 `start_server` 和使用原始驱动 API 之外,还有一个新的 `start_driver` 函数,只需修改一行代码,就能将 " -"`start_server` 脚本作为 Flower 驱动程序运行。请查看 `mt-pytorch` 代码示例,了解使用 " -"`start_driver` 的工作示例。" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:312 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Add parameter aggregation to** `mt-pytorch` **code example** " -"([#1785](https://github.com/adap/flower/pull/1785))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"为 `mt-pytorch` **代码示例**添加参数聚合 " -"([#1785](https://github.com/adap/flower/pull/1785))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:314 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"The `mt-pytorch` example shows how to aggregate parameters when writing a" -" driver script. The included `driver.py` and `server.py` have been " -"aligned to demonstrate both the low-level way and the high-level way of " -"building server-side logic." 
+":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"`mt-pytorch`示例展示了如何在编写驱动程序脚本时聚合参数。附带的 `driver.py` 和 `server.py` " -"已经进行了调整,以演示构建服务器端逻辑的低级方法和高级方法。" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:316 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Migrate experimental REST API to Starlette** " -"([2171](https://github.com/adap/flower/pull/2171))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**将实验性 REST API 移植到 Starlette** " -"([2171](https://github.com/adap/flower/pull/2171))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:318 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"The (experimental) REST API used to be implemented in " -"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" -" use [Starlette](https://www.starlette.io/) directly." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"REST API(试验性)曾在 [FastAPI](https://fastapi.tiangolo.com/) 中实现,但现在已迁移到直接使用 " -"[Starlette](https://www.starlette.io/) 。" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:320 -msgid "" -"Please note: The REST request-response API is still experimental and will" -" likely change significantly over time." 
-msgstr "请注意:REST 请求-响应 API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 +#, fuzzy +msgid "FedMedian" +msgstr "联邦医保" -#: ../../source/ref-changelog.md:322 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Introduce experimental gRPC request-response API** " -"([#1867](https://github.com/adap/flower/pull/1867), " -"[#1901](https://github.com/adap/flower/pull/1901))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**引入实验性 gRPC 请求-响应 API** " -"([#1867](https://github.com/adap/flower/pull/1867), " -"[#1901](https://github.com/adap/flower/pull/1901)" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:324 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"In addition to the existing gRPC API (based on bidirectional streaming) " -"and the experimental REST API, there is now a new gRPC API that uses a " -"request-response model to communicate with client nodes." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"除了现有的 gRPC 应用程序接口(基于双向流)和试验性 REST 应用程序接口外,现在还有一个新的 gRPC " -"应用程序接口,它使用请求-响应模型与客户端节点通信。" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:326 -msgid "" -"Please note: The gRPC request-response API is still experimental and will" -" likely change significantly over time." -msgstr "请注意:gRPC 请求-响应 API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of +msgid "Aggregate fit results using median." 
+msgstr "使用中位数汇总拟合结果。" -#: ../../source/ref-changelog.md:328 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Replace the experimental** `start_client(rest=True)` **with the new** " -"`start_client(transport=\"rest\")` " -"([#1880](https://github.com/adap/flower/pull/1880))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**用新的** `start_client(transport=\"rest\")` 替换实验性** " -"`start_client(rest=True)` " -"([#1880](https://github.com/adap/flower/pull/1880))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:330 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"The (experimental) `start_client` argument `rest` was deprecated in " -"favour of a new argument `transport`. `start_client(transport=\"rest\")` " -"will yield the same behaviour as `start_client(rest=True)` did before. " -"All code should migrate to the new argument `transport`. The deprecated " -"argument `rest` will be removed in a future release." 
+":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"已废弃(试验性的)`start_client`参数`rest`,改用新参数`transport`。`start_client(transport=\"rest\")`将产生与以前的`start_client(rest=True)`相同的行为。所有代码都应迁移到新参数" -" `transport`。过时的参数 `rest` 将在今后的版本中删除。" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:332 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Add a new gRPC option** " -"([#2197](https://github.com/adap/flower/pull/2197))" -msgstr "** 添加一个新的 gRPC 选项**([#2197](https://github.com/adap/flower/pull/2197))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:334 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" -" option set to 0 by default. This prevents the clients from sending " -"keepalive pings when there is no outstanding stream." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"现在我们启动一个 gRPC 服务器,并将 `grpc.keepalive_permit_without_calls` 选项默认设置为 " -"0。这将防止客户端在没有未处理数据流时发送 keepalive pings。" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:336 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Improve example notebooks** " -"([#2005](https://github.com/adap/flower/pull/2005))" -msgstr "**改进示例笔记** ([#2005](https://github.com/adap/flower/pull/2005))" - -#: ../../source/ref-changelog.md:338 -msgid "There's a new 30min Federated Learning PyTorch tutorial!" -msgstr "有一个新的 30 分钟的联邦学习 PyTorch 教程!" 
+":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:340 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), " -"[#1981](https://github.com/adap/flower/pull/1981), " -"[#1988](https://github.com/adap/flower/pull/1988), " -"[#1984](https://github.com/adap/flower/pull/1984), " -"[#1982](https://github.com/adap/flower/pull/1982), " -"[#2112](https://github.com/adap/flower/pull/2112), " -"[#2144](https://github.com/adap/flower/pull/2144), " -"[#2174](https://github.com/adap/flower/pull/2174), " -"[#2225](https://github.com/adap/flower/pull/2225), " -"[#2183](https://github.com/adap/flower/pull/2183))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**更新Example** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), " -"[#1981](https://github.com/adap/flower/pull/1981), " -"[#1988](https://github.com/adap/flower/pull/1988), " -"[#1984](https://github.com/adap/flower/pull/1984), " -"[#1982](https://github.com/adap/flower/pull/1982), " -"[#2112](https://github.com/adap/flower/pull/2112), " -"[#2144](https://github.com/adap/flower/pull/2144), " -"[#2174](https://github.com/adap/flower/pull/2174), " -"[#2225](https://github.com/adap/flower/pull/2225), " -"[#2183](https://github.com/adap/flower/pull/2183))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" + +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 +#, fuzzy +msgid "FedOpt" +msgstr "FedOpt" + +#: flwr.server.strategy.fedopt.FedOpt:33 of +msgid "Momentum parameter. Defaults to 0.0." +msgstr "动量参数。默认为 0.0。" + +#: flwr.server.strategy.fedopt.FedOpt:35 of +msgid "Second moment parameter. 
Defaults to 0.0." +msgstr "第二动量参数。默认为 0.0。" -#: ../../source/ref-changelog.md:342 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"Many examples have received significant updates, including simplified " -"advanced-tensorflow and advanced-pytorch examples, improved macOS " -"compatibility of TensorFlow examples, and code examples for simulation. A" -" major upgrade is that all code examples now have a `requirements.txt` " -"(in addition to `pyproject.toml`)." +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -"许多示例都进行了重大更新,包括简化了 advanced-tensorflow 和 advanced-pytorch 示例,改进了 " -"TensorFlow 示例的 macOS 兼容性,以及模拟代码示例。一项重大升级是所有代码示例现在都有了 " -"\"requirements.txt\"(除 \"pyproject.toml \"外)。" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" -#: ../../source/ref-changelog.md:344 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**General improvements** " -"([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" - -#: ../../source/ref-changelog.md:352 -msgid "v1.4.0 (2023-04-21)" -msgstr "v1.4.0 (2023-04-21)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:358 +#: 
flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " -"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " -"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " -"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " -"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " -"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " -"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " -"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " -"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" -#: ../../source/ref-changelog.md:362 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " -"example)** ([#1694](https://github.com/adap/flower/pull/1694), " -"[#1709](https://github.com/adap/flower/pull/1709), " -"[#1715](https://github.com/adap/flower/pull/1715), " -"[#1717](https://github.com/adap/flower/pull/1717), " -"[#1763](https://github.com/adap/flower/pull/1763), " -"[#1795](https://github.com/adap/flower/pull/1795))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**引入对XGBoost的支持(**`FedXgbNnAvg` **策略和示例)** " -"([#1694](https://github.com/adap/flower/pull/1694), " -"[#1709](https://github.com/adap/flower/pull/1709), " -"[#1715](https://github.com/adap/flower/pull/1715), " 
-"[#1717](https://github.com/adap/flower/pull/1717), " -"[#1763](https://github.com/adap/flower/pull/1763), " -"[#1795](https://github.com/adap/flower/pull/1795))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:364 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"XGBoost is a tree-based ensemble machine learning algorithm that uses " -"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" -" " -"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," -" and a [code example](https://github.com/adap/flower/tree/main/examples" -"/xgboost-quickstart) that demonstrates the usage of this new strategy in " -"an XGBoost project." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"XGBoost 是一种基于树的集合机器学习算法,它使用梯度提升来提高模型的准确性。我们添加了一个新的 " -"\"FedXgbNnAvg\"[策略](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)和一个[代码示例](https://github.com/adap/flower/tree/main/examples" -"/xgboost-quickstart),演示如何在 XGBoost 项目中使用这个新策略。" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:366 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Introduce iOS SDK (preview)** " -"([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**介绍 iOS SDK(预览版)** ([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:368 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"This is a major update for anyone wanting to implement Federated Learning" -" on iOS mobile devices. 
We now have a swift iOS SDK present under " -"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" -" that will facilitate greatly the app creating process. To showcase its " -"use, the [iOS " -"example](https://github.com/adap/flower/tree/main/examples/ios) has also " -"been updated!" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"对于想要在 iOS 移动设备上实施联邦学习的人来说,这是一次重大更新。现在,我们在 " -"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" -" 下提供了一个迅捷的 iOS SDK,这将大大方便应用程序的创建过程。为了展示其使用情况,我们还更新了 [iOS " -"示例](https://github.com/adap/flower/tree/main/examples/ios)!" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:370 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Introduce new \"What is Federated Learning?\" tutorial** " -"([#1657](https://github.com/adap/flower/pull/1657), " -"[#1721](https://github.com/adap/flower/pull/1721))" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -"**引入新的 " -"\"什么是联邦学习?\"教程**([#1657](https://github.com/adap/flower/pull/1657), " -"[#1721](https://github.com/adap/flower/pull/1721)" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:372 -msgid "" -"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" -"what-is-federated-learning.html) in our documentation explains the basics" -" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" -" Learning to start their journey with Flower. Forward it to anyone who's " -"interested in Federated Learning!" -msgstr "" -"我们的文档中新增了一个[入门级教程](https://flower.ai/docs/framework/tutorial-what-is-" -"federated-learning.html),解释了联邦学习的基础知识。它让任何不熟悉联邦学习的人都能开始 Flower " -"之旅。请转发给对联邦学习感兴趣的人!" 
+#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 +#, fuzzy +msgid "FedProx" +msgstr "FedProx" -#: ../../source/ref-changelog.md:374 -msgid "" -"**Introduce new Flower Baseline: FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679))" -msgstr "" -"**引入新的 Flower Baseline: FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679)" +#: flwr.server.strategy.fedprox.FedProx:3 of +msgid "Implementation based on https://arxiv.org/abs/1812.06127" +msgstr "实施基于 https://arxiv.org/abs/1812.06127" -#: ../../source/ref-changelog.md:376 +#: flwr.server.strategy.fedprox.FedProx:5 of msgid "" -"This new baseline replicates the MNIST+CNN task from the paper [Federated" -" Optimization in Heterogeneous Networks (Li et al., " -"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," -" which aims at making convergence more robust in heterogeneous settings." -msgstr "" -"这条新Baseline复现了论文[Federated Optimization in Heterogeneous Networks (Li et " -"al., 2018)](https://arxiv.org/abs/1812.06127)中的 MNIST+CNN 任务。它使用 " -"\"FedProx \"策略,旨在使收敛在异构环境中更加稳健。" +"The strategy in itself will not be different than FedAvg, the client " +"needs to be adjusted. 
A proximal term needs to be added to the loss " +"function during the training:" +msgstr "策略本身与 FedAvg 并无不同,客户端需要进行调整。在训练过程中,需要在损失函数中添加一个近端项:" -#: ../../source/ref-changelog.md:378 +#: flwr.server.strategy.fedprox.FedProx:9 of msgid "" -"**Introduce new Flower Baseline: FedAvg FEMNIST** " -"([#1655](https://github.com/adap/flower/pull/1655))" +"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" +"\n" msgstr "" -"**引入新的 Flower Baseline: FedAvg FEMNIST** " -"([#1655](https://github.com/adap/flower/pull/1655))" +"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" +"\n" -#: ../../source/ref-changelog.md:380 +#: flwr.server.strategy.fedprox.FedProx:12 of msgid "" -"This new baseline replicates an experiment evaluating the performance of " -"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " -"Benchmark for Federated Settings (Caldas et al., " -"2018)](https://arxiv.org/abs/1812.01097)." -msgstr "" -"这一新Baseline复现了论文[LEAF: A Benchmark for Federated Settings(Caldas 等人,2018 " -"年)](https://arxiv.org/abs/1812.01097)中评估 FedAvg 算法在 FEMNIST 数据集上性能的实验。" +"Where $w^t$ are the global parameters and $w$ are the local weights the " +"function will be optimized with." 
+msgstr "其中,$w^t$ 是全局参数,$w$ 是优化函数的局部权重。" -#: ../../source/ref-changelog.md:382 -msgid "" -"**Introduce (experimental) REST API** " -"([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" -msgstr "" -"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" +#: flwr.server.strategy.fedprox.FedProx:15 of +msgid "In PyTorch, for example, the loss would go from:" +msgstr "例如,在 PyTorch 中,损失将从:" -#: ../../source/ref-changelog.md:384 -msgid "" -"A new REST API has been introduced as an alternative to the gRPC-based " -"communication stack. In this initial version, the REST API only supports " -"anonymous clients." -msgstr "作为基于 gRPC 的通信栈的替代方案,我们引入了新的 REST API。在初始版本中,REST API 仅支持匿名客户端。" +#: flwr.server.strategy.fedprox.FedProx:21 of +msgid "To:" +msgstr "致:" -#: ../../source/ref-changelog.md:386 +#: flwr.server.strategy.fedprox.FedProx:30 of msgid "" -"Please note: The REST API is still experimental and will likely change " -"significantly over time." -msgstr "请注意:REST API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" +"With `global_params` being a copy of the parameters before the training " +"takes place." 
+msgstr "其中,\"global_params \"是训练前的参数副本。" -#: ../../source/ref-changelog.md:388 +#: flwr.server.strategy.fedprox.FedProx:65 of msgid "" -"**Improve the (experimental) Driver API** " -"([#1663](https://github.com/adap/flower/pull/1663), " -"[#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" +"The weight of the proximal term used in the optimization. 0.0 makes this " +"strategy equivalent to FedAvg, and the higher the coefficient, the more " +"regularization will be used (that is, the client parameters will need to " +"be closer to the server parameters during training)." msgstr "" -"**改进(试验性)驱动程序应用程序接口** ([#1663](https://github.com/adap/flower/pull/1663)," -" [#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" +"优化中使用的近端项权重。0.0 使该策略等同于 " +"FedAvg,系数越大,使用的正则化就越多(也就是说,在训练过程中,客户端参数需要更接近服务器参数)。" -#: ../../source/ref-changelog.md:390 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"The Driver API is still an experimental feature, but this release " -"introduces some major upgrades. One of the main improvements is the " -"introduction of an SQLite database to store server state on disk (instead" -" of in-memory). 
Another improvement is that tasks (instructions or " -"results) that have been delivered will now be deleted. This greatly " -"improves the memory efficiency of a long-running Flower server." +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -"驱动程序应用程序接口(Driver API)仍是一项试验性功能,但这一版本引入了一些重大升级。主要改进之一是引入了 SQLite " -"数据库,将服务器状态存储在磁盘上(而不是内存中)。另一项改进是,已交付的任务(指令或结果)现在将被删除。这大大提高了长期运行的 Flower " -"服务器的内存效率。" - -#: ../../source/ref-changelog.md:392 -msgid "" -"**Fix spilling issues related to Ray during simulations** " -"([#1698](https://github.com/adap/flower/pull/1698))" -msgstr "**修复模拟过程中与Ray有关的溢出问题** ([#1698](https://github.com/adap/flower/pull/1698))" - -#: ../../source/ref-changelog.md:394 -msgid "" -"While running long simulations, `ray` was sometimes spilling huge amounts" -" of data that would make the training unable to continue. This is now " -"fixed! 🎉" -msgstr "在运行长时间模拟时,`ray` 有时会溢出大量数据,导致训练无法继续。现在这个问题已经解决!🎉" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" -#: ../../source/ref-changelog.md:396 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Add new example using** `TabNet` **and Flower** " -"([#1725](https://github.com/adap/flower/pull/1725))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"** 添加使用** `TabNet` ** 的新示例** " -"([#1725](https://github.com/adap/flower/pull/1725))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:398 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"TabNet is a powerful and flexible framework for training machine learning" -" models on tabular data. We now have a federated example using Flower: " -"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" -"/quickstart-tabnet)." 
+":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -"TabNet 是一个强大而灵活的框架,用于在表格数据上训练机器学习模型。我们现在有一个使用 Flower 的联邦示例:[quickstart-" -"tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-" -"tabnet)。" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" -#: ../../source/ref-changelog.md:400 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Add new how-to guide for monitoring simulations** " -"([#1649](https://github.com/adap/flower/pull/1649))" -msgstr "** 添加新的模拟监控指南** ([#1649](https://github.com/adap/flower/pull/1649))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:402 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"We now have a documentation guide to help users monitor their performance" -" during simulations." -msgstr "我们现在有一份文档指南,可帮助用户在模拟过程中监控其性能。" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:404 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Add training metrics to** `History` **object during simulations** " -"([#1696](https://github.com/adap/flower/pull/1696))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**在模拟过程中为***`历史`***对象添加训练指标*** " -"([#1696](https://github.com/adap/flower/pull/1696))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:406 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"The `fit_metrics_aggregation_fn` can be used to aggregate training " -"metrics, but previous releases did not save the results in the `History` " -"object. 
This is now the case!" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"`fit_metrics_aggregation_fn`可用于汇总训练指标,但以前的版本不会将结果保存在 \"History " -"\"对象中。现在可以了!" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:408 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**General improvements** " -"([#1659](https://github.com/adap/flower/pull/1659), " -"[#1646](https://github.com/adap/flower/pull/1646), " -"[#1647](https://github.com/adap/flower/pull/1647), " -"[#1471](https://github.com/adap/flower/pull/1471), " -"[#1648](https://github.com/adap/flower/pull/1648), " -"[#1651](https://github.com/adap/flower/pull/1651), " -"[#1652](https://github.com/adap/flower/pull/1652), " -"[#1653](https://github.com/adap/flower/pull/1653), " -"[#1659](https://github.com/adap/flower/pull/1659), " -"[#1665](https://github.com/adap/flower/pull/1665), " -"[#1670](https://github.com/adap/flower/pull/1670), " -"[#1672](https://github.com/adap/flower/pull/1672), " -"[#1677](https://github.com/adap/flower/pull/1677), " -"[#1684](https://github.com/adap/flower/pull/1684), " -"[#1683](https://github.com/adap/flower/pull/1683), " -"[#1686](https://github.com/adap/flower/pull/1686), " -"[#1682](https://github.com/adap/flower/pull/1682), " -"[#1685](https://github.com/adap/flower/pull/1685), " -"[#1692](https://github.com/adap/flower/pull/1692), " -"[#1705](https://github.com/adap/flower/pull/1705), " -"[#1708](https://github.com/adap/flower/pull/1708), " -"[#1711](https://github.com/adap/flower/pull/1711), " -"[#1713](https://github.com/adap/flower/pull/1713), " -"[#1714](https://github.com/adap/flower/pull/1714), " -"[#1718](https://github.com/adap/flower/pull/1718), " -"[#1716](https://github.com/adap/flower/pull/1716), " -"[#1723](https://github.com/adap/flower/pull/1723), " -"[#1735](https://github.com/adap/flower/pull/1735), " 
-"[#1678](https://github.com/adap/flower/pull/1678), " -"[#1750](https://github.com/adap/flower/pull/1750), " -"[#1753](https://github.com/adap/flower/pull/1753), " -"[#1736](https://github.com/adap/flower/pull/1736), " -"[#1766](https://github.com/adap/flower/pull/1766), " -"[#1760](https://github.com/adap/flower/pull/1760), " -"[#1775](https://github.com/adap/flower/pull/1775), " -"[#1776](https://github.com/adap/flower/pull/1776), " -"[#1777](https://github.com/adap/flower/pull/1777), " -"[#1779](https://github.com/adap/flower/pull/1779), " -"[#1784](https://github.com/adap/flower/pull/1784), " -"[#1773](https://github.com/adap/flower/pull/1773), " -"[#1755](https://github.com/adap/flower/pull/1755), " -"[#1789](https://github.com/adap/flower/pull/1789), " -"[#1788](https://github.com/adap/flower/pull/1788), " -"[#1798](https://github.com/adap/flower/pull/1798), " -"[#1799](https://github.com/adap/flower/pull/1799), " -"[#1739](https://github.com/adap/flower/pull/1739), " -"[#1800](https://github.com/adap/flower/pull/1800), " -"[#1804](https://github.com/adap/flower/pull/1804), " -"[#1805](https://github.com/adap/flower/pull/1805))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**普通改进** ([#1659](https://github.com/adap/flower/pull/1659), " -"[#1646](https://github.com/adap/flower/pull/1646), " -"[#1647](https://github.com/adap/flower/pull/1647), " -"[#1471](https://github.com/adap/flower/pull/1471), " -"[#1648](https://github.com/adap/flower/pull/1648), " -"[#1651](https://github.com/adap/flower/pull/1651), " -"[#1652](https://github.com/adap/flower/pull/1652), " -"[#1653](https://github.com/adap/flower/pull/1653), " -"[#1659](https://github.com/adap/flower/pull/1659), " -"[#1665](https://github.com/adap/flower/pull/1665), " -"[#1670](https://github.com/adap/flower/pull/1670), " -"[#1672](https://github.com/adap/flower/pull/1672), " -"[#1677](https://github.com/adap/flower/pull/1677), " 
-"[#1684](https://github.com/adap/flower/pull/1684), " -"[#1683](https://github.com/adap/flower/pull/1683), " -"[#1686](https://github.com/adap/flower/pull/1686), " -"[#1682](https://github.com/adap/flower/pull/1682), " -"[#1685](https://github.com/adap/flower/pull/1685), " -"[#1692](https://github.com/adap/flower/pull/1692), " -"[#1705](https://github.com/adap/flower/pull/1705), " -"[#1708](https://github.com/adap/flower/pull/1708), " -"[#1711](https://github.com/adap/flower/pull/1711), " -"[#1713](https://github.com/adap/flower/pull/1713), " -"[#1714](https://github.com/adap/flower/pull/1714), " -"[#1718](https://github.com/adap/flower/pull/1718), " -"[#1716](https://github.com/adap/flower/pull/1716), " -"[#1723](https://github.com/adap/flower/pull/1723), " -"[#1735](https://github.com/adap/flower/pull/1735), " -"[#1678](https://github.com/adap/flower/pull/1678), " -"[#1750](https://github.com/adap/flower/pull/1750), " -"[#1753](https://github.com/adap/flower/pull/1753), " -"[#1736](https://github.com/adap/flower/pull/1736), " -"[#1766](https://github.com/adap/flower/pull/1766), " -"[#1760](https://github.com/adap/flower/pull/1760), " -"[#1775](https://github.com/adap/flower/pull/1775), " -"[#1776](https://github.com/adap/flower/pull/1776), " -"[#1777](https://github.com/adap/flower/pull/1777), " -"[#1779](https://github.com/adap/flower/pull/1779), " -"[#1784](https://github.com/adap/flower/pull/1784), " -"[#1773](https://github.com/adap/flower/pull/1773), " -"[#1755](https://github.com/adap/flower/pull/1755), " -"[#1789](https://github.com/adap/flower/pull/1789), " -"[#1788](https://github.com/adap/flower/pull/1788), " -"[#1798](https://github.com/adap/flower/pull/1798), " -"[#1799](https://github.com/adap/flower/pull/1799), " -"[#1739](https://github.com/adap/flower/pull/1739), " -"[#1800](https://github.com/adap/flower/pull/1800), " -"[#1804](https://github.com/adap/flower/pull/1804), " -"[#1805](https://github.com/adap/flower/pull/1805))" 
+":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:416 -msgid "v1.3.0 (2023-02-06)" -msgstr "v1.3.0 (2023-02-06)" +#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of +msgid "Sends the proximal factor mu to the clients" +msgstr "向客户发送近端因子mu" -#: ../../source/ref-changelog.md:422 -msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" -msgstr "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 +#, fuzzy +msgid "FedTrimmedAvg" +msgstr "server.strategy.FedTrimmedAvg" -#: ../../source/ref-changelog.md:426 -msgid "" -"**Add support for** `workload_id` **and** `group_id` **in Driver API** " -"([#1595](https://github.com/adap/flower/pull/1595))" -msgstr "" -"**在驱动程序应用程序接口中添加对** `workload_id` **和** `group_id` **的支持** " -"([#1595](https://github.com/adap/flower/pull/1595))" +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of +#, fuzzy +msgid "Implemented based on: https://arxiv.org/abs/1803.01498" +msgstr "实施基于 https://arxiv.org/abs/1802.07927。" + +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of +msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." +msgstr "截取分布两个尾部的分数。默认为 0.2。" -#: ../../source/ref-changelog.md:428 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"The (experimental) Driver API now supports a `workload_id` that can be " -"used to identify which workload a task belongs to. It also supports a new" -" `group_id` that can be used, for example, to indicate the current " -"training round. Both the `workload_id` and `group_id` enable client nodes" -" to decide whether they want to handle a task or not." 
+":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"驱动程序 API(试验性)现在支持 `workload_id`,可用于识别任务所属的工作量。它还支持新的 " -"`group_id`,例如,可用于指示当前的训练轮次。通过 `workload_id` 和 `group_id` " -"客户端节点可以决定是否要处理某个任务。" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:430 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Make Driver API and Fleet API address configurable** " -"([#1637](https://github.com/adap/flower/pull/1637))" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**使Driver API 和Fleet " -"API地址可配置**([#1637](https://github.com/adap/flower/pull/1637))" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using trimmed average." 
+msgstr "使用修剪平均值汇总拟合结果。" -#: ../../source/ref-changelog.md:432 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"The (experimental) long-running Flower server (Driver API and Fleet API) " -"can now configure the server address of both Driver API (via `--driver-" -"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"长期运行的 Flower 服务器(Driver API 和 Fleet API)现在可以在启动时配置 Driver API(通过 " -"`--driver-api-address`)和 Fleet API(通过 `-fleet-api-address`)的服务器地址:" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:434 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " -"\"0.0.0.0:8086\"`" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " -"\"0.0.0.0:8086\"`" - -#: ../../source/ref-changelog.md:436 -msgid "Both IPv4 and IPv6 addresses are supported." 
-msgstr "支持 IPv4 和 IPv6 地址。" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:438 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Add new example of Federated Learning using fastai and Flower** " -"([#1598](https://github.com/adap/flower/pull/1598))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"** 添加使用 fastai 和 Flower 进行联邦学习的新示例** " -"([#1598](https://github.com/adap/flower/pull/1598))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:440 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"A new code example (`quickstart-fastai`) demonstrates federated learning " -"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " -"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" -"/quickstart-fastai)." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"一个新的代码示例(`quickstart-fastai`)演示了使用 [fastai](https://www.fast.ai/) 和 " -"Flower 的联邦学习。您可以在这里找到它: [quickstart-" -"fastai](https://github.com/adap/flower/tree/main/examples/quickstart-" -"fastai)。" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:442 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" -" versions of Android** " -"([#1603](https://github.com/adap/flower/pull/1603))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**使安卓示例兼容** `flwr >= 1.0.0` **和最新版本的安卓** " -"([#1603](https://github.com/adap/flower/pull/1603))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:444 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"The Android code 
example has received a substantial update: the project " -"is compatible with Flower 1.0 (and later), the UI received a full " -"refresh, and the project is updated to be compatible with newer Android " -"tooling." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"Android 代码示例已进行了大幅更新:项目兼容 Flower 1.0(及更高版本),用户界面已全面刷新,项目已更新为兼容较新的 Android" -" 工具。" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:446 -msgid "" -"**Add new `FedProx` strategy** " -"([#1619](https://github.com/adap/flower/pull/1619))" -msgstr "**添加新的`FedProx`策略** ([#1619](https://github.com/adap/flower/pull/1619))" +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 +#, fuzzy +msgid "FedXgbBagging" +msgstr "FedXgbBagging" -#: ../../source/ref-changelog.md:448 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"This " -"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" -" is almost identical to " -"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," -" but helps users replicate what is described in this " -"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " -"parameter called `proximal_mu` to regularize the local models with " -"respect to the global models." 
+":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"该[策略](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)与[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)几乎相同,但可以帮助用户复现本[论文](https://arxiv.org/abs/1812.06127)中的描述。它的本质是添加一个名为" -" `proximal_mu`的参数,使局部模型与全局模型正则化。" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:450 -msgid "" -"**Add new metrics to telemetry events** " -"([#1640](https://github.com/adap/flower/pull/1640))" -msgstr "**为遥测事件添加新指标**([#1640](https://github.com/adap/flower/pull/1640))" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +#, fuzzy +msgid "Aggregate evaluation metrics using average." +msgstr "采用加权平均法计算评估损失总额。" -#: ../../source/ref-changelog.md:452 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"An updated event structure allows, for example, the clustering of events " -"within the same workload." 
-msgstr "例如,更新后的事件结构可以将同一工作负载中的事件集中在一起。" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:454 -msgid "" -"**Add new custom strategy tutorial section** " -"[#1623](https://github.com/adap/flower/pull/1623)" -msgstr "**添加新的自定义策略教程部分** [#1623](https://github.com/adap/flower/pull/1623)" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of +#, fuzzy +msgid "Aggregate fit results using bagging." +msgstr "使用 Bulyan 技术汇总拟合结果。" -#: ../../source/ref-changelog.md:456 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"The Flower tutorial now has a new section that covers implementing a " -"custom strategy from scratch: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Flower 教程新增了一个章节,介绍如何从零开始实施自定义策略: [在 Colab " -"中打开](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:458 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**Add new custom serialization tutorial section** " -"([#1622](https://github.com/adap/flower/pull/1622))" -msgstr "** 添加新的自定义序列化教程部分** ([#1622](https://github.com/adap/flower/pull/1622))" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, 
...\\)" +msgstr "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:460 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"The Flower tutorial now has a new section that covers custom " -"serialization: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-customize-the-client-pytorch.ipynb)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"Flower 教程现在新增了一个章节,介绍自定义序列化: [在 Colab " -"中打开](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-customize-the-client-pytorch.ipynb)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:462 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**General improvements** " -"([#1638](https://github.com/adap/flower/pull/1638), " -"[#1634](https://github.com/adap/flower/pull/1634), " -"[#1636](https://github.com/adap/flower/pull/1636), " -"[#1635](https://github.com/adap/flower/pull/1635), " -"[#1633](https://github.com/adap/flower/pull/1633), " -"[#1632](https://github.com/adap/flower/pull/1632), " -"[#1631](https://github.com/adap/flower/pull/1631), " -"[#1630](https://github.com/adap/flower/pull/1630), " -"[#1627](https://github.com/adap/flower/pull/1627), " -"[#1593](https://github.com/adap/flower/pull/1593), " -"[#1616](https://github.com/adap/flower/pull/1616), " -"[#1615](https://github.com/adap/flower/pull/1615), " -"[#1607](https://github.com/adap/flower/pull/1607), " -"[#1609](https://github.com/adap/flower/pull/1609), " -"[#1608](https://github.com/adap/flower/pull/1608), " -"[#1603](https://github.com/adap/flower/pull/1603), " -"[#1590](https://github.com/adap/flower/pull/1590), " -"[#1580](https://github.com/adap/flower/pull/1580), " -"[#1599](https://github.com/adap/flower/pull/1599), " 
-"[#1600](https://github.com/adap/flower/pull/1600), " -"[#1601](https://github.com/adap/flower/pull/1601), " -"[#1597](https://github.com/adap/flower/pull/1597), " -"[#1595](https://github.com/adap/flower/pull/1595), " -"[#1591](https://github.com/adap/flower/pull/1591), " -"[#1588](https://github.com/adap/flower/pull/1588), " -"[#1589](https://github.com/adap/flower/pull/1589), " -"[#1587](https://github.com/adap/flower/pull/1587), " -"[#1573](https://github.com/adap/flower/pull/1573), " -"[#1581](https://github.com/adap/flower/pull/1581), " -"[#1578](https://github.com/adap/flower/pull/1578), " -"[#1574](https://github.com/adap/flower/pull/1574), " -"[#1572](https://github.com/adap/flower/pull/1572), " -"[#1586](https://github.com/adap/flower/pull/1586))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**普通改进** ([#1638](https://github.com/adap/flower/pull/1638), " -"[#1634](https://github.com/adap/flower/pull/1634), " -"[#1636](https://github.com/adap/flower/pull/1636), " -"[#1635](https://github.com/adap/flower/pull/1635), " -"[#1633](https://github.com/adap/flower/pull/1633), " -"[#1632](https://github.com/adap/flower/pull/1632), " -"[#1631](https://github.com/adap/flower/pull/1631), " -"[#1630](https://github.com/adap/flower/pull/1630), " -"[#1627](https://github. com/adap/flower/pull/1627), " -"[#1593](https://github.com/adap/flower/pull/1593), " -"[#1616](https://github.com/adap/flower/pull/1616), " -"[#1615](https://github.com/adap/flower/pull/1615), " -"[#1607](https://github.com/adap/flower/pull/1607), " -"[#1609](https://github.com/adap/flower/pull/1609), " -"[#1608](https://github.com/adap/flower/pull/1608), " -"[#1603](https://github.com/adap/flower/pull/1603), " -"[#1590](https://github. 
com/adap/flower/pull/1590), " -"[#1580](https://github.com/adap/flower/pull/1580), " -"[#1599](https://github.com/adap/flower/pull/1599), " -"[#1600](https://github.com/adap/flower/pull/1600), " -"[#1601](https://github.com/adap/flower/pull/1601), " -"[#1597](https://github.com/adap/flower/pull/1597), " -"[#1595](https://github.com/adap/flower/pull/1595), " -"[#1591](https://github.com/adap/flower/pull/1591), " -"[#1588](https://github. com/adap/flower/pull/1588), " -"[#1589](https://github.com/adap/flower/pull/1589), " -"[#1587](https://github.com/adap/flower/pull/1587), " -"[#1573](https://github.com/adap/flower/pull/1573), " -"[#1581](https://github.com/adap/flower/pull/1581), " -"[#1578](https://github.com/adap/flower/pull/1578), " -"[#1574](https://github.com/adap/flower/pull/1574), " -"[#1572](https://github.com/adap/flower/pull/1572), " -"[#1586](https://github.com/adap/flower/pull/1586))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:466 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**Updated documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " 
-"[#1614](https://github.com/adap/flower/pull/1614)))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:468 ../../source/ref-changelog.md:535 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"As usual, the documentation has improved quite a bit. It is another step " -"in our effort to make the Flower documentation the best documentation of " -"any project. Stay tuned and as always, feel free to provide feedback!" -msgstr "和往常一样,我们的文档有了很大的改进。这是我们努力使 Flower 文档成为所有项目中最好文档的又一步骤。请继续关注,并随时提供反馈意见!" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:474 -msgid "v1.2.0 (2023-01-13)" -msgstr "v1.2.0 (2023-01-13)" +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 +#, fuzzy +msgid "FedXgbCyclic" +msgstr "FedXgbCyclic" -#: ../../source/ref-changelog.md:480 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." -" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." 
-" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:484 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**Introduce new Flower Baseline: FedAvg MNIST** " -"([#1497](https://github.com/adap/flower/pull/1497), " -"[#1552](https://github.com/adap/flower/pull/1552))" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\," +" results\\, failures\\)" msgstr "" -"**引入新的 Flower Baseline: FedAvg MNIST** " -"([#1497](https://github.com/adap/flower/pull/1497), " -"[#1552](https://github.com/adap/flower/pull/1552))" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\," +" results\\, failures\\)" -#: ../../source/ref-changelog.md:486 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"Over the coming weeks, we will be releasing a number of new reference " -"implementations useful especially to FL newcomers. They will typically " -"revisit well known papers from the literature, and be suitable for " -"integration in your own application or for experimentation, in order to " -"deepen your knowledge of FL in general. Today's release is the first in " -"this series. 
[Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" -"pack-fedavg-mnist-cnn/)" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"在未来几周内,我们将发布一些新的参考,特别是对 FL " -"新手有用的方法。它们通常会重温文献中的知名论文,适合集成到您自己的应用程序中或用于实验,以加深您对 FL " -"的总体了解。今天发布的是该系列中的第一篇。[阅读全文](https://flower.ai/blog/2023-01-12-fl-starter-" -"pack-fedavg-mnist-cnn/)" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:488 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**Improve GPU support in simulations** " -"([#1555](https://github.com/adap/flower/pull/1555))" -msgstr "**改进模拟中的 GPU 支持**([#1555](https://github.com/adap/flower/pull/1555))" +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" -#: ../../source/ref-changelog.md:490 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" -" to improve GPU support. The update includes some of the hard-earned " -"lessons from scaling simulations in GPU cluster environments. New " -"defaults make running GPU-based simulations substantially more robust." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"基于 Ray 的虚拟客户端引擎 (`start_simulation`)已更新,以改进对 GPU 的支持。此次更新包含了在 GPU " -"集群环境中扩展模拟的一些经验教训。新的默认设置使基于 GPU 的模拟运行更加稳健。" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:492 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**Improve GPU support in Jupyter Notebook tutorials** " -"([#1527](https://github.com/adap/flower/pull/1527), " -"[#1558](https://github.com/adap/flower/pull/1558))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**改进 Jupyter Notebook 教程中的 GPU 支持** " -"([#1527](https://github.com/adap/flower/pull/1527), " -"[#1558](https://github.com/adap/flower/pull/1558))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:494 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"Some users reported that Jupyter Notebooks have not always been easy to " -"use on GPU instances. We listened and made improvements to all of our " -"Jupyter notebooks! 
Check out the updated notebooks here:" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"一些用户报告说,在 GPU 实例上使用 Jupyter 笔记本并不是很方便。我们听取了他们的意见,并对所有 Jupyter " -"笔记本进行了改进!点击这里查看更新后的笔记本:" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:496 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"[An Introduction to Federated Learning](https://flower.ai/docs/framework" -"/tutorial-get-started-with-flower-pytorch.html)" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"[联邦学习简介](https://flower.ai/docs/framework/tutorial-get-started-with-" -"flower-pytorch.html)" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" + +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 +#, fuzzy +msgid "FedXgbNnAvg" +msgstr "DP-FedAvg" -#: ../../source/ref-changelog.md:497 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of +#, fuzzy msgid "" -"[Strategies in Federated Learning](https://flower.ai/docs/framework" -"/tutorial-use-a-federated-learning-strategy-pytorch.html)" +"This strategy is deprecated, but a copy of it is available in Flower " +"Baselines: " +"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." 
msgstr "" -"[联邦学习策略](https://flower.ai/docs/framework/tutorial-use-a-federated-" -"learning-strategy-pytorch.html)" +"该策略已被弃用,但在 Flower Baselines: " +"https://github.com/adap/flower/tree/main/baselines/hfedxgboost 中有其副本。" -#: ../../source/ref-changelog.md:498 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" -"-strategy-from-scratch-pytorch.html)" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"[制定策略](https://flower.ai/docs/framework/tutorial-build-a-strategy-from-" -"scratch-pytorch.html)" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:499 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" -"customize-the-client-pytorch.html)" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\, " +"results\\, failures\\)" msgstr "" -"[客户端和 NumPyClient](https://flower.ai/docs/framework/tutorial-customize-" -"the-client-pytorch.html)" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\, " +"results\\, failures\\)" -#: ../../source/ref-changelog.md:501 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Introduce optional telemetry** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**引入可选遥测**([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584)" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:503 +#: 
flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"After a [request for " -"feedback](https://github.com/adap/flower/issues/1534) from the community," -" the Flower open-source project introduces optional collection of " -"*anonymous* usage metrics to make well-informed decisions to improve " -"Flower. Doing this enables the Flower team to understand how Flower is " -"used and what challenges users might face." +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -"在社区发出[反馈请求](https://github.com/adap/flower/issues/1534)之后,Flower " -"开放源码项目引入了可选的*匿名*使用指标收集,以便在充分知情的情况下做出改进 Flower 的决定。这样做能让 Flower 团队了解 " -"Flower 的使用情况以及用户可能面临的挑战。" +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" -#: ../../source/ref-changelog.md:505 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users who do not want to share anonymous usage metrics. " -"[Read more.](https://flower.ai/docs/telemetry.html)." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**Flower 是一个用于协作式人工智能和数据科学的友好框架。** Flower " -"遵循这一声明,让不想分享匿名使用指标的用户可以轻松禁用遥测技术。[阅读全文](https://flower.ai/docs/telemetry.html)。" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:507 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Introduce (experimental) Driver API** " -"([#1520](https://github.com/adap/flower/pull/1520), " -"[#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**引入(试验性)Driver API** ([#1520](https://github.com/adap/flower/pull/1520)," -" [#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:509 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"Flower now has a new (experimental) Driver API which will enable fully " -"programmable, async, and multi-tenant Federated Learning and Federated " -"Analytics applications. Phew, that's a lot! Going forward, the Driver API" -" will be the abstraction that many upcoming features will be built on - " -"and you can start building those things now, too." 
+":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"Flower 现在有了一个新的(试验性的)驱动程序应用程序接口(Driver " -"API),它将支持完全可编程、异步和多租户的联邦学习(Federated Learning)和联邦分析(Federated " -"Analytics)应用程序。展望未来,Driver API 将成为许多即将推出的功能的抽象基础,您现在就可以开始构建这些功能。" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:511 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"The Driver API also enables a new execution mode in which the server runs" -" indefinitely. Multiple individual workloads can run concurrently and " -"start and stop their execution independent of the server. This is " -"especially useful for users who want to deploy Flower in production." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"驱动程序应用程序接口还支持一种新的执行模式,在这种模式下,服务器可无限期运行。多个单独的工作负载可以同时运行,并独立于服务器启动和停止执行。这对于希望在生产中部署" -" Flower 的用户来说尤其有用。" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:513 -msgid "" -"To learn more, check out the `mt-pytorch` code example. We look forward " -"to you feedback!" -msgstr "要了解更多信息,请查看 `mt-pytorch` 代码示例。我们期待您的反馈!" +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 +#, fuzzy +msgid "FedYogi" +msgstr "FedYogi" -#: ../../source/ref-changelog.md:515 -msgid "" -"Please note: *The Driver API is still experimental and will likely change" -" significantly over time.*" -msgstr "请注意:Driver API仍处于试验阶段,随着时间的推移可能会发生重大变化。*" +#: flwr.server.strategy.fedyogi.FedYogi:32 of +#, fuzzy +msgid "Server-side learning rate. Defaults to 1e-2." 
+msgstr "服务器端学习率。默认为 1e-1。" -#: ../../source/ref-changelog.md:517 -msgid "" -"**Add new Federated Analytics with Pandas example** " -"([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535))" -msgstr "" -"** 添加新的使用 Pandas " -"的联邦分析示例**([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535)" +#: flwr.server.strategy.fedyogi.FedYogi:34 of +#, fuzzy +msgid "Client-side learning rate. Defaults to 0.0316." +msgstr "客户端学习率。默认为 1e-1。" -#: ../../source/ref-changelog.md:519 -msgid "" -"A new code example (`quickstart-pandas`) demonstrates federated analytics" -" with Pandas and Flower. You can find it here: [quickstart-" -"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" -"pandas)." +#: flwr.server.strategy.fedyogi.FedYogi:40 of +#, fuzzy +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." +msgstr "控制算法的适应度。默认为 1e-9。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -"新代码示例(`quickstart-pandas`)演示了使用 Pandas 和 Flower 进行联邦分析。您可以在此处找到它: " -"[quickstart-pandas](https://github.com/adap/flower/tree/main/examples" -"/quickstart-pandas)。" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" -#: ../../source/ref-changelog.md:521 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Add new strategies: Krum and MultiKrum** " -"([#1481](https://github.com/adap/flower/pull/1481))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**添加新策略: Krum 和 MultiKrum** " -"([#1481](https://github.com/adap/flower/pull/1481))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:523 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" 
-"Edoardo, a computer science student at the Sapienza University of Rome, " -"contributed a new `Krum` strategy that enables users to easily use Krum " -"and MultiKrum in their workloads." +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -"罗马萨皮恩扎大学(Sapienza University)计算机科学专业的学生埃多尔多(Edoardo)提出了一种新的 \"Krum " -"\"策略,使用户能够在其工作负载中轻松使用 Krum 和 MultiKrum。" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" -#: ../../source/ref-changelog.md:525 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Update C++ example to be compatible with Flower v1.2.0** " -"([#1495](https://github.com/adap/flower/pull/1495))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"** 更新 C++ 示例,与 Flower v1.2.0 兼容** " -"([#1495](https://github.com/adap/flower/pull/1495))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:527 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"The C++ code example has received a substantial update to make it " -"compatible with the latest version of Flower." 
-msgstr "为了与最新版本的 Flower 兼容,C++ 示例代码进行了大幅更新。" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:529 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**General improvements** " -"([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github.com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**普通改进** ([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github. 
com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:533 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Updated documentation** " -"([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"** 更新文档** ([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:537 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"One highlight is the new [first time contributor " -"guide](https://flower.ai/docs/first-time-contributors.html): if you've " 
-"never contributed on GitHub before, this is the perfect place to start!" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"其中一个亮点是新的[首次贡献者指南](https://flower.ai/docs/first-time-" -"contributors.html):如果你以前从未在 GitHub 上做过贡献,这将是一个完美的开始!" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:543 -msgid "v1.1.0 (2022-10-31)" -msgstr "v1.1.0 (2022-10-31)" +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 +#, fuzzy +msgid "Krum" +msgstr "Krum" + +#: flwr.server.strategy.krum.Krum:3 of +#, fuzzy +msgid "Implementation based on https://arxiv.org/abs/1703.02757" +msgstr "实施基于 https://arxiv.org/abs/2304.07537。" -#: ../../source/ref-changelog.md:547 +#: flwr.server.strategy.krum.Krum:17 of msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made the new version of Flower possible (in `git shortlog` order):" -msgstr "在此,我们向所有促成 Flower 新版本的贡献者致以**特别的谢意(按 \"git shortlog \"顺序排列):" +"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" +" that case classical Krum is applied." +msgstr "求平均值前保留的客户端数量(MultiKrum)。默认值为 0,在这种情况下会应用经典 Krum。" -#: ../../source/ref-changelog.md:549 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " -"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " -"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " -"`danielnugraha`, `edogab33`" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -"`Akis Linardos`, `Christopher S`, `Daniel J. 
Beutel`, `George`, `Jan " -"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " -"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " -"`danielnugraha`, `edogab33`" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" -#: ../../source/ref-changelog.md:553 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Introduce Differential Privacy wrappers (preview)** " -"([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**引入差分隐私包装器(预览)** ([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of +msgid "Aggregate fit results using Krum." +msgstr "使用 Krum 汇总拟合结果。" -#: ../../source/ref-changelog.md:555 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"The first (experimental) preview of pluggable Differential Privacy " -"wrappers enables easy configuration and usage of differential privacy " -"(DP). The pluggable DP wrappers enable framework-agnostic **and** " -"strategy-agnostic usage of both client-side DP and server-side DP. Head " -"over to the Flower docs, a new explainer goes into more detail." 
+":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -"可插拔差分隐私封装器的首个(实验性)预览版可轻松配置和使用差分隐私(DP)。可插拔的差分隐私封装器可实现客户端差分隐私和服务器端差分隐私的框架无关**以及**策略无关的使用。请访问" -" Flower 文档,新的解释器会提供更多细节。" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" -#: ../../source/ref-changelog.md:557 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**New iOS CoreML code example** " -"([#1289](https://github.com/adap/flower/pull/1289))" -msgstr "**新的 iOS CoreML 代码示例**([#1289](https://github.com/adap/flower/pull/1289))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:559 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"Flower goes iOS! A massive new code example shows how Flower clients can " -"be built for iOS. The code example contains both Flower iOS SDK " -"components that can be used for many tasks, and one task example running " -"on CoreML." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"Flower 进入 iOS!大量新代码示例展示了如何为 iOS 构建 Flower 客户端。该代码示例包含可用于多种任务的 Flower iOS " -"SDK 组件,以及在 CoreML 上运行的一个任务示例。" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:561 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**New FedMedian strategy** " -"([#1461](https://github.com/adap/flower/pull/1461))" -msgstr "**新的联邦医疗策略** ([#1461](https://github.com/adap/flower/pull/1461))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:563 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"The new `FedMedian` strategy implements Federated Median (FedMedian) by " -"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"新的 \"FedMedian \"战略实现了[Yin " -"等人,2018]的联邦中值(FedMedian)(https://arxiv.org/pdf/1803.01498v1.pdf)。" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:565 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Log** `Client` **exceptions in Virtual Client Engine** " -"([#1493](https://github.com/adap/flower/pull/1493))" -msgstr "**虚拟客户端引擎中的**日志**`客户端`**异常([#1493](https://github.com/adap/flower/pull/1493))" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:567 -msgid "" -"All `Client` exceptions happening in the VCE are now logged by default " -"and not just exposed to the configured `Strategy` (via the `failures` " -"argument)." 
-msgstr "VCE 中发生的所有 \"客户端 \"异常现在都会被默认记录下来,而不只是暴露给配置的 `Strategy`(通过 `failures`参数)。" +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 +#, fuzzy +msgid "QFedAvg" +msgstr "DP-FedAvg" -#: ../../source/ref-changelog.md:569 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Improve Virtual Client Engine internals** " -"([#1401](https://github.com/adap/flower/pull/1401), " -"[#1453](https://github.com/adap/flower/pull/1453))" -msgstr "**改进虚拟客户端引擎内部**([#1401](https://github.com/adap/flower/pull/1401)、[#1453](https://github.com/adap/flower/pull/1453))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" +msgstr "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" -#: ../../source/ref-changelog.md:571 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"Some internals of the Virtual Client Engine have been revamped. The VCE " -"now uses Ray 2.0 under the hood, the value type of the `client_resources`" -" dictionary changed to `float` to allow fractions of resources to be " -"allocated." 
+":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"虚拟客户端引擎的部分内部结构已进行了修改。VCE 现在使用 Ray 2.0,\"client_resources \"字典的值类型改为 " -"\"float\",以允许分配分数资源。" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:573 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " -"Client Engine**" -msgstr "**支持虚拟客户端引擎中的可选** `Client`**/**`NumPyClient` **方法**" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" -#: ../../source/ref-changelog.md:575 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"The Virtual Client Engine now has full support for optional `Client` (and" -" `NumPyClient`) methods." -msgstr "虚拟客户端引擎现在完全支持可选的 `Client`(和 `NumPyClient`)方法。" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:577 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Provide type information to packages using** `flwr` " -"([#1377](https://github.com/adap/flower/pull/1377))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**使用** `flwr`向软件包提供类型信息 " -"([#1377](https://github.com/adap/flower/pull/1377))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:579 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"The package `flwr` is now bundled with a `py.typed` file indicating that " -"the package is typed. 
This enables typing support for projects or " -"packages that use `flwr` by enabling them to improve their code using " -"static type checkers like `mypy`." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"软件包 `flwr` 现在捆绑了一个 `py.typed` 文件,表明该软件包是类型化的。这样,使用 `flwr` 的项目或软件包就可以使用 " -"`mypy` 等静态类型检查器改进代码,从而获得类型支持。" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:581 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Updated code example** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"** 更新代码示例** ([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:583 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"The code examples covering scikit-learn and PyTorch Lightning have been " -"updated to work with the latest version of Flower." 
-msgstr "涵盖 scikit-learn 和 PyTorch Lightning 的代码示例已更新,以便与最新版本的 Flower 配合使用。" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" + +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 +#, fuzzy +msgid "Strategy" +msgstr "Krum 策略。" -#: ../../source/ref-changelog.md:585 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**Updated documentation** " -"([#1355](https://github.com/adap/flower/pull/1355), " -"[#1558](https://github.com/adap/flower/pull/1558), " -"[#1379](https://github.com/adap/flower/pull/1379), " -"[#1380](https://github.com/adap/flower/pull/1380), " -"[#1381](https://github.com/adap/flower/pull/1381), " -"[#1332](https://github.com/adap/flower/pull/1332), " -"[#1391](https://github.com/adap/flower/pull/1391), " -"[#1403](https://github.com/adap/flower/pull/1403), " -"[#1364](https://github.com/adap/flower/pull/1364), " -"[#1409](https://github.com/adap/flower/pull/1409), " -"[#1419](https://github.com/adap/flower/pull/1419), " -"[#1444](https://github.com/adap/flower/pull/1444), " -"[#1448](https://github.com/adap/flower/pull/1448), " -"[#1417](https://github.com/adap/flower/pull/1417), " -"[#1449](https://github.com/adap/flower/pull/1449), " -"[#1465](https://github.com/adap/flower/pull/1465), " -"[#1467](https://github.com/adap/flower/pull/1467))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**更新文档** ([#1355](https://github.com/adap/flower/pull/1355), " -"[#1558](https://github.com/adap/flower/pull/1558), " -"[#1379](https://github.com/adap/flower/pull/1379), " -"[#1380](https://github.com/adap/flower/pull/1380), " -"[#1381](https://github.com/adap/flower/pull/1381), " -"[#1332](https://github.com/adap/flower/pull/1332), " -"[#1391](https://github.com/adap/flower/pull/1391), " -"[#1403](https://github.com/adap/flower/pull/1403), " 
-"[#1364](https://github. com/adap/flower/pull/1364), " -"[#1409](https://github.com/adap/flower/pull/1409), " -"[#1419](https://github.com/adap/flower/pull/1419), " -"[#1444](https://github.com/adap/flower/pull/1444), " -"[#1448](https://github.com/adap/flower/pull/1448), " -"[#1417](https://github.com/adap/flower/pull/1417), " -"[#1449](https://github.com/adap/flower/pull/1449), " -"[#1465](https://github.com/adap/flower/pull/1465), " -"[#1467](https://github.com/adap/flower/pull/1467))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:587 -msgid "" -"There have been so many documentation updates that it doesn't even make " -"sense to list them individually." -msgstr "文档更新的数量之多,甚至没有必要逐一列出。" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation results." +msgstr "聚合评估结果。" -#: ../../source/ref-changelog.md:589 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**Restructured documentation** " -"([#1387](https://github.com/adap/flower/pull/1387))" -msgstr "**重构文档**([#1387](https://github.com/adap/flower/pull/1387))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:591 -msgid "" -"The documentation has been restructured to make it easier to navigate. " -"This is just the first step in a larger effort to make the Flower " -"documentation the best documentation of any project ever. Stay tuned!" -msgstr "我们对文档进行了重组,使其更易于浏览。这只是让 Flower 文档成为所有项目中最好文档的第一步。敬请期待!" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of +msgid "Aggregate training results." 
+msgstr "汇总训练结果。" -#: ../../source/ref-changelog.md:593 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**Open in Colab button** " -"([#1389](https://github.com/adap/flower/pull/1389))" -msgstr "**在 Colab 中打开按钮** ([#1389](https://github.com/adap/flower/pull/1389))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:595 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"The four parts of the Flower Federated Learning Tutorial now come with a " -"new `Open in Colab` button. No need to install anything on your local " -"machine, you can now use and learn about Flower in your browser, it's " -"only a single click away." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Flower 联邦学习教程的四个部分现在都带有一个新的 \"在 Colab 中打开 " -"\"按钮。现在,您无需在本地计算机上安装任何软件,只需点击一下,就可以在浏览器中使用和学习 Flower。" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:597 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " -"[#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " 
-"[#1475](https://github.com/adap/flower/pull/1475)))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.evaluate:1 of +msgid "Evaluate the current model parameters." +msgstr "评估当前的模型参数。" -#: ../../source/ref-changelog.md:599 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"The Flower Federated Learning Tutorial has two brand-new parts covering " -"custom strategies (still WIP) and the distinction between `Client` and " -"`NumPyClient`. The existing parts one and two have also been improved " -"(many small changes and fixes)." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"Flower 联邦学习教程有两个全新的部分,涉及自定义策略(仍处于 WIP 阶段)和 `Client` 与 `NumPyClient` " -"之间的区别。现有的第一和第二部分也得到了改进(许多小改动和修正)。" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:605 -msgid "v1.0.0 (2022-07-28)" -msgstr "v1.0.0 (2022-07-28)" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of +msgid "Initialize the (global) model parameters." 
+msgstr "初始化(全局)模型参数。" -#: ../../source/ref-changelog.md:607 -msgid "Highlights" -msgstr "亮点" - -#: ../../source/ref-changelog.md:609 -msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" -msgstr "稳定的**虚拟客户端引擎**(可通过`start_simulation`访问)" - -#: ../../source/ref-changelog.md:610 -msgid "All `Client`/`NumPyClient` methods are now optional" -msgstr "所有 `Client`/`NumPyClient` 方法现在都是可选的了" - -#: ../../source/ref-changelog.md:611 -msgid "Configurable `get_parameters`" -msgstr "可配置的`get_parameters`" - -#: ../../source/ref-changelog.md:612 -msgid "" -"Tons of small API cleanups resulting in a more coherent developer " -"experience" -msgstr "对大量小型应用程序接口进行了清理,使开发人员的体验更加一致" - -#: ../../source/ref-changelog.md:616 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made Flower 1.0 possible (in reverse [GitHub " -"Contributors](https://github.com/adap/flower/graphs/contributors) order):" +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " +"one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." 
msgstr "" -"在此,我们谨向所有促成 Flower 1.0 的贡献者致以**特别的谢意(按[GitHub " -"贡献者](https://github.com/adap/flower/graphs/contributors) 倒序排列):" +"从先前选定和配置的客户端进行的成功更新。每一对`(ClientProxy, " +"FitRes)`都是来自先前选定客户端的一次成功更新。但并非所有先前选定的客户机都一定包含在此列表中:客户机可能会退出,不提交结果。对于每个没有提交更新的客户端,`failures`中都应该有一个`Exception`。" -#: ../../source/ref-changelog.md:618 -msgid "" -"[@rtaiello](https://github.com/rtaiello), " -"[@g-pichler](https://github.com/g-pichler), [@rob-" -"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" -"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " -"[@nfnt](https://github.com/nfnt), " -"[@tatiana-s](https://github.com/tatiana-s), " -"[@TParcollet](https://github.com/TParcollet), " -"[@vballoli](https://github.com/vballoli), " -"[@negedng](https://github.com/negedng), " -"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " -"[@hei411](https://github.com/hei411), " -"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " -"[@AmitChaulwar](https://github.com/AmitChaulwar), " -"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" -"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " -"[@lbhm](https://github.com/lbhm), " -"[@sishtiaq](https://github.com/sishtiaq), " -"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" -"/Jueun-Park), [@architjen](https://github.com/architjen), " -"[@PratikGarai](https://github.com/PratikGarai), " -"[@mrinaald](https://github.com/mrinaald), " -"[@zliel](https://github.com/zliel), " -"[@MeiruiJiang](https://github.com/MeiruiJiang), " -"[@sancarlim](https://github.com/sancarlim), " -"[@gubertoli](https://github.com/gubertoli), " -"[@Vingt100](https://github.com/Vingt100), " -"[@MakGulati](https://github.com/MakGulati), " -"[@cozek](https://github.com/cozek), " -"[@jafermarq](https://github.com/jafermarq), " -"[@sisco0](https://github.com/sisco0), " -"[@akhilmathurs](https://github.com/akhilmathurs), " -"[@CanTuerk](https://github.com/CanTuerk), " 
-"[@mariaboerner1987](https://github.com/mariaboerner1987), " -"[@pedropgusmao](https://github.com/pedropgusmao), " -"[@tanertopal](https://github.com/tanertopal), " -"[@danieljanes](https://github.com/danieljanes)." -msgstr "" -"[@rtaiello](https://github.com/rtaiello), " -"[@g-pichler](https://github.com/g-pichler), [@rob-" -"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" -"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " -"[@nfnt](https://github.com/nfnt), " -"[@tatiana-s](https://github.com/tatiana-s), " -"[@TParcollet](https://github.com/TParcollet), " -"[@vballoli](https://github.com/vballoli), " -"[@negedng](https://github.com/negedng), " -"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " -"[@hei411](https://github.com/hei411), " -"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " -"[@AmitChaulwar](https://github.com/AmitChaulwar), " -"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" -"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " -"[@lbhm](https://github.com/lbhm), " -"[@sishtiaq](https://github.com/sishtiaq), " -"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" -"/Jueun-Park), [@architjen](https://github.com/architjen), " -"[@PratikGarai](https://github.com/PratikGarai), " -"[@mrinaald](https://github.com/mrinaald), " -"[@zliel](https://github.com/zliel), " -"[@MeiruiJiang](https://github.com/MeiruiJiang), " -"[@sancarlim](https://github.com/sancarlim), " -"[@gubertoli](https://github.com/gubertoli), " -"[@Vingt100](https://github.com/Vingt100), " -"[@MakGulati](https://github.com/MakGulati), " -"[@cozek](https://github.com/cozek), " -"[@jafermarq](https://github.com/jafermarq), " -"[@sisco0](https://github.com/sisco0), " -"[@akhilmathurs](https://github.com/akhilmathurs), " -"[@CanTuerk](https://github.com/CanTuerk), " -"[@mariaboerner1987](https://github.com/mariaboerner1987), " -"[@pedropgusmao](https://github.com/pedropgusmao), " 
-"[@tanertopal](https://github.com/tanertopal), " -"[@danieljanes](https://github.com/danieljanes)." +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of +msgid "Exceptions that occurred while the server was waiting for client updates." +msgstr "服务器等待客户端更新时发生的异常。" -#: ../../source/ref-changelog.md:622 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of msgid "" -"**All arguments must be passed as keyword arguments** " -"([#1338](https://github.com/adap/flower/pull/1338))" -msgstr "** 所有参数必须作为关键字参数传递** ([#1338](https://github.com/adap/flower/pull/1338))" +"**aggregation_result** -- The aggregated evaluation result. Aggregation " +"typically uses some variant of a weighted average." +msgstr "**aggregation_result** -- 汇总的评估结果。聚合通常使用某种加权平均值。" -#: ../../source/ref-changelog.md:624 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of msgid "" -"Pass all arguments as keyword arguments, positional arguments are not " -"longer supported. Code that uses positional arguments (e.g., " -"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " -"for each positional argument (e.g., " -"`start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())`)." +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" +" one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." 
msgstr "" -"以关键字参数传递所有参数,不再支持位置参数。使用位置参数的代码(例如,`start_client(\"127.0.0.1:8080\", " -"FlowerClient())`)必须为每个位置参数添加关键字(例如,`start_client(server_address=\"127.0.0.1:8080\"," -" client=FlowerClient())`)。" +"来自先前选定和配置的客户端的成功更新。每一对`(ClientProxy, " +"FitRes)`都构成先前选定的客户端之一的一次成功更新。但并非所有先前选定的客户机都一定包含在此列表中:客户机可能会退出,不提交结果。对于每个没有提交更新的客户端,\"失败" +" \"中都应该有一个 \"异常\"。" -#: ../../source/ref-changelog.md:626 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of msgid "" -"**Introduce configuration object** `ServerConfig` **in** `start_server` " -"**and** `start_simulation` " -"([#1317](https://github.com/adap/flower/pull/1317))" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the new global model parameters (i.e., it will replace the " +"previous parameters with the ones returned from this method). If `None` " +"is returned (e.g., because there were only failures and no viable " +"results) then the server will no update the previous model parameters, " +"the updates received in this round are discarded, and the global model " +"parameters remain the same." msgstr "" -"**在*** `start_server` ***和*** `start_simulation` 中引入配置对象*** " -"`ServerConfig` ([#1317](https://github.com/adap/flower/pull/1317))" +"**parameters** -- 如果返回参数,那么服务器将把这些参数作为新的全局模型参数(即用本方法返回的参数替换之前的参数)。如果返回 " +"\"无\"(例如,因为只有失败而没有可行的结果),那么服务器将不再更新之前的模型参数,本轮收到的更新将被丢弃,全局模型参数保持不变。" -#: ../../source/ref-changelog.md:628 +#: flwr.server.strategy.strategy.Strategy.evaluate:3 of msgid "" -"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " -"600.0}`, `start_server` and `start_simulation` now expect a configuration" -" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" -" arguments that as the previous config dict, but it makes writing type-" -"safe code easier and the default parameters values more transparent." 
-msgstr "" -"并非配置字典`{\"num_rounds\": 3, \"round_timeout\": 600.0}`, `start_server`和 " -"`start_simulation`现在用一个类型为 " -"`flwr.server.ServerConfig`的配置对象。`ServerConfig`接收的参数与之前的 config dict " -"相同,但它使编写类型安全代码变得更容易,默认参数值也更加透明。" +"This function can be used to perform centralized (i.e., server-side) " +"evaluation of model parameters." +msgstr "该函数可用于对模型参数进行集中(即服务器端)评估。" -#: ../../source/ref-changelog.md:630 +#: flwr.server.strategy.strategy.Strategy.evaluate:11 of msgid "" -"**Rename built-in strategy parameters for clarity** " -"([#1334](https://github.com/adap/flower/pull/1334))" -msgstr "**重新命名内置策略参数,使其更加清晰** ([#1334](https://github.com/adap/flower/pull/1334))" +"**evaluation_result** -- The evaluation result, usually a Tuple " +"containing loss and a dictionary containing task-specific metrics (e.g., " +"accuracy)." +msgstr "**evaluation_result** -- 评估结果,通常是一个元组,包含损失值和一个字典,字典中包含特定任务的指标(如准确率)。" -#: ../../source/ref-changelog.md:632 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of msgid "" -"The following built-in strategy parameters were renamed to improve " -"readability and consistency with other API's:" -msgstr "以下内置策略参数已重新命名,以提高可读性并与其他 API 保持一致:" - -#: ../../source/ref-changelog.md:634 -msgid "`fraction_eval` --> `fraction_evaluate`" -msgstr "`fraction_eval` --> `fraction_evaluate`" - -#: ../../source/ref-changelog.md:635 -msgid "`min_eval_clients` --> `min_evaluate_clients`" -msgstr "`min_eval_clients` --> `min_evaluate_clients`" - -#: ../../source/ref-changelog.md:636 -msgid "`eval_fn` --> `evaluate_fn`" -msgstr "`eval_fn` --> `evaluate_fn`" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the initial global model parameters." 
+msgstr "**parameters** -- 如果返回参数,服务器将把这些参数视为初始全局模型参数。" -#: ../../source/ref-changelog.md:638 -msgid "" -"**Update default arguments of built-in strategies** " -"([#1278](https://github.com/adap/flower/pull/1278))" -msgstr "**更新内置策略的默认参数** ([#1278](https://github.com/adap/flower/pull/1278))" +#: ../../source/ref-api/flwr.server.workflow.rst:2 +#, fuzzy +msgid "workflow" +msgstr "工作流程" -#: ../../source/ref-changelog.md:640 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#, fuzzy msgid "" -"All built-in strategies now use `fraction_fit=1.0` and " -"`fraction_evaluate=1.0`, which means they select *all* currently " -"available clients for training and evaluation. Projects that relied on " -"the previous default values can get the previous behaviour by " -"initializing the strategy in the following way:" +":py:obj:`DefaultWorkflow `\\ " +"\\(\\[fit\\_workflow\\, ...\\]\\)" msgstr "" -"所有内置策略现在都使用 \"fraction_fit=1.0 \"和 " -"\"fraction_evaluate=1.0\",这意味着它们会选择*所有*当前可用的客户端进行训练和评估。依赖以前默认值的项目可以通过以下方式初始化策略,获得以前的行为:" +":py:obj:`DefaultWorkflow `\\ " +"\\(\\[fit\\_workflow\\, ...\\]\\)" -#: ../../source/ref-changelog.md:642 -msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" -msgstr "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of +#, fuzzy +msgid "Default workflow in Flower." 
+msgstr "Flower 中的默认工作流程。" -#: ../../source/ref-changelog.md:644 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#, fuzzy msgid "" -"**Add** `server_round` **to** `Strategy.evaluate` " -"([#1334](https://github.com/adap/flower/pull/1334))" +":py:obj:`SecAggPlusWorkflow `\\ " +"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" msgstr "" -"**添加*** `server_round` ***到*** `Strategy.evaluate` " -"([#1334](https://github.com/adap/flower/pull/1334))" +":py:obj:`SecAggPlusWorkflow `\\ " +"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" -#: ../../source/ref-changelog.md:646 -msgid "" -"The `Strategy` method `evaluate` now receives the current round of " -"federated learning/evaluation as the first parameter." -msgstr "`Strategy`的`evaluate` 方法现在会接收当前一轮联邦学习/评估作为第一个参数。" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +#, fuzzy +msgid "The workflow for the SecAgg+ protocol." +msgstr "SecAgg+ 协议的工作流程。" -#: ../../source/ref-changelog.md:648 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#, fuzzy msgid "" -"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " -"([#1334](https://github.com/adap/flower/pull/1334))" +":py:obj:`SecAggWorkflow `\\ " +"\\(reconstruction\\_threshold\\, \\*\\)" msgstr "" -"**将*** `server_round` **和*** `config` **参数添加到*** `evaluate_fn` " -"([#1334](https://github.com/adap/flower/pull/1334))" +":py:obj:`SecAggWorkflow `\\ " +"\\(reconstruction\\_threshold\\, \\*\\)" -#: ../../source/ref-changelog.md:650 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +#, fuzzy +msgid "The workflow for the SecAgg protocol." 
+msgstr "SecAgg 协议的工作流程。" + +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 +#, fuzzy +msgid "DefaultWorkflow" +msgstr "工作流程" + +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 +#, fuzzy +msgid "SecAggPlusWorkflow" +msgstr "工作流程" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 +#: of +#, fuzzy msgid "" -"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " -"three parameters: (1) The current round of federated learning/evaluation " -"(`server_round`), (2) the model parameters to evaluate (`parameters`), " -"and (3) a config dictionary (`config`)." +"The SecAgg+ protocol ensures the secure summation of integer vectors " +"owned by multiple parties, without accessing any individual integer " +"vector. This workflow allows the server to compute the weighted average " +"of model parameters across all clients, ensuring individual contributions" +" remain private. This is achieved by clients sending both, a weighting " +"factor and a weighted version of the locally updated parameters, both of " +"which are masked for privacy. Specifically, each client uploads \"[w, w *" +" params]\" with masks, where weighting factor 'w' is the number of " +"examples ('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." 
msgstr "" -"传递给内置策略(如 `FedAvg`)的 `evaluate_fn` 现在需要三个参数:(1) 当前一轮联邦学习/评估 " -"(`server_round`),(2) 要评估的模型参数 (`parameters`),(3) 配置字典 (`config`)。" +"SecAgg+ " +"协议可确保对多方拥有的整数向量进行安全求和,而不会访问任何单个整数向量。该工作流程允许服务器计算所有客户端模型参数的加权平均值,确保个人贡献保持私密。这可以通过客户端同时发送加权因子和本地更新参数的加权版本来实现,为了保护隐私,两者都会被屏蔽。具体来说,每个客户端都会上传带掩码的\"[w," +" w * params]\",其中加权因子 \"w \"是示例数(\"num_examples\"),\"params \"代表客户端 " +"\"FitRes \"中的模型参数(\"parameters\")。然后,服务器会汇总这些贡献,计算模型参数的加权平均值。" -#: ../../source/ref-changelog.md:652 -msgid "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 +#: of +msgid "The protocol involves four main stages:" msgstr "" -"**重新命名** `rnd` ** to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/ref-changelog.md:654 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:16 +#: of msgid "" -"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " -"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " -"current round of federated learning/evaluation as their first parameter. " -"To improve reaability and avoid confusion with *random*, this parameter " -"has been renamed from `rnd` to `server_round`." +"'setup': Send SecAgg+ configuration to clients and collect their public " +"keys." 
msgstr "" -"几个 Flower " -"方法和函数(`evaluate_fn`、`configure_fit`、`aggregate_fit`、`configure_evaluate`、`aggregate_evaluate`)的第一个参数是当前一轮的联邦学习/评估。为提高可重复性并避免与" -" *random* 混淆,该参数已从 `rnd` 更名为 `server_round`。" -#: ../../source/ref-changelog.md:656 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 +#: of msgid "" -"**Move** `flwr.dataset` **to** `flwr_baselines` " -"([#1273](https://github.com/adap/flower/pull/1273))" +"'share keys': Broadcast public keys among clients and collect encrypted " +"secret key shares." msgstr "" -"**移动*** `flwr.dataset` **到*** `flwr_baselines` " -"([#1273](https://github.com/adap/flower/pull/1273))" -#: ../../source/ref-changelog.md:658 -msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." -msgstr "实验软件包 `flwr.dataset` 已迁移至 Flower Baselines。" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:19 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:19 +#: of +#, fuzzy +msgid "" +"'collect masked vectors': Forward encrypted secret key shares to target " +"clients and collect masked model parameters." +msgstr "收集屏蔽向量\": 向目标客户端转发加密密钥共享,并收集屏蔽模型参数。" -#: ../../source/ref-changelog.md:660 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:21 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:21 +#: of +#, fuzzy msgid "" -"**Remove experimental strategies** " -"([#1280](https://github.com/adap/flower/pull/1280))" -msgstr "**删除实验策略** ([#1280](https://github.com/adap/flower/pull/1280))" +"'unmask': Collect secret key shares to decrypt and aggregate the model " +"parameters." 
+msgstr "解密\": 收集密钥共享,解密并汇总模型参数。" -#: ../../source/ref-changelog.md:662 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:23 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:23 +#: of +#, fuzzy msgid "" -"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " -"`FedFSv1`)." -msgstr "移除未维护的试验性策略(`FastAndSlow`、`FedFSv0`、`FedFSv1`)。" +"Only the aggregated model parameters are exposed and passed to " +"`Strategy.aggregate_fit`, ensuring individual data privacy." +msgstr "只有聚合模型参数才会公开并传递给 `Strategy.aggregate_fit`,从而确保个人数据隐私。" -#: ../../source/ref-changelog.md:664 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:26 +#: of +#, fuzzy msgid "" -"**Rename** `Weights` **to** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"The number of shares into which each client's private key is split under " +"the SecAgg+ protocol. If specified as a float, it represents the " +"proportion of all selected clients, and the number of shares will be set " +"dynamically in the run time. A private key can be reconstructed from " +"these shares, allowing for the secure aggregation of model updates. Each " +"client sends one share to each of its neighbors while retaining one." msgstr "" -"**重新命名** `Weights` **到** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"在 SecAgg+ " +"协议下,每个客户的私钥被分成的份数。如果指定为浮点数,则代表所有选定客户的比例,份额数将在运行时动态设置。私钥可以从这些份额中重建,从而实现模型更新的安全聚合。每个客户端向其每个邻居发送一份,同时保留一份。" -#: ../../source/ref-changelog.md:666 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:26 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:33 +#: of +#, fuzzy msgid "" -"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " -"capture what this type is all about." 
-msgstr "flwr.common.Weights \"更名为 \"flwr.common.NDArrays\",以更好地反映该类型的含义。" +"The minimum number of shares required to reconstruct a client's private " +"key, or, if specified as a float, it represents the proportion of the " +"total number of shares needed for reconstruction. This threshold ensures " +"privacy by allowing for the recovery of contributions from dropped " +"clients during aggregation, without compromising individual client data." +msgstr "重建客户私钥所需的最小份数,如果指定为浮动,则表示重建所需的份数占总份数的比例。这个阈值允许在聚合过程中恢复掉线客户的贡献,从而确保隐私,而不会泄露单个客户的数据。" -#: ../../source/ref-changelog.md:668 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:32 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:39 +#: of +#, fuzzy msgid "" -"**Remove antiquated** `force_final_distributed_eval` **from** " -"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" -msgstr "" -"**从** `start_server` 中移除过时的** `force_final_distributed_eval` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"The maximum value of the weight that can be assigned to any single " +"client's update during the weighted average calculation on the server " +"side, e.g., in the FedAvg algorithm." +msgstr "在服务器端进行加权平均计算(如 FedAvg 算法)时,可分配给任何单个客户端更新的权重的最大值。" -#: ../../source/ref-changelog.md:670 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:36 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:43 +#: of +#, fuzzy msgid "" -"The `start_server` parameter `force_final_distributed_eval` has long been" -" a historic artefact, in this release it is finally gone for good." -msgstr "" -"start_server \"参数 \"force_final_distributed_eval " -"\"长期以来一直是个历史遗留问题,在此版本中终于永远消失了。" +"The range within which model parameters are clipped before quantization. 
" +"This parameter ensures each model parameter is bounded within " +"[-clipping_range, clipping_range], facilitating quantization." +msgstr "量化前模型参数的裁剪范围。该参数可确保每个模型参数都在 [-clipping_range, clipping_range] 范围内,便于量化。" -#: ../../source/ref-changelog.md:672 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:40 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:47 +#: of +#, fuzzy msgid "" -"**Make** `get_parameters` **configurable** " -"([#1242](https://github.com/adap/flower/pull/1242))" -msgstr "" -"**使** `get_parameters` **可配置** " -"([#1242](https://github.com/adap/flower/pull/1242))" +"The size of the range into which floating-point model parameters are " +"quantized, mapping each parameter to an integer in [0, " +"quantization_range-1]. This facilitates cryptographic operations on the " +"model updates." +msgstr "浮点模型参数量化范围的大小,将每个参数映射为 [0, quantization_range-1] 中的整数。这有助于对模型更新进行加密操作。" -#: ../../source/ref-changelog.md:674 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:44 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:51 +#: of +#, fuzzy msgid "" -"The `get_parameters` method now accepts a configuration dictionary, just " -"like `get_properties`, `fit`, and `evaluate`." +"The range of values from which random mask entries are uniformly sampled " +"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " +"Please use 2**n values for `modulus_range` to prevent overflow issues." 
msgstr "" -"现在,\"get_parameters \"方法与 \"get_properties\"、\"fit \"和 \"evaluate " -"\"一样,都接受配置字典。" +"对随机掩码条目进行均匀采样的数值范围([0, modulus_range-1])。modulus_range \"必须小于 " +"4294967296。为防止出现溢出问题,请为 `modulus_range` 使用 2**n 的值。" -#: ../../source/ref-changelog.md:676 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:48 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:55 +#: of +#, fuzzy msgid "" -"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " -"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" -msgstr "" -"**用新的** `config` 参数** 替换** `num_rounds` ** in** `start_simulation` ** " -"([#1281](https://github.com/adap/flower/pull/1281))" +"The timeout duration in seconds. If specified, the workflow will wait for" +" replies for this duration each time. If `None`, there is no time limit " +"and the workflow will wait until replies for all messages are received." +msgstr "超时时间(秒)。如果指定,工作流将在每次等待回复的时间内等待回复。如果指定为 \"无\",则没有时间限制,工作流程将一直等待到收到所有信息的回复。" -#: ../../source/ref-changelog.md:678 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:62 +#: of +#, fuzzy msgid "" -"The `start_simulation` function now accepts a configuration dictionary " -"`config` instead of the `num_rounds` integer. This improves the " -"consistency between `start_simulation` and `start_server` and makes " -"transitioning between the two easier." -msgstr "" -"现在,`start_simulation`(开始模拟)` 函数接受配置字典 `config` 而不是 `num_rounds` 整数。这改进了 " -"`start_simulation` 和 `start_server` 之间的一致性,并使两者之间的转换更容易。" +"Generally, higher `num_shares` means more robust to dropouts while " +"increasing the computational costs; higher `reconstruction_threshold` " +"means better privacy guarantees but less tolerance to dropouts." 
+msgstr "一般来说,\"份额数 \"越高,意味着对丢弃的鲁棒性越强,同时计算成本也会增加;\"重构阈值 \"越高,意味着隐私保证越好,但对丢弃的容忍度越低。" -#: ../../source/ref-changelog.md:682 -msgid "" -"**Support Python 3.10** " -"([#1320](https://github.com/adap/flower/pull/1320))" -msgstr "** 支持 Python 3.10** ([#1320](https://github.com/adap/flower/pull/1320))" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 +#: of +#, fuzzy +msgid "Too large `max_weight` may compromise the precision of the quantization." +msgstr "过大的 `max_weight` 可能会影响量化的精度。" -#: ../../source/ref-changelog.md:684 -msgid "" -"The previous Flower release introduced experimental support for Python " -"3.10, this release declares Python 3.10 support as stable." -msgstr "上一个 Flower 版本引入了对 Python 3.10 的实验支持,而本版本则宣布对 Python 3.10 的支持为稳定支持。" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 +#: of +#, fuzzy +msgid "`modulus_range` must be 2**n and larger than `quantization_range`." +msgstr "modulus_range \"必须为 2**n,且大于 \"quantization_range\"。" -#: ../../source/ref-changelog.md:686 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:67 +#: of +#, fuzzy msgid "" -"**Make all** `Client` **and** `NumPyClient` **methods optional** " -"([#1260](https://github.com/adap/flower/pull/1260), " -"[#1277](https://github.com/adap/flower/pull/1277))" -msgstr "" -"**使所有** `Client` **和** `NumPyClient` **方法成为可选** " -"([#1260](https://github.com/adap/flower/pull/1260), " -"[#1277](https://github.com/adap/flower/pull/1277))" +"When `num_shares` is a float, it is interpreted as the proportion of all " +"selected clients, and hence the number of shares will be determined in " +"the runtime. This allows for dynamic adjustment based on the total number" +" of participating clients." 
+msgstr "当 `num_shares` 为浮点数时,它被解释为所有选定客户端的比例,因此份额数将在运行时确定。这样就可以根据参与客户端的总数进行动态调整。" -#: ../../source/ref-changelog.md:688 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:70 +#: of +#, fuzzy msgid "" -"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " -"`fit`, and `evaluate` are all optional. This enables writing clients that" -" implement, for example, only `fit`, but no other method. No need to " -"implement `evaluate` when using centralized evaluation!" +"Similarly, when `reconstruction_threshold` is a float, it is interpreted " +"as the proportion of the number of shares needed for the reconstruction " +"of a private key. This feature enables flexibility in setting the " +"security threshold relative to the number of distributed shares." msgstr "" -"`Client`/`NumPyClient`的 \"get_properties\"、\"get_parameters\"、\"fit \"和 " -"\"evaluate \"方法都是可选的。这样就可以编写只实现 `fit` 而不实现其他方法的客户端。使用集中评估时,无需实现 " -"`evaluate`!" +"同样,当 `reconstruction_threshold` " +"为浮点数时,它被解释为重建私钥所需的份额数比例。这一功能使我们可以根据分发的份额数灵活设置安全阈值。" -#: ../../source/ref-changelog.md:690 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:74 +#: of +#, fuzzy msgid "" -"**Enable passing a** `Server` **instance to** `start_simulation` " -"([#1281](https://github.com/adap/flower/pull/1281))" +"`num_shares`, `reconstruction_threshold`, and the quantization parameters" +" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg+" +" protocol." 
msgstr "" -"**启用向** `start_simulation` 传递** `Server` 实例 " -"([#1281](https://github.com/adap/flower/pull/1281))" +"份额数\"、\"重建阈值 \"和量化参数(\"裁剪范围\"、\"量化范围\"、\"模数范围\")在平衡 SecAgg+ " +"协议的隐私性、稳健性和效率方面发挥着关键作用。" -#: ../../source/ref-changelog.md:692 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +#, fuzzy msgid "" -"Similar to `start_server`, `start_simulation` now accepts a full `Server`" -" instance. This enables users to heavily customize the execution of " -"eperiments and opens the door to running, for example, async FL using the" -" Virtual Client Engine." +":py:obj:`collect_masked_vectors_stage " +"`\\" +" \\(driver\\, ...\\)" msgstr "" -"与 `start_server` 类似,`start_simulation` 现在也接受一个完整的 `Server` " -"实例。这使得用户可以对实验的执行进行大量自定义,并为使用虚拟客户端引擎运行异步 FL 等打开了大门。" +":py:obj:`collect_masked_vectors_stage " +"`\\" +" \\(driver\\, ...\\)" -#: ../../source/ref-changelog.md:694 -msgid "" -"**Update code examples** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" -msgstr "" -"**更新代码示例** ([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +#, fuzzy +msgid "Execute the 'collect masked vectors' stage." 
+msgstr "执行 \"收集屏蔽向量 \"阶段。" -#: ../../source/ref-changelog.md:696 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +#, fuzzy msgid "" -"Many code examples received small or even large maintenance updates, " -"among them are" -msgstr "许多代码示例都进行了小规模甚至大规模的维护更新,其中包括" - -#: ../../source/ref-changelog.md:698 -msgid "`scikit-learn`" -msgstr "`scikit-learn`" +":py:obj:`setup_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" +msgstr "" +":py:obj:`setup_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" -#: ../../source/ref-changelog.md:699 -msgid "`simulation_pytorch`" -msgstr "`simulation_pytorch`" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 +#: of +#, fuzzy +msgid "Execute the 'setup' stage." +msgstr "执行 \"设置 \"阶段。" -#: ../../source/ref-changelog.md:700 -msgid "`quickstart_pytorch`" -msgstr "`quickstart_pytorch`" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +#, fuzzy +msgid "" +":py:obj:`share_keys_stage " +"`\\ " +"\\(driver\\, context\\, state\\)" +msgstr "" +":py:obj:`share_keys_stage " +"`\\ " +"\\(driver\\, context\\, state\\)" -#: ../../source/ref-changelog.md:701 -msgid "`quickstart_simulation`" -msgstr "`quickstart_simulation`" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 +#: of +#, fuzzy +msgid "Execute the 'share keys' stage." 
+msgstr "执行 \"共享密钥 \"阶段。" -#: ../../source/ref-changelog.md:702 -msgid "`quickstart_tensorflow`" -msgstr "`quickstart_tensorflow`" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +#, fuzzy +msgid "" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" +msgstr "" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" -#: ../../source/ref-changelog.md:703 -msgid "`advanced_tensorflow`" -msgstr "`advanced_tensorflow`" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 +#: of +#, fuzzy +msgid "Execute the 'unmask' stage." +msgstr "执行 \"解除屏蔽 \"阶段。" -#: ../../source/ref-changelog.md:705 -msgid "" -"**Remove the obsolete simulation example** " -"([#1328](https://github.com/adap/flower/pull/1328))" -msgstr "**删除过时的模拟示例** ([#1328](https://github.com/adap/flower/pull/1328))" +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 +#, fuzzy +msgid "SecAggWorkflow" +msgstr "工作流程" -#: ../../source/ref-changelog.md:707 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +#, fuzzy msgid "" -"Removes the obsolete `simulation` example and renames " -"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " -"naming of `simulation_pytorch`" +"Bases: " +":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" msgstr "" -"删除过时的 \"simulation \"示例,并将 \"quickstart_simulation \"重命名为 " -"\"simulation_tensorflow\",使其与 \"simulation_pytorch \"的命名一致" +"基础: " +":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`." 
-#: ../../source/ref-changelog.md:709 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of +#, fuzzy msgid "" -"**Update documentation** " -"([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" +"The SecAgg protocol ensures the secure summation of integer vectors owned" +" by multiple parties, without accessing any individual integer vector. " +"This workflow allows the server to compute the weighted average of model " +"parameters across all clients, ensuring individual contributions remain " +"private. This is achieved by clients sending both, a weighting factor and" +" a weighted version of the locally updated parameters, both of which are " +"masked for privacy. Specifically, each client uploads \"[w, w * params]\"" +" with masks, where weighting factor 'w' is the number of examples " +"('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." 
msgstr "" -"**更新文档** ([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" +"SecAgg " +"协议可确保对多方拥有的整数向量进行安全求和,而不会访问任何单个整数向量。该工作流程允许服务器计算所有客户端模型参数的加权平均值,确保个人贡献保持私密。这可以通过客户端同时发送加权因子和本地更新参数的加权版本来实现,为了保护隐私,两者都会被屏蔽。具体来说,每个客户端都会上传带掩码的\"[w," +" w * params]\",其中加权因子 \"w \"是示例数(\"num_examples\"),\"params \"代表客户端 " +"\"FitRes \"中的模型参数(\"parameters\")。然后,服务器会汇总这些贡献,计算模型参数的加权平均值。" -#: ../../source/ref-changelog.md:711 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:16 of msgid "" -"One substantial documentation update fixes multiple smaller rendering " -"issues, makes titles more succinct to improve navigation, removes a " -"deprecated library, updates documentation dependencies, includes the " -"`flwr.common` module in the API reference, includes support for markdown-" -"based documentation, migrates the changelog from `.rst` to `.md`, and " -"fixes a number of smaller details!" +"'setup': Send SecAgg configuration to clients and collect their public " +"keys." msgstr "" -"其中一个实质性的文档更新修复了多个较小的渲染问题,使标题更加简洁以改善导航,删除了一个已废弃的库,更新了文档依赖关系,在 API 参考中包含了 " -"`flwr.common` 模块,包含了对基于 markdown 的文档的支持,将更新日志从 `.rst` 移植到了 " -"`.md`,并修复了一些较小的细节!" 
-#: ../../source/ref-changelog.md:713 ../../source/ref-changelog.md:768 -#: ../../source/ref-changelog.md:837 ../../source/ref-changelog.md:876 -msgid "**Minor updates**" -msgstr "**小规模更新**" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:55 of +#, fuzzy +msgid "" +"Each client's private key is split into N shares under the SecAgg " +"protocol, where N is the number of selected clients." +msgstr "根据 SecAgg 协议,每个客户的私人密钥被分成 N 份,其中 N 是所选客户的数量。" -#: ../../source/ref-changelog.md:715 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:57 of +#, fuzzy msgid "" -"Add round number to fit and evaluate log messages " -"([#1266](https://github.com/adap/flower/pull/1266))" -msgstr "添加四舍五入数字,以适应和评估日志信息([#1266](https://github.com/adap/flower/pull/1266))" +"Generally, higher `reconstruction_threshold` means better privacy " +"guarantees but less tolerance to dropouts." +msgstr "一般来说,\"重建阈值 \"越高,隐私保证就越好,但对丢包的容忍度就越低。" -#: ../../source/ref-changelog.md:716 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:61 of +#, fuzzy msgid "" -"Add secure gRPC connection to the `advanced_tensorflow` code example " -"([#847](https://github.com/adap/flower/pull/847))" +"When `reconstruction_threshold` is a float, it is interpreted as the " +"proportion of the number of all selected clients needed for the " +"reconstruction of a private key. This feature enables flexibility in " +"setting the security threshold relative to the number of selected " +"clients." 
msgstr "" -"为 `advanced_tensorflow` 代码示例添加安全 gRPC 连接 " -"([#847](https://github.com/adap/flower/pull/847))" +"当 `reconstruction_threshold` " +"为浮点数时,它被解释为重建私钥所需的所有选定客户端数量的比例。此功能可根据所选客户端的数量灵活设置安全阈值。" -#: ../../source/ref-changelog.md:717 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:65 of +#, fuzzy msgid "" -"Update developer tooling " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" +"`reconstruction_threshold`, and the quantization parameters " +"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg " +"protocol." msgstr "" -"更新开发人员工具([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310)" +"重构阈值 \"和量化参数(\"裁剪范围\"、\"量化范围\"、\"模量范围\")在 SecAgg " +"协议中平衡隐私性、鲁棒性和效率方面起着至关重要的作用。" -#: ../../source/ref-changelog.md:718 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +#, fuzzy msgid "" -"Rename ProtoBuf messages to improve consistency " -"([#1214](https://github.com/adap/flower/pull/1214), " -"[#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +":py:obj:`collect_masked_vectors_stage " +"`\\ " +"\\(driver\\, ...\\)" msgstr "" -"重命名 ProtoBuf 消息以提高一致性([#1214](https://github.com/adap/flower/pull/1214), " -"[#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259)" - -#: ../../source/ref-changelog.md:720 -msgid "v0.19.0 (2022-05-18)" -msgstr "v0.19.0 (2022-05-18)" +":py:obj:`collect_masked_vectors_stage " +"`\\(driver\\," +" ...\\)" -#: 
../../source/ref-changelog.md:724 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +#, fuzzy msgid "" -"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " -"([#919](https://github.com/adap/flower/pull/919), " -"[#1127](https://github.com/adap/flower/pull/1127), " -"[#914](https://github.com/adap/flower/pull/914))" +":py:obj:`setup_stage `\\" +" \\(driver\\, context\\, state\\)" msgstr "" -"**Flower Baselines(预览): FedOpt、FedBN、FedAvgM** " -"([#919](https://github.com/adap/flower/pull/919), " -"[#1127](https://github.com/adap/flower/pull/1127), " -"[#914](https://github.com/adap/flower/pull/914))" +":py:obj:`setup_stage " +"`\\(driver\\, context\\," +" state\\)" -#: ../../source/ref-changelog.md:726 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of #, fuzzy msgid "" -"The first preview release of Flower Baselines has arrived! We're " -"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " -"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " -"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " -"With this first preview release we're also inviting the community to " -"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" -"contribute-baselines.html)." 
+":py:obj:`share_keys_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -"Flower Baselines 的第一个预览版已经发布!我们通过实现 " -"FedOpt(FedYogi、FedAdam、FedAdagrad)、FedBN 和 FedAvgM 来启动 Flower " -"Baselines。请查阅文档了解如何使用 [Flower Baselines](https://flower.ai/docs/using-" -"baselines.html)。在首次发布预览版时,我们还邀请社区成员[贡献自己的Baselines](https://flower.ai/docs" -"/contributing-baselines.html)。" +"py:obj:`share_keys_stage " +"`\\(driver\\, " +"context\\, state\\)" -#: ../../source/ref-changelog.md:728 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +#, fuzzy msgid "" -"**C++ client SDK (preview) and code example** " -"([#1111](https://github.com/adap/flower/pull/1111))" -msgstr "**C++客户端SDK(预览版)和代码示例**([#1111](https://github.com/adap/flower/pull/1111))" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" +msgstr "" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" -#: ../../source/ref-changelog.md:730 +#: ../../source/ref-api/flwr.simulation.rst:2 +#, fuzzy +msgid "simulation" +msgstr "运行模拟" + +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#, fuzzy msgid "" -"Preview support for Flower clients written in C++. The C++ preview " -"includes a Flower client SDK and a quickstart code example that " -"demonstrates a simple C++ client using the SDK." +":py:obj:`run_simulation `\\ " +"\\(server\\_app\\, client\\_app\\, ...\\)" msgstr "" -"预览版支持用 C++ 编写的 Flower 客户端。C++ 预览版包括一个 Flower 客户端 SDK 和一个快速入门代码示例,使用 SDK " -"演示了一个简单的 C++ 客户端。" +":py:obj:`run_simulation `\\ " +"\\(server\\_app\\, client\\_app\\, ...\\)" -#: ../../source/ref-changelog.md:732 +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.simulation.run_simulation.run_simulation:1 of +#, fuzzy +msgid "Run a Flower App using the Simulation Engine." 
+msgstr "使用模拟引擎运行花朵应用程序。" + +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#, fuzzy msgid "" -"**Add experimental support for Python 3.10 and Python 3.11** " -"([#1135](https://github.com/adap/flower/pull/1135))" +":py:obj:`start_simulation `\\ " +"\\(\\*args\\, \\*\\*kwargs\\)" msgstr "" -"** 增加对 Python 3.10 和 Python 3.11 的实验支持** " -"([#1135](https://github.com/adap/flower/pull/1135))" +":py:obj:`start_simulation `\\ \\(\\*\\," +" client\\_fn\\[\\, ...\\]\\)" -#: ../../source/ref-changelog.md:734 -msgid "" -"Python 3.10 is the latest stable release of Python and Python 3.11 is due" -" to be released in October. This Flower release adds experimental support" -" for both Python versions." +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.simulation.start_simulation:1 of +msgid "Log error stating that module `ray` could not be imported." msgstr "" -"Python 3.10 是 Python 的最新稳定版本,Python 3.11 将于 10 月份发布。Flower 版本增加了对这两个 " -"Python 版本的实验支持。" -#: ../../source/ref-changelog.md:736 -msgid "" -"**Aggregate custom metrics through user-provided functions** " -"([#1144](https://github.com/adap/flower/pull/1144))" -msgstr "**通过用户提供的函数聚合自定义指标**([#1144](https://github.com/adap/flower/pull/1144))" +#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 +#, fuzzy +msgid "run\\_simulation" +msgstr "运行模拟" -#: ../../source/ref-changelog.md:738 +#: flwr.simulation.run_simulation.run_simulation:3 of +#, fuzzy msgid "" -"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" -" customize the strategy. Built-in strategies support two new arguments, " -"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " -"allow passing custom metric aggregation functions." -msgstr "" -"现在无需定制策略即可聚合自定义度量(如`准确度`)。内置策略支持两个新参数:`fit_metrics_aggregation_fn` " -"和`evaluate_metrics_aggregation_fn`,允许传递自定义度量聚合函数。" +"The `ServerApp` to be executed. It will send messages to different " +"`ClientApp` instances running on different (virtual) SuperNodes." 
+msgstr "要执行的 `ServerApp`。它将向运行在不同(虚拟)超级节点上的不同 `ClientApp`实例发送消息。" -#: ../../source/ref-changelog.md:740 +#: flwr.simulation.run_simulation.run_simulation:6 of +#, fuzzy msgid "" -"**User-configurable round timeout** " -"([#1162](https://github.com/adap/flower/pull/1162))" -msgstr "**用户可配置的回合超时**([#1162](https://github.com/adap/flower/pull/1162))" +"The `ClientApp` to be executed by each of the SuperNodes. It will receive" +" messages sent by the `ServerApp`." +msgstr "由每个超级节点执行的 `ClientApp`。它将接收由 `ServerApp` 发送的信息。" -#: ../../source/ref-changelog.md:742 +#: flwr.simulation.run_simulation.run_simulation:9 of +#, fuzzy msgid "" -"A new configuration value allows the round timeout to be set for " -"`start_server` and `start_simulation`. If the `config` dictionary " -"contains a `round_timeout` key (with a `float` value in seconds), the " -"server will wait *at least* `round_timeout` seconds before it closes the " -"connection." -msgstr "" -"新的配置值允许为 `start_server` 和 `start_simulation` 设置回合超时。如果 `config` 字典中包含一个 " -"`round_timeout` 键(以秒为单位的 `float`值),服务器将至少等待 ** `round_timeout` 秒后才关闭连接。" +"Number of nodes that run a ClientApp. They can be sampled by a Driver in " +"the ServerApp and receive a Message describing what the ClientApp should " +"perform." +msgstr "运行 ClientApp 的节点数。它们可被 ServerApp 中的驱动程序采样,并接收描述 ClientApp 应执行的操作的信息。" -#: ../../source/ref-changelog.md:744 -msgid "" -"**Enable both federated evaluation and centralized evaluation to be used " -"at the same time in all built-in strategies** " -"([#1091](https://github.com/adap/flower/pull/1091))" -msgstr "" -"**允许在所有内置策略中同时使用联邦评价和集中评估** " -"([#1091](https://github.com/adap/flower/pull/1091))" +#: flwr.simulation.run_simulation.run_simulation:12 of +#, fuzzy +msgid "A simulation backend that runs `ClientApp`s." 
+msgstr "运行 \"客户端应用程序 \"的模拟后台。" -#: ../../source/ref-changelog.md:746 +#: flwr.simulation.run_simulation.run_simulation:14 of msgid "" -"Built-in strategies can now perform both federated evaluation (i.e., " -"client-side) and centralized evaluation (i.e., server-side) in the same " -"round. Federated evaluation can be disabled by setting `fraction_eval` to" -" `0.0`." +"'A dictionary to configure a backend. Separate dictionaries to configure " +"different elements of backend. Supported top-level keys are `init_args` " +"for values parsed to initialisation of backend, `client_resources` to " +"define the resources for clients, and `actor` to define the actor " +"parameters. Values supported in are those included by " +"`flwr.common.typing.ConfigsRecordValues`." msgstr "" -"内置策略现在可以在同一轮中同时执行联邦评估(即客户端)和集中评估(即服务器端)。可以通过将 `fraction_eval` 设置为 " -"`0.0`来禁用联邦评估。" -#: ../../source/ref-changelog.md:748 +#: flwr.simulation.run_simulation.run_simulation:21 of +#, fuzzy msgid "" -"**Two new Jupyter Notebook tutorials** " -"([#1141](https://github.com/adap/flower/pull/1141))" +"A boolean to indicate whether to enable GPU growth on the main thread. " +"This is desirable if you make use of a TensorFlow model on your " +"`ServerApp` while having your `ClientApp` running on the same GPU. " +"Without enabling this, you might encounter an out-of-memory error because" +" TensorFlow, by default, allocates all GPU memory. Read more about how " +"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " +"documentation: https://www.tensorflow.org/api/stable." 
msgstr "" -"**两本新的 Jupyter Notebook 教程** " -"([#1141](https://github.com/adap/flower/pull/1141))" +"布尔值,用于指示是否在主线程上启用 GPU 增长。如果您在 \"ServerApp \"上使用 TensorFlow 模型,同时让 " +"\"ClientApp \"在同一 GPU 上运行,则最好启用此选项。如果不启用此功能,您可能会遇到内存不足的错误,因为 TensorFlow " +"默认会分配所有 GPU 内存。有关 `tf.config.experimental.set_memory_growth()` " +"如何工作的更多信息,请参阅 TensorFlow 文档:https://www.tensorflow.org/api/stable。" -#: ../../source/ref-changelog.md:750 +#: flwr.simulation.run_simulation.run_simulation:28 of +#, fuzzy msgid "" -"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " -"basic and intermediate Flower features:" -msgstr "两本 Jupyter Notebook 教程(与 Google Colab 兼容)介绍了 Flower 的基本和中级功能:" +"When disabled, only INFO, WARNING and ERROR log messages will be shown. " +"If enabled, DEBUG-level logs will be displayed." +msgstr "启用后,将只显示 INFO、WARNING 和 ERROR 日志信息。启用后,将显示 DEBUG 级日志。" -#: ../../source/ref-changelog.md:752 -msgid "" -"*An Introduction to Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" -"-Intro-to-FL-PyTorch.ipynb)" -msgstr "" -"*联邦学习简介*: [在 Colab " -"中打开](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" -"-Intro-to-FL-PyTorch.ipynb)" +#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 +#, fuzzy +msgid "start\\_simulation" +msgstr "start_simulation" -#: ../../source/ref-changelog.md:754 +#: ../../source/ref-changelog.md:1 +msgid "Changelog" +msgstr "更新日志" + +#: ../../source/ref-changelog.md:3 +#, fuzzy +msgid "v1.11.1 (2024-09-11)" +msgstr "v1.3.0 (2023-02-06)" + +#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:37 +#: ../../source/ref-changelog.md:141 ../../source/ref-changelog.md:239 +#: ../../source/ref-changelog.md:339 ../../source/ref-changelog.md:403 +#: ../../source/ref-changelog.md:496 ../../source/ref-changelog.md:596 +#: ../../source/ref-changelog.md:680 ../../source/ref-changelog.md:744 +#: ../../source/ref-changelog.md:802 
../../source/ref-changelog.md:871 +#: ../../source/ref-changelog.md:940 +msgid "Thanks to our contributors" +msgstr "感谢我们的贡献者" + +#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:39 +#: ../../source/ref-changelog.md:143 ../../source/ref-changelog.md:241 +#: ../../source/ref-changelog.md:341 ../../source/ref-changelog.md:405 +#: ../../source/ref-changelog.md:498 ../../source/ref-changelog.md:598 +#: ../../source/ref-changelog.md:682 ../../source/ref-changelog.md:746 +#: ../../source/ref-changelog.md:804 msgid "" -"*Using Strategies in Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" -"-Strategies-in-FL-PyTorch.ipynb)" -msgstr "" -"*在联邦学习中使用策略*: [在 Colab " -"中打开](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" -"-Strategies-in-FL-PyTorch.ipynb)" +"We would like to give our special thanks to all the contributors who made" +" the new version of Flower possible (in `git shortlog` order):" +msgstr "在此,我们要特别感谢所有为 Flower 的新版本做出贡献的人员(按 `git shortlog` 顺序排列):" -#: ../../source/ref-changelog.md:756 +#: ../../source/ref-changelog.md:9 +#, fuzzy msgid "" -"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " -"([#1076](https://github.com/adap/flower/pull/1076))" +"`Charles Beauville`, `Chong Shen Ng`, `Daniel J. Beutel`, `Heng Pan`, " +"`Javier`, `Robert Steiner`, `Yan Gao` " msgstr "" -"**新的 FedAvgM 策略(带服务器动量的联邦平均)** " -"([#1076](https://github.com/adap/flower/pull/1076))" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-changelog.md:758 +#: ../../source/ref-changelog.md:11 +#, fuzzy +msgid "Improvements" +msgstr "可选的改进措施" + +#: ../../source/ref-changelog.md:13 +#, fuzzy msgid "" -"The new `FedAvgM` strategy implements Federated Averaging with Server " -"Momentum \\[Hsu et al., 2019\\]." -msgstr "新的 \"FedAvgM \"策略实现了带服务器动量的联邦平均[Hsu et al., 2019\\]." +"**Implement** `keys/values/items` **methods for** `TypedDict` " +"([#4146](https://github.com/adap/flower/pull/4146))" +msgstr "" +"**使** `get_parameters` **可配置** " +"([#1242](https://github.com/adap/flower/pull/1242))" -#: ../../source/ref-changelog.md:760 +#: ../../source/ref-changelog.md:15 +#, fuzzy msgid "" -"**New advanced PyTorch code example** " -"([#1007](https://github.com/adap/flower/pull/1007))" -msgstr "**新的 PyTorch 高级代码示例** ([#1007](https://github.com/adap/flower/pull/1007))" +"**Fix parsing of** `--executor-config` **if present** " +"([#4125](https://github.com/adap/flower/pull/4125))" +msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" -#: ../../source/ref-changelog.md:762 +#: ../../source/ref-changelog.md:17 +#, fuzzy msgid "" -"A new code example (`advanced_pytorch`) demonstrates advanced Flower " -"concepts with PyTorch." 
-msgstr "新代码示例 (`advanced_pytorch`) 演示了 PyTorch 的高级 Flower 概念。" +"**Adjust framework name in templates docstrings** " +"([#4127](https://github.com/adap/flower/pull/4127))" +msgstr "**新的 scikit-learn 代码示例** ([#748](https://github.com/adap/flower/pull/748))" -#: ../../source/ref-changelog.md:764 +#: ../../source/ref-changelog.md:19 +#, fuzzy msgid "" -"**New JAX code example** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" +"**Update** `flwr new` **Hugging Face template** " +"([#4169](https://github.com/adap/flower/pull/4169))" msgstr "" -"**新的 JAX 代码示例**([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143)" +"**新的Hugging Face Transformers代码示例** " +"([#863](https://github.com/adap/flower/pull/863))" -#: ../../source/ref-changelog.md:766 +#: ../../source/ref-changelog.md:21 +#, fuzzy msgid "" -"A new code example (`jax_from_centralized_to_federated`) shows federated " -"learning with JAX and Flower." 
-msgstr "新代码示例(`jax_from_centralized_to_federated`)展示了使用 JAX 和 Flower 的联邦学习。" +"**Fix** `flwr new` **FlowerTune template** " +"([#4123](https://github.com/adap/flower/pull/4123))" +msgstr "**新的 iOS CoreML 代码示例**([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/ref-changelog.md:770 +#: ../../source/ref-changelog.md:23 +#, fuzzy msgid "" -"New option to keep Ray running if Ray was already initialized in " -"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" +"**Add buffer time after** `ServerApp` **thread initialization** " +"([#4119](https://github.com/adap/flower/pull/4119))" msgstr "" -"新增选项,用于在 \"start_simulation\"(开始模拟)中已初始化 Ray 的情况下保持 Ray " -"运行([#1177](https://github.com/adap/flower/pull/1177))" +"**在模拟过程中为***`历史`***对象添加训练指标*** " +"([#1696](https://github.com/adap/flower/pull/1696))" -#: ../../source/ref-changelog.md:771 +#: ../../source/ref-changelog.md:25 +#, fuzzy msgid "" -"Add support for custom `ClientManager` as a `start_simulation` parameter " -"([#1171](https://github.com/adap/flower/pull/1171))" -msgstr "" -"添加对自定义 \"客户端管理器 \"作为 \"start_simulation " -"\"参数的支持([#1171](https://github.com/adap/flower/pull/1171))" +"**Handle unsuitable resources for simulation** " +"([#4143](https://github.com/adap/flower/pull/4143))" +msgstr "** 添加新的模拟监控指南** ([#1649](https://github.com/adap/flower/pull/1649))" -#: ../../source/ref-changelog.md:772 +#: ../../source/ref-changelog.md:27 +#, fuzzy msgid "" -"New documentation for [implementing " -"strategies](https://flower.ai/docs/framework/how-to-implement-" -"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " -"[#1175](https://github.com/adap/flower/pull/1175))" +"**Update example READMEs** " +"([#4117](https://github.com/adap/flower/pull/4117))" msgstr "" -"[实施战略](https://flower.ai/docs/framework/how-to-implement-strategies.html)" -" 的新文件([#1097](https://github.com/adap/flower/pull/1097), " -"[#1175](https://github.com/adap/flower/pull/1175)" +"**介绍Flower 
Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" -#: ../../source/ref-changelog.md:773 +#: ../../source/ref-changelog.md:29 +#, fuzzy msgid "" -"New mobile-friendly documentation theme " -"([#1174](https://github.com/adap/flower/pull/1174))" -msgstr "新的移动友好型文档主题 ([#1174](https://github.com/adap/flower/pull/1174))" +"**Update SuperNode authentication docs** " +"([#4160](https://github.com/adap/flower/pull/4160))" +msgstr "** 添加一个新的 gRPC 选项**([#2197](https://github.com/adap/flower/pull/2197))" -#: ../../source/ref-changelog.md:774 -msgid "" -"Limit version range for (optional) `ray` dependency to include only " -"compatible releases (`>=1.9.2,<1.12.0`) " -"([#1205](https://github.com/adap/flower/pull/1205))" -msgstr "" -"限制(可选)`ray`依赖的版本范围,使其仅包含兼容版本(`>=1.9.2,<1.12.0`) " -"([#1205](https://github.com/adap/flower/pull/1205))" +#: ../../source/ref-changelog.md:31 ../../source/ref-changelog.md:111 +#: ../../source/ref-changelog.md:227 ../../source/ref-changelog.md:323 +#: ../../source/ref-changelog.md:397 ../../source/ref-changelog.md:472 +#: ../../source/ref-changelog.md:584 ../../source/ref-changelog.md:674 +#: ../../source/ref-changelog.md:738 ../../source/ref-changelog.md:796 +#: ../../source/ref-changelog.md:865 ../../source/ref-changelog.md:927 +#: ../../source/ref-changelog.md:946 ../../source/ref-changelog.md:1102 +#: ../../source/ref-changelog.md:1173 ../../source/ref-changelog.md:1210 +#: ../../source/ref-changelog.md:1253 +msgid "Incompatible changes" +msgstr "不兼容的更改" -#: ../../source/ref-changelog.md:778 -msgid "" -"**Remove deprecated support for Python 3.6** " -"([#871](https://github.com/adap/flower/pull/871))" -msgstr "**删除对 Python 3.6 的过时支持** ([#871](https://github.com/adap/flower/pull/871))" +#: ../../source/ref-changelog.md:35 +#, fuzzy +msgid "v1.11.0 (2024-08-30)" +msgstr "v1.3.0 (2023-02-06)" -#: ../../source/ref-changelog.md:779 +#: ../../source/ref-changelog.md:41 +#, fuzzy msgid "" -"**Remove deprecated KerasClient** " 
-"([#857](https://github.com/adap/flower/pull/857))" -msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Edoardo Gabrielli`, `Heng Pan`," +" `Javier`, `Meng Yan`, `Michal Danilowski`, `Mohammad Naseri`, `Robert " +"Steiner`, `Steve Laskaridis`, `Taner Topal`, `Yan Gao` " +msgstr "" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-changelog.md:780 -msgid "" -"**Remove deprecated no-op extra installs** " -"([#973](https://github.com/adap/flower/pull/973))" -msgstr "**移除过时的不操作额外安装** ([#973](https://github.com/adap/flower/pull/973))" +#: ../../source/ref-changelog.md:43 ../../source/ref-changelog.md:147 +#: ../../source/ref-changelog.md:245 ../../source/ref-changelog.md:345 +#: ../../source/ref-changelog.md:409 ../../source/ref-changelog.md:502 +#: ../../source/ref-changelog.md:602 ../../source/ref-changelog.md:686 +#: ../../source/ref-changelog.md:750 ../../source/ref-changelog.md:808 +#: ../../source/ref-changelog.md:877 ../../source/ref-changelog.md:1006 +#: ../../source/ref-changelog.md:1048 ../../source/ref-changelog.md:1115 +#: ../../source/ref-changelog.md:1181 ../../source/ref-changelog.md:1226 +#: ../../source/ref-changelog.md:1265 ../../source/ref-changelog.md:1298 +#: ../../source/ref-changelog.md:1348 +msgid "What's new?" +msgstr "有什么新内容?" 
-#: ../../source/ref-changelog.md:781 +#: ../../source/ref-changelog.md:45 msgid "" -"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " -"([#869](https://github.com/adap/flower/pull/869))" +"**Deliver Flower App Bundle (FAB) to SuperLink and SuperNodes** " +"([#4006](https://github.com/adap/flower/pull/4006), " +"[#3945](https://github.com/adap/flower/pull/3945), " +"[#3999](https://github.com/adap/flower/pull/3999), " +"[#4027](https://github.com/adap/flower/pull/4027), " +"[#3851](https://github.com/adap/flower/pull/3851), " +"[#3946](https://github.com/adap/flower/pull/3946), " +"[#4003](https://github.com/adap/flower/pull/4003), " +"[#4029](https://github.com/adap/flower/pull/4029), " +"[#3942](https://github.com/adap/flower/pull/3942), " +"[#3957](https://github.com/adap/flower/pull/3957), " +"[#4020](https://github.com/adap/flower/pull/4020), " +"[#4044](https://github.com/adap/flower/pull/4044), " +"[#3852](https://github.com/adap/flower/pull/3852), " +"[#4019](https://github.com/adap/flower/pull/4019), " +"[#4031](https://github.com/adap/flower/pull/4031), " +"[#4036](https://github.com/adap/flower/pull/4036), " +"[#4049](https://github.com/adap/flower/pull/4049), " +"[#4017](https://github.com/adap/flower/pull/4017), " +"[#3943](https://github.com/adap/flower/pull/3943), " +"[#3944](https://github.com/adap/flower/pull/3944), " +"[#4011](https://github.com/adap/flower/pull/4011), " +"[#3619](https://github.com/adap/flower/pull/3619))" msgstr "" -"**从** `FitRes` **和** `EvaluateRes` 中移除已废弃的 proto 字段 " -"([#869](https://github.com/adap/flower/pull/869))" -#: ../../source/ref-changelog.md:782 +#: ../../source/ref-changelog.md:47 msgid "" -"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " -"([#1107](https://github.com/adap/flower/pull/1107))" +"Dynamic code updates are here! 
`flwr run` can now ship and install the " +"latest version of your `ServerApp` and `ClientApp` to an already-running " +"federation (SuperLink and SuperNodes)." msgstr "" -"**移除过时的 QffedAvg 策略(由 QFedAvg 取代)** " -"([#1107](https://github.com/adap/flower/pull/1107))" -#: ../../source/ref-changelog.md:783 +#: ../../source/ref-changelog.md:49 msgid "" -"**Remove deprecated DefaultStrategy strategy** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"How does it work? `flwr run` bundles your Flower app into a single FAB " +"(Flower App Bundle) file. It then ships this FAB file, via the SuperExec," +" to both the SuperLink and those SuperNodes that need it. This allows you" +" to keep SuperExec, SuperLink and SuperNodes running as permanent " +"infrastructure, and then ship code updates (including completely new " +"projects!) dynamically." msgstr "" -"**删除过时的 DefaultStrategy 策略** " -"([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/ref-changelog.md:784 +#: ../../source/ref-changelog.md:51 +msgid "`flwr run` is all you need." 
+msgstr "" + +#: ../../source/ref-changelog.md:53 +#, fuzzy msgid "" -"**Remove deprecated support for eval_fn accuracy return value** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"**Introduce isolated** `ClientApp` **execution** " +"([#3970](https://github.com/adap/flower/pull/3970), " +"[#3976](https://github.com/adap/flower/pull/3976), " +"[#4002](https://github.com/adap/flower/pull/4002), " +"[#4001](https://github.com/adap/flower/pull/4001), " +"[#4034](https://github.com/adap/flower/pull/4034), " +"[#4037](https://github.com/adap/flower/pull/4037), " +"[#3977](https://github.com/adap/flower/pull/3977), " +"[#4042](https://github.com/adap/flower/pull/4042), " +"[#3978](https://github.com/adap/flower/pull/3978), " +"[#4039](https://github.com/adap/flower/pull/4039), " +"[#4033](https://github.com/adap/flower/pull/4033), " +"[#3971](https://github.com/adap/flower/pull/3971), " +"[#4035](https://github.com/adap/flower/pull/4035), " +"[#3973](https://github.com/adap/flower/pull/3973), " +"[#4032](https://github.com/adap/flower/pull/4032))" msgstr "" -"**删除已过时的对 eval_fn 返回值准确性的支持** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"**普通改进** ([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github. 
com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" -#: ../../source/ref-changelog.md:785 +#: ../../source/ref-changelog.md:55 msgid "" -"**Remove deprecated support for passing initial parameters as NumPy " -"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" +"The SuperNode can now run your `ClientApp` in a fully isolated way. In an" +" enterprise deployment, this allows you to set strict limits on what the " +"`ClientApp` can and cannot do." msgstr "" -"**移除对以 NumPy ndarrays 传递初始参数的过时支持** " -"([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/ref-changelog.md:787 -msgid "v0.18.0 (2022-02-28)" -msgstr "v0.18.0 (2022-02-28)" +#: ../../source/ref-changelog.md:57 +msgid "`flower-supernode` supports three `--isolation` modes:" +msgstr "" -#: ../../source/ref-changelog.md:791 +#: ../../source/ref-changelog.md:59 msgid "" -"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " -"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " -"[#872](https://github.com/adap/flower/pull/872), " -"[#833](https://github.com/adap/flower/pull/833), " -"[#1036](https://github.com/adap/flower/pull/1036))" +"Unset: The SuperNode runs the `ClientApp` in the same process (as in " +"previous versions of Flower). This is the default mode." 
msgstr "" -"**改进了虚拟客户端引擎与 Jupyter Notebook / Google Colab 的兼容性** " -"([#866](https://github.com/adap/flower/pull/866), " -"[#872](https://github.com/adap/flower/pull/872), " -"[#833](https://github.com/adap/flower/pull/833), " -"[#1036](https://github.com/adap/flower/pull/1036))" -#: ../../source/ref-changelog.md:793 +#: ../../source/ref-changelog.md:60 msgid "" -"Simulations (using the Virtual Client Engine through `start_simulation`) " -"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " -"installing Flower with the `simulation` extra (`pip install " -"flwr[simulation]`)." +"`--isolation=subprocess`: The SuperNode starts a subprocess to run the " +"`ClientApp`." msgstr "" -"通过 `start_simulation` 在 Jupyter 笔记本(包括 Google Colab)上安装 Flower 并附加 " -"`simulation` (`pip install flwr[simulation]`)后,模拟(通过 `start_simulation` " -"使用虚拟客户端引擎)现在可以更流畅地运行。" -#: ../../source/ref-changelog.md:795 +#: ../../source/ref-changelog.md:61 msgid "" -"**New Jupyter Notebook code example** " -"([#833](https://github.com/adap/flower/pull/833))" +"`--isolation=process`: The SuperNode expects an externally-managed " +"process to run the `ClientApp`. This external process is not managed by " +"the SuperNode, so it has to be started beforehand and terminated " +"manually. The common way to use this isolation mode is via the new " +"`flwr/clientapp` Docker image." msgstr "" -"**新的 Jupyter Notebook 代码示例** " -"([#833](https://github.com/adap/flower/pull/833))" -#: ../../source/ref-changelog.md:797 +#: ../../source/ref-changelog.md:63 +#, fuzzy msgid "" -"A new code example (`quickstart_simulation`) demonstrates Flower " -"simulations using the Virtual Client Engine through Jupyter Notebook " -"(incl. Google Colab)." 
+"**Improve Docker support for enterprise deployments** " +"([#4050](https://github.com/adap/flower/pull/4050), " +"[#4090](https://github.com/adap/flower/pull/4090), " +"[#3784](https://github.com/adap/flower/pull/3784), " +"[#3998](https://github.com/adap/flower/pull/3998), " +"[#4094](https://github.com/adap/flower/pull/4094), " +"[#3722](https://github.com/adap/flower/pull/3722))" msgstr "" -"新代码示例(`quickstart_simulation`)通过 Jupyter Notebook(包括 Google " -"Colab)演示了使用虚拟客户端引擎进行 Flower 模拟。" +"**移除对 Python 3.7 的支持** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" -#: ../../source/ref-changelog.md:799 +#: ../../source/ref-changelog.md:65 msgid "" -"**Client properties (feature preview)** " -"([#795](https://github.com/adap/flower/pull/795))" -msgstr "**客户端属性(功能预览)** ([#795](https://github.com/adap/flower/pull/795))" +"Flower 1.11 ships many Docker improvements that are especially useful for" +" enterprise deployments:" +msgstr "" -#: ../../source/ref-changelog.md:801 -msgid "" -"Clients can implement a new method `get_properties` to enable server-side" -" strategies to query client properties." -msgstr "客户端可以实现一个新方法 `get_properties`,以启用服务器端策略来查询客户端属性。" +#: ../../source/ref-changelog.md:67 +msgid "`flwr/supernode` comes with a new Alpine Docker image." +msgstr "" -#: ../../source/ref-changelog.md:803 +#: ../../source/ref-changelog.md:68 msgid "" -"**Experimental Android support with TFLite** " -"([#865](https://github.com/adap/flower/pull/865))" -msgstr "** 使用 TFLite 实验性支持安卓系统** ([#865](https://github.com/adap/flower/pull/865))" +"`flwr/clientapp` is a new image to be used with the `--isolation=process`" +" option. 
In this mode, SuperNode and `ClientApp` run in two different " +"Docker containers. `flwr/supernode` (preferably the Alpine version) runs " +"the long-running SuperNode with `--isolation=process`. `flwr/clientapp` " +"runs the `ClientApp`. This is the recommended way to deploy Flower in " +"enterprise settings." +msgstr "" -#: ../../source/ref-changelog.md:805 +#: ../../source/ref-changelog.md:69 msgid "" -"Android support has finally arrived in `main`! Flower is both client-" -"agnostic and framework-agnostic by design. One can integrate arbitrary " -"client platforms and with this release, using Flower on Android has " -"become a lot easier." +"New all-in-one Docker Compose enables you to easily start a full Flower " +"Deployment Engine on a single machine." msgstr "" -"`main`终于支持 Android 了!Flower 的设计与客户端和框架无关。我们可以集成任意客户端平台,有了这个版本,在安卓系统上使用 " -"Flower 就变得更容易了。" -#: ../../source/ref-changelog.md:807 +#: ../../source/ref-changelog.md:70 msgid "" -"The example uses TFLite on the client side, along with a new " -"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " -"still experimental, but they are a first step towards a fully-fledged " -"Android SDK and a unified `FedAvg` implementation that integrated the new" -" functionality from `FedAvgAndroid`." 
+"Completely new Docker documentation: " +"https://flower.ai/docs/framework/docker/index.html" msgstr "" -"该示例在客户端使用了 TFLite 以及新的 `FedAvgAndroid`策略。Android 客户端和 " -"`FedAvgAndroid`仍处于试验阶段,但这是向成熟的 Android SDK 和集成了 `FedAvgAndroid`新功能的统一 " -"`FedAvg`实现迈出的第一步。" -#: ../../source/ref-changelog.md:809 +#: ../../source/ref-changelog.md:72 +#, fuzzy msgid "" -"**Make gRPC keepalive time user-configurable and decrease default " -"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" +"**Improve SuperNode authentication** " +"([#4043](https://github.com/adap/flower/pull/4043), " +"[#4047](https://github.com/adap/flower/pull/4047), " +"[#4074](https://github.com/adap/flower/pull/4074))" msgstr "" -"**使 gRPC 保持连接时间可由用户配置,并缩短默认保持连接时间** " -"([#1069](https://github.com/adap/flower/pull/1069))" +"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" -#: ../../source/ref-changelog.md:811 +#: ../../source/ref-changelog.md:74 msgid "" -"The default gRPC keepalive time has been reduced to increase the " -"compatibility of Flower with more cloud environments (for example, " -"Microsoft Azure). Users can configure the keepalive time to customize the" -" gRPC stack based on specific requirements." +"SuperNode auth has been improved in several ways, including improved " +"logging, improved testing, and improved error handling." 
msgstr "" -"为提高 Flower 与更多云环境(如 Microsoft Azure)的兼容性,缩短了默认 gRPC 保持时间。用户可以根据具体要求配置 " -"keepalive 时间,自定义 gRPC 堆栈。" -#: ../../source/ref-changelog.md:813 +#: ../../source/ref-changelog.md:76 +#, fuzzy msgid "" -"**New differential privacy example using Opacus and PyTorch** " -"([#805](https://github.com/adap/flower/pull/805))" +"**Update** `flwr new` **templates** " +"([#3933](https://github.com/adap/flower/pull/3933), " +"[#3894](https://github.com/adap/flower/pull/3894), " +"[#3930](https://github.com/adap/flower/pull/3930), " +"[#3931](https://github.com/adap/flower/pull/3931), " +"[#3997](https://github.com/adap/flower/pull/3997), " +"[#3979](https://github.com/adap/flower/pull/3979), " +"[#3965](https://github.com/adap/flower/pull/3965), " +"[#4013](https://github.com/adap/flower/pull/4013), " +"[#4064](https://github.com/adap/flower/pull/4064))" msgstr "" -"**使用 Opacus 和 PyTorch 的新差分隐私示例** " -"([#805](https://github.com/adap/flower/pull/805))" +"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" -#: ../../source/ref-changelog.md:815 +#: ../../source/ref-changelog.md:78 msgid "" -"A new code example (`opacus`) demonstrates differentially-private " -"federated learning with Opacus, PyTorch, and Flower." -msgstr "一个新的代码示例(\"opacus\")演示了使用 Opacus、PyTorch 和 Flower 进行差分隐私的联邦学习。" +"All `flwr new` templates have been updated to show the latest recommended" +" use of Flower APIs." 
+msgstr "" -#: ../../source/ref-changelog.md:817 +#: ../../source/ref-changelog.md:80 +#, fuzzy msgid "" -"**New Hugging Face Transformers code example** " -"([#863](https://github.com/adap/flower/pull/863))" +"**Improve Simulation Engine** " +"([#4095](https://github.com/adap/flower/pull/4095), " +"[#3913](https://github.com/adap/flower/pull/3913), " +"[#4059](https://github.com/adap/flower/pull/4059), " +"[#3954](https://github.com/adap/flower/pull/3954), " +"[#4071](https://github.com/adap/flower/pull/4071), " +"[#3985](https://github.com/adap/flower/pull/3985), " +"[#3988](https://github.com/adap/flower/pull/3988))" msgstr "" -"**新的Hugging Face Transformers代码示例** " -"([#863](https://github.com/adap/flower/pull/863))" +"**移除对 Python 3.7 的支持** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" -#: ../../source/ref-changelog.md:819 +#: ../../source/ref-changelog.md:82 msgid "" -"A new code example (`quickstart_huggingface`) demonstrates usage of " -"Hugging Face Transformers with Flower." -msgstr "新的代码示例(`quickstart_huggingface`)证明了结合Flower和Hugging Face Transformers的实用性。" +"The Flower Simulation Engine comes with several updates, including " +"improved run config support, verbose logging, simulation backend " +"configuration via `flwr run`, and more." 
+msgstr "" -#: ../../source/ref-changelog.md:821 +#: ../../source/ref-changelog.md:84 +#, fuzzy msgid "" -"**New MLCube code example** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" +"**Improve** `RecordSet` " +"([#4052](https://github.com/adap/flower/pull/4052), " +"[#3218](https://github.com/adap/flower/pull/3218), " +"[#4016](https://github.com/adap/flower/pull/4016))" msgstr "" -"**新的 MLCube 代码示例** ([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" +"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" -#: ../../source/ref-changelog.md:823 +#: ../../source/ref-changelog.md:86 msgid "" -"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " -"with Flower." -msgstr "新代码示例(\"quickstart_mlcube\")演示了 MLCube 与 Flower 的用法。" +"`RecordSet` is the core object to exchange model parameters, " +"configuration values and metrics between `ClientApp` and `ServerApp`. " +"This release ships several smaller improvements to `RecordSet` and " +"related `*Record` types." 
+msgstr "" -#: ../../source/ref-changelog.md:825 +#: ../../source/ref-changelog.md:88 +#, fuzzy msgid "" -"**SSL-enabled server and client** " -"([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +"**Update documentation** " +"([#3972](https://github.com/adap/flower/pull/3972), " +"[#3925](https://github.com/adap/flower/pull/3925), " +"[#4061](https://github.com/adap/flower/pull/4061), " +"[#3984](https://github.com/adap/flower/pull/3984), " +"[#3917](https://github.com/adap/flower/pull/3917), " +"[#3900](https://github.com/adap/flower/pull/3900), " +"[#4066](https://github.com/adap/flower/pull/4066), " +"[#3765](https://github.com/adap/flower/pull/3765), " +"[#4021](https://github.com/adap/flower/pull/4021), " +"[#3906](https://github.com/adap/flower/pull/3906), " +"[#4063](https://github.com/adap/flower/pull/4063), " +"[#4076](https://github.com/adap/flower/pull/4076), " +"[#3920](https://github.com/adap/flower/pull/3920), " +"[#3916](https://github.com/adap/flower/pull/3916))" msgstr "" -"** 支持 SSL 的服务器和客户端** ([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +"**更新Example** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " 
+"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" -#: ../../source/ref-changelog.md:827 +#: ../../source/ref-changelog.md:90 +msgid "" +"Many parts of the documentation, including the main tutorial, have been " +"migrated to show new Flower APIs and other new Flower features like the " +"improved Docker support." +msgstr "" + +#: ../../source/ref-changelog.md:92 +msgid "" +"**Migrate code example to use new Flower APIs** " +"([#3758](https://github.com/adap/flower/pull/3758), " +"[#3701](https://github.com/adap/flower/pull/3701), " +"[#3919](https://github.com/adap/flower/pull/3919), " +"[#3918](https://github.com/adap/flower/pull/3918), " +"[#3934](https://github.com/adap/flower/pull/3934), " +"[#3893](https://github.com/adap/flower/pull/3893), " +"[#3833](https://github.com/adap/flower/pull/3833), " +"[#3922](https://github.com/adap/flower/pull/3922), " +"[#3846](https://github.com/adap/flower/pull/3846), " +"[#3777](https://github.com/adap/flower/pull/3777), " +"[#3874](https://github.com/adap/flower/pull/3874), " +"[#3873](https://github.com/adap/flower/pull/3873), " +"[#3935](https://github.com/adap/flower/pull/3935), " +"[#3754](https://github.com/adap/flower/pull/3754), " +"[#3980](https://github.com/adap/flower/pull/3980), " +"[#4089](https://github.com/adap/flower/pull/4089), " +"[#4046](https://github.com/adap/flower/pull/4046), " +"[#3314](https://github.com/adap/flower/pull/3314), " +"[#3316](https://github.com/adap/flower/pull/3316), " +"[#3295](https://github.com/adap/flower/pull/3295), " +"[#3313](https://github.com/adap/flower/pull/3313))" +msgstr "" + +#: ../../source/ref-changelog.md:94 +msgid "Many code examples have been migrated to use new Flower APIs." 
+msgstr "" + +#: ../../source/ref-changelog.md:96 +msgid "" +"**Update Flower framework, framework internals and quality " +"infrastructure** ([#4018](https://github.com/adap/flower/pull/4018), " +"[#4053](https://github.com/adap/flower/pull/4053), " +"[#4098](https://github.com/adap/flower/pull/4098), " +"[#4067](https://github.com/adap/flower/pull/4067), " +"[#4105](https://github.com/adap/flower/pull/4105), " +"[#4048](https://github.com/adap/flower/pull/4048), " +"[#4107](https://github.com/adap/flower/pull/4107), " +"[#4069](https://github.com/adap/flower/pull/4069), " +"[#3915](https://github.com/adap/flower/pull/3915), " +"[#4101](https://github.com/adap/flower/pull/4101), " +"[#4108](https://github.com/adap/flower/pull/4108), " +"[#3914](https://github.com/adap/flower/pull/3914), " +"[#4068](https://github.com/adap/flower/pull/4068), " +"[#4041](https://github.com/adap/flower/pull/4041), " +"[#4040](https://github.com/adap/flower/pull/4040), " +"[#3986](https://github.com/adap/flower/pull/3986), " +"[#4026](https://github.com/adap/flower/pull/4026), " +"[#3961](https://github.com/adap/flower/pull/3961), " +"[#3975](https://github.com/adap/flower/pull/3975), " +"[#3983](https://github.com/adap/flower/pull/3983), " +"[#4091](https://github.com/adap/flower/pull/4091), " +"[#3982](https://github.com/adap/flower/pull/3982), " +"[#4079](https://github.com/adap/flower/pull/4079), " +"[#4073](https://github.com/adap/flower/pull/4073), " +"[#4060](https://github.com/adap/flower/pull/4060), " +"[#4106](https://github.com/adap/flower/pull/4106), " +"[#4080](https://github.com/adap/flower/pull/4080), " +"[#3974](https://github.com/adap/flower/pull/3974), " +"[#3996](https://github.com/adap/flower/pull/3996), " +"[#3991](https://github.com/adap/flower/pull/3991), " +"[#3981](https://github.com/adap/flower/pull/3981), " +"[#4093](https://github.com/adap/flower/pull/4093), " +"[#4100](https://github.com/adap/flower/pull/4100), " 
+"[#3939](https://github.com/adap/flower/pull/3939), " +"[#3955](https://github.com/adap/flower/pull/3955), " +"[#3940](https://github.com/adap/flower/pull/3940), " +"[#4038](https://github.com/adap/flower/pull/4038))" +msgstr "" + +#: ../../source/ref-changelog.md:98 ../../source/ref-changelog.md:205 +msgid "" +"As always, many parts of the Flower framework and quality infrastructure " +"were improved and updated." +msgstr "" + +#: ../../source/ref-changelog.md:100 ../../source/ref-changelog.md:217 +#: ../../source/ref-changelog.md:309 ../../source/ref-changelog.md:1292 +msgid "Deprecations" +msgstr "停用" + +#: ../../source/ref-changelog.md:102 +#, fuzzy msgid "" -"SSL enables secure encrypted connections between clients and servers. " -"This release open-sources the Flower secure gRPC implementation to make " -"encrypted communication channels accessible to all Flower users." -msgstr "SSL 可实现客户端与服务器之间的安全加密连接。该版本开源了 Flower 安全 gRPC 实现,使所有 Flower 用户都能访问加密通信通道。" +"**Deprecate accessing `Context` via `Client.context`** " +"([#3797](https://github.com/adap/flower/pull/3797))" +msgstr "**移除过时的不操作额外安装** ([#973](https://github.com/adap/flower/pull/973))" -#: ../../source/ref-changelog.md:829 +#: ../../source/ref-changelog.md:104 msgid "" -"**Updated** `FedAdam` **and** `FedYogi` **strategies** " -"([#885](https://github.com/adap/flower/pull/885), " -"[#895](https://github.com/adap/flower/pull/895))" +"Now that both `client_fn` and `server_fn` receive a `Context` object, " +"accessing `Context` via `Client.context` is deprecated. `Client.context` " +"will be removed in a future release. 
If you need to access `Context` in " +"your `Client` implementation, pass it manually when creating the `Client`" +" instance in `client_fn`:" msgstr "" -"**更新**`FedAdam`**和**`FedYogi`**战略** " -"([#885](https://github.com/adap/flower/pull/885), " -"[#895](https://github.com/adap/flower/pull/895))" -#: ../../source/ref-changelog.md:831 +#: ../../source/ref-changelog.md:113 +#, fuzzy msgid "" -"`FedAdam` and `FedAdam` match the latest version of the Adaptive " -"Federated Optimization paper." -msgstr "FedAdam \"和 \"FedAdam \"与最新版本的 \"自适应联邦优化 \"论文相匹配。" +"**Update CLIs to accept an app directory instead of** `ClientApp` **and**" +" `ServerApp` ([#3952](https://github.com/adap/flower/pull/3952), " +"[#4077](https://github.com/adap/flower/pull/4077), " +"[#3850](https://github.com/adap/flower/pull/3850))" +msgstr "" +"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" -#: ../../source/ref-changelog.md:833 +#: ../../source/ref-changelog.md:115 msgid "" -"**Initialize** `start_simulation` **with a list of client IDs** " -"([#860](https://github.com/adap/flower/pull/860))" +"The CLI commands `flower-supernode` and `flower-server-app` now accept an" +" app directory as argument (instead of references to a `ClientApp` or " +"`ServerApp`). An app directory is any directory containing a " +"`pyproject.toml` file (with the appropriate Flower config fields set). " +"The easiest way to generate a compatible project structure is to use " +"`flwr new`." msgstr "" -"**初始化** `start_simulation` **使用客户端 ID 列表** " -"([#860](https://github.com/adap/flower/pull/860))" -#: ../../source/ref-changelog.md:835 +#: ../../source/ref-changelog.md:117 +#, fuzzy msgid "" -"`start_simulation` can now be called with a list of client IDs " -"(`clients_ids`, type: `List[str]`). 
Those IDs will be passed to the " -"`client_fn` whenever a client needs to be initialized, which can make it " -"easier to load data partitions that are not accessible through `int` " -"identifiers." +"**Disable** `flower-client-app` **CLI command** " +"([#4022](https://github.com/adap/flower/pull/4022))" msgstr "" -"现在可以使用客户端 ID 列表(`clients_ids`,类型:`List[str]`)调用 " -"`start_simulation`。每当需要初始化客户端时,这些 ID 就会被传递到 `client_fn` 中,这样就能更轻松地加载无法通过 " -"`int` 标识符访问的数据分区。" +"**介绍Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" -#: ../../source/ref-changelog.md:839 -msgid "" -"Update `num_examples` calculation in PyTorch code examples in " -"([#909](https://github.com/adap/flower/pull/909))" +#: ../../source/ref-changelog.md:119 +msgid "`flower-client-app` has been disabled. Use `flower-supernode` instead." msgstr "" -"更新 PyTorch 代码示例中的 \"num_examples \"计算 " -"([#909](https://github.com/adap/flower/pull/909))" -#: ../../source/ref-changelog.md:840 +#: ../../source/ref-changelog.md:121 +#, fuzzy msgid "" -"Expose Flower version through `flwr.__version__` " -"([#952](https://github.com/adap/flower/pull/952))" -msgstr "" -"通过 `flwr.__version__` 公开 Flower 版本 " -"([#952](https://github.com/adap/flower/pull/952))" +"**Use spaces instead of commas for separating config args** " +"([#4000](https://github.com/adap/flower/pull/4000))" +msgstr "**服务器和策略的自定义指标** ([#717](https://github.com/adap/flower/pull/717))" -#: ../../source/ref-changelog.md:841 +#: ../../source/ref-changelog.md:123 msgid "" -"`start_server` in `app.py` now returns a `History` object containing " -"metrics from training ([#974](https://github.com/adap/flower/pull/974))" +"When passing configs (run config, node config) to Flower, you now need to" +" separate key-value pairs using spaces instead of commas. 
For example:" msgstr "" -"`app.py`中的 `start_server`现在会返回一个 `History` " -"对象,其中包含训练中的指标([#974](https://github.com/adap/flower/pull/974))" -#: ../../source/ref-changelog.md:842 -msgid "" -"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " -"([#978](https://github.com/adap/flower/pull/978))" +#: ../../source/ref-changelog.md:129 +msgid "Previously, you could pass configs using commas, like this:" msgstr "" -"使 `max_workers`(由 " -"`ThreadPoolExecutor`使用)可配置([#978](https://github.com/adap/flower/pull/978))" -#: ../../source/ref-changelog.md:843 +#: ../../source/ref-changelog.md:135 +#, fuzzy msgid "" -"Increase sleep time after server start to three seconds in all code " -"examples ([#1086](https://github.com/adap/flower/pull/1086))" -msgstr "在所有代码示例中,将服务器启动后的休眠时间延长至三秒([#1086](https://github.com/adap/flower/pull/1086))" +"**Remove** `flwr example` **CLI command** " +"([#4084](https://github.com/adap/flower/pull/4084))" +msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:844 +#: ../../source/ref-changelog.md:137 msgid "" -"Added a new FAQ section to the documentation " -"([#948](https://github.com/adap/flower/pull/948))" -msgstr "在文档中添加了新的常见问题部分 ([#948](https://github.com/adap/flower/pull/948))" +"The experimental `flwr example` CLI command has been removed. Use `flwr " +"new` to generate a project and then run it using `flwr run`." +msgstr "" -#: ../../source/ref-changelog.md:845 -msgid "" -"And many more under-the-hood changes, library updates, documentation " -"changes, and tooling improvements!" -msgstr "还有更多底层更改、库更新、文档更改和工具改进!" 
+#: ../../source/ref-changelog.md:139 +#, fuzzy +msgid "v1.10.0 (2024-07-24)" +msgstr "v1.0.0 (2022-07-28)" -#: ../../source/ref-changelog.md:849 +#: ../../source/ref-changelog.md:145 +#, fuzzy msgid "" -"**Removed** `flwr_example` **and** `flwr_experimental` **from release " -"build** ([#869](https://github.com/adap/flower/pull/869))" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, " +"`Ikko Eltociear Ashimine`, `Javier`, `Jiahao Tan`, `Mohammad Naseri`, " +"`Robert Steiner`, `Sebastian van der Voort`, `Taner Topal`, `Yan Gao` " msgstr "" -"**从发布版中删除**`flwr_example`**和**`flwr_experimental`** " -"([#869](https://github.com/adap/flower/pull/869))" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-changelog.md:851 +#: ../../source/ref-changelog.md:149 +#, fuzzy msgid "" -"The packages `flwr_example` and `flwr_experimental` have been deprecated " -"since Flower 0.12.0 and they are not longer included in Flower release " -"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " -"an upcoming release." 
+"**Introduce** `flwr run` **(beta)** " +"([#3810](https://github.com/adap/flower/pull/3810), " +"[#3826](https://github.com/adap/flower/pull/3826), " +"[#3880](https://github.com/adap/flower/pull/3880), " +"[#3807](https://github.com/adap/flower/pull/3807), " +"[#3800](https://github.com/adap/flower/pull/3800), " +"[#3814](https://github.com/adap/flower/pull/3814), " +"[#3811](https://github.com/adap/flower/pull/3811), " +"[#3809](https://github.com/adap/flower/pull/3809), " +"[#3819](https://github.com/adap/flower/pull/3819))" msgstr "" -"自 Flower 0.12.0 起,软件包 `flwr_example` 和 `flwr_experimental` 已被弃用,它们不再包含在 " -"Flower 的发布版本中。相关的额外包(`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`)现在已不再使用,并将在即将发布的版本中移除。" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:853 -msgid "v0.17.0 (2021-09-24)" -msgstr "v0.17.0 (2021-09-24)" +#: ../../source/ref-changelog.md:151 +msgid "" +"Flower 1.10 ships the first beta release of the new `flwr run` command. " +"`flwr run` can run different projects using `flwr run path/to/project`, " +"it enables you to easily switch between different federations using `flwr" +" run . federation` and it runs your Flower project using either local " +"simulation or the new (experimental) SuperExec service. This allows " +"Flower to scale federatated learning from fast local simulation to large-" +"scale production deployment, seamlessly. All projects generated with " +"`flwr new` are immediately runnable using `flwr run`. Give it a try: use " +"`flwr new` to generate a project and then run it using `flwr run`." 
+msgstr "" + +#: ../../source/ref-changelog.md:153 +#, fuzzy +msgid "" +"**Introduce run config** " +"([#3751](https://github.com/adap/flower/pull/3751), " +"[#3750](https://github.com/adap/flower/pull/3750), " +"[#3845](https://github.com/adap/flower/pull/3845), " +"[#3824](https://github.com/adap/flower/pull/3824), " +"[#3746](https://github.com/adap/flower/pull/3746), " +"[#3728](https://github.com/adap/flower/pull/3728), " +"[#3730](https://github.com/adap/flower/pull/3730), " +"[#3725](https://github.com/adap/flower/pull/3725), " +"[#3729](https://github.com/adap/flower/pull/3729), " +"[#3580](https://github.com/adap/flower/pull/3580), " +"[#3578](https://github.com/adap/flower/pull/3578), " +"[#3576](https://github.com/adap/flower/pull/3576), " +"[#3798](https://github.com/adap/flower/pull/3798), " +"[#3732](https://github.com/adap/flower/pull/3732), " +"[#3815](https://github.com/adap/flower/pull/3815))" +msgstr "" +"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:857 +#: ../../source/ref-changelog.md:155 msgid "" -"**Experimental virtual client engine** " -"([#781](https://github.com/adap/flower/pull/781) " -"[#790](https://github.com/adap/flower/pull/790) " -"[#791](https://github.com/adap/flower/pull/791))" +"The new run config feature allows you to run your Flower project in " +"different configurations without having to change a single line of code. " +"You can now build a configurable `ServerApp` and `ClientApp` that read " +"configuration values at runtime. 
This enables you to specify config " +"values like `learning-rate=0.01` in `pyproject.toml` (under the " +"`[tool.flwr.app.config]` key). These config values can then be easily " +"overridden via `flwr run --run-config learning-rate=0.02`, and read from " +"`Context` using `lr = context.run_config[\"learning-rate\"]`. Create a " +"new project using `flwr new` to see run config in action." msgstr "" -"**实验性虚拟客户端引擎** ([#781](https://github.com/adap/flower/pull/781) " -"[#790](https://github.com/adap/flower/pull/790) " -"[#791](https://github.com/adap/flower/pull/791))" -#: ../../source/ref-changelog.md:859 +#: ../../source/ref-changelog.md:157 +#, fuzzy msgid "" -"One of Flower's goals is to enable research at scale. This release " -"enables a first (experimental) peek at a major new feature, codenamed the" -" virtual client engine. Virtual clients enable simulations that scale to " -"a (very) large number of clients on a single machine or compute cluster. " -"The easiest way to test the new functionality is to look at the two new " -"code examples called `quickstart_simulation` and `simulation_pytorch`." 
+"**Generalize** `client_fn` **signature to** `client_fn(context: Context) " +"-> Client` ([#3779](https://github.com/adap/flower/pull/3779), " +"[#3697](https://github.com/adap/flower/pull/3697), " +"[#3694](https://github.com/adap/flower/pull/3694), " +"[#3696](https://github.com/adap/flower/pull/3696))" msgstr "" -"Flower 的目标之一是实现大规模研究。这一版本首次(试验性地)展示了代号为 \"虚拟客户端引擎 " -"\"的重要新功能。虚拟客户端可以在单台机器或计算集群上对大量客户端进行模拟。测试新功能的最简单方法是查看名为 " -"\"quickstart_simulation \"和 \"simulation_pytorch \"的两个新代码示例。" +"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " +"[#2528](https://github/com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" -#: ../../source/ref-changelog.md:861 +#: ../../source/ref-changelog.md:159 msgid "" -"The feature is still experimental, so there's no stability guarantee for " -"the API. It's also not quite ready for prime time and comes with a few " -"known caveats. However, those who are curious are encouraged to try it " -"out and share their thoughts." +"The `client_fn` signature has been generalized to `client_fn(context: " +"Context) -> Client`. It now receives a `Context` object instead of the " +"(now depreacated) `cid: str`. `Context` allows accessing `node_id`, " +"`node_config` and `run_config`, among other things. This enables you to " +"build a configurable `ClientApp` that leverages the new run config " +"system." msgstr "" -"该功能仍处于试验阶段,因此无法保证 API " -"的稳定性。此外,它还没有完全准备好进入黄金时间,并有一些已知的注意事项。不过,我们鼓励好奇的用户尝试使用并分享他们的想法。" -#: ../../source/ref-changelog.md:863 +#: ../../source/ref-changelog.md:161 msgid "" -"**New built-in strategies** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" +"The previous signature `client_fn(cid: str)` is now deprecated and " +"support for it will be removed in a future release. Use " +"`client_fn(context: Context) -> Client` everywhere." 
msgstr "" -"**新的内置策略**([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822)" -#: ../../source/ref-changelog.md:865 +#: ../../source/ref-changelog.md:163 +#, fuzzy msgid "" -"FedYogi - Federated learning strategy using Yogi on server-side. " -"Implementation based on https://arxiv.org/abs/2003.00295" -msgstr "FedYogi - 在服务器端使用 Yogi 的联邦学习策略。基于 https://arxiv.org/abs/2003.00295 实现" +"**Introduce new** `server_fn(context)` " +"([#3773](https://github.com/adap/flower/pull/3773), " +"[#3796](https://github.com/adap/flower/pull/3796), " +"[#3771](https://github.com/adap/flower/pull/3771))" +msgstr "" +"**引入可选遥测**([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584)" -#: ../../source/ref-changelog.md:866 +#: ../../source/ref-changelog.md:165 msgid "" -"FedAdam - Federated learning strategy using Adam on server-side. " -"Implementation based on https://arxiv.org/abs/2003.00295" -msgstr "FedAdam - 在服务器端使用 Adam 的联邦学习策略。基于 https://arxiv.org/abs/2003.00295 实现" +"In addition to the new `client_fn(context:Context)`, a new " +"`server_fn(context: Context) -> ServerAppComponents` can now be passed to" +" `ServerApp` (instead of passing, for example, `Strategy`, directly). " +"This enables you to leverage the full `Context` on the server-side to " +"build a configurable `ServerApp`." 
+msgstr "" -#: ../../source/ref-changelog.md:868 +#: ../../source/ref-changelog.md:167 +#, fuzzy msgid "" -"**New PyTorch Lightning code example** " -"([#617](https://github.com/adap/flower/pull/617))" +"**Relaunch all** `flwr new` **templates** " +"([#3877](https://github.com/adap/flower/pull/3877), " +"[#3821](https://github.com/adap/flower/pull/3821), " +"[#3587](https://github.com/adap/flower/pull/3587), " +"[#3795](https://github.com/adap/flower/pull/3795), " +"[#3875](https://github.com/adap/flower/pull/3875), " +"[#3859](https://github.com/adap/flower/pull/3859), " +"[#3760](https://github.com/adap/flower/pull/3760))" msgstr "" -"**新的 PyTorch Lightning 代码示例** " -"([#617](https://github.com/adap/flower/pull/617))" +"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614)))" -#: ../../source/ref-changelog.md:870 +#: ../../source/ref-changelog.md:169 msgid "" -"**New Variational Auto-Encoder code example** " -"([#752](https://github.com/adap/flower/pull/752))" -msgstr "**新的变分自动编码器代码示例** ([#752](https://github.com/adap/flower/pull/752))" +"All `flwr new` templates have been significantly updated to showcase new " +"Flower features and best practices. This includes using `flwr run` and " +"the new run config feature. You can now easily create a new project using" +" `flwr new` and, after following the instructions to install it, `flwr " +"run` it." 
+msgstr "" -#: ../../source/ref-changelog.md:872 +#: ../../source/ref-changelog.md:171 +#, fuzzy msgid "" -"**New scikit-learn code example** " -"([#748](https://github.com/adap/flower/pull/748))" -msgstr "**新的 scikit-learn 代码示例** ([#748](https://github.com/adap/flower/pull/748))" +"**Introduce** `flower-supernode` **(preview)** " +"([#3353](https://github.com/adap/flower/pull/3353))" +msgstr "" +"**介绍Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" -#: ../../source/ref-changelog.md:874 +#: ../../source/ref-changelog.md:173 msgid "" -"**New experimental TensorBoard strategy** " -"([#789](https://github.com/adap/flower/pull/789))" -msgstr "**新的实验性 TensorBoard 策略**([#789](https://github.com/adap/flower/pull/789))" +"The new `flower-supernode` CLI is here to replace `flower-client-app`. " +"`flower-supernode` brings full multi-app support to the Flower client-" +"side. It also allows to pass `--node-config` to the SuperNode, which is " +"accessible in your `ClientApp` via `Context` (using the new " +"`client_fn(context: Context)` signature)." 
+msgstr "" -#: ../../source/ref-changelog.md:878 +#: ../../source/ref-changelog.md:175 +#, fuzzy msgid "" -"Improved advanced TensorFlow code example " -"([#769](https://github.com/adap/flower/pull/769))" -msgstr "改进的高级 TensorFlow 代码示例([#769](https://github.com/adap/flower/pull/769)" +"**Introduce node config** " +"([#3782](https://github.com/adap/flower/pull/3782), " +"[#3780](https://github.com/adap/flower/pull/3780), " +"[#3695](https://github.com/adap/flower/pull/3695), " +"[#3886](https://github.com/adap/flower/pull/3886))" +msgstr "" +"**引入新的 Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679)" -#: ../../source/ref-changelog.md:879 +#: ../../source/ref-changelog.md:177 msgid "" -"Warning when `min_available_clients` is misconfigured " -"([#830](https://github.com/adap/flower/pull/830))" +"A new node config feature allows you to pass a static configuration to " +"the SuperNode. This configuration is read-only and available to every " +"`ClientApp` running on that SuperNode. A `ClientApp` can access the node " +"config via `Context` (`context.node_config`)." 
msgstr "" -"当 `min_available_clients` 配置错误时发出警告 " -"([#830](https://github.com/adap/flower/pull/830))" -#: ../../source/ref-changelog.md:880 +#: ../../source/ref-changelog.md:179 msgid "" -"Improved gRPC server docs " -"([#841](https://github.com/adap/flower/pull/841))" -msgstr "改进了 gRPC 服务器文档([#841](https://github.com/adap/flower/pull/841))" +"**Introduce SuperExec (experimental)** " +"([#3605](https://github.com/adap/flower/pull/3605), " +"[#3723](https://github.com/adap/flower/pull/3723), " +"[#3731](https://github.com/adap/flower/pull/3731), " +"[#3589](https://github.com/adap/flower/pull/3589), " +"[#3604](https://github.com/adap/flower/pull/3604), " +"[#3622](https://github.com/adap/flower/pull/3622), " +"[#3838](https://github.com/adap/flower/pull/3838), " +"[#3720](https://github.com/adap/flower/pull/3720), " +"[#3606](https://github.com/adap/flower/pull/3606), " +"[#3602](https://github.com/adap/flower/pull/3602), " +"[#3603](https://github.com/adap/flower/pull/3603), " +"[#3555](https://github.com/adap/flower/pull/3555), " +"[#3808](https://github.com/adap/flower/pull/3808), " +"[#3724](https://github.com/adap/flower/pull/3724), " +"[#3658](https://github.com/adap/flower/pull/3658), " +"[#3629](https://github.com/adap/flower/pull/3629))" +msgstr "" -#: ../../source/ref-changelog.md:881 +#: ../../source/ref-changelog.md:181 msgid "" -"Improved error message in `NumPyClient` " -"([#851](https://github.com/adap/flower/pull/851))" -msgstr "改进了 `NumPyClient` 中的错误信息 ([#851](https://github.com/adap/flower/pull/851))" +"This is the first experimental release of Flower SuperExec, a new service" +" that executes your runs. It's not ready for production deployment just " +"yet, but don't hesitate to give it a try if you're interested." 
+msgstr "" -#: ../../source/ref-changelog.md:882 +#: ../../source/ref-changelog.md:183 +#, fuzzy msgid "" -"Improved PyTorch quickstart code example " -"([#852](https://github.com/adap/flower/pull/852))" -msgstr "改进的 PyTorch 快速启动代码示例 ([#852](https://github.com/adap/flower/pull/852))" +"**Add new federated learning with tabular data example** " +"([#3568](https://github.com/adap/flower/pull/3568))" +msgstr "" +"** 添加使用 fastai 和 Flower 进行联邦学习的新示例** " +"([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/ref-changelog.md:886 +#: ../../source/ref-changelog.md:185 msgid "" -"**Disabled final distributed evaluation** " -"([#800](https://github.com/adap/flower/pull/800))" -msgstr "**禁用最终分布式评价** ([#800](https://github.com/adap/flower/pull/800))" +"A new code example exemplifies a federated learning setup using the " +"Flower framework on the Adult Census Income tabular dataset." +msgstr "" -#: ../../source/ref-changelog.md:888 +#: ../../source/ref-changelog.md:187 +#, fuzzy msgid "" -"Prior behaviour was to perform a final round of distributed evaluation on" -" all connected clients, which is often not required (e.g., when using " -"server-side evaluation). The prior behaviour can be enabled by passing " -"`force_final_distributed_eval=True` to `start_server`." 
+"**Create generic adapter layer (preview)** " +"([#3538](https://github.com/adap/flower/pull/3538), " +"[#3536](https://github.com/adap/flower/pull/3536), " +"[#3540](https://github.com/adap/flower/pull/3540))" msgstr "" -"之前的行为是在所有连接的客户端上执行最后一轮分布式评估,而这通常是不需要的(例如,在使用服务器端评估时)。可以通过向 `start_server`" -" 传递 `force_final_distributed_eval=True` 来启用之前的行为。" +"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" -#: ../../source/ref-changelog.md:890 +#: ../../source/ref-changelog.md:189 msgid "" -"**Renamed q-FedAvg strategy** " -"([#802](https://github.com/adap/flower/pull/802))" -msgstr "**更名为 q-FedAvg 策略** ([#802](https://github.com/adap/flower/pull/802))" +"A new generic gRPC adapter layer allows 3rd-party frameworks to integrate" +" with Flower in a transparent way. This makes Flower more modular and " +"allows for integration into other federated learning solutions and " +"platforms." +msgstr "" -#: ../../source/ref-changelog.md:892 +#: ../../source/ref-changelog.md:191 +#, fuzzy msgid "" -"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " -"the notation given in the original paper (q-FFL is the optimization " -"objective, q-FedAvg is the proposed solver). Note the original (now " -"deprecated) `QffedAvg` class is still available for compatibility reasons" -" (it will be removed in a future release)." 
+"**Refactor Flower Simulation Engine** " +"([#3581](https://github.com/adap/flower/pull/3581), " +"[#3471](https://github.com/adap/flower/pull/3471), " +"[#3804](https://github.com/adap/flower/pull/3804), " +"[#3468](https://github.com/adap/flower/pull/3468), " +"[#3839](https://github.com/adap/flower/pull/3839), " +"[#3806](https://github.com/adap/flower/pull/3806), " +"[#3861](https://github.com/adap/flower/pull/3861), " +"[#3543](https://github.com/adap/flower/pull/3543), " +"[#3472](https://github.com/adap/flower/pull/3472), " +"[#3829](https://github.com/adap/flower/pull/3829), " +"[#3469](https://github.com/adap/flower/pull/3469))" msgstr "" -"名为 `QffedAvg` 的策略已更名为 `QFedAvg`,以更好地反映原始论文中给出的符号(q-FFL 是优化目标,q-FedAvg " -"是建议的求解器)。请注意,出于兼容性原因,原始(现已废弃)的 `QffedAvg` 类仍然可用(它将在未来的版本中移除)。" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:894 +#: ../../source/ref-changelog.md:193 msgid "" -"**Deprecated and renamed code example** `simulation_pytorch` **to** " -"`simulation_pytorch_legacy` " -"([#791](https://github.com/adap/flower/pull/791))" +"The Simulation Engine was significantly refactored. This results in " +"faster and more stable simulations. It is also the foundation for " +"upcoming changes that aim to provide the next level of performance and " +"configurability in federated learning simulations." msgstr "" -"**删除并重命名代码示例**`simulation_pytorch`**为**`simulation_pytorch_legacy` " -"([#791](https://github.com/adap/flower/pull/791))" -#: ../../source/ref-changelog.md:896 +#: ../../source/ref-changelog.md:195 +#, fuzzy msgid "" -"This example has been replaced by a new example. 
The new example is based" -" on the experimental virtual client engine, which will become the new " -"default way of doing most types of large-scale simulations in Flower. The" -" existing example was kept for reference purposes, but it might be " -"removed in the future." -msgstr "" -"该示例已被新示例取代。新示例基于试验性虚拟客户端引擎,它将成为在 Flower " -"中进行大多数类型大规模模拟的新的默认方式。现有示例将作为参考保留,但将来可能会删除。" - -#: ../../source/ref-changelog.md:898 -msgid "v0.16.0 (2021-05-11)" -msgstr "v0.16.0 (2021-05-11)" - -#: ../../source/ref-changelog.md:902 -msgid "" -"**New built-in strategies** " -"([#549](https://github.com/adap/flower/pull/549))" -msgstr "**新的内置策略** ([#549](https://github.com/adap/flower/pull/549))" - -#: ../../source/ref-changelog.md:904 -msgid "(abstract) FedOpt" -msgstr "(摘要) FedOpt" +"**Optimize Docker containers** " +"([#3591](https://github.com/adap/flower/pull/3591))" +msgstr "新文档主题 ([#551](https://github.com/adap/flower/pull/551))" -#: ../../source/ref-changelog.md:907 +#: ../../source/ref-changelog.md:197 msgid "" -"**Custom metrics for server and strategies** " -"([#717](https://github.com/adap/flower/pull/717))" -msgstr "**服务器和策略的自定义指标** ([#717](https://github.com/adap/flower/pull/717))" +"Flower Docker containers were optimized and updated to use that latest " +"Flower framework features." +msgstr "" -#: ../../source/ref-changelog.md:909 +#: ../../source/ref-changelog.md:199 +#, fuzzy msgid "" -"The Flower server is now fully task-agnostic, all remaining instances of " -"task-specific metrics (such as `accuracy`) have been replaced by custom " -"metrics dictionaries. Flower 0.15 introduced the capability to pass a " -"dictionary containing custom metrics from client to server. As of this " -"release, custom metrics replace task-specific metrics on the server." 
+"**Improve logging** ([#3776](https://github.com/adap/flower/pull/3776), " +"[#3789](https://github.com/adap/flower/pull/3789))" msgstr "" -"Flower 服务器现在完全与任务无关,所有剩余的任务特定度量(如 \"准确度\")都已被自定义度量字典取代。Flower 0.15 " -"引入了从客户端向服务器传递包含自定义指标的字典的功能。从本版本开始,自定义指标将取代服务器上的特定任务指标。" +"** 更新代码示例** ([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:911 +#: ../../source/ref-changelog.md:201 msgid "" -"Custom metric dictionaries are now used in two user-facing APIs: they are" -" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " -"they enable evaluation functions passed to built-in strategies (via " -"`eval_fn`) to return more than two evaluation metrics. Strategies can " -"even return *aggregated* metrics dictionaries for the server to keep " -"track of." +"Improved logging aims to be more concise and helpful to show you the " +"details you actually care about." msgstr "" -"自定义度量字典现在可在两个面向用户的 API 中使用:它们可从策略方法 `aggregate_fit`/`aggregate_evaluate` " -"返回,还可使传递给内置策略(通过 `eval_fn`)的评估函数返回两个以上的评估度量。策略甚至可以返回 *aggregated* " -"指标字典,以便服务器跟踪。" -#: ../../source/ref-changelog.md:913 +#: ../../source/ref-changelog.md:203 +#, fuzzy msgid "" -"Strategy implementations should migrate their `aggregate_fit` and " -"`aggregate_evaluate` methods to the new return type (e.g., by simply " -"returning an empty `{}`), server-side evaluation functions should migrate" -" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." 
+"**Refactor framework internals** " +"([#3621](https://github.com/adap/flower/pull/3621), " +"[#3792](https://github.com/adap/flower/pull/3792), " +"[#3772](https://github.com/adap/flower/pull/3772), " +"[#3805](https://github.com/adap/flower/pull/3805), " +"[#3583](https://github.com/adap/flower/pull/3583), " +"[#3825](https://github.com/adap/flower/pull/3825), " +"[#3597](https://github.com/adap/flower/pull/3597), " +"[#3802](https://github.com/adap/flower/pull/3802), " +"[#3569](https://github.com/adap/flower/pull/3569))" msgstr "" -"Strategy 实现应将其 `aggregate_fit` 和 `aggregate_evaluate` " -"方法迁移到新的返回类型(例如,只需返回空的 `{}`),服务器端评估函数应从 `return loss, accuracy` 迁移到 " -"`return loss, {\"accuracy\": accuracy}`。" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:915 -msgid "" -"Flower 0.15-style return types are deprecated (but still supported), " -"compatibility will be removed in a future release."
-msgstr "Flower 0.15 风格的返回类型已被弃用(但仍受支持),兼容性将在未来的版本中移除。" +#: ../../source/ref-changelog.md:207 +#, fuzzy +msgid "Documentation improvements" +msgstr "可选的改进措施" -#: ../../source/ref-changelog.md:917 +#: ../../source/ref-changelog.md:209 +#, fuzzy msgid "" -"**Migration warnings for deprecated functionality** " -"([#690](https://github.com/adap/flower/pull/690))" -msgstr "** 过时功能的迁移警告** ([#690](https://github.com/adap/flower/pull/690))" +"**Add 🇰🇷 Korean translations** " +"([#3680](https://github.com/adap/flower/pull/3680))" +msgstr "**在 Colab 中打开按钮** ([#1389](https://github.com/adap/flower/pull/1389))" -#: ../../source/ref-changelog.md:919 +#: ../../source/ref-changelog.md:211 +#, fuzzy msgid "" -"Earlier versions of Flower were often migrated to new APIs, while " -"maintaining compatibility with legacy APIs. This release introduces " -"detailed warning messages if usage of deprecated APIs is detected. The " -"new warning messages often provide details on how to migrate to more " -"recent APIs, thus easing the transition from one release to another." 
+"**Update translations** " +"([#3586](https://github.com/adap/flower/pull/3586), " +"[#3679](https://github.com/adap/flower/pull/3679), " +"[#3570](https://github.com/adap/flower/pull/3570), " +"[#3681](https://github.com/adap/flower/pull/3681), " +"[#3617](https://github.com/adap/flower/pull/3617), " +"[#3674](https://github.com/adap/flower/pull/3674), " +"[#3671](https://github.com/adap/flower/pull/3671), " +"[#3572](https://github.com/adap/flower/pull/3572), " +"[#3631](https://github.com/adap/flower/pull/3631))" msgstr "" -"Flower 早期版本通常会迁移到新的应用程序接口,同时保持与旧版应用程序接口的兼容。如果检测到使用了过时的 " -"API,本版本将引入详细的警告信息。新的警告信息通常会详细说明如何迁移到更新的 API,从而简化从一个版本到另一个版本的过渡。" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:921 +#: ../../source/ref-changelog.md:213 +#, fuzzy msgid "" -"Improved docs and docstrings " -"([#691](https://github.com/adap/flower/pull/691) " -"[#692](https://github.com/adap/flower/pull/692) " -"[#713](https://github.com/adap/flower/pull/713))" +"**Update documentation** " +"([#3864](https://github.com/adap/flower/pull/3864), " +"[#3688](https://github.com/adap/flower/pull/3688), " +"[#3562](https://github.com/adap/flower/pull/3562), " +"[#3641](https://github.com/adap/flower/pull/3641), " +"[#3384](https://github.com/adap/flower/pull/3384), " +"[#3634](https://github.com/adap/flower/pull/3634), " +"[#3823](https://github.com/adap/flower/pull/3823), " +"[#3793](https://github.com/adap/flower/pull/3793), " +"[#3707](https://github.com/adap/flower/pull/3707))" msgstr "" -"改进了文档和文档说明 ([#691](https://github.com/adap/flower/pull/691) " -"[#692](https://github.com/adap/flower/pull/692) " -"[#713](https://github.com/adap/flower/pull/713))" - -#: 
../../source/ref-changelog.md:923 -msgid "MXNet example and documentation" -msgstr "MXNet 示例和文档" +"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" -#: ../../source/ref-changelog.md:925 +#: ../../source/ref-changelog.md:215 msgid "" -"FedBN implementation in example PyTorch: From Centralized To Federated " -"([#696](https://github.com/adap/flower/pull/696) " -"[#702](https://github.com/adap/flower/pull/702) " -"[#705](https://github.com/adap/flower/pull/705))" +"Updated documentation includes new install instructions for different " +"shells, a new Flower Code Examples documentation landing page, new `flwr`" +" CLI docs and an updated federated XGBoost code example." msgstr "" -"PyTorch 示例中的 FedBN 实现: 从集中到联邦 " -"([#696](https://github.com/adap/flower/pull/696) " -"[#702](https://github.com/adap/flower/pull/702) " -"[#705](https://github.com/adap/flower/pull/705))" -#: ../../source/ref-changelog.md:929 -msgid "" -"**Serialization-agnostic server** " -"([#721](https://github.com/adap/flower/pull/721))" -msgstr "**序列化无关服务器** ([#721](https://github.com/adap/flower/pull/721))" +#: ../../source/ref-changelog.md:219 +msgid "**Deprecate** `client_fn(cid: str)`" +msgstr "" -#: ../../source/ref-changelog.md:931 +#: ../../source/ref-changelog.md:221 msgid "" -"The Flower server is now fully serialization-agnostic. Prior usage of " -"class `Weights` (which represents parameters as deserialized NumPy " -"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). 
" -"`Parameters` objects are fully serialization-agnostic and represents " -"parameters as byte arrays, the `tensor_type` attributes indicates how " -"these byte arrays should be interpreted (e.g., for " -"serialization/deserialization)." +"`client_fn` used to have a signature `client_fn(cid: str) -> Client`. " +"This signature is now deprecated. Use the new signature " +"`client_fn(context: Context) -> Client` instead. The new argument " +"`context` allows accessing `node_id`, `node_config`, `run_config` and " +"other `Context` features. When running using the simulation engine (or " +"using `flower-supernode` with a custom `--node-config partition-id=...`)," +" `context.node_config[\"partition-id\"]` will return an `int` partition " +"ID that can be used with Flower Datasets to load a different partition of" +" the dataset on each simulated or deployed SuperNode." msgstr "" -"Flower 服务器现在完全不依赖序列化。之前使用的 `Weights` 类(以反序列化的 NumPy ndarrays 表示参数)已被 " -"`Parameters` 类取代(例如在 `Strategy`中)。参数 " -"\"对象与序列化完全无关,它以字节数组的形式表示参数,\"tensor_type \"属性表示如何解释这些字节数组(例如,用于序列化/反序列化)。" -#: ../../source/ref-changelog.md:933 +#: ../../source/ref-changelog.md:223 msgid "" -"Built-in strategies implement this approach by handling serialization and" -" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " -"implementations should update to the slightly changed Strategy method " -"definitions. Strategy authors can consult PR " -"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" -" easily migrate to the new format." 
+"**Deprecate passing** `Server/ServerConfig/Strategy/ClientManager` **to**" +" `ServerApp` **directly**" msgstr "" -"内置策略通过在内部处理序列化和反序列化到/从`Weights`来实现这种方法。自定义/第三方策略实现应更新为稍有改动的策略方法定义。策略作者可查阅" -" PR [#721](https://github.com/adap/flower/pull/721) 以了解如何将策略轻松迁移到新格式。" -#: ../../source/ref-changelog.md:935 +#: ../../source/ref-changelog.md:225 msgid "" -"Deprecated `flwr.server.Server.evaluate`, use " -"`flwr.server.Server.evaluate_round` instead " -"([#717](https://github.com/adap/flower/pull/717))" +"Creating `ServerApp` using `ServerApp(config=config, strategy=strategy)` " +"is now deprecated. Instead of passing " +"`Server/ServerConfig/Strategy/ClientManager` to `ServerApp` directly, " +"pass them wrapped in a `server_fn(context: Context) -> " +"ServerAppComponents` function, like this: " +"`ServerApp(server_fn=server_fn)`. `ServerAppComponents` can hold " +"references to `Server/ServerConfig/Strategy/ClientManager`. In addition " +"to that, `server_fn` allows you to access `Context` (for example, to read" +" the `run_config`)." msgstr "" -"已弃用 `flwr.server.Server.evaluate`,改用 " -"`flwr.server.Server.evaluate_round`([#717](https://github.com/adap/flower/pull/717)" - -#: ../../source/ref-changelog.md:937 -msgid "v0.15.0 (2021-03-12)" -msgstr "v0.15.0 (2021-03-12)" -#: ../../source/ref-changelog.md:941 +#: ../../source/ref-changelog.md:229 +#, fuzzy msgid "" -"**Server-side parameter initialization** " -"([#658](https://github.com/adap/flower/pull/658))" -msgstr "**服务器端参数初始化** ([#658](https://github.com/adap/flower/pull/658))" +"**Remove support for `client_ids` in `start_simulation`** " +"([#3699](https://github.com/adap/flower/pull/3699))" +msgstr "**改进模拟中的 GPU 支持**([#1555](https://github.com/adap/flower/pull/1555))" -#: ../../source/ref-changelog.md:943 +#: ../../source/ref-changelog.md:231 msgid "" -"Model parameters can now be initialized on the server-side. 
Server-side " -"parameter initialization works via a new `Strategy` method called " -"`initialize_parameters`." +"The (rarely used) feature that allowed passing custom `client_ids` to the" +" `start_simulation` function was removed. This removal is part of a " +"bigger effort to refactor the simulation engine and unify how the Flower " +"internals work in simulation and deployment." msgstr "" -"现在可以在服务器端初始化模型参数。服务器端参数初始化通过名为 \"initialize_parameters \"的新 \"Strategy " -"\"方法进行。" -#: ../../source/ref-changelog.md:945 +#: ../../source/ref-changelog.md:233 +#, fuzzy msgid "" -"Built-in strategies support a new constructor argument called " -"`initial_parameters` to set the initial parameters. Built-in strategies " -"will provide these initial parameters to the server on startup and then " -"delete them to free the memory afterwards." -msgstr "" -"内置策略支持名为 \"initial_parameters " -"\"的新构造函数参数,用于设置初始参数。内置策略会在启动时向服务器提供这些初始参数,然后删除它们以释放内存。" +"**Remove `flower-driver-api` and `flower-fleet-api`** " +"([#3418](https://github.com/adap/flower/pull/3418))" +msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:964 +#: ../../source/ref-changelog.md:235 msgid "" -"If no initial parameters are provided to the strategy, the server will " -"continue to use the current behaviour (namely, it will ask one of the " -"connected clients for its parameters and use these as the initial global " -"parameters)." -msgstr "如果没有向策略提供初始参数,服务器将继续使用当前行为(即向其中一个已连接的客户端询问参数,并将这些参数用作初始全局参数)。" +"The two deprecated CLI commands `flower-driver-api` and `flower-fleet-" +"api` were removed in an effort to streamline the SuperLink developer " +"experience. Use `flower-superlink` instead." 
+msgstr "" -#: ../../source/ref-changelog.md:966 -msgid "Deprecations" -msgstr "停用" +#: ../../source/ref-changelog.md:237 +#, fuzzy +msgid "v1.9.0 (2024-06-10)" +msgstr "v1.3.0 (2023-02-06)" -#: ../../source/ref-changelog.md:968 +#: ../../source/ref-changelog.md:243 +#, fuzzy msgid "" -"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " -"`flwr.server.strategy.FedAvg`, which is equivalent)" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Mahdi Beitollahi`," +" `Robert Steiner`, `Taner Topal`, `Yan Gao`, `bapic`, `mohammadnaseri` " msgstr "" -"停用 `flwr.server.strategy.DefaultStrategy`(迁移到等价的 " -"`flwr.server.strategy.FedAvg`)" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-changelog.md:970 -msgid "v0.14.0 (2021-02-18)" -msgstr "v0.14.0 (2021-02-18)" +#: ../../source/ref-changelog.md:247 +#, fuzzy +msgid "" +"**Introduce built-in authentication (preview)** " +"([#2946](https://github.com/adap/flower/pull/2946), " +"[#3388](https://github.com/adap/flower/pull/3388), " +"[#2948](https://github.com/adap/flower/pull/2948), " +"[#2917](https://github.com/adap/flower/pull/2917), " +"[#3386](https://github.com/adap/flower/pull/3386), " +"[#3308](https://github.com/adap/flower/pull/3308), " +"[#3001](https://github.com/adap/flower/pull/3001), " +"[#3409](https://github.com/adap/flower/pull/3409), " +"[#2999](https://github.com/adap/flower/pull/2999), " +"[#2979](https://github.com/adap/flower/pull/2979), " +"[#3389](https://github.com/adap/flower/pull/3389), " +"[#3503](https://github.com/adap/flower/pull/3503), " +"[#3366](https://github.com/adap/flower/pull/3366), " +"[#3357](https://github.com/adap/flower/pull/3357))" 
+msgstr "" +"** 更新文档** ([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/ref-changelog.md:974 +#: ../../source/ref-changelog.md:249 msgid "" -"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " -"([#610](https://github.com/adap/flower/pull/610) " -"[#572](https://github.com/adap/flower/pull/572) " -"[#633](https://github.com/adap/flower/pull/633))" +"Flower 1.9 introduces the first build-in version of client node " +"authentication. In previous releases, users often wrote glue code to " +"connect Flower to external authentication systems. With this release, the" +" SuperLink can authenticate SuperNodes using a built-in authentication " +"system. A new [how-to guide](https://flower.ai/docs/framework/how-to-" +"authenticate-supernodes.html) and a new [code " +"example](https://github.com/adap/flower/tree/main/examples/flower-" +"authentication) help you to get started." msgstr "" -"**通用** `Client.fit` **和** `Client.evaluate` **返回值** " -"([#610](https://github.com/adap/flower/pull/610) " -"[#572](https://github.com/adap/flower/pull/572) " -"[#633](https://github.com/adap/flower/pull/633))" -#: ../../source/ref-changelog.md:976 +#: ../../source/ref-changelog.md:251 msgid "" -"Clients can now return an additional dictionary mapping `str` keys to " -"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " -"This means one can return almost arbitrary values from `fit`/`evaluate` " -"and make use of them on the server side!" 
+"This is the first preview release of the Flower-native authentication " +"system. Many additional features are on the roadmap for upcoming Flower " +"releases - stay tuned." msgstr "" -"客户端现在可以返回一个额外的字典,将 `str` 键映射为以下类型的值: " -"bool`、`bytes`、`float`、`int`、`str`。这意味着我们可以从 `fit`/`evaluate` " -"返回几乎任意的值,并在服务器端使用它们!" -#: ../../source/ref-changelog.md:978 +#: ../../source/ref-changelog.md:253 +#, fuzzy msgid "" -"This improvement also allowed for more consistent return types between " -"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " -"dict)` representing the loss, number of examples, and a dictionary " -"holding arbitrary problem-specific values like accuracy." +"**Introduce end-to-end Docker support** " +"([#3483](https://github.com/adap/flower/pull/3483), " +"[#3266](https://github.com/adap/flower/pull/3266), " +"[#3390](https://github.com/adap/flower/pull/3390), " +"[#3283](https://github.com/adap/flower/pull/3283), " +"[#3285](https://github.com/adap/flower/pull/3285), " +"[#3391](https://github.com/adap/flower/pull/3391), " +"[#3403](https://github.com/adap/flower/pull/3403), " +"[#3458](https://github.com/adap/flower/pull/3458), " +"[#3533](https://github.com/adap/flower/pull/3533), " +"[#3453](https://github.com/adap/flower/pull/3453), " +"[#3486](https://github.com/adap/flower/pull/3486), " +"[#3290](https://github.com/adap/flower/pull/3290))" msgstr "" -"这一改进还使 `fit` 和 `evaluate` 之间的返回类型更加一致:`evaluate` 现在应返回一个元组`(float, int, " -"dict)`,代表损失、示例数和一个包含特定问题任意值(如准确度)的字典。" +"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:980 +#: 
../../source/ref-changelog.md:255 msgid "" -"In case you wondered: this feature is compatible with existing projects, " -"the additional dictionary return value is optional. New code should " -"however migrate to the new return types to be compatible with upcoming " -"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " -"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " -"details." +"Full Flower Next Docker support is here! With the release of Flower 1.9, " +"Flower provides stable Docker images for the Flower SuperLink, the Flower" +" SuperNode, and the Flower `ServerApp`. This set of images enables you to" +" run all Flower components in Docker. Check out the new [how-to " +"guide](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html) to get stated." msgstr "" -"如果你想知道:此功能与现有项目兼容,额外的字典返回值是可选的。不过,新代码应迁移到新的返回类型,以便与即将发布的 Flower " -"版本兼容(`fit`: `List[np.ndarray], int, Dict[str, Scalar]`,`evaluate`: " -"`float, int, Dict[str, Scalar]`)。详见下面的示例。" -#: ../../source/ref-changelog.md:982 +#: ../../source/ref-changelog.md:257 +#, fuzzy msgid "" -"*Code example:* note the additional dictionary return values in both " -"`FlwrClient.fit` and `FlwrClient.evaluate`:" -msgstr "*代码示例:* 注意 `FlwrClient.fit` 和 `FlwrClient.evaluate` 中的附加字典返回值:" - -#: ../../source/ref-changelog.md:997 -msgid "" -"**Generalized** `config` **argument in** `Client.fit` **and** " -"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" +"**Re-architect Flower Next simulation engine** " +"([#3307](https://github.com/adap/flower/pull/3307), " +"[#3355](https://github.com/adap/flower/pull/3355), " +"[#3272](https://github.com/adap/flower/pull/3272), " +"[#3273](https://github.com/adap/flower/pull/3273), " +"[#3417](https://github.com/adap/flower/pull/3417), " +"[#3281](https://github.com/adap/flower/pull/3281), " +"[#3343](https://github.com/adap/flower/pull/3343), " +"[#3326](https://github.com/adap/flower/pull/3326))" msgstr "" 
-"**在**`Client.fit` " -"**和**`Client.evaluate`中泛化**`config`参数([#595](https://github.com/adap/flower/pull/595))" +"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614)))" -#: ../../source/ref-changelog.md:999 +#: ../../source/ref-changelog.md:259 msgid "" -"The `config` argument used to be of type `Dict[str, str]`, which means " -"that dictionary values were expected to be strings. The new release " -"generalizes this to enable values of the following types: `bool`, " -"`bytes`, `float`, `int`, `str`." +"Flower Next simulations now use a new in-memory `Driver` that improves " +"the reliability of simulations, especially in notebook environments. This" +" is a significant step towards a complete overhaul of the Flower Next " +"simulation architecture." msgstr "" -"`config`参数曾是 \"字典[str, str]\"类型,这意味着字典值应是字符串。新版本将其扩展为以下类型的值: " -"bool`、`bytes`、`float`、`int`、`str`。" -#: ../../source/ref-changelog.md:1001 +#: ../../source/ref-changelog.md:261 +#, fuzzy msgid "" -"This means one can now pass almost arbitrary values to `fit`/`evaluate` " -"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" -"side and `int(config[\"epochs\"])` on the client side!" 
+"**Upgrade simulation engine** " +"([#3354](https://github.com/adap/flower/pull/3354), " +"[#3378](https://github.com/adap/flower/pull/3378), " +"[#3262](https://github.com/adap/flower/pull/3262), " +"[#3435](https://github.com/adap/flower/pull/3435), " +"[#3501](https://github.com/adap/flower/pull/3501), " +"[#3482](https://github.com/adap/flower/pull/3482), " +"[#3494](https://github.com/adap/flower/pull/3494))" msgstr "" -"这意味着现在可以使用 `config` 字典向 `fit`/`evaluate` 传递几乎任意的值。耶,服务器端不再需要 " -"`str(epochs)`,客户端不再需要 `int(config[\"epochs\"])`!" +"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614)))" -#: ../../source/ref-changelog.md:1003 +#: ../../source/ref-changelog.md:263 msgid "" -"*Code example:* note that the `config` dictionary now contains non-`str` " -"values in both `Client.fit` and `Client.evaluate`:" -msgstr "*代码示例:* 注意 `config` 字典现在在 `Client.fit` 和 `Client.evaluate` 中都包含非 `str` 值:" - -#: ../../source/ref-changelog.md:1020 -msgid "v0.13.0 (2021-01-08)" -msgstr "v0.13.0 (2021-01-08)" +"The Flower Next simulation engine comes with improved and configurable " +"logging. The Ray-based simulation backend in Flower 1.9 was updated to " +"use Ray 2.10." 
+msgstr "" -#: ../../source/ref-changelog.md:1024 +#: ../../source/ref-changelog.md:265 +#, fuzzy msgid "" -"New example: PyTorch From Centralized To Federated " -"([#549](https://github.com/adap/flower/pull/549))" -msgstr "新示例: PyTorch 从集中到联邦 ([#549](https://github.com/adap/flower/pull/549))" - -#: ../../source/ref-changelog.md:1025 -msgid "Improved documentation" -msgstr "改进文档" - -#: ../../source/ref-changelog.md:1026 -msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" -msgstr "新文档主题 ([#551](https://github.com/adap/flower/pull/551))" +"**Introduce FedPFT baseline** " +"([#3268](https://github.com/adap/flower/pull/3268))" +msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" -#: ../../source/ref-changelog.md:1027 -msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" -msgstr "新的 API 参考 ([#554](https://github.com/adap/flower/pull/554))" +#: ../../source/ref-changelog.md:267 +msgid "" +"FedPFT allows you to perform one-shot Federated Learning by leveraging " +"widely available foundational models, dramatically reducing communication" +" costs while delivering high performing models. This is work led by Mahdi" +" Beitollahi from Huawei Noah's Ark Lab (Montreal, Canada). 
Read all the " +"details in their paper: \"Parametric Feature Transfer: One-shot Federated" +" Learning with Foundation Models\" " +"([arxiv](https://arxiv.org/abs/2402.01862))" +msgstr "" -#: ../../source/ref-changelog.md:1028 +#: ../../source/ref-changelog.md:269 +#, fuzzy msgid "" -"Updated examples documentation " -"([#549](https://github.com/adap/flower/pull/549))" -msgstr "更新了示例文档 ([#549](https://github.com/adap/flower/pull/549))" +"**Launch additional** `flwr new` **templates for Apple MLX, Hugging Face " +"Transformers, scikit-learn and TensorFlow** " +"([#3291](https://github.com/adap/flower/pull/3291), " +"[#3139](https://github.com/adap/flower/pull/3139), " +"[#3284](https://github.com/adap/flower/pull/3284), " +"[#3251](https://github.com/adap/flower/pull/3251), " +"[#3376](https://github.com/adap/flower/pull/3376), " +"[#3287](https://github.com/adap/flower/pull/3287))" +msgstr "" +"**移除对 Python 3.7 的支持** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" -#: ../../source/ref-changelog.md:1029 +#: ../../source/ref-changelog.md:271 msgid "" -"Removed obsolete documentation " -"([#548](https://github.com/adap/flower/pull/548))" -msgstr "删除了过时的文档 ([#548](https://github.com/adap/flower/pull/548))" +"The `flwr` CLI's `flwr new` command is starting to become everone's " +"favorite way of creating new Flower projects. This release introduces " +"additional `flwr new` templates for Apple MLX, Hugging Face Transformers," +" scikit-learn and TensorFlow. In addition to that, existing templates " +"also received updates." 
+msgstr "" -#: ../../source/ref-changelog.md:1031 -msgid "Bugfix:" -msgstr "错误修正:" +#: ../../source/ref-changelog.md:273 +#, fuzzy +msgid "" +"**Refine** `RecordSet` **API** " +"([#3209](https://github.com/adap/flower/pull/3209), " +"[#3331](https://github.com/adap/flower/pull/3331), " +"[#3334](https://github.com/adap/flower/pull/3334), " +"[#3335](https://github.com/adap/flower/pull/3335), " +"[#3375](https://github.com/adap/flower/pull/3375), " +"[#3368](https://github.com/adap/flower/pull/3368))" +msgstr "" +"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" -#: ../../source/ref-changelog.md:1033 +#: ../../source/ref-changelog.md:275 msgid "" -"`Server.fit` does not disconnect clients when finished, disconnecting the" -" clients is now handled in `flwr.server.start_server` " -"([#553](https://github.com/adap/flower/pull/553) " -"[#540](https://github.com/adap/flower/issues/540))." +"`RecordSet` is part of the Flower Next low-level API preview release. In " +"Flower 1.9, `RecordSet` received a number of usability improvements that " +"make it easier to build `RecordSet`-based `ServerApp`s and `ClientApp`s." 
msgstr "" -"Server.fit \"完成后不会断开客户端连接,现在断开客户端连接是在 \"flwr.server.start_server " -"\"中处理的([#553](https://github.com/adap/flower/pull/553) " -"[#540](https://github.com/adap/flower/issues/540))。" -#: ../../source/ref-changelog.md:1035 -msgid "v0.12.0 (2020-12-07)" -msgstr "v0.12.0 (2020-12-07)" +#: ../../source/ref-changelog.md:277 +#, fuzzy +msgid "" +"**Beautify logging** ([#3379](https://github.com/adap/flower/pull/3379), " +"[#3430](https://github.com/adap/flower/pull/3430), " +"[#3461](https://github.com/adap/flower/pull/3461), " +"[#3360](https://github.com/adap/flower/pull/3360), " +"[#3433](https://github.com/adap/flower/pull/3433))" +msgstr "" +"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " +"[#2528](https://github/com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" -#: ../../source/ref-changelog.md:1037 ../../source/ref-changelog.md:1053 -msgid "Important changes:" -msgstr "重要变更:" +#: ../../source/ref-changelog.md:279 +msgid "" +"Logs received a substantial update. Not only are logs now much nicer to " +"look at, but they are also more configurable." 
+msgstr "" -#: ../../source/ref-changelog.md:1039 +#: ../../source/ref-changelog.md:281 +#, fuzzy msgid "" -"Added an example for embedded devices " -"([#507](https://github.com/adap/flower/pull/507))" -msgstr "添加了嵌入式设备示例 ([#507](https://github.com/adap/flower/pull/507))" +"**Improve reliability** " +"([#3564](https://github.com/adap/flower/pull/3564), " +"[#3561](https://github.com/adap/flower/pull/3561), " +"[#3566](https://github.com/adap/flower/pull/3566), " +"[#3462](https://github.com/adap/flower/pull/3462), " +"[#3225](https://github.com/adap/flower/pull/3225), " +"[#3514](https://github.com/adap/flower/pull/3514), " +"[#3535](https://github.com/adap/flower/pull/3535), " +"[#3372](https://github.com/adap/flower/pull/3372))" +msgstr "" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:1040 +#: ../../source/ref-changelog.md:283 msgid "" -"Added a new NumPyClient (in addition to the existing KerasClient) " -"([#504](https://github.com/adap/flower/pull/504) " -"[#508](https://github.com/adap/flower/pull/508))" +"Flower 1.9 includes reliability improvements across many parts of the " +"system. One example is a much improved SuperNode shutdown procedure." 
msgstr "" -"添加了一个新的 NumPyClient(除现有的 KerasClient " -"之外)([#504](https://github.com/adap/flower/pull/504) " -"[#508](https://github.com/adap/flower/pull/508)" -#: ../../source/ref-changelog.md:1041 +#: ../../source/ref-changelog.md:285 +#, fuzzy msgid "" -"Deprecated `flwr_example` package and started to migrate examples into " -"the top-level `examples` directory " -"([#494](https://github.com/adap/flower/pull/494) " -"[#512](https://github.com/adap/flower/pull/512))" +"**Update Swift and C++ SDKs** " +"([#3321](https://github.com/adap/flower/pull/3321), " +"[#2763](https://github.com/adap/flower/pull/2763))" msgstr "" -"弃用 `flwr_example` 软件包,并开始将示例迁移到顶层的 `examples` 目录 " -"([#494](https://github.com/adap/flower/pull/494) " -"[#512](https://github.com/adap/flower/pull/512))" +"** 更新代码示例** ([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:1043 -msgid "v0.11.0 (2020-11-30)" -msgstr "v0.11.0 (2020-11-30)" +#: ../../source/ref-changelog.md:287 +msgid "" +"In the C++ SDK, communication-related code is now separate from main " +"client logic. A new abstract class `Communicator` has been introduced " +"alongside a gRPC implementation of it." 
+msgstr "" + +#: ../../source/ref-changelog.md:289 +msgid "" +"**Improve testing, tooling and CI/CD infrastructure** " +"([#3294](https://github.com/adap/flower/pull/3294), " +"[#3282](https://github.com/adap/flower/pull/3282), " +"[#3311](https://github.com/adap/flower/pull/3311), " +"[#2878](https://github.com/adap/flower/pull/2878), " +"[#3333](https://github.com/adap/flower/pull/3333), " +"[#3255](https://github.com/adap/flower/pull/3255), " +"[#3349](https://github.com/adap/flower/pull/3349), " +"[#3400](https://github.com/adap/flower/pull/3400), " +"[#3401](https://github.com/adap/flower/pull/3401), " +"[#3399](https://github.com/adap/flower/pull/3399), " +"[#3346](https://github.com/adap/flower/pull/3346), " +"[#3398](https://github.com/adap/flower/pull/3398), " +"[#3397](https://github.com/adap/flower/pull/3397), " +"[#3347](https://github.com/adap/flower/pull/3347), " +"[#3502](https://github.com/adap/flower/pull/3502), " +"[#3387](https://github.com/adap/flower/pull/3387), " +"[#3542](https://github.com/adap/flower/pull/3542), " +"[#3396](https://github.com/adap/flower/pull/3396), " +"[#3496](https://github.com/adap/flower/pull/3496), " +"[#3465](https://github.com/adap/flower/pull/3465), " +"[#3473](https://github.com/adap/flower/pull/3473), " +"[#3484](https://github.com/adap/flower/pull/3484), " +"[#3521](https://github.com/adap/flower/pull/3521), " +"[#3363](https://github.com/adap/flower/pull/3363), " +"[#3497](https://github.com/adap/flower/pull/3497), " +"[#3464](https://github.com/adap/flower/pull/3464), " +"[#3495](https://github.com/adap/flower/pull/3495), " +"[#3478](https://github.com/adap/flower/pull/3478), " +"[#3271](https://github.com/adap/flower/pull/3271))" +msgstr "" + +#: ../../source/ref-changelog.md:291 +msgid "" +"As always, the Flower tooling, testing, and CI/CD infrastructure has " +"received many updates." 
+msgstr "" + +#: ../../source/ref-changelog.md:293 +msgid "" +"**Improve documentation** " +"([#3530](https://github.com/adap/flower/pull/3530), " +"[#3539](https://github.com/adap/flower/pull/3539), " +"[#3425](https://github.com/adap/flower/pull/3425), " +"[#3520](https://github.com/adap/flower/pull/3520), " +"[#3286](https://github.com/adap/flower/pull/3286), " +"[#3516](https://github.com/adap/flower/pull/3516), " +"[#3523](https://github.com/adap/flower/pull/3523), " +"[#3545](https://github.com/adap/flower/pull/3545), " +"[#3498](https://github.com/adap/flower/pull/3498), " +"[#3439](https://github.com/adap/flower/pull/3439), " +"[#3440](https://github.com/adap/flower/pull/3440), " +"[#3382](https://github.com/adap/flower/pull/3382), " +"[#3559](https://github.com/adap/flower/pull/3559), " +"[#3432](https://github.com/adap/flower/pull/3432), " +"[#3278](https://github.com/adap/flower/pull/3278), " +"[#3371](https://github.com/adap/flower/pull/3371), " +"[#3519](https://github.com/adap/flower/pull/3519), " +"[#3267](https://github.com/adap/flower/pull/3267), " +"[#3204](https://github.com/adap/flower/pull/3204), " +"[#3274](https://github.com/adap/flower/pull/3274))" +msgstr "" -#: ../../source/ref-changelog.md:1045 -msgid "Incompatible changes:" -msgstr "不兼容的更改:" +#: ../../source/ref-changelog.md:295 +msgid "" +"As always, the Flower documentation has received many updates. Notable " +"new pages include:" +msgstr "" -#: ../../source/ref-changelog.md:1047 +#: ../../source/ref-changelog.md:297 msgid "" -"Renamed strategy methods " -"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " -"Flower's public APIs. Other public methods/functions (e.g., every method " -"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " -"which is why we're removing it from the four methods in Strategy. 
To " -"migrate rename the following `Strategy` methods accordingly:" +"[How-to upgrate to Flower Next (Flower Next migration " +"guide)](https://flower.ai/docs/framework/how-to-upgrade-to-flower-" +"next.html)" msgstr "" -"重命名了策略方法([#486](https://github.com/adap/flower/pull/486)),以统一 Flower公共 " -"API 的命名。其他公共方法/函数(例如 `Client` 中的每个方法,以及 `Strategy.evaluate`)不使用 `on_` " -"前缀,这就是我们从 Strategy 中的四个方法中移除它的原因。迁移时,请相应地重命名以下 `Strategy` 方法:" -#: ../../source/ref-changelog.md:1048 -msgid "`on_configure_evaluate` => `configure_evaluate`" -msgstr "`on_configure_evaluate` => `configure_evaluate`" +#: ../../source/ref-changelog.md:299 +#, fuzzy +msgid "" +"[How-to run Flower using Docker](https://flower.ai/docs/framework/how-to-" +"run-flower-using-docker.html)" +msgstr "" +"`TensorFlow快速入门 (教程) `_" -#: ../../source/ref-changelog.md:1049 -msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" -msgstr "`on_aggregate_evaluate` => `aggregate_evaluate`" +#: ../../source/ref-changelog.md:301 +msgid "" +"[Flower Mods reference](https://flower.ai/docs/framework/ref-" +"api/flwr.client.mod.html#module-flwr.client.mod)" +msgstr "" -#: ../../source/ref-changelog.md:1050 -msgid "`on_configure_fit` => `configure_fit`" -msgstr "`on_configure_fit` => `configure_fit`" +#: ../../source/ref-changelog.md:303 +#, fuzzy +msgid "" +"**General updates to Flower Examples** " +"([#3205](https://github.com/adap/flower/pull/3205), " +"[#3226](https://github.com/adap/flower/pull/3226), " +"[#3211](https://github.com/adap/flower/pull/3211), " +"[#3252](https://github.com/adap/flower/pull/3252), " +"[#3427](https://github.com/adap/flower/pull/3427), " +"[#3410](https://github.com/adap/flower/pull/3410), " +"[#3426](https://github.com/adap/flower/pull/3426), " +"[#3228](https://github.com/adap/flower/pull/3228), " +"[#3342](https://github.com/adap/flower/pull/3342), " +"[#3200](https://github.com/adap/flower/pull/3200), " +"[#3202](https://github.com/adap/flower/pull/3202), " 
+"[#3394](https://github.com/adap/flower/pull/3394), " +"[#3488](https://github.com/adap/flower/pull/3488), " +"[#3329](https://github.com/adap/flower/pull/3329), " +"[#3526](https://github.com/adap/flower/pull/3526), " +"[#3392](https://github.com/adap/flower/pull/3392), " +"[#3474](https://github.com/adap/flower/pull/3474), " +"[#3269](https://github.com/adap/flower/pull/3269))" +msgstr "" +"**更新文档** ([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" -#: ../../source/ref-changelog.md:1051 -msgid "`on_aggregate_fit` => `aggregate_fit`" -msgstr "`on_aggregate_fit` => `aggregate_fit`" +#: ../../source/ref-changelog.md:305 +#, fuzzy +msgid "As always, Flower code examples have received many updates." +msgstr "许多 \"Flower \"代码示例得到了大幅更新。" -#: ../../source/ref-changelog.md:1055 +#: ../../source/ref-changelog.md:307 msgid "" -"Deprecated `DefaultStrategy` " -"([#479](https://github.com/adap/flower/pull/479)). To migrate use " -"`FedAvg` instead." 
+"**General improvements** " +"([#3532](https://github.com/adap/flower/pull/3532), " +"[#3318](https://github.com/adap/flower/pull/3318), " +"[#3565](https://github.com/adap/flower/pull/3565), " +"[#3296](https://github.com/adap/flower/pull/3296), " +"[#3305](https://github.com/adap/flower/pull/3305), " +"[#3246](https://github.com/adap/flower/pull/3246), " +"[#3224](https://github.com/adap/flower/pull/3224), " +"[#3475](https://github.com/adap/flower/pull/3475), " +"[#3297](https://github.com/adap/flower/pull/3297), " +"[#3317](https://github.com/adap/flower/pull/3317), " +"[#3429](https://github.com/adap/flower/pull/3429), " +"[#3196](https://github.com/adap/flower/pull/3196), " +"[#3534](https://github.com/adap/flower/pull/3534), " +"[#3240](https://github.com/adap/flower/pull/3240), " +"[#3365](https://github.com/adap/flower/pull/3365), " +"[#3407](https://github.com/adap/flower/pull/3407), " +"[#3563](https://github.com/adap/flower/pull/3563), " +"[#3344](https://github.com/adap/flower/pull/3344), " +"[#3330](https://github.com/adap/flower/pull/3330), " +"[#3436](https://github.com/adap/flower/pull/3436), " +"[#3300](https://github.com/adap/flower/pull/3300), " +"[#3327](https://github.com/adap/flower/pull/3327), " +"[#3254](https://github.com/adap/flower/pull/3254), " +"[#3253](https://github.com/adap/flower/pull/3253), " +"[#3419](https://github.com/adap/flower/pull/3419), " +"[#3289](https://github.com/adap/flower/pull/3289), " +"[#3208](https://github.com/adap/flower/pull/3208), " +"[#3245](https://github.com/adap/flower/pull/3245), " +"[#3319](https://github.com/adap/flower/pull/3319), " +"[#3203](https://github.com/adap/flower/pull/3203), " +"[#3423](https://github.com/adap/flower/pull/3423), " +"[#3352](https://github.com/adap/flower/pull/3352), " +"[#3292](https://github.com/adap/flower/pull/3292), " +"[#3261](https://github.com/adap/flower/pull/3261))" +msgstr "" + +#: ../../source/ref-changelog.md:311 +#, fuzzy +msgid "**Deprecate Python 3.8 
support**" +msgstr "** 过时的 Python 3.8**" + +#: ../../source/ref-changelog.md:313 +#, fuzzy +msgid "" +"Python 3.8 will stop receiving security fixes in [October " +"2024](https://devguide.python.org/versions/). Support for Python 3.8 is " +"now deprecated and will be removed in an upcoming release." +msgstr "由于 Python 3.8 已于 2024-10-01 弃用 (EOL),对 Python 3.7 的支持现已废弃,并将在即将发布的版本中移除。" + +#: ../../source/ref-changelog.md:315 +#, fuzzy +msgid "" +"**Deprecate (experimental)** `flower-driver-api` **and** `flower-fleet-" +"api` ([#3416](https://github.com/adap/flower/pull/3416), " +"[#3420](https://github.com/adap/flower/pull/3420))" msgstr "" -"已废弃的 `DefaultStrategy` ([#479](https://github.com/adap/flower/pull/479)) " -"。迁移时请使用 `FedAvg`。" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" -#: ../../source/ref-changelog.md:1056 +#: ../../source/ref-changelog.md:317 msgid "" -"Simplified examples and baselines " -"([#484](https://github.com/adap/flower/pull/484))." -msgstr "简化示例和baselines([#484](https://github.com/adap/flower/pull/484))。" +"Flower 1.9 deprecates the two (experimental) commands `flower-driver-api`" +" and `flower-fleet-api`. Both commands will be removed in an upcoming " +"release. Use `flower-superlink` instead." +msgstr "" -#: ../../source/ref-changelog.md:1057 +#: ../../source/ref-changelog.md:319 +#, fuzzy msgid "" -"Removed presently unused `on_conclude_round` from strategy interface " -"([#483](https://github.com/adap/flower/pull/483))." 
+"**Deprecate** `--server` **in favor of** `--superlink` " +"([#3518](https://github.com/adap/flower/pull/3518))" msgstr "" -"删除了策略界面中目前未使用的 " -"\"on_conclude_round\"([#483](https://github.com/adap/flower/pull/483))。" +"**启用向** `start_simulation` 传递** `Server` 实例 " +"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/ref-changelog.md:1058 +#: ../../source/ref-changelog.md:321 msgid "" -"Set minimal Python version to 3.6.1 instead of 3.6.9 " -"([#471](https://github.com/adap/flower/pull/471))." +"The commands `flower-server-app` and `flower-client-app` should use " +"`--superlink` instead of the now deprecated `--server`. Support for " +"`--server` will be removed in a future release." msgstr "" -"将最小 Python 版本设为 3.6.1,而不是 3.6.9 " -"([#471](https://github.com/adap/flower/pull/471))." -#: ../../source/ref-changelog.md:1059 +#: ../../source/ref-changelog.md:325 msgid "" -"Improved `Strategy` docstrings " -"([#470](https://github.com/adap/flower/pull/470))." +"**Replace** `flower-superlink` **CLI option** `--certificates` **with** " +"`--ssl-ca-certfile` **,** `--ssl-certfile` **and** `--ssl-keyfile` " +"([#3512](https://github.com/adap/flower/pull/3512), " +"[#3408](https://github.com/adap/flower/pull/3408))" msgstr "" -"改进了 `Strategy` " -"docstrings([#470](https://github.com/adap/flower/pull/470))。" - -#: ../../source/ref-example-projects.rst:2 -msgid "Example projects" -msgstr "项目实例" -#: ../../source/ref-example-projects.rst:4 +#: ../../source/ref-changelog.md:327 msgid "" -"Flower comes with a number of usage examples. The examples demonstrate " -"how Flower can be used to federate different kinds of existing machine " -"learning pipelines, usually leveraging popular machine learning " -"frameworks such as `PyTorch `_ or `TensorFlow " -"`_." +"SSL-related `flower-superlink` CLI arguments were restructured in an " +"incompatible way. 
Instead of passing a single `--certificates` flag with " +"three values, you now need to pass three flags (`--ssl-ca-certfile`, " +"`--ssl-certfile` and `--ssl-keyfile`) with one value each. Check out the " +"[SSL connections](https://flower.ai/docs/framework/how-to-enable-ssl-" +"connections.html) documentation page for details." msgstr "" -"Flower 附带了许多使用示例。这些示例演示了如何使用 Flower 联邦不同类型的现有机器学习形式,通常是利用流行的机器学习框架,如 " -"`PyTorch `_ 或 `TensorFlow " -"`_。" -#: ../../source/ref-example-projects.rst:10 +#: ../../source/ref-changelog.md:329 #, fuzzy msgid "" -"The following examples are available as standalone projects. Quickstart " -"TensorFlow/Keras ---------------------------" -msgstr "以下示例可作为独立项目使用。" - -#: ../../source/ref-example-projects.rst:14 -msgid "" -"The TensorFlow/Keras quickstart example shows CIFAR-10 image " -"classification with MobileNetV2:" -msgstr "TensorFlow/Keras 快速入门示例展示了使用 MobileNetV2 进行的 CIFAR-10 图像分类:" +"**Remove SuperLink** `--vce` **option** " +"([#3513](https://github.com/adap/flower/pull/3513))" +msgstr "**重构文档**([#1387](https://github.com/adap/flower/pull/1387))" -#: ../../source/ref-example-projects.rst:17 +#: ../../source/ref-changelog.md:331 msgid "" -"`Quickstart TensorFlow (Code) " -"`_" +"Instead of separately starting a SuperLink and a `ServerApp` for " +"simulation, simulations must now be started using the single `flower-" +"simulation` command." 
msgstr "" -"`TensorFlow快速入门 (代码) `_" -#: ../../source/ref-example-projects.rst:18 +#: ../../source/ref-changelog.md:333 #, fuzzy -msgid ":doc:`Quickstart TensorFlow (Tutorial) `" +msgid "" +"**Merge** `--grpc-rere` **and** `--rest` **SuperLink options** " +"([#3527](https://github.com/adap/flower/pull/3527))" msgstr "" -"`TensorFlow快速入门 (教程) `_" +"**重新命名** `rnd` ** to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/ref-example-projects.rst:19 +#: ../../source/ref-changelog.md:335 msgid "" -"`Quickstart TensorFlow (Blog Post) `_" +"To simplify the usage of `flower-superlink`, previously separate sets of " +"CLI options for gRPC and REST were merged into one unified set of " +"options. Consult the [Flower CLI reference " +"documentation](https://flower.ai/docs/framework/ref-api-cli.html) for " +"details." msgstr "" -"`TensorFlow快速入门 (博客) `_" -#: ../../source/ref-example-projects.rst:23 -#: ../../source/tutorial-quickstart-pytorch.rst:5 -msgid "Quickstart PyTorch" -msgstr "PyTorch快速入门" +#: ../../source/ref-changelog.md:337 +#, fuzzy +msgid "v1.8.0 (2024-04-03)" +msgstr "v1.3.0 (2023-02-06)" -#: ../../source/ref-example-projects.rst:25 +#: ../../source/ref-changelog.md:343 +#, fuzzy msgid "" -"The PyTorch quickstart example shows CIFAR-10 image classification with a" -" simple Convolutional Neural Network:" -msgstr "PyTorch 快速入门范例展示了使用简单卷积神经网络进行 CIFAR-10 图像分类的情况:" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata " +"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " +"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " +"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " +"`tabdar-khan` " +msgstr "" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-example-projects.rst:28 +#: ../../source/ref-changelog.md:347 +#, fuzzy msgid "" -"`Quickstart PyTorch (Code) " -"`_" +"**Introduce Flower Next high-level API (stable)** " +"([#3002](https://github.com/adap/flower/pull/3002), " +"[#2934](https://github.com/adap/flower/pull/2934), " +"[#2958](https://github.com/adap/flower/pull/2958), " +"[#3173](https://github.com/adap/flower/pull/3173), " +"[#3174](https://github.com/adap/flower/pull/3174), " +"[#2923](https://github.com/adap/flower/pull/2923), " +"[#2691](https://github.com/adap/flower/pull/2691), " +"[#3079](https://github.com/adap/flower/pull/3079), " +"[#2961](https://github.com/adap/flower/pull/2961), " +"[#2924](https://github.com/adap/flower/pull/2924), " +"[#3166](https://github.com/adap/flower/pull/3166), " +"[#3031](https://github.com/adap/flower/pull/3031), " +"[#3057](https://github.com/adap/flower/pull/3057), " +"[#3000](https://github.com/adap/flower/pull/3000), " +"[#3113](https://github.com/adap/flower/pull/3113), " +"[#2957](https://github.com/adap/flower/pull/2957), " +"[#3183](https://github.com/adap/flower/pull/3183), " +"[#3180](https://github.com/adap/flower/pull/3180), " +"[#3035](https://github.com/adap/flower/pull/3035), " +"[#3189](https://github.com/adap/flower/pull/3189), " +"[#3185](https://github.com/adap/flower/pull/3185), " +"[#3190](https://github.com/adap/flower/pull/3190), " +"[#3191](https://github.com/adap/flower/pull/3191), " +"[#3195](https://github.com/adap/flower/pull/3195), " +"[#3197](https://github.com/adap/flower/pull/3197))" msgstr "" -"`PyTorch快速入门 (代码) `_" +"**介绍 Flower Next 高级应用程序接口(稳定版)** " +"([#3002](https://github.com/adap/flower/pull/3002), " +"[#2934](https://github.com/adap/flower/pull/2934), " 
+"[#2958](https://github.com/adap/flower/pull/2958), " +"[#3173](https://github.com/adap/flower/pull/3173), " +"[#3174](https://github.com/adap/flower/pull/3174), " +"[#2923](https://github.com/adap/flower/pull/2923), " +"[#2691](https://github.com/adap/flower/pull/2691), " +"[#3079](https://github.com/adap/flower/pull/3079), " +"[#2961](https://github.com/adap/flower/pull/2961), " +"[#2924](https://github.com/adap/flower/pull/2924), " +"[#3166](https://github.com/adap/flower/pull/3166), " +"[#3031](https://github.com/adap/flower/pull/3031), " +"[#3057](https://github.com/adap/flower/pull/3057), " +"[#3000](https://github.com/adap/flower/pull/3000), " +"[#3113](https://github.com/adap/flower/pull/3113), " +"[#2957](https://github.com/adap/flower/pull/2957), " +"[#3183](https://github.com/adap/flower/pull/3183), " +"[#3180](https://github.com/adap/flower/pull/3180), " +"[#3035](https://github.com/adap/flower/pull/3035), " +"[#3189](https://github.com/adap/flower/pull/3189), " +"[#3185](https://github.com/adap/flower/pull/3185), " +"[#3190](https://github.com/adap/flower/pull/3190), " +"[#3191](https://github.com/adap/flower/pull/3191), " +"[#3195](https://github.com/adap/flower/pull/3195), " +"[#3197](https://github.com/adap/flower/pull/3197))" -#: ../../source/ref-example-projects.rst:29 +#: ../../source/ref-changelog.md:349 #, fuzzy -msgid ":doc:`Quickstart PyTorch (Tutorial) `" +msgid "" +"The Flower Next high-level API is stable! Flower Next is the future of " +"Flower - all new features (like Flower Mods) will be built on top of it. " +"You can start to migrate your existing projects to Flower Next by using " +"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " +"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." 
+" Flower Next allows you to run multiple projects concurrently (we call " +"this multi-run) and execute the same project in either simulation " +"environments or deployment environments without having to change a single" +" line of code. The best part? It's fully compatible with existing Flower " +"projects that use `Strategy`, `NumPyClient` & co." msgstr "" -"`PyTorch快速入门 (教程) `_" - -#: ../../source/ref-example-projects.rst:33 -msgid "PyTorch: From Centralized To Federated" -msgstr "PyTorch: 从集中式到联邦式" +"Flower Next 高级应用程序接口已经稳定!Flower Next 是 Flower 的未来 - 所有新功能(如 Flower " +"Mods)都将构建在它之上。您可以使用 `ServerApp` 和 `ClientApp` 开始将现有项目迁移到 Flower Next(请查看 " +"`quickstart-pytorch` 或 `quickstart-tensorflow` ,详细的迁移指南将在不久后发布)。Flower " +"Next 允许您同时运行多个项目(我们称之为多重运行),并在模拟环境或部署环境中执行同一项目,而无需更改任何代码。最棒的是什么?它与使用 " +"`Strategy`、`NumPyClient` 等的现有 Flower 项目完全兼容。" -#: ../../source/ref-example-projects.rst:35 +#: ../../source/ref-changelog.md:351 +#, fuzzy msgid "" -"This example shows how a regular PyTorch project can be federated using " -"Flower:" -msgstr "本例展示了如何使用 Flower 联邦化一个普通的 PyTorch 项目:" +"**Introduce Flower Next low-level API (preview)** " +"([#3062](https://github.com/adap/flower/pull/3062), " +"[#3034](https://github.com/adap/flower/pull/3034), " +"[#3069](https://github.com/adap/flower/pull/3069))" +msgstr "" +"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" -#: ../../source/ref-example-projects.rst:37 +#: ../../source/ref-changelog.md:353 +#, fuzzy msgid "" -"`PyTorch: From Centralized To Federated (Code) " -"`_" +"In addition to the Flower Next *high-level* API that uses `Strategy`, " +"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " +"new Flower Next *low-level* API. 
The low-level API allows for granular " +"control of every aspect of the learning process by sending/receiving " +"individual messages to/from client nodes. The new `ServerApp` supports " +"registering a custom `main` function that allows writing custom training " +"loops for methods like async FL, cyclic training, or federated analytics." +" The new `ClientApp` supports registering `train`, `evaluate` and `query`" +" functions that can access the raw message received from the `ServerApp`." +" New abstractions like `RecordSet`, `Message` and `Context` further " +"enable sending multiple models, multiple sets of config values and " +"metrics, stateful computations on the client node and implementations of " +"custom SMPC protocols, to name just a few." msgstr "" -"PyTorch: 从集中式到联邦式(代码) `_" +"除了使用 \"Strategy\"、\"NumPyClient \"等的 Flower Next 高级应用程序接口外,Flower 1.8 " +"还提供了新的 Flower Next " +"低级应用程序接口的预览版。低级应用程序接口允许通过向/从客户端节点发送/接收单个消息,对学习过程的各个方面进行细粒度控制。新的 " +"\"ServerApp \"支持注册一个自定义的 \"main \"函数,允许为异步FL、循环训练或联合分析等方法编写自定义训练循环。新的 " +"\"ClientApp \"支持注册 \"训练\"、\"评估 \"和 \"查询 \"函数,这些函数可以访问从 \"ServerApp " +"\"接收到的原始信息。新的抽象(如 \"RecordSet\"、\"Message \"和 " +"\"Context\")进一步支持发送多个模型、多套配置值和指标、客户端节点上的有状态计算以及自定义 SMPC 协议的实现等。" -#: ../../source/ref-example-projects.rst:38 +#: ../../source/ref-changelog.md:355 #, fuzzy msgid "" -":doc:`PyTorch: From Centralized To Federated (Tutorial) `" +"**Introduce Flower Mods (preview)** " +"([#3054](https://github.com/adap/flower/pull/3054), " +"[#2911](https://github.com/adap/flower/pull/2911), " +"[#3083](https://github.com/adap/flower/pull/3083))" msgstr "" -"PyTorch: 从集中式到联邦式(教程) `_" - -#: ../../source/ref-example-projects.rst:42 -msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" -msgstr "树莓派和 Nvidia Jetson 上的联邦学习" +"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" -#: ../../source/ref-example-projects.rst:44 
+#: ../../source/ref-changelog.md:357 +#, fuzzy msgid "" -"This example shows how Flower can be used to build a federated learning " -"system that run across Raspberry Pi and Nvidia Jetson:" -msgstr "本示例展示了如何利用 Flower 建立一个跨 Raspberry Pi 和 Nvidia Jetson 运行的联邦学习系统:" +"Flower Modifiers (we call them Mods) can intercept messages and analyze, " +"edit or handle them directly. Mods can be used to develop pluggable " +"modules that work across different projects. Flower 1.8 already includes " +"mods to log the size of a message, the number of parameters sent over the" +" network, differential privacy with fixed clipping and adaptive clipping," +" local differential privacy and secure aggregation protocols SecAgg and " +"SecAgg+. The Flower Mods API is released as a preview, but researchers " +"can already use it to experiment with arbirtrary SMPC protocols." +msgstr "" +"Flower Modifiers(我们称之为 " +"Mods)可以拦截信息,并直接对其进行分析、编辑或处理。修改器可用于开发可在不同项目中使用的可插拔模块。Flower 1.8 " +"已经包含了记录信息大小、通过网络发送的参数数量、固定剪切和自适应剪切的差分隐私、本地差分隐私以及安全聚合协议 SecAgg 和 SecAgg+ 的" +" Mods。Flower Mods API 作为预览版发布,但研究人员已经可以用它来试验任意的 SMPC 协议。" -#: ../../source/ref-example-projects.rst:46 +#: ../../source/ref-changelog.md:359 +#, fuzzy msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " -"`_" +"**Fine-tune LLMs with LLM FlowerTune** " +"([#3029](https://github.com/adap/flower/pull/3029), " +"[#3089](https://github.com/adap/flower/pull/3089), " +"[#3092](https://github.com/adap/flower/pull/3092), " +"[#3100](https://github.com/adap/flower/pull/3100), " +"[#3114](https://github.com/adap/flower/pull/3114), " +"[#3162](https://github.com/adap/flower/pull/3162), " +"[#3172](https://github.com/adap/flower/pull/3172))" msgstr "" -"Raspberry Pi 和 Nvidia Jetson 上的联邦学习(代码) " -"`_" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " 
+"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-example-projects.rst:47 +#: ../../source/ref-changelog.md:361 +#, fuzzy msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " -"`_" +"We are introducing LLM FlowerTune, an introductory example that " +"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " +"the Alpaca-GPT4 dataset. The example is built to be easily adapted to use" +" different models and/or datasets. Read our blog post [LLM FlowerTune: " +"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" +"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." msgstr "" -"Raspberry Pi和 Nvidia Jetson 上的联邦学习(博客) " -"`_" +"我们将介绍 LLM FlowerTune,这是一个介绍性示例,演示了在 Alpaca-GPT4 数据集上对预先训练好的 Llama2 模型进行联合" +" LLM 微调。该示例可轻松调整以使用不同的模型和/或数据集。请阅读我们的博文 [LLM FlowerTune: Federated LLM " +"Fine-tuning with Flower](https://flower.ai/blog/2024-03-14-llm-" +"flowertune-federated-llm-finetuning-with-flower/) 了解更多详情。" -#: ../../source/ref-faq.rst:4 +#: ../../source/ref-changelog.md:363 +#, fuzzy msgid "" -"This page collects answers to commonly asked questions about Federated " -"Learning with Flower." 
-msgstr "本页收集了有关 \"Flower 联邦学习 \"常见问题的答案。" +"**Introduce built-in Differential Privacy (preview)** " +"([#2798](https://github.com/adap/flower/pull/2798), " +"[#2959](https://github.com/adap/flower/pull/2959), " +"[#3038](https://github.com/adap/flower/pull/3038), " +"[#3147](https://github.com/adap/flower/pull/3147), " +"[#2909](https://github.com/adap/flower/pull/2909), " +"[#2893](https://github.com/adap/flower/pull/2893), " +"[#2892](https://github.com/adap/flower/pull/2892), " +"[#3039](https://github.com/adap/flower/pull/3039), " +"[#3074](https://github.com/adap/flower/pull/3074))" +msgstr "" +"** 支持 SSL 的服务器和客户端** ([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-faq.rst +#: ../../source/ref-changelog.md:365 #, fuzzy -msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" -msgstr ":fa:`eye,mr-1` Flower 可以在 Juptyter Notebooks / Google Colab 上运行吗?" - -#: ../../source/ref-faq.rst:8 msgid "" -"Yes, it can! Flower even comes with a few under-the-hood optimizations to" -" make it work even better on Colab. Here's a quickstart example:" -msgstr "是的,它可以!Flower 甚至还进行了一些底层优化,使其在 Colab 上运行得更好。下面是一个快速启动示例:" +"Built-in Differential Privacy is here! Flower supports both central and " +"local differential privacy (DP). Central DP can be configured with either" +" fixed or adaptive clipping. The clipping can happen either on the " +"server-side or the client-side. Local DP does both clipping and noising " +"on the client-side. 
A new documentation page [explains Differential " +"Privacy approaches](https://flower.ai/docs/framework/explanation-" +"differential-privacy.html) and a new how-to guide describes [how to use " +"the new Differential Privacy components](https://flower.ai/docs/framework" +"/how-to-use-differential-privacy.html) in Flower." +msgstr "" +"内置差分保密功能!Flower 支持中央和本地差分保密 (DP)。中央差分隐私可配置为固定或自适应剪切。剪切可以发生在服务器端或客户端。本地 DP" +" 在客户端进行剪切和噪声处理。新的文档页面[解释差分隐私方法](https://flower.ai/docs/framework" +"/explanation-differential-privacy.html) " +"和新的操作指南[如何使用新的差分隐私组件](https://flower.ai/docs/framework/how-to-use-" +"differential-privacy.html) 介绍了 Flower 的使用方法。" -#: ../../source/ref-faq.rst:10 +#: ../../source/ref-changelog.md:367 +#, fuzzy msgid "" -"`Flower simulation PyTorch " -"`_" +"**Introduce built-in Secure Aggregation (preview)** " +"([#3120](https://github.com/adap/flower/pull/3120), " +"[#3110](https://github.com/adap/flower/pull/3110), " +"[#3108](https://github.com/adap/flower/pull/3108))" msgstr "" -"`Flower 模拟 PyTorch " -"`_" +"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" -#: ../../source/ref-faq.rst:11 +#: ../../source/ref-changelog.md:369 +#, fuzzy msgid "" -"`Flower simulation TensorFlow/Keras " -"`_" +"Built-in Secure Aggregation is here! Flower now supports different secure" +" aggregation protocols out-of-the-box. The best part? You can add secure " +"aggregation to your Flower projects with only a few lines of code. In " +"this initial release, we inlcude support for SecAgg and SecAgg+, but more" +" protocols will be implemented shortly. We'll also add detailed docs that" +" explain secure aggregation and how to use it in Flower. You can already " +"check out the new code example that shows how to use Flower to easily " +"combine Federated Learning, Differential Privacy and Secure Aggregation " +"in the same project." 
msgstr "" -"`Flower模拟TensorFlow/Keras " -"`_" +"内置安全聚合功能!Flower 现在支持不同的安全聚合协议。最棒的是什么?只需几行代码,您就可以将安全聚合添加到 Flower " +"项目中。在这个初始版本中,我们包含了对 SecAgg 和 SecAgg+ " +"的支持,但更多协议将很快实现。我们还将添加详细的文档,解释安全聚合以及如何在 Flower 中使用它。您可以查看新的代码示例,了解如何使用 " +"Flower 在同一项目中轻松结合联合学习、差分隐私和安全聚合。" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" -msgstr ":fa:`eye,mr-1` 如何在 Raspberry Pi 上运行联邦学习?" - -#: ../../source/ref-faq.rst:15 -msgid "" -"Find the `blog post about federated learning on embedded device here " -"`_" -" and the corresponding `GitHub code example " -"`_." -msgstr "" -"请点击此处查看有关嵌入式设备联邦学习的 " -"\"博文\"`_和相应的" -" \"GitHub 代码示例\"`_。" - -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" -msgstr ":fa:`eye,mr-1` Flower 是否支持安卓设备上的联邦学习?" - -#: ../../source/ref-faq.rst:19 +#: ../../source/ref-changelog.md:371 +#, fuzzy msgid "" -"Yes, it does. Please take a look at our `blog post " -"`_ or check out the code examples:" +"**Introduce** `flwr` **CLI (preview)** " +"([#2942](https://github.com/adap/flower/pull/2942), " +"[#3055](https://github.com/adap/flower/pull/3055), " +"[#3111](https://github.com/adap/flower/pull/3111), " +"[#3130](https://github.com/adap/flower/pull/3130), " +"[#3136](https://github.com/adap/flower/pull/3136), " +"[#3094](https://github.com/adap/flower/pull/3094), " +"[#3059](https://github.com/adap/flower/pull/3059), " +"[#3049](https://github.com/adap/flower/pull/3049), " +"[#3142](https://github.com/adap/flower/pull/3142))" msgstr "" -"是的,确实如此。请查看我们的 \"博客文章 `_\" 或查看代码示例:" - -#: ../../source/ref-faq.rst:21 -msgid "" -"`Android Kotlin example `_" -msgstr "`Android Kotlin 示例 `_" - -#: ../../source/ref-faq.rst:22 -msgid "`Android Java example `_" -msgstr "Android Java 示例 `_" - -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" -msgstr ":fa:`eye,mr-1` 我可以将联邦学习与区块链结合起来吗?" 
+"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" -#: ../../source/ref-faq.rst:26 +#: ../../source/ref-changelog.md:373 +#, fuzzy msgid "" -"Yes, of course. A list of available examples using Flower within a " -"blockchain environment is available here:" -msgstr "当然可以。有关在区块链环境中使用 Flower 的可用示例列表,请点击此处:" +"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" +" and then running them using the Simulation Engine (`flwr run`)." +msgstr "新的 `flwr` CLI 命令允许创建新的 Flower 项目(`flwr new`),然后使用仿真引擎运行它们(`flwr run`)。" -#: ../../source/ref-faq.rst:28 +#: ../../source/ref-changelog.md:375 +#, fuzzy msgid "" -"`Flower meets Nevermined GitHub Repository `_." +"**Introduce Flower Next Simulation Engine** " +"([#3024](https://github.com/adap/flower/pull/3024), " +"[#3061](https://github.com/adap/flower/pull/3061), " +"[#2997](https://github.com/adap/flower/pull/2997), " +"[#2783](https://github.com/adap/flower/pull/2783), " +"[#3184](https://github.com/adap/flower/pull/3184), " +"[#3075](https://github.com/adap/flower/pull/3075), " +"[#3047](https://github.com/adap/flower/pull/3047), " +"[#2998](https://github.com/adap/flower/pull/2998), " +"[#3009](https://github.com/adap/flower/pull/3009), " +"[#3008](https://github.com/adap/flower/pull/3008))" msgstr "" -"`Flower meets Nevermined GitHub Repository `_." 
+"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-faq.rst:29 +#: ../../source/ref-changelog.md:377 +#, fuzzy msgid "" -"`Flower meets Nevermined YouTube video " -"`_." +"The Flower Simulation Engine can now run Flower Next projects. For " +"notebook environments, there's also a new `run_simulation` function that " +"can run `ServerApp` and `ClientApp`." msgstr "" -"`Flower meets Nevermined YouTube 视频 " -"`_." +"Flower 模拟引擎现在可以运行 Flower Next 项目。对于笔记本环境,还有一个新的 `run_simulation` 函数,可以运行 " +"`ServerApp` 和 `ClientApp`。" -#: ../../source/ref-faq.rst:30 +#: ../../source/ref-changelog.md:379 #, fuzzy msgid "" -"`Flower meets KOSMoS `_." -msgstr "" -"`Flower meets KOSMoS `_." +"**Handle SuperNode connection errors** " +"([#2969](https://github.com/adap/flower/pull/2969))" +msgstr "** 添加一个新的 gRPC 选项**([#2197](https://github.com/adap/flower/pull/2197))" -#: ../../source/ref-faq.rst:31 +#: ../../source/ref-changelog.md:381 +#, fuzzy msgid "" -"`Flower meets Talan blog post `_ ." +"A SuperNode will now try to reconnect indefinitely to the SuperLink in " +"case of connection errors. The arguments `--max-retries` and `--max-wait-" +"time` can now be passed to the `flower-client-app` command. `--max-" +"retries` will define the number of tentatives the client should make " +"before it gives up trying to reconnect to the SuperLink, and, `--max-" +"wait-time` defines the time before the SuperNode gives up trying to " +"reconnect to the SuperLink." 
msgstr "" -"`Flower meets Talan博文 `_ 。" +"如果出现连接错误,超级节点现在会尝试无限期地重新连接超级链接。现在可以向 `flower-client-app` 命令传递参数 `-ax-" +"retries` 和 `-max-wait-time`。最大重试次数 \"将定义客户端在放弃重新连接超级链接之前的重试次数,而 \"最大等待时间 " +"\"则定义超级节点放弃重新连接超级链接之前的等待时间。" -#: ../../source/ref-faq.rst:32 +#: ../../source/ref-changelog.md:383 +#, fuzzy msgid "" -"`Flower meets Talan GitHub Repository " -"`_ ." +"**General updates to Flower Baselines** " +"([#2904](https://github.com/adap/flower/pull/2904), " +"[#2482](https://github.com/adap/flower/pull/2482), " +"[#2985](https://github.com/adap/flower/pull/2985), " +"[#2968](https://github.com/adap/flower/pull/2968))" msgstr "" -"`Flower meets Talan GitHub Repository " -"`_ ." - -#: ../../source/ref-telemetry.md:1 -msgid "Telemetry" -msgstr "遥测功能" +"**引入新的 Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679)" -#: ../../source/ref-telemetry.md:3 +#: ../../source/ref-changelog.md:385 +#, fuzzy msgid "" -"The Flower open-source project collects **anonymous** usage metrics to " -"make well-informed decisions to improve Flower. Doing this enables the " -"Flower team to understand how Flower is used and what challenges users " -"might face." +"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " +"baseline. Several other baselined have been updated as well." msgstr "" -"Flower 开源项目收集**匿名**使用指标,以便在充分知情的情况下做出改进 Flower 的决定。这样做能让 Flower 团队了解 " -"Flower 的使用情况以及用户可能面临的挑战。" +"有一条新的 [FedStar](https://flower.ai/docs/baselines/fedstar.html) " +"基准线。其他几条基准线也已更新。" -#: ../../source/ref-telemetry.md:5 -msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users that do not want to share anonymous usage metrics." 
-msgstr "**Flower 是一个用于协作式人工智能和数据科学的友好框架。** Flower 遵循这一声明,让不想分享匿名使用指标的用户可以轻松禁用遥测技术。" - -#: ../../source/ref-telemetry.md:7 -msgid "Principles" -msgstr "原则" - -#: ../../source/ref-telemetry.md:9 -msgid "We follow strong principles guarding anonymous usage metrics collection:" -msgstr "我们遵循严格的匿名使用指标收集原则:" - -#: ../../source/ref-telemetry.md:11 +#: ../../source/ref-changelog.md:387 +#, fuzzy msgid "" -"**Optional:** You will always be able to disable telemetry; read on to " -"learn “[How to opt-out](#how-to-opt-out)”." -msgstr "**可选:** 您始终可以禁用遥测功能;请继续阅读\"[如何退出](#how-to-opt-out)\"。" +"**Improve documentation and translations** " +"([#3050](https://github.com/adap/flower/pull/3050), " +"[#3044](https://github.com/adap/flower/pull/3044), " +"[#3043](https://github.com/adap/flower/pull/3043), " +"[#2986](https://github.com/adap/flower/pull/2986), " +"[#3041](https://github.com/adap/flower/pull/3041), " +"[#3046](https://github.com/adap/flower/pull/3046), " +"[#3042](https://github.com/adap/flower/pull/3042), " +"[#2978](https://github.com/adap/flower/pull/2978), " +"[#2952](https://github.com/adap/flower/pull/2952), " +"[#3167](https://github.com/adap/flower/pull/3167), " +"[#2953](https://github.com/adap/flower/pull/2953), " +"[#3045](https://github.com/adap/flower/pull/3045), " +"[#2654](https://github.com/adap/flower/pull/2654), " +"[#3082](https://github.com/adap/flower/pull/3082), " +"[#2990](https://github.com/adap/flower/pull/2990), " +"[#2989](https://github.com/adap/flower/pull/2989))" +msgstr "" +"**改进文件和翻译** ([#3050](https://github.com/adap/flower/pull/3050), " +"[#3044](https://github.com/adap/flower/pull/3044), " +"[#3043](https://github.com/adap/flower/pull/3043), " +"[#2986](https://github.com/adap/flower/pull/2986), " +"[#3041](https://github.com/adap/flower/pull/3041), " +"[#3046](https://github.com/adap/flower/pull/3046), " +"[#3042](https://github.com/adap/flower/pull/3042), " +"[#2978](https://github.com/adap/flower/pull/2978), " 
+"[#2952](https://github.com/adap/flower/pull/2952), " +"[#3167](https://github.com/adap/flower/pull/3167), " +"[#2953](https://github.com/adap/flower/pull/2953), " +"[#3045](https://github.com/adap/flower/pull/3045), " +"[#2654](https://github.com/adap/flower/pull/2654), " +"[#3082](https://github.com/adap/flower/pull/3082), " +"[#2990](https://github.com/adap/flower/pull/2990), " +"[#2989](https://github.com/adap/flower/pull/2989))" -#: ../../source/ref-telemetry.md:12 +#: ../../source/ref-changelog.md:389 +#, fuzzy msgid "" -"**Anonymous:** The reported usage metrics are anonymous and do not " -"contain any personally identifiable information (PII). See “[Collected " -"metrics](#collected-metrics)” to understand what metrics are being " -"reported." +"As usual, we merged many smaller and larger improvements to the " +"documentation. A special thank you goes to [Sebastian van der " +"Voort](https://github.com/svdvoort) for landing a big documentation PR!" msgstr "" -"**匿名:** 报告的使用指标是匿名的,不包含任何个人身份信息 (PII)。请参阅\"[收集的指标](#collected-metrics) " -"\"了解报告的指标。" +"像往常一样,我们合并了许多对文档的较大和较小的改进。特别要感谢 [Sebastian van der " +"Voort](https://github.com/svdvoort),他为我们带来了一份重要的文档 PR!" 
-#: ../../source/ref-telemetry.md:13 +#: ../../source/ref-changelog.md:391 +#, fuzzy msgid "" -"**Transparent:** You can easily inspect what anonymous metrics are being " -"reported; see the section “[How to inspect what is being reported](#how-" -"to-inspect-what-is-being-reported)”" +"**General updates to Flower Examples** " +"([3134](https://github.com/adap/flower/pull/3134), " +"[2996](https://github.com/adap/flower/pull/2996), " +"[2930](https://github.com/adap/flower/pull/2930), " +"[2967](https://github.com/adap/flower/pull/2967), " +"[2467](https://github.com/adap/flower/pull/2467), " +"[2910](https://github.com/adap/flower/pull/2910), " +"[#2918](https://github.com/adap/flower/pull/2918), " +"[#2773](https://github.com/adap/flower/pull/2773), " +"[#3063](https://github.com/adap/flower/pull/3063), " +"[#3116](https://github.com/adap/flower/pull/3116), " +"[#3117](https://github.com/adap/flower/pull/3117))" msgstr "" -"**透明:** 您可以轻松查看正在报告的匿名指标;请参阅\"[如何查看正在报告的指标](#how-to-inspect-what-is-" -"being-reported)\"部分" +"** 更新文档** ([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/ref-telemetry.md:14 +#: ../../source/ref-changelog.md:393 +#, fuzzy msgid "" -"**Open for feedback:** You can always reach out to us if you have " -"feedback; see the section “[How to contact us](#how-to-contact-us)” for " -"details." 
-msgstr "**欢迎反馈:** 如果您有反馈意见,可以随时联系我们;详情请参见\"[如何联系我们](#how-to-contact-us) \"部分。" - -#: ../../source/ref-telemetry.md:16 -msgid "How to opt-out" -msgstr "如何退出" +"Two new examples show federated training of a Vision Transformer (ViT) " +"and federated learning in a medical context using the popular MONAI " +"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" +" new Flower Next `ServerApp` and `ClientApp`. Many other examples " +"received considerable updates as well." +msgstr "" +"两个新示例展示了视觉转换器(ViT)的联合训练,以及使用流行的 MONAI 库在医疗环境中进行的联合学习。quickstart-pytorch " +"\"和 \"quickstart-tensorflow \"展示了新的 Flower Next \"ServerApp \"和 " +"\"ClientApp\"。许多其他示例也得到了大量更新。" -#: ../../source/ref-telemetry.md:18 +#: ../../source/ref-changelog.md:395 +#, fuzzy msgid "" -"When Flower starts, it will check for an environment variable called " -"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " -"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " -"client, simply do so by prepending your command as in:" +"**General improvements** " +"([#3171](https://github.com/adap/flower/pull/3171), " +"[3099](https://github.com/adap/flower/pull/3099), " +"[3003](https://github.com/adap/flower/pull/3003), " +"[3145](https://github.com/adap/flower/pull/3145), " +"[3017](https://github.com/adap/flower/pull/3017), " +"[3085](https://github.com/adap/flower/pull/3085), " +"[3012](https://github.com/adap/flower/pull/3012), " +"[3119](https://github.com/adap/flower/pull/3119), " +"[2991](https://github.com/adap/flower/pull/2991), " +"[2970](https://github.com/adap/flower/pull/2970), " +"[2980](https://github.com/adap/flower/pull/2980), " +"[3086](https://github.com/adap/flower/pull/3086), " +"[2932](https://github.com/adap/flower/pull/2932), " +"[2928](https://github.com/adap/flower/pull/2928), " +"[2941](https://github.com/adap/flower/pull/2941), " +"[2933](https://github.com/adap/flower/pull/2933), " 
+"[3181](https://github.com/adap/flower/pull/3181), " +"[2973](https://github.com/adap/flower/pull/2973), " +"[2992](https://github.com/adap/flower/pull/2992), " +"[2915](https://github.com/adap/flower/pull/2915), " +"[3040](https://github.com/adap/flower/pull/3040), " +"[3022](https://github.com/adap/flower/pull/3022), " +"[3032](https://github.com/adap/flower/pull/3032), " +"[2902](https://github.com/adap/flower/pull/2902), " +"[2931](https://github.com/adap/flower/pull/2931), " +"[3005](https://github.com/adap/flower/pull/3005), " +"[3132](https://github.com/adap/flower/pull/3132), " +"[3115](https://github.com/adap/flower/pull/3115), " +"[2944](https://github.com/adap/flower/pull/2944), " +"[3064](https://github.com/adap/flower/pull/3064), " +"[3106](https://github.com/adap/flower/pull/3106), " +"[2974](https://github.com/adap/flower/pull/2974), " +"[3178](https://github.com/adap/flower/pull/3178), " +"[2993](https://github.com/adap/flower/pull/2993), " +"[3186](https://github.com/adap/flower/pull/3186), " +"[3091](https://github.com/adap/flower/pull/3091), " +"[3125](https://github.com/adap/flower/pull/3125), " +"[3093](https://github.com/adap/flower/pull/3093), " +"[3013](https://github.com/adap/flower/pull/3013), " +"[3033](https://github.com/adap/flower/pull/3033), " +"[3133](https://github.com/adap/flower/pull/3133), " +"[3068](https://github.com/adap/flower/pull/3068), " +"[2916](https://github.com/adap/flower/pull/2916), " +"[2975](https://github.com/adap/flower/pull/2975), " +"[2984](https://github.com/adap/flower/pull/2984), " +"[2846](https://github.com/adap/flower/pull/2846), " +"[3077](https://github.com/adap/flower/pull/3077), " +"[3143](https://github.com/adap/flower/pull/3143), " +"[2921](https://github.com/adap/flower/pull/2921), " +"[3101](https://github.com/adap/flower/pull/3101), " +"[2927](https://github.com/adap/flower/pull/2927), " +"[2995](https://github.com/adap/flower/pull/2995), " +"[2972](https://github.com/adap/flower/pull/2972), " 
+"[2912](https://github.com/adap/flower/pull/2912), " +"[3065](https://github.com/adap/flower/pull/3065), " +"[3028](https://github.com/adap/flower/pull/3028), " +"[2922](https://github.com/adap/flower/pull/2922), " +"[2982](https://github.com/adap/flower/pull/2982), " +"[2914](https://github.com/adap/flower/pull/2914), " +"[3179](https://github.com/adap/flower/pull/3179), " +"[3080](https://github.com/adap/flower/pull/3080), " +"[2994](https://github.com/adap/flower/pull/2994), " +"[3187](https://github.com/adap/flower/pull/3187), " +"[2926](https://github.com/adap/flower/pull/2926), " +"[3018](https://github.com/adap/flower/pull/3018), " +"[3144](https://github.com/adap/flower/pull/3144), " +"[3011](https://github.com/adap/flower/pull/3011), " +"[#3152](https://github.com/adap/flower/pull/3152), " +"[#2836](https://github.com/adap/flower/pull/2836), " +"[#2929](https://github.com/adap/flower/pull/2929), " +"[#2943](https://github.com/adap/flower/pull/2943), " +"[#2955](https://github.com/adap/flower/pull/2955), " +"[#2954](https://github.com/adap/flower/pull/2954))" msgstr "" -"Flower 启动时,会检查环境变量 `FLWR_TELEMETRY_ENABLED` 是否存在。通过设置 " -"`FLWR_TELEMETRY_ENABLED=0` 可以轻松禁用遥测功能。假设你启动的是 Flower " -"服务器或客户端,只需在命令前添加以下内容即可:" +"**一般改进**([#3171](https://github.com/adap/flower/pull/3171), " +"[3099](https://github.com/adap/flower/pull/3099), " +"[3003](https://github.com/adap/flower/pull/3003), " +"[3145](https://github.com/adap/flower/pull/3145), " +"[3017](https://github.com/adap/flower/pull/3017), " +"[3085](https://github.com/adap/flower/pull/3085), " +"[3012](https://github.com/adap/flower/pull/3012), " +"[3119](https://github.com/adap/flower/pull/3119), " +"[2991](https://github.com/adap/flower/pull/2991), " +"[2970](https://github.com/adap/flower/pull/2970), " +"[2980](https://github.com/adap/flower/pull/2980), " +"[3086](https://github.com/adap/flower/pull/3086), " +"[2932](https://github.com/adap/flower/pull/2932), " 
+"[2928](https://github.com/adap/flower/pull/2928), " +"[2941](https://github.com/adap/flower/pull/2941), " +"[2933](https://github.com/adap/flower/pull/2933), " +"[3181](https://github.com/adap/flower/pull/3181), " +"[2973](https://github.com/adap/flower/pull/2973), " +"[2992](https://github.com/adap/flower/pull/2992), " +"[2915](https://github.com/adap/flower/pull/2915), " +"[3040](https://github.com/adap/flower/pull/3040), " +"[3022](https://github.com/adap/flower/pull/3022), " +"[3032](https://github.com/adap/flower/pull/3032), " +"[2902](https://github.com/adap/flower/pull/2902), " +"[2931](https://github.com/adap/flower/pull/2931), " +"[3005](https://github.com/adap/flower/pull/3005), " +"[3132](https://github.com/adap/flower/pull/3132), " +"[3115](https://github.com/adap/flower/pull/3115), " +"[2944](https://github.com/adap/flower/pull/2944), " +"[3064](https://github.com/adap/flower/pull/3064), " +"[3106](https://github.com/adap/flower/pull/3106), " +"[2974](https://github.com/adap/flower/pull/2974), " +"[3178](https://github.com/adap/flower/pull/3178), " +"[2993](https://github.com/adap/flower/pull/2993), " +"[3186](https://github.com/adap/flower/pull/3186), " +"[3091](https://github.com/adap/flower/pull/3091), " +"[3125](https://github.com/adap/flower/pull/3125), " +"[3093](https://github.com/adap/flower/pull/3093), " +"[3013](https://github.com/adap/flower/pull/3013), " +"[3033](https://github.com/adap/flower/pull/3033), " +"[3133](https://github.com/adap/flower/pull/3133), " +"[3068](https://github.com/adap/flower/pull/3068), " +"[2916](https://github.com/adap/flower/pull/2916), " +"[2975](https://github.com/adap/flower/pull/2975), " +"[2984](https://github.com/adap/flower/pull/2984), " +"[2846](https://github.com/adap/flower/pull/2846), " +"[3077](https://github.com/adap/flower/pull/3077), " +"[3143](https://github.com/adap/flower/pull/3143), " +"[2921](https://github.com/adap/flower/pull/2921), " +"[3101](https://github.com/adap/flower/pull/3101), " 
+"[2927](https://github.com/adap/flower/pull/2927), " +"[2995](https://github.com/adap/flower/pull/2995), " +"[2972](https://github.com/adap/flower/pull/2972), " +"[2912](https://github.com/adap/flower/pull/2912), " +"[3065](https://github.com/adap/flower/pull/3065), " +"[3028](https://github.com/adap/flower/pull/3028), " +"[2922](https://github.com/adap/flower/pull/2922), " +"[2982](https://github.com/adap/flower/pull/2982), " +"[2914](https://github.com/adap/flower/pull/2914), " +"[3179](https://github.com/adap/flower/pull/3179), " +"[3080](https://github.com/adap/flower/pull/3080), " +"[2994](https://github.com/adap/flower/pull/2994), " +"[3187](https://github.com/adap/flower/pull/3187), " +"[2926](https://github.com/adap/flower/pull/2926), " +"[3018](https://github.com/adap/flower/pull/3018), " +"[3144](https://github.com/adap/flower/pull/3144), " +"[3011](https://github.com/adap/flower/pull/3011), " +"[#3152](https://github.com/adap/flower/pull/3152), " +"[#2836](https://github.com/adap/flower/pull/2836), " +"[#2929](https://github.com/adap/flower/pull/2929), " +"[#2943](https://github.com/adap/flower/pull/2943), " +"[#2955](https://github.com/adap/flower/pull/2955), " +"[#2954](https://github.com/adap/flower/pull/2954))" -#: ../../source/ref-telemetry.md:24 +#: ../../source/ref-changelog.md:401 +#, fuzzy +msgid "v1.7.0 (2024-02-05)" +msgstr "v1.3.0 (2023-02-06)" + +#: ../../source/ref-changelog.md:407 +#, fuzzy msgid "" -"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," -" `.bashrc` (or whatever configuration file applies to your environment) " -"to disable Flower telemetry permanently." +"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " +"Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo " +"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " +"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " +"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " +"Shaaban`, `Yan Gao`, `Yasar Abbas` " msgstr "" -"或者,你也可以在 `.bashrc`(或任何适用于你的环境的配置文件)中导出 `FLWR_TELEMETRY_ENABLED=0` 来永久禁用 " -"Flower telemetry。" - -#: ../../source/ref-telemetry.md:26 -msgid "Collected metrics" -msgstr "收集的指标" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-telemetry.md:28 -msgid "Flower telemetry collects the following metrics:" -msgstr "Flower 遥测技术收集以下指标:" +#: ../../source/ref-changelog.md:411 +#, fuzzy +msgid "" +"**Introduce stateful clients (experimental)** " +"([#2770](https://github.com/adap/flower/pull/2770), " +"[#2686](https://github.com/adap/flower/pull/2686), " +"[#2696](https://github.com/adap/flower/pull/2696), " +"[#2643](https://github.com/adap/flower/pull/2643), " +"[#2769](https://github.com/adap/flower/pull/2769))" +msgstr "" +"** baselines的普通更新** ([#2301](https://github.com/adap/flower/pull/2301), " +"[#2305](https://github.com/adap/flower/pull/2305), " +"[#2307](https://github.com/adap/flower/pull/2307), " +"[#2327](https://github.com/adap/flower/pull/2327), " +"[#2435](https://github.com/adap/flower/pull/2435))" -#: ../../source/ref-telemetry.md:30 +#: ../../source/ref-changelog.md:413 +#, fuzzy msgid "" -"**Flower version.** Understand which versions of Flower are currently " -"being used. This helps us to decide whether we should invest effort into " -"releasing a patch version for an older version of Flower or instead use " -"the bandwidth to build new features." 
-msgstr "**了解目前使用的 Flower 版本。这有助于我们决定是否应该投入精力为旧版本的 Flower 发布补丁版本,还是利用带宽来构建新功能。" +"Subclasses of `Client` and `NumPyClient` can now store local state that " +"remains on the client. Let's start with the highlight first: this new " +"feature is compatible with both simulated clients (via " +"`start_simulation`) and networked clients (via `start_client`). It's also" +" the first preview of new abstractions like `Context` and `RecordSet`. " +"Clients can access state of type `RecordSet` via `state: RecordSet = " +"self.context.state`. Changes to this `RecordSet` are preserved across " +"different rounds of execution to enable stateful computations in a " +"unified way across simulation and deployment." +msgstr "" +"客户端 \"和 \"NumPyClient \"的子类现在可以存储保留在客户端上的本地状态。让我们先从亮点开始:这一新功能与模拟客户端(通过 " +"`start_simulation`)和网络客户端(通过 `start_client`)兼容。这也是 `Context` 和 " +"`RecordSet` 等新抽象的首次预览。客户端可以通过 `state.RecordSet` 访问 `RecordSet` 类型的状态: " +"RecordSet = self.context.state`。对该 `RecordSet` " +"的更改会在不同轮执行中保留,以便在模拟和部署中以统一的方式进行有状态计算。" -#: ../../source/ref-telemetry.md:32 +#: ../../source/ref-changelog.md:415 +#, fuzzy msgid "" -"**Operating system.** Enables us to answer questions such as: *Should we " -"create more guides for Linux, macOS, or Windows?*" -msgstr "**操作系统**使我们能够回答以下问题: *我们应该为 Linux、macOS 还是 Windows 创建更多指南?*" +"**Improve performance** " +"([#2293](https://github.com/adap/flower/pull/2293))" +msgstr "**改进示例笔记** ([#2005](https://github.com/adap/flower/pull/2005))" -#: ../../source/ref-telemetry.md:34 +#: ../../source/ref-changelog.md:417 +#, fuzzy msgid "" -"**Python version.** Knowing the Python version helps us, for example, to " -"decide whether we should invest effort into supporting old versions of " -"Python or stop supporting them and start taking advantage of new Python " -"features." -msgstr "**例如,了解 Python 版本有助于我们决定是否应该投入精力支持旧版本的 Python,还是停止支持这些版本并开始利用新的 Python 功能。" +"Flower is faster than ever. 
All `FedAvg`-derived strategies now use in-" +"place aggregation to reduce memory consumption. The Flower client " +"serialization/deserialization has been rewritten from the ground up, " +"which results in significant speedups, especially when the client-side " +"training time is short." +msgstr "" +"Flower 的速度比以往更快。所有源于 `FedAvg` 的策略现在都使用就地聚合,以减少内存消耗。Flower " +"客户端序列化/解序列化已从头开始重写,从而显著提高了速度,尤其是在客户端训练时间较短的情况下。" -#: ../../source/ref-telemetry.md:36 +#: ../../source/ref-changelog.md:419 +#, fuzzy msgid "" -"**Hardware properties.** Understanding the hardware environment that " -"Flower is being used in helps to decide whether we should, for example, " -"put more effort into supporting low-resource environments." -msgstr "**硬件属性** 了解 Flower 的硬件使用环境,有助于决定我们是否应在支持低资源环境等方面投入更多精力。" +"**Support Federated Learning with Apple MLX and Flower** " +"([#2693](https://github.com/adap/flower/pull/2693))" +msgstr "" +"** 添加使用 fastai 和 Flower 进行联邦学习的新示例** " +"([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/ref-telemetry.md:38 +#: ../../source/ref-changelog.md:421 +#, fuzzy msgid "" -"**Execution mode.** Knowing what execution mode Flower starts in enables " -"us to understand how heavily certain features are being used and better " -"prioritize based on that." -msgstr "** 执行模式** 了解 Flower 的启动执行模式,能让我们了解某些功能的使用率,并据此更好地确定优先级。" +"Flower has official support for federated learning using [Apple " +"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " +"example." +msgstr "" +"通过新的 `quickstart-mlx` 代码示例,Flower 正式支持使用 [Apple MLX](https://ml-" +"explore.github.io/mlx)的联合学习。" -#: ../../source/ref-telemetry.md:40 +#: ../../source/ref-changelog.md:423 +#, fuzzy msgid "" -"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " -"time a Flower workload starts. This allows us to understand which device " -"types not only start Flower workloads but also successfully complete " -"them." 
-msgstr "" -"**每次 Flower 工作负载启动时,Flower 遥测都会随机分配一个内存集群 ID。这样,我们就能了解哪些设备类型不仅启动了 Flower " -"工作负载,而且还成功完成了它们。" - -#: ../../source/ref-telemetry.md:42 -msgid "" -"**Source.** Flower telemetry tries to store a random source ID in " -"`~/.flwr/source` the first time a telemetry event is generated. The " -"source ID is important to identify whether an issue is recurring or " -"whether an issue is triggered by multiple clusters running concurrently " -"(which often happens in simulation). For example, if a device runs " -"multiple workloads at the same time, and this results in an issue, then, " -"in order to reproduce the issue, multiple workloads must be started at " -"the same time." +"**Introduce new XGBoost cyclic strategy** " +"([#2666](https://github.com/adap/flower/pull/2666), " +"[#2668](https://github.com/adap/flower/pull/2668))" msgstr "" -"**Source.** Flower 遥测会在第一次生成遥测事件时,尝试在 `~/.flwr/source` 中存储一个随机源 ID。源 ID " -"对于识别问题是否反复出现或问题是否由多个集群同时运行触发(这在模拟中经常发生)非常重要。例如,如果设备同时运行多个工作负载并导致问题,那么为了重现问题,必须同时启动多个工作负载。" +"**介绍 iOS SDK(预览版)** ([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/ref-telemetry.md:44 +#: ../../source/ref-changelog.md:425 +#, fuzzy msgid "" -"You may delete the source ID at any time. If you wish for all events " -"logged under a specific source ID to be deleted, you can send a deletion " -"request mentioning the source ID to `telemetry@flower.ai`. All events " -"related to that source ID will then be permanently deleted." +"A new strategy called `FedXgbCyclic` supports a client-by-client style of" +" training (often called cyclic). The `xgboost-comprehensive` code example" +" shows how to use it in a full project. In addition to that, `xgboost-" +"comprehensive` now also supports simulation mode. With this, Flower " +"offers best-in-class XGBoost support." 
msgstr "" -"您可以随时删除源 ID。如果您希望删除特定源 ID 下记录的所有事件,可以向 `telemetry@flower.ai` 发送删除请求,并提及该源" -" ID。届时,与该源 ID 相关的所有事件都将被永久删除。" +"名为 `FedXgbCyclic` 的新策略支持逐个客户端的训练风格(通常称为循环)。xgboost-comprehensive " +"\"代码示例展示了如何在一个完整的项目中使用它。除此之外,`xgboost-comprehensive` 现在还支持模拟模式。由此,Flower " +"提供了同类最佳的 XGBoost 支持。" -#: ../../source/ref-telemetry.md:46 +#: ../../source/ref-changelog.md:427 +#, fuzzy msgid "" -"We will not collect any personally identifiable information. If you think" -" any of the metrics collected could be misused in any way, please [get in" -" touch with us](#how-to-contact-us). We will update this page to reflect " -"any changes to the metrics collected and publish changes in the " -"changelog." -msgstr "" -"我们不会收集任何个人身份信息。如果您认为所收集的任何指标可能以任何方式被滥用,请[与我们联系](#how-to-contact-" -"us)。我们将更新本页面,以反映对所收集指标的任何更改,并在更新日志中公布更改内容。" +"**Support Python 3.11** " +"([#2394](https://github.com/adap/flower/pull/2394))" +msgstr "** 支持 Python 3.10** ([#1320](https://github.com/adap/flower/pull/1320))" -#: ../../source/ref-telemetry.md:48 +#: ../../source/ref-changelog.md:429 +#, fuzzy msgid "" -"If you think other metrics would be helpful for us to better guide our " -"decisions, please let us know! We will carefully review them; if we are " -"confident that they do not compromise user privacy, we may add them." -msgstr "如果您认为其他指标有助于我们更好地指导决策,请告诉我们!我们将仔细审查这些指标;如果我们确信它们不会损害用户隐私,我们可能会添加这些指标。" - -#: ../../source/ref-telemetry.md:50 -msgid "How to inspect what is being reported" -msgstr "如何检查报告中的内容" +"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " +"ensure better support for users using more recent Python versions." +msgstr "框架测试现在可在 Python 3.8、3.9、3.10 和 3.11 上运行。这将确保为使用最新 Python 版本的用户提供更好的支持。" -#: ../../source/ref-telemetry.md:52 +#: ../../source/ref-changelog.md:431 +#, fuzzy msgid "" -"We wanted to make it very easy for you to inspect what anonymous usage " -"metrics are reported. 
You can view all the reported telemetry information" -" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " -"is disabled by default. You may use logging independently from " -"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " -"without sending any metrics." +"**Update gRPC and ProtoBuf dependencies** " +"([#2814](https://github.com/adap/flower/pull/2814))" msgstr "" -"我们希望能让您轻松查看所报告的匿名使用指标。通过设置环境变量 `FLWR_TELEMETRY_LOGGING=1` " -"可以查看所有报告的遥测信息。日志记录默认为禁用。您可以不使用 `FLWR_TELEMETRY_ENABLED` " -"而单独使用日志记录,这样就可以在不发送任何指标的情况下检查遥测功能。" +"**更新 REST API 以支持创建和删除节点** " +"([#2283](https://github.com/adap/flower/pull/2283))" -#: ../../source/ref-telemetry.md:58 +#: ../../source/ref-changelog.md:433 +#, fuzzy msgid "" -"The inspect Flower telemetry without sending any anonymous usage metrics," -" use both environment variables:" -msgstr "在不发送任何匿名使用指标的情况下检查 Flower 遥测,可使用这两个环境变量:" - -#: ../../source/ref-telemetry.md:64 -msgid "How to contact us" -msgstr "如何联系我们" +"The `grpcio` and `protobuf` dependencies were updated to their latest " +"versions for improved security and performance." +msgstr "为提高安全性和性能,\"grpcio \"和 \"protobuf \"依赖项已更新至最新版本。" -#: ../../source/ref-telemetry.md:66 +#: ../../source/ref-changelog.md:435 +#, fuzzy msgid "" -"We want to hear from you. If you have any feedback or ideas on how to " -"improve the way we handle anonymous usage metrics, reach out to us via " -"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " -"(`telemetry@flower.ai`)." 
+"**Introduce Docker image for Flower server** " +"([#2700](https://github.com/adap/flower/pull/2700), " +"[#2688](https://github.com/adap/flower/pull/2688), " +"[#2705](https://github.com/adap/flower/pull/2705), " +"[#2695](https://github.com/adap/flower/pull/2695), " +"[#2747](https://github.com/adap/flower/pull/2747), " +"[#2746](https://github.com/adap/flower/pull/2746), " +"[#2680](https://github.com/adap/flower/pull/2680), " +"[#2682](https://github.com/adap/flower/pull/2682), " +"[#2701](https://github.com/adap/flower/pull/2701))" msgstr "" -"我们希望听到您的意见。如果您对如何改进我们处理匿名使用指标的方式有任何反馈或想法,请通过 [Slack](https://flower.ai" -"/join-slack/) (频道 `#telemetry`)或电子邮件 (`telemetry@flower.ai`)与我们联系。" - -#: ../../source/tutorial-quickstart-android.rst:-1 -msgid "" -"Read this Federated Learning quickstart tutorial for creating an Android " -"app using Flower." -msgstr "阅读本联邦学习快速入门教程,了解如何使用 Flower 创建 Android 应用程序。" - -#: ../../source/tutorial-quickstart-android.rst:5 -msgid "Quickstart Android" -msgstr "快速入门 Android" - -#: ../../source/tutorial-quickstart-android.rst:10 -msgid "" -"Let's build a federated learning system using TFLite and Flower on " -"Android!" -msgstr "让我们在 Android 上使用 TFLite 和 Flower 构建一个联邦学习系统!" +"** 支持 SSL 的服务器和客户端** ([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/tutorial-quickstart-android.rst:12 +#: ../../source/ref-changelog.md:437 +#, fuzzy msgid "" -"Please refer to the `full code example " -"`_ to learn " -"more." +"The Flower server can now be run using an official Docker image. A new " +"how-to guide explains [how to run Flower using " +"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html). 
An official Flower client Docker image will follow." msgstr "" -"请参阅`完整代码示例 " -"`_了解更多信息。" - -#: ../../source/tutorial-quickstart-fastai.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with FastAI to train a vision model on CIFAR-10." -msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 FastAI 在 CIFAR-10 上训练视觉模型。" - -#: ../../source/tutorial-quickstart-fastai.rst:5 -msgid "Quickstart fastai" -msgstr "快速入门 fastai" - -#: ../../source/tutorial-quickstart-fastai.rst:10 -msgid "Let's build a federated learning system using fastai and Flower!" -msgstr "让我们用 fastai 和 Flower 建立一个联邦学习系统!" +"现在可以使用官方 Docker 映像运行 Flower 服务器了。新的操作指南介绍了 [如何使用 Docker 运行 " +"Flower](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html)。Flower 客户端 Docker 官方镜像将随后发布。" -#: ../../source/tutorial-quickstart-fastai.rst:12 +#: ../../source/ref-changelog.md:439 +#, fuzzy msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." +"**Introduce** `flower-via-docker-compose` **example** " +"([#2626](https://github.com/adap/flower/pull/2626))" msgstr "" -"请参阅 `完整代码示例 `_了解更多信息。" +"**介绍Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" -#: ../../source/tutorial-quickstart-huggingface.rst:-1 +#: ../../source/ref-changelog.md:441 +#, fuzzy msgid "" -"Check out this Federating Learning quickstart tutorial for using Flower " -"with HuggingFace Transformers in order to fine-tune an LLM." 
-msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 HuggingFace Transformers 来微调 LLM。" - -#: ../../source/tutorial-quickstart-huggingface.rst:5 -msgid "Quickstart 🤗 Transformers" -msgstr "🤗 Transformers快速入门" +"**Introduce** `quickstart-sklearn-tabular` **example** " +"([#2719](https://github.com/adap/flower/pull/2719))" +msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" -#: ../../source/tutorial-quickstart-huggingface.rst:10 +#: ../../source/ref-changelog.md:443 +#, fuzzy msgid "" -"Let's build a federated learning system using Hugging Face Transformers " -"and Flower!" -msgstr "让我们用Hugging Face Transformers和Flower来构建一个联邦学习系统!" +"**Introduce** `custom-metrics` **example** " +"([#1958](https://github.com/adap/flower/pull/1958))" +msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" -#: ../../source/tutorial-quickstart-huggingface.rst:12 +#: ../../source/ref-changelog.md:445 +#, fuzzy msgid "" -"We will leverage Hugging Face to federate the training of language models" -" over multiple clients using Flower. More specifically, we will fine-tune" -" a pre-trained Transformer model (distilBERT) for sequence classification" -" over a dataset of IMDB ratings. The end goal is to detect if a movie " -"rating is positive or negative." 
+"**Update code examples to use Flower Datasets** " +"([#2450](https://github.com/adap/flower/pull/2450), " +"[#2456](https://github.com/adap/flower/pull/2456), " +"[#2318](https://github.com/adap/flower/pull/2318), " +"[#2712](https://github.com/adap/flower/pull/2712))" msgstr "" -"我们将利用Hugging Face技术,使用 Flower 在多个客户端上联邦训练语言模型。更具体地说,我们将对预先训练好的 " -"Transformer 模型(distilBERT)进行微调,以便在 IMDB 评分数据集上进行序列分类。最终目标是检测电影评分是正面还是负面。" +"更新开发人员工具([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310)" -#: ../../source/tutorial-quickstart-huggingface.rst:18 -msgid "Dependencies" -msgstr "依赖关系" +#: ../../source/ref-changelog.md:447 +#, fuzzy +msgid "" +"Several code examples were updated to use [Flower " +"Datasets](https://flower.ai/docs/datasets/)." +msgstr "更新了多个代码示例,以使用 [Flower Datasets](https://flower.ai/docs/datasets/) 。" -#: ../../source/tutorial-quickstart-huggingface.rst:20 +#: ../../source/ref-changelog.md:449 +#, fuzzy msgid "" -"To follow along this tutorial you will need to install the following " -"packages: :code:`datasets`, :code:`evaluate`, :code:`flwr`, " -":code:`torch`, and :code:`transformers`. 
This can be done using " -":code:`pip`:" +"**General updates to Flower Examples** " +"([#2381](https://github.com/adap/flower/pull/2381), " +"[#2805](https://github.com/adap/flower/pull/2805), " +"[#2782](https://github.com/adap/flower/pull/2782), " +"[#2806](https://github.com/adap/flower/pull/2806), " +"[#2829](https://github.com/adap/flower/pull/2829), " +"[#2825](https://github.com/adap/flower/pull/2825), " +"[#2816](https://github.com/adap/flower/pull/2816), " +"[#2726](https://github.com/adap/flower/pull/2726), " +"[#2659](https://github.com/adap/flower/pull/2659), " +"[#2655](https://github.com/adap/flower/pull/2655))" msgstr "" -"要学习本教程,您需要安装以下软件包: :code:`datasets`、 :code:`evaluate`、 :code:`flwr`、 " -":code:`torch`和 :code:`transformers`。这可以通过 :code:`pip` 来完成:" +"**改进(试验性)驱动程序应用程序接口** ([#1663](https://github.com/adap/flower/pull/1663)," +" [#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" -#: ../../source/tutorial-quickstart-huggingface.rst:30 -msgid "Standard Hugging Face workflow" -msgstr "标准Hugging Face工作流程" +#: ../../source/ref-changelog.md:451 +#, fuzzy +msgid "Many Flower code examples received substantial updates." 
+msgstr "许多 \"Flower \"代码示例得到了大幅更新。" -#: ../../source/tutorial-quickstart-huggingface.rst:33 -msgid "Handling the data" -msgstr "处理数据" +#: ../../source/ref-changelog.md:453 ../../source/ref-changelog.md:546 +msgid "**Update Flower Baselines**" +msgstr "**更新 Flower Baselines**" -#: ../../source/tutorial-quickstart-huggingface.rst:35 +#: ../../source/ref-changelog.md:455 +#, fuzzy msgid "" -"To fetch the IMDB dataset, we will use Hugging Face's :code:`datasets` " -"library. We then need to tokenize the data and create :code:`PyTorch` " -"dataloaders, this is all done in the :code:`load_data` function:" +"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " +"[#2771](https://github.com/adap/flower/pull/2771))" msgstr "" -"为了获取 IMDB 数据集,我们将使用 Hugging Face 的 :code:`datasets` 库。然后,我们需要对数据进行标记化,并创建" -" :code:`PyTorch` 数据加载器,这些都将在 :code:`load_data` 函数中完成:" - -#: ../../source/tutorial-quickstart-huggingface.rst:81 -msgid "Training and testing the model" -msgstr "训练和测试模型" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" -#: ../../source/tutorial-quickstart-huggingface.rst:83 -msgid "" -"Once we have a way of creating our trainloader and testloader, we can " -"take care of the training and testing. 
This is very similar to any " -":code:`PyTorch` training or testing loop:" -msgstr "" -"有了创建 trainloader 和 testloader 的方法后,我们就可以进行训练和测试了。这与任何 :code:`PyTorch` " -"训练或测试循环都非常相似:" +#: ../../source/ref-changelog.md:456 +#, fuzzy +msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" +msgstr "FjORD [#2431](https://github.com/adap/flower/pull/2431)" -#: ../../source/tutorial-quickstart-huggingface.rst:121 -msgid "Creating the model itself" -msgstr "创建模型本身" +#: ../../source/ref-changelog.md:457 +#, fuzzy +msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" +msgstr "FjORD [#2431](https://github.com/adap/flower/pull/2431)" -#: ../../source/tutorial-quickstart-huggingface.rst:123 -msgid "" -"To create the model itself, we will just load the pre-trained distillBERT" -" model using Hugging Face’s :code:`AutoModelForSequenceClassification` :" -msgstr "" -"要创建模型本身,我们只需使用 Hugging Face 的 :code:`AutoModelForSequenceClassification` " -"加载预训练的 distillBERT 模型:" +#: ../../source/ref-changelog.md:458 +#, fuzzy +msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" +msgstr "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" -#: ../../source/tutorial-quickstart-huggingface.rst:136 -msgid "Federating the example" -msgstr "将示例联邦化" +#: ../../source/ref-changelog.md:459 +#, fuzzy +msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" +msgstr "FedPer [#2266](https://github.com/adap/flower/pull/2266)" -#: ../../source/tutorial-quickstart-huggingface.rst:139 -msgid "Creating the IMDBClient" -msgstr "创建 IMDBClient" +#: ../../source/ref-changelog.md:460 +#, fuzzy +msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" +msgstr "FedPer [#2266](https://github.com/adap/flower/pull/2266)" -#: ../../source/tutorial-quickstart-huggingface.rst:141 +#: ../../source/ref-changelog.md:462 +#, fuzzy msgid "" -"To federate our example to multiple clients, we first need to write our " -"Flower client class (inheriting from 
:code:`flwr.client.NumPyClient`). " -"This is very easy, as our model is a standard :code:`PyTorch` model:" +"**Improve documentation** " +"([#2674](https://github.com/adap/flower/pull/2674), " +"[#2480](https://github.com/adap/flower/pull/2480), " +"[#2826](https://github.com/adap/flower/pull/2826), " +"[#2727](https://github.com/adap/flower/pull/2727), " +"[#2761](https://github.com/adap/flower/pull/2761), " +"[#2900](https://github.com/adap/flower/pull/2900))" msgstr "" -"要将我们的示例联邦到多个客户端,我们首先需要编写 Flower 客户端类(继承自 " -":code:`flwr.client.NumPyClient`)。这很容易,因为我们的模型是一个标准的 :code:`PyTorch` 模型:" +"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614)))" -#: ../../source/tutorial-quickstart-huggingface.rst:169 +#: ../../source/ref-changelog.md:464 +#, fuzzy msgid "" -"The :code:`get_parameters` function lets the server get the client's " -"parameters. Inversely, the :code:`set_parameters` function allows the " -"server to send its parameters to the client. Finally, the :code:`fit` " -"function trains the model locally for the client, and the " -":code:`evaluate` function tests the model locally and returns the " -"relevant metrics." 
-msgstr "" -":code:`get_parameters` " -"函数允许服务器获取客户端的参数。相反,:code:`set_parameters`函数允许服务器将其参数发送给客户端。最后,:code:`fit`函数在本地为客户端训练模型,:code:`evaluate`函数在本地测试模型并返回相关指标。" +"**Improved testing and development infrastructure** " +"([#2797](https://github.com/adap/flower/pull/2797), " +"[#2676](https://github.com/adap/flower/pull/2676), " +"[#2644](https://github.com/adap/flower/pull/2644), " +"[#2656](https://github.com/adap/flower/pull/2656), " +"[#2848](https://github.com/adap/flower/pull/2848), " +"[#2675](https://github.com/adap/flower/pull/2675), " +"[#2735](https://github.com/adap/flower/pull/2735), " +"[#2767](https://github.com/adap/flower/pull/2767), " +"[#2732](https://github.com/adap/flower/pull/2732), " +"[#2744](https://github.com/adap/flower/pull/2744), " +"[#2681](https://github.com/adap/flower/pull/2681), " +"[#2699](https://github.com/adap/flower/pull/2699), " +"[#2745](https://github.com/adap/flower/pull/2745), " +"[#2734](https://github.com/adap/flower/pull/2734), " +"[#2731](https://github.com/adap/flower/pull/2731), " +"[#2652](https://github.com/adap/flower/pull/2652), " +"[#2720](https://github.com/adap/flower/pull/2720), " +"[#2721](https://github.com/adap/flower/pull/2721), " +"[#2717](https://github.com/adap/flower/pull/2717), " +"[#2864](https://github.com/adap/flower/pull/2864), " +"[#2694](https://github.com/adap/flower/pull/2694), " +"[#2709](https://github.com/adap/flower/pull/2709), " +"[#2658](https://github.com/adap/flower/pull/2658), " +"[#2796](https://github.com/adap/flower/pull/2796), " +"[#2692](https://github.com/adap/flower/pull/2692), " +"[#2657](https://github.com/adap/flower/pull/2657), " +"[#2813](https://github.com/adap/flower/pull/2813), " +"[#2661](https://github.com/adap/flower/pull/2661), " +"[#2398](https://github.com/adap/flower/pull/2398))" +msgstr "" +"**改进测试和开发基础设施** ([#2797](https://github.com/adap/flower/pull/2797), " +"[#2676](https://github.com/adap/flower/pull/2676), " 
+"[#2644](https://github.com/adap/flower/pull/2644), " +"[#2656](https://github.com/adap/flower/pull/2656), " +"[#2848](https://github.com/adap/flower/pull/2848), " +"[#2675](https://github.com/adap/flower/pull/2675), " +"[#2735](https://github.com/adap/flower/pull/2735), " +"[#2767](https://github.com/adap/flower/pull/2767), " +"[#2732](https://github.com/adap/flower/pull/2732), " +"[#2744](https://github.com/adap/flower/pull/2744), " +"[#2681](https://github.com/adap/flower/pull/2681), " +"[#2699](https://github.com/adap/flower/pull/2699), " +"[#2745](https://github.com/adap/flower/pull/2745), " +"[#2734](https://github.com/adap/flower/pull/2734), " +"[#2731](https://github.com/adap/flower/pull/2731), " +"[#2652](https://github.com/adap/flower/pull/2652), " +"[#2720](https://github.com/adap/flower/pull/2720), " +"[#2721](https://github.com/adap/flower/pull/2721), " +"[#2717](https://github.com/adap/flower/pull/2717), " +"[#2864](https://github.com/adap/flower/pull/2864), " +"[#2694](https://github.com/adap/flower/pull/2694), " +"[#2709](https://github.com/adap/flower/pull/2709), " +"[#2658](https://github.com/adap/flower/pull/2658), " +"[#2796](https://github.com/adap/flower/pull/2796), " +"[#2692](https://github.com/adap/flower/pull/2692), " +"[#2657](https://github.com/adap/flower/pull/2657), " +"[#2813](https://github.com/adap/flower/pull/2813), " +"[#2661](https://github.com/adap/flower/pull/2661), " +"[#2398](https://github.com/adap/flower/pull/2398))" -#: ../../source/tutorial-quickstart-huggingface.rst:175 -msgid "Starting the server" -msgstr "启动服务器" +#: ../../source/ref-changelog.md:466 +#, fuzzy +msgid "" +"The Flower testing and development infrastructure has received " +"substantial updates. This makes Flower 1.7 the most tested release ever." 
+msgstr "Flower 测试和开发基础架构已得到大幅更新。这使得 Flower 1.7 成为有史以来经过最多测试的版本。" -#: ../../source/tutorial-quickstart-huggingface.rst:177 +#: ../../source/ref-changelog.md:468 +#, fuzzy msgid "" -"Now that we have a way to instantiate clients, we need to create our " -"server in order to aggregate the results. Using Flower, this can be done " -"very easily by first choosing a strategy (here, we are using " -":code:`FedAvg`, which will define the global weights as the average of " -"all the clients' weights at each round) and then using the " -":code:`flwr.server.start_server` function:" +"**Update dependencies** " +"([#2753](https://github.com/adap/flower/pull/2753), " +"[#2651](https://github.com/adap/flower/pull/2651), " +"[#2739](https://github.com/adap/flower/pull/2739), " +"[#2837](https://github.com/adap/flower/pull/2837), " +"[#2788](https://github.com/adap/flower/pull/2788), " +"[#2811](https://github.com/adap/flower/pull/2811), " +"[#2774](https://github.com/adap/flower/pull/2774), " +"[#2790](https://github.com/adap/flower/pull/2790), " +"[#2751](https://github.com/adap/flower/pull/2751), " +"[#2850](https://github.com/adap/flower/pull/2850), " +"[#2812](https://github.com/adap/flower/pull/2812), " +"[#2872](https://github.com/adap/flower/pull/2872), " +"[#2736](https://github.com/adap/flower/pull/2736), " +"[#2756](https://github.com/adap/flower/pull/2756), " +"[#2857](https://github.com/adap/flower/pull/2857), " +"[#2757](https://github.com/adap/flower/pull/2757), " +"[#2810](https://github.com/adap/flower/pull/2810), " +"[#2740](https://github.com/adap/flower/pull/2740), " +"[#2789](https://github.com/adap/flower/pull/2789))" msgstr "" -"现在我们有了实例化客户端的方法,我们需要创建服务器,以便汇总结果。使用 Flower,首先选择一个策略(这里我们使用 " -":code:`FedAvg`,它将把全局模型参数定义为每轮所有客户端模型参数的平均值),然后使用 " -":code:`flwr.server.start_server`函数,就可以非常轻松地完成这项工作:" +"**更新Example** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " 
+"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" -#: ../../source/tutorial-quickstart-huggingface.rst:205 +#: ../../source/ref-changelog.md:470 +#, fuzzy msgid "" -"The :code:`weighted_average` function is there to provide a way to " -"aggregate the metrics distributed amongst the clients (basically this " -"allows us to display a nice average accuracy and loss for every round)." +"**General improvements** " +"([#2803](https://github.com/adap/flower/pull/2803), " +"[#2847](https://github.com/adap/flower/pull/2847), " +"[#2877](https://github.com/adap/flower/pull/2877), " +"[#2690](https://github.com/adap/flower/pull/2690), " +"[#2889](https://github.com/adap/flower/pull/2889), " +"[#2874](https://github.com/adap/flower/pull/2874), " +"[#2819](https://github.com/adap/flower/pull/2819), " +"[#2689](https://github.com/adap/flower/pull/2689), " +"[#2457](https://github.com/adap/flower/pull/2457), " +"[#2870](https://github.com/adap/flower/pull/2870), " +"[#2669](https://github.com/adap/flower/pull/2669), " +"[#2876](https://github.com/adap/flower/pull/2876), " +"[#2885](https://github.com/adap/flower/pull/2885), " +"[#2858](https://github.com/adap/flower/pull/2858), " +"[#2867](https://github.com/adap/flower/pull/2867), " +"[#2351](https://github.com/adap/flower/pull/2351), " +"[#2886](https://github.com/adap/flower/pull/2886), " +"[#2860](https://github.com/adap/flower/pull/2860), " +"[#2828](https://github.com/adap/flower/pull/2828), " +"[#2869](https://github.com/adap/flower/pull/2869), " 
+"[#2875](https://github.com/adap/flower/pull/2875), " +"[#2733](https://github.com/adap/flower/pull/2733), " +"[#2488](https://github.com/adap/flower/pull/2488), " +"[#2646](https://github.com/adap/flower/pull/2646), " +"[#2879](https://github.com/adap/flower/pull/2879), " +"[#2821](https://github.com/adap/flower/pull/2821), " +"[#2855](https://github.com/adap/flower/pull/2855), " +"[#2800](https://github.com/adap/flower/pull/2800), " +"[#2807](https://github.com/adap/flower/pull/2807), " +"[#2801](https://github.com/adap/flower/pull/2801), " +"[#2804](https://github.com/adap/flower/pull/2804), " +"[#2851](https://github.com/adap/flower/pull/2851), " +"[#2787](https://github.com/adap/flower/pull/2787), " +"[#2852](https://github.com/adap/flower/pull/2852), " +"[#2672](https://github.com/adap/flower/pull/2672), " +"[#2759](https://github.com/adap/flower/pull/2759))" msgstr "" -"使用 :code:`weighted_average` " -"函数是为了提供一种方法来汇总分布在客户端的指标(基本上,这可以让我们显示每一轮的平均精度和损失值)。" - -#: ../../source/tutorial-quickstart-huggingface.rst:209 -msgid "Putting everything together" -msgstr "把所有东西放在一起" - -#: ../../source/tutorial-quickstart-huggingface.rst:211 -msgid "We can now start client instances using:" -msgstr "现在我们可以使用:" +"**一般改进** ([#2803](https://github.com/adap/flower/pull/2803), " +"[#2847](https://github.com/adap/flower/pull/2847), " +"[#2877](https://github.com/adap/flower/pull/2877), " +"[#2690](https://github.com/adap/flower/pull/2690), " +"[#2889](https://github.com/adap/flower/pull/2889), " +"[#2874](https://github.com/adap/flower/pull/2874), " +"[#2819](https://github.com/adap/flower/pull/2819), " +"[#2689](https://github.com/adap/flower/pull/2689), " +"[#2457](https://github.com/adap/flower/pull/2457), " +"[#2870](https://github.com/adap/flower/pull/2870), " +"[#2669](https://github.com/adap/flower/pull/2669), " +"[#2876](https://github.com/adap/flower/pull/2876), " +"[#2885](https://github.com/adap/flower/pull/2885), " +"[#2858](https://github.com/adap/flower/pull/2858), " 
+"[#2867](https://github.com/adap/flower/pull/2867), " +"[#2351](https://github.com/adap/flower/pull/2351), " +"[#2886](https://github.com/adap/flower/pull/2886), " +"[#2860](https://github.com/adap/flower/pull/2860), " +"[#2828](https://github.com/adap/flower/pull/2828), " +"[#2869](https://github.com/adap/flower/pull/2869), " +"[#2875](https://github.com/adap/flower/pull/2875), " +"[#2733](https://github.com/adap/flower/pull/2733), " +"[#2488](https://github.com/adap/flower/pull/2488), " +"[#2646](https://github.com/adap/flower/pull/2646), " +"[#2879](https://github.com/adap/flower/pull/2879), " +"[#2821](https://github.com/adap/flower/pull/2821), " +"[#2855](https://github.com/adap/flower/pull/2855), " +"[#2800](https://github.com/adap/flower/pull/2800), " +"[#2807](https://github.com/adap/flower/pull/2807), " +"[#2801](https://github.com/adap/flower/pull/2801), " +"[#2804](https://github.com/adap/flower/pull/2804), " +"[#2851](https://github.com/adap/flower/pull/2851), " +"[#2787](https://github.com/adap/flower/pull/2787), " +"[#2852](https://github.com/adap/flower/pull/2852), " +"[#2672](https://github.com/adap/flower/pull/2672), " +"[#2759](https://github.com/adap/flower/pull/2759))" -#: ../../source/tutorial-quickstart-huggingface.rst:221 +#: ../../source/ref-changelog.md:474 +#, fuzzy msgid "" -"And they will be able to connect to the server and start the federated " -"training." -msgstr "他们就能连接到服务器,开始联邦训练。" +"**Deprecate** `start_numpy_client` " +"([#2563](https://github.com/adap/flower/pull/2563), " +"[#2718](https://github.com/adap/flower/pull/2718))" +msgstr "" +"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" -#: ../../source/tutorial-quickstart-huggingface.rst:223 +#: ../../source/ref-changelog.md:476 #, fuzzy msgid "" -"If you want to check out everything put together, you should check out " -"the `full code example `_ ." 
+"Until now, clients of type `NumPyClient` needed to be started via " +"`start_numpy_client`. In our efforts to consolidate framework APIs, we " +"have introduced changes, and now all client types should start via " +"`start_client`. To continue using `NumPyClient` clients, you simply need " +"to first call the `.to_client()` method and then pass returned `Client` " +"object to `start_client`. The examples and the documentation have been " +"updated accordingly." msgstr "" -"如果您想查看所有内容,请查看完整的代码示例: [https://github.com/adap/flower/tree/main/examples" -"/quickstart-" -"huggingface](https://github.com/adap/flower/tree/main/examples" -"/quickstart-huggingface)." +"到目前为止,\"NumPyClient \"类型的客户端需要通过 \"start_numpy_client \"启动。为了整合框架 " +"API,我们引入了一些变化,现在所有客户端类型都应通过 `start_client` 启动。要继续使用 `NumPyClient` " +"客户端,只需首先调用 `.to_client()` 方法,然后将返回的 `Client` 对象传递给 " +"`start_client`。示例和文档已相应更新。" -#: ../../source/tutorial-quickstart-huggingface.rst:226 +#: ../../source/ref-changelog.md:478 +#, fuzzy msgid "" -"Of course, this is a very basic example, and a lot can be added or " -"modified, it was just to showcase how simply we could federate a Hugging " -"Face workflow using Flower." -msgstr "当然,这只是一个非常基本的示例,还可以添加或修改很多内容,只是为了展示我们可以如何简单地使用 Flower 联合Hugging Face的工作流程。" +"**Deprecate legacy DP wrappers** " +"([#2749](https://github.com/adap/flower/pull/2749))" +msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/tutorial-quickstart-huggingface.rst:229 +#: ../../source/ref-changelog.md:480 +#, fuzzy msgid "" -"Note that in this example we used :code:`PyTorch`, but we could have very" -" well used :code:`TensorFlow`." -msgstr "请注意,在本例中我们使用了 :code:`PyTorch`,但也完全可以使用 :code:`TensorFlow`。" +"Legacy DP wrapper classes are deprecated, but still functional. This is " +"in preparation for an all-new pluggable version of differential privacy " +"support in Flower." 
+msgstr "传统的 DP 封装类已废弃,但仍可正常使用。这是为 Flower 中的全新可插拔差分隐私支持版本做准备。" -#: ../../source/tutorial-quickstart-ios.rst:-1 +#: ../../source/ref-changelog.md:482 +#, fuzzy msgid "" -"Read this Federated Learning quickstart tutorial for creating an iOS app " -"using Flower to train a neural network on MNIST." -msgstr "阅读本联邦学习快速入门教程,了解如何使用 Flower 创建 iOS 应用程序,并在 MNIST 上训练神经网络。" +"**Make optional arg** `--callable` **in** `flower-client` **a required " +"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" +msgstr "" +"**从** `start_client` 中移除** `rest` **实验参数 " +"([#2324](https://github.com/adap/flower/pull/2324))" -#: ../../source/tutorial-quickstart-ios.rst:5 -msgid "Quickstart iOS" -msgstr "快速入门 iOS" +#: ../../source/ref-changelog.md:484 +#, fuzzy +msgid "" +"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " +"([#2890](https://github.com/adap/flower/pull/2890))" +msgstr "" +"**重新命名** `rnd` ** to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/tutorial-quickstart-ios.rst:10 +#: ../../source/ref-changelog.md:486 +#, fuzzy msgid "" -"In this tutorial we will learn how to train a Neural Network on MNIST " -"using Flower and CoreML on iOS devices." -msgstr "在本教程中,我们将学习如何在 iOS 设备上使用 Flower 和 CoreML 在 MNIST 上训练神经网络。" +"**Drop experimental** `Task` **fields** " +"([#2866](https://github.com/adap/flower/pull/2866), " +"[#2865](https://github.com/adap/flower/pull/2865))" +msgstr "" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" -#: ../../source/tutorial-quickstart-ios.rst:12 +#: ../../source/ref-changelog.md:488 #, fuzzy msgid "" -"First of all, for running the Flower Python server, it is recommended to " -"create a virtual environment and run everything within a :doc:`virtualenv" -" `. For the Flower client " -"implementation in iOS, it is recommended to use Xcode as our IDE." 
+"Experimental fields `sa`, `legacy_server_message` and " +"`legacy_client_message` were removed from `Task` message. The removed " +"fields are superseded by the new `RecordSet` abstraction." msgstr "" -"首先,为了运行 Flower Python 服务器,建议创建一个虚拟环境,并在 `virtualenv " -"`_ 中运行一切。对于在 iOS 中实现 " -"Flower 客户端,建议使用 Xcode 作为我们的集成开发环境。" +"从 `Task` 消息中删除了试验性字段 `sa`、 `legacy_server_message` 和 " +"`legacy_client_message`。删除的字段已被新的 `RecordSet` 抽象所取代。" -#: ../../source/tutorial-quickstart-ios.rst:15 +#: ../../source/ref-changelog.md:490 +#, fuzzy msgid "" -"Our example consists of one Python *server* and two iPhone *clients* that" -" all have the same model." -msgstr "我们的示例包括一个 Python *服务器*和两个 iPhone *客户端*,它们都具有相同的模型。" +"**Retire MXNet examples** " +"([#2724](https://github.com/adap/flower/pull/2724))" +msgstr "**新的 scikit-learn 代码示例** ([#748](https://github.com/adap/flower/pull/748))" -#: ../../source/tutorial-quickstart-ios.rst:17 +#: ../../source/ref-changelog.md:492 +#, fuzzy msgid "" -"*Clients* are responsible for generating individual weight updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." -msgstr "*客户端*负责根据其本地数据集为模型生成独立的模型参数。然后,这些参数更新会被发送到*服务器*,由*服务器*汇总后生成一个更好的模型。最后,*服务器*将改进后的模型发送回每个*客户端*。一个完整的参数更新周期称为一*轮*。" - -#: ../../source/tutorial-quickstart-ios.rst:21 -msgid "" -"Now that we have a rough idea of what is going on, let's get started to " -"setup our Flower server environment. We first need to install Flower. 
You" -" can do this by using pip:" -msgstr "现在我们已经有了一个大致的概念,让我们开始设置 Flower 服务器环境吧。首先,我们需要安装 Flower。你可以使用 pip 来安装:" - -#: ../../source/tutorial-quickstart-ios.rst:27 -msgid "Or Poetry:" -msgstr "或者Poetry:" +"The development of the MXNet fremework has ended and the project is now " +"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " +"examples won't receive updates." +msgstr "" +"MXNet fremework 的开发工作已经结束,该项目现已[归档于 " +"GitHub](https://github.com/apache/mxnet)。现有的 MXNet 示例不会收到更新。" -#: ../../source/tutorial-quickstart-ios.rst:34 -#: ../../source/tutorial-quickstart-pytorch.rst:37 -#: ../../source/tutorial-quickstart-scikitlearn.rst:40 -#: ../../source/tutorial-quickstart-tensorflow.rst:29 -#: ../../source/tutorial-quickstart-xgboost.rst:55 -msgid "Flower Client" -msgstr "Flower 客户端" +#: ../../source/ref-changelog.md:494 +#, fuzzy +msgid "v1.6.0 (2023-11-28)" +msgstr "v1.4.0 (2023-04-21)" -#: ../../source/tutorial-quickstart-ios.rst:36 +#: ../../source/ref-changelog.md:500 +#, fuzzy msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training using CoreML as our local training pipeline and " -"MNIST as our dataset. For simplicity reasons we will use the complete " -"Flower client with CoreML, that has been implemented and stored inside " -"the Swift SDK. The client implementation can be seen below:" +"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " +"`Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Gabriel " +"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," +" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " +"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " +"`cnxdeveloper`, `k3nfalt` " msgstr "" -"现在我们已经安装了所有依赖项,让我们使用 CoreML 作为本地训练框架和 MNIST " -"作为数据集,运行一个简单的分布式训练。为了简单起见,我们将使用 CoreML 的完整 Flower 客户端,该客户端已在 Swift SDK " -"中实现并存储。客户端实现如下:" +"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " +"`Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Gabriel " +"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," +" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " +"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " +"`cnxdeveloper`, `k3nfalt` " -#: ../../source/tutorial-quickstart-ios.rst:72 +#: ../../source/ref-changelog.md:504 msgid "" -"Let's create a new application project in Xcode and add :code:`flwr` as a" -" dependency in your project. For our application, we will store the logic" -" of our app in :code:`FLiOSModel.swift` and the UI elements in " -":code:`ContentView.swift`. We will focus more on :code:`FLiOSModel.swift`" -" in this quickstart. Please refer to the `full code example " -"`_ to learn more " -"about the app." 
+"**Add experimental support for Python 3.12** " +"([#2565](https://github.com/adap/flower/pull/2565))" msgstr "" -"让我们在 Xcode 中创建一个新的应用程序项目,并在项目中添加 :code:`flwr` 作为依赖关系。对于我们的应用程序,我们将在 " -":code:`FLiOSModel.swift` 中存储应用程序的逻辑,在 :code:`ContentView.swift` 中存储 UI " -"元素。在本快速入门中,我们将更多地关注 :code:`FLiOSModel.swift`。请参阅 `完整代码示例 " -"`_ 以了解更多有关应用程序的信息。" - -#: ../../source/tutorial-quickstart-ios.rst:75 -msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" -msgstr "在 :code:`FLiOSModel.swift` 中导入 Flower 和 CoreML 相关软件包:" +"** 增加对 Python 3.12 的实验支持** " +"([#2565](https://github.com/adap/flower/pull/2565))" -#: ../../source/tutorial-quickstart-ios.rst:83 +#: ../../source/ref-changelog.md:506 +#, fuzzy msgid "" -"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " -"will be bundled inside the application during deployment to your iOS " -"device. We need to pass the url to access mlmodel and run CoreML machine " -"learning processes, it can be retrieved by calling the function " -":code:`Bundle.main.url`. For the MNIST dataset, we need to preprocess it " -"into :code:`MLBatchProvider` object. The preprocessing is done inside " -":code:`DataLoader.swift`." 
+"**Add new XGBoost examples** " +"([#2612](https://github.com/adap/flower/pull/2612), " +"[#2554](https://github.com/adap/flower/pull/2554), " +"[#2617](https://github.com/adap/flower/pull/2617), " +"[#2618](https://github.com/adap/flower/pull/2618), " +"[#2619](https://github.com/adap/flower/pull/2619), " +"[#2567](https://github.com/adap/flower/pull/2567))" msgstr "" -"然后通过拖放将 mlmodel 添加到项目中,在部署到 iOS 设备时,mlmodel 将被捆绑到应用程序中。我们需要传递 url 以访问 " -"mlmodel 并运行 CoreML 机器学习进程,可通过调用函数 :code:`Bundle.main.url` 获取。对于 MNIST " -"数据集,我们需要将其预处理为 :code:`MLBatchProvider` 对象。预处理在 :code:`DataLoader.swift` " -"中完成。" +"**引入(试验性)Driver API** ([#1520](https://github.com/adap/flower/pull/1520)," +" [#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" -#: ../../source/tutorial-quickstart-ios.rst:99 +#: ../../source/ref-changelog.md:508 #, fuzzy msgid "" -"Since CoreML does not allow the model parameters to be seen before " -"training, and accessing the model parameters during or after the training" -" can only be done by specifying the layer name, we need to know this " -"information beforehand, through looking at the model specification, which" -" are written as proto files. The implementation can be seen in " -":code:`MLModelInspect`." +"We have added a new `xgboost-quickstart` example alongside a new " +"`xgboost-comprehensive` example that goes more in-depth." 
msgstr "" -"由于 CoreML 不允许在训练前查看模型参数,而在训练过程中或训练后访问模型参数只能通过指定层名来完成,因此我们需要事先通过查看模型规范(写成 " -"proto 文件)来了解这些信息。具体实现可参见 :code:`MLModelInspect`。" +"我们添加了一个新的 \"xgboost-quickstart \"示例和一个新的 \"xgboost-comprehensive " +"\"示例,后者更加深入。" -#: ../../source/tutorial-quickstart-ios.rst:102 +#: ../../source/ref-changelog.md:510 #, fuzzy msgid "" -"After we have all of the necessary information, let's create our Flower " -"client." -msgstr "获得所有必要信息后,让我们创建 Flower 客户端。" +"**Add Vertical FL example** " +"([#2598](https://github.com/adap/flower/pull/2598))" +msgstr "**新的 iOS CoreML 代码示例**([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/tutorial-quickstart-ios.rst:117 +#: ../../source/ref-changelog.md:512 +#, fuzzy msgid "" -"Then start the Flower gRPC client and start communicating to the server " -"by passing our Flower client to the function :code:`startFlwrGRPC`." -msgstr "然后启动 Flower gRPC 客户端,并通过将 Flower 客户端传递给函数 :code:`startFlwrGRPC` 来开始与服务器通信。" +"We had many questions about Vertical Federated Learning using Flower, so " +"we decided to add an simple example for it on the [Titanic " +"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " +"tutorial (in the README)." +msgstr "" +"我们收到了许多关于使用 Flower 进行垂直联合学习的问题,因此我们决定在 [Titanic " +"数据集](https://www.kaggle.com/competitions/titanic/data) 上添加一个简单的示例,并附上教程(在" +" README 中)。" -#: ../../source/tutorial-quickstart-ios.rst:124 +#: ../../source/ref-changelog.md:514 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -"call the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. " -"The attribute :code:`hostname` and :code:`port` tells the client which " -"server to connect to. This can be done by entering the hostname and port " -"in the application before clicking the start button to start the " -"federated learning process." 
-msgstr "" -"这就是客户端。我们只需实现 :code:`Client` 或调用提供的 :code:`MLFlwrClient` 并调用 " -":code:`startFlwrGRPC()`。属性 :code:`hostname` 和 :code:`port` " -"会告诉客户端要连接到哪个服务器。这可以通过在应用程序中输入主机名和端口来实现,然后再点击开始按钮启动联邦学习进程。" +"**Support custom** `ClientManager` **in** `start_driver()` " +"([#2292](https://github.com/adap/flower/pull/2292))" +msgstr "**在***`start_driver()`中支持自定义***`ClientManager([#2292](https://github.com/adap/flower/pull/2292))" -#: ../../source/tutorial-quickstart-ios.rst:129 -#: ../../source/tutorial-quickstart-pytorch.rst:203 -#: ../../source/tutorial-quickstart-scikitlearn.rst:167 -#: ../../source/tutorial-quickstart-tensorflow.rst:98 -#: ../../source/tutorial-quickstart-xgboost.rst:309 -msgid "Flower Server" -msgstr "Flower 服务器" +#: ../../source/ref-changelog.md:516 +msgid "" +"**Update REST API to support create and delete nodes** " +"([#2283](https://github.com/adap/flower/pull/2283))" +msgstr "" +"**更新 REST API 以支持创建和删除节点** " +"([#2283](https://github.com/adap/flower/pull/2283))" -#: ../../source/tutorial-quickstart-ios.rst:131 -#: ../../source/tutorial-quickstart-pytorch.rst:205 -#: ../../source/tutorial-quickstart-tensorflow.rst:100 +#: ../../source/ref-changelog.md:518 +#, fuzzy msgid "" -"For simple workloads we can start a Flower server and leave all the " -"configuration possibilities at their default values. In a file named " -":code:`server.py`, import Flower and start the server:" +"**Update the Android SDK** " +"([#2187](https://github.com/adap/flower/pull/2187))" msgstr "" -"对于简单的工作负载,我们可以启动 Flower 服务器,并将所有配置选项保留为默认值。在名为 :code:`server.py` 的文件中,导入 " -"Flower 并启动服务器:" +"**介绍Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" -#: ../../source/tutorial-quickstart-ios.rst:142 -#: ../../source/tutorial-quickstart-pytorch.rst:216 -#: ../../source/tutorial-quickstart-scikitlearn.rst:230 -#: ../../source/tutorial-quickstart-tensorflow.rst:112 -msgid "Train the model, federated!" -msgstr "联邦训练模型!" 
+#: ../../source/ref-changelog.md:520 +#, fuzzy +msgid "Add gRPC request-response capability to the Android SDK." +msgstr "为 C++ SDK 添加 gRPC 请求-响应功能。" -#: ../../source/tutorial-quickstart-ios.rst:144 -#: ../../source/tutorial-quickstart-pytorch.rst:218 -#: ../../source/tutorial-quickstart-tensorflow.rst:114 -#: ../../source/tutorial-quickstart-xgboost.rst:525 +#: ../../source/ref-changelog.md:522 +#, fuzzy msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. FL systems usually have a server and " -"multiple clients. We therefore have to start the server first:" -msgstr "客户端和服务器都已准备就绪,我们现在可以运行一切,看看联邦学习的实际效果。FL 系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" +"**Update the C++ SDK** " +"([#2537](https://github.com/adap/flower/pull/2537), " +"[#2528](https://github.com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" +msgstr "" +"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " +"[#2528](https://github/com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" + +#: ../../source/ref-changelog.md:524 +msgid "Add gRPC request-response capability to the C++ SDK." +msgstr "为 C++ SDK 添加 gRPC 请求-响应功能。" -#: ../../source/tutorial-quickstart-ios.rst:152 +#: ../../source/ref-changelog.md:526 +#, fuzzy msgid "" -"Once the server is running we can start the clients in different " -"terminals. Build and run the client through your Xcode, one through Xcode" -" Simulator and the other by deploying it to your iPhone. To see more " -"about how to deploy your app to iPhone or Simulator visit `here " -"`_." 
+"**Make HTTPS the new default** " +"([#2591](https://github.com/adap/flower/pull/2591), " +"[#2636](https://github.com/adap/flower/pull/2636))" msgstr "" -"服务器运行后,我们就可以在不同的终端启动客户端。通过 Xcode 构建并运行客户端,一个通过 Xcode 模拟器,另一个通过部署到 " -"iPhone。要了解更多有关如何将应用程序部署到 iPhone 或模拟器的信息,请访问 `此处 " -"`_。" +"Baselines文档([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400)" -#: ../../source/tutorial-quickstart-ios.rst:156 +#: ../../source/ref-changelog.md:528 +#, fuzzy msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system in your ios device. The full `source code " -"`_ for this " -"example can be found in :code:`examples/ios`." +"Flower is moving to HTTPS by default. The new `flower-server` requires " +"passing `--certificates`, but users can enable `--insecure` to use HTTP " +"for prototyping. The same applies to `flower-client`, which can either " +"use user-provided credentials or gRPC-bundled certificates to connect to " +"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " +"enable insecure HTTP connections." msgstr "" -"恭喜您! 您已经成功地在 ios 设备中构建并运行了第一个联邦学习系统。本示例的`完整源代码 " -"`_ 可在 " -":code:`examples/ios` 中找到。" +"Flower 默认使用 HTTPS。新的 \"flower-server \"需要通过\"--证书\",但用户可以启用\"--不安全 \"来使用 " +"HTTP 进行原型开发。这同样适用于 `flower-client`,它可以使用用户提供的凭证或 gRPC 绑定证书连接到支持 HTTPS " +"的服务器,也可以通过传递 `--insecure`来启用不安全的 HTTP 连接。" -#: ../../source/tutorial-quickstart-jax.rst:-1 +#: ../../source/ref-changelog.md:530 +#, fuzzy msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Jax to train a linear regression model on a scikit-learn dataset." -msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 Jax 在 scikit-learn 数据集上训练线性回归模型。" - -#: ../../source/tutorial-quickstart-jax.rst:5 -msgid "Quickstart JAX" -msgstr "快速入门 JAX" +"For backward compatibility, `start_client()` and `start_numpy_client()` " +"will still start in insecure mode by default. 
In a future release, " +"insecure connections will require user opt-in by passing `insecure=True`." +msgstr "" +"为了向后兼容,`start_client()` 和 `start_numpy_client()` " +"默认仍以不安全模式启动。在未来的版本中,不安全连接将需要用户通过传递 `insecure=True` 进行选择。" -#: ../../source/tutorial-quickstart-pandas.rst:-1 +#: ../../source/ref-changelog.md:532 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Pandas to perform Federated Analytics." -msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 Pandas 执行联邦分析。" - -#: ../../source/tutorial-quickstart-pandas.rst:5 -msgid "Quickstart Pandas" -msgstr "快速入门Pandas" - -#: ../../source/tutorial-quickstart-pandas.rst:10 -msgid "Let's build a federated analytics system using Pandas and Flower!" -msgstr "让我们使用 Pandas 和 Flower 建立一个联邦分析系统!" +"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" +msgstr "" +"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" -#: ../../source/tutorial-quickstart-pandas.rst:12 +#: ../../source/ref-changelog.md:534 +#, fuzzy msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." +"Using the `client_fn`, Flower clients can interchangeably run as " +"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" +" `start_simulation`) without requiring changes to how the client class is" +" defined and instantiated. The `to_client()` function is introduced to " +"convert a `NumPyClient` to a `Client`." 
msgstr "" -"请参阅 `完整代码示例 `_\" 了解更多信息。" +"使用 `client_fn`,Flower 客户端可以作为独立进程(即通过 `start_client`)或在模拟中(即通过 " +"`start_simulation`)交替运行,而无需更改客户端类的定义和实例化方式。调用 `start_numpy_client` 现已过时。" -#: ../../source/tutorial-quickstart-pytorch.rst:-1 +#: ../../source/ref-changelog.md:536 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch to train a CNN model on MNIST." -msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 PyTorch 在 MNIST 上训练 CNN 模型。" +"**Add new** `Bulyan` **strategy** " +"([#1817](https://github.com/adap/flower/pull/1817), " +"[#1891](https://github.com/adap/flower/pull/1891))" +msgstr "" +"**添加新**\"Bulyan " +"\"**策略**([#1817](https://github.com/adap/flower/pull/1817), " +"[#1891](https://github.com/adap/flower/pull/1891)" -#: ../../source/tutorial-quickstart-pytorch.rst:13 +#: ../../source/ref-changelog.md:538 msgid "" -"In this tutorial we will learn how to train a Convolutional Neural " -"Network on CIFAR10 using Flower and PyTorch." -msgstr "在本教程中,我们将学习如何使用 Flower 和 PyTorch 在 CIFAR10 上训练卷积神经网络。" +"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " +"2018](https://arxiv.org/abs/1802.07927)" +msgstr "新的 \"Bulyan\"策略通过[El Mhamdi 等人,2018](https://arxiv.org/abs/1802.07927)实现" -#: ../../source/tutorial-quickstart-pytorch.rst:15 -#: ../../source/tutorial-quickstart-xgboost.rst:39 +#: ../../source/ref-changelog.md:540 #, fuzzy msgid "" -"First of all, it is recommended to create a virtual environment and run " -"everything within a :doc:`virtualenv `." 
-msgstr "" -"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" +"**Add new** `XGB Bagging` **strategy** " +"([#2611](https://github.com/adap/flower/pull/2611))" +msgstr "**添加新的`FedProx`策略** ([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/tutorial-quickstart-pytorch.rst:17 -#: ../../source/tutorial-quickstart-scikitlearn.rst:14 +#: ../../source/ref-changelog.md:542 ../../source/ref-changelog.md:544 +#, fuzzy msgid "" -"Our example consists of one *server* and two *clients* all having the " -"same model." -msgstr "我们的例子包括一个*服务器*和两个*客户端*,它们都有相同的模型。" +"**Introduce `WorkloadState`** " +"([#2564](https://github.com/adap/flower/pull/2564), " +"[#2632](https://github.com/adap/flower/pull/2632))" +msgstr "" +"**新的内置策略**([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822)" -#: ../../source/tutorial-quickstart-pytorch.rst:19 +#: ../../source/ref-changelog.md:548 msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." -msgstr "*客户端*负责在其本地数据集上更新模型参数。然后,这些参数会被发送到*服务器*,由*服务器*聚合后生成一个更好的模型。最后,*服务器*将改进后的模型发送回每个*客户端*。一个完整的模型参数更新周期称为一*轮*。" +"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " +"[#2286](https://github.com/adap/flower/pull/2286), " +"[#2509](https://github.com/adap/flower/pull/2509))" +msgstr "" +"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " +"[#2286](https://github.com/adap/flower/pull/2286), " +"[#2509](https://github.com/adap/flower/pull/2509))" -#: ../../source/tutorial-quickstart-pytorch.rst:23 +#: ../../source/ref-changelog.md:550 msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. 
You can do this by running :" -msgstr "现在,我们已经有了一个大致的概念了,那就让我们开始吧。首先,我们需要安装 Flower。可以通过运行 :" +"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400))" +msgstr "" +"Baselines文档([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400)" -#: ../../source/tutorial-quickstart-pytorch.rst:29 +#: ../../source/ref-changelog.md:552 msgid "" -"Since we want to use PyTorch to solve a computer vision task, let's go " -"ahead and install PyTorch and the **torchvision** library:" -msgstr "既然我们想用 PyTorch 解决计算机视觉任务,那就继续安装 PyTorch 和 **torchvision** 库吧:" +"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " +"[#2507](https://github.com/adap/flower/pull/2507))" +msgstr "" +"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " +"[#2507](https://github.com/adap/flower/pull/2507))" -#: ../../source/tutorial-quickstart-pytorch.rst:39 +#: ../../source/ref-changelog.md:554 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. Our training " -"procedure and network architecture are based on PyTorch's `Deep Learning " -"with PyTorch " -"`_." 
+"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" msgstr "" -"现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练过程和网络架构基于 PyTorch " -"的《Deep Learning with PyTorch " -"`_》。" +"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" -#: ../../source/tutorial-quickstart-pytorch.rst:41 -msgid "" -"In a file called :code:`client.py`, import Flower and PyTorch related " -"packages:" -msgstr "在名为 :code:`client.py` 的文件中,导入 Flower 和 PyTorch 相关软件包:" +#: ../../source/ref-changelog.md:556 +msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" +msgstr "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" -#: ../../source/tutorial-quickstart-pytorch.rst:56 -msgid "In addition, we define the device allocation in PyTorch with:" -msgstr "此外,我们还在 PyTorch 中定义了设备分配:" +#: ../../source/ref-changelog.md:558 +msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" +msgstr "FjORD [#2431](https://github.com/adap/flower/pull/2431)" -#: ../../source/tutorial-quickstart-pytorch.rst:62 -msgid "" -"We use PyTorch to load CIFAR10, a popular colored image classification " -"dataset for machine learning. The PyTorch :code:`DataLoader()` downloads " -"the training and test data that are then normalized." -msgstr "" -"我们使用 PyTorch 来加载 CIFAR10,这是一个用于机器学习的流行彩色图像分类数据集。PyTorch " -":code:`DataLoader()`下载训练数据和测试数据,然后进行归一化处理。" +#: ../../source/ref-changelog.md:560 +msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" +msgstr "MOON [#2421](https://github.com/adap/flower/pull/2421)" -#: ../../source/tutorial-quickstart-pytorch.rst:78 -msgid "" -"Define the loss and optimizer with PyTorch. The training of the dataset " -"is done by looping over the dataset, measure the corresponding loss and " -"optimize it." 
-msgstr "使用 PyTorch 定义损失和优化器。数据集的训练是通过循环数据集、测量相应的损失值并对其进行优化来完成的。" +#: ../../source/ref-changelog.md:562 +msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" +msgstr "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" -#: ../../source/tutorial-quickstart-pytorch.rst:94 -msgid "" -"Define then the validation of the machine learning network. We loop over" -" the test set and measure the loss and accuracy of the test set." -msgstr "然后定义机器学习网络的验证。我们在测试集上循环,计算测试集的损失值和准确率。" +#: ../../source/ref-changelog.md:564 +msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +msgstr "FedPer [#2266](https://github.com/adap/flower/pull/2266)" -#: ../../source/tutorial-quickstart-pytorch.rst:113 -msgid "" -"After defining the training and testing of a PyTorch machine learning " -"model, we use the functions for the Flower clients." -msgstr "在定义了 PyTorch 机器学习模型的训练和测试之后,我们将这些功能用于 Flower 客户端。" +#: ../../source/ref-changelog.md:566 +msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" +msgstr "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" -#: ../../source/tutorial-quickstart-pytorch.rst:115 -msgid "" -"The Flower clients will use a simple CNN adapted from 'PyTorch: A 60 " -"Minute Blitz':" -msgstr "Flower 客户端将使用一个简单的从“PyTorch: 60 分钟突击\"改编的CNN:" +#: ../../source/ref-changelog.md:568 +msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" +msgstr "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" -#: ../../source/tutorial-quickstart-pytorch.rst:142 +#: ../../source/ref-changelog.md:570 msgid "" -"After loading the data set with :code:`load_data()` we define the Flower " -"interface." 
-msgstr "使用 :code:`load_data()` 加载数据集后,我们定义了 Flower 接口。" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" +msgstr "" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" -#: ../../source/tutorial-quickstart-pytorch.rst:144 -#: ../../source/tutorial-quickstart-tensorflow.rst:54 +#: ../../source/ref-changelog.md:572 +#, fuzzy msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to train the neural network we defined earlier)." +"**General updates to Flower Examples** " +"([#2384](https://github.com/adap/flower/pull/2384), " +"[#2425](https://github.com/adap/flower/pull/2425), " +"[#2526](https://github.com/adap/flower/pull/2526), " +"[#2302](https://github.com/adap/flower/pull/2302), " +"[#2545](https://github.com/adap/flower/pull/2545))" msgstr "" -"Flower 服务器通过一个名为 :code:`Client` " -"的接口与客户端交互。当服务器选择一个特定的客户端进行训练时,它会通过网络发送训练指令。客户端接收到这些指令后,会调用 :code:`Client`" -" 方法之一来运行您的代码(即训练我们之前定义的神经网络)。" +"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " +"[#2528](https://github/com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" -#: ../../source/tutorial-quickstart-pytorch.rst:150 +#: ../../source/ref-changelog.md:574 +#, fuzzy msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses PyTorch. 
Implementing :code:`NumPyClient` usually means " -"defining the following methods (:code:`set_parameters` is optional " -"though):" +"**General updates to Flower Baselines** " +"([#2301](https://github.com/adap/flower/pull/2301), " +"[#2305](https://github.com/adap/flower/pull/2305), " +"[#2307](https://github.com/adap/flower/pull/2307), " +"[#2327](https://github.com/adap/flower/pull/2327), " +"[#2435](https://github.com/adap/flower/pull/2435), " +"[#2462](https://github.com/adap/flower/pull/2462), " +"[#2463](https://github.com/adap/flower/pull/2463), " +"[#2461](https://github.com/adap/flower/pull/2461), " +"[#2469](https://github.com/adap/flower/pull/2469), " +"[#2466](https://github.com/adap/flower/pull/2466), " +"[#2471](https://github.com/adap/flower/pull/2471), " +"[#2472](https://github.com/adap/flower/pull/2472), " +"[#2470](https://github.com/adap/flower/pull/2470))" msgstr "" -"Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用 PyTorch 时,它使 " -":code:`Client` 接口的实现变得更容易。实现 :code:`NumPyClient` " -"通常意味着定义以下方法(:code:`set_parameters` 是可选的):" - -#: ../../source/tutorial-quickstart-pytorch.rst:156 -#: ../../source/tutorial-quickstart-scikitlearn.rst:119 -msgid "return the model weight as a list of NumPy ndarrays" -msgstr "以 NumPy ndarrays 列表形式返回模型参数" - -#: ../../source/tutorial-quickstart-pytorch.rst:157 -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 -msgid ":code:`set_parameters` (optional)" -msgstr ":code:`set_parameters` (可选)" +"**普通改进**([#2309](https://github.com/adap/flower/pull/2309), " +"[#2310](https://github.com/adap/flower/pull/2310), " +"[2313](https://github.com/adap/flower/pull/2313), " +"[#2316](https://github.com/adap/flower/pull/2316), " +"[2317](https://github.com/adap/flower/pull/2317),[#2349](https://github.com/adap/flower/pull/2349)," +" [#2360](https://github.com/adap/flower/pull/2360), " +"[#2402](https://github.com/adap/flower/pull/2402), " +"[#2446](https://github.com/adap/flower/pull/2446) " 
+"[#2561](https://github.com/adap/flower/pull/2561))" -#: ../../source/tutorial-quickstart-pytorch.rst:158 -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 +#: ../../source/ref-changelog.md:576 +#, fuzzy msgid "" -"update the local model weights with the parameters received from the " -"server" -msgstr "用从服务器接收到的参数更新本地模型参数" - -#: ../../source/tutorial-quickstart-pytorch.rst:160 -#: ../../source/tutorial-quickstart-scikitlearn.rst:124 -msgid "set the local model weights" -msgstr "设置本地模型参数" - -#: ../../source/tutorial-quickstart-pytorch.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 -msgid "train the local model" -msgstr "训练本地模型" - -#: ../../source/tutorial-quickstart-pytorch.rst:162 -#: ../../source/tutorial-quickstart-scikitlearn.rst:126 -msgid "receive the updated local model weights" -msgstr "接收更新的本地模型参数" - -#: ../../source/tutorial-quickstart-pytorch.rst:164 -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 -msgid "test the local model" -msgstr "测试本地模型" - -#: ../../source/tutorial-quickstart-pytorch.rst:166 -msgid "which can be implemented in the following way:" -msgstr "可以通过以下方式实现:" +"**General updates to the simulation engine** " +"([#2331](https://github.com/adap/flower/pull/2331), " +"[#2447](https://github.com/adap/flower/pull/2447), " +"[#2448](https://github.com/adap/flower/pull/2448), " +"[#2294](https://github.com/adap/flower/pull/2294))" +msgstr "" +"**模拟引擎的普通更新** ([#2331](https://github.com/adap/flower/pull/2331), " +"[#2447](https://github.com/adap/flower/pull/2447), " +"[#2448](https://github.com/adap/flower/pull/2448))" -#: ../../source/tutorial-quickstart-pytorch.rst:189 -#: ../../source/tutorial-quickstart-tensorflow.rst:82 +#: ../../source/ref-changelog.md:578 +#, fuzzy msgid "" -"We can now create an instance of our class :code:`CifarClient` and add " -"one line to actually run this client:" -msgstr "现在我们可以创建一个 :code:`CifarClient` 类的实例,并添加一行来实际运行该客户端:" +"**General updates to Flower SDKs** " 
+"([#2288](https://github.com/adap/flower/pull/2288), " +"[#2429](https://github.com/adap/flower/pull/2429), " +"[#2555](https://github.com/adap/flower/pull/2555), " +"[#2543](https://github.com/adap/flower/pull/2543), " +"[#2544](https://github.com/adap/flower/pull/2544), " +"[#2597](https://github.com/adap/flower/pull/2597), " +"[#2623](https://github.com/adap/flower/pull/2623))" +msgstr "" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/tutorial-quickstart-pytorch.rst:196 -#: ../../source/tutorial-quickstart-tensorflow.rst:90 +#: ../../source/ref-changelog.md:580 #, fuzzy msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"[::]:8080\"` tells " -"the client which server to connect to. In our case we can run the server " -"and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." 
+"**General improvements** " +"([#2309](https://github.com/adap/flower/pull/2309), " +"[#2310](https://github.com/adap/flower/pull/2310), " +"[#2313](https://github.com/adap/flower/pull/2313), " +"[#2316](https://github.com/adap/flower/pull/2316), " +"[#2317](https://github.com/adap/flower/pull/2317), " +"[#2349](https://github.com/adap/flower/pull/2349), " +"[#2360](https://github.com/adap/flower/pull/2360), " +"[#2402](https://github.com/adap/flower/pull/2402), " +"[#2446](https://github.com/adap/flower/pull/2446), " +"[#2561](https://github.com/adap/flower/pull/2561), " +"[#2273](https://github.com/adap/flower/pull/2273), " +"[#2267](https://github.com/adap/flower/pull/2267), " +"[#2274](https://github.com/adap/flower/pull/2274), " +"[#2275](https://github.com/adap/flower/pull/2275), " +"[#2432](https://github.com/adap/flower/pull/2432), " +"[#2251](https://github.com/adap/flower/pull/2251), " +"[#2321](https://github.com/adap/flower/pull/2321), " +"[#1936](https://github.com/adap/flower/pull/1936), " +"[#2408](https://github.com/adap/flower/pull/2408), " +"[#2413](https://github.com/adap/flower/pull/2413), " +"[#2401](https://github.com/adap/flower/pull/2401), " +"[#2531](https://github.com/adap/flower/pull/2531), " +"[#2534](https://github.com/adap/flower/pull/2534), " +"[#2535](https://github.com/adap/flower/pull/2535), " +"[#2521](https://github.com/adap/flower/pull/2521), " +"[#2553](https://github.com/adap/flower/pull/2553), " +"[#2596](https://github.com/adap/flower/pull/2596))" msgstr "" -"这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient` 并调用 " -":code:`fl.client.start_client()` 或 " -":code:`fl.client.start_numpy_client()`。字符串 " -":code:`\"[::]:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此使用 " -":code:`\"[::]:8080\"。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是客户端指向的 " -":code:`server_address`。" +"**一般改进** ([#2309](https://github.com/adap/flower/pull/2309), " +"[#2310](https://github.com/adap/flower/pull/2310), " 
+"[#2313](https://github.com/adap/flower/pull/2313), " +"[#2316](https://github.com/adap/flower/pull/2316), " +"[#2317](https://github.com/adap/flower/pull/2317), " +"[#2349](https://github.com/adap/flower/pull/2349), " +"[#2360](https://github.com/adap/flower/pull/2360), " +"[#2402](https://github.com/adap/flower/pull/2402), " +"[#2446](https://github.com/adap/flower/pull/2446), " +"[#2561](https://github.com/adap/flower/pull/2561), " +"[#2273](https://github.com/adap/flower/pull/2273), " +"[#2267](https://github.com/adap/flower/pull/2267), " +"[#2274](https://github.com/adap/flower/pull/2274), " +"[#2275](https://github.com/adap/flower/pull/2275), " +"[#2432](https://github.com/adap/flower/pull/2432), " +"[#2251](https://github.com/adap/flower/pull/2251), " +"[#2321](https://github.com/adap/flower/pull/2321), " +"[#1936](https://github.com/adap/flower/pull/1936), " +"[#2408](https://github.com/adap/flower/pull/2408), " +"[#2413](https://github.com/adap/flower/pull/2413), " +"[#2401](https://github.com/adap/flower/pull/2401), " +"[#2531](https://github.com/adap/flower/pull/2531), " +"[#2534](https://github.com/adap/flower/pull/2534), " +"[#2535](https://github.com/adap/flower/pull/2535), " +"[#2521](https://github.com/adap/flower/pull/2521), " +"[#2553](https://github.com/adap/flower/pull/2553), " +"[#2596](https://github.com/adap/flower/pull/2596))" -#: ../../source/tutorial-quickstart-pytorch.rst:226 -#: ../../source/tutorial-quickstart-scikitlearn.rst:239 -#: ../../source/tutorial-quickstart-tensorflow.rst:122 -#: ../../source/tutorial-quickstart-xgboost.rst:533 -msgid "" -"Once the server is running we can start the clients in different " -"terminals. 
Open a new terminal and start the first client:" -msgstr "服务器运行后,我们就可以在不同终端启动客户端了。打开一个新终端,启动第一个客户端:" +#: ../../source/ref-changelog.md:582 ../../source/ref-changelog.md:672 +#: ../../source/ref-changelog.md:736 ../../source/ref-changelog.md:790 +#: ../../source/ref-changelog.md:857 +msgid "Flower received many improvements under the hood, too many to list here." +msgstr "Flower 进行了许多改进,这里就不一一列举了。" -#: ../../source/tutorial-quickstart-pytorch.rst:233 -#: ../../source/tutorial-quickstart-scikitlearn.rst:246 -#: ../../source/tutorial-quickstart-tensorflow.rst:129 -#: ../../source/tutorial-quickstart-xgboost.rst:540 -msgid "Open another terminal and start the second client:" -msgstr "打开另一台终端,启动第二个客户端:" +#: ../../source/ref-changelog.md:586 +msgid "" +"**Remove support for Python 3.7** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" +msgstr "" +"**移除对 Python 3.7 的支持** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" -#: ../../source/tutorial-quickstart-pytorch.rst:239 -#: ../../source/tutorial-quickstart-scikitlearn.rst:252 -#: ../../source/tutorial-quickstart-xgboost.rst:546 +#: ../../source/ref-changelog.md:588 msgid "" -"Each client will have its own dataset. You should now see how the " -"training does in the very first terminal (the one that started the " -"server):" -msgstr "每个客户端都有自己的数据集。现在你应该看到第一个终端(启动服务器的终端)的训练效果了:" +"Python 3.7 support was deprecated in Flower 1.5, and this release removes" +" support. 
Flower now requires Python 3.8." +msgstr "在 Flower 1.5 中,Python 3.7 支持已被弃用,本版本将删除该支持。Flower 现在需要 Python 3.8。" -#: ../../source/tutorial-quickstart-pytorch.rst:271 +#: ../../source/ref-changelog.md:590 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples" -"/quickstart-pytorch`." +"**Remove experimental argument** `rest` **from** `start_client` " +"([#2324](https://github.com/adap/flower/pull/2324))" msgstr "" -"恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " -"`_ 可以在 :code:`examples/quickstart-pytorch` 中找到。" +"**从** `start_client` 中移除** `rest` **实验参数 " +"([#2324](https://github.com/adap/flower/pull/2324))" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:-1 +#: ../../source/ref-changelog.md:592 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch Lightning to train an Auto Encoder model on MNIST." -msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 PyTorch Lightning 在 MNIST 上训练自动编码器模型。" +"The (still experimental) argument `rest` was removed from `start_client` " +"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " +"experimental REST API instead." +msgstr "" +"删除了 `start_client` 和 `start_numpy_client` 中的参数 `rest`(仍属试验性质)。请使用 " +"`transport=\"rest\"` 来选择使用试验性 REST API。" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 -msgid "Quickstart PyTorch Lightning" -msgstr "快速入门 PyTorch Lightning" +#: ../../source/ref-changelog.md:594 +msgid "v1.5.0 (2023-08-31)" +msgstr "v1.5.0 (2023-08-31)" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:10 +#: ../../source/ref-changelog.md:600 msgid "" -"Let's build a horizontal federated learning system using PyTorch " -"Lightning and Flower!" -msgstr "让我们使用 PyTorch Lightning 和 Flower 构建一个水平联邦学习系统!" - -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 -msgid "" -"Please refer to the `full code example " -"`_ to learn more." 
+"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " msgstr "" -"请参阅 `完整代码示例 `_ 了解更多信息。" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with scikit-learn to train a linear regression model." -msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 scikit-learn 训练线性回归模型。" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:5 -msgid "Quickstart scikit-learn" -msgstr "scikit-learn快速入门" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/tutorial-quickstart-scikitlearn.rst:10 +#: ../../source/ref-changelog.md:604 msgid "" -"In this tutorial, we will learn how to train a :code:`Logistic " -"Regression` model on MNIST using Flower and scikit-learn." +"**Introduce new simulation engine** " +"([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" msgstr "" -"在本教程中,我们将学习如何使用 Flower 和 scikit-learn 在 MNIST 上训练一个 :code:`Logistic " -"Regression` 模型。" +"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:12 -#, fuzzy +#: ../../source/ref-changelog.md:606 msgid "" -"It is recommended to create a virtual environment and run everything " -"within this :doc:`virtualenv `." 
+"The new simulation engine has been rewritten from the ground up, yet it " +"remains fully backwards compatible. It offers much improved stability and" +" memory handling, especially when working with GPUs. Simulations " +"transparently adapt to different settings to scale simulation in CPU-" +"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." msgstr "" -"建议创建一个虚拟环境,并在此 `virtualenv `_ 中运行所有内容。" +"新的模拟引擎从头开始重新编写,但仍完全向后兼容。它的稳定性和内存处理能力大大提高,尤其是在使用 GPU 时。仿真可透明地适应不同的设置,以在仅 " +"CPU、CPU+GPU、多 GPU 或多节点多 GPU 环境中扩展模拟。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:16 +#: ../../source/ref-changelog.md:608 msgid "" -"*Clients* are responsible for generating individual model parameter " -"updates for the model based on their local datasets. These updates are " -"then sent to the *server* which will aggregate them to produce an updated" -" global model. Finally, the *server* sends this improved version of the " -"model back to each *client*. A complete cycle of parameters updates is " -"called a *round*." -msgstr "*客户端*负责根据其本地数据集为模型生成单独的模型参数更新。然后,这些参数更新将被发送到*服务器*,由*服务器*汇总后生成一个更新的全局模型。最后,*服务器*将这一改进版模型发回给每个*客户端*。一个完整的参数更新周期称为一*轮*。" +"Comprehensive documentation includes a new [how-to run " +"simulations](https://flower.ai/docs/framework/how-to-run-" +"simulations.html) guide, new [simulation-" +"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " +"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" +"tensorflow.html) notebooks, and a new [YouTube tutorial " +"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." 
+msgstr "" +"综合文档包括新的[how-to run simulations](https://flower.ai/docs/framework/how-to-" +"run-simulations.html) guide, new [simulation-" +"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " +"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" +"tensorflow.html) notebooks, and a new [YouTube tutorial " +"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:20 +#: ../../source/ref-changelog.md:610 msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. You can do this by running:" -msgstr "现在,我们已经有了一个大致的概念,让我们开始吧。首先,我们需要安装 Flower。运行:" +"**Restructure Flower Docs** " +"([#1824](https://github.com/adap/flower/pull/1824), " +"[#1865](https://github.com/adap/flower/pull/1865), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1887](https://github.com/adap/flower/pull/1887), " +"[#1919](https://github.com/adap/flower/pull/1919), " +"[#1922](https://github.com/adap/flower/pull/1922), " +"[#1920](https://github.com/adap/flower/pull/1920), " +"[#1923](https://github.com/adap/flower/pull/1923), " +"[#1924](https://github.com/adap/flower/pull/1924), " +"[#1962](https://github.com/adap/flower/pull/1962), " +"[#2006](https://github.com/adap/flower/pull/2006), " +"[#2133](https://github.com/adap/flower/pull/2133), " +"[#2203](https://github.com/adap/flower/pull/2203), " +"[#2215](https://github.com/adap/flower/pull/2215), " +"[#2122](https://github.com/adap/flower/pull/2122), " +"[#2223](https://github.com/adap/flower/pull/2223), " +"[#2219](https://github.com/adap/flower/pull/2219), " +"[#2232](https://github.com/adap/flower/pull/2232), " +"[#2233](https://github.com/adap/flower/pull/2233), " +"[#2234](https://github.com/adap/flower/pull/2234), " +"[#2235](https://github.com/adap/flower/pull/2235), " +"[#2237](https://github.com/adap/flower/pull/2237), " 
+"[#2238](https://github.com/adap/flower/pull/2238), " +"[#2242](https://github.com/adap/flower/pull/2242), " +"[#2231](https://github.com/adap/flower/pull/2231), " +"[#2243](https://github.com/adap/flower/pull/2243), " +"[#2227](https://github.com/adap/flower/pull/2227))" +msgstr "" +"**重构 Flower 文档** ([#1824](https://github.com/adap/flower/pull/1824), " +"[#1865](https://github.com/adap/flower/pull/1865), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1887](https://github.com/adap/flower/pull/1887), " +"[#1919](https://github.com/adap/flower/pull/1919), " +"[#1922](https://github.com/adap/flower/pull/1922), " +"[#1920](https://github.com/adap/flower/pull/1920), " +"[#1923](https://github.com/adap/flower/pull/1923), " +"[#1924](https://github.com/adap/flower/pull/1924), " +"[#1962](https://github.com/adap/flower/pull/1962), " +"[#2006](https://github.com/adap/flower/pull/2006), " +"[#2133](https://github.com/adap/flower/pull/2133), " +"[#2203](https://github.com/adap/flower/pull/2203), " +"[#2215](https://github.com/adap/flower/pull/2215), " +"[#2122](https://github.com/adap/flower/pull/2122), " +"[#2223](https://github.com/adap/flower/pull/2223), " +"[#2219](https://github.com/adap/flower/pull/2219), " +"[#2232](https://github.com/adap/flower/pull/2232), " +"[#2233](https://github.com/adap/flower/pull/2233), " +"[#2234](https://github.com/adap/flower/pull/2234), " +"[#2235](https://github.com/adap/flower/pull/2235), " +"[#2237](https://github.com/adap/flower/pull/2237), " +"[#2238](https://github.com/adap/flower/pull/2238), " +"[#2242](https://github.com/adap/flower/pull/2242), " +"[#2231](https://github.com/adap/flower/pull/2231), " +"[#2243](https://github.com/adap/flower/pull/2243), " +"[#2227](https://github.com/adap/flower/pull/2227))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:26 +#: ../../source/ref-changelog.md:612 #, fuzzy -msgid "Since we want to use scikit-learn, let's go ahead and install it:" -msgstr "既然我们要使用 
scikt-learn,那就继续安装吧:" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:32 -msgid "Or simply install all dependencies using Poetry:" -msgstr "或者直接使用 Poetry 安装所有依赖项:" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:42 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. However, before " -"setting up the client and server, we will define all functionalities that" -" we need for our federated learning setup within :code:`utils.py`. The " -":code:`utils.py` contains different functions defining all the machine " -"learning basics:" +"Much effort went into a completely restructured Flower docs experience. " +"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " +"divided into Flower Framework, Flower Baselines, Flower Android SDK, " +"Flower iOS SDK, and code example projects." msgstr "" -"现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。不过,在设置客户端和服务器之前,我们将在 " -":code:`utils.py` 中定义联邦学习设置所需的所有功能。:code:`utils.py`包含定义所有机器学习基础知识的不同函数:" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:45 -msgid ":code:`get_model_parameters()`" -msgstr ":code:`get_model_parameters()`" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:46 -msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" -msgstr "返回 :code:`sklearn` LogisticRegression 模型的参数" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:47 -msgid ":code:`set_model_params()`" -msgstr ":code:`set_model_params()`" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:48 -#, fuzzy -msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" -msgstr "设置:code:`sklean`的LogisticRegression模型的参数" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid ":code:`set_initial_params()`" -msgstr ":code:`set_initial_params()`" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid "Initializes the model parameters that the Flower server will ask for" -msgstr "初始化 Flower 
服务器将要求的模型参数" +"Flower 文档体验的全面重构耗费了大量精力。现在,[flower.ai/docs](flower.ai/docs)上的文档分为 Flower " +"Framework、Flower Baselines、Flower Android SDK、Flower iOS SDK 和代码示例项目。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +#: ../../source/ref-changelog.md:614 msgid "" -"Please check out :code:`utils.py` `here " -"`_ for more details. The pre-defined functions are used in" -" the :code:`client.py` and imported. The :code:`client.py` also requires " -"to import several packages such as Flower and scikit-learn:" +"**Introduce Flower Swift SDK** " +"([#1858](https://github.com/adap/flower/pull/1858), " +"[#1897](https://github.com/adap/flower/pull/1897))" msgstr "" -"更多详情请查看 :code:`utils.py`` 这里 " -"`_。在 :code:`client.py` 中使用并导入了预定义函数。:code:`client.py` " -"还需要导入几个软件包,如 Flower 和 scikit-learn:" +"**介绍 Flower Swift SDK** " +"([#1858](https://github.com/adap/flower/pull/1858), " +"[#1897](https://github.com/adap/flower/pull/1897))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:67 +#: ../../source/ref-changelog.md:616 msgid "" -"Prior to local training, we need to load the MNIST dataset, a popular " -"image classification dataset of handwritten digits for machine learning, " -"and partition the dataset for FL. This can be conveniently achieved using" -" `Flower Datasets `_. The " -":code:`FederatedDataset.load_partition()` method loads the partitioned " -"training set for each partition ID defined in the :code:`--partition-id` " -"argument." +"This is the first preview release of the Flower Swift SDK. Flower support" +" on iOS is improving, and alongside the Swift SDK and code example, there" +" is now also an iOS quickstart tutorial." msgstr "" +"这是 Flower Swift SDK 的首个预览版。Flower 对 iOS 的支持正在不断改进,除了 Swift SDK " +"和代码示例外,现在还有 iOS 快速入门教程。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:95 +#: ../../source/ref-changelog.md:618 msgid "" -"Next, the logistic regression model is defined and initialized with " -":code:`utils.set_initial_params()`." 
-msgstr "接下来,使用 :code:`utils.set_initial_params()` 对逻辑回归模型进行定义和初始化。" +"**Introduce Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" +msgstr "" +"**介绍Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:107 +#: ../../source/ref-changelog.md:620 msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to fit the logistic regression we defined earlier)." +"This is the first preview release of the Flower Kotlin SDK. Flower " +"support on Android is improving, and alongside the Kotlin SDK and code " +"example, there is now also an Android quickstart tutorial." msgstr "" -"Flower 服务器通过一个名为 :code:`Client` " -"的接口与客户端交互。当服务器选择一个特定的客户端进行训练时,它会通过网络发送训练指令。客户端接收到这些指令后,会调用 :code:`Client`" -" 方法之一来运行您的代码(即拟合我们之前定义的逻辑回归)。" +"这是 Flower Kotlin SDK 的首个预览版。Flower 对 Android 的支持正在不断改进,除了 Kotlin SDK " +"和代码示例,现在还有 Android 快速入门教程。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:113 +#: ../../source/ref-changelog.md:622 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses scikit-learn. 
Implementing :code:`NumPyClient` usually " -"means defining the following methods (:code:`set_parameters` is optional " -"though):" +"**Introduce new end-to-end testing infrastructure** " +"([#1842](https://github.com/adap/flower/pull/1842), " +"[#2071](https://github.com/adap/flower/pull/2071), " +"[#2072](https://github.com/adap/flower/pull/2072), " +"[#2068](https://github.com/adap/flower/pull/2068), " +"[#2067](https://github.com/adap/flower/pull/2067), " +"[#2069](https://github.com/adap/flower/pull/2069), " +"[#2073](https://github.com/adap/flower/pull/2073), " +"[#2070](https://github.com/adap/flower/pull/2070), " +"[#2074](https://github.com/adap/flower/pull/2074), " +"[#2082](https://github.com/adap/flower/pull/2082), " +"[#2084](https://github.com/adap/flower/pull/2084), " +"[#2093](https://github.com/adap/flower/pull/2093), " +"[#2109](https://github.com/adap/flower/pull/2109), " +"[#2095](https://github.com/adap/flower/pull/2095), " +"[#2140](https://github.com/adap/flower/pull/2140), " +"[#2137](https://github.com/adap/flower/pull/2137), " +"[#2165](https://github.com/adap/flower/pull/2165))" msgstr "" -"Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当你的工作负载使用 scikit-learn " -"时,它可以让你更容易地实现 :code:`Client` 接口。实现 :code:`NumPyClient` " -"通常意味着定义以下方法(:code:`set_parameters` 是可选的):" +"*介绍新的端到端测试** ([#1842](https://github.com/adap/flower/pull/1842), " +"[#2071](https://github.com/adap/flower/pull/2071), " +"[#2072](https://github.com/adap/flower/pull/2072), " +"[#2068](https://github.com/adap/flower/pull/2068), " +"[#2067](https://github.com/adap/flower/pull/2067), " +"[#2069](https://github.com/adap/flower/pull/2069), " +"[#2073](https://github.com/adap/flower/pull/2073), " +"[#2070](https://github.com/adap/flower/pull/2070), " +"[#2074](https://github.com/adap/flower/pull/2074), " +"[#2082](https://github.com/adap/flower/pull/2082), " +"[#2084](https://github.com/adap/flower/pull/2084), " +"[#2093](https://github.com/adap/flower/pull/2093), " 
+"[#2109](https://github.com/adap/flower/pull/2109), " +"[#2095](https://github.com/adap/flower/pull/2095), " +"[#2140](https://github.com/adap/flower/pull/2140), " +"[#2137](https://github.com/adap/flower/pull/2137), " +"[#2165](https://github.com/adap/flower/pull/2165))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:122 -msgid "is directly imported with :code:`utils.set_model_params()`" -msgstr "直接导入 :code:`utils.set_model_params()`" +#: ../../source/ref-changelog.md:624 +msgid "" +"A new testing infrastructure ensures that new changes stay compatible " +"with existing framework integrations or strategies." +msgstr "新的测试设施可确保新的变更与现有的框架集成或策略保持兼容。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:130 -msgid "The methods can be implemented in the following way:" -msgstr "这些方法可以通过以下方式实现:" +#: ../../source/ref-changelog.md:626 +msgid "**Deprecate Python 3.7**" +msgstr "** 过时的 Python 3.7**" -#: ../../source/tutorial-quickstart-scikitlearn.rst:153 +#: ../../source/ref-changelog.md:628 msgid "" -"We can now create an instance of our class :code:`MnistClient` and add " -"one line to actually run this client:" -msgstr "现在我们可以创建一个 :code:`MnistClient` 类的实例,并添加一行来实际运行该客户端:" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:160 -#, fuzzy +"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" +" Python 3.7 is now deprecated and will be removed in an upcoming release." +msgstr "由于 Python 3.7 已于 2023-06-27 弃用 (EOL),对 Python 3.7 的支持现已废弃,并将在即将发布的版本中移除。" + +#: ../../source/ref-changelog.md:630 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells" -" the client which server to connect to. 
In our case we can run the server" -" and the client on the same machine, therefore we use " -":code:`\"0.0.0.0:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we pass to the client." +"**Add new** `FedTrimmedAvg` **strategy** " +"([#1769](https://github.com/adap/flower/pull/1769), " +"[#1853](https://github.com/adap/flower/pull/1853))" msgstr "" -"这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient` 并调用 " -":code:`fl.client.start_client()` 或 " -":code:`fl.client.start_numpy_client()`。字符串 " -":code:`\"0.0.0.0:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " -":code:`\"0.0.0.0:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是传递给客户端的" -" :code:`server_address`。" +"**添加新的**`FedTrimmedAvg`**策略**([#1769](https://github.com/adap/flower/pull/1769)," +" [#1853](https://github.com/adap/flower/pull/1853)" -#: ../../source/tutorial-quickstart-scikitlearn.rst:169 +#: ../../source/ref-changelog.md:632 msgid "" -"The following Flower server is a little bit more advanced and returns an " -"evaluation function for the server-side evaluation. First, we import " -"again all required libraries such as Flower and scikit-learn." +"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " +"2018](https://arxiv.org/abs/1803.01498)." 
msgstr "" -"下面的 Flower 服务器更先进一些,会返回一个用于服务器端评估的评估函数。首先,我们再次导入所有需要的库,如 Flower 和 scikit-" -"learn。" +"新的 \"FedTrimmedAvg \"策略实现了[Dong Yin, " +"2018](https://arxiv.org/abs/1803.01498)的 \"Trimmed Mean\"。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:172 -msgid ":code:`server.py`, import Flower and start the server:" -msgstr ":code:`server.py`, 导入 Flower 并启动服务器:" +#: ../../source/ref-changelog.md:634 +msgid "" +"**Introduce start_driver** " +"([#1697](https://github.com/adap/flower/pull/1697))" +msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:185 -#, fuzzy +#: ../../source/ref-changelog.md:636 msgid "" -"The number of federated learning rounds is set in :code:`fit_round()` and" -" the evaluation is defined in :code:`get_evaluate_fn()`. The evaluation " -"function is called after each federated learning round and gives you " -"information about loss and accuracy. Note that we also make use of Flower" -" Datasets here to load the test split of the MNIST dataset for server-" -"side evaluation." +"In addition to `start_server` and using the raw Driver API, there is a " +"new `start_driver` function that allows for running `start_server` " +"scripts as a Flower driver with only a single-line code change. Check out" +" the `mt-pytorch` code example to see a working example using " +"`start_driver`." msgstr "" -"联邦学习轮数在 :code:`fit_round()` 中设置,评估在 :code:`get_evaluate_fn()` " -"中定义。每轮联邦学习后都会调用评估函数,并提供有关损失值和准确率的信息。" +"除了 `start_server` 和使用原始驱动 API 之外,还有一个新的 `start_driver` 函数,只需修改一行代码,就能将 " +"`start_server` 脚本作为 Flower 驱动程序运行。请查看 `mt-pytorch` 代码示例,了解使用 " +"`start_driver` 的工作示例。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:213 +#: ../../source/ref-changelog.md:638 msgid "" -"The :code:`main` contains the server-side parameter initialization " -":code:`utils.set_initial_params()` as well as the aggregation strategy " -":code:`fl.server.strategy:FedAvg()`. 
The strategy is the default one, " -"federated averaging (or FedAvg), with two clients and evaluation after " -"each federated learning round. The server can be started with the command" -" :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." +"**Add parameter aggregation to** `mt-pytorch` **code example** " +"([#1785](https://github.com/adap/flower/pull/1785))" msgstr "" -":code:`main`包含服务器端参数初始化:code:`utils.set_initial_params()`以及聚合策略 " -":code:`fl.server.strategy:FedAvg()`。该策略是默认的联邦平均(或 " -"FedAvg)策略,有两个客户端,在每轮联邦学习后进行评估。可以使用 " -":code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))` 命令启动服务器。" +"为 `mt-pytorch` **代码示例**添加参数聚合 " +"([#1785](https://github.com/adap/flower/pull/1785))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:232 +#: ../../source/ref-changelog.md:640 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. Federated learning systems usually have a " -"server and multiple clients. We, therefore, have to start the server " -"first:" -msgstr "客户端和服务器都准备就绪后,我们现在就可以运行一切,看看联邦学习的运行情况。联邦学习系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" +"The `mt-pytorch` example shows how to aggregate parameters when writing a" +" driver script. The included `driver.py` and `server.py` have been " +"aligned to demonstrate both the low-level way and the high-level way of " +"building server-side logic." +msgstr "" +"`mt-pytorch`示例展示了如何在编写驱动程序脚本时聚合参数。附带的 `driver.py` 和 `server.py` " +"已经进行了调整,以演示构建服务器端逻辑的低级方法和高级方法。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:286 +#: ../../source/ref-changelog.md:642 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples/sklearn-logreg-" -"mnist`." 
+"**Migrate experimental REST API to Starlette** " +"([2171](https://github.com/adap/flower/pull/2171))" msgstr "" -"恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " -"`_ 可以在 :code:`examples/sklearn-logreg-mnist` 中找到。" +"**将实验性 REST API 移植到 Starlette** " +"([2171](https://github.com/adap/flower/pull/2171))" -#: ../../source/tutorial-quickstart-tensorflow.rst:-1 +#: ../../source/ref-changelog.md:644 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with TensorFlow to train a MobilNetV2 model on CIFAR-10." -msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 TensorFlow 在 CIFAR-10 上训练 MobilNetV2 模型。" - -#: ../../source/tutorial-quickstart-tensorflow.rst:5 -msgid "Quickstart TensorFlow" -msgstr "快速入门 TensorFlow" - -#: ../../source/tutorial-quickstart-tensorflow.rst:13 -msgid "Let's build a federated learning system in less than 20 lines of code!" -msgstr "让我们用不到 20 行代码构建一个联邦学习系统!" - -#: ../../source/tutorial-quickstart-tensorflow.rst:15 -msgid "Before Flower can be imported we have to install it:" -msgstr "在导入 Flower 之前,我们必须先安装它:" +"The (experimental) REST API used to be implemented in " +"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" +" use [Starlette](https://www.starlette.io/) directly." +msgstr "" +"REST API(试验性)曾在 [FastAPI](https://fastapi.tiangolo.com/) 中实现,但现在已迁移到直接使用 " +"[Starlette](https://www.starlette.io/) 。" -#: ../../source/tutorial-quickstart-tensorflow.rst:21 +#: ../../source/ref-changelog.md:646 msgid "" -"Since we want to use the Keras API of TensorFlow (TF), we have to install" -" TF as well:" -msgstr "由于我们要使用 TensorFlow (TF) 的 Keras API,因此还必须安装 TF:" +"Please note: The REST request-response API is still experimental and will" +" likely change significantly over time." 
+msgstr "请注意:REST 请求-响应 API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" -#: ../../source/tutorial-quickstart-tensorflow.rst:31 -msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" -msgstr "接下来,在名为 :code:`client.py` 的文件中导入 Flower 和 TensorFlow:" +#: ../../source/ref-changelog.md:648 +msgid "" +"**Introduce experimental gRPC request-response API** " +"([#1867](https://github.com/adap/flower/pull/1867), " +"[#1901](https://github.com/adap/flower/pull/1901))" +msgstr "" +"**引入实验性 gRPC 请求-响应 API** " +"([#1867](https://github.com/adap/flower/pull/1867), " +"[#1901](https://github.com/adap/flower/pull/1901)" -#: ../../source/tutorial-quickstart-tensorflow.rst:38 +#: ../../source/ref-changelog.md:650 msgid "" -"We use the Keras utilities of TF to load CIFAR10, a popular colored image" -" classification dataset for machine learning. The call to " -":code:`tf.keras.datasets.cifar10.load_data()` downloads CIFAR10, caches " -"it locally, and then returns the entire training and test set as NumPy " -"ndarrays." +"In addition to the existing gRPC API (based on bidirectional streaming) " +"and the experimental REST API, there is now a new gRPC API that uses a " +"request-response model to communicate with client nodes." msgstr "" -"我们使用 TF 的 Keras 实用程序加载 CIFAR10,这是一个用于机器学习的流行彩色图像分类数据集。调用 " -":code:`tf.keras.datasets.cifar10.load_data()` 会下载 CIFAR10,将其缓存到本地,然后以 " -"NumPy ndarrays 的形式返回整个训练集和测试集。" +"除了现有的 gRPC 应用程序接口(基于双向流)和试验性 REST 应用程序接口外,现在还有一个新的 gRPC " +"应用程序接口,它使用请求-响应模型与客户端节点通信。" -#: ../../source/tutorial-quickstart-tensorflow.rst:47 +#: ../../source/ref-changelog.md:652 msgid "" -"Next, we need a model. For the purpose of this tutorial, we use " -"MobilNetV2 with 10 output classes:" -msgstr "接下来,我们需要一个模型。在本教程中,我们使用带有 10 个输出类的 MobilNetV2:" +"Please note: The gRPC request-response API is still experimental and will" +" likely change significantly over time." 
+msgstr "请注意:gRPC 请求-响应 API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" -#: ../../source/tutorial-quickstart-tensorflow.rst:60 +#: ../../source/ref-changelog.md:654 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses Keras. The :code:`NumPyClient` interface defines three " -"methods which can be implemented in the following way:" +"**Replace the experimental** `start_client(rest=True)` **with the new** " +"`start_client(transport=\"rest\")` " +"([#1880](https://github.com/adap/flower/pull/1880))" msgstr "" -"Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用 Keras 时,该类可以更轻松地实现 " -":code:`Client` 接口。:code:`NumPyClient` 接口定义了三个方法,可以通过以下方式实现:" +"**用新的** `start_client(transport=\"rest\")` 替换实验性** " +"`start_client(rest=True)` " +"([#1880](https://github.com/adap/flower/pull/1880))" -#: ../../source/tutorial-quickstart-tensorflow.rst:135 -msgid "Each client will have its own dataset." -msgstr "每个客户都有自己的数据集。" +#: ../../source/ref-changelog.md:656 +msgid "" +"The (experimental) `start_client` argument `rest` was deprecated in " +"favour of a new argument `transport`. `start_client(transport=\"rest\")` " +"will yield the same behaviour as `start_client(rest=True)` did before. " +"All code should migrate to the new argument `transport`. The deprecated " +"argument `rest` will be removed in a future release." 
+msgstr "" +"已废弃(试验性的)`start_client`参数`rest`,改用新参数`transport`。`start_client(transport=\"rest\")`将产生与以前的`start_client(rest=True)`相同的行为。所有代码都应迁移到新参数" +" `transport`。过时的参数 `rest` 将在今后的版本中删除。" -#: ../../source/tutorial-quickstart-tensorflow.rst:137 +#: ../../source/ref-changelog.md:658 msgid "" -"You should now see how the training does in the very first terminal (the " -"one that started the server):" -msgstr "现在你应该能在第一个终端(启动服务器的终端)看到训练的效果了:" +"**Add a new gRPC option** " +"([#2197](https://github.com/adap/flower/pull/2197))" +msgstr "** 添加一个新的 gRPC 选项**([#2197](https://github.com/adap/flower/pull/2197))" -#: ../../source/tutorial-quickstart-tensorflow.rst:169 +#: ../../source/ref-changelog.md:660 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this can be found in :code:`examples" -"/quickstart-tensorflow/client.py`." +"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" +" option set to 0 by default. This prevents the clients from sending " +"keepalive pings when there is no outstanding stream." msgstr "" -"恭喜您!您已经成功构建并运行了第一个联邦学习系统。`完整的源代码 " -"`_ 可以在 :code:`examples/quickstart-" -"tensorflow/client.py` 中找到。" +"现在我们启动一个 gRPC 服务器,并将 `grpc.keepalive_permit_without_calls` 选项默认设置为 " +"0。这将防止客户端在没有未处理数据流时发送 keepalive pings。" -#: ../../source/tutorial-quickstart-xgboost.rst:-1 +#: ../../source/ref-changelog.md:662 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with XGBoost to train classification models on trees." 
-msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 XGBoost 上训练分类模型。" +"**Improve example notebooks** " +"([#2005](https://github.com/adap/flower/pull/2005))" +msgstr "**改进示例笔记** ([#2005](https://github.com/adap/flower/pull/2005))" -#: ../../source/tutorial-quickstart-xgboost.rst:5 -msgid "Quickstart XGBoost" -msgstr "XGBoost快速入门" +#: ../../source/ref-changelog.md:664 +msgid "There's a new 30min Federated Learning PyTorch tutorial!" +msgstr "有一个新的 30 分钟的联邦学习 PyTorch 教程!" -#: ../../source/tutorial-quickstart-xgboost.rst:14 -msgid "Federated XGBoost" -msgstr "联邦化 XGBoost" +#: ../../source/ref-changelog.md:666 +msgid "" +"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" +msgstr "" +"**更新Example** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" -#: ../../source/tutorial-quickstart-xgboost.rst:16 +#: ../../source/ref-changelog.md:668 msgid "" -"EXtreme Gradient Boosting 
(**XGBoost**) is a robust and efficient " -"implementation of gradient-boosted decision tree (**GBDT**), that " -"maximises the computational boundaries for boosted tree methods. It's " -"primarily designed to enhance both the performance and computational " -"speed of machine learning models. In XGBoost, trees are constructed " -"concurrently, unlike the sequential approach taken by GBDT." +"Many examples have received significant updates, including simplified " +"advanced-tensorflow and advanced-pytorch examples, improved macOS " +"compatibility of TensorFlow examples, and code examples for simulation. A" +" major upgrade is that all code examples now have a `requirements.txt` " +"(in addition to `pyproject.toml`)." msgstr "" -"EXtreme Gradient " -"Boosting(**XGBoost**)是梯度提升决策树(**GBDT**)的一种稳健而高效的实现方法,能最大限度地提高提升树方法的计算边界。它主要用于提高机器学习模型的性能和计算速度。在" -" XGBoost 中,决策树是并发构建的,与 GBDT 采用的顺序方法不同。" +"许多示例都进行了重大更新,包括简化了 advanced-tensorflow 和 advanced-pytorch 示例,改进了 " +"TensorFlow 示例的 macOS 兼容性,以及模拟代码示例。一项重大升级是所有代码示例现在都有了 " +"\"requirements.txt\"(除 \"pyproject.toml \"外)。" -#: ../../source/tutorial-quickstart-xgboost.rst:20 +#: ../../source/ref-changelog.md:670 msgid "" -"Often, for tabular data on medium-sized datasets with fewer than 10k " -"training examples, XGBoost surpasses the results of deep learning " -"techniques." 
-msgstr "对于训练示例少于 10k 的中型数据集上的表格数据,XGBoost 的结果往往超过深度学习技术。" +"**General improvements** " +"([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" +msgstr "" +"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" -#: ../../source/tutorial-quickstart-xgboost.rst:23 -msgid "Why federated XGBoost?" -msgstr "为什么选择联邦 XGBoost?" +#: ../../source/ref-changelog.md:678 +msgid "v1.4.0 (2023-04-21)" +msgstr "v1.4.0 (2023-04-21)" -#: ../../source/tutorial-quickstart-xgboost.rst:25 +#: ../../source/ref-changelog.md:684 msgid "" -"Indeed, as the demand for data privacy and decentralized learning grows, " -"there's an increasing requirement to implement federated XGBoost systems " -"for specialised applications, like survival analysis and financial fraud " -"detection." -msgstr "事实上,随着对数据隐私和分散学习的需求不断增长,越来越多的专业应用(如生存分析和金融欺诈检测)需要实施联邦 XGBoost 系统。" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" +msgstr "" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. 
Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" -#: ../../source/tutorial-quickstart-xgboost.rst:27 +#: ../../source/ref-changelog.md:688 msgid "" -"Federated learning ensures that raw data remains on the local device, " -"making it an attractive approach for sensitive domains where data " -"security and privacy are paramount. Given the robustness and efficiency " -"of XGBoost, combining it with federated learning offers a promising " -"solution for these specific challenges." +"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " +"example)** ([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" msgstr "" -"联邦学习可确保原始数据保留在本地设备上,因此对于数据安全和隐私至关重要的敏感领域来说,这是一种极具吸引力的方法。鉴于 XGBoost " -"的稳健性和高效性,将其与联邦学习相结合为应对这些特定挑战提供了一种前景广阔的解决方案。" +"**引入对XGBoost的支持(**`FedXgbNnAvg` **策略和示例)** " +"([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" -#: ../../source/tutorial-quickstart-xgboost.rst:30 +#: ../../source/ref-changelog.md:690 msgid "" -"In this tutorial we will learn how to train a federated XGBoost model on " -"HIGGS dataset using Flower and :code:`xgboost` package. 
We use a simple " -"example (`full code xgboost-quickstart " -"`_)" -" with two *clients* and one *server* to demonstrate how federated XGBoost" -" works, and then we dive into a more complex example (`full code xgboost-" -"comprehensive `_) to run various experiments." +"XGBoost is a tree-based ensemble machine learning algorithm that uses " +"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" +" " +"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," +" and a [code example](https://github.com/adap/flower/tree/main/examples" +"/xgboost-quickstart) that demonstrates the usage of this new strategy in " +"an XGBoost project." msgstr "" -"在本教程中,我们将学习如何使用 Flower 和 :code:`xgboost` 软件包在 HIGGS 数据集上训练联邦 XGBoost " -"模型。我们将使用一个包含两个 * 客户端* 和一个 * 服务器* 的简单示例 (`完整代码 xgboost-quickstart " -"`_)来演示联邦 XGBoost 如何工作,然后我们将深入到一个更复杂的示例 (`完整代码 xgboost-" -"comprehensive `_),以运行各种实验。" - -#: ../../source/tutorial-quickstart-xgboost.rst:37 -msgid "Environment Setup" -msgstr "环境设定" - -#: ../../source/tutorial-quickstart-xgboost.rst:41 -msgid "" -"We first need to install Flower and Flower Datasets. You can do this by " -"running :" -msgstr "我们首先需要安装 Flower 和 Flower Datasets。您可以通过运行 :" - -#: ../../source/tutorial-quickstart-xgboost.rst:47 -msgid "" -"Since we want to use :code:`xgboost` package to build up XGBoost trees, " -"let's go ahead and install :code:`xgboost`:" -msgstr "既然我们要使用 :code:`xgboost` 软件包来构建 XGBoost 树,那就继续安装 :code:`xgboost`:" +"XGBoost 是一种基于树的集合机器学习算法,它使用梯度提升来提高模型的准确性。我们添加了一个新的 " +"\"FedXgbNnAvg\"[策略](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)和一个[代码示例](https://github.com/adap/flower/tree/main/examples" +"/xgboost-quickstart),演示如何在 XGBoost 项目中使用这个新策略。" -#: ../../source/tutorial-quickstart-xgboost.rst:57 +#: ../../source/ref-changelog.md:692 msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. 
Now that we have all our " -"dependencies installed, let's run a simple distributed training with two " -"clients and one server." -msgstr "*客户端*负责根据其本地数据集为模型生成单独的模型参数更新。现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。" +"**Introduce iOS SDK (preview)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" +msgstr "" +"**介绍 iOS SDK(预览版)** ([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/tutorial-quickstart-xgboost.rst:60 +#: ../../source/ref-changelog.md:694 msgid "" -"In a file called :code:`client.py`, import xgboost, Flower, Flower " -"Datasets and other related functions:" -msgstr "在名为 :code:`client.py` 的文件中,导入 xgboost、Flower、Flower Datasets 和其他相关函数:" - -#: ../../source/tutorial-quickstart-xgboost.rst:87 -msgid "Dataset partition and hyper-parameter selection" -msgstr "数据集划分和超参数选择" +"This is a major update for anyone wanting to implement Federated Learning" +" on iOS mobile devices. We now have a swift iOS SDK present under " +"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" +" that will facilitate greatly the app creating process. To showcase its " +"use, the [iOS " +"example](https://github.com/adap/flower/tree/main/examples/ios) has also " +"been updated!" +msgstr "" +"对于想要在 iOS 移动设备上实施联邦学习的人来说,这是一次重大更新。现在,我们在 " +"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" +" 下提供了一个迅捷的 iOS SDK,这将大大方便应用程序的创建过程。为了展示其使用情况,我们还更新了 [iOS " +"示例](https://github.com/adap/flower/tree/main/examples/ios)!" 
-#: ../../source/tutorial-quickstart-xgboost.rst:89 +#: ../../source/ref-changelog.md:696 msgid "" -"Prior to local training, we require loading the HIGGS dataset from Flower" -" Datasets and conduct data partitioning for FL:" -msgstr "在本地训练之前,我们需要从 Flower Datasets 加载 HIGGS 数据集,并对 FL 进行数据分区:" +"**Introduce new \"What is Federated Learning?\" tutorial** " +"([#1657](https://github.com/adap/flower/pull/1657), " +"[#1721](https://github.com/adap/flower/pull/1721))" +msgstr "" +"**引入新的 " +"\"什么是联邦学习?\"教程**([#1657](https://github.com/adap/flower/pull/1657), " +"[#1721](https://github.com/adap/flower/pull/1721)" -#: ../../source/tutorial-quickstart-xgboost.rst:102 +#: ../../source/ref-changelog.md:698 msgid "" -"In this example, we split the dataset into two partitions with uniform " -"distribution (:code:`IidPartitioner(num_partitions=2)`). Then, we load " -"the partition for the given client based on :code:`node_id`:" +"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" +"what-is-federated-learning.html) in our documentation explains the basics" +" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" +" Learning to start their journey with Flower. Forward it to anyone who's " +"interested in Federated Learning!" msgstr "" -"在此示例中,我们将数据集分割成两个均匀分布的分区(:code:`IidPartitioner(num_partitions=2)`)。然后,我们根据" -" :code:`node_id` 为给定客户端加载分区:" +"我们的文档中新增了一个[入门级教程](https://flower.ai/docs/framework/tutorial-what-is-" +"federated-learning.html),解释了联邦学习的基础知识。它让任何不熟悉联邦学习的人都能开始 Flower " +"之旅。请转发给对联邦学习感兴趣的人!" -#: ../../source/tutorial-quickstart-xgboost.rst:121 +#: ../../source/ref-changelog.md:700 msgid "" -"After that, we do train/test splitting on the given partition (client's " -"local data), and transform data format for :code:`xgboost` package." 
-msgstr "然后,我们在给定的分区(客户端的本地数据)上进行训练/测试分割,并为 :code:`xgboost` 软件包转换数据格式。" +"**Introduce new Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" +msgstr "" +"**引入新的 Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679)" -#: ../../source/tutorial-quickstart-xgboost.rst:134 +#: ../../source/ref-changelog.md:702 msgid "" -"The functions of :code:`train_test_split` and " -":code:`transform_dataset_to_dmatrix` are defined as below:" -msgstr ":code:`train_test_split` 和 :code:`transform_dataset_too_dmatrix` 的函数定义如下:" - -#: ../../source/tutorial-quickstart-xgboost.rst:158 -msgid "Finally, we define the hyper-parameters used for XGBoost training." -msgstr "最后,我们定义了用于 XGBoost 训练的超参数。" +"This new baseline replicates the MNIST+CNN task from the paper [Federated" +" Optimization in Heterogeneous Networks (Li et al., " +"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," +" which aims at making convergence more robust in heterogeneous settings." +msgstr "" +"这条新Baseline复现了论文[Federated Optimization in Heterogeneous Networks (Li et " +"al., 2018)](https://arxiv.org/abs/1812.06127)中的 MNIST+CNN 任务。它使用 " +"\"FedProx \"策略,旨在使收敛在异构环境中更加稳健。" -#: ../../source/tutorial-quickstart-xgboost.rst:174 +#: ../../source/ref-changelog.md:704 msgid "" -"The :code:`num_local_round` represents the number of iterations for local" -" tree boost. We use CPU for the training in default. One can shift it to " -"GPU by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as " -"evaluation metric." 
+"**Introduce new Flower Baseline: FedAvg FEMNIST** " +"([#1655](https://github.com/adap/flower/pull/1655))" msgstr "" -"代码:`num_local_round`表示本地树的迭代次数。我们默认使用 CPU 进行训练。可以通过将 :code:`tree_method` " -"设置为 :code:`gpu_hist`,将其转换为 GPU。我们使用 AUC 作为评估指标。" - -#: ../../source/tutorial-quickstart-xgboost.rst:181 -msgid "Flower client definition for XGBoost" -msgstr "用于 XGBoost 的 Flower 客户端定义" +"**引入新的 Flower Baseline: FedAvg FEMNIST** " +"([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/tutorial-quickstart-xgboost.rst:183 +#: ../../source/ref-changelog.md:706 msgid "" -"After loading the dataset we define the Flower client. We follow the " -"general rule to define :code:`XgbClient` class inherited from " -":code:`fl.client.Client`." +"This new baseline replicates an experiment evaluating the performance of " +"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " +"Benchmark for Federated Settings (Caldas et al., " +"2018)](https://arxiv.org/abs/1812.01097)." msgstr "" -"加载数据集后,我们定义 Flower 客户端。我们按照一般规则定义从 :code:`fl.client.Client` 继承而来的 " -":code:`XgbClient` 类。" +"这一新Baseline复现了论文[LEAF: A Benchmark for Federated Settings(Caldas 等人,2018 " +"年)](https://arxiv.org/abs/1812.01097)中评估 FedAvg 算法在 FEMNIST 数据集上性能的实验。" -#: ../../source/tutorial-quickstart-xgboost.rst:193 +#: ../../source/ref-changelog.md:708 msgid "" -"The :code:`self.bst` is used to keep the Booster objects that remain " -"consistent across rounds, allowing them to store predictions from trees " -"integrated in earlier rounds and maintain other essential data structures" -" for training." 
+"**Introduce (experimental) REST API** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" msgstr "" -"代码:`self.bst`用于保存在各轮中保持一致的 Booster " -"对象,使其能够存储在前几轮中集成的树的预测结果,并维护其他用于训练的重要数据结构。" +"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/tutorial-quickstart-xgboost.rst:196 +#: ../../source/ref-changelog.md:710 msgid "" -"Then, we override :code:`get_parameters`, :code:`fit` and " -":code:`evaluate` methods insides :code:`XgbClient` class as follows." -msgstr "" -"然后,我们在 :code:`XgbClient` 类中重写 :code:`get_parameters`、:code:`fit` 和 " -":code:`evaluate` 方法如下。" +"A new REST API has been introduced as an alternative to the gRPC-based " +"communication stack. In this initial version, the REST API only supports " +"anonymous clients." +msgstr "作为基于 gRPC 的通信栈的替代方案,我们引入了新的 REST API。在初始版本中,REST API 仅支持匿名客户端。" -#: ../../source/tutorial-quickstart-xgboost.rst:210 +#: ../../source/ref-changelog.md:712 msgid "" -"Unlike neural network training, XGBoost trees are not started from a " -"specified random weights. In this case, we do not use " -":code:`get_parameters` and :code:`set_parameters` to initialise model " -"parameters for XGBoost. As a result, let's return an empty tensor in " -":code:`get_parameters` when it is called by the server at the first " -"round." 
-msgstr "" -"与神经网络训练不同,XGBoost 树不是从指定的随机参数开始的。在这种情况下,我们不使用 :code:`get_parameters` 和 " -":code:`set_parameters` 来初始化 XGBoost 的模型参数。因此,当服务器在第一轮调用 " -":code:`get_parameters` 时,让我们在 :code:`get_parameters` 中返回一个空张量。" +"Please note: The REST API is still experimental and will likely change " +"significantly over time." +msgstr "请注意:REST API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" -#: ../../source/tutorial-quickstart-xgboost.rst:251 +#: ../../source/ref-changelog.md:714 msgid "" -"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build " -"up the first set of trees. the returned Booster object and config are " -"stored in :code:`self.bst` and :code:`self.config`, respectively. From " -"the second round, we load the global model sent from server to " -":code:`self.bst`, and then update model weights on local training data " -"with function :code:`local_boost` as follows:" +"**Improve the (experimental) Driver API** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" msgstr "" -"在 :code:`fit`中,第一轮我们调用 :code:`xgb.train()`来建立第一组树,返回的 Booster 对象和 config " -"分别存储在 :code:`self.bst` 和 :code:`self.config` 中。从第二轮开始,我们将服务器发送的全局模型加载到 " -":code:`self.bst`,然后使用函数 :code:`local_boost`更新本地训练数据的模型权重,如下所示:" +"**改进(试验性)驱动程序应用程序接口** ([#1663](https://github.com/adap/flower/pull/1663)," +" [#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " 
+"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" -#: ../../source/tutorial-quickstart-xgboost.rst:269 +#: ../../source/ref-changelog.md:716 msgid "" -"Given :code:`num_local_round`, we update trees by calling " -":code:`self.bst.update` method. After training, the last " -":code:`N=num_local_round` trees will be extracted to send to the server." +"The Driver API is still an experimental feature, but this release " +"introduces some major upgrades. One of the main improvements is the " +"introduction of an SQLite database to store server state on disk (instead" +" of in-memory). Another improvement is that tasks (instructions or " +"results) that have been delivered will now be deleted. This greatly " +"improves the memory efficiency of a long-running Flower server." msgstr "" -"给定 :code:`num_local_round`,我们通过调用 " -":code:`self.bst.update`方法更新树。训练结束后,我们将提取最后一个 :code:`N=num_local_round` " -"树并发送给服务器。" +"驱动程序应用程序接口(Driver API)仍是一项试验性功能,但这一版本引入了一些重大升级。主要改进之一是引入了 SQLite " +"数据库,将服务器状态存储在磁盘上(而不是内存中)。另一项改进是,已交付的任务(指令或结果)现在将被删除。这大大提高了长期运行的 Flower " +"服务器的内存效率。" -#: ../../source/tutorial-quickstart-xgboost.rst:291 +#: ../../source/ref-changelog.md:718 msgid "" -"In :code:`evaluate`, we call :code:`self.bst.eval_set` function to " -"conduct evaluation on valid set. The AUC value will be returned." 
-msgstr "在 :code:`evaluate`中,我们调用 :code:`self.bst.eval_set`函数对有效集合进行评估。将返回 AUC 值。" +"**Fix spilling issues related to Ray during simulations** " +"([#1698](https://github.com/adap/flower/pull/1698))" +msgstr "**修复模拟过程中与Ray有关的溢出问题** ([#1698](https://github.com/adap/flower/pull/1698))" -#: ../../source/tutorial-quickstart-xgboost.rst:294 +#: ../../source/ref-changelog.md:720 msgid "" -"Now, we can create an instance of our class :code:`XgbClient` and add one" -" line to actually run this client:" -msgstr "现在,我们可以创建一个 :code:`XgbClient` 类的实例,并添加一行来实际运行该客户端:" +"While running long simulations, `ray` was sometimes spilling huge amounts" +" of data that would make the training unable to continue. This is now " +"fixed! 🎉" +msgstr "在运行长时间模拟时,`ray` 有时会溢出大量数据,导致训练无法继续。现在这个问题已经解决!🎉" -#: ../../source/tutorial-quickstart-xgboost.rst:300 +#: ../../source/ref-changelog.md:722 msgid "" -"That's it for the client. We only have to implement :code:`Client`and " -"call :code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` " -"tells the client which server to connect to. In our case we can run the " -"server and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." 
+"**Add new example using** `TabNet` **and Flower** " +"([#1725](https://github.com/adap/flower/pull/1725))" msgstr "" -"这就是客户端。我们只需实现 :code:`客户端`并调用 :code:`fl.client.start_client()`。字符串 " -":code:`\"[::]:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " -":code:`\"[::]:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是客户端指向的 " -":code:`server_address`。" +"** 添加使用** `TabNet` ** 的新示例** " +"([#1725](https://github.com/adap/flower/pull/1725))" -#: ../../source/tutorial-quickstart-xgboost.rst:311 +#: ../../source/ref-changelog.md:724 msgid "" -"These updates are then sent to the *server* which will aggregate them to " -"produce a better model. Finally, the *server* sends this improved version" -" of the model back to each *client* to finish a complete FL round." +"TabNet is a powerful and flexible framework for training machine learning" +" models on tabular data. We now have a federated example using Flower: " +"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" +"/quickstart-tabnet)." msgstr "" -"然后,这些更新会被发送到*服务器*,由*服务器*聚合后生成一个更好的模型。最后,*服务器*将这个改进版的模型发回给每个*客户端*,以完成一轮完整的" -" FL。" +"TabNet 是一个强大而灵活的框架,用于在表格数据上训练机器学习模型。我们现在有一个使用 Flower 的联邦示例:[quickstart-" +"tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-" +"tabnet)。" -#: ../../source/tutorial-quickstart-xgboost.rst:314 +#: ../../source/ref-changelog.md:726 msgid "" -"In a file named :code:`server.py`, import Flower and FedXgbBagging from " -":code:`flwr.server.strategy`." -msgstr "" -"在名为 :code:`server.py` 的文件中,从 :code:`flwr.server.strategy` 导入 Flower 和 " -"FedXgbBagging。" - -#: ../../source/tutorial-quickstart-xgboost.rst:316 -msgid "We first define a strategy for XGBoost bagging aggregation." 
-msgstr "我们首先定义了 XGBoost bagging聚合策略。" +"**Add new how-to guide for monitoring simulations** " +"([#1649](https://github.com/adap/flower/pull/1649))" +msgstr "** 添加新的模拟监控指南** ([#1649](https://github.com/adap/flower/pull/1649))" -#: ../../source/tutorial-quickstart-xgboost.rst:339 +#: ../../source/ref-changelog.md:728 msgid "" -"We use two clients for this example. An " -":code:`evaluate_metrics_aggregation` function is defined to collect and " -"wighted average the AUC values from clients." -msgstr "" -"本示例使用两个客户端。我们定义了一个 :code:`evaluate_metrics_aggregation` 函数,用于收集客户机的 AUC " -"值并求取平均值。" - -#: ../../source/tutorial-quickstart-xgboost.rst:342 -msgid "Then, we start the server:" -msgstr "然后,我们启动服务器:" - -#: ../../source/tutorial-quickstart-xgboost.rst:354 -msgid "Tree-based bagging aggregation" -msgstr "基于树的bagging聚合" +"We now have a documentation guide to help users monitor their performance" +" during simulations." +msgstr "我们现在有一份文档指南,可帮助用户在模拟过程中监控其性能。" -#: ../../source/tutorial-quickstart-xgboost.rst:356 +#: ../../source/ref-changelog.md:730 msgid "" -"You must be curious about how bagging aggregation works. Let's look into " -"the details." -msgstr "您一定很好奇bagging聚合是如何工作的。让我们来详细了解一下。" +"**Add training metrics to** `History` **object during simulations** " +"([#1696](https://github.com/adap/flower/pull/1696))" +msgstr "" +"**在模拟过程中为***`历史`***对象添加训练指标*** " +"([#1696](https://github.com/adap/flower/pull/1696))" -#: ../../source/tutorial-quickstart-xgboost.rst:358 +#: ../../source/ref-changelog.md:732 msgid "" -"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define " -":code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`." -" Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` " -"and :code:`evaluate` methods as follows:" +"The `fit_metrics_aggregation_fn` can be used to aggregate training " +"metrics, but previous releases did not save the results in the `History` " +"object. This is now the case!" 
msgstr "" -"在文件 :code:`flwr.server.strategy.fedxgb_bagging.py`中,我们定义了从 " -":code:`flwr.server.strategy.FedAvg`继承的 :code:`FedXgbBagging`。然后,我们覆盖 " -":code:`aggregate_fit`、:code:`aggregate_evaluate` 和 :code:`evaluate` 方法如下:" +"`fit_metrics_aggregation_fn`可用于汇总训练指标,但以前的版本不会将结果保存在 \"History " +"\"对象中。现在可以了!" -#: ../../source/tutorial-quickstart-xgboost.rst:454 +#: ../../source/ref-changelog.md:734 msgid "" -"In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " -"trees by calling :code:`aggregate()` function:" +"**General improvements** " +"([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " +"[#1672](https://github.com/adap/flower/pull/1672), " +"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " +"[#1682](https://github.com/adap/flower/pull/1682), " +"[#1685](https://github.com/adap/flower/pull/1685), " +"[#1692](https://github.com/adap/flower/pull/1692), " +"[#1705](https://github.com/adap/flower/pull/1705), " +"[#1708](https://github.com/adap/flower/pull/1708), " +"[#1711](https://github.com/adap/flower/pull/1711), " +"[#1713](https://github.com/adap/flower/pull/1713), " +"[#1714](https://github.com/adap/flower/pull/1714), " +"[#1718](https://github.com/adap/flower/pull/1718), " +"[#1716](https://github.com/adap/flower/pull/1716), " 
+"[#1723](https://github.com/adap/flower/pull/1723), " +"[#1735](https://github.com/adap/flower/pull/1735), " +"[#1678](https://github.com/adap/flower/pull/1678), " +"[#1750](https://github.com/adap/flower/pull/1750), " +"[#1753](https://github.com/adap/flower/pull/1753), " +"[#1736](https://github.com/adap/flower/pull/1736), " +"[#1766](https://github.com/adap/flower/pull/1766), " +"[#1760](https://github.com/adap/flower/pull/1760), " +"[#1775](https://github.com/adap/flower/pull/1775), " +"[#1776](https://github.com/adap/flower/pull/1776), " +"[#1777](https://github.com/adap/flower/pull/1777), " +"[#1779](https://github.com/adap/flower/pull/1779), " +"[#1784](https://github.com/adap/flower/pull/1784), " +"[#1773](https://github.com/adap/flower/pull/1773), " +"[#1755](https://github.com/adap/flower/pull/1755), " +"[#1789](https://github.com/adap/flower/pull/1789), " +"[#1788](https://github.com/adap/flower/pull/1788), " +"[#1798](https://github.com/adap/flower/pull/1798), " +"[#1799](https://github.com/adap/flower/pull/1799), " +"[#1739](https://github.com/adap/flower/pull/1739), " +"[#1800](https://github.com/adap/flower/pull/1800), " +"[#1804](https://github.com/adap/flower/pull/1804), " +"[#1805](https://github.com/adap/flower/pull/1805))" msgstr "" -"在 :code:`aggregate_fit` 中,我们通过调用 :code:`aggregate()` 函数,按顺序聚合客户端的 XGBoost" -" 树:" +"**普通改进** ([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " 
+"[#1672](https://github.com/adap/flower/pull/1672), " +"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " +"[#1682](https://github.com/adap/flower/pull/1682), " +"[#1685](https://github.com/adap/flower/pull/1685), " +"[#1692](https://github.com/adap/flower/pull/1692), " +"[#1705](https://github.com/adap/flower/pull/1705), " +"[#1708](https://github.com/adap/flower/pull/1708), " +"[#1711](https://github.com/adap/flower/pull/1711), " +"[#1713](https://github.com/adap/flower/pull/1713), " +"[#1714](https://github.com/adap/flower/pull/1714), " +"[#1718](https://github.com/adap/flower/pull/1718), " +"[#1716](https://github.com/adap/flower/pull/1716), " +"[#1723](https://github.com/adap/flower/pull/1723), " +"[#1735](https://github.com/adap/flower/pull/1735), " +"[#1678](https://github.com/adap/flower/pull/1678), " +"[#1750](https://github.com/adap/flower/pull/1750), " +"[#1753](https://github.com/adap/flower/pull/1753), " +"[#1736](https://github.com/adap/flower/pull/1736), " +"[#1766](https://github.com/adap/flower/pull/1766), " +"[#1760](https://github.com/adap/flower/pull/1760), " +"[#1775](https://github.com/adap/flower/pull/1775), " +"[#1776](https://github.com/adap/flower/pull/1776), " +"[#1777](https://github.com/adap/flower/pull/1777), " +"[#1779](https://github.com/adap/flower/pull/1779), " +"[#1784](https://github.com/adap/flower/pull/1784), " +"[#1773](https://github.com/adap/flower/pull/1773), " +"[#1755](https://github.com/adap/flower/pull/1755), " +"[#1789](https://github.com/adap/flower/pull/1789), " +"[#1788](https://github.com/adap/flower/pull/1788), " +"[#1798](https://github.com/adap/flower/pull/1798), " +"[#1799](https://github.com/adap/flower/pull/1799), " +"[#1739](https://github.com/adap/flower/pull/1739), " +"[#1800](https://github.com/adap/flower/pull/1800), " 
+"[#1804](https://github.com/adap/flower/pull/1804), " +"[#1805](https://github.com/adap/flower/pull/1805))" + +#: ../../source/ref-changelog.md:742 +msgid "v1.3.0 (2023-02-06)" +msgstr "v1.3.0 (2023-02-06)" -#: ../../source/tutorial-quickstart-xgboost.rst:513 +#: ../../source/ref-changelog.md:748 msgid "" -"In this function, we first fetch the number of trees and the number of " -"parallel trees for the current and previous model by calling " -":code:`_get_tree_nums`. Then, the fetched information will be aggregated." -" After that, the trees (containing model weights) are aggregated to " -"generate a new tree model." +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" msgstr "" -"在该函数中,我们首先通过调用 :code:`_get_tree_nums` " -"获取当前模型和上一个模型的树数和并行树数。然后,对获取的信息进行聚合。然后,聚合树(包含模型参数)生成新的树模型。" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" -#: ../../source/tutorial-quickstart-xgboost.rst:518 +#: ../../source/ref-changelog.md:752 msgid "" -"After traversal of all clients' models, a new global model is generated, " -"followed by the serialisation, and sending back to each client." -msgstr "在遍历所有客户端的模型后,会生成一个新的全局模型,然后进行序列化,并发回给每个客户端。" - -#: ../../source/tutorial-quickstart-xgboost.rst:523 -msgid "Launch Federated XGBoost!" -msgstr "启动联邦 XGBoost!" +"**Add support for** `workload_id` **and** `group_id` **in Driver API** " +"([#1595](https://github.com/adap/flower/pull/1595))" +msgstr "" +"**在驱动程序应用程序接口中添加对** `workload_id` **和** `group_id` **的支持** " +"([#1595](https://github.com/adap/flower/pull/1595))" -#: ../../source/tutorial-quickstart-xgboost.rst:585 +#: ../../source/ref-changelog.md:754 msgid "" -"Congratulations! You've successfully built and run your first federated " -"XGBoost system. The AUC values can be checked in " -":code:`metrics_distributed`. 
One can see that the average AUC increases " -"over FL rounds." +"The (experimental) Driver API now supports a `workload_id` that can be " +"used to identify which workload a task belongs to. It also supports a new" +" `group_id` that can be used, for example, to indicate the current " +"training round. Both the `workload_id` and `group_id` enable client nodes" +" to decide whether they want to handle a task or not." msgstr "" -"恭喜您!您已成功构建并运行了第一个联邦 XGBoost 系统。可以在 :code:`metrics_distributed` 中查看 AUC " -"值。我们可以看到,平均 AUC 随 FL 轮数的增加而增加。" +"驱动程序 API(试验性)现在支持 `workload_id`,可用于识别任务所属的工作量。它还支持新的 " +"`group_id`,例如,可用于指示当前的训练轮次。通过 `workload_id` 和 `group_id` " +"客户端节点可以决定是否要处理某个任务。" -#: ../../source/tutorial-quickstart-xgboost.rst:590 +#: ../../source/ref-changelog.md:756 msgid "" -"The full `source code `_ for this example can be found in :code:`examples" -"/xgboost-quickstart`." +"**Make Driver API and Fleet API address configurable** " +"([#1637](https://github.com/adap/flower/pull/1637))" msgstr "" -"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" - -#: ../../source/tutorial-quickstart-xgboost.rst:594 -msgid "Comprehensive Federated XGBoost" -msgstr "综合的联邦 XGBoost" +"**使Driver API 和Fleet " +"API地址可配置**([#1637](https://github.com/adap/flower/pull/1637))" -#: ../../source/tutorial-quickstart-xgboost.rst:596 -#, fuzzy +#: ../../source/ref-changelog.md:758 msgid "" -"Now that you have known how federated XGBoost work with Flower, it's time" -" to run some more comprehensive experiments by customising the " -"experimental settings. In the xgboost-comprehensive example (`full code " -"`_), we provide more options to define various experimental" -" setups, including aggregation strategies, data partitioning and " -"centralised/distributed evaluation. We also support :doc:`Flower " -"simulation ` making it easy to simulate large " -"client cohorts in a resource-aware manner. Let's take a look!" 
+"The (experimental) long-running Flower server (Driver API and Fleet API) " +"can now configure the server address of both Driver API (via `--driver-" +"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" msgstr "" -"既然您已经知道联合 XGBoost 如何与 Flower 协同工作,那么现在就该通过自定义实验设置来运行一些更综合的实验了。在 xgboost-" -"comprehensive 示例 (`完整代码 " -"`_)中,我们提供了更多选项来定义各种实验设置,包括数据分区和集中/分布式评估。让我们一起来看看!" +"长期运行的 Flower 服务器(Driver API 和 Fleet API)现在可以在启动时配置 Driver API(通过 " +"`--driver-api-address`)和 Fleet API(通过 `-fleet-api-address`)的服务器地址:" -#: ../../source/tutorial-quickstart-xgboost.rst:603 +#: ../../source/ref-changelog.md:760 #, fuzzy -msgid "Cyclic training" -msgstr "集中式训练" - -#: ../../source/tutorial-quickstart-xgboost.rst:605 msgid "" -"In addition to bagging aggregation, we offer a cyclic training scheme, " -"which performs FL in a client-by-client fashion. Instead of aggregating " -"multiple clients, there is only one single client participating in the " -"training per round in the cyclic training scenario. The trained local " -"XGBoost trees will be passed to the next client as an initialised model " -"for next round's boosting." +"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " +"\"0.0.0.0:8086\"`" msgstr "" +"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " +"\"0.0.0.0:8086\"`" -#: ../../source/tutorial-quickstart-xgboost.rst:609 -msgid "" -"To do this, we first customise a :code:`ClientManager` in " -":code:`server_utils.py`:" -msgstr "" +#: ../../source/ref-changelog.md:762 +msgid "Both IPv4 and IPv6 addresses are supported." +msgstr "支持 IPv4 和 IPv6 地址。" -#: ../../source/tutorial-quickstart-xgboost.rst:649 +#: ../../source/ref-changelog.md:764 msgid "" -"The customised :code:`ClientManager` samples all available clients in " -"each FL round based on the order of connection to the server. 
Then, we " -"define a new strategy :code:`FedXgbCyclic` in " -":code:`flwr.server.strategy.fedxgb_cyclic.py`, in order to sequentially " -"select only one client in given round and pass the received model to next" -" client." +"**Add new example of Federated Learning using fastai and Flower** " +"([#1598](https://github.com/adap/flower/pull/1598))" msgstr "" +"** 添加使用 fastai 和 Flower 进行联邦学习的新示例** " +"([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/tutorial-quickstart-xgboost.rst:690 +#: ../../source/ref-changelog.md:766 msgid "" -"Unlike the original :code:`FedAvg`, we don't perform aggregation here. " -"Instead, we just make a copy of the received client model as global model" -" by overriding :code:`aggregate_fit`." +"A new code example (`quickstart-fastai`) demonstrates federated learning " +"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " +"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" +"/quickstart-fastai)." msgstr "" +"一个新的代码示例(`quickstart-fastai`)演示了使用 [fastai](https://www.fast.ai/) 和 " +"Flower 的联邦学习。您可以在这里找到它: [quickstart-" +"fastai](https://github.com/adap/flower/tree/main/examples/quickstart-" +"fastai)。" -#: ../../source/tutorial-quickstart-xgboost.rst:693 +#: ../../source/ref-changelog.md:768 msgid "" -"Also, the customised :code:`configure_fit` and :code:`configure_evaluate`" -" methods ensure the clients to be sequentially selected given FL round:" +"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" +" versions of Android** " +"([#1603](https://github.com/adap/flower/pull/1603))" msgstr "" +"**使安卓示例兼容** `flwr >= 1.0.0` **和最新版本的安卓** " +"([#1603](https://github.com/adap/flower/pull/1603))" -#: ../../source/tutorial-quickstart-xgboost.rst:757 -msgid "Customised data partitioning" -msgstr "定制数据分区" - -#: ../../source/tutorial-quickstart-xgboost.rst:759 +#: ../../source/ref-changelog.md:770 msgid "" -"In :code:`dataset.py`, we have a function 
:code:`instantiate_partitioner`" -" to instantiate the data partitioner based on the given " -":code:`num_partitions` and :code:`partitioner_type`. Currently, we " -"provide four supported partitioner type to simulate the uniformity/non-" -"uniformity in data quantity (uniform, linear, square, exponential)." -msgstr "" -"在 :code:`dataset.py` 中,我们有一个函数 :code:`instantiate_partitioner` 来根据给定的 " -":code:`num_partitions` 和 :code:`partitioner_type` " -"来实例化数据分区器。目前,我们提供四种支持的分区器类型(均匀、线性、正方形、指数)来模拟数据量的均匀性/非均匀性。" - -#: ../../source/tutorial-quickstart-xgboost.rst:790 -msgid "Customised centralised/distributed evaluation" -msgstr "定制的集中/分布式评估" +"The Android code example has received a substantial update: the project " +"is compatible with Flower 1.0 (and later), the UI received a full " +"refresh, and the project is updated to be compatible with newer Android " +"tooling." +msgstr "" +"Android 代码示例已进行了大幅更新:项目兼容 Flower 1.0(及更高版本),用户界面已全面刷新,项目已更新为兼容较新的 Android" +" 工具。" -#: ../../source/tutorial-quickstart-xgboost.rst:792 -#, fuzzy +#: ../../source/ref-changelog.md:772 msgid "" -"To facilitate centralised evaluation, we define a function in " -":code:`server_utils.py`:" -msgstr "为便于集中评估,我们在 :code:`server.py` 中定义了一个函数:" +"**Add new `FedProx` strategy** " +"([#1619](https://github.com/adap/flower/pull/1619))" +msgstr "**添加新的`FedProx`策略** ([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/tutorial-quickstart-xgboost.rst:824 +#: ../../source/ref-changelog.md:774 msgid "" -"This function returns a evaluation function which instantiates a " -":code:`Booster` object and loads the global model weights to it. The " -"evaluation is conducted by calling :code:`eval_set()` method, and the " -"tested AUC value is reported." 
+"This " +"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" +" is almost identical to " +"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," +" but helps users replicate what is described in this " +"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " +"parameter called `proximal_mu` to regularize the local models with " +"respect to the global models." msgstr "" -"此函数返回一个评估函数,该函数实例化一个 :code:`Booster` 对象,并向其加载全局模型参数。评估通过调用 " -":code:`eval_set()` 方法进行,并报告测试的 AUC 值。" +"该[策略](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)与[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)几乎相同,但可以帮助用户复现本[论文](https://arxiv.org/abs/1812.06127)中的描述。它的本质是添加一个名为" +" `proximal_mu`的参数,使局部模型与全局模型正则化。" -#: ../../source/tutorial-quickstart-xgboost.rst:827 -#, fuzzy +#: ../../source/ref-changelog.md:776 msgid "" -"As for distributed evaluation on the clients, it's same as the quick-" -"start example by overriding the :code:`evaluate()` method insides the " -":code:`XgbClient` class in :code:`client_utils.py`." -msgstr "" -"至于客户端上的分布式评估,与快速启动示例相同,通过覆盖 :code:`client.py` 中 :code:`XgbClient` 类内部的 " -":code:`evaluate()` 方法。" +"**Add new metrics to telemetry events** " +"([#1640](https://github.com/adap/flower/pull/1640))" +msgstr "**为遥测事件添加新指标**([#1640](https://github.com/adap/flower/pull/1640))" -#: ../../source/tutorial-quickstart-xgboost.rst:831 -#, fuzzy -msgid "Flower simulation" -msgstr "运行模拟" +#: ../../source/ref-changelog.md:778 +msgid "" +"An updated event structure allows, for example, the clustering of events " +"within the same workload." 
+msgstr "例如,更新后的事件结构可以将同一工作负载中的事件集中在一起。" -#: ../../source/tutorial-quickstart-xgboost.rst:832 +#: ../../source/ref-changelog.md:780 msgid "" -"We also provide an example code (:code:`sim.py`) to use the simulation " -"capabilities of Flower to simulate federated XGBoost training on either a" -" single machine or a cluster of machines." -msgstr "" +"**Add new custom strategy tutorial section** " +"[#1623](https://github.com/adap/flower/pull/1623)" +msgstr "**添加新的自定义策略教程部分** [#1623](https://github.com/adap/flower/pull/1623)" -#: ../../source/tutorial-quickstart-xgboost.rst:866 +#: ../../source/ref-changelog.md:782 msgid "" -"After importing all required packages, we define a :code:`main()` " -"function to perform the simulation process:" +"The Flower tutorial now has a new section that covers implementing a " +"custom strategy from scratch: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" msgstr "" +"Flower 教程新增了一个章节,介绍如何从零开始实施自定义策略: [在 Colab " +"中打开](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" -#: ../../source/tutorial-quickstart-xgboost.rst:921 +#: ../../source/ref-changelog.md:784 msgid "" -"We first load the dataset and perform data partitioning, and the pre-" -"processed data is stored in a :code:`list`. After the simulation begins, " -"the clients won't need to pre-process their partitions again." 
-msgstr "" +"**Add new custom serialization tutorial section** " +"([#1622](https://github.com/adap/flower/pull/1622))" +msgstr "** 添加新的自定义序列化教程部分** ([#1622](https://github.com/adap/flower/pull/1622))" -#: ../../source/tutorial-quickstart-xgboost.rst:924 -msgid "Then, we define the strategies and other hyper-parameters:" +#: ../../source/ref-changelog.md:786 +msgid "" +"The Flower tutorial now has a new section that covers custom " +"serialization: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-customize-the-client-pytorch.ipynb)" msgstr "" +"Flower 教程现在新增了一个章节,介绍自定义序列化: [在 Colab " +"中打开](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-customize-the-client-pytorch.ipynb)" -#: ../../source/tutorial-quickstart-xgboost.rst:975 +#: ../../source/ref-changelog.md:788 msgid "" -"After that, we start the simulation by calling " -":code:`fl.simulation.start_simulation`:" +"**General improvements** " +"([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " +"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github.com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " +"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github.com/adap/flower/pull/1590), " +"[#1580](https://github.com/adap/flower/pull/1580), " 
+"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/adap/flower/pull/1600), " +"[#1601](https://github.com/adap/flower/pull/1601), " +"[#1597](https://github.com/adap/flower/pull/1597), " +"[#1595](https://github.com/adap/flower/pull/1595), " +"[#1591](https://github.com/adap/flower/pull/1591), " +"[#1588](https://github.com/adap/flower/pull/1588), " +"[#1589](https://github.com/adap/flower/pull/1589), " +"[#1587](https://github.com/adap/flower/pull/1587), " +"[#1573](https://github.com/adap/flower/pull/1573), " +"[#1581](https://github.com/adap/flower/pull/1581), " +"[#1578](https://github.com/adap/flower/pull/1578), " +"[#1574](https://github.com/adap/flower/pull/1574), " +"[#1572](https://github.com/adap/flower/pull/1572), " +"[#1586](https://github.com/adap/flower/pull/1586))" msgstr "" +"**普通改进** ([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " +"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github. com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " +"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github. 
com/adap/flower/pull/1590), " +"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/adap/flower/pull/1600), " +"[#1601](https://github.com/adap/flower/pull/1601), " +"[#1597](https://github.com/adap/flower/pull/1597), " +"[#1595](https://github.com/adap/flower/pull/1595), " +"[#1591](https://github.com/adap/flower/pull/1591), " +"[#1588](https://github. com/adap/flower/pull/1588), " +"[#1589](https://github.com/adap/flower/pull/1589), " +"[#1587](https://github.com/adap/flower/pull/1587), " +"[#1573](https://github.com/adap/flower/pull/1573), " +"[#1581](https://github.com/adap/flower/pull/1581), " +"[#1578](https://github.com/adap/flower/pull/1578), " +"[#1574](https://github.com/adap/flower/pull/1574), " +"[#1572](https://github.com/adap/flower/pull/1572), " +"[#1586](https://github.com/adap/flower/pull/1586))" -#: ../../source/tutorial-quickstart-xgboost.rst:995 +#: ../../source/ref-changelog.md:792 msgid "" -"One of key parameters for :code:`start_simulation` is :code:`client_fn` " -"which returns a function to construct a client. 
We define it as follows:" +"**Updated documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" msgstr "" +"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614)))" -#: ../../source/tutorial-quickstart-xgboost.rst:1038 -msgid "Arguments parser" -msgstr "参数解析器" - -#: ../../source/tutorial-quickstart-xgboost.rst:1040 -#, fuzzy +#: ../../source/ref-changelog.md:794 ../../source/ref-changelog.md:861 msgid "" -"In :code:`utils.py`, we define the arguments parsers for clients, server " -"and simulation, allowing users to specify different experimental " -"settings. Let's first see the sever side:" -msgstr "在 :code:`utils.py` 中,我们定义了客户端和服务器端的参数解析器,允许用户指定不同的实验设置。让我们先看看服务器端:" +"As usual, the documentation has improved quite a bit. It is another step " +"in our effort to make the Flower documentation the best documentation of " +"any project. Stay tuned and as always, feel free to provide feedback!" +msgstr "和往常一样,我们的文档有了很大的改进。这是我们努力使 Flower 文档成为所有项目中最好文档的又一步骤。请继续关注,并随时提供反馈意见!" 
-#: ../../source/tutorial-quickstart-xgboost.rst:1086 -#, fuzzy +#: ../../source/ref-changelog.md:800 +msgid "v1.2.0 (2023-01-13)" +msgstr "v1.2.0 (2023-01-13)" + +#: ../../source/ref-changelog.md:806 msgid "" -"This allows user to specify training strategies / the number of total " -"clients / FL rounds / participating clients / clients for evaluation, and" -" evaluation fashion. Note that with :code:`--centralised-eval`, the sever" -" will do centralised evaluation and all functionalities for client " -"evaluation will be disabled." +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." +" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" msgstr "" -"这允许用户指定总客户数/FL 轮数/参与客户数/评估客户数以及评估方式。请注意,如果使用 :code:`--centralised-" -"eval`,服务器将进行集中评估,客户端评估的所有功能将被禁用。" - -#: ../../source/tutorial-quickstart-xgboost.rst:1090 -msgid "Then, the argument parser on client side:" -msgstr "然后是客户端的参数解析器:" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." +" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" -#: ../../source/tutorial-quickstart-xgboost.rst:1144 -#, fuzzy +#: ../../source/ref-changelog.md:810 msgid "" -"This defines various options for client data partitioning. Besides, " -"clients also have an option to conduct evaluation on centralised test set" -" by setting :code:`--centralised-eval`, as well as an option to perform " -"scaled learning rate based on the number of clients by setting :code" -":`--scaled-lr`." 
-msgstr "这定义了客户端数据分区的各种选项。此外,通过设置 :code:`-centralised-eval`,客户端还可以选择在集中测试集上进行评估。" - -#: ../../source/tutorial-quickstart-xgboost.rst:1148 -msgid "We also have an argument parser for simulation:" +"**Introduce new Flower Baseline: FedAvg MNIST** " +"([#1497](https://github.com/adap/flower/pull/1497), " +"[#1552](https://github.com/adap/flower/pull/1552))" msgstr "" +"**引入新的 Flower Baseline: FedAvg MNIST** " +"([#1497](https://github.com/adap/flower/pull/1497), " +"[#1552](https://github.com/adap/flower/pull/1552))" -#: ../../source/tutorial-quickstart-xgboost.rst:1226 -msgid "This integrates all arguments for both client and server sides." +#: ../../source/ref-changelog.md:812 +msgid "" +"Over the coming weeks, we will be releasing a number of new reference " +"implementations useful especially to FL newcomers. They will typically " +"revisit well known papers from the literature, and be suitable for " +"integration in your own application or for experimentation, in order to " +"deepen your knowledge of FL in general. Today's release is the first in " +"this series. 
[Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" +"pack-fedavg-mnist-cnn/)" msgstr "" +"在未来几周内,我们将发布一些新的参考,特别是对 FL " +"新手有用的方法。它们通常会重温文献中的知名论文,适合集成到您自己的应用程序中或用于实验,以加深您对 FL " +"的总体了解。今天发布的是该系列中的第一篇。[阅读全文](https://flower.ai/blog/2023-01-12-fl-starter-" +"pack-fedavg-mnist-cnn/)" -#: ../../source/tutorial-quickstart-xgboost.rst:1229 -msgid "Example commands" -msgstr "命令示例" - -#: ../../source/tutorial-quickstart-xgboost.rst:1231 -#, fuzzy +#: ../../source/ref-changelog.md:814 msgid "" -"To run a centralised evaluated experiment with bagging strategy on 5 " -"clients with exponential distribution for 50 rounds, we first start the " -"server as below:" -msgstr "为了在 5 个客户端上进行 50 轮指数分布的集中评估实验,我们首先启动服务器,如下所示:" - -#: ../../source/tutorial-quickstart-xgboost.rst:1238 -msgid "Then, on each client terminal, we start the clients:" -msgstr "然后,我们在每个客户终端上启动客户机:" +"**Improve GPU support in simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" +msgstr "**改进模拟中的 GPU 支持**([#1555](https://github.com/adap/flower/pull/1555))" -#: ../../source/tutorial-quickstart-xgboost.rst:1244 -msgid "To run the same experiment with Flower simulation:" +#: ../../source/ref-changelog.md:816 +msgid "" +"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" +" to improve GPU support. The update includes some of the hard-earned " +"lessons from scaling simulations in GPU cluster environments. New " +"defaults make running GPU-based simulations substantially more robust." msgstr "" +"基于 Ray 的虚拟客户端引擎 (`start_simulation`)已更新,以改进对 GPU 的支持。此次更新包含了在 GPU " +"集群环境中扩展模拟的一些经验教训。新的默认设置使基于 GPU 的模拟运行更加稳健。" -#: ../../source/tutorial-quickstart-xgboost.rst:1250 -#, fuzzy +#: ../../source/ref-changelog.md:818 msgid "" -"The full `code `_ for this comprehensive example can be found in" -" :code:`examples/xgboost-comprehensive`." 
+"**Improve GPU support in Jupyter Notebook tutorials** " +"([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" msgstr "" -"此综合示例的全部`源代码 `_ 可在 :code:`examples/xgboost-comprehensive` 中找到。" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 -msgid "Build a strategy from scratch" -msgstr "从零开始制定策略" +"**改进 Jupyter Notebook 教程中的 GPU 支持** " +"([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 +#: ../../source/ref-changelog.md:820 msgid "" -"Welcome to the third part of the Flower federated learning tutorial. In " -"previous parts of this tutorial, we introduced federated learning with " -"PyTorch and Flower (`part 1 `__) and we learned how strategies " -"can be used to customize the execution on both the server and the clients" -" (`part 2 `__)." +"Some users reported that Jupyter Notebooks have not always been easy to " +"use on GPU instances. We listened and made improvements to all of our " +"Jupyter notebooks! Check out the updated notebooks here:" msgstr "" -"欢迎来到 Flower 联邦学习教程的第三部分。在本教程的前几部分,我们介绍了 PyTorch 和 Flower 的联邦学习(`part 1 " -"`__),并学习了如何使用策略来定制服务器和客户端的执行(`part 2 " -"`__)。" +"一些用户报告说,在 GPU 实例上使用 Jupyter 笔记本并不是很方便。我们听取了他们的意见,并对所有 Jupyter " +"笔记本进行了改进!点击这里查看更新后的笔记本:" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +#: ../../source/ref-changelog.md:822 msgid "" -"In this notebook, we'll continue to customize the federated learning " -"system we built previously by creating a custom version of FedAvg (again," -" using `Flower `__ and `PyTorch " -"`__)." 
+"[An Introduction to Federated Learning](https://flower.ai/docs/framework" +"/tutorial-get-started-with-flower-pytorch.html)" msgstr "" -"在本笔记中,我们将通过创建 FedAvg 的自定义版本(再次使用 `Flower `__ 和 " -"`PyTorch `__),继续定制我们之前构建的联邦学习系统。" +"[联邦学习简介](https://flower.ai/docs/framework/tutorial-get-started-with-" +"flower-pytorch.html)" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 +#: ../../source/ref-changelog.md:823 msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the Flower community on Slack to connect, ask questions, and get help: " -"`Join Slack `__ 🌼 We'd love to hear from " -"you in the ``#introductions`` channel! And if anything is unclear, head " -"over to the ``#questions`` channel." +"[Strategies in Federated Learning](https://flower.ai/docs/framework" +"/tutorial-use-a-federated-learning-strategy-pytorch.html)" msgstr "" -"`Star Flower on GitHub `__ ⭐️ 并加入 Slack " -"上的 Flower 社区,进行交流、提问并获得帮助: 加入 Slack `__ 🌼 " -"我们希望在 ``#introductions`` 频道听到您的声音!如果有任何不清楚的地方,请访问 ``#questions`` 频道。" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:17 -msgid "Let's build a new ``Strategy`` from scratch!" -msgstr "让我们从头开始构建一个新的``Strategy``!" 
- -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:29 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:29 -msgid "Preparation" -msgstr "准备工作" +"[联邦学习策略](https://flower.ai/docs/framework/tutorial-use-a-federated-" +"learning-strategy-pytorch.html)" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:31 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:32 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:31 +#: ../../source/ref-changelog.md:824 msgid "" -"Before we begin with the actual code, let's make sure that we have " -"everything we need." -msgstr "在开始实际代码之前,让我们先确保我们已经准备好了所需的一切。" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:43 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:44 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:43 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:43 -msgid "Installing dependencies" -msgstr "安装依赖项" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:45 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:46 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:45 -msgid "First, we install the necessary packages:" -msgstr "首先,我们安装必要的软件包:" +"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" +"-strategy-from-scratch-pytorch.html)" +msgstr "" +"[制定策略](https://flower.ai/docs/framework/tutorial-build-a-strategy-from-" +"scratch-pytorch.html)" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:65 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:66 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:65 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:65 +#: ../../source/ref-changelog.md:825 msgid "" -"Now that we have 
all dependencies installed, we can import everything we " -"need for this tutorial:" -msgstr "现在我们已经安装了所有依赖项,可以导入本教程所需的所有内容:" +"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" +"customize-the-client-pytorch.html)" +msgstr "" +"[客户端和 NumPyClient](https://flower.ai/docs/framework/tutorial-customize-" +"the-client-pytorch.html)" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:101 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:102 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:101 +#: ../../source/ref-changelog.md:827 msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " -"GPU > Save``). Note, however, that Google Colab is not always able to " -"offer GPU acceleration. If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." 
+"**Introduce optional telemetry** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" msgstr "" -"可以切换到已启用 GPU 加速的运行时(在 Google Colab 上: 运行时 > 更改运行时类型 > 硬件加速: GPU > " -"保存``)。但请注意,Google Colab 并非总能提供 GPU 加速。如果在以下部分中看到与 GPU 可用性相关的错误,请考虑通过设置 " -"``DEVICE = torch.device(\"cpu\")`` 切回基于 CPU 的执行。如果运行时已启用 GPU " -"加速,你应该会看到输出``Training on cuda``,否则会显示``Training on cpu``。" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:114 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:115 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:114 -msgid "Data loading" -msgstr "数据加载" +"**引入可选遥测**([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:116 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:116 +#: ../../source/ref-changelog.md:829 msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``. We introduce a new parameter" -" ``num_clients`` which allows us to call ``load_datasets`` with different" -" numbers of clients." +"After a [request for " +"feedback](https://github.com/adap/flower/issues/1534) from the community," +" the Flower open-source project introduces optional collection of " +"*anonymous* usage metrics to make well-informed decisions to improve " +"Flower. Doing this enables the Flower team to understand how Flower is " +"used and what challenges users might face." 
msgstr "" -"现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成 10 " -"个较小的数据集(每个数据集又分为训练集和验证集),并将所有数据都封装在各自的 ``DataLoader`` 中。我们引入了一个新参数 " -"``num_clients``,它允许我们使用不同数量的客户端调用 ``load_datasets``。" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:167 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:168 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:167 -msgid "Model training/evaluation" -msgstr "模型培训/评估" +"在社区发出[反馈请求](https://github.com/adap/flower/issues/1534)之后,Flower " +"开放源码项目引入了可选的*匿名*使用指标收集,以便在充分知情的情况下做出改进 Flower 的决定。这样做能让 Flower 团队了解 " +"Flower 的使用情况以及用户可能面临的挑战。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:169 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:170 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 +#: ../../source/ref-changelog.md:831 msgid "" -"Let's continue with the usual model definition (including " -"``set_parameters`` and ``get_parameters``), training and test functions:" -msgstr "让我们继续使用常见的模型定义(包括 `set_parameters` 和 `get_parameters`)、训练和测试函数:" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:258 -msgid "Flower client" -msgstr "Flower 客户端" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users who do not want to share anonymous usage metrics. " +"[Read more.](https://flower.ai/docs/telemetry.html)." 
+msgstr "" +"**Flower 是一个用于协作式人工智能和数据科学的友好框架。** Flower " +"遵循这一声明,让不想分享匿名使用指标的用户可以轻松禁用遥测技术。[阅读全文](https://flower.ai/docs/telemetry.html)。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:260 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:260 +#: ../../source/ref-changelog.md:833 msgid "" -"To implement the Flower client, we (again) create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " -"``cid`` to the client and use it log additional details:" +"**Introduce (experimental) Driver API** " +"([#1520](https://github.com/adap/flower/pull/1520), " +"[#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" msgstr "" -"为了实现 Flower 客户端,我们(再次)创建了 ``flwr.client.NumPyClient`` 的子类,并实现了 " -"``get_parameters``、``fit`` 和 ``evaluate``三个方法。在这里,我们还将 ``cid`` " -"传递给客户端,并使用它记录其他详细信息:" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:308 -msgid "Let's test what we have so far before we continue:" -msgstr "在继续之前,让我们先测试一下我们目前掌握的情况:" +"**引入(试验性)Driver API** ([#1520](https://github.com/adap/flower/pull/1520)," +" [#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:339 -msgid "Build a Strategy from scratch" -msgstr "从零开始构建策略" +#: ../../source/ref-changelog.md:835 +msgid "" +"Flower now 
has a new (experimental) Driver API which will enable fully " +"programmable, async, and multi-tenant Federated Learning and Federated " +"Analytics applications. Phew, that's a lot! Going forward, the Driver API" +" will be the abstraction that many upcoming features will be built on - " +"and you can start building those things now, too." +msgstr "" +"Flower 现在有了一个新的(试验性的)驱动程序应用程序接口(Driver " +"API),它将支持完全可编程、异步和多租户的联邦学习(Federated Learning)和联邦分析(Federated " +"Analytics)应用程序。展望未来,Driver API 将成为许多即将推出的功能的抽象基础,您现在就可以开始构建这些功能。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:341 +#: ../../source/ref-changelog.md:837 msgid "" -"Let’s overwrite the ``configure_fit`` method such that it passes a higher" -" learning rate (potentially also other hyperparameters) to the optimizer " -"of a fraction of the clients. We will keep the sampling of the clients as" -" it is in ``FedAvg`` and then change the configuration dictionary (one of" -" the ``FitIns`` attributes)." +"The Driver API also enables a new execution mode in which the server runs" +" indefinitely. Multiple individual workloads can run concurrently and " +"start and stop their execution independent of the server. This is " +"especially useful for users who want to deploy Flower in production." msgstr "" -"让我们重写 ``configure_fit`` 方法,使其向一部分客户的优化器传递更高的学习率(可能还有其他超参数)。我们将保持 " -"``FedAvg`` 中的客户端采样,然后更改配置字典(``FitIns`` 属性之一)。" +"驱动程序应用程序接口还支持一种新的执行模式,在这种模式下,服务器可无限期运行。多个单独的工作负载可以同时运行,并独立于服务器启动和停止执行。这对于希望在生产中部署" +" Flower 的用户来说尤其有用。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:507 +#: ../../source/ref-changelog.md:839 msgid "" -"The only thing left is to use the newly created custom Strategy " -"``FedCustom`` when starting the experiment:" -msgstr "剩下的唯一工作就是在启动实验时使用新创建的自定义策略 ``FedCustom`` :" +"To learn more, check out the `mt-pytorch` code example. We look forward " +"to you feedback!" +msgstr "要了解更多信息,请查看 `mt-pytorch` 代码示例。我们期待您的反馈!" 
-#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:534 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:932 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:697 -msgid "Recap" -msgstr "回顾" +#: ../../source/ref-changelog.md:841 +msgid "" +"Please note: *The Driver API is still experimental and will likely change" +" significantly over time.*" +msgstr "请注意:*Driver API仍处于试验阶段,随着时间的推移可能会发生重大变化。*" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:536 +#: ../../source/ref-changelog.md:843 msgid "" -"In this notebook, we’ve seen how to implement a custom strategy. A custom" -" strategy enables granular control over client node configuration, result" -" aggregation, and more. To define a custom strategy, you only have to " -"overwrite the abstract methods of the (abstract) base class ``Strategy``." -" To make custom strategies even more powerful, you can pass custom " -"functions to the constructor of your new class (``__init__``) and then " -"call these functions whenever needed." 
+"**Add new Federated Analytics with Pandas example** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" msgstr "" -"在本笔记中,我们了解了如何实施自定义策略。自定义策略可以对客户端节点配置、结果聚合等进行细粒度控制。要定义自定义策略,只需覆盖(抽象)基类 " -"``Strategy`` " -"的抽象方法即可。为使自定义策略更加强大,您可以将自定义函数传递给新类的构造函数(`__init__``),然后在需要时调用这些函数。" +"** 添加新的使用 Pandas " +"的联邦分析示例**([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535)" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:550 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:948 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:729 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:715 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 +#: ../../source/ref-changelog.md:845 msgid "" -"Before you continue, make sure to join the Flower community on Slack: " -"`Join Slack `__" +"A new code example (`quickstart-pandas`) demonstrates federated analytics" +" with Pandas and Flower. You can find it here: [quickstart-" +"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" +"pandas)." 
msgstr "" -"在继续之前,请务必加入 Slack 上的 Flower 社区:`Join Slack `__" +"新代码示例(`quickstart-pandas`)演示了使用 Pandas 和 Flower 进行联邦分析。您可以在此处找到它: " +"[quickstart-pandas](https://github.com/adap/flower/tree/main/examples" +"/quickstart-pandas)。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:552 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:950 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:731 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:717 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 +#: ../../source/ref-changelog.md:847 msgid "" -"There's a dedicated ``#questions`` channel if you need help, but we'd " -"also love to hear who you are in ``#introductions``!" -msgstr "如果您需要帮助,我们有专门的 ``#questions`` 频道,但我们也很乐意在 ``#introductions`` 中了解您是谁!" +"**Add new strategies: Krum and MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" +msgstr "" +"**添加新策略: Krum 和 MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:554 +#: ../../source/ref-changelog.md:849 msgid "" -"The `Flower Federated Learning Tutorial - Part 4 " -"`__ introduces ``Client``, the flexible API underlying " -"``NumPyClient``." +"Edoardo, a computer science student at the Sapienza University of Rome, " +"contributed a new `Krum` strategy that enables users to easily use Krum " +"and MultiKrum in their workloads." 
msgstr "" -"Flower联邦学习教程 - 第4部分 `__ 介绍了``Client``,它是``NumPyClient``底层的灵活应用程序接口。" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 -msgid "Customize the client" -msgstr "自定义客户端" +"罗马萨皮恩扎大学(Sapienza University)计算机科学专业的学生埃多尔多(Edoardo)提出了一种新的 \"Krum " +"\"策略,使用户能够在其工作负载中轻松使用 Krum 和 MultiKrum。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 +#: ../../source/ref-changelog.md:851 msgid "" -"Welcome to the fourth part of the Flower federated learning tutorial. In " -"the previous parts of this tutorial, we introduced federated learning " -"with PyTorch and Flower (`part 1 `__), we learned how " -"strategies can be used to customize the execution on both the server and " -"the clients (`part 2 `__), and we built our own " -"custom strategy from scratch (`part 3 `__)." +"**Update C++ example to be compatible with Flower v1.2.0** " +"([#1495](https://github.com/adap/flower/pull/1495))" msgstr "" -"欢迎来到 Flower 联邦学习教程的第四部分。在本教程的前几部分中,我们介绍了 PyTorch 和 Flower 的联邦学习(`part 1 " -"`__),了解了如何使用策略来定制服务器和客户端的执行(`part 2 " -"`__),并从头开始构建了我们自己的定制策略(`part 3 " -"`__)。" +"** 更新 C++ 示例,与 Flower v1.2.0 兼容** " +"([#1495](https://github.com/adap/flower/pull/1495))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 +#: ../../source/ref-changelog.md:853 msgid "" -"In this notebook, we revisit ``NumPyClient`` and introduce a new " -"baseclass for building clients, simply named ``Client``. In previous " -"parts of this tutorial, we've based our client on ``NumPyClient``, a " -"convenience class which makes it easy to work with machine learning " -"libraries that have good NumPy interoperability. With ``Client``, we gain" -" a lot of flexibility that we didn't have before, but we'll also have to " -"do a few things the we didn't have to do before." 
-msgstr "" -"在本笔记中,我们将重温 ``NumPyClient`` 并引入一个用于构建客户端的新基类,简单命名为 " -"``Client``。在本教程的前几部分中,我们的客户端基于``NumPyClient``,这是一个便捷类,可以让我们轻松地与具有良好 NumPy" -" 互操作性的机器学习库协同工作。有了 ``Client``,我们获得了很多以前没有的灵活性,但我们也必须做一些以前不需要做的事情。" +"The C++ code example has received a substantial update to make it " +"compatible with the latest version of Flower." +msgstr "为了与最新版本的 Flower 兼容,C++ 示例代码进行了大幅更新。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:18 +#: ../../source/ref-changelog.md:855 msgid "" -"Let's go deeper and see what it takes to move from ``NumPyClient`` to " -"``Client``!" -msgstr "让我们深入了解一下从 ``NumPyClient`` 到 ``Client`` 的过程!" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:30 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:29 -msgid "Step 0: Preparation" -msgstr "步骤 0:准备工作" +"**General improvements** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" +msgstr "" +"**普通改进** ([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github. 
com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:117 +#: ../../source/ref-changelog.md:859 msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``." +"**Updated documentation** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" msgstr "" -"现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成十个较小的数据集(每个数据集又分为训练集和验证集),并将所有数据都封装在各自的 " -"``DataLoader`` 中。" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:259 -msgid "Step 1: Revisiting NumPyClient" -msgstr "步骤 1:重温 NumPyClient" +"** 更新文档** ([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" -#: 
../../source/tutorial-series-customize-the-client-pytorch.ipynb:261 +#: ../../source/ref-changelog.md:863 msgid "" -"So far, we've implemented our client by subclassing " -"``flwr.client.NumPyClient``. The three methods we implemented are " -"``get_parameters``, ``fit``, and ``evaluate``. Finally, we wrap the " -"creation of instances of this class in a function called ``client_fn``:" +"One highlight is the new [first time contributor " +"guide](https://flower.ai/docs/first-time-contributors.html): if you've " +"never contributed on GitHub before, this is the perfect place to start!" msgstr "" -"到目前为止,我们通过子类化 ``flwr.client.NumPyClient`` " -"实现了我们的客户端。我们实现了三个方法:``get_parameters``, ``fit`, 和``evaluate``。最后,我们用一个名为 " -"``client_fn`` 的函数来创建该类的实例:" +"其中一个亮点是新的[首次贡献者指南](https://flower.ai/docs/first-time-" +"contributors.html):如果你以前从未在 GitHub 上做过贡献,这将是一个完美的开始!" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:309 +#: ../../source/ref-changelog.md:869 +msgid "v1.1.0 (2022-10-31)" +msgstr "v1.1.0 (2022-10-31)" + +#: ../../source/ref-changelog.md:873 msgid "" -"We've seen this before, there's nothing new so far. The only *tiny* " -"difference compared to the previous notebook is naming, we've changed " -"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " -"``numpyclient_fn``. Let's run it to see the output we get:" -msgstr "" -"我们以前见过这种情况,目前没有什么新东西。与之前的笔记相比,唯一*小*的不同是命名,我们把 ``FlowerClient`` 改成了 " -"``FlowerNumPyClient``,把 `client_fn` 改成了 ``numpyclient_fn``。让我们运行它看看输出结果:" +"We would like to give our **special thanks** to all the contributors who " +"made the new version of Flower possible (in `git shortlog` order):" +msgstr "在此,我们向所有促成 Flower 新版本的贡献者致以**特别的谢意(按 \"git shortlog \"顺序排列):" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:339 +#: ../../source/ref-changelog.md:875 msgid "" -"This works as expected, two clients are training for three rounds of " -"federated learning." 
-msgstr "结果不出所料,两个客户端正在进行三轮联邦学习训练。" +"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" +msgstr "" +"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:341 +#: ../../source/ref-changelog.md:879 msgid "" -"Let's dive a little bit deeper and discuss how Flower executes this " -"simulation. Whenever a client is selected to do some work, " -"``start_simulation`` calls the function ``numpyclient_fn`` to create an " -"instance of our ``FlowerNumPyClient`` (along with loading the model and " -"the data)." +"**Introduce Differential Privacy wrappers (preview)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" msgstr "" -"让我们再深入一点,讨论一下 Flower 是如何执行模拟的。每当一个客户端被选中进行工作时,`start_simulation`` 就会调用函数 " -"`numpyclient_fn` 来创建我们的 ``FlowerNumPyClient`` 实例(同时加载模型和数据)。" +"**引入差分隐私包装器(预览)** ([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:343 +#: ../../source/ref-changelog.md:881 msgid "" -"But here's the perhaps surprising part: Flower doesn't actually use the " -"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " -"makes it look like a subclass of ``flwr.client.Client``, not " -"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " -"know how to handle ``NumPyClient``'s, it only knows how to handle " -"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " -"top of ``Client``." 
+"The first (experimental) preview of pluggable Differential Privacy " +"wrappers enables easy configuration and usage of differential privacy " +"(DP). The pluggable DP wrappers enable framework-agnostic **and** " +"strategy-agnostic usage of both client-side DP and server-side DP. Head " +"over to the Flower docs, a new explainer goes into more detail." msgstr "" -"但令人惊讶的部分也许就在这里: Flower 实际上并不直接使用 ``FlowerNumPyClient`` " -"对象。相反,它封装了该对象,使其看起来像 ``flwr.client.Client`` 的子类,而不是 " -"``flwr.client.NumPyClient``。事实上,Flower 核心框架不知道如何处理 " -"``NumPyClient``,它只知道如何处理 ``Client``。``NumPyClient`` " -"只是建立在``Client``之上的便捷抽象类。" +"可插拔差分隐私封装器的首个(实验性)预览版可轻松配置和使用差分隐私(DP)。可插拔的差分隐私封装器可实现客户端差分隐私和服务器端差分隐私的框架无关**以及**策略无关的使用。请访问" +" Flower 文档,新的解释器会提供更多细节。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:345 +#: ../../source/ref-changelog.md:883 msgid "" -"Instead of building on top of ``NumPyClient``, we can directly build on " -"top of ``Client``." -msgstr "与其在 ``NumPyClient`` 上构建,我们可以直接在 ``Client`` 上构建。" +"**New iOS CoreML code example** " +"([#1289](https://github.com/adap/flower/pull/1289))" +msgstr "**新的 iOS CoreML 代码示例**([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:357 -msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" -msgstr "步骤 2:从 ``NumPyClient`` 移至 ``Client``" +#: ../../source/ref-changelog.md:885 +msgid "" +"Flower goes iOS! A massive new code example shows how Flower clients can " +"be built for iOS. The code example contains both Flower iOS SDK " +"components that can be used for many tasks, and one task example running " +"on CoreML." +msgstr "" +"Flower 进入 iOS!大量新代码示例展示了如何为 iOS 构建 Flower 客户端。该代码示例包含可用于多种任务的 Flower iOS " +"SDK 组件,以及在 CoreML 上运行的一个任务示例。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:359 +#: ../../source/ref-changelog.md:887 msgid "" -"Let's try to do the same thing using ``Client`` instead of " -"``NumPyClient``." 
-msgstr "让我们尝试使用 ``Client`` 代替 ``NumPyClient`` 做同样的事情。" +"**New FedMedian strategy** " +"([#1461](https://github.com/adap/flower/pull/1461))" +msgstr "**新的联邦医疗策略** ([#1461](https://github.com/adap/flower/pull/1461))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:465 +#: ../../source/ref-changelog.md:889 msgid "" -"Before we discuss the code in more detail, let's try to run it! Gotta " -"make sure our new ``Client``-based client works, right?" -msgstr "在详细讨论代码之前,让我们试着运行它!必须确保我们基于 ``Client`` 的新客户端能正常运行,对吗?" +"The new `FedMedian` strategy implements Federated Median (FedMedian) by " +"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." +msgstr "" +"新的 \"FedMedian \"战略实现了[Yin " +"等人,2018]的联邦中值(FedMedian)(https://arxiv.org/pdf/1803.01498v1.pdf)。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:490 +#: ../../source/ref-changelog.md:891 msgid "" -"That's it, we're now using ``Client``. It probably looks similar to what " -"we've done with ``NumPyClient``. So what's the difference?" -msgstr "就是这样,我们现在开始使用 ``Client``。它看起来可能与我们使用 ``NumPyClient`` 所做的类似。那么有什么不同呢?" +"**Log** `Client` **exceptions in Virtual Client Engine** " +"([#1493](https://github.com/adap/flower/pull/1493))" +msgstr "**虚拟客户端引擎中的**日志**`客户端`**异常([#1493](https://github.com/adap/flower/pull/1493))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:492 +#: ../../source/ref-changelog.md:893 msgid "" -"First of all, it's more code. But why? The difference comes from the fact" -" that ``Client`` expects us to take care of parameter serialization and " -"deserialization. For Flower to be able to send parameters over the " -"network, it eventually needs to turn these parameters into ``bytes``. " -"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " -"serialization. Turning raw bytes into something more useful (like NumPy " -"``ndarray``'s) is called deserialization. 
Flower needs to do both: it " -"needs to serialize parameters on the server-side and send them to the " -"client, the client needs to deserialize them to use them for local " -"training, and then serialize the updated parameters again to send them " -"back to the server, which (finally!) deserializes them again in order to " -"aggregate them with the updates received from other clients." -msgstr "" -"首先,它的代码更多。但为什么呢?区别在于 ``Client`` 希望我们处理参数的序列化和反序列化。Flower " -"要想通过网络发送参数,最终需要将这些参数转化为 ``字节``。把参数(例如 NumPy 的 ``ndarray`` " -"参数)变成原始字节叫做序列化。将原始字节转换成更有用的东西(如 NumPy ``ndarray`)称为反序列化。Flower " -"需要同时做这两件事:它需要在服务器端序列化参数并将其发送到客户端,客户端需要反序列化参数以便将其用于本地训练,然后再次序列化更新后的参数并将其发送回服务器,服务器(最后)再次反序列化参数以便将其与从其他客户端接收到的更新汇总在一起。" +"All `Client` exceptions happening in the VCE are now logged by default " +"and not just exposed to the configured `Strategy` (via the `failures` " +"argument)." +msgstr "VCE 中发生的所有 \"客户端 \"异常现在都会被默认记录下来,而不只是暴露给配置的 `Strategy`(通过 `failures`参数)。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:495 +#: ../../source/ref-changelog.md:895 msgid "" -"The only *real* difference between Client and NumPyClient is that " -"NumPyClient takes care of serialization and deserialization for you. It " -"can do so because it expects you to return parameters as NumPy ndarray's," -" and it knows how to handle these. This makes working with machine " -"learning libraries that have good NumPy support (most of them) a breeze." 
-msgstr "" -"Client 与 NumPyClient 之间的唯一**真正区别在于,NumPyClient " -"会为你处理序列化和反序列化。NumPyClient之所以能做到这一点,是因为它预计你会以NumPy " -"ndarray的形式返回参数,而且它知道如何处理这些参数。这使得与具有良好 NumPy 支持的大多数机器学习库一起工作变得轻而易举。" +"**Improve Virtual Client Engine internals** " +"([#1401](https://github.com/adap/flower/pull/1401), " +"[#1453](https://github.com/adap/flower/pull/1453))" +msgstr "**改进虚拟客户端引擎内部**([#1401](https://github.com/adap/flower/pull/1401)、[#1453](https://github.com/adap/flower/pull/1453))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:497 +#: ../../source/ref-changelog.md:897 msgid "" -"In terms of API, there's one major difference: all methods in Client take" -" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " -"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " -"``NumPyClient`` on the other hand have multiple arguments (e.g., " -"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" -" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " -"``NumPyClient.fit``) if there are multiple things to handle. These " -"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " -"values you're used to from ``NumPyClient``." +"Some internals of the Virtual Client Engine have been revamped. The VCE " +"now uses Ray 2.0 under the hood, the value type of the `client_resources`" +" dictionary changed to `float` to allow fractions of resources to be " +"allocated." 
msgstr "" -"在 API 方面,有一个主要区别:Client 中的所有方法都只接受一个参数(例如,``Client.fit`` 中的 " -"``FitIns``),并只返回一个值(例如,``Client.fit`` 中的 " -"``FitRes``)。另一方面,``NumPyClient``中的方法有多个参数(例如,``NumPyClient.fit``中的``parameters``和``config``)和多个返回值(例如,``NumPyClient.fit``中的``parameters``、``num_example``和``metrics``)。在" -" ``Client`` 中的这些 ``*Ins`` 和 ``*Res`` 对象封装了你在 ``NumPyClient`` 中习惯使用的所有单个值。" +"虚拟客户端引擎的部分内部结构已进行了修改。VCE 现在使用 Ray 2.0,\"client_resources \"字典的值类型改为 " +"\"float\",以允许分配分数资源。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:510 -msgid "Step 3: Custom serialization" -msgstr "步骤 3:自定义序列化" +#: ../../source/ref-changelog.md:899 +msgid "" +"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " +"Client Engine**" +msgstr "**支持虚拟客户端引擎中的可选** `Client`**/**`NumPyClient` **方法**" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:512 +#: ../../source/ref-changelog.md:901 msgid "" -"Here we will explore how to implement custom serialization with a simple " -"example." -msgstr "下面我们将通过一个简单的示例来探讨如何实现自定义序列化。" +"The Virtual Client Engine now has full support for optional `Client` (and" +" `NumPyClient`) methods." +msgstr "虚拟客户端引擎现在完全支持可选的 `Client`(和 `NumPyClient`)方法。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:514 +#: ../../source/ref-changelog.md:903 msgid "" -"But first what is serialization? Serialization is just the process of " -"converting an object into raw bytes, and equally as important, " -"deserialization is the process of converting raw bytes back into an " -"object. This is very useful for network communication. Indeed, without " -"serialization, you could not just a Python object through the internet." 
+"**Provide type information to packages using** `flwr` " +"([#1377](https://github.com/adap/flower/pull/1377))" msgstr "" -"首先,什么是序列化?序列化只是将对象转换为原始字节的过程,同样重要的是,反序列化是将原始字节转换回对象的过程。这对网络通信非常有用。事实上,如果没有序列化,你就无法通过互联网传输一个" -" Python 对象。" +"**使用** `flwr`向软件包提供类型信息 " +"([#1377](https://github.com/adap/flower/pull/1377))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:516 +#: ../../source/ref-changelog.md:905 msgid "" -"Federated Learning relies heavily on internet communication for training " -"by sending Python objects back and forth between the clients and the " -"server. This means that serialization is an essential part of Federated " -"Learning." -msgstr "通过在客户端和服务器之间来回发送 Python 对象,联合学习在很大程度上依赖于互联网通信进行训练。这意味着序列化是联邦学习的重要组成部分。" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:518 -msgid "" -"In the following section, we will write a basic example where instead of " -"sending a serialized version of our ``ndarray``\\ s containing our " -"parameters, we will first convert the ``ndarray`` into sparse matrices, " -"before sending them. This technique can be used to save bandwidth, as in " -"certain cases where the weights of a model are sparse (containing many 0 " -"entries), converting them to a sparse matrix can greatly improve their " -"bytesize." +"The package `flwr` is now bundled with a `py.typed` file indicating that " +"the package is typed. This enables typing support for projects or " +"packages that use `flwr` by enabling them to improve their code using " +"static type checkers like `mypy`." 
msgstr "" -"在下面的章节中,我们将编写一个基本示例,在发送包含参数的 ``ndarray`` 前,我们将首先把 ``ndarray`` " -"转换为稀疏矩阵,而不是发送序列化版本。这种技术可以用来节省带宽,因为在某些情况下,模型的参数是稀疏的(包含许多 0 " -"条目),将它们转换成稀疏矩阵可以大大提高它们的字节数。" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:521 -msgid "Our custom serialization/deserialization functions" -msgstr "我们的定制序列化/反序列化功能" +"软件包 `flwr` 现在捆绑了一个 `py.typed` 文件,表明该软件包是类型化的。这样,使用 `flwr` 的项目或软件包就可以使用 " +"`mypy` 等静态类型检查器改进代码,从而获得类型支持。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:523 +#: ../../source/ref-changelog.md:907 msgid "" -"This is where the real serialization/deserialization will happen, " -"especially in ``ndarray_to_sparse_bytes`` for serialization and " -"``sparse_bytes_to_ndarray`` for deserialization." +"**Updated code example** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" msgstr "" -"这才是真正的序列化/反序列化,尤其是在用于序列化的 ``ndarray_too_sparse_bytes`` 和用于反序列化的 " -"``sparse_bytes_too_ndarray`` 中。" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:525 -msgid "" -"Note that we imported the ``scipy.sparse`` library in order to convert " -"our arrays." -msgstr "请注意,为了转换数组,我们导入了 ``scipy.sparse`` 库。" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:613 -msgid "Client-side" -msgstr "客户端" +"** 更新代码示例** ([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:615 +#: ../../source/ref-changelog.md:909 msgid "" -"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " -"will just have to call our custom functions in our " -"``flwr.client.Client``." -msgstr "为了能够将我们的 ``ndarray`` 序列化为稀疏参数,我们只需在 ``flwr.client.Client`` 中调用我们的自定义函数。" +"The code examples covering scikit-learn and PyTorch Lightning have been " +"updated to work with the latest version of Flower." 
+msgstr "涵盖 scikit-learn 和 PyTorch Lightning 的代码示例已更新,以便与最新版本的 Flower 配合使用。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:617 +#: ../../source/ref-changelog.md:911 msgid "" -"Indeed, in ``get_parameters`` we need to serialize the parameters we got " -"from our network using our custom ``ndarrays_to_sparse_parameters`` " -"defined above." +"**Updated documentation** " +"([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github.com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" msgstr "" -"事实上,在 `get_parameters` 中,我们需要使用上文定义的自定义 `ndarrays_too_sparse_parameters` " -"序列化从网络中获取的参数。" +"**更新文档** ([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github. 
com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:619 +#: ../../source/ref-changelog.md:913 msgid "" -"In ``fit``, we first need to deserialize the parameters coming from the " -"server using our custom ``sparse_parameters_to_ndarrays`` and then we " -"need to serialize our local results with " -"``ndarrays_to_sparse_parameters``." -msgstr "" -"在 ``fit`` 中,我们首先需要使用自定义的 ``sparse_parameters_to_ndarrays`` " -"反序列化来自服务器的参数,然后使用 ``ndarrays_to_sparse_parameters`` 序列化本地结果。" +"There have been so many documentation updates that it doesn't even make " +"sense to list them individually." +msgstr "文档更新的数量之多,甚至没有必要逐一列出。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:621 +#: ../../source/ref-changelog.md:915 msgid "" -"In ``evaluate``, we will only need to deserialize the global parameters " -"with our custom function." -msgstr "在 ``evaluate`` 中,我们只需要用自定义函数反序列化全局参数。" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:725 -msgid "Server-side" -msgstr "服务器端" +"**Restructured documentation** " +"([#1387](https://github.com/adap/flower/pull/1387))" +msgstr "**重构文档**([#1387](https://github.com/adap/flower/pull/1387))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:727 +#: ../../source/ref-changelog.md:917 msgid "" -"For this example, we will just use ``FedAvg`` as a strategy. To change " -"the serialization and deserialization here, we only need to reimplement " -"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. 
The other" -" functions of the strategy will be inherited from the super class " -"``FedAvg``." -msgstr "" -"在本例中,我们将只使用 ``FedAvg`` 作为策略。要改变这里的序列化和反序列化,我们只需重新实现 ``FedAvg`` 的 " -"``evaluate`` 和 ``aggregate_fit`` 函数。策略的其他函数将从超类 ``FedAvg`` 继承。" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:729 -msgid "As you can see only one line as change in ``evaluate``:" -msgstr "正如你所看到的,``evaluate``中只修改了一行:" +"The documentation has been restructured to make it easier to navigate. " +"This is just the first step in a larger effort to make the Flower " +"documentation the best documentation of any project ever. Stay tuned!" +msgstr "我们对文档进行了重组,使其更易于浏览。这只是让 Flower 文档成为所有项目中最好文档的第一步。敬请期待!" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:735 +#: ../../source/ref-changelog.md:919 msgid "" -"And for ``aggregate_fit``, we will first deserialize every result we " -"received:" -msgstr "而对于 ``aggregate_fit``,我们将首先反序列化收到的每个结果:" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:744 -msgid "And then serialize the aggregated result:" -msgstr "然后将汇总结果序列化:" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:903 -msgid "We can now run our custom serialization example!" -msgstr "现在我们可以运行自定义序列化示例!" +"**Open in Colab button** " +"([#1389](https://github.com/adap/flower/pull/1389))" +msgstr "**在 Colab 中打开按钮** ([#1389](https://github.com/adap/flower/pull/1389))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:934 +#: ../../source/ref-changelog.md:921 msgid "" -"In this part of the tutorial, we've seen how we can build clients by " -"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " -"convenience abstraction that makes it easier to work with machine " -"learning libraries that have good NumPy interoperability. ``Client`` is a" -" more flexible abstraction that allows us to do things that are not " -"possible in ``NumPyClient``. 
In order to do so, it requires us to handle " -"parameter serialization and deserialization ourselves." +"The four parts of the Flower Federated Learning Tutorial now come with a " +"new `Open in Colab` button. No need to install anything on your local " +"machine, you can now use and learn about Flower in your browser, it's " +"only a single click away." msgstr "" -"在本部分教程中,我们已经了解了如何通过子类化 ``NumPyClient`` 或 ``Client`` 来构建客户端。NumPyClient " -"\"是一个便捷的抽象类,可以让我们更容易地与具有良好NumPy互操作性的机器学习库一起工作。``Client``是一个更灵活的抽象类,允许我们做一些在`NumPyClient``中做不到的事情。为此,它要求我们自己处理参数序列化和反序列化。" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:952 -msgid "" -"This is the final part of the Flower tutorial (for now!), " -"congratulations! You're now well equipped to understand the rest of the " -"documentation. There are many topics we didn't cover in the tutorial, we " -"recommend the following resources:" -msgstr "这暂时是 Flower 教程的最后一部分,恭喜您!您现在已经具备了理解其余文档的能力。本教程还有许多内容没有涉及,我们推荐您参考以下资源:" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:954 -msgid "`Read Flower Docs `__" -msgstr "阅读Flower文档 `__" +"Flower 联邦学习教程的四个部分现在都带有一个新的 \"在 Colab 中打开 " +"\"按钮。现在,您无需在本地计算机上安装任何软件,只需点击一下,就可以在浏览器中使用和学习 Flower。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:955 +#: ../../source/ref-changelog.md:923 msgid "" -"`Check out Flower Code Examples " -"`__" -msgstr "查看 Flower 代码示例 `__" +"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" +msgstr "" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), 
" +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:956 +#: ../../source/ref-changelog.md:925 msgid "" -"`Use Flower Baselines for your research " -"`__" -msgstr "使用 \"Flower Baselines \"进行研究 `__" +"The Flower Federated Learning Tutorial has two brand-new parts covering " +"custom strategies (still WIP) and the distinction between `Client` and " +"`NumPyClient`. The existing parts one and two have also been improved " +"(many small changes and fixes)." +msgstr "" +"Flower 联邦学习教程有两个全新的部分,涉及自定义策略(仍处于 WIP 阶段)和 `Client` 与 `NumPyClient` " +"之间的区别。现有的第一和第二部分也得到了改进(许多小改动和修正)。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:957 -msgid "" -"`Watch Flower Summit 2023 videos `__" -msgstr "观看 2023 年Flower峰会视频 `__" +#: ../../source/ref-changelog.md:931 +msgid "v1.0.0 (2022-07-28)" +msgstr "v1.0.0 (2022-07-28)" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 -msgid "Get started with Flower" -msgstr "开始使用Flower" +#: ../../source/ref-changelog.md:933 +msgid "Highlights" +msgstr "亮点" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 -msgid "Welcome to the Flower federated learning tutorial!" -msgstr "欢迎阅读Flower联邦学习教程!" +#: ../../source/ref-changelog.md:935 +msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" +msgstr "稳定的**虚拟客户端引擎**(可通过`start_simulation`访问)" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 -#, fuzzy -msgid "" -"In this notebook, we'll build a federated learning system using Flower, " -"`Flower Datasets `__ and PyTorch. In " -"part 1, we use PyTorch for the model training pipeline and data loading. " -"In part 2, we continue to federate the PyTorch-based pipeline using " -"Flower." 
-msgstr "" -"在本笔记中,我们将使用 Flower 和 PyTorch 构建一个联邦学习系统。在第一部分中,我们使用 PyTorch " -"进行模型训练和数据加载。在第二部分中,我们将继续使用 Flower 联邦化基于 PyTorch 的框架。" +#: ../../source/ref-changelog.md:936 +msgid "All `Client`/`NumPyClient` methods are now optional" +msgstr "所有 `Client`/`NumPyClient` 方法现在都是可选的了" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:17 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 -msgid "Let's get started!" -msgstr "让我们开始吧!" +#: ../../source/ref-changelog.md:937 +msgid "Configurable `get_parameters`" +msgstr "可配置的`get_parameters`" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:31 +#: ../../source/ref-changelog.md:938 msgid "" -"Before we begin with any actual code, let's make sure that we have " -"everything we need." -msgstr "在开始编写实际代码之前,让我们先确保我们已经准备好了所需的一切。" +"Tons of small API cleanups resulting in a more coherent developer " +"experience" +msgstr "对大量小型应用程序接口进行了清理,使开发人员的体验更加一致" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:45 -#, fuzzy +#: ../../source/ref-changelog.md:942 msgid "" -"Next, we install the necessary packages for PyTorch (``torch`` and " -"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " -"(``flwr``):" -msgstr "接下来,我们为 PyTorch(`torch`` 和`torchvision``)和 Flower(`flwr`)安装必要的软件包:" +"We would like to give our **special thanks** to all the contributors who " +"made Flower 1.0 possible (in reverse [GitHub " +"Contributors](https://github.com/adap/flower/graphs/contributors) order):" +msgstr "" +"在此,我们谨向所有促成 Flower 1.0 的贡献者致以**特别的谢意(按[GitHub " +"贡献者](https://github.com/adap/flower/graphs/contributors) 倒序排列):" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:105 -#, fuzzy +#: ../../source/ref-changelog.md:944 msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" -" GPU > Save``). 
Note, however, that Google Colab is not always able to " -"offer GPU acceleration. If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." -msgstr "" -"可以切换到已启用 GPU 加速的运行时(在 Google Colab 上: 运行时 > 更改运行时类型 > 硬件加速: GPU > " -"保存``)。但请注意,Google Colab 并非总能提供 GPU 加速。如果在以下部分中看到与 GPU 可用性相关的错误,请考虑通过设置 " -"``DEVICE = torch.device(\"cpu\")`` 切回基于 CPU 的执行。如果运行时已启用 GPU " -"加速,你应该会看到输出``Training on cuda``,否则会显示``Training on cpu``。" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:118 -msgid "Loading the data" -msgstr "加载数据" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:120 -#, fuzzy -msgid "" -"Federated learning can be applied to many different types of tasks across" -" different domains. In this tutorial, we introduce federated learning by " -"training a simple convolutional neural network (CNN) on the popular " -"CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that " -"distinguish between images from ten different classes: 'airplane', " -"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " -"'truck'." -msgstr "" -"联邦学习可应用于不同领域的多种不同类型任务。在本教程中,我们将通过在流行的 CIFAR-10 数据集上训练一个简单的卷积神经网络 (CNN) " -"来介绍联合学习。CIFAR-10 可用于训练图像分类器,以区分来自十个不同类别的图像:" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:131 -msgid "" -"We simulate having multiple datasets from multiple organizations (also " -"called the \"cross-silo\" setting in federated learning) by splitting the" -" original CIFAR-10 dataset into multiple partitions. Each partition will " -"represent the data from a single organization. 
We're doing this purely " -"for experimentation purposes, in the real world there's no need for data " -"splitting because each organization already has their own data (so the " -"data is naturally partitioned)." +"[@rtaiello](https://github.com/rtaiello), " +"[@g-pichler](https://github.com/g-pichler), [@rob-" +"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" +"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " +"[@nfnt](https://github.com/nfnt), " +"[@tatiana-s](https://github.com/tatiana-s), " +"[@TParcollet](https://github.com/TParcollet), " +"[@vballoli](https://github.com/vballoli), " +"[@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " +"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " +"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" +"/Jueun-Park), [@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), " +"[@mrinaald](https://github.com/mrinaald), " +"[@zliel](https://github.com/zliel), " +"[@MeiruiJiang](https://github.com/MeiruiJiang), " +"[@sancarlim](https://github.com/sancarlim), " +"[@gubertoli](https://github.com/gubertoli), " +"[@Vingt100](https://github.com/Vingt100), " +"[@MakGulati](https://github.com/MakGulati), " +"[@cozek](https://github.com/cozek), " +"[@jafermarq](https://github.com/jafermarq), " +"[@sisco0](https://github.com/sisco0), " +"[@akhilmathurs](https://github.com/akhilmathurs), " +"[@CanTuerk](https://github.com/CanTuerk), " +"[@mariaboerner1987](https://github.com/mariaboerner1987), " +"[@pedropgusmao](https://github.com/pedropgusmao), " 
+"[@tanertopal](https://github.com/tanertopal), " +"[@danieljanes](https://github.com/danieljanes)." msgstr "" -"我们通过将原始 CIFAR-10 数据集拆分成多个分区来模拟来自多个组织的多个数据集(也称为联邦学习中的 \"跨分区 " -"\"设置)。每个分区代表一个组织的数据。我们这样做纯粹是为了实验目的,在现实世界中不需要拆分数据,因为每个组织都已经有了自己的数据(所以数据是自然分区的)。" +"[@rtaiello](https://github.com/rtaiello), " +"[@g-pichler](https://github.com/g-pichler), [@rob-" +"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" +"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " +"[@nfnt](https://github.com/nfnt), " +"[@tatiana-s](https://github.com/tatiana-s), " +"[@TParcollet](https://github.com/TParcollet), " +"[@vballoli](https://github.com/vballoli), " +"[@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " +"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " +"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" +"/Jueun-Park), [@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), " +"[@mrinaald](https://github.com/mrinaald), " +"[@zliel](https://github.com/zliel), " +"[@MeiruiJiang](https://github.com/MeiruiJiang), " +"[@sancarlim](https://github.com/sancarlim), " +"[@gubertoli](https://github.com/gubertoli), " +"[@Vingt100](https://github.com/Vingt100), " +"[@MakGulati](https://github.com/MakGulati), " +"[@cozek](https://github.com/cozek), " +"[@jafermarq](https://github.com/jafermarq), " +"[@sisco0](https://github.com/sisco0), " +"[@akhilmathurs](https://github.com/akhilmathurs), " +"[@CanTuerk](https://github.com/CanTuerk), " +"[@mariaboerner1987](https://github.com/mariaboerner1987), " 
+"[@pedropgusmao](https://github.com/pedropgusmao), " +"[@tanertopal](https://github.com/tanertopal), " +"[@danieljanes](https://github.com/danieljanes)." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:133 -#, fuzzy +#: ../../source/ref-changelog.md:948 msgid "" -"Each organization will act as a client in the federated learning system. " -"So having ten organizations participate in a federation means having ten " -"clients connected to the federated learning server." -msgstr "每个组织都将充当联邦学习系统中的客户端。因此,有十个组织参与联邦学习,就意味着有十个客户端连接到联邦学习服务器:" +"**All arguments must be passed as keyword arguments** " +"([#1338](https://github.com/adap/flower/pull/1338))" +msgstr "** 所有参数必须作为关键字参数传递** ([#1338](https://github.com/adap/flower/pull/1338))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:144 +#: ../../source/ref-changelog.md:950 msgid "" -"Let's now create the Federated Dataset abstraction that from ``flwr-" -"datasets`` that partitions the CIFAR-10. We will create small training " -"and test set for each edge device and wrap each of them into a PyTorch " -"``DataLoader``:" +"Pass all arguments as keyword arguments, positional arguments are not " +"longer supported. Code that uses positional arguments (e.g., " +"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " +"for each positional argument (e.g., " +"`start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())`)." msgstr "" +"以关键字参数传递所有参数,不再支持位置参数。使用位置参数的代码(例如,`start_client(\"127.0.0.1:8080\", " +"FlowerClient())`)必须为每个位置参数添加关键字(例如,`start_client(server_address=\"127.0.0.1:8080\"," +" client=FlowerClient())`)。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:198 -#, fuzzy +#: ../../source/ref-changelog.md:952 msgid "" -"We now have a list of ten training sets and ten validation sets " -"(``trainloaders`` and ``valloaders``) representing the data of ten " -"different organizations. 
Each ``trainloader``/``valloader`` pair contains" -" 4000 training examples and 1000 validation examples. There's also a " -"single ``testloader`` (we did not split the test set). Again, this is " -"only necessary for building research or educational systems, actual " -"federated learning systems have their data naturally distributed across " -"multiple partitions." +"**Introduce configuration object** `ServerConfig` **in** `start_server` " +"**and** `start_simulation` " +"([#1317](https://github.com/adap/flower/pull/1317))" msgstr "" -"现在,我们有一个包含十个训练集和十个验证集(`trainloaders`` 和`valloaders``)的列表,代表十个不同组织的数据。每对 " -"``trainloader``/``valloader`` 都包含 4500 个训练示例和 500 个验证数据。还有一个单独的 " -"``测试加载器``(我们没有拆分测试集)。同样,这只有在构建研究或教育系统时才有必要,实际的联邦学习系统的数据自然分布在多个分区中。" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:201 -msgid "" -"Let's take a look at the first batch of images and labels in the first " -"training set (i.e., ``trainloaders[0]``) before we move on:" -msgstr "在继续之前,让我们先看看第一个训练集中的第一批图像和标签(即 ``trainloaders[0]``):" +"**在*** `start_server` ***和*** `start_simulation` 中引入配置对象*** " +"`ServerConfig` ([#1317](https://github.com/adap/flower/pull/1317))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:240 +#: ../../source/ref-changelog.md:954 msgid "" -"The output above shows a random batch of images from the first " -"``trainloader`` in our list of ten ``trainloaders``. It also prints the " -"labels associated with each image (i.e., one of the ten possible labels " -"we've seen above). If you run the cell again, you should see another " -"batch of images." +"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " +"600.0}`, `start_server` and `start_simulation` now expect a configuration" +" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" +" arguments that as the previous config dict, but it makes writing type-" +"safe code easier and the default parameters values more transparent." 
msgstr "" -"上面的输出显示了来自十个 \"trainloader \"列表中第一个 \"trainloader " -"\"的随机图像。它还打印了与每幅图像相关的标签(即我们上面看到的十个可能标签之一)。如果您再次运行该单元,应该会看到另一批图像。" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:252 -msgid "Step 1: Centralized Training with PyTorch" -msgstr "步骤 1:使用 PyTorch 进行集中训练" +"并非配置字典`{\"num_rounds\": 3, \"round_timeout\": 600.0}`, `start_server`和 " +"`start_simulation`现在用一个类型为 " +"`flwr.server.ServerConfig`的配置对象。`ServerConfig`接收的参数与之前的 config dict " +"相同,但它使编写类型安全代码变得更容易,默认参数值也更加透明。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:263 +#: ../../source/ref-changelog.md:956 msgid "" -"Next, we're going to use PyTorch to define a simple convolutional neural " -"network. This introduction assumes basic familiarity with PyTorch, so it " -"doesn't cover the PyTorch-related aspects in full detail. If you want to " -"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " -"MINUTE BLITZ " -"`__." -msgstr "" -"接下来,我们将使用 PyTorch 来定义一个简单的卷积神经网络。本介绍假定您对 PyTorch 有基本的了解,因此不会详细介绍与 PyTorch" -" 相关的内容。如果你想更深入地了解 PyTorch,我们推荐你阅读 `DEEP LEARNING WITH PYTORCH: a 60 " -"minute blitz " -"`__。" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:275 -msgid "Defining the model" -msgstr "定义模型" +"**Rename built-in strategy parameters for clarity** " +"([#1334](https://github.com/adap/flower/pull/1334))" +msgstr "**重新命名内置策略参数,使其更加清晰** ([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:277 +#: ../../source/ref-changelog.md:958 msgid "" -"We use the simple CNN described in the `PyTorch tutorial " -"`__:" -msgstr "" -"我们使用` PyTorch 教程 " -"`__ 中描述的简单 CNN:" +"The following built-in strategy parameters were renamed to improve " +"readability and consistency with other API's:" +msgstr "以下内置策略参数已重新命名,以提高可读性并与其他 API 保持一致:" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:314 -msgid "Let's continue with the usual training and test 
functions:" -msgstr "让我们继续进行常规的训练和测试功能:" +#: ../../source/ref-changelog.md:960 +msgid "`fraction_eval` --> `fraction_evaluate`" +msgstr "`fraction_eval` --> `fraction_evaluate`" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:374 -msgid "Training the model" -msgstr "训练模型" +#: ../../source/ref-changelog.md:961 +msgid "`min_eval_clients` --> `min_evaluate_clients`" +msgstr "`min_eval_clients` --> `min_evaluate_clients`" + +#: ../../source/ref-changelog.md:962 +msgid "`eval_fn` --> `evaluate_fn`" +msgstr "`eval_fn` --> `evaluate_fn`" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:376 +#: ../../source/ref-changelog.md:964 msgid "" -"We now have all the basic building blocks we need: a dataset, a model, a " -"training function, and a test function. Let's put them together to train " -"the model on the dataset of one of our organizations " -"(``trainloaders[0]``). This simulates the reality of most machine " -"learning projects today: each organization has their own data and trains " -"models only on this internal data:" -msgstr "现在我们拥有了所需的所有基本构件:数据集、模型、训练函数和测试函数。让我们把它们放在一起,在我们其中一个组织的数据集(``trainloaders[0]``)上训练模型。这模拟了当今大多数机器学习项目的实际情况:每个组织都有自己的数据,并且只在这些内部数据上训练模型:" +"**Update default arguments of built-in strategies** " +"([#1278](https://github.com/adap/flower/pull/1278))" +msgstr "**更新内置策略的默认参数** ([#1278](https://github.com/adap/flower/pull/1278))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 +#: ../../source/ref-changelog.md:966 msgid "" -"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " -"in a test set accuracy of about 41%, which is not good, but at the same " -"time, it doesn't really matter for the purposes of this tutorial. The " -"intent was just to show a simplistic centralized training pipeline that " -"sets the stage for what comes next - federated learning!" 
+"All built-in strategies now use `fraction_fit=1.0` and " +"`fraction_evaluate=1.0`, which means they select *all* currently " +"available clients for training and evaluation. Projects that relied on " +"the previous default values can get the previous behaviour by " +"initializing the strategy in the following way:" msgstr "" -"在我们的 CIFAR-10 分片上对简单 CNN 进行 5 个遍历的训练后,测试集的准确率应为 " -"41%,这并不理想,但同时对本教程而言也并不重要。我们只是想展示一个简单的集中式训练流程,为接下来的联邦学习做好铺垫!" +"所有内置策略现在都使用 \"fraction_fit=1.0 \"和 " +"\"fraction_evaluate=1.0\",这意味着它们会选择*所有*当前可用的客户端进行训练和评估。依赖以前默认值的项目可以通过以下方式初始化策略,获得以前的行为:" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 -msgid "Step 2: Federated Learning with Flower" -msgstr "步骤 2:使用 Flower 联邦学习" +#: ../../source/ref-changelog.md:968 +msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" +msgstr "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 +#: ../../source/ref-changelog.md:970 msgid "" -"Step 1 demonstrated a simple centralized training pipeline. All data was " -"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." -" Next, we'll simulate a situation where we have multiple datasets in " -"multiple organizations and where we train a model over these " -"organizations using federated learning." 
+"**Add** `server_round` **to** `Strategy.evaluate` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -"步骤 1 演示了一个简单的集中式训练流程。所有数据都在一个地方(即一个 \"trainloader \"和一个 " -"\"valloader\")。接下来,我们将模拟在多个组织中拥有多个数据集的情况,并使用联邦学习在这些组织中训练一个模型。" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 -msgid "Updating model parameters" -msgstr "更新模型参数" +"**添加*** `server_round` ***到*** `Strategy.evaluate` " +"([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 +#: ../../source/ref-changelog.md:972 msgid "" -"In federated learning, the server sends the global model parameters to " -"the client, and the client updates the local model with the parameters " -"received from the server. It then trains the model on the local data " -"(which changes the model parameters locally) and sends the " -"updated/changed model parameters back to the server (or, alternatively, " -"it sends just the gradients back to the server, not the full model " -"parameters)." -msgstr "在联邦学习中,服务器将全局模型参数发送给客户端,客户端根据从服务器接收到的参数更新本地模型。然后,客户端根据本地数据对模型进行训练(在本地更改模型参数),并将更新/更改后的模型参数发回服务器(或者,客户端只将梯度参数发回服务器,而不是全部模型参数)。" +"The `Strategy` method `evaluate` now receives the current round of " +"federated learning/evaluation as the first parameter." +msgstr "`Strategy`的`evaluate` 方法现在会接收当前一轮联邦学习/评估作为第一个参数。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 +#: ../../source/ref-changelog.md:974 msgid "" -"We need two helper functions to update the local model with parameters " -"received from the server and to get the updated model parameters from the" -" local model: ``set_parameters`` and ``get_parameters``. The following " -"two functions do just that for the PyTorch model above." 
+"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -"我们需要两个辅助函数,用从服务器接收到的参数更新本地模型,并从本地模型获取更新后的模型参数:`` " -"set_parameters```和`get_parameters``。下面两个函数就是为上面的 PyTorch 模型做这些工作的。" +"**将*** `server_round` **和*** `config` **参数添加到*** `evaluate_fn` " +"([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 +#: ../../source/ref-changelog.md:976 msgid "" -"The details of how this works are not really important here (feel free to" -" consult the PyTorch documentation if you want to learn more). In " -"essence, we use ``state_dict`` to access PyTorch model parameter tensors." -" The parameter tensors are then converted to/from a list of NumPy " -"ndarray's (which Flower knows how to serialize/deserialize):" +"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " +"three parameters: (1) The current round of federated learning/evaluation " +"(`server_round`), (2) the model parameters to evaluate (`parameters`), " +"and (3) a config dictionary (`config`)." msgstr "" -"在这里,如何工作的细节并不重要(如果你想了解更多,请随时查阅 PyTorch 文档)。本质上,我们使用 ``state_dict`` 访问 " -"PyTorch 模型参数张量。然后,参数张量会被转换成/转换成 NumPy ndarray 列表(Flower 知道如何序列化/反序列化):" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 -msgid "Implementing a Flower client" -msgstr "实现 Flower 客户端" +"传递给内置策略(如 `FedAvg`)的 `evaluate_fn` 现在需要三个参数:(1) 当前一轮联邦学习/评估 " +"(`server_round`),(2) 要评估的模型参数 (`parameters`),(3) 配置字典 (`config`)。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 +#: ../../source/ref-changelog.md:978 msgid "" -"With that out of the way, let's move on to the interesting part. " -"Federated learning systems consist of a server and multiple clients. In " -"Flower, we create clients by implementing subclasses of " -"``flwr.client.Client`` or ``flwr.client.NumPyClient``. 
We use " -"``NumPyClient`` in this tutorial because it is easier to implement and " -"requires us to write less boilerplate." +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" msgstr "" -"说完这些,让我们进入有趣的部分。联邦学习系统由一个服务器和多个客户端组成。在 Flower 中,我们通过实现 " -"``flwr.client.Client`` 或 ``flwr.client.NumPyClient`` " -"的子类来创建客户端。在本教程中,我们使用``NumPyClient``,因为它更容易实现,需要我们编写的模板也更少。" +"**重新命名** `rnd` ** to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 +#: ../../source/ref-changelog.md:980 msgid "" -"To implement the Flower client, we create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``:" +"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " +"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " +"current round of federated learning/evaluation as their first parameter. " +"To improve reaability and avoid confusion with *random*, this parameter " +"has been renamed from `rnd` to `server_round`." 
msgstr "" -"为实现 Flower 客户端,我们创建了 ``flwr.client.NumPyClient`` 的子类,并实现了 " -"``get_parameters``、``fit`` 和``evaluate`` 三个方法:" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 -msgid "``get_parameters``: Return the current local model parameters" -msgstr "``get_parameters``: 返回当前本地模型参数" +"几个 Flower " +"方法和函数(`evaluate_fn`、`configure_fit`、`aggregate_fit`、`configure_evaluate`、`aggregate_evaluate`)的第一个参数是当前一轮的联邦学习/评估。为提高可重复性并避免与" +" *random* 混淆,该参数已从 `rnd` 更名为 `server_round`。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 +#: ../../source/ref-changelog.md:982 msgid "" -"``fit``: Receive model parameters from the server, train the model " -"parameters on the local data, and return the (updated) model parameters " -"to the server" -msgstr "``fit``: 从服务器接收模型参数,在本地数据上训练模型参数,并将(更新的)模型参数返回服务器" +"**Move** `flwr.dataset` **to** `flwr_baselines` " +"([#1273](https://github.com/adap/flower/pull/1273))" +msgstr "" +"**移动*** `flwr.dataset` **到*** `flwr_baselines` " +"([#1273](https://github.com/adap/flower/pull/1273))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 -msgid "" -"``evaluate``: Receive model parameters from the server, evaluate the " -"model parameters on the local data, and return the evaluation result to " -"the server" -msgstr "``evaluate ``: 从服务器接收模型参数,在本地数据上评估模型参数,并将评估结果返回服务器" +#: ../../source/ref-changelog.md:984 +msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." +msgstr "实验软件包 `flwr.dataset` 已迁移至 Flower Baselines。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 +#: ../../source/ref-changelog.md:986 msgid "" -"We mentioned that our clients will use the previously defined PyTorch " -"components for model training and evaluation. 
Let's see a simple Flower " -"client implementation that brings everything together:" -msgstr "" -"我们提到,我们的客户端将使用之前定义的 PyTorch 组件进行模型训练和评估。让我们来看看一个简单的 Flower " -"客户端实现,它将一切都整合在一起:" +"**Remove experimental strategies** " +"([#1280](https://github.com/adap/flower/pull/1280))" +msgstr "**删除实验策略** ([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 +#: ../../source/ref-changelog.md:988 msgid "" -"Our class ``FlowerClient`` defines how local training/evaluation will be " -"performed and allows Flower to call the local training/evaluation through" -" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" -" *single client* in our federated learning system. Federated learning " -"systems have multiple clients (otherwise, there's not much to federate), " -"so each client will be represented by its own instance of " -"``FlowerClient``. If we have, for example, three clients in our workload," -" then we'd have three instances of ``FlowerClient``. Flower calls " -"``FlowerClient.fit`` on the respective instance when the server selects a" -" particular client for training (and ``FlowerClient.evaluate`` for " -"evaluation)." -msgstr "" -"我们的类 ``FlowerClient`` 定义了本地训练/评估的执行方式,并允许 Flower 通过 ``fit`` 和 " -"``evaluate`` 调用本地训练/评估。每个 ``FlowerClient`` " -"实例都代表联邦学习系统中的*单个客户端*。联邦学习系统有多个客户端(否则就没有什么可联邦的),因此每个客户端都将由自己的 " -"``FlowerClient`` 实例来代表。例如,如果我们的工作负载中有三个客户端,那么我们就会有三个 ``FlowerClient`` " -"实例。当服务器选择特定客户端进行训练时,Flower 会调用相应实例上的 ``FlowerClient.fit`` (评估时调用 " -"``FlowerClient.evaluate``)。" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:517 -msgid "Using the Virtual Client Engine" -msgstr "使用虚拟客户端引擎" +"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " +"`FedFSv1`)." 
+msgstr "移除未维护的试验性策略(`FastAndSlow`、`FedFSv0`、`FedFSv1`)。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:519 +#: ../../source/ref-changelog.md:990 msgid "" -"In this notebook, we want to simulate a federated learning system with 10" -" clients on a single machine. This means that the server and all 10 " -"clients will live on a single machine and share resources such as CPU, " -"GPU, and memory. Having 10 clients would mean having 10 instances of " -"``FlowerClient`` in memory. Doing this on a single machine can quickly " -"exhaust the available memory resources, even if only a subset of these " -"clients participates in a single round of federated learning." +"**Rename** `Weights` **to** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -"在本笔记中,我们要模拟一个联邦学习系统,在一台机器上有 10 个客户端。这意味着服务器和所有 10 个客户端都将位于一台机器上,并共享 " -"CPU、GPU 和内存等资源。有 10 个客户端就意味着内存中有 10 个 ``FlowerClient`` " -"实例。在单台机器上这样做会很快耗尽可用的内存资源,即使这些客户端中只有一个子集参与了一轮联邦学习。" +"**重新命名** `Weights` **到** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:521 +#: ../../source/ref-changelog.md:992 msgid "" -"In addition to the regular capabilities where server and clients run on " -"multiple machines, Flower, therefore, provides special simulation " -"capabilities that create ``FlowerClient`` instances only when they are " -"actually necessary for training or evaluation. To enable the Flower " -"framework to create clients when necessary, we need to implement a " -"function called ``client_fn`` that creates a ``FlowerClient`` instance on" -" demand. Flower calls ``client_fn`` whenever it needs an instance of one " -"particular client to call ``fit`` or ``evaluate`` (those instances are " -"usually discarded after use, so they should not keep any local state). 
" -"Clients are identified by a client ID, or short ``cid``. The ``cid`` can " -"be used, for example, to load different local data partitions for " -"different clients, as can be seen below:" -msgstr "" -"除了服务器和客户端在多台机器上运行的常规功能外,Flower 还提供了特殊的模拟功能,即只有在训练或评估实际需要时才创建 " -"``FlowerClient`` 实例。为了让 Flower 框架能在必要时创建客户端,我们需要实现一个名为 ``client_fn`` " -"的函数,它能按需创建一个 ``FlowerClient`` 实例。每当 Flower 需要一个特定的客户端实例来调用 ``fit`` 或 " -"``evaluate`` 时,它就会调用 " -"``client_fn``(这些实例在使用后通常会被丢弃,因此它们不应保留任何本地状态)。客户端由一个客户端 ID 或简短的 ``cid`` " -"标识。例如,可以使用 ``cid`` 为不同的客户端加载不同的本地数据分区,如下所示:" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:556 -msgid "Starting the training" -msgstr "开始训练" +"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " +"capture what this type is all about." +msgstr "flwr.common.Weights \"更名为 \"flwr.common.NDArrays\",以更好地反映该类型的含义。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:558 +#: ../../source/ref-changelog.md:994 msgid "" -"We now have the class ``FlowerClient`` which defines client-side " -"training/evaluation and ``client_fn`` which allows Flower to create " -"``FlowerClient`` instances whenever it needs to call ``fit`` or " -"``evaluate`` on one particular client. The last step is to start the " -"actual simulation using ``flwr.simulation.start_simulation``." 
+"**Remove antiquated** `force_final_distributed_eval` **from** " +"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -"现在我们有了定义客户端训练/评估的类 ``FlowerClient`` 和允许 Flower 在需要调用某个客户端的 ``fit` 或 " -"``evaluate` 时创建 ``FlowerClient`` 实例的 ``client_fn` 类。最后一步是使用 " -"``flwr.simulation.start_simulation`` 启动实际模拟。" +"**从** `start_server` 中移除过时的** `force_final_distributed_eval` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:560 +#: ../../source/ref-changelog.md:996 msgid "" -"The function ``start_simulation`` accepts a number of arguments, amongst " -"them the ``client_fn`` used to create ``FlowerClient`` instances, the " -"number of clients to simulate (``num_clients``), the number of federated " -"learning rounds (``num_rounds``), and the strategy. The strategy " -"encapsulates the federated learning approach/algorithm, for example, " -"*Federated Averaging* (FedAvg)." +"The `start_server` parameter `force_final_distributed_eval` has long been" +" a historic artefact, in this release it is finally gone for good." msgstr "" -"函数 ``start_simulation`` 接受许多参数,其中包括用于创建 ``FlowerClient`` 实例的 " -"``client_fn``、要模拟的客户端数量(``num_clients``)、联邦学习轮数(``num_rounds``)和策略。策略封装了联邦学习方法/算法,例如*联邦平均*" -" (FedAvg)。" +"start_server \"参数 \"force_final_distributed_eval " +"\"长期以来一直是个历史遗留问题,在此版本中终于永远消失了。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:562 +#: ../../source/ref-changelog.md:998 msgid "" -"Flower has a number of built-in strategies, but we can also use our own " -"strategy implementations to customize nearly all aspects of the federated" -" learning approach. For this example, we use the built-in ``FedAvg`` " -"implementation and customize it using a few basic parameters. 
The last " -"step is the actual call to ``start_simulation`` which - you guessed it - " -"starts the simulation:" +"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" msgstr "" -"Flower 有许多内置策略,但我们也可以使用自己的策略实现来定制联邦学习方法的几乎所有方面。在本例中,我们使用内置的 ``FedAvg`` " -"实现,并使用一些基本参数对其进行定制。最后一步是实际调用 ``start_simulation``开始模拟:" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:608 -msgid "Behind the scenes" -msgstr "幕后" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:610 -msgid "So how does this work? How does Flower execute this simulation?" -msgstr "那么它是如何工作的呢?Flower 如何进行模拟?" +"**使** `get_parameters` **可配置** " +"([#1242](https://github.com/adap/flower/pull/1242))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:612 -#, python-format +#: ../../source/ref-changelog.md:1000 msgid "" -"When we call ``start_simulation``, we tell Flower that there are 10 " -"clients (``num_clients=10``). Flower then goes ahead an asks the " -"``FedAvg`` strategy to select clients. ``FedAvg`` knows that it should " -"select 100% of the available clients (``fraction_fit=1.0``), so it goes " -"ahead and selects 10 random clients (i.e., 100% of 10)." +"The `get_parameters` method now accepts a configuration dictionary, just " +"like `get_properties`, `fit`, and `evaluate`." msgstr "" -"当我们调用 ``start_simulation`` 时,我们会告诉 Flower 有 10 " -"个客户(`num_clients=10``)。然后,Flower 会要求 ``FedAvg`` 策略选择客户。``FedAvg`` 知道它应该选择" -" 100%的可用客户(``fraction_fit=1.0``),所以它会随机选择 10 个客户(即 10 的 100%)。" +"现在,\"get_parameters \"方法与 \"get_properties\"、\"fit \"和 \"evaluate " +"\"一样,都接受配置字典。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:614 +#: ../../source/ref-changelog.md:1002 msgid "" -"Flower then asks the selected 10 clients to train the model. When the " -"server receives the model parameter updates from the clients, it hands " -"those updates over to the strategy (*FedAvg*) for aggregation. 
The " -"strategy aggregates those updates and returns the new global model, which" -" then gets used in the next round of federated learning." +"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " +"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -"然后,Flower 会要求选定的 10 " -"个客户端对模型进行训练。服务器收到客户端的模型参数更新后,会将这些更新交给策略(*FedAvg*)进行聚合。策略会聚合这些更新并返回新的全局模型,然后将其用于下一轮联邦学习。" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:626 -msgid "Where's the accuracy?" -msgstr "准确度在哪里找?" +"**用新的** `config` 参数** 替换** `num_rounds` ** in** `start_simulation` ** " +"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:628 +#: ../../source/ref-changelog.md:1004 msgid "" -"You may have noticed that all metrics except for ``losses_distributed`` " -"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" +"The `start_simulation` function now accepts a configuration dictionary " +"`config` instead of the `num_rounds` integer. This improves the " +"consistency between `start_simulation` and `start_server` and makes " +"transitioning between the two easier." msgstr "" -"您可能已经注意到,除了 ``losses_distributed`` 以外,所有指标都是空的。{\"准确度\": " -"float(准确度)}``去哪儿了?" +"现在,`start_simulation`(开始模拟)` 函数接受配置字典 `config` 而不是 `num_rounds` 整数。这改进了 " +"`start_simulation` 和 `start_server` 之间的一致性,并使两者之间的转换更容易。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:630 +#: ../../source/ref-changelog.md:1008 msgid "" -"Flower can automatically aggregate losses returned by individual clients," -" but it cannot do the same for metrics in the generic metrics dictionary " -"(the one with the ``accuracy`` key). Metrics dictionaries can contain " -"very different kinds of metrics and even key/value pairs that are not " -"metrics at all, so the framework does not (and can not) know how to " -"handle these automatically." 
-msgstr "" -"Flower 可以自动汇总单个客户端返回的损失值,但无法对通用度量字典中的度量进行同样的处理(即带有 \"准确度 " -"\"键的度量字典)。度量值字典可以包含非常不同种类的度量值,甚至包含根本不是度量值的键/值对,因此框架不知道(也无法知道)如何自动处理这些度量值。" +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" +msgstr "** 支持 Python 3.10** ([#1320](https://github.com/adap/flower/pull/1320))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:632 +#: ../../source/ref-changelog.md:1010 msgid "" -"As users, we need to tell the framework how to handle/aggregate these " -"custom metrics, and we do so by passing metric aggregation functions to " -"the strategy. The strategy will then call these functions whenever it " -"receives fit or evaluate metrics from clients. The two possible functions" -" are ``fit_metrics_aggregation_fn`` and " -"``evaluate_metrics_aggregation_fn``." -msgstr "" -"作为用户,我们需要告诉框架如何处理/聚合这些自定义指标,为此,我们将指标聚合函数传递给策略。然后,只要从客户端接收到拟合或评估指标,策略就会调用这些函数。两个可能的函数是" -" ``fit_metrics_aggregation_fn`` 和 ``evaluate_metrics_aggregation_fn``。" +"The previous Flower release introduced experimental support for Python " +"3.10, this release declares Python 3.10 support as stable." 
+msgstr "上一个 Flower 版本引入了对 Python 3.10 的实验支持,而本版本则宣布对 Python 3.10 的支持为稳定支持。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:634 +#: ../../source/ref-changelog.md:1012 msgid "" -"Let's create a simple weighted averaging function to aggregate the " -"``accuracy`` metric we return from ``evaluate``:" -msgstr "让我们创建一个简单的加权平均函数来汇总从 ``evaluate`` 返回的 ``accuracy`` 指标:" +"**Make all** `Client` **and** `NumPyClient` **methods optional** " +"([#1260](https://github.com/adap/flower/pull/1260), " +"[#1277](https://github.com/adap/flower/pull/1277))" +msgstr "" +"**使所有** `Client` **和** `NumPyClient` **方法成为可选** " +"([#1260](https://github.com/adap/flower/pull/1260), " +"[#1277](https://github.com/adap/flower/pull/1277))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:660 +#: ../../source/ref-changelog.md:1014 msgid "" -"The only thing left to do is to tell the strategy to call this function " -"whenever it receives evaluation metric dictionaries from the clients:" -msgstr "剩下要做的就是告诉策略,每当它从客户端接收到评估度量字典时,都要调用这个函数:" +"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " +"`fit`, and `evaluate` are all optional. This enables writing clients that" +" implement, for example, only `fit`, but no other method. No need to " +"implement `evaluate` when using centralized evaluation!" +msgstr "" +"`Client`/`NumPyClient`的 \"get_properties\"、\"get_parameters\"、\"fit \"和 " +"\"evaluate \"方法都是可选的。这样就可以编写只实现 `fit` 而不实现其他方法的客户端。使用集中评估时,无需实现 " +"`evaluate`!" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:697 +#: ../../source/ref-changelog.md:1016 msgid "" -"We now have a full system that performs federated training and federated " -"evaluation. It uses the ``weighted_average`` function to aggregate custom" -" evaluation metrics and calculates a single ``accuracy`` metric across " -"all clients on the server side." 
+"**Enable passing a** `Server` **instance to** `start_simulation` " +"([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -"我们现在有了一个完整的系统,可以执行联邦训练和联邦评估。它使用 ``weighted_average`` " -"函数汇总自定义评估指标,并在服务器端计算所有客户端的单一 ``accuracy`` 指标。" +"**启用向** `start_simulation` 传递** `Server` 实例 " +"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:699 +#: ../../source/ref-changelog.md:1018 msgid "" -"The other two categories of metrics (``losses_centralized`` and " -"``metrics_centralized``) are still empty because they only apply when " -"centralized evaluation is being used. Part two of the Flower tutorial " -"will cover centralized evaluation." +"Similar to `start_server`, `start_simulation` now accepts a full `Server`" +" instance. This enables users to heavily customize the execution of " +"eperiments and opens the door to running, for example, async FL using the" +" Virtual Client Engine." msgstr "" -"其他两类指标(`losses_centralized`` 和 " -"`metrics_centralized`)仍然是空的,因为它们只适用于集中评估。Flower 教程的第二部分将介绍集中式评估。" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 -msgid "Final remarks" -msgstr "结束语" +"与 `start_server` 类似,`start_simulation` 现在也接受一个完整的 `Server` " +"实例。这使得用户可以对实验的执行进行大量自定义,并为使用虚拟客户端引擎运行异步 FL 等打开了大门。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 +#: ../../source/ref-changelog.md:1020 msgid "" -"Congratulations, you just trained a convolutional neural network, " -"federated over 10 clients! With that, you understand the basics of " -"federated learning with Flower. The same approach you've seen can be used" -" with other machine learning frameworks (not just PyTorch) and tasks (not" -" just CIFAR-10 images classification), for example NLP with Hugging Face " -"Transformers or speech with SpeechBrain." 
+"**Update code examples** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" msgstr "" -"恭喜您,你刚刚训练了一个由 10 个客户端组成的卷积神经网络!这样,你就了解了使用 Flower " -"进行联邦学习的基础知识。你所看到的方法同样适用于其他机器学习框架(不只是 PyTorch)和任务(不只是 CIFAR-10 图像分类),例如使用 " -"Hugging Face Transformers 的 NLP 或使用 SpeechBrain 的语音。" +"**更新代码示例** ([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:715 +#: ../../source/ref-changelog.md:1022 msgid "" -"In the next notebook, we're going to cover some more advanced concepts. " -"Want to customize your strategy? Initialize parameters on the server " -"side? Or evaluate the aggregated model on the server side? We'll cover " -"all this and more in the next tutorial." -msgstr "在下一个笔记中,我们将介绍一些更先进的概念。想定制你的策略吗?在服务器端初始化参数?或者在服务器端评估聚合模型?我们将在下一个教程中介绍所有这些内容以及更多。" +"Many code examples received small or even large maintenance updates, " +"among them are" +msgstr "许多代码示例都进行了小规模甚至大规模的维护更新,其中包括" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:733 -msgid "" -"The `Flower Federated Learning Tutorial - Part 2 " -"`__ goes into more depth about strategies and all " -"the advanced things you can build with them." -msgstr "" -"`Flower 联邦学习教程 - 第 2 部分 `__ 更深入地介绍了策略以及可以使用策略构建的所有高级功能。" +#: ../../source/ref-changelog.md:1024 +msgid "`scikit-learn`" +msgstr "`scikit-learn`" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 -msgid "Use a federated learning strategy" -msgstr "使用联邦学习策略" +#: ../../source/ref-changelog.md:1025 +msgid "`simulation_pytorch`" +msgstr "`simulation_pytorch`" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 -msgid "" -"Welcome to the next part of the federated learning tutorial. 
In previous " -"parts of this tutorial, we introduced federated learning with PyTorch and" -" Flower (`part 1 `__)." -msgstr "" -"欢迎来到联邦学习教程的下一部分。在本教程的前几部分,我们介绍了使用 PyTorch 和 Flower 进行联邦学习(`第 1 部分 " -"`___)。" +#: ../../source/ref-changelog.md:1026 +msgid "`quickstart_pytorch`" +msgstr "`quickstart_pytorch`" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 -msgid "" -"In this notebook, we'll begin to customize the federated learning system " -"we built in the introductory notebook (again, using `Flower " -"`__ and `PyTorch `__)." -msgstr "" -"在本笔记中,我们将开始定制在入门笔记中构建的联邦学习系统(再次使用 `Flower `__ 和 " -"`PyTorch `__)。" +#: ../../source/ref-changelog.md:1027 +msgid "`quickstart_simulation`" +msgstr "`quickstart_simulation`" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:17 -msgid "Let's move beyond FedAvg with Flower strategies!" -msgstr "让我们超越 FedAvg,采用Flower策略!" +#: ../../source/ref-changelog.md:1028 +msgid "`quickstart_tensorflow`" +msgstr "`quickstart_tensorflow`" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:309 -msgid "Strategy customization" -msgstr "策略定制" +#: ../../source/ref-changelog.md:1029 +msgid "`advanced_tensorflow`" +msgstr "`advanced_tensorflow`" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:311 +#: ../../source/ref-changelog.md:1031 msgid "" -"So far, everything should look familiar if you've worked through the " -"introductory notebook. With that, we're ready to introduce a number of " -"new features." 
-msgstr "到目前为止,如果您已经阅读过入门笔记本,那么一切都应该很熟悉了。接下来,我们将介绍一些新功能。" - -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 -msgid "Server-side parameter **initialization**" -msgstr "服务器端参数 **初始化**" +"**Remove the obsolete simulation example** " +"([#1328](https://github.com/adap/flower/pull/1328))" +msgstr "**删除过时的模拟示例** ([#1328](https://github.com/adap/flower/pull/1328))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:325 +#: ../../source/ref-changelog.md:1033 msgid "" -"Flower, by default, initializes the global model by asking one random " -"client for the initial parameters. In many cases, we want more control " -"over parameter initialization though. Flower therefore allows you to " -"directly pass the initial parameters to the Strategy:" +"Removes the obsolete `simulation` example and renames " +"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " +"naming of `simulation_pytorch`" msgstr "" -"默认情况下,Flower 会通过向一个随机客户端询问初始参数来初始化全局模型。但在许多情况下,我们需要对参数初始化进行更多控制。因此,Flower" -" 允许您直接将初始参数传递给策略:" +"删除过时的 \"simulation \"示例,并将 \"quickstart_simulation \"重命名为 " +"\"simulation_tensorflow\",使其与 \"simulation_pytorch \"的命名一致" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:370 +#: ../../source/ref-changelog.md:1035 msgid "" -"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" -" from asking one of the clients for the initial parameters. If we look " -"closely, we can see that the logs do not show any calls to the " -"``FlowerClient.get_parameters`` method." 
+"**Update documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" msgstr "" -"向 ``FedAvg`` 策略传递 ``initial_parameters`` 可以防止 Flower " -"向其中一个客户端询问初始参数。如果我们仔细观察,就会发现日志中没有显示对 ``FlowerClient.get_parameters`` " -"方法的任何调用。" - -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:382 -msgid "Starting with a customized strategy" -msgstr "从定制战略开始" +"**更新文档** ([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:384 +#: ../../source/ref-changelog.md:1037 msgid "" -"We've seen the function ``start_simulation`` before. It accepts a number " -"of arguments, amongst them the ``client_fn`` used to create " -"``FlowerClient`` instances, the number of clients to simulate " -"``num_clients``, the number of rounds ``num_rounds``, and the strategy." 
+"One substantial documentation update fixes multiple smaller rendering " +"issues, makes titles more succinct to improve navigation, removes a " +"deprecated library, updates documentation dependencies, includes the " +"`flwr.common` module in the API reference, includes support for markdown-" +"based documentation, migrates the changelog from `.rst` to `.md`, and " +"fixes a number of smaller details!" msgstr "" -"我们以前见过函数 ``start_simulation``。它接受许多参数,其中包括用于创建 ``FlowerClient`` 实例的 " -"``client_fn``、要模拟的客户数量 ``num_clients``、回合数 ``num_rounds``和策略。" - -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:386 -msgid "" -"The strategy encapsulates the federated learning approach/algorithm, for " -"example, ``FedAvg`` or ``FedAdagrad``. Let's try to use a different " -"strategy this time:" -msgstr "该策略封装了联邦学习方法/算法,例如`FedAvg``或`FedAdagrad``。这次让我们尝试使用不同的策略:" +"其中一个实质性的文档更新修复了多个较小的渲染问题,使标题更加简洁以改善导航,删除了一个已废弃的库,更新了文档依赖关系,在 API 参考中包含了 " +"`flwr.common` 模块,包含了对基于 markdown 的文档的支持,将更新日志从 `.rst` 移植到了 " +"`.md`,并修复了一些较小的细节!" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:424 -msgid "Server-side parameter **evaluation**" -msgstr "服务器端参数**评估**" +#: ../../source/ref-changelog.md:1039 ../../source/ref-changelog.md:1094 +#: ../../source/ref-changelog.md:1163 ../../source/ref-changelog.md:1202 +msgid "**Minor updates**" +msgstr "**小规模更新**" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:426 +#: ../../source/ref-changelog.md:1041 msgid "" -"Flower can evaluate the aggregated model on the server-side or on the " -"client-side. Client-side and server-side evaluation are similar in some " -"ways, but different in others." 
-msgstr "Flower 可以在服务器端或客户端评估聚合模型。客户端和服务器端评估在某些方面相似,但也有不同之处。" +"Add round number to fit and evaluate log messages " +"([#1266](https://github.com/adap/flower/pull/1266))" +msgstr "添加四舍五入数字,以适应和评估日志信息([#1266](https://github.com/adap/flower/pull/1266))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:428 +#: ../../source/ref-changelog.md:1042 msgid "" -"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " -"simple: it works the same way that evaluation in centralized machine " -"learning does. If there is a server-side dataset that can be used for " -"evaluation purposes, then that's great. We can evaluate the newly " -"aggregated model after each round of training without having to send the " -"model to clients. We're also fortunate in the sense that our entire " -"evaluation dataset is available at all times." -msgstr "**集中评估**(或*服务器端评估*)在概念上很简单:它的工作方式与集中式机器学习中的评估方式相同。如果有一个服务器端数据集可用于评估目的,那就太好了。我们可以在每一轮训练后对新聚合的模型进行评估,而无需将模型发送给客户端。我们也很幸运,因为我们的整个评估数据集随时可用。" +"Add secure gRPC connection to the `advanced_tensorflow` code example " +"([#847](https://github.com/adap/flower/pull/847))" +msgstr "" +"为 `advanced_tensorflow` 代码示例添加安全 gRPC 连接 " +"([#847](https://github.com/adap/flower/pull/847))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:430 +#: ../../source/ref-changelog.md:1043 msgid "" -"**Federated Evaluation** (or *client-side evaluation*) is more complex, " -"but also more powerful: it doesn't require a centralized dataset and " -"allows us to evaluate models over a larger set of data, which often " -"yields more realistic evaluation results. In fact, many scenarios require" -" us to use **Federated Evaluation** if we want to get representative " -"evaluation results at all. 
But this power comes at a cost: once we start " -"to evaluate on the client side, we should be aware that our evaluation " -"dataset can change over consecutive rounds of learning if those clients " -"are not always available. Moreover, the dataset held by each client can " -"also change over consecutive rounds. This can lead to evaluation results " -"that are not stable, so even if we would not change the model, we'd see " -"our evaluation results fluctuate over consecutive rounds." -msgstr "**联邦评估**(或*客户端评估*)更为复杂,但也更为强大:它不需要集中的数据集,允许我们在更大的数据集上对模型进行评估,这通常会产生更真实的评估结果。事实上,如果我们想得到有代表性的评估结果,很多情况下都需要使用**联邦评估**。但是,这种能力是有代价的:一旦我们开始在客户端进行评估,我们就应该意识到,如果这些客户端并不总是可用,我们的评估数据集可能会在连续几轮学习中发生变化。此外,每个客户端所拥有的数据集也可能在连续几轮学习中发生变化。这可能会导致评估结果不稳定,因此即使我们不改变模型,也会看到评估结果在连续几轮中波动。" +"Update developer tooling " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" +msgstr "" +"更新开发人员工具([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310)" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:433 +#: ../../source/ref-changelog.md:1044 msgid "" -"We've seen how federated evaluation works on the client side (i.e., by " -"implementing the ``evaluate`` method in ``FlowerClient``). 
Now let's see " -"how we can evaluate aggregated model parameters on the server-side:" +"Rename ProtoBuf messages to improve consistency " +"([#1214](https://github.com/adap/flower/pull/1214), " +"[#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -"我们已经了解了联邦评估如何在客户端工作(即通过在 ``FlowerClient`` 中实现 ``evaluate`` " -"方法)。现在让我们看看如何在服务器端评估聚合模型参数:" +"重命名 ProtoBuf 消息以提高一致性([#1214](https://github.com/adap/flower/pull/1214), " +"[#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259)" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:490 -msgid "Sending/receiving arbitrary values to/from clients" -msgstr "向/从客户端发送/接收任意值" +#: ../../source/ref-changelog.md:1046 +msgid "v0.19.0 (2022-05-18)" +msgstr "v0.19.0 (2022-05-18)" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:492 +#: ../../source/ref-changelog.md:1050 msgid "" -"In some situations, we want to configure client-side execution (training," -" evaluation) from the server-side. One example for that is the server " -"asking the clients to train for a certain number of local epochs. Flower " -"provides a way to send configuration values from the server to the " -"clients using a dictionary. Let's look at an example where the clients " -"receive values from the server through the ``config`` parameter in " -"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " -"method receives the configuration dictionary through the ``config`` " -"parameter and can then read values from this dictionary. 
In this example," -" it reads ``server_round`` and ``local_epochs`` and uses those values to " -"improve the logging and configure the number of local training epochs:" +"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" msgstr "" -"在某些情况下,我们希望从服务器端配置客户端的执行(训练、评估)。其中一个例子就是服务器要求客户端训练一定数量的本地遍历。Flower " -"提供了一种使用字典从服务器向客户端发送配置值的方法。让我们来看一个例子:客户端通过 ``fit`` 中的 ``config`` " -"参数从服务器接收配置值(``evaluate`` 中也有 ``config`` 参数)。``fit`` 方法通过 ``config`` " -"参数接收配置字典,然后从字典中读取值。在本例中,它读取了 ``server_round`` 和 " -"``local_epochs``,并使用这些值来改进日志记录和配置本地训练遍历的数量:" +"**Flower Baselines(预览): FedOpt、FedBN、FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:546 +#: ../../source/ref-changelog.md:1052 +#, fuzzy msgid "" -"So how can we send this config dictionary from server to clients? The " -"built-in Flower Strategies provide way to do this, and it works similarly" -" to the way server-side evaluation works. We provide a function to the " -"strategy, and the strategy calls this function for every round of " -"federated learning:" +"The first preview release of Flower Baselines has arrived! We're " +"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " +"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " +"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " +"With this first preview release we're also inviting the community to " +"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" +"contribute-baselines.html)." 
msgstr "" -"那么,如何将配置字典从服务器发送到客户端呢?内置的 \"Flower策略\"(Flower " -"Strategies)提供了这样的方法,其工作原理与服务器端评估的工作原理类似。我们为策略提供一个函数,策略会在每一轮联邦学习中调用这个函数:" +"Flower Baselines 的第一个预览版已经发布!我们通过实现 " +"FedOpt(FedYogi、FedAdam、FedAdagrad)、FedBN 和 FedAvgM 来启动 Flower " +"Baselines。请查阅文档了解如何使用 [Flower Baselines](https://flower.ai/docs/using-" +"baselines.html)。在首次发布预览版时,我们还邀请社区成员[贡献自己的Baselines](https://flower.ai/docs" +"/contributing-baselines.html)。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:576 +#: ../../source/ref-changelog.md:1054 msgid "" -"Next, we'll just pass this function to the FedAvg strategy before " -"starting the simulation:" -msgstr "接下来,我们只需在开始模拟前将此函数传递给 FedAvg 策略即可:" +"**C++ client SDK (preview) and code example** " +"([#1111](https://github.com/adap/flower/pull/1111))" +msgstr "**C++客户端SDK(预览版)和代码示例**([#1111](https://github.com/adap/flower/pull/1111))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 +#: ../../source/ref-changelog.md:1056 msgid "" -"As we can see, the client logs now include the current round of federated" -" learning (which they read from the ``config`` dictionary). We can also " -"configure local training to run for one epoch during the first and second" -" round of federated learning, and then for two epochs during the third " -"round." +"Preview support for Flower clients written in C++. The C++ preview " +"includes a Flower client SDK and a quickstart code example that " +"demonstrates a simple C++ client using the SDK." msgstr "" -"我们可以看到,客户端日志现在包含了当前一轮的联邦学习(从 ``config`` " -"字典中读取)。我们还可以将本地训练配置为在第一轮和第二轮联邦学习期间运行一个遍历,然后在第三轮联邦学习期间运行两个遍历。" +"预览版支持用 C++ 编写的 Flower 客户端。C++ 预览版包括一个 Flower 客户端 SDK 和一个快速入门代码示例,使用 SDK " +"演示了一个简单的 C++ 客户端。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 +#: ../../source/ref-changelog.md:1058 msgid "" -"Clients can also return arbitrary values to the server. 
To do so, they " -"return a dictionary from ``fit`` and/or ``evaluate``. We have seen and " -"used this concept throughout this notebook without mentioning it " -"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" -" key/value pair as the third return value in ``evaluate``." +"**Add experimental support for Python 3.10 and Python 3.11** " +"([#1135](https://github.com/adap/flower/pull/1135))" msgstr "" -"客户端还可以向服务器返回任意值。为此,它们会从 ``fit`` 和/或 ``evaluate`` " -"返回一个字典。我们在本笔记中看到并使用了这一概念,但并未明确提及:我们的 ``FlowerClient`` 返回一个包含自定义键/值对的字典,作为" -" ``evaluate`` 中的第三个返回值。" +"** 增加对 Python 3.10 和 Python 3.11 的实验支持** " +"([#1135](https://github.com/adap/flower/pull/1135))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:627 -msgid "Scaling federated learning" -msgstr "扩大联邦学习的规模" +#: ../../source/ref-changelog.md:1060 +msgid "" +"Python 3.10 is the latest stable release of Python and Python 3.11 is due" +" to be released in October. This Flower release adds experimental support" +" for both Python versions." +msgstr "" +"Python 3.10 是 Python 的最新稳定版本,Python 3.11 将于 10 月份发布。Flower 版本增加了对这两个 " +"Python 版本的实验支持。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:629 +#: ../../source/ref-changelog.md:1062 msgid "" -"As a last step in this notebook, let's see how we can use Flower to " -"experiment with a large number of clients." -msgstr "作为本笔记的最后一步,让我们看看如何使用 Flower 对大量客户端进行实验。" +"**Aggregate custom metrics through user-provided functions** " +"([#1144](https://github.com/adap/flower/pull/1144))" +msgstr "**通过用户提供的函数聚合自定义指标**([#1144](https://github.com/adap/flower/pull/1144))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:651 -#, python-format +#: ../../source/ref-changelog.md:1064 msgid "" -"We now have 1000 partitions, each holding 45 training and 5 validation " -"examples. 
Given that the number of training examples on each client is " -"quite small, we should probably train the model a bit longer, so we " -"configure the clients to perform 3 local training epochs. We should also " -"adjust the fraction of clients selected for training during each round " -"(we don't want all 1000 clients participating in every round), so we " -"adjust ``fraction_fit`` to ``0.05``, which means that only 5% of " -"available clients (so 50 clients) will be selected for training each " -"round:" +"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" +" customize the strategy. Built-in strategies support two new arguments, " +"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " +"allow passing custom metric aggregation functions." msgstr "" -"现在我们有 1000 个分区,每个分区有 45 个训练数据和 5 " -"个验证数据。鉴于每个客户端上的训练示例数量较少,我们可能需要对模型进行更长时间的训练,因此我们将客户端配置为执行 3 " -"个本地训练遍历。我们还应该调整每轮训练中被选中的客户端的比例(我们不希望每轮训练都有 1000 个客户端参与),因此我们将 " -"``fraction_fit`` 调整为 ``0.05``,这意味着每轮训练只选中 5%的可用客户端(即 50 个客户端):" +"现在无需定制策略即可聚合自定义度量(如`准确度`)。内置策略支持两个新参数:`fit_metrics_aggregation_fn` " +"和`evaluate_metrics_aggregation_fn`,允许传递自定义度量聚合函数。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:699 +#: ../../source/ref-changelog.md:1066 msgid "" -"In this notebook, we've seen how we can gradually enhance our system by " -"customizing the strategy, initializing parameters on the server side, " -"choosing a different strategy, and evaluating models on the server-side. " -"That's quite a bit of flexibility with so little code, right?" -msgstr "在本笔记中,我们看到了如何通过自定义策略、在服务器端初始化参数、选择不同的策略以及在服务器端评估模型来逐步增强我们的系统。用这么少的代码就能实现这么大的灵活性,不是吗?" 
+"**User-configurable round timeout** " +"([#1162](https://github.com/adap/flower/pull/1162))" +msgstr "**用户可配置的回合超时**([#1162](https://github.com/adap/flower/pull/1162))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:701 +#: ../../source/ref-changelog.md:1068 msgid "" -"In the later sections, we've seen how we can communicate arbitrary values" -" between server and clients to fully customize client-side execution. " -"With that capability, we built a large-scale Federated Learning " -"simulation using the Flower Virtual Client Engine and ran an experiment " -"involving 1000 clients in the same workload - all in a Jupyter Notebook!" +"A new configuration value allows the round timeout to be set for " +"`start_server` and `start_simulation`. If the `config` dictionary " +"contains a `round_timeout` key (with a `float` value in seconds), the " +"server will wait *at least* `round_timeout` seconds before it closes the " +"connection." msgstr "" -"在后面的章节中,我们将看到如何在服务器和客户端之间传递任意值,以完全自定义客户端执行。有了这种能力,我们使用 Flower " -"虚拟客户端引擎构建了一个大规模的联邦学习模拟,并在 Jupyter Notebook 中进行了一次实验,在相同的工作负载中运行了 1000 " -"个客户端!" +"新的配置值允许为 `start_server` 和 `start_simulation` 设置回合超时。如果 `config` 字典中包含一个 " +"`round_timeout` 键(以秒为单位的 `float`值),服务器将至少等待 ** `round_timeout` 秒后才关闭连接。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:719 +#: ../../source/ref-changelog.md:1070 msgid "" -"The `Flower Federated Learning Tutorial - Part 3 " -"`__ shows how to build a fully custom ``Strategy`` from " -"scratch." +"**Enable both federated evaluation and centralized evaluation to be used " +"at the same time in all built-in strategies** " +"([#1091](https://github.com/adap/flower/pull/1091))" msgstr "" -"`Flower 联邦学习教程 - 第 3 部分 `__ 展示了如何从头开始构建完全自定义的 \"策略\"。" +"**允许在所有内置策略中同时使用联邦评价和集中评估** " +"([#1091](https://github.com/adap/flower/pull/1091))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 -msgid "What is Federated Learning?" 
-msgstr "什么是联邦学习?" +#: ../../source/ref-changelog.md:1072 +msgid "" +"Built-in strategies can now perform both federated evaluation (i.e., " +"client-side) and centralized evaluation (i.e., server-side) in the same " +"round. Federated evaluation can be disabled by setting `fraction_eval` to" +" `0.0`." +msgstr "" +"内置策略现在可以在同一轮中同时执行联邦评估(即客户端)和集中评估(即服务器端)。可以通过将 `fraction_eval` 设置为 " +"`0.0`来禁用联邦评估。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 +#: ../../source/ref-changelog.md:1074 msgid "" -"In this tutorial, you will learn what federated learning is, build your " -"first system in Flower, and gradually extend it. If you work through all " -"parts of the tutorial, you will be able to build advanced federated " -"learning systems that approach the current state of the art in the field." +"**Two new Jupyter Notebook tutorials** " +"([#1141](https://github.com/adap/flower/pull/1141))" msgstr "" -"在本教程中,你将了解什么是联邦学习,用 Flower " -"搭建第一个系统,并逐步对其进行扩展。如果你能完成本教程的所有部分,你就能构建高级的联邦学习系统,从而接近该领域当前的技术水平。" +"**两本新的 Jupyter Notebook 教程** " +"([#1141](https://github.com/adap/flower/pull/1141))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 +#: ../../source/ref-changelog.md:1076 msgid "" -"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " -"federated learning. Only a basic understanding of data science and Python" -" programming is assumed." -msgstr "🧑‍🏫 本教程从零开始,不要求熟悉联邦学习。仅假定对数据科学和 Python 编程有基本了解。" +"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " +"basic and intermediate Flower features:" +msgstr "两本 Jupyter Notebook 教程(与 Google Colab 兼容)介绍了 Flower 的基本和中级功能:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 +#: ../../source/ref-changelog.md:1078 msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the open-source Flower community on Slack to connect, ask questions, and " -"get help: `Join Slack `__ 🌼 We'd love to " -"hear from you in the ``#introductions`` channel! 
And if anything is " -"unclear, head over to the ``#questions`` channel." +"*An Introduction to Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" msgstr "" -"`Star Flower on GitHub `__ ⭐️ 并加入 Slack " -"上的开源 Flower 社区,进行交流、提问并获得帮助: 加入 Slack `__ 🌼" -" 我们希望在 ``#introductions`` 频道听到您的声音!如果有任何不清楚的地方,请访问 ``#questions`` 频道。" +"*联邦学习简介*: [在 Colab " +"中打开](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 -msgid "Classic machine learning" -msgstr "经典机器学习" +#: ../../source/ref-changelog.md:1080 +msgid "" +"*Using Strategies in Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" +msgstr "" +"*在联邦学习中使用策略*: [在 Colab " +"中打开](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 +#: ../../source/ref-changelog.md:1082 msgid "" -"Before we begin to discuss federated learning, let us quickly recap how " -"most machine learning works today." -msgstr "在开始讨论联邦学习之前,让我们先快速回顾一下目前大多数机器学习的工作原理。" +"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " +"([#1076](https://github.com/adap/flower/pull/1076))" +msgstr "" +"**新的 FedAvgM 策略(带服务器动量的联邦平均)** " +"([#1076](https://github.com/adap/flower/pull/1076))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 +#: ../../source/ref-changelog.md:1084 msgid "" -"In machine learning, we have a model, and we have data. The model could " -"be a neural network (as depicted here), or something else, like classical" -" linear regression." 
-msgstr "在机器学习中,我们有一个模型和数据。模型可以是一个神经网络(如图所示),也可以是其他东西,比如经典的线性回归。" +"The new `FedAvgM` strategy implements Federated Averaging with Server " +"Momentum \\[Hsu et al., 2019\\]." +msgstr "新的 \"FedAvgM \"策略实现了带服务器动量的联邦平均[Hsu et al., 2019\\]." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|d8bf04f23d9b46d8a23cc6f4887d7873|" +#: ../../source/ref-changelog.md:1086 +msgid "" +"**New advanced PyTorch code example** " +"([#1007](https://github.com/adap/flower/pull/1007))" +msgstr "**新的 PyTorch 高级代码示例** ([#1007](https://github.com/adap/flower/pull/1007))" + +#: ../../source/ref-changelog.md:1088 +msgid "" +"A new code example (`advanced_pytorch`) demonstrates advanced Flower " +"concepts with PyTorch." +msgstr "新代码示例 (`advanced_pytorch`) 演示了 PyTorch 的高级 Flower 概念。" + +#: ../../source/ref-changelog.md:1090 +msgid "" +"**New JAX code example** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" msgstr "" +"**新的 JAX 代码示例**([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143)" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 -msgid "Model and data" -msgstr "模型和数据" +#: ../../source/ref-changelog.md:1092 +msgid "" +"A new code example (`jax_from_centralized_to_federated`) shows federated " +"learning with JAX and Flower." +msgstr "新代码示例(`jax_from_centralized_to_federated`)展示了使用 JAX 和 Flower 的联邦学习。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 +#: ../../source/ref-changelog.md:1096 msgid "" -"We train the model using the data to perform a useful task. A task could " -"be to detect objects in images, transcribe an audio recording, or play a " -"game like Go." 
-msgstr "我们使用数据来训练模型,以完成一项有用的任务。任务可以是检测图像中的物体、转录音频或玩围棋等游戏。" +"New option to keep Ray running if Ray was already initialized in " +"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" +msgstr "" +"新增选项,用于在 \"start_simulation\"(开始模拟)中已初始化 Ray 的情况下保持 Ray " +"运行([#1177](https://github.com/adap/flower/pull/1177))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|5aa1711387d74d0f8b9c499e1a51627e|" +#: ../../source/ref-changelog.md:1097 +msgid "" +"Add support for custom `ClientManager` as a `start_simulation` parameter " +"([#1171](https://github.com/adap/flower/pull/1171))" msgstr "" +"添加对自定义 \"客户端管理器 \"作为 \"start_simulation " +"\"参数的支持([#1171](https://github.com/adap/flower/pull/1171))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 -msgid "Train model using data" -msgstr "使用数据训练模型" +#: ../../source/ref-changelog.md:1098 +msgid "" +"New documentation for [implementing " +"strategies](https://flower.ai/docs/framework/how-to-implement-" +"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175))" +msgstr "" +"[实施战略](https://flower.ai/docs/framework/how-to-implement-strategies.html)" +" 的新文件([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175)" + +#: ../../source/ref-changelog.md:1099 +msgid "" +"New mobile-friendly documentation theme " +"([#1174](https://github.com/adap/flower/pull/1174))" +msgstr "新的移动友好型文档主题 ([#1174](https://github.com/adap/flower/pull/1174))" + +#: ../../source/ref-changelog.md:1100 +msgid "" +"Limit version range for (optional) `ray` dependency to include only " +"compatible releases (`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" +msgstr "" +"限制(可选)`ray`依赖的版本范围,使其仅包含兼容版本(`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" + +#: ../../source/ref-changelog.md:1104 +msgid "" +"**Remove deprecated support for 
Python 3.6** " +"([#871](https://github.com/adap/flower/pull/871))" +msgstr "**删除对 Python 3.6 的过时支持** ([#871](https://github.com/adap/flower/pull/871))" + +#: ../../source/ref-changelog.md:1105 +msgid "" +"**Remove deprecated KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" +msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" + +#: ../../source/ref-changelog.md:1106 +msgid "" +"**Remove deprecated no-op extra installs** " +"([#973](https://github.com/adap/flower/pull/973))" +msgstr "**移除过时的不操作额外安装** ([#973](https://github.com/adap/flower/pull/973))" + +#: ../../source/ref-changelog.md:1107 +msgid "" +"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " +"([#869](https://github.com/adap/flower/pull/869))" +msgstr "" +"**从** `FitRes` **和** `EvaluateRes` 中移除已废弃的 proto 字段 " +"([#869](https://github.com/adap/flower/pull/869))" + +#: ../../source/ref-changelog.md:1108 +msgid "" +"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " +"([#1107](https://github.com/adap/flower/pull/1107))" +msgstr "" +"**移除过时的 QffedAvg 策略(由 QFedAvg 取代)** " +"([#1107](https://github.com/adap/flower/pull/1107))" + +#: ../../source/ref-changelog.md:1109 +msgid "" +"**Remove deprecated DefaultStrategy strategy** " +"([#1142](https://github.com/adap/flower/pull/1142))" +msgstr "" +"**删除过时的 DefaultStrategy 策略** " +"([#1142](https://github.com/adap/flower/pull/1142))" + +#: ../../source/ref-changelog.md:1110 +msgid "" +"**Remove deprecated support for eval_fn accuracy return value** " +"([#1142](https://github.com/adap/flower/pull/1142))" +msgstr "" +"**删除已过时的对 eval_fn 返回值准确性的支持** " +"([#1142](https://github.com/adap/flower/pull/1142))" + +#: ../../source/ref-changelog.md:1111 +msgid "" +"**Remove deprecated support for passing initial parameters as NumPy " +"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" +msgstr "" +"**移除对以 NumPy ndarrays 传递初始参数的过时支持** " 
+"([#1142](https://github.com/adap/flower/pull/1142))" + +#: ../../source/ref-changelog.md:1113 +msgid "v0.18.0 (2022-02-28)" +msgstr "v0.18.0 (2022-02-28)" + +#: ../../source/ref-changelog.md:1117 +msgid "" +"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " +"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" +msgstr "" +"**改进了虚拟客户端引擎与 Jupyter Notebook / Google Colab 的兼容性** " +"([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" + +#: ../../source/ref-changelog.md:1119 +msgid "" +"Simulations (using the Virtual Client Engine through `start_simulation`) " +"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " +"installing Flower with the `simulation` extra (`pip install " +"'flwr[simulation]'`)." +msgstr "" +"通过 `start_simulation` 在 Jupyter 笔记本(包括 Google Colab)上安装 Flower 并附加 " +"`simulation` (`pip install 'flwr[simulation]'`)后,模拟(通过 `start_simulation`" +" 使用虚拟客户端引擎)现在可以更流畅地运行。" + +#: ../../source/ref-changelog.md:1121 +msgid "" +"**New Jupyter Notebook code example** " +"([#833](https://github.com/adap/flower/pull/833))" +msgstr "" +"**新的 Jupyter Notebook 代码示例** " +"([#833](https://github.com/adap/flower/pull/833))" + +#: ../../source/ref-changelog.md:1123 +msgid "" +"A new code example (`quickstart_simulation`) demonstrates Flower " +"simulations using the Virtual Client Engine through Jupyter Notebook " +"(incl. Google Colab)." 
+msgstr "" +"新代码示例(`quickstart_simulation`)通过 Jupyter Notebook(包括 Google " +"Colab)演示了使用虚拟客户端引擎进行 Flower 模拟。" + +#: ../../source/ref-changelog.md:1125 +msgid "" +"**Client properties (feature preview)** " +"([#795](https://github.com/adap/flower/pull/795))" +msgstr "**客户端属性(功能预览)** ([#795](https://github.com/adap/flower/pull/795))" + +#: ../../source/ref-changelog.md:1127 +msgid "" +"Clients can implement a new method `get_properties` to enable server-side" +" strategies to query client properties." +msgstr "客户端可以实现一个新方法 `get_properties`,以启用服务器端策略来查询客户端属性。" + +#: ../../source/ref-changelog.md:1129 +msgid "" +"**Experimental Android support with TFLite** " +"([#865](https://github.com/adap/flower/pull/865))" +msgstr "** 使用 TFLite 实验性支持安卓系统** ([#865](https://github.com/adap/flower/pull/865))" + +#: ../../source/ref-changelog.md:1131 +msgid "" +"Android support has finally arrived in `main`! Flower is both client-" +"agnostic and framework-agnostic by design. One can integrate arbitrary " +"client platforms and with this release, using Flower on Android has " +"become a lot easier." +msgstr "" +"`main`终于支持 Android 了!Flower 的设计与客户端和框架无关。我们可以集成任意客户端平台,有了这个版本,在安卓系统上使用 " +"Flower 就变得更容易了。" + +#: ../../source/ref-changelog.md:1133 +msgid "" +"The example uses TFLite on the client side, along with a new " +"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " +"still experimental, but they are a first step towards a fully-fledged " +"Android SDK and a unified `FedAvg` implementation that integrated the new" +" functionality from `FedAvgAndroid`." 
+msgstr "" +"该示例在客户端使用了 TFLite 以及新的 `FedAvgAndroid`策略。Android 客户端和 " +"`FedAvgAndroid`仍处于试验阶段,但这是向成熟的 Android SDK 和集成了 `FedAvgAndroid`新功能的统一 " +"`FedAvg`实现迈出的第一步。" + +#: ../../source/ref-changelog.md:1135 +msgid "" +"**Make gRPC keepalive time user-configurable and decrease default " +"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" +msgstr "" +"**使 gRPC 保持连接时间可由用户配置,并缩短默认保持连接时间** " +"([#1069](https://github.com/adap/flower/pull/1069))" + +#: ../../source/ref-changelog.md:1137 +msgid "" +"The default gRPC keepalive time has been reduced to increase the " +"compatibility of Flower with more cloud environments (for example, " +"Microsoft Azure). Users can configure the keepalive time to customize the" +" gRPC stack based on specific requirements." +msgstr "" +"为提高 Flower 与更多云环境(如 Microsoft Azure)的兼容性,缩短了默认 gRPC 保持时间。用户可以根据具体要求配置 " +"keepalive 时间,自定义 gRPC 堆栈。" + +#: ../../source/ref-changelog.md:1139 +msgid "" +"**New differential privacy example using Opacus and PyTorch** " +"([#805](https://github.com/adap/flower/pull/805))" +msgstr "" +"**使用 Opacus 和 PyTorch 的新差分隐私示例** " +"([#805](https://github.com/adap/flower/pull/805))" + +#: ../../source/ref-changelog.md:1141 +msgid "" +"A new code example (`opacus`) demonstrates differentially-private " +"federated learning with Opacus, PyTorch, and Flower." +msgstr "一个新的代码示例(\"opacus\")演示了使用 Opacus、PyTorch 和 Flower 进行差分隐私的联邦学习。" + +#: ../../source/ref-changelog.md:1143 +msgid "" +"**New Hugging Face Transformers code example** " +"([#863](https://github.com/adap/flower/pull/863))" +msgstr "" +"**新的Hugging Face Transformers代码示例** " +"([#863](https://github.com/adap/flower/pull/863))" + +#: ../../source/ref-changelog.md:1145 +msgid "" +"A new code example (`quickstart_huggingface`) demonstrates usage of " +"Hugging Face Transformers with Flower." 
+msgstr "新的代码示例(`quickstart_huggingface`)证明了结合Flower和Hugging Face Transformers的实用性。" + +#: ../../source/ref-changelog.md:1147 +msgid "" +"**New MLCube code example** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" +msgstr "" +"**新的 MLCube 代码示例** ([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" + +#: ../../source/ref-changelog.md:1149 +msgid "" +"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " +"with Flower." +msgstr "新代码示例(\"quickstart_mlcube\")演示了 MLCube 与 Flower 的用法。" + +#: ../../source/ref-changelog.md:1151 +msgid "" +"**SSL-enabled server and client** " +"([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" +msgstr "" +"** 支持 SSL 的服务器和客户端** ([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" + +#: ../../source/ref-changelog.md:1153 +msgid "" +"SSL enables secure encrypted connections between clients and servers. " +"This release open-sources the Flower secure gRPC implementation to make " +"encrypted communication channels accessible to all Flower users." 
+msgstr "SSL 可实现客户端与服务器之间的安全加密连接。该版本开源了 Flower 安全 gRPC 实现,使所有 Flower 用户都能访问加密通信通道。" + +#: ../../source/ref-changelog.md:1155 +msgid "" +"**Updated** `FedAdam` **and** `FedYogi` **strategies** " +"([#885](https://github.com/adap/flower/pull/885), " +"[#895](https://github.com/adap/flower/pull/895))" +msgstr "" +"**更新**`FedAdam`**和**`FedYogi`**战略** " +"([#885](https://github.com/adap/flower/pull/885), " +"[#895](https://github.com/adap/flower/pull/895))" + +#: ../../source/ref-changelog.md:1157 +msgid "" +"`FedAdam` and `FedAdam` match the latest version of the Adaptive " +"Federated Optimization paper." +msgstr "FedAdam \"和 \"FedAdam \"与最新版本的 \"自适应联邦优化 \"论文相匹配。" + +#: ../../source/ref-changelog.md:1159 +msgid "" +"**Initialize** `start_simulation` **with a list of client IDs** " +"([#860](https://github.com/adap/flower/pull/860))" +msgstr "" +"**初始化** `start_simulation` **使用客户端 ID 列表** " +"([#860](https://github.com/adap/flower/pull/860))" + +#: ../../source/ref-changelog.md:1161 +msgid "" +"`start_simulation` can now be called with a list of client IDs " +"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " +"`client_fn` whenever a client needs to be initialized, which can make it " +"easier to load data partitions that are not accessible through `int` " +"identifiers." 
+msgstr "" +"现在可以使用客户端 ID 列表(`clients_ids`,类型:`List[str]`)调用 " +"`start_simulation`。每当需要初始化客户端时,这些 ID 就会被传递到 `client_fn` 中,这样就能更轻松地加载无法通过 " +"`int` 标识符访问的数据分区。" + +#: ../../source/ref-changelog.md:1165 +msgid "" +"Update `num_examples` calculation in PyTorch code examples in " +"([#909](https://github.com/adap/flower/pull/909))" +msgstr "" +"更新 PyTorch 代码示例中的 \"num_examples \"计算 " +"([#909](https://github.com/adap/flower/pull/909))" + +#: ../../source/ref-changelog.md:1166 +msgid "" +"Expose Flower version through `flwr.__version__` " +"([#952](https://github.com/adap/flower/pull/952))" +msgstr "" +"通过 `flwr.__version__` 公开 Flower 版本 " +"([#952](https://github.com/adap/flower/pull/952))" + +#: ../../source/ref-changelog.md:1167 +msgid "" +"`start_server` in `app.py` now returns a `History` object containing " +"metrics from training ([#974](https://github.com/adap/flower/pull/974))" +msgstr "" +"`app.py`中的 `start_server`现在会返回一个 `History` " +"对象,其中包含训练中的指标([#974](https://github.com/adap/flower/pull/974))" + +#: ../../source/ref-changelog.md:1168 +msgid "" +"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " +"([#978](https://github.com/adap/flower/pull/978))" +msgstr "" +"使 `max_workers`(由 " +"`ThreadPoolExecutor`使用)可配置([#978](https://github.com/adap/flower/pull/978))" + +#: ../../source/ref-changelog.md:1169 +msgid "" +"Increase sleep time after server start to three seconds in all code " +"examples ([#1086](https://github.com/adap/flower/pull/1086))" +msgstr "在所有代码示例中,将服务器启动后的休眠时间延长至三秒([#1086](https://github.com/adap/flower/pull/1086))" + +#: ../../source/ref-changelog.md:1170 +msgid "" +"Added a new FAQ section to the documentation " +"([#948](https://github.com/adap/flower/pull/948))" +msgstr "在文档中添加了新的常见问题部分 ([#948](https://github.com/adap/flower/pull/948))" + +#: ../../source/ref-changelog.md:1171 +msgid "" +"And many more under-the-hood changes, library updates, documentation " +"changes, and tooling improvements!" 
+msgstr "还有更多底层更改、库更新、文档更改和工具改进!" + +#: ../../source/ref-changelog.md:1175 +msgid "" +"**Removed** `flwr_example` **and** `flwr_experimental` **from release " +"build** ([#869](https://github.com/adap/flower/pull/869))" +msgstr "" +"**从发布版中删除**`flwr_example`**和**`flwr_experimental`** " +"([#869](https://github.com/adap/flower/pull/869))" + +#: ../../source/ref-changelog.md:1177 +msgid "" +"The packages `flwr_example` and `flwr_experimental` have been deprecated " +"since Flower 0.12.0 and they are not longer included in Flower release " +"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " +"an upcoming release." +msgstr "" +"自 Flower 0.12.0 起,软件包 `flwr_example` 和 `flwr_experimental` 已被弃用,它们不再包含在 " +"Flower 的发布版本中。相关的额外包(`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`)现在已不再使用,并将在即将发布的版本中移除。" + +#: ../../source/ref-changelog.md:1179 +msgid "v0.17.0 (2021-09-24)" +msgstr "v0.17.0 (2021-09-24)" + +#: ../../source/ref-changelog.md:1183 +msgid "" +"**Experimental virtual client engine** " +"([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" +msgstr "" +"**实验性虚拟客户端引擎** ([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" + +#: ../../source/ref-changelog.md:1185 +msgid "" +"One of Flower's goals is to enable research at scale. This release " +"enables a first (experimental) peek at a major new feature, codenamed the" +" virtual client engine. Virtual clients enable simulations that scale to " +"a (very) large number of clients on a single machine or compute cluster. " +"The easiest way to test the new functionality is to look at the two new " +"code examples called `quickstart_simulation` and `simulation_pytorch`." 
+msgstr "" +"Flower 的目标之一是实现大规模研究。这一版本首次(试验性地)展示了代号为 \"虚拟客户端引擎 " +"\"的重要新功能。虚拟客户端可以在单台机器或计算集群上对大量客户端进行模拟。测试新功能的最简单方法是查看名为 " +"\"quickstart_simulation \"和 \"simulation_pytorch \"的两个新代码示例。" + +#: ../../source/ref-changelog.md:1187 +msgid "" +"The feature is still experimental, so there's no stability guarantee for " +"the API. It's also not quite ready for prime time and comes with a few " +"known caveats. However, those who are curious are encouraged to try it " +"out and share their thoughts." +msgstr "" +"该功能仍处于试验阶段,因此无法保证 API " +"的稳定性。此外,它还没有完全准备好进入黄金时间,并有一些已知的注意事项。不过,我们鼓励好奇的用户尝试使用并分享他们的想法。" + +#: ../../source/ref-changelog.md:1189 +msgid "" +"**New built-in strategies** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" +msgstr "" +"**新的内置策略**([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822)" + +#: ../../source/ref-changelog.md:1191 +msgid "" +"FedYogi - Federated learning strategy using Yogi on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" +msgstr "FedYogi - 在服务器端使用 Yogi 的联邦学习策略。基于 https://arxiv.org/abs/2003.00295 实现" + +#: ../../source/ref-changelog.md:1192 +msgid "" +"FedAdam - Federated learning strategy using Adam on server-side. 
" +"Implementation based on https://arxiv.org/abs/2003.00295" +msgstr "FedAdam - 在服务器端使用 Adam 的联邦学习策略。基于 https://arxiv.org/abs/2003.00295 实现" + +#: ../../source/ref-changelog.md:1194 +msgid "" +"**New PyTorch Lightning code example** " +"([#617](https://github.com/adap/flower/pull/617))" +msgstr "" +"**新的 PyTorch Lightning 代码示例** " +"([#617](https://github.com/adap/flower/pull/617))" + +#: ../../source/ref-changelog.md:1196 +msgid "" +"**New Variational Auto-Encoder code example** " +"([#752](https://github.com/adap/flower/pull/752))" +msgstr "**新的变分自动编码器代码示例** ([#752](https://github.com/adap/flower/pull/752))" + +#: ../../source/ref-changelog.md:1198 +msgid "" +"**New scikit-learn code example** " +"([#748](https://github.com/adap/flower/pull/748))" +msgstr "**新的 scikit-learn 代码示例** ([#748](https://github.com/adap/flower/pull/748))" + +#: ../../source/ref-changelog.md:1200 +msgid "" +"**New experimental TensorBoard strategy** " +"([#789](https://github.com/adap/flower/pull/789))" +msgstr "**新的实验性 TensorBoard 策略**([#789](https://github.com/adap/flower/pull/789))" + +#: ../../source/ref-changelog.md:1204 +msgid "" +"Improved advanced TensorFlow code example " +"([#769](https://github.com/adap/flower/pull/769))" +msgstr "改进的高级 TensorFlow 代码示例([#769](https://github.com/adap/flower/pull/769)" + +#: ../../source/ref-changelog.md:1205 +msgid "" +"Warning when `min_available_clients` is misconfigured " +"([#830](https://github.com/adap/flower/pull/830))" +msgstr "" +"当 `min_available_clients` 配置错误时发出警告 " +"([#830](https://github.com/adap/flower/pull/830))" + +#: ../../source/ref-changelog.md:1206 +msgid "" +"Improved gRPC server docs " +"([#841](https://github.com/adap/flower/pull/841))" +msgstr "改进了 gRPC 服务器文档([#841](https://github.com/adap/flower/pull/841))" + +#: ../../source/ref-changelog.md:1207 +msgid "" +"Improved error message in `NumPyClient` " +"([#851](https://github.com/adap/flower/pull/851))" +msgstr "改进了 `NumPyClient` 中的错误信息 
([#851](https://github.com/adap/flower/pull/851))" + +#: ../../source/ref-changelog.md:1208 +msgid "" +"Improved PyTorch quickstart code example " +"([#852](https://github.com/adap/flower/pull/852))" +msgstr "改进的 PyTorch 快速启动代码示例 ([#852](https://github.com/adap/flower/pull/852))" + +#: ../../source/ref-changelog.md:1212 +msgid "" +"**Disabled final distributed evaluation** " +"([#800](https://github.com/adap/flower/pull/800))" +msgstr "**禁用最终分布式评价** ([#800](https://github.com/adap/flower/pull/800))" + +#: ../../source/ref-changelog.md:1214 +msgid "" +"Prior behaviour was to perform a final round of distributed evaluation on" +" all connected clients, which is often not required (e.g., when using " +"server-side evaluation). The prior behaviour can be enabled by passing " +"`force_final_distributed_eval=True` to `start_server`." +msgstr "" +"之前的行为是在所有连接的客户端上执行最后一轮分布式评估,而这通常是不需要的(例如,在使用服务器端评估时)。可以通过向 `start_server`" +" 传递 `force_final_distributed_eval=True` 来启用之前的行为。" + +#: ../../source/ref-changelog.md:1216 +msgid "" +"**Renamed q-FedAvg strategy** " +"([#802](https://github.com/adap/flower/pull/802))" +msgstr "**更名为 q-FedAvg 策略** ([#802](https://github.com/adap/flower/pull/802))" + +#: ../../source/ref-changelog.md:1218 +msgid "" +"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " +"the notation given in the original paper (q-FFL is the optimization " +"objective, q-FedAvg is the proposed solver). Note the original (now " +"deprecated) `QffedAvg` class is still available for compatibility reasons" +" (it will be removed in a future release)." 
+msgstr "" +"名为 `QffedAvg` 的策略已更名为 `QFedAvg`,以更好地反映原始论文中给出的符号(q-FFL 是优化目标,q-FedAvg " +"是建议的求解器)。请注意,出于兼容性原因,原始(现已废弃)的 `QffedAvg` 类仍然可用(它将在未来的版本中移除)。" + +#: ../../source/ref-changelog.md:1220 +msgid "" +"**Deprecated and renamed code example** `simulation_pytorch` **to** " +"`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" +msgstr "" +"**删除并重命名代码示例**`simulation_pytorch`**为**`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" + +#: ../../source/ref-changelog.md:1222 +msgid "" +"This example has been replaced by a new example. The new example is based" +" on the experimental virtual client engine, which will become the new " +"default way of doing most types of large-scale simulations in Flower. The" +" existing example was kept for reference purposes, but it might be " +"removed in the future." +msgstr "" +"该示例已被新示例取代。新示例基于试验性虚拟客户端引擎,它将成为在 Flower " +"中进行大多数类型大规模模拟的新的默认方式。现有示例将作为参考保留,但将来可能会删除。" + +#: ../../source/ref-changelog.md:1224 +msgid "v0.16.0 (2021-05-11)" +msgstr "v0.16.0 (2021-05-11)" + +#: ../../source/ref-changelog.md:1228 +msgid "" +"**New built-in strategies** " +"([#549](https://github.com/adap/flower/pull/549))" +msgstr "**新的内置策略** ([#549](https://github.com/adap/flower/pull/549))" + +#: ../../source/ref-changelog.md:1230 +msgid "(abstract) FedOpt" +msgstr "(摘要) FedOpt" + +#: ../../source/ref-changelog.md:1233 +msgid "" +"**Custom metrics for server and strategies** " +"([#717](https://github.com/adap/flower/pull/717))" +msgstr "**服务器和策略的自定义指标** ([#717](https://github.com/adap/flower/pull/717))" + +#: ../../source/ref-changelog.md:1235 +msgid "" +"The Flower server is now fully task-agnostic, all remaining instances of " +"task-specific metrics (such as `accuracy`) have been replaced by custom " +"metrics dictionaries. Flower 0.15 introduced the capability to pass a " +"dictionary containing custom metrics from client to server. 
As of this " +"release, custom metrics replace task-specific metrics on the server." +msgstr "" +"Flower 服务器现在完全与任务无关,所有剩余的任务特定度量(如 \"准确度\")都已被自定义度量字典取代。Flower 0.15 " +"引入了从客户端向服务器传递包含自定义指标的字典的功能。从本版本开始,自定义指标将取代服务器上的特定任务指标。" + +#: ../../source/ref-changelog.md:1237 +msgid "" +"Custom metric dictionaries are now used in two user-facing APIs: they are" +" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " +"they enable evaluation functions passed to built-in strategies (via " +"`eval_fn`) to return more than two evaluation metrics. Strategies can " +"even return *aggregated* metrics dictionaries for the server to keep " +"track of." +msgstr "" +"自定义度量字典现在可在两个面向用户的 API 中使用:它们可从策略方法 `aggregate_fit`/`aggregate_evaluate` " +"返回,还可使传递给内置策略(通过 `eval_fn`)的评估函数返回两个以上的评估度量。策略甚至可以返回 *aggregated* " +"指标字典,以便服务器跟踪。" + +#: ../../source/ref-changelog.md:1239 +msgid "" +"Strategy implementations should migrate their `aggregate_fit` and " +"`aggregate_evaluate` methods to the new return type (e.g., by simply " +"returning an empty `{}`), server-side evaluation functions should migrate" +" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." +msgstr "" +"Strategy 实现应将其 `aggregate_fit` 和 `aggregate_evaluate` " +"方法迁移到新的返回类型(例如,只需返回空的 `{}`),服务器端评估函数应从 `return loss, accuracy` 迁移到 " +"`return loss, {\"accuracy\": accuracy}`。" + +#: ../../source/ref-changelog.md:1241 +msgid "" +"Flower 0.15-style return types are deprecated (but still supported), " +"compatibility will be removed in a future release." 
+msgstr "Flower 0.15 风格的返回类型已被弃用(但仍受支持),兼容性将在未来的版本中移除。" + +#: ../../source/ref-changelog.md:1243 +msgid "" +"**Migration warnings for deprecated functionality** " +"([#690](https://github.com/adap/flower/pull/690))" +msgstr "** 过时功能的迁移警告** ([#690](https://github.com/adap/flower/pull/690))" + +#: ../../source/ref-changelog.md:1245 +msgid "" +"Earlier versions of Flower were often migrated to new APIs, while " +"maintaining compatibility with legacy APIs. This release introduces " +"detailed warning messages if usage of deprecated APIs is detected. The " +"new warning messages often provide details on how to migrate to more " +"recent APIs, thus easing the transition from one release to another." +msgstr "" +"Flower 早期版本通常会迁移到新的应用程序接口,同时保持与旧版应用程序接口的兼容。如果检测到使用了过时的 " +"API,本版本将引入详细的警告信息。新的警告信息通常会详细说明如何迁移到更新的 API,从而简化从一个版本到另一个版本的过渡。" + +#: ../../source/ref-changelog.md:1247 +msgid "" +"Improved docs and docstrings " +"([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" +msgstr "" +"改进了文档和文档说明 ([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" + +#: ../../source/ref-changelog.md:1249 +msgid "MXNet example and documentation" +msgstr "MXNet 示例和文档" + +#: ../../source/ref-changelog.md:1251 +msgid "" +"FedBN implementation in example PyTorch: From Centralized To Federated " +"([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" +msgstr "" +"PyTorch 示例中的 FedBN 实现: 从集中到联邦 " +"([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" + +#: ../../source/ref-changelog.md:1255 +msgid "" +"**Serialization-agnostic server** " +"([#721](https://github.com/adap/flower/pull/721))" +msgstr 
"**序列化无关服务器** ([#721](https://github.com/adap/flower/pull/721))" + +#: ../../source/ref-changelog.md:1257 +msgid "" +"The Flower server is now fully serialization-agnostic. Prior usage of " +"class `Weights` (which represents parameters as deserialized NumPy " +"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). " +"`Parameters` objects are fully serialization-agnostic and represents " +"parameters as byte arrays, the `tensor_type` attributes indicates how " +"these byte arrays should be interpreted (e.g., for " +"serialization/deserialization)." +msgstr "" +"Flower 服务器现在完全不依赖序列化。之前使用的 `Weights` 类(以反序列化的 NumPy ndarrays 表示参数)已被 " +"`Parameters` 类取代(例如在 `Strategy`中)。参数 " +"\"对象与序列化完全无关,它以字节数组的形式表示参数,\"tensor_type \"属性表示如何解释这些字节数组(例如,用于序列化/反序列化)。" + +#: ../../source/ref-changelog.md:1259 +msgid "" +"Built-in strategies implement this approach by handling serialization and" +" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " +"implementations should update to the slightly changed Strategy method " +"definitions. Strategy authors can consult PR " +"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" +" easily migrate to the new format." 
+msgstr "" +"内置策略通过在内部处理序列化和反序列化到/从`Weights`来实现这种方法。自定义/第三方策略实现应更新为稍有改动的策略方法定义。策略作者可查阅" +" PR [#721](https://github.com/adap/flower/pull/721) 以了解如何将策略轻松迁移到新格式。" + +#: ../../source/ref-changelog.md:1261 +msgid "" +"Deprecated `flwr.server.Server.evaluate`, use " +"`flwr.server.Server.evaluate_round` instead " +"([#717](https://github.com/adap/flower/pull/717))" +msgstr "" +"已弃用 `flwr.server.Server.evaluate`,改用 " +"`flwr.server.Server.evaluate_round`([#717](https://github.com/adap/flower/pull/717)" + +#: ../../source/ref-changelog.md:1263 +msgid "v0.15.0 (2021-03-12)" +msgstr "v0.15.0 (2021-03-12)" + +#: ../../source/ref-changelog.md:1267 +msgid "" +"**Server-side parameter initialization** " +"([#658](https://github.com/adap/flower/pull/658))" +msgstr "**服务器端参数初始化** ([#658](https://github.com/adap/flower/pull/658))" + +#: ../../source/ref-changelog.md:1269 +msgid "" +"Model parameters can now be initialized on the server-side. Server-side " +"parameter initialization works via a new `Strategy` method called " +"`initialize_parameters`." +msgstr "" +"现在可以在服务器端初始化模型参数。服务器端参数初始化通过名为 \"initialize_parameters \"的新 \"Strategy " +"\"方法进行。" + +#: ../../source/ref-changelog.md:1271 +msgid "" +"Built-in strategies support a new constructor argument called " +"`initial_parameters` to set the initial parameters. Built-in strategies " +"will provide these initial parameters to the server on startup and then " +"delete them to free the memory afterwards." +msgstr "" +"内置策略支持名为 \"initial_parameters " +"\"的新构造函数参数,用于设置初始参数。内置策略会在启动时向服务器提供这些初始参数,然后删除它们以释放内存。" + +#: ../../source/ref-changelog.md:1290 +msgid "" +"If no initial parameters are provided to the strategy, the server will " +"continue to use the current behaviour (namely, it will ask one of the " +"connected clients for its parameters and use these as the initial global " +"parameters)." 
+msgstr "如果没有向策略提供初始参数,服务器将继续使用当前行为(即向其中一个已连接的客户端询问参数,并将这些参数用作初始全局参数)。" + +#: ../../source/ref-changelog.md:1294 +msgid "" +"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " +"`flwr.server.strategy.FedAvg`, which is equivalent)" +msgstr "" +"停用 `flwr.server.strategy.DefaultStrategy`(迁移到等价的 " +"`flwr.server.strategy.FedAvg`)" + +#: ../../source/ref-changelog.md:1296 +msgid "v0.14.0 (2021-02-18)" +msgstr "v0.14.0 (2021-02-18)" + +#: ../../source/ref-changelog.md:1300 +msgid "" +"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " +"([#610](https://github.com/adap/flower/pull/610) " +"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" +msgstr "" +"**通用** `Client.fit` **和** `Client.evaluate` **返回值** " +"([#610](https://github.com/adap/flower/pull/610) " +"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" + +#: ../../source/ref-changelog.md:1302 +msgid "" +"Clients can now return an additional dictionary mapping `str` keys to " +"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " +"This means one can return almost arbitrary values from `fit`/`evaluate` " +"and make use of them on the server side!" +msgstr "" +"客户端现在可以返回一个额外的字典,将 `str` 键映射为以下类型的值: " +"bool`、`bytes`、`float`、`int`、`str`。这意味着我们可以从 `fit`/`evaluate` " +"返回几乎任意的值,并在服务器端使用它们!" + +#: ../../source/ref-changelog.md:1304 +msgid "" +"This improvement also allowed for more consistent return types between " +"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " +"dict)` representing the loss, number of examples, and a dictionary " +"holding arbitrary problem-specific values like accuracy." 
+msgstr "" +"这一改进还使 `fit` 和 `evaluate` 之间的返回类型更加一致:`evaluate` 现在应返回一个元组`(float, int, " +"dict)`,代表损失、示例数和一个包含特定问题任意值(如准确度)的字典。" + +#: ../../source/ref-changelog.md:1306 +msgid "" +"In case you wondered: this feature is compatible with existing projects, " +"the additional dictionary return value is optional. New code should " +"however migrate to the new return types to be compatible with upcoming " +"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " +"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " +"details." +msgstr "" +"如果你想知道:此功能与现有项目兼容,额外的字典返回值是可选的。不过,新代码应迁移到新的返回类型,以便与即将发布的 Flower " +"版本兼容(`fit`: `List[np.ndarray], int, Dict[str, Scalar]`,`evaluate`: " +"`float, int, Dict[str, Scalar]`)。详见下面的示例。" + +#: ../../source/ref-changelog.md:1308 +msgid "" +"*Code example:* note the additional dictionary return values in both " +"`FlwrClient.fit` and `FlwrClient.evaluate`:" +msgstr "*代码示例:* 注意 `FlwrClient.fit` 和 `FlwrClient.evaluate` 中的附加字典返回值:" + +#: ../../source/ref-changelog.md:1323 +msgid "" +"**Generalized** `config` **argument in** `Client.fit` **and** " +"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" +msgstr "" +"**在**`Client.fit` " +"**和**`Client.evaluate`中泛化**`config`参数([#595](https://github.com/adap/flower/pull/595))" + +#: ../../source/ref-changelog.md:1325 +msgid "" +"The `config` argument used to be of type `Dict[str, str]`, which means " +"that dictionary values were expected to be strings. The new release " +"generalizes this to enable values of the following types: `bool`, " +"`bytes`, `float`, `int`, `str`." +msgstr "" +"`config`参数曾是 \"字典[str, str]\"类型,这意味着字典值应是字符串。新版本将其扩展为以下类型的值: " +"bool`、`bytes`、`float`、`int`、`str`。" + +#: ../../source/ref-changelog.md:1327 +msgid "" +"This means one can now pass almost arbitrary values to `fit`/`evaluate` " +"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" +"side and `int(config[\"epochs\"])` on the client side!" 
+msgstr "" +"这意味着现在可以使用 `config` 字典向 `fit`/`evaluate` 传递几乎任意的值。耶,服务器端不再需要 " +"`str(epochs)`,客户端不再需要 `int(config[\"epochs\"])`!" + +#: ../../source/ref-changelog.md:1329 +msgid "" +"*Code example:* note that the `config` dictionary now contains non-`str` " +"values in both `Client.fit` and `Client.evaluate`:" +msgstr "*代码示例:* 注意 `config` 字典现在在 `Client.fit` 和 `Client.evaluate` 中都包含非 `str` 值:" + +#: ../../source/ref-changelog.md:1346 +msgid "v0.13.0 (2021-01-08)" +msgstr "v0.13.0 (2021-01-08)" + +#: ../../source/ref-changelog.md:1350 +msgid "" +"New example: PyTorch From Centralized To Federated " +"([#549](https://github.com/adap/flower/pull/549))" +msgstr "新示例: PyTorch 从集中到联邦 ([#549](https://github.com/adap/flower/pull/549))" + +#: ../../source/ref-changelog.md:1351 +msgid "Improved documentation" +msgstr "改进文档" + +#: ../../source/ref-changelog.md:1352 +msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" +msgstr "新文档主题 ([#551](https://github.com/adap/flower/pull/551))" + +#: ../../source/ref-changelog.md:1353 +msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" +msgstr "新的 API 参考 ([#554](https://github.com/adap/flower/pull/554))" + +#: ../../source/ref-changelog.md:1354 +msgid "" +"Updated examples documentation " +"([#549](https://github.com/adap/flower/pull/549))" +msgstr "更新了示例文档 ([#549](https://github.com/adap/flower/pull/549))" + +#: ../../source/ref-changelog.md:1355 +msgid "" +"Removed obsolete documentation " +"([#548](https://github.com/adap/flower/pull/548))" +msgstr "删除了过时的文档 ([#548](https://github.com/adap/flower/pull/548))" + +#: ../../source/ref-changelog.md:1357 +msgid "Bugfix:" +msgstr "错误修正:" + +#: ../../source/ref-changelog.md:1359 +msgid "" +"`Server.fit` does not disconnect clients when finished, disconnecting the" +" clients is now handled in `flwr.server.start_server` " +"([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))." 
+msgstr "" +"Server.fit \"完成后不会断开客户端连接,现在断开客户端连接是在 \"flwr.server.start_server " +"\"中处理的([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))。" + +#: ../../source/ref-changelog.md:1361 +msgid "v0.12.0 (2020-12-07)" +msgstr "v0.12.0 (2020-12-07)" + +#: ../../source/ref-changelog.md:1363 ../../source/ref-changelog.md:1379 +msgid "Important changes:" +msgstr "重要变更:" + +#: ../../source/ref-changelog.md:1365 +msgid "" +"Added an example for embedded devices " +"([#507](https://github.com/adap/flower/pull/507))" +msgstr "添加了嵌入式设备示例 ([#507](https://github.com/adap/flower/pull/507))" + +#: ../../source/ref-changelog.md:1366 +msgid "" +"Added a new NumPyClient (in addition to the existing KerasClient) " +"([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508))" +msgstr "" +"添加了一个新的 NumPyClient(除现有的 KerasClient " +"之外)([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508)" + +#: ../../source/ref-changelog.md:1367 +msgid "" +"Deprecated `flwr_example` package and started to migrate examples into " +"the top-level `examples` directory " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" +msgstr "" +"弃用 `flwr_example` 软件包,并开始将示例迁移到顶层的 `examples` 目录 " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" + +#: ../../source/ref-changelog.md:1369 +msgid "v0.11.0 (2020-11-30)" +msgstr "v0.11.0 (2020-11-30)" + +#: ../../source/ref-changelog.md:1371 +msgid "Incompatible changes:" +msgstr "不兼容的更改:" + +#: ../../source/ref-changelog.md:1373 +msgid "" +"Renamed strategy methods " +"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " +"Flower's public APIs. 
Other public methods/functions (e.g., every method " +"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " +"which is why we're removing it from the four methods in Strategy. To " +"migrate rename the following `Strategy` methods accordingly:" +msgstr "" +"重命名了策略方法([#486](https://github.com/adap/flower/pull/486)),以统一 Flower公共 " +"API 的命名。其他公共方法/函数(例如 `Client` 中的每个方法,以及 `Strategy.evaluate`)不使用 `on_` " +"前缀,这就是我们从 Strategy 中的四个方法中移除它的原因。迁移时,请相应地重命名以下 `Strategy` 方法:" + +#: ../../source/ref-changelog.md:1374 +msgid "`on_configure_evaluate` => `configure_evaluate`" +msgstr "`on_configure_evaluate` => `configure_evaluate`" + +#: ../../source/ref-changelog.md:1375 +msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" +msgstr "`on_aggregate_evaluate` => `aggregate_evaluate`" + +#: ../../source/ref-changelog.md:1376 +msgid "`on_configure_fit` => `configure_fit`" +msgstr "`on_configure_fit` => `configure_fit`" + +#: ../../source/ref-changelog.md:1377 +msgid "`on_aggregate_fit` => `aggregate_fit`" +msgstr "`on_aggregate_fit` => `aggregate_fit`" + +#: ../../source/ref-changelog.md:1381 +msgid "" +"Deprecated `DefaultStrategy` " +"([#479](https://github.com/adap/flower/pull/479)). To migrate use " +"`FedAvg` instead." +msgstr "" +"已废弃的 `DefaultStrategy` ([#479](https://github.com/adap/flower/pull/479)) " +"。迁移时请使用 `FedAvg`。" + +#: ../../source/ref-changelog.md:1382 +msgid "" +"Simplified examples and baselines " +"([#484](https://github.com/adap/flower/pull/484))." +msgstr "简化示例和baselines([#484](https://github.com/adap/flower/pull/484))。" + +#: ../../source/ref-changelog.md:1383 +msgid "" +"Removed presently unused `on_conclude_round` from strategy interface " +"([#483](https://github.com/adap/flower/pull/483))." 
+msgstr "" +"删除了策略界面中目前未使用的 " +"\"on_conclude_round\"([#483](https://github.com/adap/flower/pull/483))。" + +#: ../../source/ref-changelog.md:1384 +msgid "" +"Set minimal Python version to 3.6.1 instead of 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." +msgstr "" +"将最小 Python 版本设为 3.6.1,而不是 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." + +#: ../../source/ref-changelog.md:1385 +msgid "" +"Improved `Strategy` docstrings " +"([#470](https://github.com/adap/flower/pull/470))." +msgstr "" +"改进了 `Strategy` " +"docstrings([#470](https://github.com/adap/flower/pull/470))。" + +#: ../../source/ref-example-projects.rst:2 +msgid "Example projects" +msgstr "项目实例" + +#: ../../source/ref-example-projects.rst:4 +msgid "" +"Flower comes with a number of usage examples. The examples demonstrate " +"how Flower can be used to federate different kinds of existing machine " +"learning pipelines, usually leveraging popular machine learning " +"frameworks such as `PyTorch `_ or `TensorFlow " +"`_." +msgstr "" +"Flower 附带了许多使用示例。这些示例演示了如何使用 Flower 联邦不同类型的现有机器学习形式,通常是利用流行的机器学习框架,如 " +"`PyTorch `_ 或 `TensorFlow " +"`_。" + +#: ../../source/ref-example-projects.rst:9 +#, fuzzy +msgid "The following examples are available as standalone projects." 
+msgstr "以下示例可作为独立项目使用。" + +#: ../../source/ref-example-projects.rst:12 +#, fuzzy +msgid "Quickstart TensorFlow/Keras" +msgstr "快速入门 TensorFlow" + +#: ../../source/ref-example-projects.rst:14 +msgid "" +"The TensorFlow/Keras quickstart example shows CIFAR-10 image " +"classification with MobileNetV2:" +msgstr "TensorFlow/Keras 快速入门示例展示了使用 MobileNetV2 进行的 CIFAR-10 图像分类:" + +#: ../../source/ref-example-projects.rst:17 +msgid "" +"`Quickstart TensorFlow (Code) " +"`_" +msgstr "" +"`TensorFlow快速入门 (代码) `_" + +#: ../../source/ref-example-projects.rst:19 +#, fuzzy +msgid ":doc:`Quickstart TensorFlow (Tutorial) `" +msgstr "" +"`TensorFlow快速入门 (教程) `_" + +#: ../../source/ref-example-projects.rst:20 +msgid "" +"`Quickstart TensorFlow (Blog Post) `_" +msgstr "" +"`TensorFlow快速入门 (博客) `_" + +#: ../../source/ref-example-projects.rst:24 +#: ../../source/tutorial-quickstart-pytorch.rst:4 +msgid "Quickstart PyTorch" +msgstr "PyTorch快速入门" + +#: ../../source/ref-example-projects.rst:26 +msgid "" +"The PyTorch quickstart example shows CIFAR-10 image classification with a" +" simple Convolutional Neural Network:" +msgstr "PyTorch 快速入门范例展示了使用简单卷积神经网络进行 CIFAR-10 图像分类的情况:" + +#: ../../source/ref-example-projects.rst:29 +msgid "" +"`Quickstart PyTorch (Code) " +"`_" +msgstr "" +"`PyTorch快速入门 (代码) `_" + +#: ../../source/ref-example-projects.rst:31 +#, fuzzy +msgid ":doc:`Quickstart PyTorch (Tutorial) `" +msgstr "" +"`PyTorch快速入门 (教程) `_" + +#: ../../source/ref-example-projects.rst:34 +msgid "PyTorch: From Centralized To Federated" +msgstr "PyTorch: 从集中式到联邦式" + +#: ../../source/ref-example-projects.rst:36 +msgid "" +"This example shows how a regular PyTorch project can be federated using " +"Flower:" +msgstr "本例展示了如何使用 Flower 联邦化一个普通的 PyTorch 项目:" + +#: ../../source/ref-example-projects.rst:38 +msgid "" +"`PyTorch: From Centralized To Federated (Code) " +"`_" +msgstr "" +"PyTorch: 从集中式到联邦式(代码) `_" + +#: ../../source/ref-example-projects.rst:40 +#, fuzzy +msgid "" +":doc:`PyTorch: From 
Centralized To Federated (Tutorial) `" +msgstr "" +"PyTorch: 从集中式到联邦式(教程) `_" + +#: ../../source/ref-example-projects.rst:44 +msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" +msgstr "树莓派和 Nvidia Jetson 上的联邦学习" + +#: ../../source/ref-example-projects.rst:46 +msgid "" +"This example shows how Flower can be used to build a federated learning " +"system that run across Raspberry Pi and Nvidia Jetson:" +msgstr "本示例展示了如何利用 Flower 建立一个跨 Raspberry Pi 和 Nvidia Jetson 运行的联邦学习系统:" + +#: ../../source/ref-example-projects.rst:49 +msgid "" +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " +"`_" +msgstr "" +"Raspberry Pi 和 Nvidia Jetson 上的联邦学习(代码) " +"`_" + +#: ../../source/ref-example-projects.rst:51 +msgid "" +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " +"`_" +msgstr "" +"Raspberry Pi和 Nvidia Jetson 上的联邦学习(博客) " +"`_" + +#: ../../source/ref-faq.rst:4 +msgid "" +"This page collects answers to commonly asked questions about Federated " +"Learning with Flower." +msgstr "本页收集了有关 \"Flower 联邦学习 \"常见问题的答案。" + +#: ../../source/ref-faq.rst +#, fuzzy +msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" +msgstr ":fa:`eye,mr-1` Flower 可以在 Juptyter Notebooks / Google Colab 上运行吗?" + +#: ../../source/ref-faq.rst:9 +msgid "" +"Yes, it can! Flower even comes with a few under-the-hood optimizations to" +" make it work even better on Colab. Here's a quickstart example:" +msgstr "是的,它可以!Flower 甚至还进行了一些底层优化,使其在 Colab 上运行得更好。下面是一个快速启动示例:" + +#: ../../source/ref-faq.rst:11 +msgid "" +"`Flower simulation PyTorch " +"`_" +msgstr "" +"`Flower 模拟 PyTorch " +"`_" + +#: ../../source/ref-faq.rst:12 +msgid "" +"`Flower simulation TensorFlow/Keras " +"`_" +msgstr "" +"`Flower模拟TensorFlow/Keras " +"`_" + +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" +msgstr ":fa:`eye,mr-1` 如何在 Raspberry Pi 上运行联邦学习?" 
+ +#: ../../source/ref-faq.rst:16 +msgid "" +"Find the `blog post about federated learning on embedded device here " +"`_" +" and the corresponding `GitHub code example " +"`_." +msgstr "" +"请点击此处查看有关嵌入式设备联邦学习的 " +"\"博文\"`_和相应的" +" \"GitHub 代码示例\"`_。" + +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" +msgstr ":fa:`eye,mr-1` Flower 是否支持安卓设备上的联邦学习?" + +#: ../../source/ref-faq.rst:20 +msgid "" +"Yes, it does. Please take a look at our `blog post " +"`_ or check out the code examples:" +msgstr "" +"是的,确实如此。请查看我们的 \"博客文章 `_\" 或查看代码示例:" + +#: ../../source/ref-faq.rst:22 +msgid "" +"`Android Kotlin example `_" +msgstr "`Android Kotlin 示例 `_" + +#: ../../source/ref-faq.rst:23 +msgid "`Android Java example `_" +msgstr "Android Java 示例 `_" + +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" +msgstr ":fa:`eye,mr-1` 我可以将联邦学习与区块链结合起来吗?" + +#: ../../source/ref-faq.rst:27 +msgid "" +"Yes, of course. A list of available examples using Flower within a " +"blockchain environment is available here:" +msgstr "当然可以。有关在区块链环境中使用 Flower 的可用示例列表,请点击此处:" + +#: ../../source/ref-faq.rst:30 +msgid "`FLock: A Decentralised AI Training Platform `_." +msgstr "" + +#: ../../source/ref-faq.rst:30 +msgid "Contribute to on-chain training the model and earn rewards." +msgstr "" + +#: ../../source/ref-faq.rst:31 +#, fuzzy +msgid "Local blockchain with federated learning simulation." +msgstr "扩大联邦学习的规模" + +#: ../../source/ref-faq.rst:32 +msgid "" +"`Flower meets Nevermined GitHub Repository `_." +msgstr "" +"`Flower meets Nevermined GitHub Repository `_." + +#: ../../source/ref-faq.rst:33 +msgid "" +"`Flower meets Nevermined YouTube video " +"`_." +msgstr "" +"`Flower meets Nevermined YouTube 视频 " +"`_." + +#: ../../source/ref-faq.rst:34 +#, fuzzy +msgid "" +"`Flower meets KOSMoS `_." +msgstr "" +"`Flower meets KOSMoS `_." 
+ +#: ../../source/ref-faq.rst:35 +msgid "" +"`Flower meets Talan blog post `_ ." +msgstr "" +"`Flower meets Talan博文 `_ 。" + +#: ../../source/ref-faq.rst:36 +msgid "" +"`Flower meets Talan GitHub Repository " +"`_ ." +msgstr "" +"`Flower meets Talan GitHub Repository " +"`_ ." + +#: ../../source/ref-telemetry.md:1 +msgid "Telemetry" +msgstr "遥测功能" + +#: ../../source/ref-telemetry.md:3 +msgid "" +"The Flower open-source project collects **anonymous** usage metrics to " +"make well-informed decisions to improve Flower. Doing this enables the " +"Flower team to understand how Flower is used and what challenges users " +"might face." +msgstr "" +"Flower 开源项目收集**匿名**使用指标,以便在充分知情的情况下做出改进 Flower 的决定。这样做能让 Flower 团队了解 " +"Flower 的使用情况以及用户可能面临的挑战。" + +#: ../../source/ref-telemetry.md:5 +msgid "" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users that do not want to share anonymous usage metrics." +msgstr "**Flower 是一个用于协作式人工智能和数据科学的友好框架。** Flower 遵循这一声明,让不想分享匿名使用指标的用户可以轻松禁用遥测技术。" + +#: ../../source/ref-telemetry.md:7 +msgid "Principles" +msgstr "原则" + +#: ../../source/ref-telemetry.md:9 +msgid "We follow strong principles guarding anonymous usage metrics collection:" +msgstr "我们遵循严格的匿名使用指标收集原则:" + +#: ../../source/ref-telemetry.md:11 +msgid "" +"**Optional:** You will always be able to disable telemetry; read on to " +"learn “[How to opt-out](#how-to-opt-out)”." +msgstr "**可选:** 您始终可以禁用遥测功能;请继续阅读\"[如何退出](#how-to-opt-out)\"。" + +#: ../../source/ref-telemetry.md:12 +msgid "" +"**Anonymous:** The reported usage metrics are anonymous and do not " +"contain any personally identifiable information (PII). See “[Collected " +"metrics](#collected-metrics)” to understand what metrics are being " +"reported." 
+msgstr "" +"**匿名:** 报告的使用指标是匿名的,不包含任何个人身份信息 (PII)。请参阅\"[收集的指标](#collected-metrics) " +"\"了解报告的指标。" + +#: ../../source/ref-telemetry.md:13 +msgid "" +"**Transparent:** You can easily inspect what anonymous metrics are being " +"reported; see the section “[How to inspect what is being reported](#how-" +"to-inspect-what-is-being-reported)”" +msgstr "" +"**透明:** 您可以轻松查看正在报告的匿名指标;请参阅\"[如何查看正在报告的指标](#how-to-inspect-what-is-" +"being-reported)\"部分" + +#: ../../source/ref-telemetry.md:14 +msgid "" +"**Open for feedback:** You can always reach out to us if you have " +"feedback; see the section “[How to contact us](#how-to-contact-us)” for " +"details." +msgstr "**欢迎反馈:** 如果您有反馈意见,可以随时联系我们;详情请参见\"[如何联系我们](#how-to-contact-us) \"部分。" + +#: ../../source/ref-telemetry.md:16 +msgid "How to opt-out" +msgstr "如何退出" + +#: ../../source/ref-telemetry.md:18 +msgid "" +"When Flower starts, it will check for an environment variable called " +"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " +"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " +"client, simply do so by prepending your command as in:" +msgstr "" +"Flower 启动时,会检查环境变量 `FLWR_TELEMETRY_ENABLED` 是否存在。通过设置 " +"`FLWR_TELEMETRY_ENABLED=0` 可以轻松禁用遥测功能。假设你启动的是 Flower " +"服务器或客户端,只需在命令前添加以下内容即可:" + +#: ../../source/ref-telemetry.md:24 +msgid "" +"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," +" `.bashrc` (or whatever configuration file applies to your environment) " +"to disable Flower telemetry permanently." +msgstr "" +"或者,你也可以在 `.bashrc`(或任何适用于你的环境的配置文件)中导出 `FLWR_TELEMETRY_ENABLED=0` 来永久禁用 " +"Flower telemetry。" + +#: ../../source/ref-telemetry.md:26 +msgid "Collected metrics" +msgstr "收集的指标" + +#: ../../source/ref-telemetry.md:28 +msgid "Flower telemetry collects the following metrics:" +msgstr "Flower 遥测技术收集以下指标:" + +#: ../../source/ref-telemetry.md:30 +msgid "" +"**Flower version.** Understand which versions of Flower are currently " +"being used. 
This helps us to decide whether we should invest effort into " +"releasing a patch version for an older version of Flower or instead use " +"the bandwidth to build new features." +msgstr "**了解目前使用的 Flower 版本。这有助于我们决定是否应该投入精力为旧版本的 Flower 发布补丁版本,还是利用带宽来构建新功能。" + +#: ../../source/ref-telemetry.md:32 +msgid "" +"**Operating system.** Enables us to answer questions such as: *Should we " +"create more guides for Linux, macOS, or Windows?*" +msgstr "**操作系统**使我们能够回答以下问题: *我们应该为 Linux、macOS 还是 Windows 创建更多指南?*" + +#: ../../source/ref-telemetry.md:34 +msgid "" +"**Python version.** Knowing the Python version helps us, for example, to " +"decide whether we should invest effort into supporting old versions of " +"Python or stop supporting them and start taking advantage of new Python " +"features." +msgstr "**例如,了解 Python 版本有助于我们决定是否应该投入精力支持旧版本的 Python,还是停止支持这些版本并开始利用新的 Python 功能。" + +#: ../../source/ref-telemetry.md:36 +msgid "" +"**Hardware properties.** Understanding the hardware environment that " +"Flower is being used in helps to decide whether we should, for example, " +"put more effort into supporting low-resource environments." +msgstr "**硬件属性** 了解 Flower 的硬件使用环境,有助于决定我们是否应在支持低资源环境等方面投入更多精力。" + +#: ../../source/ref-telemetry.md:38 +msgid "" +"**Execution mode.** Knowing what execution mode Flower starts in enables " +"us to understand how heavily certain features are being used and better " +"prioritize based on that." +msgstr "** 执行模式** 了解 Flower 的启动执行模式,能让我们了解某些功能的使用率,并据此更好地确定优先级。" + +#: ../../source/ref-telemetry.md:40 +msgid "" +"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " +"time a Flower workload starts. This allows us to understand which device " +"types not only start Flower workloads but also successfully complete " +"them." 
+msgstr "" +"**每次 Flower 工作负载启动时,Flower 遥测都会随机分配一个内存集群 ID。这样,我们就能了解哪些设备类型不仅启动了 Flower " +"工作负载,而且还成功完成了它们。" + +#: ../../source/ref-telemetry.md:42 +msgid "" +"**Source.** Flower telemetry tries to store a random source ID in " +"`~/.flwr/source` the first time a telemetry event is generated. The " +"source ID is important to identify whether an issue is recurring or " +"whether an issue is triggered by multiple clusters running concurrently " +"(which often happens in simulation). For example, if a device runs " +"multiple workloads at the same time, and this results in an issue, then, " +"in order to reproduce the issue, multiple workloads must be started at " +"the same time." +msgstr "" +"**Source.** Flower 遥测会在第一次生成遥测事件时,尝试在 `~/.flwr/source` 中存储一个随机源 ID。源 ID " +"对于识别问题是否反复出现或问题是否由多个集群同时运行触发(这在模拟中经常发生)非常重要。例如,如果设备同时运行多个工作负载并导致问题,那么为了重现问题,必须同时启动多个工作负载。" + +#: ../../source/ref-telemetry.md:44 +msgid "" +"You may delete the source ID at any time. If you wish for all events " +"logged under a specific source ID to be deleted, you can send a deletion " +"request mentioning the source ID to `telemetry@flower.ai`. All events " +"related to that source ID will then be permanently deleted." +msgstr "" +"您可以随时删除源 ID。如果您希望删除特定源 ID 下记录的所有事件,可以向 `telemetry@flower.ai` 发送删除请求,并提及该源" +" ID。届时,与该源 ID 相关的所有事件都将被永久删除。" + +#: ../../source/ref-telemetry.md:46 +msgid "" +"We will not collect any personally identifiable information. If you think" +" any of the metrics collected could be misused in any way, please [get in" +" touch with us](#how-to-contact-us). We will update this page to reflect " +"any changes to the metrics collected and publish changes in the " +"changelog." +msgstr "" +"我们不会收集任何个人身份信息。如果您认为所收集的任何指标可能以任何方式被滥用,请[与我们联系](#how-to-contact-" +"us)。我们将更新本页面,以反映对所收集指标的任何更改,并在更新日志中公布更改内容。" + +#: ../../source/ref-telemetry.md:48 +msgid "" +"If you think other metrics would be helpful for us to better guide our " +"decisions, please let us know! 
We will carefully review them; if we are " +"confident that they do not compromise user privacy, we may add them." +msgstr "如果您认为其他指标有助于我们更好地指导决策,请告诉我们!我们将仔细审查这些指标;如果我们确信它们不会损害用户隐私,我们可能会添加这些指标。" + +#: ../../source/ref-telemetry.md:50 +msgid "How to inspect what is being reported" +msgstr "如何检查报告中的内容" + +#: ../../source/ref-telemetry.md:52 +msgid "" +"We wanted to make it very easy for you to inspect what anonymous usage " +"metrics are reported. You can view all the reported telemetry information" +" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " +"is disabled by default. You may use logging independently from " +"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " +"without sending any metrics." +msgstr "" +"我们希望能让您轻松查看所报告的匿名使用指标。通过设置环境变量 `FLWR_TELEMETRY_LOGGING=1` " +"可以查看所有报告的遥测信息。日志记录默认为禁用。您可以不使用 `FLWR_TELEMETRY_ENABLED` " +"而单独使用日志记录,这样就可以在不发送任何指标的情况下检查遥测功能。" + +#: ../../source/ref-telemetry.md:58 +msgid "" +"The inspect Flower telemetry without sending any anonymous usage metrics," +" use both environment variables:" +msgstr "在不发送任何匿名使用指标的情况下检查 Flower 遥测,可使用这两个环境变量:" + +#: ../../source/ref-telemetry.md:64 +msgid "How to contact us" +msgstr "如何联系我们" + +#: ../../source/ref-telemetry.md:66 +msgid "" +"We want to hear from you. If you have any feedback or ideas on how to " +"improve the way we handle anonymous usage metrics, reach out to us via " +"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " +"(`telemetry@flower.ai`)." +msgstr "" +"我们希望听到您的意见。如果您对如何改进我们处理匿名使用指标的方式有任何反馈或想法,请通过 [Slack](https://flower.ai" +"/join-slack/) (频道 `#telemetry`)或电子邮件 (`telemetry@flower.ai`)与我们联系。" + +#: ../../source/tutorial-quickstart-android.rst:-1 +msgid "" +"Read this Federated Learning quickstart tutorial for creating an Android " +"app using Flower." 
+msgstr "阅读本联邦学习快速入门教程,了解如何使用 Flower 创建 Android 应用程序。" + +#: ../../source/tutorial-quickstart-android.rst:4 +msgid "Quickstart Android" +msgstr "快速入门 Android" + +#: ../../source/tutorial-quickstart-android.rst:9 +msgid "" +"Let's build a federated learning system using TFLite and Flower on " +"Android!" +msgstr "让我们在 Android 上使用 TFLite 和 Flower 构建一个联邦学习系统!" + +#: ../../source/tutorial-quickstart-android.rst:11 +msgid "" +"Please refer to the `full code example " +"`_ to learn " +"more." +msgstr "" +"请参阅`完整代码示例 " +"`_了解更多信息。" + +#: ../../source/tutorial-quickstart-fastai.rst:4 +msgid "Quickstart fastai" +msgstr "快速入门 fastai" + +#: ../../source/tutorial-quickstart-fastai.rst:6 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train a " +"SqueezeNet model on MNIST using Flower and fastai. It is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `." +msgstr "" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" + +#: ../../source/tutorial-quickstart-fastai.rst:10 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:11 +msgid "Then, clone the code example directly from GitHub:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:18 +msgid "" +"This will create a new directory called `quickstart-fastai` containing " +"the following files:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:31 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:32 +#, fuzzy +msgid "Next, activate your environment, then run:" +msgstr "并激活虚拟环境:" + +#: ../../source/tutorial-quickstart-fastai.rst:41 +msgid "" +"This example by default runs the Flower Simulation Engine, creating a " +"federation of 10 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." 
+" Let's run the project:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:54 +#: ../../source/tutorial-quickstart-huggingface.rst:61 +#: ../../source/tutorial-quickstart-mlx.rst:60 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:55 +#: ../../source/tutorial-quickstart-pytorch.rst:62 +#: ../../source/tutorial-quickstart-tensorflow.rst:62 +msgid "With default arguments you will see an output like this one:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:98 +#: ../../source/tutorial-quickstart-huggingface.rst:112 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:105 +#: ../../source/tutorial-quickstart-pytorch.rst:103 +#: ../../source/tutorial-quickstart-tensorflow.rst:103 +msgid "" +"You can also override the parameters defined in the " +"``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:108 +#, fuzzy +msgid "" +"Check the `source code `_ of this tutorial in ``examples/quickstart-fasai`` " +"in the Flower GitHub repository." +msgstr "" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" + +#: ../../source/tutorial-quickstart-huggingface.rst:-1 +#, fuzzy +msgid "" +"Check out this Federating Learning quickstart tutorial for using Flower " +"with 🤗 HuggingFace Transformers in order to fine-tune an LLM." +msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 HuggingFace Transformers 来微调 LLM。" + +#: ../../source/tutorial-quickstart-huggingface.rst:4 +msgid "Quickstart 🤗 Transformers" +msgstr "🤗 Transformers快速入门" + +#: ../../source/tutorial-quickstart-huggingface.rst:6 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train a large " +"language model (LLM) on the `IMDB " +"`_ dataset using Flower" +" and the 🤗 Hugging Face Transformers library. It is recommended to create" +" a virtual environment and run everything within a :doc:`virtualenv " +"`." 
+msgstr "" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" + +#: ../../source/tutorial-quickstart-huggingface.rst:12 +msgid "" +"Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face " +"project. It will generate all the files needed to run, by default with " +"the Flower Simulation Engine, a federation of 10 nodes using |fedavg|_ " +"The dataset will be partitioned using |flowerdatasets|_'s " +"|iidpartitioner|_." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:17 +#: ../../source/tutorial-quickstart-mlx.rst:17 +#: ../../source/tutorial-quickstart-pytorch.rst:18 +#: ../../source/tutorial-quickstart-tensorflow.rst:18 +#, fuzzy +msgid "" +"Now that we have a rough idea of what this example is about, let's get " +"started. First, install Flower in your new environment:" +msgstr "现在,我们已经有了一个大致的概念,让我们开始吧。首先,我们需要安装 Flower。运行:" + +#: ../../source/tutorial-quickstart-huggingface.rst:25 +msgid "" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``HuggingFace``), give a name to your " +"project, and type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:33 +#: ../../source/tutorial-quickstart-mlx.rst:32 +#: ../../source/tutorial-quickstart-pytorch.rst:34 +#: ../../source/tutorial-quickstart-tensorflow.rst:34 +msgid "" +"After running it you'll notice a new directory with your project name has" +" been created. 
It should have the following structure:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:47 +#: ../../source/tutorial-quickstart-mlx.rst:46 +#: ../../source/tutorial-quickstart-pytorch.rst:48 +#: ../../source/tutorial-quickstart-tensorflow.rst:48 +msgid "" +"If you haven't yet installed the project and its dependencies, you can do" +" so by:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:54 +#: ../../source/tutorial-quickstart-pytorch.rst:55 +#: ../../source/tutorial-quickstart-tensorflow.rst:55 +msgid "To run the project, do:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:102 +msgid "You can also run the project with GPU as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:109 +msgid "" +"This will use the default arguments where each ``ClientApp`` will use 2 " +"CPUs and at most 4 ``ClientApp``\\s will run in a given GPU." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:120 +#: ../../source/tutorial-quickstart-mlx.rst:110 +#: ../../source/tutorial-quickstart-pytorch.rst:111 +msgid "" +"What follows is an explanation of each component in the project you just " +"created: dataset partition, the model, defining the ``ClientApp`` and " +"defining the ``ServerApp``." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:124 +#: ../../source/tutorial-quickstart-mlx.rst:114 +#: ../../source/tutorial-quickstart-pytorch.rst:115 +#: ../../source/tutorial-quickstart-tensorflow.rst:112 +#, fuzzy +msgid "The Data" +msgstr "加载数据" + +#: ../../source/tutorial-quickstart-huggingface.rst:126 +msgid "" +"This tutorial uses |flowerdatasets|_ to easily download and partition the" +" `IMDB `_ dataset. In " +"this example you'll make use of the |iidpartitioner|_ to generate " +"``num_partitions`` partitions. You can choose |otherpartitioners|_ " +"available in Flower Datasets. 
To tokenize the text, we will also load the" +" tokenizer from the pre-trained Transformer model that we'll use during " +"training - more on that in the next section. Each ``ClientApp`` will call" +" this function to create dataloaders with the data that correspond to " +"their data partition." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:171 +#: ../../source/tutorial-quickstart-mlx.rst:155 +#: ../../source/tutorial-quickstart-pytorch.rst:150 +#: ../../source/tutorial-quickstart-tensorflow.rst:139 +#, fuzzy +msgid "The Model" +msgstr "训练模型" + +#: ../../source/tutorial-quickstart-huggingface.rst:173 +#, fuzzy +msgid "" +"We will leverage 🤗 Hugging Face to federate the training of language " +"models over multiple clients using Flower. More specifically, we will " +"fine-tune a pre-trained Transformer model (|berttiny|_) for sequence " +"classification over the dataset of IMDB ratings. The end goal is to " +"detect if a movie rating is positive or negative. If you have access to " +"larger GPUs, feel free to use larger models!" +msgstr "" +"我们将利用Hugging Face技术,使用 Flower 在多个客户端上联邦训练语言模型。更具体地说,我们将对预先训练好的 " +"Transformer 模型(distilBERT)进行微调,以便在 IMDB 评分数据集上进行序列分类。最终目标是检测电影评分是正面还是负面。" + +#: ../../source/tutorial-quickstart-huggingface.rst:185 +msgid "" +"Note that here, ``model_name`` is a string that will be loaded from the " +"``Context`` in the ClientApp and ServerApp." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:188 +msgid "" +"In addition to loading the pretrained model weights and architecture, we " +"also include two utility functions to perform both training (i.e. " +"``train()``) and evaluation (i.e. ``test()``) using the above model. " +"These functions should look fairly familiar if you have some prior " +"experience with PyTorch. Note these functions do not have anything " +"specific to Flower. 
That being said, the training function will normally " +"be called, as we'll see later, from a Flower client passing its own data." +" In summary, your clients can use standard training/testing functions to " +"perform local training or evaluation:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:228 +#: ../../source/tutorial-quickstart-mlx.rst:199 +#: ../../source/tutorial-quickstart-pytorch.rst:224 +#: ../../source/tutorial-quickstart-tensorflow.rst:168 +#, fuzzy +msgid "The ClientApp" +msgstr "客户端" + +#: ../../source/tutorial-quickstart-huggingface.rst:230 +msgid "" +"The main changes we have to make to use 🤗 Hugging Face with Flower will " +"be found in the ``get_weights()`` and ``set_weights()`` functions. Under " +"the hood, the ``transformers`` library uses PyTorch, which means we can " +"reuse the ``get_weights()`` and ``set_weights()`` code that we defined in" +" the :doc:`Quickstart PyTorch ` tutorial. As" +" a reminder, in ``get_weights()``, PyTorch model parameters are extracted" +" and represented as a list of NumPy arrays. The ``set_weights()`` " +"function that's the opposite: given a list of NumPy arrays it applies " +"them to an existing PyTorch model. Doing this in fairly easy in PyTorch." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:241 +#: ../../source/tutorial-quickstart-pytorch.rst:234 +msgid "" +"The specific implementation of ``get_weights()`` and ``set_weights()`` " +"depends on the type of models you use. The ones shown below work for a " +"wide range of PyTorch models but you might need to adjust them if you " +"have more exotic model architectures." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:257 +#: ../../source/tutorial-quickstart-pytorch.rst:250 +msgid "" +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset. 
Similarly, the ``evaluate()`` method is used to evaluate the " +"model received on a held-out validation set that the client might have:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:283 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"``local-epochs`` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additional hyperparameters in ``pyproject.toml`` and access them here." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:316 +#: ../../source/tutorial-quickstart-mlx.rst:361 +#: ../../source/tutorial-quickstart-pytorch.rst:307 +#: ../../source/tutorial-quickstart-tensorflow.rst:232 +#, fuzzy +msgid "The ServerApp" +msgstr "服务器" + +#: ../../source/tutorial-quickstart-huggingface.rst:318 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"|serverappcomponents|_ as opposed to a |client|_ In this example we use " +"the `FedAvg` strategy. To it we pass a randomly initialized model that " +"will server as the global model to federated. Note that the value of " +"``fraction_fit`` is read from the run config. You can find the default " +"value defined in the ``pyproject.toml``." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:356 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system for an LLM." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:361 +msgid "" +"Check the source code of the extended version of this tutorial in " +"|quickstart_hf_link|_ in the Flower GitHub repository. 
For a " +"comprehensive example of a federated fine-tuning of an LLM with Flower, " +"refer to the |flowertune|_ example in the Flower GitHub repository." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:-1 +msgid "" +"Read this Federated Learning quickstart tutorial for creating an iOS app " +"using Flower to train a neural network on MNIST." +msgstr "阅读本联邦学习快速入门教程,了解如何使用 Flower 创建 iOS 应用程序,并在 MNIST 上训练神经网络。" + +#: ../../source/tutorial-quickstart-ios.rst:4 +msgid "Quickstart iOS" +msgstr "快速入门 iOS" + +#: ../../source/tutorial-quickstart-ios.rst:9 +msgid "" +"In this tutorial we will learn how to train a Neural Network on MNIST " +"using Flower and CoreML on iOS devices." +msgstr "在本教程中,我们将学习如何在 iOS 设备上使用 Flower 和 CoreML 在 MNIST 上训练神经网络。" + +#: ../../source/tutorial-quickstart-ios.rst:12 +#, fuzzy +msgid "" +"First of all, for running the Flower Python server, it is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `. For the Flower client " +"implementation in iOS, it is recommended to use Xcode as our IDE." +msgstr "" +"首先,为了运行 Flower Python 服务器,建议创建一个虚拟环境,并在 `virtualenv " +"`_ 中运行一切。对于在 iOS 中实现 " +"Flower 客户端,建议使用 Xcode 作为我们的集成开发环境。" + +#: ../../source/tutorial-quickstart-ios.rst:17 +msgid "" +"Our example consists of one Python *server* and two iPhone *clients* that" +" all have the same model." +msgstr "我们的示例包括一个 Python *服务器*和两个 iPhone *客户端*,它们都具有相同的模型。" + +#: ../../source/tutorial-quickstart-ios.rst:20 +msgid "" +"*Clients* are responsible for generating individual weight updates for " +"the model based on their local datasets. These updates are then sent to " +"the *server* which will aggregate them to produce a better model. " +"Finally, the *server* sends this improved version of the model back to " +"each *client*. A complete cycle of weight updates is called a *round*." 
+msgstr "*客户端*负责根据其本地数据集为模型生成独立的模型参数。然后,这些参数更新会被发送到*服务器*,由*服务器*汇总后生成一个更好的模型。最后,*服务器*将改进后的模型发送回每个*客户端*。一个完整的参数更新周期称为一*轮*。" + +#: ../../source/tutorial-quickstart-ios.rst:26 +msgid "" +"Now that we have a rough idea of what is going on, let's get started to " +"setup our Flower server environment. We first need to install Flower. You" +" can do this by using pip:" +msgstr "现在我们已经有了一个大致的概念,让我们开始设置 Flower 服务器环境吧。首先,我们需要安装 Flower。你可以使用 pip 来安装:" + +#: ../../source/tutorial-quickstart-ios.rst:33 +msgid "Or Poetry:" +msgstr "或者Poetry:" + +#: ../../source/tutorial-quickstart-ios.rst:40 +#: ../../source/tutorial-quickstart-scikitlearn.rst:43 +#: ../../source/tutorial-quickstart-xgboost.rst:65 +msgid "Flower Client" +msgstr "Flower 客户端" + +#: ../../source/tutorial-quickstart-ios.rst:42 +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training using CoreML as our local training pipeline and " +"MNIST as our dataset. For simplicity reasons we will use the complete " +"Flower client with CoreML, that has been implemented and stored inside " +"the Swift SDK. The client implementation can be seen below:" +msgstr "" +"现在我们已经安装了所有依赖项,让我们使用 CoreML 作为本地训练框架和 MNIST " +"作为数据集,运行一个简单的分布式训练。为了简单起见,我们将使用 CoreML 的完整 Flower 客户端,该客户端已在 Swift SDK " +"中实现并存储。客户端实现如下:" + +#: ../../source/tutorial-quickstart-ios.rst:80 +#, fuzzy +msgid "" +"Let's create a new application project in Xcode and add ``flwr`` as a " +"dependency in your project. For our application, we will store the logic " +"of our app in ``FLiOSModel.swift`` and the UI elements in " +"``ContentView.swift``. We will focus more on ``FLiOSModel.swift`` in this" +" quickstart. Please refer to the `full code example " +"`_ to learn more " +"about the app." 
+msgstr "" +"让我们在 Xcode 中创建一个新的应用程序项目,并在项目中添加 :code:`flwr` 作为依赖关系。对于我们的应用程序,我们将在 " +":code:`FLiOSModel.swift` 中存储应用程序的逻辑,在 :code:`ContentView.swift` 中存储 UI " +"元素。在本快速入门中,我们将更多地关注 :code:`FLiOSModel.swift`。请参阅 `完整代码示例 " +"`_ 以了解更多有关应用程序的信息。" + +#: ../../source/tutorial-quickstart-ios.rst:86 +#, fuzzy +msgid "Import Flower and CoreML related packages in ``FLiOSModel.swift``:" +msgstr "在 :code:`FLiOSModel.swift` 中导入 Flower 和 CoreML 相关软件包:" + +#: ../../source/tutorial-quickstart-ios.rst:94 +#, fuzzy +msgid "" +"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " +"will be bundled inside the application during deployment to your iOS " +"device. We need to pass the url to access mlmodel and run CoreML machine " +"learning processes, it can be retrieved by calling the function " +"``Bundle.main.url``. For the MNIST dataset, we need to preprocess it into" +" ``MLBatchProvider`` object. The preprocessing is done inside " +"``DataLoader.swift``." +msgstr "" +"然后通过拖放将 mlmodel 添加到项目中,在部署到 iOS 设备时,mlmodel 将被捆绑到应用程序中。我们需要传递 url 以访问 " +"mlmodel 并运行 CoreML 机器学习进程,可通过调用函数 :code:`Bundle.main.url` 获取。对于 MNIST " +"数据集,我们需要将其预处理为 :code:`MLBatchProvider` 对象。预处理在 :code:`DataLoader.swift` " +"中完成。" + +#: ../../source/tutorial-quickstart-ios.rst:112 +#, fuzzy +msgid "" +"Since CoreML does not allow the model parameters to be seen before " +"training, and accessing the model parameters during or after the training" +" can only be done by specifying the layer name, we need to know this " +"information beforehand, through looking at the model specification, which" +" are written as proto files. The implementation can be seen in " +"``MLModelInspect``." +msgstr "" +"由于 CoreML 不允许在训练前查看模型参数,而在训练过程中或训练后访问模型参数只能通过指定层名来完成,因此我们需要事先通过查看模型规范(写成 " +"proto 文件)来了解这些信息。具体实现可参见 :code:`MLModelInspect`。" + +#: ../../source/tutorial-quickstart-ios.rst:118 +#, fuzzy +msgid "" +"After we have all of the necessary information, let's create our Flower " +"client." 
+msgstr "获得所有必要信息后,让我们创建 Flower 客户端。" + +#: ../../source/tutorial-quickstart-ios.rst:133 +#, fuzzy +msgid "" +"Then start the Flower gRPC client and start communicating to the server " +"by passing our Flower client to the function ``startFlwrGRPC``." +msgstr "然后启动 Flower gRPC 客户端,并通过将 Flower 客户端传递给函数 :code:`startFlwrGRPC` 来开始与服务器通信。" + +#: ../../source/tutorial-quickstart-ios.rst:141 +#, fuzzy +msgid "" +"That's it for the client. We only have to implement ``Client`` or call " +"the provided ``MLFlwrClient`` and call ``startFlwrGRPC()``. The attribute" +" ``hostname`` and ``port`` tells the client which server to connect to. " +"This can be done by entering the hostname and port in the application " +"before clicking the start button to start the federated learning process." +msgstr "" +"这就是客户端。我们只需实现 :code:`Client` 或调用提供的 :code:`MLFlwrClient` 并调用 " +":code:`startFlwrGRPC()`。属性 :code:`hostname` 和 :code:`port` " +"会告诉客户端要连接到哪个服务器。这可以通过在应用程序中输入主机名和端口来实现,然后再点击开始按钮启动联邦学习进程。" + +#: ../../source/tutorial-quickstart-ios.rst:148 +#: ../../source/tutorial-quickstart-scikitlearn.rst:179 +#: ../../source/tutorial-quickstart-xgboost.rst:358 +msgid "Flower Server" +msgstr "Flower 服务器" + +#: ../../source/tutorial-quickstart-ios.rst:150 +#, fuzzy +msgid "" +"For simple workloads we can start a Flower server and leave all the " +"configuration possibilities at their default values. In a file named " +"``server.py``, import Flower and start the server:" +msgstr "" +"对于简单的工作负载,我们可以启动 Flower 服务器,并将所有配置选项保留为默认值。在名为 :code:`server.py` 的文件中,导入 " +"Flower 并启动服务器:" + +#: ../../source/tutorial-quickstart-ios.rst:161 +#: ../../source/tutorial-quickstart-scikitlearn.rst:254 +msgid "Train the model, federated!" +msgstr "联邦训练模型!" + +#: ../../source/tutorial-quickstart-ios.rst:163 +#: ../../source/tutorial-quickstart-xgboost.rst:590 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. 
FL systems usually have a server and " +"multiple clients. We therefore have to start the server first:" +msgstr "客户端和服务器都已准备就绪,我们现在可以运行一切,看看联邦学习的实际效果。FL 系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" + +#: ../../source/tutorial-quickstart-ios.rst:171 +msgid "" +"Once the server is running we can start the clients in different " +"terminals. Build and run the client through your Xcode, one through Xcode" +" Simulator and the other by deploying it to your iPhone. To see more " +"about how to deploy your app to iPhone or Simulator visit `here " +"`_." +msgstr "" +"服务器运行后,我们就可以在不同的终端启动客户端。通过 Xcode 构建并运行客户端,一个通过 Xcode 模拟器,另一个通过部署到 " +"iPhone。要了解更多有关如何将应用程序部署到 iPhone 或模拟器的信息,请访问 `此处 " +"`_。" + +#: ../../source/tutorial-quickstart-ios.rst:177 +#, fuzzy +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system in your ios device. The full `source code " +"`_ for this " +"example can be found in ``examples/ios``." +msgstr "" +"恭喜您! 您已经成功地在 ios 设备中构建并运行了第一个联邦学习系统。本示例的`完整源代码 " +"`_ 可在 " +":code:`examples/ios` 中找到。" + +#: ../../source/tutorial-quickstart-jax.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Jax to train a linear regression model on a scikit-learn dataset." +msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 Jax 在 scikit-learn 数据集上训练线性回归模型。" + +#: ../../source/tutorial-quickstart-jax.rst:4 +msgid "Quickstart JAX" +msgstr "快速入门 JAX" + +#: ../../source/tutorial-quickstart-jax.rst:9 +msgid "" +"This tutorial will show you how to use Flower to build a federated " +"version of an existing JAX workload. We are using JAX to train a linear " +"regression model on a scikit-learn dataset. We will structure the example" +" similar to our `PyTorch - From Centralized To Federated " +"`_ walkthrough. First, we build a centralized " +"training approach based on the `Linear Regression with JAX " +"`_" +" tutorial`. 
Then, we build upon the centralized training code to run the " +"training in a federated fashion." +msgstr "" +"本教程将向您展示如何使用 Flower 构建现有 JAX 的联邦学习版本。我们将使用 JAX 在 scikit-learn " +"数据集上训练线性回归模型。我们将采用与 `PyTorch - 从集中式到联邦式 " +"`_ 教程中类似的示例结构。首先,我们根据 `JAX 的线性回归 " +"`_" +" 教程构建集中式训练方法。然后,我们在集中式训练代码的基础上以联邦方式运行训练。" + +#: ../../source/tutorial-quickstart-jax.rst:20 +#, fuzzy +msgid "" +"Before we start building our JAX example, we need install the packages " +"``jax``, ``jaxlib``, ``scikit-learn``, and ``flwr``:" +msgstr "" +"在开始构建 JAX 示例之前,我们需要安装软件包 :code:`jax`、:code:`jaxlib`、:code:`scikit-learn` " +"和 :code:`flwr`:" + +#: ../../source/tutorial-quickstart-jax.rst:28 +msgid "Linear Regression with JAX" +msgstr "使用 JAX 进行线性回归" + +#: ../../source/tutorial-quickstart-jax.rst:30 +#, fuzzy +msgid "" +"We begin with a brief description of the centralized training code based " +"on a ``Linear Regression`` model. If you want a more in-depth explanation" +" of what's going on then have a look at the official `JAX documentation " +"`_." +msgstr "" +"首先,我们将简要介绍基于 :code:`Linear Regression` 模型的集中式训练代码。如果您想获得更深入的解释,请参阅官方的 " +"`JAX 文档 `_。" + +#: ../../source/tutorial-quickstart-jax.rst:34 +#, fuzzy +msgid "" +"Let's create a new file called ``jax_training.py`` with all the " +"components required for a traditional (centralized) linear regression " +"training. First, the JAX packages ``jax`` and ``jaxlib`` need to be " +"imported. In addition, we need to import ``sklearn`` since we use " +"``make_regression`` for the dataset and ``train_test_split`` to split the" +" dataset into a training and test set. You can see that we do not yet " +"import the ``flwr`` package for federated learning. This will be done " +"later." 
+msgstr "" +"让我们创建一个名为 :code:`jax_training.py` 的新文件,其中包含传统(集中式)线性回归训练所需的所有组件。首先,需要导入 " +"JAX 包 :code:`jax` 和 :code:`jaxlib`。此外,我们还需要导入 :code:`sklearn`,因为我们使用 " +":code:`make_regression` 创建数据集,并使用 :code:`train_test_split` " +"将数据集拆分成训练集和测试集。您可以看到,我们还没有导入用于联邦学习的 :code:`flwr` 软件包,这将在稍后完成。" + +#: ../../source/tutorial-quickstart-jax.rst:51 +#, fuzzy +msgid "The ``load_data()`` function loads the mentioned training and test sets." +msgstr ":code:`load_data()` 函数会加载上述训练集和测试集。" + +#: ../../source/tutorial-quickstart-jax.rst:63 +#, fuzzy +msgid "" +"The model architecture (a very simple ``Linear Regression`` model) is " +"defined in ``load_model()``." +msgstr "模型结构(一个非常简单的 :code:`Linear Regression` 线性回归模型)在 :code:`load_model()` 中定义。" + +#: ../../source/tutorial-quickstart-jax.rst:73 +#, fuzzy +msgid "" +"We now need to define the training (function ``train()``), which loops " +"over the training set and measures the loss (function ``loss_fn()``) for " +"each batch of training examples. The loss function is separate since JAX " +"takes derivatives with a ``grad()`` function (defined in the ``main()`` " +"function and called in ``train()``)." +msgstr "" +"现在,我们需要定义训练函数( :code:`train()`)。它循环遍历训练集,并计算每批训练数据的损失值(函数 " +":code:`loss_fn()`)。由于 JAX 使用 :code:`grad()` 函数提取导数(在 :code:`main()` " +"函数中定义,并在 :code:`train()` 中调用),因此损失函数是独立的。" + +#: ../../source/tutorial-quickstart-jax.rst:95 +#, fuzzy +msgid "" +"The evaluation of the model is defined in the function ``evaluation()``. " +"The function takes all test examples and measures the loss of the linear " +"regression model." +msgstr "模型的评估在函数 :code:`evaluation()` 中定义。该函数获取所有测试数据,并计算线性回归模型的损失值。" + +#: ../../source/tutorial-quickstart-jax.rst:107 +#, fuzzy +msgid "" +"Having defined the data loading, model architecture, training, and " +"evaluation we can put everything together and train our model using JAX. " +"As already mentioned, the ``jax.grad()`` function is defined in " +"``main()`` and passed to ``train()``." 
+msgstr "" +"在定义了数据加载、模型架构、训练和评估之后,我们就可以把这些放在一起,使用 JAX " +"训练我们的模型了。如前所述,:code:`jax.grad()` 函数在 :code:`main()` 中定义,并传递给 " +":code:`train()`。" + +#: ../../source/tutorial-quickstart-jax.rst:126 +msgid "You can now run your (centralized) JAX linear regression workload:" +msgstr "现在您可以运行(集中式)JAX 线性回归工作了:" + +#: ../../source/tutorial-quickstart-jax.rst:132 +msgid "" +"So far this should all look fairly familiar if you've used JAX before. " +"Let's take the next step and use what we've built to create a simple " +"federated learning system consisting of one server and two clients." +msgstr "到目前为止,如果你以前使用过 JAX,就会对这一切感到很熟悉。下一步,让我们利用已构建的代码创建一个简单的联邦学习系统(一个服务器和两个客户端)。" + +#: ../../source/tutorial-quickstart-jax.rst:137 +msgid "JAX meets Flower" +msgstr "JAX 结合 Flower" + +#: ../../source/tutorial-quickstart-jax.rst:139 +#, fuzzy +msgid "" +"The concept of federating an existing workload is always the same and " +"easy to understand. We have to start a *server* and then use the code in " +"``jax_training.py`` for the *clients* that are connected to the *server*." +" The *server* sends model parameters to the clients. The *clients* run " +"the training and update the parameters. The updated parameters are sent " +"back to the *server*, which averages all received parameter updates. This" +" describes one round of the federated learning process, and we repeat " +"this for multiple rounds." +msgstr "" +"把现有工作联邦化的概念始终是相同的,也很容易理解。我们要启动一个*服务器*,然后对连接到*服务器*的*客户端*运行 " +":code:`jax_training.py`中的代码。*服务器*向客户端发送模型参数,*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后服务器对所有收到的参数进行平均聚合。以上的描述构成了一轮联邦学习,我们将重复进行多轮学习。" + +#: ../../source/tutorial-quickstart-jax.rst:167 +#, fuzzy +msgid "" +"Finally, we will define our *client* logic in ``client.py`` and build " +"upon the previously defined JAX training in ``jax_training.py``. 
Our " +"*client* needs to import ``flwr``, but also ``jax`` and ``jaxlib`` to " +"update the parameters on our JAX model:" +msgstr "" +"最后,我们将在 :code:`client.py` 中定义我们的 *client* 逻辑,并以之前在 " +":code:`jax_training.py` 中定义的 JAX 训练为基础。我们的 *client* 需要导入 " +":code:`flwr`,还需要导入 :code:`jax` 和 :code:`jaxlib` 以更新 JAX 模型的参数:" + +#: ../../source/tutorial-quickstart-jax.rst:182 +#, fuzzy +msgid "" +"Implementing a Flower *client* basically means implementing a subclass of" +" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " +"implementation will be based on ``flwr.client.NumPyClient`` and we'll " +"call it ``FlowerClient``. ``NumPyClient`` is slightly easier to implement" +" than ``Client`` if you use a framework with good NumPy interoperability " +"(like JAX) because it avoids some of the boilerplate that would otherwise" +" be necessary. ``FlowerClient`` needs to implement four methods, two " +"methods for getting/setting model parameters, one method for training the" +" model, and one method for testing the model:" +msgstr "" +"实现一个 Flower *client*基本上意味着去实现一个 :code:`flwr.client.Client` 或 " +":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " +":code:`flwr.client.NumPyClient`,并将其命名为 :code:`FlowerClient`。如果使用具有良好 " +"NumPy 互操作性的框架(如 JAX),:code:`NumPyClient` 比 " +":code:`Client`更容易实现,因为它避免了一些不必要的操作。:code:`FlowerClient` " +"需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" + +#: ../../source/tutorial-quickstart-jax.rst:194 +#, fuzzy +msgid "``set_parameters (optional)``" +msgstr ":code:`set_parameters (可选)`" + +#: ../../source/tutorial-quickstart-jax.rst:193 +#, fuzzy +msgid "transform parameters to NumPy ``ndarray``'s" +msgstr "将参数转换为 NumPy :code:`ndarray`格式" + +#: ../../source/tutorial-quickstart-jax.rst:203 +msgid "get the updated local model parameters and return them to the server" +msgstr "获取更新后的本地模型参数并返回服务器" + +#: ../../source/tutorial-quickstart-jax.rst:208 +msgid "return the local loss to the server" +msgstr "向服务器返回本地损失值" + +#: 
../../source/tutorial-quickstart-jax.rst:210 +#, fuzzy +msgid "" +"The challenging part is to transform the JAX model parameters from " +"``DeviceArray`` to ``NumPy ndarray`` to make them compatible with " +"`NumPyClient`." +msgstr "" +"具有挑战性的部分是将 JAX 模型参数从 :code:`DeviceArray` 转换为 :code:`NumPy ndarray`,使其与 " +"`NumPyClient` 兼容。" + +#: ../../source/tutorial-quickstart-jax.rst:213 +#, fuzzy +msgid "" +"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " +"functions ``train()`` and ``evaluate()`` previously defined in " +"``jax_training.py``. So what we really do here is we tell Flower through " +"our ``NumPyClient`` subclass which of our already defined functions to " +"call for training and evaluation. We included type annotations to give " +"you a better understanding of the data types that get passed around." +msgstr "" +"这两个 :code:`NumPyClient` 方法 :code:`fit` 和 :code:`evaluate` 使用了之前在 " +":code:`jax_training.py` 中定义的函数 :code:`train()` 和 " +":code:`evaluate()`。因此,我们在这里要做的就是通过 :code:`NumPyClient` 子类告知 Flower " +"在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让您更好地理解传递的数据类型。" + +#: ../../source/tutorial-quickstart-jax.rst:286 +msgid "Having defined the federation process, we can run it." +msgstr "定义了联邦进程后,我们就可以运行它了。" + +#: ../../source/tutorial-quickstart-jax.rst:315 +msgid "" +"in each window (make sure that the server is still running before you do " +"so) and see your JAX project run federated learning across two clients. " +"Congratulations!" +msgstr "确保服务器仍在运行,然后在每个客户端窗口就能看到你的 JAX 项目在两个客户端上运行联邦学习了。祝贺!" + +#: ../../source/tutorial-quickstart-jax.rst:321 +msgid "" +"The source code of this example was improved over time and can be found " +"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " +"clients load the same dataset." +msgstr "" +"此示例的源代码经过长期改进,可在此处找到: `Quickstart JAX " +"`_。我们的示例有些过于简单,因为两个客户端都加载了相同的数据集。" + +#: ../../source/tutorial-quickstart-jax.rst:325 +msgid "" +"You're now prepared to explore this topic further. 
How about using a more" +" sophisticated model or using a different dataset? How about adding more " +"clients?" +msgstr "现在,您已准备好进行更深一步探索了。例如使用更复杂的模型或使用不同的数据集会如何?增加更多客户端会如何?" + +#: ../../source/tutorial-quickstart-mlx.rst:4 +#, fuzzy +msgid "Quickstart MLX" +msgstr "快速入门 MLX" + +#: ../../source/tutorial-quickstart-mlx.rst:6 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train simple MLP" +" on MNIST using Flower and MLX. It is recommended to create a virtual " +"environment and run everything within a :doc:`virtualenv `." +msgstr "" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" + +#: ../../source/tutorial-quickstart-mlx.rst:10 +msgid "" +"Let's use `flwr new` to create a complete Flower+MLX project. It will " +"generate all the files needed to run, by default with the Simulation " +"Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:25 +msgid "" +"Then, run the command below. You will be prompted to select of the " +"available templates (choose ``MLX``), give a name to your project, and " +"type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:53 +msgid "To run the project do:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:102 +msgid "" +"You can also override the parameters defined in " +"``[tool.flwr.app.config]`` section in the ``pyproject.toml`` like this:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:116 +msgid "" +"We will use `Flower Datasets `_ to " +"easily download and partition the `MNIST` dataset. In this example you'll" +" make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. 
You can choose `other " +"partitioners `_ available in Flower Datasets:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:157 +msgid "" +"We define the model as in the `centralized MLX example " +"`_, it's a " +"simple MLP:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:180 +msgid "" +"We also define some utility functions to test our model and to iterate " +"over batches." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:201 +msgid "" +"The main changes we have to make to use `MLX` with `Flower` will be found" +" in the ``get_params()`` and ``set_params()`` functions. Indeed, MLX " +"doesn't provide an easy way to convert the model parameters into a list " +"of ``np.array`` objects (the format we need for the serialization of the " +"messages to work)." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:206 +msgid "The way MLX stores its parameters is as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:219 +msgid "" +"Therefore, to get our list of ``np.array`` objects, we need to extract " +"each array and convert them into a NumPy array:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:228 +msgid "" +"For the ``set_params()`` function, we perform the reverse operation. We " +"receive a list of NumPy arrays and want to convert them into MLX " +"parameters. Therefore, we iterate through pairs of parameters and assign " +"them to the `weight` and `bias` keys of each layer dict:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:243 +msgid "" +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:259 +msgid "" +"Here, after updating the parameters, we perform the training as in the " +"centralized case, and return the new parameters." 
+msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:262 +msgid "And for the ``evaluate()`` method of the client:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:272 +msgid "" +"We also begin by updating the parameters with the ones sent by the " +"server, and then we compute the loss and accuracy using the functions " +"defined above. In the constructor of the ``FlowerClient`` we instantiate " +"the `MLP` model as well as other components such as the optimizer." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:277 +#, fuzzy +msgid "Putting everything together we have:" +msgstr "把所有东西放在一起" + +#: ../../source/tutorial-quickstart-mlx.rst:331 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that " +"``context`` enables you to get access to hyperparemeters defined in " +"``pyproject.toml`` to configure the run. In this tutorial we access, " +"among other hyperparameters, the ``local-epochs`` setting to control the " +"number of epochs a ``ClientApp`` will perform when running the ``fit()`` " +"method." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:363 +msgid "" +"To construct a ``ServerApp``, we define a ``server_fn()`` callback with " +"an identical signature to that of ``client_fn()``, but the return type is" +" `ServerAppComponents `_ as " +"opposed to `Client `_. In this example we use the " +"``FedAvg`` strategy." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:386 +#: ../../source/tutorial-quickstart-pytorch.rst:344 +#: ../../source/tutorial-quickstart-tensorflow.rst:266 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:390 +#, fuzzy +msgid "" +"Check the `source code `_ of the extended version of this tutorial in ``examples" +"/quickstart-mlx`` in the Flower GitHub repository." 
+msgstr "" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" + +#: ../../source/tutorial-quickstart-pandas.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Pandas to perform Federated Analytics." +msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 Pandas 执行联邦分析。" + +#: ../../source/tutorial-quickstart-pandas.rst:4 +msgid "Quickstart Pandas" +msgstr "快速入门Pandas" + +#: ../../source/tutorial-quickstart-pandas.rst:9 +msgid "Let's build a federated analytics system using Pandas and Flower!" +msgstr "让我们使用 Pandas 和 Flower 建立一个联邦分析系统!" + +#: ../../source/tutorial-quickstart-pandas.rst:11 +msgid "" +"Please refer to the `full code example " +"`_ " +"to learn more." +msgstr "" +"请参阅 `完整代码示例 `_\" 了解更多信息。" + +#: ../../source/tutorial-quickstart-pytorch.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with PyTorch to train a CNN model on MNIST." +msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 PyTorch 在 MNIST 上训练 CNN 模型。" + +#: ../../source/tutorial-quickstart-pytorch.rst:6 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train a " +"Convolutional Neural Network on CIFAR-10 using Flower and PyTorch. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." +msgstr "" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" + +#: ../../source/tutorial-quickstart-pytorch.rst:11 +msgid "" +"Let's use `flwr new` to create a complete Flower+PyTorch project. It will" +" generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:26 +msgid "" +"Then, run the command below. 
You will be prompted to select one of the " +"available templates (choose ``PyTorch``), give a name to your project, " +"and type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:117 +msgid "" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create dataloaders with the data" +" that correspond to their data partition." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:152 +msgid "" +"We defined a simple Convolutional Neural Network (CNN), but feel free to " +"replace it with a more sophisticated model if you'd like:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:177 +msgid "" +"In addition to defining the model architecture, we also include two " +"utility functions to perform both training (i.e. ``train()``) and " +"evaluation (i.e. ``test()``) using the above model. These functions " +"should look fairly familiar if you have some prior experience with " +"PyTorch. Note these functions do not have anything specific to Flower. " +"That being said, the training function will normally be called, as we'll " +"see later, from a Flower client passing its own data. In summary, your " +"clients can use standard training/testing functions to perform local " +"training or evaluation:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:226 +msgid "" +"The main changes we have to make to use `PyTorch` with `Flower` will be " +"found in the ``get_weights()`` and ``set_weights()`` functions. In " +"``get_weights()`` PyTorch model parameters are extracted and represented " +"as a list of NumPy arrays. 
The ``set_weights()`` function that's the " +"oposite: given a list of NumPy arrays it applies them to an existing " +"PyTorch model. Doing this in fairly easy in PyTorch." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:282 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"`local-epochs` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additioinal hyperparameters in ``pyproject.toml`` and access them here." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:309 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will server as " +"the global model to federated. Note that the value of ``fraction_fit`` is" +" read from the run config. You can find the default value defined in the " +"``pyproject.toml``." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:348 +#, fuzzy +msgid "" +"Check the `source code `_ of the extended version of this tutorial in " +"``examples/quickstart-pytorch`` in the Flower GitHub repository." +msgstr "" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" + +#: ../../source/tutorial-quickstart-pytorch.rst:354 +#: ../../source/tutorial-quickstart-tensorflow.rst:278 +#, fuzzy +msgid "Video tutorial" +msgstr "教程" + +#: ../../source/tutorial-quickstart-pytorch.rst:358 +msgid "" +"The video shown below shows how to setup a PyTorch + Flower project using" +" our previously recommended APIs. 
A new video tutorial will be released " +"that shows the new APIs (as the content above does)" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:4 +msgid "Quickstart PyTorch Lightning" +msgstr "快速入门 PyTorch Lightning" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:6 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train an " +"AutoEncoder model on MNIST using Flower and PyTorch Lightning. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." +msgstr "" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:19 +msgid "" +"This will create a new directory called `quickstart-pytorch-lightning` " +"containing the following files:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:42 +msgid "" +"By default, Flower Simulation Engine will be started and it will create a" +" federation of 4 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" To run the project, do:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:93 +msgid "" +"Each simulated `ClientApp` (two per round) will also log a summary of " +"their local training process. Expect this output to be similar to:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:115 +#, fuzzy +msgid "" +"Check the `source code `_ of this tutorial in ``examples" +"/quickstart-pytorch-lightning`` in the Flower GitHub repository." +msgstr "" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with scikit-learn to train a linear regression model." 
+msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 scikit-learn 训练线性回归模型。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:4 +msgid "Quickstart scikit-learn" +msgstr "scikit-learn快速入门" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:9 +#, fuzzy +msgid "" +"In this tutorial, we will learn how to train a ``Logistic Regression`` " +"model on MNIST using Flower and scikit-learn." +msgstr "" +"在本教程中,我们将学习如何使用 Flower 和 scikit-learn 在 MNIST 上训练一个 :code:`Logistic " +"Regression` 模型。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:12 +#, fuzzy +msgid "" +"It is recommended to create a virtual environment and run everything " +"within this :doc:`virtualenv `." +msgstr "" +"建议创建一个虚拟环境,并在此 `virtualenv `_ 中运行所有内容。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:15 +msgid "" +"Our example consists of one *server* and two *clients* all having the " +"same model." +msgstr "我们的例子包括一个*服务器*和两个*客户端*,它们都有相同的模型。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:17 +msgid "" +"*Clients* are responsible for generating individual model parameter " +"updates for the model based on their local datasets. These updates are " +"then sent to the *server* which will aggregate them to produce an updated" +" global model. Finally, the *server* sends this improved version of the " +"model back to each *client*. A complete cycle of parameters updates is " +"called a *round*." +msgstr "*客户端*负责根据其本地数据集为模型生成单独的模型参数更新。然后,这些参数更新将被发送到*服务器*,由*服务器*汇总后生成一个更新的全局模型。最后,*服务器*将这一改进版模型发回给每个*客户端*。一个完整的参数更新周期称为一*轮*。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:23 +msgid "" +"Now that we have a rough idea of what is going on, let's get started. We " +"first need to install Flower. 
You can do this by running:" +msgstr "现在,我们已经有了一个大致的概念,让我们开始吧。首先,我们需要安装 Flower。运行:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:30 +#, fuzzy +msgid "Since we want to use scikit-learn, let's go ahead and install it:" +msgstr "既然我们要使用 scikit-learn,那就继续安装吧:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:36 +msgid "Or simply install all dependencies using Poetry:" +msgstr "或者直接使用 Poetry 安装所有依赖项:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:45 +#, fuzzy +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training with two clients and one server. However, before " +"setting up the client and server, we will define all functionalities that" +" we need for our federated learning setup within ``utils.py``. The " +"``utils.py`` contains different functions defining all the machine " +"learning basics:" +msgstr "" +"现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。不过,在设置客户端和服务器之前,我们将在 " +":code:`utils.py` 中定义联邦学习设置所需的所有功能。:code:`utils.py`包含定义所有机器学习基础知识的不同函数:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:51 +#, fuzzy +msgid "``get_model_parameters()``" +msgstr ":code:`get_model_parameters()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +#, fuzzy +msgid "Returns the parameters of a ``sklearn`` LogisticRegression model" +msgstr "返回 :code:`sklearn` LogisticRegression 模型的参数" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:53 +#, fuzzy +msgid "``set_model_params()``" +msgstr ":code:`set_model_params()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:54 +#, fuzzy +msgid "Sets the parameters of a ``sklearn`` LogisticRegression model" +msgstr "设置:code:`sklearn`的LogisticRegression模型的参数" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 +#, fuzzy +msgid "``set_initial_params()``" +msgstr ":code:`set_initial_params()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 +msgid "Initializes the model parameters that the Flower server will ask for" +msgstr "初始化 Flower 
服务器将要求的模型参数" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:58 +#, fuzzy +msgid "" +"Please check out ``utils.py`` `here " +"`_ for more details. The pre-defined functions are used in" +" the ``client.py`` and imported. The ``client.py`` also requires to " +"import several packages such as Flower and scikit-learn:" +msgstr "" +"更多详情请查看 :code:`utils.py`` 这里 " +"`_。在 :code:`client.py` 中使用并导入了预定义函数。:code:`client.py` " +"还需要导入几个软件包,如 Flower 和 scikit-learn:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:75 +#, fuzzy +msgid "" +"Prior to local training, we need to load the MNIST dataset, a popular " +"image classification dataset of handwritten digits for machine learning, " +"and partition the dataset for FL. This can be conveniently achieved using" +" `Flower Datasets `_. The " +"``FederatedDataset.load_partition()`` method loads the partitioned " +"training set for each partition ID defined in the ``--partition-id`` " +"argument." +msgstr "" +"在本地训练之前,我们需要加载 MNIST 数据集(一个用于机器学习的流行手写数字图像分类数据集),并对数据集进行 FL 分区。使用 " +"\"Flower Datasets " +"`_\"可以方便地实现这一点。:code:`FederatedDataset.load_partition()`" +" 方法为 :code:`--partition-id` 参数中定义的每个分区 ID 加载分区训练集。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:106 +#, fuzzy +msgid "" +"Next, the logistic regression model is defined and initialized with " +"``utils.set_initial_params()``." +msgstr "接下来,使用 :code:`utils.set_initial_params()` 对逻辑回归模型进行定义和初始化。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:119 +#, fuzzy +msgid "" +"The Flower server interacts with clients through an interface called " +"``Client``. When the server selects a particular client for training, it " +"sends training instructions over the network. The client receives those " +"instructions and calls one of the ``Client`` methods to run your code " +"(i.e., to fit the logistic regression we defined earlier)." 
+msgstr "" +"Flower 服务器通过一个名为 :code:`Client` " +"的接口与客户端交互。当服务器选择一个特定的客户端进行训练时,它会通过网络发送训练指令。客户端接收到这些指令后,会调用 :code:`Client`" +" 方法之一来运行您的代码(即拟合我们之前定义的逻辑回归)。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:124 +#, fuzzy +msgid "" +"Flower provides a convenience class called ``NumPyClient`` which makes it" +" easier to implement the ``Client`` interface when your workload uses " +"scikit-learn. Implementing ``NumPyClient`` usually means defining the " +"following methods (``set_parameters`` is optional though):" +msgstr "" +"Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当你的工作负载使用 scikit-learn " +"时,它可以让你更容易地实现 :code:`Client` 接口。实现 :code:`NumPyClient` " +"通常意味着定义以下方法(:code:`set_parameters` 是可选的):" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:130 +msgid "return the model weight as a list of NumPy ndarrays" +msgstr "以 NumPy ndarrays 列表形式返回模型参数" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:132 +#, fuzzy +msgid "``set_parameters`` (optional)" +msgstr ":code:`set_parameters` (可选)" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:132 +msgid "" +"update the local model weights with the parameters received from the " +"server" +msgstr "用从服务器接收到的参数更新本地模型参数" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:133 +#, fuzzy +msgid "is directly imported with ``utils.set_model_params()``" +msgstr "直接导入 :code:`utils.set_model_params()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:135 +msgid "set the local model weights" +msgstr "设置本地模型参数" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:136 +msgid "train the local model" +msgstr "训练本地模型" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:137 +#, fuzzy +msgid "return the updated local model weights" +msgstr "接收更新的本地模型参数" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:139 +msgid "test the local model" +msgstr "测试本地模型" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:141 +msgid "The methods can be implemented in the following way:" +msgstr "这些方法可以通过以下方式实现:" + +#: 
../../source/tutorial-quickstart-scikitlearn.rst:163 +#, fuzzy +msgid "" +"We can now create an instance of our class ``MnistClient`` and add one " +"line to actually run this client:" +msgstr "现在我们可以创建一个 :code:`MnistClient` 类的实例,并添加一行来实际运行该客户端:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:170 +#, fuzzy +msgid "" +"That's it for the client. We only have to implement ``Client`` or " +"``NumPyClient`` and call ``fl.client.start_client()``. If you implement a" +" client of type ``NumPyClient`` you'll need to first call its " +"``to_client()`` method. The string ``\"0.0.0.0:8080\"`` tells the client " +"which server to connect to. In our case we can run the server and the " +"client on the same machine, therefore we use ``\"0.0.0.0:8080\"``. If we " +"run a truly federated workload with the server and clients running on " +"different machines, all that needs to change is the ``server_address`` we" +" pass to the client." +msgstr "" +"这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient` 并调用 " +":code:`fl.client.start_client()` 或 " +":code:`fl.client.start_numpy_client()`。字符串 " +":code:`\"0.0.0.0:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " +":code:`\"0.0.0.0:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是传递给客户端的" +" :code:`server_address`。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:181 +msgid "" +"The following Flower server is a little bit more advanced and returns an " +"evaluation function for the server-side evaluation. First, we import " +"again all required libraries such as Flower and scikit-learn." 
+msgstr "" +"下面的 Flower 服务器更先进一些,会返回一个用于服务器端评估的评估函数。首先,我们再次导入所有需要的库,如 Flower 和 scikit-" +"learn。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:185 +#, fuzzy +msgid "``server.py``, import Flower and start the server:" +msgstr ":code:`server.py`, 导入 Flower 并启动服务器:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:198 +#, fuzzy +msgid "" +"The number of federated learning rounds is set in ``fit_round()`` and the" +" evaluation is defined in ``get_evaluate_fn()``. The evaluation function " +"is called after each federated learning round and gives you information " +"about loss and accuracy. Note that we also make use of Flower Datasets " +"here to load the test split of the MNIST dataset for server-side " +"evaluation." +msgstr "" +"联邦学习轮数在 :code:`fit_round()` 中设置,评估在 :code:`get_evaluate_fn()` " +"中定义。每轮联邦学习后都会调用评估函数,并提供有关损失值和准确率的信息。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:228 +#, fuzzy +msgid "" +"The ``main`` contains the server-side parameter initialization " +"``utils.set_initial_params()`` as well as the aggregation strategy " +"``fl.server.strategy:FedAvg()``. The strategy is the default one, " +"federated averaging (or FedAvg), with two clients and evaluation after " +"each federated learning round. The server can be started with the command" +" ``fl.server.start_server(server_address=\"0.0.0.0:8080\", " +"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))``." +msgstr "" +":code:`main`包含服务器端参数初始化:code:`utils.set_initial_params()`以及聚合策略 " +":code:`fl.server.strategy:FedAvg()`。该策略是默认的联邦平均(或 " +"FedAvg)策略,有两个客户端,在每轮联邦学习后进行评估。可以使用 " +":code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " +"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))` 命令启动服务器。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:256 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. Federated learning systems usually have a " +"server and multiple clients. 
We, therefore, have to start the server " +"first:" +msgstr "客户端和服务器都准备就绪后,我们现在就可以运行一切,看看联邦学习的运行情况。联邦学习系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:264 +#: ../../source/tutorial-quickstart-xgboost.rst:598 +msgid "" +"Once the server is running we can start the clients in different " +"terminals. Open a new terminal and start the first client:" +msgstr "服务器运行后,我们就可以在不同终端启动客户端了。打开一个新终端,启动第一个客户端:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:271 +#: ../../source/tutorial-quickstart-xgboost.rst:605 +msgid "Open another terminal and start the second client:" +msgstr "打开另一台终端,启动第二个客户端:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:277 +#: ../../source/tutorial-quickstart-xgboost.rst:611 +msgid "" +"Each client will have its own dataset. You should now see how the " +"training does in the very first terminal (the one that started the " +"server):" +msgstr "每个客户端都有自己的数据集。现在你应该看到第一个终端(启动服务器的终端)的训练效果了:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:311 +#, fuzzy +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this example can be found in ``examples/sklearn-logreg-" +"mnist``." +msgstr "" +"恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " +"`_ 可以在 :code:`examples/sklearn-logreg-mnist` 中找到。" + +#: ../../source/tutorial-quickstart-tensorflow.rst:-1 +#, fuzzy +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with TensorFlow to train a CNN model on CIFAR-10." +msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 TensorFlow 在 CIFAR-10 上训练 MobilNetV2 模型。" + +#: ../../source/tutorial-quickstart-tensorflow.rst:4 +msgid "Quickstart TensorFlow" +msgstr "快速入门 TensorFlow" + +#: ../../source/tutorial-quickstart-tensorflow.rst:6 +#, fuzzy +msgid "" +"In this tutorial we will learn how to train a Convolutional Neural " +"Network on CIFAR-10 using the Flower framework and TensorFlow. 
First of " +"all, it is recommended to create a virtual environment and run everything" +" within a :doc:`virtualenv `." +msgstr "" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" + +#: ../../source/tutorial-quickstart-tensorflow.rst:11 +msgid "" +"Let's use `flwr new` to create a complete Flower+TensorFlow project. It " +"will generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:26 +msgid "" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``TensorFlow``), give a name to your project," +" and type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:114 +msgid "" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create the ``NumPy`` arrays that" +" correspond to their data partition." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:141 +msgid "" +"Next, we need a model. We defined a simple Convolutional Neural Network " +"(CNN), but feel free to replace it with a more sophisticated model if " +"you'd like:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:170 +msgid "" +"With `TensorFlow`, we can use the built-in ``get_weights()`` and " +"``set_weights()`` functions, which simplifies the implementation with " +"`Flower`. The rest of the functionality in the ClientApp is directly " +"inspired by the centralized case. The ``fit()`` method in the client " +"trains the model using the local dataset. 
Similarly, the ``evaluate()`` " +"method is used to evaluate the model received on a held-out validation " +"set that the client might have:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:203 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparameters defined in your " +"``pyproject.toml`` to configure the run. For example, in this tutorial we" +" access the `local-epochs` setting to control the number of epochs a " +"``ClientApp`` will perform when running the ``fit()`` method, in addition" +" to `batch-size`. You could define additional hyperparameters in " +"``pyproject.toml`` and access them here." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:234 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will serve as " +"the global model to federate." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:270 +#, fuzzy +msgid "" +"Check the source code of the extended version of this tutorial in " +"|quickstart_tf_link|_ in the Flower GitHub repository." +msgstr "" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" + +#: ../../source/tutorial-quickstart-tensorflow.rst:282 +msgid "" +"The video shown below shows how to setup a TensorFlow + Flower project " +"using our previously recommended APIs. A new video tutorial will be " +"released that shows the new APIs (as the content above does)" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with XGBoost to train classification models on trees." 
+msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 XGBoost 上训练分类模型。" + +#: ../../source/tutorial-quickstart-xgboost.rst:4 +msgid "Quickstart XGBoost" +msgstr "XGBoost快速入门" + +#: ../../source/tutorial-quickstart-xgboost.rst:13 +msgid "Federated XGBoost" +msgstr "联邦化 XGBoost" + +#: ../../source/tutorial-quickstart-xgboost.rst:15 +msgid "" +"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " +"implementation of gradient-boosted decision tree (**GBDT**), that " +"maximises the computational boundaries for boosted tree methods. It's " +"primarily designed to enhance both the performance and computational " +"speed of machine learning models. In XGBoost, trees are constructed " +"concurrently, unlike the sequential approach taken by GBDT." +msgstr "" +"EXtreme Gradient " +"Boosting(**XGBoost**)是梯度提升决策树(**GBDT**)的一种稳健而高效的实现方法,能最大限度地提高提升树方法的计算边界。它主要用于提高机器学习模型的性能和计算速度。在" +" XGBoost 中,决策树是并发构建的,与 GBDT 采用的顺序方法不同。" + +#: ../../source/tutorial-quickstart-xgboost.rst:21 +msgid "" +"Often, for tabular data on medium-sized datasets with fewer than 10k " +"training examples, XGBoost surpasses the results of deep learning " +"techniques." +msgstr "对于训练示例少于 10k 的中型数据集上的表格数据,XGBoost 的结果往往超过深度学习技术。" + +#: ../../source/tutorial-quickstart-xgboost.rst:25 +msgid "Why federated XGBoost?" +msgstr "为什么选择联邦 XGBoost?" + +#: ../../source/tutorial-quickstart-xgboost.rst:27 +msgid "" +"Indeed, as the demand for data privacy and decentralized learning grows, " +"there's an increasing requirement to implement federated XGBoost systems " +"for specialised applications, like survival analysis and financial fraud " +"detection." +msgstr "事实上,随着对数据隐私和分散学习的需求不断增长,越来越多的专业应用(如生存分析和金融欺诈检测)需要实施联邦 XGBoost 系统。" + +#: ../../source/tutorial-quickstart-xgboost.rst:31 +msgid "" +"Federated learning ensures that raw data remains on the local device, " +"making it an attractive approach for sensitive domains where data " +"security and privacy are paramount. 
Given the robustness and efficiency " +"of XGBoost, combining it with federated learning offers a promising " +"solution for these specific challenges." +msgstr "" +"联邦学习可确保原始数据保留在本地设备上,因此对于数据安全和隐私至关重要的敏感领域来说,这是一种极具吸引力的方法。鉴于 XGBoost " +"的稳健性和高效性,将其与联邦学习相结合为应对这些特定挑战提供了一种前景广阔的解决方案。" + +#: ../../source/tutorial-quickstart-xgboost.rst:36 +#, fuzzy +msgid "" +"In this tutorial we will learn how to train a federated XGBoost model on " +"HIGGS dataset using Flower and ``xgboost`` package. We use a simple " +"example (`full code xgboost-quickstart " +"`_)" +" with two *clients* and one *server* to demonstrate how federated XGBoost" +" works, and then we dive into a more complex example (`full code xgboost-" +"comprehensive `_) to run various experiments." +msgstr "" +"在本教程中,我们将学习如何使用 Flower 和 :code:`xgboost` 软件包在 HIGGS 数据集上训练联邦 XGBoost " +"模型。我们将使用一个包含两个 * 客户端* 和一个 * 服务器* 的简单示例 (`完整代码 xgboost-quickstart " +"`_)来演示联邦 XGBoost 如何工作,然后我们将深入到一个更复杂的示例 (`完整代码 xgboost-" +"comprehensive `_),以运行各种实验。" + +#: ../../source/tutorial-quickstart-xgboost.rst:46 +msgid "Environment Setup" +msgstr "环境设定" + +#: ../../source/tutorial-quickstart-xgboost.rst:48 +#, fuzzy +msgid "" +"First of all, it is recommended to create a virtual environment and run " +"everything within a :doc:`virtualenv `." +msgstr "" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" + +#: ../../source/tutorial-quickstart-xgboost.rst:51 +msgid "" +"We first need to install Flower and Flower Datasets. You can do this by " +"running :" +msgstr "我们首先需要安装 Flower 和 Flower Datasets。您可以通过运行 :" + +#: ../../source/tutorial-quickstart-xgboost.rst:57 +#, fuzzy +msgid "" +"Since we want to use ``xgboost`` package to build up XGBoost trees, let's" +" go ahead and install ``xgboost``:" +msgstr "既然我们要使用 :code:`xgboost` 软件包来构建 XGBoost 树,那就继续安装 :code:`xgboost`:" + +#: ../../source/tutorial-quickstart-xgboost.rst:67 +msgid "" +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. 
Now that we have all our " +"dependencies installed, let's run a simple distributed training with two " +"clients and one server." +msgstr "*客户端*负责根据其本地数据集为模型生成单独的模型参数更新。现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。" + +#: ../../source/tutorial-quickstart-xgboost.rst:71 +#, fuzzy +msgid "" +"In a file called ``client.py``, import xgboost, Flower, Flower Datasets " +"and other related functions:" +msgstr "在名为 :code:`client.py` 的文件中,导入 xgboost、Flower、Flower Datasets 和其他相关函数:" + +#: ../../source/tutorial-quickstart-xgboost.rst:99 +msgid "Dataset partition and hyper-parameter selection" +msgstr "数据集划分和超参数选择" + +#: ../../source/tutorial-quickstart-xgboost.rst:101 +msgid "" +"Prior to local training, we require loading the HIGGS dataset from Flower" +" Datasets and conduct data partitioning for FL:" +msgstr "在本地训练之前,我们需要从 Flower Datasets 加载 HIGGS 数据集,并对 FL 进行数据分区:" + +#: ../../source/tutorial-quickstart-xgboost.rst:115 +#, fuzzy +msgid "" +"In this example, we split the dataset into 30 partitions with uniform " +"distribution (``IidPartitioner(num_partitions=30)``). Then, we load the " +"partition for the given client based on ``partition_id``:" +msgstr "" +"在此示例中,我们将数据集分割成两个均匀分布的分区(:code:`IidPartitioner(num_partitions=2)`)。然后,我们根据" +" :code:`node_id` 为给定客户端加载分区:" + +#: ../../source/tutorial-quickstart-xgboost.rst:135 +#, fuzzy +msgid "" +"After that, we do train/test splitting on the given partition (client's " +"local data), and transform data format for ``xgboost`` package." +msgstr "然后,我们在给定的分区(客户端的本地数据)上进行训练/测试分割,并为 :code:`xgboost` 软件包转换数据格式。" + +#: ../../source/tutorial-quickstart-xgboost.rst:149 +#, fuzzy +msgid "" +"The functions of ``train_test_split`` and " +"``transform_dataset_to_dmatrix`` are defined as below:" +msgstr ":code:`train_test_split` 和 :code:`transform_dataset_too_dmatrix` 的函数定义如下:" + +#: ../../source/tutorial-quickstart-xgboost.rst:174 +msgid "Finally, we define the hyper-parameters used for XGBoost training." 
+msgstr "最后,我们定义了用于 XGBoost 训练的超参数。" + +#: ../../source/tutorial-quickstart-xgboost.rst:190 +#, fuzzy +msgid "" +"The ``num_local_round`` represents the number of iterations for local " +"tree boost. We use CPU for the training in default. One can shift it to " +"GPU by setting ``tree_method`` to ``gpu_hist``. We use AUC as evaluation " +"metric." +msgstr "" +"代码:`num_local_round`表示本地树的迭代次数。我们默认使用 CPU 进行训练。可以通过将 :code:`tree_method` " +"设置为 :code:`gpu_hist`,将其转换为 GPU。我们使用 AUC 作为评估指标。" + +#: ../../source/tutorial-quickstart-xgboost.rst:195 +msgid "Flower client definition for XGBoost" +msgstr "用于 XGBoost 的 Flower 客户端定义" + +#: ../../source/tutorial-quickstart-xgboost.rst:197 +#, fuzzy +msgid "" +"After loading the dataset we define the Flower client. We follow the " +"general rule to define ``XgbClient`` class inherited from " +"``fl.client.Client``." +msgstr "" +"加载数据集后,我们定义 Flower 客户端。我们按照一般规则定义从 :code:`fl.client.Client` 继承而来的 " +":code:`XgbClient` 类。" + +#: ../../source/tutorial-quickstart-xgboost.rst:219 +msgid "" +"All required parameters defined above are passed to ``XgbClient``'s " +"constructor." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:221 +#, fuzzy +msgid "" +"Then, we override ``get_parameters``, ``fit`` and ``evaluate`` methods " +"insides ``XgbClient`` class as follows." +msgstr "" +"然后,我们在 :code:`XgbClient` 类中重写 :code:`get_parameters`、:code:`fit` 和 " +":code:`evaluate` 方法如下。" + +#: ../../source/tutorial-quickstart-xgboost.rst:236 +#, fuzzy +msgid "" +"Unlike neural network training, XGBoost trees are not started from a " +"specified random weights. In this case, we do not use ``get_parameters`` " +"and ``set_parameters`` to initialise model parameters for XGBoost. As a " +"result, let's return an empty tensor in ``get_parameters`` when it is " +"called by the server at the first round." 
+msgstr "" +"与神经网络训练不同,XGBoost 树不是从指定的随机参数开始的。在这种情况下,我们不使用 :code:`get_parameters` 和 " +":code:`set_parameters` 来初始化 XGBoost 的模型参数。因此,当服务器在第一轮调用 " +":code:`get_parameters` 时,让我们在 :code:`get_parameters` 中返回一个空张量。" + +#: ../../source/tutorial-quickstart-xgboost.rst:278 +#, fuzzy +msgid "" +"In ``fit``, at the first round, we call ``xgb.train()`` to build up the " +"first set of trees. From the second round, we load the global model sent " +"from server to new build Booster object, and then update model weights on" +" local training data with function ``local_boost`` as follows:" +msgstr "" +"在 :code:`fit`中,第一轮我们调用 :code:`xgb.train()`来建立第一组树,返回的 Booster 对象和 config " +"分别存储在 :code:`self.bst` 和 :code:`self.config` 中。从第二轮开始,我们将服务器发送的全局模型加载到 " +":code:`self.bst`,然后使用函数 :code:`local_boost`更新本地训练数据的模型权重,如下所示:" + +#: ../../source/tutorial-quickstart-xgboost.rst:298 +#, fuzzy +msgid "" +"Given ``num_local_round``, we update trees by calling " +"``bst_input.update`` method. After training, the last " +"``N=num_local_round`` trees will be extracted to send to the server." +msgstr "" +"给定 :code:`num_local_round`,我们通过调用 " +":code:`self.bst.update`方法更新树。训练结束后,我们将提取最后一个 :code:`N=num_local_round` " +"树并发送给服务器。" + +#: ../../source/tutorial-quickstart-xgboost.rst:330 +#, fuzzy +msgid "" +"In ``evaluate``, after loading the global model, we call ``bst.eval_set``" +" function to conduct evaluation on valid set. The AUC value will be " +"returned." +msgstr "在 :code:`evaluate`中,我们调用 :code:`self.bst.eval_set`函数对有效集合进行评估。将返回 AUC 值。" + +#: ../../source/tutorial-quickstart-xgboost.rst:333 +#, fuzzy +msgid "" +"Now, we can create an instance of our class ``XgbClient`` and add one " +"line to actually run this client:" +msgstr "现在,我们可以创建一个 :code:`XgbClient` 类的实例,并添加一行来实际运行该客户端:" + +#: ../../source/tutorial-quickstart-xgboost.rst:350 +#, fuzzy +msgid "" +"That's it for the client. We only have to implement ``Client`` and call " +"``fl.client.start_client()``. 
The string ``\"[::]:8080\"`` tells the " +"client which server to connect to. In our case we can run the server and " +"the client on the same machine, therefore we use ``\"[::]:8080\"``. If we" +" run a truly federated workload with the server and clients running on " +"different machines, all that needs to change is the ``server_address`` we" +" point the client at." +msgstr "" +"这就是客户端。我们只需实现 :code:`客户端`并调用 :code:`fl.client.start_client()`。字符串 " +":code:`\"[::]:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " +":code:`\"[::]:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是客户端指向的 " +":code:`server_address`。" + +#: ../../source/tutorial-quickstart-xgboost.rst:360 +msgid "" +"These updates are then sent to the *server* which will aggregate them to " +"produce a better model. Finally, the *server* sends this improved version" +" of the model back to each *client* to finish a complete FL round." +msgstr "" +"然后,这些更新会被发送到*服务器*,由*服务器*聚合后生成一个更好的模型。最后,*服务器*将这个改进版的模型发回给每个*客户端*,以完成一轮完整的" +" FL。" + +#: ../../source/tutorial-quickstart-xgboost.rst:364 +#, fuzzy +msgid "" +"In a file named ``server.py``, import Flower and FedXgbBagging from " +"``flwr.server.strategy``." +msgstr "" +"在名为 :code:`server.py` 的文件中,从 :code:`flwr.server.strategy` 导入 Flower 和 " +"FedXgbBagging。" + +#: ../../source/tutorial-quickstart-xgboost.rst:367 +msgid "We first define a strategy for XGBoost bagging aggregation." +msgstr "我们首先定义了 XGBoost bagging聚合策略。" + +#: ../../source/tutorial-quickstart-xgboost.rst:401 +#, fuzzy +msgid "" +"We use two clients for this example. An ``evaluate_metrics_aggregation`` " +"function is defined to collect and wighted average the AUC values from " +"clients. The ``config_func`` function is to return the current FL round " +"number to client's ``fit()`` and ``evaluate()`` methods." 
+msgstr "" +"本示例使用两个客户端。我们定义了一个 :code:`evaluate_metrics_aggregation` 函数,用于收集客户机的 AUC " +"值并求取平均值。" + +#: ../../source/tutorial-quickstart-xgboost.rst:406 +msgid "Then, we start the server:" +msgstr "然后,我们启动服务器:" + +#: ../../source/tutorial-quickstart-xgboost.rst:418 +msgid "Tree-based bagging aggregation" +msgstr "基于树的bagging聚合" + +#: ../../source/tutorial-quickstart-xgboost.rst:420 +msgid "" +"You must be curious about how bagging aggregation works. Let's look into " +"the details." +msgstr "您一定很好奇bagging聚合是如何工作的。让我们来详细了解一下。" + +#: ../../source/tutorial-quickstart-xgboost.rst:422 +#, fuzzy +msgid "" +"In file ``flwr.server.strategy.fedxgb_bagging.py``, we define " +"``FedXgbBagging`` inherited from ``flwr.server.strategy.FedAvg``. Then, " +"we override the ``aggregate_fit``, ``aggregate_evaluate`` and " +"``evaluate`` methods as follows:" +msgstr "" +"在文件 :code:`flwr.server.strategy.fedxgb_bagging.py`中,我们定义了从 " +":code:`flwr.server.strategy.FedAvg`继承的 :code:`FedXgbBagging`。然后,我们覆盖 " +":code:`aggregate_fit`、:code:`aggregate_evaluate` 和 :code:`evaluate` 方法如下:" + +#: ../../source/tutorial-quickstart-xgboost.rst:519 +#, fuzzy +msgid "" +"In ``aggregate_fit``, we sequentially aggregate the clients' XGBoost " +"trees by calling ``aggregate()`` function:" +msgstr "" +"在 :code:`aggregate_fit` 中,我们通过调用 :code:`aggregate()` 函数,按顺序聚合客户端的 XGBoost" +" 树:" + +#: ../../source/tutorial-quickstart-xgboost.rst:579 +#, fuzzy +msgid "" +"In this function, we first fetch the number of trees and the number of " +"parallel trees for the current and previous model by calling " +"``_get_tree_nums``. Then, the fetched information will be aggregated. " +"After that, the trees (containing model weights) are aggregated to " +"generate a new tree model." 
+msgstr "" +"在该函数中,我们首先通过调用 :code:`_get_tree_nums` " +"获取当前模型和上一个模型的树数和并行树数。然后,对获取的信息进行聚合。然后,聚合树(包含模型参数)生成新的树模型。" + +#: ../../source/tutorial-quickstart-xgboost.rst:584 +msgid "" +"After traversal of all clients' models, a new global model is generated, " +"followed by the serialisation, and sending back to each client." +msgstr "在遍历所有客户端的模型后,会生成一个新的全局模型,然后进行序列化,并发回给每个客户端。" + +#: ../../source/tutorial-quickstart-xgboost.rst:588 +msgid "Launch Federated XGBoost!" +msgstr "启动联邦 XGBoost!" + +#: ../../source/tutorial-quickstart-xgboost.rst:664 +#, fuzzy +msgid "" +"Congratulations! You've successfully built and run your first federated " +"XGBoost system. The AUC values can be checked in ``metrics_distributed``." +" One can see that the average AUC increases over FL rounds." +msgstr "" +"恭喜您!您已成功构建并运行了第一个联邦 XGBoost 系统。可以在 :code:`metrics_distributed` 中查看 AUC " +"值。我们可以看到,平均 AUC 随 FL 轮数的增加而增加。" + +#: ../../source/tutorial-quickstart-xgboost.rst:668 +#, fuzzy +msgid "" +"The full `source code `_ for this example can be found in ``examples" +"/xgboost-quickstart``." +msgstr "" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" + +#: ../../source/tutorial-quickstart-xgboost.rst:673 +msgid "Comprehensive Federated XGBoost" +msgstr "综合的联邦 XGBoost" + +#: ../../source/tutorial-quickstart-xgboost.rst:675 +#, fuzzy +msgid "" +"Now that you have known how federated XGBoost work with Flower, it's time" +" to run some more comprehensive experiments by customising the " +"experimental settings. In the xgboost-comprehensive example (`full code " +"`_), we provide more options to define various experimental" +" setups, including aggregation strategies, data partitioning and " +"centralised/distributed evaluation. We also support :doc:`Flower " +"simulation ` making it easy to simulate large " +"client cohorts in a resource-aware manner. Let's take a look!" 
+msgstr "" +"既然您已经知道联邦 XGBoost 如何与 Flower 协同工作,那么现在就该通过自定义实验设置来运行一些更综合的实验了。在 xgboost-" +"comprehensive 示例 (`完整代码 " +"`_)中,我们提供了更多选项来定义各种实验设置,包括数据分区和集中/分布式评估。让我们一起来看看!" + +#: ../../source/tutorial-quickstart-xgboost.rst:685 +#, fuzzy +msgid "Cyclic training" +msgstr "循环训练" + +#: ../../source/tutorial-quickstart-xgboost.rst:687 +#, fuzzy +msgid "" +"In addition to bagging aggregation, we offer a cyclic training scheme, " +"which performs FL in a client-by-client fashion. Instead of aggregating " +"multiple clients, there is only one single client participating in the " +"training per round in the cyclic training scenario. The trained local " +"XGBoost trees will be passed to the next client as an initialised model " +"for next round's boosting." +msgstr "" +"除了袋式聚合,我们还提供了一种循环训练方案,它以逐个客户端的方式执行 " +"FL。在循环训练方案中,每轮只有一个客户端参与训练,而不是多个客户端聚合在一起。训练好的本地 XGBoost " +"树将传递给下一个客户端,作为下一轮提升的初始化模型。" + +#: ../../source/tutorial-quickstart-xgboost.rst:693 +#, fuzzy +msgid "To do this, we first customise a ``ClientManager`` in ``server_utils.py``:" +msgstr "为此,我们首先要在 :code:`server_utils.py` 中自定义一个 :code:`ClientManager`:" + +#: ../../source/tutorial-quickstart-xgboost.rst:733 +#, fuzzy +msgid "" +"The customised ``ClientManager`` samples all available clients in each FL" +" round based on the order of connection to the server. Then, we define a " +"new strategy ``FedXgbCyclic`` in " +"``flwr.server.strategy.fedxgb_cyclic.py``, in order to sequentially " +"select only one client in given round and pass the received model to next" +" client." +msgstr "" +"定制的 :code:`ClientManager` 会根据连接服务器的顺序,在每轮 FL 中对所有可用客户端进行采样。然后,我们在 " +":code:`flwr.server.strategy.fedxgb_cyclic.py` 中定义了一个新策略 " +":code:`FedXgbCyclic`,以便在给定回合中按顺序只选择一个客户端,并将接收到的模型传递给下一个客户端。" + +#: ../../source/tutorial-quickstart-xgboost.rst:775 +#, fuzzy +msgid "" +"Unlike the original ``FedAvg``, we don't perform aggregation here. 
" +"Instead, we just make a copy of the received client model as global model" +" by overriding ``aggregate_fit``." +msgstr "" +"与最初的 :code:`FedAvg` 不同,我们在这里不执行聚合。相反,我们只是通过覆盖 :code:`aggregate_fit` " +"将接收到的客户端模型复制为全局模型。" + +#: ../../source/tutorial-quickstart-xgboost.rst:778 +#, fuzzy +msgid "" +"Also, the customised ``configure_fit`` and ``configure_evaluate`` methods" +" ensure the clients to be sequentially selected given FL round:" +msgstr "" +"此外,定制的 :code:`configure_fit` 和 :code:`configure_evaluate` 方法可确保在 FL " +"轮中按顺序选择客户:" + +#: ../../source/tutorial-quickstart-xgboost.rst:840 +msgid "Customised data partitioning" +msgstr "定制数据分区" + +#: ../../source/tutorial-quickstart-xgboost.rst:842 +#, fuzzy +msgid "" +"In ``dataset.py``, we have a function ``instantiate_partitioner`` to " +"instantiate the data partitioner based on the given ``num_partitions`` " +"and ``partitioner_type``. Currently, we provide four supported " +"partitioner type to simulate the uniformity/non-uniformity in data " +"quantity (uniform, linear, square, exponential)." +msgstr "" +"在 :code:`dataset.py` 中,我们有一个函数 :code:`instantiate_partitioner` 来根据给定的 " +":code:`num_partitions` 和 :code:`partitioner_type` " +"来实例化数据分区器。目前,我们提供四种支持的分区器类型(均匀、线性、正方形、指数)来模拟数据量的均匀性/非均匀性。" + +#: ../../source/tutorial-quickstart-xgboost.rst:873 +msgid "Customised centralised/distributed evaluation" +msgstr "定制的集中/分布式评估" + +#: ../../source/tutorial-quickstart-xgboost.rst:875 +#, fuzzy +msgid "" +"To facilitate centralised evaluation, we define a function in " +"``server_utils.py``:" +msgstr "为便于集中评估,我们在 :code:`server.py` 中定义了一个函数:" + +#: ../../source/tutorial-quickstart-xgboost.rst:907 +#, fuzzy +msgid "" +"This function returns a evaluation function which instantiates a " +"``Booster`` object and loads the global model weights to it. The " +"evaluation is conducted by calling ``eval_set()`` method, and the tested " +"AUC value is reported." 
+msgstr "" +"此函数返回一个评估函数,该函数实例化一个 :code:`Booster` 对象,并向其加载全局模型参数。评估通过调用 " +":code:`eval_set()` 方法进行,并报告测试的 AUC 值。" + +#: ../../source/tutorial-quickstart-xgboost.rst:911 +#, fuzzy +msgid "" +"As for distributed evaluation on the clients, it's same as the quick-" +"start example by overriding the ``evaluate()`` method insides the " +"``XgbClient`` class in ``client_utils.py``." +msgstr "" +"至于客户端上的分布式评估,与快速启动示例相同,通过覆盖 :code:`client.py` 中 :code:`XgbClient` 类内部的 " +":code:`evaluate()` 方法。" + +#: ../../source/tutorial-quickstart-xgboost.rst:916 +#, fuzzy +msgid "Flower simulation" +msgstr "运行模拟" + +#: ../../source/tutorial-quickstart-xgboost.rst:918 +#, fuzzy +msgid "" +"We also provide an example code (``sim.py``) to use the simulation " +"capabilities of Flower to simulate federated XGBoost training on either a" +" single machine or a cluster of machines." +msgstr "我们还提供了一个示例代码(:code:`sim.py`),用于使用 Flower 的模拟功能在单台机器或机器集群上模拟联合 XGBoost 训练。" + +#: ../../source/tutorial-quickstart-xgboost.rst:954 +#, fuzzy +msgid "" +"After importing all required packages, we define a ``main()`` function to" +" perform the simulation process:" +msgstr "导入所有需要的软件包后,我们定义了一个 :code:`main()` 函数来执行模拟程序:" + +#: ../../source/tutorial-quickstart-xgboost.rst:1010 +#, fuzzy +msgid "" +"We first load the dataset and perform data partitioning, and the pre-" +"processed data is stored in a ``list``. After the simulation begins, the " +"clients won't need to pre-process their partitions again." 
+msgstr "我们首先加载数据集并执行数据分区,预处理后的数据存储在 :code:`list` 中。模拟开始后,客户端就不需要再预处理分区了。" + +#: ../../source/tutorial-quickstart-xgboost.rst:1014 +#, fuzzy +msgid "Then, we define the strategies and other hyper-parameters:" +msgstr "然后,我们定义策略和其他超参数:" + +#: ../../source/tutorial-quickstart-xgboost.rst:1065 +#, fuzzy +msgid "" +"After that, we start the simulation by calling " +"``fl.simulation.start_simulation``:" +msgstr "然后,我们调用 :code:`fl.simulation.start_simulation` 开始模拟:" + +#: ../../source/tutorial-quickstart-xgboost.rst:1085 +#, fuzzy +msgid "" +"One of key parameters for ``start_simulation`` is ``client_fn`` which " +"returns a function to construct a client. We define it as follows:" +msgstr "" +":code:`start_simulation` 的一个关键参数是 " +":code:`client_fn`,它返回一个用于构建客户端的函数。我们将其定义如下:" + +#: ../../source/tutorial-quickstart-xgboost.rst:1126 +msgid "Arguments parser" +msgstr "参数解析器" + +#: ../../source/tutorial-quickstart-xgboost.rst:1128 +#, fuzzy +msgid "" +"In ``utils.py``, we define the arguments parsers for clients, server and " +"simulation, allowing users to specify different experimental settings. " +"Let's first see the sever side:" +msgstr "在 :code:`utils.py` 中,我们定义了客户端和服务器端的参数解析器,允许用户指定不同的实验设置。让我们先看看服务器端:" + +#: ../../source/tutorial-quickstart-xgboost.rst:1175 +#, fuzzy +msgid "" +"This allows user to specify training strategies / the number of total " +"clients / FL rounds / participating clients / clients for evaluation, and" +" evaluation fashion. Note that with ``--centralised-eval``, the sever " +"will do centralised evaluation and all functionalities for client " +"evaluation will be disabled." 
+msgstr "" +"这允许用户指定总客户数/FL 轮数/参与客户数/评估客户数以及评估方式。请注意,如果使用 :code:`--centralised-" +"eval`,服务器将进行集中评估,客户端评估的所有功能将被禁用。" + +#: ../../source/tutorial-quickstart-xgboost.rst:1180 +msgid "Then, the argument parser on client side:" +msgstr "然后是客户端的参数解析器:" + +#: ../../source/tutorial-quickstart-xgboost.rst:1234 +#, fuzzy +msgid "" +"This defines various options for client data partitioning. Besides, " +"clients also have an option to conduct evaluation on centralised test set" +" by setting ``--centralised-eval``, as well as an option to perform " +"scaled learning rate based on the number of clients by setting " +"``--scaled-lr``." +msgstr "这定义了客户端数据分区的各种选项。此外,通过设置 :code:`--centralised-eval`,客户端还可以选择在集中测试集上进行评估。" + +#: ../../source/tutorial-quickstart-xgboost.rst:1239 +#, fuzzy +msgid "We also have an argument parser for simulation:" +msgstr "我们还有一个用于模拟的参数解析器:" + +#: ../../source/tutorial-quickstart-xgboost.rst:1317 +#, fuzzy +msgid "This integrates all arguments for both client and server sides." +msgstr "这整合了客户端和服务器端的所有参数。" + +#: ../../source/tutorial-quickstart-xgboost.rst:1320 +msgid "Example commands" +msgstr "命令示例" + +#: ../../source/tutorial-quickstart-xgboost.rst:1322 +#, fuzzy +msgid "" +"To run a centralised evaluated experiment with bagging strategy on 5 " +"clients with exponential distribution for 50 rounds, we first start the " +"server as below:" +msgstr "为了在 5 个客户端上进行 50 轮指数分布的集中评估实验,我们首先启动服务器,如下所示:" + +#: ../../source/tutorial-quickstart-xgboost.rst:1329 +msgid "Then, on each client terminal, we start the clients:" +msgstr "然后,我们在每个客户终端上启动客户机:" + +#: ../../source/tutorial-quickstart-xgboost.rst:1335 +#, fuzzy +msgid "To run the same experiment with Flower simulation:" +msgstr "要使用 Flower 模拟运行相同的实验:" + +#: ../../source/tutorial-quickstart-xgboost.rst:1341 +#, fuzzy +msgid "" +"The full `code `_ for this comprehensive example can be found in" +" ``examples/xgboost-comprehensive``." 
+msgstr "" +"此综合示例的全部`源代码 `_ 可在 :code:`examples/xgboost-comprehensive` 中找到。" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 +msgid "Build a strategy from scratch" +msgstr "从零开始制定策略" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 +#, fuzzy +msgid "" +"Welcome to the third part of the Flower federated learning tutorial. In " +"previous parts of this tutorial, we introduced federated learning with " +"PyTorch and the Flower framework (`part 1 " +"`__) and we learned how strategies can be used to customize " +"the execution on both the server and the clients (`part 2 " +"`__)." +msgstr "" +"欢迎来到 Flower 联邦学习教程的第三部分。在本教程的前几部分,我们介绍了 PyTorch 和 Flower 的联邦学习(`part 1 " +"`__),并学习了如何使用策略来定制服务器和客户端的执行(`part 2 " +"`__)。" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +#, fuzzy +msgid "" +"In this notebook, we'll continue to customize the federated learning " +"system we built previously by creating a custom version of FedAvg using " +"the Flower framework, Flower Datasets, and PyTorch." +msgstr "" +"在本笔记中,我们将通过创建 FedAvg 的自定义版本(再次使用 `Flower `__ 和 " +"`PyTorch `__),继续定制我们之前构建的联邦学习系统。" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 +#, fuzzy +msgid "" +"`Star Flower on GitHub `__ ⭐️ and join " +"the Flower community on Flower Discuss and the Flower Slack to connect, " +"ask questions, and get help: - `Join Flower Discuss " +"`__ We'd love to hear from you in the " +"``Introduction`` topic! If anything is unclear, post in ``Flower Help - " +"Beginners``. - `Join Flower Slack `__ We'd " +"love to hear from you in the ``#introductions`` channel! If anything is " +"unclear, head over to the ``#questions`` channel." 
+msgstr "" +"`Star Flower on GitHub `__ ⭐️ 并加入 Slack " +"上的 Flower 社区,进行交流、提问并获得帮助: 加入 Slack `__ 🌼 " +"我们希望在 ``#introductions`` 频道听到您的声音!如果有任何不清楚的地方,请访问 ``#questions`` 频道。" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:18 +#, fuzzy +msgid "Let's build a new ``Strategy`` from scratch! 🌼" +msgstr "让我们从头开始构建一个新的``Strategy``!" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:30 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:30 +msgid "Preparation" +msgstr "准备工作" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:32 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:33 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:32 +msgid "" +"Before we begin with the actual code, let's make sure that we have " +"everything we need." +msgstr "在开始实际代码之前,让我们先确保我们已经准备好了所需的一切。" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:44 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:45 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:44 +msgid "Installing dependencies" +msgstr "安装依赖项" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:46 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:47 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:46 +msgid "First, we install the necessary packages:" +msgstr "首先,我们安装必要的软件包:" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:66 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:67 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:66 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:66 +msgid "" +"Now that we have all dependencies installed, we can import everything we " +"need for this tutorial:" +msgstr 
"现在我们已经安装了所有依赖项,可以导入本教程所需的所有内容:" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:106 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:106 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:106 +msgid "" +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " +"GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." +msgstr "" +"可以切换到已启用 GPU 加速的运行时(在 Google Colab 上: 运行时 > 更改运行时类型 > 硬件加速: GPU > " +"保存``)。但请注意,Google Colab 并非总能提供 GPU 加速。如果在以下部分中看到与 GPU 可用性相关的错误,请考虑通过设置 " +"``DEVICE = torch.device(\"cpu\")`` 切回基于 CPU 的执行。如果运行时已启用 GPU " +"加速,你应该会看到输出``Training on cuda``,否则会显示``Training on cpu``。" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:119 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:119 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:119 +msgid "Data loading" +msgstr "数据加载" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:121 +msgid "" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``." 
+msgstr "" +"现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成十个较小的数据集(每个数据集又分为训练集和验证集),并将所有数据都封装在各自的 " +"``DataLoader`` 中。" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:163 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:163 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 +msgid "Model training/evaluation" +msgstr "模型培训/评估" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:165 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:165 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:171 +msgid "" +"Let's continue with the usual model definition (including " +"``set_parameters`` and ``get_parameters``), training and test functions:" +msgstr "让我们继续使用常见的模型定义(包括 `set_parameters` 和 `get_parameters`)、训练和测试函数:" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:256 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:262 +msgid "Flower client" +msgstr "Flower 客户端" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:264 +#, fuzzy +msgid "" +"To implement the Flower client, we (again) create a subclass of " +"``flwr.client.NumPyClient`` and implement the three methods " +"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " +"``partition_id`` to the client and use it log additional details. We then" +" create an instance of ``ClientApp`` and pass it the ``client_fn``." 
+msgstr "" +"为了实现 Flower 客户端,我们(再次)创建了 ``flwr.client.NumPyClient`` 的子类,并实现了 " +"``get_parameters``、``fit`` 和 ``evaluate``三个方法。在这里,我们还将 ``cid`` " +"传递给客户端,并使用它记录其他详细信息:" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:311 +msgid "Let's test what we have so far before we continue:" +msgstr "在继续之前,让我们先测试一下我们目前掌握的情况:" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:357 +msgid "Build a Strategy from scratch" +msgstr "从零开始构建策略" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:359 +msgid "" +"Let’s overwrite the ``configure_fit`` method such that it passes a higher" +" learning rate (potentially also other hyperparameters) to the optimizer " +"of a fraction of the clients. We will keep the sampling of the clients as" +" it is in ``FedAvg`` and then change the configuration dictionary (one of" +" the ``FitIns`` attributes)." +msgstr "" +"让我们重写 ``configure_fit`` 方法,使其向一部分客户的优化器传递更高的学习率(可能还有其他超参数)。我们将保持 " +"``FedAvg`` 中的客户端采样,然后更改配置字典(``FitIns`` 属性之一)。" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:523 +msgid "" +"The only thing left is to use the newly created custom Strategy " +"``FedCustom`` when starting the experiment:" +msgstr "剩下的唯一工作就是在启动实验时使用新创建的自定义策略 ``FedCustom`` :" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:559 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:998 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:841 +msgid "Recap" +msgstr "回顾" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:561 +msgid "" +"In this notebook, we’ve seen how to implement a custom strategy. A custom" +" strategy enables granular control over client node configuration, result" +" aggregation, and more. To define a custom strategy, you only have to " +"overwrite the abstract methods of the (abstract) base class ``Strategy``." 
+" To make custom strategies even more powerful, you can pass custom " +"functions to the constructor of your new class (``__init__``) and then " +"call these functions whenever needed." +msgstr "" +"在本笔记中,我们了解了如何实施自定义策略。自定义策略可以对客户端节点配置、结果聚合等进行细粒度控制。要定义自定义策略,只需覆盖(抽象)基类 " +"``Strategy`` " +"的抽象方法即可。为使自定义策略更加强大,您可以将自定义函数传递给新类的构造函数(`__init__``),然后在需要时调用这些函数。" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:575 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1014 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:813 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:859 +#, fuzzy +msgid "" +"Before you continue, make sure to join the Flower community on Flower " +"Discuss (`Join Flower Discuss `__) and on " +"Slack (`Join Slack `__)." +msgstr "" +"在继续之前,请务必加入 Slack 上的 Flower 社区:`Join Slack `__" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:577 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1016 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:815 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:861 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 +msgid "" +"There's a dedicated ``#questions`` channel if you need help, but we'd " +"also love to hear who you are in ``#introductions``!" +msgstr "如果您需要帮助,我们有专门的 ``#questions`` 频道,但我们也很乐意在 ``#introductions`` 中了解您是谁!" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:579 +msgid "" +"The `Flower Federated Learning Tutorial - Part 4 " +"`__ introduces ``Client``, the flexible API underlying " +"``NumPyClient``." 
+msgstr "" +"Flower联邦学习教程 - 第4部分 `__ 介绍了``Client``,它是``NumPyClient``底层的灵活应用程序接口。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 +msgid "Customize the client" +msgstr "自定义客户端" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 +msgid "" +"Welcome to the fourth part of the Flower federated learning tutorial. In " +"the previous parts of this tutorial, we introduced federated learning " +"with PyTorch and Flower (`part 1 `__), we learned how " +"strategies can be used to customize the execution on both the server and " +"the clients (`part 2 `__), and we built our own " +"custom strategy from scratch (`part 3 `__)." +msgstr "" +"欢迎来到 Flower 联邦学习教程的第四部分。在本教程的前几部分中,我们介绍了 PyTorch 和 Flower 的联邦学习(`part 1 " +"`__),了解了如何使用策略来定制服务器和客户端的执行(`part 2 " +"`__),并从头开始构建了我们自己的定制策略(`part 3 " +"`__)。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 +msgid "" +"In this notebook, we revisit ``NumPyClient`` and introduce a new " +"baseclass for building clients, simply named ``Client``. In previous " +"parts of this tutorial, we've based our client on ``NumPyClient``, a " +"convenience class which makes it easy to work with machine learning " +"libraries that have good NumPy interoperability. With ``Client``, we gain" +" a lot of flexibility that we didn't have before, but we'll also have to " +"do a few things the we didn't have to do before." +msgstr "" +"在本笔记中,我们将重温 ``NumPyClient`` 并引入一个用于构建客户端的新基类,简单命名为 " +"``Client``。在本教程的前几部分中,我们的客户端基于``NumPyClient``,这是一个便捷类,可以让我们轻松地与具有良好 NumPy" +" 互操作性的机器学习库协同工作。有了 ``Client``,我们获得了很多以前没有的灵活性,但我们也必须做一些以前不需要做的事情。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:19 +#, fuzzy +msgid "" +"Let's go deeper and see what it takes to move from ``NumPyClient`` to " +"``Client``! 🌼" +msgstr "让我们深入了解一下从 ``NumPyClient`` 到 ``Client`` 的过程!" 
+ +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:31 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:30 +msgid "Step 0: Preparation" +msgstr "步骤 0:准备工作" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:121 +#, fuzzy +msgid "" +"Let's now define a loading function for the CIFAR-10 training and test " +"set, partition them into ``num_partitions`` smaller datasets (each split " +"into training and validation set), and wrap everything in their own " +"``DataLoader``." +msgstr "" +"现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成十个较小的数据集(每个数据集又分为训练集和验证集),并将所有数据都封装在各自的 " +"``DataLoader`` 中。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:256 +msgid "Step 1: Revisiting NumPyClient" +msgstr "步骤 1:重温 NumPyClient" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:258 +#, fuzzy +msgid "" +"So far, we've implemented our client by subclassing " +"``flwr.client.NumPyClient``. The three methods we implemented are " +"``get_parameters``, ``fit``, and ``evaluate``." +msgstr "" +"到目前为止,我们通过子类化 ``flwr.client.NumPyClient`` " +"实现了我们的客户端。我们实现了三个方法:``get_parameters``, ``fit`, 和``evaluate``。最后,我们用一个名为 " +"``client_fn`` 的函数来创建该类的实例:" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:299 +msgid "" +"Then, we define the function ``numpyclient_fn`` that is used by Flower to" +" create the ``FlowerNumpyClient`` instances on demand. Finally, we create" +" the ``ClientApp`` and pass the ``numpyclient_fn`` to it." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:328 +#, fuzzy +msgid "" +"We've seen this before, there's nothing new so far. The only *tiny* " +"difference compared to the previous notebook is naming, we've changed " +"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " +"``numpyclient_fn``. 
Next, we configure the number of federated learning " +"rounds using ``ServerConfig`` and create the ``ServerApp`` with this " +"config:" +msgstr "" +"我们以前见过这种情况,目前没有什么新东西。与之前的笔记相比,唯一*小*的不同是命名,我们把 ``FlowerClient`` 改成了 " +"``FlowerNumPyClient``,把 `client_fn` 改成了 ``numpyclient_fn``。让我们运行它看看输出结果:" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:355 +msgid "" +"Finally, we specify the resources for each client and run the simulation " +"to see the output we get:" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:389 +#, fuzzy +msgid "" +"This works as expected, ten clients are training for three rounds of " +"federated learning." +msgstr "结果不出所料,两个客户端正在进行三轮联邦学习训练。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:391 +#, fuzzy +msgid "" +"Let's dive a little bit deeper and discuss how Flower executes this " +"simulation. Whenever a client is selected to do some work, " +"``run_simulation`` launches the ``ClientApp`` object which in turn calls " +"the function ``numpyclient_fn`` to create an instance of our " +"``FlowerNumPyClient`` (along with loading the model and the data)." +msgstr "" +"让我们再深入一点,讨论一下 Flower 是如何执行模拟的。每当一个客户端被选中进行工作时,`start_simulation`` 就会调用函数 " +"`numpyclient_fn` 来创建我们的 ``FlowerNumPyClient`` 实例(同时加载模型和数据)。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:393 +msgid "" +"But here's the perhaps surprising part: Flower doesn't actually use the " +"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " +"makes it look like a subclass of ``flwr.client.Client``, not " +"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " +"know how to handle ``NumPyClient``'s, it only knows how to handle " +"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " +"top of ``Client``." 
+msgstr "" +"但令人惊讶的部分也许就在这里: Flower 实际上并不直接使用 ``FlowerNumPyClient`` " +"对象。相反,它封装了该对象,使其看起来像 ``flwr.client.Client`` 的子类,而不是 " +"``flwr.client.NumPyClient``。事实上,Flower 核心框架不知道如何处理 " +"``NumPyClient``,它只知道如何处理 ``Client``。``NumPyClient`` " +"只是建立在``Client``之上的便捷抽象类。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:395 +msgid "" +"Instead of building on top of ``NumPyClient``, we can directly build on " +"top of ``Client``." +msgstr "与其在 ``NumPyClient`` 上构建,我们可以直接在 ``Client`` 上构建。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:407 +msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" +msgstr "步骤 2:从 ``NumPyClient`` 移至 ``Client``" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:409 +msgid "" +"Let's try to do the same thing using ``Client`` instead of " +"``NumPyClient``." +msgstr "让我们尝试使用 ``Client`` 代替 ``NumPyClient`` 做同样的事情。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:519 +msgid "" +"Before we discuss the code in more detail, let's try to run it! Gotta " +"make sure our new ``Client``-based client works, right?" +msgstr "在详细讨论代码之前,让我们试着运行它!必须确保我们基于 ``Client`` 的新客户端能正常运行,对吗?" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:545 +msgid "" +"That's it, we're now using ``Client``. It probably looks similar to what " +"we've done with ``NumPyClient``. So what's the difference?" +msgstr "就是这样,我们现在开始使用 ``Client``。它看起来可能与我们使用 ``NumPyClient`` 所做的类似。那么有什么不同呢?" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:547 +msgid "" +"First of all, it's more code. But why? The difference comes from the fact" +" that ``Client`` expects us to take care of parameter serialization and " +"deserialization. For Flower to be able to send parameters over the " +"network, it eventually needs to turn these parameters into ``bytes``. " +"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " +"serialization. 
Turning raw bytes into something more useful (like NumPy " +"``ndarray``'s) is called deserialization. Flower needs to do both: it " +"needs to serialize parameters on the server-side and send them to the " +"client, the client needs to deserialize them to use them for local " +"training, and then serialize the updated parameters again to send them " +"back to the server, which (finally!) deserializes them again in order to " +"aggregate them with the updates received from other clients." +msgstr "" +"首先,它的代码更多。但为什么呢?区别在于 ``Client`` 希望我们处理参数的序列化和反序列化。Flower " +"要想通过网络发送参数,最终需要将这些参数转化为 ``字节``。把参数(例如 NumPy 的 ``ndarray`` " +"参数)变成原始字节叫做序列化。将原始字节转换成更有用的东西(如 NumPy ``ndarray`)称为反序列化。Flower " +"需要同时做这两件事:它需要在服务器端序列化参数并将其发送到客户端,客户端需要反序列化参数以便将其用于本地训练,然后再次序列化更新后的参数并将其发送回服务器,服务器(最后)再次反序列化参数以便将其与从其他客户端接收到的更新汇总在一起。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:550 +msgid "" +"The only *real* difference between Client and NumPyClient is that " +"NumPyClient takes care of serialization and deserialization for you. It " +"can do so because it expects you to return parameters as NumPy ndarray's," +" and it knows how to handle these. This makes working with machine " +"learning libraries that have good NumPy support (most of them) a breeze." +msgstr "" +"Client 与 NumPyClient 之间的唯一**真正区别在于,NumPyClient " +"会为你处理序列化和反序列化。NumPyClient之所以能做到这一点,是因为它预计你会以NumPy " +"ndarray的形式返回参数,而且它知道如何处理这些参数。这使得与具有良好 NumPy 支持的大多数机器学习库一起工作变得轻而易举。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:552 +msgid "" +"In terms of API, there's one major difference: all methods in Client take" +" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " +"exactly one value (e.g., ``FitRes`` in ``Client.fit``). 
The methods in " +"``NumPyClient`` on the other hand have multiple arguments (e.g., " +"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" +" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " +"``NumPyClient.fit``) if there are multiple things to handle. These " +"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " +"values you're used to from ``NumPyClient``." +msgstr "" +"在 API 方面,有一个主要区别:Client 中的所有方法都只接受一个参数(例如,``Client.fit`` 中的 " +"``FitIns``),并只返回一个值(例如,``Client.fit`` 中的 " +"``FitRes``)。另一方面,``NumPyClient``中的方法有多个参数(例如,``NumPyClient.fit``中的``parameters``和``config``)和多个返回值(例如,``NumPyClient.fit``中的``parameters``、``num_example``和``metrics``)。在" +" ``Client`` 中的这些 ``*Ins`` 和 ``*Res`` 对象封装了你在 ``NumPyClient`` 中习惯使用的所有单个值。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:565 +msgid "Step 3: Custom serialization" +msgstr "步骤 3:自定义序列化" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:567 +msgid "" +"Here we will explore how to implement custom serialization with a simple " +"example." +msgstr "下面我们将通过一个简单的示例来探讨如何实现自定义序列化。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:569 +msgid "" +"But first what is serialization? Serialization is just the process of " +"converting an object into raw bytes, and equally as important, " +"deserialization is the process of converting raw bytes back into an " +"object. This is very useful for network communication. Indeed, without " +"serialization, you could not just a Python object through the internet." +msgstr "" +"首先,什么是序列化?序列化只是将对象转换为原始字节的过程,同样重要的是,反序列化是将原始字节转换回对象的过程。这对网络通信非常有用。事实上,如果没有序列化,你就无法通过互联网传输一个" +" Python 对象。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:571 +msgid "" +"Federated Learning relies heavily on internet communication for training " +"by sending Python objects back and forth between the clients and the " +"server. 
+" This means that serialization is an essential part of Federated "
+"Learning."
+msgstr "通过在客户端和服务器之间来回发送 Python 对象,联合学习在很大程度上依赖于互联网通信进行训练。这意味着序列化是联邦学习的重要组成部分。"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:573
+msgid ""
+"In the following section, we will write a basic example where instead of "
+"sending a serialized version of our ``ndarray``\\ s containing our "
+"parameters, we will first convert the ``ndarray`` into sparse matrices, "
+"before sending them. This technique can be used to save bandwidth, as in "
+"certain cases where the weights of a model are sparse (containing many 0 "
+"entries), converting them to a sparse matrix can greatly improve their "
+"bytesize."
+msgstr ""
+"在下面的章节中,我们将编写一个基本示例,在发送包含参数的 ``ndarray`` 前,我们将首先把 ``ndarray`` "
+"转换为稀疏矩阵,而不是发送序列化版本。这种技术可以用来节省带宽,因为在某些情况下,模型的参数是稀疏的(包含许多 0 "
+"条目),将它们转换成稀疏矩阵可以大大提高它们的字节数。"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:576
+msgid "Our custom serialization/deserialization functions"
+msgstr "我们的定制序列化/反序列化功能"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:578
+msgid ""
+"This is where the real serialization/deserialization will happen, "
+"especially in ``ndarray_to_sparse_bytes`` for serialization and "
+"``sparse_bytes_to_ndarray`` for deserialization."
+msgstr ""
+"这才是真正的序列化/反序列化,尤其是在用于序列化的 ``ndarray_to_sparse_bytes`` 和用于反序列化的 "
+"``sparse_bytes_to_ndarray`` 中。"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:580
+msgid ""
+"Note that we imported the ``scipy.sparse`` library in order to convert "
+"our arrays."
+msgstr "请注意,为了转换数组,我们导入了 ``scipy.sparse`` 库。"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:668
+msgid "Client-side"
+msgstr "客户端"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:670
+msgid ""
+"To be able to serialize our ``ndarray``\\ s into sparse parameters, we "
+"will just have to call our custom functions in our "
+"``flwr.client.Client``."
+msgstr "为了能够将我们的 ``ndarray`` 序列化为稀疏参数,我们只需在 ``flwr.client.Client`` 中调用我们的自定义函数。"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:672
+msgid ""
+"Indeed, in ``get_parameters`` we need to serialize the parameters we got "
+"from our network using our custom ``ndarrays_to_sparse_parameters`` "
+"defined above."
+msgstr ""
+"事实上,在 ``get_parameters`` 中,我们需要使用上文定义的自定义 ``ndarrays_to_sparse_parameters`` "
+"序列化从网络中获取的参数。"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:674
+msgid ""
+"In ``fit``, we first need to deserialize the parameters coming from the "
+"server using our custom ``sparse_parameters_to_ndarrays`` and then we "
+"need to serialize our local results with "
+"``ndarrays_to_sparse_parameters``."
+msgstr ""
+"在 ``fit`` 中,我们首先需要使用自定义的 ``sparse_parameters_to_ndarrays`` "
+"反序列化来自服务器的参数,然后使用 ``ndarrays_to_sparse_parameters`` 序列化本地结果。"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:676
+msgid ""
+"In ``evaluate``, we will only need to deserialize the global parameters "
+"with our custom function."
+msgstr "在 ``evaluate`` 中,我们只需要用自定义函数反序列化全局参数。"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:781
+msgid "Server-side"
+msgstr "服务器端"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:783
+msgid ""
+"For this example, we will just use ``FedAvg`` as a strategy. To change "
+"the serialization and deserialization here, we only need to reimplement "
+"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other"
+" functions of the strategy will be inherited from the super class "
+"``FedAvg``."
+msgstr "" +"在本例中,我们将只使用 ``FedAvg`` 作为策略。要改变这里的序列化和反序列化,我们只需重新实现 ``FedAvg`` 的 " +"``evaluate`` 和 ``aggregate_fit`` 函数。策略的其他函数将从超类 ``FedAvg`` 继承。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:785 +msgid "As you can see only one line as change in ``evaluate``:" +msgstr "正如你所看到的,``evaluate``中只修改了一行:" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:791 +msgid "" +"And for ``aggregate_fit``, we will first deserialize every result we " +"received:" +msgstr "而对于 ``aggregate_fit``,我们将首先反序列化收到的每个结果:" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:800 +msgid "And then serialize the aggregated result:" +msgstr "然后将汇总结果序列化:" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:959 +msgid "We can now run our custom serialization example!" +msgstr "现在我们可以运行自定义序列化示例!" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1000 +msgid "" +"In this part of the tutorial, we've seen how we can build clients by " +"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " +"convenience abstraction that makes it easier to work with machine " +"learning libraries that have good NumPy interoperability. ``Client`` is a" +" more flexible abstraction that allows us to do things that are not " +"possible in ``NumPyClient``. In order to do so, it requires us to handle " +"parameter serialization and deserialization ourselves." +msgstr "" +"在本部分教程中,我们已经了解了如何通过子类化 ``NumPyClient`` 或 ``Client`` 来构建客户端。NumPyClient " +"\"是一个便捷的抽象类,可以让我们更容易地与具有良好NumPy互操作性的机器学习库一起工作。``Client``是一个更灵活的抽象类,允许我们做一些在`NumPyClient``中做不到的事情。为此,它要求我们自己处理参数序列化和反序列化。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1018 +msgid "" +"This is the final part of the Flower tutorial (for now!), " +"congratulations! You're now well equipped to understand the rest of the " +"documentation. 
There are many topics we didn't cover in the tutorial, we " +"recommend the following resources:" +msgstr "这暂时是 Flower 教程的最后一部分,恭喜您!您现在已经具备了理解其余文档的能力。本教程还有许多内容没有涉及,我们推荐您参考以下资源:" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1020 +msgid "`Read Flower Docs `__" +msgstr "阅读Flower文档 `__" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1021 +#, fuzzy +msgid "`Check out Flower Code Examples `__" +msgstr "查看 Flower 代码示例 `__" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1022 +msgid "" +"`Use Flower Baselines for your research " +"`__" +msgstr "使用 \"Flower Baselines \"进行研究 `__" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1023 +#, fuzzy +msgid "" +"`Watch Flower AI Summit 2024 videos `__" +msgstr "观看 2023 年Flower峰会视频 `__" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 +msgid "Get started with Flower" +msgstr "开始使用Flower" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 +msgid "Welcome to the Flower federated learning tutorial!" +msgstr "欢迎阅读Flower联邦学习教程!" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 +#, fuzzy +msgid "" +"In this notebook, we'll build a federated learning system using the " +"Flower framework, Flower Datasets and PyTorch. In part 1, we use PyTorch " +"for the model training pipeline and data loading. In part 2, we federate " +"the PyTorch project using Flower." +msgstr "" +"在本笔记中,我们将使用 Flower 和 PyTorch 构建一个联邦学习系统。在第一部分中,我们使用 PyTorch " +"进行模型训练和数据加载。在第二部分中,我们将继续使用 Flower 联邦化基于 PyTorch 的框架。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:18 +#, fuzzy +msgid "Let's get started! 🌼" +msgstr "让我们开始吧!" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:32 +msgid "" +"Before we begin with any actual code, let's make sure that we have " +"everything we need." 
+msgstr "在开始编写实际代码之前,让我们先确保我们已经准备好了所需的一切。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:44 +#, fuzzy +msgid "Install dependencies" +msgstr "安装依赖项" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:46 +#, fuzzy +msgid "" +"Next, we install the necessary packages for PyTorch (``torch`` and " +"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " +"(``flwr``):" +msgstr "接下来,我们为 PyTorch(`torch`` 和`torchvision``)和 Flower(`flwr`)安装必要的软件包:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:109 +#, fuzzy +msgid "" +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" +" GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." +msgstr "" +"可以切换到已启用 GPU 加速的运行时(在 Google Colab 上: 运行时 > 更改运行时类型 > 硬件加速: GPU > " +"保存``)。但请注意,Google Colab 并非总能提供 GPU 加速。如果在以下部分中看到与 GPU 可用性相关的错误,请考虑通过设置 " +"``DEVICE = torch.device(\"cpu\")`` 切回基于 CPU 的执行。如果运行时已启用 GPU " +"加速,你应该会看到输出``Training on cuda``,否则会显示``Training on cpu``。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:122 +#, fuzzy +msgid "Load the data" +msgstr "加载数据" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:124 +#, fuzzy +msgid "" +"Federated learning can be applied to many different types of tasks across" +" different domains. In this tutorial, we introduce federated learning by " +"training a simple convolutional neural network (CNN) on the popular " +"CIFAR-10 dataset. 
CIFAR-10 can be used to train image classifiers that " +"distinguish between images from ten different classes: 'airplane', " +"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " +"'truck'." +msgstr "" +"联邦学习可应用于不同领域的多种不同类型任务。在本教程中,我们将通过在流行的 CIFAR-10 数据集上训练一个简单的卷积神经网络 (CNN) " +"来介绍联合学习。CIFAR-10 可用于训练图像分类器,以区分来自十个不同类别的图像:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:135 +#, fuzzy +msgid "" +"We simulate having multiple datasets from multiple organizations (also " +"called the \"cross-silo\" setting in federated learning) by splitting the" +" original CIFAR-10 dataset into multiple partitions. Each partition will " +"represent the data from a single organization. We're doing this purely " +"for experimentation purposes, in the real world there's no need for data " +"splitting because each organization already has their own data (the data " +"is naturally partitioned)." +msgstr "" +"我们通过将原始 CIFAR-10 数据集拆分成多个分区来模拟来自多个组织的多个数据集(也称为联邦学习中的 \"跨分区 " +"\"设置)。每个分区代表一个组织的数据。我们这样做纯粹是为了实验目的,在现实世界中不需要拆分数据,因为每个组织都已经有了自己的数据(所以数据是自然分区的)。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:137 +#, fuzzy +msgid "" +"Each organization will act as a client in the federated learning system. " +"Having ten organizations participate in a federation means having ten " +"clients connected to the federated learning server." +msgstr "每个组织都将充当联邦学习系统中的客户端。因此,有十个组织参与联邦学习,就意味着有十个客户端连接到联邦学习服务器:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:148 +#, fuzzy +msgid "" +"We use the Flower Datasets library (``flwr-datasets``) to partition " +"CIFAR-10 into ten partitions using ``FederatedDataset``. 
We will create a" +" small training and test set for each of the ten organizations and wrap " +"each of these into a PyTorch ``DataLoader``:" +msgstr "" +"现在,让我们从 ``flwr-datasets`` 中创建 Federated Dataset 抽象,以分割 " +"CIFAR-10。我们将为每个边缘设备创建小型训练集和测试集,并将它们分别封装到 PyTorch ``DataLoader`` 中:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:196 +#, fuzzy +msgid "" +"We now have a function that can return a training set and validation set " +"(``trainloader`` and ``valloader``) representing one dataset from one of " +"ten different organizations. Each ``trainloader``/``valloader`` pair " +"contains 4000 training examples and 1000 validation examples. There's " +"also a single ``testloader`` (we did not split the test set). Again, this" +" is only necessary for building research or educational systems, actual " +"federated learning systems have their data naturally distributed across " +"multiple partitions." +msgstr "" +"现在,我们有一个包含十个训练集和十个验证集(`trainloaders`` 和`valloaders``)的列表,代表十个不同组织的数据。每对 " +"``trainloader``/``valloader`` 都包含 4500 个训练示例和 500 个验证数据。还有一个单独的 " +"``测试加载器``(我们没有拆分测试集)。同样,这只有在构建研究或教育系统时才有必要,实际的联邦学习系统的数据自然分布在多个分区中。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:199 +#, fuzzy +msgid "" +"Let's take a look at the first batch of images and labels in the first " +"training set (i.e., ``trainloader`` from ``partition_id=0``) before we " +"move on:" +msgstr "在继续之前,让我们先看看第一个训练集中的第一批图像和标签(即 ``trainloaders[0]``):" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:241 +#, fuzzy +msgid "" +"The output above shows a random batch of images from the ``trainloader`` " +"from the first of ten partitions. It also prints the labels associated " +"with each image (i.e., one of the ten possible labels we've seen above). " +"If you run the cell again, you should see another batch of images." 
+msgstr "" +"上面的输出显示了来自十个 \"trainloader \"列表中第一个 \"trainloader " +"\"的随机图像。它还打印了与每幅图像相关的标签(即我们上面看到的十个可能标签之一)。如果您再次运行该单元,应该会看到另一批图像。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:253 +msgid "Step 1: Centralized Training with PyTorch" +msgstr "步骤 1:使用 PyTorch 进行集中训练" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:264 +msgid "" +"Next, we're going to use PyTorch to define a simple convolutional neural " +"network. This introduction assumes basic familiarity with PyTorch, so it " +"doesn't cover the PyTorch-related aspects in full detail. If you want to " +"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " +"MINUTE BLITZ " +"`__." +msgstr "" +"接下来,我们将使用 PyTorch 来定义一个简单的卷积神经网络。本介绍假定您对 PyTorch 有基本的了解,因此不会详细介绍与 PyTorch" +" 相关的内容。如果你想更深入地了解 PyTorch,我们推荐你阅读 `DEEP LEARNING WITH PYTORCH: a 60 " +"minute blitz " +"`__。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:276 +#, fuzzy +msgid "Define the model" +msgstr "定义模型" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:278 +msgid "" +"We use the simple CNN described in the `PyTorch tutorial " +"`__:" +msgstr "" +"我们使用` PyTorch 教程 " +"`__ 中描述的简单 CNN:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:315 +msgid "Let's continue with the usual training and test functions:" +msgstr "让我们继续进行常规的训练和测试功能:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:375 +#, fuzzy +msgid "Train the model" +msgstr "训练模型" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:377 +#, fuzzy +msgid "" +"We now have all the basic building blocks we need: a dataset, a model, a " +"training function, and a test function. Let's put them together to train " +"the model on the dataset of one of our organizations " +"(``partition_id=0``). 
This simulates the reality of most machine learning" +" projects today: each organization has their own data and trains models " +"only on this internal data:" +msgstr "现在我们拥有了所需的所有基本构件:数据集、模型、训练函数和测试函数。让我们把它们放在一起,在我们其中一个组织的数据集(``trainloaders[0]``)上训练模型。这模拟了当今大多数机器学习项目的实际情况:每个组织都有自己的数据,并且只在这些内部数据上训练模型:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 +#, fuzzy +msgid "" +"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " +"in a test set accuracy of about 41%, which is not good, but at the same " +"time, it doesn't really matter for the purposes of this tutorial. The " +"intent was just to show a simple centralized training pipeline that sets " +"the stage for what comes next - federated learning!" +msgstr "" +"在我们的 CIFAR-10 分片上对简单 CNN 进行 5 个遍历的训练后,测试集的准确率应为 " +"41%,这并不理想,但同时对本教程而言也并不重要。我们只是想展示一个简单的集中式训练流程,为接下来的联邦学习做好铺垫!" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 +msgid "Step 2: Federated Learning with Flower" +msgstr "步骤 2:使用 Flower 联邦学习" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 +msgid "" +"Step 1 demonstrated a simple centralized training pipeline. All data was " +"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." +" Next, we'll simulate a situation where we have multiple datasets in " +"multiple organizations and where we train a model over these " +"organizations using federated learning." 
+msgstr "" +"步骤 1 演示了一个简单的集中式训练流程。所有数据都在一个地方(即一个 \"trainloader \"和一个 " +"\"valloader\")。接下来,我们将模拟在多个组织中拥有多个数据集的情况,并使用联邦学习在这些组织中训练一个模型。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 +#, fuzzy +msgid "Update model parameters" +msgstr "更新模型参数" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 +#, fuzzy +msgid "" +"In federated learning, the server sends global model parameters to the " +"client, and the client updates the local model with parameters received " +"from the server. It then trains the model on the local data (which " +"changes the model parameters locally) and sends the updated/changed model" +" parameters back to the server (or, alternatively, it sends just the " +"gradients back to the server, not the full model parameters)." +msgstr "在联邦学习中,服务器将全局模型参数发送给客户端,客户端根据从服务器接收到的参数更新本地模型。然后,客户端根据本地数据对模型进行训练(在本地更改模型参数),并将更新/更改后的模型参数发回服务器(或者,客户端只将梯度参数发回服务器,而不是全部模型参数)。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 +msgid "" +"We need two helper functions to update the local model with parameters " +"received from the server and to get the updated model parameters from the" +" local model: ``set_parameters`` and ``get_parameters``. The following " +"two functions do just that for the PyTorch model above." +msgstr "" +"我们需要两个辅助函数,用从服务器接收到的参数更新本地模型,并从本地模型获取更新后的模型参数:`` " +"set_parameters```和`get_parameters``。下面两个函数就是为上面的 PyTorch 模型做这些工作的。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 +#, fuzzy +msgid "" +"The details of how this works are not really important here (feel free to" +" consult the PyTorch documentation if you want to learn more). In " +"essence, we use ``state_dict`` to access PyTorch model parameter tensors." 
+" The parameter tensors are then converted to/from a list of NumPy " +"ndarray's (which the Flower ``NumPyClient`` knows how to " +"serialize/deserialize):" +msgstr "" +"在这里,如何工作的细节并不重要(如果你想了解更多,请随时查阅 PyTorch 文档)。本质上,我们使用 ``state_dict`` 访问 " +"PyTorch 模型参数张量。然后,参数张量会被转换成/转换成 NumPy ndarray 列表(Flower 知道如何序列化/反序列化):" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 +#, fuzzy +msgid "Define the Flower ClientApp" +msgstr "Flower 客户端。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 +#, fuzzy +msgid "" +"With that out of the way, let's move on to the interesting part. " +"Federated learning systems consist of a server and multiple clients. In " +"Flower, we create a ``ServerApp`` and a ``ClientApp`` to run the server-" +"side and client-side code, respectively." +msgstr "" +"说完这些,让我们进入有趣的部分。联邦学习系统由一个服务器和多个客户端组成。在 Flower 中,我们通过实现 " +"``flwr.client.Client`` 或 ``flwr.client.NumPyClient`` " +"的子类来创建客户端。在本教程中,我们使用``NumPyClient``,因为它更容易实现,需要我们编写的模板也更少。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 +#, fuzzy +msgid "" +"The first step toward creating a ``ClientApp`` is to implement a " +"subclasses of ``flwr.client.Client`` or ``flwr.client.NumPyClient``. We " +"use ``NumPyClient`` in this tutorial because it is easier to implement " +"and requires us to write less boilerplate. 
To implement ``NumPyClient``, " +"we create a subclass that implements the three methods " +"``get_parameters``, ``fit``, and ``evaluate``:" +msgstr "" +"说完这些,让我们进入有趣的部分。联邦学习系统由一个服务器和多个客户端组成。在 Flower 中,我们通过实现 " +"``flwr.client.Client`` 或 ``flwr.client.NumPyClient`` " +"的子类来创建客户端。在本教程中,我们使用``NumPyClient``,因为它更容易实现,需要我们编写的模板也更少。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 +msgid "``get_parameters``: Return the current local model parameters" +msgstr "``get_parameters``: 返回当前本地模型参数" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 +#, fuzzy +msgid "" +"``fit``: Receive model parameters from the server, train the model on the" +" local data, and return the updated model parameters to the server" +msgstr "``fit``: 从服务器接收模型参数,在本地数据上训练模型参数,并将(更新的)模型参数返回服务器" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 +#, fuzzy +msgid "" +"``evaluate``: Receive model parameters from the server, evaluate the " +"model on the local data, and return the evaluation result to the server" +msgstr "``evaluate ``: 从服务器接收模型参数,在本地数据上评估模型参数,并将评估结果返回服务器" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 +msgid "" +"We mentioned that our clients will use the previously defined PyTorch " +"components for model training and evaluation. Let's see a simple Flower " +"client implementation that brings everything together:" +msgstr "" +"我们提到,我们的客户端将使用之前定义的 PyTorch 组件进行模型训练和评估。让我们来看看一个简单的 Flower " +"客户端实现,它将一切都整合在一起:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 +#, fuzzy +msgid "" +"Our class ``FlowerClient`` defines how local training/evaluation will be " +"performed and allows Flower to call the local training/evaluation through" +" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" +" *single client* in our federated learning system. 
Federated learning " +"systems have multiple clients (otherwise, there's not much to federate), " +"so each client will be represented by its own instance of " +"``FlowerClient``. If we have, for example, three clients in our workload," +" then we'd have three instances of ``FlowerClient`` (one on each of the " +"machines we'd start the client on). Flower calls ``FlowerClient.fit`` on " +"the respective instance when the server selects a particular client for " +"training (and ``FlowerClient.evaluate`` for evaluation)." +msgstr "" +"我们的类 ``FlowerClient`` 定义了本地训练/评估的执行方式,并允许 Flower 通过 ``fit`` 和 " +"``evaluate`` 调用本地训练/评估。每个 ``FlowerClient`` " +"实例都代表联邦学习系统中的*单个客户端*。联邦学习系统有多个客户端(否则就没有什么可联邦的),因此每个客户端都将由自己的 " +"``FlowerClient`` 实例来代表。例如,如果我们的工作负载中有三个客户端,那么我们就会有三个 ``FlowerClient`` " +"实例。当服务器选择特定客户端进行训练时,Flower 会调用相应实例上的 ``FlowerClient.fit`` (评估时调用 " +"``FlowerClient.evaluate``)。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:516 +#, fuzzy +msgid "" +"In this notebook, we want to simulate a federated learning system with 10" +" clients *on a single machine*. This means that the server and all 10 " +"clients will live on a single machine and share resources such as CPU, " +"GPU, and memory. Having 10 clients would mean having 10 instances of " +"``FlowerClient`` in memory. Doing this on a single machine can quickly " +"exhaust the available memory resources, even if only a subset of these " +"clients participates in a single round of federated learning." 
+msgstr "" +"在本笔记中,我们要模拟一个联邦学习系统,在一台机器上有 10 个客户端。这意味着服务器和所有 10 个客户端都将位于一台机器上,并共享 " +"CPU、GPU 和内存等资源。有 10 个客户端就意味着内存中有 10 个 ``FlowerClient`` " +"实例。在单台机器上这样做会很快耗尽可用的内存资源,即使这些客户端中只有一个子集参与了一轮联邦学习。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:518 +#, fuzzy +msgid "" +"In addition to the regular capabilities where server and clients run on " +"multiple machines, Flower, therefore, provides special simulation " +"capabilities that create ``FlowerClient`` instances only when they are " +"actually necessary for training or evaluation. To enable the Flower " +"framework to create clients when necessary, we need to implement a " +"function that creates a ``FlowerClient`` instance on demand. We typically" +" call this function ``client_fn``. Flower calls ``client_fn`` whenever it" +" needs an instance of one particular client to call ``fit`` or " +"``evaluate`` (those instances are usually discarded after use, so they " +"should not keep any local state). In federated learning experiments using" +" Flower, clients are identified by a partition ID, or ``partition-id``. " +"This ``partition-id`` is used to load different local data partitions for" +" different clients, as can be seen below. The value of ``partition-id`` " +"is retrieved from the ``node_config`` dictionary in the ``Context`` " +"object, which holds the information that persists throughout each " +"training round." 
+msgstr "" +"除了服务器和客户端在多台机器上运行的常规功能外,Flower 还提供了特殊的模拟功能,即只有在训练或评估实际需要时才创建 " +"``FlowerClient`` 实例。为了让 Flower 框架能在必要时创建客户端,我们需要实现一个名为 ``client_fn`` " +"的函数,它能按需创建一个 ``FlowerClient`` 实例。每当 Flower 需要一个特定的客户端实例来调用 ``fit`` 或 " +"``evaluate`` 时,它就会调用 " +"``client_fn``(这些实例在使用后通常会被丢弃,因此它们不应保留任何本地状态)。客户端由一个客户端 ID 或简短的 ``cid`` " +"标识。例如,可以使用 ``cid`` 为不同的客户端加载不同的本地数据分区,如下所示:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:522 +#, fuzzy +msgid "" +"With this, we have the class ``FlowerClient`` which defines client-side " +"training/evaluation and ``client_fn`` which allows Flower to create " +"``FlowerClient`` instances whenever it needs to call ``fit`` or " +"``evaluate`` on one particular client. Last, but definitely not least, we" +" create an instance of ``ClientApp`` and pass it the ``client_fn``. " +"``ClientApp`` is the entrypoint that a running Flower client uses to call" +" your code (as defined in, for example, ``FlowerClient.fit``)." +msgstr "" +"现在我们有了定义客户端训练/评估的类 ``FlowerClient`` 和允许 Flower 在需要调用某个客户端的 ``fit` 或 " +"``evaluate` 时创建 ``FlowerClient`` 实例的 ``client_fn` 类。最后一步是使用 " +"``flwr.simulation.start_simulation`` 启动实际模拟。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:563 +#, fuzzy +msgid "Define the Flower ServerApp" +msgstr "Flower 服务器。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:565 +#, fuzzy +msgid "" +"On the server side, we need to configure a strategy which encapsulates " +"the federated learning approach/algorithm, for example, *Federated " +"Averaging* (FedAvg). Flower has a number of built-in strategies, but we " +"can also use our own strategy implementations to customize nearly all " +"aspects of the federated learning approach. 
For this example, we use the " +"built-in ``FedAvg`` implementation and customize it using a few basic " +"parameters:" +msgstr "" +"Flower 有许多内置策略,但我们也可以使用自己的策略实现来定制联邦学习方法的几乎所有方面。在本例中,我们使用内置的 ``FedAvg`` " +"实现,并使用一些基本参数对其进行定制。最后一步是实际调用 ``start_simulation``开始模拟:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:592 +msgid "" +"Similar to ``ClientApp``, we create a ``ServerApp`` using a utility " +"function ``server_fn``. In ``server_fn``, we pass an instance of " +"``ServerConfig`` for defining the number of federated learning rounds " +"(``num_rounds``) and we also pass the previously created ``strategy``. " +"The ``server_fn`` returns a ``ServerAppComponents`` object containing the" +" settings that define the ``ServerApp`` behaviour. ``ServerApp`` is the " +"entrypoint that Flower uses to call all your server-side code (for " +"example, the strategy)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:629 +#, fuzzy +msgid "Run the training" +msgstr "开始训练" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:631 +msgid "" +"In simulation, we often want to control the amount of resources each " +"client can use. In the next cell, we specify a ``backend_config`` " +"dictionary with the ``client_resources`` key (required) for defining the " +"amount of CPU and GPU resources each client can access." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:659 +msgid "" +"The last step is the actual call to ``run_simulation`` which - you " +"guessed it - runs the simulation. 
``run_simulation`` accepts a number of " +"arguments: - ``server_app`` and ``client_app``: the previously created " +"``ServerApp`` and ``ClientApp`` objects, respectively - " +"``num_supernodes``: the number of ``SuperNodes`` to simulate which equals" +" the number of clients for Flower simulation - ``backend_config``: the " +"resource allocation used in this simulation" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:686 +msgid "Behind the scenes" +msgstr "幕后" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:688 +msgid "So how does this work? How does Flower execute this simulation?" +msgstr "那么它是如何工作的呢?Flower 如何进行模拟?" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:690 +#, fuzzy, python-format +msgid "" +"When we call ``run_simulation``, we tell Flower that there are 10 clients" +" (``num_supernodes=10``, where 1 ``SuperNode`` launches 1 ``ClientApp``)." +" Flower then goes ahead an asks the ``ServerApp`` to issue an " +"instructions to those nodes using the ``FedAvg`` strategy. ``FedAvg`` " +"knows that it should select 100% of the available clients " +"(``fraction_fit=1.0``), so it goes ahead and selects 10 random clients " +"(i.e., 100% of 10)." +msgstr "" +"当我们调用 ``start_simulation`` 时,我们会告诉 Flower 有 10 " +"个客户(`num_clients=10``)。然后,Flower 会要求 ``FedAvg`` 策略选择客户。``FedAvg`` 知道它应该选择" +" 100%的可用客户(``fraction_fit=1.0``),所以它会随机选择 10 个客户(即 10 的 100%)。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:692 +#, fuzzy +msgid "" +"Flower then asks the selected 10 clients to train the model. Each of the " +"10 ``ClientApp`` instances receives a message, which causes it to call " +"``client_fn`` to create an instance of ``FlowerClient``. It then calls " +"``.fit()`` on each the ``FlowerClient`` instances and returns the " +"resulting model parameter updates to the ``ServerApp``. 
When the " +"``ServerApp`` receives the model parameter updates from the clients, it " +"hands those updates over to the strategy (*FedAvg*) for aggregation. The " +"strategy aggregates those updates and returns the new global model, which" +" then gets used in the next round of federated learning." +msgstr "" +"然后,Flower 会要求选定的 10 " +"个客户端对模型进行训练。服务器收到客户端的模型参数更新后,会将这些更新交给策略(*FedAvg*)进行聚合。策略会聚合这些更新并返回新的全局模型,然后将其用于下一轮联邦学习。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:705 +msgid "Where's the accuracy?" +msgstr "准确度在哪里找?" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:707 +msgid "" +"You may have noticed that all metrics except for ``losses_distributed`` " +"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" +msgstr "" +"您可能已经注意到,除了 ``losses_distributed`` 以外,所有指标都是空的。``{\"accuracy\": " +"float(accuracy)}`` 去哪儿了?" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:709 +msgid "" +"Flower can automatically aggregate losses returned by individual clients," +" but it cannot do the same for metrics in the generic metrics dictionary " +"(the one with the ``accuracy`` key). Metrics dictionaries can contain " +"very different kinds of metrics and even key/value pairs that are not " +"metrics at all, so the framework does not (and can not) know how to " +"handle these automatically." +msgstr "" +"Flower 可以自动汇总单个客户端返回的损失值,但无法对通用度量字典中的度量进行同样的处理(即带有 ``accuracy`` " +"键的度量字典)。度量值字典可以包含非常不同种类的度量值,甚至包含根本不是度量值的键/值对,因此框架不知道(也无法知道)如何自动处理这些度量值。" 
+msgstr "" +"作为用户,我们需要告诉框架如何处理/聚合这些自定义指标,为此,我们将指标聚合函数传递给策略。然后,只要从客户端接收到拟合或评估指标,策略就会调用这些函数。两个可能的函数是" +" ``fit_metrics_aggregation_fn`` 和 ``evaluate_metrics_aggregation_fn``。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 +msgid "" +"Let's create a simple weighted averaging function to aggregate the " +"``accuracy`` metric we return from ``evaluate``:" +msgstr "让我们创建一个简单的加权平均函数来汇总从 ``evaluate`` 返回的 ``accuracy`` 指标:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:781 +msgid "" +"We now have a full system that performs federated training and federated " +"evaluation. It uses the ``weighted_average`` function to aggregate custom" +" evaluation metrics and calculates a single ``accuracy`` metric across " +"all clients on the server side." +msgstr "" +"我们现在有了一个完整的系统,可以执行联邦训练和联邦评估。它使用 ``weighted_average`` " +"函数汇总自定义评估指标,并在服务器端计算所有客户端的单一 ``accuracy`` 指标。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:783 +msgid "" +"The other two categories of metrics (``losses_centralized`` and " +"``metrics_centralized``) are still empty because they only apply when " +"centralized evaluation is being used. Part two of the Flower tutorial " +"will cover centralized evaluation." +msgstr "" +"其他两类指标(`losses_centralized`` 和 " +"`metrics_centralized`)仍然是空的,因为它们只适用于集中评估。Flower 教程的第二部分将介绍集中式评估。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:795 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 +msgid "Final remarks" +msgstr "结束语" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:797 +msgid "" +"Congratulations, you just trained a convolutional neural network, " +"federated over 10 clients! With that, you understand the basics of " +"federated learning with Flower. 
The same approach you've seen can be used" +" with other machine learning frameworks (not just PyTorch) and tasks (not" +" just CIFAR-10 images classification), for example NLP with Hugging Face " +"Transformers or speech with SpeechBrain." +msgstr "" +"恭喜您,你刚刚训练了一个由 10 个客户端组成的卷积神经网络!这样,你就了解了使用 Flower " +"进行联邦学习的基础知识。你所看到的方法同样适用于其他机器学习框架(不只是 PyTorch)和任务(不只是 CIFAR-10 图像分类),例如使用 " +"Hugging Face Transformers 的 NLP 或使用 SpeechBrain 的语音。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:799 +msgid "" +"In the next notebook, we're going to cover some more advanced concepts. " +"Want to customize your strategy? Initialize parameters on the server " +"side? Or evaluate the aggregated model on the server side? We'll cover " +"all this and more in the next tutorial." +msgstr "在下一个笔记中,我们将介绍一些更先进的概念。想定制你的策略吗?在服务器端初始化参数?或者在服务器端评估聚合模型?我们将在下一个教程中介绍所有这些内容以及更多。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:817 +msgid "" +"The `Flower Federated Learning Tutorial - Part 2 " +"`__ goes into more depth about strategies and all " +"the advanced things you can build with them." +msgstr "" +"`Flower 联邦学习教程 - 第 2 部分 `__ 更深入地介绍了策略以及可以使用策略构建的所有高级功能。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 +msgid "Use a federated learning strategy" +msgstr "使用联邦学习策略" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 +msgid "" +"Welcome to the next part of the federated learning tutorial. In previous " +"parts of this tutorial, we introduced federated learning with PyTorch and" +" Flower (`part 1 `__)." 
+msgstr "" +"欢迎来到联邦学习教程的下一部分。在本教程的前几部分,我们介绍了使用 PyTorch 和 Flower 进行联邦学习(`第 1 部分 " +"`__)。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 +#, fuzzy +msgid "" +"In this notebook, we'll begin to customize the federated learning system " +"we built in the introductory notebook again, using the Flower framework, " +"Flower Datasets, and PyTorch." +msgstr "" +"在本笔记中,我们将开始定制在入门笔记中构建的联邦学习系统(再次使用 `Flower `__ 和 " +"`PyTorch `__)。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:18 +#, fuzzy +msgid "Let's move beyond FedAvg with Flower strategies! 🌼" +msgstr "让我们超越 FedAvg,采用Flower策略!" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:121 +#, fuzzy +msgid "" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``. We introduce a new parameter" +" ``num_partitions`` which allows us to call ``load_datasets`` with " +"different numbers of partitions." +msgstr "" +"现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成 10 " +"个较小的数据集(每个数据集又分为训练集和验证集),并将所有数据都封装在各自的 ``DataLoader`` 中。我们引入了一个新参数 " +"``num_partitions``,它允许我们使用不同数量的分区调用 ``load_datasets``。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:321 +msgid "Strategy customization" +msgstr "策略定制" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 +msgid "" +"So far, everything should look familiar if you've worked through the " +"introductory notebook. With that, we're ready to introduce a number of " +"new features." 
+msgstr "到目前为止,如果您已经阅读过入门笔记本,那么一切都应该很熟悉了。接下来,我们将介绍一些新功能。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:335 +msgid "Server-side parameter **initialization**" +msgstr "服务器端参数 **初始化**" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:337 +#, fuzzy +msgid "" +"Flower, by default, initializes the global model by asking one random " +"client for the initial parameters. In many cases, we want more control " +"over parameter initialization though. Flower therefore allows you to " +"directly pass the initial parameters to the Strategy. We create an " +"instance of ``Net()`` and get the paramaters as follows:" +msgstr "" +"默认情况下,Flower 会通过向一个随机客户端询问初始参数来初始化全局模型。但在许多情况下,我们需要对参数初始化进行更多控制。因此,Flower" +" 允许您直接将初始参数传递给策略:" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:358 +msgid "" +"Next, we create a ``server_fn`` that returns the components needed for " +"the server. Within ``server_fn``, we create a Strategy that uses the " +"initial parameters." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:393 +#, fuzzy +msgid "" +"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" +" from asking one of the clients for the initial parameters. In " +"``server_fn``, we pass this new ``strategy`` and a ``ServerConfig`` for " +"defining the number of federated learning rounds (``num_rounds``)." 
+msgstr "" +"向 ``FedAvg`` 策略传递 ``initial_parameters`` 可以防止 Flower " +"向其中一个客户端询问初始参数。如果我们仔细观察,就会发现日志中没有显示对 ``FlowerClient.get_parameters`` " +"方法的任何调用。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:395 +msgid "" +"Similar to the ``ClientApp``, we now create the ``ServerApp`` using the " +"``server_fn``:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:416 +msgid "" +"Last but not least, we specify the resources for each client and run the " +"simulation." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:448 +#, fuzzy +msgid "" +"If we look closely, we can see that the logs do not show any calls to the" +" ``FlowerClient.get_parameters`` method." +msgstr "" +"向 ``FedAvg`` 策略传递 ``initial_parameters`` 可以防止 Flower " +"向其中一个客户端询问初始参数。如果我们仔细观察,就会发现日志中没有显示对 ``FlowerClient.get_parameters`` " +"方法的任何调用。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:460 +msgid "Starting with a customized strategy" +msgstr "从定制战略开始" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:462 +#, fuzzy +msgid "" +"We've seen the function ``run_simulation`` before. It accepts a number of" +" arguments, amongst them the ``server_app`` which wraps around the " +"strategy and number of training rounds, ``client_app`` which wraps around" +" the ``client_fn`` used to create ``FlowerClient`` instances, and the " +"number of clients to simulate which equals ``num_supernodes``." +msgstr "" +"我们以前见过函数 ``start_simulation``。它接受许多参数,其中包括用于创建 ``FlowerClient`` 实例的 " +"``client_fn``、要模拟的客户数量 ``num_clients``、回合数 ``num_rounds``和策略。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:464 +msgid "" +"The strategy encapsulates the federated learning approach/algorithm, for " +"example, ``FedAvg`` or ``FedAdagrad``. 
Let's try to use a different " +"strategy this time:" +msgstr "该策略封装了联邦学习方法/算法,例如`FedAvg``或`FedAdagrad``。这次让我们尝试使用不同的策略:" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:509 +msgid "Server-side parameter **evaluation**" +msgstr "服务器端参数**评估**" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:511 +msgid "" +"Flower can evaluate the aggregated model on the server-side or on the " +"client-side. Client-side and server-side evaluation are similar in some " +"ways, but different in others." +msgstr "Flower 可以在服务器端或客户端评估聚合模型。客户端和服务器端评估在某些方面相似,但也有不同之处。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:513 +msgid "" +"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " +"simple: it works the same way that evaluation in centralized machine " +"learning does. If there is a server-side dataset that can be used for " +"evaluation purposes, then that's great. We can evaluate the newly " +"aggregated model after each round of training without having to send the " +"model to clients. We're also fortunate in the sense that our entire " +"evaluation dataset is available at all times." +msgstr "**集中评估**(或*服务器端评估*)在概念上很简单:它的工作方式与集中式机器学习中的评估方式相同。如果有一个服务器端数据集可用于评估目的,那就太好了。我们可以在每一轮训练后对新聚合的模型进行评估,而无需将模型发送给客户端。我们也很幸运,因为我们的整个评估数据集随时可用。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:515 +msgid "" +"**Federated Evaluation** (or *client-side evaluation*) is more complex, " +"but also more powerful: it doesn't require a centralized dataset and " +"allows us to evaluate models over a larger set of data, which often " +"yields more realistic evaluation results. In fact, many scenarios require" +" us to use **Federated Evaluation** if we want to get representative " +"evaluation results at all. 
But this power comes at a cost: once we start " +"to evaluate on the client side, we should be aware that our evaluation " +"dataset can change over consecutive rounds of learning if those clients " +"are not always available. Moreover, the dataset held by each client can " +"also change over consecutive rounds. This can lead to evaluation results " +"that are not stable, so even if we would not change the model, we'd see " +"our evaluation results fluctuate over consecutive rounds." +msgstr "**联邦评估**(或*客户端评估*)更为复杂,但也更为强大:它不需要集中的数据集,允许我们在更大的数据集上对模型进行评估,这通常会产生更真实的评估结果。事实上,如果我们想得到有代表性的评估结果,很多情况下都需要使用**联邦评估**。但是,这种能力是有代价的:一旦我们开始在客户端进行评估,我们就应该意识到,如果这些客户端并不总是可用,我们的评估数据集可能会在连续几轮学习中发生变化。此外,每个客户端所拥有的数据集也可能在连续几轮学习中发生变化。这可能会导致评估结果不稳定,因此即使我们不改变模型,也会看到评估结果在连续几轮中波动。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:518 +msgid "" +"We've seen how federated evaluation works on the client side (i.e., by " +"implementing the ``evaluate`` method in ``FlowerClient``). Now let's see " +"how we can evaluate aggregated model parameters on the server-side:" +msgstr "" +"我们已经了解了联邦评估如何在客户端工作(即通过在 ``FlowerClient`` 中实现 ``evaluate`` " +"方法)。现在让我们看看如何在服务器端评估聚合模型参数:" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:549 +msgid "" +"We create a ``FedAvg`` strategy and pass ``evaluate_fn`` to it. Then, we " +"create a ``ServerApp`` that uses this strategy." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:586 +#, fuzzy +msgid "Finally, we run the simulation." +msgstr "运行模拟" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 +msgid "Sending/receiving arbitrary values to/from clients" +msgstr "向/从客户端发送/接收任意值" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 +msgid "" +"In some situations, we want to configure client-side execution (training," +" evaluation) from the server-side. 
One example for that is the server " +"asking the clients to train for a certain number of local epochs. Flower " +"provides a way to send configuration values from the server to the " +"clients using a dictionary. Let's look at an example where the clients " +"receive values from the server through the ``config`` parameter in " +"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " +"method receives the configuration dictionary through the ``config`` " +"parameter and can then read values from this dictionary. In this example," +" it reads ``server_round`` and ``local_epochs`` and uses those values to " +"improve the logging and configure the number of local training epochs:" +msgstr "" +"在某些情况下,我们希望从服务器端配置客户端的执行(训练、评估)。其中一个例子就是服务器要求客户端训练一定数量的本地遍历。Flower " +"提供了一种使用字典从服务器向客户端发送配置值的方法。让我们来看一个例子:客户端通过 ``fit`` 中的 ``config`` " +"参数从服务器接收配置值(``evaluate`` 中也有 ``config`` 参数)。``fit`` 方法通过 ``config`` " +"参数接收配置字典,然后从字典中读取值。在本例中,它读取了 ``server_round`` 和 " +"``local_epochs``,并使用这些值来改进日志记录和配置本地训练遍历的数量:" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:674 +msgid "" +"So how can we send this config dictionary from server to clients? The " +"built-in Flower Strategies provide way to do this, and it works similarly" +" to the way server-side evaluation works. 
We provide a function to the " +"strategy, and the strategy calls this function for every round of " +"federated learning:" +msgstr "" +"那么,如何将配置字典从服务器发送到客户端呢?内置的 \"Flower策略\"(Flower " +"Strategies)提供了这样的方法,其工作原理与服务器端评估的工作原理类似。我们为策略提供一个函数,策略会在每一轮联邦学习中调用这个函数:" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:704 +#, fuzzy +msgid "" +"Next, we'll pass this function to the FedAvg strategy before starting the" +" simulation:" +msgstr "接下来,我们只需在开始模拟前将此函数传递给 FedAvg 策略即可:" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:749 +msgid "" +"As we can see, the client logs now include the current round of federated" +" learning (which they read from the ``config`` dictionary). We can also " +"configure local training to run for one epoch during the first and second" +" round of federated learning, and then for two epochs during the third " +"round." +msgstr "" +"我们可以看到,客户端日志现在包含了当前一轮的联邦学习(从 ``config`` " +"字典中读取)。我们还可以将本地训练配置为在第一轮和第二轮联邦学习期间运行一个遍历,然后在第三轮联邦学习期间运行两个遍历。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:751 +msgid "" +"Clients can also return arbitrary values to the server. To do so, they " +"return a dictionary from ``fit`` and/or ``evaluate``. We have seen and " +"used this concept throughout this notebook without mentioning it " +"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" +" key/value pair as the third return value in ``evaluate``." 
+msgstr "" +"客户端还可以向服务器返回任意值。为此,它们会从 ``fit`` 和/或 ``evaluate`` " +"返回一个字典。我们在本笔记中看到并使用了这一概念,但并未明确提及:我们的 ``FlowerClient`` 返回一个包含自定义键/值对的字典,作为" +" ``evaluate`` 中的第三个返回值。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:763 +msgid "Scaling federated learning" +msgstr "扩大联邦学习的规模" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:765 +msgid "" +"As a last step in this notebook, let's see how we can use Flower to " +"experiment with a large number of clients." +msgstr "作为本笔记的最后一步,让我们看看如何使用 Flower 对大量客户端进行实验。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:785 +msgid "" +"Note that we can reuse the ``ClientApp`` for different ``num-partitions``" +" since the Context is defined by the ``num_supernodes`` argument in " +"``run_simulation()``." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:787 +#, fuzzy, python-format +msgid "" +"We now have 1000 partitions, each holding 45 training and 5 validation " +"examples. Given that the number of training examples on each client is " +"quite small, we should probably train the model a bit longer, so we " +"configure the clients to perform 3 local training epochs. 
We should also " +"adjust the fraction of clients selected for training during each round " +"(we don't want all 1000 clients participating in every round), so we " +"adjust ``fraction_fit`` to ``0.025``, which means that only 2.5% of " +"available clients (so 25 clients) will be selected for training each " +"round:" +msgstr "" +"现在我们有 1000 个分区,每个分区有 45 个训练数据和 5 " +"个验证数据。鉴于每个客户端上的训练示例数量较少,我们可能需要对模型进行更长时间的训练,因此我们将客户端配置为执行 3 " +"个本地训练遍历。我们还应该调整每轮训练中被选中的客户端的比例(我们不希望每轮训练都有 1000 个客户端参与),因此我们将 " +"``fraction_fit`` 调整为 ``0.025``,这意味着每轮训练只选中 2.5%的可用客户端(即 25 个客户端):" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:843 +msgid "" +"In this notebook, we've seen how we can gradually enhance our system by " +"customizing the strategy, initializing parameters on the server side, " +"choosing a different strategy, and evaluating models on the server-side. " +"That's quite a bit of flexibility with so little code, right?" +msgstr "在本笔记中,我们看到了如何通过自定义策略、在服务器端初始化参数、选择不同的策略以及在服务器端评估模型来逐步增强我们的系统。用这么少的代码就能实现这么大的灵活性,不是吗?" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:845 +msgid "" +"In the later sections, we've seen how we can communicate arbitrary values" +" between server and clients to fully customize client-side execution. " +"With that capability, we built a large-scale Federated Learning " +"simulation using the Flower Virtual Client Engine and ran an experiment " +"involving 1000 clients in the same workload - all in a Jupyter Notebook!" +msgstr "" +"在后面的章节中,我们将看到如何在服务器和客户端之间传递任意值,以完全自定义客户端执行。有了这种能力,我们使用 Flower " +"虚拟客户端引擎构建了一个大规模的联邦学习模拟,并在 Jupyter Notebook 中进行了一次实验,在相同的工作负载中运行了 1000 " +"个客户端!" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:863 +msgid "" +"The `Flower Federated Learning Tutorial - Part 3 " +"`__ shows how to build a fully custom ``Strategy`` from " +"scratch." 
+msgstr "" +"`Flower 联邦学习教程 - 第 3 部分 `__ 展示了如何从头开始构建完全自定义的 \"策略\"。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 +msgid "What is Federated Learning?" +msgstr "什么是联邦学习?" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 +msgid "" +"In this tutorial, you will learn what federated learning is, build your " +"first system in Flower, and gradually extend it. If you work through all " +"parts of the tutorial, you will be able to build advanced federated " +"learning systems that approach the current state of the art in the field." +msgstr "" +"在本教程中,你将了解什么是联邦学习,用 Flower " +"搭建第一个系统,并逐步对其进行扩展。如果你能完成本教程的所有部分,你就能构建高级的联邦学习系统,从而接近该领域当前的技术水平。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 +msgid "" +"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " +"federated learning. Only a basic understanding of data science and Python" +" programming is assumed." +msgstr "🧑‍🏫 本教程从零开始,不要求熟悉联邦学习。仅假定对数据科学和 Python 编程有基本了解。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 +msgid "" +"`Star Flower on GitHub `__ ⭐️ and join " +"the open-source Flower community on Slack to connect, ask questions, and " +"get help: `Join Slack `__ 🌼 We'd love to " +"hear from you in the ``#introductions`` channel! And if anything is " +"unclear, head over to the ``#questions`` channel." +msgstr "" +"`Star Flower on GitHub `__ ⭐️ 并加入 Slack " +"上的开源 Flower 社区,进行交流、提问并获得帮助: 加入 Slack `__ 🌼" +" 我们希望在 ``#introductions`` 频道听到您的声音!如果有任何不清楚的地方,请访问 ``#questions`` 频道。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 +msgid "Let's get started!" +msgstr "让我们开始吧!" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 +msgid "Classic machine learning" +msgstr "经典机器学习" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 +msgid "" +"Before we begin to discuss federated learning, let us quickly recap how " +"most machine learning works today." 
+msgstr "在开始讨论联邦学习之前,让我们先快速回顾一下目前大多数机器学习的工作原理。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 +msgid "" +"In machine learning, we have a model, and we have data. The model could " +"be a neural network (as depicted here), or something else, like classical" +" linear regression." +msgstr "在机器学习中,我们有一个模型和数据。模型可以是一个神经网络(如图所示),也可以是其他东西,比如经典的线性回归。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 +msgid "|ac0a9766e26044d6aea222a829859b20|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 +msgid "Model and data" +msgstr "模型和数据" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 +msgid "" +"We train the model using the data to perform a useful task. A task could " +"be to detect objects in images, transcribe an audio recording, or play a " +"game like Go." +msgstr "我们使用数据来训练模型,以完成一项有用的任务。任务可以是检测图像中的物体、转录音频或玩围棋等游戏。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 +msgid "|36cd6e248b1443ce8a82b5a025bba368|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 +msgid "Train model using data" +msgstr "使用数据训练模型" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 +msgid "" +"Now, in practice, the training data we work with doesn't originate on the" +" machine we train the model on. It gets created somewhere else." +msgstr "实际上,我们使用的训练数据并不来自我们训练模型的机器。它是在其他地方创建的。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 +msgid "" +"It originates on a smartphone by the user interacting with an app, a car " +"collecting sensor data, a laptop receiving input via the keyboard, or a " +"smart speaker listening to someone trying to sing a song." 
+msgstr "它源于智能手机上用户与应用程序的交互、汽车上传感器数据的收集、笔记本电脑上键盘输入的接收,或者智能扬声器上某人试着唱的歌。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 +msgid "|bf4fb057f4774df39e1dcb5c71fd804a|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 +msgid "Data on a phone" +msgstr "手机上的数据" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 +msgid "" +"What's also important to mention, this \"somewhere else\" is usually not " +"just one place, it's many places. It could be several devices all running" +" the same app. But it could also be several organizations, all generating" +" data for the same task." +msgstr "" +"值得一提的是,这个 \"其他地方 " +"\"通常不只是一个地方,而是很多地方。它可能是多个运行同一应用程序的设备。但也可能是多个组织,都在为同一任务生成数据。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 +msgid "|71bb9f3c74c04f959b9bc1f02b736c95|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 +msgid "Data is on many devices" +msgstr "数据存在于多种设备中" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 +msgid "" +"So to use machine learning, or any kind of data analysis, the approach " +"that has been used in the past was to collect all data on a central " +"server. This server can be somewhere in a data center, or somewhere in " +"the cloud." +msgstr "因此,要使用机器学习或任何类型的数据分析,过去使用的方法是在中央服务器上收集所有数据。这个服务器可以在数据中心的某个地方,也可以在云端的某个地方。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 +msgid "|7605632e1b0f49599ffacf841491fcfb|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 +msgid "Central data collection" +msgstr "集中数据收集" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 +msgid "" +"Once all the data is collected in one place, we can finally use machine " +"learning algorithms to train our model on the data. This is the machine " +"learning approach that we've basically always relied on." 
+msgstr "一旦所有数据都收集到一处,我们最终就可以使用机器学习算法在数据上训练我们的模型。这就是我们基本上一直依赖的机器学习方法。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 +msgid "|91b1b5a7d3484eb7a2350c1923f18307|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 +msgid "Central model training" +msgstr "集中模型训练" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 +msgid "Challenges of classical machine learning" +msgstr "经典机器学习面临的挑战" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 +msgid "" +"The classic machine learning approach we've just seen can be used in some" +" cases. Great examples include categorizing holiday photos, or analyzing " +"web traffic. Cases, where all the data is naturally available on a " +"centralized server." +msgstr "我们刚刚看到的经典机器学习方法可以在某些情况下使用。很好的例子包括对假日照片进行分类或分析网络流量。在这些案例中,所有数据自然都可以在中央服务器上获得。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 +msgid "|5405ed430e4746e28b083b146fb71731|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 +msgid "Centralized possible" +msgstr "可集中管理" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 +msgid "" +"But the approach can not be used in many other cases. Cases, where the " +"data is not available on a centralized server, or cases where the data " +"available on one server is not enough to train a good model." +msgstr "但这种方法并不适用于许多其他情况。例如,集中服务器上没有数据,或者一台服务器上的数据不足以训练出一个好的模型。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 +msgid "|a389e87dab394eb48a8949aa2397687b|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 +msgid "Centralized impossible" +msgstr "无法集中" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 +msgid "" +"There are many reasons why the classic centralized machine learning " +"approach does not work for a large number of highly important real-world " +"use cases. 
Those reasons include:" +msgstr "传统的集中式机器学习方法无法满足现实世界中大量极为重要的使用案例,原因有很多。这些原因包括:" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 +msgid "" +"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " +"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " +"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " +"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " +"protect sensitive data from being moved. In fact, those regulations " +"sometimes even prevent single organizations from combining their own " +"users' data for artificial intelligence training because those users live" +" in different parts of the world, and their data is governed by different" +" data protection regulations." +msgstr "" +"**法规**: " +"GDPR(欧洲)、CCPA(加利福尼亚)、PIPEDA(加拿大)、LGPD(巴西)、PDPL(阿根廷)、KVKK(土耳其)、POPI(南非)、FSS(俄罗斯)、CDPR(中国)、PDPB(印度)、PIPA(韩国)、APPI(日本)、PDP(印度尼西亚)、PDPA(新加坡)、APP(澳大利亚)等法规保护敏感数据不被移动。事实上,这些法规有时甚至会阻止单个组织将自己的用户数据用于人工智能培训,因为这些用户生活在世界不同地区,他们的数据受不同的数据保护法规管辖。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 +msgid "" +"**User preference**: In addition to regulation, there are use cases where" +" users just expect that no data leaves their device, ever. If you type " +"your passwords and credit card info into the digital keyboard of your " +"phone, you don't expect those passwords to end up on the server of the " +"company that developed that keyboard, do you? In fact, that use case was " +"the reason federated learning was invented in the first place." 
+msgstr "" +"**用户偏好**: " +"除了法规之外,在一些使用案例中,用户只是希望数据永远不会离开他们的设备。如果你在手机的数字键盘上输入密码和信用卡信息,你不会希望这些密码最终出现在开发该键盘的公司的服务器上吧?事实上,这种用例正是联邦学习发明的初衷。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 +msgid "" +"**Data volume**: Some sensors, like cameras, produce such a high data " +"volume that it is neither feasible nor economic to collect all the data " +"(due to, for example, bandwidth or communication efficiency). Think about" +" a national rail service with hundreds of train stations across the " +"country. If each of these train stations is outfitted with a number of " +"security cameras, the volume of raw on-device data they produce requires " +"incredibly powerful and exceedingly expensive infrastructure to process " +"and store. And most of the data isn't even useful." +msgstr "" +"**数据量**: " +"有些传感器(如摄像头)产生的数据量很大,收集所有数据既不可行,也不经济(例如,由于带宽或通信效率的原因)。试想一下全国铁路服务,全国有数百个火车站。如果每个火车站都安装了许多安全摄像头,那么它们所产生的大量原始设备数据就需要功能强大且极其昂贵的基础设施来处理和存储。而大部分数据甚至都是无用的。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 +msgid "Examples where centralized machine learning does not work include:" +msgstr "集中式机器学习不起作用的例子包括:" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 +msgid "" +"Sensitive healthcare records from multiple hospitals to train cancer " +"detection models" +msgstr "用多家医院的敏感医疗记录训练癌症检测模型" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 +msgid "" +"Financial information from different organizations to detect financial " +"fraud" +msgstr "不同组织的财务信息,以侦查财务欺诈行为" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 +msgid "Location data from your electric car to make better range prediction" +msgstr "通过电动汽车的定位数据更好地预测续航里程" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 +msgid "End-to-end encrypted messages to train better auto-complete models" +msgstr "端到端加密信息可训练出更好的自动完成模型" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 +msgid "" 
+"The popularity of privacy-enhancing systems like the `Brave " +"`__ browser or the `Signal `__ " +"messenger shows that users care about privacy. In fact, they choose the " +"privacy-enhancing version over other alternatives, if such an alternative" +" exists. But what can we do to apply machine learning and data science to" +" these cases to utilize private data? After all, these are all areas that" +" would benefit significantly from recent advances in AI." +msgstr "" +"像 `Brave `__浏览器或 `Signal " +"`__信息管理器这样的隐私增强系统的流行表明,用户关心隐私。事实上,他们会选择隐私性更好的产品。但是,我们能做些什么来将机器学习和数据科学应用到这些情况中,以利用隐私数据呢?毕竟,这些领域都将从人工智能的最新进展中受益匪浅。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 +msgid "Federated learning" +msgstr "联邦学习" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 +msgid "" +"Federated learning simply reverses this approach. It enables machine " +"learning on distributed data by moving the training to the data, instead " +"of moving the data to the training. Here's the single-sentence " +"explanation:" +msgstr "联邦学习简单地颠覆了这种方法。它通过将训练转移到数据上,而不是将数据转移到训练上,在分布式数据上实现机器学习。下面是一句话的解释:" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 +msgid "Central machine learning: move the data to the computation" +msgstr "集中式机器学习:将数据转移到计算中心" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 +msgid "Federated (machine) learning: move the computation to the data" +msgstr "联邦式(机器)学习:将计算转移到数据上" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 +msgid "" +"By doing so, it enables us to use machine learning (and other data " +"science approaches) in areas where it wasn't possible before. We can now " +"train excellent medical AI models by enabling different hospitals to work" +" together. We can solve financial fraud by training AI models on the data" +" of different financial institutions. 
We can build novel privacy-" +"enhancing applications (such as secure messaging) that have better built-" +"in AI than their non-privacy-enhancing alternatives. And those are just a" +" few of the examples that come to mind. As we deploy federated learning, " +"we discover more and more areas that can suddenly be reinvented because " +"they now have access to vast amounts of previously inaccessible data." +msgstr "这样,我们就能在以前不可能的领域使用机器学习(和其他数据科学方法)。现在,我们可以通过让不同的医院协同工作来训练优秀的医疗人工智能模型。我们可以通过在不同金融机构的数据上训练人工智能模型来解决金融欺诈问题。我们可以构建新颖的隐私增强型应用(如安全信息),其内置的人工智能比非隐私增强型应用更好。以上只是我想到的几个例子。随着联邦学习的部署,我们会发现越来越多的领域可以突然重获新生,因为它们现在可以访问大量以前无法访问的数据。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 +msgid "" +"So how does federated learning work, exactly? Let's start with an " +"intuitive explanation." +msgstr "那么,联邦学习究竟是如何运作的呢?让我们从直观的解释开始。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 +msgid "Federated learning in five steps" +msgstr "联邦学习的五个步骤" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 +msgid "Step 0: Initialize global model" +msgstr "步骤 0:初始化全局模型" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 +msgid "" +"We start by initializing the model on the server. This is exactly the " +"same in classic centralized learning: we initialize the model parameters," +" either randomly or from a previously saved checkpoint." 
+msgstr "我们首先在服务器上初始化模型。这与经典的集中式学习完全相同:我们随机或从先前保存的检查点初始化模型参数。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 +msgid "|89c412136a5146ec8dc32c0973729f12|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 +msgid "Initialize global model" +msgstr "初始化全局模型" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 +msgid "" +"Step 1: Send model to a number of connected organizations/devices (client" +" nodes)" +msgstr "第 1 步:将模型发送到多个连接的组织/设备(客户节点)" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 +msgid "" +"Next, we send the parameters of the global model to the connected client " +"nodes (think: edge devices like smartphones or servers belonging to " +"organizations). This is to ensure that each participating node starts " +"their local training using the same model parameters. We often use only a" +" few of the connected nodes instead of all nodes. The reason for this is " +"that selecting more and more client nodes has diminishing returns." +msgstr "接下来,我们会将全局模型的参数发送到连接的客户端节点(如智能手机等边缘设备或企业的服务器)。这是为了确保每个参与节点都使用相同的模型参数开始本地训练。我们通常只使用几个连接节点,而不是所有节点。这样做的原因是,选择越来越多的客户端节点会导致收益递减。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 +msgid "|9503d3dc3a144e8aa295f8800cd8a766|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 +msgid "Send global model" +msgstr "发送全局模型" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 +msgid "" +"Step 2: Train model locally on the data of each organization/device " +"(client node)" +msgstr "步骤 2:在本地对每个机构/设备(客户端节点)的数据进行模型训练" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 +msgid "" +"Now that all (selected) client nodes have the latest version of the " +"global model parameters, they start the local training. They use their " +"own local dataset to train their own local model. 
They don't train the " +"model until full convergence, but they only train for a little while. " +"This could be as little as one epoch on the local data, or even just a " +"few steps (mini-batches)." +msgstr "" +"现在,所有(选定的)客户端节点都有了最新版本的全局模型参数,它们开始进行本地训练。它们使用自己的本地数据集来训练自己的本地模型。它们不会一直训练到模型完全收敛为止,而只是训练一小段时间。这可能只是本地数据上的一个遍历,甚至只是几个步骤" +"(mini-batches)。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 +msgid "|aadb59e29b9e445d8e239d9a8a7045cb|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 +msgid "Train on local data" +msgstr "根据本地数据进行训练" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 +msgid "Step 3: Return model updates back to the server" +msgstr "步骤 3:将模型参数更新返回服务器" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 +msgid "" +"After local training, each client node has a slightly different version " +"of the model parameters they originally received. The parameters are all " +"different because each client node has different examples in its local " +"dataset. The client nodes then send those model updates back to the " +"server. The model updates they send can either be the full model " +"parameters or just the gradients that were accumulated during local " +"training." +msgstr "经过本地训练后,每个客户节点最初收到的模型参数都会略有不同。参数之所以不同,是因为每个客户端节点的本地数据集中都有不同的数据。然后,客户端节点将这些模型更新发回服务器。它们发送的模型更新既可以是完整的模型参数,也可以只是本地训练过程中积累的梯度。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 +msgid "|a7579ad7734347508e959d9e14f2f53d|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 +msgid "Send model updates" +msgstr "发送模型参数更新" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 +msgid "Step 4: Aggregate model updates into a new global model" +msgstr "步骤 4:将模型更新聚合到新的全局模型中" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 +msgid "" +"The server receives model updates from the selected client nodes. 
If it " +"selected 100 client nodes, it now has 100 slightly different versions of " +"the original global model, each trained on the local data of one client. " +"But didn't we want to have one model that contains the learnings from the" +" data of all 100 client nodes?" +msgstr "" +"服务器从选定的客户端节点接收模型更新。如果服务器选择了 100 个客户端节点,那么它现在就拥有 100 " +"个略有不同的原始全局模型版本,每个版本都是根据一个客户端的本地数据训练出来的。难道我们不希望有一个包含所有 100 个客户节点数据的模型吗?" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 +msgid "" +"In order to get one single model, we have to combine all the model " +"updates we received from the client nodes. This process is called " +"*aggregation*, and there are many different ways to do it. The most basic" +" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " +"`__), often abbreviated as *FedAvg*. " +"*FedAvg* takes the 100 model updates and, as the name suggests, averages " +"them. To be more precise, it takes the *weighted average* of the model " +"updates, weighted by the number of examples each client used for " +"training. The weighting is important to make sure that each data example " +"has the same \"influence\" on the resulting global model. If one client " +"has 10 examples, and another client has 100 examples, then - without " +"weighting - each of the 10 examples would influence the global model ten " +"times as much as each of the 100 examples." 
+msgstr "" +"为了得到一个单一的模型,我们必须将从客户端节点收到的所有模型更新合并起来。这个过程称为*聚合*,有许多不同的方法。最基本的方法称为 " +"*Federated Averaging* (`McMahan等人,2016 " +"`__),通常缩写为*FedAvg*。*FedAvg* 可以把100 " +"个模型更新进行平均。更准确地说,它取的是模型更新的*加权平均值*,根据每个客户端用于训练的数据数量进行加权。加权对于确保每个数据示例对生成的全局模型具有相同的" +" \"影响 \"非常重要。如果一个客户端有 10 个数据点,而另一个客户有 100 个数据点,那么在不加权的情况下,10 个示例对全局模型的影响是" +" 100 个示例的 10 倍。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 +msgid "|73d15dd1d4fc41678b2d54815503fbe8|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 +msgid "Aggregate model updates" +msgstr "聚合模型参数更新" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 +msgid "Step 5: Repeat steps 1 to 4 until the model converges" +msgstr "步骤 5:重复步骤 1 至 4,直至模型收敛" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 +msgid "" +"Steps 1 to 4 are what we call a single round of federated learning. The " +"global model parameters get sent to the participating client nodes (step " +"1), the client nodes train on their local data (step 2), they send their " +"updated models to the server (step 3), and the server then aggregates the" +" model updates to get a new version of the global model (step 4)." +msgstr "" +"步骤 1 至 4 就是我们所说的单轮联邦学习。全局模型参数被发送到参与的客户端节点(第 1 步),客户端节点对其本地数据进行训练(第 2 " +"步),然后将更新后的模型发送到服务器(第 3 步),服务器汇总模型更新,得到新版本的全局模型(第 4 步)。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 +msgid "" +"During a single round, each client node that participates in that " +"iteration only trains for a little while. This means that after the " +"aggregation step (step 4), we have a model that has been trained on all " +"the data of all participating client nodes, but only for a little while. " +"We then have to repeat this training process over and over again to " +"eventually arrive at a fully trained model that performs well across the " +"data of all client nodes." 
+msgstr "" +"在一轮迭代中,每个参与迭代的客户节点只训练一小段时间。这意味着,在聚合步骤(步骤 " +"4)之后,我们的模型已经在所有参与的客户节点的所有数据上训练过了,但只训练了一小会儿。然后,我们必须一次又一次地重复这一训练过程,最终得到一个经过全面训练的模型,该模型在所有客户节点的数据中都表现良好。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 +msgid "" +"Congratulations, you now understand the basics of federated learning. " +"There's a lot more to discuss, of course, but that was federated learning" +" in a nutshell. In later parts of this tutorial, we will go into more " +"detail. Interesting questions include: How can we select the best client " +"nodes that should participate in the next round? What's the best way to " +"aggregate model updates? How can we handle failing client nodes " +"(stragglers)?" +msgstr "" +"恭喜您,现在您已经了解了联邦学习的基础知识。当然,要讨论的内容还有很多,但这只是联邦学习的一个缩影。在本教程的后半部分,我们将进行更详细的介绍。有趣的问题包括" +" 我们如何选择最好的客户端节点参与下一轮学习?聚合模型更新的最佳方法是什么?如何处理失败的客户端节点(落伍者)?" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 +msgid "" +"Just like we can train a model on the decentralized data of different " +"client nodes, we can also evaluate the model on that data to receive " +"valuable metrics. This is called federated evaluation, sometimes " +"abbreviated as FE. In fact, federated evaluation is an integral part of " +"most federated learning systems." +msgstr "" +"就像我们可以在不同客户节点的分散数据上训练一个模型一样,我们也可以在这些数据上对模型进行评估,以获得有价值的指标。这就是所谓的联邦评估,有时简称为" +" FE。事实上,联邦评估是大多数联邦学习系统不可或缺的一部分。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 +msgid "Federated analytics" +msgstr "联邦分析" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 +msgid "" +"In many cases, machine learning isn't necessary to derive value from " +"data. Data analysis can yield valuable insights, but again, there's often" +" not enough data to get a clear answer. What's the average age at which " +"people develop a certain type of health condition? Federated analytics " +"enables such queries over multiple client nodes. 
It is usually used in " +"conjunction with other privacy-enhancing technologies like secure " +"aggregation to prevent the server from seeing the results submitted by " +"individual client nodes." +msgstr "在很多情况下,机器学习并不是从数据中获取价值的必要条件。数据分析可以产生有价值的见解,但同样,往往没有足够的数据来获得明确的答案。人们患某种健康疾病的平均年龄是多少?联邦分析可以通过多个客户端节点进行此类查询。它通常与安全聚合等其他隐私增强技术结合使用,以防止服务器看到单个客户端节点提交的结果。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 +msgid "" +"Differential privacy (DP) is often mentioned in the context of Federated " +"Learning. It is a privacy-preserving method used when analyzing and " +"sharing statistical data, ensuring the privacy of individual " +"participants. DP achieves this by adding statistical noise to the model " +"updates, ensuring any individual participants’ information cannot be " +"distinguished or re-identified. This technique can be considered an " +"optimization that provides a quantifiable privacy protection measure." +msgstr "" +"差分隐私(DP)经常在联邦学习中被提及。这是一种在分析和共享统计数据时使用的隐私保护方法,可确保单个参与者的隐私。DP " +"通过在模型更新中添加统计噪声来实现这一目的,确保任何个体参与者的信息都无法被区分或重新识别。这种技术可被视为一种优化,提供了一种可量化的隐私保护措施。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 +msgid "Flower" +msgstr "Flower" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 +msgid "" +"Federated learning, federated evaluation, and federated analytics require" +" infrastructure to move machine learning models back and forth, train and" +" evaluate them on local data, and then aggregate the updated models. " +"Flower provides the infrastructure to do exactly that in an easy, " +"scalable, and secure way. In short, Flower presents a unified approach to" +" federated learning, analytics, and evaluation. It allows the user to " +"federate any workload, any ML framework, and any programming language." 
+msgstr "" +"联邦学习、联邦评估和联邦分析需要基础框架来来回移动机器学习模型,在本地数据上对其进行训练和评估,然后汇总更新的模型。Flower " +"提供的基础架构正是以简单、可扩展和安全的方式实现这些目标的。简而言之,Flower " +"为联邦学习、分析和评估提供了一种统一的方法。它允许用户联邦化任何工作负载、任何 ML 框架和任何编程语言。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 +msgid "|55472eef61274ba1b739408607e109df|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 +msgid "" +"Flower federated learning server and client nodes (car, scooter, personal" +" computer, roomba, and phone)" +msgstr "Flower联邦学习服务器和客户端节点(汽车、滑板车、个人电脑、roomba 和电话)" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 +msgid "" +"Congratulations, you just learned the basics of federated learning and " +"how it relates to the classic (centralized) machine learning!" +msgstr "恭喜您,您刚刚了解了联邦学习的基础知识,以及它与传统(集中式)机器学习的关系!" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 +msgid "" +"In the next part of this tutorial, we are going to build a first " +"federated learning system with Flower." +msgstr "在本教程的下一部分,我们将用 Flower 建立第一个联邦学习系统。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 +msgid "" +"Before you continue, make sure to join the Flower community on Slack: " +"`Join Slack `__" +msgstr "" +"在继续之前,请务必加入 Slack 上的 Flower 社区:`Join Slack `__" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 +msgid "" +"The `Flower Federated Learning Tutorial - Part 1 " +"`__ shows how to build a simple federated learning system " +"with PyTorch and Flower." +msgstr "" +"`Flower 联邦学习教程 - 第 1 部分 `__ 展示了如何使用 PyTorch 和 Flower " +"构建一个简单的联邦学习系统。" + +#~ msgid "Before the release" +#~ msgstr "发布前" + +#~ msgid "" +#~ "Update the changelog (``changelog.md``) with" +#~ " all relevant changes that happened " +#~ "after the last release. 
If the " +#~ "last release was tagged ``v1.2.0``, you" +#~ " can use the following URL to " +#~ "see all commits that got merged " +#~ "into ``main`` since then:" +#~ msgstr "" +#~ "更新更新日志 (``changelog.md``),加入上次发布后发生的所有相关变更。如果上次发布的版本被标记为 " +#~ "``v1.2.0``,则可以使用以下 URL 查看此后合并到 ``main`` 的所有提交:" + +#~ msgid "" +#~ "`GitHub: Compare v1.2.0...main " +#~ "`_" +#~ msgstr "" +#~ "`GitHub: Compare v1.2.0...main " +#~ "`_" + +#~ msgid "" +#~ "Thank the authors who contributed since" +#~ " the last release. This can be " +#~ "done by running the ``./dev/add-" +#~ "shortlog.sh`` convenience script (it can " +#~ "be ran multiple times and will " +#~ "update the names in the list if" +#~ " new contributors were added in the" +#~ " meantime)." +#~ msgstr "" +#~ "感谢自上次发布以来做出贡献的作者。可以通过运行 ``./dev/add-shortlog.sh`` " +#~ "方便脚本来完成(可以多次运行,如果在此期间有新的贡献者加入,则会更新列表中的名字)。" + +#~ msgid "" +#~ "Update the ``changelog.md`` section header " +#~ "``Unreleased`` to contain the version " +#~ "number and date for the release " +#~ "you are building. Create a pull " +#~ "request with the change." +#~ msgstr "" +#~ "更新 ``changelog.md`` 部分的标题 ``Unreleased`` " +#~ "以包含你正在构建的版本的版本号和日期。创建一个包含更改的拉取请求。" + +#~ msgid "" +#~ "Second, create a virtual environment " +#~ "(and activate it). If you chose to" +#~ " use :code:`pyenv` (with the :code" +#~ ":`pyenv-virtualenv` plugin) and already " +#~ "have it installed , you can use" +#~ " the following convenience script (by " +#~ "default it will use :code:`Python " +#~ "3.8.17`, but you can change it by" +#~ " providing a specific :code:``)::" +#~ msgstr "" +#~ "其次,创建虚拟环境(并激活它)。如果您选择使用 :code:`pyenv`(使用 :code:`pyenv-" +#~ "virtualenv`插件),并且已经安装了该插件,则可以使用下面的便捷脚本(默认情况下使用 " +#~ ":code:`Python3.8.17`,但您可以通过提供特定的 :code:`<版本>`来更改)::" + +#~ msgid "flwr (Python API reference)" +#~ msgstr "flwr(Python API 参考)" + +#~ msgid "..." +#~ msgstr "..." 
+ +#~ msgid "Starting a client with an insecure server connection:" +#~ msgstr "使用不安全的服务器连接启动客户端:" + +#~ msgid "server.strategy.FedAvg" +#~ msgstr "server.strategy.FedAvg" + +#~ msgid "server.strategy.FedAvgM" +#~ msgstr "server.strategy.FedAvgM" + +#~ msgid "Configurable FedAvg with Momentum strategy implementation." +#~ msgstr "可配置的 FedAvg 动量策略实施。" + +#~ msgid "Fraction of clients used during training. Defaults to 0.1." +#~ msgstr "训练期间使用客户的比例。默认为 0.1。" + +#~ msgid "Fraction of clients used during validation. Defaults to 0.1." +#~ msgstr "验证过程中使用的客户端比例。默认为 0.1。" + +#~ msgid "server.strategy.FedMedian" +#~ msgstr "server.strategy.FedMedian" + +#~ msgid "server.strategy.QFedAvg" +#~ msgstr "server.strategy.QFedAvg" + +#~ msgid "server.strategy.FedOpt" +#~ msgstr "server.strategy.FedOpt" + +#~ msgid "Configurable FedAdagrad strategy implementation." +#~ msgstr "可配置的 FedAdagrad 策略实施。" + +#~ msgid "Federated Optim strategy interface." +#~ msgstr "Federated Optim 策略界面。" + +#~ msgid "server.strategy.FedProx" +#~ msgstr "server.strategy.FedProx" + +#~ msgid "Configurable FedProx strategy implementation." +#~ msgstr "可配置的 FedProx 策略实施。" + +#~ msgid "server.strategy.FedAdagrad" +#~ msgstr "server.strategy.FedAdagrad" + +#~ msgid "Paper: https://arxiv.org/abs/2003.00295" +#~ msgstr "论文: https://arxiv.org/abs/2003.00295" + +#~ msgid "Federated learning strategy using Adagrad on server-side." +#~ msgstr "在服务器端使用 Adagrad 的联邦学习策略。" + +#~ msgid "server.strategy.FedAdam" +#~ msgstr "server.strategy.FedAdam" + +#~ msgid "server.strategy.FedYogi" +#~ msgstr "server.strategy.FedYogi" + +#~ msgid "Adaptive Federated Optimization using Yogi." +#~ msgstr "使用 Yogi 的自适应联合优化。" + +#~ msgid "Federated learning strategy using Yogi on server-side." 
+#~ msgstr "在服务器端使用 Yogi 的联邦学习策略。" + +#~ msgid "Paper: https://arxiv.org/abs/1803.01498" +#~ msgstr "论文:https://arxiv.org/abs/1803.01498" + +#~ msgid "server.strategy.Krum" +#~ msgstr "server.strategy.Krum" + +#~ msgid "Configurable Krum strategy implementation." +#~ msgstr "可配置的 Krum 策略实施。" + +#~ msgid "server.strategy.Bulyan" +#~ msgstr "server.strategy.Bulyan" + +#~ msgid "Bulyan strategy implementation." +#~ msgstr "Bulyan策略的实施。" + +#~ msgid "server.strategy.FedXgbNnAvg" +#~ msgstr "server.strategy.FedXgbNnAvg" + +#~ msgid "Federated XGBoost [Ma et al., 2023] strategy." +#~ msgstr "Federated XGBoost [Ma 等人,2023] 策略。" + +#~ msgid "server.strategy.DPFedAvgAdaptive" +#~ msgstr "server.strategy.DPFedAvgAdaptive" + +#~ msgid "" +#~ "**Fix the incorrect return types of " +#~ "Strategy** " +#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" +#~ msgstr "" +#~ "**修复策略的错误返回类型** " +#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" + +#~ msgid "" +#~ "The types of the return values in" +#~ " the docstrings in two methods " +#~ "(`aggregate_fit` and `aggregate_evaluate`) now " +#~ "match the hint types in the code." +#~ msgstr "" +#~ "两个方法(\"aggregate_fit \"和 " +#~ "\"aggregate_evaluate\")的文档说明中的返回值类型现在与代码中的提示类型一致。" + +#~ msgid "" +#~ "**Update Flower Examples** " +#~ "([#2384](https://github.com/adap/flower/pull/2384),[#2425](https://github.com/adap/flower/pull/2425)," +#~ " [#2526](https://github.com/adap/flower/pull/2526))" +#~ msgstr "" +#~ "** 更新 Flower Examples** " +#~ "([#2384](https://github.com/adap/flower/pull/2384),[#2425](https://github.com/adap/flower/pull/2425)," +#~ " [#2526](https://github.com/adap/flower/pull/2526))" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. 
In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." +#~ msgstr "" +#~ "对于客户端就需要做这么多。我们仅需要实现 " +#~ ":code:`Client`或者:code:`NumPyClient`然后调用:code:`fl.client.start_client()`。字符串" +#~ " :code:`\"0.0.0.0:8080\"` " +#~ "告诉客户端要连接到哪个服务器。在我们的例子中,我们可以在同一台机器上运行服务器和客户端,因此我们使用:code:`\"0.0.0.0:8080\"`。如果我们运行真正联邦学习的工作负载,服务器和客户端在不同的机器上运行,则需要更改的只是我们传递给客户端的" +#~ " server_address 。" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." 
+#~ msgstr "" +#~ "对于客户来说就是这样了。我们只需实现 :code:`Client` 或 " +#~ ":code:`NumPyClient` 并调用:code:`fl.client.start_client()` " +#~ "即可。字符串 :code:`\"[::]:8080\"` " +#~ "告诉客户端要连接到哪个服务器。在我们的例子中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " +#~ ":code:`\"[::]:8080\"`。如果我们运行真正联邦的工作负载,服务器和客户端运行在不同的机器上,则需要更改的只是我们指向客户端的" +#~ " server_address 。" + +#~ msgid "" +#~ "Let's now load the CIFAR-10 training " +#~ "and test set, partition them into " +#~ "ten smaller datasets (each split into" +#~ " training and validation set), and " +#~ "wrap the resulting partitions by " +#~ "creating a PyTorch ``DataLoader`` for " +#~ "each of them:" +#~ msgstr "" +#~ "现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成 10 " +#~ "个较小的数据集(每个数据集又分为训练集和验证集),并通过为每个数据集创建 PyTorch " +#~ "``DataLoader`` 来包装由此产生的分割集:" + +#~ msgid "|e1dd4b4129b040bea23a894266227080|" +#~ msgstr "|e1dd4b4129b040bea23a894266227080|" + +#~ msgid "|c0d4cc6a442948dca8da40d2440068d9|" +#~ msgstr "|c0d4cc6a442948dca8da40d2440068d9|" + +#~ msgid "|174e1e4fa1f149a19bfbc8bc1126f46a|" +#~ msgstr "|174e1e4fa1f149a19bfbc8bc1126f46a|" + +#~ msgid "|4e021a3dc08249d2a89daa3ab03c2714|" +#~ msgstr "|4e021a3dc08249d2a89daa3ab03c2714|" + +#~ msgid "|e74a1d5ce7eb49688651f2167a59065b|" +#~ msgstr "|e74a1d5ce7eb49688651f2167a59065b|" + +#~ msgid "|eb29ec4c7aef4e93976795ed72df647e|" +#~ msgstr "|eb29ec4c7aef4e93976795ed72df647e|" + +#~ msgid "|c2f699d8ac484f5081721a6f1511f70d|" +#~ msgstr "|c2f699d8ac484f5081721a6f1511f70d|" + +#~ msgid "|cf42accdacbf4e5eb4fa0503108ba7a7|" +#~ msgstr "|cf42accdacbf4e5eb4fa0503108ba7a7|" + +#~ msgid "|5ec8356bc2564fa09178b1ceed5beccc|" +#~ msgstr "|5ec8356bc2564fa09178b1ceed5beccc|" + +#~ msgid "|7c9329e97bd0430bad335ab605a897a7|" +#~ msgstr "|7c9329e97bd0430bad335ab605a897a7|" + +#~ msgid "|88002bbce1094ba1a83c9151df18f707|" +#~ msgstr "|88002bbce1094ba1a83c9151df18f707|" + +#~ msgid "|391766aee87c482c834c93f7c22225e2|" +#~ msgstr "|391766aee87c482c834c93f7c22225e2|" + +#~ msgid "|93b9a15bd27f4e91b40f642c253dfaac|" +#~ msgstr 
"|93b9a15bd27f4e91b40f642c253dfaac|" + +#~ msgid "|a23d9638f96342ef9d25209951e2d564|" +#~ msgstr "|a23d9638f96342ef9d25209951e2d564|" + +#~ msgid "Upload the whl (e.g., ``flwr-1.6.0-py3-none-any.whl``)" +#~ msgstr "上传 whl(例如 ``flwr-1.6.0-py3-none-any.whl``)" + +#~ msgid "" +#~ "Change ``!pip install -q 'flwr[simulation]'" +#~ " torch torchvision matplotlib`` to ``!pip" +#~ " install -q 'flwr-1.6.0-py3-none-" +#~ "any.whl[simulation]' torch torchvision matplotlib``" +#~ msgstr "" +#~ "将``!pip install -q 'flwr[simulation]' torch" +#~ " torchvision matplotlib``更改为``!pip install -q " +#~ "'flwr-1.6.0-py3-none-any.whl[simulation]' torch " +#~ "torch torchvision matplotlib``" + +#~ msgid "" +#~ "All that's left to do it to " +#~ "define a function that loads both " +#~ "model and data, creates a " +#~ ":code:`CifarClient`, and starts this client." +#~ " You load your data and model " +#~ "by using :code:`cifar.py`. Start " +#~ ":code:`CifarClient` with the function " +#~ ":code:`fl.client.start_numpy_client()` by pointing " +#~ "it at the same IP address we " +#~ "used in :code:`server.py`:" +#~ msgstr "" +#~ "剩下要做的就是定义一个加载模型和数据的函数,创建一个 :code:`CifarClient` 并启动该客户端。使用" +#~ " :code:`cifar.py` 加载数据和模型。使用函数 " +#~ ":code:`fl.client.start_numpy_client()` 启动 " +#~ ":code:`CifarClient`,将其指向我们在 :code:`server.py` 中使用的相同 " +#~ "IP 地址:" + +#~ msgid "" +#~ "The :code:`VirtualClientEngine` schedules, launches" +#~ " and manages `virtual` clients. These " +#~ "clients are identical to `non-virtual`" +#~ " clients (i.e. the ones you launch" +#~ " via the command `flwr.client.start_numpy_client" +#~ " `_)" +#~ " in the sense that they can be" +#~ " configure by creating a class " +#~ "inheriting, for example, from " +#~ "`flwr.client.NumPyClient `_ and therefore " +#~ "behave in an identical way. 
In " +#~ "addition to that, clients managed by " +#~ "the :code:`VirtualClientEngine` are:" +#~ msgstr "" +#~ "代码:`VirtualClientEngine`调度、启动和管理`虚拟`客户端。这些客户端与 \"非虚拟 " +#~ "\"客户端(即通过命令 `flwr.client.start_numpy_client `_启动的客户端)完全相同,它们可以通过创建一个继承自 \"flwr.client.NumPyClient " +#~ "`_\" " +#~ "的类来配置,因此行为方式也完全相同。除此之外,由 :code:`VirtualClientEngine` " +#~ "管理的客户端还包括:" + +#~ msgid "Example: Walk-Through PyTorch & MNIST" +#~ msgstr "实例: PyTorch 和 MNIST 的演练" + +#~ msgid "" +#~ "In this tutorial we will learn, " +#~ "how to train a Convolutional Neural " +#~ "Network on MNIST using Flower and " +#~ "PyTorch." +#~ msgstr "在本教程中,我们将学习如何使用 Flower 和 PyTorch 在 MNIST 上训练卷积神经网络。" + +#~ msgid "" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead an install PyTorch and " +#~ "the **torchvision** library:" +#~ msgstr "我们想用 PyTorch 来做计算机视觉任务,需要先安装 PyTorch 和 **torchvision** 库:" + +#~ msgid "Ready... Set... Train!" +#~ msgstr "准备...设置...训练!" + +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Basic MNIST Example" +#~ " `_. " +#~ "This will allow you see how easy" +#~ " it is to wrap your code with" +#~ " Flower and begin training in a " +#~ "federated way. We provide you with " +#~ "two helper scripts, namely *run-" +#~ "server.sh*, and *run-clients.sh*. Don't " +#~ "be afraid to look inside, they are" +#~ " simple enough =)." 
+#~ msgstr "" +#~ "现在我们已经安装了所有的依赖包,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练过程和网络架构基于 " +#~ "PyTorch 的 `Basic MNIST Example " +#~ "`_。您会发现用 " +#~ "Flower 来封装您的代码并进行联邦学习训练是多么容易。我们为您提供了两个辅助脚本,即 *run-" +#~ "server.sh* 和 *run-clients.sh*。别害怕,它们很简单 =)。" + +#~ msgid "" +#~ "Go ahead and launch on a terminal" +#~ " the *run-server.sh* script first as" +#~ " follows:" +#~ msgstr "首先在终端上启动 *run-server.sh* 脚本,如下所示:" + +#~ msgid "Now that the server is up and running, go ahead and launch the clients." +#~ msgstr "现在服务器已经启动并运行,请继续启动客户端。" + +#~ msgid "" +#~ "Et voilà! You should be seeing the" +#~ " training procedure and, after a few" +#~ " iterations, the test accuracy for " +#~ "each client." +#~ msgstr "然后就可以了!您应该能看到训练过程,以及经过几次反复后,每个客户端的测试准确率。" + +#~ msgid "Now, let's see what is really happening inside." +#~ msgstr "现在,让我们看看里面到底发生了什么。" + +#~ msgid "" +#~ "Inside the server helper script *run-" +#~ "server.sh* you will find the following" +#~ " code that basically runs the " +#~ ":code:`server.py`" +#~ msgstr "在服务器辅助脚本 *run-server.sh* 中,你可以找到以下代码,这些代码基本上都是运行 :code:`server.py` 的代码" + +#~ msgid "" +#~ "We can go a bit deeper and " +#~ "see that :code:`server.py` simply launches " +#~ "a server that will coordinate three " +#~ "rounds of training. Flower Servers are" +#~ " very customizable, but for simple " +#~ "workloads, we can start a server " +#~ "using the :ref:`start_server ` function and leave " +#~ "all the configuration possibilities at " +#~ "their default values, as seen below." +#~ msgstr "" +#~ "我们可以再深入一点,:code:`server.py` 只是启动了一个服务器,该服务器将协调三轮训练。Flower " +#~ "服务器是非常容易修改的,但对于简单的工作,我们可以使用 :ref:`start_server `函数启动服务器,并将所有可能的配置保留为默认值,如下所示。" + +#~ msgid "" +#~ "Next, let's take a look at the " +#~ "*run-clients.sh* file. You will see " +#~ "that it contains the main loop " +#~ "that starts a set of *clients*." +#~ msgstr "接下来,让我们看看 *run-clients.sh* 文件。您会看到它包含了用来启动多个 *客户端* 的代码。" + +#~ msgid "" +#~ "**cid**: is the client ID. 
It is" +#~ " an integer that uniquely identifies " +#~ "client identifier." +#~ msgstr "**cid**:是客户 ID。它是一个整数,可唯一标识客户标识符。" + +#~ msgid "**sever_address**: String that identifies IP and port of the server." +#~ msgstr "**sever_address**: 标识服务器 IP 和端口的字符串。" + +#~ msgid "" +#~ "**nb_clients**: This defines the number " +#~ "of clients being created. This piece " +#~ "of information is not required by " +#~ "the client, but it helps us " +#~ "partition the original MNIST dataset to" +#~ " make sure that every client is " +#~ "working on unique subsets of both " +#~ "*training* and *test* sets." +#~ msgstr "" +#~ "**nb_clients**: 这定义了正在创建的客户端数量。客户端并不需要这一信息,但它有助于我们对原始 " +#~ "MNIST 数据集进行划分,以确保每个客户端都在 *training* 和 *test*" +#~ " 数据集上有独立的数据。" + +#~ msgid "" +#~ "Again, we can go deeper and look" +#~ " inside :code:`flwr_example/quickstart-" +#~ "pytorch/client.py`. After going through the" +#~ " argument parsing code at the " +#~ "beginning of our :code:`main` function, " +#~ "you will find a call to " +#~ ":code:`mnist.load_data`. This function is " +#~ "responsible for partitioning the original " +#~ "MNIST datasets (*training* and *test*) " +#~ "and returning a :code:`torch.utils.data.DataLoader`" +#~ " s for each of them. We then" +#~ " instantiate a :code:`PytorchMNISTClient` object" +#~ " with our client ID, our DataLoaders," +#~ " the number of epochs in each " +#~ "round, and which device we want to" +#~ " use for training (CPU or GPU)." 
+#~ msgstr "" +#~ "我们可以深入看一下 :code:`flwr_example/quickstart-" +#~ "pytorch/client.py`。查看 :code:`main` 函数开头的参数解析代码后,你会发现一个对" +#~ " :code:`mnist.load_data` 的调用。该函数负责分割原始 MNIST " +#~ "数据集(*training* 和 *test*),并为每个数据集返回一个 " +#~ ":code:`torch.utils.data.DataLoader` 。然后,我们实例化一个 " +#~ ":code:`PytorchMNISTClient` 对象,其中包含我们的客户端 ID、 " +#~ "DataLoader、每一轮中的遍历数,以及我们希望用于训练的设备(CPU 或 GPU)。" + +#~ msgid "" +#~ "The :code:`PytorchMNISTClient` object when " +#~ "finally passed to :code:`fl.client.start_client` " +#~ "along with the server's address as " +#~ "the training process begins." +#~ msgstr "" +#~ "当训练过程开始时,:code:`PytorchMNISTClient` 对象会连同服务器地址一起传递给 " +#~ ":code:`fl.client.start_client`。" + +#~ msgid "A Closer Look" +#~ msgstr "仔细看一下" + +#~ msgid "" +#~ "Now, let's look closely into the " +#~ ":code:`PytorchMNISTClient` inside :code:`flwr_example" +#~ ".quickstart-pytorch.mnist` and see what it" +#~ " is doing:" +#~ msgstr "" +#~ "现在,让我们仔细研究一下 :code:`flwr_example.quickstart-pytorch.mnist`" +#~ " 中的 :code:`PytorchMNISTClient`,看看它在做什么:" + +#~ msgid "" +#~ "The first thing to notice is that" +#~ " :code:`PytorchMNISTClient` instantiates a CNN" +#~ " model inside its constructor" +#~ msgstr "首先要注意的是 :code:`PytorchMNISTClient` 在其构造函数中实例化了一个 CNN 模型" + +#~ msgid "" +#~ "The code for the CNN is available" +#~ " under :code:`quickstart-pytorch.mnist` and " +#~ "it is reproduced below. It is the" +#~ " same network found in `Basic MNIST" +#~ " Example " +#~ "`_." 
+#~ msgstr "" +#~ "CNN 的代码可在 :code:`quickstart-pytorch.mnist` " +#~ "下找到,现复制如下。它与 `Basic MNIST Example " +#~ "`_中的网络相同。" + +#~ msgid "" +#~ "The second thing to notice is that" +#~ " :code:`PytorchMNISTClient` class inherits from" +#~ " the :code:`fl.client.Client`, and hence it" +#~ " must implement the following methods:" +#~ msgstr "" +#~ "第二件要注意的事是 :code:`PytorchMNISTClient` 类继承自 " +#~ ":code:`fl.client.Client`,因此它必须实现以下方法:" + +#~ msgid "" +#~ "When comparing the abstract class to " +#~ "its derived class :code:`PytorchMNISTClient` " +#~ "you will notice that :code:`fit` calls" +#~ " a :code:`train` function and that " +#~ ":code:`evaluate` calls a :code:`test`: " +#~ "function." +#~ msgstr "" +#~ "将抽象类与其派生类 :code:`PytorchMNISTClient` 进行比较时,您会发现 " +#~ ":code:`fit` 调用了一个 :code:`train` 函数,而 " +#~ ":code:`evaluate` 则调用了一个 :code:`test`: 函数。" + +#~ msgid "" +#~ "These functions can both be found " +#~ "inside the same :code:`quickstart-" +#~ "pytorch.mnist` module:" +#~ msgstr "这些函数都可以在同一个 :code:`quickstart-pytorch.mnist` 模块中找到:" + +#~ msgid "" +#~ "Observe that these functions encapsulate " +#~ "regular training and test loops and " +#~ "provide :code:`fit` and :code:`evaluate` with" +#~ " final statistics for each round. You" +#~ " could substitute them with your " +#~ "custom train and test loops and " +#~ "change the network architecture, and the" +#~ " entire example would still work " +#~ "flawlessly. As a matter of fact, " +#~ "why not try and modify the code" +#~ " to an example of your liking?" +#~ msgstr "" +#~ "请注意,这些函数封装了常规的训练和测试循环,并为 :code:`fit` 和 " +#~ ":code:`evaluate` " +#~ "提供了每轮的最终统计数据。您可以用自定义的训练和测试循环来替代它们,并改变网络结构,整个示例仍然可以完美运行。事实上,为什么不按照自己的喜好修改代码呢?" 
+ +#~ msgid "Give It a Try" +#~ msgstr "试试看" + +#~ msgid "" +#~ "Looking through the quickstart code " +#~ "description above will have given a " +#~ "good understanding of how *clients* and" +#~ " *servers* work in Flower, how to " +#~ "run a simple experiment, and the " +#~ "internals of a client wrapper. Here " +#~ "are a few things you could try " +#~ "on your own and get more " +#~ "experience with Flower:" +#~ msgstr "" +#~ "通过上面的快速入门代码描述,你将对 Flower " +#~ "中*客户端*和*服务器*的工作方式、如何运行一个简单的实验以及客户端封装器的内部结构有一个很好的了解。您可以自己尝试以下内容,以获得更多使用" +#~ " Flower 的经验:" + +#~ msgid "" +#~ "Try and change :code:`PytorchMNISTClient` so" +#~ " it can accept different architectures." +#~ msgstr "尝试修改 :code:`PytorchMNISTClient`,使其可以接受不同的架构。" + +#~ msgid "" +#~ "Modify the :code:`train` function so " +#~ "that it accepts different optimizers" +#~ msgstr "修改 :code:`train` 函数,使其接受不同的优化器" + +#~ msgid "" +#~ "Modify the :code:`test` function so that" +#~ " it proves not only the top-1 " +#~ "(regular accuracy) but also the top-5" +#~ " accuracy?" +#~ msgstr "修改 :code:`test` 函数,使其不仅能输出前 1 名(常规精确度),还能证明前 5 名的精确度?" + +#~ msgid "" +#~ "Go larger! Try to adapt the code" +#~ " to larger images and datasets. Why" +#~ " not try training on ImageNet with" +#~ " a ResNet-50?" +#~ msgstr "让我们尝试让代码适应更大的图像和数据集。为什么不尝试使用 ResNet-50 在 ImageNet 上进行训练呢?" + +#~ msgid "You are ready now. Enjoy learning in a federated way!" +#~ msgstr "您现在已经准备就绪。尽情享受联邦学习的乐趣吧!" + +#~ msgid "Differential privacy" +#~ msgstr "差别隐私" + +#~ msgid "" +#~ "Flower provides differential privacy (DP) " +#~ "wrapper classes for the easy integration" +#~ " of the central DP guarantees " +#~ "provided by DP-FedAvg into training " +#~ "pipelines defined in any of the " +#~ "various ML frameworks that Flower is " +#~ "compatible with." 
+#~ msgstr "" +#~ "Flower 提供了差分隐私 (DP) 封装类,可将 DP-FedAvg " +#~ "提供的核心 DP 轻松集成到 Flower 兼容的各种 ML " +#~ "框架中定义的训练模式中。" + +#~ msgid "" +#~ "Please note that these components are" +#~ " still experimental; the correct " +#~ "configuration of DP for a specific " +#~ "task is still an unsolved problem." +#~ msgstr "请注意,这些组件仍处于试验阶段,如何为特定任务正确配置 DP 仍是一个尚未解决的问题。" + +#~ msgid "" +#~ "The name DP-FedAvg is misleading " +#~ "since it can be applied on top " +#~ "of any FL algorithm that conforms " +#~ "to the general structure prescribed by" +#~ " the FedOpt family of algorithms." +#~ msgstr "DP-FedAvg 这个名称容易引起误解,因为它可以应用于任何符合 FedOpt 系列算法规定的一般结构的 FL 算法之上。" + +#~ msgid "DP-FedAvg" +#~ msgstr "DP-FedAvg" + +#~ msgid "" +#~ "DP-FedAvg, originally proposed by " +#~ "McMahan et al. [mcmahan]_ and extended" +#~ " by Andrew et al. [andrew]_, is " +#~ "essentially FedAvg with the following " +#~ "modifications." +#~ msgstr "DP-FedAvg 最初由McMahan等人提出,并由Andrew等人加以扩展。" + +#~ msgid "" +#~ "**Clipping** : The influence of each " +#~ "client's update is bounded by clipping" +#~ " it. This is achieved by enforcing" +#~ " a cap on the L2 norm of " +#~ "the update, scaling it down if " +#~ "needed." +#~ msgstr "**裁剪** : 裁剪会影响到每个客户端的模型参数。具体做法是对参数的 L2 准则设置上限,必要时将其缩减。" + +#~ msgid "" +#~ "**Noising** : Gaussian noise, calibrated " +#~ "to the clipping threshold, is added " +#~ "to the average computed at the " +#~ "server." +#~ msgstr "**噪声** : 在服务器计算出的平均值中加入高斯噪声,该噪声根据剪切阈值进行校准。" + +#~ msgid "" +#~ "The distribution of the update norm " +#~ "has been shown to vary from " +#~ "task-to-task and to evolve as " +#~ "training progresses. This variability is " +#~ "crucial in understanding its impact on" +#~ " differential privacy guarantees, emphasizing " +#~ "the need for an adaptive approach " +#~ "[andrew]_ that continuously adjusts the " +#~ "clipping threshold to track a " +#~ "prespecified quantile of the update norm" +#~ " distribution." 
+#~ msgstr "事实证明,参数更新准则的分布会随着任务的不同而变化,并随着训练的进展而演变。因此,我们采用了一种自适应方法,该方法会不断调整剪切阈值,以跟踪参数更新准则分布的预设量化值。" + +#~ msgid "Simplifying Assumptions" +#~ msgstr "简化假设" + +#~ msgid "" +#~ "We make (and attempt to enforce) a" +#~ " number of assumptions that must be" +#~ " satisfied to ensure that the " +#~ "training process actually realizes the " +#~ ":math:`(\\epsilon, \\delta)` guarantees the " +#~ "user has in mind when configuring " +#~ "the setup." +#~ msgstr "" +#~ "我们提出(并试图执行)了一系列必须满足的假设,以确保训练过程真正实现用户在配置设置时所定的 " +#~ ":math:`(\\epsilon,\\delta)` 。" + +#~ msgid "" +#~ "**Fixed-size subsampling** :Fixed-size " +#~ "subsamples of the clients must be " +#~ "taken at each round, as opposed to" +#~ " variable-sized Poisson subsamples." +#~ msgstr "** 固定大小的子样本** :与可变大小的泊松分布子样本相比,每轮必须抽取固定大小的客户端子样本。" + +#~ msgid "" +#~ "**Unweighted averaging** : The contributions" +#~ " from all the clients must weighted" +#~ " equally in the aggregate to " +#~ "eliminate the requirement for the server" +#~ " to know in advance the sum of" +#~ " the weights of all clients available" +#~ " for selection." +#~ msgstr "**非加权平均**: 所有客户端的贡献必须加权相等,这样服务器就不需要事先知道所有客户的权重总和。" + +#~ msgid "" +#~ "**No client failures** : The set " +#~ "of available clients must stay constant" +#~ " across all rounds of training. In" +#~ " other words, clients cannot drop out" +#~ " or fail." +#~ msgstr "**没有失败的客户端** : 在各轮训练中,可用客户端的数量必须保持不变。换句话说,客户端不能退出或失败。" + +#~ msgid "" +#~ "The first two are useful for " +#~ "eliminating a multitude of complications " +#~ "associated with calibrating the noise to" +#~ " the clipping threshold, while the " +#~ "third one is required to comply " +#~ "with the assumptions of the privacy " +#~ "analysis." +#~ msgstr "前两种方法有助于消除将噪声校准为削波阈值所带来的诸多复杂问题,而第三种方法则需要符合隐私分析的假设。" + +#~ msgid "" +#~ "These restrictions are in line with " +#~ "constraints imposed by Andrew et al. " +#~ "[andrew]_." 
+#~ msgstr "这些限制与 Andrew 等人所施加的限制一致。" + +#~ msgid "Customizable Responsibility for Noise injection" +#~ msgstr "可定制的噪声注入" + +#~ msgid "" +#~ "In contrast to other implementations " +#~ "where the addition of noise is " +#~ "performed at the server, you can " +#~ "configure the site of noise injection" +#~ " to better match your threat model." +#~ " We provide users with the " +#~ "flexibility to set up the training " +#~ "such that each client independently adds" +#~ " a small amount of noise to the" +#~ " clipped update, with the result that" +#~ " simply aggregating the noisy updates " +#~ "is equivalent to the explicit addition" +#~ " of noise to the non-noisy " +#~ "aggregate at the server." +#~ msgstr "与其他在服务器上添加噪声的实现方法不同,您可以配置噪声注入的位置,以便更好地匹配您的威胁模型。我们为用户提供了设置训练的灵活性,使每个客户端都能独立地为剪切参数更新添加少量噪声,这样,只需聚合噪声更新,就相当于在服务器上为非噪声聚合添加噪声了。" + +#~ msgid "" +#~ "To be precise, if we let :math:`m`" +#~ " be the number of clients sampled " +#~ "each round and :math:`\\sigma_\\Delta` be " +#~ "the scale of the total Gaussian " +#~ "noise that needs to be added to" +#~ " the sum of the model updates, " +#~ "we can use simple maths to show" +#~ " that this is equivalent to each " +#~ "client adding noise with scale " +#~ ":math:`\\sigma_\\Delta/\\sqrt{m}`." +#~ msgstr "" +#~ "准确地说,我们假设每轮采样的客户端数量为:math:`m`,:math:`\\sigma_\\Delta` " +#~ "为需要添加到模型更新总和中的总高斯噪声的规模,我们就可以用简单的数学方法证明了,这相当于每个客户端都添加了规模为 " +#~ ":math:`\\sigma_\\Delta/\\sqrt{m}` 的噪声。" + +#~ msgid "Wrapper-based approach" +#~ msgstr "基于封装的方法" + +#~ msgid "" +#~ "Introducing DP to an existing workload" +#~ " can be thought of as adding an" +#~ " extra layer of security around it." +#~ " This inspired us to provide the " +#~ "additional server and client-side logic" +#~ " needed to make the training process" +#~ " differentially private as wrappers for " +#~ "instances of the :code:`Strategy` and " +#~ ":code:`NumPyClient` abstract classes respectively." 
+#~ " This wrapper-based approach has the" +#~ " advantage of being easily composable " +#~ "with other wrappers that someone might" +#~ " contribute to the Flower library in" +#~ " the future, e.g., for secure " +#~ "aggregation. Using Inheritance instead can " +#~ "be tedious because that would require" +#~ " the creation of new sub- classes " +#~ "every time a new class implementing " +#~ ":code:`Strategy` or :code:`NumPyClient` is " +#~ "defined." +#~ msgstr "" +#~ "在现有工作负载中引入 DP " +#~ "可以被认为是在其周围增加了一层额外的安全性。受此启发,我们提供了额外的服务器端和客户端逻辑,分别作为 " +#~ ":code:`Strategy` 和 :code:`NumPyClient` " +#~ "抽象类实例的封装器,使训练过程具有不同的隐私性。这种基于封装器的方法的优点是可以很容易地与将来有人贡献给 Flower " +#~ "的其他封装器(例如用于安全聚合的封装器)进行组合。使用继承可能会比较繁琐,因为每次定义实现 :code:`Strategy`" +#~ " 或 :code:`NumPyClient` 的新类时,都需要创建新的子类。" + +#~ msgid "" +#~ "The first version of our solution " +#~ "was to define a decorator whose " +#~ "constructor accepted, among other things, " +#~ "a boolean-valued variable indicating " +#~ "whether adaptive clipping was to be " +#~ "enabled or not. We quickly realized " +#~ "that this would clutter its " +#~ ":code:`__init__()` function with variables " +#~ "corresponding to hyperparameters of adaptive" +#~ " clipping that would remain unused " +#~ "when it was disabled. A cleaner " +#~ "implementation could be achieved by " +#~ "splitting the functionality into two " +#~ "decorators, :code:`DPFedAvgFixed` and " +#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" +#~ " classing the former. The constructors " +#~ "for both classes accept a boolean " +#~ "parameter :code:`server_side_noising`, which, as " +#~ "the name suggests, determines where " +#~ "noising is to be performed." 
+#~ msgstr "" +#~ "我们的第一版解决方案是定义一个装饰器,其构造函数接受一个布尔值变量,表示是否启用自适应剪裁。我们很快意识到,这样会使其 " +#~ ":code:`__init__()` " +#~ "函数中与自适应裁剪超参数相对应的变量变得杂乱无章,而这些变量在自适应裁剪被禁用时将保持未使用状态。要实现更简洁的功能,可以将该功能拆分为两个装饰器,即" +#~ " :code:`DPFedAvgFixed` 和 " +#~ ":code:`DPFedAvgAdaptive`,后者是前者的子类。这两个类的构造函数都接受一个布尔参数 " +#~ ":code:`server_side_noising`,顾名思义,它决定了在哪里加噪声。" + +#~ msgid "" +#~ "The server-side capabilities required " +#~ "for the original version of DP-" +#~ "FedAvg, i.e., the one which performed" +#~ " fixed clipping, can be completely " +#~ "captured with the help of wrapper " +#~ "logic for just the following two " +#~ "methods of the :code:`Strategy` abstract " +#~ "class." +#~ msgstr "" +#~ "只需对 :code:`Strategy` 抽象类的以下两个方法进行封装,就能完全捕获 DP-" +#~ "FedAvg 原始版本(即执行固定剪裁的版本)所需的服务器端功能。" + +#~ msgid "" +#~ ":code:`configure_fit()` : The config " +#~ "dictionary being sent by the wrapped " +#~ ":code:`Strategy` to each client needs to" +#~ " be augmented with an additional " +#~ "value equal to the clipping threshold" +#~ " (keyed under :code:`dpfedavg_clip_norm`) and," +#~ " if :code:`server_side_noising=true`, another one" +#~ " equal to the scale of the " +#~ "Gaussian noise that needs to be " +#~ "added at the client (keyed under " +#~ ":code:`dpfedavg_noise_stddev`). This entails " +#~ "*post*-processing of the results returned " +#~ "by the wrappee's implementation of " +#~ ":code:`configure_fit()`." +#~ msgstr "" +#~ ":code:`configure_fit()` :由封装的 :code:`Strategy` " +#~ "发送到每个客户端的配置字典需要使用等于裁剪阈值的附加值(在 :code:`dpfedavg_clip_norm` " +#~ "下键入)进行扩充。并且,如果 " +#~ "server_side_noising=true,则另一个值等于需要在客户端添加的高斯噪声的大小(在 " +#~ "dpfedavg_noise_stddev 下键入)。这需要对封装后的configure_fit() " +#~ "所返回的结果进行后处理。" + +#~ msgid "" +#~ ":code:`aggregate_fit()`: We check whether any" +#~ " of the sampled clients dropped out" +#~ " or failed to upload an update " +#~ "before the round timed out. 
In " +#~ "that case, we need to abort the" +#~ " current round, discarding any successful" +#~ " updates that were received, and move" +#~ " on to the next one. On the " +#~ "other hand, if all clients responded " +#~ "successfully, we must force the " +#~ "averaging of the updates to happen " +#~ "in an unweighted manner by intercepting" +#~ " the :code:`parameters` field of " +#~ ":code:`FitRes` for each received update " +#~ "and setting it to 1. Furthermore, " +#~ "if :code:`server_side_noising=true`, each update " +#~ "is perturbed with an amount of " +#~ "noise equal to what it would have" +#~ " been subjected to had client-side" +#~ " noising being enabled. This entails " +#~ "*pre*-processing of the arguments to " +#~ "this method before passing them on " +#~ "to the wrappee's implementation of " +#~ ":code:`aggregate_fit()`." +#~ msgstr "" +#~ ":code:`aggregate_fit()`: " +#~ "我们会检查是否有任何客户端在本轮超时前退出或未能上传参数更新。在这种情况下,我们需要中止当前一轮,丢弃已收到的所有参数更新,然后继续下一轮。另一方面,如果所有客户端都成功响应,我们就必须通过拦截" +#~ " :code:`FitRes` 的 :code:`parameters` 字段并将其设置为 " +#~ "1,强制以不加权的方式平均更新。此外,如果 " +#~ ":code:`server_side_noising=true`,每次更新都会受到一定量的噪声扰动,其扰动量相当于启用客户端噪声时的扰动量。" +#~ " 这就需要在将本方法的参数传递给封装的 :code:`aggregate_fit()` " +#~ "之前,对参数进行*预*处理。" + +#~ msgid "" +#~ "We can't directly change the aggregation" +#~ " function of the wrapped strategy to" +#~ " force it to add noise to the" +#~ " aggregate, hence we simulate client-" +#~ "side noising to implement server-side" +#~ " noising." +#~ msgstr "我们无法直接改变封装策略的聚合函数,迫使它在聚合中添加噪声,因此我们模拟客户端噪声来实现服务器端噪声。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 -msgid "" -"Now, in practice, the training data we work with doesn't originate on the" -" machine we train the model on. It gets created somewhere else." 
-msgstr "实际上,我们使用的训练数据并不来自我们训练模型的机器。它是在其他地方创建的。" +#~ msgid "" +#~ "These changes have been put together " +#~ "into a class called :code:`DPFedAvgFixed`, " +#~ "whose constructor accepts the strategy " +#~ "being decorated, the clipping threshold " +#~ "and the number of clients sampled " +#~ "every round as compulsory arguments. The" +#~ " user is expected to specify the " +#~ "clipping threshold since the order of" +#~ " magnitude of the update norms is " +#~ "highly dependent on the model being " +#~ "trained and providing a default value" +#~ " would be misleading. The number of" +#~ " clients sampled at every round is" +#~ " required to calculate the amount of" +#~ " noise that must be added to " +#~ "each individual update, either by the" +#~ " server or the clients." +#~ msgstr "" +#~ "这些变化被整合到一个名为 :code:`DPFedAvgFixed` " +#~ "的类中,其构造函数接受被装饰的策略、剪切阈值和每轮采样的客户数作为必选参数。用户需要指定剪切阈值,因为参数更新规范的数量级在很大程度上取决于正在训练的模型,提供默认值会产生误导。每轮采样的客户端数量是计算服务器或客户在每次参数更新时添加的噪音量所必需的。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 -msgid "" -"It originates on a smartphone by the user interacting with an app, a car " -"collecting sensor data, a laptop receiving input via the keyboard, or a " -"smart speaker listening to someone trying to sing a song." -msgstr "它源于智能手机上用户与应用程序的交互、汽车上传感器数据的收集、笔记本电脑上键盘输入的接收,或者智能扬声器上某人试着唱的歌。" +#~ msgid "" +#~ "The additional functionality required to " +#~ "facilitate adaptive clipping has been " +#~ "provided in :code:`DPFedAvgAdaptive`, a " +#~ "subclass of :code:`DPFedAvgFixed`. It " +#~ "overrides the above-mentioned methods to" +#~ " do the following." 
+#~ msgstr "" +#~ "自适应剪裁所需的附加功能在 :code:`DPFedAvgAdaptive` 中提供,其是 " +#~ ":code:`DPFedAvgFixed` 的子类。它重写了上述方法,以实现以下功能。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|2bc8e069228d4873804061ff4a95048c|" -msgstr "" +#~ msgid "" +#~ ":code:`configure_fit()` : It intercepts the" +#~ " config dict returned by " +#~ ":code:`super.configure_fit()` to add the " +#~ "key-value pair " +#~ ":code:`dpfedavg_adaptive_clip_enabled:True` to it, " +#~ "which the client interprets as an " +#~ "instruction to include an indicator bit" +#~ " (1 if update norm <= clipping " +#~ "threshold, 0 otherwise) in the results" +#~ " returned by it." +#~ msgstr "" +#~ ":code:`configure_fit()`:它截取由 :code:`super.configure_fit()` " +#~ "返回的 config 字典,并在其中添加键-值对 " +#~ ":code:`dpfedavg_adaptive_clip_enabled:True\",客户端将其解释为在返回结果中包含一个指示位(如果参数更新范式" +#~ " <= 剪裁阈值,则为 1,否则为 0)的指令。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 -msgid "Data on a phone" -msgstr "手机上的数据" +#~ msgid "" +#~ ":code:`aggregate_fit()` : It follows a " +#~ "call to :code:`super.aggregate_fit()` with one" +#~ " to :code:`__update_clip_norm__()`, a procedure" +#~ " which adjusts the clipping threshold " +#~ "on the basis of the indicator bits" +#~ " received from the sampled clients." +#~ msgstr ":code:`aggregate_fit()`:在调用:code:`super.aggregate_fit()`后,再调用:code:`__update_clip_norm__()`,该过程根据从采样客户端接收到的指示位调整裁剪阈值。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 -msgid "" -"What's also important to mention, this \"somewhere else\" is usually not " -"just one place, it's many places. It could be several devices all running" -" the same app. But it could also be several organizations, all generating" -" data for the same task." 
-msgstr "" -"值得一提的是,这个 \"其他地方 " -"\"通常不只是一个地方,而是很多地方。它可能是多个运行同一应用程序的设备。但也可能是多个组织,都在为同一任务生成数据。" +#~ msgid "" +#~ "The client-side capabilities required " +#~ "can be completely captured through " +#~ "wrapper logic for just the :code:`fit()`" +#~ " method of the :code:`NumPyClient` abstract" +#~ " class. To be precise, we need " +#~ "to *post-process* the update computed" +#~ " by the wrapped client to clip " +#~ "it, if necessary, to the threshold " +#~ "value supplied by the server as " +#~ "part of the config dictionary. In " +#~ "addition to this, it may need to" +#~ " perform some extra work if either" +#~ " (or both) of the following keys " +#~ "are also present in the dict." +#~ msgstr "" +#~ "客户端所需的功能完全可以通过 :code:`NumPyClient` 抽象类的 " +#~ ":code:`fit()` " +#~ "方法的封装逻辑来实现。准确地说,我们需要对封装客户端计算的参数更新进行处理,以便在必要时将其剪切到服务器作为配置字典的一部分提供的阈值。除此之外,如果配置字典中还存在以下任一(或两个)键,客户端可能还需要执行一些额外的工作。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|c258488766324dc9a6807f0e7c4fd5f4|" -msgstr "" +#~ msgid "" +#~ ":code:`dpfedavg_noise_stddev` : Generate and " +#~ "add the specified amount of noise " +#~ "to the clipped update." +#~ msgstr "code:`dpfedavg_noise_stddev`:生成并在剪切参数更新中添加指定数量的噪声。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 -msgid "Data is on many devices" -msgstr "数据存在于多种设备中" +#~ msgid "" +#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the" +#~ " metrics dict in the :code:`FitRes` " +#~ "object being returned to the server " +#~ "with an indicator bit, calculated as " +#~ "described earlier." +#~ msgstr "" +#~ ":code:`dpfedavg_adaptive_clip_enabled`:在返回给服务器的 :code:`FitRes`" +#~ " 对象中的度量值字典中增加一个指标位,计算方法如前所述。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 -msgid "" -"So to use machine learning, or any kind of data analysis, the approach " -"that has been used in the past was to collect all data on a central " -"server. This server can be somewhere in a data center, or somewhere in " -"the cloud." 
-msgstr "因此,要使用机器学习或任何类型的数据分析,过去使用的方法是在中央服务器上收集所有数据。这个服务器可以在数据中心的某个地方,也可以在云端的某个地方。" +#~ msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" +#~ msgstr "进行 :math:`(epsilon, \\delta)` 分析" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 -msgid "|d5f962c3f4ec48529efda980868c14b0|" -msgstr "" +#~ msgid "" +#~ "Assume you have trained for :math:`n`" +#~ " rounds with sampling fraction :math:`q`" +#~ " and noise multiplier :math:`z`. In " +#~ "order to calculate the :math:`\\epsilon` " +#~ "value this would result in for a" +#~ " particular :math:`\\delta`, the following " +#~ "script may be used." +#~ msgstr "" +#~ "假设您已经训练了 :math:`n` 轮,采样比例为 :math:`q`,噪声乘数为 " +#~ ":math:`z`。为了计算特定 :math:`\\delta` 的 :math:`epsilon`" +#~ " 值,可以使用下面的脚本。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 -msgid "Central data collection" -msgstr "集中数据收集" +#~ msgid "Flower driver SDK." +#~ msgstr "Flower 服务器。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 -msgid "" -"Once all the data is collected in one place, we can finally use machine " -"learning algorithms to train our model on the data. This is the machine " -"learning approach that we've basically always relied on." -msgstr "一旦所有数据都收集到一处,我们最终就可以使用机器学习算法在数据上训练我们的模型。这就是我们基本上一直依赖的机器学习方法。" +#~ msgid "driver" +#~ msgstr "服务器" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|a5eccea18d4c43a68b54b65043cabef8|" -msgstr "" +#~ msgid "Get task results." +#~ msgstr "汇总训练结果。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 -msgid "Central model training" -msgstr "集中模型训练" +#~ msgid "Request for run ID." +#~ msgstr "Flower 基线申请" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 -msgid "Challenges of classical machine learning" -msgstr "经典机器学习面临的挑战" +#~ msgid "Get client IDs." 
+#~ msgstr "返回客户端(本身)。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 -msgid "" -"The classic machine learning approach we've just seen can be used in some" -" cases. Great examples include categorizing holiday photos, or analyzing " -"web traffic. Cases, where all the data is naturally available on a " -"centralized server." -msgstr "我们刚刚看到的经典机器学习方法可以在某些情况下使用。很好的例子包括对假日照片进行分类或分析网络流量。在这些案例中,所有数据自然都可以在中央服务器上获得。" +#~ msgid "" +#~ "Flower usage examples used to be " +#~ "bundled with Flower in a package " +#~ "called ``flwr_example``. We are migrating " +#~ "those examples to standalone projects to" +#~ " make them easier to use. All " +#~ "new examples are based in the " +#~ "directory `examples " +#~ "`_." +#~ msgstr "" +#~ "Flower 的使用示例曾与 Flower 捆绑在一个名为 ``flwr_example``" +#~ " 的软件包中。我们正在将这些示例迁移到独立项目中,以使它们更易于使用。所有新示例都位于目录 `examples " +#~ "`_。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|f17662f7df2d42f68cac70a1fdeda8a7|" -msgstr "" +#~ msgid "Quickstart TensorFlow/Keras" +#~ msgstr "快速入门 TensorFlow/Keras" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 -msgid "Centralized possible" -msgstr "可集中管理" +#~ msgid "Legacy Examples (`flwr_example`)" +#~ msgstr "传统示例 (`flwr_example`)" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 -msgid "" -"But the approach can not be used in many other cases. Cases, where the " -"data is not available on a centralized server, or cases where the data " -"available on one server is not enough to train a good model." -msgstr "但这种方法并不适用于许多其他情况。例如,集中服务器上没有数据,或者一台服务器上的数据不足以训练出一个好的模型。" +#~ msgid "" +#~ "The useage examples in `flwr_example` " +#~ "are deprecated and will be removed " +#~ "in the future. New examples are " +#~ "provided as standalone projects in " +#~ "`examples `_." 
+#~ msgstr "" +#~ "在 `flwr_example` 中的使用示例已被弃用,今后将被移除。新示例将作为独立项目在 " +#~ "`examples `_" +#~ " 中提供。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 -msgid "|241fc906441a4f038c625a19d30d01b2|" -msgstr "" +#~ msgid "Extra Dependencies" +#~ msgstr "额外依赖" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 -msgid "Centralized impossible" -msgstr "无法集中" +#~ msgid "" +#~ "The core Flower framework keeps a " +#~ "minimal set of dependencies. The " +#~ "examples demonstrate Flower in the " +#~ "context of different machine learning " +#~ "frameworks, so additional dependencies need" +#~ " to be installed before an example" +#~ " can be run." +#~ msgstr "" +#~ "Flower 核心框架只保留了最低限度的依赖项。这些示例在不同机器学习框架的背景下演示了 " +#~ "Flower,因此在运行示例之前需要安装额外的依赖项。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 -msgid "" -"There are many reasons why the classic centralized machine learning " -"approach does not work for a large number of highly important real-world " -"use cases. Those reasons include:" -msgstr "传统的集中式机器学习方法无法满足现实世界中大量极为重要的使用案例,原因有很多。这些原因包括:" +#~ msgid "For PyTorch examples::" +#~ msgstr "PyTorch 示例::" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 -msgid "" -"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " -"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " -"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " -"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " -"protect sensitive data from being moved. In fact, those regulations " -"sometimes even prevent single organizations from combining their own " -"users' data for artificial intelligence training because those users live" -" in different parts of the world, and their data is governed by different" -" data protection regulations." 
-msgstr "" -"**法规**: " -"GDPR(欧洲)、CCPA(加利福尼亚)、PIPEDA(加拿大)、LGPD(巴西)、PDPL(阿根廷)、KVKK(土耳其)、POPI(南非)、FSS(俄罗斯)、CDPR(中国)、PDPB(印度)、PIPA(韩国)、APPI(日本)、PDP(印度尼西亚)、PDPA(新加坡)、APP(澳大利亚)等法规保护敏感数据不被移动。事实上,这些法规有时甚至会阻止单个组织将自己的用户数据用于人工智能培训,因为这些用户生活在世界不同地区,他们的数据受不同的数据保护法规管辖。" +#~ msgid "For TensorFlow examples::" +#~ msgstr "TensorFlow 示例::" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 -msgid "" -"**User preference**: In addition to regulation, there are use cases where" -" users just expect that no data leaves their device, ever. If you type " -"your passwords and credit card info into the digital keyboard of your " -"phone, you don't expect those passwords to end up on the server of the " -"company that developed that keyboard, do you? In fact, that use case was " -"the reason federated learning was invented in the first place." -msgstr "" -"**用户偏好**: " -"除了法规之外,在一些使用案例中,用户只是希望数据永远不会离开他们的设备。如果你在手机的数字键盘上输入密码和信用卡信息,你不会希望这些密码最终出现在开发该键盘的公司的服务器上吧?事实上,这种用例正是联邦学习发明的初衷。" +#~ msgid "For both PyTorch and TensorFlow examples::" +#~ msgstr "PyTorch 和 TensorFlow 示例::" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 -msgid "" -"**Data volume**: Some sensors, like cameras, produce such a high data " -"volume that it is neither feasible nor economic to collect all the data " -"(due to, for example, bandwidth or communication efficiency). Think about" -" a national rail service with hundreds of train stations across the " -"country. If each of these train stations is outfitted with a number of " -"security cameras, the volume of raw on-device data they produce requires " -"incredibly powerful and exceedingly expensive infrastructure to process " -"and store. And most of the data isn't even useful." 
-msgstr "" -"**数据量**: " -"有些传感器(如摄像头)产生的数据量很大,收集所有数据既不可行,也不经济(例如,由于带宽或通信效率的原因)。试想一下全国铁路服务,全国有数百个火车站。如果每个火车站都安装了许多安全摄像头,那么它们所产生的大量原始设备数据就需要功能强大且极其昂贵的基础设施来处理和存储。而大部分数据甚至都是无用的。" +#~ msgid "" +#~ "Please consult :code:`pyproject.toml` for a" +#~ " full list of possible extras " +#~ "(section :code:`[tool.poetry.extras]`)." +#~ msgstr "" +#~ "请参阅 :code:`pyproject.toml`,了解可能的 extras 的完整列表(章节 " +#~ ":code:`[tool.poems.extras]`)。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 -msgid "Examples where centralized machine learning does not work include:" -msgstr "集中式机器学习不起作用的例子包括:" +#~ msgid "PyTorch Examples" +#~ msgstr "PyTorch 示例" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 -msgid "" -"Sensitive healthcare records from multiple hospitals to train cancer " -"detection models" -msgstr "用多家医院的敏感医疗记录训练癌症检测模型" +#~ msgid "" +#~ "Our PyTorch examples are based on " +#~ "PyTorch 1.7. They should work with " +#~ "other releases as well. So far, we" +#~ " provide the following examples." +#~ msgstr "我们的 PyTorch 示例基于 PyTorch 1.7。它们应该也能在其他版本中使用。到目前为止,我们提供了以下示例。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 -msgid "" -"Financial information from different organizations to detect financial " -"fraud" -msgstr "不同组织的财务信息,以侦查财务欺诈行为" +#~ msgid "CIFAR-10 Image Classification" +#~ msgstr "CIFAR-10 图像分类" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 -msgid "Location data from your electric car to make better range prediction" -msgstr "通过电动汽车的定位数据更好地预测续航里程" +#~ msgid "" +#~ "`CIFAR-10 and CIFAR-100 " +#~ "`_ are " +#~ "popular RGB image datasets. The Flower" +#~ " CIFAR-10 example uses PyTorch to " +#~ "train a simple CNN classifier in a" +#~ " federated learning setup with two " +#~ "clients." 
+#~ msgstr "" +#~ "CIFAR-10 和 CIFAR-100 " +#~ "``_ 是流行的 RGB" +#~ " 图像数据集。Flower CIFAR-10 示例使用 PyTorch " +#~ "在有两个客户端的联邦学习设置中训练一个简单的 CNN 分类器。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 -msgid "End-to-end encrypted messages to train better auto-complete models" -msgstr "端到端加密信息可训练出更好的自动完成模型" +#~ msgid "First, start a Flower server:" +#~ msgstr "首先,启动 Flower 服务器:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 -msgid "" -"The popularity of privacy-enhancing systems like the `Brave " -"`__ browser or the `Signal `__ " -"messenger shows that users care about privacy. In fact, they choose the " -"privacy-enhancing version over other alternatives, if such an alternative" -" exists. But what can we do to apply machine learning and data science to" -" these cases to utilize private data? After all, these are all areas that" -" would benefit significantly from recent advances in AI." -msgstr "" -"像 `Brave `__浏览器或 `Signal " -"`__信息管理器这样的隐私增强系统的流行表明,用户关心隐私。事实上,他们会选择隐私性更好的产品。但是,我们能做些什么来将机器学习和数据科学应用到这些情况中,以利用隐私数据呢?毕竟,这些领域都将从人工智能的最新进展中受益匪浅。" +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 -msgid "Federated learning" -msgstr "联邦学习" +#~ msgid "Then, start the two clients in a new terminal window:" +#~ msgstr "然后,在新的终端窗口中启动两个客户端:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 -msgid "" -"Federated learning simply reverses this approach. It enables machine " -"learning on distributed data by moving the training to the data, instead " -"of moving the data to the training. 
Here's the single-sentence " -"explanation:" -msgstr "联邦学习简单地颠覆了这种方法。它通过将训练转移到数据上,而不是将数据转移到训练上,在分布式数据上实现机器学习。下面是一句话的解释:" +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 -msgid "Central machine learning: move the data to the computation" -msgstr "集中式机器学习:将数据转移到计算中心" +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." +#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/pytorch_cifar`。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 -msgid "Federated (machine) learning: move the computation to the data" -msgstr "联邦式(机器)学习:将计算转移到数据上" +#~ msgid "ImageNet-2012 Image Classification" +#~ msgstr "ImageNet-2012 图像分类" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 -msgid "" -"By doing so, it enables us to use machine learning (and other data " -"science approaches) in areas where it wasn't possible before. We can now " -"train excellent medical AI models by enabling different hospitals to work" -" together. We can solve financial fraud by training AI models on the data" -" of different financial institutions. We can build novel privacy-" -"enhancing applications (such as secure messaging) that have better built-" -"in AI than their non-privacy-enhancing alternatives. And those are just a" -" few of the examples that come to mind. As we deploy federated learning, " -"we discover more and more areas that can suddenly be reinvented because " -"they now have access to vast amounts of previously inaccessible data." -msgstr "这样,我们就能在以前不可能的领域使用机器学习(和其他数据科学方法)。现在,我们可以通过让不同的医院协同工作来训练优秀的医疗人工智能模型。我们可以通过在不同金融机构的数据上训练人工智能模型来解决金融欺诈问题。我们可以构建新颖的隐私增强型应用(如安全信息),其内置的人工智能比非隐私增强型应用更好。以上只是我想到的几个例子。随着联邦学习的部署,我们会发现越来越多的领域可以突然重获新生,因为它们现在可以访问大量以前无法访问的数据。" +#~ msgid "" +#~ "`ImageNet-2012 `_ is " +#~ "one of the major computer vision " +#~ "datasets. 
The Flower ImageNet example " +#~ "uses PyTorch to train a ResNet-18 " +#~ "classifier in a federated learning setup" +#~ " with ten clients." +#~ msgstr "" +#~ "ImageNet-2012 `_ " +#~ "是主要的计算机视觉数据集之一。Flower ImageNet 示例使用 PyTorch " +#~ "在有十个客户端的联邦学习设置中训练 ResNet-18 分类器。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 -msgid "" -"So how does federated learning work, exactly? Let's start with an " -"intuitive explanation." -msgstr "那么,联邦学习究竟是如何运作的呢?让我们从直观的解释开始。" +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 -msgid "Federated learning in five steps" -msgstr "联邦学习的五个步骤" +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 -msgid "Step 0: Initialize global model" -msgstr "步骤 0:初始化全局模型" +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." +#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/pytorch_imagenet`。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 -msgid "" -"We start by initializing the model on the server. This is exactly the " -"same in classic centralized learning: we initialize the model parameters," -" either randomly or from a previously saved checkpoint." -msgstr "我们首先在服务器上初始化模型。这与经典的集中式学习完全相同:我们随机或从先前保存的检查点初始化模型参数。" +#~ msgid "TensorFlow Examples" +#~ msgstr "TensorFlow 示例" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|0aa5aa05810b44b6a835cecce28f3137|" -msgstr "" +#~ msgid "" +#~ "Our TensorFlow examples are based on " +#~ "TensorFlow 2.0 or newer. So far, " +#~ "we provide the following examples." 
+#~ msgstr "我们的 TensorFlow 示例基于 TensorFlow 2.0 或更新版本。到目前为止,我们提供了以下示例。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 -msgid "Initialize global model" -msgstr "初始化全局模型" +#~ msgid "Fashion-MNIST Image Classification" +#~ msgstr "Fashion-MNIST 图像分类" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 -msgid "" -"Step 1: Send model to a number of connected organizations/devices (client" -" nodes)" -msgstr "第 1 步:将模型发送到多个连接的组织/设备(客户节点)" +#~ msgid "" +#~ "`Fashion-MNIST `_ is often used as " +#~ "the \"Hello, world!\" of machine " +#~ "learning. We follow this tradition and" +#~ " provide an example which samples " +#~ "random local datasets from Fashion-MNIST" +#~ " and trains a simple image " +#~ "classification model over those partitions." +#~ msgstr "" +#~ "`Fashion-MNIST `_ 经常被用作机器学习的 \"你好,世界!\"。我们遵循这一传统" +#~ ",提供了一个从Fashion-MNIST 中随机抽样本地数据集的示例,并在这些分区上训练一个简单的图像分类模型。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 -msgid "" -"Next, we send the parameters of the global model to the connected client " -"nodes (think: edge devices like smartphones or servers belonging to " -"organizations). This is to ensure that each participating node starts " -"their local training using the same model parameters. We often use only a" -" few of the connected nodes instead of all nodes. The reason for this is " -"that selecting more and more client nodes has diminishing returns." 
-msgstr "接下来,我们会将全局模型的参数发送到连接的客户端节点(如智能手机等边缘设备或企业的服务器)。这是为了确保每个参与节点都使用相同的模型参数开始本地训练。我们通常只使用几个连接节点,而不是所有节点。这样做的原因是,选择越来越多的客户端节点会导致收益递减。" +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|c742940dd4bf4de09d8d0d5e8d179638|" -msgstr "" +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 -msgid "Send global model" -msgstr "发送全局模型" +#~ msgid "" +#~ "For more details, see " +#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." +#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/tensorflow_fashion_mnist`。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 -msgid "" -"Step 2: Train model locally on the data of each organization/device " -"(client node)" -msgstr "步骤 2:在本地对每个机构/设备(客户端节点)的数据进行模型训练" +#~ msgid "``BASE_IMAGE_TAG``" +#~ msgstr "基本图像标签" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 -msgid "" -"Now that all (selected) client nodes have the latest version of the " -"global model parameters, they start the local training. They use their " -"own local dataset to train their own local model. They don't train the " -"model until full convergence, but they only train for a little while. " -"This could be as little as one epoch on the local data, or even just a " -"few steps (mini-batches)." -msgstr "" -"现在,所有(选定的)客户端节点都有了最新版本的全局模型参数,它们开始进行本地训练。它们使用自己的本地数据集来训练自己的本地模型。它们不会一直训练到模型完全收敛为止,而只是训练一小段时间。这可能只是本地数据上的一个遍历,甚至只是几个步骤" -"(mini-batches)。" +#~ msgid "The image tag of the base image." +#~ msgstr "基础图像的图像标记。" + +#~ msgid "" +#~ "It is important to follow the " +#~ "instructions described in comments. 
For " +#~ "instance, in order to not break " +#~ "how our changelog system works, you " +#~ "should read the information above the" +#~ " ``Changelog entry`` section carefully. You" +#~ " can also checkout some examples and" +#~ " details in the :ref:`changelogentry` " +#~ "appendix." +#~ msgstr "" +#~ "请务必遵守注释中的说明。例如,为了不破坏我们的更新日志系统,你应该仔细阅读\"`更新日志条目``\"部分上面的信息。您还可以查看 " +#~ ":ref:`changelogentry` 附录中的一些示例和细节。" + +#~ msgid "Open a PR (as shown above)" +#~ msgstr "打开 PR(如上图所示)" + +#~ msgid "How to write a good PR title" +#~ msgstr "如何撰写好的公关标题" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|1f169ab4601a47e1a226f1628f4ebddb|" -msgstr "" +#~ msgid "" +#~ "A well-crafted PR title helps team" +#~ " members quickly understand the purpose " +#~ "and scope of the changes being " +#~ "proposed. Here's a guide to help " +#~ "you write a good GitHub PR title:" +#~ msgstr "一个精心撰写的公关标题能帮助团队成员迅速了解所提修改的目的和范围。以下指南可帮助您撰写一个好的 GitHub PR 标题:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 -msgid "Train on local data" -msgstr "根据本地数据进行训练" +#~ msgid "" +#~ "1. Be Clear and Concise: Provide a" +#~ " clear summary of the changes in " +#~ "a concise manner. 1. Use Actionable " +#~ "Verbs: Start with verbs like \"Add,\"" +#~ " \"Update,\" or \"Fix\" to indicate " +#~ "the purpose. 1. Include Relevant " +#~ "Information: Mention the affected feature " +#~ "or module for context. 1. Keep it" +#~ " Short: Avoid lengthy titles for easy" +#~ " readability. 1. Use Proper Capitalization" +#~ " and Punctuation: Follow grammar rules " +#~ "for clarity." +#~ msgstr "" +#~ "1. 简明扼要: 以简明扼要的方式清楚地概述变化。1. 使用可操作的动词: 使用 " +#~ "\"添加\"、\"更新 \"或 \"修复 \"等动词来表明目的。1. 包含相关信息: " +#~ "提及受影响的功能或模块以了解上下文。1. 简短:避免冗长的标题,以方便阅读。1. 
使用正确的大小写和标点符号:" +#~ " 遵守语法规则,以确保清晰。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 -msgid "Step 3: Return model updates back to the server" -msgstr "步骤 3:将模型参数更新返回服务器" +#~ msgid "" +#~ "Let's start with a few examples " +#~ "for titles that should be avoided " +#~ "because they do not provide meaningful" +#~ " information:" +#~ msgstr "让我们先举例说明几个应该避免使用的标题,因为它们不能提供有意义的信息:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 -msgid "" -"After local training, each client node has a slightly different version " -"of the model parameters they originally received. The parameters are all " -"different because each client node has different examples in its local " -"dataset. The client nodes then send those model updates back to the " -"server. The model updates they send can either be the full model " -"parameters or just the gradients that were accumulated during local " -"training." -msgstr "经过本地训练后,每个客户节点最初收到的模型参数都会略有不同。参数之所以不同,是因为每个客户端节点的本地数据集中都有不同的数据。然后,客户端节点将这些模型更新发回服务器。它们发送的模型更新既可以是完整的模型参数,也可以只是本地训练过程中积累的梯度。" +#~ msgid "Implement Algorithm" +#~ msgstr "执行算法" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|12cfa9cde14440ecb8c8f6c1d7185bec|" -msgstr "" +#~ msgid "Add my_new_file.py to codebase" +#~ msgstr "在代码库中添加 my_new_file.py" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 -msgid "Send model updates" -msgstr "发送模型参数更新" +#~ msgid "Improve code in module" +#~ msgstr "改进模块中的代码" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 -msgid "Step 4: Aggregate model updates into a new global model" -msgstr "步骤 4:将模型更新聚合到新的全局模型中" +#~ msgid "Change SomeModule" +#~ msgstr "更改 SomeModule" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 -msgid "" -"The server receives model updates from the selected client nodes. 
If it " -"selected 100 client nodes, it now has 100 slightly different versions of " -"the original global model, each trained on the local data of one client. " -"But didn't we want to have one model that contains the learnings from the" -" data of all 100 client nodes?" -msgstr "" -"服务器从选定的客户端节点接收模型更新。如果服务器选择了 100 个客户端节点,那么它现在就拥有 100 " -"个略有不同的原始全局模型版本,每个版本都是根据一个客户端的本地数据训练出来的。难道我们不希望有一个包含所有 100 个客户节点数据的模型吗?" +#~ msgid "" +#~ "Here are a few positive examples " +#~ "which provide helpful information without " +#~ "repeating how they do it, as that" +#~ " is already visible in the \"Files" +#~ " changed\" section of the PR:" +#~ msgstr "这里有几个正面的例子,提供了有用的信息,但没有重复他们是如何做的,因为在 PR 的 \"已更改文件 \"部分已经可以看到:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 -msgid "" -"In order to get one single model, we have to combine all the model " -"updates we received from the client nodes. This process is called " -"*aggregation*, and there are many different ways to do it. The most basic" -" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " -"`__), often abbreviated as *FedAvg*. " -"*FedAvg* takes the 100 model updates and, as the name suggests, averages " -"them. To be more precise, it takes the *weighted average* of the model " -"updates, weighted by the number of examples each client used for " -"training. The weighting is important to make sure that each data example " -"has the same \"influence\" on the resulting global model. If one client " -"has 10 examples, and another client has 100 examples, then - without " -"weighting - each of the 10 examples would influence the global model ten " -"times as much as each of the 100 examples." 
-msgstr "" -"为了得到一个单一的模型,我们必须将从客户端节点收到的所有模型更新合并起来。这个过程称为*聚合*,有许多不同的方法。最基本的方法称为 " -"*Federated Averaging* (`McMahan等人,2016 " -"`__),通常缩写为*FedAvg*。*FedAvg* 可以把100 " -"个模型更新进行平均。更准确地说,它取的是模型更新的*加权平均值*,根据每个客户端用于训练的数据数量进行加权。加权对于确保每个数据示例对生成的全局模型具有相同的" -" \"影响 \"非常重要。如果一个客户端有 10 个数据点,而另一个客户有 100 个数据点,那么在不加权的情况下,10 个示例对全局模型的影响是" -" 100 个示例的 10 倍。" +#~ msgid "Update docs banner to mention Flower Summit 2023" +#~ msgstr "更新文件横幅,提及 2023 年 Flower 峰会" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|72939caf6e294b0986fee6dde96614d7|" -msgstr "" +#~ msgid "Remove unnecessary XGBoost dependency" +#~ msgstr "移除不必要的 XGBoost 依赖性" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 -msgid "Aggregate model updates" -msgstr "聚合模型参数更新" +#~ msgid "Remove redundant attributes in strategies subclassing FedAvg" +#~ msgstr "删除 FedAvg 子类化策略中的多余属性" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 -msgid "Step 5: Repeat steps 1 to 4 until the model converges" -msgstr "步骤 5:重复步骤 1 至 4,直至模型收敛" +#~ msgid "" +#~ "Add CI job to deploy the staging" +#~ " system when the ``main`` branch " +#~ "changes" +#~ msgstr "添加 CI 作业,以便在 \"主 \"分支发生变化时部署暂存系统" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 -msgid "" -"Steps 1 to 4 are what we call a single round of federated learning. The " -"global model parameters get sent to the participating client nodes (step " -"1), the client nodes train on their local data (step 2), they send their " -"updated models to the server (step 3), and the server then aggregates the" -" model updates to get a new version of the global model (step 4)." 
-msgstr "" -"步骤 1 至 4 就是我们所说的单轮联邦学习。全局模型参数被发送到参与的客户端节点(第 1 步),客户端节点对其本地数据进行训练(第 2 " -"步),然后将更新后的模型发送到服务器(第 3 步),服务器汇总模型更新,得到新版本的全局模型(第 4 步)。" +#~ msgid "" +#~ "Add new amazing library which will " +#~ "be used to improve the simulation " +#~ "engine" +#~ msgstr "添加新的惊人库,用于改进模拟引擎" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 -msgid "" -"During a single round, each client node that participates in that " -"iteration only trains for a little while. This means that after the " -"aggregation step (step 4), we have a model that has been trained on all " -"the data of all participating client nodes, but only for a little while. " -"We then have to repeat this training process over and over again to " -"eventually arrive at a fully trained model that performs well across the " -"data of all client nodes." -msgstr "" -"在一轮迭代中,每个参与迭代的客户节点只训练一小段时间。这意味着,在聚合步骤(步骤 " -"4)之后,我们的模型已经在所有参与的客户节点的所有数据上训练过了,但只训练了一小会儿。然后,我们必须一次又一次地重复这一训练过程,最终得到一个经过全面训练的模型,该模型在所有客户节点的数据中都表现良好。" +#~ msgid "Changelog entry" +#~ msgstr "更新日志" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 -msgid "" -"Congratulations, you now understand the basics of federated learning. " -"There's a lot more to discuss, of course, but that was federated learning" -" in a nutshell. In later parts of this tutorial, we will go into more " -"detail. Interesting questions include: How can we select the best client " -"nodes that should participate in the next round? What's the best way to " -"aggregate model updates? How can we handle failing client nodes " -"(stragglers)?" -msgstr "" -"恭喜您,现在您已经了解了联邦学习的基础知识。当然,要讨论的内容还有很多,但这只是联邦学习的一个缩影。在本教程的后半部分,我们将进行更详细的介绍。有趣的问题包括" -" 我们如何选择最好的客户端节点参与下一轮学习?聚合模型更新的最佳方法是什么?如何处理失败的客户端节点(落伍者)?" +#~ msgid "" +#~ "When opening a new PR, inside its" +#~ " description, there should be a " +#~ "``Changelog entry`` header." 
+#~ msgstr "打开一个新 PR 时,在其描述中应有一个 ``Changelog entry`` 标头。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 -msgid "" -"Just like we can train a model on the decentralized data of different " -"client nodes, we can also evaluate the model on that data to receive " -"valuable metrics. This is called federated evaluation, sometimes " -"abbreviated as FE. In fact, federated evaluation is an integral part of " -"most federated learning systems." -msgstr "" -"就像我们可以在不同客户节点的分散数据上训练一个模型一样,我们也可以在这些数据上对模型进行评估,以获得有价值的指标。这就是所谓的联邦评估,有时简称为" -" FE。事实上,联邦评估是大多数联邦学习系统不可或缺的一部分。" +#~ msgid "" +#~ "Above this header you should see " +#~ "the following comment that explains how" +#~ " to write your changelog entry:" +#~ msgstr "在页眉上方,你会看到以下注释,说明如何编写更新日志条目:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 -msgid "Federated analytics" -msgstr "联邦分析" +#~ msgid "" +#~ "Inside the following 'Changelog entry' " +#~ "section, you should put the description" +#~ " of your changes that will be " +#~ "added to the changelog alongside your" +#~ " PR title." +#~ msgstr "在下面的 \"更新日志条目 \"部分中,您应该在 PR 标题旁边写上将添加到更新日志中的更改描述。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 -msgid "" -"In many cases, machine learning isn't necessary to derive value from " -"data. Data analysis can yield valuable insights, but again, there's often" -" not enough data to get a clear answer. What's the average age at which " -"people develop a certain type of health condition? Federated analytics " -"enables such queries over multiple client nodes. It is usually used in " -"conjunction with other privacy-enhancing technologies like secure " -"aggregation to prevent the server from seeing the results submitted by " -"individual client nodes." 
-msgstr "在很多情况下,机器学习并不是从数据中获取价值的必要条件。数据分析可以产生有价值的见解,但同样,往往没有足够的数据来获得明确的答案。人们患某种健康疾病的平均年龄是多少?联邦分析可以通过多个客户端节点进行此类查询。它通常与安全聚合等其他隐私增强技术结合使用,以防止服务器看到单个客户端节点提交的结果。" +#~ msgid "" +#~ "If the section is completely empty " +#~ "(without any token) or non-existent, " +#~ "the changelog will just contain the " +#~ "title of the PR for the changelog" +#~ " entry, without any description." +#~ msgstr "如果该部分完全为空(没有任何标记)或不存在,更新日志将只包含更新日志条目的 PR 标题,而不包含任何描述。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 -msgid "" -"Differential privacy (DP) is often mentioned in the context of Federated " -"Learning. It is a privacy-preserving method used when analyzing and " -"sharing statistical data, ensuring the privacy of individual " -"participants. DP achieves this by adding statistical noise to the model " -"updates, ensuring any individual participants’ information cannot be " -"distinguished or re-identified. This technique can be considered an " -"optimization that provides a quantifiable privacy protection measure." -msgstr "" -"差分隐私(DP)经常在联邦学习中被提及。这是一种在分析和共享统计数据时使用的隐私保护方法,可确保单个参与者的隐私。DP " -"通过在模型更新中添加统计噪声来实现这一目的,确保任何个体参与者的信息都无法被区分或重新识别。这种技术可被视为一种优化,提供了一种可量化的隐私保护措施。" +#~ msgid "" +#~ "If the section contains some text " +#~ "other than tokens, it will use it" +#~ " to add a description to the " +#~ "change." 
+#~ msgstr "如果该部分包含标记以外的文本,它将使用这些文本为更改添加说明。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 -msgid "Flower" -msgstr "Flower" +#~ msgid "" +#~ "If the section contains one of the" +#~ " following tokens it will ignore any" +#~ " other text and put the PR " +#~ "under the corresponding section of the" +#~ " changelog:" +#~ msgstr "如果该部分包含以下标记之一,它将忽略任何其他文本,并将 PR 放在更新日志的相应部分下:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 -msgid "" -"Federated learning, federated evaluation, and federated analytics require" -" infrastructure to move machine learning models back and forth, train and" -" evaluate them on local data, and then aggregate the updated models. " -"Flower provides the infrastructure to do exactly that in an easy, " -"scalable, and secure way. In short, Flower presents a unified approach to" -" federated learning, analytics, and evaluation. It allows the user to " -"federate any workload, any ML framework, and any programming language." -msgstr "" -"联邦学习、联邦评估和联邦分析需要基础框架来来回移动机器学习模型,在本地数据上对其进行训练和评估,然后汇总更新的模型。Flower " -"提供的基础架构正是以简单、可扩展和安全的方式实现这些目标的。简而言之,Flower " -"为联邦学习、分析和评估提供了一种统一的方法。它允许用户联邦化任何工作负载、任何 ML 框架和任何编程语言。" +#~ msgid " is for classifying a PR as a general improvement." 
+#~ msgstr " 用于将 PR 划分为一般改进。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|83a8daee45da4a98b8d6f24ae098fc50|" -msgstr "" +#~ msgid " is to not add the PR to the changelog" +#~ msgstr "表示不将 PR 添加到更新日志中" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 -msgid "" -"Flower federated learning server and client nodes (car, scooter, personal" -" computer, roomba, and phone)" -msgstr "Flower联邦学习服务器和客户端节点(汽车、滑板车、个人电脑、roomba 和电话)" +#~ msgid " is to add a general baselines change to the PR" +#~ msgstr " 是指在 PR 中添加一般基线更改" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 -msgid "" -"Congratulations, you just learned the basics of federated learning and " -"how it relates to the classic (centralized) machine learning!" -msgstr "恭喜您,您刚刚了解了联邦学习的基础知识,以及它与传统(集中式)机器学习的关系!" +#~ msgid " is to add a general examples change to the PR" +#~ msgstr " 是在 PR 中添加对一般示例的修改" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 -msgid "" -"In the next part of this tutorial, we are going to build a first " -"federated learning system with Flower." -msgstr "在本教程的下一部分,我们将用 Flower 建立第一个联邦学习系统。" +#~ msgid " is to add a general sdk change to the PR" +#~ msgstr " 是指在 PR 中添加一般的 sdk 更改" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 -msgid "" -"The `Flower Federated Learning Tutorial - Part 1 " -"`__ shows how to build a simple federated learning system " -"with PyTorch and Flower." -msgstr "" -"`Flower 联邦学习教程 - 第 1 部分 `__ 展示了如何使用 PyTorch 和 Flower " -"构建一个简单的联邦学习系统。" +#~ msgid " is to add a general simulations change to the PR" +#~ msgstr "(模拟)是在 PR 中添加一般模拟变更" -#~ msgid "Before the release" -#~ msgstr "发布前" +#~ msgid "Note that only one token should be used." +#~ msgstr "请注意,只能使用一个标记。" #~ msgid "" -#~ "Update the changelog (``changelog.md``) with" -#~ " all relevant changes that happened " -#~ "after the last release. 
If the " -#~ "last release was tagged ``v1.2.0``, you" -#~ " can use the following URL to " -#~ "see all commits that got merged " -#~ "into ``main`` since then:" -#~ msgstr "" -#~ "更新更新日志 (``changelog.md``),加入上次发布后发生的所有相关变更。如果上次发布的版本被标记为 " -#~ "``v1.2.0``,则可以使用以下 URL 查看此后合并到 ``main`` 的所有提交:" +#~ "Its content must have a specific " +#~ "format. We will break down what " +#~ "each possibility does:" +#~ msgstr "其内容必须有特定的格式。我们将分析每种可能性的作用:" #~ msgid "" -#~ "`GitHub: Compare v1.2.0...main " -#~ "`_" -#~ msgstr "" -#~ "`GitHub: Compare v1.2.0...main " -#~ "`_" +#~ "If the ``### Changelog entry`` section" +#~ " contains nothing or doesn't exist, " +#~ "the following text will be added " +#~ "to the changelog::" +#~ msgstr "如果 ``#### Changelog entry`` 部分不包含任何内容或不存在,则会在更新日志中添加以下文本::" #~ msgid "" -#~ "Thank the authors who contributed since" -#~ " the last release. This can be " -#~ "done by running the ``./dev/add-" -#~ "shortlog.sh`` convenience script (it can " -#~ "be ran multiple times and will " -#~ "update the names in the list if" -#~ " new contributors were added in the" -#~ " meantime)." -#~ msgstr "" -#~ "感谢自上次发布以来做出贡献的作者。可以通过运行 ``./dev/add-shortlog.sh`` " -#~ "方便脚本来完成(可以多次运行,如果在此期间有新的贡献者加入,则会更新列表中的名字)。" +#~ "If the ``### Changelog entry`` section" +#~ " contains a description (and no " +#~ "token), the following text will be " +#~ "added to the changelog::" +#~ msgstr "如果 ``#### Changelog entry`` 部分包含描述(但没有标记),则会在更新日志中添加以下文本::" #~ msgid "" -#~ "Update the ``changelog.md`` section header " -#~ "``Unreleased`` to contain the version " -#~ "number and date for the release " -#~ "you are building. Create a pull " -#~ "request with the change." -#~ msgstr "" -#~ "更新 ``changelog.md`` 部分的标题 ``Unreleased`` " -#~ "以包含你正在构建的版本的版本号和日期。创建一个包含更改的拉取请求。" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, nothing will change" +#~ " in the changelog." 
+#~ msgstr "如果 ``#### Changelog entry`` 部分包含 ````,更新日志中将不会有任何更改。" #~ msgid "" -#~ "Second, create a virtual environment " -#~ "(and activate it). If you chose to" -#~ " use :code:`pyenv` (with the :code" -#~ ":`pyenv-virtualenv` plugin) and already " -#~ "have it installed , you can use" -#~ " the following convenience script (by " -#~ "default it will use :code:`Python " -#~ "3.8.17`, but you can change it by" -#~ " providing a specific :code:``)::" -#~ msgstr "" -#~ "其次,创建虚拟环境(并激活它)。如果您选择使用 :code:`pyenv`(使用 :code:`pyenv-" -#~ "virtualenv`插件),并且已经安装了该插件,则可以使用下面的便捷脚本(默认情况下使用 " -#~ ":code:`Python3.8.17`,但您可以通过提供特定的 :code:`<版本>`来更改)::" - -#~ msgid "flwr (Python API reference)" -#~ msgstr "flwr(Python API 参考)" - -#~ msgid "..." -#~ msgstr "..." - -#~ msgid "Starting a client with an insecure server connection:" -#~ msgstr "使用不安全的服务器连接启动客户端:" - -#~ msgid "server.strategy.FedAvg" -#~ msgstr "server.strategy.FedAvg" - -#~ msgid "server.strategy.FedAvgM" -#~ msgstr "server.strategy.FedAvgM" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following text" +#~ " will be added to the changelog::" +#~ msgstr "如果 ``### Changelog entry`` 部分包含 ````,则会在更新日志中添加以下文本::" -#~ msgid "Configurable FedAvg with Momentum strategy implementation." -#~ msgstr "可配置的 FedAvg 动量策略实施。" +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" +#~ msgstr "如果``### 更新日志条目``部分包含``<基准线>``,则会在更新日志中添加以下文本::" -#~ msgid "Fraction of clients used during training. Defaults to 0.1." -#~ msgstr "训练期间使用客户的比例。默认为 0.1。" +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" +#~ msgstr "如果``### 更新日志条目``部分包含``<示例>``,则会在更新日志中添加以下文本::" -#~ msgid "Fraction of clients used during validation. Defaults to 0.1." 
-#~ msgstr "验证过程中使用的客户端比例。默认为 0.1。" +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following text " +#~ "will be added to the changelog::" +#~ msgstr "如果``### 更新日志条目``部分包含````,则会在更新日志中添加以下文本::" -#~ msgid "server.strategy.FedMedian" -#~ msgstr "server.strategy.FedMedian" +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" +#~ msgstr "如果 ``### Changelog entry`` 部分包含 ````,则会在更新日志中添加以下文本::" -#~ msgid "server.strategy.QFedAvg" -#~ msgstr "server.strategy.QFedAvg" +#~ msgid "" +#~ "Note that only one token must be" +#~ " provided, otherwise, only the first " +#~ "action (in the order listed above), " +#~ "will be performed." +#~ msgstr "请注意,必须只提供一个标记,否则将只执行第一个操作(按上述顺序)。" -#~ msgid "server.strategy.FedOpt" -#~ msgstr "server.strategy.FedOpt" +#~ msgid "Example: MXNet - Run MXNet Federated" +#~ msgstr "示例: MXNet - 运行联邦式 MXNet" -#~ msgid "Configurable FedAdagrad strategy implementation." -#~ msgstr "可配置的 FedAdagrad 策略实施。" +#~ msgid "" +#~ "This tutorial will show you how to" +#~ " use Flower to build a federated " +#~ "version of an existing MXNet workload." +#~ " We are using MXNet to train a" +#~ " Sequential model on the MNIST " +#~ "dataset. We will structure the example" +#~ " similar to our `PyTorch - From " +#~ "Centralized To Federated " +#~ "`_ walkthrough. " +#~ "MXNet and PyTorch are very similar " +#~ "and a very good comparison between " +#~ "MXNet and PyTorch is given `here " +#~ "`_. First, " +#~ "we build a centralized training approach" +#~ " based on the `Handwritten Digit " +#~ "Recognition " +#~ "`_" +#~ " tutorial. Then, we build upon the" +#~ " centralized training code to run the" +#~ " training in a federated fashion." 
+#~ msgstr "" +#~ "本教程将向您展示如何使用 Flower 构建现有 MXNet 的联学习版本。我们将使用" +#~ " MXNet 在 MNIST 数据集上训练一个序列模型。另外,我们将采用与我们的 " +#~ "`PyTorch - 从集中式到联邦式 " +#~ "`_ 教程类似的示例结构。MXNet" +#~ " 和 PyTorch 非常相似,参考 `此处 " +#~ "`_对 MXNet " +#~ "和 PyTorch 进行了详细的比较。首先,我们根据 `手写数字识别 " +#~ "`" +#~ " 教程 建立了集中式训练方法。然后,我们在集中式训练代码的基础上,以联邦方式运行训练。" -#~ msgid "Federated Optim strategy interface." -#~ msgstr "Federated Optim 策略界面。" +#~ msgid "" +#~ "Before we start setting up our " +#~ "MXNet example, we install the " +#~ ":code:`mxnet` and :code:`flwr` packages:" +#~ msgstr "在开始设置 MXNet 示例之前,我们先安装 :code:`mxnet` 和 :code:`flwr` 软件包:" -#~ msgid "server.strategy.FedProx" -#~ msgstr "server.strategy.FedProx" +#~ msgid "MNIST Training with MXNet" +#~ msgstr "使用 MXNet 进行 MNIST 训练" -#~ msgid "Configurable FedProx strategy implementation." -#~ msgstr "可配置的 FedProx 策略实施。" +#~ msgid "" +#~ "We begin with a brief description " +#~ "of the centralized training code based" +#~ " on a :code:`Sequential` model. If " +#~ "you want a more in-depth " +#~ "explanation of what's going on then " +#~ "have a look at the official `MXNet" +#~ " tutorial " +#~ "`_." +#~ msgstr "" +#~ "首先,我们将简要介绍基于 :code:`Sequential` " +#~ "模型的集中式训练代码。如果您想获得更深入的解释,请参阅官方的 `MXNet教程 " +#~ "`_。" -#~ msgid "server.strategy.FedAdagrad" -#~ msgstr "server.strategy.FedAdagrad" +#~ msgid "" +#~ "Let's create a new file " +#~ "called:code:`mxnet_mnist.py` with all the " +#~ "components required for a traditional " +#~ "(centralized) MNIST training. First, the " +#~ "MXNet package :code:`mxnet` needs to be" +#~ " imported. You can see that we " +#~ "do not yet import the :code:`flwr` " +#~ "package for federated learning. This " +#~ "will be done later." 
+#~ msgstr "" +#~ "让我们创建一个名为:code:`mxnet_mnist.py`的新文件,其中包含传统(集中式)MNIST " +#~ "训练所需的所有组件。首先,需要导入 MXNet 包 " +#~ ":code:`mxnet`。您可以看到,我们尚未导入用于联合学习的 :code:`flwr` 包,这将在稍后完成。" -#~ msgid "Paper: https://arxiv.org/abs/2003.00295" -#~ msgstr "论文: https://arxiv.org/abs/2003.00295" +#~ msgid "" +#~ "The :code:`load_data()` function loads the " +#~ "MNIST training and test sets." +#~ msgstr ":code:`load_data()` 函数加载 MNIST 训练集和测试集。" -#~ msgid "Federated learning strategy using Adagrad on server-side." -#~ msgstr "在服务器端使用 Adagrad 的联邦学习策略。" +#~ msgid "" +#~ "As already mentioned, we will use " +#~ "the MNIST dataset for this machine " +#~ "learning workload. The model architecture " +#~ "(a very simple :code:`Sequential` model) " +#~ "is defined in :code:`model()`." +#~ msgstr "" +#~ "如前所述,我们将使用 MNIST 数据集进行机器学习。模型架构(一个非常简单的 " +#~ ":code:`Sequential` 模型)在 :code:`model()` 中定义。" -#~ msgid "server.strategy.FedAdam" -#~ msgstr "server.strategy.FedAdam" +#~ msgid "" +#~ "We now need to define the training" +#~ " (function :code:`train()`) which loops " +#~ "over the training set and measures " +#~ "the loss for each batch of " +#~ "training examples." +#~ msgstr "现在,我们需要定义训练函数( :code:`train()`),该函数在训练集上循环训练,并计算每批训练示例的损失值。" -#~ msgid "server.strategy.FedYogi" -#~ msgstr "server.strategy.FedYogi" +#~ msgid "" +#~ "The evaluation of the model is " +#~ "defined in function :code:`test()`. The " +#~ "function loops over all test samples " +#~ "and measures the loss and accuracy " +#~ "of the model based on the test " +#~ "dataset." +#~ msgstr "模型的评估在函数 :code:`test()` 中定义。该函数循环遍历所有测试样本,并根据测试数据集计算模型的损失值和准确度。" -#~ msgid "Adaptive Federated Optimization using Yogi." -#~ msgstr "使用 Yogi 的自适应联合优化。" +#~ msgid "" +#~ "Having defined the data loading, model" +#~ " architecture, training, and evaluation we" +#~ " can put everything together and " +#~ "train our model on MNIST. Note " +#~ "that the GPU/CPU device for the " +#~ "training and testing is defined within" +#~ " the :code:`ctx` (context)." 
+#~ msgstr "" +#~ "在定义了数据加载、模型架构、训练和评估之后,我们就可以把所有放在一起,在 MNIST " +#~ "上训练我们的模型了。请注意,用于训练和测试的 GPU/CPU 设备是在 :code:`ctx`中定义的。" -#~ msgid "Federated learning strategy using Yogi on server-side." -#~ msgstr "在服务器端使用 Yogi 的联邦学习策略。" +#~ msgid "You can now run your (centralized) MXNet machine learning workload:" +#~ msgstr "现在,您可以运行(集中式)MXNet 机器学习工作:" -#~ msgid "Paper: https://arxiv.org/abs/1803.01498" -#~ msgstr "论文:https://arxiv.org/abs/1803.01498" +#~ msgid "" +#~ "So far this should all look fairly" +#~ " familiar if you've used MXNet (or" +#~ " even PyTorch) before. Let's take the" +#~ " next step and use what we've " +#~ "built to create a simple federated " +#~ "learning system consisting of one server" +#~ " and two clients." +#~ msgstr "" +#~ "到目前为止,如果你以前使用过 MXNet(甚至 " +#~ "PyTorch),这一切看起来应该相当熟悉。下一步,让我们利用已构建的内容创建一个简单联邦学习系统(由一个服务器和两个客户端组成)。" -#~ msgid "server.strategy.Krum" -#~ msgstr "server.strategy.Krum" +#~ msgid "MXNet meets Flower" +#~ msgstr "MXNet 结合 Flower" -#~ msgid "Configurable Krum strategy implementation." -#~ msgstr "可配置的 Krum 策略实施。" +#~ msgid "" +#~ "So far, it was not easily possible" +#~ " to use MXNet workloads for federated" +#~ " learning because federated learning is " +#~ "not supported in MXNet. Since Flower " +#~ "is fully agnostic towards the underlying" +#~ " machine learning framework, it can " +#~ "be used to federated arbitrary machine" +#~ " learning workloads. This section will " +#~ "show you how Flower can be used" +#~ " to federate our centralized MXNet " +#~ "workload." +#~ msgstr "" +#~ "由于 MXNet 目前不支持联邦学习,因此无法轻松地直接将 MXNet " +#~ "用于联邦学习之中。Flower 与底层机器学习框架完全无关,因此它可用于任意联邦式机器学习工作。本节将向你展示如何使用 " +#~ "Flower 将我们的集中式 MXNet 改为联邦式训练。" -#~ msgid "server.strategy.Bulyan" -#~ msgstr "server.strategy.Bulyan" +#~ msgid "" +#~ "The concept to federate an existing " +#~ "workload is always the same and " +#~ "easy to understand. 
We have to " +#~ "start a *server* and then use the" +#~ " code in :code:`mxnet_mnist.py` for the " +#~ "*clients* that are connected to the " +#~ "*server*. The *server* sends model " +#~ "parameters to the clients. The *clients*" +#~ " run the training and update the " +#~ "parameters. The updated parameters are " +#~ "sent back to the *server* which " +#~ "averages all received parameter updates. " +#~ "This describes one round of the " +#~ "federated learning process and we repeat" +#~ " this for multiple rounds." +#~ msgstr "" +#~ "将现有模型框架联邦化的概念始终是相同的,也很容易理解。我们必须启动一个*服务器*,然后对连接到*服务器*的*客户端*使用 " +#~ ":code:`mxnet_mnist.py`中的代码。*服务器*向客户端发送模型参数,然后*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后会对所有收到的参数更新进行平均聚合。以上描述的是一轮联邦学习过程,我们将重复进行多轮学习。" -#~ msgid "Bulyan strategy implementation." -#~ msgstr "Bulyan策略的实施。" +#~ msgid "" +#~ "Finally, we will define our *client* " +#~ "logic in :code:`client.py` and build " +#~ "upon the previously defined MXNet " +#~ "training in :code:`mxnet_mnist.py`. Our " +#~ "*client* needs to import :code:`flwr`, " +#~ "but also :code:`mxnet` to update the " +#~ "parameters on our MXNet model:" +#~ msgstr "" +#~ "最后,我们将在 :code:`client.py` 中定义我们的 *client* " +#~ "逻辑,并以之前在 :code:`mxnet_mnist.py` 中定义的 MXNet " +#~ "训练为基础。我们的 *client* 不仅需要导入 :code:`flwr`,还需要导入 " +#~ ":code:`mxnet`,以更新 MXNet 模型的参数:" -#~ msgid "server.strategy.FedXgbNnAvg" -#~ msgstr "server.strategy.FedXgbNnAvg" +#~ msgid "" +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " :code:`flwr.client.Client` or " +#~ ":code:`flwr.client.NumPyClient`. Our implementation " +#~ "will be based on " +#~ ":code:`flwr.client.NumPyClient` and we'll call " +#~ "it :code:`MNISTClient`. :code:`NumPyClient` is " +#~ "slightly easier to implement than " +#~ ":code:`Client` if you use a framework" +#~ " with good NumPy interoperability (like " +#~ "PyTorch or MXNet) because it avoids " +#~ "some of the boilerplate that would " +#~ "otherwise be necessary. 
:code:`MNISTClient` " +#~ "needs to implement four methods, two " +#~ "methods for getting/setting model parameters," +#~ " one method for training the model," +#~ " and one method for testing the " +#~ "model:" +#~ msgstr "" +#~ "实现 Flower *client*基本上意味着要实现 " +#~ ":code:`flwr.client.Client` 或 " +#~ ":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " +#~ ":code:`flwr.client.NumPyClient`,并将其命名为 " +#~ ":code:`MNISTClient`。如果使用具有良好 NumPy 互操作性的框架(如 PyTorch" +#~ " 或 MXNet),:code:`NumPyClient` 比 " +#~ ":code:`Client`更容易实现,因为它避免了一些不必要的操作。:code:`MNISTClient` " +#~ "需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" -#~ msgid "Federated XGBoost [Ma et al., 2023] strategy." -#~ msgstr "Federated XGBoost [Ma 等人,2023] 策略。" +#~ msgid "transform MXNet :code:`NDArray`'s to NumPy :code:`ndarray`'s" +#~ msgstr "将 MXNet :code:`NDArray` 转换为 NumPy :code:`ndarray`" -#~ msgid "server.strategy.DPFedAvgAdaptive" -#~ msgstr "server.strategy.DPFedAvgAdaptive" +#~ msgid "" +#~ "The challenging part is to transform " +#~ "the MXNet parameters from :code:`NDArray` " +#~ "to :code:`NumPy Arrays` to make it " +#~ "readable for Flower." +#~ msgstr "" +#~ "具有挑战性的部分是将 MXNet 参数从 :code:`NDArray` 转换为 " +#~ ":code:`NumPy Arrays` 以便 Flower 可以读取。" #~ msgid "" -#~ "**Fix the incorrect return types of " -#~ "Strategy** " -#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" +#~ "The two :code:`NumPyClient` methods " +#~ ":code:`fit` and :code:`evaluate` make use " +#~ "of the functions :code:`train()` and " +#~ ":code:`test()` previously defined in " +#~ ":code:`mxnet_mnist.py`. So what we really " +#~ "do here is we tell Flower through" +#~ " our :code:`NumPyClient` subclass which of" +#~ " our already defined functions to " +#~ "call for training and evaluation. We " +#~ "included type annotations to give you" +#~ " a better understanding of the data" +#~ " types that get passed around." 
#~ msgstr "" -#~ "**修复策略的错误返回类型** " -#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" +#~ "这两个 :code:`NumPyClient` 方法 :code:`fit` 和 " +#~ ":code:`evaluate` 使用了之前在 :code:`mxnet_mnist.py` " +#~ "中定义的函数 :code:`train()` 和 :code:`test()`。因此,我们要做的就是通过" +#~ " :code:`NumPyClient` 子类告知 Flower " +#~ "在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让您更好地理解传递的数据类型。" #~ msgid "" -#~ "The types of the return values in" -#~ " the docstrings in two methods " -#~ "(`aggregate_fit` and `aggregate_evaluate`) now " -#~ "match the hint types in the code." +#~ "Having defined data loading, model " +#~ "architecture, training, and evaluation we " +#~ "can put everything together and train" +#~ " our :code:`Sequential` model on MNIST." #~ msgstr "" -#~ "两个方法(\"aggregate_fit \"和 " -#~ "\"aggregate_evaluate\")的文档说明中的返回值类型现在与代码中的提示类型一致。" +#~ "在定义了数据加载、模型架构、训练和评估之后,我们就可以将所有内容整合在一起,在 MNIST 上训练我们的 " +#~ ":code:`Sequential` 模型。" #~ msgid "" -#~ "**Update Flower Examples** " -#~ "([#2384](https://github.com/adap/flower/pull/2384),[#2425](https://github.com/adap/flower/pull/2425)," -#~ " [#2526](https://github.com/adap/flower/pull/2526))" +#~ "in each window (make sure that the" +#~ " server is still running before you" +#~ " do so) and see your MXNet " +#~ "project run federated learning across " +#~ "two clients. Congratulations!" +#~ msgstr "确保服务器仍在运行后,然后就能在每个窗口中看到 MXNet 项目在两个客户端上运行联邦学习了。祝贺!" + +#~ msgid "" +#~ "The full source code for this " +#~ "example: `MXNet: From Centralized To " +#~ "Federated (Code) " +#~ "`_. Our " +#~ "example is of course somewhat over-" +#~ "simplified because both clients load the" +#~ " exact same dataset, which isn't " +#~ "realistic. You're now prepared to " +#~ "explore this topic further. How about" +#~ " using a CNN or using a " +#~ "different dataset? How about adding more" +#~ " clients?" 
#~ msgstr "" -#~ "** 更新 Flower Examples** " -#~ "([#2384](https://github.com/adap/flower/pull/2384),[#2425](https://github.com/adap/flower/pull/2425)," -#~ " [#2526](https://github.com/adap/flower/pull/2526))" +#~ "此示例的完整源代码在:\"MXNet: From Centralized To " +#~ "Federated (Code) " +#~ "`_。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。现在您已经准备好进一步探讨了。使用" +#~ " CNN 或使用不同的数据集会如何?添加更多客户端会如何?" + +#~ msgid "with the following command sequence:" +#~ msgstr "使用以下命令序列:" #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()`. The string " -#~ ":code:`\"0.0.0.0:8080\"` tells the client " -#~ "which server to connect to. In our" -#~ " case we can run the server and" -#~ " the client on the same machine, " -#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" -#~ " we run a truly federated workload" -#~ " with the server and clients running" -#~ " on different machines, all that " -#~ "needs to change is the " -#~ ":code:`server_address` we pass to the " -#~ "client." +#~ "In case you are a researcher you" +#~ " might be just fine using the " +#~ "self-signed certificates generated using " +#~ "the scripts which are part of this" +#~ " guide." +#~ msgstr "如果你是一名研究人员,使用本指南中的脚本生成的自签名证书就可以了。" + +#~ msgid "" +#~ "We are now going to show how " +#~ "to write a sever which uses the" +#~ " previously generated scripts." +#~ msgstr "现在,我们将展示如何编写一个使用先前生成的脚本的服务器。" + +#~ msgid "" +#~ "When providing certificates, the server " +#~ "expects a tuple of three certificates." +#~ " :code:`Path` can be used to easily" +#~ " read the contents of those files " +#~ "into byte strings, which is the " +#~ "data type :code:`start_server` expects." 
#~ msgstr "" -#~ "对于客户端就需要做这么多。我们仅需要实现 " -#~ ":code:`Client`或者:code:`NumPyClient`然后调用:code:`fl.client.start_client()`。字符串" -#~ " :code:`\"0.0.0.0:8080\"` " -#~ "告诉客户端要连接到哪个服务器。在我们的例子中,我们可以在同一台机器上运行服务器和客户端,因此我们使用:code:`\"0.0.0.0:8080\"`。如果我们运行真正联邦学习的工作负载,服务器和客户端在不同的机器上运行,则需要更改的只是我们传递给客户端的" -#~ " server_address 。" +#~ "在提供证书时,服务器希望得到由三个证书组成的元组。 :code:`Path` " +#~ "可用于轻松地将这些文件的内容读取为字节字符串,这就是 :code:`start_server` 期望的数据类型。" + +#~ msgid "Flower server" +#~ msgstr "Flower 服务器" + +#~ msgid "flower-driver-api" +#~ msgstr "flower-driver-api" + +#~ msgid "flower-fleet-api" +#~ msgstr "flower-fleet-api" #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()`. The string " -#~ ":code:`\"[::]:8080\"` tells the client which" -#~ " server to connect to. In our " -#~ "case we can run the server and " -#~ "the client on the same machine, " -#~ "therefore we use :code:`\"[::]:8080\"`. If " -#~ "we run a truly federated workload " -#~ "with the server and clients running " -#~ "on different machines, all that needs" -#~ " to change is the :code:`server_address`" -#~ " we point the client at." +#~ ":py:obj:`start_driver `\\ " +#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" #~ msgstr "" -#~ "对于客户来说就是这样了。我们只需实现 :code:`Client` 或 " -#~ ":code:`NumPyClient` 并调用:code:`fl.client.start_client()` " -#~ "即可。字符串 :code:`\"[::]:8080\"` " -#~ "告诉客户端要连接到哪个服务器。在我们的例子中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " -#~ ":code:`\"[::]:8080\"`。如果我们运行真正联邦的工作负载,服务器和客户端运行在不同的机器上,则需要更改的只是我们指向客户端的" -#~ " server_address 。" +#~ ":py:obj:`start_driver `\\ " +#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" + +#~ msgid "Start a Flower Driver API server." 
+#~ msgstr "启动基于 Ray 的Flower模拟服务器。" #~ msgid "" -#~ "Let's now load the CIFAR-10 training " -#~ "and test set, partition them into " -#~ "ten smaller datasets (each split into" -#~ " training and validation set), and " -#~ "wrap the resulting partitions by " -#~ "creating a PyTorch ``DataLoader`` for " -#~ "each of them:" +#~ ":py:obj:`Driver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" #~ msgstr "" -#~ "现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成 10 " -#~ "个较小的数据集(每个数据集又分为训练集和验证集),并通过为每个数据集创建 PyTorch " -#~ "``DataLoader`` 来包装由此产生的分割集:" +#~ "Flower 1.0: ``start_server(..., " +#~ "config=flwr.server.ServerConfig(num_rounds=3, " +#~ "round_timeout=600.0), ...)``" -#~ msgid "|e1dd4b4129b040bea23a894266227080|" -#~ msgstr "|e1dd4b4129b040bea23a894266227080|" +#~ msgid "`Driver` class provides an interface to the Driver API." +#~ msgstr "`Driver` 类为驱动程序 API 提供了一个接口。" -#~ msgid "|c0d4cc6a442948dca8da40d2440068d9|" -#~ msgstr "|c0d4cc6a442948dca8da40d2440068d9|" +#~ msgid "" +#~ "The IPv4 or IPv6 address of the" +#~ " Driver API server. Defaults to " +#~ "`\"[::]:9091\"`." +#~ msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" -#~ msgid "|174e1e4fa1f149a19bfbc8bc1126f46a|" -#~ msgstr "|174e1e4fa1f149a19bfbc8bc1126f46a|" +#~ msgid ":py:obj:`close `\\ \\(\\)" +#~ msgstr "server.strategy.Strategy" -#~ msgid "|4e021a3dc08249d2a89daa3ab03c2714|" -#~ msgstr "|4e021a3dc08249d2a89daa3ab03c2714|" +#~ msgid "Disconnect from the SuperLink if connected." +#~ msgstr "如果已连接,请断开与超级链接的连接。" -#~ msgid "|e74a1d5ce7eb49688651f2167a59065b|" -#~ msgstr "|e74a1d5ce7eb49688651f2167a59065b|" +#~ msgid "start\\_driver" +#~ msgstr "启动客户端" -#~ msgid "|eb29ec4c7aef4e93976795ed72df647e|" -#~ msgstr "|eb29ec4c7aef4e93976795ed72df647e|" +#~ msgid "" +#~ "The IPv4 or IPv6 address of the" +#~ " Driver API server. Defaults to " +#~ "`\"[::]:8080\"`." 
+#~ msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" -#~ msgid "|c2f699d8ac484f5081721a6f1511f70d|" -#~ msgstr "|c2f699d8ac484f5081721a6f1511f70d|" +#~ msgid "" +#~ "A server implementation, either " +#~ "`flwr.server.Server` or a subclass thereof." +#~ " If no instance is provided, then " +#~ "`start_driver` will create one." +#~ msgstr "服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例,`start_server` 将创建一个。" -#~ msgid "|cf42accdacbf4e5eb4fa0503108ba7a7|" -#~ msgstr "|cf42accdacbf4e5eb4fa0503108ba7a7|" +#~ msgid "" +#~ "An implementation of the class " +#~ "`flwr.server.ClientManager`. If no implementation" +#~ " is provided, then `start_driver` will " +#~ "use `flwr.server.SimpleClientManager`." +#~ msgstr "" +#~ "抽象基类 `flwr.server.ClientManager` " +#~ "的实现。如果没有提供实现,`start_server` 将使用 " +#~ "`flwr.server.client_manager.SimpleClientManager`。" -#~ msgid "|5ec8356bc2564fa09178b1ceed5beccc|" -#~ msgstr "|5ec8356bc2564fa09178b1ceed5beccc|" +#~ msgid "The Driver object to use." +#~ msgstr "要使用的驱动程序对象。" -#~ msgid "|7c9329e97bd0430bad335ab605a897a7|" -#~ msgstr "|7c9329e97bd0430bad335ab605a897a7|" +#~ msgid "Starting a driver that connects to an insecure server:" +#~ msgstr "启动不安全的服务器:" -#~ msgid "|88002bbce1094ba1a83c9151df18f707|" -#~ msgstr "|88002bbce1094ba1a83c9151df18f707|" +#~ msgid "Starting a driver that connects to an SSL-enabled server:" +#~ msgstr "启动支持 SSL 的服务器:" -#~ msgid "|391766aee87c482c834c93f7c22225e2|" -#~ msgstr "|391766aee87c482c834c93f7c22225e2|" +#~ msgid "" +#~ ":py:obj:`run_simulation_from_cli " +#~ "`\\ \\(\\)" +#~ msgstr "" -#~ msgid "|93b9a15bd27f4e91b40f642c253dfaac|" -#~ msgstr "|93b9a15bd27f4e91b40f642c253dfaac|" +#~ msgid "Run Simulation Engine from the CLI." 
+#~ msgstr "" -#~ msgid "|a23d9638f96342ef9d25209951e2d564|" -#~ msgstr "|a23d9638f96342ef9d25209951e2d564|" +#~ msgid "run\\_simulation\\_from\\_cli" +#~ msgstr "运行模拟" -#~ msgid "Upload the whl (e.g., ``flwr-1.6.0-py3-none-any.whl``)" -#~ msgstr "上传 whl(例如 ``flwr-1.6.0-py3-none-any.whl``)" +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with MXNet to train a Sequential " +#~ "model on MNIST." +#~ msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 MXNet 在 MNIST 上训练序列模型。" + +#~ msgid "Quickstart MXNet" +#~ msgstr "快速入门 MXNet" #~ msgid "" -#~ "Change ``!pip install -q 'flwr[simulation]'" -#~ " torch torchvision matplotlib`` to ``!pip" -#~ " install -q 'flwr-1.6.0-py3-none-" -#~ "any.whl[simulation]' torch torchvision matplotlib``" +#~ "MXNet is no longer maintained and " +#~ "has been moved into `Attic " +#~ "`_. As a " +#~ "result, we would encourage you to " +#~ "use other ML frameworks alongside " +#~ "Flower, for example, PyTorch. This " +#~ "tutorial might be removed in future " +#~ "versions of Flower." #~ msgstr "" -#~ "将``!pip install -q 'flwr[simulation]' torch" -#~ " torchvision matplotlib``更改为``!pip install -q " -#~ "'flwr-1.6.0-py3-none-any.whl[simulation]' torch " -#~ "torch torchvision matplotlib``" #~ msgid "" -#~ "All that's left to do it to " -#~ "define a function that loads both " -#~ "model and data, creates a " -#~ ":code:`CifarClient`, and starts this client." -#~ " You load your data and model " -#~ "by using :code:`cifar.py`. Start " -#~ ":code:`CifarClient` with the function " -#~ ":code:`fl.client.start_numpy_client()` by pointing " -#~ "it at the same IP address we " -#~ "used in :code:`server.py`:" +#~ "In this tutorial, we will learn " +#~ "how to train a :code:`Sequential` model" +#~ " on MNIST using Flower and MXNet." 
+#~ msgstr "在本教程中,我们将学习如何使用 Flower 和 MXNet 在 MNIST 上训练 :code:`Sequential` 模型。" + +#~ msgid "Since we want to use MXNet, let's go ahead and install it:" +#~ msgstr "既然我们要使用 MXNet,那就继续安装吧:" + +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on MXNet´s `Hand-written Digit " +#~ "Recognition tutorial " +#~ "`_." #~ msgstr "" -#~ "剩下要做的就是定义一个加载模型和数据的函数,创建一个 :code:`CifarClient` 并启动该客户端。使用" -#~ " :code:`cifar.py` 加载数据和模型。使用函数 " -#~ ":code:`fl.client.start_numpy_client()` 启动 " -#~ ":code:`CifarClient`,将其指向我们在 :code:`server.py` 中使用的相同 " -#~ "IP 地址:" +#~ "现在,我们已经安装了所有依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练程序和网络架构基于 " +#~ "MXNet 的 `手写数字识别教程 " +#~ "`_\"。" #~ msgid "" -#~ "The :code:`VirtualClientEngine` schedules, launches" -#~ " and manages `virtual` clients. These " -#~ "clients are identical to `non-virtual`" -#~ " clients (i.e. the ones you launch" -#~ " via the command `flwr.client.start_numpy_client" -#~ " `_)" -#~ " in the sense that they can be" -#~ " configure by creating a class " -#~ "inheriting, for example, from " -#~ "`flwr.client.NumPyClient `_ and therefore " -#~ "behave in an identical way. In " -#~ "addition to that, clients managed by " -#~ "the :code:`VirtualClientEngine` are:" +#~ "In a file called :code:`client.py`, " +#~ "import Flower and MXNet related " +#~ "packages:" +#~ msgstr "在名为 :code:`client.py` 的文件中,导入 Flower 和 MXNet 相关软件包:" + +#~ msgid "In addition, define the device allocation in MXNet with:" +#~ msgstr "此外,还可以在 MXNet 中定义设备分配:" + +#~ msgid "" +#~ "We use MXNet to load MNIST, a " +#~ "popular image classification dataset of " +#~ "handwritten digits for machine learning. " +#~ "The MXNet utility :code:`mx.test_utils.get_mnist()`" +#~ " downloads the training and test " +#~ "data." 
#~ msgstr "" -#~ "代码:`VirtualClientEngine`调度、启动和管理`虚拟`客户端。这些客户端与 \"非虚拟 " -#~ "\"客户端(即通过命令 `flwr.client.start_numpy_client `_启动的客户端)完全相同,它们可以通过创建一个继承自 \"flwr.client.NumPyClient " -#~ "`_\" " -#~ "的类来配置,因此行为方式也完全相同。除此之外,由 :code:`VirtualClientEngine` " -#~ "管理的客户端还包括:" +#~ "我们使用 MXNet 加载 MNIST,这是一个用于机器学习的流行手写数字图像分类数据集。MXNet" +#~ " 工具 :code:`mx.test_utils.get_mnist()` 会下载训练和测试数据。" -#~ msgid "Example: Walk-Through PyTorch & MNIST" -#~ msgstr "实例: PyTorch 和 MNIST 的演练" +#~ msgid "" +#~ "Define the training and loss with " +#~ "MXNet. We train the model by " +#~ "looping over the dataset, measure the" +#~ " corresponding loss, and optimize it." +#~ msgstr "用 MXNet 定义训练和损失值。我们在数据集上循环训练模型,测量相应的损失值,并对其进行优化。" #~ msgid "" -#~ "In this tutorial we will learn, " -#~ "how to train a Convolutional Neural " -#~ "Network on MNIST using Flower and " -#~ "PyTorch." -#~ msgstr "在本教程中,我们将学习如何使用 Flower 和 PyTorch 在 MNIST 上训练卷积神经网络。" +#~ "Next, we define the validation of " +#~ "our machine learning model. We loop " +#~ "over the test set and measure both" +#~ " loss and accuracy on the test " +#~ "set." +#~ msgstr "接下来,我们定义机器学习模型的验证。我们在测试集上循环,测量测试集上的损失值和准确率。" #~ msgid "" -#~ "Since we want to use PyTorch to" -#~ " solve a computer vision task, let's" -#~ " go ahead an install PyTorch and " -#~ "the **torchvision** library:" -#~ msgstr "我们想用 PyTorch 来做计算机视觉任务,需要先安装 PyTorch 和 **torchvision** 库:" +#~ "After defining the training and testing" +#~ " of a MXNet machine learning model," +#~ " we use these functions to implement" +#~ " a Flower client." +#~ msgstr "在定义了 MXNet 机器学习模型的训练和测试后,我们使用这些函数实现了 Flower 客户端。" -#~ msgid "Ready... Set... Train!" -#~ msgstr "准备...设置...训练!" +#~ msgid "Our Flower clients will use a simple :code:`Sequential` model:" +#~ msgstr "我们的 Flower 客户端将使用简单的 :code:`Sequential` 模型:" #~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training with two " -#~ "clients and one server. 
Our training " -#~ "procedure and network architecture are " -#~ "based on PyTorch's `Basic MNIST Example" -#~ " `_. " -#~ "This will allow you see how easy" -#~ " it is to wrap your code with" -#~ " Flower and begin training in a " -#~ "federated way. We provide you with " -#~ "two helper scripts, namely *run-" -#~ "server.sh*, and *run-clients.sh*. Don't " -#~ "be afraid to look inside, they are" -#~ " simple enough =)." +#~ "After loading the dataset with " +#~ ":code:`load_data()` we perform one forward " +#~ "propagation to initialize the model and" +#~ " model parameters with :code:`model(init)`. " +#~ "Next, we implement a Flower client." #~ msgstr "" -#~ "现在我们已经安装了所有的依赖包,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练过程和网络架构基于 " -#~ "PyTorch 的 `Basic MNIST Example " -#~ "`_。您会发现用 " -#~ "Flower 来封装您的代码并进行联邦学习训练是多么容易。我们为您提供了两个辅助脚本,即 *run-" -#~ "server.sh* 和 *run-clients.sh*。别害怕,它们很简单 =)。" +#~ "使用 :code:`load_data()` 加载数据集后,我们会执行一次前向传播,使用 " +#~ ":code:`model(init)` 初始化模型和模型参数。接下来,我们实现一个 Flower " +#~ "客户端。" #~ msgid "" -#~ "Go ahead and launch on a terminal" -#~ " the *run-server.sh* script first as" -#~ " follows:" -#~ msgstr "首先在终端上启动 *run-server.sh* 脚本,如下所示:" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses MXNet." +#~ " Implementing :code:`NumPyClient` usually means" +#~ " defining the following methods " +#~ "(:code:`set_parameters` is optional though):" +#~ msgstr "" +#~ "Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用" +#~ " MXNet 时,它可以让您更轻松地实现 :code:`Client` 接口。实现 " +#~ ":code:`NumPyClient` 通常意味着定义以下方法(:code:`set_parameters` " +#~ "是可选的):" -#~ msgid "Now that the server is up and running, go ahead and launch the clients." -#~ msgstr "现在服务器已经启动并运行,请继续启动客户端。" +#~ msgid "They can be implemented in the following way:" +#~ msgstr "它们可以通过以下方式实现:" #~ msgid "" -#~ "Et voilà! 
You should be seeing the" -#~ " training procedure and, after a few" -#~ " iterations, the test accuracy for " -#~ "each client." -#~ msgstr "然后就可以了!您应该能看到训练过程,以及经过几次反复后,每个客户端的测试准确率。" +#~ "We can now create an instance of" +#~ " our class :code:`MNISTClient` and add " +#~ "one line to actually run this " +#~ "client:" +#~ msgstr "现在我们可以创建一个 :code:`MNISTClient` 类的实例,并添加一行来实际运行该客户端:" -#~ msgid "Now, let's see what is really happening inside." -#~ msgstr "现在,让我们看看里面到底发生了什么。" +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()` or " +#~ ":code:`fl.client.start_numpy_client()`. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." +#~ msgstr "" +#~ "这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient`" +#~ " 并调用 :code:`fl.client.start_client()` 或 " +#~ ":code:`fl.client.start_numpy_client()`。字符串 " +#~ ":code:`\"0.0.0.0:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用" +#~ " " +#~ ":code:`\"0.0.0.0:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是传递给客户端的" +#~ " :code:`server_address`。" #~ msgid "" -#~ "Inside the server helper script *run-" -#~ "server.sh* you will find the following" -#~ " code that basically runs the " -#~ ":code:`server.py`" -#~ msgstr "在服务器辅助脚本 *run-server.sh* 中,你可以找到以下代码,这些代码基本上都是运行 :code:`server.py` 的代码" +#~ "With both client and server ready, " +#~ "we can now run everything and see" +#~ " federated learning in action. Federated" +#~ " learning systems usually have a " +#~ "server and multiple clients. 
We " +#~ "therefore have to start the server " +#~ "first:" +#~ msgstr "客户端和服务器都准备就绪后,我们现在就可以运行一切,看看联邦学习的运行情况。联邦学习系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" #~ msgid "" -#~ "We can go a bit deeper and " -#~ "see that :code:`server.py` simply launches " -#~ "a server that will coordinate three " -#~ "rounds of training. Flower Servers are" -#~ " very customizable, but for simple " -#~ "workloads, we can start a server " -#~ "using the :ref:`start_server ` function and leave " -#~ "all the configuration possibilities at " -#~ "their default values, as seen below." +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/quickstart-mxnet`." #~ msgstr "" -#~ "我们可以再深入一点,:code:`server.py` 只是启动了一个服务器,该服务器将协调三轮训练。Flower " -#~ "服务器是非常容易修改的,但对于简单的工作,我们可以使用 :ref:`start_server `函数启动服务器,并将所有可能的配置保留为默认值,如下所示。" +#~ "恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " +#~ "`_ 可在 :code:`examples/quickstart-" +#~ "mxnet` 中找到。" -#~ msgid "" -#~ "Next, let's take a look at the " -#~ "*run-clients.sh* file. You will see " -#~ "that it contains the main loop " -#~ "that starts a set of *clients*." -#~ msgstr "接下来,让我们看看 *run-clients.sh* 文件。您会看到它包含了用来启动多个 *客户端* 的代码。" +#~ msgid ":code:`load_mnist()`" +#~ msgstr ":code:`load_mnist()`" -#~ msgid "" -#~ "**cid**: is the client ID. It is" -#~ " an integer that uniquely identifies " -#~ "client identifier." -#~ msgstr "**cid**:是客户 ID。它是一个整数,可唯一标识客户标识符。" +#~ msgid "Loads the MNIST dataset using OpenML" +#~ msgstr "使用 OpenML 加载 MNIST 数据集" -#~ msgid "**sever_address**: String that identifies IP and port of the server." 
-#~ msgstr "**sever_address**: 标识服务器 IP 和端口的字符串。" +#~ msgid ":code:`shuffle()`" +#~ msgstr ":code:`shuffle()`" + +#~ msgid "Shuffles data and its label" +#~ msgstr "对数据及其标签进行洗牌" + +#~ msgid ":code:`partition()`" +#~ msgstr ":code:`partition()`" + +#~ msgid "Splits datasets into a number of partitions" +#~ msgstr "将数据集分割成多个分区" #~ msgid "" -#~ "**nb_clients**: This defines the number " -#~ "of clients being created. This piece " -#~ "of information is not required by " -#~ "the client, but it helps us " -#~ "partition the original MNIST dataset to" -#~ " make sure that every client is " -#~ "working on unique subsets of both " -#~ "*training* and *test* sets." +#~ "We load the MNIST dataset from " +#~ "`OpenML " +#~ "`_, a" +#~ " popular image classification dataset of" +#~ " handwritten digits for machine learning." +#~ " The utility :code:`utils.load_mnist()` downloads" +#~ " the training and test data. The " +#~ "training set is split afterwards into" +#~ " 10 partitions with :code:`utils.partition()`." #~ msgstr "" -#~ "**nb_clients**: 这定义了正在创建的客户端数量。客户端并不需要这一信息,但它有助于我们对原始 " -#~ "MNIST 数据集进行划分,以确保每个客户端都在 *training* 和 *test*" -#~ " 数据集上有独立的数据。" +#~ "我们从 `OpenML `_ 中加载 " +#~ "MNIST 数据集,这是一个用于机器学习的流行手写数字图像分类数据集。实用程序 " +#~ ":code:`utils.load_mnist()` 下载训练和测试数据。然后使用 " +#~ ":code:`utils.partition()`将训练集分割成 10 个分区。" -#~ msgid "" -#~ "Again, we can go deeper and look" -#~ " inside :code:`flwr_example/quickstart-" -#~ "pytorch/client.py`. After going through the" -#~ " argument parsing code at the " -#~ "beginning of our :code:`main` function, " -#~ "you will find a call to " -#~ ":code:`mnist.load_data`. This function is " -#~ "responsible for partitioning the original " -#~ "MNIST datasets (*training* and *test*) " -#~ "and returning a :code:`torch.utils.data.DataLoader`" -#~ " s for each of them. 
We then" -#~ " instantiate a :code:`PytorchMNISTClient` object" -#~ " with our client ID, our DataLoaders," -#~ " the number of epochs in each " -#~ "round, and which device we want to" -#~ " use for training (CPU or GPU)." +#~ msgid "Let's get stated!" +#~ msgstr "让我们开始吧!" + +#~ msgid "|2b5c62c529f6416f840c594cce062fbb|" #~ msgstr "" -#~ "我们可以深入看一下 :code:`flwr_example/quickstart-" -#~ "pytorch/client.py`。查看 :code:`main` 函数开头的参数解析代码后,你会发现一个对" -#~ " :code:`mnist.load_data` 的调用。该函数负责分割原始 MNIST " -#~ "数据集(*training* 和 *test*),并为每个数据集返回一个 " -#~ ":code:`torch.utils.data.DataLoader` 。然后,我们实例化一个 " -#~ ":code:`PytorchMNISTClient` 对象,其中包含我们的客户端 ID、 " -#~ "DataLoader、每一轮中的遍历数,以及我们希望用于训练的设备(CPU 或 GPU)。" -#~ msgid "" -#~ "The :code:`PytorchMNISTClient` object when " -#~ "finally passed to :code:`fl.client.start_client` " -#~ "along with the server's address as " -#~ "the training process begins." +#~ msgid "|90b334680cb7467d9a04d39b8e8dca9f|" #~ msgstr "" -#~ "当训练过程开始时,:code:`PytorchMNISTClient` 对象会连同服务器地址一起传递给 " -#~ ":code:`fl.client.start_client`。" -#~ msgid "A Closer Look" -#~ msgstr "仔细看一下" +#~ msgid "|65764ceee89f4335bfd93fd0b115e831|" +#~ msgstr "" -#~ msgid "" -#~ "Now, let's look closely into the " -#~ ":code:`PytorchMNISTClient` inside :code:`flwr_example" -#~ ".quickstart-pytorch.mnist` and see what it" -#~ " is doing:" +#~ msgid "|d97319ec28bb407ea0ab9705e38f3bcf|" #~ msgstr "" -#~ "现在,让我们仔细研究一下 :code:`flwr_example.quickstart-pytorch.mnist`" -#~ " 中的 :code:`PytorchMNISTClient`,看看它在做什么:" -#~ msgid "" -#~ "The first thing to notice is that" -#~ " :code:`PytorchMNISTClient` instantiates a CNN" -#~ " model inside its constructor" -#~ msgstr "首先要注意的是 :code:`PytorchMNISTClient` 在其构造函数中实例化了一个 CNN 模型" +#~ msgid "|11e95ac83a8548d8b3505b4663187d07|" +#~ msgstr "" -#~ msgid "" -#~ "The code for the CNN is available" -#~ " under :code:`quickstart-pytorch.mnist` and " -#~ "it is reproduced below. It is the" -#~ " same network found in `Basic MNIST" -#~ " Example " -#~ "`_." 
+#~ msgid "|1dab2f3a23674abc8a6731f20fa10730|" +#~ msgstr "" + +#~ msgid "|7f0ee162da38450788493a21627306f7|" #~ msgstr "" -#~ "CNN 的代码可在 :code:`quickstart-pytorch.mnist` " -#~ "下找到,现复制如下。它与 `Basic MNIST Example " -#~ "`_中的网络相同。" -#~ msgid "" -#~ "The second thing to notice is that" -#~ " :code:`PytorchMNISTClient` class inherits from" -#~ " the :code:`fl.client.Client`, and hence it" -#~ " must implement the following methods:" +#~ msgid "|296a1fb72c514b23b3d8905ff0ff98c6|" #~ msgstr "" -#~ "第二件要注意的事是 :code:`PytorchMNISTClient` 类继承自 " -#~ ":code:`fl.client.Client`,因此它必须实现以下方法:" -#~ msgid "" -#~ "When comparing the abstract class to " -#~ "its derived class :code:`PytorchMNISTClient` " -#~ "you will notice that :code:`fit` calls" -#~ " a :code:`train` function and that " -#~ ":code:`evaluate` calls a :code:`test`: " -#~ "function." +#~ msgid "|5b1408eec0d746cdb91162a9107b6089|" #~ msgstr "" -#~ "将抽象类与其派生类 :code:`PytorchMNISTClient` 进行比较时,您会发现 " -#~ ":code:`fit` 调用了一个 :code:`train` 函数,而 " -#~ ":code:`evaluate` 则调用了一个 :code:`test`: 函数。" -#~ msgid "" -#~ "These functions can both be found " -#~ "inside the same :code:`quickstart-" -#~ "pytorch.mnist` module:" -#~ msgstr "这些函数都可以在同一个 :code:`quickstart-pytorch.mnist` 模块中找到:" +#~ msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" +#~ msgstr "" -#~ msgid "" -#~ "Observe that these functions encapsulate " -#~ "regular training and test loops and " -#~ "provide :code:`fit` and :code:`evaluate` with" -#~ " final statistics for each round. You" -#~ " could substitute them with your " -#~ "custom train and test loops and " -#~ "change the network architecture, and the" -#~ " entire example would still work " -#~ "flawlessly. As a matter of fact, " -#~ "why not try and modify the code" -#~ " to an example of your liking?" +#~ msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" #~ msgstr "" -#~ "请注意,这些函数封装了常规的训练和测试循环,并为 :code:`fit` 和 " -#~ ":code:`evaluate` " -#~ "提供了每轮的最终统计数据。您可以用自定义的训练和测试循环来替代它们,并改变网络结构,整个示例仍然可以完美运行。事实上,为什么不按照自己的喜好修改代码呢?" 
-#~ msgid "Give It a Try" -#~ msgstr "试试看" +#~ msgid "|ec1fe880237247e0975f52766775ab84|" +#~ msgstr "" -#~ msgid "" -#~ "Looking through the quickstart code " -#~ "description above will have given a " -#~ "good understanding of how *clients* and" -#~ " *servers* work in Flower, how to " -#~ "run a simple experiment, and the " -#~ "internals of a client wrapper. Here " -#~ "are a few things you could try " -#~ "on your own and get more " -#~ "experience with Flower:" +#~ msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" #~ msgstr "" -#~ "通过上面的快速入门代码描述,你将对 Flower " -#~ "中*客户端*和*服务器*的工作方式、如何运行一个简单的实验以及客户端封装器的内部结构有一个很好的了解。您可以自己尝试以下内容,以获得更多使用" -#~ " Flower 的经验:" -#~ msgid "" -#~ "Try and change :code:`PytorchMNISTClient` so" -#~ " it can accept different architectures." -#~ msgstr "尝试修改 :code:`PytorchMNISTClient`,使其可以接受不同的架构。" +#~ msgid "|ff726bc5505e432388ee2fdd6ef420b9|" +#~ msgstr "" #~ msgid "" -#~ "Modify the :code:`train` function so " -#~ "that it accepts different optimizers" -#~ msgstr "修改 :code:`train` 函数,使其接受不同的优化器" +#~ "Currently, Flower provides two images, a" +#~ " ``base`` image and a ``superlink`` " +#~ "image. The base image, as the name" +#~ " suggests, contains basic dependencies that" +#~ " the SuperLink needs. This includes " +#~ "system dependencies, Python and Python " +#~ "tools. The SuperLink image is based " +#~ "on the base image, but it " +#~ "additionally installs the SuperLink using " +#~ "``pip``." +#~ msgstr "" +#~ "目前,Flower " +#~ "提供两个镜像,一个基础镜像和一个服务器镜像。不久还将推出客户端镜像。基础镜像,顾名思义,包含服务器和客户端都需要的基本依赖项。其中包括系统依赖项、Python" +#~ " 和 Python 工具。服务器镜像基于基础镜像,但它会使用 ``pip`` 额外安装" +#~ " Flower 服务器。" -#~ msgid "" -#~ "Modify the :code:`test` function so that" -#~ " it proves not only the top-1 " -#~ "(regular accuracy) but also the top-5" -#~ " accuracy?" -#~ msgstr "修改 :code:`test` 函数,使其不仅能输出前 1 名(常规精确度),还能证明前 5 名的精确度?" +#~ msgid "``3.11``" +#~ msgstr "``1.0.0rc1``" -#~ msgid "" -#~ "Go larger! Try to adapt the code" -#~ " to larger images and datasets. 
Why" -#~ " not try training on ImageNet with" -#~ " a ResNet-50?" -#~ msgstr "让我们尝试让代码适应更大的图像和数据集。为什么不尝试使用 ResNet-50 在 ImageNet 上进行训练呢?" +#~ msgid "Defaults to ``22.04``." +#~ msgstr "默认为 ``22.04``。" -#~ msgid "You are ready now. Enjoy learning in a federated way!" -#~ msgstr "您现在已经准备就绪。尽情享受联邦学习的乐趣吧!" +#~ msgid "Building the SuperLink image" +#~ msgstr "启动服务器" -#~ msgid "Differential privacy" -#~ msgstr "差别隐私" +#~ msgid "Defaults to ``flwr/base``." +#~ msgstr "默认为 ``flwr/server``。" -#~ msgid "" -#~ "Flower provides differential privacy (DP) " -#~ "wrapper classes for the easy integration" -#~ " of the central DP guarantees " -#~ "provided by DP-FedAvg into training " -#~ "pipelines defined in any of the " -#~ "various ML frameworks that Flower is " -#~ "compatible with." -#~ msgstr "" -#~ "Flower 提供了差分隐私 (DP) 封装类,可将 DP-FedAvg " -#~ "提供的核心 DP 轻松集成到 Flower 兼容的各种 ML " -#~ "框架中定义的训练模式中。" +#~ msgid "The Python version of the base image." +#~ msgstr "基础镜像的存储库名称。" -#~ msgid "" -#~ "Please note that these components are" -#~ " still experimental; the correct " -#~ "configuration of DP for a specific " -#~ "task is still an unsolved problem." -#~ msgstr "请注意,这些组件仍处于试验阶段,如何为特定任务正确配置 DP 仍是一个尚未解决的问题。" +#~ msgid "Defaults to ``py3.11``." +#~ msgstr "默认为 ``22.04``。" -#~ msgid "" -#~ "The name DP-FedAvg is misleading " -#~ "since it can be applied on top " -#~ "of any FL algorithm that conforms " -#~ "to the general structure prescribed by" -#~ " the FedOpt family of algorithms." -#~ msgstr "DP-FedAvg 这个名称容易引起误解,因为它可以应用于任何符合 FedOpt 系列算法规定的一般结构的 FL 算法之上。" +#~ msgid "Defaults to ``ubuntu22.04``." +#~ msgstr "默认为 ``py3.11-ubuntu22.04``。" -#~ msgid "DP-FedAvg" -#~ msgstr "DP-FedAvg" +#~ msgid "Defaults to ``flwr``." +#~ msgstr "默认为 ``flwr/server``。" #~ msgid "" -#~ "DP-FedAvg, originally proposed by " -#~ "McMahan et al. [mcmahan]_ and extended" -#~ " by Andrew et al. [andrew]_, is " -#~ "essentially FedAvg with the following " -#~ "modifications." 
-#~ msgstr "DP-FedAvg 最初由McMahan等人提出,并由Andrew等人加以扩展。" +#~ "The name of image is ``flwr_superlink``" +#~ " and the tag ``0.1.0``. Remember that" +#~ " the build arguments as well as " +#~ "the name and tag can be adapted" +#~ " to your needs. These values serve" +#~ " as examples only." +#~ msgstr "图像名称为 ``flwr_server``,标记为 ``0.1.0``。请记住,编译参数以及名称和标记都可以根据需要进行调整。这些值仅供参考。" + +#~ msgid "Creating New Messages" +#~ msgstr "创建新信息" #~ msgid "" -#~ "**Clipping** : The influence of each " -#~ "client's update is bounded by clipping" -#~ " it. This is achieved by enforcing" -#~ " a cap on the L2 norm of " -#~ "the update, scaling it down if " -#~ "needed." -#~ msgstr "**裁剪** : 裁剪会影响到每个客户端的模型参数。具体做法是对参数的 L2 准则设置上限,必要时将其缩减。" +#~ "This is a simple guide for " +#~ "creating a new type of message " +#~ "between the server and clients in " +#~ "Flower." +#~ msgstr "这是一个如何用Flower在服务器和客户端之间创建新类型的信息的简要指导。" #~ msgid "" -#~ "**Noising** : Gaussian noise, calibrated " -#~ "to the clipping threshold, is added " -#~ "to the average computed at the " -#~ "server." -#~ msgstr "**噪声** : 在服务器计算出的平均值中加入高斯噪声,该噪声根据剪切阈值进行校准。" +#~ "Let's suppose we have the following " +#~ "example functions in :code:`server.py` and " +#~ ":code:`numpy_client.py`..." +#~ msgstr "假设我们在脚本code:`server.py`和code:`numpy_client.py`中有以下的示例函数..." + +#~ msgid "Server's side:" +#~ msgstr "在服务器端:" + +#~ msgid "Client's side:" +#~ msgstr "在客户端:" #~ msgid "" -#~ "The distribution of the update norm " -#~ "has been shown to vary from " -#~ "task-to-task and to evolve as " -#~ "training progresses. This variability is " -#~ "crucial in understanding its impact on" -#~ " differential privacy guarantees, emphasizing " -#~ "the need for an adaptive approach " -#~ "[andrew]_ that continuously adjusts the " -#~ "clipping threshold to track a " -#~ "prespecified quantile of the update norm" -#~ " distribution." 
-#~ msgstr "事实证明,参数更新准则的分布会随着任务的不同而变化,并随着训练的进展而演变。因此,我们采用了一种自适应方法,该方法会不断调整剪切阈值,以跟踪参数更新准则分布的预设量化值。" +#~ "Let's now see what we need to " +#~ "implement in order to get this " +#~ "simple function between the server and" +#~ " client to work!" +#~ msgstr "现在让我们来看看,为了让服务器和客户端之间的这个简单的函数正常工作,我们需要实现哪些功能!" -#~ msgid "Simplifying Assumptions" -#~ msgstr "简化假设" +#~ msgid "Message Types for Protocol Buffers" +#~ msgstr "协议缓冲区的信息类型" #~ msgid "" -#~ "We make (and attempt to enforce) a" -#~ " number of assumptions that must be" -#~ " satisfied to ensure that the " -#~ "training process actually realizes the " -#~ ":math:`(\\epsilon, \\delta)` guarantees the " -#~ "user has in mind when configuring " -#~ "the setup." +#~ "The first thing we need to do " +#~ "is to define a message type for" +#~ " the RPC system in :code:`transport.proto`." +#~ " Note that we have to do it " +#~ "for both the request and response " +#~ "messages. For more details on the " +#~ "syntax of proto3, please see the " +#~ "`official documentation `_." #~ msgstr "" -#~ "我们提出(并试图执行)了一系列必须满足的假设,以确保训练过程真正实现用户在配置设置时所定的 " -#~ ":math:`(\\epsilon,\\delta)` 。" +#~ "我们需要做的第一件事是在脚本code:`transport.proto`中定义 RPC " +#~ "系统的消息类型。请注意,我们必须对请求信息和响应信息都这样做。有关 proto3 语法的更多详情,请参阅官方文档" +#~ " `_。" -#~ msgid "" -#~ "**Fixed-size subsampling** :Fixed-size " -#~ "subsamples of the clients must be " -#~ "taken at each round, as opposed to" -#~ " variable-sized Poisson subsamples." -#~ msgstr "** 固定大小的子样本** :与可变大小的泊松分布子样本相比,每轮必须抽取固定大小的客户端子样本。" +#~ msgid "Within the :code:`ServerMessage` block:" +#~ msgstr "在 :code:`ServerMessage` 代码块中:" -#~ msgid "" -#~ "**Unweighted averaging** : The contributions" -#~ " from all the clients must weighted" -#~ " equally in the aggregate to " -#~ "eliminate the requirement for the server" -#~ " to know in advance the sum of" -#~ " the weights of all clients available" -#~ " for selection." 
-#~ msgstr "**非加权平均**: 所有客户端的贡献必须加权相等,这样服务器就不需要事先知道所有客户的权重总和。" +#~ msgid "Within the ClientMessage block:" +#~ msgstr "在 ClientMessage 代码块中:" #~ msgid "" -#~ "**No client failures** : The set " -#~ "of available clients must stay constant" -#~ " across all rounds of training. In" -#~ " other words, clients cannot drop out" -#~ " or fail." -#~ msgstr "**没有失败的客户端** : 在各轮训练中,可用客户端的数量必须保持不变。换句话说,客户端不能退出或失败。" +#~ "Make sure to also add a field " +#~ "of the newly created message type " +#~ "in :code:`oneof msg`." +#~ msgstr "确保在 :code:`oneof msg` 中也添加一个新创建的消息类型字段。" -#~ msgid "" -#~ "The first two are useful for " -#~ "eliminating a multitude of complications " -#~ "associated with calibrating the noise to" -#~ " the clipping threshold, while the " -#~ "third one is required to comply " -#~ "with the assumptions of the privacy " -#~ "analysis." -#~ msgstr "前两种方法有助于消除将噪声校准为削波阈值所带来的诸多复杂问题,而第三种方法则需要符合隐私分析的假设。" +#~ msgid "Once that is done, we will compile the file with:" +#~ msgstr "完成后,我们将使用:" + +#~ msgid "If it compiles successfully, you should see the following message:" +#~ msgstr "如果编译成功,你应该会看到以下信息:" + +#~ msgid "Serialization and Deserialization Functions" +#~ msgstr "序列化和反序列化函数" #~ msgid "" -#~ "These restrictions are in line with " -#~ "constraints imposed by Andrew et al. " -#~ "[andrew]_." -#~ msgstr "这些限制与 Andrew 等人所施加的限制一致。" +#~ "Our next step is to add functions" +#~ " to serialize and deserialize Python " +#~ "datatypes to or from our defined " +#~ "RPC message types. You should add " +#~ "these functions in :code:`serde.py`." 
+#~ msgstr "" +#~ "下一步是添加函数,以便将 Python 数据类型序列化和反序列化为我们定义的 RPC " +#~ "消息类型或从我们定义的 RPC 消息类型反序列化和反序列化 Python 数据类型。您应该在" +#~ " :code:`serde.py` 中添加这些函数。" -#~ msgid "Customizable Responsibility for Noise injection" -#~ msgstr "可定制的噪声注入" +#~ msgid "The four functions:" +#~ msgstr "四种函数:" + +#~ msgid "Sending the Message from the Server" +#~ msgstr "从服务器发送信息" #~ msgid "" -#~ "In contrast to other implementations " -#~ "where the addition of noise is " -#~ "performed at the server, you can " -#~ "configure the site of noise injection" -#~ " to better match your threat model." -#~ " We provide users with the " -#~ "flexibility to set up the training " -#~ "such that each client independently adds" -#~ " a small amount of noise to the" -#~ " clipped update, with the result that" -#~ " simply aggregating the noisy updates " -#~ "is equivalent to the explicit addition" -#~ " of noise to the non-noisy " -#~ "aggregate at the server." -#~ msgstr "与其他在服务器上添加噪声的实现方法不同,您可以配置噪声注入的位置,以便更好地匹配您的威胁模型。我们为用户提供了设置训练的灵活性,使每个客户端都能独立地为剪切参数更新添加少量噪声,这样,只需聚合噪声更新,就相当于在服务器上为非噪声聚合添加噪声了。" +#~ "Now write the request function in " +#~ "your Client Proxy class (e.g., " +#~ ":code:`grpc_client_proxy.py`) using the serde " +#~ "functions you just created:" +#~ msgstr "现在,在客户端代理类(例如 :code:`grpc_client_proxy.py`)中使用刚才创建的 serde 函数编写请求函数:" + +#~ msgid "Receiving the Message by the Client" +#~ msgstr "由客户端接收信息" #~ msgid "" -#~ "To be precise, if we let :math:`m`" -#~ " be the number of clients sampled " -#~ "each round and :math:`\\sigma_\\Delta` be " -#~ "the scale of the total Gaussian " -#~ "noise that needs to be added to" -#~ " the sum of the model updates, " -#~ "we can use simple maths to show" -#~ " that this is equivalent to each " -#~ "client adding noise with scale " -#~ ":math:`\\sigma_\\Delta/\\sqrt{m}`." +#~ "Last step! Modify the code in " +#~ ":code:`message_handler.py` to check the field" +#~ " of your message and call the " +#~ ":code:`example_response` function. 
Remember to " +#~ "use the serde functions!" #~ msgstr "" -#~ "准确地说,我们假设每轮采样的客户端数量为:math:`m`,:math:`\\sigma_\\Delta` " -#~ "为需要添加到模型更新总和中的总高斯噪声的规模,我们就可以用简单的数学方法证明了,这相当于每个客户端都添加了规模为 " -#~ ":math:`\\sigma_\\Delta/\\sqrt{m}` 的噪声。" +#~ "最后一步 修改 :code:`message_handler.py` 中的代码,检查信息的字段并调用" +#~ " :code:`example_response` 函数。记住使用 serde 函数!" -#~ msgid "Wrapper-based approach" -#~ msgstr "基于封装的方法" +#~ msgid "Within the handle function:" +#~ msgstr "在句柄函数内:" + +#~ msgid "And add a new function:" +#~ msgstr "并增加一个新函数:" + +#~ msgid "Hopefully, when you run your program you will get the intended result!" +#~ msgstr "希望您在运行程序时能得到预期的结果!" + +#~ msgid ":py:obj:`run_driver_api `\\ \\(\\)" +#~ msgstr ":py:obj:`run_driver_api `\\ \\(\\)" + +#~ msgid "Run Flower server (Driver API)." +#~ msgstr "flower-driver-api" + +#~ msgid ":py:obj:`run_fleet_api `\\ \\(\\)" +#~ msgstr ":py:obj:`run_fleet_api `\\ \\(\\)" + +#~ msgid "Run Flower server (Fleet API)." +#~ msgstr "Flower 服务器。" + +#~ msgid "Unreleased" +#~ msgstr "尚未发布" + +#~ msgid "|d8bf04f23d9b46d8a23cc6f4887d7873|" +#~ msgstr "|d8bf04f23d9b46d8a23cc6f4887d7873|" + +#~ msgid "|5aa1711387d74d0f8b9c499e1a51627e|" +#~ msgstr "|5aa1711387d74d0f8b9c499e1a51627e|" + +#~ msgid "|2bc8e069228d4873804061ff4a95048c|" +#~ msgstr "|2bc8e069228d4873804061ff4a95048c|" + +#~ msgid "|c258488766324dc9a6807f0e7c4fd5f4|" +#~ msgstr "|c258488766324dc9a6807f0e7c4fd5f4|" + +#~ msgid "|d5f962c3f4ec48529efda980868c14b0|" +#~ msgstr "|d5f962c3f4ec48529efda980868c14b0|" + +#~ msgid "|a5eccea18d4c43a68b54b65043cabef8|" +#~ msgstr "|a5eccea18d4c43a68b54b65043cabef8|" + +#~ msgid "|f17662f7df2d42f68cac70a1fdeda8a7|" +#~ msgstr "|f17662f7df2d42f68cac70a1fdeda8a7|" + +#~ msgid "|241fc906441a4f038c625a19d30d01b2|" +#~ msgstr "|241fc906441a4f038c625a19d30d01b2|" + +#~ msgid "|0aa5aa05810b44b6a835cecce28f3137|" +#~ msgstr "|0aa5aa05810b44b6a835cecce28f3137|" + +#~ msgid "|c742940dd4bf4de09d8d0d5e8d179638|" +#~ msgstr "|c742940dd4bf4de09d8d0d5e8d179638|" + +#~ msgid 
"|1f169ab4601a47e1a226f1628f4ebddb|" +#~ msgstr "|1f169ab4601a47e1a226f1628f4ebddb|" + +#~ msgid "|12cfa9cde14440ecb8c8f6c1d7185bec|" +#~ msgstr "|12cfa9cde14440ecb8c8f6c1d7185bec|" + +#~ msgid "|72939caf6e294b0986fee6dde96614d7|" +#~ msgstr "|72939caf6e294b0986fee6dde96614d7|" + +#~ msgid "|83a8daee45da4a98b8d6f24ae098fc50|" +#~ msgstr "|83a8daee45da4a98b8d6f24ae098fc50|" + +#~ msgid "Edge Client Engine" +#~ msgstr "边缘客户端引擎" #~ msgid "" -#~ "Introducing DP to an existing workload" -#~ " can be thought of as adding an" -#~ " extra layer of security around it." -#~ " This inspired us to provide the " -#~ "additional server and client-side logic" -#~ " needed to make the training process" -#~ " differentially private as wrappers for " -#~ "instances of the :code:`Strategy` and " -#~ ":code:`NumPyClient` abstract classes respectively." -#~ " This wrapper-based approach has the" -#~ " advantage of being easily composable " -#~ "with other wrappers that someone might" -#~ " contribute to the Flower library in" -#~ " the future, e.g., for secure " -#~ "aggregation. Using Inheritance instead can " -#~ "be tedious because that would require" -#~ " the creation of new sub- classes " -#~ "every time a new class implementing " -#~ ":code:`Strategy` or :code:`NumPyClient` is " -#~ "defined." 
-#~ msgstr "" -#~ "在现有工作负载中引入 DP " -#~ "可以被认为是在其周围增加了一层额外的安全性。受此启发,我们提供了额外的服务器端和客户端逻辑,分别作为 " -#~ ":code:`Strategy` 和 :code:`NumPyClient` " -#~ "抽象类实例的封装器,使训练过程具有不同的隐私性。这种基于封装器的方法的优点是可以很容易地与将来有人贡献给 Flower " -#~ "的其他封装器(例如用于安全聚合的封装器)进行组合。使用继承可能会比较繁琐,因为每次定义实现 :code:`Strategy`" -#~ " 或 :code:`NumPyClient` 的新类时,都需要创建新的子类。" +#~ "`Flower `_ core framework " +#~ "architecture with Edge Client Engine" +#~ msgstr "具有边缘客户端引擎的`Flower `核心架构" + +#~ msgid "Virtual Client Engine" +#~ msgstr "虚拟客户端引擎" #~ msgid "" -#~ "The first version of our solution " -#~ "was to define a decorator whose " -#~ "constructor accepted, among other things, " -#~ "a boolean-valued variable indicating " -#~ "whether adaptive clipping was to be " -#~ "enabled or not. We quickly realized " -#~ "that this would clutter its " -#~ ":code:`__init__()` function with variables " -#~ "corresponding to hyperparameters of adaptive" -#~ " clipping that would remain unused " -#~ "when it was disabled. A cleaner " -#~ "implementation could be achieved by " -#~ "splitting the functionality into two " -#~ "decorators, :code:`DPFedAvgFixed` and " -#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" -#~ " classing the former. The constructors " -#~ "for both classes accept a boolean " -#~ "parameter :code:`server_side_noising`, which, as " -#~ "the name suggests, determines where " -#~ "noising is to be performed." 
-#~ msgstr "" -#~ "我们的第一版解决方案是定义一个装饰器,其构造函数接受一个布尔值变量,表示是否启用自适应剪裁。我们很快意识到,这样会使其 " -#~ ":code:`__init__()` " -#~ "函数中与自适应裁剪超参数相对应的变量变得杂乱无章,而这些变量在自适应裁剪被禁用时将保持未使用状态。要实现更简洁的功能,可以将该功能拆分为两个装饰器,即" -#~ " :code:`DPFedAvgFixed` 和 " -#~ ":code:`DPFedAvgAdaptive`,后者是前者的子类。这两个类的构造函数都接受一个布尔参数 " -#~ ":code:`server_side_noising`,顾名思义,它决定了在哪里加噪声。" +#~ "`Flower `_ core framework " +#~ "architecture with Virtual Client Engine" +#~ msgstr "具有虚拟客户端引擎的`Flower `核心架构" + +#~ msgid "Virtual Client Engine and Edge Client Engine in the same workload" +#~ msgstr "可同步进行的虚拟客户端引擎和边缘客户端引擎" #~ msgid "" -#~ "The server-side capabilities required " -#~ "for the original version of DP-" -#~ "FedAvg, i.e., the one which performed" -#~ " fixed clipping, can be completely " -#~ "captured with the help of wrapper " -#~ "logic for just the following two " -#~ "methods of the :code:`Strategy` abstract " -#~ "class." -#~ msgstr "" -#~ "只需对 :code:`Strategy` 抽象类的以下两个方法进行封装,就能完全捕获 DP-" -#~ "FedAvg 原始版本(即执行固定剪裁的版本)所需的服务器端功能。" +#~ "`Flower `_ core framework " +#~ "architecture with both Virtual Client " +#~ "Engine and Edge Client Engine" +#~ msgstr "具有虚拟客户端引擎和边缘客户端引擎的`Flower `核心架构" + +#~ msgid "Clone the flower repository." +#~ msgstr "**叉花仓库**" #~ msgid "" -#~ ":code:`configure_fit()` : The config " -#~ "dictionary being sent by the wrapped " -#~ ":code:`Strategy` to each client needs to" -#~ " be augmented with an additional " -#~ "value equal to the clipping threshold" -#~ " (keyed under :code:`dpfedavg_clip_norm`) and," -#~ " if :code:`server_side_noising=true`, another one" -#~ " equal to the scale of the " -#~ "Gaussian noise that needs to be " -#~ "added at the client (keyed under " -#~ ":code:`dpfedavg_noise_stddev`). This entails " -#~ "*post*-processing of the results returned " -#~ "by the wrappee's implementation of " -#~ ":code:`configure_fit()`." +#~ "Please follow the first section on " +#~ ":doc:`Run Flower using Docker ` which " +#~ "covers this step in more detail." 
#~ msgstr "" -#~ ":code:`configure_fit()` :由封装的 :code:`Strategy` " -#~ "发送到每个客户端的配置字典需要使用等于裁剪阈值的附加值(在 :code:`dpfedavg_clip_norm` " -#~ "下键入)进行扩充。并且,如果 " -#~ "server_side_noising=true,则另一个值等于需要在客户端添加的高斯噪声的大小(在 " -#~ "dpfedavg_noise_stddev 下键入)。这需要对封装后的configure_fit() " -#~ "所返回的结果进行后处理。" +#~ "请阅读 :doc:`Run Flower using Docker " +#~ "` " +#~ "的第一节,其中更详细地介绍了这一步骤。" -#~ msgid "" -#~ ":code:`aggregate_fit()`: We check whether any" -#~ " of the sampled clients dropped out" -#~ " or failed to upload an update " -#~ "before the round timed out. In " -#~ "that case, we need to abort the" -#~ " current round, discarding any successful" -#~ " updates that were received, and move" -#~ " on to the next one. On the " -#~ "other hand, if all clients responded " -#~ "successfully, we must force the " -#~ "averaging of the updates to happen " -#~ "in an unweighted manner by intercepting" -#~ " the :code:`parameters` field of " -#~ ":code:`FitRes` for each received update " -#~ "and setting it to 1. Furthermore, " -#~ "if :code:`server_side_noising=true`, each update " -#~ "is perturbed with an amount of " -#~ "noise equal to what it would have" -#~ " been subjected to had client-side" -#~ " noising being enabled. This entails " -#~ "*pre*-processing of the arguments to " -#~ "this method before passing them on " -#~ "to the wrappee's implementation of " -#~ ":code:`aggregate_fit()`." 
+#~ msgid "``22.04``" +#~ msgstr "``1.0.0rc1``" + +#~ msgid "``23.0.1``" +#~ msgstr "``1.0.0rc1``" + +#~ msgid "``69.0.2``" +#~ msgstr "``1.0.0b0``" + +#~ msgid "``1.8.0``" +#~ msgstr "``1.0.0b0``" + +#~ msgid "Building the SuperLink/SuperNode or ServerApp image" +#~ msgstr "启动服务器" + +#~ msgid "``1.8.0-py3.10-ubuntu22.04``" #~ msgstr "" -#~ ":code:`aggregate_fit()`: " -#~ "我们会检查是否有任何客户端在本轮超时前退出或未能上传参数更新。在这种情况下,我们需要中止当前一轮,丢弃已收到的所有参数更新,然后继续下一轮。另一方面,如果所有客户端都成功响应,我们就必须通过拦截" -#~ " :code:`FitRes` 的 :code:`parameters` 字段并将其设置为 " -#~ "1,强制以不加权的方式平均更新。此外,如果 " -#~ ":code:`server_side_noising=true`,每次更新都会受到一定量的噪声扰动,其扰动量相当于启用客户端噪声时的扰动量。" -#~ " 这就需要在将本方法的参数传递给封装的 :code:`aggregate_fit()` " -#~ "之前,对参数进行*预*处理。" #~ msgid "" -#~ "We can't directly change the aggregation" -#~ " function of the wrapped strategy to" -#~ " force it to add noise to the" -#~ " aggregate, hence we simulate client-" -#~ "side noising to implement server-side" -#~ " noising." -#~ msgstr "我们无法直接改变封装策略的聚合函数,迫使它在聚合中添加噪声,因此我们模拟客户端噪声来实现服务器端噪声。" +#~ "The following example creates a " +#~ "SuperLink/SuperNode or ServerApp image with" +#~ " the official Flower base image:" +#~ msgstr "下面的示例使用官方的 Flower 基本镜像 py3.11-ubuntu22.04 和 Flower 1.7.0 创建了一个服务器镜像:" + +#~ msgid "Trigger the CI for building the Docker images." +#~ msgstr "官方 Ubuntu Docker 映像的版本。" #~ msgid "" -#~ "These changes have been put together " -#~ "into a class called :code:`DPFedAvgFixed`, " -#~ "whose constructor accepts the strategy " -#~ "being decorated, the clipping threshold " -#~ "and the number of clients sampled " -#~ "every round as compulsory arguments. The" -#~ " user is expected to specify the " -#~ "clipping threshold since the order of" -#~ " magnitude of the update norms is " -#~ "highly dependent on the model being " -#~ "trained and providing a default value" -#~ " would be misleading. 
The number of" -#~ " clients sampled at every round is" -#~ " required to calculate the amount of" -#~ " noise that must be added to " -#~ "each individual update, either by the" -#~ " server or the clients." +#~ "To trigger the workflow, a collaborator" +#~ " must create a ``workflow_dispatch`` event" +#~ " in the GitHub CI. This can be" +#~ " done either through the UI or " +#~ "via the GitHub CLI. The event " +#~ "requires only one input, the Flower " +#~ "version, to be released." #~ msgstr "" -#~ "这些变化被整合到一个名为 :code:`DPFedAvgFixed` " -#~ "的类中,其构造函数接受被装饰的策略、剪切阈值和每轮采样的客户数作为必选参数。用户需要指定剪切阈值,因为参数更新规范的数量级在很大程度上取决于正在训练的模型,提供默认值会产生误导。每轮采样的客户端数量是计算服务器或客户在每次参数更新时添加的噪音量所必需的。" + +#~ msgid "**Via the UI**" +#~ msgstr "**审查 PR**" #~ msgid "" -#~ "The additional functionality required to " -#~ "facilitate adaptive clipping has been " -#~ "provided in :code:`DPFedAvgAdaptive`, a " -#~ "subclass of :code:`DPFedAvgFixed`. It " -#~ "overrides the above-mentioned methods to" -#~ " do the following." +#~ "Go to the ``Build docker images`` " +#~ "workflow `page " +#~ "`_." #~ msgstr "" -#~ "自适应剪裁所需的附加功能在 :code:`DPFedAvgAdaptive` 中提供,其是 " -#~ ":code:`DPFedAvgFixed` 的子类。它重写了上述方法,以实现以下功能。" #~ msgid "" -#~ ":code:`configure_fit()` : It intercepts the" -#~ " config dict returned by " -#~ ":code:`super.configure_fit()` to add the " -#~ "key-value pair " -#~ ":code:`dpfedavg_adaptive_clip_enabled:True` to it, " -#~ "which the client interprets as an " -#~ "instruction to include an indicator bit" -#~ " (1 if update norm <= clipping " -#~ "threshold, 0 otherwise) in the results" -#~ " returned by it." +#~ "Click on the ``Run workflow`` button " +#~ "and type the new version of Flower" +#~ " in the ``Version of Flower`` input" +#~ " field." +#~ msgstr "" + +#~ msgid "Click on the **green** ``Run workflow`` button." 
+#~ msgstr "" + +#~ msgid "**Via the GitHub CI**" #~ msgstr "" -#~ ":code:`configure_fit()`:它截取由 :code:`super.configure_fit()` " -#~ "返回的 config 字典,并在其中添加键-值对 " -#~ ":code:`dpfedavg_adaptive_clip_enabled:True\",客户端将其解释为在返回结果中包含一个指示位(如果参数更新范式" -#~ " <= 剪裁阈值,则为 1,否则为 0)的指令。" #~ msgid "" -#~ ":code:`aggregate_fit()` : It follows a " -#~ "call to :code:`super.aggregate_fit()` with one" -#~ " to :code:`__update_clip_norm__()`, a procedure" -#~ " which adjusts the clipping threshold " -#~ "on the basis of the indicator bits" -#~ " received from the sampled clients." -#~ msgstr ":code:`aggregate_fit()`:在调用:code:`super.aggregate_fit()`后,再调用:code:`__update_clip_norm__()`,该过程根据从采样客户端接收到的指示位调整裁剪阈值。" +#~ "Make sure you are logged in via" +#~ " ``gh auth login`` and that the " +#~ "current working directory is the root" +#~ " of the Flower repository." +#~ msgstr "" #~ msgid "" -#~ "The client-side capabilities required " -#~ "can be completely captured through " -#~ "wrapper logic for just the :code:`fit()`" -#~ " method of the :code:`NumPyClient` abstract" -#~ " class. To be precise, we need " -#~ "to *post-process* the update computed" -#~ " by the wrapped client to clip " -#~ "it, if necessary, to the threshold " -#~ "value supplied by the server as " -#~ "part of the config dictionary. In " -#~ "addition to this, it may need to" -#~ " perform some extra work if either" -#~ " (or both) of the following keys " -#~ "are also present in the dict." +#~ "Trigger the workflow via ``gh workflow" +#~ " run docker-images.yml -f flwr-" +#~ "version=``." #~ msgstr "" -#~ "客户端所需的功能完全可以通过 :code:`NumPyClient` 抽象类的 " -#~ ":code:`fit()` " -#~ "方法的封装逻辑来实现。准确地说,我们需要对封装客户端计算的参数更新进行处理,以便在必要时将其剪切到服务器作为配置字典的一部分提供的阈值。除此之外,如果配置字典中还存在以下任一(或两个)键,客户端可能还需要执行一些额外的工作。" + +#~ msgid "Example: JAX - Run JAX Federated" +#~ msgstr "示例: JAX - 运行联邦式 JAX" #~ msgid "" -#~ ":code:`dpfedavg_noise_stddev` : Generate and " -#~ "add the specified amount of noise " -#~ "to the clipped update." 
-#~ msgstr "code:`dpfedavg_noise_stddev`:生成并在剪切参数更新中添加指定数量的噪声。" +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`__. Supported " +#~ "architectures include ``amd64`` and " +#~ "``arm64v8``." +#~ msgstr "" +#~ "开始使用 Flower 的最简单方法是使用预制的 Docker 镜像,您可以在 " +#~ "`Docker Hub `_" +#~ " 上找到这些镜像。" #~ msgid "" -#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the" -#~ " metrics dict in the :code:`FitRes` " -#~ "object being returned to the server " -#~ "with an indicator bit, calculated as " -#~ "described earlier." +#~ "If you do not see the version " +#~ "of Docker but instead get an error" +#~ " saying that the command was not " +#~ "found, you will need to install " +#~ "Docker first. You can find installation" +#~ " instruction `here `_." #~ msgstr "" -#~ ":code:`dpfedavg_adaptive_clip_enabled`:在返回给服务器的 :code:`FitRes`" -#~ " 对象中的度量值字典中增加一个指标位,计算方法如前所述。" +#~ "如果没有看到 Docker 的版本,而是出现找不到命令的错误,则需要先安装 Docker。你可以在" +#~ " `_ 找到安装说明。" -#~ msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" -#~ msgstr "进行 :math:`(epsilon, \\delta)` 分析" +#~ msgid "" +#~ "On Linux, Docker commands require " +#~ "``sudo`` privilege. If you want to " +#~ "avoid using ``sudo``, you can follow " +#~ "the `Post-installation steps " +#~ "`_" +#~ " on the official Docker website." +#~ msgstr "" +#~ "在 Linux 上,Docker 命令需要 ``sudo`` " +#~ "权限。如果你想避免使用 ``sudo``,可以按照 Docker 官方网站上的 `安装后步骤" +#~ " `_进行操作。" #~ msgid "" -#~ "Assume you have trained for :math:`n`" -#~ " rounds with sampling fraction :math:`q`" -#~ " and noise multiplier :math:`z`. In " -#~ "order to calculate the :math:`\\epsilon` " -#~ "value this would result in for a" -#~ " particular :math:`\\delta`, the following " -#~ "script may be used." +#~ "To ensure optimal performance and " +#~ "compatibility, the SuperLink, SuperNode and" +#~ " ServerApp image must have the same" +#~ " version when running together. 
This " +#~ "guarantees seamless integration and avoids " +#~ "potential conflicts or issues that may" +#~ " arise from using different versions." #~ msgstr "" -#~ "假设您已经训练了 :math:`n` 轮,采样比例为 :math:`q`,噪声乘数为 " -#~ ":math:`z`。为了计算特定 :math:`\\delta` 的 :math:`epsilon`" -#~ " 值,可以使用下面的脚本。" +#~ "为确保最佳性能和兼容性,SuperLink、SuperNode 和 ServerApp " +#~ "映像在一起运行时必须具有相同的版本。这可确保无缝集成,并避免因使用不同版本而可能产生的潜在冲突或问题。" -#~ msgid "Flower driver SDK." -#~ msgstr "Flower 服务器。" +#~ msgid "Flower SuperLink" +#~ msgstr "flower-superlink" -#~ msgid "driver" -#~ msgstr "服务器" +#~ msgid "Quickstart" +#~ msgstr "快速入门 JAX" -#~ msgid "Get task results." -#~ msgstr "汇总训练结果。" +#~ msgid "If you're looking to try out Flower, you can use the following command:" +#~ msgstr "如果您想试用 Flower,可以使用以下命令:" -#~ msgid "Request for run ID." -#~ msgstr "Flower 基线申请" +#~ msgid "" +#~ "The command pulls the Docker image " +#~ "with the tag ``1.8.0`` from Docker " +#~ "Hub. The tag specifies the Flower " +#~ "version. In this case, Flower 1.8.0. " +#~ "The ``--rm`` flag tells Docker to " +#~ "remove the container after it exits." +#~ msgstr "" +#~ "该命令将从 Docker Hub 提取标签为``1.7.0-py3.11-ubuntu22.04``的" +#~ " Docker 镜像。标签包含使用 Flower、Python 和 Ubuntu" +#~ " 的信息。在本例中,它使用了 Flower 1.7.0、Python 3.11 和" +#~ " Ubuntu 22.04。rm \"标记告诉 Docker 在退出后移除容器。" -#~ msgid "Get client IDs." -#~ msgstr "返回客户端(本身)。" +#~ msgid "" +#~ "The ``-p :`` flag tells " +#~ "Docker to map the ports " +#~ "``9091``/``9092`` of the host to " +#~ "``9091``/``9092`` of the container, allowing" +#~ " you to access the Driver API " +#~ "on ``http://localhost:9091`` and the Fleet " +#~ "API on ``http://localhost:9092``. Lastly, any" +#~ " flag that comes after the tag " +#~ "is passed to the Flower SuperLink. " +#~ "Here, we are passing the flag " +#~ "``--insecure``." 
+#~ msgstr "" +#~ "``-p :`` 标记会告诉 Docker 将主机的端口" +#~ " ``9091``/``9092`` 映射到容器的端口 ``9091``/`9092``,这样你就可以在" +#~ " ``http://localhost:9091`` 上访问 Driver API,在 " +#~ "``http://localhost:9092`` 上访问 Fleet " +#~ "API。最后,标签后面的任何标志都会传递给 Flower 服务器。在这里,我们传递的标志是 " +#~ "``--insecure`` 。" #~ msgid "" -#~ "Flower usage examples used to be " -#~ "bundled with Flower in a package " -#~ "called ``flwr_example``. We are migrating " -#~ "those examples to standalone projects to" -#~ " make them easier to use. All " -#~ "new examples are based in the " -#~ "directory `examples " -#~ "`_." +#~ "The ``--insecure`` flag enables insecure " +#~ "communication (using HTTP, not HTTPS) " +#~ "and should only be used for " +#~ "testing purposes. We strongly recommend " +#~ "enabling `SSL `__ when " +#~ "deploying to a production environment." #~ msgstr "" -#~ "Flower 的使用示例曾与 Flower 捆绑在一个名为 ``flwr_example``" -#~ " 的软件包中。我们正在将这些示例迁移到独立项目中,以使它们更易于使用。所有新示例都位于目录 `examples " -#~ "`_。" +#~ "不安全 \"标志启用不安全通信(使用 HTTP,而非 " +#~ "HTTPS),只能用于测试目的。我们强烈建议在部署到生产环境时启用 `SSL " +#~ "`_。" -#~ msgid "Quickstart TensorFlow/Keras" -#~ msgstr "快速入门 TensorFlow/Keras" +#~ msgid "" +#~ "You can use ``--help`` to view all" +#~ " available flags that the SuperLink " +#~ "supports:" +#~ msgstr "您可以使用 ``--help`` 查看服务器支持的所有可用标记:" -#~ msgid "Legacy Examples (`flwr_example`)" -#~ msgstr "传统示例 (`flwr_example`)" +#~ msgid "Mounting a volume to store the state on the host system" +#~ msgstr "在主机系统上挂载卷以存储状态" #~ msgid "" -#~ "The useage examples in `flwr_example` " -#~ "are deprecated and will be removed " -#~ "in the future. New examples are " -#~ "provided as standalone projects in " -#~ "`examples `_." +#~ "If you want to persist the state" +#~ " of the SuperLink on your host " +#~ "system, all you need to do is " +#~ "specify a directory where you want " +#~ "to save the file on your host " +#~ "system and a name for the database" +#~ " file. 
By default, the SuperLink " +#~ "container runs with a non-root " +#~ "user called ``app`` with the user " +#~ "ID ``49999``. It is recommended to " +#~ "create new directory and change the " +#~ "user ID of the directory to " +#~ "``49999`` to ensure the mounted " +#~ "directory has the proper permissions. If" +#~ " you later want to delete the " +#~ "directory, you can change the user " +#~ "ID back to the current user ID " +#~ "by running ``sudo chown -R $USER:$(id" +#~ " -gn) state``." #~ msgstr "" -#~ "在 `flwr_example` 中的使用示例已被弃用,今后将被移除。新示例将作为独立项目在 " -#~ "`examples `_" -#~ " 中提供。" -#~ msgid "Extra Dependencies" -#~ msgstr "额外依赖" +#~ msgid "" +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``--volume`` to " +#~ "mount the local directory into the " +#~ "``/app/certificates/`` directory of the " +#~ "container. This allows the SuperLink to" +#~ " access the files within the " +#~ "container. The ``ro`` stands for " +#~ "``read-only``. Docker volumes default to" +#~ " ``read-write``; that option tells " +#~ "Docker to make the volume ``read-" +#~ "only`` instead. Finally, we pass the " +#~ "names of the certificates and key " +#~ "file to the SuperLink with the " +#~ "``--ssl-ca-certfile``, ``--ssl-certfile`` " +#~ "and ``--ssl-keyfile`` flag." +#~ msgstr "" +#~ "假设我们需要的所有文件都在本地的 ``certificates`` 目录中,我们可以使用标记 " +#~ "``-v`` 将本地目录挂载到容器的 ``/app/`` " +#~ "目录中。这样,服务器就可以访问容器内的文件。最后,我们使用 ``--certificates`` " +#~ "标志将证书名称传递给服务器。" #~ msgid "" -#~ "The core Flower framework keeps a " -#~ "minimal set of dependencies. The " -#~ "examples demonstrate Flower in the " -#~ "context of different machine learning " -#~ "frameworks, so additional dependencies need" -#~ " to be installed before an example" -#~ " can be run." 
+#~ "Because Flower containers, by default, " +#~ "run with a non-root user ``app``," +#~ " the mounted files and directories " +#~ "must have the proper permissions for " +#~ "the user ID ``49999``. For example, " +#~ "to change the user ID of all " +#~ "files in the ``certificates/`` directory, " +#~ "you can run ``sudo chown -R " +#~ "49999:49999 certificates/*``." #~ msgstr "" -#~ "Flower 核心框架只保留了最低限度的依赖项。这些示例在不同机器学习框架的背景下演示了 " -#~ "Flower,因此在运行示例之前需要安装额外的依赖项。" -#~ msgid "For PyTorch examples::" -#~ msgstr "PyTorch 示例::" +#~ msgid "" +#~ "The SuperNode Docker image comes with" +#~ " a pre-installed version of Flower" +#~ " and serves as a base for " +#~ "building your own SuperNode image." +#~ msgstr "超级节点 Docker 镜像预装了 Flower 版本,可作为构建自己的超级节点镜像的基础。" -#~ msgid "For TensorFlow examples::" -#~ msgstr "TensorFlow 示例::" +#~ msgid "" +#~ "We will use the ``quickstart-pytorch``" +#~ " example, which you can find in " +#~ "the Flower repository, to illustrate how" +#~ " you can dockerize your ClientApp." +#~ msgstr "" +#~ "我们将使用 \"quickstart-pytorch\"(快速启动-pytorch)示例来说明如何对 " +#~ "ClientApp 进行 docker 化。" -#~ msgid "For both PyTorch and TensorFlow examples::" -#~ msgstr "PyTorch 和 TensorFlow 示例::" +#~ msgid "" +#~ "Before we can start, we need to" +#~ " meet a few prerequisites in our " +#~ "local development environment. You can " +#~ "skip the first part if you want" +#~ " to run your ClientApp instead of " +#~ "the ``quickstart-pytorch`` example." +#~ msgstr "在开始之前,我们需要在本地开发环境中满足一些先决条件。" + +#~ msgid "Let's assume the following project layout:" +#~ msgstr "假设项目布局如下" #~ msgid "" -#~ "Please consult :code:`pyproject.toml` for a" -#~ " full list of possible extras " -#~ "(section :code:`[tool.poetry.extras]`)." +#~ "First, we need to create a " +#~ "``requirements.txt`` file in the directory " +#~ "where the ``ClientApp`` code is located." +#~ " In the file, we list all the" +#~ " dependencies that the ClientApp requires." 
#~ msgstr "" -#~ "请参阅 :code:`pyproject.toml`,了解可能的 extras 的完整列表(章节 " -#~ ":code:`[tool.poems.extras]`)。" +#~ "首先,我们需要在 ``ClientApp`` 代码所在的目录中创建一个 " +#~ "``requirements.txt`` 文件。在该文件中,我们列出了 ClientApp " +#~ "需要的所有依赖项。" -#~ msgid "PyTorch Examples" -#~ msgstr "PyTorch 示例" +#~ msgid "" +#~ "Note that `flwr `__" +#~ " is already installed in the " +#~ "``flwr/supernode`` base image, so you " +#~ "only need to include other package " +#~ "dependencies in your ``requirements.txt``, " +#~ "such as ``torch``, ``tensorflow``, etc." +#~ msgstr "" +#~ "请注意,`flwr `__ " +#~ "已经安装在`flwr/supernode``基础镜像中,因此只需在`requirements.txt``中包含其他依赖包,如`torch``、`tensorflow`等。" #~ msgid "" -#~ "Our PyTorch examples are based on " -#~ "PyTorch 1.7. They should work with " -#~ "other releases as well. So far, we" -#~ " provide the following examples." -#~ msgstr "我们的 PyTorch 示例基于 PyTorch 1.7。它们应该也能在其他版本中使用。到目前为止,我们提供了以下示例。" +#~ "Next, we create a Dockerfile. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.supernode`` in ``examples/quickstart-" +#~ "pytorch``." +#~ msgstr "" +#~ "接下来,我们创建一个 Dockerfile。如果使用 ``quickstart-pytorch``" +#~ " 示例,请在 ``examples/quickstart-pytorch`` 中创建一个名为" +#~ " ``Dockerfile.supernode`` 的新文件。" -#~ msgid "CIFAR-10 Image Classification" -#~ msgstr "CIFAR-10 图像分类" +#~ msgid "" +#~ "The ``Dockerfile.supernode`` contains the " +#~ "instructions that assemble the SuperNode " +#~ "image." +#~ msgstr "Dockerfile.supernode \"包含组装超级节点映像的指令。" #~ msgid "" -#~ "`CIFAR-10 and CIFAR-100 " -#~ "`_ are " -#~ "popular RGB image datasets. The Flower" -#~ " CIFAR-10 example uses PyTorch to " -#~ "train a simple CNN classifier in a" -#~ " federated learning setup with two " -#~ "clients." +#~ "In the first two lines, we " +#~ "instruct Docker to use the SuperNode " +#~ "image tagged ``nightly`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. 
The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. Next, we install the" +#~ " ClientApp dependencies by copying the " +#~ "``requirements.txt`` file into the image " +#~ "and run ``pip install``. In the " +#~ "last two lines, we copy the " +#~ "``client.py`` module into the image and" +#~ " set the entry point to ``flower-" +#~ "client-app`` with the argument " +#~ "``client:app``. The argument is the " +#~ "object reference of the ClientApp " +#~ "(``:``) that will be run" +#~ " inside the ClientApp." #~ msgstr "" -#~ "CIFAR-10 和 CIFAR-100 " -#~ "``_ 是流行的 RGB" -#~ " 图像数据集。Flower CIFAR-10 示例使用 PyTorch " -#~ "在有两个客户端的联邦学习设置中训练一个简单的 CNN 分类器。" - -#~ msgid "First, start a Flower server:" -#~ msgstr "首先,启动 Flower 服务器:" +#~ "在前两行中,我们指示 Docker 使用标记为 ``nightly`` 的 " +#~ "SuperNode 镜像作为基础镜像,并将工作目录设置为 ``/app``。下面的指令将在 " +#~ "``/app`` 目录中执行。接下来,我们通过将 ``requirements.txt`` " +#~ "文件复制到映像中并运行 ``pip install`` 来安装 ClientApp " +#~ "依赖项。最后两行,我们将 ``client.py`` 模块复制到映像中,并将入口点设置为 " +#~ "``flower-client-app``,参数为 ``client:app``。参数是将在 " +#~ "ClientApp 内运行的 ClientApp 的对象引用(``<模块>:<属性>``)。" -#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" -#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +#~ msgid "Building the SuperNode Docker image" +#~ msgstr "启动服务器" -#~ msgid "Then, start the two clients in a new terminal window:" -#~ msgstr "然后,在新的终端窗口中启动两个客户端:" +#~ msgid "" +#~ "We gave the image the name " +#~ "``flwr_supernode``, and the tag ``0.0.1``. " +#~ "Remember that the here chosen values " +#~ "only serve as an example. You can" +#~ " change them to your needs." 
+#~ msgstr "" +#~ "我们将图像命名为 ``flwr_supernode``,标签为 " +#~ "``0.0.1``。请记住,这里选择的值只是一个示例。您可以根据自己的需要进行更改。" -#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" -#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +#~ msgid "Running the SuperNode Docker image" +#~ msgstr "启动服务器" -#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." -#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/pytorch_cifar`。" +#~ msgid "Now that we have built the SuperNode image, we can finally run it." +#~ msgstr "现在,我们已经构建了超级节点镜像,终于可以运行它了。" -#~ msgid "ImageNet-2012 Image Classification" -#~ msgstr "ImageNet-2012 图像分类" +#~ msgid "Let's break down each part of this command:" +#~ msgstr "让我们来分析一下这条命令的各个部分:" #~ msgid "" -#~ "`ImageNet-2012 `_ is " -#~ "one of the major computer vision " -#~ "datasets. The Flower ImageNet example " -#~ "uses PyTorch to train a ResNet-18 " -#~ "classifier in a federated learning setup" -#~ " with ten clients." -#~ msgstr "" -#~ "ImageNet-2012 `_ " -#~ "是主要的计算机视觉数据集之一。Flower ImageNet 示例使用 PyTorch " -#~ "在有十个客户端的联邦学习设置中训练 ResNet-18 分类器。" +#~ "``--rm``: This option specifies that the" +#~ " container should be automatically removed" +#~ " when it stops." +#~ msgstr "`-rm``: 该选项指定容器停止时应自动移除。" -#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" -#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" +#~ msgid "``--insecure``: This option enables insecure communication." +#~ msgstr "不安全\": 该选项启用不安全通信。" -#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" -#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +#~ msgid "" +#~ "``--superlink 192.168.1.100:9092``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Fleet" +#~ msgstr "``--server 192.168.1.100:9092``: 该选项指定超级链接舰队的地址" -#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." -#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/pytorch_imagenet`。" +#~ msgid "API to connect to. 
Remember to update it with your SuperLink IP." +#~ msgstr "要连接的 API。记住用您的超级链接 IP 更新它。" -#~ msgid "TensorFlow Examples" -#~ msgstr "TensorFlow 示例" +#~ msgid "" +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your SuperNodes." +#~ msgstr "" +#~ "要测试在本地运行 Flower,可以创建一个 \"桥接网络 " +#~ "`__\",使用\"--网络 \"参数并传递 Docker " +#~ "网络的名称,以运行超级节点。" #~ msgid "" -#~ "Our TensorFlow examples are based on " -#~ "TensorFlow 2.0 or newer. So far, " -#~ "we provide the following examples." -#~ msgstr "我们的 TensorFlow 示例基于 TensorFlow 2.0 或更新版本。到目前为止,我们提供了以下示例。" +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "SuperNode binary. To see all available" +#~ " flags that the SuperNode supports, " +#~ "run:" +#~ msgstr "标记后的任何参数都将传递给 Flower 超级节点二进制文件。要查看超级节点支持的所有可用标记,请运行" -#~ msgid "Fashion-MNIST Image Classification" -#~ msgstr "Fashion-MNIST 图像分类" +#~ msgid "" +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your SuperNode container." +#~ msgstr "要启用 SSL,我们需要将 PEM 编码的根证书挂载到 SuperNode 容器中。" #~ msgid "" -#~ "`Fashion-MNIST `_ is often used as " -#~ "the \"Hello, world!\" of machine " -#~ "learning. We follow this tradition and" -#~ " provide an example which samples " -#~ "random local datasets from Fashion-MNIST" -#~ " and trains a simple image " -#~ "classification model over those partitions." +#~ "Similar to the SuperNode image, the " +#~ "ServerApp Docker image comes with a " +#~ "pre-installed version of Flower and " +#~ "serves as a base for building your" +#~ " own ServerApp image." 
#~ msgstr "" -#~ "`Fashion-MNIST `_ 经常被用作机器学习的 \"你好,世界!\"。我们遵循这一传统" -#~ ",提供了一个从Fashion-MNIST 中随机抽样本地数据集的示例,并在这些分区上训练一个简单的图像分类模型。" +#~ "与 SuperNode 映像类似,ServerApp Docker 映像也预装了 " +#~ "Flower 版本,可作为构建自己的 ServerApp 映像的基础。" -#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" -#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" +#~ msgid "" +#~ "We will use the same ``quickstart-" +#~ "pytorch`` example as we do in the" +#~ " Flower SuperNode section. If you " +#~ "have not already done so, please " +#~ "follow the `SuperNode Prerequisites`_ before" +#~ " proceeding." +#~ msgstr "" +#~ "我们将使用与 \"Flower SuperNode \"部分相同的 " +#~ "\"quickstart-pytorch \"示例。如果您还没有这样做,请在继续之前遵循 " +#~ "\"SuperNode 先决条件\"。" -#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" -#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +#~ msgid "Creating a ServerApp Dockerfile" +#~ msgstr "创建 ServerApp Dockerfile" #~ msgid "" -#~ "For more details, see " -#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." -#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/tensorflow_fashion_mnist`。" - -#~ msgid "``BASE_IMAGE_TAG``" -#~ msgstr "基本图像标签" +#~ "First, we need to create a " +#~ "Dockerfile in the directory where the" +#~ " ``ServerApp`` code is located. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.serverapp`` in ``examples/quickstart-" +#~ "pytorch``." +#~ msgstr "" +#~ "首先,我们需要在 ``ServerApp`` 代码所在的目录中创建一个 Dockerfile。如果使用" +#~ " ``quickstart-pytorch`` 示例,请在 ``examples" +#~ "/quickstart-pytorch`` 中创建一个名为 ``Dockerfile.serverapp``" +#~ " 的新文件。" -#~ msgid "The image tag of the base image." -#~ msgstr "基础图像的图像标记。" +#~ msgid "" +#~ "The ``Dockerfile.serverapp`` contains the " +#~ "instructions that assemble the ServerApp " +#~ "image." 
+#~ msgstr "Dockerfile.serverapp \"包含组装 ServerApp 镜像的说明。" #~ msgid "" -#~ "It is important to follow the " -#~ "instructions described in comments. For " -#~ "instance, in order to not break " -#~ "how our changelog system works, you " -#~ "should read the information above the" -#~ " ``Changelog entry`` section carefully. You" -#~ " can also checkout some examples and" -#~ " details in the :ref:`changelogentry` " -#~ "appendix." +#~ "In the first two lines, we " +#~ "instruct Docker to use the ServerApp " +#~ "image tagged ``1.8.0`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. In the last two " +#~ "lines, we copy the ``server.py`` module" +#~ " into the image and set the " +#~ "entry point to ``flower-server-app`` " +#~ "with the argument ``server:app``. The " +#~ "argument is the object reference of " +#~ "the ServerApp (``:``) that " +#~ "will be run inside the ServerApp " +#~ "container." #~ msgstr "" -#~ "请务必遵守注释中的说明。例如,为了不破坏我们的更新日志系统,你应该仔细阅读\"`更新日志条目``\"部分上面的信息。您还可以查看 " -#~ ":ref:`changelogentry` 附录中的一些示例和细节。" +#~ "在前两行中,我们指示 Docker 使用标记为 ``1.8.0`` 的 " +#~ "ServerApp 镜像作为基础镜像,并将工作目录设置为 ``/app``。下面的指令将在 " +#~ "``/app`` 目录中执行。在最后两行中,我们将 ``server.py`` " +#~ "模块复制到映像中,并将入口点设置为 ``flower-server-app``,参数为 " +#~ "``server:app``。参数是将在 ServerApp 容器内运行的 ServerApp " +#~ "的对象引用(``<模块>:<属性>``)。" -#~ msgid "Open a PR (as shown above)" -#~ msgstr "打开 PR(如上图所示)" +#~ msgid "Building the ServerApp Docker image" +#~ msgstr "启动服务器" -#~ msgid "How to write a good PR title" -#~ msgstr "如何撰写好的公关标题" +#~ msgid "Running the ServerApp Docker image" +#~ msgstr "启动服务器" + +#~ msgid "Now that we have built the ServerApp image, we can finally run it." +#~ msgstr "现在我们已经构建了 ServerApp 镜像,终于可以运行它了。" #~ msgid "" -#~ "A well-crafted PR title helps team" -#~ " members quickly understand the purpose " -#~ "and scope of the changes being " -#~ "proposed. 
Here's a guide to help " -#~ "you write a good GitHub PR title:" -#~ msgstr "一个精心撰写的公关标题能帮助团队成员迅速了解所提修改的目的和范围。以下指南可帮助您撰写一个好的 GitHub PR 标题:" +#~ "``--superlink 192.168.1.100:9091``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Driver" +#~ msgstr "``--server 192.168.1.100:9091``: 此选项指定超级链接驱动程序的地址" #~ msgid "" -#~ "1. Be Clear and Concise: Provide a" -#~ " clear summary of the changes in " -#~ "a concise manner. 1. Use Actionable " -#~ "Verbs: Start with verbs like \"Add,\"" -#~ " \"Update,\" or \"Fix\" to indicate " -#~ "the purpose. 1. Include Relevant " -#~ "Information: Mention the affected feature " -#~ "or module for context. 1. Keep it" -#~ " Short: Avoid lengthy titles for easy" -#~ " readability. 1. Use Proper Capitalization" -#~ " and Punctuation: Follow grammar rules " -#~ "for clarity." +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your ServerApps." #~ msgstr "" -#~ "1. 简明扼要: 以简明扼要的方式清楚地概述变化。1. 使用可操作的动词: 使用 " -#~ "\"添加\"、\"更新 \"或 \"修复 \"等动词来表明目的。1. 包含相关信息: " -#~ "提及受影响的功能或模块以了解上下文。1. 简短:避免冗长的标题,以方便阅读。1. 使用正确的大小写和标点符号:" -#~ " 遵守语法规则,以确保清晰。" +#~ "要测试在本地运行 Flower,可以创建一个 ``bridge network " +#~ "`___,使用 ``--network`` 参数并传递 Docker " +#~ "网络的名称,以运行 ServerApps。" #~ msgid "" -#~ "Let's start with a few examples " -#~ "for titles that should be avoided " -#~ "because they do not provide meaningful" -#~ " information:" -#~ msgstr "让我们先举例说明几个应该避免使用的标题,因为它们不能提供有意义的信息:" - -#~ msgid "Implement Algorithm" -#~ msgstr "执行算法" +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "ServerApp binary. 
To see all available" +#~ " flags that the ServerApp supports, " +#~ "run:" +#~ msgstr "标记后的任何参数都将传递给 Flower ServerApp 二进制文件。要查看 ServerApp 支持的所有可用标记,请运行" -#~ msgid "Add my_new_file.py to codebase" -#~ msgstr "在代码库中添加 my_new_file.py" +#~ msgid "" +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your ServerApp container." +#~ msgstr "要启用 SSL,需要 CA 证书、服务器证书和服务器私钥。" -#~ msgid "Improve code in module" -#~ msgstr "改进模块中的代码" +#~ msgid "" +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the ServerApp to" +#~ " access the certificate within the " +#~ "container. Use the ``--root-certificates`` " +#~ "flags when starting the container." +#~ msgstr "" +#~ "假设我们需要的所有文件都在本地的 ``certificates`` 目录中,我们可以使用标记 " +#~ "``-v`` 将本地目录挂载到容器的 ``/app/`` " +#~ "目录中。这样,服务器就可以访问容器内的文件。最后,我们使用 ``--certificates`` " +#~ "标志将证书名称传递给服务器。" -#~ msgid "Change SomeModule" -#~ msgstr "更改 SomeModule" +#~ msgid "Run with root user privileges" +#~ msgstr "" #~ msgid "" -#~ "Here are a few positive examples " -#~ "which provide helpful information without " -#~ "repeating how they do it, as that" -#~ " is already visible in the \"Files" -#~ " changed\" section of the PR:" -#~ msgstr "这里有几个正面的例子,提供了有用的信息,但没有重复他们是如何做的,因为在 PR 的 \"已更改文件 \"部分已经可以看到:" +#~ "Flower Docker images, by default, run" +#~ " with a non-root user " +#~ "(username/groupname: ``app``, UID/GID: ``49999``)." +#~ " Using root user is not recommended" +#~ " unless it is necessary for specific" +#~ " tasks during the build process. " +#~ "Always make sure to run the " +#~ "container as a non-root user in" +#~ " production to maintain security best " +#~ "practices." 
+#~ msgstr "" -#~ msgid "Update docs banner to mention Flower Summit 2023" -#~ msgstr "更新文件横幅,提及 2023 年 Flower 峰会" +#~ msgid "**Run a container with root user privileges**" +#~ msgstr "" -#~ msgid "Remove unnecessary XGBoost dependency" -#~ msgstr "移除不必要的 XGBoost 依赖性" +#~ msgid "**Run the build process with root user privileges**" +#~ msgstr "" -#~ msgid "Remove redundant attributes in strategies subclassing FedAvg" -#~ msgstr "删除 FedAvg 子类化策略中的多余属性" +#~ msgid ":py:obj:`run_client_app `\\ \\(\\)" +#~ msgstr ":py:obj:`run_client_app `\\ \\(\\)" -#~ msgid "" -#~ "Add CI job to deploy the staging" -#~ " system when the ``main`` branch " -#~ "changes" -#~ msgstr "添加 CI 作业,以便在 \"主 \"分支发生变化时部署暂存系统" +#~ msgid ":py:obj:`run_supernode `\\ \\(\\)" +#~ msgstr ":py:obj:`run_superlink `\\ \\(\\)" -#~ msgid "" -#~ "Add new amazing library which will " -#~ "be used to improve the simulation " -#~ "engine" -#~ msgstr "添加新的惊人库,用于改进模拟引擎" +#~ msgid "d defaults to None." +#~ msgstr "d 默认为 \"无\"。" -#~ msgid "Changelog entry" -#~ msgstr "更新日志" +#~ msgid "Update R from dict/iterable E and F." +#~ msgstr "根据二进制/可迭代 E 和 F 更新 R。" #~ msgid "" -#~ "When opening a new PR, inside its" -#~ " description, there should be a " -#~ "``Changelog entry`` header." -#~ msgstr "打开一个新 PR 时,在其描述中应有一个 ``Changelog entry`` 标头。" +#~ ":py:obj:`RUN_DRIVER_API_ENTER " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_DRIVER_API_ENTER " +#~ "`\\" #~ msgid "" -#~ "Above this header you should see " -#~ "the following comment that explains how" -#~ " to write your changelog entry:" -#~ msgstr "在页眉上方,你会看到以下注释,说明如何编写更新日志条目:" +#~ ":py:obj:`RUN_DRIVER_API_LEAVE " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_DRIVER_API_LEAVE " +#~ "`\\" #~ msgid "" -#~ "Inside the following 'Changelog entry' " -#~ "section, you should put the description" -#~ " of your changes that will be " -#~ "added to the changelog alongside your" -#~ " PR title." 
-#~ msgstr "在下面的 \"更新日志条目 \"部分中,您应该在 PR 标题旁边写上将添加到更新日志中的更改描述。" +#~ ":py:obj:`RUN_FLEET_API_ENTER " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_FLEET_API_ENTER " +#~ "`\\" #~ msgid "" -#~ "If the section is completely empty " -#~ "(without any token) or non-existent, " -#~ "the changelog will just contain the " -#~ "title of the PR for the changelog" -#~ " entry, without any description." -#~ msgstr "如果该部分完全为空(没有任何标记)或不存在,更新日志将只包含更新日志条目的 PR 标题,而不包含任何描述。" +#~ ":py:obj:`RUN_FLEET_API_LEAVE " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_FLEET_API_LEAVE " +#~ "`\\" -#~ msgid "" -#~ "If the section contains some text " -#~ "other than tokens, it will use it" -#~ " to add a description to the " -#~ "change." -#~ msgstr "如果该部分包含标记以外的文本,它将使用这些文本为更改添加说明。" +#~ msgid ":py:obj:`DRIVER_CONNECT `\\" +#~ msgstr ":py:obj:`DRIVER_CONNECT `\\" + +#~ msgid ":py:obj:`DRIVER_DISCONNECT `\\" +#~ msgstr ":py:obj:`DRIVER_DISCONNECT `\\" #~ msgid "" -#~ "If the section contains one of the" -#~ " following tokens it will ignore any" -#~ " other text and put the PR " -#~ "under the corresponding section of the" -#~ " changelog:" -#~ msgstr "如果该部分包含以下标记之一,它将忽略任何其他文本,并将 PR 放在更新日志的相应部分下:" +#~ ":py:obj:`START_DRIVER_ENTER " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`START_DRIVER_ENTER " +#~ "`\\" -#~ msgid " is for classifying a PR as a general improvement." -#~ msgstr " 用于将 PR 划分为一般改进。" +#~ msgid "" +#~ ":py:obj:`START_DRIVER_LEAVE " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`START_DRIVER_LEAVE " +#~ "`\\" -#~ msgid " is to not add the PR to the changelog" -#~ msgstr "表示不将 PR 添加到更新日志中" +#~ msgid "" +#~ "An identifier that can be used " +#~ "when loading a particular data partition" +#~ " for a ClientApp. Making use of " +#~ "this identifier is more relevant when" +#~ " conducting simulations." 
+#~ msgstr "为 ClientApp 加载特定数据分区时可使用的标识符。在进行模拟时,使用该标识符更有意义。" -#~ msgid " is to add a general baselines change to the PR" -#~ msgstr " 是指在 PR 中添加一般基线更改" +#~ msgid ":py:obj:`partition_id `\\" +#~ msgstr ":py:obj:`partition_id `\\" -#~ msgid " is to add a general examples change to the PR" -#~ msgstr " 是在 PR 中添加对一般示例的修改" +#~ msgid "An identifier telling which data partition a ClientApp should use." +#~ msgstr "告诉 ClientApp 应使用哪个数据分区的标识符。" -#~ msgid " is to add a general sdk change to the PR" -#~ msgstr " 是指在 PR 中添加一般的 sdk 更改" +#~ msgid ":py:obj:`run_superlink `\\ \\(\\)" +#~ msgstr ":py:obj:`run_superlink `\\ \\(\\)" -#~ msgid " is to add a general simulations change to the PR" -#~ msgstr "(模拟)是在 PR 中添加一般模拟变更" +#~ msgid "Run Flower SuperLink (Driver API and Fleet API)." +#~ msgstr "运行 Flower 服务器(Driver API 和 Fleet API)。" -#~ msgid "Note that only one token should be used." -#~ msgstr "请注意,只能使用一个标记。" +#~ msgid "run\\_driver\\_api" +#~ msgstr "flower-driver-api" + +#~ msgid "run\\_fleet\\_api" +#~ msgstr "run\\_fleet\\_api" #~ msgid "" -#~ "Its content must have a specific " -#~ "format. We will break down what " -#~ "each possibility does:" -#~ msgstr "其内容必须有特定的格式。我们将分析每种可能性的作用:" +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg+ configuration " +#~ "to clients and collect their public " +#~ "keys. - 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" +#~ msgstr "" +#~ "协议包括四个主要阶段: - 设置\": 向客户端发送 SecAgg+ " +#~ "配置并收集其公钥。- 共享密钥\": 在客户端之间广播公钥,并收集加密密钥。" + +#~ msgid "key shares." +#~ msgstr "关键股份。" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains nothing or doesn't exist, " -#~ "the following text will be added " -#~ "to the changelog::" -#~ msgstr "如果 ``#### Changelog entry`` 部分不包含任何内容或不存在,则会在更新日志中添加以下文本::" +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg configuration " +#~ "to clients and collect their public " +#~ "keys. 
- 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" +#~ msgstr "" +#~ "协议包括四个主要阶段: - 设置\": 向客户端发送 SecAgg " +#~ "配置并收集它们的公钥。- 共享密钥\": 在客户端之间广播公钥并收集加密密钥。" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains a description (and no " -#~ "token), the following text will be " -#~ "added to the changelog::" -#~ msgstr "如果 ``#### Changelog entry`` 部分包含描述(但没有标记),则会在更新日志中添加以下文本::" +#~ "'A dictionary, e.g {\"\": , " +#~ "\"\": } to configure a " +#~ "backend. Values supported in are" +#~ " those included by " +#~ "`flwr.common.typing.ConfigsRecordValues`." +#~ msgstr "" +#~ "字典,例如 {\"\": , \"\": " +#~ "} 来配置后端。 中支持的值是 " +#~ "`flwr.common.typing.ConfigsRecordValues`中包含的值。" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, nothing will change" -#~ " in the changelog." -#~ msgstr "如果 ``#### Changelog entry`` 部分包含 ````,更新日志中将不会有任何更改。" +#~ "The total number of clients in " +#~ "this simulation. This must be set " +#~ "if `clients_ids` is not set and " +#~ "vice-versa." +#~ msgstr "本次模拟的客户总数。如果未设置 `clients_ids`,则必须设置该参数,反之亦然。" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following text" -#~ " will be added to the changelog::" -#~ msgstr "如果 ``### Changelog entry`` 部分包含 ````,则会在更新日志中添加以下文本::" +#~ "In this tutorial we will learn how" +#~ " to train a Convolutional Neural " +#~ "Network on CIFAR10 using Flower and " +#~ "PyTorch." +#~ msgstr "在本教程中,我们将学习如何使用 Flower 和 PyTorch 在 CIFAR10 上训练卷积神经网络。" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" -#~ msgstr "如果``### 更新日志条目``部分包含``<基准线>``,则会在更新日志中添加以下文本::" +#~ "*Clients* are responsible for generating " +#~ "individual weight-updates for the model" +#~ " based on their local datasets. These" +#~ " updates are then sent to the " +#~ "*server* which will aggregate them to" +#~ " produce a better model. 
Finally, the" +#~ " *server* sends this improved version " +#~ "of the model back to each " +#~ "*client*. A complete cycle of weight " +#~ "updates is called a *round*." +#~ msgstr "*客户端*负责在其本地数据集上更新模型参数。然后,这些参数会被发送到*服务器*,由*服务器*聚合后生成一个更好的模型。最后,*服务器*将改进后的模型发送回每个*客户端*。一个完整的模型参数更新周期称为一*轮*。" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" -#~ msgstr "如果``### 更新日志条目``部分包含``<示例>``,则会在更新日志中添加以下文本::" +#~ "Now that we have a rough idea " +#~ "of what is going on, let's get " +#~ "started. We first need to install " +#~ "Flower. You can do this by running" +#~ " :" +#~ msgstr "现在,我们已经有了一个大致的概念了,那就让我们开始吧。首先,我们需要安装 Flower。可以通过运行 :" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following text " -#~ "will be added to the changelog::" -#~ msgstr "如果``### 更新日志条目``部分包含````,则会在更新日志中添加以下文本::" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead and install PyTorch and " +#~ "the **torchvision** library:" +#~ msgstr "既然我们想用 PyTorch 解决计算机视觉任务,那就继续安装 PyTorch 和 **torchvision** 库吧:" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" -#~ msgstr "如果 ``### Changelog entry`` 部分包含 ````,则会在更新日志中添加以下文本::" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Deep Learning with" +#~ " PyTorch " +#~ "`_." +#~ msgstr "" +#~ "现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练过程和网络架构基于 " +#~ "PyTorch 的《Deep Learning with PyTorch " +#~ "`_》。" #~ msgid "" -#~ "Note that only one token must be" -#~ " provided, otherwise, only the first " -#~ "action (in the order listed above), " -#~ "will be performed." 
-#~ msgstr "请注意,必须只提供一个标记,否则将只执行第一个操作(按上述顺序)。" +#~ "In a file called :code:`client.py`, " +#~ "import Flower and PyTorch related " +#~ "packages:" +#~ msgstr "在名为 :code:`client.py` 的文件中,导入 Flower 和 PyTorch 相关软件包:" -#~ msgid "Example: MXNet - Run MXNet Federated" -#~ msgstr "示例: MXNet - 运行联邦式 MXNet" +#~ msgid "In addition, we define the device allocation in PyTorch with:" +#~ msgstr "此外,我们还在 PyTorch 中定义了设备分配:" #~ msgid "" -#~ "This tutorial will show you how to" -#~ " use Flower to build a federated " -#~ "version of an existing MXNet workload." -#~ " We are using MXNet to train a" -#~ " Sequential model on the MNIST " -#~ "dataset. We will structure the example" -#~ " similar to our `PyTorch - From " -#~ "Centralized To Federated " -#~ "`_ walkthrough. " -#~ "MXNet and PyTorch are very similar " -#~ "and a very good comparison between " -#~ "MXNet and PyTorch is given `here " -#~ "`_. First, " -#~ "we build a centralized training approach" -#~ " based on the `Handwritten Digit " -#~ "Recognition " -#~ "`_" -#~ " tutorial. Then, we build upon the" -#~ " centralized training code to run the" -#~ " training in a federated fashion." +#~ "We use PyTorch to load CIFAR10, a" +#~ " popular colored image classification " +#~ "dataset for machine learning. The " +#~ "PyTorch :code:`DataLoader()` downloads the " +#~ "training and test data that are " +#~ "then normalized." 
#~ msgstr "" -#~ "本教程将向您展示如何使用 Flower 构建现有 MXNet 的联学习版本。我们将使用" -#~ " MXNet 在 MNIST 数据集上训练一个序列模型。另外,我们将采用与我们的 " -#~ "`PyTorch - 从集中式到联邦式 " -#~ "`_ 教程类似的示例结构。MXNet" -#~ " 和 PyTorch 非常相似,参考 `此处 " -#~ "`_对 MXNet " -#~ "和 PyTorch 进行了详细的比较。首先,我们根据 `手写数字识别 " -#~ "`" -#~ " 教程 建立了集中式训练方法。然后,我们在集中式训练代码的基础上,以联邦方式运行训练。" +#~ "我们使用 PyTorch 来加载 " +#~ "CIFAR10,这是一个用于机器学习的流行彩色图像分类数据集。PyTorch " +#~ ":code:`DataLoader()`下载训练数据和测试数据,然后进行归一化处理。" #~ msgid "" -#~ "Before we start setting up our " -#~ "MXNet example, we install the " -#~ ":code:`mxnet` and :code:`flwr` packages:" -#~ msgstr "在开始设置 MXNet 示例之前,我们先安装 :code:`mxnet` 和 :code:`flwr` 软件包:" +#~ "Define the loss and optimizer with " +#~ "PyTorch. The training of the dataset " +#~ "is done by looping over the " +#~ "dataset, measure the corresponding loss " +#~ "and optimize it." +#~ msgstr "使用 PyTorch 定义损失和优化器。数据集的训练是通过循环数据集、测量相应的损失值并对其进行优化来完成的。" -#~ msgid "MNIST Training with MXNet" -#~ msgstr "使用 MXNet 进行 MNIST 训练" +#~ msgid "" +#~ "Define then the validation of the " +#~ "machine learning network. We loop over" +#~ " the test set and measure the " +#~ "loss and accuracy of the test set." +#~ msgstr "然后定义机器学习网络的验证。我们在测试集上循环,计算测试集的损失值和准确率。" #~ msgid "" -#~ "We begin with a brief description " -#~ "of the centralized training code based" -#~ " on a :code:`Sequential` model. If " -#~ "you want a more in-depth " -#~ "explanation of what's going on then " -#~ "have a look at the official `MXNet" -#~ " tutorial " -#~ "`_." -#~ msgstr "" -#~ "首先,我们将简要介绍基于 :code:`Sequential` " -#~ "模型的集中式训练代码。如果您想获得更深入的解释,请参阅官方的 `MXNet教程 " -#~ "`_。" +#~ "After defining the training and testing" +#~ " of a PyTorch machine learning model," +#~ " we use the functions for the " +#~ "Flower clients." +#~ msgstr "在定义了 PyTorch 机器学习模型的训练和测试之后,我们将这些功能用于 Flower 客户端。" #~ msgid "" -#~ "Let's create a new file " -#~ "called:code:`mxnet_mnist.py` with all the " -#~ "components required for a traditional " -#~ "(centralized) MNIST training. 
First, the " -#~ "MXNet package :code:`mxnet` needs to be" -#~ " imported. You can see that we " -#~ "do not yet import the :code:`flwr` " -#~ "package for federated learning. This " -#~ "will be done later." -#~ msgstr "" -#~ "让我们创建一个名为:code:`mxnet_mnist.py`的新文件,其中包含传统(集中式)MNIST " -#~ "训练所需的所有组件。首先,需要导入 MXNet 包 " -#~ ":code:`mxnet`。您可以看到,我们尚未导入用于联合学习的 :code:`flwr` 包,这将在稍后完成。" +#~ "The Flower clients will use a " +#~ "simple CNN adapted from 'PyTorch: A " +#~ "60 Minute Blitz':" +#~ msgstr "Flower 客户端将使用一个简单的从“PyTorch: 60 分钟突击\"改编的CNN:" #~ msgid "" -#~ "The :code:`load_data()` function loads the " -#~ "MNIST training and test sets." -#~ msgstr ":code:`load_data()` 函数加载 MNIST 训练集和测试集。" +#~ "After loading the data set with " +#~ ":code:`load_data()` we define the Flower " +#~ "interface." +#~ msgstr "使用 :code:`load_data()` 加载数据集后,我们定义了 Flower 接口。" #~ msgid "" -#~ "As already mentioned, we will use " -#~ "the MNIST dataset for this machine " -#~ "learning workload. The model architecture " -#~ "(a very simple :code:`Sequential` model) " -#~ "is defined in :code:`model()`." +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses " +#~ "PyTorch. Implementing :code:`NumPyClient` usually" +#~ " means defining the following methods " +#~ "(:code:`set_parameters` is optional though):" #~ msgstr "" -#~ "如前所述,我们将使用 MNIST 数据集进行机器学习。模型架构(一个非常简单的 " -#~ ":code:`Sequential` 模型)在 :code:`model()` 中定义。" +#~ "Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用" +#~ " PyTorch 时,它使 :code:`Client` 接口的实现变得更容易。实现 " +#~ ":code:`NumPyClient` 通常意味着定义以下方法(:code:`set_parameters` " +#~ "是可选的):" -#~ msgid "" -#~ "We now need to define the training" -#~ " (function :code:`train()`) which loops " -#~ "over the training set and measures " -#~ "the loss for each batch of " -#~ "training examples." 
-#~ msgstr "现在,我们需要定义训练函数( :code:`train()`),该函数在训练集上循环训练,并计算每批训练示例的损失值。" +#~ msgid "which can be implemented in the following way:" +#~ msgstr "可以通过以下方式实现:" #~ msgid "" -#~ "The evaluation of the model is " -#~ "defined in function :code:`test()`. The " -#~ "function loops over all test samples " -#~ "and measures the loss and accuracy " -#~ "of the model based on the test " -#~ "dataset." -#~ msgstr "模型的评估在函数 :code:`test()` 中定义。该函数循环遍历所有测试样本,并根据测试数据集计算模型的损失值和准确度。" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/quickstart-" +#~ "pytorch`." +#~ msgstr "" +#~ "恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " +#~ "`_ 可以在 :code:`examples/quickstart-" +#~ "pytorch` 中找到。" #~ msgid "" -#~ "Having defined the data loading, model" -#~ " architecture, training, and evaluation we" -#~ " can put everything together and " -#~ "train our model on MNIST. Note " -#~ "that the GPU/CPU device for the " -#~ "training and testing is defined within" -#~ " the :code:`ctx` (context)." +#~ "The :code:`self.bst` is used to keep " +#~ "the Booster objects that remain " +#~ "consistent across rounds, allowing them " +#~ "to store predictions from trees " +#~ "integrated in earlier rounds and " +#~ "maintain other essential data structures " +#~ "for training." #~ msgstr "" -#~ "在定义了数据加载、模型架构、训练和评估之后,我们就可以把所有放在一起,在 MNIST " -#~ "上训练我们的模型了。请注意,用于训练和测试的 GPU/CPU 设备是在 :code:`ctx`中定义的。" +#~ "代码:`self.bst`用于保存在各轮中保持一致的 Booster " +#~ "对象,使其能够存储在前几轮中集成的树的预测结果,并维护其他用于训练的重要数据结构。" -#~ msgid "You can now run your (centralized) MXNet machine learning workload:" -#~ msgstr "现在,您可以运行(集中式)MXNet 机器学习工作:" +#~ msgid "Implementing a Flower client" +#~ msgstr "实现 Flower 客户端" #~ msgid "" -#~ "So far this should all look fairly" -#~ " familiar if you've used MXNet (or" -#~ " even PyTorch) before. 
Let's take the" -#~ " next step and use what we've " -#~ "built to create a simple federated " -#~ "learning system consisting of one server" -#~ " and two clients." +#~ "To implement the Flower client, we " +#~ "create a subclass of " +#~ "``flwr.client.NumPyClient`` and implement the " +#~ "three methods ``get_parameters``, ``fit``, and" +#~ " ``evaluate``:" #~ msgstr "" -#~ "到目前为止,如果你以前使用过 MXNet(甚至 " -#~ "PyTorch),这一切看起来应该相当熟悉。下一步,让我们利用已构建的内容创建一个简单联邦学习系统(由一个服务器和两个客户端组成)。" +#~ "为实现 Flower 客户端,我们创建了 ``flwr.client.NumPyClient`` " +#~ "的子类,并实现了 ``get_parameters``、``fit`` 和``evaluate`` " +#~ "三个方法:" -#~ msgid "MXNet meets Flower" -#~ msgstr "MXNet 结合 Flower" +#~ msgid "" +#~ "The function ``start_simulation`` accepts a" +#~ " number of arguments, amongst them " +#~ "the ``client_fn`` used to create " +#~ "``FlowerClient`` instances, the number of " +#~ "clients to simulate (``num_clients``), the " +#~ "number of federated learning rounds " +#~ "(``num_rounds``), and the strategy. The " +#~ "strategy encapsulates the federated learning" +#~ " approach/algorithm, for example, *Federated " +#~ "Averaging* (FedAvg)." +#~ msgstr "" +#~ "函数 ``start_simulation`` 接受许多参数,其中包括用于创建 " +#~ "``FlowerClient`` 实例的 " +#~ "``client_fn``、要模拟的客户端数量(``num_clients``)、联邦学习轮数(``num_rounds``)和策略。策略封装了联邦学习方法/算法,例如*联邦平均*" +#~ " (FedAvg)。" #~ msgid "" -#~ "So far, it was not easily possible" -#~ " to use MXNet workloads for federated" -#~ " learning because federated learning is " -#~ "not supported in MXNet. Since Flower " -#~ "is fully agnostic towards the underlying" -#~ " machine learning framework, it can " -#~ "be used to federated arbitrary machine" -#~ " learning workloads. This section will " -#~ "show you how Flower can be used" -#~ " to federate our centralized MXNet " -#~ "workload." 
+#~ "The only thing left to do is " +#~ "to tell the strategy to call this" +#~ " function whenever it receives evaluation" +#~ " metric dictionaries from the clients:" +#~ msgstr "剩下要做的就是告诉策略,每当它从客户端接收到评估度量字典时,都要调用这个函数:" + +#~ msgid "|93b02017c78049bbbd5ae456dcb2c91b|" +#~ msgstr "" + +#~ msgid "|01471150fd5144c080a176b43e92a3ff|" +#~ msgstr "" + +#~ msgid "|9bc21c7dbd17444a8f070c60786e3484|" +#~ msgstr "" + +#~ msgid "|3047bbce54b34099ae559963d0420d79|" +#~ msgstr "" + +#~ msgid "|e9f8ce948593444fb838d2f354c7ec5d|" +#~ msgstr "" + +#~ msgid "|c24c1478b30e4f74839208628a842d1e|" +#~ msgstr "" + +#~ msgid "|1b3613d7a58847b59e1d3180802dbc09|" +#~ msgstr "" + +#~ msgid "|9980b5213db547d0b8024a50992b9e3f|" +#~ msgstr "" + +#~ msgid "|c7afb4c92d154bfaa5e8cb9a150e17f1|" +#~ msgstr "" + +#~ msgid "|032eb6fed6924ac387b9f13854919196|" +#~ msgstr "" + +#~ msgid "|fbf225add7fd4df5a9bf25a95597d954|" +#~ msgstr "" + +#~ msgid "|7efbe3d29d8349b89594e8947e910525|" +#~ msgstr "" + +#~ msgid "|329fb3c04c744eda83bb51fa444c2266|" +#~ msgstr "" + +#~ msgid "|c00bf2750bc24d229737a0fe1395f0fc|" +#~ msgstr "" + +#~ msgid "run\\_client\\_app" +#~ msgstr "run\\_client\\_app" + +#~ msgid "run\\_supernode" +#~ msgstr "flower-superlink" + +#~ msgid "Retrieve the corresponding layout by the string key." #~ msgstr "" -#~ "由于 MXNet 目前不支持联邦学习,因此无法轻松地直接将 MXNet " -#~ "用于联邦学习之中。Flower 与底层机器学习框架完全无关,因此它可用于任意联邦式机器学习工作。本节将向你展示如何使用 " -#~ "Flower 将我们的集中式 MXNet 改为联邦式训练。" #~ msgid "" -#~ "The concept to federate an existing " -#~ "workload is always the same and " -#~ "easy to understand. We have to " -#~ "start a *server* and then use the" -#~ " code in :code:`mxnet_mnist.py` for the " -#~ "*clients* that are connected to the " -#~ "*server*. The *server* sends model " -#~ "parameters to the clients. The *clients*" -#~ " run the training and update the " -#~ "parameters. The updated parameters are " -#~ "sent back to the *server* which " -#~ "averages all received parameter updates. 
" -#~ "This describes one round of the " -#~ "federated learning process and we repeat" -#~ " this for multiple rounds." +#~ "When there isn't an exact match, " +#~ "all the existing keys in the " +#~ "layout map will be treated as a" +#~ " regex and map against the input " +#~ "key again. The first match will be" +#~ " returned, based on the key insertion" +#~ " order. Return None if there isn't" +#~ " any match found." #~ msgstr "" -#~ "将现有模型框架联邦化的概念始终是相同的,也很容易理解。我们必须启动一个*服务器*,然后对连接到*服务器*的*客户端*使用 " -#~ ":code:`mxnet_mnist.py`中的代码。*服务器*向客户端发送模型参数,然后*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后会对所有收到的参数更新进行平均聚合。以上描述的是一轮联邦学习过程,我们将重复进行多轮学习。" -#~ msgid "" -#~ "Finally, we will define our *client* " -#~ "logic in :code:`client.py` and build " -#~ "upon the previously defined MXNet " -#~ "training in :code:`mxnet_mnist.py`. Our " -#~ "*client* needs to import :code:`flwr`, " -#~ "but also :code:`mxnet` to update the " -#~ "parameters on our MXNet model:" +#~ msgid "the string key as the query for the layout." #~ msgstr "" -#~ "最后,我们将在 :code:`client.py` 中定义我们的 *client* " -#~ "逻辑,并以之前在 :code:`mxnet_mnist.py` 中定义的 MXNet " -#~ "训练为基础。我们的 *client* 不仅需要导入 :code:`flwr`,还需要导入 " -#~ ":code:`mxnet`,以更新 MXNet 模型的参数:" -#~ msgid "" -#~ "Implementing a Flower *client* basically " -#~ "means implementing a subclass of either" -#~ " :code:`flwr.client.Client` or " -#~ ":code:`flwr.client.NumPyClient`. Our implementation " -#~ "will be based on " -#~ ":code:`flwr.client.NumPyClient` and we'll call " -#~ "it :code:`MNISTClient`. :code:`NumPyClient` is " -#~ "slightly easier to implement than " -#~ ":code:`Client` if you use a framework" -#~ " with good NumPy interoperability (like " -#~ "PyTorch or MXNet) because it avoids " -#~ "some of the boilerplate that would " -#~ "otherwise be necessary. 
:code:`MNISTClient` " -#~ "needs to implement four methods, two " -#~ "methods for getting/setting model parameters," -#~ " one method for training the model," -#~ " and one method for testing the " -#~ "model:" +#~ msgid "Corresponding layout based on the query." #~ msgstr "" -#~ "实现 Flower *client*基本上意味着要实现 " -#~ ":code:`flwr.client.Client` 或 " -#~ ":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " -#~ ":code:`flwr.client.NumPyClient`,并将其命名为 " -#~ ":code:`MNISTClient`。如果使用具有良好 NumPy 互操作性的框架(如 PyTorch" -#~ " 或 MXNet),:code:`NumPyClient` 比 " -#~ ":code:`Client`更容易实现,因为它避免了一些不必要的操作。:code:`MNISTClient` " -#~ "需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" -#~ msgid "transform MXNet :code:`NDArray`'s to NumPy :code:`ndarray`'s" -#~ msgstr "将 MXNet :code:`NDArray` 转换为 NumPy :code:`ndarray`" +#~ msgid "run\\_server\\_app" +#~ msgstr "run\\_server\\_app" + +#~ msgid "run\\_superlink" +#~ msgstr "flower-superlink" + +#~ msgid "Start a Ray-based Flower simulation server." +#~ msgstr "启动基于 Ray 的Flower模拟服务器。" #~ msgid "" -#~ "The challenging part is to transform " -#~ "the MXNet parameters from :code:`NDArray` " -#~ "to :code:`NumPy Arrays` to make it " -#~ "readable for Flower." +#~ "A function creating `Client` instances. " +#~ "The function must have the signature " +#~ "`client_fn(context: Context). It should return" +#~ " a single client instance of type " +#~ "`Client`. Note that the created client" +#~ " instances are ephemeral and will " +#~ "often be destroyed after a single " +#~ "method invocation. Since client instances " +#~ "are not long-lived, they should " +#~ "not attempt to carry state over " +#~ "method invocations. Any state required " +#~ "by the instance (model, dataset, " +#~ "hyperparameters, ...) should be (re-)created" +#~ " in either the call to `client_fn`" +#~ " or the call to any of the " +#~ "client methods (e.g., load evaluation " +#~ "data in the `evaluate` method itself)." 
#~ msgstr "" -#~ "具有挑战性的部分是将 MXNet 参数从 :code:`NDArray` 转换为 " -#~ ":code:`NumPy Arrays` 以便 Flower 可以读取。" +#~ "创建客户端实例的函数。该函数必须接受一个名为 `cid` 的 `str` 参数。它应返回一个" +#~ " Client " +#~ "类型的客户端实例。请注意,创建的客户端实例是短暂的,通常在调用一个方法后就会被销毁。由于客户机实例不是长期存在的,它们不应试图在方法调用时携带状态数据。实例所需的任何状态数据(模型、数据集、超参数......)都应在调用" +#~ " `client_fn` 或任何客户端方法(例如,在 `evaluate` " +#~ "方法中加载评估数据)时(重新)创建。" + +#~ msgid "The total number of clients in this simulation." +#~ msgstr "需要等待的客户数量。" #~ msgid "" -#~ "The two :code:`NumPyClient` methods " -#~ ":code:`fit` and :code:`evaluate` make use " -#~ "of the functions :code:`train()` and " -#~ ":code:`test()` previously defined in " -#~ ":code:`mxnet_mnist.py`. So what we really " -#~ "do here is we tell Flower through" -#~ " our :code:`NumPyClient` subclass which of" -#~ " our already defined functions to " -#~ "call for training and evaluation. We " -#~ "included type annotations to give you" -#~ " a better understanding of the data" -#~ " types that get passed around." +#~ "UNSUPPORTED, WILL BE REMOVED. USE " +#~ "`num_clients` INSTEAD. List `client_id`s for" +#~ " each client. This is only required" +#~ " if `num_clients` is not set. Setting" +#~ " both `num_clients` and `clients_ids` with" +#~ " `len(clients_ids)` not equal to " +#~ "`num_clients` generates an error. Using " +#~ "this argument will raise an error." #~ msgstr "" -#~ "这两个 :code:`NumPyClient` 方法 :code:`fit` 和 " -#~ ":code:`evaluate` 使用了之前在 :code:`mxnet_mnist.py` " -#~ "中定义的函数 :code:`train()` 和 :code:`test()`。因此,我们要做的就是通过" -#~ " :code:`NumPyClient` 子类告知 Flower " -#~ "在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让您更好地理解传递的数据类型。" +#~ "列出每个客户的 `client_id`。只有在未设置 `num_clients` " +#~ "时才需要这样做。同时设置`num_clients`和`clients_ids`,且`len(clients_ids)`不等于`num_clients`,会产生错误。" #~ msgid "" -#~ "Having defined data loading, model " -#~ "architecture, training, and evaluation we " -#~ "can put everything together and train" -#~ " our :code:`Sequential` model on MNIST." +#~ "CPU and GPU resources for a single" +#~ " client. 
Supported keys are `num_cpus` " +#~ "and `num_gpus`. To understand the GPU" +#~ " utilization caused by `num_gpus`, as " +#~ "well as using custom resources, please" +#~ " consult the Ray documentation." #~ msgstr "" -#~ "在定义了数据加载、模型架构、训练和评估之后,我们就可以将所有内容整合在一起,在 MNIST 上训练我们的 " -#~ ":code:`Sequential` 模型。" +#~ "\"num_gpus\": 0.0` 单个客户端的 CPU 和 GPU " +#~ "资源。支持的键值为 `num_cpus` 和 `num_gpus`。要了解 " +#~ "`num_gpus` 所导致的 GPU 利用率,以及使用自定义资源的情况,请查阅 Ray" +#~ " 文档。" #~ msgid "" -#~ "in each window (make sure that the" -#~ " server is still running before you" -#~ " do so) and see your MXNet " -#~ "project run federated learning across " -#~ "two clients. Congratulations!" -#~ msgstr "确保服务器仍在运行后,然后就能在每个窗口中看到 MXNet 项目在两个客户端上运行联邦学习了。祝贺!" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Server`. If no instance" +#~ " is provided, then `start_server` will " +#~ "create one." +#~ msgstr "抽象基类 `flwr.server.Server`的实现。如果没有提供实例,`start_server` 将创建一个。" #~ msgid "" -#~ "The full source code for this " -#~ "example: `MXNet: From Centralized To " -#~ "Federated (Code) " -#~ "`_. Our " -#~ "example is of course somewhat over-" -#~ "simplified because both clients load the" -#~ " exact same dataset, which isn't " -#~ "realistic. You're now prepared to " -#~ "explore this topic further. How about" -#~ " using a CNN or using a " -#~ "different dataset? How about adding more" -#~ " clients?" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Strategy`. If no " +#~ "strategy is provided, then `start_server` " +#~ "will use `flwr.server.strategy.FedAvg`." #~ msgstr "" -#~ "此示例的完整源代码在:\"MXNet: From Centralized To " -#~ "Federated (Code) " -#~ "`_。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。现在您已经准备好进一步探讨了。使用" -#~ " CNN 或使用不同的数据集会如何?添加更多客户端会如何?" 
- -#~ msgid "with the following command sequence:" -#~ msgstr "使用以下命令序列:" +#~ "抽象基类 `flwr.server.strategy` 的实现。如果没有提供策略,`start_server`" +#~ " 将使用 `flwr.server.strategy.FedAvg`。" #~ msgid "" -#~ "In case you are a researcher you" -#~ " might be just fine using the " -#~ "self-signed certificates generated using " -#~ "the scripts which are part of this" -#~ " guide." -#~ msgstr "如果你是一名研究人员,使用本指南中的脚本生成的自签名证书就可以了。" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.ClientManager`. If no " +#~ "implementation is provided, then " +#~ "`start_simulation` will use " +#~ "`flwr.server.client_manager.SimpleClientManager`." +#~ msgstr "" +#~ "抽象基类 `flwr.server.ClientManager` " +#~ "的实现。如果没有提供实现,`start_simulation` 将使用 " +#~ "`flwr.server.client_manager.SimpleClientManager`。" #~ msgid "" -#~ "We are now going to show how " -#~ "to write a sever which uses the" -#~ " previously generated scripts." -#~ msgstr "现在,我们将展示如何编写一个使用先前生成的脚本的服务器。" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args: { " +#~ "\"ignore_reinit_error\": True, \"include_dashboard\": " +#~ "False } An empty dictionary can " +#~ "be used (ray_init_args={}) to prevent " +#~ "any arguments from being passed to " +#~ "ray.init." +#~ msgstr "" +#~ "可选字典,包含调用 `ray.init` 时的参数。如果 ray_init_args 为" +#~ " None(默认值),则将使用以下默认参数初始化 Ray: { " +#~ "\"ignore_reinit_error\": True, \"include_dashboard\": " +#~ "False } 可以使用空字典(ray_init_args={})来防止向 ray.init " +#~ "传递任何参数。" #~ msgid "" -#~ "When providing certificates, the server " -#~ "expects a tuple of three certificates." -#~ " :code:`Path` can be used to easily" -#~ " read the contents of those files " -#~ "into byte strings, which is the " -#~ "data type :code:`start_server` expects." +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. 
If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args:" #~ msgstr "" -#~ "在提供证书时,服务器希望得到由三个证书组成的元组。 :code:`Path` " -#~ "可用于轻松地将这些文件的内容读取为字节字符串,这就是 :code:`start_server` 期望的数据类型。" +#~ "可选字典,包含调用 `ray.init` 时的参数。如果 ray_init_args 为" +#~ " None(默认值),则将使用以下默认参数初始化 Ray:" -#~ msgid "Flower server" -#~ msgstr "Flower 服务器" +#~ msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#~ msgstr "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" -#~ msgid "flower-driver-api" -#~ msgstr "flower-driver-api" +#~ msgid "" +#~ "An empty dictionary can be used " +#~ "(ray_init_args={}) to prevent any arguments" +#~ " from being passed to ray.init." +#~ msgstr "可以使用空字典 (ray_init_args={}) 来防止向 ray.init 传递任何参数。" -#~ msgid "flower-fleet-api" -#~ msgstr "flower-fleet-api" +#~ msgid "" +#~ "Set to True to prevent `ray.shutdown()`" +#~ " in case `ray.is_initialized()=True`." +#~ msgstr "设为 True 可在 `ray.is_initialized()=True` 情况下阻止 `ray.shutdown()` 。" #~ msgid "" -#~ ":py:obj:`start_driver `\\ " -#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" -#~ msgstr "" -#~ ":py:obj:`start_driver `\\ " -#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ "Optionally specify the type of actor " +#~ "to use. The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "executing a ClientApp wrapping input " +#~ "argument `client_fn`." +#~ msgstr "可选择指定要使用的actor类型。actor对象将在整个模拟过程中持续存在,它将是负责运行客户端作业(即其 `fit()`方法)的进程。" -#~ msgid "Start a Flower Driver API server." -#~ msgstr "启动基于 Ray 的Flower模拟服务器。" +#~ msgid "" +#~ "If you want to create your own " +#~ "Actor classes, you might need to " +#~ "pass some input argument. You can " +#~ "use this dictionary for such purpose." 
+#~ msgstr "如果您想创建自己的 Actor 类,可能需要传递一些输入参数。为此,您可以使用本字典。" #~ msgid "" -#~ ":py:obj:`Driver `\\ " -#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ "(default: \"DEFAULT\") Optional string " +#~ "(\"DEFAULT\" or \"SPREAD\") for the VCE" +#~ " to choose in which node the " +#~ "actor is placed. If you are an " +#~ "advanced user needed more control you" +#~ " can use lower-level scheduling " +#~ "strategies to pin actors to specific " +#~ "compute nodes (e.g. via " +#~ "NodeAffinitySchedulingStrategy). Please note this" +#~ " is an advanced feature. For all " +#~ "details, please refer to the Ray " +#~ "documentation: https://docs.ray.io/en/latest/ray-" +#~ "core/scheduling/index.html" #~ msgstr "" -#~ "Flower 1.0: ``start_server(..., " -#~ "config=flwr.server.ServerConfig(num_rounds=3, " -#~ "round_timeout=600.0), ...)``" +#~ "(默认:\"DEFAULT\")可选字符串(\"DEFAULT \"或 \"SPREAD\"),供 " +#~ "VCE " +#~ "选择将行为体放置在哪个节点上。如果你是需要更多控制权的高级用户,可以使用低级调度策略将actor固定到特定计算节点(例如,通过 " +#~ "NodeAffinitySchedulingStrategy)。请注意,这是一项高级功能。有关详细信息,请参阅 Ray " +#~ "文档:https://docs.ray.io/en/latest/ray-core/scheduling/index.html" -#~ msgid "`Driver` class provides an interface to the Driver API." -#~ msgstr "`Driver` 类为驱动程序 API 提供了一个接口。" +#~ msgid "**hist** -- Object containing metrics from training." +#~ msgstr "**hist** -- 包含训练指标的对象。" #~ msgid "" -#~ "The IPv4 or IPv6 address of the" -#~ " Driver API server. Defaults to " -#~ "`\"[::]:9091\"`." -#~ msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with FastAI to train a vision " +#~ "model on CIFAR-10." +#~ msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 FastAI 在 CIFAR-10 上训练视觉模型。" -#~ msgid ":py:obj:`close `\\ \\(\\)" -#~ msgstr "server.strategy.Strategy" +#~ msgid "Let's build a federated learning system using fastai and Flower!" +#~ msgstr "让我们用 fastai 和 Flower 建立一个联邦学习系统!" -#~ msgid "Disconnect from the SuperLink if connected." 
-#~ msgstr "如果已连接,请断开与超级链接的连接。" +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example `_ to learn more." +#~ msgstr "" +#~ "请参阅 `完整代码示例 " +#~ "`_了解更多信息。" -#~ msgid "start\\_driver" -#~ msgstr "启动客户端" +#~ msgid "" +#~ "Let's build a federated learning system" +#~ " using Hugging Face Transformers and " +#~ "Flower!" +#~ msgstr "让我们用Hugging Face Transformers和Flower来构建一个联邦学习系统!" + +#~ msgid "Dependencies" +#~ msgstr "依赖关系" #~ msgid "" -#~ "The IPv4 or IPv6 address of the" -#~ " Driver API server. Defaults to " -#~ "`\"[::]:8080\"`." -#~ msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" +#~ "To follow along this tutorial you " +#~ "will need to install the following " +#~ "packages: :code:`datasets`, :code:`evaluate`, " +#~ ":code:`flwr`, :code:`torch`, and " +#~ ":code:`transformers`. This can be done " +#~ "using :code:`pip`:" +#~ msgstr "" +#~ "要学习本教程,您需要安装以下软件包: :code:`datasets`、 :code:`evaluate`、 " +#~ ":code:`flwr`、 :code:`torch`和 :code:`transformers`。这可以通过" +#~ " :code:`pip` 来完成:" + +#~ msgid "Standard Hugging Face workflow" +#~ msgstr "标准Hugging Face工作流程" + +#~ msgid "Handling the data" +#~ msgstr "处理数据" #~ msgid "" -#~ "A server implementation, either " -#~ "`flwr.server.Server` or a subclass thereof." -#~ " If no instance is provided, then " -#~ "`start_driver` will create one." -#~ msgstr "服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例,`start_server` 将创建一个。" +#~ "To fetch the IMDB dataset, we will" +#~ " use Hugging Face's :code:`datasets` " +#~ "library. We then need to tokenize " +#~ "the data and create :code:`PyTorch` " +#~ "dataloaders, this is all done in " +#~ "the :code:`load_data` function:" +#~ msgstr "" +#~ "为了获取 IMDB 数据集,我们将使用 Hugging Face 的 " +#~ ":code:`datasets` 库。然后,我们需要对数据进行标记化,并创建 :code:`PyTorch` " +#~ "数据加载器,这些都将在 :code:`load_data` 函数中完成:" + +#~ msgid "Training and testing the model" +#~ msgstr "训练和测试模型" #~ msgid "" -#~ "An implementation of the class " -#~ "`flwr.server.ClientManager`. 
If no implementation" -#~ " is provided, then `start_driver` will " -#~ "use `flwr.server.SimpleClientManager`." +#~ "Once we have a way of creating " +#~ "our trainloader and testloader, we can" +#~ " take care of the training and " +#~ "testing. This is very similar to " +#~ "any :code:`PyTorch` training or testing " +#~ "loop:" #~ msgstr "" -#~ "抽象基类 `flwr.server.ClientManager` " -#~ "的实现。如果没有提供实现,`start_server` 将使用 " -#~ "`flwr.server.client_manager.SimpleClientManager`。" +#~ "有了创建 trainloader 和 testloader " +#~ "的方法后,我们就可以进行训练和测试了。这与任何 :code:`PyTorch` 训练或测试循环都非常相似:" -#~ msgid "The Driver object to use." -#~ msgstr "要使用的驱动程序对象。" +#~ msgid "Creating the model itself" +#~ msgstr "创建模型本身" -#~ msgid "Starting a driver that connects to an insecure server:" -#~ msgstr "启动不安全的服务器:" +#~ msgid "" +#~ "To create the model itself, we " +#~ "will just load the pre-trained " +#~ "distillBERT model using Hugging Face’s " +#~ ":code:`AutoModelForSequenceClassification` :" +#~ msgstr "" +#~ "要创建模型本身,我们只需使用 Hugging Face 的 " +#~ ":code:`AutoModelForSequenceClassification` 加载预训练的 " +#~ "distillBERT 模型:" -#~ msgid "Starting a driver that connects to an SSL-enabled server:" -#~ msgstr "启动支持 SSL 的服务器:" +#~ msgid "Creating the IMDBClient" +#~ msgstr "创建 IMDBClient" #~ msgid "" -#~ ":py:obj:`run_simulation_from_cli " -#~ "`\\ \\(\\)" +#~ "To federate our example to multiple " +#~ "clients, we first need to write " +#~ "our Flower client class (inheriting from" +#~ " :code:`flwr.client.NumPyClient`). This is very" +#~ " easy, as our model is a " +#~ "standard :code:`PyTorch` model:" #~ msgstr "" +#~ "要将我们的示例联邦到多个客户端,我们首先需要编写 Flower 客户端类(继承自 " +#~ ":code:`flwr.client.NumPyClient`)。这很容易,因为我们的模型是一个标准的 " +#~ ":code:`PyTorch` 模型:" -#~ msgid "Run Simulation Engine from the CLI." +#~ msgid "" +#~ "The :code:`get_parameters` function lets the" +#~ " server get the client's parameters. 
" +#~ "Inversely, the :code:`set_parameters` function " +#~ "allows the server to send its " +#~ "parameters to the client. Finally, the" +#~ " :code:`fit` function trains the model " +#~ "locally for the client, and the " +#~ ":code:`evaluate` function tests the model " +#~ "locally and returns the relevant " +#~ "metrics." #~ msgstr "" +#~ ":code:`get_parameters` " +#~ "函数允许服务器获取客户端的参数。相反,:code:`set_parameters`函数允许服务器将其参数发送给客户端。最后,:code:`fit`函数在本地为客户端训练模型,:code:`evaluate`函数在本地测试模型并返回相关指标。" -#~ msgid "run\\_simulation\\_from\\_cli" -#~ msgstr "运行模拟" +#~ msgid "Starting the server" +#~ msgstr "启动服务器" #~ msgid "" -#~ "Check out this Federated Learning " -#~ "quickstart tutorial for using Flower " -#~ "with MXNet to train a Sequential " -#~ "model on MNIST." -#~ msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 MXNet 在 MNIST 上训练序列模型。" - -#~ msgid "Quickstart MXNet" -#~ msgstr "快速入门 MXNet" +#~ "Now that we have a way to " +#~ "instantiate clients, we need to create" +#~ " our server in order to aggregate " +#~ "the results. Using Flower, this can " +#~ "be done very easily by first " +#~ "choosing a strategy (here, we are " +#~ "using :code:`FedAvg`, which will define " +#~ "the global weights as the average " +#~ "of all the clients' weights at " +#~ "each round) and then using the " +#~ ":code:`flwr.server.start_server` function:" +#~ msgstr "" +#~ "现在我们有了实例化客户端的方法,我们需要创建服务器,以便汇总结果。使用 Flower,首先选择一个策略(这里我们使用 " +#~ ":code:`FedAvg`,它将把全局模型参数定义为每轮所有客户端模型参数的平均值),然后使用 " +#~ ":code:`flwr.server.start_server`函数,就可以非常轻松地完成这项工作:" #~ msgid "" -#~ "MXNet is no longer maintained and " -#~ "has been moved into `Attic " -#~ "`_. As a " -#~ "result, we would encourage you to " -#~ "use other ML frameworks alongside " -#~ "Flower, for example, PyTorch. This " -#~ "tutorial might be removed in future " -#~ "versions of Flower." 
+#~ "The :code:`weighted_average` function is there" +#~ " to provide a way to aggregate " +#~ "the metrics distributed amongst the " +#~ "clients (basically this allows us to " +#~ "display a nice average accuracy and " +#~ "loss for every round)." #~ msgstr "" +#~ "使用 :code:`weighted_average` " +#~ "函数是为了提供一种方法来汇总分布在客户端的指标(基本上,这可以让我们显示每一轮的平均精度和损失值)。" + +#~ msgid "Putting everything together" +#~ msgstr "把所有东西放在一起" + +#~ msgid "We can now start client instances using:" +#~ msgstr "现在我们可以使用:" #~ msgid "" -#~ "In this tutorial, we will learn " -#~ "how to train a :code:`Sequential` model" -#~ " on MNIST using Flower and MXNet." -#~ msgstr "在本教程中,我们将学习如何使用 Flower 和 MXNet 在 MNIST 上训练 :code:`Sequential` 模型。" +#~ "And they will be able to connect" +#~ " to the server and start the " +#~ "federated training." +#~ msgstr "他们就能连接到服务器,开始联邦训练。" -#~ msgid "Since we want to use MXNet, let's go ahead and install it:" -#~ msgstr "既然我们要使用 MXNet,那就继续安装吧:" +#~ msgid "" +#~ "If you want to check out " +#~ "everything put together, you should " +#~ "check out the `full code example " +#~ "`_ ." +#~ msgstr "" +#~ "如果您想查看所有内容,请查看完整的代码示例: " +#~ "[https://github.com/adap/flower/tree/main/examples/quickstart-" +#~ "huggingface](https://github.com/adap/flower/tree/main/examples" +#~ "/quickstart-huggingface)." #~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training with two " -#~ "clients and one server. Our training " -#~ "procedure and network architecture are " -#~ "based on MXNet´s `Hand-written Digit " -#~ "Recognition tutorial " -#~ "`_." +#~ "Of course, this is a very basic" +#~ " example, and a lot can be " +#~ "added or modified, it was just to" +#~ " showcase how simply we could " +#~ "federate a Hugging Face workflow using" +#~ " Flower." 
#~ msgstr "" -#~ "现在,我们已经安装了所有依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练程序和网络架构基于 " -#~ "MXNet 的 `手写数字识别教程 " -#~ "`_\"。" +#~ "当然,这只是一个非常基本的示例,还可以添加或修改很多内容,只是为了展示我们可以如何简单地使用 Flower " +#~ "联合Hugging Face的工作流程。" + +#~ msgid "" +#~ "Note that in this example we used" +#~ " :code:`PyTorch`, but we could have " +#~ "very well used :code:`TensorFlow`." +#~ msgstr "请注意,在本例中我们使用了 :code:`PyTorch`,但也完全可以使用 :code:`TensorFlow`。" #~ msgid "" -#~ "In a file called :code:`client.py`, " -#~ "import Flower and MXNet related " -#~ "packages:" -#~ msgstr "在名为 :code:`client.py` 的文件中,导入 Flower 和 MXNet 相关软件包:" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with PyTorch Lightning to train an " +#~ "Auto Encoder model on MNIST." +#~ msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 PyTorch Lightning 在 MNIST 上训练自动编码器模型。" -#~ msgid "In addition, define the device allocation in MXNet with:" -#~ msgstr "此外,还可以在 MXNet 中定义设备分配:" +#~ msgid "" +#~ "Let's build a horizontal federated " +#~ "learning system using PyTorch Lightning " +#~ "and Flower!" +#~ msgstr "让我们使用 PyTorch Lightning 和 Flower 构建一个水平联邦学习系统!" #~ msgid "" -#~ "We use MXNet to load MNIST, a " -#~ "popular image classification dataset of " -#~ "handwritten digits for machine learning. " -#~ "The MXNet utility :code:`mx.test_utils.get_mnist()`" -#~ " downloads the training and test " -#~ "data." +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." #~ msgstr "" -#~ "我们使用 MXNet 加载 MNIST,这是一个用于机器学习的流行手写数字图像分类数据集。MXNet" -#~ " 工具 :code:`mx.test_utils.get_mnist()` 会下载训练和测试数据。" +#~ "请参阅 `完整代码示例 " +#~ "`_ 了解更多信息。" -#~ msgid "" -#~ "Define the training and loss with " -#~ "MXNet. We train the model by " -#~ "looping over the dataset, measure the" -#~ " corresponding loss, and optimize it." -#~ msgstr "用 MXNet 定义训练和损失值。我们在数据集上循环训练模型,测量相应的损失值,并对其进行优化。" +#~ msgid "Let's build a federated learning system in less than 20 lines of code!" +#~ msgstr "让我们用不到 20 行代码构建一个联邦学习系统!" 
+ +#~ msgid "Before Flower can be imported we have to install it:" +#~ msgstr "在导入 Flower 之前,我们必须先安装它:" #~ msgid "" -#~ "Next, we define the validation of " -#~ "our machine learning model. We loop " -#~ "over the test set and measure both" -#~ " loss and accuracy on the test " -#~ "set." -#~ msgstr "接下来,我们定义机器学习模型的验证。我们在测试集上循环,测量测试集上的损失值和准确率。" +#~ "Since we want to use the Keras " +#~ "API of TensorFlow (TF), we have to" +#~ " install TF as well:" +#~ msgstr "由于我们要使用 TensorFlow (TF) 的 Keras API,因此还必须安装 TF:" + +#~ msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +#~ msgstr "接下来,在名为 :code:`client.py` 的文件中导入 Flower 和 TensorFlow:" #~ msgid "" -#~ "After defining the training and testing" -#~ " of a MXNet machine learning model," -#~ " we use these functions to implement" -#~ " a Flower client." -#~ msgstr "在定义了 MXNet 机器学习模型的训练和测试后,我们使用这些函数实现了 Flower 客户端。" +#~ "We use the Keras utilities of TF" +#~ " to load CIFAR10, a popular colored" +#~ " image classification dataset for machine" +#~ " learning. The call to " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` downloads " +#~ "CIFAR10, caches it locally, and then " +#~ "returns the entire training and test " +#~ "set as NumPy ndarrays." +#~ msgstr "" +#~ "我们使用 TF 的 Keras 实用程序加载 " +#~ "CIFAR10,这是一个用于机器学习的流行彩色图像分类数据集。调用 " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` 会下载 " +#~ "CIFAR10,将其缓存到本地,然后以 NumPy ndarrays 的形式返回整个训练集和测试集。" -#~ msgid "Our Flower clients will use a simple :code:`Sequential` model:" -#~ msgstr "我们的 Flower 客户端将使用简单的 :code:`Sequential` 模型:" +#~ msgid "" +#~ "Next, we need a model. For the " +#~ "purpose of this tutorial, we use " +#~ "MobilNetV2 with 10 output classes:" +#~ msgstr "接下来,我们需要一个模型。在本教程中,我们使用带有 10 个输出类的 MobilNetV2:" #~ msgid "" -#~ "After loading the dataset with " -#~ ":code:`load_data()` we perform one forward " -#~ "propagation to initialize the model and" -#~ " model parameters with :code:`model(init)`. " -#~ "Next, we implement a Flower client." 
+#~ "The Flower server interacts with clients" +#~ " through an interface called " +#~ ":code:`Client`. When the server selects " +#~ "a particular client for training, it " +#~ "sends training instructions over the " +#~ "network. The client receives those " +#~ "instructions and calls one of the " +#~ ":code:`Client` methods to run your code" +#~ " (i.e., to train the neural network" +#~ " we defined earlier)." #~ msgstr "" -#~ "使用 :code:`load_data()` 加载数据集后,我们会执行一次前向传播,使用 " -#~ ":code:`model(init)` 初始化模型和模型参数。接下来,我们实现一个 Flower " -#~ "客户端。" +#~ "Flower 服务器通过一个名为 :code:`Client` " +#~ "的接口与客户端交互。当服务器选择一个特定的客户端进行训练时,它会通过网络发送训练指令。客户端接收到这些指令后,会调用 " +#~ ":code:`Client` 方法之一来运行您的代码(即训练我们之前定义的神经网络)。" #~ msgid "" #~ "Flower provides a convenience class " #~ "called :code:`NumPyClient` which makes it " #~ "easier to implement the :code:`Client` " -#~ "interface when your workload uses MXNet." -#~ " Implementing :code:`NumPyClient` usually means" -#~ " defining the following methods " -#~ "(:code:`set_parameters` is optional though):" +#~ "interface when your workload uses Keras." +#~ " The :code:`NumPyClient` interface defines " +#~ "three methods which can be implemented" +#~ " in the following way:" #~ msgstr "" #~ "Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用" -#~ " MXNet 时,它可以让您更轻松地实现 :code:`Client` 接口。实现 " -#~ ":code:`NumPyClient` 通常意味着定义以下方法(:code:`set_parameters` " -#~ "是可选的):" - -#~ msgid "They can be implemented in the following way:" -#~ msgstr "它们可以通过以下方式实现:" +#~ " Keras 时,该类可以更轻松地实现 :code:`Client` " +#~ "接口。:code:`NumPyClient` 接口定义了三个方法,可以通过以下方式实现:" #~ msgid "" #~ "We can now create an instance of" -#~ " our class :code:`MNISTClient` and add " +#~ " our class :code:`CifarClient` and add " #~ "one line to actually run this " #~ "client:" -#~ msgstr "现在我们可以创建一个 :code:`MNISTClient` 类的实例,并添加一行来实际运行该客户端:" +#~ msgstr "现在我们可以创建一个 :code:`CifarClient` 类的实例,并添加一行来实际运行该客户端:" #~ msgid "" #~ "That's it for the client. 
We only" #~ " have to implement :code:`Client` or " #~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()` or " -#~ ":code:`fl.client.start_numpy_client()`. The string " -#~ ":code:`\"0.0.0.0:8080\"` tells the client " -#~ "which server to connect to. In our" -#~ " case we can run the server and" -#~ " the client on the same machine, " -#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" -#~ " we run a truly federated workload" -#~ " with the server and clients running" -#~ " on different machines, all that " -#~ "needs to change is the " -#~ ":code:`server_address` we pass to the " -#~ "client." +#~ ":code:`fl.client.start_client()`. If you implement" +#~ " a client of type :code:`NumPyClient` " +#~ "you'll need to first call its " +#~ ":code:`to_client()` method. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." #~ msgstr "" #~ "这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient`" #~ " 并调用 :code:`fl.client.start_client()` 或 " #~ ":code:`fl.client.start_numpy_client()`。字符串 " -#~ ":code:`\"0.0.0.0:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用" -#~ " " -#~ ":code:`\"0.0.0.0:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是传递给客户端的" +#~ ":code:`\"[::]:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此使用 " +#~ ":code:`\"[::]:8080\"。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是客户端指向的" #~ " :code:`server_address`。" +#~ msgid "Each client will have its own dataset." +#~ msgstr "每个客户都有自己的数据集。" + #~ msgid "" -#~ "With both client and server ready, " -#~ "we can now run everything and see" -#~ " federated learning in action. 
Federated" -#~ " learning systems usually have a " -#~ "server and multiple clients. We " -#~ "therefore have to start the server " -#~ "first:" -#~ msgstr "客户端和服务器都准备就绪后,我们现在就可以运行一切,看看联邦学习的运行情况。联邦学习系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" +#~ "You should now see how the " +#~ "training does in the very first " +#~ "terminal (the one that started the " +#~ "server):" +#~ msgstr "现在你应该能在第一个终端(启动服务器的终端)看到训练的效果了:" #~ msgid "" #~ "Congratulations! You've successfully built and" #~ " run your first federated learning " #~ "system. The full `source code " #~ "`_ for this example can " -#~ "be found in :code:`examples/quickstart-mxnet`." +#~ "tensorflow/client.py>`_ for this can be " +#~ "found in :code:`examples/quickstart-" +#~ "tensorflow/client.py`." #~ msgstr "" -#~ "恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " +#~ "恭喜您!您已经成功构建并运行了第一个联邦学习系统。`完整的源代码 " #~ "`_ 可在 :code:`examples/quickstart-" -#~ "mxnet` 中找到。" +#~ "tensorflow/client.py>`_ 可以在 :code:`examples/quickstart-" +#~ "tensorflow/client.py` 中找到。" -#~ msgid ":code:`load_mnist()`" -#~ msgstr ":code:`load_mnist()`" +#~ msgid "|e5918c1c06a4434bbe4bf49235e40059|" +#~ msgstr "" -#~ msgid "Loads the MNIST dataset using OpenML" -#~ msgstr "使用 OpenML 加载 MNIST 数据集" +#~ msgid "|c0165741bd1944f09ec55ce49032377d|" +#~ msgstr "" -#~ msgid ":code:`shuffle()`" -#~ msgstr ":code:`shuffle()`" +#~ msgid "|0a0ac9427ac7487b8e52d75ed514f04e|" +#~ msgstr "" -#~ msgid "Shuffles data and its label" -#~ msgstr "对数据及其标签进行洗牌" +#~ msgid "|5defee3ea4ca40d99fcd3e4ea045be25|" +#~ msgstr "" -#~ msgid ":code:`partition()`" -#~ msgstr ":code:`partition()`" +#~ msgid "|74f26ca701254d3db57d7899bd91eb55|" +#~ msgstr "" -#~ msgid "Splits datasets into a number of partitions" -#~ msgstr "将数据集分割成多个分区" +#~ msgid "|bda79f21f8154258a40e5766b2634ad7|" +#~ msgstr "" + +#~ msgid "|89d30862e62e4f9989e193483a08680a|" +#~ msgstr "" + +#~ msgid "|77e9918671c54b4f86e01369c0785ce8|" +#~ msgstr "" + +#~ msgid "|7e4ccef37cc94148a067107b34eb7447|" +#~ msgstr "" + +#~ msgid 
"|28e47e4cded14479a0846c8e5f22c872|" +#~ msgstr "" + +#~ msgid "|4b8c5d1afa144294b76ffc76e4658a38|" +#~ msgstr "" + +#~ msgid "|9dbdb3a0f6cb4a129fac863eaa414c17|" +#~ msgstr "" + +#~ msgid "|81749d0ac0834c36a83bd38f433fea31|" +#~ msgstr "" + +#~ msgid "|ed9aae51da70428eab7eef32f21e819e|" +#~ msgstr "" + +#~ msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" +#~ msgstr "" + +#~ msgid "|33cacb7d985c4906b348515c1a5cd993|" +#~ msgstr "" + +#~ msgid "|cc080a555947492fa66131dc3a967603|" +#~ msgstr "" + +#~ msgid "|085c3e0fb8664c6aa06246636524b20b|" +#~ msgstr "" + +#~ msgid "|bfe69c74e48c45d49b50251c38c2a019|" +#~ msgstr "" + +#~ msgid "|ebbecd651f0348d99c6511ea859bf4ca|" +#~ msgstr "" + +#~ msgid "|163117eb654a4273babba413cf8065f5|" +#~ msgstr "" + +#~ msgid "|452ac3ba453b4cd1be27be1ba7560d64|" +#~ msgstr "" + +#~ msgid "|f403fcd69e4e44409627e748b404c086|" +#~ msgstr "" + +#~ msgid "|4b00fe63870145968f8443619a792a42|" +#~ msgstr "" + +#~ msgid "|368378731066486fa4397e89bc6b870c|" +#~ msgstr "" + +#~ msgid "|a66aa83d85bf4ffba7ed660b718066da|" +#~ msgstr "" + +#~ msgid "|82324b9af72a4582a81839d55caab767|" +#~ msgstr "" + +#~ msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" +#~ msgstr "" #~ msgid "" -#~ "We load the MNIST dataset from " -#~ "`OpenML " -#~ "`_, a" -#~ " popular image classification dataset of" -#~ " handwritten digits for machine learning." -#~ " The utility :code:`utils.load_mnist()` downloads" -#~ " the training and test data. The " -#~ "training set is split afterwards into" -#~ " 10 partitions with :code:`utils.partition()`." +#~ "Some quickstart examples may have " +#~ "limitations or requirements that prevent " +#~ "them from running on every environment." +#~ " For more information, please see " +#~ "`Limitations`_." #~ msgstr "" -#~ "我们从 `OpenML `_ 中加载 " -#~ "MNIST 数据集,这是一个用于机器学习的流行手写数字图像分类数据集。实用程序 " -#~ ":code:`utils.load_mnist()` 下载训练和测试数据。然后使用 " -#~ ":code:`utils.partition()`将训练集分割成 10 个分区。" -#~ msgid "Let's get stated!" -#~ msgstr "让我们开始吧!" 
+#~ msgid "" +#~ "Change the application code. For " +#~ "example, change the ``seed`` in " +#~ "``quickstart_docker/task.py`` to ``43`` and " +#~ "save it:" +#~ msgstr "" -#~ msgid "|2b5c62c529f6416f840c594cce062fbb|" +#~ msgid ":code:`fit`" +#~ msgstr ":code:`fit`" + +#~ msgid "" +#~ "Note that since version :code:`1.11.0`, " +#~ ":code:`flower-server-app` no longer " +#~ "supports passing a reference to a " +#~ "`ServerApp` attribute. Instead, you need " +#~ "to pass the path to Flower app " +#~ "via the argument :code:`--app`. This is" +#~ " the path to a directory containing" +#~ " a `pyproject.toml`. You can create a" +#~ " valid Flower app by executing " +#~ ":code:`flwr new` and following the " +#~ "prompt." #~ msgstr "" -#~ msgid "|90b334680cb7467d9a04d39b8e8dca9f|" +#~ msgid "" +#~ "All required parameters defined above " +#~ "are passed to :code:`XgbClient`'s constructor." #~ msgstr "" -#~ msgid "|65764ceee89f4335bfd93fd0b115e831|" +#~ msgid "|b8714c45b74b4d8fb008e2ebb3bc1d44|" #~ msgstr "" -#~ msgid "|d97319ec28bb407ea0ab9705e38f3bcf|" +#~ msgid "|75f1561efcfd422ea67d28d1513120dc|" #~ msgstr "" -#~ msgid "|11e95ac83a8548d8b3505b4663187d07|" +#~ msgid "|6a1f51b235304558a9bdaaabfc93b8d2|" #~ msgstr "" -#~ msgid "|1dab2f3a23674abc8a6731f20fa10730|" +#~ msgid "|35e70dab1fb544af9aa3a9c09c4f9797|" #~ msgstr "" -#~ msgid "|7f0ee162da38450788493a21627306f7|" +#~ msgid "|d7efb5705dd3467f991ed23746824a07|" #~ msgstr "" -#~ msgid "|296a1fb72c514b23b3d8905ff0ff98c6|" +#~ msgid "|94e7b021c7b540bfbedf7f082a41ff87|" #~ msgstr "" -#~ msgid "|5b1408eec0d746cdb91162a9107b6089|" +#~ msgid "|a80714782dde439ab73936518f91fc3c|" #~ msgstr "" -#~ msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" +#~ msgid "|c62080ca6197473da57d191c8225a9d9|" #~ msgstr "" -#~ msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" +#~ msgid "|21a8f1e6a5b14a7bbb8559979d0e8a2b|" #~ msgstr "" -#~ msgid "|ec1fe880237247e0975f52766775ab84|" +#~ msgid "|c310f2a22f7b4917bf42775aae7a1c09|" #~ msgstr "" -#~ msgid 
"|9fdf048ed58d4467b2718cdf4aaf1ec3|" +#~ msgid "|a0c5b43401194535a8460bcf02e65f9a|" #~ msgstr "" -#~ msgid "|ff726bc5505e432388ee2fdd6ef420b9|" +#~ msgid "|aabfdbd5564e41a790f8ea93cc21a444|" +#~ msgstr "" + +#~ msgid "|c9cc8f160fa647b09e742fe4dc8edb54|" +#~ msgstr "" + +#~ msgid "|7e83aad011cd4907b2f02f907c6922e9|" +#~ msgstr "" + +#~ msgid "|4627c2bb6cc443ae9e079f81f33c9dd9|" +#~ msgstr "" + +#~ msgid "|131af8322dc5466b827afd24be98f8c0|" +#~ msgstr "" + +#~ msgid "|f92920b87f3a40179bf7ddd0b6144c53|" +#~ msgstr "" + +#~ msgid "|d62da263071d45a496f543e41fce3a19|" +#~ msgstr "" + +#~ msgid "|ad851971645b4e1fbf8d15bcc0b2ee11|" +#~ msgstr "" + +#~ msgid "|929e9a6de6b34edb8488e644e2bb5221|" +#~ msgstr "" + +#~ msgid "|404cf9c9e8d64784a55646c0f9479cbc|" +#~ msgstr "" + +#~ msgid "|b021ff9d25814458b1e631f8985a648b|" +#~ msgstr "" + +#~ msgid "|e6ca84e1df244f238288a768352678e5|" +#~ msgstr "" + +#~ msgid "|39c2422082554a21963baffb33a0d057|" +#~ msgstr "" + +#~ msgid "|07ecf5fcd6814e88906accec6fa0fbfb|" +#~ msgstr "" + +#~ msgid "|57e78c0ca8a94ba5a64a04b1f2280e55|" +#~ msgstr "" + +#~ msgid "|9819b40e59ee40a4921e1244e8c99bac|" +#~ msgstr "" + +#~ msgid "|797bf279c4894b5ead31dc9b0534ed62|" +#~ msgstr "" + +#~ msgid "|3a7aceef05f0421794726ac54aaf12fd|" +#~ msgstr "" + +#~ msgid "|d741075f8e624331b42c0746f7d258a0|" +#~ msgstr "" + +#~ msgid "|8fc92d668bcb42b8bda55143847f2329|" +#~ msgstr "" + +#~ msgid "|1c705d833a024f22adcaeb8ae3d13b0b|" +#~ msgstr "" + +#~ msgid "|77a037b546a84262b608e04bc82a2c96|" +#~ msgstr "" + +#~ msgid "|f568e24c9fb0435690ac628210a4be96|" +#~ msgstr "" + +#~ msgid "|a7bf029981514e2593aa3a2b48c9d76a|" +#~ msgstr "" + +#~ msgid "|3f645ad807f84be8b1f8f3267173939c|" +#~ msgstr "" + +#~ msgid "|a06a9dbd603f45819afd8e8cfc3c4b8f|" +#~ msgstr "" + +#~ msgid "|edcf9a04d96e42608fd01a333375febe|" +#~ msgstr "" + +#~ msgid "|3dae22fe797043968e2b7aa7073c78bd|" +#~ msgstr "" + +#~ msgid "|ba178f75267d4ad8aa7363f20709195f|" +#~ msgstr "" + +#~ msgid 
"|c380c750bfd2444abce039a1c6fa8e60|" +#~ msgstr "" + +#~ msgid "|e7cec00a114b48359935c6510595132e|" #~ msgstr "" diff --git a/doc/source/_static/docker-ci-release.png b/doc/source/_static/docker-ci-release.png deleted file mode 100644 index 6ec97ce9fb06..000000000000 Binary files a/doc/source/_static/docker-ci-release.png and /dev/null differ diff --git a/doc/source/_static/flower-architecture-ECE.png b/doc/source/_static/flower-architecture-ECE.png deleted file mode 100755 index 8ccc83469c5d..000000000000 Binary files a/doc/source/_static/flower-architecture-ECE.png and /dev/null differ diff --git a/doc/source/_static/flower-architecture-VCE.png b/doc/source/_static/flower-architecture-VCE.png deleted file mode 100755 index a7ff1a2c2ace..000000000000 Binary files a/doc/source/_static/flower-architecture-VCE.png and /dev/null differ diff --git a/doc/source/_static/flower-architecture-basic-architecture.svg b/doc/source/_static/flower-architecture-basic-architecture.svg new file mode 100644 index 000000000000..65d0ccc05e96 --- /dev/null +++ b/doc/source/_static/flower-architecture-basic-architecture.svg @@ -0,0 +1,4 @@ + + + +
 Client
SuperNode
ClientApp
 Client
SuperNode
ClientApp
 Client
SuperNode
ClientApp
 Server
ServerApp
SuperLink
\ No newline at end of file diff --git a/doc/source/_static/flower-architecture-deployment-engine.svg b/doc/source/_static/flower-architecture-deployment-engine.svg new file mode 100644 index 000000000000..2e8dbdfd2626 --- /dev/null +++ b/doc/source/_static/flower-architecture-deployment-engine.svg @@ -0,0 +1,4 @@ + + + +
 User
 Client
 Server
ServerApp
[run 1]
SuperLink
SuperExec



SuperNode
ServerApp
[run 2]
ClientApp
[run 1]
Deployment Engine Executor
flwr run
ClientApp
[run 2]
 Client
SuperNode
ClientApp
[run 1]
ClientApp
[run 2]
 Client
SuperNode
ClientApp
[run 1]
ClientApp
[run 2]
\ No newline at end of file diff --git a/doc/source/_static/flower-architecture-hub-and-spoke.svg b/doc/source/_static/flower-architecture-hub-and-spoke.svg new file mode 100644 index 000000000000..c97f74f2413d --- /dev/null +++ b/doc/source/_static/flower-architecture-hub-and-spoke.svg @@ -0,0 +1,4 @@ + + + +
 

Client
 

Server
 

Client
 

Client
 

Client
\ No newline at end of file diff --git a/doc/source/_static/flower-architecture-multi-run-1.svg b/doc/source/_static/flower-architecture-multi-run-1.svg new file mode 100644 index 000000000000..4e75224f5b59 --- /dev/null +++ b/doc/source/_static/flower-architecture-multi-run-1.svg @@ -0,0 +1,4 @@ + + + +
 Client
 Server
ServerApp
[run 1]
SuperLink
SuperNode
ServerApp
[run 2]
ClientApp
[run 1]
ClientApp
[run 2]
 Client
SuperNode
ClientApp
[run 1]
ClientApp
[run 2]
 Client
SuperNode
ClientApp
[run 1]
ClientApp
[run 2]
\ No newline at end of file diff --git a/doc/source/_static/flower-architecture-multi-run-2.svg b/doc/source/_static/flower-architecture-multi-run-2.svg new file mode 100644 index 000000000000..b6d20453e98f --- /dev/null +++ b/doc/source/_static/flower-architecture-multi-run-2.svg @@ -0,0 +1,4 @@ + + + +
 Client
 Server
ServerApp
[run 1]
SuperLink
SuperNode
ServerApp
[run 2]
ClientApp
[run 1]
ClientApp
[run 2]
 Client
SuperNode
ClientApp
[run 1]
ClientApp
[run 2]
 Client
SuperNode
ClientApp
[run 1]
ClientApp
\ No newline at end of file diff --git a/doc/source/_static/flower-architecture-multi-run.svg b/doc/source/_static/flower-architecture-multi-run.svg new file mode 100644 index 000000000000..91df0c514b52 --- /dev/null +++ b/doc/source/_static/flower-architecture-multi-run.svg @@ -0,0 +1,4 @@ + + + +
 Client
 Server
ServerApp
[run 1]
SuperLink
SuperNode
ServerApp
[run 2]
ClientApp
[run 1]
ClientApp
[run 2]
 Client
SuperNode
ClientApp
[run 1]
ClientApp
[run 2]
 Client
SuperNode
ClientApp
[run 1]
ClientApp
[run 2]
\ No newline at end of file diff --git a/doc/source/_static/flower-architecture.drawio.png b/doc/source/_static/flower-architecture.drawio.png deleted file mode 100755 index a9c3914a1839..000000000000 Binary files a/doc/source/_static/flower-architecture.drawio.png and /dev/null differ diff --git a/doc/source/_templates/base.html b/doc/source/_templates/base.html index 768c560f4f6a..925e40765b13 100644 --- a/doc/source/_templates/base.html +++ b/doc/source/_templates/base.html @@ -46,9 +46,7 @@ {#- Site title -#} {%- block htmltitle -%} - {% if versions %} - Flower Framework {{ current_version.url }} - {% elif not docstitle %} + {% if not docstitle %} {{ title|striptags|e }} {% elif pagename == master_doc %} {{ docstitle|striptags|e }} diff --git a/doc/source/_templates/sidebar/lang.html b/doc/source/_templates/sidebar/lang.html index b377a53f9c40..bbea57571838 100644 --- a/doc/source/_templates/sidebar/lang.html +++ b/doc/source/_templates/sidebar/lang.html @@ -1,9 +1,14 @@ {% if versions or lang %} - + - + {% endif %} diff --git a/doc/source/conf.py b/doc/source/conf.py index feb173c0efa8..6111a972218f 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -17,6 +17,7 @@ import datetime import os import sys + from git import Repo from sphinx.application import ConfigError @@ -63,11 +64,14 @@ # Make version list accessible for the html templates html_context["versions"] = list() -versions = [ - tag.name - for tag in repo.tags - if int(tag.name[1]) > 0 and int(tag.name.split(".")[1]) >= 5 -] +versions = sorted( + [ + tag.name + for tag in repo.tags + if int(tag.name[1]) > 0 and int(tag.name.split(".")[1]) >= 8 + ], + key=lambda x: [int(part) for part in x[1:].split(".")], +) versions.append("main") for version in versions: html_context["versions"].append({"name": version}) @@ -85,8 +89,18 @@ copyright = f"{datetime.date.today().year} Flower Labs GmbH" author = "The Flower Authors" -# The full version, including alpha/beta/rc tags -release = "1.9.0" +# The full 
version of the next release, including alpha/beta/rc tags +release = "1.13.0" +# The current released version +rst_prolog = """ +.. |stable_flwr_version| replace:: 1.12.0 +.. |stable_flwr_superlink_docker_digest| replace:: 4b317d5b6030710b476f4dbfab2c3a33021ad40a0fcfa54d7edd45e0c51d889c +.. |ubuntu_version| replace:: 24.04 +.. |setuptools_version| replace:: 70.3.0 +.. |pip_version| replace:: 24.1.2 +.. |python_version| replace:: 3.9 +.. |python_full_version| replace:: 3.9.20 +""" # -- General configuration --------------------------------------------------- @@ -108,6 +122,9 @@ "sphinxcontrib.youtube", "sphinx_reredirects", "nbsphinx", + "sphinx_click", + "sphinx_substitution_extensions", + "sphinxext.opengraph", ] # Generate .rst files @@ -235,8 +252,6 @@ def find_test_modules(package_path): "creating-new-messages": "contributor-how-to-create-new-messages.html", "write-documentation": "contributor-how-to-write-documentation.html", "release-process": "contributor-how-to-release-flower.html", - # Restructuring: contributor explanations - "architecture": "contributor-explanation-architecture.html", # Restructuring: contributor references "good-first-contributions": "contributor-ref-good-first-contributions.html", "secagg": "contributor-ref-secure-aggregation-protocols.html", @@ -248,7 +263,11 @@ def find_test_modules(package_path): "quickstart-mxnet": "index.html", "tutorial-quickstart-mxnet": "index.html", "example-mxnet-walk-through": "index.html", - "ref-api/flwr.simulation.run_simulation_from_cli.html": "index.html", + "ref-api/flwr.simulation.run_simulation_from_cli": "index.html", + "contributor-how-to-create-new-messages": "index.html", + "example-jax-from-centralized-to-federated": "tutorial-quickstart-jax.html", + "architecture": "explanation-flower-architecture.html", + "contributor-explanation-architecture.html": "explanation-flower-architecture.html", } # -- Options for HTML output ------------------------------------------------- @@ -257,7 +276,7 @@ def 
find_test_modules(package_path): # a list of builtin themes. # html_theme = "furo" -html_title = f"Flower Framework" +html_title = "Flower Framework" html_logo = "_static/flower-logo.png" html_favicon = "_static/favicon.ico" html_baseurl = "https://flower.ai/docs/framework/" diff --git a/doc/source/contributor-explanation-architecture.rst b/doc/source/contributor-explanation-architecture.rst deleted file mode 100644 index a20a84313118..000000000000 --- a/doc/source/contributor-explanation-architecture.rst +++ /dev/null @@ -1,26 +0,0 @@ -Flower Architecture -=================== - -Edge Client Engine ------------------- - -`Flower `_ core framework architecture with Edge Client Engine - -.. figure:: _static/flower-architecture-ECE.png - :width: 80 % - -Virtual Client Engine ---------------------- - -`Flower `_ core framework architecture with Virtual Client Engine - -.. figure:: _static/flower-architecture-VCE.png - :width: 80 % - -Virtual Client Engine and Edge Client Engine in the same workload ------------------------------------------------------------------ - -`Flower `_ core framework architecture with both Virtual Client Engine and Edge Client Engine - -.. figure:: _static/flower-architecture.drawio.png - :width: 80 % diff --git a/doc/source/contributor-explanation-public-and-private-apis.rst b/doc/source/contributor-explanation-public-and-private-apis.rst new file mode 100644 index 000000000000..ac62ae341f14 --- /dev/null +++ b/doc/source/contributor-explanation-public-and-private-apis.rst @@ -0,0 +1,142 @@ +Public and private APIs +======================= + +In Python, everything is public. To enable developers to understand which components can +be relied upon, Flower declares a public API. Components that are part of the public API +can be relied upon. Changes to the public API are announced in the release notes and are +subject to deprecation policies. + +Everything that is not part of the public API is part of the private API. 
Even though +Python allows accessing them, user code should never use those components. Private APIs +can change at any time, even in patch releases. + +How can you determine whether a component is part of the public API or not? Easy: + +- `Use the Flower API reference documentation `_ +- `Use the Flower CLI reference documentation `_ + +Everything listed in the reference documentation is part of the public API. This +document explains how Flower maintainers define the public API and how you can determine +whether a component is part of the public API or not by reading the Flower source code. + +Flower public API +----------------- + +Flower has a well-defined public API. Let's look at this in more detail. + +.. important:: + + Every component that is reachable by recursively following ``__init__.__all__`` + starting from the root package (``flwr``) is part of the public API. + +If you want to determine whether a component (class/function/generator/...) is part of +the public API or not, you need to start at the root of the ``flwr`` package. Let's use +``tree -L 1 -d src/py/flwr`` to look at the Python sub-packages contained ``flwr``: + +.. code-block:: bash + + flwr + ├── cli + ├── client + ├── common + ├── proto + ├── server + └── simulation + +Contrast this with the definition of ``__all__`` in the root +``src/py/flwr/__init__.py``: + +.. code-block:: python + + # From `flwr/__init__.py` + __all__ = [ + "client", + "common", + "server", + "simulation", + ] + +You can see that ``flwr`` has six subpackages (``cli``, ``client``, ``common``, +``proto``, ``server``, ``simulation``), but only four of them are "exported" via +``__all__`` (``client``, ``common``, ``server``, ``simulation``). + +What does this mean? It means that ``client``, ``common``, ``server`` and ``simulation`` +are part of the public API, but ``cli`` and ``proto`` are not. The ``flwr`` subpackages +``cli`` and ``proto`` are private APIs. 
A private API can change completely from one +release to the next (even in patch releases). It can change in a breaking way, it can be +renamed (for example, ``flwr.cli`` could be renamed to ``flwr.command``) and it can even +be removed completely. + +Therefore, as a Flower user: + +- ``from flwr import client`` ✅ Ok, you're importing a public API. +- ``from flwr import proto`` ❌ Not recommended, you're importing a private API. + +What about components that are nested deeper in the hierarchy? Let's look at Flower +strategies to see another typical pattern. Flower strategies like ``FedAvg`` are often +imported using ``from flwr.server.strategy import FedAvg``. Let's look at +``src/py/flwr/server/strategy/__init__.py``: + +.. code-block:: python + + from .fedavg import FedAvg as FedAvg + + # ... more imports + + __all__ = [ + "FedAvg", + # ... more exports + ] + +What's notable here is that all strategies are implemented in dedicated modules (e.g., +``fedavg.py``). In ``__init__.py``, we *import* the components we want to make part of +the public API and then *export* them via ``__all__``. Note that we export the component +itself (for example, the ``FedAvg`` class), but not the module it is defined in (for +example, ``fedavg.py``). This allows us to move the definition of ``FedAvg`` into a +different module (or even a module in a subpackage) without breaking the public API (as +long as we update the import path in ``__init__.py``). + +Therefore: + +- ``from flwr.server.strategy import FedAvg`` ✅ Ok, you're importing a class that is + part of the public API. +- ``from flwr.server.strategy import fedavg`` ❌ Not recommended, you're importing a + private module. + +This approach is also implemented in the tooling that automatically builds API reference +docs. + +Flower public API of private packages +------------------------------------- + +We also use this to define the public API of private subpackages. 
Public, in this +context, means the API that other ``flwr`` subpackages should use. For example, +``flwr.server.driver`` is a private subpackage (it's not exported via +``src/py/flwr/server/__init__.py``'s ``__all__``). + +Still, the private sub-package ``flwr.server.driver`` defines a "public" API using +``__all__`` in ``src/py/flwr/server/driver/__init__.py``: + +.. code-block:: python + + from .driver import Driver + from .grpc_driver import GrpcDriver + from .inmemory_driver import InMemoryDriver + + __all__ = [ + "Driver", + "GrpcDriver", + "InMemoryDriver", + ] + +The interesting part is that both ``GrpcDriver`` and ``InMemoryDriver`` are never used +by Flower framework users, only by other parts of the Flower framework codebase. Those +other parts of the codebase import, for example, ``InMemoryDriver`` using ``from +flwr.server.driver import InMemoryDriver`` (i.e., the ``InMemoryDriver`` exported via +``__all__``), not ``from flwr.server.driver.in_memory_driver import InMemoryDriver`` +(``in_memory_driver.py`` is the module containing the actual ``InMemoryDriver`` class +definition). + +This is because ``flwr.server.driver`` defines a public interface for other ``flwr`` +subpackages. This allows codeowners of ``flwr.server.driver`` to refactor the package +without breaking other ``flwr``-internal users. diff --git a/doc/source/contributor-how-to-build-docker-images.rst b/doc/source/contributor-how-to-build-docker-images.rst index 2efc739f54f0..0b3ce243ce50 100644 --- a/doc/source/contributor-how-to-build-docker-images.rst +++ b/doc/source/contributor-how-to-build-docker-images.rst @@ -1,137 +1,162 @@ -How to build Docker Flower images locally +How to Build Docker Flower Images Locally ========================================= Flower provides pre-made docker images on `Docker Hub `_ -that include all necessary dependencies for running the SuperLink, SuperNode or ServerApp. 
-You can also build your own custom docker images from scratch with a different version of Python -or Linux distribution (Ubuntu/Alpine) if that is what you need. In this guide, we will explain what -images exist and how to build them locally. +that include all necessary dependencies for running the SuperLink, SuperNode or +ServerApp. You can also build your own custom docker images from scratch with a +different version of Python or Linux distribution (Ubuntu/Alpine) if that is what you +need. In this guide, we will explain what images exist and how to build them locally. -Before we can start, we need to meet a few prerequisites in our local development environment. +Before we can start, we need to meet a few prerequisites in our local development +environment. -#. Clone the flower repository. +1. Clone the ``flower`` repository. - .. code-block:: bash + .. code-block:: bash - $ git clone https://github.com/adap/flower.git && cd flower + $ git clone --depth=1 https://github.com/adap/flower.git && cd flower -#. Verify the Docker daemon is running. +2. Verify the Docker daemon is running. - Please follow the first section on - :doc:`Run Flower using Docker ` - which covers this step in more detail. + The build instructions that assemble the images are located in the respective + Dockerfiles. You can find them in the subdirectories of ``src/docker``. + Flower Docker images are configured via build arguments. Through build arguments, we + can make the creation of images more flexible. For example, in the base image, we can + specify the version of Python to install using the ``PYTHON_VERSION`` build argument. + Some of the build arguments have default values, others must be specified when + building the image. All available build arguments for each image are listed in one of + the tables below. -The build instructions that assemble the images are located in the respective Dockerfiles. You -can find them in the subdirectories of ``src/docker``. 
- -Flower Docker images are configured via build arguments. Through build arguments, we can make the -creation of images more flexible. For example, in the base image, we can specify the version of -Python to install using the ``PYTHON_VERSION`` build argument. Some of the build arguments have -default values, others must be specified when building the image. All available build arguments for -each image are listed in one of the tables below. - -Building the base image +Building the Base Image ----------------------- .. list-table:: - :widths: 25 45 15 15 - :header-rows: 1 - - * - Build argument - - Description - - Required - - Example - * - ``DISTRO`` - - The Linux distribution to use as the base image. - - No - - ``ubuntu`` - * - ``DISTRO_VERSION`` - - Version of the Linux distribution. - - No - - ``22.04`` - * - ``PYTHON_VERSION`` - - Version of ``python`` to be installed. - - No - - ``3.11`` or ``3.11.1`` - * - ``PIP_VERSION`` - - Version of ``pip`` to be installed. - - Yes - - ``23.0.1`` - * - ``SETUPTOOLS_VERSION`` - - Version of ``setuptools`` to be installed. - - Yes - - ``69.0.2`` - * - ``FLWR_VERSION`` - - Version of Flower to be installed. - - Yes - - ``1.8.0`` - * - ``FLWR_PACKAGE`` - - The Flower package to be installed. - - No - - ``flwr`` or ``flwr-nightly`` - - -The following example creates a base Ubuntu/Alpine image with Python 3.11.0, pip 23.0.1, -setuptools 69.0.2 and Flower 1.8.0: + :widths: 25 45 15 15 + :header-rows: 1 + + - - Build argument + - Description + - Required + - Example + - - ``DISTRO`` + - The Linux distribution to use as the base image. + - No + - ``ubuntu`` + - - ``DISTRO_VERSION`` + - Version of the Linux distribution. + - No + - :substitution-code:`|ubuntu_version|` + - - ``PYTHON_VERSION`` + - Version of ``python`` to be installed. + - No + - ``3.11`` or ``3.11.1`` + - - ``PIP_VERSION`` + - Version of ``pip`` to be installed. 
+ - Yes + - :substitution-code:`|pip_version|` + - - ``SETUPTOOLS_VERSION`` + - Version of ``setuptools`` to be installed. + - Yes + - :substitution-code:`|setuptools_version|` + - - ``FLWR_VERSION`` + - Version of Flower to be installed. + - Yes + - :substitution-code:`|stable_flwr_version|` + - - ``FLWR_PACKAGE`` + - The Flower package to be installed. + - No + - ``flwr`` or ``flwr-nightly`` + - - ``FLWR_VERSION_REF`` + - A `direct reference + `_ + without the ``@`` specifier. If both ``FLWR_VERSION`` and ``FLWR_VERSION_REF`` + are specified, the ``FLWR_VERSION_REF`` has precedence. + - No + - `Direct Reference Examples`_ + +The following example creates a base Ubuntu/Alpine image with Python ``3.11.0``, pip +:substitution-code:`|pip_version|`, setuptools :substitution-code:`|setuptools_version|` +and Flower :substitution-code:`|stable_flwr_version|`: .. code-block:: bash + :substitutions: - $ cd src/docker/base/ - $ docker build \ - --build-arg PYTHON_VERSION=3.11.0 \ - --build-arg FLWR_VERSION=1.8.0 \ - --build-arg PIP_VERSION=23.0.1 \ - --build-arg SETUPTOOLS_VERSION=69.0.2 \ - -t flwr_base:0.1.0 . + $ cd src/docker/base/ + $ docker build \ + --build-arg PYTHON_VERSION=3.11.0 \ + --build-arg FLWR_VERSION=|stable_flwr_version| \ + --build-arg PIP_VERSION=|pip_version| \ + --build-arg SETUPTOOLS_VERSION=|setuptools_version| \ + -t flwr_base:0.1.0 . -The name of image is ``flwr_base`` and the tag ``0.1.0``. Remember that the build arguments as well -as the name and tag can be adapted to your needs. These values serve as examples only. +In this example, we specify our image name as ``flwr_base`` and the tag as ``0.1.0``. +Remember that the build arguments as well as the name and tag can be adapted to your +needs. These values serve as examples only. -Building the SuperLink/SuperNode or ServerApp image ---------------------------------------------------- +Building a Flower Binary Image +------------------------------ .. 
list-table:: - :widths: 25 45 15 15 - :header-rows: 1 - - * - Build argument - - Description - - Required - - Example - * - ``BASE_REPOSITORY`` - - The repository name of the base image. - - No - - ``flwr/base`` - * - ``BASE_IMAGE`` - - The Tag of the Flower base image. - - Yes - - ``1.8.0-py3.10-ubuntu22.04`` - -The following example creates a SuperLink/SuperNode or ServerApp image with the official Flower -base image: + :widths: 25 45 15 15 + :header-rows: 1 + + - - Build argument + - Description + - Required + - Example + - - ``BASE_REPOSITORY`` + - The repository name of the base image. + - No + - ``flwr/base`` + - - ``BASE_IMAGE`` + - The Tag of the Flower base image. + - Yes + - :substitution-code:`|stable_flwr_version|-py3.11-ubuntu|ubuntu_version|` + +For example, to build a SuperLink image with the latest Flower version, Python 3.11 and +Ubuntu 22.04, run the following: .. code-block:: bash + :substitutions: - $ cd src/docker// - $ docker build \ - --build-arg BASE_IMAGE=-py- \ - -t flwr_superlink:0.1.0 . - + $ cd src/docker/superlink + $ docker build \ + --build-arg BASE_IMAGE=|stable_flwr_version|-py3.11-ubuntu22.04 \ + -t flwr_superlink:0.1.0 . -If you want to use your own base image instead of the official Flower base image, all you need to do -is set the ``BASE_REPOSITORY`` build argument. +If you want to use your own base image instead of the official Flower base image, all +you need to do is set the ``BASE_REPOSITORY`` build argument to ``flwr_base`` (as we've +specified above). .. code-block:: bash - $ cd src/docker/superlink/ - $ docker build \ - --build-arg BASE_REPOSITORY=flwr_base \ - --build-arg BASE_IMAGE=0.1.0 - -t flwr_superlink:0.1.0 . + $ cd src/docker/superlink/ + $ docker build \ + --build-arg BASE_REPOSITORY=flwr_base \ + --build-arg BASE_IMAGE=0.1.0 + -t flwr_superlink:0.1.0 . After creating the image, we can test whether the image is working: .. 
code-block:: bash - $ docker run --rm flwr_superlink:0.1.0 --help + $ docker run --rm flwr_superlink:0.1.0 --help + +Direct Reference Examples +------------------------- + +.. code-block:: bash + :substitutions: + + # main branch + git+https://github.com/adap/flower.git@main + + # commit hash + git+https://github.com/adap/flower.git@1187c707f1894924bfa693d99611cf6f93431835 + + # tag + git+https://github.com/adap/flower.git@|stable_flwr_version| + + # artifact store + https://artifact.flower.ai/py/main/latest/flwr-|stable_flwr_version|-py3-none-any.whl diff --git a/doc/source/contributor-how-to-contribute-translations.rst b/doc/source/contributor-how-to-contribute-translations.rst index ba59901cf1c4..5fff62833b0e 100644 --- a/doc/source/contributor-how-to-contribute-translations.rst +++ b/doc/source/contributor-how-to-contribute-translations.rst @@ -2,70 +2,67 @@ Contribute translations ======================= Since `Flower 1.5 -`_ we -have introduced translations to our doc pages, but, as you might have noticed, -the translations are often imperfect. If you speak languages other than -English, you might be able to help us in our effort to make Federated Learning -accessible to as many people as possible by contributing to those translations! -This might also be a great opportunity for those wanting to become open source -contributors with little prerequisites. +`_ we have +introduced translations to our doc pages, but, as you might have noticed, the +translations are often imperfect. If you speak languages other than English, you might +be able to help us in our effort to make Federated Learning accessible to as many people +as possible by contributing to those translations! This might also be a great +opportunity for those wanting to become open source contributors with little +prerequisites. Our translation project is publicly available over on `Weblate -`_, this where most -of the work will happen. +`_, this where most of the +work will happen. 
Contribute to existing languages -------------------------------- .. youtube:: 10_Xfy5BOfQ - :width: 100% + :width: 100% -The first thing you will need to do in order to contribute is to create a -free Weblate account on this `page -`_. More information -about profile settings can be found `here +The first thing you will need to do in order to contribute is to create a free Weblate +account on this `page `_. More +information about profile settings can be found `here `_. -Once you are signed in to Weblate, you can navigate to the `Flower Framework -project `_. Here, -you should see the different existing languages that can be found on the -website. +Once you are signed in to Weblate, you can navigate to the `Flower Framework project +`_. Here, you should see the +different existing languages that can be found on the website. -Once you have selected the language you want to contribute to, you should see a -similar interface to this: +Once you have selected the language you want to contribute to, you should see a similar +interface to this: - .. image:: _static/weblate_status.png + .. image:: _static/weblate_status.png -The most straight forward option here is to click on the ``Translate`` button -on the top right (in the ``Translation status`` section). This will -automatically bring you to the translation interface for untranslated strings. +The most straight forward option here is to click on the ``Translate`` button on the top +right (in the ``Translation status`` section). This will automatically bring you to the +translation interface for untranslated strings. This is what the interface looks like: - .. image:: _static/weblate_interface.png + .. 
image:: _static/weblate_interface.png -You input your translation in the text box at the top and then, once you are -happy with it, you either press ``Save and continue`` (to save the translation -and go to the next untranslated string), ``Save and stay`` (to save the -translation and stay on the same page), ``Suggest`` (to add your translation to -suggestions for other users to view), or ``Skip`` (to go to the next -untranslated string without saving anything). +You input your translation in the text box at the top and then, once you are happy with +it, you either press ``Save and continue`` (to save the translation and go to the next +untranslated string), ``Save and stay`` (to save the translation and stay on the same +page), ``Suggest`` (to add your translation to suggestions for other users to view), or +``Skip`` (to go to the next untranslated string without saving anything). In order to help with the translations, you can see on the bottom the ``Nearby -strings``, the ``Comments`` (from other contributors), the ``Automatic -suggestions`` (from machine translation engines), the translations in ``Other -languages``, and the ``History`` of translations for this string. +strings``, the ``Comments`` (from other contributors), the ``Automatic suggestions`` +(from machine translation engines), the translations in ``Other languages``, and the +``History`` of translations for this string. -On the right, under the ``String information`` section, you can also click the -link under ``Source string location`` in order to view the source of the doc -file containing the string. +On the right, under the ``String information`` section, you can also click the link +under ``Source string location`` in order to view the source of the doc file containing +the string. -For more information about translating using Weblate, you can check out this -`in-depth guide `_. +For more information about translating using Weblate, you can check out this `in-depth +guide `_. 
Add new languages ----------------- -If you want to add a new language, you will first have to contact us, either on -`Slack `_, or by opening an issue on our `GitHub -repo `_. +If you want to add a new language, you will first have to contact us, either on `Slack +`_, or by opening an issue on our `GitHub repo +`_. diff --git a/doc/source/contributor-how-to-create-new-messages.rst b/doc/source/contributor-how-to-create-new-messages.rst deleted file mode 100644 index 3f1849bdce47..000000000000 --- a/doc/source/contributor-how-to-create-new-messages.rst +++ /dev/null @@ -1,153 +0,0 @@ -Creating New Messages -===================== - -This is a simple guide for creating a new type of message between the server and clients in Flower. - -Let's suppose we have the following example functions in :code:`server.py` and :code:`numpy_client.py`... - -Server's side: - -.. code-block:: python - - def example_request(self, client: ClientProxy) -> Tuple[str, int]: - question = "Could you find the sum of the list, Bob?" - l = [1, 2, 3] - return client.request(question, l) - -Client's side: - -.. code-block:: python - - def example_response(self, question: str, l: List[int]) -> Tuple[str, int]: - response = "Here you go Alice!" - answer = sum(question) - return response, answer - -Let's now see what we need to implement in order to get this simple function between the server and client to work! - - -Message Types for Protocol Buffers ----------------------------------- - -The first thing we need to do is to define a message type for the RPC system in :code:`transport.proto`. -Note that we have to do it for both the request and response messages. For more details on the syntax of proto3, please see the `official documentation `_. - -Within the :code:`ServerMessage` block: - -.. 
code-block:: proto - - message ExampleIns{ - string question=1; - repeated int64 l=2; - } - oneof msg { - ReconnectIns reconnect_ins = 1; - GetPropertiesIns get_properties_ins = 2; - GetParametersIns get_parameters_ins = 3; - FitIns fit_ins = 4; - EvaluateIns evaluate_ins = 5; - ExampleIns example_ins = 6; - } - -Within the ClientMessage block: - -.. code-block:: proto - - message ExampleRes{ - string response = 1; - int64 answer = 2; - } - - oneof msg { - DisconnectRes disconnect_res = 1; - GetPropertiesRes get_properties_res = 2; - GetParametersRes get_parameters_res = 3; - FitRes fit_res = 4; - EvaluateRes evaluate_res = 5; - ExampleRes examples_res = 6; - } - -Make sure to also add a field of the newly created message type in :code:`oneof msg`. - -Once that is done, we will compile the file with: - -.. code-block:: shell - - $ python -m flwr_tool.protoc - -If it compiles successfully, you should see the following message: - -.. code-block:: shell - - Writing mypy to flwr/proto/transport_pb2.pyi - Writing mypy to flwr/proto/transport_pb2_grpc.pyi - - -Serialization and Deserialization Functions --------------------------------------------- - -Our next step is to add functions to serialize and deserialize Python datatypes to or from our defined RPC message types. You should add these functions in :code:`serde.py`. - -The four functions: - -.. 
code-block:: python - - def example_msg_to_proto(question: str, l: List[int]) -> ServerMessage.ExampleIns: - return ServerMessage.ExampleIns(question=question, l=l) - - - def example_msg_from_proto(msg: ServerMessage.ExampleIns) -> Tuple[str, List[int]]: - return msg.question, msg.l - - - def example_res_to_proto(response: str, answer: int) -> ClientMessage.ExampleRes: - return ClientMessage.ExampleRes(response=response, answer=answer) - - - def example_res_from_proto(res: ClientMessage.ExampleRes) -> Tuple[str, int]: - return res.response, res.answer - - -Sending the Message from the Server ------------------------------------ - -Now write the request function in your Client Proxy class (e.g., :code:`grpc_client_proxy.py`) using the serde functions you just created: - -.. code-block:: python - - def request(self, question: str, l: List[int]) -> Tuple[str, int]: - request_msg = serde.example_msg_to_proto(question, l) - client_msg: ClientMessage = self.bridge.request( - ServerMessage(example_ins=request_msg) - ) - response, answer = serde.example_res_from_proto(client_msg.examples_res) - return response, answer - - -Receiving the Message by the Client ------------------------------------ - -Last step! Modify the code in :code:`message_handler.py` to check the field of your message and call the :code:`example_response` function. Remember to use the serde functions! - -Within the handle function: - -.. code-block:: python - - if server_msg.HasField("example_ins"): - return _example_response(client, server_msg.example_ins), 0, True - -And add a new function: - -.. code-block:: python - - def _example_response(client: Client, msg: ServerMessage.ExampleIns) -> ClientMessage: - question,l = serde.evaluate_ins_from_proto(msg) - response, answer = client.example_response(question,l) - example_res = serde.example_res_to_proto(response,answer) - return ClientMessage(examples_res=example_res) - -Hopefully, when you run your program you will get the intended result! - -.. 
code-block:: shell - - ('Here you go Alice!', 6) diff --git a/doc/source/contributor-how-to-develop-in-vscode-dev-containers.rst b/doc/source/contributor-how-to-develop-in-vscode-dev-containers.rst index c861457b6edc..79f52f8d8f6f 100644 --- a/doc/source/contributor-how-to-develop-in-vscode-dev-containers.rst +++ b/doc/source/contributor-how-to-develop-in-vscode-dev-containers.rst @@ -1,24 +1,47 @@ Develop in VSCode Dev Containers ================================ -When working on the Flower framework we want to ensure that all contributors use the same developer environment to format code or run tests. For this purpose we are using the VSCode Remote Containers extension. What is it? Read the following quote: - - - The Visual Studio Code Remote - Containers extension lets you use a Docker container as a fully-featured development environment. It allows you to open any folder inside (or mounted into) a container and take advantage of Visual Studio Code's full feature set. A :code:`devcontainer.json` file in your project tells VS Code how to access (or create) a development container with a well-defined tool and runtime stack. This container can be used to run an application or to separate tools, libraries, or runtimes needed for working with a codebase. - - Workspace files are mounted from the local file system or copied or cloned into the container. Extensions are installed and run inside the container, where they have full access to the tools, platform, and file system. This means that you can seamlessly switch your entire development environment just by connecting to a different container. - -Source: `Official VSCode documentation `_ - +When working on the Flower framework we want to ensure that all contributors use the +same developer environment to format code or run tests. For this purpose we are using +the VSCode Remote Containers extension. What is it? 
Read the following quote: + + The Visual Studio Code Remote - Containers extension lets you use a Docker container + as a fully-featured development environment. It allows you to open any folder inside + (or mounted into) a container and take advantage of Visual Studio Code's full + feature set. A ``devcontainer.json`` file in your project tells VS Code how to + access (or create) a development container with a well-defined tool and runtime + stack. This container can be used to run an application or to separate tools, + libraries, or runtimes needed for working with a codebase. + + Workspace files are mounted from the local file system or copied or cloned into the + container. Extensions are installed and run inside the container, where they have + full access to the tools, platform, and file system. This means that you can + seamlessly switch your entire development environment just by connecting to a + different container. + +Source: `Official VSCode documentation +`_ Getting started --------------- -Configuring and setting up the :code:`Dockerfile` as well the configuration for the devcontainer can be a bit more involved. The good thing is you don't have to do it. Usually it should be enough to install `Docker `_ on your system and ensure its available on your command line. Additionally, install the `VSCode Containers Extension `_. - -Now you should be good to go. When starting VSCode, it will ask you to run in the container environment and - if you confirm - automatically build the container and use it. To manually instruct VSCode to use the devcontainer, you can, after installing the extension, click the green area in the bottom left corner of your VSCode window and select the option *(Re)Open Folder in Container*. - -In some cases your setup might be more involved. 
For those cases consult the following sources: - -* `Developing inside a Container `_ -* `Remote development in Containers `_ +Configuring and setting up the ``Dockerfile`` as well the configuration for the +devcontainer can be a bit more involved. The good thing is you don't have to do it. +Usually it should be enough to install `Docker +`_ on your system and ensure its available on +your command line. Additionally, install the `VSCode Containers Extension +`_. + +Now you should be good to go. When starting VSCode, it will ask you to run in the +container environment and - if you confirm - automatically build the container and use +it. To manually instruct VSCode to use the devcontainer, you can, after installing the +extension, click the green area in the bottom left corner of your VSCode window and +select the option *(Re)Open Folder in Container*. + +In some cases your setup might be more involved. For those cases consult the following +sources: + +- `Developing inside a Container + `_ +- `Remote development in Containers + `_ diff --git a/doc/source/contributor-how-to-install-development-versions.rst b/doc/source/contributor-how-to-install-development-versions.rst index 15e2939ef138..61c123a24309 100644 --- a/doc/source/contributor-how-to-install-development-versions.rst +++ b/doc/source/contributor-how-to-install-development-versions.rst @@ -7,10 +7,13 @@ Install development versions of Flower Using Poetry (recommended) ~~~~~~~~~~~~~~~~~~~~~~~~~~ -Install a ``flwr`` pre-release from PyPI: update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall (don't forget to delete ``poetry.lock`` (``rm poetry.lock``) before running ``poetry install``). +Install a ``flwr`` pre-release from PyPI: update the ``flwr`` dependency in +``pyproject.toml`` and then reinstall (don't forget to delete ``poetry.lock`` (``rm +poetry.lock``) before running ``poetry install``). 
- ``flwr = { version = "1.0.0a0", allow-prereleases = true }`` (without extras) -- ``flwr = { version = "1.0.0a0", allow-prereleases = true, extras = ["simulation"] }`` (with extras) +- ``flwr = { version = "1.0.0a0", allow-prereleases = true, extras = ["simulation"] }`` + (with extras) Install ``flwr`` from a local copy of the Flower source code via ``pyproject.toml``: @@ -20,9 +23,11 @@ Install ``flwr`` from a local copy of the Flower source code via ``pyproject.tom Install ``flwr`` from a local wheel file via ``pyproject.toml``: - ``flwr = { path = "../../dist/flwr-1.8.0-py3-none-any.whl" }`` (without extras) -- ``flwr = { path = "../../dist/flwr-1.8.0-py3-none-any.whl", extras = ["simulation"] }`` (with extras) +- ``flwr = { path = "../../dist/flwr-1.8.0-py3-none-any.whl", extras = ["simulation"] + }`` (with extras) -Please refer to the Poetry documentation for further details: `Poetry Dependency Specification `_ +Please refer to the Poetry documentation for further details: `Poetry Dependency +Specification `_ Using pip (recommended on Colab) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -30,20 +35,23 @@ Using pip (recommended on Colab) Install a ``flwr`` pre-release from PyPI: - ``pip install -U --pre flwr`` (without extras) -- ``pip install -U --pre flwr[simulation]`` (with extras) +- ``pip install -U --pre 'flwr[simulation]'`` (with extras) -Python packages can be installed from git repositories. Use one of the following commands to install the Flower directly from GitHub. +Python packages can be installed from git repositories. Use one of the following +commands to install the Flower directly from GitHub. 
Install ``flwr`` from the default GitHub branch (``main``): - ``pip install flwr@git+https://github.com/adap/flower.git`` (without extras) -- ``pip install flwr[simulation]@git+https://github.com/adap/flower.git`` (with extras) +- ``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'`` (with + extras) Install ``flwr`` from a specific GitHub branch (``branch-name``): -- ``pip install flwr@git+https://github.com/adap/flower.git@branch-name`` (without extras) -- ``pip install flwr[simulation]@git+https://github.com/adap/flower.git@branch-name`` (with extras) - +- ``pip install flwr@git+https://github.com/adap/flower.git@branch-name`` (without + extras) +- ``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git@branch-name'`` + (with extras) Open Jupyter Notebooks on Google Colab -------------------------------------- @@ -52,12 +60,15 @@ Open the notebook ``doc/source/tutorial-series-get-started-with-flower-pytorch.i - https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb -Open a development version of the same notebook from branch `branch-name` by changing ``main`` to ``branch-name`` (right after ``blob``): +Open a development version of the same notebook from branch `branch-name` by changing +``main`` to ``branch-name`` (right after ``blob``): - https://colab.research.google.com/github/adap/flower/blob/branch-name/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb Install a `whl` on Google Colab: -1. In the vertical icon grid on the left hand side, select ``Files`` > ``Upload to session storage`` +1. In the vertical icon grid on the left hand side, select ``Files`` > ``Upload to + session storage`` 2. Upload the whl (e.g., ``flwr-1.8.0-py3-none-any.whl``) -3. Change ``!pip install -q 'flwr[simulation]' torch torchvision matplotlib`` to ``!pip install -q 'flwr-1.8.0-py3-none-any.whl[simulation]' torch torchvision matplotlib`` +3. 
Change ``!pip install -q 'flwr[simulation]' torch torchvision matplotlib`` to ``!pip + install -q 'flwr-1.8.0-py3-none-any.whl[simulation]' torch torchvision matplotlib`` diff --git a/doc/source/contributor-how-to-release-flower.rst b/doc/source/contributor-how-to-release-flower.rst index fc4c2d436b05..fafc02cab64c 100644 --- a/doc/source/contributor-how-to-release-flower.rst +++ b/doc/source/contributor-how-to-release-flower.rst @@ -1,35 +1,29 @@ Release Flower ============== -This document describes the current release process. It may or may not change in the future. +This document describes the current release process. It may or may not change in the +future. During the release ------------------ -The version number of a release is stated in ``pyproject.toml``. To release a new version of Flower, the following things need to happen (in that order): - -1. Run ``python3 src/py/flwr_tool/update_changelog.py `` in order to add every new change to the changelog (feel free to make manual changes to the changelog afterwards until it looks good). -2. Once the changelog has been updated with all the changes, run ``./dev/prepare-release-changelog.sh v``, where ```` is the version stated in ``pyproject.toml`` (notice the ``v`` added before it). This will replace the ``Unreleased`` header of the changelog by the version and current date, and it will add a thanking message for the contributors. Open a pull request with those changes. -3. Once the pull request is merged, tag the release commit with the version number as soon as the PR is merged: ``git tag v`` (notice the ``v`` added before the version number), then ``git push --tags``. This will create a draft release on GitHub containing the correct artifacts and the relevant part of the changelog. +The version number of a release is stated in ``pyproject.toml``. To release a new +version of Flower, the following things need to happen (in that order): + +1. 
Run ``python3 src/py/flwr_tool/update_changelog.py `` in order to add + every new change to the changelog (feel free to make manual changes to the changelog + afterwards until it looks good). +2. Once the changelog has been updated with all the changes, run + ``./dev/prepare-release-changelog.sh v``, where ```` is the + version stated in ``pyproject.toml`` (notice the ``v`` added before it). This will + replace the ``Unreleased`` header of the changelog by the version and current date, + and it will add a thanking message for the contributors. Open a pull request with + those changes. +3. Once the pull request is merged, tag the release commit with the version number as + soon as the PR is merged: ``git tag v`` (notice the ``v`` added before + the version number), then ``git push --tags``. This will create a draft release on + GitHub containing the correct artifacts and the relevant part of the changelog. 4. Check the draft release on GitHub, and if everything is good, publish it. -5. Trigger the CI for building the Docker images. - -To trigger the workflow, a collaborator must create a ``workflow_dispatch`` event in the -GitHub CI. This can be done either through the UI or via the GitHub CLI. The event requires only one -input, the Flower version, to be released. - -**Via the UI** - -1. Go to the ``Build docker images`` workflow `page `_. -2. Click on the ``Run workflow`` button and type the new version of Flower in the ``Version of Flower`` input field. -3. Click on the **green** ``Run workflow`` button. - -.. image:: _static/docker-ci-release.png - -**Via the GitHub CI** - -1. Make sure you are logged in via ``gh auth login`` and that the current working directory is the root of the Flower repository. -2. Trigger the workflow via ``gh workflow run docker-images.yml -f flwr-version=``. After the release ----------------- @@ -40,7 +34,8 @@ Create a pull request which contains the following changes: 2. 
Update all files which contain the current version number if necessary. 3. Add a new ``Unreleased`` section in ``changelog.md``. -Merge the pull request on the same day (i.e., before a new nightly release gets published to PyPI). +Merge the pull request on the same day (i.e., before a new nightly release gets +published to PyPI). Publishing a pre-release ------------------------ @@ -48,7 +43,8 @@ Publishing a pre-release Pre-release naming ~~~~~~~~~~~~~~~~~~ -PyPI supports pre-releases (alpha, beta, release candidate). Pre-releases MUST use one of the following naming patterns: +PyPI supports pre-releases (alpha, beta, release candidate). Pre-releases MUST use one +of the following naming patterns: - Alpha: ``MAJOR.MINOR.PATCHaN`` - Beta: ``MAJOR.MINOR.PATCHbN`` @@ -61,19 +57,25 @@ Examples include: - ``1.0.0rc0`` - ``1.0.0rc1`` -This is in line with PEP-440 and the recommendations from the Python Packaging -Authority (PyPA): +This is in line with PEP-440 and the recommendations from the Python Packaging Authority +(PyPA): - `PEP-440 `_ -- `PyPA Choosing a versioning scheme `_ +- `PyPA Choosing a versioning scheme + `_ -Note that the approach defined by PyPA is not compatible with SemVer 2.0.0 spec, for details consult the `Semantic Versioning Specification `_ (specifically item 11 on precedence). +Note that the approach defined by PyPA is not compatible with SemVer 2.0.0 spec, for +details consult the `Semantic Versioning Specification +`_ (specifically item 11 on +precedence). Pre-release classification ~~~~~~~~~~~~~~~~~~~~~~~~~~ Should the next pre-release be called alpha, beta, or release candidate? 
-- RC: feature complete, no known issues (apart from issues that are classified as "won't fix" for the next stable release) - if no issues surface this will become the next stable release +- RC: feature complete, no known issues (apart from issues that are classified as "won't + fix" for the next stable release) - if no issues surface this will become the next + stable release - Beta: feature complete, allowed to have known issues - Alpha: not feature complete, allowed to have known issues diff --git a/doc/source/contributor-how-to-set-up-a-virtual-env.rst b/doc/source/contributor-how-to-set-up-a-virtual-env.rst index 8b684e24c658..7e54ed64c9c9 100644 --- a/doc/source/contributor-how-to-set-up-a-virtual-env.rst +++ b/doc/source/contributor-how-to-set-up-a-virtual-env.rst @@ -1,26 +1,33 @@ Set up a virtual env ==================== -It is recommended to run your Python setup within a virtual environment. -This guide shows three different examples how to create a virtual environment with pyenv virtualenv, poetry, or Anaconda. -You can follow the instructions or choose your preferred setup. +It is recommended to run your Python setup within a virtual environment. This guide +shows three different examples how to create a virtual environment with pyenv +virtualenv, poetry, or Anaconda. You can follow the instructions or choose your +preferred setup. Python Version -------------- -Flower requires at least `Python 3.8 `_, but `Python 3.10 `_ or above is recommended. +Flower requires at least `Python 3.9 `_, but `Python 3.10 +`_ or above is recommended. .. note:: - Due to a known incompatibility with `ray `_, - we currently recommend utilizing at most `Python 3.11 `_ for - running Flower simulations. + + Due to a known incompatibility with `ray `_, we + currently recommend utilizing at most `Python 3.11 `_ + for running Flower simulations. 
Virtualenv with Pyenv/Virtualenv -------------------------------- -One of the recommended virtual environment is `pyenv `_/`virtualenv `_. Please see `Flower examples `_ for details. +One of the recommended virtual environment is `pyenv +`_/`virtualenv +`_. Please see `Flower examples +`_ for details. -Once Pyenv is set up, you can use it to install `Python Version 3.10 `_ or above: +Once Pyenv is set up, you can use it to install `Python Version 3.10 +`_ or above: .. code-block:: shell @@ -32,34 +39,35 @@ Create the virtualenv with: pyenv virtualenv 3.10.12 flower-3.10.12 - Activate the virtualenv by running the following command: .. code-block:: shell echo flower-3.10.12 > .python-version - Virtualenv with Poetry ---------------------- -The Flower examples are based on `Poetry `_ to manage dependencies. After installing Poetry you simply create a virtual environment with: +The Flower examples are based on `Poetry `_ to manage +dependencies. After installing Poetry you simply create a virtual environment with: .. code-block:: shell poetry shell -If you open a new terminal you can activate the previously created virtual environment with the following command: +If you open a new terminal you can activate the previously created virtual environment +with the following command: .. code-block:: shell source $(poetry env info --path)/bin/activate - Virtualenv with Anaconda ------------------------ -If you prefer to use Anaconda for your virtual environment then install and setup the `conda `_ package. After setting it up you can create a virtual environment with: +If you prefer to use Anaconda for your virtual environment then install and setup the +`conda `_ +package. After setting it up you can create a virtual environment with: .. code-block:: shell @@ -71,8 +79,8 @@ and activate the virtual environment with: conda activate flower-3.10.12 - And then? --------- -As soon as you created your virtual environment you clone one of the `Flower examples `_. 
+As soon as you created your virtual environment you clone one of the `Flower examples +`_. diff --git a/doc/source/contributor-how-to-write-documentation.rst b/doc/source/contributor-how-to-write-documentation.rst index fcd8c5bb18c6..6209530b71e0 100644 --- a/doc/source/contributor-how-to-write-documentation.rst +++ b/doc/source/contributor-how-to-write-documentation.rst @@ -1,14 +1,15 @@ Write documentation =================== - Project layout -------------- -The Flower documentation lives in the ``doc`` directory. The Sphinx-based documentation system supports both reStructuredText (``.rst`` files) and Markdown (``.md`` files). - -Note that, in order to build the documentation locally (with ``poetry run make html``, like described below), `Pandoc `_ needs to be installed on the system. +The Flower documentation lives in the ``doc`` directory. The Sphinx-based documentation +system supports both reStructuredText (``.rst`` files) and Markdown (``.md`` files). +Note that, in order to build the documentation locally (with ``poetry run make html``, +like described below), `Pandoc `_ needs to be +installed on the system. Edit an existing page --------------------- @@ -17,7 +18,6 @@ Edit an existing page 2. Compile the docs: ``cd doc``, then ``poetry run make html`` 3. Open ``doc/build/html/index.html`` in the browser to check the result - Create a new page ----------------- diff --git a/doc/source/contributor-ref-good-first-contributions.rst b/doc/source/contributor-ref-good-first-contributions.rst index 2b8ce88413f5..a715e006f905 100644 --- a/doc/source/contributor-ref-good-first-contributions.rst +++ b/doc/source/contributor-ref-good-first-contributions.rst @@ -1,41 +1,41 @@ Good first contributions ======================== -We welcome contributions to Flower! However, it is not always easy to know -where to start. We therefore put together a few recommendations on where to -start to increase your chances of getting your PR accepted into the Flower -codebase. 
- +We welcome contributions to Flower! However, it is not always easy to know where to +start. We therefore put together a few recommendations on where to start to increase +your chances of getting your PR accepted into the Flower codebase. Where to start -------------- -Until the Flower core library matures it will be easier to get PR's accepted if -they only touch non-core areas of the codebase. Good candidates to get started -are: +Until the Flower core library matures it will be easier to get PR's accepted if they +only touch non-core areas of the codebase. Good candidates to get started are: - Documentation: What's missing? What could be expressed more clearly? - Baselines: See below. - Examples: See below. - Request for Flower Baselines ---------------------------- -If you are not familiar with Flower Baselines, you should probably check-out our `contributing guide for baselines `_. +If you are not familiar with Flower Baselines, you should probably check-out our +`contributing guide for baselines +`_. -You should then check out the open -`issues `_ for baseline requests. -If you find a baseline that you'd like to work on and that has no assignees, feel free to assign it to yourself and start working on it! +You should then check out the open `issues +`_ +for baseline requests. If you find a baseline that you'd like to work on and that has no +assignees, feel free to assign it to yourself and start working on it! -Otherwise, if you don't find a baseline you'd like to work on, be sure to open a new issue with the baseline request template! +Otherwise, if you don't find a baseline you'd like to work on, be sure to open a new +issue with the baseline request template! Request for examples -------------------- -We wish we had more time to write usage examples because we believe they help -users to get started with building what they want to build. 
Here are a few -ideas where we'd be happy to accept a PR: +We wish we had more time to write usage examples because we believe they help users to +get started with building what they want to build. Here are a few ideas where we'd be +happy to accept a PR: - Llama 2 fine-tuning, with Hugging Face Transformers and PyTorch - XGBoost diff --git a/doc/source/contributor-ref-secure-aggregation-protocols.rst b/doc/source/contributor-ref-secure-aggregation-protocols.rst index 7107d04b8cd0..347cb2724424 100644 --- a/doc/source/contributor-ref-secure-aggregation-protocols.rst +++ b/doc/source/contributor-ref-secure-aggregation-protocols.rst @@ -1,13 +1,16 @@ Secure Aggregation Protocols ============================ -Include SecAgg, SecAgg+, and LightSecAgg protocol. The LightSecAgg protocol has not been implemented yet, so its diagram and abstraction may not be accurate in practice. -The SecAgg protocol can be considered as a special case of the SecAgg+ protocol. +Include SecAgg, SecAgg+, and LightSecAgg protocol. The LightSecAgg protocol has not been +implemented yet, so its diagram and abstraction may not be accurate in practice. The +SecAgg protocol can be considered as a special case of the SecAgg+ protocol. -The :code:`SecAgg+` abstraction -------------------------------- +The ``SecAgg+`` abstraction +--------------------------- -In this implementation, each client will be assigned with a unique index (int) for secure aggregation, and thus many python dictionaries used have keys of int type rather than ClientProxy type. +In this implementation, each client will be assigned with a unique index (int) for +secure aggregation, and thus many python dictionaries used have keys of int type rather +than ClientProxy type. .. 
code-block:: python @@ -15,9 +18,7 @@ In this implementation, each client will be assigned with a unique index (int) f """Abstract base class for the SecAgg+ protocol implementations.""" @abstractmethod - def generate_graph( - self, clients: List[ClientProxy], k: int - ) -> ClientGraph: + def generate_graph(self, clients: List[ClientProxy], k: int) -> ClientGraph: """Build a k-degree undirected graph of clients. Each client will only generate pair-wise masks with its k neighbours. k is equal to the number of clients in SecAgg, i.e., a complete graph. @@ -31,16 +32,16 @@ In this implementation, each client will be assigned with a unique index (int) f @abstractmethod def ask_keys( - self, - clients: List[ClientProxy], ask_keys_ins_list: List[AskKeysIns] + self, clients: List[ClientProxy], ask_keys_ins_list: List[AskKeysIns] ) -> AskKeysResultsAndFailures: """Ask public keys. (AskKeysIns is an empty class, and hence ask_keys_ins_list can be omitted.)""" @abstractmethod def share_keys( self, - clients: List[ClientProxy], public_keys_dict: Dict[int, AskKeysRes], - graph: ClientGraph + clients: List[ClientProxy], + public_keys_dict: Dict[int, AskKeysRes], + graph: ClientGraph, ) -> ShareKeysResultsAndFailures: """Send public keys.""" @@ -48,17 +49,18 @@ In this implementation, each client will be assigned with a unique index (int) f def ask_vectors( clients: List[ClientProxy], forward_packet_list_dict: Dict[int, List[ShareKeysPacket]], - client_instructions=None: Dict[int, FitIns] + client_instructions: Dict[int, FitIns] = None, ) -> AskVectorsResultsAndFailures: """Ask vectors of local model parameters. (If client_instructions is not None, local models will be trained in the ask vectors stage, - rather than trained parallelly as the protocol goes through the previous stages.)""" + rather than trained parallelly as the protocol goes through the previous stages.) 
+ """ @abstractmethod def unmask_vectors( clients: List[ClientProxy], dropout_clients: List[ClientProxy], - graph: ClientGraph + graph: ClientGraph, ) -> UnmaskVectorsResultsAndFailures: """Unmask and compute the aggregated model. UnmaskVectorRes contains shares of keys needed to generate masks.""" @@ -155,10 +157,12 @@ The Flower server will execute and process received results in the following ord deactivate P end -The :code:`LightSecAgg` abstraction ------------------------------------ +The ``LightSecAgg`` abstraction +------------------------------- -In this implementation, each client will be assigned with a unique index (int) for secure aggregation, and thus many python dictionaries used have keys of int type rather than ClientProxy type. +In this implementation, each client will be assigned with a unique index (int) for +secure aggregation, and thus many python dictionaries used have keys of int type rather +than ClientProxy type. .. code-block:: python @@ -174,7 +178,8 @@ In this implementation, each client will be assigned with a unique index (int) f @abstractmethod def ask_encrypted_encoded_masks( self, - clients: List[ClientProxy], public_keys_dict: Dict[int, LightSecAggSetupConfigRes] + clients: List[ClientProxy], + public_keys_dict: Dict[int, LightSecAggSetupConfigRes], ) -> AskEncryptedEncodedMasksResultsAndFailures: """Ask encrypted encoded masks. The protocol adopts Diffie-Hellman keys to build pair-wise secured channels to transfer encoded mask.""" @@ -183,15 +188,16 @@ In this implementation, each client will be assigned with a unique index (int) f self, clients: List[ClientProxy], forward_packet_list_dict: Dict[int, List[EncryptedEncodedMasksPacket]], - client_instructions=None: Dict[int, FitIns] + client_instructions: Dict[int, FitIns] = None, ) -> AskMaskedModelsResultsAndFailures: """Ask the masked local models. 
(If client_instructions is not None, local models will be trained in the ask vectors stage, - rather than trained parallelly as the protocol goes through the previous stages.)""" + rather than trained parallelly as the protocol goes through the previous stages.) + """ @abstractmethod def ask_aggregated_encoded_masks( - clients: List[ClientProxy] + clients: List[ClientProxy], ) -> AskAggregatedEncodedMasksResultsAndFailures: """Ask aggregated encoded masks""" @@ -272,158 +278,157 @@ Types .. code-block:: python - # the SecAgg+ protocol + # the SecAgg+ protocol + + ClientGraph = Dict[int, List[int]] - ClientGraph = Dict[int, List[int]] + SetupConfigResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, SetupConfigRes]], List[BaseException] + ] - SetupConfigResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, SetupConfigRes]], List[BaseException] - ] + AskKeysResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, AskKeysRes]], List[BaseException] + ] - AskKeysResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, AskKeysRes]], List[BaseException] - ] + ShareKeysResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, ShareKeysRes]], List[BaseException] + ] - ShareKeysResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, ShareKeysRes]], List[BaseException] - ] + AskVectorsResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, AskVectorsRes]], List[BaseException] + ] - AskVectorsResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, AskVectorsRes]], List[BaseException] - ] + UnmaskVectorsResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, UnmaskVectorsRes]], List[BaseException] + ] - UnmaskVectorsResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, UnmaskVectorsRes]], List[BaseException] - ] + FitResultsAndFailures = Tuple[List[Tuple[ClientProxy, FitRes]], List[BaseException]] - FitResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, FitRes]], List[BaseException] - ] + @dataclass + class SetupConfigIns: + sec_agg_cfg_dict: Dict[str, Scalar] - @dataclass - class 
SetupConfigIns: - sec_agg_cfg_dict: Dict[str, Scalar] + @dataclass + class SetupConfigRes: + pass - @dataclass - class SetupConfigRes: - pass + @dataclass + class AskKeysIns: + pass - @dataclass - class AskKeysIns: - pass + @dataclass + class AskKeysRes: + """Ask Keys Stage Response from client to server""" - @dataclass - class AskKeysRes: - """Ask Keys Stage Response from client to server""" - pk1: bytes - pk2: bytes + pk1: bytes + pk2: bytes - @dataclass - class ShareKeysIns: - public_keys_dict: Dict[int, AskKeysRes] + @dataclass + class ShareKeysIns: + public_keys_dict: Dict[int, AskKeysRes] - @dataclass - class ShareKeysPacket: - source: int - destination: int - ciphertext: bytes + @dataclass + class ShareKeysPacket: + source: int + destination: int + ciphertext: bytes - @dataclass - class ShareKeysRes: - share_keys_res_list: List[ShareKeysPacket] + @dataclass + class ShareKeysRes: + share_keys_res_list: List[ShareKeysPacket] - @dataclass - class AskVectorsIns: - ask_vectors_in_list: List[ShareKeysPacket] - fit_ins: FitIns + @dataclass + class AskVectorsIns: + ask_vectors_in_list: List[ShareKeysPacket] + fit_ins: FitIns - @dataclass - class AskVectorsRes: - parameters: Parameters + @dataclass + class AskVectorsRes: + parameters: Parameters - @dataclass - class UnmaskVectorsIns: - available_clients: List[int] - dropout_clients: List[int] + @dataclass + class UnmaskVectorsIns: + available_clients: List[int] + dropout_clients: List[int] - @dataclass - class UnmaskVectorsRes: - share_dict: Dict[int, bytes] + @dataclass + class UnmaskVectorsRes: + share_dict: Dict[int, bytes] - # the LightSecAgg protocol + # the LightSecAgg protocol - LightSecAggSetupConfigResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, LightSecAggSetupConfigRes]], List[BaseException] - ] + LightSecAggSetupConfigResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, LightSecAggSetupConfigRes]], List[BaseException] + ] - AskEncryptedEncodedMasksResultsAndFailures = Tuple[ - 
List[Tuple[ClientProxy, AskEncryptedEncodedMasksRes]], List[BaseException] - ] + AskEncryptedEncodedMasksResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, AskEncryptedEncodedMasksRes]], List[BaseException] + ] - AskMaskedModelsResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, AskMaskedModelsRes]], List[BaseException] - ] + AskMaskedModelsResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, AskMaskedModelsRes]], List[BaseException] + ] - AskAggregatedEncodedMasksResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, AskAggregatedEncodedMasksRes]], List[BaseException] - ] + AskAggregatedEncodedMasksResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, AskAggregatedEncodedMasksRes]], List[BaseException] + ] - @dataclass - class LightSecAggSetupConfigIns: - sec_agg_cfg_dict: Dict[str, Scalar] + @dataclass + class LightSecAggSetupConfigIns: + sec_agg_cfg_dict: Dict[str, Scalar] - @dataclass - class LightSecAggSetupConfigRes: - pk: bytes + @dataclass + class LightSecAggSetupConfigRes: + pk: bytes - @dataclass - class AskEncryptedEncodedMasksIns: - public_keys_dict: Dict[int, LightSecAggSetupConfigRes] + @dataclass + class AskEncryptedEncodedMasksIns: + public_keys_dict: Dict[int, LightSecAggSetupConfigRes] - @dataclass - class EncryptedEncodedMasksPacket: - source: int - destination: int - ciphertext: bytes + @dataclass + class EncryptedEncodedMasksPacket: + source: int + destination: int + ciphertext: bytes - @dataclass - class AskEncryptedEncodedMasksRes: - packet_list: List[EncryptedEncodedMasksPacket] + @dataclass + class AskEncryptedEncodedMasksRes: + packet_list: List[EncryptedEncodedMasksPacket] - @dataclass - class AskMaskedModelsIns: - packet_list: List[EncryptedEncodedMasksPacket] - fit_ins: FitIns + @dataclass + class AskMaskedModelsIns: + packet_list: List[EncryptedEncodedMasksPacket] + fit_ins: FitIns - @dataclass - class AskMaskedModelsRes: - parameters: Parameters + @dataclass + class AskMaskedModelsRes: + parameters: Parameters - @dataclass - 
class AskAggregatedEncodedMasksIns: - surviving_clients: List[int] + @dataclass + class AskAggregatedEncodedMasksIns: + surviving_clients: List[int] - @dataclass - class AskAggregatedEncodedMasksRes: - aggregated_encoded_mask: Parameters + @dataclass + class AskAggregatedEncodedMasksRes: + aggregated_encoded_mask: Parameters diff --git a/doc/source/contributor-tutorial-contribute-on-github.rst b/doc/source/contributor-tutorial-contribute-on-github.rst index 6970e7e8a580..22c6c6ef86b0 100644 --- a/doc/source/contributor-tutorial-contribute-on-github.rst +++ b/doc/source/contributor-tutorial-contribute-on-github.rst @@ -1,100 +1,113 @@ Contribute on GitHub ==================== -This guide is for people who want to get involved with Flower, but who are not used to contributing to GitHub projects. - -If you're familiar with how contributing on GitHub works, you can directly checkout our :doc:`getting started guide for contributors `. +This guide is for people who want to get involved with Flower, but who are not used to +contributing to GitHub projects. +If you're familiar with how contributing on GitHub works, you can directly checkout our +:doc:`getting started guide for contributors +`. Setting up the repository ------------------------- 1. **Create a GitHub account and setup Git** - Git is a distributed version control tool. This allows for an entire codebase's history to be stored and every developer's machine. - It is a software that will need to be installed on your local machine, you can follow this `guide `_ to set it up. - - GitHub, itself, is a code hosting platform for version control and collaboration. It allows for everyone to collaborate and work from anywhere on remote repositories. - - If you haven't already, you will need to create an account on `GitHub `_. 
- - The idea behind the generic Git and GitHub workflow boils down to this: - you download code from a remote repository on GitHub, make changes locally and keep track of them using Git and then you upload your new history back to GitHub. - + Git is a distributed version control tool. This allows for an entire codebase's + history to be stored on every developer's machine. It is software that will + need to be installed on your local machine, you can follow this `guide + `_ to + set it up. + + GitHub, itself, is a code hosting platform for version control and collaboration. + It allows for everyone to collaborate and work from anywhere on remote + repositories. + + If you haven't already, you will need to create an account on `GitHub + `_. + + The idea behind the generic Git and GitHub workflow boils down to this: you + download code from a remote repository on GitHub, make changes locally and keep + track of them using Git and then you upload your new history back to GitHub. 2. **Forking the Flower repository** - A fork is a personal copy of a GitHub repository. To create one for Flower, you must navigate to ``_ (while connected to your GitHub account) - and click the ``Fork`` button situated on the top right of the page. - - .. image:: _static/fork_button.png + A fork is a personal copy of a GitHub repository. To create one for Flower, you + must navigate to https://github.com/adap/flower (while connected to your GitHub + account) and click the ``Fork`` button situated on the top right of the page. - You can change the name if you want, but this is not necessary as this version of Flower will be yours and will sit inside your own account (i.e., in your own list of repositories). - Once created, you should see on the top left corner that you are looking at your own version of Flower. + .. image:: _static/fork_button.png - ..
image:: _static/fork_link.png + You can change the name if you want, but this is not necessary as this version of + Flower will be yours and will sit inside your own account (i.e., in your own list + of repositories). Once created, you should see on the top left corner that you + are looking at your own version of Flower. + .. image:: _static/fork_link.png 3. **Cloning your forked repository** - The next step is to download the forked repository on your machine to be able to make changes to it. - On your forked repository page, you should first click on the ``Code`` button on the right, - this will give you the ability to copy the HTTPS link of the repository. + The next step is to download the forked repository on your machine to be able to + make changes to it. On your forked repository page, you should first click on the + ``Code`` button on the right, this will give you the ability to copy the HTTPS + link of the repository. - .. image:: _static/cloning_fork.png + .. image:: _static/cloning_fork.png - Once you copied the \, you can open a terminal on your machine, navigate to the place you want to download the repository to and type: + Once you copied the \, you can open a terminal on your machine, navigate to + the place you want to download the repository to and type: - .. code-block:: shell + .. code-block:: shell - $ git clone - - This will create a ``flower/`` (or the name of your fork if you renamed it) folder in the current working directory. + $ git clone + This will create a ``flower/`` (or the name of your fork if you renamed it) + folder in the current working directory. 4. **Add origin** - You can then go into the repository folder: - - .. code-block:: shell - - $ cd flower + You can then go into the repository folder: - And here we will need to add an origin to our repository. The origin is the \ of the remote fork repository. 
- To obtain it, we can do as previously mentioned by going to our fork repository on our GitHub account and copying the link. + .. code-block:: shell - .. image:: _static/cloning_fork.png + $ cd flower - Once the \ is copied, we can type the following command in our terminal: + And here we will need to add an origin to our repository. The origin is the + \ of the remote fork repository. To obtain it, we can do as previously + mentioned by going to our fork repository on our GitHub account and copying the + link. - .. code-block:: shell + .. image:: _static/cloning_fork.png - $ git remote add origin + Once the \ is copied, we can type the following command in our terminal: + .. code-block:: shell + $ git remote add origin 5. **Add upstream** - Now we will add an upstream address to our repository. - Still in the same directory, we must run the following command: + Now we will add an upstream address to our repository. Still in the same + directory, we must run the following command: - .. code-block:: shell + .. code-block:: shell - $ git remote add upstream https://github.com/adap/flower.git + $ git remote add upstream https://github.com/adap/flower.git - The following diagram visually explains what we did in the previous steps: + The following diagram visually explains what we did in the previous steps: - .. image:: _static/github_schema.png + .. image:: _static/github_schema.png - The upstream is the GitHub remote address of the parent repository (in this case Flower), - i.e. the one we eventually want to contribute to and therefore need an up-to-date history of. - The origin is just the GitHub remote address of the forked repository we created, i.e. the copy (fork) in our own account. + The upstream is the GitHub remote address of the parent repository (in this case + Flower), i.e. the one we eventually want to contribute to and therefore need an + up-to-date history of. The origin is just the GitHub remote address of the forked + repository we created, i.e. 
the copy (fork) in our own account. - To make sure our local version of the fork is up-to-date with the latest changes from the Flower repository, - we can execute the following command: + To make sure our local version of the fork is up-to-date with the latest changes + from the Flower repository, we can execute the following command: - .. code-block:: shell - - $ git pull upstream main + .. code-block:: shell + $ git pull upstream main Setting up the coding environment --------------------------------- -This can be achieved by following this :doc:`getting started guide for contributors ` (note that you won't need to clone the repository). -Once you are able to write code and test it, you can finally start making changes! - +This can be achieved by following this :doc:`getting started guide for contributors +` (note that you won't need to clone +the repository). Once you are able to write code and test it, you can finally start +making changes! Making changes -------------- @@ -112,211 +125,233 @@ And with Flower's repository: $ git pull upstream main 1. **Create a new branch** - To make the history cleaner and easier to work with, it is good practice to - create a new branch for each feature/project that needs to be implemented. - - To do so, just run the following command inside the repository's directory: + To make the history cleaner and easier to work with, it is good practice to + create a new branch for each feature/project that needs to be implemented. - .. code-block:: shell + To do so, just run the following command inside the repository's directory: - $ git switch -c + .. code-block:: shell + $ git switch -c 2. **Make changes** - Write great code and create wonderful changes using your favorite editor! - + Write great code and create wonderful changes using your favorite editor! 3. **Test and format your code** - Don't forget to test and format your code! Otherwise your code won't be able to be merged into the Flower repository. 
- This is done so the codebase stays consistent and easy to understand. - - To do so, we have written a few scripts that you can execute: + Don't forget to test and format your code! Otherwise your code won't be able to + be merged into the Flower repository. This is done so the codebase stays + consistent and easy to understand. - .. code-block:: shell + To do so, we have written a few scripts that you can execute: - $ ./dev/format.sh # to format your code - $ ./dev/test.sh # to test that your code can be accepted - $ ./baselines/dev/format.sh # same as above but for code added to baselines - $ ./baselines/dev/test.sh # same as above but for code added to baselines + .. code-block:: shell + $ ./dev/format.sh # to format your code + $ ./dev/test.sh # to test that your code can be accepted + $ ./baselines/dev/format.sh # same as above but for code added to baselines + $ ./baselines/dev/test.sh # same as above but for code added to baselines 4. **Stage changes** - Before creating a commit that will update your history, you must specify to Git which files it needs to take into account. - - This can be done with: + Before creating a commit that will update your history, you must specify to Git + which files it needs to take into account. - .. code-block:: shell + This can be done with: - $ git add + .. code-block:: shell - To check which files have been modified compared to the last version (last commit) and to see which files are staged for commit, - you can use the :code:`git status` command. + $ git add + To check which files have been modified compared to the last version (last + commit) and to see which files are staged for commit, you can use the ``git + status`` command. 5. **Commit changes** - Once you have added all the files you wanted to commit using :code:`git add`, you can finally create your commit using this command: + Once you have added all the files you wanted to commit using ``git add``, you can + finally create your commit using this command: - .. 
code-block:: shell + .. code-block:: shell - $ git commit -m "" - - The \ is there to explain to others what the commit does. It should be written in an imperative style and be concise. - An example would be :code:`git commit -m "Add images to README"`. + $ git commit -m "" + The \ is there to explain to others what the commit does. It + should be written in an imperative style and be concise. An example would be + ``git commit -m "Add images to README"``. 6. **Push the changes to the fork** - Once we have committed our changes, we have effectively updated our local history, but GitHub has no way of knowing this unless we push - our changes to our origin's remote address: - - .. code-block:: shell + Once we have committed our changes, we have effectively updated our local + history, but GitHub has no way of knowing this unless we push our changes to our + origin's remote address: - $ git push -u origin + .. code-block:: shell - Once this is done, you will see on the GitHub that your forked repo was updated with the changes you have made. + $ git push -u origin + Once this is done, you will see on the GitHub that your forked repo was updated + with the changes you have made. Creating and merging a pull request (PR) ---------------------------------------- 1. **Create the PR** - Once you have pushed changes, on the GitHub webpage of your repository you should see the following message: - - .. image:: _static/compare_and_pr.png + Once you have pushed changes, on the GitHub webpage of your repository you should + see the following message: - Otherwise you can always find this option in the ``Branches`` page. + .. image:: _static/compare_and_pr.png - Once you click the ``Compare & pull request`` button, you should see something similar to this: + Otherwise you can always find this option in the ``Branches`` page. - .. 
image:: _static/creating_pr.png + Once you click the ``Compare & pull request`` button, you should see something + similar to this: - At the top you have an explanation of which branch will be merged where: + .. image:: _static/creating_pr.png - .. image:: _static/merging_branch.png + At the top you have an explanation of which branch will be merged where: - In this example you can see that the request is to merge the branch ``doc-fixes`` from my forked repository to branch ``main`` from the Flower repository. + .. image:: _static/merging_branch.png - The title should be changed to adhere to the :ref:`pr_title_format` guidelines, otherwise it won't be possible to merge the PR. So in this case, - a correct title might be ``docs(framework:skip) Fix typos``. + In this example you can see that the request is to merge the branch ``doc-fixes`` + from my forked repository to branch ``main`` from the Flower repository. - The input box in the middle is there for you to describe what your PR does and to link it to existing issues. - We have placed comments (that won't be rendered once the PR is opened) to guide you through the process. + The title should be changed to adhere to the :ref:`pr_title_format` guidelines, + otherwise it won't be possible to merge the PR. So in this case, a correct title + might be ``docs(framework:skip) Fix typos``. - It is important to follow the instructions described in comments. + The input box in the middle is there for you to describe what your PR does and to + link it to existing issues. We have placed comments (that won't be rendered once + the PR is opened) to guide you through the process. - At the bottom you will find the button to open the PR. This will notify reviewers that a new PR has been opened and - that they should look over it to merge or to request changes. + It is important to follow the instructions described in comments. 
- If your PR is not yet ready for review, and you don't want to notify anyone, you have the option to create a draft pull request: + At the bottom you will find the button to open the PR. This will notify reviewers + that a new PR has been opened and that they should look over it to merge or to + request changes. - .. image:: _static/draft_pr.png + If your PR is not yet ready for review, and you don't want to notify anyone, you + have the option to create a draft pull request: + .. image:: _static/draft_pr.png 2. **Making new changes** - Once the PR has been opened (as draft or not), you can still push new commits to it the same way we did before, by making changes to the branch associated with the PR. - + Once the PR has been opened (as draft or not), you can still push new commits to + it the same way we did before, by making changes to the branch associated with + the PR. 3. **Review the PR** - Once the PR has been opened or once the draft PR has been marked as ready, a review from code owners will be automatically requested: - - .. image:: _static/opened_pr.png - - Code owners will then look into the code, ask questions, request changes or validate the PR. + Once the PR has been opened or once the draft PR has been marked as ready, a + review from code owners will be automatically requested: - Merging will be blocked if there are ongoing requested changes. + .. image:: _static/opened_pr.png - .. image:: _static/changes_requested.png + Code owners will then look into the code, ask questions, request changes or + validate the PR. - To resolve them, just push the necessary changes to the branch associated with the PR: + Merging will be blocked if there are ongoing requested changes. - .. image:: _static/make_changes.png + .. image:: _static/changes_requested.png - And resolve the conversation: + To resolve them, just push the necessary changes to the branch associated with + the PR: - .. image:: _static/resolve_conv.png + .. 
image:: _static/make_changes.png - Once all the conversations have been resolved, you can re-request a review. + And resolve the conversation: + .. image:: _static/resolve_conv.png + Once all the conversations have been resolved, you can re-request a review. 4. **Once the PR is merged** - If all the automatic tests have passed and reviewers have no more changes to request, they can approve the PR and merge it. + If all the automatic tests have passed and reviewers have no more changes to + request, they can approve the PR and merge it. - .. image:: _static/merging_pr.png + .. image:: _static/merging_pr.png - Once it is merged, you can delete the branch on GitHub (a button should appear to do so) and also delete it locally by doing: + Once it is merged, you can delete the branch on GitHub (a button should appear to + do so) and also delete it locally by doing: - .. code-block:: shell + .. code-block:: shell - $ git switch main - $ git branch -D + $ git switch main + $ git branch -D - Then you should update your forked repository by doing: + Then you should update your forked repository by doing: - .. code-block:: shell - - $ git pull upstream main # to update the local repository - $ git push origin main # to push the changes to the remote repository + .. code-block:: shell + $ git pull upstream main # to update the local repository + $ git push origin main # to push the changes to the remote repository Example of first contribution ----------------------------- Problem -******* +~~~~~~~ -For our documentation, we've started to use the `Diàtaxis framework `_. +For our documentation, we've started to use the `Diátaxis framework +`_. -Our "How to" guides should have titles that continue the sentence "How to …", for example, "How to upgrade to Flower 1.0". +Our "How to" guides should have titles that continue the sentence "How to …", for +example, "How to upgrade to Flower 1.0".
-Most of our guides do not follow this new format yet, and changing their title is (unfortunately) more involved than one might think. +Most of our guides do not follow this new format yet, and changing their title is +(unfortunately) more involved than one might think. -This issue is about changing the title of a doc from present continuous to present simple. +This issue is about changing the title of a doc from present continuous to present +simple. -Let's take the example of "Saving Progress" which we changed to "Save Progress". Does this pass our check? +Let's take the example of "Saving Progress" which we changed to "Save Progress". Does +this pass our check? Before: "How to saving progress" ❌ After: "How to save progress" ✅ Solution -******** +~~~~~~~~ -This is a tiny change, but it'll allow us to test your end-to-end setup. After cloning and setting up the Flower repo, here's what you should do: +This is a tiny change, but it'll allow us to test your end-to-end setup. After cloning +and setting up the Flower repo, here's what you should do: - Find the source file in ``doc/source`` -- Make the change in the ``.rst`` file (beware, the dashes under the title should be the same length as the title itself) -- Build the docs and `check the result `_ +- Make the change in the ``.rst`` file (beware, the dashes under the title should be the + same length as the title itself) +- Build the docs and `check the result + `_ Rename file -::::::::::: ++++++++++++ -You might have noticed that the file name still reflects the old wording. -If we just change the file, then we break all existing links to it - it is **very important** to avoid that, breaking links can harm our search engine ranking. +You might have noticed that the file name still reflects the old wording. If we just +change the file, then we break all existing links to it - it is **very important** to +avoid that, breaking links can harm our search engine ranking. 
Here's how to change the file name: - Change the file name to ``save-progress.rst`` - Add a redirect rule to ``doc/source/conf.py`` -This will cause a redirect from ``saving-progress.html`` to ``save-progress.html``, old links will continue to work. +This will cause a redirect from ``saving-progress.html`` to ``save-progress.html``, old +links will continue to work. Apply changes in the index file -::::::::::::::::::::::::::::::: ++++++++++++++++++++++++++++++++ -For the lateral navigation bar to work properly, it is very important to update the ``index.rst`` file as well. -This is where we define the whole arborescence of the navbar. +For the lateral navigation bar to work properly, it is very important to update the +``index.rst`` file as well. This is where we define the whole arborescence of the +navbar. - Find and modify the file name in ``index.rst`` Open PR -::::::: ++++++++ -- Commit the changes (commit messages are always imperative: "Do something", in this case "Change …") +- Commit the changes (commit messages are always imperative: "Do something", in this + case "Change …") - Push the changes to your fork - Open a PR (as shown above) with title ``docs(framework) Update how-to guide title`` - Wait for it to be approved! - Congrats! 🥳 You're now officially a Flower contributor! - Next steps ---------- -Once you have made your first PR, and want to contribute more, be sure to check out the following : - -- :doc:`Good first contributions `, where you should particularly look into the :code:`baselines` contributions. +Once you have made your first PR, and want to contribute more, be sure to check out the +following: +- :doc:`Good first contributions `, where you + should particularly look into the ``baselines`` contributions. Appendix -------- @@ -324,7 +359,7 @@ Appendix ..
_pr_title_format: PR title format -*************** +~~~~~~~~~~~~~~~ We enforce the following PR title format: @@ -334,9 +369,10 @@ We enforce the following PR title format: (or ``(:skip) `` to ignore the PR in the changelog) -Where ```` needs to be in ``{ci, fix, feat, docs, refactor, break}``, ```` -should be in ``{framework, baselines, datasets, examples, or '*' when modifying multiple projects which requires the ':skip' flag to be used}``, -and ```` starts with a capitalised verb in the imperative mood. +Where ```` needs to be in ``{ci, fix, feat, docs, refactor, break}``, +```` should be in ``{framework, baselines, datasets, examples, or '*' when +modifying multiple projects which requires the ':skip' flag to be used}``, and +```` starts with a capitalised verb in the imperative mood. Valid examples: diff --git a/doc/source/contributor-tutorial-get-started-as-a-contributor.rst b/doc/source/contributor-tutorial-get-started-as-a-contributor.rst index 43f9739987ac..11b0d3760d4a 100644 --- a/doc/source/contributor-tutorial-get-started-as-a-contributor.rst +++ b/doc/source/contributor-tutorial-get-started-as-a-contributor.rst @@ -4,168 +4,195 @@ Get started as a contributor Prerequisites ------------- -- `Python 3.8 `_ or above +- `Python 3.9 `_ or above - `Poetry 1.3 `_ or above - (Optional) `pyenv `_ - (Optional) `pyenv-virtualenv `_ -Flower uses :code:`pyproject.toml` to manage dependencies and configure -development tools (the ones which support it). Poetry is a build tool which -supports `PEP 517 `_. - +Flower uses ``pyproject.toml`` to manage dependencies and configure development tools +(the ones which support it). Poetry is a build tool which supports `PEP 517 +`_. Developer Machine Setup ----------------------- -Preliminarities -~~~~~~~~~~~~~~~ +Preliminaries +~~~~~~~~~~~~~ + Some system-wide dependencies are needed. For macOS -^^^^^^^^^ ++++++++++ + +- Install `homebrew `_. Don't forget the post-installation actions to + add `brew` to your PATH. 
+- Install `xz` (to install different Python versions) and `pandoc` to build the docs: -* Install `homebrew `_. Don't forget the post-installation actions to add `brew` to your PATH. -* Install `xz` (to install different Python versions) and `pandoc` to build the - docs:: + :: - $ brew install xz pandoc + $ brew install xz pandoc For Ubuntu -^^^^^^^^^^ -Ensure you system (Ubuntu 22.04+) is up-to-date, and you have all necessary -packages:: +++++++++++ - $ apt update - $ apt install build-essential zlib1g-dev libssl-dev libsqlite3-dev \ - libreadline-dev libbz2-dev libffi-dev liblzma-dev pandoc +Ensure your system (Ubuntu 22.04+) is up-to-date, and you have all necessary packages: +:: + + $ apt update + $ apt install build-essential zlib1g-dev libssl-dev libsqlite3-dev \ + libreadline-dev libbz2-dev libffi-dev liblzma-dev pandoc Create Flower Dev Environment ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -1. Clone the `Flower repository `_ from -GitHub:: +1. Clone the `Flower repository `_ from GitHub: - $ git clone git@github.com:adap/flower.git - $ cd flower +:: -2. Let's create the Python environment for all-things Flower. If you wish to use :code:`pyenv`, we provide two convenience scripts that you can use. If you prefer using something else than :code:`pyenv`, create a new environment, activate and skip to the last point where all packages are installed. + $ git clone git@github.com:adap/flower.git + $ cd flower -* If you don't have :code:`pyenv` installed, the following script that will install it, set it up, and create the virtual environment (with :code:`Python 3.8.17` by default):: +2. Let's create the Python environment for all-things Flower. If you wish to use + ``pyenv``, we provide two convenience scripts that you can use. If you prefer using + something other than ``pyenv``, create a new environment, activate and skip to the + last point where all packages are installed.
- $ ./dev/setup-defaults.sh # once completed, run the bootstrap script +- If you don't have ``pyenv`` installed, the following script will install it, set + it up, and create the virtual environment (with :substitution-code:`Python + |python_full_version|` by default): -* If you already have :code:`pyenv` installed (along with the :code:`pyenv-virtualenv` plugin), you can use the following convenience script (with :code:`Python 3.8.17` by default):: + :: - $ ./dev/venv-create.sh # once completed, run the `bootstrap.sh` script + $ ./dev/setup-defaults.sh # once completed, run the bootstrap script -3. Install the Flower package in development mode (think -:code:`pip install -e`) along with all necessary dependencies:: +- If you already have ``pyenv`` installed (along with the ``pyenv-virtualenv`` plugin), + you can use the following convenience script (with :substitution-code:`Python + |python_full_version|` by default): - (flower-) $ ./dev/bootstrap.sh + :: + $ ./dev/venv-create.sh # once completed, run the `bootstrap.sh` script + +3. Install the Flower package in development mode (think ``pip install -e``) along with +all necessary dependencies: + +:: + + (flower-) $ ./dev/bootstrap.sh Convenience Scripts ------------------- -The Flower repository contains a number of convenience scripts to make -recurring development tasks easier and less error-prone. See the :code:`/dev` -subdirectory for a full list. The following scripts are amongst the most -important ones: +The Flower repository contains a number of convenience scripts to make recurring +development tasks easier and less error-prone. See the ``/dev`` subdirectory for a full +list. The following scripts are amongst the most important ones: Create/Delete Virtual Environment ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -:: +..
code-block:: shell + :substitutions: - $ ./dev/venv-create.sh # Default is 3.8.17 - $ ./dev/venv-delete.sh # Default is 3.8.17 + $ ./dev/venv-create.sh # Default is |python_full_version| + $ ./dev/venv-delete.sh # Default is |python_full_version| Compile ProtoBuf Definitions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :: - $ python -m flwr_tool.protoc + $ python -m flwr_tool.protoc Auto-Format Code ~~~~~~~~~~~~~~~~ :: - $ ./dev/format.sh + $ ./dev/format.sh Run Linters and Tests ~~~~~~~~~~~~~~~~~~~~~ :: - $ ./dev/test.sh + $ ./dev/test.sh Add a pre-commit hook ~~~~~~~~~~~~~~~~~~~~~ -Developers may integrate a pre-commit hook into their workflow utilizing the `pre-commit `_ library. The pre-commit hook is configured to execute two primary operations: ``./dev/format.sh`` and ``./dev/test.sh`` scripts. +Developers may integrate a pre-commit hook into their workflow utilizing the `pre-commit +`_ library. The pre-commit hook is configured to +execute two primary operations: ``./dev/format.sh`` and ``./dev/test.sh`` scripts. There are multiple ways developers can use this: 1. Install the pre-commit hook to your local git directory by simply running: :: - - $ pre-commit install - - Each ``git commit`` will trigger the execution of formatting and linting/test scripts. - - If in a hurry, bypass the hook using ``--no-verify`` with the ``git commit`` command. + $ pre-commit install + + - Each ``git commit`` will trigger the execution of formatting and linting/test + scripts. + - If in a hurry, bypass the hook using ``--no-verify`` with the ``git commit`` + command. + :: - - $ git commit --no-verify -m "Add new feature" - -2. For developers who prefer not to install the hook permanently, it is possible to execute a one-time check prior to committing changes by using the following command: - + + $ git commit --no-verify -m "Add new feature" + +2. 
For developers who prefer not to install the hook permanently, it is possible to + execute a one-time check prior to committing changes by using the following command: + :: - $ pre-commit run --all-files - - This executes the formatting and linting checks/tests on all the files without modifying the default behavior of ``git commit``. + $ pre-commit run --all-files + + This executes the formatting and linting checks/tests on all the files without + modifying the default behavior of ``git commit``. Run Github Actions (CI) locally ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Developers could run the full set of Github Actions workflows under their local -environment by using `Act `_. Please refer to -the installation instructions under the linked repository and run the next -command under Flower main cloned repository folder:: +environment by using `Act `_. Please refer to the +installation instructions under the linked repository and run the next command under +Flower main cloned repository folder: - $ act +:: -The Flower default workflow would run by setting up the required Docker -machines underneath. + $ act +The Flower default workflow would run by setting up the required Docker machines +underneath. Build Release ------------- -Flower uses Poetry to build releases. The necessary command is wrapped in a -simple script:: +Flower uses Poetry to build releases. The necessary command is wrapped in a simple +script: - $ ./dev/build.sh +:: -The resulting :code:`.whl` and :code:`.tar.gz` releases will be stored in the -:code:`/dist` subdirectory. + $ ./dev/build.sh +The resulting ``.whl`` and ``.tar.gz`` releases will be stored in the ``/dist`` +subdirectory. Build Documentation ------------------- Flower's documentation uses `Sphinx `_. 
There's no -convenience script to re-build the documentation yet, but it's pretty easy:: +convenience script to re-build the documentation yet, but it's pretty easy: + +:: - $ cd doc - $ make html + $ cd doc + $ make html This will generate HTML documentation in ``doc/build/html``. -Note that, in order to build the documentation locally -(with ``poetry run make html``, like described below), -`Pandoc `_ needs to be installed on the system. +Note that, in order to build the documentation locally (with ``poetry run make html``, +like described below), `Pandoc `_ needs to be +installed on the system. diff --git a/doc/source/docker/enable-tls.rst b/doc/source/docker/enable-tls.rst new file mode 100644 index 000000000000..f50edb8c651d --- /dev/null +++ b/doc/source/docker/enable-tls.rst @@ -0,0 +1,152 @@ +Enable TLS for Secure Connections +================================= + +When operating in a production environment, it is strongly recommended to enable +Transport Layer Security (TLS) for each Flower Component to ensure secure communication. + +To enable TLS, you will need a PEM-encoded root certificate, a PEM-encoded private key +and a PEM-encoded certificate chain. + +.. note:: + + For testing purposes, you can generate your own self-signed certificates. The + `Enable SSL connections + `__ + page contains a section that will guide you through the process. + +Because Flower containers, by default, run with a non-root user ``app``, the mounted +files and directories must have the proper permissions for the user ID ``49999``. + +For example, to change the user ID of all files in the ``certificates/`` directory, you +can run ``sudo chown -R 49999:49999 certificates/*``. + +If you later want to delete the directory, you can change the user ID back to the +current user ID by running ``sudo chown -R $USER:$(id -gn) state``. 
+ +SuperLink +--------- + +Assuming all files we need are in the local ``certificates`` directory, we can use the +flag ``--volume`` to mount the local directory into the ``/app/certificates/`` directory +of the container: + +.. code-block:: bash + :substitutions: + + $ docker run --rm \ + --volume ./certificates/:/app/certificates/:ro \ + flwr/superlink:|stable_flwr_version| \ + --ssl-ca-certfile certificates/ca.crt \ + --ssl-certfile certificates/server.pem \ + --ssl-keyfile certificates/server.key + +.. dropdown:: Understanding the command + + * ``docker run``: This tells Docker to run a container from an image. + * ``--rm``: Remove the container once it is stopped or the command exits. + * | ``--volume ./certificates/:/app/certificates/:ro``: Mount the ``certificates`` directory in + | the current working directory of the host machine as a read-only volume at the + | ``/app/certificates`` directory inside the container. + | + | This allows the container to access the TLS certificates that are stored in the certificates + | directory. + * | :substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of the image to be run and the specific + | tag of the image. The tag :substitution-code:`|stable_flwr_version|` represents a specific version of the image. + * | ``--ssl-ca-certfile certificates/ca.crt``: Specify the location of the CA certificate file + | inside the container. + | + | The ``certificates/ca.crt`` file is a certificate that is used to verify the identity of the + | SuperLink. + * | ``--ssl-certfile certificates/server.pem``: Specify the location of the SuperLink's + | TLS certificate file inside the container. + | + | The ``certificates/server.pem`` file is used to identify the SuperLink and to encrypt the + | data that is transmitted over the network. + * | ``--ssl-keyfile certificates/server.key``: Specify the location of the SuperLink's + | TLS private key file inside the container. 
+ | + | The ``certificates/server.key`` file is used to decrypt the data that is transmitted over + | the network. + +SuperNode +--------- + +Assuming that the ``ca.crt`` certificate already exists locally, we can use the flag +``--volume`` to mount the local certificate into the container's ``/app/`` directory. + +.. note:: + + If you're generating self-signed certificates and the ``ca.crt`` certificate doesn't + exist on the SuperNode, you can copy it over after the generation step. + +.. code-block:: bash + :substitutions: + + $ docker run --rm \ + --volume ./ca.crt:/app/ca.crt/:ro \ + flwr/supernode:|stable_flwr_version| \ + --root-certificates ca.crt + +.. dropdown:: Understanding the command + + * ``docker run``: This tells Docker to run a container from an image. + * ``--rm``: Remove the container once it is stopped or the command exits. + * | ``--volume ./ca.crt:/app/ca.crt/:ro``: Mount the ``ca.crt`` file from the + | current working directory of the host machine as a read-only volume at the ``/app/ca.crt`` + | directory inside the container. + * | :substitution-code:`flwr/supernode:|stable_flwr_version|`: The name of the image to be run and the specific + | tag of the image. The tag :substitution-code:`|stable_flwr_version|` represents a specific version of the image. + * | ``--root-certificates ca.crt``: This specifies the location of the CA certificate file + | inside the container. + | + | The ``ca.crt`` file is used to verify the identity of the SuperLink. + +SuperExec +--------- + +Assuming all files we need are in the local ``certificates`` directory where the +SuperExec will be executed from, we can use the flag ``--volume`` to mount the local +directory into the ``/app/certificates/`` directory of the container: + +.. 
code-block:: bash + :substitutions: + + $ docker run --rm \ + --volume ./certificates/:/app/certificates/:ro \ + flwr/superexec:|stable_flwr_version| \ + --ssl-ca-certfile certificates/ca.crt \ + --ssl-certfile certificates/server.pem \ + --ssl-keyfile certificates/server.key \ + --executor-config \ + root-certificates=\"certificates/superlink_ca.crt\" + +.. dropdown:: Understanding the command + + * ``docker run``: This tells Docker to run a container from an image. + * ``--rm``: Remove the container once it is stopped or the command exits. + * | ``--volume ./certificates/:/app/certificates/:ro``: Mount the ``certificates`` directory in + | the current working directory of the host machine as a read-only volume at the + | ``/app/certificates`` directory inside the container. + | + | This allows the container to access the TLS certificates that are stored in the certificates + | directory. + * | :substitution-code:`flwr/superexec:|stable_flwr_version|`: The name of the image to be run and the specific + | tag of the image. The tag :substitution-code:`|stable_flwr_version|` represents a specific version of the image. + * | ``--ssl-ca-certfile certificates/ca.crt``: Specify the location of the CA certificate file + | inside the container. + | + | The ``certificates/ca.crt`` file is a certificate that is used to verify the identity of the + | SuperExec. + * | ``--ssl-certfile certificates/server.pem``: Specify the location of the SuperExec's + | TLS certificate file inside the container. + | + | The ``certificates/server.pem`` file is used to identify the SuperExec and to encrypt the + | data that is transmitted over the network. + * | ``--ssl-keyfile certificates/server.key``: Specify the location of the SuperExec's + | TLS private key file inside the container. + | + | The ``certificates/server.key`` file is used to decrypt the data that is transmitted over + | the network. 
+ * | ``--executor-config root-certificates=\"certificates/superlink_ca.crt\"``: Specify the + | location of the CA certificate file inside the container that the SuperExec executor + | should use to verify the SuperLink's identity. diff --git a/doc/source/docker/index.rst b/doc/source/docker/index.rst new file mode 100644 index 000000000000..6449317ef19a --- /dev/null +++ b/doc/source/docker/index.rst @@ -0,0 +1,47 @@ +Run Flower using Docker +======================= + +Start your Flower journey with our pre-made Docker images on Docker Hub, supporting +``amd64`` and ``arm64v8`` architectures. + +Our Quickstart guide walks you through containerizing a Flower project and running it +end to end using Docker. + +Getting Started +--------------- + +.. toctree:: + :maxdepth: 1 + + tutorial-quickstart-docker + +Running in Production +--------------------- + +.. toctree:: + :maxdepth: 1 + + enable-tls + persist-superlink-state + +Advanced Options +---------------- + +.. toctree:: + :maxdepth: 1 + + set-environment-variables + run-as-root-user + run-as-subprocess + pin-version + use-a-different-version + +Run Flower using Docker Compose +------------------------------- + +.. toctree:: + :maxdepth: 1 + + tutorial-quickstart-docker-compose + run-quickstart-examples-docker-compose + tutorial-deploy-on-multiple-machines diff --git a/doc/source/docker/persist-superlink-state.rst b/doc/source/docker/persist-superlink-state.rst new file mode 100644 index 000000000000..214e408c44c3 --- /dev/null +++ b/doc/source/docker/persist-superlink-state.rst @@ -0,0 +1,40 @@ +Persist the State of the SuperLink +================================== + +By default, the Flower SuperLink keeps its state in-memory. When using the Docker flag +``--rm``, the state is not persisted between container starts. 
+ +If you want to persist the state of the SuperLink on your host system, all you need to +do is specify a directory where you want to save the file on your host system and a name +for the database file. + +By default, the SuperLink container runs with a non-root user called ``app`` with the +user ID ``49999``. It is recommended to create a new directory and change the user ID of +the directory to ``49999`` to ensure the mounted directory has the proper permissions. + +If you later want to delete the directory, you can change the user ID back to the +current user ID by running ``sudo chown -R $USER:$(id -gn) state``. + +Example +------- + +In the example below, we create a new directory called ``state``, change the user ID and +tell Docker via the flag ``--volume`` to mount the local ``state`` directory into the +``/app/state`` directory of the container. Lastly, we use the flag ``--database`` to +specify the name of the database file. + +.. code-block:: bash + :substitutions: + + $ mkdir state + $ sudo chown -R 49999:49999 state + $ docker run --rm \ + --volume ./state/:/app/state flwr/superlink:|stable_flwr_version| \ + --database state.db \ + ... + +As soon as the SuperLink starts, the file ``state.db`` is created in the ``state`` +directory on your host system. If the file already exists, the SuperLink tries to +restore the state from the file. To start the SuperLink with an empty database, ensure +that there is no database called ``state.db`` in the ``state`` directory (``rm +state.db``) before you execute the ``docker run`` command above. diff --git a/doc/source/docker/pin-version.rst b/doc/source/docker/pin-version.rst new file mode 100644 index 000000000000..4a69860aa428 --- /dev/null +++ b/doc/source/docker/pin-version.rst @@ -0,0 +1,37 @@ +Pin a Docker Image to a Specific Version +======================================== + +It may happen that we update the images behind the tags. 
Such updates usually include +security updates of system dependencies that should not change the functionality of +Flower. However, if you want to ensure that you use a fixed version of the Docker image +in your deployments, you can `specify the digest +`_ +of the image instead of the tag. + +Example +------- + +The following command returns the current image digest referenced by the +:substitution-code:`superlink:|stable_flwr_version|` tag: + +.. code-block:: bash + :substitutions: + + $ docker pull flwr/superlink:|stable_flwr_version| + $ docker inspect --format='{{index .RepoDigests 0}}' flwr/superlink:|stable_flwr_version| + +This will output + +.. code-block:: bash + :substitutions: + + flwr/superlink@sha256:|stable__flwr_superlink_docker_digest| + +Next, we can pin the digest when running a new SuperLink container: + +.. code-block:: bash + :substitutions: + + $ docker run \ + --rm flwr/superlink@sha256:|latest_version_docker_sha| \ + [OPTIONS] diff --git a/doc/source/docker/run-as-root-user.rst b/doc/source/docker/run-as-root-user.rst new file mode 100644 index 000000000000..5f8e5eae43af --- /dev/null +++ b/doc/source/docker/run-as-root-user.rst @@ -0,0 +1,46 @@ +Run with Root User Privileges +============================= + +Flower Docker images, by default, run with a non-root user (username/groupname: ``app``, +UID/GID: ``49999``). Using root user is **not recommended** unless it is necessary for +specific tasks during the build process. + +Always make sure to run the container as a non-root user in production to maintain +security best practices. + +Run a Container with Root User Privileges +----------------------------------------- + +Run the Docker image with the ``-u`` flag and specify ``root`` as the username: + +.. code-block:: bash + :substitutions: + + $ docker run --rm -u root flwr/superlink:|stable_flwr_version| + +This command will run the Docker container with root user privileges. 
+ +Run the Build Process with Root User Privileges +----------------------------------------------- + +If you want to switch to the root user during the build process of the Docker image to +install missing system dependencies, you can use the ``USER root`` directive within your +Dockerfile. + +.. code-block:: dockerfile + :caption: SuperNode Dockerfile + :substitutions: + + FROM flwr/supernode:|stable_flwr_version| + + # Switch to root user + USER root + + # Install missing dependencies (requires root access) + RUN apt-get update && apt-get install -y + + # Switch back to non-root user app + USER app + + # Continue with your Docker image build process + # ... diff --git a/doc/source/docker/run-as-subprocess.rst b/doc/source/docker/run-as-subprocess.rst new file mode 100644 index 000000000000..d97319ff52af --- /dev/null +++ b/doc/source/docker/run-as-subprocess.rst @@ -0,0 +1,53 @@ +Run ClientApp as a Subprocess +============================= + +In this mode, the ClientApp is executed as a subprocess within the SuperNode Docker +container, rather than running in a separate container. This approach reduces the number +of running containers, which can be beneficial for environments with limited resources. +However, it also means that the ClientApp is no longer isolated from the SuperNode, +which may introduce additional security concerns. + +Prerequisites +------------- + +1. Before running the ClientApp as a subprocess, ensure that the FAB dependencies have + been installed in the SuperNode images. This can be done by extending the SuperNode + image: + + .. code-block:: dockerfile + :caption: Dockerfile.supernode + :linenos: + :substitutions: + + FROM flwr/supernode:|stable_flwr_version| + + WORKDIR /app + COPY pyproject.toml . + RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + && python -m pip install -U --no-cache-dir . + + ENTRYPOINT ["flower-supernode"] + +2. 
Next, build the SuperNode Docker image by running the following command in the + directory where Dockerfile is located: + + .. code-block:: shell + + $ docker build -f Dockerfile.supernode -t flwr_supernode:0.0.1 . + +Run the ClientApp as a Subprocess +--------------------------------- + +Start the SuperNode with the flag ``--isolation subprocess``, which tells the SuperNode +to execute the ClientApp as a subprocess: + +.. code-block:: shell + + $ docker run --rm \ + --detach \ + flwr_supernode:0.0.1 \ + --insecure \ + --superlink superlink:9092 \ + --node-config "partition-id=1 num-partitions=2" \ + --supernode-address localhost:9094 \ + --isolation subprocess diff --git a/doc/source/docker/run-quickstart-examples-docker-compose.rst b/doc/source/docker/run-quickstart-examples-docker-compose.rst new file mode 100644 index 000000000000..b31f0035e143 --- /dev/null +++ b/doc/source/docker/run-quickstart-examples-docker-compose.rst @@ -0,0 +1,127 @@ +Run Flower Quickstart Examples with Docker Compose +================================================== + +Flower provides a set of `quickstart examples +`_ to help you get started with the +framework. These examples are designed to demonstrate the capabilities of Flower and by +default run using the Simulation Engine. This guide demonstrates how to run them using +Flower's Deployment Engine via Docker Compose. + +.. important:: + + Some quickstart examples may have limitations or requirements that prevent them from + running on every environment. For more information, please see Limitations_. + +Prerequisites +------------- + +Before you start, make sure that: + +- The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally. +- The Docker daemon is running. +- Docker Compose is `installed `_. + +Run the Quickstart Example +-------------------------- + +1. Clone the quickstart example you like to run. For example, ``quickstart-pytorch``: + + .. 
code-block:: bash + + $ git clone --depth=1 https://github.com/adap/flower.git \ + && mv flower/examples/quickstart-pytorch . \ + && rm -rf flower && cd quickstart-pytorch + +2. Download the `compose.yml + `_ file + into the example directory: + + .. code-block:: bash + + $ curl https://raw.githubusercontent.com/adap/flower/refs/heads/main/src/docker/complete/compose.yml \ + -o compose.yml + +3. Build and start the services using the following command: + + .. code-block:: bash + + $ docker compose up --build -d + +4. Append the following lines to the end of the ``pyproject.toml`` file and save it: + + .. code-block:: toml + :caption: pyproject.toml + + [tool.flwr.federations.local-deployment] + address = "127.0.0.1:9093" + insecure = true + + .. note:: + + You can customize the string that follows ``tool.flwr.federations.`` to fit your + needs. However, please note that the string cannot contain a dot (``.``). + + In this example, ``local-deployment`` has been used. Just remember to replace + ``local-deployment`` with your chosen name in both the ``tool.flwr.federations.`` + string and the corresponding ``flwr run .`` command. + +5. Run the example: + + .. code-block:: bash + + $ flwr run . local-deployment + +6. Follow the logs of the SuperExec service: + + .. code-block:: bash + + $ docker compose logs superexec -f + +That is all it takes! You can monitor the progress of the run through the logs of the +SuperExec. + +Run a Different Quickstart Example +---------------------------------- + +To run a different quickstart example, such as ``quickstart-tensorflow``, first, shut +down the Docker Compose services of the current example: + +.. code-block:: bash + + $ docker compose down + +After that, you can repeat the steps above. + +Limitations +----------- + +.. 
list-table:: + :header-rows: 1 + + - - Quickstart Example + - Limitations + - - quickstart-fastai + - None + - - quickstart-huggingface + - None + - - quickstart-jax + - The example has not yet been updated to work with the latest ``flwr`` version. + - - quickstart-mlcube + - The example has not yet been updated to work with the latest ``flwr`` version. + - - quickstart-mlx + - `Requires to run on macOS with Apple Silicon + `_. + - - quickstart-monai + - None + - - quickstart-pandas + - None + - - quickstart-pytorch-lightning + - Requires an older pip version that is not supported by the Flower Docker images. + - - quickstart-pytorch + - None + - - quickstart-sklearn-tabular + - None + - - quickstart-tabnet + - The example has not yet been updated to work with the latest ``flwr`` version. + - - quickstart-tensorflow + - Only runs on AMD64. diff --git a/doc/source/docker/set-environment-variables.rst b/doc/source/docker/set-environment-variables.rst new file mode 100644 index 000000000000..f5d860812bab --- /dev/null +++ b/doc/source/docker/set-environment-variables.rst @@ -0,0 +1,14 @@ +Set Environment Variables +========================= + +To set a variable inside a Docker container, you can use the ``-e =`` flag. +Multiple ``-e`` flags can be used to set multiple environment variables for a container. + +Example +------- + +.. code-block:: bash + :substitutions: + + $ docker run -e FLWR_TELEMETRY_ENABLED=0 -e FLWR_TELEMETRY_LOGGING=0 \ + --rm flwr/superlink:|stable_flwr_version| diff --git a/doc/source/docker/tutorial-deploy-on-multiple-machines.rst b/doc/source/docker/tutorial-deploy-on-multiple-machines.rst new file mode 100644 index 000000000000..72958c926ba9 --- /dev/null +++ b/doc/source/docker/tutorial-deploy-on-multiple-machines.rst @@ -0,0 +1,171 @@ +Deploy Flower on Multiple Machines with Docker Compose +====================================================== + +This guide will help you set up a Flower project on multiple machines using Docker +Compose. 
+ +You will learn how to run the Flower client and server components on two separate +machines, with Flower configured to use TLS encryption and persist SuperLink state +across restarts. A server consists of a SuperLink and ``SuperExec``. For more details +about the Flower architecture, refer to the :doc:`../explanation-flower-architecture` +explainer page. + +This guide assumes you have completed the :doc:`tutorial-quickstart-docker-compose` +tutorial. It is highly recommended that you follow and understand the contents of that +tutorial before proceeding with this guide. + +Prerequisites +------------- + +Before you begin, make sure you have the following prerequisites: + +- The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally. +- The Docker daemon is running on your local machine and the remote machine. +- Docker Compose V2 is installed on both your local machine and the remote machine. +- You can connect to the remote machine from your local machine. +- Ports ``9091`` and ``9093`` are accessible on the remote machine. + +.. note:: + + The guide uses the |quickstart_sklearn_tabular|_ example as an example project. + + If your project has a different name or location, please remember to adjust the + commands/paths accordingly. + +Step 1: Set Up +-------------- + +1. Clone the Flower repository and change to the ``distributed`` directory: + + .. code-block:: bash + + $ git clone --depth=1 https://github.com/adap/flower.git + $ cd flower/src/docker/distributed + +2. Get the IP address from the remote machine and save it for later. +3. Use the ``certs.yml`` Compose file to generate your own self-signed certificates. If + you have certificates, you can continue with Step 2. + + .. important:: + + These certificates should be used only for development purposes. + + For production environments, you may have to use dedicated services to obtain + your certificates. 
+ + First, set the environment variables ``SUPERLINK_IP`` and ``SUPEREXEC_IP`` with the + IP address from the remote machine. For example, if the IP is ``192.168.2.33``, + execute: + + .. code-block:: bash + + $ export SUPERLINK_IP=192.168.2.33 + $ export SUPEREXEC_IP=192.168.2.33 + + Next, generate the self-signed certificates: + + .. code-block:: bash + + $ docker compose -f certs.yml -f ../complete/certs.yml up --build + +Step 2: Copy the Server Compose Files +------------------------------------- + +Use the method that works best for you to copy the ``server`` directory, the +certificates, and your Flower project to the remote machine. + +For example, you can use ``scp`` to copy the directories: + +.. code-block:: bash + + $ scp -r ./server \ + ./superexec-certificates \ + ./superlink-certificates \ + ../../../examples/quickstart-sklearn-tabular remote:~/distributed + +Step 3: Start the Flower Server Components +------------------------------------------ + +Log into the remote machine using ``ssh`` and run the following command to start the +SuperLink and SuperExec services: + +.. code-block:: bash + + $ ssh <user>@<remote-machine-ip> + # In your remote machine + $ cd <path-to-the-distributed-directory> + $ export PROJECT_DIR=../quickstart-sklearn-tabular + $ docker compose -f server/compose.yml up --build -d + +.. note:: + + The path of the ``PROJECT_DIR`` should be relative to the location of the ``server`` + Docker Compose files. + +Go back to your terminal on your local machine. + +Step 4: Start the Flower Client Components +------------------------------------------ + +On your local machine, run the following command to start the client components: + +.. code-block:: bash + + # In the `docker/distributed` directory + $ export PROJECT_DIR=../../../../examples/quickstart-sklearn-tabular + $ docker compose -f client/compose.yml up --build -d + +.. note:: + + The path of the ``PROJECT_DIR`` should be relative to the location of the ``client`` + Docker Compose files. 
+ +Step 5: Run Your Flower Project +------------------------------- + +Specify the remote SuperExec IP address and the path to the root certificate in the +``[tool.flwr.federations.remote-superexec]`` table in the ``pyproject.toml`` file. Here, +we have named our remote federation ``remote-superexec``: + +.. code-block:: toml + :caption: examples/quickstart-sklearn-tabular/pyproject.toml + + [tool.flwr.federations.remote-superexec] + address = "192.168.2.33:9093" + root-certificates = "../../src/docker/distributed/superexec-certificates/ca.crt" + +.. note:: + + The path of the ``root-certificates`` should be relative to the location of the + ``pyproject.toml`` file. + +To run the project, execute: + +.. code-block:: bash + + $ flwr run ../../../examples/quickstart-sklearn-tabular remote-superexec + +That's it! With these steps, you've set up Flower on two separate machines and are ready +to start using it. + +Step 6: Clean Up +---------------- + +Shut down the Flower client components: + +.. code-block:: bash + + # In the `docker/distributed` directory + $ docker compose -f client/compose.yml down + +Shut down the Flower server components and delete the SuperLink state: + +.. code-block:: bash + + $ ssh <user>@<remote-machine-ip> + $ cd <path-to-the-distributed-directory> + $ docker compose -f server/compose.yml down -v + +.. |quickstart_sklearn_tabular| replace:: ``examples/quickstart-sklearn-tabular`` + +.. 
_quickstart_sklearn_tabular: https://github.com/adap/flower/tree/main/examples/quickstart-sklearn-tabular diff --git a/doc/source/docker/tutorial-quickstart-docker-compose.rst b/doc/source/docker/tutorial-quickstart-docker-compose.rst new file mode 100644 index 000000000000..bff3125c1b16 --- /dev/null +++ b/doc/source/docker/tutorial-quickstart-docker-compose.rst @@ -0,0 +1,410 @@ +Quickstart with Docker Compose +============================== + +This quickstart shows you how to set up Flower using Docker Compose in a single command, +allowing you to focus on developing your application without worrying about the +underlying infrastructure. + +You will also learn how to easily enable TLS encryption and persist application state +locally, giving you the freedom to choose the configuration that best suits your +project's needs. + +Prerequisites +------------- + +Before you start, make sure that: + +- The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally. +- The Docker daemon is running. +- Docker Compose is `installed `_. + +Step 1: Set Up +-------------- + +1. Clone the Docker Compose ``complete`` directory: + + .. code-block:: bash + + $ git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/src/docker/complete . \ + && rm -rf _tmp && cd complete + +2. Create a new Flower project (PyTorch): + + .. code-block:: bash + + $ flwr new quickstart-compose --framework PyTorch --username flower + +3. Export the path of the newly created project. The path should be relative to the + location of the Docker Compose files: + + .. code-block:: bash + + $ export PROJECT_DIR=quickstart-compose + + Setting the ``PROJECT_DIR`` helps Docker Compose locate the ``pyproject.toml`` file, + allowing it to install dependencies in the SuperExec and SuperNode images correctly. + +Step 2: Run Flower in Insecure Mode +----------------------------------- + +To begin, start Flower with the most basic configuration. 
In this setup, Flower will run +without TLS and without persisting the state. + +.. note:: + + Without TLS, the data sent between the services remains **unencrypted**. Use it only + for development purposes. + + For production-oriented use cases, :ref:`enable TLS` for secure data + transmission. + +Open your terminal and run: + +.. code-block:: bash + + $ docker compose -f compose.yml up --build -d + +.. dropdown:: Understand the command + + * ``docker compose``: The Docker command to run the Docker Compose tool. + * ``-f compose.yml``: Specify the YAML file that contains the basic Flower service definitions. + * ``--build``: Rebuild the images for each service if they don't already exist. + * ``-d``: Detach the containers from the terminal and run them in the background. + +Step 3: Run the Quickstart Project +---------------------------------- + +Now that the Flower services have been started via Docker Compose, it is time to run the +quickstart example. + +To ensure the ``flwr`` CLI connects to the SuperExec, you need to specify the SuperExec +addresses in the ``pyproject.toml`` file. + +1. Add the following lines to the ``quickstart-compose/pyproject.toml``: + + .. code-block:: toml + :caption: quickstart-compose/pyproject.toml + + [tool.flwr.federations.docker-compose] + address = "127.0.0.1:9093" + insecure = true + +2. Execute the command to run the quickstart example: + + .. code-block:: bash + + $ flwr run quickstart-compose docker-compose + +3. Monitor the SuperExec logs and wait for the summary to appear: + + .. code-block:: bash + + $ docker compose logs superexec -f + +Step 4: Update the Application +------------------------------ + +In the next step, change the application code. + +1. For example, go to the ``task.py`` file in the + ``quickstart-compose/quickstart_compose/`` directory and add a ``print`` call in the + ``get_weights`` function: + + .. code-block:: python + :caption: quickstart-compose/quickstart_compose/task.py + + # ... 
+ def get_weights(net): + print("Get weights") + return [val.cpu().numpy() for _, val in net.state_dict().items()] + + + # ... + +2. Rebuild and restart the services. + + .. note:: + + If you have modified the dependencies listed in your ``pyproject.toml`` file, it + is essential to rebuild images. + + If you haven't made any changes, you can skip this step. + + Run the following command to rebuild and restart the services: + + .. code-block:: bash + + $ docker compose -f compose.yml up --build -d + +3. Run the updated quickstart example: + + .. code-block:: bash + + $ flwr run quickstart-compose docker-compose + $ docker compose logs superexec -f + + In the SuperExec logs, you should find the ``Get weights`` line: + + .. code-block:: + :emphasize-lines: 9 + + superexec-1 | INFO : Starting Flower SuperExec + superexec-1 | WARNING : Option `--insecure` was set. Starting insecure HTTP server. + superexec-1 | INFO : Starting Flower SuperExec gRPC server on 0.0.0.0:9093 + superexec-1 | INFO : ExecServicer.StartRun + superexec-1 | 🎊 Successfully installed quickstart-compose to /app/.flwr/apps/flower/quickstart-compose/1.0.0. + superexec-1 | INFO : Created run -6767165609169293507 + superexec-1 | INFO : Started run -6767165609169293507 + superexec-1 | WARNING : Option `--insecure` was set. Starting insecure HTTP client connected to superlink:9091. + superexec-1 | Get weights + superexec-1 | INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout + +Step 5: Persisting the SuperLink State +-------------------------------------- + +In this step, Flower services are configured to persist the state of the SuperLink +service, ensuring that it maintains its state even after a restart. + +.. note:: + + When working with Docker Compose on Linux, you may need to create the ``state`` + directory first and change its ownership to ensure proper access and permissions. + + For more information, consult the following page: :doc:`persist-superlink-state`. + +1. 
Run the command: + + .. code-block:: bash + + $ docker compose -f compose.yml -f with-state.yml up --build -d + + .. dropdown:: Understand the command + + * ``docker compose``: The Docker command to run the Docker Compose tool. + * ``-f compose.yml``: Specify the YAML file that contains the basic Flower service definitions. + * | ``-f with-state.yml``: Specifies the path to an additional Docker Compose file that + | contains the configuration for persisting the SuperLink state. + | + | Docker merges Compose files according to `merging rules `_. + * ``--build``: Rebuild the images for each service if they don't already exist. + * ``-d``: Detach the containers from the terminal and run them in the background. + +2. Rerun the ``quickstart-compose`` project: + + .. code-block:: bash + + $ flwr run quickstart-compose docker-compose + +3. Check the content of the ``state`` directory: + + .. code-block:: bash + + $ ls state/ + state.db + + You should see a ``state.db`` file in the ``state`` directory. If you restart the + service, the state file will be used to restore the state from the previously saved + data. This ensures that the data persists even if the containers are stopped and + started again. + +.. _tls: + +Step 6: Run Flower with TLS +--------------------------- + +1. To demonstrate how to enable TLS, generate self-signed certificates using the + ``certs.yml`` Compose file. + + .. important:: + + These certificates should be used only for development purposes. + + For production environments, use a service like `Let's Encrypt + `_ to obtain your certificates. + + Run the command: + + .. code-block:: bash + + $ docker compose -f certs.yml up --build + +2. Add the following lines to the ``quickstart-compose/pyproject.toml``: + + .. code-block:: toml + :caption: quickstart-compose/pyproject.toml + + [tool.flwr.federations.docker-compose-tls] + address = "127.0.0.1:9093" + root-certificates = "../superexec-certificates/ca.crt" + +3. 
Restart the services with TLS enabled: + + .. code-block:: bash + + $ docker compose -f compose.yml -f with-tls.yml up --build -d + +4. Rerun the ``quickstart-compose`` project: + + .. code-block:: bash + + $ flwr run quickstart-compose docker-compose-tls + $ docker compose logs superexec -f + +Step 7: Add another SuperNode +----------------------------- + +You can add more SuperNodes and ClientApps by duplicating their definitions in the +``compose.yml`` file. + +Just give each new SuperNode and ClientApp service a unique service name like +``supernode-3``, ``clientapp-3``, etc. + +In ``compose.yml``, add the following: + +.. code-block:: yaml + :caption: compose.yml + :substitutions: + + # other service definitions + + supernode-3: + image: flwr/supernode:${FLWR_VERSION:-|stable_flwr_version|} + command: + - --insecure + - --superlink + - superlink:9092 + - --supernode-address + - 0.0.0.0:9096 + - --isolation + - process + - --node-config + - "partition-id=1 num-partitions=2" + depends_on: + - superlink + + clientapp-3: + build: + context: ${PROJECT_DIR:-.} + dockerfile_inline: | + FROM flwr/clientapp:${FLWR_VERSION:-|stable_flwr_version|} + + USER root + RUN apt-get update \ + && apt-get -y --no-install-recommends install \ + build-essential \ + && rm -rf /var/lib/apt/lists/* + USER app + + WORKDIR /app + COPY --chown=app:app pyproject.toml . + RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + && python -m pip install -U --no-cache-dir . + + ENTRYPOINT ["flwr-clientapp"] + command: + - --supernode + - supernode-3:9096 + deploy: + resources: + limits: + cpus: "2" + stop_signal: SIGINT + depends_on: + - supernode-3 + +If you also want to enable TLS for the new SuperNodes, duplicate the SuperNode +definition for each new SuperNode service in the ``with-tls.yml`` file. + +Make sure that the names of the services match with the one in the ``compose.yml`` file. + +In ``with-tls.yml``, add the following: + +.. 
code-block:: yaml + :caption: with-tls.yml + + # other service definitions + + supernode-3: + command: + - --superlink + - superlink:9092 + - --supernode-address + - 0.0.0.0:9096 + - --isolation + - process + - --node-config + - "partition-id=1 num-partitions=2" + - --root-certificates + - certificates/ca.crt + secrets: + - source: superlink-ca-certfile + target: /app/certificates/ca.crt + +Step 8: Persisting the SuperLink State and Enabling TLS +------------------------------------------------------- + +To run Flower with persisted SuperLink state and enabled TLS, a slight change in the +``with-state.yml`` file is required: + +1. Comment out the lines 2-4 and uncomment the lines 5-9: + + .. code-block:: yaml + :caption: with-state.yml + :linenos: + :emphasize-lines: 2-9 + + superlink: + # command: + # - --insecure + # - --database=state/state.db + command: + - --ssl-ca-certfile=certificates/ca.crt + - --ssl-certfile=certificates/server.pem + - --ssl-keyfile=certificates/server.key + - --database=state/state.db + volumes: + - ./state/:/app/state/:rw + +2. Restart the services: + + .. code-block:: bash + + $ docker compose -f compose.yml -f with-tls.yml -f with-state.yml up --build -d + +3. Rerun the ``quickstart-compose`` project: + + .. code-block:: bash + + $ flwr run quickstart-compose docker-compose-tls + $ docker compose logs superexec -f + +Step 9: Merge Multiple Compose Files +------------------------------------ + +You can merge multiple Compose files into a single file. For instance, if you wish to +combine the basic configuration with the TLS configuration, execute the following +command: + +.. code-block:: bash + + $ docker compose -f compose.yml \ + -f with-tls.yml config --no-path-resolution > my_compose.yml + +This will merge the contents of ``compose.yml`` and ``with-tls.yml`` into a new file +called ``my_compose.yml``. + +Step 10: Clean Up +----------------- + +Remove all services and volumes: + +.. 
code-block:: bash + + $ docker compose down -v + $ docker compose -f certs.yml down -v + +Where to Go Next +---------------- + +- :doc:`run-quickstart-examples-docker-compose` diff --git a/doc/source/docker/tutorial-quickstart-docker.rst b/doc/source/docker/tutorial-quickstart-docker.rst new file mode 100644 index 000000000000..993754dcf109 --- /dev/null +++ b/doc/source/docker/tutorial-quickstart-docker.rst @@ -0,0 +1,408 @@ +Quickstart with Docker +====================== + +This quickstart aims to guide you through the process of containerizing a Flower project +and running it end to end using Docker on your local machine. + +This tutorial does not use production-ready settings, so you can focus on understanding +the basic workflow that uses the minimum configurations. + +Prerequisites +------------- + +Before you start, make sure that: + +- The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally. +- The Docker daemon is running. + +Step 1: Set Up +-------------- + +1. Create a new Flower project (PyTorch): + + .. code-block:: bash + + $ flwr new quickstart-docker --framework PyTorch --username flower + + 🔨 Creating Flower project quickstart-docker... + 🎊 Project creation successful. + + Use the following command to run your project: + + cd quickstart-docker + pip install -e . + flwr run + + $ cd quickstart-docker + $ pip install -e . + +2. Create a new Docker bridge network called ``flwr-network``: + + .. code-block:: bash + + $ docker network create --driver bridge flwr-network + + User-defined networks, such as ``flwr-network``, enable IP resolution of container + names, a feature absent in the default bridge network. This simplifies quickstart + example by avoiding the need to determine host IP first. + +Step 2: Start the SuperLink +--------------------------- + +Open your terminal and run: + +.. 
code-block:: bash + :substitutions: + + $ docker run --rm \ + -p 9091:9091 -p 9092:9092 \ + --network flwr-network \ + --name superlink \ + --detach \ + flwr/superlink:|stable_flwr_version| --insecure + +.. dropdown:: Understand the command + + * ``docker run``: This tells Docker to run a container from an image. + * ``--rm``: Remove the container once it is stopped or the command exits. + * | ``-p 9091:9091 -p 9092:9092``: Map port ``9091`` and ``9092`` of the container to the same port of + | the host machine, allowing other services to access the Driver API on + | ``http://localhost:9091`` and the Fleet API on ``http://localhost:9092``. + * ``--network flwr-network``: Make the container join the network named ``flwr-network``. + * ``--name superlink``: Assign the name ``superlink`` to the container. + * ``--detach``: Run the container in the background, freeing up the terminal. + * | :substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of the image to be run and the specific + | tag of the image. The tag :substitution-code:`|stable_flwr_version|` represents a :doc:`specific version ` of the image. + * | ``--insecure``: This flag tells the container to operate in an insecure mode, allowing + | unencrypted communication. + +Step 3: Start the SuperNode +--------------------------- + +Start two SuperNode containers. + +1. Start the first container: + + .. code-block:: bash + :substitutions: + + $ docker run --rm \ + -p 9094:9094 \ + --network flwr-network \ + --name supernode-1 \ + --detach \ + flwr/supernode:|stable_flwr_version| \ + --insecure \ + --superlink superlink:9092 \ + --node-config "partition-id=0 num-partitions=2" \ + --supernode-address 0.0.0.0:9094 \ + --isolation process + + .. dropdown:: Understand the command + + * ``docker run``: This tells Docker to run a container from an image. + * ``--rm``: Remove the container once it is stopped or the command exits. 
+ * | ``-p 9094:9094``: Map port ``9094`` of the container to the same port of + | the host machine, allowing other services to access the SuperNode API on + | ``http://localhost:9094``. + * ``--network flwr-network``: Make the container join the network named ``flwr-network``. + * ``--name supernode-1``: Assign the name ``supernode-1`` to the container. + * ``--detach``: Run the container in the background, freeing up the terminal. + * | ``flwr/supernode:|stable_flwr_version|``: This is the name of the image to be run and the specific tag + | of the image. + * | ``--insecure``: This flag tells the container to operate in an insecure mode, allowing + | unencrypted communication. + * | ``--superlink superlink:9092``: Connect to the SuperLink's Fleet API at the address + | ``superlink:9092``. + * | ``--node-config "partition-id=0 num-partitions=2"``: Set the partition ID to ``0`` and the + | number of partitions to ``2`` for the SuperNode configuration. + * | ``--supernode-address 0.0.0.0:9094``: Set the address and port number that the SuperNode + | is listening on. + * | ``--isolation process``: Tells the SuperNode that the ClientApp is created by separate + | independent process. The SuperNode does not attempt to create it. + +2. Start the second container: + + .. code-block:: shell + :substitutions: + + $ docker run --rm \ + -p 9095:9095 \ + --network flwr-network \ + --name supernode-2 \ + --detach \ + flwr/supernode:|stable_flwr_version| \ + --insecure \ + --superlink superlink:9092 \ + --node-config "partition-id=1 num-partitions=2" \ + --supernode-address 0.0.0.0:9095 \ + --isolation process + +Step 4: Start the ClientApp +--------------------------- + +The ClientApp Docker image comes with a pre-installed version of Flower and serves as a +base for building your own ClientApp image. In order to install the FAB dependencies, +you will need to create a Dockerfile that extends the ClientApp image and installs the +required dependencies. + +1. 
Create a ClientApp Dockerfile called ``Dockerfile.clientapp`` and paste the following + code into it: + + .. code-block:: dockerfile + :caption: Dockerfile.clientapp + :linenos: + :substitutions: + + FROM flwr/clientapp:|stable_flwr_version| + + WORKDIR /app + COPY pyproject.toml . + RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + && python -m pip install -U --no-cache-dir . + + ENTRYPOINT ["flwr-clientapp"] + + .. dropdown:: Understand the Dockerfile + + * | :substitution-code:`FROM flwr/clientapp:|stable_flwr_version|`: This line specifies that the Docker image + | to be built from is the ``flwr/clientapp image``, version :substitution-code:`|stable_flwr_version|`. + * | ``WORKDIR /app``: Set the working directory for the container to ``/app``. + | Any subsequent commands that reference a directory will be relative to this directory. + * | ``COPY pyproject.toml .``: Copy the ``pyproject.toml`` file + | from the current working directory into the container's ``/app`` directory. + * | ``RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml``: Remove the ``flwr`` dependency + | from the ``pyproject.toml``. + * | ``python -m pip install -U --no-cache-dir .``: Run the ``pip`` install command to + | install the dependencies defined in the ``pyproject.toml`` file + | + | The ``-U`` flag indicates that any existing packages should be upgraded, and + | ``--no-cache-dir`` prevents pip from using the cache to speed up the installation. + * | ``ENTRYPOINT ["flwr-clientapp"]``: Set the command ``flwr-clientapp`` to be + | the default command run when the container is started. + + .. important:: + + Note that `flwr `__ is already installed in the + ``flwr/clientapp`` base image, so only other package dependencies such as + ``flwr-datasets``, ``torch``, etc., need to be installed. As a result, the + ``flwr`` dependency is removed from the ``pyproject.toml`` after it has been + copied into the Docker image (see line 5). + +2. 
Next, build the ClientApp Docker image by running the following command in the + directory where the Dockerfile is located: + + .. code-block:: bash + + $ docker build -f Dockerfile.clientapp -t flwr_clientapp:0.0.1 . + + .. note:: + + The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. Remember + that these values are merely examples, and you can customize them according to + your requirements. + +3. Start the first ClientApp container: + + .. code-block:: bash + + $ docker run --rm \ + --network flwr-network \ + --detach \ + flwr_clientapp:0.0.1 \ + --supernode supernode-1:9094 + + .. dropdown:: Understand the command + + * ``docker run``: This tells Docker to run a container from an image. + * ``--rm``: Remove the container once it is stopped or the command exits. + * ``--network flwr-network``: Make the container join the network named ``flwr-network``. + * ``--detach``: Run the container in the background, freeing up the terminal. + * | ``flwr_clientapp:0.0.1``: This is the name of the image to be run and the specific tag + | of the image. + * | ``--supernode supernode-1:9094``: Connect to the SuperNode's Fleet API at the address + | ``supernode-1:9094``. + +4. Start the second ClientApp container: + + .. code-block:: shell + + $ docker run --rm \ + --network flwr-network \ + --detach \ + flwr_clientapp:0.0.1 \ + --supernode supernode-2:9095 + +Step 5: Start the SuperExec +--------------------------- + +The procedure for building and running a SuperExec image is almost identical to the +ClientApp image. + +Similar to the ClientApp image, you will need to create a Dockerfile that extends the +SuperExec image and installs the required FAB dependencies. + +1. Create a SuperExec Dockerfile called ``Dockerfile.superexec`` and paste the following + code in: + + .. code-block:: dockerfile + :caption: Dockerfile.superexec + :substitutions: + + FROM flwr/superexec:|stable_flwr_version| + + WORKDIR /app + + COPY pyproject.toml . 
+ RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + && python -m pip install -U --no-cache-dir . + + ENTRYPOINT ["flower-superexec", "--executor", "flwr.superexec.deployment:executor"] + + .. dropdown:: Understand the Dockerfile + + * | :substitution-code:`FROM flwr/superexec:|stable_flwr_version|`: This line specifies that the Docker image + | to be built from is the ``flwr/superexec image``, version :substitution-code:`|stable_flwr_version|`. + * | ``WORKDIR /app``: Set the working directory for the container to ``/app``. + | Any subsequent commands that reference a directory will be relative to this directory. + * | ``COPY pyproject.toml .``: Copy the ``pyproject.toml`` file + | from the current working directory into the container's ``/app`` directory. + * | ``RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml``: Remove the ``flwr`` dependency + | from the ``pyproject.toml``. + * | ``python -m pip install -U --no-cache-dir .``: Run the ``pip`` install command to + | install the dependencies defined in the ``pyproject.toml`` file + | + | The ``-U`` flag indicates that any existing packages should be upgraded, and + | ``--no-cache-dir`` prevents pip from using the cache to speed up the installation. + * | ``ENTRYPOINT ["flower-superexec"``: Set the command ``flower-superexec`` to be + | the default command run when the container is started. + | + | ``"--executor", "flwr.superexec.deployment:executor"]`` Use the + | ``flwr.superexec.deployment:executor`` executor to run the ServerApps. + +2. Afterward, in the directory that holds the Dockerfile, execute this Docker command to + build the SuperExec image: + + .. code-block:: bash + + $ docker build -f Dockerfile.superexec -t flwr_superexec:0.0.1 . + +3. Start the SuperExec container: + + .. code-block:: bash + + $ docker run --rm \ + -p 9093:9093 \ + --network flwr-network \ + --name superexec \ + --detach \ + flwr_superexec:0.0.1 \ + --insecure \ + --executor-config superlink=\"superlink:9091\" + + .. 
dropdown:: Understand the command + + * ``docker run``: This tells Docker to run a container from an image. + * ``--rm``: Remove the container once it is stopped or the command exits. + * | ``-p 9093:9093``: Map port ``9093`` of the container to the same port of + | the host machine, allowing you to access the SuperExec API on ``http://localhost:9093``. + * ``--network flwr-network``: Make the container join the network named ``flwr-network``. + * ``--name superexec``: Assign the name ``superexec`` to the container. + * ``--detach``: Run the container in the background, freeing up the terminal. + * | ``flwr_superexec:0.0.1``: This is the name of the image to be run and the specific tag + | of the image. + * | ``--insecure``: This flag tells the container to operate in an insecure mode, allowing + | unencrypted communication. + * | ``--executor-config superlink=\"superlink:9091\"``: Configure the SuperExec executor to + | connect to the SuperLink running on port ``9091``. + +Step 6: Run the Quickstart Project +---------------------------------- + +1. Add the following lines to the ``pyproject.toml``: + + .. code-block:: toml + :caption: pyproject.toml + + [tool.flwr.federations.docker] + address = "127.0.0.1:9093" + insecure = true + +2. Run the ``quickstart-docker`` project by executing the command: + + .. code-block:: bash + + $ flwr run . docker + +3. Follow the SuperExec logs to track the execution of the run: + + .. code-block:: bash + + $ docker logs -f superexec + +Step 7: Update the Application +------------------------------ + +1. Change the application code. For example, change the ``seed`` in + ``quickstart_docker/task.py`` to ``43`` and save it: + + .. code-block:: python + :caption: quickstart_docker/task.py + + # ... + partition_train_test = partition.train_test_split(test_size=0.2, seed=43) + # ... + +2. Stop the current ClientApp containers: + + .. code-block:: bash + + $ docker stop $(docker ps -a -q --filter ancestor=flwr_clientapp:0.0.1) + +3. 
Rebuild the FAB and ClientApp image: + + .. code-block:: bash + + $ docker build -f Dockerfile.clientapp -t flwr_clientapp:0.0.1 . + +4. Launch two new ClientApp containers based on the newly built image: + + .. code-block:: bash + + $ docker run --rm \ + --network flwr-network \ + --detach \ + flwr_clientapp:0.0.1 \ + --supernode supernode-1:9094 + $ docker run --rm \ + --network flwr-network \ + --detach \ + flwr_clientapp:0.0.1 \ + --supernode supernode-2:9095 + +5. Run the updated project: + + .. code-block:: bash + + $ flwr run . docker + +Step 8: Clean Up +---------------- + +Remove the containers and the bridge network: + +.. code-block:: bash + + $ docker stop $(docker ps -a -q --filter ancestor=flwr_clientapp:0.0.1) \ + supernode-1 \ + supernode-2 \ + superexec \ + superlink + $ docker network rm flwr-network + +Where to Go Next +---------------- + +- :doc:`enable-tls` +- :doc:`persist-superlink-state` +- :doc:`tutorial-quickstart-docker-compose` diff --git a/doc/source/docker/use-a-different-version.rst b/doc/source/docker/use-a-different-version.rst new file mode 100644 index 000000000000..9108f5157dcd --- /dev/null +++ b/doc/source/docker/use-a-different-version.rst @@ -0,0 +1,13 @@ +Use a Different Flower Version +============================== + +If you want to use a different version of Flower, for example Flower nightly, you can do +so by changing the tag. All available versions are on `Docker Hub +`__. + +.. important:: + + When using Flower nightly, the SuperLink nightly image must be paired with the + corresponding SuperNode and ServerApp nightly images released on the same day. To + ensure the versions are in sync, using the concrete tag, e.g., + ``1.10.0.dev20240610`` instead of ``nightly`` is recommended. 
diff --git a/doc/source/example-fedbn-pytorch-from-centralized-to-federated.rst b/doc/source/example-fedbn-pytorch-from-centralized-to-federated.rst index 0139f3b8dc31..4a9d4607d9a5 100644 --- a/doc/source/example-fedbn-pytorch-from-centralized-to-federated.rst +++ b/doc/source/example-fedbn-pytorch-from-centralized-to-federated.rst @@ -1,16 +1,22 @@ Example: FedBN in PyTorch - From Centralized To Federated ========================================================= -This tutorial will show you how to use Flower to build a federated version of an existing machine learning workload with `FedBN `_, a federated training strategy designed for non-iid data. -We are using PyTorch to train a Convolutional Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. -When applying FedBN, only few changes needed compared to :doc:`Example: PyTorch - From Centralized To Federated `. +This tutorial will show you how to use Flower to build a federated version of an +existing machine learning workload with `FedBN `_, a +federated training strategy designed for non-iid data. We are using PyTorch to train a +Convolutional Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. +When applying FedBN, only few changes needed compared to :doc:`Example: PyTorch - From +Centralized To Federated `. Centralized Training -------------------- -All files are revised based on :doc:`Example: PyTorch - From Centralized To Federated `. -The only thing to do is modifying the file called :code:`cifar.py`, revised part is shown below: -The model architecture defined in class Net() is added with Batch Normalization layers accordingly. +All files are revised based on :doc:`Example: PyTorch - From Centralized To Federated +`. The only thing to do is modifying the +file called ``cifar.py``, revised part is shown below: + +The model architecture defined in class Net() is added with Batch Normalization layers +accordingly. .. 
code-block:: python @@ -40,26 +46,33 @@ The model architecture defined in class Net() is added with Batch Normalization You can now run your machine learning workload: -.. code-block:: python +.. code-block:: bash python3 cifar.py -So far this should all look fairly familiar if you've used PyTorch before. -Let's take the next step and use what we've built to create a federated learning system within FedBN, the system consists of one server and two clients. +So far this should all look fairly familiar if you've used PyTorch before. Let's take +the next step and use what we've built to create a federated learning system within +FedBN, the system consists of one server and two clients. Federated Training ------------------ -If you have read :doc:`Example: PyTorch - From Centralized To Federated `, the following parts are easy to follow, only :code:`get_parameters` and :code:`set_parameters` function in :code:`client.py` needed to revise. -If not, please read the :doc:`Example: PyTorch - From Centralized To Federated `. first. +If you have read :doc:`Example: PyTorch - From Centralized To Federated +`, the following parts are easy to +follow, only the ``get_parameters`` and ``set_parameters`` functions in ``client.py`` +need to be revised. If not, please read the :doc:`Example: PyTorch - From +Centralized To Federated ` first. -Our example consists of one *server* and two *clients*. In FedBN, :code:`server.py` keeps unchanged, we can start the server directly. +Our example consists of one *server* and two *clients*. In FedBN, ``server.py`` remains +unchanged, we can start the server directly. -.. code-block:: python +.. code-block:: bash python3 server.py -Finally, we will revise our *client* logic by changing :code:`get_parameters` and :code:`set_parameters` in :code:`client.py`, we will exclude batch normalization parameters from model parameter list when sending to or receiving from the server. 
+Finally, we will revise our *client* logic by changing ``get_parameters`` and +``set_parameters`` in ``client.py``, we will exclude batch normalization parameters from +model parameter list when sending to or receiving from the server. .. code-block:: python @@ -71,11 +84,15 @@ Finally, we will revise our *client* logic by changing :code:`get_parameters` an def get_parameters(self, config) -> List[np.ndarray]: # Return model parameters as a list of NumPy ndarrays, excluding parameters of BN layers when using FedBN - return [val.cpu().numpy() for name, val in self.model.state_dict().items() if 'bn' not in name] + return [ + val.cpu().numpy() + for name, val in self.model.state_dict().items() + if "bn" not in name + ] def set_parameters(self, parameters: List[np.ndarray]) -> None: # Set model parameters from a list of NumPy ndarrays - keys = [k for k in self.model.state_dict().keys() if 'bn' not in k] + keys = [k for k in self.model.state_dict().keys() if "bn" not in k] params_dict = zip(keys, parameters) state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) self.model.load_state_dict(state_dict, strict=False) @@ -84,15 +101,20 @@ Finally, we will revise our *client* logic by changing :code:`get_parameters` an Now, you can now open two additional terminal windows and run -.. code-block:: python +.. code-block:: bash python3 client.py -in each window (make sure that the server is still running before you do so) and see your (previously centralized) PyTorch project run federated learning with FedBN strategy across two clients. Congratulations! +in each window (make sure that the server is still running before you do so) and see +your (previously centralized) PyTorch project run federated learning with FedBN strategy +across two clients. Congratulations! Next Steps ---------- -The full source code for this example can be found `here `_. 
-Our example is of course somewhat over-simplified because both clients load the exact same dataset, which isn't realistic. -You're now prepared to explore this topic further. How about using different subsets of CIFAR-10 on each client? How about adding more clients? +The full source code for this example can be found `here +`_. +Our example is of course somewhat over-simplified because both clients load the exact +same dataset, which isn't realistic. You're now prepared to explore this topic further. +How about using different subsets of CIFAR-10 on each client? How about adding more +clients? diff --git a/doc/source/example-jax-from-centralized-to-federated.rst b/doc/source/example-jax-from-centralized-to-federated.rst deleted file mode 100644 index 6b06a288a67a..000000000000 --- a/doc/source/example-jax-from-centralized-to-federated.rst +++ /dev/null @@ -1,282 +0,0 @@ -Example: JAX - Run JAX Federated -================================ - -This tutorial will show you how to use Flower to build a federated version of an existing JAX workload. -We are using JAX to train a linear regression model on a scikit-learn dataset. -We will structure the example similar to our `PyTorch - From Centralized To Federated `_ walkthrough. -First, we build a centralized training approach based on the `Linear Regression with JAX `_ tutorial`. -Then, we build upon the centralized training code to run the training in a federated fashion. - -Before we start building our JAX example, we need install the packages :code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`: - -.. code-block:: shell - - $ pip install jax jaxlib scikit-learn flwr - - -Linear Regression with JAX --------------------------- - -We begin with a brief description of the centralized training code based on a :code:`Linear Regression` model. -If you want a more in-depth explanation of what's going on then have a look at the official `JAX documentation `_. 
- -Let's create a new file called :code:`jax_training.py` with all the components required for a traditional (centralized) linear regression training. -First, the JAX packages :code:`jax` and :code:`jaxlib` need to be imported. In addition, we need to import :code:`sklearn` since we use :code:`make_regression` for the dataset and :code:`train_test_split` to split the dataset into a training and test set. -You can see that we do not yet import the :code:`flwr` package for federated learning. This will be done later. - -.. code-block:: python - - from typing import Dict, List, Tuple, Callable - import jax - import jax.numpy as jnp - from sklearn.datasets import make_regression - from sklearn.model_selection import train_test_split - - key = jax.random.PRNGKey(0) - -The :code:`load_data()` function loads the mentioned training and test sets. - -.. code-block:: python - - def load_data() -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray], List[np.ndarray]]: - # create our dataset and start with similar datasets for different clients - X, y = make_regression(n_features=3, random_state=0) - X, X_test, y, y_test = train_test_split(X, y) - return X, y, X_test, y_test - -The model architecture (a very simple :code:`Linear Regression` model) is defined in :code:`load_model()`. - -.. code-block:: python - - def load_model(model_shape) -> Dict: - # model weights - params = { - 'b' : jax.random.uniform(key), - 'w' : jax.random.uniform(key, model_shape) - } - return params - -We now need to define the training (function :code:`train()`), which loops over the training set and measures the loss (function :code:`loss_fn()`) for each batch of training examples. The loss function is separate since JAX takes derivatives with a :code:`grad()` function (defined in the :code:`main()` function and called in :code:`train()`). - -.. 
code-block:: python - - def loss_fn(params, X, y) -> Callable: - err = jnp.dot(X, params['w']) + params['b'] - y - return jnp.mean(jnp.square(err)) # mse - - def train(params, grad_fn, X, y) -> Tuple[np.array, float, int]: - num_examples = X.shape[0] - for epochs in range(10): - grads = grad_fn(params, X, y) - params = jax.tree_multimap(lambda p, g: p - 0.05 * g, params, grads) - loss = loss_fn(params,X, y) - # if epochs % 10 == 9: - # print(f'For Epoch {epochs} loss {loss}') - return params, loss, num_examples - -The evaluation of the model is defined in the function :code:`evaluation()`. The function takes all test examples and measures the loss of the linear regression model. - -.. code-block:: python - - def evaluation(params, grad_fn, X_test, y_test) -> Tuple[float, int]: - num_examples = X_test.shape[0] - err_test = loss_fn(params, X_test, y_test) - loss_test = jnp.mean(jnp.square(err_test)) - # print(f'Test loss {loss_test}') - return loss_test, num_examples - -Having defined the data loading, model architecture, training, and evaluation we can put everything together and train our model using JAX. As already mentioned, the :code:`jax.grad()` function is defined in :code:`main()` and passed to :code:`train()`. - -.. code-block:: python - - def main(): - X, y, X_test, y_test = load_data() - model_shape = X.shape[1:] - grad_fn = jax.grad(loss_fn) - print("Model Shape", model_shape) - params = load_model(model_shape) - params, loss, num_examples = train(params, grad_fn, X, y) - evaluation(params, grad_fn, X_test, y_test) - - - if __name__ == "__main__": - main() - -You can now run your (centralized) JAX linear regression workload: - -.. code-block:: python - - python3 jax_training.py - -So far this should all look fairly familiar if you've used JAX before. -Let's take the next step and use what we've built to create a simple federated learning system consisting of one server and two clients. 
- -JAX meets Flower ----------------- - -The concept of federating an existing workload is always the same and easy to understand. -We have to start a *server* and then use the code in :code:`jax_training.py` for the *clients* that are connected to the *server*. -The *server* sends model parameters to the clients. The *clients* run the training and update the parameters. -The updated parameters are sent back to the *server*, which averages all received parameter updates. -This describes one round of the federated learning process, and we repeat this for multiple rounds. - -Our example consists of one *server* and two *clients*. Let's set up :code:`server.py` first. The *server* needs to import the Flower package :code:`flwr`. -Next, we use the :code:`start_server` function to start a server and tell it to perform three rounds of federated learning. - -.. code-block:: python - - import flwr as fl - - if __name__ == "__main__": - fl.server.start_server(server_address="0.0.0.0:8080", config=fl.server.ServerConfig(num_rounds=3)) - -We can already start the *server*: - -.. code-block:: python - - python3 server.py - -Finally, we will define our *client* logic in :code:`client.py` and build upon the previously defined JAX training in :code:`jax_training.py`. -Our *client* needs to import :code:`flwr`, but also :code:`jax` and :code:`jaxlib` to update the parameters on our JAX model: - -.. code-block:: python - - from typing import Dict, List, Callable, Tuple - - import flwr as fl - import numpy as np - import jax - import jax.numpy as jnp - - import jax_training - - -Implementing a Flower *client* basically means implementing a subclass of either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. -Our implementation will be based on :code:`flwr.client.NumPyClient` and we'll call it :code:`FlowerClient`. 
-:code:`NumPyClient` is slightly easier to implement than :code:`Client` if you use a framework with good NumPy interoperability (like JAX) because it avoids some of the boilerplate that would otherwise be necessary. -:code:`FlowerClient` needs to implement four methods, two methods for getting/setting model parameters, one method for training the model, and one method for testing the model: - -#. :code:`set_parameters (optional)` - * set the model parameters on the local model that are received from the server - * transform parameters to NumPy :code:`ndarray`'s - * loop over the list of model parameters received as NumPy :code:`ndarray`'s (think list of neural network layers) -#. :code:`get_parameters` - * get the model parameters and return them as a list of NumPy :code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects) -#. :code:`fit` - * update the parameters of the local model with the parameters received from the server - * train the model on the local training set - * get the updated local model parameters and return them to the server -#. :code:`evaluate` - * update the parameters of the local model with the parameters received from the server - * evaluate the updated model on the local test set - * return the local loss to the server - -The challenging part is to transform the JAX model parameters from :code:`DeviceArray` to :code:`NumPy ndarray` to make them compatible with `NumPyClient`. - -The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make use of the functions :code:`train()` and :code:`evaluate()` previously defined in :code:`jax_training.py`. -So what we really do here is we tell Flower through our :code:`NumPyClient` subclass which of our already defined functions to call for training and evaluation. -We included type annotations to give you a better understanding of the data types that get passed around. - -.. 
code-block:: python - - - class FlowerClient(fl.client.NumPyClient): - """Flower client implementing using linear regression and JAX.""" - - def __init__( - self, - params: Dict, - grad_fn: Callable, - train_x: List[np.ndarray], - train_y: List[np.ndarray], - test_x: List[np.ndarray], - test_y: List[np.ndarray], - ) -> None: - self.params= params - self.grad_fn = grad_fn - self.train_x = train_x - self.train_y = train_y - self.test_x = test_x - self.test_y = test_y - - def get_parameters(self, config) -> Dict: - # Return model parameters as a list of NumPy ndarrays - parameter_value = [] - for _, val in self.params.items(): - parameter_value.append(np.array(val)) - return parameter_value - - def set_parameters(self, parameters: List[np.ndarray]) -> Dict: - # Collect model parameters and update the parameters of the local model - value=jnp.ndarray - params_item = list(zip(self.params.keys(),parameters)) - for item in params_item: - key = item[0] - value = item[1] - self.params[key] = value - return self.params - - - def fit( - self, parameters: List[np.ndarray], config: Dict - ) -> Tuple[List[np.ndarray], int, Dict]: - # Set model parameters, train model, return updated model parameters - print("Start local training") - self.params = self.set_parameters(parameters) - self.params, loss, num_examples = jax_training.train(self.params, self.grad_fn, self.train_x, self.train_y) - results = {"loss": float(loss)} - print("Training results", results) - return self.get_parameters(config={}), num_examples, results - - def evaluate( - self, parameters: List[np.ndarray], config: Dict - ) -> Tuple[float, int, Dict]: - # Set model parameters, evaluate the model on a local test dataset, return result - print("Start evaluation") - self.params = self.set_parameters(parameters) - loss, num_examples = jax_training.evaluation(self.params,self.grad_fn, self.test_x, self.test_y) - print("Evaluation accuracy & loss", loss) - return ( - float(loss), - num_examples, - {"loss": float(loss)}, 
- ) - -Having defined the federation process, we can run it. - -.. code-block:: python - - def main() -> None: - """Load data, start MNISTClient.""" - - # Load data - train_x, train_y, test_x, test_y = jax_training.load_data() - grad_fn = jax.grad(jax_training.loss_fn) - - # Load model (from centralized training) and initialize parameters - model_shape = train_x.shape[1:] - params = jax_training.load_model(model_shape) - - # Start Flower client - client = FlowerClient(params, grad_fn, train_x, train_y, test_x, test_y) - fl.client.start_client(server_address="0.0.0.0:8080", client.to_client()) - - if __name__ == "__main__": - main() - - -And that's it. You can now open two additional terminal windows and run - -.. code-block:: python - - python3 client.py - -in each window (make sure that the server is still running before you do so) and see your JAX project run federated learning across two clients. Congratulations! - -Next Steps ----------- - -The source code of this example was improved over time and can be found here: `Quickstart JAX `_. -Our example is somewhat over-simplified because both clients load the same dataset. - -You're now prepared to explore this topic further. How about using a more sophisticated model or using a different dataset? How about adding more clients? diff --git a/doc/source/example-pytorch-from-centralized-to-federated.rst b/doc/source/example-pytorch-from-centralized-to-federated.rst index 0c458a136a81..9629a7fed6e8 100644 --- a/doc/source/example-pytorch-from-centralized-to-federated.rst +++ b/doc/source/example-pytorch-from-centralized-to-federated.rst @@ -1,21 +1,25 @@ Example: PyTorch - From Centralized To Federated ================================================ -This tutorial will show you how to use Flower to build a federated version of an existing machine learning workload. -We are using PyTorch to train a Convolutional Neural Network on the CIFAR-10 dataset. 
-First, we introduce this machine learning task with a centralized training approach based on the `Deep Learning with PyTorch `_ tutorial. -Then, we build upon the centralized training code to run the training in a federated fashion. +This tutorial will show you how to use Flower to build a federated version of an +existing machine learning workload. We are using PyTorch to train a Convolutional Neural +Network on the CIFAR-10 dataset. First, we introduce this machine learning task with a +centralized training approach based on the `Deep Learning with PyTorch +`_ tutorial. Then, +we build upon the centralized training code to run the training in a federated fashion. Centralized Training -------------------- -We begin with a brief description of the centralized CNN training code. -If you want a more in-depth explanation of what's going on then have a look at the official `PyTorch tutorial `_. +We begin with a brief description of the centralized CNN training code. If you want a +more in-depth explanation of what's going on then have a look at the official `PyTorch +tutorial `_. -Let's create a new file called :code:`cifar.py` with all the components required for a traditional (centralized) training on CIFAR-10. -First, all required packages (such as :code:`torch` and :code:`torchvision`) need to be imported. -You can see that we do not import any package for federated learning. -You can keep all these imports as they are even when we add the federated learning components at a later point. +Let's create a new file called ``cifar.py`` with all the components required for a +traditional (centralized) training on CIFAR-10. First, all required packages (such as +``torch`` and ``torchvision``) need to be imported. You can see that we do not import +any package for federated learning. You can keep all these imports as they are even when +we add the federated learning components at a later point. .. 
code-block:: python @@ -29,7 +33,9 @@ You can keep all these imports as they are even when we add the federated learni from torch import Tensor from torchvision.datasets import CIFAR10 -As already mentioned we will use the CIFAR-10 dataset for this machine learning workload. The model architecture (a very simple Convolutional Neural Network) is defined in :code:`class Net()`. +As already mentioned we will use the CIFAR-10 dataset for this machine learning +workload. The model architecture (a very simple Convolutional Neural Network) is defined +in ``class Net()``. .. code-block:: python @@ -53,13 +59,17 @@ As already mentioned we will use the CIFAR-10 dataset for this machine learning x = self.fc3(x) return x -The :code:`load_data()` function loads the CIFAR-10 training and test sets. The :code:`transform` normalized the data after loading. +The ``load_data()`` function loads the CIFAR-10 training and test sets. The +``transform`` normalizes the data after loading. .. code-block:: python DATA_ROOT = "~/data/cifar-10" - def load_data() -> Tuple[torch.utils.data.DataLoader, torch.utils.data.DataLoader, Dict]: + + def load_data() -> ( + Tuple[torch.utils.data.DataLoader, torch.utils.data.DataLoader, Dict] + ): """Load CIFAR-10 (training and test set).""" transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] @@ -68,12 +78,15 @@ The :code:`load_data()` function loads the CIFAR-10 training and test sets. 
The trainloader = torch.utils.data.DataLoader(trainset, batch_size=32, shuffle=True) testset = CIFAR10(DATA_ROOT, train=False, download=True, transform=transform) testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False) - num_examples = {"trainset" : len(trainset), "testset" : len(testset)} + num_examples = {"trainset": len(trainset), "testset": len(testset)} return trainloader, testloader, num_examples -We now need to define the training (function :code:`train()`) which loops over the training set, measures the loss, backpropagates it, and then takes one optimizer step for each batch of training examples. +We now need to define the training (function ``train()``) which loops over the training +set, measures the loss, backpropagates it, and then takes one optimizer step for each +batch of training examples. -The evaluation of the model is defined in the function :code:`test()`. The function loops over all test samples and measures the loss of the model based on the test dataset. +The evaluation of the model is defined in the function ``test()``. The function loops +over all test samples and measures the loss of the model based on the test dataset. .. code-block:: python @@ -133,7 +146,8 @@ The evaluation of the model is defined in the function :code:`test()`. The funct accuracy = correct / total return loss, accuracy -Having defined the data loading, model architecture, training, and evaluation we can put everything together and train our CNN on CIFAR-10. +Having defined the data loading, model architecture, training, and evaluation we can put +everything together and train our CNN on CIFAR-10. .. 
code-block:: python @@ -143,7 +157,7 @@ Having defined the data loading, model architecture, training, and evaluation we print("Load data") trainloader, testloader, _ = load_data() print("Start training") - net=Net().to(DEVICE) + net = Net().to(DEVICE) train(net=net, trainloader=trainloader, epochs=2, device=DEVICE) print("Evaluate model") loss, accuracy = test(net=net, testloader=testloader, device=DEVICE) @@ -156,46 +170,57 @@ Having defined the data loading, model architecture, training, and evaluation we You can now run your machine learning workload: -.. code-block:: python +.. code-block:: bash python3 cifar.py -So far, this should all look fairly familiar if you've used PyTorch before. -Let's take the next step and use what we've built to create a simple federated learning system consisting of one server and two clients. +So far, this should all look fairly familiar if you've used PyTorch before. Let's take +the next step and use what we've built to create a simple federated learning system +consisting of one server and two clients. Federated Training ------------------ -The simple machine learning project discussed in the previous section trains the model on a single dataset (CIFAR-10), we call this centralized learning. -This concept of centralized learning, as shown in the previous section, is probably known to most of you, and many of you have used it previously. -Normally, if you'd want to run machine learning workloads in a federated fashion, then you'd have to change most of your code and set everything up from scratch. This can be a considerable effort. +The simple machine learning project discussed in the previous section trains the model +on a single dataset (CIFAR-10), we call this centralized learning. This concept of +centralized learning, as shown in the previous section, is probably known to most of +you, and many of you have used it previously. 
Normally, if you'd want to run machine +learning workloads in a federated fashion, then you'd have to change most of your code +and set everything up from scratch. This can be a considerable effort. -However, with Flower you can evolve your pre-existing code into a federated learning setup without the need for a major rewrite. +However, with Flower you can evolve your pre-existing code into a federated learning +setup without the need for a major rewrite. -The concept is easy to understand. -We have to start a *server* and then use the code in :code:`cifar.py` for the *clients* that are connected to the *server*. -The *server* sends model parameters to the clients. The *clients* run the training and update the parameters. -The updated parameters are sent back to the *server* which averages all received parameter updates. -This describes one round of the federated learning process and we repeat this for multiple rounds. +The concept is easy to understand. We have to start a *server* and then use the code in +``cifar.py`` for the *clients* that are connected to the *server*. The *server* sends +model parameters to the clients. The *clients* run the training and update the +parameters. The updated parameters are sent back to the *server* which averages all +received parameter updates. This describes one round of the federated learning process +and we repeat this for multiple rounds. -Our example consists of one *server* and two *clients*. Let's set up :code:`server.py` first. The *server* needs to import the Flower package :code:`flwr`. -Next, we use the :code:`start_server` function to start a server and tell it to perform three rounds of federated learning. +Our example consists of one *server* and two *clients*. Let's set up ``server.py`` +first. The *server* needs to import the Flower package ``flwr``. Next, we use the +``start_server`` function to start a server and tell it to perform three rounds of +federated learning. .. 
code-block:: python import flwr as fl if __name__ == "__main__": - fl.server.start_server(server_address="0.0.0.0:8080", config=fl.server.ServerConfig(num_rounds=3)) + fl.server.start_server( + server_address="0.0.0.0:8080", config=fl.server.ServerConfig(num_rounds=3) + ) We can already start the *server*: -.. code-block:: python +.. code-block:: bash python3 server.py -Finally, we will define our *client* logic in :code:`client.py` and build upon the previously defined centralized training in :code:`cifar.py`. -Our *client* needs to import :code:`flwr`, but also :code:`torch` to update the parameters on our PyTorch model: +Finally, we will define our *client* logic in ``client.py`` and build upon the +previously defined centralized training in ``cifar.py``. Our *client* needs to import +``flwr``, but also ``torch`` to update the parameters on our PyTorch model: .. code-block:: python @@ -210,28 +235,38 @@ Our *client* needs to import :code:`flwr`, but also :code:`torch` to update the DEVICE: str = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") -Implementing a Flower *client* basically means implementing a subclass of either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. -Our implementation will be based on :code:`flwr.client.NumPyClient` and we'll call it :code:`CifarClient`. -:code:`NumPyClient` is slightly easier to implement than :code:`Client` if you use a framework with good NumPy interoperability (like PyTorch or TensorFlow/Keras) because it avoids some of the boilerplate that would otherwise be necessary. -:code:`CifarClient` needs to implement four methods, two methods for getting/setting model parameters, one method for training the model, and one method for testing the model: - -#. :code:`set_parameters` - * set the model parameters on the local model that are received from the server - * loop over the list of model parameters received as NumPy :code:`ndarray`'s (think list of neural network layers) -#. 
:code:`get_parameters` - * get the model parameters and return them as a list of NumPy :code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects) -#. :code:`fit` - * update the parameters of the local model with the parameters received from the server - * train the model on the local training set - * get the updated local model weights and return them to the server -#. :code:`evaluate` - * update the parameters of the local model with the parameters received from the server - * evaluate the updated model on the local test set - * return the local loss and accuracy to the server - -The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make use of the functions :code:`train()` and :code:`test()` previously defined in :code:`cifar.py`. -So what we really do here is we tell Flower through our :code:`NumPyClient` subclass which of our already defined functions to call for training and evaluation. -We included type annotations to give you a better understanding of the data types that get passed around. +Implementing a Flower *client* basically means implementing a subclass of either +``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our implementation will be based +on ``flwr.client.NumPyClient`` and we'll call it ``CifarClient``. ``NumPyClient`` is +slightly easier to implement than ``Client`` if you use a framework with good NumPy +interoperability (like PyTorch or TensorFlow/Keras) because it avoids some of the +boilerplate that would otherwise be necessary. ``CifarClient`` needs to implement four +methods, two methods for getting/setting model parameters, one method for training the +model, and one method for testing the model: + +1. ``set_parameters`` + - set the model parameters on the local model that are received from the server + - loop over the list of model parameters received as NumPy ``ndarray``'s (think + list of neural network layers) +2. 
``get_parameters`` + - get the model parameters and return them as a list of NumPy ``ndarray``'s + (which is what ``flwr.client.NumPyClient`` expects) +3. ``fit`` + - update the parameters of the local model with the parameters received from the + server + - train the model on the local training set + - get the updated local model weights and return them to the server +4. ``evaluate`` + - update the parameters of the local model with the parameters received from the + server + - evaluate the updated model on the local test set + - return the local loss and accuracy to the server + +The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the functions +``train()`` and ``test()`` previously defined in ``cifar.py``. So what we really do here +is we tell Flower through our ``NumPyClient`` subclass which of our already defined +functions to call for training and evaluation. We included type annotations to give you +a better understanding of the data types that get passed around. .. code-block:: python @@ -277,8 +312,10 @@ We included type annotations to give you a better understanding of the data type loss, accuracy = cifar.test(self.model, self.testloader, device=DEVICE) return float(loss), self.num_examples["testset"], {"accuracy": float(accuracy)} -All that's left to do it to define a function that loads both model and data, creates a :code:`CifarClient`, and starts this client. -You load your data and model by using :code:`cifar.py`. Start :code:`CifarClient` with the function :code:`fl.client.start_client()` by pointing it at the same IP address we used in :code:`server.py`: +All that's left to do is to define a function that loads both model and data, creates a +``CifarClient``, and starts this client. You load your data and model by using +``cifar.py``. Start ``CifarClient`` with the function ``fl.client.start_client()`` by +pointing it at the same IP address we used in ``server.py``: .. 
code-block:: python @@ -300,15 +337,20 @@ You load your data and model by using :code:`cifar.py`. Start :code:`CifarClient And that's it. You can now open two additional terminal windows and run -.. code-block:: python +.. code-block:: bash python3 client.py -in each window (make sure that the server is running before you do so) and see your (previously centralized) PyTorch project run federated learning across two clients. Congratulations! +in each window (make sure that the server is running before you do so) and see your +(previously centralized) PyTorch project run federated learning across two clients. +Congratulations! Next Steps ---------- -The full source code for this example: `PyTorch: From Centralized To Federated (Code) `_. -Our example is, of course, somewhat over-simplified because both clients load the exact same dataset, which isn't realistic. -You're now prepared to explore this topic further. How about using different subsets of CIFAR-10 on each client? How about adding more clients? +The full source code for this example: `PyTorch: From Centralized To Federated (Code) +`_. +Our example is, of course, somewhat over-simplified because both clients load the exact +same dataset, which isn't realistic. You're now prepared to explore this topic further. +How about using different subsets of CIFAR-10 on each client? How about adding more +clients? diff --git a/doc/source/explanation-differential-privacy.rst b/doc/source/explanation-differential-privacy.rst index 69fd333f9b13..06e9dbdedd39 100644 --- a/doc/source/explanation-differential-privacy.rst +++ b/doc/source/explanation-differential-privacy.rst @@ -1,133 +1,171 @@ Differential Privacy ==================== -The information in datasets like healthcare, financial transactions, user preferences, etc., is valuable and has the potential for scientific breakthroughs and provides important business insights. -However, such data is also sensitive and there is a risk of compromising individual privacy. 
-Traditional methods like anonymization alone would not work because of attacks like Re-identification and Data Linkage. -That's where differential privacy comes in. It provides the possibility of analyzing data while ensuring the privacy of individuals. +The information in datasets like healthcare, financial transactions, user preferences, +etc., is valuable and has the potential for scientific breakthroughs and provides +important business insights. However, such data is also sensitive and there is a risk of +compromising individual privacy. +Traditional methods like anonymization alone would not work because of attacks like +Re-identification and Data Linkage. That's where differential privacy comes in. It +provides the possibility of analyzing data while ensuring the privacy of individuals. Differential Privacy -------------------- -Imagine two datasets that are identical except for a single record (for instance, Alice's data). -Differential Privacy (DP) guarantees that any analysis (M), like calculating the average income, will produce nearly identical results for both datasets (O and O' would be similar). -This preserves group patterns while obscuring individual details, ensuring the individual's information remains hidden in the crowd. -.. image:: ./_static/DP/dp-intro.png - :align: center - :width: 400 - :alt: DP Intro +Imagine two datasets that are identical except for a single record (for instance, +Alice's data). Differential Privacy (DP) guarantees that any analysis (M), like +calculating the average income, will produce nearly identical results for both datasets +(O and O' would be similar). This preserves group patterns while obscuring individual +details, ensuring the individual's information remains hidden in the crowd. +.. 
image:: ./_static/DP/dp-intro.png + :align: center + :width: 400 + :alt: DP Intro -One of the most commonly used mechanisms to achieve DP is adding enough noise to the output of the analysis to mask the contribution of each individual in the data while preserving the overall accuracy of the analysis. +One of the most commonly used mechanisms to achieve DP is adding enough noise to the +output of the analysis to mask the contribution of each individual in the data while +preserving the overall accuracy of the analysis. Formal Definition ~~~~~~~~~~~~~~~~~ -Differential Privacy (DP) provides statistical guarantees against the information an adversary can infer through the output of a randomized algorithm. -It provides an unconditional upper bound on the influence of a single individual on the output of the algorithm by adding noise [1]. -A randomized mechanism -M provides (:math:`\epsilon`, :math:`\delta`)-differential privacy if for any two neighboring databases, D :sub:`1` and D :sub:`2`, that differ in only a single record, -and for all possible outputs S ⊆ Range(A): - -.. math:: - \small - P[M(D_{1} \in A)] \leq e^{\delta} P[M(D_{2} \in A)] + \delta +Differential Privacy (DP) provides statistical guarantees against the information an +adversary can infer through the output of a randomized algorithm. It provides an +unconditional upper bound on the influence of a single individual on the output of the +algorithm by adding noise [1]. A randomized mechanism M provides (:math:`\epsilon`, +:math:`\delta`)-differential privacy if for any two neighboring databases, D :sub:`1` +and D :sub:`2`, that differ in only a single record, and for all possible outputs S ⊆ +Range(A): +.. math:: -The :math:`\epsilon` parameter, also known as the privacy budget, is a metric of privacy loss. -It also controls the privacy-utility trade-off; lower :math:`\epsilon` values indicate higher levels of privacy but are likely to reduce utility as well. 
-The :math:`\delta` parameter accounts for a small probability on which the upper bound :math:`\epsilon` does not hold. -The amount of noise needed to achieve differential privacy is proportional to the sensitivity of the output, which measures the maximum change in the output due to the inclusion or removal of a single record. + \small + P[M(D_{1}) \in S] \leq e^{\epsilon} P[M(D_{2}) \in S] + \delta +The :math:`\epsilon` parameter, also known as the privacy budget, is a metric of privacy +loss. It also controls the privacy-utility trade-off; lower :math:`\epsilon` values +indicate higher levels of privacy but are likely to reduce utility as well. The +:math:`\delta` parameter accounts for a small probability on which the upper bound +:math:`\epsilon` does not hold. The amount of noise needed to achieve differential +privacy is proportional to the sensitivity of the output, which measures the maximum +change in the output due to the inclusion or removal of a single record. Differential Privacy in Machine Learning ---------------------------------------- + DP can be utilized in machine learning to preserve the privacy of the training data. -Differentially private machine learning algorithms are designed in a way to prevent the algorithm to learn any specific information about any individual data points and subsequently prevent the model from revealing sensitive information. -Depending on the stage at which noise is introduced, various methods exist for applying DP to machine learning algorithms. -One approach involves adding noise to the training data (either to the features or labels), while another method entails injecting noise into the gradients of the loss function during model training. -Additionally, such noise can be incorporated into the model's output. 
+Differentially private machine learning algorithms are designed in a way to prevent the +algorithm to learn any specific information about any individual data points and +subsequently prevent the model from revealing sensitive information. Depending on the +stage at which noise is introduced, various methods exist for applying DP to machine +learning algorithms. One approach involves adding noise to the training data (either to +the features or labels), while another method entails injecting noise into the gradients +of the loss function during model training. Additionally, such noise can be incorporated +into the model's output. Differential Privacy in Federated Learning ------------------------------------------ -Federated learning is a data minimization approach that allows multiple parties to collaboratively train a model without sharing their raw data. -However, federated learning also introduces new privacy challenges. The model updates between parties and the central server can leak information about the local data. -These leaks can be exploited by attacks such as membership inference and property inference attacks, or model inversion attacks. -DP can play a crucial role in federated learning to provide privacy for the clients' data. +Federated learning is a data minimization approach that allows multiple parties to +collaboratively train a model without sharing their raw data. However, federated +learning also introduces new privacy challenges. The model updates between parties and +the central server can leak information about the local data. These leaks can be +exploited by attacks such as membership inference and property inference attacks, or +model inversion attacks. -Depending on the granularity of privacy provision or the location of noise addition, different forms of DP exist in federated learning. 
-In this explainer, we focus on two approaches of DP utilization in federated learning based on where the noise is added: at the server (also known as the center) or at the client (also known as the local). +DP can play a crucial role in federated learning to provide privacy for the clients' +data. -- **Central Differential Privacy**: DP is applied by the server and the goal is to prevent the aggregated model from leaking information about each client's data. +Depending on the granularity of privacy provision or the location of noise addition, +different forms of DP exist in federated learning. In this explainer, we focus on two +approaches of DP utilization in federated learning based on where the noise is added: at +the server (also known as the center) or at the client (also known as the local). -- **Local Differential Privacy**: DP is applied on the client side before sending any information to the server and the goal is to prevent the updates that are sent to the server from leaking any information about the client's data. +- **Central Differential Privacy**: DP is applied by the server and the goal is to + prevent the aggregated model from leaking information about each client's data. +- **Local Differential Privacy**: DP is applied on the client side before sending any + information to the server and the goal is to prevent the updates that are sent to the + server from leaking any information about the client's data. Central Differential Privacy ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -In this approach, which is also known as user-level DP, the central server is responsible for adding noise to the globally aggregated parameters. It should be noted that trust in the server is required. + +In this approach, which is also known as user-level DP, the central server is +responsible for adding noise to the globally aggregated parameters. It should be noted +that trust in the server is required. .. 
image:: ./_static/DP/CDP.png - :align: center - :width: 400 - :alt: Central Differential Privacy - -While there are various ways to implement central DP in federated learning, we concentrate on the algorithms proposed by [2] and [3]. -The overall approach is to clip the model updates sent by the clients and add some amount of noise to the aggregated model. -In each iteration, a random set of clients is chosen with a specific probability for training. -Each client performs local training on its own data. -The update of each client is then clipped by some value `S` (sensitivity `S`). -This would limit the impact of any individual client which is crucial for privacy and often beneficial for robustness. -A common approach to achieve this is by restricting the `L2` norm of the clients' model updates, ensuring that larger updates are scaled down to fit within the norm `S`. + :align: center + :width: 400 + :alt: Central Differential Privacy + +While there are various ways to implement central DP in federated learning, we +concentrate on the algorithms proposed by [2] and [3]. The overall approach is to clip +the model updates sent by the clients and add some amount of noise to the aggregated +model. In each iteration, a random set of clients is chosen with a specific probability +for training. Each client performs local training on its own data. The update of each +client is then clipped by some value `S` (sensitivity `S`). This would limit the impact +of any individual client which is crucial for privacy and often beneficial for +robustness. A common approach to achieve this is by restricting the `L2` norm of the +clients' model updates, ensuring that larger updates are scaled down to fit within the +norm `S`. .. image:: ./_static/DP/clipping.png - :align: center - :width: 300 - :alt: clipping + :align: center + :width: 300 + :alt: clipping -Afterwards, the Gaussian mechanism is used to add noise in order to distort the sum of all clients' updates. 
-The amount of noise is scaled to the sensitivity value to obtain a privacy guarantee. -The Gaussian mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( noise_scale * S ) / (number of sampled clients)`. +Afterwards, the Gaussian mechanism is used to add noise in order to distort the sum of +all clients' updates. The amount of noise is scaled to the sensitivity value to obtain a +privacy guarantee. The Gaussian mechanism is used with a noise sampled from `N (0, σ²)` +where `σ = ( noise_scale * S ) / (number of sampled clients)`. Clipping -^^^^^^^^ - -There are two forms of clipping commonly used in Central DP: Fixed Clipping and Adaptive Clipping. +++++++++ -- **Fixed Clipping** : A predefined fix threshold is set for the magnitude of clients' updates. Any update exceeding this threshold is clipped back to the threshold value. +There are two forms of clipping commonly used in Central DP: Fixed Clipping and Adaptive +Clipping. -- **Adaptive Clipping** : The clipping threshold dynamically adjusts based on the observed update distribution [4]. It means that the clipping value is tuned during the rounds with respect to the quantile of the update norm distribution. +- **Fixed Clipping** : A predefined fixed threshold is set for the magnitude of clients' + updates. Any update exceeding this threshold is clipped back to the threshold value. +- **Adaptive Clipping** : The clipping threshold dynamically adjusts based on the + observed update distribution [4]. It means that the clipping value is tuned during the + rounds with respect to the quantile of the update norm distribution. -The choice between fixed and adaptive clipping depends on various factors such as privacy requirements, data distribution, model complexity, and others. +The choice between fixed and adaptive clipping depends on various factors such as +privacy requirements, data distribution, model complexity, and others. 
Local Differential Privacy ~~~~~~~~~~~~~~~~~~~~~~~~~~ -In this approach, each client is responsible for performing DP. -Local DP avoids the need for a fully trusted aggregator, but it should be noted that local DP leads to a decrease in accuracy but better privacy in comparison to central DP. +In this approach, each client is responsible for performing DP. Local DP avoids the need +for a fully trusted aggregator, but it should be noted that local DP leads to a decrease +in accuracy but better privacy in comparison to central DP. .. image:: ./_static/DP/LDP.png - :align: center - :width: 400 - :alt: Local Differential Privacy - + :align: center + :width: 400 + :alt: Local Differential Privacy In this explainer, we focus on two forms of achieving Local DP: -- Each client adds noise to the local updates before sending them to the server. To achieve (:math:`\epsilon`, :math:`\delta`)-DP, considering the sensitivity of the local model to be ∆, Gaussian noise is applied with a noise scale of σ where: +- Each client adds noise to the local updates before sending them to the server. To + achieve (:math:`\epsilon`, :math:`\delta`)-DP, considering the sensitivity of the + local model to be ∆, Gaussian noise is applied with a noise scale of σ where: .. math:: + \small \frac{∆ \times \sqrt{2 \times \log\left(\frac{1.25}{\delta}\right)}}{\epsilon} - -- Each client adds noise to the gradients of the model during the local training (DP-SGD). More specifically, in this approach, gradients are clipped and an amount of calibrated noise is injected into the gradients. - +- Each client adds noise to the gradients of the model during the local training + (DP-SGD). More specifically, in this approach, gradients are clipped and an amount of + calibrated noise is injected into the gradients. Please note that these two approaches are providing privacy at different levels. - **References:** [1] Dwork et al. The Algorithmic Foundations of Differential Privacy. 
diff --git a/doc/source/explanation-federated-evaluation.rst b/doc/source/explanation-federated-evaluation.rst index bcdca9bae700..c56a5d48b2f6 100644 --- a/doc/source/explanation-federated-evaluation.rst +++ b/doc/source/explanation-federated-evaluation.rst @@ -1,8 +1,8 @@ Federated evaluation ==================== -There are two main approaches to evaluating models in federated learning systems: centralized (or server-side) evaluation and federated (or client-side) evaluation. - +There are two main approaches to evaluating models in federated learning systems: +centralized (or server-side) evaluation and federated (or client-side) evaluation. Centralized Evaluation ---------------------- @@ -10,15 +10,17 @@ Centralized Evaluation Built-In Strategies ~~~~~~~~~~~~~~~~~~~ -All built-in strategies support centralized evaluation by providing an evaluation function during initialization. -An evaluation function is any function that can take the current global model parameters as input and return evaluation results: +All built-in strategies support centralized evaluation by providing an evaluation +function during initialization. An evaluation function is any function that can take the +current global model parameters as input and return evaluation results: .. code-block:: python - + from flwr.common import NDArrays, Scalar - + from typing import Dict, Optional, Tuple + def get_evaluate_fn(model): """Return an evaluation function for server-side evaluation.""" @@ -38,6 +40,7 @@ An evaluation function is any function that can take the current global model pa return evaluate + # Load and compile model for server-side parameter evaluation model = tf.keras.applications.EfficientNetB0( input_shape=(32, 32, 3), weights=None, classes=10 @@ -47,7 +50,7 @@ An evaluation function is any function that can take the current global model pa # Create strategy strategy = fl.server.strategy.FedAvg( - # ... other FedAvg arguments + # ... 
other FedAvg arguments evaluate_fn=get_evaluate_fn(model), ) @@ -57,9 +60,10 @@ An evaluation function is any function that can take the current global model pa Custom Strategies ~~~~~~~~~~~~~~~~~ -The :code:`Strategy` abstraction provides a method called :code:`evaluate` that can directly be used to evaluate the current global model parameters. -The current server implementation calls :code:`evaluate` after parameter aggregation and before federated evaluation (see next paragraph). - +The ``Strategy`` abstraction provides a method called ``evaluate`` that can directly be +used to evaluate the current global model parameters. The current server implementation +calls ``evaluate`` after parameter aggregation and before federated evaluation (see next +paragraph). Federated Evaluation -------------------- @@ -67,7 +71,8 @@ Federated Evaluation Implementing Federated Evaluation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Client-side evaluation happens in the :code:`Client.evaluate` method and can be configured from the server side. +Client-side evaluation happens in the ``Client.evaluate`` method and can be configured +from the server side. .. code-block:: python @@ -79,9 +84,11 @@ Client-side evaluation happens in the :code:`Client.evaluate` method and can be def get_parameters(self, config): # ... + pass def fit(self, parameters, config): # ... + pass def evaluate(self, parameters, config): """Evaluate parameters on the locally held test set.""" @@ -100,12 +107,27 @@ Client-side evaluation happens in the :code:`Client.evaluate` method and can be Configuring Federated Evaluation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Federated evaluation can be configured from the server side. Built-in strategies support the following arguments: - -- :code:`fraction_evaluate`: a :code:`float` defining the fraction of clients that will be selected for evaluation. 
If :code:`fraction_evaluate` is set to :code:`0.1` and :code:`100` clients are connected to the server, then :code:`10` will be randomly selected for evaluation. If :code:`fraction_evaluate` is set to :code:`0.0`, federated evaluation will be disabled. -- :code:`min_evaluate_clients`: an :code:`int`: the minimum number of clients to be selected for evaluation. If :code:`fraction_evaluate` is set to :code:`0.1`, :code:`min_evaluate_clients` is set to 20, and :code:`100` clients are connected to the server, then :code:`20` clients will be selected for evaluation. -- :code:`min_available_clients`: an :code:`int` that defines the minimum number of clients which need to be connected to the server before a round of federated evaluation can start. If fewer than :code:`min_available_clients` are connected to the server, the server will wait until more clients are connected before it continues to sample clients for evaluation. -- :code:`on_evaluate_config_fn`: a function that returns a configuration dictionary which will be sent to the selected clients. The function will be called during each round and provides a convenient way to customize client-side evaluation from the server side, for example, to configure the number of validation steps performed. +Federated evaluation can be configured from the server side. Built-in strategies support +the following arguments: + +- ``fraction_evaluate``: a ``float`` defining the fraction of clients that will be + selected for evaluation. If ``fraction_evaluate`` is set to ``0.1`` and ``100`` + clients are connected to the server, then ``10`` will be randomly selected for + evaluation. If ``fraction_evaluate`` is set to ``0.0``, federated evaluation will be + disabled. +- ``min_evaluate_clients``: an ``int``: the minimum number of clients to be selected for + evaluation. 
If ``fraction_evaluate`` is set to ``0.1``, ``min_evaluate_clients`` is + set to 20, and ``100`` clients are connected to the server, then ``20`` clients will + be selected for evaluation. +- ``min_available_clients``: an ``int`` that defines the minimum number of clients which + need to be connected to the server before a round of federated evaluation can start. + If fewer than ``min_available_clients`` are connected to the server, the server will + wait until more clients are connected before it continues to sample clients for + evaluation. +- ``on_evaluate_config_fn``: a function that returns a configuration dictionary which + will be sent to the selected clients. The function will be called during each round + and provides a convenient way to customize client-side evaluation from the server + side, for example, to configure the number of validation steps performed. .. code-block:: python @@ -118,6 +140,7 @@ Federated evaluation can be configured from the server side. Built-in strategies val_steps = 5 if server_round < 4 else 10 return {"val_steps": val_steps} + # Create strategy strategy = fl.server.strategy.FedAvg( # ... other FedAvg arguments @@ -130,11 +153,11 @@ Federated evaluation can be configured from the server side. Built-in strategies # Start Flower server for four rounds of federated learning fl.server.start_server(server_address="[::]:8080", strategy=strategy) - Evaluating Local Model Updates During Training ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Model parameters can also be evaluated during training. :code:`Client.fit` can return arbitrary evaluation results as a dictionary: +Model parameters can also be evaluated during training. ``Client.fit`` can return +arbitrary evaluation results as a dictionary: .. code-block:: python @@ -146,6 +169,7 @@ Model parameters can also be evaluated during training. :code:`Client.fit` can r def get_parameters(self, config): # ... 
+ pass def fit(self, parameters, config): """Train parameters on the locally held training set.""" @@ -171,9 +195,12 @@ Model parameters can also be evaluated during training. :code:`Client.fit` can r def evaluate(self, parameters, config): # ... - + pass Full Code Example ----------------- -For a full code example that uses both centralized and federated evaluation, see the *Advanced TensorFlow Example* (the same approach can be applied to workloads implemented in any other framework): https://github.com/adap/flower/tree/main/examples/advanced-tensorflow +For a full code example that uses both centralized and federated evaluation, see the +*Advanced TensorFlow Example* (the same approach can be applied to workloads implemented +in any other framework): +https://github.com/adap/flower/tree/main/examples/advanced-tensorflow diff --git a/doc/source/explanation-flower-architecture.rst b/doc/source/explanation-flower-architecture.rst new file mode 100644 index 000000000000..e82da56dcefa --- /dev/null +++ b/doc/source/explanation-flower-architecture.rst @@ -0,0 +1,158 @@ +Flower Architecture +=================== + +This page explains the architecture of deployed Flower federated learning system. + +In federated learning (FL), there is typically one server and a number of clients that +are connected to the server. This is often called a federation. + +The role of the server is to coordinate the training process. The role of each client is +to receive tasks from the server, execute those tasks and return the results back to the +server. + +This is sometimes called a hub-and-spoke topology: + +.. figure:: ./_static/flower-architecture-hub-and-spoke.svg + :align: center + :width: 600 + :alt: Hub-and-spoke topology in federated learning + :class: no-scaled-link + + Hub-and-spoke topology in federated learning (one server, multiple clients). + +In a real-world deployment, we typically want to run different projects on such a +federation. 
Each project could use different hyperparameters, different model +architectures, different aggregation strategies, or even different machine learning +frameworks like PyTorch and TensorFlow. + +This is why, in Flower, both the server side and the client side are split into two +parts. One part is long-lived and responsible for communicating across the network, the +other part is short-lived and executes task-specific code. + +A Flower `server` consists of **SuperLink** and ``ServerApp``: + +- **SuperLink**: a long-running process that forwards task instructions to clients + (SuperNodes) and receives task results back. +- ``ServerApp``: a short-lived process with project-specific code that customizes all + server-side aspects of federated learning systems (client selection, client + configuration, result aggregation). This is what AI researchers and AI engineers write + when they build Flower apps. + +A Flower `client` consists of **SuperNode** and ``ClientApp``: + +- **SuperNode**: a long-running process that connects to the SuperLink, asks for tasks, + executes tasks (for example, "train this model on your local data") and returns task + results back to the SuperLink. +- ``ClientApp``: a short-lived process with project-specific code that customizes all + client-side aspects of federated learning systems (local model training and + evaluation, pre- and post-processing). This is what AI researchers and AI engineers + write when they build Flower apps. + +Why SuperNode and SuperLink? Well, in federated learning, the clients are the actual +stars of the show. They hold the training data and they run the actual training. This is +why Flower decided to name them **SuperNode**. The **SuperLink** is then responsible for +acting as the `missing link` between all those SuperNodes. + +.. 
figure:: ./_static/flower-architecture-basic-architecture.svg + :align: center + :width: 600 + :alt: Basic Flower architecture + :class: no-scaled-link + + The basic Flower architecture for federated learning. + +In a Flower app project, users will typically develop the ``ServerApp`` and the +``ClientApp``. All the network communication between `server` and `clients` is taken +care of by the SuperLink and SuperNodes. + +.. tip:: + + For more details, please refer to the |serverapp_link|_ and |clientapp_link|_ + documentation. + +With *multi-run*, multiple ``ServerApp``\s and ``ClientApp``\s are now capable of +running on the same federation consisting of a single long-running SuperLink and +multiple long-running SuperNodes. This is sometimes referred to as `multi-tenancy` or +`multi-job`. + +As shown in the figure below, two projects, each consisting of a ``ServerApp`` and a +``ClientApp``, could share the same SuperLink and SuperNodes. + +.. figure:: ./_static/flower-architecture-multi-run.svg + :align: center + :width: 600 + :alt: Multi-tenancy federated learning architecture + :class: no-scaled-link + + Multi-tenancy federated learning architecture with Flower + +To illustrate how multi-run works, consider one federated learning training run where a +``ServerApp`` and a ``ClientApp`` are participating in ``[run 1]``. Note that a +SuperNode will only run a ``ClientApp`` if it is selected to participate in the training +run. + +In ``[run 1]`` below, all the SuperNodes are selected and therefore run their +corresponding ``ClientApp``\s: + +.. figure:: ./_static/flower-architecture-multi-run-1.svg + :align: center + :width: 600 + :alt: Multi-tenancy federated learning architecture - Run 1 + :class: no-scaled-link + + Run 1 in a multi-run federated learning architecture with Flower. All SuperNodes + participate in the training round. + +However, in ``[run 2]``, only the first and third SuperNodes are selected to participate +in the training: + +.. 
figure:: ./_static/flower-architecture-multi-run-2.svg + :align: center + :width: 600 + :alt: Multi-tenancy federated learning architecture - Run 2 + :class: no-scaled-link + + Run 2 in a multi-run federated learning architecture with Flower. Only the first and + third SuperNodes are selected to participate in the training round. + +Therefore, with Flower multi-run, different projects (each consisting of a ``ServerApp`` +and ``ClientApp``) can run on different sets of clients. + +To help you start and manage all of the concurrently executing training runs, Flower +offers one additional long-running server-side service called **SuperExec**. When you +type ``flwr run`` to start a new training run, the ``flwr`` CLI bundles your local +project (mainly your ``ServerApp`` and ``ClientApp``) and sends it to the **SuperExec**. +The **SuperExec** will then take care of starting and managing your ``ServerApp``, which +in turn selects SuperNodes to execute your ``ClientApp``. + +This architecture allows many users to (concurrently) run their projects on the same +federation, simply by typing ``flwr run`` on their local developer machine. + +.. figure:: ./_static/flower-architecture-deployment-engine.svg + :align: center + :width: 800 + :alt: Flower Deployment Engine with SuperExec + :class: no-scaled-link + + The SuperExec service for managing concurrent training runs in Flower. + +.. note:: + + This explanation covers the Flower Deployment Engine. An explanation covering the + Flower Simulation Engine will follow. + +.. important:: + + As we continue to enhance Flower at a rapid pace, we'll periodically update this + explainer document. Feel free to share any feedback with us. + +.. |clientapp_link| replace:: ``ClientApp`` + +.. |serverapp_link| replace:: ``ServerApp`` + +.. _clientapp_link: ref-api/flwr.client.ClientApp.html + +.. _serverapp_link: ref-api/flwr.server.ServerApp.html + +.. 
meta:: + :description: Explore the federated learning architecture of the Flower framework, featuring multi-run, concurrent execution, and scalable, secure machine learning while preserving data privacy. diff --git a/doc/source/how-to-aggregate-evaluation-results.rst b/doc/source/how-to-aggregate-evaluation-results.rst index fa4ba88b8ff0..be6e20068c88 100644 --- a/doc/source/how-to-aggregate-evaluation-results.rst +++ b/doc/source/how-to-aggregate-evaluation-results.rst @@ -1,14 +1,15 @@ Aggregate evaluation results ============================ -The Flower server does not prescribe a way to aggregate evaluation results, but it enables the user to fully customize result aggregation. - +The Flower server does not prescribe a way to aggregate evaluation results, but it +enables the user to fully customize result aggregation. Aggregate Custom Evaluation Results ----------------------------------- -The same :code:`Strategy`-customization approach can be used to aggregate custom evaluation results coming from individual clients. -Clients can return custom metrics to the server by returning a dictionary: +The same ``Strategy``-customization approach can be used to aggregate custom evaluation +results coming from individual clients. Clients can return custom metrics to the server +by returning a dictionary: .. code-block:: python @@ -16,9 +17,11 @@ Clients can return custom metrics to the server by returning a dictionary: def get_parameters(self, config): # ... + pass def fit(self, parameters, config): # ... 
+ pass def evaluate(self, parameters, config): """Evaluate parameters on the locally held test set.""" @@ -33,7 +36,8 @@ Clients can return custom metrics to the server by returning a dictionary: num_examples_test = len(self.x_test) return loss, num_examples_test, {"accuracy": accuracy} -The server can then use a customized strategy to aggregate the metrics provided in these dictionaries: +The server can then use a customized strategy to aggregate the metrics provided in these +dictionaries: .. code-block:: python @@ -50,7 +54,9 @@ The server can then use a customized strategy to aggregate the metrics provided return None, {} # Call aggregate_evaluate from base class (FedAvg) to aggregate loss and metrics - aggregated_loss, aggregated_metrics = super().aggregate_evaluate(server_round, results, failures) + aggregated_loss, aggregated_metrics = super().aggregate_evaluate( + server_round, results, failures + ) # Weigh accuracy of each client by number of examples used accuracies = [r.metrics["accuracy"] * r.num_examples for _, r in results] @@ -58,11 +64,14 @@ The server can then use a customized strategy to aggregate the metrics provided # Aggregate and print custom metric aggregated_accuracy = sum(accuracies) / sum(examples) - print(f"Round {server_round} accuracy aggregated from client results: {aggregated_accuracy}") + print( + f"Round {server_round} accuracy aggregated from client results: {aggregated_accuracy}" + ) # Return aggregated loss and metrics (i.e., aggregated accuracy) return aggregated_loss, {"accuracy": aggregated_accuracy} + # Create strategy and run server strategy = AggregateCustomMetricStrategy( # (same arguments as FedAvg here) diff --git a/doc/source/how-to-authenticate-supernodes.rst b/doc/source/how-to-authenticate-supernodes.rst index 9eb5e010ea4b..a2dd499dbc10 100644 --- a/doc/source/how-to-authenticate-supernodes.rst +++ b/doc/source/how-to-authenticate-supernodes.rst @@ -1,79 +1,104 @@ Authenticate SuperNodes ======================= 
-Flower has built-in support for authenticated SuperNodes that you can use to verify the identities of each SuperNode connecting to a SuperLink. -Flower node authentication works similar to how GitHub SSH authentication works: +Flower has built-in support for authenticated SuperNodes that you can use to verify the +identities of each SuperNode connecting to a SuperLink. Flower node authentication works +similar to how GitHub SSH authentication works: -* SuperLink (server) stores a list of known (client) node public keys -* Using ECDH, both SuperNode and SuperLink independently derive a shared secret -* Shared secret is used to compute the HMAC value of the message sent from SuperNode to SuperLink as a token -* SuperLink verifies the token +- SuperLink (server) stores a list of known (client) node public keys +- Using ECDH, both SuperNode and SuperLink independently derive a shared secret +- Shared secret is used to compute the HMAC value of the message sent from SuperNode to + SuperLink as a token +- SuperLink verifies the token -We recommend you to check out the complete `code example `_ demonstrating federated learning with Flower in an authenticated setting. +We recommend you to check out the complete `code example +`_ +demonstrating federated learning with Flower in an authenticated setting. .. note:: + This guide covers a preview feature that might change in future versions of Flower. .. note:: - For increased security, node authentication can only be used when encrypted connections (SSL/TLS) are enabled. -Enable node authentication in :code:`SuperLink` ------------------------------------------------ + For increased security, node authentication can only be used when encrypted + connections (SSL/TLS) are enabled. + +Enable node authentication in ``SuperLink`` +------------------------------------------- -To enable node authentication, first you need to configure SSL/TLS connections to secure the SuperLink<>SuperNode communication. 
You can find the complete guide -`here `_. -After configuring secure connections, you can enable client authentication in a long-running Flower :code:`SuperLink`. -Use the following terminal command to start a Flower :code:`SuperNode` that has both secure connections and node authentication enabled: +To enable node authentication, first you need to configure SSL/TLS connections to secure +the SuperLink<>SuperNode communication. You can find the complete guide `here +`_. After +configuring secure connections, you can enable client authentication in a long-running +Flower ``SuperLink``. Use the following terminal command to start a Flower ``SuperLink`` +that has both secure connections and node authentication enabled: .. code-block:: bash flower-superlink - --ssl-ca-certfile certificates/ca.crt - --ssl-certfile certificates/server.pem + --ssl-ca-certfile certificates/ca.crt + --ssl-certfile certificates/server.pem --ssl-keyfile certificates/server.key --auth-list-public-keys keys/client_public_keys.csv --auth-superlink-private-key keys/server_credentials --auth-superlink-public-key keys/server_credentials.pub - + Let's break down the authentication flags: -1. The first flag :code:`--auth-list-public-keys` expects a path to a CSV file storing all known node public keys. You need to store all known node public keys that are allowed to participate in a federation in one CSV file (:code:`.csv`). +1. The first flag ``--auth-list-public-keys`` expects a path to a CSV file storing all + known node public keys. You need to store all known node public keys that are allowed + to participate in a federation in one CSV file (``.csv``). - A valid CSV file storing known node public keys should list the keys in OpenSSH format, separated by commas and without any comments. For an example, refer to our code sample, which contains a CSV file with two known node public keys.
+ A valid CSV file storing known node public keys should list the keys in OpenSSH + format, separated by commas and without any comments. For an example, refer to + our code sample, which contains a CSV file with two known node public keys. -2. The second and third flags :code:`--auth-superlink-private-key` and :code:`--auth-superlink-public-key` expect paths to the server's private and public keys. For development purposes, you can generate a private and public key pair using :code:`ssh-keygen -t ecdsa -b 384`. +2. The second and third flags ``--auth-superlink-private-key`` and + ``--auth-superlink-public-key`` expect paths to the server's private and public keys. + For development purposes, you can generate a private and public key pair using + ``ssh-keygen -t ecdsa -b 384``. .. note:: - In Flower 1.9, there is no support for dynamically removing, editing, or adding known node public keys to the SuperLink. - To change the set of known nodes, you need to shut the server down, edit the CSV file, and start the server again. - Support for dynamically changing the set of known nodes is on the roadmap to be released in Flower 1.10 (ETA: June). + In Flower 1.9, there is no support for dynamically removing, editing, or adding + known node public keys to the SuperLink. To change the set of known nodes, you need + to shut the server down, edit the CSV file, and start the server again. Support for + dynamically changing the set of known nodes is on the roadmap to be released in + Flower 1.10 (ETA: June). -Enable node authentication in :code:`SuperNode` -------------------------------------------------- +Enable node authentication in ``SuperNode`` +------------------------------------------- -Similar to the long-running Flower server (:code:`SuperLink`), you can easily enable node authentication in the long-running Flower client (:code:`SuperNode`). 
-Use the following terminal command to start an authenticated :code:`SuperNode`: +Similar to the long-running Flower server (``SuperLink``), you can easily enable node +authentication in the long-running Flower client (``SuperNode``). Use the following +terminal command to start an authenticated ``SuperNode``: .. code-block:: bash - - flower-client-app client:app - --root-certificates certificates/ca.crt - --server 127.0.0.1:9092 - --auth-supernode-private-key keys/client_credentials - --auth-supernode-public-key keys/client_credentials.pub -The :code:`--auth-supernode-private-key` flag expects a path to the node's private key file and the :code:`--auth-supernode-public-key` flag expects a path to the node's public key file. For development purposes, you can generate a private and public key pair using :code:`ssh-keygen -t ecdsa -b 384`. + flower-supernode + --root-certificates certificates/ca.crt + --superlink 127.0.0.1:9092 + --auth-supernode-private-key keys/client_credentials + --auth-supernode-public-key keys/client_credentials.pub +The ``--auth-supernode-private-key`` flag expects a path to the node's private key file +and the ``--auth-supernode-public-key`` flag expects a path to the node's public key +file. For development purposes, you can generate a private and public key pair using +``ssh-keygen -t ecdsa -b 384``. Security notice --------------- -The system's security relies on the credentials of the SuperLink and each SuperNode. Therefore, it is imperative to safeguard and safely store the credentials to avoid security risks such as Public Key Infrastructure (PKI) impersonation attacks. -The node authentication mechanism also involves human interaction, so please ensure that all of the communication is done in a secure manner, using trusted communication methods. - +The system's security relies on the credentials of the SuperLink and each SuperNode. 
+Therefore, it is imperative to safeguard and safely store the credentials to avoid +security risks such as Public Key Infrastructure (PKI) impersonation attacks. The node +authentication mechanism also involves human interaction, so please ensure that all of +the communication is done in a secure manner, using trusted communication methods. Conclusion ---------- -You should now have learned how to start a long-running Flower server (:code:`SuperLink`) and client (:code:`SuperNode`) with node authentication enabled. You should also know the significance of the private key and store it safely to minimize security risks. +You should now have learned how to start a long-running Flower server (``SuperLink``) +and client (``SuperNode``) with node authentication enabled. You should also know the +significance of the private key and store it safely to minimize security risks. diff --git a/doc/source/how-to-configure-clients.rst b/doc/source/how-to-configure-clients.rst index ff0a2f4033df..c950ab3be9e7 100644 --- a/doc/source/how-to-configure-clients.rst +++ b/doc/source/how-to-configure-clients.rst @@ -1,37 +1,55 @@ Configure clients ================= -Along with model parameters, Flower can send configuration values to clients. Configuration values can be used for various purposes. They are, for example, a popular way to control client-side hyperparameters from the server. +Along with model parameters, Flower can send configuration values to clients. +Configuration values can be used for various purposes. They are, for example, a popular +way to control client-side hyperparameters from the server. Configuration values -------------------- -Configuration values are represented as a dictionary with ``str`` keys and values of type ``bool``, ``bytes``, ``double`` (64-bit precision float), ``int``, or ``str`` (or equivalent types in different languages). 
Here is an example of a configuration dictionary in Python: +Configuration values are represented as a dictionary with ``str`` keys and values of +type ``bool``, ``bytes``, ``double`` (64-bit precision float), ``int``, or ``str`` (or +equivalent types in different languages). Here is an example of a configuration +dictionary in Python: .. code-block:: python config_dict = { - "dropout": True, # str key, bool value + "dropout": True, # str key, bool value "learning_rate": 0.01, # str key, float value - "batch_size": 32, # str key, int value - "optimizer": "sgd", # str key, str value + "batch_size": 32, # str key, int value + "optimizer": "sgd", # str key, str value } -Flower serializes these configuration dictionaries (or *config dict* for short) to their ProtoBuf representation, transports them to the client using gRPC, and then deserializes them back to Python dictionaries. +Flower serializes these configuration dictionaries (or *config dict* for short) to their +ProtoBuf representation, transports them to the client using gRPC, and then deserializes +them back to Python dictionaries. .. note:: - Currently, there is no support for directly sending collection types (e.g., ``Set``, ``List``, ``Map``) as values in configuration dictionaries. There are several workarounds to send collections as values by converting them to one of the supported value types (and converting them back on the client-side). - - One can, for example, convert a list of floating-point numbers to a JSON string, then send the JSON string using the configuration dictionary, and then convert the JSON string back to a list of floating-point numbers on the client. + Currently, there is no support for directly sending collection types (e.g., ``Set``, + ``List``, ``Map``) as values in configuration dictionaries. There are several + workarounds to send collections as values by converting them to one of the supported + value types (and converting them back on the client-side). 
+ One can, for example, convert a list of floating-point numbers to a JSON string, + then send the JSON string using the configuration dictionary, and then convert the + JSON string back to a list of floating-point numbers on the client. Configuration through built-in strategies ----------------------------------------- -The easiest way to send configuration values to clients is to use a built-in strategy like :code:`FedAvg`. Built-in strategies support so-called configuration functions. A configuration function is a function that the built-in strategy calls to get the configuration dictionary for the current round. It then forwards the configuration dictionary to all the clients selected during that round. +The easiest way to send configuration values to clients is to use a built-in strategy +like ``FedAvg``. Built-in strategies support so-called configuration functions. A +configuration function is a function that the built-in strategy calls to get the +configuration dictionary for the current round. It then forwards the configuration +dictionary to all the clients selected during that round. -Let's start with a simple example. Imagine we want to send (a) the batch size that the client should use, (b) the current global round of federated learning, and (c) the number of epochs to train on the client-side. Our configuration function could look like this: +Let's start with a simple example. Imagine we want to send (a) the batch size that the +client should use, (b) the current global round of federated learning, and (c) the +number of epochs to train on the client-side. Our configuration function could look like +this: .. code-block:: python @@ -44,12 +62,13 @@ Let's start with a simple example. 
Imagine we want to send (a) the batch size th } return config -To make the built-in strategies use this function, we can pass it to ``FedAvg`` during initialization using the parameter :code:`on_fit_config_fn`: +To make the built-in strategies use this function, we can pass it to ``FedAvg`` during +initialization using the parameter ``on_fit_config_fn``: .. code-block:: python strategy = FedAvg( - ..., # Other FedAvg parameters + ..., # Other FedAvg parameters on_fit_config_fn=fit_config, # The fit_config function we defined earlier ) @@ -64,9 +83,15 @@ One the client side, we receive the configuration dictionary in ``fit``: print(config["local_epochs"]) # Prints `2` # ... (rest of `fit` method) -There is also an `on_evaluate_config_fn` to configure evaluation, which works the same way. They are separate functions because one might want to send different configuration values to `evaluate` (for example, to use a different batch size). +There is also an `on_evaluate_config_fn` to configure evaluation, which works the same +way. They are separate functions because one might want to send different configuration +values to `evaluate` (for example, to use a different batch size). -The built-in strategies call this function every round (that is, every time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). Calling `on_evaluate_config_fn` every round allows us to vary/change the config dict over consecutive rounds. If we wanted to implement a hyperparameter schedule, for example, to increase the number of local epochs during later rounds, we could do the following: +The built-in strategies call this function every round (that is, every time +`Strategy.configure_fit` or `Strategy.configure_evaluate` runs). Calling +`on_evaluate_config_fn` every round allows us to vary/change the config dict over +consecutive rounds. 
If we wanted to implement a hyperparameter schedule, for example, to +increase the number of local epochs during later rounds, we could do the following: .. code-block:: python @@ -79,14 +104,19 @@ The built-in strategies call this function every round (that is, every time `Str } return config -The :code:`FedAvg` strategy will call this function *every round*. +The ``FedAvg`` strategy will call this function *every round*. Configuring individual clients ------------------------------ -In some cases, it is necessary to send different configuration values to different clients. +In some cases, it is necessary to send different configuration values to different +clients. -This can be achieved by customizing an existing strategy or by :doc:`implementing a custom strategy from scratch `. Here's a nonsensical example that customizes :code:`FedAvg` by adding a custom ``"hello": "world"`` configuration key/value pair to the config dict of a *single client* (only the first client in the list, the other clients in this round to not receive this "special" config value): +This can be achieved by customizing an existing strategy or by :doc:`implementing a +custom strategy from scratch `. Here's a nonsensical +example that customizes ``FedAvg`` by adding a custom ``"hello": "world"`` configuration +key/value pair to the config dict of a *single client* (only the first client in the +list, the other clients in this round do not receive this "special" config value): ..
code-block:: python @@ -94,7 +124,9 @@ This can be achieved by customizing an existing strategy or by :doc:`implementin def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager ) -> List[Tuple[ClientProxy, FitIns]]: - client_instructions = super().configure_fit(server_round, parameters, client_manager) + client_instructions = super().configure_fit( + server_round, parameters, client_manager + ) # Add special "hello": "world" config key/value pair, # but only to the first client in the list @@ -103,6 +135,7 @@ This can be achieved by customizing an existing strategy or by :doc:`implementin return client_instructions + # Create strategy and run server strategy = CustomClientConfigStrategy( # ... (same arguments as plain FedAvg here) diff --git a/doc/source/how-to-configure-logging.rst b/doc/source/how-to-configure-logging.rst index d5559429a73c..bb7461390b42 100644 --- a/doc/source/how-to-configure-logging.rst +++ b/doc/source/how-to-configure-logging.rst @@ -1,17 +1,19 @@ Configure logging ================= -The Flower logger keeps track of all core events that take place in federated learning workloads. -It presents information by default following a standard message format: +The Flower logger keeps track of all core events that take place in federated learning +workloads. It presents information by default following a standard message format: .. code-block:: python DEFAULT_FORMATTER = logging.Formatter( - "%(levelname)s %(name)s %(asctime)s | %(filename)s:%(lineno)d | %(message)s" + "%(levelname)s %(name)s %(asctime)s | %(filename)s:%(lineno)d | %(message)s" ) -containing relevant information including: log message level (e.g. :code:`INFO`, :code:`DEBUG`), a timestamp, the line where the logging took place from, as well as the log message itself. -In this way, the logger would typically display information on your terminal as follows: +containing relevant information including: log message level (e.g. 
``INFO``, ``DEBUG``), +a timestamp, the line where the logging took place from, as well as the log message +itself. In this way, the logger would typically display information on your terminal as +follows: .. code-block:: bash @@ -29,29 +31,35 @@ In this way, the logger would typically display information on your terminal as INFO flwr 2023-07-15 15:32:36,118 | server.py:125 | fit progress: (5, 358.6936808824539, {'accuracy': 0.3467}, 18.964264554999318) ... - Saving log to file -------------------- +------------------ -By default, the Flower log is outputted to the terminal where you launch your Federated Learning workload from. This applies for both gRPC-based federation (i.e. when you do :code:`fl.server.start_server`) and when using the :code:`VirtualClientEngine` (i.e. when you do :code:`fl.simulation.start_simulation`). -In some situations you might want to save this log to disk. You can do so by calling the `fl.common.logger.configure() `_ function. For example: +By default, the Flower log is outputted to the terminal where you launch your Federated +Learning workload from. This applies for both gRPC-based federation (i.e. when you do +``fl.server.start_server``) and when using the ``VirtualClientEngine`` (i.e. when you do +``fl.simulation.start_simulation``). In some situations you might want to save this log +to disk. You can do so by calling the `fl.common.logger.configure() +`_ function. For +example: .. code-block:: python - - import flwr as fl - - ... - # in your main file and before launching your experiment - # add an identifier to your logger - # then specify the name of the file where the log should be outputted to - fl.common.logger.configure(identifier="myFlowerExperiment", filename="log.txt") + import flwr as fl + + ... - # then start your workload - fl.simulation.start_simulation(...) # or fl.server.start_server(...) 
+ # in your main file and before launching your experiment + # add an identifier to your logger + # then specify the name of the file where the log should be outputted to + fl.common.logger.configure(identifier="myFlowerExperiment", filename="log.txt") -With the above, Flower will record the log you see on your terminal to :code:`log.txt`. This file will be created in the same directory as were you are running the code from. -If we inspect we see the log above is also recorded but prefixing with :code:`identifier` each line: + # then start your workload + fl.simulation.start_simulation(...) # or fl.server.start_server(...) + +With the above, Flower will record the log you see on your terminal to ``log.txt``. This +file will be created in the same directory as where you are running the code from. If we +inspect we see the log above is also recorded but prefixing with ``identifier`` each +line: .. code-block:: bash @@ -69,12 +77,11 @@ If we inspect we see the log above is also recorded but prefixing with :code:`id myFlowerExperiment | INFO flwr 2023-07-15 15:32:36,118 | server.py:125 | fit progress: (5, 358.6936808824539, {'accuracy': 0.3467}, 18.964264554999318) ... - Log your own messages --------------------- -You might expand the information shown by default with the Flower logger by adding more messages relevant to your application. -You can achieve this easily as follows. +You might expand the information shown by default with the Flower logger by adding more +messages relevant to your application. You can achieve this easily as follows. .. code-block:: python @@ -84,25 +91,31 @@ You can achieve this easily as follows. # For example, let's say you want to add to the log some info about the training on your client for debugging purposes + class FlowerClient(fl.client.NumPyClient): - def __init__(self, cid: int ...): + def __init__( + self, + cid: int, + # ... + ): self.cid = cid - self.net = ... - ... + self.net = net + # ...
def fit(self, parameters, config): log(INFO, f"Printing a custom INFO message at the start of fit() :)") - + set_params(self.net, parameters) log(DEBUG, f"Client {self.cid} is doing fit() with config: {config}") - ... + # ... -In this way your logger will show, in addition to the default messages, the ones introduced by the clients as specified above. +In this way your logger will show, in addition to the default messages, the ones +introduced by the clients as specified above. .. code-block:: bash - + ... INFO flwr 2023-07-15 16:18:21,726 | server.py:89 | Initializing global parameters INFO flwr 2023-07-15 16:18:21,726 | server.py:276 | Requesting initial parameters from one random client @@ -123,10 +136,13 @@ In this way your logger will show, in addition to the default messages, the ones DEBUG flwr 2023-07-15 16:18:28,617 | main.py:63 | Client 13 is doing fit() with config: {'epochs': 5, 'batch_size': 64} ... - Log to a remote service ----------------------- -The :code:`fl.common.logger.configure` function, also allows specifying a host to which logs can be pushed (via :code:`POST`) through a native Python :code:`logging.handler.HTTPHandler`. -This is a particularly useful feature in :code:`gRPC`-based Federated Learning workloads where otherwise gathering logs from all entities (i.e. the server and the clients) might be cumbersome. -Note that in Flower simulation, the server automatically displays all logs. You can still specify a :code:`HTTPHandler` should you wish to backup or analyze the logs somewhere else. +The ``fl.common.logger.configure`` function, also allows specifying a host to which logs +can be pushed (via ``POST``) through a native Python ``logging.handler.HTTPHandler``. +This is a particularly useful feature in ``gRPC``-based Federated Learning workloads +where otherwise gathering logs from all entities (i.e. the server and the clients) might +be cumbersome. Note that in Flower simulation, the server automatically displays all +logs. 
You can still specify a ``HTTPHandler`` should you wish to backup or analyze the +logs somewhere else. diff --git a/doc/source/how-to-enable-ssl-connections.rst b/doc/source/how-to-enable-ssl-connections.rst index 870f4b0f64c9..cd8590bc3436 100644 --- a/doc/source/how-to-enable-ssl-connections.rst +++ b/doc/source/how-to-enable-ssl-connections.rst @@ -1,80 +1,84 @@ Enable SSL connections ====================== -This guide describes how to a SSL-enabled secure Flower server (:code:`SuperLink`) can be started and -how a Flower client (:code:`SuperNode`) can establish a secure connections to it. +This guide describes how an SSL-enabled secure Flower server (``SuperLink``) can be +started and how a Flower client (``SuperNode``) can establish a secure connection to +it. -A complete code example demonstrating a secure connection can be found -`here `_. - -The code example comes with a :code:`README.md` file which explains how to start it. Although it is -already SSL-enabled, it might be less descriptive on how it does so. Stick to this guide for a deeper -introduction to the topic. +A complete code example demonstrating a secure connection can be found `here +`_. +The code example comes with a ``README.md`` file which explains how to start it. +Although it is already SSL-enabled, it might be less descriptive on how it does so. +Stick to this guide for a deeper introduction to the topic. Certificates ------------ -Using SSL-enabled connections requires certificates to be passed to the server and client. For -the purpose of this guide we are going to generate self-signed certificates. As this can become -quite complex we are going to ask you to run the script in -:code:`examples/advanced-tensorflow/certificates/generate.sh` -with the following command sequence: +Using SSL-enabled connections requires certificates to be passed to the server and +client. For the purpose of this guide we are going to generate self-signed certificates.
+As this can become quite complex we are going to ask you to run the script in +``examples/advanced-tensorflow/certificates/generate.sh`` with the following command +sequence: .. code-block:: bash - cd examples/advanced-tensorflow/certificates - ./generate.sh - -This will generate the certificates in :code:`examples/advanced-tensorflow/.cache/certificates`. + cd examples/advanced-tensorflow/certificates + ./generate.sh -The approach for generating SSL certificates in the context of this example can serve as an inspiration and -starting point, but it should not be used as a reference for production environments. Please refer to other -sources regarding the issue of correctly generating certificates for production environments. -For non-critical prototyping or research projects, it might be sufficient to use the self-signed certificates generated using -the scripts mentioned in this guide. +This will generate the certificates in +``examples/advanced-tensorflow/.cache/certificates``. +The approach for generating SSL certificates in the context of this example can serve as +an inspiration and starting point, but it should not be used as a reference for +production environments. Please refer to other sources regarding the issue of correctly +generating certificates for production environments. For non-critical prototyping or +research projects, it might be sufficient to use the self-signed certificates generated +using the scripts mentioned in this guide. Server (SuperLink) ------------------ -Use the following terminal command to start a sever (SuperLink) that uses the previously generated certificates: +Use the following terminal command to start a server (SuperLink) that uses the previously +generated certificates: ..
code-block:: bash - flower-superlink - --ssl-ca-certfile certificates/ca.crt - --ssl-certfile certificates/server.pem + flower-superlink + --ssl-ca-certfile certificates/ca.crt + --ssl-certfile certificates/server.pem --ssl-keyfile certificates/server.key -When providing certificates, the server expects a tuple of three certificates paths: CA certificate, server certificate and server private key. - +When providing certificates, the server expects a tuple of three certificate paths: CA +certificate, server certificate and server private key. Client (SuperNode) ------------------ -Use the following terminal command to start a client (SuperNode) that uses the previously generated certificates: +Use the following terminal command to start a client (SuperNode) that uses the +previously generated certificates: .. code-block:: bash - flower-client-app client:app - --root-certificates certificates/ca.crt - --server 127.0.0.1:9092 - -When setting :code:`root_certificates`, the client expects a file path to PEM-encoded root certificates. + flower-supernode + --root-certificates certificates/ca.crt + --superlink 127.0.0.1:9092 +When setting ``root_certificates``, the client expects a file path to PEM-encoded root +certificates. Conclusion ---------- -You should now have learned how to generate self-signed certificates using the given script, start an -SSL-enabled server and have a client establish a secure connection to it. - +You should now have learned how to generate self-signed certificates using the given +script, start an SSL-enabled server and have a client establish a secure connection to +it.
Additional resources -------------------- -These additional sources might be relevant if you would like to dive deeper into the topic of certificates: +These additional sources might be relevant if you would like to dive deeper into the +topic of certificates: -* `Let's Encrypt `_ -* `certbot `_ +- `Let's Encrypt `_ +- `certbot `_ diff --git a/doc/source/how-to-implement-strategies.rst b/doc/source/how-to-implement-strategies.rst index 01bbb3042973..075d8a0116c4 100644 --- a/doc/source/how-to-implement-strategies.rst +++ b/doc/source/how-to-implement-strategies.rst @@ -1,22 +1,21 @@ Implement strategies ==================== -The strategy abstraction enables implementation of fully custom strategies. A -strategy is basically the federated learning algorithm that runs on the server. -Strategies decide how to sample clients, how to configure clients for training, -how to aggregate updates, and how to evaluate models. Flower provides a few -built-in strategies which are based on the same API described below. +The strategy abstraction enables implementation of fully custom strategies. A strategy +is basically the federated learning algorithm that runs on the server. Strategies decide +how to sample clients, how to configure clients for training, how to aggregate updates, +and how to evaluate models. Flower provides a few built-in strategies which are based on +the same API described below. -The :code:`Strategy` abstraction --------------------------------- +The ``Strategy`` abstraction +---------------------------- All strategy implementation are derived from the abstract base class -:code:`flwr.server.strategy.Strategy`, both built-in implementations and third -party implementations. This means that custom strategy implementations have the -exact same capabilities at their disposal as built-in ones. +``flwr.server.strategy.Strategy``, both built-in implementations and third party +implementations. 
This means that custom strategy implementations have the exact same +capabilities at their disposal as built-in ones. -The strategy abstraction defines a few abstract methods that need to be -implemented: +The strategy abstraction defines a few abstract methods that need to be implemented: .. code-block:: python @@ -31,10 +30,7 @@ implemented: @abstractmethod def configure_fit( - self, - server_round: int, - parameters: Parameters, - client_manager: ClientManager + self, server_round: int, parameters: Parameters, client_manager: ClientManager ) -> List[Tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" @@ -49,10 +45,7 @@ implemented: @abstractmethod def configure_evaluate( - self, - server_round: int, - parameters: Parameters, - client_manager: ClientManager + self, server_round: int, parameters: Parameters, client_manager: ClientManager ) -> List[Tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" @@ -71,31 +64,35 @@ implemented: ) -> Optional[Tuple[float, Dict[str, Scalar]]]: """Evaluate the current model parameters.""" - -Creating a new strategy means implementing a new :code:`class` (derived from the -abstract base class :code:`Strategy`) that implements for the previously shown -abstract methods: +Creating a new strategy means implementing a new ``class`` (derived from the abstract +base class ``Strategy``) that implements for the previously shown abstract methods: .. 
code-block:: python class SotaStrategy(Strategy): def initialize_parameters(self, client_manager): # Your implementation here + pass def configure_fit(self, server_round, parameters, client_manager): # Your implementation here + pass def aggregate_fit(self, server_round, results, failures): # Your implementation here + pass def configure_evaluate(self, server_round, parameters, client_manager): # Your implementation here + pass def aggregate_evaluate(self, server_round, results, failures): # Your implementation here + pass def evaluate(self, parameters): # Your implementation here + pass The Flower server calls these methods in the following order: @@ -176,12 +173,15 @@ The Flower server calls these methods in the following order: The following sections describe each of those methods in more detail. -The :code:`initialize_parameters` method ----------------------------------------- +The ``initialize_parameters`` method +------------------------------------ -:code:`initialize_parameters` is called only once, at the very beginning of an execution. It is responsible for providing the initial global model parameters in a serialized form (i.e., as a :code:`Parameters` object). +``initialize_parameters`` is called only once, at the very beginning of an execution. It +is responsible for providing the initial global model parameters in a serialized form +(i.e., as a ``Parameters`` object). -Built-in strategies return user-provided initial parameters. The following example shows how initial parameters can be passed to :code:`FedAvg`: +Built-in strategies return user-provided initial parameters. The following example shows +how initial parameters can be passed to ``FedAvg``: .. code-block:: python @@ -200,49 +200,68 @@ Built-in strategies return user-provided initial parameters. 
The following examp # Serialize ndarrays to `Parameters` parameters = fl.common.ndarrays_to_parameters(weights) - # Use the serialized parameters as the initial global parameters + # Use the serialized parameters as the initial global parameters strategy = fl.server.strategy.FedAvg( initial_parameters=parameters, ) fl.server.start_server(config=fl.server.ServerConfig(num_rounds=3), strategy=strategy) -The Flower server will call :code:`initialize_parameters`, which either returns the parameters that were passed to :code:`initial_parameters`, or :code:`None`. If no parameters are returned from :code:`initialize_parameters` (i.e., :code:`None`), the server will randomly select one client and ask it to provide its parameters. This is a convenience feature and not recommended in practice, but it can be useful for prototyping. In practice, it is recommended to always use server-side parameter initialization. +The Flower server will call ``initialize_parameters``, which either returns the +parameters that were passed to ``initial_parameters``, or ``None``. If no parameters are +returned from ``initialize_parameters`` (i.e., ``None``), the server will randomly +select one client and ask it to provide its parameters. This is a convenience feature +and not recommended in practice, but it can be useful for prototyping. In practice, it +is recommended to always use server-side parameter initialization. .. note:: - Server-side parameter initialization is a powerful mechanism. It can be used, for example, to resume training from a previously saved checkpoint. It is also the fundamental capability needed to implement hybrid approaches, for example, to fine-tune a pre-trained model using federated learning. + Server-side parameter initialization is a powerful mechanism. It can be used, for + example, to resume training from a previously saved checkpoint. 
It is also the + fundamental capability needed to implement hybrid approaches, for example, to + fine-tune a pre-trained model using federated learning. -The :code:`configure_fit` method --------------------------------- +The ``configure_fit`` method +---------------------------- -:code:`configure_fit` is responsible for configuring the upcoming round of training. What does *configure* mean in this context? Configuring a round means selecting clients and deciding what instructions to send to these clients. The signature of :code:`configure_fit` makes this clear: +``configure_fit`` is responsible for configuring the upcoming round of training. What +does *configure* mean in this context? Configuring a round means selecting clients and +deciding what instructions to send to these clients. The signature of ``configure_fit`` +makes this clear: .. code-block:: python @abstractmethod def configure_fit( - self, - server_round: int, - parameters: Parameters, - client_manager: ClientManager + self, server_round: int, parameters: Parameters, client_manager: ClientManager ) -> List[Tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" -The return value is a list of tuples, each representing the instructions that will be sent to a particular client. Strategy implementations usually perform the following steps in :code:`configure_fit`: +The return value is a list of tuples, each representing the instructions that will be +sent to a particular client. 
Strategy implementations usually perform the following +steps in ``configure_fit``: -* Use the :code:`client_manager` to randomly sample all (or a subset of) available clients (each represented as a :code:`ClientProxy` object) -* Pair each :code:`ClientProxy` with the same :code:`FitIns` holding the current global model :code:`parameters` and :code:`config` dict +- Use the ``client_manager`` to randomly sample all (or a subset of) available clients + (each represented as a ``ClientProxy`` object) +- Pair each ``ClientProxy`` with the same ``FitIns`` holding the current global model + ``parameters`` and ``config`` dict -More sophisticated implementations can use :code:`configure_fit` to implement custom client selection logic. A client will only participate in a round if the corresponding :code:`ClientProxy` is included in the list returned from :code:`configure_fit`. +More sophisticated implementations can use ``configure_fit`` to implement custom client +selection logic. A client will only participate in a round if the corresponding +``ClientProxy`` is included in the list returned from ``configure_fit``. .. note:: - The structure of this return value provides a lot of flexibility to the user. Since instructions are defined on a per-client basis, different instructions can be sent to each client. This enables custom strategies to train, for example, different models on different clients, or use different hyperparameters on different clients (via the :code:`config` dict). + The structure of this return value provides a lot of flexibility to the user. Since + instructions are defined on a per-client basis, different instructions can be sent + to each client. This enables custom strategies to train, for example, different + models on different clients, or use different hyperparameters on different clients + (via the ``config`` dict). 
-The :code:`aggregate_fit` method --------------------------------- +The ``aggregate_fit`` method +---------------------------- -:code:`aggregate_fit` is responsible for aggregating the results returned by the clients that were selected and asked to train in :code:`configure_fit`. +``aggregate_fit`` is responsible for aggregating the results returned by the clients +that were selected and asked to train in ``configure_fit``. .. code-block:: python @@ -255,42 +274,58 @@ The :code:`aggregate_fit` method ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: """Aggregate training results.""" -Of course, failures can happen, so there is no guarantee that the server will get results from all the clients it sent instructions to (via :code:`configure_fit`). :code:`aggregate_fit` therefore receives a list of :code:`results`, but also a list of :code:`failures`. +Of course, failures can happen, so there is no guarantee that the server will get +results from all the clients it sent instructions to (via ``configure_fit``). +``aggregate_fit`` therefore receives a list of ``results``, but also a list of +``failures``. -:code:`aggregate_fit` returns an optional :code:`Parameters` object and a dictionary of aggregated metrics. The :code:`Parameters` return value is optional because :code:`aggregate_fit` might decide that the results provided are not sufficient for aggregation (e.g., too many failures). +``aggregate_fit`` returns an optional ``Parameters`` object and a dictionary of +aggregated metrics. The ``Parameters`` return value is optional because +``aggregate_fit`` might decide that the results provided are not sufficient for +aggregation (e.g., too many failures). -The :code:`configure_evaluate` method -------------------------------------- +The ``configure_evaluate`` method +--------------------------------- -:code:`configure_evaluate` is responsible for configuring the upcoming round of evaluation. What does *configure* mean in this context? 
Configuring a round means selecting clients and deciding what instructions to send to these clients. The signature of :code:`configure_evaluate` makes this clear: +``configure_evaluate`` is responsible for configuring the upcoming round of evaluation. +What does *configure* mean in this context? Configuring a round means selecting clients +and deciding what instructions to send to these clients. The signature of +``configure_evaluate`` makes this clear: .. code-block:: python @abstractmethod def configure_evaluate( - self, - server_round: int, - parameters: Parameters, - client_manager: ClientManager + self, server_round: int, parameters: Parameters, client_manager: ClientManager ) -> List[Tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" -The return value is a list of tuples, each representing the instructions that will be sent to a particular client. Strategy implementations usually perform the following steps in :code:`configure_evaluate`: +The return value is a list of tuples, each representing the instructions that will be +sent to a particular client. Strategy implementations usually perform the following +steps in ``configure_evaluate``: -* Use the :code:`client_manager` to randomly sample all (or a subset of) available clients (each represented as a :code:`ClientProxy` object) -* Pair each :code:`ClientProxy` with the same :code:`EvaluateIns` holding the current global model :code:`parameters` and :code:`config` dict +- Use the ``client_manager`` to randomly sample all (or a subset of) available clients + (each represented as a ``ClientProxy`` object) +- Pair each ``ClientProxy`` with the same ``EvaluateIns`` holding the current global + model ``parameters`` and ``config`` dict -More sophisticated implementations can use :code:`configure_evaluate` to implement custom client selection logic. 
A client will only participate in a round if the corresponding :code:`ClientProxy` is included in the list returned from :code:`configure_evaluate`. +More sophisticated implementations can use ``configure_evaluate`` to implement custom +client selection logic. A client will only participate in a round if the corresponding +``ClientProxy`` is included in the list returned from ``configure_evaluate``. .. note:: - The structure of this return value provides a lot of flexibility to the user. Since instructions are defined on a per-client basis, different instructions can be sent to each client. This enables custom strategies to evaluate, for example, different models on different clients, or use different hyperparameters on different clients (via the :code:`config` dict). - + The structure of this return value provides a lot of flexibility to the user. Since + instructions are defined on a per-client basis, different instructions can be sent + to each client. This enables custom strategies to evaluate, for example, different + models on different clients, or use different hyperparameters on different clients + (via the ``config`` dict). -The :code:`aggregate_evaluate` method -------------------------------------- +The ``aggregate_evaluate`` method +--------------------------------- -:code:`aggregate_evaluate` is responsible for aggregating the results returned by the clients that were selected and asked to evaluate in :code:`configure_evaluate`. +``aggregate_evaluate`` is responsible for aggregating the results returned by the +clients that were selected and asked to evaluate in ``configure_evaluate``. .. code-block:: python @@ -303,21 +338,29 @@ The :code:`aggregate_evaluate` method ) -> Tuple[Optional[float], Dict[str, Scalar]]: """Aggregate evaluation results.""" -Of course, failures can happen, so there is no guarantee that the server will get results from all the clients it sent instructions to (via :code:`configure_evaluate`). 
:code:`aggregate_evaluate` therefore receives a list of :code:`results`, but also a list of :code:`failures`. +Of course, failures can happen, so there is no guarantee that the server will get +results from all the clients it sent instructions to (via ``configure_evaluate``). +``aggregate_evaluate`` therefore receives a list of ``results``, but also a list of +``failures``. -:code:`aggregate_evaluate` returns an optional :code:`float` (loss) and a dictionary of aggregated metrics. The :code:`float` return value is optional because :code:`aggregate_evaluate` might decide that the results provided are not sufficient for aggregation (e.g., too many failures). +``aggregate_evaluate`` returns an optional ``float`` (loss) and a dictionary of +aggregated metrics. The ``float`` return value is optional because +``aggregate_evaluate`` might decide that the results provided are not sufficient for +aggregation (e.g., too many failures). -The :code:`evaluate` method ---------------------------- +The ``evaluate`` method +----------------------- -:code:`evaluate` is responsible for evaluating model parameters on the server-side. Having :code:`evaluate` in addition to :code:`configure_evaluate`/:code:`aggregate_evaluate` enables strategies to perform both servers-side and client-side (federated) evaluation. +``evaluate`` is responsible for evaluating model parameters on the server-side. Having +``evaluate`` in addition to ``configure_evaluate``/``aggregate_evaluate`` enables +strategies to perform both server-side and client-side (federated) evaluation. ..
code-block:: python @abstractmethod - def evaluate( - self, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + def evaluate(self, parameters: Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]: """Evaluate the current model parameters.""" -The return value is again optional because the strategy might not need to implement server-side evaluation or because the user-defined :code:`evaluate` method might not complete successfully (e.g., it might fail to load the server-side evaluation data). +The return value is again optional because the strategy might not need to implement +server-side evaluation or because the user-defined ``evaluate`` method might not +complete successfully (e.g., it might fail to load the server-side evaluation data). diff --git a/doc/source/how-to-install-flower.rst b/doc/source/how-to-install-flower.rst index 964b23125c0b..89cdf8b836cf 100644 --- a/doc/source/how-to-install-flower.rst +++ b/doc/source/how-to-install-flower.rst @@ -1,12 +1,11 @@ Install Flower ============== - Python version -------------- -Flower requires at least `Python 3.8 `_, but `Python 3.10 `_ or above is recommended. - +Flower requires at least `Python 3.9 `_, but `Python 3.10 +`_ or above is recommended. Install stable release ---------------------- @@ -14,42 +13,56 @@ Install stable release Using pip ~~~~~~~~~ -Stable releases are available on `PyPI `_:: +Stable releases are available on `PyPI `_: + +:: - python -m pip install flwr + python -m pip install flwr -For simulations that use the Virtual Client Engine, ``flwr`` should be installed with the ``simulation`` extra:: +For simulations that use the Virtual Client Engine, ``flwr`` should be installed with +the ``simulation`` extra: - python -m pip install flwr[simulation] +:: + python -m pip install "flwr[simulation]" Using conda (or mamba) ~~~~~~~~~~~~~~~~~~~~~~ Flower can also be installed from the ``conda-forge`` channel. 
-If you have not added ``conda-forge`` to your channels, you will first need to run the following:: +If you have not added ``conda-forge`` to your channels, you will first need to run the +following: + +:: + + conda config --add channels conda-forge + conda config --set channel_priority strict - conda config --add channels conda-forge - conda config --set channel_priority strict +Once the ``conda-forge`` channel has been enabled, ``flwr`` can be installed with +``conda``: -Once the ``conda-forge`` channel has been enabled, ``flwr`` can be installed with ``conda``:: +:: - conda install flwr + conda install flwr -or with ``mamba``:: +or with ``mamba``: - mamba install flwr +:: + mamba install flwr Verify installation ------------------- -The following command can be used to verify if Flower was successfully installed. If everything worked, it should print the version of Flower to the command line:: +The following command can be used to verify if Flower was successfully installed. If +everything worked, it should print the version of Flower to the command line: - python -c "import flwr;print(flwr.__version__)" - 1.8.0 +.. 
code-block:: bash + :substitutions: + python -c "import flwr;print(flwr.__version__)" + |stable_flwr_version| Advanced installation options ----------------------------- @@ -57,26 +70,37 @@ Advanced installation options Install via Docker ~~~~~~~~~~~~~~~~~~ -:doc:`How to run Flower using Docker ` +:doc:`Run Flower using Docker ` Install pre-release ~~~~~~~~~~~~~~~~~~~ -New (possibly unstable) versions of Flower are sometimes available as pre-release versions (alpha, beta, release candidate) before the stable release happens:: +New (possibly unstable) versions of Flower are sometimes available as pre-release +versions (alpha, beta, release candidate) before the stable release happens: - python -m pip install -U --pre flwr +:: -For simulations that use the Virtual Client Engine, ``flwr`` pre-releases should be installed with the ``simulation`` extra:: + python -m pip install -U --pre flwr - python -m pip install -U --pre flwr[simulation] +For simulations that use the Virtual Client Engine, ``flwr`` pre-releases should be +installed with the ``simulation`` extra: + +:: + + python -m pip install -U --pre 'flwr[simulation]' Install nightly release ~~~~~~~~~~~~~~~~~~~~~~~ -The latest (potentially unstable) changes in Flower are available as nightly releases:: +The latest (potentially unstable) changes in Flower are available as nightly releases: + +:: + + python -m pip install -U flwr-nightly - python -m pip install -U flwr-nightly +For simulations that use the Virtual Client Engine, ``flwr-nightly`` should be installed +with the ``simulation`` extra: -For simulations that use the Virtual Client Engine, ``flwr-nightly`` should be installed with the ``simulation`` extra:: +:: - python -m pip install -U flwr-nightly[simulation] + python -m pip install -U flwr-nightly[simulation] diff --git a/doc/source/how-to-monitor-simulation.rst b/doc/source/how-to-monitor-simulation.rst index f6c26a701d94..f540e22a6a77 100644 --- a/doc/source/how-to-monitor-simulation.rst +++ 
b/doc/source/how-to-monitor-simulation.rst @@ -1,109 +1,120 @@ Monitor simulation ================== -Flower allows you to monitor system resources while running your simulation. Moreover, the Flower simulation engine is powerful and enables you to decide how to allocate resources per client manner and constrain the total usage. Insights from resource consumption can help you make smarter decisions and speed up the execution time. - -The specific instructions assume you are using macOS and have the `Homebrew `_ package manager installed. +Flower allows you to monitor system resources while running your simulation. Moreover, +the Flower simulation engine is powerful and enables you to decide how to allocate +resources per client manner and constrain the total usage. Insights from resource +consumption can help you make smarter decisions and speed up the execution time. +The specific instructions assume you are using macOS and have the `Homebrew +`_ package manager installed. Downloads --------- .. code-block:: bash - brew install prometheus grafana + brew install prometheus grafana -`Prometheus `_ is used for data collection, while `Grafana `_ will enable you to visualize the collected data. They are both well integrated with `Ray `_ which Flower uses under the hood. +`Prometheus `_ is used for data collection, while `Grafana +`_ will enable you to visualize the collected data. They are both +well integrated with `Ray `_ which Flower uses under the hood. -Overwrite the configuration files (depending on your device, it might be installed on a different path). +Overwrite the configuration files (depending on your device, it might be installed on a +different path). If you are on an M1 Mac, it should be: .. code-block:: bash - /opt/homebrew/etc/prometheus.yml - /opt/homebrew/etc/grafana/grafana.ini + /opt/homebrew/etc/prometheus.yml + /opt/homebrew/etc/grafana/grafana.ini On the previous generation Intel Mac devices, it should be: .. 
code-block:: bash - /usr/local/etc/prometheus.yml - /usr/local/etc/grafana/grafana.ini + /usr/local/etc/prometheus.yml + /usr/local/etc/grafana/grafana.ini -Open the respective configuration files and change them. Depending on your device, use one of the two following commands: +Open the respective configuration files and change them. Depending on your device, use +one of the two following commands: .. code-block:: bash - # M1 macOS - open /opt/homebrew/etc/prometheus.yml + # M1 macOS + open /opt/homebrew/etc/prometheus.yml - # Intel macOS - open /usr/local/etc/prometheus.yml + # Intel macOS + open /usr/local/etc/prometheus.yml -and then delete all the text in the file and paste a new Prometheus config you see below. You may adjust the time intervals to your requirements: +and then delete all the text in the file and paste a new Prometheus config you see +below. You may adjust the time intervals to your requirements: .. code-block:: bash - global: - scrape_interval: 1s - evaluation_interval: 1s + global: + scrape_interval: 1s + evaluation_interval: 1s - scrape_configs: - # Scrape from each ray node as defined in the service_discovery.json provided by ray. - - job_name: 'ray' - file_sd_configs: - - files: - - '/tmp/ray/prom_metrics_service_discovery.json' + scrape_configs: + # Scrape from each ray node as defined in the service_discovery.json provided by ray. + - job_name: 'ray' + file_sd_configs: + - files: + - '/tmp/ray/prom_metrics_service_discovery.json' -Now after you have edited the Prometheus configuration, do the same with the Grafana configuration files. Open those using one of the following commands as before: +Now after you have edited the Prometheus configuration, do the same with the Grafana +configuration files. Open those using one of the following commands as before: .. 
code-block:: python - # M1 macOS - open /opt/homebrew/etc/grafana/grafana.ini + # M1 macOS + open / opt / homebrew / etc / grafana / grafana.ini - # Intel macOS - open /usr/local/etc/grafana/grafana.ini + # Intel macOS + open / usr / local / etc / grafana / grafana.ini -Your terminal editor should open and allow you to apply the following configuration as before. +Your terminal editor should open and allow you to apply the following configuration as +before. .. code-block:: bash - [security] - allow_embedding = true - - [auth.anonymous] - enabled = true - org_name = Main Org. - org_role = Viewer + [security] + allow_embedding = true - [paths] - provisioning = /tmp/ray/session_latest/metrics/grafana/provisioning + [auth.anonymous] + enabled = true + org_name = Main Org. + org_role = Viewer -Congratulations, you just downloaded all the necessary software needed for metrics tracking. Now, let’s start it. + [paths] + provisioning = /tmp/ray/session_latest/metrics/grafana/provisioning +Congratulations, you just downloaded all the necessary software needed for metrics +tracking. Now, let’s start it. Tracking metrics ---------------- -Before running your Flower simulation, you have to start the monitoring tools you have just installed and configured. +Before running your Flower simulation, you have to start the monitoring tools you have +just installed and configured. .. code-block:: bash - brew services start prometheus - brew services start grafana + brew services start prometheus + brew services start grafana Please include the following argument in your Python code when starting a simulation. .. code-block:: python - fl.simulation.start_simulation( - # ... - # all the args you used before - # ... - ray_init_args = {"include_dashboard": True} - ) + fl.simulation.start_simulation( + # ... + # all the args you used before + # ... + ray_init_args={"include_dashboard": True} + ) Now, you are ready to start your workload. 
@@ -111,126 +122,140 @@ Shortly after the simulation starts, you should see the following logs in your t .. code-block:: bash - 2023-01-20 16:22:58,620 INFO [worker.py:1529](http://worker.py:1529/) -- Started a local Ray instance. View the dashboard at http://127.0.0.1:8265 - + 2023-01-20 16:22:58,620 INFO [worker.py:1529](http://worker.py:1529/) -- Started a local Ray instance. View the dashboard at http://127.0.0.1:8265 -You can look at everything at ``_ . +You can look at everything at http://127.0.0.1:8265 . -It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the lowest option). +It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the lowest +option). -Or alternatively, you can just see them in Grafana by clicking on the right-up corner, “View in Grafana”. Please note that the Ray dashboard is only accessible during the simulation. After the simulation ends, you can only use Grafana to explore the metrics. You can start Grafana by going to ``http://localhost:3000/``. +Or alternatively, you can just see them in Grafana by clicking on the right-up corner, +“View in Grafana”. Please note that the Ray dashboard is only accessible during the +simulation. After the simulation ends, you can only use Grafana to explore the metrics. +You can start Grafana by going to ``http://localhost:3000/``. -After you finish the visualization, stop Prometheus and Grafana. This is important as they will otherwise block, for example port :code:`3000` on your machine as long as they are running. +After you finish the visualization, stop Prometheus and Grafana. This is important as +they will otherwise block, for example port ``3000`` on your machine as long as they are +running. .. 
code-block:: bash - brew services stop prometheus - brew services stop grafana - + brew services stop prometheus + brew services stop grafana Resource allocation ------------------- -You must understand how the Ray library works to efficiently allocate system resources to simulation clients on your own. +You must understand how the Ray library works to efficiently allocate system resources +to simulation clients on your own. -Initially, the simulation (which Ray handles under the hood) starts by default with all the available resources on the system, which it shares among the clients. It doesn't mean it divides it equally among all of them, nor that the model training happens at all of them simultaneously. You will learn more about that in the later part of this blog. You can check the system resources by running the following: +Initially, the simulation (which Ray handles under the hood) starts by default with all +the available resources on the system, which it shares among the clients. It doesn't +mean it divides it equally among all of them, nor that the model training happens at all +of them simultaneously. You will learn more about that in the later part of this blog. +You can check the system resources by running the following: .. code-block:: python - import ray - ray.available_resources() + import ray + + ray.available_resources() In Google Colab, the result you see might be similar to this: .. code-block:: bash - {'memory': 8020104807.0, - 'GPU': 1.0, - 'object_store_memory': 4010052403.0, - 'CPU': 2.0, - 'accelerator_type:T4': 1.0, - 'node:172.28.0.2': 1.0} + {'memory': 8020104807.0, + 'GPU': 1.0, + 'object_store_memory': 4010052403.0, + 'CPU': 2.0, + 'accelerator_type:T4': 1.0, + 'node:172.28.0.2': 1.0} - -However, you can overwrite the defaults. When starting a simulation, do the following (you don't need to overwrite all of them): +However, you can overwrite the defaults. 
When starting a simulation, do the following +(you don't need to overwrite all of them): .. code-block:: python - num_cpus = 2 - num_gpus = 1 - ram_memory = 16_000 * 1024 * 1024 # 16 GB - fl.simulation.start_simulation( - # ... - # all the args you were specifying before - # ... - ray_init_args = { - "include_dashboard": True, # we need this one for tracking - "num_cpus": num_cpus, - "num_gpus": num_gpus, - "memory": ram_memory, - } - ) - + num_cpus = 2 + num_gpus = 1 + ram_memory = 16_000 * 1024 * 1024 # 16 GB + fl.simulation.start_simulation( + # ... + # all the args you were specifying before + # ... + ray_init_args={ + "include_dashboard": True, # we need this one for tracking + "num_cpus": num_cpus, + "num_gpus": num_gpus, + "memory": ram_memory, + } + ) Let’s also specify the resource for a single client. .. code-block:: python - # Total resources for simulation - num_cpus = 4 - num_gpus = 1 - ram_memory = 16_000 * 1024 * 1024 # 16 GB - - # Single client resources - client_num_cpus = 2 - client_num_gpus = 1 - - fl.simulation.start_simulation( - # ... - # all the args you were specifying before - # ... - ray_init_args = { - "include_dashboard": True, # we need this one for tracking - "num_cpus": num_cpus, - "num_gpus": num_gpus, - "memory": ram_memory, - }, - # The argument below is new - client_resources = { - "num_cpus": client_num_cpus, - "num_gpus": client_num_gpus, - } - ) - -Now comes the crucial part. Ray will start a new client only when it has all the required resources (such that they run in parallel) when the resources allow. - -In the example above, only one client will be run, so your clients won't run concurrently. Setting :code:`client_num_gpus = 0.5` would allow running two clients and therefore enable them to run concurrently. -Be careful not to require more resources than available. If you specified :code:`client_num_gpus = 2`, the simulation wouldn't start (even if you had 2 GPUs but decided to set 1 in :code:`ray_init_args`). 
- + # Total resources for simulation + num_cpus = 4 + num_gpus = 1 + ram_memory = 16_000 * 1024 * 1024 # 16 GB + + # Single client resources + client_num_cpus = 2 + client_num_gpus = 1 + + fl.simulation.start_simulation( + # ... + # all the args you were specifying before + # ... + ray_init_args={ + "include_dashboard": True, # we need this one for tracking + "num_cpus": num_cpus, + "num_gpus": num_gpus, + "memory": ram_memory, + }, + # The argument below is new + client_resources={ + "num_cpus": client_num_cpus, + "num_gpus": client_num_gpus, + }, + ) + +Now comes the crucial part. Ray will start a new client only when it has all the +required resources (such that they run in parallel) when the resources allow. + +In the example above, only one client will be run, so your clients won't run +concurrently. Setting ``client_num_gpus = 0.5`` would allow running two clients and +therefore enable them to run concurrently. Be careful not to require more resources than +available. If you specified ``client_num_gpus = 2``, the simulation wouldn't start (even +if you had 2 GPUs but decided to set 1 in ``ray_init_args``). FAQ --- Q: I don't see any metrics logged. -A: The timeframe might not be properly set. The setting is in the top right corner ("Last 30 minutes" by default). Please change the timeframe to reflect the period when the simulation was running. +A: The timeframe might not be properly set. The setting is in the top right corner +("Last 30 minutes" by default). Please change the timeframe to reflect the period when +the simulation was running. -Q: I see “Grafana server not detected. Please make sure the Grafana server is running and refresh this page” after going to the Metrics tab in Ray Dashboard. +Q: I see “Grafana server not detected. Please make sure the Grafana server is running +and refresh this page” after going to the Metrics tab in Ray Dashboard. A: You probably don't have Grafana running. Please check the running services .. 
code-block:: bash - brew services list + brew services list -Q: I see "This site can't be reached" when going to ``_. +Q: I see "This site can't be reached" when going to http://127.0.0.1:8265. A: Either the simulation has already finished, or you still need to start Prometheus. - Resources --------- -Ray Dashboard: ``_ +Ray Dashboard: https://docs.ray.io/en/latest/ray-observability/getting-started.html -Ray Metrics: ``_ +Ray Metrics: https://docs.ray.io/en/latest/cluster/metrics.html diff --git a/doc/source/how-to-run-flower-using-docker.rst b/doc/source/how-to-run-flower-using-docker.rst deleted file mode 100644 index 375857b85b71..000000000000 --- a/doc/source/how-to-run-flower-using-docker.rst +++ /dev/null @@ -1,489 +0,0 @@ -Run Flower using Docker -======================= - -The simplest way to get started with Flower is by using the pre-made Docker images, which you can -find on `Docker Hub `__. Supported architectures include ``amd64`` -and ``arm64v8``. - -Before you start, make sure that the Docker daemon is running: - -.. code-block:: bash - - $ docker -v - Docker version 26.0.0, build 2ae903e - -If you do not see the version of Docker but instead get an error saying that the command -was not found, you will need to install Docker first. You can find installation instruction -`here `_. - -.. note:: - - On Linux, Docker commands require ``sudo`` privilege. If you want to avoid using ``sudo``, - you can follow the `Post-installation steps `_ - on the official Docker website. - -.. important:: - - To ensure optimal performance and compatibility, the SuperLink, SuperNode and ServerApp image - must have the same version when running together. This guarantees seamless integration and - avoids potential conflicts or issues that may arise from using different versions. - -Flower SuperLink ----------------- - -Quickstart -~~~~~~~~~~ - -If you're looking to try out Flower, you can use the following command: - -.. 
code-block:: bash - - $ docker run --rm -p 9091:9091 -p 9092:9092 flwr/superlink:1.8.0 --insecure - -The command pulls the Docker image with the tag ``1.8.0`` from Docker Hub. The tag specifies -the Flower version. In this case, Flower 1.8.0. The ``--rm`` flag tells Docker to remove the -container after it exits. - -.. note:: - - By default, the Flower SuperLink keeps state in-memory. When using the Docker flag ``--rm``, the - state is not persisted between container starts. We will show below how to save the state in a - file on your host system. - -The ``-p :`` flag tells Docker to map the ports ``9091``/``9092`` of the host to -``9091``/``9092`` of the container, allowing you to access the Driver API on ``http://localhost:9091`` -and the Fleet API on ``http://localhost:9092``. Lastly, any flag that comes after the tag is passed -to the Flower SuperLink. Here, we are passing the flag ``--insecure``. - -.. attention:: - - The ``--insecure`` flag enables insecure communication (using HTTP, not HTTPS) and should only be - used for testing purposes. We strongly recommend enabling - `SSL `__ - when deploying to a production environment. - -You can use ``--help`` to view all available flags that the SuperLink supports: - -.. code-block:: bash - - $ docker run --rm flwr/superlink:1.8.0 --help - -Mounting a volume to store the state on the host system -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -If you want to persist the state of the SuperLink on your host system, all you need to do is specify -a directory where you want to save the file on your host system and a name for the database file. By -default, the SuperLink container runs with a non-root user called ``app`` with the user ID -``49999``. It is recommended to create new directory and change the user ID of the directory to -``49999`` to ensure the mounted directory has the proper permissions. 
If you later want to delete -the directory, you can change the user ID back to the current user ID by running -``sudo chown -R $USER:$(id -gn) state``. - -In the example below, we create a new directory, change the user ID and tell Docker via the flag -``--volume`` to mount the local ``state`` directory into the ``/app/state`` directory of the -container. Furthermore, we use the flag ``--database`` to specify the name of the database file. - -.. code-block:: bash - - $ mkdir state - $ sudo chmod -R 49999:49999 state - $ docker run --rm \ - -p 9091:9091 -p 9092:9092 --volume ./state/:/app/state flwr/superlink:1.8.0 \ - --insecure \ - --database state.db - -As soon as the SuperLink starts, the file ``state.db`` is created in the ``state`` directory on -your host system. If the file already exists, the SuperLink tries to restore the state from the -file. To start the SuperLink with an empty database, simply remove the ``state.db`` file. - -Enabling SSL for secure connections -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To enable SSL, you will need a PEM-encoded root certificate, a PEM-encoded private key and a -PEM-encoded certificate chain. - -.. note:: - For testing purposes, you can generate your own self-signed certificates. The - `Enable SSL connections `__ - page contains a section that will guide you through the process. - -Assuming all files we need are in the local ``certificates`` directory, we can use the flag -``--volume`` to mount the local directory into the ``/app/certificates/`` directory of the container. -This allows the SuperLink to access the files within the container. The ``ro`` stands for -``read-only``. Docker volumes default to ``read-write``; that option tells Docker to make the volume -``read-only`` instead. Finally, we pass the names of the certificates and key file to the SuperLink -with the ``--ssl-ca-certfile``, ``--ssl-certfile`` and ``--ssl-keyfile`` flag. - -.. 
code-block:: bash - - $ docker run --rm \ - -p 9091:9091 -p 9092:9092 \ - --volume ./certificates/:/app/certificates/:ro flwr/superlink:nightly \ - --ssl-ca-certfile certificates/ca.crt \ - --ssl-certfile certificates/server.pem \ - --ssl-keyfile certificates/server.key - -.. note:: - - Because Flower containers, by default, run with a non-root user ``app``, the mounted files and - directories must have the proper permissions for the user ID ``49999``. For example, to change the - user ID of all files in the ``certificates/`` directory, you can run - ``sudo chown -R 49999:49999 certificates/*``. - -Flower SuperNode ----------------- - -The SuperNode Docker image comes with a pre-installed version of Flower and serves as a base for -building your own SuperNode image. - -.. important:: - - The SuperNode Docker image currently works only with the 1.9.0-nightly release. A stable version - will be available when Flower 1.9.0 (stable) gets released (ETA: May). A SuperNode nightly image - must be paired with the corresponding SuperLink and ServerApp nightly images released on the same - day. To ensure the versions are in sync, using the concrete tag, e.g., ``1.9.0.dev20240501`` - instead of ``nightly`` is recommended. - -We will use the ``quickstart-pytorch`` example, which you can find in -the Flower repository, to illustrate how you can dockerize your ClientApp. - -.. _SuperNode Prerequisites: - -Prerequisites -~~~~~~~~~~~~~ - -Before we can start, we need to meet a few prerequisites in our local development environment. -You can skip the first part if you want to run your ClientApp instead of the ``quickstart-pytorch`` -example. - -#. Clone the Flower repository. - - .. code-block:: bash - - $ git clone --depth=1 https://github.com/adap/flower.git && cd flower/examples/quickstart-pytorch - -#. Verify the Docker daemon is running. - - Please follow the first section on - :doc:`Run Flower using Docker ` - which covers this step in more detail. 
- - -Creating a SuperNode Dockerfile -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Let's assume the following project layout: - -.. code-block:: bash - - $ tree . - . - ├── client.py # ClientApp code - └── - -First, we need to create a ``requirements.txt`` file in the directory where the ``ClientApp`` code -is located. In the file, we list all the dependencies that the ClientApp requires. - -.. code-block:: - - flwr-datasets[vision]>=0.1.0,<1.0.0 - torch==2.2.1 - torchvision==0.17.1 - tqdm==4.66.3 - -.. important:: - - Note that `flwr `__ is already installed in the ``flwr/supernode`` - base image, so you only need to include other package dependencies in your ``requirements.txt``, - such as ``torch``, ``tensorflow``, etc. - -Next, we create a Dockerfile. If you use the ``quickstart-pytorch`` example, create a new -file called ``Dockerfile.supernode`` in ``examples/quickstart-pytorch``. - -The ``Dockerfile.supernode`` contains the instructions that assemble the SuperNode image. - -.. code-block:: dockerfile - - FROM flwr/supernode:nightly - - WORKDIR /app - - COPY requirements.txt . - RUN python -m pip install -U --no-cache-dir -r requirements.txt - - COPY client.py ./ - ENTRYPOINT ["flower-client-app", "client:app"] - -In the first two lines, we instruct Docker to use the SuperNode image tagged ``nightly`` as a base -image and set our working directory to ``/app``. The following instructions will now be -executed in the ``/app`` directory. Next, we install the ClientApp dependencies by copying the -``requirements.txt`` file into the image and run ``pip install``. In the last two lines, -we copy the ``client.py`` module into the image and set the entry point to ``flower-client-app`` with -the argument ``client:app``. The argument is the object reference of the ClientApp -(``:``) that will be run inside the ClientApp. 
- -Building the SuperNode Docker image -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Next, we build the SuperNode Docker image by running the following command in the directory where -Dockerfile and ClientApp code are located. - -.. code-block:: bash - - $ docker build -f Dockerfile.supernode -t flwr_supernode:0.0.1 . - -We gave the image the name ``flwr_supernode``, and the tag ``0.0.1``. Remember that the here chosen -values only serve as an example. You can change them to your needs. - - -Running the SuperNode Docker image -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Now that we have built the SuperNode image, we can finally run it. - -.. code-block:: bash - - $ docker run --rm flwr_supernode:0.0.1 \ - --insecure \ - --server 192.168.1.100:9092 - -Let's break down each part of this command: - -* ``docker run``: This is the command to run a new Docker container. -* ``--rm``: This option specifies that the container should be automatically removed when it stops. -* ``flwr_supernode:0.0.1``: The name the tag of the Docker image to use. -* ``--insecure``: This option enables insecure communication. - -.. attention:: - - The ``--insecure`` flag enables insecure communication (using HTTP, not HTTPS) and should only be - used for testing purposes. We strongly recommend enabling - `SSL `__ - when deploying to a production environment. - -* | ``--server 192.168.1.100:9092``: This option specifies the address of the SuperLinks Fleet - | API to connect to. Remember to update it with your SuperLink IP. - -.. note:: - - To test running Flower locally, you can create a - `bridge network `__, - use the ``--network`` argument and pass the name of the Docker network to run your SuperNodes. - -Any argument that comes after the tag is passed to the Flower SuperNode binary. -To see all available flags that the SuperNode supports, run: - -.. 
code-block:: bash - - $ docker run --rm flwr/supernode:nightly --help - -Enabling SSL for secure connections -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To enable SSL, we will need to mount a PEM-encoded root certificate into your SuperNode container. - -Assuming the certificate already exists locally, we can use the flag ``--volume`` to mount the local -certificate into the container's ``/app/`` directory. This allows the SuperNode to access the -certificate within the container. Use the ``--root-certificates`` flag when starting the container. - -.. code-block:: bash - - $ docker run --rm --volume ./ca.crt:/app/ca.crt flwr_supernode:0.0.1 \ - --server 192.168.1.100:9092 \ - --root-certificates ca.crt - -Flower ServerApp ----------------- - -The procedure for building and running a ServerApp image is almost identical to the SuperNode image. - -Similar to the SuperNode image, the ServerApp Docker image comes with a pre-installed version of -Flower and serves as a base for building your own ServerApp image. - -We will use the same ``quickstart-pytorch`` example as we do in the Flower SuperNode section. -If you have not already done so, please follow the `SuperNode Prerequisites`_ before proceeding. - - -Creating a ServerApp Dockerfile -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Let's assume the following project layout: - -.. code-block:: bash - - $ tree . - . - ├── server.py # ServerApp code - └── - -First, we need to create a Dockerfile in the directory where the ``ServerApp`` code is located. -If you use the ``quickstart-pytorch`` example, create a new file called ``Dockerfile.serverapp`` in -``examples/quickstart-pytorch``. - -The ``Dockerfile.serverapp`` contains the instructions that assemble the ServerApp image. - -.. 
code-block:: dockerfile - - FROM flwr/serverapp:1.8.0 - - WORKDIR /app - - COPY server.py ./ - ENTRYPOINT ["flower-server-app", "server:app"] - -In the first two lines, we instruct Docker to use the ServerApp image tagged ``1.8.0`` as a base -image and set our working directory to ``/app``. The following instructions will now be -executed in the ``/app`` directory. In the last two lines, we copy the ``server.py`` module into the -image and set the entry point to ``flower-server-app`` with the argument ``server:app``. -The argument is the object reference of the ServerApp (``:``) that will be run -inside the ServerApp container. - -Building the ServerApp Docker image -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Next, we build the ServerApp Docker image by running the following command in the directory where -Dockerfile and ServerApp code are located. - -.. code-block:: bash - - $ docker build -f Dockerfile.serverapp -t flwr_serverapp:0.0.1 . - -We gave the image the name ``flwr_serverapp``, and the tag ``0.0.1``. Remember that the here chosen -values only serve as an example. You can change them to your needs. - - -Running the ServerApp Docker image -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Now that we have built the ServerApp image, we can finally run it. - -.. code-block:: bash - - $ docker run --rm flwr_serverapp:0.0.1 \ - --insecure \ - --server 192.168.1.100:9091 - -Let's break down each part of this command: - -* ``docker run``: This is the command to run a new Docker container. -* ``--rm``: This option specifies that the container should be automatically removed when it stops. -* ``flwr_serverapp:0.0.1``: The name the tag of the Docker image to use. -* ``--insecure``: This option enables insecure communication. - -.. attention:: - - The ``--insecure`` flag enables insecure communication (using HTTP, not HTTPS) and should only be - used for testing purposes. We strongly recommend enabling - `SSL `__ - when deploying to a production environment. 
- -* | ``--server 192.168.1.100:9091``: This option specifies the address of the SuperLinks Driver - | API to connect to. Remember to update it with your SuperLink IP. - -.. note:: - To test running Flower locally, you can create a - `bridge network `__, - use the ``--network`` argument and pass the name of the Docker network to run your ServerApps. - -Any argument that comes after the tag is passed to the Flower ServerApp binary. -To see all available flags that the ServerApp supports, run: - -.. code-block:: bash - - $ docker run --rm flwr/serverapp:1.8.0 --help - -Enabling SSL for secure connections -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To enable SSL, we will need to mount a PEM-encoded root certificate into your ServerApp container. - -Assuming the certificate already exists locally, we can use the flag ``--volume`` to mount the local -certificate into the container's ``/app/`` directory. This allows the ServerApp to access the -certificate within the container. Use the ``--root-certificates`` flags when starting the container. - -.. code-block:: bash - - $ docker run --rm --volume ./ca.crt:/app/ca.crt flwr_serverapp:0.0.1 \ - --server 192.168.1.100:9091 \ - --root-certificates ca.crt - -Advanced Docker options ------------------------ - -Run with root user privileges -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Flower Docker images, by default, run with a non-root user (username/groupname: ``app``, -UID/GID: ``49999``). Using root user is not recommended unless it is necessary for specific -tasks during the build process. Always make sure to run the container as a non-root user in -production to maintain security best practices. - -**Run a container with root user privileges** - -Run the Docker image with the ``-u`` flag and specify ``root`` as the username: - -.. code-block:: bash - - $ docker run --rm -u root flwr/superlink:1.8.0 - -This command will run the Docker container with root user privileges. 
- -**Run the build process with root user privileges** - -If you want to switch to the root user during the build process of the Docker image to install -missing system dependencies, you can use the ``USER root`` directive within your Dockerfile. - -.. code-block:: dockerfile - - FROM flwr/supernode:1.8.0 - - # Switch to root user - USER root - - # Install missing dependencies (requires root access) - RUN apt-get update && apt-get install -y - - # Switch back to non-root user app - USER app - - # Continue with your Docker image build process - ... - -Using a different Flower version -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -If you want to use a different version of Flower, for example Flower nightly, you can do so by -changing the tag. All available versions are on `Docker Hub `__. - -Pinning a Docker image to a specific version -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -It may happen that we update the images behind the tags. Such updates usually include security -updates of system dependencies that should not change the functionality of Flower. However, if you -want to ensure that you always use the same image, you can specify the hash of the image instead of -the tag. - -The following command returns the current image hash referenced by the ``superlink:1.8.0`` tag: - -.. code-block:: bash - - $ docker inspect --format='{{index .RepoDigests 0}}' flwr/superlink:1.8.0 - flwr/superlink@sha256:1b855d1fa4e344e4d95db99793f2bb35d8c63f6a1decdd736863bfe4bb0fe46c - -Next, we can pin the hash when running a new SuperLink container: - -.. code-block:: bash - - $ docker run \ - --rm flwr/superlink@sha256:1b855d1fa4e344e4d95db99793f2bb35d8c63f6a1decdd736863bfe4bb0fe46c \ - --insecure - -Setting environment variables -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To set a variable inside a Docker container, you can use the ``-e =`` flag. - -.. 
code-block:: bash - - $ docker run -e FLWR_TELEMETRY_ENABLED=0 \ - --rm flwr/superlink:1.8.0 --insecure diff --git a/doc/source/how-to-run-simulations.rst b/doc/source/how-to-run-simulations.rst index d1dcb511ed51..fb4eed17b4e7 100644 --- a/doc/source/how-to-run-simulations.rst +++ b/doc/source/how-to-run-simulations.rst @@ -1,48 +1,85 @@ Run simulations =============== -.. youtube:: cRebUIGB5RU - :url_parameters: ?list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB - :width: 100% - -Simulating Federated Learning workloads is useful for a multitude of use-cases: you might want to run your workload on a large cohort of clients but without having to source, configure and mange a large number of physical devices; you might want to run your FL workloads as fast as possible on the compute systems you have access to without having to go through a complex setup process; you might want to validate your algorithm on different scenarios at varying levels of data and system heterogeneity, client availability, privacy budgets, etc. These are among some of the use-cases where simulating FL workloads makes sense. Flower can accommodate these scenarios by means of its `VirtualClientEngine `_ or VCE. - -The :code:`VirtualClientEngine` schedules, launches and manages `virtual` clients. These clients are identical to `non-virtual` clients (i.e. the ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by creating a class inheriting, for example, from `flwr.client.NumPyClient `_ and therefore behave in an identical way. In addition to that, clients managed by the :code:`VirtualClientEngine` are: - -* resource-aware: this means that each client gets assigned a portion of the compute and memory on your system. You as a user can control this at the beginning of the simulation and allows you to control the degree of parallelism of your Flower FL simulation. The fewer the resources per client, the more clients can run concurrently on the same hardware. 
-* self-managed: this means that you as a user do not need to launch clients manually, instead this gets delegated to :code:`VirtualClientEngine`'s internals. -* ephemeral: this means that a client is only materialized when it is required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards, releasing the resources it was assigned and allowing in this way other clients to participate. - -The :code:`VirtualClientEngine` implements `virtual` clients using `Ray `_, an open-source framework for scalable Python workloads. In particular, Flower's :code:`VirtualClientEngine` makes use of `Actors `_ to spawn `virtual` clients and run their workload. - +.. youtube:: cRebUIGB5RU + :url_parameters: ?list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB + :width: 100% + +Simulating Federated Learning workloads is useful for a multitude of use-cases: you +might want to run your workload on a large cohort of clients but without having to +source, configure and manage a large number of physical devices; you might want to run +your FL workloads as fast as possible on the compute systems you have access to without +having to go through a complex setup process; you might want to validate your algorithm +on different scenarios at varying levels of data and system heterogeneity, client +availability, privacy budgets, etc. These are among some of the use-cases where +simulating FL workloads makes sense. Flower can accommodate these scenarios by means of +its `VirtualClientEngine +`_ or VCE. + +The ``VirtualClientEngine`` schedules, launches and manages `virtual` clients. These +clients are identical to `non-virtual` clients (i.e. the ones you launch via the command +`flwr.client.start_client `_) in the sense that they can +be configured by creating a class inheriting, for example, from `flwr.client.NumPyClient +`_ and therefore behave in an identical way.
+In addition to that, clients managed by the ``VirtualClientEngine`` are: + +- resource-aware: this means that each client gets assigned a portion of the compute and + memory on your system. You as a user can control this at the beginning of the + simulation and allows you to control the degree of parallelism of your Flower FL + simulation. The fewer the resources per client, the more clients can run concurrently + on the same hardware. +- self-managed: this means that you as a user do not need to launch clients manually, + instead this gets delegated to ``VirtualClientEngine``'s internals. +- ephemeral: this means that a client is only materialized when it is required in the FL + process (e.g. to do `fit() `_). The object + is destroyed afterwards, releasing the resources it was assigned and allowing in this + way other clients to participate. + +The ``VirtualClientEngine`` implements `virtual` clients using `Ray +`_, an open-source framework for scalable Python workloads. In +particular, Flower's ``VirtualClientEngine`` makes use of `Actors +`_ to spawn `virtual` clients and +run their workload. Launch your Flower simulation ----------------------------- -Running Flower simulations still require you to define your client class, a strategy, and utility functions to download and load (and potentially partition) your dataset. With that out of the way, launching your simulation is done with `start_simulation `_ and a minimal example looks as follows: - +Running Flower simulations still requires you to define your client class, a strategy, +and utility functions to download and load (and potentially partition) your dataset. +With that out of the way, launching your simulation is done with `start_simulation +`_ and a minimal example looks as +follows: ..
code-block:: python import flwr as fl from flwr.server.strategy import FedAvg - + + def client_fn(cid: str): # Return a standard Flower client return MyFlowerClient().to_client() + # Launch the simulation hist = fl.simulation.start_simulation( - client_fn=client_fn, # A function to run a _virtual_ client when required - num_clients=50, # Total number of clients available - config=fl.server.ServerConfig(num_rounds=3), # Specify number of FL rounds - strategy=FedAvg() # A Flower strategy + client_fn=client_fn, # A function to run a _virtual_ client when required + num_clients=50, # Total number of clients available + config=fl.server.ServerConfig(num_rounds=3), # Specify number of FL rounds + strategy=FedAvg(), # A Flower strategy ) - VirtualClientEngine resources ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -By default the VCE has access to all system resources (i.e. all CPUs, all GPUs, etc) since that is also the default behavior when starting Ray. However, in some settings you might want to limit how many of your system resources are used for simulation. You can do this via the :code:`ray_init_args` input argument to :code:`start_simulation` which the VCE internally passes to Ray's :code:`ray.init` command. For a complete list of settings you can configure check the `ray.init `_ documentation. Do not set :code:`ray_init_args` if you want the VCE to use all your system's CPUs and GPUs. + +By default the VCE has access to all system resources (i.e. all CPUs, all GPUs, etc) +since that is also the default behavior when starting Ray. However, in some settings you +might want to limit how many of your system resources are used for simulation. You can +do this via the ``ray_init_args`` input argument to ``start_simulation`` which the VCE +internally passes to Ray's ``ray.init`` command. For a complete list of settings you can +configure check the `ray.init +`_ documentation. +Do not set ``ray_init_args`` if you want the VCE to use all your system's CPUs and GPUs. .. 
code-block:: python @@ -50,22 +87,28 @@ By default the VCE has access to all system resources (i.e. all CPUs, all GPUs, # Launch the simulation by limiting resources visible to Flower's VCE hist = fl.simulation.start_simulation( - ... + # ... # Out of all CPUs and GPUs available in your system, # only 8xCPUs and 1xGPUs would be used for simulation. - ray_init_args = {'num_cpus': 8, 'num_gpus': 1} + ray_init_args={"num_cpus": 8, "num_gpus": 1} ) - - Assigning client resources ~~~~~~~~~~~~~~~~~~~~~~~~~~ -By default the :code:`VirtualClientEngine` assigns a single CPU core (and nothing else) to each virtual client. This means that if your system has 10 cores, that many virtual clients can be concurrently running. -More often than not, you would probably like to adjust the resources your clients get assigned based on the complexity (i.e. compute and memory footprint) of your FL workload. You can do so when starting your simulation by setting the argument `client_resources` to `start_simulation `_. Two keys are internally used by Ray to schedule and spawn workloads (in our case Flower clients): +By default the ``VirtualClientEngine`` assigns a single CPU core (and nothing else) to +each virtual client. This means that if your system has 10 cores, that many virtual +clients can be concurrently running. -* :code:`num_cpus` indicates the number of CPU cores a client would get. -* :code:`num_gpus` indicates the **ratio** of GPU memory a client gets assigned. +More often than not, you would probably like to adjust the resources your clients get +assigned based on the complexity (i.e. compute and memory footprint) of your FL +workload. You can do so when starting your simulation by setting the argument +`client_resources` to `start_simulation +`_. Two keys are internally used by +Ray to schedule and spawn workloads (in our case Flower clients): + +- ``num_cpus`` indicates the number of CPU cores a client would get. 
+- ``num_gpus`` indicates the **ratio** of GPU memory a client gets assigned. Let's see a few examples: @@ -74,90 +117,140 @@ Let's see a few examples: import flwr as fl # each client gets 1xCPU (this is the default if no resources are specified) - my_client_resources = {'num_cpus': 1, 'num_gpus': 0.0} + my_client_resources = {"num_cpus": 1, "num_gpus": 0.0} # each client gets 2xCPUs and half a GPU. (with a single GPU, 2 clients run concurrently) - my_client_resources = {'num_cpus': 2, 'num_gpus': 0.5} + my_client_resources = {"num_cpus": 2, "num_gpus": 0.5} # 10 client can run concurrently on a single GPU, but only if you have 20 CPU threads. - my_client_resources = {'num_cpus': 2, 'num_gpus': 0.1} + my_client_resources = {"num_cpus": 2, "num_gpus": 0.1} # Launch the simulation hist = fl.simulation.start_simulation( - ... - client_resources = my_client_resources # A Python dict specifying CPU/GPU resources + # ... + client_resources=my_client_resources # A Python dict specifying CPU/GPU resources ) -While the :code:`client_resources` can be used to control the degree of concurrency in your FL simulation, this does not stop you from running dozens, hundreds or even thousands of clients in the same round and having orders of magnitude more `dormant` (i.e. not participating in a round) clients. Let's say you want to have 100 clients per round but your system can only accommodate 8 clients concurrently. The :code:`VirtualClientEngine` will schedule 100 jobs to run (each simulating a client sampled by the strategy) and then will execute them in a resource-aware manner in batches of 8. +While the ``client_resources`` can be used to control the degree of concurrency in your +FL simulation, this does not stop you from running dozens, hundreds or even thousands of +clients in the same round and having orders of magnitude more `dormant` (i.e. not +participating in a round) clients. 
Let's say you want to have 100 clients per round but +your system can only accommodate 8 clients concurrently. The ``VirtualClientEngine`` +will schedule 100 jobs to run (each simulating a client sampled by the strategy) and +then will execute them in a resource-aware manner in batches of 8. -To understand all the intricate details on how resources are used to schedule FL clients and how to define custom resources, please take a look at the `Ray documentation `_. +To understand all the intricate details on how resources are used to schedule FL clients +and how to define custom resources, please take a look at the `Ray documentation +`_. Simulation examples ~~~~~~~~~~~~~~~~~~~ -A few ready-to-run complete examples for Flower simulation in Tensorflow/Keras and PyTorch are provided in the `Flower repository `_. You can run them on Google Colab too: - -* `Tensorflow/Keras Simulation `_: 100 clients collaboratively train a MLP model on MNIST. -* `PyTorch Simulation `_: 100 clients collaboratively train a CNN model on MNIST. - +A few ready-to-run complete examples for Flower simulation in Tensorflow/Keras and +PyTorch are provided in the `Flower repository `_. You +can run them on Google Colab too: +- `Tensorflow/Keras Simulation + `_: 100 + clients collaboratively train a MLP model on MNIST. +- `PyTorch Simulation + `_: 100 clients + collaboratively train a CNN model on MNIST. Multi-node Flower simulations ----------------------------- -Flower's :code:`VirtualClientEngine` allows you to run FL simulations across multiple compute nodes. Before starting your multi-node simulation ensure that you: - -#. Have the same Python environment in all nodes. -#. Have a copy of your code (e.g. your entire repo) in all nodes. -#. Have a copy of your dataset in all nodes (more about this in :ref:`simulation considerations `) -#. Pass :code:`ray_init_args={"address"="auto"}` to `start_simulation `_ so the :code:`VirtualClientEngine` attaches to a running Ray instance. -#. 
Start Ray on you head node: on the terminal type :code:`ray start --head`. This command will print a few lines, one of which indicates how to attach other nodes to the head node. -#. Attach other nodes to the head node: copy the command shown after starting the head and execute it on terminal of a new node: for example :code:`ray start --address='192.168.1.132:6379'` -With all the above done, you can run your code from the head node as you would if the simulation was running on a single node. -Once your simulation is finished, if you'd like to dismantle your cluster you simply need to run the command :code:`ray stop` in each node's terminal (including the head node). +Flower's ``VirtualClientEngine`` allows you to run FL simulations across multiple +compute nodes. Before starting your multi-node simulation ensure that you: + +1. Have the same Python environment in all nodes. +2. Have a copy of your code (e.g. your entire repo) in all nodes. +3. Have a copy of your dataset in all nodes (more about this in :ref:`simulation + considerations `) +4. Pass ``ray_init_args={"address": "auto"}`` to `start_simulation + `_ so the ``VirtualClientEngine`` + attaches to a running Ray instance. +5. Start Ray on your head node: on the terminal type ``ray start --head``. This command + will print a few lines, one of which indicates how to attach other nodes to the head + node. +6. Attach other nodes to the head node: copy the command shown after starting the head + and execute it on the terminal of a new node: for example ``ray start + --address='192.168.1.132:6379'`` + +With all the above done, you can run your code from the head node as you would if the +simulation was running on a single node. + +Once your simulation is finished, if you'd like to dismantle your cluster you simply +need to run the command ``ray stop`` in each node's terminal (including the head node). 
Multi-node simulation good-to-know ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Here we list a few interesting functionality when running multi-node FL simulations: -User :code:`ray status` to check all nodes connected to your head node as well as the total resources available to the :code:`VirtualClientEngine`. +Use ``ray status`` to check all nodes connected to your head node as well as the total +resources available to the ``VirtualClientEngine``. -When attaching a new node to the head, all its resources (i.e. all CPUs, all GPUs) will be visible by the head node. This means that the :code:`VirtualClientEngine` can schedule as many `virtual` clients as that node can possible run. In some settings you might want to exclude certain resources from the simulation. You can do this by appending `--num-cpus=` and/or `--num-gpus=` in any :code:`ray start` command (including when starting the head) +When attaching a new node to the head, all its resources (i.e. all CPUs, all GPUs) will +be visible by the head node. This means that the ``VirtualClientEngine`` can schedule as +many `virtual` clients as that node can possibly run. In some settings you might want to +exclude certain resources from the simulation. You can do this by appending +`--num-cpus=` and/or `--num-gpus=` in any ``ray +start`` command (including when starting the head) .. _considerations-for-simulations: - Considerations for simulations ------------------------------ .. note:: - We are actively working on these fronts so to make it trivial to run any FL workload with Flower simulation. + We are actively working on these fronts so to make it trivial to run any FL workload + with Flower simulation. -The current VCE allows you to run Federated Learning workloads in simulation mode whether you are prototyping simple scenarios on your personal laptop or you want to train a complex FL pipeline across multiple high-performance GPU nodes. 
While we add more capabilities to the VCE, the points below highlight some of the considerations to keep in mind when designing your FL pipeline with Flower. We also highlight a couple of current limitations in our implementation. +The current VCE allows you to run Federated Learning workloads in simulation mode +whether you are prototyping simple scenarios on your personal laptop or you want to +train a complex FL pipeline across multiple high-performance GPU nodes. While we add +more capabilities to the VCE, the points below highlight some of the considerations to +keep in mind when designing your FL pipeline with Flower. We also highlight a couple of +current limitations in our implementation. GPU resources ~~~~~~~~~~~~~ -The VCE assigns a share of GPU memory to a client that specifies the key :code:`num_gpus` in :code:`client_resources`. This being said, Ray (used internally by the VCE) is by default: - - -* not aware of the total VRAM available on the GPUs. This means that if you set :code:`num_gpus=0.5` and you have two GPUs in your system with different (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients concurrently. -* not aware of other unrelated (i.e. not created by the VCE) workloads are running on the GPU. Two takeaways from this are: +The VCE assigns a share of GPU memory to a client that specifies the key ``num_gpus`` in +``client_resources``. This being said, Ray (used internally by the VCE) is by default: - * Your Flower server might need a GPU to evaluate the `global model` after aggregation (by instance when making use of the `evaluate method `_) - * If you want to run several independent Flower simulations on the same machine you need to mask-out your GPUs with :code:`CUDA_VISIBLE_DEVICES=""` when launching your experiment. +- not aware of the total VRAM available on the GPUs. This means that if you set + ``num_gpus=0.5`` and you have two GPUs in your system with different (e.g. 
32GB and + 8GB) VRAM amounts, they both would run 2 clients concurrently. +- not aware of other unrelated (i.e. not created by the VCE) workloads are running on + the GPU. Two takeaways from this are: + - Your Flower server might need a GPU to evaluate the `global model` after aggregation + (by instance when making use of the `evaluate method + `_) + - If you want to run several independent Flower simulations on the same machine you + need to mask-out your GPUs with ``CUDA_VISIBLE_DEVICES=""`` when launching + your experiment. -In addition, the GPU resource limits passed to :code:`client_resources` are not `enforced` (i.e. they can be exceeded) which can result in the situation of client using more VRAM than the ratio specified when starting the simulation. +In addition, the GPU resource limits passed to ``client_resources`` are not `enforced` +(i.e. they can be exceeded) which can result in the situation of client using more VRAM +than the ratio specified when starting the simulation. TensorFlow with GPUs -"""""""""""""""""""" +++++++++++++++++++++ -When `using a GPU with TensorFlow `_ nearly your entire GPU memory of all your GPUs visible to the process will be mapped. This is done by TensorFlow for optimization purposes. However, in settings such as FL simulations where we want to split the GPU into multiple `virtual` clients, this is not a desirable mechanism. Luckily we can disable this default behavior by `enabling memory growth `_. +When `using a GPU with TensorFlow `_ nearly your +entire GPU memory of all your GPUs visible to the process will be mapped. This is done +by TensorFlow for optimization purposes. However, in settings such as FL simulations +where we want to split the GPU into multiple `virtual` clients, this is not a desirable +mechanism. Luckily we can disable this default behavior by `enabling memory growth +`_. -This would need to be done in the main process (which is where the server would run) and in each Actor created by the VCE. 
By means of :code:`actor_kwargs` we can pass the reserved key `"on_actor_init_fn"` in order to specify a function to be executed upon actor initialization. In this case, to enable GPU growth for TF workloads. It would look as follows: +This would need to be done in the main process (which is where the server would run) and +in each Actor created by the VCE. By means of ``actor_kwargs`` we can pass the reserved +key `"on_actor_init_fn"` in order to specify a function to be executed upon actor +initialization. In this case, to enable GPU growth for TF workloads. It would look as +follows: .. code-block:: python @@ -170,19 +263,29 @@ This would need to be done in the main process (which is where the server would # Start Flower simulation hist = fl.simulation.start_simulation( - ... + # ... actor_kwargs={ - "on_actor_init_fn": enable_tf_gpu_growth # <-- To be executed upon actor init. + "on_actor_init_fn": enable_tf_gpu_growth # <-- To be executed upon actor init. }, ) -This is precisely the mechanism used in `Tensorflow/Keras Simulation `_ example. - +This is precisely the mechanism used in `Tensorflow/Keras Simulation +`_ example. Multi-node setups ~~~~~~~~~~~~~~~~~ -* The VCE does not currently offer a way to control on which node a particular `virtual` client is executed. In other words, if more than a single node have the resources needed by a client to run, then any of those nodes could get the client workload scheduled onto. Later in the FL process (i.e. in a different round) the same client could be executed by a different node. Depending on how your clients access their datasets, this might require either having a copy of all dataset partitions on all nodes or a dataset serving mechanism (e.g. using nfs, a database) to circumvent data duplication. - -* By definition virtual clients are `stateless` due to their ephemeral nature. A client state can be implemented as part of the Flower client class but users need to ensure this saved to persistent storage (e.g. 
a database, disk) and that can be retrieve later by the same client regardless on which node it is running from. This is related to the point above also since, in some way, the client's dataset could be seen as a type of `state`. - +- The VCE does not currently offer a way to control on which node a particular `virtual` + client is executed. In other words, if more than a single node have the resources + needed by a client to run, then any of those nodes could get the client workload + scheduled onto. Later in the FL process (i.e. in a different round) the same client + could be executed by a different node. Depending on how your clients access their + datasets, this might require either having a copy of all dataset partitions on all + nodes or a dataset serving mechanism (e.g. using nfs, a database) to circumvent data + duplication. +- By definition virtual clients are `stateless` due to their ephemeral nature. A client + state can be implemented as part of the Flower client class but users need to ensure + this saved to persistent storage (e.g. a database, disk) and that can be retrieve + later by the same client regardless on which node it is running from. This is related + to the point above also since, in some way, the client's dataset could be seen as a + type of `state`. diff --git a/doc/source/how-to-save-and-load-model-checkpoints.rst b/doc/source/how-to-save-and-load-model-checkpoints.rst index 0d711e375cd8..f2f12dae97be 100644 --- a/doc/source/how-to-save-and-load-model-checkpoints.rst +++ b/doc/source/how-to-save-and-load-model-checkpoints.rst @@ -1,17 +1,19 @@ Save and load model checkpoints =============================== -Flower does not automatically save model updates on the server-side. This how-to guide describes the steps to save (and load) model checkpoints in Flower. - +Flower does not automatically save model updates on the server-side. This how-to guide +describes the steps to save (and load) model checkpoints in Flower. 
Model checkpointing ------------------- -Model updates can be persisted on the server-side by customizing :code:`Strategy` methods. -Implementing custom strategies is always an option, but for many cases it may be more convenient to simply customize an existing strategy. -The following code example defines a new :code:`SaveModelStrategy` which customized the existing built-in :code:`FedAvg` strategy. -In particular, it customizes :code:`aggregate_fit` by calling :code:`aggregate_fit` in the base class (:code:`FedAvg`). -It then continues to save returned (aggregated) weights before it returns those aggregated weights to the caller (i.e., the server): +Model updates can be persisted on the server-side by customizing ``Strategy`` methods. +Implementing custom strategies is always an option, but for many cases it may be more +convenient to simply customize an existing strategy. The following code example defines +a new ``SaveModelStrategy`` which customized the existing built-in ``FedAvg`` strategy. +In particular, it customizes ``aggregate_fit`` by calling ``aggregate_fit`` in the base +class (``FedAvg``). It then continues to save returned (aggregated) weights before it +returns those aggregated weights to the caller (i.e., the server): .. 
code-block:: python @@ -24,11 +26,15 @@ It then continues to save returned (aggregated) weights before it returns those ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: # Call aggregate_fit from base class (FedAvg) to aggregate parameters and metrics - aggregated_parameters, aggregated_metrics = super().aggregate_fit(server_round, results, failures) - + aggregated_parameters, aggregated_metrics = super().aggregate_fit( + server_round, results, failures + ) + if aggregated_parameters is not None: # Convert `Parameters` to `List[np.ndarray]` - aggregated_ndarrays: List[np.ndarray] = fl.common.parameters_to_ndarrays(aggregated_parameters) + aggregated_ndarrays: List[np.ndarray] = fl.common.parameters_to_ndarrays( + aggregated_parameters + ) # Save aggregated_ndarrays print(f"Saving round {server_round} aggregated_ndarrays...") @@ -36,24 +42,27 @@ It then continues to save returned (aggregated) weights before it returns those return aggregated_parameters, aggregated_metrics + # Create strategy and run server strategy = SaveModelStrategy( # (same arguments as FedAvg here) ) fl.server.start_server(strategy=strategy) - Save and load PyTorch checkpoints --------------------------------- -Similar to the previous example but with a few extra steps, we'll show how to -store a PyTorch checkpoint we'll use the ``torch.save`` function. -Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be transformed into a list of NumPy ``ndarray``'s, -then those are transformed into the PyTorch ``state_dict`` following the ``OrderedDict`` class structure. +Similar to the previous example but with a few extra steps, we'll show how to store a +PyTorch checkpoint we'll use the ``torch.save`` function. Firstly, ``aggregate_fit`` +returns a ``Parameters`` object that has to be transformed into a list of NumPy +``ndarray``'s, then those are transformed into the PyTorch ``state_dict`` following the +``OrderedDict`` class structure. .. 
code-block:: python net = cifar.Net().to(DEVICE) + + class SaveModelStrategy(fl.server.strategy.FedAvg): def aggregate_fit( self, @@ -64,14 +73,18 @@ then those are transformed into the PyTorch ``state_dict`` following the ``Order """Aggregate model weights using weighted average and store checkpoint""" # Call aggregate_fit from base class (FedAvg) to aggregate parameters and metrics - aggregated_parameters, aggregated_metrics = super().aggregate_fit(server_round, results, failures) - + aggregated_parameters, aggregated_metrics = super().aggregate_fit( + server_round, results, failures + ) + if aggregated_parameters is not None: print(f"Saving round {server_round} aggregated_parameters...") # Convert `Parameters` to `List[np.ndarray]` - aggregated_ndarrays: List[np.ndarray] = fl.common.parameters_to_ndarrays(aggregated_parameters) - + aggregated_ndarrays: List[np.ndarray] = fl.common.parameters_to_ndarrays( + aggregated_parameters + ) + # Convert `List[np.ndarray]` to PyTorch`state_dict` params_dict = zip(net.state_dict().keys(), aggregated_ndarrays) state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) @@ -82,7 +95,8 @@ then those are transformed into the PyTorch ``state_dict`` following the ``Order return aggregated_parameters, aggregated_metrics -To load your progress, you simply append the following lines to your code. Note that this will iterate over all saved checkpoints and load the latest one: +To load your progress, you simply append the following lines to your code. Note that +this will iterate over all saved checkpoints and load the latest one: .. code-block:: python @@ -94,4 +108,5 @@ To load your progress, you simply append the following lines to your code. Note state_dict_ndarrays = [v.cpu().numpy() for v in net.state_dict().values()] parameters = fl.common.ndarrays_to_parameters(state_dict_ndarrays) -Return/use this object of type ``Parameters`` wherever necessary, such as in the ``initial_parameters`` when defining a ``Strategy``. 
\ No newline at end of file +Return/use this object of type ``Parameters`` wherever necessary, such as in the +``initial_parameters`` when defining a ``Strategy``. diff --git a/doc/source/how-to-upgrade-to-flower-1.0.rst b/doc/source/how-to-upgrade-to-flower-1.0.rst index 3a55a1a953f5..5f10f16a551f 100644 --- a/doc/source/how-to-upgrade-to-flower-1.0.rst +++ b/doc/source/how-to-upgrade-to-flower-1.0.rst @@ -1,8 +1,10 @@ Upgrade to Flower 1.0 ===================== -Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable foundation for future growth. Compared to Flower 0.19 (and other 0.x series releases), there are a few breaking changes that make it necessary to change the code of existing 0.x-series projects. - +Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable foundation for +future growth. Compared to Flower 0.19 (and other 0.x series releases), there are a few +breaking changes that make it necessary to change the code of existing 0.x-series +projects. Install update -------------- @@ -12,13 +14,15 @@ Here's how to update an existing installation to Flower 1.0 using either pip or - pip: add ``-U`` when installing. - ``python -m pip install -U flwr`` (when using ``start_server`` and ``start_client``) - - ``python -m pip install -U flwr[simulation]`` (when using ``start_simulation``) + - ``python -m pip install -U 'flwr[simulation]'`` (when using ``start_simulation``) -- Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before running ``poetry install``). +- Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall (don't + forget to delete ``poetry.lock`` via ``rm poetry.lock`` before running ``poetry + install``). 
- ``flwr = "^1.0.0"`` (when using ``start_server`` and ``start_client``) - - ``flwr = { version = "^1.0.0", extras = ["simulation"] }`` (when using ``start_simulation``) - + - ``flwr = { version = "^1.0.0", extras = ["simulation"] }`` (when using + ``start_simulation``) Required changes ---------------- @@ -28,64 +32,96 @@ The following breaking changes require manual updates. General ~~~~~~~ -Pass all arguments as keyword arguments (not as positional arguments). Here's an example: +Pass all arguments as keyword arguments (not as positional arguments). Here's an +example: - Flower 0.19 (positional arguments): ``start_client("127.0.0.1:8080", FlowerClient())`` -- Flower 1.0 (keyword arguments): ``start_client(server_address="127.0.0.1:8080", client=FlowerClient())`` +- Flower 1.0 (keyword arguments): ``start_client(server_address="127.0.0.1:8080", + client=FlowerClient())`` Client ~~~~~~ -- Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to ``def get_parameters(self, config):`` -- Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def get_parameters(self, ins: GetParametersIns):`` +- Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to ``def + get_parameters(self, config):`` +- Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def + get_parameters(self, ins: GetParametersIns):`` Strategies / ``start_server`` / ``start_simulation`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and ``start_simulation``. Here's an example: +- Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and + ``start_simulation``. 
Here's an example: - - Flower 0.19: ``start_server(..., config={"num_rounds": 3, "round_timeout": 600.0}, ...)`` - - Flower 1.0: ``start_server(..., config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), ...)`` + - Flower 0.19: ``start_server(..., config={"num_rounds": 3, "round_timeout": 600.0}, + ...)`` + - Flower 1.0: ``start_server(..., config=flwr.server.ServerConfig(num_rounds=3, + round_timeout=600.0), ...)`` -- Replace ``num_rounds=1`` in ``start_simulation`` with the new ``config=ServerConfig(...)`` (see previous item) -- Remove ``force_final_distributed_eval`` parameter from calls to ``start_server``. Distributed evaluation on all clients can be enabled by configuring the strategy to sample all clients for evaluation after the last round of training. +- Replace ``num_rounds=1`` in ``start_simulation`` with the new + ``config=ServerConfig(...)`` (see previous item) +- Remove ``force_final_distributed_eval`` parameter from calls to ``start_server``. + Distributed evaluation on all clients can be enabled by configuring the strategy to + sample all clients for evaluation after the last round of training. - Rename parameter/ndarray conversion functions: - ``parameters_to_weights`` --> ``parameters_to_ndarrays`` - ``weights_to_parameters`` --> ``ndarrays_to_parameters`` -- Strategy initialization: if the strategy relies on the default values for ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and ``fraction_evaluate`` manually to ``0.1``. Projects that do not manually create a strategy (by calling ``start_server`` or ``start_simulation`` without passing a strategy instance) should now manually initialize FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``. +- Strategy initialization: if the strategy relies on the default values for + ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and + ``fraction_evaluate`` manually to ``0.1``. 
Projects that do not manually create a + strategy (by calling ``start_server`` or ``start_simulation`` without passing a + strategy instance) should now manually initialize FedAvg with ``fraction_fit`` and + ``fraction_evaluate`` set to ``0.1``. - Rename built-in strategy parameters (e.g., ``FedAvg``): - ``fraction_eval`` --> ``fraction_evaluate`` - ``min_eval_clients`` --> ``min_evaluate_clients`` - ``eval_fn`` --> ``evaluate_fn`` -- Rename ``rnd`` to ``server_round``. This impacts multiple methods and functions, for example, ``configure_fit``, ``aggregate_fit``, ``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``. +- Rename ``rnd`` to ``server_round``. This impacts multiple methods and functions, for + example, ``configure_fit``, ``aggregate_fit``, ``configure_evaluate``, + ``aggregate_evaluate``, and ``evaluate_fn``. - Add ``server_round`` and ``config`` to ``evaluate_fn``: - - Flower 0.19: ``def evaluate(parameters: NDArrays) -> Optional[Tuple[float, Dict[str, Scalar]]]:`` - - Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, Scalar]]]:`` + - Flower 0.19: ``def evaluate(parameters: NDArrays) -> Optional[Tuple[float, Dict[str, + Scalar]]]:`` + - Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, config: + Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, Scalar]]]:`` Custom strategies ~~~~~~~~~~~~~~~~~ -- The type of parameter ``failures`` has changed from ``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], BaseException]]`` (in ``aggregate_fit``) and ``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in ``aggregate_evaluate``) -- The ``Strategy`` method ``evaluate`` now receives the current round of federated learning/evaluation as the first parameter: +- The type of parameter ``failures`` has changed from ``List[BaseException]`` to + ``List[Union[Tuple[ClientProxy, FitRes], BaseException]]`` (in ``aggregate_fit``) 
and + ``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in + ``aggregate_evaluate``) +- The ``Strategy`` method ``evaluate`` now receives the current round of federated + learning/evaluation as the first parameter: - - Flower 0.19: ``def evaluate(self, parameters: Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:`` - - Flower 1.0: ``def evaluate(self, server_round: int, parameters: Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:`` + - Flower 0.19: ``def evaluate(self, parameters: Parameters) -> Optional[Tuple[float, + Dict[str, Scalar]]]:`` + - Flower 1.0: ``def evaluate(self, server_round: int, parameters: Parameters) -> + Optional[Tuple[float, Dict[str, Scalar]]]:`` Optional improvements --------------------- -Along with the necessary changes above, there are a number of potential improvements that just became possible: - -- Remove "placeholder" methods from subclasses of ``Client`` or ``NumPyClient``. If you, for example, use server-side evaluation, then empty placeholder implementations of ``evaluate`` are no longer necessary. -- Configure the round timeout via ``start_simulation``: ``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), ...)`` +Along with the necessary changes above, there are a number of potential improvements +that just became possible: +- Remove "placeholder" methods from subclasses of ``Client`` or ``NumPyClient``. If you, + for example, use server-side evaluation, then empty placeholder implementations of + ``evaluate`` are no longer necessary. +- Configure the round timeout via ``start_simulation``: ``start_simulation(..., + config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), ...)`` Further help ------------ -Most official `Flower code examples `_ are already updated to Flower 1.0, they can serve as a reference for using the Flower 1.0 API. If there are further questions, `join the Flower Slack `_ and use the channel ``#questions``. 
+Most official `Flower code examples +`_ are already updated to Flower 1.0, +they can serve as a reference for using the Flower 1.0 API. If there are further +questions, `join the Flower Slack `_ and use the channel +``#questions``. diff --git a/doc/source/how-to-upgrade-to-flower-next.rst b/doc/source/how-to-upgrade-to-flower-next.rst index eebe894f56ec..9a476f9865e1 100644 --- a/doc/source/how-to-upgrade-to-flower-next.rst +++ b/doc/source/how-to-upgrade-to-flower-next.rst @@ -1,11 +1,13 @@ Upgrade to Flower Next ====================== -Welcome to the migration guide for updating Flower to Flower Next! Whether you're a seasoned user -or just getting started, this guide will help you smoothly transition your existing setup to take -advantage of the latest features and improvements in Flower Next, starting from version 1.8. +Welcome to the migration guide for updating Flower to Flower Next! Whether you're a +seasoned user or just getting started, this guide will help you smoothly transition your +existing setup to take advantage of the latest features and improvements in Flower Next, +starting from version 1.8. .. note:: + This guide shows how to reuse pre-``1.8`` Flower code with minimum code changes by using the *compatibility layer* in Flower Next. In another guide, we will show how to run Flower Next end-to-end with pure Flower Next APIs. @@ -18,26 +20,44 @@ Let's dive in! - https://github.com/jgm/pandoc/issues/3973#issuecomment-337087394 .. |clientapp_link| replace:: ``ClientApp()`` + .. |serverapp_link| replace:: ``ServerApp()`` + .. |startclient_link| replace:: ``start_client()`` + .. |startserver_link| replace:: ``start_server()`` + .. |startsim_link| replace:: ``start_simulation()`` + .. |runsim_link| replace:: ``run_simulation()`` + .. |flowernext_superlink_link| replace:: ``flower-superlink`` + .. |flowernext_clientapp_link| replace:: ``flower-client-app`` + .. |flowernext_serverapp_link| replace:: ``flower-server-app`` + .. 
|flower_simulation_link| replace:: ``flower-simulation`` + .. _clientapp_link: ref-api/flwr.client.ClientApp.html + +.. _flower_simulation_link: ref-api-cli.html#flower-simulation + +.. _flowernext_clientapp_link: ref-api-cli.html#flower-client-app + +.. _flowernext_serverapp_link: ref-api-cli.html#flower-server-app + +.. _flowernext_superlink_link: ref-api-cli.html#flower-superlink + +.. _runsim_link: ref-api/flwr.simulation.run_simulation.html + .. _serverapp_link: ref-api/flwr.server.ServerApp.html + .. _startclient_link: ref-api/flwr.client.start_client.html + .. _startserver_link: ref-api/flwr.server.start_server.html -.. _startsim_link: ref-api/flwr.simulation.start_simulation.html -.. _runsim_link: ref-api/flwr.simulation.run_simulation.html -.. _flowernext_superlink_link: ref-api-cli.html#flower-superlink -.. _flowernext_clientapp_link: ref-api-cli.html#flower-client-app -.. _flowernext_serverapp_link: ref-api-cli.html#flower-server-app -.. _flower_simulation_link: ref-api-cli.html#flower-simulation +.. _startsim_link: ref-api/flwr.simulation.start_simulation.html Install update -------------- @@ -48,19 +68,18 @@ Using pip Here's how to update an existing installation of Flower to Flower Next with ``pip``: .. code-block:: bash - + $ python -m pip install -U flwr or if you need Flower Next with simulation: .. code-block:: bash - - $ python -m pip install -U flwr[simulation] + $ python -m pip install -U "flwr[simulation]" Ensure you set the following version constraint in your ``requirements.txt`` -.. code-block:: +.. code-block:: # Without simulation support flwr>=1.8,<2.0 @@ -81,32 +100,37 @@ or ``pyproject.toml``: Using Poetry ~~~~~~~~~~~~ -Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before running ``poetry install``). 
+Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall (don't forget to +delete ``poetry.lock`` via ``rm poetry.lock`` before running ``poetry install``). Ensure you set the following version constraint in your ``pyproject.toml``: .. code-block:: toml + :substitutions: - [tool.poetry.dependencies] - python = "^3.8" + [tool.poetry.dependencies] + python = "^|python_version|" - # Without simulation support - flwr = ">=1.8,<2.0" + # Without simulation support + flwr = ">=1.8,<2.0" - # With simulation support - flwr = { version = ">=1.8,<2.0", extras = ["simulation"] } + # With simulation support + flwr = { version = ">=1.8,<2.0", extras = ["simulation"] } Required changes ---------------- In Flower Next, the *infrastructure* and *application layers* have been decoupled. -Instead of starting a client in code via ``start_client()``, you create a |clientapp_link|_ and start it via the command line. -Instead of starting a server in code via ``start_server()``, you create a |serverapp_link|_ and start it via the command line. -The long-running components of server and client are called SuperLink and SuperNode. -The following non-breaking changes that require manual updates and allow you to run your project both in the traditional way and in the Flower Next way: +Instead of starting a client in code via ``start_client()``, you create a +|clientapp_link|_ and start it via the command line. Instead of starting a server in +code via ``start_server()``, you create a |serverapp_link|_ and start it via the command +line. The long-running components of server and client are called SuperLink and +SuperNode. The following non-breaking changes that require manual updates and allow you +to run your project both in the traditional way and in the Flower Next way: |clientapp_link|_ ~~~~~~~~~~~~~~~~~ + - Wrap your existing client with |clientapp_link|_ instead of launching it via |startclient_link|_. 
Here's an example: @@ -115,23 +139,25 @@ The following non-breaking changes that require manual updates and allow you to # Flower 1.8 def client_fn(cid: str): - return flwr.client.FlowerClient().to_client() - + return flwr.client.FlowerClient().to_client() + + app = flwr.client.ClientApp( - client_fn=client_fn, + client_fn=client_fn, ) # Flower 1.7 if __name__ == "__main__": flwr.client.start_client( - server_address="127.0.0.1:8080", - client=flwr.client.FlowerClient().to_client(), + server_address="127.0.0.1:8080", + client=flwr.client.FlowerClient().to_client(), ) |serverapp_link|_ ~~~~~~~~~~~~~~~~~ -- Wrap your existing strategy with |serverapp_link|_ instead of starting the server - via |startserver_link|_. Here's an example: + +- Wrap your existing strategy with |serverapp_link|_ instead of starting the server via + |startserver_link|_. Here's an example: .. code-block:: python :emphasize-lines: 2,9 @@ -152,13 +178,14 @@ The following non-breaking changes that require manual updates and allow you to Deployment ~~~~~~~~~~ + - Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, in sequence, - |flowernext_clientapp_link|_ (2x) and |flowernext_serverapp_link|_. There is no need to - execute `client.py` and `server.py` as Python scripts. + |flowernext_clientapp_link|_ (2x) and |flowernext_serverapp_link|_. There is no need + to execute `client.py` and `server.py` as Python scripts. - Here's an example to start the server without HTTPS (only for prototyping): .. code-block:: bash - + # Start a Superlink $ flower-superlink --insecure @@ -171,8 +198,9 @@ Deployment # In yet another terminal window, run the ServerApp (this starts the actual training run) $ flower-server-app server:app --insecure -- Here's another example to start with HTTPS. Use the ``--ssl-ca-certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line - options to pass paths to (CA certificate, server certificate, and server private key). 
+- Here's another example to start with HTTPS. Use the ``--ssl-ca-certfile``, + ``--ssl-certfile``, and ``--ssl-keyfile`` command line options to pass paths to (CA + certificate, server certificate, and server private key). .. code-block:: bash @@ -185,20 +213,21 @@ Deployment # In a new terminal window, start a long-running secure SuperNode $ flower-client-app client:app \ --root-certificates \ - --server 127.0.0.1:9092 + --superlink 127.0.0.1:9092 # In another terminal window, start another long-running secure SuperNode (at least 2 SuperNodes are required) $ flower-client-app client:app \ --root-certificates \ - --server 127.0.0.1:9092 + --superlink 127.0.0.1:9092 # In yet another terminal window, run the ServerApp (this starts the actual training run) $ flower-server-app server:app \ --root-certificates \ - --server 127.0.0.1:9091 + --superlink 127.0.0.1:9091 Simulation in CLI ~~~~~~~~~~~~~~~~~ + - Wrap your existing client and strategy with |clientapp_link|_ and |serverapp_link|_, respectively. There is no need to use |startsim_link|_ anymore. Here's an example: @@ -208,13 +237,16 @@ Simulation in CLI # Regular Flower client implementation class FlowerClient(NumPyClient): # ... + pass + # Flower 1.8 def client_fn(cid: str): - return FlowerClient().to_client() - + return FlowerClient().to_client() + + client_app = flwr.client.ClientApp( - client_fn=client_fn, + client_fn=client_fn, ) server_app = flwr.server.ServerApp( @@ -226,12 +258,12 @@ Simulation in CLI if __name__ == "__main__": hist = flwr.simulation.start_simulation( num_clients=100, - ... + # ... ) -- Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / ``client_app`` object in the - code instead of executing the Python script. Here's an example (assuming the - ``server_app`` and ``client_app`` objects are in a ``sim.py`` module): +- Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / ``client_app`` + object in the code instead of executing the Python script. 
Here's an example (assuming + the ``server_app`` and ``client_app`` objects are in a ``sim.py`` module): .. code-block:: bash @@ -246,8 +278,8 @@ Simulation in CLI # Flower 1.7 $ python sim.py -- Set default resources for each |clientapp_link|_ using the ``--backend-config`` command - line argument instead of setting the ``client_resources`` argument in +- Set default resources for each |clientapp_link|_ using the ``--backend-config`` + command line argument instead of setting the ``client_resources`` argument in |startsim_link|_. Here's an example: .. code-block:: bash @@ -266,26 +298,27 @@ Simulation in CLI # Flower 1.7 (in `sim.py`) if __name__ == "__main__": hist = flwr.simulation.start_simulation( - num_clients=100, - client_resources = {'num_cpus': 2, "num_gpus": 0.25}, - ... + num_clients=100, client_resources={"num_cpus": 2, "num_gpus": 0.25}, ... ) Simulation in a Notebook ~~~~~~~~~~~~~~~~~~~~~~~~ + - Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's an example: .. code-block:: python :emphasize-lines: 19,27 - NUM_CLIENTS = + NUM_CLIENTS = 10 # Replace by any integer greater than zero + def client_fn(cid: str): # ... - return FlowerClient().to_client() - + return FlowerClient().to_client() + + client_app = flwr.client.ClientApp( - client_fn=client_fn, + client_fn=client_fn, ) server_app = flwr.server.ServerApp( @@ -297,7 +330,7 @@ Simulation in a Notebook # Flower 1.8 flwr.simulation.run_simulation( - server_app=server_app, + server_app=server_app, client_app=client_app, num_supernodes=NUM_CLIENTS, backend_config=backend_config, @@ -312,18 +345,17 @@ Simulation in a Notebook client_resources=backend_config["client_resources"], ) - Further help ------------ Some official `Flower code examples `_ are already -updated to Flower Next so they can serve as a reference for using the Flower Next API. If there are -further questions, `join the Flower Slack `_ and use the channel ``#questions``. 
-You can also `participate in Flower Discuss `_ where you can find us -answering questions, or share and learn from others about migrating to Flower Next. +updated to Flower Next so they can serve as a reference for using the Flower Next API. +If there are further questions, `join the Flower Slack `_ +and use the channel ``#questions``. You can also `participate in Flower Discuss +`_ where you can find us answering questions, or share and +learn from others about migrating to Flower Next. .. admonition:: Important - :class: important As we continuously enhance Flower Next at a rapid pace, we'll be periodically updating this guide. Please feel free to share any feedback with us! diff --git a/doc/source/how-to-use-built-in-mods.rst b/doc/source/how-to-use-built-in-mods.rst index 341139175074..970b2055ec23 100644 --- a/doc/source/how-to-use-built-in-mods.rst +++ b/doc/source/how-to-use-built-in-mods.rst @@ -1,14 +1,19 @@ Use Built-in Mods ================= -**Note: This tutorial covers experimental features. The functionality and interfaces may change in future versions.** +**Note: This tutorial covers experimental features. The functionality and interfaces may +change in future versions.** -In this tutorial, we will learn how to utilize built-in mods to augment the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) allow us to perform operations before and after a task is processed in the ``ClientApp``. +In this tutorial, we will learn how to utilize built-in mods to augment the behavior of +a ``ClientApp``. Mods (sometimes also called Modifiers) allow us to perform operations +before and after a task is processed in the ``ClientApp``. What are Mods? -------------- -A Mod is a callable that wraps around a ``ClientApp``. It can manipulate or inspect the incoming ``Message`` and the resulting outgoing ``Message``. The signature for a ``Mod`` is as follows: +A Mod is a callable that wraps around a ``ClientApp``. 
It can manipulate or inspect the +incoming ``Message`` and the resulting outgoing ``Message``. The signature for a ``Mod`` +is as follows: .. code-block:: python @@ -51,12 +56,13 @@ Define your client function (``client_fn``) that will be wrapped by the mod(s): def client_fn(cid): # Your client code goes here. - return # your client + return # your client 3. Create the ``ClientApp`` with mods -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Create your ``ClientApp`` and pass the mods as a list to the ``mods`` argument. The order in which you provide the mods matters: +Create your ``ClientApp`` and pass the mods as a list to the ``mods`` argument. The +order in which you provide the mods matters: .. code-block:: python @@ -65,25 +71,31 @@ Create your ``ClientApp`` and pass the mods as a list to the ``mods`` argument. mods=[ example_mod_1, # Mod 1 example_mod_2, # Mod 2 - ] + ], ) Order of execution ------------------ -When the ``ClientApp`` runs, the mods are executed in the order they are provided in the list: +When the ``ClientApp`` runs, the mods are executed in the order they are provided in the +list: 1. ``example_mod_1`` (outermost mod) 2. ``example_mod_2`` (next mod) -3. Message handler (core function that handles the incoming ``Message`` and returns the outgoing ``Message``) +3. Message handler (core function that handles the incoming ``Message`` and returns the + outgoing ``Message``) 4. ``example_mod_2`` (on the way back) 5. ``example_mod_1`` (outermost mod on the way back) -Each mod has a chance to inspect and modify the incoming ``Message`` before passing it to the next mod, and likewise with the outgoing ``Message`` before returning it up the stack. +Each mod has a chance to inspect and modify the incoming ``Message`` before passing it +to the next mod, and likewise with the outgoing ``Message`` before returning it up the +stack. 
Conclusion ---------- -By following this guide, you have learned how to effectively use mods to enhance your ``ClientApp``'s functionality. Remember that the order of mods is crucial and affects how the input and output are processed. +By following this guide, you have learned how to effectively use mods to enhance your +``ClientApp``'s functionality. Remember that the order of mods is crucial and affects +how the input and output are processed. Enjoy building a more robust and flexible ``ClientApp`` with mods! diff --git a/doc/source/how-to-use-differential-privacy.rst b/doc/source/how-to-use-differential-privacy.rst index c8901bd906cc..67e54271bb2e 100644 --- a/doc/source/how-to-use-differential-privacy.rst +++ b/doc/source/how-to-use-differential-privacy.rst @@ -1,126 +1,151 @@ Use Differential Privacy ------------------------- -This guide explains how you can utilize differential privacy in the Flower framework. If you are not yet familiar with differential privacy, you can refer to :doc:`explanation-differential-privacy`. +======================== -.. warning:: +This guide explains how you can utilize differential privacy in the Flower framework. If +you are not yet familiar with differential privacy, you can refer to +:doc:`explanation-differential-privacy`. - Differential Privacy in Flower is in a preview phase. If you plan to use these features in a production environment with sensitive data, feel free contact us to discuss your requirements and to receive guidance on how to best use these features. +.. warning:: + Differential Privacy in Flower is in a preview phase. If you plan to use these + features in a production environment with sensitive data, feel free to contact us to + discuss your requirements and to receive guidance on how to best use these features. Central Differential Privacy -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -This approach consists of two seprate phases: clipping of the updates and adding noise to the aggregated model.
-For the clipping phase, Flower framework has made it possible to decide whether to perform clipping on the server side or the client side. - -- **Server-side Clipping**: This approach has the advantage of the server enforcing uniform clipping across all clients' updates and reducing the communication overhead for clipping values. However, it also has the disadvantage of increasing the computational load on the server due to the need to perform the clipping operation for all clients. -- **Client-side Clipping**: This approach has the advantage of reducing the computational overhead on the server. However, it also has the disadvantage of lacking centralized control, as the server has less control over the clipping process. +---------------------------- +This approach consists of two separate phases: clipping of the updates and adding noise +to the aggregated model. For the clipping phase, Flower framework has made it possible +to decide whether to perform clipping on the server side or the client side. +- **Server-side Clipping**: This approach has the advantage of the server enforcing + uniform clipping across all clients' updates and reducing the communication overhead + for clipping values. However, it also has the disadvantage of increasing the + computational load on the server due to the need to perform the clipping operation for + all clients. +- **Client-side Clipping**: This approach has the advantage of reducing the + computational overhead on the server. However, it also has the disadvantage of lacking + centralized control, as the server has less control over the clipping process. Server-side Clipping -^^^^^^^^^^^^^^^^^^^^ -For central DP with server-side clipping, there are two :code:`Strategy` classes that act as wrappers around the actual :code:`Strategy` instance (for example, :code:`FedAvg`). 
-The two wrapper classes are :code:`DifferentialPrivacyServerSideFixedClipping` and :code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and adaptive clipping. +~~~~~~~~~~~~~~~~~~~~ -.. image:: ./_static/DP/serversideCDP.png - :align: center - :width: 700 - :alt: server side clipping +For central DP with server-side clipping, there are two ``Strategy`` classes that act as +wrappers around the actual ``Strategy`` instance (for example, ``FedAvg``). The two +wrapper classes are ``DifferentialPrivacyServerSideFixedClipping`` and +``DifferentialPrivacyServerSideAdaptiveClipping`` for fixed and adaptive clipping. +.. image:: ./_static/DP/serversideCDP.png + :align: center + :width: 700 + :alt: server side clipping -The code sample below enables the :code:`FedAvg` strategy to use server-side fixed clipping using the :code:`DifferentialPrivacyServerSideFixedClipping` wrapper class. -The same approach can be used with :code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the corresponding input parameters. +The code sample below enables the ``FedAvg`` strategy to use server-side fixed clipping +using the ``DifferentialPrivacyServerSideFixedClipping`` wrapper class. The same +approach can be used with ``DifferentialPrivacyServerSideAdaptiveClipping`` by adjusting +the corresponding input parameters. .. code-block:: python - from flwr.server.strategy import DifferentialPrivacyClientSideFixedClipping - - # Create the strategy - strategy = fl.server.strategy.FedAvg(...) - - # Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping wrapper - dp_strategy = DifferentialPrivacyServerSideFixedClipping( - strategy, - cfg.noise_multiplier, - cfg.clipping_norm, - cfg.num_sampled_clients, - ) + from flwr.server.strategy import DifferentialPrivacyClientSideFixedClipping + # Create the strategy + strategy = fl.server.strategy.FedAvg(...) 
+ # Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping wrapper + dp_strategy = DifferentialPrivacyServerSideFixedClipping( + strategy, + cfg.noise_multiplier, + cfg.clipping_norm, + cfg.num_sampled_clients, + ) Client-side Clipping -^^^^^^^^^^^^^^^^^^^^ -For central DP with client-side clipping, the server sends the clipping value to selected clients on each round. -Clients can use existing Flower :code:`Mods` to perform the clipping. -Two mods are available for fixed and adaptive client-side clipping: :code:`fixedclipping_mod` and :code:`adaptiveclipping_mod` with corresponding server-side wrappers :code:`DifferentialPrivacyClientSideFixedClipping` and :code:`DifferentialPrivacyClientSideAdaptiveClipping`. +~~~~~~~~~~~~~~~~~~~~ -.. image:: ./_static/DP/clientsideCDP.png - :align: center - :width: 800 - :alt: client side clipping +For central DP with client-side clipping, the server sends the clipping value to +selected clients on each round. Clients can use existing Flower ``Mods`` to perform the +clipping. Two mods are available for fixed and adaptive client-side clipping: +``fixedclipping_mod`` and ``adaptiveclipping_mod`` with corresponding server-side +wrappers ``DifferentialPrivacyClientSideFixedClipping`` and +``DifferentialPrivacyClientSideAdaptiveClipping``. +.. image:: ./_static/DP/clientsideCDP.png + :align: center + :width: 800 + :alt: client side clipping -The code sample below enables the :code:`FedAvg` strategy to use differential privacy with client-side fixed clipping using both the :code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on the client, :code:`fixedclipping_mod`: +The code sample below enables the ``FedAvg`` strategy to use differential privacy with +client-side fixed clipping using both the ``DifferentialPrivacyClientSideFixedClipping`` +wrapper class and, on the client, ``fixedclipping_mod``: .. 
code-block:: python - from flwr.server.strategy import DifferentialPrivacyClientSideFixedClipping + from flwr.server.strategy import DifferentialPrivacyClientSideFixedClipping - # Create the strategy - strategy = fl.server.strategy.FedAvg(...) + # Create the strategy + strategy = fl.server.strategy.FedAvg(...) - # Wrap the strategy with the DifferentialPrivacyClientSideFixedClipping wrapper - dp_strategy = DifferentialPrivacyClientSideFixedClipping( - strategy, - cfg.noise_multiplier, - cfg.clipping_norm, - cfg.num_sampled_clients, - ) + # Wrap the strategy with the DifferentialPrivacyClientSideFixedClipping wrapper + dp_strategy = DifferentialPrivacyClientSideFixedClipping( + strategy, + cfg.noise_multiplier, + cfg.clipping_norm, + cfg.num_sampled_clients, + ) -In addition to the server-side strategy wrapper, the :code:`ClientApp` needs to configure the matching :code:`fixedclipping_mod` to perform the client-side clipping: +In addition to the server-side strategy wrapper, the ``ClientApp`` needs to configure +the matching ``fixedclipping_mod`` to perform the client-side clipping: .. code-block:: python - from flwr.client.mod import fixedclipping_mod - - # Add fixedclipping_mod to the client-side mods - app = fl.client.ClientApp( - client_fn=client_fn, - mods=[ - fixedclipping_mod, - ] - ) + from flwr.client.mod import fixedclipping_mod + # Add fixedclipping_mod to the client-side mods + app = fl.client.ClientApp( + client_fn=client_fn, + mods=[ + fixedclipping_mod, + ], + ) Local Differential Privacy -~~~~~~~~~~~~~~~~~~~~~~~~~~ -To utilize local differential privacy (DP) and add noise to the client model parameters before transmitting them to the server in Flower, you can use the `LocalDpMod`. The following hyperparameters need to be set: clipping norm value, sensitivity, epsilon, and delta. 
+-------------------------- + +To utilize local differential privacy (DP) and add noise to the client model parameters +before transmitting them to the server in Flower, you can use the `LocalDpMod`. The +following hyperparameters need to be set: clipping norm value, sensitivity, epsilon, and +delta. .. image:: ./_static/DP/localdp.png - :align: center - :width: 700 - :alt: local DP mod + :align: center + :width: 700 + :alt: local DP mod -Below is a code example that shows how to use :code:`LocalDpMod`: +Below is a code example that shows how to use ``LocalDpMod``: .. code-block:: python - from flwr.client.mod.localdp_mod import LocalDpMod - - # Create an instance of the mod with the required params - local_dp_obj = LocalDpMod( - cfg.clipping_norm, cfg.sensitivity, cfg.epsilon, cfg.delta - ) - # Add local_dp_obj to the client-side mods + from flwr.client.mod.localdp_mod import LocalDpMod - app = fl.client.ClientApp( - client_fn=client_fn, - mods=[local_dp_obj], - ) + # Create an instance of the mod with the required params + local_dp_obj = LocalDpMod(cfg.clipping_norm, cfg.sensitivity, cfg.epsilon, cfg.delta) + # Add local_dp_obj to the client-side mods + app = fl.client.ClientApp( + client_fn=client_fn, + mods=[local_dp_obj], + ) -Please note that the order of mods, especially those that modify parameters, is important when using multiple modifiers. Typically, differential privacy (DP) modifiers should be the last to operate on parameters. +Please note that the order of mods, especially those that modify parameters, is +important when using multiple modifiers. Typically, differential privacy (DP) modifiers +should be the last to operate on parameters. Local Training using Privacy Engines -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -For ensuring data instance-level privacy during local model training on the client side, consider leveraging privacy engines such as Opacus and TensorFlow Privacy. 
For examples of using Flower with these engines, please refer to the Flower examples directory (`Opacus `_, `Tensorflow Privacy `_). \ No newline at end of file +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +For ensuring data instance-level privacy during local model training on the client side, +consider leveraging privacy engines such as Opacus and TensorFlow Privacy. For examples +of using Flower with these engines, please refer to the Flower examples directory +(`Opacus `_, `Tensorflow +Privacy `_). diff --git a/doc/source/how-to-use-strategies.rst b/doc/source/how-to-use-strategies.rst index d0e2cd63a091..b4803c6059b3 100644 --- a/doc/source/how-to-use-strategies.rst +++ b/doc/source/how-to-use-strategies.rst @@ -1,19 +1,21 @@ Use strategies ============== -Flower allows full customization of the learning process through the :code:`Strategy` abstraction. A number of built-in strategies are provided in the core framework. +Flower allows full customization of the learning process through the ``Strategy`` +abstraction. A number of built-in strategies are provided in the core framework. -There are three ways to customize the way Flower orchestrates the learning process on the server side: - -* Use an existing strategy, for example, :code:`FedAvg` -* Customize an existing strategy with callback functions -* Implement a novel strategy +There are three ways to customize the way Flower orchestrates the learning process on +the server side: +- Use an existing strategy, for example, ``FedAvg`` +- Customize an existing strategy with callback functions +- Implement a novel strategy Use an existing strategy ------------------------ -Flower comes with a number of popular federated learning strategies built-in. A built-in strategy can be instantiated as follows: +Flower comes with a number of popular federated learning strategies built-in. A built-in +strategy can be instantiated as follows: .. 
code-block:: python @@ -22,7 +24,9 @@ Flower comes with a number of popular federated learning strategies built-in. A strategy = fl.server.strategy.FedAvg() fl.server.start_server(config=fl.server.ServerConfig(num_rounds=3), strategy=strategy) -This creates a strategy with all parameters left at their default values and passes it to the :code:`start_server` function. It is usually recommended to adjust a few parameters during instantiation: +This creates a strategy with all parameters left at their default values and passes it +to the ``start_server`` function. It is usually recommended to adjust a few parameters +during instantiation: .. code-block:: python @@ -35,22 +39,26 @@ This creates a strategy with all parameters left at their default values and pas ) fl.server.start_server(config=fl.server.ServerConfig(num_rounds=3), strategy=strategy) - Customize an existing strategy with callback functions ------------------------------------------------------ -Existing strategies provide several ways to customize their behaviour. Callback functions allow strategies to call user-provided code during execution. +Existing strategies provide several ways to customize their behaviour. Callback +functions allow strategies to call user-provided code during execution. Configuring client fit and client evaluate ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The server can pass new configuration values to the client each round by providing a function to :code:`on_fit_config_fn`. The provided function will be called by the strategy and must return a dictionary of configuration key values pairs that will be sent to the client. -It must return a dictionary of arbitrary configuration values :code:`client.fit` and :code:`client.evaluate` functions during each round of federated learning. +The server can pass new configuration values to the client each round by providing a +function to ``on_fit_config_fn``. 
The provided function will be called by the strategy +and must return a dictionary of configuration key-value pairs that will be sent to the +client. It must return a dictionary of arbitrary configuration values for the ``client.fit`` and +``client.evaluate`` functions during each round of federated learning. .. code-block:: python import flwr as fl + def get_on_fit_config_fn() -> Callable[[int], Dict[str, str]]: """Return a function which returns training configurations.""" @@ -64,6 +72,7 @@ It must return a dictionary of arbitrary configuration values :code:`client.fit return fit_config + strategy = fl.server.strategy.FedAvg( fraction_fit=0.1, min_fit_clients=10, @@ -72,18 +81,23 @@ It must return a dictionary of arbitrary configuration values :code:`client.fit ) fl.server.start_server(config=fl.server.ServerConfig(num_rounds=3), strategy=strategy) -The :code:`on_fit_config_fn` can be used to pass arbitrary configuration values from server to client, and poetentially change these values each round, for example, to adjust the learning rate. -The client will receive the dictionary returned by the :code:`on_fit_config_fn` in its own :code:`client.fit()` function. +The ``on_fit_config_fn`` can be used to pass arbitrary configuration values from server +to client, and potentially change these values each round, for example, to adjust the +learning rate. The client will receive the dictionary returned by the +``on_fit_config_fn`` in its own ``client.fit()`` function. -Similar to :code:`on_fit_config_fn`, there is also :code:`on_evaluate_config_fn` to customize the configuration sent to :code:`client.evaluate()` +Similar to ``on_fit_config_fn``, there is also ``on_evaluate_config_fn`` to customize +the configuration sent to ``client.evaluate()`` Configuring server-side evaluation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Server-side evaluation can be enabled by passing an evaluation function to :code:`evaluate_fn`.
- +Server-side evaluation can be enabled by passing an evaluation function to +``evaluate_fn``. Implement a novel strategy -------------------------- -Writing a fully custom strategy is a bit more involved, but it provides the most flexibility. Read the `Implementing Strategies `_ guide to learn more. +Writing a fully custom strategy is a bit more involved, but it provides the most +flexibility. Read the `Implementing Strategies `_ +guide to learn more. diff --git a/doc/source/index.rst b/doc/source/index.rst index df41d9d4ccb0..197599d595a8 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -2,15 +2,16 @@ Flower Framework Documentation ============================== .. meta:: - :description: Check out the documentation of the main Flower Framework enabling easy Python development for Federated Learning. - -Welcome to Flower's documentation. `Flower `_ is a friendly federated learning framework. + :description: Check out the documentation of the main Flower Framework enabling easy Python development for Federated Learning. +Welcome to Flower's documentation. `Flower `_ is a friendly federated +learning framework. Join the Flower Community ------------------------- -The Flower Community is growing quickly - we're a friendly group of researchers, engineers, students, professionals, academics, and other enthusiasts. +The Flower Community is growing quickly - we're a friendly group of researchers, +engineers, students, professionals, academics, and other enthusiasts. .. button-link:: https://flower.ai/join-slack :color: primary @@ -18,13 +19,12 @@ The Flower Community is growing quickly - we're a friendly group of researchers, Join us on Slack - Flower Framework ---------------- -The user guide is targeted at researchers and developers who want to use Flower -to bring existing machine learning workloads into a federated setting. One of -Flower's design goals was to make this simple. Read on to learn more. 
+The user guide is targeted at researchers and developers who want to use Flower to bring +existing machine learning workloads into a federated setting. One of Flower's design +goals was to make this simple. Read on to learn more. Tutorials ~~~~~~~~~ @@ -32,43 +32,50 @@ Tutorials A learning-oriented series of federated learning tutorials, the best place to start. .. toctree:: - :maxdepth: 1 - :caption: Tutorial + :maxdepth: 1 + :caption: Tutorial - tutorial-series-what-is-federated-learning - tutorial-series-get-started-with-flower-pytorch - tutorial-series-use-a-federated-learning-strategy-pytorch - tutorial-series-build-a-strategy-from-scratch-pytorch - tutorial-series-customize-the-client-pytorch + tutorial-series-what-is-federated-learning + tutorial-series-get-started-with-flower-pytorch + tutorial-series-use-a-federated-learning-strategy-pytorch + tutorial-series-build-a-strategy-from-scratch-pytorch + tutorial-series-customize-the-client-pytorch .. toctree:: - :maxdepth: 1 - :caption: Quickstart tutorials - :hidden: - - tutorial-quickstart-pytorch - tutorial-quickstart-tensorflow - tutorial-quickstart-huggingface - tutorial-quickstart-jax - tutorial-quickstart-pandas - tutorial-quickstart-fastai - tutorial-quickstart-pytorch-lightning - tutorial-quickstart-scikitlearn - tutorial-quickstart-xgboost - tutorial-quickstart-android - tutorial-quickstart-ios - -QUICKSTART TUTORIALS: :doc:`PyTorch ` | :doc:`TensorFlow ` | :doc:`🤗 Transformers ` | :doc:`JAX ` | :doc:`Pandas ` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | :doc:`XGBoost ` | :doc:`Android ` | :doc:`iOS ` + :maxdepth: 1 + :caption: Quickstart tutorials + :hidden: + + tutorial-quickstart-pytorch + tutorial-quickstart-tensorflow + tutorial-quickstart-mlx + tutorial-quickstart-huggingface + tutorial-quickstart-jax + tutorial-quickstart-pandas + tutorial-quickstart-fastai + tutorial-quickstart-pytorch-lightning + tutorial-quickstart-scikitlearn + tutorial-quickstart-xgboost + 
tutorial-quickstart-android + tutorial-quickstart-ios + +QUICKSTART TUTORIALS: :doc:`PyTorch ` | :doc:`TensorFlow +` | :doc:`MLX ` | :doc:`🤗 +Transformers ` | :doc:`JAX ` | +:doc:`Pandas ` | :doc:`fastai ` +| :doc:`PyTorch Lightning ` | :doc:`scikit-learn +` | :doc:`XGBoost ` | +:doc:`Android ` | :doc:`iOS ` We also made video tutorials for PyTorch: -.. youtube:: jOmmuzMIQ4c - :width: 80% +.. youtube:: jOmmuzMIQ4c + :width: 80% And TensorFlow: -.. youtube:: FGTc2TQq7VM - :width: 80% +.. youtube:: FGTc2TQq7VM + :width: 80% How-to guides ~~~~~~~~~~~~~ @@ -76,45 +83,46 @@ How-to guides Problem-oriented how-to guides show step-by-step how to achieve a specific goal. .. toctree:: - :maxdepth: 1 - :caption: How-to guides - - how-to-install-flower - how-to-configure-clients - how-to-use-strategies - how-to-implement-strategies - how-to-aggregate-evaluation-results - how-to-save-and-load-model-checkpoints - how-to-run-simulations - how-to-monitor-simulation - how-to-configure-logging - how-to-enable-ssl-connections - how-to-use-built-in-mods - how-to-use-differential-privacy - how-to-authenticate-supernodes - how-to-run-flower-using-docker - how-to-upgrade-to-flower-1.0 - how-to-upgrade-to-flower-next + :maxdepth: 1 + :caption: How-to guides + + how-to-install-flower + how-to-configure-clients + how-to-use-strategies + how-to-implement-strategies + how-to-aggregate-evaluation-results + how-to-save-and-load-model-checkpoints + how-to-run-simulations + how-to-monitor-simulation + how-to-configure-logging + how-to-enable-ssl-connections + how-to-use-built-in-mods + how-to-use-differential-privacy + how-to-authenticate-supernodes + docker/index + how-to-upgrade-to-flower-1.0 + how-to-upgrade-to-flower-next .. 
toctree:: - :maxdepth: 1 - :caption: Legacy example guides + :maxdepth: 1 + :caption: Legacy example guides - example-pytorch-from-centralized-to-federated - example-jax-from-centralized-to-federated - example-fedbn-pytorch-from-centralized-to-federated + example-pytorch-from-centralized-to-federated + example-fedbn-pytorch-from-centralized-to-federated Explanations ~~~~~~~~~~~~ -Understanding-oriented concept guides explain and discuss key topics and underlying ideas behind Flower and collaborative AI. +Understanding-oriented concept guides explain and discuss key topics and underlying +ideas behind Flower and collaborative AI. .. toctree:: - :maxdepth: 1 - :caption: Explanations + :maxdepth: 1 + :caption: Explanations - explanation-federated-evaluation - explanation-differential-privacy + explanation-federated-evaluation + explanation-differential-privacy + explanation-flower-architecture References ~~~~~~~~~~ @@ -122,72 +130,77 @@ References Information-oriented API reference and other reference material. .. autosummary:: - :toctree: ref-api - :template: autosummary/module.rst - :caption: API reference - :recursive: + :toctree: ref-api + :template: autosummary/module.rst + :caption: API reference + :recursive: - flwr + flwr .. toctree:: - :maxdepth: 2 + :maxdepth: 2 - ref-api-cli + ref-api-cli .. toctree:: - :maxdepth: 1 - :caption: Reference docs - - ref-example-projects - ref-telemetry - ref-changelog - ref-faq + :maxdepth: 1 + :caption: Reference docs + ref-example-projects + ref-telemetry + ref-changelog + ref-faq Contributor docs ---------------- -The Flower community welcomes contributions. The following docs are intended to help along the way. - +The Flower community welcomes contributions. The following docs are intended to help +along the way. .. 
toctree:: - :maxdepth: 1 - :caption: Contributor tutorials + :maxdepth: 1 + :caption: Contributor tutorials - contributor-tutorial-contribute-on-github - contributor-tutorial-get-started-as-a-contributor + contributor-tutorial-contribute-on-github + contributor-tutorial-get-started-as-a-contributor .. toctree:: - :maxdepth: 1 - :caption: Contributor how-to guides - - contributor-how-to-install-development-versions - contributor-how-to-set-up-a-virtual-env - contributor-how-to-develop-in-vscode-dev-containers - contributor-how-to-create-new-messages - contributor-how-to-write-documentation - contributor-how-to-release-flower - contributor-how-to-contribute-translations - contributor-how-to-build-docker-images + :maxdepth: 1 + :caption: Contributor how-to guides + + contributor-how-to-install-development-versions + contributor-how-to-set-up-a-virtual-env + contributor-how-to-develop-in-vscode-dev-containers + contributor-how-to-write-documentation + contributor-how-to-release-flower + contributor-how-to-contribute-translations + contributor-how-to-build-docker-images .. toctree:: - :maxdepth: 1 - :caption: Contributor explanations + :maxdepth: 1 + :caption: Contributor explanations - contributor-explanation-architecture + contributor-explanation-public-and-private-apis .. toctree:: - :maxdepth: 1 - :caption: Contributor references + :maxdepth: 1 + :caption: Contributor references + + fed/index + contributor-ref-good-first-contributions + contributor-ref-secure-aggregation-protocols + +.. + Indices and tables - fed/index - contributor-ref-good-first-contributions - contributor-ref-secure-aggregation-protocols +.. + ------------------ +.. + * :ref:`genindex` -.. Indices and tables -.. ------------------ +.. + * :ref:`modindex` -.. * :ref:`genindex` -.. * :ref:`modindex` -.. * :ref:`search` +.. 
+ * :ref:`search` diff --git a/doc/source/ref-api-cli.rst b/doc/source/ref-api-cli.rst index 296c2219a065..e95132bbadba 100644 --- a/doc/source/ref-api-cli.rst +++ b/doc/source/ref-api-cli.rst @@ -1,42 +1,69 @@ Flower CLI reference ==================== +.. _flwr-apiref: + +flwr CLI +-------- + +.. click:: flwr.cli.app:typer_click_object + :prog: flwr + :nested: full + .. _flower-simulation-apiref: flower-simulation -~~~~~~~~~~~~~~~~~ +----------------- .. argparse:: - :module: flwr.simulation.run_simulation - :func: _parse_args_run_simulation - :prog: flower-simulation + :module: flwr.simulation.run_simulation + :func: _parse_args_run_simulation + :prog: flower-simulation .. _flower-superlink-apiref: flower-superlink -~~~~~~~~~~~~~~~~ +---------------- .. argparse:: - :module: flwr.server.app - :func: _parse_args_run_superlink - :prog: flower-superlink + :module: flwr.server.app + :func: _parse_args_run_superlink + :prog: flower-superlink -.. _flower-driver-api-apiref: +.. _flower-supernode-apiref: -flower-client-app -~~~~~~~~~~~~~~~~~ +flower-supernode +---------------- .. argparse:: - :module: flwr.client.supernode.app - :func: _parse_args_run_client_app - :prog: flower-client-app + :module: flwr.client.supernode.app + :func: _parse_args_run_supernode + :prog: flower-supernode .. _flower-server-app-apiref: flower-server-app -~~~~~~~~~~~~~~~~~ +----------------- + +.. note:: + + Note that since version ``1.11.0``, ``flower-server-app`` no longer supports passing + a reference to a `ServerApp` attribute. Instead, you need to pass the path to Flower + app via the argument ``--app``. This is the path to a directory containing a + `pyproject.toml`. You can create a valid Flower app by executing ``flwr new`` and + following the prompt. + +.. argparse:: + :module: flwr.server.run_serverapp + :func: _parse_args_run_server_app + :prog: flower-server-app + +.. _flower-superexec-apiref: + +flower-superexec +---------------- .. 
argparse:: - :module: flwr.server.run_serverapp - :func: _parse_args_run_server_app - :prog: flower-server-app + :module: flwr.superexec.app + :func: _parse_args_run_superexec + :prog: flower-superexec diff --git a/doc/source/ref-changelog.md b/doc/source/ref-changelog.md index c742b8cd9cbe..f88a75feabd3 100644 --- a/doc/source/ref-changelog.md +++ b/doc/source/ref-changelog.md @@ -1,13 +1,409 @@ # Changelog -## Unreleased +## v1.12.0 (2024-10-14) + +### Thanks to our contributors + +We would like to give our special thanks to all the contributors who made the new version of Flower possible (in `git shortlog` order): + +`Adam Narozniak`, `Audris`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Jiahao Tan`, `Julian Rußmeyer`, `Mohammad Naseri`, `Ray Sun`, `Robert Steiner`, `Yan Gao`, `xiliguguagua` ### What's new? +- **Introduce SuperExec log streaming** ([#3577](https://github.com/adap/flower/pull/3577), [#3584](https://github.com/adap/flower/pull/3584), [#4242](https://github.com/adap/flower/pull/4242), [#3611](https://github.com/adap/flower/pull/3611), [#3613](https://github.com/adap/flower/pull/3613)) + + Flower now supports log streaming from a remote SuperExec using the `flwr log` command. This new feature allows you to monitor logs from SuperExec in real time via `flwr log ` (or `flwr log `). + +- **Improve `flwr new` templates** ([#4291](https://github.com/adap/flower/pull/4291), [#4292](https://github.com/adap/flower/pull/4292), [#4293](https://github.com/adap/flower/pull/4293), [#4294](https://github.com/adap/flower/pull/4294), [#4295](https://github.com/adap/flower/pull/4295)) + + The `flwr new` command templates for MLX, NumPy, sklearn, JAX, and PyTorch have been updated to improve usability and consistency across frameworks. 
+ +- **Migrate ID handling to use unsigned 64-bit integers** ([#4170](https://github.com/adap/flower/pull/4170), [#4237](https://github.com/adap/flower/pull/4237), [#4243](https://github.com/adap/flower/pull/4243)) + + Node IDs, run IDs, and related fields have been migrated from signed 64-bit integers (`sint64`) to unsigned 64-bit integers (`uint64`). To support this change, the `uint64` type is fully supported in all communications. You may now use `uint64` values in config and metric dictionaries. For Python users, that means using `int` values larger than the maximum value of `sint64` but less than the maximum value of `uint64`. + +- **Add Flower architecture explanation** ([#3270](https://github.com/adap/flower/pull/3270)) + + A new [Flower architecture explainer](https://flower.ai/docs/framework/explanation-flower-architecture.html) page introduces Flower components step-by-step. Check out the `EXPLANATIONS` section of the Flower documentation if you're interested. + +- **Introduce FedRep baseline** ([#3790](https://github.com/adap/flower/pull/3790)) + + FedRep is a federated learning algorithm that learns shared data representations across clients while allowing each to maintain personalized local models, balancing collaboration and individual adaptation. 
Read all the details in the paper: "Exploiting Shared Representations for Personalized Federated Learning" ([arxiv](https://arxiv.org/abs/2102.07078)) + +- **Improve FlowerTune template and LLM evaluation pipelines** ([#4286](https://github.com/adap/flower/pull/4286), [#3769](https://github.com/adap/flower/pull/3769), [#4272](https://github.com/adap/flower/pull/4272), [#4257](https://github.com/adap/flower/pull/4257), [#4220](https://github.com/adap/flower/pull/4220), [#4282](https://github.com/adap/flower/pull/4282), [#4171](https://github.com/adap/flower/pull/4171), [#4228](https://github.com/adap/flower/pull/4228), [#4258](https://github.com/adap/flower/pull/4258), [#4296](https://github.com/adap/flower/pull/4296), [#4287](https://github.com/adap/flower/pull/4287), [#4217](https://github.com/adap/flower/pull/4217), [#4249](https://github.com/adap/flower/pull/4249), [#4324](https://github.com/adap/flower/pull/4324), [#4219](https://github.com/adap/flower/pull/4219), [#4327](https://github.com/adap/flower/pull/4327)) + + Refined evaluation pipelines, metrics, and documentation for the upcoming FlowerTune LLM Leaderboard across multiple domains including Finance, Medical, and general NLP. Stay tuned for the official launch—we welcome all federated learning and LLM enthusiasts to participate in this exciting challenge! 
+ +- **Enhance Docker Support and Documentation** ([#4191](https://github.com/adap/flower/pull/4191), [#4251](https://github.com/adap/flower/pull/4251), [#4190](https://github.com/adap/flower/pull/4190), [#3928](https://github.com/adap/flower/pull/3928), [#4298](https://github.com/adap/flower/pull/4298), [#4192](https://github.com/adap/flower/pull/4192), [#4136](https://github.com/adap/flower/pull/4136), [#4187](https://github.com/adap/flower/pull/4187), [#4261](https://github.com/adap/flower/pull/4261), [#4177](https://github.com/adap/flower/pull/4177), [#4176](https://github.com/adap/flower/pull/4176), [#4189](https://github.com/adap/flower/pull/4189), [#4297](https://github.com/adap/flower/pull/4297), [#4226](https://github.com/adap/flower/pull/4226)) + + Upgraded Ubuntu base image to 24.04, added SBOM and gcc to Docker images, and comprehensively updated [Docker documentation](https://flower.ai/docs/framework/docker/index.html) including quickstart guides and distributed Docker Compose instructions. + +- **Introduce Flower glossary** ([#4165](https://github.com/adap/flower/pull/4165), [#4235](https://github.com/adap/flower/pull/4235)) + + Added the [Federated Learning glossary](https://flower.ai/glossary/) to the Flower repository, located under the `flower/glossary/` directory. This resource aims to provide clear definitions and explanations of key FL concepts. Community contributions are highly welcomed to help expand and refine this knowledge base — this is probably the easiest way to become a Flower contributor! + +- **Implement Message Time-to-Live (TTL)** ([#3620](https://github.com/adap/flower/pull/3620), [#3596](https://github.com/adap/flower/pull/3596), [#3615](https://github.com/adap/flower/pull/3615), [#3609](https://github.com/adap/flower/pull/3609), [#3635](https://github.com/adap/flower/pull/3635)) + + Added comprehensive TTL support for messages in Flower's SuperLink. 
Messages are now automatically expired and cleaned up based on configurable TTL values, available through the low-level API (and used by default in the high-level API). + +- **Improve FAB handling** ([#4303](https://github.com/adap/flower/pull/4303), [#4264](https://github.com/adap/flower/pull/4264), [#4305](https://github.com/adap/flower/pull/4305), [#4304](https://github.com/adap/flower/pull/4304)) + + An 8-character hash is now appended to the FAB file name. The `flwr install` command installs FABs with a more flattened folder structure, reducing it from 3 levels to 1. + +- **Update documentation** ([#3341](https://github.com/adap/flower/pull/3341), [#3338](https://github.com/adap/flower/pull/3338), [#3927](https://github.com/adap/flower/pull/3927), [#4152](https://github.com/adap/flower/pull/4152), [#4151](https://github.com/adap/flower/pull/4151), [#3993](https://github.com/adap/flower/pull/3993)) + + Updated quickstart tutorials (PyTorch Lightning, TensorFlow, Hugging Face, Fastai) to use the new `flwr run` command and removed default title from documentation base template. A new blockchain example has been added to FAQ. + +- **Update example projects** ([#3716](https://github.com/adap/flower/pull/3716), [#4007](https://github.com/adap/flower/pull/4007), [#4130](https://github.com/adap/flower/pull/4130), [#4234](https://github.com/adap/flower/pull/4234), [#4206](https://github.com/adap/flower/pull/4206), [#4188](https://github.com/adap/flower/pull/4188), [#4247](https://github.com/adap/flower/pull/4247), [#4331](https://github.com/adap/flower/pull/4331)) + + Refreshed multiple example projects including vertical FL, PyTorch (advanced), Pandas, Secure Aggregation, and XGBoost examples. Optimized Hugging Face quickstart with a smaller language model and removed legacy simulation examples. 
+ +- **Update translations** ([#4070](https://github.com/adap/flower/pull/4070), [#4316](https://github.com/adap/flower/pull/4316), [#4252](https://github.com/adap/flower/pull/4252), [#4256](https://github.com/adap/flower/pull/4256), [#4210](https://github.com/adap/flower/pull/4210), [#4263](https://github.com/adap/flower/pull/4263), [#4259](https://github.com/adap/flower/pull/4259)) + +- **General improvements** ([#4239](https://github.com/adap/flower/pull/4239), [4276](https://github.com/adap/flower/pull/4276), [4204](https://github.com/adap/flower/pull/4204), [4184](https://github.com/adap/flower/pull/4184), [4227](https://github.com/adap/flower/pull/4227), [4183](https://github.com/adap/flower/pull/4183), [4202](https://github.com/adap/flower/pull/4202), [4250](https://github.com/adap/flower/pull/4250), [4267](https://github.com/adap/flower/pull/4267), [4246](https://github.com/adap/flower/pull/4246), [4240](https://github.com/adap/flower/pull/4240), [4265](https://github.com/adap/flower/pull/4265), [4238](https://github.com/adap/flower/pull/4238), [4275](https://github.com/adap/flower/pull/4275), [4318](https://github.com/adap/flower/pull/4318), [#4178](https://github.com/adap/flower/pull/4178), [#4315](https://github.com/adap/flower/pull/4315), [#4241](https://github.com/adap/flower/pull/4241), [#4289](https://github.com/adap/flower/pull/4289), [#4290](https://github.com/adap/flower/pull/4290), [#4181](https://github.com/adap/flower/pull/4181), [#4208](https://github.com/adap/flower/pull/4208), [#4225](https://github.com/adap/flower/pull/4225), [#4314](https://github.com/adap/flower/pull/4314), [#4174](https://github.com/adap/flower/pull/4174), [#4203](https://github.com/adap/flower/pull/4203), [#4274](https://github.com/adap/flower/pull/4274), [#3154](https://github.com/adap/flower/pull/3154), [#4201](https://github.com/adap/flower/pull/4201), [#4268](https://github.com/adap/flower/pull/4268), [#4254](https://github.com/adap/flower/pull/4254), 
[#3990](https://github.com/adap/flower/pull/3990), [#4212](https://github.com/adap/flower/pull/4212), [#2938](https://github.com/adap/flower/pull/2938), [#4205](https://github.com/adap/flower/pull/4205), [#4222](https://github.com/adap/flower/pull/4222), [#4313](https://github.com/adap/flower/pull/4313), [#3936](https://github.com/adap/flower/pull/3936), [#4278](https://github.com/adap/flower/pull/4278), [#4319](https://github.com/adap/flower/pull/4319), [#4332](https://github.com/adap/flower/pull/4332), [#4333](https://github.com/adap/flower/pull/4333)) + + As always, many parts of the Flower framework and quality infrastructure were improved and updated. + +### Incompatible changes + +- **Drop Python 3.8 support and update minimum version to 3.9** ([#4180](https://github.com/adap/flower/pull/4180), [#4213](https://github.com/adap/flower/pull/4213), [#4193](https://github.com/adap/flower/pull/4193), [#4199](https://github.com/adap/flower/pull/4199), [#4196](https://github.com/adap/flower/pull/4196), [#4195](https://github.com/adap/flower/pull/4195), [#4198](https://github.com/adap/flower/pull/4198), [#4194](https://github.com/adap/flower/pull/4194)) + + Python 3.8 support was deprecated in Flower 1.9, and this release removes support. Flower now requires Python 3.9 or later (Python 3.11 is recommended). CI and documentation were updated to use Python 3.9 as the minimum supported version. Flower now supports Python 3.9 to 3.12. + +## v1.11.1 (2024-09-11) + +### Thanks to our contributors + +We would like to give our special thanks to all the contributors who made the new version of Flower possible (in `git shortlog` order): + +`Charles Beauville`, `Chong Shen Ng`, `Daniel J. 
Beutel`, `Heng Pan`, `Javier`, `Robert Steiner`, `Yan Gao` + +### Improvements + +- **Implement** `keys/values/items` **methods for** `TypedDict` ([#4146](https://github.com/adap/flower/pull/4146)) + +- **Fix parsing of** `--executor-config` **if present** ([#4125](https://github.com/adap/flower/pull/4125)) + +- **Adjust framework name in templates docstrings** ([#4127](https://github.com/adap/flower/pull/4127)) + +- **Update** `flwr new` **Hugging Face template** ([#4169](https://github.com/adap/flower/pull/4169)) + +- **Fix** `flwr new` **FlowerTune template** ([#4123](https://github.com/adap/flower/pull/4123)) + +- **Add buffer time after** `ServerApp` **thread initialization** ([#4119](https://github.com/adap/flower/pull/4119)) + +- **Handle unsuitable resources for simulation** ([#4143](https://github.com/adap/flower/pull/4143)) + +- **Update example READMEs** ([#4117](https://github.com/adap/flower/pull/4117)) + +- **Update SuperNode authentication docs** ([#4160](https://github.com/adap/flower/pull/4160)) + ### Incompatible changes None +## v1.11.0 (2024-08-30) + +### Thanks to our contributors + +We would like to give our special thanks to all the contributors who made the new version of Flower possible (in `git shortlog` order): + +`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Danny`, `Edoardo Gabrielli`, `Heng Pan`, `Javier`, `Meng Yan`, `Michal Danilowski`, `Mohammad Naseri`, `Robert Steiner`, `Steve Laskaridis`, `Taner Topal`, `Yan Gao` + +### What's new? 
+ +- **Deliver Flower App Bundle (FAB) to SuperLink and SuperNodes** ([#4006](https://github.com/adap/flower/pull/4006), [#3945](https://github.com/adap/flower/pull/3945), [#3999](https://github.com/adap/flower/pull/3999), [#4027](https://github.com/adap/flower/pull/4027), [#3851](https://github.com/adap/flower/pull/3851), [#3946](https://github.com/adap/flower/pull/3946), [#4003](https://github.com/adap/flower/pull/4003), [#4029](https://github.com/adap/flower/pull/4029), [#3942](https://github.com/adap/flower/pull/3942), [#3957](https://github.com/adap/flower/pull/3957), [#4020](https://github.com/adap/flower/pull/4020), [#4044](https://github.com/adap/flower/pull/4044), [#3852](https://github.com/adap/flower/pull/3852), [#4019](https://github.com/adap/flower/pull/4019), [#4031](https://github.com/adap/flower/pull/4031), [#4036](https://github.com/adap/flower/pull/4036), [#4049](https://github.com/adap/flower/pull/4049), [#4017](https://github.com/adap/flower/pull/4017), [#3943](https://github.com/adap/flower/pull/3943), [#3944](https://github.com/adap/flower/pull/3944), [#4011](https://github.com/adap/flower/pull/4011), [#3619](https://github.com/adap/flower/pull/3619)) + + Dynamic code updates are here! `flwr run` can now ship and install the latest version of your `ServerApp` and `ClientApp` to an already-running federation (SuperLink and SuperNodes). + + How does it work? `flwr run` bundles your Flower app into a single FAB (Flower App Bundle) file. It then ships this FAB file, via the SuperExec, to both the SuperLink and those SuperNodes that need it. This allows you to keep SuperExec, SuperLink and SuperNodes running as permanent infrastructure, and then ship code updates (including completely new projects!) dynamically. + + `flwr run` is all you need. 
+ +- **Introduce isolated** `ClientApp` **execution** ([#3970](https://github.com/adap/flower/pull/3970), [#3976](https://github.com/adap/flower/pull/3976), [#4002](https://github.com/adap/flower/pull/4002), [#4001](https://github.com/adap/flower/pull/4001), [#4034](https://github.com/adap/flower/pull/4034), [#4037](https://github.com/adap/flower/pull/4037), [#3977](https://github.com/adap/flower/pull/3977), [#4042](https://github.com/adap/flower/pull/4042), [#3978](https://github.com/adap/flower/pull/3978), [#4039](https://github.com/adap/flower/pull/4039), [#4033](https://github.com/adap/flower/pull/4033), [#3971](https://github.com/adap/flower/pull/3971), [#4035](https://github.com/adap/flower/pull/4035), [#3973](https://github.com/adap/flower/pull/3973), [#4032](https://github.com/adap/flower/pull/4032)) + + The SuperNode can now run your `ClientApp` in a fully isolated way. In an enterprise deployment, this allows you to set strict limits on what the `ClientApp` can and cannot do. + + `flower-supernode` supports three `--isolation` modes: + + - Unset: The SuperNode runs the `ClientApp` in the same process (as in previous versions of Flower). This is the default mode. + - `--isolation=subprocess`: The SuperNode starts a subprocess to run the `ClientApp`. + - `--isolation=process`: The SuperNode expects an externally-managed process to run the `ClientApp`. This external process is not managed by the SuperNode, so it has to be started beforehand and terminated manually. The common way to use this isolation mode is via the new `flwr/clientapp` Docker image. 
+ +- **Improve Docker support for enterprise deployments** ([#4050](https://github.com/adap/flower/pull/4050), [#4090](https://github.com/adap/flower/pull/4090), [#3784](https://github.com/adap/flower/pull/3784), [#3998](https://github.com/adap/flower/pull/3998), [#4094](https://github.com/adap/flower/pull/4094), [#3722](https://github.com/adap/flower/pull/3722)) + + Flower 1.11 ships many Docker improvements that are especially useful for enterprise deployments: + + - `flwr/supernode` comes with a new Alpine Docker image. + - `flwr/clientapp` is a new image to be used with the `--isolation=process` option. In this mode, SuperNode and `ClientApp` run in two different Docker containers. `flwr/supernode` (preferably the Alpine version) runs the long-running SuperNode with `--isolation=process`. `flwr/clientapp` runs the `ClientApp`. This is the recommended way to deploy Flower in enterprise settings. + - New all-in-one Docker Compose enables you to easily start a full Flower Deployment Engine on a single machine. + - Completely new Docker documentation: https://flower.ai/docs/framework/docker/index.html + +- **Improve SuperNode authentication** ([#4043](https://github.com/adap/flower/pull/4043), [#4047](https://github.com/adap/flower/pull/4047), [#4074](https://github.com/adap/flower/pull/4074)) + + SuperNode auth has been improved in several ways, including improved logging, improved testing, and improved error handling. 
+ +- **Update** `flwr new` **templates** ([#3933](https://github.com/adap/flower/pull/3933), [#3894](https://github.com/adap/flower/pull/3894), [#3930](https://github.com/adap/flower/pull/3930), [#3931](https://github.com/adap/flower/pull/3931), [#3997](https://github.com/adap/flower/pull/3997), [#3979](https://github.com/adap/flower/pull/3979), [#3965](https://github.com/adap/flower/pull/3965), [#4013](https://github.com/adap/flower/pull/4013), [#4064](https://github.com/adap/flower/pull/4064)) + + All `flwr new` templates have been updated to show the latest recommended use of Flower APIs. + +- **Improve Simulation Engine** ([#4095](https://github.com/adap/flower/pull/4095), [#3913](https://github.com/adap/flower/pull/3913), [#4059](https://github.com/adap/flower/pull/4059), [#3954](https://github.com/adap/flower/pull/3954), [#4071](https://github.com/adap/flower/pull/4071), [#3985](https://github.com/adap/flower/pull/3985), [#3988](https://github.com/adap/flower/pull/3988)) + + The Flower Simulation Engine comes with several updates, including improved run config support, verbose logging, simulation backend configuration via `flwr run`, and more. + +- **Improve** `RecordSet` ([#4052](https://github.com/adap/flower/pull/4052), [#3218](https://github.com/adap/flower/pull/3218), [#4016](https://github.com/adap/flower/pull/4016)) + + `RecordSet` is the core object to exchange model parameters, configuration values and metrics between `ClientApp` and `ServerApp`. This release ships several smaller improvements to `RecordSet` and related `*Record` types. 
+ +- **Update documentation** ([#3972](https://github.com/adap/flower/pull/3972), [#3925](https://github.com/adap/flower/pull/3925), [#4061](https://github.com/adap/flower/pull/4061), [#3984](https://github.com/adap/flower/pull/3984), [#3917](https://github.com/adap/flower/pull/3917), [#3900](https://github.com/adap/flower/pull/3900), [#4066](https://github.com/adap/flower/pull/4066), [#3765](https://github.com/adap/flower/pull/3765), [#4021](https://github.com/adap/flower/pull/4021), [#3906](https://github.com/adap/flower/pull/3906), [#4063](https://github.com/adap/flower/pull/4063), [#4076](https://github.com/adap/flower/pull/4076), [#3920](https://github.com/adap/flower/pull/3920), [#3916](https://github.com/adap/flower/pull/3916)) + + Many parts of the documentation, including the main tutorial, have been migrated to show new Flower APIs and other new Flower features like the improved Docker support. + +- **Migrate code example to use new Flower APIs** ([#3758](https://github.com/adap/flower/pull/3758), [#3701](https://github.com/adap/flower/pull/3701), [#3919](https://github.com/adap/flower/pull/3919), [#3918](https://github.com/adap/flower/pull/3918), [#3934](https://github.com/adap/flower/pull/3934), [#3893](https://github.com/adap/flower/pull/3893), [#3833](https://github.com/adap/flower/pull/3833), [#3922](https://github.com/adap/flower/pull/3922), [#3846](https://github.com/adap/flower/pull/3846), [#3777](https://github.com/adap/flower/pull/3777), [#3874](https://github.com/adap/flower/pull/3874), [#3873](https://github.com/adap/flower/pull/3873), [#3935](https://github.com/adap/flower/pull/3935), [#3754](https://github.com/adap/flower/pull/3754), [#3980](https://github.com/adap/flower/pull/3980), [#4089](https://github.com/adap/flower/pull/4089), [#4046](https://github.com/adap/flower/pull/4046), [#3314](https://github.com/adap/flower/pull/3314), [#3316](https://github.com/adap/flower/pull/3316), [#3295](https://github.com/adap/flower/pull/3295), 
[#3313](https://github.com/adap/flower/pull/3313)) + + Many code examples have been migrated to use new Flower APIs. + +- **Update Flower framework, framework internals and quality infrastructure** ([#4018](https://github.com/adap/flower/pull/4018), [#4053](https://github.com/adap/flower/pull/4053), [#4098](https://github.com/adap/flower/pull/4098), [#4067](https://github.com/adap/flower/pull/4067), [#4105](https://github.com/adap/flower/pull/4105), [#4048](https://github.com/adap/flower/pull/4048), [#4107](https://github.com/adap/flower/pull/4107), [#4069](https://github.com/adap/flower/pull/4069), [#3915](https://github.com/adap/flower/pull/3915), [#4101](https://github.com/adap/flower/pull/4101), [#4108](https://github.com/adap/flower/pull/4108), [#3914](https://github.com/adap/flower/pull/3914), [#4068](https://github.com/adap/flower/pull/4068), [#4041](https://github.com/adap/flower/pull/4041), [#4040](https://github.com/adap/flower/pull/4040), [#3986](https://github.com/adap/flower/pull/3986), [#4026](https://github.com/adap/flower/pull/4026), [#3961](https://github.com/adap/flower/pull/3961), [#3975](https://github.com/adap/flower/pull/3975), [#3983](https://github.com/adap/flower/pull/3983), [#4091](https://github.com/adap/flower/pull/4091), [#3982](https://github.com/adap/flower/pull/3982), [#4079](https://github.com/adap/flower/pull/4079), [#4073](https://github.com/adap/flower/pull/4073), [#4060](https://github.com/adap/flower/pull/4060), [#4106](https://github.com/adap/flower/pull/4106), [#4080](https://github.com/adap/flower/pull/4080), [#3974](https://github.com/adap/flower/pull/3974), [#3996](https://github.com/adap/flower/pull/3996), [#3991](https://github.com/adap/flower/pull/3991), [#3981](https://github.com/adap/flower/pull/3981), [#4093](https://github.com/adap/flower/pull/4093), [#4100](https://github.com/adap/flower/pull/4100), [#3939](https://github.com/adap/flower/pull/3939), [#3955](https://github.com/adap/flower/pull/3955), 
[#3940](https://github.com/adap/flower/pull/3940), [#4038](https://github.com/adap/flower/pull/4038)) + + As always, many parts of the Flower framework and quality infrastructure were improved and updated. + +### Deprecations + +- **Deprecate accessing `Context` via `Client.context`** ([#3797](https://github.com/adap/flower/pull/3797)) + + Now that both `client_fn` and `server_fn` receive a `Context` object, accessing `Context` via `Client.context` is deprecated. `Client.context` will be removed in a future release. If you need to access `Context` in your `Client` implementation, pass it manually when creating the `Client` instance in `client_fn`: + + ```python + def client_fn(context: Context) -> Client: + return FlowerClient(context).to_client() + ``` + +### Incompatible changes + +- **Update CLIs to accept an app directory instead of** `ClientApp` **and** `ServerApp` ([#3952](https://github.com/adap/flower/pull/3952), [#4077](https://github.com/adap/flower/pull/4077), [#3850](https://github.com/adap/flower/pull/3850)) + + The CLI commands `flower-supernode` and `flower-server-app` now accept an app directory as argument (instead of references to a `ClientApp` or `ServerApp`). An app directory is any directory containing a `pyproject.toml` file (with the appropriate Flower config fields set). The easiest way to generate a compatible project structure is to use `flwr new`. + +- **Disable** `flower-client-app` **CLI command** ([#4022](https://github.com/adap/flower/pull/4022)) + + `flower-client-app` has been disabled. Use `flower-supernode` instead. + +- **Use spaces instead of commas for separating config args** ([#4000](https://github.com/adap/flower/pull/4000)) + + When passing configs (run config, node config) to Flower, you now need to separate key-value pairs using spaces instead of commas. For example: + + ```bash + flwr run . 
--run-config "learning-rate=0.01 num_rounds=10" # Works + ``` + + Previously, you could pass configs using commas, like this: + + ```bash + flwr run . --run-config "learning-rate=0.01,num_rounds=10" # Doesn't work + ``` + +- **Remove** `flwr example` **CLI command** ([#4084](https://github.com/adap/flower/pull/4084)) + + The experimental `flwr example` CLI command has been removed. Use `flwr new` to generate a project and then run it using `flwr run`. + +## v1.10.0 (2024-07-24) + +### Thanks to our contributors + +We would like to give our special thanks to all the contributors who made the new version of Flower possible (in `git shortlog` order): + +`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear Ashimine`, `Javier`, `Jiahao Tan`, `Mohammad Naseri`, `Robert Steiner`, `Sebastian van der Voort`, `Taner Topal`, `Yan Gao` + +### What's new? + +- **Introduce** `flwr run` **(beta)** ([#3810](https://github.com/adap/flower/pull/3810), [#3826](https://github.com/adap/flower/pull/3826), [#3880](https://github.com/adap/flower/pull/3880), [#3807](https://github.com/adap/flower/pull/3807), [#3800](https://github.com/adap/flower/pull/3800), [#3814](https://github.com/adap/flower/pull/3814), [#3811](https://github.com/adap/flower/pull/3811), [#3809](https://github.com/adap/flower/pull/3809), [#3819](https://github.com/adap/flower/pull/3819)) + + Flower 1.10 ships the first beta release of the new `flwr run` command. `flwr run` can run different projects using `flwr run path/to/project`, it enables you to easily switch between different federations using `flwr run . federation` and it runs your Flower project using either local simulation or the new (experimental) SuperExec service. This allows Flower to scale federated learning from fast local simulation to large-scale production deployment, seamlessly.
All projects generated with `flwr new` are immediately runnable using `flwr run`. Give it a try: use `flwr new` to generate a project and then run it using `flwr run`. + +- **Introduce run config** ([#3751](https://github.com/adap/flower/pull/3751), [#3750](https://github.com/adap/flower/pull/3750), [#3845](https://github.com/adap/flower/pull/3845), [#3824](https://github.com/adap/flower/pull/3824), [#3746](https://github.com/adap/flower/pull/3746), [#3728](https://github.com/adap/flower/pull/3728), [#3730](https://github.com/adap/flower/pull/3730), [#3725](https://github.com/adap/flower/pull/3725), [#3729](https://github.com/adap/flower/pull/3729), [#3580](https://github.com/adap/flower/pull/3580), [#3578](https://github.com/adap/flower/pull/3578), [#3576](https://github.com/adap/flower/pull/3576), [#3798](https://github.com/adap/flower/pull/3798), [#3732](https://github.com/adap/flower/pull/3732), [#3815](https://github.com/adap/flower/pull/3815)) + + The new run config feature allows you to run your Flower project in different configurations without having to change a single line of code. You can now build a configurable `ServerApp` and `ClientApp` that read configuration values at runtime. This enables you to specify config values like `learning-rate=0.01` in `pyproject.toml` (under the `[tool.flwr.app.config]` key). These config values can then be easily overridden via `flwr run --run-config learning-rate=0.02`, and read from `Context` using `lr = context.run_config["learning-rate"]`. Create a new project using `flwr new` to see run config in action. + +- **Generalize** `client_fn` **signature to** `client_fn(context: Context) -> Client` ([#3779](https://github.com/adap/flower/pull/3779), [#3697](https://github.com/adap/flower/pull/3697), [#3694](https://github.com/adap/flower/pull/3694), [#3696](https://github.com/adap/flower/pull/3696)) + + The `client_fn` signature has been generalized to `client_fn(context: Context) -> Client`. 
It now receives a `Context` object instead of the (now deprecated) `cid: str`. `Context` allows accessing `node_id`, `node_config` and `run_config`, among other things. This enables you to build a configurable `ClientApp` that leverages the new run config system. + + The previous signature `client_fn(cid: str)` is now deprecated and support for it will be removed in a future release. Use `client_fn(context: Context) -> Client` everywhere. + +- **Introduce new** `server_fn(context)` ([#3773](https://github.com/adap/flower/pull/3773), [#3796](https://github.com/adap/flower/pull/3796), [#3771](https://github.com/adap/flower/pull/3771)) + + In addition to the new `client_fn(context: Context)`, a new `server_fn(context: Context) -> ServerAppComponents` can now be passed to `ServerApp` (instead of passing, for example, `Strategy`, directly). This enables you to leverage the full `Context` on the server-side to build a configurable `ServerApp`. + +- **Relaunch all** `flwr new` **templates** ([#3877](https://github.com/adap/flower/pull/3877), [#3821](https://github.com/adap/flower/pull/3821), [#3587](https://github.com/adap/flower/pull/3587), [#3795](https://github.com/adap/flower/pull/3795), [#3875](https://github.com/adap/flower/pull/3875), [#3859](https://github.com/adap/flower/pull/3859), [#3760](https://github.com/adap/flower/pull/3760)) + + All `flwr new` templates have been significantly updated to showcase new Flower features and best practices. This includes using `flwr run` and the new run config feature. You can now easily create a new project using `flwr new` and, after following the instructions to install it, `flwr run` it. + +- **Introduce** `flower-supernode` **(preview)** ([#3353](https://github.com/adap/flower/pull/3353)) + + The new `flower-supernode` CLI is here to replace `flower-client-app`. `flower-supernode` brings full multi-app support to the Flower client-side. 
It also allows to pass `--node-config` to the SuperNode, which is accessible in your `ClientApp` via `Context` (using the new `client_fn(context: Context)` signature). + +- **Introduce node config** ([#3782](https://github.com/adap/flower/pull/3782), [#3780](https://github.com/adap/flower/pull/3780), [#3695](https://github.com/adap/flower/pull/3695), [#3886](https://github.com/adap/flower/pull/3886)) + + A new node config feature allows you to pass a static configuration to the SuperNode. This configuration is read-only and available to every `ClientApp` running on that SuperNode. A `ClientApp` can access the node config via `Context` (`context.node_config`). + +- **Introduce SuperExec (experimental)** ([#3605](https://github.com/adap/flower/pull/3605), [#3723](https://github.com/adap/flower/pull/3723), [#3731](https://github.com/adap/flower/pull/3731), [#3589](https://github.com/adap/flower/pull/3589), [#3604](https://github.com/adap/flower/pull/3604), [#3622](https://github.com/adap/flower/pull/3622), [#3838](https://github.com/adap/flower/pull/3838), [#3720](https://github.com/adap/flower/pull/3720), [#3606](https://github.com/adap/flower/pull/3606), [#3602](https://github.com/adap/flower/pull/3602), [#3603](https://github.com/adap/flower/pull/3603), [#3555](https://github.com/adap/flower/pull/3555), [#3808](https://github.com/adap/flower/pull/3808), [#3724](https://github.com/adap/flower/pull/3724), [#3658](https://github.com/adap/flower/pull/3658), [#3629](https://github.com/adap/flower/pull/3629)) + + This is the first experimental release of Flower SuperExec, a new service that executes your runs. It's not ready for production deployment just yet, but don't hesitate to give it a try if you're interested. + +- **Add new federated learning with tabular data example** ([#3568](https://github.com/adap/flower/pull/3568)) + + A new code example exemplifies a federated learning setup using the Flower framework on the Adult Census Income tabular dataset. 
+ +- **Create generic adapter layer (preview)** ([#3538](https://github.com/adap/flower/pull/3538), [#3536](https://github.com/adap/flower/pull/3536), [#3540](https://github.com/adap/flower/pull/3540)) + + A new generic gRPC adapter layer allows 3rd-party frameworks to integrate with Flower in a transparent way. This makes Flower more modular and allows for integration into other federated learning solutions and platforms. + +- **Refactor Flower Simulation Engine** ([#3581](https://github.com/adap/flower/pull/3581), [#3471](https://github.com/adap/flower/pull/3471), [#3804](https://github.com/adap/flower/pull/3804), [#3468](https://github.com/adap/flower/pull/3468), [#3839](https://github.com/adap/flower/pull/3839), [#3806](https://github.com/adap/flower/pull/3806), [#3861](https://github.com/adap/flower/pull/3861), [#3543](https://github.com/adap/flower/pull/3543), [#3472](https://github.com/adap/flower/pull/3472), [#3829](https://github.com/adap/flower/pull/3829), [#3469](https://github.com/adap/flower/pull/3469)) + + The Simulation Engine was significantly refactored. This results in faster and more stable simulations. It is also the foundation for upcoming changes that aim to provide the next level of performance and configurability in federated learning simulations. + +- **Optimize Docker containers** ([#3591](https://github.com/adap/flower/pull/3591)) + + Flower Docker containers were optimized and updated to use the latest Flower framework features. + +- **Improve logging** ([#3776](https://github.com/adap/flower/pull/3776), [#3789](https://github.com/adap/flower/pull/3789)) + + Improved logging aims to be more concise and helpful to show you the details you actually care about. 
+ +- **Refactor framework internals** ([#3621](https://github.com/adap/flower/pull/3621), [#3792](https://github.com/adap/flower/pull/3792), [#3772](https://github.com/adap/flower/pull/3772), [#3805](https://github.com/adap/flower/pull/3805), [#3583](https://github.com/adap/flower/pull/3583), [#3825](https://github.com/adap/flower/pull/3825), [#3597](https://github.com/adap/flower/pull/3597), [#3802](https://github.com/adap/flower/pull/3802), [#3569](https://github.com/adap/flower/pull/3569)) + + As always, many parts of the Flower framework and quality infrastructure were improved and updated. + +### Documentation improvements + +- **Add 🇰🇷 Korean translations** ([#3680](https://github.com/adap/flower/pull/3680)) + +- **Update translations** ([#3586](https://github.com/adap/flower/pull/3586), [#3679](https://github.com/adap/flower/pull/3679), [#3570](https://github.com/adap/flower/pull/3570), [#3681](https://github.com/adap/flower/pull/3681), [#3617](https://github.com/adap/flower/pull/3617), [#3674](https://github.com/adap/flower/pull/3674), [#3671](https://github.com/adap/flower/pull/3671), [#3572](https://github.com/adap/flower/pull/3572), [#3631](https://github.com/adap/flower/pull/3631)) + +- **Update documentation** ([#3864](https://github.com/adap/flower/pull/3864), [#3688](https://github.com/adap/flower/pull/3688), [#3562](https://github.com/adap/flower/pull/3562), [#3641](https://github.com/adap/flower/pull/3641), [#3384](https://github.com/adap/flower/pull/3384), [#3634](https://github.com/adap/flower/pull/3634), [#3823](https://github.com/adap/flower/pull/3823), [#3793](https://github.com/adap/flower/pull/3793), [#3707](https://github.com/adap/flower/pull/3707)) + + Updated documentation includes new install instructions for different shells, a new Flower Code Examples documentation landing page, new `flwr` CLI docs and an updated federated XGBoost code example. 
+ +### Deprecations + +- **Deprecate** `client_fn(cid: str)` + + `client_fn` used to have a signature `client_fn(cid: str) -> Client`. This signature is now deprecated. Use the new signature `client_fn(context: Context) -> Client` instead. The new argument `context` allows accessing `node_id`, `node_config`, `run_config` and other `Context` features. When running using the simulation engine (or using `flower-supernode` with a custom `--node-config partition-id=...`), `context.node_config["partition-id"]` will return an `int` partition ID that can be used with Flower Datasets to load a different partition of the dataset on each simulated or deployed SuperNode. + +- **Deprecate passing** `Server/ServerConfig/Strategy/ClientManager` **to** `ServerApp` **directly** + + Creating `ServerApp` using `ServerApp(config=config, strategy=strategy)` is now deprecated. Instead of passing `Server/ServerConfig/Strategy/ClientManager` to `ServerApp` directly, pass them wrapped in a `server_fn(context: Context) -> ServerAppComponents` function, like this: `ServerApp(server_fn=server_fn)`. `ServerAppComponents` can hold references to `Server/ServerConfig/Strategy/ClientManager`. In addition to that, `server_fn` allows you to access `Context` (for example, to read the `run_config`). + +### Incompatible changes + +- **Remove support for `client_ids` in `start_simulation`** ([#3699](https://github.com/adap/flower/pull/3699)) + + The (rarely used) feature that allowed passing custom `client_ids` to the `start_simulation` function was removed. This removal is part of a bigger effort to refactor the simulation engine and unify how the Flower internals work in simulation and deployment. + +- **Remove `flower-driver-api` and `flower-fleet-api`** ([#3418](https://github.com/adap/flower/pull/3418)) + + The two deprecated CLI commands `flower-driver-api` and `flower-fleet-api` were removed in an effort to streamline the SuperLink developer experience. Use `flower-superlink` instead. 
+ +## v1.9.0 (2024-06-10) + +### Thanks to our contributors + +We would like to give our special thanks to all the contributors who made the new version of Flower possible (in `git shortlog` order): + +`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Mahdi Beitollahi`, `Robert Steiner`, `Taner Topal`, `Yan Gao`, `bapic`, `mohammadnaseri` + +### What's new? + +- **Introduce built-in authentication (preview)** ([#2946](https://github.com/adap/flower/pull/2946), [#3388](https://github.com/adap/flower/pull/3388), [#2948](https://github.com/adap/flower/pull/2948), [#2917](https://github.com/adap/flower/pull/2917), [#3386](https://github.com/adap/flower/pull/3386), [#3308](https://github.com/adap/flower/pull/3308), [#3001](https://github.com/adap/flower/pull/3001), [#3409](https://github.com/adap/flower/pull/3409), [#2999](https://github.com/adap/flower/pull/2999), [#2979](https://github.com/adap/flower/pull/2979), [#3389](https://github.com/adap/flower/pull/3389), [#3503](https://github.com/adap/flower/pull/3503), [#3366](https://github.com/adap/flower/pull/3366), [#3357](https://github.com/adap/flower/pull/3357)) + + Flower 1.9 introduces the first built-in version of client node authentication. In previous releases, users often wrote glue code to connect Flower to external authentication systems. With this release, the SuperLink can authenticate SuperNodes using a built-in authentication system. A new [how-to guide](https://flower.ai/docs/framework/how-to-authenticate-supernodes.html) and a new [code example](https://github.com/adap/flower/tree/main/examples/flower-authentication) help you to get started. + + This is the first preview release of the Flower-native authentication system. Many additional features are on the roadmap for upcoming Flower releases - stay tuned. 
+ +- **Introduce end-to-end Docker support** ([#3483](https://github.com/adap/flower/pull/3483), [#3266](https://github.com/adap/flower/pull/3266), [#3390](https://github.com/adap/flower/pull/3390), [#3283](https://github.com/adap/flower/pull/3283), [#3285](https://github.com/adap/flower/pull/3285), [#3391](https://github.com/adap/flower/pull/3391), [#3403](https://github.com/adap/flower/pull/3403), [#3458](https://github.com/adap/flower/pull/3458), [#3533](https://github.com/adap/flower/pull/3533), [#3453](https://github.com/adap/flower/pull/3453), [#3486](https://github.com/adap/flower/pull/3486), [#3290](https://github.com/adap/flower/pull/3290)) + + Full Flower Next Docker support is here! With the release of Flower 1.9, Flower provides stable Docker images for the Flower SuperLink, the Flower SuperNode, and the Flower `ServerApp`. This set of images enables you to run all Flower components in Docker. Check out the new [how-to guide](https://flower.ai/docs/framework/how-to-run-flower-using-docker.html) to get started. + +- **Re-architect Flower Next simulation engine** ([#3307](https://github.com/adap/flower/pull/3307), [#3355](https://github.com/adap/flower/pull/3355), [#3272](https://github.com/adap/flower/pull/3272), [#3273](https://github.com/adap/flower/pull/3273), [#3417](https://github.com/adap/flower/pull/3417), [#3281](https://github.com/adap/flower/pull/3281), [#3343](https://github.com/adap/flower/pull/3343), [#3326](https://github.com/adap/flower/pull/3326)) + + Flower Next simulations now use a new in-memory `Driver` that improves the reliability of simulations, especially in notebook environments. This is a significant step towards a complete overhaul of the Flower Next simulation architecture. 
+ +- **Upgrade simulation engine** ([#3354](https://github.com/adap/flower/pull/3354), [#3378](https://github.com/adap/flower/pull/3378), [#3262](https://github.com/adap/flower/pull/3262), [#3435](https://github.com/adap/flower/pull/3435), [#3501](https://github.com/adap/flower/pull/3501), [#3482](https://github.com/adap/flower/pull/3482), [#3494](https://github.com/adap/flower/pull/3494)) + + The Flower Next simulation engine comes with improved and configurable logging. The Ray-based simulation backend in Flower 1.9 was updated to use Ray 2.10. + +- **Introduce FedPFT baseline** ([#3268](https://github.com/adap/flower/pull/3268)) + + FedPFT allows you to perform one-shot Federated Learning by leveraging widely available foundational models, dramatically reducing communication costs while delivering high performing models. This is work led by Mahdi Beitollahi from Huawei Noah's Ark Lab (Montreal, Canada). Read all the details in their paper: "Parametric Feature Transfer: One-shot Federated Learning with Foundation Models" ([arxiv](https://arxiv.org/abs/2402.01862)) + +- **Launch additional** `flwr new` **templates for Apple MLX, Hugging Face Transformers, scikit-learn and TensorFlow** ([#3291](https://github.com/adap/flower/pull/3291), [#3139](https://github.com/adap/flower/pull/3139), [#3284](https://github.com/adap/flower/pull/3284), [#3251](https://github.com/adap/flower/pull/3251), [#3376](https://github.com/adap/flower/pull/3376), [#3287](https://github.com/adap/flower/pull/3287)) + + The `flwr` CLI's `flwr new` command is starting to become everyone's favorite way of creating new Flower projects. This release introduces additional `flwr new` templates for Apple MLX, Hugging Face Transformers, scikit-learn and TensorFlow. In addition to that, existing templates also received updates. 
+ +- **Refine** `RecordSet` **API** ([#3209](https://github.com/adap/flower/pull/3209), [#3331](https://github.com/adap/flower/pull/3331), [#3334](https://github.com/adap/flower/pull/3334), [#3335](https://github.com/adap/flower/pull/3335), [#3375](https://github.com/adap/flower/pull/3375), [#3368](https://github.com/adap/flower/pull/3368)) + + `RecordSet` is part of the Flower Next low-level API preview release. In Flower 1.9, `RecordSet` received a number of usability improvements that make it easier to build `RecordSet`-based `ServerApp`s and `ClientApp`s. + +- **Beautify logging** ([#3379](https://github.com/adap/flower/pull/3379), [#3430](https://github.com/adap/flower/pull/3430), [#3461](https://github.com/adap/flower/pull/3461), [#3360](https://github.com/adap/flower/pull/3360), [#3433](https://github.com/adap/flower/pull/3433)) + + Logs received a substantial update. Not only are logs now much nicer to look at, but they are also more configurable. + +- **Improve reliability** ([#3564](https://github.com/adap/flower/pull/3564), [#3561](https://github.com/adap/flower/pull/3561), [#3566](https://github.com/adap/flower/pull/3566), [#3462](https://github.com/adap/flower/pull/3462), [#3225](https://github.com/adap/flower/pull/3225), [#3514](https://github.com/adap/flower/pull/3514), [#3535](https://github.com/adap/flower/pull/3535), [#3372](https://github.com/adap/flower/pull/3372)) + + Flower 1.9 includes reliability improvements across many parts of the system. One example is a much improved SuperNode shutdown procedure. + +- **Update Swift and C++ SDKs** ([#3321](https://github.com/adap/flower/pull/3321), [#2763](https://github.com/adap/flower/pull/2763)) + + In the C++ SDK, communication-related code is now separate from main client logic. A new abstract class `Communicator` has been introduced alongside a gRPC implementation of it. 
+ +- **Improve testing, tooling and CI/CD infrastructure** ([#3294](https://github.com/adap/flower/pull/3294), [#3282](https://github.com/adap/flower/pull/3282), [#3311](https://github.com/adap/flower/pull/3311), [#2878](https://github.com/adap/flower/pull/2878), [#3333](https://github.com/adap/flower/pull/3333), [#3255](https://github.com/adap/flower/pull/3255), [#3349](https://github.com/adap/flower/pull/3349), [#3400](https://github.com/adap/flower/pull/3400), [#3401](https://github.com/adap/flower/pull/3401), [#3399](https://github.com/adap/flower/pull/3399), [#3346](https://github.com/adap/flower/pull/3346), [#3398](https://github.com/adap/flower/pull/3398), [#3397](https://github.com/adap/flower/pull/3397), [#3347](https://github.com/adap/flower/pull/3347), [#3502](https://github.com/adap/flower/pull/3502), [#3387](https://github.com/adap/flower/pull/3387), [#3542](https://github.com/adap/flower/pull/3542), [#3396](https://github.com/adap/flower/pull/3396), [#3496](https://github.com/adap/flower/pull/3496), [#3465](https://github.com/adap/flower/pull/3465), [#3473](https://github.com/adap/flower/pull/3473), [#3484](https://github.com/adap/flower/pull/3484), [#3521](https://github.com/adap/flower/pull/3521), [#3363](https://github.com/adap/flower/pull/3363), [#3497](https://github.com/adap/flower/pull/3497), [#3464](https://github.com/adap/flower/pull/3464), [#3495](https://github.com/adap/flower/pull/3495), [#3478](https://github.com/adap/flower/pull/3478), [#3271](https://github.com/adap/flower/pull/3271)) + + As always, the Flower tooling, testing, and CI/CD infrastructure has received many updates. 
+ +- **Improve documentation** ([#3530](https://github.com/adap/flower/pull/3530), [#3539](https://github.com/adap/flower/pull/3539), [#3425](https://github.com/adap/flower/pull/3425), [#3520](https://github.com/adap/flower/pull/3520), [#3286](https://github.com/adap/flower/pull/3286), [#3516](https://github.com/adap/flower/pull/3516), [#3523](https://github.com/adap/flower/pull/3523), [#3545](https://github.com/adap/flower/pull/3545), [#3498](https://github.com/adap/flower/pull/3498), [#3439](https://github.com/adap/flower/pull/3439), [#3440](https://github.com/adap/flower/pull/3440), [#3382](https://github.com/adap/flower/pull/3382), [#3559](https://github.com/adap/flower/pull/3559), [#3432](https://github.com/adap/flower/pull/3432), [#3278](https://github.com/adap/flower/pull/3278), [#3371](https://github.com/adap/flower/pull/3371), [#3519](https://github.com/adap/flower/pull/3519), [#3267](https://github.com/adap/flower/pull/3267), [#3204](https://github.com/adap/flower/pull/3204), [#3274](https://github.com/adap/flower/pull/3274)) + + As always, the Flower documentation has received many updates. 
Notable new pages include: + + - [How-to upgrade to Flower Next (Flower Next migration guide)](https://flower.ai/docs/framework/how-to-upgrade-to-flower-next.html) + + - [How-to run Flower using Docker](https://flower.ai/docs/framework/how-to-run-flower-using-docker.html) + + - [Flower Mods reference](https://flower.ai/docs/framework/ref-api/flwr.client.mod.html#module-flwr.client.mod) + +- **General updates to Flower Examples** ([#3205](https://github.com/adap/flower/pull/3205), [#3226](https://github.com/adap/flower/pull/3226), [#3211](https://github.com/adap/flower/pull/3211), [#3252](https://github.com/adap/flower/pull/3252), [#3427](https://github.com/adap/flower/pull/3427), [#3410](https://github.com/adap/flower/pull/3410), [#3426](https://github.com/adap/flower/pull/3426), [#3228](https://github.com/adap/flower/pull/3228), [#3342](https://github.com/adap/flower/pull/3342), [#3200](https://github.com/adap/flower/pull/3200), [#3202](https://github.com/adap/flower/pull/3202), [#3394](https://github.com/adap/flower/pull/3394), [#3488](https://github.com/adap/flower/pull/3488), [#3329](https://github.com/adap/flower/pull/3329), [#3526](https://github.com/adap/flower/pull/3526), [#3392](https://github.com/adap/flower/pull/3392), [#3474](https://github.com/adap/flower/pull/3474), [#3269](https://github.com/adap/flower/pull/3269)) + + As always, Flower code examples have received many updates. 
+ +- **General improvements** ([#3532](https://github.com/adap/flower/pull/3532), [#3318](https://github.com/adap/flower/pull/3318), [#3565](https://github.com/adap/flower/pull/3565), [#3296](https://github.com/adap/flower/pull/3296), [#3305](https://github.com/adap/flower/pull/3305), [#3246](https://github.com/adap/flower/pull/3246), [#3224](https://github.com/adap/flower/pull/3224), [#3475](https://github.com/adap/flower/pull/3475), [#3297](https://github.com/adap/flower/pull/3297), [#3317](https://github.com/adap/flower/pull/3317), [#3429](https://github.com/adap/flower/pull/3429), [#3196](https://github.com/adap/flower/pull/3196), [#3534](https://github.com/adap/flower/pull/3534), [#3240](https://github.com/adap/flower/pull/3240), [#3365](https://github.com/adap/flower/pull/3365), [#3407](https://github.com/adap/flower/pull/3407), [#3563](https://github.com/adap/flower/pull/3563), [#3344](https://github.com/adap/flower/pull/3344), [#3330](https://github.com/adap/flower/pull/3330), [#3436](https://github.com/adap/flower/pull/3436), [#3300](https://github.com/adap/flower/pull/3300), [#3327](https://github.com/adap/flower/pull/3327), [#3254](https://github.com/adap/flower/pull/3254), [#3253](https://github.com/adap/flower/pull/3253), [#3419](https://github.com/adap/flower/pull/3419), [#3289](https://github.com/adap/flower/pull/3289), [#3208](https://github.com/adap/flower/pull/3208), [#3245](https://github.com/adap/flower/pull/3245), [#3319](https://github.com/adap/flower/pull/3319), [#3203](https://github.com/adap/flower/pull/3203), [#3423](https://github.com/adap/flower/pull/3423), [#3352](https://github.com/adap/flower/pull/3352), [#3292](https://github.com/adap/flower/pull/3292), [#3261](https://github.com/adap/flower/pull/3261)) + +### Deprecations + +- **Deprecate Python 3.8 support** + + Python 3.8 will stop receiving security fixes in [October 2024](https://devguide.python.org/versions/). 
Support for Python 3.8 is now deprecated and will be removed in an upcoming release. + +- **Deprecate (experimental)** `flower-driver-api` **and** `flower-fleet-api` ([#3416](https://github.com/adap/flower/pull/3416), [#3420](https://github.com/adap/flower/pull/3420)) + + Flower 1.9 deprecates the two (experimental) commands `flower-driver-api` and `flower-fleet-api`. Both commands will be removed in an upcoming release. Use `flower-superlink` instead. + +- **Deprecate** `--server` **in favor of** `--superlink` ([#3518](https://github.com/adap/flower/pull/3518)) + + The commands `flower-server-app` and `flower-client-app` should use `--superlink` instead of the now deprecated `--server`. Support for `--server` will be removed in a future release. + +### Incompatible changes + +- **Replace** `flower-superlink` **CLI option** `--certificates` **with** `--ssl-ca-certfile` **,** `--ssl-certfile` **and** `--ssl-keyfile` ([#3512](https://github.com/adap/flower/pull/3512), [#3408](https://github.com/adap/flower/pull/3408)) + + SSL-related `flower-superlink` CLI arguments were restructured in an incompatible way. Instead of passing a single `--certificates` flag with three values, you now need to pass three flags (`--ssl-ca-certfile`, `--ssl-certfile` and `--ssl-keyfile`) with one value each. Check out the [SSL connections](https://flower.ai/docs/framework/how-to-enable-ssl-connections.html) documentation page for details. + +- **Remove SuperLink** `--vce` **option** ([#3513](https://github.com/adap/flower/pull/3513)) + + Instead of separately starting a SuperLink and a `ServerApp` for simulation, simulations must now be started using the single `flower-simulation` command. + +- **Merge** `--grpc-rere` **and** `--rest` **SuperLink options** ([#3527](https://github.com/adap/flower/pull/3527)) + + To simplify the usage of `flower-superlink`, previously separate sets of CLI options for gRPC and REST were merged into one unified set of options. 
Consult the [Flower CLI reference documentation](https://flower.ai/docs/framework/ref-api-cli.html) for details. + ## v1.8.0 (2024-04-03) ### Thanks to our contributors @@ -790,7 +1186,7 @@ We would like to give our **special thanks** to all the contributors who made Fl - **Improved Virtual Client Engine compatibility with Jupyter Notebook / Google Colab** ([#866](https://github.com/adap/flower/pull/866), [#872](https://github.com/adap/flower/pull/872), [#833](https://github.com/adap/flower/pull/833), [#1036](https://github.com/adap/flower/pull/1036)) - Simulations (using the Virtual Client Engine through `start_simulation`) now work more smoothly on Jupyter Notebooks (incl. Google Colab) after installing Flower with the `simulation` extra (`pip install flwr[simulation]`). + Simulations (using the Virtual Client Engine through `start_simulation`) now work more smoothly on Jupyter Notebooks (incl. Google Colab) after installing Flower with the `simulation` extra (`pip install 'flwr[simulation]'`). - **New Jupyter Notebook code example** ([#833](https://github.com/adap/flower/pull/833)) diff --git a/doc/source/ref-example-projects.rst b/doc/source/ref-example-projects.rst index 597e3a596c51..4f0a3014e1d4 100644 --- a/doc/source/ref-example-projects.rst +++ b/doc/source/ref-example-projects.rst @@ -1,48 +1,52 @@ Example projects ================ -Flower comes with a number of usage examples. The examples demonstrate how -Flower can be used to federate different kinds of existing machine learning -pipelines, usually leveraging popular machine learning frameworks such as -`PyTorch `_ or -`TensorFlow `_. +Flower comes with a number of usage examples. The examples demonstrate how Flower can be +used to federate different kinds of existing machine learning pipelines, usually +leveraging popular machine learning frameworks such as `PyTorch `_ +or `TensorFlow `_. The following examples are available as standalone projects. 
+ Quickstart TensorFlow/Keras --------------------------- -The TensorFlow/Keras quickstart example shows CIFAR-10 image classification -with MobileNetV2: +The TensorFlow/Keras quickstart example shows CIFAR-10 image classification with +MobileNetV2: -- `Quickstart TensorFlow (Code) `_ +- `Quickstart TensorFlow (Code) + `_ - :doc:`Quickstart TensorFlow (Tutorial) ` -- `Quickstart TensorFlow (Blog Post) `_ - +- `Quickstart TensorFlow (Blog Post) + `_ Quickstart PyTorch ------------------ -The PyTorch quickstart example shows CIFAR-10 image classification -with a simple Convolutional Neural Network: +The PyTorch quickstart example shows CIFAR-10 image classification with a simple +Convolutional Neural Network: -- `Quickstart PyTorch (Code) `_ +- `Quickstart PyTorch (Code) + `_ - :doc:`Quickstart PyTorch (Tutorial) ` - PyTorch: From Centralized To Federated -------------------------------------- This example shows how a regular PyTorch project can be federated using Flower: -- `PyTorch: From Centralized To Federated (Code) `_ -- :doc:`PyTorch: From Centralized To Federated (Tutorial) ` - +- `PyTorch: From Centralized To Federated (Code) + `_ +- :doc:`PyTorch: From Centralized To Federated (Tutorial) + ` Federated Learning on Raspberry Pi and Nvidia Jetson ---------------------------------------------------- -This example shows how Flower can be used to build a federated learning system that run across Raspberry Pi and Nvidia Jetson: - -- `Federated Learning on Raspberry Pi and Nvidia Jetson (Code) `_ -- `Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) `_ +This example shows how Flower can be used to build a federated learning system that run +across Raspberry Pi and Nvidia Jetson: +- `Federated Learning on Raspberry Pi and Nvidia Jetson (Code) + `_ +- `Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) + `_ diff --git a/doc/source/ref-faq.rst b/doc/source/ref-faq.rst index 26b7dca4a0a7..0bd004f81858 100644 --- a/doc/source/ref-faq.rst +++ 
b/doc/source/ref-faq.rst @@ -1,7 +1,8 @@ FAQ === -This page collects answers to commonly asked questions about Federated Learning with Flower. +This page collects answers to commonly asked questions about Federated Learning with +Flower. .. dropdown:: :fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab? @@ -25,6 +26,9 @@ This page collects answers to commonly asked questions about Federated Learning Yes, of course. A list of available examples using Flower within a blockchain environment is available here: + * `FLock: A Decentralised AI Training Platform `_. + * Contribute to on-chain training the model and earn rewards. + * Local blockchain with federated learning simulation. * `Flower meets Nevermined GitHub Repository `_. * `Flower meets Nevermined YouTube video `_. * `Flower meets KOSMoS `_. diff --git a/doc/source/tutorial-quickstart-android.rst b/doc/source/tutorial-quickstart-android.rst index 9177236d5a7c..f2691203078c 100644 --- a/doc/source/tutorial-quickstart-android.rst +++ b/doc/source/tutorial-quickstart-android.rst @@ -1,12 +1,12 @@ .. _quickstart-android: - Quickstart Android ================== .. meta:: - :description: Read this Federated Learning quickstart tutorial for creating an Android app using Flower. + :description: Read this Federated Learning quickstart tutorial for creating an Android app using Flower. Let's build a federated learning system using TFLite and Flower on Android! -Please refer to the `full code example `_ to learn more. +Please refer to the `full code example +`_ to learn more. diff --git a/doc/source/tutorial-quickstart-fastai.rst b/doc/source/tutorial-quickstart-fastai.rst index 63f5ac176082..d52c570b0195 100644 --- a/doc/source/tutorial-quickstart-fastai.rst +++ b/doc/source/tutorial-quickstart-fastai.rst @@ -1,12 +1,110 @@ .. _quickstart-fastai: - Quickstart fastai ================= -.. 
meta:: - :description: Check out this Federated Learning quickstart tutorial for using Flower with FastAI to train a vision model on CIFAR-10. +In this federated learning tutorial we will learn how to train a SqueezeNet model on +MNIST using Flower and fastai. It is recommended to create a virtual environment and run +everything within a :doc:`virtualenv `. + +Then, clone the code example directly from GitHub: + +.. code-block:: shell + + git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/quickstart-fastai . \ + && rm -rf _tmp && cd quickstart-fastai + +This will create a new directory called `quickstart-fastai` containing the following +files: + +.. code-block:: shell + + quickstart-fastai + ├── fastai_example + │ ├── client_app.py # Defines your ClientApp + │ ├── server_app.py # Defines your ServerApp + │ └── task.py # Defines your model, training and data loading + ├── pyproject.toml # Project metadata like dependencies and configs + └── README.md + +Next, activate your environment, then run: + +.. code-block:: shell + + # Navigate to the example directory + $ cd path/to/quickstart-fastai + + # Install project and dependencies + $ pip install -e . + +This example by default runs the Flower Simulation Engine, creating a federation of 10 +nodes using `FedAvg +`_ +as the aggregation strategy. The dataset will be partitioned using Flower Dataset's +`IidPartitioner +`_. +Let's run the project: + +.. code-block:: shell + + # Run with default arguments + $ flwr run . + +With default arguments you will see an output like this one: + +.. code-block:: shell + + Loading project configuration... 
+ Success + INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout + INFO : + INFO : [INIT] + INFO : Using initial global parameters provided by strategy + INFO : Starting evaluation of initial global parameters + INFO : Evaluation returned no results (`None`) + INFO : + INFO : [ROUND 1] + INFO : configure_fit: strategy sampled 5 clients (out of 10) + INFO : aggregate_fit: received 5 results and 0 failures + WARNING : No fit_metrics_aggregation_fn provided + INFO : configure_evaluate: strategy sampled 5 clients (out of 10) + INFO : aggregate_evaluate: received 5 results and 0 failures + INFO : + INFO : [ROUND 2] + INFO : configure_fit: strategy sampled 5 clients (out of 10) + INFO : aggregate_fit: received 5 results and 0 failures + INFO : configure_evaluate: strategy sampled 5 clients (out of 10) + INFO : aggregate_evaluate: received 5 results and 0 failures + INFO : + INFO : [ROUND 3] + INFO : configure_fit: strategy sampled 5 clients (out of 10) + INFO : aggregate_fit: received 5 results and 0 failures + INFO : configure_evaluate: strategy sampled 5 clients (out of 10) + INFO : aggregate_evaluate: received 5 results and 0 failures + INFO : + INFO : [SUMMARY] + INFO : Run finished 3 round(s) in 143.02s + INFO : History (loss, distributed): + INFO : round 1: 2.699497365951538 + INFO : round 2: 0.9549586296081543 + INFO : round 3: 0.6627192616462707 + INFO : History (metrics, distributed, evaluate): + INFO : {'accuracy': [(1, 0.09766666889190674), + INFO : (2, 0.6948333323001862), + INFO : (3, 0.7721666693687439)]} + INFO : + +You can also override the parameters defined in the ``[tool.flwr.app.config]`` section +in ``pyproject.toml`` like this: + +.. code-block:: shell + + # Override some arguments + $ flwr run . --run-config num-server-rounds=5 -Let's build a federated learning system using fastai and Flower! +.. note:: -Please refer to the `full code example `_ to learn more. 
+ Check the `source code + `_ of this + tutorial in ``examples/quickstart-fastai`` in the Flower GitHub repository. diff --git a/doc/source/tutorial-quickstart-huggingface.rst b/doc/source/tutorial-quickstart-huggingface.rst index 7d8128230901..3c9d3981e587 100644 --- a/doc/source/tutorial-quickstart-huggingface.rst +++ b/doc/source/tutorial-quickstart-huggingface.rst @@ -1,111 +1,219 @@ .. _quickstart-huggingface: - Quickstart 🤗 Transformers ========================== -.. meta:: - :description: Check out this Federating Learning quickstart tutorial for using Flower with HuggingFace Transformers in order to fine-tune an LLM. +In this federated learning tutorial we will learn how to train a large language model +(LLM) on the `IMDB `_ dataset using +Flower and the 🤗 Hugging Face Transformers library. It is recommended to create a +virtual environment and run everything within a :doc:`virtualenv +`. + +Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face project. It will +generate all the files needed to run, by default with the Flower Simulation Engine, a +federation of 10 nodes using |fedavg|_. The dataset will be partitioned using +|flowerdatasets|_'s |iidpartitioner|_. + +Now that we have a rough idea of what this example is about, let's get started. First, +install Flower in your new environment: + +.. code-block:: shell + + # In a new Python environment + $ pip install flwr + +Then, run the command below. You will be prompted to select one of the available +templates (choose ``HuggingFace``), give a name to your project, and type in your +developer name: + +.. code-block:: shell + + $ flwr new + +After running it you'll notice a new directory with your project name has been created. +It should have the following structure: + +.. 
code-block:: shell + + + ├── + │ ├── __init__.py + │ ├── client_app.py # Defines your ClientApp + │ ├── server_app.py # Defines your ServerApp + │ └── task.py # Defines your model, training and data loading + ├── pyproject.toml # Project metadata like dependencies and configs + └── README.md + +If you haven't yet installed the project and its dependencies, you can do so by: + +.. code-block:: shell + + # From the directory where your pyproject.toml is + $ pip install -e . + +To run the project, do: -Let's build a federated learning system using Hugging Face Transformers and Flower! +.. code-block:: shell + + # Run with default arguments + $ flwr run . -We will leverage Hugging Face to federate the training of language models over multiple clients using Flower. -More specifically, we will fine-tune a pre-trained Transformer model (distilBERT) -for sequence classification over a dataset of IMDB ratings. -The end goal is to detect if a movie rating is positive or negative. +With default arguments you will see an output like this one: -Dependencies ------------- +.. code-block:: shell -To follow along this tutorial you will need to install the following packages: -:code:`datasets`, :code:`evaluate`, :code:`flwr`, :code:`torch`, and :code:`transformers`. -This can be done using :code:`pip`: + Loading project configuration... 
+ Success + INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout + INFO : + INFO : [INIT] + INFO : Using initial global parameters provided by strategy + INFO : Starting evaluation of initial global parameters + INFO : Evaluation returned no results (`None`) + INFO : + INFO : [ROUND 1] + INFO : configure_fit: strategy sampled 2 clients (out of 10) + INFO : aggregate_fit: received 2 results and 0 failures + WARNING : No fit_metrics_aggregation_fn provided + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + WARNING : No evaluate_metrics_aggregation_fn provided + INFO : + INFO : [ROUND 2] + INFO : configure_fit: strategy sampled 5 clients (out of 10) + INFO : aggregate_fit: received 5 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [ROUND 3] + INFO : configure_fit: strategy sampled 5 clients (out of 10) + INFO : aggregate_fit: received 5 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [SUMMARY] + INFO : Run finished 3 round(s) in 249.11s + INFO : History (loss, distributed): + INFO : round 1: 0.02111011856794357 + INFO : round 2: 0.019722302150726317 + INFO : round 3: 0.018227258533239362 + INFO : + +You can also run the project with GPU as follows: .. code-block:: shell - $ pip install datasets evaluate flwr torch transformers + # Run with default arguments + $ flwr run . localhost-gpu + +This will use the default arguments where each ``ClientApp`` will use 2 CPUs and at most +4 ``ClientApp``\s will run in a given GPU. + +You can also override the parameters defined in the ``[tool.flwr.app.config]`` section +in ``pyproject.toml`` like this: +.. code-block:: shell + + # Override some arguments + $ flwr run . 
--run-config "num-server-rounds=5 fraction-fit=0.2" -Standard Hugging Face workflow ------------------------------- +What follows is an explanation of each component in the project you just created: +dataset partition, the model, defining the ``ClientApp`` and defining the ``ServerApp``. -Handling the data -^^^^^^^^^^^^^^^^^ +The Data +-------- -To fetch the IMDB dataset, we will use Hugging Face's :code:`datasets` library. -We then need to tokenize the data and create :code:`PyTorch` dataloaders, -this is all done in the :code:`load_data` function: +This tutorial uses |flowerdatasets|_ to easily download and partition the `IMDB +`_ dataset. In this example you'll +make use of the |iidpartitioner|_ to generate ``num_partitions`` partitions. You can +choose |otherpartitioners|_ available in Flower Datasets. To tokenize the text, we will +also load the tokenizer from the pre-trained Transformer model that we'll use during +training - more on that in the next section. Each ``ClientApp`` will call this function +to create dataloaders with the data that correspond to their data partition. .. 
code-block:: python - import random - import torch - from datasets import load_dataset - from torch.utils.data import DataLoader - from transformers import AutoTokenizer, DataCollatorWithPadding - - DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - CHECKPOINT = "distilbert-base-uncased" - - def load_data(): - """Load IMDB data (training and eval)""" - raw_datasets = load_dataset("imdb") - raw_datasets = raw_datasets.shuffle(seed=42) - # remove unnecessary data split - del raw_datasets["unsupervised"] - tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT) - def tokenize_function(examples): - return tokenizer(examples["text"], truncation=True) - # We will take a small sample in order to reduce the compute time, this is optional - train_population = random.sample(range(len(raw_datasets["train"])), 100) - test_population = random.sample(range(len(raw_datasets["test"])), 100) - tokenized_datasets = raw_datasets.map(tokenize_function, batched=True) - tokenized_datasets["train"] = tokenized_datasets["train"].select(train_population) - tokenized_datasets["test"] = tokenized_datasets["test"].select(test_population) - tokenized_datasets = tokenized_datasets.remove_columns("text") - tokenized_datasets = tokenized_datasets.rename_column("label", "labels") - data_collator = DataCollatorWithPadding(tokenizer=tokenizer) - trainloader = DataLoader( - tokenized_datasets["train"], - shuffle=True, - batch_size=32, - collate_fn=data_collator, - ) - testloader = DataLoader( - tokenized_datasets["test"], batch_size=32, collate_fn=data_collator + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="stanfordnlp/imdb", + partitioners={"train": partitioner}, + ) + partition = fds.load_partition(partition_id) + # Divide data: 80% train, 20% test + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) + + tokenizer = AutoTokenizer.from_pretrained(model_name) + + + def tokenize_function(examples): + return 
tokenizer( + examples["text"], truncation=True, add_special_tokens=True, max_length=512 ) - return trainloader, testloader -Training and testing the model -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + partition_train_test = partition_train_test.map(tokenize_function, batched=True) + partition_train_test = partition_train_test.remove_columns("text") + partition_train_test = partition_train_test.rename_column("label", "labels") + + data_collator = DataCollatorWithPadding(tokenizer=tokenizer) + trainloader = DataLoader( + partition_train_test["train"], + shuffle=True, + batch_size=32, + collate_fn=data_collator, + ) + + testloader = DataLoader( + partition_train_test["test"], batch_size=32, collate_fn=data_collator + ) + +The Model +--------- -Once we have a way of creating our trainloader and testloader, -we can take care of the training and testing. -This is very similar to any :code:`PyTorch` training or testing loop: +We will leverage 🤗 Hugging Face to federate the training of language models over +multiple clients using Flower. More specifically, we will fine-tune a pre-trained +Transformer model (|berttiny|_) for sequence classification over the dataset of IMDB +ratings. The end goal is to detect if a movie rating is positive or negative. If you +have access to larger GPUs, feel free to use larger models! .. code-block:: python - from evaluate import load as load_metric - from transformers import AdamW + net = AutoModelForSequenceClassification.from_pretrained( + model_name, num_labels=num_labels + ) + +Note that here, ``model_name`` is a string that will be loaded from the ``Context`` in +the ClientApp and ServerApp. - def train(net, trainloader, epochs): +In addition to loading the pretrained model weights and architecture, we also include +two utility functions to perform both training (i.e. ``train()``) and evaluation (i.e. +``test()``) using the above model. These functions should look fairly familiar if you +have some prior experience with PyTorch. 
Note these functions do not have anything +specific to Flower. That being said, the training function will normally be called, as +we'll see later, from a Flower client passing its own data. In summary, your clients can +use standard training/testing functions to perform local training or evaluation: + +.. code-block:: python + + def train(net, trainloader, epochs, device): optimizer = AdamW(net.parameters(), lr=5e-5) net.train() for _ in range(epochs): for batch in trainloader: - batch = {k: v.to(DEVICE) for k, v in batch.items()} + batch = {k: v.to(device) for k, v in batch.items()} outputs = net(**batch) loss = outputs.loss loss.backward() optimizer.step() optimizer.zero_grad() - def test(net, testloader): + + + def test(net, testloader, device): metric = load_metric("accuracy") loss = 0 net.eval() for batch in testloader: - batch = {k: v.to(DEVICE) for k, v in batch.items()} + batch = {k: v.to(device) for k, v in batch.items()} with torch.no_grad(): outputs = net(**batch) logits = outputs.logits @@ -116,114 +224,180 @@ This is very similar to any :code:`PyTorch` training or testing loop: accuracy = metric.compute()["accuracy"] return loss, accuracy +The ClientApp +------------- -Creating the model itself -^^^^^^^^^^^^^^^^^^^^^^^^^ +The main changes we have to make to use 🤗 Hugging Face with Flower will be found in the +``get_weights()`` and ``set_weights()`` functions. Under the hood, the ``transformers`` +library uses PyTorch, which means we can reuse the ``get_weights()`` and +``set_weights()`` code that we defined in the :doc:`Quickstart PyTorch +` tutorial. As a reminder, in ``get_weights()``, PyTorch +model parameters are extracted and represented as a list of NumPy arrays. The +``set_weights()`` function does the opposite: given a list of NumPy arrays it applies +them to an existing PyTorch model. Doing this is fairly easy in PyTorch. 
-To create the model itself, -we will just load the pre-trained distillBERT model using Hugging Face’s :code:`AutoModelForSequenceClassification` : +.. note:: + + The specific implementation of ``get_weights()`` and ``set_weights()`` depends on + the type of models you use. The ones shown below work for a wide range of PyTorch + models but you might need to adjust them if you have more exotic model + architectures. .. code-block:: python - from transformers import AutoModelForSequenceClassification + def get_weights(net): + return [val.cpu().numpy() for _, val in net.state_dict().items()] - net = AutoModelForSequenceClassification.from_pretrained( - CHECKPOINT, num_labels=2 - ).to(DEVICE) + def set_weights(net, parameters): + params_dict = zip(net.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) -Federating the example ----------------------- +The rest of the functionality is directly inspired by the centralized case. The +``fit()`` method in the client trains the model using the local dataset. Similarly, the +``evaluate()`` method is used to evaluate the model received on a held-out validation +set that the client might have: -Creating the IMDBClient -^^^^^^^^^^^^^^^^^^^^^^^ +.. code-block:: python -To federate our example to multiple clients, -we first need to write our Flower client class (inheriting from :code:`flwr.client.NumPyClient`). 
-This is very easy, as our model is a standard :code:`PyTorch` model: + class FlowerClient(NumPyClient): + def __init__(self, net, trainloader, testloader, local_epochs): + self.net = net + self.trainloader = trainloader + self.testloader = testloader + self.local_epochs = local_epochs + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + self.net.to(self.device) + + def fit(self, parameters, config): + set_weights(self.net, parameters) + train(self.net, self.trainloader, epochs=self.local_epochs, device=self.device) + return get_weights(self.net), len(self.trainloader), {} + + def evaluate(self, parameters, config): + set_weights(self.net, parameters) + loss, accuracy = test(self.net, self.testloader, self.device) + return float(loss), len(self.testloader), {"accuracy": accuracy} + +Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` defined above by +means of a ``client_fn()`` callback. Note that the `context` enables you to get access +to hyperparameters defined in your ``pyproject.toml`` to configure the run. In this +tutorial we access the ``local-epochs`` setting to control the number of epochs a +``ClientApp`` will perform when running the ``fit()`` method. You could define +additional hyperparameters in ``pyproject.toml`` and access them here. .. 
code-block:: python - from collections import OrderedDict - import flwr as fl - - class IMDBClient(fl.client.NumPyClient): - def get_parameters(self, config): - return [val.cpu().numpy() for _, val in net.state_dict().items()] - def set_parameters(self, parameters): - params_dict = zip(net.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict}) - net.load_state_dict(state_dict, strict=True) - def fit(self, parameters, config): - self.set_parameters(parameters) - print("Training Started...") - train(net, trainloader, epochs=1) - print("Training Finished.") - return self.get_parameters(config={}), len(trainloader), {} - def evaluate(self, parameters, config): - self.set_parameters(parameters) - loss, accuracy = test(net, testloader) - return float(loss), len(testloader), {"accuracy": float(accuracy)} - - -The :code:`get_parameters` function lets the server get the client's parameters. -Inversely, the :code:`set_parameters` function allows the server to send its parameters to the client. -Finally, the :code:`fit` function trains the model locally for the client, -and the :code:`evaluate` function tests the model locally and returns the relevant metrics. - -Starting the server -^^^^^^^^^^^^^^^^^^^ - -Now that we have a way to instantiate clients, we need to create our server in order to aggregate the results. -Using Flower, this can be done very easily by first choosing a strategy (here, we are using :code:`FedAvg`, -which will define the global weights as the average of all the clients' weights at each round) -and then using the :code:`flwr.server.start_server` function: + def client_fn(context: Context): -.. 
code-block:: python + + # Get this client's dataset partition + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + model_name = context.run_config["model-name"] + trainloader, valloader = load_data(partition_id, num_partitions, model_name) - def weighted_average(metrics): - accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] - losses = [num_examples * m["loss"] for num_examples, m in metrics] - examples = [num_examples for num_examples, _ in metrics] - return {"accuracy": sum(accuracies) / sum(examples), "loss": sum(losses) / sum(examples)} - - # Define strategy - strategy = fl.server.strategy.FedAvg( - fraction_fit=1.0, - fraction_evaluate=1.0, - evaluate_metrics_aggregation_fn=weighted_average, - ) + # Load model + num_labels = context.run_config["num-labels"] + net = AutoModelForSequenceClassification.from_pretrained( + model_name, num_labels=num_labels + ) - # Start server - fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=3), - strategy=strategy, - ) + local_epochs = context.run_config["local-epochs"] + + # Return Client instance + return FlowerClient(net, trainloader, valloader, local_epochs).to_client() -The :code:`weighted_average` function is there to provide a way to aggregate the metrics distributed amongst -the clients (basically this allows us to display a nice average accuracy and loss for every round). + # Flower ClientApp + app = ClientApp(client_fn) -Putting everything together ---------------------------- +The ServerApp +------------- -We can now start client instances using: +To construct a ``ServerApp`` we define a ``server_fn()`` callback with an identical +signature to that of ``client_fn()`` but the return type is |serverappcomponents|_ as +opposed to a |client|_. In this example we use the `FedAvg` strategy. To it we pass a +randomly initialized model that will serve as the global model to be federated. 
Note that +the value of ``fraction_fit`` is read from the run config. You can find the default +value defined in the ``pyproject.toml``. .. code-block:: python - fl.client.start_client( - server_address="127.0.0.1:8080", - client=IMDBClient().to_client() - ) + def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] + fraction_fit = context.run_config["fraction-fit"] + + # Initialize global model + model_name = context.run_config["model-name"] + num_labels = context.run_config["num-labels"] + net = AutoModelForSequenceClassification.from_pretrained( + model_name, num_labels=num_labels + ) + + weights = get_weights(net) + initial_parameters = ndarrays_to_parameters(weights) + + # Define strategy + strategy = FedAvg( + fraction_fit=fraction_fit, + fraction_evaluate=1.0, + initial_parameters=initial_parameters, + ) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) -And they will be able to connect to the server and start the federated training. + # Create ServerApp + app = ServerApp(server_fn=server_fn) -If you want to check out everything put together, -you should check out the `full code example `_ . +Congratulations! You've successfully built and run your first federated learning system +for an LLM. -Of course, this is a very basic example, and a lot can be added or modified, -it was just to showcase how simply we could federate a Hugging Face workflow using Flower. +.. note:: -Note that in this example we used :code:`PyTorch`, but we could have very well used :code:`TensorFlow`. + Check the source code of the extended version of this tutorial in + |quickstart_hf_link|_ in the Flower GitHub repository. For a comprehensive example + of a federated fine-tuning of an LLM with Flower, refer to the |flowertune|_ example + in the Flower GitHub repository. + +.. |quickstart_hf_link| replace:: ``examples/quickstart-huggingface`` + +.. 
|fedavg| replace:: ``FedAvg`` + +.. |iidpartitioner| replace:: ``IidPartitioner`` + +.. |otherpartitioners| replace:: other partitioners + +.. |berttiny| replace:: ``bert-tiny`` + +.. |serverappcomponents| replace:: ``ServerAppComponents`` + +.. |client| replace:: ``Client`` + +.. |flowerdatasets| replace:: Flower Datasets + +.. |flowertune| replace:: FlowerTune LLM + +.. _berttiny: https://huggingface.co/prajjwal1/bert-tiny + +.. _client: ref-api/flwr.client.Client.html#client + +.. _fedavg: ref-api/flwr.server.strategy.FedAvg.html#flwr.server.strategy.FedAvg + +.. _flowerdatasets: https://flower.ai/docs/datasets/ + +.. _flowertune: https://github.com/adap/flower/tree/main/examples/flowertune-llm + +.. _iidpartitioner: https://flower.ai/docs/datasets/ref-api/flwr_datasets.partitioner.IidPartitioner.html#flwr_datasets.partitioner.IidPartitioner + +.. _otherpartitioners: https://flower.ai/docs/datasets/ref-api/flwr_datasets.partitioner.html + +.. _quickstart_hf_link: https://github.com/adap/flower/tree/main/examples/quickstart-huggingface + +.. _serverappcomponents: ref-api/flwr.server.ServerAppComponents.html#serverappcomponents + +.. meta:: + :description: Check out this Federating Learning quickstart tutorial for using Flower with 🤗 HuggingFace Transformers in order to fine-tune an LLM. diff --git a/doc/source/tutorial-quickstart-ios.rst b/doc/source/tutorial-quickstart-ios.rst index e4315ce569fb..8a9250f8dfb0 100644 --- a/doc/source/tutorial-quickstart-ios.rst +++ b/doc/source/tutorial-quickstart-ios.rst @@ -1,136 +1,155 @@ .. _quickstart-ios: - Quickstart iOS ============== .. meta:: - :description: Read this Federated Learning quickstart tutorial for creating an iOS app using Flower to train a neural network on MNIST. + :description: Read this Federated Learning quickstart tutorial for creating an iOS app using Flower to train a neural network on MNIST. 
-In this tutorial we will learn how to train a Neural Network on MNIST using Flower and CoreML on iOS devices. +In this tutorial we will learn how to train a Neural Network on MNIST using Flower and +CoreML on iOS devices. -First of all, for running the Flower Python server, it is recommended to create a virtual environment and run everything within a :doc:`virtualenv `. -For the Flower client implementation in iOS, it is recommended to use Xcode as our IDE. +First of all, for running the Flower Python server, it is recommended to create a +virtual environment and run everything within a :doc:`virtualenv +`. For the Flower client implementation in iOS, +it is recommended to use Xcode as our IDE. -Our example consists of one Python *server* and two iPhone *clients* that all have the same model. +Our example consists of one Python *server* and two iPhone *clients* that all have the +same model. -*Clients* are responsible for generating individual weight updates for the model based on their local datasets. -These updates are then sent to the *server* which will aggregate them to produce a better model. Finally, the *server* sends this improved version of the model back to each *client*. -A complete cycle of weight updates is called a *round*. +*Clients* are responsible for generating individual weight updates for the model based +on their local datasets. These updates are then sent to the *server* which will +aggregate them to produce a better model. Finally, the *server* sends this improved +version of the model back to each *client*. A complete cycle of weight updates is called +a *round*. -Now that we have a rough idea of what is going on, let's get started to setup our Flower server environment. We first need to install Flower. You can do this by using pip: +Now that we have a rough idea of what is going on, let's get started to setup our Flower +server environment. We first need to install Flower. You can do this by using pip: .. 
code-block:: shell - $ pip install flwr + $ pip install flwr Or Poetry: .. code-block:: shell - $ poetry add flwr + $ poetry add flwr Flower Client ------------- -Now that we have all our dependencies installed, let's run a simple distributed training using CoreML as our local training pipeline and MNIST as our dataset. -For simplicity reasons we will use the complete Flower client with CoreML, that has been implemented and stored inside the Swift SDK. The client implementation can be seen below: +Now that we have all our dependencies installed, let's run a simple distributed training +using CoreML as our local training pipeline and MNIST as our dataset. For simplicity +reasons we will use the complete Flower client with CoreML, that has been implemented +and stored inside the Swift SDK. The client implementation can be seen below: .. code-block:: swift - /// Parses the parameters from the local model and returns them as GetParametersRes struct - /// - /// - Returns: Parameters from the local model - public func getParameters() -> GetParametersRes { - let parameters = parameters.weightsToParameters() - let status = Status(code: .ok, message: String()) - - return GetParametersRes(parameters: parameters, status: status) - } - - /// Calls the routine to fit the local model - /// - /// - Returns: The result from the local training, e.g., updated parameters - public func fit(ins: FitIns) -> FitRes { - let status = Status(code: .ok, message: String()) - let result = runMLTask(configuration: parameters.parametersToWeights(parameters: ins.parameters), task: .train) - let parameters = parameters.weightsToParameters() - - return FitRes(parameters: parameters, numExamples: result.numSamples, status: status) - } + /// Parses the parameters from the local model and returns them as GetParametersRes struct + /// + /// - Returns: Parameters from the local model + public func getParameters() -> GetParametersRes { + let parameters = parameters.weightsToParameters() + let status = 
Status(code: .ok, message: String()) - /// Calls the routine to evaluate the local model - /// - /// - Returns: The result from the evaluation, e.g., loss - public func evaluate(ins: EvaluateIns) -> EvaluateRes { - let status = Status(code: .ok, message: String()) - let result = runMLTask(configuration: parameters.parametersToWeights(parameters: ins.parameters), task: .test) + return GetParametersRes(parameters: parameters, status: status) + } - return EvaluateRes(loss: Float(result.loss), numExamples: result.numSamples, status: status) - } + /// Calls the routine to fit the local model + /// + /// - Returns: The result from the local training, e.g., updated parameters + public func fit(ins: FitIns) -> FitRes { + let status = Status(code: .ok, message: String()) + let result = runMLTask(configuration: parameters.parametersToWeights(parameters: ins.parameters), task: .train) + let parameters = parameters.weightsToParameters() + + return FitRes(parameters: parameters, numExamples: result.numSamples, status: status) + } + + /// Calls the routine to evaluate the local model + /// + /// - Returns: The result from the evaluation, e.g., loss + public func evaluate(ins: EvaluateIns) -> EvaluateRes { + let status = Status(code: .ok, message: String()) + let result = runMLTask(configuration: parameters.parametersToWeights(parameters: ins.parameters), task: .test) + + return EvaluateRes(loss: Float(result.loss), numExamples: result.numSamples, status: status) + } -Let's create a new application project in Xcode and add :code:`flwr` as a dependency in your project. For our application, we will store the logic of our app in :code:`FLiOSModel.swift` and the UI elements in :code:`ContentView.swift`. -We will focus more on :code:`FLiOSModel.swift` in this quickstart. Please refer to the `full code example `_ to learn more about the app. +Let's create a new application project in Xcode and add ``flwr`` as a dependency in your +project. 
For our application, we will store the logic of our app in ``FLiOSModel.swift`` +and the UI elements in ``ContentView.swift``. We will focus more on ``FLiOSModel.swift`` +in this quickstart. Please refer to the `full code example +`_ to learn more about the app. -Import Flower and CoreML related packages in :code:`FLiOSModel.swift`: +Import Flower and CoreML related packages in ``FLiOSModel.swift``: .. code-block:: swift - import Foundation - import CoreML - import flwr + import Foundation + import CoreML + import flwr -Then add the mlmodel to the project simply by drag-and-drop, the mlmodel will be bundled inside the application during deployment to your iOS device. -We need to pass the url to access mlmodel and run CoreML machine learning processes, it can be retrieved by calling the function :code:`Bundle.main.url`. -For the MNIST dataset, we need to preprocess it into :code:`MLBatchProvider` object. The preprocessing is done inside :code:`DataLoader.swift`. +Then add the mlmodel to the project simply by drag-and-drop, the mlmodel will be bundled +inside the application during deployment to your iOS device. We need to pass the url to +access mlmodel and run CoreML machine learning processes, it can be retrieved by calling +the function ``Bundle.main.url``. For the MNIST dataset, we need to preprocess it into +``MLBatchProvider`` object. The preprocessing is done inside ``DataLoader.swift``. .. 
code-block:: swift - // prepare train dataset - let trainBatchProvider = DataLoader.trainBatchProvider() { _ in } + // prepare train dataset + let trainBatchProvider = DataLoader.trainBatchProvider() { _ in } - // prepare test dataset - let testBatchProvider = DataLoader.testBatchProvider() { _ in } + // prepare test dataset + let testBatchProvider = DataLoader.testBatchProvider() { _ in } - // load them together - let dataLoader = MLDataLoader(trainBatchProvider: trainBatchProvider, - testBatchProvider: testBatchProvider) + // load them together + let dataLoader = MLDataLoader(trainBatchProvider: trainBatchProvider, + testBatchProvider: testBatchProvider) -Since CoreML does not allow the model parameters to be seen before training, and accessing the model parameters during or after the training can only be done by specifying the layer name, -we need to know this information beforehand, through looking at the model specification, which are written as proto files. The implementation can be seen in :code:`MLModelInspect`. +Since CoreML does not allow the model parameters to be seen before training, and +accessing the model parameters during or after the training can only be done by +specifying the layer name, we need to know this information beforehand, through looking +at the model specification, which are written as proto files. The implementation can be +seen in ``MLModelInspect``. After we have all of the necessary information, let's create our Flower client. .. 
code-block:: swift - let compiledModelUrl = try MLModel.compileModel(at: url) + let compiledModelUrl = try MLModel.compileModel(at: url) - // inspect the model to be able to access the model parameters - // to access the model we need to know the layer name - // since the model parameters are stored as key value pairs - let modelInspect = try MLModelInspect(serializedData: Data(contentsOf: url)) - let layerWrappers = modelInspect.getLayerWrappers() - self.mlFlwrClient = MLFlwrClient(layerWrappers: layerWrappers, - dataLoader: dataLoader, - compiledModelUrl: compiledModelUrl) + // inspect the model to be able to access the model parameters + // to access the model we need to know the layer name + // since the model parameters are stored as key value pairs + let modelInspect = try MLModelInspect(serializedData: Data(contentsOf: url)) + let layerWrappers = modelInspect.getLayerWrappers() + self.mlFlwrClient = MLFlwrClient(layerWrappers: layerWrappers, + dataLoader: dataLoader, + compiledModelUrl: compiledModelUrl) -Then start the Flower gRPC client and start communicating to the server by passing our Flower client to the function :code:`startFlwrGRPC`. +Then start the Flower gRPC client and start communicating to the server by passing our +Flower client to the function ``startFlwrGRPC``. .. code-block:: swift - self.flwrGRPC = FlwrGRPC(serverHost: hostname, serverPort: port) - self.flwrGRPC.startFlwrGRPC(client: self.mlFlwrClient) + self.flwrGRPC = FlwrGRPC(serverHost: hostname, serverPort: port) + self.flwrGRPC.startFlwrGRPC(client: self.mlFlwrClient) -That's it for the client. We only have to implement :code:`Client` or call the provided -:code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. The attribute :code:`hostname` and :code:`port` tells the client which server to connect to. -This can be done by entering the hostname and port in the application before clicking the start button to start the federated learning process. +That's it for the client. 
We only have to implement ``Client`` or call the provided +``MLFlwrClient`` and call ``startFlwrGRPC()``. The attribute ``hostname`` and ``port`` +tells the client which server to connect to. This can be done by entering the hostname +and port in the application before clicking the start button to start the federated +learning process. Flower Server ------------- -For simple workloads we can start a Flower server and leave all the -configuration possibilities at their default values. In a file named -:code:`server.py`, import Flower and start the server: +For simple workloads we can start a Flower server and leave all the configuration +possibilities at their default values. In a file named ``server.py``, import Flower and +start the server: .. code-block:: python @@ -141,18 +160,21 @@ configuration possibilities at their default values. In a file named Train the model, federated! --------------------------- -With both client and server ready, we can now run everything and see federated -learning in action. FL systems usually have a server and multiple clients. We -therefore have to start the server first: +With both client and server ready, we can now run everything and see federated learning +in action. FL systems usually have a server and multiple clients. We therefore have to +start the server first: .. code-block:: shell $ python server.py -Once the server is running we can start the clients in different terminals. -Build and run the client through your Xcode, one through Xcode Simulator and the other by deploying it to your iPhone. -To see more about how to deploy your app to iPhone or Simulator visit `here `_. +Once the server is running we can start the clients in different terminals. Build and +run the client through your Xcode, one through Xcode Simulator and the other by +deploying it to your iPhone. To see more about how to deploy your app to iPhone or +Simulator visit `here +`_. -Congratulations! 
-You've successfully built and run your first federated learning system in your ios device. -The full `source code `_ for this example can be found in :code:`examples/ios`. +Congratulations! You've successfully built and run your first federated learning system +in your ios device. The full `source code +`_ for this example can be found +in ``examples/ios``. diff --git a/doc/source/tutorial-quickstart-jax.rst b/doc/source/tutorial-quickstart-jax.rst index d2b9243e2bb3..0581e95d8d42 100644 --- a/doc/source/tutorial-quickstart-jax.rst +++ b/doc/source/tutorial-quickstart-jax.rst @@ -1,34 +1,42 @@ .. _quickstart-jax: - Quickstart JAX ============== .. meta:: - :description: Check out this Federated Learning quickstart tutorial for using Flower with Jax to train a linear regression model on a scikit-learn dataset. - -This tutorial will show you how to use Flower to build a federated version of an existing JAX workload. -We are using JAX to train a linear regression model on a scikit-learn dataset. -We will structure the example similar to our `PyTorch - From Centralized To Federated `_ walkthrough. -First, we build a centralized training approach based on the `Linear Regression with JAX `_ tutorial`. -Then, we build upon the centralized training code to run the training in a federated fashion. - -Before we start building our JAX example, we need install the packages :code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`: + :description: Check out this Federated Learning quickstart tutorial for using Flower with Jax to train a linear regression model on a scikit-learn dataset. + +This tutorial will show you how to use Flower to build a federated version of an +existing JAX workload. We are using JAX to train a linear regression model on a +scikit-learn dataset. We will structure the example similar to our `PyTorch - From +Centralized To Federated +`_ +walkthrough. 
we need to install the packages ``jax``
code-block:: python @@ -40,47 +48,52 @@ You can see that we do not yet import the :code:`flwr` package for federated lea key = jax.random.PRNGKey(0) -The :code:`load_data()` function loads the mentioned training and test sets. +The ``load_data()`` function loads the mentioned training and test sets. .. code-block:: python - def load_data() -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray], List[np.ndarray]]: + def load_data() -> ( + Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray], List[np.ndarray]] + ): # create our dataset and start with similar datasets for different clients X, y = make_regression(n_features=3, random_state=0) X, X_test, y, y_test = train_test_split(X, y) return X, y, X_test, y_test -The model architecture (a very simple :code:`Linear Regression` model) is defined in :code:`load_model()`. +The model architecture (a very simple ``Linear Regression`` model) is defined in +``load_model()``. .. code-block:: python def load_model(model_shape) -> Dict: # model weights - params = { - 'b' : jax.random.uniform(key), - 'w' : jax.random.uniform(key, model_shape) - } + params = {"b": jax.random.uniform(key), "w": jax.random.uniform(key, model_shape)} return params -We now need to define the training (function :code:`train()`), which loops over the training set and measures the loss (function :code:`loss_fn()`) for each batch of training examples. The loss function is separate since JAX takes derivatives with a :code:`grad()` function (defined in the :code:`main()` function and called in :code:`train()`). +We now need to define the training (function ``train()``), which loops over the training +set and measures the loss (function ``loss_fn()``) for each batch of training examples. +The loss function is separate since JAX takes derivatives with a ``grad()`` function +(defined in the ``main()`` function and called in ``train()``). .. 
code-block:: python def loss_fn(params, X, y) -> Callable: - err = jnp.dot(X, params['w']) + params['b'] - y + err = jnp.dot(X, params["w"]) + params["b"] - y return jnp.mean(jnp.square(err)) # mse + def train(params, grad_fn, X, y) -> Tuple[np.array, float, int]: num_examples = X.shape[0] for epochs in range(10): grads = grad_fn(params, X, y) params = jax.tree_multimap(lambda p, g: p - 0.05 * g, params, grads) - loss = loss_fn(params,X, y) + loss = loss_fn(params, X, y) # if epochs % 10 == 9: # print(f'For Epoch {epochs} loss {loss}') return params, loss, num_examples -The evaluation of the model is defined in the function :code:`evaluation()`. The function takes all test examples and measures the loss of the linear regression model. +The evaluation of the model is defined in the function ``evaluation()``. The function +takes all test examples and measures the loss of the linear regression model. .. code-block:: python @@ -91,7 +104,9 @@ The evaluation of the model is defined in the function :code:`evaluation()`. The # print(f'Test loss {loss_test}') return loss_test, num_examples -Having defined the data loading, model architecture, training, and evaluation we can put everything together and train our model using JAX. As already mentioned, the :code:`jax.grad()` function is defined in :code:`main()` and passed to :code:`train()`. +Having defined the data loading, model architecture, training, and evaluation we can put +everything together and train our model using JAX. As already mentioned, the +``jax.grad()`` function is defined in ``main()`` and passed to ``train()``. .. 
code-block:: python @@ -100,7 +115,7 @@ Having defined the data loading, model architecture, training, and evaluation we model_shape = X.shape[1:] grad_fn = jax.grad(loss_fn) print("Model Shape", model_shape) - params = load_model(model_shape) + params = load_model(model_shape) params, loss, num_examples = train(params, grad_fn, X, y) evaluation(params, grad_fn, X_test, y_test) @@ -110,40 +125,48 @@ Having defined the data loading, model architecture, training, and evaluation we You can now run your (centralized) JAX linear regression workload: -.. code-block:: python +.. code-block:: bash python3 jax_training.py -So far this should all look fairly familiar if you've used JAX before. -Let's take the next step and use what we've built to create a simple federated learning system consisting of one server and two clients. +So far this should all look fairly familiar if you've used JAX before. Let's take the +next step and use what we've built to create a simple federated learning system +consisting of one server and two clients. JAX meets Flower ---------------- -The concept of federating an existing workload is always the same and easy to understand. -We have to start a *server* and then use the code in :code:`jax_training.py` for the *clients* that are connected to the *server*. -The *server* sends model parameters to the clients. The *clients* run the training and update the parameters. -The updated parameters are sent back to the *server*, which averages all received parameter updates. -This describes one round of the federated learning process, and we repeat this for multiple rounds. +The concept of federating an existing workload is always the same and easy to +understand. We have to start a *server* and then use the code in ``jax_training.py`` for +the *clients* that are connected to the *server*. The *server* sends model parameters to +the clients. The *clients* run the training and update the parameters. 
The updated +parameters are sent back to the *server*, which averages all received parameter updates. +This describes one round of the federated learning process, and we repeat this for +multiple rounds. -Our example consists of one *server* and two *clients*. Let's set up :code:`server.py` first. The *server* needs to import the Flower package :code:`flwr`. -Next, we use the :code:`start_server` function to start a server and tell it to perform three rounds of federated learning. +Our example consists of one *server* and two *clients*. Let's set up ``server.py`` +first. The *server* needs to import the Flower package ``flwr``. Next, we use the +``start_server`` function to start a server and tell it to perform three rounds of +federated learning. .. code-block:: python import flwr as fl if __name__ == "__main__": - fl.server.start_server(server_address="0.0.0.0:8080", config=fl.server.ServerConfig(num_rounds=3)) + fl.server.start_server( + server_address="0.0.0.0:8080", config=fl.server.ServerConfig(num_rounds=3) + ) We can already start the *server*: -.. code-block:: python +.. code-block:: bash python3 server.py -Finally, we will define our *client* logic in :code:`client.py` and build upon the previously defined JAX training in :code:`jax_training.py`. -Our *client* needs to import :code:`flwr`, but also :code:`jax` and :code:`jaxlib` to update the parameters on our JAX model: +Finally, we will define our *client* logic in ``client.py`` and build upon the +previously defined JAX training in ``jax_training.py``. Our *client* needs to import +``flwr``, but also ``jax`` and ``jaxlib`` to update the parameters on our JAX model: .. code-block:: python @@ -156,36 +179,45 @@ Our *client* needs to import :code:`flwr`, but also :code:`jax` and :code:`jaxli import jax_training - -Implementing a Flower *client* basically means implementing a subclass of either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. 
-Our implementation will be based on :code:`flwr.client.NumPyClient` and we'll call it :code:`FlowerClient`. -:code:`NumPyClient` is slightly easier to implement than :code:`Client` if you use a framework with good NumPy interoperability (like JAX) because it avoids some of the boilerplate that would otherwise be necessary. -:code:`FlowerClient` needs to implement four methods, two methods for getting/setting model parameters, one method for training the model, and one method for testing the model: - -#. :code:`set_parameters (optional)` - * set the model parameters on the local model that are received from the server - * transform parameters to NumPy :code:`ndarray`'s - * loop over the list of model parameters received as NumPy :code:`ndarray`'s (think list of neural network layers) -#. :code:`get_parameters` - * get the model parameters and return them as a list of NumPy :code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects) -#. :code:`fit` - * update the parameters of the local model with the parameters received from the server - * train the model on the local training set - * get the updated local model parameters and return them to the server -#. :code:`evaluate` - * update the parameters of the local model with the parameters received from the server - * evaluate the updated model on the local test set - * return the local loss to the server - -The challenging part is to transform the JAX model parameters from :code:`DeviceArray` to :code:`NumPy ndarray` to make them compatible with `NumPyClient`. - -The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make use of the functions :code:`train()` and :code:`evaluate()` previously defined in :code:`jax_training.py`. -So what we really do here is we tell Flower through our :code:`NumPyClient` subclass which of our already defined functions to call for training and evaluation. 
``NumPy ndarray`` to make them compatible with ``NumPyClient``.
So what we +really do here is we tell Flower through our ``NumPyClient`` subclass which of our +already defined functions to call for training and evaluation. We included type +annotations to give you a better understanding of the data types that get passed around. .. code-block:: python - class FlowerClient(fl.client.NumPyClient): """Flower client implementing using linear regression and JAX.""" @@ -198,7 +230,7 @@ We included type annotations to give you a better understanding of the data type test_x: List[np.ndarray], test_y: List[np.ndarray], ) -> None: - self.params= params + self.params = params self.grad_fn = grad_fn self.train_x = train_x self.train_y = train_y @@ -211,25 +243,26 @@ We included type annotations to give you a better understanding of the data type for _, val in self.params.items(): parameter_value.append(np.array(val)) return parameter_value - + def set_parameters(self, parameters: List[np.ndarray]) -> Dict: # Collect model parameters and update the parameters of the local model - value=jnp.ndarray - params_item = list(zip(self.params.keys(),parameters)) + value = jnp.ndarray + params_item = list(zip(self.params.keys(), parameters)) for item in params_item: key = item[0] value = item[1] self.params[key] = value return self.params - def fit( self, parameters: List[np.ndarray], config: Dict ) -> Tuple[List[np.ndarray], int, Dict]: # Set model parameters, train model, return updated model parameters print("Start local training") self.params = self.set_parameters(parameters) - self.params, loss, num_examples = jax_training.train(self.params, self.grad_fn, self.train_x, self.train_y) + self.params, loss, num_examples = jax_training.train( + self.params, self.grad_fn, self.train_x, self.train_y + ) results = {"loss": float(loss)} print("Training results", results) return self.get_parameters(config={}), num_examples, results @@ -240,7 +273,9 @@ We included type annotations to give you a better understanding of the data type # Set model parameters, 
evaluate the model on a local test dataset, return result print("Start evaluation") self.params = self.set_parameters(parameters) - loss, num_examples = jax_training.evaluation(self.params,self.grad_fn, self.test_x, self.test_y) + loss, num_examples = jax_training.evaluation( + self.params, self.grad_fn, self.test_x, self.test_y + ) print("Evaluation accuracy & loss", loss) return ( float(loss), @@ -267,22 +302,25 @@ Having defined the federation process, we can run it. client = FlowerClient(params, grad_fn, train_x, train_y, test_x, test_y) fl.client.start_client(server_address="0.0.0.0:8080", client=client.to_client()) + if __name__ == "__main__": main() - And that's it. You can now open two additional terminal windows and run -.. code-block:: python +.. code-block:: bash python3 client.py -in each window (make sure that the server is still running before you do so) and see your JAX project run federated learning across two clients. Congratulations! +in each window (make sure that the server is still running before you do so) and see +your JAX project run federated learning across two clients. Congratulations! Next Steps ---------- -The source code of this example was improved over time and can be found here: `Quickstart JAX `_. +The source code of this example was improved over time and can be found here: +`Quickstart JAX `_. Our example is somewhat over-simplified because both clients load the same dataset. -You're now prepared to explore this topic further. How about using a more sophisticated model or using a different dataset? How about adding more clients? +You're now prepared to explore this topic further. How about using a more sophisticated +model or using a different dataset? How about adding more clients? diff --git a/doc/source/tutorial-quickstart-mlx.rst b/doc/source/tutorial-quickstart-mlx.rst new file mode 100644 index 000000000000..40e870ddc822 --- /dev/null +++ b/doc/source/tutorial-quickstart-mlx.rst @@ -0,0 +1,393 @@ +.. 
prompted to select one of the available templates
+ Success + INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout + INFO : + INFO : [INIT] + INFO : Requesting initial parameters from one random client + WARNING : FAB ID is not provided; the default ClientApp will be loaded. + INFO : Received initial parameters from one random client + INFO : Evaluating initial global parameters + INFO : + INFO : [ROUND 1] + INFO : configure_fit: strategy sampled 10 clients (out of 10) + INFO : aggregate_fit: received 10 results and 0 failures + WARNING : No fit_metrics_aggregation_fn provided + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + WARNING : No evaluate_metrics_aggregation_fn provided + INFO : + INFO : [ROUND 2] + INFO : configure_fit: strategy sampled 10 clients (out of 10) + INFO : aggregate_fit: received 10 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [ROUND 3] + INFO : configure_fit: strategy sampled 10 clients (out of 10) + INFO : aggregate_fit: received 10 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [SUMMARY] + INFO : Run finished 3 round(s) in 8.15s + INFO : History (loss, distributed): + INFO : round 1: 2.243802046775818 + INFO : round 2: 2.101812958717346 + INFO : round 3: 1.7419301986694335 + INFO : + +You can also override the parameters defined in ``[tool.flwr.app.config]`` section in +the ``pyproject.toml`` like this: + +.. code-block:: shell + + # Override some arguments + $ flwr run . --run-config "num-server-rounds=5 lr=0.05" + +What follows is an explanation of each component in the project you just created: +dataset partition, the model, defining the ``ClientApp`` and defining the ``ServerApp``. 
+ +The Data +-------- + +We will use `Flower Datasets `_ to easily download and +partition the `MNIST` dataset. In this example you'll make use of the `IidPartitioner +`_ +to generate `num_partitions` partitions. You can choose `other partitioners +`_ available in +Flower Datasets: + +.. code-block:: python + + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="ylecun/mnist", + partitioners={"train": partitioner}, + ) + partition = fds.load_partition(partition_id) + partition_splits = partition.train_test_split(test_size=0.2, seed=42) + + partition_splits["train"].set_format("numpy") + partition_splits["test"].set_format("numpy") + + train_partition = partition_splits["train"].map( + lambda img: {"img": img.reshape(-1, 28 * 28).squeeze().astype(np.float32) / 255.0}, + input_columns="image", + ) + test_partition = partition_splits["test"].map( + lambda img: {"img": img.reshape(-1, 28 * 28).squeeze().astype(np.float32) / 255.0}, + input_columns="image", + ) + + data = ( + train_partition["img"], + train_partition["label"].astype(np.uint32), + test_partition["img"], + test_partition["label"].astype(np.uint32), + ) + + train_images, train_labels, test_images, test_labels = map(mx.array, data) + +The Model +--------- + +We define the model as in the `centralized MLX example +`_, it's a simple MLP: + +.. code-block:: python + + class MLP(nn.Module): + """A simple MLP.""" + + def __init__( + self, num_layers: int, input_dim: int, hidden_dim: int, output_dim: int + ): + super().__init__() + layer_sizes = [input_dim] + [hidden_dim] * num_layers + [output_dim] + self.layers = [ + nn.Linear(idim, odim) + for idim, odim in zip(layer_sizes[:-1], layer_sizes[1:]) + ] + + def __call__(self, x): + for l in self.layers[:-1]: + x = mx.maximum(l(x), 0.0) + return self.layers[-1](x) + +We also define some utility functions to test our model and to iterate over batches. + +.. 
code-block:: python + + def loss_fn(model, X, y): + return mx.mean(nn.losses.cross_entropy(model(X), y)) + + + def eval_fn(model, X, y): + return mx.mean(mx.argmax(model(X), axis=1) == y) + + + def batch_iterate(batch_size, X, y): + perm = mx.array(np.random.permutation(y.size)) + for s in range(0, y.size, batch_size): + ids = perm[s : s + batch_size] + yield X[ids], y[ids] + +The ClientApp +~~~~~~~~~~~~~ + +The main changes we have to make to use `MLX` with `Flower` will be found in the +``get_params()`` and ``set_params()`` functions. Indeed, MLX doesn't provide an easy way +to convert the model parameters into a list of ``np.array`` objects (the format we need +for the serialization of the messages to work). + +The way MLX stores its parameters is as follows: + +.. code-block:: shell + + { + "layers": [ + {"weight": mlx.core.array, "bias": mlx.core.array}, + {"weight": mlx.core.array, "bias": mlx.core.array}, + ..., + {"weight": mlx.core.array, "bias": mlx.core.array} + ] + } + +Therefore, to get our list of ``np.array`` objects, we need to extract each array and +convert them into a NumPy array: + +.. code-block:: python + + def get_params(model): + layers = model.parameters()["layers"] + return [np.array(val) for layer in layers for _, val in layer.items()] + +For the ``set_params()`` function, we perform the reverse operation. We receive a list +of NumPy arrays and want to convert them into MLX parameters. Therefore, we iterate +through pairs of parameters and assign them to the `weight` and `bias` keys of each +layer dict: + +.. code-block:: python + + def set_params(model, parameters): + new_params = {} + new_params["layers"] = [ + {"weight": mx.array(parameters[i]), "bias": mx.array(parameters[i + 1])} + for i in range(0, len(parameters), 2) + ] + model.update(new_params) + +The rest of the functionality is directly inspired by the centralized case. The +``fit()`` method in the client trains the model using the local dataset: + +.. 
code-block:: python + + def fit(self, parameters, config): + self.set_parameters(parameters) + for _ in range(self.num_epochs): + for X, y in batch_iterate( + self.batch_size, self.train_images, self.train_labels + ): + _, grads = self.loss_and_grad_fn(self.model, X, y) + self.optimizer.update(self.model, grads) + mx.eval(self.model.parameters(), self.optimizer.state) + return self.get_parameters(config={}), len(self.train_images), {} + +Here, after updating the parameters, we perform the training as in the centralized case, +and return the new parameters. + +And for the ``evaluate()`` method of the client: + +.. code-block:: python + + def evaluate(self, parameters, config): + self.set_parameters(parameters) + accuracy = eval_fn(self.model, self.test_images, self.test_labels) + loss = loss_fn(self.model, self.test_images, self.test_labels) + return loss.item(), len(self.test_images), {"accuracy": accuracy.item()} + +We also begin by updating the parameters with the ones sent by the server, and then we +compute the loss and accuracy using the functions defined above. In the constructor of +the ``FlowerClient`` we instantiate the `MLP` model as well as other components such as +the optimizer. + +Putting everything together we have: + +.. 
code-block:: python + + class FlowerClient(NumPyClient): + def __init__( + self, + data, + num_layers, + hidden_dim, + num_classes, + batch_size, + learning_rate, + num_epochs, + ): + self.num_layers = num_layers + self.hidden_dim = hidden_dim + self.num_classes = num_classes + self.batch_size = batch_size + self.learning_rate = learning_rate + self.num_epochs = num_epochs + + self.train_images, self.train_labels, self.test_images, self.test_labels = data + self.model = MLP( + num_layers, self.train_images.shape[-1], hidden_dim, num_classes + ) + self.optimizer = optim.SGD(learning_rate=learning_rate) + self.loss_and_grad_fn = nn.value_and_grad(self.model, loss_fn) + self.num_epochs = num_epochs + self.batch_size = batch_size + + def get_parameters(self, config): + return get_params(self.model) + + def set_parameters(self, parameters): + set_params(self.model, parameters) + + def fit(self, parameters, config): + self.set_parameters(parameters) + for _ in range(self.num_epochs): + for X, y in batch_iterate( + self.batch_size, self.train_images, self.train_labels + ): + _, grads = self.loss_and_grad_fn(self.model, X, y) + self.optimizer.update(self.model, grads) + mx.eval(self.model.parameters(), self.optimizer.state) + return self.get_parameters(config={}), len(self.train_images), {} + + def evaluate(self, parameters, config): + self.set_parameters(parameters) + accuracy = eval_fn(self.model, self.test_images, self.test_labels) + loss = loss_fn(self.model, self.test_images, self.test_labels) + return loss.item(), len(self.test_images), {"accuracy": accuracy.item()} + +Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` defined above by +means of a ``client_fn()`` callback. Note that ``context`` enables you to get access to +hyperparameters defined in ``pyproject.toml`` to configure the run. 
In this tutorial we +access, among other hyperparameters, the ``local-epochs`` setting to control the number +of epochs a ``ClientApp`` will perform when running the ``fit()`` method. + +.. code-block:: python + + def client_fn(context: Context): + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + data = load_data(partition_id, num_partitions) + + num_layers = context.run_config["num-layers"] + hidden_dim = context.run_config["hidden-dim"] + num_classes = 10 + batch_size = context.run_config["batch-size"] + learning_rate = context.run_config["lr"] + num_epochs = context.run_config["local-epochs"] + + # Return Client instance + return FlowerClient( + data, num_layers, hidden_dim, num_classes, batch_size, learning_rate, num_epochs + ).to_client() + + + # Flower ClientApp + app = ClientApp(client_fn) + +The ServerApp ++++++++++++++ + +To construct a ``ServerApp``, we define a ``server_fn()`` callback with an identical +signature to that of ``client_fn()``, but the return type is `ServerAppComponents +`_ +as opposed to `Client +`_. In this +example we use the ``FedAvg`` strategy. + +.. code-block:: python + + def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] + + # Define strategy + strategy = FedAvg() + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + + # Create ServerApp + app = ServerApp(server_fn=server_fn) + +Congratulations! You've successfully built and run your first federated learning system. + +.. note:: + + Check the `source code + `_ of the extended + version of this tutorial in ``examples/quickstart-mlx`` in the Flower GitHub + repository. 
diff --git a/doc/source/tutorial-quickstart-pandas.rst b/doc/source/tutorial-quickstart-pandas.rst index bb9cb1b28b54..00d831a15736 100644 --- a/doc/source/tutorial-quickstart-pandas.rst +++ b/doc/source/tutorial-quickstart-pandas.rst @@ -1,12 +1,12 @@ .. _quickstart-pandas: - Quickstart Pandas ================= .. meta:: - :description: Check out this Federated Learning quickstart tutorial for using Flower with Pandas to perform Federated Analytics. + :description: Check out this Federated Learning quickstart tutorial for using Flower with Pandas to perform Federated Analytics. Let's build a federated analytics system using Pandas and Flower! -Please refer to the `full code example `_ to learn more. +Please refer to the `full code example +`_ to learn more. diff --git a/doc/source/tutorial-quickstart-pytorch-lightning.rst b/doc/source/tutorial-quickstart-pytorch-lightning.rst index acfbecf41260..089865a2969d 100644 --- a/doc/source/tutorial-quickstart-pytorch-lightning.rst +++ b/doc/source/tutorial-quickstart-pytorch-lightning.rst @@ -1,12 +1,118 @@ .. _quickstart-pytorch-lightning: - Quickstart PyTorch Lightning ============================ -.. meta:: - :description: Check out this Federated Learning quickstart tutorial for using Flower with PyTorch Lightning to train an Auto Encoder model on MNIST. +In this federated learning tutorial we will learn how to train an AutoEncoder model on +MNIST using Flower and PyTorch Lightning. It is recommended to create a virtual +environment and run everything within a :doc:`virtualenv +`. + +Then, clone the code example directly from GitHub: + +.. code-block:: shell + + git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/quickstart-pytorch-lightning . \ + && rm -rf _tmp && cd quickstart-pytorch-lightning + +This will create a new directory called `quickstart-pytorch-lightning` containing the +following files: + +.. 
code-block:: shell + + quickstart-pytorch-lightning + ├── pytorchlightning_example + │ ├── client_app.py # Defines your ClientApp + │ ├── server_app.py # Defines your ServerApp + │ └── task.py # Defines your model, training and data loading + ├── pyproject.toml # Project metadata like dependencies and configs + └── README.md + +Next, activate your environment, then run: + +.. code-block:: shell + + # Navigate to the example directory + $ cd path/to/quickstart-pytorch-lightning + + # Install project and dependencies + $ pip install -e . + +By default, Flower Simulation Engine will be started and it will create a federation of +4 nodes using `FedAvg +`_ +as the aggregation strategy. The dataset will be partitioned using Flower Dataset's +`IidPartitioner +`_. +To run the project, do: + +.. code-block:: shell + + # Run with default arguments + $ flwr run . + +With default arguments you will see an output like this one: + +.. code-block:: shell + + Loading project configuration... + Success + INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout + INFO : + INFO : [INIT] + INFO : Using initial global parameters provided by strategy + INFO : Starting evaluation of initial global parameters + INFO : Evaluation returned no results (`None`) + INFO : + INFO : [ROUND 1] + INFO : configure_fit: strategy sampled 2 clients (out of 4) + INFO : aggregate_evaluate: received 2 results and 0 failures + WARNING : No evaluate_metrics_aggregation_fn provided + INFO : + INFO : [ROUND 2] + INFO : configure_fit: strategy sampled 2 clients (out of 4) + INFO : aggregate_fit: received 2 results and 0 failures + INFO : configure_evaluate: strategy sampled 2 clients (out of 4) + INFO : aggregate_evaluate: received 2 results and 0 failures + INFO : + INFO : [ROUND 3] + INFO : configure_fit: strategy sampled 2 clients (out of 4) + INFO : aggregate_fit: received 2 results and 0 failures + INFO : configure_evaluate: strategy sampled 2 clients (out of 4) + INFO : 
aggregate_evaluate: received 2 results and 0 failures + INFO : + INFO : [SUMMARY] + INFO : Run finished 3 round(s) in 136.92s + INFO : History (loss, distributed): + INFO : round 1: 0.04982871934771538 + INFO : round 2: 0.046457378193736076 + INFO : round 3: 0.04506748169660568 + INFO : + +Each simulated `ClientApp` (two per round) will also log a summary of their local +training process. Expect this output to be similar to: + +.. code-block:: shell + + # The left part indicates the process ID running the `ClientApp` + (ClientAppActor pid=38155) ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ + (ClientAppActor pid=38155) ┃ Test metric ┃ DataLoader 0 ┃ + (ClientAppActor pid=38155) ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ + (ClientAppActor pid=38155) │ test_loss │ 0.045175597071647644 │ + (ClientAppActor pid=38155) └───────────────────────────┴───────────────────────────┘ + +You can also override the parameters defined in the ``[tool.flwr.app.config]`` section +in ``pyproject.toml`` like this: + +.. code-block:: shell + + # Override some arguments + $ flwr run . --run-config num-server-rounds=5 -Let's build a horizontal federated learning system using PyTorch Lightning and Flower! +.. note:: -Please refer to the `full code example `_ to learn more. + Check the `source code + `_ + of this tutorial in ``examples/quickstart-pytorch-lightning`` in the Flower GitHub + repository. diff --git a/doc/source/tutorial-quickstart-pytorch.rst b/doc/source/tutorial-quickstart-pytorch.rst index 895590808a2b..6b99e378d086 100644 --- a/doc/source/tutorial-quickstart-pytorch.rst +++ b/doc/source/tutorial-quickstart-pytorch.rst @@ -1,123 +1,163 @@ .. _quickstart-pytorch: - Quickstart PyTorch ================== -.. meta:: - :description: Check out this Federated Learning quickstart tutorial for using Flower with PyTorch to train a CNN model on MNIST. - -.. 
youtube:: jOmmuzMIQ4c - :width: 100% +In this federated learning tutorial we will learn how to train a Convolutional Neural +Network on CIFAR-10 using Flower and PyTorch. It is recommended to create a virtual +environment and run everything within a :doc:`virtualenv +`. -In this tutorial we will learn how to train a Convolutional Neural Network on CIFAR10 using Flower and PyTorch. +Let's use `flwr new` to create a complete Flower+PyTorch project. It will generate all +the files needed to run, by default with the Flower Simulation Engine, a federation of +10 nodes using `FedAvg +`_. +The dataset will be partitioned using Flower Dataset's `IidPartitioner +`_. -First of all, it is recommended to create a virtual environment and run everything within a :doc:`virtualenv `. +Now that we have a rough idea of what this example is about, let's get started. First, +install Flower in your new environment: -Our example consists of one *server* and two *clients* all having the same model. +.. code-block:: shell -*Clients* are responsible for generating individual weight-updates for the model based on their local datasets. -These updates are then sent to the *server* which will aggregate them to produce a better model. Finally, the *server* sends this improved version of the model back to each *client*. -A complete cycle of weight updates is called a *round*. + # In a new Python environment + $ pip install flwr -Now that we have a rough idea of what is going on, let's get started. We first need to install Flower. You can do this by running : +Then, run the command below. You will be prompted to select one of the available +templates (choose ``PyTorch``), give a name to your project, and type in your developer +name: .. 
code-block:: shell - $ pip install flwr + $ flwr new -Since we want to use PyTorch to solve a computer vision task, let's go ahead and install PyTorch and the **torchvision** library: +After running it you'll notice a new directory with your project name has been created. +It should have the following structure: .. code-block:: shell - $ pip install torch torchvision + + ├── + │ ├── __init__.py + │ ├── client_app.py # Defines your ClientApp + │ ├── server_app.py # Defines your ServerApp + │ └── task.py # Defines your model, training and data loading + ├── pyproject.toml # Project metadata like dependencies and configs + └── README.md +If you haven't yet installed the project and its dependencies, you can do so by: -Flower Client -------------- - -Now that we have all our dependencies installed, let's run a simple distributed training with two clients and one server. Our training procedure and network architecture are based on PyTorch's `Deep Learning with PyTorch `_. +.. code-block:: shell -In a file called :code:`client.py`, import Flower and PyTorch related packages: + # From the directory where your pyproject.toml is + $ pip install -e . -.. code-block:: python +To run the project, do: - from collections import OrderedDict +.. code-block:: shell - import torch - import torch.nn as nn - import torch.nn.functional as F - import torchvision.transforms as transforms - from torch.utils.data import DataLoader - from torchvision.datasets import CIFAR10 + # Run with default arguments + $ flwr run . - import flwr as fl +With default arguments you will see an output like this one: -In addition, we define the device allocation in PyTorch with: +.. code-block:: shell -.. code-block:: python + Loading project configuration... + Success + WARNING : FAB ID is not provided; the default ClientApp will be loaded. 
+ INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout + INFO : + INFO : [INIT] + INFO : Using initial global parameters provided by strategy + INFO : Evaluating initial global parameters + INFO : + INFO : [ROUND 1] + INFO : configure_fit: strategy sampled 5 clients (out of 10) + INFO : aggregate_fit: received 5 results and 0 failures + WARNING : No fit_metrics_aggregation_fn provided + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + WARNING : No evaluate_metrics_aggregation_fn provided + INFO : + INFO : [ROUND 2] + INFO : configure_fit: strategy sampled 5 clients (out of 10) + INFO : aggregate_fit: received 5 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [ROUND 3] + INFO : configure_fit: strategy sampled 5 clients (out of 10) + INFO : aggregate_fit: received 5 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [SUMMARY] + INFO : Run finished 3 round(s) in 21.35s + INFO : History (loss, distributed): + INFO : round 1: 2.2978184528648855 + INFO : round 2: 2.173852103948593 + INFO : round 3: 2.039920600131154 + INFO : + +You can also override the parameters defined in the ``[tool.flwr.app.config]`` section +in ``pyproject.toml`` like this: - DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") +.. code-block:: shell -We use PyTorch to load CIFAR10, a popular colored image classification dataset for machine learning. The PyTorch :code:`DataLoader()` downloads the training and test data that are then normalized. + # Override some arguments + $ flwr run . --run-config "num-server-rounds=5 local-epochs=3" -.. 
code-block:: python +What follows is an explanation of each component in the project you just created: +dataset partition, the model, defining the ``ClientApp`` and defining the ``ServerApp``. - def load_data(): - """Load CIFAR-10 (training and test set).""" - transform = transforms.Compose( - [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] - ) - trainset = CIFAR10(".", train=True, download=True, transform=transform) - testset = CIFAR10(".", train=False, download=True, transform=transform) - trainloader = DataLoader(trainset, batch_size=32, shuffle=True) - testloader = DataLoader(testset, batch_size=32) - num_examples = {"trainset" : len(trainset), "testset" : len(testset)} - return trainloader, testloader, num_examples +The Data +-------- -Define the loss and optimizer with PyTorch. The training of the dataset is done by looping over the dataset, measure the corresponding loss and optimize it. +This tutorial uses `Flower Datasets `_ to easily +download and partition the `CIFAR-10` dataset. In this example you'll make use of the +`IidPartitioner +`_ +to generate `num_partitions` partitions. You can choose `other partitioners +`_ available in +Flower Datasets. Each ``ClientApp`` will call this function to create dataloaders with +the data that correspond to their data partition. .. 
code-block:: python - def train(net, trainloader, epochs): - """Train the network on the training set.""" - criterion = torch.nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9) - for _ in range(epochs): - for images, labels in trainloader: - images, labels = images.to(DEVICE), labels.to(DEVICE) - optimizer.zero_grad() - loss = criterion(net(images), labels) - loss.backward() - optimizer.step() + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="uoft-cs/cifar10", + partitioners={"train": partitioner}, + ) + partition = fds.load_partition(partition_id) + # Divide data on each node: 80% train, 20% test + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) + pytorch_transforms = Compose([ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) -Define then the validation of the machine learning network. We loop over the test set and measure the loss and accuracy of the test set. -.. code-block:: python + def apply_transforms(batch): + """Apply transforms to the partition from FederatedDataset.""" + batch["img"] = [pytorch_transforms(img) for img in batch["img"]] + return batch - def test(net, testloader): - """Validate the network on the entire test set.""" - criterion = torch.nn.CrossEntropyLoss() - correct, total, loss = 0, 0, 0.0 - with torch.no_grad(): - for data in testloader: - images, labels = data[0].to(DEVICE), data[1].to(DEVICE) - outputs = net(images) - loss += criterion(outputs, labels).item() - _, predicted = torch.max(outputs.data, 1) - total += labels.size(0) - correct += (predicted == labels).sum().item() - accuracy = correct / total - return loss, accuracy -After defining the training and testing of a PyTorch machine learning model, we use the functions for the Flower clients. 
+ partition_train_test = partition_train_test.with_transform(apply_transforms) + trainloader = DataLoader(partition_train_test["train"], batch_size=32, shuffle=True) + testloader = DataLoader(partition_train_test["test"], batch_size=32) + +The Model +--------- -The Flower clients will use a simple CNN adapted from 'PyTorch: A 60 Minute Blitz': +We defined a simple Convolutional Neural Network (CNN), but feel free to replace it with +a more sophisticated model if you'd like: .. code-block:: python class Net(nn.Module): - def __init__(self) -> None: + """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz')""" + + def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) @@ -126,148 +166,201 @@ The Flower clients will use a simple CNN adapted from 'PyTorch: A 60 Minute Blit self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) - def forward(self, x: torch.Tensor) -> torch.Tensor: + def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, 16 * 5 * 5) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) - x = self.fc3(x) - return x - - # Load model and data - net = Net().to(DEVICE) - trainloader, testloader, num_examples = load_data() - -After loading the data set with :code:`load_data()` we define the Flower interface. - -The Flower server interacts with clients through an interface called -:code:`Client`. When the server selects a particular client for training, it -sends training instructions over the network. The client receives those -instructions and calls one of the :code:`Client` methods to run your code -(i.e., to train the neural network we defined earlier). - -Flower provides a convenience class called :code:`NumPyClient` which makes it -easier to implement the :code:`Client` interface when your workload uses PyTorch. -Implementing :code:`NumPyClient` usually means defining the following methods -(:code:`set_parameters` is optional though): - -#. 
:code:`get_parameters` - * return the model weight as a list of NumPy ndarrays -#. :code:`set_parameters` (optional) - * update the local model weights with the parameters received from the server -#. :code:`fit` - * set the local model weights - * train the local model - * receive the updated local model weights -#. :code:`evaluate` - * test the local model - -which can be implemented in the following way: + return self.fc3(x) + +In addition to defining the model architecture, we also include two utility functions to +perform both training (i.e. ``train()``) and evaluation (i.e. ``test()``) using the +above model. These functions should look fairly familiar if you have some prior +experience with PyTorch. Note these functions do not have anything specific to Flower. +That being said, the training function will normally be called, as we'll see later, from +a Flower client passing its own data. In summary, your clients can use standard +training/testing functions to perform local training or evaluation: + +.. 
code-block:: python + + def train(net, trainloader, epochs, device): + """Train the model on the training set.""" + net.to(device) # move model to GPU if available + criterion = torch.nn.CrossEntropyLoss().to(device) + optimizer = torch.optim.SGD(net.parameters(), lr=0.1, momentum=0.9) + net.train() + running_loss = 0.0 + for _ in range(epochs): + for batch in trainloader: + images = batch["img"] + labels = batch["label"] + optimizer.zero_grad() + loss = criterion(net(images.to(device)), labels.to(device)) + loss.backward() + optimizer.step() + running_loss += loss.item() + + avg_trainloss = running_loss / len(trainloader) + return avg_trainloss + + + def test(net, testloader, device): + """Validate the model on the test set.""" + net.to(device) + criterion = torch.nn.CrossEntropyLoss() + correct, loss = 0, 0.0 + with torch.no_grad(): + for batch in testloader: + images = batch["img"].to(device) + labels = batch["label"].to(device) + outputs = net(images) + loss += criterion(outputs, labels).item() + correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() + accuracy = correct / len(testloader.dataset) + return loss, accuracy + +The ClientApp +------------- + +The main changes we have to make to use `PyTorch` with `Flower` will be found in the +``get_weights()`` and ``set_weights()`` functions. In ``get_weights()`` PyTorch model +parameters are extracted and represented as a list of NumPy arrays. The +``set_weights()`` function does the opposite: given a list of NumPy arrays it applies +them to an existing PyTorch model. Doing this is fairly easy in PyTorch. + +.. note:: + + The specific implementation of ``get_weights()`` and ``set_weights()`` depends on + the type of models you use. The ones shown below work for a wide range of PyTorch + models but you might need to adjust them if you have more exotic model + architectures. .. 
code-block:: python - class CifarClient(fl.client.NumPyClient): - def get_parameters(self, config): - return [val.cpu().numpy() for _, val in net.state_dict().items()] + def get_weights(net): + return [val.cpu().numpy() for _, val in net.state_dict().items()] - def set_parameters(self, parameters): - params_dict = zip(net.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) - net.load_state_dict(state_dict, strict=True) + + def set_weights(net, parameters): + params_dict = zip(net.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) + +The rest of the functionality is directly inspired by the centralized case. The +``fit()`` method in the client trains the model using the local dataset. Similarly, the +``evaluate()`` method is used to evaluate the model received on a held-out validation +set that the client might have: + +.. code-block:: python + + class FlowerClient(NumPyClient): + def __init__(self, net, trainloader, valloader, local_epochs): + self.net = net + self.trainloader = trainloader + self.valloader = valloader + self.local_epochs = local_epochs + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + self.net.to(device) def fit(self, parameters, config): - self.set_parameters(parameters) - train(net, trainloader, epochs=1) - return self.get_parameters(config={}), num_examples["trainset"], {} + set_weights(self.net, parameters) + results = train( + self.net, + self.trainloader, + self.valloader, + self.local_epochs, + self.device, + ) + return get_weights(self.net), len(self.trainloader.dataset), results def evaluate(self, parameters, config): - self.set_parameters(parameters) - loss, accuracy = test(net, testloader) - return float(loss), num_examples["testset"], {"accuracy": float(accuracy)} + set_weights(self.net, parameters) + loss, accuracy = test(self.net, self.valloader, 
self.device) + return loss, len(self.valloader.dataset), {"accuracy": accuracy} -We can now create an instance of our class :code:`CifarClient` and add one line -to actually run this client: +Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` defined above by +means of a ``client_fn()`` callback. Note that the `context` enables you to get access +to hyperparameters defined in your ``pyproject.toml`` to configure the run. In this +tutorial we access the `local-epochs` setting to control the number of epochs a +``ClientApp`` will perform when running the ``fit()`` method. You could define +additional hyperparameters in ``pyproject.toml`` and access them here. .. code-block:: python - fl.client.start_client(server_address="[::]:8080", client=CifarClient().to_client()) - -That's it for the client. We only have to implement :code:`Client` or -:code:`NumPyClient` and call :code:`fl.client.start_client()`. If you implement a client of type :code:`NumPyClient` you'll need to first call its :code:`to_client()` method. The string :code:`"[::]:8080"` tells the client which server to connect to. In our case we can run the server and the client on the same machine, therefore we use -:code:`"[::]:8080"`. If we run a truly federated workload with the server and -clients running on different machines, all that needs to change is the -:code:`server_address` we point the client at. + def client_fn(context: Context): + # Load model and data + net = Net() + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + trainloader, valloader = load_data(partition_id, num_partitions) + local_epochs = context.run_config["local-epochs"] -Flower Server -------------- + # Return Client instance + return FlowerClient(net, trainloader, valloader, local_epochs).to_client() -For simple workloads we can start a Flower server and leave all the -configuration possibilities at their default values. 
In a file named -:code:`server.py`, import Flower and start the server: -.. code-block:: python + # Flower ClientApp + app = ClientApp(client_fn) - import flwr as fl +The ServerApp +------------- - fl.server.start_server(config=fl.server.ServerConfig(num_rounds=3)) +To construct a ``ServerApp`` we define a ``server_fn()`` callback with an identical +signature to that of ``client_fn()`` but the return type is `ServerAppComponents +`_ +as opposed to a `Client +`_. In this +example we use the `FedAvg` strategy. To it we pass a randomly initialized model that will serve +as the global model to be federated. Note that the value of ``fraction_fit`` is read from +the run config. You can find the default value defined in the ``pyproject.toml``. -Train the model, federated! ---------------------------- +.. code-block:: python -With both client and server ready, we can now run everything and see federated -learning in action. FL systems usually have a server and multiple clients. We -therefore have to start the server first: + def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] + fraction_fit = context.run_config["fraction-fit"] + + # Initialize model parameters + ndarrays = get_weights(Net()) + parameters = ndarrays_to_parameters(ndarrays) + + # Define strategy + strategy = FedAvg( + fraction_fit=fraction_fit, + fraction_evaluate=1.0, + min_available_clients=2, + initial_parameters=parameters, + ) + config = ServerConfig(num_rounds=num_rounds) -.. code-block:: shell + return ServerAppComponents(strategy=strategy, config=config) - $ python server.py -Once the server is running we can start the clients in different terminals. -Open a new terminal and start the first client: + # Create ServerApp + app = ServerApp(server_fn=server_fn) -.. code-block:: shell +Congratulations! You've successfully built and run your first federated learning system. - $ python client.py +.. 
note:: -Open another terminal and start the second client: + Check the `source code + `_ of the + extended version of this tutorial in ``examples/quickstart-pytorch`` in the Flower + GitHub repository. -.. code-block:: shell +Video tutorial +-------------- - $ python client.py +.. note:: -Each client will have its own dataset. -You should now see how the training does in the very first terminal (the one that started the server): + The video shown below shows how to setup a PyTorch + Flower project using our + previously recommended APIs. A new video tutorial will be released that shows the + new APIs (as the content above does) -.. code-block:: shell +.. meta:: + :description: Check out this Federated Learning quickstart tutorial for using Flower with PyTorch to train a CNN model on MNIST. - INFO flower 2021-02-25 14:00:27,227 | app.py:76 | Flower server running (insecure, 3 rounds) - INFO flower 2021-02-25 14:00:27,227 | server.py:72 | Getting initial parameters - INFO flower 2021-02-25 14:01:15,881 | server.py:74 | Evaluating initial parameters - INFO flower 2021-02-25 14:01:15,881 | server.py:87 | [TIME] FL starting - DEBUG flower 2021-02-25 14:01:41,310 | server.py:165 | fit_round: strategy sampled 2 clients (out of 2) - DEBUG flower 2021-02-25 14:02:00,256 | server.py:177 | fit_round received 2 results and 0 failures - DEBUG flower 2021-02-25 14:02:00,262 | server.py:139 | evaluate: strategy sampled 2 clients - DEBUG flower 2021-02-25 14:02:03,047 | server.py:149 | evaluate received 2 results and 0 failures - DEBUG flower 2021-02-25 14:02:03,049 | server.py:165 | fit_round: strategy sampled 2 clients (out of 2) - DEBUG flower 2021-02-25 14:02:23,908 | server.py:177 | fit_round received 2 results and 0 failures - DEBUG flower 2021-02-25 14:02:23,915 | server.py:139 | evaluate: strategy sampled 2 clients - DEBUG flower 2021-02-25 14:02:27,120 | server.py:149 | evaluate received 2 results and 0 failures - DEBUG flower 2021-02-25 14:02:27,122 | server.py:165 | 
fit_round: strategy sampled 2 clients (out of 2) - DEBUG flower 2021-02-25 14:03:04,660 | server.py:177 | fit_round received 2 results and 0 failures - DEBUG flower 2021-02-25 14:03:04,671 | server.py:139 | evaluate: strategy sampled 2 clients - DEBUG flower 2021-02-25 14:03:09,273 | server.py:149 | evaluate received 2 results and 0 failures - INFO flower 2021-02-25 14:03:09,273 | server.py:122 | [TIME] FL finished in 113.39180790000046 - INFO flower 2021-02-25 14:03:09,274 | app.py:109 | app_fit: losses_distributed [(1, 650.9747924804688), (2, 526.2535400390625), (3, 473.76959228515625)] - INFO flower 2021-02-25 14:03:09,274 | app.py:110 | app_fit: accuracies_distributed [] - INFO flower 2021-02-25 14:03:09,274 | app.py:111 | app_fit: losses_centralized [] - INFO flower 2021-02-25 14:03:09,274 | app.py:112 | app_fit: accuracies_centralized [] - DEBUG flower 2021-02-25 14:03:09,276 | server.py:139 | evaluate: strategy sampled 2 clients - DEBUG flower 2021-02-25 14:03:11,852 | server.py:149 | evaluate received 2 results and 0 failures - INFO flower 2021-02-25 14:03:11,852 | app.py:121 | app_evaluate: federated loss: 473.76959228515625 - INFO flower 2021-02-25 14:03:11,852 | app.py:122 | app_evaluate: results [('ipv6:[::1]:36602', EvaluateRes(loss=351.4906005859375, num_examples=10000, accuracy=0.0, metrics={'accuracy': 0.6067})), ('ipv6:[::1]:36604', EvaluateRes(loss=353.92742919921875, num_examples=10000, accuracy=0.0, metrics={'accuracy': 0.6005}))] - INFO flower 2021-02-25 14:03:27,514 | app.py:127 | app_evaluate: failures [] - -Congratulations! -You've successfully built and run your first federated learning system. -The full `source code `_ for this example can be found in :code:`examples/quickstart-pytorch`. +.. 
youtube:: jOmmuzMIQ4c + :width: 100% diff --git a/doc/source/tutorial-quickstart-scikitlearn.rst b/doc/source/tutorial-quickstart-scikitlearn.rst index 93322842cc70..56bdf18cad17 100644 --- a/doc/source/tutorial-quickstart-scikitlearn.rst +++ b/doc/source/tutorial-quickstart-scikitlearn.rst @@ -1,77 +1,89 @@ .. _quickstart-scikitlearn: - Quickstart scikit-learn ======================= .. meta:: - :description: Check out this Federated Learning quickstart tutorial for using Flower with scikit-learn to train a linear regression model. + :description: Check out this Federated Learning quickstart tutorial for using Flower with scikit-learn to train a logistic regression model. -In this tutorial, we will learn how to train a :code:`Logistic Regression` model on MNIST using Flower and scikit-learn. +In this tutorial, we will learn how to train a ``Logistic Regression`` model on MNIST +using Flower and scikit-learn. -It is recommended to create a virtual environment and run everything within this :doc:`virtualenv `. +It is recommended to create a virtual environment and run everything within this +:doc:`virtualenv `. Our example consists of one *server* and two *clients* all having the same model. -*Clients* are responsible for generating individual model parameter updates for the model based on their local datasets. -These updates are then sent to the *server* which will aggregate them to produce an updated global model. Finally, the *server* sends this improved version of the model back to each *client*. -A complete cycle of parameters updates is called a *round*. +*Clients* are responsible for generating individual model parameter updates for the +model based on their local datasets. These updates are then sent to the *server* which +will aggregate them to produce an updated global model. Finally, the *server* sends this +improved version of the model back to each *client*. A complete cycle of parameter +updates is called a *round*.
-Now that we have a rough idea of what is going on, let's get started. We first need to install Flower. You can do this by running: +Now that we have a rough idea of what is going on, let's get started. We first need to +install Flower. You can do this by running: .. code-block:: shell - $ pip install flwr + $ pip install flwr Since we want to use scikit-learn, let's go ahead and install it: .. code-block:: shell - $ pip install scikit-learn + $ pip install scikit-learn Or simply install all dependencies using Poetry: .. code-block:: shell - $ poetry install - + $ poetry install Flower Client ------------- -Now that we have all our dependencies installed, let's run a simple distributed training with two clients and one server. -However, before setting up the client and server, we will define all functionalities that we need for our federated learning setup within :code:`utils.py`. The :code:`utils.py` contains different functions defining all the machine learning basics: +Now that we have all our dependencies installed, let's run a simple distributed training +with two clients and one server. However, before setting up the client and server, we +will define all functionalities that we need for our federated learning setup within +``utils.py``. 
The ``utils.py`` contains different functions defining all the machine +learning basics: -* :code:`get_model_parameters()` - * Returns the parameters of a :code:`sklearn` LogisticRegression model -* :code:`set_model_params()` - * Sets the parameters of a :code:`sklearn` LogisticRegression model -* :code:`set_initial_params()` - * Initializes the model parameters that the Flower server will ask for +- ``get_model_parameters()`` + - Returns the parameters of a ``sklearn`` LogisticRegression model +- ``set_model_params()`` + - Sets the parameters of a ``sklearn`` LogisticRegression model +- ``set_initial_params()`` + - Initializes the model parameters that the Flower server will ask for -Please check out :code:`utils.py` `here `_ for more details. -The pre-defined functions are used in the :code:`client.py` and imported. The :code:`client.py` also requires to import several packages such as Flower and scikit-learn: +Please check out ``utils.py`` `here +`_ for +more details. The pre-defined functions are used in the ``client.py`` and imported. The +``client.py`` also requires to import several packages such as Flower and scikit-learn: .. code-block:: python - import argparse - import warnings - - from sklearn.linear_model import LogisticRegression - from sklearn.metrics import log_loss - - import flwr as fl - import utils - from flwr_datasets import FederatedDataset + import argparse + import warnings + + from sklearn.linear_model import LogisticRegression + from sklearn.metrics import log_loss + + import flwr as fl + import utils + from flwr_datasets import FederatedDataset -Prior to local training, we need to load the MNIST dataset, a popular image classification dataset of handwritten digits for machine learning, and partition the dataset for FL. This can be conveniently achieved using `Flower Datasets `_. -The :code:`FederatedDataset.load_partition()` method loads the partitioned training set for each partition ID defined in the :code:`--partition-id` argument. 
+Prior to local training, we need to load the MNIST dataset, a popular image +classification dataset of handwritten digits for machine learning, and partition the +dataset for FL. This can be conveniently achieved using `Flower Datasets +`_. The ``FederatedDataset.load_partition()`` method +loads the partitioned training set for each partition ID defined in the +``--partition-id`` argument. .. code-block:: python if __name__ == "__main__": N_CLIENTS = 10 - + parser = argparse.ArgumentParser(description="Flower") parser.add_argument( "--partition-id", @@ -82,17 +94,17 @@ The :code:`FederatedDataset.load_partition()` method loads the partitioned train ) args = parser.parse_args() partition_id = args.partition_id - + fds = FederatedDataset(dataset="mnist", partitioners={"train": N_CLIENTS}) - + dataset = fds.load_partition(partition_id, "train").with_format("numpy") X, y = dataset["image"].reshape((len(dataset), -1)), dataset["label"] - + X_train, X_test = X[: int(0.8 * len(X))], X[int(0.8 * len(X)) :] y_train, y_test = y[: int(0.8 * len(y))], y[int(0.8 * len(y)) :] - -Next, the logistic regression model is defined and initialized with :code:`utils.set_initial_params()`. +Next, the logistic regression model is defined and initialized with +``utils.set_initial_params()``. .. code-block:: python @@ -104,28 +116,27 @@ Next, the logistic regression model is defined and initialized with :code:`utils utils.set_initial_params(model) -The Flower server interacts with clients through an interface called -:code:`Client`. When the server selects a particular client for training, it -sends training instructions over the network. The client receives those -instructions and calls one of the :code:`Client` methods to run your code -(i.e., to fit the logistic regression we defined earlier). - -Flower provides a convenience class called :code:`NumPyClient` which makes it -easier to implement the :code:`Client` interface when your workload uses scikit-learn. 
-Implementing :code:`NumPyClient` usually means defining the following methods -(:code:`set_parameters` is optional though): - -#. :code:`get_parameters` - * return the model weight as a list of NumPy ndarrays -#. :code:`set_parameters` (optional) - * update the local model weights with the parameters received from the server - * is directly imported with :code:`utils.set_model_params()` -#. :code:`fit` - * set the local model weights - * train the local model - * receive the updated local model weights -#. :code:`evaluate` - * test the local model +The Flower server interacts with clients through an interface called ``Client``. When +the server selects a particular client for training, it sends training instructions over +the network. The client receives those instructions and calls one of the ``Client`` +methods to run your code (i.e., to fit the logistic regression we defined earlier). + +Flower provides a convenience class called ``NumPyClient`` which makes it easier to +implement the ``Client`` interface when your workload uses scikit-learn. Implementing +``NumPyClient`` usually means defining the following methods (``set_parameters`` is +optional though): + +1. ``get_parameters`` + - return the model weight as a list of NumPy ndarrays +2. ``set_parameters`` (optional) + - update the local model weights with the parameters received from the server + - is directly imported with ``utils.set_model_params()`` +3. ``fit`` + - set the local model weights + - train the local model + - return the updated local model weights +4. 
``evaluate`` + - test the local model The methods can be implemented in the following way: @@ -149,27 +160,29 @@ The methods can be implemented in the following way: accuracy = model.score(X_test, y_test) return loss, len(X_test), {"accuracy": accuracy} - -We can now create an instance of our class :code:`MnistClient` and add one line -to actually run this client: +We can now create an instance of our class ``MnistClient`` and add one line to actually +run this client: .. code-block:: python fl.client.start_client("0.0.0.0:8080", client=MnistClient().to_client()) -That's it for the client. We only have to implement :code:`Client` or -:code:`NumPyClient` and call :code:`fl.client.start_client()`. If you implement a client of type :code:`NumPyClient` you'll need to first call its :code:`to_client()` method. The string :code:`"0.0.0.0:8080"` tells the client which server to connect to. In our case we can run the server and the client on the same machine, therefore we use -:code:`"0.0.0.0:8080"`. If we run a truly federated workload with the server and -clients running on different machines, all that needs to change is the -:code:`server_address` we pass to the client. +That's it for the client. We only have to implement ``Client`` or ``NumPyClient`` and +call ``fl.client.start_client()``. If you implement a client of type ``NumPyClient`` +you'll need to first call its ``to_client()`` method. The string ``"0.0.0.0:8080"`` +tells the client which server to connect to. In our case we can run the server and the +client on the same machine, therefore we use ``"0.0.0.0:8080"``. If we run a truly +federated workload with the server and clients running on different machines, all that +needs to change is the ``server_address`` we pass to the client. Flower Server ------------- -The following Flower server is a little bit more advanced and returns an evaluation function for the server-side evaluation. 
-First, we import again all required libraries such as Flower and scikit-learn. +The following Flower server is a little bit more advanced and returns an evaluation +function for the server-side evaluation. First, we import again all required libraries +such as Flower and scikit-learn. -:code:`server.py`, import Flower and start the server: +``server.py``, import Flower and start the server: .. code-block:: python @@ -179,12 +192,14 @@ First, we import again all required libraries such as Flower and scikit-learn. from sklearn.metrics import log_loss from sklearn.linear_model import LogisticRegression from typing import Dict - + from flwr_datasets import FederatedDataset -The number of federated learning rounds is set in :code:`fit_round()` and the evaluation is defined in :code:`get_evaluate_fn()`. -The evaluation function is called after each federated learning round and gives you information about loss and accuracy. -Note that we also make use of Flower Datasets here to load the test split of the MNIST dataset for server-side evaluation. +The number of federated learning rounds is set in ``fit_round()`` and the evaluation is +defined in ``get_evaluate_fn()``. The evaluation function is called after each federated +learning round and gives you information about loss and accuracy. Note that we also make +use of Flower Datasets here to load the test split of the MNIST dataset for server-side +evaluation. .. code-block:: python @@ -210,7 +225,13 @@ Note that we also make use of Flower Datasets here to load the test split of the return evaluate -The :code:`main` contains the server-side parameter initialization :code:`utils.set_initial_params()` as well as the aggregation strategy :code:`fl.server.strategy:FedAvg()`. The strategy is the default one, federated averaging (or FedAvg), with two clients and evaluation after each federated learning round. 
The server can be started with the command :code:`fl.server.start_server(server_address="0.0.0.0:8080", strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`. +The ``main`` contains the server-side parameter initialization +``utils.set_initial_params()`` as well as the aggregation strategy +``fl.server.strategy:FedAvg()``. The strategy is the default one, federated averaging +(or FedAvg), with two clients and evaluation after each federated learning round. The +server can be started with the command +``fl.server.start_server(server_address="0.0.0.0:8080", strategy=strategy, +config=fl.server.ServerConfig(num_rounds=3))``. .. code-block:: python @@ -223,21 +244,25 @@ The :code:`main` contains the server-side parameter initialization :code:`utils. evaluate_fn=get_evaluate_fn(model), on_fit_config_fn=fit_round, ) - fl.server.start_server(server_address="0.0.0.0:8080", strategy=strategy, config=fl.server.ServerConfig(num_rounds=3)) - + fl.server.start_server( + server_address="0.0.0.0:8080", + strategy=strategy, + config=fl.server.ServerConfig(num_rounds=3), + ) Train the model, federated! --------------------------- -With both client and server ready, we can now run everything and see federated -learning in action. Federated learning systems usually have a server and multiple clients. We, therefore, have to start the server first: +With both client and server ready, we can now run everything and see federated learning +in action. Federated learning systems usually have a server and multiple clients. We, +therefore, have to start the server first: .. code-block:: shell $ python3 server.py -Once the server is running we can start the clients in different terminals. -Open a new terminal and start the first client: +Once the server is running we can start the clients in different terminals. Open a new +terminal and start the first client: .. 
code-block:: shell @@ -249,8 +274,8 @@ Open another terminal and start the second client: $ python3 client.py -Each client will have its own dataset. -You should now see how the training does in the very first terminal (the one that started the server): +Each client will have its own dataset. You should now see how the training does in the +very first terminal (the one that started the server): .. code-block:: shell @@ -283,6 +308,7 @@ You should now see how the training does in the very first terminal (the one tha INFO flower 2022-01-13 13:43:21,232 | app.py:122 | app_evaluate: results [('ipv4:127.0.0.1:53980', EvaluateRes(loss=0.5843629240989685, num_examples=10000, accuracy=0.0, metrics={'accuracy': 0.8217})), ('ipv4:127.0.0.1:53982', EvaluateRes(loss=0.5843629240989685, num_examples=10000, accuracy=0.0, metrics={'accuracy': 0.8217}))] INFO flower 2022-01-13 13:43:21,232 | app.py:127 | app_evaluate: failures [] -Congratulations! -You've successfully built and run your first federated learning system. -The full `source code `_ for this example can be found in :code:`examples/sklearn-logreg-mnist`. +Congratulations! You've successfully built and run your first federated learning system. +The full `source code +`_ for this +example can be found in ``examples/sklearn-logreg-mnist``. diff --git a/doc/source/tutorial-quickstart-tensorflow.rst b/doc/source/tutorial-quickstart-tensorflow.rst index bd63eb461d21..66cf69de6390 100644 --- a/doc/source/tutorial-quickstart-tensorflow.rst +++ b/doc/source/tutorial-quickstart-tensorflow.rst @@ -1,171 +1,290 @@ .. _quickstart-tensorflow: - Quickstart TensorFlow ===================== -.. meta:: - :description: Check out this Federated Learning quickstart tutorial for using Flower with TensorFlow to train a MobilNetV2 model on CIFAR-10. +In this tutorial we will learn how to train a Convolutional Neural Network on CIFAR-10 +using the Flower framework and TensorFlow. 
First of all, it is recommended to create a +virtual environment and run everything within a :doc:`virtualenv +`. + +Let's use `flwr new` to create a complete Flower+TensorFlow project. It will generate +all the files needed to run, by default with the Flower Simulation Engine, a federation +of 10 nodes using `FedAvg +`_. +The dataset will be partitioned using Flower Dataset's `IidPartitioner +`_. + +Now that we have a rough idea of what this example is about, let's get started. First, +install Flower in your new environment: -.. youtube:: FGTc2TQq7VM - :width: 100% +.. code-block:: shell -Let's build a federated learning system in less than 20 lines of code! + # In a new Python environment + $ pip install flwr -Before Flower can be imported we have to install it: +Then, run the command below. You will be prompted to select one of the available +templates (choose ``TensorFlow``), give a name to your project, and type in your +developer name: .. code-block:: shell - $ pip install flwr + $ flwr new -Since we want to use the Keras API of TensorFlow (TF), we have to install TF as well: +After running it you'll notice a new directory with your project name has been created. +It should have the following structure: .. code-block:: shell - $ pip install tensorflow + + ├── + │ ├── __init__.py + │ ├── client_app.py # Defines your ClientApp + │ ├── server_app.py # Defines your ServerApp + │ └── task.py # Defines your model, training and data loading + ├── pyproject.toml # Project metadata like dependencies and configs + └── README.md +If you haven't yet installed the project and its dependencies, you can do so by: -Flower Client -------------- +.. code-block:: shell -Next, in a file called :code:`client.py`, import Flower and TensorFlow: + # From the directory where your pyproject.toml is + $ pip install -e . -.. code-block:: python +To run the project, do: - import flwr as fl - import tensorflow as tf +.. 
code-block:: shell -We use the Keras utilities of TF to load CIFAR10, a popular colored image classification -dataset for machine learning. The call to -:code:`tf.keras.datasets.cifar10.load_data()` downloads CIFAR10, caches it locally, -and then returns the entire training and test set as NumPy ndarrays. + # Run with default arguments + $ flwr run . -.. code-block:: python +With default arguments you will see an output like this one: + +.. code-block:: shell + + Loading project configuration... + Success + INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout + INFO : + INFO : [INIT] + INFO : Using initial global parameters provided by strategy + INFO : Starting evaluation of initial global parameters + INFO : Evaluation returned no results (`None`) + INFO : + INFO : [ROUND 1] + INFO : configure_fit: strategy sampled 10 clients (out of 10) + INFO : aggregate_fit: received 10 results and 0 failures + WARNING : No fit_metrics_aggregation_fn provided + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + WARNING : No evaluate_metrics_aggregation_fn provided + INFO : + INFO : [ROUND 2] + INFO : configure_fit: strategy sampled 10 clients (out of 10) + INFO : aggregate_fit: received 10 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [ROUND 3] + INFO : configure_fit: strategy sampled 10 clients (out of 10) + INFO : aggregate_fit: received 10 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [SUMMARY] + INFO : Run finished 3 round(s) in 31.31s + INFO : History (loss, distributed): + INFO : round 1: 1.9066195368766785 + INFO : round 2: 1.657227087020874 + INFO : round 3: 1.559039831161499 + INFO : + +You can also override the 
parameters defined in the ``[tool.flwr.app.config]`` section +in ``pyproject.toml`` like this: + +.. code-block:: shell + + # Override some arguments + $ flwr run . --run-config "num-server-rounds=5 batch-size=16" - (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data() +The Data +-------- -Next, we need a model. For the purpose of this tutorial, we use MobilNetV2 with 10 output classes: +This tutorial uses `Flower Datasets `_ to easily +download and partition the `CIFAR-10` dataset. In this example you'll make use of the +`IidPartitioner +`_ +to generate `num_partitions` partitions. You can choose `other partitioners +`_ available in +Flower Datasets. Each ``ClientApp`` will call this function to create the ``NumPy`` +arrays that correspond to their data partition. .. code-block:: python - model = tf.keras.applications.MobileNetV2((32, 32, 3), classes=10, weights=None) - model.compile("adam", "sparse_categorical_crossentropy", metrics=["accuracy"]) + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="uoft-cs/cifar10", + partitioners={"train": partitioner}, + ) + partition = fds.load_partition(partition_id, "train") + partition.set_format("numpy") -The Flower server interacts with clients through an interface called -:code:`Client`. When the server selects a particular client for training, it -sends training instructions over the network. The client receives those -instructions and calls one of the :code:`Client` methods to run your code -(i.e., to train the neural network we defined earlier). 
+ # Divide data on each node: 80% train, 20% test + partition = partition.train_test_split(test_size=0.2) + x_train, y_train = partition["train"]["img"] / 255.0, partition["train"]["label"] + x_test, y_test = partition["test"]["img"] / 255.0, partition["test"]["label"] -Flower provides a convenience class called :code:`NumPyClient` which makes it -easier to implement the :code:`Client` interface when your workload uses Keras. -The :code:`NumPyClient` interface defines three methods which can be -implemented in the following way: +The Model +--------- + +Next, we need a model. We defined a simple Convolutional Neural Network (CNN), but feel +free to replace it with a more sophisticated model if you'd like: .. code-block:: python - class CifarClient(fl.client.NumPyClient): - def get_parameters(self, config): - return model.get_weights() + def load_model(learning_rate: float = 0.001): + # Define a simple CNN for CIFAR-10 and set Adam optimizer + model = keras.Sequential( + [ + keras.Input(shape=(32, 32, 3)), + layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + layers.Flatten(), + layers.Dropout(0.5), + layers.Dense(10, activation="softmax"), + ] + ) + model.compile( + "adam", + loss="sparse_categorical_crossentropy", + metrics=["accuracy"], + ) + return model + +The ClientApp +------------- - def fit(self, parameters, config): - model.set_weights(parameters) - model.fit(x_train, y_train, epochs=1, batch_size=32, steps_per_epoch=3) - return model.get_weights(), len(x_train), {} +With `TensorFlow`, we can use the built-in ``get_weights()`` and ``set_weights()`` +functions, which simplifies the implementation with `Flower`. The rest of the +functionality in the ClientApp is directly inspired by the centralized case. The +``fit()`` method in the client trains the model using the local dataset. 
Similarly, the +``evaluate()`` method is used to evaluate the model received on a held-out validation +set that the client might have: - def evaluate(self, parameters, config): - model.set_weights(parameters) - loss, accuracy = model.evaluate(x_test, y_test) - return loss, len(x_test), {"accuracy": float(accuracy)} +.. code-block:: python + + class FlowerClient(NumPyClient): + def __init__(self, model, data, epochs, batch_size, verbose): + self.model = model + self.x_train, self.y_train, self.x_test, self.y_test = data + self.epochs = epochs + self.batch_size = batch_size + self.verbose = verbose + def fit(self, parameters, config): + self.model.set_weights(parameters) + self.model.fit( + self.x_train, + self.y_train, + epochs=self.epochs, + batch_size=self.batch_size, + verbose=self.verbose, + ) + return self.model.get_weights(), len(self.x_train), {} -We can now create an instance of our class :code:`CifarClient` and add one line -to actually run this client: + def evaluate(self, parameters, config): + self.model.set_weights(parameters) + loss, accuracy = self.model.evaluate(self.x_test, self.y_test, verbose=0) + return loss, len(self.x_test), {"accuracy": accuracy} + +Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` defined above by +means of a ``client_fn()`` callback. Note that the `context` enables you to get access +to hyperparameters defined in your ``pyproject.toml`` to configure the run. For example, +in this tutorial we access the `local-epochs` setting to control the number of epochs a +``ClientApp`` will perform when running the ``fit()`` method, in addition to +`batch-size`. You could define additional hyperparameters in ``pyproject.toml`` and +access them here. .. 
code-block:: python - fl.client.start_client(server_address="[::]:8080", client=CifarClient().to_client()) + def client_fn(context: Context): + # Load model and data + net = load_model() + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + data = load_data(partition_id, num_partitions) + epochs = context.run_config["local-epochs"] + batch_size = context.run_config["batch-size"] + verbose = context.run_config.get("verbose") -That's it for the client. We only have to implement :code:`Client` or -:code:`NumPyClient` and call :code:`fl.client.start_client()`. If you implement a client of type :code:`NumPyClient` you'll need to first call its :code:`to_client()` method. The string :code:`"[::]:8080"` tells the client which server to connect to. In our case we can run the server and the client on the same machine, therefore we use -:code:`"[::]:8080"`. If we run a truly federated workload with the server and -clients running on different machines, all that needs to change is the -:code:`server_address` we point the client at. + # Return Client instance + return FlowerClient(net, data, epochs, batch_size, verbose).to_client() -Flower Server + # Flower ClientApp + app = ClientApp(client_fn=client_fn) + +The ServerApp ------------- -For simple workloads we can start a Flower server and leave all the -configuration possibilities at their default values. In a file named -:code:`server.py`, import Flower and start the server: +To construct a ``ServerApp`` we define a ``server_fn()`` callback with an identical +signature to that of ``client_fn()`` but the return type is `ServerAppComponents +`_ +as opposed to a `Client +`_. In this +example we use the `FedAvg`. To it we pass a randomly initialized model that will serve +as the global model to federate. .. 
code-block:: python - import flwr as fl + def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] - fl.server.start_server(config=fl.server.ServerConfig(num_rounds=3)) + # Get parameters to initialize global model + parameters = ndarrays_to_parameters(load_model().get_weights()) + # Define strategy + strategy = FedAvg( + fraction_fit=1.0, + fraction_evaluate=1.0, + min_available_clients=2, + initial_parameters=parameters, + ) + config = ServerConfig(num_rounds=num_rounds) -Train the model, federated! ---------------------------- + return ServerAppComponents(strategy=strategy, config=config) -With both client and server ready, we can now run everything and see federated -learning in action. FL systems usually have a server and multiple clients. We -therefore have to start the server first: -.. code-block:: shell + # Create ServerApp + app = ServerApp(server_fn=server_fn) - $ python server.py +Congratulations! You've successfully built and run your first federated learning system. -Once the server is running we can start the clients in different terminals. -Open a new terminal and start the first client: +.. note:: -.. code-block:: shell + Check the source code of the extended version of this tutorial in + |quickstart_tf_link|_ in the Flower GitHub repository. - $ python client.py +.. |quickstart_tf_link| replace:: ``examples/quickstart-tensorflow`` -Open another terminal and start the second client: +.. _quickstart_tf_link: https://github.com/adap/flower/blob/main/examples/quickstart-tensorflow -.. code-block:: shell +Video tutorial +-------------- - $ python client.py +.. note:: -Each client will have its own dataset. + The video shown below shows how to setup a TensorFlow + Flower project using our + previously recommended APIs.
A new video tutorial will be released that shows the + new APIs (as the content above does) -You should now see how the training does in the very first terminal (the one -that started the server): - -.. code-block:: shell +.. meta:: + :description: Check out this Federated Learning quickstart tutorial for using Flower with TensorFlow to train a CNN model on CIFAR-10. - INFO flower 2021-02-25 14:15:46,741 | app.py:76 | Flower server running (insecure, 3 rounds) - INFO flower 2021-02-25 14:15:46,742 | server.py:72 | Getting initial parameters - INFO flower 2021-02-25 14:16:01,770 | server.py:74 | Evaluating initial parameters - INFO flower 2021-02-25 14:16:01,770 | server.py:87 | [TIME] FL starting - DEBUG flower 2021-02-25 14:16:12,341 | server.py:165 | fit_round: strategy sampled 2 clients (out of 2) - DEBUG flower 2021-02-25 14:21:17,235 | server.py:177 | fit_round received 2 results and 0 failures - DEBUG flower 2021-02-25 14:21:17,512 | server.py:139 | evaluate: strategy sampled 2 clients - DEBUG flower 2021-02-25 14:21:29,628 | server.py:149 | evaluate received 2 results and 0 failures - DEBUG flower 2021-02-25 14:21:29,696 | server.py:165 | fit_round: strategy sampled 2 clients (out of 2) - DEBUG flower 2021-02-25 14:25:59,917 | server.py:177 | fit_round received 2 results and 0 failures - DEBUG flower 2021-02-25 14:26:00,227 | server.py:139 | evaluate: strategy sampled 2 clients - DEBUG flower 2021-02-25 14:26:11,457 | server.py:149 | evaluate received 2 results and 0 failures - DEBUG flower 2021-02-25 14:26:11,530 | server.py:165 | fit_round: strategy sampled 2 clients (out of 2) - DEBUG flower 2021-02-25 14:30:43,389 | server.py:177 | fit_round received 2 results and 0 failures - DEBUG flower 2021-02-25 14:30:43,630 | server.py:139 | evaluate: strategy sampled 2 clients - DEBUG flower 2021-02-25 14:30:53,384 | server.py:149 | evaluate received 2 results and 0 failures - INFO flower 2021-02-25 14:30:53,384 | server.py:122 | [TIME] FL finished in 
891.6143046000007 - INFO flower 2021-02-25 14:30:53,385 | app.py:109 | app_fit: losses_distributed [(1, 2.3196680545806885), (2, 2.3202896118164062), (3, 2.1818180084228516)] - INFO flower 2021-02-25 14:30:53,385 | app.py:110 | app_fit: accuracies_distributed [] - INFO flower 2021-02-25 14:30:53,385 | app.py:111 | app_fit: losses_centralized [] - INFO flower 2021-02-25 14:30:53,385 | app.py:112 | app_fit: accuracies_centralized [] - DEBUG flower 2021-02-25 14:30:53,442 | server.py:139 | evaluate: strategy sampled 2 clients - DEBUG flower 2021-02-25 14:31:02,848 | server.py:149 | evaluate received 2 results and 0 failures - INFO flower 2021-02-25 14:31:02,848 | app.py:121 | app_evaluate: federated loss: 2.1818180084228516 - INFO flower 2021-02-25 14:31:02,848 | app.py:125 | app_evaluate: results [('ipv4:127.0.0.1:57158', EvaluateRes(loss=2.1818180084228516, num_examples=10000, accuracy=0.0, metrics={'accuracy': 0.21610000729560852})), ('ipv4:127.0.0.1:57160', EvaluateRes(loss=2.1818180084228516, num_examples=10000, accuracy=0.0, metrics={'accuracy': 0.21610000729560852}))] - INFO flower 2021-02-25 14:31:02,848 | app.py:127 | app_evaluate: failures [] flower 2020-07-15 10:07:56,396 | app.py:77 | app_evaluate: failures [] - -Congratulations! You've successfully built and run your first federated -learning system. The full `source code `_ for this can be found in -:code:`examples/quickstart-tensorflow/client.py`. +.. youtube:: FGTc2TQq7VM + :width: 100% diff --git a/doc/source/tutorial-quickstart-xgboost.rst b/doc/source/tutorial-quickstart-xgboost.rst index 7ac055138814..fe15227fdf11 100644 --- a/doc/source/tutorial-quickstart-xgboost.rst +++ b/doc/source/tutorial-quickstart-xgboost.rst @@ -1,63 +1,75 @@ .. _quickstart-xgboost: - Quickstart XGBoost -===================== +================== .. meta:: - :description: Check out this Federated Learning quickstart tutorial for using Flower with XGBoost to train classification models on trees. 
+ :description: Check out this Federated Learning quickstart tutorial for using Flower with XGBoost to train classification models on trees. -.. youtube:: AY1vpXUpesc - :width: 100% +.. youtube:: AY1vpXUpesc + :width: 100% Federated XGBoost -------------------- +----------------- -EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient implementation of gradient-boosted decision tree (**GBDT**), that maximises the computational boundaries for boosted tree methods. -It's primarily designed to enhance both the performance and computational speed of machine learning models. -In XGBoost, trees are constructed concurrently, unlike the sequential approach taken by GBDT. +EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient implementation of +gradient-boosted decision tree (**GBDT**), that maximises the computational boundaries +for boosted tree methods. It's primarily designed to enhance both the performance and +computational speed of machine learning models. In XGBoost, trees are constructed +concurrently, unlike the sequential approach taken by GBDT. -Often, for tabular data on medium-sized datasets with fewer than 10k training examples, XGBoost surpasses the results of deep learning techniques. +Often, for tabular data on medium-sized datasets with fewer than 10k training examples, +XGBoost surpasses the results of deep learning techniques. Why federated XGBoost? -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Indeed, as the demand for data privacy and decentralized learning grows, there's an increasing requirement to implement federated XGBoost systems for specialised applications, like survival analysis and financial fraud detection. +~~~~~~~~~~~~~~~~~~~~~~ -Federated learning ensures that raw data remains on the local device, making it an attractive approach for sensitive domains where data security and privacy are paramount. 
-Given the robustness and efficiency of XGBoost, combining it with federated learning offers a promising solution for these specific challenges. +Indeed, as the demand for data privacy and decentralized learning grows, there's an +increasing requirement to implement federated XGBoost systems for specialised +applications, like survival analysis and financial fraud detection. -In this tutorial we will learn how to train a federated XGBoost model on HIGGS dataset using Flower and :code:`xgboost` package. -We use a simple example (`full code xgboost-quickstart `_) with two *clients* and one *server* -to demonstrate how federated XGBoost works, -and then we dive into a more complex example (`full code xgboost-comprehensive `_) to run various experiments. +Federated learning ensures that raw data remains on the local device, making it an +attractive approach for sensitive domains where data security and privacy are paramount. +Given the robustness and efficiency of XGBoost, combining it with federated learning +offers a promising solution for these specific challenges. +In this tutorial we will learn how to train a federated XGBoost model on HIGGS dataset +using Flower and ``xgboost`` package. We use a simple example (`full code +xgboost-quickstart +`_) with two +*clients* and one *server* to demonstrate how federated XGBoost works, and then we dive +into a more complex example (`full code xgboost-comprehensive +`_) to run +various experiments. Environment Setup --------------------- +----------------- -First of all, it is recommended to create a virtual environment and run everything within a :doc:`virtualenv `. +First of all, it is recommended to create a virtual environment and run everything +within a :doc:`virtualenv `. We first need to install Flower and Flower Datasets. You can do this by running : .. 
code-block:: shell - $ pip install flwr flwr-datasets + $ pip install flwr flwr-datasets -Since we want to use :code:`xgboost` package to build up XGBoost trees, let's go ahead and install :code:`xgboost`: +Since we want to use ``xgboost`` package to build up XGBoost trees, let's go ahead and +install ``xgboost``: .. code-block:: shell - $ pip install xgboost - + $ pip install xgboost Flower Client ------------------ +------------- -*Clients* are responsible for generating individual weight-updates for the model based on their local datasets. -Now that we have all our dependencies installed, let's run a simple distributed training with two clients and one server. +*Clients* are responsible for generating individual weight-updates for the model based +on their local datasets. Now that we have all our dependencies installed, let's run a +simple distributed training with two clients and one server. -In a file called :code:`client.py`, import xgboost, Flower, Flower Datasets and other related functions: +In a file called ``client.py``, import xgboost, Flower, Flower Datasets and other +related functions: .. code-block:: python @@ -84,9 +96,10 @@ In a file called :code:`client.py`, import xgboost, Flower, Flower Datasets and from flwr_datasets.partitioner import IidPartitioner Dataset partition and hyper-parameter selection -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Prior to local training, we require loading the HIGGS dataset from Flower Datasets and conduct data partitioning for FL: +Prior to local training, we require loading the HIGGS dataset from Flower Datasets and +conduct data partitioning for FL: .. 
code-block:: python @@ -96,29 +109,31 @@ Prior to local training, we require loading the HIGGS dataset from Flower Datase fds = FederatedDataset(dataset="jxie/higgs", partitioners={"train": partitioner}) # Load the partition for this `node_id` - partition = fds.load_partition(node_id=args.node_id, split="train") + partition = fds.load_partition(partition_id=args.partition_id, split="train") partition.set_format("numpy") -In this example, we split the dataset into two partitions with uniform distribution (:code:`IidPartitioner(num_partitions=2)`). -Then, we load the partition for the given client based on :code:`node_id`: +In this example, we split the dataset into 30 partitions with uniform distribution +(``IidPartitioner(num_partitions=30)``). Then, we load the partition for the given +client based on ``partition_id``: .. code-block:: python - # We first define arguments parser for user to specify the client/node ID. + # We first define arguments parser for user to specify the client/partition ID. parser = argparse.ArgumentParser() parser.add_argument( - "--node-id", + "--partition-id", default=0, type=int, - help="Node ID used for the current client.", + help="Partition ID used for the current client.", ) args = parser.parse_args() - # Load the partition for this `node_id`. - partition = fds.load_partition(idx=args.node_id, split="train") + # Load the partition for this `partition_id`. + partition = fds.load_partition(idx=args.partition_id, split="train") partition.set_format("numpy") -After that, we do train/test splitting on the given partition (client's local data), and transform data format for :code:`xgboost` package. +After that, we do train/test splitting on the given partition (client's local data), and +transform data format for ``xgboost`` package. .. 
code-block:: python @@ -131,7 +146,8 @@ After that, we do train/test splitting on the given partition (client's local da train_dmatrix = transform_dataset_to_dmatrix(train_data) valid_dmatrix = transform_dataset_to_dmatrix(valid_data) -The functions of :code:`train_test_split` and :code:`transform_dataset_to_dmatrix` are defined as below: +The functions of ``train_test_split`` and ``transform_dataset_to_dmatrix`` are defined +as below: .. code-block:: python @@ -171,29 +187,39 @@ Finally, we define the hyper-parameters used for XGBoost training. "tree_method": "hist", } -The :code:`num_local_round` represents the number of iterations for local tree boost. -We use CPU for the training in default. -One can shift it to GPU by setting :code:`tree_method` to :code:`gpu_hist`. -We use AUC as evaluation metric. - +The ``num_local_round`` represents the number of iterations for local tree boost. We use +CPU for the training by default. One can shift it to GPU by setting ``tree_method`` to +``gpu_hist``. We use AUC as evaluation metric. Flower client definition for XGBoost -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -After loading the dataset we define the Flower client. -We follow the general rule to define :code:`XgbClient` class inherited from :code:`fl.client.Client`. +After loading the dataset we define the Flower client. We follow the general rule to +define ``XgbClient`` class inherited from ``fl.client.Client``. .. 
code-block:: python class XgbClient(fl.client.Client): - def __init__(self): - self.bst = None - self.config = None + def __init__( + self, + train_dmatrix, + valid_dmatrix, + num_train, + num_val, + num_local_round, + params, + ): + self.train_dmatrix = train_dmatrix + self.valid_dmatrix = valid_dmatrix + self.num_train = num_train + self.num_val = num_val + self.num_local_round = num_local_round + self.params = params -The :code:`self.bst` is used to keep the Booster objects that remain consistent across rounds, -allowing them to store predictions from trees integrated in earlier rounds and maintain other essential data structures for training. +All required parameters defined above are passed to ``XgbClient``'s constructor. -Then, we override :code:`get_parameters`, :code:`fit` and :code:`evaluate` methods insides :code:`XgbClient` class as follows. +Then, we override ``get_parameters``, ``fit`` and ``evaluate`` methods inside +``XgbClient`` class as follows. .. code-block:: python @@ -207,34 +233,35 @@ Then, we override :code:`get_parameters`, :code:`fit` and :code:`evaluate` metho parameters=Parameters(tensor_type="", tensors=[]), ) -Unlike neural network training, XGBoost trees are not started from a specified random weights. -In this case, we do not use :code:`get_parameters` and :code:`set_parameters` to initialise model parameters for XGBoost. -As a result, let's return an empty tensor in :code:`get_parameters` when it is called by the server at the first round. +Unlike neural network training, XGBoost trees are not started from specified random +weights. In this case, we do not use ``get_parameters`` and ``set_parameters`` to +initialise model parameters for XGBoost. As a result, let's return an empty tensor in +``get_parameters`` when it is called by the server at the first round. .. 
code-block:: python def fit(self, ins: FitIns) -> FitRes: - if not self.bst: + global_round = int(ins.config["global_round"]) + if global_round == 1: # First round local training - log(INFO, "Start training at round 1") bst = xgb.train( - params, - train_dmatrix, - num_boost_round=num_local_round, - evals=[(valid_dmatrix, "validate"), (train_dmatrix, "train")], + self.params, + self.train_dmatrix, + num_boost_round=self.num_local_round, + evals=[(self.valid_dmatrix, "validate"), (self.train_dmatrix, "train")], ) - self.config = bst.save_config() - self.bst = bst else: + bst = xgb.Booster(params=self.params) for item in ins.parameters.tensors: global_model = bytearray(item) # Load global model into booster - self.bst.load_model(global_model) - self.bst.load_config(self.config) + bst.load_model(global_model) - bst = self._local_boost() + # Local training + bst = self._local_boost(bst) + # Save model local_model = bst.save_raw("json") local_model_bytes = bytes(local_model) @@ -244,74 +271,98 @@ As a result, let's return an empty tensor in :code:`get_parameters` when it is c message="OK", ), parameters=Parameters(tensor_type="", tensors=[local_model_bytes]), - num_examples=num_train, + num_examples=self.num_train, metrics={}, ) -In :code:`fit`, at the first round, we call :code:`xgb.train()` to build up the first set of trees. -the returned Booster object and config are stored in :code:`self.bst` and :code:`self.config`, respectively. -From the second round, we load the global model sent from server to :code:`self.bst`, -and then update model weights on local training data with function :code:`local_boost` as follows: +In ``fit``, at the first round, we call ``xgb.train()`` to build up the first set of +trees. From the second round, we load the global model sent from server to new build +Booster object, and then update model weights on local training data with function +``local_boost`` as follows: .. 
code-block:: python - def _local_boost(self): + def _local_boost(self, bst_input): # Update trees based on local training data. - for i in range(num_local_round): - self.bst.update(train_dmatrix, self.bst.num_boosted_rounds()) + for i in range(self.num_local_round): + bst_input.update(self.train_dmatrix, bst_input.num_boosted_rounds()) - # Extract the last N=num_local_round trees for sever aggregation - bst = self.bst[ - self.bst.num_boosted_rounds() - - num_local_round : self.bst.num_boosted_rounds() + # Bagging: extract the last N=num_local_round trees for server aggregation + bst = bst_input[ + bst_input.num_boosted_rounds() + - self.num_local_round : bst_input.num_boosted_rounds() ] -Given :code:`num_local_round`, we update trees by calling :code:`self.bst.update` method. -After training, the last :code:`N=num_local_round` trees will be extracted to send to the server. + return bst + +Given ``num_local_round``, we update trees by calling ``bst_input.update`` method. After +training, the last ``N=num_local_round`` trees will be extracted to send to the server. .. code-block:: python def evaluate(self, ins: EvaluateIns) -> EvaluateRes: - eval_results = self.bst.eval_set( - evals=[(valid_dmatrix, "valid")], - iteration=self.bst.num_boosted_rounds() - 1, + # Load global model + bst = xgb.Booster(params=self.params) + for para in ins.parameters.tensors: + para_b = bytearray(para) + bst.load_model(para_b) + + # Run evaluation + eval_results = bst.eval_set( + evals=[(self.valid_dmatrix, "valid")], + iteration=bst.num_boosted_rounds() - 1, ) auc = round(float(eval_results.split("\t")[1].split(":")[1]), 4) + global_round = ins.config["global_round"] + log(INFO, f"AUC = {auc} at round {global_round}") + return EvaluateRes( status=Status( code=Code.OK, message="OK", ), loss=0.0, - num_examples=num_val, + num_examples=self.num_val, metrics={"AUC": auc}, ) -In :code:`evaluate`, we call :code:`self.bst.eval_set` function to conduct evaluation on valid set. 
-The AUC value will be returned. +In ``evaluate``, after loading the global model, we call ``bst.eval_set`` function to +conduct evaluation on valid set. The AUC value will be returned. -Now, we can create an instance of our class :code:`XgbClient` and add one line to actually run this client: +Now, we can create an instance of our class ``XgbClient`` and add one line to actually +run this client: .. code-block:: python - fl.client.start_client(server_address="127.0.0.1:8080", client=XgbClient()) - -That's it for the client. We only have to implement :code:`Client`and call :code:`fl.client.start_client()`. -The string :code:`"[::]:8080"` tells the client which server to connect to. -In our case we can run the server and the client on the same machine, therefore we use -:code:`"[::]:8080"`. If we run a truly federated workload with the server and -clients running on different machines, all that needs to change is the -:code:`server_address` we point the client at. + fl.client.start_client( + server_address="127.0.0.1:8080", + client=XgbClient( + train_dmatrix, + valid_dmatrix, + num_train, + num_val, + num_local_round, + params, + ).to_client(), + ) +That's it for the client. We only have to implement ``Client`` and call +``fl.client.start_client()``. The string ``"[::]:8080"`` tells the client which server +to connect to. In our case we can run the server and the client on the same machine, +therefore we use ``"[::]:8080"``. If we run a truly federated workload with the server +and clients running on different machines, all that needs to change is the +``server_address`` we point the client at. Flower Server ------------------- +------------- -These updates are then sent to the *server* which will aggregate them to produce a better model. -Finally, the *server* sends this improved version of the model back to each *client* to finish a complete FL round. +These updates are then sent to the *server* which will aggregate them to produce a +better model. 
Finally, the *server* sends this improved version of the model back to +each *client* to finish a complete FL round. -In a file named :code:`server.py`, import Flower and FedXgbBagging from :code:`flwr.server.strategy`. +In a file named ``server.py``, import Flower and FedXgbBagging from +``flwr.server.strategy``. We first define a strategy for XGBoost bagging aggregation. @@ -325,8 +376,11 @@ We first define a strategy for XGBoost bagging aggregation. min_evaluate_clients=2, fraction_evaluate=1.0, evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation, + on_evaluate_config_fn=config_func, + on_fit_config_fn=config_func, ) + def evaluate_metrics_aggregation(eval_metrics): """Return an aggregated metric (AUC) for evaluation.""" total_num = sum([num for num, _ in eval_metrics]) @@ -336,8 +390,18 @@ We first define a strategy for XGBoost bagging aggregation. metrics_aggregated = {"AUC": auc_aggregated} return metrics_aggregated -We use two clients for this example. -An :code:`evaluate_metrics_aggregation` function is defined to collect and wighted average the AUC values from clients. + + def config_func(rnd: int) -> Dict[str, str]: + """Return a configuration with global epochs.""" + config = { + "global_round": str(rnd), + } + return config + +We use two clients for this example. An ``evaluate_metrics_aggregation`` function is +defined to collect and compute the weighted average of the AUC values from clients. The ``config_func`` +function returns the current FL round number to client's ``fit()`` and +``evaluate()`` methods. Then, we start the server: @@ -346,17 +410,18 @@ Then, we start the server: # Start Flower server fl.server.start_server( server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=num_rounds), + config=fl.server.ServerConfig(num_rounds=5), strategy=strategy, ) Tree-based bagging aggregation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You must be curious about how bagging aggregation works. 
Let's look into the details. -In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define :code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`. -Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` and :code:`evaluate` methods as follows: +In file ``flwr.server.strategy.fedxgb_bagging.py``, we define ``FedXgbBagging`` +inherited from ``flwr.server.strategy.FedAvg``. Then, we override the ``aggregate_fit``, +``aggregate_evaluate`` and ``evaluate`` methods as follows: .. code-block:: python @@ -451,7 +516,8 @@ Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` and :cod loss, metrics = eval_res return loss, metrics -In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost trees by calling :code:`aggregate()` function: +In ``aggregate_fit``, we sequentially aggregate the clients' XGBoost trees by calling +``aggregate()`` function: .. code-block:: python @@ -510,255 +576,273 @@ In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost trees b ) return tree_num, paral_tree_num -In this function, we first fetch the number of trees and the number of parallel trees for the current and previous model -by calling :code:`_get_tree_nums`. -Then, the fetched information will be aggregated. -After that, the trees (containing model weights) are aggregated to generate a new tree model. - -After traversal of all clients' models, a new global model is generated, -followed by the serialisation, and sending back to each client. +In this function, we first fetch the number of trees and the number of parallel trees +for the current and previous model by calling ``_get_tree_nums``. Then, the fetched +information will be aggregated. After that, the trees (containing model weights) are +aggregated to generate a new tree model. +After traversal of all clients' models, a new global model is generated, followed by the +serialisation, and sending back to each client. Launch Federated XGBoost! 
-------------------------------- +------------------------- -With both client and server ready, we can now run everything and see federated -learning in action. FL systems usually have a server and multiple clients. We -therefore have to start the server first: +With both client and server ready, we can now run everything and see federated learning +in action. FL systems usually have a server and multiple clients. We therefore have to +start the server first: .. code-block:: shell $ python3 server.py -Once the server is running we can start the clients in different terminals. -Open a new terminal and start the first client: +Once the server is running we can start the clients in different terminals. Open a new +terminal and start the first client: .. code-block:: shell - $ python3 client.py --node-id=0 + $ python3 client.py --partition-id=0 Open another terminal and start the second client: .. code-block:: shell - $ python3 client.py --node-id=1 + $ python3 client.py --partition-id=1 -Each client will have its own dataset. -You should now see how the training does in the very first terminal (the one that started the server): +Each client will have its own dataset. You should now see how the training does in the +very first terminal (the one that started the server): .. 
code-block:: shell - INFO flwr 2023-11-20 11:21:56,454 | app.py:163 | Starting Flower server, config: ServerConfig(num_rounds=5, round_timeout=None) - INFO flwr 2023-11-20 11:21:56,473 | app.py:176 | Flower ECE: gRPC server running (5 rounds), SSL is disabled - INFO flwr 2023-11-20 11:21:56,473 | server.py:89 | Initializing global parameters - INFO flwr 2023-11-20 11:21:56,473 | server.py:276 | Requesting initial parameters from one random client - INFO flwr 2023-11-20 11:22:38,302 | server.py:280 | Received initial parameters from one random client - INFO flwr 2023-11-20 11:22:38,302 | server.py:91 | Evaluating initial parameters - INFO flwr 2023-11-20 11:22:38,302 | server.py:104 | FL starting - DEBUG flwr 2023-11-20 11:22:38,302 | server.py:222 | fit_round 1: strategy sampled 2 clients (out of 2) - DEBUG flwr 2023-11-20 11:22:38,636 | server.py:236 | fit_round 1 received 2 results and 0 failures - DEBUG flwr 2023-11-20 11:22:38,643 | server.py:173 | evaluate_round 1: strategy sampled 2 clients (out of 2) - DEBUG flwr 2023-11-20 11:22:38,653 | server.py:187 | evaluate_round 1 received 2 results and 0 failures - DEBUG flwr 2023-11-20 11:22:38,653 | server.py:222 | fit_round 2: strategy sampled 2 clients (out of 2) - DEBUG flwr 2023-11-20 11:22:38,721 | server.py:236 | fit_round 2 received 2 results and 0 failures - DEBUG flwr 2023-11-20 11:22:38,745 | server.py:173 | evaluate_round 2: strategy sampled 2 clients (out of 2) - DEBUG flwr 2023-11-20 11:22:38,756 | server.py:187 | evaluate_round 2 received 2 results and 0 failures - DEBUG flwr 2023-11-20 11:22:38,756 | server.py:222 | fit_round 3: strategy sampled 2 clients (out of 2) - DEBUG flwr 2023-11-20 11:22:38,831 | server.py:236 | fit_round 3 received 2 results and 0 failures - DEBUG flwr 2023-11-20 11:22:38,868 | server.py:173 | evaluate_round 3: strategy sampled 2 clients (out of 2) - DEBUG flwr 2023-11-20 11:22:38,881 | server.py:187 | evaluate_round 3 received 2 results and 0 failures - DEBUG flwr 
2023-11-20 11:22:38,881 | server.py:222 | fit_round 4: strategy sampled 2 clients (out of 2) - DEBUG flwr 2023-11-20 11:22:38,960 | server.py:236 | fit_round 4 received 2 results and 0 failures - DEBUG flwr 2023-11-20 11:22:39,012 | server.py:173 | evaluate_round 4: strategy sampled 2 clients (out of 2) - DEBUG flwr 2023-11-20 11:22:39,026 | server.py:187 | evaluate_round 4 received 2 results and 0 failures - DEBUG flwr 2023-11-20 11:22:39,026 | server.py:222 | fit_round 5: strategy sampled 2 clients (out of 2) - DEBUG flwr 2023-11-20 11:22:39,111 | server.py:236 | fit_round 5 received 2 results and 0 failures - DEBUG flwr 2023-11-20 11:22:39,177 | server.py:173 | evaluate_round 5: strategy sampled 2 clients (out of 2) - DEBUG flwr 2023-11-20 11:22:39,193 | server.py:187 | evaluate_round 5 received 2 results and 0 failures - INFO flwr 2023-11-20 11:22:39,193 | server.py:153 | FL finished in 0.8905023969999988 - INFO flwr 2023-11-20 11:22:39,193 | app.py:226 | app_fit: losses_distributed [(1, 0), (2, 0), (3, 0), (4, 0), (5, 0)] - INFO flwr 2023-11-20 11:22:39,193 | app.py:227 | app_fit: metrics_distributed_fit {} - INFO flwr 2023-11-20 11:22:39,193 | app.py:228 | app_fit: metrics_distributed {'AUC': [(1, 0.7572), (2, 0.7705), (3, 0.77595), (4, 0.78), (5, 0.78385)]} - INFO flwr 2023-11-20 11:22:39,193 | app.py:229 | app_fit: losses_centralized [] - INFO flwr 2023-11-20 11:22:39,193 | app.py:230 | app_fit: metrics_centralized {} - -Congratulations! -You've successfully built and run your first federated XGBoost system. -The AUC values can be checked in :code:`metrics_distributed`. -One can see that the average AUC increases over FL rounds. - -The full `source code `_ for this example can be found in :code:`examples/xgboost-quickstart`. 
- + INFO : Starting Flower server, config: num_rounds=5, no round_timeout + INFO : Flower ECE: gRPC server running (5 rounds), SSL is disabled + INFO : [INIT] + INFO : Requesting initial parameters from one random client + INFO : Received initial parameters from one random client + INFO : Evaluating initial global parameters + INFO : + INFO : [ROUND 1] + INFO : configure_fit: strategy sampled 2 clients (out of 2) + INFO : aggregate_fit: received 2 results and 0 failures + INFO : configure_evaluate: strategy sampled 2 clients (out of 2) + INFO : aggregate_evaluate: received 2 results and 0 failures + INFO : + INFO : [ROUND 2] + INFO : configure_fit: strategy sampled 2 clients (out of 2) + INFO : aggregate_fit: received 2 results and 0 failures + INFO : configure_evaluate: strategy sampled 2 clients (out of 2) + INFO : aggregate_evaluate: received 2 results and 0 failures + INFO : + INFO : [ROUND 3] + INFO : configure_fit: strategy sampled 2 clients (out of 2) + INFO : aggregate_fit: received 2 results and 0 failures + INFO : configure_evaluate: strategy sampled 2 clients (out of 2) + INFO : aggregate_evaluate: received 2 results and 0 failures + INFO : + INFO : [ROUND 4] + INFO : configure_fit: strategy sampled 2 clients (out of 2) + INFO : aggregate_fit: received 2 results and 0 failures + INFO : configure_evaluate: strategy sampled 2 clients (out of 2) + INFO : aggregate_evaluate: received 2 results and 0 failures + INFO : + INFO : [ROUND 5] + INFO : configure_fit: strategy sampled 2 clients (out of 2) + INFO : aggregate_fit: received 2 results and 0 failures + INFO : configure_evaluate: strategy sampled 2 clients (out of 2) + INFO : aggregate_evaluate: received 2 results and 0 failures + INFO : + INFO : [SUMMARY] + INFO : Run finished 5 round(s) in 1.67s + INFO : History (loss, distributed): + INFO : round 1: 0 + INFO : round 2: 0 + INFO : round 3: 0 + INFO : round 4: 0 + INFO : round 5: 0 + INFO : History (metrics, distributed, evaluate): + INFO : {'AUC': [(1, 
0.76755), (2, 0.775), (3, 0.77935), (4, 0.7836), (5, 0.7872)]} + +Congratulations! You've successfully built and run your first federated XGBoost system. +The AUC values can be checked in ``metrics_distributed``. One can see that the average +AUC increases over FL rounds. + +The full `source code +`_ for this +example can be found in ``examples/xgboost-quickstart``. Comprehensive Federated XGBoost ------------------------------------ +------------------------------- -Now that you have known how federated XGBoost work with Flower, it's time to run some more comprehensive experiments by customising the experimental settings. -In the xgboost-comprehensive example (`full code `_), -we provide more options to define various experimental setups, including aggregation strategies, data partitioning and centralised/distributed evaluation. -We also support :doc:`Flower simulation ` making it easy to simulate large client cohorts in a resource-aware manner. -Let's take a look! +Now that you know how federated XGBoost works with Flower, it's time to run some +more comprehensive experiments by customising the experimental settings. In the +xgboost-comprehensive example (`full code +`_), we provide +more options to define various experimental setups, including aggregation strategies, +data partitioning and centralised/distributed evaluation. We also support :doc:`Flower +simulation ` making it easy to simulate large client cohorts in +a resource-aware manner. Let's take a look! Cyclic training ~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~ -In addition to bagging aggregation, we offer a cyclic training scheme, which performs FL in a client-by-client fashion. -Instead of aggregating multiple clients, there is only one single client participating in the training per round in the cyclic training scenario. -The trained local XGBoost trees will be passed to the next client as an initialised model for next round's boosting. 
+In addition to bagging aggregation, we offer a cyclic training scheme, which performs FL +in a client-by-client fashion. Instead of aggregating multiple clients, there is only +one single client participating in the training per round in the cyclic training +scenario. The trained local XGBoost trees will be passed to the next client as an +initialised model for next round's boosting. -To do this, we first customise a :code:`ClientManager` in :code:`server_utils.py`: +To do this, we first customise a ``ClientManager`` in ``server_utils.py``: .. code-block:: python - class CyclicClientManager(SimpleClientManager): - """Provides a cyclic client selection rule.""" - - def sample( - self, - num_clients: int, - min_num_clients: Optional[int] = None, - criterion: Optional[Criterion] = None, - ) -> List[ClientProxy]: - """Sample a number of Flower ClientProxy instances.""" - - # Block until at least num_clients are connected. - if min_num_clients is None: - min_num_clients = num_clients - self.wait_for(min_num_clients) - - # Sample clients which meet the criterion - available_cids = list(self.clients) - if criterion is not None: - available_cids = [ - cid for cid in available_cids if criterion.select(self.clients[cid]) - ] - - if num_clients > len(available_cids): - log( - INFO, - "Sampling failed: number of available clients" - " (%s) is less than number of requested clients (%s).", - len(available_cids), - num_clients, - ) - return [] - - # Return all available clients - return [self.clients[cid] for cid in available_cids] - -The customised :code:`ClientManager` samples all available clients in each FL round based on the order of connection to the server. -Then, we define a new strategy :code:`FedXgbCyclic` in :code:`flwr.server.strategy.fedxgb_cyclic.py`, -in order to sequentially select only one client in given round and pass the received model to next client. 
+ class CyclicClientManager(SimpleClientManager): + """Provides a cyclic client selection rule.""" + + def sample( + self, + num_clients: int, + min_num_clients: Optional[int] = None, + criterion: Optional[Criterion] = None, + ) -> List[ClientProxy]: + """Sample a number of Flower ClientProxy instances.""" + + # Block until at least num_clients are connected. + if min_num_clients is None: + min_num_clients = num_clients + self.wait_for(min_num_clients) + + # Sample clients which meet the criterion + available_cids = list(self.clients) + if criterion is not None: + available_cids = [ + cid for cid in available_cids if criterion.select(self.clients[cid]) + ] + + if num_clients > len(available_cids): + log( + INFO, + "Sampling failed: number of available clients" + " (%s) is less than number of requested clients (%s).", + len(available_cids), + num_clients, + ) + return [] + + # Return all available clients + return [self.clients[cid] for cid in available_cids] + +The customised ``ClientManager`` samples all available clients in each FL round based on +the order of connection to the server. Then, we define a new strategy ``FedXgbCyclic`` +in ``flwr.server.strategy.fedxgb_cyclic.py``, in order to sequentially select only one +client in given round and pass the received model to next client. .. 
code-block:: python - class FedXgbCyclic(FedAvg): - """Configurable FedXgbCyclic strategy implementation.""" - - # pylint: disable=too-many-arguments,too-many-instance-attributes, line-too-long - def __init__( - self, - **kwargs: Any, - ): - self.global_model: Optional[bytes] = None - super().__init__(**kwargs) - - def aggregate_fit( - self, - server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: - """Aggregate fit results using bagging.""" - if not results: - return None, {} - # Do not aggregate if there are failures and failures are not accepted - if not self.accept_failures and failures: - return None, {} - - # Fetch the client model from last round as global model - for _, fit_res in results: - update = fit_res.parameters.tensors - for bst in update: - self.global_model = bst - - return ( - Parameters(tensor_type="", tensors=[cast(bytes, self.global_model)]), - {}, - ) - -Unlike the original :code:`FedAvg`, we don't perform aggregation here. -Instead, we just make a copy of the received client model as global model by overriding :code:`aggregate_fit`. 
- -Also, the customised :code:`configure_fit` and :code:`configure_evaluate` methods ensure the clients to be sequentially selected given FL round: + class FedXgbCyclic(FedAvg): + """Configurable FedXgbCyclic strategy implementation.""" + + # pylint: disable=too-many-arguments,too-many-instance-attributes, line-too-long + def __init__( + self, + **kwargs: Any, + ): + self.global_model: Optional[bytes] = None + super().__init__(**kwargs) + + def aggregate_fit( + self, + server_round: int, + results: List[Tuple[ClientProxy, FitRes]], + failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], + ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + """Aggregate fit results using bagging.""" + if not results: + return None, {} + # Do not aggregate if there are failures and failures are not accepted + if not self.accept_failures and failures: + return None, {} + + # Fetch the client model from last round as global model + for _, fit_res in results: + update = fit_res.parameters.tensors + for bst in update: + self.global_model = bst + + return ( + Parameters(tensor_type="", tensors=[cast(bytes, self.global_model)]), + {}, + ) + +Unlike the original ``FedAvg``, we don't perform aggregation here. Instead, we just make +a copy of the received client model as global model by overriding ``aggregate_fit``. + +Also, the customised ``configure_fit`` and ``configure_evaluate`` methods ensure the +clients to be sequentially selected given FL round: .. 
code-block:: python - def configure_fit( - self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: - """Configure the next round of training.""" - config = {} - if self.on_fit_config_fn is not None: - # Custom fit config function provided - config = self.on_fit_config_fn(server_round) - fit_ins = FitIns(parameters, config) - - # Sample clients - sample_size, min_num_clients = self.num_fit_clients( - client_manager.num_available() - ) - clients = client_manager.sample( - num_clients=sample_size, - min_num_clients=min_num_clients, - ) - - # Sample the clients sequentially given server_round - sampled_idx = (server_round - 1) % len(clients) - sampled_clients = [clients[sampled_idx]] - - # Return client/config pairs - return [(client, fit_ins) for client in sampled_clients] - - def configure_evaluate( - self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: - """Configure the next round of evaluation.""" - # Do not configure federated evaluation if fraction eval is 0. 
- if self.fraction_evaluate == 0.0: - return [] - - # Parameters and config - config = {} - if self.on_evaluate_config_fn is not None: - # Custom evaluation config function provided - config = self.on_evaluate_config_fn(server_round) - evaluate_ins = EvaluateIns(parameters, config) - - # Sample clients - sample_size, min_num_clients = self.num_evaluation_clients( - client_manager.num_available() - ) - clients = client_manager.sample( - num_clients=sample_size, - min_num_clients=min_num_clients, - ) - - # Sample the clients sequentially given server_round - sampled_idx = (server_round - 1) % len(clients) - sampled_clients = [clients[sampled_idx]] - - # Return client/config pairs - return [(client, evaluate_ins) for client in sampled_clients] + def configure_fit( + self, server_round: int, parameters: Parameters, client_manager: ClientManager + ) -> List[Tuple[ClientProxy, FitIns]]: + """Configure the next round of training.""" + config = {} + if self.on_fit_config_fn is not None: + # Custom fit config function provided + config = self.on_fit_config_fn(server_round) + fit_ins = FitIns(parameters, config) + + # Sample clients + sample_size, min_num_clients = self.num_fit_clients(client_manager.num_available()) + clients = client_manager.sample( + num_clients=sample_size, + min_num_clients=min_num_clients, + ) + + # Sample the clients sequentially given server_round + sampled_idx = (server_round - 1) % len(clients) + sampled_clients = [clients[sampled_idx]] + + # Return client/config pairs + return [(client, fit_ins) for client in sampled_clients] + + def configure_evaluate( + self, server_round: int, parameters: Parameters, client_manager: ClientManager + ) -> List[Tuple[ClientProxy, EvaluateIns]]: + """Configure the next round of evaluation.""" + # Do not configure federated evaluation if fraction eval is 0. 
+ if self.fraction_evaluate == 0.0: + return [] + # Parameters and config + config = {} + if self.on_evaluate_config_fn is not None: + # Custom evaluation config function provided + config = self.on_evaluate_config_fn(server_round) + evaluate_ins = EvaluateIns(parameters, config) + + # Sample clients + sample_size, min_num_clients = self.num_evaluation_clients( + client_manager.num_available() + ) + clients = client_manager.sample( + num_clients=sample_size, + min_num_clients=min_num_clients, + ) + + # Sample the clients sequentially given server_round + sampled_idx = (server_round - 1) % len(clients) + sampled_clients = [clients[sampled_idx]] + + # Return client/config pairs + return [(client, evaluate_ins) for client in sampled_clients] Customised data partitioning -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -In :code:`dataset.py`, we have a function :code:`instantiate_partitioner` to instantiate the data partitioner -based on the given :code:`num_partitions` and :code:`partitioner_type`. -Currently, we provide four supported partitioner type to simulate the uniformity/non-uniformity in data quantity (uniform, linear, square, exponential). +In ``dataset.py``, we have a function ``instantiate_partitioner`` to instantiate the +data partitioner based on the given ``num_partitions`` and ``partitioner_type``. +Currently, we provide four supported partitioner type to simulate the +uniformity/non-uniformity in data quantity (uniform, linear, square, exponential). .. code-block:: python @@ -785,11 +869,10 @@ Currently, we provide four supported partitioner type to simulate the uniformity ) return partitioner - Customised centralised/distributed evaluation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -To facilitate centralised evaluation, we define a function in :code:`server_utils.py`: +To facilitate centralised evaluation, we define a function in ``server_utils.py``: .. 
code-block:: python @@ -821,105 +904,112 @@ To facilitate centralised evaluation, we define a function in :code:`server_util return evaluate_fn -This function returns a evaluation function which instantiates a :code:`Booster` object and loads the global model weights to it. -The evaluation is conducted by calling :code:`eval_set()` method, and the tested AUC value is reported. +This function returns a evaluation function which instantiates a ``Booster`` object and +loads the global model weights to it. The evaluation is conducted by calling +``eval_set()`` method, and the tested AUC value is reported. As for distributed evaluation on the clients, it's same as the quick-start example by -overriding the :code:`evaluate()` method insides the :code:`XgbClient` class in :code:`client_utils.py`. +overriding the ``evaluate()`` method insides the ``XgbClient`` class in +``client_utils.py``. Flower simulation -~~~~~~~~~~~~~~~~~~~~ -We also provide an example code (:code:`sim.py`) to use the simulation capabilities of Flower to simulate federated XGBoost training on either a single machine or a cluster of machines. - -.. 
code-block:: python +~~~~~~~~~~~~~~~~~ - from logging import INFO - import xgboost as xgb - from tqdm import tqdm - - import flwr as fl - from flwr_datasets import FederatedDataset - from flwr.common.logger import log - from flwr.server.strategy import FedXgbBagging, FedXgbCyclic - - from dataset import ( - instantiate_partitioner, - train_test_split, - transform_dataset_to_dmatrix, - separate_xy, - resplit, - ) - from utils import ( - sim_args_parser, - NUM_LOCAL_ROUND, - BST_PARAMS, - ) - from server_utils import ( - eval_config, - fit_config, - evaluate_metrics_aggregation, - get_evaluate_fn, - CyclicClientManager, - ) - from client_utils import XgbClient - -After importing all required packages, we define a :code:`main()` function to perform the simulation process: +We also provide an example code (``sim.py``) to use the simulation capabilities of +Flower to simulate federated XGBoost training on either a single machine or a cluster of +machines. .. code-block:: python - def main(): - # Parse arguments for experimental settings - args = sim_args_parser() + from logging import INFO + import xgboost as xgb + from tqdm import tqdm - # Load (HIGGS) dataset and conduct partitioning - partitioner = instantiate_partitioner( - partitioner_type=args.partitioner_type, num_partitions=args.pool_size + import flwr as fl + from flwr_datasets import FederatedDataset + from flwr.common.logger import log + from flwr.server.strategy import FedXgbBagging, FedXgbCyclic + + from dataset import ( + instantiate_partitioner, + train_test_split, + transform_dataset_to_dmatrix, + separate_xy, + resplit, ) - fds = FederatedDataset( - dataset="jxie/higgs", - partitioners={"train": partitioner}, - resplitter=resplit, + from utils import ( + sim_args_parser, + NUM_LOCAL_ROUND, + BST_PARAMS, ) + from server_utils import ( + eval_config, + fit_config, + evaluate_metrics_aggregation, + get_evaluate_fn, + CyclicClientManager, + ) + from client_utils import XgbClient - # Load centralised test 
set - if args.centralised_eval or args.centralised_eval_client: - log(INFO, "Loading centralised test set...") - test_data = fds.load_split("test") - test_data.set_format("numpy") - num_test = test_data.shape[0] - test_dmatrix = transform_dataset_to_dmatrix(test_data) - - # Load partitions and reformat data to DMatrix for xgboost - log(INFO, "Loading client local partitions...") - train_data_list = [] - valid_data_list = [] - - # Load and process all client partitions. This upfront cost is amortized soon - # after the simulation begins since clients wont need to preprocess their partition. - for node_id in tqdm(range(args.pool_size), desc="Extracting client partition"): - # Extract partition for client with node_id - partition = fds.load_partition(node_id=node_id, split="train") - partition.set_format("numpy") - - if args.centralised_eval_client: - # Use centralised test set for evaluation - train_data = partition - num_train = train_data.shape[0] - x_test, y_test = separate_xy(test_data) - valid_data_list.append(((x_test, y_test), num_test)) - else: - # Train/test splitting - train_data, valid_data, num_train, num_val = train_test_split( - partition, test_fraction=args.test_fraction, seed=args.seed - ) - x_valid, y_valid = separate_xy(valid_data) - valid_data_list.append(((x_valid, y_valid), num_val)) +After importing all required packages, we define a ``main()`` function to perform the +simulation process: - x_train, y_train = separate_xy(train_data) - train_data_list.append(((x_train, y_train), num_train)) +.. code-block:: python -We first load the dataset and perform data partitioning, and the pre-processed data is stored in a :code:`list`. -After the simulation begins, the clients won't need to pre-process their partitions again. 
+ def main(): + # Parse arguments for experimental settings + args = sim_args_parser() + + # Load (HIGGS) dataset and conduct partitioning + partitioner = instantiate_partitioner( + partitioner_type=args.partitioner_type, num_partitions=args.pool_size + ) + fds = FederatedDataset( + dataset="jxie/higgs", + partitioners={"train": partitioner}, + resplitter=resplit, + ) + + # Load centralised test set + if args.centralised_eval or args.centralised_eval_client: + log(INFO, "Loading centralised test set...") + test_data = fds.load_split("test") + test_data.set_format("numpy") + num_test = test_data.shape[0] + test_dmatrix = transform_dataset_to_dmatrix(test_data) + + # Load partitions and reformat data to DMatrix for xgboost + log(INFO, "Loading client local partitions...") + train_data_list = [] + valid_data_list = [] + + # Load and process all client partitions. This upfront cost is amortized soon + # after the simulation begins since clients wont need to preprocess their partition. + for node_id in tqdm(range(args.pool_size), desc="Extracting client partition"): + # Extract partition for client with node_id + partition = fds.load_partition(node_id=node_id, split="train") + partition.set_format("numpy") + + if args.centralised_eval_client: + # Use centralised test set for evaluation + train_data = partition + num_train = train_data.shape[0] + x_test, y_test = separate_xy(test_data) + valid_data_list.append(((x_test, y_test), num_test)) + else: + # Train/test splitting + train_data, valid_data, num_train, num_val = train_test_split( + partition, test_fraction=args.test_fraction, seed=args.seed + ) + x_valid, y_valid = separate_xy(valid_data) + valid_data_list.append(((x_valid, y_valid), num_val)) + + x_train, y_train = separate_xy(train_data) + train_data_list.append(((x_train, y_train), num_train)) + +We first load the dataset and perform data partitioning, and the pre-processed data is +stored in a ``list``. 
After the simulation begins, the clients won't need to pre-process +their partitions again. Then, we define the strategies and other hyper-parameters: @@ -929,21 +1019,21 @@ Then, we define the strategies and other hyper-parameters: if args.train_method == "bagging": # Bagging training strategy = FedXgbBagging( - evaluate_function=get_evaluate_fn(test_dmatrix) - if args.centralised_eval - else None, + evaluate_function=( + get_evaluate_fn(test_dmatrix) if args.centralised_eval else None + ), fraction_fit=(float(args.num_clients_per_round) / args.pool_size), min_fit_clients=args.num_clients_per_round, min_available_clients=args.pool_size, - min_evaluate_clients=args.num_evaluate_clients - if not args.centralised_eval - else 0, + min_evaluate_clients=( + args.num_evaluate_clients if not args.centralised_eval else 0 + ), fraction_evaluate=1.0 if not args.centralised_eval else 0.0, on_evaluate_config_fn=eval_config, on_fit_config_fn=fit_config, - evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation - if not args.centralised_eval - else None, + evaluate_metrics_aggregation_fn=( + evaluate_metrics_aggregation if not args.centralised_eval else None + ), ) else: # Cyclic training @@ -972,7 +1062,7 @@ Then, we define the strategies and other hyper-parameters: new_lr = params["eta"] / args.pool_size params.update({"eta": new_lr}) -After that, we start the simulation by calling :code:`fl.simulation.start_simulation`: +After that, we start the simulation by calling ``fl.simulation.start_simulation``: .. code-block:: python @@ -992,53 +1082,52 @@ After that, we start the simulation by calling :code:`fl.simulation.start_simula client_manager=CyclicClientManager() if args.train_method == "cyclic" else None, ) -One of key parameters for :code:`start_simulation` is :code:`client_fn` which returns a function to construct a client. -We define it as follows: +One of key parameters for ``start_simulation`` is ``client_fn`` which returns a function +to construct a client. 
We define it as follows: .. code-block:: python - def get_client_fn( - train_data_list, valid_data_list, train_method, params, num_local_round - ): - """Return a function to construct a client. - - The VirtualClientEngine will execute this function whenever a client is sampled by - the strategy to participate. - """ - - def client_fn(cid: str) -> fl.client.Client: - """Construct a FlowerClient with its own dataset partition.""" - x_train, y_train = train_data_list[int(cid)][0] - x_valid, y_valid = valid_data_list[int(cid)][0] + def get_client_fn( + train_data_list, valid_data_list, train_method, params, num_local_round + ): + """Return a function to construct a client. - # Reformat data to DMatrix - train_dmatrix = xgb.DMatrix(x_train, label=y_train) - valid_dmatrix = xgb.DMatrix(x_valid, label=y_valid) + The VirtualClientEngine will execute this function whenever a client is sampled by + the strategy to participate. + """ - # Fetch the number of examples - num_train = train_data_list[int(cid)][1] - num_val = valid_data_list[int(cid)][1] + def client_fn(cid: str) -> fl.client.Client: + """Construct a FlowerClient with its own dataset partition.""" + x_train, y_train = train_data_list[int(cid)][0] + x_valid, y_valid = valid_data_list[int(cid)][0] - # Create and return client - return XgbClient( - train_dmatrix, - valid_dmatrix, - num_train, - num_val, - num_local_round, - params, - train_method, - ) + # Reformat data to DMatrix + train_dmatrix = xgb.DMatrix(x_train, label=y_train) + valid_dmatrix = xgb.DMatrix(x_valid, label=y_valid) - return client_fn + # Fetch the number of examples + num_train = train_data_list[int(cid)][1] + num_val = valid_data_list[int(cid)][1] + # Create and return client + return XgbClient( + train_dmatrix, + valid_dmatrix, + num_train, + num_val, + num_local_round, + params, + train_method, + ) + return client_fn Arguments parser -~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~ -In :code:`utils.py`, we define the arguments parsers for clients, 
server and simulation, allowing users to specify different experimental settings. -Let's first see the sever side: +In ``utils.py``, we define the arguments parsers for clients, server and simulation, +allowing users to specify different experimental settings. Let's first see the sever +side: .. code-block:: python @@ -1046,190 +1135,192 @@ Let's first see the sever side: def server_args_parser(): - """Parse arguments to define experimental settings on server side.""" - parser = argparse.ArgumentParser() - - parser.add_argument( - "--train-method", - default="bagging", - type=str, - choices=["bagging", "cyclic"], - help="Training methods selected from bagging aggregation or cyclic training.", - ) - parser.add_argument( - "--pool-size", default=2, type=int, help="Number of total clients." - ) - parser.add_argument( - "--num-rounds", default=5, type=int, help="Number of FL rounds." - ) - parser.add_argument( - "--num-clients-per-round", - default=2, - type=int, - help="Number of clients participate in training each round.", - ) - parser.add_argument( - "--num-evaluate-clients", - default=2, - type=int, - help="Number of clients selected for evaluation.", - ) - parser.add_argument( - "--centralised-eval", - action="store_true", - help="Conduct centralised evaluation (True), or client evaluation on hold-out data (False).", - ) - - args = parser.parse_args() - return args - -This allows user to specify training strategies / the number of total clients / FL rounds / participating clients / clients for evaluation, -and evaluation fashion. Note that with :code:`--centralised-eval`, the sever will do centralised evaluation -and all functionalities for client evaluation will be disabled. 
+ """Parse arguments to define experimental settings on server side.""" + parser = argparse.ArgumentParser() + + parser.add_argument( + "--train-method", + default="bagging", + type=str, + choices=["bagging", "cyclic"], + help="Training methods selected from bagging aggregation or cyclic training.", + ) + parser.add_argument( + "--pool-size", default=2, type=int, help="Number of total clients." + ) + parser.add_argument( + "--num-rounds", default=5, type=int, help="Number of FL rounds." + ) + parser.add_argument( + "--num-clients-per-round", + default=2, + type=int, + help="Number of clients participate in training each round.", + ) + parser.add_argument( + "--num-evaluate-clients", + default=2, + type=int, + help="Number of clients selected for evaluation.", + ) + parser.add_argument( + "--centralised-eval", + action="store_true", + help="Conduct centralised evaluation (True), or client evaluation on hold-out data (False).", + ) + + args = parser.parse_args() + return args + +This allows user to specify training strategies / the number of total clients / FL +rounds / participating clients / clients for evaluation, and evaluation fashion. Note +that with ``--centralised-eval``, the sever will do centralised evaluation and all +functionalities for client evaluation will be disabled. Then, the argument parser on client side: .. code-block:: python def client_args_parser(): - """Parse arguments to define experimental settings on client side.""" - parser = argparse.ArgumentParser() - - parser.add_argument( - "--train-method", - default="bagging", - type=str, - choices=["bagging", "cyclic"], - help="Training methods selected from bagging aggregation or cyclic training.", - ) - parser.add_argument( - "--num-partitions", default=10, type=int, help="Number of partitions." 
- ) - parser.add_argument( - "--partitioner-type", - default="uniform", - type=str, - choices=["uniform", "linear", "square", "exponential"], - help="Partitioner types.", - ) - parser.add_argument( - "--node-id", - default=0, - type=int, - help="Node ID used for the current client.", - ) - parser.add_argument( - "--seed", default=42, type=int, help="Seed used for train/test splitting." - ) - parser.add_argument( - "--test-fraction", - default=0.2, - type=float, - help="Test fraction for train/test splitting.", - ) - parser.add_argument( - "--centralised-eval", - action="store_true", - help="Conduct evaluation on centralised test set (True), or on hold-out data (False).", - ) - parser.add_argument( - "--scaled-lr", - action="store_true", - help="Perform scaled learning rate based on the number of clients (True).", - ) - - args = parser.parse_args() - return args - -This defines various options for client data partitioning. -Besides, clients also have an option to conduct evaluation on centralised test set by setting :code:`--centralised-eval`, -as well as an option to perform scaled learning rate based on the number of clients by setting :code:`--scaled-lr`. + """Parse arguments to define experimental settings on client side.""" + parser = argparse.ArgumentParser() + + parser.add_argument( + "--train-method", + default="bagging", + type=str, + choices=["bagging", "cyclic"], + help="Training methods selected from bagging aggregation or cyclic training.", + ) + parser.add_argument( + "--num-partitions", default=10, type=int, help="Number of partitions." + ) + parser.add_argument( + "--partitioner-type", + default="uniform", + type=str, + choices=["uniform", "linear", "square", "exponential"], + help="Partitioner types.", + ) + parser.add_argument( + "--node-id", + default=0, + type=int, + help="Node ID used for the current client.", + ) + parser.add_argument( + "--seed", default=42, type=int, help="Seed used for train/test splitting." 
+ ) + parser.add_argument( + "--test-fraction", + default=0.2, + type=float, + help="Test fraction for train/test splitting.", + ) + parser.add_argument( + "--centralised-eval", + action="store_true", + help="Conduct evaluation on centralised test set (True), or on hold-out data (False).", + ) + parser.add_argument( + "--scaled-lr", + action="store_true", + help="Perform scaled learning rate based on the number of clients (True).", + ) + + args = parser.parse_args() + return args + +This defines various options for client data partitioning. Besides, clients also have an +option to conduct evaluation on centralised test set by setting ``--centralised-eval``, +as well as an option to perform scaled learning rate based on the number of clients by +setting ``--scaled-lr``. We also have an argument parser for simulation: .. code-block:: python - def sim_args_parser(): - """Parse arguments to define experimental settings on server side.""" - parser = argparse.ArgumentParser() - - parser.add_argument( - "--train-method", - default="bagging", - type=str, - choices=["bagging", "cyclic"], - help="Training methods selected from bagging aggregation or cyclic training.", - ) - - # Server side - parser.add_argument( - "--pool-size", default=5, type=int, help="Number of total clients." - ) - parser.add_argument( - "--num-rounds", default=30, type=int, help="Number of FL rounds." 
- ) - parser.add_argument( - "--num-clients-per-round", - default=5, - type=int, - help="Number of clients participate in training each round.", - ) - parser.add_argument( - "--num-evaluate-clients", - default=5, - type=int, - help="Number of clients selected for evaluation.", - ) - parser.add_argument( - "--centralised-eval", - action="store_true", - help="Conduct centralised evaluation (True), or client evaluation on hold-out data (False).", - ) - parser.add_argument( - "--num-cpus-per-client", - default=2, - type=int, - help="Number of CPUs used for per client.", - ) - - # Client side - parser.add_argument( - "--partitioner-type", - default="uniform", - type=str, - choices=["uniform", "linear", "square", "exponential"], - help="Partitioner types.", - ) - parser.add_argument( - "--seed", default=42, type=int, help="Seed used for train/test splitting." - ) - parser.add_argument( - "--test-fraction", - default=0.2, - type=float, - help="Test fraction for train/test splitting.", - ) - parser.add_argument( - "--centralised-eval-client", - action="store_true", - help="Conduct evaluation on centralised test set (True), or on hold-out data (False).", - ) - parser.add_argument( - "--scaled-lr", - action="store_true", - help="Perform scaled learning rate based on the number of clients (True).", - ) - - args = parser.parse_args() - return args + def sim_args_parser(): + """Parse arguments to define experimental settings on server side.""" + parser = argparse.ArgumentParser() + + parser.add_argument( + "--train-method", + default="bagging", + type=str, + choices=["bagging", "cyclic"], + help="Training methods selected from bagging aggregation or cyclic training.", + ) + + # Server side + parser.add_argument( + "--pool-size", default=5, type=int, help="Number of total clients." + ) + parser.add_argument( + "--num-rounds", default=30, type=int, help="Number of FL rounds." 
+ ) + parser.add_argument( + "--num-clients-per-round", + default=5, + type=int, + help="Number of clients participate in training each round.", + ) + parser.add_argument( + "--num-evaluate-clients", + default=5, + type=int, + help="Number of clients selected for evaluation.", + ) + parser.add_argument( + "--centralised-eval", + action="store_true", + help="Conduct centralised evaluation (True), or client evaluation on hold-out data (False).", + ) + parser.add_argument( + "--num-cpus-per-client", + default=2, + type=int, + help="Number of CPUs used for per client.", + ) + + # Client side + parser.add_argument( + "--partitioner-type", + default="uniform", + type=str, + choices=["uniform", "linear", "square", "exponential"], + help="Partitioner types.", + ) + parser.add_argument( + "--seed", default=42, type=int, help="Seed used for train/test splitting." + ) + parser.add_argument( + "--test-fraction", + default=0.2, + type=float, + help="Test fraction for train/test splitting.", + ) + parser.add_argument( + "--centralised-eval-client", + action="store_true", + help="Conduct evaluation on centralised test set (True), or on hold-out data (False).", + ) + parser.add_argument( + "--scaled-lr", + action="store_true", + help="Perform scaled learning rate based on the number of clients (True).", + ) + + args = parser.parse_args() + return args This integrates all arguments for both client and server sides. Example commands -~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~ -To run a centralised evaluated experiment with bagging strategy on 5 clients with exponential distribution for 50 rounds, -we first start the server as below: +To run a centralised evaluated experiment with bagging strategy on 5 clients with +exponential distribution for 50 rounds, we first start the server as below: .. 
code-block:: shell @@ -1247,4 +1338,6 @@ To run the same experiment with Flower simulation: $ python3 sim.py --train-method=bagging --pool-size=5 --num-rounds=50 --num-clients-per-round=5 --partitioner-type=exponential --centralised-eval -The full `code `_ for this comprehensive example can be found in :code:`examples/xgboost-comprehensive`. +The full `code +`_ for this +comprehensive example can be found in ``examples/xgboost-comprehensive``. diff --git a/doc/source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb b/doc/source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb index c5fc777e7f26..803ce729d43b 100644 --- a/doc/source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb +++ b/doc/source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb @@ -7,13 +7,15 @@ "source": [ "# Build a strategy from scratch\n", "\n", - "Welcome to the third part of the Flower federated learning tutorial. In previous parts of this tutorial, we introduced federated learning with PyTorch and Flower ([part 1](https://flower.ai/docs/framework/tutorial-get-started-with-flower-pytorch.html)) and we learned how strategies can be used to customize the execution on both the server and the clients ([part 2](https://flower.ai/docs/framework/tutorial-use-a-federated-learning-strategy-pytorch.html)).\n", + "Welcome to the third part of the Flower federated learning tutorial. 
In previous parts of this tutorial, we introduced federated learning with PyTorch and the Flower framework ([part 1](https://flower.ai/docs/framework/tutorial-get-started-with-flower-pytorch.html)) and we learned how strategies can be used to customize the execution on both the server and the clients ([part 2](https://flower.ai/docs/framework/tutorial-use-a-federated-learning-strategy-pytorch.html)).\n", "\n", - "In this notebook, we'll continue to customize the federated learning system we built previously by creating a custom version of FedAvg (again, using [Flower](https://flower.ai/) and [PyTorch](https://pytorch.org/)).\n", + "In this notebook, we'll continue to customize the federated learning system we built previously by creating a custom version of FedAvg using the Flower framework, Flower Datasets, and PyTorch.\n", "\n", - "> [Star Flower on GitHub](https://github.com/adap/flower) ⭐️ and join the Flower community on Slack to connect, ask questions, and get help: [Join Slack](https://flower.ai/join-slack) 🌼 We'd love to hear from you in the `#introductions` channel! And if anything is unclear, head over to the `#questions` channel.\n", + "> [Star Flower on GitHub](https://github.com/adap/flower) ⭐️ and join the Flower community on Flower Discuss and the Flower Slack to connect, ask questions, and get help:\n", + "> - [Join Flower Discuss](https://discuss.flower.ai/) We'd love to hear from you in the `Introduction` topic! If anything is unclear, post in `Flower Help - Beginners`.\n", + "> - [Join Flower Slack](https://flower.ai/join-slack) We'd love to hear from you in the `#introductions` channel! If anything is unclear, head over to the `#questions` channel.\n", "\n", - "Let's build a new `Strategy` from scratch!" + "Let's build a new `Strategy` from scratch! 
🌼" ] }, { @@ -40,7 +42,7 @@ "metadata": {}, "outputs": [], "source": [ - "!pip install -q flwr[simulation] torch torchvision" + "!pip install -q flwr[simulation] flwr-datasets[vision] torch torchvision" ] }, { @@ -64,15 +66,19 @@ "import torch.nn as nn\n", "import torch.nn.functional as F\n", "import torchvision.transforms as transforms\n", - "from torch.utils.data import DataLoader, random_split\n", - "from torchvision.datasets import CIFAR10\n", + "from torch.utils.data import DataLoader\n", "\n", - "import flwr as fl\n", + "import flwr\n", + "from flwr.client import Client, ClientApp, NumPyClient\n", + "from flwr.common import Context\n", + "from flwr.server import ServerApp, ServerConfig, ServerAppComponents\n", + "from flwr.server.strategy import Strategy\n", + "from flwr.simulation import run_simulation\n", + "from flwr_datasets import FederatedDataset\n", "\n", "DEVICE = torch.device(\"cpu\") # Try \"cuda\" to train on GPU\n", - "print(\n", - " f\"Training on {DEVICE} using PyTorch {torch.__version__} and Flower {fl.__version__}\"\n", - ")" + "print(f\"Training on {DEVICE}\")\n", + "print(f\"Flower {flwr.__version__} / PyTorch {torch.__version__}\")" ] }, { @@ -88,7 +94,7 @@ "source": [ "### Data loading\n", "\n", - "Let's now load the CIFAR-10 training and test set, partition them into ten smaller datasets (each split into training and validation set), and wrap everything in their own `DataLoader`. We introduce a new parameter `num_clients` which allows us to call `load_datasets` with different numbers of clients." + "Let's now load the CIFAR-10 training and test set, partition them into ten smaller datasets (each split into training and validation set), and wrap everything in their own `DataLoader`." 
] }, { @@ -97,37 +103,28 @@ "metadata": {}, "outputs": [], "source": [ - "NUM_CLIENTS = 10\n", - "\n", - "\n", - "def load_datasets(num_clients: int):\n", - " # Download and transform CIFAR-10 (train and test)\n", - " transform = transforms.Compose(\n", + "def load_datasets(partition_id, num_partitions: int):\n", + " fds = FederatedDataset(dataset=\"cifar10\", partitioners={\"train\": num_partitions})\n", + " partition = fds.load_partition(partition_id)\n", + " # Divide data on each node: 80% train, 20% test\n", + " partition_train_test = partition.train_test_split(test_size=0.2, seed=42)\n", + " pytorch_transforms = transforms.Compose(\n", " [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]\n", " )\n", - " trainset = CIFAR10(\"./dataset\", train=True, download=True, transform=transform)\n", - " testset = CIFAR10(\"./dataset\", train=False, download=True, transform=transform)\n", - "\n", - " # Split training set into `num_clients` partitions to simulate different local datasets\n", - " partition_size = len(trainset) // num_clients\n", - " lengths = [partition_size] * num_clients\n", - " datasets = random_split(trainset, lengths, torch.Generator().manual_seed(42))\n", - "\n", - " # Split each partition into train/val and create DataLoader\n", - " trainloaders = []\n", - " valloaders = []\n", - " for ds in datasets:\n", - " len_val = len(ds) // 10 # 10 % validation set\n", - " len_train = len(ds) - len_val\n", - " lengths = [len_train, len_val]\n", - " ds_train, ds_val = random_split(ds, lengths, torch.Generator().manual_seed(42))\n", - " trainloaders.append(DataLoader(ds_train, batch_size=32, shuffle=True))\n", - " valloaders.append(DataLoader(ds_val, batch_size=32))\n", - " testloader = DataLoader(testset, batch_size=32)\n", - " return trainloaders, valloaders, testloader\n", - "\n", "\n", - "trainloaders, valloaders, testloader = load_datasets(NUM_CLIENTS)" + " def apply_transforms(batch):\n", + " # Instead of passing transforms to 
CIFAR10(..., transform=transform)\n", + " # we will use this function to dataset.with_transform(apply_transforms)\n", + " # The transforms object is exactly the same\n", + " batch[\"img\"] = [pytorch_transforms(img) for img in batch[\"img\"]]\n", + " return batch\n", + "\n", + " partition_train_test = partition_train_test.with_transform(apply_transforms)\n", + " trainloader = DataLoader(partition_train_test[\"train\"], batch_size=32, shuffle=True)\n", + " valloader = DataLoader(partition_train_test[\"test\"], batch_size=32)\n", + " testset = fds.load_split(\"test\").with_transform(apply_transforms)\n", + " testloader = DataLoader(testset, batch_size=32)\n", + " return trainloader, valloader, testloader" ] }, { @@ -182,7 +179,8 @@ " net.train()\n", " for epoch in range(epochs):\n", " correct, total, epoch_loss = 0, 0, 0.0\n", - " for images, labels in trainloader:\n", + " for batch in trainloader:\n", + " images, labels = batch[\"img\"], batch[\"label\"]\n", " images, labels = images.to(DEVICE), labels.to(DEVICE)\n", " optimizer.zero_grad()\n", " outputs = net(images)\n", @@ -204,7 +202,8 @@ " correct, total, loss = 0, 0, 0.0\n", " net.eval()\n", " with torch.no_grad():\n", - " for images, labels in testloader:\n", + " for batch in testloader:\n", + " images, labels = batch[\"img\"], batch[\"label\"]\n", " images, labels = images.to(DEVICE), labels.to(DEVICE)\n", " outputs = net(images)\n", " loss += criterion(outputs, labels).item()\n", @@ -222,7 +221,7 @@ "source": [ "### Flower client\n", "\n", - "To implement the Flower client, we (again) create a subclass of `flwr.client.NumPyClient` and implement the three methods `get_parameters`, `fit`, and `evaluate`. Here, we also pass the `cid` to the client and use it log additional details:" + "To implement the Flower client, we (again) create a subclass of `flwr.client.NumPyClient` and implement the three methods `get_parameters`, `fit`, and `evaluate`. 
Here, we also pass the `partition_id` to the client and use it to log additional details. We then create an instance of `ClientApp` and pass it the `client_fn`." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "class FlowerClient(fl.client.NumPyClient):\n", - " def __init__(self, cid, net, trainloader, valloader):\n", - " self.cid = cid\n", + "class FlowerClient(NumPyClient):\n", + " def __init__(self, partition_id, net, trainloader, valloader):\n", + " self.partition_id = partition_id\n", " self.net = net\n", " self.trainloader = trainloader\n", " self.valloader = valloader\n", "\n", " def get_parameters(self, config):\n", - " print(f\"[Client {self.cid}] get_parameters\")\n", + " print(f\"[Client {self.partition_id}] get_parameters\")\n", " return get_parameters(self.net)\n", "\n", " def fit(self, parameters, config):\n", - " print(f\"[Client {self.cid}] fit, config: {config}\")\n", + " print(f\"[Client {self.partition_id}] fit, config: {config}\")\n", " set_parameters(self.net, parameters)\n", " train(self.net, self.trainloader, epochs=1)\n", " return get_parameters(self.net), len(self.trainloader), {}\n", "\n", " def evaluate(self, parameters, config):\n", - " print(f\"[Client {self.cid}] evaluate, config: {config}\")\n", + " print(f\"[Client {self.partition_id}] evaluate, config: {config}\")\n", " set_parameters(self.net, parameters)\n", " loss, accuracy = test(self.net, self.valloader)\n", " return float(loss), len(self.valloader), {\"accuracy\": float(accuracy)}\n", "\n", "\n", - "def client_fn(cid) -> FlowerClient:\n", + "def client_fn(context: Context) -> Client:\n", " net = Net().to(DEVICE)\n", - " trainloader = trainloaders[int(cid)]\n", - " valloader = valloaders[int(cid)]\n", - " return FlowerClient(cid, net, trainloader, valloader)" + " partition_id = context.node_config[\"partition-id\"]\n", + " num_partitions = context.node_config[\"num-partitions\"]\n", + " trainloader, valloader, _ = load_datasets(partition_id, 
num_partitions)\n", + " return FlowerClient(partition_id, net, trainloader, valloader).to_client()\n", + "\n", + "\n", + "# Create the ClientApp\n", + "client = ClientApp(client_fn=client_fn)" ] }, { @@ -275,16 +279,31 @@ "metadata": {}, "outputs": [], "source": [ - "# Specify client resources if you need GPU (defaults to 1 CPU and 0 GPU)\n", - "client_resources = None\n", - "if DEVICE.type == \"cuda\":\n", - " client_resources = {\"num_gpus\": 1}\n", + "NUM_PARTITIONS = 10\n", "\n", - "fl.simulation.start_simulation(\n", - " client_fn=client_fn,\n", - " num_clients=2,\n", - " config=fl.server.ServerConfig(num_rounds=3),\n", - " client_resources=client_resources,\n", + "\n", + "def server_fn(context: Context) -> ServerAppComponents:\n", + " # Configure the server for just 3 rounds of training\n", + " config = ServerConfig(num_rounds=3)\n", + " # If no strategy is provided, by default, ServerAppComponents will use FedAvg\n", + " return ServerAppComponents(config=config)\n", + "\n", + "\n", + "# Create the ServerApp\n", + "server = ServerApp(server_fn=server_fn)\n", + "\n", + "# Specify the resources each of your clients need\n", + "# If set to none, by default, each client will be allocated 2x CPU and 0x GPUs\n", + "backend_config = {\"client_resources\": None}\n", + "if DEVICE.type == \"cuda\":\n", + " backend_config = {\"client_resources\": {\"num_gpus\": 1}}\n", + "\n", + "# Run simulation\n", + "run_simulation(\n", + " server_app=server,\n", + " client_app=client,\n", + " num_supernodes=NUM_PARTITIONS,\n", + " backend_config=backend_config,\n", ")" ] }, @@ -303,15 +322,13 @@ "metadata": {}, "outputs": [], "source": [ - "from typing import Callable, Union\n", + "from typing import Union\n", "\n", "from flwr.common import (\n", " EvaluateIns,\n", " EvaluateRes,\n", " FitIns,\n", " FitRes,\n", - " MetricsAggregationFn,\n", - " NDArrays,\n", " Parameters,\n", " Scalar,\n", " ndarrays_to_parameters,\n", @@ -322,7 +339,7 @@ "from flwr.server.strategy.aggregate import 
aggregate, weighted_loss_avg\n", "\n", "\n", - "class FedCustom(fl.server.strategy.Strategy):\n", + "class FedCustom(Strategy):\n", " def __init__(\n", " self,\n", " fraction_fit: float = 1.0,\n", @@ -347,7 +364,7 @@ " \"\"\"Initialize global model parameters.\"\"\"\n", " net = Net()\n", " ndarrays = get_parameters(net)\n", - " return fl.common.ndarrays_to_parameters(ndarrays)\n", + " return ndarrays_to_parameters(ndarrays)\n", "\n", " def configure_fit(\n", " self, server_round: int, parameters: Parameters, client_manager: ClientManager\n", @@ -465,12 +482,21 @@ "metadata": {}, "outputs": [], "source": [ - "fl.simulation.start_simulation(\n", - " client_fn=client_fn,\n", - " num_clients=2,\n", - " config=fl.server.ServerConfig(num_rounds=3),\n", - " strategy=FedCustom(), # <-- pass the new strategy here\n", - " client_resources=client_resources,\n", + "def server_fn(context: Context) -> ServerAppComponents:\n", + " # Configure the server for just 3 rounds of training\n", + " config = ServerConfig(num_rounds=3)\n", + " return ServerAppComponents(\n", + " config=config,\n", + " strategy=FedCustom(), # <-- pass the new strategy here\n", + " )\n", + "\n", + "\n", + "# Run simulation\n", + "run_simulation(\n", + " server_app=server,\n", + " client_app=client,\n", + " num_supernodes=NUM_PARTITIONS,\n", + " backend_config=backend_config,\n", ")" ] }, @@ -489,7 +515,7 @@ "source": [ "## Next steps\n", "\n", - "Before you continue, make sure to join the Flower community on Slack: [Join Slack](https://flower.ai/join-slack/)\n", + "Before you continue, make sure to join the Flower community on Flower Discuss ([Join Flower Discuss](https://discuss.flower.ai)) and on Slack ([Join Slack](https://flower.ai/join-slack/)).\n", "\n", "There's a dedicated `#questions` channel if you need help, but we'd also love to hear who you are in `#introductions`!\n", "\n", diff --git a/doc/source/tutorial-series-customize-the-client-pytorch.ipynb 
b/doc/source/tutorial-series-customize-the-client-pytorch.ipynb index dbdd1094173c..0d1a926c339c 100644 --- a/doc/source/tutorial-series-customize-the-client-pytorch.ipynb +++ b/doc/source/tutorial-series-customize-the-client-pytorch.ipynb @@ -11,9 +11,11 @@ "\n", "In this notebook, we revisit `NumPyClient` and introduce a new baseclass for building clients, simply named `Client`. In previous parts of this tutorial, we've based our client on `NumPyClient`, a convenience class which makes it easy to work with machine learning libraries that have good NumPy interoperability. With `Client`, we gain a lot of flexibility that we didn't have before, but we'll also have to do a few things the we didn't have to do before.\n", "\n", - "> [Star Flower on GitHub](https://github.com/adap/flower) ⭐️ and join the Flower community on Slack to connect, ask questions, and get help: [Join Slack](https://flower.ai/join-slack) 🌼 We'd love to hear from you in the `#introductions` channel! And if anything is unclear, head over to the `#questions` channel.\n", + "> [Star Flower on GitHub](https://github.com/adap/flower) ⭐️ and join the Flower community on Flower Discuss and the Flower Slack to connect, ask questions, and get help:\n", + "> - [Join Flower Discuss](https://discuss.flower.ai/) We'd love to hear from you in the `Introduction` topic! If anything is unclear, post in `Flower Help - Beginners`.\n", + "> - [Join Flower Slack](https://flower.ai/join-slack) We'd love to hear from you in the `#introductions` channel! If anything is unclear, head over to the `#questions` channel.\n", "\n", - "Let's go deeper and see what it takes to move from `NumPyClient` to `Client`!" + "Let's go deeper and see what it takes to move from `NumPyClient` to `Client`! 
🌼" ] }, { @@ -40,7 +42,7 @@ "metadata": {}, "outputs": [], "source": [ - "!pip install -q flwr[simulation] torch torchvision scipy" + "!pip install -q flwr[simulation] flwr-datasets[vision] torch torchvision scipy" ] }, { @@ -57,22 +59,25 @@ "outputs": [], "source": [ "from collections import OrderedDict\n", - "from typing import Dict, List, Optional, Tuple\n", + "from typing import List\n", "\n", "import numpy as np\n", "import torch\n", "import torch.nn as nn\n", "import torch.nn.functional as F\n", "import torchvision.transforms as transforms\n", - "from torch.utils.data import DataLoader, random_split\n", - "from torchvision.datasets import CIFAR10\n", + "from torch.utils.data import DataLoader\n", "\n", - "import flwr as fl\n", + "import flwr\n", + "from flwr.client import Client, ClientApp, NumPyClient\n", + "from flwr.common import Context\n", + "from flwr.server import ServerApp, ServerConfig, ServerAppComponents\n", + "from flwr.simulation import run_simulation\n", + "from flwr_datasets import FederatedDataset\n", "\n", "DEVICE = torch.device(\"cpu\") # Try \"cuda\" to train on GPU\n", - "print(\n", - " f\"Training on {DEVICE} using PyTorch {torch.__version__} and Flower {fl.__version__}\"\n", - ")" + "print(f\"Training on {DEVICE}\")\n", + "print(f\"Flower {flwr.__version__} / PyTorch {torch.__version__}\")" ] }, { @@ -88,7 +93,7 @@ "source": [ "### Data loading\n", "\n", - "Let's now load the CIFAR-10 training and test set, partition them into ten smaller datasets (each split into training and validation set), and wrap everything in their own `DataLoader`." + "Let's now define a loading function for the CIFAR-10 training and test set, partition them into `num_partitions` smaller datasets (each split into training and validation set), and wrap everything in their own `DataLoader`." 
] }, { @@ -97,37 +102,28 @@ "metadata": {}, "outputs": [], "source": [ - "NUM_CLIENTS = 10\n", - "\n", - "\n", - "def load_datasets(num_clients: int):\n", - " # Download and transform CIFAR-10 (train and test)\n", - " transform = transforms.Compose(\n", + "def load_datasets(partition_id: int, num_partitions: int):\n", + " fds = FederatedDataset(dataset=\"cifar10\", partitioners={\"train\": num_partitions})\n", + " partition = fds.load_partition(partition_id)\n", + " # Divide data on each node: 80% train, 20% test\n", + " partition_train_test = partition.train_test_split(test_size=0.2, seed=42)\n", + " pytorch_transforms = transforms.Compose(\n", " [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]\n", " )\n", - " trainset = CIFAR10(\"./dataset\", train=True, download=True, transform=transform)\n", - " testset = CIFAR10(\"./dataset\", train=False, download=True, transform=transform)\n", - "\n", - " # Split training set into `num_clients` partitions to simulate different local datasets\n", - " partition_size = len(trainset) // num_clients\n", - " lengths = [partition_size] * num_clients\n", - " datasets = random_split(trainset, lengths, torch.Generator().manual_seed(42))\n", - "\n", - " # Split each partition into train/val and create DataLoader\n", - " trainloaders = []\n", - " valloaders = []\n", - " for ds in datasets:\n", - " len_val = len(ds) // 10 # 10 % validation set\n", - " len_train = len(ds) - len_val\n", - " lengths = [len_train, len_val]\n", - " ds_train, ds_val = random_split(ds, lengths, torch.Generator().manual_seed(42))\n", - " trainloaders.append(DataLoader(ds_train, batch_size=32, shuffle=True))\n", - " valloaders.append(DataLoader(ds_val, batch_size=32))\n", - " testloader = DataLoader(testset, batch_size=32)\n", - " return trainloaders, valloaders, testloader\n", - "\n", "\n", - "trainloaders, valloaders, testloader = load_datasets(NUM_CLIENTS)" + " def apply_transforms(batch):\n", + " # Instead of passing transforms 
to CIFAR10(..., transform=transform)\n", + " # we will use this function to dataset.with_transform(apply_transforms)\n", + " # The transforms object is exactly the same\n", + " batch[\"img\"] = [pytorch_transforms(img) for img in batch[\"img\"]]\n", + " return batch\n", + "\n", + " partition_train_test = partition_train_test.with_transform(apply_transforms)\n", + " trainloader = DataLoader(partition_train_test[\"train\"], batch_size=32, shuffle=True)\n", + " valloader = DataLoader(partition_train_test[\"test\"], batch_size=32)\n", + " testset = fds.load_split(\"test\").with_transform(apply_transforms)\n", + " testloader = DataLoader(testset, batch_size=32)\n", + " return trainloader, valloader, testloader" ] }, { @@ -182,7 +178,8 @@ " net.train()\n", " for epoch in range(epochs):\n", " correct, total, epoch_loss = 0, 0, 0.0\n", - " for images, labels in trainloader:\n", + " for batch in trainloader:\n", + " images, labels = batch[\"img\"], batch[\"label\"]\n", " images, labels = images.to(DEVICE), labels.to(DEVICE)\n", " optimizer.zero_grad()\n", " outputs = net(images)\n", @@ -204,7 +201,8 @@ " correct, total, loss = 0, 0, 0.0\n", " net.eval()\n", " with torch.no_grad():\n", - " for images, labels in testloader:\n", + " for batch in testloader:\n", + " images, labels = batch[\"img\"], batch[\"label\"]\n", " images, labels = images.to(DEVICE), labels.to(DEVICE)\n", " outputs = net(images)\n", " loss += criterion(outputs, labels).item()\n", @@ -222,7 +220,7 @@ "source": [ "## Step 1: Revisiting NumPyClient\n", "\n", - "So far, we've implemented our client by subclassing `flwr.client.NumPyClient`. The three methods we implemented are `get_parameters`, `fit`, and `evaluate`. Finally, we wrap the creation of instances of this class in a function called `client_fn`:" + "So far, we've implemented our client by subclassing `flwr.client.NumPyClient`. The three methods we implemented are `get_parameters`, `fit`, and `evaluate`. 
" ] }, { @@ -231,42 +229,60 @@ "metadata": {}, "outputs": [], "source": [ - "class FlowerNumPyClient(fl.client.NumPyClient):\n", - " def __init__(self, cid, net, trainloader, valloader):\n", - " self.cid = cid\n", + "class FlowerNumPyClient(NumPyClient):\n", + " def __init__(self, partition_id, net, trainloader, valloader):\n", + " self.partition_id = partition_id\n", " self.net = net\n", " self.trainloader = trainloader\n", " self.valloader = valloader\n", "\n", " def get_parameters(self, config):\n", - " print(f\"[Client {self.cid}] get_parameters\")\n", + " print(f\"[Client {self.partition_id}] get_parameters\")\n", " return get_parameters(self.net)\n", "\n", " def fit(self, parameters, config):\n", - " print(f\"[Client {self.cid}] fit, config: {config}\")\n", + " print(f\"[Client {self.partition_id}] fit, config: {config}\")\n", " set_parameters(self.net, parameters)\n", " train(self.net, self.trainloader, epochs=1)\n", " return get_parameters(self.net), len(self.trainloader), {}\n", "\n", " def evaluate(self, parameters, config):\n", - " print(f\"[Client {self.cid}] evaluate, config: {config}\")\n", + " print(f\"[Client {self.partition_id}] evaluate, config: {config}\")\n", " set_parameters(self.net, parameters)\n", " loss, accuracy = test(self.net, self.valloader)\n", - " return float(loss), len(self.valloader), {\"accuracy\": float(accuracy)}\n", + " return float(loss), len(self.valloader), {\"accuracy\": float(accuracy)}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then, we define the function `numpyclient_fn` that is used by Flower to create the `FlowerNumpyClient` instances on demand. Finally, we create the `ClientApp` and pass the `numpyclient_fn` to it." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def numpyclient_fn(context: Context) -> Client:\n", + " net = Net().to(DEVICE)\n", + " partition_id = context.node_config[\"partition-id\"]\n", + " num_partitions = context.node_config[\"num-partitions\"]\n", + " trainloader, valloader, _ = load_datasets(partition_id, num_partitions)\n", + " return FlowerNumPyClient(partition_id, net, trainloader, valloader).to_client()\n", "\n", "\n", - "def numpyclient_fn(cid) -> FlowerNumPyClient:\n", - " net = Net().to(DEVICE)\n", - " trainloader = trainloaders[int(cid)]\n", - " valloader = valloaders[int(cid)]\n", - " return FlowerNumPyClient(cid, net, trainloader, valloader)" + "# Create the ClientApp\n", + "numpyclient = ClientApp(client_fn=numpyclient_fn)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "We've seen this before, there's nothing new so far. The only *tiny* difference compared to the previous notebook is naming, we've changed `FlowerClient` to `FlowerNumPyClient` and `client_fn` to `numpyclient_fn`. Let's run it to see the output we get:" + "We've seen this before, there's nothing new so far. The only *tiny* difference compared to the previous notebook is naming, we've changed `FlowerClient` to `FlowerNumPyClient` and `client_fn` to `numpyclient_fn`. 
Next, we configure the number of federated learning rounds using `ServerConfig` and create the `ServerApp` with this config:" ] }, { @@ -275,16 +291,43 @@ "metadata": {}, "outputs": [], "source": [ - "# Specify client resources if you need GPU (defaults to 1 CPU and 0 GPU)\n", - "client_resources = None\n", + "def server_fn(context: Context) -> ServerAppComponents:\n", + " # Configure the server for 3 rounds of training\n", + " config = ServerConfig(num_rounds=3)\n", + " return ServerAppComponents(config=config)\n", + "\n", + "\n", + "# Create ServerApp\n", + "server = ServerApp(server_fn=server_fn)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, we specify the resources for each client and run the simulation to see the output we get:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Specify the resources each of your clients need\n", + "# If set to none, by default, each client will be allocated 2x CPU and 0x GPUs\n", + "backend_config = {\"client_resources\": None}\n", "if DEVICE.type == \"cuda\":\n", - " client_resources = {\"num_gpus\": 1}\n", + " backend_config = {\"client_resources\": {\"num_gpus\": 1}}\n", + "\n", + "NUM_PARTITIONS = 10\n", "\n", - "fl.simulation.start_simulation(\n", - " client_fn=numpyclient_fn,\n", - " num_clients=2,\n", - " config=fl.server.ServerConfig(num_rounds=3),\n", - " client_resources=client_resources,\n", + "# Run simulation\n", + "run_simulation(\n", + " server_app=server,\n", + " client_app=numpyclient,\n", + " num_supernodes=NUM_PARTITIONS,\n", + " backend_config=backend_config,\n", ")" ] }, @@ -292,9 +335,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "This works as expected, two clients are training for three rounds of federated learning.\n", + "This works as expected, ten clients are training for three rounds of federated learning.\n", "\n", - "Let's dive a little bit deeper and discuss how Flower executes 
this simulation. Whenever a client is selected to do some work, `start_simulation` calls the function `numpyclient_fn` to create an instance of our `FlowerNumPyClient` (along with loading the model and the data).\n", + "Let's dive a little bit deeper and discuss how Flower executes this simulation. Whenever a client is selected to do some work, `run_simulation` launches the `ClientApp` object which in turn calls the function `numpyclient_fn` to create an instance of our `FlowerNumPyClient` (along with loading the model and the data).\n", "\n", "But here's the perhaps surprising part: Flower doesn't actually use the `FlowerNumPyClient` object directly. Instead, it wraps the object to makes it look like a subclass of `flwr.client.Client`, not `flwr.client.NumPyClient`. In fact, the Flower core framework doesn't know how to handle `NumPyClient`'s, it only knows how to handle `Client`'s. `NumPyClient` is just a convenience abstraction built on top of `Client`. \n", "\n", @@ -330,15 +373,15 @@ ")\n", "\n", "\n", - "class FlowerClient(fl.client.Client):\n", - " def __init__(self, cid, net, trainloader, valloader):\n", - " self.cid = cid\n", + "class FlowerClient(Client):\n", + " def __init__(self, partition_id, net, trainloader, valloader):\n", + " self.partition_id = partition_id\n", " self.net = net\n", " self.trainloader = trainloader\n", " self.valloader = valloader\n", "\n", " def get_parameters(self, ins: GetParametersIns) -> GetParametersRes:\n", - " print(f\"[Client {self.cid}] get_parameters\")\n", + " print(f\"[Client {self.partition_id}] get_parameters\")\n", "\n", " # Get parameters as a list of NumPy ndarray's\n", " ndarrays: List[np.ndarray] = get_parameters(self.net)\n", @@ -354,7 +397,7 @@ " )\n", "\n", " def fit(self, ins: FitIns) -> FitRes:\n", - " print(f\"[Client {self.cid}] fit, config: {ins.config}\")\n", + " print(f\"[Client {self.partition_id}] fit, config: {ins.config}\")\n", "\n", " # Deserialize parameters to NumPy ndarray's\n", " 
parameters_original = ins.parameters\n", @@ -378,7 +421,7 @@ " )\n", "\n", " def evaluate(self, ins: EvaluateIns) -> EvaluateRes:\n", - " print(f\"[Client {self.cid}] evaluate, config: {ins.config}\")\n", + " print(f\"[Client {self.partition_id}] evaluate, config: {ins.config}\")\n", "\n", " # Deserialize parameters to NumPy ndarray's\n", " parameters_original = ins.parameters\n", @@ -386,7 +429,6 @@ "\n", " set_parameters(self.net, ndarrays_original)\n", " loss, accuracy = test(self.net, self.valloader)\n", - " # return float(loss), len(self.valloader), {\"accuracy\": float(accuracy)}\n", "\n", " # Build and return response\n", " status = Status(code=Code.OK, message=\"Success\")\n", @@ -398,11 +440,16 @@ " )\n", "\n", "\n", - "def client_fn(cid) -> FlowerClient:\n", + "def client_fn(context: Context) -> Client:\n", " net = Net().to(DEVICE)\n", - " trainloader = trainloaders[int(cid)]\n", - " valloader = valloaders[int(cid)]\n", - " return FlowerClient(cid, net, trainloader, valloader)" + " partition_id = context.node_config[\"partition-id\"]\n", + " num_partitions = context.node_config[\"num-partitions\"]\n", + " trainloader, valloader, _ = load_datasets(partition_id, num_partitions)\n", + " return FlowerClient(partition_id, net, trainloader, valloader).to_client()\n", + "\n", + "\n", + "# Create the ClientApp\n", + "client = ClientApp(client_fn=client_fn)" ] }, { @@ -418,11 +465,12 @@ "metadata": {}, "outputs": [], "source": [ - "fl.simulation.start_simulation(\n", - " client_fn=client_fn,\n", - " num_clients=2,\n", - " config=fl.server.ServerConfig(num_rounds=3),\n", - " client_resources=client_resources,\n", + "# Run simulation\n", + "run_simulation(\n", + " server_app=server,\n", + " client_app=client,\n", + " num_supernodes=NUM_PARTITIONS,\n", + " backend_config=backend_config,\n", ")" ] }, @@ -578,15 +626,15 @@ ")\n", "\n", "\n", - "class FlowerClient(fl.client.Client):\n", - " def __init__(self, cid, net, trainloader, valloader):\n", - " self.cid = cid\n", 
+ "class FlowerClient(Client):\n", + " def __init__(self, partition_id, net, trainloader, valloader):\n", + " self.partition_id = partition_id\n", " self.net = net\n", " self.trainloader = trainloader\n", " self.valloader = valloader\n", "\n", " def get_parameters(self, ins: GetParametersIns) -> GetParametersRes:\n", - " print(f\"[Client {self.cid}] get_parameters\")\n", + " print(f\"[Client {self.partition_id}] get_parameters\")\n", "\n", " # Get parameters as a list of NumPy ndarray's\n", " ndarrays: List[np.ndarray] = get_parameters(self.net)\n", @@ -602,7 +650,7 @@ " )\n", "\n", " def fit(self, ins: FitIns) -> FitRes:\n", - " print(f\"[Client {self.cid}] fit, config: {ins.config}\")\n", + " print(f\"[Client {self.partition_id}] fit, config: {ins.config}\")\n", "\n", " # Deserialize parameters to NumPy ndarray's using our custom function\n", " parameters_original = ins.parameters\n", @@ -626,7 +674,7 @@ " )\n", "\n", " def evaluate(self, ins: EvaluateIns) -> EvaluateRes:\n", - " print(f\"[Client {self.cid}] evaluate, config: {ins.config}\")\n", + " print(f\"[Client {self.partition_id}] evaluate, config: {ins.config}\")\n", "\n", " # Deserialize parameters to NumPy ndarray's using our custom function\n", " parameters_original = ins.parameters\n", @@ -645,11 +693,12 @@ " )\n", "\n", "\n", - "def client_fn(cid) -> FlowerClient:\n", + "def client_fn(context: Context) -> Client:\n", " net = Net().to(DEVICE)\n", - " trainloader = trainloaders[int(cid)]\n", - " valloader = valloaders[int(cid)]\n", - " return FlowerClient(cid, net, trainloader, valloader)" + " partition_id = context.node_config[\"partition-id\"]\n", + " num_partitions = context.node_config[\"num-partitions\"]\n", + " trainloader, valloader, _ = load_datasets(partition_id, num_partitions)\n", + " return FlowerClient(partition_id, net, trainloader, valloader).to_client()" ] }, { @@ -843,14 +892,24 @@ "metadata": {}, "outputs": [], "source": [ - "strategy = FedSparse()\n", + "def server_fn(context: 
Context) -> ServerAppComponents:\n", + " # Configure the server for just 3 rounds of training\n", + " config = ServerConfig(num_rounds=3)\n", + " return ServerAppComponents(\n", + " config=config,\n", + " strategy=FedSparse(), # <-- pass the new strategy here\n", + " )\n", + "\n", + "\n", + "# Create the ServerApp\n", + "server = ServerApp(server_fn=server_fn)\n", "\n", - "fl.simulation.start_simulation(\n", - " strategy=strategy,\n", - " client_fn=client_fn,\n", - " num_clients=2,\n", - " config=fl.server.ServerConfig(num_rounds=3),\n", - " client_resources=client_resources,\n", + "# Run simulation\n", + "run_simulation(\n", + " server_app=server,\n", + " client_app=client,\n", + " num_supernodes=NUM_PARTITIONS,\n", + " backend_config=backend_config,\n", ")" ] }, @@ -869,16 +928,16 @@ "source": [ "## Next steps\n", "\n", - "Before you continue, make sure to join the Flower community on Slack: [Join Slack](https://flower.ai/join-slack/)\n", + "Before you continue, make sure to join the Flower community on Flower Discuss ([Join Flower Discuss](https://discuss.flower.ai)) and on Slack ([Join Slack](https://flower.ai/join-slack/)).\n", "\n", "There's a dedicated `#questions` channel if you need help, but we'd also love to hear who you are in `#introductions`!\n", "\n", "This is the final part of the Flower tutorial (for now!), congratulations! You're now well equipped to understand the rest of the documentation. 
There are many topics we didn't cover in the tutorial, we recommend the following resources:\n", "\n", "- [Read Flower Docs](https://flower.ai/docs/)\n", - "- [Check out Flower Code Examples](https://github.com/adap/flower/tree/main/examples)\n", + "- [Check out Flower Code Examples](https://flower.ai/docs/examples/)\n", "- [Use Flower Baselines for your research](https://flower.ai/docs/baselines/)\n", - "- [Watch Flower Summit 2023 videos](https://flower.ai/conf/flower-summit-2023/)\n" + "- [Watch Flower AI Summit 2024 videos](https://flower.ai/conf/flower-ai-summit-2024/)\n" ] } ], diff --git a/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb b/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb index d8e6e58fafab..4d126d67463c 100644 --- a/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb +++ b/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb @@ -9,11 +9,13 @@ "\n", "Welcome to the Flower federated learning tutorial!\n", "\n", - "In this notebook, we'll build a federated learning system using Flower, [Flower Datasets](https://flower.ai/docs/datasets/) and PyTorch. In part 1, we use PyTorch for the model training pipeline and data loading. In part 2, we continue to federate the PyTorch-based pipeline using Flower.\n", + "In this notebook, we'll build a federated learning system using the Flower framework, Flower Datasets and PyTorch. In part 1, we use PyTorch for the model training pipeline and data loading. In part 2, we federate the PyTorch project using Flower.\n", "\n", - "> [Star Flower on GitHub](https://github.com/adap/flower) ⭐️ and join the Flower community on Slack to connect, ask questions, and get help: [Join Slack](https://flower.ai/join-slack) 🌼 We'd love to hear from you in the `#introductions` channel! 
And if anything is unclear, head over to the `#questions` channel.\n", + "> [Star Flower on GitHub](https://github.com/adap/flower) ⭐️ and join the Flower community on Flower Discuss and the Flower Slack to connect, ask questions, and get help:\n", + "> - [Join Flower Discuss](https://discuss.flower.ai/) We'd love to hear from you in the `Introduction` topic! If anything is unclear, post in `Flower Help - Beginners`.\n", + "> - [Join Flower Slack](https://flower.ai/join-slack) We'd love to hear from you in the `#introductions` channel! If anything is unclear, head over to the `#questions` channel.\n", "\n", - "Let's get started!" + "Let's get started! 🌼" ] }, { @@ -29,7 +31,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Installing dependencies\n", + "### Install dependencies\n", "\n", "Next, we install the necessary packages for PyTorch (`torch` and `torchvision`), Flower Datasets (`flwr-datasets`) and Flower (`flwr`):" ] @@ -40,7 +42,7 @@ "metadata": {}, "outputs": [], "source": [ - "!pip install -q flwr[simulation] flwr_datasets[vision] torch torchvision matplotlib" + "!pip install -q flwr[simulation] flwr-datasets[vision] torch torchvision matplotlib" ] }, { @@ -68,14 +70,17 @@ "from datasets.utils.logging import disable_progress_bar\n", "from torch.utils.data import DataLoader\n", "\n", - "import flwr as fl\n", - "from flwr.common import Metrics\n", + "import flwr\n", + "from flwr.client import Client, ClientApp, NumPyClient\n", + "from flwr.common import Metrics, Context\n", + "from flwr.server import ServerApp, ServerConfig, ServerAppComponents\n", + "from flwr.server.strategy import FedAvg\n", + "from flwr.simulation import run_simulation\n", "from flwr_datasets import FederatedDataset\n", "\n", "DEVICE = torch.device(\"cpu\") # Try \"cuda\" to train on GPU\n", - "print(\n", - " f\"Training on {DEVICE} using PyTorch {torch.__version__} and Flower {fl.__version__}\"\n", - ")\n", + "print(f\"Training on {DEVICE}\")\n", + "print(f\"Flower 
{flwr.__version__} / PyTorch {torch.__version__}\")\n", "disable_progress_bar()" ] }, @@ -90,8 +95,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "\n", - "### Loading the data\n", + "### Load the data\n", "\n", "Federated learning can be applied to many different types of tasks across different domains. In this tutorial, we introduce federated learning by training a simple convolutional neural network (CNN) on the popular CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that distinguish between images from ten different classes: 'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and 'truck'." ] @@ -100,17 +104,16 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We simulate having multiple datasets from multiple organizations (also called the \"cross-silo\" setting in federated learning) by splitting the original CIFAR-10 dataset into multiple partitions. Each partition will represent the data from a single organization. We're doing this purely for experimentation purposes, in the real world there's no need for data splitting because each organization already has their own data (so the data is naturally partitioned).\n", + "We simulate having multiple datasets from multiple organizations (also called the \"cross-silo\" setting in federated learning) by splitting the original CIFAR-10 dataset into multiple partitions. Each partition will represent the data from a single organization. We're doing this purely for experimentation purposes, in the real world there's no need for data splitting because each organization already has their own data (the data is naturally partitioned).\n", "\n", - "Each organization will act as a client in the federated learning system. So having ten organizations participate in a federation means having ten clients connected to the federated learning server.\n" + "Each organization will act as a client in the federated learning system. 
Having ten organizations participate in a federation means having ten clients connected to the federated learning server.\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "\n", - "Let's now create the Federated Dataset abstraction that from `flwr-datasets` that partitions the CIFAR-10. We will create small training and test set for each edge device and wrap each of them into a PyTorch `DataLoader`:" + "We use the Flower Datasets library (`flwr-datasets`) to partition CIFAR-10 into ten partitions using `FederatedDataset`. We will create a small training and test set for each of the ten organizations and wrap each of these into a PyTorch `DataLoader`:" ] }, { @@ -123,46 +126,40 @@ "BATCH_SIZE = 32\n", "\n", "\n", - "def load_datasets():\n", + "def load_datasets(partition_id: int):\n", " fds = FederatedDataset(dataset=\"cifar10\", partitioners={\"train\": NUM_CLIENTS})\n", + " partition = fds.load_partition(partition_id)\n", + " # Divide data on each node: 80% train, 20% test\n", + " partition_train_test = partition.train_test_split(test_size=0.2, seed=42)\n", + " pytorch_transforms = transforms.Compose(\n", + " [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]\n", + " )\n", "\n", " def apply_transforms(batch):\n", " # Instead of passing transforms to CIFAR10(..., transform=transform)\n", " # we will use this function to dataset.with_transform(apply_transforms)\n", " # The transforms object is exactly the same\n", - " transform = transforms.Compose(\n", - " [\n", - " transforms.ToTensor(),\n", - " transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n", - " ]\n", - " )\n", - " batch[\"img\"] = [transform(img) for img in batch[\"img\"]]\n", + " batch[\"img\"] = [pytorch_transforms(img) for img in batch[\"img\"]]\n", " return batch\n", "\n", " # Create train/val for each partition and wrap it into DataLoader\n", - " trainloaders = []\n", - " valloaders = []\n", - " for partition_id in range(NUM_CLIENTS):\n", - " partition 
= fds.load_partition(partition_id, \"train\")\n", - " partition = partition.with_transform(apply_transforms)\n", - " partition = partition.train_test_split(train_size=0.8, seed=42)\n", - " trainloaders.append(DataLoader(partition[\"train\"], batch_size=BATCH_SIZE))\n", - " valloaders.append(DataLoader(partition[\"test\"], batch_size=BATCH_SIZE))\n", + " partition_train_test = partition_train_test.with_transform(apply_transforms)\n", + " trainloader = DataLoader(\n", + " partition_train_test[\"train\"], batch_size=BATCH_SIZE, shuffle=True\n", + " )\n", + " valloader = DataLoader(partition_train_test[\"test\"], batch_size=BATCH_SIZE)\n", " testset = fds.load_split(\"test\").with_transform(apply_transforms)\n", " testloader = DataLoader(testset, batch_size=BATCH_SIZE)\n", - " return trainloaders, valloaders, testloader\n", - "\n", - "\n", - "trainloaders, valloaders, testloader = load_datasets()" + " return trainloader, valloader, testloader" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "We now have a list of ten training sets and ten validation sets (`trainloaders` and `valloaders`) representing the data of ten different organizations. Each `trainloader`/`valloader` pair contains 4000 training examples and 1000 validation examples. There's also a single `testloader` (we did not split the test set). Again, this is only necessary for building research or educational systems, actual federated learning systems have their data naturally distributed across multiple partitions.\n", + "We now have a function that can return a training set and validation set (`trainloader` and `valloader`) representing one dataset from one of ten different organizations. Each `trainloader`/`valloader` pair contains 4000 training examples and 1000 validation examples. There's also a single `testloader` (we did not split the test set). 
Again, this is only necessary for building research or educational systems, actual federated learning systems have their data naturally distributed across multiple partitions.\n", "\n", - "Let's take a look at the first batch of images and labels in the first training set (i.e., `trainloaders[0]`) before we move on:" + "Let's take a look at the first batch of images and labels in the first training set (i.e., `trainloader` from `partition_id=0`) before we move on:" ] }, { @@ -171,11 +168,14 @@ "metadata": {}, "outputs": [], "source": [ - "batch = next(iter(trainloaders[0]))\n", + "trainloader, _, _ = load_datasets(partition_id=0)\n", + "batch = next(iter(trainloader))\n", "images, labels = batch[\"img\"], batch[\"label\"]\n", + "\n", "# Reshape and convert images to a NumPy array\n", "# matplotlib requires images with the shape (height, width, 3)\n", "images = images.permute(0, 2, 3, 1).numpy()\n", + "\n", "# Denormalize\n", "images = images / 2 + 0.5\n", "\n", @@ -185,7 +185,7 @@ "# Loop over the images and plot them\n", "for i, ax in enumerate(axs.flat):\n", " ax.imshow(images[i])\n", - " ax.set_title(trainloaders[0].dataset.features[\"label\"].int2str([labels[i]])[0])\n", + " ax.set_title(trainloader.dataset.features[\"label\"].int2str([labels[i]])[0])\n", " ax.axis(\"off\")\n", "\n", "# Show the plot\n", @@ -197,7 +197,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The output above shows a random batch of images from the first `trainloader` in our list of ten `trainloaders`. It also prints the labels associated with each image (i.e., one of the ten possible labels we've seen above). If you run the cell again, you should see another batch of images." + "The output above shows a random batch of images from the `trainloader` from the first of ten partitions. It also prints the labels associated with each image (i.e., one of the ten possible labels we've seen above). If you run the cell again, you should see another batch of images." 
] }, { @@ -219,7 +219,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Defining the model\n", + "### Define the model\n", "\n", "We use the simple CNN described in the [PyTorch tutorial](https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html#define-a-convolutional-neural-network):" ] @@ -309,9 +309,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Training the model\n", + "### Train the model\n", "\n", - "We now have all the basic building blocks we need: a dataset, a model, a training function, and a test function. Let's put them together to train the model on the dataset of one of our organizations (`trainloaders[0]`). This simulates the reality of most machine learning projects today: each organization has their own data and trains models only on this internal data: " + "We now have all the basic building blocks we need: a dataset, a model, a training function, and a test function. Let's put them together to train the model on the dataset of one of our organizations (`partition_id=0`). This simulates the reality of most machine learning projects today: each organization has their own data and trains models only on this internal data: " ] }, { @@ -320,8 +320,7 @@ "metadata": {}, "outputs": [], "source": [ - "trainloader = trainloaders[0]\n", - "valloader = valloaders[0]\n", + "trainloader, valloader, testloader = load_datasets(partition_id=0)\n", "net = Net().to(DEVICE)\n", "\n", "for epoch in range(5):\n", @@ -337,7 +336,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Training the simple CNN on our CIFAR-10 split for 5 epochs should result in a test set accuracy of about 41%, which is not good, but at the same time, it doesn't really matter for the purposes of this tutorial. The intent was just to show a simplistic centralized training pipeline that sets the stage for what comes next - federated learning!" 
+ "Training the simple CNN on our CIFAR-10 split for 5 epochs should result in a test set accuracy of about 41%, which is not good, but at the same time, it doesn't really matter for the purposes of this tutorial. The intent was just to show a simple centralized training pipeline that sets the stage for what comes next - federated learning!" ] }, { @@ -353,13 +352,13 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Updating model parameters\n", + "### Update model parameters\n", "\n", - "In federated learning, the server sends the global model parameters to the client, and the client updates the local model with the parameters received from the server. It then trains the model on the local data (which changes the model parameters locally) and sends the updated/changed model parameters back to the server (or, alternatively, it sends just the gradients back to the server, not the full model parameters).\n", + "In federated learning, the server sends global model parameters to the client, and the client updates the local model with parameters received from the server. It then trains the model on the local data (which changes the model parameters locally) and sends the updated/changed model parameters back to the server (or, alternatively, it sends just the gradients back to the server, not the full model parameters).\n", "\n", "We need two helper functions to update the local model with parameters received from the server and to get the updated model parameters from the local model: `set_parameters` and `get_parameters`. The following two functions do just that for the PyTorch model above.\n", "\n", - "The details of how this works are not really important here (feel free to consult the PyTorch documentation if you want to learn more). In essence, we use `state_dict` to access PyTorch model parameter tensors. 
The parameter tensors are then converted to/from a list of NumPy ndarray's (which Flower knows how to serialize/deserialize):" + "The details of how this works are not really important here (feel free to consult the PyTorch documentation if you want to learn more). In essence, we use `state_dict` to access PyTorch model parameter tensors. The parameter tensors are then converted to/from a list of NumPy ndarray's (which the Flower `NumPyClient` knows how to serialize/deserialize):" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Implementing a Flower client\n", + "### Define the Flower ClientApp\n", "\n", - "With that out of the way, let's move on to the interesting part. Federated learning systems consist of a server and multiple clients. In Flower, we create clients by implementing subclasses of `flwr.client.Client` or `flwr.client.NumPyClient`. We use `NumPyClient` in this tutorial because it is easier to implement and requires us to write less boilerplate.\n", + "With that out of the way, let's move on to the interesting part. Federated learning systems consist of a server and multiple clients. In Flower, we create a `ServerApp` and a `ClientApp` to run the server-side and client-side code, respectively.\n", "\n", - "To implement the Flower client, we create a subclass of `flwr.client.NumPyClient` and implement the three methods `get_parameters`, `fit`, and `evaluate`:\n", + "The first step toward creating a `ClientApp` is to implement a subclass of `flwr.client.Client` or `flwr.client.NumPyClient`. We use `NumPyClient` in this tutorial because it is easier to implement and requires us to write less boilerplate. 
To implement `NumPyClient`, we create a subclass that implements the three methods `get_parameters`, `fit`, and `evaluate`:\n", "\n", "* `get_parameters`: Return the current local model parameters\n", - "* `fit`: Receive model parameters from the server, train the model parameters on the local data, and return the (updated) model parameters to the server\n", - "* `evaluate`: Receive model parameters from the server, evaluate the model parameters on the local data, and return the evaluation result to the server\n", + "* `fit`: Receive model parameters from the server, train the model on the local data, and return the updated model parameters to the server\n", + "* `evaluate`: Receive model parameters from the server, evaluate the model on the local data, and return the evaluation result to the server\n", "\n", "We mentioned that our clients will use the previously defined PyTorch components for model training and evaluation. Let's see a simple Flower client implementation that brings everything together:" ] @@ -401,7 +400,7 @@ "metadata": {}, "outputs": [], "source": [ - "class FlowerClient(fl.client.NumPyClient):\n", + "class FlowerClient(NumPyClient):\n", " def __init__(self, net, trainloader, valloader):\n", " self.net = net\n", " self.trainloader = trainloader\n", @@ -425,13 +424,13 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Our class `FlowerClient` defines how local training/evaluation will be performed and allows Flower to call the local training/evaluation through `fit` and `evaluate`. Each instance of `FlowerClient` represents a *single client* in our federated learning system. Federated learning systems have multiple clients (otherwise, there's not much to federate), so each client will be represented by its own instance of `FlowerClient`. If we have, for example, three clients in our workload, then we'd have three instances of `FlowerClient`. 
Flower calls `FlowerClient.fit` on the respective instance when the server selects a particular client for training (and `FlowerClient.evaluate` for evaluation).\n", + "Our class `FlowerClient` defines how local training/evaluation will be performed and allows Flower to call the local training/evaluation through `fit` and `evaluate`. Each instance of `FlowerClient` represents a *single client* in our federated learning system. Federated learning systems have multiple clients (otherwise, there's not much to federate), so each client will be represented by its own instance of `FlowerClient`. If we have, for example, three clients in our workload, then we'd have three instances of `FlowerClient` (one on each of the machines we'd start the client on). Flower calls `FlowerClient.fit` on the respective instance when the server selects a particular client for training (and `FlowerClient.evaluate` for evaluation).\n", "\n", - "### Using the Virtual Client Engine\n", + "In this notebook, we want to simulate a federated learning system with 10 clients *on a single machine*. This means that the server and all 10 clients will live on a single machine and share resources such as CPU, GPU, and memory. Having 10 clients would mean having 10 instances of `FlowerClient` in memory. Doing this on a single machine can quickly exhaust the available memory resources, even if only a subset of these clients participates in a single round of federated learning.\n", "\n", - "In this notebook, we want to simulate a federated learning system with 10 clients on a single machine. This means that the server and all 10 clients will live on a single machine and share resources such as CPU, GPU, and memory. Having 10 clients would mean having 10 instances of `FlowerClient` in memory. 
Doing this on a single machine can quickly exhaust the available memory resources, even if only a subset of these clients participates in a single round of federated learning.\n", + "In addition to the regular capabilities where server and clients run on multiple machines, Flower, therefore, provides special simulation capabilities that create `FlowerClient` instances only when they are actually necessary for training or evaluation. To enable the Flower framework to create clients when necessary, we need to implement a function that creates a `FlowerClient` instance on demand. We typically call this function `client_fn`. Flower calls `client_fn` whenever it needs an instance of one particular client to call `fit` or `evaluate` (those instances are usually discarded after use, so they should not keep any local state). In federated learning experiments using Flower, clients are identified by a partition ID, or `partition-id`. This `partition-id` is used to load different local data partitions for different clients, as can be seen below. The value of `partition-id` is retrieved from the `node_config` dictionary in the `Context` object, which holds the information that persists throughout each training round. \n", "\n", - "In addition to the regular capabilities where server and clients run on multiple machines, Flower, therefore, provides special simulation capabilities that create `FlowerClient` instances only when they are actually necessary for training or evaluation. To enable the Flower framework to create clients when necessary, we need to implement a function called `client_fn` that creates a `FlowerClient` instance on demand. Flower calls `client_fn` whenever it needs an instance of one particular client to call `fit` or `evaluate` (those instances are usually discarded after use, so they should not keep any local state). Clients are identified by a client ID, or short `cid`. 
The `cid` can be used, for example, to load different local data partitions for different clients, as can be seen below:" + "With this, we have the class `FlowerClient` which defines client-side training/evaluation and `client_fn` which allows Flower to create `FlowerClient` instances whenever it needs to call `fit` or `evaluate` on one particular client. Last, but definitely not least, we create an instance of `ClientApp` and pass it the `client_fn`. `ClientApp` is the entrypoint that a running Flower client uses to call your code (as defined in, for example, `FlowerClient.fit`)." ] }, { @@ -440,7 +439,7 @@ "metadata": {}, "outputs": [], "source": [ - "def client_fn(cid: str) -> FlowerClient:\n", + "def client_fn(context: Context) -> Client:\n", " \"\"\"Create a Flower client representing a single organization.\"\"\"\n", "\n", " # Load model\n", @@ -448,25 +447,28 @@ "\n", " # Load data (CIFAR-10)\n", " # Note: each client gets a different trainloader/valloader, so each client\n", - " # will train and evaluate on their own unique data\n", - " trainloader = trainloaders[int(cid)]\n", - " valloader = valloaders[int(cid)]\n", + " # will train and evaluate on their own unique data partition\n", + " # Read the node_config to fetch data partition associated to this node\n", + " partition_id = context.node_config[\"partition-id\"]\n", + " trainloader, valloader, _ = load_datasets(partition_id=partition_id)\n", + "\n", + " # Create a single Flower client representing a single organization\n", + " # FlowerClient is a subclass of NumPyClient, so we need to call .to_client()\n", + " # to convert it to a subclass of `flwr.client.Client`\n", + " return FlowerClient(net, trainloader, valloader).to_client()\n", + "\n", "\n", - " # Create a single Flower client representing a single organization\n", - " return FlowerClient(net, trainloader, valloader).to_client()" + "# Create the ClientApp\n", + "client = ClientApp(client_fn=client_fn)" ] }, { "cell_type": "markdown", "metadata": 
{}, "source": [ - "### Starting the training\n", - "\n", - "We now have the class `FlowerClient` which defines client-side training/evaluation and `client_fn` which allows Flower to create `FlowerClient` instances whenever it needs to call `fit` or `evaluate` on one particular client. The last step is to start the actual simulation using `flwr.simulation.start_simulation`. \n", - "\n", - "The function `start_simulation` accepts a number of arguments, amongst them the `client_fn` used to create `FlowerClient` instances, the number of clients to simulate (`num_clients`), the number of federated learning rounds (`num_rounds`), and the strategy. The strategy encapsulates the federated learning approach/algorithm, for example, *Federated Averaging* (FedAvg).\n", + "### Define the Flower ServerApp\n", "\n", - "Flower has a number of built-in strategies, but we can also use our own strategy implementations to customize nearly all aspects of the federated learning approach. For this example, we use the built-in `FedAvg` implementation and customize it using a few basic parameters. The last step is the actual call to `start_simulation` which - you guessed it - starts the simulation:" + "On the server side, we need to configure a strategy which encapsulates the federated learning approach/algorithm, for example, *Federated Averaging* (FedAvg). Flower has a number of built-in strategies, but we can also use our own strategy implementations to customize nearly all aspects of the federated learning approach. 
For this example, we use the built-in `FedAvg` implementation and customize it using a few basic parameters:" ] }, { @@ -476,30 +478,94 @@ "outputs": [], "source": [ "# Create FedAvg strategy\n", - "strategy = fl.server.strategy.FedAvg(\n", + "strategy = FedAvg(\n", " fraction_fit=1.0, # Sample 100% of available clients for training\n", " fraction_evaluate=0.5, # Sample 50% of available clients for evaluation\n", " min_fit_clients=10, # Never sample less than 10 clients for training\n", " min_evaluate_clients=5, # Never sample less than 5 clients for evaluation\n", " min_available_clients=10, # Wait until all 10 clients are available\n", - ")\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Similar to `ClientApp`, we create a `ServerApp` using a utility function `server_fn`. In `server_fn`, we pass an instance of `ServerConfig` for defining the number of federated learning rounds (`num_rounds`) and we also pass the previously created `strategy`. The `server_fn` returns a `ServerAppComponents` object containing the settings that define the `ServerApp` behaviour. `ServerApp` is the entrypoint that Flower uses to call all your server-side code (for example, the strategy)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def server_fn(context: Context) -> ServerAppComponents:\n", + " \"\"\"Construct components that set the ServerApp behaviour.\n", + "\n", + " You can use the settings in `context.run_config` to parameterize the\n", + " construction of all elements (e.g the strategy or the number of rounds)\n", + " wrapped in the returned ServerAppComponents object.\n", + " \"\"\"\n", + "\n", + " # Configure the server for 5 rounds of training\n", + " config = ServerConfig(num_rounds=5)\n", + "\n", + " return ServerAppComponents(strategy=strategy, config=config)\n", "\n", - "# Specify the resources each of your clients need. 
By default, each\n", - "# client will be allocated 1x CPU and 0x GPUs\n", - "client_resources = {\"num_cpus\": 1, \"num_gpus\": 0.0}\n", + "\n", + "# Create the ServerApp\n", + "server = ServerApp(server_fn=server_fn)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Run the training\n", + "\n", + "In simulation, we often want to control the amount of resources each client can use. In the next cell, we specify a `backend_config` dictionary with the `client_resources` key (required) for defining the amount of CPU and GPU resources each client can access." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Specify the resources each of your clients need\n", + "# By default, each client will be allocated 1x CPU and 0x GPUs\n", + "backend_config = {\"client_resources\": {\"num_cpus\": 1, \"num_gpus\": 0.0}}\n", + "\n", + "# When running on GPU, assign an entire GPU for each client\n", "if DEVICE.type == \"cuda\":\n", - " # here we are assigning an entire GPU for each client.\n", - " client_resources = {\"num_cpus\": 1, \"num_gpus\": 1.0}\n", - " # Refer to our documentation for more details about Flower Simulations\n", - " # and how to setup these `client_resources`.\n", - "\n", - "# Start simulation\n", - "fl.simulation.start_simulation(\n", - " client_fn=client_fn,\n", - " num_clients=NUM_CLIENTS,\n", - " config=fl.server.ServerConfig(num_rounds=5),\n", - " strategy=strategy,\n", - " client_resources=client_resources,\n", + " backend_config = {\"client_resources\": {\"num_cpus\": 1, \"num_gpus\": 1.0}}\n", + " # Refer to our Flower framework documentation for more details about Flower simulations\n", + " # and how to set up the `backend_config`" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The last step is the actual call to `run_simulation` which - you guessed it - runs the simulation. 
`run_simulation` accepts a number of arguments:\n", + "- `server_app` and `client_app`: the previously created `ServerApp` and `ClientApp` objects, respectively\n", + "- `num_supernodes`: the number of `SuperNodes` to simulate which equals the number of clients for Flower simulation\n", + "- `backend_config`: the resource allocation used in this simulation" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Run simulation\n", + "run_simulation(\n", + " server_app=server,\n", + " client_app=client,\n", + " num_supernodes=NUM_CLIENTS,\n", + " backend_config=backend_config,\n", ")" ] }, @@ -511,9 +577,9 @@ "\n", "So how does this work? How does Flower execute this simulation?\n", "\n", - "When we call `start_simulation`, we tell Flower that there are 10 clients (`num_clients=10`). Flower then goes ahead an asks the `FedAvg` strategy to select clients. `FedAvg` knows that it should select 100% of the available clients (`fraction_fit=1.0`), so it goes ahead and selects 10 random clients (i.e., 100% of 10).\n", + "When we call `run_simulation`, we tell Flower that there are 10 clients (`num_supernodes=10`, where 1 `SuperNode` launches 1 `ClientApp`). Flower then goes ahead and asks the `ServerApp` to issue instructions to those nodes using the `FedAvg` strategy. `FedAvg` knows that it should select 100% of the available clients (`fraction_fit=1.0`), so it goes ahead and selects 10 random clients (i.e., 100% of 10).\n", "\n", - "Flower then asks the selected 10 clients to train the model. When the server receives the model parameter updates from the clients, it hands those updates over to the strategy (*FedAvg*) for aggregation. The strategy aggregates those updates and returns the new global model, which then gets used in the next round of federated learning." + "Flower then asks the selected 10 clients to train the model. 
Each of the 10 `ClientApp` instances receives a message, which causes it to call `client_fn` to create an instance of `FlowerClient`. It then calls `.fit()` on each of the `FlowerClient` instances and returns the resulting model parameter updates to the `ServerApp`. When the `ServerApp` receives the model parameter updates from the clients, it hands those updates over to the strategy (*FedAvg*) for aggregation. The strategy aggregates those updates and returns the new global model, which then gets used in the next round of federated learning." ] }, { @@ -546,36 +612,45 @@ " return {\"accuracy\": sum(accuracies) / sum(examples)}" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The only thing left to do is to tell the strategy to call this function whenever it receives evaluation metric dictionaries from the clients:" - ] - }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "# Create FedAvg strategy\n", - "strategy = fl.server.strategy.FedAvg(\n", - " fraction_fit=1.0,\n", - " fraction_evaluate=0.5,\n", - " min_fit_clients=10,\n", - " min_evaluate_clients=5,\n", - " min_available_clients=10,\n", - " evaluate_metrics_aggregation_fn=weighted_average, # <-- pass the metric aggregation function\n", - ")\n", - "\n", - "# Start simulation\n", - "fl.simulation.start_simulation(\n", - " client_fn=client_fn,\n", - " num_clients=NUM_CLIENTS,\n", - " config=fl.server.ServerConfig(num_rounds=5),\n", - " strategy=strategy,\n", - " client_resources=client_resources,\n", + "def server_fn(context: Context) -> ServerAppComponents:\n", + " \"\"\"Construct components that set the ServerApp behaviour.\n", + "\n", + " You can use settings in `context.run_config` to parameterize the\n", + " construction of all elements (e.g the strategy or the number of rounds)\n", + " wrapped in the returned ServerAppComponents object.\n", + " \"\"\"\n", + "\n", + " # Create FedAvg strategy\n", + " strategy = FedAvg(\n", + " 
fraction_fit=1.0,\n", + " fraction_evaluate=0.5,\n", + " min_fit_clients=10,\n", + " min_evaluate_clients=5,\n", + " min_available_clients=10,\n", + " evaluate_metrics_aggregation_fn=weighted_average, # <-- pass the metric aggregation function\n", + " )\n", + "\n", + " # Configure the server for 5 rounds of training\n", + " config = ServerConfig(num_rounds=5)\n", + "\n", + " return ServerAppComponents(strategy=strategy, config=config)\n", + "\n", + "\n", + "# Create a new server instance with the updated FedAvg strategy\n", + "server = ServerApp(server_fn=server_fn)\n", + "\n", + "# Run simulation\n", + "run_simulation(\n", + " server_app=server,\n", + " client_app=client,\n", + " num_supernodes=NUM_CLIENTS,\n", + " backend_config=backend_config,\n", ")" ] }, @@ -605,7 +680,7 @@ "source": [ "## Next steps\n", "\n", - "Before you continue, make sure to join the Flower community on Slack: [Join Slack](https://flower.ai/join-slack/)\n", + "Before you continue, make sure to join the Flower community on Flower Discuss ([Join Flower Discuss](https://discuss.flower.ai)) and on Slack ([Join Slack](https://flower.ai/join-slack/)).\n", "\n", "There's a dedicated `#questions` channel if you need help, but we'd also love to hear who you are in `#introductions`!\n", "\n", @@ -620,11 +695,11 @@ "toc_visible": true }, "kernelspec": { - "display_name": "flwr", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" } }, "nbformat": 4, - "nbformat_minor": 0 + "nbformat_minor": 4 } diff --git a/doc/source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb b/doc/source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb index e20a8d83f674..a361365f4fb0 100644 --- a/doc/source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb +++ b/doc/source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb @@ -9,11 +9,13 @@ "\n", "Welcome to the next part of the federated learning tutorial. 
In previous parts of this tutorial, we introduced federated learning with PyTorch and Flower ([part 1](https://flower.ai/docs/framework/tutorial-get-started-with-flower-pytorch.html)).\n", "\n", - "In this notebook, we'll begin to customize the federated learning system we built in the introductory notebook (again, using [Flower](https://flower.ai/) and [PyTorch](https://pytorch.org/)).\n", + "In this notebook, we'll begin to customize the federated learning system we built in the introductory notebook again, using the Flower framework, Flower Datasets, and PyTorch.\n", "\n", - "> [Star Flower on GitHub](https://github.com/adap/flower) ⭐️ and join the Flower community on Slack to connect, ask questions, and get help: [Join Slack](https://flower.ai/join-slack) 🌼 We'd love to hear from you in the `#introductions` channel! And if anything is unclear, head over to the `#questions` channel.\n", + "> [Star Flower on GitHub](https://github.com/adap/flower) ⭐️ and join the Flower community on Flower Discuss and the Flower Slack to connect, ask questions, and get help:\n", + "> - [Join Flower Discuss](https://discuss.flower.ai/) We'd love to hear from you in the `Introduction` topic! If anything is unclear, post in `Flower Help - Beginners`.\n", + "> - [Join Flower Slack](https://flower.ai/join-slack) We'd love to hear from you in the `#introductions` channel! If anything is unclear, head over to the `#questions` channel.\n", "\n", - "Let's move beyond FedAvg with Flower strategies!" + "Let's move beyond FedAvg with Flower strategies! 
🌼" ] }, { @@ -40,7 +42,7 @@ "metadata": {}, "outputs": [], "source": [ - "!pip install -q flwr[simulation] torch torchvision" + "!pip install -q flwr[simulation] flwr-datasets[vision] torch torchvision" ] }, { @@ -64,15 +66,19 @@ "import torch.nn as nn\n", "import torch.nn.functional as F\n", "import torchvision.transforms as transforms\n", - "from torch.utils.data import DataLoader, random_split\n", - "from torchvision.datasets import CIFAR10\n", + "from torch.utils.data import DataLoader\n", "\n", - "import flwr as fl\n", + "import flwr\n", + "from flwr.client import Client, ClientApp, NumPyClient\n", + "from flwr.server import ServerApp, ServerConfig, ServerAppComponents\n", + "from flwr.server.strategy import FedAvg, FedAdagrad\n", + "from flwr.simulation import run_simulation\n", + "from flwr_datasets import FederatedDataset\n", + "from flwr.common import ndarrays_to_parameters, NDArrays, Scalar, Context\n", "\n", "DEVICE = torch.device(\"cpu\") # Try \"cuda\" to train on GPU\n", - "print(\n", - " f\"Training on {DEVICE} using PyTorch {torch.__version__} and Flower {fl.__version__}\"\n", - ")" + "print(f\"Training on {DEVICE}\")\n", + "print(f\"Flower {flwr.__version__} / PyTorch {torch.__version__}\")" ] }, { @@ -88,7 +94,7 @@ "source": [ "### Data loading\n", "\n", - "Let's now load the CIFAR-10 training and test set, partition them into ten smaller datasets (each split into training and validation set), and wrap everything in their own `DataLoader`. We introduce a new parameter `num_clients` which allows us to call `load_datasets` with different numbers of clients." + "Let's now load the CIFAR-10 training and test set, partition them into ten smaller datasets (each split into training and validation set), and wrap everything in their own `DataLoader`. We introduce a new parameter `num_partitions` which allows us to call `load_datasets` with different numbers of partitions." 
] }, { @@ -97,37 +103,34 @@ "metadata": {}, "outputs": [], "source": [ - "NUM_CLIENTS = 10\n", + "NUM_PARTITIONS = 10\n", + "BATCH_SIZE = 32\n", "\n", "\n", - "def load_datasets(num_clients: int):\n", - " # Download and transform CIFAR-10 (train and test)\n", - " transform = transforms.Compose(\n", + "def load_datasets(partition_id: int, num_partitions: int):\n", + " fds = FederatedDataset(dataset=\"cifar10\", partitioners={\"train\": num_partitions})\n", + " partition = fds.load_partition(partition_id)\n", + " # Divide data on each node: 80% train, 20% test\n", + " partition_train_test = partition.train_test_split(test_size=0.2, seed=42)\n", + " pytorch_transforms = transforms.Compose(\n", " [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]\n", " )\n", - " trainset = CIFAR10(\"./dataset\", train=True, download=True, transform=transform)\n", - " testset = CIFAR10(\"./dataset\", train=False, download=True, transform=transform)\n", - "\n", - " # Split training set into `num_clients` partitions to simulate different local datasets\n", - " partition_size = len(trainset) // num_clients\n", - " lengths = [partition_size] * num_clients\n", - " datasets = random_split(trainset, lengths, torch.Generator().manual_seed(42))\n", - "\n", - " # Split each partition into train/val and create DataLoader\n", - " trainloaders = []\n", - " valloaders = []\n", - " for ds in datasets:\n", - " len_val = len(ds) // 10 # 10 % validation set\n", - " len_train = len(ds) - len_val\n", - " lengths = [len_train, len_val]\n", - " ds_train, ds_val = random_split(ds, lengths, torch.Generator().manual_seed(42))\n", - " trainloaders.append(DataLoader(ds_train, batch_size=32, shuffle=True))\n", - " valloaders.append(DataLoader(ds_val, batch_size=32))\n", - " testloader = DataLoader(testset, batch_size=32)\n", - " return trainloaders, valloaders, testloader\n", "\n", + " def apply_transforms(batch):\n", + " # Instead of passing transforms to CIFAR10(..., 
transform=transform)\n", + " # we will use this function to dataset.with_transform(apply_transforms)\n", + " # The transforms object is exactly the same\n", + " batch[\"img\"] = [pytorch_transforms(img) for img in batch[\"img\"]]\n", + " return batch\n", "\n", - "trainloaders, valloaders, testloader = load_datasets(NUM_CLIENTS)" + " partition_train_test = partition_train_test.with_transform(apply_transforms)\n", + " trainloader = DataLoader(\n", + " partition_train_test[\"train\"], batch_size=BATCH_SIZE, shuffle=True\n", + " )\n", + " valloader = DataLoader(partition_train_test[\"test\"], batch_size=BATCH_SIZE)\n", + " testset = fds.load_split(\"test\").with_transform(apply_transforms)\n", + " testloader = DataLoader(testset, batch_size=BATCH_SIZE)\n", + " return trainloader, valloader, testloader" ] }, { @@ -182,7 +185,8 @@ " net.train()\n", " for epoch in range(epochs):\n", " correct, total, epoch_loss = 0, 0, 0.0\n", - " for images, labels in trainloader:\n", + " for batch in trainloader:\n", + " images, labels = batch[\"img\"], batch[\"label\"]\n", " images, labels = images.to(DEVICE), labels.to(DEVICE)\n", " optimizer.zero_grad()\n", " outputs = net(images)\n", @@ -204,7 +208,8 @@ " correct, total, loss = 0, 0, 0.0\n", " net.eval()\n", " with torch.no_grad():\n", - " for images, labels in testloader:\n", + " for batch in testloader:\n", + " images, labels = batch[\"img\"], batch[\"label\"]\n", " images, labels = images.to(DEVICE), labels.to(DEVICE)\n", " outputs = net(images)\n", " loss += criterion(outputs, labels).item()\n", @@ -222,7 +227,7 @@ "source": [ "### Flower client\n", "\n", - "To implement the Flower client, we (again) create a subclass of `flwr.client.NumPyClient` and implement the three methods `get_parameters`, `fit`, and `evaluate`. 
Here, we also pass the `cid` to the client and use it log additional details:" + "To implement the Flower client, we (again) create a subclass of `flwr.client.NumPyClient` and implement the three methods `get_parameters`, `fit`, and `evaluate`. Here, we also pass the `partition_id` to the client and use it log additional details. We then create an instance of `ClientApp` and pass it the `client_fn`." ] }, { @@ -231,35 +236,43 @@ "metadata": {}, "outputs": [], "source": [ - "class FlowerClient(fl.client.NumPyClient):\n", - " def __init__(self, cid, net, trainloader, valloader):\n", - " self.cid = cid\n", + "class FlowerClient(NumPyClient):\n", + " def __init__(self, partition_id, net, trainloader, valloader):\n", + " self.partition_id = partition_id\n", " self.net = net\n", " self.trainloader = trainloader\n", " self.valloader = valloader\n", "\n", " def get_parameters(self, config):\n", - " print(f\"[Client {self.cid}] get_parameters\")\n", + " print(f\"[Client {self.partition_id}] get_parameters\")\n", " return get_parameters(self.net)\n", "\n", " def fit(self, parameters, config):\n", - " print(f\"[Client {self.cid}] fit, config: {config}\")\n", + " print(f\"[Client {self.partition_id}] fit, config: {config}\")\n", " set_parameters(self.net, parameters)\n", " train(self.net, self.trainloader, epochs=1)\n", " return get_parameters(self.net), len(self.trainloader), {}\n", "\n", " def evaluate(self, parameters, config):\n", - " print(f\"[Client {self.cid}] evaluate, config: {config}\")\n", + " print(f\"[Client {self.partition_id}] evaluate, config: {config}\")\n", " set_parameters(self.net, parameters)\n", " loss, accuracy = test(self.net, self.valloader)\n", " return float(loss), len(self.valloader), {\"accuracy\": float(accuracy)}\n", "\n", "\n", - "def client_fn(cid) -> FlowerClient:\n", + "def client_fn(context: Context) -> Client:\n", " net = Net().to(DEVICE)\n", - " trainloader = trainloaders[int(cid)]\n", - " valloader = valloaders[int(cid)]\n", - " return 
FlowerClient(cid, net, trainloader, valloader)" + "\n", + " # Read the node_config to fetch data partition associated to this node\n", + " partition_id = context.node_config[\"partition-id\"]\n", + " num_partitions = context.node_config[\"num-partitions\"]\n", + "\n", + " trainloader, valloader, _ = load_datasets(partition_id, num_partitions)\n", + " return FlowerClient(partition_id, net, trainloader, valloader).to_client()\n", + "\n", + "\n", + "# Create the ClientApp\n", + "client = ClientApp(client_fn=client_fn)" ] }, { @@ -277,7 +290,7 @@ "source": [ "### Server-side parameter **initialization**\n", "\n", - "Flower, by default, initializes the global model by asking one random client for the initial parameters. In many cases, we want more control over parameter initialization though. Flower therefore allows you to directly pass the initial parameters to the Strategy:" + "Flower, by default, initializes the global model by asking one random client for the initial parameters. In many cases, we want more control over parameter initialization though. Flower therefore allows you to directly pass the initial parameters to the Strategy. 
We create an instance of `Net()` and get the parameters as follows:" ] }, { @@ -287,30 +300,84 @@ "outputs": [], "source": [ "# Create an instance of the model and get the parameters\n", - "params = get_parameters(Net())\n", - "\n", - "# Pass parameters to the Strategy for server-side parameter initialization\n", - "strategy = fl.server.strategy.FedAvg(\n", - " fraction_fit=0.3,\n", - " fraction_evaluate=0.3,\n", - " min_fit_clients=3,\n", - " min_evaluate_clients=3,\n", - " min_available_clients=NUM_CLIENTS,\n", - " initial_parameters=fl.common.ndarrays_to_parameters(params),\n", - ")\n", - "\n", - "# Specify client resources if you need GPU (defaults to 1 CPU and 0 GPU)\n", - "client_resources = None\n", + "params = get_parameters(Net())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, we create a `server_fn` that returns the components needed for the server. Within `server_fn`, we create a Strategy that uses the initial parameters." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def server_fn(context: Context) -> ServerAppComponents:\n", + " # Create FedAvg strategy\n", + " strategy = FedAvg(\n", + " fraction_fit=0.3,\n", + " fraction_evaluate=0.3,\n", + " min_fit_clients=3,\n", + " min_evaluate_clients=3,\n", + " min_available_clients=NUM_PARTITIONS,\n", + " initial_parameters=ndarrays_to_parameters(\n", + " params\n", + " ), # Pass initial model parameters\n", + " )\n", + "\n", + " # Configure the server for 3 rounds of training\n", + " config = ServerConfig(num_rounds=3)\n", + " return ServerAppComponents(strategy=strategy, config=config)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Passing `initial_parameters` to the `FedAvg` strategy prevents Flower from asking one of the clients for the initial parameters. 
In `server_fn`, we pass this new `strategy` and a `ServerConfig` for defining the number of federated learning rounds (`num_rounds`). \n", + "\n", + "Similar to the `ClientApp`, we now create the `ServerApp` using the `server_fn`:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create ServerApp\n", + "server = ServerApp(server_fn=server_fn)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Last but not least, we specify the resources for each client and run the simulation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Specify the resources each of your clients need\n", + "# If set to none, by default, each client will be allocated 2x CPU and 0x GPUs\n", + "backend_config = {\"client_resources\": None}\n", "if DEVICE.type == \"cuda\":\n", - " client_resources = {\"num_gpus\": 1}\n", - "\n", - "# Start simulation\n", - "fl.simulation.start_simulation(\n", - " client_fn=client_fn,\n", - " num_clients=NUM_CLIENTS,\n", - " config=fl.server.ServerConfig(num_rounds=3), # Just three rounds\n", - " strategy=strategy,\n", - " client_resources=client_resources,\n", + " backend_config = {\"client_resources\": {\"num_gpus\": 1}}\n", + "\n", + "# Run simulation\n", + "run_simulation(\n", + " server_app=server,\n", + " client_app=client,\n", + " num_supernodes=NUM_PARTITIONS,\n", + " backend_config=backend_config,\n", ")" ] }, @@ -318,7 +385,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Passing `initial_parameters` to the `FedAvg` strategy prevents Flower from asking one of the clients for the initial parameters. If we look closely, we can see that the logs do not show any calls to the `FlowerClient.get_parameters` method." + " If we look closely, we can see that the logs do not show any calls to the `FlowerClient.get_parameters` method." 
] }, { @@ -327,7 +394,7 @@ "source": [ "### Starting with a customized strategy\n", "\n", - "We've seen the function `start_simulation` before. It accepts a number of arguments, amongst them the `client_fn` used to create `FlowerClient` instances, the number of clients to simulate `num_clients`, the number of rounds `num_rounds`, and the strategy.\n", + "We've seen the function `run_simulation` before. It accepts a number of arguments, amongst them the `server_app` which wraps around the strategy and number of training rounds, `client_app` which wraps around the `client_fn` used to create `FlowerClient` instances, and the number of clients to simulate which equals `num_supernodes`.\n", "\n", "The strategy encapsulates the federated learning approach/algorithm, for example, `FedAvg` or `FedAdagrad`. Let's try to use a different strategy this time:" ] @@ -338,23 +405,30 @@ "metadata": {}, "outputs": [], "source": [ - "# Create FedAdam strategy\n", - "strategy = fl.server.strategy.FedAdagrad(\n", - " fraction_fit=0.3,\n", - " fraction_evaluate=0.3,\n", - " min_fit_clients=3,\n", - " min_evaluate_clients=3,\n", - " min_available_clients=NUM_CLIENTS,\n", - " initial_parameters=fl.common.ndarrays_to_parameters(get_parameters(Net())),\n", - ")\n", - "\n", - "# Start simulation\n", - "fl.simulation.start_simulation(\n", - " client_fn=client_fn,\n", - " num_clients=NUM_CLIENTS,\n", - " config=fl.server.ServerConfig(num_rounds=3), # Just three rounds\n", - " strategy=strategy,\n", - " client_resources=client_resources,\n", + "def server_fn(context: Context) -> ServerAppComponents:\n", + " # Create FedAdagrad strategy\n", + " strategy = FedAdagrad(\n", + " fraction_fit=0.3,\n", + " fraction_evaluate=0.3,\n", + " min_fit_clients=3,\n", + " min_evaluate_clients=3,\n", + " min_available_clients=NUM_PARTITIONS,\n", + " initial_parameters=ndarrays_to_parameters(params),\n", + " )\n", + " # Configure the server for 3 rounds of training\n", + " config = 
ServerConfig(num_rounds=3)\n", + " return ServerAppComponents(strategy=strategy, config=config)\n", + "\n", + "\n", + "# Create the ServerApp\n", + "server = ServerApp(server_fn=server_fn)\n", + "\n", + "# Run simulation\n", + "run_simulation(\n", + " server_app=server,\n", + " client_app=client,\n", + " num_supernodes=NUM_PARTITIONS,\n", + " backend_config=backend_config,\n", ")" ] }, @@ -379,42 +453,72 @@ "metadata": {}, "outputs": [], "source": [ - "# The `evaluate` function will be by Flower called after every round\n", + "# The `evaluate` function will be called by Flower after every round\n", "def evaluate(\n", " server_round: int,\n", - " parameters: fl.common.NDArrays,\n", - " config: Dict[str, fl.common.Scalar],\n", - ") -> Optional[Tuple[float, Dict[str, fl.common.Scalar]]]:\n", + " parameters: NDArrays,\n", + " config: Dict[str, Scalar],\n", + ") -> Optional[Tuple[float, Dict[str, Scalar]]]:\n", " net = Net().to(DEVICE)\n", - " valloader = valloaders[0]\n", + " _, _, testloader = load_datasets(0, NUM_PARTITIONS)\n", " set_parameters(net, parameters) # Update model with the latest parameters\n", - " loss, accuracy = test(net, valloader)\n", + " loss, accuracy = test(net, testloader)\n", " print(f\"Server-side evaluation loss {loss} / accuracy {accuracy}\")\n", " return loss, {\"accuracy\": accuracy}" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We create a `FedAvg` strategy and pass `evaluate_fn` to it. Then, we create a `ServerApp` that uses this strategy." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def server_fn(context: Context) -> ServerAppComponents:\n", + " # Create the FedAvg strategy\n", + " strategy = FedAvg(\n", + " fraction_fit=0.3,\n", + " fraction_evaluate=0.3,\n", + " min_fit_clients=3,\n", + " min_evaluate_clients=3,\n", + " min_available_clients=NUM_PARTITIONS,\n", + " initial_parameters=ndarrays_to_parameters(params),\n", + " evaluate_fn=evaluate, # Pass the evaluation function\n", + " )\n", + " # Configure the server for 3 rounds of training\n", + " config = ServerConfig(num_rounds=3)\n", + " return ServerAppComponents(strategy=strategy, config=config)\n", + "\n", + "\n", + "# Create the ServerApp\n", + "server = ServerApp(server_fn=server_fn)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, we run the simulation." + ] + }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "strategy = fl.server.strategy.FedAvg(\n", - " fraction_fit=0.3,\n", - " fraction_evaluate=0.3,\n", - " min_fit_clients=3,\n", - " min_evaluate_clients=3,\n", - " min_available_clients=NUM_CLIENTS,\n", - " initial_parameters=fl.common.ndarrays_to_parameters(get_parameters(Net())),\n", - " evaluate_fn=evaluate, # Pass the evaluation function\n", - ")\n", - "\n", - "fl.simulation.start_simulation(\n", - " client_fn=client_fn,\n", - " num_clients=NUM_CLIENTS,\n", - " config=fl.server.ServerConfig(num_rounds=3), # Just three rounds\n", - " strategy=strategy,\n", - " client_resources=client_resources,\n", + "# Run simulation\n", + "run_simulation(\n", + " server_app=server,\n", + " client_app=client,\n", + " num_supernodes=NUM_PARTITIONS,\n", + " backend_config=backend_config,\n", ")" ] }, @@ -433,15 +537,15 @@ "metadata": {}, "outputs": [], "source": [ - "class FlowerClient(fl.client.NumPyClient):\n", - " def __init__(self, cid, net, trainloader, valloader):\n", - " self.cid = cid\n", + 
"class FlowerClient(NumPyClient):\n", + " def __init__(self, pid, net, trainloader, valloader):\n", + " self.pid = pid # partition ID of a client\n", " self.net = net\n", " self.trainloader = trainloader\n", " self.valloader = valloader\n", "\n", " def get_parameters(self, config):\n", - " print(f\"[Client {self.cid}] get_parameters\")\n", + " print(f\"[Client {self.pid}] get_parameters\")\n", " return get_parameters(self.net)\n", "\n", " def fit(self, parameters, config):\n", @@ -450,23 +554,28 @@ " local_epochs = config[\"local_epochs\"]\n", "\n", " # Use values provided by the config\n", - " print(f\"[Client {self.cid}, round {server_round}] fit, config: {config}\")\n", + " print(f\"[Client {self.pid}, round {server_round}] fit, config: {config}\")\n", " set_parameters(self.net, parameters)\n", " train(self.net, self.trainloader, epochs=local_epochs)\n", " return get_parameters(self.net), len(self.trainloader), {}\n", "\n", " def evaluate(self, parameters, config):\n", - " print(f\"[Client {self.cid}] evaluate, config: {config}\")\n", + " print(f\"[Client {self.pid}] evaluate, config: {config}\")\n", " set_parameters(self.net, parameters)\n", " loss, accuracy = test(self.net, self.valloader)\n", " return float(loss), len(self.valloader), {\"accuracy\": float(accuracy)}\n", "\n", "\n", - "def client_fn(cid) -> FlowerClient:\n", + "def client_fn(context: Context) -> Client:\n", " net = Net().to(DEVICE)\n", - " trainloader = trainloaders[int(cid)]\n", - " valloader = valloaders[int(cid)]\n", - " return FlowerClient(cid, net, trainloader, valloader)" + " partition_id = context.node_config[\"partition-id\"]\n", + " num_partitions = context.node_config[\"num-partitions\"]\n", + " trainloader, valloader, _ = load_datasets(partition_id, num_partitions)\n", + " return FlowerClient(partition_id, net, trainloader, valloader).to_client()\n", + "\n", + "\n", + "# Create the ClientApp\n", + "client = ClientApp(client_fn=client_fn)" ] }, { @@ -490,7 +599,7 @@ " \"\"\"\n", " 
config = {\n", " \"server_round\": server_round, # The current round of federated learning\n", - " \"local_epochs\": 1 if server_round < 2 else 2, #\n", + " \"local_epochs\": 1 if server_round < 2 else 2,\n", " }\n", " return config" ] @@ -499,7 +608,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Next, we'll just pass this function to the FedAvg strategy before starting the simulation:" + "Next, we'll pass this function to the FedAvg strategy before starting the simulation:" ] }, { @@ -508,23 +617,31 @@ "metadata": {}, "outputs": [], "source": [ - "strategy = fl.server.strategy.FedAvg(\n", - " fraction_fit=0.3,\n", - " fraction_evaluate=0.3,\n", - " min_fit_clients=3,\n", - " min_evaluate_clients=3,\n", - " min_available_clients=NUM_CLIENTS,\n", - " initial_parameters=fl.common.ndarrays_to_parameters(get_parameters(Net())),\n", - " evaluate_fn=evaluate,\n", - " on_fit_config_fn=fit_config, # Pass the fit_config function\n", - ")\n", - "\n", - "fl.simulation.start_simulation(\n", - " client_fn=client_fn,\n", - " num_clients=NUM_CLIENTS,\n", - " config=fl.server.ServerConfig(num_rounds=3), # Just three rounds\n", - " strategy=strategy,\n", - " client_resources=client_resources,\n", + "def server_fn(context: Context) -> ServerAppComponents:\n", + " # Create FedAvg strategy\n", + " strategy = FedAvg(\n", + " fraction_fit=0.3,\n", + " fraction_evaluate=0.3,\n", + " min_fit_clients=3,\n", + " min_evaluate_clients=3,\n", + " min_available_clients=NUM_PARTITIONS,\n", + " initial_parameters=ndarrays_to_parameters(params),\n", + " evaluate_fn=evaluate,\n", + " on_fit_config_fn=fit_config, # Pass the fit_config function\n", + " )\n", + " config = ServerConfig(num_rounds=3)\n", + " return ServerAppComponents(strategy=strategy, config=config)\n", + "\n", + "\n", + "# Create the ServerApp\n", + "server = ServerApp(server_fn=server_fn)\n", + "\n", + "# Run simulation\n", + "run_simulation(\n", + " server_app=server,\n", + " client_app=client,\n", + " 
num_supernodes=NUM_PARTITIONS,\n", + " backend_config=backend_config,\n", ")" ] }, @@ -552,16 +669,16 @@ "metadata": {}, "outputs": [], "source": [ - "NUM_CLIENTS = 1000\n", - "\n", - "trainloaders, valloaders, testloader = load_datasets(NUM_CLIENTS)" + "NUM_PARTITIONS = 1000" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "We now have 1000 partitions, each holding 45 training and 5 validation examples. Given that the number of training examples on each client is quite small, we should probably train the model a bit longer, so we configure the clients to perform 3 local training epochs. We should also adjust the fraction of clients selected for training during each round (we don't want all 1000 clients participating in every round), so we adjust `fraction_fit` to `0.05`, which means that only 5% of available clients (so 50 clients) will be selected for training each round:\n" + "Note that we can reuse the `ClientApp` for different `num-partitions` since the Context is defined by the `num_supernodes` argument in `run_simulation()`. \n", + "\n", + "We now have 1000 partitions, each holding 45 training and 5 validation examples. Given that the number of training examples on each client is quite small, we should probably train the model a bit longer, so we configure the clients to perform 3 local training epochs. 
We should also adjust the fraction of clients selected for training during each round (we don't want all 1000 clients participating in every round), so we adjust `fraction_fit` to `0.025`, which means that only 2.5% of available clients (so 25 clients) will be selected for training each round:\n" ] }, { @@ -578,22 +695,30 @@ " return config\n", "\n", "\n", - "strategy = fl.server.strategy.FedAvg(\n", - " fraction_fit=0.025, # Train on 25 clients (each round)\n", - " fraction_evaluate=0.05, # Evaluate on 50 clients (each round)\n", - " min_fit_clients=20,\n", - " min_evaluate_clients=40,\n", - " min_available_clients=NUM_CLIENTS,\n", - " initial_parameters=fl.common.ndarrays_to_parameters(get_parameters(Net())),\n", - " on_fit_config_fn=fit_config,\n", - ")\n", - "\n", - "fl.simulation.start_simulation(\n", - " client_fn=client_fn,\n", - " num_clients=NUM_CLIENTS,\n", - " config=fl.server.ServerConfig(num_rounds=3), # Just three rounds\n", - " strategy=strategy,\n", - " client_resources=client_resources,\n", + "def server_fn(context: Context) -> ServerAppComponents:\n", + " # Create FedAvg strategy\n", + " strategy = FedAvg(\n", + " fraction_fit=0.025, # Train on 25 clients (each round)\n", + " fraction_evaluate=0.05, # Evaluate on 50 clients (each round)\n", + " min_fit_clients=20,\n", + " min_evaluate_clients=40,\n", + " min_available_clients=NUM_PARTITIONS,\n", + " initial_parameters=ndarrays_to_parameters(params),\n", + " on_fit_config_fn=fit_config,\n", + " )\n", + " config = ServerConfig(num_rounds=3)\n", + " return ServerAppComponents(strategy=strategy, config=config)\n", + "\n", + "\n", + "# Create the ServerApp\n", + "server = ServerApp(server_fn=server_fn)\n", + "\n", + "# Run simulation\n", + "run_simulation(\n", + " server_app=server,\n", + " client_app=client,\n", + " num_supernodes=NUM_PARTITIONS,\n", + " backend_config=backend_config,\n", ")" ] }, @@ -614,7 +739,7 @@ "source": [ "## Next steps\n", "\n", - "Before you continue, make sure to join the 
Flower community on Slack: [Join Slack](https://flower.ai/join-slack/)\n", + "Before you continue, make sure to join the Flower community on Flower Discuss ([Join Flower Discuss](https://discuss.flower.ai)) and on Slack ([Join Slack](https://flower.ai/join-slack/)).\n", "\n", "There's a dedicated `#questions` channel if you need help, but we'd also love to hear who you are in `#introductions`!\n", "\n", diff --git a/e2e/bare-client-auth/pyproject.toml b/e2e/bare-client-auth/pyproject.toml deleted file mode 100644 index 839f0779cc01..000000000000 --- a/e2e/bare-client-auth/pyproject.toml +++ /dev/null @@ -1,20 +0,0 @@ -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[project] -name = "bare_client_auth_test" -version = "0.1.0" -description = "Client-auth-enabled bare Federated Learning test with Flower" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] -dependencies = [ - "flwr @ {root:parent:parent:uri}", -] - -[tool.hatch.build.targets.wheel] -packages = ["."] - -[tool.hatch.metadata] -allow-direct-references = true diff --git a/e2e/bare-https/pyproject.toml b/e2e/bare-https/pyproject.toml deleted file mode 100644 index de8aa92cbd02..000000000000 --- a/e2e/bare-https/pyproject.toml +++ /dev/null @@ -1,20 +0,0 @@ -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[project] -name = "bare_https_test" -version = "0.1.0" -description = "HTTPS-enabled bare Federated Learning test with Flower" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] -dependencies = [ - "flwr @ {root:parent:parent:uri}", -] - -[tool.hatch.build.targets.wheel] -packages = ["."] - -[tool.hatch.metadata] -allow-direct-references = true diff --git a/e2e/bare/pyproject.toml b/e2e/bare/pyproject.toml deleted file mode 100644 index ba8c1b2b2276..000000000000 --- a/e2e/bare/pyproject.toml +++ /dev/null @@ -1,20 +0,0 @@ -[build-system] -requires = ["hatchling"] -build-backend = 
"hatchling.build" - -[project] -name = "bare_test" -version = "0.1.0" -description = "Bare Federated Learning test with Flower" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] -dependencies = [ - "flwr[simulation,rest] @ {root:parent:parent:uri}", -] - -[tool.hatch.build.targets.wheel] -packages = ["."] - -[tool.hatch.metadata] -allow-direct-references = true diff --git a/e2e/docker/client.py b/e2e/docker/client.py index 8451b810416b..44313c7c3af6 100644 --- a/e2e/docker/client.py +++ b/e2e/docker/client.py @@ -9,6 +9,7 @@ from torchvision.transforms import Compose, Normalize, ToTensor from flwr.client import ClientApp, NumPyClient +from flwr.common import Context # ############################################################################# # 1. Regular PyTorch pipeline: nn.Module, train, test, and DataLoader @@ -122,7 +123,7 @@ def evaluate(self, parameters, config): return loss, len(testloader.dataset), {"accuracy": accuracy} -def client_fn(cid: str): +def client_fn(context: Context): """Create and return an instance of Flower `Client`.""" return FlowerClient().to_client() diff --git a/e2e/docker/compose.yaml b/e2e/docker/compose.yaml index 073ca9f60a57..c31bc81692f2 100644 --- a/e2e/docker/compose.yaml +++ b/e2e/docker/compose.yaml @@ -19,7 +19,7 @@ services: resources: limits: cpus: '2' - command: [ "--insecure", "--server", "superlink:9092" ] + command: [ "--insecure", "--superlink", "superlink:9092" ] depends_on: - superlink @@ -27,7 +27,7 @@ services: serverapp: build: dockerfile: serverapp.Dockerfile - command: [ "--insecure", "--server", "superlink:9091" ] + command: [ "--insecure", "--superlink", "superlink:9091" ] # enforce dependency for graceful execution depends_on: - superlink diff --git a/e2e/docker/pyproject.toml b/e2e/docker/pyproject.toml index 955f30c7bf8d..def93ed4065d 100644 --- a/e2e/docker/pyproject.toml +++ b/e2e/docker/pyproject.toml @@ -6,9 +6,7 @@ build-backend = "hatchling.build" name = "e2e-docker" 
version = "0.1.0" description = "TOML used to define dependencies in a E2E test" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] +authors = [{ name = "The Flower Authors", email = "hello@flower.ai" }] dependencies = [ "flwr-datasets[vision]>=0.1.0,<1.0.0", "torch==2.2.1", diff --git a/e2e/bare-client-auth/README.md b/e2e/e2e-bare-auth/README.md similarity index 100% rename from e2e/bare-client-auth/README.md rename to e2e/e2e-bare-auth/README.md diff --git a/e2e/bare-https/certificate.conf b/e2e/e2e-bare-auth/certificate.conf similarity index 94% rename from e2e/bare-https/certificate.conf rename to e2e/e2e-bare-auth/certificate.conf index ea97fcbb700d..04a2ed388174 100644 --- a/e2e/bare-https/certificate.conf +++ b/e2e/e2e-bare-auth/certificate.conf @@ -18,3 +18,4 @@ subjectAltName = @alt_names DNS.1 = localhost IP.1 = ::1 IP.2 = 127.0.0.1 +IP.3 = 0.0.0.0 diff --git a/e2e/e2e-bare-auth/e2e_bare_auth/__init__.py b/e2e/e2e-bare-auth/e2e_bare_auth/__init__.py new file mode 100644 index 000000000000..713eba0cf451 --- /dev/null +++ b/e2e/e2e-bare-auth/e2e_bare_auth/__init__.py @@ -0,0 +1 @@ +"""bare_auth_e2e.""" diff --git a/e2e/bare-client-auth/client.py b/e2e/e2e-bare-auth/e2e_bare_auth/client_app.py similarity index 79% rename from e2e/bare-client-auth/client.py rename to e2e/e2e-bare-auth/e2e_bare_auth/client_app.py index e82f17088bd9..c7b0d59b8ea5 100644 --- a/e2e/bare-client-auth/client.py +++ b/e2e/e2e-bare-auth/e2e_bare_auth/client_app.py @@ -1,13 +1,14 @@ import numpy as np -import flwr as fl +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context model_params = np.array([1]) objective = 5 # Define Flower client -class FlowerClient(fl.client.NumPyClient): +class FlowerClient(NumPyClient): def get_parameters(self, config): return model_params @@ -23,10 +24,10 @@ def evaluate(self, parameters, config): return loss, 1, {"accuracy": accuracy} -def client_fn(cid): +def client_fn(context: Context): return 
FlowerClient().to_client() -app = fl.client.ClientApp( +app = ClientApp( client_fn=client_fn, ) diff --git a/e2e/bare-client-auth/server.py b/e2e/e2e-bare-auth/e2e_bare_auth/server_app.py similarity index 97% rename from e2e/bare-client-auth/server.py rename to e2e/e2e-bare-auth/e2e_bare_auth/server_app.py index e10d5ebc5760..035f6e3ecab2 100644 --- a/e2e/bare-client-auth/server.py +++ b/e2e/e2e-bare-auth/e2e_bare_auth/server_app.py @@ -9,7 +9,7 @@ def main(driver, context): # Construct the LegacyContext context = fl.server.LegacyContext( - state=context.state, + context=context, config=fl.server.ServerConfig(num_rounds=3), ) diff --git a/e2e/bare-client-auth/generate.sh b/e2e/e2e-bare-auth/generate.sh similarity index 100% rename from e2e/bare-client-auth/generate.sh rename to e2e/e2e-bare-auth/generate.sh diff --git a/e2e/e2e-bare-auth/pyproject.toml b/e2e/e2e-bare-auth/pyproject.toml new file mode 100644 index 000000000000..d3ca5e543011 --- /dev/null +++ b/e2e/e2e-bare-auth/pyproject.toml @@ -0,0 +1,31 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "e2e-bare-auth" +version = "1.0.0" +description = "Auth-enabled bare Federated Learning test with Flower" +license = "Apache-2.0" +dependencies = ["flwr @ {root:parent:parent:uri}"] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.hatch.metadata] +allow-direct-references = true + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "e2e_bare_auth.server_app:app" +clientapp = "e2e_bare_auth.client_app:app" + +[tool.flwr.app.config] + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 diff --git a/e2e/bare-https/README.md b/e2e/e2e-bare-https/README.md similarity index 100% rename from e2e/bare-https/README.md rename to e2e/e2e-bare-https/README.md diff --git a/e2e/bare-client-auth/certificate.conf b/e2e/e2e-bare-https/certificate.conf similarity 
index 100% rename from e2e/bare-client-auth/certificate.conf rename to e2e/e2e-bare-https/certificate.conf diff --git a/e2e/e2e-bare-https/e2e_bare_https/__init__.py b/e2e/e2e-bare-https/e2e_bare_https/__init__.py new file mode 100644 index 000000000000..473c050856ba --- /dev/null +++ b/e2e/e2e-bare-https/e2e_bare_https/__init__.py @@ -0,0 +1 @@ +"""bare_https_e2e.""" diff --git a/e2e/bare-https/client.py b/e2e/e2e-bare-https/e2e_bare_https/client_app.py similarity index 75% rename from e2e/bare-https/client.py rename to e2e/e2e-bare-https/e2e_bare_https/client_app.py index 8f5c1412fd01..184978a30457 100644 --- a/e2e/bare-https/client.py +++ b/e2e/e2e-bare-https/e2e_bare_https/client_app.py @@ -2,14 +2,15 @@ import numpy as np -import flwr as fl +from flwr.client import ClientApp, NumPyClient, start_client +from flwr.common import Context model_params = np.array([1]) objective = 5 # Define Flower client -class FlowerClient(fl.client.NumPyClient): +class FlowerClient(NumPyClient): def get_parameters(self, config): return model_params @@ -25,19 +26,19 @@ def evaluate(self, parameters, config): return loss, 1, {"accuracy": accuracy} -def client_fn(cid): +def client_fn(context: Context): return FlowerClient().to_client() -app = fl.client.ClientApp( +app = ClientApp( client_fn=client_fn, ) if __name__ == "__main__": # Start Flower client - fl.client.start_client( + start_client( server_address="127.0.0.1:8080", client=FlowerClient().to_client(), - root_certificates=Path("certificates/ca.crt").read_bytes(), + root_certificates=Path("../certificates/ca.crt").read_bytes(), insecure=False, ) diff --git a/e2e/bare-https/server.py b/e2e/e2e-bare-https/e2e_bare_https/server_app.py similarity index 81% rename from e2e/bare-https/server.py rename to e2e/e2e-bare-https/e2e_bare_https/server_app.py index e10d5ebc5760..cb466e703161 100644 --- a/e2e/bare-https/server.py +++ b/e2e/e2e-bare-https/e2e_bare_https/server_app.py @@ -9,7 +9,7 @@ def main(driver, context): # Construct the 
LegacyContext context = fl.server.LegacyContext( - state=context.state, + context=context, config=fl.server.ServerConfig(num_rounds=3), ) @@ -31,9 +31,9 @@ def main(driver, context): server_address="127.0.0.1:8080", config=fl.server.ServerConfig(num_rounds=3), certificates=( - Path("certificates/ca.crt").read_bytes(), - Path("certificates/server.pem").read_bytes(), - Path("certificates/server.key").read_bytes(), + Path("../certificates/ca.crt").read_bytes(), + Path("../certificates/server.pem").read_bytes(), + Path("../certificates/server.key").read_bytes(), ), ) diff --git a/e2e/bare-https/generate.sh b/e2e/e2e-bare-https/generate.sh similarity index 100% rename from e2e/bare-https/generate.sh rename to e2e/e2e-bare-https/generate.sh diff --git a/e2e/e2e-bare-https/pyproject.toml b/e2e/e2e-bare-https/pyproject.toml new file mode 100644 index 000000000000..e1ec84157788 --- /dev/null +++ b/e2e/e2e-bare-https/pyproject.toml @@ -0,0 +1,31 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "e2e-bare-https" +version = "1.0.0" +description = "HTTPS-enabled bare Federated Learning test with Flower" +license = "Apache-2.0" +dependencies = ["flwr @ {root:parent:parent:uri}"] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.hatch.metadata] +allow-direct-references = true + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "e2e_bare_https.server_app:app" +clientapp = "e2e_bare_https.client_app:app" + +[tool.flwr.app.config] + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 diff --git a/e2e/bare-https/simulation.py b/e2e/e2e-bare-https/simulation.py similarity index 100% rename from e2e/bare-https/simulation.py rename to e2e/e2e-bare-https/simulation.py diff --git a/e2e/bare/README.md b/e2e/e2e-bare/README.md similarity index 100% rename from e2e/bare/README.md rename to e2e/e2e-bare/README.md diff 
--git a/e2e/e2e-bare/e2e_bare/__init__.py b/e2e/e2e-bare/e2e_bare/__init__.py new file mode 100644 index 000000000000..5e7430011fc2 --- /dev/null +++ b/e2e/e2e-bare/e2e_bare/__init__.py @@ -0,0 +1 @@ +"""bare_e2e.""" diff --git a/e2e/bare/client.py b/e2e/e2e-bare/e2e_bare/client_app.py similarity index 85% rename from e2e/bare/client.py rename to e2e/e2e-bare/e2e_bare/client_app.py index 402d775ac3a9..943e60d5db9f 100644 --- a/e2e/bare/client.py +++ b/e2e/e2e-bare/e2e_bare/client_app.py @@ -2,8 +2,8 @@ import numpy as np -import flwr as fl -from flwr.common import ConfigsRecord +from flwr.client import ClientApp, NumPyClient, start_client +from flwr.common import ConfigsRecord, Context SUBSET_SIZE = 1000 STATE_VAR = "timestamp" @@ -14,7 +14,7 @@ # Define Flower client -class FlowerClient(fl.client.NumPyClient): +class FlowerClient(NumPyClient): def get_parameters(self, config): return model_params @@ -51,16 +51,14 @@ def evaluate(self, parameters, config): ) -def client_fn(cid): +def client_fn(context: Context): return FlowerClient().to_client() -app = fl.client.ClientApp( +app = ClientApp( client_fn=client_fn, ) if __name__ == "__main__": # Start Flower client - fl.client.start_client( - server_address="127.0.0.1:8080", client=FlowerClient().to_client() - ) + start_client(server_address="127.0.0.1:8080", client=FlowerClient().to_client()) diff --git a/e2e/server.py b/e2e/e2e-bare/e2e_bare/server_app.py similarity index 98% rename from e2e/server.py rename to e2e/e2e-bare/e2e_bare/server_app.py index c678cd0a2446..cb4f65eed0da 100644 --- a/e2e/server.py +++ b/e2e/e2e-bare/e2e_bare/server_app.py @@ -38,7 +38,7 @@ def record_state_metrics(metrics): def main(driver, context): # Construct the LegacyContext context = fl.server.LegacyContext( - state=context.state, + context=context, config=fl.server.ServerConfig(num_rounds=3), ) diff --git a/e2e/e2e-bare/pyproject.toml b/e2e/e2e-bare/pyproject.toml new file mode 100644 index 000000000000..12099fcd9027 --- /dev/null +++ 
b/e2e/e2e-bare/pyproject.toml @@ -0,0 +1,31 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "e2e-bare" +version = "1.0.0" +description = "Bare Federated Learning test with Flower" +license = "Apache-2.0" +dependencies = ["flwr[simulation,rest] @ {root:parent:parent:uri}"] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.hatch.metadata] +allow-direct-references = true + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "e2e_bare.server_app:app" +clientapp = "e2e_bare.client_app:app" + +[tool.flwr.app.config] + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 diff --git a/e2e/pytorch/simulation.py b/e2e/e2e-bare/simulation.py similarity index 97% rename from e2e/pytorch/simulation.py rename to e2e/e2e-bare/simulation.py index 25868eb8e33f..fd41812eb3a8 100644 --- a/e2e/pytorch/simulation.py +++ b/e2e/e2e-bare/simulation.py @@ -1,7 +1,7 @@ from typing import List, Tuple import numpy as np -from client import client_fn +from e2e_bare.client_app import client_fn import flwr as fl from flwr.common import Metrics diff --git a/e2e/fastai/README.md b/e2e/e2e-fastai/README.md similarity index 100% rename from e2e/fastai/README.md rename to e2e/e2e-fastai/README.md diff --git a/e2e/e2e-fastai/e2e_fastai/__init__.py b/e2e/e2e-fastai/e2e_fastai/__init__.py new file mode 100644 index 000000000000..e64b144c6501 --- /dev/null +++ b/e2e/e2e-fastai/e2e_fastai/__init__.py @@ -0,0 +1 @@ +"""fastai_e2e.""" diff --git a/e2e/fastai/client.py b/e2e/e2e-fastai/e2e_fastai/client_app.py similarity index 90% rename from e2e/fastai/client.py rename to e2e/e2e-fastai/e2e_fastai/client_app.py index 1d98a1134941..161b27b5a548 100644 --- a/e2e/fastai/client.py +++ b/e2e/e2e-fastai/e2e_fastai/client_app.py @@ -5,7 +5,8 @@ import torch from fastai.vision.all import * -import flwr as fl +from flwr.client import ClientApp, 
NumPyClient, start_client +from flwr.common import Context warnings.filterwarnings("ignore", category=UserWarning) @@ -29,7 +30,7 @@ # Define Flower client -class FlowerClient(fl.client.NumPyClient): +class FlowerClient(NumPyClient): def get_parameters(self, config): return [val.cpu().numpy() for _, val in learn.model.state_dict().items()] @@ -49,18 +50,18 @@ def evaluate(self, parameters, config): return loss, len(dls.valid), {"accuracy": 1 - error_rate} -def client_fn(cid): +def client_fn(context: Context): return FlowerClient().to_client() -app = fl.client.ClientApp( +app = ClientApp( client_fn=client_fn, ) if __name__ == "__main__": # Start Flower client - fl.client.start_client( + start_client( server_address="127.0.0.1:8080", client=FlowerClient().to_client(), ) diff --git a/e2e/e2e-fastai/e2e_fastai/server_app.py b/e2e/e2e-fastai/e2e_fastai/server_app.py new file mode 100644 index 000000000000..cb4f65eed0da --- /dev/null +++ b/e2e/e2e-fastai/e2e_fastai/server_app.py @@ -0,0 +1,79 @@ +import numpy as np + +import flwr as fl + +STATE_VAR = "timestamp" + + +# Define metric aggregation function +def record_state_metrics(metrics): + """Ensure that timestamps are monotonically increasing.""" + if not metrics: + return {} + + if STATE_VAR not in metrics[0][1]: + # Do nothing if keyword is not present + return {} + + states = [] + for _, m in metrics: + # split string and covert timestamps to float + states.append([float(tt) for tt in m[STATE_VAR].split(",")]) + + for client_state in states: + if len(client_state) == 1: + continue + deltas = np.diff(client_state) + assert np.all( + deltas > 0 + ), f"Timestamps are not monotonically increasing: {client_state}" + + return {STATE_VAR: states} + + +app = fl.server.ServerApp() + + +@app.main() +def main(driver, context): + # Construct the LegacyContext + context = fl.server.LegacyContext( + context=context, + config=fl.server.ServerConfig(num_rounds=3), + ) + + # Create the workflow + workflow = 
fl.server.workflow.DefaultWorkflow() + + # Execute + workflow(driver, context) + + hist = context.history + assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 + ) + + +if __name__ == "__main__": + strategy = fl.server.strategy.FedAvg( + evaluate_metrics_aggregation_fn=record_state_metrics + ) + + hist = fl.server.start_server( + server_address="0.0.0.0:8080", + config=fl.server.ServerConfig(num_rounds=3), + strategy=strategy, + ) + + assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 + ) + + if STATE_VAR in hist.metrics_distributed: + # The checks in record_state_metrics don't do anythinng if client's state has a single entry + state_metrics_last_round = hist.metrics_distributed[STATE_VAR][-1] + assert ( + len(state_metrics_last_round[1][0]) == 2 * state_metrics_last_round[0] + ), "There should be twice as many entries in the client state as rounds" diff --git a/e2e/fastai/pyproject.toml b/e2e/e2e-fastai/pyproject.toml similarity index 50% rename from e2e/fastai/pyproject.toml rename to e2e/e2e-fastai/pyproject.toml index 53d3b7e7baf1..6b1cbd66600e 100644 --- a/e2e/fastai/pyproject.toml +++ b/e2e/e2e-fastai/pyproject.toml @@ -3,16 +3,15 @@ requires = ["hatchling"] build-backend = "hatchling.build" [project] -name = "quickstart-fastai" -version = "0.1.0" +name = "e2e-fastai" +version = "1.0.0" description = "Fastai Federated Learning E2E test with Flower" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] +license = "Apache-2.0" dependencies = [ "flwr[simulation] @ {root:parent:parent:uri}", "fastai>=2.7.12,<3.0.0", "torch>=2.0.0,!=2.0.1,<2.1.0", + "spacy==3.7.6", ] [tool.hatch.build.targets.wheel] @@ -20,3 +19,18 @@ packages = ["."] [tool.hatch.metadata] allow-direct-references = true + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = 
"e2e_fastai.server_app:app" +clientapp = "e2e_fastai.client_app:app" + +[tool.flwr.app.config] + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 diff --git a/e2e/opacus/simulation.py b/e2e/e2e-fastai/simulation.py similarity index 86% rename from e2e/opacus/simulation.py rename to e2e/e2e-fastai/simulation.py index bf05a77cf32a..daf217d14765 100644 --- a/e2e/opacus/simulation.py +++ b/e2e/e2e-fastai/simulation.py @@ -1,4 +1,4 @@ -from client import client_fn +from e2e_fastai.client_app import client_fn import flwr as fl diff --git a/e2e/jax/README.md b/e2e/e2e-jax/README.md similarity index 100% rename from e2e/jax/README.md rename to e2e/e2e-jax/README.md diff --git a/e2e/e2e-jax/e2e_jax/__init__.py b/e2e/e2e-jax/e2e_jax/__init__.py new file mode 100644 index 000000000000..18a53ad4b76c --- /dev/null +++ b/e2e/e2e-jax/e2e_jax/__init__.py @@ -0,0 +1 @@ +"""jax_e2e.""" diff --git a/e2e/jax/client.py b/e2e/e2e-jax/e2e_jax/client_app.py similarity index 84% rename from e2e/jax/client.py rename to e2e/e2e-jax/e2e_jax/client_app.py index 347a005d923a..d834cf2b3610 100644 --- a/e2e/jax/client.py +++ b/e2e/e2e-jax/e2e_jax/client_app.py @@ -3,10 +3,11 @@ from typing import Dict, List, Tuple import jax -import jax_training import numpy as np +from e2e_jax import jax_training -import flwr as fl +from flwr.client import ClientApp, NumPyClient, start_client +from flwr.common import Context # Load data and determine model shape train_x, train_y, test_x, test_y = jax_training.load_data() @@ -14,7 +15,7 @@ model_shape = train_x.shape[1:] -class FlowerClient(fl.client.NumPyClient): +class FlowerClient(NumPyClient): def __init__(self): self.params = jax_training.load_model(model_shape) @@ -48,16 +49,14 @@ def evaluate( return float(loss), num_examples, {"loss": float(loss)} -def client_fn(cid): +def client_fn(context: Context): return FlowerClient().to_client() -app = fl.client.ClientApp( +app = 
ClientApp( client_fn=client_fn, ) if __name__ == "__main__": # Start Flower client - fl.client.start_client( - server_address="127.0.0.1:8080", client=FlowerClient().to_client() - ) + start_client(server_address="127.0.0.1:8080", client=FlowerClient().to_client()) diff --git a/e2e/jax/jax_training.py b/e2e/e2e-jax/e2e_jax/jax_training.py similarity index 100% rename from e2e/jax/jax_training.py rename to e2e/e2e-jax/e2e_jax/jax_training.py diff --git a/e2e/e2e-jax/e2e_jax/server_app.py b/e2e/e2e-jax/e2e_jax/server_app.py new file mode 100644 index 000000000000..cb4f65eed0da --- /dev/null +++ b/e2e/e2e-jax/e2e_jax/server_app.py @@ -0,0 +1,79 @@ +import numpy as np + +import flwr as fl + +STATE_VAR = "timestamp" + + +# Define metric aggregation function +def record_state_metrics(metrics): + """Ensure that timestamps are monotonically increasing.""" + if not metrics: + return {} + + if STATE_VAR not in metrics[0][1]: + # Do nothing if keyword is not present + return {} + + states = [] + for _, m in metrics: + # split string and covert timestamps to float + states.append([float(tt) for tt in m[STATE_VAR].split(",")]) + + for client_state in states: + if len(client_state) == 1: + continue + deltas = np.diff(client_state) + assert np.all( + deltas > 0 + ), f"Timestamps are not monotonically increasing: {client_state}" + + return {STATE_VAR: states} + + +app = fl.server.ServerApp() + + +@app.main() +def main(driver, context): + # Construct the LegacyContext + context = fl.server.LegacyContext( + context=context, + config=fl.server.ServerConfig(num_rounds=3), + ) + + # Create the workflow + workflow = fl.server.workflow.DefaultWorkflow() + + # Execute + workflow(driver, context) + + hist = context.history + assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 + ) + + +if __name__ == "__main__": + strategy = fl.server.strategy.FedAvg( + evaluate_metrics_aggregation_fn=record_state_metrics + ) + + hist = 
fl.server.start_server( + server_address="0.0.0.0:8080", + config=fl.server.ServerConfig(num_rounds=3), + strategy=strategy, + ) + + assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 + ) + + if STATE_VAR in hist.metrics_distributed: + # The checks in record_state_metrics don't do anythinng if client's state has a single entry + state_metrics_last_round = hist.metrics_distributed[STATE_VAR][-1] + assert ( + len(state_metrics_last_round[1][0]) == 2 * state_metrics_last_round[0] + ), "There should be twice as many entries in the client state as rounds" diff --git a/e2e/jax/pyproject.toml b/e2e/e2e-jax/pyproject.toml similarity index 56% rename from e2e/jax/pyproject.toml rename to e2e/e2e-jax/pyproject.toml index bb024ba14d23..b259f66a7bc3 100644 --- a/e2e/jax/pyproject.toml +++ b/e2e/e2e-jax/pyproject.toml @@ -3,12 +3,10 @@ requires = ["hatchling"] build-backend = "hatchling.build" [project] -name = "jax_example" -version = "0.1.0" +name = "e2e-jax" +version = "1.0.0" description = "JAX example training a linear regression model with federated learning" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] +license = "Apache-2.0" dependencies = [ "flwr[simulation] @ {root:parent:parent:uri}", "jax==0.4.13", @@ -22,3 +20,18 @@ packages = ["."] [tool.hatch.metadata] allow-direct-references = true + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "e2e_jax.server_app:app" +clientapp = "e2e_jax.client_app:app" + +[tool.flwr.app.config] + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 diff --git a/e2e/pytorch-lightning/simulation.py b/e2e/e2e-jax/simulation.py similarity index 87% rename from e2e/pytorch-lightning/simulation.py rename to e2e/e2e-jax/simulation.py index bf05a77cf32a..105f5ab01a47 100644 --- a/e2e/pytorch-lightning/simulation.py +++ 
b/e2e/e2e-jax/simulation.py @@ -1,4 +1,4 @@ -from client import client_fn +from e2e_jax.client_app import client_fn import flwr as fl diff --git a/e2e/opacus/.gitignore b/e2e/e2e-opacus/.gitignore similarity index 100% rename from e2e/opacus/.gitignore rename to e2e/e2e-opacus/.gitignore diff --git a/e2e/opacus/README.md b/e2e/e2e-opacus/README.md similarity index 100% rename from e2e/opacus/README.md rename to e2e/e2e-opacus/README.md diff --git a/e2e/e2e-opacus/e2e_opacus/__init__.py b/e2e/e2e-opacus/e2e_opacus/__init__.py new file mode 100644 index 000000000000..e477387c100a --- /dev/null +++ b/e2e/e2e-opacus/e2e_opacus/__init__.py @@ -0,0 +1 @@ +"""opacus_e2e.""" diff --git a/e2e/opacus/client.py b/e2e/e2e-opacus/e2e_opacus/client_app.py similarity index 94% rename from e2e/opacus/client.py rename to e2e/e2e-opacus/e2e_opacus/client_app.py index c9ebe319063a..4077b44f5cd8 100644 --- a/e2e/opacus/client.py +++ b/e2e/e2e-opacus/e2e_opacus/client_app.py @@ -9,7 +9,8 @@ from torch.utils.data import DataLoader from torchvision.datasets import CIFAR10 -import flwr as fl +from flwr.client import ClientApp, NumPyClient, start_client +from flwr.common import Context # Define parameters. PARAMS = { @@ -78,7 +79,7 @@ def load_data(): transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] ) - data = CIFAR10("./data", train=True, download=True, transform=transform) + data = CIFAR10("./../data", train=True, download=True, transform=transform) split = math.floor(len(data) * 0.01 * PARAMS["train_split"]) trainset = torch.utils.data.Subset(data, list(range(0, split))) testset = torch.utils.data.Subset( @@ -95,7 +96,7 @@ def load_data(): # Define Flower client. -class FlowerClient(fl.client.NumPyClient): +class FlowerClient(NumPyClient): def __init__(self, model) -> None: super().__init__() # Create a privacy engine which will add DP and keep track of the privacy budget. 
@@ -139,16 +140,16 @@ def evaluate(self, parameters, config): return float(loss), len(testloader), {"accuracy": float(accuracy)} -def client_fn(cid): +def client_fn(context: Context): model = Net() return FlowerClient(model).to_client() -app = fl.client.ClientApp( +app = ClientApp( client_fn=client_fn, ) if __name__ == "__main__": - fl.client.start_client( + start_client( server_address="127.0.0.1:8080", client=FlowerClient(model).to_client() ) diff --git a/e2e/e2e-opacus/e2e_opacus/server_app.py b/e2e/e2e-opacus/e2e_opacus/server_app.py new file mode 100644 index 000000000000..cb4f65eed0da --- /dev/null +++ b/e2e/e2e-opacus/e2e_opacus/server_app.py @@ -0,0 +1,79 @@ +import numpy as np + +import flwr as fl + +STATE_VAR = "timestamp" + + +# Define metric aggregation function +def record_state_metrics(metrics): + """Ensure that timestamps are monotonically increasing.""" + if not metrics: + return {} + + if STATE_VAR not in metrics[0][1]: + # Do nothing if keyword is not present + return {} + + states = [] + for _, m in metrics: + # split string and covert timestamps to float + states.append([float(tt) for tt in m[STATE_VAR].split(",")]) + + for client_state in states: + if len(client_state) == 1: + continue + deltas = np.diff(client_state) + assert np.all( + deltas > 0 + ), f"Timestamps are not monotonically increasing: {client_state}" + + return {STATE_VAR: states} + + +app = fl.server.ServerApp() + + +@app.main() +def main(driver, context): + # Construct the LegacyContext + context = fl.server.LegacyContext( + context=context, + config=fl.server.ServerConfig(num_rounds=3), + ) + + # Create the workflow + workflow = fl.server.workflow.DefaultWorkflow() + + # Execute + workflow(driver, context) + + hist = context.history + assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 + ) + + +if __name__ == "__main__": + strategy = fl.server.strategy.FedAvg( + 
evaluate_metrics_aggregation_fn=record_state_metrics + ) + + hist = fl.server.start_server( + server_address="0.0.0.0:8080", + config=fl.server.ServerConfig(num_rounds=3), + strategy=strategy, + ) + + assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 + ) + + if STATE_VAR in hist.metrics_distributed: + # The checks in record_state_metrics don't do anythinng if client's state has a single entry + state_metrics_last_round = hist.metrics_distributed[STATE_VAR][-1] + assert ( + len(state_metrics_last_round[1][0]) == 2 * state_metrics_last_round[0] + ), "There should be twice as many entries in the client state as rounds" diff --git a/e2e/opacus/pyproject.toml b/e2e/e2e-opacus/pyproject.toml similarity index 51% rename from e2e/opacus/pyproject.toml rename to e2e/e2e-opacus/pyproject.toml index cee9fc1914cf..54aa54a7b357 100644 --- a/e2e/opacus/pyproject.toml +++ b/e2e/e2e-opacus/pyproject.toml @@ -3,12 +3,10 @@ requires = ["hatchling"] build-backend = "hatchling.build" [project] -name = "opacus_e2e" -version = "0.1.0" +name = "e2e-opacus" +version = "1.0.0" description = "Opacus E2E testing" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] +license = "Apache-2.0" dependencies = [ "flwr[simulation] @ {root:parent:parent:uri}", "opacus>=1.4.0,<2.0.0", @@ -21,3 +19,18 @@ packages = ["."] [tool.hatch.metadata] allow-direct-references = true + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "e2e_opacus.server_app:app" +clientapp = "e2e_opacus.client_app:app" + +[tool.flwr.app.config] + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 diff --git a/e2e/fastai/simulation.py b/e2e/e2e-opacus/simulation.py similarity index 86% rename from e2e/fastai/simulation.py rename to e2e/e2e-opacus/simulation.py index bf05a77cf32a..62d381f679c8 100644 --- 
a/e2e/fastai/simulation.py +++ b/e2e/e2e-opacus/simulation.py @@ -1,4 +1,4 @@ -from client import client_fn +from e2e_opacus.client_app import client_fn import flwr as fl diff --git a/e2e/pandas/README.md b/e2e/e2e-pandas/README.md similarity index 100% rename from e2e/pandas/README.md rename to e2e/e2e-pandas/README.md diff --git a/e2e/e2e-pandas/e2e_pandas/__init__.py b/e2e/e2e-pandas/e2e_pandas/__init__.py new file mode 100644 index 000000000000..608568ea7a62 --- /dev/null +++ b/e2e/e2e-pandas/e2e_pandas/__init__.py @@ -0,0 +1 @@ +"""pandas_e2e.""" diff --git a/e2e/pandas/client.py b/e2e/e2e-pandas/e2e_pandas/client_app.py similarity index 71% rename from e2e/pandas/client.py rename to e2e/e2e-pandas/e2e_pandas/client_app.py index 19e15f5a3b11..98316988cd82 100644 --- a/e2e/pandas/client.py +++ b/e2e/e2e-pandas/e2e_pandas/client_app.py @@ -3,9 +3,13 @@ import numpy as np import pandas as pd -import flwr as fl +from flwr.client import ClientApp, NumPyClient, start_client +from flwr.common import Context -df = pd.read_csv("./data/client.csv") +try: + df = pd.read_csv("./../data/client.csv") # for new Flower +except FileNotFoundError: + df = pd.read_csv("./data/client.csv") # for legacy Flower column_names = ["sepal length (cm)", "sepal width (cm)"] @@ -16,7 +20,7 @@ def compute_hist(df: pd.DataFrame, col_name: str) -> np.ndarray: # Define Flower client -class FlowerClient(fl.client.NumPyClient): +class FlowerClient(NumPyClient): def fit( self, parameters: List[np.ndarray], config: Dict[str, str] ) -> Tuple[List[np.ndarray], int, Dict]: @@ -32,17 +36,17 @@ def fit( ) -def client_fn(cid): +def client_fn(context: Context): return FlowerClient().to_client() -app = fl.client.ClientApp( +app = ClientApp( client_fn=client_fn, ) if __name__ == "__main__": # Start Flower client - fl.client.start_client( + start_client( server_address="127.0.0.1:8080", client=FlowerClient().to_client(), ) diff --git a/e2e/pandas/server.py b/e2e/e2e-pandas/e2e_pandas/server_app.py similarity 
index 95% rename from e2e/pandas/server.py rename to e2e/e2e-pandas/e2e_pandas/server_app.py index ef0e92a11ea2..06f3eb68bb28 100644 --- a/e2e/pandas/server.py +++ b/e2e/e2e-pandas/e2e_pandas/server_app.py @@ -1,4 +1,4 @@ -from strategy import FedAnalytics +from e2e_pandas.strategy import FedAnalytics import flwr as fl @@ -9,7 +9,7 @@ def main(driver, context): # Construct the LegacyContext context = fl.server.LegacyContext( - state=context.state, + context=context, config=fl.server.ServerConfig(num_rounds=3), strategy=FedAnalytics(), ) diff --git a/e2e/pandas/strategy.py b/e2e/e2e-pandas/e2e_pandas/strategy.py similarity index 100% rename from e2e/pandas/strategy.py rename to e2e/e2e-pandas/e2e_pandas/strategy.py diff --git a/e2e/e2e-pandas/pyproject.toml b/e2e/e2e-pandas/pyproject.toml new file mode 100644 index 000000000000..f10b05b44756 --- /dev/null +++ b/e2e/e2e-pandas/pyproject.toml @@ -0,0 +1,38 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "e2e-pandas" +version = "1.0.0" +description = "Pandas E2E test with Flower" +license = "Apache-2.0" +authors = [{ name = "Ragy Haddad", email = "ragy202@gmail.com" }] +maintainers = [{ name = "The Flower Authors", email = "hello@flower.ai" }] +dependencies = [ + "flwr[simulation] @ {root:parent:parent:uri}", + "numpy>=1.21.0,<2.0.0", + "pandas>=2.0.0,<3.0.0", + "scikit-learn>=1.1.1,<2.0.0", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.hatch.metadata] +allow-direct-references = true + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "e2e_pandas.server_app:app" +clientapp = "e2e_pandas.client_app:app" + +[tool.flwr.app.config] + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 diff --git a/e2e/pandas/simulation.py b/e2e/e2e-pandas/simulation.py similarity index 84% rename from e2e/pandas/simulation.py rename to 
e2e/e2e-pandas/simulation.py index 8160fb744229..f34ff34a6a13 100644 --- a/e2e/pandas/simulation.py +++ b/e2e/e2e-pandas/simulation.py @@ -1,5 +1,5 @@ -from client import client_fn -from strategy import FedAnalytics +from e2e_pandas.client_app import client_fn +from e2e_pandas.strategy import FedAnalytics import flwr as fl diff --git a/e2e/pytorch-lightning/README.md b/e2e/e2e-pytorch-lightning/README.md similarity index 100% rename from e2e/pytorch-lightning/README.md rename to e2e/e2e-pytorch-lightning/README.md diff --git a/e2e/e2e-pytorch-lightning/e2e_pytorch_lightning/__init__.py b/e2e/e2e-pytorch-lightning/e2e_pytorch_lightning/__init__.py new file mode 100644 index 000000000000..7e10bceaa6b0 --- /dev/null +++ b/e2e/e2e-pytorch-lightning/e2e_pytorch_lightning/__init__.py @@ -0,0 +1 @@ +"""pytorch_lightning_e2e.""" diff --git a/e2e/pytorch-lightning/client.py b/e2e/e2e-pytorch-lightning/e2e_pytorch_lightning/client_app.py similarity index 87% rename from e2e/pytorch-lightning/client.py rename to e2e/e2e-pytorch-lightning/e2e_pytorch_lightning/client_app.py index fdd55b3dc344..3d2903037e85 100644 --- a/e2e/pytorch-lightning/client.py +++ b/e2e/e2e-pytorch-lightning/e2e_pytorch_lightning/client_app.py @@ -1,13 +1,14 @@ from collections import OrderedDict -import mnist import pytorch_lightning as pl import torch +from e2e_pytorch_lightning import mnist -import flwr as fl +from flwr.client import ClientApp, NumPyClient, start_client +from flwr.common import Context -class FlowerClient(fl.client.NumPyClient): +class FlowerClient(NumPyClient): def __init__(self, model, train_loader, val_loader, test_loader): self.model = model self.train_loader = train_loader @@ -51,7 +52,7 @@ def _set_parameters(model, parameters): model.load_state_dict(state_dict, strict=True) -def client_fn(cid): +def client_fn(context: Context): model = mnist.LitAutoEncoder() train_loader, val_loader, test_loader = mnist.load_data() @@ -59,7 +60,7 @@ def client_fn(cid): return 
FlowerClient(model, train_loader, val_loader, test_loader).to_client() -app = fl.client.ClientApp( +app = ClientApp( client_fn=client_fn, ) @@ -71,7 +72,7 @@ def main() -> None: # Flower client client = FlowerClient(model, train_loader, val_loader, test_loader).to_client() - fl.client.start_client(server_address="127.0.0.1:8080", client=client) + start_client(server_address="127.0.0.1:8080", client=client) if __name__ == "__main__": diff --git a/e2e/pytorch-lightning/mnist.py b/e2e/e2e-pytorch-lightning/e2e_pytorch_lightning/mnist.py similarity index 94% rename from e2e/pytorch-lightning/mnist.py rename to e2e/e2e-pytorch-lightning/e2e_pytorch_lightning/mnist.py index b23efc50d1e4..977a9ea524e8 100644 --- a/e2e/pytorch-lightning/mnist.py +++ b/e2e/e2e-pytorch-lightning/e2e_pytorch_lightning/mnist.py @@ -62,7 +62,7 @@ def _evaluate(self, batch, stage=None): def load_data(): # Training / validation set trainset = MNIST( - "./data", train=True, download=True, transform=transforms.ToTensor() + "./../data", train=True, download=True, transform=transforms.ToTensor() ) trainset = Subset(trainset, range(1000)) mnist_train, mnist_val = random_split(trainset, [800, 200]) @@ -71,7 +71,7 @@ def load_data(): # Test set testset = MNIST( - "./data", train=False, download=True, transform=transforms.ToTensor() + "./../data", train=False, download=True, transform=transforms.ToTensor() ) testset = Subset(testset, range(10)) test_loader = DataLoader(testset, batch_size=32, shuffle=False, num_workers=0) diff --git a/e2e/e2e-pytorch-lightning/e2e_pytorch_lightning/server_app.py b/e2e/e2e-pytorch-lightning/e2e_pytorch_lightning/server_app.py new file mode 100644 index 000000000000..cb4f65eed0da --- /dev/null +++ b/e2e/e2e-pytorch-lightning/e2e_pytorch_lightning/server_app.py @@ -0,0 +1,79 @@ +import numpy as np + +import flwr as fl + +STATE_VAR = "timestamp" + + +# Define metric aggregation function +def record_state_metrics(metrics): + """Ensure that timestamps are monotonically 
increasing.""" + if not metrics: + return {} + + if STATE_VAR not in metrics[0][1]: + # Do nothing if keyword is not present + return {} + + states = [] + for _, m in metrics: + # split string and covert timestamps to float + states.append([float(tt) for tt in m[STATE_VAR].split(",")]) + + for client_state in states: + if len(client_state) == 1: + continue + deltas = np.diff(client_state) + assert np.all( + deltas > 0 + ), f"Timestamps are not monotonically increasing: {client_state}" + + return {STATE_VAR: states} + + +app = fl.server.ServerApp() + + +@app.main() +def main(driver, context): + # Construct the LegacyContext + context = fl.server.LegacyContext( + context=context, + config=fl.server.ServerConfig(num_rounds=3), + ) + + # Create the workflow + workflow = fl.server.workflow.DefaultWorkflow() + + # Execute + workflow(driver, context) + + hist = context.history + assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 + ) + + +if __name__ == "__main__": + strategy = fl.server.strategy.FedAvg( + evaluate_metrics_aggregation_fn=record_state_metrics + ) + + hist = fl.server.start_server( + server_address="0.0.0.0:8080", + config=fl.server.ServerConfig(num_rounds=3), + strategy=strategy, + ) + + assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 + ) + + if STATE_VAR in hist.metrics_distributed: + # The checks in record_state_metrics don't do anythinng if client's state has a single entry + state_metrics_last_round = hist.metrics_distributed[STATE_VAR][-1] + assert ( + len(state_metrics_last_round[1][0]) == 2 * state_metrics_last_round[0] + ), "There should be twice as many entries in the client state as rounds" diff --git a/e2e/pytorch-lightning/pyproject.toml b/e2e/e2e-pytorch-lightning/pyproject.toml similarity index 50% rename from e2e/pytorch-lightning/pyproject.toml rename to 
e2e/e2e-pytorch-lightning/pyproject.toml index 8706ef098d8b..66ecbb6296d0 100644 --- a/e2e/pytorch-lightning/pyproject.toml +++ b/e2e/e2e-pytorch-lightning/pyproject.toml @@ -3,12 +3,10 @@ requires = ["hatchling"] build-backend = "hatchling.build" [project] -name = "quickstart-pytorch-lightning-test" -version = "0.1.0" +name = "e2e-pytorch-lightning" +version = "1.0.0" description = "Federated Learning E2E test with Flower and PyTorch Lightning" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] +license = "Apache-2.0" dependencies = [ "flwr[simulation] @ {root:parent:parent:uri}", "pytorch-lightning==2.2.4", @@ -20,3 +18,18 @@ packages = ["."] [tool.hatch.metadata] allow-direct-references = true + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "e2e_pytorch_lightning.server_app:app" +clientapp = "e2e_pytorch_lightning.client_app:app" + +[tool.flwr.app.config] + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 diff --git a/e2e/e2e-pytorch-lightning/simulation.py b/e2e/e2e-pytorch-lightning/simulation.py new file mode 100644 index 000000000000..882498f7bba4 --- /dev/null +++ b/e2e/e2e-pytorch-lightning/simulation.py @@ -0,0 +1,14 @@ +from e2e_pytorch_lightning.client_app import client_fn + +import flwr as fl + +hist = fl.simulation.start_simulation( + client_fn=client_fn, + num_clients=2, + config=fl.server.ServerConfig(num_rounds=3), +) + +assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 +) diff --git a/e2e/pytorch/README.md b/e2e/e2e-pytorch/README.md similarity index 100% rename from e2e/pytorch/README.md rename to e2e/e2e-pytorch/README.md diff --git a/e2e/e2e-pytorch/e2e_pytorch/__init__.py b/e2e/e2e-pytorch/e2e_pytorch/__init__.py new file mode 100644 index 000000000000..96bfb84c0c09 --- /dev/null +++ b/e2e/e2e-pytorch/e2e_pytorch/__init__.py @@ 
-0,0 +1 @@ +"""pytorch_e2e.""" diff --git a/e2e/pytorch/client.py b/e2e/e2e-pytorch/e2e_pytorch/client_app.py similarity index 92% rename from e2e/pytorch/client.py rename to e2e/e2e-pytorch/e2e_pytorch/client_app.py index dbfbfed1ffa7..988cd774018d 100644 --- a/e2e/pytorch/client.py +++ b/e2e/e2e-pytorch/e2e_pytorch/client_app.py @@ -10,8 +10,8 @@ from torchvision.transforms import Compose, Normalize, ToTensor from tqdm import tqdm -import flwr as fl -from flwr.common import ConfigsRecord +from flwr.client import ClientApp, NumPyClient, start_client +from flwr.common import ConfigsRecord, Context # ############################################################################# # 1. Regular PyTorch pipeline: nn.Module, train, test, and DataLoader @@ -72,8 +72,8 @@ def test(net, testloader): def load_data(): """Load CIFAR-10 (training and test set).""" trf = Compose([ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) - trainset = CIFAR10("./data", train=True, download=True, transform=trf) - testset = CIFAR10("./data", train=False, download=True, transform=trf) + trainset = CIFAR10("./../data", train=True, download=True, transform=trf) + testset = CIFAR10("./../data", train=False, download=True, transform=trf) trainset = Subset(trainset, range(SUBSET_SIZE)) testset = Subset(testset, range(10)) return DataLoader(trainset, batch_size=32, shuffle=True), DataLoader(testset) @@ -89,7 +89,7 @@ def load_data(): # Define Flower client -class FlowerClient(fl.client.NumPyClient): +class FlowerClient(NumPyClient): def get_parameters(self, config): return [val.cpu().numpy() for _, val in net.state_dict().items()] @@ -136,18 +136,18 @@ def set_parameters(model, parameters): return -def client_fn(cid): +def client_fn(context: Context): return FlowerClient().to_client() -app = fl.client.ClientApp( +app = ClientApp( client_fn=client_fn, ) if __name__ == "__main__": # Start Flower client - fl.client.start_client( + start_client( server_address="127.0.0.1:8080", 
client=FlowerClient().to_client(), ) diff --git a/e2e/e2e-pytorch/e2e_pytorch/server_app.py b/e2e/e2e-pytorch/e2e_pytorch/server_app.py new file mode 100644 index 000000000000..cb4f65eed0da --- /dev/null +++ b/e2e/e2e-pytorch/e2e_pytorch/server_app.py @@ -0,0 +1,79 @@ +import numpy as np + +import flwr as fl + +STATE_VAR = "timestamp" + + +# Define metric aggregation function +def record_state_metrics(metrics): + """Ensure that timestamps are monotonically increasing.""" + if not metrics: + return {} + + if STATE_VAR not in metrics[0][1]: + # Do nothing if keyword is not present + return {} + + states = [] + for _, m in metrics: + # split string and covert timestamps to float + states.append([float(tt) for tt in m[STATE_VAR].split(",")]) + + for client_state in states: + if len(client_state) == 1: + continue + deltas = np.diff(client_state) + assert np.all( + deltas > 0 + ), f"Timestamps are not monotonically increasing: {client_state}" + + return {STATE_VAR: states} + + +app = fl.server.ServerApp() + + +@app.main() +def main(driver, context): + # Construct the LegacyContext + context = fl.server.LegacyContext( + context=context, + config=fl.server.ServerConfig(num_rounds=3), + ) + + # Create the workflow + workflow = fl.server.workflow.DefaultWorkflow() + + # Execute + workflow(driver, context) + + hist = context.history + assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 + ) + + +if __name__ == "__main__": + strategy = fl.server.strategy.FedAvg( + evaluate_metrics_aggregation_fn=record_state_metrics + ) + + hist = fl.server.start_server( + server_address="0.0.0.0:8080", + config=fl.server.ServerConfig(num_rounds=3), + strategy=strategy, + ) + + assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 + ) + + if STATE_VAR in hist.metrics_distributed: + # The checks in record_state_metrics don't do anythinng if client's 
state has a single entry + state_metrics_last_round = hist.metrics_distributed[STATE_VAR][-1] + assert ( + len(state_metrics_last_round[1][0]) == 2 * state_metrics_last_round[0] + ), "There should be twice as many entries in the client state as rounds" diff --git a/e2e/pytorch/pyproject.toml b/e2e/e2e-pytorch/pyproject.toml similarity index 53% rename from e2e/pytorch/pyproject.toml rename to e2e/e2e-pytorch/pyproject.toml index 8c59c43d50df..0e48334693d3 100644 --- a/e2e/pytorch/pyproject.toml +++ b/e2e/e2e-pytorch/pyproject.toml @@ -3,12 +3,10 @@ requires = ["hatchling"] build-backend = "hatchling.build" [project] -name = "pytorch_e2e" -version = "0.1.0" +name = "e2e-pytorch" +version = "1.0.0" description = "PyTorch Federated Learning E2E test with Flower" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] +license = "Apache-2.0" dependencies = [ "flwr[simulation] @ {root:parent:parent:uri}", "torch>=1.12.0,<2.0.0", @@ -21,3 +19,18 @@ packages = ["."] [tool.hatch.metadata] allow-direct-references = true + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "e2e_pytorch.server_app:app" +clientapp = "e2e_pytorch.client_app:app" + +[tool.flwr.app.config] + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 diff --git a/e2e/bare/simulation.py b/e2e/e2e-pytorch/simulation.py similarity index 96% rename from e2e/bare/simulation.py rename to e2e/e2e-pytorch/simulation.py index 25868eb8e33f..c465fbc4816e 100644 --- a/e2e/bare/simulation.py +++ b/e2e/e2e-pytorch/simulation.py @@ -1,7 +1,7 @@ from typing import List, Tuple import numpy as np -from client import client_fn +from e2e_pytorch.client_app import client_fn import flwr as fl from flwr.common import Metrics diff --git a/e2e/pytorch/simulation_next.py b/e2e/e2e-pytorch/simulation_next.py similarity index 82% rename from e2e/pytorch/simulation_next.py rename to 
e2e/e2e-pytorch/simulation_next.py index ba1719dfb75b..01692a9e0850 100644 --- a/e2e/pytorch/simulation_next.py +++ b/e2e/e2e-pytorch/simulation_next.py @@ -1,4 +1,4 @@ -from client import app as client_app +from e2e_pytorch.client_app import app as client_app import flwr as fl diff --git a/e2e/scikit-learn/README.md b/e2e/e2e-scikit-learn/README.md similarity index 71% rename from e2e/scikit-learn/README.md rename to e2e/e2e-scikit-learn/README.md index a3f4f74f8b0c..d880a113b9f4 100644 --- a/e2e/scikit-learn/README.md +++ b/e2e/e2e-scikit-learn/README.md @@ -2,4 +2,4 @@ This directory is used for testing Flower with scikit-learn by using a simple logistic regression task. -It uses the `FedAvg` strategy with central evaluation. \ No newline at end of file +It uses the `FedAvg` strategy with central evaluation. diff --git a/e2e/e2e-scikit-learn/e2e_scikit_learn/__init__.py b/e2e/e2e-scikit-learn/e2e_scikit_learn/__init__.py new file mode 100644 index 000000000000..3e9806ba3387 --- /dev/null +++ b/e2e/e2e-scikit-learn/e2e_scikit_learn/__init__.py @@ -0,0 +1 @@ +"""scikit_learn_e2e.""" diff --git a/e2e/scikit-learn/client.py b/e2e/e2e-scikit-learn/e2e_scikit_learn/client_app.py similarity index 84% rename from e2e/scikit-learn/client.py rename to e2e/e2e-scikit-learn/e2e_scikit_learn/client_app.py index b0691e75a79d..ae00c240c9ba 100644 --- a/e2e/scikit-learn/client.py +++ b/e2e/e2e-scikit-learn/e2e_scikit_learn/client_app.py @@ -1,11 +1,12 @@ import warnings import numpy as np -import utils +from e2e_scikit_learn import utils from sklearn.linear_model import LogisticRegression from sklearn.metrics import log_loss -import flwr as fl +from flwr.client import ClientApp, NumPyClient, start_client +from flwr.common import Context # Load MNIST dataset from https://www.openml.org/d/554 (X_train, y_train), (X_test, y_test) = utils.load_mnist() @@ -26,7 +27,7 @@ # Define Flower client -class FlowerClient(fl.client.NumPyClient): +class FlowerClient(NumPyClient): def 
get_parameters(self, config): # type: ignore return utils.get_model_parameters(model) @@ -45,16 +46,14 @@ def evaluate(self, parameters, config): # type: ignore return loss, len(X_test), {"accuracy": accuracy} -def client_fn(cid): +def client_fn(context: Context): return FlowerClient().to_client() -app = fl.client.ClientApp( +app = ClientApp( client_fn=client_fn, ) if __name__ == "__main__": # Start Flower client - fl.client.start_client( - server_address="0.0.0.0:8080", client=FlowerClient().to_client() - ) + start_client(server_address="0.0.0.0:8080", client=FlowerClient().to_client()) diff --git a/e2e/e2e-scikit-learn/e2e_scikit_learn/server_app.py b/e2e/e2e-scikit-learn/e2e_scikit_learn/server_app.py new file mode 100644 index 000000000000..cb4f65eed0da --- /dev/null +++ b/e2e/e2e-scikit-learn/e2e_scikit_learn/server_app.py @@ -0,0 +1,79 @@ +import numpy as np + +import flwr as fl + +STATE_VAR = "timestamp" + + +# Define metric aggregation function +def record_state_metrics(metrics): + """Ensure that timestamps are monotonically increasing.""" + if not metrics: + return {} + + if STATE_VAR not in metrics[0][1]: + # Do nothing if keyword is not present + return {} + + states = [] + for _, m in metrics: + # split string and covert timestamps to float + states.append([float(tt) for tt in m[STATE_VAR].split(",")]) + + for client_state in states: + if len(client_state) == 1: + continue + deltas = np.diff(client_state) + assert np.all( + deltas > 0 + ), f"Timestamps are not monotonically increasing: {client_state}" + + return {STATE_VAR: states} + + +app = fl.server.ServerApp() + + +@app.main() +def main(driver, context): + # Construct the LegacyContext + context = fl.server.LegacyContext( + context=context, + config=fl.server.ServerConfig(num_rounds=3), + ) + + # Create the workflow + workflow = fl.server.workflow.DefaultWorkflow() + + # Execute + workflow(driver, context) + + hist = context.history + assert ( + hist.losses_distributed[-1][1] == 0 + or 
(hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 + ) + + +if __name__ == "__main__": + strategy = fl.server.strategy.FedAvg( + evaluate_metrics_aggregation_fn=record_state_metrics + ) + + hist = fl.server.start_server( + server_address="0.0.0.0:8080", + config=fl.server.ServerConfig(num_rounds=3), + strategy=strategy, + ) + + assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 + ) + + if STATE_VAR in hist.metrics_distributed: + # The checks in record_state_metrics don't do anythinng if client's state has a single entry + state_metrics_last_round = hist.metrics_distributed[STATE_VAR][-1] + assert ( + len(state_metrics_last_round[1][0]) == 2 * state_metrics_last_round[0] + ), "There should be twice as many entries in the client state as rounds" diff --git a/e2e/scikit-learn/utils.py b/e2e/e2e-scikit-learn/e2e_scikit_learn/utils.py similarity index 100% rename from e2e/scikit-learn/utils.py rename to e2e/e2e-scikit-learn/e2e_scikit_learn/utils.py diff --git a/e2e/scikit-learn/pyproject.toml b/e2e/e2e-scikit-learn/pyproject.toml similarity index 55% rename from e2e/scikit-learn/pyproject.toml rename to e2e/e2e-scikit-learn/pyproject.toml index caba2324d44f..aef9a4a8a00b 100644 --- a/e2e/scikit-learn/pyproject.toml +++ b/e2e/e2e-scikit-learn/pyproject.toml @@ -3,17 +3,18 @@ requires = ["hatchling"] build-backend = "hatchling.build" [project] -name = "sklearn-mnist-test" -version = "0.1.0" +name = "e2e-scikit-learn" +version = "1.0.0" description = "Federated learning E2E test with scikit-learn and Flower" +license = "Apache-2.0" authors = [ { name = "The Flower Authors", email = "hello@flower.ai" }, - { name = "Kaushik Amar Das", email = "kaushik.das@iiitg.ac.in"}, + { name = "Kaushik Amar Das", email = "kaushik.das@iiitg.ac.in" }, ] dependencies = [ "flwr[simulation,rest] @ {root:parent:parent:uri}", "scikit-learn>=1.1.1,<2.0.0", - "openml>=0.14.0,<0.15.0" + 
"openml>=0.14.0,<0.15.0", ] [tool.hatch.build.targets.wheel] @@ -21,3 +22,18 @@ packages = ["."] [tool.hatch.metadata] allow-direct-references = true + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "e2e_scikit_learn.server_app:app" +clientapp = "e2e_scikit_learn.client_app:app" + +[tool.flwr.app.config] + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 diff --git a/e2e/e2e-scikit-learn/simulation.py b/e2e/e2e-scikit-learn/simulation.py new file mode 100644 index 000000000000..b0c9a3b58e7a --- /dev/null +++ b/e2e/e2e-scikit-learn/simulation.py @@ -0,0 +1,14 @@ +from e2e_scikit_learn.client_app import client_fn + +import flwr as fl + +hist = fl.simulation.start_simulation( + client_fn=client_fn, + num_clients=2, + config=fl.server.ServerConfig(num_rounds=3), +) + +assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 +) diff --git a/e2e/e2e-tensorflow/README.md b/e2e/e2e-tensorflow/README.md new file mode 100644 index 000000000000..15b6d8e0718d --- /dev/null +++ b/e2e/e2e-tensorflow/README.md @@ -0,0 +1,5 @@ +# Flower with Tensorflow testing + +This directory is used for testing Flower with Tensorflow by using the CIFAR10 dataset and a CNN. + +It uses a subset of size 1000 for the training data and 10 data points for the testing. 
diff --git a/e2e/e2e-tensorflow/e2e_tensorflow/__init__.py b/e2e/e2e-tensorflow/e2e_tensorflow/__init__.py new file mode 100644 index 000000000000..4a10173f8bd3 --- /dev/null +++ b/e2e/e2e-tensorflow/e2e_tensorflow/__init__.py @@ -0,0 +1 @@ +"""tensorflow_e2e.""" diff --git a/e2e/tensorflow/client.py b/e2e/e2e-tensorflow/e2e_tensorflow/client_app.py similarity index 81% rename from e2e/tensorflow/client.py rename to e2e/e2e-tensorflow/e2e_tensorflow/client_app.py index 779be0c3746d..351f495a3acb 100644 --- a/e2e/tensorflow/client.py +++ b/e2e/e2e-tensorflow/e2e_tensorflow/client_app.py @@ -2,7 +2,8 @@ import tensorflow as tf -import flwr as fl +from flwr.client import ClientApp, NumPyClient, start_client +from flwr.common import Context SUBSET_SIZE = 1000 @@ -18,7 +19,7 @@ # Define Flower client -class FlowerClient(fl.client.NumPyClient): +class FlowerClient(NumPyClient): def get_parameters(self, config): return model.get_weights() @@ -33,16 +34,14 @@ def evaluate(self, parameters, config): return loss, len(x_test), {"accuracy": accuracy} -def client_fn(cid): +def client_fn(context: Context): return FlowerClient().to_client() -app = fl.client.ClientApp( +app = ClientApp( client_fn=client_fn, ) if __name__ == "__main__": # Start Flower client - fl.client.start_client( - server_address="127.0.0.1:8080", client=FlowerClient().to_client() - ) + start_client(server_address="127.0.0.1:8080", client=FlowerClient().to_client()) diff --git a/e2e/e2e-tensorflow/e2e_tensorflow/server_app.py b/e2e/e2e-tensorflow/e2e_tensorflow/server_app.py new file mode 100644 index 000000000000..cb4f65eed0da --- /dev/null +++ b/e2e/e2e-tensorflow/e2e_tensorflow/server_app.py @@ -0,0 +1,79 @@ +import numpy as np + +import flwr as fl + +STATE_VAR = "timestamp" + + +# Define metric aggregation function +def record_state_metrics(metrics): + """Ensure that timestamps are monotonically increasing.""" + if not metrics: + return {} + + if STATE_VAR not in metrics[0][1]: + # Do nothing if keyword is 
not present + return {} + + states = [] + for _, m in metrics: + # split string and covert timestamps to float + states.append([float(tt) for tt in m[STATE_VAR].split(",")]) + + for client_state in states: + if len(client_state) == 1: + continue + deltas = np.diff(client_state) + assert np.all( + deltas > 0 + ), f"Timestamps are not monotonically increasing: {client_state}" + + return {STATE_VAR: states} + + +app = fl.server.ServerApp() + + +@app.main() +def main(driver, context): + # Construct the LegacyContext + context = fl.server.LegacyContext( + context=context, + config=fl.server.ServerConfig(num_rounds=3), + ) + + # Create the workflow + workflow = fl.server.workflow.DefaultWorkflow() + + # Execute + workflow(driver, context) + + hist = context.history + assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 + ) + + +if __name__ == "__main__": + strategy = fl.server.strategy.FedAvg( + evaluate_metrics_aggregation_fn=record_state_metrics + ) + + hist = fl.server.start_server( + server_address="0.0.0.0:8080", + config=fl.server.ServerConfig(num_rounds=3), + strategy=strategy, + ) + + assert ( + hist.losses_distributed[-1][1] == 0 + or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 + ) + + if STATE_VAR in hist.metrics_distributed: + # The checks in record_state_metrics don't do anythinng if client's state has a single entry + state_metrics_last_round = hist.metrics_distributed[STATE_VAR][-1] + assert ( + len(state_metrics_last_round[1][0]) == 2 * state_metrics_last_round[0] + ), "There should be twice as many entries in the client state as rounds" diff --git a/e2e/tensorflow/pyproject.toml b/e2e/e2e-tensorflow/pyproject.toml similarity index 52% rename from e2e/tensorflow/pyproject.toml rename to e2e/e2e-tensorflow/pyproject.toml index 4b035873223c..dd89123944c7 100644 --- a/e2e/tensorflow/pyproject.toml +++ b/e2e/e2e-tensorflow/pyproject.toml @@ -3,12 +3,10 @@ 
requires = ["hatchling"] build-backend = "hatchling.build" [project] -name = "quickstart-tensorflow-test" -version = "0.1.0" +name = "e2e-tensorflow" +version = "1.0.0" description = "Keras Federated Learning E2E test with Flower" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] +license = "Apache-2.0" dependencies = [ "flwr[simulation] @ {root:parent:parent:uri}", "tensorflow-cpu>=2.9.1,!=2.11.1", @@ -20,3 +18,18 @@ packages = ["."] [tool.hatch.metadata] allow-direct-references = true + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "e2e_tensorflow.server_app:app" +clientapp = "e2e_tensorflow.client_app:app" + +[tool.flwr.app.config] + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 diff --git a/e2e/jax/simulation.py b/e2e/e2e-tensorflow/simulation.py similarity index 85% rename from e2e/jax/simulation.py rename to e2e/e2e-tensorflow/simulation.py index bf05a77cf32a..108be2ae10be 100644 --- a/e2e/jax/simulation.py +++ b/e2e/e2e-tensorflow/simulation.py @@ -1,4 +1,4 @@ -from client import client_fn +from e2e_tensorflow.client_app import client_fn import flwr as fl diff --git a/e2e/tensorflow/simulation_next.py b/e2e/e2e-tensorflow/simulation_next.py similarity index 81% rename from e2e/tensorflow/simulation_next.py rename to e2e/e2e-tensorflow/simulation_next.py index ba1719dfb75b..0fd75d8cd0ec 100644 --- a/e2e/tensorflow/simulation_next.py +++ b/e2e/e2e-tensorflow/simulation_next.py @@ -1,4 +1,4 @@ -from client import app as client_app +from e2e_tensorflow.client_app import app as client_app import flwr as fl diff --git a/e2e/pandas/pyproject.toml b/e2e/pandas/pyproject.toml deleted file mode 100644 index f8f8488a7006..000000000000 --- a/e2e/pandas/pyproject.toml +++ /dev/null @@ -1,26 +0,0 @@ -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[project] -name = "quickstart-pandas" -version = 
"0.1.0" -description = "Pandas E2E test with Flower" -authors = [ - { name = "Ragy Haddad", email = "ragy202@gmail.com" }, -] -maintainers = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] -dependencies = [ - "flwr[simulation] @ {root:parent:parent:uri}", - "numpy>=1.21.0,<2.0.0", - "pandas>=2.0.0,<3.0.0", - "scikit-learn>=1.1.1,<2.0.0", -] - -[tool.hatch.build.targets.wheel] -packages = ["."] - -[tool.hatch.metadata] -allow-direct-references = true diff --git a/e2e/pyproject.toml b/e2e/pyproject.toml new file mode 100644 index 000000000000..0fc4c77f44b2 --- /dev/null +++ b/e2e/pyproject.toml @@ -0,0 +1,28 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "e2e" +version = "1.0.0" +description = "Project configuration for ServerApp in E2E tests." +license = "Apache-2.0" +dependencies = [] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "server:app" +clientapp = "" + +[tool.flwr.app.config] + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 diff --git a/e2e/scikit-learn/simulation.py b/e2e/scikit-learn/simulation.py deleted file mode 100644 index bf05a77cf32a..000000000000 --- a/e2e/scikit-learn/simulation.py +++ /dev/null @@ -1,14 +0,0 @@ -from client import client_fn - -import flwr as fl - -hist = fl.simulation.start_simulation( - client_fn=client_fn, - num_clients=2, - config=fl.server.ServerConfig(num_rounds=3), -) - -assert ( - hist.losses_distributed[-1][1] == 0 - or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 -) diff --git a/e2e/strategies/client.py b/e2e/strategies/client.py index 505340e013a5..0403416cc3b7 100644 --- a/e2e/strategies/client.py +++ b/e2e/strategies/client.py @@ -2,7 +2,8 @@ import tensorflow as tf -import flwr as fl +from flwr.client import ClientApp, NumPyClient, start_client 
+from flwr.common import Context SUBSET_SIZE = 1000 @@ -33,7 +34,7 @@ def get_model(): # Define Flower client -class FlowerClient(fl.client.NumPyClient): +class FlowerClient(NumPyClient): def get_parameters(self, config): return model.get_weights() @@ -48,17 +49,15 @@ def evaluate(self, parameters, config): return loss, len(x_test), {"accuracy": accuracy} -def client_fn(cid): +def client_fn(context: Context): return FlowerClient().to_client() -app = fl.client.ClientApp( +app = ClientApp( client_fn=client_fn, ) if __name__ == "__main__": # Start Flower client - fl.client.start_client( - server_address="127.0.0.1:8080", client=FlowerClient().to_client() - ) + start_client(server_address="127.0.0.1:8080", client=FlowerClient().to_client()) diff --git a/e2e/strategies/pyproject.toml b/e2e/strategies/pyproject.toml index 5cc74b20fa24..3ad62ec836a7 100644 --- a/e2e/strategies/pyproject.toml +++ b/e2e/strategies/pyproject.toml @@ -9,7 +9,7 @@ description = "Keras Federated Learning Quickstart with Flower" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr = { path = "../../", develop = true, extras = ["simulation"] } tensorflow-cpu = "^2.9.1, !=2.11.1" tensorflow-io-gcs-filesystem = "<0.35.0" diff --git a/e2e/strategies/test.py b/e2e/strategies/test.py index abf9cdb5a5c7..c567f33b236b 100644 --- a/e2e/strategies/test.py +++ b/e2e/strategies/test.py @@ -3,8 +3,8 @@ import tensorflow as tf from client import SUBSET_SIZE, FlowerClient, get_model -import flwr as fl -from flwr.common import ndarrays_to_parameters +from flwr.common import Context, ndarrays_to_parameters +from flwr.server import ServerConfig from flwr.server.strategy import ( FaultTolerantFedAvg, FedAdagrad, @@ -15,6 +15,7 @@ FedYogi, QFedAvg, ) +from flwr.simulation import start_simulation STRATEGY_LIST = [ FedMedian, @@ -42,8 +43,7 @@ def get_strat(name): init_model = get_model() -def client_fn(cid): - _ = cid +def client_fn(context: Context): 
return FlowerClient() @@ -71,10 +71,10 @@ def evaluate(server_round, parameters, config): if start_idx >= OPT_IDX: strat_args["tau"] = 0.01 -hist = fl.simulation.start_simulation( +hist = start_simulation( client_fn=client_fn, num_clients=2, - config=fl.server.ServerConfig(num_rounds=3), + config=ServerConfig(num_rounds=3), strategy=strategy(**strat_args), ) diff --git a/e2e/tabnet/README.md b/e2e/tabnet/README.md deleted file mode 100644 index 258043c3ffa8..000000000000 --- a/e2e/tabnet/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Flower with Tabnet testing - -This directory is used for testing Flower with Tabnet. - -It uses the `FedAvg` strategy. diff --git a/e2e/tabnet/client.py b/e2e/tabnet/client.py deleted file mode 100644 index 1a7ecfd68f73..000000000000 --- a/e2e/tabnet/client.py +++ /dev/null @@ -1,95 +0,0 @@ -import os - -import tabnet -import tensorflow as tf -import tensorflow_datasets as tfds - -import flwr as fl - -train_size = 125 -BATCH_SIZE = 50 -col_names = ["sepal_length", "sepal_width", "petal_length", "petal_width"] - - -def transform(ds): - features = tf.unstack(ds["features"]) - labels = ds["label"] - - x = dict(zip(col_names, features)) - y = tf.one_hot(labels, 3) - return x, y - - -def prepare_iris_dataset(): - ds_full = tfds.load(name="iris", split=tfds.Split.TRAIN) - ds_full = ds_full.shuffle(150, seed=0) - - ds_train = ds_full.take(train_size) - ds_train = ds_train.map(transform) - ds_train = ds_train.batch(BATCH_SIZE) - - ds_test = ds_full.skip(train_size) - ds_test = ds_test.map(transform) - ds_test = ds_test.batch(BATCH_SIZE) - - feature_columns = [] - for col_name in col_names: - feature_columns.append(tf.feature_column.numeric_column(col_name)) - - return ds_train, ds_test, feature_columns - - -ds_train, ds_test, feature_columns = prepare_iris_dataset() -# Make TensorFlow log less verbose -os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" - -# Load TabNet model -model = tabnet.TabNetClassifier( - feature_columns, - num_classes=3, - 
feature_dim=8, - output_dim=4, - num_decision_steps=4, - relaxation_factor=1.0, - sparsity_coefficient=1e-5, - batch_momentum=0.98, - virtual_batch_size=None, - norm_type="group", - num_groups=1, -) -lr = tf.keras.optimizers.schedules.ExponentialDecay( - 0.01, decay_steps=100, decay_rate=0.9, staircase=False -) -optimizer = tf.keras.optimizers.Adam(lr) -model.compile(optimizer, loss="categorical_crossentropy", metrics=["accuracy"]) - - -# Define Flower client -class FlowerClient(fl.client.NumPyClient): - def get_parameters(self, config): - return model.get_weights() - - def fit(self, parameters, config): - model.set_weights(parameters) - model.fit(ds_train, epochs=25) - return model.get_weights(), len(ds_train), {} - - def evaluate(self, parameters, config): - model.set_weights(parameters) - loss, accuracy = model.evaluate(ds_test) - return loss, len(ds_train), {"accuracy": accuracy} - - -def client_fn(cid): - return FlowerClient().to_client() - - -app = fl.client.ClientApp( - client_fn=client_fn, -) - -if __name__ == "__main__": - # Start Flower client - fl.client.start_client( - server_address="127.0.0.1:8080", client=FlowerClient().to_client() - ) diff --git a/e2e/tabnet/pyproject.toml b/e2e/tabnet/pyproject.toml deleted file mode 100644 index 99379ddb607e..000000000000 --- a/e2e/tabnet/pyproject.toml +++ /dev/null @@ -1,25 +0,0 @@ -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[project] -name = "quickstart-tabnet-test" -version = "0.1.0" -description = "Tabnet Federated Learning E2E test with Flower" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] -dependencies = [ - "flwr[simulation] @ {root:parent:parent:uri}", - "tensorflow-cpu>=2.9.1,!=2.11.1; platform_machine == \"x86_64\"", - "tensorflow-macos>=2.9.1,!=2.11.1; sys_platform == \"darwin\" and platform_machine == \"arm64\"", - "tensorflow_datasets==4.9.2", - "tensorflow-io-gcs-filesystem<0.35.0", - "tabnet==0.1.6", -] - 
-[tool.hatch.build.targets.wheel] -packages = ["."] - -[tool.hatch.metadata] -allow-direct-references = true diff --git a/e2e/tabnet/simulation.py b/e2e/tabnet/simulation.py deleted file mode 100644 index bf05a77cf32a..000000000000 --- a/e2e/tabnet/simulation.py +++ /dev/null @@ -1,14 +0,0 @@ -from client import client_fn - -import flwr as fl - -hist = fl.simulation.start_simulation( - client_fn=client_fn, - num_clients=2, - config=fl.server.ServerConfig(num_rounds=3), -) - -assert ( - hist.losses_distributed[-1][1] == 0 - or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 -) diff --git a/e2e/tensorflow/README.md b/e2e/tensorflow/README.md deleted file mode 100644 index 7d8a245a3fb0..000000000000 --- a/e2e/tensorflow/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Flower with PyTorch testing - -This directory is used for testing Flower with PyTorch by using the CIFAR10 dataset and a CNN. - -It uses a subset of size 1000 for the training data and 10 data points for the testing. 
diff --git a/e2e/tensorflow/simulation.py b/e2e/tensorflow/simulation.py deleted file mode 100644 index bf05a77cf32a..000000000000 --- a/e2e/tensorflow/simulation.py +++ /dev/null @@ -1,14 +0,0 @@ -from client import client_fn - -import flwr as fl - -hist = fl.simulation.start_simulation( - client_fn=client_fn, - num_clients=2, - config=fl.server.ServerConfig(num_rounds=3), -) - -assert ( - hist.losses_distributed[-1][1] == 0 - or (hist.losses_distributed[0][1] / hist.losses_distributed[-1][1]) >= 0.98 -) diff --git a/e2e/test.sh b/e2e/test_legacy.sh similarity index 51% rename from e2e/test.sh rename to e2e/test_legacy.sh index 4ea17a4f994b..b336ee0cb717 100755 --- a/e2e/test.sh +++ b/e2e/test_legacy.sh @@ -1,28 +1,19 @@ #!/bin/bash set -e -case "$1" in - pandas) - server_file="server.py" - ;; - bare-https) - ./generate.sh - server_file="server.py" - ;; - *) - server_file="../server.py" - ;; -esac +if [ "$1" = "e2e-bare-https" ]; then + ./../generate.sh +fi # run the first command in background and save output to a temporary file: -timeout 2m python $server_file & +timeout 2m python server_app.py & pid=$! sleep 3 -python client.py & +python client_app.py & sleep 3 -python client.py & +python client_app.py & sleep 3 wait $pid diff --git a/e2e/test_reconnection.sh b/e2e/test_reconnection.sh new file mode 100755 index 000000000000..80788b92ebde --- /dev/null +++ b/e2e/test_reconnection.sh @@ -0,0 +1,88 @@ +#!/bin/bash +set -e + +case "$1" in + rest) + rest_arg="--rest" + server_app_address="http://localhost:9091" + server_address="http://localhost:9093" + db_arg="--database :flwr-in-memory-state:" + ;; + sqlite) + rest_arg="" + server_address="127.0.0.1:9092" + server_app_address="127.0.0.1:9091" + db_arg="--database $(date +%s).db" + ;; + *) + rest_arg="" + server_address="127.0.0.1:9092" + server_app_address="127.0.0.1:9091" + db_arg="--database :flwr-in-memory-state:" + ;; +esac + +dir_arg="./.." 
+ +timeout 2m flower-superlink --insecure $db_arg $rest_arg & +sl_pid=$! +echo "Starting SuperLink" +sleep 3 + +timeout 2m flower-supernode ./ --insecure $rest_arg --superlink $server_address & +cl1_pid=$! +echo "Starting first client" +sleep 3 + +timeout 2m flower-supernode ./ --insecure $rest_arg --superlink $server_address & +cl2_pid=$! +echo "Starting second client" +sleep 3 + +# Kill superlink, this should send the clients into their retry loops +kill $sl_pid +echo "Killing Superlink" +sleep 3 + +# Restart superlink, the clients should now be able to reconnect to it +timeout 2m flower-superlink --insecure $db_arg $rest_arg & +sl_pid=$! +echo "Restarting Superlink" +sleep 20 + +# Kill first client, this should send a DeleteNode message to the Superlink +kill $cl1_pid +echo "Killing first client" +sleep 3 + +# Starting new client, this is so we have enough clients to start the server-app +timeout 2m flower-supernode ./ --insecure $rest_arg --superlink $server_address & +cl1_pid=$! +echo "Starting new client" +sleep 5 + +# We start the server-app to begin the training +timeout 2m flower-server-app ./ $rest_arg --superlink $server_app_address & +pid=$! +echo "Starting server-app to start training" + +# Kill first client as soon as the training starts, +# the server-app should just receive a failure in this case and continue the rounds +# when enough clients are connected +kill $cl1_pid +echo "Killing first client" +sleep 1 + +# Restart first client so enough clients are connected to continue the FL rounds +timeout 2m flower-supernode ./ --insecure $rest_arg --superlink $server_address & +cl1_pid=$! +echo "Starting new client" + +wait $pid +res=$?
+ +if [[ "$res" = "0" ]]; + then echo "Training worked correctly"; kill $cl1_pid; kill $cl2_pid; kill $sl_pid; + else echo "Training had an issue" && exit 1; +fi + diff --git a/e2e/test_superexec.sh b/e2e/test_superexec.sh new file mode 100755 index 000000000000..ae79128c6ac1 --- /dev/null +++ b/e2e/test_superexec.sh @@ -0,0 +1,122 @@ +#!/bin/bash +set -e + +# Set connectivity parameters +case "$1" in + secure) + ./generate.sh + server_arg='--ssl-ca-certfile ../certificates/ca.crt + --ssl-certfile ../certificates/server.pem + --ssl-keyfile ../certificates/server.key' + client_arg='--root-certificates ../certificates/ca.crt' + # For $superexec_arg, note special ordering of single- and double-quotes + superexec_arg='--executor-config 'root-certificates=\"../certificates/ca.crt\"'' + superexec_arg="$server_arg $superexec_arg" + ;; + insecure) + server_arg='--insecure' + client_arg=$server_arg + superexec_arg=$server_arg + ;; +esac + +# Set authentication parameters +case "$2" in + client-auth) + server_auth='--auth-list-public-keys ../keys/client_public_keys.csv + --auth-superlink-private-key ../keys/server_credentials + --auth-superlink-public-key ../keys/server_credentials.pub' + client_auth_1='--auth-supernode-private-key ../keys/client_credentials_1 + --auth-supernode-public-key ../keys/client_credentials_1.pub' + client_auth_2='--auth-supernode-private-key ../keys/client_credentials_2 + --auth-supernode-public-key ../keys/client_credentials_2.pub' + server_address='127.0.0.1:9092' + ;; + *) + server_auth='' + client_auth_1='' + client_auth_2='' + server_address='127.0.0.1:9092' + ;; +esac + +# Set engine +case "$3" in + deployment-engine) + superexec_engine_arg='--executor flwr.superexec.deployment:executor' + ;; + simulation-engine) + superexec_engine_arg='--executor flwr.superexec.simulation:executor + --executor-config 'num-supernodes=10'' + ;; +esac + + +# Create and install Flower app +flwr new e2e-tmp-test --framework numpy --username flwrlabs +cd 
e2e-tmp-test +# Remove flwr dependency from `pyproject.toml`. Seems necessary so that it does +# not override the wheel dependency +if [[ "$OSTYPE" == "darwin"* ]]; then + # macOS (Darwin) system + sed -i '' '/flwr\[simulation\]/d' pyproject.toml +else + # Non-macOS system (Linux) + sed -i '/flwr\[simulation\]/d' pyproject.toml +fi +pip install -e . --no-deps + +# Check if the first argument is 'insecure' +if [ "$1" == "insecure" ]; then + # If $1 is 'insecure', append the first line + echo -e $"\n[tool.flwr.federations.superexec]\naddress = \"127.0.0.1:9093\"\ninsecure = true" >> pyproject.toml +else + # Otherwise, append the second line + echo -e $"\n[tool.flwr.federations.superexec]\naddress = \"127.0.0.1:9093\"\nroot-certificates = \"../certificates/ca.crt\"" >> pyproject.toml +fi + +timeout 2m flower-superlink $server_arg $server_auth & +sl_pid=$! +sleep 2 + +timeout 2m flower-supernode ./ $client_arg \ + --superlink $server_address $client_auth_1 \ + --node-config "partition-id=0 num-partitions=2" --max-retries 0 & +cl1_pid=$! +sleep 2 + +timeout 2m flower-supernode ./ $client_arg \ + --superlink $server_address $client_auth_2 \ + --node-config "partition-id=1 num-partitions=2" --max-retries 0 & +cl2_pid=$! +sleep 2 + +timeout 2m flower-superexec $superexec_arg $superexec_engine_arg 2>&1 | tee flwr_output.log & +se_pid=$(pgrep -f "flower-superexec") +sleep 2 + +timeout 1m flwr run --run-config num-server-rounds=1 ../e2e-tmp-test superexec + +# Initialize a flag to track if training is successful +found_success=false +timeout=120 # Timeout after 120 seconds +elapsed=0 + +# Check for "Success" in a loop with a timeout +while [ "$found_success" = false ] && [ $elapsed -lt $timeout ]; do + if grep -q "Run finished" flwr_output.log; then + echo "Training worked correctly!" + found_success=true + kill $cl1_pid; kill $cl2_pid; sleep 1; kill $sl_pid; kill $se_pid; + else + echo "Waiting for training ... 
($elapsed seconds elapsed)" + fi + # Sleep for a short period and increment the elapsed time + sleep 2 + elapsed=$((elapsed + 2)) +done + +if [ "$found_success" = false ]; then + echo "Training had an issue and timed out." + kill $cl1_pid; kill $cl2_pid; kill $sl_pid; kill $se_pid; +fi diff --git a/e2e/test_driver.sh b/e2e/test_superlink.sh similarity index 70% rename from e2e/test_driver.sh rename to e2e/test_superlink.sh index 7a50764ab0ca..2016f6da1933 100755 --- a/e2e/test_driver.sh +++ b/e2e/test_superlink.sh @@ -2,12 +2,7 @@ set -e case "$1" in - pandas) - server_arg="--insecure" - client_arg="--insecure" - server_dir="./" - ;; - bare-https) + e2e-bare-https | e2e-bare-auth) ./generate.sh server_arg="--ssl-ca-certfile certificates/ca.crt --ssl-certfile certificates/server.pem --ssl-keyfile certificates/server.key" client_arg="--root-certificates certificates/ca.crt" @@ -16,13 +11,14 @@ case "$1" in *) server_arg="--insecure" client_arg="--insecure" - server_dir="./.." + server_dir="./" ;; esac case "$2" in rest) - rest_arg="--rest" + rest_arg_superlink="--fleet-api-type rest" + rest_arg_supernode="--rest" server_address="http://localhost:9093" server_app_address="127.0.0.1:9091" db_arg="--database :flwr-in-memory-state:" @@ -31,7 +27,8 @@ case "$2" in client_auth_2="" ;; sqlite) - rest_arg="" + rest_arg_superlink="" + rest_arg_supernode="" server_address="127.0.0.1:9092" server_app_address="127.0.0.1:9091" db_arg="--database $(date +%s).db" @@ -40,19 +37,18 @@ case "$2" in client_auth_2="" ;; client-auth) - ./generate.sh - rest_arg="" + rest_arg_superlink="" + rest_arg_supernode="" server_address="127.0.0.1:9092" server_app_address="127.0.0.1:9091" db_arg="--database :flwr-in-memory-state:" - server_arg="--ssl-ca-certfile certificates/ca.crt --ssl-certfile certificates/server.pem --ssl-keyfile certificates/server.key" - client_arg="--root-certificates certificates/ca.crt" server_auth="--auth-list-public-keys keys/client_public_keys.csv 
--auth-superlink-private-key keys/server_credentials --auth-superlink-public-key keys/server_credentials.pub" client_auth_1="--auth-supernode-private-key keys/client_credentials_1 --auth-supernode-public-key keys/client_credentials_1.pub" client_auth_2="--auth-supernode-private-key keys/client_credentials_2 --auth-supernode-public-key keys/client_credentials_2.pub" ;; *) - rest_arg="" + rest_arg_superlink="" + rest_arg_supernode="" server_address="127.0.0.1:9092" server_app_address="127.0.0.1:9091" db_arg="--database :flwr-in-memory-state:" @@ -62,19 +58,19 @@ case "$2" in ;; esac -timeout 2m flower-superlink $server_arg $db_arg $rest_arg $server_auth & +timeout 2m flower-superlink $server_arg $db_arg $rest_arg_superlink $server_auth & sl_pid=$! sleep 3 -timeout 2m flower-client-app client:app $client_arg $rest_arg --server $server_address $client_auth_1 & +timeout 2m flower-supernode ./ $client_arg $rest_arg_supernode --superlink $server_address $client_auth_1 & cl1_pid=$! sleep 3 -timeout 2m flower-client-app client:app $client_arg $rest_arg --server $server_address $client_auth_2 & +timeout 2m flower-supernode ./ $client_arg $rest_arg_supernode --superlink $server_address $client_auth_2 & cl2_pid=$! sleep 3 -timeout 2m flower-server-app server:app $client_arg --dir $server_dir --server $server_app_address & +timeout 2m flower-server-app $server_dir $client_arg --superlink $server_app_address & pid=$! 
wait $pid diff --git a/examples/advanced-pytorch/.gitignore b/examples/advanced-pytorch/.gitignore new file mode 100644 index 000000000000..014ee796bf45 --- /dev/null +++ b/examples/advanced-pytorch/.gitignore @@ -0,0 +1,3 @@ +__pycache__/ +outputs/ +wandb/ diff --git a/examples/advanced-pytorch/README.md b/examples/advanced-pytorch/README.md index c1ba85b95879..1771173c3925 100644 --- a/examples/advanced-pytorch/README.md +++ b/examples/advanced-pytorch/README.md @@ -1,71 +1,90 @@ -# Advanced Flower Example (PyTorch) +--- +tags: [advanced, vision, fds, wandb] +dataset: [Fashion-MNIST] +framework: [torch, torchvision] +--- -This example demonstrates an advanced federated learning setup using Flower with PyTorch. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) and it differs from the quickstart example in the following ways: +# Federated Learning with PyTorch and Flower (Advanced Example) -- 10 clients (instead of just 2) -- Each client holds a local dataset of 5000 training examples and 1000 test examples (note that using the `run.sh` script will only select 10 data samples by default, as the `--toy` argument is set). -- Server-side model evaluation after parameter aggregation -- Hyperparameter schedule using config functions -- Custom return values -- Server-side parameter initialization +> \[!TIP\] +> This example shows intermediate and advanced functionality of Flower. If you are new to Flower, it is recommended to start from the [quickstart-pytorch](https://github.com/adap/flower/tree/main/examples/quickstart-pytorch) example or the [quickstart PyTorch tutorial](https://flower.ai/docs/framework/tutorial-quickstart-pytorch.html). -## Project Setup +This example shows how to extend your `ClientApp` and `ServerApp` capabilities compared to what's shown in the [`quickstart-pytorch`](https://github.com/adap/flower/tree/main/examples/quickstart-pytorch) example.
In particular, it will show how the `ClientApp`'s state (an object of type [RecordSet](https://flower.ai/docs/framework/ref-api/flwr.common.RecordSet.html)) can be used to enable stateful clients, facilitating the design of personalized federated learning strategies, among others. The `ServerApp` in this example makes use of a custom strategy derived from the built-in [FedAvg](https://flower.ai/docs/framework/ref-api/flwr.server.strategy.FedAvg.html). In addition, it will also showcase how to: -Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: +1. Save model checkpoints +2. Save the metrics available at the strategy (e.g. accuracies, losses) +3. Log training artefacts to [Weights & Biases](https://wandb.ai/site) +4. Implement a simple decaying learning rate schedule across rounds + +The structure of this directory is as follows: ```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/advanced-pytorch . && rm -rf flower && cd advanced-pytorch +advanced-pytorch +├── pytorch_example +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ ├── strategy.py # Defines a custom strategy +│ └── task.py # Defines your model, training and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -This will create a new directory called `advanced-pytorch` containing the following files: +> \[!NOTE\] +> By default this example will log metrics to Weights & Biases. For this, you need to ensure that your system has logged in. Often it's as simple as executing `wandb login` on the terminal after installing `wandb`. Please, refer to this [quickstart guide](https://docs.wandb.ai/quickstart#2-log-in-to-wb) for more information.
-```shell --- pyproject.toml --- requirements.txt --- client.py --- server.py --- README.md --- run.sh -``` +This example uses [Flower Datasets](https://flower.ai/docs/datasets/) with the [Dirichlet Partitioner](https://flower.ai/docs/datasets/ref-api/flwr_datasets.partitioner.DirichletPartitioner.html#flwr_datasets.partitioner.DirichletPartitioner) to partition the [Fashion-MNIST](https://huggingface.co/datasets/zalando-datasets/fashion_mnist) dataset in a non-IID fashion into 50 partitions. -### Installing Dependencies +![](_static/fmnist_50_lda.png) -Project dependencies (such as `torch` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. +> \[!TIP\] +> You can use Flower Datasets [built-in visualization tools](https://flower.ai/docs/datasets/tutorial-visualize-label-distribution.html) to easily generate plots like the one above. -#### Poetry +### Install dependencies and project -```shell -poetry install -poetry shell +Install the dependencies defined in `pyproject.toml` as well as the `pytorch_example` package. + +```bash +pip install -e . ``` -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: +## Run the project -```shell -poetry run python3 -c "import flwr" -``` +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine.
-If you don't see any errors you're good to go! +When you run the project, the strategy will create a directory structure in the form of `outputs/date/time` and store two `JSON` files: `config.json` containing the `run-config` that the `ServerApp` receives; and `results.json` containing the results (accuracies, losses) that are generated at the strategy. -#### pip +By default, the metrics: {`centralized_accuracy`, `centralized_loss`, `federated_evaluate_accuracy`, `federated_evaluate_loss`} will be logged to Weights & Biases (they are also stored to the `results.json` previously mentioned). Upon executing `flwr run` you'll see a URL linking to your Weights & Biases dashboard where you can see the metrics. -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. +![](_static/wandb_plots.png) -```shell -pip install -r requirements.txt +### Run with the Simulation Engine + +With default parameters, 25% of the total 50 nodes (see `num-supernodes` in `pyproject.toml`) will be sampled for `fit` and 50% for an `evaluate` round. By default `ClientApp` objects will run on CPU. + +> \[!TIP\] +> To run your `ClientApps` on GPU or to adjust the degree of parallelism of your simulation, edit the `[tool.flwr.federations.local-simulation]` section in the `pyproject.toml`. + +```bash +flwr run . + +# To disable W&B +flwr run . --run-config use-wandb=false ```
You can simply start everything in a terminal as follows: +```bash +flwr run . local-sim-gpu +``` -```shell -# After activating your environment -./run.sh +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: + +```bash +flwr run . --run-config "num-server-rounds=5 fraction-fit=0.5" ``` -The `run.sh` script starts processes in the background so that you don't have to open eleven terminal windows. If you experiment with the code example and something goes wrong, simply using `CTRL + C` on Linux (or `CMD + C` on macOS) wouldn't normally kill all these processes, which is why the script ends with `trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM EXIT` and `wait`. This simply allows you to stop the experiment using `CTRL + C` (or `CMD + C`). If you change the script and anything goes wrong you can still use `killall python` (or `killall python3`) to kill all background processes (or a more specific command if you have other Python processes running that you don't want to kill). +### Run with the Deployment Engine -You can also manually run `python3 server.py` and `python3 client.py --client-id ` for as many clients as you want but you have to make sure that each command is run in a different terminal window (or a different computer on the network). In addition, you can make your clients use either `EfficienNet` (default) or `AlexNet` (but all clients in the experiment should use the same). Switch between models using the `--model` flag when launching `client.py` and `server.py`. +> \[!NOTE\] +> An update to this example will show how to run this Flower application with the Deployment Engine and TLS certificates, or with Docker. 
diff --git a/examples/advanced-pytorch/_static/fmnist_50_lda.png b/examples/advanced-pytorch/_static/fmnist_50_lda.png new file mode 100644 index 000000000000..9dfedc59a3de Binary files /dev/null and b/examples/advanced-pytorch/_static/fmnist_50_lda.png differ diff --git a/examples/advanced-pytorch/_static/wandb_plots.png b/examples/advanced-pytorch/_static/wandb_plots.png new file mode 100644 index 000000000000..f0f44ca5be19 Binary files /dev/null and b/examples/advanced-pytorch/_static/wandb_plots.png differ diff --git a/examples/advanced-pytorch/client.py b/examples/advanced-pytorch/client.py deleted file mode 100644 index 7c1420a2cecd..000000000000 --- a/examples/advanced-pytorch/client.py +++ /dev/null @@ -1,158 +0,0 @@ -import utils -from torch.utils.data import DataLoader -import torch -import flwr as fl -import argparse -from collections import OrderedDict -import warnings -import datasets - -warnings.filterwarnings("ignore") - - -class CifarClient(fl.client.NumPyClient): - def __init__( - self, - trainset: datasets.Dataset, - testset: datasets.Dataset, - device: torch.device, - model_str: str, - validation_split: int = 0.1, - ): - self.device = device - self.trainset = trainset - self.testset = testset - self.validation_split = validation_split - if model_str == "alexnet": - self.model = utils.load_alexnet(classes=10) - else: - self.model = utils.load_efficientnet(classes=10) - - def set_parameters(self, parameters): - """Loads a alexnet or efficientnet model and replaces it parameters with the - ones given.""" - - params_dict = zip(self.model.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) - self.model.load_state_dict(state_dict, strict=True) - - def fit(self, parameters, config): - """Train parameters on the locally held training set.""" - - # Update local model parameters - self.set_parameters(parameters) - - # Get hyperparameters for this round - batch_size: int = config["batch_size"] - epochs: 
int = config["local_epochs"] - - train_valid = self.trainset.train_test_split(self.validation_split, seed=42) - trainset = train_valid["train"] - valset = train_valid["test"] - - train_loader = DataLoader(trainset, batch_size=batch_size, shuffle=True) - val_loader = DataLoader(valset, batch_size=batch_size) - - results = utils.train(self.model, train_loader, val_loader, epochs, self.device) - - parameters_prime = utils.get_model_params(self.model) - num_examples_train = len(trainset) - - return parameters_prime, num_examples_train, results - - def evaluate(self, parameters, config): - """Evaluate parameters on the locally held test set.""" - # Update local model parameters - self.set_parameters(parameters) - - # Get config values - steps: int = config["val_steps"] - - # Evaluate global model parameters on the local test data and return results - testloader = DataLoader(self.testset, batch_size=16) - - loss, accuracy = utils.test(self.model, testloader, steps, self.device) - return float(loss), len(self.testset), {"accuracy": float(accuracy)} - - -def client_dry_run(device: torch.device = "cpu"): - """Weak tests to check whether all client methods are working as expected.""" - - model = utils.load_efficientnet(classes=10) - trainset, testset = utils.load_partition(0) - trainset = trainset.select(range(10)) - testset = testset.select(range(10)) - client = CifarClient(trainset, testset, device) - client.fit( - utils.get_model_params(model), - {"batch_size": 16, "local_epochs": 1}, - ) - - client.evaluate(utils.get_model_params(model), {"val_steps": 32}) - - print("Dry Run Successful") - - -def main() -> None: - # Parse command line argument `partition` - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--dry", - type=bool, - default=False, - required=False, - help="Do a dry-run to check the client", - ) - parser.add_argument( - "--client-id", - type=int, - default=0, - choices=range(0, 10), - required=False, - help="Specifies the 
artificial data partition of CIFAR10 to be used. \ - Picks partition 0 by default", - ) - parser.add_argument( - "--toy", - action="store_true", - help="Set to true to quicky run the client using only 10 datasamples. \ - Useful for testing purposes. Default: False", - ) - parser.add_argument( - "--use_cuda", - type=bool, - default=False, - required=False, - help="Set to true to use GPU. Default: False", - ) - parser.add_argument( - "--model", - type=str, - default="efficientnet", - choices=["efficientnet", "alexnet"], - help="Use either Efficientnet or Alexnet models. \ - If you want to achieve differential privacy, please use the Alexnet model", - ) - - args = parser.parse_args() - - device = torch.device( - "cuda:0" if torch.cuda.is_available() and args.use_cuda else "cpu" - ) - - if args.dry: - client_dry_run(device) - else: - # Load a subset of CIFAR-10 to simulate the local data partition - trainset, testset = utils.load_partition(args.client_id) - - if args.toy: - trainset = trainset.select(range(10)) - testset = testset.select(range(10)) - # Start Flower client - client = CifarClient(trainset, testset, device, args.model).to_client() - fl.client.start_client(server_address="127.0.0.1:8080", client=client) - - -if __name__ == "__main__": - main() diff --git a/examples/advanced-pytorch/pyproject.toml b/examples/advanced-pytorch/pyproject.toml index b846a6054cc8..84ad510db50a 100644 --- a/examples/advanced-pytorch/pyproject.toml +++ b/examples/advanced-pytorch/pyproject.toml @@ -1,20 +1,46 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] -name = "advanced-pytorch" -version = "0.1.0" -description = "Advanced Flower/PyTorch Example" -authors = [ - "The Flower Authors ", - "Kaushik Amar Das ", +[project] +name = "pytorch-example" +version = "1.0.0" +description = "Federated Learning with PyTorch and Flower (Advanced Example)" +license = 
"Apache-2.0" +dependencies = [ + "flwr[simulation]>=1.12.0", + "flwr-datasets[vision]>=0.3.0", + "torch==2.2.1", + "torchvision==0.17.1", + "wandb==0.17.8", ] -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = ">=1.0,<2.0" -flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } -torch = "1.13.1" -torchvision = "0.14.1" -validators = "0.18.2" +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "pytorch_example.server_app:app" +clientapp = "pytorch_example.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 10 +fraction-fit = 0.25 +fraction-evaluate = 0.5 +local-epochs = 1 +server-device = "cpu" +use-wandb = true + +[tool.flwr.federations] +default = "local-sim" + +[tool.flwr.federations.local-sim] +options.num-supernodes = 50 +options.backend.client-resources.num-cpus = 2 # each ClientApp assumes to use 2CPUs +options.backend.client-resources.num-gpus = 0.0 # ratio of VRAM a ClientApp has access to +[tool.flwr.federations.local-sim-gpu] +options.num-supernodes = 50 +options.backend.client-resources.num-cpus = 2 +options.backend.client-resources.num-gpus = 0.25 diff --git a/examples/advanced-pytorch/pytorch_example/__init__.py b/examples/advanced-pytorch/pytorch_example/__init__.py new file mode 100644 index 000000000000..d93e8cdb922d --- /dev/null +++ b/examples/advanced-pytorch/pytorch_example/__init__.py @@ -0,0 +1 @@ +"""pytorch-example: A Flower / PyTorch app.""" diff --git a/examples/advanced-pytorch/pytorch_example/client_app.py b/examples/advanced-pytorch/pytorch_example/client_app.py new file mode 100644 index 000000000000..72a9c8323686 --- /dev/null +++ b/examples/advanced-pytorch/pytorch_example/client_app.py @@ -0,0 +1,122 @@ +"""pytorch-example: A Flower / PyTorch app.""" + +import torch +from pytorch_example.task import Net, get_weights, load_data, set_weights, test, train + +from flwr.client import ClientApp, NumPyClient +from 
flwr.common import Context, ParametersRecord, RecordSet, array_from_numpy + + +# Define Flower Client and client_fn +class FlowerClient(NumPyClient): + """A simple client that showcases how to use the state. + + It implements a basic version of `personalization` by which + the classification layer of the CNN is stored locally and used + and updated during `fit()` and used during `evaluate()`. + """ + + def __init__( + self, net, client_state: RecordSet, trainloader, valloader, local_epochs + ): + self.net: Net = net + self.client_state = client_state + self.trainloader = trainloader + self.valloader = valloader + self.local_epochs = local_epochs + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + self.net.to(self.device) + self.local_layer_name = "classification-head" + + def fit(self, parameters, config): + """Train model locally. + + The client stores in its context the parameters of the last layer in the model + (i.e. the classification head). The classifier is saved at the end of the + training and used the next time this client participates. 
+ """ + + # Apply weights from global models (the whole model is replaced) + set_weights(self.net, parameters) + + # Override weights in classification layer with those this client + # had at the end of the last fit() round it participated in + self._load_layer_weights_from_state() + + train_loss = train( + self.net, + self.trainloader, + self.local_epochs, + lr=float(config["lr"]), + device=self.device, + ) + # Save classification head to context's state to use in a future fit() call + self._save_layer_weights_to_state() + + # Return locally-trained model and metrics + return ( + get_weights(self.net), + len(self.trainloader.dataset), + {"train_loss": train_loss}, + ) + + def _save_layer_weights_to_state(self): + """Save last layer weights to state.""" + state_dict_arrays = {} + for k, v in self.net.fc2.state_dict().items(): + state_dict_arrays[k] = array_from_numpy(v.cpu().numpy()) + + # Add to recordset (replace if already exists) + self.client_state.parameters_records[self.local_layer_name] = ParametersRecord( + state_dict_arrays + ) + + def _load_layer_weights_from_state(self): + """Load last layer weights from state.""" + if self.local_layer_name not in self.client_state.parameters_records: + return + + state_dict = {} + param_records = self.client_state.parameters_records + for k, v in param_records[self.local_layer_name].items(): + state_dict[k] = torch.from_numpy(v.numpy()) + + # apply previously saved classification head by this client + self.net.fc2.load_state_dict(state_dict, strict=True) + + def evaluate(self, parameters, config): + """Evaluate the global model on the local validation set. + + Note the classification head is replaced with the weights this client had the + last time it trained the model.
+ """ + set_weights(self.net, parameters) + # Override weights in classification layer with those this client + # had at the end of the last fit() round it participated in + self._load_layer_weights_from_state() + loss, accuracy = test(self.net, self.valloader, self.device) + return loss, len(self.valloader.dataset), {"accuracy": accuracy} + + +def client_fn(context: Context): + # Load model and data + net = Net() + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + trainloader, valloader = load_data(partition_id, num_partitions) + local_epochs = context.run_config["local-epochs"] + + # Return Client instance + # We pass the state to persist information across + # participation rounds. Note that each client always + # receives the same Context instance (it's a 1:1 mapping) + client_state = context.state + return FlowerClient( + net, client_state, trainloader, valloader, local_epochs + ).to_client() + + +# Flower ClientApp +app = ClientApp( + client_fn, +) diff --git a/examples/advanced-pytorch/pytorch_example/server_app.py b/examples/advanced-pytorch/pytorch_example/server_app.py new file mode 100644 index 000000000000..3fa2ae26dc7f --- /dev/null +++ b/examples/advanced-pytorch/pytorch_example/server_app.py @@ -0,0 +1,96 @@ +"""pytorch-example: A Flower / PyTorch app.""" + +import torch +from pytorch_example.strategy import CustomFedAvg +from pytorch_example.task import ( + Net, + apply_eval_transforms, + get_weights, + set_weights, + test, +) +from torch.utils.data import DataLoader + +from datasets import load_dataset +from flwr.common import Context, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig + + +def gen_evaluate_fn( + testloader: DataLoader, + device: torch.device, +): + """Generate the function for centralized evaluation.""" + + def evaluate(server_round, parameters_ndarrays, config): + """Evaluate global model on centralized test set.""" + net = Net() 
+ set_weights(net, parameters_ndarrays) + net.to(device) + loss, accuracy = test(net, testloader, device=device) + return loss, {"centralized_accuracy": accuracy} + + return evaluate + + +def on_fit_config(server_round: int): + """Construct `config` that clients receive when running `fit()`""" + lr = 0.1 + # Enable a simple form of learning rate decay + if server_round > 10: + lr /= 2 + return {"lr": lr} + + +# Define metric aggregation function +def weighted_average(metrics): + # Multiply accuracy of each client by number of examples used + accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + # Aggregate and return custom metric (weighted average) + return {"federated_evaluate_accuracy": sum(accuracies) / sum(examples)} + + +def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] + fraction_fit = context.run_config["fraction-fit"] + fraction_eval = context.run_config["fraction-evaluate"] + server_device = context.run_config["server-device"] + + # Initialize model parameters + ndarrays = get_weights(Net()) + parameters = ndarrays_to_parameters(ndarrays) + + # Prepare dataset for central evaluation + + # This is the exact same dataset as the one downloaded by the clients via + # FlowerDatasets. However, we don't use FlowerDatasets for the server since + # partitioning is not needed.
+ # We make use of the "test" split only + global_test_set = load_dataset("zalando-datasets/fashion_mnist")["test"] + + testloader = DataLoader( + global_test_set.with_transform(apply_eval_transforms), + batch_size=32, + ) + + # Define strategy + strategy = CustomFedAvg( + run_config=context.run_config, + use_wandb=context.run_config["use-wandb"], + fraction_fit=fraction_fit, + fraction_evaluate=fraction_eval, + initial_parameters=parameters, + on_fit_config_fn=on_fit_config, + evaluate_fn=gen_evaluate_fn(testloader, device=server_device), + evaluate_metrics_aggregation_fn=weighted_average, + ) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Create ServerApp +app = ServerApp(server_fn=server_fn) diff --git a/examples/advanced-pytorch/pytorch_example/strategy.py b/examples/advanced-pytorch/pytorch_example/strategy.py new file mode 100644 index 000000000000..97fc0010f143 --- /dev/null +++ b/examples/advanced-pytorch/pytorch_example/strategy.py @@ -0,0 +1,116 @@ +"""pytorch-example: A Flower / PyTorch app.""" + +import json +from logging import INFO + +import torch +import wandb +from pytorch_example.task import Net, create_run_dir, set_weights + +from flwr.common import logger, parameters_to_ndarrays +from flwr.common.typing import UserConfig +from flwr.server.strategy import FedAvg + +PROJECT_NAME = "FLOWER-advanced-pytorch" + + +class CustomFedAvg(FedAvg): + """A class that behaves like FedAvg but has extra functionality. + + This strategy: (1) saves results to the filesystem, (2) saves a + checkpoint of the global model when a new best is found, (3) logs + results to W&B if enabled. 
+ """ + + def __init__(self, run_config: UserConfig, use_wandb: bool, *args, **kwargs): + super().__init__(*args, **kwargs) + + # Create a directory where to save results from this run + self.save_path, self.run_dir = create_run_dir(run_config) + self.use_wandb = use_wandb + # Initialise W&B if set + if use_wandb: + self._init_wandb_project() + + # Keep track of best acc + self.best_acc_so_far = 0.0 + + # A dictionary to store results as they come + self.results = {} + + def _init_wandb_project(self): + # init W&B + wandb.init(project=PROJECT_NAME, name=f"{str(self.run_dir)}-ServerApp") + + def _store_results(self, tag: str, results_dict): + """Store results in dictionary, then save as JSON.""" + # Update results dict + if tag in self.results: + self.results[tag].append(results_dict) + else: + self.results[tag] = [results_dict] + + # Save results to disk. + # Note we overwrite the same file with each call to this function. + # While this works, a more sophisticated approach is preferred + # in situations where the contents to be saved are larger. + with open(f"{self.save_path}/results.json", "w", encoding="utf-8") as fp: + json.dump(self.results, fp) + + def _update_best_acc(self, round, accuracy, parameters): + """Determines if a new best global model has been found. + + If so, the model checkpoint is saved to disk. + """ + if accuracy > self.best_acc_so_far: + self.best_acc_so_far = accuracy + logger.log(INFO, "💡 New best global model found: %f", accuracy) + # You could save the parameters object directly. + # Instead we are going to apply them to a PyTorch + # model and save the state dict. 
+ # Converts flwr.common.Parameters to ndarrays + ndarrays = parameters_to_ndarrays(parameters) + model = Net() + set_weights(model, ndarrays) + # Save the PyTorch model + file_name = f"model_state_acc_{accuracy}_round_{round}.pth" + torch.save(model.state_dict(), self.save_path / file_name) + + def store_results_and_log(self, server_round: int, tag: str, results_dict): + """A helper method that stores results and logs them to W&B if enabled.""" + # Store results + self._store_results( + tag=tag, + results_dict={"round": server_round, **results_dict}, + ) + + if self.use_wandb: + # Log centralized loss and metrics to W&B + wandb.log(results_dict, step=server_round) + + def evaluate(self, server_round, parameters): + """Run centralized evaluation if callback was passed to strategy init.""" + loss, metrics = super().evaluate(server_round, parameters) + + # Save model if new best central accuracy is found + self._update_best_acc(server_round, metrics["centralized_accuracy"], parameters) + + # Store and log + self.store_results_and_log( + server_round=server_round, + tag="centralized_evaluate", + results_dict={"centralized_loss": loss, **metrics}, + ) + return loss, metrics + + def aggregate_evaluate(self, server_round, results, failures): + """Aggregate results from federated evaluation.""" + loss, metrics = super().aggregate_evaluate(server_round, results, failures) + + # Store and log + self.store_results_and_log( + server_round=server_round, + tag="federated_evaluate", + results_dict={"federated_evaluate_loss": loss, **metrics}, + ) + return loss, metrics diff --git a/examples/advanced-pytorch/pytorch_example/task.py b/examples/advanced-pytorch/pytorch_example/task.py new file mode 100644 index 000000000000..0224e8236408 --- /dev/null +++ b/examples/advanced-pytorch/pytorch_example/task.py @@ -0,0 +1,159 @@ +"""pytorch-example: A Flower / PyTorch app.""" + +import json +from collections import OrderedDict +from datetime import datetime +from pathlib import Path + 
+import torch +import torch.nn as nn +import torch.nn.functional as F +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import DirichletPartitioner +from torch.utils.data import DataLoader +from torchvision.transforms import ( + Compose, + Normalize, + RandomCrop, + RandomHorizontalFlip, + ToTensor, +) + +from flwr.common.typing import UserConfig + +FM_NORMALIZATION = ((0.1307,), (0.3081,)) +EVAL_TRANSFORMS = Compose([ToTensor(), Normalize(*FM_NORMALIZATION)]) +TRAIN_TRANSFORMS = Compose( + [ + RandomCrop(28, padding=4), + RandomHorizontalFlip(), + ToTensor(), + Normalize(*FM_NORMALIZATION), + ] +) + + +class Net(nn.Module): + """Model (simple CNN adapted for Fashion-MNIST)""" + + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 16, 5) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(16, 32, 5) + self.fc1 = nn.Linear(32 * 4 * 4, 128) + self.fc2 = nn.Linear(128, 10) + + def forward(self, x): + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = x.view(-1, 32 * 4 * 4) + x = F.relu(self.fc1(x)) + return self.fc2(x) + + +def train(net, trainloader, epochs, lr, device): + """Train the model on the training set.""" + net.to(device) # move model to GPU if available + criterion = torch.nn.CrossEntropyLoss().to(device) + optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9) + net.train() + running_loss = 0.0 + for _ in range(epochs): + for batch in trainloader: + images = batch["image"] + labels = batch["label"] + optimizer.zero_grad() + loss = criterion(net(images.to(device)), labels.to(device)) + loss.backward() + optimizer.step() + running_loss += loss.item() + + avg_trainloss = running_loss / len(trainloader) + return avg_trainloss + + +def test(net, testloader, device): + """Validate the model on the test set.""" + net.to(device) + criterion = torch.nn.CrossEntropyLoss() + correct, loss = 0, 0.0 + with torch.no_grad(): + for batch in testloader: + images = 
batch["image"].to(device) + labels = batch["label"].to(device) + outputs = net(images) + loss += criterion(outputs, labels).item() + correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() + accuracy = correct / len(testloader.dataset) + loss = loss / len(testloader) + return loss, accuracy + + +def get_weights(net): + return [val.cpu().numpy() for _, val in net.state_dict().items()] + + +def set_weights(net, parameters): + params_dict = zip(net.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) + + +def apply_train_transforms(batch): + """Apply transforms to the partition from FederatedDataset.""" + batch["image"] = [TRAIN_TRANSFORMS(img) for img in batch["image"]] + return batch + + +def apply_eval_transforms(batch): + """Apply transforms to the partition from FederatedDataset.""" + batch["image"] = [EVAL_TRANSFORMS(img) for img in batch["image"]] + return batch + + +fds = None # Cache FederatedDataset + + +def load_data(partition_id: int, num_partitions: int): + """Load partition FashionMNIST data.""" + # Only initialize `FederatedDataset` once + global fds + if fds is None: + partitioner = DirichletPartitioner( + num_partitions=num_partitions, + partition_by="label", + alpha=1.0, + seed=42, + ) + fds = FederatedDataset( + dataset="zalando-datasets/fashion_mnist", + partitioners={"train": partitioner}, + ) + partition = fds.load_partition(partition_id) + # Divide data on each node: 80% train, 20% test + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) + + train_partition = partition_train_test["train"].with_transform( + apply_train_transforms + ) + test_partition = partition_train_test["test"].with_transform(apply_eval_transforms) + trainloader = DataLoader(train_partition, batch_size=32, shuffle=True) + testloader = DataLoader(test_partition, batch_size=32) + return trainloader, testloader + + +def create_run_dir(config: 
UserConfig) -> tuple[Path, str]: + """Create a directory where to save results from this run.""" + # Create output directory given current timestamp + current_time = datetime.now() + run_dir = current_time.strftime("%Y-%m-%d/%H-%M-%S") + # Save path is based on the current directory + save_path = Path.cwd() / f"outputs/{run_dir}" + save_path.mkdir(parents=True, exist_ok=False) + + # Save run config as json + with open(f"{save_path}/run_config.json", "w", encoding="utf-8") as fp: + json.dump(config, fp) + + return save_path, run_dir diff --git a/examples/advanced-pytorch/requirements.txt b/examples/advanced-pytorch/requirements.txt deleted file mode 100644 index f4d6a0774162..000000000000 --- a/examples/advanced-pytorch/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -flwr>=1.0, <2.0 -flwr-datasets[vision]>=0.0.2, <1.0.0 -torch==1.13.1 -torchvision==0.14.1 -validators==0.18.2 diff --git a/examples/advanced-pytorch/run.sh b/examples/advanced-pytorch/run.sh deleted file mode 100755 index c3d52491b987..000000000000 --- a/examples/advanced-pytorch/run.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/ - -python server.py --toy & -sleep 10 # Sleep for 10s to give the server enough time to start and dowload the dataset - -for i in `seq 0 9`; do - echo "Starting client $i" - python client.py --client-id=${i} --toy & -done - -# Enable CTRL+C to stop all background processes -trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM -# Wait for all background processes to complete -wait diff --git a/examples/advanced-pytorch/server.py b/examples/advanced-pytorch/server.py deleted file mode 100644 index 489694ab1ea1..000000000000 --- a/examples/advanced-pytorch/server.py +++ /dev/null @@ -1,123 +0,0 @@ -from typing import Dict, Optional, Tuple -from collections import OrderedDict -import argparse -from torch.utils.data import DataLoader - -import flwr as fl -import torch - -import utils - -import warnings - -from
flwr_datasets import FederatedDataset - -warnings.filterwarnings("ignore") - - -def fit_config(server_round: int): - """Return training configuration dict for each round. - - Keep batch size fixed at 32, perform two rounds of training with one local epoch, - increase to two local epochs afterwards. - """ - config = { - "batch_size": 16, - "local_epochs": 1 if server_round < 2 else 2, - } - return config - - -def evaluate_config(server_round: int): - """Return evaluation configuration dict for each round. - - Perform five local evaluation steps on each client (i.e., use five batches) during - rounds one to three, then increase to ten local evaluation steps. - """ - val_steps = 5 if server_round < 4 else 10 - return {"val_steps": val_steps} - - -def get_evaluate_fn(model: torch.nn.Module, toy: bool): - """Return an evaluation function for server-side evaluation.""" - - # Load data here to avoid the overhead of doing it in `evaluate` itself - centralized_data = utils.load_centralized_data() - if toy: - # use only 10 samples as validation set - centralized_data = centralized_data.select(range(10)) - - val_loader = DataLoader(centralized_data, batch_size=16) - - # The `evaluate` function will be called after every round - def evaluate( - server_round: int, - parameters: fl.common.NDArrays, - config: Dict[str, fl.common.Scalar], - ) -> Optional[Tuple[float, Dict[str, fl.common.Scalar]]]: - # Update model with the latest parameters - params_dict = zip(model.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) - model.load_state_dict(state_dict, strict=True) - - loss, accuracy = utils.test(model, val_loader) - return loss, {"accuracy": accuracy} - - return evaluate - - -def main(): - """Load model for - 1. server-side parameter initialization - 2. 
server-side parameter evaluation - """ - - # Parse command line argument `partition` - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--toy", - action="store_true", - help="Set to true to use only 10 datasamples for validation. \ - Useful for testing purposes. Default: False", - ) - parser.add_argument( - "--model", - type=str, - default="efficientnet", - choices=["efficientnet", "alexnet"], - help="Use either Efficientnet or Alexnet models. \ - If you want to achieve differential privacy, please use the Alexnet model", - ) - - args = parser.parse_args() - - if args.model == "alexnet": - model = utils.load_alexnet(classes=10) - else: - model = utils.load_efficientnet(classes=10) - - model_parameters = [val.cpu().numpy() for _, val in model.state_dict().items()] - - # Create strategy - strategy = fl.server.strategy.FedAvg( - fraction_fit=1.0, - fraction_evaluate=1.0, - min_fit_clients=2, - min_evaluate_clients=2, - min_available_clients=10, - evaluate_fn=get_evaluate_fn(model, args.toy), - on_fit_config_fn=fit_config, - on_evaluate_config_fn=evaluate_config, - initial_parameters=fl.common.ndarrays_to_parameters(model_parameters), - ) - - # Start Flower server for four rounds of federated learning - fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=4), - strategy=strategy, - ) - - -if __name__ == "__main__": - main() diff --git a/examples/advanced-pytorch/utils.py b/examples/advanced-pytorch/utils.py deleted file mode 100644 index c47b4fa38593..000000000000 --- a/examples/advanced-pytorch/utils.py +++ /dev/null @@ -1,118 +0,0 @@ -import torch -from torchvision.transforms import Compose, ToTensor, Normalize, Resize, CenterCrop -from torchvision.models import efficientnet_b0, AlexNet -import warnings - -from flwr_datasets import FederatedDataset - - -warnings.filterwarnings("ignore") - - -def load_partition(partition_id, toy: bool = False): - """Load partition CIFAR10 data.""" - fds = 
FederatedDataset(dataset="cifar10", partitioners={"train": 10}) - partition = fds.load_partition(partition_id) - # Divide data on each node: 80% train, 20% test - partition_train_test = partition.train_test_split(test_size=0.2, seed=42) - partition_train_test = partition_train_test.with_transform(apply_transforms) - return partition_train_test["train"], partition_train_test["test"] - - -def load_centralized_data(): - fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10}) - centralized_data = fds.load_split("test") - centralized_data = centralized_data.with_transform(apply_transforms) - return centralized_data - - -def apply_transforms(batch): - """Apply transforms to the partition from FederatedDataset.""" - pytorch_transforms = Compose( - [ - Resize(256), - CenterCrop(224), - ToTensor(), - Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), - ] - ) - batch["img"] = [pytorch_transforms(img) for img in batch["img"]] - return batch - - -def train( - net, trainloader, valloader, epochs, device: torch.device = torch.device("cpu") -): - """Train the network on the training set.""" - print("Starting training...") - net.to(device) # move model to GPU if available - criterion = torch.nn.CrossEntropyLoss().to(device) - optimizer = torch.optim.SGD( - net.parameters(), lr=0.001, momentum=0.9, weight_decay=1e-4 - ) - net.train() - for _ in range(epochs): - for batch in trainloader: - images, labels = batch["img"], batch["label"] - images, labels = images.to(device), labels.to(device) - optimizer.zero_grad() - loss = criterion(net(images), labels) - loss.backward() - optimizer.step() - - net.to("cpu") # move model back to CPU - - train_loss, train_acc = test(net, trainloader) - val_loss, val_acc = test(net, valloader) - - results = { - "train_loss": train_loss, - "train_accuracy": train_acc, - "val_loss": val_loss, - "val_accuracy": val_acc, - } - return results - - -def test( - net, testloader, steps: int = None, device: torch.device = 
torch.device("cpu") -): - """Validate the network on the entire test set.""" - print("Starting evalutation...") - net.to(device) # move model to GPU if available - criterion = torch.nn.CrossEntropyLoss() - correct, loss = 0, 0.0 - net.eval() - with torch.no_grad(): - for batch_idx, batch in enumerate(testloader): - images, labels = batch["img"], batch["label"] - images, labels = images.to(device), labels.to(device) - outputs = net(images) - loss += criterion(outputs, labels).item() - _, predicted = torch.max(outputs.data, 1) - correct += (predicted == labels).sum().item() - if steps is not None and batch_idx == steps: - break - accuracy = correct / len(testloader.dataset) - net.to("cpu") # move model back to CPU - return loss, accuracy - - -def load_efficientnet(classes: int = 10): - """Loads EfficienNetB0 from TorchVision.""" - efficientnet = efficientnet_b0(pretrained=True) - # Re-init output linear layer with the right number of classes - model_classes = efficientnet.classifier[1].in_features - if classes != model_classes: - efficientnet.classifier[1] = torch.nn.Linear(model_classes, classes) - return efficientnet - - -def get_model_params(model): - """Returns a model's parameters.""" - return [val.cpu().numpy() for _, val in model.state_dict().items()] - - -def load_alexnet(classes): - """Load AlexNet model from TorchVision.""" - return AlexNet(num_classes=classes) diff --git a/examples/advanced-tensorflow/README.md b/examples/advanced-tensorflow/README.md index 94707b5cbc98..375c539d13dd 100644 --- a/examples/advanced-tensorflow/README.md +++ b/examples/advanced-tensorflow/README.md @@ -1,3 +1,9 @@ +--- +tags: [advanced, vision, fds] +dataset: [CIFAR-10] +framework: [tensorflow, Keras] +--- + # Advanced Flower Example (TensorFlow/Keras) This example demonstrates an advanced federated learning setup using Flower with TensorFlow/Keras. 
This example uses [Flower Datasets](https://flower.ai/docs/datasets/) and it differs from the quickstart example in the following ways: diff --git a/examples/advanced-tensorflow/client.py b/examples/advanced-tensorflow/client.py index b658a1f9ea04..b6a485b7ba4c 100644 --- a/examples/advanced-tensorflow/client.py +++ b/examples/advanced-tensorflow/client.py @@ -2,10 +2,8 @@ import os from pathlib import Path -import tensorflow as tf - import flwr as fl - +import tensorflow as tf from flwr_datasets import FederatedDataset # Make TensorFlow logs less verbose diff --git a/examples/advanced-tensorflow/pyproject.toml b/examples/advanced-tensorflow/pyproject.toml index 02bd923129a4..9fc623a0f3ec 100644 --- a/examples/advanced-tensorflow/pyproject.toml +++ b/examples/advanced-tensorflow/pyproject.toml @@ -9,7 +9,7 @@ description = "Advanced Flower/TensorFlow Example" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr = ">=1.0,<2.0" flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } tensorflow-cpu = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "platform_machine == \"x86_64\"" } diff --git a/examples/advanced-tensorflow/server.py b/examples/advanced-tensorflow/server.py index e159a096dc83..8febdd57614d 100644 --- a/examples/advanced-tensorflow/server.py +++ b/examples/advanced-tensorflow/server.py @@ -1,9 +1,8 @@ -from typing import Dict, Optional, Tuple from pathlib import Path +from typing import Dict, Optional, Tuple import flwr as fl import tensorflow as tf - from flwr_datasets import FederatedDataset diff --git a/examples/android-kotlin/README.md b/examples/android-kotlin/README.md index 2d0f704fdc0e..6cadb8e436fe 100644 --- a/examples/android-kotlin/README.md +++ b/examples/android-kotlin/README.md @@ -1,3 +1,9 @@ +--- +tags: [mobile, vision, sdk] +dataset: [CIFAR-10] +framework: [Android, Kotlin, TensorFlowLite] +--- + # Flower Android Client Example with Kotlin and TensorFlow Lite 
2022 This example is similar to the Flower Android Example in Java: diff --git a/examples/android-kotlin/gen_tflite/pyproject.toml b/examples/android-kotlin/gen_tflite/pyproject.toml index aabf351bd51d..884e7148cc3d 100644 --- a/examples/android-kotlin/gen_tflite/pyproject.toml +++ b/examples/android-kotlin/gen_tflite/pyproject.toml @@ -5,7 +5,7 @@ description = "" authors = ["Steven Hé (Sīchàng) "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" numpy = ">=1.23,<2.0" tensorflow-cpu = ">=2.12,<3.0" pandas = ">=2.0,<3.0" diff --git a/examples/android-kotlin/pyproject.toml b/examples/android-kotlin/pyproject.toml index 9cf0688d83b5..b83b243a349d 100644 --- a/examples/android-kotlin/pyproject.toml +++ b/examples/android-kotlin/pyproject.toml @@ -9,5 +9,5 @@ description = "" authors = ["Steven Hé (Sīchàng) "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr = ">=1.0,<2.0" diff --git a/examples/android/README.md b/examples/android/README.md index f9f2bb93b8dc..83519f15d04d 100644 --- a/examples/android/README.md +++ b/examples/android/README.md @@ -1,3 +1,9 @@ +--- +tags: [mobile, vision, sdk] +dataset: [CIFAR-10] +framework: [Android, Java, TensorFlowLite] +--- + # Flower Android Example (TensorFlowLite) This example demonstrates a federated learning setup with Android clients in a background thread. The training on Android is done on a CIFAR10 dataset using TensorFlow Lite. 
The setup is as follows: diff --git a/examples/android/pyproject.toml b/examples/android/pyproject.toml index 0371f7208292..d0d18ebc48bc 100644 --- a/examples/android/pyproject.toml +++ b/examples/android/pyproject.toml @@ -9,7 +9,7 @@ description = "Android Example" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr = ">=1.0,<2.0" tensorflow-cpu = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "platform_machine == \"x86_64\"" } tensorflow-macos = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "sys_platform == \"darwin\" and platform_machine == \"arm64\"" } diff --git a/examples/android/tflite_convertor/convert_to_tflite.py b/examples/android/tflite_convertor/convert_to_tflite.py index 79259a6cdf87..c4b7d2abf48a 100644 --- a/examples/android/tflite_convertor/convert_to_tflite.py +++ b/examples/android/tflite_convertor/convert_to_tflite.py @@ -1,9 +1,8 @@ import tensorflow as tf from tensorflow.keras import layers from tensorflow.keras.regularizers import l2 -from tfltransfer import bases -from tfltransfer import heads -from tfltransfer import optimizers + +from tfltransfer import bases, heads, optimizers from tfltransfer.tflite_transfer_converter import TFLiteTransferConverter # Define the base model. 
diff --git a/examples/android/tflite_convertor/tfltransfer/bases/mobilenetv2_base.py b/examples/android/tflite_convertor/tfltransfer/bases/mobilenetv2_base.py index 9e7823cd1030..083778d883fc 100644 --- a/examples/android/tflite_convertor/tfltransfer/bases/mobilenetv2_base.py +++ b/examples/android/tflite_convertor/tfltransfer/bases/mobilenetv2_base.py @@ -14,9 +14,7 @@ """Base model configuration for MobileNetV2.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +from __future__ import absolute_import, division, print_function import tensorflow as tf diff --git a/examples/android/tflite_convertor/tfltransfer/bases/quantizable_base.py b/examples/android/tflite_convertor/tfltransfer/bases/quantizable_base.py index bbdf36a08ef8..0a59120025f6 100644 --- a/examples/android/tflite_convertor/tfltransfer/bases/quantizable_base.py +++ b/examples/android/tflite_convertor/tfltransfer/bases/quantizable_base.py @@ -14,9 +14,7 @@ """Base model abstract base class that handles quantization.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +from __future__ import absolute_import, division, print_function import abc diff --git a/examples/android/tflite_convertor/tfltransfer/bases/saved_model_base.py b/examples/android/tflite_convertor/tfltransfer/bases/saved_model_base.py index d268e54729eb..90678c91e348 100644 --- a/examples/android/tflite_convertor/tfltransfer/bases/saved_model_base.py +++ b/examples/android/tflite_convertor/tfltransfer/bases/saved_model_base.py @@ -14,9 +14,7 @@ """Base model configuration that reads a specified SavedModel.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +from __future__ import absolute_import, division, print_function import tensorflow as tf diff --git a/examples/android/tflite_convertor/tfltransfer/heads/keras_model_head.py 
b/examples/android/tflite_convertor/tfltransfer/heads/keras_model_head.py index 2b17c6f66f81..5fc13043f78b 100644 --- a/examples/android/tflite_convertor/tfltransfer/heads/keras_model_head.py +++ b/examples/android/tflite_convertor/tfltransfer/heads/keras_model_head.py @@ -14,9 +14,7 @@ """Head model configuration for Keras models.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +from __future__ import absolute_import, division, print_function import os import shutil diff --git a/examples/android/tflite_convertor/tfltransfer/heads/logits_saved_model_head.py b/examples/android/tflite_convertor/tfltransfer/heads/logits_saved_model_head.py index ec0786b9298a..90f2183d2102 100644 --- a/examples/android/tflite_convertor/tfltransfer/heads/logits_saved_model_head.py +++ b/examples/android/tflite_convertor/tfltransfer/heads/logits_saved_model_head.py @@ -14,9 +14,7 @@ """Head model configuration for classifier SavedModels.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +from __future__ import absolute_import, division, print_function import os import shutil diff --git a/examples/android/tflite_convertor/tfltransfer/heads/softmax_classifier_head.py b/examples/android/tflite_convertor/tfltransfer/heads/softmax_classifier_head.py index af869a90b7d3..46e604ff55f5 100644 --- a/examples/android/tflite_convertor/tfltransfer/heads/softmax_classifier_head.py +++ b/examples/android/tflite_convertor/tfltransfer/heads/softmax_classifier_head.py @@ -14,9 +14,7 @@ """Head model configuration for simple softmax classifiers.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +from __future__ import absolute_import, division, print_function import numpy as np import tensorflow as tf diff --git a/examples/android/tflite_convertor/tfltransfer/model_correctness_test.py 
b/examples/android/tflite_convertor/tfltransfer/model_correctness_test.py index d57599d7b95c..e50a83f813d7 100644 --- a/examples/android/tflite_convertor/tfltransfer/model_correctness_test.py +++ b/examples/android/tflite_convertor/tfltransfer/model_correctness_test.py @@ -14,9 +14,7 @@ """End-to-end tests that check model correctness.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +from __future__ import absolute_import, division, print_function import os import tempfile @@ -27,10 +25,7 @@ from tensorflow.compat import v1 as tfv1 # pylint: disable=g-bad-import-order -from tfltransfer import bases -from tfltransfer import optimizers -from tfltransfer import heads -from tfltransfer import tflite_transfer_converter +from tfltransfer import bases, heads, optimizers, tflite_transfer_converter # pylint: enable=g-bad-import-order diff --git a/examples/android/tflite_convertor/tfltransfer/optimizers/adam.py b/examples/android/tflite_convertor/tfltransfer/optimizers/adam.py index 2fe4e1442bb3..1351a0172641 100644 --- a/examples/android/tflite_convertor/tfltransfer/optimizers/adam.py +++ b/examples/android/tflite_convertor/tfltransfer/optimizers/adam.py @@ -14,9 +14,7 @@ """Adam optimizer implementation for transfer learning models.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +from __future__ import absolute_import, division, print_function import tensorflow as tf import tensorflow.compat.v1 as tfv1 diff --git a/examples/android/tflite_convertor/tfltransfer/optimizers/sgd.py b/examples/android/tflite_convertor/tfltransfer/optimizers/sgd.py index 09d22ba2fcad..729af1904103 100644 --- a/examples/android/tflite_convertor/tfltransfer/optimizers/sgd.py +++ b/examples/android/tflite_convertor/tfltransfer/optimizers/sgd.py @@ -14,9 +14,7 @@ """SGD optimizer implementation for transfer learning models.""" -from __future__ import absolute_import 
-from __future__ import division -from __future__ import print_function +from __future__ import absolute_import, division, print_function import tensorflow as tf import tensorflow.compat.v1 as tfv1 diff --git a/examples/android/tflite_convertor/tfltransfer/tflite_transfer_convert.py b/examples/android/tflite_convertor/tfltransfer/tflite_transfer_convert.py index 93dcfd8a67d5..383b441b17ef 100644 --- a/examples/android/tflite_convertor/tfltransfer/tflite_transfer_convert.py +++ b/examples/android/tflite_convertor/tfltransfer/tflite_transfer_convert.py @@ -17,17 +17,12 @@ """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +from __future__ import absolute_import, division, print_function import argparse # pylint: disable=g-bad-import-order -from tfltransfer import bases -from tfltransfer import heads -from tfltransfer import optimizers -from tfltransfer import tflite_transfer_converter +from tfltransfer import bases, heads, optimizers, tflite_transfer_converter # pylint: enable=g-bad-import-order diff --git a/examples/android/tflite_convertor/tfltransfer/tflite_transfer_converter.py b/examples/android/tflite_convertor/tfltransfer/tflite_transfer_converter.py index 38d9493b7617..7e07953d36a9 100644 --- a/examples/android/tflite_convertor/tfltransfer/tflite_transfer_converter.py +++ b/examples/android/tflite_convertor/tfltransfer/tflite_transfer_converter.py @@ -19,9 +19,7 @@ """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +from __future__ import absolute_import, division, print_function import os diff --git a/examples/android/tflite_convertor/tfltransfer/tflite_transfer_converter_test.py b/examples/android/tflite_convertor/tfltransfer/tflite_transfer_converter_test.py index fa7e53c097bc..8b0c87719092 100644 --- a/examples/android/tflite_convertor/tfltransfer/tflite_transfer_converter_test.py +++ 
b/examples/android/tflite_convertor/tfltransfer/tflite_transfer_converter_test.py @@ -14,9 +14,7 @@ """Tests for tflite_transfer_converter.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +from __future__ import absolute_import, division, print_function import tempfile import unittest @@ -27,10 +25,7 @@ from tensorflow.keras.regularizers import l2 # pylint: disable=g-bad-import-order -from tfltransfer import bases -from tfltransfer import heads -from tfltransfer import optimizers -from tfltransfer import tflite_transfer_converter +from tfltransfer import bases, heads, optimizers, tflite_transfer_converter # pylint: enable=g-bad-import-order diff --git a/examples/android/tflite_convertor/tfltransfer/utils.py b/examples/android/tflite_convertor/tfltransfer/utils.py index 5648a449c7e7..c0f61b4e4aad 100644 --- a/examples/android/tflite_convertor/tfltransfer/utils.py +++ b/examples/android/tflite_convertor/tfltransfer/utils.py @@ -14,9 +14,7 @@ """Helper utilities for various parts of the converter.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +from __future__ import absolute_import, division, print_function from tensorflow.compat import v1 as tfv1 diff --git a/examples/app-pytorch/README.md b/examples/app-pytorch/README.md index 14de3c7d632e..5cfae8440ed2 100644 --- a/examples/app-pytorch/README.md +++ b/examples/app-pytorch/README.md @@ -1,3 +1,9 @@ +--- +tags: [basic, vision, fds] +dataset: [CIFAR-10] +framework: [torch, torchvision] +--- + # Flower App (PyTorch) 🧪 > 🧪 = This example covers experimental features that might change in future versions of Flower diff --git a/examples/app-pytorch/client.py b/examples/app-pytorch/client.py index eb84968bb986..4168baedea5f 100644 --- a/examples/app-pytorch/client.py +++ b/examples/app-pytorch/client.py @@ -1,15 +1,6 @@ from flwr.client import ClientApp, NumPyClient -from task import ( - 
Net, - DEVICE, - load_data, - get_weights, - set_weights, - train, - test, -) - +from task import DEVICE, Net, get_weights, load_data, set_weights, test, train # Load model and data (simple CNN, CIFAR-10) net = Net().to(DEVICE) diff --git a/examples/app-pytorch/client_low_level.py b/examples/app-pytorch/client_low_level.py index 19268ff84ba4..538b5b4e88ff 100644 --- a/examples/app-pytorch/client_low_level.py +++ b/examples/app-pytorch/client_low_level.py @@ -1,5 +1,5 @@ from flwr.client import ClientApp -from flwr.common import Message, Context +from flwr.common import Context, Message def hello_world_mod(msg, ctx, call_next) -> Message: diff --git a/examples/app-pytorch/pyproject.toml b/examples/app-pytorch/pyproject.toml index c00e38aef19b..88e916546632 100644 --- a/examples/app-pytorch/pyproject.toml +++ b/examples/app-pytorch/pyproject.toml @@ -9,7 +9,7 @@ description = "Multi-Tenant Federated Learning with Flower and PyTorch" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = "^3.8" +python = "^3.9" # Mandatory dependencies flwr = { version = "^1.8.0", extras = ["simulation"] } torch = "2.2.1" diff --git a/examples/app-pytorch/server.py b/examples/app-pytorch/server.py index 0b4ad1ddba46..0beb3d811346 100644 --- a/examples/app-pytorch/server.py +++ b/examples/app-pytorch/server.py @@ -1,8 +1,8 @@ from typing import List, Tuple +from flwr.common import Metrics, ndarrays_to_parameters from flwr.server import ServerApp, ServerConfig from flwr.server.strategy import FedAvg -from flwr.common import Metrics, ndarrays_to_parameters from task import Net, get_weights diff --git a/examples/app-pytorch/server_custom.py b/examples/app-pytorch/server_custom.py index 67c1bce99c55..4c9b7f868f31 100644 --- a/examples/app-pytorch/server_custom.py +++ b/examples/app-pytorch/server_custom.py @@ -1,19 +1,19 @@ -from typing import List, Tuple, Dict import random import time +from typing import Dict, List, Tuple import flwr as fl from flwr.common import ( + 
DEFAULT_TTL, + Code, Context, FitIns, - ndarrays_to_parameters, - parameters_to_ndarrays, - NDArrays, - Code, Message, MessageType, Metrics, - DEFAULT_TTL, + NDArrays, + ndarrays_to_parameters, + parameters_to_ndarrays, ) from flwr.common.recordset_compat import fitins_to_recordset, recordset_to_fitres from flwr.server import Driver, History diff --git a/examples/app-pytorch/server_low_level.py b/examples/app-pytorch/server_low_level.py index 7ab79a4a04c8..d11ae446cc28 100644 --- a/examples/app-pytorch/server_low_level.py +++ b/examples/app-pytorch/server_low_level.py @@ -1,20 +1,19 @@ -from typing import List, Tuple, Dict import random import time +from typing import Dict, List, Tuple import flwr as fl from flwr.common import ( + DEFAULT_TTL, Context, - NDArrays, Message, MessageType, Metrics, + NDArrays, RecordSet, - DEFAULT_TTL, ) from flwr.server import Driver - # Run via `flower-server-app server:app` app = fl.server.ServerApp() diff --git a/examples/app-pytorch/server_workflow.py b/examples/app-pytorch/server_workflow.py index 6923010ecf7b..5ebca1e40be0 100644 --- a/examples/app-pytorch/server_workflow.py +++ b/examples/app-pytorch/server_workflow.py @@ -1,11 +1,11 @@ from typing import List, Tuple -from task import Net, get_weights - import flwr as fl from flwr.common import Context, Metrics, ndarrays_to_parameters from flwr.server import Driver, LegacyContext +from task import Net, get_weights + # Define metric aggregation function def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: @@ -51,7 +51,7 @@ def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: def main(driver: Driver, context: Context) -> None: # Construct the LegacyContext context = LegacyContext( - state=context.state, + context=context, config=fl.server.ServerConfig(num_rounds=3), strategy=strategy, ) diff --git a/examples/app-pytorch/task.py b/examples/app-pytorch/task.py index 240f290df320..1fd61966a61a 100644 --- a/examples/app-pytorch/task.py +++ 
b/examples/app-pytorch/task.py @@ -9,7 +9,6 @@ from torchvision.datasets import CIFAR10 from torchvision.transforms import Compose, Normalize, ToTensor - DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") diff --git a/examples/app-secure-aggregation/README.md b/examples/app-secure-aggregation/README.md deleted file mode 100644 index d1ea7bdc893f..000000000000 --- a/examples/app-secure-aggregation/README.md +++ /dev/null @@ -1,93 +0,0 @@ -# Secure aggregation with Flower (the SecAgg+ protocol) 🧪 - -> 🧪 = This example covers experimental features that might change in future versions of Flower -> Please consult the regular PyTorch code examples ([quickstart](https://github.com/adap/flower/tree/main/examples/quickstart-pytorch), [advanced](https://github.com/adap/flower/tree/main/examples/advanced-pytorch)) to learn how to use Flower with PyTorch. - -The following steps describe how to use Secure Aggregation in flower, with `ClientApp` using `secaggplus_mod` and `ServerApp` using `SecAggPlusWorkflow`. - -## Preconditions - -Let's assume the following project structure: - -```bash -$ tree . -. -├── client.py # Client application using `secaggplus_mod` -├── server.py # Server application using `SecAggPlusWorkflow` -├── workflow_with_log.py # Augmented `SecAggPlusWorkflow` -├── run.sh # Quick start script -├── pyproject.toml # Project dependencies (poetry) -└── requirements.txt # Project dependencies (pip) -``` - -## Installing dependencies - -Project dependencies (such as and `flwr`) are defined in `pyproject.toml`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. 
- -### Poetry - -```shell -poetry install -poetry shell -``` - -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: - -```shell -poetry run python3 -c "import flwr" -``` - -### pip - -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. - -```shell -pip install -r requirements.txt -``` - -If you don't see any errors you're good to go! - -## Run the example with one command (recommended) - -```bash -./run.sh -``` - -## Run the example with the simulation engine - -```bash -flower-simulation --server-app server:app --client-app client:app --num-supernodes 5 -``` - -## Alternatively, run the example (in 7 terminal windows) - -Start the Flower Superlink in one terminal window: - -```bash -flower-superlink --insecure -``` - -Start 5 Flower `ClientApp` in 5 separate terminal windows: - -```bash -flower-client-app client:app --insecure -``` - -Start the Flower `ServerApp`: - -```bash -flower-server-app server:app --insecure --verbose -``` - -## Amend the example for practical usage - -For real-world applications, modify the `workflow` in `server.py` as follows: - -```python -workflow = fl.server.workflow.DefaultWorkflow( - fit_workflow=SecAggPlusWorkflow( - num_shares=, - reconstruction_threshold=, - ) -) -``` diff --git a/examples/app-secure-aggregation/client.py b/examples/app-secure-aggregation/client.py deleted file mode 100644 index b2fd02ec00d4..000000000000 --- a/examples/app-secure-aggregation/client.py +++ /dev/null @@ -1,34 +0,0 @@ -import time - -from flwr.client import ClientApp, NumPyClient -from flwr.client.mod import secaggplus_mod -import numpy as np - - -# Define FlowerClient and client_fn -class FlowerClient(NumPyClient): - def fit(self, parameters, config): - # Instead of training and returning model parameters, - # the client directly returns [1.0, 1.0, 1.0] for demonstration 
purposes. - ret_vec = [np.ones(3)] - # Force a significant delay for testing purposes - if "drop" in config and config["drop"]: - print(f"Client dropped for testing purposes.") - time.sleep(8) - else: - print(f"Client uploading {ret_vec[0]}...") - return ret_vec, 1, {} - - -def client_fn(cid: str): - """Create and return an instance of Flower `Client`.""" - return FlowerClient().to_client() - - -# Flower ClientApp -app = ClientApp( - client_fn=client_fn, - mods=[ - secaggplus_mod, - ], -) diff --git a/examples/app-secure-aggregation/pyproject.toml b/examples/app-secure-aggregation/pyproject.toml deleted file mode 100644 index fb1f636d8c33..000000000000 --- a/examples/app-secure-aggregation/pyproject.toml +++ /dev/null @@ -1,14 +0,0 @@ -[build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" - -[tool.poetry] -name = "app-secure-aggregation" -version = "0.1.0" -description = "Flower Secure Aggregation example." -authors = ["The Flower Authors "] - -[tool.poetry.dependencies] -python = "^3.8" -# Mandatory dependencies -flwr = { version = "^1.8.0", extras = ["simulation"] } diff --git a/examples/app-secure-aggregation/requirements.txt b/examples/app-secure-aggregation/requirements.txt deleted file mode 100644 index 2d8be098f264..000000000000 --- a/examples/app-secure-aggregation/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -flwr[simulation]>=1.8.0 diff --git a/examples/app-secure-aggregation/run.sh b/examples/app-secure-aggregation/run.sh deleted file mode 100755 index fa8dc47f26ef..000000000000 --- a/examples/app-secure-aggregation/run.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash -# Kill any currently running client.py processes -pkill -f 'flower-client-app' - -# Kill any currently running flower-superlink processes -pkill -f 'flower-superlink' - -# Start the flower server -echo "Starting flower server in background..." 
-flower-superlink --insecure > /dev/null 2>&1 & -sleep 2 - -# Number of client processes to start -N=5 # Replace with your desired value - -echo "Starting $N ClientApps in background..." - -# Start N client processes -for i in $(seq 1 $N) -do - flower-client-app --insecure client:app > /dev/null 2>&1 & - sleep 0.1 -done - -echo "Starting ServerApp..." -flower-server-app --insecure server:app --verbose - -echo "Clearing background processes..." - -# Kill any currently running client.py processes -pkill -f 'flower-client-app' - -# Kill any currently running flower-superlink processes -pkill -f 'flower-superlink' diff --git a/examples/app-secure-aggregation/server.py b/examples/app-secure-aggregation/server.py deleted file mode 100644 index e9737a5a3c7f..000000000000 --- a/examples/app-secure-aggregation/server.py +++ /dev/null @@ -1,45 +0,0 @@ -from flwr.common import Context -from flwr.server import Driver, LegacyContext, ServerApp, ServerConfig -from flwr.server.strategy import FedAvg -from flwr.server.workflow import DefaultWorkflow, SecAggPlusWorkflow - -from workflow_with_log import SecAggPlusWorkflowWithLogs - - -# Define strategy -strategy = FedAvg( - fraction_fit=1.0, # Select all available clients - fraction_evaluate=0.0, # Disable evaluation - min_available_clients=5, -) - - -# Flower ServerApp -app = ServerApp() - - -@app.main() -def main(driver: Driver, context: Context) -> None: - # Construct the LegacyContext - context = LegacyContext( - state=context.state, - config=ServerConfig(num_rounds=3), - strategy=strategy, - ) - - # Create the workflow - workflow = DefaultWorkflow( - fit_workflow=SecAggPlusWorkflowWithLogs( - num_shares=3, - reconstruction_threshold=2, - timeout=5, - ) - # # For real-world applications, use the following code instead - # fit_workflow=SecAggPlusWorkflow( - # num_shares=, - # reconstruction_threshold=, - # ) - ) - - # Execute - workflow(driver, context) diff --git a/examples/custom-metrics/README.md 
b/examples/custom-metrics/README.md index 317fb6336106..69802cdb949f 100644 --- a/examples/custom-metrics/README.md +++ b/examples/custom-metrics/README.md @@ -1,106 +1,75 @@ -# Flower Example using Custom Metrics +--- +tags: [basic, vision, fds] +dataset: [CIFAR-10] +framework: [tensorflow, scikit-learn] +--- -This simple example demonstrates how to calculate custom metrics over multiple clients beyond the traditional ones available in the ML frameworks. In this case, it demonstrates the use of ready-available `scikit-learn` metrics: accuracy, recall, precision, and f1-score. +# Custom Metrics for Federated Learning with TensorFlow and Flower -Once both the test values (`y_test`) and the predictions (`y_pred`) are available on the client side (`client.py`), other metrics or custom ones are possible to be calculated. +This simple example demonstrates how to calculate custom metrics over multiple clients beyond the traditional ones available in the ML frameworks. In this case, it demonstrates the use of ready-available [scikit-learn metrics](https://scikit-learn.org/stable/modules/model_evaluation.html): accuracy, recall, precision, and f1-score. + +Once both the test values (`y_test`) and the predictions (`y_pred`) are available on the client side (`client_app.py`), other metrics or custom ones are possible to be calculated. 
The main takeaways of this implementation are: -- the use of the `output_dict` on the client side - inside `evaluate` method on `client.py` -- the use of the `evaluate_metrics_aggregation_fn` - to aggregate the metrics on the server side, part of the `strategy` on `server.py` +- the return of multiple evaluation metrics generated at the `evaluate` method on `client_app.py` +- the use of the `evaluate_metrics_aggregation_fn` - to aggregate the metrics on the server side, part of the `strategy` on `server_app.py` This example is based on the `quickstart-tensorflow` with CIFAR-10, source [here](https://flower.ai/docs/quickstart-tensorflow.html), with the addition of [Flower Datasets](https://flower.ai/docs/datasets/index.html) to retrieve the CIFAR-10. Using the CIFAR-10 dataset for classification, this is a multi-class classification problem, thus some changes on how to calculate the metrics using `average='micro'` and `np.argmax` is required. For binary classification, this is not required. Also, for unsupervised learning tasks, such as using a deep autoencoder, a custom metric based on reconstruction error could be implemented on client side. -## Project Setup - -Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: - -```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/custom-metrics . && rm -rf flower && cd custom-metrics -``` - -This will create a new directory called `custom-metrics` containing the following files: - -```shell --- pyproject.toml --- requirements.txt --- client.py --- server.py --- run.sh --- README.md -``` - -### Installing Dependencies - -Project dependencies (such as `scikit-learn`, `tensorflow` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. 
We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. - -#### Poetry +## Set up the project -```shell -poetry install -poetry shell -``` +### Clone the project -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: +Start by cloning the example project: ```shell -poetry run python3 -c "import flwr" +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/custom-metrics . \ + && rm -rf _tmp && cd custom-metrics ``` -If you don't see any errors you're good to go! - -#### pip - -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. +This will create a new directory called `custom-metrics` containing the +following files: ```shell -python -m venv venv -source venv/bin/activate -pip install -r requirements.txt +custom-metrics +├── README.md +├── custommetrics_example +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model and dataloading functions +└── pyproject.toml # Project metadata like dependencies and configs ``` -## Run Federated Learning with Custom Metrics +## Install dependencies and project -Afterwards you are ready to start the Flower server as well as the clients. You can simply start the server in a terminal as follows: +Install the dependencies defined in `pyproject.toml` as well as the `custommetrics_example` package. -```shell -python server.py +```bash +pip install -e . 
``` -Now you are ready to start the Flower clients which will participate in the learning. To do so simply open two more terminals and run the following command in each: +## Run the Example -```shell -python client.py -``` +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -Alternatively you can run all of it in one shell as follows: +### Run with the Simulation Engine -```shell -python server.py & -# Wait for a few seconds to give the server enough time to start, then: -python client.py & -python client.py +```bash +flwr run . ``` -or +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: -```shell -chmod +x run.sh -./run.sh +```bash +flwr run . --run-config num-server-rounds=5 ``` -You will see that Keras is starting a federated training. Have a look to the [Flower Quickstarter documentation](https://flower.ai/docs/quickstart-tensorflow.html) for a detailed explanation. You can add `steps_per_epoch=3` to `model.fit()` if you just want to evaluate that everything works without having to wait for the client-side training to finish (this will save you a lot of time during development). 
+### Run with the Deployment Engine -Running `run.sh` will result in the following output (after 3 rounds): - -```shell -INFO flwr 2024-01-17 17:45:23,794 | app.py:228 | app_fit: metrics_distributed { - 'accuracy': [(1, 0.10000000149011612), (2, 0.10000000149011612), (3, 0.3393000066280365)], - 'acc': [(1, 0.1), (2, 0.1), (3, 0.3393)], - 'rec': [(1, 0.1), (2, 0.1), (3, 0.3393)], - 'prec': [(1, 0.1), (2, 0.1), (3, 0.3393)], - 'f1': [(1, 0.10000000000000002), (2, 0.10000000000000002), (3, 0.3393)] -} -``` +> \[!NOTE\] +> An update to this example will show how to run this Flower application with the Deployment Engine and TLS certificates, or with Docker. diff --git a/examples/custom-metrics/client.py b/examples/custom-metrics/client.py deleted file mode 100644 index 6a194e92cdce..000000000000 --- a/examples/custom-metrics/client.py +++ /dev/null @@ -1,73 +0,0 @@ -import os - -import flwr as fl -import numpy as np -import tensorflow as tf -from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score -from flwr_datasets import FederatedDataset - - -# Make TensorFlow log less verbose -os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" - - -# Load model (MobileNetV2) -model = tf.keras.applications.MobileNetV2((32, 32, 3), classes=10, weights=None) -model.compile("adam", "sparse_categorical_crossentropy", metrics=["accuracy"]) - -# Load data with Flower Datasets (CIFAR-10) -fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10}) -train = fds.load_split("train") -test = fds.load_split("test") - -# Using Numpy format -train_np = train.with_format("numpy") -test_np = test.with_format("numpy") -x_train, y_train = train_np["img"], train_np["label"] -x_test, y_test = test_np["img"], test_np["label"] - - -# Method for extra learning metrics calculation -def eval_learning(y_test, y_pred): - acc = accuracy_score(y_test, y_pred) - rec = recall_score( - y_test, y_pred, average="micro" - ) # average argument required for multi-class - prec = 
precision_score(y_test, y_pred, average="micro") - f1 = f1_score(y_test, y_pred, average="micro") - return acc, rec, prec, f1 - - -# Define Flower client -class FlowerClient(fl.client.NumPyClient): - def get_parameters(self, config): - return model.get_weights() - - def fit(self, parameters, config): - model.set_weights(parameters) - model.fit(x_train, y_train, epochs=1, batch_size=32) - return model.get_weights(), len(x_train), {} - - def evaluate(self, parameters, config): - model.set_weights(parameters) - loss, accuracy = model.evaluate(x_test, y_test) - y_pred = model.predict(x_test) - y_pred = np.argmax(y_pred, axis=1).reshape( - -1, 1 - ) # MobileNetV2 outputs 10 possible classes, argmax returns just the most probable - - acc, rec, prec, f1 = eval_learning(y_test, y_pred) - output_dict = { - "accuracy": accuracy, # accuracy from tensorflow model.evaluate - "acc": acc, - "rec": rec, - "prec": prec, - "f1": f1, - } - return loss, len(x_test), output_dict - - -# Start Flower client -fl.client.start_client( - server_address="127.0.0.1:8080", client=FlowerClient().to_client() -) diff --git a/examples/custom-metrics/custommetrics_example/__init__.py b/examples/custom-metrics/custommetrics_example/__init__.py new file mode 100644 index 000000000000..28726f145fa4 --- /dev/null +++ b/examples/custom-metrics/custommetrics_example/__init__.py @@ -0,0 +1 @@ +"""custommetrics_example: A Flower / TensorFlow app for custom metrics.""" diff --git a/examples/custom-metrics/custommetrics_example/client_app.py b/examples/custom-metrics/custommetrics_example/client_app.py new file mode 100644 index 000000000000..babba6b0b9d6 --- /dev/null +++ b/examples/custom-metrics/custommetrics_example/client_app.py @@ -0,0 +1,68 @@ +"""custommetrics_example: A Flower / TensorFlow app for custom metrics.""" + +import os + +import numpy as np +from flwr.client import Client, ClientApp, NumPyClient +from flwr.common import Context + +from custommetrics_example.task import eval_learning, 
get_model, load_data + +# Make TensorFlow log less verbose +os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" + + +# Define Flower client +class FlowerClient(NumPyClient): + # pylint: disable=too-many-arguments + def __init__(self, model, x_train, y_train, x_test, y_test): + self.model = model + self.x_train = x_train + self.y_train = y_train + self.x_test = x_test + self.y_test = y_test + + def fit(self, parameters, config): + self.model.set_weights(parameters) + self.model.fit( + self.x_train, self.y_train, epochs=1, batch_size=32, verbose=False + ) + return self.model.get_weights(), len(self.x_train), {} + + def evaluate(self, parameters, config): + self.model.set_weights(parameters) + loss, accuracy = self.model.evaluate(self.x_test, self.y_test, verbose=False) + y_pred = self.model.predict(self.x_test, verbose=False) + y_pred = np.argmax(y_pred, axis=1).reshape( + -1, 1 + ) # MobileNetV2 outputs 10 possible classes, argmax returns just the most probable + + acc, rec, prec, f1 = eval_learning(self.y_test, y_pred) + output_dict = { + "accuracy": accuracy, # accuracy from tensorflow model.evaluate + "acc": acc, + "rec": rec, + "prec": prec, + "f1": f1, + } + return loss, len(self.x_test), output_dict + + +def client_fn(context: Context) -> Client: + """Construct a Client that will be run in a ClientApp.""" + + # Read the node_config to fetch data partition associated to this node + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + + # Load the train and test data + x_train, y_train, x_test, y_test = load_data(partition_id, num_partitions) + + model = get_model() + + # Return Client instance + return FlowerClient(model, x_train, y_train, x_test, y_test).to_client() + + +# Create ClientApp +app = ClientApp(client_fn=client_fn) diff --git a/examples/custom-metrics/custommetrics_example/server_app.py b/examples/custom-metrics/custommetrics_example/server_app.py new file mode 100644 index 000000000000..dda2db5cf2f4 
--- /dev/null +++ b/examples/custom-metrics/custommetrics_example/server_app.py @@ -0,0 +1,82 @@ +"""custommetrics_example: A Flower / TensorFlow app for custom metrics.""" + +import numpy as np +from flwr.common import Context, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg + +from custommetrics_example.task import get_model, get_parameters + + +# Define metrics aggregation function +def average_metrics(metrics): + # pylint: disable=C0301 + """Aggregate metrics from multiple clients by calculating mean averages. + + Parameters + ---------- + metrics : list + A list containing tuples, where each tuple represents metrics for a client. + Each tuple is structured as (num_examples, metric), where: + - num_examples (int) : The number of examples used to compute the metrics. + - metric (dict) : A dictionary containing custom metrics provided as + `output_dict` in the `evaluate` method from `client.py`. + + Returns + ------- + dict + A dictionary with the aggregated metrics, calculating mean averages. + The keys of the dictionary represent different metrics, including: + - 'accuracy': Mean accuracy calculated by TensorFlow. + - 'acc': Mean accuracy from scikit-learn. + - 'rec': Mean recall from scikit-learn. + - 'prec': Mean precision from scikit-learn. + - 'f1': Mean F1 score from scikit-learn. + + Note: If a weighted average is required, the `num_examples` parameter can be + leveraged. 
+ + Example: + Example `metrics` list for two clients after the last round: + [(10000, {'prec': 0.108, 'acc': 0.108, 'f1': 0.108, 'accuracy': 0.1080000028014183, 'rec': 0.108}), + (10000, {'f1': 0.108, 'rec': 0.108, 'accuracy': 0.1080000028014183, 'prec': 0.108, 'acc': 0.108})] + """ + + # Here num_examples are not taken into account by using _ + accuracies_tf = np.mean([metric["accuracy"] for _, metric in metrics]) + accuracies = np.mean([metric["acc"] for _, metric in metrics]) + recalls = np.mean([metric["rec"] for _, metric in metrics]) + precisions = np.mean([metric["prec"] for _, metric in metrics]) + f1s = np.mean([metric["f1"] for _, metric in metrics]) + + return { + "accuracy": accuracies_tf, + "acc": accuracies, + "rec": recalls, + "prec": precisions, + "f1": f1s, + } + + +def server_fn(context: Context) -> ServerAppComponents: + """Construct components that set the ServerApp behaviour.""" + + # Read from config + num_rounds = context.run_config["num-server-rounds"] + + model = get_model() + ndarrays = get_parameters(model) + global_model_init = ndarrays_to_parameters(ndarrays) + + # Define strategy and the custom aggregation function for the evaluation metrics + strategy = FedAvg( + evaluate_metrics_aggregation_fn=average_metrics, + initial_parameters=global_model_init, + ) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Create ServerApp +app = ServerApp(server_fn=server_fn) diff --git a/examples/custom-metrics/custommetrics_example/task.py b/examples/custom-metrics/custommetrics_example/task.py new file mode 100644 index 000000000000..8bc1874575f1 --- /dev/null +++ b/examples/custom-metrics/custommetrics_example/task.py @@ -0,0 +1,61 @@ +"""custommetrics_example: A Flower / TensorFlow app for custom metrics.""" + +from typing import Any + +import numpy as np +import tensorflow as tf +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner 
+from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score + +fds = None # Cache FederatedDataset + + +def load_data( + partition_id: int, num_partitions: int +) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: + """Load data with Flower Datasets (CIFAR-10).""" + # Only initialize `FederatedDataset` once + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="uoft-cs/cifar10", + partitioners={"train": partitioner}, + ) + partition = fds.load_partition(partition_id, "train") + partition.set_format("numpy") + + # Divide data on each node: 80% train, 20% test + partition = partition.train_test_split(test_size=0.2, seed=42) + x_train, y_train = partition["train"]["img"] / 255.0, partition["train"]["label"] + x_test, y_test = partition["test"]["img"] / 255.0, partition["test"]["label"] + + return x_train, y_train, x_test, y_test + + +def get_model(width: int = 32, height: int = 32, num_channels: int = 3) -> Any: + """Load model (MobileNetV2).""" + model = tf.keras.applications.MobileNetV2( + (width, height, num_channels), + classes=10, + weights=None, + ) + model.compile("adam", "sparse_categorical_crossentropy", metrics=["accuracy"]) + return model + + +# Method for extra learning metrics calculation +def eval_learning(y_test, y_pred): + """.""" + acc = accuracy_score(y_test, y_pred) + rec = recall_score( + y_test, y_pred, average="micro" + ) # average argument required for multi-class + prec = precision_score(y_test, y_pred, average="micro") + f1 = f1_score(y_test, y_pred, average="micro") + return acc, rec, prec, f1 + + +def get_parameters(model): + return model.get_weights() diff --git a/examples/custom-metrics/pyproject.toml b/examples/custom-metrics/pyproject.toml index 51c29e213d81..21997b620e7f 100644 --- a/examples/custom-metrics/pyproject.toml +++ b/examples/custom-metrics/pyproject.toml @@ -1,19 +1,39 @@ [build-system] -requires = 
["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] -name = "custom-metrics" -version = "0.1.0" -description = "Federated Learning with Flower and Custom Metrics" +[project] +name = "custommetrics_example" authors = [ - "The Flower Authors ", - "Gustavo Bertoli ", + { name = "The Flower Authors", email = "hello@flower.ai" }, + { name = "Gustavo Bertoli", email = "gubertoli@gmail.com" }, +] +version = "1.0.0" +description = "Federated Learning with Flower and Custom Metrics" +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]>=1.12.0", + "flwr-datasets[vision]>=0.3.0", + "scikit-learn>=1.2.2", + "tensorflow==2.12.0; sys_platform != 'darwin'", + "tensorflow-macos==2.12.0; sys_platform == 'darwin'", ] -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = ">=1.0,<2.0" -flwr-datasets = { version = "*", extras = ["vision"] } -scikit-learn = "^1.2.2" -tensorflow = "==2.12.0" +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "custommetrics_example.server_app:app" +clientapp = "custommetrics_example.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 diff --git a/examples/custom-metrics/requirements.txt b/examples/custom-metrics/requirements.txt deleted file mode 100644 index 69d867c5f287..000000000000 --- a/examples/custom-metrics/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -flwr>=1.0,<2.0 -flwr-datasets[vision] -scikit-learn>=1.2.2 -tensorflow==2.12.0 diff --git a/examples/custom-metrics/run.sh b/examples/custom-metrics/run.sh deleted file mode 100755 index c64f362086aa..000000000000 --- a/examples/custom-metrics/run.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -echo "Starting server" -python server.py & -sleep 3 # Sleep for 3s to
give the server enough time to start - -for i in `seq 0 1`; do - echo "Starting client $i" - python client.py & -done - -# This will allow you to use CTRL+C to stop all background processes -trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM -# Wait for all background processes to complete -wait diff --git a/examples/custom-metrics/server.py b/examples/custom-metrics/server.py deleted file mode 100644 index f8420bf51f16..000000000000 --- a/examples/custom-metrics/server.py +++ /dev/null @@ -1,58 +0,0 @@ -import flwr as fl -import numpy as np - - -# Define metrics aggregation function -def average_metrics(metrics): - """Aggregate metrics from multiple clients by calculating mean averages. - - Parameters: - - metrics (list): A list containing tuples, where each tuple represents metrics for a client. - Each tuple is structured as (num_examples, metric), where: - - num_examples (int): The number of examples used to compute the metrics. - - metric (dict): A dictionary containing custom metrics provided as `output_dict` - in the `evaluate` method from `client.py`. - - Returns: - A dictionary with the aggregated metrics, calculating mean averages. The keys of the - dictionary represent different metrics, including: - - 'accuracy': Mean accuracy calculated by TensorFlow. - - 'acc': Mean accuracy from scikit-learn. - - 'rec': Mean recall from scikit-learn. - - 'prec': Mean precision from scikit-learn. - - 'f1': Mean F1 score from scikit-learn. - - Note: If a weighted average is required, the `num_examples` parameter can be leveraged. 
- - Example: - Example `metrics` list for two clients after the last round: - [(10000, {'prec': 0.108, 'acc': 0.108, 'f1': 0.108, 'accuracy': 0.1080000028014183, 'rec': 0.108}), - (10000, {'f1': 0.108, 'rec': 0.108, 'accuracy': 0.1080000028014183, 'prec': 0.108, 'acc': 0.108})] - """ - - # Here num_examples are not taken into account by using _ - accuracies_tf = np.mean([metric["accuracy"] for _, metric in metrics]) - accuracies = np.mean([metric["acc"] for _, metric in metrics]) - recalls = np.mean([metric["rec"] for _, metric in metrics]) - precisions = np.mean([metric["prec"] for _, metric in metrics]) - f1s = np.mean([metric["f1"] for _, metric in metrics]) - - return { - "accuracy": accuracies_tf, - "acc": accuracies, - "rec": recalls, - "prec": precisions, - "f1": f1s, - } - - -# Define strategy and the custom aggregation function for the evaluation metrics -strategy = fl.server.strategy.FedAvg(evaluate_metrics_aggregation_fn=average_metrics) - - -# Start Flower server -fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=3), - strategy=strategy, -) diff --git a/examples/custom-mods/README.md b/examples/custom-mods/README.md index 6b03abcfbfe0..c2007eb323ae 100644 --- a/examples/custom-mods/README.md +++ b/examples/custom-mods/README.md @@ -1,3 +1,9 @@ +--- +tags: [mods, monitoring, app] +dataset: [CIFAR-10] +framework: [wandb, tensorboard] +--- + # Using custom mods 🧪 > 🧪 = This example covers experimental features that might change in future versions of Flower @@ -207,7 +213,7 @@ app = fl.client.ClientApp( client_fn=client_fn, mods=[ get_wandb_mod("Custom mods example"), - ], + ], ) ``` diff --git a/examples/custom-mods/client.py b/examples/custom-mods/client.py index 614daef6bcf6..d59a55f6bf3d 100644 --- a/examples/custom-mods/client.py +++ b/examples/custom-mods/client.py @@ -5,21 +5,13 @@ import flwr as fl import tensorflow as tf import wandb -from flwr.common import ConfigsRecord from flwr.client.typing 
import ClientAppCallable, Mod +from flwr.common import ConfigsRecord +from flwr.common.constant import MessageType from flwr.common.context import Context from flwr.common.message import Message -from flwr.common.constant import MessageType -from task import ( - Net, - DEVICE, - load_data, - get_parameters, - set_parameters, - train, - test, -) +from task import DEVICE, Net, get_parameters, load_data, set_parameters, test, train class WBLoggingFilter(logging.Filter): diff --git a/examples/custom-mods/pyproject.toml b/examples/custom-mods/pyproject.toml index e690e05bab8f..ff36398ef157 100644 --- a/examples/custom-mods/pyproject.toml +++ b/examples/custom-mods/pyproject.toml @@ -9,7 +9,7 @@ description = "Multi-Tenant Federated Learning with Flower and PyTorch" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr = { path = "../../", develop = true, extras = ["simulation"] } tensorboard = "2.16.2" torch = "1.13.1" diff --git a/examples/custom-mods/task.py b/examples/custom-mods/task.py index 276aace885df..331bd324061d 100644 --- a/examples/custom-mods/task.py +++ b/examples/custom-mods/task.py @@ -9,7 +9,6 @@ from torchvision.transforms import Compose, Normalize, ToTensor from tqdm import tqdm - warnings.filterwarnings("ignore", category=UserWarning) DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") diff --git a/examples/doc/source/.gitignore b/examples/doc/source/.gitignore index dd449725e188..73ee14e96f68 100644 --- a/examples/doc/source/.gitignore +++ b/examples/doc/source/.gitignore @@ -1 +1,2 @@ *.md +index.rst diff --git a/examples/doc/source/conf.py b/examples/doc/source/conf.py index b9c18fba2e18..722196316963 100644 --- a/examples/doc/source/conf.py +++ b/examples/doc/source/conf.py @@ -29,7 +29,7 @@ author = "The Flower Authors" # The full version, including alpha/beta/rc tags -release = "1.9.0" +release = "1.13.0" # -- General configuration 
--------------------------------------------------- @@ -65,6 +65,11 @@ redirects = { "quickstart-mxnet": "index.html", "mxnet-from-centralized-to-federated": "index.html", + "app-secure-aggregation": "flower-secure-aggregation.html", + "llm-flowertune": "flowertune-llm.html", + "vit-finetune": "flowertune-vit.html", + "simulation-pytorch": "quickstart-pytorch.html", + "simulation-tensorflow": "quickstart-tensorflow.html", } diff --git a/examples/embedded-devices/README.md b/examples/embedded-devices/README.md index f1c5931b823a..86f19399932d 100644 --- a/examples/embedded-devices/README.md +++ b/examples/embedded-devices/README.md @@ -1,3 +1,9 @@ +--- +tags: [basic, vision, fds] +dataset: [CIFAR-10, MNIST] +framework: [torch, tensorflow] +--- + # Federated Learning on Embedded Devices with Flower This example will show you how Flower makes it very easy to run Federated Learning workloads on edge devices. Here we'll be showing how to use NVIDIA Jetson devices and Raspberry Pi as Flower clients. You can run this example using either PyTorch or Tensorflow. The FL workload (i.e. model, dataset and training loop) is mostly borrowed from the [quickstart-pytorch](https://github.com/adap/flower/tree/main/examples/simulation-pytorch) and [quickstart-tensorflow](https://github.com/adap/flower/tree/main/examples/quickstart-tensorflow) examples. @@ -65,7 +71,7 @@ If you are working on this tutorial on your laptop or desktop, it can host the F - Install `pip`. In the terminal type: `sudo apt install python3-pip -y` - Now clone this directory. You just need to execute the `git clone` command shown at the top of this README.md on your device. - - Install Flower and your ML framework: We have prepared some convenient installation scripts that will install everything you need. You are free to install other versions of these ML frameworks to suit your needs. 
+ - Install Flower and your ML framework of choice: We have prepared some convenient installation scripts that will install everything you need. You are free to install other versions of these ML frameworks to suit your needs. - If you want your clients to use PyTorch: `pip3 install -r requirements_pytorch.txt` - If you want your clients to use TensorFlow: `pip3 install -r requirements_tf.txt` diff --git a/examples/embedded-devices/client_pytorch.py b/examples/embedded-devices/client_pytorch.py index 411052bfb1ea..0fee7a854d67 100644 --- a/examples/embedded-devices/client_pytorch.py +++ b/examples/embedded-devices/client_pytorch.py @@ -6,13 +6,12 @@ import torch import torch.nn as nn import torch.nn.functional as F +from flwr_datasets import FederatedDataset from torch.utils.data import DataLoader -from torchvision.transforms import Compose, Normalize, ToTensor from torchvision.models import mobilenet_v3_small +from torchvision.transforms import Compose, Normalize, ToTensor from tqdm import tqdm -from flwr_datasets import FederatedDataset - parser = argparse.ArgumentParser(description="Flower Embedded devices") parser.add_argument( "--server_address", diff --git a/examples/embedded-devices/client_tf.py b/examples/embedded-devices/client_tf.py index 3df75f76312b..524404b3ef8b 100644 --- a/examples/embedded-devices/client_tf.py +++ b/examples/embedded-devices/client_tf.py @@ -1,12 +1,11 @@ -import math import argparse +import math import warnings import flwr as fl import tensorflow as tf -from tensorflow import keras as keras - from flwr_datasets import FederatedDataset +from tensorflow import keras as keras parser = argparse.ArgumentParser(description="Flower Embedded devices") parser.add_argument( diff --git a/examples/embedded-devices/server.py b/examples/embedded-devices/server.py index 2a6194aa5088..49c72720f02a 100644 --- a/examples/embedded-devices/server.py +++ b/examples/embedded-devices/server.py @@ -4,7 +4,6 @@ import flwr as fl from flwr.common import 
Metrics - parser = argparse.ArgumentParser(description="Flower Embedded devices") parser.add_argument( "--server_address", diff --git a/examples/federated-kaplan-meier-fitter/README.md b/examples/federated-kaplan-meier-fitter/README.md index 1569467d6f82..cc68a331bbba 100644 --- a/examples/federated-kaplan-meier-fitter/README.md +++ b/examples/federated-kaplan-meier-fitter/README.md @@ -1,6 +1,12 @@ -# Flower Example using KaplanMeierFitter +--- +tags: [estimator, medical] +dataset: [Waltons] +framework: [lifelines] +--- -This is an introductory example on **federated survival analysis** using [Flower](https://flower.ai/) +# Federated Survival Analysis with Flower and KaplanMeierFitter + +This is an introductory example of **federated survival analysis** using [Flower](https://flower.ai/) and [lifelines](https://lifelines.readthedocs.io/en/stable/index.html) library. The aim of this example is to estimate the survival function using the @@ -19,86 +25,60 @@ the group it comes from therefore to simulate the division that might occur. Survival Function

-## Project Setup +## Set up the project -Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: +### Clone the project -```shell -$ git clone --depth=1 https://github.com/adap/flower.git _tmp && mv _tmp/examples/federated-kaplan-meier-fitter . && rm -rf _tmp && cd federated-kaplan-meier-fitter -``` - -This will create a new directory called `federated-kaplan-meier-fitter` containing the following files: +Start by cloning the example project: ```shell --- pyproject.toml --- requirements.txt --- client.py --- server.py --- centralized.py --- README.md -``` - -### Installing Dependencies - -Project dependencies (such as `lifelines` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. - -#### Poetry - -```shell -poetry install -poetry shell -``` - -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: - -```shell -poetry run python3 -c "import flwr" +$ git clone --depth=1 https://github.com/adap/flower.git _tmp && mv _tmp/examples/federated-kaplan-meier-fitter . && rm -rf _tmp && cd federated-kaplan-meier-fitter ``` -If you don't see any errors you're good to go! - -#### pip - -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. 
+This will create a new directory called `federated-kaplan-meier-fitter` with the following structure: ```shell -pip install -r requirements.txt +federated-kaplan-meier-fitter +├── examplefkm +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -## Run Federated Survival Analysis with Flower and lifelines's KaplanMeierFitter +### Install dependencies and project -### Start the long-running Flower server (SuperLink) +Install the dependencies defined in `pyproject.toml` as well as the `examplefkm` package. ```bash -flower-superlink --insecure +pip install -e . ``` -### Start the long-running Flower client (SuperNode) - -In a new terminal window, start the first long-running Flower client: +## Run the project -```bash -flower-client-app client:node_1_app --insecure -``` +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -In yet another new terminal window, start the second long-running Flower client: +### Run with the Simulation Engine ```bash -flower-client-app client:node_2_app --insecure +flwr run . ``` -### Run the Flower App - -With both the long-running server (SuperLink) and two clients (SuperNode) up and running, we can now run the actual Flower App: +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: ```bash -flower-server-app server:app --insecure +flwr run . 
--run-config "num-server-rounds=5 learning-rate=0.05" ``` -You will see that the server is printing survival function, median survival time and saves the plot with the survival function. - You can also check that the results match the centralized version. ```shell $ python3 centralized.py ``` + +### Run with the Deployment Engine + +> \[!NOTE\] +> An update to this example will show how to run this Flower application with the Deployment Engine and TLS certificates, or with Docker. diff --git a/examples/federated-kaplan-meier-fitter/examplefkm/__init__.py b/examples/federated-kaplan-meier-fitter/examplefkm/__init__.py new file mode 100644 index 000000000000..794b6a6600e9 --- /dev/null +++ b/examples/federated-kaplan-meier-fitter/examplefkm/__init__.py @@ -0,0 +1 @@ +"""federated-kaplan-meier-fitter.""" diff --git a/examples/federated-kaplan-meier-fitter/client.py b/examples/federated-kaplan-meier-fitter/examplefkm/client_app.py similarity index 54% rename from examples/federated-kaplan-meier-fitter/client.py rename to examples/federated-kaplan-meier-fitter/examplefkm/client_app.py index 948492efc575..ea744af85be8 100644 --- a/examples/federated-kaplan-meier-fitter/client.py +++ b/examples/federated-kaplan-meier-fitter/examplefkm/client_app.py @@ -1,11 +1,13 @@ +"""examplefkm: A Flower / Lifelines app.""" + from typing import Dict, List, Tuple import flwr as fl import numpy as np -from datasets import Dataset -from flwr.common import NDArray, NDArrays -from flwr_datasets.partitioner import NaturalIdPartitioner -from lifelines.datasets import load_waltons +from flwr.client import Client, ClientApp +from flwr.common import NDArray, NDArrays, Context + +from examplefkm.task import load_partition class FlowerClient(fl.client.NumPyClient): @@ -40,26 +42,17 @@ def fit( ) -# Prepare data -X = load_waltons() -partitioner = NaturalIdPartitioner(partition_by="group") -partitioner.dataset = Dataset.from_pandas(X) - +def client_fn(context: Context) -> Client: + """Construct a
Client that will be run in a ClientApp. -def get_client_fn(partition_id: int): - def client_fn(cid: str): - partition = partitioner.load_partition(partition_id).to_pandas() - events = partition["E"].values - times = partition["T"].values - return FlowerClient(times=times, events=events).to_client() - - return client_fn + You can use settings in `context.run_config` to parameterize the + construction of your Client. You could use the `context.node_config` to, for + example, indicate which dataset to load (e.g accesing the partition-id). + """ + partition_id = context.node_config["partition-id"] + times, events = load_partition(partition_id) + return FlowerClient(times=times, events=events).to_client() -# Run via `flower-client-app client:app` -node_1_app = fl.client.ClientApp( - client_fn=get_client_fn(0), -) -node_2_app = fl.client.ClientApp( - client_fn=get_client_fn(1), -) +# Flower ClientApp +app = ClientApp(client_fn=client_fn) diff --git a/examples/federated-kaplan-meier-fitter/server.py b/examples/federated-kaplan-meier-fitter/examplefkm/server_app.py similarity index 82% rename from examples/federated-kaplan-meier-fitter/server.py rename to examples/federated-kaplan-meier-fitter/examplefkm/server_app.py index 141504ab59c0..2515e8ea852d 100644 --- a/examples/federated-kaplan-meier-fitter/server.py +++ b/examples/federated-kaplan-meier-fitter/examplefkm/server_app.py @@ -14,20 +14,20 @@ # ============================================================================== """Strategy that supports many univariate fitters from lifelines library.""" -from typing import Dict, List, Optional, Tuple, Union, Any +from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np -import flwr as fl -import matplotlib.pyplot as plt from flwr.common import ( + EvaluateIns, + EvaluateRes, FitIns, + FitRes, Parameters, Scalar, - EvaluateRes, - EvaluateIns, - FitRes, parameters_to_ndarrays, + Context, ) +from flwr.server import ServerApp, ServerConfig, 
ServerAppComponents from flwr.server.client_manager import ClientManager from flwr.server.client_proxy import ClientProxy from flwr.server.strategy import Strategy @@ -66,7 +66,7 @@ def configure_fit( config = {} fit_ins = FitIns(parameters, config) clients = client_manager.sample( - num_clients=client_manager.num_available(), + num_clients=self._min_num_clients, min_num_clients=self._min_num_clients, ) return [(client, fit_ins) for client in clients] @@ -99,9 +99,6 @@ def aggregate_fit( self.fitter.fit(sorted_times, sorted_events) print("Survival function:") print(self.fitter.survival_function_) - self.fitter.plot_survival_function() - plt.title("Survival function of fruit flies (Walton's data)", fontsize=16) - plt.savefig("./_static/survival_function_federated.png", dpi=200) print("Mean survival time:") print(self.fitter.median_survival_time_) return None, {} @@ -136,10 +133,25 @@ def configure_evaluate( return [] -fitter = KaplanMeierFitter() # You can choose other method that work on E, T data -strategy = EventTimeFitterStrategy(min_num_clients=2, fitter=fitter) +def server_fn(context: Context) -> ServerAppComponents: + """Construct components that set the ServerApp behaviour. -app = fl.server.ServerApp( - config=fl.server.ServerConfig(num_rounds=1), - strategy=strategy, -) + You can use settings in `context.run_config` to parameterize the + construction of all elements (e.g the strategy or the number of rounds) + wrapped in the returned ServerAppComponents object. 
+ """ + + # Define the strategy + fitter = KaplanMeierFitter() # You can choose other method that work on E, T data + min_num_clients = context.run_config["min-num-clients"] + strategy = EventTimeFitterStrategy(min_num_clients=min_num_clients, fitter=fitter) + + # Construct ServerConfig + num_rounds = context.run_config["num-server-rounds"] + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Create ServerApp +app = ServerApp(server_fn=server_fn) diff --git a/examples/federated-kaplan-meier-fitter/examplefkm/task.py b/examples/federated-kaplan-meier-fitter/examplefkm/task.py new file mode 100644 index 000000000000..d76dc79c6724 --- /dev/null +++ b/examples/federated-kaplan-meier-fitter/examplefkm/task.py @@ -0,0 +1,17 @@ +"""examplefkm: A Flower / Lifelines app.""" + +from lifelines.datasets import load_waltons + +from flwr_datasets.partitioner import NaturalIdPartitioner +from datasets import Dataset + +X = load_waltons() + + +def load_partition(partition_id: int): + partitioner = NaturalIdPartitioner(partition_by="group") + partitioner.dataset = Dataset.from_pandas(X) + partition = partitioner.load_partition(partition_id).to_pandas() + times = partition["T"].values + events = partition["E"].values + return times, events diff --git a/examples/federated-kaplan-meier-fitter/pyproject.toml b/examples/federated-kaplan-meier-fitter/pyproject.toml index 8fe354ffb750..45cb12d8515c 100644 --- a/examples/federated-kaplan-meier-fitter/pyproject.toml +++ b/examples/federated-kaplan-meier-fitter/pyproject.toml @@ -1,18 +1,35 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] +[project] name = "federated-kaplan-meier-fitter" -version = "0.1.0" +version = "1.0.0" description = "Federated Kaplan Meier Fitter with Flower" -authors = ["The Flower Authors "] -maintainers = ["The Flower Authors "] 
+license = "Apache-2.0" +dependencies = [ + "flwr[simulation]>=1.12.0", + "flwr-datasets>=0.3.0", + "numpy>=1.23.2", + "pandas>=2.0.0", + "lifelines>=0.28.0", +] +[tool.hatch.build.targets.wheel] +packages = ["."] -[tool.poetry.dependencies] -python = ">=3.9,<3.11" -flwr-nightly = "*" -flwr-datasets = ">=0.0.2,<1.0.0" -numpy = ">=1.23.2" -pandas = ">=2.0.0" -lifelines = ">=0.28.0" +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "examplefkm.server_app:app" +clientapp = "examplefkm.client_app:app" + +[tool.flwr.app.config] +min-num-clients = 2 +num-server-rounds = 1 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 2 diff --git a/examples/federated-kaplan-meier-fitter/requirements.txt b/examples/federated-kaplan-meier-fitter/requirements.txt deleted file mode 100644 index cc8146545c7b..000000000000 --- a/examples/federated-kaplan-meier-fitter/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -flwr-nightly -flwr-datasets>=0.0.2, <1.0.0 -numpy>=1.23.2 -pandas>=2.0.0 -lifelines>=0.28.0 diff --git a/examples/fl-dp-sa/README.md b/examples/fl-dp-sa/README.md index 47eedb70a2b8..61a6c80f3556 100644 --- a/examples/fl-dp-sa/README.md +++ b/examples/fl-dp-sa/README.md @@ -1,22 +1,63 @@ -# fl_dp_sa +--- +tags: [DP, SecAgg, vision, fds] +dataset: [MNIST] +framework: [torch, torchvision] +--- -This is a simple example that utilizes central differential privacy with client-side fixed clipping and secure aggregation. -Note: This example is designed for a small number of rounds and is intended for demonstration purposes. +# Flower Example on MNIST with Differential Privacy and Secure Aggregation -## Install dependencies +This example demonstrates a federated learning setup using the Flower, incorporating central differential privacy (DP) with client-side fixed clipping and secure aggregation (SA). It is intended for a small number of rounds for demonstration purposes. 
-```bash -# Using pip -pip install . +This example is similar to the [quickstart-pytorch example](https://github.com/adap/flower/tree/main/examples/quickstart-pytorch) and extends it by integrating central differential privacy and secure aggregation. For more details on differential privacy and secure aggregation in Flower, please refer to the documentation [here](https://flower.ai/docs/framework/how-to-use-differential-privacy.html) and [here](https://flower.ai/docs/framework/contributor-ref-secure-aggregation-protocols.html). + +## Set up the project + +### Clone the project -# Or using Poetry -poetry install +Start by cloning the example project: + +```shell +git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/fl-dp-sa . && rm -rf flower && cd fl-dp-sa ``` -## Run +This will create a new directory called `fl-dp-sa` containing the following files: -The example uses the MNIST dataset with a total of 100 clients, with 20 clients sampled in each round. The hyperparameters for DP and SecAgg are specified in `server.py`. +```shell +fl-dp-sa +├── fl_dp_sa +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training, and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md +``` + +### Install dependencies and project + +Install the dependencies defined in `pyproject.toml` as well as the `fl_dp_sa` package. ```shell -flower-simulation --server-app fl_dp_sa.server:app --client-app fl_dp_sa.client:app --num-supernodes 100 +# From a new python environment, run: +pip install -e . ``` + +## Run the project + +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. 
+ +### Run with the Simulation Engine + +```bash +flwr run . +``` + +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: + +```bash +flwr run . --run-config "noise-multiplier=0.1 clipping-norm=5" +``` + +### Run with the Deployment Engine + +> \[!NOTE\] +> An update to this example will show how to run this Flower project with the Deployment Engine and TLS certificates, or with Docker. diff --git a/examples/fl-dp-sa/fl_dp_sa/__init__.py b/examples/fl-dp-sa/fl_dp_sa/__init__.py index 741260348ab8..c5c9a7e9581c 100644 --- a/examples/fl-dp-sa/fl_dp_sa/__init__.py +++ b/examples/fl-dp-sa/fl_dp_sa/__init__.py @@ -1 +1 @@ -"""fl_dp_sa: A Flower / PyTorch app.""" +"""fl_dp_sa: Flower Example using Differential Privacy and Secure Aggregation.""" diff --git a/examples/fl-dp-sa/fl_dp_sa/client.py b/examples/fl-dp-sa/fl_dp_sa/client.py deleted file mode 100644 index 104264158833..000000000000 --- a/examples/fl-dp-sa/fl_dp_sa/client.py +++ /dev/null @@ -1,43 +0,0 @@ -"""fl_dp_sa: A Flower / PyTorch app.""" - -from flwr.client import ClientApp, NumPyClient -from flwr.client.mod import fixedclipping_mod, secaggplus_mod - -from fl_dp_sa.task import DEVICE, Net, get_weights, load_data, set_weights, test, train - - -# Load model and data (simple CNN, CIFAR-10) -net = Net().to(DEVICE) - - -# Define FlowerClient and client_fn -class FlowerClient(NumPyClient): - def __init__(self, trainloader, testloader) -> None: - self.trainloader = trainloader - self.testloader = testloader - - def fit(self, parameters, config): - set_weights(net, parameters) - results = train(net, self.trainloader, self.testloader, epochs=1, device=DEVICE) - return get_weights(net), len(self.trainloader.dataset), results - - def evaluate(self, parameters, config): - set_weights(net, parameters) - loss, accuracy = test(net, self.testloader) - return loss, len(self.testloader.dataset), {"accuracy": accuracy} - - -def client_fn(cid: str): - 
"""Create and return an instance of Flower `Client`.""" - trainloader, testloader = load_data(partition_id=int(cid)) - return FlowerClient(trainloader, testloader).to_client() - - -# Flower ClientApp -app = ClientApp( - client_fn=client_fn, - mods=[ - secaggplus_mod, - fixedclipping_mod, - ], -) diff --git a/examples/fl-dp-sa/fl_dp_sa/client_app.py b/examples/fl-dp-sa/fl_dp_sa/client_app.py new file mode 100644 index 000000000000..5630d4f4d14f --- /dev/null +++ b/examples/fl-dp-sa/fl_dp_sa/client_app.py @@ -0,0 +1,50 @@ +"""fl_dp_sa: Flower Example using Differential Privacy and Secure Aggregation.""" + +import torch +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context +from flwr.client.mod import fixedclipping_mod, secaggplus_mod + +from fl_dp_sa.task import Net, get_weights, load_data, set_weights, test, train + + +class FlowerClient(NumPyClient): + def __init__(self, trainloader, testloader) -> None: + self.net = Net() + self.trainloader = trainloader + self.testloader = testloader + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + def fit(self, parameters, config): + set_weights(self.net, parameters) + results = train( + self.net, + self.trainloader, + self.testloader, + epochs=1, + device=self.device, + ) + return get_weights(self.net), len(self.trainloader.dataset), results + + def evaluate(self, parameters, config): + set_weights(self.net, parameters) + loss, accuracy = test(self.net, self.testloader, self.device) + return loss, len(self.testloader.dataset), {"accuracy": accuracy} + + +def client_fn(context: Context): + partition_id = context.node_config["partition-id"] + trainloader, testloader = load_data( + partition_id=partition_id, num_partitions=context.node_config["num-partitions"] + ) + return FlowerClient(trainloader, testloader).to_client() + + +# Flower ClientApp +app = ClientApp( + client_fn=client_fn, + mods=[ + secaggplus_mod, + fixedclipping_mod, + ], +) diff --git 
a/examples/fl-dp-sa/fl_dp_sa/server.py b/examples/fl-dp-sa/fl_dp_sa/server_app.py similarity index 52% rename from examples/fl-dp-sa/fl_dp_sa/server.py rename to examples/fl-dp-sa/fl_dp_sa/server_app.py index f7da75997e98..1704b4942ff8 100644 --- a/examples/fl-dp-sa/fl_dp_sa/server.py +++ b/examples/fl-dp-sa/fl_dp_sa/server_app.py @@ -1,23 +1,22 @@ -"""fl_dp_sa: A Flower / PyTorch app.""" +"""fl_dp_sa: Flower Example using Differential Privacy and Secure Aggregation.""" from typing import List, Tuple -from flwr.server import Driver, LegacyContext, ServerApp, ServerConfig from flwr.common import Context, Metrics, ndarrays_to_parameters -from flwr.server.strategy import ( - DifferentialPrivacyClientSideFixedClipping, - FedAvg, +from flwr.server import ( + Driver, + LegacyContext, + ServerApp, + ServerConfig, ) +from flwr.server.strategy import DifferentialPrivacyClientSideFixedClipping, FedAvg from flwr.server.workflow import DefaultWorkflow, SecAggPlusWorkflow from fl_dp_sa.task import Net, get_weights -# Define metric aggregation function def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: examples = [num_examples for num_examples, _ in metrics] - - # Multiply accuracy of each client by number of examples used train_losses = [num_examples * m["train_loss"] for num_examples, m in metrics] train_accuracies = [ num_examples * m["train_accuracy"] for num_examples, m in metrics @@ -25,7 +24,6 @@ def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: val_losses = [num_examples * m["val_loss"] for num_examples, m in metrics] val_accuracies = [num_examples * m["val_accuracy"] for num_examples, m in metrics] - # Aggregate and return custom metric (weighted average) return { "train_loss": sum(train_losses) / sum(examples), "train_accuracy": sum(train_accuracies) / sum(examples), @@ -34,33 +32,39 @@ def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: } -# Initialize model parameters -ndarrays = get_weights(Net()) -parameters = 
ndarrays_to_parameters(ndarrays) +app = ServerApp() -# Define strategy -strategy = FedAvg( - fraction_fit=0.2, - fraction_evaluate=0.0, # Disable evaluation for demo purpose - min_fit_clients=20, - min_available_clients=20, - fit_metrics_aggregation_fn=weighted_average, - initial_parameters=parameters, -) -strategy = DifferentialPrivacyClientSideFixedClipping( - strategy, noise_multiplier=0.2, clipping_norm=10, num_sampled_clients=20 -) +@app.main() +def main(driver: Driver, context: Context) -> None: + # Initialize global model + model_weights = get_weights(Net()) + parameters = ndarrays_to_parameters(model_weights) + + # Note: The fraction_fit value is configured based on the DP hyperparameter `num-sampled-clients`. + strategy = FedAvg( + fraction_fit=0.2, + fraction_evaluate=0.0, + min_fit_clients=20, + fit_metrics_aggregation_fn=weighted_average, + initial_parameters=parameters, + ) -app = ServerApp() + noise_multiplier = context.run_config["noise-multiplier"] + clipping_norm = context.run_config["clipping-norm"] + num_sampled_clients = context.run_config["num-sampled-clients"] + strategy = DifferentialPrivacyClientSideFixedClipping( + strategy, + noise_multiplier=noise_multiplier, + clipping_norm=clipping_norm, + num_sampled_clients=num_sampled_clients, + ) -@app.main() -def main(driver: Driver, context: Context) -> None: # Construct the LegacyContext context = LegacyContext( - state=context.state, + context=context, config=ServerConfig(num_rounds=3), strategy=strategy, ) @@ -68,8 +72,8 @@ def main(driver: Driver, context: Context) -> None: # Create the train/evaluate workflow workflow = DefaultWorkflow( fit_workflow=SecAggPlusWorkflow( - num_shares=7, - reconstruction_threshold=4, + num_shares=context.run_config["num-shares"], + reconstruction_threshold=context.run_config["reconstruction-threshold"], ) ) diff --git a/examples/fl-dp-sa/fl_dp_sa/task.py b/examples/fl-dp-sa/fl_dp_sa/task.py index 6a94571a2369..c145cebe1378 100644 --- 
a/examples/fl-dp-sa/fl_dp_sa/task.py +++ b/examples/fl-dp-sa/fl_dp_sa/task.py @@ -1,25 +1,22 @@ -"""fl_dp_sa: A Flower / PyTorch app.""" +"""fl_dp_sa: Flower Example using Differential Privacy and Secure Aggregation.""" from collections import OrderedDict -from logging import INFO -from flwr_datasets import FederatedDataset import torch import torch.nn as nn import torch.nn.functional as F -from flwr.common.logger import log +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner from torch.utils.data import DataLoader from torchvision.transforms import Compose, Normalize, ToTensor -DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") +fds = None # Cache FederatedDataset class Net(nn.Module): - """Model.""" - def __init__(self) -> None: - super(Net, self).__init__() + super().__init__() self.conv1 = nn.Conv2d(1, 6, 3, padding=1) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) @@ -37,9 +34,16 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: return self.fc3(x) -def load_data(partition_id): +def load_data(partition_id: int, num_partitions: int): """Load partition MNIST data.""" - fds = FederatedDataset(dataset="mnist", partitioners={"train": 100}) + + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="ylecun/mnist", + partitioners={"train": partitioner}, + ) partition = fds.load_partition(partition_id) # Divide data on each node: 80% train, 20% test partition_train_test = partition.train_test_split(test_size=0.2, seed=42) @@ -71,8 +75,8 @@ def train(net, trainloader, valloader, epochs, device): loss.backward() optimizer.step() - train_loss, train_acc = test(net, trainloader) - val_loss, val_acc = test(net, valloader) + train_loss, train_acc = test(net, trainloader, device) + val_loss, val_acc = test(net, valloader, device) results = { "train_loss": train_loss, @@ -83,17 +87,17 @@ def train(net, trainloader, 
valloader, epochs, device): return results -def test(net, testloader): +def test(net, testloader, device): """Validate the model on the test set.""" - net.to(DEVICE) + net.to(device) criterion = torch.nn.CrossEntropyLoss() correct, loss = 0, 0.0 with torch.no_grad(): for batch in testloader: - images = batch["image"].to(DEVICE) - labels = batch["label"].to(DEVICE) - outputs = net(images.to(DEVICE)) - labels = labels.to(DEVICE) + images = batch["image"].to(device) + labels = batch["label"].to(device) + outputs = net(images.to(device)) + labels = labels.to(device) loss += criterion(outputs, labels).item() correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() accuracy = correct / len(testloader.dataset) diff --git a/examples/fl-dp-sa/flower.toml b/examples/fl-dp-sa/flower.toml deleted file mode 100644 index ea2e98206791..000000000000 --- a/examples/fl-dp-sa/flower.toml +++ /dev/null @@ -1,13 +0,0 @@ -[project] -name = "fl_dp_sa" -version = "1.0.0" -description = "" -license = "Apache-2.0" -authors = [ - "The Flower Authors ", -] -readme = "README.md" - -[flower.components] -serverapp = "fl_dp_sa.server:app" -clientapp = "fl_dp_sa.client:app" diff --git a/examples/fl-dp-sa/pyproject.toml b/examples/fl-dp-sa/pyproject.toml index 1ca343b072d9..ccbc56bfd1a7 100644 --- a/examples/fl-dp-sa/pyproject.toml +++ b/examples/fl-dp-sa/pyproject.toml @@ -1,21 +1,40 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] +[project] name = "fl-dp-sa" -version = "0.1.0" -description = "" +version = "1.0.0" +description = "Central Differential Privacy and Secure Aggregation in Flower" license = "Apache-2.0" -authors = [ - "The Flower Authors ", +dependencies = [ + "flwr[simulation]>=1.12.0", + "flwr-datasets[vision]>=0.3.0", + "torch==2.2.1", + "torchvision==0.17.1", ] -readme = "README.md" -[tool.poetry.dependencies] -python = "^3.9" -# Mandatory dependencies 
-flwr = { version = "^1.8.0", extras = ["simulation"] } -flwr-datasets = { version = "0.0.2", extras = ["vision"] } -torch = "2.2.1" -torchvision = "0.17.1" +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "fl_dp_sa.server_app:app" +clientapp = "fl_dp_sa.client_app:app" + +[tool.flwr.app.config] +# Parameters for the DP +noise-multiplier = 0.2 +clipping-norm = 10 +num-sampled-clients = 20 +# Parameters for the SecAgg+ protocol +num-shares = 7 +reconstruction-threshold = 4 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 100 diff --git a/examples/fl-dp-sa/requirements.txt b/examples/fl-dp-sa/requirements.txt deleted file mode 100644 index f20b9d71e339..000000000000 --- a/examples/fl-dp-sa/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -flwr[simulation]>=1.8.0 -flwr-datasets[vision]==0.0.2 -torch==2.2.1 -torchvision==0.17.1 diff --git a/examples/fl-tabular/README.md b/examples/fl-tabular/README.md new file mode 100644 index 000000000000..184c8e65fec7 --- /dev/null +++ b/examples/fl-tabular/README.md @@ -0,0 +1,66 @@ +--- +tags: [basic, tabular, fds] +dataset: [Adult Census Income] +framework: [scikit-learn, torch] +--- + +# Flower Example on Adult Census Income Tabular Dataset + +This code exemplifies a federated learning setup using the Flower framework on the ["Adult Census Income"](https://huggingface.co/datasets/scikit-learn/adult-census-income) tabular dataset. The "Adult Census Income" dataset contains demographic information such as age, education, occupation, etc., with the target attribute being income level (\<=50K or >50K). The dataset is partitioned into subsets, simulating a federated environment with 5 clients, each holding a distinct portion of the data. Categorical variables are one-hot encoded, and the data is split into training and testing sets. 
Federated learning is conducted using the FedAvg strategy for 5 rounds. + +This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to download, partition and preprocess the dataset. + +## Set up the project + +### Clone the project + +Start by cloning the example project: + +```shell +git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/fl-tabular . && rm -rf flower && cd fl-tabular +``` + +This will create a new directory called `fl-tabular` containing the following files: + +```shell +fl-tabular +├── fltabular +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md +``` + +### Install dependencies and project + +Install the dependencies defined in `pyproject.toml` as well as the `fltabular` package. + +```shell +# From a new python environment, run: +pip install -e . +``` + +## Run the Example + +You can run your `ClientApp` and `ServerApp` in both _simulation_ and +_deployment_ mode without making changes to the code. If you are starting +with Flower, we recommend using the _simulation_ mode as it requires +fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. + +### Run with the Simulation Engine + +```bash +flwr run . +``` + +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: + +```bash +flwr run . --run-config num-server-rounds=10 +``` + +### Run with the Deployment Engine + +> \[!NOTE\] +> An update to this example will show how to run this Flower application with the Deployment Engine and TLS certificates, or with Docker. 
diff --git a/examples/fl-tabular/fltabular/__init__.py b/examples/fl-tabular/fltabular/__init__.py new file mode 100644 index 000000000000..075247fb2f4f --- /dev/null +++ b/examples/fl-tabular/fltabular/__init__.py @@ -0,0 +1 @@ +"""fltabular: Flower Example on Adult Census Income Tabular Dataset.""" diff --git a/examples/fl-tabular/fltabular/client_app.py b/examples/fl-tabular/fltabular/client_app.py new file mode 100644 index 000000000000..f3fd0bffb6c0 --- /dev/null +++ b/examples/fl-tabular/fltabular/client_app.py @@ -0,0 +1,43 @@ +"""fltabular: Flower Example on Adult Census Income Tabular Dataset.""" + +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context + +from fltabular.task import ( + IncomeClassifier, + evaluate, + get_weights, + load_data, + set_weights, + train, +) + + +class FlowerClient(NumPyClient): + def __init__(self, net, trainloader, testloader): + self.net = net + self.trainloader = trainloader + self.testloader = testloader + + def fit(self, parameters, config): + set_weights(self.net, parameters) + train(self.net, self.trainloader) + return get_weights(self.net), len(self.trainloader), {} + + def evaluate(self, parameters, config): + set_weights(self.net, parameters) + loss, accuracy = evaluate(self.net, self.testloader) + return loss, len(self.testloader), {"accuracy": accuracy} + + +def client_fn(context: Context): + partition_id = context.node_config["partition-id"] + + train_loader, test_loader = load_data( + partition_id=partition_id, num_partitions=context.node_config["num-partitions"] + ) + net = IncomeClassifier() + return FlowerClient(net, train_loader, test_loader).to_client() + + +app = ClientApp(client_fn=client_fn) diff --git a/examples/fl-tabular/fltabular/server_app.py b/examples/fl-tabular/fltabular/server_app.py new file mode 100644 index 000000000000..7b858700308f --- /dev/null +++ b/examples/fl-tabular/fltabular/server_app.py @@ -0,0 +1,32 @@ +"""fltabular: Flower Example on Adult Census Income 
Tabular Dataset.""" + +from flwr.common import ndarrays_to_parameters +from flwr.server import ServerApp, ServerConfig, ServerAppComponents +from flwr.server.strategy import FedAvg +from flwr.common import Context + +from fltabular.task import IncomeClassifier, get_weights + + +def weighted_average(metrics): + accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + return {"accuracy": sum(accuracies) / sum(examples)} + + +def server_fn(context: Context) -> ServerAppComponents: + net = IncomeClassifier() + params = ndarrays_to_parameters(get_weights(net)) + + strategy = FedAvg( + initial_parameters=params, + evaluate_metrics_aggregation_fn=weighted_average, + ) + num_rounds = context.run_config["num-server-rounds"] + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(config=config, strategy=strategy) + + +app = ServerApp(server_fn=server_fn) diff --git a/examples/fl-tabular/fltabular/task.py b/examples/fl-tabular/fltabular/task.py new file mode 100644 index 000000000000..f6e9c1a75adc --- /dev/null +++ b/examples/fl-tabular/fltabular/task.py @@ -0,0 +1,123 @@ +"""fltabular: Flower Example on Adult Census Income Tabular Dataset.""" + +from collections import OrderedDict + +import torch +import torch.nn as nn +import torch.optim as optim +from flwr_datasets import FederatedDataset +from sklearn.compose import ColumnTransformer +from sklearn.model_selection import train_test_split +from sklearn.pipeline import Pipeline +from sklearn.preprocessing import OrdinalEncoder, StandardScaler +from torch.utils.data import DataLoader, TensorDataset +from flwr_datasets.partitioner import IidPartitioner + +fds = None # Cache FederatedDataset + + +def load_data(partition_id: int, num_partitions: int): + + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="scikit-learn/adult-census-income", + 
partitioners={"train": partitioner}, + ) + + dataset = fds.load_partition(partition_id, "train").with_format("pandas")[:] + + dataset.dropna(inplace=True) + + categorical_cols = dataset.select_dtypes(include=["object"]).columns + ordinal_encoder = OrdinalEncoder() + dataset[categorical_cols] = ordinal_encoder.fit_transform(dataset[categorical_cols]) + + X = dataset.drop("income", axis=1) + y = dataset["income"] + + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=0.2, random_state=42 + ) + + numeric_features = X.select_dtypes(include=["float64", "int64"]).columns + numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())]) + + preprocessor = ColumnTransformer( + transformers=[("num", numeric_transformer, numeric_features)] + ) + + X_train = preprocessor.fit_transform(X_train) + X_test = preprocessor.transform(X_test) + + X_train_tensor = torch.tensor(X_train, dtype=torch.float32) + X_test_tensor = torch.tensor(X_test, dtype=torch.float32) + y_train_tensor = torch.tensor(y_train.values, dtype=torch.float32).view(-1, 1) + y_test_tensor = torch.tensor(y_test.values, dtype=torch.float32).view(-1, 1) + + train_dataset = TensorDataset(X_train_tensor, y_train_tensor) + test_dataset = TensorDataset(X_test_tensor, y_test_tensor) + train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True) + test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False) + + return train_loader, test_loader + + +class IncomeClassifier(nn.Module): + def __init__(self, input_dim: int = 14): + super(IncomeClassifier, self).__init__() + self.layer1 = nn.Linear(input_dim, 128) + self.layer2 = nn.Linear(128, 64) + self.output = nn.Linear(64, 1) + self.relu = nn.ReLU() + self.sigmoid = nn.Sigmoid() + + def forward(self, x): + x = self.relu(self.layer1(x)) + x = self.relu(self.layer2(x)) + x = self.sigmoid(self.output(x)) + return x + + +def train(model, train_loader, num_epochs=1): + criterion = nn.BCELoss() + optimizer = 
optim.Adam(model.parameters(), lr=0.001) + model.train() + for epoch in range(num_epochs): + for X_batch, y_batch in train_loader: + optimizer.zero_grad() + outputs = model(X_batch) + loss = criterion(outputs, y_batch) + loss.backward() + optimizer.step() + + +def evaluate(model, test_loader): + model.eval() + criterion = nn.BCELoss() + loss = 0.0 + correct = 0 + total = 0 + with torch.no_grad(): + for X_batch, y_batch in test_loader: + outputs = model(X_batch) + batch_loss = criterion(outputs, y_batch) + loss += batch_loss.item() + predicted = (outputs > 0.5).float() + total += y_batch.size(0) + correct += (predicted == y_batch).sum().item() + accuracy = correct / total + loss = loss / len(test_loader) + return loss, accuracy + + +def set_weights(net, parameters): + params_dict = zip(net.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) + + +def get_weights(net): + ndarrays = [val.cpu().numpy() for _, val in net.state_dict().items()] + return ndarrays diff --git a/examples/fl-tabular/pyproject.toml b/examples/fl-tabular/pyproject.toml new file mode 100644 index 000000000000..058a8d73b45f --- /dev/null +++ b/examples/fl-tabular/pyproject.toml @@ -0,0 +1,34 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "fl-tabular" +version = "1.0.0" +description = "Adult Census Income Tabular Dataset and Federated Learning in Flower" +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]>=1.12.0", + "flwr-datasets>=0.3.0", + "torch==2.1.1", + "scikit-learn==1.5.0", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "fltabular.server_app:app" +clientapp = "fltabular.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 5 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] 
+options.num-supernodes = 5 diff --git a/examples/flower-authentication/README.md b/examples/flower-authentication/README.md index 589270e621c9..d10780eeae5d 100644 --- a/examples/flower-authentication/README.md +++ b/examples/flower-authentication/README.md @@ -1,3 +1,9 @@ +--- +tags: [advanced, vision, fds] +dataset: [CIFAR-10] +framework: [torch, torchvision] +--- + # Flower Authentication with PyTorch 🧪 > 🧪 = This example covers experimental features that might change in future versions of Flower diff --git a/examples/flower-authentication/client.py b/examples/flower-authentication/client.py index 3c99d5a410c9..065acefb7bed 100644 --- a/examples/flower-authentication/client.py +++ b/examples/flower-authentication/client.py @@ -1,17 +1,9 @@ from typing import Dict -from flwr.common import NDArrays, Scalar -from flwr.client import ClientApp, NumPyClient -from task import ( - Net, - DEVICE, - load_data, - get_parameters, - set_parameters, - train, - test, -) +from flwr.client import ClientApp, NumPyClient +from flwr.common import NDArrays, Scalar +from task import DEVICE, Net, get_parameters, load_data, set_parameters, test, train # Load model and data (simple CNN, CIFAR-10) net = Net().to(DEVICE) diff --git a/examples/flower-authentication/pyproject.toml b/examples/flower-authentication/pyproject.toml index e80a50b1eef9..575d1e6618f5 100644 --- a/examples/flower-authentication/pyproject.toml +++ b/examples/flower-authentication/pyproject.toml @@ -6,14 +6,12 @@ build-backend = "hatchling.build" name = "flower-client-authentication" version = "0.1.0" description = "Multi-Tenant Federated Learning with Flower and PyTorch" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] +authors = [{ name = "The Flower Authors", email = "hello@flower.ai" }] dependencies = [ "flwr-nightly[rest,simulation]", "torch==1.13.1", "torchvision==0.14.1", - "tqdm==4.66.3" + "tqdm==4.66.3", ] [tool.hatch.build.targets.wheel] diff --git 
a/examples/flower-authentication/server.py b/examples/flower-authentication/server.py index d88dc1d1a641..44908a0d9fc4 100644 --- a/examples/flower-authentication/server.py +++ b/examples/flower-authentication/server.py @@ -2,8 +2,8 @@ import flwr as fl from flwr.common import Metrics -from flwr.server.strategy.fedavg import FedAvg from flwr.server import ServerApp +from flwr.server.strategy.fedavg import FedAvg # Define metric aggregation function diff --git a/examples/flower-authentication/task.py b/examples/flower-authentication/task.py index 276aace885df..331bd324061d 100644 --- a/examples/flower-authentication/task.py +++ b/examples/flower-authentication/task.py @@ -9,7 +9,6 @@ from torchvision.transforms import Compose, Normalize, ToTensor from tqdm import tqdm - warnings.filterwarnings("ignore", category=UserWarning) DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") diff --git a/examples/flower-in-30-minutes/README.md b/examples/flower-in-30-minutes/README.md index 5fd9b882413b..faec3d72dae2 100644 --- a/examples/flower-in-30-minutes/README.md +++ b/examples/flower-in-30-minutes/README.md @@ -1,3 +1,9 @@ +--- +tags: [colab, vision, simulation] +dataset: [CIFAR-10] +framework: [torch] +--- + # 30-minute tutorial running Flower simulation with PyTorch This README links to a Jupyter notebook that you can either download and run locally or [![open it in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/adap/flower/blob/main/examples/flower-in-30-minutes/tutorial.ipynb). This is a short 30-minute (or less!) tutorial showcasing the basics of Flower federated learning simulations using PyTorch. 
diff --git a/examples/flower-in-30-minutes/tutorial.ipynb b/examples/flower-in-30-minutes/tutorial.ipynb index 9f0c86a2507a..ed8d9a49dcd7 100644 --- a/examples/flower-in-30-minutes/tutorial.ipynb +++ b/examples/flower-in-30-minutes/tutorial.ipynb @@ -11,7 +11,9 @@ "\n", "🧑‍🏫 This tutorial starts at zero and expects no familiarity with federated learning. Only a basic understanding of data science and Python programming is assumed. A minimal understanding of ML is not required but if you already know about it, nothing is stopping your from modifying this code as you see fit!\n", "\n", - "> Star Flower on [GitHub ⭐️](https://github.com/adap/flower) and join the Flower community on Slack to connect, ask questions, and get help: [Join Slack 🌼](https://flower.ai/join-slack/). We'd love to hear from you in the #introductions channel! And if anything is unclear, head over to the #questions channel.\n", + "> [Star Flower on GitHub](https://github.com/adap/flower) ⭐️ and join the Flower community on Flower Discuss and the Flower Slack to connect, ask questions, and get help:\n", + "> - [Join Flower Discuss](https://discuss.flower.ai/) We'd love to hear from you in the `Introduction` topic! If anything is unclear, post in `Flower Help - Beginners`.\n", + "> - [Join Flower Slack](https://flower.ai/join-slack) We'd love to hear from you in the `#introductions` channel! If anything is unclear, head over to the `#questions` channel.\n", "\n", "Let's get started!" ] @@ -50,8 +52,7 @@ "metadata": {}, "outputs": [], "source": [ - "# depending on your shell, you might need to add `\\` before `[` and `]`.\n", - "!pip install -q flwr[simulation]" + "!pip install -q \"flwr[simulation]\" flwr-datasets" ] }, { @@ -59,7 +60,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We will be using the _simulation_ model in Flower, which allows you to run a large number of clients without the overheads of manually managing devices. 
This is achieved via the `Virtual Client Engine`, the core component that runs [FL Simulations](https://flower.ai/docs/framework/how-to-run-simulations.html) with Flower. With simulation, you can dynamically scale your experiments whether you run the code on your laptop, a machine with a single GPU, a server with multiple GPUs os even on a cluster with multiple servers. The `Virtual Client Engine` handles everything transparently and it allows you to specify how many resources (e.g. CPU cores, GPU VRAM) should be assigned to each virtual client." + "We will be using the _simulation_ engine in Flower, which allows you to run a large number of clients without the overheads of manually managing devices. This is achieved via the `Simulation Engine`, the core component in Flower to run simulations efficiently." ] }, { @@ -69,9 +70,9 @@ "source": [ "## Install your ML framework\n", "\n", - "Flower is agnostic to your choice of ML Framework. Flower works with `PyTorch`, `Tensorflow`, `NumPy`, `🤗 Transformers`, `MXNet`, `JAX`, `scikit-learn`, `fastai`, `Pandas`. Flower also supports all major platforms: `iOS`, `Android` and plain `C++`. You can find a _quickstart- example for each of the above in the [Flower Repository](https://github.com/adap/flower/tree/main/examples) inside the `examples/` directory. And check the [Flower Documentation](https://flower.ai/docs/) for even more learning materials.\n", + "Flower is agnostic to your choice of ML Framework. Flower works with `PyTorch`, `Tensorflow`, `NumPy`, `🤗 Transformers`, `MLX`, `JAX`, `scikit-learn`, `fastai`, `Pandas`. Flower also supports all major platforms: `iOS`, `Android` and plain `C++`. You can find a _quickstart- example for each of the above in the [Flower Repository](https://github.com/adap/flower/tree/main/examples) inside the `examples/` directory. 
And check the [Flower Documentation](https://flower.ai/docs/) for even more learning materials.\n", "\n", - "In this tutorial we are going to use PyTorch, so let's install a recent version. In this tutorial we'll use a small model so using CPU only training will suffice (this will also prevent Colab from abruptly terminating your experiment if resource limits are exceeded)" + "In this tutorial we are going to use PyTorch, uncomment the line below if you haven't installed PyTorch in your system. In this tutorial we'll use a small model so using CPU only training will suffice." ] }, { @@ -87,7 +88,7 @@ "source": [ "# you might see a warning after running the command below, this can be ignored\n", "# if you are running this outside Colab, you probably need to adjust the command below\n", - "!pip install torch==1.13.1+cpu torchvision==0.14.1+cpu torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cpu" + "# !pip install torch==1.13.1+cpu torchvision==0.14.1+cpu --extra-index-url https://download.pytorch.org/whl/cpu" ] }, { @@ -132,7 +133,7 @@ "\n", "## A dataset\n", "\n", - "Let's begin by constructing the dataset." + "Let's begin by constructing the dataset. We will use 🤗HuggingFace Datasets to download MNIST. We will prepare a function that will be use later to apply standard normalization transformations from `TorchVision` and create the dataloaders for the `train` and `test` partitions." ] }, { @@ -145,39 +146,28 @@ "import torch\n", "from torch.utils.data import DataLoader\n", "from torchvision.transforms import ToTensor, Normalize, Compose\n", - "from torchvision.datasets import MNIST\n", + "from datasets import load_dataset\n", "\n", "\n", - "def get_mnist(data_path: str = \"./data\"):\n", - " \"\"\"This function downloads the MNIST dataset into the `data_path`\n", - " directory if it is not there already. 
WE construct the train/test\n", - " split by converting the images into tensors and normalising them\"\"\"\n", + "def get_mnist_dataloaders(mnist_dataset, batch_size: int):\n", + " pytorch_transforms = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])\n", "\n", - " # transformation to convert images to tensors and apply normalisation\n", - " tr = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])\n", + " # Prepare transformation functions\n", + " def apply_transforms(batch):\n", + " batch[\"image\"] = [pytorch_transforms(img) for img in batch[\"image\"]]\n", + " return batch\n", "\n", - " # prepare train and test set\n", - " trainset = MNIST(data_path, train=True, download=True, transform=tr)\n", - " testset = MNIST(data_path, train=False, download=True, transform=tr)\n", + " mnist_train = mnist_dataset[\"train\"].with_transform(apply_transforms)\n", + " mnist_test = mnist_dataset[\"test\"].with_transform(apply_transforms)\n", "\n", - " return trainset, testset" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's run the code above and do some visualisations to understand better the data we are working with !" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "trainset, testset = get_mnist()" + " # Construct PyTorch dataloaders\n", + " trainloader = DataLoader(mnist_train, batch_size=batch_size, shuffle=True)\n", + " testloader = DataLoader(mnist_test, batch_size=batch_size)\n", + " return trainloader, testloader\n", + "\n", + "\n", + "# Download dataset\n", + "mnist = load_dataset(\"ylecun/mnist\")" ] }, { @@ -185,7 +175,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We can have a quick overview of our datasets by just typing the object on the command line. For instance, below you can see that the `trainset` has 60k training examples and will use the transformation rule we defined above in `get_mnist()`." 
+ "We can have a quick overview of our datasets by just typing the object on the command line. For instance, below you can see the sizes of both the `train` and `test` partitions" ] }, { @@ -199,7 +189,7 @@ }, "outputs": [], "source": [ - "trainset" + "mnist" ] }, { @@ -223,21 +213,19 @@ "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", + "from collections import Counter\n", "\n", "\n", "# construct histogram\n", - "all_labels = trainset.targets\n", - "num_possible_labels = len(\n", - " set(all_labels.numpy().tolist())\n", - ") # this counts unique labels (so it should be = 10)\n", - "plt.hist(all_labels, bins=num_possible_labels)\n", + "all_labels = mnist[\"train\"][\"label\"]\n", + "all_label_counts = Counter(all_labels)\n", + "\n", + "# visualise histogram\n", + "bar = plt.bar(all_label_counts.keys(), all_label_counts.values())\n", + "_ = plt.bar_label(bar)\n", "\n", "# plot formatting\n", - "plt.xticks(range(num_possible_labels))\n", - "plt.grid()\n", - "plt.xlabel(\"Label\")\n", - "plt.ylabel(\"Number of images\")\n", - "plt.title(\"Class labels distribution for MNIST\")" + "_ = plt.xticks([label for label in all_label_counts.keys()])" ] }, { @@ -256,11 +244,15 @@ "source": [ "import random\n", "import numpy as np\n", + "from PIL import Image\n", + "import io\n", "\n", "\n", "def visualise_n_random_examples(trainset_, n: int, verbose: bool = True):\n", - " # take n examples at random\n", - " idx = list(range(len(trainset_.data)))\n", + " trainset_data = [\n", + " Image.open(io.BytesIO(entry[0].as_py())) for entry in trainset_.data[0]\n", + " ]\n", + " idx = list(range(len(trainset_data)))\n", " random.shuffle(idx)\n", " idx = idx[:n]\n", " if verbose:\n", @@ -273,7 +265,7 @@ "\n", " # display images on canvas\n", " for c_i, i in enumerate(idx):\n", - " axs.flat[c_i].imshow(trainset_.data[i], cmap=\"gray\")" + " axs.flat[c_i].imshow(trainset_data[i], cmap=\"gray\")" ] }, { @@ -290,7 +282,7 @@ "source": [ "# it is likely that the plot this function 
will generate looks familiar to other plots you might have generated before\n", "# or you might have encountered in other tutorials. So far, we aren't doing anything new, Federated Learning will start soon!\n", - "visualise_n_random_examples(trainset, n=32)" + "visualise_n_random_examples(mnist[\"train\"], n=32)" ] }, { @@ -383,13 +375,12 @@ " \"\"\"Train the network on the training set.\"\"\"\n", " criterion = torch.nn.CrossEntropyLoss()\n", " net.train()\n", - " for _ in range(epochs):\n", - " for images, labels in trainloader:\n", - " optimizer.zero_grad()\n", - " loss = criterion(net(images), labels)\n", - " loss.backward()\n", - " optimizer.step()\n", - " return net\n", + " for batch in trainloader:\n", + " images, labels = batch[\"image\"], batch[\"label\"]\n", + " optimizer.zero_grad()\n", + " loss = criterion(net(images), labels)\n", + " loss.backward()\n", + " optimizer.step()\n", "\n", "\n", "def test(net, testloader):\n", @@ -398,7 +389,8 @@ " correct, loss = 0, 0.0\n", " net.eval()\n", " with torch.no_grad():\n", - " for images, labels in testloader:\n", + " for batch in testloader:\n", + " images, labels = batch[\"image\"], batch[\"label\"]\n", " outputs = net(images)\n", " loss += criterion(outputs, labels).item()\n", " _, predicted = torch.max(outputs.data, 1)\n", @@ -407,7 +399,9 @@ " return loss, accuracy\n", "\n", "\n", - "def run_centralised(epochs: int, lr: float, momentum: float = 0.9):\n", + "def run_centralised(\n", + " trainloader, testloader, epochs: int, lr: float, momentum: float = 0.9\n", + "):\n", " \"\"\"A minimal (but complete) training loop\"\"\"\n", "\n", " # instantiate the model\n", @@ -416,16 +410,13 @@ " # define optimiser with hyperparameters supplied\n", " optim = torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum)\n", "\n", - " # get dataset and construct a dataloaders\n", - " trainset, testset = get_mnist()\n", - " trainloader = DataLoader(trainset, batch_size=64, shuffle=True, num_workers=2)\n", - " testloader = 
DataLoader(testset, batch_size=128)\n", - "\n", " # train for the specified number of epochs\n", - " trained_model = train(model, trainloader, optim, epochs)\n", + " for e in range(epochs):\n", + " print(f\"Training epoch {e} ...\")\n", + " train(model, trainloader, optim, epochs)\n", "\n", " # training is completed, then evaluate model on the test set\n", - " loss, accuracy = test(trained_model, testloader)\n", + " loss, accuracy = test(model, testloader)\n", " print(f\"{loss = }\")\n", " print(f\"{accuracy = }\")" ] @@ -449,7 +440,11 @@ }, "outputs": [], "source": [ - "run_centralised(epochs=5, lr=0.01)" + "# Construct dataloaders\n", + "trainloader, testloader = get_mnist_dataloaders(mnist, batch_size=32)\n", + "\n", + "# Run the centralised training\n", + "run_centralised(trainloader, testloader, epochs=3, lr=0.01)" ] }, { @@ -477,7 +472,7 @@ "source": [ "## One Client, One Data Partition\n", "\n", - "To start designing a Federated Learning pipeline we need to meet one of the key properties in FL: each client has its own data partition. To accomplish this with the MNIST dataset, we are going to generate N random partitions, where N is the total number of clients in our FL system." + "To start designing a Federated Learning pipeline we need to meet one of the key properties in FL: each client has its own data partition. To accomplish this with the MNIST dataset, we are going to generate N random partitions, where N is the total number of clients in our FL system, using [Flower Datasets](https://flower.ai/docs/datasets/). Let's create 100 partitions with the [IidPartitioner](https://flower.ai/docs/datasets/ref-api/flwr_datasets.partitioner.IidPartitioner.html#flwr_datasets.partitioner.IidPartitioner) -- note there are many more [partitioners](https://flower.ai/docs/datasets/ref-api/flwr_datasets.partitioner.html) to choose from." 
] }, { @@ -486,92 +481,63 @@ "metadata": {}, "outputs": [], "source": [ - "from torch.utils.data import random_split\n", - "\n", - "\n", - "def prepare_dataset(num_partitions: int, batch_size: int, val_ratio: float = 0.1):\n", - " \"\"\"This function partitions the training set into N disjoint\n", - " subsets, each will become the local dataset of a client. This\n", - " function also subsequently partitions each traininset partition\n", - " into train and validation. The test set is left intact and will\n", - " be used by the central server to asses the performance of the\n", - " global model.\"\"\"\n", + "from flwr_datasets import FederatedDataset\n", + "from flwr_datasets.partitioner import IidPartitioner\n", "\n", - " # get the MNIST dataset\n", - " trainset, testset = get_mnist()\n", + "NUM_PARTITIONS = 100\n", "\n", - " # split trainset into `num_partitions` trainsets\n", - " num_images = len(trainset) // num_partitions\n", - "\n", - " partition_len = [num_images] * num_partitions\n", - "\n", - " trainsets = random_split(\n", - " trainset, partition_len, torch.Generator().manual_seed(2023)\n", - " )\n", - "\n", - " # create dataloaders with train+val support\n", - " trainloaders = []\n", - " valloaders = []\n", - " for trainset_ in trainsets:\n", - " num_total = len(trainset_)\n", - " num_val = int(val_ratio * num_total)\n", - " num_train = num_total - num_val\n", - "\n", - " for_train, for_val = random_split(\n", - " trainset_, [num_train, num_val], torch.Generator().manual_seed(2023)\n", - " )\n", - "\n", - " trainloaders.append(\n", - " DataLoader(for_train, batch_size=batch_size, shuffle=True, num_workers=2)\n", - " )\n", - " valloaders.append(\n", - " DataLoader(for_val, batch_size=batch_size, shuffle=False, num_workers=2)\n", - " )\n", - "\n", - " # create dataloader for the test set\n", - " testloader = DataLoader(testset, batch_size=128)\n", - "\n", - " return trainloaders, valloaders, testloader" + "partitioner = 
IidPartitioner(num_partitions=NUM_PARTITIONS)\n", + "# Let's partition the \"train\" split of the MNIST dataset\n", + "# The MNIST dataset will be downloaded if it hasn't been already\n", + "fds = FederatedDataset(dataset=\"ylecun/mnist\", partitioners={\"train\": partitioner})" ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "Let's create 100 partitions and extract some statistics from one partition\n" + "Accessing individual partitions can be done like this. The return object can then be passed to a dataloader for training or evaluation." ] }, { "cell_type": "code", "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 508 - }, - "outputId": "0f53ca81-cb55-46ef-c8e0-4e19a4f060b2" - }, + "metadata": {}, "outputs": [], "source": [ - "trainloaders, valloaders, testloader = prepare_dataset(\n", - " num_partitions=100, batch_size=32\n", - ")\n", - "\n", - "# first partition\n", - "train_partition = trainloaders[0].dataset\n", - "\n", - "# count data points\n", - "partition_indices = train_partition.indices\n", - "print(f\"number of images: {len(partition_indices)}\")\n", + "# We could load a single partition like this\n", + "partition_0 = fds.load_partition(0)\n", + "partition_0" ] }, { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`Flower Datasets` comes with built-in visualization tools that help you get insights into how the dataset (in this case MNIST) has been partitioned. Let's create a bar plot to visualize the number of labels of each class that every client's partition contains. Note we are only visualising the first 30 clients purely so the plot remains readable. 
\n", "\n", - "# visualise histogram\n", - "plt.hist(train_partition.dataset.dataset.targets[partition_indices], bins=10)\n", - "plt.grid()\n", - "plt.xticks(range(10))\n", - "plt.xlabel(\"Label\")\n", - "plt.ylabel(\"Number of images\")\n", - "plt.title(\"Class labels distribution for MNIST\")" + "> There are many more types of plots you can generate with Flower Datasets. Check the [Visualization tutorial](https://flower.ai/docs/datasets/tutorial-visualize-label-distribution.html). Feel free to try other partitioning schemes and you'll see how the visualization changes." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from flwr_datasets.visualization import plot_label_distributions\n", + "\n", + "fig, ax, df = plot_label_distributions(\n", + "    partitioner,\n", + "    label_name=\"label\",\n", + "    plot_type=\"bar\",\n", + "    size_unit=\"absolute\",\n", + "    partition_id_axis=\"x\",\n", + "    legend=True,\n", + "    verbose_labels=True,\n", + "    max_num_partitions=30,  # Note we are only showing the first 30 so the plot remains readable\n", + "    title=\"Per Partition Labels Distribution\",\n", + ")" ] }, { @@ -583,30 +549,17 @@ "source": [ "\n", "Let's next define how our FL clients will behave\n", "\n", - "## Defining a Flower Client\n", + "## Defining a Flower `ClientApp`\n", "\n", - "You can think of a client in FL as an entity that owns some data and trains a model using this data. The caveat is that the model is being trained _collaboratively_ in Federation by multiple clients (sometimes up to hundreds of thousands) and, in most instances of FL, is sent by a central server.\n", + "You can think of a client in FL as an entity that owns some data and trains a model using this data. 
The caveat is that the model is being trained _collaboratively_ in Federation by multiple clients (sometimes up to hundreds of thousands) and, in most instances of FL, is sent by a central server running in a `ServerApp` (more on this later).\n", "\n", - "A Flower Client is a simple Python class with four distinct methods:\n", + "A Flower Client is a simple Python class with two distinct methods:\n", "\n", "* `fit()`: With this method, the client does on-device training for a number of epochs using its own data. At the end, the resulting model is sent back to the server for aggregation.\n", "\n", "* `evaluate()`: With this method, the server can evaluate the performance of the global model on the local validation set of a client. This can be used for instance when there is no centralised dataset on the server for validation/test. Also, this method can be use to asses the degree of personalisation of the model being federated.\n", "\n", - "* `set_parameters()`: This method takes the parameters sent by the server and uses them to initialise the parameters of the local model that is ML framework specific (e.g. TF, Pytorch, etc).\n", - "\n", - "* `get_parameters()`: It extract the parameters from the local model and transforms them into a list of NumPy arrays. This ML framework-agnostic representation of the model will be sent to the server.\n", - "\n", - "Let's start by importing Flower!" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import flwr as fl" + "This class will be then wrapped into a `ClientApp` that can be used to launch the simulation." 
] }, { @@ -628,9 +581,10 @@ "\n", "import torch\n", "from flwr.common import NDArrays, Scalar\n", + "from flwr.client import NumPyClient\n", "\n", "\n", - "class FlowerClient(fl.client.NumPyClient):\n", + "class FlowerClient(NumPyClient):\n", " def __init__(self, trainloader, valloader) -> None:\n", " super().__init__()\n", "\n", @@ -638,74 +592,156 @@ " self.valloader = valloader\n", " self.model = Net(num_classes=10)\n", "\n", - " def set_parameters(self, parameters):\n", - " \"\"\"With the model parameters received from the server,\n", - " overwrite the uninitialise model in this class with them.\"\"\"\n", - "\n", - " params_dict = zip(self.model.state_dict().keys(), parameters)\n", - " state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict})\n", - " # now replace the parameters\n", - " self.model.load_state_dict(state_dict, strict=True)\n", - "\n", - " def get_parameters(self, config: Dict[str, Scalar]):\n", - " \"\"\"Extract all model parameters and convert them to a list of\n", - " NumPy arrays. The server doesn't work with PyTorch/TF/etc.\"\"\"\n", - " return [val.cpu().numpy() for _, val in self.model.state_dict().items()]\n", - "\n", " def fit(self, parameters, config):\n", - " \"\"\"This method train the model using the parameters sent by the\n", + " \"\"\"This method trains the model using the parameters sent by the\n", " server on the dataset of this client. 
At then end, the parameters\n", " of the locally trained model are communicated back to the server\"\"\"\n", "\n", " # copy parameters sent by the server into client's local model\n", - " self.set_parameters(parameters)\n", + " set_params(self.model, parameters)\n", "\n", - " # Define the optimizer -------------------------------------------------------------- Essentially the same as in the centralised example above\n", + " # Define the optimizer\n", " optim = torch.optim.SGD(self.model.parameters(), lr=0.01, momentum=0.9)\n", "\n", - " # do local training -------------------------------------------------------------- Essentially the same as in the centralised example above (but now using the client's data instead of the whole dataset)\n", + " # do local training (call same function as centralised setting)\n", " train(self.model, self.trainloader, optim, epochs=1)\n", "\n", " # return the model parameters to the server as well as extra info (number of training examples in this case)\n", - " return self.get_parameters({}), len(self.trainloader), {}\n", + " return get_params(self.model), len(self.trainloader), {}\n", "\n", " def evaluate(self, parameters: NDArrays, config: Dict[str, Scalar]):\n", " \"\"\"Evaluate the model sent by the server on this client's\n", " local validation set. 
Then return performance metrics.\"\"\"\n", "\n", - " self.set_parameters(parameters)\n", - " loss, accuracy = test(\n", - " self.model, self.valloader\n", - " ) # <-------------------------- calls the `test` function, just what we did in the centralised setting (but this time using the client's local validation set)\n", + " set_params(self.model, parameters)\n", + " # do local evaluation (call same function as centralised setting)\n", + " loss, accuracy = test(self.model, self.valloader)\n", " # send statistics back to the server\n", - " return float(loss), len(self.valloader), {\"accuracy\": accuracy}" + " return float(loss), len(self.valloader), {\"accuracy\": accuracy}\n", + "\n", + "\n", + "# Two auxhiliary functions to set and extract parameters of a model\n", + "def set_params(model, parameters):\n", + " \"\"\"Replace model parameters with those passed as `parameters`.\"\"\"\n", + "\n", + " params_dict = zip(model.state_dict().keys(), parameters)\n", + " state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict})\n", + " # now replace the parameters\n", + " model.load_state_dict(state_dict, strict=True)\n", + "\n", + "\n", + "def get_params(model):\n", + " \"\"\"Extract model parameters as a list of NumPy arrays.\"\"\"\n", + " return [val.cpu().numpy() for _, val in model.state_dict().items()]" ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "Spend a few minutes to inspect the `FlowerClient` class above. Please ask questions if there is something unclear !\n", "\n", - "Then keen-eyed among you might have realised that if we were to fuse the client's `fit()` and `evaluate()` methods, we'll end up with essentially the same as in the `run_centralised()` function we used in the Centralised Training part of this tutorial. And it is true!! In Federated Learning, the way clients perform local training makes use of the same principles as more traditional centralised setup. 
The key difference is that the dataset now is much smaller and it's never _\"seen\"_ by the entity running the FL workload (i.e. the central server).\n", + "Then keen-eyed among you might have realised that if we were to fuse the client's `fit()` and `evaluate()` methods, we'll end up with essentially the same as in the `run_centralised()` function we used in the Centralised Training part of this tutorial. And it is true!! In Federated Learning, the way clients perform local training makes use of the same principles as more traditional centralised setup. The key difference is that the dataset now is much smaller and it's never _\"seen\"_ by the entity running the FL workload (i.e. the central server).\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### The `client_fn` callback\n", "\n", + "Now let's see how the `FlowerClient` object above can be used in Flower: we need to construct a `ClientApp`. This can be conveniently be done by means of a `client_fn` callback that will return a `FlowerClient` that uses a specific data partition (`partition-id`). 
The index of the partition is set internally during the simulation (meaning you shouldn't worry about it this tutorial)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from flwr.common import Context\n", + "from flwr.client import ClientApp\n", + "\n", + "\n", + "def client_fn(context: Context):\n", + " \"\"\"Returns a FlowerClient containing its data partition.\"\"\"\n", + "\n", + " partition_id = int(context.node_config[\"partition-id\"])\n", + " partition = fds.load_partition(partition_id, \"train\")\n", + " # partition into train/validation\n", + " partition_train_val = partition.train_test_split(test_size=0.1, seed=42)\n", + "\n", + " # Let's use the function defined earlier to construct the dataloaders\n", + " # and apply the dataset transformations\n", + " trainloader, testloader = get_mnist_dataloaders(partition_train_val, batch_size=32)\n", + "\n", + " return FlowerClient(trainloader=trainloader, valloader=testloader).to_client()\n", "\n", - "Talking about the central server... we should define what strategy we want to make use of so the updated models sent from the clients back to the server at the end of the `fit()` method are aggregate.\n", "\n", + "# Concstruct the ClientApp passing the client generation function\n", + "client_app = ClientApp(client_fn=client_fn)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now that a `ClientApp` is fully defined, let's create its counterpart: the `ServerApp`." + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Defining a Flower `ServerApp`\n", "\n", - "## Chosing a Flower Strategy\n", + "### Chosing a Flower Strategy\n", "\n", "\n", "A strategy sits at the core of the Federated Learning experiment. 
It is involved in all stages of a FL pipeline: sampling clients; sending the _global model_ to the clients so they can do `fit()`; receive the updated models from the clients and **aggregate** these to construct a new _global model_; define and execute global or federated evaluation; and more.\n", "\n", - "Flower comes with [many strategies built-in](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy) and more to be available in the next release (`1.5` already!). For this tutorial, let's use what is arguable the most popular strategy out there: `FedAvg`.\n", + "Flower comes with [many strategies built-in](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy). For this tutorial, let's use what is arguable the most popular strategy out there: `FedAvg`.\n", "\n", "The way `FedAvg` works is simple but performs surprisingly well in practice. It is therefore one good strategy to start your experimentation. `FedAvg`, as its name implies, derives a new version of the _global model_ by taking the average of all the models sent by clients participating in the round. You can read all the details [in the paper](https://arxiv.org/abs/1602.05629).\n", "\n", - "Let's see how we can define `FedAvg` using Flower. We use one of the callbacks called `evaluate_fn` so we can easily evaluate the state of the global model using a small centralised testset. Note this functionality is user-defined since it requires a choice in terms of ML-framework. (if you recall, Flower is framework agnostic).\n", + "While Flower strategies offer a high degree of customization using callbacks, in this tutorial we'll focus on using just one: the `evaluate_metrics_aggregation_fn` callback. It allows you to pass a function that should be executed at the end of an _\"evaluate\"_ round (i.e. a round where clients evaluate the _global model_ they receive on their local data and report the result -- e.g. accuracy, loss, etc -- back to the server). 
For this tutorial we want to perform the weighted average of the _\"accuracy\"_ metrics returned by each `FlowerClient`'s `evaluate()` method:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from typing import List\n", + "from flwr.common import Metrics\n", + "\n", + "\n", + "# Define metric aggregation function\n", + "def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics:\n", + " # Multiply accuracy of each client by number of examples used\n", + " accuracies = [num_examples * m[\"accuracy\"] for num_examples, m in metrics]\n", + " examples = [num_examples for num_examples, _ in metrics]\n", "\n", - "> This being said, centralised evaluation of the global model is only possible if there exists a centralised dataset that somewhat follows a similar distribution as the data that's spread across clients. In some cases having such centralised dataset for validation is not possible, so the only solution is to federate the evaluation of the _global model_. This is the default behaviour in Flower. If you don't specify teh `evaluate_fn` argument in your strategy, then, centralised global evaluation won't be performed." + " # Aggregate and return custom metric (weighted average)\n", + " return {\"accuracy\": sum(accuracies) / sum(examples)}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We'll use this callback when defining the strategy in the next section" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### The `server_fn` callback\n", + "\n", + "The easiest way to create a `ServerApp` with the aggregation _strategy_ of your choice is by means of a `server_fn` callback. It has a similar signature to `client_fn` but, instead of returning a client object, it returns all the components needed to run the server-side logic in Flower. In this tutorial we'll keep things simple and stick to `FedAvg` with initialised global parameters." 
] }, { @@ -714,54 +750,47 @@ "metadata": {}, "outputs": [], "source": [ - "def get_evaluate_fn(testloader):\n", - " \"\"\"This is a function that returns a function. The returned\n", - " function (i.e. `evaluate_fn`) will be executed by the strategy\n", - " at the end of each round to evaluate the stat of the global\n", - " model.\"\"\"\n", + "from flwr.common import ndarrays_to_parameters\n", + "from flwr.server import ServerApp, ServerConfig, ServerAppComponents\n", + "from flwr.server.strategy import FedAvg\n", "\n", - " def evaluate_fn(server_round: int, parameters, config):\n", - " \"\"\"This function is executed by the strategy it will instantiate\n", - " a model and replace its parameters with those from the global model.\n", - " The, the model will be evaluate on the test set (recall this is the\n", - " whole MNIST test set).\"\"\"\n", + "num_rounds = 5\n", "\n", - " model = Net(num_classes=10)\n", "\n", - " # set parameters to the model\n", - " params_dict = zip(model.state_dict().keys(), parameters)\n", - " state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict})\n", - " model.load_state_dict(state_dict, strict=True)\n", + "def server_fn(context: Context):\n", "\n", - " # call test\n", - " loss, accuracy = test(\n", - " model, testloader\n", - " ) # <-------------------------- calls the `test` function, just what we did in the centralised setting\n", - " return loss, {\"accuracy\": accuracy}\n", + " # instantiate the model\n", + " model = Net(num_classes=10)\n", + " ndarrays = get_params(model)\n", + " # Convert model parameters to flwr.common.Parameters\n", + " global_model_init = ndarrays_to_parameters(ndarrays)\n", + "\n", + " # Define the strategy\n", + " strategy = FedAvg(\n", + " fraction_fit=0.1, # 10% clients sampled each round to do fit()\n", + " fraction_evaluate=0.5, # 50% clients sample each round to do evaluate()\n", + " evaluate_metrics_aggregation_fn=weighted_average, # callback defined earlier\n", + " 
initial_parameters=global_model_init, # initialised global model\n", + " )\n", + "\n", + " # Construct ServerConfig\n", + " config = ServerConfig(num_rounds=num_rounds)\n", "\n", - " return evaluate_fn\n", + " # Wrap everything into a `ServerAppComponents` object\n", + " return ServerAppComponents(strategy=strategy, config=config)\n", "\n", "\n", - "# now we can define the strategy\n", - "strategy = fl.server.strategy.FedAvg(\n", - " fraction_fit=0.1, # let's sample 10% of the client each round to do local training\n", - " fraction_evaluate=0.1, # after each round, let's sample 20% of the clients to asses how well the global model is doing\n", - " min_available_clients=100, # total number of clients available in the experiment\n", - " evaluate_fn=get_evaluate_fn(testloader),\n", - ") # a callback to a function that the strategy can execute to evaluate the state of the global model on a centralised dataset" + "# Create your ServerApp\n", + "server_app = ServerApp(server_fn=server_fn)" ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "So far we have:\n", - "* created the dataset partitions (one for each client)\n", - "* defined the client class\n", - "* decided on a strategy to use\n", + "## Launching the Simulation\n", "\n", - "Now we just need to launch the Flower FL experiment... not so fast! just one final function: let's create another callback that the Simulation Engine will use in order to span VirtualClients. As you can see this is really simple: construct a FlowerClient object, assigning each their own data partition." + "With both `ClientApp` and `ServerApp` ready, we can launch the simulation. Pass both apps to the `run_simulation()` function and specify the number of `supernodes` (this is a more general term used in Flower to refer to individual \"nodes\" or \"clients\"). We earlier partitioned the dataset into 100 partitions, one for each supernode. So we indicate that `num_supernodes`=100." 
] }, { @@ -770,18 +799,97 @@ "metadata": {}, "outputs": [], "source": [ - "def generate_client_fn(trainloaders, valloaders):\n", - " def client_fn(cid: str):\n", - " \"\"\"Returns a FlowerClient containing the cid-th data partition\"\"\"\n", + "from flwr.simulation import run_simulation\n", + "\n", + "run_simulation(\n", + " server_app=server_app, client_app=client_app, num_supernodes=NUM_PARTITIONS\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note above how the distributed `accuracy` goes up as training progresses while the loss goes down. Federated learning is working!" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Bonus: Create your own Strategy\n", + "\n", + "Flower strategies can be extended easily to suit your FL setups or your preferred workflows whether you use Flower for research or in production. In this final section, you'll learn how to create a custom strategy that behaves just like `FedAvg` but extends the functionality of certain methods to achieve two things:\n", + "1. Save the results obtained on each round into a JSON file.\n", + "2. Create a plot at after the last round.\n", "\n", - " return FlowerClient(\n", - " trainloader=trainloaders[int(cid)], valloader=valloaders[int(cid)]\n", - " ).to_client()\n", "\n", - " return client_fn\n", + "Let's call this strategy `FedAvgCustom`. We'll use it to also showcase how to use the `evaluate_fn` callback, a convenient way to do centralised evaluation of the global model after each round. Note this functionality is user-defined since it requires a choice in terms of ML-framework. (if you recall, Flower is framework agnostic).\n", + "\n", + "> This being said, centralised evaluation of the global model is only possible if there exists a centralised dataset that somewhat follows a similar distribution as the data that's spread across clients. 
In some cases having such centralised dataset for validation is not possible, so the only solution is to federate the evaluation of the _global model_. This is the default behaviour in Flower. If you don't specify the `evaluate_fn` argument in your strategy, then, centralised global evaluation won't be performed." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from flwr.server.strategy import FedAvg\n", + "from flwr.common import Parameters\n", + "import json\n", + "\n", + "\n", + "class FedAvgCustom(FedAvg):\n", + " def __init__(self, file_name: str, num_rounds: int, *args, **kwargs):\n", + " super().__init__(*args, **kwargs)\n", + " self.file_name = file_name\n", + " self.num_rounds = num_rounds\n", + " self.loss_list = []\n", + " self.metrics_list = []\n", + "\n", + " def _make_plot(self):\n", + " \"\"\"Makes a plot with the results recorded\"\"\"\n", + " round = list(range(1, len(self.loss_list) + 1))\n", + " acc = [100.0 * metrics[\"accuracy\"] for metrics in self.metrics_list]\n", + " plt.plot(round, acc)\n", + " plt.grid()\n", + " plt.ylabel(\"Accuracy (%)\")\n", + " plt.xlabel(\"Round\")\n", + "\n", + " def evaluate(self, server_round: int, parameters: Parameters):\n", + " \"\"\"Evaluate model parameters using an evaluation function.\"\"\"\n", + " loss, metrics = super().evaluate(server_round, parameters)\n", + " # Record results\n", + " self.loss_list.append(loss)\n", + " self.metrics_list.append(metrics)\n", + " # If last round, save results and make a plot\n", + " if server_round == self.num_rounds:\n", + " # Save to CSV\n", + " with open(f\"{self.file_name}.json\", \"w\") as f:\n", + " json.dump({\"loss\": self.loss_list, \"metrics\": self.metrics_list}, f)\n", + " # Generate plot\n", + " self._make_plot()\n", + "\n", + "\n", + "def get_evaluate_fn(testloader):\n", + " \"\"\"Return a function that can be called to do global evaluation.\"\"\"\n", + "\n", + " def evaluate_fn(server_round: 
int, parameters, config):\n", + " \"\"\"Evaluate global model on the whole test set.\"\"\"\n", + "\n", + " model = Net(num_classes=10)\n", + "\n", + " # set parameters to the model\n", + " params_dict = zip(model.state_dict().keys(), parameters)\n", + " state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict})\n", + " model.load_state_dict(state_dict, strict=True)\n", "\n", + " # call test (evaluate model as in centralised setting)\n", + " loss, accuracy = test(model, testloader)\n", + " return loss, {\"accuracy\": accuracy}\n", "\n", - "client_fn_callback = generate_client_fn(trainloaders, valloaders)" + " return evaluate_fn" ] }, { @@ -789,7 +897,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now we are ready to launch the FL experiment using Flower simulation:" + "With the above defined, we just need to wrap it all up in a `ServerApp` as we did earlier but this time using the `FedAvgCustom` that we just defined." ] }, { @@ -803,46 +911,57 @@ }, "outputs": [], "source": [ - "history = fl.simulation.start_simulation(\n", - " client_fn=client_fn_callback, # a callback to construct a client\n", - " num_clients=100, # total number of clients in the experiment\n", - " config=fl.server.ServerConfig(num_rounds=10), # let's run for 10 rounds\n", - " strategy=strategy, # the strategy that will orchestrate the whole FL pipeline\n", - ")" + "from flwr.server import ServerApp, ServerConfig\n", + "\n", + "\n", + "def server_fn(context: Context):\n", + "\n", + " # instantiate the model\n", + " model = Net(num_classes=10)\n", + " ndarrays = get_params(model)\n", + " # Convert model parameters to flwr.common.Parameters\n", + " global_model_init = ndarrays_to_parameters(ndarrays)\n", + "\n", + " # Define the strategy\n", + " strategy = FedAvgCustom(\n", + " file_name=\"results_fedavgcustom\",\n", + " num_rounds=num_rounds,\n", + " fraction_fit=0.1, # 10% clients sampled each round to do fit()\n", + " fraction_evaluate=0.25, # 25% clients sample each 
round to do evaluate()\n", + "        evaluate_metrics_aggregation_fn=weighted_average,  # callback defined earlier\n", + "        initial_parameters=global_model_init,  # initialised global model\n", + "        evaluate_fn=get_evaluate_fn(\n", + "            testloader\n", + "        ),  # global evaluation (here we can pass the same testset as used in centralised)\n", + "    )\n", + "\n", + "    # Construct ServerConfig\n", + "    config = ServerConfig(num_rounds=num_rounds)\n", + "\n", + "    # Wrap everything into a `ServerAppComponents` object\n", + "    return ServerAppComponents(strategy=strategy, config=config)\n", + "\n", + "\n", + "# Create your ServerApp\n", + "server_app = ServerApp(server_fn=server_fn)" ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "Doing 10 rounds should take less than 2 minutes on a CPU-only Colab instance <-- Flower Simulation is fast! 🚀\n", - "\n", - "You can then use the returned `History` object to either save the results to disk or do some visualisation (or both of course, or neither if you like chaos). Below you can see how you can plot the centralised accuracy obtained at the end of each round (including at the very beginning of the experiment) for the _global model_. This is want the function `evaluate_fn()` that we passed to the strategy reports." + "All that is left is to launch the simulation. Note a plot will be displayed at the end and a `.json` with the results will be saved to the current directory." 
] }, { "cell_type": "code", "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 508 - }, - "outputId": "d8eab106-cee9-4266-9082-0944882cdba8" - }, + "metadata": {}, "outputs": [], "source": [ - "print(f\"{history.metrics_centralized = }\")\n", - "\n", - "global_accuracy_centralised = history.metrics_centralized[\"accuracy\"]\n", - "round = [data[0] for data in global_accuracy_centralised]\n", - "acc = [100.0 * data[1] for data in global_accuracy_centralised]\n", - "plt.plot(round, acc)\n", - "plt.grid()\n", - "plt.ylabel(\"Accuracy (%)\")\n", - "plt.xlabel(\"Round\")\n", - "plt.title(\"MNIST - IID - 100 clients with 10 clients per round\")" + "run_simulation(\n", + " server_app=server_app, client_app=client_app, num_supernodes=NUM_PARTITIONS\n", + ")" ] }, { diff --git a/examples/flower-secure-aggregation/README.md b/examples/flower-secure-aggregation/README.md new file mode 100644 index 000000000000..0a9056263db3 --- /dev/null +++ b/examples/flower-secure-aggregation/README.md @@ -0,0 +1,72 @@ +--- +tags: [advanced, secure_aggregation, privacy] +dataset: [CIFAR-10] +framework: [torch, torchvision] +--- + +# Secure aggregation with Flower (the SecAgg+ protocol) + +The following steps describe how to use Flower's built-in Secure Aggregation components. This example demonstrates how to apply `SecAgg+` to the same federated learning workload as in the [quickstart-pytorch](https://github.com/adap/flower/tree/main/examples/quickstart-pytorch) example. The `ServerApp` uses the [`SecAggPlusWorkflow`](https://flower.ai/docs/framework/ref-api/flwr.server.workflow.SecAggPlusWorkflow.html#secaggplusworkflow) while `ClientApp` uses the [`secaggplus_mod`](https://flower.ai/docs/framework/ref-api/flwr.client.mod.secaggplus_mod.html#flwr.client.mod.secaggplus_mod). To introduce the various steps involved in `SecAgg+`, this example introduces as a sub-class of `SecAggPlusWorkflow` the `SecAggPlusWorkflowWithLogs`. 
It is enabled by default, but you can disable it (see later in this readme). + +## Set up the project + +### Clone the project + +Start by cloning the example project: + +```shell +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/flower-secure-aggregation . \ + && rm -rf _tmp && cd flower-secure-aggregation +``` + +This will create a new directory called `flower-secure-aggregation` containing the +following files: + +```shell +flower-secure-aggregation +| +├── secaggexample +| ├── __init__.py +| ├── client_app.py # Defines your ClientApp +| ├── server_app.py # Defines your ServerApp +| ├── task.py # Defines your model, training and data loading +| └── workflow_with_log.py # Defines a workflow used when `is-demo=true` +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md +``` + +### Install dependencies and project + +Install the dependencies defined in `pyproject.toml` as well as the `secaggexample` package. + +```bash +pip install -e . +``` + +## Run the project + +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. + +### Run with the Simulation Engine + +```bash +flwr run . +``` + +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example + +```bash +flwr run . --run-config "num-server-rounds=5 learning-rate=0.25" +``` + +To adapt the example for practical usage, set `is-demo=false` as shown below. You might want to adjust the `num-shares` and `reconstruction-threshold` settings to suit your requirements. You can override those via `--run-config` as well. + +```bash +flwr run . 
--run-config is-demo=false +``` + +### Run with the Deployment Engine + +> \[!NOTE\] +> An update to this example will show how to run this Flower project with the Deployment Engine and TLS certificates, or with Docker. diff --git a/examples/flower-secure-aggregation/pyproject.toml b/examples/flower-secure-aggregation/pyproject.toml new file mode 100644 index 000000000000..89903184f60a --- /dev/null +++ b/examples/flower-secure-aggregation/pyproject.toml @@ -0,0 +1,46 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "secaggexample" +version = "1.0.0" +description = "Secure Aggregation in Flower" +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]>=1.12.0", + "flwr-datasets[vision]>=0.3.0", + "torch==2.2.1", + "torchvision==0.17.1", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "secaggexample.server_app:app" +clientapp = "secaggexample.client_app:app" + + +[tool.flwr.app.config] +num-server-rounds = 3 +fraction-evaluate = 0.5 +local-epochs = 1 +learning-rate = 0.1 +batch-size = 32 +# Parameters for the SecAgg+ protocol +num-shares = 3 +reconstruction-threshold = 2 +max-weight = 9000 +timeout = 15.0 +# Demo flag +is-demo = true + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 5 diff --git a/examples/flower-secure-aggregation/secaggexample/__init__.py b/examples/flower-secure-aggregation/secaggexample/__init__.py new file mode 100644 index 000000000000..366ceebfae80 --- /dev/null +++ b/examples/flower-secure-aggregation/secaggexample/__init__.py @@ -0,0 +1 @@ +"""secaggexample: A Flower with SecAgg+ app.""" diff --git a/examples/flower-secure-aggregation/secaggexample/client_app.py b/examples/flower-secure-aggregation/secaggexample/client_app.py new file mode 100644 index 000000000000..7f4fd54b98b5 --- /dev/null +++ 
b/examples/flower-secure-aggregation/secaggexample/client_app.py @@ -0,0 +1,91 @@ +"""secaggexample: A Flower with SecAgg+ app.""" + +import time + +import torch +from flwr.client import ClientApp, NumPyClient +from flwr.client.mod import secaggplus_mod +from flwr.common import Context + +from secaggexample.task import Net, get_weights, load_data, set_weights, test, train + + +# Define Flower Client +class FlowerClient(NumPyClient): + def __init__( + self, trainloader, valloader, local_epochs, learning_rate, timeout, is_demo + ): + self.net = Net() + self.trainloader = trainloader + self.valloader = valloader + self.local_epochs = local_epochs + self.lr = learning_rate + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + # For demonstration purposes only + self.timeout = timeout + self.is_demo = is_demo + + def fit(self, parameters, config): + """Train the model with data of this client.""" + set_weights(self.net, parameters) + results = {} + if not self.is_demo: + results = train( + self.net, + self.trainloader, + self.valloader, + self.local_epochs, + self.lr, + self.device, + ) + ret_vec = get_weights(self.net) + + # Force a significant delay for testing purposes + if self.is_demo: + if config.get("drop", False): + print(f"Client dropped for testing purposes.") + time.sleep(self.timeout) + else: + print(f"Client uploading parameters: {ret_vec[0].flatten()[:3]}...") + return ret_vec, len(self.trainloader.dataset), results + + def evaluate(self, parameters, config): + """Evaluate the model on the data this client has.""" + set_weights(self.net, parameters) + loss, accuracy = 0.0, 0.0 + if not self.is_demo: + loss, accuracy = test(self.net, self.valloader, self.device) + return loss, len(self.valloader.dataset), {"accuracy": accuracy} + + +def client_fn(context: Context): + """Construct a Client that will be run in a ClientApp.""" + + # Read the node_config to fetch data partition associated to this node + partition_id = 
context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + + # Read run_config to fetch hyperparameters relevant to this run + batch_size = context.run_config["batch-size"] + is_demo = context.run_config["is-demo"] + trainloader, valloader = load_data( + partition_id, num_partitions, batch_size, is_demo + ) + local_epochs = context.run_config["local-epochs"] + lr = context.run_config["learning-rate"] + # For demostrations purposes only + timeout = context.run_config["timeout"] + + # Return Client instance + return FlowerClient( + trainloader, valloader, local_epochs, lr, timeout, is_demo + ).to_client() + + +# Flower ClientApp +app = ClientApp( + client_fn=client_fn, + mods=[ + secaggplus_mod, + ], +) diff --git a/examples/flower-secure-aggregation/secaggexample/server_app.py b/examples/flower-secure-aggregation/secaggexample/server_app.py new file mode 100644 index 000000000000..0b95d68e4183 --- /dev/null +++ b/examples/flower-secure-aggregation/secaggexample/server_app.py @@ -0,0 +1,81 @@ +"""secaggexample: A Flower with SecAgg+ app.""" + +from logging import DEBUG +from typing import List, Tuple + +from flwr.common import Context, Metrics, ndarrays_to_parameters +from flwr.common.logger import update_console_handler +from flwr.server import Driver, LegacyContext, ServerApp, ServerConfig +from flwr.server.strategy import FedAvg +from flwr.server.workflow import DefaultWorkflow, SecAggPlusWorkflow + +from secaggexample.task import get_weights, make_net +from secaggexample.workflow_with_log import SecAggPlusWorkflowWithLogs + + +# Define metric aggregation function +def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: + # Multiply accuracy of each client by number of examples used + accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + # Aggregate and return custom metric (weighted average) + return {"accuracy": sum(accuracies) / 
sum(examples)} + + +# Flower ServerApp +app = ServerApp() + + +@app.main() +def main(driver: Driver, context: Context) -> None: + + is_demo = context.run_config["is-demo"] + + # Get initial parameters + ndarrays = get_weights(make_net()) + parameters = ndarrays_to_parameters(ndarrays) + + # Define strategy + strategy = FedAvg( + # Select all available clients + fraction_fit=1.0, + min_fit_clients=5, + # Disable evaluation in demo + fraction_evaluate=(0.0 if is_demo else context.run_config["fraction-evaluate"]), + min_available_clients=5, + evaluate_metrics_aggregation_fn=weighted_average, + initial_parameters=parameters, + ) + + # Construct the LegacyContext + num_rounds = context.run_config["num-server-rounds"] + context = LegacyContext( + context=context, + config=ServerConfig(num_rounds=num_rounds), + strategy=strategy, + ) + + # Create fit workflow + # For further information, please see: + # https://flower.ai/docs/framework/ref-api/flwr.server.workflow.SecAggPlusWorkflow.html + if is_demo: + update_console_handler(DEBUG, True, True) + fit_workflow = SecAggPlusWorkflowWithLogs( + num_shares=context.run_config["num-shares"], + reconstruction_threshold=context.run_config["reconstruction-threshold"], + max_weight=1, + timeout=context.run_config["timeout"], + ) + else: + fit_workflow = SecAggPlusWorkflow( + num_shares=context.run_config["num-shares"], + reconstruction_threshold=context.run_config["reconstruction-threshold"], + max_weight=context.run_config["max-weight"], + ) + + # Create the workflow + workflow = DefaultWorkflow(fit_workflow=fit_workflow) + + # Execute + workflow(driver, context) diff --git a/examples/flower-secure-aggregation/secaggexample/task.py b/examples/flower-secure-aggregation/secaggexample/task.py new file mode 100644 index 000000000000..e9cca8ef9115 --- /dev/null +++ b/examples/flower-secure-aggregation/secaggexample/task.py @@ -0,0 +1,128 @@ +"""secaggexample: A Flower with SecAgg+ app.""" + +import random +from collections import 
OrderedDict +from unittest.mock import Mock + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner +from torch.utils.data import DataLoader +from torchvision.transforms import Compose, Normalize, ToTensor + + +class Net(nn.Module): + """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz')""" + + def __init__(self): + super(Net, self).__init__() + self.conv1 = nn.Conv2d(3, 6, 5) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x): + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = x.view(-1, 16 * 5 * 5) + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + return self.fc3(x) + + +def make_net(seed=42): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + return Net() + + +def get_weights(net): + return [val.cpu().numpy() for _, val in net.state_dict().items()] + + +def set_weights(net, parameters): + params_dict = zip(net.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) + + +fds = None # Cache FederatedDataset + + +def load_data(partition_id: int, num_partitions: int, batch_size: int, is_demo: bool): + """Load partition CIFAR10 data.""" + if is_demo: + trainloader, testloader = Mock(dataset=[0]), Mock(dataset=[0]) + return trainloader, testloader + # Only initialize `FederatedDataset` once + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="uoft-cs/cifar10", + partitioners={"train": partitioner}, + ) + partition = fds.load_partition(partition_id) + # Divide data on each node: 80% train, 20% test + partition_train_test = 
partition.train_test_split(test_size=0.2, seed=42) + pytorch_transforms = Compose( + [ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] + ) + + def apply_transforms(batch): + """Apply transforms to the partition from FederatedDataset.""" + batch["img"] = [pytorch_transforms(img) for img in batch["img"]] + return batch + + partition_train_test = partition_train_test.with_transform(apply_transforms) + trainloader = DataLoader( + partition_train_test["train"], batch_size=batch_size, shuffle=True + ) + testloader = DataLoader(partition_train_test["test"], batch_size=batch_size) + return trainloader, testloader + + +def train(net, trainloader, valloader, epochs, learning_rate, device): + """Train the model on the training set.""" + net.to(device) # move model to GPU if available + criterion = torch.nn.CrossEntropyLoss().to(device) + optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9) + net.train() + for _ in range(epochs): + for batch in trainloader: + images = batch["img"] + labels = batch["label"] + optimizer.zero_grad() + criterion(net(images.to(device)), labels.to(device)).backward() + optimizer.step() + + val_loss, val_acc = test(net, valloader, device) + + results = { + "val_loss": val_loss, + "val_accuracy": val_acc, + } + return results + + +def test(net, testloader, device): + """Validate the model on the test set.""" + net.to(device) # move model to GPU if available + criterion = torch.nn.CrossEntropyLoss() + correct, loss = 0, 0.0 + with torch.no_grad(): + for batch in testloader: + images = batch["img"].to(device) + labels = batch["label"].to(device) + outputs = net(images) + loss += criterion(outputs, labels).item() + correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() + accuracy = correct / len(testloader.dataset) + loss = loss / len(testloader) + return loss, accuracy diff --git a/examples/app-secure-aggregation/workflow_with_log.py b/examples/flower-secure-aggregation/secaggexample/workflow_with_log.py 
similarity index 74% rename from examples/app-secure-aggregation/workflow_with_log.py rename to examples/flower-secure-aggregation/secaggexample/workflow_with_log.py index a03ff8c13b6c..75f43d580c4d 100644 --- a/examples/app-secure-aggregation/workflow_with_log.py +++ b/examples/flower-secure-aggregation/secaggexample/workflow_with_log.py @@ -1,14 +1,18 @@ -from flwr.common import Context, log, parameters_to_ndarrays +"""secaggexample: A Flower with SecAgg+ app.""" + from logging import INFO + +import flwr.common.recordset_compat as compat +from flwr.common import Context, log, parameters_to_ndarrays +from flwr.common.secure_aggregation.quantization import quantize from flwr.server import Driver, LegacyContext +from flwr.server.workflow.constant import MAIN_PARAMS_RECORD from flwr.server.workflow.secure_aggregation.secaggplus_workflow import ( SecAggPlusWorkflow, WorkflowState, ) -import numpy as np -from flwr.common.secure_aggregation.quantization import quantize -from flwr.server.workflow.constant import MAIN_PARAMS_RECORD -import flwr.common.recordset_compat as compat + +from secaggexample.task import get_weights, make_net class SecAggPlusWorkflowWithLogs(SecAggPlusWorkflow): @@ -21,8 +25,11 @@ class SecAggPlusWorkflowWithLogs(SecAggPlusWorkflow): node_ids = [] def __call__(self, driver: Driver, context: Context) -> None: + first_3_params = get_weights(make_net())[0].flatten()[:3] _quantized = quantize( - [np.ones(3) for _ in range(5)], self.clipping_range, self.quantization_range + [first_3_params for _ in range(5)], + self.clipping_range, + self.quantization_range, ) log(INFO, "") log( @@ -31,24 +38,24 @@ def __call__(self, driver: Driver, context: Context) -> None: ) log( INFO, - "In the example, each client will upload a vector [1.0, 1.0, 1.0] instead of", + "In the example, clients will skip model training and evaluation", ) - log(INFO, "model updates for demonstration purposes.") + log(INFO, "for demonstration purposes.") log( INFO, "Client 0 is configured 
to drop out before uploading the masked vector.", ) log(INFO, "After quantization, the raw vectors will look like:") for i in range(1, 5): - log(INFO, "\t%s from Client %s", _quantized[i], i) + log(INFO, "\t%s... from Client %s", _quantized[i], i) log( INFO, - "Numbers are rounded to integers stochastically during the quantization", + "Numbers are rounded to integers stochastically during the quantization, ", ) - log(INFO, ", and thus entries may not be identical.") + log(INFO, "and thus vectors may not be identical.") log( INFO, - "The above raw vectors are hidden from the driver through adding masks.", + "The above raw vectors are hidden from the ServerApp through adding masks.", ) log(INFO, "") log( @@ -63,8 +70,8 @@ def __call__(self, driver: Driver, context: Context) -> None: ndarrays = parameters_to_ndarrays(parameters) log( INFO, - "Weighted average of vectors (dequantized): %s", - ndarrays[0], + "Weighted average of parameters (dequantized): %s...", + ndarrays[0].flatten()[:3], ) log( INFO, @@ -88,5 +95,9 @@ def collect_masked_vectors_stage( ret = super().collect_masked_vectors_stage(driver, context, state) for node_id in state.sampled_node_ids - state.active_node_ids: log(INFO, "Client %s dropped out.", self.node_ids.index(node_id)) - log(INFO, "Obtained sum of masked vectors: %s", state.aggregate_ndarrays[1]) + log( + INFO, + "Obtained sum of masked parameters: %s...", + state.aggregate_ndarrays[1].flatten()[:3], + ) return ret diff --git a/examples/flower-simulation-step-by-step-pytorch/Part-I/client.py b/examples/flower-simulation-step-by-step-pytorch/Part-I/client.py index eac831ad1932..3d93510b3d0e 100644 --- a/examples/flower-simulation-step-by-step-pytorch/Part-I/client.py +++ b/examples/flower-simulation-step-by-step-pytorch/Part-I/client.py @@ -1,11 +1,11 @@ from collections import OrderedDict from typing import Dict, Tuple -from flwr.common import NDArrays, Scalar -import torch import flwr as fl +import torch +from flwr.common import NDArrays, 
Scalar -from model import Net, train, test +from model import Net, test, train class FlowerClient(fl.client.NumPyClient): diff --git a/examples/flower-simulation-step-by-step-pytorch/Part-I/dataset.py b/examples/flower-simulation-step-by-step-pytorch/Part-I/dataset.py index 1085ede22c9a..a805906b8d42 100644 --- a/examples/flower-simulation-step-by-step-pytorch/Part-I/dataset.py +++ b/examples/flower-simulation-step-by-step-pytorch/Part-I/dataset.py @@ -1,7 +1,7 @@ import torch -from torch.utils.data import random_split, DataLoader -from torchvision.transforms import ToTensor, Normalize, Compose +from torch.utils.data import DataLoader, random_split from torchvision.datasets import MNIST +from torchvision.transforms import Compose, Normalize, ToTensor def get_mnist(data_path: str = "./data"): diff --git a/examples/flower-simulation-step-by-step-pytorch/Part-I/main.py b/examples/flower-simulation-step-by-step-pytorch/Part-I/main.py index f8124b9353f7..1373f24fbb11 100644 --- a/examples/flower-simulation-step-by-step-pytorch/Part-I/main.py +++ b/examples/flower-simulation-step-by-step-pytorch/Part-I/main.py @@ -1,15 +1,14 @@ import pickle from pathlib import Path +import flwr as fl import hydra from hydra.core.hydra_config import HydraConfig from omegaconf import DictConfig, OmegaConf -import flwr as fl - -from dataset import prepare_dataset from client import generate_client_fn -from server import get_on_fit_config, get_evaluate_fn +from dataset import prepare_dataset +from server import get_evaluate_fn, get_on_fit_config # A decorator for Hydra. 
This tells hydra to by default load the config in conf/base.yaml diff --git a/examples/flower-simulation-step-by-step-pytorch/Part-I/server.py b/examples/flower-simulation-step-by-step-pytorch/Part-I/server.py index 33f618785d56..93350ae2d1ba 100644 --- a/examples/flower-simulation-step-by-step-pytorch/Part-I/server.py +++ b/examples/flower-simulation-step-by-step-pytorch/Part-I/server.py @@ -1,9 +1,7 @@ from collections import OrderedDict - -from omegaconf import DictConfig - import torch +from omegaconf import DictConfig from model import Net, test diff --git a/examples/flower-simulation-step-by-step-pytorch/Part-II/client.py b/examples/flower-simulation-step-by-step-pytorch/Part-II/client.py index 7da9547d7362..098cac293d94 100644 --- a/examples/flower-simulation-step-by-step-pytorch/Part-II/client.py +++ b/examples/flower-simulation-step-by-step-pytorch/Part-II/client.py @@ -1,14 +1,12 @@ from collections import OrderedDict from typing import Dict, Tuple -from flwr.common import NDArrays, Scalar - - -from hydra.utils import instantiate -import torch import flwr as fl +import torch +from flwr.common import NDArrays, Scalar +from hydra.utils import instantiate -from model import train, test +from model import test, train class FlowerClient(fl.client.NumPyClient): diff --git a/examples/flower-simulation-step-by-step-pytorch/Part-II/dataset.py b/examples/flower-simulation-step-by-step-pytorch/Part-II/dataset.py index a80e1c78098e..fb5d8504ed65 100644 --- a/examples/flower-simulation-step-by-step-pytorch/Part-II/dataset.py +++ b/examples/flower-simulation-step-by-step-pytorch/Part-II/dataset.py @@ -1,7 +1,7 @@ import torch -from torch.utils.data import random_split, DataLoader -from torchvision.transforms import ToTensor, Normalize, Compose +from torch.utils.data import DataLoader, random_split from torchvision.datasets import MNIST +from torchvision.transforms import Compose, Normalize, ToTensor def get_mnist(data_path: str = "./data"): diff --git 
a/examples/flower-simulation-step-by-step-pytorch/Part-II/main.py b/examples/flower-simulation-step-by-step-pytorch/Part-II/main.py index d43dd6d50787..6da664df1203 100644 --- a/examples/flower-simulation-step-by-step-pytorch/Part-II/main.py +++ b/examples/flower-simulation-step-by-step-pytorch/Part-II/main.py @@ -1,17 +1,15 @@ import pickle from pathlib import Path +import flwr as fl import hydra -from hydra.utils import instantiate, call from hydra.core.hydra_config import HydraConfig +from hydra.utils import call, instantiate from omegaconf import DictConfig, OmegaConf -import flwr as fl - -from dataset import prepare_dataset from client import generate_client_fn -from server import get_on_fit_config, get_evalulate_fn - +from dataset import prepare_dataset +from server import get_evalulate_fn, get_on_fit_config # !!!! The code in this directory is the result of adpating the project first shown # in to make better use of Hydra's config system. It is recommended to first diff --git a/examples/flower-simulation-step-by-step-pytorch/Part-II/model.py b/examples/flower-simulation-step-by-step-pytorch/Part-II/model.py index 6dc1782f4dc2..f57bc9b5d100 100644 --- a/examples/flower-simulation-step-by-step-pytorch/Part-II/model.py +++ b/examples/flower-simulation-step-by-step-pytorch/Part-II/model.py @@ -1,7 +1,6 @@ import torch import torch.nn as nn import torch.nn.functional as F - from flwr.common.parameter import ndarrays_to_parameters diff --git a/examples/flower-simulation-step-by-step-pytorch/Part-II/server.py b/examples/flower-simulation-step-by-step-pytorch/Part-II/server.py index 8901370e1a06..f1f8293cc6fb 100644 --- a/examples/flower-simulation-step-by-step-pytorch/Part-II/server.py +++ b/examples/flower-simulation-step-by-step-pytorch/Part-II/server.py @@ -1,10 +1,8 @@ from collections import OrderedDict - -from omegaconf import DictConfig -from hydra.utils import instantiate - import torch +from hydra.utils import instantiate +from omegaconf import DictConfig 
from model import Net, test diff --git a/examples/flower-simulation-step-by-step-pytorch/Part-II/toy.py b/examples/flower-simulation-step-by-step-pytorch/Part-II/toy.py index 7a6c057668c4..0bae932e3bce 100644 --- a/examples/flower-simulation-step-by-step-pytorch/Part-II/toy.py +++ b/examples/flower-simulation-step-by-step-pytorch/Part-II/toy.py @@ -1,7 +1,6 @@ import hydra -from omegaconf import DictConfig, OmegaConf - from hydra.utils import call, instantiate +from omegaconf import DictConfig, OmegaConf def function_test(x: int, y: int): diff --git a/examples/flower-simulation-step-by-step-pytorch/README.md b/examples/flower-simulation-step-by-step-pytorch/README.md index beb8dd7f6f95..b00afedbe80b 100644 --- a/examples/flower-simulation-step-by-step-pytorch/README.md +++ b/examples/flower-simulation-step-by-step-pytorch/README.md @@ -1,3 +1,9 @@ +--- +tags: [basic, vision, simulation] +dataset: [MNIST] +framework: [torch] +--- + # Flower Simulation Step-by-Step > Since this tutorial (and its video series) was put together, Flower has been updated a few times. As a result, some of the steps to construct the environment (see below) have been updated. Some parts of the code have also been updated. Overall, the content of this tutorial and how things work remains the same as in the video tutorials. 
diff --git a/examples/flower-via-docker-compose/Dockerfile b/examples/flower-via-docker-compose/Dockerfile index ee6fee3103a5..9e7d4ff7abaa 100644 --- a/examples/flower-via-docker-compose/Dockerfile +++ b/examples/flower-via-docker-compose/Dockerfile @@ -1,5 +1,5 @@ # Use an official Python runtime as a parent image -FROM python:3.10-slim-buster +FROM python:3.11-slim-buster # Set the working directory in the container to /app WORKDIR /app @@ -10,10 +10,9 @@ COPY ./requirements.txt /app/requirements.txt # Install gcc and other dependencies RUN apt-get update && apt-get install -y \ gcc \ - python3-dev && \ - rm -rf /var/lib/apt/lists/* + pkg-config \ + libhdf5-dev \ + && rm -rf /var/lib/apt/lists/* # Install any needed packages specified in requirements.txt -RUN pip install -r requirements.txt - - +RUN pip install --no-cache-dir -r requirements.txt diff --git a/examples/flower-via-docker-compose/README.md b/examples/flower-via-docker-compose/README.md index 3ef1ac37bcda..3325a731fecf 100644 --- a/examples/flower-via-docker-compose/README.md +++ b/examples/flower-via-docker-compose/README.md @@ -1,3 +1,10 @@ +--- +title: Leveraging Flower and Docker for Device Heterogeneity Management in FL +tags: [deployment, vision, tutorial] +dataset: [CIFAR-10] +framework: [Docker, tensorflow] +--- + # Leveraging Flower and Docker for Device Heterogeneity Management in Federated Learning

diff --git a/examples/flower-via-docker-compose/client.py b/examples/flower-via-docker-compose/client.py index c894143532a1..33ed64d3270d 100644 --- a/examples/flower-via-docker-compose/client.py +++ b/examples/flower-via-docker-compose/client.py @@ -1,10 +1,11 @@ -import os import argparse +import logging +import os + import flwr as fl import tensorflow as tf -import logging from helpers.load_data import load_data -import os + from model.model import Model logging.basicConfig(level=logging.INFO) # Configure logging diff --git a/examples/flower-via-docker-compose/config/grafana.ini b/examples/flower-via-docker-compose/config/grafana.ini index 775f39d7ec22..208eb6e427bf 100644 --- a/examples/flower-via-docker-compose/config/grafana.ini +++ b/examples/flower-via-docker-compose/config/grafana.ini @@ -1,8 +1,3 @@ -[security] -allow_embedding = true -admin_user = admin -admin_password = admin - [dashboards] default_home_dashboard_path = /etc/grafana/provisioning/dashboards/dashboard_index.json diff --git a/examples/flower-via-docker-compose/config/provisioning/dashboards/dashboard_index.json b/examples/flower-via-docker-compose/config/provisioning/dashboards/dashboard_index.json index b52f19c57508..75ee224b0009 100644 --- a/examples/flower-via-docker-compose/config/provisioning/dashboards/dashboard_index.json +++ b/examples/flower-via-docker-compose/config/provisioning/dashboards/dashboard_index.json @@ -15,12 +15,12 @@ } ] }, - "description": "Simple exporter for cadvisor only", + "description": "Simple exporter for cadvisor and application metrics", "editable": true, "fiscalYearStartMonth": 0, "gnetId": 14282, "graphTooltip": 0, - "id": 12, + "id": 1, "links": [], "liveNow": false, "panels": [ @@ -36,10 +36,7 @@ "type": "row" }, { - "datasource": { - "type": "prometheus", - "uid": "db69454e-e558-479e-b4fc-80db52bf91da" - }, + "datasource": {}, "description": "Averaged federated accuracy across clients", "fieldConfig": { "defaults": { @@ -113,6 +110,7 @@ "showLegend": 
false }, "tooltip": { + "maxHeight": 600, "mode": "single", "sort": "none" } @@ -124,7 +122,7 @@ "uid": "db69454e-e558-479e-b4fc-80db52bf91da" }, "disableTextWrap": false, - "editorMode": "builder", + "editorMode": "code", "expr": "model_accuracy", "fullMetaSearch": false, "includeNullMetadata": true, @@ -139,10 +137,7 @@ "type": "timeseries" }, { - "datasource": { - "type": "prometheus", - "uid": "db69454e-e558-479e-b4fc-80db52bf91da" - }, + "datasource": {}, "description": "Averaged Federated Loss across clients", "fieldConfig": { "defaults": { @@ -213,6 +208,7 @@ "showLegend": false }, "tooltip": { + "maxHeight": 600, "mode": "single", "sort": "none" } @@ -224,7 +220,7 @@ "uid": "db69454e-e558-479e-b4fc-80db52bf91da" }, "disableTextWrap": false, - "editorMode": "builder", + "editorMode": "code", "expr": "model_loss", "fullMetaSearch": false, "includeNullMetadata": true, @@ -240,10 +236,7 @@ }, { "collapsed": false, - "datasource": { - "type": "prometheus", - "uid": "db69454e-e558-479e-b4fc-80db52bf91da" - }, + "datasource": {}, "gridPos": { "h": 1, "w": 24, @@ -265,54 +258,132 @@ "type": "row" }, { - "aliasColors": { - "client1": "red", - "client2": "blue", - "server": "yellow" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + "datasource": {}, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" 
+ }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "client1" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "client2" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "blue", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "server" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "yellow", + "mode": "fixed" + } + } + ] + } + ] }, - "fill": 1, - "fillGradient": 0, "gridPos": { "h": 7, "w": 24, "x": 0, "y": 10 }, - "hiddenSeries": false, "id": 15, - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null as zero", "options": { - "alertThreshold": true + "legend": { + "calcs": [ + "mean", + "max" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "multi", + "sort": "none" + } }, - "percentage": false, "pluginVersion": "10.2.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, "targets": [ { "datasource": { @@ -328,44 +399,12 @@ "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "CPU Usage", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:606", - "format": "percent", - "logBase": 1, - "show": 
true - }, - { - "$$hashKey": "object:607", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { "collapsed": false, - "datasource": { - "type": "prometheus", - "uid": "db69454e-e558-479e-b4fc-80db52bf91da" - }, + "datasource": {}, "gridPos": { "h": 1, "w": 24, @@ -387,61 +426,138 @@ "type": "row" }, { - "aliasColors": { - "client1": "red", - "client2": "blue", - "server": "yellow" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + "datasource": {}, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "client1" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "client2" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "blue", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "server" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "yellow", + "mode": 
"fixed" + } + } + ] + } + ] }, - "fill": 1, - "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 0, "y": 18 }, - "hiddenSeries": false, "id": 9, - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null as zero", "options": { - "alertThreshold": true + "legend": { + "calcs": [ + "mean", + "max" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "multi", + "sort": "none" + } }, - "percentage": false, "pluginVersion": "10.2.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, "targets": [ { "datasource": { "type": "prometheus", "uid": "db69454e-e558-479e-b4fc-80db52bf91da" }, - "editorMode": "code", + "editorMode": "builder", "expr": "sum(container_memory_rss{instance=~\"$host\",name=~\"$container\",name=~\".+\", name !~ \"(prometheus|cadvisor|grafana)\"}) by (name)", "hide": false, "interval": "", @@ -450,94 +566,142 @@ "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Memory Usage", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:606", - "format": "bytes", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:607", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "client1": "red", - "client2": "blue", - "server": "yellow" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "db69454e-e558-479e-b4fc-80db52bf91da" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - 
"w": 12, - "x": 12, - "y": 18 - }, - "hiddenSeries": false, - "id": 14, - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null as zero", + "datasource": {}, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "client1" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "client2" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "blue", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "server" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "yellow", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 18 + }, + "id": 14, "options": { - "alertThreshold": true + "legend": { + "calcs": [ + "mean", + "max" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": 
{ + "maxHeight": 600, + "mode": "multi", + "sort": "none" + } }, - "percentage": false, "pluginVersion": "10.2.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, "targets": [ { "datasource": { "type": "prometheus", "uid": "db69454e-e558-479e-b4fc-80db52bf91da" }, - "editorMode": "code", + "editorMode": "builder", "expr": "sum(container_memory_cache{instance=~\"$host\",name=~\"$container\",name=~\".+\", name !~ \"(prometheus|cadvisor|grafana)\"}) by (name)", "hide": false, "interval": "", @@ -546,44 +710,12 @@ "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Memory Cached", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:606", - "format": "bytes", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:607", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { "collapsed": false, - "datasource": { - "type": "prometheus", - "uid": "db69454e-e558-479e-b4fc-80db52bf91da" - }, + "datasource": {}, "gridPos": { "h": 1, "w": 24, @@ -605,63 +737,138 @@ "type": "row" }, { - "aliasColors": { - "client1": "red", - "client2": "blue", - "server": "yellow" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + "datasource": {}, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, 
+ "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "Bps" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "client1" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "client2" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "blue", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "server" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "yellow", + "mode": "fixed" + } + } + ] + } + ] }, - "fill": 1, - "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 0, "y": 27 }, - "hiddenSeries": false, "id": 4, - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true + "legend": { + "calcs": [ + "mean", + "max" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "multi", + "sort": "none" + } }, - "percentage": false, "pluginVersion": "10.2.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, "targets": [ { "datasource": { "type": "prometheus", "uid": "db69454e-e558-479e-b4fc-80db52bf91da" }, - "editorMode": "code", + "editorMode": "builder", "expr": 
"sum(rate(container_network_receive_bytes_total{instance=~\"$host\",name=~\"$container\",name=~\".+\", name !~ \"(prometheus|cadvisor|grafana)\"}[10s])) by (name)", "hide": false, "interval": "", @@ -670,94 +877,142 @@ "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Received Network Traffic", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:674", - "format": "Bps", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:675", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "client1": "red", - "client2": "blue", - "server": "yellow" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + "datasource": {}, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "Bps" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "client1" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "red", + 
"mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "client2" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "blue", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "server" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "yellow", + "mode": "fixed" + } + } + ] + } + ] }, - "fill": 1, - "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 12, "y": 27 }, - "hiddenSeries": false, "id": 6, - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true + "legend": { + "calcs": [ + "mean", + "max" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "multi", + "sort": "none" + } }, - "percentage": false, "pluginVersion": "10.2.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, "targets": [ { "datasource": { "type": "prometheus", "uid": "db69454e-e558-479e-b4fc-80db52bf91da" }, - "editorMode": "code", + "editorMode": "builder", "expr": "sum(rate(container_network_transmit_bytes_total{instance=~\"$host\",name=~\"$container\",name=~\".+\", name !~ \"(prometheus|cadvisor|grafana)\"}[10s])) by (name)", "interval": "", "legendFormat": "{{name}}", @@ -765,37 +1020,8 @@ "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Sent Network Traffic", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:832", - "format": "Bps", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:833", - "format": "short", 
- "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { "collapsed": false, @@ -824,10 +1050,7 @@ "type": "row" }, { - "datasource": { - "type": "prometheus", - "uid": "db69454e-e558-479e-b4fc-80db52bf91da" - }, + "datasource": {}, "fieldConfig": { "defaults": { "custom": { @@ -916,18 +1139,19 @@ "showHeader": true, "sortBy": [] }, - "pluginVersion": "10.2.2", + "pluginVersion": "11.0.0", "targets": [ { "datasource": { "type": "prometheus", "uid": "db69454e-e558-479e-b4fc-80db52bf91da" }, + "editorMode": "code", "expr": "(time() - container_start_time_seconds{instance=~\"$host\",name=~\"$container\",name=~\".+\"})/86400", "format": "table", "instant": true, "interval": "", - "legendFormat": "{{name}}", + "legendFormat": "__auto", "refId": "A" } ], @@ -969,8 +1193,8 @@ "type": "table" } ], - "refresh": "auto", - "schemaVersion": 38, + "refresh": "5s", + "schemaVersion": 39, "tags": [], "templating": { "list": [ @@ -1042,10 +1266,11 @@ "from": "now-15m", "to": "now" }, + "timeRangeUpdatedDuringEditOrView": false, "timepicker": {}, "timezone": "", - "title": "Cadvisor exporter Copy", + "title": "Flower Dashboard", "uid": "fcf2a8da-792c-4b9f-a22f-876820b53c2f", - "version": 2, + "version": 3, "weekStart": "" -} \ No newline at end of file +} diff --git a/examples/flower-via-docker-compose/config/provisioning/datasources/prometheus-datasource.yml b/examples/flower-via-docker-compose/config/provisioning/datasources/prometheus-datasource.yml index 7c8ce00fdcdc..2ae3f9c7757a 100644 --- a/examples/flower-via-docker-compose/config/provisioning/datasources/prometheus-datasource.yml +++ b/examples/flower-via-docker-compose/config/provisioning/datasources/prometheus-datasource.yml @@ -1,9 +1,9 @@ apiVersion: 1 datasources: -- name: Prometheus - type: prometheus - access: proxy - uid: db69454e-e558-479e-b4fc-80db52bf91da - url: http://host.docker.internal:9090 - isDefault: true + - name: Prometheus + type: prometheus + access: proxy 
+ url: http://prometheus:9090 + isDefault: true + uid: db69454e-e558-479e-b4fc-80db52bf91da diff --git a/examples/flower-via-docker-compose/helpers/generate_docker_compose.py b/examples/flower-via-docker-compose/helpers/generate_docker_compose.py index cde553a95e68..8aecc583ed92 100644 --- a/examples/flower-via-docker-compose/helpers/generate_docker_compose.py +++ b/examples/flower-via-docker-compose/helpers/generate_docker_compose.py @@ -1,5 +1,5 @@ -import random import argparse +import random parser = argparse.ArgumentParser(description="Generated Docker Compose") parser.add_argument( @@ -31,7 +31,6 @@ def create_docker_compose(args): ] docker_compose_content = f""" -version: '3' services: prometheus: image: prom/prometheus:latest @@ -63,7 +62,7 @@ def create_docker_compose(args): - /sys:/sys:ro - /var/lib/docker/:/var/lib/docker:ro - /dev/disk/:/dev/disk:ro - - /var/run/docker.sock:/var/run/docker.sock + - /var/run/docker.sock:/var/run/docker.sock grafana: image: grafana/grafana:latest @@ -84,7 +83,6 @@ def create_docker_compose(args): command: - --config=/etc/grafana/grafana.ini - server: container_name: server build: @@ -96,11 +94,12 @@ def create_docker_compose(args): DOCKER_HOST_IP: host.docker.internal volumes: - .:/app - - /var/run/docker.sock:/var/run/docker.sock + - /var/run/docker.sock:/var/run/docker.sock ports: - "6000:6000" - "8265:8265" - "8000:8000" + stop_signal: SIGINT depends_on: - prometheus - grafana @@ -134,6 +133,7 @@ def create_docker_compose(args): FLASK_RUN_PORT: {6000 + i} container_name: client{i} DOCKER_HOST_IP: host.docker.internal + stop_signal: SIGINT """ docker_compose_content += "volumes:\n grafana-storage:\n" diff --git a/examples/flower-via-docker-compose/helpers/load_data.py b/examples/flower-via-docker-compose/helpers/load_data.py index b7d6b0de26c5..aecb130a4eb5 100644 --- a/examples/flower-via-docker-compose/helpers/load_data.py +++ b/examples/flower-via-docker-compose/helpers/load_data.py @@ -1,7 +1,8 @@ +import logging + 
import numpy as np import tensorflow as tf from flwr_datasets import FederatedDataset -import logging logging.basicConfig(level=logging.INFO) # Configure logging logger = logging.getLogger(__name__) # Create logger for the module diff --git a/examples/flower-via-docker-compose/server.py b/examples/flower-via-docker-compose/server.py index 99d1a7ef7399..fd5292dd061a 100644 --- a/examples/flower-via-docker-compose/server.py +++ b/examples/flower-via-docker-compose/server.py @@ -1,8 +1,10 @@ import argparse -import flwr as fl import logging + +import flwr as fl + +from prometheus_client import Gauge, start_http_server from strategy.strategy import FedCustom -from prometheus_client import start_http_server, Gauge # Initialize Logging logging.basicConfig(level=logging.INFO) diff --git a/examples/flower-via-docker-compose/strategy/strategy.py b/examples/flower-via-docker-compose/strategy/strategy.py index 9471a99f037f..a2170ed86b49 100644 --- a/examples/flower-via-docker-compose/strategy/strategy.py +++ b/examples/flower-via-docker-compose/strategy/strategy.py @@ -1,9 +1,11 @@ +import logging from typing import Dict, List, Optional, Tuple, Union -from flwr.common import Scalar, EvaluateRes + +import flwr as fl +from flwr.common import EvaluateRes, Scalar from flwr.server.client_proxy import ClientProxy from flwr.server.strategy.aggregate import aggregate, weighted_loss_avg -import flwr as fl -import logging + from prometheus_client import Gauge logging.basicConfig(level=logging.INFO) # Configure logging diff --git a/examples/flowertune-llm/README.md b/examples/flowertune-llm/README.md new file mode 100644 index 000000000000..51cae73ae88a --- /dev/null +++ b/examples/flowertune-llm/README.md @@ -0,0 +1,118 @@ +--- +tags: [llm, nlp, LLama] +dataset: [Alpaca-GPT4] +framework: [PEFT, torch] +--- + +# FlowerTune LLM: Federated LLM Fine-tuning with Flower + +Large language models (LLMs), which have been trained on vast amounts of publicly accessible data, have shown remarkable 
effectiveness in a wide range of areas. +However, despite the fact that more data typically leads to improved performance, there is a concerning prospect that the supply of high-quality public data will deplete within a few years. +Federated LLM training could unlock access to an endless pool of distributed private data by allowing multiple data owners to collaboratively train a shared model without the need to exchange raw data. + +This introductory example conducts federated instruction tuning with pretrained [OpenLLaMA](https://huggingface.co/openlm-research) models on [Alpaca-GPT4](https://huggingface.co/datasets/vicgalle/alpaca-gpt4) dataset. +We implement FlowerTune LLM by integrating a bundle of techniques: 1) We use [Flower Datasets](https://flower.dev/docs/datasets/) to download, partition and preprocess the dataset. 2) The fine-tuning is done using the [🤗PEFT](https://huggingface.co/docs/peft/en/index) library. 3) We use Flower's Simulation Engine to simulate the LLM fine-tuning process in federated way, +which allows users to perform the training on a single GPU. + +## Set up the project + +Start by cloning the example project: + +```shell +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/flowertune-llm . \ + && rm -rf _tmp \ + && cd flowertune-llm +``` + +This will create a new directory called `flowertune-llm` with the following structure: + +```shell +flowertune-llm +├── flowertune_llm +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ ├── dataset.py # Defines your dataset and tokenizer +│ └── models.py # Defines your models +│ +├── pyproject.toml # Project metadata like dependencies and configs +├── test.py # Test pre-trained model +└── README.md +``` + +### Install dependencies and project + +Install the dependencies defined in `pyproject.toml` as well as the `flowertune_llm` package. + +```bash +pip install -e . 
+``` + +## Run the project + +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. + +### Run with the Simulation Engine + +```bash +flwr run . +``` + +This command will run FL simulations with a 4-bit [OpenLLaMA 3Bv2](https://huggingface.co/openlm-research/open_llama_3b_v2) model involving 2 clients per round for 100 FL rounds. You can override configuration parameters directly from the command line. Below are a few settings you might want to test: + +```bash +# Use OpenLLaMA-7B instead of 3B and 8-bit quantization +flwr run . --run-config "model.name='openlm-research/open_llama_7b_v2' model.quantization=8" + +# Run for 50 rounds but increase the fraction of clients that participate per round to 25% +flwr run . --run-config "num-server-rounds=50 strategy.fraction-fit=0.25" +``` + +### Run with the Deployment Engine + +> \[!NOTE\] +> An update to this example will show how to run this Flower application with the Deployment Engine and TLS certificates, or with Docker. + +## Expected results + +![](_static/train_loss_smooth.png) + +As expected, the OpenLLaMA-7B model works better than its 3B version with lower training loss. With the hyperparameters tested, the 8-bit model seems to deliver lower training loss for the smaller 3B model compared to its 4-bit version. 
+ +## VRAM consumption + +| Models | 7-billion (8-bit) | 7-billion (4-bit) | 3-billion (8-bit) | 3-billion (4-bit) | +| :----: | :---------------: | :---------------: | :---------------: | :---------------: | +| VRAM | ~22.00 GB | ~16.50 GB | ~13.50 GB | ~10.60 GB | + +We make use of the [bitsandbytes](https://huggingface.co/docs/bitsandbytes/main/en/index) library in conjunction with [PEFT](https://huggingface.co/docs/peft/en/index) to derive LLMs that can be fine-tuned efficiently. +The above table shows the VRAM consumption per client for the different models considered in this example. +You can adjust the CPU/GPU resources you assign to each of the clients based on your device. +For example, it is easy to train 2 concurrent clients on each GPU (24 GB VRAM) if you choose 3-billion (4-bit) model. +Assigning 50% of the GPU's VRAM to each client by setting `options.backend.clientapp-gpus = 0.5` under `[tool.flwr.federations.local-simulation]` in `pyproject.toml`. + +## Test with your Questions + +We provide a script to test your trained model by passing your specified questions. For example: + +```bash +python test.py --peft-path=/path/to/trained-model-dir/ \ + --question="What is the ideal 1-day plan in London?" +``` + +An answer generated from federated trained 7-billion (8-bit) OpenLLaMA model: + +``` +Great choice. +London has so much to offer, and you can really soak up all the sights and sounds in just a single day. +Here's a suggested itinerary for you. +Start your day off with a hearty breakfast at an authentic British diner. +Then head to the iconic Big Ben and the Houses of Parliament to learn about the history of the city. +Next, make your way to Westminster Abbey to see the many historical monuments and memorials. +From there, cross the river Thames to the Tower of London, which is home to the Crown Jewels of England and Scotland. 
+Finally, end your day with a relaxing visit to the London Eye, the tallest Ferris wheel in Europe, for a beautiful view of the city. +``` + +The [`Vicuna`](https://huggingface.co/lmsys/vicuna-13b-v1.1) template we used in this example is for a chat assistant. +The generated answer is expected to be a multi-turn conversation. Feel free to try more interesting questions! diff --git a/examples/llm-flowertune/_static/train_loss_smooth.png b/examples/flowertune-llm/_static/train_loss_smooth.png similarity index 100% rename from examples/llm-flowertune/_static/train_loss_smooth.png rename to examples/flowertune-llm/_static/train_loss_smooth.png diff --git a/examples/flowertune-llm/flowertune_llm/__init__.py b/examples/flowertune-llm/flowertune_llm/__init__.py new file mode 100644 index 000000000000..e786a4d4b73d --- /dev/null +++ b/examples/flowertune-llm/flowertune_llm/__init__.py @@ -0,0 +1 @@ +"""flowertune_llm.""" diff --git a/examples/flowertune-llm/flowertune_llm/client_app.py b/examples/flowertune-llm/flowertune_llm/client_app.py new file mode 100644 index 000000000000..b61a733b29cf --- /dev/null +++ b/examples/flowertune-llm/flowertune_llm/client_app.py @@ -0,0 +1,126 @@ +"""flowertune-llm: A Flower / FlowerTune app.""" + +import os +import warnings +from typing import Dict, Tuple + +import torch +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context +from flwr.common.config import unflatten_dict +from flwr.common.typing import NDArrays, Scalar +from omegaconf import DictConfig + +from transformers import TrainingArguments +from trl import SFTTrainer + +from flowertune_llm.dataset import ( + get_tokenizer_and_data_collator_and_propt_formatting, + load_data, + replace_keys, +) +from flowertune_llm.models import ( + cosine_annealing, + get_model, + set_parameters, + get_parameters, +) + +# Avoid warnings +os.environ["TOKENIZERS_PARALLELISM"] = "true" +os.environ["RAY_DISABLE_DOCKER_CPU_WARNING"] = "1" +warnings.filterwarnings("ignore", 
category=UserWarning) + + +# pylint: disable=too-many-arguments +# pylint: disable=too-many-instance-attributes +class FlowerClient(NumPyClient): + """Standard Flower client for CNN training.""" + + def __init__( + self, + model_cfg: DictConfig, + train_cfg: DictConfig, + trainset, + tokenizer, + formatting_prompts_func, + data_collator, + num_rounds, + ): # pylint: disable=too-many-arguments + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + self.train_cfg = train_cfg + self.training_argumnets = TrainingArguments(**train_cfg.training_arguments) + self.tokenizer = tokenizer + self.formatting_prompts_func = formatting_prompts_func + self.data_collator = data_collator + self.num_rounds = num_rounds + self.trainset = trainset + + # instantiate model + self.model = get_model(model_cfg) + + def fit( + self, parameters: NDArrays, config: Dict[str, Scalar] + ) -> Tuple[NDArrays, int, Dict]: + """Implement distributed fit function for a given client.""" + set_parameters(self.model, parameters) + + new_lr = cosine_annealing( + int(config["current_round"]), + self.num_rounds, + self.train_cfg.learning_rate_max, + self.train_cfg.learning_rate_min, + ) + + self.training_argumnets.learning_rate = new_lr + self.training_argumnets.output_dir = config["save_path"] + + # Construct trainer + trainer = SFTTrainer( + model=self.model, + tokenizer=self.tokenizer, + args=self.training_argumnets, + max_seq_length=self.train_cfg.seq_length, + train_dataset=self.trainset, + formatting_func=self.formatting_prompts_func, + data_collator=self.data_collator, + ) + + # Do local training + results = trainer.train() + + return ( + get_parameters(self.model), + len(self.trainset), + {"train_loss": results.training_loss}, + ) + + +def client_fn(context: Context) -> FlowerClient: + """Create a Flower client representing a single organization.""" + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + num_rounds = 
context.run_config["num-server-rounds"] + cfg = DictConfig(replace_keys(unflatten_dict(context.run_config))) + + # Let's get the client partition + client_trainset = load_data(partition_id, num_partitions, cfg.dataset.name) + ( + tokenizer, + data_collator, + formatting_prompts_func, + ) = get_tokenizer_and_data_collator_and_propt_formatting(cfg.model.name) + + return FlowerClient( + cfg.model, + cfg.train, + client_trainset, + tokenizer, + formatting_prompts_func, + data_collator, + num_rounds, + ).to_client() + + +# Flower ClientApp +app = ClientApp(client_fn) diff --git a/examples/llm-flowertune/dataset.py b/examples/flowertune-llm/flowertune_llm/dataset.py similarity index 53% rename from examples/llm-flowertune/dataset.py rename to examples/flowertune-llm/flowertune_llm/dataset.py index 571be31f7fba..87595b3f9ccd 100644 --- a/examples/llm-flowertune/dataset.py +++ b/examples/flowertune-llm/flowertune_llm/dataset.py @@ -1,6 +1,11 @@ from transformers import AutoTokenizer from trl import DataCollatorForCompletionOnlyLM +from flwr_datasets.partitioner import IidPartitioner +from flwr_datasets import FederatedDataset + +FDS = None # Cache FederatedDataset + def formatting_prompts_func(example): output_texts = [] @@ -27,3 +32,31 @@ def get_tokenizer_and_data_collator_and_propt_formatting(model_name: str): ) return tokenizer, data_collator, formatting_prompts_func + + +def load_data(partition_id: int, num_partitions: int, dataset_name: str): + """Load partition data.""" + # Only initialize `FederatedDataset` once + global FDS + if FDS is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + FDS = FederatedDataset( + dataset=dataset_name, + partitioners={"train": partitioner}, + ) + client_trainset = FDS.load_partition(partition_id, "train") + client_trainset = client_trainset.rename_column("output", "response") + + return client_trainset + + +def replace_keys(input_dict, match="-", target="_"): + """Recursively replace match string with target string 
in dictionary keys.""" + new_dict = {} + for key, value in input_dict.items(): + new_key = key.replace(match, target) + if isinstance(value, dict): + new_dict[new_key] = replace_keys(value, match, target) + else: + new_dict[new_key] = value + return new_dict diff --git a/examples/llm-flowertune/models.py b/examples/flowertune-llm/flowertune_llm/models.py similarity index 65% rename from examples/llm-flowertune/models.py rename to examples/flowertune-llm/flowertune_llm/models.py index 78eef75d10d2..e1609caeb2fc 100644 --- a/examples/llm-flowertune/models.py +++ b/examples/flowertune-llm/flowertune_llm/models.py @@ -1,11 +1,18 @@ +import math + import torch from omegaconf import DictConfig -from transformers import AutoModelForCausalLM -from transformers import BitsAndBytesConfig -from peft import get_peft_model, LoraConfig +from collections import OrderedDict +from peft import ( + LoraConfig, + get_peft_model, + get_peft_model_state_dict, + set_peft_model_state_dict, +) from peft.utils import prepare_model_for_kbit_training +from transformers import AutoModelForCausalLM, BitsAndBytesConfig -import math +from flwr.common.typing import NDArrays def cosine_annealing( @@ -54,3 +61,17 @@ def get_model(model_cfg: DictConfig): ) return get_peft_model(model, peft_config) + + +def set_parameters(model, parameters: NDArrays) -> None: + """Change the parameters of the model using the given ones.""" + peft_state_dict_keys = get_peft_model_state_dict(model).keys() + params_dict = zip(peft_state_dict_keys, parameters) + state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict}) + set_peft_model_state_dict(model, state_dict) + + +def get_parameters(model) -> NDArrays: + """Return the parameters of the current net.""" + state_dict = get_peft_model_state_dict(model) + return [val.cpu().numpy() for _, val in state_dict.items()] diff --git a/examples/flowertune-llm/flowertune_llm/server_app.py b/examples/flowertune-llm/flowertune_llm/server_app.py new file mode 100644 
index 000000000000..ff0da90c8b9b --- /dev/null +++ b/examples/flowertune-llm/flowertune_llm/server_app.py @@ -0,0 +1,94 @@ +"""flowertune-llm: A Flower / FlowerTune app.""" + +import os +from datetime import datetime + +from flwr.common import Context, ndarrays_to_parameters +from flwr.common.config import unflatten_dict +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg +from omegaconf import DictConfig + +from flowertune_llm.models import get_model, get_parameters, set_parameters +from flowertune_llm.dataset import replace_keys + + +# Get function that will be executed by the strategy's evaluate() method +# Here we use it to save global model checkpoints +def get_evaluate_fn(model_cfg, save_every_round, total_round, save_path): + """Return an evaluation function for saving global model.""" + + def evaluate(server_round: int, parameters, config): + # Save model + if server_round != 0 and ( + server_round == total_round or server_round % save_every_round == 0 + ): + # Init model + model = get_model(model_cfg) + set_parameters(model, parameters) + + model.save_pretrained(f"{save_path}/peft_{server_round}") + + return 0.0, {} + + return evaluate + + +def get_on_fit_config(save_path): + """Return a function that will be used to construct the config that the client's + fit() method will receive.""" + + def fit_config_fn(server_round: int): + fit_config = {} + fit_config["current_round"] = server_round + fit_config["save_path"] = save_path + return fit_config + + return fit_config_fn + + +def fit_weighted_average(metrics): + """Aggregate (federated) evaluation metrics.""" + # Multiply accuracy of each client by number of examples used + losses = [num_examples * m["train_loss"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + # Aggregate and return custom metric (weighted average) + return {"train_loss": sum(losses) / sum(examples)} + + +def server_fn(context: 
Context): + """Construct components that set the ServerApp behaviour.""" + # Create output directory given current timestamp + current_time = datetime.now() + folder_name = current_time.strftime("%Y-%m-%d_%H-%M-%S") + save_path = os.path.join(os.getcwd(), f"results/{folder_name}") + os.makedirs(save_path, exist_ok=True) + + # Read from config + num_rounds = context.run_config["num-server-rounds"] + cfg = DictConfig(replace_keys(unflatten_dict(context.run_config))) + + # Get initial model weights + init_model = get_model(cfg.model) + init_model_parameters = get_parameters(init_model) + init_model_parameters = ndarrays_to_parameters(init_model_parameters) + + # Define strategy + strategy = FedAvg( + fraction_fit=cfg.strategy.fraction_fit, + fraction_evaluate=cfg.strategy.fraction_evaluate, + on_fit_config_fn=get_on_fit_config(save_path), + fit_metrics_aggregation_fn=fit_weighted_average, + initial_parameters=init_model_parameters, + evaluate_fn=get_evaluate_fn( + cfg.model, cfg.train.save_every_round, num_rounds, save_path + ), + ) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Flower ServerApp +app = ServerApp(server_fn=server_fn) diff --git a/examples/flowertune-llm/pyproject.toml b/examples/flowertune-llm/pyproject.toml new file mode 100644 index 000000000000..4925f3cba15a --- /dev/null +++ b/examples/flowertune-llm/pyproject.toml @@ -0,0 +1,66 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "flowertune-llm" +version = "1.0.0" +description = "FlowerTune LLM: Federated LLM Fine-tuning with Flower" +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]==1.12.0", + "flwr-datasets>=0.3.0", + "trl==0.8.1", + "bitsandbytes==0.43.0", + "scipy==1.13.0", + "peft==0.6.2", + "fschat[model_worker,webui]==0.2.35", + "transformers==4.39.3", + "sentencepiece==0.2.0", + "omegaconf==2.3.0", + "hf_transfer==0.1.8", +] + 
+[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "flowertune_llm.server_app:app" +clientapp = "flowertune_llm.client_app:app" + +[tool.flwr.app.config] +dataset.name = "vicgalle/alpaca-gpt4" +model.name = "openlm-research/open_llama_3b_v2" +model.quantization = 4 +model.gradient-checkpointing = true +model.lora.peft-lora-r = 32 +model.lora.peft-lora-alpha = 64 +train.save-every-round = 5 +train.learning-rate-max = 5e-5 +train.learning-rate-min = 1e-6 +train.seq-length = 512 +train.training-arguments.output-dir = "" +train.training-arguments.learning-rate = "" +train.training-arguments.per-device-train-batch-size = 16 +train.training-arguments.gradient-accumulation-steps = 1 +train.training-arguments.logging-steps = 10 +train.training-arguments.num-train-epochs = 3 +train.training-arguments.max-steps = 10 +train.training-arguments.save-steps = 1000 +train.training-arguments.save-total-limit = 10 +train.training-arguments.gradient-checkpointing = true +train.training-arguments.lr-scheduler-type = "constant" +strategy.fraction-fit = 0.1 +strategy.fraction-evaluate = 0.0 +num-server-rounds = 100 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 20 +options.backend.client-resources.num-cpus = 8 +options.backend.client-resources.num-gpus = 1.0 diff --git a/examples/llm-flowertune/test.py b/examples/flowertune-llm/test.py similarity index 100% rename from examples/llm-flowertune/test.py rename to examples/flowertune-llm/test.py index 652bb9aafcf5..fa8aa26100a8 100644 --- a/examples/llm-flowertune/test.py +++ b/examples/flowertune-llm/test.py @@ -1,11 +1,11 @@ # This python file is adapted from https://github.com/lm-sys/FastChat/blob/main/fastchat/llm_judge/gen_model_answer.py import argparse + import torch +from fastchat.conversation import get_conv_template from peft import AutoPeftModelForCausalLM from 
transformers import AutoTokenizer -from fastchat.conversation import get_conv_template - parser = argparse.ArgumentParser() parser.add_argument("--peft-path", type=str, default=None) diff --git a/examples/vit-finetune/README.md b/examples/flowertune-vit/README.md similarity index 55% rename from examples/vit-finetune/README.md rename to examples/flowertune-vit/README.md index ac1652acf02d..48327880f412 100644 --- a/examples/vit-finetune/README.md +++ b/examples/flowertune-vit/README.md @@ -1,61 +1,78 @@ -# Federated finetuning of a ViT +--- +tags: [finetuning, vision, fds] +dataset: [Oxford Flower-102] +framework: [torch, torchvision] +--- -This example shows how to use Flower's Simulation Engine to federate the finetuning of a Vision Transformer ([ViT-Base-16](https://pytorch.org/vision/main/models/generated/torchvision.models.vit_b_16.html#torchvision.models.vit_b_16)) that has been pretrained on ImageNet. To keep things simple we'll be finetuning it to [Oxford Flower-102](https://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html) datasset, creating 20 partitions using [Flower Datasets](https://flower.ai/docs/datasets/). We'll be finetuning just the exit `head` of the ViT, this means that the training is not that costly and each client requires just ~1GB of VRAM (for a batch size of 32 images). +# Federated Finetuning of a Vision Transformer with Flower -## Running the example +This example shows how to use Flower's Simulation Engine to federate the finetuning of a Vision Transformer ([ViT-Base-16](https://pytorch.org/vision/main/models/generated/torchvision.models.vit_b_16.html#torchvision.models.vit_b_16)) that has been pretrained on ImageNet. To keep things simple we'll be finetuning it to [Oxford Flower-102](https://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html) dataset, creating 20 partitions using [Flower Datasets](https://flower.ai/docs/datasets/). 
We'll be finetuning just the exit `head` of the ViT, this means that the training is not that costly and each client requires just ~1GB of VRAM (for a batch size of 32 images) if you choose to use a GPU. -If you haven't cloned the Flower repository already you might want to clone code example and discard the rest. We prepared a single-line command that you can copy into your shell which will checkout the example for you: +## Set up the project + +### Clone the project + +Start by cloning the example project: ```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/vit-finetune . && rm -rf flower && cd vit-finetune +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/flowertune-vit . \ + && rm -rf _tmp \ + && cd flowertune-vit ``` -This will create a new directory called `vit-finetune` containing the following files: +This will create a new directory called `flowertune-vit` with the following structure: +```shell +flowertune-vit +├── vitexample +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` --- README.md <- Your're reading this right now --- main.py <- Main file that launches the simulation --- client.py <- Contains Flower client code and ClientApp --- server.py <- Contains Flower server code and ServerApp --- model.py <- Defines model and train/eval functions --- dataset.py <- Downloads, partitions and processes dataset --- pyproject.toml <- Example dependencies, installable using Poetry --- requirements.txt <- Example dependencies, installable using pip -``` - -### Installing Dependencies -Project dependencies (such as `torch` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. 
We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. +### Install dependencies and project -#### Poetry +Install the dependencies defined in `pyproject.toml` as well as the `vitexample` package. -```shell -poetry install -poetry shell +```bash +pip install -e . ``` -#### pip +## Run the project -With an activated environemnt, install the dependencies for this example: +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -```shell -pip install -r requirements.txt +### Run with the Simulation Engine + +> \[!TIP\] +> This example runs faster when the `ClientApp`s have access to a GPU. If your system has one, you can make use of it by configuring the `backend.client-resources` component in `pyproject.toml`. If you want to try running the example with GPU right away, use the `local-simulation-gpu` federation as shown below. + +```bash +# Run with the default federation (CPU only) +flwr run . ``` -### Run with `start_simulation()` +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: + +```bash +flwr run . --run-config "num-server-rounds=5 batch-size=64" +``` -Running the example is quite straightforward. You can control the number of rounds `--num-rounds` (which defaults to 20). +Run the project in the `local-simulation-gpu` federation that gives CPU and GPU resources to each `ClientApp`. 
By default, at most 5x`ClientApp` will run in parallel in the available GPU. You can tweak the degree of parallelism by adjusting the settings of this federation in the `pyproject.toml`. ```bash -python main.py +# Run with the `local-simulation-gpu` federation +flwr run . local-simulation-gpu ``` ![](_static/central_evaluation.png) Running the example as-is on an RTX 3090Ti should take ~15s/round running 5 clients in parallel (plus the _global model_ during centralized evaluation stages) in a single GPU. Note that more clients could fit in VRAM, but since the GPU utilization is high (99%-100%) we are probably better off not doing that (at least in this case). -You can adjust the `client_resources` passed to `start_simulation()` so more/less clients run at the same time in the GPU. Take a look at the [Documentation](https://flower.ai/docs/framework/how-to-run-simulations.html) for more details on how you can customise your simulation. - ```bash +---------------------------------------------------------------------------------------+ | NVIDIA-SMI 535.161.07 Driver Version: 535.161.07 CUDA Version: 12.2 | @@ -83,12 +100,7 @@ You can adjust the `client_resources` passed to `start_simulation()` so more/les +---------------------------------------------------------------------------------------+ ``` -### Run with Flower Next (preview) +### Run with the Deployment Engine -```bash -flower-simulation \ - --client-app=client:app \ - --server-app=server:app \ - --num-supernodes=20 \ - --backend-config='{"client_resources": {"num_cpus":4, "num_gpus":0.25}}' -``` +> \[!NOTE\] +> An update to this example will show how to run this Flower project with the Deployment Engine and TLS certificates, or with Docker. 
diff --git a/examples/vit-finetune/_static/central_evaluation.png b/examples/flowertune-vit/_static/central_evaluation.png similarity index 100% rename from examples/vit-finetune/_static/central_evaluation.png rename to examples/flowertune-vit/_static/central_evaluation.png diff --git a/examples/flowertune-vit/pyproject.toml b/examples/flowertune-vit/pyproject.toml new file mode 100644 index 000000000000..bf280de8af95 --- /dev/null +++ b/examples/flowertune-vit/pyproject.toml @@ -0,0 +1,43 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "vitexample" +version = "1.0.0" +description = "Federated Finetuning of a Vision Transformer with Flower" +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]==1.12.0", + "flwr-datasets[vision]>=0.3.0", + "torch==2.2.1", + "torchvision==0.17.1", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "vitexample.server_app:app" +clientapp = "vitexample.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +batch-size = 32 +learning-rate = 0.01 +dataset-name = "nelorth/oxford-flowers" +num-classes = 102 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 + +[tool.flwr.federations.local-simulation-gpu] +options.num-supernodes = 10 +options.backend.client-resources.num-cpus = 2 # each ClientApp assumes to use 2CPUs +options.backend.client-resources.num-gpus = 0.2 # at most 5 ClientApp will run in a given GPU diff --git a/examples/flowertune-vit/vitexample/__init__.py b/examples/flowertune-vit/vitexample/__init__.py new file mode 100644 index 000000000000..f0ce539fac90 --- /dev/null +++ b/examples/flowertune-vit/vitexample/__init__.py @@ -0,0 +1 @@ +"""vitexample: A Flower / PyTorch app with Vision Transformers.""" diff --git a/examples/flowertune-vit/vitexample/client_app.py 
b/examples/flowertune-vit/vitexample/client_app.py new file mode 100644 index 000000000000..59143f1d25f8 --- /dev/null +++ b/examples/flowertune-vit/vitexample/client_app.py @@ -0,0 +1,62 @@ +"""vitexample: A Flower / PyTorch app with Vision Transformers.""" + +import torch +from torch.utils.data import DataLoader + +from flwr.common import Context +from flwr.client import NumPyClient, ClientApp + + +from vitexample.task import apply_train_transforms, get_dataset_partition +from vitexample.task import get_model, set_params, get_params, train + + +class FedViTClient(NumPyClient): + def __init__(self, trainloader, learning_rate, num_classes): + self.trainloader = trainloader + self.learning_rate = learning_rate + self.model = get_model(num_classes) + + # Determine device + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + self.model.to(self.device) # send model to device + + def fit(self, parameters, config): + set_params(self.model, parameters) + + # Set optimizer + optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate) + # Train locally + avg_train_loss = train( + self.model, self.trainloader, optimizer, epochs=1, device=self.device + ) + # Return locally-finetuned part of the model + return ( + get_params(self.model), + len(self.trainloader.dataset), + {"train_loss": avg_train_loss}, + ) + + +def client_fn(context: Context): + """Return a FedViTClient.""" + + # Read the node_config to fetch data partition associated to this node + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + dataset_name = context.run_config["dataset-name"] + trainpartition = get_dataset_partition(num_partitions, partition_id, dataset_name) + + batch_size = context.run_config["batch-size"] + lr = context.run_config["learning-rate"] + num_classes = context.run_config["num-classes"] + trainset = trainpartition.with_transform(apply_train_transforms) + + trainloader = DataLoader( + 
trainset, batch_size=batch_size, num_workers=2, shuffle=True + ) + + return FedViTClient(trainloader, lr, num_classes).to_client() + + +app = ClientApp(client_fn=client_fn) diff --git a/examples/flowertune-vit/vitexample/server_app.py b/examples/flowertune-vit/vitexample/server_app.py new file mode 100644 index 000000000000..f37215df5eb9 --- /dev/null +++ b/examples/flowertune-vit/vitexample/server_app.py @@ -0,0 +1,77 @@ +"""vitexample: A Flower / PyTorch app with Vision Transformers.""" + +from logging import INFO + +import torch +from datasets import Dataset, load_dataset +from torch.utils.data import DataLoader + +from vitexample.task import apply_eval_transforms +from vitexample.task import get_model, set_params, test, get_params + +from flwr.common import Context, ndarrays_to_parameters +from flwr.common.logger import log +from flwr.server import ServerApp, ServerConfig, ServerAppComponents +from flwr.server.strategy import FedAvg + + +def get_evaluate_fn( + centralized_testset: Dataset, + num_classes: int, +): + """Return an evaluation function for centralized evaluation.""" + + def evaluate(server_round, parameters, config): + """Use the entire Oxford Flowers-102 test set for evaluation.""" + + # Determine device + device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + # Instantiate model and apply current global parameters + model = get_model(num_classes) + set_params(model, parameters) + model.to(device) + + # Apply transform to dataset + testset = centralized_testset.with_transform(apply_eval_transforms) + + testloader = DataLoader(testset, batch_size=128) + # Run evaluation + loss, accuracy = test(model, testloader, device=device) + log(INFO, f"round: {server_round} -> acc: {accuracy:.4f}, loss: {loss: .4f}") + + return loss, {"accuracy": accuracy} + + return evaluate + + +def server_fn(context: Context): + + # Define testset for central evaluation + dataset_name = context.run_config["dataset-name"] + dataset = 
load_dataset(dataset_name) + test_set = dataset["test"] + + # Set initial global model + num_classes = context.run_config["num-classes"] + ndarrays = get_params(get_model(num_classes)) + init_parameters = ndarrays_to_parameters(ndarrays) + + # Configure the strategy + strategy = FedAvg( + fraction_fit=0.5, # Sample 50% of available clients + fraction_evaluate=0.0, # No federated evaluation + evaluate_fn=get_evaluate_fn( + test_set, num_classes + ), # Global evaluation function + initial_parameters=init_parameters, + ) + + # Construct ServerConfig + num_rounds = context.run_config["num-server-rounds"] + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +app = ServerApp(server_fn=server_fn) diff --git a/examples/flowertune-vit/vitexample/task.py b/examples/flowertune-vit/vitexample/task.py new file mode 100644 index 000000000000..3512d1891db2 --- /dev/null +++ b/examples/flowertune-vit/vitexample/task.py @@ -0,0 +1,131 @@ +"""vitexample: A Flower / PyTorch app with Vision Transformers.""" + +from collections import OrderedDict + +import torch +from torchvision.models import vit_b_16, ViT_B_16_Weights +from torchvision.transforms import ( + Compose, + Normalize, + ToTensor, + RandomResizedCrop, + Resize, + CenterCrop, +) + +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner + + +def get_model(num_classes: int): + """Return a pretrained ViT with all layers frozen except output head.""" + + # Instantiate a pre-trained ViT-B on ImageNet + model = vit_b_16(weights=ViT_B_16_Weights.IMAGENET1K_V1) + + # We're going to federate the finetuning of this model + # using (by default) the Oxford Flowers-102 dataset. 
One easy way + # to achieve this is by re-initializing the output block of the + # ViT so it outputs 102 classes instead of the default 1k + in_features = model.heads[-1].in_features + model.heads[-1] = torch.nn.Linear(in_features, num_classes) + + # Disable gradients for everything + model.requires_grad_(False) + # Now enable just for output head + model.heads.requires_grad_(True) + + return model + + +def set_params(model, parameters): + """Apply the parameters to model head.""" + finetune_layers = model.heads + params_dict = zip(finetune_layers.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + finetune_layers.load_state_dict(state_dict, strict=True) + + +def get_params(model): + """Get parameters from model head as ndarrays.""" + finetune_layers = model.heads + return [val.cpu().numpy() for _, val in finetune_layers.state_dict().items()] + + +def train(net, trainloader, optimizer, epochs, device): + """Train the model on the training set.""" + criterion = torch.nn.CrossEntropyLoss() + net.train() + net.to(device) + avg_loss = 0 + # A very standard training loop for image classification + for _ in range(epochs): + for batch in trainloader: + images, labels = batch["image"].to(device), batch["label"].to(device) + optimizer.zero_grad() + loss = criterion(net(images), labels) + avg_loss += loss.item() / labels.shape[0] + loss.backward() + optimizer.step() + + return avg_loss / len(trainloader) + + +def test(net, testloader, device: str): + """Validate the network on the entire test set.""" + criterion = torch.nn.CrossEntropyLoss() + correct, loss = 0, 0.0 + net.to(device) + net.eval() + with torch.no_grad(): + for data in testloader: + images, labels = data["image"].to(device), data["label"].to(device) + outputs = net(images) + loss += criterion(outputs, labels).item() + _, predicted = torch.max(outputs.data, 1) + correct += (predicted == labels).sum().item() + accuracy = correct / len(testloader.dataset) + 
return loss, accuracy + + +fds = None + + +def get_dataset_partition(num_partitions: int, partition_id: int, dataset_name: str): + """Get Oxford Flowers datasets and partition it.""" + global fds + if fds is None: + # Get dataset (by default Oxford Flowers-102) and create IID partitions + partitioner = IidPartitioner(num_partitions) + fds = FederatedDataset( + dataset=dataset_name, partitioners={"train": partitioner} + ) + + return fds.load_partition(partition_id) + + +def apply_eval_transforms(batch): + """Apply a very standard set of image transforms.""" + transforms = Compose( + [ + Resize((256, 256)), + CenterCrop((224, 224)), + ToTensor(), + Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ] + ) + batch["image"] = [transforms(img) for img in batch["image"]] + return batch + + +def apply_train_transforms(batch): + """Apply a very standard set of image transforms.""" + transforms = Compose( + [ + RandomResizedCrop((224, 224)), + ToTensor(), + Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ] + ) + batch["image"] = [transforms(img) for img in batch["image"]] + return batch diff --git a/examples/ios/README.md b/examples/ios/README.md index 4e17e7a674f3..aef4177dddf7 100644 --- a/examples/ios/README.md +++ b/examples/ios/README.md @@ -1,3 +1,9 @@ +--- +tags: [mobile, vision, sdk] +dataset: [MNIST] +framework: [Swift] +--- + # FLiOS - A Flower SDK for iOS Devices with Example FLiOS is a sample application for testing and benchmarking the Swift implementation of Flower. The default scenario uses the MNIST dataset and the associated digit recognition model. The app includes the Swift package in `./src/swift` and allows extension for other benchmarking scenarios. The app guides the user through the steps of the machine learning process that would be executed in a normal production environment as a background task of the application. 
The app is therefore aimed at researchers and research institutions to test their hypotheses and perform performance analyses. diff --git a/examples/ios/pyproject.toml b/examples/ios/pyproject.toml index 2e55b14cf761..03ea89ea3e54 100644 --- a/examples/ios/pyproject.toml +++ b/examples/ios/pyproject.toml @@ -9,5 +9,5 @@ description = "Example Server for Flower iOS/CoreML" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr = ">=1.0,<2.0" diff --git a/examples/ios/scenarios.ipynb b/examples/ios/scenarios.ipynb index de3e0e0c8c49..01da347cfee6 100644 --- a/examples/ios/scenarios.ipynb +++ b/examples/ios/scenarios.ipynb @@ -7,7 +7,7 @@ "source": [ "# Extending FLiOS Scenarios\n", "\n", - "This notebook demonstrates how to download and preprocess further benchmarking datasets and its associated machine learning models for the extenstion of the FLiOS application." + "This notebook demonstrates how to download and preprocess further benchmarking datasets and its associated machine learning models for the extension of the FLiOS application." ] }, { diff --git a/examples/ios/server.py b/examples/ios/server.py index 521297c9905e..c1c6780840e9 100644 --- a/examples/ios/server.py +++ b/examples/ios/server.py @@ -1,5 +1,6 @@ -import flwr import argparse + +import flwr import numpy as np diff --git a/examples/llm-flowertune/README.md b/examples/llm-flowertune/README.md deleted file mode 100644 index 4f98072f8c7f..000000000000 --- a/examples/llm-flowertune/README.md +++ /dev/null @@ -1,137 +0,0 @@ -# LLM FlowerTune: Federated LLM Fine-tuning with Flower - -Large language models (LLMs), which have been trained on vast amounts of publicly accessible data, have shown remarkable effectiveness in a wide range of areas. -However, despite the fact that more data typically leads to improved performance, there is a concerning prospect that the supply of high-quality public data will deplete within a few years. 
-Federated LLM training could unlock access to an endless pool of distributed private data by allowing multiple data owners to collaboratively train a shared model without the need to exchange raw data. - -This introductory example conducts federated instruction tuning with pretrained [LLama2](https://huggingface.co/openlm-research) models on [Alpaca-GPT4](https://huggingface.co/datasets/vicgalle/alpaca-gpt4) dataset. -We implement LLM FlowerTune by integrating a bundle of techniques: 1) We use [Flower Datasets](https://flower.dev/docs/datasets/) to download, partition and preprocess the dataset. 2) The fine-tuning is done using the [🤗PEFT](https://huggingface.co/docs/peft/en/index) library. 3) We use Flower's Simulation Engine to simulate the LLM fine-tuning process in federated way, -which allows users to perform the training on a single GPU. - -## Environment Setup - -Start by cloning the code example. We prepared a single-line command that you can copy into your shell which will checkout the example for you: - -```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/llm-flowertune . && rm -rf flower && cd llm-flowertune -``` - -This will create a new directory called `llm-flowertune` containing the following files: - -``` --- README.md <- Your're reading this right now --- main.py <- Start fed-LLM simulation --- client.py <- Flower client constructor --- model.py <- Model build --- dataset.py <- Dataset and tokenizer build --- utils.py <- Utility functions --- test.py <- Test pre-trained model --- app.py <- ServerApp/ClientApp for Flower-Next --- conf/config.yaml <- Configuration file --- requirements.txt <- Example dependencies -``` - -### Installing dependencies - -Project dependencies are defined in `requirements.txt`. Install them with: - -```shell -pip install -r requirements.txt -``` - -## Run LLM Fine-tuning - -With an activated Python environment, run the example with default config values. 
The config is in `conf/config.yaml` and is loaded automatically. - -```bash -# Run with default config -python main.py -``` - -This command will run FL simulations with a 4-bit [OpenLLaMA 7Bv2](https://huggingface.co/openlm-research/open_llama_7b_v2) model involving 2 clients per rounds for 100 FL rounds. You can override configuration parameters directly from the command line. Below are a few settings you might want to test: - -```bash -# Use OpenLLaMA-3B instead of 7B and 8-bits quantization -python main.py model.name="openlm-research/open_llama_3b_v2" model.quantization=8 - -# Run for 50 rounds but increasing the fraction of clients that participate per round to 25% -python main.py num_rounds=50 fraction_fit.fraction_fit=0.25 -``` - -## Expected Results - -![](_static/train_loss_smooth.png) - -As expected, LLama2-7B model works better than its 3B version with lower training loss. With the hyperparameters tested, the 8-bit model seems to deliver lower training loss for the smaller 3B model compared to its 4-bit version. - -You can run all 8 experiments with a single command as: - -```bash -python main.py --multirun model.name="openlm-research/open_llama_7b_v2","openlm-research/open_llama_3b_v2" model.quantization=8,4 strategy.fraction_fit=0.1,0.2 -``` - -## VRAM Consumption - -| Models | 7-billion (8-bit) | 7-billion (4-bit) | 3-billion (8-bit) | 3-billion (4-bit) | -| :----: | :---------------: | :---------------: | :---------------: | :---------------: | -| VRAM | ~22.00 GB | ~16.50 GB | ~13.50 GB | ~10.60 GB | - -We make use of the [bitsandbytes](https://huggingface.co/docs/bitsandbytes/main/en/index) library in conjunction with [PEFT](https://huggingface.co/docs/peft/en/index) to derive LLMs that can be fine-tuned efficiently. -The above table shows the VRAM consumption per client for the different models considered in this example. -You can adjust the CPU/GPU resources you assign to each of the clients based on your device. 
-For example, it is easy to train 2 concurrent clients on each GPU (24 GB VRAM) if you choose 3-billion (4-bit) model. - -```bash -# This will assign 50% of the GPU's VRAM to each client. -python main.py model.name="openlm-research/open_llama_3b_v2" model.quantization=4 client_resources.num_gpus=0.5 -``` - -## Test with your Questions - -We provide a script to test your trained model by passing your specified questions. For example: - -```bash -python test.py --peft-path=/path/to/trained-model-dir/ \ - --question="What is the ideal 1-day plan in London?" -``` - -An answer generated from federated trained 7-billion (8-bit) LLama2 model: - -``` -Great choice. -London has so much to offer, and you can really soak up all the sights and sounds in just a single day. -Here's a suggested itinerary for you. -Start your day off with a hearty breakfast at an authentic British diner. -Then head to the iconic Big Ben and the Houses of Parliament to learn about the history of the city. -Next, make your way to Westminster Abbey to see the many historical monuments and memorials. -From there, cross the river Thames to the Tower of London, which is home to the Crown Jewels of England and Scotland. -Finally, end your day with a relaxing visit to the London Eye, the tallest Ferris wheel in Europe, for a beautiful view of the city. -``` - -The [`Vicuna`](https://huggingface.co/lmsys/vicuna-13b-v1.1) template we used in this example is for a chat assistant. -The generated answer is expected to be a multi-turn conversations. Feel free to try more interesting questions! - -## Run with Flower Next (preview) - -We conduct a 2-client setting to demonstrate how to run federated LLM fine-tuning with Flower Next. -Please follow the steps below: - -1. Start the long-running Flower server (SuperLink) - ```bash - flower-superlink --insecure - ``` -2. 
Start the long-running Flower client (SuperNode) - ```bash - # In a new terminal window, start the first long-running Flower client: - flower-client-app app:client1 --insecure - ``` - ```bash - # In another new terminal window, start the second long-running Flower client: - flower-client-app app:client2 --insecure - ``` -3. Run the Flower App - ```bash - # With both the long-running server (SuperLink) and two clients (SuperNode) up and running, - # we can now run the actual Flower App: - flower-server-app app:server --insecure - ``` diff --git a/examples/llm-flowertune/app.py b/examples/llm-flowertune/app.py deleted file mode 100644 index e04ad8715de6..000000000000 --- a/examples/llm-flowertune/app.py +++ /dev/null @@ -1,86 +0,0 @@ -import os -import warnings -from hydra import compose, initialize - -import flwr as fl -from flwr_datasets import FederatedDataset - -from dataset import get_tokenizer_and_data_collator_and_propt_formatting -from client import gen_client_fn -from utils import get_on_fit_config, fit_weighted_average - - -warnings.filterwarnings("ignore", category=UserWarning) - -NUM_ROUNDS = 100 -save_path = "./results/" - -with initialize(config_path="conf"): - cfg = compose(config_name="config") - -# Reset the number of number -cfg.num_rounds = NUM_ROUNDS -cfg.train.num_rounds = NUM_ROUNDS - -# Create output directory -if not os.path.exists(save_path): - os.mkdir(save_path) - -# Partition dataset and get dataloaders -# We set the number of partitions to 20 for fast processing. 
-fds = FederatedDataset( - dataset=cfg.dataset.name, partitioners={"train": cfg.num_clients} -) -( - tokenizer, - data_collator, - formatting_prompts_func, -) = get_tokenizer_and_data_collator_and_propt_formatting(cfg.model.name) - - -# ClientApp for client #1 (Flower Next) -client1 = fl.client.ClientApp( - client_fn=gen_client_fn( - fds, - tokenizer, - formatting_prompts_func, - data_collator, - cfg.model, - cfg.train, - save_path, - partition_id=0, - api=True, - ), -) - - -# ClientApp for client #2 (Flower Next) -client2 = fl.client.ClientApp( - client_fn=gen_client_fn( - fds, - tokenizer, - formatting_prompts_func, - data_collator, - cfg.model, - cfg.train, - save_path, - partition_id=1, - api=True, - ), -) - - -# Instantiate strategy. -strategy = fl.server.strategy.FedAvg( - min_available_clients=2, # Simulate a 2-client setting - fraction_fit=1.0, - fraction_evaluate=0.0, # no client evaluation - on_fit_config_fn=get_on_fit_config(), - fit_metrics_aggregation_fn=fit_weighted_average, -) - -# ServerApp for Flower-Next -server = fl.server.ServerApp( - config=fl.server.ServerConfig(num_rounds=NUM_ROUNDS), - strategy=strategy, -) diff --git a/examples/llm-flowertune/client.py b/examples/llm-flowertune/client.py deleted file mode 100644 index 28b324ba5bf1..000000000000 --- a/examples/llm-flowertune/client.py +++ /dev/null @@ -1,129 +0,0 @@ -from collections import OrderedDict -from typing import Callable, Dict, Tuple - -import flwr as fl -import torch -from flwr.common.typing import NDArrays, Scalar -from omegaconf import DictConfig -from trl import SFTTrainer -from transformers import TrainingArguments -from peft import get_peft_model_state_dict, set_peft_model_state_dict - -from models import get_model, cosine_annealing - - -# pylint: disable=too-many-arguments -class FlowerClient( - fl.client.NumPyClient -): # pylint: disable=too-many-instance-attributes - """Standard Flower client for CNN training.""" - - def __init__( - self, - model_cfg: DictConfig, - 
train_cfg: DictConfig, - trainset, - tokenizer, - formatting_prompts_func, - data_collator, - save_path, - ): # pylint: disable=too-many-arguments - self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - self.train_cfg = train_cfg - self.training_argumnets = TrainingArguments(**train_cfg.training_arguments) - self.tokenizer = tokenizer - self.formatting_prompts_func = formatting_prompts_func - self.data_collator = data_collator - self.save_path = save_path - - # instantiate model - self.model = get_model(model_cfg) - - self.trainset = trainset - - def get_parameters(self, config: Dict[str, Scalar]) -> NDArrays: - """Return the parameters of the current net.""" - - state_dict = get_peft_model_state_dict(self.model) - return [val.cpu().numpy() for _, val in state_dict.items()] - - def fit( - self, parameters: NDArrays, config: Dict[str, Scalar] - ) -> Tuple[NDArrays, int, Dict]: - """Implement distributed fit function for a given client.""" - set_parameters(self.model, parameters) - - new_lr = cosine_annealing( - int(config["current_round"]), - self.train_cfg.num_rounds, - self.train_cfg.learning_rate_max, - self.train_cfg.learning_rate_min, - ) - - self.training_argumnets.learning_rate = new_lr - self.training_argumnets.output_dir = self.save_path - - # Construct trainer - trainer = SFTTrainer( - model=self.model, - tokenizer=self.tokenizer, - args=self.training_argumnets, - max_seq_length=self.train_cfg.seq_length, - train_dataset=self.trainset, - formatting_func=self.formatting_prompts_func, - data_collator=self.data_collator, - ) - - # Do local training - results = trainer.train() - - return ( - self.get_parameters({}), - len(self.trainset), - {"train_loss": results.training_loss}, - ) - - -def set_parameters(model, parameters: NDArrays) -> None: - """Change the parameters of the model using the given ones.""" - peft_state_dict_keys = get_peft_model_state_dict(model).keys() - params_dict = zip(peft_state_dict_keys, parameters) - state_dict 
= OrderedDict({k: torch.Tensor(v) for k, v in params_dict}) - set_peft_model_state_dict(model, state_dict) - - -def gen_client_fn( - fds, - tokenizer, - formatting_prompts_func, - data_collator, - model_cfg: DictConfig, - train_cfg: DictConfig, - save_path: str, - partition_id: int = 0, - api: bool = False, -) -> Callable[[str], FlowerClient]: # pylint: disable=too-many-arguments - """Generate the client function that creates the Flower Clients.""" - - def client_fn(cid: str) -> FlowerClient: - """Create a Flower client representing a single organization.""" - - # Let's get the partition corresponding to the i-th client - client_trainset = ( - fds.load_partition(partition_id, "train") - if api - else fds.load_partition(int(cid), "train") - ) - client_trainset = client_trainset.rename_column("output", "response") - - return FlowerClient( - model_cfg, - train_cfg, - client_trainset, - tokenizer, - formatting_prompts_func, - data_collator, - save_path, - ).to_client() - - return client_fn diff --git a/examples/llm-flowertune/conf/config.yaml b/examples/llm-flowertune/conf/config.yaml deleted file mode 100644 index 0b769d351479..000000000000 --- a/examples/llm-flowertune/conf/config.yaml +++ /dev/null @@ -1,45 +0,0 @@ -# Federated Instruction Tuning on General Dataset ---- - -num_clients: 20 # total number of clients -num_rounds: 100 - -dataset: - name: "vicgalle/alpaca-gpt4" - -model: - name: "openlm-research/open_llama_7b_v2" - quantization: 4 # 8 or 4 if you want to do quantization with BitsAndBytes - gradient_checkpointing: True - lora: - peft_lora_r: 32 - peft_lora_alpha: 64 - -train: - num_rounds: ${num_rounds} - save_every_round: 5 - learning_rate_max: 5e-5 - learning_rate_min: 1e-6 - seq_length: 512 - training_arguments: - output_dir: null # to be set by hydra - learning_rate: null # to be set by the client - per_device_train_batch_size: 16 - gradient_accumulation_steps: 1 - logging_steps: 10 - num_train_epochs: 3 - max_steps: 10 - report_to: null - save_steps: 
1000 - save_total_limit: 10 - gradient_checkpointing: ${model.gradient_checkpointing} - lr_scheduler_type: "constant" - -strategy: - _target_: flwr.server.strategy.FedAvg - fraction_fit: 0.1 # sample 10% of clients (i.e. 2 per round) - fraction_evaluate: 0.0 # no client evaluation - -client_resources: - num_cpus: 8 - num_gpus: 1.0 diff --git a/examples/llm-flowertune/main.py b/examples/llm-flowertune/main.py deleted file mode 100644 index 2d03e9cbcae5..000000000000 --- a/examples/llm-flowertune/main.py +++ /dev/null @@ -1,94 +0,0 @@ -import warnings -import pickle - -import flwr as fl -from flwr_datasets import FederatedDataset - -import hydra -from hydra.core.hydra_config import HydraConfig -from hydra.utils import instantiate -from omegaconf import DictConfig, OmegaConf - -from dataset import get_tokenizer_and_data_collator_and_propt_formatting -from utils import get_on_fit_config, fit_weighted_average, get_evaluate_fn -from client import gen_client_fn - - -warnings.filterwarnings("ignore", category=UserWarning) - - -@hydra.main(config_path="conf", config_name="config", version_base=None) -def main(cfg: DictConfig) -> None: - """Run federated LLM fine-tuning. - - Parameters - ---------- - cfg : DictConfig - An omegaconf object that stores the hydra config. 
- """ - # Print config structured as YAML - print(OmegaConf.to_yaml(cfg)) - - # Partition dataset and get dataloaders - fds = FederatedDataset( - dataset=cfg.dataset.name, partitioners={"train": cfg.num_clients} - ) - ( - tokenizer, - data_collator, - formatting_prompts_func, - ) = get_tokenizer_and_data_collator_and_propt_formatting( - cfg.model.name, - ) - - # Hydra automatically creates an output directory - # Let's retrieve it and save some results there - save_path = HydraConfig.get().runtime.output_dir - - # Prepare function that will be used to spawn each client - client_fn = gen_client_fn( - fds, - tokenizer, - formatting_prompts_func, - data_collator, - cfg.model, - cfg.train, - save_path, - ) - - # Instantiate strategy according to config. Here we pass other arguments - # that are only defined at run time. - strategy = instantiate( - cfg.strategy, - on_fit_config_fn=get_on_fit_config(), - fit_metrics_aggregation_fn=fit_weighted_average, - evaluate_fn=get_evaluate_fn( - cfg.model, cfg.train.save_every_round, cfg.num_rounds, save_path - ), - ) - - # Start simulation - history = fl.simulation.start_simulation( - client_fn=client_fn, - num_clients=cfg.num_clients, - config=fl.server.ServerConfig(num_rounds=cfg.num_rounds), - client_resources={ - "num_cpus": cfg.client_resources.num_cpus, - "num_gpus": cfg.client_resources.num_gpus, - }, - strategy=strategy, - ) - - # Experiment completed. 
Now we save the results and - # generate plots using the `history` - print("................") - print(history) - - # Save results as a Python pickle using a file_path - # the directory created by Hydra for each run - with open(f"{save_path}/results.pkl", "wb") as f: - pickle.dump(history, f) - - -if __name__ == "__main__": - main() diff --git a/examples/llm-flowertune/requirements.txt b/examples/llm-flowertune/requirements.txt deleted file mode 100644 index 7c66612eb2a5..000000000000 --- a/examples/llm-flowertune/requirements.txt +++ /dev/null @@ -1,9 +0,0 @@ -flwr[rest,simulation]>=1.8.0, <2.0 -flwr-datasets>=0.0.2 -hydra-core==1.3.2 -trl==0.7.2 -bitsandbytes==0.41.3 -scipy==1.11.2 -peft==0.4.0 -fschat[model_worker,webui]==0.2.35 -transformers==4.38.1 diff --git a/examples/llm-flowertune/utils.py b/examples/llm-flowertune/utils.py deleted file mode 100644 index bbb607810537..000000000000 --- a/examples/llm-flowertune/utils.py +++ /dev/null @@ -1,43 +0,0 @@ -from client import set_parameters -from models import get_model - - -# Get function that will be executed by the strategy's evaluate() method -# Here we use it to save global model checkpoints -def get_evaluate_fn(model_cfg, save_every_round, total_round, save_path): - """Return an evaluation function for saving global model.""" - - def evaluate(server_round: int, parameters, config): - # Save model - if server_round != 0 and ( - server_round == total_round or server_round % save_every_round == 0 - ): - # Init model - model = get_model(model_cfg) - set_parameters(model, parameters) - - model.save_pretrained(f"{save_path}/peft_{server_round}") - - return 0.0, {} - - return evaluate - - -# Get a function that will be used to construct the config that the client's -# fit() method will receive -def get_on_fit_config(): - def fit_config_fn(server_round: int): - fit_config = {"current_round": server_round} - return fit_config - - return fit_config_fn - - -def fit_weighted_average(metrics): - """Aggregation function 
for (federated) evaluation metrics.""" - # Multiply accuracy of each client by number of examples used - losses = [num_examples * m["train_loss"] for num_examples, m in metrics] - examples = [num_examples for num_examples, _ in metrics] - - # Aggregate and return custom metric (weighted average) - return {"train_loss": sum(losses) / sum(examples)} diff --git a/examples/opacus/README.md b/examples/opacus/README.md index 6fc0d2ff49a0..d08f534f878e 100644 --- a/examples/opacus/README.md +++ b/examples/opacus/README.md @@ -1,60 +1,63 @@ +--- +tags: [DP, DP-SGD, basic, vision, fds, privacy] +dataset: [CIFAR-10] +framework: [opacus, torch] +--- + # Training with Sample-Level Differential Privacy using Opacus Privacy Engine In this example, we demonstrate how to train a model with differential privacy (DP) using Flower. We employ PyTorch and integrate the Opacus Privacy Engine to achieve sample-level differential privacy. This setup ensures robust privacy guarantees during the client training phase. The code is adapted from the [PyTorch Quickstart example](https://github.com/adap/flower/tree/main/examples/quickstart-pytorch). For more information about DP in Flower please refer to the [tutorial](https://flower.ai/docs/framework/how-to-use-differential-privacy.html). For additional information about Opacus, visit the official [website](https://opacus.ai/). -## Environments Setup +## Set up the project + +### Clone the project -Start by cloning the example. We prepared a single-line command that you can copy into your shell which will checkout the example for you: +Start by cloning the example project: ```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/opacus . && rm -rf flower && cd opacus +git clone --depth=1 https://github.com/adap/flower.git \ + && mv flower/examples/opacus . 
\ + && rm -rf flower \ + && cd opacus ``` This will create a new directory called `opacus` containing the following files: ```shell --- pyproject.toml --- client.py --- server.py --- README.md +opacus +├── opacus_fl +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training, and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -### Installing dependencies +### Install dependencies and project -Project dependencies are defined in `pyproject.toml`. Install them with: +Install the dependencies defined in `pyproject.toml` as well as the `opacus_fl` package. From a new Python environment, run: ```shell -pip install . -``` - -## Run Flower with Opacus and Pytorch - -### 1. Start the long-running Flower server (SuperLink) - -```bash -flower-superlink --insecure +pip install -e . ``` -### 2. Start the long-running Flower clients (SuperNodes) +## Run the project -Start 2 Flower `SuperNodes` in 2 separate terminal windows, using: +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you use the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -```bash -flower-client-app client:appA --insecure -``` +### Run with the Simulation Engine ```bash -flower-client-app client:appB --insecure +flwr run . ``` -Opacus hyperparameters can be passed for each client in `ClientApp` instantiation (in `client.py`). In this example, `noise_multiplier=1.5` and `noise_multiplier=1` are used for the first and second client respectively. - -### 3. 
Run the Flower App - -With both the long-running server (SuperLink) and two clients (SuperNode) up and running, we can now run the actual Flower App: +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: ```bash -flower-server-app server:app --insecure +flwr run . --run-config "max-grad-norm=1.0 num-server-rounds=5" ``` + +> \[!NOTE\] +> Please note that, at the current state, users cannot set `NodeConfig` for simulated `ClientApp`s. For this reason, the hyperparameter `noise_multiplier` is set in the `client_fn` method based on a condition check on `partition_id`. This will be modified in a future version of Flower to allow users to set `NodeConfig` for simulated `ClientApp`s. diff --git a/examples/opacus/client.py b/examples/opacus/client.py deleted file mode 100644 index 51c1e1cfa667..000000000000 --- a/examples/opacus/client.py +++ /dev/null @@ -1,172 +0,0 @@ -import argparse -import warnings -from collections import OrderedDict - -from flwr_datasets import FederatedDataset -from flwr.client import NumPyClient, ClientApp -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.utils.data import DataLoader -from torchvision.transforms import Compose, Normalize, ToTensor -from tqdm import tqdm - -from opacus import PrivacyEngine - -warnings.filterwarnings("ignore", category=UserWarning) - -DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - - -class Net(nn.Module): - """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz')""" - - def __init__(self) -> None: - super(Net, self).__init__() - self.conv1 = nn.Conv2d(3, 6, 5) - self.pool = nn.MaxPool2d(2, 2) - self.conv2 = nn.Conv2d(6, 16, 5) - self.fc1 = nn.Linear(16 * 5 * 5, 120) - self.fc2 = nn.Linear(120, 84) - self.fc3 = nn.Linear(84, 10) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.pool(F.relu(self.conv1(x))) - x = self.pool(F.relu(self.conv2(x))) - x = x.view(-1, 16 * 5 
* 5) - x = F.relu(self.fc1(x)) - x = F.relu(self.fc2(x)) - return self.fc3(x) - - -def train(net, train_loader, privacy_engine, optimizer, target_delta, epochs=1): - criterion = torch.nn.CrossEntropyLoss() - for _ in range(epochs): - for batch in tqdm(train_loader, "Training"): - images = batch["img"] - labels = batch["label"] - optimizer.zero_grad() - criterion(net(images.to(DEVICE)), labels.to(DEVICE)).backward() - optimizer.step() - - epsilon = privacy_engine.get_epsilon(delta=target_delta) - return epsilon - - -def test(net, test_loader): - criterion = torch.nn.CrossEntropyLoss() - correct, loss = 0, 0.0 - with torch.no_grad(): - for batch in tqdm(test_loader, "Testing"): - images = batch["img"].to(DEVICE) - labels = batch["label"].to(DEVICE) - outputs = net(images) - loss += criterion(outputs, labels).item() - correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() - accuracy = correct / len(test_loader.dataset) - return loss, accuracy - - -def load_data(partition_id): - fds = FederatedDataset(dataset="cifar10", partitioners={"train": 2}) - partition = fds.load_partition(partition_id) - # Divide data on each node: 80% train, 20% test - partition_train_test = partition.train_test_split(test_size=0.2, seed=42) - pytorch_transforms = Compose( - [ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] - ) - - def apply_transforms(batch): - batch["img"] = [pytorch_transforms(img) for img in batch["img"]] - return batch - - partition_train_test = partition_train_test.with_transform(apply_transforms) - train_loader = DataLoader( - partition_train_test["train"], batch_size=32, shuffle=True - ) - test_loader = DataLoader(partition_train_test["test"], batch_size=32) - return train_loader, test_loader - - -class FlowerClient(NumPyClient): - def __init__( - self, - model, - train_loader, - test_loader, - target_delta, - noise_multiplier, - max_grad_norm, - ) -> None: - super().__init__() - self.test_loader = test_loader - self.optimizer = 
torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9) - self.privacy_engine = PrivacyEngine(secure_mode=False) - self.target_delta = target_delta - ( - self.model, - self.optimizer, - self.train_loader, - ) = self.privacy_engine.make_private( - module=model, - optimizer=self.optimizer, - data_loader=train_loader, - noise_multiplier=noise_multiplier, - max_grad_norm=max_grad_norm, - ) - - def get_parameters(self, config): - return [val.cpu().numpy() for _, val in self.model.state_dict().items()] - - def set_parameters(self, parameters): - params_dict = zip(self.model.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) - self.model.load_state_dict(state_dict, strict=True) - - def fit(self, parameters, config): - self.set_parameters(parameters) - epsilon = train( - self.model, - self.train_loader, - self.privacy_engine, - self.optimizer, - self.target_delta, - ) - - if epsilon is not None: - print(f"Epsilon value for delta={self.target_delta} is {epsilon:.2f}") - else: - print("Epsilon value not available.") - return (self.get_parameters(config={}), len(self.train_loader), {}) - - def evaluate(self, parameters, config): - self.set_parameters(parameters) - loss, accuracy = test(self.model, self.test_loader) - return loss, len(self.test_loader.dataset), {"accuracy": accuracy} - - -def client_fn_parameterized( - partition_id, target_delta=1e-5, noise_multiplier=1.3, max_grad_norm=1.0 -): - def client_fn(cid: str): - net = Net().to(DEVICE) - train_loader, test_loader = load_data(partition_id=partition_id) - return FlowerClient( - net, - train_loader, - test_loader, - target_delta, - noise_multiplier, - max_grad_norm, - ).to_client() - - return client_fn - - -appA = ClientApp( - client_fn=client_fn_parameterized(partition_id=0, noise_multiplier=1.5), -) - -appB = ClientApp( - client_fn=client_fn_parameterized(partition_id=1, noise_multiplier=1), -) diff --git a/examples/opacus/opacus_fl/__init__.py 
b/examples/opacus/opacus_fl/__init__.py new file mode 100644 index 000000000000..91006b32e386 --- /dev/null +++ b/examples/opacus/opacus_fl/__init__.py @@ -0,0 +1 @@ +"""opacus: Training with Sample-Level Differential Privacy using Opacus Privacy Engine.""" diff --git a/examples/opacus/opacus_fl/client_app.py b/examples/opacus/opacus_fl/client_app.py new file mode 100644 index 000000000000..631e99092789 --- /dev/null +++ b/examples/opacus/opacus_fl/client_app.py @@ -0,0 +1,92 @@ +"""opacus: Training with Sample-Level Differential Privacy using Opacus Privacy Engine.""" + +import warnings + +import torch +from opacus import PrivacyEngine +from opacus_fl.task import Net, get_weights, load_data, set_weights, test, train +import logging + +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context + +warnings.filterwarnings("ignore", category=UserWarning) + + +class FlowerClient(NumPyClient): + def __init__( + self, + train_loader, + test_loader, + target_delta, + noise_multiplier, + max_grad_norm, + ) -> None: + super().__init__() + self.model = Net() + self.train_loader = train_loader + self.test_loader = test_loader + self.target_delta = target_delta + self.noise_multiplier = noise_multiplier + self.max_grad_norm = max_grad_norm + + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + def fit(self, parameters, config): + model = self.model + set_weights(model, parameters) + + optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9) + + privacy_engine = PrivacyEngine(secure_mode=False) + ( + model, + optimizer, + self.train_loader, + ) = privacy_engine.make_private( + module=model, + optimizer=optimizer, + data_loader=self.train_loader, + noise_multiplier=self.noise_multiplier, + max_grad_norm=self.max_grad_norm, + ) + + epsilon = train( + model, + self.train_loader, + privacy_engine, + optimizer, + self.target_delta, + device=self.device, + ) + + if epsilon is not None: + print(f"Epsilon value for 
delta={self.target_delta} is {epsilon:.2f}") + else: + print("Epsilon value not available.") + + return (get_weights(model), len(self.train_loader.dataset), {}) + + def evaluate(self, parameters, config): + set_weights(self.model, parameters) + loss, accuracy = test(self.model, self.test_loader, self.device) + return loss, len(self.test_loader.dataset), {"accuracy": accuracy} + + +def client_fn(context: Context): + partition_id = context.node_config["partition-id"] + noise_multiplier = 1.0 if partition_id % 2 == 0 else 1.5 + + train_loader, test_loader = load_data( + partition_id=partition_id, num_partitions=context.node_config["num-partitions"] + ) + return FlowerClient( + train_loader, + test_loader, + context.run_config["target-delta"], + noise_multiplier, + context.run_config["max-grad-norm"], + ).to_client() + + +app = ClientApp(client_fn=client_fn) diff --git a/examples/opacus/opacus_fl/server_app.py b/examples/opacus/opacus_fl/server_app.py new file mode 100644 index 000000000000..2c105d36df41 --- /dev/null +++ b/examples/opacus/opacus_fl/server_app.py @@ -0,0 +1,37 @@ +"""opacus: Training with Sample-Level Differential Privacy using Opacus Privacy Engine.""" + +import logging +from typing import List, Tuple + +from opacus_fl.task import Net, get_weights + +from flwr.common import Context, Metrics, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg + +# Opacus logger seems to change the flwr logger to DEBUG level. 
Set back to INFO +logging.getLogger("flwr").setLevel(logging.INFO) + + +def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: + accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + return {"accuracy": sum(accuracies) / sum(examples)} + + +def server_fn(context: Context) -> ServerAppComponents: + num_rounds = context.run_config["num-server-rounds"] + + ndarrays = get_weights(Net()) + parameters = ndarrays_to_parameters(ndarrays) + + strategy = FedAvg( + evaluate_metrics_aggregation_fn=weighted_average, + initial_parameters=parameters, + ) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(config=config, strategy=strategy) + + +app = ServerApp(server_fn=server_fn) diff --git a/examples/opacus/opacus_fl/task.py b/examples/opacus/opacus_fl/task.py new file mode 100644 index 000000000000..0c7ef71dc50b --- /dev/null +++ b/examples/opacus/opacus_fl/task.py @@ -0,0 +1,102 @@ +"""opacus: Training with Sample-Level Differential Privacy using Opacus Privacy Engine.""" + +from collections import OrderedDict + +import torch +import torch.nn as nn +import torch.nn.functional as F +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner +from torch.utils.data import DataLoader +from torchvision.transforms import Compose, Normalize, ToTensor +from tqdm import tqdm + +fds = None # Cache FederatedDataset + + +class Net(nn.Module): + def __init__(self): + super(Net, self).__init__() + self.conv1 = nn.Conv2d(3, 6, 5) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x): + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = x.view(-1, 16 * 5 * 5) + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + return self.fc3(x) + + +def get_weights(net): + return 
[val.cpu().numpy() for _, val in net.state_dict().items()] + + +def set_weights(net, parameters): + params_dict = zip(net.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) + + +def load_data(partition_id: int, num_partitions: int): + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="uoft-cs/cifar10", + partitioners={"train": partitioner}, + ) + + partition = fds.load_partition(partition_id) + # Divide data on each node: 80% train, 20% test + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) + pytorch_transforms = Compose( + [ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] + ) + + def apply_transforms(batch): + batch["img"] = [pytorch_transforms(img) for img in batch["img"]] + return batch + + partition_train_test = partition_train_test.with_transform(apply_transforms) + train_loader = DataLoader( + partition_train_test["train"], batch_size=32, shuffle=True + ) + test_loader = DataLoader(partition_train_test["test"], batch_size=32) + return train_loader, test_loader + + +def train(net, train_loader, privacy_engine, optimizer, target_delta, device, epochs=1): + criterion = torch.nn.CrossEntropyLoss() + net.to(device) + net.train() + for _ in range(epochs): + for batch in tqdm(train_loader, "Training"): + images = batch["img"] + labels = batch["label"] + optimizer.zero_grad() + criterion(net(images.to(device)), labels.to(device)).backward() + optimizer.step() + + epsilon = privacy_engine.get_epsilon(delta=target_delta) + return epsilon + + +def test(net, test_loader, device): + net.to(device) + criterion = torch.nn.CrossEntropyLoss() + correct, loss = 0, 0.0 + with torch.no_grad(): + for batch in tqdm(test_loader, "Testing"): + images = batch["img"].to(device) + labels = batch["label"].to(device) + outputs = net(images) + loss += criterion(outputs, 
labels).item() + correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() + accuracy = correct / len(test_loader.dataset) + return loss, accuracy diff --git a/examples/opacus/pyproject.toml b/examples/opacus/pyproject.toml index 0aaa167d0a28..4814709569ef 100644 --- a/examples/opacus/pyproject.toml +++ b/examples/opacus/pyproject.toml @@ -3,20 +3,35 @@ requires = ["hatchling"] build-backend = "hatchling.build" [project] -name = "opacus-fl" -version = "0.1.0" -description = "Sample Differential Privacy with Opacus in Flower" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] +name = "opacus_fl" +version = "1.0.0" +description = "Sample-level Differential Privacy with Opacus in Flower" + dependencies = [ - "flwr>=1.8.0,<2.0", - "flwr-datasets[vision]>=0.0.2,<1.0.0", + "flwr[simulation]>=1.12.0", + "flwr-datasets[vision]>=0.3.0", "torch==2.1.1", "torchvision==0.16.1", - "tqdm==4.65.0", - "opacus==v1.4.1" + "opacus==v1.4.1", ] [tool.hatch.build.targets.wheel] -packages = ["."] \ No newline at end of file +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "opacus_fl.server_app:app" +clientapp = "opacus_fl.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +target-delta = 1e-5 +max-grad-norm = 1.0 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 2 diff --git a/examples/opacus/server.py b/examples/opacus/server.py deleted file mode 100644 index a206c48307e2..000000000000 --- a/examples/opacus/server.py +++ /dev/null @@ -1,22 +0,0 @@ -from typing import List, Tuple - -import flwr as fl -from flwr.server.strategy import FedAvg -from flwr.common import Metrics -from flwr.server import ServerApp, ServerConfig - - -def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: - accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] - examples = [num_examples for 
num_examples, _ in metrics] - return {"accuracy": sum(accuracies) / sum(examples)} - - -strategy = FedAvg(evaluate_metrics_aggregation_fn=weighted_average) - -config = ServerConfig(num_rounds=3) - -app = ServerApp( - config=config, - strategy=strategy, -) diff --git a/examples/pytorch-federated-variational-autoencoder/.gitignore b/examples/pytorch-federated-variational-autoencoder/.gitignore deleted file mode 100644 index c3b84dc53dce..000000000000 --- a/examples/pytorch-federated-variational-autoencoder/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.gz -cifar-10-batches-py/ diff --git a/examples/pytorch-federated-variational-autoencoder/README.md b/examples/pytorch-federated-variational-autoencoder/README.md index 00af7a6328b2..7b65406fe923 100644 --- a/examples/pytorch-federated-variational-autoencoder/README.md +++ b/examples/pytorch-federated-variational-autoencoder/README.md @@ -1,73 +1,63 @@ -# Flower Example for Federated Variational Autoencoder using Pytorch +--- +tags: [basic, vision, fds] +dataset: [CIFAR-10] +framework: [torch, torchvision] +--- -This example demonstrates how a variational autoencoder (VAE) can be trained in a federated way using the Flower framework. +# Federated Variational Autoencoder with PyTorch and Flower -## Project Setup +This example demonstrates how a variational autoencoder (VAE) can be trained in a federated way using the Flower framework. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to download, partition and preprocess the CIFAR-10 dataset. -Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: +## Set up the project -```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/pytorch_federated_variational_autoencoder . 
&& rm -rf flower && cd pytorch_federated_variational_autoencoder -``` - -This will create a new directory called `pytorch_federated_variational_autoencoder` containing the following files: - -```shell --- pyproject.toml --- requirements.txt --- client.py --- server.py --- README.md --- models.py -``` - -### Installing Dependencies - -Project dependencies (such as `torch` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. +### Clone the project -#### Poetry +Start by cloning the example project: ```shell -poetry install -poetry shell +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/pytorch-federated-variational-autoencoder . \ + && rm -rf _tmp && cd pytorch-federated-variational-autoencoder ``` -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: +This will create a new directory called `pytorch-federated-variational-autoencoder` with the following structure: ```shell -poetry run python3 -c "import flwr" +pytorch-federated-variational-autoencoder +├── README.md +├── fedvaeexample +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training and data loading +└── pyproject.toml # Project metadata like dependencies and configs ``` -If you don't see any errors you're good to go! +### Install dependencies and project -#### pip +Install the dependencies defined in `pyproject.toml` as well as the `fedvaeexample` package. 
-Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. - -```shell -pip install -r requirements.txt +```bash +pip install -e . ``` -## Federating the Variational Autoencoder Model +## Run the Project -Afterwards you are ready to start the Flower server as well as the clients. You can simply start the server in a terminal as follows: - -```shell -poetry run python3 server.py -``` +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -Now you are ready to start the Flower clients which will participate in the learning. To do so simply open two more terminals and run the following command in each: +### Run with the Simulation Engine -```shell -poetry run python3 client.py +```bash +flwr run . ``` -Alternatively you can run all of it in one shell as follows: +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: -```shell -poetry run python3 server.py & -poetry run python3 client.py & -poetry run python3 client.py +```bash +flwr run . --run-config num-server-rounds=5 ``` -You will see that the federated training of variational autoencoder has started. You can add `steps_per_epoch=3` to `model.fit()` if you just want to evaluate that everything works without having to wait for the client-side training to finish (this will save you a lot of time during development). +### Run with the Deployment Engine + +> \[!NOTE\] +> An update to this example will show how to run this Flower application with the Deployment Engine and TLS certificates, or with Docker. 
diff --git a/examples/pytorch-federated-variational-autoencoder/client.py b/examples/pytorch-federated-variational-autoencoder/client.py deleted file mode 100644 index fc71f7e70c0b..000000000000 --- a/examples/pytorch-federated-variational-autoencoder/client.py +++ /dev/null @@ -1,102 +0,0 @@ -from collections import OrderedDict - -import torch -import torch.nn.functional as F -import torchvision.transforms as transforms -from models import Net -from torch.utils.data import DataLoader -from torchvision.datasets import CIFAR10 - -import flwr as fl - -DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - - -def load_data(): - """Load CIFAR-10 (training and test set).""" - transform = transforms.Compose( - [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] - ) - trainset = CIFAR10(".", train=True, download=True, transform=transform) - testset = CIFAR10(".", train=False, download=True, transform=transform) - trainloader = DataLoader(trainset, batch_size=32, shuffle=True) - testloader = DataLoader(testset, batch_size=32) - return trainloader, testloader - - -def train(net, trainloader, epochs): - """Train the network on the training set.""" - optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9) - for _ in range(epochs): - for images, _ in trainloader: - images = images.to(DEVICE) - optimizer.zero_grad() - recon_images, mu, logvar = net(images) - recon_loss = F.mse_loss(recon_images, images) - kld_loss = -0.5 * torch.mean(1 + logvar - mu.pow(2) - logvar.exp()) - loss = recon_loss + 0.05 * kld_loss - loss.backward() - optimizer.step() - - -def test(net, testloader): - """Validate the network on the entire test set.""" - total, loss = 0, 0.0 - with torch.no_grad(): - for data in testloader: - images = data[0].to(DEVICE) - recon_images, mu, logvar = net(images) - recon_loss = F.mse_loss(recon_images, images) - kld_loss = -0.5 * torch.mean(1 + logvar - mu.pow(2) - logvar.exp()) - loss += recon_loss + kld_loss 
- total += len(images) - return loss / total - - -def sample(net): - """Generates samples using the decoder of the trained VAE.""" - with torch.no_grad(): - z = torch.randn(10) - z = z.to(DEVICE) - gen_image = net.decode(z) - return gen_image - - -def generate(net, image): - """Reproduce the input with trained VAE.""" - with torch.no_grad(): - return net.forward(image) - - -def main(): - # Load model and data - net = Net() - net = net.to(DEVICE) - trainloader, testloader = load_data() - - class CifarClient(fl.client.NumPyClient): - def get_parameters(self, config): - return [val.cpu().numpy() for _, val in net.state_dict().items()] - - def set_parameters(self, parameters): - params_dict = zip(net.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) - net.load_state_dict(state_dict, strict=True) - - def fit(self, parameters, config): - self.set_parameters(parameters) - train(net, trainloader, epochs=1) - return self.get_parameters(config={}), len(trainloader), {} - - def evaluate(self, parameters, config): - self.set_parameters(parameters) - loss = test(net, testloader) - return float(loss), len(testloader), {} - - fl.client.start_client( - server_address="127.0.0.1:8080", client=CifarClient().to_client() - ) - - -if __name__ == "__main__": - main() diff --git a/examples/pytorch-federated-variational-autoencoder/fedvaeexample/__init__.py b/examples/pytorch-federated-variational-autoencoder/fedvaeexample/__init__.py new file mode 100644 index 000000000000..08622fc6f28f --- /dev/null +++ b/examples/pytorch-federated-variational-autoencoder/fedvaeexample/__init__.py @@ -0,0 +1 @@ +"""fedvaeexample: A Flower / PyTorch app for Federated Variational Autoencoder.""" diff --git a/examples/pytorch-federated-variational-autoencoder/fedvaeexample/client_app.py b/examples/pytorch-federated-variational-autoencoder/fedvaeexample/client_app.py new file mode 100644 index 000000000000..6a0508e18d4c --- /dev/null +++ 
b/examples/pytorch-federated-variational-autoencoder/fedvaeexample/client_app.py @@ -0,0 +1,53 @@ +"""fedvaeexample: A Flower / PyTorch app for Federated Variational Autoencoder.""" + +import torch +from fedvaeexample.task import Net, get_weights, load_data, set_weights, test, train + +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context + + +class CifarClient(NumPyClient): + def __init__(self, trainloader, testloader, local_epochs, learning_rate): + self.net = Net() + self.trainloader = trainloader + self.testloader = testloader + self.local_epochs = local_epochs + self.lr = learning_rate + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + def fit(self, parameters, config): + """Train the model with data of this client.""" + set_weights(self.net, parameters) + train( + self.net, + self.trainloader, + epochs=self.local_epochs, + learning_rate=self.lr, + device=self.device, + ) + return get_weights(self.net), len(self.trainloader), {} + + def evaluate(self, parameters, config): + """Evaluate the model on the data this client has.""" + set_weights(self.net, parameters) + loss = test(self.net, self.testloader, self.device) + return float(loss), len(self.testloader), {} + + +def client_fn(context: Context): + """Construct a Client that will be run in a ClientApp.""" + + # Read the node_config to fetch data partition associated to this node + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + + # Read the run_config to fetch hyperparameters relevant to this run + trainloader, testloader = load_data(partition_id, num_partitions) + local_epochs = context.run_config["local-epochs"] + learning_rate = context.run_config["learning-rate"] + + return CifarClient(trainloader, testloader, local_epochs, learning_rate).to_client() + + +app = ClientApp(client_fn=client_fn) diff --git a/examples/pytorch-federated-variational-autoencoder/fedvaeexample/server_app.py 
b/examples/pytorch-federated-variational-autoencoder/fedvaeexample/server_app.py new file mode 100644 index 000000000000..0f7a6520de59 --- /dev/null +++ b/examples/pytorch-federated-variational-autoencoder/fedvaeexample/server_app.py @@ -0,0 +1,28 @@ +"""fedvaeexample: A Flower / PyTorch app for Federated Variational Autoencoder.""" + +from fedvaeexample.task import Net, get_weights + +from flwr.common import Context, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg + + +def server_fn(context: Context) -> ServerAppComponents: + """Construct components for ServerApp.""" + + # Read from config + num_rounds = context.run_config["num-server-rounds"] + + # Initialize model parameters + ndarrays = get_weights(Net()) + parameters = ndarrays_to_parameters(ndarrays) + + # Define the strategy + strategy = FedAvg(initial_parameters=parameters) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Create ServerApp +app = ServerApp(server_fn=server_fn) diff --git a/examples/pytorch-federated-variational-autoencoder/fedvaeexample/task.py b/examples/pytorch-federated-variational-autoencoder/fedvaeexample/task.py new file mode 100644 index 000000000000..112bd3358fb0 --- /dev/null +++ b/examples/pytorch-federated-variational-autoencoder/fedvaeexample/task.py @@ -0,0 +1,159 @@ +"""fedvae: A Flower app for Federated Variational Autoencoder.""" + +from collections import OrderedDict + +import torch +import torch.nn.functional as F +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner +from torch import nn +from torch.utils.data import DataLoader +from torchvision.transforms import Compose, Normalize, ToTensor + + +class Flatten(nn.Module): + """Flattens input by reshaping it into a one-dimensional tensor.""" + + def forward(self, input): + return input.view(input.size(0), -1) + + +class 
UnFlatten(nn.Module): + """Unflattens a tensor converting it to a desired shape.""" + + def forward(self, input): + return input.view(-1, 16, 6, 6) + + +class Net(nn.Module): + def __init__(self, h_dim=576, z_dim=10) -> None: + super().__init__() + self.encoder = nn.Sequential( + nn.Conv2d( + in_channels=3, out_channels=6, kernel_size=4, stride=2 + ), # [batch, 6, 15, 15] + nn.ReLU(), + nn.Conv2d( + in_channels=6, out_channels=16, kernel_size=5, stride=2 + ), # [batch, 16, 6, 6] + nn.ReLU(), + Flatten(), + ) + + self.fc1 = nn.Linear(h_dim, z_dim) + self.fc2 = nn.Linear(h_dim, z_dim) + self.fc3 = nn.Linear(z_dim, h_dim) + + self.decoder = nn.Sequential( + UnFlatten(), + nn.ConvTranspose2d(in_channels=16, out_channels=6, kernel_size=5, stride=2), + nn.ReLU(), + nn.ConvTranspose2d(in_channels=6, out_channels=3, kernel_size=4, stride=2), + nn.Tanh(), + ) + + def reparametrize(self, h): + """Reparametrization layer of VAE.""" + mu, logvar = self.fc1(h), self.fc2(h) + std = torch.exp(logvar / 2) + eps = torch.randn_like(std) + z = mu + std * eps + return z, mu, logvar + + def encode(self, x): + """Encoder of the VAE.""" + h = self.encoder(x) + z, mu, logvar = self.reparametrize(h) + return z, mu, logvar + + def decode(self, z): + """Decoder of the VAE.""" + z = self.fc3(z) + z = self.decoder(z) + return z + + def forward(self, x): + z, mu, logvar = self.encode(x) + z_decode = self.decode(z) + return z_decode, mu, logvar + + +fds = None # Cache FederatedDataset + + +def load_data(partition_id, num_partitions): + """Load partition CIFAR10 data.""" + # Only initialize `FederatedDataset` once + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="uoft-cs/cifar10", + partitioners={"train": partitioner}, + ) + partition = fds.load_partition(partition_id) + # Divide data on each node: 80% train, 20% test + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) + pytorch_transforms = 
Compose( + [ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] + ) + + def apply_transforms(batch): + """Apply transforms to the partition from FederatedDataset.""" + batch["img"] = [pytorch_transforms(img) for img in batch["img"]] + return batch + + partition_train_test = partition_train_test.with_transform(apply_transforms) + trainloader = DataLoader(partition_train_test["train"], batch_size=32, shuffle=True) + testloader = DataLoader(partition_train_test["test"], batch_size=32) + return trainloader, testloader + + +def train(net, trainloader, epochs, learning_rate, device): + """Train the network on the training set.""" + net.to(device) # move model to GPU if available + optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9) + for _ in range(epochs): + # for images, _ in trainloader: + for batch in trainloader: + images = batch["img"] + images = images.to(device) + optimizer.zero_grad() + recon_images, mu, logvar = net(images) + recon_loss = F.mse_loss(recon_images, images) + kld_loss = -0.5 * torch.mean(1 + logvar - mu.pow(2) - logvar.exp()) + loss = recon_loss + 0.05 * kld_loss + loss.backward() + optimizer.step() + + +def test(net, testloader, device): + """Validate the network on the entire test set.""" + total, loss = 0, 0.0 + with torch.no_grad(): + # for data in testloader: + for batch in testloader: + images = batch["img"].to(device) + # images = data[0].to(DEVICE) + recon_images, mu, logvar = net(images) + recon_loss = F.mse_loss(recon_images, images) + kld_loss = -0.5 * torch.mean(1 + logvar - mu.pow(2) - logvar.exp()) + loss += recon_loss + kld_loss + total += len(images) + return loss / total + + +def generate(net, image): + """Reproduce the input with trained VAE.""" + with torch.no_grad(): + return net.forward(image) + + +def get_weights(net): + return [val.cpu().numpy() for _, val in net.state_dict().items()] + + +def set_weights(net, parameters): + params_dict = zip(net.state_dict().keys(), parameters) + state_dict = 
OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) diff --git a/examples/pytorch-federated-variational-autoencoder/models.py b/examples/pytorch-federated-variational-autoencoder/models.py deleted file mode 100644 index 4999631051b2..000000000000 --- a/examples/pytorch-federated-variational-autoencoder/models.py +++ /dev/null @@ -1,69 +0,0 @@ -import torch -import torch.nn as nn - - -class Flatten(nn.Module): - """Flattens input by reshaping it into a one-dimensional tensor.""" - - def forward(self, input): - return input.view(input.size(0), -1) - - -class UnFlatten(nn.Module): - """Unflattens a tensor converting it to a desired shape.""" - - def forward(self, input): - return input.view(-1, 16, 6, 6) - - -class Net(nn.Module): - def __init__(self, h_dim=576, z_dim=10) -> None: - super(Net, self).__init__() - self.encoder = nn.Sequential( - nn.Conv2d( - in_channels=3, out_channels=6, kernel_size=4, stride=2 - ), # [batch, 6, 15, 15] - nn.ReLU(), - nn.Conv2d( - in_channels=6, out_channels=16, kernel_size=5, stride=2 - ), # [batch, 16, 6, 6] - nn.ReLU(), - Flatten(), - ) - - self.fc1 = nn.Linear(h_dim, z_dim) - self.fc2 = nn.Linear(h_dim, z_dim) - self.fc3 = nn.Linear(z_dim, h_dim) - - self.decoder = nn.Sequential( - UnFlatten(), - nn.ConvTranspose2d(in_channels=16, out_channels=6, kernel_size=5, stride=2), - nn.ReLU(), - nn.ConvTranspose2d(in_channels=6, out_channels=3, kernel_size=4, stride=2), - nn.Tanh(), - ) - - def reparametrize(self, h): - """Reparametrization layer of VAE.""" - mu, logvar = self.fc1(h), self.fc2(h) - std = torch.exp(logvar / 2) - eps = torch.randn_like(std) - z = mu + std * eps - return z, mu, logvar - - def encode(self, x): - """Encoder of the VAE.""" - h = self.encoder(x) - z, mu, logvar = self.reparametrize(h) - return z, mu, logvar - - def decode(self, z): - """Decoder of the VAE.""" - z = self.fc3(z) - z = self.decoder(z) - return z - - def forward(self, x): - z, mu, logvar = 
self.encode(x) - z_decode = self.decode(z) - return z_decode, mu, logvar diff --git a/examples/pytorch-federated-variational-autoencoder/pyproject.toml b/examples/pytorch-federated-variational-autoencoder/pyproject.toml index bc1f85803682..ade08a639f2b 100644 --- a/examples/pytorch-federated-variational-autoencoder/pyproject.toml +++ b/examples/pytorch-federated-variational-autoencoder/pyproject.toml @@ -1,15 +1,36 @@ -[tool.poetry] -name = "pytorch_federated_variational_autoencoder" -version = "0.1.0" -description = "Federated Variational Autoencoder Example" -authors = ["The Flower Authors "] - -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = ">=1.0,<2.0" -torch = "1.13.1" -torchvision = "0.14.1" - [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "fedvaeexample" +version = "1.0.0" +description = "Federated Variational Autoencoder Example with PyTorch and Flower" +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]>=1.12.0", + "flwr-datasets[vision]>=0.3.0", + "torch==2.2.1", + "torchvision==0.17.1", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "fedvaeexample.server_app:app" +clientapp = "fedvaeexample.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +local-epochs = 1 +learning-rate = 0.001 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 2 diff --git a/examples/pytorch-federated-variational-autoencoder/requirements.txt b/examples/pytorch-federated-variational-autoencoder/requirements.txt deleted file mode 100644 index f3caddbc875e..000000000000 --- a/examples/pytorch-federated-variational-autoencoder/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -flwr>=1.0, <2.0 -torch==1.13.1 -torchvision==0.14.1 diff --git 
a/examples/pytorch-federated-variational-autoencoder/server.py b/examples/pytorch-federated-variational-autoencoder/server.py deleted file mode 100644 index 575ebb2235e2..000000000000 --- a/examples/pytorch-federated-variational-autoencoder/server.py +++ /dev/null @@ -1,12 +0,0 @@ -import flwr as fl - - -def main(): - fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=3), - ) - - -if __name__ == "__main__": - main() diff --git a/examples/pytorch-from-centralized-to-federated/README.md b/examples/pytorch-from-centralized-to-federated/README.md index 06ee89dddcac..1bff7d02f52c 100644 --- a/examples/pytorch-from-centralized-to-federated/README.md +++ b/examples/pytorch-from-centralized-to-federated/README.md @@ -1,3 +1,9 @@ +--- +tags: [basic, vision, fds] +dataset: [CIFAR-10] +framework: [torch] +--- + # PyTorch: From Centralized To Federated This example demonstrates how an already existing centralized PyTorch-based machine learning project can be federated with Flower. 
diff --git a/examples/pytorch-from-centralized-to-federated/cifar.py b/examples/pytorch-from-centralized-to-federated/cifar.py index c592b63b0042..fdd3ea865c13 100644 --- a/examples/pytorch-from-centralized-to-federated/cifar.py +++ b/examples/pytorch-from-centralized-to-federated/cifar.py @@ -15,11 +15,10 @@ import torch import torch.nn as nn import torch.nn.functional as F +from flwr_datasets import FederatedDataset from torch import Tensor from torch.utils.data import DataLoader -from torchvision.transforms import Compose, ToTensor, Normalize - -from flwr_datasets import FederatedDataset +from torchvision.transforms import Compose, Normalize, ToTensor # pylint: disable=unsubscriptable-object diff --git a/examples/pytorch-from-centralized-to-federated/client.py b/examples/pytorch-from-centralized-to-federated/client.py index 9df4739e0aab..845f7ae2b5b2 100644 --- a/examples/pytorch-from-centralized-to-federated/client.py +++ b/examples/pytorch-from-centralized-to-federated/client.py @@ -4,14 +4,13 @@ from collections import OrderedDict from typing import Dict, List, Tuple +import cifar +import flwr as fl import numpy as np import torch from datasets.utils.logging import disable_progress_bar from torch.utils.data import DataLoader -import cifar -import flwr as fl - disable_progress_bar() diff --git a/examples/pytorch-from-centralized-to-federated/pyproject.toml b/examples/pytorch-from-centralized-to-federated/pyproject.toml index 3d1559e3a515..57a8082fd6bf 100644 --- a/examples/pytorch-from-centralized-to-federated/pyproject.toml +++ b/examples/pytorch-from-centralized-to-federated/pyproject.toml @@ -9,7 +9,7 @@ description = "PyTorch: From Centralized To Federated with Flower" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr = ">=1.0,<2.0" flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } torch = "1.13.1" diff --git a/examples/quickstart-cpp/README.md 
b/examples/quickstart-cpp/README.md index d6cbeebe1bc6..61b76ece52b0 100644 --- a/examples/quickstart-cpp/README.md +++ b/examples/quickstart-cpp/README.md @@ -1,3 +1,9 @@ +--- +tags: [quickstart, linear regression, tabular] +dataset: [Synthetic] +framework: [C++] +--- + # Flower Clients in C++ (under development) In this example you will train a linear model on synthetic data using C++ clients. diff --git a/examples/quickstart-cpp/fedavg_cpp.py b/examples/quickstart-cpp/fedavg_cpp.py index cd62d07bb848..3488bfb1f2e4 100644 --- a/examples/quickstart-cpp/fedavg_cpp.py +++ b/examples/quickstart-cpp/fedavg_cpp.py @@ -3,15 +3,9 @@ from typing import Callable, Dict, List, Optional, Tuple, Union import numpy as np -from flwr.server.strategy import FedAvg -from flwr.common import ( - EvaluateRes, - FitRes, - Parameters, - Scalar, - NDArrays, -) +from flwr.common import EvaluateRes, FitRes, NDArrays, Parameters, Scalar from flwr.server.client_proxy import ClientProxy +from flwr.server.strategy import FedAvg from flwr.server.strategy.aggregate import aggregate, weighted_loss_avg diff --git a/examples/quickstart-fastai/.gitignore b/examples/quickstart-fastai/.gitignore deleted file mode 100644 index fa6560829782..000000000000 --- a/examples/quickstart-fastai/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.ipynb diff --git a/examples/quickstart-fastai/README.md b/examples/quickstart-fastai/README.md index 38ef23c95a1e..977529914e9a 100644 --- a/examples/quickstart-fastai/README.md +++ b/examples/quickstart-fastai/README.md @@ -1,74 +1,64 @@ -# Flower Example using fastai +--- +tags: [quickstart, vision] +dataset: [MNIST] +framework: [fastai] +--- -This introductory example to Flower uses [fastai](https://www.fast.ai/), but deep knowledge of fastai is not necessarily required to run the example. However, it will help you understand how to adapt Flower to your use case. -Running this example in itself is quite easy. 
+# Federated Learning with fastai and Flower (Quickstart Example) -## Project Setup +This introductory example to Flower uses [fastai](https://www.fast.ai/), but deep knowledge of fastai is not necessarily required to run the example. The example will help you understand how to adapt Flower to your specific use case, and running it is quite straightforward. -Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: +fastai is a deep learning library built on PyTorch which provides practitioners with high-level components for building deep learning projects. In this example, we will train a [SqueezeNet v1.1](https://github.com/forresti/SqueezeNet/tree/master/SqueezeNet_v1.1) model on the [MNIST](https://huggingface.co/datasets/ylecun/mnist) dataset. The data will be downloaded and partitioned using [Flower Datasets](https://flower.ai/docs/datasets/). -```shell -git clone --depth=1 https://github.com/adap/flower.git _tmp && mv _tmp/examples/quickstart-fastai . && rm -rf _tmp && cd quickstart-fastai -``` +## Set up the project -This will create a new directory called `quickstart-fastai` containing the following files: +### Clone the project -```shell --- pyproject.toml --- requirements.txt --- client.py --- server.py --- run.sh --- README.md -``` - -### Installing Dependencies - -Project dependencies (such as `fastai` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. 
- -#### Poetry +Start by cloning the example project: ```shell -poetry install -poetry shell +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/quickstart-fastai . \ + && rm -rf _tmp && cd quickstart-fastai ``` -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: +This will create a new directory called `quickstart-fastai` containing the following files: ```shell -poetry run python3 -c "import flwr" +quickstart-fastai +├── fastai_example +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -If you don't see any errors you're good to go! +### Install dependencies and project -#### pip +Install the dependencies defined in `pyproject.toml` as well as the `fastai_example` package. -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. - -```shell -pip install -r requirements.txt +```bash +pip install -e . ``` -## Run Federated Learning with fastai and Flower - -Afterwards you are ready to start the Flower server as well as the clients. You can simply start the server in a terminal as follows: +## Run the project -```shell -python3 server.py -``` +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -Now you are ready to start the Flower clients which will participate in the learning. To do so simply open two more terminal windows and run the following commands. 
+### Run with the Simulation Engine -Start client 1 in the first terminal: - -```shell -python3 client.py +```bash +flwr run . ``` -Start client 2 in the second terminal: +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: -```shell -python3 client.py +```bash +flwr run . --run-config num-server-rounds=5 ``` -You will see that fastai is starting a federated training. For a more in-depth look, be sure to check out the code on our [repo](https://github.com/adap/flower/tree/main/examples/quickstart-fastai). +### Run with the Deployment Engine + +> \[!NOTE\] +> An update to this example will show how to run this Flower application with the Deployment Engine and TLS certificates, or with Docker. diff --git a/examples/quickstart-fastai/client.py b/examples/quickstart-fastai/client.py deleted file mode 100644 index 6bb2a751d544..000000000000 --- a/examples/quickstart-fastai/client.py +++ /dev/null @@ -1,49 +0,0 @@ -import warnings -from collections import OrderedDict - -import torch -from fastai.vision.all import * - -import flwr as fl - - -warnings.filterwarnings("ignore", category=UserWarning) - -# Download MNIST dataset -path = untar_data(URLs.MNIST) - -# Load dataset -dls = ImageDataLoaders.from_folder( - path, valid_pct=0.5, train="training", valid="testing", num_workers=0 -) - -# Define model -learn = vision_learner(dls, squeezenet1_1, metrics=error_rate) - - -# Define Flower client -class FlowerClient(fl.client.NumPyClient): - def get_parameters(self, config): - return [val.cpu().numpy() for _, val in learn.model.state_dict().items()] - - def set_parameters(self, parameters): - params_dict = zip(learn.model.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) - learn.model.load_state_dict(state_dict, strict=True) - - def fit(self, parameters, config): - self.set_parameters(parameters) - learn.fit(1) - return self.get_parameters(config={}), 
len(dls.train), {} - - def evaluate(self, parameters, config): - self.set_parameters(parameters) - loss, error_rate = learn.validate() - return loss, len(dls.valid), {"accuracy": 1 - error_rate} - - -# Start Flower client -fl.client.start_client( - server_address="127.0.0.1:8080", - client=FlowerClient().to_client(), -) diff --git a/examples/quickstart-fastai/fastai_example/__init__.py b/examples/quickstart-fastai/fastai_example/__init__.py new file mode 100644 index 000000000000..14ef80393289 --- /dev/null +++ b/examples/quickstart-fastai/fastai_example/__init__.py @@ -0,0 +1 @@ +"""fastai_example: A Flower / Fastai app.""" diff --git a/examples/quickstart-fastai/fastai_example/client_app.py b/examples/quickstart-fastai/fastai_example/client_app.py new file mode 100644 index 000000000000..0094d6fc9f56 --- /dev/null +++ b/examples/quickstart-fastai/fastai_example/client_app.py @@ -0,0 +1,56 @@ +"""fastai_example: A Flower / Fastai app.""" + +import warnings +from typing import Any + +from fastai.learner import Learner +from fastai.losses import CrossEntropyLossFlat +from fastai.vision.all import error_rate, squeezenet1_1 +from fastai.vision.data import DataLoaders +from flwr.client import Client, ClientApp, NumPyClient +from flwr.common import Context + +from fastai_example.task import get_params, load_data, set_params + +warnings.filterwarnings("ignore", category=UserWarning) + + +# Define Flower client +class FlowerClient(NumPyClient): + def __init__(self, learn, dls) -> None: + self.learn = learn + self.dls = dls + + def fit(self, parameters, config) -> tuple[list, int, dict]: + set_params(self.learn.model, parameters) + with self.learn.no_bar(), self.learn.no_logging(): + self.learn.fit(1) + return get_params(self.learn.model), len(self.dls.train), {} + + def evaluate(self, parameters, config) -> tuple[Any, int, dict[str, Any]]: + set_params(self.learn.model, parameters) + with self.learn.no_bar(), self.learn.no_logging(): + loss, error_rate = 
self.learn.validate() + return loss, len(self.dls.valid), {"accuracy": 1 - error_rate} + + +def client_fn(context: Context) -> Client: + """Construct a Client that will be run in a ClientApp.""" + + # Read the node_config to fetch data partition associated to this node + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + + trainloader, valloader, _ = load_data(partition_id, num_partitions) + dls = DataLoaders(trainloader, valloader) + model = squeezenet1_1() + learn = Learner( + dls, + model, + loss_func=CrossEntropyLossFlat(), + metrics=error_rate, + ) + return FlowerClient(learn, dls).to_client() + + +app = ClientApp(client_fn=client_fn) diff --git a/examples/quickstart-fastai/fastai_example/server_app.py b/examples/quickstart-fastai/fastai_example/server_app.py new file mode 100644 index 000000000000..0f61319c54b0 --- /dev/null +++ b/examples/quickstart-fastai/fastai_example/server_app.py @@ -0,0 +1,47 @@ +"""fastai_example: A Flower / Fastai app.""" + +from typing import List, Tuple + +from fastai.vision.all import squeezenet1_1 +from flwr.common import Context, Metrics, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg + +from fastai_example.task import get_params + + +# Define metric aggregation function +def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: + """Compute weighted average metric values.""" + # Multiply accuracy of each client by number of examples used + accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + # Aggregate and return custom metric (weighted average) + return {"accuracy": sum(accuracies) / sum(examples)} + + +def server_fn(context: Context) -> ServerAppComponents: + """Construct components for ServerApp.""" + + # Let's define the global model and pass it to the strategy + # Note this is 
optional. + parameters = ndarrays_to_parameters(get_params(squeezenet1_1())) + + # Define strategy + fraction_fit = context.run_config["fraction-fit"] + strategy = FedAvg( + fraction_fit=fraction_fit, + fraction_evaluate=0.5, + evaluate_metrics_aggregation_fn=weighted_average, + initial_parameters=parameters, + ) + + # Construct ServerConfig + num_rounds = context.run_config["num-server-rounds"] + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(config=config, strategy=strategy) + + +app = ServerApp(server_fn=server_fn) diff --git a/examples/quickstart-fastai/fastai_example/task.py b/examples/quickstart-fastai/fastai_example/task.py new file mode 100644 index 000000000000..397ba0422c36 --- /dev/null +++ b/examples/quickstart-fastai/fastai_example/task.py @@ -0,0 +1,82 @@ +"""fastai_example: A Flower / Fastai app.""" + +from collections import OrderedDict + +import torch +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner +from torch.utils.data import DataLoader +from torchvision.transforms import Compose, Lambda, Resize, ToTensor + +fds = None # Cache FederatedDataset + + +def load_data( + partition_id, + num_partitions, +) -> tuple[DataLoader, DataLoader, DataLoader]: + # Only initialize `FederatedDataset` once + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="ylecun/mnist", + partitioners={"train": partitioner}, + trust_remote_code=True, + ) + partition = fds.load_partition(partition_id, "train") + + # Resize and repeat channels to use MNIST, which have grayscale images, + # with squeezenet, which expects 3 channels. 
+ # Ref: https://discuss.pytorch.org/t/fine-tuning-squeezenet-for-mnist-dataset/31221/2 + pytorch_transforms = Compose( + [Resize(224), ToTensor(), Lambda(lambda x: x.expand(3, -1, -1))] + ) + + def apply_transforms(batch): + """Apply transforms to the partition from FederatedDataset.""" + batch["image"] = [pytorch_transforms(img) for img in batch["image"]] + return batch + + def collate_fn(batch): + """Change the dictionary to tuple to keep the exact dataloader behavior.""" + images = [item["image"] for item in batch] + labels = [item["label"] for item in batch] + + images_tensor = torch.stack(images) + labels_tensor = torch.tensor(labels) + + return images_tensor, labels_tensor + + partition = partition.with_transform(apply_transforms) + # 20 % for on federated evaluation + partition_full = partition.train_test_split(test_size=0.2, seed=42) + # 60 % for the federated train and 20 % for the federated validation (both in fit) + partition_train_valid = partition_full["train"].train_test_split( + train_size=0.75, seed=42 + ) + trainloader = DataLoader( + partition_train_valid["train"], + batch_size=32, + shuffle=True, + collate_fn=collate_fn, + ) + valloader = DataLoader( + partition_train_valid["test"], + batch_size=32, + collate_fn=collate_fn, + ) + testloader = DataLoader( + partition_full["test"], batch_size=32, collate_fn=collate_fn, num_workers=1 + ) + return trainloader, valloader, testloader + + +def get_params(model) -> list: + return [val.cpu().numpy() for _, val in model.state_dict().items()] + + +def set_params(model, parameters) -> None: + params_dict = zip(model.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + model.load_state_dict(state_dict, strict=True) diff --git a/examples/quickstart-fastai/pyproject.toml b/examples/quickstart-fastai/pyproject.toml index 19a25291a6af..34b817f84e41 100644 --- a/examples/quickstart-fastai/pyproject.toml +++ b/examples/quickstart-fastai/pyproject.toml @@ -1,16 
+1,36 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] -name = "quickstart-fastai" -version = "0.1.0" -description = "Fastai Federated Learning Quickstart with Flower" -authors = ["The Flower Authors "] +[project] +name = "fastai_example" +version = "1.0.0" +description = "Federated Learning with Fastai and Flower (Quickstart Example)" +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]>=1.12.0", + "flwr-datasets[vision]>=0.3.0", + "fastai==2.7.14", + "torch==2.2.0", + "torchvision==0.17.0", +] -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = ">=1.0,<2.0" -fastai = "2.7.14" -torch = "2.2.0" -torchvision = "0.17.0" +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "fastai_example.server_app:app" +clientapp = "fastai_example.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +fraction-fit = 0.5 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 diff --git a/examples/quickstart-fastai/requirements.txt b/examples/quickstart-fastai/requirements.txt deleted file mode 100644 index 9c6e8d77293a..000000000000 --- a/examples/quickstart-fastai/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -flwr>=1.0, <2.0 -fastai==2.7.14 -torch==2.2.0 -torchvision==0.17.0 diff --git a/examples/quickstart-fastai/run.sh b/examples/quickstart-fastai/run.sh deleted file mode 100755 index 3def34c9fcaa..000000000000 --- a/examples/quickstart-fastai/run.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/ - -echo "Starting server" -python server.py & -sleep 3 # Sleep for 3s to give the server enough time to start - -for i in `seq 0 1`; do - echo "Starting client $i" - python client.py & -done - -# Enable 
CTRL+C to stop all background processes -trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM -# Wait for all background processes to complete -wait diff --git a/examples/quickstart-fastai/server.py b/examples/quickstart-fastai/server.py deleted file mode 100644 index fe691a88aba0..000000000000 --- a/examples/quickstart-fastai/server.py +++ /dev/null @@ -1,25 +0,0 @@ -from typing import List, Tuple - -import flwr as fl -from flwr.common import Metrics - - -# Define metric aggregation function -def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: - # Multiply accuracy of each client by number of examples used - accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] - examples = [num_examples for num_examples, _ in metrics] - - # Aggregate and return custom metric (weighted average) - return {"accuracy": sum(accuracies) / sum(examples)} - - -# Define strategy -strategy = fl.server.strategy.FedAvg(evaluate_metrics_aggregation_fn=weighted_average) - -# Start Flower server -fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=3), - strategy=strategy, -) diff --git a/examples/quickstart-huggingface/README.md b/examples/quickstart-huggingface/README.md index ce7790cd4af5..124689441656 100644 --- a/examples/quickstart-huggingface/README.md +++ b/examples/quickstart-huggingface/README.md @@ -1,74 +1,79 @@ -# Federated HuggingFace Transformers using Flower and PyTorch +--- +tags: [quickstart, llm, nlp, sentiment] +dataset: [IMDB] +framework: [transformers] +--- -This introductory example to using [HuggingFace](https://huggingface.co) Transformers with Flower with PyTorch. This example has been extended from the [quickstart-pytorch](https://flower.ai/docs/examples/quickstart-pytorch.html) example. 
The training script closely follows the [HuggingFace course](https://huggingface.co/course/chapter3?fw=pt), so you are encouraged to check that out for a detailed explanation of the transformer pipeline. +# Federated Learning with HuggingFace Transformers and Flower (Quickstart Example) -Like `quickstart-pytorch`, running this example in itself is also meant to be quite easy. +This is an introductory example of using [🤗Transformers](https://huggingface.co/docs/transformers/en/index) with Flower. The training script closely follows the [HuggingFace course](https://huggingface.co/course/chapter3?fw=pt), so you are encouraged to check that out for a detailed explanation of the transformer pipeline. -## Project Setup +In this example, we will federate the training of a [BERT-tiny](https://huggingface.co/prajjwal1/bert-tiny) model on the [IMDB](https://huggingface.co/datasets/stanfordnlp/imdb) dataset. The data will be downloaded and partitioned using [Flower Datasets](https://flower.ai/docs/datasets/). This example runs best when a GPU is available. + +## Set up the project + +### Clone the project Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: ```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/quickstart-huggingface . && rm -rf flower && cd quickstart-huggingface +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/quickstart-huggingface .
\ + && rm -rf _tmp && cd quickstart-huggingface ``` This will create a new directory called `quickstart-huggingface` containing the following files: ```shell --- pyproject.toml --- requirements.txt --- client.py --- server.py --- README.md +quickstart-huggingface +├── huggingface_example +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -### Installing Dependencies +### Install dependencies and project -Project dependencies (such as `torch` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. +Install the dependencies defined in `pyproject.toml` as well as the `huggingface_example` package. -#### Poetry - -```shell -poetry install -poetry shell +```bash +pip install -e . ``` -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: - -```shell -poetry run python3 -c "import flwr" -``` +## Run the Example -If you don't see any errors you're good to go! +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. 
-#### pip +### Run with the Simulation Engine -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. +> \[!TIP\] +> This example runs faster when the `ClientApp`s have access to a GPU. If your system has one, you can make use of it by configuring the `backend.client-resources` component in `pyproject.toml`. If you want to try running the example with GPU right away, use the `local-simulation-gpu` federation as shown below. -```shell -pip install -r requirements.txt +```bash +# Run with the default federation (CPU only) +flwr run . ``` -## Run Federated Learning with Flower - -Afterwards you are ready to start the Flower server as well as the clients. You can simply start the server in a terminal as follows: +Run the project in the `local-simulation-gpu` federation that gives CPU and GPU resources to each `ClientApp`. By default, at most 4x`ClientApp` (using ~1 GB of VRAM each) will run in parallel in each available GPU. Note you can adjust the degree of parallelism by modifying the `client-resources` specification. -```shell -python3 server.py +```bash +# Run with the `local-simulation-gpu` federation +flwr run . local-simulation-gpu ``` -Now you are ready to start the Flower clients which will participate in the learning. To do so simply open two more terminal windows and run the following commands. - -Start client 1 in the first terminal: +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. 
For example -```shell -python3 client.py --partition-id 0 +```bash +flwr run --run-config "num-server-rounds=5 fraction-fit=0.1" ``` -Start client 2 in the second terminal: +> \[!TIP\] +> For a more detailed walk-through check our [quickstart 🤗Transformers tutorial](https://flower.ai/docs/framework/tutorial-quickstart-huggingface.html) -```shell -python3 client.py --partition-id 1 -``` +### Run with the Deployment Engine -You will see that PyTorch is starting a federated training. +> \[!NOTE\] +> An update to this example will show how to run this Flower project with the Deployment Engine and TLS certificates, or with Docker. diff --git a/examples/quickstart-huggingface/client.py b/examples/quickstart-huggingface/client.py deleted file mode 100644 index a9d48bfa8f13..000000000000 --- a/examples/quickstart-huggingface/client.py +++ /dev/null @@ -1,127 +0,0 @@ -import argparse -import warnings -from collections import OrderedDict - -import flwr as fl -import torch -from evaluate import load as load_metric -from torch.optim import AdamW -from torch.utils.data import DataLoader -from transformers import AutoModelForSequenceClassification -from transformers import AutoTokenizer, DataCollatorWithPadding - -from flwr_datasets import FederatedDataset - -warnings.filterwarnings("ignore", category=UserWarning) -DEVICE = torch.device("cpu") -CHECKPOINT = "distilbert-base-uncased" # transformer model checkpoint - - -def load_data(partition_id): - """Load IMDB data (training and eval)""" - fds = FederatedDataset(dataset="imdb", partitioners={"train": 1_000}) - partition = fds.load_partition(partition_id) - # Divide data: 80% train, 20% test - partition_train_test = partition.train_test_split(test_size=0.2, seed=42) - - tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT, model_max_length=512) - - def tokenize_function(examples): - return tokenizer(examples["text"], truncation=True) - - partition_train_test = partition_train_test.map(tokenize_function, batched=True) - 
partition_train_test = partition_train_test.remove_columns("text") - partition_train_test = partition_train_test.rename_column("label", "labels") - - data_collator = DataCollatorWithPadding(tokenizer=tokenizer) - trainloader = DataLoader( - partition_train_test["train"], - shuffle=True, - batch_size=32, - collate_fn=data_collator, - ) - - testloader = DataLoader( - partition_train_test["test"], batch_size=32, collate_fn=data_collator - ) - - return trainloader, testloader - - -def train(net, trainloader, epochs): - optimizer = AdamW(net.parameters(), lr=5e-5) - net.train() - for _ in range(epochs): - for batch in trainloader: - batch = {k: v.to(DEVICE) for k, v in batch.items()} - outputs = net(**batch) - loss = outputs.loss - loss.backward() - optimizer.step() - optimizer.zero_grad() - - -def test(net, testloader): - metric = load_metric("accuracy") - loss = 0 - net.eval() - for batch in testloader: - batch = {k: v.to(DEVICE) for k, v in batch.items()} - with torch.no_grad(): - outputs = net(**batch) - logits = outputs.logits - loss += outputs.loss.item() - predictions = torch.argmax(logits, dim=-1) - metric.add_batch(predictions=predictions, references=batch["labels"]) - loss /= len(testloader.dataset) - accuracy = metric.compute()["accuracy"] - return loss, accuracy - - -def main(partition_id): - net = AutoModelForSequenceClassification.from_pretrained( - CHECKPOINT, num_labels=2 - ).to(DEVICE) - - trainloader, testloader = load_data(partition_id) - - # Flower client - class IMDBClient(fl.client.NumPyClient): - def get_parameters(self, config): - return [val.cpu().numpy() for _, val in net.state_dict().items()] - - def set_parameters(self, parameters): - params_dict = zip(net.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict}) - net.load_state_dict(state_dict, strict=True) - - def fit(self, parameters, config): - self.set_parameters(parameters) - print("Training Started...") - train(net, trainloader, 
epochs=1) - print("Training Finished.") - return self.get_parameters(config={}), len(trainloader), {} - - def evaluate(self, parameters, config): - self.set_parameters(parameters) - loss, accuracy = test(net, testloader) - return float(loss), len(testloader), {"accuracy": float(accuracy)} - - # Start client - fl.client.start_client( - server_address="127.0.0.1:8080", client=IMDBClient().to_client() - ) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--partition-id", - choices=list(range(1_000)), - required=True, - type=int, - help="Partition of the dataset divided into 1,000 iid partitions created " - "artificially.", - ) - partition_id = parser.parse_args().partition_id - main(partition_id) diff --git a/examples/quickstart-huggingface/huggingface_example/__init__.py b/examples/quickstart-huggingface/huggingface_example/__init__.py new file mode 100644 index 000000000000..6d897650c6bf --- /dev/null +++ b/examples/quickstart-huggingface/huggingface_example/__init__.py @@ -0,0 +1 @@ +"""huggingface_example: A Flower / Hugging Face app.""" diff --git a/examples/quickstart-huggingface/huggingface_example/client_app.py b/examples/quickstart-huggingface/huggingface_example/client_app.py new file mode 100644 index 000000000000..8989e52281ad --- /dev/null +++ b/examples/quickstart-huggingface/huggingface_example/client_app.py @@ -0,0 +1,58 @@ +"""huggingface_example: A Flower / Hugging Face app.""" + +import warnings + +import torch +from flwr.client import Client, ClientApp, NumPyClient +from flwr.common import Context +from transformers import logging +from huggingface_example.task import ( + train, + test, + load_data, + set_params, + get_params, + get_model, +) + +warnings.filterwarnings("ignore", category=FutureWarning) + +# To mute warnings reminding that we need to train the model to a downstream task +# This is something this example does. 
+logging.set_verbosity_error() + + +# Flower client +class IMDBClient(NumPyClient): + def __init__(self, model_name, trainloader, testloader) -> None: + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + self.trainloader = trainloader + self.testloader = testloader + self.net = get_model(model_name) + self.net.to(self.device) + + def fit(self, parameters, config) -> tuple[list, int, dict]: + set_params(self.net, parameters) + train(self.net, self.trainloader, epochs=1, device=self.device) + return get_params(self.net), len(self.trainloader), {} + + def evaluate(self, parameters, config) -> tuple[float, int, dict[str, float]]: + set_params(self.net, parameters) + loss, accuracy = test(self.net, self.testloader, device=self.device) + return float(loss), len(self.testloader), {"accuracy": float(accuracy)} + + +def client_fn(context: Context) -> Client: + """Construct a Client that will be run in a ClientApp.""" + # Read the node_config to fetch data partition associated to this node + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + + # Read the run config to get settings to configure the Client + model_name = context.run_config["model-name"] + trainloader, testloader = load_data(partition_id, num_partitions, model_name) + + return IMDBClient(model_name, trainloader, testloader).to_client() + + +app = ClientApp(client_fn=client_fn) diff --git a/examples/quickstart-huggingface/huggingface_example/server_app.py b/examples/quickstart-huggingface/huggingface_example/server_app.py new file mode 100644 index 000000000000..d0db1b43fa36 --- /dev/null +++ b/examples/quickstart-huggingface/huggingface_example/server_app.py @@ -0,0 +1,33 @@ +"""huggingface_example: A Flower / Hugging Face app.""" + +from flwr.common import Context, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg + +from 
huggingface_example.task import get_params, get_model + + +def server_fn(context: Context) -> ServerAppComponents: + """Construct components for ServerApp.""" + # Construct ServerConfig + num_rounds = context.run_config["num-server-rounds"] + config = ServerConfig(num_rounds=num_rounds) + + # Set global model initialization + model_name = context.run_config["model-name"] + ndarrays = get_params(get_model(model_name)) + global_model_init = ndarrays_to_parameters(ndarrays) + + # Define strategy + fraction_fit = context.run_config["fraction-fit"] + fraction_evaluate = context.run_config["fraction-evaluate"] + strategy = FedAvg( + fraction_fit=fraction_fit, + fraction_evaluate=fraction_evaluate, + initial_parameters=global_model_init, + ) + + return ServerAppComponents(config=config, strategy=strategy) + + +app = ServerApp(server_fn=server_fn) diff --git a/examples/quickstart-huggingface/huggingface_example/task.py b/examples/quickstart-huggingface/huggingface_example/task.py new file mode 100644 index 000000000000..1c5b8d087dca --- /dev/null +++ b/examples/quickstart-huggingface/huggingface_example/task.py @@ -0,0 +1,105 @@ +"""huggingface_example: A Flower / Hugging Face app.""" + +from typing import Any +from collections import OrderedDict + +import torch +from evaluate import load as load_metric +from torch.optim import AdamW +from torch.utils.data import DataLoader +from transformers import ( + AutoTokenizer, + DataCollatorWithPadding, + AutoModelForSequenceClassification, +) +from datasets.utils.logging import disable_progress_bar +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner + + +disable_progress_bar() +fds = None # Cache FederatedDataset + + +def load_data( + partition_id: int, num_partitions: int, model_name: str +) -> tuple[DataLoader[Any], DataLoader[Any]]: + """Load IMDB data (training and eval)""" + # Only initialize `FederatedDataset` once + global fds + if fds is None: + # Partition the IMDB dataset 
into N partitions + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="stanfordnlp/imdb", partitioners={"train": partitioner} + ) + partition = fds.load_partition(partition_id) + # Divide data: 80% train, 20% test + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) + + tokenizer = AutoTokenizer.from_pretrained(model_name, model_max_length=512) + + def tokenize_function(examples): + return tokenizer(examples["text"], truncation=True, add_special_tokens=True) + + partition_train_test = partition_train_test.map(tokenize_function, batched=True) + partition_train_test = partition_train_test.remove_columns("text") + partition_train_test = partition_train_test.rename_column("label", "labels") + + data_collator = DataCollatorWithPadding(tokenizer=tokenizer) + trainloader = DataLoader( + partition_train_test["train"], + shuffle=True, + batch_size=32, + collate_fn=data_collator, + ) + + testloader = DataLoader( + partition_train_test["test"], batch_size=32, collate_fn=data_collator + ) + + return trainloader, testloader + + +def get_model(model_name): + return AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2) + + +def get_params(model): + return [val.cpu().numpy() for _, val in model.state_dict().items()] + + +def set_params(model, parameters) -> None: + params_dict = zip(model.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict}) + model.load_state_dict(state_dict, strict=True) + + +def train(net, trainloader, epochs, device) -> None: + optimizer = AdamW(net.parameters(), lr=5e-5) + net.train() + for _ in range(epochs): + for batch in trainloader: + batch = {k: v.to(device) for k, v in batch.items()} + outputs = net(**batch) + loss = outputs.loss + loss.backward() + optimizer.step() + optimizer.zero_grad() + + +def test(net, testloader, device) -> tuple[Any | float, Any]: + metric = load_metric("accuracy") + loss = 0 + 
net.eval() + for batch in testloader: + batch = {k: v.to(device) for k, v in batch.items()} + with torch.no_grad(): + outputs = net(**batch) + logits = outputs.logits + loss += outputs.loss.item() + predictions = torch.argmax(logits, dim=-1) + metric.add_batch(predictions=predictions, references=batch["labels"]) + loss /= len(testloader.dataset) + accuracy = metric.compute()["accuracy"] + return loss, accuracy diff --git a/examples/quickstart-huggingface/pyproject.toml b/examples/quickstart-huggingface/pyproject.toml index 2b46804d7b45..f13c48d96cae 100644 --- a/examples/quickstart-huggingface/pyproject.toml +++ b/examples/quickstart-huggingface/pyproject.toml @@ -1,22 +1,49 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] -name = "quickstart-huggingface" -version = "0.1.0" -description = "Hugging Face Transformers Federated Learning Quickstart with Flower" +[project] +name = "huggingface_example" +version = "1.0.0" +description = "Federated Learning with Hugging Face Transformers and Flower (Quickstart Example)" +license = "Apache-2.0" authors = [ - "The Flower Authors ", - "Kaushik Amar Das ", + { name = "The Flower Authors", email = "hello@flower.ai" }, + { name = "Kaushik Amar Das", email = "kaushik.das@iiitg.ac.in" }, ] +dependencies = [ + "flwr[simulation]==1.12.0", + "flwr-datasets>=0.3.0", + "torch==2.4.0", + "transformers>=4.30.0,<5.0", + "evaluate>=0.4.0,<1.0", + "datasets>=2.0.0, <3.0", + "scikit-learn>=1.3.1, <2.0", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "huggingface_example.server_app:app" +clientapp = "huggingface_example.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +model-name = "prajjwal1/bert-tiny" +fraction-fit = 0.05 +fraction-evaluate = 0.1 + +[tool.flwr.federations] +default = "local-simulation" + 
+[tool.flwr.federations.local-simulation] +options.num-supernodes = 100 -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = ">=1.0,<2.0" -flwr-datasets = ">=0.0.2,<1.0.0" -torch = ">=1.13.1,<2.0" -transformers = ">=4.30.0,<5.0" -evaluate = ">=0.4.0,<1.0" -datasets = ">=2.0.0, <3.0" -scikit-learn = ">=1.3.1, <2.0" +[tool.flwr.federations.local-simulation-gpu] +options.num-supernodes = 100 +options.backend.client-resources.num-cpus = 4 # each ClientApp assumes to use 4CPUs +options.backend.client-resources.num-gpus = 0.25 # at most 4 ClientApp will run in a given GPU (lower it to increase parallelism) diff --git a/examples/quickstart-huggingface/requirements.txt b/examples/quickstart-huggingface/requirements.txt deleted file mode 100644 index 3cd5735625ba..000000000000 --- a/examples/quickstart-huggingface/requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -flwr>=1.0, <2.0 -flwr-datasets>=0.0.2, <1.0.0 -torch>=1.13.1, <2.0 -transformers>=4.30.0, <5.0 -evaluate>=0.4.0, <1.0 -datasets>=2.0.0, <3.0 -scikit-learn>=1.3.1, <2.0 diff --git a/examples/quickstart-huggingface/run.sh b/examples/quickstart-huggingface/run.sh deleted file mode 100755 index fa989eab1471..000000000000 --- a/examples/quickstart-huggingface/run.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -echo "Starting server" -python server.py & -sleep 3 # Sleep for 3s to give the server enough time to start - -for i in `seq 0 1`; do - echo "Starting client $i" - python client.py --partition-id ${i}& -done - -# This will allow you to use CTRL+C to stop all background processes -trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM -# Wait for all background processes to complete -wait diff --git a/examples/quickstart-huggingface/server.py b/examples/quickstart-huggingface/server.py deleted file mode 100644 index aab87982076e..000000000000 --- a/examples/quickstart-huggingface/server.py +++ /dev/null @@ -1,16 +0,0 @@ -import flwr as fl - - -if __name__ == "__main__": - # Define strategy - strategy = 
fl.server.strategy.FedAvg( - fraction_fit=1.0, - fraction_evaluate=1.0, - ) - - # Start server - fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=3), - strategy=strategy, - ) diff --git a/examples/quickstart-jax/README.md b/examples/quickstart-jax/README.md index 836adf558d88..b47f3a82e13b 100644 --- a/examples/quickstart-jax/README.md +++ b/examples/quickstart-jax/README.md @@ -1,3 +1,9 @@ +--- +tags: [quickstart, linear regression] +dataset: [Synthetic] +framework: [JAX] +--- + # JAX: From Centralized To Federated This example demonstrates how an already existing centralized JAX-based machine learning project can be federated with Flower. diff --git a/examples/quickstart-jax/client.py b/examples/quickstart-jax/client.py index 2257a3d6daa3..4a2aaf0e5a93 100644 --- a/examples/quickstart-jax/client.py +++ b/examples/quickstart-jax/client.py @@ -1,14 +1,12 @@ """Flower client example using JAX for linear regression.""" -from typing import Dict, List, Tuple, Callable +from typing import Callable, Dict, List, Tuple import flwr as fl -import numpy as np import jax import jax.numpy as jnp - import jax_training - +import numpy as np # Load data and determine model shape train_x, train_y, test_x, test_y = jax_training.load_data() diff --git a/examples/quickstart-jax/jax_training.py b/examples/quickstart-jax/jax_training.py index a2e23a0927bc..f57db75d5963 100644 --- a/examples/quickstart-jax/jax_training.py +++ b/examples/quickstart-jax/jax_training.py @@ -7,12 +7,13 @@ please read the JAX documentation or the mentioned tutorial. 
""" -from typing import Dict, List, Tuple, Callable +from typing import Callable, Dict, List, Tuple + import jax import jax.numpy as jnp +import numpy as np from sklearn.datasets import make_regression from sklearn.model_selection import train_test_split -import numpy as np key = jax.random.PRNGKey(0) diff --git a/examples/quickstart-jax/pyproject.toml b/examples/quickstart-jax/pyproject.toml index c956191369b5..68a3455aedee 100644 --- a/examples/quickstart-jax/pyproject.toml +++ b/examples/quickstart-jax/pyproject.toml @@ -5,7 +5,7 @@ description = "JAX example training a linear regression model with federated lea authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr = "1.0.0" jax = "0.4.17" jaxlib = "0.4.17" diff --git a/examples/quickstart-mlcube/README.md b/examples/quickstart-mlcube/README.md index 8e6fc29b3ad8..f0c6c5664a82 100644 --- a/examples/quickstart-mlcube/README.md +++ b/examples/quickstart-mlcube/README.md @@ -1,3 +1,9 @@ +--- +tags: [quickstart, vision, deployment] +dataset: [MNIST] +framework: [mlcube, tensorflow, Keras] +--- + # Flower Example using TensorFlow/Keras + MLCube This introductory example to Flower uses MLCube together with Keras, but deep knowledge of Keras is not necessarily required to run the example. However, it will help you understand how to adapt Flower to your use-cases with MLCube. Running this example in itself is quite easy. 
diff --git a/examples/quickstart-mlcube/client.py b/examples/quickstart-mlcube/client.py index 46ddd45f52ce..458da07606ba 100644 --- a/examples/quickstart-mlcube/client.py +++ b/examples/quickstart-mlcube/client.py @@ -1,6 +1,8 @@ import os import sys + import flwr as fl + import mlcube_utils as mlcube diff --git a/examples/quickstart-mlcube/mlcube_utils.py b/examples/quickstart-mlcube/mlcube_utils.py index 1db5c446d681..8d72d43116d1 100644 --- a/examples/quickstart-mlcube/mlcube_utils.py +++ b/examples/quickstart-mlcube/mlcube_utils.py @@ -1,12 +1,11 @@ +import json import os -import sys import subprocess -import tensorflow as tf -import json +import sys +import tensorflow as tf from flwr.common import ndarrays_to_parameters - MODULE_PATH = os.path.abspath(__file__) MODULE_DIR = os.path.dirname(MODULE_PATH) MLCUBE_DIR = os.path.join(MODULE_DIR, "mlcube") diff --git a/examples/quickstart-mlcube/pyproject.toml b/examples/quickstart-mlcube/pyproject.toml index a2862bd5ebb7..0418efc0b440 100644 --- a/examples/quickstart-mlcube/pyproject.toml +++ b/examples/quickstart-mlcube/pyproject.toml @@ -9,8 +9,8 @@ description = "Keras Federated Learning Quickstart with Flower" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = ">=1.0,<2.0" # For development: { path = "../../", develop = true } +python = ">=3.9,<3.11" +flwr = ">=1.0,<2.0" # For development: { path = "../../", develop = true } tensorflow-cpu = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "platform_machine == \"x86_64\"" } tensorflow-macos = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "sys_platform == \"darwin\" and platform_machine == \"arm64\"" } mlcube = "0.0.9" diff --git a/examples/quickstart-mlx/README.md b/examples/quickstart-mlx/README.md index cca55bcb946a..5914ce5f31dd 100644 --- a/examples/quickstart-mlx/README.md +++ b/examples/quickstart-mlx/README.md @@ -1,351 +1,70 @@ -# Flower Example using MLX +--- +title: Federated Learning with MLX and 
Flower (Quickstart Example) +tags: [quickstart, vision] +dataset: [MNIST] +framework: [MLX] +--- -This introductory example to Flower uses [MLX](https://ml-explore.github.io/mlx/build/html/index.html), but deep knowledge of MLX is not necessarily required to run the example. However, it will help you understand how to adapt Flower to your use case. Running this example in itself is quite easy. +# Federated Learning with MLX and Flower (Quickstart Example) -[MLX](https://ml-explore.github.io/mlx/build/html/index.html) is a NumPy-like array framework designed for efficient and flexible machine learning on Apple silicon. +This introductory example to Flower uses [MLX](https://ml-explore.github.io/mlx/build/html/index.html), but you don't need deep knowledge of MLX to run it. The example will help you understand how to adapt Flower to your specific use case, and running it is quite straightforward. -In this example, we will train a simple 2 layers MLP on MNIST data (handwritten digits recognition). +[MLX](https://ml-explore.github.io/mlx/build/html/index.html) is a NumPy-like array framework designed for efficient and flexible machine learning on Apple Silicon. In this example, we will train a simple 2-layer MLP on the [MNIST](https://huggingface.co/datasets/ylecun/mnist) dataset (handwritten digits recognition). The data will be downloaded and partitioned using [Flower Datasets](https://flower.ai/docs/datasets/). -## Project Setup +## Set up the project -Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: +### Clone the project -```shell -git clone --depth=1 https://github.com/adap/flower.git _tmp && mv _tmp/examples/quickstart-mlx . 
&& rm -rf _tmp && cd quickstart-mlx -``` - -This will create a new directory called `quickstart-mlx` containing the following files: - -```shell --- pyproject.toml --- requirements.txt --- client.py --- server.py --- run.sh --- README.md -``` - -### Installing Dependencies - -Project dependencies (such as `mlx` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. - -#### Poetry - -```shell -poetry install -poetry shell -``` - -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: - -```shell -poetry run python3 -c "import flwr" -``` - -If you don't see any errors you're good to go! - -#### pip - -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. - -```shell -pip install -r requirements.txt -``` - -## Run Federated Learning with MLX and Flower - -Afterwards you are ready to start the Flower server as well as the clients. You can simply start the server in a terminal as follows: - -```shell -python3 server.py -``` - -Now you are ready to start the Flower clients which will participate in the learning. To do so simply open two more terminal windows and run the -following commands. - -Start a first client in the first terminal: - -```shell -python3 client.py --partition-id 0 -``` - -And another one in the second terminal: +Start by cloning the example project: ```shell -python3 client.py --partition-id 1 +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/quickstart-mlx . 
\ + && rm -rf _tmp \ + && cd quickstart-mlx ``` -If you want to utilize your GPU, you can use the `--gpu` argument: +This will create a new directory called `quickstart-mlx` with the following structure: ```shell -python3 client.py --gpu --partition-id 2 -``` - -Note that you can start many more clients if you want, but each will have to be in its own terminal. - -You will see that MLX is starting a federated training. Look at the [code](https://github.com/adap/flower/tree/main/examples/quickstart-mlx) for a detailed explanation. - -## Explanations - -This example is a federated version of the centralized case that can be found -[here](https://github.com/ml-explore/mlx-examples/tree/main/mnist). - -### The data - -We will use `flwr_datasets` to easily download and partition the `MNIST` dataset: - -```python -fds = FederatedDataset(dataset="mnist", partitioners={"train": 3}) -partition = fds.load_partition(partition_id = args.partition_id) -partition_splits = partition.train_test_split(test_size=0.2) - -partition_splits['train'].set_format("numpy") -partition_splits['test'].set_format("numpy") - -train_partition = partition_splits["train"].map( - lambda img: { - "img": img.reshape(-1, 28 * 28).squeeze().astype(np.float32) / 255.0 - }, - input_columns="image", -) -test_partition = partition_splits["test"].map( - lambda img: { - "img": img.reshape(-1, 28 * 28).squeeze().astype(np.float32) / 255.0 - }, - input_columns="image", -) - -data = ( - train_partition["img"], - train_partition["label"].astype(np.uint32), - test_partition["img"], - test_partition["label"].astype(np.uint32), -) - -train_images, train_labels, test_images, test_labels = map(mlx.core.array, data) -``` - -### The model - -We define the model as in the centralized mlx example, it's a simple MLP: - -```python -class MLP(mlx.nn.Module): - """A simple MLP.""" - - def __init__( - self, num_layers: int, input_dim: int, hidden_dim: int, output_dim: int - ): - super().__init__() - layer_sizes = [input_dim] + 
[hidden_dim] * num_layers + [output_dim] - self.layers = [ - mlx.nn.Linear(idim, odim) - for idim, odim in zip(layer_sizes[:-1], layer_sizes[1:]) - ] - - def __call__(self, x): - for l in self.layers[:-1]: - x = mlx.core.maximum(l(x), 0.0) - return self.layers[-1](x) - -``` - -We also define some utility functions to test our model and to iterate over batches. - -```python -def loss_fn(model, X, y): - return mlx.core.mean(mlx.nn.losses.cross_entropy(model(X), y)) - - -def eval_fn(model, X, y): - return mlx.core.mean(mlx.core.argmax(model(X), axis=1) == y) - - -def batch_iterate(batch_size, X, y): - perm = mlx.core.array(np.random.permutation(y.size)) - for s in range(0, y.size, batch_size): - ids = perm[s : s + batch_size] - yield X[ids], y[ids] - -``` - -### The client - -The main changes we have to make to use `MLX` with `Flower` will be found in -the `get_parameters` and `set_parameters` functions. Indeed, MLX doesn't -provide an easy way to convert the model parameters into a list of `np.array`s -(the format we need for the serialization of the messages to work). 
- -The way MLX stores its parameters is as follows: - -``` -{ - "layers": [ - {"weight": mlx.core.array, "bias": mlx.core.array}, - {"weight": mlx.core.array, "bias": mlx.core.array}, - ..., - {"weight": mlx.core.array, "bias": mlx.core.array} - ] -} +quickstart-mlx +├── mlxexample +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -Therefore, to get our list of `np.array`s, we need to extract each array and -convert them into a numpy array: +### Install dependencies and project -```python -def get_parameters(self, config): - layers = self.model.parameters()["layers"] - return [np.array(val) for layer in layers for _, val in layer.items()] -``` - -For the `set_parameters` function, we perform the reverse operation. We receive -a list of arrays and want to convert them into MLX parameters. 
Therefore, we -iterate through pairs of parameters and assign them to the `weight` and `bias` -keys of each layer dict: - -```python -def set_parameters(self, parameters): - new_params = {} - new_params["layers"] = [ - {"weight": mlx.core.array(parameters[i]), "bias": mlx.core.array(parameters[i + 1])} - for i in range(0, len(parameters), 2) - ] - self.model.update(new_params) -``` - -The rest of the functions are directly inspired by the centralized case: - -```python -def fit(self, parameters, config): - self.set_parameters(parameters) - for _ in range(self.num_epochs): - for X, y in batch_iterate( - self.batch_size, self.train_images, self.train_labels - ): - loss, grads = self.loss_and_grad_fn(self.model, X, y) - self.optimizer.update(self.model, grads) - mlx.core.eval(self.model.parameters(), self.optimizer.state) - return self.get_parameters(config={}), len(self.train_images), {} -``` - -Here, after updating the parameters, we perform the training as in the -centralized case, and return the new parameters. - -And for the `evaluate` function: - -```python -def evaluate(self, parameters, config): - self.set_parameters(parameters) - accuracy = eval_fn(self.model, self.test_images, self.test_labels) - loss = loss_fn(self.model, self.test_images, self.test_labels) - return loss.item(), len(self.test_images), {"accuracy": accuracy.item()} -``` +Install the dependencies defined in `pyproject.toml` as well as the `mlxexample` package. -We also begin by updating the parameters with the ones sent by the server, and -then we compute the loss and accuracy using the functions defined above. 
- -Putting everything together we have: - -```python -class FlowerClient(fl.client.NumPyClient): - def __init__( - self, model, optim, loss_and_grad_fn, data, num_epochs, batch_size - ) -> None: - self.model = model - self.optimizer = optim - self.loss_and_grad_fn = loss_and_grad_fn - self.train_images, self.train_labels, self.test_images, self.test_labels = data - self.num_epochs = num_epochs - self.batch_size = batch_size - - def get_parameters(self, config): - layers = self.model.parameters()["layers"] - return [np.array(val) for layer in layers for _, val in layer.items()] - - def set_parameters(self, parameters): - new_params = {} - new_params["layers"] = [ - {"weight": mlx.core.array(parameters[i]), "bias": mlx.core.array(parameters[i + 1])} - for i in range(0, len(parameters), 2) - ] - self.model.update(new_params) - - def fit(self, parameters, config): - self.set_parameters(parameters) - for _ in range(self.num_epochs): - for X, y in batch_iterate( - self.batch_size, self.train_images, self.train_labels - ): - loss, grads = self.loss_and_grad_fn(self.model, X, y) - self.optimizer.update(self.model, grads) - mlx.core.eval(self.model.parameters(), self.optimizer.state) - return self.get_parameters(config={}), len(self.train_images), {} - - def evaluate(self, parameters, config): - self.set_parameters(parameters) - accuracy = eval_fn(self.model, self.test_images, self.test_labels) - loss = loss_fn(self.model, self.test_images, self.test_labels) - return loss.item(), len(self.test_images), {"accuracy": accuracy.item()} +```bash +pip install -e . ``` -And as you can see, with only a few lines of code, our client is ready! Before -we can instantiate it, we need to define a few variables: +## Run the project -```python -num_layers = 2 -hidden_dim = 32 -num_classes = 10 -batch_size = 256 -num_epochs = 1 -learning_rate = 1e-1 +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. 
If you are starting with Flower, we recommend you use the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -model = MLP(num_layers, train_images.shape[-1], hidden_dim, num_classes) +### Run with the Simulation Engine -loss_and_grad_fn = mlx.nn.value_and_grad(model, loss_fn) -optimizer = mlx.optimizers.SGD(learning_rate=learning_rate) +```bash +flwr run . ``` -Finally, we can instantiate it by using the `start_client` function: +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: -```python -# Start Flower client -fl.client.start_client( - server_address="127.0.0.1:8080", - client=FlowerClient( - model, - optimizer, - loss_and_grad_fn, - (train_images, train_labels, test_images, test_labels), - num_epochs, - batch_size, - ).to_client(), -) +```bash +flwr run . --run-config "num-server-rounds=5 learning-rate=0.05" ``` -### The server +> \[!TIP\] +> For a more detailed walk-through check our [quickstart MLX tutorial](https://flower.ai/docs/framework/tutorial-quickstart-mlx.html) -On the server side, we don't need to add anything in particular. The -`weighted_average` function is just there to be able to aggregate the results -and have an accuracy at the end. 
+### Run with the Deployment Engine -```python -# Define metric aggregation function -def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: - # Multiply accuracy of each client by number of examples used - accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] - examples = [num_examples for num_examples, _ in metrics] - - # Aggregate and return custom metric (weighted average) - return {"accuracy": sum(accuracies) / sum(examples)} - - -# Define strategy -strategy = fl.server.strategy.FedAvg(evaluate_metrics_aggregation_fn=weighted_average) - -# Start Flower server -fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=3), - strategy=strategy, -) -``` +> \[!NOTE\] +> An update to this example will show how to run this Flower project with the Deployment Engine and TLS certificates. diff --git a/examples/quickstart-mlx/client.py b/examples/quickstart-mlx/client.py deleted file mode 100644 index 344cfc65e42d..000000000000 --- a/examples/quickstart-mlx/client.py +++ /dev/null @@ -1,152 +0,0 @@ -import argparse - -import mlx.core as mx -import mlx.nn as nn -import mlx.optimizers as optim -import numpy as np -from flwr_datasets import FederatedDataset - -import flwr as fl - - -class MLP(nn.Module): - """A simple MLP.""" - - def __init__( - self, num_layers: int, input_dim: int, hidden_dim: int, output_dim: int - ): - super().__init__() - layer_sizes = [input_dim] + [hidden_dim] * num_layers + [output_dim] - self.layers = [ - nn.Linear(idim, odim) - for idim, odim in zip(layer_sizes[:-1], layer_sizes[1:]) - ] - - def __call__(self, x): - for l in self.layers[:-1]: - x = mx.maximum(l(x), 0.0) - return self.layers[-1](x) - - -def loss_fn(model, X, y): - return mx.mean(nn.losses.cross_entropy(model(X), y)) - - -def eval_fn(model, X, y): - return mx.mean(mx.argmax(model(X), axis=1) == y) - - -def batch_iterate(batch_size, X, y): - perm = mx.array(np.random.permutation(y.size)) - for s in range(0, 
y.size, batch_size): - ids = perm[s : s + batch_size] - yield X[ids], y[ids] - - -# Define Flower client -class FlowerClient(fl.client.NumPyClient): - def __init__( - self, model, optim, loss_and_grad_fn, data, num_epochs, batch_size - ) -> None: - self.model = model - self.optimizer = optim - self.loss_and_grad_fn = loss_and_grad_fn - self.train_images, self.train_labels, self.test_images, self.test_labels = data - self.num_epochs = num_epochs - self.batch_size = batch_size - - def get_parameters(self, config): - layers = self.model.parameters()["layers"] - return [np.array(val) for layer in layers for _, val in layer.items()] - - def set_parameters(self, parameters): - new_params = {} - new_params["layers"] = [ - {"weight": mx.array(parameters[i]), "bias": mx.array(parameters[i + 1])} - for i in range(0, len(parameters), 2) - ] - self.model.update(new_params) - - def fit(self, parameters, config): - self.set_parameters(parameters) - for _ in range(self.num_epochs): - for X, y in batch_iterate( - self.batch_size, self.train_images, self.train_labels - ): - loss, grads = self.loss_and_grad_fn(self.model, X, y) - self.optimizer.update(self.model, grads) - mx.eval(self.model.parameters(), self.optimizer.state) - return self.get_parameters(config={}), len(self.train_images), {} - - def evaluate(self, parameters, config): - self.set_parameters(parameters) - accuracy = eval_fn(self.model, self.test_images, self.test_labels) - loss = loss_fn(self.model, self.test_images, self.test_labels) - return loss.item(), len(self.test_images), {"accuracy": accuracy.item()} - - -if __name__ == "__main__": - parser = argparse.ArgumentParser("Train a simple MLP on MNIST with MLX.") - parser.add_argument("--gpu", action="store_true", help="Use the Metal back-end.") - parser.add_argument( - "--partition-id", - choices=[0, 1, 2], - type=int, - help="Partition of the dataset divided into 3 iid partitions created artificially.", - ) - args = parser.parse_args() - if not args.gpu: - 
mx.set_default_device(mx.cpu) - - num_layers = 2 - hidden_dim = 32 - num_classes = 10 - batch_size = 256 - num_epochs = 1 - learning_rate = 1e-1 - - fds = FederatedDataset(dataset="mnist", partitioners={"train": 3}) - partition = fds.load_partition(partition_id=args.partition_id) - partition_splits = partition.train_test_split(test_size=0.2, seed=42) - - partition_splits["train"].set_format("numpy") - partition_splits["test"].set_format("numpy") - - train_partition = partition_splits["train"].map( - lambda img: { - "img": img.reshape(-1, 28 * 28).squeeze().astype(np.float32) / 255.0 - }, - input_columns="image", - ) - test_partition = partition_splits["test"].map( - lambda img: { - "img": img.reshape(-1, 28 * 28).squeeze().astype(np.float32) / 255.0 - }, - input_columns="image", - ) - - data = ( - train_partition["img"], - train_partition["label"].astype(np.uint32), - test_partition["img"], - test_partition["label"].astype(np.uint32), - ) - - train_images, train_labels, test_images, test_labels = map(mx.array, data) - model = MLP(num_layers, train_images.shape[-1], hidden_dim, num_classes) - - loss_and_grad_fn = nn.value_and_grad(model, loss_fn) - optimizer = optim.SGD(learning_rate=learning_rate) - - # Start Flower client - fl.client.start_client( - server_address="127.0.0.1:8080", - client=FlowerClient( - model, - optimizer, - loss_and_grad_fn, - (train_images, train_labels, test_images, test_labels), - num_epochs, - batch_size, - ).to_client(), - ) diff --git a/examples/quickstart-mlx/mlxexample/__init__.py b/examples/quickstart-mlx/mlxexample/__init__.py new file mode 100644 index 000000000000..d4bb5d0d511d --- /dev/null +++ b/examples/quickstart-mlx/mlxexample/__init__.py @@ -0,0 +1 @@ +"""mlxexample: A Flower / MLX app.""" diff --git a/examples/quickstart-mlx/mlxexample/client_app.py b/examples/quickstart-mlx/mlxexample/client_app.py new file mode 100644 index 000000000000..f1ea7bce65d8 --- /dev/null +++ b/examples/quickstart-mlx/mlxexample/client_app.py @@ 
-0,0 +1,72 @@ +"""mlxexample: A Flower / MLX app.""" + +import mlx.core as mx +import mlx.nn as nn +import mlx.optimizers as optim +from flwr.client import Client, ClientApp, NumPyClient +from flwr.common import Context +from mlxexample.task import ( + MLP, + batch_iterate, + eval_fn, + get_params, + load_data, + loss_fn, + set_params, +) + + +class FlowerClient(NumPyClient): + def __init__(self, model, optimizer, batch_size, data): + self.train_images, self.train_labels, self.test_images, self.test_labels = data + self.model = model + self.optimizer = optimizer + self.num_epochs = 1 + self.batch_size = batch_size + + def fit(self, parameters, config): + """Train the model with data of this client.""" + set_params(self.model, parameters) + loss_and_grad_fn = nn.value_and_grad(self.model, loss_fn) + for _ in range(self.num_epochs): + for X, y in batch_iterate( + self.batch_size, self.train_images, self.train_labels + ): + _, grads = loss_and_grad_fn(self.model, X, y) + self.optimizer.update(self.model, grads) + mx.eval(self.model.parameters(), self.optimizer.state) + return get_params(self.model), len(self.train_images), {} + + def evaluate(self, parameters, config): + """Evaluate the model on the data this client has.""" + set_params(self.model, parameters) + accuracy = eval_fn(self.model, self.test_images, self.test_labels) + loss = loss_fn(self.model, self.test_images, self.test_labels) + return loss.item(), len(self.test_images), {"accuracy": accuracy.item()} + + +def client_fn(context: Context) -> Client: + """Construct a Client that will be run in a ClientApp.""" + + # Read the node_config to fetch data partition associated to this node + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + data = load_data(partition_id, num_partitions) + + # Read the run config to get settings to configure the Client + num_layers = context.run_config["num-layers"] + hidden_dim = context.run_config["hidden-dim"] + 
img_size = context.run_config["img-size"] + batch_size = context.run_config["batch-size"] + lr = context.run_config["learning-rate"] + + # Prepare model and optimizer + model = MLP(num_layers, img_size**2, hidden_dim) + optimizer = optim.SGD(learning_rate=lr) + + # Return Client instance + return FlowerClient(model, optimizer, batch_size, data).to_client() + + +# Flower ClientApp +app = ClientApp(client_fn=client_fn) diff --git a/examples/quickstart-mlx/mlxexample/server_app.py b/examples/quickstart-mlx/mlxexample/server_app.py new file mode 100644 index 000000000000..7c93f38ed449 --- /dev/null +++ b/examples/quickstart-mlx/mlxexample/server_app.py @@ -0,0 +1,48 @@ +"""mlxexample: A Flower / MLX app.""" + +from typing import List, Tuple + +from flwr.common import Context, Metrics, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg +from mlxexample.task import MLP, get_params + + +def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: + """Aggregate custom `accuracy` metric by weighted average.""" + accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + return {"accuracy": sum(accuracies) / sum(examples)} + + +def server_fn(context: Context) -> ServerAppComponents: + """Construct components that set the ServerApp behaviour.""" + + # Init model + model = MLP( + num_layers=context.run_config["num-layers"], + input_dim=context.run_config["img-size"] ** 2, + hidden_dim=context.run_config["hidden-dim"], + ) + + # Convert model parameters to flwr.common.Parameters + ndarrays = get_params(model) + global_model_init = ndarrays_to_parameters(ndarrays) + + # Define the strategy + fraction_eval = context.run_config["fraction-evaluate"] + strategy = FedAvg( + fraction_evaluate=fraction_eval, + evaluate_metrics_aggregation_fn=weighted_average, + initial_parameters=global_model_init, + ) + + # Construct 
ServerConfig + num_rounds = context.run_config["num-server-rounds"] + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Create ServerApp +app = ServerApp(server_fn=server_fn) diff --git a/examples/quickstart-mlx/mlxexample/task.py b/examples/quickstart-mlx/mlxexample/task.py new file mode 100644 index 000000000000..9197643aa6bc --- /dev/null +++ b/examples/quickstart-mlx/mlxexample/task.py @@ -0,0 +1,101 @@ +"""mlxexample: A Flower / MLX app.""" + +import mlx.core as mx +import mlx.nn as nn +import numpy as np +from datasets.utils.logging import disable_progress_bar +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner + +disable_progress_bar() + + +class MLP(nn.Module): + """A simple MLP.""" + + def __init__( + self, num_layers: int, input_dim: int, hidden_dim: int, output_dim: int = 10 + ): + super().__init__() + layer_sizes = [input_dim] + [hidden_dim] * num_layers + [output_dim] + self.layers = [ + nn.Linear(idim, odim) + for idim, odim in zip(layer_sizes[:-1], layer_sizes[1:]) + ] + + def __call__(self, x): + for l in self.layers[:-1]: + x = mx.maximum(l(x), 0.0) + return self.layers[-1](x) + + +def get_params(model): + layers = model.parameters()["layers"] + return [np.array(val) for layer in layers for _, val in layer.items()] + + +def set_params(model, parameters): + new_params = {} + new_params["layers"] = [ + {"weight": mx.array(parameters[i]), "bias": mx.array(parameters[i + 1])} + for i in range(0, len(parameters), 2) + ] + model.update(new_params) + + +def loss_fn(model, X, y): + return mx.mean(nn.losses.cross_entropy(model(X), y)) + + +def eval_fn(model, X, y): + return mx.mean(mx.argmax(model(X), axis=1) == y) + + +def batch_iterate(batch_size, X, y): + perm = mx.array(np.random.permutation(y.size)) + for s in range(0, y.size, batch_size): + ids = perm[s : s + batch_size] + yield X[ids], y[ids] + + +fds = None # Cache FederatedDataset + + 
+def load_data(partition_id: int, num_partitions: int): + # Only initialize `FederatedDataset` once + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="ylecun/mnist", + partitioners={"train": partitioner}, + trust_remote_code=True, + ) + partition = fds.load_partition(partition_id) + partition_splits = partition.train_test_split(test_size=0.2, seed=42) + + partition_splits["train"].set_format("numpy") + partition_splits["test"].set_format("numpy") + + train_partition = partition_splits["train"].map( + lambda img: { + "img": img.reshape(-1, 28 * 28).squeeze().astype(np.float32) / 255.0 + }, + input_columns="image", + ) + test_partition = partition_splits["test"].map( + lambda img: { + "img": img.reshape(-1, 28 * 28).squeeze().astype(np.float32) / 255.0 + }, + input_columns="image", + ) + + data = ( + train_partition["img"], + train_partition["label"].astype(np.uint32), + test_partition["img"], + test_partition["label"].astype(np.uint32), + ) + + train_images, train_labels, test_images, test_labels = map(mx.array, data) + return train_images, train_labels, test_images, test_labels diff --git a/examples/quickstart-mlx/pyproject.toml b/examples/quickstart-mlx/pyproject.toml index 752040b6aaa9..3165a3d93881 100644 --- a/examples/quickstart-mlx/pyproject.toml +++ b/examples/quickstart-mlx/pyproject.toml @@ -1,16 +1,40 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] -name = "quickstart-mlx" -version = "0.1.0" -description = "MLX Federated Learning Quickstart with Flower" -authors = ["The Flower Authors "] +[project] +name = "mlxexample" +version = "1.0.0" +description = "Federated Learning with MLX and Flower (Quickstart Example)" +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]>=1.12.0", + "flwr-datasets[vision]>=0.3.0", + "mlx==0.16.0", + "numpy==1.26.4", +] 
-[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = ">=1.0,<2.0" -mlx = "==0.0.3" -numpy = "==1.24.4" -flwr-datasets = { extras = ["vision"], version = "^0.0.2" } +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "mlxexample.server_app:app" +clientapp = "mlxexample.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +fraction-evaluate = 0.5 +num-layers = 2 +img-size = 28 +hidden-dim = 32 +batch-size = 256 +learning-rate = 0.1 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 diff --git a/examples/quickstart-mlx/requirements.txt b/examples/quickstart-mlx/requirements.txt deleted file mode 100644 index b56f7a15bfb9..000000000000 --- a/examples/quickstart-mlx/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -flwr>=1.0, <2.0 -mlx==0.0.3 -numpy==1.24.4 -flwr-datasets[vision]>=0.0.2, <1.0.0 diff --git a/examples/quickstart-mlx/run.sh b/examples/quickstart-mlx/run.sh deleted file mode 100755 index 40d211848c07..000000000000 --- a/examples/quickstart-mlx/run.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/ - -echo "Starting server" -python server.py & -sleep 3 # Sleep for 3s to give the server enough time to start - -for i in $(seq 0 1); do - echo "Starting client $i" - python client.py --partition-id $i & -done - -# Enable CTRL+C to stop all background processes -trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM -# Wait for all background processes to complete -wait diff --git a/examples/quickstart-mlx/server.py b/examples/quickstart-mlx/server.py deleted file mode 100644 index fe691a88aba0..000000000000 --- a/examples/quickstart-mlx/server.py +++ /dev/null @@ -1,25 +0,0 @@ -from typing import List, Tuple - -import flwr as fl -from flwr.common import Metrics - - -# Define metric aggregation function -def 
weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: - # Multiply accuracy of each client by number of examples used - accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] - examples = [num_examples for num_examples, _ in metrics] - - # Aggregate and return custom metric (weighted average) - return {"accuracy": sum(accuracies) / sum(examples)} - - -# Define strategy -strategy = fl.server.strategy.FedAvg(evaluate_metrics_aggregation_fn=weighted_average) - -# Start Flower server -fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=3), - strategy=strategy, -) diff --git a/examples/quickstart-monai/.gitignore b/examples/quickstart-monai/.gitignore index a218cab9669e..2626387e2a4f 100644 --- a/examples/quickstart-monai/.gitignore +++ b/examples/quickstart-monai/.gitignore @@ -1 +1,2 @@ MedNIST* +.data_download.lock diff --git a/examples/quickstart-monai/README.md b/examples/quickstart-monai/README.md index 4a9afef4f86a..8189a8e98406 100644 --- a/examples/quickstart-monai/README.md +++ b/examples/quickstart-monai/README.md @@ -1,85 +1,79 @@ -# Flower Example using MONAI +--- +tags: [quickstart, medical, vision] +dataset: [MedNIST] +framework: [MONAI] +--- -This introductory example to Flower uses MONAI, but deep knowledge of MONAI is not necessarily required to run the example. However, it will help you understand how to adapt Flower to your use case. -Running this example in itself is quite easy. - -[MONAI](https://docs.monai.io/en/latest/index.html)(Medical Open Network for AI) is a PyTorch-based, open-source framework for deep learning in healthcare imaging, part of the PyTorch Ecosystem. 
- -Its ambitions are: +# Federated Learning with MONAI and Flower (Quickstart Example) -- developing a community of academic, industrial and clinical researchers collaborating on a common foundation; +This introductory example to Flower uses MONAI, but deep knowledge of MONAI is not necessarily required to run the example. However, it will help you understand how to adapt Flower to your use case. +Running this example in itself is quite easy. [MONAI](https://docs.monai.io/en/latest/index.html)(Medical Open Network for AI) is a PyTorch-based, open-source framework for deep learning in healthcare imaging, part of the PyTorch Ecosystem. This example uses a subset of the [MedMNIST](https://medmnist.com/) dataset including 6 classes, as done in [MONAI's classification demo](https://colab.research.google.com/drive/1wy8XUSnNWlhDNazFdvGBHLfdkGvOHBKe). Each client trains a [DenseNet121](https://docs.monai.io/en/stable/networks.html#densenet121) from MONAI. -- creating state-of-the-art, end-to-end training workflows for healthcare imaging; +> \[!NOTE\] +> This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to partition the MedMNIST dataset. It's a good example to show how to bring any dataset into Flower and partition it using any of the built-in [partitioners](https://flower.ai/docs/datasets/ref-api/flwr_datasets.partitioner.html) (e.g. `DirichletPartitioner`, `PathologicalPartitioner`). Learn [how to use partitioners](https://flower.ai/docs/datasets/tutorial-use-partitioners.html) in a step-by-step tutorial. -- providing researchers with an optimized and standardized way to create and evaluate deep learning models. +## Set up the project -## Project Setup +### Clone the project -Start by cloning the example project. 
We prepared a single-line command that you can copy into your shell which will checkout the example for you: +Start by cloning the example project: ```shell -git clone --depth=1 https://github.com/adap/flower.git _tmp && mv _tmp/examples/quickstart-monai . && rm -rf _tmp && cd quickstart-monai +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/quickstart-monai . \ + && rm -rf _tmp \ + && cd quickstart-monai ``` -This will create a new directory called `quickstart-monai` containing the following files: +This will create a new directory called `quickstart-monai` with the following structure: ```shell --- pyproject.toml --- requirements.txt --- client.py --- data.py --- model.py --- server.py --- README.md +quickstart-monai +├── monaiexample +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -### Installing Dependencies - -Project dependencies (such as `monai` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. +### Install dependencies and project -#### Poetry +Install the dependencies defined in `pyproject.toml` as well as the `monaiexample` package. -```shell -poetry install -poetry shell +```bash +pip install -e . ``` -Poetry will install all your dependencies in a newly created virtual environment. 
To verify that everything works correctly you can run the following command: - -```shell -poetry run python3 -c "import flwr" -``` +## Run the project -If you don't see any errors you're good to go! +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -#### pip +### Run with the Simulation Engine -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. +> \[!TIP\] +> This example runs faster when the `ClientApp`s have access to a GPU. If your system has one, you can make use of it by configuring the `backend.client-resources` component in `pyproject.toml`. If you want to try running the example with GPU right away, use the `local-simulation-gpu` federation as shown below. -```shell -pip install -r requirements.txt +```bash +# Run with the default federation (CPU only) +flwr run . ``` -## Run Federated Learning with MONAI and Flower - -Afterwards you are ready to start the Flower server as well as the clients. You can simply start the server in a terminal as follows: +Run the project in the `local-simulation-gpu` federation that gives CPU and GPU resources to each `ClientApp`. By default, at most 4x`ClientApp` will run in parallel in the available GPU. -```shell -python3 server.py +```bash +# Run with the `local-simulation-gpu` federation +flwr run . local-simulation-gpu ``` -Now you are ready to start the Flower clients which will participate in the learning. To do so simply open two more terminal windows and run the following commands. Clients will train a [DenseNet121](https://docs.monai.io/en/stable/networks.html#densenet121) from MONAI. If a GPU is present in your system, clients will use it. 
- -Start client 1 in the first terminal: +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: -```shell -python3 client.py --partition-id 0 +```bash +flwr run . --run-config "num-server-rounds=5 batch-size=32" ``` -Start client 2 in the second terminal: - -```shell -python3 client.py --partition-id 1 -``` +### Run with the Deployment Engine -You will see that the federated training is starting. Look at the [code](https://github.com/adap/flower/tree/main/examples/quickstart-monai) for a detailed explanation. +> \[!NOTE\] +> An update to this example will show how to run this Flower project with the Deployment Engine and TLS certificates, or with Docker. diff --git a/examples/quickstart-monai/client.py b/examples/quickstart-monai/client.py deleted file mode 100644 index 0ed943da83cc..000000000000 --- a/examples/quickstart-monai/client.py +++ /dev/null @@ -1,61 +0,0 @@ -import argparse -import warnings -from collections import OrderedDict - -import torch -from data import load_data -from model import test, train -from monai.networks.nets.densenet import DenseNet121 - -import flwr as fl - -warnings.filterwarnings("ignore", category=UserWarning) -DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - - -# Define Flower client -class FlowerClient(fl.client.NumPyClient): - def __init__(self, net, trainloader, testloader, device): - self.net = net - self.trainloader = trainloader - self.testloader = testloader - self.device = device - - def get_parameters(self, config): - return [val.cpu().numpy() for _, val in self.net.state_dict().items()] - - def set_parameters(self, parameters): - params_dict = zip(self.net.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) - self.net.load_state_dict(state_dict, strict=True) - - def fit(self, parameters, config): - self.set_parameters(parameters) - train(self.net, self.trainloader, epoch_num=1, 
device=self.device) - return self.get_parameters(config={}), len(self.trainloader), {} - - def evaluate(self, parameters, config): - self.set_parameters(parameters) - loss, accuracy = test(self.net, self.testloader, self.device) - return loss, len(self.testloader), {"accuracy": accuracy} - - -if __name__ == "__main__": - total_partitions = 10 - parser = argparse.ArgumentParser() - parser.add_argument( - "--partition-id", type=int, choices=range(total_partitions), required=True - ) - args = parser.parse_args() - - # Load model and data (simple CNN, CIFAR-10) - trainloader, _, testloader, num_class = load_data( - total_partitions, args.partition_id - ) - net = DenseNet121(spatial_dims=2, in_channels=1, out_channels=num_class).to(DEVICE) - - # Start Flower client - fl.client.start_numpy_client( - server_address="127.0.0.1:8080", - client=FlowerClient(net, trainloader, testloader, DEVICE), - ) diff --git a/examples/quickstart-monai/data.py b/examples/quickstart-monai/data.py deleted file mode 100644 index d184476522e8..000000000000 --- a/examples/quickstart-monai/data.py +++ /dev/null @@ -1,158 +0,0 @@ -import os -import tarfile -from urllib import request - -import numpy as np -from monai.data import DataLoader, Dataset -from monai.transforms import ( - Compose, - EnsureChannelFirst, - LoadImage, - RandFlip, - RandRotate, - RandZoom, - ScaleIntensity, - ToTensor, -) - - -def _partition(files_list, labels_list, num_shards, index): - total_size = len(files_list) - assert total_size == len( - labels_list - ), f"List of datapoints and labels must be of the same length" - shard_size = total_size // num_shards - - # Calculate start and end indices for the shard - start_idx = index * shard_size - if index == num_shards - 1: - # Last shard takes the remainder - end_idx = total_size - else: - end_idx = start_idx + shard_size - - # Create a subset for the shard - files = files_list[start_idx:end_idx] - labels = labels_list[start_idx:end_idx] - return files, labels - - -def 
load_data(num_shards, index): - image_file_list, image_label_list, _, num_class = _download_data() - - # Get partition given index - files_list, labels_list = _partition( - image_file_list, image_label_list, num_shards, index - ) - - trainX, trainY, valX, valY, testX, testY = _split_data( - files_list, labels_list, len(files_list) - ) - train_transforms, val_transforms = _get_transforms() - - train_ds = MedNISTDataset(trainX, trainY, train_transforms) - train_loader = DataLoader(train_ds, batch_size=300, shuffle=True) - - val_ds = MedNISTDataset(valX, valY, val_transforms) - val_loader = DataLoader(val_ds, batch_size=300) - - test_ds = MedNISTDataset(testX, testY, val_transforms) - test_loader = DataLoader(test_ds, batch_size=300) - - return train_loader, val_loader, test_loader, num_class - - -class MedNISTDataset(Dataset): - def __init__(self, image_files, labels, transforms): - self.image_files = image_files - self.labels = labels - self.transforms = transforms - - def __len__(self): - return len(self.image_files) - - def __getitem__(self, index): - return self.transforms(self.image_files[index]), self.labels[index] - - -def _download_data(): - data_dir = "./MedNIST/" - _download_and_extract( - "https://dl.dropboxusercontent.com/s/5wwskxctvcxiuea/MedNIST.tar.gz", - os.path.join(data_dir), - ) - - class_names = sorted( - [x for x in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, x))] - ) - num_class = len(class_names) - image_files = [ - [ - os.path.join(data_dir, class_name, x) - for x in os.listdir(os.path.join(data_dir, class_name)) - ] - for class_name in class_names - ] - image_file_list = [] - image_label_list = [] - for i, class_name in enumerate(class_names): - image_file_list.extend(image_files[i]) - image_label_list.extend([i] * len(image_files[i])) - num_total = len(image_label_list) - return image_file_list, image_label_list, num_total, num_class - - -def _split_data(image_file_list, image_label_list, num_total): - valid_frac, test_frac 
= 0.1, 0.1 - trainX, trainY = [], [] - valX, valY = [], [] - testX, testY = [], [] - - for i in range(num_total): - rann = np.random.random() - if rann < valid_frac: - valX.append(image_file_list[i]) - valY.append(image_label_list[i]) - elif rann < test_frac + valid_frac: - testX.append(image_file_list[i]) - testY.append(image_label_list[i]) - else: - trainX.append(image_file_list[i]) - trainY.append(image_label_list[i]) - - return trainX, trainY, valX, valY, testX, testY - - -def _get_transforms(): - train_transforms = Compose( - [ - LoadImage(image_only=True), - EnsureChannelFirst(), - ScaleIntensity(), - RandRotate(range_x=15, prob=0.5, keep_size=True), - RandFlip(spatial_axis=0, prob=0.5), - RandZoom(min_zoom=0.9, max_zoom=1.1, prob=0.5, keep_size=True), - ToTensor(), - ] - ) - - val_transforms = Compose( - [LoadImage(image_only=True), EnsureChannelFirst(), ScaleIntensity(), ToTensor()] - ) - - return train_transforms, val_transforms - - -def _download_and_extract(url, dest_folder): - if not os.path.isdir(dest_folder): - # Download the tar.gz file - tar_gz_filename = url.split("/")[-1] - if not os.path.isfile(tar_gz_filename): - with request.urlopen(url) as response, open( - tar_gz_filename, "wb" - ) as out_file: - out_file.write(response.read()) - - # Extract the tar.gz file - with tarfile.open(tar_gz_filename, "r:gz") as tar_ref: - tar_ref.extractall() diff --git a/examples/quickstart-monai/model.py b/examples/quickstart-monai/model.py deleted file mode 100644 index 4c74d50553e4..000000000000 --- a/examples/quickstart-monai/model.py +++ /dev/null @@ -1,33 +0,0 @@ -import torch - - -def train(model, train_loader, epoch_num, device): - loss_function = torch.nn.CrossEntropyLoss() - optimizer = torch.optim.Adam(model.parameters(), 1e-5) - for _ in range(epoch_num): - model.train() - for inputs, labels in train_loader: - optimizer.zero_grad() - loss_function(model(inputs.to(device)), labels.to(device)).backward() - optimizer.step() - - -def test(model, 
test_loader, device): - model.eval() - loss = 0.0 - y_true = list() - y_pred = list() - loss_function = torch.nn.CrossEntropyLoss() - with torch.no_grad(): - for test_images, test_labels in test_loader: - out = model(test_images.to(device)) - test_labels = test_labels.to(device) - loss += loss_function(out, test_labels).item() - pred = out.argmax(dim=1) - for i in range(len(pred)): - y_true.append(test_labels[i].item()) - y_pred.append(pred[i].item()) - accuracy = sum([1 if t == p else 0 for t, p in zip(y_true, y_pred)]) / len( - test_loader.dataset - ) - return loss, accuracy diff --git a/examples/quickstart-monai/monaiexample/__init__.py b/examples/quickstart-monai/monaiexample/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/examples/quickstart-monai/monaiexample/client_app.py b/examples/quickstart-monai/monaiexample/client_app.py new file mode 100644 index 000000000000..c0dcac0cdae2 --- /dev/null +++ b/examples/quickstart-monai/monaiexample/client_app.py @@ -0,0 +1,41 @@ +"""monaiexample: A Flower / MONAI app.""" + +import torch +from flwr.common import Context +from flwr.client import NumPyClient, ClientApp + +from monaiexample.task import load_data, load_model, test, train, get_params, set_params + + +# Define Flower client +class FlowerClient(NumPyClient): + def __init__(self, net, trainloader, valloader): + self.net = net + self.trainloader = trainloader + self.valloader = valloader + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + def fit(self, parameters, config): + set_params(self.net, parameters) + train(self.net, self.trainloader, epoch_num=1, device=self.device) + return get_params(self.net), len(self.trainloader), {} + + def evaluate(self, parameters, config): + set_params(self.net, parameters) + loss, accuracy = test(self.net, self.valloader, self.device) + return loss, len(self.valloader), {"accuracy": accuracy} + + +def client_fn(context: Context): + + partition_id = 
context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + + batch_size = context.run_config["batch-size"] + trainloader, valloader = load_data(num_partitions, partition_id, batch_size) + net = load_model() + + return FlowerClient(net, trainloader, valloader).to_client() + + +app = ClientApp(client_fn=client_fn) diff --git a/examples/quickstart-monai/monaiexample/server_app.py b/examples/quickstart-monai/monaiexample/server_app.py new file mode 100644 index 000000000000..f68d3887a488 --- /dev/null +++ b/examples/quickstart-monai/monaiexample/server_app.py @@ -0,0 +1,46 @@ +"""monaiexample: A Flower / MONAI app.""" + +from typing import List, Tuple + +from flwr.common import Metrics, Context, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg + +from monaiexample.task import load_model, get_params + + +# Define metric aggregation function +def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: + # Multiply accuracy of each client by number of examples used + accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + # Aggregate and return custom metric (weighted average) + return {"accuracy": sum(accuracies) / sum(examples)} + + +def server_fn(context: Context): + + # Init model + model = load_model() + + # Convert model parameters to flwr.common.Parameters + ndarrays = get_params(model) + global_model_init = ndarrays_to_parameters(ndarrays) + + # Define strategy + fraction_fit = context.run_config["fraction-fit"] + strategy = FedAvg( + fraction_fit=fraction_fit, + evaluate_metrics_aggregation_fn=weighted_average, + initial_parameters=global_model_init, + ) + + # Construct ServerConfig + num_rounds = context.run_config["num-server-rounds"] + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +app 
= ServerApp(server_fn=server_fn) diff --git a/examples/quickstart-monai/monaiexample/task.py b/examples/quickstart-monai/monaiexample/task.py new file mode 100644 index 000000000000..4f7972d455fd --- /dev/null +++ b/examples/quickstart-monai/monaiexample/task.py @@ -0,0 +1,200 @@ +"""monaiexample: A Flower / MONAI app.""" + +import os +import tarfile +from urllib import request +from collections import OrderedDict + +import torch +import monai +from monai.networks.nets import densenet +from monai.transforms import ( + Compose, + EnsureChannelFirst, + LoadImage, + RandFlip, + RandRotate, + RandZoom, + ScaleIntensity, + ToTensor, +) +from filelock import FileLock +from datasets import Dataset +from flwr_datasets.partitioner import IidPartitioner + + +def load_model(): + """Load a DenseNet12.""" + return densenet.DenseNet121(spatial_dims=2, in_channels=1, out_channels=6) + + +def get_params(model): + """Return tensors in the model's state_dict.""" + return [val.cpu().numpy() for _, val in model.state_dict().items()] + + +def set_params(model, ndarrays): + """Apply parameters to a model.""" + params_dict = zip(model.state_dict().keys(), ndarrays) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + model.load_state_dict(state_dict, strict=True) + + +def train(model, train_loader, epoch_num, device): + """Train a model using the supplied dataloader.""" + model.to(device) + loss_function = torch.nn.CrossEntropyLoss() + optimizer = torch.optim.Adam(model.parameters(), 1e-5) + for _ in range(epoch_num): + model.train() + for batch in train_loader: + images, labels = batch["img"], batch["label"] + optimizer.zero_grad() + loss_function(model(images.to(device)), labels.to(device)).backward() + optimizer.step() + + +def test(model, test_loader, device): + """Evaluate a model on a held-out dataset.""" + model.to(device) + model.eval() + loss = 0.0 + y_true = list() + y_pred = list() + loss_function = torch.nn.CrossEntropyLoss() + with torch.no_grad(): + 
for batch in test_loader: + images, labels = batch["img"], batch["label"] + out = model(images.to(device)) + labels = labels.to(device) + loss += loss_function(out, labels).item() + pred = out.argmax(dim=1) + for i in range(len(pred)): + y_true.append(labels[i].item()) + y_pred.append(pred[i].item()) + accuracy = sum([1 if t == p else 0 for t, p in zip(y_true, y_pred)]) / len( + test_loader.dataset + ) + return loss, accuracy + + +def _get_transforms(): + """Return transforms to be used for training and evaluation.""" + train_transforms = Compose( + [ + LoadImage(image_only=True), + EnsureChannelFirst(), + ScaleIntensity(), + RandRotate(range_x=15, prob=0.5, keep_size=True), + RandFlip(spatial_axis=0, prob=0.5), + RandZoom(min_zoom=0.9, max_zoom=1.1, prob=0.5, keep_size=True), + ToTensor(), + ] + ) + + val_transforms = Compose( + [LoadImage(image_only=True), EnsureChannelFirst(), ScaleIntensity(), ToTensor()] + ) + + return train_transforms, val_transforms + + +def get_apply_transforms_fn(transforms_to_apply): + """Return a function that applies the transforms passed as input argument.""" + + def apply_transforms(batch): + """Apply transforms to the partition from FederatedDataset.""" + batch["img"] = [transforms_to_apply(img) for img in batch["img_file"]] + return batch + + return apply_transforms + + +ds = None +partitioner = None + + +def load_data(num_partitions, partition_id, batch_size): + """Download dataset, partition it and return data loader of specific partition.""" + # Set dataset and partitioner only once + global ds, partitioner + if ds is None: + image_file_list, image_label_list = _download_data() + + # Construct HuggingFace dataset + ds = Dataset.from_dict({"img_file": image_file_list, "label": image_label_list}) + # Set partitioner + partitioner = IidPartitioner(num_partitions) + partitioner.dataset = ds + + partition = partitioner.load_partition(partition_id) + + # Split train/validation + partition_train_test = 
partition.train_test_split(test_size=0.2, seed=42) + + # Get transforms + train_t, test_t = _get_transforms() + + # Apply transforms individually to each split + train_partition = partition_train_test["train"] + test_partition = partition_train_test["test"] + + partition_train = train_partition.with_transform(get_apply_transforms_fn(train_t)) + partition_val = test_partition.with_transform(get_apply_transforms_fn(test_t)) + + # Create dataloaders + train_loader = monai.data.DataLoader( + partition_train, batch_size=batch_size, shuffle=True + ) + val_loader = monai.data.DataLoader(partition_val, batch_size=batch_size) + + return train_loader, val_loader + + +def _download_data(): + """Download and extract dataset.""" + data_dir = "./MedNIST/" + _download_and_extract_if_needed( + "https://dl.dropboxusercontent.com/s/5wwskxctvcxiuea/MedNIST.tar.gz", + os.path.join(data_dir), + ) + + # Compute list of files and thier associated labels + class_names = sorted( + [x for x in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, x))] + ) + image_files = [ + [ + os.path.join(data_dir, class_name, x) + for x in os.listdir(os.path.join(data_dir, class_name)) + ] + for class_name in class_names + ] + image_file_list = [] + image_label_list = [] + for i, _ in enumerate(class_names): + image_file_list.extend(image_files[i]) + image_label_list.extend([i] * len(image_files[i])) + + return image_file_list, image_label_list + + +def _download_and_extract_if_needed(url, dest_folder): + """Download dataset if not present.""" + + # Logic behind a filelock to prevent multiple processes (e.g. ClientApps) + # from downloading the dataset at the same time. 
+ with FileLock(".data_download.lock"): + if not os.path.isdir(dest_folder): + # Download the tar.gz file + tar_gz_filename = url.split("/")[-1] + if not os.path.isfile(tar_gz_filename): + with ( + request.urlopen(url) as response, + open(tar_gz_filename, "wb") as out_file, + ): + out_file.write(response.read()) + + # Extract the tar.gz file + with tarfile.open(tar_gz_filename, "r:gz") as tar_ref: + tar_ref.extractall() diff --git a/examples/quickstart-monai/pyproject.toml b/examples/quickstart-monai/pyproject.toml index 2b77a2fc061f..7a6e766bb853 100644 --- a/examples/quickstart-monai/pyproject.toml +++ b/examples/quickstart-monai/pyproject.toml @@ -1,19 +1,41 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" - -[tool.poetry] -name = "quickstart-monai" -version = "0.1.0" -description = "MONAI Federated Learning Quickstart with Flower" -authors = ["The Flower Authors "] - -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = ">=1.0,<2.0" -torch = "1.13.1" -tqdm = "4.66.3" -scikit-learn = "1.3.1" -monai = { version = "1.3.0", extras=["gdown", "nibabel", "tqdm", "itk"] } -numpy = "1.24.4" -pillow = "10.2.0" +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "monaiexample" +version = "1.0.0" +description = "Federated Learning with MONAI and Flower (Quickstart Example)" +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]==1.12.0", + "flwr-datasets[vision]>=0.3.0", + "monai==1.3.2", + "filelock==3.15.4", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "monaiexample.server_app:app" +clientapp = "monaiexample.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 5 +fraction-fit = 0.5 +batch-size = 128 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 + +[tool.flwr.federations.local-simulation-gpu] 
+options.num-supernodes = 10 +options.backend.client-resources.num-cpus = 4 +options.backend.client-resources.num-gpus = 0.25 # at most 4 ClientApps will run in a given GPU diff --git a/examples/quickstart-monai/requirements.txt b/examples/quickstart-monai/requirements.txt deleted file mode 100644 index e3f1e463c629..000000000000 --- a/examples/quickstart-monai/requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -flwr>=1.0, <2.0 -torch==1.13.1 -tqdm==4.65.0 -scikit-learn==1.3.1 -monai[gdown,nibabel,tqdm,itk]==1.3.0 -numpy==1.24.4 -pillow==10.2.0 diff --git a/examples/quickstart-monai/run.sh b/examples/quickstart-monai/run.sh deleted file mode 100755 index 1da60bccb86d..000000000000 --- a/examples/quickstart-monai/run.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/ - -python -c "from data import _download_data; _download_data()" - -echo "Starting server" -python server.py & -sleep 3 # Sleep for 3s to give the server enough time to start - -for i in `seq 0 1`; do - echo "Starting client $i" - python client.py --partition-id $i & -done - -# Enable CTRL+C to stop all background processes -trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM -# Wait for all background processes to complete -wait diff --git a/examples/quickstart-monai/server.py b/examples/quickstart-monai/server.py deleted file mode 100644 index fe691a88aba0..000000000000 --- a/examples/quickstart-monai/server.py +++ /dev/null @@ -1,25 +0,0 @@ -from typing import List, Tuple - -import flwr as fl -from flwr.common import Metrics - - -# Define metric aggregation function -def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: - # Multiply accuracy of each client by number of examples used - accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] - examples = [num_examples for num_examples, _ in metrics] - - # Aggregate and return custom metric (weighted average) - return {"accuracy": sum(accuracies) / 
sum(examples)} - - -# Define strategy -strategy = fl.server.strategy.FedAvg(evaluate_metrics_aggregation_fn=weighted_average) - -# Start Flower server -fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=3), - strategy=strategy, -) diff --git a/examples/quickstart-pandas/README.md b/examples/quickstart-pandas/README.md index dd69f3ead3cb..3f522b26834d 100644 --- a/examples/quickstart-pandas/README.md +++ b/examples/quickstart-pandas/README.md @@ -1,82 +1,72 @@ -# Flower Example using Pandas +--- +tags: [quickstart, tabular, federated analytics] +dataset: [Iris] +framework: [pandas] +--- -This introductory example to Flower uses Pandas, but deep knowledge of Pandas is not necessarily required to run the example. However, it will help you understand how to adapt Flower to your use case. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to -download, partition and preprocess the dataset. -Running this example in itself is quite easy. - -## Project Setup - -Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: - -```shell -$ git clone --depth=1 https://github.com/adap/flower.git _tmp && mv _tmp/examples/quickstart-pandas . && rm -rf _tmp && cd quickstart-pandas -``` +# Federated Learning with Pandas and Flower (Quickstart Example) -This will create a new directory called `quickstart-pandas` containing the following files: +> \[!CAUTION\] +> This example uses Flower's low-level API which remains a preview feature and subject to change. Both `ClientApp` and `ServerApp` operate directly on [Message](https://flower.ai/docs/framework/ref-api/flwr.common.Message.html) and [RecordSet](https://flower.ai/docs/framework/ref-api/flwr.common.RecordSet.html) objects. 
-```shell --- pyproject.toml --- requirements.txt --- client.py --- server.py --- start.sh --- README.md -``` - -If you don't plan on using the `run.sh` script that automates the run, you should first download the data and put it in a `data` folder, this can be done by executing: +This introductory example to Flower uses [Pandas](https://pandas.pydata.org/), but deep knowledge of Pandas is not necessarily required to run the example. However, it will help you understand how to adapt Flower to your use case. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to +download, partition and preprocess the [Iris dataset](https://huggingface.co/datasets/scikit-learn/iris). +Running this example in itself is quite easy. -```shell -$ mkdir -p ./data -$ python -c "from sklearn.datasets import load_iris; load_iris(as_frame=True)['data'].to_csv('./data/client.csv')" -``` +This example implements a form of Federated Analytics by which instead of training a model using locally available data, the nodes run a query on the data they own. In this example the query is to compute the histogram on specific columns of the dataset. These metrics are sent to the `ServerApp` for aggregation. -### Installing Dependencies +## Set up the project -Project dependencies (such as `pandas` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. +### Clone the project -#### Poetry +Start by cloning the example project. ```shell -poetry install -poetry shell +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/quickstart-pandas . 
\ + && rm -rf _tmp && cd quickstart-pandas ``` -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: +This will create a new directory called `quickstart-pandas` with the following structure: ```shell -poetry run python3 -c "import flwr" +quickstart-pandas +├── pandas_example +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ └── server_app.py # Defines your ServerApp +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -If you don't see any errors you're good to go! - -#### pip +### Install dependencies and project -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. +Install the dependencies defined in `pyproject.toml` as well as the `pandas_example` package. -```shell -pip install -r requirements.txt +```bash +pip install -e . ``` -## Run Federated Analytics with Pandas and Flower +## Run the project -Afterwards you are ready to start the Flower server as well as the clients. You can simply start the server in a terminal as follows: +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -```shell -$ python3 server.py -``` +### Run with the Simulation Engine -Now you are ready to start the Flower clients which will participate in the learning. To do so simply open two more terminal windows and run the following commands. +```bash +flwr run . +``` -Start client 1 in the first terminal: +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example -```shell -$ python3 client.py --partition-id 0 +```bash +flwr run . 
--run-config num-server-rounds=5 ``` -Start client 2 in the second terminal: +> \[!TIP\] +> For a more detailed walk-through check our [quickstart Pandas tutorial](https://flower.ai/docs/framework/tutorial-quickstart-pandas.html) -```shell -$ python3 client.py --partition-id 1 -``` +### Run with the Deployment Engine -You will see that the server is printing aggregated statistics about the dataset distributed amongst clients. Have a look to the [Flower Quickstarter documentation](https://flower.ai/docs/quickstart-pandas.html) for a detailed explanation. +> \[!NOTE\] +> An update to this example will show how to run this Flower application with the Deployment Engine and TLS certificates, or with Docker. diff --git a/examples/quickstart-pandas/client.py b/examples/quickstart-pandas/client.py deleted file mode 100644 index c52b7c65b04c..000000000000 --- a/examples/quickstart-pandas/client.py +++ /dev/null @@ -1,65 +0,0 @@ -import argparse -from typing import Dict, List, Tuple - -import numpy as np -import pandas as pd - -import flwr as fl - -from flwr_datasets import FederatedDataset - - -column_names = ["sepal_length", "sepal_width"] - - -def compute_hist(df: pd.DataFrame, col_name: str) -> np.ndarray: - freqs, _ = np.histogram(df[col_name]) - return freqs - - -# Define Flower client -class FlowerClient(fl.client.NumPyClient): - def __init__(self, X: pd.DataFrame): - self.X = X - - def fit( - self, parameters: List[np.ndarray], config: Dict[str, str] - ) -> Tuple[List[np.ndarray], int, Dict]: - hist_list = [] - # Execute query locally - for c in self.X.columns: - hist = compute_hist(self.X, c) - hist_list.append(hist) - return ( - hist_list, - len(self.X), - {}, - ) - - -if __name__ == "__main__": - N_CLIENTS = 2 - - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--partition-id", - type=int, - choices=range(0, N_CLIENTS), - required=True, - help="Specifies the partition id of artificially partitioned datasets.", - ) - args = 
parser.parse_args() - partition_id = args.partition_id - - # Load the partition data - fds = FederatedDataset(dataset="hitorilabs/iris", partitioners={"train": N_CLIENTS}) - - dataset = fds.load_partition(partition_id, "train").with_format("pandas")[:] - # Use just the specified columns - X = dataset[column_names] - - # Start Flower client - fl.client.start_client( - server_address="127.0.0.1:8080", - client=FlowerClient(X).to_client(), - ) diff --git a/examples/quickstart-pandas/pandas_example/__init__.py b/examples/quickstart-pandas/pandas_example/__init__.py new file mode 100644 index 000000000000..9e5b1a942dd8 --- /dev/null +++ b/examples/quickstart-pandas/pandas_example/__init__.py @@ -0,0 +1 @@ +"""pandas_example: A Flower / Pandas app.""" diff --git a/examples/quickstart-pandas/pandas_example/client_app.py b/examples/quickstart-pandas/pandas_example/client_app.py new file mode 100644 index 000000000000..0194b0dadf3a --- /dev/null +++ b/examples/quickstart-pandas/pandas_example/client_app.py @@ -0,0 +1,59 @@ +"""pandas_example: A Flower / Pandas app.""" + +import warnings + +import numpy as np +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner + +from flwr.client import ClientApp +from flwr.common import Context, Message, MetricsRecord, RecordSet + +fds = None # Cache FederatedDataset + +warnings.filterwarnings("ignore", category=UserWarning) + + +def get_clientapp_dataset(partition_id: int, num_partitions: int): + # Only initialize `FederatedDataset` once + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="scikit-learn/iris", + partitioners={"train": partitioner}, + ) + + dataset = fds.load_partition(partition_id, "train").with_format("pandas")[:] + # Use just the specified columns + return dataset[["SepalLengthCm", "SepalWidthCm"]] + + +# Flower ClientApp +app = ClientApp() + + +@app.query() +def query(msg: Message, context: Context): 
+    """Construct histogram of local dataset and report to `ServerApp`.""" + + # Read the node_config to fetch data partition associated to this node + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + + dataset = get_clientapp_dataset(partition_id, num_partitions) + + metrics = {} + # Compute some statistics for each column in the dataframe + for feature_name in dataset.columns: + # Compute histogram + freqs, _ = np.histogram(dataset[feature_name], bins=np.linspace(2.0, 10.0, 10)) + metrics[feature_name] = freqs.tolist() + + # Compute weighted average + metrics[f"{feature_name}_avg"] = dataset[feature_name].mean() * len(dataset) + metrics[f"{feature_name}_count"] = len(dataset) + + reply_content = RecordSet(metrics_records={"query_results": MetricsRecord(metrics)}) + + return msg.create_reply(reply_content) diff --git a/examples/quickstart-pandas/pandas_example/server_app.py b/examples/quickstart-pandas/pandas_example/server_app.py new file mode 100644 index 000000000000..95384c3fa978 --- /dev/null +++ b/examples/quickstart-pandas/pandas_example/server_app.py @@ -0,0 +1,87 @@ +"""pandas_example: A Flower / Pandas app.""" + +import random +import time +from logging import INFO + +import numpy as np + +from flwr.common import Context, MessageType, RecordSet, Message +from flwr.common.logger import log +from flwr.server import Driver, ServerApp + +app = ServerApp() + + +@app.main() +def main(driver: Driver, context: Context) -> None: + """This `ServerApp` constructs a histogram from partial-histograms reported by the + `ClientApp`s.""" + + num_rounds = context.run_config["num-server-rounds"] + min_nodes = 2 + fraction_sample = context.run_config["fraction-sample"] + + for server_round in range(num_rounds): + log(INFO, "") # Add newline for log readability + log(INFO, "Starting round %s/%s", server_round + 1, num_rounds) + + # Loop and wait until enough nodes are available. 
+ all_node_ids = [] + while len(all_node_ids) < min_nodes: + all_node_ids = driver.get_node_ids() + if len(all_node_ids) >= min_nodes: + # Sample nodes + num_to_sample = int(len(all_node_ids) * fraction_sample) + node_ids = random.sample(all_node_ids, num_to_sample) + break + log(INFO, "Waiting for nodes to connect...") + time.sleep(2) + + log(INFO, "Sampled %s nodes (out of %s)", len(node_ids), len(all_node_ids)) + + # Create messages + recordset = RecordSet() + messages = [] + for node_id in node_ids: # one message for each node + message = driver.create_message( + content=recordset, + message_type=MessageType.QUERY, # target `query` method in ClientApp + dst_node_id=node_id, + group_id=str(server_round), + ) + messages.append(message) + + # Send messages and wait for all results + replies = driver.send_and_receive(messages) + log(INFO, "Received %s/%s results", len(replies), len(messages)) + + # Aggregate partial histograms + aggregated_hist = aggregate_partial_histograms(replies) + + # Display aggregated histogram + log(INFO, "Aggregated histogram: %s", aggregated_hist) + + +def aggregate_partial_histograms(messages: Message): + """Aggregate partial histograms.""" + + aggregated_hist = {} + total_count = 0 + for rep in messages: + if rep.has_error(): + continue + query_results = rep.content.metrics_records["query_results"] + # Sum metrics + for k, v in query_results.items(): + if k in ["SepalLengthCm", "SepalWidthCm"]: + if k in aggregated_hist: + aggregated_hist[k] += np.array(v) + else: + aggregated_hist[k] = np.array(v) + if "_count" in k: + total_count += v + + # Verify aggregated histogram adds up to total reported count + assert total_count == sum([sum(v) for v in aggregated_hist.values()]) + return aggregated_hist diff --git a/examples/quickstart-pandas/pyproject.toml b/examples/quickstart-pandas/pyproject.toml index 2e6b1424bb54..a80311292acb 100644 --- a/examples/quickstart-pandas/pyproject.toml +++ b/examples/quickstart-pandas/pyproject.toml @@ -1,17 
+1,39 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] -name = "quickstart-pandas" -version = "0.1.0" -description = "Pandas Federated Analytics Quickstart with Flower" -authors = ["Ragy Haddad "] -maintainers = ["The Flower Authors "] +[project] +name = "pandas_example" +version = "1.0.0" +description = "Federated Learning with Pandas and Flower (Quickstart Example)" +license = "Apache-2.0" +authors = [ + { name = "The Flower Authors", email = "hello@flower.ai" }, + { name = "Ragy Haddad", email = "ragy202@gmail.com" }, +] +dependencies = [ + "flwr[simulation]>=1.12.0", + "flwr-datasets[vision]>=0.3.0", + "numpy==1.24.4", + "pandas==2.0.0", +] -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = ">=1.0,<2.0" -flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } -numpy = "1.23.2" -pandas = "2.0.0" +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "pandas_example.server_app:app" +clientapp = "pandas_example.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +fraction-sample = 1.0 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 4 diff --git a/examples/quickstart-pandas/requirements.txt b/examples/quickstart-pandas/requirements.txt deleted file mode 100644 index d44a3c6adab9..000000000000 --- a/examples/quickstart-pandas/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -flwr>=1.0, <2.0 -flwr-datasets[vision]>=0.0.2, <1.0.0 -numpy==1.23.2 -pandas==2.0.0 diff --git a/examples/quickstart-pandas/run.sh b/examples/quickstart-pandas/run.sh deleted file mode 100755 index 2ae1e582b8cf..000000000000 --- a/examples/quickstart-pandas/run.sh +++ /dev/null @@ -1,13 +0,0 @@ -echo "Starting server" -python server.py & -sleep 3 # Sleep for 3s to give the server 
enough time to start - -for i in `seq 0 1`; do - echo "Starting client $i" - python client.py --partition-id ${i} & -done - -# This will allow you to use CTRL+C to stop all background processes -trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM -# Wait for all background processes to complete -wait diff --git a/examples/quickstart-pandas/server.py b/examples/quickstart-pandas/server.py deleted file mode 100644 index af4c2a796788..000000000000 --- a/examples/quickstart-pandas/server.py +++ /dev/null @@ -1,82 +0,0 @@ -from typing import Dict, List, Optional, Tuple, Union - -import numpy as np - -import flwr as fl -from flwr.common import ( - EvaluateIns, - EvaluateRes, - FitIns, - FitRes, - Parameters, - Scalar, - ndarrays_to_parameters, - parameters_to_ndarrays, -) -from flwr.server.client_manager import ClientManager -from flwr.server.client_proxy import ClientProxy -from flwr.server.strategy import Strategy - - -class FedAnalytics(Strategy): - def initialize_parameters( - self, client_manager: Optional[ClientManager] = None - ) -> Optional[Parameters]: - return None - - def configure_fit( - self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: - config = {} - fit_ins = FitIns(parameters, config) - clients = client_manager.sample(num_clients=2, min_num_clients=2) - return [(client, fit_ins) for client in clients] - - def aggregate_fit( - self, - server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: - # Get results from fit - # Convert results - values_aggregated = [ - (parameters_to_ndarrays(fit_res.parameters)) for _, fit_res in results - ] - length_agg_hist = 0 - width_agg_hist = 0 - for val in values_aggregated: - length_agg_hist += val[0] - width_agg_hist += val[1] - - ndarr = np.concatenate( - (["Length:"], length_agg_hist, ["Width:"], width_agg_hist) - ) - 
return ndarrays_to_parameters(ndarr), {} - - def evaluate( - self, server_round: int, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: - agg_hist = [arr.item() for arr in parameters_to_ndarrays(parameters)] - return 0, {"Aggregated histograms": agg_hist} - - def configure_evaluate( - self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: - pass - - def aggregate_evaluate( - self, - server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: - pass - - -# Start Flower server -fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=1), - strategy=FedAnalytics(), -) diff --git a/examples/quickstart-pytorch-lightning/.gitignore b/examples/quickstart-pytorch-lightning/.gitignore index 2e0f6a4fa61f..3d38bd5e5f3e 100644 --- a/examples/quickstart-pytorch-lightning/.gitignore +++ b/examples/quickstart-pytorch-lightning/.gitignore @@ -1,2 +1 @@ lightning_logs -MNIST diff --git a/examples/quickstart-pytorch-lightning/README.md b/examples/quickstart-pytorch-lightning/README.md index fb29c7e9e9ea..0aa34db9af75 100644 --- a/examples/quickstart-pytorch-lightning/README.md +++ b/examples/quickstart-pytorch-lightning/README.md @@ -1,76 +1,61 @@ -# Flower Example using PyTorch Lightning +--- +tags: [quickstart, vision, fds] +dataset: [MNIST] +framework: [lightning] +--- -This introductory example to Flower uses PyTorch, but deep knowledge of PyTorch Lightning is not necessarily required to run the example. However, it will help you understand how to adapt Flower to your use case. Running this example in itself is quite easy. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to download, partition and preprocess the MNIST dataset. 
+# Federated Learning with PyTorch Lightning and Flower (Quickstart Example) + +This introductory example to Flower uses PyTorch Lightning, but deep knowledge of PyTorch Lightning is not necessarily required to run the example. However, it will help you understand how to adapt Flower to your use case. Running this example in itself is quite easy. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to download, partition and preprocess the MNIST dataset. The model being federated is a lightweight AutoEncoder as presented in [Lightning in 15 minutes](https://lightning.ai/docs/pytorch/stable/starter/introduction.html) tutorial. ## Project Setup Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: ```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/quickstart-pytorch-lightning . && rm -rf flower && cd quickstart-pytorch-lightning +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/quickstart-pytorch-lightning . \ + && rm -rf _tmp && cd quickstart-pytorch-lightning ``` This will create a new directory called `quickstart-pytorch-lightning` containing the following files: ```shell --- pyproject.toml --- requirements.txt --- client.py # client-side code --- server.py # server-side code (including the strategy) --- README.md --- run.sh # runs server, then two clients --- mnist.py # run a centralised version of this example +quickstart-pytorch-lightning +├── pytorchlightning_example +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -### Installing Dependencies - -Project dependencies (such as `torch` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. 
We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. +# Install dependencies and project -#### Poetry +Install the dependencies defined in `pyproject.toml` as well as the `pytorchlightning_example` package. -```shell -poetry install -poetry shell +```bash +pip install -e . ``` -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: - -```shell -poetry run python -c "import flwr" -``` - -If you don't see any errors you're good to go! - -#### pip - -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. +## Run the Example -```shell -pip install -r requirements.txt -``` - -## Run Federated Learning with PyTorch and Flower +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -Afterwards you are ready to start the Flower server as well as the clients. You can simply start the server in a terminal as follows: +### Run with the Simulation Engine -```shell -python server.py +```bash +flwr run . ``` -Now you are ready to start the Flower clients which will participate in the learning. We need to specify the partition id to -use different partitions of the data on different nodes. To do so simply open two more terminal windows and run the -following commands. 
- -Start client 1 in the first terminal: +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: -```shell -python client.py --partition-id 0 +```bash +flwr run . --run-config "num-server-rounds=5 max-epochs=2" ``` -Start client 2 in the second terminal: - -```shell -python client.py --partition-id 1 -``` +### Run with the Deployment Engine -You will see that PyTorch is starting a federated training. Look at the [code](https://github.com/adap/flower/tree/main/examples/quickstart-pytorch) for a detailed explanation. +> \[!NOTE\] +> An update to this example will show how to run this Flower application with the Deployment Engine and TLS certificates, or with Docker. diff --git a/examples/quickstart-pytorch-lightning/client.py b/examples/quickstart-pytorch-lightning/client.py deleted file mode 100644 index 6e21259cc492..000000000000 --- a/examples/quickstart-pytorch-lightning/client.py +++ /dev/null @@ -1,80 +0,0 @@ -import argparse -from collections import OrderedDict - -import pytorch_lightning as pl -import torch -from datasets.utils.logging import disable_progress_bar - -import flwr as fl -import mnist - -disable_progress_bar() - - -class FlowerClient(fl.client.NumPyClient): - def __init__(self, model, train_loader, val_loader, test_loader): - self.model = model - self.train_loader = train_loader - self.val_loader = val_loader - self.test_loader = test_loader - - def get_parameters(self, config): - encoder_params = _get_parameters(self.model.encoder) - decoder_params = _get_parameters(self.model.decoder) - return encoder_params + decoder_params - - def set_parameters(self, parameters): - _set_parameters(self.model.encoder, parameters[:4]) - _set_parameters(self.model.decoder, parameters[4:]) - - def fit(self, parameters, config): - self.set_parameters(parameters) - - trainer = pl.Trainer(max_epochs=1) - trainer.fit(self.model, self.train_loader, self.val_loader) - - return 
self.get_parameters(config={}), 55000, {} - - def evaluate(self, parameters, config): - self.set_parameters(parameters) - - trainer = pl.Trainer() - results = trainer.test(self.model, self.test_loader) - loss = results[0]["test_loss"] - - return loss, 10000, {"loss": loss} - - -def _get_parameters(model): - return [val.cpu().numpy() for _, val in model.state_dict().items()] - - -def _set_parameters(model, parameters): - params_dict = zip(model.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) - model.load_state_dict(state_dict, strict=True) - - -def main() -> None: - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--partition-id", - type=int, - choices=range(0, 10), - required=True, - help="Specifies the artificial data partition", - ) - args = parser.parse_args() - partition_id = args.partition_id - - # Model and data - model = mnist.LitAutoEncoder() - train_loader, val_loader, test_loader = mnist.load_data(partition_id) - - # Flower client - client = FlowerClient(model, train_loader, val_loader, test_loader).to_client() - fl.client.start_client(server_address="127.0.0.1:8080", client=client) - - -if __name__ == "__main__": - main() diff --git a/examples/quickstart-pytorch-lightning/pyproject.toml b/examples/quickstart-pytorch-lightning/pyproject.toml index a09aaa3d65b5..e305d1ca75e8 100644 --- a/examples/quickstart-pytorch-lightning/pyproject.toml +++ b/examples/quickstart-pytorch-lightning/pyproject.toml @@ -1,17 +1,37 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] -name = "quickstart-pytorch-lightning" -version = "0.1.0" -description = "Federated Learning Quickstart with Flower and PyTorch Lightning" -authors = ["The Flower Authors "] +[project] +name = "pytorchlightning_example" +version = "1.0.0" +description = "Federated Learning with PyTorch Lightning 
and Flower (Quickstart Example)" +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]>=1.12.0", + "flwr-datasets[vision]>=0.3.0", + "pytorch-lightning<2.0.0; sys_platform == 'darwin'", + "pytorch-lightning==1.6.0; sys_platform != 'darwin'", + "torch==1.13.1", + "torchvision==0.14.1", +] -[tool.poetry.dependencies] -python = "^3.8" -flwr = ">=1.0,<2.0" -# flwr = { path = "../../", develop = true } # Development -flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } -pytorch-lightning = "1.6.0" -torchvision = "0.14.1" +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "pytorchlightning_example.server_app:app" +clientapp = "pytorchlightning_example.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +max-epochs = 1 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 4 diff --git a/examples/quickstart-pytorch-lightning/pytorchlightning_example/__init__.py b/examples/quickstart-pytorch-lightning/pytorchlightning_example/__init__.py new file mode 100644 index 000000000000..d08f79080230 --- /dev/null +++ b/examples/quickstart-pytorch-lightning/pytorchlightning_example/__init__.py @@ -0,0 +1 @@ +"""pytorchlightning_example: A Flower / PyTorch Lightning app.""" diff --git a/examples/quickstart-pytorch-lightning/pytorchlightning_example/client_app.py b/examples/quickstart-pytorch-lightning/pytorchlightning_example/client_app.py new file mode 100644 index 000000000000..394a98b1ee71 --- /dev/null +++ b/examples/quickstart-pytorch-lightning/pytorchlightning_example/client_app.py @@ -0,0 +1,59 @@ +"""pytorchlightning_example: A Flower / PyTorch Lightning app.""" + +import pytorch_lightning as pl +from datasets.utils.logging import disable_progress_bar +from flwr.client import Client, ClientApp, NumPyClient +from flwr.common import Context + +disable_progress_bar() + +from 
pytorchlightning_example.task import ( + LitAutoEncoder, + get_parameters, + load_data, + set_parameters, +) + + +class FlowerClient(NumPyClient): + def __init__(self, train_loader, val_loader, test_loader, max_epochs): + self.model = LitAutoEncoder() + self.train_loader = train_loader + self.val_loader = val_loader + self.test_loader = test_loader + self.max_epochs = max_epochs + + def fit(self, parameters, config): + """Train the model with data of this client.""" + set_parameters(self.model, parameters) + + trainer = pl.Trainer(max_epochs=self.max_epochs, enable_progress_bar=False) + trainer.fit(self.model, self.train_loader, self.val_loader) + + return get_parameters(self.model), len(self.train_loader.dataset), {} + + def evaluate(self, parameters, config): + """Evaluate the model on the data this client has.""" + set_parameters(self.model, parameters) + + trainer = pl.Trainer(enable_progress_bar=False) + results = trainer.test(self.model, self.test_loader) + loss = results[0]["test_loss"] + + return loss, len(self.test_loader.dataset), {} + + +def client_fn(context: Context) -> Client: + """Construct a Client that will be run in a ClientApp.""" + + # Read the node_config to fetch data partition associated to this node + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + train_loader, val_loader, test_loader = load_data(partition_id, num_partitions) + + # Read run_config to fetch hyperparameters relevant to this run + max_epochs = context.run_config["max-epochs"] + return FlowerClient(train_loader, val_loader, test_loader, max_epochs).to_client() + + +app = ClientApp(client_fn=client_fn) diff --git a/examples/quickstart-pytorch-lightning/pytorchlightning_example/server_app.py b/examples/quickstart-pytorch-lightning/pytorchlightning_example/server_app.py new file mode 100644 index 000000000000..8d0f2266ab2f --- /dev/null +++ b/examples/quickstart-pytorch-lightning/pytorchlightning_example/server_app.py 
@@ -0,0 +1,30 @@ +"""pytorchlightning_example: A Flower / PyTorch Lightning app.""" + +from flwr.common import Context, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg +from pytorchlightning_example.task import LitAutoEncoder, get_parameters + + +def server_fn(context: Context) -> ServerAppComponents: + """Construct components for ServerApp.""" + + # Convert model parameters to flwr.common.Parameters + ndarrays = get_parameters(LitAutoEncoder()) + global_model_init = ndarrays_to_parameters(ndarrays) + + # Define strategy + strategy = FedAvg( + fraction_fit=0.5, + fraction_evaluate=0.5, + initial_parameters=global_model_init, + ) + + # Construct ServerConfig + num_rounds = context.run_config["num-server-rounds"] + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +app = ServerApp(server_fn=server_fn) diff --git a/examples/quickstart-pytorch-lightning/mnist.py b/examples/quickstart-pytorch-lightning/pytorchlightning_example/task.py similarity index 56% rename from examples/quickstart-pytorch-lightning/mnist.py rename to examples/quickstart-pytorch-lightning/pytorchlightning_example/task.py index 2f6100fe94cc..5ec61859424d 100644 --- a/examples/quickstart-pytorch-lightning/mnist.py +++ b/examples/quickstart-pytorch-lightning/pytorchlightning_example/task.py @@ -1,19 +1,24 @@ -"""Adapted from the PyTorch Lightning quickstart example. 
+"""pytorchlightning_example: A Flower / PyTorch Lightning app.""" -Source: pytorchlightning.ai (2021/02/04) -""" +import logging +from collections import OrderedDict +from typing import Any -from flwr_datasets import FederatedDataset import pytorch_lightning as pl import torch +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner from torch import nn from torch.nn import functional as F +from torch.optim.adam import Adam from torch.utils.data import DataLoader from torchvision import transforms +logging.getLogger("pytorch_lightning").setLevel(logging.WARNING) + class LitAutoEncoder(pl.LightningModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.encoder = nn.Sequential( nn.Linear(28 * 28, 64), @@ -26,16 +31,16 @@ def __init__(self): nn.Linear(64, 28 * 28), ) - def forward(self, x): + def forward(self, x) -> Any: embedding = self.encoder(x) return embedding - def configure_optimizers(self): + def configure_optimizers(self) -> Adam: optimizer = torch.optim.Adam(self.parameters(), lr=1e-3) return optimizer - def training_step(self, train_batch, batch_idx): - x, y = train_batch + def training_step(self, train_batch, batch_idx) -> torch.Tensor: + x = train_batch["image"] x = x.view(x.size(0), -1) z = self.encoder(x) x_hat = self.decoder(z) @@ -43,14 +48,14 @@ def training_step(self, train_batch, batch_idx): self.log("train_loss", loss) return loss - def validation_step(self, batch, batch_idx): + def validation_step(self, batch, batch_idx) -> None: self._evaluate(batch, "val") - def test_step(self, batch, batch_idx): + def test_step(self, batch, batch_idx) -> None: self._evaluate(batch, "test") - def _evaluate(self, batch, stage=None): - x, y = batch + def _evaluate(self, batch, stage=None) -> None: + x = batch["image"] x = x.view(x.size(0), -1) z = self.encoder(x) x_hat = self.decoder(z) @@ -59,15 +64,14 @@ def _evaluate(self, batch, stage=None): self.log(f"{stage}_loss", loss, prog_bar=True) 
-def collate_fn(batch): - """Change the dictionary to tuple to keep the exact dataloader behavior.""" - images = [item["image"] for item in batch] - labels = [item["label"] for item in batch] +def get_parameters(model): + return [val.cpu().numpy() for _, val in model.state_dict().items()] - images_tensor = torch.stack(images) - labels_tensor = torch.tensor(labels) - return images_tensor, labels_tensor +def set_parameters(model, parameters): + params_dict = zip(model.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + model.load_state_dict(state_dict, strict=True) def apply_transforms(batch): @@ -76,9 +80,19 @@ def apply_transforms(batch): return batch -def load_data(partition): - fds = FederatedDataset(dataset="mnist", partitioners={"train": 10}) - partition = fds.load_partition(partition, "train") +fds = None # Cache FederatedDataset + + +def load_data(partition_id, num_partitions): + # Only initialize `FederatedDataset` once + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="ylecun/mnist", + partitioners={"train": partitioner}, + ) + partition = fds.load_partition(partition_id, "train") partition = partition.with_transform(apply_transforms) # 20 % for on federated evaluation @@ -91,37 +105,12 @@ def load_data(partition): partition_train_valid["train"], batch_size=32, shuffle=True, - collate_fn=collate_fn, - num_workers=1, + num_workers=2, ) valloader = DataLoader( partition_train_valid["test"], batch_size=32, - collate_fn=collate_fn, - num_workers=1, - ) - testloader = DataLoader( - partition_full["test"], batch_size=32, collate_fn=collate_fn, num_workers=1 + num_workers=2, ) + testloader = DataLoader(partition_full["test"], batch_size=32, num_workers=1) return trainloader, valloader, testloader - - -def main() -> None: - """Centralized training.""" - - # Load data - train_loader, val_loader, test_loader = load_data(0) - - # Load 
model - model = LitAutoEncoder() - - # Train - trainer = pl.Trainer(max_epochs=5) - trainer.fit(model, train_loader, val_loader) - - # Test - trainer.test(model, test_loader) - - -if __name__ == "__main__": - main() diff --git a/examples/quickstart-pytorch-lightning/requirements.txt b/examples/quickstart-pytorch-lightning/requirements.txt deleted file mode 100644 index 6530dcc8c52c..000000000000 --- a/examples/quickstart-pytorch-lightning/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -flwr>=1.0, <2.0 -flwr-datasets[vision]>=0.0.2, <1.0.0 -pytorch_lightning>=1.4.7 -torchvision==0.14.1 diff --git a/examples/quickstart-pytorch-lightning/run.sh b/examples/quickstart-pytorch-lightning/run.sh deleted file mode 100755 index 62a1dac199bd..000000000000 --- a/examples/quickstart-pytorch-lightning/run.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -echo "Starting server" -python server.py & -sleep 3 # Sleep for 3s to give the server enough time to start - -for i in $(seq 0 1); do - echo "Starting client $i" - python client.py --partition-id "${i}" & -done - -# This will allow you to use CTRL+C to stop all background processes -trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM -# Wait for all background processes to complete -wait diff --git a/examples/quickstart-pytorch-lightning/server.py b/examples/quickstart-pytorch-lightning/server.py deleted file mode 100644 index a104a1fffd26..000000000000 --- a/examples/quickstart-pytorch-lightning/server.py +++ /dev/null @@ -1,20 +0,0 @@ -import flwr as fl - - -def main() -> None: - # Define strategy - strategy = fl.server.strategy.FedAvg( - fraction_fit=0.5, - fraction_evaluate=0.5, - ) - - # Start Flower server for three rounds of federated learning - fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=3), - strategy=strategy, - ) - - -if __name__ == "__main__": - main() diff --git a/examples/quickstart-pytorch/README.md b/examples/quickstart-pytorch/README.md index 
93d6a593f362..d07f83a7ea85 100644 --- a/examples/quickstart-pytorch/README.md +++ b/examples/quickstart-pytorch/README.md @@ -1,91 +1,67 @@ -# Flower Example using PyTorch +--- +tags: [quickstart, vision, fds] +dataset: [CIFAR-10] +framework: [torch, torchvision] +--- -This introductory example to Flower uses PyTorch, but deep knowledge of PyTorch is not necessarily required to run the example. However, it will help you understand how to adapt Flower to your use case. Running this example in itself is quite easy. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to download, partition and preprocess the CIFAR-10 dataset. - -## Project Setup - -Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: - -```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/quickstart-pytorch . && rm -rf flower && cd quickstart-pytorch -``` - -This will create a new directory called `quickstart-pytorch` containing the following files: +# Federated Learning with PyTorch and Flower (Quickstart Example) -```shell --- pyproject.toml --- client.py --- server.py --- README.md -``` - -### Installing Dependencies +This introductory example to Flower uses PyTorch, but deep knowledge of PyTorch is not necessarily required to run the example. However, it will help you understand how to adapt Flower to your use case. Running this example in itself is quite easy. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to download, partition and preprocess the CIFAR-10 dataset. -Project dependencies (such as `torch` and `flwr`) are defined in `pyproject.toml`. You can install the dependencies by invoking `pip`: +## Set up the project -```shell -# From a new python environment, run: -pip install . 
-``` +### Clone the project -Then, to verify that everything works correctly you can run the following command: +Start by cloning the example project: ```shell -python3 -c "import flwr" +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/quickstart-pytorch . \ + && rm -rf _tmp \ + && cd quickstart-pytorch ``` -If you don't see any errors you're good to go! - -______________________________________________________________________ - -## Run Federated Learning with PyTorch and Flower - -Afterwards you are ready to start the Flower server as well as the clients. You can simply start the server in a terminal as follows: +This will create a new directory called `quickstart-pytorch` with the following structure: ```shell -python3 server.py +quickstart-pytorch +├── pytorchexample +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -Now you are ready to start the Flower clients which will participate in the learning. We need to specify the partition id to -use different partitions of the data on different nodes. To do so simply open two more terminal windows and run the -following commands. - -Start client 1 in the first terminal: - -```shell -python3 client.py --partition-id 0 -``` +### Install dependencies and project -Start client 2 in the second terminal: +Install the dependencies defined in `pyproject.toml` as well as the `pytorchexample` package. -```shell -python3 client.py --partition-id 1 +```bash +pip install -e . ``` -You will see that PyTorch is starting a federated training. Look at the [code](https://github.com/adap/flower/tree/main/examples/quickstart-pytorch) for a detailed explanation. 
- -______________________________________________________________________ +## Run the project -## Run Federated Learning with PyTorch and `Flower Next` +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -### 1. Start the long-running Flower server (SuperLink) +### Run with the Simulation Engine ```bash -flower-superlink --insecure +flwr run . ``` -### 2. Start the long-running Flower clients (SuperNodes) - -Start 2 Flower `SuperNodes` in 2 separate terminal windows, using: +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: ```bash -flower-client-app client:app --insecure +flwr run . --run-config "num-server-rounds=5 learning-rate=0.05" ``` -### 3. Run the Flower App +> \[!TIP\] +> For a more detailed walk-through check our [quickstart PyTorch tutorial](https://flower.ai/docs/framework/tutorial-quickstart-pytorch.html) -With both the long-running server (SuperLink) and two clients (SuperNode) up and running, we can now run the actual Flower App: +### Run with the Deployment Engine -```bash -flower-server-app server:app --insecure -``` +> \[!NOTE\] +> An update to this example will show how to run this Flower application with the Deployment Engine and TLS certificates, or with Docker. 
diff --git a/examples/quickstart-pytorch/client.py b/examples/quickstart-pytorch/client.py deleted file mode 100644 index 2452db819e1d..000000000000 --- a/examples/quickstart-pytorch/client.py +++ /dev/null @@ -1,152 +0,0 @@ -import argparse -import warnings -from collections import OrderedDict - -from flwr.client import NumPyClient, ClientApp -from flwr_datasets import FederatedDataset -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.utils.data import DataLoader -from torchvision.transforms import Compose, Normalize, ToTensor -from tqdm import tqdm - - -# ############################################################################# -# 1. Regular PyTorch pipeline: nn.Module, train, test, and DataLoader -# ############################################################################# - -warnings.filterwarnings("ignore", category=UserWarning) -DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - - -class Net(nn.Module): - """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz')""" - - def __init__(self) -> None: - super(Net, self).__init__() - self.conv1 = nn.Conv2d(3, 6, 5) - self.pool = nn.MaxPool2d(2, 2) - self.conv2 = nn.Conv2d(6, 16, 5) - self.fc1 = nn.Linear(16 * 5 * 5, 120) - self.fc2 = nn.Linear(120, 84) - self.fc3 = nn.Linear(84, 10) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.pool(F.relu(self.conv1(x))) - x = self.pool(F.relu(self.conv2(x))) - x = x.view(-1, 16 * 5 * 5) - x = F.relu(self.fc1(x)) - x = F.relu(self.fc2(x)) - return self.fc3(x) - - -def train(net, trainloader, epochs): - """Train the model on the training set.""" - criterion = torch.nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9) - for _ in range(epochs): - for batch in tqdm(trainloader, "Training"): - images = batch["img"] - labels = batch["label"] - optimizer.zero_grad() - criterion(net(images.to(DEVICE)), labels.to(DEVICE)).backward() - optimizer.step() - - -def 
test(net, testloader): - """Validate the model on the test set.""" - criterion = torch.nn.CrossEntropyLoss() - correct, loss = 0, 0.0 - with torch.no_grad(): - for batch in tqdm(testloader, "Testing"): - images = batch["img"].to(DEVICE) - labels = batch["label"].to(DEVICE) - outputs = net(images) - loss += criterion(outputs, labels).item() - correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() - accuracy = correct / len(testloader.dataset) - return loss, accuracy - - -def load_data(partition_id): - """Load partition CIFAR10 data.""" - fds = FederatedDataset(dataset="cifar10", partitioners={"train": 2}) - partition = fds.load_partition(partition_id) - # Divide data on each node: 80% train, 20% test - partition_train_test = partition.train_test_split(test_size=0.2, seed=42) - pytorch_transforms = Compose( - [ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] - ) - - def apply_transforms(batch): - """Apply transforms to the partition from FederatedDataset.""" - batch["img"] = [pytorch_transforms(img) for img in batch["img"]] - return batch - - partition_train_test = partition_train_test.with_transform(apply_transforms) - trainloader = DataLoader(partition_train_test["train"], batch_size=32, shuffle=True) - testloader = DataLoader(partition_train_test["test"], batch_size=32) - return trainloader, testloader - - -# ############################################################################# -# 2. 
Federation of the pipeline with Flower -# ############################################################################# - -# Get partition id -parser = argparse.ArgumentParser(description="Flower") -parser.add_argument( - "--partition-id", - choices=[0, 1], - default=0, - type=int, - help="Partition of the dataset divided into 2 iid partitions created artificially.", -) -partition_id = parser.parse_known_args()[0].partition_id - -# Load model and data (simple CNN, CIFAR-10) -net = Net().to(DEVICE) -trainloader, testloader = load_data(partition_id=partition_id) - - -# Define Flower client -class FlowerClient(NumPyClient): - def get_parameters(self, config): - return [val.cpu().numpy() for _, val in net.state_dict().items()] - - def set_parameters(self, parameters): - params_dict = zip(net.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) - net.load_state_dict(state_dict, strict=True) - - def fit(self, parameters, config): - self.set_parameters(parameters) - train(net, trainloader, epochs=1) - return self.get_parameters(config={}), len(trainloader.dataset), {} - - def evaluate(self, parameters, config): - self.set_parameters(parameters) - loss, accuracy = test(net, testloader) - return loss, len(testloader.dataset), {"accuracy": accuracy} - - -def client_fn(cid: str): - """Create and return an instance of Flower `Client`.""" - return FlowerClient().to_client() - - -# Flower ClientApp -app = ClientApp( - client_fn=client_fn, -) - - -# Legacy mode -if __name__ == "__main__": - from flwr.client import start_client - - start_client( - server_address="127.0.0.1:8080", - client=FlowerClient().to_client(), - ) diff --git a/examples/quickstart-pytorch/pyproject.toml b/examples/quickstart-pytorch/pyproject.toml index 89a5cd16d7de..fa086d18880d 100644 --- a/examples/quickstart-pytorch/pyproject.toml +++ b/examples/quickstart-pytorch/pyproject.toml @@ -3,19 +3,36 @@ requires = ["hatchling"] build-backend = 
"hatchling.build" [project] -name = "quickstart-pytorch" -version = "0.1.0" -description = "PyTorch Federated Learning Quickstart with Flower" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] +name = "pytorchexample" +version = "1.0.0" +description = "Federated Learning with PyTorch and Flower (Quickstart Example)" +license = "Apache-2.0" dependencies = [ - "flwr>=1.8.0,<2.0", - "flwr-datasets[vision]>=0.0.2,<1.0.0", - "torch==2.1.1", - "torchvision==0.16.1", - "tqdm==4.66.3" + "flwr[simulation]>=1.12.0", + "flwr-datasets[vision]>=0.3.0", + "torch==2.2.1", + "torchvision==0.17.1", ] [tool.hatch.build.targets.wheel] packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "pytorchexample.server_app:app" +clientapp = "pytorchexample.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +fraction-evaluate = 0.5 +local-epochs = 1 +learning-rate = 0.1 +batch-size = 32 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 diff --git a/examples/quickstart-pytorch/pytorchexample/__init__.py b/examples/quickstart-pytorch/pytorchexample/__init__.py new file mode 100644 index 000000000000..d29a98ebc6ae --- /dev/null +++ b/examples/quickstart-pytorch/pytorchexample/__init__.py @@ -0,0 +1 @@ +"""pytorchexample.""" diff --git a/examples/quickstart-pytorch/pytorchexample/client_app.py b/examples/quickstart-pytorch/pytorchexample/client_app.py new file mode 100644 index 000000000000..1052741710db --- /dev/null +++ b/examples/quickstart-pytorch/pytorchexample/client_app.py @@ -0,0 +1,58 @@ +"""pytorchexample: A Flower / PyTorch app.""" + +import torch +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context + +from pytorchexample.task import Net, get_weights, load_data, set_weights, test, train + + +# Define Flower Client +class FlowerClient(NumPyClient): + def __init__(self, trainloader, valloader, 
local_epochs, learning_rate): + self.net = Net() + self.trainloader = trainloader + self.valloader = valloader + self.local_epochs = local_epochs + self.lr = learning_rate + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + def fit(self, parameters, config): + """Train the model with data of this client.""" + set_weights(self.net, parameters) + results = train( + self.net, + self.trainloader, + self.valloader, + self.local_epochs, + self.lr, + self.device, + ) + return get_weights(self.net), len(self.trainloader.dataset), results + + def evaluate(self, parameters, config): + """Evaluate the model on the data this client has.""" + set_weights(self.net, parameters) + loss, accuracy = test(self.net, self.valloader, self.device) + return loss, len(self.valloader.dataset), {"accuracy": accuracy} + + +def client_fn(context: Context): + """Construct a Client that will be run in a ClientApp.""" + + # Read the node_config to fetch data partition associated to this node + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + + # Read run_config to fetch hyperparameters relevant to this run + batch_size = context.run_config["batch-size"] + trainloader, valloader = load_data(partition_id, num_partitions, batch_size) + local_epochs = context.run_config["local-epochs"] + learning_rate = context.run_config["learning-rate"] + + # Return Client instance + return FlowerClient(trainloader, valloader, local_epochs, learning_rate).to_client() + + +# Flower ClientApp +app = ClientApp(client_fn) diff --git a/examples/quickstart-pytorch/pytorchexample/server_app.py b/examples/quickstart-pytorch/pytorchexample/server_app.py new file mode 100644 index 000000000000..834725976d1a --- /dev/null +++ b/examples/quickstart-pytorch/pytorchexample/server_app.py @@ -0,0 +1,46 @@ +"""pytorchexample: A Flower / PyTorch app.""" + +from typing import List, Tuple + +from flwr.common import Context, Metrics, 
ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg + +from pytorchexample.task import Net, get_weights + + +# Define metric aggregation function +def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: + # Multiply accuracy of each client by number of examples used + accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + # Aggregate and return custom metric (weighted average) + return {"accuracy": sum(accuracies) / sum(examples)} + + +def server_fn(context: Context): + """Construct components that set the ServerApp behaviour.""" + + # Read from config + num_rounds = context.run_config["num-server-rounds"] + + # Initialize model parameters + ndarrays = get_weights(Net()) + parameters = ndarrays_to_parameters(ndarrays) + + # Define the strategy + strategy = FedAvg( + fraction_fit=1.0, + fraction_evaluate=context.run_config["fraction-evaluate"], + min_available_clients=2, + evaluate_metrics_aggregation_fn=weighted_average, + initial_parameters=parameters, + ) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Create ServerApp +app = ServerApp(server_fn=server_fn) diff --git a/examples/quickstart-pytorch/pytorchexample/task.py b/examples/quickstart-pytorch/pytorchexample/task.py new file mode 100644 index 000000000000..8e0808871616 --- /dev/null +++ b/examples/quickstart-pytorch/pytorchexample/task.py @@ -0,0 +1,114 @@ +"""pytorchexample: A Flower / PyTorch app.""" + +from collections import OrderedDict + +import torch +import torch.nn as nn +import torch.nn.functional as F +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner +from torch.utils.data import DataLoader +from torchvision.transforms import Compose, Normalize, ToTensor + + +class Net(nn.Module): + """Model (simple CNN 
adapted from 'PyTorch: A 60 Minute Blitz')""" + + def __init__(self): + super(Net, self).__init__() + self.conv1 = nn.Conv2d(3, 6, 5) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x): + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = x.view(-1, 16 * 5 * 5) + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + return self.fc3(x) + + +def get_weights(net): + return [val.cpu().numpy() for _, val in net.state_dict().items()] + + +def set_weights(net, parameters): + params_dict = zip(net.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) + + +fds = None # Cache FederatedDataset + + +def load_data(partition_id: int, num_partitions: int, batch_size: int): + """Load partition CIFAR10 data.""" + # Only initialize `FederatedDataset` once + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="uoft-cs/cifar10", + partitioners={"train": partitioner}, + ) + partition = fds.load_partition(partition_id) + # Divide data on each node: 80% train, 20% test + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) + pytorch_transforms = Compose( + [ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] + ) + + def apply_transforms(batch): + """Apply transforms to the partition from FederatedDataset.""" + batch["img"] = [pytorch_transforms(img) for img in batch["img"]] + return batch + + partition_train_test = partition_train_test.with_transform(apply_transforms) + trainloader = DataLoader( + partition_train_test["train"], batch_size=batch_size, shuffle=True + ) + testloader = DataLoader(partition_train_test["test"], batch_size=batch_size) + return trainloader, testloader + + +def train(net, trainloader, valloader, 
epochs, learning_rate, device): + """Train the model on the training set.""" + net.to(device) # move model to GPU if available + criterion = torch.nn.CrossEntropyLoss().to(device) + optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9) + net.train() + for _ in range(epochs): + for batch in trainloader: + images = batch["img"] + labels = batch["label"] + optimizer.zero_grad() + criterion(net(images.to(device)), labels.to(device)).backward() + optimizer.step() + + val_loss, val_acc = test(net, valloader, device) + + results = { + "val_loss": val_loss, + "val_accuracy": val_acc, + } + return results + + +def test(net, testloader, device): + """Validate the model on the test set.""" + criterion = torch.nn.CrossEntropyLoss() + correct, loss = 0, 0.0 + with torch.no_grad(): + for batch in testloader: + images = batch["img"].to(device) + labels = batch["label"].to(device) + outputs = net(images) + loss += criterion(outputs, labels).item() + correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() + accuracy = correct / len(testloader.dataset) + loss = loss / len(testloader) + return loss, accuracy diff --git a/examples/quickstart-pytorch/server.py b/examples/quickstart-pytorch/server.py deleted file mode 100644 index 4034703ca690..000000000000 --- a/examples/quickstart-pytorch/server.py +++ /dev/null @@ -1,41 +0,0 @@ -from typing import List, Tuple - -from flwr.server import ServerApp, ServerConfig -from flwr.server.strategy import FedAvg -from flwr.common import Metrics - - -# Define metric aggregation function -def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: - # Multiply accuracy of each client by number of examples used - accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] - examples = [num_examples for num_examples, _ in metrics] - - # Aggregate and return custom metric (weighted average) - return {"accuracy": sum(accuracies) / sum(examples)} - - -# Define strategy -strategy = 
FedAvg(evaluate_metrics_aggregation_fn=weighted_average) - - -# Define config -config = ServerConfig(num_rounds=3) - - -# Flower ServerApp -app = ServerApp( - config=config, - strategy=strategy, -) - - -# Legacy mode -if __name__ == "__main__": - from flwr.server import start_server - - start_server( - server_address="0.0.0.0:8080", - config=config, - strategy=strategy, - ) diff --git a/examples/quickstart-sklearn-tabular/README.md b/examples/quickstart-sklearn-tabular/README.md index a975a9392800..db8a6bfbfb7c 100644 --- a/examples/quickstart-sklearn-tabular/README.md +++ b/examples/quickstart-sklearn-tabular/README.md @@ -1,4 +1,10 @@ -# Flower Example using scikit-learn +--- +tags: [quickstart, tabular, fds] +dataset: [Iris] +framework: [scikit-learn] +--- + +# Federated Learning with scikit-learn and Flower (Quickstart Example) This example of Flower uses `scikit-learn`'s `LogisticRegression` model to train a federated learning system on "iris" (tabular) dataset. @@ -6,7 +12,9 @@ It will help you understand how to adapt Flower for use with `scikit-learn`. Running this example in itself is quite easy. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to download, partition and preprocess the dataset. -## Project Setup +## Set up the project + +### Clone the project Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: @@ -14,64 +22,44 @@ Start by cloning the example project. We prepared a single-line command that you git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/quickstart-sklearn-tabular . 
&& rm -rf flower && cd quickstart-sklearn-tabular ``` -This will create a new directory called `quickstart-sklearn-tabular` containing the following files: +This will create a new directory called `quickstart-sklearn-tabular` with the following structure: ```shell --- pyproject.toml --- requirements.txt --- client.py --- server.py --- utils.py --- README.md +quickstart-sklearn-tabular +├── sklearnexample +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -### Installing Dependencies - -Project dependencies (such as `scikit-learn` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. +### Install dependencies and project -#### Poetry +Install the dependencies defined in `pyproject.toml` as well as the `sklearnexample` package. -```shell -poetry install -poetry shell +```bash +pip install -e . ``` -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: - -```shell -poetry run python3 -c "import flwr" -``` - -If you don't see any errors you're good to go! - -#### pip +## Run the project -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. 
If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -```shell -pip install -r requirements.txt -``` - -## Run Federated Learning with scikit-learn and Flower +### Run with the Simulation Engine -Afterwards you are ready to start the Flower server as well as the clients. You can simply start the server in a terminal as follows: - -```shell -poetry run python3 server.py +```bash +flwr run . ``` -Now you are ready to start the Flower clients which will participate in the learning. To do so simply open two more terminals and run the following command in each: +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: -```shell -poetry run python3 client.py --partition-id 0 # partition-id should be any of {0,1,2} +```bash +flwr run . --run-config penalty="'l1'" ``` -Alternatively you can run all of it in one shell as follows: - -```shell -poetry run python3 server.py & -poetry run python3 client.py --partition-id 0 & -poetry run python3 client.py --partition-id 1 -``` +### Run with the Deployment Engine -You will see that Flower is starting a federated training. +> \[!NOTE\] +> An update to this example will show how to run this Flower application with the Deployment Engine and TLS certificates, or with Docker. 
diff --git a/examples/quickstart-sklearn-tabular/client.py b/examples/quickstart-sklearn-tabular/client.py deleted file mode 100644 index b7e3046c822d..000000000000 --- a/examples/quickstart-sklearn-tabular/client.py +++ /dev/null @@ -1,73 +0,0 @@ -import argparse -import warnings - -from sklearn.linear_model import LogisticRegression -from sklearn.metrics import log_loss - -import flwr as fl -import utils -from flwr_datasets import FederatedDataset - -if __name__ == "__main__": - N_CLIENTS = 3 - - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--partition-id", - type=int, - choices=range(0, N_CLIENTS), - required=True, - help="Specifies the artificial data partition", - ) - args = parser.parse_args() - partition_id = args.partition_id - - # Load the partition data - fds = FederatedDataset(dataset="hitorilabs/iris", partitioners={"train": N_CLIENTS}) - - dataset = fds.load_partition(partition_id, "train").with_format("pandas")[:] - X = dataset[["petal_length", "petal_width", "sepal_length", "sepal_width"]] - y = dataset["species"] - unique_labels = fds.load_split("train").unique("species") - # Split the on edge data: 80% train, 20% test - X_train, X_test = X[: int(0.8 * len(X))], X[int(0.8 * len(X)) :] - y_train, y_test = y[: int(0.8 * len(y))], y[int(0.8 * len(y)) :] - - # Create LogisticRegression Model - model = LogisticRegression( - penalty="l2", - max_iter=1, # local epoch - warm_start=True, # prevent refreshing weights when fitting - ) - - # Setting initial parameters, akin to model.compile for keras models - utils.set_initial_params(model, n_features=X_train.shape[1], n_classes=3) - - # Define Flower client - class IrisClient(fl.client.NumPyClient): - def get_parameters(self, config): # type: ignore - return utils.get_model_parameters(model) - - def fit(self, parameters, config): # type: ignore - utils.set_model_params(model, parameters) - # Ignore convergence failure due to low local epochs - with warnings.catch_warnings(): 
- warnings.simplefilter("ignore") - model.fit(X_train, y_train) - accuracy = model.score(X_train, y_train) - return ( - utils.get_model_parameters(model), - len(X_train), - {"train_accuracy": accuracy}, - ) - - def evaluate(self, parameters, config): # type: ignore - utils.set_model_params(model, parameters) - loss = log_loss(y_test, model.predict_proba(X_test), labels=unique_labels) - accuracy = model.score(X_test, y_test) - return loss, len(X_test), {"test_accuracy": accuracy} - - # Start Flower client - fl.client.start_client( - server_address="0.0.0.0:8080", client=IrisClient().to_client() - ) diff --git a/examples/quickstart-sklearn-tabular/pyproject.toml b/examples/quickstart-sklearn-tabular/pyproject.toml index 86eab5c38df0..4fc34ed58bb6 100644 --- a/examples/quickstart-sklearn-tabular/pyproject.toml +++ b/examples/quickstart-sklearn-tabular/pyproject.toml @@ -1,18 +1,35 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] -name = "sklearn-mnist" -version = "0.1.0" -description = "Federated learning with scikit-learn and Flower" -authors = [ - "The Flower Authors ", - "Kaushik Amar Das ", +[project] +name = "sklearnexample" +version = "1.0.0" +description = "Federated Learning with scikit-learn and Flower (Quickstart Example)" +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]>=1.12.0", + "flwr-datasets[vision]>=0.3.0", + "scikit-learn>=1.3.0", ] -[tool.poetry.dependencies] -python = "^3.8" -flwr = ">=1.0,<2.0" -flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } -scikit-learn = "^1.3.0" +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "sklearnexample.server_app:app" +clientapp = "sklearnexample.client_app:app" + +[tool.flwr.app.config] +penalty = "l2" +num-server-rounds = 25 +min-available-clients = 2 + 
+[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 3 diff --git a/examples/quickstart-sklearn-tabular/requirements.txt b/examples/quickstart-sklearn-tabular/requirements.txt deleted file mode 100644 index e0f15b31f3f7..000000000000 --- a/examples/quickstart-sklearn-tabular/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -flwr>=1.0, <2.0 -flwr-datasets[vision]>=0.0.2, <1.0.0 -scikit-learn>=1.3.0 diff --git a/examples/quickstart-sklearn-tabular/run.sh b/examples/quickstart-sklearn-tabular/run.sh deleted file mode 100755 index f770ca05f8f4..000000000000 --- a/examples/quickstart-sklearn-tabular/run.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/ - -echo "Starting server" -python server.py & -sleep 3 # Sleep for 3s to give the server enough time to start - -for i in $(seq 0 1); do - echo "Starting client $i" - python client.py --partition-id "${i}" & -done - -# This will allow you to use CTRL+C to stop all background processes -trap 'trap - SIGTERM && kill -- -$$' SIGINT SIGTERM -# Wait for all background processes to complete -wait diff --git a/examples/quickstart-sklearn-tabular/server.py b/examples/quickstart-sklearn-tabular/server.py deleted file mode 100644 index 0c779c52a8d6..000000000000 --- a/examples/quickstart-sklearn-tabular/server.py +++ /dev/null @@ -1,19 +0,0 @@ -import flwr as fl -import utils -from sklearn.linear_model import LogisticRegression - - -# Start Flower server for five rounds of federated learning -if __name__ == "__main__": - model = LogisticRegression() - utils.set_initial_params(model, n_classes=3, n_features=4) - strategy = fl.server.strategy.FedAvg( - min_available_clients=2, - fit_metrics_aggregation_fn=utils.weighted_average, - evaluate_metrics_aggregation_fn=utils.weighted_average, - ) - fl.server.start_server( - server_address="0.0.0.0:8080", - strategy=strategy, - 
"""sklearnexample: A Flower / sklearn app."""

import warnings

from flwr.client import ClientApp, NumPyClient
from flwr.common import Context
from sklearn.metrics import log_loss

from sklearnexample.task import (
    UNIQUE_LABELS,
    create_log_reg_and_instantiate_parameters,
    get_model_parameters,
    load_data,
    set_model_params,
)


class FlowerClient(NumPyClient):
    """Flower client training a scikit-learn LogisticRegression locally."""

    def __init__(self, model, X_train, y_train, X_test, y_test):
        self.model = model
        # Store plain NumPy arrays; sklearn consumes them directly.
        self.X_train = X_train.values
        self.y_train = y_train.values
        self.X_test = X_test.values
        self.y_test = y_test.values
        self.unique_labels = UNIQUE_LABELS

    def fit(self, parameters, config):
        """Train on the local partition and report training accuracy."""
        set_model_params(self.model, parameters)
        # Only one local epoch is run, so sklearn's convergence
        # warnings are expected; silence them.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            self.model.fit(self.X_train, self.y_train)
        train_acc = self.model.score(self.X_train, self.y_train)
        metrics = {"train_accuracy": train_acc}
        return get_model_parameters(self.model), len(self.X_train), metrics

    def evaluate(self, parameters, config):  # type: ignore
        """Evaluate the received parameters on the local test split."""
        set_model_params(self.model, parameters)
        proba = self.model.predict_proba(self.X_test)
        loss = log_loss(self.y_test, proba, labels=self.unique_labels)
        test_acc = self.model.score(self.X_test, self.y_test)
        return loss, len(self.X_test), {"test_accuracy": test_acc}


def client_fn(context: Context):
    """Construct a Client that will be run in a ClientApp."""
    # node_config identifies which data partition this node owns
    partition_id = context.node_config["partition-id"]
    num_partitions = context.node_config["num-partitions"]
    X_train, y_train, X_test, y_test = load_data(partition_id, num_partitions)

    # run_config carries the user-tunable hyperparameters
    penalty = context.run_config["penalty"]
    model = create_log_reg_and_instantiate_parameters(penalty)

    return FlowerClient(model, X_train, y_train, X_test, y_test).to_client()


# Flower ClientApp
app = ClientApp(client_fn=client_fn)
+ """ + # num_samples_list can represent the number of samples + # or the number of batches depending on the client + num_samples_list = [n_batches for n_batches, _ in metrics] + num_samples_sum = sum(num_samples_list) + metrics_lists: Dict[str, List[float]] = {} + for num_samples, all_metrics_dict in metrics: + # Calculate each metric one by one + for single_metric, value in all_metrics_dict.items(): + if isinstance(value, (float, int)): + metrics_lists[single_metric] = [] + # Just one iteration needed to initialize the keywords + break + + for num_samples, all_metrics_dict in metrics: + # Calculate each metric one by one + for single_metric, value in all_metrics_dict.items(): + # Add weighted metric + if isinstance(value, (float, int)): + metrics_lists[single_metric].append(float(num_samples * value)) + + weighted_metrics: Dict[str, Scalar] = {} + for metric_name, metric_values in metrics_lists.items(): + weighted_metrics[metric_name] = sum(metric_values) / num_samples_sum + + return weighted_metrics + + +def server_fn(context: Context) -> ServerAppComponents: + """Construct components that set the ServerApp behavior.""" + + penalty = context.run_config["penalty"] + model = create_log_reg_and_instantiate_parameters(penalty) + ndarrays = get_model_parameters(model) + global_model_init = ndarrays_to_parameters(ndarrays) + + # Define the strategy + min_available_clients = context.run_config["min-available-clients"] + strategy = FedAvg( + min_available_clients=min_available_clients, + fit_metrics_aggregation_fn=weighted_average, + evaluate_metrics_aggregation_fn=weighted_average, + initial_parameters=global_model_init, + ) + + # Construct ServerConfig + num_rounds = context.run_config["num-server-rounds"] + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Create ServerApp +app = ServerApp(server_fn=server_fn) diff --git a/examples/quickstart-sklearn-tabular/sklearnexample/task.py 
"""sklearnexample: model and data utilities for the Flower / sklearn app."""

import numpy as np
from flwr.common import NDArrays
from flwr_datasets import FederatedDataset
from flwr_datasets.partitioner import IidPartitioner
from sklearn.linear_model import LogisticRegression

# This information is needed to create a correct scikit-learn model
UNIQUE_LABELS = [0, 1, 2]
FEATURES = ["petal_length", "petal_width", "sepal_length", "sepal_width"]


def get_model_parameters(model: LogisticRegression) -> NDArrays:
    """Return the parameters of a sklearn LogisticRegression model."""
    if model.fit_intercept:
        # The intercept array only exists when the model fits one.
        return [model.coef_, model.intercept_]
    return [model.coef_]


def set_model_params(model: LogisticRegression, params: NDArrays) -> LogisticRegression:
    """Set the parameters of a sklearn LogisticRegression model."""
    model.coef_ = params[0]
    if model.fit_intercept:
        model.intercept_ = params[1]
    return model


def set_initial_params(model: LogisticRegression, n_classes: int, n_features: int):
    """Set initial parameters as zeros.

    Required since model params are uninitialized until model.fit is called
    but server asks for initial parameters from clients at launch. Refer to
    sklearn.linear_model.LogisticRegression documentation for more
    information.
    """
    model.classes_ = np.arange(n_classes)
    model.coef_ = np.zeros((n_classes, n_features))
    if model.fit_intercept:
        model.intercept_ = np.zeros((n_classes,))


def create_log_reg_and_instantiate_parameters(penalty):
    """Create a LogisticRegression configured for federated training."""
    model = LogisticRegression(
        penalty=penalty,
        max_iter=1,  # local epoch
        warm_start=True,  # prevent refreshing weights when fitting
        solver="saga",
    )
    # Setting initial parameters, akin to model.compile for keras models
    set_initial_params(model, n_features=len(FEATURES), n_classes=len(UNIQUE_LABELS))
    return model


fds = None  # Cache FederatedDataset


def load_data(partition_id: int, num_partitions: int):
    """Load the train/test split for the given partition.

    Returns ``(X_train, y_train, X_test, y_test)`` as pandas objects.
    """
    global fds
    if fds is None:
        # Only initialize `FederatedDataset` once; reuse across calls.
        partitioner = IidPartitioner(num_partitions=num_partitions)
        fds = FederatedDataset(
            dataset="hitorilabs/iris", partitioners={"train": partitioner}
        )
    dataset = fds.load_partition(partition_id, "train").with_format("pandas")[:]
    X = dataset[FEATURES]
    y = dataset["species"]
    # Split the on-edge data: 80% train, 20% test
    split = int(0.8 * len(X))
    X_train, X_test = X[:split], X[split:]
    y_train, y_test = y[:split], y[split:]
    return X_train, y_train, X_test, y_test
LogisticRegression, params: NDArrays) -> LogisticRegression: - """Set the parameters of a sklean LogisticRegression model.""" - model.coef_ = params[0] - if model.fit_intercept: - model.intercept_ = params[1] - return model - - -def set_initial_params(model: LogisticRegression, n_classes: int, n_features: int): - """Set initial parameters as zeros. - - Required since model params are uninitialized until model.fit is called but server - asks for initial parameters from clients at launch. Refer to - sklearn.linear_model.LogisticRegression documentation for more information. - """ - model.classes_ = np.array([i for i in range(n_classes)]) - - model.coef_ = np.zeros((n_classes, n_features)) - if model.fit_intercept: - model.intercept_ = np.zeros((n_classes,)) - - -def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Dict[str, Scalar]: - """Compute weighted average. - - It is generic implementation that averages only over floats and ints and drops the - other data types of the Metrics. 
- """ - print(metrics) - # num_samples_list can represent number of sample or batches depending on the client - num_samples_list = [n_batches for n_batches, _ in metrics] - num_samples_sum = sum(num_samples_list) - metrics_lists: Dict[str, List[float]] = {} - for num_samples, all_metrics_dict in metrics: - # Calculate each metric one by one - for single_metric, value in all_metrics_dict.items(): - if isinstance(value, (float, int)): - metrics_lists[single_metric] = [] - # Just one iteration needed to initialize the keywords - break - - for num_samples, all_metrics_dict in metrics: - # Calculate each metric one by one - for single_metric, value in all_metrics_dict.items(): - # Add weighted metric - if isinstance(value, (float, int)): - metrics_lists[single_metric].append(float(num_samples * value)) - - weighted_metrics: Dict[str, Scalar] = {} - for metric_name, metric_values in metrics_lists.items(): - weighted_metrics[metric_name] = sum(metric_values) / num_samples_sum - - return weighted_metrics diff --git a/examples/quickstart-tabnet/README.md b/examples/quickstart-tabnet/README.md index 19a139f83064..e8be55eaacef 100644 --- a/examples/quickstart-tabnet/README.md +++ b/examples/quickstart-tabnet/README.md @@ -1,3 +1,9 @@ +--- +tags: [quickstart, tabular] +dataset: [Iris] +framework: [tabnet] +--- + # Flower TabNet Example using TensorFlow This introductory example to Flower uses Keras but deep knowledge of Keras is not necessarily required to run the example. However, it will help you understanding how to adapt Flower to your use-cases. You can learn more about TabNet from [paper](https://arxiv.org/abs/1908.07442) and its implementation using TensorFlow at [this repository](https://github.com/titu1994/tf-TabNet). Note also that the basis of this example using federated learning is the example from the repository above. 
diff --git a/examples/quickstart-tabnet/client.py b/examples/quickstart-tabnet/client.py index 2289b1b55b3d..4da5a394a199 100644 --- a/examples/quickstart-tabnet/client.py +++ b/examples/quickstart-tabnet/client.py @@ -1,8 +1,9 @@ import os + import flwr as fl +import tabnet import tensorflow as tf import tensorflow_datasets as tfds -import tabnet train_size = 125 BATCH_SIZE = 50 diff --git a/examples/quickstart-tabnet/pyproject.toml b/examples/quickstart-tabnet/pyproject.toml index 6b7311f068f0..8345d6bd3da2 100644 --- a/examples/quickstart-tabnet/pyproject.toml +++ b/examples/quickstart-tabnet/pyproject.toml @@ -9,7 +9,7 @@ description = "Tabnet Federated Learning Quickstart with Flower" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr = ">=1.0,<2.0" tensorflow-cpu = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "platform_machine == \"x86_64\"" } tensorflow-macos = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "sys_platform == \"darwin\" and platform_machine == \"arm64\"" } diff --git a/examples/quickstart-tabnet/server.py b/examples/quickstart-tabnet/server.py index 39c350388c1b..a99eb48059bc 100644 --- a/examples/quickstart-tabnet/server.py +++ b/examples/quickstart-tabnet/server.py @@ -1,6 +1,5 @@ import flwr as fl - # Start Flower server fl.server.start_server( server_address="0.0.0.0:8080", diff --git a/examples/quickstart-tensorflow/README.md b/examples/quickstart-tensorflow/README.md index ae1fe19834a3..a162e756d799 100644 --- a/examples/quickstart-tensorflow/README.md +++ b/examples/quickstart-tensorflow/README.md @@ -1,84 +1,68 @@ -# Flower Example using TensorFlow/Keras +--- +tags: [quickstart, vision, fds] +dataset: [CIFAR-10] +framework: [tensorflow] +--- -This introductory example to Flower uses Keras but deep knowledge of Keras is not necessarily required to run the example. However, it will help you understand how to adapt Flower to your use case. 
-Running this example in itself is quite easy. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to download, partition and preprocess the CIFAR-10 dataset. - -## Project Setup - -Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: - -```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/quickstart-tensorflow . && rm -rf flower && cd quickstart-tensorflow -``` +# Federated Learning with Tensorflow/Keras and Flower (Quickstart Example) -This will create a new directory called `quickstart-tensorflow` containing the following files: +This introductory example to Flower uses Tensorflow/Keras but deep knowledge of this frameworks is required to run the example. However, it will help you understand how to adapt Flower to your use case. +Running this example in itself is quite easy. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to download, partition and preprocess the CIFAR-10 dataset. -```shell --- pyproject.toml --- client.py --- server.py --- README.md -``` +## Set up the project -### Installing Dependencies +### Clone the project -Project dependencies (such as `tensorflow` and `flwr`) are defined in `pyproject.toml`. You can install the dependencies by invoking `pip`: +Start by cloning the example project: ```shell -# From a new python environment, run: -pip install . +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/quickstart-tensorflow . 
\ + && rm -rf _tmp \ + && cd quickstart-tensorflow ``` -Then, to verify that everything works correctly you can run the following command: +This will create a new directory called `quickstart-tensorflow` with the following structure: ```shell -python3 -c "import flwr" +quickstart-tensorflow +├── tfexample +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -If you don't see any errors you're good to go! +### Install dependencies and project -## Run Federated Learning with TensorFlow/Keras and Flower +Install the dependencies defined in `pyproject.toml` as well as the `tfhexample` package. -Afterward, you are ready to start the Flower server as well as the clients. You can simply start the server in a terminal as follows: - -```shell -python3 server.py -``` - -Now you are ready to start the Flower clients which will participate in the learning. To do so simply open two more terminals and run the following command in each: - -```shell -python3 client.py --partition-id 0 -``` - -Start client 2 in the second terminal: - -```shell -python3 client.py --partition-id 1 +```bash +pip install -e . ``` -You will see that Keras is starting a federated training. Have a look at the [code](https://github.com/adap/flower/tree/main/examples/quickstart-tensorflow) for a detailed explanation. You can add `steps_per_epoch=3` to `model.fit()` if you just want to evaluate that everything works without having to wait for the client-side training to finish (this will save you a lot of time during development). +## Run the project -## Run Federated Learning with TensorFlow/Keras and `Flower Next` +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. 
If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -### 1. Start the long-running Flower server (SuperLink) +### Run with the Simulation Engine ```bash -flower-superlink --insecure +flwr run . ``` -### 2. Start the long-running Flower clients (SuperNodes) - -Start 2 Flower \`SuperNodes in 2 separate terminal windows, using: +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: ```bash -flower-client-app client:app --insecure +flwr run . --run-config "num-server-rounds=5 learning-rate=0.05" ``` -### 3. Run the Flower App +> \[!TIP\] +> For a more detailed walk-through check our [quickstart TensorFlow tutorial](https://flower.ai/docs/framework/tutorial-quickstart-tensorflow.html) -With both the long-running server (SuperLink) and two clients (SuperNode) up and running, we can now run the actual Flower App, using: +### Run with the Deployment Engine -```bash -flower-server-app server:app --insecure -``` +> \[!NOTE\] +> An update to this example will show how to run this Flower application with the Deployment Engine and TLS certificates, or with Docker. diff --git a/examples/quickstart-tensorflow/client.py b/examples/quickstart-tensorflow/client.py deleted file mode 100644 index 6b2bd6639ce0..000000000000 --- a/examples/quickstart-tensorflow/client.py +++ /dev/null @@ -1,72 +0,0 @@ -import argparse -import os - -from flwr.client import ClientApp, NumPyClient -import tensorflow as tf -from flwr_datasets import FederatedDataset - -# Make TensorFlow log less verbose -os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" - -# Parse arguments -parser = argparse.ArgumentParser(description="Flower") -parser.add_argument( - "--partition-id", - type=int, - choices=[0, 1, 2], - default=0, - help="Partition of the dataset (0, 1 or 2). 
" - "The dataset is divided into 3 partitions created artificially.", -) -args, _ = parser.parse_known_args() - -# Load model and data (MobileNetV2, CIFAR-10) -model = tf.keras.applications.MobileNetV2((32, 32, 3), classes=10, weights=None) -model.compile("adam", "sparse_categorical_crossentropy", metrics=["accuracy"]) - -# Download and partition dataset -fds = FederatedDataset(dataset="cifar10", partitioners={"train": 3}) -partition = fds.load_partition(args.partition_id, "train") -partition.set_format("numpy") - -# Divide data on each node: 80% train, 20% test -partition = partition.train_test_split(test_size=0.2, seed=42) -x_train, y_train = partition["train"]["img"] / 255.0, partition["train"]["label"] -x_test, y_test = partition["test"]["img"] / 255.0, partition["test"]["label"] - - -# Define Flower client -class FlowerClient(NumPyClient): - def get_parameters(self, config): - return model.get_weights() - - def fit(self, parameters, config): - model.set_weights(parameters) - model.fit(x_train, y_train, epochs=1, batch_size=32) - return model.get_weights(), len(x_train), {} - - def evaluate(self, parameters, config): - model.set_weights(parameters) - loss, accuracy = model.evaluate(x_test, y_test) - return loss, len(x_test), {"accuracy": accuracy} - - -def client_fn(cid: str): - """Create and return an instance of Flower `Client`.""" - return FlowerClient().to_client() - - -# Flower ClientApp -app = ClientApp( - client_fn=client_fn, -) - - -# Legacy mode -if __name__ == "__main__": - from flwr.client import start_client - - start_client( - server_address="127.0.0.1:8080", - client=FlowerClient().to_client(), - ) diff --git a/examples/quickstart-tensorflow/pyproject.toml b/examples/quickstart-tensorflow/pyproject.toml index c0f71344b2fb..f5fc566d654c 100644 --- a/examples/quickstart-tensorflow/pyproject.toml +++ b/examples/quickstart-tensorflow/pyproject.toml @@ -3,18 +3,36 @@ requires = ["hatchling"] build-backend = "hatchling.build" [project] -name = 
"quickstart-tensorflow" -version = "0.1.0" -description = "Keras Federated Learning Quickstart with Flower" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] +name = "tfexample" +version = "1.0.0" +description = "Federated Learning with Tensorflow/Keras and Flower (Quickstart Example)" +license = "Apache-2.0" dependencies = [ - "flwr>=1.8.0,<2.0", - "flwr-datasets[vision]>=0.0.2,<1.0.0", + "flwr[simulation]>=1.12.0", + "flwr-datasets[vision]>=0.3.0", "tensorflow-cpu>=2.9.1, != 2.11.1 ; platform_machine == \"x86_64\"", - "tensorflow-macos>=2.9.1, != 2.11.1 ; sys_platform == \"darwin\" and platform_machine == \"arm64\"" + "tensorflow-macos>=2.9.1, != 2.11.1 ; sys_platform == \"darwin\" and platform_machine == \"arm64\"", ] - [tool.hatch.build.targets.wheel] packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "tfexample.server_app:app" +clientapp = "tfexample.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +local-epochs = 1 +batch-size = 32 +learning-rate = 0.005 +fraction-fit = 0.5 +verbose = false + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 diff --git a/examples/quickstart-tensorflow/server.py b/examples/quickstart-tensorflow/server.py deleted file mode 100644 index 4034703ca690..000000000000 --- a/examples/quickstart-tensorflow/server.py +++ /dev/null @@ -1,41 +0,0 @@ -from typing import List, Tuple - -from flwr.server import ServerApp, ServerConfig -from flwr.server.strategy import FedAvg -from flwr.common import Metrics - - -# Define metric aggregation function -def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: - # Multiply accuracy of each client by number of examples used - accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] - examples = [num_examples for num_examples, _ in metrics] - - # Aggregate and return custom metric (weighted average) 
"""tfexample: A Flower / TensorFlow app."""

from flwr.client import ClientApp, NumPyClient
from flwr.common import Context
from tfexample.task import load_data, load_model


# Define Flower Client
class FlowerClient(NumPyClient):
    """Client wrapping a Keras model and one local data partition."""

    def __init__(self, learning_rate, data, epochs, batch_size, verbose):
        self.model = load_model(learning_rate)
        self.x_train, self.y_train, self.x_test, self.y_test = data
        self.epochs = epochs
        self.batch_size = batch_size
        self.verbose = verbose

    def fit(self, parameters, config):
        """Train the model with data of this client."""
        self.model.set_weights(parameters)
        self.model.fit(
            self.x_train,
            self.y_train,
            epochs=self.epochs,
            batch_size=self.batch_size,
            verbose=self.verbose,
        )
        return self.model.get_weights(), len(self.x_train), {}

    def evaluate(self, parameters, config):
        """Evaluate the model on the data this client has."""
        self.model.set_weights(parameters)
        loss, accuracy = self.model.evaluate(self.x_test, self.y_test, verbose=0)
        return loss, len(self.x_test), {"accuracy": accuracy}


def client_fn(context: Context):
    """Construct a Client that will be run in a ClientApp."""
    # node_config tells this node which data partition it owns
    pid = context.node_config["partition-id"]
    nparts = context.node_config["num-partitions"]
    partition = load_data(pid, nparts)

    # Hyperparameters relevant to this run
    cfg = context.run_config
    client = FlowerClient(
        cfg["learning-rate"],
        partition,
        cfg["local-epochs"],
        cfg["batch-size"],
        cfg.get("verbose"),
    )
    return client.to_client()


# Flower ClientApp
app = ClientApp(client_fn=client_fn)
def server_fn(context: Context):
    """Construct components that set the ServerApp behaviour."""

    # Let's define the global model and pass it to the strategy
    parameters = ndarrays_to_parameters(load_model().get_weights())

    # Define the strategy (the original `strategy = strategy = FedAvg(`
    # double assignment was a typo; a single binding suffices)
    strategy = FedAvg(
        fraction_fit=context.run_config["fraction-fit"],
        fraction_evaluate=1.0,
        min_available_clients=2,
        initial_parameters=parameters,
        evaluate_metrics_aggregation_fn=weighted_average,
    )
    # Read from config
    num_rounds = context.run_config["num-server-rounds"]
    config = ServerConfig(num_rounds=num_rounds)

    return ServerAppComponents(strategy=strategy, config=config)


# Create ServerApp
app = ServerApp(server_fn=server_fn)


# --- tfexample/task.py ----------------------------------------------------
"""tfexample: A Flower / TensorFlow app."""

import os

import keras
from flwr_datasets import FederatedDataset
from flwr_datasets.partitioner import IidPartitioner
from keras import layers

# Make TensorFlow log less verbose
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"


def load_model(learning_rate: float = 0.001):
    """Build and compile a simple CNN for CIFAR-10 with Adam."""
    model = keras.Sequential(
        [
            keras.Input(shape=(32, 32, 3)),
            layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
            layers.MaxPooling2D(pool_size=(2, 2)),
            layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
            layers.MaxPooling2D(pool_size=(2, 2)),
            layers.Flatten(),
            layers.Dropout(0.5),
            layers.Dense(10, activation="softmax"),
        ]
    )
    optimizer = keras.optimizers.Adam(learning_rate)
    model.compile(
        optimizer=optimizer,
        loss="sparse_categorical_crossentropy",
        metrics=["accuracy"],
    )
    return model


fds = None  # Cache FederatedDataset


def load_data(partition_id, num_partitions):
    """Return (x_train, y_train, x_test, y_test) for one CIFAR-10 partition."""
    # Download and partition dataset; only initialize `FederatedDataset` once
    global fds
    if fds is None:
        partitioner = IidPartitioner(num_partitions=num_partitions)
        fds = FederatedDataset(
            dataset="uoft-cs/cifar10",
            partitioners={"train": partitioner},
        )
    partition = fds.load_partition(partition_id, "train")
    partition.set_format("numpy")

    # Divide data on each node: 80% train, 20% test
    partition = partition.train_test_split(test_size=0.2)
    x_train, y_train = partition["train"]["img"] / 255.0, partition["train"]["label"]
    x_test, y_test = partition["test"]["img"] / 255.0, partition["test"]["label"]

    return x_train, y_train, x_test, y_test
&& rm -rf flower && cd simulation-pytorch -``` - -This will create a new directory called `simulation-pytorch` containing the following files: - -``` --- README.md <- Your're reading this right now --- sim.ipynb <- Example notebook --- sim.py <- Example code --- utils.py <- auxiliary functions for this example --- pyproject.toml <- Example dependencies --- requirements.txt <- Example dependencies -``` - -### Installing Dependencies - -Project dependencies (such as `torch` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. - -#### Poetry - -```shell -poetry install -poetry shell -``` - -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: - -```shell -poetry run python -c "import flwr" -``` - -If you don't see any errors you're good to go! - -#### pip - -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. - -```shell -pip install -r requirements.txt -``` - -### Run with `start_simulation()` - -Ensure you have activated your environment then: - -```bash -# and then run the example -python sim.py -``` - -You can adjust the CPU/GPU resources you assign to each of your virtual clients. By default, your clients will only use 1xCPU core. 
For example: - -```bash -# Will assign 2xCPUs to each client -python sim.py --num_cpus=2 - -# Will assign 2xCPUs and 25% of the GPU's VRAM to each client -# This means that you can have 4 concurrent clients on each GPU -# (assuming you have enough CPUs) -python sim.py --num_cpus=2 --num_gpus=0.25 -``` - -### Run with Flower Next (preview) - -Ensure you have activated your environment, then execute the command below. All `ClientApp` instances will run on CPU but the `ServerApp` will run on the GPU if one is available. Note that this is the case because the `Simulation Engine` only exposes certain resources to the `ClientApp` (based on the `client_resources` in `--backend-config`). - -```bash -# Run with the default backend-config. -# `--server-app` points to the `server` object in the sim.py file in this example. -# `--client-app` points to the `client` object in the sim.py file in this example. -flower-simulation --client-app=sim:client --server-app=sim:server --num-supernodes=100 -``` - -You can change the default resources assigned to each `ClientApp` by means of the `--backend-config` argument: - -```bash -# Tells the VCE to reserve 2x CPUs and 25% of available VRAM for each ClientApp -flower-simulation --client-app=sim:client --server-app=sim:server --num-supernodes=100 \ - --backend-config='{"client_resources": {"num_cpus":2, "num_gpus":0.25}}' -``` - -Take a look at the [Documentation](https://flower.ai/docs/framework/how-to-run-simulations.html) for more details on how you can customise your simulation. 
diff --git a/examples/simulation-pytorch/pyproject.toml b/examples/simulation-pytorch/pyproject.toml deleted file mode 100644 index 5978c17f2c60..000000000000 --- a/examples/simulation-pytorch/pyproject.toml +++ /dev/null @@ -1,19 +0,0 @@ -[build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" - -[tool.poetry] -name = "simulation-pytorch" -version = "0.1.0" -description = "Federated Learning Simulation with Flower and PyTorch" -authors = ["The Flower Authors "] - -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = { extras = ["simulation"], version = ">=1.0,<2.0" } -flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } -torch = "2.1.1" -torchvision = "0.16.1" - -[tool.poetry.group.dev.dependencies] -ipykernel = "^6.27.0" diff --git a/examples/simulation-pytorch/requirements.txt b/examples/simulation-pytorch/requirements.txt deleted file mode 100644 index 4dbecab3e546..000000000000 --- a/examples/simulation-pytorch/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -flwr[simulation]>=1.0, <2.0 -torch==2.1.1 -torchvision==0.16.1 -flwr-datasets[vision]>=0.0.2, <1.0.0 \ No newline at end of file diff --git a/examples/simulation-pytorch/sim.ipynb b/examples/simulation-pytorch/sim.ipynb deleted file mode 100644 index d225069cb444..000000000000 --- a/examples/simulation-pytorch/sim.ipynb +++ /dev/null @@ -1,629 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Environment Setup\n", - "\n", - "To start working with Flower, very little is required once you have activated your Python environment (e.g. via `conda`, `virtualenv`, `pyenv`, etc). If you are running this code on Colab, there is really nothing to do except to install Flower and other dependencies. 
The steps below have been verified to run in Colab.\n", - "\n", - "## Installing Flower\n", - "\n", - "You can install flower very conveniently from `pip`:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# depending on your shell, you might need to add `\\` before `[` and `]`.\n", - "!pip install -q flwr[simulation]\n", - "!pip install flwr_datasets[vision]" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We will be using the _simulation_ mode in Flower, which allows you to run a large number of clients without the overheads of manually managing devices. This is achieved via the [Virtual Client Engine](https://flower.ai/docs/framework/how-to-run-simulations.html) in Flower. With simulation, you can dynamically scale your experiments whether you run the code on your laptop, a machine with a single GPU, a server with multiple GPUs os even on a cluster with multiple servers. The `Virtual Client Engine` handles everything transparently and it allows you to specify how many resources (e.g. CPU cores, GPU VRAM) should be assigned to each virtual client." - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "Flower is agnostic to your choice of ML Framework. Flower works with `PyTorch`, `Tensorflow`, `NumPy`, `🤗 Transformers`, `MXNet`, `JAX`, `scikit-learn`, `fastai`, `Pandas`. Flower also supports all major platforms: `iOS`, `Android` and plain `C++`. You can find a _quickstart-_ example for each of the above in the [Flower Repository](https://github.com/adap/flower/tree/main/examples) inside the `examples/` directory.\n", - "\n", - "In this tutorial we are going to use PyTorch, it comes pre-installed in your Collab runtime so there is no need to installed it again. 
If you wouuld like to install another version, you can still do that in the same way other packages are installed via `!pip`" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We are going to install some other dependencies you are likely familiar with. Let's install `maplotlib` to plot our results at the end." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "58b7af77-609f-4118-bd5b-5629a4b5a296" - }, - "outputs": [], - "source": [ - "!pip install matplotlib" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Preparing the experiment\n", - "\n", - "This tutorial is not so much about novel architectural designs so we keep things simple and make use of a typical CNN that is adequate for the MNIST image classification task.\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import torch\n", - "import torch.nn as nn\n", - "import torch.nn.functional as F\n", - "from torch.utils.data import DataLoader\n", - "\n", - "\n", - "class Net(nn.Module):\n", - " def __init__(self, num_classes: int) -> None:\n", - " super(Net, self).__init__()\n", - " self.conv1 = nn.Conv2d(1, 6, 5)\n", - " self.pool = nn.MaxPool2d(2, 2)\n", - " self.conv2 = nn.Conv2d(6, 16, 5)\n", - " self.fc1 = nn.Linear(16 * 4 * 4, 120)\n", - " self.fc2 = nn.Linear(120, 84)\n", - " self.fc3 = nn.Linear(84, num_classes)\n", - "\n", - " def forward(self, x: torch.Tensor) -> torch.Tensor:\n", - " x = self.pool(F.relu(self.conv1(x)))\n", - " x = self.pool(F.relu(self.conv2(x)))\n", - " x = x.view(-1, 16 * 4 * 4)\n", - " x = F.relu(self.fc1(x))\n", - " x = F.relu(self.fc2(x))\n", - " x = self.fc3(x)\n", - " return x" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We'll be training the model in a 
Federated setting. In order to do that, we need to define two functions:\n", - "\n", - "* `train()` that will train the model given a dataloader.\n", - "* `test()` that will be used to evaluate the performance of the model on held-out data, e.g., a training set." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def train(net, trainloader, optim, epochs, device: str):\n", - " \"\"\"Train the network on the training set.\"\"\"\n", - " criterion = torch.nn.CrossEntropyLoss()\n", - " net.train()\n", - " for _ in range(epochs):\n", - " for batch in trainloader:\n", - " images, labels = batch[\"image\"].to(device), batch[\"label\"].to(device)\n", - " optim.zero_grad()\n", - " loss = criterion(net(images), labels)\n", - " loss.backward()\n", - " optim.step()\n", - "\n", - "\n", - "def test(net, testloader, device: str):\n", - " \"\"\"Validate the network on the entire test set.\"\"\"\n", - " criterion = torch.nn.CrossEntropyLoss()\n", - " correct, loss = 0, 0.0\n", - " net.eval()\n", - " with torch.no_grad():\n", - " for data in testloader:\n", - " images, labels = data[\"image\"].to(device), data[\"label\"].to(device)\n", - " outputs = net(images)\n", - " loss += criterion(outputs, labels).item()\n", - " _, predicted = torch.max(outputs.data, 1)\n", - " correct += (predicted == labels).sum().item()\n", - " accuracy = correct / len(testloader.dataset)\n", - " return loss, accuracy" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The code we have written so far is not specific to Federated Learning. Then, what are the key differences between Federated Learning and Centralised Training? 
If you could only pick you, probably you'd say:\n", - "* Federated Learning is distributed -- the model is trained on-device by the participating clients.\n", - "* Data remains private and is owned by a specific _client_ -- the data is never sent to the central server.\n", - "\n", - "The are several more differences. But the above two are the main ones to always consider and that are common to all flavours of Federated Learning (e.g. _cross-device_ or _cross-silo_). The remaining of this tutorial is going to focus in transforming the code we have written so far for the centralised setting and construct a Federated Learning pipeline using Flower and PyTorch.\n", - "\n", - "Let's begin! 🚀" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## One Client, One Data Partition\n", - "\n", - "To start designing a Federated Learning pipeline we need to meet one of the key properties in FL: each client has its own data partition. To accomplish this with the MNIST dataset, we are going to generate N random partitions, where N is the total number of clients in our FL system.\n", - "\n", - "We can use [Flower Datasets](https://flower.ai/docs/datasets/) to effortlessly obtain an off-the-shelf partitioned dataset or partition one that isn't pre-partitioned. Let's choose MNIST." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from datasets import Dataset\n", - "from flwr_datasets import FederatedDataset\n", - "from datasets.utils.logging import disable_progress_bar\n", - "\n", - "# Let's set a simulation involving a total of 100 clients\n", - "NUM_CLIENTS = 100\n", - "\n", - "# Download MNIST dataset and partition the \"train\" partition (so one can be assigned to each client)\n", - "mnist_fds = FederatedDataset(dataset=\"mnist\", partitioners={\"train\": NUM_CLIENTS})\n", - "# Let's keep the test set as is, and use it to evaluate the global model on the server\n", - "centralized_testset = mnist_fds.load_split(\"test\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's create a function that returns a set of transforms to apply to our images" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from torchvision.transforms import ToTensor, Normalize, Compose\n", - "\n", - "\n", - "def apply_transforms(batch):\n", - " \"\"\"Get transformation for MNIST dataset\"\"\"\n", - "\n", - " # transformation to convert images to tensors and apply normalization\n", - " transforms = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])\n", - " batch[\"image\"] = [transforms(img) for img in batch[\"image\"]]\n", - " return batch" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's next define how our FL clients will behave.\n", - "\n", - "## Defining a Flower Client\n", - "\n", - "You can think of a client in FL as an entity that owns some data and trains a model using this data. 
The caveat is that the model is being trained _collaboratively_ in Federation by multiple clients (sometimes up to hundreds of thousands) and, in most instances of FL, is sent by a central server.\n", - "\n", - "A Flower Client is a simple Python class with four distinct methods:\n", - "\n", - "* `fit()`: With this method, the client does on-device training for a number of epochs using its own data. At the end, the resulting model is sent back to the server for aggregation.\n", - "\n", - "* `evaluate()`: With this method, the server can evaluate the performance of the global model on the local validation set of a client. This can be used for instance when there is no centralised dataset on the server for validation/test. Also, this method can be use to asses the degree of personalisation of the model being federated.\n", - "\n", - "* `set_parameters()`: This method takes the parameters sent by the server and uses them to initialise the parameters of the local model that is ML framework specific (e.g. TF, Pytorch, etc).\n", - "\n", - "* `get_parameters()`: It extract the parameters from the local model and transforms them into a list of NumPy arrays. This ML framework-agnostic representation of the model will be sent to the server.\n", - "\n", - "Let's start by importing Flower!" 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import flwr as fl" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now let's defice our Flower Client class:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from collections import OrderedDict\n", - "from typing import Dict, List, Tuple\n", - "\n", - "from flwr.common import NDArrays, Scalar\n", - "\n", - "\n", - "class FlowerClient(fl.client.NumPyClient):\n", - " def __init__(self, trainloader, valloader) -> None:\n", - " super().__init__()\n", - "\n", - " self.trainloader = trainloader\n", - " self.valloader = valloader\n", - " self.model = Net(num_classes=10)\n", - " # Determine device\n", - " self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n", - " self.model.to(self.device) # send model to device\n", - "\n", - " def set_parameters(self, parameters):\n", - " \"\"\"With the model parameters received from the server,\n", - " overwrite the uninitialise model in this class with them.\"\"\"\n", - "\n", - " params_dict = zip(self.model.state_dict().keys(), parameters)\n", - " state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict})\n", - " # now replace the parameters\n", - " self.model.load_state_dict(state_dict, strict=True)\n", - "\n", - " def get_parameters(self, config: Dict[str, Scalar]):\n", - " \"\"\"Extract all model parameters and conver them to a list of\n", - " NumPy arryas. The server doesn't work with PyTorch/TF/etc.\"\"\"\n", - " return [val.cpu().numpy() for _, val in self.model.state_dict().items()]\n", - "\n", - " def fit(self, parameters, config):\n", - " \"\"\"This method train the model using the parameters sent by the\n", - " server on the dataset of this client. 
At then end, the parameters\n", - " of the locally trained model are communicated back to the server\"\"\"\n", - "\n", - " # copy parameters sent by the server into client's local model\n", - " self.set_parameters(parameters)\n", - "\n", - " # read from config\n", - " lr, epochs = config[\"lr\"], config[\"epochs\"]\n", - "\n", - " # Define the optimizer\n", - " optim = torch.optim.SGD(self.model.parameters(), lr=lr, momentum=0.9)\n", - "\n", - " # do local training\n", - " train(self.model, self.trainloader, optim, epochs=epochs, device=self.device)\n", - "\n", - " # return the model parameters to the server as well as extra info (number of training examples in this case)\n", - " return self.get_parameters({}), len(self.trainloader), {}\n", - "\n", - " def evaluate(self, parameters: NDArrays, config: Dict[str, Scalar]):\n", - " \"\"\"Evaluate the model sent by the server on this client's\n", - " local validation set. Then return performance metrics.\"\"\"\n", - "\n", - " self.set_parameters(parameters)\n", - " loss, accuracy = test(self.model, self.valloader, device=self.device)\n", - " # send statistics back to the server\n", - " return float(loss), len(self.valloader), {\"accuracy\": accuracy}" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Spend a few minutes to inspect the `FlowerClient` class above. Please ask questions if there is something unclear !\n", - "\n", - "Then keen-eyed among you might have realised that if we were to fuse the client's `fit()` and `evaluate()` methods, we'll end up with essentially the same as in the `run_centralised()` function we used in the Centralised Training part of this tutorial. And it is true!! In Federated Learning, the way clients perform local training makes use of the same principles as more traditional centralised setup. The key difference is that the dataset now is much smaller and it's never _\"seen\"_ by the entity running the FL workload (i.e. 
the central server).\n", - "\n", - "\n", - "Talking about the central server... we should define what strategy we want to make use of so the updated models sent from the clients back to the server at the end of the `fit()` method are aggregate.\n", - "\n", - "\n", - "## Choosing a Flower Strategy\n", - "\n", - "\n", - "A strategy sits at the core of the Federated Learning experiment. It is involved in all stages of a FL pipeline: sampling clients; sending the _global model_ to the clients so they can do `fit()`; receive the updated models from the clients and **aggregate** these to construct a new _global model_; define and execute global or federated evaluation; and more.\n", - "\n", - "Flower comes with [many strategies built-in](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy) and more to be available in the next release (`1.5` already!). For this tutorial, let's use what is arguable the most popular strategy out there: `FedAvg`.\n", - "\n", - "The way `FedAvg` works is simple but performs surprisingly well in practice. It is therefore one good strategy to start your experimentation. `FedAvg`, as its name implies, derives a new version of the _global model_ by taking the average of all the models sent by clients participating in the round. You can read all the details [in the paper](https://arxiv.org/abs/1602.05629).\n", - "\n", - "Let's see how we can define `FedAvg` using Flower. We use one of the callbacks called `evaluate_fn` so we can easily evaluate the state of the global model using a small centralised testset. Note this functionality is user-defined since it requires a choice in terms of ML-framework. (if you recall, Flower is framework agnostic).\n", - "\n", - "> This being said, centralised evaluation of the global model is only possible if there exists a centralised dataset that somewhat follows a similar distribution as the data that's spread across clients. 
In some cases having such centralised dataset for validation is not possible, so the only solution is to federate the evaluation of the _global model_. This is the default behaviour in Flower. If you don't specify teh `evaluate_fn` argument in your strategy, then, centralised global evaluation won't be performed." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def get_evaluate_fn(centralized_testset: Dataset):\n", - " \"\"\"This is a function that returns a function. The returned\n", - " function (i.e. `evaluate_fn`) will be executed by the strategy\n", - " at the end of each round to evaluate the stat of the global\n", - " model.\"\"\"\n", - "\n", - " def evaluate_fn(server_round: int, parameters, config):\n", - " \"\"\"This function is executed by the strategy it will instantiate\n", - " a model and replace its parameters with those from the global model.\n", - " The, the model will be evaluate on the test set (recall this is the\n", - " whole MNIST test set).\"\"\"\n", - "\n", - " model = Net(num_classes=10)\n", - "\n", - " # Determine device\n", - " device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n", - " model.to(device) # send model to device\n", - "\n", - " # set parameters to the model\n", - " params_dict = zip(model.state_dict().keys(), parameters)\n", - " state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict})\n", - " model.load_state_dict(state_dict, strict=True)\n", - "\n", - " # Apply transform to dataset\n", - " testset = centralized_testset.with_transform(apply_transforms)\n", - "\n", - " testloader = DataLoader(testset, batch_size=50)\n", - " # call test\n", - " loss, accuracy = test(model, testloader, device)\n", - " return loss, {\"accuracy\": accuracy}\n", - "\n", - " return evaluate_fn" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We could now define a strategy just as shown (commented) above. 
Instead, let's see how additional (but entirely optional) functionality can be easily added to our strategy. We are going to define two additional auxiliary functions to: (1) be able to configure how clients do local training; and (2) define a function to aggregate the metrics that clients return after running their `evaluate` methods:\n", - "\n", - "1. `fit_config()`. This is a function that will be executed inside the strategy when configuring a new `fit` round. This function is relatively simple and only requires as input argument the round at which the FL experiment is at. In this example we simply return a Python dictionary to specify the number of epochs and learning rate each client should made use of inside their `fit()` methods. A more versatile implementation would add more hyperparameters (e.g. the learning rate) and adjust them as the FL process advances (e.g. reducing the learning rate in later FL rounds).\n", - "2. `weighted_average()`: This is an optional function to pass to the strategy. It will be executed after an evaluation round (i.e. when client run `evaluate()`) and will aggregate the metrics clients return. In this example, we use this function to compute the weighted average accuracy of clients doing `evaluate()`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from flwr.common import Metrics\n", - "\n", - "\n", - "def fit_config(server_round: int) -> Dict[str, Scalar]:\n", - " \"\"\"Return a configuration with static batch size and (local) epochs.\"\"\"\n", - " config = {\n", - " \"epochs\": 1, # Number of local epochs done by clients\n", - " \"lr\": 0.01, # Learning rate to use by clients during fit()\n", - " }\n", - " return config\n", - "\n", - "\n", - "def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics:\n", - " \"\"\"Aggregation function for (federated) evaluation metrics, i.e. 
those returned by\n", - " the client's evaluate() method.\"\"\"\n", - " # Multiply accuracy of each client by number of examples used\n", - " accuracies = [num_examples * m[\"accuracy\"] for num_examples, m in metrics]\n", - " examples = [num_examples for num_examples, _ in metrics]\n", - "\n", - " # Aggregate and return custom metric (weighted average)\n", - " return {\"accuracy\": sum(accuracies) / sum(examples)}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we can define our strategy:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "strategy = fl.server.strategy.FedAvg(\n", - " fraction_fit=0.1, # Sample 10% of available clients for training\n", - " fraction_evaluate=0.05, # Sample 5% of available clients for evaluation\n", - " on_fit_config_fn=fit_config,\n", - " evaluate_metrics_aggregation_fn=weighted_average, # aggregates federated metrics\n", - " evaluate_fn=get_evaluate_fn(centralized_testset), # global evaluation function\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "So far we have:\n", - "* created the dataset partitions (one for each client)\n", - "* defined the client class\n", - "* decided on a strategy to use\n", - "\n", - "Now we just need to launch the Flower FL experiment... not so fast! just one final function: let's create another callback that the Simulation Engine will use in order to span VirtualClients. As you can see this is really simple: construct a FlowerClient object, assigning each their own data partition." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from torch.utils.data import DataLoader\n", - "\n", - "\n", - "def get_client_fn(dataset: FederatedDataset):\n", - " \"\"\"Return a function to construct a client.\n", - "\n", - " The VirtualClientEngine will execute this function whenever a client is sampled by\n", - " the strategy to participate.\n", - " \"\"\"\n", - "\n", - " def client_fn(cid: str) -> fl.client.Client:\n", - " \"\"\"Construct a FlowerClient with its own dataset partition.\"\"\"\n", - "\n", - " # Let's get the partition corresponding to the i-th client\n", - " client_dataset = dataset.load_partition(int(cid), \"train\")\n", - "\n", - " # Now let's split it into train (90%) and validation (10%)\n", - " client_dataset_splits = client_dataset.train_test_split(test_size=0.1, seed=42)\n", - "\n", - " trainset = client_dataset_splits[\"train\"]\n", - " valset = client_dataset_splits[\"test\"]\n", - "\n", - " # Now we apply the transform to each batch.\n", - " trainloader = DataLoader(\n", - " trainset.with_transform(apply_transforms), batch_size=32, shuffle=True\n", - " )\n", - " valloader = DataLoader(valset.with_transform(apply_transforms), batch_size=32)\n", - "\n", - " # Create and return client\n", - " return FlowerClient(trainloader, valloader).to_client()\n", - "\n", - " return client_fn\n", - "\n", - "\n", - "client_fn_callback = get_client_fn(mnist_fds)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we are ready to launch the FL experiment using Flower simulation:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "9ad8dcea-8004-4c6e-a025-e168da636c88" - }, - "outputs": [], - "source": [ - "# With a dictionary, you tell Flower's VirtualClientEngine that each\n", - "# client needs exclusive access to these many resources in 
order to run\n", - "client_resources = {\"num_cpus\": 1, \"num_gpus\": 0.0}\n", - "\n", - "# Let's disable tqdm progress bar in the main thread (used by the server)\n", - "disable_progress_bar()\n", - "\n", - "history = fl.simulation.start_simulation(\n", - " client_fn=client_fn_callback, # a callback to construct a client\n", - " num_clients=NUM_CLIENTS, # total number of clients in the experiment\n", - " config=fl.server.ServerConfig(num_rounds=10), # let's run for 10 rounds\n", - " strategy=strategy, # the strategy that will orchestrate the whole FL pipeline\n", - " client_resources=client_resources,\n", - " actor_kwargs={\n", - " \"on_actor_init_fn\": disable_progress_bar # disable tqdm on each actor/process spawning virtual clients\n", - " },\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Doing 10 rounds should take less than 2 minutes on a CPU-only Colab instance <-- Flower Simulation is fast! 🚀\n", - "\n", - "You can then use the resturned `History` object to either save the results to disk or do some visualisation (or both of course, or neither if you like chaos). Below you can see how you can plot the centralised accuracy obtainined at the end of each round (including at the very beginning of the experiment) for the _global model_. This is want the function `evaluate_fn()` that we passed to the strategy reports." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 508 - }, - "outputId": "d8eab106-cee9-4266-9082-0944882cdba8" - }, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "\n", - "print(f\"{history.metrics_centralized = }\")\n", - "\n", - "global_accuracy_centralised = history.metrics_centralized[\"accuracy\"]\n", - "round = [data[0] for data in global_accuracy_centralised]\n", - "acc = [100.0 * data[1] for data in global_accuracy_centralised]\n", - "plt.plot(round, acc)\n", - "plt.grid()\n", - "plt.ylabel(\"Accuracy (%)\")\n", - "plt.xlabel(\"Round\")\n", - "plt.title(\"MNIST - IID - 100 clients with 10 clients per round\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Congratulations! With that, you built a Flower client, customized it's instantiation through the `client_fn`, customized the server-side execution through a `FedAvg` strategy configured for this workload, and started a simulation with 100 clients (each holding their own individual partition of the MNIST dataset).\n", - "\n", - "Next, you can continue to explore more advanced Flower topics:\n", - "\n", - "- Deploy server and clients on different machines using `start_server` and `start_client`\n", - "- Customize the server-side execution through custom strategies\n", - "- Customize the client-side execution through `config` dictionaries\n", - "\n", - "Get all resources you need!\n", - "\n", - "* **[DOCS]** Our complete documenation: https://flower.ai/docs/\n", - "* **[Examples]** All Flower examples: https://flower.ai/docs/examples/\n", - "* **[VIDEO]** Our Youtube channel: https://www.youtube.com/@flowerlabs\n", - "\n", - "Don't forget to join our Slack channel: https://flower.ai/join-slack/\n" - ] - } - ], - "metadata": { - "colab": { - "provenance": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": 
"python", - "name": "python3" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/examples/simulation-pytorch/sim.py b/examples/simulation-pytorch/sim.py deleted file mode 100644 index db68e75653fc..000000000000 --- a/examples/simulation-pytorch/sim.py +++ /dev/null @@ -1,220 +0,0 @@ -import argparse -from collections import OrderedDict -from typing import Dict, Tuple, List - -import torch -from torch.utils.data import DataLoader - -import flwr as fl -from flwr.common import Metrics -from flwr.common.typing import Scalar - -from datasets import Dataset -from datasets.utils.logging import disable_progress_bar -from flwr_datasets import FederatedDataset - -from utils import Net, train, test, apply_transforms - -parser = argparse.ArgumentParser(description="Flower Simulation with PyTorch") - -parser.add_argument( - "--num_cpus", - type=int, - default=1, - help="Number of CPUs to assign to a virtual client", -) -parser.add_argument( - "--num_gpus", - type=float, - default=0.0, - help="Ratio of GPU memory to assign to a virtual client", -) - -NUM_CLIENTS = 100 -NUM_ROUNDS = 10 - - -# Flower client, adapted from Pytorch quickstart example -class FlowerClient(fl.client.NumPyClient): - def __init__(self, trainset, valset): - self.trainset = trainset - self.valset = valset - - # Instantiate model - self.model = Net() - - # Determine device - self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - self.model.to(self.device) # send model to device - - def get_parameters(self, config): - return [val.cpu().numpy() for _, val in self.model.state_dict().items()] - - def fit(self, parameters, config): - set_params(self.model, parameters) - - # Read from config - batch, epochs = config["batch_size"], config["epochs"] - - # Construct dataloader - trainloader = DataLoader(self.trainset, batch_size=batch, shuffle=True) - - # Define optimizer - optimizer = torch.optim.SGD(self.model.parameters(), lr=0.01, momentum=0.9) - # Train - train(self.model, 
trainloader, optimizer, epochs=epochs, device=self.device) - - # Return local model and statistics - return self.get_parameters({}), len(trainloader.dataset), {} - - def evaluate(self, parameters, config): - set_params(self.model, parameters) - - # Construct dataloader - valloader = DataLoader(self.valset, batch_size=64) - - # Evaluate - loss, accuracy = test(self.model, valloader, device=self.device) - - # Return statistics - return float(loss), len(valloader.dataset), {"accuracy": float(accuracy)} - - -def get_client_fn(dataset: FederatedDataset): - """Return a function to construct a client. - - The VirtualClientEngine will execute this function whenever a client is sampled by - the strategy to participate. - """ - - def client_fn(cid: str) -> fl.client.Client: - """Construct a FlowerClient with its own dataset partition.""" - - # Let's get the partition corresponding to the i-th client - client_dataset = dataset.load_partition(int(cid), "train") - - # Now let's split it into train (90%) and validation (10%) - client_dataset_splits = client_dataset.train_test_split(test_size=0.1, seed=42) - - trainset = client_dataset_splits["train"] - valset = client_dataset_splits["test"] - - # Now we apply the transform to each batch. 
- trainset = trainset.with_transform(apply_transforms) - valset = valset.with_transform(apply_transforms) - - # Create and return client - return FlowerClient(trainset, valset).to_client() - - return client_fn - - -def fit_config(server_round: int) -> Dict[str, Scalar]: - """Return a configuration with static batch size and (local) epochs.""" - config = { - "epochs": 1, # Number of local epochs done by clients - "batch_size": 32, # Batch size to use by clients during fit() - } - return config - - -def set_params(model: torch.nn.ModuleList, params: List[fl.common.NDArrays]): - """Set model weights from a list of NumPy ndarrays.""" - params_dict = zip(model.state_dict().keys(), params) - state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict}) - model.load_state_dict(state_dict, strict=True) - - -def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: - """Aggregation function for (federated) evaluation metrics, i.e. those returned by - the client's evaluate() method.""" - # Multiply accuracy of each client by number of examples used - accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] - examples = [num_examples for num_examples, _ in metrics] - - # Aggregate and return custom metric (weighted average) - return {"accuracy": sum(accuracies) / sum(examples)} - - -def get_evaluate_fn( - centralized_testset: Dataset, -): - """Return an evaluation function for centralized evaluation.""" - - def evaluate( - server_round: int, parameters: fl.common.NDArrays, config: Dict[str, Scalar] - ): - """Use the entire CIFAR-10 test set for evaluation.""" - - # Determine device - device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - - model = Net() - set_params(model, parameters) - model.to(device) - - # Apply transform to dataset - testset = centralized_testset.with_transform(apply_transforms) - - # Disable tqdm for dataset preprocessing - disable_progress_bar() - - testloader = DataLoader(testset, batch_size=50) 
- loss, accuracy = test(model, testloader, device=device) - - return loss, {"accuracy": accuracy} - - return evaluate - - -# Download MNIST dataset and partition it -mnist_fds = FederatedDataset(dataset="mnist", partitioners={"train": NUM_CLIENTS}) -centralized_testset = mnist_fds.load_split("test") - -# Configure the strategy -strategy = fl.server.strategy.FedAvg( - fraction_fit=0.1, # Sample 10% of available clients for training - fraction_evaluate=0.05, # Sample 5% of available clients for evaluation - min_available_clients=10, - on_fit_config_fn=fit_config, - evaluate_metrics_aggregation_fn=weighted_average, # Aggregate federated metrics - evaluate_fn=get_evaluate_fn(centralized_testset), # Global evaluation function -) - -# ClientApp for Flower-Next -client = fl.client.ClientApp( - client_fn=get_client_fn(mnist_fds), -) - -# ServerApp for Flower-Next -server = fl.server.ServerApp( - config=fl.server.ServerConfig(num_rounds=NUM_ROUNDS), - strategy=strategy, -) - - -def main(): - # Parse input arguments - args = parser.parse_args() - - # Resources to be assigned to each virtual client - client_resources = { - "num_cpus": args.num_cpus, - "num_gpus": args.num_gpus, - } - - # Start simulation - fl.simulation.start_simulation( - client_fn=get_client_fn(mnist_fds), - num_clients=NUM_CLIENTS, - client_resources=client_resources, - config=fl.server.ServerConfig(num_rounds=NUM_ROUNDS), - strategy=strategy, - actor_kwargs={ - "on_actor_init_fn": disable_progress_bar # disable tqdm on each actor/process spawning virtual clients - }, - ) - - -if __name__ == "__main__": - main() diff --git a/examples/simulation-pytorch/utils.py b/examples/simulation-pytorch/utils.py deleted file mode 100644 index 01f63cc94ba3..000000000000 --- a/examples/simulation-pytorch/utils.py +++ /dev/null @@ -1,64 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - -from torchvision.transforms import ToTensor, Normalize, Compose - - -# transformation to convert images to 
tensors and apply normalization -def apply_transforms(batch): - transforms = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))]) - batch["image"] = [transforms(img) for img in batch["image"]] - return batch - - -# Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz') -class Net(nn.Module): - def __init__(self, num_classes: int = 10) -> None: - super(Net, self).__init__() - self.conv1 = nn.Conv2d(1, 6, 5) - self.pool = nn.MaxPool2d(2, 2) - self.conv2 = nn.Conv2d(6, 16, 5) - self.fc1 = nn.Linear(16 * 4 * 4, 120) - self.fc2 = nn.Linear(120, 84) - self.fc3 = nn.Linear(84, num_classes) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.pool(F.relu(self.conv1(x))) - x = self.pool(F.relu(self.conv2(x))) - x = x.view(-1, 16 * 4 * 4) - x = F.relu(self.fc1(x)) - x = F.relu(self.fc2(x)) - x = self.fc3(x) - return x - - -# borrowed from Pytorch quickstart example -def train(net, trainloader, optim, epochs, device: str): - """Train the network on the training set.""" - criterion = torch.nn.CrossEntropyLoss() - net.train() - for _ in range(epochs): - for batch in trainloader: - images, labels = batch["image"].to(device), batch["label"].to(device) - optim.zero_grad() - loss = criterion(net(images), labels) - loss.backward() - optim.step() - - -# borrowed from Pytorch quickstart example -def test(net, testloader, device: str): - """Validate the network on the entire test set.""" - criterion = torch.nn.CrossEntropyLoss() - correct, loss = 0, 0.0 - net.eval() - with torch.no_grad(): - for data in testloader: - images, labels = data["image"].to(device), data["label"].to(device) - outputs = net(images) - loss += criterion(outputs, labels).item() - _, predicted = torch.max(outputs.data, 1) - correct += (predicted == labels).sum().item() - accuracy = correct / len(testloader.dataset) - return loss, accuracy diff --git a/examples/simulation-tensorflow/README.md b/examples/simulation-tensorflow/README.md deleted file mode 100644 index 917d7b34c7af..000000000000 
--- a/examples/simulation-tensorflow/README.md +++ /dev/null @@ -1,98 +0,0 @@ -# Flower Simulation example using TensorFlow/Keras - -This introductory example uses the simulation capabilities of Flower to simulate a large number of clients on a single machine. Take a look at the [Documentation](https://flower.ai/docs/framework/how-to-run-simulations.html) for a deep dive into how Flower simulation works. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to download, partition and preprocess the MNIST dataset. This examples uses 100 clients by default. - -## Running the example (via Jupyter Notebook) - -Run the example on Google Colab: [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/adap/flower/blob/main/examples/simulation-tensorflow/sim.ipynb) - -Alternatively, you can run `sim.ipynb` locally or in any other Jupyter environment. - -## Running the example - -Start by cloning the code example. We prepared a single-line command that you can copy into your shell which will checkout the example for you: - -```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/simulation-tensorflow . && rm -rf flower && cd simulation-tensorflow -``` - -This will create a new directory called `simulation-tensorflow` containing the following files: - -``` --- README.md <- Your're reading this right now --- sim.ipynb <- Example notebook --- sim.py <- Example code --- pyproject.toml <- Example dependencies --- requirements.txt <- Example dependencies -``` - -### Installing Dependencies - -Project dependencies (such as `tensorflow` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. 
We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. - -#### Poetry - -```shell -poetry install -poetry shell -``` - -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: - -```shell -poetry run python -c "import flwr" -``` - -If you don't see any errors you're good to go! - -#### pip - -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. - -```shell -pip install -r requirements.txt -``` - -### Run with `start_simulation()` - -Ensure you have activated your environment then: - -```bash -# and then run the example -python sim.py -``` - -You can adjust the CPU/GPU resources you assign to each of your virtual clients. By default, your clients will only use 2xCPU core. For example: - -```bash -# Will assign 2xCPUs to each client -python sim.py --num_cpus=2 - -# Will assign 2xCPUs and 25% of the GPU's VRAM to each client -# This means that you can have 4 concurrent clients on each GPU -# (assuming you have enough CPUs) -python sim.py --num_cpus=2 --num_gpus=0.25 -``` - -Because TensorFlow by default maps all the available VRAM, we need to [enable GPU memory growth](https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth), see how it is done in the example (`sim.py`) for both the "main" process (where the server/strategy runs) and for the clients (using the `actor_kwargs`) - -### Run with Flower Next (preview) - -Ensure you have activated your environment, then execute the command below. 
All `ClientApp` instances will run on CPU but the `ServerApp` will run on the GPU if one is available. Note that this is the case because the `Simulation Engine` only exposes certain resources to the `ClientApp` (based on the `client_resources` in `--backend-config`). For TensorFlow simulations, it is desirable to make use of TF's [memory growth](https://www.tensorflow.org/api_docs/python/tf/config/experimental/set_memory_growth) feature. You can enable that easily with the `--enable-tf-gpu-growth` flag. - -```bash -# Run with the default backend-config. -# `--server-app` points to the `server` object in the sim.py file in this example. -# `--client-app` points to the `client` object in the sim.py file in this example. -flower-simulation --client-app=sim:client --server-app=sim:server --num-supernodes=100 --enable-tf-gpu-growth -``` - -You can change the default resources assigned to each `ClientApp` using the `--backend-config` argument. - -```bash -# Tells the VCE to reserve 2x CPUs and 25% of available VRAM for each ClientApp -flower-simulation --client-app=sim:client --server-app=sim:server --num-supernodes=100 \ - --backend-config='{"client_resources": {"num_cpus":2, "num_gpus":0.25}}' --enable-tf-gpu-growth -``` - -Take a look at the [Documentation](https://flower.ai/docs/framework/how-to-run-simulations.html) for more details on how you can customise your simulation. 
diff --git a/examples/simulation-tensorflow/pyproject.toml b/examples/simulation-tensorflow/pyproject.toml deleted file mode 100644 index ad8cc2032b2d..000000000000 --- a/examples/simulation-tensorflow/pyproject.toml +++ /dev/null @@ -1,16 +0,0 @@ -[build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" - -[tool.poetry] -name = "simulation-tensorflow" -version = "0.1.0" -description = "Federated Learning Simulation with Flower and Tensorflow" -authors = ["The Flower Authors "] - -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = { extras = ["simulation"], version = ">=1.0,<2.0" } -flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } -tensorflow = { version = "^2.9.1, !=2.11.1", markers = "platform_machine == 'x86_64'" } -tensorflow-macos = { version = "^2.9.1, !=2.11.1", markers = "sys_platform == 'darwin' and platform_machine == 'arm64'" } diff --git a/examples/simulation-tensorflow/requirements.txt b/examples/simulation-tensorflow/requirements.txt deleted file mode 100644 index bb69a87be1b4..000000000000 --- a/examples/simulation-tensorflow/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -flwr[simulation]>=1.0, <2.0 -flwr-datasets[vision]>=0.0.2, <1.0.0 -tensorflow-macos>=2.9.1, != 2.11.1 ; sys_platform == "darwin" and platform_machine == "arm64" -tensorflow-cpu>=2.9.1, != 2.11.1 ; platform_machine == "x86_64" diff --git a/examples/simulation-tensorflow/sim.ipynb b/examples/simulation-tensorflow/sim.ipynb deleted file mode 100644 index 26b7260b5f1c..000000000000 --- a/examples/simulation-tensorflow/sim.ipynb +++ /dev/null @@ -1,347 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Flower Quickstart (Simulation with TensorFlow/Keras)\n", - "\n", - "Welcome to Flower, a friendly federated learning framework!\n", - "\n", - "In this notebook, we'll simulate a federated learning system with 100 clients. 
The clients will use TensorFlow/Keras to define model training and evaluation. Let's start by installing Flower (published as `flwr` on PyPI) with the `simulation` extra:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!pip install -q flwr[\"simulation\"] tensorflow\n", - "!pip install -q flwr_datasets[\"vision\"]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's also install Matplotlib so we can make some plots once the simulation is completed" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!pip install matplotlib" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Next, we import the required dependencies. The most important imports are Flower (`flwr`) and TensorFlow:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from typing import Dict, List, Tuple\n", - "\n", - "import tensorflow as tf\n", - "\n", - "import flwr as fl\n", - "from flwr.common import Metrics\n", - "from flwr.simulation.ray_transport.utils import enable_tf_gpu_growth\n", - "\n", - "from datasets import Dataset\n", - "from flwr_datasets import FederatedDataset\n", - "\n", - "VERBOSE = 0\n", - "NUM_CLIENTS = 100" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's start by defining the model we want to federated. Since we will be working with MNIST, using a fully connected model is sufficient. You can of course customize this model." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def get_model():\n", - " \"\"\"Constructs a simple model architecture suitable for MNIST.\"\"\"\n", - " model = tf.keras.models.Sequential(\n", - " [\n", - " tf.keras.layers.Flatten(input_shape=(28, 28)),\n", - " tf.keras.layers.Dense(128, activation=\"relu\"),\n", - " tf.keras.layers.Dropout(0.2),\n", - " tf.keras.layers.Dense(10, activation=\"softmax\"),\n", - " ]\n", - " )\n", - " model.compile(\"adam\", \"sparse_categorical_crossentropy\", metrics=[\"accuracy\"])\n", - " return model" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "With that out of the way, let's move on to the interesting bits. Federated learning systems consist of a server and multiple clients. In Flower, we create clients by implementing subclasses of `flwr.client.Client` or `flwr.client.NumPyClient`. We use `NumPyClient` in this tutorial because it is easier to implement and requires us to write less boilerplate.\n", - "\n", - "To implement the Flower client, we create a subclass of `flwr.client.NumPyClient` and implement the three methods `get_parameters`, `fit`, and `evaluate`:\n", - "\n", - "- `get_parameters`: Return the current local model parameters\n", - "- `fit`: Receive model parameters from the server, train the model parameters on the local data, and return the (updated) model parameters to the server \n", - "- `evaluate`: Received model parameters from the server, evaluate the model parameters on the local data, and return the evaluation result to the server\n", - "\n", - "We mentioned that our clients will use TensorFlow/Keras for the model training and evaluation. 
Keras models provide methods that make the implementation straightforward: we can update the local model with server-provides parameters through `model.set_weights`, we can train/evaluate the model through `fit/evaluate`, and we can get the updated model parameters through `model.get_weights`.\n", - "\n", - "Let's see a simple implementation:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "class FlowerClient(fl.client.NumPyClient):\n", - " def __init__(self, trainset, valset) -> None:\n", - " # Create model\n", - " self.model = get_model()\n", - " self.trainset = trainset\n", - " self.valset = valset\n", - "\n", - " def get_parameters(self, config):\n", - " return self.model.get_weights()\n", - "\n", - " def fit(self, parameters, config):\n", - " self.model.set_weights(parameters)\n", - " self.model.fit(self.trainset, epochs=1, verbose=VERBOSE)\n", - " return self.model.get_weights(), len(self.trainset), {}\n", - "\n", - " def evaluate(self, parameters, config):\n", - " self.model.set_weights(parameters)\n", - " loss, acc = self.model.evaluate(self.valset, verbose=VERBOSE)\n", - " return loss, len(self.valset), {\"accuracy\": acc}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Our class `FlowerClient` defines how local training/evaluation will be performed and allows Flower to call the local training/evaluation through `fit` and `evaluate`. Each instance of `FlowerClient` represents a *single client* in our federated learning system. Federated learning systems have multiple clients (otherwise, there's not much to federate, is there?), so each client will be represented by its own instance of `FlowerClient`. If we have, for example, three clients in our workload, we'd have three instances of `FlowerClient`. 
Flower calls `FlowerClient.fit` on the respective instance when the server selects a particular client for training (and `FlowerClient.evaluate` for evaluation).\n", - "\n", - "In this notebook, we want to simulate a federated learning system with 100 clients on a single machine. This means that the server and all 100 clients will live on a single machine and share resources such as CPU, GPU, and memory. Having 100 clients would mean having 100 instances of `FlowerClient` in memory. Doing this on a single machine can quickly exhaust the available memory resources, even if only a subset of these clients participates in a single round of federated learning.\n", - "\n", - "In addition to the regular capabilities where server and clients run on multiple machines, Flower, therefore, provides special simulation capabilities that create `FlowerClient` instances only when they are actually necessary for training or evaluation. To enable the Flower framework to create clients when necessary, we need to implement a function called `client_fn` that creates a `FlowerClient` instance on demand. Flower calls `client_fn` whenever it needs an instance of one particular client to call `fit` or `evaluate` (those instances are usually discarded after use). Clients are identified by a client ID, or short `cid`. The `cid` can be used, for example, to load different local data partitions for each client" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We now define four auxiliary functions for this example (note the last two are entirely optional):\n", - "* `get_client_fn()`: Is a function that returns another function. The returned `client_fn` will be executed by Flower's VirtualClientEngine each time a new _virtual_ client (i.e. a client that is simulated in a Python process) needs to be spawn. When are virtual clients spawned? Each time the strategy samples them to do either `fit()` (i.e. 
train the global model on the local data of a particular client) or `evaluate()` (i.e. evaluate the global model on the validation set of a given client).\n", - "\n", - "* `weighted_average()`: This is an optional function to pass to the strategy. It will be executed after an evaluation round (i.e. when client run `evaluate()`) and will aggregate the metrics clients return. In this example, we use this function to compute the weighted average accuracy of clients doing `evaluate()`.\n", - "\n", - "* `get_evaluate_fn()`: This is again a function that returns another function. The returned function will be executed by the strategy at the end of a `fit()` round and after a new global model has been obtained after aggregation. This is an optional argument for Flower strategies. In this example, we use the whole MNIST test set to perform this server-side evaluation." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def get_client_fn(dataset: FederatedDataset):\n", - " \"\"\"Return a function to construct a client.\n", - "\n", - " The VirtualClientEngine will execute this function whenever a client is sampled by\n", - " the strategy to participate.\n", - " \"\"\"\n", - "\n", - " def client_fn(cid: str) -> fl.client.Client:\n", - " \"\"\"Construct a FlowerClient with its own dataset partition.\"\"\"\n", - "\n", - " # Extract partition for client with id = cid\n", - " client_dataset = dataset.load_partition(int(cid), \"train\")\n", - "\n", - " # Now let's split it into train (90%) and validation (10%)\n", - " client_dataset_splits = client_dataset.train_test_split(test_size=0.1, seed=42)\n", - "\n", - " trainset = client_dataset_splits[\"train\"].to_tf_dataset(\n", - " columns=\"image\", label_cols=\"label\", batch_size=32\n", - " )\n", - " valset = client_dataset_splits[\"test\"].to_tf_dataset(\n", - " columns=\"image\", label_cols=\"label\", batch_size=64\n", - " )\n", - "\n", - " # Create and return 
client\n", - " return FlowerClient(trainset, valset).to_client()\n", - "\n", - " return client_fn\n", - "\n", - "\n", - "def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics:\n", - " \"\"\"Aggregation function for (federated) evaluation metrics, i.e. those returned by\n", - " the client's evaluate() method.\"\"\"\n", - " # Multiply accuracy of each client by number of examples used\n", - " accuracies = [num_examples * m[\"accuracy\"] for num_examples, m in metrics]\n", - " examples = [num_examples for num_examples, _ in metrics]\n", - "\n", - " # Aggregate and return custom metric (weighted average)\n", - " return {\"accuracy\": sum(accuracies) / sum(examples)}\n", - "\n", - "\n", - "def get_evaluate_fn(testset: Dataset):\n", - " \"\"\"Return an evaluation function for server-side (i.e. centralised) evaluation.\"\"\"\n", - "\n", - " # The `evaluate` function will be called after every round by the strategy\n", - " def evaluate(\n", - " server_round: int,\n", - " parameters: fl.common.NDArrays,\n", - " config: Dict[str, fl.common.Scalar],\n", - " ):\n", - " model = get_model() # Construct the model\n", - " model.set_weights(parameters) # Update model with the latest parameters\n", - " loss, accuracy = model.evaluate(testset, verbose=VERBOSE)\n", - " return loss, {\"accuracy\": accuracy}\n", - "\n", - " return evaluate" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We now have `FlowerClient` which defines client-side training and evaluation, and `client_fn`, which allows Flower to create `FlowerClient` instances whenever it needs to call `fit` or `evaluate` on one particular client. The last step is to start the actual simulation using `flwr.simulation.start_simulation`. \n", - "\n", - "The function `start_simulation` accepts a number of arguments, amongst them the `client_fn` used to create `FlowerClient` instances, the number of clients to simulate `num_clients`, the number of rounds `num_rounds`, and the strategy. 
The strategy encapsulates the federated learning approach/algorithm, for example, *Federated Averaging* (FedAvg).\n", - "\n", - "Flower comes with a number of built-in strategies, but we can also use our own strategy implementations to customize nearly all aspects of the federated learning approach. For this example, we use the built-in `FedAvg` implementation and customize it using a few basic parameters. The last step is the actual call to `start_simulation` which - you guessed it - actually starts the simulation.\n", - "\n", - "We can use [Flower Datasets](https://flower.ai/docs/datasets/) to effortlessly obtain an off-the-shelf partitioned dataset or partition one that isn't pre-partitioned. Let's choose MNIST." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Enable GPU growth in your main process\n", - "enable_tf_gpu_growth()\n", - "\n", - "# Download MNIST dataset and partition it\n", - "mnist_fds = FederatedDataset(dataset=\"mnist\", partitioners={\"train\": NUM_CLIENTS})\n", - "# Get the whole test set for centralised evaluation\n", - "centralized_testset = mnist_fds.load_split(\"test\").to_tf_dataset(\n", - " columns=\"image\", label_cols=\"label\", batch_size=64\n", - ")\n", - "\n", - "\n", - "# Create FedAvg strategy\n", - "strategy = fl.server.strategy.FedAvg(\n", - " fraction_fit=0.1, # Sample 10% of available clients for training\n", - " fraction_evaluate=0.05, # Sample 5% of available clients for evaluation\n", - " min_fit_clients=10, # Never sample less than 10 clients for training\n", - " min_evaluate_clients=5, # Never sample less than 5 clients for evaluation\n", - " min_available_clients=int(\n", - " NUM_CLIENTS * 0.75\n", - " ), # Wait until at least 75 clients are available\n", - " evaluate_metrics_aggregation_fn=weighted_average, # aggregates federated metrics\n", - " evaluate_fn=get_evaluate_fn(centralized_testset), # global evaluation function\n", - ")\n", - "\n", - "# 
With a dictionary, you tell Flower's VirtualClientEngine that each\n", - "# client needs exclusive access to these many resources in order to run\n", - "client_resources = {\"num_cpus\": 1, \"num_gpus\": 0.0}\n", - "\n", - "# Start simulation\n", - "history = fl.simulation.start_simulation(\n", - "    client_fn=get_client_fn(mnist_fds),\n", - "    num_clients=NUM_CLIENTS,\n", - "    config=fl.server.ServerConfig(num_rounds=10),\n", - "    strategy=strategy,\n", - "    client_resources=client_resources,\n", - "    actor_kwargs={\n", - "        \"on_actor_init_fn\": enable_tf_gpu_growth  # Enable GPU growth upon actor init.\n", - "    },\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can then use the returned History object to either save the results to disk or do some visualisation (or both of course, or neither if you like chaos). Below you can see how you can plot the centralised accuracy obtained at the end of each round (including at the very beginning of the experiment) for the global model. This is what the function `evaluate_fn()` that we passed to the strategy reports." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "\n", - "print(f\"{history.metrics_centralized = }\")\n", - "\n", - "global_accuracy_centralised = history.metrics_centralized[\"accuracy\"]\n", - "round = [data[0] for data in global_accuracy_centralised]\n", - "acc = [100.0 * data[1] for data in global_accuracy_centralised]\n", - "plt.plot(round, acc)\n", - "plt.grid()\n", - "plt.ylabel(\"Accuracy (%)\")\n", - "plt.xlabel(\"Round\")\n", - "plt.title(\"MNIST - IID - 100 clients with 10 clients per round\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Congratulations! 
With that, you built a Flower client, customized its instantiation through the `client_fn`, customized the server-side execution through a `FedAvg` strategy configured for this workload, and started a simulation with 100 clients (each holding their own individual partition of the MNIST dataset).\n", - "\n", - "Next, you can continue to explore more advanced Flower topics:\n", - "\n", - "- Deploy server and clients on different machines using `start_server` and `start_client`\n", - "- Customize the server-side execution through custom strategies\n", - "- Customize the client-side execution through `config` dictionaries\n", - "\n", - "Get all resources you need!\n", - "\n", - "* **[DOCS]** Our complete documentation: https://flower.ai/docs/\n", - "* **[Examples]** All Flower examples: https://flower.ai/docs/examples/\n", - "* **[VIDEO]** Our Youtube channel: https://www.youtube.com/@flowerlabs\n", - "\n", - "Don't forget to join our Slack channel: https://flower.ai/join-slack/" - ] - } - ], - "metadata": { - "colab": { - "name": "flower.ipynb", - "provenance": [] - }, - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/examples/simulation-tensorflow/sim.py b/examples/simulation-tensorflow/sim.py deleted file mode 100644 index 4014e3c6be72..000000000000 --- a/examples/simulation-tensorflow/sim.py +++ /dev/null @@ -1,188 +0,0 @@ -import os -import argparse -from typing import Dict, List, Tuple - -import tensorflow as tf - -import flwr as fl -from flwr.common import Metrics -from flwr.simulation.ray_transport.utils import enable_tf_gpu_growth - -from datasets import Dataset -from flwr_datasets import FederatedDataset - -# Make TensorFlow logs less verbose -os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" - -parser = argparse.ArgumentParser(description="Flower Simulation with Tensorflow/Keras") - -parser.add_argument( - "--num_cpus", - type=int, - default=1, - 
help="Number of CPUs to assign to a virtual client", -) -parser.add_argument( - "--num_gpus", - type=float, - default=0.0, - help="Ratio of GPU memory to assign to a virtual client", -) - -NUM_CLIENTS = 100 -NUM_ROUNDS = 10 -VERBOSE = 0 - - -class FlowerClient(fl.client.NumPyClient): - def __init__(self, trainset, valset) -> None: - # Create model - self.model = get_model() - self.trainset = trainset - self.valset = valset - - def get_parameters(self, config): - return self.model.get_weights() - - def fit(self, parameters, config): - self.model.set_weights(parameters) - self.model.fit(self.trainset, epochs=1, verbose=VERBOSE) - return self.model.get_weights(), len(self.trainset), {} - - def evaluate(self, parameters, config): - self.model.set_weights(parameters) - loss, acc = self.model.evaluate(self.valset, verbose=VERBOSE) - return loss, len(self.valset), {"accuracy": acc} - - -def get_model(): - """Constructs a simple model architecture suitable for MNIST.""" - model = tf.keras.models.Sequential( - [ - tf.keras.layers.Flatten(input_shape=(28, 28)), - tf.keras.layers.Dense(128, activation="relu"), - tf.keras.layers.Dropout(0.2), - tf.keras.layers.Dense(10, activation="softmax"), - ] - ) - model.compile("adam", "sparse_categorical_crossentropy", metrics=["accuracy"]) - return model - - -def get_client_fn(dataset: FederatedDataset): - """Return a function to construct a client. - - The VirtualClientEngine will execute this function whenever a client is sampled by - the strategy to participate. 
- """ - - def client_fn(cid: str) -> fl.client.Client: - """Construct a FlowerClient with its own dataset partition.""" - - # Extract partition for client with id = cid - client_dataset = dataset.load_partition(int(cid), "train") - - # Now let's split it into train (90%) and validation (10%) - client_dataset_splits = client_dataset.train_test_split(test_size=0.1, seed=42) - - trainset = client_dataset_splits["train"].to_tf_dataset( - columns="image", label_cols="label", batch_size=32 - ) - valset = client_dataset_splits["test"].to_tf_dataset( - columns="image", label_cols="label", batch_size=64 - ) - - # Create and return client - return FlowerClient(trainset, valset).to_client() - - return client_fn - - -def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: - """Aggregation function for (federated) evaluation metrics. - - It will aggregate those metrics returned by the client's evaluate() method. - """ - # Multiply accuracy of each client by number of examples used - accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] - examples = [num_examples for num_examples, _ in metrics] - - # Aggregate and return custom metric (weighted average) - return {"accuracy": sum(accuracies) / sum(examples)} - - -def get_evaluate_fn(testset: Dataset): - """Return an evaluation function for server-side (i.e. 
centralised) evaluation.""" - - # The `evaluate` function will be called after every round by the strategy - def evaluate( - server_round: int, - parameters: fl.common.NDArrays, - config: Dict[str, fl.common.Scalar], - ): - model = get_model() # Construct the model - model.set_weights(parameters) # Update model with the latest parameters - loss, accuracy = model.evaluate(testset, verbose=VERBOSE) - return loss, {"accuracy": accuracy} - - return evaluate - - -# Download MNIST dataset and partition it -mnist_fds = FederatedDataset(dataset="mnist", partitioners={"train": NUM_CLIENTS}) -# Get the whole test set for centralised evaluation -centralized_testset = mnist_fds.load_split("test").to_tf_dataset( - columns="image", label_cols="label", batch_size=64 -) - -# Create FedAvg strategy -strategy = fl.server.strategy.FedAvg( - fraction_fit=0.1, # Sample 10% of available clients for training - fraction_evaluate=0.05, # Sample 5% of available clients for evaluation - min_fit_clients=10, # Never sample less than 10 clients for training - evaluate_metrics_aggregation_fn=weighted_average, # aggregates federated metrics - evaluate_fn=get_evaluate_fn(centralized_testset), # global evaluation function -) - - -# ClientApp for Flower-Next -client = fl.client.ClientApp( - client_fn=get_client_fn(mnist_fds), -) - -# ServerApp for Flower-Next -server = fl.server.ServerApp( - config=fl.server.ServerConfig(num_rounds=NUM_ROUNDS), - strategy=strategy, -) - - -def main() -> None: - # Parse input arguments - args = parser.parse_args() - - # With a dictionary, you tell Flower's VirtualClientEngine that each - # client needs exclusive access to these many resources in order to run - client_resources = { - "num_cpus": args.num_cpus, - "num_gpus": args.num_gpus, - } - - # Start simulation - fl.simulation.start_simulation( - client_fn=get_client_fn(mnist_fds), - num_clients=NUM_CLIENTS, - config=fl.server.ServerConfig(NUM_ROUNDS), - strategy=strategy, - client_resources=client_resources, - 
actor_kwargs={ - "on_actor_init_fn": enable_tf_gpu_growth # Enable GPU growth upon actor init - # does nothing if `num_gpus` in client_resources is 0.0 - }, - ) - - -if __name__ == "__main__": - # Enable GPU growth in your main process - enable_tf_gpu_growth() - main() diff --git a/examples/sklearn-logreg-mnist/README.md b/examples/sklearn-logreg-mnist/README.md index 12b1a5e3bc1a..7c75e2ecfb85 100644 --- a/examples/sklearn-logreg-mnist/README.md +++ b/examples/sklearn-logreg-mnist/README.md @@ -1,80 +1,67 @@ -# Flower Example using scikit-learn +--- +tags: [basic, vision, logistic regression, fds] +dataset: [MNIST] +framework: [scikit-learn] +--- -This example of Flower uses `scikit-learn`'s `LogisticRegression` model to train a federated learning system. It will help you understand how to adapt Flower for use with `scikit-learn`. -Running this example in itself is quite easy. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to download, partition and preprocess the MNIST dataset. - -## Project Setup - -Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: - -```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/sklearn-logreg-mnist . && rm -rf flower && cd sklearn-logreg-mnist -``` +# Flower Logistic Regression Example using scikit-learn and Flower (Quickstart Example) -This will create a new directory called `sklearn-logreg-mnist` containing the following files: - -```shell --- pyproject.toml --- requirements.txt --- client.py --- server.py --- utils.py --- README.md -``` +This example of Flower uses `scikit-learn`'s [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) model to train a federated learning system. It will help you understand how to adapt Flower for use with `scikit-learn`. +Running this example in itself is quite easy. 
This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to download, partition and preprocess the MNIST dataset. -### Installing Dependencies +## Set up the project -Project dependencies (such as `scikit-learn` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. +### Clone the project -#### Poetry +Start by cloning the example project: ```shell -poetry install -poetry shell +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/sklearn-logreg-mnist . \ + && rm -rf _tmp && cd sklearn-logreg-mnist ``` -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: +This will create a new directory called `sklearn-logreg-mnist` with the following structure: ```shell -poetry run python3 -c "import flwr" +sklearn-logreg-mnist +├── README.md +├── pyproject.toml # Project metadata like dependencies and configs +└── sklearn_example + ├── __init__.py + ├── client_app.py # Defines your ClientApp + ├── server_app.py # Defines your ServerApp + └── task.py # Defines your model, training and data loading ``` -If you don't see any errors you're good to go! - -#### pip +### Install dependencies and project -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. +Install the dependencies defined in `pyproject.toml` as well as the `sklearn_example` package. -```shell -pip install -r requirements.txt +```bash +pip install -e . 
``` -## Run Federated Learning with scikit-learn and Flower - -Afterwards you are ready to start the Flower server as well as the clients. You can simply start the server in a terminal as follows: - -```shell -poetry run python3 server.py -``` +## Run the project -Now you are ready to start the Flower clients which will participate in the learning. To do so simply open two or more terminals and run the following command in each: +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you use the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -Start client 1 in the first terminal: +### Run with the Simulation Engine -```shell -python3 client.py --partition-id 0 # or any integer in {0-9} +```bash +flwr run . ``` -Start client 2 in the second terminal: +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: -```shell -python3 client.py --partition-id 1 # or any integer in {0-9} +```bash +flwr run . --run-config "num-server-rounds=5 fraction-fit=0.25" ``` -Alternatively, you can run all of it in one shell as follows: +> \[!TIP\] +> For a more detailed walk-through check our [quickstart scikit-learn tutorial](https://flower.ai/docs/framework/tutorial-quickstart-scikitlearn.html) -```bash -bash run.sh -``` +### Run with the Deployment Engine -You will see that Flower is starting a federated training. +> \[!NOTE\] +> An update to this example will show how to run this Flower application with the Deployment Engine and TLS certificates, or with Docker. 
diff --git a/examples/sklearn-logreg-mnist/client.py b/examples/sklearn-logreg-mnist/client.py deleted file mode 100644 index 1e9349df1acc..000000000000 --- a/examples/sklearn-logreg-mnist/client.py +++ /dev/null @@ -1,67 +0,0 @@ -import argparse -import warnings - -from sklearn.linear_model import LogisticRegression -from sklearn.metrics import log_loss - -import flwr as fl -import utils -from flwr_datasets import FederatedDataset - -if __name__ == "__main__": - N_CLIENTS = 10 - - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--partition-id", - type=int, - choices=range(0, N_CLIENTS), - required=True, - help="Specifies the artificial data partition", - ) - args = parser.parse_args() - partition_id = args.partition_id - - # Load the partition data - fds = FederatedDataset(dataset="mnist", partitioners={"train": N_CLIENTS}) - - dataset = fds.load_partition(partition_id, "train").with_format("numpy") - X, y = dataset["image"].reshape((len(dataset), -1)), dataset["label"] - # Split the on edge data: 80% train, 20% test - X_train, X_test = X[: int(0.8 * len(X))], X[int(0.8 * len(X)) :] - y_train, y_test = y[: int(0.8 * len(y))], y[int(0.8 * len(y)) :] - - # Create LogisticRegression Model - model = LogisticRegression( - penalty="l2", - max_iter=1, # local epoch - warm_start=True, # prevent refreshing weights when fitting - ) - - # Setting initial parameters, akin to model.compile for keras models - utils.set_initial_params(model) - - # Define Flower client - class MnistClient(fl.client.NumPyClient): - def get_parameters(self, config): # type: ignore - return utils.get_model_parameters(model) - - def fit(self, parameters, config): # type: ignore - utils.set_model_params(model, parameters) - # Ignore convergence failure due to low local epochs - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - model.fit(X_train, y_train) - print(f"Training finished for round {config['server_round']}") - return 
utils.get_model_parameters(model), len(X_train), {} - - def evaluate(self, parameters, config): # type: ignore - utils.set_model_params(model, parameters) - loss = log_loss(y_test, model.predict_proba(X_test)) - accuracy = model.score(X_test, y_test) - return loss, len(X_test), {"accuracy": accuracy} - - # Start Flower client - fl.client.start_client( - server_address="0.0.0.0:8080", client=MnistClient().to_client() - ) diff --git a/examples/sklearn-logreg-mnist/pyproject.toml b/examples/sklearn-logreg-mnist/pyproject.toml index 58cc5ca4a02e..75dae57a0a40 100644 --- a/examples/sklearn-logreg-mnist/pyproject.toml +++ b/examples/sklearn-logreg-mnist/pyproject.toml @@ -1,19 +1,40 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] -name = "sklearn-mnist" -version = "0.1.0" +[project] +name = "sklearnexample" +version = "1.0.0" +license = "Apache-2.0" description = "Federated learning with scikit-learn and Flower" authors = [ - "The Flower Authors ", - "Kaushik Amar Das ", + { name = "The Flower Authors", email = "hello@flower.ai" }, + { name = "Kaushik Amar Das", email = "kaushik.das@iiitg.ac.in" }, ] +dependencies = [ + "flwr[simulation]>=1.12.0", + "flwr-datasets[vision]>=0.3.0", + "numpy<2.0.0", + "scikit-learn~=1.2.2", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "sklearnexample.server_app:app" +clientapp = "sklearnexample.client_app:app" + +[tool.flwr.app.config] +penalty = "l2" +num-server-rounds = 3 +fraction-fit = 0.5 + +[tool.flwr.federations] +default = "local-simulation" -[tool.poetry.dependencies] -python = "^3.8" -flwr = ">=1.0,<2.0" -# flwr = { path = "../../", develop = true } # Development -flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } -scikit-learn = "^1.1.1" +[tool.flwr.federations.local-simulation] 
+options.num-supernodes = 10 diff --git a/examples/sklearn-logreg-mnist/requirements.txt b/examples/sklearn-logreg-mnist/requirements.txt deleted file mode 100644 index 50da9ace3630..000000000000 --- a/examples/sklearn-logreg-mnist/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -flwr>=1.0, <2.0 -flwr-datasets[vision]>=0.0.2, <1.0.0 -numpy~=1.21.1 -scikit_learn~=1.2.2 diff --git a/examples/sklearn-logreg-mnist/run.sh b/examples/sklearn-logreg-mnist/run.sh deleted file mode 100755 index f770ca05f8f4..000000000000 --- a/examples/sklearn-logreg-mnist/run.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/ - -echo "Starting server" -python server.py & -sleep 3 # Sleep for 3s to give the server enough time to start - -for i in $(seq 0 1); do - echo "Starting client $i" - python client.py --partition-id "${i}" & -done - -# This will allow you to use CTRL+C to stop all background processes -trap 'trap - SIGTERM && kill -- -$$' SIGINT SIGTERM -# Wait for all background processes to complete -wait diff --git a/examples/sklearn-logreg-mnist/server.py b/examples/sklearn-logreg-mnist/server.py deleted file mode 100644 index e0af91fabcee..000000000000 --- a/examples/sklearn-logreg-mnist/server.py +++ /dev/null @@ -1,47 +0,0 @@ -import flwr as fl -import utils -from sklearn.metrics import log_loss -from sklearn.linear_model import LogisticRegression -from typing import Dict - -from flwr_datasets import FederatedDataset - - -def fit_round(server_round: int) -> Dict: - """Send round number to client.""" - return {"server_round": server_round} - - -def get_evaluate_fn(model: LogisticRegression): - """Return an evaluation function for server-side evaluation.""" - - # Load test data here to avoid the overhead of doing it in `evaluate` itself - fds = FederatedDataset(dataset="mnist", partitioners={"train": 10}) - dataset = fds.load_split("test").with_format("numpy") - X_test, y_test = 
dataset["image"].reshape((len(dataset), -1)), dataset["label"] - - # The `evaluate` function will be called after every round - def evaluate(server_round, parameters: fl.common.NDArrays, config): - # Update model with the latest parameters - utils.set_model_params(model, parameters) - loss = log_loss(y_test, model.predict_proba(X_test)) - accuracy = model.score(X_test, y_test) - return loss, {"accuracy": accuracy} - - return evaluate - - -# Start Flower server for five rounds of federated learning -if __name__ == "__main__": - model = LogisticRegression() - utils.set_initial_params(model) - strategy = fl.server.strategy.FedAvg( - min_available_clients=2, - evaluate_fn=get_evaluate_fn(model), - on_fit_config_fn=fit_round, - ) - fl.server.start_server( - server_address="0.0.0.0:8080", - strategy=strategy, - config=fl.server.ServerConfig(num_rounds=5), - ) diff --git a/examples/sklearn-logreg-mnist/sklearnexample/__init__.py b/examples/sklearn-logreg-mnist/sklearnexample/__init__.py new file mode 100644 index 000000000000..e989fe23da02 --- /dev/null +++ b/examples/sklearn-logreg-mnist/sklearnexample/__init__.py @@ -0,0 +1 @@ +"""sklearn_example.""" diff --git a/examples/sklearn-logreg-mnist/sklearnexample/client_app.py b/examples/sklearn-logreg-mnist/sklearnexample/client_app.py new file mode 100644 index 000000000000..0f0cb8f34e82 --- /dev/null +++ b/examples/sklearn-logreg-mnist/sklearnexample/client_app.py @@ -0,0 +1,60 @@ +"""sklearnexample: A Flower / scikit-learn app.""" + +import warnings + +from flwr.client import Client, ClientApp, NumPyClient +from flwr.common import Context +from sklearn.metrics import log_loss + +from sklearnexample.task import ( + create_log_reg_and_instantiate_parameters, + get_model_parameters, + load_data, + set_model_params, +) + + +# Define Flower client +class MnistClient(NumPyClient): + def __init__(self, model, X_train, X_test, y_train, y_test): + self.model = model + self.X_train = X_train + self.X_test = X_test + self.y_train = 
y_train + self.y_test = y_test + + def fit(self, parameters, config): + set_model_params(self.model, parameters) + # Ignore convergence failure due to low local epochs + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + self.model.fit(self.X_train, self.y_train) + return get_model_parameters(self.model), len(self.X_train), {} + + def evaluate(self, parameters, config): + set_model_params(self.model, parameters) + loss = log_loss(self.y_test, self.model.predict_proba(self.X_test)) + accuracy = self.model.score(self.X_test, self.y_test) + return loss, len(self.X_test), {"accuracy": accuracy} + + +def client_fn(context: Context) -> Client: + """Construct a Client that will be run in a ClientApp.""" + + # Read the node_config to fetch data partition associated to this node + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + X_train, X_test, y_train, y_test = load_data(partition_id, num_partitions) + + # Read the run config to get settings to configure the Client + penalty = context.run_config["penalty"] + + # Create LogisticRegression Model + model = create_log_reg_and_instantiate_parameters(penalty) + + # Return Client instance + return MnistClient(model, X_train, X_test, y_train, y_test).to_client() + + +# Create ClientApp +app = ClientApp(client_fn=client_fn) diff --git a/examples/sklearn-logreg-mnist/sklearnexample/server_app.py b/examples/sklearn-logreg-mnist/sklearnexample/server_app.py new file mode 100644 index 000000000000..47f4f5fc19c4 --- /dev/null +++ b/examples/sklearn-logreg-mnist/sklearnexample/server_app.py @@ -0,0 +1,64 @@ +"""sklearnexample: A Flower / scikit-learn app.""" + +from flwr.common import Context, NDArrays, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg +from flwr_datasets import FederatedDataset +from sklearn.linear_model import LogisticRegression +from sklearn.metrics import 
log_loss + +from sklearnexample.task import ( + create_log_reg_and_instantiate_parameters, + get_model_parameters, + set_initial_params, + set_model_params, +) + + +def get_evaluate_fn(penalty): + """Return an evaluation function for server-side evaluation.""" + + model = LogisticRegression(penalty=penalty) + set_initial_params(model) + + # Load test data here to avoid the overhead of doing it in `evaluate` itself + fds = FederatedDataset(dataset="mnist", partitioners={"train": 10}) + dataset = fds.load_split("test").with_format("numpy") + X_test, y_test = dataset["image"].reshape((len(dataset), -1)), dataset["label"] + + # The `evaluate` function will be called after every round + def evaluate(server_round, parameters: NDArrays, config): + # Update model with the latest parameters + set_model_params(model, parameters) + loss = log_loss(y_test, model.predict_proba(X_test)) + accuracy = model.score(X_test, y_test) + return loss, {"accuracy": accuracy} + + return evaluate + + +def server_fn(context: Context) -> ServerAppComponents: + """Construct components that set the ServerApp behaviour.""" + + # Read from config + num_rounds = context.run_config["num-server-rounds"] + + penalty = context.run_config["penalty"] + model = create_log_reg_and_instantiate_parameters(penalty) + ndarrays = get_model_parameters(model) + global_model_init = ndarrays_to_parameters(ndarrays) + + # Define the strategy + fraction_fit = context.run_config["fraction-fit"] + strategy = FedAvg( + fraction_fit=fraction_fit, + evaluate_fn=get_evaluate_fn(penalty), + initial_parameters=global_model_init, + ) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Create ServerApp +app = ServerApp(server_fn=server_fn) diff --git a/examples/sklearn-logreg-mnist/sklearnexample/task.py b/examples/sklearn-logreg-mnist/sklearnexample/task.py new file mode 100644 index 000000000000..8e2234f85691 --- /dev/null +++ 
b/examples/sklearn-logreg-mnist/sklearnexample/task.py @@ -0,0 +1,81 @@ +"""sklearnexample: A Flower / scikit-learn app.""" + +import numpy as np +from flwr.common import NDArrays +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner +from sklearn.linear_model import LogisticRegression + +# This information is needed to create a correct scikit-learn model +NUM_UNIQUE_LABELS = 10 # MNIST has 10 classes +NUM_FEATURES = 784 # Number of features in MNIST dataset + + +def get_model_parameters(model: LogisticRegression) -> NDArrays: + """Returns the parameters of a sklearn LogisticRegression model.""" + if model.fit_intercept: + params = [ + model.coef_, + model.intercept_, + ] + else: + params = [ + model.coef_, + ] + return params + + +def set_model_params(model: LogisticRegression, params: NDArrays) -> LogisticRegression: + """Sets the parameters of a sklearn LogisticRegression model.""" + model.coef_ = params[0] + if model.fit_intercept: + model.intercept_ = params[1] + return model + + +def set_initial_params(model: LogisticRegression) -> None: + """Sets initial parameters as zeros. Required since model params are uninitialized + until model.fit is called. + + But server asks for initial parameters from clients at launch. Refer to + sklearn.linear_model.LogisticRegression documentation for more information. 
+ """ + model.classes_ = np.arange(NUM_UNIQUE_LABELS) + + model.coef_ = np.zeros((NUM_UNIQUE_LABELS, NUM_FEATURES)) + if model.fit_intercept: + model.intercept_ = np.zeros((NUM_UNIQUE_LABELS,)) + + +def create_log_reg_and_instantiate_parameters(penalty): + """Helper function to create a LogisticRegression model.""" + model = LogisticRegression( + penalty=penalty, + max_iter=1, # local epoch + warm_start=True, # prevent refreshing weights when fitting, + ) + # Setting initial parameters, akin to model.compile for keras models + set_initial_params(model) + return model + + +fds = None # Cache FederatedDataset + + +def load_data(partition_id: int, num_partitions: int): + # Only initialize `FederatedDataset` once + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="ylecun/mnist", + partitioners={"train": partitioner}, + ) + + dataset = fds.load_partition(partition_id, "train").with_format("numpy") + X, y = dataset["image"].reshape((len(dataset), -1)), dataset["label"] + # Split the on edge data: 80% train, 20% test + X_train, X_test = X[: int(0.8 * len(X))], X[int(0.8 * len(X)) :] + y_train, y_test = y[: int(0.8 * len(y))], y[int(0.8 * len(y)) :] + + return X_train, X_test, y_train, y_test diff --git a/examples/sklearn-logreg-mnist/utils.py b/examples/sklearn-logreg-mnist/utils.py deleted file mode 100644 index b279a0d1a4b3..000000000000 --- a/examples/sklearn-logreg-mnist/utils.py +++ /dev/null @@ -1,42 +0,0 @@ -import numpy as np -from sklearn.linear_model import LogisticRegression - -from flwr.common import NDArrays - - -def get_model_parameters(model: LogisticRegression) -> NDArrays: - """Returns the parameters of a sklearn LogisticRegression model.""" - if model.fit_intercept: - params = [ - model.coef_, - model.intercept_, - ] - else: - params = [ - model.coef_, - ] - return params - - -def set_model_params(model: LogisticRegression, params: NDArrays) -> LogisticRegression: - """Sets the 
parameters of a sklean LogisticRegression model.""" - model.coef_ = params[0] - if model.fit_intercept: - model.intercept_ = params[1] - return model - - -def set_initial_params(model: LogisticRegression): - """Sets initial parameters as zeros Required since model params are uninitialized - until model.fit is called. - - But server asks for initial parameters from clients at launch. Refer to - sklearn.linear_model.LogisticRegression documentation for more information. - """ - n_classes = 10 # MNIST has 10 classes - n_features = 784 # Number of features in dataset - model.classes_ = np.array([i for i in range(10)]) - - model.coef_ = np.zeros((n_classes, n_features)) - if model.fit_intercept: - model.intercept_ = np.zeros((n_classes,)) diff --git a/examples/tensorflow-privacy/README.md b/examples/tensorflow-privacy/README.md index a1f1be00f6b0..af85865346bb 100644 --- a/examples/tensorflow-privacy/README.md +++ b/examples/tensorflow-privacy/README.md @@ -1,60 +1,64 @@ +--- +tags: [DP, DP-SGD, basic, vision, fds, privacy] +dataset: [MNIST] +framework: [tensorflow] +--- + # Training with Sample-Level Differential Privacy using TensorFlow-Privacy Engine -In this example, we demonstrate how to train a model with sample-level differential privacy (DP) using Flower. We employ TensorFlow and integrate the tensorflow-privacy Engine to achieve sample-level differential privacy. This setup ensures robust privacy guarantees during the client training phase. +In this example, we demonstrate how to train a model with sample-level differential privacy (DP) using Flower. We employ TensorFlow and integrate the tensorflow-privacy engine to achieve sample-level differential privacy. This setup ensures robust privacy guarantees during the client training phase. For more information about DP in Flower please refer to the [tutorial](https://flower.ai/docs/framework/how-to-use-differential-privacy.html). 
For additional information about tensorflow-privacy, visit the official [website](https://www.tensorflow.org/responsible_ai/privacy/guide). -## Environments Setup +## Set up the project + +### Clone the project -Start by cloning the example. We prepared a single-line command that you can copy into your shell which will checkout the example for you: +Start by cloning the example project: ```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/tensorflow-privacy . && rm -rf flower && cd tensorflow-privacy +git clone --depth=1 https://github.com/adap/flower.git \ + && mv flower/examples/tensorflow-privacy . \ + && rm -rf flower \ + && cd tensorflow-privacy ``` This will create a new directory called `tensorflow-privacy` containing the following files: ```shell --- pyproject.toml --- client.py --- server.py --- README.md +tensorflow-privacy +├── tf_privacy +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training, and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -### Installing dependencies - -Project dependencies are defined in `pyproject.toml`. Install them with: - -```shell -pip install . -``` +> \[!NOTE\] +> Please note that, at the current state, users cannot set `NodeConfig` for simulated `ClientApp`s. For this reason, the hyperparameter `noise_multiplier` is set in the `client_fn` method based on a condition check on `partition_id`. This will be modified in a future version of Flower to allow users to set `NodeConfig` for simulated `ClientApp`s. -## Run Flower with tensorflow-privacy and TensorFlow +### Install dependencies and project -### 1. Start the long-running Flower server (SuperLink) +Install the dependencies defined in `pyproject.toml` as well as the `tf_privacy` package. -```bash -flower-superlink --insecure +```shell +# From a new python environment, run: +pip install -e . ``` -### 2. 
Start the long-running Flower clients (SuperNodes) +## Run the project -Start 2 Flower `SuperNodes` in 2 separate terminal windows, using: +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you use the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -```bash -flower-client-app client:appA --insecure -``` +### Run with the Simulation Engine ```bash -flower-client-app client:appB --insecure +flwr run . ``` -tensorflow-privacy hyperparameters can be passed for each client in `ClientApp` instantiation (in `client.py`). In this example, `noise_multiplier=1.5` and `noise_multiplier=1` are used for the first and second client respectively. - -### 3. Run the Flower App - -With both the long-running server (SuperLink) and two clients (SuperNode) up and running, we can now run the actual Flower App: +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: ```bash -flower-server-app server:app --insecure +flwr run . 
--run-config "l2-norm-clip=1.5 num-server-rounds=5" ``` diff --git a/examples/tensorflow-privacy/client.py b/examples/tensorflow-privacy/client.py deleted file mode 100644 index 4aec85da014a..000000000000 --- a/examples/tensorflow-privacy/client.py +++ /dev/null @@ -1,150 +0,0 @@ -import argparse -import os -from flwr.client import ClientApp, NumPyClient -import tensorflow as tf -from flwr_datasets import FederatedDataset -import tensorflow_privacy - -from tensorflow_privacy.privacy.analysis.compute_dp_sgd_privacy_lib import ( - compute_dp_sgd_privacy_statement, -) - -# Make TensorFlow log less verbose -os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" - - -def load_data(partition_id, batch_size): - fds = FederatedDataset(dataset="mnist", partitioners={"train": 2}) - partition = fds.load_partition(partition_id, "train") - partition.set_format("numpy") - - # Divide data on each node: 80% train, 20% test - partition = partition.train_test_split(test_size=0.2, seed=42) - x_train, y_train = partition["train"]["image"] / 255.0, partition["train"]["label"] - x_test, y_test = partition["test"]["image"] / 255.0, partition["test"]["label"] - - # Adjust the size of the training dataset to make it evenly divisible by the batch size - remainder = len(x_train) % batch_size - if remainder != 0: - x_train = x_train[:-remainder] - y_train = y_train[:-remainder] - - return (x_train, y_train), (x_test, y_test) - - -class FlowerClient(NumPyClient): - def __init__( - self, - model, - train_data, - test_data, - l2_norm_clip, - noise_multiplier, - num_microbatches, - learning_rate, - batch_size, - ) -> None: - super().__init__() - self.model = model - self.x_train, self.y_train = train_data - self.x_test, self.y_test = test_data - self.noise_multiplier = noise_multiplier - self.l2_norm_clip = l2_norm_clip - self.num_microbatches = num_microbatches - self.learning_rate = learning_rate - self.batch_size = batch_size - if self.batch_size % self.num_microbatches != 0: - raise ValueError( - f"Batch 
size {self.batch_size} is not divisible by the number of microbatches {self.num_microbatches}" - ) - - self.optimizer = tensorflow_privacy.DPKerasSGDOptimizer( - l2_norm_clip=l2_norm_clip, - noise_multiplier=noise_multiplier, - num_microbatches=num_microbatches, - learning_rate=learning_rate, - ) - loss = tf.keras.losses.SparseCategoricalCrossentropy( - reduction=tf.losses.Reduction.NONE - ) - self.model.compile(optimizer=self.optimizer, loss=loss, metrics=["accuracy"]) - - def get_parameters(self, config): - return self.model.get_weights() - - def fit(self, parameters, config): - self.model.set_weights(parameters) - - self.model.fit( - self.x_train, - self.y_train, - epochs=1, - batch_size=self.batch_size, - ) - - compute_dp_sgd_privacy_statement( - number_of_examples=self.x_train.shape[0], - batch_size=self.batch_size, - num_epochs=1, - noise_multiplier=self.noise_multiplier, - delta=1e-5, - ) - - return self.model.get_weights(), len(self.x_train), {} - - def evaluate(self, parameters, config): - self.model.set_weights(parameters) - self.model.compile( - optimizer=self.optimizer, - loss="sparse_categorical_crossentropy", - metrics=["accuracy"], - ) - loss, accuracy = self.model.evaluate(self.x_test, self.y_test) - return loss, len(self.x_test), {"accuracy": accuracy} - - -def client_fn_parameterized( - partition_id, - noise_multiplier, - l2_norm_clip=1.0, - num_microbatches=64, - learning_rate=0.01, - batch_size=64, -): - def client_fn(cid: str): - model = tf.keras.Sequential( - [ - tf.keras.layers.InputLayer(input_shape=(28, 28, 1)), - tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), - tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), - tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), - tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), - tf.keras.layers.Flatten(), - tf.keras.layers.Dense(128, activation="relu"), - tf.keras.layers.Dense(10, activation="softmax"), - ] - ) - train_data, test_data = load_data( - 
partition_id=partition_id, batch_size=batch_size - ) - return FlowerClient( - model, - train_data, - test_data, - noise_multiplier, - l2_norm_clip, - num_microbatches, - learning_rate, - batch_size, - ).to_client() - - return client_fn - - -appA = ClientApp( - client_fn=client_fn_parameterized(partition_id=0, noise_multiplier=1.0), -) - -appB = ClientApp( - client_fn=client_fn_parameterized(partition_id=1, noise_multiplier=1.5), -) diff --git a/examples/tensorflow-privacy/pyproject.toml b/examples/tensorflow-privacy/pyproject.toml index 884ba3b5f07b..b404f7f183a0 100644 --- a/examples/tensorflow-privacy/pyproject.toml +++ b/examples/tensorflow-privacy/pyproject.toml @@ -4,19 +4,36 @@ build-backend = "hatchling.build" [project] name = "tensorflow-privacy-fl" -version = "0.1.0" +version = "1.0.0" description = "Sample-level Differential Privacy with Tensorflow-Privacy in Flower" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] dependencies = [ - "flwr>=1.8.0,<2.0", - "flwr-datasets[vision]>=0.1.0,<1.0.0", + "flwr[simulation]>=1.12.0", + "flwr-datasets[vision]>=0.3.0", "tensorflow-estimator~=2.4", "tensorflow-probability~=0.22.0", "tensorflow>=2.4.0,<=2.15.0", - "tensorflow-privacy == 0.9.0" + "tensorflow-privacy == 0.9.0", ] [tool.hatch.build.targets.wheel] packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "tf_privacy.server_app:app" +clientapp = "tf_privacy.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +l2-norm-clip = 1.0 +num-microbatches = 64 +learning-rate = 0.01 +batch-size = 64 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 2 diff --git a/examples/tensorflow-privacy/server.py b/examples/tensorflow-privacy/server.py deleted file mode 100644 index 1e399fa7e833..000000000000 --- a/examples/tensorflow-privacy/server.py +++ /dev/null @@ -1,22 +0,0 @@ -from typing import List, Tuple - 
-from flwr.server import ServerApp, ServerConfig -from flwr.server.strategy import FedAvg -from flwr.common import Metrics - - -def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: - accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] - examples = [num_examples for num_examples, _ in metrics] - - return {"accuracy": sum(accuracies) / sum(examples)} - - -strategy = FedAvg(evaluate_metrics_aggregation_fn=weighted_average) - -config = ServerConfig(num_rounds=3) - -app = ServerApp( - config=config, - strategy=strategy, -) diff --git a/examples/tensorflow-privacy/tf_privacy/__init__.py b/examples/tensorflow-privacy/tf_privacy/__init__.py new file mode 100644 index 000000000000..252b33cdd1c5 --- /dev/null +++ b/examples/tensorflow-privacy/tf_privacy/__init__.py @@ -0,0 +1 @@ +"""tf_privacy: Training with Sample-Level Differential Privacy using TensorFlow-Privacy Engine.""" diff --git a/examples/tensorflow-privacy/tf_privacy/client_app.py b/examples/tensorflow-privacy/tf_privacy/client_app.py new file mode 100644 index 000000000000..977d98bbbe43 --- /dev/null +++ b/examples/tensorflow-privacy/tf_privacy/client_app.py @@ -0,0 +1,93 @@ +"""tf_privacy: Training with Sample-Level Differential Privacy using TensorFlow-Privacy Engine.""" + +import os + +import tensorflow as tf +import tensorflow_privacy +from flwr.client import ClientApp, NumPyClient +from tensorflow_privacy.privacy.analysis.compute_dp_sgd_privacy_lib import ( + compute_dp_sgd_privacy_statement, +) +from flwr.common import Context + +from tf_privacy.task import load_data, load_model +import numpy as np + + +# Make TensorFlow log less verbose +os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" + + +class FlowerClient(NumPyClient): + def __init__( + self, + train_data, + test_data, + noise_multiplier, + run_config, + ) -> None: + super().__init__() + self.model = load_model() + self.x_train, self.y_train = train_data + self.x_train = np.expand_dims(self.x_train, axis=-1) + 
self.x_test, self.y_test = test_data + self.x_test = np.expand_dims(self.x_test, axis=-1) + self.noise_multiplier = noise_multiplier + self.run_config = run_config + if self.run_config["batch-size"] % self.run_config["num-microbatches"] != 0: + raise ValueError( + f"Batch size {self.run_config['batch-size']} is not divisible by the number of microbatches {self.run_config['num-microbatches']}" + ) + + self.optimizer = tensorflow_privacy.DPKerasSGDOptimizer( + l2_norm_clip=self.run_config["l2-norm-clip"], + noise_multiplier=self.noise_multiplier, + num_microbatches=self.run_config["num-microbatches"], + learning_rate=self.run_config["learning-rate"], + ) + loss = tf.keras.losses.SparseCategoricalCrossentropy( + reduction=tf.losses.Reduction.NONE + ) + self.model.compile(optimizer=self.optimizer, loss=loss, metrics=["accuracy"]) + + def fit(self, parameters, config): + self.model.set_weights(parameters) + self.model.fit( + self.x_train, + self.y_train, + epochs=1, + batch_size=self.run_config["batch-size"], + ) + + dp_statement = compute_dp_sgd_privacy_statement( + number_of_examples=self.x_train.shape[0], + batch_size=self.run_config["batch-size"], + num_epochs=1, + noise_multiplier=self.noise_multiplier, + delta=1e-5, + ) + print(dp_statement) + + return self.model.get_weights(), len(self.x_train), {} + + def evaluate(self, parameters, config): + self.model.set_weights(parameters) + loss, accuracy = self.model.evaluate(self.x_test, self.y_test) + return loss, len(self.x_test), {"accuracy": accuracy} + + +def client_fn(context: Context): + partition_id = context.node_config["partition-id"] + run_config = context.run_config + noise_multiplier = 1.0 if partition_id % 2 == 0 else 1.5 + + train_data, test_data = load_data( + partition_id=partition_id, + num_partitions=context.node_config["num-partitions"], + batch_size=context.run_config["batch-size"], + ) + + return FlowerClient(train_data, test_data, noise_multiplier, run_config).to_client() + + +app = 
ClientApp(client_fn=client_fn) diff --git a/examples/tensorflow-privacy/tf_privacy/server_app.py b/examples/tensorflow-privacy/tf_privacy/server_app.py new file mode 100644 index 000000000000..5348492a3ac4 --- /dev/null +++ b/examples/tensorflow-privacy/tf_privacy/server_app.py @@ -0,0 +1,31 @@ +"""tf_privacy: Training with Sample-Level Differential Privacy using TensorFlow-Privacy Engine.""" + +from typing import List, Tuple + +from flwr.common import Metrics +from flwr.server import ServerApp, ServerConfig, ServerAppComponents +from flwr.server.strategy import FedAvg +from flwr.common import Context, ndarrays_to_parameters +from .task import load_model + + +def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: + accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + return {"accuracy": sum(accuracies) / sum(examples)} + + +def server_fn(context: Context) -> ServerAppComponents: + parameters = ndarrays_to_parameters(load_model().get_weights()) + strategy = FedAvg( + evaluate_metrics_aggregation_fn=weighted_average, + initial_parameters=parameters, + ) + num_rounds = context.run_config["num-server-rounds"] + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(config=config, strategy=strategy) + + +app = ServerApp(server_fn=server_fn) diff --git a/examples/tensorflow-privacy/tf_privacy/task.py b/examples/tensorflow-privacy/tf_privacy/task.py new file mode 100644 index 000000000000..7bbf2a3e9c09 --- /dev/null +++ b/examples/tensorflow-privacy/tf_privacy/task.py @@ -0,0 +1,52 @@ +"""tf_privacy: Training with Sample-Level Differential Privacy using TensorFlow-Privacy Engine.""" + +import tensorflow as tf + +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner + +fds = None # Cache FederatedDataset + + +def load_model(): + model = tf.keras.Sequential( + [ + tf.keras.layers.InputLayer(input_shape=(28, 28, 
1)), + tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), + tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), + tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), + tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), + tf.keras.layers.Flatten(), + tf.keras.layers.Dense(128, activation="relu"), + tf.keras.layers.Dense(10, activation="softmax"), + ] + ) + + return model + + +def load_data(partition_id: int, num_partitions: int, batch_size): + + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="ylecun/mnist", + partitioners={"train": partitioner}, + ) + + partition = fds.load_partition(partition_id) + partition.set_format("numpy") + + # Divide data on each node: 80% train, 20% test + partition = partition.train_test_split(test_size=0.2, seed=42) + x_train, y_train = partition["train"]["image"] / 255.0, partition["train"]["label"] + x_test, y_test = partition["test"]["image"] / 255.0, partition["test"]["label"] + + # Adjust the size of the training dataset to make it evenly divisible by the batch size + remainder = len(x_train) % batch_size + if remainder != 0: + x_train = x_train[:-remainder] + y_train = y_train[:-remainder] + + return (x_train, y_train), (x_test, y_test) diff --git a/examples/vertical-fl/.gitignore b/examples/vertical-fl/.gitignore index 64af4779185a..5d2a2d133ae3 100644 --- a/examples/vertical-fl/.gitignore +++ b/examples/vertical-fl/.gitignore @@ -1,2 +1 @@ -_static/results -!_static/data/train.csv +!data/train.csv diff --git a/examples/vertical-fl/README.md b/examples/vertical-fl/README.md index d8c599d617c4..a9f6fc383060 100644 --- a/examples/vertical-fl/README.md +++ b/examples/vertical-fl/README.md @@ -1,4 +1,10 @@ -# Vertical Federated Learning example +--- +tags: [vertical, tabular, advanced] +dataset: [Titanic] +framework: [torch, pandas, scikit-learn] +--- + +# Vertical Federated Learning with Flower This example will showcase how you can 
perform Vertical Federated Learning using Flower. We'll be using the [Titanic dataset](https://www.kaggle.com/competitions/titanic/data) @@ -7,89 +13,6 @@ more details below, but the main idea of Vertical Federated Learning is that each client is holding different feature sets of the same dataset and that the server is holding the labels of this dataset. -## Project Setup - -Start by cloning the example project. We prepared a single-line command that you -can copy into your shell which will checkout the example for you: - -```shell -git clone --depth=1 https://github.com/adap/flower.git _tmp && mv _tmp/examples/vertical-fl . && rm -rf _tmp && cd vertical-fl -``` - -This will create a new directory called `vertical-fl` containing the -following files: - -```shell --- pyproject.toml --- requirements.txt --- _static/data/train.csv --- client.py --- plot.py --- simulation.py --- strategy.py --- task.py --- README.md -``` - -### Installing Dependencies - -Project dependencies (such as `torch` and `flwr`) are defined in -`pyproject.toml` and `requirements.txt`. We recommend -[Poetry](https://python-poetry.org/docs/) to install those dependencies and -manage your virtual environment ([Poetry -installation](https://python-poetry.org/docs/#installation)) or -[pip](https://pip.pypa.io/en/latest/development/), but feel free to use a -different way of installing dependencies and managing virtual environments if -you have other preferences. - -#### Poetry - -```shell -poetry install -poetry shell -``` - -Poetry will install all your dependencies in a newly created virtual -environment. To verify that everything works correctly you can run the following -command: - -```shell -poetry run python3 -c "import flwr" -``` - -If you don't see any errors you're good to go! - -#### pip - -Write the command below in your terminal to install the dependencies according -to the configuration file requirements.txt. 
- -```shell -pip install -r requirements.txt -``` - -## Usage - -Once everything is installed, you can just run: - -```shell -poetry run python3 simulation.py -``` - -for `poetry`, otherwise just run: - -```shell -python3 simulation.py -``` - -This will start the Vertical FL training for 1000 rounds with 3 clients. -Eventhough the number of rounds is quite high, this should only take a few -seconds to run as the model is very small. - -## Explanations - -### Vertical FL vs Horizontal FL - | | Horizontal Federated Learning (HFL or just FL) | Vertical Federated Learning (VFL) | | --------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | Data Distribution | Clients have different data instances but share the same feature space. Think of different hospitals having different patients' data (samples) but recording the same types of information (features). | Each client holds different features for the same instances. Imagine different institutions holding various tests or measurements for the same group of patients. | @@ -99,412 +22,64 @@ seconds to run as the model is very small. | HFL | VFL | | :-----------------------------: | :-----------------------------: | -| ![HFL diagram](_static/hfl.jpg) | ![VFL diagram](_static/vfl.jpg) | +| ![HFL diagram](_static/hfl.png) | ![VFL diagram](_static/vfl.png) | Those diagrams illustrate HFL vs VFL using a simplified version of what we will be building in this example. 
Note that on the VFL side, the server holds the labels (the `Survived` column) and will be the only one capable of performing evaluation. -### Data - -#### About - -The Titanic Survival dataset is a popular dataset used to predict passenger survival on -the Titanic based on various features. - -You can see an exhaustive list of the features over on [Kaggle](https://www.kaggle.com/competitions/titanic/data). - -The data is stored as a CSV file in `_static/data/train.csv`, it contains 892 -samples with labels. - -#### Preprocessing +## Set up the project -In `task.py`, you'll find the preprocessing functions we'll apply to our data: - -- Passengers are grouped by age: 'Child' for 10 years and under, - 'Adult' for ages between 11 and 40, and 'Elderly' for those over 40. If the age - isn't listed, we'll label it as 'Unknown'. - - ```python - def _bin_age(age_series): - bins = [-np.inf, 10, 40, np.inf] - labels = ["Child", "Adult", "Elderly"] - return ( - pd.cut(age_series, bins=bins, labels=labels, right=True) - .astype(str) - .replace("nan", "Unknown") - ) - ``` - -- We pull out titles from passengers' names to help our model - understand social status and family roles, simplifying rare titles into a single - 'Rare' category and converting any French titles to their English equivalents. - - ```python - def _extract_title(name_series): - titles = name_series.str.extract(" ([A-Za-z]+)\.", expand=False) - rare_titles = { - "Lady", - "Countess", - "Capt", - "Col", - "Don", - "Dr", - "Major", - "Rev", - "Sir", - "Jonkheer", - "Dona", - } - titles = titles.replace(list(rare_titles), "Rare") - titles = titles.replace({"Mlle": "Miss", "Ms": "Miss", "Mme": "Mrs"}) - return titles - ``` - -- The first letter of each cabin number is used to identify the - cabin area, with any missing entries marked as 'Unknown'. This could provide - insight into the passenger's location on the ship. 
- -- We remove features like 'PassengerId', 'Name', and - 'Ticket' that won't be necessary for our model's predictions. - -- Lastly, we convert categorical data points such as 'Sex', - 'Pclass', 'Embarked', 'Title', 'Cabin', and the binned 'Age' into One-Hot - encodings. - - ```python - def _create_features(df): - # Convert 'Age' to numeric, coercing errors to NaN - df["Age"] = pd.to_numeric(df["Age"], errors="coerce") - df["Age"] = _bin_age(df["Age"]) - df["Cabin"] = df["Cabin"].str[0].fillna("Unknown") - df["Title"] = _extract_title(df["Name"]) - df.drop(columns=["PassengerId", "Name", "Ticket"], inplace=True) - all_keywords = set(df.columns) - df = pd.get_dummies( - df, columns=["Sex", "Pclass", "Embarked", "Title", "Cabin", "Age"] - ) - return df, all_keywords - ``` - -#### Partitioning - -In `task.py`, we also partition our data for our 3 clients to mirror real-life -collaborations where different organizations hold different feature sets: - -```python -def _partition_data(df, all_keywords): - partitions = [] - keywords_sets = [{"Parch", "Cabin", "Pclass"}, {"Sex", "Title"}] - keywords_sets.append(all_keywords - keywords_sets[0] - keywords_sets[1]) - - for keywords in keywords_sets: - partitions.append( - df[ - list( - { - col - for col in df.columns - for kw in keywords - if kw in col or "Survived" in col - } - ) - ] - ) - - return partitions -``` +### Clone the project -Client 1: This client looks at family connections and accommodations, working -with features like the number of parents and children each passenger had on -board ('Parch'), the cabin number ('Cabin'), and the ticket class ('Pclass'). +Start by cloning the example project: -Client 2: Here, the focus is on personal attributes. This client examines the -passengers' gender ('Sex') and societal roles as indicated by their titles -('Title'). - -Client 3: The final client handles the rest of the data that the first two don't -see. 
This includes the remaining features that give a broader view of the -passengers' information. - -Each client is going to train their models on their own unique data without any -idea of the passengers' survival outcomes, which we're trying to predict. - -Once all clients have done their part, we combine their insights to form a -comprehensive understanding, just as if different organizations were pooling -their knowledge while keeping their data private. This is the essence of -Vertical Federated Learning: separate but together, each contributing to a -collective intelligence without sharing sensitive information. - -Note that our final data processing function looks like that: - -```python -def get_partitions_and_label(): - df = pd.read_csv("_static/data/train.csv") - processed_df = df.dropna(subset=["Embarked", "Fare"]).copy() - processed_df, all_keywords = _create_features(processed_df) - raw_partitions = _partition_data(processed_df, all_keywords) - - partitions = [] - for partition in raw_partitions: - partitions.append(partition.drop("Survived", axis=1)) - return partitions, processed_df["Survived"].values -``` - -This returns the 3 partitions for our clients and the labels for our server. - -### Models - -#### Clients - -Each client's model is a neural network designed to operate on a distinct subset -of features held by a client. In this example we will use simple linear -regression models. - -```python -class ClientModel(nn.Module): - def __init__(self, input_size): - super(ClientModel, self).__init__() - self.fc = nn.Linear(input_size, 4) - - def forward(self, x): - return self.fc(x) -``` - -The `input_size` corresponds to the number of features each client has, and this -model maps those features to a 4-dimensional latent space. The outputs are -essentially feature embeddings that capture the patterns within each client's -data slice. These embeddings are then ready to be sent to the server for further -processing. 
- -#### Server - -The server's model acts as the central aggregator in the VFL system. It's also a -neural network but with a slightly different architecture tailored to its role -in aggregating the client models' outputs. - -```python -class ServerModel(nn.Module): - def __init__(self): - super(ServerModel, self).__init__() - self.fc = nn.Linear(12, 1) - self.sigmoid = nn.Sigmoid() - - def forward(self, x): - x = self.fc(x) - return self.sigmoid(x) -``` - -It comprises a single linear layer that accepts the concatenated outputs from -all client models as its input. The number of inputs to this layer equals the -total number of outputs from the client models (3 x 4 = 12). After processing -these inputs, the linear layer's output is passed through a sigmoid activation -function (`nn.Sigmoid()`), which maps the result to a `(0, 1)` range, providing -a probability score indicative of the likelihood of survival. - -### Strategy - -The strategy we will write to perform the aggregation will inherit from `FedAvg` -and set the following additional attributes: - -```python -self.model = ServerModel(12) -self.initial_parameters = ndarrays_to_parameters( - [val.cpu().numpy() for _, val in self.model.state_dict().items()] -) -self.optimizer = optim.SGD(self.model.parameters(), lr=0.01) -self.criterion = nn.BCELoss() -self.label = torch.tensor(labels).float().unsqueeze(1) -``` - -With `labels` given as an argument to the strategy. 
- -We then redefine the `aggregate_fit` method: - -```python -def aggregate_fit( - self, - rnd, - results, - failures, -): - # Do not aggregate if there are failures and failures are not accepted - if not self.accept_failures and failures: - return None, {} - - # Convert results - embedding_results = [ - torch.from_numpy(parameters_to_ndarrays(fit_res.parameters)[0]) - for _, fit_res in results - ] - embeddings_aggregated = torch.cat(embedding_results, dim=1) - embedding_server = embeddings_aggregated.detach().requires_grad_() - output = self.model(embedding_server) - loss = self.criterion(output, self.label) - loss.backward() - - self.optimizer.step() - self.optimizer.zero_grad() - - grads = embedding_server.grad.split([4, 4, 4], dim=1) - np_grads = [grad.numpy() for grad in grads] - parameters_aggregated = ndarrays_to_parameters(np_grads) - - with torch.no_grad(): - correct = 0 - output = self.model(embedding_server) - predicted = (output > 0.5).float() - - correct += (predicted == self.label).sum().item() - - accuracy = correct / len(self.label) * 100 - - metrics_aggregated = {"accuracy": accuracy} - - return parameters_aggregated, metrics_aggregated +```shell +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/vertical-fl . \ + && rm -rf _tmp \ + && cd vertical-fl ``` -This is where all the magic happens. We first convert the `np.array`s that we -received from our clients to `tensor`s, before concatenating the 3 embeddings -together. This means that we go from 3 tensors of size `(892, 4)` to 1 tensor of -size `(892, 12)`. The combined embeddings are fed through the server model to -get the prediction output. The loss between the predicted output and the actual -labels is calculated. Backward propagation is then performed to calculate the -gradients, which are used to update the server model's parameters. 
- -The optimizer updates the server model's parameters based on the calculated -gradients, and the gradients are reset to zero to prepare for the next round of -aggregation. - -The gradients from the server model's embedding layer are then split according -to the size of the output from each client model (assuming equal size for -simplicity here), ready to be sent back to the respective client models. - -Finally, with no gradient calculation needed, the model's predictions are -compared to the true labels to calculate the accuracy of the model after the -update. - -Note that this `aggregate_fit` function returns gradients instead of trained -weights. This is because, in this setting, sharing gradients allows each -participant to benefit from the collective feedback gathered from the entire -pool of data without the need to align their different feature spaces (trained -weights are directly tied to specific features of the dataset but not gradients, -which are just a measure of the sensitivity of the loss function to changes in -the model's parameters). This shared feedback, encapsulated in the gradients, -guides each participant's model to adjust and improve, achieving optimization -not just based on its own data but also leveraging insights from the entire -network's data. - -We do not need to return parameters here because updates are completed locally -in VFL. But the server should still send the gradients back to all clients to -let them continue the back prop and update their local model. In Flower, the -parameters returned by `aggregate_fit` will be stored and sent to -`Client.evaluate` via `configure_fit`. So we take advantage of this and return -our gradients in `aggregate_fit` so that they'll be sent to `Client.evaluate` as -`parameters`. That's also why we can obtain gradients from the `parameters` -argument in `Client.evaluate` (see next section). 
- -The last thing we have to do is to redefine the `aggregate_evaluate` function to -disable distributed evaluation (as the clients do not hold any labels to test -their local models). +This will create a new directory called `vertical-fl` with the following structure: +following files: -```python -def aggregate_evaluate( - self, - rnd, - results, - failures, -): - return None, {} +```shell +vertical-fl +├── vertical_fl +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ ├── strategy.py # Defines your Strategy +│ └── task.py # Defines your model, training and data loading +├── pyproject.toml # Project metadata like dependencies and configs +├── data/train.csv +└── README.md ``` -### Client class and function - -Our `FlowerClient` class is going to be quite straight forward. - -```python -class FlowerClient(fl.client.NumPyClient): - def __init__(self, cid, data): - self.cid = cid - self.train = torch.tensor(StandardScaler().fit_transform(data)).float() - self.model = ClientModel(input_size=self.train.shape[1]) - self.optimizer = torch.optim.SGD(self.model.parameters(), lr=0.01) - self.embedding = self.model(self.train) +### Install dependencies and project - def get_parameters(self, config): - pass +Install the dependencies defined in `pyproject.toml` as well as the `mlxexample` package. - def fit(self, parameters, config): - self.embedding = self.model(self.train) - return [self.embedding.detach().numpy()], 1, {} - - def evaluate(self, parameters, config): - self.model.zero_grad() - self.embedding.backward(torch.from_numpy(parameters[int(self.cid)])) - self.optimizer.step() - return None +```bash +pip install -e . ``` -After defining our model and data attributes (respectively `self.model` and -`self.train`), we define our `fit` function as such: the `self.model(self.train)` -performs a forward pass using the client's local training data (`self.train`). 
-This generates the embeddings (feature representations) for the data. To conform -with the return type of the `fit` function, we need to return a list of -`np.array`s (hence the conversion), the number of samples, which won't be used -on the server side, so we just return 1, and then an empty dict. - -For the `evaluate` function, we perform our model's backward pass using the -gradients sent by the server and then update our local model's parameters based -on those new gradients. Note that the `loss` and `num_examples` we return in our -evaluate function are bogus, as they won't be used on the server side. +## Run the project -The `client_fn` we will use in our `start_simulation` function to generate our 3 -clients will be very basic: +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -```pyhton3 -partitions, label = get_partitions_and_label() +### Run with the Simulation Engine -def client_fn(cid): - return FlowerClient(cid, partitions[int(cid)]).to_client() +```bash +flwr run . ``` -We pass a `client_id` and its corresponding partition to each client. - -### Evaluation +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: -Please note that we do not perform distributed evaluation. This is because only -the server holds some labels to compare the results to. This is why the only -evaluation we perform is on the server side. - -In this example, we use the `FlowerClient` `evaluate` function for -backpropagation instead of using it for evaluation. We do this because we know -that the `evaluate` function of the clients will be called after the fit -function. 
This allows us to aggregate our models in `aggregate_fit` and then -send them back to the clients using this `evaluate` function and perform the -backpropagation. This is not done for evaluation, hence why we return `None` in -the `aggregate_evaluate` function of the strategy. - -### Starting the simulation - -Putting everything together, to start our simulation we use the following -function: - -```python -hist = fl.simulation.start_simulation( - client_fn=client_fn, - num_clients=3, - config=fl.server.ServerConfig(num_rounds=1000), - strategy=Strategy(label), -) +```bash +flwr run . --run-config "num-server-rounds=5 learning-rate=0.05" ``` -As mentioned before, we train for 1000 rounds but it should still last only -a few seconds. - -Note that we store the results of the simulation into `hist`, this will allow us -to use the `plot.py` file to plot the accuracy as a function of the number of -rounds. - -## Results - -Here we can observe the results after 1000 rounds: +### Run with the Deployment Engine -![Accuracy plot](_static/vfl-accuracy.png) +> \[!NOTE\] +> An update to this example will show how to run this Flower project with the Deployment Engine and TLS certificates, or with Docker. 
diff --git a/examples/vertical-fl/_static/hfl.jpg b/examples/vertical-fl/_static/hfl.jpg deleted file mode 100644 index 7fd4c47de2b3..000000000000 Binary files a/examples/vertical-fl/_static/hfl.jpg and /dev/null differ diff --git a/examples/vertical-fl/_static/hfl.png b/examples/vertical-fl/_static/hfl.png new file mode 100644 index 000000000000..3078b927788a Binary files /dev/null and b/examples/vertical-fl/_static/hfl.png differ diff --git a/examples/vertical-fl/_static/vfl-accuracy.png b/examples/vertical-fl/_static/vfl-accuracy.png deleted file mode 100644 index c436b6db0825..000000000000 Binary files a/examples/vertical-fl/_static/vfl-accuracy.png and /dev/null differ diff --git a/examples/vertical-fl/_static/vfl.jpg b/examples/vertical-fl/_static/vfl.jpg deleted file mode 100644 index a7ce7dbfad31..000000000000 Binary files a/examples/vertical-fl/_static/vfl.jpg and /dev/null differ diff --git a/examples/vertical-fl/_static/vfl.png b/examples/vertical-fl/_static/vfl.png new file mode 100644 index 000000000000..89e8db72a952 Binary files /dev/null and b/examples/vertical-fl/_static/vfl.png differ diff --git a/examples/vertical-fl/client.py b/examples/vertical-fl/client.py deleted file mode 100644 index 9f489e70f086..000000000000 --- a/examples/vertical-fl/client.py +++ /dev/null @@ -1,27 +0,0 @@ -import flwr as fl -import torch -from sklearn.preprocessing import StandardScaler - -from task import ClientModel - - -class FlowerClient(fl.client.NumPyClient): - def __init__(self, cid, data): - self.cid = cid - self.train = torch.tensor(StandardScaler().fit_transform(data)).float() - self.model = ClientModel(input_size=self.train.shape[1]) - self.optimizer = torch.optim.SGD(self.model.parameters(), lr=0.01) - self.embedding = self.model(self.train) - - def get_parameters(self, config): - pass - - def fit(self, parameters, config): - self.embedding = self.model(self.train) - return [self.embedding.detach().numpy()], 1, {} - - def evaluate(self, parameters, config): 
- self.model.zero_grad() - self.embedding.backward(torch.from_numpy(parameters[int(self.cid)])) - self.optimizer.step() - return 0.0, 1, {} diff --git a/examples/vertical-fl/_static/data/train.csv b/examples/vertical-fl/data/train.csv similarity index 100% rename from examples/vertical-fl/_static/data/train.csv rename to examples/vertical-fl/data/train.csv diff --git a/examples/vertical-fl/plot.py b/examples/vertical-fl/plot.py deleted file mode 100644 index 801f202328c7..000000000000 --- a/examples/vertical-fl/plot.py +++ /dev/null @@ -1,8 +0,0 @@ -import numpy as np -import matplotlib.pyplot as plt - -if __name__ == "__main__": - hist = np.load("_static/results/hist.npy", allow_pickle=True).item() - rounds, values = zip(*hist.metrics_distributed_fit["accuracy"]) - plt.plot(np.asarray(rounds), np.asarray(values)) - plt.savefig("_static/results/accuracy.png") diff --git a/examples/vertical-fl/pyproject.toml b/examples/vertical-fl/pyproject.toml index 19dcd0e7a842..458878748cde 100644 --- a/examples/vertical-fl/pyproject.toml +++ b/examples/vertical-fl/pyproject.toml @@ -1,18 +1,37 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] +[project] name = "vertical-fl" -version = "0.1.0" +version = "1.0.0" description = "PyTorch Vertical FL with Flower" -authors = ["The Flower Authors "] +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]>=1.12.0", + "flwr-datasets>=0.3.0", + "numpy==1.24.4", + "pandas==2.0.3", + "scikit-learn==1.3.2", + "torch==2.1.0", +] -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = { extras = ["simulation"], version = ">=1.0,<2.0" } -torch = "2.1.0" -matplotlib = "3.7.3" -scikit-learn = "1.3.2" -numpy = "1.24.4" -pandas = "2.0.3" +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "vertical_fl.server_app:app" +clientapp = 
"vertical_fl.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +learning-rate = 0.1 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 3 # Note that this example will require changes to how VFL is implemented diff --git a/examples/vertical-fl/requirements.txt b/examples/vertical-fl/requirements.txt deleted file mode 100644 index aee341e4c554..000000000000 --- a/examples/vertical-fl/requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ -flwr[simulation]>=1.0, <2.0 -torch==2.1.0 -matplotlib==3.7.3 -scikit-learn==1.3.2 -numpy==1.24.4 -pandas==2.0.3 diff --git a/examples/vertical-fl/simulation.py b/examples/vertical-fl/simulation.py deleted file mode 100644 index f4befc3a073e..000000000000 --- a/examples/vertical-fl/simulation.py +++ /dev/null @@ -1,25 +0,0 @@ -import flwr as fl -import numpy as np -from strategy import Strategy -from client import FlowerClient -from pathlib import Path -from task import get_partitions_and_label - -partitions, label = get_partitions_and_label() - - -def client_fn(cid): - return FlowerClient(cid, partitions[int(cid)]).to_client() - - -# Start Flower server -hist = fl.simulation.start_simulation( - client_fn=client_fn, - num_clients=3, - config=fl.server.ServerConfig(num_rounds=1000), - strategy=Strategy(label), -) - -results_dir = Path("_static/results") -results_dir.mkdir(exist_ok=True) -np.save(str(results_dir / "hist.npy"), hist) diff --git a/examples/vertical-fl/task.py b/examples/vertical-fl/task.py deleted file mode 100644 index a3cd415229c5..000000000000 --- a/examples/vertical-fl/task.py +++ /dev/null @@ -1,90 +0,0 @@ -import torch.nn as nn -import numpy as np -import pandas as pd - - -def _bin_age(age_series): - bins = [-np.inf, 10, 40, np.inf] - labels = ["Child", "Adult", "Elderly"] - return ( - pd.cut(age_series, bins=bins, labels=labels, right=True) - .astype(str) - .replace("nan", "Unknown") - ) - - -def _extract_title(name_series): - 
titles = name_series.str.extract(" ([A-Za-z]+)\.", expand=False) - rare_titles = { - "Lady", - "Countess", - "Capt", - "Col", - "Don", - "Dr", - "Major", - "Rev", - "Sir", - "Jonkheer", - "Dona", - } - titles = titles.replace(list(rare_titles), "Rare") - titles = titles.replace({"Mlle": "Miss", "Ms": "Miss", "Mme": "Mrs"}) - return titles - - -def _create_features(df): - # Convert 'Age' to numeric, coercing errors to NaN - df["Age"] = pd.to_numeric(df["Age"], errors="coerce") - df["Age"] = _bin_age(df["Age"]) - df["Cabin"] = df["Cabin"].str[0].fillna("Unknown") - df["Title"] = _extract_title(df["Name"]) - df.drop(columns=["PassengerId", "Name", "Ticket"], inplace=True) - all_keywords = set(df.columns) - df = pd.get_dummies( - df, columns=["Sex", "Pclass", "Embarked", "Title", "Cabin", "Age"] - ) - return df, all_keywords - - -def get_partitions_and_label(): - df = pd.read_csv("_static/data/train.csv") - processed_df = df.dropna(subset=["Embarked", "Fare"]).copy() - processed_df, all_keywords = _create_features(processed_df) - raw_partitions = _partition_data(processed_df, all_keywords) - - partitions = [] - for partition in raw_partitions: - partitions.append(partition.drop("Survived", axis=1)) - return partitions, processed_df["Survived"].values - - -def _partition_data(df, all_keywords): - partitions = [] - keywords_sets = [{"Parch", "Cabin", "Pclass"}, {"Sex", "Title"}] - keywords_sets.append(all_keywords - keywords_sets[0] - keywords_sets[1]) - - for keywords in keywords_sets: - partitions.append( - df[ - list( - { - col - for col in df.columns - for kw in keywords - if kw in col or "Survived" in col - } - ) - ] - ) - - return partitions - - -class ClientModel(nn.Module): - def __init__(self, input_size): - super(ClientModel, self).__init__() - self.fc = nn.Linear(input_size, 4) - - def forward(self, x): - return self.fc(x) diff --git a/examples/vertical-fl/vertical_fl/client_app.py b/examples/vertical-fl/vertical_fl/client_app.py new file mode 100644 index 
000000000000..d517480da1d4 --- /dev/null +++ b/examples/vertical-fl/vertical_fl/client_app.py @@ -0,0 +1,41 @@ +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context +from sklearn.preprocessing import StandardScaler +import torch + +from vertical_fl.task import ClientModel, load_data + + +class FlowerClient(NumPyClient): + def __init__(self, v_split_id, data, lr): + self.v_split_id = v_split_id + self.data = torch.tensor(StandardScaler().fit_transform(data)).float() + self.model = ClientModel(input_size=self.data.shape[1]) + self.optimizer = torch.optim.SGD(self.model.parameters(), lr=lr) + + def get_parameters(self, config): + pass + + def fit(self, parameters, config): + embedding = self.model(self.data) + return [embedding.detach().numpy()], 1, {} + + def evaluate(self, parameters, config): + self.model.zero_grad() + embedding = self.model(self.data) + embedding.backward(torch.from_numpy(parameters[int(self.v_split_id)])) + self.optimizer.step() + return 0.0, 1, {} + + +def client_fn(context: Context): + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + partition, v_split_id = load_data(partition_id, num_partitions=num_partitions) + lr = context.run_config["learning-rate"] + return FlowerClient(v_split_id, partition, lr).to_client() + + +app = ClientApp( + client_fn=client_fn, +) diff --git a/examples/vertical-fl/vertical_fl/server_app.py b/examples/vertical-fl/vertical_fl/server_app.py new file mode 100644 index 000000000000..95620226b707 --- /dev/null +++ b/examples/vertical-fl/vertical_fl/server_app.py @@ -0,0 +1,25 @@ +from flwr.common import Context +from flwr.server import ServerApp, ServerAppComponents, ServerConfig + +from vertical_fl.strategy import Strategy +from vertical_fl.task import process_dataset + + +def server_fn(context: Context) -> ServerAppComponents: + """Construct components that set the ServerApp behaviour.""" + + # Get dataset + processed_df, _ = 
process_dataset() + + # Define the strategy + strategy = Strategy(processed_df["Survived"].values) + + # Construct ServerConfig + num_rounds = context.run_config["num-server-rounds"] + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Start Flower server +app = ServerApp(server_fn=server_fn) diff --git a/examples/vertical-fl/strategy.py b/examples/vertical-fl/vertical_fl/strategy.py similarity index 66% rename from examples/vertical-fl/strategy.py rename to examples/vertical-fl/vertical_fl/strategy.py index 0744fa83662a..9195416076b0 100644 --- a/examples/vertical-fl/strategy.py +++ b/examples/vertical-fl/vertical_fl/strategy.py @@ -17,37 +17,8 @@ def forward(self, x): class Strategy(fl.server.strategy.FedAvg): - def __init__( - self, - labels, - *, - fraction_fit=1, - fraction_evaluate=1, - min_fit_clients=2, - min_evaluate_clients=2, - min_available_clients=2, - evaluate_fn=None, - on_fit_config_fn=None, - on_evaluate_config_fn=None, - accept_failures=True, - initial_parameters=None, - fit_metrics_aggregation_fn=None, - evaluate_metrics_aggregation_fn=None, - ) -> None: - super().__init__( - fraction_fit=fraction_fit, - fraction_evaluate=fraction_evaluate, - min_fit_clients=min_fit_clients, - min_evaluate_clients=min_evaluate_clients, - min_available_clients=min_available_clients, - evaluate_fn=evaluate_fn, - on_fit_config_fn=on_fit_config_fn, - on_evaluate_config_fn=on_evaluate_config_fn, - accept_failures=accept_failures, - initial_parameters=initial_parameters, - fit_metrics_aggregation_fn=fit_metrics_aggregation_fn, - evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation_fn, - ) + def __init__(self, labels, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) self.model = ServerModel(12) self.initial_parameters = ndarrays_to_parameters( [val.cpu().numpy() for _, val in self.model.state_dict().items()] diff --git a/examples/vertical-fl/vertical_fl/task.py 
b/examples/vertical-fl/vertical_fl/task.py new file mode 100644 index 000000000000..8e76d9419a8a --- /dev/null +++ b/examples/vertical-fl/vertical_fl/task.py @@ -0,0 +1,139 @@ +from pathlib import Path +from logging import WARN +import torch.nn as nn +import numpy as np +import pandas as pd +import torch.nn as nn +from flwr.common.logger import log + +from datasets import Dataset +from flwr_datasets.partitioner import IidPartitioner + +NUM_VERTICAL_SPLITS = 3 + + +def _bin_age(age_series): + bins = [-np.inf, 10, 40, np.inf] + labels = ["Child", "Adult", "Elderly"] + return ( + pd.cut(age_series, bins=bins, labels=labels, right=True) + .astype(str) + .replace("nan", "Unknown") + ) + + +def _extract_title(name_series): + titles = name_series.str.extract(" ([A-Za-z]+)\.", expand=False) + rare_titles = { + "Lady", + "Countess", + "Capt", + "Col", + "Don", + "Dr", + "Major", + "Rev", + "Sir", + "Jonkheer", + "Dona", + } + titles = titles.replace(list(rare_titles), "Rare") + titles = titles.replace({"Mlle": "Miss", "Ms": "Miss", "Mme": "Mrs"}) + return titles + + +def _create_features(df): + # Convert 'Age' to numeric, coercing errors to NaN + df["Age"] = pd.to_numeric(df["Age"], errors="coerce") + df["Age"] = _bin_age(df["Age"]) + df["Cabin"] = df["Cabin"].str[0].fillna("Unknown") + df["Title"] = _extract_title(df["Name"]) + df.drop(columns=["PassengerId", "Name", "Ticket"], inplace=True) + all_keywords = set(df.columns) + df = pd.get_dummies( + df, columns=["Sex", "Pclass", "Embarked", "Title", "Cabin", "Age"] + ) + return df, all_keywords + + +def process_dataset(): + + df = pd.read_csv(Path(__file__).parents[1] / "data/train.csv") + processed_df = df.dropna(subset=["Embarked", "Fare"]).copy() + return _create_features(processed_df) + + +def load_data(partition_id: int, num_partitions: int): + """Partition the data vertically and then horizontally. + + We create three sets of features representing three types of nodes participating in + the federation. 
+ + [{'Cabin', 'Parch', 'Pclass'}, {'Sex', 'Title'}, {'Age', 'Embarked', 'Fare', + 'SibSp', 'Survived'}] + + Once the whole dataset is split vertically and a set of features is selected based + on mod(partition_id, 3), it is split horizontally into `ceil(num_partitions/3)` + partitions. This function returns the partition with index `partition_id % 3`. + """ + + if num_partitions != NUM_VERTICAL_SPLITS: + log( + WARN, + "To run this example with num_partitions other than 3, you need to update how " + "the Vertical FL training is performed. This is because the shapes of the " + "gradients migh not be the same along the first dimension.", + ) + + # Read whole dataset and process + processed_df, features_set = process_dataset() + + # Vertical Split and select + v_partitions = _partition_data_vertically(processed_df, features_set) + v_split_id = np.mod(partition_id, NUM_VERTICAL_SPLITS) + v_partition = v_partitions[v_split_id] + + # Comvert to HuggingFace dataset + dataset = Dataset.from_pandas(v_partition) + + # Split horizontally with Flower Dataset partitioner + num_h_partitions = int(np.ceil(num_partitions / NUM_VERTICAL_SPLITS)) + partitioner = IidPartitioner(num_partitions=num_h_partitions) + partitioner.dataset = dataset + + # Extract partition of the `ClientApp` calling this function + partition = partitioner.load_partition(partition_id % num_h_partitions) + partition.remove_columns(["Survived"]) + + return partition.to_pandas(), v_split_id + + +def _partition_data_vertically(df, all_keywords): + partitions = [] + keywords_sets = [{"Parch", "Cabin", "Pclass"}, {"Sex", "Title"}] + keywords_sets.append(all_keywords - keywords_sets[0] - keywords_sets[1]) + + for keywords in keywords_sets: + partitions.append( + df[ + list( + { + col + for col in df.columns + for kw in keywords + if kw in col or "Survived" in col + } + ) + ] + ) + + return partitions + + +class ClientModel(nn.Module): + def __init__(self, input_size): + super().__init__() + self.fc = 
nn.Linear(input_size, 4) + + def forward(self, x): + return self.fc(x) diff --git a/examples/vit-finetune/client.py b/examples/vit-finetune/client.py deleted file mode 100644 index bf91fa0c4328..000000000000 --- a/examples/vit-finetune/client.py +++ /dev/null @@ -1,80 +0,0 @@ -import torch -from torch.utils.data import DataLoader - -import flwr -from flwr.client import NumPyClient -from dataset import apply_transforms, get_dataset_with_partitions -from model import get_model, set_parameters, train - - -class FedViTClient(NumPyClient): - def __init__(self, trainset): - self.trainset = trainset - self.model = get_model() - - # Determine device - self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - self.model.to(self.device) # send model to device - - def set_for_finetuning(self): - """Freeze all parameter except those in the final head. - - Only output MLP will be updated by the client and therefore, the only part of - the model that will be federated (hence, communicated back to the server for - aggregation.) 
- """ - - # Disable gradients for everything - self.model.requires_grad_(False) - # Now enable just for output head - self.model.heads.requires_grad_(True) - - def get_parameters(self, config): - """Get locally updated parameters.""" - finetune_layers = self.model.heads - return [val.cpu().numpy() for _, val in finetune_layers.state_dict().items()] - - def fit(self, parameters, config): - set_parameters(self.model, parameters) - - # Get some info from the config - # Get batchsize and LR set from server - batch_size = config["batch_size"] - lr = config["lr"] - - trainloader = DataLoader( - self.trainset, batch_size=batch_size, num_workers=2, shuffle=True - ) - - # Set optimizer - optimizer = torch.optim.Adam(self.model.parameters(), lr=lr) - # Train locally - avg_train_loss = train( - self.model, trainloader, optimizer, epochs=1, device=self.device - ) - # Return locally-finetuned part of the model - return ( - self.get_parameters(config={}), - len(trainloader.dataset), - {"train_loss": avg_train_loss}, - ) - - -# Downloads and partition dataset -federated_ox_flowers, _ = get_dataset_with_partitions(num_partitions=20) - - -def client_fn(cid: str): - """Return a FedViTClient that trains with the cid-th data partition.""" - - trainset_for_this_client = federated_ox_flowers.load_partition(int(cid), "train") - - trainset = trainset_for_this_client.with_transform(apply_transforms) - - return FedViTClient(trainset).to_client() - - -# To be used with Flower Next -app = flwr.client.ClientApp( - client_fn=client_fn, -) diff --git a/examples/vit-finetune/dataset.py b/examples/vit-finetune/dataset.py deleted file mode 100644 index 42e0af560a17..000000000000 --- a/examples/vit-finetune/dataset.py +++ /dev/null @@ -1,52 +0,0 @@ -from torchvision.transforms import ( - Compose, - Normalize, - ToTensor, - RandomResizedCrop, - Resize, - CenterCrop, -) - -from flwr_datasets import FederatedDataset - - -def get_dataset_with_partitions(num_partitions: int): - """Get Oxford Flowers 
datasets and partition it. - - Return partitioned dataset as well as the whole test set. - """ - - # Get Oxford Flowers-102 and divide it into 20 IID partitions - ox_flowers_fds = FederatedDataset( - dataset="nelorth/oxford-flowers", partitioners={"train": num_partitions} - ) - - centralized_testset = ox_flowers_fds.load_split("test") - return ox_flowers_fds, centralized_testset - - -def apply_eval_transforms(batch): - """Apply a very standard set of image transforms.""" - transforms = Compose( - [ - Resize((256, 256)), - CenterCrop((224, 224)), - ToTensor(), - Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), - ] - ) - batch["image"] = [transforms(img) for img in batch["image"]] - return batch - - -def apply_transforms(batch): - """Apply a very standard set of image transforms.""" - transforms = Compose( - [ - RandomResizedCrop((224, 224)), - ToTensor(), - Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), - ] - ) - batch["image"] = [transforms(img) for img in batch["image"]] - return batch diff --git a/examples/vit-finetune/main.py b/examples/vit-finetune/main.py deleted file mode 100644 index c629a6f68980..000000000000 --- a/examples/vit-finetune/main.py +++ /dev/null @@ -1,57 +0,0 @@ -import argparse - -import flwr as fl -import matplotlib.pyplot as plt - -from server import strategy -from client import client_fn - -parser = argparse.ArgumentParser( - description="Finetuning of a ViT with Flower Simulation." -) - -parser.add_argument( - "--num-rounds", - type=int, - default=20, - help="Number of rounds.", -) - - -def main(): - args = parser.parse_args() - - # To control the degree of parallelism - # With default settings in this example, - # each client should take just ~1GB of VRAM. 
- client_resources = { - "num_cpus": 4, - "num_gpus": 0.2, - } - - # Launch simulation - history = fl.simulation.start_simulation( - client_fn=client_fn, - num_clients=20, - client_resources=client_resources, - config=fl.server.ServerConfig(num_rounds=args.num_rounds), - strategy=strategy, - ) - - print(history) - - # Basic plotting - global_accuracy_centralised = history.metrics_centralized["accuracy"] - round = [int(data[0]) for data in global_accuracy_centralised] - acc = [100.0 * data[1] for data in global_accuracy_centralised] - plt.plot(round, acc) - plt.xticks(round) - plt.grid() - plt.ylabel("Accuracy (%)") - plt.xlabel("Round") - plt.title("Federated finetuning of ViT for Flowers-102") - plt.savefig("central_evaluation.png") - - -if __name__ == "__main__": - main() diff --git a/examples/vit-finetune/model.py b/examples/vit-finetune/model.py deleted file mode 100644 index ca7dc1cd9864..000000000000 --- a/examples/vit-finetune/model.py +++ /dev/null @@ -1,71 +0,0 @@ -from collections import OrderedDict - -import torch -from torchvision.models import vit_b_16, ViT_B_16_Weights - - -def get_model(): - """Return a pretrained ViT with all layers frozen except output head.""" - - # Instantiate a pre-trained ViT-B on ImageNet - model = vit_b_16(weights=ViT_B_16_Weights.IMAGENET1K_V1) - - # We're going to federated the finetuning of this model - # using the Oxford Flowers-102 dataset. One easy way to achieve - # this is by re-initializing the output block of the ViT so it - # outputs 102 clases instead of the default 1k - in_features = model.heads[-1].in_features - model.heads[-1] = torch.nn.Linear(in_features, 102) - - # Disable gradients for everything - model.requires_grad_(False) - # Now enable just for output head - model.heads.requires_grad_(True) - - return model - - -def set_parameters(model, parameters): - """Apply the parameters to the model. - - Recall this example only federates the head of the ViT so that's the only part of - the model we need to load. 
- """ - finetune_layers = model.heads - params_dict = zip(finetune_layers.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) - finetune_layers.load_state_dict(state_dict, strict=True) - - -def train(net, trainloader, optimizer, epochs, device): - """Train the model on the training set.""" - criterion = torch.nn.CrossEntropyLoss() - net.train() - avg_loss = 0 - # A very standard training loop for image classification - for _ in range(epochs): - for batch in trainloader: - images, labels = batch["image"].to(device), batch["label"].to(device) - optimizer.zero_grad() - loss = criterion(net(images), labels) - avg_loss += loss.item() / labels.shape[0] - loss.backward() - optimizer.step() - - return avg_loss / len(trainloader) - - -def test(net, testloader, device: str): - """Validate the network on the entire test set.""" - criterion = torch.nn.CrossEntropyLoss() - correct, loss = 0, 0.0 - net.eval() - with torch.no_grad(): - for data in testloader: - images, labels = data["image"].to(device), data["label"].to(device) - outputs = net(images) - loss += criterion(outputs, labels).item() - _, predicted = torch.max(outputs.data, 1) - correct += (predicted == labels).sum().item() - accuracy = correct / len(testloader.dataset) - return loss, accuracy diff --git a/examples/vit-finetune/pyproject.toml b/examples/vit-finetune/pyproject.toml deleted file mode 100644 index d014d6b6fb2a..000000000000 --- a/examples/vit-finetune/pyproject.toml +++ /dev/null @@ -1,17 +0,0 @@ -[build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" - -[tool.poetry] -name = "vit-finetune" -version = "0.1.0" -description = "FL finetuning of a Vision Transformer with Flower." 
-authors = ["The Flower Authors "] - -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = { extras = ["simulation"], version = ">=1.0,<2.0" } -flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } -torch = "2.2.1" -torchvision = "0.17.1" -matplotlib = "3.8.3" diff --git a/examples/vit-finetune/requirements.txt b/examples/vit-finetune/requirements.txt deleted file mode 100644 index 3692be0d6c2c..000000000000 --- a/examples/vit-finetune/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -flwr[simulation]>=1.0, <2.0 -flwr-datasets[vision]>=0.0.2, <1.0.0 -matplotlib==3.8.3 -torch==2.2.1 -torchvision==0.17.1 \ No newline at end of file diff --git a/examples/vit-finetune/server.py b/examples/vit-finetune/server.py deleted file mode 100644 index 698bcd45cece..000000000000 --- a/examples/vit-finetune/server.py +++ /dev/null @@ -1,61 +0,0 @@ -import torch -from datasets import Dataset -from torch.utils.data import DataLoader -import flwr as fl - -from dataset import apply_eval_transforms, get_dataset_with_partitions -from model import get_model, set_parameters, test - - -def fit_config(server_round: int): - """Return a configuration with static batch size and (local) epochs.""" - config = { - "lr": 0.01, # Learning rate used by clients - "batch_size": 32, # Batch size to use by clients during fit() - } - return config - - -def get_evaluate_fn( - centralized_testset: Dataset, -): - """Return an evaluation function for centralized evaluation.""" - - def evaluate(server_round, parameters, config): - """Use the entire Oxford Flowers-102 test set for evaluation.""" - - # Determine device - device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - - model = get_model() - set_parameters(model, parameters) - model.to(device) - - # Apply transform to dataset - testset = centralized_testset.with_transform(apply_eval_transforms) - - testloader = DataLoader(testset, batch_size=128) - # Run evaluation - loss, accuracy = test(model, testloader, 
device=device) - - return loss, {"accuracy": accuracy} - - return evaluate - - -# Downloads and partition dataset -_, centralized_testset = get_dataset_with_partitions(num_partitions=20) - -# Configure the strategy -strategy = fl.server.strategy.FedAvg( - fraction_fit=0.5, # Sample 50% of available clients for training each round - fraction_evaluate=0.0, # No federated evaluation - on_fit_config_fn=fit_config, - evaluate_fn=get_evaluate_fn(centralized_testset), # Global evaluation function -) - -# To be used with Flower Next -app = fl.server.ServerApp( - config=fl.server.ServerConfig(num_rounds=3), - strategy=strategy, -) diff --git a/examples/whisper-federated-finetuning/README.md b/examples/whisper-federated-finetuning/README.md index ddebe51247b2..cfd0db842bae 100644 --- a/examples/whisper-federated-finetuning/README.md +++ b/examples/whisper-federated-finetuning/README.md @@ -1,3 +1,9 @@ +--- +tags: [finetuning, speech, transformers] +dataset: [SpeechCommands] +framework: [transformers, whisper] +--- + # On-device Federated Finetuning for Speech Classification This example demonstrates how to, from a pre-trained [Whisper](https://openai.com/research/whisper) model, finetune it for the downstream task of keyword spotting. We'll be implementing a federated downstream finetuning pipeline using Flower involving a total of 100 clients. As for the downstream dataset, we'll be using the [Google Speech Commands](https://huggingface.co/datasets/speech_commands) dataset for keyword spotting. We'll take the encoder part of the [Whisper-tiny](https://huggingface.co/openai/whisper-tiny) model, freeze its parameters, and learn a lightweight classification (\<800K parameters !!) head to correctly classify a spoken word. 
diff --git a/examples/whisper-federated-finetuning/centralised.py b/examples/whisper-federated-finetuning/centralised.py index 6af591a7502b..c0e3d60a0697 100644 --- a/examples/whisper-federated-finetuning/centralised.py +++ b/examples/whisper-federated-finetuning/centralised.py @@ -1,19 +1,19 @@ import argparse -from datasets import load_dataset -from transformers import WhisperForConditionalGeneration, WhisperProcessor +import random + +import numpy as np import torch +from datasets import concatenate_datasets, load_dataset from torch.utils.data import DataLoader, WeightedRandomSampler -import numpy as np -from datasets import concatenate_datasets -import random +from transformers import WhisperForConditionalGeneration, WhisperProcessor from utils import ( - get_model, - train_one_epoch, eval_model, - prepare_silences_dataset, get_encoding_fn, + get_model, + prepare_silences_dataset, remove_cols, + train_one_epoch, ) random.seed(1989) diff --git a/examples/whisper-federated-finetuning/client.py b/examples/whisper-federated-finetuning/client.py index d3bb217933f8..d1da5c13ecf8 100644 --- a/examples/whisper-federated-finetuning/client.py +++ b/examples/whisper-federated-finetuning/client.py @@ -1,19 +1,20 @@ import argparse -import torch + import flwr as fl import numpy as np +import torch +from datasets import concatenate_datasets, load_dataset, load_from_disk from torch.utils.data import DataLoader, WeightedRandomSampler -from datasets import load_dataset, load_from_disk, concatenate_datasets from transformers import WhisperProcessor from utils import ( + construct_client_mapping, + get_encoding_fn, get_model, + prepare_silences_dataset, + remove_cols, set_params, train_one_epoch, - remove_cols, - prepare_silences_dataset, - construct_client_mapping, - get_encoding_fn, ) parser = argparse.ArgumentParser(description="Flower+Whisper") diff --git a/examples/whisper-federated-finetuning/pyproject.toml b/examples/whisper-federated-finetuning/pyproject.toml index 
27a89578c5a0..3d7bb023537c 100644 --- a/examples/whisper-federated-finetuning/pyproject.toml +++ b/examples/whisper-federated-finetuning/pyproject.toml @@ -9,7 +9,7 @@ description = "On-device Federated Downstreaming for Speech Classification" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr = { extras = ["simulation"], version = ">=1.0,<2.0" } transformers = "4.32.1" tokenizers = "0.13.3" diff --git a/examples/whisper-federated-finetuning/server.py b/examples/whisper-federated-finetuning/server.py index 101d43f04ec2..060b162240e5 100644 --- a/examples/whisper-federated-finetuning/server.py +++ b/examples/whisper-federated-finetuning/server.py @@ -1,13 +1,12 @@ import argparse +import flwr as fl import torch from datasets import load_dataset -from transformers import WhisperProcessor from torch.utils.data import DataLoader -import flwr as fl - -from utils import eval_model, get_model, set_params, remove_cols, get_encoding_fn +from transformers import WhisperProcessor +from utils import eval_model, get_encoding_fn, get_model, remove_cols, set_params parser = argparse.ArgumentParser(description="Flower+Whisper") parser.add_argument("--num_rounds", type=int, default=5, help="Number of FL rounds.") diff --git a/examples/whisper-federated-finetuning/sim.py b/examples/whisper-federated-finetuning/sim.py index c04f768bb24a..750a7f705251 100644 --- a/examples/whisper-federated-finetuning/sim.py +++ b/examples/whisper-federated-finetuning/sim.py @@ -1,11 +1,10 @@ import argparse +import flwr as fl import torch from datasets import load_dataset from transformers import WhisperProcessor -import flwr as fl - from client import get_client_fn from server import fit_config, get_evaluate_fn from utils import construct_client_mapping, get_encoding_fn diff --git a/examples/whisper-federated-finetuning/utils.py b/examples/whisper-federated-finetuning/utils.py index 117cf7100ddd..3bae730790a0 100644 --- 
a/examples/whisper-federated-finetuning/utils.py +++ b/examples/whisper-federated-finetuning/utils.py @@ -1,15 +1,13 @@ -from tqdm import tqdm -import torch import random -from datasets import Dataset -import numpy as np from collections import OrderedDict -from transformers import WhisperForConditionalGeneration - from typing import List import flwr as fl - +import numpy as np +import torch +from datasets import Dataset +from tqdm import tqdm +from transformers import WhisperForConditionalGeneration remove_cols = ["file", "audio", "label", "is_unknown", "speaker_id", "utterance_id"] diff --git a/examples/xgboost-comprehensive/README.md b/examples/xgboost-comprehensive/README.md index dc6d7e3872d6..f65f2dbeb645 100644 --- a/examples/xgboost-comprehensive/README.md +++ b/examples/xgboost-comprehensive/README.md @@ -1,20 +1,24 @@ -# Flower Example using XGBoost (Comprehensive) +--- +tags: [advanced, classification, tabular] +dataset: [HIGGS] +framework: [xgboost] +--- + +# Federated Learning with XGBoost and Flower (Comprehensive Example) This example demonstrates a comprehensive federated learning setup using Flower with XGBoost. We use [HIGGS](https://archive.ics.uci.edu/dataset/280/higgs) dataset to perform a binary classification task. This examples uses [Flower Datasets](https://flower.ai/docs/datasets/) to retrieve, partition and preprocess the data for each Flower client. It differs from the [xgboost-quickstart](https://github.com/adap/flower/tree/main/examples/xgboost-quickstart) example in the following ways: -- Arguments parsers of server and clients for hyperparameters selection. - Customised FL settings. -- Customised number of partitions. - Customised partitioner type (uniform, linear, square, exponential). - Centralised/distributed evaluation. - Bagging/cyclic training methods. -- You can run it with Flower Simulation +- Support of scaled learning rate. 
## Training Strategies -This example provides two training strategies, **bagging aggregation** and **cyclic training**. +This example provides two training strategies, [**bagging aggregation**](https://flower.ai/docs/framework/tutorial-quickstart-xgboost.html#tree-based-bagging-aggregation) ([docs](https://flower.ai/docs/framework/ref-api/flwr.server.strategy.FedXgbBagging.html)) and [**cyclic training**](https://flower.ai/docs/framework/tutorial-quickstart-xgboost.html#cyclic_training) ([docs](https://flower.ai/docs/framework/ref-api/flwr.server.strategy.FedXgbCyclic.html)). ### Bagging Aggregation @@ -37,127 +41,86 @@ Instead of aggregating multiple clients, there is only one single client participating in the training per round in the cyclic training scenario. The trained local XGBoost trees will be passed to the next client as an initialised model for next round's boosting. -## Project Setup - -Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: - -```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/xgboost-comprehensive . 
&& rm -rf flower && cd xgboost-comprehensive -``` - -This will create a new directory called `xgboost-comprehensive` containing the following files: - -``` --- README.md <- Your're reading this right now --- server.py <- Defines the server-side logic --- client.py <- Defines the client-side logic --- dataset.py <- Defines the functions of data loading and partitioning --- utils.py <- Defines the arguments parser and hyper-parameters --- client_utils.py <- Defines the client utility functions --- server_utils.py <- Defines the server utility functions --- sim.py <- Example of using Flower simulation --- run_bagging.sh <- Commands to run bagging experiments --- run_cyclic.sh <- Commands to run cyclic experiments --- pyproject.toml <- Example dependencies (if you use Poetry) --- requirements.txt <- Example dependencies -``` - -### Installing Dependencies - -Project dependencies (such as `xgboost` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. - -#### Poetry +## Set up the project -```shell -poetry install -poetry shell -``` +### Clone the project -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: +Start by cloning the example project: ```shell -poetry run python -c "import flwr" +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/xgboost-comprehensive . \ + && rm -rf _tmp \ + && cd xgboost-comprehensive ``` -If you don't see any errors you're good to go! 
- -#### pip - -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. +This will create a new directory called `xgboost-comprehensive` with the following structure: ```shell -pip install -r requirements.txt +xgboost-comprehensive +├── xgboost_comprehensive +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -## Run Federated Learning with XGBoost and Flower +### Install dependencies and project -You can run this example in two ways: either by manually launching the server, and then several clients that connect to it; or by launching a Flower simulation. Both run the same workload, yielding identical results. The former is ideal for deployments on different machines, while the latter makes it easy to simulate large client cohorts in a resource-aware manner. You can read more about how Flower Simulation works in the [Documentation](https://flower.ai/docs/framework/how-to-run-simulations.html). The commands shown below assume you have activated your environment (if you decide to use Poetry, you can activate it via `poetry shell`). +Install the dependencies defined in `pyproject.toml` as well as the `xgboost_comprehensive` package. -### Independent Client/Server Setup - -We have two scripts to run bagging and cyclic (client-by-client) experiments. -The included `run_bagging.sh` or `run_cyclic.sh` will start the Flower server (using `server.py`), -sleep for 15 seconds to ensure that the server is up, -and then start 5 Flower clients (using `client.py`) with a small subset of the data from exponential partition distribution. - -You can simply start everything in a terminal as follows: - -```shell -./run_bagging.sh +```bash +pip install -e . 
``` -Or - -```shell -./run_cyclic.sh -``` +## Run the project -The script starts processes in the background so that you don't have to open six terminal windows. +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -You can also run the example without the scripts. First, launch the server: +### Run with the Simulation Engine ```bash -python server.py --train-method=bagging/cyclic --pool-size=N --num-clients-per-round=N +flwr run . ``` -Then run at least two clients (each on a new terminal or computer in your network) passing different `PARTITION_ID` and all using the same `N` (denoting the total number of clients or data partitions): +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: ```bash -python client.py --train-method=bagging/cyclic --partition-id=PARTITION_ID --num-partitions=N -``` - -### Flower Simulation Setup - -We also provide an example code (`sim.py`) to use the simulation capabilities of Flower to simulate federated XGBoost training on either a single machine or a cluster of machines. With default arguments, each client will use 2 CPUs. - -To run bagging aggregation with 5 clients for 30 rounds evaluated on centralised test set: +# To run bagging aggregation for 5 rounds evaluated on centralised test set +flwr run . --run-config "train-method='bagging' num-server-rounds=5 centralised-eval=true" -```shell -python sim.py --train-method=bagging --pool-size=5 --num-clients-per-round=5 --num-rounds=30 --centralised-eval +# To run cyclic training with linear partitioner type evaluated on centralised test set: +flwr run . 
--run-config "train-method='cyclic' partitioner-type='linear' centralised-eval-client=true" ``` -To run cyclic training with 5 clients for 30 rounds evaluated on centralised test set: +> \[!TIP\] +> For a more detailed walk-through check our [XGBoost tutorial](https://flower.ai/docs/framework/tutorial-quickstart-xgboost.html). +> To extend the aggregation strategy for saving, logging, or other functions, please refer to our [advanced-pytorch](https://github.com/adap/flower/tree/main/examples/advanced-pytorch) example. -```shell -python sim.py --train-method=cyclic --pool-size=5 --num-rounds=30 --centralised-eval-client -``` +### Run with the Deployment Engine -In addition, we provide more options to customise the experimental settings, including data partitioning and centralised/distributed evaluation (see `utils.py`). -Check the [tutorial](https://flower.ai/docs/framework/tutorial-quickstart-xgboost.html) for a detailed explanation. +> \[!NOTE\] +> An update to this example will show how to run this Flower application with the Deployment Engine and TLS certificates, or with Docker. -### Expected Experimental Results +## Expected Experimental Results -#### Bagging aggregation experiment +### Bagging aggregation experiment -![](_static/xgboost_flower_auc_bagging.png) +

+XGBoost with Flower and Bagging strategy +
The figure above shows the centralised tested AUC performance over FL rounds with bagging aggregation strategy on 4 experimental settings. One can see that all settings obtain stable performance boost over FL rounds (especially noticeable at the start of training). As expected, uniform client distribution shows higher AUC values than square/exponential setup. -#### Cyclic training experiment +### Cyclic training experiment -![](_static/xgboost_flower_auc_cyclic.png) +
+XGBoost with Flower and Cyclic strategy +
This figure shows the cyclic training results on centralised test set. The models with cyclic training requires more rounds to converge diff --git a/examples/xgboost-comprehensive/client.py b/examples/xgboost-comprehensive/client.py deleted file mode 100644 index 2d54c3fd63c7..000000000000 --- a/examples/xgboost-comprehensive/client.py +++ /dev/null @@ -1,82 +0,0 @@ -import warnings -from logging import INFO - -import flwr as fl -from flwr_datasets import FederatedDataset -from flwr.common.logger import log - -from dataset import ( - instantiate_partitioner, - train_test_split, - transform_dataset_to_dmatrix, - resplit, -) -from utils import client_args_parser, BST_PARAMS, NUM_LOCAL_ROUND -from client_utils import XgbClient - - -warnings.filterwarnings("ignore", category=UserWarning) - - -# Parse arguments for experimental settings -args = client_args_parser() - -# Train method (bagging or cyclic) -train_method = args.train_method - -# Load (HIGGS) dataset and conduct partitioning -# Instantiate partitioner from ["uniform", "linear", "square", "exponential"] -partitioner = instantiate_partitioner( - partitioner_type=args.partitioner_type, num_partitions=args.num_partitions -) -fds = FederatedDataset( - dataset="jxie/higgs", - partitioners={"train": partitioner}, - resplitter=resplit, -) - -# Load the partition for this `partition_id` -log(INFO, "Loading partition...") -partition = fds.load_partition(partition_id=args.partition_id, split="train") -partition.set_format("numpy") - -if args.centralised_eval: - # Use centralised test set for evaluation - train_data = partition - valid_data = fds.load_split("test") - valid_data.set_format("numpy") - num_train = train_data.shape[0] - num_val = valid_data.shape[0] -else: - # Train/test splitting - train_data, valid_data, num_train, num_val = train_test_split( - partition, test_fraction=args.test_fraction, seed=args.seed - ) - -# Reformat data to DMatrix for xgboost -log(INFO, "Reformatting data...") -train_dmatrix = 
transform_dataset_to_dmatrix(train_data) -valid_dmatrix = transform_dataset_to_dmatrix(valid_data) - -# Hyper-parameters for xgboost training -num_local_round = NUM_LOCAL_ROUND -params = BST_PARAMS - -# Setup learning rate -if args.train_method == "bagging" and args.scaled_lr: - new_lr = params["eta"] / args.num_partitions - params.update({"eta": new_lr}) - -# Start Flower client -fl.client.start_client( - server_address="127.0.0.1:8080", - client=XgbClient( - train_dmatrix, - valid_dmatrix, - num_train, - num_val, - num_local_round, - params, - train_method, - ), -) diff --git a/examples/xgboost-comprehensive/dataset.py b/examples/xgboost-comprehensive/dataset.py deleted file mode 100644 index 94959925f833..000000000000 --- a/examples/xgboost-comprehensive/dataset.py +++ /dev/null @@ -1,73 +0,0 @@ -import xgboost as xgb -from typing import Union -from datasets import Dataset, DatasetDict, concatenate_datasets -from flwr_datasets.partitioner import ( - IidPartitioner, - LinearPartitioner, - SquarePartitioner, - ExponentialPartitioner, -) - -CORRELATION_TO_PARTITIONER = { - "uniform": IidPartitioner, - "linear": LinearPartitioner, - "square": SquarePartitioner, - "exponential": ExponentialPartitioner, -} - - -def instantiate_partitioner(partitioner_type: str, num_partitions: int): - """Initialise partitioner based on selected partitioner type and number of - partitions.""" - partitioner = CORRELATION_TO_PARTITIONER[partitioner_type]( - num_partitions=num_partitions - ) - return partitioner - - -def train_test_split(partition: Dataset, test_fraction: float, seed: int): - """Split the data into train and validation set given split rate.""" - train_test = partition.train_test_split(test_size=test_fraction, seed=seed) - partition_train = train_test["train"] - partition_test = train_test["test"] - - num_train = len(partition_train) - num_test = len(partition_test) - - return partition_train, partition_test, num_train, num_test - - -def transform_dataset_to_dmatrix(data: 
Union[Dataset, DatasetDict]) -> xgb.core.DMatrix: - """Transform dataset to DMatrix format for xgboost.""" - x, y = separate_xy(data) - new_data = xgb.DMatrix(x, label=y) - return new_data - - -def separate_xy(data: Union[Dataset, DatasetDict]): - """Return outputs of x (data) and y (labels) .""" - x = data["inputs"] - y = data["label"] - return x, y - - -def resplit(dataset: DatasetDict) -> DatasetDict: - """Increase the quantity of centralised test samples from 500K to 1M.""" - return DatasetDict( - { - "train": dataset["train"].select( - range(0, dataset["train"].num_rows - 500_000) - ), - "test": concatenate_datasets( - [ - dataset["train"].select( - range( - dataset["train"].num_rows - 500_000, - dataset["train"].num_rows, - ) - ), - dataset["test"], - ] - ), - } - ) diff --git a/examples/xgboost-comprehensive/pyproject.toml b/examples/xgboost-comprehensive/pyproject.toml index 2d44c06d6e3f..3906f8bf3301 100644 --- a/examples/xgboost-comprehensive/pyproject.toml +++ b/examples/xgboost-comprehensive/pyproject.toml @@ -1,15 +1,55 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" - -[tool.poetry] -name = "xgboost-comprehensive" -version = "0.1.0" -description = "Federated XGBoost with Flower (comprehensive)" -authors = ["The Flower Authors "] - -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = { extras = ["simulation"], version = ">=1.7.0,<2.0" } -flwr-datasets = ">=0.1.0,<1.0.0" -xgboost = ">=2.0.0,<3.0.0" +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "xgboost_comprehensive" +version = "1.0.0" +description = "Federated Learning with XGBoost and Flower (Comprehensive Example)" +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]>=1.12.0", + "flwr-datasets>=0.3.0", + "xgboost>=2.0.0", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "xgboost_comprehensive.server_app:app" 
+clientapp = "xgboost_comprehensive.client_app:app" + +[tool.flwr.app.config] +# ServerApp +train-method = "bagging" # Choose from [bagging, cyclic] +num-server-rounds = 3 +fraction-fit = 1.0 +fraction-evaluate = 1.0 +centralised-eval = false + +# ClientApp +partitioner-type = "uniform" # Choose from [uniform, linear, square, exponential] +test-fraction = 0.2 +seed = 42 +centralised-eval-client = false +local-epochs = 1 +scaled-lr = false +params.objective = "binary:logistic" +params.eta = 0.1 # Learning rate +params.max-depth = 8 +params.eval-metric = "auc" +params.nthread = 16 +params.num-parallel-tree = 1 +params.subsample = 1 +params.tree-method = "hist" + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 5 +options.backend.client-resources.num-cpus = 2 diff --git a/examples/xgboost-comprehensive/requirements.txt b/examples/xgboost-comprehensive/requirements.txt deleted file mode 100644 index 16eb78f484e3..000000000000 --- a/examples/xgboost-comprehensive/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -flwr[simulation]>=1.7.0, <2.0 -flwr-datasets>=0.1.0, <1.0.0 -xgboost>=2.0.0, <3.0.0 diff --git a/examples/xgboost-comprehensive/run_bagging.sh b/examples/xgboost-comprehensive/run_bagging.sh deleted file mode 100755 index a6300b781a06..000000000000 --- a/examples/xgboost-comprehensive/run_bagging.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/ - -echo "Starting server" -python3 server.py --pool-size=5 --num-rounds=30 --num-clients-per-round=5 --centralised-eval & -sleep 30 # Sleep for 30s to give the server enough time to start - -for i in `seq 0 4`; do - echo "Starting client $i" - python3 client.py --partition-id=$i --num-partitions=5 --partitioner-type=exponential & -done - -# Enable CTRL+C to stop all background processes -trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM -# Wait for all background 
processes to complete -wait diff --git a/examples/xgboost-comprehensive/run_cyclic.sh b/examples/xgboost-comprehensive/run_cyclic.sh deleted file mode 100755 index 258bdf2fe0d8..000000000000 --- a/examples/xgboost-comprehensive/run_cyclic.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/ - -echo "Starting server" -python3 server.py --train-method=cyclic --pool-size=5 --num-rounds=100 & -sleep 15 # Sleep for 15s to give the server enough time to start - -for i in `seq 0 4`; do - echo "Starting client $i" - python3 client.py --partition-id=$i --train-method=cyclic --num-partitions=5 --partitioner-type=exponential --centralised-eval & -done - -# Enable CTRL+C to stop all background processes -trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM -# Wait for all background processes to complete -wait diff --git a/examples/xgboost-comprehensive/server.py b/examples/xgboost-comprehensive/server.py deleted file mode 100644 index 939819641438..000000000000 --- a/examples/xgboost-comprehensive/server.py +++ /dev/null @@ -1,76 +0,0 @@ -import warnings -from logging import INFO - -import flwr as fl -from flwr.common.logger import log -from flwr_datasets import FederatedDataset -from flwr.server.strategy import FedXgbBagging, FedXgbCyclic - -from utils import server_args_parser -from server_utils import ( - eval_config, - fit_config, - evaluate_metrics_aggregation, - get_evaluate_fn, - CyclicClientManager, -) -from dataset import resplit, transform_dataset_to_dmatrix - - -warnings.filterwarnings("ignore", category=UserWarning) - - -# Parse arguments for experimental settings -args = server_args_parser() -train_method = args.train_method -pool_size = args.pool_size -num_rounds = args.num_rounds -num_clients_per_round = args.num_clients_per_round -num_evaluate_clients = args.num_evaluate_clients -centralised_eval = args.centralised_eval - -# Load centralised test set -if centralised_eval: - fds = 
FederatedDataset( - dataset="jxie/higgs", partitioners={"train": 20}, resplitter=resplit - ) - log(INFO, "Loading centralised test set...") - test_set = fds.load_split("test") - test_set.set_format("numpy") - test_dmatrix = transform_dataset_to_dmatrix(test_set) - - -# Define strategy -if train_method == "bagging": - # Bagging training - strategy = FedXgbBagging( - evaluate_function=get_evaluate_fn(test_dmatrix) if centralised_eval else None, - fraction_fit=(float(num_clients_per_round) / pool_size), - min_fit_clients=num_clients_per_round, - min_available_clients=pool_size, - min_evaluate_clients=num_evaluate_clients if not centralised_eval else 0, - fraction_evaluate=1.0 if not centralised_eval else 0.0, - on_evaluate_config_fn=eval_config, - on_fit_config_fn=fit_config, - evaluate_metrics_aggregation_fn=( - evaluate_metrics_aggregation if not centralised_eval else None - ), - ) -else: - # Cyclic training - strategy = FedXgbCyclic( - fraction_fit=1.0, - min_available_clients=pool_size, - fraction_evaluate=1.0, - evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation, - on_evaluate_config_fn=eval_config, - on_fit_config_fn=fit_config, - ) - -# Start Flower server -fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=num_rounds), - strategy=strategy, - client_manager=CyclicClientManager() if train_method == "cyclic" else None, -) diff --git a/examples/xgboost-comprehensive/server_utils.py b/examples/xgboost-comprehensive/server_utils.py deleted file mode 100644 index 35a31bd9adac..000000000000 --- a/examples/xgboost-comprehensive/server_utils.py +++ /dev/null @@ -1,101 +0,0 @@ -from typing import Dict, List, Optional -from logging import INFO -import xgboost as xgb -from flwr.common.logger import log -from flwr.common import Parameters, Scalar -from flwr.server.client_manager import SimpleClientManager -from flwr.server.client_proxy import ClientProxy -from flwr.server.criterion import Criterion -from utils 
import BST_PARAMS - - -def eval_config(rnd: int) -> Dict[str, str]: - """Return a configuration with global epochs.""" - config = { - "global_round": str(rnd), - } - return config - - -def fit_config(rnd: int) -> Dict[str, str]: - """Return a configuration with global epochs.""" - config = { - "global_round": str(rnd), - } - return config - - -def evaluate_metrics_aggregation(eval_metrics): - """Return an aggregated metric (AUC) for evaluation.""" - total_num = sum([num for num, _ in eval_metrics]) - auc_aggregated = ( - sum([metrics["AUC"] * num for num, metrics in eval_metrics]) / total_num - ) - metrics_aggregated = {"AUC": auc_aggregated} - return metrics_aggregated - - -def get_evaluate_fn(test_data): - """Return a function for centralised evaluation.""" - - def evaluate_fn( - server_round: int, parameters: Parameters, config: Dict[str, Scalar] - ): - # If at the first round, skip the evaluation - if server_round == 0: - return 0, {} - else: - bst = xgb.Booster(params=BST_PARAMS) - for para in parameters.tensors: - para_b = bytearray(para) - - # Load global model - bst.load_model(para_b) - # Run evaluation - eval_results = bst.eval_set( - evals=[(test_data, "valid")], - iteration=bst.num_boosted_rounds() - 1, - ) - auc = round(float(eval_results.split("\t")[1].split(":")[1]), 4) - log(INFO, f"AUC = {auc} at round {server_round}") - - return 0, {"AUC": auc} - - return evaluate_fn - - -class CyclicClientManager(SimpleClientManager): - """Provides a cyclic client selection rule.""" - - def sample( - self, - num_clients: int, - min_num_clients: Optional[int] = None, - criterion: Optional[Criterion] = None, - ) -> List[ClientProxy]: - """Sample a number of Flower ClientProxy instances.""" - - # Block until at least num_clients are connected. 
- if min_num_clients is None: - min_num_clients = num_clients - self.wait_for(min_num_clients) - - # Sample clients which meet the criterion - available_cids = list(self.clients) - if criterion is not None: - available_cids = [ - cid for cid in available_cids if criterion.select(self.clients[cid]) - ] - - if num_clients > len(available_cids): - log( - INFO, - "Sampling failed: number of available clients" - " (%s) is less than number of requested clients (%s).", - len(available_cids), - num_clients, - ) - return [] - - # Return all available clients - return [self.clients[cid] for cid in available_cids] diff --git a/examples/xgboost-comprehensive/sim.py b/examples/xgboost-comprehensive/sim.py deleted file mode 100644 index c9481f1cdd5d..000000000000 --- a/examples/xgboost-comprehensive/sim.py +++ /dev/null @@ -1,188 +0,0 @@ -import warnings -from logging import INFO -import xgboost as xgb -from tqdm import tqdm - -import flwr as fl -from flwr_datasets import FederatedDataset -from flwr.common.logger import log -from flwr.server.strategy import FedXgbBagging, FedXgbCyclic - -from dataset import ( - instantiate_partitioner, - train_test_split, - transform_dataset_to_dmatrix, - separate_xy, - resplit, -) -from utils import ( - sim_args_parser, - NUM_LOCAL_ROUND, - BST_PARAMS, -) -from server_utils import ( - eval_config, - fit_config, - evaluate_metrics_aggregation, - get_evaluate_fn, - CyclicClientManager, -) -from client_utils import XgbClient - - -warnings.filterwarnings("ignore", category=UserWarning) - - -def get_client_fn( - train_data_list, valid_data_list, train_method, params, num_local_round -): - """Return a function to construct a client. - - The VirtualClientEngine will execute this function whenever a client is sampled by - the strategy to participate. 
- """ - - def client_fn(cid: str) -> fl.client.Client: - """Construct a FlowerClient with its own dataset partition.""" - x_train, y_train = train_data_list[int(cid)][0] - x_valid, y_valid = valid_data_list[int(cid)][0] - - # Reformat data to DMatrix - train_dmatrix = xgb.DMatrix(x_train, label=y_train) - valid_dmatrix = xgb.DMatrix(x_valid, label=y_valid) - - # Fetch the number of examples - num_train = train_data_list[int(cid)][1] - num_val = valid_data_list[int(cid)][1] - - # Create and return client - return XgbClient( - train_dmatrix, - valid_dmatrix, - num_train, - num_val, - num_local_round, - params, - train_method, - ) - - return client_fn - - -def main(): - # Parse arguments for experimental settings - args = sim_args_parser() - - # Load (HIGGS) dataset and conduct partitioning - partitioner = instantiate_partitioner( - partitioner_type=args.partitioner_type, num_partitions=args.pool_size - ) - fds = FederatedDataset( - dataset="jxie/higgs", - partitioners={"train": partitioner}, - resplitter=resplit, - ) - - # Load centralised test set - if args.centralised_eval or args.centralised_eval_client: - log(INFO, "Loading centralised test set...") - test_data = fds.load_split("test") - test_data.set_format("numpy") - num_test = test_data.shape[0] - test_dmatrix = transform_dataset_to_dmatrix(test_data) - - # Load partitions and reformat data to DMatrix for xgboost - log(INFO, "Loading client local partitions...") - train_data_list = [] - valid_data_list = [] - - # Load and process all client partitions. This upfront cost is amortized soon - # after the simulation begins since clients wont need to preprocess their partition. 
- for partition_id in tqdm(range(args.pool_size), desc="Extracting client partition"): - # Extract partition for client with partition_id - partition = fds.load_partition(partition_id=partition_id, split="train") - partition.set_format("numpy") - - if args.centralised_eval_client: - # Use centralised test set for evaluation - train_data = partition - num_train = train_data.shape[0] - x_test, y_test = separate_xy(test_data) - valid_data_list.append(((x_test, y_test), num_test)) - else: - # Train/test splitting - train_data, valid_data, num_train, num_val = train_test_split( - partition, test_fraction=args.test_fraction, seed=args.seed - ) - x_valid, y_valid = separate_xy(valid_data) - valid_data_list.append(((x_valid, y_valid), num_val)) - - x_train, y_train = separate_xy(train_data) - train_data_list.append(((x_train, y_train), num_train)) - - # Define strategy - if args.train_method == "bagging": - # Bagging training - strategy = FedXgbBagging( - evaluate_function=( - get_evaluate_fn(test_dmatrix) if args.centralised_eval else None - ), - fraction_fit=(float(args.num_clients_per_round) / args.pool_size), - min_fit_clients=args.num_clients_per_round, - min_available_clients=args.pool_size, - min_evaluate_clients=( - args.num_evaluate_clients if not args.centralised_eval else 0 - ), - fraction_evaluate=1.0 if not args.centralised_eval else 0.0, - on_evaluate_config_fn=eval_config, - on_fit_config_fn=fit_config, - evaluate_metrics_aggregation_fn=( - evaluate_metrics_aggregation if not args.centralised_eval else None - ), - ) - else: - # Cyclic training - strategy = FedXgbCyclic( - fraction_fit=1.0, - min_available_clients=args.pool_size, - fraction_evaluate=1.0, - evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation, - on_evaluate_config_fn=eval_config, - on_fit_config_fn=fit_config, - ) - - # Resources to be assigned to each virtual client - # In this example we use CPU by default - client_resources = { - "num_cpus": args.num_cpus_per_client, - "num_gpus": 
0.0, - } - - # Hyper-parameters for xgboost training - num_local_round = NUM_LOCAL_ROUND - params = BST_PARAMS - - # Setup learning rate - if args.train_method == "bagging" and args.scaled_lr: - new_lr = params["eta"] / args.pool_size - params.update({"eta": new_lr}) - - # Start simulation - fl.simulation.start_simulation( - client_fn=get_client_fn( - train_data_list, - valid_data_list, - args.train_method, - params, - num_local_round, - ), - num_clients=args.pool_size, - client_resources=client_resources, - config=fl.server.ServerConfig(num_rounds=args.num_rounds), - strategy=strategy, - client_manager=CyclicClientManager() if args.train_method == "cyclic" else None, - ) - - -if __name__ == "__main__": - main() diff --git a/examples/xgboost-comprehensive/utils.py b/examples/xgboost-comprehensive/utils.py deleted file mode 100644 index abc100da1ade..000000000000 --- a/examples/xgboost-comprehensive/utils.py +++ /dev/null @@ -1,180 +0,0 @@ -import argparse - - -# Hyper-parameters for xgboost training -NUM_LOCAL_ROUND = 1 -BST_PARAMS = { - "objective": "binary:logistic", - "eta": 0.1, # Learning rate - "max_depth": 8, - "eval_metric": "auc", - "nthread": 16, - "num_parallel_tree": 1, - "subsample": 1, - "tree_method": "hist", -} - - -def client_args_parser(): - """Parse arguments to define experimental settings on client side.""" - parser = argparse.ArgumentParser() - - parser.add_argument( - "--train-method", - default="bagging", - type=str, - choices=["bagging", "cyclic"], - help="Training methods selected from bagging aggregation or cyclic training.", - ) - parser.add_argument( - "--num-partitions", default=10, type=int, help="Number of partitions." 
- ) - parser.add_argument( - "--partitioner-type", - default="uniform", - type=str, - choices=["uniform", "linear", "square", "exponential"], - help="Partitioner types.", - ) - parser.add_argument( - "--partition-id", - default=0, - type=int, - help="Partition ID used for the current client.", - ) - parser.add_argument( - "--seed", default=42, type=int, help="Seed used for train/test splitting." - ) - parser.add_argument( - "--test-fraction", - default=0.2, - type=float, - help="Test fraction for train/test splitting.", - ) - parser.add_argument( - "--centralised-eval", - action="store_true", - help="Conduct evaluation on centralised test set (True), or on hold-out data (False).", - ) - parser.add_argument( - "--scaled-lr", - action="store_true", - help="Perform scaled learning rate based on the number of clients (True).", - ) - - args = parser.parse_args() - return args - - -def server_args_parser(): - """Parse arguments to define experimental settings on server side.""" - parser = argparse.ArgumentParser() - - parser.add_argument( - "--train-method", - default="bagging", - type=str, - choices=["bagging", "cyclic"], - help="Training methods selected from bagging aggregation or cyclic training.", - ) - parser.add_argument( - "--pool-size", default=2, type=int, help="Number of total clients." - ) - parser.add_argument( - "--num-rounds", default=5, type=int, help="Number of FL rounds." 
- ) - parser.add_argument( - "--num-clients-per-round", - default=2, - type=int, - help="Number of clients participate in training each round.", - ) - parser.add_argument( - "--num-evaluate-clients", - default=2, - type=int, - help="Number of clients selected for evaluation.", - ) - parser.add_argument( - "--centralised-eval", - action="store_true", - help="Conduct centralised evaluation (True), or client evaluation on hold-out data (False).", - ) - - args = parser.parse_args() - return args - - -def sim_args_parser(): - """Parse arguments to define experimental settings on server side.""" - parser = argparse.ArgumentParser() - - parser.add_argument( - "--train-method", - default="bagging", - type=str, - choices=["bagging", "cyclic"], - help="Training methods selected from bagging aggregation or cyclic training.", - ) - - # Server side - parser.add_argument( - "--pool-size", default=5, type=int, help="Number of total clients." - ) - parser.add_argument( - "--num-rounds", default=30, type=int, help="Number of FL rounds." - ) - parser.add_argument( - "--num-clients-per-round", - default=5, - type=int, - help="Number of clients participate in training each round.", - ) - parser.add_argument( - "--num-evaluate-clients", - default=5, - type=int, - help="Number of clients selected for evaluation.", - ) - parser.add_argument( - "--centralised-eval", - action="store_true", - help="Conduct centralised evaluation (True), or client evaluation on hold-out data (False).", - ) - parser.add_argument( - "--num-cpus-per-client", - default=2, - type=int, - help="Number of CPUs used for per client.", - ) - - # Client side - parser.add_argument( - "--partitioner-type", - default="uniform", - type=str, - choices=["uniform", "linear", "square", "exponential"], - help="Partitioner types.", - ) - parser.add_argument( - "--seed", default=42, type=int, help="Seed used for train/test splitting." 
- ) - parser.add_argument( - "--test-fraction", - default=0.2, - type=float, - help="Test fraction for train/test splitting.", - ) - parser.add_argument( - "--centralised-eval-client", - action="store_true", - help="Conduct evaluation on centralised test set (True), or on hold-out data (False).", - ) - parser.add_argument( - "--scaled-lr", - action="store_true", - help="Perform scaled learning rate based on the number of clients (True).", - ) - - args = parser.parse_args() - return args diff --git a/examples/xgboost-comprehensive/xgboost_comprehensive/__init__.py b/examples/xgboost-comprehensive/xgboost_comprehensive/__init__.py new file mode 100644 index 000000000000..1716a1a221d0 --- /dev/null +++ b/examples/xgboost-comprehensive/xgboost_comprehensive/__init__.py @@ -0,0 +1 @@ +"""xgboost_comprehensive: A Flower / XGBoost app.""" diff --git a/examples/xgboost-comprehensive/xgboost_comprehensive/client_app.py b/examples/xgboost-comprehensive/xgboost_comprehensive/client_app.py new file mode 100644 index 000000000000..d4217f5c3680 --- /dev/null +++ b/examples/xgboost-comprehensive/xgboost_comprehensive/client_app.py @@ -0,0 +1,165 @@ +"""xgboost_comprehensive: A Flower / XGBoost app.""" + +import warnings + +import xgboost as xgb +from xgboost_comprehensive.task import load_data, replace_keys + +from flwr.client import Client, ClientApp +from flwr.common import ( + Code, + EvaluateIns, + EvaluateRes, + FitIns, + FitRes, + Parameters, + Status, +) +from flwr.common.config import unflatten_dict +from flwr.common.context import Context + +warnings.filterwarnings("ignore", category=UserWarning) + + +# Define Flower-Xgb Client and client_fn +class XgbClient(Client): + def __init__( + self, + train_dmatrix, + valid_dmatrix, + num_train, + num_val, + num_local_round, + params, + train_method, + ): + self.train_dmatrix = train_dmatrix + self.valid_dmatrix = valid_dmatrix + self.num_train = num_train + self.num_val = num_val + self.num_local_round = num_local_round + 
self.params = params + self.train_method = train_method + + def _local_boost(self, bst_input): + # Update trees based on local training data. + for i in range(self.num_local_round): + bst_input.update(self.train_dmatrix, bst_input.num_boosted_rounds()) + + # Bagging: extract the last N=num_local_round trees for sever aggregation + # Cyclic: return the entire model + bst = ( + bst_input[ + bst_input.num_boosted_rounds() + - self.num_local_round : bst_input.num_boosted_rounds() + ] + if self.train_method == "bagging" + else bst_input + ) + + return bst + + def fit(self, ins: FitIns) -> FitRes: + global_round = int(ins.config["global_round"]) + if global_round == 1: + # First round local training + bst = xgb.train( + self.params, + self.train_dmatrix, + num_boost_round=self.num_local_round, + evals=[(self.valid_dmatrix, "validate"), (self.train_dmatrix, "train")], + ) + else: + bst = xgb.Booster(params=self.params) + global_model = bytearray(ins.parameters.tensors[0]) + + # Load global model into booster + bst.load_model(global_model) + + # Local training + bst = self._local_boost(bst) + + # Save model + local_model = bst.save_raw("json") + local_model_bytes = bytes(local_model) + + return FitRes( + status=Status( + code=Code.OK, + message="OK", + ), + parameters=Parameters(tensor_type="", tensors=[local_model_bytes]), + num_examples=self.num_train, + metrics={}, + ) + + def evaluate(self, ins: EvaluateIns) -> EvaluateRes: + # Load global model + bst = xgb.Booster(params=self.params) + para_b = bytearray(ins.parameters.tensors[0]) + bst.load_model(para_b) + + # Run evaluation + eval_results = bst.eval_set( + evals=[(self.valid_dmatrix, "valid")], + iteration=bst.num_boosted_rounds() - 1, + ) + auc = round(float(eval_results.split("\t")[1].split(":")[1]), 4) + + return EvaluateRes( + status=Status( + code=Code.OK, + message="OK", + ), + loss=0.0, + num_examples=self.num_val, + metrics={"AUC": auc}, + ) + + +def client_fn(context: Context): + # Load model and data + 
partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + + # Parse configs + cfg = replace_keys(unflatten_dict(context.run_config)) + num_local_round = cfg["local_epochs"] + train_method = cfg["train_method"] + params = cfg["params"] + partitioner_type = cfg["partitioner_type"] + seed = cfg["seed"] + test_fraction = cfg["test_fraction"] + centralised_eval_client = cfg["centralised_eval_client"] + + # Load training and validation data + train_dmatrix, valid_dmatrix, num_train, num_val = load_data( + partitioner_type, + partition_id, + num_partitions, + centralised_eval_client, + test_fraction, + seed, + ) + + # Setup learning rate + if cfg["scaled_lr"]: + new_lr = cfg["params"]["eta"] / num_partitions + cfg["params"].update({"eta": new_lr}) + + # Return Client instance + return XgbClient( + train_dmatrix, + valid_dmatrix, + num_train, + num_val, + num_local_round, + params, + train_method, + ) + + +# Flower ClientApp +app = ClientApp( + client_fn, +) diff --git a/examples/xgboost-comprehensive/xgboost_comprehensive/server_app.py b/examples/xgboost-comprehensive/xgboost_comprehensive/server_app.py new file mode 100644 index 000000000000..eebaad403259 --- /dev/null +++ b/examples/xgboost-comprehensive/xgboost_comprehensive/server_app.py @@ -0,0 +1,168 @@ +"""xgboost_comprehensive: A Flower / XGBoost app.""" + +from logging import INFO +from typing import Dict, List, Optional + +import xgboost as xgb +from xgboost_comprehensive.task import replace_keys, transform_dataset_to_dmatrix + +from datasets import load_dataset +from flwr.common import Context, Parameters, Scalar +from flwr.common.config import unflatten_dict +from flwr.common.logger import log +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.client_manager import SimpleClientManager +from flwr.server.client_proxy import ClientProxy +from flwr.server.criterion import Criterion +from flwr.server.strategy import 
FedXgbBagging, FedXgbCyclic + + +class CyclicClientManager(SimpleClientManager): + """Provides a cyclic client selection rule.""" + + def sample( + self, + num_clients: int, + min_num_clients: Optional[int] = None, + criterion: Optional[Criterion] = None, + ) -> List[ClientProxy]: + """Sample a number of Flower ClientProxy instances.""" + + # Block until at least num_clients are connected. + if min_num_clients is None: + min_num_clients = num_clients + self.wait_for(min_num_clients) + + # Sample clients which meet the criterion + available_cids = list(self.clients) + if criterion is not None: + available_cids = [ + cid for cid in available_cids if criterion.select(self.clients[cid]) + ] + + if num_clients > len(available_cids): + log( + INFO, + "Sampling failed: number of available clients" + " (%s) is less than number of requested clients (%s).", + len(available_cids), + num_clients, + ) + return [] + + # Return all available clients + return [self.clients[cid] for cid in available_cids] + + +def get_evaluate_fn(test_data, params): + """Return a function for centralised evaluation.""" + + def evaluate_fn( + server_round: int, parameters: Parameters, config: Dict[str, Scalar] + ): + # If at the first round, skip the evaluation + if server_round == 0: + return 0, {} + else: + bst = xgb.Booster(params=params) + for para in parameters.tensors: + para_b = bytearray(para) + + # Load global model + bst.load_model(para_b) + # Run evaluation + eval_results = bst.eval_set( + evals=[(test_data, "valid")], + iteration=bst.num_boosted_rounds() - 1, + ) + auc = round(float(eval_results.split("\t")[1].split(":")[1]), 4) + + # Save results to disk. + # Note we add new entry to the same file with each call to this function. 
+ with open(f"./centralised_eval.txt", "a", encoding="utf-8") as fp: + fp.write(f"Round:{server_round},AUC:{auc}\n") + + return 0, {"AUC": auc} + + return evaluate_fn + + +def evaluate_metrics_aggregation(eval_metrics): + """Return an aggregated metric (AUC) for evaluation.""" + total_num = sum([num for num, _ in eval_metrics]) + auc_aggregated = ( + sum([metrics["AUC"] * num for num, metrics in eval_metrics]) / total_num + ) + metrics_aggregated = {"AUC": auc_aggregated} + return metrics_aggregated + + +def config_func(rnd: int) -> Dict[str, str]: + """Return a configuration with global epochs.""" + config = { + "global_round": str(rnd), + } + return config + + +def server_fn(context: Context): + # Read from config + cfg = replace_keys(unflatten_dict(context.run_config)) + num_rounds = cfg["num_server_rounds"] + fraction_fit = cfg["fraction_fit"] + fraction_evaluate = cfg["fraction_evaluate"] + train_method = cfg["train_method"] + params = cfg["params"] + centralised_eval = cfg["centralised_eval"] + + if centralised_eval: + # This is the exact same dataset as the one downloaded by the clients via + # FlowerDatasets. However, we don't use FlowerDatasets for the server since + # partitioning is not needed. 
+ # We make use of the "test" split only + test_set = load_dataset("jxie/higgs")["test"] + test_set.set_format("numpy") + test_dmatrix = transform_dataset_to_dmatrix(test_set) + + # Init an empty Parameter + parameters = Parameters(tensor_type="", tensors=[]) + + # Define strategy + if train_method == "bagging": + # Bagging training + strategy = FedXgbBagging( + evaluate_function=( + get_evaluate_fn(test_dmatrix, params) if centralised_eval else None + ), + fraction_fit=fraction_fit, + fraction_evaluate=fraction_evaluate if not centralised_eval else 0.0, + on_evaluate_config_fn=config_func, + on_fit_config_fn=config_func, + evaluate_metrics_aggregation_fn=( + evaluate_metrics_aggregation if not centralised_eval else None + ), + initial_parameters=parameters, + ) + else: + # Cyclic training + strategy = FedXgbCyclic( + fraction_fit=1.0, + fraction_evaluate=1.0, + evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation, + on_evaluate_config_fn=config_func, + on_fit_config_fn=config_func, + initial_parameters=parameters, + ) + + config = ServerConfig(num_rounds=num_rounds) + client_manager = CyclicClientManager() if train_method == "cyclic" else None + + return ServerAppComponents( + strategy=strategy, config=config, client_manager=client_manager + ) + + +# Create ServerApp +app = ServerApp( + server_fn=server_fn, +) diff --git a/examples/xgboost-comprehensive/xgboost_comprehensive/task.py b/examples/xgboost-comprehensive/xgboost_comprehensive/task.py new file mode 100644 index 000000000000..7454319de38e --- /dev/null +++ b/examples/xgboost-comprehensive/xgboost_comprehensive/task.py @@ -0,0 +1,129 @@ +"""xgboost_comprehensive: A Flower / XGBoost app.""" + +from logging import INFO + +import xgboost as xgb +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import ( + ExponentialPartitioner, + IidPartitioner, + LinearPartitioner, + SquarePartitioner, +) + +from datasets import DatasetDict, concatenate_datasets +from flwr.common import log 
+ +CORRELATION_TO_PARTITIONER = { + "uniform": IidPartitioner, + "linear": LinearPartitioner, + "square": SquarePartitioner, + "exponential": ExponentialPartitioner, +} + +fds = None # Cache FederatedDataset + + +def train_test_split(partition, test_fraction, seed): + """Split the data into train and validation set given split rate.""" + train_test = partition.train_test_split(test_size=test_fraction, seed=seed) + partition_train = train_test["train"] + partition_test = train_test["test"] + + num_train = len(partition_train) + num_test = len(partition_test) + + return partition_train, partition_test, num_train, num_test + + +def transform_dataset_to_dmatrix(data): + """Transform dataset to DMatrix format for xgboost.""" + x = data["inputs"] + y = data["label"] + new_data = xgb.DMatrix(x, label=y) + return new_data + + +def instantiate_fds(partitioner_type, num_partitions): + """Initialize FederatedDataset.""" + # Only initialize `FederatedDataset` once + global fds + if fds is None: + partitioner = CORRELATION_TO_PARTITIONER[partitioner_type]( + num_partitions=num_partitions + ) + fds = FederatedDataset( + dataset="jxie/higgs", + partitioners={"train": partitioner}, + preprocessor=resplit, + ) + return fds + + +def load_data( + partitioner_type, + partition_id, + num_partitions, + centralised_eval_client, + test_fraction, + seed, +): + """Load partition data.""" + fds_ = instantiate_fds(partitioner_type, num_partitions) + partition = fds_.load_partition(partition_id) + partition.set_format("numpy") + + if centralised_eval_client: + train_data = partition + num_train = train_data.shape[0] + + # Use centralised test set for evaluation + valid_data = fds_.load_split("test") + valid_data.set_format("numpy") + num_val = valid_data.shape[0] + else: + # Train/test splitting + train_data, valid_data, num_train, num_val = train_test_split( + partition, test_fraction=test_fraction, seed=seed + ) + + # Reformat data to DMatrix for xgboost + log(INFO, "Reformatting data...") + 
train_dmatrix = transform_dataset_to_dmatrix(train_data) + valid_dmatrix = transform_dataset_to_dmatrix(valid_data) + + return train_dmatrix, valid_dmatrix, num_train, num_val + + +def replace_keys(input_dict, match="-", target="_"): + """Recursively replace match string with target string in dictionary keys.""" + new_dict = {} + for key, value in input_dict.items(): + new_key = key.replace(match, target) + if isinstance(value, dict): + new_dict[new_key] = replace_keys(value, match, target) + else: + new_dict[new_key] = value + return new_dict + + +def resplit(dataset: DatasetDict) -> DatasetDict: + """Increase the quantity of centralised test samples from 500K to 1M.""" + return DatasetDict( + { + "train": dataset["train"].select( + range(0, dataset["train"].num_rows - 500_000) + ), + "test": concatenate_datasets( + [ + dataset["train"].select( + range( + dataset["train"].num_rows - 500_000, + dataset["train"].num_rows, + ) + ), + dataset["test"], + ] + ), + } + ) diff --git a/examples/xgboost-quickstart/README.md b/examples/xgboost-quickstart/README.md index 713b6eab8bac..a7b047c090f0 100644 --- a/examples/xgboost-quickstart/README.md +++ b/examples/xgboost-quickstart/README.md @@ -1,4 +1,10 @@ -# Flower Example using XGBoost +--- +tags: [quickstart, classification, tabular] +dataset: [HIGGS] +framework: [xgboost] +--- + +# Federated Learning with XGBoost and Flower (Quickstart Example) This example demonstrates how to perform EXtreme Gradient Boosting (XGBoost) within Flower using `xgboost` package. We use [HIGGS](https://archive.ics.uci.edu/dataset/280/higgs) dataset for this example to perform a binary classification task. @@ -6,72 +12,60 @@ Tree-based with bagging method is used for aggregation on the server. This project provides a minimal code example to enable you to get started quickly. For a more comprehensive code example, take a look at [xgboost-comprehensive](https://github.com/adap/flower/tree/main/examples/xgboost-comprehensive). 
-## Project Setup - -Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: - -```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/xgboost-quickstart . && rm -rf flower && cd xgboost-quickstart -``` - -This will create a new directory called `xgboost-quickstart` containing the following files: - -``` --- README.md <- Your're reading this right now --- server.py <- Defines the server-side logic --- client.py <- Defines the client-side logic --- run.sh <- Commands to run experiments --- pyproject.toml <- Example dependencies -``` +## Set up the project -### Installing Dependencies +### Clone the project -Project dependencies (such as `xgboost` and `flwr`) are defined in `pyproject.toml`. You can install the dependencies by invoking `pip`: +Start by cloning the example project: ```shell -# From a new python environment, run: -pip install . +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/xgboost-quickstart . \ + && rm -rf _tmp \ + && cd xgboost-quickstart ``` -Then, to verify that everything works correctly you can run the following command: +This will create a new directory called `xgboost-quickstart` with the following structure: ```shell -python3 -c "import flwr" +xgboost-quickstart +├── xgboost_quickstart +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your utilities and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -If you don't see any errors you're good to go! +### Install dependencies and project -## Run Federated Learning with XGBoost and Flower +Install the dependencies defined in `pyproject.toml` as well as the `xgboost_quickstart` package. -Afterwards you are ready to start the Flower server as well as the clients. 
-You can simply start the server in a terminal as follows: - -```shell -python3 server.py +```bash +pip install -e . ``` -Now you are ready to start the Flower clients which will participate in the learning. -To do so simply open two more terminal windows and run the following commands. +## Run the project -Start client 1 in the first terminal: +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -```shell -python3 client.py --partition-id=0 +### Run with the Simulation Engine + +```bash +flwr run . ``` -Start client 2 in the second terminal: +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: -```shell -python3 client.py --partition-id=1 +```bash +flwr run . --run-config "num-server-rounds=5 params.eta=0.05" ``` -You will see that XGBoost is starting a federated training. - -Alternatively, you can use `run.sh` to run the same experiment in a single terminal as follows: +> \[!TIP\] +> For a more detailed walk-through check our [quickstart XGBoost tutorial](https://flower.ai/docs/framework/tutorial-quickstart-xgboost.html) -```shell -poetry run ./run.sh -``` +### Run with the Deployment Engine -Look at the [code](https://github.com/adap/flower/tree/main/examples/xgboost-quickstart) -and [tutorial](https://flower.ai/docs/framework/tutorial-quickstart-xgboost.html) for a detailed explanation. +> \[!NOTE\] +> An update to this example will show how to run this Flower application with the Deployment Engine and TLS certificates, or with Docker. 
diff --git a/examples/xgboost-quickstart/client.py b/examples/xgboost-quickstart/client.py deleted file mode 100644 index 5a4d88bb7e43..000000000000 --- a/examples/xgboost-quickstart/client.py +++ /dev/null @@ -1,208 +0,0 @@ -import argparse -import warnings -from typing import Union -from logging import INFO -from datasets import Dataset, DatasetDict -import xgboost as xgb - -import flwr as fl -from flwr_datasets import FederatedDataset -from flwr.common.logger import log -from flwr.common import ( - Code, - EvaluateIns, - EvaluateRes, - FitIns, - FitRes, - GetParametersIns, - GetParametersRes, - Parameters, - Status, -) -from flwr_datasets.partitioner import IidPartitioner - - -warnings.filterwarnings("ignore", category=UserWarning) - -# Define arguments parser for the client/partition ID. -parser = argparse.ArgumentParser() -parser.add_argument( - "--partition-id", - default=0, - type=int, - help="Partition ID used for the current client.", -) -args = parser.parse_args() - - -# Define data partitioning related functions -def train_test_split(partition: Dataset, test_fraction: float, seed: int): - """Split the data into train and validation set given split rate.""" - train_test = partition.train_test_split(test_size=test_fraction, seed=seed) - partition_train = train_test["train"] - partition_test = train_test["test"] - - num_train = len(partition_train) - num_test = len(partition_test) - - return partition_train, partition_test, num_train, num_test - - -def transform_dataset_to_dmatrix(data: Union[Dataset, DatasetDict]) -> xgb.core.DMatrix: - """Transform dataset to DMatrix format for xgboost.""" - x = data["inputs"] - y = data["label"] - new_data = xgb.DMatrix(x, label=y) - return new_data - - -# Load (HIGGS) dataset and conduct partitioning -# We use a small subset (num_partitions=30) of the dataset for demonstration to speed up the data loading process. 
-partitioner = IidPartitioner(num_partitions=30) -fds = FederatedDataset(dataset="jxie/higgs", partitioners={"train": partitioner}) - -# Load the partition for this `partition_id` -log(INFO, "Loading partition...") -partition = fds.load_partition(partition_id=args.partition_id, split="train") -partition.set_format("numpy") - -# Train/test splitting -train_data, valid_data, num_train, num_val = train_test_split( - partition, test_fraction=0.2, seed=42 -) - -# Reformat data to DMatrix for xgboost -log(INFO, "Reformatting data...") -train_dmatrix = transform_dataset_to_dmatrix(train_data) -valid_dmatrix = transform_dataset_to_dmatrix(valid_data) - -# Hyper-parameters for xgboost training -num_local_round = 1 -params = { - "objective": "binary:logistic", - "eta": 0.1, # Learning rate - "max_depth": 8, - "eval_metric": "auc", - "nthread": 16, - "num_parallel_tree": 1, - "subsample": 1, - "tree_method": "hist", -} - - -# Define Flower client -class XgbClient(fl.client.Client): - def __init__( - self, - train_dmatrix, - valid_dmatrix, - num_train, - num_val, - num_local_round, - params, - ): - self.train_dmatrix = train_dmatrix - self.valid_dmatrix = valid_dmatrix - self.num_train = num_train - self.num_val = num_val - self.num_local_round = num_local_round - self.params = params - - def get_parameters(self, ins: GetParametersIns) -> GetParametersRes: - _ = (self, ins) - return GetParametersRes( - status=Status( - code=Code.OK, - message="OK", - ), - parameters=Parameters(tensor_type="", tensors=[]), - ) - - def _local_boost(self, bst_input): - # Update trees based on local training data. 
- for i in range(self.num_local_round): - bst_input.update(self.train_dmatrix, bst_input.num_boosted_rounds()) - - # Bagging: extract the last N=num_local_round trees for sever aggregation - bst = bst_input[ - bst_input.num_boosted_rounds() - - self.num_local_round : bst_input.num_boosted_rounds() - ] - - return bst - - def fit(self, ins: FitIns) -> FitRes: - global_round = int(ins.config["global_round"]) - if global_round == 1: - # First round local training - bst = xgb.train( - self.params, - self.train_dmatrix, - num_boost_round=self.num_local_round, - evals=[(self.valid_dmatrix, "validate"), (self.train_dmatrix, "train")], - ) - else: - bst = xgb.Booster(params=self.params) - for item in ins.parameters.tensors: - global_model = bytearray(item) - - # Load global model into booster - bst.load_model(global_model) - - # Local training - bst = self._local_boost(bst) - - # Save model - local_model = bst.save_raw("json") - local_model_bytes = bytes(local_model) - - return FitRes( - status=Status( - code=Code.OK, - message="OK", - ), - parameters=Parameters(tensor_type="", tensors=[local_model_bytes]), - num_examples=self.num_train, - metrics={}, - ) - - def evaluate(self, ins: EvaluateIns) -> EvaluateRes: - # Load global model - bst = xgb.Booster(params=self.params) - for para in ins.parameters.tensors: - para_b = bytearray(para) - bst.load_model(para_b) - - # Run evaluation - eval_results = bst.eval_set( - evals=[(self.valid_dmatrix, "valid")], - iteration=bst.num_boosted_rounds() - 1, - ) - auc = round(float(eval_results.split("\t")[1].split(":")[1]), 4) - - global_round = ins.config["global_round"] - log(INFO, f"AUC = {auc} at round {global_round}") - - return EvaluateRes( - status=Status( - code=Code.OK, - message="OK", - ), - loss=0.0, - num_examples=self.num_val, - metrics={"AUC": auc}, - ) - - -# Start Flower client -fl.client.start_client( - server_address="127.0.0.1:8080", - client=XgbClient( - train_dmatrix, - valid_dmatrix, - num_train, - num_val, - 
num_local_round, - params, - ).to_client(), -) diff --git a/examples/xgboost-quickstart/pyproject.toml b/examples/xgboost-quickstart/pyproject.toml index f1e451fe779a..3bfedb6b1d58 100644 --- a/examples/xgboost-quickstart/pyproject.toml +++ b/examples/xgboost-quickstart/pyproject.toml @@ -3,17 +3,45 @@ requires = ["hatchling"] build-backend = "hatchling.build" [project] -name = "quickstart-xgboost" -version = "0.1.0" -description = "XGBoost Federated Learning Quickstart with Flower" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] +name = "xgboost_quickstart" +version = "1.0.0" +description = "Federated Learning with XGBoost and Flower (Quickstart Example)" +license = "Apache-2.0" dependencies = [ - "flwr>=1.8.0,<2.0", - "flwr-datasets>=0.1.0,<1.0.0", - "xgboost>=2.0.0,<3.0.0", + "flwr-nightly[simulation]==1.11.0.dev20240826", + "flwr-datasets>=0.3.0", + "xgboost>=2.0.0", ] [tool.hatch.build.targets.wheel] packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "xgboost_quickstart.server_app:app" +clientapp = "xgboost_quickstart.client_app:app" + +[tool.flwr.app.config] +# ServerApp +num-server-rounds = 3 +fraction-fit = 0.1 +fraction-evaluate = 0.1 + +# ClientApp +local-epochs = 1 +params.objective = "binary:logistic" +params.eta = 0.1 # Learning rate +params.max-depth = 8 +params.eval-metric = "auc" +params.nthread = 16 +params.num-parallel-tree = 1 +params.subsample = 1 +params.tree-method = "hist" + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 20 diff --git a/examples/xgboost-quickstart/run.sh b/examples/xgboost-quickstart/run.sh deleted file mode 100755 index b35af58222ab..000000000000 --- a/examples/xgboost-quickstart/run.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/ - -echo "Starting server" -python server.py & -sleep 5 # 
Sleep for 5s to give the server enough time to start - -for i in `seq 0 1`; do - echo "Starting client $i" - python3 client.py --partition-id=$i & -done - -# Enable CTRL+C to stop all background processes -trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM -# Wait for all background processes to complete -wait diff --git a/examples/xgboost-quickstart/server.py b/examples/xgboost-quickstart/server.py deleted file mode 100644 index e9239fde696c..000000000000 --- a/examples/xgboost-quickstart/server.py +++ /dev/null @@ -1,48 +0,0 @@ -from typing import Dict -import flwr as fl -from flwr.server.strategy import FedXgbBagging - - -# FL experimental settings -pool_size = 2 -num_rounds = 5 -num_clients_per_round = 2 -num_evaluate_clients = 2 - - -def evaluate_metrics_aggregation(eval_metrics): - """Return an aggregated metric (AUC) for evaluation.""" - total_num = sum([num for num, _ in eval_metrics]) - auc_aggregated = ( - sum([metrics["AUC"] * num for num, metrics in eval_metrics]) / total_num - ) - metrics_aggregated = {"AUC": auc_aggregated} - return metrics_aggregated - - -def config_func(rnd: int) -> Dict[str, str]: - """Return a configuration with global epochs.""" - config = { - "global_round": str(rnd), - } - return config - - -# Define strategy -strategy = FedXgbBagging( - fraction_fit=(float(num_clients_per_round) / pool_size), - min_fit_clients=num_clients_per_round, - min_available_clients=pool_size, - min_evaluate_clients=num_evaluate_clients, - fraction_evaluate=1.0, - evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation, - on_evaluate_config_fn=config_func, - on_fit_config_fn=config_func, -) - -# Start Flower server -fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=num_rounds), - strategy=strategy, -) diff --git a/examples/xgboost-quickstart/xgboost_quickstart/__init__.py b/examples/xgboost-quickstart/xgboost_quickstart/__init__.py new file mode 100644 index 000000000000..470360b377a6 --- /dev/null 
+++ b/examples/xgboost-quickstart/xgboost_quickstart/__init__.py @@ -0,0 +1 @@ +"""xgboost_quickstart: A Flower / XGBoost app.""" diff --git a/examples/xgboost-comprehensive/client_utils.py b/examples/xgboost-quickstart/xgboost_quickstart/client_app.py similarity index 66% rename from examples/xgboost-comprehensive/client_utils.py rename to examples/xgboost-quickstart/xgboost_quickstart/client_app.py index d2e07677ef97..3aa199a10274 100644 --- a/examples/xgboost-comprehensive/client_utils.py +++ b/examples/xgboost-quickstart/xgboost_quickstart/client_app.py @@ -1,22 +1,29 @@ -from logging import INFO -import xgboost as xgb +"""xgboost_quickstart: A Flower / XGBoost app.""" + +import warnings -import flwr as fl -from flwr.common.logger import log +from flwr.common.context import Context + +import xgboost as xgb +from flwr.client import Client, ClientApp +from flwr.common.config import unflatten_dict from flwr.common import ( Code, EvaluateIns, EvaluateRes, FitIns, FitRes, - GetParametersIns, - GetParametersRes, Parameters, Status, ) +from xgboost_quickstart.task import load_data, replace_keys -class XgbClient(fl.client.Client): +warnings.filterwarnings("ignore", category=UserWarning) + + +# Define Flower Client and client_fn +class FlowerClient(Client): def __init__( self, train_dmatrix, @@ -25,7 +32,6 @@ def __init__( num_val, num_local_round, params, - train_method, ): self.train_dmatrix = train_dmatrix self.valid_dmatrix = valid_dmatrix @@ -33,17 +39,6 @@ def __init__( self.num_val = num_val self.num_local_round = num_local_round self.params = params - self.train_method = train_method - - def get_parameters(self, ins: GetParametersIns) -> GetParametersRes: - _ = (self, ins) - return GetParametersRes( - status=Status( - code=Code.OK, - message="OK", - ), - parameters=Parameters(tensor_type="", tensors=[]), - ) def _local_boost(self, bst_input): # Update trees based on local training data. 
@@ -51,15 +46,10 @@ def _local_boost(self, bst_input): bst_input.update(self.train_dmatrix, bst_input.num_boosted_rounds()) # Bagging: extract the last N=num_local_round trees for sever aggregation - # Cyclic: return the entire model - bst = ( - bst_input[ - bst_input.num_boosted_rounds() - - self.num_local_round : bst_input.num_boosted_rounds() - ] - if self.train_method == "bagging" - else bst_input - ) + bst = bst_input[ + bst_input.num_boosted_rounds() + - self.num_local_round : bst_input.num_boosted_rounds() + ] return bst @@ -75,8 +65,7 @@ def fit(self, ins: FitIns) -> FitRes: ) else: bst = xgb.Booster(params=self.params) - for item in ins.parameters.tensors: - global_model = bytearray(item) + global_model = bytearray(ins.parameters.tensors[0]) # Load global model into booster bst.load_model(global_model) @@ -101,8 +90,7 @@ def fit(self, ins: FitIns) -> FitRes: def evaluate(self, ins: EvaluateIns) -> EvaluateRes: # Load global model bst = xgb.Booster(params=self.params) - for para in ins.parameters.tensors: - para_b = bytearray(para) + para_b = bytearray(ins.parameters.tensors[0]) bst.load_model(para_b) # Run evaluation @@ -112,9 +100,6 @@ def evaluate(self, ins: EvaluateIns) -> EvaluateRes: ) auc = round(float(eval_results.split("\t")[1].split(":")[1]), 4) - global_round = ins.config["global_round"] - log(INFO, f"AUC = {auc} at round {global_round}") - return EvaluateRes( status=Status( code=Code.OK, @@ -124,3 +109,31 @@ def evaluate(self, ins: EvaluateIns) -> EvaluateRes: num_examples=self.num_val, metrics={"AUC": auc}, ) + + +def client_fn(context: Context): + # Load model and data + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + train_dmatrix, valid_dmatrix, num_train, num_val = load_data( + partition_id, num_partitions + ) + + cfg = replace_keys(unflatten_dict(context.run_config)) + num_local_round = cfg["local_epochs"] + + # Return Client instance + return FlowerClient( + train_dmatrix, + 
valid_dmatrix, + num_train, + num_val, + num_local_round, + cfg["params"], + ) + + +# Flower ClientApp +app = ClientApp( + client_fn, +) diff --git a/examples/xgboost-quickstart/xgboost_quickstart/server_app.py b/examples/xgboost-quickstart/xgboost_quickstart/server_app.py new file mode 100644 index 000000000000..6b81c6caa785 --- /dev/null +++ b/examples/xgboost-quickstart/xgboost_quickstart/server_app.py @@ -0,0 +1,54 @@ +"""xgboost_quickstart: A Flower / XGBoost app.""" + +from typing import Dict + +from flwr.common import Context, Parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedXgbBagging + + +def evaluate_metrics_aggregation(eval_metrics): + """Return an aggregated metric (AUC) for evaluation.""" + total_num = sum([num for num, _ in eval_metrics]) + auc_aggregated = ( + sum([metrics["AUC"] * num for num, metrics in eval_metrics]) / total_num + ) + metrics_aggregated = {"AUC": auc_aggregated} + return metrics_aggregated + + +def config_func(rnd: int) -> Dict[str, str]: + """Return a configuration with global epochs.""" + config = { + "global_round": str(rnd), + } + return config + + +def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] + fraction_fit = context.run_config["fraction-fit"] + fraction_evaluate = context.run_config["fraction-evaluate"] + + # Init an empty Parameter + parameters = Parameters(tensor_type="", tensors=[]) + + # Define strategy + strategy = FedXgbBagging( + fraction_fit=fraction_fit, + fraction_evaluate=fraction_evaluate, + evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation, + on_evaluate_config_fn=config_func, + on_fit_config_fn=config_func, + initial_parameters=parameters, + ) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Create ServerApp +app = ServerApp( + server_fn=server_fn, +) diff --git 
a/examples/xgboost-quickstart/xgboost_quickstart/task.py b/examples/xgboost-quickstart/xgboost_quickstart/task.py new file mode 100644 index 000000000000..09916d9ac04a --- /dev/null +++ b/examples/xgboost-quickstart/xgboost_quickstart/task.py @@ -0,0 +1,71 @@ +"""xgboost_quickstart: A Flower / XGBoost app.""" + +from logging import INFO + +import xgboost as xgb +from flwr.common import log +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner + + +def train_test_split(partition, test_fraction, seed): + """Split the data into train and validation set given split rate.""" + train_test = partition.train_test_split(test_size=test_fraction, seed=seed) + partition_train = train_test["train"] + partition_test = train_test["test"] + + num_train = len(partition_train) + num_test = len(partition_test) + + return partition_train, partition_test, num_train, num_test + + +def transform_dataset_to_dmatrix(data): + """Transform dataset to DMatrix format for xgboost.""" + x = data["inputs"] + y = data["label"] + new_data = xgb.DMatrix(x, label=y) + return new_data + + +fds = None # Cache FederatedDataset + + +def load_data(partition_id, num_clients): + """Load partition HIGGS data.""" + # Only initialize `FederatedDataset` once + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_clients) + fds = FederatedDataset( + dataset="jxie/higgs", + partitioners={"train": partitioner}, + ) + + # Load the partition for this `partition_id` + partition = fds.load_partition(partition_id, split="train") + partition.set_format("numpy") + + # Train/test splitting + train_data, valid_data, num_train, num_val = train_test_split( + partition, test_fraction=0.2, seed=42 + ) + + # Reformat data to DMatrix for xgboost + log(INFO, "Reformatting data...") + train_dmatrix = transform_dataset_to_dmatrix(train_data) + valid_dmatrix = transform_dataset_to_dmatrix(valid_data) + + return train_dmatrix, valid_dmatrix, num_train, num_val + + 
+def replace_keys(input_dict, match="-", target="_"): + """Recursively replace match string with target string in dictionary keys.""" + new_dict = {} + for key, value in input_dict.items(): + new_key = key.replace(match, target) + if isinstance(value, dict): + new_dict[new_key] = replace_keys(value, match, target) + else: + new_dict[new_key] = value + return new_dict diff --git a/glossary/aggregation.mdx b/glossary/aggregation.mdx new file mode 100644 index 000000000000..82cadd6948bb --- /dev/null +++ b/glossary/aggregation.mdx @@ -0,0 +1,18 @@ +--- +title: "Aggregation" +description: "Combine model weights from sampled clients to update the global model. This process enables the global model to learn from each client's data." +date: "2024-05-23" +author: + name: "Charles Beauville" + position: "Machine Learning Engineer" + website: "https://www.linkedin.com/in/charles-beauville/" + github: "github.com/charlesbvll" +related: + - text: "Federated Learning" + link: "/glossary/federated-learning" + - text: "Tutorial: What is Federated Learning?" + link: "/docs/framework/tutorial-series-what-is-federated-learning.html" +--- + +During each Federated Learning round, the server will receive model weights from sampled clients and needs a function to improve its global model using those weights. This is what is called `aggregation`. It can be a simple weighted average function (like `FedAvg`), or can be more complex (e.g. incorporating optimization techniques). The aggregation is where FL's magic happens; it allows the global model to learn and improve from each client's particular data distribution with only their trained weights. + diff --git a/glossary/client.mdx b/glossary/client.mdx new file mode 100644 index 000000000000..52b14f124add --- /dev/null +++ b/glossary/client.mdx @@ -0,0 +1,17 @@ +--- +title: "Client" +description: "A client is any machine with local data that connects to a server, trains on received global model weights, and sends back updated weights. 
Clients may also evaluate global model weights." +date: "2024-05-23" +author: + name: "Charles Beauville" + position: "Machine Learning Engineer" + website: "https://www.linkedin.com/in/charles-beauville/" + github: "github.com/charlesbvll" +related: + - text: "Federated Learning" + link: "/glossary/federated-learning" + - text: "Tutorial: What is Federated Learning?" + link: "/docs/framework/tutorial-series-what-is-federated-learning.html" +--- + +Any machine with access to some data that connects to a server to perform Federated Learning. During each round of FL (if it is sampled), it will receive global model weights from the server, train on the data they have access to, and send the resulting trained weights back to the server. Clients can also be sampled to evaluate the global server weights on the data they have access to, this is called federated evaluation. diff --git a/glossary/docker.mdx b/glossary/docker.mdx new file mode 100644 index 000000000000..9ca079b90f06 --- /dev/null +++ b/glossary/docker.mdx @@ -0,0 +1,22 @@ +--- +title: "Docker" +description: "Docker is a containerization tool that allows for consistent and reliable deployment of applications across different environments." +date: "2024-07-08" +author: + name: "Robert Steiner" + position: "DevOps Engineer at Flower Labs" + website: "https://github.com/Robert-Steiner" +--- + +Docker is an open-source containerization tool for deploying and running applications. Docker +containers encapsulate an application's code, dependencies, and configuration files, allowing +for consistent and reliable deployment across different environments. + +In the context of federated learning, Docker containers can be used to package the entire client +and server application, including all the necessary dependencies, and then deployed on various +devices such as edge devices, cloud servers, or even on-premises servers. 
+ +In Flower, Docker containers are used to containerize various applications like `SuperLink`, +`SuperNode`, and `SuperExec`. Flower's Docker images allow users to quickly get Flower up and +running, reducing the time and effort required to set up and configure the necessary software +and dependencies. diff --git a/glossary/edge-computing.mdx b/glossary/edge-computing.mdx new file mode 100644 index 000000000000..6499a48e8f07 --- /dev/null +++ b/glossary/edge-computing.mdx @@ -0,0 +1,40 @@ +--- +title: "Edge Computing" +description: "Edge computing is a distributed computing concept of bringing compute and data storage as close as possible to the source of data generation and consumption by users." +date: "2024-09-10" +author: + name: "Chong Shen Ng" + position: "Research Engineer @ Flower Labs" + website: "https://discuss.flower.ai/u/chongshenng" + github: "github.com/chongshenng" +related: + - text: "IoT" + link: "/glossary/iot" + - text: "Run Flower using Docker" + link: "/docs/framework/docker/index.html" + - text: "Flower Clients in C++" + link: "/docs/examples/quickstart-cpp.html" + - text: "Federated Learning on Embedded Devices with Flower" + link: "/docs/examples/embedded-devices.html" +--- + +### Introduction to Edge Computing + +Edge computing is a distributed computing concept of bringing compute and data storage as close as possible to the source of data generation and consumption by users. By performing computation close to the data source, edge computing aims to address limitations typically encountered in centralized computing, such as bandwidth, latency, privacy, and autonomy. + +Edge computing works alongside cloud and fog computing, but each serves different purposes. Cloud computing delivers on-demand resources like data storage, servers, analytics, and networking via the Internet. Fog computing, however, brings computing closer to devices by distributing communication and computation across clusters of IoT or edge devices. 
While edge computing is sometimes used interchangeably with fog computing, edge computing specifically handles data processing directly at or near the devices themselves, whereas fog computing distributes tasks across multiple nodes, bridging the gap between edge devices and the cloud. + +### Advantages and Use Cases of Edge Computing + +The key benefit of edge computing is that the volume of data moved is significantly reduced because computation runs directly on board the device on the acquired data. This reduces the amount of long-distance communication between machines, which improves latency and reduces transmission costs. Examples of edge computing that benefit from offloading computation include: +1. Smart watches and fitness monitors that measure live health metrics. +2. Facial recognition and wake word detection on smartphones. +3. Real-time lane departure warning systems in road transport that detect lane lines using on-board videos and sensors. + +### Federated Learning in Edge Computing + +When deploying federated learning systems, edge computing is an important component to consider. Edge computing typically takes the role of "clients" in federated learning. In a healthcare use case, servers in different hospitals can train models on their local data. In mobile computing, smartphones perform local training (and inference) on user data such as for next word prediction. + +### Edge Computing with Flower + +With the Flower framework, you can easily deploy federated learning workflows and maximise the use of edge computing resources. Flower provides the infrastructure to perform federated learning, federated evaluation, and federated analytics, all in an easy, scalable, and secure way. Start with our tutorial on running Federated Learning on Embedded Devices (link [here](https://github.com/adap/flower/tree/main/examples/embedded-devices)), which shows you how to run Flower on NVidia Jetson devices and Raspberry Pis as your edge compute. 
diff --git a/glossary/evaluation.mdx b/glossary/evaluation.mdx new file mode 100644 index 000000000000..bf6b36cd0c4b --- /dev/null +++ b/glossary/evaluation.mdx @@ -0,0 +1,19 @@ +--- +title: "Evaluation" +description: "Evaluation measures how well the trained model performs by testing it on each client's local data, providing insights into its generalizability across varied data sources." +date: "2024-07-08" +author: + name: "Heng Pan" + position: "Research Scientist" + website: "https://discuss.flower.ai/u/pan-h/summary" + github: "github.com/panh99" +related: + - text: "Server" + link: "/glossary/server" + - text: "Client" + link: "/glossary/client" +--- + +Evaluation in machine learning is the process of assessing a model's performance on unseen data to determine its ability to generalize beyond the training set. This typically involves using a separate test set and various metrics like accuracy or F1-score to measure how well the model performs on new data, ensuring it isn't overfitting or underfitting. + +In federated learning, evaluation (or distributed evaluation) refers to the process of assessing a model's performance across multiple clients, such as devices or data centers. Each client evaluates the model locally using its own data and then sends the results to the server, which aggregates all the evaluation outcomes. This process allows for understanding how well the model generalizes to different data distributions without centralizing sensitive data. \ No newline at end of file diff --git a/glossary/federated-learning.mdx b/glossary/federated-learning.mdx new file mode 100644 index 000000000000..5f6b8a7f1732 --- /dev/null +++ b/glossary/federated-learning.mdx @@ -0,0 +1,14 @@ +--- +title: "Federated Learning" +description: "Federated Learning is a machine learning approach where model training occurs on decentralized devices, preserving data privacy and leveraging local computations." 
+date: "2024-05-23" +author: + name: "Julian Rußmeyer" + position: "UX/UI Designer" + website: "https://www.linkedin.com/in/julian-russmeyer/" +related: + - text: "Tutorial: What is Federated Learning?" + link: "/docs/framework/tutorial-series-what-is-federated-learning.html" +--- + +Federated learning is an approach to machine learning in which the model is trained on multiple decentralized devices or servers with local data samples without exchanging them. Instead of sending raw data to a central server, updates to the model are calculated locally and only the model parameters are aggregated centrally. In this way, user privacy is maintained and communication costs are reduced, while collaborative model training is enabled. diff --git a/glossary/flower-datasets.mdx b/glossary/flower-datasets.mdx new file mode 100644 index 000000000000..24537dfe223b --- /dev/null +++ b/glossary/flower-datasets.mdx @@ -0,0 +1,27 @@ +--- +title: "Flower Datasets" +description: "Flower Datasets is a library that enables the creation of datasets for federated learning by partitioning centralized datasets to exhibit heterogeneity or using naturally partitioned datasets." +date: "2024-05-24" +author: + name: "Adam Narożniak" + position: "ML Engineer at Flower Labs" + website: "https://discuss.flower.ai/u/adam.narozniak/summary" +related: + - text: "Flower Datasets documentation" + link: "https://flower.ai/docs/datasets/" + - text: "Flower Datasets GitHub page" + link: "https://github.com/adap/flower/tree/main/datasets" +--- + +Flower Datasets is a library that enables the creation of datasets for federated learning/analytics/evaluation by partitioning centralized datasets to exhibit heterogeneity or using naturally partitioned datasets. It was created by the Flower Labs team, which also created Flower - a Friendly Federated Learning Framework. 
+ +The key features include: +* downloading datasets (HuggingFace `datasets` are used under the hood), +* partitioning (simulate different levels of heterogeneity by using one of the implemented partitioning schemes or create your own), +* creating centralized datasets (easily utilize centralized versions of the datasets), +* reproducibility (repeat the experiments with the same results), +* visualization (display the created partitions), +* ML agnostic (easy integration with all popular ML frameworks). + + +It is a supplementary library to Flower, with which it integrates easily. diff --git a/glossary/grpc.mdx b/glossary/grpc.mdx new file mode 100644 index 000000000000..af58758d10bd --- /dev/null +++ b/glossary/grpc.mdx @@ -0,0 +1,44 @@ +--- +title: "gRPC" +description: "gRPC is an inter-process communication technology for building distributed apps. It allows developers to connect, invoke, operate, and debug apps as easily as making a local function call." +date: "2024-09-10" +author: + name: "Chong Shen Ng" + position: "Research Engineer @ Flower Labs" + website: "https://discuss.flower.ai/u/chongshenng" + github: "github.com/chongshenng" +related: + - text: "Federated Learning" + link: "/glossary/federated-learning" + - text: "Tutorial: What is Federated Learning?" + link: "/docs/framework/tutorial-series-what-is-federated-learning.html" + - text: "Protocol Buffers" + link: "/glossary/protocol-buffers" + - text: "Google: gRPC - A true internet scale RPC framework" + link: "https://cloud.google.com/blog/products/gcp/grpc-a-true-internet-scale-rpc-framework-is-now-1-and-ready-for-production-deployments" +--- + +### Introduction to gRPC + +gRPC is an inter-process communication technology for building distributed applications. It allows you to connect, invoke, operate, and debug these applications as easily as making a local function call. It can efficiently connect services in and across data centers. 
It is also applicable in the last mile of distributed computing to connect devices, mobile applications, and browsers to backend services. Supporting various languages like C++, Go, Java, and Python, and platforms like Android and the web, gRPC is a versatile framework for any environment. + +Google first [open-sourced gRPC in 2016](https://cloud.google.com/blog/products/gcp/grpc-a-true-internet-scale-rpc-framework-is-now-1-and-ready-for-production-deployments), basing it on their internal remote procedure call (RPC) framework, Stubby, designed to handle tens of billions of requests per second. Built on HTTP/2 and protocol buffers, gRPC is a popular high-performance framework for developers to build micro-services. Notable early adopters of gRPC include Square, Netflix, CockroachDB, Cisco, and Juniper Networks. + +By default, gRPC uses protocol buffers - Google's language-neutral and platform-neutral mechanism for efficiently serializing structured data - as its interface definition language and its underlying message interchange format. The recommended protocol buffer version as of writing is `proto3`, though other formats like JSON can also be used. + +### How does it work? + +gRPC operates similarly to many RPC systems. First, you specify the methods that can be called remotely on the server application, along with their parameters and return type. Then, with the appropriate code (more on this below), a gRPC client application can directly call these methods on the gRPC server application on a different machine as if it were a local object. Note that the definitions of client and server in gRPC are different from federated learning. For clarity, we will refer to client (server) applications in gRPC as gRPC client (server) applications. + +To use gRPC, follow these steps: +1. Define structure for the data you want to serialize in a proto file definition. `*.proto`. +2. 
Run the protocol buffer compiler `protoc` to generate to data access classes in the preferred language from the `*.proto` service definitions. This step generates the gRPC client and server code, as well as the regular protocol buffer code for handling your message types. +3. Use the generated class in your application to populate, serialize, and retrieve the class protocol buffer messages. + +### Use cases in Federated Learning + +There are several reasons why gRPC is particularly useful in federated learning. First, clients and server in a federation rely on stable and efficient communication. Using Protobuf, a highly efficient binary serialization format, gRPC overcomes the bandwidth limitations in federated learning, such as in low-bandwidth mobile connections. Second, gRPC’s language-independent communication allows developers to use a variety of programming languages, enabling broader adoption for on-device executions. + +### gRPC in Flower + +gRPC's benefits for distributed computing make it a natural choice for the Flower framework. Flower uses gRPC as its primary communication protocol. To make it easier to build your federated learning systems, we have introduced high-level APIs to take care of the serialization and deserialization of the model parameters, configurations, and metrics. For more details on how to use Flower, follow our "Get started with Flower" tutorial here. diff --git a/glossary/inference.mdx b/glossary/inference.mdx new file mode 100644 index 000000000000..06c93a834d2d --- /dev/null +++ b/glossary/inference.mdx @@ -0,0 +1,21 @@ +--- +title: "Inference" +description: "Inference is the phase in which a trained machine learning model applies its learned patterns to new, unseen data to make predictions or decisions." 
+date: "2024-07-12" +author: + name: "Yan Gao" + position: "Research Scientist" + website: "https://discuss.flower.ai/u/yan-gao/" + github: "github.com/yan-gao-GY" +related: + - text: "Federated Learning" + link: "/glossary/federated-learning" + - text: "Server" + link: "/glossary/server" + - text: "Client" + link: "/glossary/client" +--- + +Inference, also known as model prediction, is the stage in the machine learning workflow where a trained model is used to make predictions based on new, unseen data. In a typical machine learning setting, model inference involves the following steps: model loading, where the trained model is loaded into the application or service where it will be used; data preparation, which preprocess the new data in the same way as the training data; and model prediction, where the prepared data is fed into the model to compute outputs based on the learned patterns during training. + +In the context of federated learning (FL), inference can be performed locally on the user's device. A global model updated from FL process is deployed and loaded on individual nodes (e.g., smartphones, hospital servers) for local inference. This allows for keeping all data on-device, enhancing privacy and reducing latency. diff --git a/glossary/iot.mdx b/glossary/iot.mdx new file mode 100644 index 000000000000..ec1932c444f3 --- /dev/null +++ b/glossary/iot.mdx @@ -0,0 +1,48 @@ +--- +title: "IoT" +description: "The Internet of Things (IoT) refers to devices with sensors, software, and tech that connect and exchange data with other systems via the internet or communication networks." 
+date: "2024-09-10" +author: + name: "Chong Shen Ng" + position: "Research Engineer @ Flower Labs" + website: "https://discuss.flower.ai/u/chongshenng" + github: "github.com/chongshenng" +related: + - text: "Edge Computing" + link: "/glossary/edge-computing" + - text: "Run Flower using Docker" + link: "/docs/framework/docker/index.html" + - text: "Flower Clients in C++" + link: "/docs/examples/quickstart-cpp.html" + - text: "Federated Learning on Embedded Devices with Flower" + link: "/docs/examples/embedded-devices.html" + - text: "Cisco: Redefine Connectivity by Building a Network to Support the Internet of Things" + link: "https://www.cisco.com/c/en/us/solutions/service-provider/a-network-to-support-iot.html" +--- + +### Introduction to IoT + +The Internet of Things (IoT) describes devices with sensors, processing ability, software, and other technologies that connect and exchange data with other devices and systems over the Internet or other communications networks. IoT is often also referred to as Machine-to-Machine (M2M) connections. Examples of IoT include embedded systems, wireless sensor networks, control systems, automation (home and building). In the consumer market, IoT technology is synonymous with smart home products. The IoT architecture bears resemblance to edge computing, but more broadly encompasses edge devices, gateways, and the cloud. + +### Use cases in Federated Learning + +From the perspective of federated learning, IoT systems provide two common configurations: first as a data source for training, and second as a point for running inference/analytics. + +Cisco's Global Cloud Index estimated that nearly 850 Zettabytes (ZB) of data will be generated by all people, machines and things in 2021 ([link](https://www.cisco.com/c/en/us/solutions/service-provider/a-network-to-support-iot.html) to article). 
In IoT, the data is different because not all of the data needs to be stored and instead, the most impactful business values come from running computations on the data. This positions IoT as an ideal candidate for implementing federated learning systems, where a model trained on a datastream from a single device may not be useful, but when trained collaboratively on hundreds or thousands of devices, yields a better performing and generalisable model. The key benefit is that the generated data remains local on the device and can even be offloaded after multiple rounds of federated learning. Some examples are presented below. + +Once a model is trained (e.g. in a federated way), the model can be put into production. What this means is to deploy the model on the IoT device and compute predictions based on the newly generated/acquired data. + +Federated learning in IoT can be organized on two axes: by industry and by use cases. + +For industry applications, examples include: +1. Healthcare - e.g. vital sign, activity levels, or sleep pattern monitoring using fitness trackers. +2. Transportation - e.g. trajectory prediction, object detection, driver drowsiness detection using on-board sensors and cameras. + +For use cases, examples include: +1. Predictive maintenance - e.g. using data acquired from physical sensors (impedence, temperature, vibration, pressure, viscosity, etc ...) +2. Anomaly detection - e.g. using environmental monitoring sensors for predicting air, noise, or water pollution, using internet network traffic data for network intrusion detection, using fiber optic sensors for remote sensing and monitoring, etc ... +3. Quality assurance and quality control - e.g. using in-line optical, acoustic, or sensor data during manufacturing processes to identify faulty products, etc ... + +### Using Flower for Federated Learning with IoT + +Flower is developed with a deployment engine that allows you to easily deploy your federated learning system on IoT devices. 
As a Data Scientist/ML Engineer, you will only need to write ClientApps and deploy them to IoT devices without needing to deal with the infrastructure and networking. To further help deployment, we provide [Docker images](https://hub.docker.com/u/flwr) for the SuperLink, SuperNode, and ServerApp so that you can easily ship the requirements of your Flower applications in containers in a production environment. Lastly, Flower supports the development of both Python and C++ clients, which provides developers with flexible ways of building ClientApps for resource-constrained devices. diff --git a/glossary/medical-ai.mdx b/glossary/medical-ai.mdx new file mode 100644 index 000000000000..d557f457c189 --- /dev/null +++ b/glossary/medical-ai.mdx @@ -0,0 +1,24 @@ +--- +title: "Medical AI" +description: "Medical AI involves the application of artificial intelligence technologies to healthcare, enhancing diagnosis, treatment planning, and patient monitoring by analyzing complex medical data." +date: "2024-07-12" +author: + name: "Yan Gao" + position: "Research Scientist" + website: "https://discuss.flower.ai/u/yan-gao/" + github: "github.com/yan-gao-GY" +related: + - text: "Federated Learning" + link: "/glossary/federated-learning" + - text: "Server" + link: "/glossary/server" + - text: "Client" + link: "/glossary/client" +--- + +Medical AI refers to the application of artificial intelligence technologies, particularly machine learning algorithms, to medical and healthcare-related fields. This includes, but is not limited to, tasks such as disease diagnosis, personalized treatment plans, drug development, medical imaging analysis, and healthcare management. The goal of Medical AI is to enhance healthcare services, improve treatment outcomes, reduce costs, and increase efficiency within healthcare systems. 
+ +Federated learning (FL) introduces a novel approach to the training of machine learning models across multiple decentralized devices or servers holding local data samples, without exchanging them. This is particularly appropriate in the medical field due to the sensitive nature of medical data and strict privacy requirements. It leverages the strength of diverse datasets without compromising patient confidentiality, making it an increasingly popular choice in Medical AI applications. + +#### Medical AI in Flower +Flower, a friendly FL framework, is developing a more versatile and privacy-enhancing solution for Medical AI through the use of FL. Please check out [Flower industry healthcare](flower.ai/industry/healthcare) website for more detailed information. diff --git a/glossary/model-training.mdx b/glossary/model-training.mdx new file mode 100644 index 000000000000..ba5923962f1b --- /dev/null +++ b/glossary/model-training.mdx @@ -0,0 +1,24 @@ +--- +title: "Model Training" +description: "Model training is the process of teaching an algorithm to learn from data to make predictions or decisions." +date: "2024-07-12" +author: + name: "Yan Gao" + position: "Research Scientist" + website: "https://discuss.flower.ai/u/yan-gao/" + github: "github.com/yan-gao-GY" +related: + - text: "Federated Learning" + link: "/glossary/federated-learning" + - text: "Server" + link: "/glossary/server" + - text: "Client" + link: "/glossary/client" +--- + +Model training is a core component of developing machine learning (ML) systems, where an algorithm learns from data to make predictions or decisions. A typical model training process involves several key steps: dataset preparation, feature selection and engineering, choice of model based on the task (e.g., classification, regression), choice of training algorithm (e.g. 
optimizer), and model iteration for updating its weights and biases to minimize the loss function, which measures the difference between the predicted and actual outcomes on the training data. The traditional ML model training process typically involves considerable manual effort, whereas deep learning (DL) offers an end-to-end automated process. + +This approach assumes easy access to data and often requires substantial computational resources, depending on the size of the dataset and complexity of the model. However, large amounts of the data in the real world is distributed and protected due to privacy concerns, making it inaccessible for typical (centralized) model training. Federated learning (FL) migrates the model training from data center to local user ends. After local training, each participant sends only their model's updates (not the data) to a central server for aggregation. The updated global model is sent back to the participants for further rounds of local training and updates. This way, the model training benefits from diverse, real-world data without compromising individual data privacy. + +#### Model training in Flower +Flower, a friendly FL framework, offers a wealth of model training examples and baselines tailored for federated environments. Please refer to the [examples](https://flower.ai/docs/examples/) and [baselines](https://flower.ai/docs/baselines/) documentation for more detailed information. diff --git a/glossary/platform-independence.mdx b/glossary/platform-independence.mdx new file mode 100644 index 000000000000..9582fae057ff --- /dev/null +++ b/glossary/platform-independence.mdx @@ -0,0 +1,19 @@ +--- +title: "Platform Independence" +description: "The capability to run program across different hardware and operating systems." 
+date: "2024-07-08" +author: + name: "Heng Pan" + position: "Research Scientist" + website: "https://discuss.flower.ai/u/pan-h/summary" + github: "github.com/panh99" +related: + - text: "Federated Learning" + link: "/glossary/federated-learning" +--- + +Platform independence in federated learning refers to the capability of machine learning systems to operate seamlessly across various hardware and operating system environments. This ensures that the federated learning process can function effectively on various devices with different operating systems such as Windows, Linux, Mac OS, iOS, and Android without requiring platform-specific modifications. By achieving platform independence, federated learning frameworks enable efficient data analysis and model training across heterogeneous edge devices, enhancing scalability and flexibility in distributed machine learning scenarios. + +### Platform Independence in Flower + +Flower is interoperable with different operating systems and hardware platforms to work well in heterogeneous edge device environments. \ No newline at end of file diff --git a/glossary/protocol-buffers.mdx b/glossary/protocol-buffers.mdx new file mode 100644 index 000000000000..7e9bf6c7bbc7 --- /dev/null +++ b/glossary/protocol-buffers.mdx @@ -0,0 +1,31 @@ +--- +title: "Protocol Buffers" +description: "Protocol Buffers, often abbreviated as Protobuf, are a language-neutral, platform-neutral, extensible mechanism for serializing structured data, similar to XML but smaller, faster, and simpler." +date: "2024-05-24" +author: + name: "Taner Topal" + position: "Co-Creator and CTO @ Flower Labs" + website: "https://www.linkedin.com/in/tanertopal/" + github: "github.com/tanertopal" +related: + - text: "Federated Learning" + link: "/glossary/federated-learning" + - text: "Tutorial: What is Federated Learning?" 
+ link: "/docs/framework/tutorial-series-what-is-federated-learning.html" +--- + +### Introduction to Protocol Buffers + +Protocol Buffers, often abbreviated as Protobuf, are a language-neutral, platform-neutral, extensible mechanism for serializing structured data, similar to XML but smaller, faster, and simpler. The method involves defining how you want your data to be structured once, then using language specific generated source code to write and read structured data to and from a variety of data streams. + +### How Protocol Buffers Work + +Protocol Buffers require a `.proto` file where the data structure (the messages) is defined. This is essentially a schema describing the data to be serialized. Once the `.proto` file is prepared, it is compiled using the Protobuf compiler (`protoc`), which generates data access classes in supported languages like Java, C++, Python, Swift, Kotlin, and more. These classes provide simple accessors for each field (like standard getters and setters) and methods to serialize the entire structure to a binary format that can be easily transmitted over network protocols or written to a file. + +### Advantages and Use Cases + +The primary advantages of Protocol Buffers include their simplicity, efficiency, and backward compatibility. They are more efficient than XML or JSON as they serialize to a binary format, which makes them both smaller and faster. They support backward compatibility, allowing to modify data structures without breaking deployed programs that are communicating using the protocol. This makes Protobuf an excellent choice for data storage or RPC (Remote Procedure Call) applications where small size, low latency, and schema evolution are critical. + +### Protocol Buffers in Flower + +In the context of Flower, Protocol Buffers play a crucial role in ensuring efficient and reliable communication between the server and clients. 
Federated learning involves heterogeneous clients (e.g., servers, mobile devices, edge devices) running different environments and programming languages. This setup requires frequent exchanges of model updates and other metadata between the server and clients. Protocol Buffers, with their efficient binary serialization, enable Flower to handle these exchanges with minimal overhead, ensuring low latency and reducing the bandwidth required for communication. Moreover, the backward compatibility feature of Protobuf allows Flower to evolve and update its communication protocols without disrupting existing deployments. Best of all, Flower users typically do not have to deal directly with Protobuf, as Flower provides language-specific abstractions that simplify interaction with the underlying communication protocols. diff --git a/glossary/scalability.mdx b/glossary/scalability.mdx new file mode 100644 index 000000000000..4bfb736ff08c --- /dev/null +++ b/glossary/scalability.mdx @@ -0,0 +1,22 @@ +--- +title: "Scalability" +description: "Scalability ensures systems grow with demand. In Federated Learning, it involves efficiently managing dynamic clients and diverse devices. Flower supports large-scale FL on various devices/ resources." +date: "2024-05-23" +author: + name: "Daniel Nata Nugraha" + position: "Software Engineer" + image: "daniel_nata_nugraha.png" + website: "https://www.linkedin.com/in/daniel-nugraha/" + github: "github.com/danielnugraha" +related: + - text: "Flower Paper" + link: "https://arxiv.org/pdf/2007.14390" + - text: "Federated Learning" + link: "/glossary/federated-learning" + - text: "Tutorial: What is Federated Learning?" + link: "/docs/framework/tutorial-series-what-is-federated-learning.html" +--- + +Scalability is the ability of a system, network, or process to accommodate an increasing amount of work. This involves adding resources (like servers) or optimizing existing ones to maintain or enhance performance. 
There are two main types of scalability: horizontal scalability (adding more nodes, such as servers) and vertical scalability (adding more power to existing nodes, like increasing CPU or RAM). Ideally, a scalable system can do both, seamlessly adapting to increased demands without significant downtime. Scalability is essential for businesses to grow while ensuring services remain reliable and responsive. +Scalability in Federated Learning involves managing dynamic client participation, as clients may join or leave unpredictably. This requires algorithms that adapt to varying availability and efficiently aggregate updates from numerous models. Additionally, scalable federated learning systems must handle heterogeneous client devices with different processing powers, network conditions, and data distributions, ensuring balanced contributions to the global model. +Scalability in Flower means efficiently conducting large-scale federated learning (FL) training and evaluation. Flower enables researchers to launch FL experiments with many clients using reasonable computing resources, such as a single machine or a multi-GPU rack. Flower supports scaling workloads to millions of clients, including diverse devices like Raspberry Pis, Android and iOS mobile devices, laptops, etc. It offers complete control over connection management and includes a virtual client engine for large-scale simulations. diff --git a/glossary/server.mdx b/glossary/server.mdx new file mode 100644 index 000000000000..efc25a227791 --- /dev/null +++ b/glossary/server.mdx @@ -0,0 +1,17 @@ +--- +title: "Server" +description: "The central entity coordinating the aggregation of local model updates from multiple clients to build a comprehensive, privacy-preserving global model." 
+date: "2024-07-08" +author: + name: "Heng Pan" + position: "Research Scientist" + website: "https://discuss.flower.ai/u/pan-h/summary" + github: "github.com/panh99" +related: + - text: "Client" + link: "/glossary/client" + - text: "Federated Learning" + link: "/glossary/federated-learning" +--- + +A server in federated learning plays a pivotal role by managing the distributed training process across various clients. Each client independently trains its local model using the local data and then sends the model updates to the server. The server aggregates the received updates to create a new global model, which is subsequently sent back to the clients. This iterative process allows the global model to improve over time without the need for the clients to share their raw data, ensuring data privacy and minimizing data transfer. \ No newline at end of file diff --git a/glossary/xgboost.mdx b/glossary/xgboost.mdx new file mode 100644 index 000000000000..51b5a2912e0b --- /dev/null +++ b/glossary/xgboost.mdx @@ -0,0 +1,34 @@ +--- +title: "XGBoost" +description: "XGBoost - or eXtreme Gradient Boosting - is an open-source library providing a regularizing gradient boosting decision tree framework for many programming languages including Python, C++, and Java." +date: "2024-09-10" +author: + name: "Chong Shen Ng" + position: "Research Engineer @ Flower Labs" + website: "https://discuss.flower.ai/u/chongshenng" + github: "github.com/chongshenng" +related: + - text: "Quickstart Federated Learning with XGBoost and Flower" + link: "/docs/framework/tutorial-quickstart-xgboost.html" + - text: "Flower Example using XGBoost (Comprehensive)" + link: "/docs/examples/xgboost-comprehensive.html" +--- + +### Introduction to XGBoost + +XGBoost - or eXtreme Gradient Boosting - is an open-source library which provides a regularizing gradient boosting framework for Python, C++, Java, R, Julia, Perl, and Scala. 
It implements machine learning algorithms based on the gradient boosting concept, where a single model is created from an ensemble of weak learners (decision trees). This is commonly referred to as a Gradient Boosting Decision Trees (GBDT), a decision tree ensemble learning algorithm. + +GBDTs are commonly compared with the random forest algorithm. They are similar in the sense that they build multiple decision trees. But the key differences are in how they are built and combined. Random forest first builds full decision trees in parallel from bootstrap samples of the dataset, and then generates the final prediction based on an average of all of the predictions. In contrast, GBDT iteratively trains decision trees with the objective that each subsequent tree reduces the error residuals of the previous model - this is the concept of boosting. The final prediction in a GBDT is a weighted sum of all of the tree predictions. While the bootstrap aggregation method of random forest minimizes variance and overfitting, the boosting method of GBDT minimizes bias and underfitting. + +XGBoost includes many features that optimize the implementation of GBDT, including parallelized tree training (instead of sequential) and integration with distributed processing frameworks like Apache Spark and Dask. These various performance improvements have historically made XGBoost the preferred framework of choice when training models for supervised learning tasks, and have seen widespread success in Kaggle competitions on structured data. + +### Use cases in Federated Learning + +While there is no way to know beforehand what model would perform the best in federated learning, XGBoost is appealing for several reasons: +1. To train the first model, XGBoost hyperparameters require significantly less tuning compared to neural network-based models. +2. 
XGBoost is known to produce models that perform far better than neural networks on tabular datasets, which can be encountered in real-world federated learning systems such as in healthcare or IoT applications. +3. Feature scaling is unnecessary when training XGBoost models. This not only facilitates fine-tuning on new data distributions, but also supports cross-device and cross-silo federated learning, where the data distributions from participating clients are not known a priori. + +### XGBoost in Flower + +In Flower, we have provided two strategies for performing federated learning with XGBoost: [`FedXgbBagging`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedxgb_bagging.py) and [`FedXgbCyclic`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedxgb_cyclic.py), which are inspired by the work at Nvidia NVFlare. These implementations allow Flower users to use different aggregation strategies for the XGBoost model. `FedXgbBagging` aggregates trees from all participating clients every round, whereas `FedXgbCyclic` aggregates clients' trees sequentially in a round-robin manner. With these strategies, Flower users can very quickly and easily run and compare the performance of federated learning systems on distributed tabular datasets using state-of-the-art XGBoost aggregation strategies, without needing to implement them from scratch. 
diff --git a/pyproject.toml b/pyproject.toml index a045987367f6..4b32908c8f51 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "flwr" -version = "1.9.0" +version = "1.13.0" description = "Flower: A Friendly Federated Learning Framework" license = "Apache-2.0" authors = ["The Flower Authors "] @@ -30,7 +30,6 @@ classifiers = [ "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", @@ -44,37 +43,38 @@ classifiers = [ "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed", ] -packages = [ - { include = "flwr", from = "src/py" }, -] -exclude = [ - "src/py/**/*_test.py", -] +packages = [{ include = "flwr", from = "src/py" }] +exclude = ["src/py/**/*_test.py"] [tool.poetry.scripts] +# `flwr` CLI flwr = "flwr.cli.app:app" -flower-driver-api = "flwr.server:run_driver_api" -flower-fleet-api = "flwr.server:run_fleet_api" -flower-superlink = "flwr.server:run_superlink" -flower-supernode = "flwr.client:run_supernode" -flower-client-app = "flwr.client:run_client_app" -flower-server-app = "flwr.server:run_server_app" +# SuperExec (can run with either Deployment Engine or Simulation Engine) +flower-superexec = "flwr.superexec.app:run_superexec" +# Simulation Engine flower-simulation = "flwr.simulation.run_simulation:run_simulation_from_cli" +# Deployment Engine +flower-superlink = "flwr.server.app:run_superlink" +flower-supernode = "flwr.client.supernode.app:run_supernode" +flower-server-app = "flwr.server.run_serverapp:run_server_app" +flwr-clientapp = "flwr.client.clientapp:flwr_clientapp" +flower-client-app = "flwr.client.supernode:run_client_app" # Deprecated [tool.poetry.dependencies] -python = "^3.8" +python = "^3.9" # Mandatory dependencies 
numpy = "^1.21.0" -grpcio = "^1.60.0" +grpcio = "^1.60.0,!=1.64.2,!=1.65.1,!=1.65.2,!=1.65.4,!=1.65.5,!=1.66.0,!=1.66.1" protobuf = "^4.25.2" cryptography = "^42.0.4" pycryptodome = "^3.18.0" iterators = "^0.0.2" -typer = { version = "^0.9.0", extras=["all"] } +typer = "^0.12.5" tomli = "^2.0.1" +tomli-w = "^1.0.0" pathspec = "^0.12.1" # Optional dependencies (Simulation Engine) -ray = { version = "==2.10.0", optional = true, python = ">=3.8,<3.12" } +ray = { version = "==2.10.0", optional = true, python = ">=3.9,<3.12" } # Optional dependencies (REST transport layer) requests = { version = "^2.31.0", optional = true } starlette = { version = "^0.31.0", optional = true } @@ -92,10 +92,12 @@ types-setuptools = "==69.0.0.20240125" clang-format = "==17.0.6" isort = "==5.13.2" black = { version = "==24.2.0", extras = ["jupyter"] } +taplo = "==0.9.3" docformatter = "==1.7.5" mypy = "==1.8.0" -pylint = "==3.0.3" +pylint = "==3.3.1" flake8 = "==5.0.4" +parameterized = "==0.9.0" pytest = "==7.4.4" pytest-cov = "==4.1.0" pytest-watcher = "==0.4.1" @@ -104,44 +106,51 @@ mypy-protobuf = "==3.2.0" jupyterlab = "==4.0.12" rope = "==1.11.0" semver = "==3.0.2" -sphinx = "==6.2.1" -sphinx-intl = "==2.1.0" -myst-parser = "==1.0.0" -sphinx-design = "==0.5.0" +sphinx = "==7.4.7" +sphinx-intl = "==2.2.0" +sphinx-click = "==6.0.0" +myst-parser = "==3.0.1" +sphinx-design = "==0.6.1" sphinx-copybutton = "==0.5.2" sphinxcontrib-mermaid = "==0.9.2" sphinxcontrib-youtube = "==1.4.1" -furo = "==2023.9.10" -sphinx-reredirects = "==0.1.3" -nbsphinx = "==0.9.3" +furo = "==2024.8.6" +sphinx-reredirects = "==0.1.5" +nbsphinx = "==0.9.5" nbstripout = "==0.6.1" ruff = "==0.1.9" sphinx-argparse = "==0.4.0" pipreqs = "==0.4.13" -mdformat-gfm = "==0.3.5" +mdformat-gfm = "==0.3.6" mdformat-frontmatter = "==2.0.1" mdformat-beautysh = "==0.1.1" -mdformat-myst = "==0.1.5" -twine = "==4.0.2" +twine = "==5.1.1" pyroma = "==4.2" check-wheel-contents = "==0.4.0" GitPython = "==3.1.32" PyGithub = "==2.1.1" 
licensecheck = "==2024" pre-commit = "==3.5.0" +sphinx-substitution-extensions = "2022.02.16" +sphinxext-opengraph = "==0.9.1" +docstrfmt = { git = "https://github.com/charlesbvll/docstrfmt.git", branch = "patch-1" } +docsig = "==0.64.0" + +[tool.docstrfmt] +extend_exclude = [ + "doc/source/conf.py", + "doc/source/tutorial-quickstart-huggingface.rst", + "doc/source/_templates/autosummary/*", + "doc/source/ref-api/*", +] [tool.isort] -line_length = 88 -indent = " " -multi_line_output = 3 -include_trailing_comma = true -force_grid_wrap = 0 -use_parentheses = true +profile = "black" known_first_party = ["flwr", "flwr_tool"] [tool.black] line-length = 88 -target-version = ["py38", "py39", "py310", "py311"] +target-version = ["py39", "py310", "py311"] [tool.pylint."MESSAGES CONTROL"] disable = "duplicate-code,too-few-public-methods,useless-import-alias" @@ -149,10 +158,7 @@ disable = "duplicate-code,too-few-public-methods,useless-import-alias" [tool.pytest.ini_options] minversion = "6.2" addopts = "-qq" -testpaths = [ - "src/py/flwr", - "src/py/flwr_tool", -] +testpaths = ["src/py/flwr", "src/py/flwr_tool"] filterwarnings = "ignore::DeprecationWarning" [tool.pytest-watcher] @@ -165,17 +171,12 @@ patterns = ["*.py"] ignore_patterns = [] [tool.mypy] -plugins = [ - "numpy.typing.mypy_plugin", -] +plugins = ["numpy.typing.mypy_plugin"] ignore_missing_imports = true strict = true [[tool.mypy.overrides]] -module = [ - "importlib.metadata.*", - "importlib_metadata.*", -] +module = ["importlib.metadata.*", "importlib_metadata.*"] follow_imports = "skip" follow_imports_for_stubs = true disallow_untyped_calls = false @@ -190,7 +191,7 @@ wrap-summaries = 88 wrap-descriptions = 88 [tool.ruff] -target-version = "py38" +target-version = "py39" line-length = 88 select = ["D", "E", "F", "W", "B", "ISC", "C4", "UP"] fixable = ["D", "E", "F", "W", "B", "ISC", "C4", "UP"] @@ -224,3 +225,7 @@ convention = "numpy" [tool.ruff.per-file-ignores] "src/py/flwr/server/strategy/*.py" = ["E501"] + 
+[tool.docsig] +ignore-no-params = true +exclude = 'src/py/flwr/proto/.*|src/py/flwr/.*_test\.py|src/py/flwr/cli/new/templates/.*\.tpl' diff --git a/src/docker/base/README.md b/src/docker/base/README.md new file mode 100644 index 000000000000..ef290a26fec4 --- /dev/null +++ b/src/docker/base/README.md @@ -0,0 +1,54 @@ +# Flower Base + +

+ + Flower Website + +

+ +## Quick reference + +- **Learn more:**
+ [Quickstart with Docker](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker.html) and [Quickstart with Docker Compose](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker-compose.html) + +- **Where to get help:**
+ [Flower Discuss](https://discuss.flower.ai), [Slack](https://flower.ai/join-slack) or [GitHub](https://github.com/adap/flower) + +- **Supported architectures:**
+ `amd64`, `arm64v8` + +## Supported tags + +- `unstable` + - points to the last successful build of the `main` branch +- `nightly`, `.dev` e.g. `1.13.0.dev20241014` + - uses Python 3.11 and Ubuntu 24.04 +- `1.12.0-py3.11-alpine3.19` +- `1.12.0-py3.11-ubuntu24.04` +- `1.12.0-py3.10-ubuntu24.04` +- `1.12.0-py3.9-ubuntu24.04` +- `1.11.1-py3.11-alpine3.19` +- `1.11.1-py3.11-ubuntu22.04` +- `1.11.1-py3.10-ubuntu22.04` +- `1.11.1-py3.9-ubuntu22.04` +- `1.11.1-py3.8-ubuntu22.04` +- `1.11.0-py3.11-alpine3.19` +- `1.11.0-py3.11-ubuntu22.04` +- `1.11.0-py3.10-ubuntu22.04` +- `1.11.0-py3.9-ubuntu22.04` +- `1.11.0-py3.8-ubuntu22.04` +- `1.10.0-py3.11-alpine3.19` +- `1.10.0-py3.11-ubuntu22.04` +- `1.10.0-py3.10-ubuntu22.04` +- `1.10.0-py3.9-ubuntu22.04` +- `1.10.0-py3.8-ubuntu22.04` +- `1.9.0-py3.11-alpine3.19` +- `1.9.0-py3.11-ubuntu22.04` +- `1.9.0-py3.10-ubuntu22.04` +- `1.9.0-py3.9-ubuntu22.04` +- `1.9.0-py3.8-ubuntu22.04` +- `1.8.0-py3.11-alpine3.19` +- `1.8.0-py3.11-ubuntu22.04` +- `1.8.0-py3.10-ubuntu22.04` +- `1.8.0-py3.9-ubuntu22.04` +- `1.8.0-py3.8-ubuntu22.04` diff --git a/src/docker/base/alpine/Dockerfile b/src/docker/base/alpine/Dockerfile index 04864b525e2e..ee1e11b2d070 100644 --- a/src/docker/base/alpine/Dockerfile +++ b/src/docker/base/alpine/Dockerfile @@ -26,13 +26,15 @@ ARG PYTHON_VERSION=3.11 ARG DISTRO=alpine ARG DISTRO_VERSION=3.19 -FROM python:${PYTHON_VERSION}-${DISTRO}${DISTRO_VERSION} as compile +FROM python:${PYTHON_VERSION}-${DISTRO}${DISTRO_VERSION} AS compile # Install system dependencies RUN apk add --no-cache \ # require for compiling grpcio on ARM64 g++ \ libffi-dev \ + # required for installing flwr via git + git \ # create virtual env && python -m venv /python/venv @@ -42,21 +44,36 @@ ENV PATH=/python/venv/bin:$PATH # Install specific version of pip, setuptools and flwr ARG PIP_VERSION ARG SETUPTOOLS_VERSION -ARG FLWR_VERSION -ARG FLWR_PACKAGE=flwr RUN pip install -U --no-cache-dir \ pip==${PIP_VERSION} \ - setuptools==${SETUPTOOLS_VERSION} 
\ - ${FLWR_PACKAGE}==${FLWR_VERSION} + setuptools==${SETUPTOOLS_VERSION} + +ARG FLWR_VERSION +ARG FLWR_VERSION_REF +ARG FLWR_PACKAGE=flwr +# hadolint ignore=DL3013 +RUN if [ -z "${FLWR_VERSION_REF}" ]; then \ + pip install -U --no-cache-dir ${FLWR_PACKAGE}==${FLWR_VERSION}; \ + else \ + pip install -U --no-cache-dir ${FLWR_PACKAGE}@${FLWR_VERSION_REF}; \ + fi + +FROM python:${PYTHON_VERSION}-${DISTRO}${DISTRO_VERSION} AS base -FROM python:${PYTHON_VERSION}-${DISTRO}${DISTRO_VERSION} as base +# Keep the version of system Python pip and setuptools in sync with those installed in the +# virtualenv. +ARG PIP_VERSION +ARG SETUPTOOLS_VERSION +RUN pip install -U --no-cache-dir pip==${PIP_VERSION} setuptools==${SETUPTOOLS_VERSION} # required by the grpc package RUN apk add --no-cache \ libstdc++ \ + ca-certificates \ # add non-root user && adduser \ --no-create-home \ + --home /app \ --disabled-password \ --gecos "" \ --uid 49999 app \ diff --git a/src/docker/base/ubuntu/Dockerfile b/src/docker/base/ubuntu/Dockerfile index 4aeddc3f8d8d..b52599a80784 100644 --- a/src/docker/base/ubuntu/Dockerfile +++ b/src/docker/base/ubuntu/Dockerfile @@ -15,8 +15,8 @@ # hadolint global ignore=DL3008 ARG DISTRO=ubuntu -ARG DISTRO_VERSION=22.04 -FROM $DISTRO:$DISTRO_VERSION as python +ARG DISTRO_VERSION=24.04 +FROM $DISTRO:$DISTRO_VERSION AS python ENV DEBIAN_FRONTEND=noninteractive @@ -32,7 +32,7 @@ RUN apt-get update \ # Install PyEnv and Python ARG PYTHON_VERSION=3.11 ENV PYENV_ROOT=/root/.pyenv -ENV PATH $PYENV_ROOT/bin:$PATH +ENV PATH=$PYENV_ROOT/bin:$PATH # https://github.com/hadolint/hadolint/wiki/DL4006 SHELL ["/bin/bash", "-o", "pipefail", "-c"] RUN curl -L https://github.com/pyenv/pyenv-installer/raw/master/bin/pyenv-installer | bash @@ -48,18 +48,56 @@ RUN git clone https://github.com/pyenv/pyenv.git \ RUN LATEST=$(pyenv latest -k ${PYTHON_VERSION}) \ && python-build "${LATEST}" /usr/local/bin/python -FROM $DISTRO:$DISTRO_VERSION as base +ENV PATH=/usr/local/bin/python/bin:$PATH 
-ENV DEBIAN_FRONTEND=noninteractive +ARG PIP_VERSION +ARG SETUPTOOLS_VERSION +# Keep the version of system Python pip and setuptools in sync with those installed in the +# virtualenv. +RUN pip install -U --no-cache-dir pip==${PIP_VERSION} setuptools==${SETUPTOOLS_VERSION} \ + # Use a virtual environment to ensure that Python packages are installed in the same location + # regardless of whether the subsequent image build is run with the app or the root user + && python -m venv /python/venv +ENV PATH=/python/venv/bin:$PATH -RUN apt-get update \ +RUN pip install -U --no-cache-dir \ + pip==${PIP_VERSION} \ + setuptools==${SETUPTOOLS_VERSION} + +ARG FLWR_VERSION +ARG FLWR_VERSION_REF +ARG FLWR_PACKAGE=flwr +# hadolint ignore=DL3013 +RUN if [ -z "${FLWR_VERSION_REF}" ]; then \ + pip install -U --no-cache-dir ${FLWR_PACKAGE}==${FLWR_VERSION}; \ + else \ + pip install -U --no-cache-dir ${FLWR_PACKAGE}@${FLWR_VERSION_REF}; \ + fi + +FROM $DISTRO:$DISTRO_VERSION AS base + +COPY --from=python /usr/local/bin/python /usr/local/bin/python + +ENV DEBIAN_FRONTEND=noninteractive \ + PATH=/usr/local/bin/python/bin:$PATH + +RUN apt-get update \ && apt-get -y --no-install-recommends install \ libsqlite3-0 \ - && rm -rf /var/lib/apt/lists/* + ca-certificates \ + && rm -rf /var/lib/apt/lists/* \ + # add non-root user + && useradd \ + --no-create-home \ + --home-dir /app \ + -c "" \ + --uid 49999 app \ + && mkdir -p /app \ + && chown -R app:app /app -COPY --from=python /usr/local/bin/python /usr/local/bin/python +COPY --from=python --chown=app:app /python/venv /python/venv -ENV PATH=/usr/local/bin/python/bin:$PATH \ +ENV PATH=/python/venv/bin:$PATH \ # Send stdout and stderr stream directly to the terminal. Ensures that no # output is retained in a buffer if the application crashes. 
PYTHONUNBUFFERED=1 \ @@ -72,30 +110,6 @@ ENV PATH=/usr/local/bin/python/bin:$PATH \ LANG=C.UTF-8 \ LC_ALL=C.UTF-8 -# Use a virtual environment to ensure that Python packages are installed in the same location -# regardless of whether the subsequent image build is run with the app or the root user -RUN python -m venv /python/venv -ENV PATH=/python/venv/bin:$PATH - -ARG PIP_VERSION -ARG SETUPTOOLS_VERSION -ARG FLWR_VERSION -ARG FLWR_PACKAGE=flwr -RUN pip install -U --no-cache-dir \ - pip==${PIP_VERSION} \ - setuptools==${SETUPTOOLS_VERSION} \ - ${FLWR_PACKAGE}==${FLWR_VERSION} - -# add non-root user -RUN adduser \ - --no-create-home \ - --disabled-password \ - --gecos "" \ - --uid 49999 app \ - && mkdir -p /app \ - && chown -R app:app /python \ - && chown -R app:app /app - WORKDIR /app USER app ENV HOME=/app diff --git a/src/docker/clientapp/Dockerfile b/src/docker/clientapp/Dockerfile new file mode 100644 index 000000000000..0f5e2b1d81a1 --- /dev/null +++ b/src/docker/clientapp/Dockerfile @@ -0,0 +1,20 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +ARG BASE_REPOSITORY=flwr/base +ARG BASE_IMAGE +FROM $BASE_REPOSITORY:$BASE_IMAGE + +ENTRYPOINT ["flwr-clientapp"] diff --git a/src/docker/clientapp/README.md b/src/docker/clientapp/README.md new file mode 100644 index 000000000000..a610de66eeae --- /dev/null +++ b/src/docker/clientapp/README.md @@ -0,0 +1,36 @@ +# Flower ClientApp + +

+ + Flower Website + +

+ +## Quick reference + +- **Learn more:**
+ [Quickstart with Docker](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker.html) and [Quickstart with Docker Compose](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker-compose.html) + +- **Where to get help:**
+ [Flower Discuss](https://discuss.flower.ai), [Slack](https://flower.ai/join-slack) or [GitHub](https://github.com/adap/flower) + +- **Supported architectures:**
+ `amd64`, `arm64v8` + +## Supported tags + +- `unstable` + - points to the last successful build of the `main` branch +- `nightly`, `.dev` e.g. `1.13.0.dev20241014` + - uses Python 3.11 and Ubuntu 24.04 +- `1.12.0`, `1.12.0-py3.11-ubuntu24.04` +- `1.12.0-py3.10-ubuntu24.04` +- `1.12.0-py3.9-ubuntu24.04` +- `1.11.1`, `1.11.1-py3.11-ubuntu22.04` +- `1.11.1-py3.10-ubuntu22.04` +- `1.11.1-py3.9-ubuntu22.04` +- `1.11.1-py3.8-ubuntu22.04` +- `1.11.0`, `1.11.0-py3.11-ubuntu22.04` +- `1.11.0-py3.10-ubuntu22.04` +- `1.11.0-py3.9-ubuntu22.04` +- `1.11.0-py3.8-ubuntu22.04` diff --git a/src/docker/complete/.gitignore b/src/docker/complete/.gitignore new file mode 100644 index 000000000000..53a6b57b9b4d --- /dev/null +++ b/src/docker/complete/.gitignore @@ -0,0 +1,3 @@ +superexec-certificates +superlink-certificates +state diff --git a/src/docker/complete/certs.yml b/src/docker/complete/certs.yml new file mode 100644 index 000000000000..d7d938a2aa4a --- /dev/null +++ b/src/docker/complete/certs.yml @@ -0,0 +1,124 @@ +services: + gen-certs: + build: + context: . 
+ pull: true + dockerfile_inline: | + FROM ubuntu:latest + + RUN apt-get update \ + && apt-get -y --no-install-recommends install \ + openssl + + WORKDIR /app/script + + ARG SUPERLINK_IP=127.0.0.1 + + COPY <<-EOF superlink-certificate.conf + [req] + default_bits = 4096 + prompt = no + default_md = sha256 + req_extensions = req_ext + distinguished_name = dn + + [dn] + C = US + O = Flower + CN = localhost + + [req_ext] + subjectAltName = @alt_names + + [alt_names] + DNS.0 = superlink + IP.1 = ::1 + IP.2 = $${SUPERLINK_IP} + EOF + + ARG SUPEREXEC_IP=127.0.0.1 + + COPY <<-EOF superexec-certificate.conf + [req] + default_bits = 4096 + prompt = no + default_md = sha256 + req_extensions = req_ext + distinguished_name = dn + + [dn] + C = US + O = Flower + CN = localhost + + [req_ext] + subjectAltName = @alt_names + + [alt_names] + DNS.0 = superexec + IP.1 = ::1 + IP.2 = $${SUPEREXEC_IP} + EOF + + COPY --chmod=744 <<-'EOF' generate.sh + #!/bin/bash + # This script will generate all certificates if ca.crt does not exist + + set -e + cd "$$( cd "$$( dirname "$${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../ + + CA_PASSWORD=notsafe + + # Generate directories if not exists + + generate () { + mkdir -p "$$1" + + if [ -f ""$$1"/ca.crt" ]; then + echo "Skipping certificate generation as they already exist." + return 0 + fi + + # Generate the root certificate authority key and certificate based on key + openssl genrsa -out "$$1"/ca.key 4096 + openssl req \ + -new \ + -x509 \ + -key "$$1"/ca.key \ + -sha256 \ + -subj "/C=DE/ST=HH/O=CA, Inc." 
\ + -days 365 -out "$$1"/ca.crt + + # Generate a new private key for the server + openssl genrsa -out "$$1"/server.key 4096 + + # Create a signing CSR + openssl req \ + -new \ + -key "$$1"/server.key \ + -out "$$1"/server.csr \ + -config ./script/"$$2" + + # Generate a certificate for the server + openssl x509 \ + -req \ + -in "$$1"/server.csr \ + -CA "$$1"/ca.crt \ + -CAkey "$$1"/ca.key \ + -CAcreateserial \ + -out "$$1"/server.pem \ + -days 365 \ + -sha256 \ + -extfile ./script/"$$2" \ + -extensions req_ext + } + generate superlink-certificates superlink-certificate.conf + generate superexec-certificates superexec-certificate.conf + EOF + + WORKDIR /app + + ENTRYPOINT ["./script/generate.sh"] + volumes: + - ./superlink-certificates/:/app/superlink-certificates/:rw + - ./superexec-certificates/:/app/superexec-certificates/:rw diff --git a/src/docker/complete/compose.yml b/src/docker/complete/compose.yml new file mode 100644 index 000000000000..b21189d94123 --- /dev/null +++ b/src/docker/complete/compose.yml @@ -0,0 +1,181 @@ +services: + # create a SuperLink service + superlink: + image: flwr/superlink:${FLWR_VERSION:-1.12.0} + command: + - --insecure + + # create a SuperExec service + superexec: + build: + context: ${PROJECT_DIR:-.} + dockerfile_inline: | + FROM flwr/superexec:${FLWR_VERSION:-1.12.0} + + # gcc is required for the fastai quickstart example + USER root + RUN apt-get update \ + && apt-get -y --no-install-recommends install \ + build-essential \ + && rm -rf /var/lib/apt/lists/* + USER app + + WORKDIR /app + COPY --chown=app:app pyproject.toml . + RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + && python -m pip install -U --no-cache-dir . 
+ + ENTRYPOINT ["flower-superexec"] + ports: + - 9093:9093 + command: + - --executor + - flwr.superexec.deployment:executor + - --insecure + - --executor-config + - superlink="superlink:9091" + depends_on: + - superlink + + # create a two SuperNode service with different node configs + supernode-1: + image: flwr/supernode:${FLWR_VERSION:-1.12.0} + command: + - --insecure + - --superlink + - superlink:9092 + - --supernode-address + - 0.0.0.0:9094 + - --isolation + - process + - --node-config + - "partition-id=0 num-partitions=2" + depends_on: + - superlink + + supernode-2: + image: flwr/supernode:${FLWR_VERSION:-1.12.0} + command: + - --insecure + - --superlink + - superlink:9092 + - --supernode-address + - 0.0.0.0:9095 + - --isolation + - process + - --node-config + - "partition-id=1 num-partitions=2" + depends_on: + - superlink + + # uncomment to add another SuperNode + # + # supernode-3: + # image: flwr/supernode:${FLWR_VERSION:-1.12.0} + # command: + # - --insecure + # - --superlink + # - superlink:9092 + # - --supernode-address + # - 0.0.0.0:9096 + # - --isolation + # - process + # - --node-config + # - "partition-id=1 num-partitions=2" + # depends_on: + # - superlink + + clientapp-1: + build: + context: ${PROJECT_DIR:-.} + dockerfile_inline: | + FROM flwr/clientapp:${FLWR_VERSION:-1.12.0} + + # gcc is required for the fastai quickstart example + USER root + RUN apt-get update \ + && apt-get -y --no-install-recommends install \ + build-essential \ + && rm -rf /var/lib/apt/lists/* + USER app + + WORKDIR /app + COPY --chown=app:app pyproject.toml . + RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + && python -m pip install -U --no-cache-dir . 
+ + ENTRYPOINT ["flwr-clientapp"] + command: + - --supernode + - supernode-1:9094 + deploy: + resources: + limits: + cpus: "2" + stop_signal: SIGINT + depends_on: + - supernode-1 + + clientapp-2: + build: + context: ${PROJECT_DIR:-.} + dockerfile_inline: | + FROM flwr/clientapp:${FLWR_VERSION:-1.12.0} + + # gcc is required for the fastai quickstart example + USER root + RUN apt-get update \ + && apt-get -y --no-install-recommends install \ + build-essential \ + && rm -rf /var/lib/apt/lists/* + USER app + + WORKDIR /app + COPY --chown=app:app pyproject.toml . + RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + && python -m pip install -U --no-cache-dir . + + ENTRYPOINT ["flwr-clientapp"] + command: + - --supernode + - supernode-2:9095 + deploy: + resources: + limits: + cpus: "2" + stop_signal: SIGINT + depends_on: + - supernode-2 + + # uncomment to add another ClientApp + # + # clientapp-3: + # build: + # context: ${PROJECT_DIR:-.} + # dockerfile_inline: | + # FROM flwr/clientapp:${FLWR_VERSION:-1.12.0} + + # # gcc is required for the fastai quickstart example + # USER root + # RUN apt-get update \ + # && apt-get -y --no-install-recommends install \ + # build-essential \ + # && rm -rf /var/lib/apt/lists/* + # USER app + + # WORKDIR /app + # COPY --chown=app:app pyproject.toml . + # RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + # && python -m pip install -U --no-cache-dir . 
+ + # ENTRYPOINT ["flwr-clientapp"] + # command: + # - --supernode + # - supernode-3:9096 + # deploy: + # resources: + # limits: + # cpus: "2" + # stop_signal: SIGINT + # depends_on: + # - supernode-3 diff --git a/src/docker/complete/with-state.yml b/src/docker/complete/with-state.yml new file mode 100644 index 000000000000..cc922a9ef12e --- /dev/null +++ b/src/docker/complete/with-state.yml @@ -0,0 +1,14 @@ +services: + superlink: + command: + - --insecure + - --database=state/state.db + # To toggle TLS encryption and persisting state for the SuperLink, comment the key `command` + # above and uncomment the lines below: + # command: + # - --ssl-ca-certfile=certificates/ca.crt + # - --ssl-certfile=certificates/server.pem + # - --ssl-keyfile=certificates/server.key + # - --database=state/state.db + volumes: + - ./state/:/app/state/:rw diff --git a/src/docker/complete/with-tls.yml b/src/docker/complete/with-tls.yml new file mode 100644 index 000000000000..6cbeb2ba7397 --- /dev/null +++ b/src/docker/complete/with-tls.yml @@ -0,0 +1,96 @@ +services: + superlink: + command: + - --ssl-ca-certfile=certificates/ca.crt + - --ssl-certfile=certificates/server.pem + - --ssl-keyfile=certificates/server.key + secrets: + - source: superlink-ca-certfile + target: /app/certificates/ca.crt + - source: superlink-certfile + target: /app/certificates/server.pem + - source: superlink-keyfile + target: /app/certificates/server.key + + superexec: + command: + - --executor + - flwr.superexec.deployment:executor + - --executor-config + - superlink="superlink:9091" root-certificates="certificates/superlink-ca.crt" + - --ssl-ca-certfile=certificates/ca.crt + - --ssl-certfile=certificates/server.pem + - --ssl-keyfile=certificates/server.key + secrets: + - source: superlink-ca-certfile + target: /app/certificates/superlink-ca.crt + - source: superexec-ca-certfile + target: /app/certificates/ca.crt + - source: superexec-certfile + target: /app/certificates/server.pem + - source: superexec-keyfile 
+ target: /app/certificates/server.key + + supernode-1: + command: + - --superlink + - superlink:9092 + - --supernode-address + - 0.0.0.0:9094 + - --isolation + - process + - --node-config + - "partition-id=0 num-partitions=2" + - --root-certificates + - certificates/ca.crt + secrets: + - source: superlink-ca-certfile + target: /app/certificates/ca.crt + + supernode-2: + command: + - --superlink + - superlink:9092 + - --supernode-address + - 0.0.0.0:9095 + - --isolation + - process + - --node-config + - "partition-id=1 num-partitions=2" + - --root-certificates + - certificates/ca.crt + secrets: + - source: superlink-ca-certfile + target: /app/certificates/ca.crt + + # uncomment to enable TLS on another SuperNode + # + # supernode-3: + # command: + # - --superlink + # - superlink:9092 + # - --supernode-address + # - 0.0.0.0:9096 + # - --isolation + # - process + # - --node-config + # - "partition-id=1 num-partitions=2" + # - --root-certificates + # - certificates/ca.crt + # secrets: + # - source: superlink-ca-certfile + # target: /app/certificates/ca.crt + +secrets: + superlink-ca-certfile: + file: ./superlink-certificates/ca.crt + superlink-certfile: + file: ./superlink-certificates/server.pem + superlink-keyfile: + file: ./superlink-certificates/server.key + superexec-ca-certfile: + file: ./superexec-certificates/ca.crt + superexec-certfile: + file: ./superexec-certificates/server.pem + superexec-keyfile: + file: ./superexec-certificates/server.key diff --git a/src/docker/distributed/.gitignore b/src/docker/distributed/.gitignore new file mode 100644 index 000000000000..1a11330c6e95 --- /dev/null +++ b/src/docker/distributed/.gitignore @@ -0,0 +1,3 @@ +superexec-certificates +superlink-certificates +server/state diff --git a/src/docker/distributed/certs.yml b/src/docker/distributed/certs.yml new file mode 100644 index 000000000000..48e157582e40 --- /dev/null +++ b/src/docker/distributed/certs.yml @@ -0,0 +1,6 @@ +services: + gen-certs: + build: + args: + 
SUPERLINK_IP: ${SUPERLINK_IP:-127.0.0.1} + SUPEREXEC_IP: ${SUPEREXEC_IP:-127.0.0.1} diff --git a/src/docker/distributed/client/compose.yml b/src/docker/distributed/client/compose.yml new file mode 100644 index 000000000000..6bc6e6739ae4 --- /dev/null +++ b/src/docker/distributed/client/compose.yml @@ -0,0 +1,128 @@ +services: + supernode-1: + image: flwr/supernode:${FLWR_VERSION:-1.12.0} + command: + - --superlink + - ${SUPERLINK_IP:-127.0.0.1}:9092 + - --supernode-address + - 0.0.0.0:9094 + - --isolation + - process + - --node-config + - "partition-id=0 num-partitions=2" + - --root-certificates + - certificates/ca.crt + secrets: + - source: superlink-ca-certfile + target: /app/certificates/ca.crt + + supernode-2: + image: flwr/supernode:${FLWR_VERSION:-1.12.0} + command: + - --superlink + - ${SUPERLINK_IP:-127.0.0.1}:9092 + - --supernode-address + - 0.0.0.0:9095 + - --isolation + - process + - --node-config + - "partition-id=1 num-partitions=2" + - --root-certificates + - certificates/ca.crt + secrets: + - source: superlink-ca-certfile + target: /app/certificates/ca.crt + + # uncomment to add another SuperNode + # + # supernode-3: + # image: flwr/supernode:${FLWR_VERSION:-1.12.0} + # command: + # - --superlink + # - ${SUPERLINK_IP:-127.0.0.1}:9092 + # - --supernode-address + # - 0.0.0.0:9096 + # - --isolation + # - process + # - --node-config + # - "partition-id=1 num-partitions=2" + # - --root-certificates + # - certificates/ca.crt + # secrets: + # - source: superlink-ca-certfile + # target: /app/certificates/ca.crt + + clientapp-1: + build: + context: ${PROJECT_DIR:-.} + dockerfile_inline: | + FROM flwr/clientapp:${FLWR_VERSION:-1.12.0} + + WORKDIR /app + COPY --chown=app:app pyproject.toml . + RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + && python -m pip install -U --no-cache-dir . 
+ + ENTRYPOINT ["flwr-clientapp"] + command: + - --supernode + - supernode-1:9094 + deploy: + resources: + limits: + cpus: "2" + stop_signal: SIGINT + depends_on: + - supernode-1 + + clientapp-2: + build: + context: ${PROJECT_DIR:-.} + dockerfile_inline: | + FROM flwr/clientapp:${FLWR_VERSION:-1.12.0} + + WORKDIR /app + COPY --chown=app:app pyproject.toml . + RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + && python -m pip install -U --no-cache-dir . + + ENTRYPOINT ["flwr-clientapp"] + command: + - --supernode + - supernode-2:9095 + deploy: + resources: + limits: + cpus: "2" + stop_signal: SIGINT + depends_on: + - supernode-2 + + # uncomment to add another ClientApp + # + # clientapp-3: + # build: + # context: ${PROJECT_DIR:-.} + # dockerfile_inline: | + # FROM flwr/clientapp:${FLWR_VERSION:-1.12.0} + + # WORKDIR /app + # COPY --chown=app:app pyproject.toml . + # RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + # && python -m pip install -U --no-cache-dir . + + # ENTRYPOINT ["flwr-clientapp"] + # command: + # - --supernode + # - supernode-3:9096 + # deploy: + # resources: + # limits: + # cpus: "2" + # stop_signal: SIGINT + # depends_on: + # - supernode-3 + +secrets: + superlink-ca-certfile: + file: ../superlink-certificates/ca.crt diff --git a/src/docker/distributed/server/compose.yml b/src/docker/distributed/server/compose.yml new file mode 100644 index 000000000000..f53b63593eb8 --- /dev/null +++ b/src/docker/distributed/server/compose.yml @@ -0,0 +1,67 @@ +services: + superlink: + image: flwr/superlink:${FLWR_VERSION:-1.12.0} + command: + - --ssl-ca-certfile=certificates/ca.crt + - --ssl-certfile=certificates/server.pem + - --ssl-keyfile=certificates/server.key + - --database=state/state.db + volumes: + - ./state/:/app/state/:rw + secrets: + - source: superlink-ca-certfile + target: /app/certificates/ca.crt + - source: superlink-certfile + target: /app/certificates/server.pem + - source: superlink-keyfile + target: 
/app/certificates/server.key + ports: + - 9092:9092 + + superexec: + build: + context: ${PROJECT_DIR:-.} + dockerfile_inline: | + FROM flwr/superexec:${FLWR_VERSION:-1.12.0} + + WORKDIR /app + COPY --chown=app:app pyproject.toml . + RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + && python -m pip install -U --no-cache-dir . + + ENTRYPOINT ["flower-superexec"] + command: + - --executor + - flwr.superexec.deployment:executor + - --executor-config + - superlink="superlink:9091" root-certificates="certificates/superlink-ca.crt" + - --ssl-ca-certfile=certificates/ca.crt + - --ssl-certfile=certificates/server.pem + - --ssl-keyfile=certificates/server.key + secrets: + - source: superlink-ca-certfile + target: /app/certificates/superlink-ca.crt + - source: superexec-ca-certfile + target: /app/certificates/ca.crt + - source: superexec-certfile + target: /app/certificates/server.pem + - source: superexec-keyfile + target: /app/certificates/server.key + ports: + - 9093:9093 + depends_on: + - superlink + +secrets: + superlink-ca-certfile: + file: ../superlink-certificates/ca.crt + superlink-certfile: + file: ../superlink-certificates/server.pem + superlink-keyfile: + file: ../superlink-certificates/server.key + superexec-ca-certfile: + file: ../superexec-certificates/ca.crt + superexec-certfile: + file: ../superexec-certificates/server.pem + superexec-keyfile: + file: ../superexec-certificates/server.key diff --git a/src/docker/serverapp/README.md b/src/docker/serverapp/README.md new file mode 100644 index 000000000000..110712fe3bfd --- /dev/null +++ b/src/docker/serverapp/README.md @@ -0,0 +1,48 @@ +# Flower ServerApp + +

+ + Flower Website + +

+ +## Quick reference + +- **Learn more:**
+ [Quickstart with Docker](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker.html) and [Quickstart with Docker Compose](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker-compose.html) + +- **Where to get help:**
+ [Flower Discuss](https://discuss.flower.ai), [Slack](https://flower.ai/join-slack) or [GitHub](https://github.com/adap/flower) + +- **Supported architectures:**
+ `amd64`, `arm64v8` + +## Supported tags + +- `unstable` + - points to the last successful build of the `main` branch +- `nightly`, `.dev` e.g. `1.13.0.dev20241014` + - uses Python 3.11 and Ubuntu 24.04 +- `1.12.0`, `1.12.0-py3.11-ubuntu24.04` +- `1.12.0-py3.10-ubuntu24.04` +- `1.12.0-py3.9-ubuntu24.04` +- `1.11.1`, `1.11.1-py3.11-ubuntu22.04` +- `1.11.1-py3.10-ubuntu22.04` +- `1.11.1-py3.9-ubuntu22.04` +- `1.11.1-py3.8-ubuntu22.04` +- `1.11.0`, `1.11.0-py3.11-ubuntu22.04` +- `1.11.0-py3.10-ubuntu22.04` +- `1.11.0-py3.9-ubuntu22.04` +- `1.11.0-py3.8-ubuntu22.04` +- `1.10.0`, `1.10.0-py3.11-ubuntu22.04` +- `1.10.0-py3.10-ubuntu22.04` +- `1.10.0-py3.9-ubuntu22.04` +- `1.10.0-py3.8-ubuntu22.04` +- `1.9.0`, `1.9.0-py3.11-ubuntu22.04` +- `1.9.0-py3.10-ubuntu22.04` +- `1.9.0-py3.9-ubuntu22.04` +- `1.9.0-py3.8-ubuntu22.04` +- `1.8.0`, `1.8.0-py3.11-ubuntu22.04` +- `1.8.0-py3.10-ubuntu22.04` +- `1.8.0-py3.9-ubuntu22.04` +- `1.8.0-py3.8-ubuntu22.04` diff --git a/src/docker/superexec/Dockerfile b/src/docker/superexec/Dockerfile new file mode 100644 index 000000000000..9e4cc722921e --- /dev/null +++ b/src/docker/superexec/Dockerfile @@ -0,0 +1,20 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +ARG BASE_REPOSITORY=flwr/base +ARG BASE_IMAGE +FROM $BASE_REPOSITORY:$BASE_IMAGE + +ENTRYPOINT ["flower-superexec"] diff --git a/src/docker/superexec/README.md b/src/docker/superexec/README.md new file mode 100644 index 000000000000..8026db18b978 --- /dev/null +++ b/src/docker/superexec/README.md @@ -0,0 +1,40 @@ +# Flower SuperExec + +

+ + Flower Website + +

+ +## Quick reference + +- **Learn more:**
+ [Quickstart with Docker](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker.html) and [Quickstart with Docker Compose](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker-compose.html) + +- **Where to get help:**
+ [Flower Discuss](https://discuss.flower.ai), [Slack](https://flower.ai/join-slack) or [GitHub](https://github.com/adap/flower) + +- **Supported architectures:**
+ `amd64`, `arm64v8` + +## Supported tags + +- `unstable` + - points to the last successful build of the `main` branch +- `nightly`, `.dev` e.g. `1.13.0.dev20241014` + - uses Python 3.11 and Ubuntu 24.04 +- `1.12.0`, `1.12.0-py3.11-ubuntu24.04` +- `1.12.0-py3.10-ubuntu24.04` +- `1.12.0-py3.9-ubuntu24.04` +- `1.11.1`, `1.11.1-py3.11-ubuntu22.04` +- `1.11.1-py3.10-ubuntu22.04` +- `1.11.1-py3.9-ubuntu22.04` +- `1.11.1-py3.8-ubuntu22.04` +- `1.11.0`, `1.11.0-py3.11-ubuntu22.04` +- `1.11.0-py3.10-ubuntu22.04` +- `1.11.0-py3.9-ubuntu22.04` +- `1.11.0-py3.8-ubuntu22.04` +- `1.10.0`, `1.10.0-py3.11-ubuntu22.04` +- `1.10.0-py3.10-ubuntu22.04` +- `1.10.0-py3.9-ubuntu22.04` +- `1.10.0-py3.8-ubuntu22.04` diff --git a/src/docker/superlink/README.md b/src/docker/superlink/README.md new file mode 100644 index 000000000000..af03ce1c8054 --- /dev/null +++ b/src/docker/superlink/README.md @@ -0,0 +1,37 @@ +# Flower SuperLink + +

+ + Flower Website + +

+ +## Quick reference + +- **Learn more:**
+ [Quickstart with Docker](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker.html) and [Quickstart with Docker Compose](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker-compose.html) + +- **Where to get help:**
+ [Flower Discuss](https://discuss.flower.ai), [Slack](https://flower.ai/join-slack) or [GitHub](https://github.com/adap/flower) + +- **Supported architectures:**
+ `amd64`, `arm64v8` + +## Supported tags + +- `unstable` + - points to the last successful build of the `main` branch +- `nightly`, `.dev` e.g. `1.13.0.dev20241014` + - uses Python 3.11 and Ubuntu 24.04 +- `1.12.0`, `1.12.0-py3.11-alpine3.19` +- `1.12.0-py3.11-ubuntu24.04` +- `1.11.1`, `1.11.1-py3.11-alpine3.19` +- `1.11.1-py3.11-ubuntu22.04` +- `1.11.0`, `1.11.0-py3.11-alpine3.19` +- `1.11.0-py3.11-ubuntu22.04` +- `1.10.0`, `1.10.0-py3.11-alpine3.19` +- `1.10.0-py3.11-ubuntu22.04` +- `1.9.0`, `1.9.0-py3.11-alpine3.19` +- `1.9.0-py3.11-ubuntu22.04` +- `1.8.0`, `1.8.0-py3.11-alpine3.19` +- `1.8.0-py3.11-ubuntu22.04` diff --git a/src/docker/supernode/Dockerfile b/src/docker/supernode/Dockerfile index 8dce1c389a5b..8b78577b1201 100644 --- a/src/docker/supernode/Dockerfile +++ b/src/docker/supernode/Dockerfile @@ -17,4 +17,4 @@ ARG BASE_REPOSITORY=flwr/base ARG BASE_IMAGE FROM $BASE_REPOSITORY:$BASE_IMAGE -ENTRYPOINT ["flower-client-app"] +ENTRYPOINT ["flower-supernode"] diff --git a/src/docker/supernode/README.md b/src/docker/supernode/README.md new file mode 100644 index 000000000000..493f98cc78e4 --- /dev/null +++ b/src/docker/supernode/README.md @@ -0,0 +1,47 @@ +# Flower SuperNode + +

+ + Flower Website + +

+ +## Quick reference + +- **Learn more:**
+ [Quickstart with Docker](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker.html) and [Quickstart with Docker Compose](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker-compose.html) + +- **Where to get help:**
+ [Flower Discuss](https://discuss.flower.ai), [Slack](https://flower.ai/join-slack) or [GitHub](https://github.com/adap/flower) + +- **Supported architectures:**
+ `amd64`, `arm64v8` + +## Supported tags + +- `unstable` + - points to the last successful build of the `main` branch +- `nightly`, `.dev` e.g. `1.13.0.dev20241014` + - uses Python 3.11 and Ubuntu 24.04 +- `1.12.0`, `1.12.0-py3.11-alpine3.19` +- `1.12.0-py3.11-ubuntu24.04` +- `1.12.0-py3.10-ubuntu24.04` +- `1.12.0-py3.9-ubuntu24.04` +- `1.11.1`, `1.11.1-py3.11-alpine3.19` +- `1.11.1-py3.11-ubuntu22.04` +- `1.11.1-py3.10-ubuntu22.04` +- `1.11.1-py3.9-ubuntu22.04` +- `1.11.1-py3.8-ubuntu22.04` +- `1.11.0`, `1.11.0-py3.11-alpine3.19` +- `1.11.0-py3.11-ubuntu22.04` +- `1.11.0-py3.10-ubuntu22.04` +- `1.11.0-py3.9-ubuntu22.04` +- `1.11.0-py3.8-ubuntu22.04` +- `1.10.0`, `1.10.0-py3.11-ubuntu22.04` +- `1.10.0-py3.10-ubuntu22.04` +- `1.10.0-py3.9-ubuntu22.04` +- `1.10.0-py3.8-ubuntu22.04` +- `1.9.0`, `1.9.0-py3.11-ubuntu22.04` +- `1.9.0-py3.10-ubuntu22.04` +- `1.9.0-py3.9-ubuntu22.04` +- `1.9.0-py3.8-ubuntu22.04` diff --git a/src/proto/flwr/proto/clientappio.proto b/src/proto/flwr/proto/clientappio.proto new file mode 100644 index 000000000000..19d2db50501a --- /dev/null +++ b/src/proto/flwr/proto/clientappio.proto @@ -0,0 +1,63 @@ +// Copyright 2024 Flower Labs GmbH. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// ============================================================================== + +syntax = "proto3"; + +package flwr.proto; + +import "flwr/proto/fab.proto"; +import "flwr/proto/run.proto"; +import "flwr/proto/message.proto"; + +service ClientAppIo { + // Get token + rpc GetToken(GetTokenRequest) returns (GetTokenResponse) {} + + // Get Message, Context, and Run + rpc PullClientAppInputs(PullClientAppInputsRequest) + returns (PullClientAppInputsResponse) {} + + // Send updated Message and Context + rpc PushClientAppOutputs(PushClientAppOutputsRequest) + returns (PushClientAppOutputsResponse) {} +} + +enum ClientAppOutputCode { + SUCCESS = 0; + DEADLINE_EXCEEDED = 1; + UNKNOWN_ERROR = 2; +} +message ClientAppOutputStatus { + ClientAppOutputCode code = 1; + string message = 2; +} + +message GetTokenRequest {} +message GetTokenResponse { uint64 token = 1; } + +message PullClientAppInputsRequest { uint64 token = 1; } +message PullClientAppInputsResponse { + Message message = 1; + Context context = 2; + Run run = 3; + Fab fab = 4; +} + +message PushClientAppOutputsRequest { + uint64 token = 1; + Message message = 2; + Context context = 3; +} +message PushClientAppOutputsResponse { ClientAppOutputStatus status = 1; } diff --git a/src/proto/flwr/proto/control.proto b/src/proto/flwr/proto/control.proto new file mode 100644 index 000000000000..8b75c66fccaa --- /dev/null +++ b/src/proto/flwr/proto/control.proto @@ -0,0 +1,32 @@ +// Copyright 2024 Flower Labs GmbH. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== + +syntax = "proto3"; + +package flwr.proto; + +import "flwr/proto/run.proto"; + +service Control { + // Request to create a new run + rpc CreateRun(CreateRunRequest) returns (CreateRunResponse) {} + + // Get the status of a given run + rpc GetRunStatus(GetRunStatusRequest) returns (GetRunStatusResponse) {} + + // Update the status of a given run + rpc UpdateRunStatus(UpdateRunStatusRequest) + returns (UpdateRunStatusResponse) {} +} diff --git a/src/proto/flwr/proto/driver.proto b/src/proto/flwr/proto/driver.proto index 54e6b6b41b68..e26003862a76 100644 --- a/src/proto/flwr/proto/driver.proto +++ b/src/proto/flwr/proto/driver.proto @@ -19,6 +19,8 @@ package flwr.proto; import "flwr/proto/node.proto"; import "flwr/proto/task.proto"; +import "flwr/proto/run.proto"; +import "flwr/proto/fab.proto"; service Driver { // Request run_id @@ -32,17 +34,16 @@ service Driver { // Get task results rpc PullTaskRes(PullTaskResRequest) returns (PullTaskResResponse) {} -} -// CreateRun -message CreateRunRequest { - string fab_id = 1; - string fab_version = 2; + // Get run details + rpc GetRun(GetRunRequest) returns (GetRunResponse) {} + + // Get FAB + rpc GetFab(GetFabRequest) returns (GetFabResponse) {} } -message CreateRunResponse { sint64 run_id = 1; } // GetNodes messages -message GetNodesRequest { sint64 run_id = 1; } +message GetNodesRequest { uint64 run_id = 1; } message GetNodesResponse { repeated Node nodes = 1; } // PushTaskIns messages diff --git a/src/proto/flwr/proto/exec.proto b/src/proto/flwr/proto/exec.proto new file mode 100644 index 000000000000..ad0723c0480c --- /dev/null +++ b/src/proto/flwr/proto/exec.proto @@ -0,0 +1,38 @@ +// Copyright 2024 Flower Labs GmbH. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== + +syntax = "proto3"; + +package flwr.proto; + +import "flwr/proto/fab.proto"; +import "flwr/proto/transport.proto"; + +service Exec { + // Start run upon request + rpc StartRun(StartRunRequest) returns (StartRunResponse) {} + + // Start log stream upon request + rpc StreamLogs(StreamLogsRequest) returns (stream StreamLogsResponse) {} +} + +message StartRunRequest { + Fab fab = 1; + map override_config = 2; + map federation_config = 3; +} +message StartRunResponse { uint64 run_id = 1; } +message StreamLogsRequest { uint64 run_id = 1; } +message StreamLogsResponse { string log_output = 1; } diff --git a/src/proto/flwr/proto/fab.proto b/src/proto/flwr/proto/fab.proto new file mode 100644 index 000000000000..367b6e5b5c13 --- /dev/null +++ b/src/proto/flwr/proto/fab.proto @@ -0,0 +1,35 @@ +// Copyright 2024 Flower Labs GmbH. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== + +syntax = "proto3"; + +package flwr.proto; + +import "flwr/proto/node.proto"; + +message Fab { + // This field is the hash of the data field. It is used to identify the data. + // The hash is calculated using the SHA-256 algorithm and is represented as a + // hex string (sha256hex). + string hash_str = 1; + // This field contains the fab file contents a one bytes blob. + bytes content = 2; +} + +message GetFabRequest { + Node node = 1; + string hash_str = 2; +} +message GetFabResponse { Fab fab = 1; } diff --git a/src/proto/flwr/proto/fleet.proto b/src/proto/flwr/proto/fleet.proto index df6b5843023d..130b30b96669 100644 --- a/src/proto/flwr/proto/fleet.proto +++ b/src/proto/flwr/proto/fleet.proto @@ -19,6 +19,8 @@ package flwr.proto; import "flwr/proto/node.proto"; import "flwr/proto/task.proto"; +import "flwr/proto/run.proto"; +import "flwr/proto/fab.proto"; service Fleet { rpc CreateNode(CreateNodeRequest) returns (CreateNodeResponse) {} @@ -36,6 +38,9 @@ service Fleet { rpc PushTaskRes(PushTaskResRequest) returns (PushTaskResResponse) {} rpc GetRun(GetRunRequest) returns (GetRunResponse) {} + + // Get FAB + rpc GetFab(GetFabRequest) returns (GetFabResponse) {} } // CreateNode messages @@ -64,19 +69,13 @@ message PullTaskInsResponse { } // PushTaskRes messages -message PushTaskResRequest { repeated TaskRes task_res_list = 1; } +message PushTaskResRequest { + Node node = 1; + repeated TaskRes task_res_list = 2; +} message PushTaskResResponse { Reconnect reconnect = 1; map results = 2; } -// GetRun messages -message Run { - sint64 run_id = 1; - string fab_id = 2; - string fab_version = 3; -} -message GetRunRequest { sint64 run_id = 1; } -message GetRunResponse { Run run = 1; } - message Reconnect { uint64 reconnect = 1; } diff --git a/src/proto/flwr/proto/message.proto 
b/src/proto/flwr/proto/message.proto new file mode 100644 index 000000000000..7066da5b7e76 --- /dev/null +++ b/src/proto/flwr/proto/message.proto @@ -0,0 +1,47 @@ +// Copyright 2024 Flower Labs GmbH. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== + +syntax = "proto3"; + +package flwr.proto; + +import "flwr/proto/error.proto"; +import "flwr/proto/recordset.proto"; +import "flwr/proto/transport.proto"; + +message Message { + Metadata metadata = 1; + RecordSet content = 2; + Error error = 3; +} + +message Context { + uint64 node_id = 1; + map node_config = 2; + RecordSet state = 3; + map run_config = 4; +} + +message Metadata { + uint64 run_id = 1; + string message_id = 2; + uint64 src_node_id = 3; + uint64 dst_node_id = 4; + string reply_to_message = 5; + string group_id = 6; + double ttl = 7; + string message_type = 8; + double created_at = 9; +} diff --git a/src/proto/flwr/proto/node.proto b/src/proto/flwr/proto/node.proto index e61d44f0f783..ec72b51b44ec 100644 --- a/src/proto/flwr/proto/node.proto +++ b/src/proto/flwr/proto/node.proto @@ -18,6 +18,6 @@ syntax = "proto3"; package flwr.proto; message Node { - sint64 node_id = 1; + uint64 node_id = 1; bool anonymous = 2; } diff --git a/src/proto/flwr/proto/recordset.proto b/src/proto/flwr/proto/recordset.proto index d51d0f9ce416..939e97cf46e3 100644 --- a/src/proto/flwr/proto/recordset.proto +++ 
b/src/proto/flwr/proto/recordset.proto @@ -18,7 +18,8 @@ syntax = "proto3"; package flwr.proto; message DoubleList { repeated double vals = 1; } -message Sint64List { repeated sint64 vals = 1; } +message SintList { repeated sint64 vals = 1; } +message UintList { repeated uint64 vals = 1; } message BoolList { repeated bool vals = 1; } message StringList { repeated string vals = 1; } message BytesList { repeated bytes vals = 1; } @@ -35,10 +36,12 @@ message MetricsRecordValue { // Single element double double = 1; sint64 sint64 = 2; + uint64 uint64 = 3; // List types DoubleList double_list = 21; - Sint64List sint64_list = 22; + SintList sint_list = 22; + UintList uint_list = 23; } } @@ -47,16 +50,18 @@ message ConfigsRecordValue { // Single element double double = 1; sint64 sint64 = 2; - bool bool = 3; - string string = 4; - bytes bytes = 5; + uint64 uint64 = 3; + bool bool = 4; + string string = 5; + bytes bytes = 6; // List types DoubleList double_list = 21; - Sint64List sint64_list = 22; - BoolList bool_list = 23; - StringList string_list = 24; - BytesList bytes_list = 25; + SintList sint_list = 22; + UintList uint_list = 23; + BoolList bool_list = 24; + StringList string_list = 25; + BytesList bytes_list = 26; } } diff --git a/src/proto/flwr/proto/run.proto b/src/proto/flwr/proto/run.proto new file mode 100644 index 000000000000..4312e1127cc2 --- /dev/null +++ b/src/proto/flwr/proto/run.proto @@ -0,0 +1,69 @@ +// Copyright 2024 Flower Labs GmbH. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== + +syntax = "proto3"; + +package flwr.proto; + +import "flwr/proto/fab.proto"; +import "flwr/proto/node.proto"; +import "flwr/proto/transport.proto"; + +message Run { + uint64 run_id = 1; + string fab_id = 2; + string fab_version = 3; + map override_config = 4; + string fab_hash = 5; +} + +message RunStatus { + // "starting", "running", "finished" + string status = 1; + // "completed", "failed", "stopped" or "" (non-finished) + string sub_status = 2; + // failure details + string details = 3; +} + +// CreateRun +message CreateRunRequest { + string fab_id = 1; + string fab_version = 2; + map override_config = 3; + Fab fab = 4; +} +message CreateRunResponse { uint64 run_id = 1; } + +// GetRun +message GetRunRequest { + Node node = 1; + uint64 run_id = 2; +} +message GetRunResponse { Run run = 1; } + +// UpdateRunStatus +message UpdateRunStatusRequest { + uint64 run_id = 1; + RunStatus run_status = 2; +} +message UpdateRunStatusResponse {} + +// GetRunStatus +message GetRunStatusRequest { + Node node = 1; + repeated uint64 run_ids = 2; +} +message GetRunStatusResponse { map run_status_dict = 1; } diff --git a/src/proto/flwr/proto/task.proto b/src/proto/flwr/proto/task.proto index cf77d110acab..324a70a5359c 100644 --- a/src/proto/flwr/proto/task.proto +++ b/src/proto/flwr/proto/task.proto @@ -19,7 +19,6 @@ package flwr.proto; import "flwr/proto/node.proto"; import "flwr/proto/recordset.proto"; -import "flwr/proto/transport.proto"; import "flwr/proto/error.proto"; message Task { @@ -38,13 +37,13 @@ message Task { message TaskIns { string task_id = 1; string group_id = 2; - sint64 run_id = 3; + uint64 run_id = 3; Task task = 4; } message TaskRes { string task_id = 1; string group_id = 2; - sint64 run_id = 3; + uint64 run_id = 3; Task task = 4; } diff --git 
a/src/proto/flwr/proto/transport.proto b/src/proto/flwr/proto/transport.proto index 17a285ebe44b..6a4f45aa3c97 100644 --- a/src/proto/flwr/proto/transport.proto +++ b/src/proto/flwr/proto/transport.proto @@ -107,7 +107,7 @@ message Scalar { // int32 int32 = 3; // int64 int64 = 4; // uint32 uint32 = 5; - // uint64 uint64 = 6; + uint64 uint64 = 6; // sint32 sint32 = 7; sint64 sint64 = 8; // fixed32 fixed32 = 9; diff --git a/src/py/flwr/cli/app.py b/src/py/flwr/cli/app.py index e1417f1267ac..8baccb4638fc 100644 --- a/src/py/flwr/cli/app.py +++ b/src/py/flwr/cli/app.py @@ -15,9 +15,11 @@ """Flower command line interface.""" import typer +from typer.main import get_command from .build import build -from .example import example +from .install import install +from .log import log from .new import new from .run import run @@ -31,9 +33,12 @@ ) app.command()(new) -app.command()(example) app.command()(run) app.command()(build) +app.command()(install) +app.command()(log) + +typer_click_object = get_command(app) if __name__ == "__main__": app() diff --git a/src/py/flwr/cli/build.py b/src/py/flwr/cli/build.py index ca7ab8686c5c..4c9dca4ebcf1 100644 --- a/src/py/flwr/cli/build.py +++ b/src/py/flwr/cli/build.py @@ -16,50 +16,71 @@ import hashlib import os +import shutil +import tempfile import zipfile from pathlib import Path -from typing import Optional +from typing import Annotated, Any, Optional, Union import pathspec +import tomli_w import typer -from typing_extensions import Annotated + +from flwr.common.constant import FAB_ALLOWED_EXTENSIONS, FAB_DATE, FAB_HASH_TRUNCATION from .config_utils import load_and_validate from .utils import is_valid_project_name -# pylint: disable=too-many-locals +def write_to_zip( + zipfile_obj: zipfile.ZipFile, filename: str, contents: Union[bytes, str] +) -> zipfile.ZipFile: + """Set a fixed date and write contents to a zip file.""" + zip_info = zipfile.ZipInfo(filename) + zip_info.date_time = FAB_DATE + zipfile_obj.writestr(zip_info, contents) 
+ return zipfile_obj + + +def get_fab_filename(conf: dict[str, Any], fab_hash: str) -> str: + """Get the FAB filename based on the given config and FAB hash.""" + publisher = conf["tool"]["flwr"]["app"]["publisher"] + name = conf["project"]["name"] + version = conf["project"]["version"].replace(".", "-") + fab_hash_truncated = fab_hash[:FAB_HASH_TRUNCATION] + return f"{publisher}.{name}.{version}.{fab_hash_truncated}.fab" + + +# pylint: disable=too-many-locals, too-many-statements def build( - directory: Annotated[ + app: Annotated[ Optional[Path], - typer.Option(help="The Flower project directory to bundle into a FAB"), + typer.Option(help="Path of the Flower App to bundle into a FAB"), ] = None, -) -> None: - """Build a Flower project into a Flower App Bundle (FAB). +) -> tuple[str, str]: + """Build a Flower App into a Flower App Bundle (FAB). - You can run `flwr build` without any argument to bundle the current directory: + You can run ``flwr build`` without any arguments to bundle the app located in the + current directory. Alternatively, you can specify a path using the ``--app`` + option to bundle an app located at the provided path. For example: - `flwr build` - - You can also build a specific directory: - - `flwr build --directory ./projects/flower-hello-world` + ``flwr build --app ./apps/flower-hello-world``. 
""" - if directory is None: - directory = Path.cwd() + if app is None: + app = Path.cwd() - directory = directory.resolve() - if not directory.is_dir(): + app = app.resolve() + if not app.is_dir(): typer.secho( - f"❌ The path {directory} is not a valid directory.", + f"❌ The path {app} is not a valid path to a Flower app.", fg=typer.colors.RED, bold=True, ) raise typer.Exit(code=1) - if not is_valid_project_name(directory.name): + if not is_valid_project_name(app.name): typer.secho( - f"❌ The project name {directory.name} is invalid, " + f"❌ The project name {app.name} is invalid, " "a valid project name must start with a letter or an underscore, " "and can only contain letters, digits, and underscores.", fg=typer.colors.RED, @@ -67,7 +88,7 @@ def build( ) raise typer.Exit(code=1) - conf, errors, warnings = load_and_validate(directory / "pyproject.toml") + conf, errors, warnings = load_and_validate(app / "pyproject.toml") if conf is None: typer.secho( "Project configuration could not be loaded.\npyproject.toml is invalid:\n" @@ -86,62 +107,72 @@ def build( ) # Load .gitignore rules if present - ignore_spec = _load_gitignore(directory) + ignore_spec = _load_gitignore(app) - # Set the name of the zip file - fab_filename = ( - f"{conf['flower']['publisher']}" - f".{directory.name}" - f".{conf['project']['version'].replace('.', '-')}.fab" - ) list_file_content = "" - allowed_extensions = {".py", ".toml", ".md"} + # Remove the 'federations' field from 'tool.flwr' if it exists + if ( + "tool" in conf + and "flwr" in conf["tool"] + and "federations" in conf["tool"]["flwr"] + ): + del conf["tool"]["flwr"]["federations"] - with zipfile.ZipFile(fab_filename, "w", zipfile.ZIP_DEFLATED) as fab_file: - for root, _, files in os.walk(directory, topdown=True): - # Filter directories and files based on .gitignore - files = [ + toml_contents = tomli_w.dumps(conf) + + with tempfile.NamedTemporaryFile(suffix=".zip", delete=False) as temp_file: + temp_filename = temp_file.name + + with 
zipfile.ZipFile(temp_filename, "w", zipfile.ZIP_DEFLATED) as fab_file: + write_to_zip(fab_file, "pyproject.toml", toml_contents) + + # Continue with adding other files + all_files = [ f - for f in files - if not ignore_spec.match_file(Path(root) / f) - and f != fab_filename - and Path(f).suffix in allowed_extensions + for f in app.rglob("*") + if not ignore_spec.match_file(f) + and f.name != temp_filename + and f.suffix in FAB_ALLOWED_EXTENSIONS + and f.name != "pyproject.toml" # Exclude the original pyproject.toml ] - for file in files: - file_path = Path(root) / file - archive_path = file_path.relative_to(directory) - fab_file.write(file_path, archive_path) + for file_path in all_files: + # Read the file content manually + with open(file_path, "rb") as f: + file_contents = f.read() + + archive_path = file_path.relative_to(app) + write_to_zip(fab_file, str(archive_path), file_contents) # Calculate file info - sha256_hash = _get_sha256_hash(file_path) + sha256_hash = hashlib.sha256(file_contents).hexdigest() file_size_bits = os.path.getsize(file_path) * 8 # size in bits list_file_content += f"{archive_path},{sha256_hash},{file_size_bits}\n" - # Add CONTENT and CONTENT.jwt to the zip file - fab_file.writestr(".info/CONTENT", list_file_content) + # Add CONTENT and CONTENT.jwt to the zip file + write_to_zip(fab_file, ".info/CONTENT", list_file_content) + + # Get hash of FAB file + content = Path(temp_filename).read_bytes() + fab_hash = hashlib.sha256(content).hexdigest() + + # Set the name of the zip file + fab_filename = get_fab_filename(conf, fab_hash) + + # Once the temporary zip file is created, rename it to the final filename + shutil.move(temp_filename, fab_filename) typer.secho( - f"🎊 Successfully built {fab_filename}.", fg=typer.colors.GREEN, bold=True + f"🎊 Successfully built {fab_filename}", fg=typer.colors.GREEN, bold=True ) - -def _get_sha256_hash(file_path: Path) -> str: - """Calculate the SHA-256 hash of a file.""" - sha256 = hashlib.sha256() - with 
open(file_path, "rb") as f: - while True: - data = f.read(65536) # Read in 64kB blocks - if not data: - break - sha256.update(data) - return sha256.hexdigest() + return fab_filename, fab_hash -def _load_gitignore(directory: Path) -> pathspec.PathSpec: +def _load_gitignore(app: Path) -> pathspec.PathSpec: """Load and parse .gitignore file, returning a pathspec.""" - gitignore_path = directory / ".gitignore" + gitignore_path = app / ".gitignore" patterns = ["__pycache__/"] # Default pattern if gitignore_path.exists(): with open(gitignore_path, encoding="UTF-8") as file: diff --git a/src/py/flwr/cli/config_utils.py b/src/py/flwr/cli/config_utils.py index d943d87e3812..73ce779c3b5c 100644 --- a/src/py/flwr/cli/config_utils.py +++ b/src/py/flwr/cli/config_utils.py @@ -14,25 +14,101 @@ # ============================================================================== """Utility to validate the `pyproject.toml` file.""" +import zipfile +from io import BytesIO from pathlib import Path -from typing import Any, Dict, List, Optional, Tuple +from typing import IO, Any, Optional, Union, get_args import tomli from flwr.common import object_ref +from flwr.common.typing import UserConfigValue + + +def get_fab_config(fab_file: Union[Path, bytes]) -> dict[str, Any]: + """Extract the config from a FAB file or path. + + Parameters + ---------- + fab_file : Union[Path, bytes] + The Flower App Bundle file to validate and extract the metadata from. + It can either be a path to the file or the file itself as bytes. + + Returns + ------- + Dict[str, Any] + The `config` of the given Flower App Bundle. 
+ """ + fab_file_archive: Union[Path, IO[bytes]] + if isinstance(fab_file, bytes): + fab_file_archive = BytesIO(fab_file) + elif isinstance(fab_file, Path): + fab_file_archive = fab_file + else: + raise ValueError("fab_file must be either a Path or bytes") + + with zipfile.ZipFile(fab_file_archive, "r") as zipf: + with zipf.open("pyproject.toml") as file: + toml_content = file.read().decode("utf-8") + + conf = load_from_string(toml_content) + if conf is None: + raise ValueError("Invalid TOML content in pyproject.toml") + + is_valid, errors, _ = validate(conf, check_module=False) + if not is_valid: + raise ValueError(errors) + + return conf + + +def get_fab_metadata(fab_file: Union[Path, bytes]) -> tuple[str, str]: + """Extract the fab_id and the fab_version from a FAB file or path. + + Parameters + ---------- + fab_file : Union[Path, bytes] + The Flower App Bundle file to validate and extract the metadata from. + It can either be a path to the file or the file itself as bytes. + + Returns + ------- + Tuple[str, str] + The `fab_id` and `fab_version` of the given Flower App Bundle. + """ + conf = get_fab_config(fab_file) + + return ( + f"{conf['tool']['flwr']['app']['publisher']}/{conf['project']['name']}", + conf["project"]["version"], + ) def load_and_validate( path: Optional[Path] = None, -) -> Tuple[Optional[Dict[str, Any]], List[str], List[str]]: + check_module: bool = True, +) -> tuple[Optional[dict[str, Any]], list[str], list[str]]: """Load and validate pyproject.toml as dict. + Parameters + ---------- + path : Optional[Path] (default: None) + The path of the Flower App config file to load. By default it + will try to use `pyproject.toml` inside the current directory. + check_module: bool (default: True) + Whether the validity of the Python module should be checked. + This requires the project to be installed in the currently + running environment. True by default. 
+ Returns ------- Tuple[Optional[config], List[str], List[str]] A tuple with the optional config in case it exists and is valid and associated errors and warnings. """ + if path is None: + path = Path.cwd() / "pyproject.toml" + config = load(path) if config is None: @@ -42,7 +118,7 @@ def load_and_validate( ] return (None, errors, []) - is_valid, errors, warnings = validate(config) + is_valid, errors, warnings = validate(config, check_module, path.parent) if not is_valid: return (None, errors, warnings) @@ -50,24 +126,28 @@ def load_and_validate( return (config, errors, warnings) -def load(path: Optional[Path] = None) -> Optional[Dict[str, Any]]: +def load(toml_path: Path) -> Optional[dict[str, Any]]: """Load pyproject.toml and return as dict.""" - if path is None: - cur_dir = Path.cwd() - toml_path = cur_dir / "pyproject.toml" - else: - toml_path = path - if not toml_path.is_file(): return None with toml_path.open(encoding="utf-8") as toml_file: - data = tomli.loads(toml_file.read()) - return data + return load_from_string(toml_file.read()) + + +def _validate_run_config(config_dict: dict[str, Any], errors: list[str]) -> None: + for key, value in config_dict.items(): + if isinstance(value, dict): + _validate_run_config(config_dict[key], errors) + elif not isinstance(value, get_args(UserConfigValue)): + raise ValueError( + f"The value for key {key} needs to be of type `int`, `float`, " + "`bool, `str`, or a `dict` of those.", + ) # pylint: disable=too-many-branches -def validate_fields(config: Dict[str, Any]) -> Tuple[bool, List[str], List[str]]: +def validate_fields(config: dict[str, Any]) -> tuple[bool, list[str], list[str]]: """Validate pyproject.toml fields.""" errors = [] warnings = [] @@ -86,23 +166,37 @@ def validate_fields(config: Dict[str, Any]) -> Tuple[bool, List[str], List[str]] if "authors" not in config["project"]: warnings.append('Recommended property "authors" missing in [project]') - if "flower" not in config: - errors.append("Missing [flower] 
section") + if ( + "tool" not in config + or "flwr" not in config["tool"] + or "app" not in config["tool"]["flwr"] + ): + errors.append("Missing [tool.flwr.app] section") else: - if "publisher" not in config["flower"]: - errors.append('Property "publisher" missing in [flower]') - if "components" not in config["flower"]: - errors.append("Missing [flower.components] section") + if "publisher" not in config["tool"]["flwr"]["app"]: + errors.append('Property "publisher" missing in [tool.flwr.app]') + if "config" in config["tool"]["flwr"]["app"]: + _validate_run_config(config["tool"]["flwr"]["app"]["config"], errors) + if "components" not in config["tool"]["flwr"]["app"]: + errors.append("Missing [tool.flwr.app.components] section") else: - if "serverapp" not in config["flower"]["components"]: - errors.append('Property "serverapp" missing in [flower.components]') - if "clientapp" not in config["flower"]["components"]: - errors.append('Property "clientapp" missing in [flower.components]') + if "serverapp" not in config["tool"]["flwr"]["app"]["components"]: + errors.append( + 'Property "serverapp" missing in [tool.flwr.app.components]' + ) + if "clientapp" not in config["tool"]["flwr"]["app"]["components"]: + errors.append( + 'Property "clientapp" missing in [tool.flwr.app.components]' + ) return len(errors) == 0, errors, warnings -def validate(config: Dict[str, Any]) -> Tuple[bool, List[str], List[str]]: +def validate( + config: dict[str, Any], + check_module: bool = True, + project_dir: Optional[Union[str, Path]] = None, +) -> tuple[bool, list[str], list[str]]: """Validate pyproject.toml.""" is_valid, errors, warnings = validate_fields(config) @@ -110,14 +204,26 @@ def validate(config: Dict[str, Any]) -> Tuple[bool, List[str], List[str]]: return False, errors, warnings # Validate serverapp - is_valid, reason = object_ref.validate(config["flower"]["components"]["serverapp"]) + serverapp_ref = config["tool"]["flwr"]["app"]["components"]["serverapp"] + is_valid, reason = 
object_ref.validate(serverapp_ref, check_module, project_dir) + if not is_valid and isinstance(reason, str): return False, [reason], [] # Validate clientapp - is_valid, reason = object_ref.validate(config["flower"]["components"]["clientapp"]) + clientapp_ref = config["tool"]["flwr"]["app"]["components"]["clientapp"] + is_valid, reason = object_ref.validate(clientapp_ref, check_module, project_dir) if not is_valid and isinstance(reason, str): return False, [reason], [] return True, [], [] + + +def load_from_string(toml_content: str) -> Optional[dict[str, Any]]: + """Load TOML content from a string and return as dict.""" + try: + data = tomli.loads(toml_content) + return data + except tomli.TOMLDecodeError: + return None diff --git a/src/py/flwr/cli/config_utils_test.py b/src/py/flwr/cli/config_utils_test.py index b47206249dfc..ddabc152bc0f 100644 --- a/src/py/flwr/cli/config_utils_test.py +++ b/src/py/flwr/cli/config_utils_test.py @@ -17,7 +17,7 @@ import os import textwrap from pathlib import Path -from typing import Any, Dict +from typing import Any from .config_utils import load, validate, validate_fields @@ -34,27 +34,18 @@ def test_load_pyproject_toml_load_from_cwd(tmp_path: Path) -> None: name = "fedgpt" version = "1.0.0" description = "" - authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, - ] license = {text = "Apache License (2.0)"} dependencies = [ - "flwr[simulation]>=1.8.0,<2.0", + "flwr[simulation]>=1.9.0,<2.0", "numpy>=1.21.0", ] - [flower] + [tool.flwr.app] publisher = "flwrlabs" - [flower.components] + [tool.flwr.app.components] serverapp = "fedgpt.server:app" clientapp = "fedgpt.client:app" - - [flower.engine] - name = "simulation" # optional - - [flower.engine.simulation.supernode] - count = 10 # optional """ expected_config = { "build-system": {"build-backend": "hatchling.build", "requires": ["hatchling"]}, @@ -62,19 +53,18 @@ def test_load_pyproject_toml_load_from_cwd(tmp_path: Path) -> None: "name": "fedgpt", "version": 
"1.0.0", "description": "", - "authors": [{"email": "hello@flower.ai", "name": "The Flower Authors"}], "license": {"text": "Apache License (2.0)"}, - "dependencies": ["flwr[simulation]>=1.8.0,<2.0", "numpy>=1.21.0"], + "dependencies": ["flwr[simulation]>=1.9.0,<2.0", "numpy>=1.21.0"], }, - "flower": { - "publisher": "flwrlabs", - "components": { - "serverapp": "fedgpt.server:app", - "clientapp": "fedgpt.client:app", - }, - "engine": { - "name": "simulation", - "simulation": {"supernode": {"count": 10}}, + "tool": { + "flwr": { + "app": { + "publisher": "flwrlabs", + "components": { + "serverapp": "fedgpt.server:app", + "clientapp": "fedgpt.client:app", + }, + }, }, }, } @@ -89,7 +79,7 @@ def test_load_pyproject_toml_load_from_cwd(tmp_path: Path) -> None: f.write(textwrap.dedent(pyproject_toml_content)) # Execute - config = load() + config = load(toml_path=Path.cwd() / "pyproject.toml") # Assert assert config == expected_config @@ -109,27 +99,18 @@ def test_load_pyproject_toml_from_path(tmp_path: Path) -> None: name = "fedgpt" version = "1.0.0" description = "" - authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, - ] license = {text = "Apache License (2.0)"} dependencies = [ - "flwr[simulation]>=1.8.0,<2.0", + "flwr[simulation]>=1.9.0,<2.0", "numpy>=1.21.0", ] - [flower] + [tool.flwr.app] publisher = "flwrlabs" - [flower.components] + [tool.flwr.app.components] serverapp = "fedgpt.server:app" clientapp = "fedgpt.client:app" - - [flower.engine] - name = "simulation" # optional - - [flower.engine.simulation.supernode] - count = 10 # optional """ expected_config = { "build-system": {"build-backend": "hatchling.build", "requires": ["hatchling"]}, @@ -137,25 +118,24 @@ def test_load_pyproject_toml_from_path(tmp_path: Path) -> None: "name": "fedgpt", "version": "1.0.0", "description": "", - "authors": [{"email": "hello@flower.ai", "name": "The Flower Authors"}], "license": {"text": "Apache License (2.0)"}, - "dependencies": 
["flwr[simulation]>=1.8.0,<2.0", "numpy>=1.21.0"], + "dependencies": ["flwr[simulation]>=1.9.0,<2.0", "numpy>=1.21.0"], }, - "flower": { - "publisher": "flwrlabs", - "components": { - "serverapp": "fedgpt.server:app", - "clientapp": "fedgpt.client:app", - }, - "engine": { - "name": "simulation", - "simulation": {"supernode": {"count": 10}}, + "tool": { + "flwr": { + "app": { + "publisher": "flwrlabs", + "components": { + "serverapp": "fedgpt.server:app", + "clientapp": "fedgpt.client:app", + }, + }, }, }, } # Current directory - origin = os.getcwd() + origin = Path.cwd() try: # Change into the temporary directory @@ -164,7 +144,7 @@ def test_load_pyproject_toml_from_path(tmp_path: Path) -> None: f.write(textwrap.dedent(pyproject_toml_content)) # Execute - config = load(path=tmp_path / "pyproject.toml") + config = load(toml_path=tmp_path / "pyproject.toml") # Assert assert config == expected_config @@ -175,7 +155,7 @@ def test_load_pyproject_toml_from_path(tmp_path: Path) -> None: def test_validate_pyproject_toml_fields_empty() -> None: """Test that validate_pyproject_toml_fields fails correctly.""" # Prepare - config: Dict[str, Any] = {} + config: dict[str, Any] = {} # Execute is_valid, errors, warnings = validate_fields(config) @@ -219,7 +199,7 @@ def test_validate_pyproject_toml_fields_no_flower_components() -> None: "license": "", "authors": [], }, - "flower": {}, + "tool": {"flwr": {"app": {}}}, } # Execute @@ -242,7 +222,7 @@ def test_validate_pyproject_toml_fields_no_server_and_client_app() -> None: "license": "", "authors": [], }, - "flower": {"components": {}}, + "tool": {"flwr": {"app": {"components": {}}}}, } # Execute @@ -265,9 +245,13 @@ def test_validate_pyproject_toml_fields() -> None: "license": "", "authors": [], }, - "flower": { - "publisher": "flwrlabs", - "components": {"serverapp": "", "clientapp": ""}, + "tool": { + "flwr": { + "app": { + "publisher": "flwrlabs", + "components": {"serverapp": "", "clientapp": ""}, + }, + }, }, } @@ -291,11 
+275,15 @@ def test_validate_pyproject_toml() -> None: "license": "", "authors": [], }, - "flower": { - "publisher": "flwrlabs", - "components": { - "serverapp": "flwr.cli.run:run", - "clientapp": "flwr.cli.run:run", + "tool": { + "flwr": { + "app": { + "publisher": "flwrlabs", + "components": { + "serverapp": "flwr.cli.run:run", + "clientapp": "flwr.cli.run:run", + }, + }, }, }, } @@ -320,11 +308,15 @@ def test_validate_pyproject_toml_fail() -> None: "license": "", "authors": [], }, - "flower": { - "publisher": "flwrlabs", - "components": { - "serverapp": "flwr.cli.run:run", - "clientapp": "flwr.cli.run:runa", + "tool": { + "flwr": { + "app": { + "publisher": "flwrlabs", + "components": { + "serverapp": "flwr.cli.run:run", + "clientapp": "flwr.cli.run:runa", + }, + }, }, }, } diff --git a/src/py/flwr/cli/install.py b/src/py/flwr/cli/install.py new file mode 100644 index 000000000000..7451aa3d2326 --- /dev/null +++ b/src/py/flwr/cli/install.py @@ -0,0 +1,268 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Flower command line interface `install` command.""" + +import hashlib +import shutil +import subprocess +import tempfile +import zipfile +from io import BytesIO +from pathlib import Path +from typing import IO, Annotated, Optional, Union + +import typer + +from flwr.common.config import get_flwr_dir, get_metadata_from_config +from flwr.common.constant import FAB_HASH_TRUNCATION + +from .config_utils import load_and_validate +from .utils import get_sha256_hash + + +def install( + source: Annotated[ + Optional[Path], + typer.Argument(metavar="source", help="The source FAB file to install."), + ] = None, + flwr_dir: Annotated[ + Optional[Path], + typer.Option(help="The desired install path."), + ] = None, +) -> None: + """Install a Flower App Bundle. + + It can be ran with a single FAB file argument: + + ``flwr install ./target_project.fab`` + + The target install directory can be specified with ``--flwr-dir``: + + ``flwr install ./target_project.fab --flwr-dir ./docs/flwr`` + + This will install ``target_project`` to ``./docs/flwr/``. 
By default, + ``flwr-dir`` is equal to: + + - ``$FLWR_HOME/`` if ``$FLWR_HOME`` is defined + - ``$XDG_DATA_HOME/.flwr/`` if ``$XDG_DATA_HOME`` is defined + - ``$HOME/.flwr/`` in all other cases + """ + if source is None: + source = Path(typer.prompt("Enter the source FAB file")) + + source = source.resolve() + if not source.exists() or not source.is_file(): + typer.secho( + f"❌ The source {source} does not exist or is not a file.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + if source.suffix != ".fab": + typer.secho( + f"❌ The source {source} is not a `.fab` file.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + install_from_fab(source, flwr_dir) + + +def install_from_fab( + fab_file: Union[Path, bytes], + flwr_dir: Optional[Path], + skip_prompt: bool = False, +) -> Path: + """Install from a FAB file after extracting and validating.""" + fab_file_archive: Union[Path, IO[bytes]] + fab_name: Optional[str] + if isinstance(fab_file, bytes): + fab_file_archive = BytesIO(fab_file) + fab_hash = hashlib.sha256(fab_file).hexdigest() + fab_name = None + elif isinstance(fab_file, Path): + fab_file_archive = fab_file + fab_hash = hashlib.sha256(fab_file.read_bytes()).hexdigest() + fab_name = fab_file.stem + else: + raise ValueError("fab_file must be either a Path or bytes") + + with tempfile.TemporaryDirectory() as tmpdir: + with zipfile.ZipFile(fab_file_archive, "r") as zipf: + zipf.extractall(tmpdir) + tmpdir_path = Path(tmpdir) + info_dir = tmpdir_path / ".info" + if not info_dir.exists(): + typer.secho( + "❌ FAB file has incorrect format.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + content_file = info_dir / "CONTENT" + + if not content_file.exists() or not _verify_hashes( + content_file.read_text(), tmpdir_path + ): + typer.secho( + "❌ File hashes couldn't be verified.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + shutil.rmtree(info_dir) + + installed_path = 
validate_and_install( + tmpdir_path, fab_hash, fab_name, flwr_dir, skip_prompt + ) + + return installed_path + + +# pylint: disable=too-many-locals +def validate_and_install( + project_dir: Path, + fab_hash: str, + fab_name: Optional[str], + flwr_dir: Optional[Path], + skip_prompt: bool = False, +) -> Path: + """Validate TOML files and install the project to the desired directory.""" + config, _, _ = load_and_validate(project_dir / "pyproject.toml", check_module=False) + + if config is None: + typer.secho( + "❌ Invalid config inside FAB file.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + version, fab_id = get_metadata_from_config(config) + publisher, project_name = fab_id.split("/") + config_metadata = (publisher, project_name, version, fab_hash) + + if fab_name: + _validate_fab_and_config_metadata(fab_name, config_metadata) + + install_dir: Path = ( + (get_flwr_dir() if not flwr_dir else flwr_dir) + / "apps" + / f"{publisher}.{project_name}.{version}.{fab_hash[:FAB_HASH_TRUNCATION]}" + ) + if install_dir.exists(): + if skip_prompt: + return install_dir + if not typer.confirm( + typer.style( + f"\n💬 {project_name} version {version} is already installed, " + "do you want to reinstall it?", + fg=typer.colors.MAGENTA, + bold=True, + ) + ): + return install_dir + + install_dir.mkdir(parents=True, exist_ok=True) + + # Move contents from source directory + for item in project_dir.iterdir(): + if item.is_dir(): + shutil.copytree(item, install_dir / item.name, dirs_exist_ok=True) + else: + shutil.copy2(item, install_dir / item.name) + + try: + subprocess.run( + ["pip", "install", "-e", install_dir, "--no-deps"], + capture_output=True, + text=True, + check=True, + ) + except subprocess.CalledProcessError as e: + typer.secho( + f"❌ Failed to `pip install` package(s) from {install_dir}:\n{e.stderr}", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) from e + + typer.secho( + f"🎊 Successfully installed {project_name} to 
{install_dir}.", + fg=typer.colors.GREEN, + bold=True, + ) + + return install_dir + + +def _verify_hashes(list_content: str, tmpdir: Path) -> bool: + """Verify file hashes based on the LIST content.""" + for line in list_content.strip().split("\n"): + rel_path, hash_expected, _ = line.split(",") + file_path = tmpdir / rel_path + if not file_path.exists() or get_sha256_hash(file_path) != hash_expected: + return False + return True + + +def _validate_fab_and_config_metadata( + fab_name: str, config_metadata: tuple[str, str, str, str] +) -> None: + """Validate metadata from the FAB filename and config.""" + publisher, project_name, version, fab_hash = config_metadata + + fab_name = fab_name.removesuffix(".fab") + + fab_publisher, fab_project_name, fab_version, fab_shorthash = fab_name.split(".") + fab_version = fab_version.replace("-", ".") + + # Check FAB filename format + if ( + f"{fab_publisher}.{fab_project_name}.{fab_version}" + != f"{publisher}.{project_name}.{version}" + or len(fab_shorthash) != FAB_HASH_TRUNCATION # Verify hash length + ): + typer.secho( + "❌ FAB file has incorrect name. The file name must follow the format " + "`...<8hexchars>.fab`.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + # Verify hash is a valid hexadecimal + try: + _ = int(fab_shorthash, 16) + except Exception as e: + typer.secho( + f"❌ FAB file has an invalid hexadecimal string `{fab_shorthash}`.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) from e + + # Verify shorthash matches + if fab_shorthash != fab_hash[:FAB_HASH_TRUNCATION]: + typer.secho( + "❌ The hash in the FAB file name does not match the hash of the FAB.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) diff --git a/src/py/flwr/cli/log.py b/src/py/flwr/cli/log.py new file mode 100644 index 000000000000..7199cefce4f7 --- /dev/null +++ b/src/py/flwr/cli/log.py @@ -0,0 +1,234 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Flower command line interface `log` command.""" + +import sys +import time +from logging import DEBUG, ERROR, INFO +from pathlib import Path +from typing import Annotated, Optional + +import grpc +import typer + +from flwr.cli.config_utils import load_and_validate +from flwr.common.grpc import GRPC_MAX_MESSAGE_LENGTH, create_channel +from flwr.common.logger import log as logger +from flwr.proto.exec_pb2 import StreamLogsRequest # pylint: disable=E0611 +from flwr.proto.exec_pb2_grpc import ExecStub + +CONN_REFRESH_PERIOD = 60 # Connection refresh period for log streaming (seconds) + + +def start_stream( + run_id: int, channel: grpc.Channel, refresh_period: int = CONN_REFRESH_PERIOD +) -> None: + """Start log streaming for a given run ID.""" + try: + while True: + logger(INFO, "Starting logstream for run_id `%s`", run_id) + stream_logs(run_id, channel, refresh_period) + time.sleep(2) + logger(DEBUG, "Reconnecting to logstream") + except KeyboardInterrupt: + logger(INFO, "Exiting logstream") + except grpc.RpcError as e: + # pylint: disable=E1101 + if e.code() == grpc.StatusCode.NOT_FOUND: + logger(ERROR, "Invalid run_id `%s`, exiting", run_id) + if e.code() == grpc.StatusCode.CANCELLED: + pass + finally: + channel.close() + + +def stream_logs(run_id: int, channel: grpc.Channel, duration: int) -> None: + """Stream logs from the beginning of 
a run with connection refresh.""" + start_time = time.time() + stub = ExecStub(channel) + req = StreamLogsRequest(run_id=run_id) + + for res in stub.StreamLogs(req): + print(res.log_output) + if time.time() - start_time > duration: + break + + +def print_logs(run_id: int, channel: grpc.Channel, timeout: int) -> None: + """Print logs from the beginning of a run.""" + stub = ExecStub(channel) + req = StreamLogsRequest(run_id=run_id) + + try: + while True: + try: + # Enforce timeout for graceful exit + for res in stub.StreamLogs(req, timeout=timeout): + print(res.log_output) + except grpc.RpcError as e: + # pylint: disable=E1101 + if e.code() == grpc.StatusCode.DEADLINE_EXCEEDED: + break + if e.code() == grpc.StatusCode.NOT_FOUND: + logger(ERROR, "Invalid run_id `%s`, exiting", run_id) + break + if e.code() == grpc.StatusCode.CANCELLED: + break + except KeyboardInterrupt: + logger(DEBUG, "Stream interrupted by user") + finally: + channel.close() + logger(DEBUG, "Channel closed") + + +def on_channel_state_change(channel_connectivity: str) -> None: + """Log channel connectivity.""" + logger(DEBUG, channel_connectivity) + + +def log( + run_id: Annotated[ + int, + typer.Argument(help="The Flower run ID to query"), + ], + app: Annotated[ + Path, + typer.Argument(help="Path of the Flower project to run"), + ] = Path("."), + federation: Annotated[ + Optional[str], + typer.Argument(help="Name of the federation to run the app on"), + ] = None, + stream: Annotated[ + bool, + typer.Option( + "--stream/--show", + help="Flag to stream or print logs from the Flower run", + ), + ] = True, +) -> None: + """Get logs from a Flower project run.""" + typer.secho("Loading project configuration... 
", fg=typer.colors.BLUE) + + pyproject_path = app / "pyproject.toml" if app else None + config, errors, warnings = load_and_validate(path=pyproject_path) + + if config is None: + typer.secho( + "Project configuration could not be loaded.\n" + "pyproject.toml is invalid:\n" + + "\n".join([f"- {line}" for line in errors]), + fg=typer.colors.RED, + bold=True, + ) + sys.exit() + + if warnings: + typer.secho( + "Project configuration is missing the following " + "recommended properties:\n" + "\n".join([f"- {line}" for line in warnings]), + fg=typer.colors.RED, + bold=True, + ) + + typer.secho("Success", fg=typer.colors.GREEN) + + federation = federation or config["tool"]["flwr"]["federations"].get("default") + + if federation is None: + typer.secho( + "❌ No federation name was provided and the project's `pyproject.toml` " + "doesn't declare a default federation (with a SuperExec address or an " + "`options.num-supernodes` value).", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + # Validate the federation exists in the configuration + federation_config = config["tool"]["flwr"]["federations"].get(federation) + if federation_config is None: + available_feds = { + fed for fed in config["tool"]["flwr"]["federations"] if fed != "default" + } + typer.secho( + f"❌ There is no `{federation}` federation declared in the " + "`pyproject.toml`.\n The following federations were found:\n\n" + + "\n".join(available_feds), + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + if "address" not in federation_config: + typer.secho( + "❌ `flwr log` currently works with `SuperExec`. 
Ensure that the correct" + "`SuperExec` address is provided in the `pyproject.toml`.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + _log_with_superexec(federation_config, run_id, stream) + + +# pylint: disable-next=too-many-branches +def _log_with_superexec( + federation_config: dict[str, str], + run_id: int, + stream: bool, +) -> None: + insecure_str = federation_config.get("insecure") + if root_certificates := federation_config.get("root-certificates"): + root_certificates_bytes = Path(root_certificates).read_bytes() + if insecure := bool(insecure_str): + typer.secho( + "❌ `root_certificates` were provided but the `insecure` parameter" + "is set to `True`.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + else: + root_certificates_bytes = None + if insecure_str is None: + typer.secho( + "❌ To disable TLS, set `insecure = true` in `pyproject.toml`.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + if not (insecure := bool(insecure_str)): + typer.secho( + "❌ No certificate were given yet `insecure` is set to `False`.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + channel = create_channel( + server_address=federation_config["address"], + insecure=insecure, + root_certificates=root_certificates_bytes, + max_message_length=GRPC_MAX_MESSAGE_LENGTH, + interceptors=None, + ) + channel.subscribe(on_channel_state_change) + + if stream: + start_stream(run_id, channel, CONN_REFRESH_PERIOD) + else: + logger(INFO, "Printing logstream for run_id `%s`", run_id) + print_logs(run_id, channel, timeout=5) diff --git a/src/py/flwr/cli/log_test.py b/src/py/flwr/cli/log_test.py new file mode 100644 index 000000000000..932610bea2f3 --- /dev/null +++ b/src/py/flwr/cli/log_test.py @@ -0,0 +1,78 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Test for Flower command line interface `log` command.""" + + +import unittest +from typing import NoReturn +from unittest.mock import Mock, call, patch + +from flwr.proto.exec_pb2 import StreamLogsResponse # pylint: disable=E0611 + +from .log import print_logs, stream_logs + + +class InterruptedStreamLogsResponse: + """Create a StreamLogsResponse object with KeyboardInterrupt.""" + + @property + def log_output(self) -> NoReturn: + """Raise KeyboardInterrupt to exit logstream test gracefully.""" + raise KeyboardInterrupt + + +class TestFlwrLog(unittest.TestCase): + """Unit tests for `flwr log` CLI functions.""" + + def setUp(self) -> None: + """Initialize mock ExecStub before each test.""" + self.expected_calls = [ + call("log_output_1"), + call("log_output_2"), + call("log_output_3"), + ] + mock_response_iterator = [ + iter( + [StreamLogsResponse(log_output=f"log_output_{i}") for i in range(1, 4)] + + [InterruptedStreamLogsResponse()] + ) + ] + self.mock_stub = Mock() + self.mock_stub.StreamLogs.side_effect = mock_response_iterator + self.patcher = patch("flwr.cli.log.ExecStub", return_value=self.mock_stub) + + self.patcher.start() + + # Create mock channel + self.mock_channel = Mock() + + def tearDown(self) -> None: + """Cleanup.""" + self.patcher.stop() + + def test_flwr_log_stream_method(self) -> None: + """Test stream_logs.""" + with patch("builtins.print") as mock_print: + with self.assertRaises(KeyboardInterrupt): + stream_logs(run_id=123, channel=self.mock_channel, 
duration=1) + # Assert that mock print was called with the expected arguments + mock_print.assert_has_calls(self.expected_calls) + + def test_flwr_log_print_method(self) -> None: + """Test print_logs.""" + with patch("builtins.print") as mock_print: + print_logs(run_id=123, channel=self.mock_channel, timeout=0) + # Assert that mock print was called with the expected arguments + mock_print.assert_has_calls(self.expected_calls) diff --git a/src/py/flwr/cli/new/new.py b/src/py/flwr/cli/new/new.py index 9bbc016de1a8..3cbde991ff6e 100644 --- a/src/py/flwr/cli/new/new.py +++ b/src/py/flwr/cli/new/new.py @@ -14,14 +14,13 @@ # ============================================================================== """Flower command line interface `new` command.""" -import os import re from enum import Enum +from pathlib import Path from string import Template -from typing import Dict, Optional +from typing import Annotated, Optional import typer -from typing_extensions import Annotated from ..utils import ( is_valid_project_name, @@ -34,13 +33,24 @@ class MlFramework(str, Enum): """Available frameworks.""" - NUMPY = "NumPy" PYTORCH = "PyTorch" TENSORFLOW = "TensorFlow" + SKLEARN = "sklearn" + HUGGINGFACE = "HuggingFace" JAX = "JAX" - HUGGINGFACE = "HF" MLX = "MLX" - SKLEARN = "sklearn" + NUMPY = "NumPy" + FLOWERTUNE = "FlowerTune" + BASELINE = "Flower Baseline" + + +class LlmChallengeName(str, Enum): + """Available LLM challenges.""" + + GENERALNLP = "GeneralNLP" + FINANCE = "Finance" + MEDICAL = "Medical" + CODE = "Code" class TemplateNotFound(Exception): @@ -49,17 +59,17 @@ class TemplateNotFound(Exception): def load_template(name: str) -> str: """Load template from template directory and return as text.""" - tpl_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "templates")) - tpl_file_path = os.path.join(tpl_dir, name) + tpl_dir = (Path(__file__).parent / "templates").absolute() + tpl_file_path = tpl_dir / name - if not os.path.isfile(tpl_file_path): + if not 
tpl_file_path.is_file(): raise TemplateNotFound(f"Template '{name}' not found") with open(tpl_file_path, encoding="utf-8") as tpl_file: return tpl_file.read() -def render_template(template: str, data: Dict[str, str]) -> str: +def render_template(template: str, data: dict[str, str]) -> str: """Render template.""" tpl_file = load_template(template) tpl = Template(tpl_file) @@ -68,23 +78,23 @@ def render_template(template: str, data: Dict[str, str]) -> str: return tpl.template -def create_file(file_path: str, content: str) -> None: +def create_file(file_path: Path, content: str) -> None: """Create file including all nessecary directories and write content into file.""" - os.makedirs(os.path.dirname(file_path), exist_ok=True) - with open(file_path, "w", encoding="utf-8") as f: - f.write(content) + file_path.parent.mkdir(exist_ok=True) + file_path.write_text(content) -def render_and_create(file_path: str, template: str, context: Dict[str, str]) -> None: +def render_and_create(file_path: Path, template: str, context: dict[str, str]) -> None: """Render template and write to file.""" content = render_template(template, context) create_file(file_path, content) +# pylint: disable=too-many-locals,too-many-branches,too-many-statements def new( - project_name: Annotated[ + app_name: Annotated[ Optional[str], - typer.Argument(metavar="project_name", help="The name of the project"), + typer.Argument(help="The name of the Flower App"), ] = None, framework: Annotated[ Optional[MlFramework], @@ -95,102 +105,182 @@ def new( typer.Option(case_sensitive=False, help="The Flower username of the author"), ] = None, ) -> None: - """Create new Flower project.""" - if project_name is None: - project_name = prompt_text("Please provide the project name") - if not is_valid_project_name(project_name): - project_name = prompt_text( + """Create new Flower App.""" + if app_name is None: + app_name = prompt_text("Please provide the app name") + if not is_valid_project_name(app_name): + app_name = 
prompt_text( "Please provide a name that only contains " "characters in {'-', a-zA-Z', '0-9'}", predicate=is_valid_project_name, - default=sanitize_project_name(project_name), + default=sanitize_project_name(app_name), ) + # Set project directory path + package_name = re.sub(r"[-_.]+", "-", app_name).lower() + import_name = package_name.replace("-", "_") + project_dir = Path.cwd() / package_name + + if project_dir.exists(): + if not typer.confirm( + typer.style( + f"\n💬 {app_name} already exists, do you want to override it?", + fg=typer.colors.MAGENTA, + bold=True, + ) + ): + return + if username is None: username = prompt_text("Please provide your Flower username") if framework is not None: framework_str = str(framework.value) else: - framework_value = prompt_options( + framework_str = prompt_options( "Please select ML framework by typing in the number", - sorted([mlf.value for mlf in MlFramework]), + [mlf.value for mlf in MlFramework], ) - selected_value = [ - name - for name, value in vars(MlFramework).items() - if value == framework_value - ] - framework_str = selected_value[0] - framework_str = framework_str.lower() + llm_challenge_str = None + if framework_str == MlFramework.FLOWERTUNE: + llm_challenge_value = prompt_options( + "Please select LLM challenge by typing in the number", + sorted([challenge.value for challenge in LlmChallengeName]), + ) + llm_challenge_str = llm_challenge_value.lower() + + if framework_str == MlFramework.BASELINE: + framework_str = "baseline" print( typer.style( - f"\n🔨 Creating Flower project {project_name}...", + f"\n🔨 Creating Flower App {app_name}...", fg=typer.colors.GREEN, bold=True, ) ) - # Set project directory path - cwd = os.getcwd() - package_name = re.sub(r"[-_.]+", "-", project_name).lower() - import_name = package_name.replace("-", "_") - project_dir = os.path.join(cwd, package_name) + context = { + "framework_str": framework_str, + "import_name": import_name.replace("-", "_"), + "package_name": package_name, + 
"project_name": app_name, + "username": username, + } + + template_name = framework_str.lower() # List of files to render - files = { - ".gitignore": {"template": "app/.gitignore.tpl"}, - "README.md": {"template": "app/README.md.tpl"}, - "pyproject.toml": {"template": f"app/pyproject.{framework_str}.toml.tpl"}, - f"{import_name}/__init__.py": {"template": "app/code/__init__.py.tpl"}, - f"{import_name}/server.py": { - "template": f"app/code/server.{framework_str}.py.tpl" - }, - f"{import_name}/client.py": { - "template": f"app/code/client.{framework_str}.py.tpl" - }, - } + if llm_challenge_str: + files = { + ".gitignore": {"template": "app/.gitignore.tpl"}, + "pyproject.toml": {"template": f"app/pyproject.{template_name}.toml.tpl"}, + "README.md": {"template": f"app/README.{template_name}.md.tpl"}, + f"{import_name}/__init__.py": {"template": "app/code/__init__.py.tpl"}, + f"{import_name}/server_app.py": { + "template": "app/code/flwr_tune/server_app.py.tpl" + }, + f"{import_name}/client_app.py": { + "template": "app/code/flwr_tune/client_app.py.tpl" + }, + f"{import_name}/models.py": { + "template": "app/code/flwr_tune/models.py.tpl" + }, + f"{import_name}/dataset.py": { + "template": "app/code/flwr_tune/dataset.py.tpl" + }, + f"{import_name}/strategy.py": { + "template": "app/code/flwr_tune/strategy.py.tpl" + }, + } + + # Challenge specific context + fraction_fit = "0.2" if llm_challenge_str == "code" else "0.1" + if llm_challenge_str == "generalnlp": + challenge_name = "General NLP" + num_clients = "20" + dataset_name = "vicgalle/alpaca-gpt4" + elif llm_challenge_str == "finance": + challenge_name = "Finance" + num_clients = "50" + dataset_name = "FinGPT/fingpt-sentiment-train" + elif llm_challenge_str == "medical": + challenge_name = "Medical" + num_clients = "20" + dataset_name = "medalpaca/medical_meadow_medical_flashcards" + else: + challenge_name = "Code" + num_clients = "10" + dataset_name = "lucasmccabe-lmi/CodeAlpaca-20k" - # Depending on the framework, 
generate task.py file - frameworks_with_tasks = [ - MlFramework.PYTORCH.value.lower(), - MlFramework.JAX.value.lower(), - MlFramework.HUGGINGFACE.value.lower(), - MlFramework.MLX.value.lower(), - MlFramework.TENSORFLOW.value.lower(), - ] - if framework_str in frameworks_with_tasks: - files[f"{import_name}/task.py"] = { - "template": f"app/code/task.{framework_str}.py.tpl" + context["llm_challenge_str"] = llm_challenge_str + context["fraction_fit"] = fraction_fit + context["challenge_name"] = challenge_name + context["num_clients"] = num_clients + context["dataset_name"] = dataset_name + else: + files = { + ".gitignore": {"template": "app/.gitignore.tpl"}, + "README.md": {"template": "app/README.md.tpl"}, + "pyproject.toml": {"template": f"app/pyproject.{template_name}.toml.tpl"}, + f"{import_name}/__init__.py": {"template": "app/code/__init__.py.tpl"}, + f"{import_name}/server_app.py": { + "template": f"app/code/server.{template_name}.py.tpl" + }, + f"{import_name}/client_app.py": { + "template": f"app/code/client.{template_name}.py.tpl" + }, } - context = { - "project_name": project_name, - "package_name": package_name, - "import_name": import_name.replace("-", "_"), - "username": username, - } + # Depending on the framework, generate task.py file + frameworks_with_tasks = [ + MlFramework.PYTORCH.value, + MlFramework.JAX.value, + MlFramework.HUGGINGFACE.value, + MlFramework.MLX.value, + MlFramework.TENSORFLOW.value, + MlFramework.SKLEARN.value, + MlFramework.NUMPY.value, + ] + if framework_str in frameworks_with_tasks: + files[f"{import_name}/task.py"] = { + "template": f"app/code/task.{template_name}.py.tpl" + } + + if framework_str == "baseline": + # Include additional files for baseline template + for file_name in ["model", "dataset", "strategy", "utils", "__init__"]: + files[f"{import_name}/{file_name}.py"] = { + "template": f"app/code/{file_name}.{template_name}.py.tpl" + } + + # Replace README.md + files["README.md"]["template"] = 
f"app/README.{template_name}.md.tpl" + + # Add LICENSE + files["LICENSE"] = {"template": "app/LICENSE.tpl"} for file_path, value in files.items(): render_and_create( - file_path=os.path.join(project_dir, file_path), + file_path=project_dir / file_path, template=value["template"], context=context, ) print( typer.style( - "🎊 Project creation successful.\n\n" - "Use the following command to run your project:\n", + "🎊 Flower App creation successful.\n\n" + "Use the following command to run your Flower App:\n", fg=typer.colors.GREEN, bold=True, ) ) + + _add = " huggingface-cli login\n" if llm_challenge_str else "" print( typer.style( - f" cd {project_name}\n" + " pip install -e .\n flwr run\n", + f" cd {package_name}\n" + " pip install -e .\n" + _add + " flwr run\n", fg=typer.colors.BRIGHT_CYAN, bold=True, ) diff --git a/src/py/flwr/cli/new/new_test.py b/src/py/flwr/cli/new/new_test.py index 33ad745efa93..8ebfb115e1f8 100644 --- a/src/py/flwr/cli/new/new_test.py +++ b/src/py/flwr/cli/new/new_test.py @@ -15,6 +15,7 @@ """Test for Flower command line interface `new` command.""" import os +from pathlib import Path import pytest @@ -38,6 +39,7 @@ def test_render_template() -> None: # Prepare filename = "app/README.md.tpl" data = { + "framework_str": "", "project_name": "FedGPT", "package_name": "fedgpt", "import_name": "fedgpt", @@ -54,7 +56,7 @@ def test_render_template() -> None: def test_create_file(tmp_path: str) -> None: """Test if file with content is created.""" # Prepare - file_path = os.path.join(tmp_path, "test.txt") + file_path = Path(tmp_path) / "test.txt" content = "Foobar" # Execute @@ -86,28 +88,30 @@ def test_new_correct_name(tmp_path: str) -> None: } expected_files_module = { "__init__.py", - "server.py", - "client.py", + "server_app.py", + "client_app.py", "task.py", } # Current directory - origin = os.getcwd() + origin = Path.cwd() try: # Change into the temprorary directory os.chdir(tmp_path) # Execute - new(project_name=project_name, 
framework=framework, username=username) + new(app_name=project_name, framework=framework, username=username) # Assert - file_list = os.listdir(os.path.join(tmp_path, expected_top_level_dir)) - assert set(file_list) == expected_files_top_level - - file_list = os.listdir( - os.path.join(tmp_path, expected_top_level_dir, expected_module_dir) - ) - assert set(file_list) == expected_files_module + file_list = (Path(tmp_path) / expected_top_level_dir).iterdir() + assert { + file_path.name for file_path in file_list + } == expected_files_top_level + + file_list = ( + Path(tmp_path) / expected_top_level_dir / expected_module_dir + ).iterdir() + assert {file_path.name for file_path in file_list} == expected_files_module finally: os.chdir(origin) @@ -119,7 +123,7 @@ def test_new_incorrect_name(tmp_path: str) -> None: for project_name in ["My_Flower_App", "My.Flower App"]: # Current directory - origin = os.getcwd() + origin = Path.cwd() try: # Change into the temprorary directory @@ -129,7 +133,7 @@ def test_new_incorrect_name(tmp_path: str) -> None: # Execute new( - project_name=project_name, + app_name=project_name, framework=framework, username=username, ) diff --git a/src/py/flwr/cli/new/templates/app/LICENSE.tpl b/src/py/flwr/cli/new/templates/app/LICENSE.tpl new file mode 100644 index 000000000000..7a4a3ea2424c --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/LICENSE.tpl @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/src/py/flwr/cli/new/templates/app/README.baseline.md.tpl b/src/py/flwr/cli/new/templates/app/README.baseline.md.tpl new file mode 100644 index 000000000000..9bbbe8f22794 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/README.baseline.md.tpl @@ -0,0 +1,127 @@ +--- +title: title of the paper # TODO +url: https://arxiv.org/abs/2007.14390 # TODO: update with the link to your paper +labels: [label1, label2] # TODO: please add between 4 and 10 single-word (maybe two-words) labels (e.g. system heterogeneity, image classification, asynchronous, weight sharing, cross-silo). Do not use "". Remove this comment once you are done. +dataset: [dataset1, dataset2] # TODO: list of datasets you include in your baseline. Do not use "". Remove this comment once you are done. 
+---
+
+> [!IMPORTANT]
+> This is the template for your `README.md`. Please fill-in the information in all areas with a :warning: symbol.
+> Please refer to the [Flower Baselines contribution](https://flower.ai/docs/baselines/how-to-contribute-baselines.html) and [Flower Baselines usage](https://flower.ai/docs/baselines/how-to-use-baselines.html) guides for more details.
+> Please complete the metadata section at the very top of this README. This generates a table at the top of the file that will facilitate indexing baselines.
+> Please remove this [!IMPORTANT] block once you are done with your `README.md` as well as all the `:warning:` symbols and the comments next to them.
+
+> [!IMPORTANT]
+> To help having all baselines similarly formatted and structured, we have included two scripts in `baselines/dev` that when run will format your code and run some tests checking if it's formatted.
+> These checks use standard packages such as `isort`, `black`, `pylint` and others. You as a baseline creator will need to install additional packages. These are already specified in the `pyproject.toml` of
+> your baseline. Follow these steps:
+
+```bash
+# Create a python env
+pyenv virtualenv 3.10.14 $project_name
+
+# Activate it
+pyenv activate $project_name
+
+# Install project including developer packages
+# Note the `-e` this means you install it in editable mode
+# so even if you change the code you don't need to do `pip install`
+# again. However, if you add a new dependency to `pyproject.toml` you
+# will need to re-run the command below
+pip install -e ".[dev]"
+
+# Even without modifying or adding new code, you can run your baseline
+# with the placeholder code generated when you did `flwr new`. If you
+# want to test this to familiarise yourself with how flower apps are
+# executed, execute this from the directory where you `pyproject.toml` is:
+flwr run .
+
+# At any point during the process of creating your baseline you can
+# run the formatting script. 
For this do: +cd .. # so you are in the `flower/baselines` directory + +# Run the formatting script (it will auto-correct issues if possible) +./dev/format-baseline.sh $project_name + +# Then, if the above is all good, run the tests. +./dev/test-baseline.sh $project_name +``` + +> [!IMPORTANT] +> When you open a PR to get the baseline merged into the main Flower repository, the `./dev/test-baseline.sh` script will run. Only if test pass, the baseline can be merged. +> Some issues highlighted by the tests script are easier than others to fix. Do not hesitate in reaching out for help to us (e.g. as a comment in your PR) if you are stuck with these. +> Before opening your PR, please remove the code snippet above as well all the [!IMPORTANT] message blocks. Yes, including this one. + +# :warning: *_Title of your baseline_* # Also copy this title to the `description` in the `[project]` section of your `pyproject.toml`. + +> [!NOTE] +> If you use this baseline in your work, please remember to cite the original authors of the paper as well as the Flower paper. + +**Paper:** :warning: *_add the URL of the paper page (not to the .pdf). For instance if you link a paper on ArXiv, add here the URL to the abstract page (e.g. [paper](https://arxiv.org/abs/1512.03385)). If your paper is in from a journal or conference proceedings, please follow the same logic._* + +**Authors:** :warning: *_list authors of the paper_* + +**Abstract:** :warning: *_add here the abstract of the paper you are implementing_* + + +## About this baseline + +**What’s implemented:** :warning: *_Concisely describe what experiment(s) (e.g. Figure 1, Table 2, etc) in the publication can be replicated by running the code. Please only use a few sentences. ”_* + +**Datasets:** :warning: *_List the datasets you used (if you used a medium to large dataset, >10GB please also include the sizes of the dataset). 
We highly recommend using [FlowerDatasets](https://flower.ai/docs/datasets/index.html) to download and partition your dataset. If you have other ways to download the data, you can also use `FlowerDatasets` to partiion it._* + +**Hardware Setup:** :warning: *_Give some details about the hardware (e.g. a server with 8x V100 32GB and 256GB of RAM) you used to run the experiments for this baseline. Indicate how long it took to run the experiments. Someone out there might not have access to the same resources you have so, could you list the absolute minimum hardware needed to run the experiment in a reasonable amount of time ? (e.g. minimum is 1x 16GB GPU otherwise a client model can’t be trained with a sufficiently large batch size). Could you test this works too?_* + +**Contributors:** :warning: *_let the world know who contributed to this baseline. This could be either your name, your name and affiliation at the time, or your GitHub profile name if you prefer. If multiple contributors signed up for this baseline, please list yourself and your colleagues_* + + +## Experimental Setup + +**Task:** :warning: *_what’s the primary task that is being federated? (e.g. image classification, next-word prediction). If you have experiments for several, please list them_* + +**Model:** :warning: *_provide details about the model you used in your experiments (if more than use a list). If your model is small, describing it as a table would be :100:. Some FL methods do not use an off-the-shelve model (e.g. ResNet18) instead they create your own. If this is your case, please provide a summary here and give pointers to where in the paper (e.g. Appendix B.4) is detailed._* + +**Dataset:** :warning: *_Earlier you listed already the datasets that your baseline uses. Now you should include a breakdown of the details about each of them. Please include information about: how the dataset is partitioned (e.g. 
LDA with alpha 0.1 as default and all clients have the same number of training examples; or each client gets assigned a different number of samples following a power-law distribution with each client only instances of 2 classes)? if your dataset is naturally partitioned just state “naturally partitioned”; how many partitions there are (i.e. how many clients)? Please include this an all information relevant about the dataset and its partitioning into a table._* + +**Training Hyperparameters:** :warning: *_Include a table with all the main hyperparameters in your baseline. Please show them with their default value._* + + +## Environment Setup + +:warning: _Specify the steps to create and activate your environment and install the baseline project. Most baselines are expected to require minimal steps as shown below. These instructions should be comprehensive enough so anyone can run them (if non standard, describe them step-by-step)._ + +:warning: _The dependencies for your baseline are listed in the `pyproject.toml`, extend it with additional packages needed for your baseline._ + +:warning: _Baselines should use Python 3.10, [pyenv](https://github.com/pyenv/pyenv), and the [virtualenv](https://github.com/pyenv/pyenv-virtualenv) plugging. + +```bash +# Create the virtual environment +pyenv virtualenv 3.10.14 + +# Activate it +pyenv activate + +# Install the baseline +pip install -e . +``` + +:warning: _If your baseline requires running some script before starting an experiment, please indicate so here_. + +## Running the Experiments + +:warning: _Make sure you have adjusted the `client-resources` in the federation in `pyproject.toml` so your simulation makes the best use of the system resources available._ + +:warning: _Your baseline implementation should replicate several of the experiments in the original paper. Please include here the exact command(s) needed to run each of those experiments followed by a figure (e.g. 
a line plot) or table showing the results you obtained when you ran the code. Below is an example of how you can present this. Please add command followed by results for all your experiments._*
+
+:warning: _You might want to add more hyperparameters and settings for your baseline. You can do so by extending `[tool.flwr.app.config]` in `pyproject.toml`. In addition, you can create a new `.toml` file that can be passed with the `--run-config` command (see below an example) to override several config values **already present** in `pyproject.toml`._
+```bash
+# it is likely that for one experiment you need to override some arguments.
+flwr run . --run-config learning-rate=0.1,coefficient=0.123
+
+# or you might want to load different `.toml` configs all together:
+flwr run . --run-config .toml
+```
+
+:warning: _It is preferable to show a single command (or multiple commands if they belong to the same experiment) and then a table/plot with the expected results, instead of showing all the commands first and then all the results/plots._
+:warning: _If you present plots or other figures, please include either a Jupyter notebook showing how to create them or include a utility function that can be called after the experiments finish running._
+:warning: If you include plots or figures, save them in `.png` format and place them in a new directory named `_static` at the same level as your `README.md`.
diff --git a/src/py/flwr/cli/new/templates/app/README.flowertune.md.tpl b/src/py/flwr/cli/new/templates/app/README.flowertune.md.tpl
new file mode 100644
index 000000000000..2703f0a86a3e
--- /dev/null
+++ b/src/py/flwr/cli/new/templates/app/README.flowertune.md.tpl
@@ -0,0 +1,66 @@
+# FlowerTune LLM on $challenge_name Dataset
+
+This directory conducts federated instruction tuning with a pretrained [Mistral-7B](https://huggingface.co/mistralai/Mistral-7B-v0.3) model on a [$challenge_name dataset](https://huggingface.co/datasets/$dataset_name). 
+We use [Flower Datasets](https://flower.dev/docs/datasets/) to download, partition and preprocess the dataset. +Flower's Simulation Engine is used to simulate the LLM fine-tuning process in a federated way, +which allows users to perform the training on a single GPU. + + +## Methodology + +This baseline performs federated LLM fine-tuning with [LoRA](https://arxiv.org/pdf/2106.09685) using the [🤗PEFT](https://huggingface.co/docs/peft/en/index) library. +The clients' models are aggregated with FedAvg strategy. +This provides a baseline performance for the leaderboard of $challenge_name challenge. + + +## Environment setup + +Project dependencies are defined in `pyproject.toml`. Install them in an activated Python environment with: + +```shell +pip install -e . +``` + +## Experimental setup + +The dataset is divided into $num_clients partitions in an IID fashion, a partition is assigned to each ClientApp. +We randomly sample a fraction ($fraction_fit) of the total nodes to participate in each round, for a total of `200` rounds. +All settings are defined in `pyproject.toml`. + +> [!IMPORTANT] +> Please note that `[tool.flwr.app.config.static]` and `options.num-supernodes` under `[tool.flwr.federations.local-simulation]` are not allowed to be modified for fair competition if you plan to participate in the [LLM leaderboard](https://flower.ai/benchmarks/llm-leaderboard). + + +## Running the challenge + +First make sure that you have access to the [Mistral-7B](https://huggingface.co/mistralai/Mistral-7B-v0.3) model with your Hugging-Face account. You can request access directly from the Hugging-Face website. +Then, follow the instructions [here](https://huggingface.co/docs/huggingface_hub/en/quick-start#login-command) to log in to your account. Note you only need to complete this stage once in your development machine: + +```bash +huggingface-cli login +``` + +Run the challenge with default config values. 
+The configs are defined in `[tool.flwr.app.config]` entry of `pyproject.toml`, and are loaded automatically. + +```bash +flwr run +``` + +## VRAM consumption + +We use Mistral-7B model with 4-bit quantization by default. The estimated VRAM consumption per client for each challenge is shown below: + +| Challenges | GeneralNLP | Finance | Medical | Code | +| :--------: | :--------: | :--------: | :--------: | :--------: | +| VRAM | ~25.50 GB | ~17.30 GB | ~22.80 GB | ~17.40 GB | + +You can adjust the CPU/GPU resources you assign to each of the clients based on your device, which are specified with `options.backend.client-resources.num-cpus` and `options.backend.client-resources.num-gpus` under `[tool.flwr.federations.local-simulation]` entry in `pyproject.toml`. + + +## Model saving + +The global PEFT model checkpoints are saved every 5 rounds after aggregation on the server side by default, which can be specified with `train.save-every-round` under [tool.flwr.app.config] entry in `pyproject.toml`. + +> [!NOTE] +> Please provide the last PEFT checkpoint if you plan to participate in the [LLM leaderboard](https://flower.ai/benchmarks/llm-leaderboard). diff --git a/src/py/flwr/cli/new/templates/app/README.md.tpl b/src/py/flwr/cli/new/templates/app/README.md.tpl index ddc42cafabc3..32e95fc0763d 100644 --- a/src/py/flwr/cli/new/templates/app/README.md.tpl +++ b/src/py/flwr/cli/new/templates/app/README.md.tpl @@ -1,43 +1,20 @@ -# $project_name +# $project_name: A Flower / $framework_str app -## Install dependencies +## Install dependencies and project ```bash -pip install . +pip install -e . ``` -## Run (Simulation Engine) +## Run with the Simulation Engine In the `$project_name` directory, use `flwr run` to run a local simulation: ```bash -flwr run +flwr run . 
``` -## Run (Deployment Engine) +## Run with the Deployment Engine -### Start the SuperLink - -```bash -flower-superlink --insecure -``` - -### Start the long-running Flower client - -In a new terminal window, start the first long-running Flower client: - -```bash -flower-client-app client:app --insecure -``` - -In yet another new terminal window, start the second long-running Flower client: - -```bash -flower-client-app client:app --insecure -``` - -### Start the ServerApp - -```bash -flower-server-app server:app --insecure -``` +> \[!NOTE\] +> An update to this example will show how to run this Flower application with the Deployment Engine and TLS certificates, or with Docker. diff --git a/src/py/flwr/cli/new/templates/app/code/__init__.baseline.py.tpl b/src/py/flwr/cli/new/templates/app/code/__init__.baseline.py.tpl new file mode 100644 index 000000000000..5ad8041381d6 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/__init__.baseline.py.tpl @@ -0,0 +1 @@ +"""$project_name: A Flower Baseline.""" diff --git a/src/py/flwr/cli/new/templates/app/code/__init__.py.tpl b/src/py/flwr/cli/new/templates/app/code/__init__.py.tpl index 57998c81efb8..e6b63ee8ae6b 100644 --- a/src/py/flwr/cli/new/templates/app/code/__init__.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/__init__.py.tpl @@ -1 +1 @@ -"""$project_name.""" +"""$project_name: A Flower / $framework_str app.""" diff --git a/src/py/flwr/cli/new/templates/app/code/client.baseline.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.baseline.py.tpl new file mode 100644 index 000000000000..83a475f20d27 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/client.baseline.py.tpl @@ -0,0 +1,58 @@ +"""$project_name: A Flower Baseline.""" + +import torch + +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context +from $import_name.dataset import load_data +from $import_name.model import Net, get_weights, set_weights, test, train + + +class FlowerClient(NumPyClient): + """A class 
 defining the client.""" + + def __init__(self, net, trainloader, valloader, local_epochs): + self.net = net + self.trainloader = trainloader + self.valloader = valloader + self.local_epochs = local_epochs + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + self.net.to(self.device) + + def fit(self, parameters, config): + """Train model using this client's data.""" + set_weights(self.net, parameters) + train_loss = train( + self.net, + self.trainloader, + self.local_epochs, + self.device, + ) + return ( + get_weights(self.net), + len(self.trainloader.dataset), + {"train_loss": train_loss}, + ) + + def evaluate(self, parameters, config): + """Evaluate model using this client's data.""" + set_weights(self.net, parameters) + loss, accuracy = test(self.net, self.valloader, self.device) + return loss, len(self.valloader.dataset), {"accuracy": accuracy} + + +def client_fn(context: Context): + """Construct a Client that will be run in a ClientApp.""" + # Load model and data + net = Net() + partition_id = int(context.node_config["partition-id"]) + num_partitions = int(context.node_config["num-partitions"]) + trainloader, valloader = load_data(partition_id, num_partitions) + local_epochs = context.run_config["local-epochs"] + + # Return Client instance + return FlowerClient(net, trainloader, valloader, local_epochs).to_client() + + +# Flower ClientApp +app = ClientApp(client_fn) diff --git a/src/py/flwr/cli/new/templates/app/code/client.hf.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.hf.py.tpl deleted file mode 100644 index 314da2120c53..000000000000 --- a/src/py/flwr/cli/new/templates/app/code/client.hf.py.tpl +++ /dev/null @@ -1,55 +0,0 @@ -"""$project_name: A Flower / HuggingFace Transformers app.""" - -from flwr.client import ClientApp, NumPyClient -from transformers import AutoModelForSequenceClassification - -from $import_name.task import ( - get_weights, - load_data, - set_weights, - train, - test, - CHECKPOINT, - DEVICE, -) - - 
-# Flower client -class FlowerClient(NumPyClient): - def __init__(self, net, trainloader, testloader): - self.net = net - self.trainloader = trainloader - self.testloader = testloader - - def get_parameters(self, config): - return get_weights(self.net) - - def set_parameters(self, parameters): - set_weights(self.net, parameters) - - def fit(self, parameters, config): - self.set_parameters(parameters) - train(self.net, self.trainloader, epochs=1) - return self.get_parameters(config={}), len(self.trainloader), {} - - def evaluate(self, parameters, config): - self.set_parameters(parameters) - loss, accuracy = test(self.net, self.testloader) - return float(loss), len(self.testloader), {"accuracy": accuracy} - - -def client_fn(cid): - # Load model and data - net = AutoModelForSequenceClassification.from_pretrained( - CHECKPOINT, num_labels=2 - ).to(DEVICE) - trainloader, valloader = load_data(int(cid), 2) - - # Return Client instance - return FlowerClient(net, trainloader, valloader).to_client() - - -# Flower ClientApp -app = ClientApp( - client_fn, -) diff --git a/src/py/flwr/cli/new/templates/app/code/client.huggingface.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.huggingface.py.tpl new file mode 100644 index 000000000000..840f938b4ecc --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/client.huggingface.py.tpl @@ -0,0 +1,55 @@ +"""$project_name: A Flower / $framework_str app.""" + +import torch +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context +from transformers import AutoModelForSequenceClassification + +from $import_name.task import get_weights, load_data, set_weights, test, train + + +# Flower client +class FlowerClient(NumPyClient): + def __init__(self, net, trainloader, testloader, local_epochs): + self.net = net + self.trainloader = trainloader + self.testloader = testloader + self.local_epochs = local_epochs + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + 
self.net.to(self.device) + + def fit(self, parameters, config): + set_weights(self.net, parameters) + train(self.net, self.trainloader, epochs=self.local_epochs, device=self.device) + return get_weights(self.net), len(self.trainloader), {} + + def evaluate(self, parameters, config): + set_weights(self.net, parameters) + loss, accuracy = test(self.net, self.testloader, self.device) + return float(loss), len(self.testloader), {"accuracy": accuracy} + + +def client_fn(context: Context): + + # Get this client's dataset partition + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + model_name = context.run_config["model-name"] + trainloader, valloader = load_data(partition_id, num_partitions, model_name) + + # Load model + num_labels = context.run_config["num-labels"] + net = AutoModelForSequenceClassification.from_pretrained( + model_name, num_labels=num_labels + ) + + local_epochs = context.run_config["local-epochs"] + + # Return Client instance + return FlowerClient(net, trainloader, valloader, local_epochs).to_client() + + +# Flower ClientApp +app = ClientApp( + client_fn, +) diff --git a/src/py/flwr/cli/new/templates/app/code/client.jax.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.jax.py.tpl index 3c6d2f03637a..ffe782d274fc 100644 --- a/src/py/flwr/cli/new/templates/app/code/client.jax.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/client.jax.py.tpl @@ -1,8 +1,9 @@ -"""$project_name: A Flower / JAX app.""" +"""$project_name: A Flower / $framework_str app.""" import jax -from flwr.client import NumPyClient, ClientApp +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context from $import_name.task import ( evaluation, get_params, @@ -16,37 +17,31 @@ from $import_name.task import ( # Define Flower Client and client_fn class FlowerClient(NumPyClient): - def __init__(self): + def __init__(self, input_dim): self.train_x, self.train_y, self.test_x, self.test_y = load_data() 
self.grad_fn = jax.grad(loss_fn) - model_shape = self.train_x.shape[1:] - - self.params = load_model(model_shape) - - def get_parameters(self, config): - return get_params(self.params) - - def set_parameters(self, parameters): - set_params(self.params, parameters) + self.params = load_model((input_dim,)) def fit(self, parameters, config): - self.set_parameters(parameters) + set_params(self.params, parameters) self.params, loss, num_examples = train( self.params, self.grad_fn, self.train_x, self.train_y ) - parameters = self.get_parameters(config={}) - return parameters, num_examples, {"loss": float(loss)} + return get_params(self.params), num_examples, {"loss": float(loss)} def evaluate(self, parameters, config): - self.set_parameters(parameters) + set_params(self.params, parameters) loss, num_examples = evaluation( self.params, self.grad_fn, self.test_x, self.test_y ) return float(loss), num_examples, {"loss": float(loss)} -def client_fn(cid): + +def client_fn(context: Context): + input_dim = context.run_config["input-dim"] + # Return Client instance - return FlowerClient().to_client() + return FlowerClient(input_dim).to_client() # Flower ClientApp diff --git a/src/py/flwr/cli/new/templates/app/code/client.mlx.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.mlx.py.tpl index 1722561370a8..157300655a14 100644 --- a/src/py/flwr/cli/new/templates/app/code/client.mlx.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/client.mlx.py.tpl @@ -1,46 +1,46 @@ -"""$project_name: A Flower / MLX app.""" +"""$project_name: A Flower / $framework_str app.""" import mlx.core as mx import mlx.nn as nn import mlx.optimizers as optim -from flwr.client import NumPyClient, ClientApp +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context +from flwr.common.config import UserConfig from $import_name.task import ( + MLP, batch_iterate, eval_fn, get_params, load_data, loss_fn, set_params, - MLP, ) # Define Flower Client and client_fn class 
FlowerClient(NumPyClient): - def __init__(self, data): - num_layers = 2 - hidden_dim = 32 - num_classes = 10 - batch_size = 256 - num_epochs = 1 - learning_rate = 1e-1 + def __init__( + self, + data, + run_config: UserConfig, + num_classes, + ): + num_layers = run_config["num-layers"] + hidden_dim = run_config["hidden-dim"] + input_dim = run_config["input-dim"] + batch_size = run_config["batch-size"] + learning_rate = run_config["lr"] + self.num_epochs = run_config["local-epochs"] self.train_images, self.train_labels, self.test_images, self.test_labels = data - self.model = MLP(num_layers, self.train_images.shape[-1], hidden_dim, num_classes) - self.optimizer = optim.SGD(learning_rate=learning_rate) - self.loss_and_grad_fn = nn.value_and_grad(self.model, loss_fn) - self.num_epochs = num_epochs + self.model = MLP(num_layers, input_dim, hidden_dim, num_classes) + self.optimizer = optim.SGD(learning_rate=learning_rate) + self.loss_and_grad_fn = nn.value_and_grad(self.model, loss_fn) self.batch_size = batch_size - def get_parameters(self, config): - return get_params(self.model) - - def set_parameters(self, parameters): - set_params(self.model, parameters) - def fit(self, parameters, config): - self.set_parameters(parameters) + set_params(self.model, parameters) for _ in range(self.num_epochs): for X, y in batch_iterate( self.batch_size, self.train_images, self.train_labels @@ -48,20 +48,23 @@ class FlowerClient(NumPyClient): _, grads = self.loss_and_grad_fn(self.model, X, y) self.optimizer.update(self.model, grads) mx.eval(self.model.parameters(), self.optimizer.state) - return self.get_parameters(config={}), len(self.train_images), {} + return get_params(self.model), len(self.train_images), {} def evaluate(self, parameters, config): - self.set_parameters(parameters) + set_params(self.model, parameters) accuracy = eval_fn(self.model, self.test_images, self.test_labels) loss = loss_fn(self.model, self.test_images, self.test_labels) return loss.item(), 
len(self.test_images), {"accuracy": accuracy.item()} -def client_fn(cid): - data = load_data(int(cid), 2) +def client_fn(context: Context): + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + data = load_data(partition_id, num_partitions) + num_classes = 10 # Return Client instance - return FlowerClient(data).to_client() + return FlowerClient(data, context.run_config, num_classes).to_client() # Flower ClientApp diff --git a/src/py/flwr/cli/new/templates/app/code/client.numpy.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.numpy.py.tpl index 232c305fc2a9..f20bb536b3c6 100644 --- a/src/py/flwr/cli/new/templates/app/code/client.numpy.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/client.numpy.py.tpl @@ -1,21 +1,21 @@ -"""$project_name: A Flower / NumPy app.""" +"""$project_name: A Flower / $framework_str app.""" -from flwr.client import NumPyClient, ClientApp -import numpy as np +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context +from $import_name.task import get_dummy_model class FlowerClient(NumPyClient): - def get_parameters(self, config): - return [np.ones((1, 1))] def fit(self, parameters, config): - return ([np.ones((1, 1))], 1, {}) + model = get_dummy_model() + return [model], 1, {} def evaluate(self, parameters, config): return float(0.0), 1, {"accuracy": float(1.0)} -def client_fn(cid: str): +def client_fn(context: Context): return FlowerClient().to_client() diff --git a/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl index c68974efaadf..e141a34d38ce 100644 --- a/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl @@ -1,43 +1,52 @@ -"""$project_name: A Flower / PyTorch app.""" - -from flwr.client import NumPyClient, ClientApp - -from $import_name.task import ( - Net, - DEVICE, - load_data, - get_weights, - 
set_weights, - train, - test, -) +"""$project_name: A Flower / $framework_str app.""" + +import torch + +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context +from $import_name.task import Net, get_weights, load_data, set_weights, test, train # Define Flower Client and client_fn class FlowerClient(NumPyClient): - def __init__(self, net, trainloader, valloader): + def __init__(self, net, trainloader, valloader, local_epochs): self.net = net self.trainloader = trainloader self.valloader = valloader + self.local_epochs = local_epochs + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + self.net.to(self.device) def fit(self, parameters, config): set_weights(self.net, parameters) - results = train(self.net, self.trainloader, self.valloader, 1, DEVICE) - return get_weights(self.net), len(self.trainloader.dataset), results + train_loss = train( + self.net, + self.trainloader, + self.local_epochs, + self.device, + ) + return ( + get_weights(self.net), + len(self.trainloader.dataset), + {"train_loss": train_loss}, + ) def evaluate(self, parameters, config): set_weights(self.net, parameters) - loss, accuracy = test(self.net, self.valloader) + loss, accuracy = test(self.net, self.valloader, self.device) return loss, len(self.valloader.dataset), {"accuracy": accuracy} -def client_fn(cid): +def client_fn(context: Context): # Load model and data - net = Net().to(DEVICE) - trainloader, valloader = load_data(int(cid), 2) + net = Net() + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + trainloader, valloader = load_data(partition_id, num_partitions) + local_epochs = context.run_config["local-epochs"] # Return Client instance - return FlowerClient(net, trainloader, valloader).to_client() + return FlowerClient(net, trainloader, valloader, local_epochs).to_client() # Flower ClientApp diff --git a/src/py/flwr/cli/new/templates/app/code/client.sklearn.py.tpl 
b/src/py/flwr/cli/new/templates/app/code/client.sklearn.py.tpl index 9181389cad1c..69d208ac28c9 100644 --- a/src/py/flwr/cli/new/templates/app/code/client.sklearn.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/client.sklearn.py.tpl @@ -1,40 +1,18 @@ -"""$project_name: A Flower / Scikit-Learn app.""" +"""$project_name: A Flower / $framework_str app.""" import warnings -import numpy as np -from flwr.client import NumPyClient, ClientApp -from flwr_datasets import FederatedDataset -from sklearn.linear_model import LogisticRegression from sklearn.metrics import log_loss - -def get_model_parameters(model): - if model.fit_intercept: - params = [ - model.coef_, - model.intercept_, - ] - else: - params = [model.coef_] - return params - - -def set_model_params(model, params): - model.coef_ = params[0] - if model.fit_intercept: - model.intercept_ = params[1] - return model - - -def set_initial_params(model): - n_classes = 10 # MNIST has 10 classes - n_features = 784 # Number of features in dataset - model.classes_ = np.array([i for i in range(10)]) - - model.coef_ = np.zeros((n_classes, n_features)) - if model.fit_intercept: - model.intercept_ = np.zeros((n_classes,)) +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context +from $import_name.task import ( + get_model, + get_model_params, + load_data, + set_initial_params, + set_model_params, +) class FlowerClient(NumPyClient): @@ -45,9 +23,6 @@ class FlowerClient(NumPyClient): self.y_train = y_train self.y_test = y_test - def get_parameters(self, config): - return get_model_parameters(self.model) - def fit(self, parameters, config): set_model_params(self.model, parameters) @@ -56,7 +31,7 @@ class FlowerClient(NumPyClient): warnings.simplefilter("ignore") self.model.fit(self.X_train, self.y_train) - return get_model_parameters(self.model), len(self.X_train), {} + return get_model_params(self.model), len(self.X_train), {} def evaluate(self, parameters, config): set_model_params(self.model, 
parameters) @@ -66,23 +41,17 @@ class FlowerClient(NumPyClient): return loss, len(self.X_test), {"accuracy": accuracy} -fds = FederatedDataset(dataset="mnist", partitioners={"train": 2}) - -def client_fn(cid: str): - dataset = fds.load_partition(int(cid), "train").with_format("numpy") - X, y = dataset["image"].reshape((len(dataset), -1)), dataset["label"] +def client_fn(context: Context): + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] - # Split the on edge data: 80% train, 20% test - X_train, X_test = X[: int(0.8 * len(X))], X[int(0.8 * len(X)) :] - y_train, y_test = y[: int(0.8 * len(y))], y[int(0.8 * len(y)) :] + X_train, X_test, y_train, y_test = load_data(partition_id, num_partitions) # Create LogisticRegression Model - model = LogisticRegression( - penalty="l2", - max_iter=1, # local epoch - warm_start=True, # prevent refreshing weights when fitting - ) + penalty = context.run_config["penalty"] + local_epochs = context.run_config["local-epochs"] + model = get_model(penalty, local_epochs) # Setting initial parameters, akin to model.compile for keras models set_initial_params(model) diff --git a/src/py/flwr/cli/new/templates/app/code/client.tensorflow.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.tensorflow.py.tpl index dc55d4ca6569..f8c148691561 100644 --- a/src/py/flwr/cli/new/templates/app/code/client.tensorflow.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/client.tensorflow.py.tpl @@ -1,25 +1,31 @@ -"""$project_name: A Flower / TensorFlow app.""" +"""$project_name: A Flower / $framework_str app.""" from flwr.client import NumPyClient, ClientApp +from flwr.common import Context from $import_name.task import load_data, load_model # Define Flower Client and client_fn class FlowerClient(NumPyClient): - def __init__(self, model, x_train, y_train, x_test, y_test): + def __init__( + self, model, data, epochs, batch_size, verbose + ): self.model = model - self.x_train = x_train - 
self.y_train = y_train - self.x_test = x_test - self.y_test = y_test - - def get_parameters(self, config): - return self.model.get_weights() + self.x_train, self.y_train, self.x_test, self.y_test = data + self.epochs = epochs + self.batch_size = batch_size + self.verbose = verbose def fit(self, parameters, config): self.model.set_weights(parameters) - self.model.fit(self.x_train, self.y_train, epochs=1, batch_size=32, verbose=0) + self.model.fit( + self.x_train, + self.y_train, + epochs=self.epochs, + batch_size=self.batch_size, + verbose=self.verbose, + ) return self.model.get_weights(), len(self.x_train), {} def evaluate(self, parameters, config): @@ -28,13 +34,21 @@ class FlowerClient(NumPyClient): return loss, len(self.x_test), {"accuracy": accuracy} -def client_fn(cid): +def client_fn(context: Context): # Load model and data net = load_model() - x_train, y_train, x_test, y_test = load_data(int(cid), 2) + + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + data = load_data(partition_id, num_partitions) + epochs = context.run_config["local-epochs"] + batch_size = context.run_config["batch-size"] + verbose = context.run_config.get("verbose") # Return Client instance - return FlowerClient(net, x_train, y_train, x_test, y_test).to_client() + return FlowerClient( + net, data, epochs, batch_size, verbose + ).to_client() # Flower ClientApp diff --git a/src/py/flwr/cli/new/templates/app/code/dataset.baseline.py.tpl b/src/py/flwr/cli/new/templates/app/code/dataset.baseline.py.tpl new file mode 100644 index 000000000000..46f1f64418c0 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/dataset.baseline.py.tpl @@ -0,0 +1,36 @@ +"""$project_name: A Flower Baseline.""" + +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner +from torch.utils.data import DataLoader +from torchvision.transforms import Compose, Normalize, ToTensor + +FDS = None # Cache FederatedDataset 
+ + +def load_data(partition_id: int, num_partitions: int): + """Load partition CIFAR10 data.""" + # Only initialize `FederatedDataset` once + global FDS # pylint: disable=global-statement + if FDS is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + FDS = FederatedDataset( + dataset="uoft-cs/cifar10", + partitioners={"train": partitioner}, + ) + partition = FDS.load_partition(partition_id) + # Divide data on each node: 80% train, 20% test + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) + pytorch_transforms = Compose( + [ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] + ) + + def apply_transforms(batch): + """Apply transforms to the partition from FederatedDataset.""" + batch["img"] = [pytorch_transforms(img) for img in batch["img"]] + return batch + + partition_train_test = partition_train_test.with_transform(apply_transforms) + trainloader = DataLoader(partition_train_test["train"], batch_size=32, shuffle=True) + testloader = DataLoader(partition_train_test["test"], batch_size=32) + return trainloader, testloader diff --git a/src/py/flwr/cli/new/templates/app/code/flwr_tune/__init__.py b/src/py/flwr/cli/new/templates/app/code/flwr_tune/__init__.py new file mode 100644 index 000000000000..6d886f216bc9 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/flwr_tune/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Flower CLI `new` command app / code / flwr_tune templates.""" diff --git a/src/py/flwr/cli/new/templates/app/code/flwr_tune/client_app.py.tpl b/src/py/flwr/cli/new/templates/app/code/flwr_tune/client_app.py.tpl new file mode 100644 index 000000000000..415898ba117b --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/flwr_tune/client_app.py.tpl @@ -0,0 +1,126 @@ +"""$project_name: A Flower / FlowerTune app.""" + +import os +import warnings +from typing import Dict, Tuple + +import torch +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context +from flwr.common.config import unflatten_dict +from flwr.common.typing import NDArrays, Scalar +from omegaconf import DictConfig + +from transformers import TrainingArguments +from trl import SFTTrainer + +from $import_name.dataset import ( + get_tokenizer_and_data_collator_and_propt_formatting, + load_data, + replace_keys, +) +from $import_name.models import ( + cosine_annealing, + get_model, + set_parameters, + get_parameters, +) + +# Avoid warnings +os.environ["TOKENIZERS_PARALLELISM"] = "true" +os.environ["RAY_DISABLE_DOCKER_CPU_WARNING"] = "1" +warnings.filterwarnings("ignore", category=UserWarning) + + +# pylint: disable=too-many-arguments +# pylint: disable=too-many-instance-attributes +class FlowerClient(NumPyClient): + """Standard Flower client for CNN training.""" + + def __init__( + self, + model_cfg: DictConfig, + train_cfg: DictConfig, + trainset, + tokenizer, + formatting_prompts_func, + data_collator, + num_rounds, + ): # pylint: disable=too-many-arguments + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + self.train_cfg = train_cfg + self.training_argumnets = TrainingArguments(**train_cfg.training_arguments) + self.tokenizer = tokenizer + self.formatting_prompts_func = formatting_prompts_func + self.data_collator = data_collator + self.num_rounds = num_rounds + 
self.trainset = trainset + + # instantiate model + self.model = get_model(model_cfg) + + def fit( + self, parameters: NDArrays, config: Dict[str, Scalar] + ) -> Tuple[NDArrays, int, Dict]: + """Implement distributed fit function for a given client.""" + set_parameters(self.model, parameters) + + new_lr = cosine_annealing( + int(config["current_round"]), + self.num_rounds, + self.train_cfg.learning_rate_max, + self.train_cfg.learning_rate_min, + ) + + self.training_argumnets.learning_rate = new_lr + self.training_argumnets.output_dir = config["save_path"] + + # Construct trainer + trainer = SFTTrainer( + model=self.model, + tokenizer=self.tokenizer, + args=self.training_argumnets, + max_seq_length=self.train_cfg.seq_length, + train_dataset=self.trainset, + formatting_func=self.formatting_prompts_func, + data_collator=self.data_collator, + ) + + # Do local training + results = trainer.train() + + return ( + get_parameters(self.model), + len(self.trainset), + {"train_loss": results.training_loss}, + ) + + +def client_fn(context: Context) -> FlowerClient: + """Create a Flower client representing a single organization.""" + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + num_rounds = context.run_config["num-server-rounds"] + cfg = DictConfig(replace_keys(unflatten_dict(context.run_config))) + + # Let's get the client partition + client_trainset = load_data(partition_id, num_partitions, cfg.static.dataset.name) + ( + tokenizer, + data_collator, + formatting_prompts_func, + ) = get_tokenizer_and_data_collator_and_propt_formatting(cfg.model.name) + + return FlowerClient( + cfg.model, + cfg.train, + client_trainset, + tokenizer, + formatting_prompts_func, + data_collator, + num_rounds, + ).to_client() + + +# Flower ClientApp +app = ClientApp(client_fn) diff --git a/src/py/flwr/cli/new/templates/app/code/flwr_tune/dataset.py.tpl b/src/py/flwr/cli/new/templates/app/code/flwr_tune/dataset.py.tpl new file mode 100644 
index 000000000000..41381ef7c7a3 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/flwr_tune/dataset.py.tpl @@ -0,0 +1,87 @@ +"""$project_name: A Flower / FlowerTune app.""" + +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner +from transformers import AutoTokenizer +from trl import DataCollatorForCompletionOnlyLM + +FDS = None # Cache FederatedDataset + + +def formatting_prompts_func(example): + """Construct prompts.""" + output_texts = [] + # Constructing a standard Alpaca + # (https://github.com/tatsu-lab/stanford_alpaca#data-release) prompt + mssg = ( + "Below is an instruction that describes a task. " + "Write a response that appropriately completes the request." + ) + for i in range(len(example["instruction"])): + text = ( + f"{mssg}\n### Instruction:\n{example['instruction'][i]}\n" + f"### Response: {example['response'][i]}" + ) + output_texts.append(text) + return output_texts + + +def get_tokenizer_and_data_collator_and_propt_formatting(model_name: str): + """Get tokenizer, data_collator and prompt formatting.""" + tokenizer = AutoTokenizer.from_pretrained( + model_name, use_fast=True, padding_side="right" + ) + tokenizer.pad_token = tokenizer.eos_token + response_template_with_context = "\n### Response:" # alpaca response tag + response_template_ids = tokenizer.encode( + response_template_with_context, add_special_tokens=False + )[2:] + data_collator = DataCollatorForCompletionOnlyLM( + response_template_ids, tokenizer=tokenizer + ) + + return tokenizer, data_collator, formatting_prompts_func + + +def formatting(dataset): + """Format dataset.""" + dataset["instruction"] = dataset["instruction"] + " " + dataset["input"] + return dataset + + +def reformat(dataset, llm_task): + """Reformat datasets.""" + dataset = dataset.rename_column("output", "response") + if llm_task in ["finance", "code"]: + dataset = dataset.map(formatting, remove_columns=["input"]) + if llm_task == "medical": + dataset = 
dataset.remove_columns(["instruction"]) + dataset = dataset.rename_column("input", "instruction") + return dataset + + +def load_data(partition_id: int, num_partitions: int, dataset_name: str): + """Load partition data.""" + # Only initialize `FederatedDataset` once + global FDS + if FDS is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + FDS = FederatedDataset( + dataset=dataset_name, + partitioners={"train": partitioner}, + ) + client_trainset = FDS.load_partition(partition_id, "train") + client_trainset = reformat(client_trainset, llm_task="generalnlp") + return client_trainset + + +def replace_keys(input_dict, match="-", target="_"): + """Recursively replace match string with target string in dictionary keys.""" + new_dict = {} + for key, value in input_dict.items(): + new_key = key.replace(match, target) + if isinstance(value, dict): + new_dict[new_key] = replace_keys(value, match, target) + else: + new_dict[new_key] = value + return new_dict diff --git a/src/py/flwr/cli/new/templates/app/code/flwr_tune/models.py.tpl b/src/py/flwr/cli/new/templates/app/code/flwr_tune/models.py.tpl new file mode 100644 index 000000000000..3f3f95c8b8eb --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/flwr_tune/models.py.tpl @@ -0,0 +1,78 @@ +"""$project_name: A Flower / FlowerTune app.""" + +import math + +import torch +from omegaconf import DictConfig +from collections import OrderedDict +from peft import ( + LoraConfig, + get_peft_model, + get_peft_model_state_dict, + set_peft_model_state_dict, +) +from peft.utils import prepare_model_for_kbit_training +from transformers import AutoModelForCausalLM, BitsAndBytesConfig + +from flwr.common.typing import NDArrays + + +def cosine_annealing( + current_round: int, + total_round: int, + lrate_max: float = 0.001, + lrate_min: float = 0.0, +) -> float: + """Implement cosine annealing learning rate schedule.""" + cos_inner = math.pi * current_round / total_round + return lrate_min + 0.5 * (lrate_max - 
lrate_min) * (1 + math.cos(cos_inner)) + + +def get_model(model_cfg: DictConfig): + """Load model with appropriate quantization config and other optimizations. + """ + if model_cfg.quantization == 4: + quantization_config = BitsAndBytesConfig(load_in_4bit=True) + elif model_cfg.quantization == 8: + quantization_config = BitsAndBytesConfig(load_in_8bit=True) + else: + raise ValueError( + f"Use 4-bit or 8-bit quantization. You passed: {model_cfg.quantization}/" + ) + + model = AutoModelForCausalLM.from_pretrained( + model_cfg.name, + quantization_config=quantization_config, + torch_dtype=torch.bfloat16, + low_cpu_mem_usage=True, + ) + + model = prepare_model_for_kbit_training( + model, use_gradient_checkpointing=model_cfg.gradient_checkpointing + ) + + peft_config = LoraConfig( + r=model_cfg.lora.peft_lora_r, + lora_alpha=model_cfg.lora.peft_lora_alpha, + lora_dropout=0.075, + task_type="CAUSAL_LM", + ) + + if model_cfg.gradient_checkpointing: + model.config.use_cache = False + + return get_peft_model(model, peft_config) + + +def set_parameters(model, parameters: NDArrays) -> None: + """Change the parameters of the model using the given ones.""" + peft_state_dict_keys = get_peft_model_state_dict(model).keys() + params_dict = zip(peft_state_dict_keys, parameters) + state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict}) + set_peft_model_state_dict(model, state_dict) + + +def get_parameters(model) -> NDArrays: + """Return the parameters of the current net.""" + state_dict = get_peft_model_state_dict(model) + return [val.cpu().numpy() for _, val in state_dict.items()] diff --git a/src/py/flwr/cli/new/templates/app/code/flwr_tune/server_app.py.tpl b/src/py/flwr/cli/new/templates/app/code/flwr_tune/server_app.py.tpl new file mode 100644 index 000000000000..7d4de0f73dbf --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/flwr_tune/server_app.py.tpl @@ -0,0 +1,94 @@ +"""$project_name: A Flower / FlowerTune app.""" + +import os +from datetime import 
datetime + +from flwr.common import Context, ndarrays_to_parameters +from flwr.common.config import unflatten_dict +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from omegaconf import DictConfig + +from $import_name.models import get_model, get_parameters, set_parameters +from $import_name.dataset import replace_keys +from $import_name.strategy import FlowerTuneLlm + + +# Get function that will be executed by the strategy's evaluate() method +# Here we use it to save global model checkpoints +def get_evaluate_fn(model_cfg, save_every_round, total_round, save_path): + """Return an evaluation function for saving global model.""" + + def evaluate(server_round: int, parameters, config): + # Save model + if server_round != 0 and ( + server_round == total_round or server_round % save_every_round == 0 + ): + # Init model + model = get_model(model_cfg) + set_parameters(model, parameters) + + model.save_pretrained(f"{save_path}/peft_{server_round}") + + return 0.0, {} + + return evaluate + + +def get_on_fit_config(save_path): + """Return a function that will be used to construct the config that the + client's fit() method will receive.""" + + def fit_config_fn(server_round: int): + fit_config = {} + fit_config["current_round"] = server_round + fit_config["save_path"] = save_path + return fit_config + + return fit_config_fn + + +def fit_weighted_average(metrics): + """Aggregate (federated) evaluation metrics.""" + # Multiply accuracy of each client by number of examples used + losses = [num_examples * m["train_loss"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + # Aggregate and return custom metric (weighted average) + return {"train_loss": sum(losses) / sum(examples)} + + +def server_fn(context: Context): + """Construct components that set the ServerApp behaviour.""" + # Create output directory given current timestamp + current_time = datetime.now() + folder_name = 
current_time.strftime("%Y-%m-%d_%H-%M-%S") + save_path = os.path.join(os.getcwd(), f"results/{folder_name}") + os.makedirs(save_path, exist_ok=True) + + # Read from config + num_rounds = context.run_config["num-server-rounds"] + cfg = DictConfig(replace_keys(unflatten_dict(context.run_config))) + + # Get initial model weights + init_model = get_model(cfg.model) + init_model_parameters = get_parameters(init_model) + init_model_parameters = ndarrays_to_parameters(init_model_parameters) + + # Define strategy + strategy = FlowerTuneLlm( + fraction_fit=cfg.strategy.fraction_fit, + fraction_evaluate=cfg.strategy.fraction_evaluate, + on_fit_config_fn=get_on_fit_config(save_path), + fit_metrics_aggregation_fn=fit_weighted_average, + initial_parameters=init_model_parameters, + evaluate_fn=get_evaluate_fn( + cfg.model, cfg.train.save_every_round, num_rounds, save_path + ), + ) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Flower ServerApp +app = ServerApp(server_fn=server_fn) diff --git a/src/py/flwr/cli/new/templates/app/code/flwr_tune/strategy.py.tpl b/src/py/flwr/cli/new/templates/app/code/flwr_tune/strategy.py.tpl new file mode 100644 index 000000000000..8accd70c4e76 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/flwr_tune/strategy.py.tpl @@ -0,0 +1,83 @@ +"""$project_name: A Flower / FlowerTune app.""" + +from io import BytesIO +from logging import INFO, WARN +from typing import List, Tuple, Union + +from flwr.common import FitIns, FitRes, Parameters, log, parameters_to_ndarrays +from flwr.server.client_manager import ClientManager +from flwr.server.client_proxy import ClientProxy +from flwr.server.strategy import FedAvg + + +class FlowerTuneLlm(FedAvg): + """Customised FedAvg strategy implementation. + + This class behaves just like FedAvg but also tracks the communication + costs associated with `fit` over FL rounds. 
+ """ + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.comm_tracker = CommunicationTracker() + + def configure_fit( + self, server_round: int, parameters: Parameters, client_manager: ClientManager + ): + """Configure the next round of training.""" + return_clients = super().configure_fit(server_round, parameters, client_manager) + + # Test communication costs + fit_ins_list = [fit_ins for _, fit_ins in return_clients] + self.comm_tracker.track(fit_ins_list) + + return return_clients + + def aggregate_fit( + self, + server_round: int, + results: List[Tuple[ClientProxy, FitRes]], + failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], + ): + """Aggregate fit results using weighted average.""" + # Test communication costs + fit_res_list = [fit_res for _, fit_res in results] + self.comm_tracker.track(fit_res_list) + + parameters_aggregated, metrics_aggregated = super().aggregate_fit( + server_round, results, failures + ) + + return parameters_aggregated, metrics_aggregated + + +class CommunicationTracker: + """Communication costs tracker over FL rounds.""" + def __init__(self): + self.curr_comm_cost = 0.0 + + @staticmethod + def _compute_bytes(parameters): + return sum([BytesIO(t).getbuffer().nbytes for t in parameters.tensors]) + + def track(self, fit_list: List[Union[FitIns, FitRes]]): + size_bytes_list = [ + self._compute_bytes(fit_ele.parameters) + for fit_ele in fit_list + ] + comm_cost = sum(size_bytes_list) / 1024**2 + + self.curr_comm_cost += comm_cost + log( + INFO, + "Communication budget: used %.2f MB (+%.2f MB this round) / 200,000 MB", + self.curr_comm_cost, + comm_cost, + ) + + if self.curr_comm_cost > 2e5: + log( + WARN, + "The accumulated communication cost has exceeded 200,000 MB. 
" + "Please consider reducing it if you plan to participate " + "FlowerTune LLM Leaderboard.", + ) diff --git a/src/py/flwr/cli/new/templates/app/code/model.baseline.py.tpl b/src/py/flwr/cli/new/templates/app/code/model.baseline.py.tpl new file mode 100644 index 000000000000..8a914fcf60d1 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/model.baseline.py.tpl @@ -0,0 +1,80 @@ +"""$project_name: A Flower Baseline.""" + +from collections import OrderedDict + +import torch +import torch.nn.functional as F +from torch import nn + + +class Net(nn.Module): + """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz').""" + + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(3, 6, 5) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x): + """Do forward.""" + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = x.view(-1, 16 * 5 * 5) + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + return self.fc3(x) + + +def train(net, trainloader, epochs, device): + """Train the model on the training set.""" + net.to(device) # move model to GPU if available + criterion = torch.nn.CrossEntropyLoss() + criterion.to(device) + optimizer = torch.optim.SGD(net.parameters(), lr=0.1, momentum=0.9) + net.train() + running_loss = 0.0 + for _ in range(epochs): + for batch in trainloader: + images = batch["img"] + labels = batch["label"] + optimizer.zero_grad() + loss = criterion(net(images.to(device)), labels.to(device)) + loss.backward() + optimizer.step() + running_loss += loss.item() + + avg_trainloss = running_loss / len(trainloader) + return avg_trainloss + + +def test(net, testloader, device): + """Validate the model on the test set.""" + net.to(device) + criterion = torch.nn.CrossEntropyLoss() + correct, loss = 0, 0.0 + with torch.no_grad(): + for batch in testloader: + images = 
batch["img"].to(device) + labels = batch["label"].to(device) + outputs = net(images) + loss += criterion(outputs, labels).item() + correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() + accuracy = correct / len(testloader.dataset) + loss = loss / len(testloader) + return loss, accuracy + + +def get_weights(net): + """Extract model parameters as numpy arrays from state_dict.""" + return [val.cpu().numpy() for _, val in net.state_dict().items()] + + +def set_weights(net, parameters): + """Apply parameters to an existing model.""" + params_dict = zip(net.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) diff --git a/src/py/flwr/cli/new/templates/app/code/server.baseline.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.baseline.py.tpl new file mode 100644 index 000000000000..ea536e3efffb --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/server.baseline.py.tpl @@ -0,0 +1,46 @@ +"""$project_name: A Flower Baseline.""" + +from typing import List, Tuple + +from flwr.common import Context, Metrics, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg +from $import_name.model import Net, get_weights + + +# Define metric aggregation function +def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: + """Do weighted average of accuracy metric.""" + # Multiply accuracy of each client by number of examples used + accuracies = [num_examples * float(m["accuracy"]) for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + # Aggregate and return custom metric (weighted average) + return {"accuracy": sum(accuracies) / sum(examples)} + + +def server_fn(context: Context): + """Construct components that set the ServerApp behaviour.""" + # Read from config + num_rounds = context.run_config["num-server-rounds"] + fraction_fit = 
context.run_config["fraction-fit"] + + # Initialize model parameters + ndarrays = get_weights(Net()) + parameters = ndarrays_to_parameters(ndarrays) + + # Define strategy + strategy = FedAvg( + fraction_fit=float(fraction_fit), + fraction_evaluate=1.0, + min_available_clients=2, + initial_parameters=parameters, + evaluate_metrics_aggregation_fn=weighted_average, + ) + config = ServerConfig(num_rounds=int(num_rounds)) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Create ServerApp +app = ServerApp(server_fn=server_fn) diff --git a/src/py/flwr/cli/new/templates/app/code/server.hf.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.hf.py.tpl deleted file mode 100644 index d7d86931335b..000000000000 --- a/src/py/flwr/cli/new/templates/app/code/server.hf.py.tpl +++ /dev/null @@ -1,17 +0,0 @@ -"""$project_name: A Flower / HuggingFace Transformers app.""" - -from flwr.server.strategy import FedAvg -from flwr.server import ServerApp, ServerConfig - - -# Define strategy -strategy = FedAvg( - fraction_fit=1.0, - fraction_evaluate=1.0, -) - -# Start server -app = ServerApp( - config=ServerConfig(num_rounds=3), - strategy=strategy, -) diff --git a/src/py/flwr/cli/new/templates/app/code/server.huggingface.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.huggingface.py.tpl new file mode 100644 index 000000000000..16f94f0a64e9 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/server.huggingface.py.tpl @@ -0,0 +1,38 @@ +"""$project_name: A Flower / $framework_str app.""" + +from flwr.common import Context, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg +from transformers import AutoModelForSequenceClassification + +from $import_name.task import get_weights + + +def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] + fraction_fit = context.run_config["fraction-fit"] + + # Initialize global model + 
model_name = context.run_config["model-name"] + num_labels = context.run_config["num-labels"] + net = AutoModelForSequenceClassification.from_pretrained( + model_name, num_labels=num_labels + ) + + weights = get_weights(net) + initial_parameters = ndarrays_to_parameters(weights) + + # Define strategy + strategy = FedAvg( + fraction_fit=fraction_fit, + fraction_evaluate=1.0, + initial_parameters=initial_parameters, + ) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Create ServerApp +app = ServerApp(server_fn=server_fn) diff --git a/src/py/flwr/cli/new/templates/app/code/server.jax.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.jax.py.tpl index 53cff7b905f4..60bbcaf3c175 100644 --- a/src/py/flwr/cli/new/templates/app/code/server.jax.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/server.jax.py.tpl @@ -1,12 +1,26 @@ -"""$project_name: A Flower / JAX app.""" +"""$project_name: A Flower / $framework_str app.""" -import flwr as fl +from flwr.common import Context, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg +from $import_name.task import get_params, load_model -# Configure the strategy -strategy = fl.server.strategy.FedAvg() -# Flower ServerApp -app = fl.server.ServerApp( - config=fl.server.ServerConfig(num_rounds=3), - strategy=strategy, -) +def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] + input_dim = context.run_config["input-dim"] + + # Initialize global model + params = get_params(load_model((input_dim,))) + initial_parameters = ndarrays_to_parameters(params) + + # Define strategy + strategy = FedAvg(initial_parameters=initial_parameters) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Create ServerApp +app = ServerApp(server_fn=server_fn) diff --git 
a/src/py/flwr/cli/new/templates/app/code/server.mlx.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.mlx.py.tpl index b475e0e7dc36..6d00e84fe383 100644 --- a/src/py/flwr/cli/new/templates/app/code/server.mlx.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/server.mlx.py.tpl @@ -1,15 +1,31 @@ -"""$project_name: A Flower / MLX app.""" +"""$project_name: A Flower / $framework_str app.""" -from flwr.server import ServerApp, ServerConfig +from flwr.common import Context, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig from flwr.server.strategy import FedAvg +from $import_name.task import MLP, get_params -# Define strategy -strategy = FedAvg() +def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] + + num_classes = 10 + num_layers = context.run_config["num-layers"] + input_dim = context.run_config["input-dim"] + hidden_dim = context.run_config["hidden-dim"] + + # Initialize global model + model = MLP(num_layers, input_dim, hidden_dim, num_classes) + params = get_params(model) + initial_parameters = ndarrays_to_parameters(params) + + # Define strategy + strategy = FedAvg(initial_parameters=initial_parameters) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) # Create ServerApp -app = ServerApp( - config=ServerConfig(num_rounds=3), - strategy=strategy, -) +app = ServerApp(server_fn=server_fn) diff --git a/src/py/flwr/cli/new/templates/app/code/server.numpy.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.numpy.py.tpl index 03f95ae35cfd..ec1ff52811af 100644 --- a/src/py/flwr/cli/new/templates/app/code/server.numpy.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/server.numpy.py.tpl @@ -1,12 +1,25 @@ -"""$project_name: A Flower / NumPy app.""" +"""$project_name: A Flower / $framework_str app.""" -import flwr as fl +from flwr.common import Context, ndarrays_to_parameters +from flwr.server import 
ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg +from $import_name.task import get_dummy_model -# Configure the strategy -strategy = fl.server.strategy.FedAvg() -# Flower ServerApp -app = fl.server.ServerApp( - config=fl.server.ServerConfig(num_rounds=1), - strategy=strategy, -) +def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] + + # Initial model + model = get_dummy_model() + dummy_parameters = ndarrays_to_parameters([model]) + + # Define strategy + strategy = FedAvg(initial_parameters=dummy_parameters) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Create ServerApp +app = ServerApp(server_fn=server_fn) diff --git a/src/py/flwr/cli/new/templates/app/code/server.pytorch.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.pytorch.py.tpl index dc635f79a664..9fe5f0fedc28 100644 --- a/src/py/flwr/cli/new/templates/app/code/server.pytorch.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/server.pytorch.py.tpl @@ -1,28 +1,31 @@ -"""$project_name: A Flower / PyTorch app.""" +"""$project_name: A Flower / $framework_str app.""" -from flwr.common import ndarrays_to_parameters -from flwr.server import ServerApp, ServerConfig +from flwr.common import Context, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig from flwr.server.strategy import FedAvg - from $import_name.task import Net, get_weights -# Initialize model parameters -ndarrays = get_weights(Net()) -parameters = ndarrays_to_parameters(ndarrays) +def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] + fraction_fit = context.run_config["fraction-fit"] + + # Initialize model parameters + ndarrays = get_weights(Net()) + parameters = ndarrays_to_parameters(ndarrays) + # Define strategy + strategy = FedAvg( + fraction_fit=fraction_fit, + 
fraction_evaluate=1.0, + min_available_clients=2, + initial_parameters=parameters, + ) + config = ServerConfig(num_rounds=num_rounds) -# Define strategy -strategy = FedAvg( - fraction_fit=1.0, - fraction_evaluate=1.0, - min_available_clients=2, - initial_parameters=parameters, -) + return ServerAppComponents(strategy=strategy, config=config) # Create ServerApp -app = ServerApp( - config=ServerConfig(num_rounds=3), - strategy=strategy, -) +app = ServerApp(server_fn=server_fn) diff --git a/src/py/flwr/cli/new/templates/app/code/server.sklearn.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.sklearn.py.tpl index 266a53ac5794..b1487b01d2d3 100644 --- a/src/py/flwr/cli/new/templates/app/code/server.sklearn.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/server.sklearn.py.tpl @@ -1,17 +1,36 @@ -"""$project_name: A Flower / Scikit-Learn app.""" +"""$project_name: A Flower / $framework_str app.""" -from flwr.server import ServerApp, ServerConfig +from flwr.common import Context, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig from flwr.server.strategy import FedAvg +from $import_name.task import get_model, get_model_params, set_initial_params -strategy = FedAvg( - fraction_fit=1.0, - fraction_evaluate=1.0, - min_available_clients=2, -) +def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] + + # Create LogisticRegression Model + penalty = context.run_config["penalty"] + local_epochs = context.run_config["local-epochs"] + model = get_model(penalty, local_epochs) + + # Setting initial parameters, akin to model.compile for keras models + set_initial_params(model) + + initial_parameters = ndarrays_to_parameters(get_model_params(model)) + + # Define strategy + strategy = FedAvg( + fraction_fit=1.0, + fraction_evaluate=1.0, + min_available_clients=2, + initial_parameters=initial_parameters, + ) + config = ServerConfig(num_rounds=num_rounds) + + return 
ServerAppComponents(strategy=strategy, config=config) + + # Create ServerApp -app = ServerApp( - config=ServerConfig(num_rounds=3), - strategy=strategy, -) +app = ServerApp(server_fn=server_fn) diff --git a/src/py/flwr/cli/new/templates/app/code/server.tensorflow.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.tensorflow.py.tpl index 8d092164a468..050da37cf527 100644 --- a/src/py/flwr/cli/new/templates/app/code/server.tensorflow.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/server.tensorflow.py.tpl @@ -1,27 +1,29 @@ -"""$project_name: A Flower / TensorFlow app.""" +"""$project_name: A Flower / $framework_str app.""" -from flwr.common import ndarrays_to_parameters -from flwr.server import ServerApp, ServerConfig +from flwr.common import Context, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig from flwr.server.strategy import FedAvg from $import_name.task import load_model -# Define config -config = ServerConfig(num_rounds=3) -parameters = ndarrays_to_parameters(load_model().get_weights()) +def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] -# Define strategy -strategy = FedAvg( - fraction_fit=1.0, - fraction_evaluate=1.0, - min_available_clients=2, - initial_parameters=parameters, -) + # Get parameters to initialize global model + parameters = ndarrays_to_parameters(load_model().get_weights()) + # Define strategy + strategy = FedAvg( + fraction_fit=1.0, + fraction_evaluate=1.0, + min_available_clients=2, + initial_parameters=parameters, + ) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) # Create ServerApp -app = ServerApp( - config=config, - strategy=strategy, -) +app = ServerApp(server_fn=server_fn) diff --git a/src/py/flwr/cli/new/templates/app/code/strategy.baseline.py.tpl b/src/py/flwr/cli/new/templates/app/code/strategy.baseline.py.tpl new file mode 100644 index 
000000000000..5ad8041381d6 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/strategy.baseline.py.tpl @@ -0,0 +1 @@ +"""$project_name: A Flower Baseline.""" diff --git a/src/py/flwr/cli/new/templates/app/code/task.hf.py.tpl b/src/py/flwr/cli/new/templates/app/code/task.huggingface.py.tpl similarity index 67% rename from src/py/flwr/cli/new/templates/app/code/task.hf.py.tpl rename to src/py/flwr/cli/new/templates/app/code/task.huggingface.py.tpl index 8e89add66835..1c50e85d7103 100644 --- a/src/py/flwr/cli/new/templates/app/code/task.hf.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/task.huggingface.py.tpl @@ -1,32 +1,47 @@ -"""$project_name: A Flower / HuggingFace Transformers app.""" +"""$project_name: A Flower / $framework_str app.""" import warnings from collections import OrderedDict import torch +import transformers +from datasets.utils.logging import disable_progress_bar from evaluate import load as load_metric +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoTokenizer, DataCollatorWithPadding -from flwr_datasets import FederatedDataset - warnings.filterwarnings("ignore", category=UserWarning) -DEVICE = torch.device("cpu") -CHECKPOINT = "distilbert-base-uncased" # transformer model checkpoint +warnings.filterwarnings("ignore", category=FutureWarning) +disable_progress_bar() +transformers.logging.set_verbosity_error() + + +fds = None # Cache FederatedDataset -def load_data(partition_id, num_clients): +def load_data(partition_id: int, num_partitions: int, model_name: str): """Load IMDB data (training and eval)""" - fds = FederatedDataset(dataset="imdb", partitioners={"train": num_clients}) + # Only initialize `FederatedDataset` once + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="stanfordnlp/imdb", + 
partitioners={"train": partitioner}, + ) partition = fds.load_partition(partition_id) # Divide data: 80% train, 20% test partition_train_test = partition.train_test_split(test_size=0.2, seed=42) - tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT) + tokenizer = AutoTokenizer.from_pretrained(model_name) def tokenize_function(examples): - return tokenizer(examples["text"], truncation=True) + return tokenizer( + examples["text"], truncation=True, add_special_tokens=True, max_length=512 + ) partition_train_test = partition_train_test.map(tokenize_function, batched=True) partition_train_test = partition_train_test.remove_columns("text") @@ -47,12 +62,12 @@ def load_data(partition_id, num_clients): return trainloader, testloader -def train(net, trainloader, epochs): +def train(net, trainloader, epochs, device): optimizer = AdamW(net.parameters(), lr=5e-5) net.train() for _ in range(epochs): for batch in trainloader: - batch = {k: v.to(DEVICE) for k, v in batch.items()} + batch = {k: v.to(device) for k, v in batch.items()} outputs = net(**batch) loss = outputs.loss loss.backward() @@ -60,12 +75,12 @@ def train(net, trainloader, epochs): optimizer.zero_grad() -def test(net, testloader): +def test(net, testloader, device): metric = load_metric("accuracy") loss = 0 net.eval() for batch in testloader: - batch = {k: v.to(DEVICE) for k, v in batch.items()} + batch = {k: v.to(device) for k, v in batch.items()} with torch.no_grad(): outputs = net(**batch) logits = outputs.logits diff --git a/src/py/flwr/cli/new/templates/app/code/task.jax.py.tpl b/src/py/flwr/cli/new/templates/app/code/task.jax.py.tpl index 82f080ebcdcb..428f752845c1 100644 --- a/src/py/flwr/cli/new/templates/app/code/task.jax.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/task.jax.py.tpl @@ -1,10 +1,10 @@ -"""$project_name: A Flower / JAX app.""" +"""$project_name: A Flower / $framework_str app.""" import jax import jax.numpy as jnp +import numpy as np from sklearn.datasets import make_regression from 
sklearn.model_selection import train_test_split -import numpy as np key = jax.random.PRNGKey(0) diff --git a/src/py/flwr/cli/new/templates/app/code/task.mlx.py.tpl b/src/py/flwr/cli/new/templates/app/code/task.mlx.py.tpl index bcd4dde93310..63db6c28f034 100644 --- a/src/py/flwr/cli/new/templates/app/code/task.mlx.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/task.mlx.py.tpl @@ -1,14 +1,16 @@ -"""$project_name: A Flower / MLX app.""" +"""$project_name: A Flower / $framework_str app.""" import mlx.core as mx import mlx.nn as nn import numpy as np -from datasets.utils.logging import disable_progress_bar from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner +from datasets.utils.logging import disable_progress_bar disable_progress_bar() + class MLP(nn.Module): """A simple MLP.""" @@ -43,8 +45,19 @@ def batch_iterate(batch_size, X, y): yield X[ids], y[ids] -def load_data(partition_id, num_clients): - fds = FederatedDataset(dataset="mnist", partitioners={"train": num_clients}) +fds = None # Cache FederatedDataset + + +def load_data(partition_id: int, num_partitions: int): + # Only initialize `FederatedDataset` once + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="ylecun/mnist", + partitioners={"train": partitioner}, + trust_remote_code=True, + ) partition = fds.load_partition(partition_id) partition_splits = partition.train_test_split(test_size=0.2, seed=42) diff --git a/src/py/flwr/cli/new/templates/app/code/task.numpy.py.tpl b/src/py/flwr/cli/new/templates/app/code/task.numpy.py.tpl new file mode 100644 index 000000000000..9b76fc055caf --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/task.numpy.py.tpl @@ -0,0 +1,7 @@ +"""$project_name: A Flower / $framework_str app.""" + +import numpy as np + + +def get_dummy_model(): + return np.ones((1, 1)) diff --git a/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl 
b/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl index b30c65a285b5..a3c015bfee88 100644 --- a/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl @@ -1,16 +1,14 @@ -"""$project_name: A Flower / PyTorch app.""" +"""$project_name: A Flower / $framework_str app.""" from collections import OrderedDict import torch import torch.nn as nn import torch.nn.functional as F +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner from torch.utils.data import DataLoader -from torchvision.datasets import CIFAR10 from torchvision.transforms import Compose, Normalize, ToTensor -from flwr_datasets import FederatedDataset - -DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") class Net(nn.Module): @@ -34,9 +32,19 @@ class Net(nn.Module): return self.fc3(x) -def load_data(partition_id, num_partitions): +fds = None # Cache FederatedDataset + + +def load_data(partition_id: int, num_partitions: int): """Load partition CIFAR10 data.""" - fds = FederatedDataset(dataset="cifar10", partitioners={"train": num_partitions}) + # Only initialize `FederatedDataset` once + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="uoft-cs/cifar10", + partitioners={"train": partitioner}, + ) partition = fds.load_partition(partition_id) # Divide data on each node: 80% train, 20% test partition_train_test = partition.train_test_split(test_size=0.2, seed=42) @@ -55,44 +63,41 @@ def load_data(partition_id, num_partitions): return trainloader, testloader -def train(net, trainloader, valloader, epochs, device): +def train(net, trainloader, epochs, device): """Train the model on the training set.""" net.to(device) # move model to GPU if available criterion = torch.nn.CrossEntropyLoss().to(device) - optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9) + optimizer = 
torch.optim.Adam(net.parameters(), lr=0.01) net.train() + running_loss = 0.0 for _ in range(epochs): for batch in trainloader: images = batch["img"] labels = batch["label"] optimizer.zero_grad() - criterion(net(images.to(DEVICE)), labels.to(DEVICE)).backward() + loss = criterion(net(images.to(device)), labels.to(device)) + loss.backward() optimizer.step() + running_loss += loss.item() - train_loss, train_acc = test(net, trainloader) - val_loss, val_acc = test(net, valloader) - - results = { - "train_loss": train_loss, - "train_accuracy": train_acc, - "val_loss": val_loss, - "val_accuracy": val_acc, - } - return results + avg_trainloss = running_loss / len(trainloader) + return avg_trainloss -def test(net, testloader): +def test(net, testloader, device): """Validate the model on the test set.""" + net.to(device) criterion = torch.nn.CrossEntropyLoss() correct, loss = 0, 0.0 with torch.no_grad(): for batch in testloader: - images = batch["img"].to(DEVICE) - labels = batch["label"].to(DEVICE) + images = batch["img"].to(device) + labels = batch["label"].to(device) outputs = net(images) loss += criterion(outputs, labels).item() correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() accuracy = correct / len(testloader.dataset) + loss = loss / len(testloader) return loss, accuracy diff --git a/src/py/flwr/cli/new/templates/app/code/task.sklearn.py.tpl b/src/py/flwr/cli/new/templates/app/code/task.sklearn.py.tpl new file mode 100644 index 000000000000..52c13edc032c --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/task.sklearn.py.tpl @@ -0,0 +1,67 @@ +"""$project_name: A Flower / $framework_str app.""" + +import numpy as np +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner +from sklearn.linear_model import LogisticRegression + +fds = None # Cache FederatedDataset + + +def load_data(partition_id: int, num_partitions: int): + """Load partition MNIST data.""" + # Only initialize `FederatedDataset` once + 
global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="mnist", + partitioners={"train": partitioner}, + ) + + dataset = fds.load_partition(partition_id, "train").with_format("numpy") + + X, y = dataset["image"].reshape((len(dataset), -1)), dataset["label"] + + # Split the on edge data: 80% train, 20% test + X_train, X_test = X[: int(0.8 * len(X))], X[int(0.8 * len(X)) :] + y_train, y_test = y[: int(0.8 * len(y))], y[int(0.8 * len(y)) :] + + return X_train, X_test, y_train, y_test + + +def get_model(penalty: str, local_epochs: int): + + return LogisticRegression( + penalty=penalty, + max_iter=local_epochs, + warm_start=True, + ) + + +def get_model_params(model): + if model.fit_intercept: + params = [ + model.coef_, + model.intercept_, + ] + else: + params = [model.coef_] + return params + + +def set_model_params(model, params): + model.coef_ = params[0] + if model.fit_intercept: + model.intercept_ = params[1] + return model + + +def set_initial_params(model): + n_classes = 10 # MNIST has 10 classes + n_features = 784 # Number of features in dataset + model.classes_ = np.array([i for i in range(10)]) + + model.coef_ = np.zeros((n_classes, n_features)) + if model.fit_intercept: + model.intercept_ = np.zeros((n_classes,)) diff --git a/src/py/flwr/cli/new/templates/app/code/task.tensorflow.py.tpl b/src/py/flwr/cli/new/templates/app/code/task.tensorflow.py.tpl index fa07f93713ed..cc782b5446ec 100644 --- a/src/py/flwr/cli/new/templates/app/code/task.tensorflow.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/task.tensorflow.py.tpl @@ -1,24 +1,48 @@ -"""$project_name: A Flower / TensorFlow app.""" +"""$project_name: A Flower / $framework_str app.""" import os -import tensorflow as tf +import keras +from keras import layers from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner # Make TensorFlow log less verbose os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" + def 
load_model(): - # Load model and data (MobileNetV2, CIFAR-10) - model = tf.keras.applications.MobileNetV2((32, 32, 3), classes=10, weights=None) + # Define a simple CNN for CIFAR-10 and set Adam optimizer + model = keras.Sequential( + [ + keras.Input(shape=(32, 32, 3)), + layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + layers.Flatten(), + layers.Dropout(0.5), + layers.Dense(10, activation="softmax"), + ] + ) model.compile("adam", "sparse_categorical_crossentropy", metrics=["accuracy"]) return model +fds = None # Cache FederatedDataset + + def load_data(partition_id, num_partitions): # Download and partition dataset - fds = FederatedDataset(dataset="cifar10", partitioners={"train": num_partitions}) + # Only initialize `FederatedDataset` once + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="uoft-cs/cifar10", + partitioners={"train": partitioner}, + ) partition = fds.load_partition(partition_id, "train") partition.set_format("numpy") diff --git a/src/py/flwr/cli/new/templates/app/code/utils.baseline.py.tpl b/src/py/flwr/cli/new/templates/app/code/utils.baseline.py.tpl new file mode 100644 index 000000000000..5ad8041381d6 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/utils.baseline.py.tpl @@ -0,0 +1 @@ +"""$project_name: A Flower Baseline.""" diff --git a/src/py/flwr/cli/new/templates/app/pyproject.baseline.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.baseline.toml.tpl new file mode 100644 index 000000000000..c70580009392 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/pyproject.baseline.toml.tpl @@ -0,0 +1,138 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "$package_name" +version = "1.0.0" +description = "" +license = "Apache-2.0" +dependencies = [ + 
"flwr[simulation]>=1.12.0", + "flwr-datasets[vision]>=0.3.0", + "torch==2.2.1", + "torchvision==0.17.1", +] + +[tool.hatch.metadata] +allow-direct-references = true + +[project.optional-dependencies] +dev = [ + "isort==5.13.2", + "black==24.2.0", + "docformatter==1.7.5", + "mypy==1.8.0", + "pylint==3.2.6", + "flake8==5.0.4", + "pytest==6.2.4", + "pytest-watch==4.2.0", + "ruff==0.1.9", + "types-requests==2.31.0.20240125", +] + +[tool.isort] +profile = "black" +known_first_party = ["flwr"] + +[tool.black] +line-length = 88 +target-version = ["py38", "py39", "py310", "py311"] + +[tool.pytest.ini_options] +minversion = "6.2" +addopts = "-qq" +testpaths = [ + "flwr_baselines", +] + +[tool.mypy] +ignore_missing_imports = true +strict = false +plugins = "numpy.typing.mypy_plugin" + +[tool.pylint."MESSAGES CONTROL"] +disable = "duplicate-code,too-few-public-methods,useless-import-alias" +good-names = "i,j,k,_,x,y,X,Y,K,N" +max-args = 10 +max-attributes = 15 +max-locals = 36 +max-branches = 20 +max-statements = 55 + +[tool.pylint.typecheck] +generated-members = "numpy.*, torch.*, tensorflow.*" + +[[tool.mypy.overrides]] +module = [ + "importlib.metadata.*", + "importlib_metadata.*", +] +follow_imports = "skip" +follow_imports_for_stubs = true +disallow_untyped_calls = false + +[[tool.mypy.overrides]] +module = "torch.*" +follow_imports = "skip" +follow_imports_for_stubs = true + +[tool.docformatter] +wrap-summaries = 88 +wrap-descriptions = 88 + +[tool.ruff] +target-version = "py38" +line-length = 88 +select = ["D", "E", "F", "W", "B", "ISC", "C4"] +fixable = ["D", "E", "F", "W", "B", "ISC", "C4"] +ignore = ["B024", "B027"] +exclude = [ + ".bzr", + ".direnv", + ".eggs", + ".git", + ".hg", + ".mypy_cache", + ".nox", + ".pants.d", + ".pytype", + ".ruff_cache", + ".svn", + ".tox", + ".venv", + "__pypackages__", + "_build", + "buck-out", + "build", + "dist", + "node_modules", + "venv", + "proto", +] + +[tool.ruff.pydocstyle] +convention = "numpy" + 
+[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "$username" + +[tool.flwr.app.components] +serverapp = "$import_name.server_app:app" +clientapp = "$import_name.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +fraction-fit = 0.5 +local-epochs = 1 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 +options.backend.client-resources.num-cpus = 2 +options.backend.client-resources.num-gpus = 0.0 diff --git a/src/py/flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl new file mode 100644 index 000000000000..d34985d50433 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl @@ -0,0 +1,67 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "$package_name" +version = "1.0.0" +description = "" +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]>=1.12.0", + "flwr-datasets>=0.3.0", + "trl==0.8.1", + "bitsandbytes==0.43.0", + "scipy==1.13.0", + "peft==0.6.2", + "transformers==4.43.1", + "sentencepiece==0.2.0", + "omegaconf==2.3.0", + "hf_transfer==0.1.8", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "$username" + +[tool.flwr.app.components] +serverapp = "$import_name.server_app:app" +clientapp = "$import_name.client_app:app" + +[tool.flwr.app.config] +model.name = "mistralai/Mistral-7B-v0.3" +model.quantization = 4 +model.gradient-checkpointing = true +model.lora.peft-lora-r = 32 +model.lora.peft-lora-alpha = 64 +train.save-every-round = 5 +train.learning-rate-max = 5e-5 +train.learning-rate-min = 1e-6 +train.seq-length = 512 +train.training-arguments.output-dir = "" +train.training-arguments.learning-rate = "" +train.training-arguments.per-device-train-batch-size = 16 +train.training-arguments.gradient-accumulation-steps = 1 
+train.training-arguments.logging-steps = 10 +train.training-arguments.num-train-epochs = 3 +train.training-arguments.max-steps = 10 +train.training-arguments.save-steps = 1000 +train.training-arguments.save-total-limit = 10 +train.training-arguments.gradient-checkpointing = true +train.training-arguments.lr-scheduler-type = "constant" +strategy.fraction-fit = $fraction_fit +strategy.fraction-evaluate = 0.0 +num-server-rounds = 200 + +[tool.flwr.app.config.static] +dataset.name = "$dataset_name" + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = $num_clients +options.backend.client-resources.num-cpus = 6 +options.backend.client-resources.num-gpus = 1.0 diff --git a/src/py/flwr/cli/new/templates/app/pyproject.hf.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.hf.toml.tpl deleted file mode 100644 index 6a235b7b15cf..000000000000 --- a/src/py/flwr/cli/new/templates/app/pyproject.hf.toml.tpl +++ /dev/null @@ -1,37 +0,0 @@ -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[project] -name = "$package_name" -version = "1.0.0" -description = "" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] -license = { text = "Apache License (2.0)" } -dependencies = [ - "flwr[simulation]>=1.8.0,<2.0", - "flwr-datasets>=0.0.2,<1.0.0", - "torch==2.2.1", - "transformers>=4.30.0,<5.0" - "evaluate>=0.4.0,<1.0" - "datasets>=2.0.0, <3.0" - "scikit-learn>=1.3.1, <2.0" -] - -[tool.hatch.build.targets.wheel] -packages = ["."] - -[flower] -publisher = "$username" - -[flower.components] -serverapp = "$import_name.server:app" -clientapp = "$import_name.client:app" - -[flower.engine] -name = "simulation" - -[flower.engine.simulation.supernode] -num = 2 diff --git a/src/py/flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl new file mode 100644 index 000000000000..3515cbd69d17 --- /dev/null 
+++ b/src/py/flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl @@ -0,0 +1,46 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "$package_name" +version = "1.0.0" +description = "" +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]>=1.12.0", + "flwr-datasets>=0.3.0", + "torch==2.2.1", + "transformers>=4.30.0,<5.0", + "evaluate>=0.4.0,<1.0", + "datasets>=2.0.0, <3.0", + "scikit-learn>=1.3.1, <2.0", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "$username" + +[tool.flwr.app.components] +serverapp = "$import_name.server_app:app" +clientapp = "$import_name.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +fraction-fit = 0.5 +local-epochs = 1 +model-name = "prajjwal1/bert-tiny" # Set a larger model if you have access to more GPU resources +num-labels = 2 + +[tool.flwr.federations] +default = "localhost" + +[tool.flwr.federations.localhost] +options.num-supernodes = 10 + +[tool.flwr.federations.localhost-gpu] +options.num-supernodes = 10 +options.backend.client-resources.num-cpus = 4 # each ClientApp assumes to use 4CPUs +options.backend.client-resources.num-gpus = 0.25 # at most 4 ClientApps will run in a given GPU diff --git a/src/py/flwr/cli/new/templates/app/pyproject.jax.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.jax.toml.tpl index 1d32cfd77481..7c55d3654a08 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.jax.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.jax.toml.tpl @@ -6,23 +6,30 @@ build-backend = "hatchling.build" name = "$package_name" version = "1.0.0" description = "" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] -license = {text = "Apache License (2.0)"} +license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.8.0,<2.0", - "jax==0.4.26", - "jaxlib==0.4.26", - "scikit-learn==1.4.2", + "flwr[simulation]>=1.12.0", + "jax==0.4.30", + "jaxlib==0.4.30", + 
"scikit-learn==1.3.2", ] [tool.hatch.build.targets.wheel] packages = ["."] -[flower] +[tool.flwr.app] publisher = "$username" -[flower.components] -serverapp = "$import_name.server:app" -clientapp = "$import_name.client:app" +[tool.flwr.app.components] +serverapp = "$import_name.server_app:app" +clientapp = "$import_name.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +input-dim = 3 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 diff --git a/src/py/flwr/cli/new/templates/app/pyproject.mlx.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.mlx.toml.tpl index 321dfaab41cc..9ea11ff3fc0c 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.mlx.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.mlx.toml.tpl @@ -6,29 +6,35 @@ build-backend = "hatchling.build" name = "$package_name" version = "1.0.0" description = "" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] -license = { text = "Apache License (2.0)" } +license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.8.0,<2.0", - "flwr-datasets[vision]>=0.0.2,<1.0.0", - "mlx==0.10.0", + "flwr[simulation]>=1.12.0", + "flwr-datasets[vision]>=0.3.0", + "mlx==0.16.1", "numpy==1.24.4", ] [tool.hatch.build.targets.wheel] packages = ["."] -[flower] +[tool.flwr.app] publisher = "$username" -[flower.components] -serverapp = "$import_name.server:app" -clientapp = "$import_name.client:app" +[tool.flwr.app.components] +serverapp = "$import_name.server_app:app" +clientapp = "$import_name.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +local-epochs = 1 +num-layers = 2 +input-dim = 784 # 28*28 +hidden-dim = 32 +batch-size = 256 +lr = 0.1 -[flower.engine] -name = "simulation" +[tool.flwr.federations] +default = "local-simulation" -[flower.engine.simulation.supernode] -num = 2 +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 diff --git 
a/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl index 6b1c40d12561..9f8f3aaab554 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl @@ -6,27 +6,27 @@ build-backend = "hatchling.build" name = "$package_name" version = "1.0.0" description = "" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] -license = { text = "Apache License (2.0)" } +license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.8.0,<2.0", + "flwr[simulation]>=1.12.0", "numpy>=1.21.0", ] [tool.hatch.build.targets.wheel] packages = ["."] -[flower] +[tool.flwr.app] publisher = "$username" -[flower.components] -serverapp = "$import_name.server:app" -clientapp = "$import_name.client:app" +[tool.flwr.app.components] +serverapp = "$import_name.server_app:app" +clientapp = "$import_name.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 -[flower.engine] -name = "simulation" +[tool.flwr.federations] +default = "local-simulation" -[flower.engine.simulation.supernode] -num = 2 +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 diff --git a/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl index df404d178495..fe5ac7735d66 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl @@ -6,13 +6,10 @@ build-backend = "hatchling.build" name = "$package_name" version = "1.0.0" description = "" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] -license = { text = "Apache License (2.0)" } +license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.8.0,<2.0", - "flwr-datasets[vision]>=0.0.2,<1.0.0", + "flwr[simulation]>=1.12.0", + "flwr-datasets[vision]>=0.3.0", "torch==2.2.1", "torchvision==0.17.1", ] @@ -20,15 
+17,20 @@ dependencies = [ [tool.hatch.build.targets.wheel] packages = ["."] -[flower] +[tool.flwr.app] publisher = "$username" -[flower.components] -serverapp = "$import_name.server:app" -clientapp = "$import_name.client:app" +[tool.flwr.app.components] +serverapp = "$import_name.server_app:app" +clientapp = "$import_name.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +fraction-fit = 0.5 +local-epochs = 1 -[flower.engine] -name = "simulation" +[tool.flwr.federations] +default = "local-simulation" -[flower.engine.simulation.supernode] -num = 2 +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 diff --git a/src/py/flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl index 7ee655967c4a..d5fec5f2f93f 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl @@ -6,28 +6,30 @@ build-backend = "hatchling.build" name = "$package_name" version = "1.0.0" description = "" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] -license = { text = "Apache License (2.0)" } +license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.8.0,<2.0", - "flwr-datasets[vision]>=0.0.2,<1.0.0", + "flwr[simulation]>=1.12.0", + "flwr-datasets[vision]>=0.3.0", "scikit-learn>=1.1.1", ] [tool.hatch.build.targets.wheel] packages = ["."] -[flower] +[tool.flwr.app] publisher = "$username" -[flower.components] -serverapp = "$import_name.server:app" -clientapp = "$import_name.client:app" +[tool.flwr.app.components] +serverapp = "$import_name.server_app:app" +clientapp = "$import_name.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +penalty = "l2" +local-epochs = 1 -[flower.engine] -name = "simulation" +[tool.flwr.federations] +default = "local-simulation" -[flower.engine.simulation.supernode] -num = 2 +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 
diff --git a/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl index f453bce668fa..81a839b30998 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl @@ -6,28 +6,31 @@ build-backend = "hatchling.build" name = "$package_name" version = "1.0.0" description = "" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] -license = { text = "Apache License (2.0)" } +license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.8.0,<2.0", - "flwr-datasets[vision]>=0.0.2,<1.0.0", + "flwr[simulation]>=1.12.0", + "flwr-datasets[vision]>=0.3.0", "tensorflow>=2.11.1", ] [tool.hatch.build.targets.wheel] packages = ["."] -[flower] +[tool.flwr.app] publisher = "$username" -[flower.components] -serverapp = "$import_name.server:app" -clientapp = "$import_name.client:app" +[tool.flwr.app.components] +serverapp = "$import_name.server_app:app" +clientapp = "$import_name.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +local-epochs = 1 +batch-size = 32 +verbose = false -[flower.engine] -name = "simulation" +[tool.flwr.federations] +default = "local-simulation" -[flower.engine.simulation.supernode] -num = 2 +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 diff --git a/src/py/flwr/cli/run/run.py b/src/py/flwr/cli/run/run.py index dbaf7feb3500..4722effee53d 100644 --- a/src/py/flwr/cli/run/run.py +++ b/src/py/flwr/cli/run/run.py @@ -14,37 +14,76 @@ # ============================================================================== """Flower command line interface `run` command.""" +import json +import subprocess import sys -from enum import Enum -from typing import Optional +from logging import DEBUG +from pathlib import Path +from typing import Annotated, Any, Optional import typer -from typing_extensions import Annotated -from flwr.cli import config_utils 
-from flwr.simulation.run_simulation import _run_simulation +from flwr.cli.build import build +from flwr.cli.config_utils import load_and_validate +from flwr.common.config import flatten_dict, parse_config_args +from flwr.common.grpc import GRPC_MAX_MESSAGE_LENGTH, create_channel +from flwr.common.logger import log +from flwr.common.serde import fab_to_proto, user_config_to_proto +from flwr.common.typing import Fab +from flwr.proto.exec_pb2 import StartRunRequest # pylint: disable=E0611 +from flwr.proto.exec_pb2_grpc import ExecStub +from ..log import start_stream -class Engine(str, Enum): - """Enum defining the engine to run on.""" +CONN_REFRESH_PERIOD = 60 # Connection refresh period for log streaming (seconds) - SIMULATION = "simulation" +def on_channel_state_change(channel_connectivity: str) -> None: + """Log channel connectivity.""" + log(DEBUG, channel_connectivity) + +# pylint: disable-next=too-many-locals def run( - engine: Annotated[ - Optional[Engine], - typer.Option(case_sensitive=False, help="The ML framework to use"), + app: Annotated[ + Path, + typer.Argument(help="Path of the Flower App to run."), + ] = Path("."), + federation: Annotated[ + Optional[str], + typer.Argument(help="Name of the federation to run the app on."), + ] = None, + config_overrides: Annotated[ + Optional[list[str]], + typer.Option( + "--run-config", + "-c", + help="Override configuration key-value pairs, should be of the format:\n\n" + '`--run-config \'key1="value1" key2="value2"\' ' + "--run-config 'key3=\"value3\"'`\n\n" + "Note that `key1`, `key2`, and `key3` in this example need to exist " + "inside the `pyproject.toml` in order to be properly overriden.", + ), ] = None, + stream: Annotated[ + bool, + typer.Option( + "--stream", + help="Use `--stream` with `flwr run` to display logs;\n " + "logs are not streamed by default.", + ), + ] = False, ) -> None: - """Run Flower project.""" + """Run Flower App.""" typer.secho("Loading project configuration... 
", fg=typer.colors.BLUE) - config, errors, warnings = config_utils.load_and_validate() + pyproject_path = app / "pyproject.toml" if app else None + config, errors, warnings = load_and_validate(path=pyproject_path) if config is None: typer.secho( - "Project configuration could not be loaded.\npyproject.toml is invalid:\n" + "Project configuration could not be loaded.\n" + "pyproject.toml is invalid:\n" + "\n".join([f"- {line}" for line in errors]), fg=typer.colors.RED, bold=True, @@ -61,24 +100,149 @@ def run( typer.secho("Success", fg=typer.colors.GREEN) - server_app_ref = config["flower"]["components"]["serverapp"] - client_app_ref = config["flower"]["components"]["clientapp"] + federation = federation or config["tool"]["flwr"]["federations"].get("default") + + if federation is None: + typer.secho( + "❌ No federation name was provided and the project's `pyproject.toml` " + "doesn't declare a default federation (with a SuperExec address or an " + "`options.num-supernodes` value).", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + # Validate the federation exists in the configuration + federation_config = config["tool"]["flwr"]["federations"].get(federation) + if federation_config is None: + available_feds = { + fed for fed in config["tool"]["flwr"]["federations"] if fed != "default" + } + typer.secho( + f"❌ There is no `{federation}` federation declared in " + "`pyproject.toml`.\n The following federations were found:\n\n" + + "\n".join(available_feds), + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + if "address" in federation_config: + _run_with_superexec(app, federation_config, config_overrides, stream) + else: + _run_without_superexec(app, federation_config, config_overrides, federation) - if engine is None: - engine = config["flower"]["engine"]["name"] - if engine == Engine.SIMULATION: - num_supernodes = config["flower"]["engine"]["simulation"]["supernode"]["num"] +# pylint: disable=too-many-locals +def 
_run_with_superexec( + app: Path, + federation_config: dict[str, Any], + config_overrides: Optional[list[str]], + stream: bool, +) -> None: - typer.secho("Starting run... ", fg=typer.colors.BLUE) - _run_simulation( - server_app_attr=server_app_ref, - client_app_attr=client_app_ref, - num_supernodes=num_supernodes, - ) + insecure_str = federation_config.get("insecure") + if root_certificates := federation_config.get("root-certificates"): + root_certificates_bytes = (app / root_certificates).read_bytes() + if insecure := bool(insecure_str): + typer.secho( + "❌ `root_certificates` were provided but the `insecure` parameter" + "is set to `True`.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) else: + root_certificates_bytes = None + if insecure_str is None: + typer.secho( + "❌ To disable TLS, set `insecure = true` in `pyproject.toml`.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + if not (insecure := bool(insecure_str)): + typer.secho( + "❌ No certificate were given yet `insecure` is set to `False`.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + channel = create_channel( + server_address=federation_config["address"], + insecure=insecure, + root_certificates=root_certificates_bytes, + max_message_length=GRPC_MAX_MESSAGE_LENGTH, + interceptors=None, + ) + channel.subscribe(on_channel_state_change) + stub = ExecStub(channel) + + fab_path, fab_hash = build(app) + content = Path(fab_path).read_bytes() + fab = Fab(fab_hash, content) + + req = StartRunRequest( + fab=fab_to_proto(fab), + override_config=user_config_to_proto(parse_config_args(config_overrides)), + federation_config=user_config_to_proto( + flatten_dict(federation_config.get("options")) + ), + ) + res = stub.StartRun(req) + + # Delete FAB file once it has been sent to the SuperExec + Path(fab_path).unlink() + typer.secho(f"🎊 Successfully started run {res.run_id}", fg=typer.colors.GREEN) + + if stream: + start_stream(res.run_id, channel, 
CONN_REFRESH_PERIOD) + + +def _run_without_superexec( + app: Optional[Path], + federation_config: dict[str, Any], + config_overrides: Optional[list[str]], + federation: str, +) -> None: + try: + num_supernodes = federation_config["options"]["num-supernodes"] + verbose: Optional[bool] = federation_config["options"].get("verbose") + backend_cfg = federation_config["options"].get("backend", {}) + except KeyError as err: typer.secho( - f"Engine '{engine}' is not yet supported in `flwr run`", + "❌ The project's `pyproject.toml` needs to declare the number of" + " SuperNodes in the simulation. To simulate 10 SuperNodes," + " use the following notation:\n\n" + f"[tool.flwr.federations.{federation}]\n" + "options.num-supernodes = 10\n", fg=typer.colors.RED, bold=True, ) + raise typer.Exit(code=1) from err + + command = [ + "flower-simulation", + "--app", + f"{app}", + "--num-supernodes", + f"{num_supernodes}", + ] + + if backend_cfg: + # Stringify as JSON + command.extend(["--backend-config", json.dumps(backend_cfg)]) + + if verbose: + command.extend(["--verbose"]) + + if config_overrides: + command.extend(["--run-config", f"{' '.join(config_overrides)}"]) + + # Run the simulation + subprocess.run( + command, + check=True, + text=True, + ) diff --git a/src/py/flwr/cli/utils.py b/src/py/flwr/cli/utils.py index 6460b770b184..e725fdd3f951 100644 --- a/src/py/flwr/cli/utils.py +++ b/src/py/flwr/cli/utils.py @@ -14,8 +14,10 @@ # ============================================================================== """Flower command line interface utils.""" +import hashlib import re -from typing import Callable, List, Optional, cast +from pathlib import Path +from typing import Callable, Optional, cast import typer @@ -38,7 +40,7 @@ def prompt_text( return cast(str, result) -def prompt_options(text: str, options: List[str]) -> str: +def prompt_options(text: str, options: list[str]) -> str: """Ask user to select one of the given options and return the selected item.""" # Turn options 
into a list with index as in " [ 0] quickstart-pytorch" options_formatted = [ @@ -122,3 +124,15 @@ def sanitize_project_name(name: str) -> str: sanitized_name = sanitized_name[1:] return sanitized_name + + +def get_sha256_hash(file_path: Path) -> str: + """Calculate the SHA-256 hash of a file.""" + sha256 = hashlib.sha256() + with open(file_path, "rb") as f: + while True: + data = f.read(65536) # Read in 64kB blocks + if not data: + break + sha256.update(data) + return sha256.hexdigest() diff --git a/src/py/flwr/client/__init__.py b/src/py/flwr/client/__init__.py index fd8647dbaf2e..dce3be9036bb 100644 --- a/src/py/flwr/client/__init__.py +++ b/src/py/flwr/client/__init__.py @@ -20,17 +20,16 @@ from .client import Client as Client from .client_app import ClientApp as ClientApp from .numpy_client import NumPyClient as NumPyClient -from .supernode import run_client_app as run_client_app -from .supernode import run_supernode as run_supernode from .typing import ClientFn as ClientFn +from .typing import ClientFnExt as ClientFnExt __all__ = [ "Client", "ClientApp", "ClientFn", + "ClientFnExt", "NumPyClient", - "run_client_app", - "run_supernode", + "mod", "start_client", "start_numpy_client", ] diff --git a/src/py/flwr/client/app.py b/src/py/flwr/client/app.py index d7c05d8afbb2..fdb62578292a 100644 --- a/src/py/flwr/client/app.py +++ b/src/py/flwr/client/app.py @@ -14,21 +14,32 @@ # ============================================================================== """Flower client app.""" +import signal +import subprocess import sys import time -from logging import DEBUG, ERROR, INFO, WARN -from typing import Callable, ContextManager, Optional, Tuple, Type, Union +from contextlib import AbstractContextManager +from dataclasses import dataclass +from logging import ERROR, INFO, WARN +from pathlib import Path +from typing import Callable, Optional, Union, cast +import grpc from cryptography.hazmat.primitives.asymmetric import ec from grpc import RpcError +from 
flwr.cli.config_utils import get_fab_metadata +from flwr.cli.install import install_from_fab from flwr.client.client import Client from flwr.client.client_app import ClientApp, LoadClientAppError -from flwr.client.typing import ClientFn -from flwr.common import GRPC_MAX_MESSAGE_LENGTH, EventType, Message, event +from flwr.client.typing import ClientFnExt +from flwr.common import GRPC_MAX_MESSAGE_LENGTH, Context, EventType, Message, event from flwr.common.address import parse_address from flwr.common.constant import ( + CLIENTAPPIO_API_DEFAULT_ADDRESS, MISSING_EXTRA_REST, + RUN_ID_NUM_BYTES, + TRANSPORT_TYPE_GRPC_ADAPTER, TRANSPORT_TYPE_GRPC_BIDI, TRANSPORT_TYPE_GRPC_RERE, TRANSPORT_TYPE_REST, @@ -37,17 +48,26 @@ ) from flwr.common.logger import log, warn_deprecated_feature from flwr.common.message import Error -from flwr.common.retry_invoker import RetryInvoker, exponential - +from flwr.common.retry_invoker import RetryInvoker, RetryState, exponential +from flwr.common.typing import Fab, Run, UserConfig +from flwr.proto.clientappio_pb2_grpc import add_ClientAppIoServicer_to_server +from flwr.server.superlink.fleet.grpc_bidi.grpc_server import generic_create_grpc_server +from flwr.server.superlink.state.utils import generate_rand_int_from_bytes + +from .clientapp.clientappio_servicer import ClientAppInputs, ClientAppIoServicer +from .grpc_adapter_client.connection import grpc_adapter from .grpc_client.connection import grpc_connection from .grpc_rere_client.connection import grpc_request_response from .message_handler.message_handler import handle_control_message from .node_state import NodeState from .numpy_client import NumPyClient +ISOLATION_MODE_SUBPROCESS = "subprocess" +ISOLATION_MODE_PROCESS = "process" + def _check_actionable_client( - client: Optional[Client], client_fn: Optional[ClientFn] + client: Optional[Client], client_fn: Optional[ClientFnExt] ) -> None: if client_fn is None and client is None: raise ValueError( @@ -68,14 +88,14 @@ def 
_check_actionable_client( def start_client( *, server_address: str, - client_fn: Optional[ClientFn] = None, + client_fn: Optional[ClientFnExt] = None, client: Optional[Client] = None, grpc_max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, root_certificates: Optional[Union[bytes, str]] = None, insecure: Optional[bool] = None, transport: Optional[str] = None, authentication_keys: Optional[ - Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] ] = None, max_retries: Optional[int] = None, max_wait_time: Optional[float] = None, @@ -88,7 +108,7 @@ def start_client( The IPv4 or IPv6 address of the server. If the Flower server runs on the same machine on port 8080, then `server_address` would be `"[::]:8080"`. - client_fn : Optional[ClientFn] + client_fn : Optional[ClientFnExt] A callable that instantiates a Client. (default: None) client : Optional[flwr.client.Client] An implementation of the abstract base @@ -112,6 +132,11 @@ class `flwr.client.Client` (default: None) - 'grpc-bidi': gRPC, bidirectional streaming - 'grpc-rere': gRPC, request-response (experimental) - 'rest': HTTP (experimental) + authentication_keys : Optional[Tuple[PrivateKey, PublicKey]] (default: None) + Tuple containing the elliptic curve private key and public key for + authentication from the cryptography library. + Source: https://cryptography.io/en/latest/hazmat/primitives/asymmetric/ec/ + Used to establish an authenticated connection with the server. max_retries: Optional[int] (default: None) The maximum number of times the client will try to connect to the server before giving up in case of a connection error. 
If set to None, @@ -132,8 +157,8 @@ class `flwr.client.Client` (default: None) Starting an SSL-enabled gRPC client using system certificates: - >>> def client_fn(cid: str): - >>> return FlowerClient() + >>> def client_fn(context: Context): + >>> return FlowerClient().to_client() >>> >>> start_client( >>> server_address=localhost:8080, @@ -152,8 +177,9 @@ class `flwr.client.Client` (default: None) >>> ) """ event(EventType.START_CLIENT_ENTER) - _start_client_internal( + start_client_internal( server_address=server_address, + node_config={}, load_client_app_fn=None, client_fn=client_fn, client=client, @@ -172,21 +198,25 @@ class `flwr.client.Client` (default: None) # pylint: disable=too-many-branches # pylint: disable=too-many-locals # pylint: disable=too-many-statements -def _start_client_internal( +def start_client_internal( *, server_address: str, - load_client_app_fn: Optional[Callable[[], ClientApp]] = None, - client_fn: Optional[ClientFn] = None, + node_config: UserConfig, + load_client_app_fn: Optional[Callable[[str, str, str], ClientApp]] = None, + client_fn: Optional[ClientFnExt] = None, client: Optional[Client] = None, grpc_max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, root_certificates: Optional[Union[bytes, str]] = None, insecure: Optional[bool] = None, transport: Optional[str] = None, authentication_keys: Optional[ - Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] ] = None, max_retries: Optional[int] = None, max_wait_time: Optional[float] = None, + flwr_path: Optional[Path] = None, + isolation: Optional[str] = None, + supernode_address: Optional[str] = CLIENTAPPIO_API_DEFAULT_ADDRESS, ) -> None: """Start a Flower client node which connects to a Flower server. @@ -196,9 +226,11 @@ def _start_client_internal( The IPv4 or IPv6 address of the server. If the Flower server runs on the same machine on port 8080, then `server_address` would be `"[::]:8080"`. 
+ node_config: UserConfig + The configuration of the node. load_client_app_fn : Optional[Callable[[], ClientApp]] (default: None) A function that can be used to load a `ClientApp` instance. - client_fn : Optional[ClientFn] + client_fn : Optional[ClientFnExt] A callable that instantiates a Client. (default: None) client : Optional[flwr.client.Client] An implementation of the abstract base @@ -222,6 +254,11 @@ class `flwr.client.Client` (default: None) - 'grpc-bidi': gRPC, bidirectional streaming - 'grpc-rere': gRPC, request-response (experimental) - 'rest': HTTP (experimental) + authentication_keys : Optional[Tuple[PrivateKey, PublicKey]] (default: None) + Tuple containing the elliptic curve private key and public key for + authentication from the cryptography library. + Source: https://cryptography.io/en/latest/hazmat/primitives/asymmetric/ec/ + Used to establish an authenticated connection with the server. max_retries: Optional[int] (default: None) The maximum number of times the client will try to connect to the server before giving up in case of a connection error. If set to None, @@ -230,6 +267,17 @@ class `flwr.client.Client` (default: None) The maximum duration before the client stops trying to connect to the server in case of connection error. If set to None, there is no limit to the total time. + flwr_path: Optional[Path] (default: None) + The fully resolved path containing installed Flower Apps. + isolation : Optional[str] (default: None) + Isolation mode for `ClientApp`. Possible values are `subprocess` and + `process`. Defaults to `None`, which runs the `ClientApp` in the same process + as the SuperNode. If `subprocess`, the `ClientApp` runs in a subprocess started + by the SueprNode and communicates using gRPC at the address + `supernode_address`. If `process`, the `ClientApp` runs in a separate isolated + process and communicates using gRPC at the address `supernode_address`. 
+ supernode_address : Optional[str] (default: `CLIENTAPPIO_API_DEFAULT_ADDRESS`) + The SuperNode gRPC server address. """ if insecure is None: insecure = root_certificates is None @@ -240,7 +288,7 @@ class `flwr.client.Client` (default: None) if client_fn is None: # Wrap `Client` instance in `client_fn` def single_client_factory( - cid: str, # pylint: disable=unused-argument + context: Context, # pylint: disable=unused-argument ) -> Client: if client is None: # Added this to keep mypy happy raise ValueError( @@ -250,11 +298,22 @@ def single_client_factory( client_fn = single_client_factory - def _load_client_app() -> ClientApp: + def _load_client_app(_1: str, _2: str, _3: str) -> ClientApp: return ClientApp(client_fn=client_fn) load_client_app_fn = _load_client_app + if isolation: + if supernode_address is None: + raise ValueError( + f"`supernode_address` required when `isolation` is " + f"{ISOLATION_MODE_SUBPROCESS} or {ISOLATION_MODE_PROCESS}", + ) + _clientappio_grpc_server, clientappio_servicer = run_clientappio_api_grpc( + address=supernode_address + ) + supernode_address = cast(str, supernode_address) + # At this point, only `load_client_app_fn` should be used # Both `client` and `client_fn` must not be used directly @@ -263,10 +322,33 @@ def _load_client_app() -> ClientApp: transport, server_address ) + app_state_tracker = _AppStateTracker() + + def _on_sucess(retry_state: RetryState) -> None: + app_state_tracker.is_connected = True + if retry_state.tries > 1: + log( + INFO, + "Connection successful after %.2f seconds and %s tries.", + retry_state.elapsed_time, + retry_state.tries, + ) + + def _on_backoff(retry_state: RetryState) -> None: + app_state_tracker.is_connected = False + if retry_state.tries == 1: + log(WARN, "Connection attempt failed, retrying...") + else: + log( + WARN, + "Connection attempt failed, retrying in %.2f seconds", + retry_state.actual_wait, + ) + retry_invoker = RetryInvoker( wait_gen_factory=exponential, 
recoverable_exceptions=connection_error_type, - max_tries=max_retries, + max_tries=max_retries + 1 if max_retries is not None else None, max_time=max_wait_time, on_giveup=lambda retry_state: ( log( @@ -278,30 +360,16 @@ def _load_client_app() -> ClientApp: if retry_state.tries > 1 else None ), - on_success=lambda retry_state: ( - log( - INFO, - "Connection successful after %.2f seconds and %s tries.", - retry_state.elapsed_time, - retry_state.tries, - ) - if retry_state.tries > 1 - else None - ), - on_backoff=lambda retry_state: ( - log(WARN, "Connection attempt failed, retrying...") - if retry_state.tries == 1 - else log( - DEBUG, - "Connection attempt failed, retrying in %.2f seconds", - retry_state.actual_wait, - ) - ), + on_success=_on_sucess, + on_backoff=_on_backoff, ) - node_state = NodeState() + # NodeState gets initialized when the first connection is established + node_state: Optional[NodeState] = None + + runs: dict[int, Run] = {} - while True: + while not app_state_tracker.interrupt: sleep_duration: int = 0 with connection( address, @@ -311,106 +379,217 @@ def _load_client_app() -> ClientApp: root_certificates, authentication_keys, ) as conn: - # pylint: disable-next=W0612 - receive, send, create_node, delete_node, get_run = conn - - # Register node - if create_node is not None: - create_node() # pylint: disable=not-callable - - while True: - # Receive - message = receive() - if message is None: - time.sleep(3) # Wait for 3s before asking again - continue - - log(INFO, "") - if len(message.metadata.group_id) > 0: - log( - INFO, - "[RUN %s, ROUND %s]", - message.metadata.run_id, - message.metadata.group_id, + receive, send, create_node, delete_node, get_run, get_fab = conn + + # Register node when connecting the first time + if node_state is None: + if create_node is None: + if transport not in ["grpc-bidi", None]: + raise NotImplementedError( + "All transports except `grpc-bidi` require " + "an implementation for `create_node()`.'" + ) + # gRPC-bidi 
doesn't have the concept of node_id, + # so we set it to -1 + node_state = NodeState( + node_id=-1, + node_config={}, + ) + else: + # Call create_node fn to register node + node_id: Optional[int] = ( # pylint: disable=assignment-from-none + create_node() + ) # pylint: disable=not-callable + if node_id is None: + raise ValueError("Node registration failed") + node_state = NodeState( + node_id=node_id, + node_config=node_config, ) - log( - INFO, - "Received: %s message %s", - message.metadata.message_type, - message.metadata.message_id, - ) - - # Handle control message - out_message, sleep_duration = handle_control_message(message) - if out_message: - send(out_message) - break - - # Register context for this run - node_state.register_context(run_id=message.metadata.run_id) - - # Retrieve context for this run - context = node_state.retrieve_context(run_id=message.metadata.run_id) - - # Create an error reply message that will never be used to prevent - # the used-before-assignment linting error - reply_message = message.create_error_reply( - error=Error(code=ErrorCode.UNKNOWN, reason="Unknown") - ) - # Handle app loading and task message + app_state_tracker.register_signal_handler() + # pylint: disable=too-many-nested-blocks + while not app_state_tracker.interrupt: try: - # Load ClientApp instance - client_app: ClientApp = load_client_app_fn() - - # Execute ClientApp - reply_message = client_app(message=message, context=context) - except Exception as ex: # pylint: disable=broad-exception-caught - - # Legacy grpc-bidi - if transport in ["grpc-bidi", None]: - log(ERROR, "Client raised an exception.", exc_info=ex) - # Raise exception, crash process - raise ex - - # Don't update/change NodeState - - e_code = ErrorCode.CLIENT_APP_RAISED_EXCEPTION - # Reason example: ":<'division by zero'>" - reason = str(type(ex)) + ":<'" + str(ex) + "'>" - exc_entity = "ClientApp" - if isinstance(ex, LoadClientAppError): - reason = ( - "An exception was raised when attempting to load " - 
"`ClientApp`" + # Receive + message = receive() + if message is None: + time.sleep(3) # Wait for 3s before asking again + continue + + log(INFO, "") + if len(message.metadata.group_id) > 0: + log( + INFO, + "[RUN %s, ROUND %s]", + message.metadata.run_id, + message.metadata.group_id, ) - e_code = ErrorCode.LOAD_CLIENT_APP_EXCEPTION - exc_entity = "SuperNode" + log( + INFO, + "Received: %s message %s", + message.metadata.message_type, + message.metadata.message_id, + ) - log(ERROR, "%s raised an exception", exc_entity, exc_info=ex) + # Handle control message + out_message, sleep_duration = handle_control_message(message) + if out_message: + send(out_message) + break + + # Get run info + run_id = message.metadata.run_id + if run_id not in runs: + if get_run is not None: + runs[run_id] = get_run(run_id) + # If get_run is None, i.e., in grpc-bidi mode + else: + runs[run_id] = Run(run_id, "", "", "", {}) + + run: Run = runs[run_id] + if get_fab is not None and run.fab_hash: + fab = get_fab(run.fab_hash) + if not isolation: + # If `ClientApp` runs in the same process, install the FAB + install_from_fab(fab.content, flwr_path, True) + fab_id, fab_version = get_fab_metadata(fab.content) + else: + fab = None + fab_id, fab_version = run.fab_id, run.fab_version + + run.fab_id, run.fab_version = fab_id, fab_version + + # Register context for this run + node_state.register_context( + run_id=run_id, + run=run, + flwr_path=flwr_path, + fab=fab, + ) - # Create error message + # Retrieve context for this run + context = node_state.retrieve_context(run_id=run_id) + # Create an error reply message that will never be used to prevent + # the used-before-assignment linting error reply_message = message.create_error_reply( - error=Error(code=e_code, reason=reason) - ) - else: - # No exception, update node state - node_state.update_context( - run_id=message.metadata.run_id, - context=context, + error=Error(code=ErrorCode.UNKNOWN, reason="Unknown") ) - # Send - send(reply_message) - 
log(INFO, "Sent reply") + # Handle app loading and task message + try: + if isolation: + # Two isolation modes: + # 1. `subprocess`: SuperNode is starting the ClientApp + # process as a subprocess. + # 2. `process`: ClientApp process gets started separately + # (via `flwr-clientapp`), for example, in a separate + # Docker container. + + # Generate SuperNode token + token: int = generate_rand_int_from_bytes(RUN_ID_NUM_BYTES) + + # Mode 1: SuperNode starts ClientApp as subprocess + start_subprocess = isolation == ISOLATION_MODE_SUBPROCESS + + # Share Message and Context with servicer + clientappio_servicer.set_inputs( + clientapp_input=ClientAppInputs( + message=message, + context=context, + run=run, + fab=fab, + token=token, + ), + token_returned=start_subprocess, + ) + + if start_subprocess: + # Start ClientApp subprocess + command = [ + "flwr-clientapp", + "--supernode", + supernode_address, + "--token", + str(token), + ] + subprocess.run( + command, + stdout=None, + stderr=None, + check=True, + ) + else: + # Wait for output to become available + while not clientappio_servicer.has_outputs(): + time.sleep(0.1) + + outputs = clientappio_servicer.get_outputs() + reply_message, context = outputs.message, outputs.context + else: + # Load ClientApp instance + client_app: ClientApp = load_client_app_fn( + fab_id, fab_version, run.fab_hash + ) + + # Execute ClientApp + reply_message = client_app(message=message, context=context) + except Exception as ex: # pylint: disable=broad-exception-caught + + # Legacy grpc-bidi + if transport in ["grpc-bidi", None]: + log(ERROR, "Client raised an exception.", exc_info=ex) + # Raise exception, crash process + raise ex + + # Don't update/change NodeState + + e_code = ErrorCode.CLIENT_APP_RAISED_EXCEPTION + # Ex fmt: ":<'division by zero'>" + reason = str(type(ex)) + ":<'" + str(ex) + "'>" + exc_entity = "ClientApp" + if isinstance(ex, LoadClientAppError): + reason = ( + "An exception was raised when attempting to load " + 
"`ClientApp`" + ) + e_code = ErrorCode.LOAD_CLIENT_APP_EXCEPTION + exc_entity = "SuperNode" + + if not app_state_tracker.interrupt: + log( + ERROR, "%s raised an exception", exc_entity, exc_info=ex + ) + + # Create error message + reply_message = message.create_error_reply( + error=Error(code=e_code, reason=reason) + ) + else: + # No exception, update node state + node_state.update_context( + run_id=run_id, + context=context, + ) + + # Send + send(reply_message) + log(INFO, "Sent reply") + + except StopIteration: + sleep_duration = 0 + break + # pylint: enable=too-many-nested-blocks # Unregister node - if delete_node is not None: + if delete_node is not None and app_state_tracker.is_connected: delete_node() # pylint: disable=not-callable if sleep_duration == 0: log(INFO, "Disconnect and shut down") + del app_state_tracker break + # Sleep and reconnect afterwards log( INFO, @@ -521,7 +700,7 @@ def start_numpy_client( ) -def _init_connection(transport: Optional[str], server_address: str) -> Tuple[ +def _init_connection(transport: Optional[str], server_address: str) -> tuple[ Callable[ [ str, @@ -529,20 +708,21 @@ def _init_connection(transport: Optional[str], server_address: str) -> Tuple[ RetryInvoker, int, Union[bytes, str, None], - Optional[Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey]], + Optional[tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey]], ], - ContextManager[ - Tuple[ + AbstractContextManager[ + tuple[ Callable[[], Optional[Message]], Callable[[Message], None], + Optional[Callable[[], Optional[int]]], Optional[Callable[[], None]], - Optional[Callable[[], None]], - Optional[Callable[[int], Tuple[str, str]]], + Optional[Callable[[int], Run]], + Optional[Callable[[str], Fab]], ] ], ], str, - Type[Exception], + type[Exception], ]: # Parse IP address parsed_address = parse_address(server_address) @@ -571,6 +751,8 @@ def _init_connection(transport: Optional[str], server_address: str) -> Tuple[ connection, error_type = 
http_request_response, RequestsConnectionError elif transport == TRANSPORT_TYPE_GRPC_RERE: connection, error_type = grpc_request_response, RpcError + elif transport == TRANSPORT_TYPE_GRPC_ADAPTER: + connection, error_type = grpc_adapter, RpcError elif transport == TRANSPORT_TYPE_GRPC_BIDI: connection, error_type = grpc_connection, RpcError else: @@ -579,3 +761,37 @@ def _init_connection(transport: Optional[str], server_address: str) -> Tuple[ ) return connection, address, error_type + + +@dataclass +class _AppStateTracker: + interrupt: bool = False + is_connected: bool = False + + def register_signal_handler(self) -> None: + """Register handlers for exit signals.""" + + def signal_handler(sig, frame): # type: ignore + # pylint: disable=unused-argument + self.interrupt = True + raise StopIteration from None + + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + +def run_clientappio_api_grpc(address: str) -> tuple[grpc.Server, ClientAppIoServicer]: + """Run ClientAppIo API gRPC server.""" + clientappio_servicer: grpc.Server = ClientAppIoServicer() + clientappio_add_servicer_to_server_fn = add_ClientAppIoServicer_to_server + clientappio_grpc_server = generic_create_grpc_server( + servicer_and_add_fn=( + clientappio_servicer, + clientappio_add_servicer_to_server_fn, + ), + server_address=address, + max_message_length=GRPC_MAX_MESSAGE_LENGTH, + ) + log(INFO, "Starting Flower ClientAppIo gRPC server on %s", address) + clientappio_grpc_server.start() + return clientappio_grpc_server, clientappio_servicer diff --git a/src/py/flwr/client/app_test.py b/src/py/flwr/client/app_test.py index 56d6308a0fe2..723a066ea0bc 100644 --- a/src/py/flwr/client/app_test.py +++ b/src/py/flwr/client/app_test.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2022 Flower Labs GmbH. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,8 +15,6 @@ """Flower Client app tests.""" -from typing import Dict, Tuple - from flwr.common import ( Config, EvaluateIns, @@ -59,7 +57,7 @@ def evaluate(self, ins: EvaluateIns) -> EvaluateRes: class NeedsWrappingClient(NumPyClient): """Client implementation extending the high-level NumPyClient.""" - def get_properties(self, config: Config) -> Dict[str, Scalar]: + def get_properties(self, config: Config) -> dict[str, Scalar]: """Raise an Exception because this method is not expected to be called.""" raise NotImplementedError() @@ -69,13 +67,13 @@ def get_parameters(self, config: Config) -> NDArrays: def fit( self, parameters: NDArrays, config: Config - ) -> Tuple[NDArrays, int, Dict[str, Scalar]]: + ) -> tuple[NDArrays, int, dict[str, Scalar]]: """Raise an Exception because this method is not expected to be called.""" raise NotImplementedError() def evaluate( self, parameters: NDArrays, config: Config - ) -> Tuple[float, int, Dict[str, Scalar]]: + ) -> tuple[float, int, dict[str, Scalar]]: """Raise an Exception because this method is not expected to be called.""" raise NotImplementedError() diff --git a/src/py/flwr/client/client.py b/src/py/flwr/client/client.py index 23a3755f3efe..dab9b6a26588 100644 --- a/src/py/flwr/client/client.py +++ b/src/py/flwr/client/client.py @@ -33,12 +33,13 @@ Parameters, Status, ) +from flwr.common.logger import warn_deprecated_feature_with_example class Client(ABC): """Abstract base class for Flower clients.""" - context: Context + _context: Context def get_properties(self, ins: GetPropertiesIns) -> GetPropertiesRes: """Return set of client's properties. 
@@ -141,6 +142,26 @@ def evaluate(self, ins: EvaluateIns) -> EvaluateRes: metrics={}, ) + @property + def context(self) -> Context: + """Getter for `Context` client attribute.""" + warn_deprecated_feature_with_example( + "Accessing the context via the client's attribute is deprecated.", + example_message="Instead, pass it to the client's " + "constructor in your `client_fn()` which already " + "receives a context object.", + code_example="def client_fn(context: Context) -> Client:\n\n" + "\t\t# Your existing client_fn\n\n" + "\t\t# Pass `context` to the constructor\n" + "\t\treturn FlowerClient(context).to_client()", + ) + return self._context + + @context.setter + def context(self, context: Context) -> None: + """Setter for `Context` client attribute.""" + self._context = context + def get_context(self) -> Context: """Get the run context from this client.""" return self.context diff --git a/src/py/flwr/client/client_app.py b/src/py/flwr/client/client_app.py index c9d337700147..234d84f27782 100644 --- a/src/py/flwr/client/client_app.py +++ b/src/py/flwr/client/client_app.py @@ -1,4 +1,4 @@ -# Copyright 2023 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -15,19 +15,62 @@ """Flower ClientApp.""" -from typing import Callable, List, Optional +import inspect +from typing import Callable, Optional +from flwr.client.client import Client from flwr.client.message_handler.message_handler import ( handle_legacy_message_from_msgtype, ) from flwr.client.mod.utils import make_ffn -from flwr.client.typing import ClientFn, Mod +from flwr.client.typing import ClientFnExt, Mod from flwr.common import Context, Message, MessageType -from flwr.common.logger import warn_preview_feature +from flwr.common.logger import warn_deprecated_feature, warn_preview_feature from .typing import ClientAppCallable +def _alert_erroneous_client_fn() -> None: + raise ValueError( + "A `ClientApp` cannot make use of a `client_fn` that does " + "not have a signature in the form: `def client_fn(context: " + "Context)`. You can import the `Context` like this: " + "`from flwr.common import Context`" + ) + + +def _inspect_maybe_adapt_client_fn_signature(client_fn: ClientFnExt) -> ClientFnExt: + client_fn_args = inspect.signature(client_fn).parameters + + if len(client_fn_args) != 1: + _alert_erroneous_client_fn() + + first_arg = list(client_fn_args.keys())[0] + first_arg_type = client_fn_args[first_arg].annotation + + if first_arg_type is str or first_arg == "cid": + # Warn previous signature for `client_fn` seems to be used + warn_deprecated_feature( + "`client_fn` now expects a signature `def client_fn(context: Context)`." + "The provided `client_fn` has signature: " + f"{dict(client_fn_args.items())}. You can import the `Context` like this:" + " `from flwr.common import Context`" + ) + + # Wrap depcreated client_fn inside a function with the expected signature + def adaptor_fn( + context: Context, + ) -> Client: # pylint: disable=unused-argument + # if patition-id is defined, pass it. Else pass node_id that should + # always be defined during Context init. 
+ cid = context.node_config.get("partition-id", context.node_id) + return client_fn(str(cid)) # type: ignore + + return adaptor_fn + + return client_fn + + class ClientAppException(Exception): """Exception raised when an exception is raised while executing a ClientApp.""" @@ -48,7 +91,7 @@ class ClientApp: >>> class FlowerClient(NumPyClient): >>> # ... >>> - >>> def client_fn(cid): + >>> def client_fn(context: Context): >>> return FlowerClient().to_client() >>> >>> app = ClientApp(client_fn) @@ -65,15 +108,17 @@ class ClientApp: def __init__( self, - client_fn: Optional[ClientFn] = None, # Only for backward compatibility - mods: Optional[List[Mod]] = None, + client_fn: Optional[ClientFnExt] = None, # Only for backward compatibility + mods: Optional[list[Mod]] = None, ) -> None: - self._mods: List[Mod] = mods if mods is not None else [] + self._mods: list[Mod] = mods if mods is not None else [] # Create wrapper function for `handle` self._call: Optional[ClientAppCallable] = None if client_fn is not None: + client_fn = _inspect_maybe_adapt_client_fn_signature(client_fn) + def ffn( message: Message, context: Context, @@ -218,10 +263,10 @@ def _registration_error(fn_name: str) -> ValueError: >>> class FlowerClient(NumPyClient): >>> # ... >>> - >>> def client_fn(cid) -> Client: + >>> def client_fn(context: Context): >>> return FlowerClient().to_client() >>> - >>> app = ClientApp() + >>> app = ClientApp( >>> client_fn=client_fn, >>> ) diff --git a/src/py/flwr/client/client_test.py b/src/py/flwr/client/client_test.py index 373c676e5edc..343b6cf093b2 100644 --- a/src/py/flwr/client/client_test.py +++ b/src/py/flwr/client/client_test.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2022 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/src/py/flwr/client/clientapp/__init__.py b/src/py/flwr/client/clientapp/__init__.py new file mode 100644 index 000000000000..e877ee22db16 --- /dev/null +++ b/src/py/flwr/client/clientapp/__init__.py @@ -0,0 +1,22 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Flower AppIO service.""" + + +from .app import flwr_clientapp as flwr_clientapp + +__all__ = [ + "flwr_clientapp", +] diff --git a/src/py/flwr/client/clientapp/app.py b/src/py/flwr/client/clientapp/app.py new file mode 100644 index 000000000000..52be2a4b6dc1 --- /dev/null +++ b/src/py/flwr/client/clientapp/app.py @@ -0,0 +1,235 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Flower ClientApp process.""" + +import argparse +import time +from logging import DEBUG, ERROR, INFO +from typing import Optional + +import grpc + +from flwr.cli.install import install_from_fab +from flwr.client.client_app import ClientApp, LoadClientAppError +from flwr.common import Context, Message +from flwr.common.constant import ErrorCode +from flwr.common.grpc import create_channel +from flwr.common.logger import log +from flwr.common.message import Error +from flwr.common.serde import ( + context_from_proto, + context_to_proto, + fab_from_proto, + message_from_proto, + message_to_proto, + run_from_proto, +) +from flwr.common.typing import Fab, Run + +# pylint: disable=E0611 +from flwr.proto.clientappio_pb2 import ( + GetTokenRequest, + GetTokenResponse, + PullClientAppInputsRequest, + PullClientAppInputsResponse, + PushClientAppOutputsRequest, + PushClientAppOutputsResponse, +) +from flwr.proto.clientappio_pb2_grpc import ClientAppIoStub + +from .utils import get_load_client_app_fn + + +def flwr_clientapp() -> None: + """Run process-isolated Flower ClientApp.""" + log(INFO, "Starting Flower ClientApp") + + parser = argparse.ArgumentParser( + description="Run a Flower ClientApp", + ) + parser.add_argument( + "--supernode", + type=str, + help="Address of SuperNode ClientAppIo gRPC servicer", + ) + parser.add_argument( + "--token", + type=int, + required=False, + help="Unique token generated by SuperNode for each ClientApp execution", + ) + args = parser.parse_args() + + log( + DEBUG, + "Staring isolated `ClientApp` connected to SuperNode ClientAppIo at %s " + "with token %s", + args.supernode, + args.token, + ) + run_clientapp(supernode=args.supernode, token=args.token) + + +def on_channel_state_change(channel_connectivity: str) -> None: + """Log channel connectivity.""" + log(DEBUG, channel_connectivity) + + +def run_clientapp( # pylint: disable=R0914 + supernode: str, + 
token: Optional[int] = None, +) -> None: + """Run Flower ClientApp process. + + Parameters + ---------- + supernode : str + Address of SuperNode + token : Optional[int] (default: None) + Unique SuperNode token for ClientApp-SuperNode authentication + """ + channel = create_channel( + server_address=supernode, + insecure=True, + ) + channel.subscribe(on_channel_state_change) + + try: + stub = ClientAppIoStub(channel) + + only_once = token is not None + while True: + # If token is not set, loop until token is received from SuperNode + while token is None: + token = get_token(stub) + time.sleep(1) + + # Pull Message, Context, Run and (optional) FAB from SuperNode + message, context, run, fab = pull_message(stub=stub, token=token) + + # Install FAB, if provided + if fab: + log(DEBUG, "Flower ClientApp starts FAB installation.") + install_from_fab(fab.content, flwr_dir=None, skip_prompt=True) + + load_client_app_fn = get_load_client_app_fn( + default_app_ref="", + app_path=None, + multi_app=True, + flwr_dir=None, + ) + + try: + # Load ClientApp + client_app: ClientApp = load_client_app_fn( + run.fab_id, run.fab_version, fab.hash_str if fab else "" + ) + + # Execute ClientApp + reply_message = client_app(message=message, context=context) + except Exception as ex: # pylint: disable=broad-exception-caught + # Don't update/change NodeState + + e_code = ErrorCode.CLIENT_APP_RAISED_EXCEPTION + # Ex fmt: ":<'division by zero'>" + reason = str(type(ex)) + ":<'" + str(ex) + "'>" + exc_entity = "ClientApp" + if isinstance(ex, LoadClientAppError): + reason = ( + "An exception was raised when attempting to load `ClientApp`" + ) + e_code = ErrorCode.LOAD_CLIENT_APP_EXCEPTION + + log(ERROR, "%s raised an exception", exc_entity, exc_info=ex) + + # Create error message + reply_message = message.create_error_reply( + error=Error(code=e_code, reason=reason) + ) + + # Push Message and Context to SuperNode + _ = push_message( + stub=stub, token=token, message=reply_message, context=context 
+ ) + + # Reset token to `None` to prevent flwr-clientapp from trying to pull the + # same inputs again + token = None + + # Stop the loop if `flwr-clientapp` is expected to process only a single + # message + if only_once: + break + + except KeyboardInterrupt: + log(INFO, "Closing connection") + except grpc.RpcError as e: + log(ERROR, "GRPC error occurred: %s", str(e)) + finally: + channel.close() + + +def get_token(stub: grpc.Channel) -> Optional[int]: + """Get a token from SuperNode.""" + log(DEBUG, "Flower ClientApp process requests token") + try: + res: GetTokenResponse = stub.GetToken(GetTokenRequest()) + log(DEBUG, "[GetToken] Received token: %s", res.token) + return res.token + except grpc.RpcError as e: + if e.code() == grpc.StatusCode.FAILED_PRECONDITION: # pylint: disable=no-member + log(DEBUG, "[GetToken] No token available yet") + else: + log(ERROR, "[GetToken] gRPC error occurred: %s", str(e)) + return None + + +def pull_message( + stub: grpc.Channel, token: int +) -> tuple[Message, Context, Run, Optional[Fab]]: + """Pull message from SuperNode to ClientApp.""" + log(INFO, "Pulling ClientAppInputs for token %s", token) + try: + res: PullClientAppInputsResponse = stub.PullClientAppInputs( + PullClientAppInputsRequest(token=token) + ) + message = message_from_proto(res.message) + context = context_from_proto(res.context) + run = run_from_proto(res.run) + fab = fab_from_proto(res.fab) if res.fab else None + return message, context, run, fab + except grpc.RpcError as e: + log(ERROR, "[PullClientAppInputs] gRPC error occurred: %s", str(e)) + raise e + + +def push_message( + stub: grpc.Channel, token: int, message: Message, context: Context +) -> PushClientAppOutputsResponse: + """Push message to SuperNode from ClientApp.""" + log(INFO, "Pushing ClientAppOutputs for token %s", token) + proto_message = message_to_proto(message) + proto_context = context_to_proto(context) + + try: + res: PushClientAppOutputsResponse = stub.PushClientAppOutputs( + 
PushClientAppOutputsRequest( + token=token, message=proto_message, context=proto_context + ) + ) + return res + except grpc.RpcError as e: + log(ERROR, "[PushClientAppOutputs] gRPC error occurred: %s", str(e)) + raise e diff --git a/src/py/flwr/client/clientapp/clientappio_servicer.py b/src/py/flwr/client/clientapp/clientappio_servicer.py new file mode 100644 index 000000000000..fe7ccd6e908f --- /dev/null +++ b/src/py/flwr/client/clientapp/clientappio_servicer.py @@ -0,0 +1,244 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""ClientAppIo API servicer.""" + + +from dataclasses import dataclass +from logging import DEBUG, ERROR +from typing import Optional, cast + +import grpc + +from flwr.common import Context, Message, typing +from flwr.common.logger import log +from flwr.common.serde import ( + clientappstatus_to_proto, + context_from_proto, + context_to_proto, + fab_to_proto, + message_from_proto, + message_to_proto, + run_to_proto, +) +from flwr.common.typing import Fab, Run + +# pylint: disable=E0611 +from flwr.proto import clientappio_pb2_grpc +from flwr.proto.clientappio_pb2 import ( # pylint: disable=E0401 + GetTokenRequest, + GetTokenResponse, + PullClientAppInputsRequest, + PullClientAppInputsResponse, + PushClientAppOutputsRequest, + PushClientAppOutputsResponse, +) + + +@dataclass +class ClientAppInputs: + """Specify the inputs to the ClientApp.""" + + message: Message + context: Context + run: Run + fab: Optional[Fab] + token: int + + +@dataclass +class ClientAppOutputs: + """Specify the outputs from the ClientApp.""" + + message: Message + context: Context + + +# pylint: disable=C0103,W0613,W0201 +class ClientAppIoServicer(clientappio_pb2_grpc.ClientAppIoServicer): + """ClientAppIo API servicer.""" + + def __init__(self) -> None: + self.clientapp_input: Optional[ClientAppInputs] = None + self.clientapp_output: Optional[ClientAppOutputs] = None + self.token_returned: bool = False + self.inputs_returned: bool = False + + def GetToken( + self, request: GetTokenRequest, context: grpc.ServicerContext + ) -> GetTokenResponse: + """Get token.""" + log(DEBUG, "ClientAppIo.GetToken") + + # Fail if no ClientAppInputs are available + if self.clientapp_input is None: + context.abort( + grpc.StatusCode.FAILED_PRECONDITION, + "No inputs available.", + ) + clientapp_input = cast(ClientAppInputs, self.clientapp_input) + + # Fail if token was already returned in a previous call + if self.token_returned: + 
context.abort( + grpc.StatusCode.FAILED_PRECONDITION, + "Token already returned. A token can be returned only once.", + ) + + # If + # - ClientAppInputs is set, and + # - token hasn't been returned before, + # return token + self.token_returned = True + return GetTokenResponse(token=clientapp_input.token) + + def PullClientAppInputs( + self, request: PullClientAppInputsRequest, context: grpc.ServicerContext + ) -> PullClientAppInputsResponse: + """Pull Message, Context, and Run.""" + log(DEBUG, "ClientAppIo.PullClientAppInputs") + + # Fail if no ClientAppInputs are available + if self.clientapp_input is None: + context.abort( + grpc.StatusCode.FAILED_PRECONDITION, + "No inputs available.", + ) + clientapp_input = cast(ClientAppInputs, self.clientapp_input) + + # Fail if token wasn't returned in a previous call + if not self.token_returned: + context.abort( + grpc.StatusCode.FAILED_PRECONDITION, + "Token hasn't been returned." + "Token must be returned before can be returned only once.", + ) + + # Fail if token isn't matching + if request.token != clientapp_input.token: + context.abort( + grpc.StatusCode.INVALID_ARGUMENT, + "Mismatch between ClientApp and SuperNode token", + ) + + # Success + self.inputs_returned = True + return PullClientAppInputsResponse( + message=message_to_proto(clientapp_input.message), + context=context_to_proto(clientapp_input.context), + run=run_to_proto(clientapp_input.run), + fab=fab_to_proto(clientapp_input.fab) if clientapp_input.fab else None, + ) + + def PushClientAppOutputs( + self, request: PushClientAppOutputsRequest, context: grpc.ServicerContext + ) -> PushClientAppOutputsResponse: + """Push Message and Context.""" + log(DEBUG, "ClientAppIo.PushClientAppOutputs") + + # Fail if no ClientAppInputs are available + if not self.clientapp_input: + context.abort( + grpc.StatusCode.FAILED_PRECONDITION, + "No inputs available.", + ) + clientapp_input = cast(ClientAppInputs, self.clientapp_input) + + # Fail if token wasn't returned in a 
previous call + if not self.token_returned: + context.abort( + grpc.StatusCode.FAILED_PRECONDITION, + "Token hasn't been returned." + "Token must be returned before can be returned only once.", + ) + + # Fail if inputs weren't delivered in a previous call + if not self.inputs_returned: + context.abort( + grpc.StatusCode.FAILED_PRECONDITION, + "Inputs haven't been delivered." + "Inputs must be delivered before can be returned only once.", + ) + + # Fail if token isn't matching + if request.token != clientapp_input.token: + context.abort( + grpc.StatusCode.INVALID_ARGUMENT, + "Mismatch between ClientApp and SuperNode token", + ) + + # Preconditions met + try: + # Update Message and Context + self.clientapp_output = ClientAppOutputs( + message=message_from_proto(request.message), + context=context_from_proto(request.context), + ) + + # Set status + code = typing.ClientAppOutputCode.SUCCESS + status = typing.ClientAppOutputStatus(code=code, message="Success") + except Exception as e: # pylint: disable=broad-exception-caught + log(ERROR, "ClientApp failed to push message to SuperNode, %s", e) + code = typing.ClientAppOutputCode.UNKNOWN_ERROR + status = typing.ClientAppOutputStatus(code=code, message="Unknown error") + + # Return status to ClientApp process + proto_status = clientappstatus_to_proto(status=status) + return PushClientAppOutputsResponse(status=proto_status) + + def set_inputs( + self, clientapp_input: ClientAppInputs, token_returned: bool + ) -> None: + """Set ClientApp inputs. + + Parameters + ---------- + clientapp_input : ClientAppInputs + The inputs to the ClientApp. + token_returned : bool + A boolean indicating if the token has been returned. + Set to `True` when passing the token to `flwr-clientapp` + and `False` otherwise. + """ + if ( + self.clientapp_input is not None + or self.clientapp_output is not None + or self.token_returned + ): + raise ValueError( + "ClientAppInputs and ClientAppOutputs must not be set before " + "calling `set_inputs`." 
+ ) + log(DEBUG, "ClientAppInputs set (token: %s)", clientapp_input.token) + self.clientapp_input = clientapp_input + self.token_returned = token_returned + + def has_outputs(self) -> bool: + """Check if ClientAppOutputs are available.""" + return self.clientapp_output is not None + + def get_outputs(self) -> ClientAppOutputs: + """Get ClientApp outputs.""" + if self.clientapp_output is None: + raise ValueError("ClientAppOutputs not set before calling `get_outputs`.") + + # Set outputs to a local variable and clear state + output: ClientAppOutputs = self.clientapp_output + self.clientapp_input = None + self.clientapp_output = None + self.token_returned = False + self.inputs_returned = False + + return output diff --git a/src/py/flwr/client/clientapp/clientappio_servicer_test.py b/src/py/flwr/client/clientapp/clientappio_servicer_test.py new file mode 100644 index 000000000000..a03400c12a86 --- /dev/null +++ b/src/py/flwr/client/clientapp/clientappio_servicer_test.py @@ -0,0 +1,223 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Test the ClientAppIo API servicer.""" + +import unittest +from unittest.mock import Mock, patch + +from flwr.client.clientapp.app import get_token, pull_message, push_message +from flwr.common import Context, Message, typing +from flwr.common.constant import RUN_ID_NUM_BYTES +from flwr.common.serde import ( + clientappstatus_from_proto, + clientappstatus_to_proto, + fab_to_proto, + message_to_proto, +) +from flwr.common.serde_test import RecordMaker + +# pylint:disable=E0611 +from flwr.proto.clientappio_pb2 import ( + GetTokenResponse, + PullClientAppInputsResponse, + PushClientAppOutputsResponse, +) +from flwr.proto.message_pb2 import Context as ProtoContext +from flwr.proto.run_pb2 import Run as ProtoRun +from flwr.server.superlink.state.utils import generate_rand_int_from_bytes + +from .clientappio_servicer import ClientAppInputs, ClientAppIoServicer, ClientAppOutputs + + +class TestClientAppIoServicer(unittest.TestCase): + """Tests for `ClientAppIoServicer` class.""" + + def setUp(self) -> None: + """Initialize.""" + self.servicer = ClientAppIoServicer() + self.maker = RecordMaker() + self.mock_stub = Mock() + self.patcher = patch( + "flwr.client.clientapp.app.ClientAppIoStub", return_value=self.mock_stub + ) + self.patcher.start() + + def tearDown(self) -> None: + """Cleanup.""" + self.patcher.stop() + + def test_set_inputs(self) -> None: + """Test setting ClientApp inputs.""" + # Prepare + message = Message( + metadata=self.maker.metadata(), + content=self.maker.recordset(2, 2, 1), + ) + context = Context( + node_id=1, + node_config={"nodeconfig1": 4.2}, + state=self.maker.recordset(2, 2, 1), + run_config={"runconfig1": 6.1}, + ) + run = typing.Run( + run_id=1, + fab_id="lorem", + fab_version="ipsum", + fab_hash="dolor", + override_config=self.maker.user_config(), + ) + fab = typing.Fab( + hash_str="abc123#$%", + content=b"\xf3\xf5\xf8\x98", + ) + + client_input = 
ClientAppInputs(message, context, run, fab, 1) + client_output = ClientAppOutputs(message, context) + + # Execute and assert + # - when ClientAppInputs is not None, ClientAppOutputs is None + with self.assertRaises(ValueError): + self.servicer.clientapp_input = client_input + self.servicer.clientapp_output = None + self.servicer.set_inputs(client_input, token_returned=True) + + # Execute and assert + # - when ClientAppInputs is None, ClientAppOutputs is not None + with self.assertRaises(ValueError): + self.servicer.clientapp_input = None + self.servicer.clientapp_output = client_output + self.servicer.set_inputs(client_input, token_returned=True) + + # Execute and assert + # - when ClientAppInputs and ClientAppOutputs is not None + with self.assertRaises(ValueError): + self.servicer.clientapp_input = client_input + self.servicer.clientapp_output = client_output + self.servicer.set_inputs(client_input, token_returned=True) + + # Execute and assert + # - when ClientAppInputs is set at .clientapp_input + self.servicer.clientapp_input = None + self.servicer.clientapp_output = None + self.servicer.set_inputs(client_input, token_returned=True) + assert client_input == self.servicer.clientapp_input + + def test_get_outputs(self) -> None: + """Test getting ClientApp outputs.""" + # Prepare + message = Message( + metadata=self.maker.metadata(), + content=self.maker.recordset(2, 2, 1), + ) + context = Context( + node_id=1, + node_config={"nodeconfig1": 4.2}, + state=self.maker.recordset(2, 2, 1), + run_config={"runconfig1": 6.1}, + ) + client_output = ClientAppOutputs(message, context) + + # Execute and assert - when `ClientAppOutputs` is None + self.servicer.clientapp_output = None + with self.assertRaises(ValueError): + # `ClientAppOutputs` should not be None + _ = self.servicer.get_outputs() + + # Execute and assert - when `ClientAppOutputs` is not None + self.servicer.clientapp_output = client_output + output = self.servicer.get_outputs() + assert isinstance(output, 
ClientAppOutputs) + assert output == client_output + assert self.servicer.clientapp_input is None + assert self.servicer.clientapp_output is None + + def test_pull_clientapp_inputs(self) -> None: + """Test pulling messages from SuperNode.""" + # Prepare + mock_message = Message( + metadata=self.maker.metadata(), + content=self.maker.recordset(3, 2, 1), + ) + mock_fab = typing.Fab( + hash_str="abc123#$%", + content=b"\xf3\xf5\xf8\x98", + ) + mock_response = PullClientAppInputsResponse( + message=message_to_proto(mock_message), + context=ProtoContext(node_id=123), + run=ProtoRun(run_id=61016, fab_id="mock/mock", fab_version="v1.0.0"), + fab=fab_to_proto(mock_fab), + ) + self.mock_stub.PullClientAppInputs.return_value = mock_response + + # Execute + message, context, run, fab = pull_message(self.mock_stub, token=456) + + # Assert + self.mock_stub.PullClientAppInputs.assert_called_once() + self.assertEqual(len(message.content.parameters_records), 3) + self.assertEqual(len(message.content.metrics_records), 2) + self.assertEqual(len(message.content.configs_records), 1) + self.assertEqual(context.node_id, 123) + self.assertEqual(run.run_id, 61016) + self.assertEqual(run.fab_id, "mock/mock") + self.assertEqual(run.fab_version, "v1.0.0") + if fab: + self.assertEqual(fab.hash_str, mock_fab.hash_str) + self.assertEqual(fab.content, mock_fab.content) + + def test_push_clientapp_outputs(self) -> None: + """Test pushing messages to SuperNode.""" + # Prepare + message = Message( + metadata=self.maker.metadata(), + content=self.maker.recordset(2, 2, 1), + ) + context = Context( + node_id=1, + node_config={"nodeconfig1": 4.2}, + state=self.maker.recordset(2, 2, 1), + run_config={"runconfig1": 6.1}, + ) + code = typing.ClientAppOutputCode.SUCCESS + status_proto = clientappstatus_to_proto( + status=typing.ClientAppOutputStatus(code=code, message="SUCCESS"), + ) + mock_response = PushClientAppOutputsResponse(status=status_proto) + self.mock_stub.PushClientAppOutputs.return_value = 
mock_response + + # Execute + res = push_message( + stub=self.mock_stub, token=789, message=message, context=context + ) + status = clientappstatus_from_proto(res.status) + + # Assert + self.mock_stub.PushClientAppOutputs.assert_called_once() + self.assertEqual(status.message, "SUCCESS") + + def test_get_token(self) -> None: + """Test getting a token from SuperNode.""" + # Prepare + token: int = generate_rand_int_from_bytes(RUN_ID_NUM_BYTES) + mock_response = GetTokenResponse(token=token) + self.mock_stub.GetToken.return_value = mock_response + + # Execute + res = get_token(stub=self.mock_stub) + + # Assert + self.mock_stub.GetToken.assert_called_once() + self.assertEqual(res, token) diff --git a/src/py/flwr/client/clientapp/utils.py b/src/py/flwr/client/clientapp/utils.py new file mode 100644 index 000000000000..f7261c015b14 --- /dev/null +++ b/src/py/flwr/client/clientapp/utils.py @@ -0,0 +1,114 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Flower ClientApp loading utils.""" + +from logging import DEBUG +from pathlib import Path +from typing import Callable, Optional + +from flwr.client.client_app import ClientApp, LoadClientAppError +from flwr.common.config import ( + get_flwr_dir, + get_metadata_from_config, + get_project_config, + get_project_dir, +) +from flwr.common.logger import log +from flwr.common.object_ref import load_app, validate + + +def get_load_client_app_fn( + default_app_ref: str, + app_path: Optional[str], + multi_app: bool, + flwr_dir: Optional[str] = None, +) -> Callable[[str, str, str], ClientApp]: + """Get the load_client_app_fn function. + + If `multi_app` is True, this function loads the specified ClientApp + based on `fab_id` and `fab_version`. If `fab_id` is empty, a default + ClientApp will be loaded. + + If `multi_app` is False, it ignores `fab_id` and `fab_version` and + loads a default ClientApp. + """ + if not multi_app: + log( + DEBUG, + "Flower SuperNode will load and validate ClientApp `%s`", + default_app_ref, + ) + + valid, error_msg = validate(default_app_ref, project_dir=app_path) + if not valid and error_msg: + raise LoadClientAppError(error_msg) from None + + def _load(fab_id: str, fab_version: str, fab_hash: str) -> ClientApp: + runtime_app_dir = Path(app_path if app_path else "").absolute() + # If multi-app feature is disabled + if not multi_app: + # Set app reference + client_app_ref = default_app_ref + # If multi-app feature is enabled but app directory is provided. + # `fab_hash` is not required since the app is loaded from `runtime_app_dir`. 
+ elif app_path is not None: + config = get_project_config(runtime_app_dir) + this_fab_version, this_fab_id = get_metadata_from_config(config) + + if this_fab_version != fab_version or this_fab_id != fab_id: + raise LoadClientAppError( + f"FAB ID or version mismatch: Expected FAB ID '{this_fab_id}' and " + f"FAB version '{this_fab_version}', but received FAB ID '{fab_id}' " + f"and FAB version '{fab_version}'.", + ) from None + + # log(WARN, "FAB ID is not provided; the default ClientApp will be loaded.") + + # Set app reference + client_app_ref = config["tool"]["flwr"]["app"]["components"]["clientapp"] + # If multi-app feature is enabled + else: + try: + runtime_app_dir = get_project_dir( + fab_id, fab_version, fab_hash, get_flwr_dir(flwr_dir) + ) + config = get_project_config(runtime_app_dir) + except Exception as e: + raise LoadClientAppError( + "Failed to load ClientApp." + "Possible reasons for error include mismatched " + "`fab_id`, `fab_version`, or `fab_hash` in " + f"{str(get_flwr_dir(flwr_dir).resolve())}." + ) from e + + # Set app reference + client_app_ref = config["tool"]["flwr"]["app"]["components"]["clientapp"] + + # Load ClientApp + log( + DEBUG, + "Loading ClientApp `%s`", + client_app_ref, + ) + client_app = load_app(client_app_ref, LoadClientAppError, runtime_app_dir) + + if not isinstance(client_app, ClientApp): + raise LoadClientAppError( + f"Attribute {client_app_ref} is not of type {ClientApp}", + ) from None + + return client_app + + return _load diff --git a/src/py/flwr/client/dpfedavg_numpy_client.py b/src/py/flwr/client/dpfedavg_numpy_client.py index ab31a289d29b..bade811b48ce 100644 --- a/src/py/flwr/client/dpfedavg_numpy_client.py +++ b/src/py/flwr/client/dpfedavg_numpy_client.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2022 Flower Labs GmbH. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,7 +16,6 @@ import copy -from typing import Dict, Tuple import numpy as np @@ -39,7 +38,7 @@ def __init__(self, client: NumPyClient) -> None: super().__init__() self.client = client - def get_properties(self, config: Config) -> Dict[str, Scalar]: + def get_properties(self, config: Config) -> dict[str, Scalar]: """Get client properties using the given Numpy client. Parameters @@ -58,7 +57,7 @@ def get_properties(self, config: Config) -> Dict[str, Scalar]: """ return self.client.get_properties(config) - def get_parameters(self, config: Dict[str, Scalar]) -> NDArrays: + def get_parameters(self, config: dict[str, Scalar]) -> NDArrays: """Return the current local model parameters. Parameters @@ -76,8 +75,8 @@ def get_parameters(self, config: Dict[str, Scalar]) -> NDArrays: return self.client.get_parameters(config) def fit( - self, parameters: NDArrays, config: Dict[str, Scalar] - ) -> Tuple[NDArrays, int, Dict[str, Scalar]]: + self, parameters: NDArrays, config: dict[str, Scalar] + ) -> tuple[NDArrays, int, dict[str, Scalar]]: """Train the provided parameters using the locally held dataset. This method first updates the local model using the original parameters @@ -153,8 +152,8 @@ def fit( return updated_params, num_examples, metrics def evaluate( - self, parameters: NDArrays, config: Dict[str, Scalar] - ) -> Tuple[float, int, Dict[str, Scalar]]: + self, parameters: NDArrays, config: dict[str, Scalar] + ) -> tuple[float, int, dict[str, Scalar]]: """Evaluate the provided parameters using the locally held dataset. Parameters diff --git a/src/py/flwr/client/grpc_adapter_client/__init__.py b/src/py/flwr/client/grpc_adapter_client/__init__.py new file mode 100644 index 000000000000..5900e2dc2d06 --- /dev/null +++ b/src/py/flwr/client/grpc_adapter_client/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2024 Flower Labs GmbH. 
All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Client-side part of the GrpcAdapter transport layer.""" diff --git a/src/py/flwr/client/grpc_adapter_client/connection.py b/src/py/flwr/client/grpc_adapter_client/connection.py new file mode 100644 index 000000000000..ab823112bbe1 --- /dev/null +++ b/src/py/flwr/client/grpc_adapter_client/connection.py @@ -0,0 +1,98 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Contextmanager for a GrpcAdapter channel to the Flower server.""" + + +from collections.abc import Iterator +from contextlib import contextmanager +from logging import ERROR +from typing import Callable, Optional, Union + +from cryptography.hazmat.primitives.asymmetric import ec + +from flwr.client.grpc_rere_client.connection import grpc_request_response +from flwr.client.grpc_rere_client.grpc_adapter import GrpcAdapter +from flwr.common import GRPC_MAX_MESSAGE_LENGTH +from flwr.common.logger import log +from flwr.common.message import Message +from flwr.common.retry_invoker import RetryInvoker +from flwr.common.typing import Fab, Run + + +@contextmanager +def grpc_adapter( # pylint: disable=R0913,too-many-positional-arguments + server_address: str, + insecure: bool, + retry_invoker: RetryInvoker, + max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, # pylint: disable=W0613 + root_certificates: Optional[Union[bytes, str]] = None, + authentication_keys: Optional[ # pylint: disable=unused-argument + tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + ] = None, +) -> Iterator[ + tuple[ + Callable[[], Optional[Message]], + Callable[[Message], None], + Optional[Callable[[], Optional[int]]], + Optional[Callable[[], None]], + Optional[Callable[[int], Run]], + Optional[Callable[[str], Fab]], + ] +]: + """Primitives for request/response-based interaction with a server via GrpcAdapter. + + Parameters + ---------- + server_address : str + The IPv6 address of the server with `http://` or `https://`. + If the Flower server runs on the same machine + on port 8080, then `server_address` would be `"http://[::]:8080"`. + insecure : bool + Starts an insecure gRPC connection when True. Enables HTTPS connection + when False, using system certificates if `root_certificates` is None. 
+ retry_invoker: RetryInvoker + `RetryInvoker` object that will try to reconnect the client to the server + after gRPC errors. If None, the client will only try to + reconnect once after a failure. + max_message_length : int + Ignored, only present to preserve API-compatibility. + root_certificates : Optional[Union[bytes, str]] (default: None) + Path of the root certificate. If provided, a secure + connection using the certificates will be established to an SSL-enabled + Flower server. Bytes won't work for the REST API. + authentication_keys : Optional[Tuple[PrivateKey, PublicKey]] (default: None) + Client authentication is not supported for this transport type. + + Returns + ------- + receive : Callable + send : Callable + create_node : Optional[Callable] + delete_node : Optional[Callable] + get_run : Optional[Callable] + get_fab : Optional[Callable] + """ + if authentication_keys is not None: + log(ERROR, "Client authentication is not supported for this transport type.") + with grpc_request_response( + server_address=server_address, + insecure=insecure, + retry_invoker=retry_invoker, + max_message_length=max_message_length, + root_certificates=root_certificates, + authentication_keys=None, # Authentication is not supported + adapter_cls=GrpcAdapter, + ) as conn: + yield conn diff --git a/src/py/flwr/client/grpc_client/connection.py b/src/py/flwr/client/grpc_client/connection.py index 6e5227cf5e5f..75d2ebe15025 100644 --- a/src/py/flwr/client/grpc_client/connection.py +++ b/src/py/flwr/client/grpc_client/connection.py @@ -16,11 +16,12 @@ import uuid +from collections.abc import Iterator from contextlib import contextmanager -from logging import DEBUG +from logging import DEBUG, ERROR from pathlib import Path from queue import Queue -from typing import Callable, Iterator, Optional, Tuple, Union, cast +from typing import Callable, Optional, Union, cast from cryptography.hazmat.primitives.asymmetric import ec @@ -38,6 +39,7 @@ from flwr.common.grpc import 
create_channel from flwr.common.logger import log from flwr.common.retry_invoker import RetryInvoker +from flwr.common.typing import Fab, Run from flwr.proto.transport_pb2 import ( # pylint: disable=E0611 ClientMessage, Reason, @@ -58,22 +60,23 @@ def on_channel_state_change(channel_connectivity: str) -> None: @contextmanager -def grpc_connection( # pylint: disable=R0913, R0915 +def grpc_connection( # pylint: disable=R0913,R0915,too-many-positional-arguments server_address: str, insecure: bool, retry_invoker: RetryInvoker, # pylint: disable=unused-argument max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, root_certificates: Optional[Union[bytes, str]] = None, authentication_keys: Optional[ # pylint: disable=unused-argument - Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] ] = None, ) -> Iterator[ - Tuple[ + tuple[ Callable[[], Optional[Message]], Callable[[Message], None], + Optional[Callable[[], Optional[int]]], Optional[Callable[[], None]], - Optional[Callable[[], None]], - Optional[Callable[[int], Tuple[str, str]]], + Optional[Callable[[int], Run]], + Optional[Callable[[str], Fab]], ] ]: """Establish a gRPC connection to a gRPC server. @@ -101,6 +104,8 @@ def grpc_connection( # pylint: disable=R0913, R0915 The PEM-encoded root certificates as a byte string or a path string. If provided, a secure connection using the certificates will be established to an SSL-enabled Flower server. + authentication_keys : Optional[Tuple[PrivateKey, PublicKey]] (default: None) + Client authentication is not supported for this transport type. 
Returns ------- @@ -123,6 +128,8 @@ def grpc_connection( # pylint: disable=R0913, R0915 """ if isinstance(root_certificates, str): root_certificates = Path(root_certificates).read_bytes() + if authentication_keys is not None: + log(ERROR, "Client authentication is not supported for this transport type.") channel = create_channel( server_address=server_address, @@ -230,7 +237,7 @@ def send(message: Message) -> None: try: # Yield methods - yield (receive, send, None, None, None) + yield (receive, send, None, None, None, None) finally: # Make sure to have a final channel.close() diff --git a/src/py/flwr/client/grpc_client/connection_test.py b/src/py/flwr/client/grpc_client/connection_test.py index da7800b26639..13bd2c6af8e7 100644 --- a/src/py/flwr/client/grpc_client/connection_test.py +++ b/src/py/flwr/client/grpc_client/connection_test.py @@ -17,8 +17,9 @@ import concurrent.futures import socket +from collections.abc import Iterator from contextlib import closing -from typing import Iterator, cast +from typing import cast from unittest.mock import patch import grpc @@ -138,7 +139,7 @@ def run_client() -> int: max_time=None, ), ) as conn: - receive, send, _, _, _ = conn + receive, send, _, _, _, _ = conn # Setup processing loop while True: diff --git a/src/py/flwr/client/grpc_rere_client/__init__.py b/src/py/flwr/client/grpc_rere_client/__init__.py index 93903e725776..e7c9408c0047 100644 --- a/src/py/flwr/client/grpc_rere_client/__init__.py +++ b/src/py/flwr/client/grpc_rere_client/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/src/py/flwr/client/grpc_rere_client/client_interceptor.py b/src/py/flwr/client/grpc_rere_client/client_interceptor.py index 8bc55878971d..041860957db7 100644 --- a/src/py/flwr/client/grpc_rere_client/client_interceptor.py +++ b/src/py/flwr/client/grpc_rere_client/client_interceptor.py @@ -17,25 +17,29 @@ import base64 import collections -from typing import Any, Callable, Optional, Sequence, Tuple, Union +from collections.abc import Sequence +from logging import WARNING +from typing import Any, Callable, Optional, Union import grpc from cryptography.hazmat.primitives.asymmetric import ec +from flwr.common.logger import log from flwr.common.secure_aggregation.crypto.symmetric_encryption import ( bytes_to_public_key, compute_hmac, generate_shared_key, public_key_to_bytes, ) +from flwr.proto.fab_pb2 import GetFabRequest # pylint: disable=E0611 from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 CreateNodeRequest, DeleteNodeRequest, - GetRunRequest, PingRequest, PullTaskInsRequest, PushTaskResRequest, ) +from flwr.proto.run_pb2 import GetRunRequest # pylint: disable=E0611 _PUBLIC_KEY_HEADER = "public-key" _AUTH_TOKEN_HEADER = "auth-token" @@ -47,11 +51,12 @@ PushTaskResRequest, GetRunRequest, PingRequest, + GetFabRequest, ] def _get_value_from_tuples( - key_string: str, tuples: Sequence[Tuple[str, Union[str, bytes]]] + key_string: str, tuples: Sequence[tuple[str, Union[str, bytes]]] ) -> bytes: value = next((value for key, value in tuples if key == key_string), "") if isinstance(value, str): @@ -123,18 +128,18 @@ def intercept_unary_unary( PushTaskResRequest, GetRunRequest, PingRequest, + GetFabRequest, ), ): if self.shared_secret is None: raise RuntimeError("Failure to compute hmac") + message_bytes = request.SerializeToString(deterministic=True) metadata.append( ( _AUTH_TOKEN_HEADER, base64.urlsafe_b64encode( - compute_hmac( - self.shared_secret, request.SerializeToString(True) - ) + compute_hmac(self.shared_secret, message_bytes) ), ) ) @@ -151,8 
+156,15 @@ def intercept_unary_unary( server_public_key_bytes = base64.urlsafe_b64decode( _get_value_from_tuples(_PUBLIC_KEY_HEADER, response.initial_metadata()) ) - self.server_public_key = bytes_to_public_key(server_public_key_bytes) - self.shared_secret = generate_shared_key( - self.private_key, self.server_public_key - ) + + if server_public_key_bytes != b"": + self.server_public_key = bytes_to_public_key(server_public_key_bytes) + else: + log(WARNING, "Can't get server public key, SuperLink may be offline") + + if self.server_public_key is not None: + self.shared_secret = generate_shared_key( + self.private_key, self.server_public_key + ) + return response diff --git a/src/py/flwr/client/grpc_rere_client/client_interceptor_test.py b/src/py/flwr/client/grpc_rere_client/client_interceptor_test.py index 487361a06026..a029b926423f 100644 --- a/src/py/flwr/client/grpc_rere_client/client_interceptor_test.py +++ b/src/py/flwr/client/grpc_rere_client/client_interceptor_test.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -16,16 +16,18 @@ import base64 +import inspect import threading import unittest +from collections.abc import Sequence from concurrent import futures from logging import DEBUG, INFO, WARN -from typing import Optional, Sequence, Tuple, Union +from typing import Optional, Union, get_args import grpc from flwr.client.grpc_rere_client.connection import grpc_request_response -from flwr.common import GRPC_MAX_MESSAGE_LENGTH +from flwr.common import GRPC_MAX_MESSAGE_LENGTH, serde from flwr.common.logger import log from flwr.common.message import Message, Metadata from flwr.common.record import RecordSet @@ -41,13 +43,15 @@ CreateNodeResponse, DeleteNodeRequest, DeleteNodeResponse, - GetRunRequest, - GetRunResponse, PullTaskInsRequest, PullTaskInsResponse, PushTaskResRequest, PushTaskResResponse, ) +from flwr.proto.fleet_pb2_grpc import FleetServicer +from flwr.proto.node_pb2 import Node # pylint: disable=E0611 +from flwr.proto.run_pb2 import GetRunRequest, GetRunResponse # pylint: disable=E0611 +from flwr.proto.task_pb2 import Task, TaskIns # pylint: disable=E0611 from .client_interceptor import _AUTH_TOKEN_HEADER, _PUBLIC_KEY_HEADER, Request @@ -59,7 +63,7 @@ def __init__(self) -> None: """Initialize mock servicer.""" self._lock = threading.Lock() self._received_client_metadata: Optional[ - Sequence[Tuple[str, Union[str, bytes]]] + Sequence[tuple[str, Union[str, bytes]]] ] = None self.server_private_key, self.server_public_key = generate_key_pairs() self._received_message_bytes: bytes = b"" @@ -72,23 +76,39 @@ def unary_unary( """Handle unary call.""" with self._lock: self._received_client_metadata = context.invocation_metadata() - self._received_message_bytes = request.SerializeToString(True) + self._received_message_bytes = request.SerializeToString(deterministic=True) if isinstance(request, CreateNodeRequest): context.send_initial_metadata( - ((_PUBLIC_KEY_HEADER, self.server_public_key),) + ( + ( + _PUBLIC_KEY_HEADER, + base64.urlsafe_b64encode( + 
public_key_to_bytes(self.server_public_key) + ), + ), + ) ) - return CreateNodeResponse() + return CreateNodeResponse(node=Node(node_id=123)) if isinstance(request, DeleteNodeRequest): return DeleteNodeResponse() if isinstance(request, PushTaskResRequest): return PushTaskResResponse() - return PullTaskInsResponse() + return PullTaskInsResponse( + task_ins_list=[ + TaskIns( + task=Task( + consumer=Node(node_id=123), + recordset=serde.recordset_to_proto(RecordSet()), + ) + ) + ] + ) def received_client_metadata( self, - ) -> Optional[Sequence[Tuple[str, Union[str, bytes]]]]: + ) -> Optional[Sequence[tuple[str, Union[str, bytes]]]]: """Return received client metadata.""" with self._lock: return self._received_client_metadata @@ -133,11 +153,21 @@ def _add_generic_handler(servicer: _MockServicer, server: grpc.Server) -> None: server.add_generic_rpc_handlers((generic_handler,)) +def _get_value_from_tuples( + key_string: str, tuples: Sequence[tuple[str, Union[str, bytes]]] +) -> bytes: + value = next((value for key, value in tuples if key == key_string), "") + if isinstance(value, str): + return value.encode() + + return value + + def _init_retry_invoker() -> RetryInvoker: return RetryInvoker( wait_gen_factory=exponential, recoverable_exceptions=grpc.RpcError, - max_tries=None, + max_tries=1, max_time=None, on_giveup=lambda retry_state: ( log( @@ -203,16 +233,23 @@ def test_client_auth_create_node(self) -> None: None, (self._client_private_key, self._client_public_key), ) as conn: - _, _, create_node, _, _ = conn + _, _, create_node, _, _, _ = conn assert create_node is not None create_node() - expected_client_metadata = ( - _PUBLIC_KEY_HEADER, - base64.urlsafe_b64encode(public_key_to_bytes(self._client_public_key)), + + received_metadata = self._servicer.received_client_metadata() + assert received_metadata is not None + + actual_public_key = _get_value_from_tuples( + _PUBLIC_KEY_HEADER, received_metadata + ) + + expected_public_key = base64.urlsafe_b64encode( + 
public_key_to_bytes(self._client_public_key) ) # Assert - assert self._servicer.received_client_metadata() == expected_client_metadata + assert actual_public_key == expected_public_key def test_client_auth_delete_node(self) -> None: """Test client authentication during delete node.""" @@ -228,30 +265,32 @@ def test_client_auth_delete_node(self) -> None: None, (self._client_private_key, self._client_public_key), ) as conn: - _, _, _, delete_node, _ = conn + _, _, create_node, delete_node, _, _ = conn + assert create_node is not None + create_node() assert delete_node is not None delete_node() + + received_metadata = self._servicer.received_client_metadata() + assert received_metadata is not None + shared_secret = generate_shared_key( self._servicer.server_private_key, self._client_public_key ) - expected_hmac = compute_hmac( - shared_secret, self._servicer.received_message_bytes() + expected_hmac = base64.urlsafe_b64encode( + compute_hmac(shared_secret, self._servicer.received_message_bytes()) ) - expected_client_metadata = ( - ( - _PUBLIC_KEY_HEADER, - base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) - ), - ), - ( - _AUTH_TOKEN_HEADER, - base64.urlsafe_b64encode(expected_hmac), - ), + actual_public_key = _get_value_from_tuples( + _PUBLIC_KEY_HEADER, received_metadata + ) + actual_hmac = _get_value_from_tuples(_AUTH_TOKEN_HEADER, received_metadata) + expected_public_key = base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) ) # Assert - assert self._servicer.received_client_metadata() == expected_client_metadata + assert actual_public_key == expected_public_key + assert actual_hmac == expected_hmac def test_client_auth_receive(self) -> None: """Test client authentication during receive node.""" @@ -267,36 +306,38 @@ def test_client_auth_receive(self) -> None: None, (self._client_private_key, self._client_public_key), ) as conn: - receive, _, _, _, _ = conn + receive, _, create_node, _, _, _ = conn + assert create_node is 
not None + create_node() assert receive is not None receive() + + received_metadata = self._servicer.received_client_metadata() + assert received_metadata is not None + shared_secret = generate_shared_key( self._servicer.server_private_key, self._client_public_key ) - expected_hmac = compute_hmac( - shared_secret, self._servicer.received_message_bytes() + expected_hmac = base64.urlsafe_b64encode( + compute_hmac(shared_secret, self._servicer.received_message_bytes()) + ) + actual_public_key = _get_value_from_tuples( + _PUBLIC_KEY_HEADER, received_metadata ) - expected_client_metadata = ( - ( - _PUBLIC_KEY_HEADER, - base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) - ), - ), - ( - _AUTH_TOKEN_HEADER, - base64.urlsafe_b64encode(expected_hmac), - ), + actual_hmac = _get_value_from_tuples(_AUTH_TOKEN_HEADER, received_metadata) + expected_public_key = base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) ) # Assert - assert self._servicer.received_client_metadata() == expected_client_metadata + assert actual_public_key == expected_public_key + assert actual_hmac == expected_hmac def test_client_auth_send(self) -> None: """Test client authentication during send node.""" # Prepare retry_invoker = _init_retry_invoker() - message = Message(Metadata(0, "1", 0, 0, "", "", 0, ""), RecordSet()) + message = Message(Metadata(0, "", 123, 0, "", "", 0, ""), RecordSet()) # Execute with self._connection( @@ -307,30 +348,34 @@ def test_client_auth_send(self) -> None: None, (self._client_private_key, self._client_public_key), ) as conn: - _, send, _, _, _ = conn + receive, send, create_node, _, _, _ = conn + assert create_node is not None + create_node() + assert receive is not None + receive() assert send is not None send(message) + + received_metadata = self._servicer.received_client_metadata() + assert received_metadata is not None + shared_secret = generate_shared_key( self._servicer.server_private_key, self._client_public_key ) - 
expected_hmac = compute_hmac( - shared_secret, self._servicer.received_message_bytes() + expected_hmac = base64.urlsafe_b64encode( + compute_hmac(shared_secret, self._servicer.received_message_bytes()) ) - expected_client_metadata = ( - ( - _PUBLIC_KEY_HEADER, - base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) - ), - ), - ( - _AUTH_TOKEN_HEADER, - base64.urlsafe_b64encode(expected_hmac), - ), + actual_public_key = _get_value_from_tuples( + _PUBLIC_KEY_HEADER, received_metadata + ) + actual_hmac = _get_value_from_tuples(_AUTH_TOKEN_HEADER, received_metadata) + expected_public_key = base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) ) # Assert - assert self._servicer.received_client_metadata() == expected_client_metadata + assert actual_public_key == expected_public_key + assert actual_hmac == expected_hmac def test_client_auth_get_run(self) -> None: """Test client authentication during send node.""" @@ -346,30 +391,67 @@ def test_client_auth_get_run(self) -> None: None, (self._client_private_key, self._client_public_key), ) as conn: - _, _, _, _, get_run = conn + _, _, create_node, _, get_run, _ = conn + assert create_node is not None + create_node() assert get_run is not None get_run(0) + + received_metadata = self._servicer.received_client_metadata() + assert received_metadata is not None + shared_secret = generate_shared_key( self._servicer.server_private_key, self._client_public_key ) - expected_hmac = compute_hmac( - shared_secret, self._servicer.received_message_bytes() + expected_hmac = base64.urlsafe_b64encode( + compute_hmac(shared_secret, self._servicer.received_message_bytes()) + ) + actual_public_key = _get_value_from_tuples( + _PUBLIC_KEY_HEADER, received_metadata ) - expected_client_metadata = ( - ( - _PUBLIC_KEY_HEADER, - base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) - ), - ), - ( - _AUTH_TOKEN_HEADER, - base64.urlsafe_b64encode(expected_hmac), - ), + actual_hmac = 
_get_value_from_tuples(_AUTH_TOKEN_HEADER, received_metadata) + expected_public_key = base64.urlsafe_b64encode( + public_key_to_bytes(self._client_public_key) ) # Assert - assert self._servicer.received_client_metadata() == expected_client_metadata + assert actual_public_key == expected_public_key + assert actual_hmac == expected_hmac + + def test_without_servicer(self) -> None: + """Test client authentication without servicer.""" + # Prepare + self._server.stop(grace=None) + retry_invoker = _init_retry_invoker() + + # Execute and Assert + with self._connection( + self._address, + True, + retry_invoker, + GRPC_MAX_MESSAGE_LENGTH, + None, + (self._client_private_key, self._client_public_key), + ) as conn: + _, _, create_node, _, _, _ = conn + assert create_node is not None + create_node() + + assert self._servicer.received_client_metadata() is None + + def test_fleet_requests_included(self) -> None: + """Test if all Fleet requests are included in the authentication mode.""" + # Prepare + requests = get_args(Request) + rpc_names = {req.__qualname__.removesuffix("Request") for req in requests} + expected_rpc_names = { + name + for name, ref in inspect.getmembers(FleetServicer) + if inspect.isfunction(ref) + } + + # Assert + assert expected_rpc_names == rpc_names if __name__ == "__main__": diff --git a/src/py/flwr/client/grpc_rere_client/connection.py b/src/py/flwr/client/grpc_rere_client/connection.py index 8ef8e7ebf62a..bfc20eee896a 100644 --- a/src/py/flwr/client/grpc_rere_client/connection.py +++ b/src/py/flwr/client/grpc_rere_client/connection.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,11 +17,12 @@ import random import threading +from collections.abc import Iterator, Sequence from contextlib import contextmanager from copy import copy from logging import DEBUG, ERROR from pathlib import Path -from typing import Callable, Iterator, Optional, Sequence, Tuple, Type, Union, cast +from typing import Callable, Optional, Union, cast import grpc from cryptography.hazmat.primitives.asymmetric import ec @@ -40,12 +41,16 @@ from flwr.common.logger import log from flwr.common.message import Message, Metadata from flwr.common.retry_invoker import RetryInvoker -from flwr.common.serde import message_from_taskins, message_to_taskres +from flwr.common.serde import ( + message_from_taskins, + message_to_taskres, + user_config_from_proto, +) +from flwr.common.typing import Fab, Run +from flwr.proto.fab_pb2 import GetFabRequest, GetFabResponse # pylint: disable=E0611 from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 CreateNodeRequest, DeleteNodeRequest, - GetRunRequest, - GetRunResponse, PingRequest, PingResponse, PullTaskInsRequest, @@ -53,9 +58,11 @@ ) from flwr.proto.fleet_pb2_grpc import FleetStub # pylint: disable=E0611 from flwr.proto.node_pb2 import Node # pylint: disable=E0611 +from flwr.proto.run_pb2 import GetRunRequest, GetRunResponse # pylint: disable=E0611 from flwr.proto.task_pb2 import TaskIns # pylint: disable=E0611 from .client_interceptor import AuthenticateClientInterceptor +from .grpc_adapter import GrpcAdapter def on_channel_state_change(channel_connectivity: str) -> None: @@ -64,23 +71,24 @@ def on_channel_state_change(channel_connectivity: str) -> None: @contextmanager -def grpc_request_response( # pylint: disable=R0913, R0914, R0915 +def grpc_request_response( # pylint: disable=R0913,R0914,R0915,R0917 server_address: str, insecure: bool, retry_invoker: RetryInvoker, max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, # pylint: disable=W0613 root_certificates: Optional[Union[bytes, str]] = None, authentication_keys: Optional[ - 
Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] ] = None, - adapter_cls: Optional[Type[FleetStub]] = None, + adapter_cls: Optional[Union[type[FleetStub], type[GrpcAdapter]]] = None, ) -> Iterator[ - Tuple[ + tuple[ Callable[[], Optional[Message]], Callable[[Message], None], + Optional[Callable[[], Optional[int]]], Optional[Callable[[], None]], - Optional[Callable[[], None]], - Optional[Callable[[int], Tuple[str, str]]], + Optional[Callable[[int], Run]], + Optional[Callable[[str], Fab]], ] ]: """Primitives for request/response-based interaction with a server. @@ -107,6 +115,14 @@ def grpc_request_response( # pylint: disable=R0913, R0914, R0915 Path of the root certificate. If provided, a secure connection using the certificates will be established to an SSL-enabled Flower server. Bytes won't work for the REST API. + authentication_keys : Optional[Tuple[PrivateKey, PublicKey]] (default: None) + Tuple containing the elliptic curve private key and public key for + authentication from the cryptography library. + Source: https://cryptography.io/en/latest/hazmat/primitives/asymmetric/ec/ + Used to establish an authenticated connection with the server. + adapter_cls: Optional[Union[type[FleetStub], type[GrpcAdapter]]] (default: None) + A GrpcStub Class that can be used to send messages. By default the FleetStub + will be used. 
Returns ------- @@ -114,6 +130,7 @@ def grpc_request_response( # pylint: disable=R0913, R0914, R0915 send : Callable create_node : Optional[Callable] delete_node : Optional[Callable] + get_run : Optional[Callable] """ if isinstance(root_certificates, str): root_certificates = Path(root_certificates).read_bytes() @@ -169,7 +186,7 @@ def ping() -> None: if not ping_stop_event.is_set(): ping_stop_event.wait(next_interval) - def create_node() -> None: + def create_node() -> Optional[int]: """Set create_node.""" # Call FleetAPI create_node_request = CreateNodeRequest(ping_interval=PING_DEFAULT_INTERVAL) @@ -182,6 +199,7 @@ def create_node() -> None: nonlocal node, ping_thread node = cast(Node, create_node_response.node) ping_thread = start_ping_loop(ping, ping_stop_event) + return node.node_id def delete_node() -> None: """Set delete_node.""" @@ -193,8 +211,6 @@ def delete_node() -> None: # Stop the ping-loop thread ping_stop_event.set() - if ping_thread is not None: - ping_thread.join() # Call FleetAPI delete_node_request = DeleteNodeRequest(node=node) @@ -256,25 +272,41 @@ def send(message: Message) -> None: task_res = message_to_taskres(message) # Serialize ProtoBuf to bytes - request = PushTaskResRequest(task_res_list=[task_res]) + request = PushTaskResRequest(node=node, task_res_list=[task_res]) _ = retry_invoker.invoke(stub.PushTaskRes, request) # Cleanup metadata = None - def get_run(run_id: int) -> Tuple[str, str]: + def get_run(run_id: int) -> Run: # Call FleetAPI - get_run_request = GetRunRequest(run_id=run_id) + get_run_request = GetRunRequest(node=node, run_id=run_id) get_run_response: GetRunResponse = retry_invoker.invoke( stub.GetRun, request=get_run_request, ) # Return fab_id and fab_version - return get_run_response.run.fab_id, get_run_response.run.fab_version + return Run( + run_id, + get_run_response.run.fab_id, + get_run_response.run.fab_version, + get_run_response.run.fab_hash, + user_config_from_proto(get_run_response.run.override_config), + ) + + 
def get_fab(fab_hash: str) -> Fab: + # Call FleetAPI + get_fab_request = GetFabRequest(node=node, hash_str=fab_hash) + get_fab_response: GetFabResponse = retry_invoker.invoke( + stub.GetFab, + request=get_fab_request, + ) + + return Fab(get_fab_response.fab.hash_str, get_fab_response.fab.content) try: # Yield methods - yield (receive, send, create_node, delete_node, get_run) + yield (receive, send, create_node, delete_node, get_run, get_fab) except Exception as exc: # pylint: disable=broad-except log(ERROR, exc) diff --git a/src/py/flwr/client/grpc_rere_client/grpc_adapter.py b/src/py/flwr/client/grpc_rere_client/grpc_adapter.py new file mode 100644 index 000000000000..69ea29d5b7b3 --- /dev/null +++ b/src/py/flwr/client/grpc_rere_client/grpc_adapter.py @@ -0,0 +1,151 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""GrpcAdapter implementation.""" + + +import sys +from logging import DEBUG +from typing import Any, TypeVar, cast + +import grpc +from google.protobuf.message import Message as GrpcMessage + +from flwr.common import log +from flwr.common.constant import ( + GRPC_ADAPTER_METADATA_FLOWER_PACKAGE_NAME_KEY, + GRPC_ADAPTER_METADATA_FLOWER_PACKAGE_VERSION_KEY, + GRPC_ADAPTER_METADATA_FLOWER_VERSION_KEY, + GRPC_ADAPTER_METADATA_MESSAGE_MODULE_KEY, + GRPC_ADAPTER_METADATA_MESSAGE_QUALNAME_KEY, + GRPC_ADAPTER_METADATA_SHOULD_EXIT_KEY, +) +from flwr.common.version import package_name, package_version +from flwr.proto.fab_pb2 import GetFabRequest, GetFabResponse # pylint: disable=E0611 +from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 + CreateNodeRequest, + CreateNodeResponse, + DeleteNodeRequest, + DeleteNodeResponse, + PingRequest, + PingResponse, + PullTaskInsRequest, + PullTaskInsResponse, + PushTaskResRequest, + PushTaskResResponse, +) +from flwr.proto.grpcadapter_pb2 import MessageContainer # pylint: disable=E0611 +from flwr.proto.grpcadapter_pb2_grpc import GrpcAdapterStub +from flwr.proto.run_pb2 import GetRunRequest, GetRunResponse # pylint: disable=E0611 + +T = TypeVar("T", bound=GrpcMessage) + + +class GrpcAdapter: + """Adapter class to send and receive gRPC messages via the ``GrpcAdapterStub``. + + This class utilizes the ``GrpcAdapterStub`` to send and receive gRPC messages + which are defined and used by the Fleet API, as defined in ``fleet.proto``. 
+ """ + + def __init__(self, channel: grpc.Channel) -> None: + self.stub = GrpcAdapterStub(channel) + + def _send_and_receive( + self, request: GrpcMessage, response_type: type[T], **kwargs: Any + ) -> T: + # Serialize request + req_cls = request.__class__ + container_req = MessageContainer( + metadata={ + GRPC_ADAPTER_METADATA_FLOWER_PACKAGE_NAME_KEY: package_name, + GRPC_ADAPTER_METADATA_FLOWER_PACKAGE_VERSION_KEY: package_version, + GRPC_ADAPTER_METADATA_FLOWER_VERSION_KEY: package_version, + GRPC_ADAPTER_METADATA_MESSAGE_MODULE_KEY: req_cls.__module__, + GRPC_ADAPTER_METADATA_MESSAGE_QUALNAME_KEY: req_cls.__qualname__, + }, + grpc_message_name=req_cls.__qualname__, + grpc_message_content=request.SerializeToString(), + ) + + # Send via the stub + container_res = cast( + MessageContainer, self.stub.SendReceive(container_req, **kwargs) + ) + + # Handle control message + should_exit = ( + container_res.metadata.get(GRPC_ADAPTER_METADATA_SHOULD_EXIT_KEY, "false") + == "true" + ) + if should_exit: + log( + DEBUG, + 'Received shutdown signal: exit flag is set to ``"true"``. Exiting...', + ) + sys.exit(0) + + # Check the grpc_message_name of the response + if container_res.grpc_message_name != response_type.__qualname__: + raise ValueError( + f"Invalid grpc_message_name. Expected {response_type.__qualname__}" + f", but got {container_res.grpc_message_name}." 
+ ) + + # Deserialize response + response = response_type() + response.ParseFromString(container_res.grpc_message_content) + return response + + def CreateNode( # pylint: disable=C0103 + self, request: CreateNodeRequest, **kwargs: Any + ) -> CreateNodeResponse: + """.""" + return self._send_and_receive(request, CreateNodeResponse, **kwargs) + + def DeleteNode( # pylint: disable=C0103 + self, request: DeleteNodeRequest, **kwargs: Any + ) -> DeleteNodeResponse: + """.""" + return self._send_and_receive(request, DeleteNodeResponse, **kwargs) + + def Ping( # pylint: disable=C0103 + self, request: PingRequest, **kwargs: Any + ) -> PingResponse: + """.""" + return self._send_and_receive(request, PingResponse, **kwargs) + + def PullTaskIns( # pylint: disable=C0103 + self, request: PullTaskInsRequest, **kwargs: Any + ) -> PullTaskInsResponse: + """.""" + return self._send_and_receive(request, PullTaskInsResponse, **kwargs) + + def PushTaskRes( # pylint: disable=C0103 + self, request: PushTaskResRequest, **kwargs: Any + ) -> PushTaskResResponse: + """.""" + return self._send_and_receive(request, PushTaskResResponse, **kwargs) + + def GetRun( # pylint: disable=C0103 + self, request: GetRunRequest, **kwargs: Any + ) -> GetRunResponse: + """.""" + return self._send_and_receive(request, GetRunResponse, **kwargs) + + def GetFab( # pylint: disable=C0103 + self, request: GetFabRequest, **kwargs: Any + ) -> GetFabResponse: + """.""" + return self._send_and_receive(request, GetFabResponse, **kwargs) diff --git a/src/py/flwr/client/grpc_rere_client/grpc_adapter_test.py b/src/py/flwr/client/grpc_rere_client/grpc_adapter_test.py new file mode 100644 index 000000000000..e62111e084bc --- /dev/null +++ b/src/py/flwr/client/grpc_rere_client/grpc_adapter_test.py @@ -0,0 +1,38 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for the GrpcAdapter class.""" + + +import inspect + +from flwr.proto.fleet_pb2_grpc import FleetServicer + +from .grpc_adapter import GrpcAdapter + + +def test_grpc_adapter_methods() -> None: + """Test if GrpcAdapter implements all required methods.""" + # Prepare + methods = { + name for name, ref in inspect.getmembers(GrpcAdapter) if inspect.isfunction(ref) + } + expected_methods = { + name + for name, ref in inspect.getmembers(FleetServicer) + if inspect.isfunction(ref) + } + + # Assert + assert expected_methods.issubset(methods) diff --git a/src/py/flwr/client/message_handler/__init__.py b/src/py/flwr/client/message_handler/__init__.py index 653563963de5..a345b4af3ef2 100644 --- a/src/py/flwr/client/message_handler/__init__.py +++ b/src/py/flwr/client/message_handler/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2022 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/src/py/flwr/client/message_handler/message_handler.py b/src/py/flwr/client/message_handler/message_handler.py index e5acbe0cc9d0..765c6a6b2e91 100644 --- a/src/py/flwr/client/message_handler/message_handler.py +++ b/src/py/flwr/client/message_handler/message_handler.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2022 Flower Labs GmbH. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,9 +14,8 @@ # ============================================================================== """Client-side message handler.""" - from logging import WARN -from typing import Optional, Tuple, cast +from typing import Optional, cast from flwr.client.client import ( maybe_call_evaluate, @@ -25,7 +24,7 @@ maybe_call_get_properties, ) from flwr.client.numpy_client import NumPyClient -from flwr.client.typing import ClientFn +from flwr.client.typing import ClientFnExt from flwr.common import ConfigsRecord, Context, Message, Metadata, RecordSet, log from flwr.common.constant import MessageType, MessageTypeLegacy from flwr.common.recordset_compat import ( @@ -53,7 +52,7 @@ class UnknownServerMessage(Exception): """Exception indicating that the received message is unknown.""" -def handle_control_message(message: Message) -> Tuple[Optional[Message], int]: +def handle_control_message(message: Message) -> tuple[Optional[Message], int]: """Handle control part of the incoming message. 
Parameters @@ -90,10 +89,10 @@ def handle_control_message(message: Message) -> Tuple[Optional[Message], int]: def handle_legacy_message_from_msgtype( - client_fn: ClientFn, message: Message, context: Context + client_fn: ClientFnExt, message: Message, context: Context ) -> Message: """Handle legacy message in the inner most mod.""" - client = client_fn(str(message.metadata.partition_id)) + client = client_fn(context) # Check if NumPyClient is returend if isinstance(client, NumPyClient): @@ -148,7 +147,7 @@ def handle_legacy_message_from_msgtype( def _reconnect( reconnect_msg: ServerMessage.ReconnectIns, -) -> Tuple[ClientMessage, int]: +) -> tuple[ClientMessage, int]: # Determine the reason for sending DisconnectRes message reason = Reason.ACK sleep_duration = None diff --git a/src/py/flwr/client/message_handler/message_handler_test.py b/src/py/flwr/client/message_handler/message_handler_test.py index 8a2db1804e4a..311f8c37e1b1 100644 --- a/src/py/flwr/client/message_handler/message_handler_test.py +++ b/src/py/flwr/client/message_handler/message_handler_test.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2022 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -19,10 +19,9 @@ import unittest import uuid from copy import copy -from typing import List from flwr.client import Client -from flwr.client.typing import ClientFn +from flwr.client.typing import ClientFnExt from flwr.common import ( DEFAULT_TTL, Code, @@ -113,8 +112,8 @@ def evaluate(self, ins: EvaluateIns) -> EvaluateRes: ) -def _get_client_fn(client: Client) -> ClientFn: - def client_fn(cid: str) -> Client: # pylint: disable=unused-argument +def _get_client_fn(client: Client) -> ClientFnExt: + def client_fn(contex: Context) -> Client: # pylint: disable=unused-argument return client return client_fn @@ -143,7 +142,7 @@ def test_client_without_get_properties() -> None: actual_msg = handle_legacy_message_from_msgtype( client_fn=_get_client_fn(client), message=message, - context=Context(state=RecordSet()), + context=Context(node_id=1123, node_config={}, state=RecordSet(), run_config={}), ) # Assert @@ -207,7 +206,7 @@ def test_client_with_get_properties() -> None: actual_msg = handle_legacy_message_from_msgtype( client_fn=_get_client_fn(client), message=message, - context=Context(state=RecordSet()), + context=Context(node_id=1123, node_config={}, state=RecordSet(), run_config={}), ) # Assert @@ -294,7 +293,7 @@ def test_invalid_message_run_id(self) -> None: msg = Message(metadata=self.valid_out_metadata, content=RecordSet()) # Execute - invalid_metadata_list: List[Metadata] = [] + invalid_metadata_list: list[Metadata] = [] attrs = list(vars(self.valid_out_metadata).keys()) for attr in attrs: if attr == "_partition_id": diff --git a/src/py/flwr/client/mod/__init__.py b/src/py/flwr/client/mod/__init__.py index 1cd79fa944fe..35d1fa81805c 100644 --- a/src/py/flwr/client/mod/__init__.py +++ b/src/py/flwr/client/mod/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""Mods.""" +"""Flower Built-in Mods.""" from .centraldp_mods import adaptiveclipping_mod, fixedclipping_mod @@ -22,12 +22,12 @@ from .utils import make_ffn __all__ = [ + "LocalDpMod", "adaptiveclipping_mod", "fixedclipping_mod", - "LocalDpMod", "make_ffn", - "secagg_mod", - "secaggplus_mod", "message_size_mod", "parameters_size_mod", + "secagg_mod", + "secaggplus_mod", ] diff --git a/src/py/flwr/client/mod/secure_aggregation/__init__.py b/src/py/flwr/client/mod/secure_aggregation/__init__.py index 8892d8c03935..a64bc89e62c9 100644 --- a/src/py/flwr/client/mod/secure_aggregation/__init__.py +++ b/src/py/flwr/client/mod/secure_aggregation/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py index 5b196ad84321..f9d3c433157d 100644 --- a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py +++ b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py @@ -18,7 +18,7 @@ import os from dataclasses import dataclass, field from logging import DEBUG, WARNING -from typing import Any, Dict, List, Tuple, cast +from typing import Any, cast from flwr.client.typing import ClientAppCallable from flwr.common import ( @@ -91,11 +91,11 @@ class SecAggPlusState: # Random seed for generating the private mask rd_seed: bytes = b"" - rd_seed_share_dict: Dict[int, bytes] = field(default_factory=dict) - sk1_share_dict: Dict[int, bytes] = field(default_factory=dict) + rd_seed_share_dict: dict[int, bytes] = field(default_factory=dict) + sk1_share_dict: dict[int, bytes] = field(default_factory=dict) # The dict of the shared secrets from sk2 - ss2_dict: Dict[int, bytes] = field(default_factory=dict) - public_keys_dict: Dict[int, Tuple[bytes, bytes]] = field(default_factory=dict) + ss2_dict: dict[int, bytes] = field(default_factory=dict) + public_keys_dict: dict[int, tuple[bytes, bytes]] = field(default_factory=dict) def __init__(self, **kwargs: ConfigsRecordValues) -> None: for k, v in kwargs.items(): @@ -104,8 +104,8 @@ def __init__(self, **kwargs: ConfigsRecordValues) -> None: new_v: Any = v if k.endswith(":K"): k = k[:-2] - keys = cast(List[int], v) - values = cast(List[bytes], kwargs[f"{k}:V"]) + keys = cast(list[int], v) + values = cast(list[bytes], kwargs[f"{k}:V"]) if len(values) > len(keys): updated_values = [ tuple(values[i : i + 2]) for i in range(0, len(values), 2) @@ -115,17 +115,17 @@ def __init__(self, **kwargs: ConfigsRecordValues) -> None: new_v = dict(zip(keys, values)) self.__setattr__(k, new_v) - def to_dict(self) -> Dict[str, ConfigsRecordValues]: + def to_dict(self) -> dict[str, ConfigsRecordValues]: """Convert the 
state to a dictionary.""" ret = vars(self) for k in list(ret.keys()): if isinstance(ret[k], dict): # Replace dict with two lists - v = cast(Dict[str, Any], ret.pop(k)) + v = cast(dict[str, Any], ret.pop(k)) ret[f"{k}:K"] = list(v.keys()) if k == "public_keys_dict": - v_list: List[bytes] = [] - for b1_b2 in cast(List[Tuple[bytes, bytes]], v.values()): + v_list: list[bytes] = [] + for b1_b2 in cast(list[tuple[bytes, bytes]], v.values()): v_list.extend(b1_b2) ret[f"{k}:V"] = v_list else: @@ -276,7 +276,7 @@ def check_configs(stage: str, configs: ConfigsRecord) -> None: ) if not isinstance(configs[key], list) or any( elm - for elm in cast(List[Any], configs[key]) + for elm in cast(list[Any], configs[key]) # pylint: disable-next=unidiomatic-typecheck if type(elm) is not expected_type ): @@ -299,7 +299,7 @@ def check_configs(stage: str, configs: ConfigsRecord) -> None: ) if not isinstance(configs[key], list) or any( elm - for elm in cast(List[Any], configs[key]) + for elm in cast(list[Any], configs[key]) # pylint: disable-next=unidiomatic-typecheck if type(elm) is not expected_type ): @@ -314,7 +314,7 @@ def check_configs(stage: str, configs: ConfigsRecord) -> None: def _setup( state: SecAggPlusState, configs: ConfigsRecord -) -> Dict[str, ConfigsRecordValues]: +) -> dict[str, ConfigsRecordValues]: # Assigning parameter values to object fields sec_agg_param_dict = configs state.sample_num = cast(int, sec_agg_param_dict[Key.SAMPLE_NUMBER]) @@ -350,8 +350,8 @@ def _setup( # pylint: disable-next=too-many-locals def _share_keys( state: SecAggPlusState, configs: ConfigsRecord -) -> Dict[str, ConfigsRecordValues]: - named_bytes_tuples = cast(Dict[str, Tuple[bytes, bytes]], configs) +) -> dict[str, ConfigsRecordValues]: + named_bytes_tuples = cast(dict[str, tuple[bytes, bytes]], configs) key_dict = {int(sid): (pk1, pk2) for sid, (pk1, pk2) in named_bytes_tuples.items()} log(DEBUG, "Node %d: starting stage 1...", state.nid) state.public_keys_dict = key_dict @@ -361,7 +361,7 @@ 
def _share_keys( raise ValueError("Available neighbours number smaller than threshold") # Check if all public keys are unique - pk_list: List[bytes] = [] + pk_list: list[bytes] = [] for pk1, pk2 in state.public_keys_dict.values(): pk_list.append(pk1) pk_list.append(pk2) @@ -415,11 +415,11 @@ def _collect_masked_vectors( configs: ConfigsRecord, num_examples: int, updated_parameters: Parameters, -) -> Dict[str, ConfigsRecordValues]: +) -> dict[str, ConfigsRecordValues]: log(DEBUG, "Node %d: starting stage 2...", state.nid) - available_clients: List[int] = [] - ciphertexts = cast(List[bytes], configs[Key.CIPHERTEXT_LIST]) - srcs = cast(List[int], configs[Key.SOURCE_LIST]) + available_clients: list[int] = [] + ciphertexts = cast(list[bytes], configs[Key.CIPHERTEXT_LIST]) + srcs = cast(list[int], configs[Key.SOURCE_LIST]) if len(ciphertexts) + 1 < state.threshold: raise ValueError("Not enough available neighbour clients.") @@ -467,7 +467,7 @@ def _collect_masked_vectors( quantized_parameters = factor_combine(q_ratio, quantized_parameters) - dimensions_list: List[Tuple[int, ...]] = [a.shape for a in quantized_parameters] + dimensions_list: list[tuple[int, ...]] = [a.shape for a in quantized_parameters] # Add private mask private_mask = pseudo_rand_gen(state.rd_seed, state.mod_range, dimensions_list) @@ -499,11 +499,11 @@ def _collect_masked_vectors( def _unmask( state: SecAggPlusState, configs: ConfigsRecord -) -> Dict[str, ConfigsRecordValues]: +) -> dict[str, ConfigsRecordValues]: log(DEBUG, "Node %d: starting stage 3...", state.nid) - active_nids = cast(List[int], configs[Key.ACTIVE_NODE_ID_LIST]) - dead_nids = cast(List[int], configs[Key.DEAD_NODE_ID_LIST]) + active_nids = cast(list[int], configs[Key.ACTIVE_NODE_ID_LIST]) + dead_nids = cast(list[int], configs[Key.DEAD_NODE_ID_LIST]) # Send private mask seed share for every avaliable client (including itself) # Send first private key share for building pairwise mask for every dropped client if len(active_nids) < 
state.threshold: diff --git a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py index 36844a2983a1..e68bf5177797 100644 --- a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py +++ b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py @@ -16,7 +16,7 @@ import unittest from itertools import product -from typing import Callable, Dict, List +from typing import Callable from flwr.client.mod import make_ffn from flwr.common import ( @@ -41,7 +41,7 @@ def get_test_handler( ctxt: Context, -) -> Callable[[Dict[str, ConfigsRecordValues]], ConfigsRecord]: +) -> Callable[[dict[str, ConfigsRecordValues]], ConfigsRecord]: """.""" def empty_ffn(_msg: Message, _2: Context) -> Message: @@ -49,7 +49,7 @@ def empty_ffn(_msg: Message, _2: Context) -> Message: app = make_ffn(empty_ffn, [secaggplus_mod]) - def func(configs: Dict[str, ConfigsRecordValues]) -> ConfigsRecord: + def func(configs: dict[str, ConfigsRecordValues]) -> ConfigsRecord: in_msg = Message( metadata=Metadata( run_id=0, @@ -73,7 +73,12 @@ def func(configs: Dict[str, ConfigsRecordValues]) -> ConfigsRecord: def _make_ctxt() -> Context: cfg = ConfigsRecord(SecAggPlusState().to_dict()) - return Context(RecordSet(configs_records={RECORD_KEY_STATE: cfg})) + return Context( + node_id=123, + node_config={}, + state=RecordSet(configs_records={RECORD_KEY_STATE: cfg}), + run_config={}, + ) def _make_set_state_fn( @@ -153,7 +158,7 @@ def test_stage_setup_check(self) -> None: (Key.MOD_RANGE, int), ] - type_to_test_value: Dict[type, ConfigsRecordValues] = { + type_to_test_value: dict[type, ConfigsRecordValues] = { int: 10, bool: True, float: 1.0, @@ -161,7 +166,7 @@ def test_stage_setup_check(self) -> None: bytes: b"test", } - valid_configs: Dict[str, ConfigsRecordValues] = { + valid_configs: dict[str, ConfigsRecordValues] = { key: type_to_test_value[value_type] for key, value_type in valid_key_type_pairs } @@ -203,7 +208,7 
@@ def test_stage_share_keys_check(self) -> None: handler = get_test_handler(ctxt) set_stage = _make_set_state_fn(ctxt) - valid_configs: Dict[str, ConfigsRecordValues] = { + valid_configs: dict[str, ConfigsRecordValues] = { "1": [b"public key 1", b"public key 2"], "2": [b"public key 1", b"public key 2"], "3": [b"public key 1", b"public key 2"], @@ -220,7 +225,7 @@ def test_stage_share_keys_check(self) -> None: valid_configs[Key.STAGE] = Stage.SHARE_KEYS # Test invalid configs - invalid_values: List[ConfigsRecordValues] = [ + invalid_values: list[ConfigsRecordValues] = [ b"public key 1", [b"public key 1"], [b"public key 1", b"public key 2", b"public key 3"], @@ -240,7 +245,7 @@ def test_stage_collect_masked_vectors_check(self) -> None: handler = get_test_handler(ctxt) set_stage = _make_set_state_fn(ctxt) - valid_configs: Dict[str, ConfigsRecordValues] = { + valid_configs: dict[str, ConfigsRecordValues] = { Key.CIPHERTEXT_LIST: [b"ctxt!", b"ctxt@", b"ctxt#", b"ctxt?"], Key.SOURCE_LIST: [32, 51324, 32324123, -3], } @@ -284,7 +289,7 @@ def test_stage_unmask_check(self) -> None: handler = get_test_handler(ctxt) set_stage = _make_set_state_fn(ctxt) - valid_configs: Dict[str, ConfigsRecordValues] = { + valid_configs: dict[str, ConfigsRecordValues] = { Key.ACTIVE_NODE_ID_LIST: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], Key.DEAD_NODE_ID_LIST: [32, 51324, 32324123, -3], } diff --git a/src/py/flwr/client/mod/utils.py b/src/py/flwr/client/mod/utils.py index 4c3c32944f01..c76902cf263f 100644 --- a/src/py/flwr/client/mod/utils.py +++ b/src/py/flwr/client/mod/utils.py @@ -1,4 +1,4 @@ -# Copyright 2023 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -15,13 +15,11 @@ """Utility functions for mods.""" -from typing import List - from flwr.client.typing import ClientAppCallable, Mod from flwr.common import Context, Message -def make_ffn(ffn: ClientAppCallable, mods: List[Mod]) -> ClientAppCallable: +def make_ffn(ffn: ClientAppCallable, mods: list[Mod]) -> ClientAppCallable: """.""" def wrap_ffn(_ffn: ClientAppCallable, _mod: Mod) -> ClientAppCallable: diff --git a/src/py/flwr/client/mod/utils_test.py b/src/py/flwr/client/mod/utils_test.py index 4676a2c02c4b..e75fb5530b2c 100644 --- a/src/py/flwr/client/mod/utils_test.py +++ b/src/py/flwr/client/mod/utils_test.py @@ -1,4 +1,4 @@ -# Copyright 2023 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,7 +16,7 @@ import unittest -from typing import List, cast +from typing import cast from flwr.client.typing import ClientAppCallable, Mod from flwr.common import ( @@ -43,7 +43,7 @@ def _increment_context_counter(context: Context) -> None: context.state.metrics_records[METRIC] = MetricsRecord({COUNTER: current_counter}) -def make_mock_mod(name: str, footprint: List[str]) -> Mod: +def make_mock_mod(name: str, footprint: list[str]) -> Mod: """Make a mock mod.""" def mod(message: Message, context: Context, app: ClientAppCallable) -> Message: @@ -61,7 +61,7 @@ def mod(message: Message, context: Context, app: ClientAppCallable) -> Message: return mod -def make_mock_app(name: str, footprint: List[str]) -> ClientAppCallable: +def make_mock_app(name: str, footprint: list[str]) -> ClientAppCallable: """Make a mock app.""" def app(message: Message, context: Context) -> Message: @@ -97,14 +97,14 @@ class TestMakeApp(unittest.TestCase): def test_multiple_mods(self) -> None: """Test if multiple mods are called in the correct order.""" # Prepare - footprint: List[str] = [] + footprint: list[str] = [] 
mock_app = make_mock_app("app", footprint) mock_mod_names = [f"mod{i}" for i in range(1, 15)] mock_mods = [make_mock_mod(name, footprint) for name in mock_mod_names] state = RecordSet() state.metrics_records[METRIC] = MetricsRecord({COUNTER: 0.0}) - context = Context(state=state) + context = Context(node_id=0, node_config={}, state=state, run_config={}) message = _get_dummy_flower_message() # Execute @@ -127,9 +127,9 @@ def test_multiple_mods(self) -> None: def test_filter(self) -> None: """Test if a mod can filter incoming TaskIns.""" # Prepare - footprint: List[str] = [] + footprint: list[str] = [] mock_app = make_mock_app("app", footprint) - context = Context(state=RecordSet()) + context = Context(node_id=0, node_config={}, state=RecordSet(), run_config={}) message = _get_dummy_flower_message() def filter_mod( diff --git a/src/py/flwr/client/node_state.py b/src/py/flwr/client/node_state.py index 71681b783419..843c9890c5d2 100644 --- a/src/py/flwr/client/node_state.py +++ b/src/py/flwr/client/node_state.py @@ -15,27 +15,85 @@ """Node state.""" -from typing import Any, Dict +from dataclasses import dataclass +from pathlib import Path +from typing import Optional from flwr.common import Context, RecordSet +from flwr.common.config import ( + get_fused_config, + get_fused_config_from_dir, + get_fused_config_from_fab, +) +from flwr.common.typing import Fab, Run, UserConfig + + +@dataclass() +class RunInfo: + """Contains the Context and initial run_config of a Run.""" + + context: Context + initial_run_config: UserConfig class NodeState: """State of a node where client nodes execute runs.""" - def __init__(self) -> None: - self._meta: Dict[str, Any] = {} # holds metadata about the node - self.run_contexts: Dict[int, Context] = {} + def __init__( + self, + node_id: int, + node_config: UserConfig, + ) -> None: + self.node_id = node_id + self.node_config = node_config + self.run_infos: dict[int, RunInfo] = {} - def register_context(self, run_id: int) -> None: + # pylint: 
disable=too-many-arguments,too-many-positional-arguments + def register_context( + self, + run_id: int, + run: Optional[Run] = None, + flwr_path: Optional[Path] = None, + app_dir: Optional[str] = None, + fab: Optional[Fab] = None, + ) -> None: """Register new run context for this node.""" - if run_id not in self.run_contexts: - self.run_contexts[run_id] = Context(state=RecordSet()) + if run_id not in self.run_infos: + initial_run_config = {} + if app_dir: + # Load from app directory + app_path = Path(app_dir) + if app_path.is_dir(): + override_config = run.override_config if run else {} + initial_run_config = get_fused_config_from_dir( + app_path, override_config + ) + else: + raise ValueError("The specified `app_dir` must be a directory.") + else: + if run: + if fab: + # Load pyproject.toml from FAB file and fuse + initial_run_config = get_fused_config_from_fab(fab.content, run) + else: + # Load pyproject.toml from installed FAB and fuse + initial_run_config = get_fused_config(run, flwr_path) + else: + initial_run_config = {} + self.run_infos[run_id] = RunInfo( + initial_run_config=initial_run_config, + context=Context( + node_id=self.node_id, + node_config=self.node_config, + state=RecordSet(), + run_config=initial_run_config.copy(), + ), + ) def retrieve_context(self, run_id: int) -> Context: """Get run context given a run_id.""" - if run_id in self.run_contexts: - return self.run_contexts[run_id] + if run_id in self.run_infos: + return self.run_infos[run_id].context raise RuntimeError( f"Context for run_id={run_id} doesn't exist." @@ -45,4 +103,9 @@ def retrieve_context(self, run_id: int) -> Context: def update_context(self, run_id: int, context: Context) -> None: """Update run context.""" - self.run_contexts[run_id] = context + if context.run_config != self.run_infos[run_id].initial_run_config: + raise ValueError( + "The `run_config` field of the `Context` object cannot be " + f"modified (run_id: {run_id})." 
+ ) + self.run_infos[run_id].context = context diff --git a/src/py/flwr/client/node_state_tests.py b/src/py/flwr/client/node_state_tests.py index 193f52661579..26ac4fea6855 100644 --- a/src/py/flwr/client/node_state_tests.py +++ b/src/py/flwr/client/node_state_tests.py @@ -41,7 +41,7 @@ def test_multirun_in_node_state() -> None: expected_values = {0: "1", 1: "1" * 3, 2: "1" * 2, 3: "1", 5: "1"} # NodeState - node_state = NodeState() + node_state = NodeState(node_id=0, node_config={}) for task in tasks: run_id = task.run_id @@ -59,7 +59,8 @@ def test_multirun_in_node_state() -> None: node_state.update_context(run_id=run_id, context=updated_state) # Verify values - for run_id, context in node_state.run_contexts.items(): + for run_id, run_info in node_state.run_infos.items(): assert ( - context.state.configs_records["counter"]["count"] == expected_values[run_id] + run_info.context.state.configs_records["counter"]["count"] + == expected_values[run_id] ) diff --git a/src/py/flwr/client/numpy_client.py b/src/py/flwr/client/numpy_client.py index 0247958d88a9..6a656cb661d2 100644 --- a/src/py/flwr/client/numpy_client.py +++ b/src/py/flwr/client/numpy_client.py @@ -16,7 +16,7 @@ from abc import ABC -from typing import Callable, Dict, Tuple +from typing import Callable from flwr.client.client import Client from flwr.common import ( @@ -27,6 +27,7 @@ ndarrays_to_parameters, parameters_to_ndarrays, ) +from flwr.common.logger import warn_deprecated_feature_with_example from flwr.common.typing import ( Code, EvaluateIns, @@ -70,9 +71,9 @@ class NumPyClient(ABC): """Abstract base class for Flower clients using NumPy.""" - context: Context + _context: Context - def get_properties(self, config: Config) -> Dict[str, Scalar]: + def get_properties(self, config: Config) -> dict[str, Scalar]: """Return a client's set of properties. 
Parameters @@ -92,7 +93,7 @@ def get_properties(self, config: Config) -> Dict[str, Scalar]: _ = (self, config) return {} - def get_parameters(self, config: Dict[str, Scalar]) -> NDArrays: + def get_parameters(self, config: dict[str, Scalar]) -> NDArrays: """Return the current local model parameters. Parameters @@ -111,8 +112,8 @@ def get_parameters(self, config: Dict[str, Scalar]) -> NDArrays: return [] def fit( - self, parameters: NDArrays, config: Dict[str, Scalar] - ) -> Tuple[NDArrays, int, Dict[str, Scalar]]: + self, parameters: NDArrays, config: dict[str, Scalar] + ) -> tuple[NDArrays, int, dict[str, Scalar]]: """Train the provided parameters using the locally held dataset. Parameters @@ -140,8 +141,8 @@ def fit( return [], 0, {} def evaluate( - self, parameters: NDArrays, config: Dict[str, Scalar] - ) -> Tuple[float, int, Dict[str, Scalar]]: + self, parameters: NDArrays, config: dict[str, Scalar] + ) -> tuple[float, int, dict[str, Scalar]]: """Evaluate the provided parameters using the locally held dataset. 
Parameters @@ -174,6 +175,26 @@ def evaluate( _ = (self, parameters, config) return 0.0, 0, {} + @property + def context(self) -> Context: + """Getter for `Context` client attribute.""" + warn_deprecated_feature_with_example( + "Accessing the context via the client's attribute is deprecated.", + example_message="Instead, pass it to the client's " + "constructor in your `client_fn()` which already " + "receives a context object.", + code_example="def client_fn(context: Context) -> Client:\n\n" + "\t\t# Your existing client_fn\n\n" + "\t\t# Pass `context` to the constructor\n" + "\t\treturn FlowerClient(context).to_client()", + ) + return self._context + + @context.setter + def context(self, context: Context) -> None: + """Setter for `Context` client attribute.""" + self._context = context + def get_context(self) -> Context: """Get the run context from this client.""" return self.context @@ -289,7 +310,7 @@ def _set_context(self: Client, context: Context) -> None: def _wrap_numpy_client(client: NumPyClient) -> Client: - member_dict: Dict[str, Callable] = { # type: ignore + member_dict: dict[str, Callable] = { # type: ignore "__init__": _constructor, "get_context": _get_context, "set_context": _set_context, diff --git a/src/py/flwr/client/numpy_client_test.py b/src/py/flwr/client/numpy_client_test.py index 526098798e45..c5d520a73ce1 100644 --- a/src/py/flwr/client/numpy_client_test.py +++ b/src/py/flwr/client/numpy_client_test.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2022 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -15,8 +15,6 @@ """Flower NumPyClient tests.""" -from typing import Dict, Tuple - from flwr.common import Config, NDArrays, Properties, Scalar from .numpy_client import ( @@ -40,14 +38,14 @@ def get_parameters(self, config: Config) -> NDArrays: return [] def fit( - self, parameters: NDArrays, config: Dict[str, Scalar] - ) -> Tuple[NDArrays, int, Dict[str, Scalar]]: + self, parameters: NDArrays, config: dict[str, Scalar] + ) -> tuple[NDArrays, int, dict[str, Scalar]]: """Simulate training by returning empty weights, 0 samples, empty metrics.""" return [], 0, {} def evaluate( - self, parameters: NDArrays, config: Dict[str, Scalar] - ) -> Tuple[float, int, Dict[str, Scalar]]: + self, parameters: NDArrays, config: dict[str, Scalar] + ) -> tuple[float, int, dict[str, Scalar]]: """Simulate evaluate by returning 0.0 loss, 0 samples, empty metrics.""" return 0.0, 0, {} diff --git a/src/py/flwr/client/rest_client/__init__.py b/src/py/flwr/client/rest_client/__init__.py index c3485483ad35..a24d822a6d75 100644 --- a/src/py/flwr/client/rest_client/__init__.py +++ b/src/py/flwr/client/rest_client/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/src/py/flwr/client/rest_client/connection.py b/src/py/flwr/client/rest_client/connection.py index da8fbd351ab1..f933ae44ad06 100644 --- a/src/py/flwr/client/rest_client/connection.py +++ b/src/py/flwr/client/rest_client/connection.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -18,10 +18,11 @@ import random import sys import threading +from collections.abc import Iterator from contextlib import contextmanager from copy import copy from logging import ERROR, INFO, WARN -from typing import Callable, Iterator, Optional, Tuple, Type, TypeVar, Union +from typing import Callable, Optional, TypeVar, Union from cryptography.hazmat.primitives.asymmetric import ec from google.protobuf.message import Message as GrpcMessage @@ -40,14 +41,18 @@ from flwr.common.logger import log from flwr.common.message import Message, Metadata from flwr.common.retry_invoker import RetryInvoker -from flwr.common.serde import message_from_taskins, message_to_taskres +from flwr.common.serde import ( + message_from_taskins, + message_to_taskres, + user_config_from_proto, +) +from flwr.common.typing import Fab, Run +from flwr.proto.fab_pb2 import GetFabRequest, GetFabResponse # pylint: disable=E0611 from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 CreateNodeRequest, CreateNodeResponse, DeleteNodeRequest, DeleteNodeResponse, - GetRunRequest, - GetRunResponse, PingRequest, PingResponse, PullTaskInsRequest, @@ -56,6 +61,7 @@ PushTaskResResponse, ) from flwr.proto.node_pb2 import Node # pylint: disable=E0611 +from flwr.proto.run_pb2 import GetRunRequest, GetRunResponse # pylint: disable=E0611 from flwr.proto.task_pb2 import TaskIns # pylint: disable=E0611 try: @@ -70,12 +76,13 @@ PATH_PUSH_TASK_RES: str = "api/v0/fleet/push-task-res" PATH_PING: str = "api/v0/fleet/ping" PATH_GET_RUN: str = "/api/v0/fleet/get-run" +PATH_GET_FAB: str = "/api/v0/fleet/get-fab" T = TypeVar("T", bound=GrpcMessage) @contextmanager -def http_request_response( # pylint: disable=,R0913, R0914, R0915 +def http_request_response( # pylint: disable=R0913,R0914,R0915,R0917 server_address: str, insecure: bool, # pylint: disable=unused-argument retry_invoker: RetryInvoker, @@ -84,15 +91,16 @@ def http_request_response( # pylint: disable=,R0913, R0914, R0915 Union[bytes, str] ] = None, # pylint: 
disable=unused-argument authentication_keys: Optional[ # pylint: disable=unused-argument - Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] ] = None, ) -> Iterator[ - Tuple[ + tuple[ Callable[[], Optional[Message]], Callable[[Message], None], + Optional[Callable[[], Optional[int]]], Optional[Callable[[], None]], - Optional[Callable[[], None]], - Optional[Callable[[int], Tuple[str, str]]], + Optional[Callable[[int], Run]], + Optional[Callable[[str], Fab]], ] ]: """Primitives for request/response-based interaction with a server. @@ -118,10 +126,16 @@ def http_request_response( # pylint: disable=,R0913, R0914, R0915 Path of the root certificate. If provided, a secure connection using the certificates will be established to an SSL-enabled Flower server. Bytes won't work for the REST API. + authentication_keys : Optional[Tuple[PrivateKey, PublicKey]] (default: None) + Client authentication is not supported for this transport type. 
Returns ------- - receive, send : Callable, Callable + receive : Callable + send : Callable + create_node : Optional[Callable] + delete_node : Optional[Callable] + get_run : Optional[Callable] """ log( WARN, @@ -146,6 +160,8 @@ def http_request_response( # pylint: disable=,R0913, R0914, R0915 "For the REST API, the root certificates " "must be provided as a string path to the client.", ) + if authentication_keys is not None: + log(ERROR, "Client authentication is not supported for this transport type.") # Shared variables for inner functions metadata: Optional[Metadata] = None @@ -158,7 +174,7 @@ def http_request_response( # pylint: disable=,R0913, R0914, R0915 ########################################################################### def _request( - req: GrpcMessage, res_type: Type[T], api_path: str, retry: bool = True + req: GrpcMessage, res_type: type[T], api_path: str, retry: bool = True ) -> Optional[T]: # Serialize the request req_bytes = req.SerializeToString() @@ -229,19 +245,20 @@ def ping() -> None: if not ping_stop_event.is_set(): ping_stop_event.wait(next_interval) - def create_node() -> None: + def create_node() -> Optional[int]: """Set create_node.""" req = CreateNodeRequest(ping_interval=PING_DEFAULT_INTERVAL) # Send the request res = _request(req, CreateNodeResponse, PATH_CREATE_NODE) if res is None: - return + return None # Remember the node and the ping-loop thread nonlocal node, ping_thread node = res.node ping_thread = start_ping_loop(ping, ping_stop_event) + return node.node_id def delete_node() -> None: """Set delete_node.""" @@ -259,7 +276,7 @@ def delete_node() -> None: req = DeleteNodeRequest(node=node) # Send the request - res = _request(req, DeleteNodeResponse, PATH_CREATE_NODE) + res = _request(req, DeleteNodeResponse, PATH_DELETE_NODE) if res is None: return @@ -323,7 +340,7 @@ def send(message: Message) -> None: task_res = message_to_taskres(message) # Serialize ProtoBuf to bytes - req = PushTaskResRequest(task_res_list=[task_res]) + 
req = PushTaskResRequest(node=node, task_res_list=[task_res]) # Send the request res = _request(req, PushTaskResResponse, PATH_PUSH_TASK_RES) @@ -337,19 +354,39 @@ def send(message: Message) -> None: res.results, # pylint: disable=no-member ) - def get_run(run_id: int) -> Tuple[str, str]: + def get_run(run_id: int) -> Run: # Construct the request - req = GetRunRequest(run_id=run_id) + req = GetRunRequest(node=node, run_id=run_id) # Send the request res = _request(req, GetRunResponse, PATH_GET_RUN) if res is None: - return "", "" + return Run(run_id, "", "", "", {}) + + return Run( + run_id, + res.run.fab_id, + res.run.fab_version, + res.run.fab_hash, + user_config_from_proto(res.run.override_config), + ) - return res.run.fab_id, res.run.fab_version + def get_fab(fab_hash: str) -> Fab: + # Construct the request + req = GetFabRequest(node=node, hash_str=fab_hash) + + # Send the request + res = _request(req, GetFabResponse, PATH_GET_FAB) + if res is None: + return Fab("", b"") + + return Fab( + res.fab.hash_str, + res.fab.content, + ) try: # Yield methods - yield (receive, send, create_node, delete_node, get_run) + yield (receive, send, create_node, delete_node, get_run, get_fab) except Exception as exc: # pylint: disable=broad-except log(ERROR, exc) diff --git a/src/py/flwr/client/supernode/app.py b/src/py/flwr/client/supernode/app.py index ac58e9aa4a81..4ddfe5d40aa3 100644 --- a/src/py/flwr/client/supernode/app.py +++ b/src/py/flwr/client/supernode/app.py @@ -16,9 +16,9 @@ import argparse import sys -from logging import DEBUG, INFO, WARN +from logging import DEBUG, ERROR, INFO, WARN from pathlib import Path -from typing import Callable, Optional, Tuple +from typing import Optional from cryptography.exceptions import UnsupportedAlgorithm from cryptography.hazmat.primitives.asymmetric import ec @@ -27,26 +27,68 @@ load_ssh_public_key, ) -from flwr.client.client_app import ClientApp, LoadClientAppError from flwr.common import EventType, event +from flwr.common.config 
import parse_config_args +from flwr.common.constant import ( + FLEET_API_GRPC_RERE_DEFAULT_ADDRESS, + TRANSPORT_TYPE_GRPC_ADAPTER, + TRANSPORT_TYPE_GRPC_RERE, + TRANSPORT_TYPE_REST, +) from flwr.common.exit_handlers import register_exit_handlers -from flwr.common.logger import log -from flwr.common.object_ref import load_app, validate +from flwr.common.logger import log, warn_deprecated_feature -from ..app import _start_client_internal +from ..app import ( + ISOLATION_MODE_PROCESS, + ISOLATION_MODE_SUBPROCESS, + start_client_internal, +) +from ..clientapp.utils import get_load_client_app_fn def run_supernode() -> None: """Run Flower SuperNode.""" + args = _parse_args_run_supernode().parse_args() + _warn_deprecated_server_arg(args) + log(INFO, "Starting Flower SuperNode") event(EventType.RUN_SUPERNODE_ENTER) - _ = _parse_args_run_supernode().parse_args() + # Check if both `--flwr-dir` and `--isolation` were set + if args.flwr_dir is not None and args.isolation is not None: + log( + WARN, + "Both `--flwr-dir` and `--isolation` were specified. 
" + "Ignoring `--flwr-dir`.", + ) - log( - DEBUG, - "Flower SuperNode starting...", + root_certificates = _get_certificates(args) + load_fn = get_load_client_app_fn( + default_app_ref="", + app_path=args.app, + flwr_dir=args.flwr_dir, + multi_app=True, + ) + authentication_keys = _try_setup_client_authentication(args) + + log(DEBUG, "Isolation mode: %s", args.isolation) + + start_client_internal( + server_address=args.superlink, + load_client_app_fn=load_fn, + transport=args.transport, + root_certificates=root_certificates, + insecure=args.insecure, + authentication_keys=authentication_keys, + max_retries=args.max_retries, + max_wait_time=args.max_wait_time, + node_config=parse_config_args( + [args.node_config] if args.node_config else args.node_config + ), + flwr_path=args.flwr_dir, + isolation=args.isolation, + supernode_address=args.supernode_address, ) # Graceful shutdown @@ -57,34 +99,35 @@ def run_supernode() -> None: def run_client_app() -> None: """Run Flower client app.""" - log(INFO, "Long-running Flower client starting") - event(EventType.RUN_CLIENT_APP_ENTER) - - args = _parse_args_run_client_app().parse_args() - - root_certificates = _get_certificates(args) log( - DEBUG, - "Flower will load ClientApp `%s`", - getattr(args, "client-app"), - ) - load_fn = _get_load_client_app_fn(args) - authentication_keys = _try_setup_client_authentication(args) - - _start_client_internal( - server_address=args.server, - load_client_app_fn=load_fn, - transport="rest" if args.rest else "grpc-rere", - root_certificates=root_certificates, - insecure=args.insecure, - authentication_keys=authentication_keys, - max_retries=args.max_retries, - max_wait_time=args.max_wait_time, + ERROR, + "The command `flower-client-app` has been replaced by `flower-supernode`.", ) + log(INFO, "Execute `flower-supernode --help` to learn how to use it.") register_exit_handlers(event_type=EventType.RUN_CLIENT_APP_LEAVE) +def _warn_deprecated_server_arg(args: argparse.Namespace) -> None: + """Warn 
about the deprecated argument `--server`.""" + if args.server != FLEET_API_GRPC_RERE_DEFAULT_ADDRESS: + warn = "Passing flag --server is deprecated. Use --superlink instead." + warn_deprecated_feature(warn) + + if args.superlink != FLEET_API_GRPC_RERE_DEFAULT_ADDRESS: + # if `--superlink` also passed, then + # warn user that this argument overrides what was passed with `--server` + log( + WARN, + "Both `--server` and `--superlink` were passed. " + "`--server` will be ignored. Connecting to the Superlink Fleet API " + "at %s.", + args.superlink, + ) + else: + args.superlink = args.server + + def _get_certificates(args: argparse.Namespace) -> Optional[bytes]: """Load certificates if specified in args.""" # Obtain certificates @@ -100,7 +143,7 @@ def _get_certificates(args: argparse.Namespace) -> Optional[bytes]: WARN, "Option `--insecure` was set. " "Starting insecure HTTP client connected to %s.", - args.server, + args.superlink, ) root_certificates = None else: @@ -114,38 +157,12 @@ def _get_certificates(args: argparse.Namespace) -> Optional[bytes]: DEBUG, "Starting secure HTTPS client connected to %s " "with the following certificates: %s.", - args.server, + args.superlink, cert_path, ) return root_certificates -def _get_load_client_app_fn( - args: argparse.Namespace, -) -> Callable[[], ClientApp]: - """Get the load_client_app_fn function.""" - client_app_dir = args.dir - if client_app_dir is not None: - sys.path.insert(0, client_app_dir) - - app_ref: str = getattr(args, "client-app") - valid, error_msg = validate(app_ref) - if not valid and error_msg: - raise LoadClientAppError(error_msg) from None - - def _load() -> ClientApp: - client_app = load_app(app_ref, LoadClientAppError) - - if not isinstance(client_app, ClientApp): - raise LoadClientAppError( - f"Attribute {app_ref} is not of type {ClientApp}", - ) from None - - return client_app - - return _load - - def _parse_args_run_supernode() -> argparse.ArgumentParser: """Parse flower-supernode command line 
arguments.""" parser = argparse.ArgumentParser( @@ -153,41 +170,47 @@ def _parse_args_run_supernode() -> argparse.ArgumentParser: ) parser.add_argument( - "client-app", + "app", nargs="?", - default="", - help="For example: `client:app` or `project.package.module:wrapper.app`. " - "This is optional and serves as the default ClientApp to be loaded when " - "the ServerApp does not specify `fab_id` and `fab_version`. " - "If not provided, defaults to an empty string.", + default=None, + help="Specify the path of the Flower App to load and run the `ClientApp`. " + "The `pyproject.toml` file must be located in the root of this path. " + "When this argument is provided, the SuperNode will exclusively respond to " + "messages from the corresponding `ServerApp` by matching the FAB ID and FAB " + "version. An error will be raised if a message is received from any other " + "`ServerApp`.", ) _parse_args_common(parser) parser.add_argument( "--flwr-dir", default=None, help="""The path containing installed Flower Apps. - By default, this value isequal to: + The default directory is: - `$FLWR_HOME/` if `$FLWR_HOME` is defined - `$XDG_DATA_HOME/.flwr/` if `$XDG_DATA_HOME` is defined - `$HOME/.flwr/` in all other cases - """, + """, ) - - return parser - - -def _parse_args_run_client_app() -> argparse.ArgumentParser: - """Parse flower-client-app command line arguments.""" - parser = argparse.ArgumentParser( - description="Start a Flower client app", + parser.add_argument( + "--isolation", + default=None, + required=False, + choices=[ + ISOLATION_MODE_SUBPROCESS, + ISOLATION_MODE_PROCESS, + ], + help="Isolation mode when running `ClientApp` (optional, possible values: " + "`subprocess`, `process`). By default, `ClientApp` runs in the same process " + "that executes the SuperNode. Use `subprocess` to configure SuperNode to run " + "`ClientApp` in a subprocess. 
Use `process` to indicate that a separate " + "independent process gets created outside of SuperNode.", ) - parser.add_argument( - "client-app", - help="For example: `client:app` or `project.package.module:wrapper.app`", + "--supernode-address", + default="0.0.0.0:9094", + help="Set the SuperNode gRPC server address. Defaults to `0.0.0.0:9094`.", ) - _parse_args_common(parser=parser) return parser @@ -199,9 +222,27 @@ def _parse_args_common(parser: argparse.ArgumentParser) -> None: help="Run the client without HTTPS. By default, the client runs with " "HTTPS enabled. Use this flag only if you understand the risks.", ) - parser.add_argument( + ex_group = parser.add_mutually_exclusive_group() + ex_group.add_argument( + "--grpc-rere", + action="store_const", + dest="transport", + const=TRANSPORT_TYPE_GRPC_RERE, + default=TRANSPORT_TYPE_GRPC_RERE, + help="Use grpc-rere as a transport layer for the client.", + ) + ex_group.add_argument( + "--grpc-adapter", + action="store_const", + dest="transport", + const=TRANSPORT_TYPE_GRPC_ADAPTER, + help="Use grpc-adapter as a transport layer for the client.", + ) + ex_group.add_argument( "--rest", - action="store_true", + action="store_const", + dest="transport", + const=TRANSPORT_TYPE_REST, help="Use REST as a transport layer for the client.", ) parser.add_argument( @@ -213,15 +254,20 @@ def _parse_args_common(parser: argparse.ArgumentParser) -> None: ) parser.add_argument( "--server", - default="0.0.0.0:9092", + default=FLEET_API_GRPC_RERE_DEFAULT_ADDRESS, help="Server address", ) + parser.add_argument( + "--superlink", + default=FLEET_API_GRPC_RERE_DEFAULT_ADDRESS, + help="SuperLink Fleet API (gRPC-rere) address (IPv4, IPv6, or a domain name)", + ) parser.add_argument( "--max-retries", type=int, default=None, - help="The maximum number of times the client will try to connect to the" - "server before giving up in case of a connection error. 
By default," + help="The maximum number of times the client will try to reconnect to the" + "SuperLink before giving up in case of a connection error. By default," "it is set to None, meaning there is no limit to the number of tries.", ) parser.add_argument( @@ -229,16 +275,9 @@ def _parse_args_common(parser: argparse.ArgumentParser) -> None: type=float, default=None, help="The maximum duration before the client stops trying to" - "connect to the server in case of connection error. By default, it" + "connect to the SuperLink in case of connection error. By default, it" "is set to None, meaning there is no limit to the total time.", ) - parser.add_argument( - "--dir", - default="", - help="Add specified directory to the PYTHONPATH and load Flower " - "app from there." - " Default: current working directory.", - ) parser.add_argument( "--auth-supernode-private-key", type=str, @@ -249,11 +288,18 @@ def _parse_args_common(parser: argparse.ArgumentParser) -> None: type=str, help="The SuperNode's public key (as a path str) to enable authentication.", ) + parser.add_argument( + "--node-config", + type=str, + help="A space separated list of key/value pairs (separated by `=`) to " + "configure the SuperNode. " + "E.g. 
--node-config 'key1=\"value1\" partition-id=0 num-partitions=100'", + ) def _try_setup_client_authentication( args: argparse.Namespace, -) -> Optional[Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey]]: +) -> Optional[tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey]]: if not args.auth_supernode_private_key and not args.auth_supernode_public_key: return None diff --git a/src/py/flwr/client/typing.py b/src/py/flwr/client/typing.py index 956ac7a15c05..9faed4bc7283 100644 --- a/src/py/flwr/client/typing.py +++ b/src/py/flwr/client/typing.py @@ -23,6 +23,7 @@ # Compatibility ClientFn = Callable[[str], Client] +ClientFnExt = Callable[[Context], Client] ClientAppCallable = Callable[[Message, Context], Message] Mod = Callable[[Message, Context, ClientAppCallable], Message] diff --git a/src/py/flwr/common/__init__.py b/src/py/flwr/common/__init__.py index 2fb98c82dd6f..925f21ddb491 100644 --- a/src/py/flwr/common/__init__.py +++ b/src/py/flwr/common/__init__.py @@ -41,6 +41,7 @@ from .typing import ClientMessage as ClientMessage from .typing import Code as Code from .typing import Config as Config +from .typing import ConfigsRecordValues as ConfigsRecordValues from .typing import DisconnectRes as DisconnectRes from .typing import EvaluateIns as EvaluateIns from .typing import EvaluateRes as EvaluateRes @@ -52,6 +53,7 @@ from .typing import GetPropertiesRes as GetPropertiesRes from .typing import Metrics as Metrics from .typing import MetricsAggregationFn as MetricsAggregationFn +from .typing import MetricsRecordValues as MetricsRecordValues from .typing import NDArray as NDArray from .typing import NDArrays as NDArrays from .typing import Parameters as Parameters @@ -63,43 +65,36 @@ __all__ = [ "Array", - "array_from_numpy", - "bytes_to_ndarray", "ClientMessage", "Code", "Config", "ConfigsRecord", - "configure", + "ConfigsRecordValues", "Context", + "DEFAULT_TTL", "DisconnectRes", + "Error", "EvaluateIns", "EvaluateRes", - "event", "EventType", 
"FitIns", "FitRes", - "Error", + "GRPC_MAX_MESSAGE_LENGTH", "GetParametersIns", "GetParametersRes", "GetPropertiesIns", "GetPropertiesRes", - "GRPC_MAX_MESSAGE_LENGTH", - "log", "Message", "MessageType", "MessageTypeLegacy", - "DEFAULT_TTL", "Metadata", "Metrics", "MetricsAggregationFn", "MetricsRecord", - "ndarray_to_bytes", - "now", + "MetricsRecordValues", "NDArray", "NDArrays", - "ndarrays_to_parameters", "Parameters", - "parameters_to_ndarrays", "ParametersRecord", "Properties", "ReconnectIns", @@ -107,4 +102,13 @@ "Scalar", "ServerMessage", "Status", + "array_from_numpy", + "bytes_to_ndarray", + "configure", + "event", + "log", + "ndarray_to_bytes", + "ndarrays_to_parameters", + "now", + "parameters_to_ndarrays", ] diff --git a/src/py/flwr/common/address.py b/src/py/flwr/common/address.py index 71b6d684597f..2b10097ccb71 100644 --- a/src/py/flwr/common/address.py +++ b/src/py/flwr/common/address.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,13 +14,14 @@ # ============================================================================== """Flower IP address utils.""" +import socket from ipaddress import ip_address -from typing import Optional, Tuple +from typing import Optional IPV6: int = 6 -def parse_address(address: str) -> Optional[Tuple[str, int, Optional[bool]]]: +def parse_address(address: str) -> Optional[tuple[str, int, Optional[bool]]]: """Parse an IP address into host, port, and version. Parameters @@ -57,3 +58,45 @@ def parse_address(address: str) -> Optional[Tuple[str, int, Optional[bool]]]: except ValueError: return None + + +def is_port_in_use(address: str) -> bool: + """Check if the port specified in address is in use. 
+ + Parameters + ---------- + address : str + The string representation of a domain, an IPv4, or an IPV6 address + with the port number. + + For example, '127.0.0.1:8080', or `[::1]:8080`. + + Returns + ------- + bool + If the port provided is in use or can't be parsed, + the function will return True, otherwise it will return False. + """ + parsed_address = parse_address(address) + if not parsed_address: + return True + host, port, is_v6 = parsed_address + + if is_v6: + protocol = socket.AF_INET6 + else: + protocol = socket.AF_INET + + with socket.socket(protocol, socket.SOCK_STREAM) as s: + s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + try: + if is_v6: + # For IPv6, provide `flowinfo` and `scopeid` as 0 + s.bind((host, port, 0, 0)) + else: + # For IPv4 + s.bind((host, port)) + except OSError: + return True + + return False diff --git a/src/py/flwr/common/address_test.py b/src/py/flwr/common/address_test.py index 420b89871d69..d5901ed640b1 100644 --- a/src/py/flwr/common/address_test.py +++ b/src/py/flwr/common/address_test.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/src/py/flwr/common/config.py b/src/py/flwr/common/config.py new file mode 100644 index 000000000000..24ccada7509a --- /dev/null +++ b/src/py/flwr/common/config.py @@ -0,0 +1,231 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Provide functions for managing global Flower config.""" + +import os +import re +from pathlib import Path +from typing import Any, Optional, Union, cast, get_args + +import tomli + +from flwr.cli.config_utils import get_fab_config, validate_fields +from flwr.common.constant import ( + APP_DIR, + FAB_CONFIG_FILE, + FAB_HASH_TRUNCATION, + FLWR_HOME, +) +from flwr.common.typing import Run, UserConfig, UserConfigValue + + +def get_flwr_dir(provided_path: Optional[str] = None) -> Path: + """Return the Flower home directory based on env variables.""" + if provided_path is None or not Path(provided_path).is_dir(): + return Path( + os.getenv( + FLWR_HOME, + Path(f"{os.getenv('XDG_DATA_HOME', os.getenv('HOME'))}") / ".flwr", + ) + ) + return Path(provided_path).absolute() + + +def get_project_dir( + fab_id: str, + fab_version: str, + fab_hash: str, + flwr_dir: Optional[Union[str, Path]] = None, +) -> Path: + """Return the project directory based on the given fab_id and fab_version.""" + # Check the fab_id + if fab_id.count("/") != 1: + raise ValueError( + f"Invalid FAB ID: {fab_id}", + ) + publisher, project_name = fab_id.split("/") + if flwr_dir is None: + flwr_dir = get_flwr_dir() + return ( + Path(flwr_dir) + / APP_DIR + / f"{publisher}.{project_name}.{fab_version}.{fab_hash[:FAB_HASH_TRUNCATION]}" + ) + + +def get_project_config(project_dir: Union[str, Path]) -> dict[str, Any]: + """Return pyproject.toml in the given project directory.""" + # Load pyproject.toml file + 
toml_path = Path(project_dir) / FAB_CONFIG_FILE + if not toml_path.is_file(): + raise FileNotFoundError( + f"Cannot find {FAB_CONFIG_FILE} in {project_dir}", + ) + with toml_path.open(encoding="utf-8") as toml_file: + config = tomli.loads(toml_file.read()) + + # Validate pyproject.toml fields + is_valid, errors, _ = validate_fields(config) + if not is_valid: + error_msg = "\n".join([f" - {error}" for error in errors]) + raise ValueError( + f"Invalid {FAB_CONFIG_FILE}:\n{error_msg}", + ) + + return config + + +def fuse_dicts( + main_dict: UserConfig, + override_dict: UserConfig, +) -> UserConfig: + """Merge a config with the overrides. + + Remove the nesting by adding the nested keys as prefixes separated by dots, and fuse + it with the override dict. + """ + fused_dict = main_dict.copy() + + for key, value in override_dict.items(): + if key in main_dict: + fused_dict[key] = value + + return fused_dict + + +def get_fused_config_from_dir( + project_dir: Path, override_config: UserConfig +) -> UserConfig: + """Merge the overrides from a given dict with the config from a Flower App.""" + default_config = get_project_config(project_dir)["tool"]["flwr"]["app"].get( + "config", {} + ) + flat_default_config = flatten_dict(default_config) + + return fuse_dicts(flat_default_config, override_config) + + +def get_fused_config_from_fab(fab_file: Union[Path, bytes], run: Run) -> UserConfig: + """Fuse default config in a `FAB` with overrides in a `Run`. + + This enables obtaining a run-config without having to install the FAB. This + function mirrors `get_fused_config_from_dir`. This is useful when the execution + of the FAB is delegated to a different process. 
+ """ + default_config = get_fab_config(fab_file)["tool"]["flwr"]["app"].get("config", {}) + flat_config_flat = flatten_dict(default_config) + return fuse_dicts(flat_config_flat, run.override_config) + + +def get_fused_config(run: Run, flwr_dir: Optional[Path]) -> UserConfig: + """Merge the overrides from a `Run` with the config from a FAB. + + Get the config using the fab_id and the fab_version, remove the nesting by adding + the nested keys as prefixes separated by dots, and fuse it with the override dict. + """ + # Return empty dict if fab_id or fab_version is empty + if not run.fab_id or not run.fab_version: + return {} + + project_dir = get_project_dir(run.fab_id, run.fab_version, run.fab_hash, flwr_dir) + + # Return empty dict if project directory does not exist + if not project_dir.is_dir(): + return {} + + return get_fused_config_from_dir(project_dir, run.override_config) + + +def flatten_dict( + raw_dict: Optional[dict[str, Any]], parent_key: str = "" +) -> UserConfig: + """Flatten dict by joining nested keys with a given separator.""" + if raw_dict is None: + return {} + + items: list[tuple[str, UserConfigValue]] = [] + separator: str = "." + for k, v in raw_dict.items(): + new_key = f"{parent_key}{separator}{k}" if parent_key else k + if isinstance(v, dict): + items.extend(flatten_dict(v, parent_key=new_key).items()) + elif isinstance(v, get_args(UserConfigValue)): + items.append((new_key, cast(UserConfigValue, v))) + else: + raise ValueError( + f"The value for key {k} needs to be of type `int`, `float`, " + "`bool, `str`, or a `dict` of those.", + ) + return dict(items) + + +def unflatten_dict(flat_dict: dict[str, Any]) -> dict[str, Any]: + """Unflatten a dict with keys containing separators into a nested dict.""" + unflattened_dict: dict[str, Any] = {} + separator: str = "." 
+ + for key, value in flat_dict.items(): + parts = key.split(separator) + d = unflattened_dict + for part in parts[:-1]: + if part not in d: + d[part] = {} + d = d[part] + d[parts[-1]] = value + + return unflattened_dict + + +def parse_config_args( + config: Optional[list[str]], +) -> UserConfig: + """Parse separator separated list of key-value pairs separated by '='.""" + overrides: UserConfig = {} + + if config is None: + return overrides + + # Handle if .toml file is passed + if len(config) == 1 and config[0].endswith(".toml"): + with Path(config[0]).open("rb") as config_file: + overrides = flatten_dict(tomli.load(config_file)) + return overrides + + # Regular expression to capture key-value pairs with possible quoted values + pattern = re.compile(r"(\S+?)=(\'[^\']*\'|\"[^\"]*\"|\S+)") + + flat_overrides = {} + for config_line in config: + if config_line: + # .toml files aren't allowed alongside other configs + if config_line.endswith(".toml"): + raise ValueError( + "TOML files cannot be passed alongside key-value pairs." + ) + + matches = pattern.findall(config_line) + toml_str = "\n".join(f"{k} = {v}" for k, v in matches) + overrides.update(tomli.loads(toml_str)) + flat_overrides = flatten_dict(overrides) + + return flat_overrides + + +def get_metadata_from_config(config: dict[str, Any]) -> tuple[str, str]: + """Extract `fab_version` and `fab_id` from a project config.""" + return ( + config["project"]["version"], + f"{config['tool']['flwr']['app']['publisher']}/{config['project']['name']}", + ) diff --git a/src/py/flwr/common/config_test.py b/src/py/flwr/common/config_test.py new file mode 100644 index 000000000000..b2edd319e382 --- /dev/null +++ b/src/py/flwr/common/config_test.py @@ -0,0 +1,313 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Test util functions handling Flower config.""" + +import os +import tempfile +import textwrap +from pathlib import Path +from unittest.mock import patch + +import pytest + +from flwr.common.typing import UserConfig + +from .config import ( + flatten_dict, + fuse_dicts, + get_flwr_dir, + get_project_config, + get_project_dir, + parse_config_args, + unflatten_dict, +) + +# Mock constants +FAB_CONFIG_FILE = "pyproject.toml" + + +def test_get_flwr_dir_with_provided_path() -> None: + """Test get_flwr_dir with a provided valid path.""" + provided_path = "." 
+ assert get_flwr_dir(provided_path) == Path(provided_path).absolute() + + +def test_get_flwr_dir_without_provided_path() -> None: + """Test get_flwr_dir without a provided path, using default home directory.""" + with patch.dict(os.environ, {"HOME": "/home/user"}): + assert get_flwr_dir() == Path("/home/user/.flwr") + + +def test_get_flwr_dir_with_flwr_home() -> None: + """Test get_flwr_dir with FLWR_HOME environment variable set.""" + with patch.dict(os.environ, {"FLWR_HOME": "/custom/flwr/home"}): + assert get_flwr_dir() == Path("/custom/flwr/home") + + +def test_get_flwr_dir_with_xdg_data_home() -> None: + """Test get_flwr_dir with XDG_DATA_HOME environment variable set.""" + with patch.dict(os.environ, {"XDG_DATA_HOME": "/custom/data/home"}): + assert get_flwr_dir() == Path("/custom/data/home/.flwr") + + +def test_get_project_dir_invalid_fab_id() -> None: + """Test get_project_dir with an invalid fab_id.""" + with pytest.raises(ValueError): + get_project_dir( + "invalid_fab_id", + "1.0.0", + "03840e932bf61247c1231f0aec9e8ec5f041ed5516fb23638f24d25f3a007acd", + ) + + +def test_get_project_dir_valid() -> None: + """Test get_project_dir with a valid fab_id and version.""" + app_path = get_project_dir( + "app_name/user", + "1.0.0", + "03840e932bf61247c1231f0aec9e8ec5f041ed5516fb23638f24d25f3a007acd", + flwr_dir=".", + ) + assert app_path == Path("apps") / "app_name.user.1.0.0.03840e93" + + +def test_get_project_config_file_not_found() -> None: + """Test get_project_config when the configuration file is not found.""" + with pytest.raises(FileNotFoundError): + get_project_config("/invalid/dir") + + +def test_get_fused_config_valid(tmp_path: Path) -> None: + """Test that defaults from the project config are fused with overrides.""" + pyproject_toml_content = """ + [build-system] + requires = ["hatchling"] + build-backend = "hatchling.build" + + [project] + name = "fedgpt" + version = "1.0.0" + description = "" + license = {text = "Apache License (2.0)"} + dependencies = [ 
+ "flwr[simulation]>=1.9.0,<2.0", + "numpy>=1.21.0", + ] + + [tool.flwr.app] + publisher = "flwrlabs" + + [tool.flwr.app.components] + serverapp = "fedgpt.server:app" + clientapp = "fedgpt.client:app" + + [tool.flwr.app.config] + num_server_rounds = 10 + momentum = 0.1 + lr = 0.01 + progress_bar = true + serverapp.test = "key" + + [tool.flwr.app.config.clientapp] + test = "key" + """ + overrides: UserConfig = { + "num_server_rounds": 5, + "lr": 0.2, + "serverapp.test": "overriden", + } + expected_config = { + "num_server_rounds": 5, + "momentum": 0.1, + "lr": 0.2, + "progress_bar": True, + "serverapp.test": "overriden", + "clientapp.test": "key", + } + # Current directory + origin = Path.cwd() + + try: + # Change into the temporary directory + os.chdir(tmp_path) + with open(FAB_CONFIG_FILE, "w", encoding="utf-8") as f: + f.write(textwrap.dedent(pyproject_toml_content)) + + # Execute + default_config = get_project_config(tmp_path)["tool"]["flwr"]["app"].get( + "config", {} + ) + + config = fuse_dicts(flatten_dict(default_config), overrides) + + # Assert + assert config == expected_config + finally: + os.chdir(origin) + + +def test_get_project_config_file_valid(tmp_path: Path) -> None: + """Test get_project_config when the configuration file is not found.""" + pyproject_toml_content = """ + [build-system] + requires = ["hatchling"] + build-backend = "hatchling.build" + + [project] + name = "fedgpt" + version = "1.0.0" + description = "" + license = {text = "Apache License (2.0)"} + dependencies = [ + "flwr[simulation]>=1.9.0,<2.0", + "numpy>=1.21.0", + ] + + [tool.flwr.app] + publisher = "flwrlabs" + + [tool.flwr.app.components] + serverapp = "fedgpt.server:app" + clientapp = "fedgpt.client:app" + + [tool.flwr.app.config] + num_server_rounds = 10 + momentum = 0.1 + progress_bar = true + lr = "0.01" + """ + expected_config = { + "build-system": {"build-backend": "hatchling.build", "requires": ["hatchling"]}, + "project": { + "name": "fedgpt", + "version": "1.0.0", + 
"description": "", + "license": {"text": "Apache License (2.0)"}, + "dependencies": ["flwr[simulation]>=1.9.0,<2.0", "numpy>=1.21.0"], + }, + "tool": { + "flwr": { + "app": { + "publisher": "flwrlabs", + "components": { + "serverapp": "fedgpt.server:app", + "clientapp": "fedgpt.client:app", + }, + "config": { + "num_server_rounds": 10, + "momentum": 0.1, + "progress_bar": True, + "lr": "0.01", + }, + }, + }, + }, + } + # Current directory + origin = Path.cwd() + + try: + # Change into the temporary directory + os.chdir(tmp_path) + with open(FAB_CONFIG_FILE, "w", encoding="utf-8") as f: + f.write(textwrap.dedent(pyproject_toml_content)) + + # Execute + config = get_project_config(tmp_path) + + # Assert + assert config == expected_config + finally: + os.chdir(origin) + + +def test_flatten_dict() -> None: + """Test flatten_dict with a nested dictionary.""" + raw_dict = {"a": {"b": {"c": "d"}}, "e": "f"} + expected = {"a.b.c": "d", "e": "f"} + assert flatten_dict(raw_dict) == expected + + +def test_unflatten_dict() -> None: + """Test unflatten_dict with a flat dictionary.""" + raw_dict = {"a.b.c": "d", "e": "f"} + expected = {"a": {"b": {"c": "d"}}, "e": "f"} + assert unflatten_dict(raw_dict) == expected + + +def test_parse_config_args_none() -> None: + """Test parse_config_args with None as input.""" + assert not parse_config_args(None) + + +def test_parse_config_args_overrides() -> None: + """Test parse_config_args with key-value pairs.""" + assert parse_config_args( + ["key1='value1' key2='value2'", "key3=1", "key4=2.0 key5=true key6='value6'"] + ) == { + "key1": "value1", + "key2": "value2", + "key3": 1, + "key4": 2.0, + "key5": True, + "key6": "value6", + } + + +def test_parse_config_args_from_toml_file() -> None: + """Test if a toml passed to --run-config it is loaded and fused correctly.""" + # Will be saved as a temp .toml file + toml_config = """ + num-server-rounds = 10 + momentum = 0.1 + verbose = true + """ + # This is the UserConfig that would be extracted 
from pyproject.toml + initial_run_config: UserConfig = { + "num-server-rounds": 5, + "momentum": 0.2, + "dataset": "my-fancy-dataset", + "verbose": False, + } + expected_config = { + "num-server-rounds": 10, + "momentum": 0.1, + "dataset": "my-fancy-dataset", + "verbose": True, + } + + # Create a temporary directory using a context manager + with tempfile.TemporaryDirectory() as temp_dir: + # Create a temporary TOML file within that directory + toml_config_file = os.path.join(temp_dir, "extra_config.toml") + + # Write the data to the TOML file + with open(toml_config_file, "w", encoding="utf-8") as toml_file: + toml_file.write(textwrap.dedent(toml_config)) + + # Parse config (this mimics what `--run-config path/to/config.toml` does) + config_from_toml = parse_config_args([toml_config_file]) + # Fuse + config = fuse_dicts(initial_run_config, config_from_toml) + + # Assert + assert config == expected_config + + +def test_parse_config_args_passing_toml_and_key_value() -> None: + """Test that passing a toml and key-value configs aren't allowed.""" + config = ["my-other-config.toml", "lr=0.1", "epochs=99"] + with pytest.raises(ValueError): + parse_config_args(config) diff --git a/src/py/flwr/common/constant.py b/src/py/flwr/common/constant.py index b6d39b6e8932..e99e0edaacd4 100644 --- a/src/py/flwr/common/constant.py +++ b/src/py/flwr/common/constant.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -27,6 +27,7 @@ TRANSPORT_TYPE_GRPC_BIDI = "grpc-bidi" TRANSPORT_TYPE_GRPC_RERE = "grpc-rere" +TRANSPORT_TYPE_GRPC_ADAPTER = "grpc-adapter" TRANSPORT_TYPE_REST = "rest" TRANSPORT_TYPE_VCE = "vce" TRANSPORT_TYPES = [ @@ -36,6 +37,19 @@ TRANSPORT_TYPE_VCE, ] +# Addresses +# SuperNode +CLIENTAPPIO_API_DEFAULT_ADDRESS = "0.0.0.0:9094" +# SuperExec +EXEC_API_DEFAULT_ADDRESS = "0.0.0.0:9093" +# SuperLink +DRIVER_API_DEFAULT_ADDRESS = "0.0.0.0:9091" +FLEET_API_GRPC_RERE_DEFAULT_ADDRESS = "0.0.0.0:9092" +FLEET_API_GRPC_BIDI_DEFAULT_ADDRESS = ( + "[::]:8080" # IPv6 to keep start_server compatible +) +FLEET_API_REST_DEFAULT_ADDRESS = "0.0.0.0:9093" + # Constants for ping PING_DEFAULT_INTERVAL = 30 PING_CALL_TIMEOUT = 5 @@ -43,6 +57,33 @@ PING_RANDOM_RANGE = (-0.1, 0.1) PING_MAX_INTERVAL = 1e300 +# IDs +RUN_ID_NUM_BYTES = 8 +NODE_ID_NUM_BYTES = 8 + +# Constants for FAB +APP_DIR = "apps" +FAB_ALLOWED_EXTENSIONS = {".py", ".toml", ".md"} +FAB_CONFIG_FILE = "pyproject.toml" +FAB_DATE = (2024, 10, 1, 0, 0, 0) +FAB_HASH_TRUNCATION = 8 +FLWR_HOME = "FLWR_HOME" + +# Constants entries in Node config for Simulation +PARTITION_ID_KEY = "partition-id" +NUM_PARTITIONS_KEY = "num-partitions" + +# Constants for keys in `metadata` of `MessageContainer` in `grpc-adapter` +GRPC_ADAPTER_METADATA_FLOWER_PACKAGE_NAME_KEY = "flower-package-name" +GRPC_ADAPTER_METADATA_FLOWER_PACKAGE_VERSION_KEY = "flower-package-version" +GRPC_ADAPTER_METADATA_FLOWER_VERSION_KEY = "flower-version" # Deprecated +GRPC_ADAPTER_METADATA_SHOULD_EXIT_KEY = "should-exit" +GRPC_ADAPTER_METADATA_MESSAGE_MODULE_KEY = "grpc-message-module" +GRPC_ADAPTER_METADATA_MESSAGE_QUALNAME_KEY = "grpc-message-qualname" + +# Message TTL +MESSAGE_TTL_TOLERANCE = 1e-1 + class MessageType: """Message type.""" diff --git a/src/py/flwr/common/context.py b/src/py/flwr/common/context.py index b6349307d150..1544b96d3fa3 100644 --- a/src/py/flwr/common/context.py +++ b/src/py/flwr/common/context.py @@ -18,14 +18,20 @@ from dataclasses import 
dataclass from .record import RecordSet +from .typing import UserConfig @dataclass class Context: - """State of your run. + """Context of your run. Parameters ---------- + node_id : int + The ID that identifies the node. + node_config : UserConfig + A config (key/value mapping) unique to the node and independent of the + `run_config`. This config persists across all runs this node participates in. state : RecordSet Holds records added by the entity in a given run and that will stay local. This means that the data it holds will never leave the system it's running from. @@ -33,6 +39,25 @@ class Context: executing mods. It can also be used as a memory to access at different points during the lifecycle of this entity (e.g. across multiple rounds) + run_config : UserConfig + A config (key/value mapping) held by the entity in a given run and that will + stay local. It can be used at any point during the lifecycle of this entity + (e.g. across multiple rounds) """ + node_id: int + node_config: UserConfig state: RecordSet + run_config: UserConfig + + def __init__( # pylint: disable=too-many-arguments + self, + node_id: int, + node_config: UserConfig, + state: RecordSet, + run_config: UserConfig, + ) -> None: + self.node_id = node_id + self.node_config = node_config + self.state = state + self.run_config = run_config diff --git a/src/py/flwr/common/date.py b/src/py/flwr/common/date.py index f47ad5470106..7f30f5e0591a 100644 --- a/src/py/flwr/common/date.py +++ b/src/py/flwr/common/date.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/src/py/flwr/common/differential_privacy.py b/src/py/flwr/common/differential_privacy.py index 85dc198ef8a0..56da98a3c805 100644 --- a/src/py/flwr/common/differential_privacy.py +++ b/src/py/flwr/common/differential_privacy.py @@ -16,7 +16,7 @@ from logging import WARNING -from typing import Optional, Tuple +from typing import Optional import numpy as np @@ -125,7 +125,7 @@ def compute_adaptive_noise_params( noise_multiplier: float, num_sampled_clients: float, clipped_count_stddev: Optional[float], -) -> Tuple[float, float]: +) -> tuple[float, float]: """Compute noising parameters for the adaptive clipping. Paper: https://arxiv.org/abs/1905.03871 diff --git a/src/py/flwr/common/dp.py b/src/py/flwr/common/dp.py index 83a72b8ce749..13ae94461ef9 100644 --- a/src/py/flwr/common/dp.py +++ b/src/py/flwr/common/dp.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2022 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -15,8 +15,6 @@ """Building block functions for DP algorithms.""" -from typing import Tuple - import numpy as np from flwr.common.logger import warn_deprecated_feature @@ -41,7 +39,7 @@ def add_gaussian_noise(update: NDArrays, std_dev: float) -> NDArrays: return update_noised -def clip_by_l2(update: NDArrays, threshold: float) -> Tuple[NDArrays, bool]: +def clip_by_l2(update: NDArrays, threshold: float) -> tuple[NDArrays, bool]: """Scales the update so thats its L2 norm is upper-bound to threshold.""" warn_deprecated_feature("`clip_by_l2` method") update_norm = _get_update_norm(update) diff --git a/src/py/flwr/common/exit_handlers.py b/src/py/flwr/common/exit_handlers.py index 30750c28a450..e5898b46a537 100644 --- a/src/py/flwr/common/exit_handlers.py +++ b/src/py/flwr/common/exit_handlers.py @@ -19,7 +19,7 @@ from signal import SIGINT, SIGTERM, signal from threading import Thread from types import FrameType -from typing import List, Optional +from typing import Optional from grpc import Server @@ -28,8 +28,8 @@ def register_exit_handlers( event_type: EventType, - grpc_servers: Optional[List[Server]] = None, - bckg_threads: Optional[List[Thread]] = None, + grpc_servers: Optional[list[Server]] = None, + bckg_threads: Optional[list[Thread]] = None, ) -> None: """Register exit handlers for `SIGINT` and `SIGTERM` signals. diff --git a/src/py/flwr/common/grpc.py b/src/py/flwr/common/grpc.py index ead0329ca79c..5a29c595119c 100644 --- a/src/py/flwr/common/grpc.py +++ b/src/py/flwr/common/grpc.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2022 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -15,8 +15,9 @@ """Utility functions for gRPC.""" +from collections.abc import Sequence from logging import DEBUG -from typing import Optional, Sequence +from typing import Optional import grpc diff --git a/src/py/flwr/common/logger.py b/src/py/flwr/common/logger.py index 7225b0663ae7..3a058abac9c6 100644 --- a/src/py/flwr/common/logger.py +++ b/src/py/flwr/common/logger.py @@ -18,7 +18,7 @@ import logging from logging import WARN, LogRecord from logging.handlers import HTTPHandler -from typing import TYPE_CHECKING, Any, Dict, Optional, TextIO, Tuple +from typing import TYPE_CHECKING, Any, Optional, TextIO # Create logger LOGGER_NAME = "flwr" @@ -111,7 +111,7 @@ def update_console_handler( class CustomHTTPHandler(HTTPHandler): """Custom HTTPHandler which overrides the mapLogRecords method.""" - # pylint: disable=too-many-arguments,bad-option-value,R1725 + # pylint: disable=too-many-arguments,bad-option-value,R1725,R0917 def __init__( self, identifier: str, @@ -119,12 +119,12 @@ def __init__( url: str, method: str = "GET", secure: bool = False, - credentials: Optional[Tuple[str, str]] = None, + credentials: Optional[tuple[str, str]] = None, ) -> None: super().__init__(host, url, method, secure, credentials) self.identifier = identifier - def mapLogRecord(self, record: LogRecord) -> Dict[str, Any]: + def mapLogRecord(self, record: LogRecord) -> dict[str, Any]: """Filter for the properties to be send to the logserver.""" record_dict = record.__dict__ return { @@ -197,6 +197,44 @@ def warn_deprecated_feature(name: str) -> None: ) +def warn_deprecated_feature_with_example( + deprecation_message: str, example_message: str, code_example: str +) -> None: + """Warn if a feature is deprecated and show code example.""" + log( + WARN, + """DEPRECATED FEATURE: %s + + Check the following `FEATURE UPDATE` warning message for the preferred + new mechanism to use this feature in Flower. 
+ """, + deprecation_message, + ) + log( + WARN, + """FEATURE UPDATE: %s + ------------------------------------------------------------ + %s + ------------------------------------------------------------ + """, + example_message, + code_example, + ) + + +def warn_unsupported_feature(name: str) -> None: + """Warn the user when they use an unsupported feature.""" + log( + WARN, + """UNSUPPORTED FEATURE: %s + + This is an unsupported feature. It will be removed + entirely in future versions of Flower. + """, + name, + ) + + def set_logger_propagation( child_logger: logging.Logger, value: bool = True ) -> logging.Logger: diff --git a/src/py/flwr/common/message.py b/src/py/flwr/common/message.py index 7f7a0e4dd995..3bb07ff3961a 100644 --- a/src/py/flwr/common/message.py +++ b/src/py/flwr/common/message.py @@ -17,9 +17,11 @@ from __future__ import annotations import time -import warnings +from logging import WARNING from typing import Optional, cast +from .constant import MESSAGE_TTL_TOLERANCE +from .logger import log from .record import RecordSet DEFAULT_TTL = 3600 @@ -48,13 +50,9 @@ class Metadata: # pylint: disable=too-many-instance-attributes message_type : str A string that encodes the action to be executed on the receiving end. - partition_id : Optional[int] - An identifier that can be used when loading a particular - data partition for a ClientApp. Making use of this identifier - is more relevant when conducting simulations. 
""" - def __init__( # pylint: disable=too-many-arguments + def __init__( # pylint: disable=too-many-arguments,too-many-positional-arguments self, run_id: int, message_id: str, @@ -64,7 +62,6 @@ def __init__( # pylint: disable=too-many-arguments group_id: str, ttl: float, message_type: str, - partition_id: int | None = None, ) -> None: var_dict = { "_run_id": run_id, @@ -75,7 +72,6 @@ def __init__( # pylint: disable=too-many-arguments "_group_id": group_id, "_ttl": ttl, "_message_type": message_type, - "_partition_id": partition_id, } self.__dict__.update(var_dict) @@ -149,16 +145,6 @@ def message_type(self, value: str) -> None: """Set message_type.""" self.__dict__["_message_type"] = value - @property - def partition_id(self) -> int | None: - """An identifier telling which data partition a ClientApp should use.""" - return cast(int, self.__dict__["_partition_id"]) - - @partition_id.setter - def partition_id(self, value: int) -> None: - """Set partition_id.""" - self.__dict__["_partition_id"] = value - def __repr__(self) -> str: """Return a string representation of this instance.""" view = ", ".join([f"{k.lstrip('_')}={v!r}" for k, v in self.__dict__.items()]) @@ -304,14 +290,12 @@ def create_error_reply(self, error: Error, ttl: float | None = None) -> Message: follows the equation: ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at) + + Returns + ------- + message : Message + A Message containing only the relevant error and metadata. """ - if ttl: - warnings.warn( - "A custom TTL was set, but note that the SuperLink does not enforce " - "the TTL yet. 
The SuperLink will start enforcing the TTL in a future " - "version of Flower.", - stacklevel=2, - ) # If no TTL passed, use default for message creation (will update after # message creation) ttl_ = DEFAULT_TTL if ttl is None else ttl @@ -325,6 +309,8 @@ def create_error_reply(self, error: Error, ttl: float | None = None) -> Message: ) message.metadata.ttl = ttl + self._limit_task_res_ttl(message) + return message def create_reply(self, content: RecordSet, ttl: float | None = None) -> Message: @@ -350,13 +336,6 @@ def create_reply(self, content: RecordSet, ttl: float | None = None) -> Message: Message A new `Message` instance representing the reply. """ - if ttl: - warnings.warn( - "A custom TTL was set, but note that the SuperLink does not enforce " - "the TTL yet. The SuperLink will start enforcing the TTL in a future " - "version of Flower.", - stacklevel=2, - ) # If no TTL passed, use default for message creation (will update after # message creation) ttl_ = DEFAULT_TTL if ttl is None else ttl @@ -373,6 +352,8 @@ def create_reply(self, content: RecordSet, ttl: float | None = None) -> Message: ) message.metadata.ttl = ttl + self._limit_task_res_ttl(message) + return message def __repr__(self) -> str: @@ -386,6 +367,31 @@ def __repr__(self) -> str: ) return f"{self.__class__.__qualname__}({view})" + def _limit_task_res_ttl(self, message: Message) -> None: + """Limit the TaskRes TTL to not exceed the expiration time of the TaskIns it + replies to. + + Parameters + ---------- + message : Message + The message to which the TaskRes is replying. + """ + # Calculate the maximum allowed TTL + max_allowed_ttl = ( + self.metadata.created_at + self.metadata.ttl - message.metadata.created_at + ) + + if message.metadata.ttl - max_allowed_ttl > MESSAGE_TTL_TOLERANCE: + log( + WARNING, + "The reply TTL of %.2f seconds exceeded the " + "allowed maximum of %.2f seconds. 
" + "The TTL has been updated to the allowed maximum.", + message.metadata.ttl, + max_allowed_ttl, + ) + message.metadata.ttl = max_allowed_ttl + def _create_reply_metadata(msg: Message, ttl: float) -> Metadata: """Construct metadata for a reply message.""" @@ -398,5 +404,4 @@ def _create_reply_metadata(msg: Message, ttl: float) -> Metadata: group_id=msg.metadata.group_id, ttl=ttl, message_type=msg.metadata.message_type, - partition_id=msg.metadata.partition_id, ) diff --git a/src/py/flwr/common/message_test.py b/src/py/flwr/common/message_test.py index 19f8aeb1eb63..d418f9fa8036 100644 --- a/src/py/flwr/common/message_test.py +++ b/src/py/flwr/common/message_test.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,12 +17,13 @@ import time from collections import namedtuple from contextlib import ExitStack -from typing import Any, Callable, Dict, Optional +from typing import Any, Callable, Optional import pytest # pylint: enable=E0611 from . 
import RecordSet +from .constant import MESSAGE_TTL_TOLERANCE from .message import Error, Message, Metadata from .serde_test import RecordMaker @@ -174,7 +175,6 @@ def test_create_reply( "group_id": "group_xyz", "ttl": 10.0, "message_type": "request", - "partition_id": None, }, ), (Error, {"code": 1, "reason": "reason_098"}), @@ -194,7 +194,7 @@ def test_create_reply( ), ], ) -def test_repr(cls: type, kwargs: Dict[str, Any]) -> None: +def test_repr(cls: type, kwargs: dict[str, Any]) -> None: """Test string representations of Metadata/Message/Error.""" # Prepare anon_cls = namedtuple(cls.__qualname__, kwargs.keys()) # type: ignore @@ -203,3 +203,35 @@ def test_repr(cls: type, kwargs: Dict[str, Any]) -> None: # Assert assert str(actual) == str(expected) + + +@pytest.mark.parametrize( + "message_creation_fn,initial_ttl,reply_ttl,expected_reply_ttl", + [ + # Case where the reply_ttl is larger than the allowed TTL + (create_message_with_content, 20, 30, 20), + (create_message_with_error, 20, 30, 20), + # Case where the reply_ttl is within the allowed range + (create_message_with_content, 20, 10, 10), + (create_message_with_error, 20, 10, 10), + ], +) +def test_reply_ttl_limitation( + message_creation_fn: Callable[[float], Message], + initial_ttl: float, + reply_ttl: float, + expected_reply_ttl: float, +) -> None: + """Test that the reply TTL does not exceed the allowed TTL.""" + message = message_creation_fn(initial_ttl) + + if message.has_error(): + dummy_error = Error(code=0, reason="test error") + reply_message = message.create_error_reply(dummy_error, ttl=reply_ttl) + else: + reply_message = message.create_reply(content=RecordSet(), ttl=reply_ttl) + + assert reply_message.metadata.ttl - expected_reply_ttl <= MESSAGE_TTL_TOLERANCE, ( + f"Expected TTL to be <= {expected_reply_ttl}, " + f"but got {reply_message.metadata.ttl}" + ) diff --git a/src/py/flwr/common/object_ref.py b/src/py/flwr/common/object_ref.py index 4660f07e24a4..6259b5ab557d 100644 --- 
a/src/py/flwr/common/object_ref.py +++ b/src/py/flwr/common/object_ref.py @@ -17,8 +17,13 @@ import ast import importlib +import sys from importlib.util import find_spec -from typing import Any, Optional, Tuple, Type +from logging import WARN +from pathlib import Path +from typing import Any, Optional, Union + +from .logger import log OBJECT_REF_HELP_STR = """ \n\nThe object reference string should have the form :. Valid @@ -28,21 +33,41 @@ """ +_current_sys_path: Optional[str] = None + + def validate( module_attribute_str: str, -) -> Tuple[bool, Optional[str]]: + check_module: bool = True, + project_dir: Optional[Union[str, Path]] = None, +) -> tuple[bool, Optional[str]]: """Validate object reference. - The object reference string should have the form :. Valid - examples include `client:app` and `project.package.module:wrapper.app`. It must - refer to a module on the PYTHONPATH and the module needs to have the specified - attribute. + Parameters + ---------- + module_attribute_str : str + The reference to the object. It should have the form `:`. + Valid examples include `client:app` and `project.package.module:wrapper.app`. + It must refer to a module on the PYTHONPATH or in the provided `project_dir` + and the module needs to have the specified attribute. + check_module : bool (default: True) + Flag indicating whether to verify the existence of the module and the + specified attribute within it. + project_dir : Optional[Union[str, Path]] (default: None) + The directory containing the module. If None, the current working directory + is used. If `check_module` is True, the `project_dir` will be inserted into + the system path, and the previously inserted `project_dir` will be removed. Returns ------- Tuple[bool, Optional[str]] A boolean indicating whether an object reference is valid and the reason why it might not be. + + Note + ---- + This function will modify `sys.path` by inserting the provided `project_dir` + and removing the previously inserted `project_dir`. 
""" module_str, _, attributes_str = module_attribute_str.partition(":") if not module_str: @@ -56,15 +81,21 @@ def validate( f"Missing attribute in {module_attribute_str}{OBJECT_REF_HELP_STR}", ) - # Load module - module = find_spec(module_str) - if module and module.origin: - if not _find_attribute_in_module(module.origin, attributes_str): - return ( - False, - f"Unable to find attribute {attributes_str} in module {module_str}" - f"{OBJECT_REF_HELP_STR}", - ) + if check_module: + # Set the system path + _set_sys_path(project_dir) + + # Load module + module = find_spec(module_str) + if module and module.origin: + if not _find_attribute_in_module(module.origin, attributes_str): + return ( + False, + f"Unable to find attribute {attributes_str} in module {module_str}" + f"{OBJECT_REF_HELP_STR}", + ) + return (True, None) + else: return (True, None) return ( @@ -73,44 +104,114 @@ def validate( ) -def load_app( +def load_app( # pylint: disable= too-many-branches module_attribute_str: str, - error_type: Type[Exception], + error_type: type[Exception], + project_dir: Optional[Union[str, Path]] = None, ) -> Any: """Return the object specified in a module attribute string. - The module/attribute string should have the form :. Valid - examples include `client:app` and `project.package.module:wrapper.app`. It must - refer to a module on the PYTHONPATH, the module needs to have the specified - attribute. + Parameters + ---------- + module_attribute_str : str + The reference to the object. It should have the form `:`. + Valid examples include `client:app` and `project.package.module:wrapper.app`. + It must refer to a module on the PYTHONPATH or in the provided `project_dir` + and the module needs to have the specified attribute. + error_type : Type[Exception] + The type of exception to be raised if the provided `module_attribute_str` is + in an invalid format. + project_dir : Optional[Union[str, Path]], optional (default=None) + The directory containing the module. 
If None, the current working directory + is used. The `project_dir` will be inserted into the system path, and the + previously inserted `project_dir` will be removed. + + Returns + ------- + Any + The object specified by the module attribute string. + + Note + ---- + This function will modify `sys.path` by inserting the provided `project_dir` + and removing the previously inserted `project_dir`. """ - valid, error_msg = validate(module_attribute_str) + valid, error_msg = validate(module_attribute_str, check_module=False) if not valid and error_msg: raise error_type(error_msg) from None module_str, _, attributes_str = module_attribute_str.partition(":") try: - module = importlib.import_module(module_str) - except ModuleNotFoundError: + _set_sys_path(project_dir) + + if module_str not in sys.modules: + module = importlib.import_module(module_str) + # Hack: `tabnet` does not work with `importlib.reload` + elif "tabnet" in sys.modules: + log( + WARN, + "Cannot reload module `%s` from disk due to compatibility issues " + "with the `tabnet` library. The module will be loaded from the " + "cache instead. 
If you experience issues, consider restarting " + "the application.", + module_str, + ) + module = sys.modules[module_str] + else: + module = sys.modules[module_str] + + if project_dir is None: + project_dir = Path.cwd() + + # Reload cached modules in the project directory + for m in list(sys.modules.values()): + path: Optional[str] = getattr(m, "__file__", None) + if path is not None and path.startswith(str(project_dir)): + importlib.reload(m) + + except ModuleNotFoundError as err: raise error_type( f"Unable to load module {module_str}{OBJECT_REF_HELP_STR}", - ) from None + ) from err # Recursively load attribute attribute = module try: for attribute_str in attributes_str.split("."): attribute = getattr(attribute, attribute_str) - except AttributeError: + except AttributeError as err: raise error_type( f"Unable to load attribute {attributes_str} from module {module_str}" f"{OBJECT_REF_HELP_STR}", - ) from None + ) from err return attribute +def _set_sys_path(directory: Optional[Union[str, Path]]) -> None: + """Set the system path.""" + if directory is None: + directory = Path.cwd() + else: + directory = Path(directory).absolute() + + # If the directory has already been added to `sys.path`, return + if str(directory) in sys.path: + return + + # Remove the old path if it exists and is not `""`. 
+ global _current_sys_path # pylint: disable=global-statement + if _current_sys_path is not None: + sys.path.remove(_current_sys_path) + + # Add the new path to sys.path + sys.path.insert(0, str(directory)) + + # Update the current_sys_path + _current_sys_path = str(directory) + + def _find_attribute_in_module(file_path: str, attribute_name: str) -> bool: """Check if attribute_name exists in module's abstract symbolic tree.""" with open(file_path, encoding="utf-8") as file: diff --git a/src/py/flwr/common/record/__init__.py b/src/py/flwr/common/record/__init__.py index 60bc54b8552a..88eef5f7aea1 100644 --- a/src/py/flwr/common/record/__init__.py +++ b/src/py/flwr/common/record/__init__.py @@ -22,9 +22,9 @@ __all__ = [ "Array", - "array_from_numpy", "ConfigsRecord", "MetricsRecord", "ParametersRecord", "RecordSet", + "array_from_numpy", ] diff --git a/src/py/flwr/common/record/configsrecord.py b/src/py/flwr/common/record/configsrecord.py index 471c85f0b961..e83bca816fc6 100644 --- a/src/py/flwr/common/record/configsrecord.py +++ b/src/py/flwr/common/record/configsrecord.py @@ -15,7 +15,7 @@ """ConfigsRecord.""" -from typing import Dict, List, Optional, get_args +from typing import Optional, get_args from flwr.common.typing import ConfigsRecordValues, ConfigsScalar @@ -58,27 +58,61 @@ def is_valid(__v: ConfigsScalar) -> None: class ConfigsRecord(TypedDict[str, ConfigsRecordValues]): - """Configs record.""" + """Configs record. + + A :code:`ConfigsRecord` is a Python dictionary designed to ensure that + each key-value pair adheres to specified data types. A :code:`ConfigsRecord` + is one of the types of records that a + `flwr.common.RecordSet `_ supports and + can therefore be used to construct :code:`common.Message` objects. + + Parameters + ---------- + configs_dict : Optional[Dict[str, ConfigsRecordValues]] + A dictionary that stores basic types (i.e. 
`str`, `int`, `float`, `bytes` as + defined in `ConfigsScalar`) and lists of such types (see + `ConfigsScalarList`). + keep_input : bool (default: True) + A boolean indicating whether config passed should be deleted from the input + dictionary immediately after adding them to the record. When set + to True, the data is duplicated in memory. If memory is a concern, set + it to False. + + Examples + -------- + The usage of a :code:`ConfigsRecord` is envisioned for sending configuration values + telling the target node how to perform a certain action (e.g. train/evaluate a model + ). You can use standard Python built-in types such as :code:`float`, :code:`str` + , :code:`bytes`. All types allowed are defined in + :code:`flwr.common.ConfigsRecordValues`. While lists are supported, we + encourage you to use a :code:`ParametersRecord` instead if these are of high + dimensionality. + + Let's see some examples of how to construct a :code:`ConfigsRecord` from scratch: + + >>> from flwr.common import ConfigsRecord + >>> + >>> # A `ConfigsRecord` is a specialized Python dictionary + >>> record = ConfigsRecord({"lr": 0.1, "batch-size": 128}) + >>> # You can add more content to an existing record + >>> record["compute-average"] = True + >>> # It also supports lists + >>> record["loss-fn-coefficients"] = [0.4, 0.25, 0.35] + >>> # And string values (among other types) + >>> record["path-to-S3"] = "s3://bucket_name/folder1/fileA.json" + + Just like the other types of records in a :code:`flwr.common.RecordSet`, types are + enforced. If you need to add a custom data structure or object, we recommend to + serialise it into bytes and save it as such (bytes are allowed in a + :code:`ConfigsRecord`) + """ def __init__( self, - configs_dict: Optional[Dict[str, ConfigsRecordValues]] = None, + configs_dict: Optional[dict[str, ConfigsRecordValues]] = None, keep_input: bool = True, ) -> None: - """Construct a ConfigsRecord object. 
- - Parameters - ---------- - configs_dict : Optional[Dict[str, ConfigsRecordValues]] - A dictionary that stores basic types (i.e. `str`, `int`, `float`, `bytes` as - defined in `ConfigsScalar`) and lists of such types (see - `ConfigsScalarList`). - keep_input : bool (default: True) - A boolean indicating whether config passed should be deleted from the input - dictionary immediately after adding them to the record. When set - to True, the data is duplicated in memory. If memory is a concern, set - it to False. - """ + super().__init__(_check_key, _check_value) if configs_dict: for k in list(configs_dict.keys()): @@ -94,6 +128,7 @@ def count_bytes(self) -> int: def get_var_bytes(value: ConfigsScalar) -> int: """Return Bytes of value passed.""" + var_bytes = 0 if isinstance(value, bool): var_bytes = 1 elif isinstance(value, (int, float)): @@ -102,12 +137,17 @@ def get_var_bytes(value: ConfigsScalar) -> int: ) if isinstance(value, (str, bytes)): var_bytes = len(value) + if var_bytes == 0: + raise ValueError( + "Config values must be either `bool`, `int`, `float`, " + "`str`, or `bytes`" + ) return var_bytes num_bytes = 0 for k, v in self.items(): - if isinstance(v, List): + if isinstance(v, list): if isinstance(v[0], (bytes, str)): # not all str are of equal length necessarily # for both the footprint of each element is 1 Byte diff --git a/src/py/flwr/common/record/metricsrecord.py b/src/py/flwr/common/record/metricsrecord.py index 2b6e584be390..d0a6123c807f 100644 --- a/src/py/flwr/common/record/metricsrecord.py +++ b/src/py/flwr/common/record/metricsrecord.py @@ -15,7 +15,7 @@ """MetricsRecord.""" -from typing import Dict, List, Optional, get_args +from typing import Optional, get_args from flwr.common.typing import MetricsRecordValues, MetricsScalar @@ -58,26 +58,66 @@ def is_valid(__v: MetricsScalar) -> None: class MetricsRecord(TypedDict[str, MetricsRecordValues]): - """Metrics record.""" + """Metrics recod. 
+ + A :code:`MetricsRecord` is a Python dictionary designed to ensure that + each key-value pair adheres to specified data types. A :code:`MetricsRecord` + is one of the types of records that a + `flwr.common.RecordSet `_ supports and + can therefore be used to construct :code:`common.Message` objects. + + Parameters + ---------- + metrics_dict : Optional[Dict[str, MetricsRecordValues]] + A dictionary that stores basic types (i.e. `int`, `float` as defined + in `MetricsScalar`) and list of such types (see `MetricsScalarList`). + keep_input : bool (default: True) + A boolean indicating whether metrics should be deleted from the input + dictionary immediately after adding them to the record. When set + to True, the data is duplicated in memory. If memory is a concern, set + it to False. + + Examples + -------- + The usage of a :code:`MetricsRecord` is envisioned for communicating results + obtained when a node performs an action. A few typical examples include: + communicating the training accuracy after a model is trained locally by a + :code:`ClientApp`, reporting the validation loss obtained at a :code:`ClientApp`, + or, more generally, the output of executing a query by the :code:`ClientApp`. + Common to these examples is that the output can be typically represented by + a single scalar (:code:`int`, :code:`float`) or list of scalars. + + Let's see some examples of how to construct a :code:`MetricsRecord` from scratch: + + >>> from flwr.common import MetricsRecord + >>> + >>> # A `MetricsRecord` is a specialized Python dictionary + >>> record = MetricsRecord({"accuracy": 0.94}) + >>> # You can add more content to an existing record + >>> record["loss"] = 0.01 + >>> # It also supports lists + >>> record["loss-historic"] = [0.9, 0.5, 0.01] + + Since types are enforced, the types of the objects inserted are checked. For a + :code:`MetricsRecord`, value types allowed are those in defined in + :code:`flwr.common.MetricsRecordValues`. 
Similarly, only :code:`str` keys are + allowed. + + >>> from flwr.common import MetricsRecord + >>> + >>> record = MetricsRecord() # an empty record + >>> # Add unsupported value + >>> record["something-unsupported"] = {'a': 123} # Will throw a `TypeError` + + If you need a more versatily type of record try :code:`ConfigsRecord` or + :code:`ParametersRecord`. + """ def __init__( self, - metrics_dict: Optional[Dict[str, MetricsRecordValues]] = None, + metrics_dict: Optional[dict[str, MetricsRecordValues]] = None, keep_input: bool = True, ): - """Construct a MetricsRecord object. - - Parameters - ---------- - metrics_dict : Optional[Dict[str, MetricsRecordValues]] - A dictionary that stores basic types (i.e. `int`, `float` as defined - in `MetricsScalar`) and list of such types (see `MetricsScalarList`). - keep_input : bool (default: True) - A boolean indicating whether metrics should be deleted from the input - dictionary immediately after adding them to the record. When set - to True, the data is duplicated in memory. If memory is a concern, set - it to False. 
- """ super().__init__(_check_key, _check_value) if metrics_dict: for k in list(metrics_dict.keys()): @@ -90,7 +130,7 @@ def count_bytes(self) -> int: num_bytes = 0 for k, v in self.items(): - if isinstance(v, List): + if isinstance(v, list): # both int and float normally take 4 bytes # But MetricRecords are mapped to 64bit int/float # during protobuffing diff --git a/src/py/flwr/common/record/parametersrecord.py b/src/py/flwr/common/record/parametersrecord.py index 93db6d387b53..10ec65ca0277 100644 --- a/src/py/flwr/common/record/parametersrecord.py +++ b/src/py/flwr/common/record/parametersrecord.py @@ -14,9 +14,10 @@ # ============================================================================== """ParametersRecord and Array.""" +from collections import OrderedDict from dataclasses import dataclass from io import BytesIO -from typing import List, Optional, OrderedDict, cast +from typing import Optional, cast import numpy as np @@ -51,7 +52,7 @@ class Array: """ dtype: str - shape: List[int] + shape: list[int] stype: str data: bytes @@ -83,11 +84,93 @@ def _check_value(value: Array) -> None: class ParametersRecord(TypedDict[str, Array]): - """Parameters record. + r"""Parameters record. A dataclass storing named Arrays in order. This means that it holds entries as an OrderedDict[str, Array]. ParametersRecord objects can be viewed as an equivalent to - PyTorch's state_dict, but holding serialised tensors instead. + PyTorch's state_dict, but holding serialised tensors instead. A + :code:`ParametersRecord` is one of the types of records that a + `flwr.common.RecordSet `_ supports and + can therefore be used to construct :code:`common.Message` objects. + + Parameters + ---------- + array_dict : Optional[OrderedDict[str, Array]] + A dictionary that stores serialized array-like or tensor-like objects. + keep_input : bool (default: False) + A boolean indicating whether parameters should be deleted from the input + dictionary immediately after adding them to the record. 
If False, the + dictionary passed to `set_parameters()` will be empty once exiting from that + function. This is the desired behaviour when working with very large + models/tensors/arrays. However, if you plan to continue working with your + parameters after adding it to the record, set this flag to True. When set + to True, the data is duplicated in memory. + + Examples + -------- + The usage of :code:`ParametersRecord` is envisioned for storing data arrays (e.g. + parameters of a machine learning model). These first need to be serialized into + a :code:`flwr.common.Array` data structure. + + Let's see some examples: + + >>> import numpy as np + >>> from flwr.common import ParametersRecord + >>> from flwr.common import array_from_numpy + >>> + >>> # Let's create a simple NumPy array + >>> arr_np = np.random.randn(3, 3) + >>> + >>> # If we print it + >>> array([[-1.84242409, -1.01539537, -0.46528405], + >>> [ 0.32991896, 0.55540414, 0.44085534], + >>> [-0.10758364, 1.97619858, -0.37120501]]) + >>> + >>> # Let's create an Array out of it + >>> arr = array_from_numpy(arr_np) + >>> + >>> # If we print it you'll see (note the binary data) + >>> Array(dtype='float64', shape=[3,3], stype='numpy.ndarray', data=b'@\x99\x18...') + >>> + >>> # Adding it to a ParametersRecord: + >>> p_record = ParametersRecord({"my_array": arr}) + + Now that the NumPy array is embedded into a :code:`ParametersRecord` it could be + sent if added as part of a :code:`common.Message` or it could be saved as a + persistent state of a :code:`ClientApp` via its context. Regardless of the usecase, + we will sooner or later want to recover the array in its original NumPy + representation. 
For the example above, where the array was serialized using the + built-in utility function, deserialization can be done as follows: + + >>> # Use the Array's built-in method + >>> arr_np_d = arr.numpy() + >>> + >>> # If printed, it will show the exact same data as above: + >>> array([[-1.84242409, -1.01539537, -0.46528405], + >>> [ 0.32991896, 0.55540414, 0.44085534], + >>> [-0.10758364, 1.97619858, -0.37120501]]) + + If you need finer control on how your arrays are serialized and deserialized, you + can construct :code:`Array` objects directly like this: + + >>> from flwr.common import Array + >>> # Serialize your array and construct Array object + >>> arr = Array( + >>> data=ndarray.tobytes(), + >>> dtype=str(ndarray.dtype), + >>> stype="", # Could be used in a deserialization function + >>> shape=list(ndarray.shape), + >>> ) + >>> + >>> # Then you can deserialize it like this + >>> arr_np_d = np.frombuffer( + >>> buffer=array.data, + >>> dtype=array.dtype, + >>> ).reshape(array.shape) + + Note that different arrays (e.g. from PyTorch, Tensorflow) might require different + serialization mechanism. Howerver, they often support a conversion to NumPy, + therefore allowing to use the same or similar steps as in the example above. """ def __init__( @@ -95,21 +178,6 @@ def __init__( array_dict: Optional[OrderedDict[str, Array]] = None, keep_input: bool = False, ) -> None: - """Construct a ParametersRecord object. - - Parameters - ---------- - array_dict : Optional[OrderedDict[str, Array]] - A dictionary that stores serialized array-like or tensor-like objects. - keep_input : bool (default: False) - A boolean indicating whether parameters should be deleted from the input - dictionary immediately after adding them to the record. If False, the - dictionary passed to `set_parameters()` will be empty once exiting from that - function. This is the desired behaviour when working with very large - models/tensors/arrays. 
However, if you plan to continue working with your - parameters after adding it to the record, set this flag to True. When set - to True, the data is duplicated in memory. - """ super().__init__(_check_key, _check_value) if array_dict: for k in list(array_dict.keys()): diff --git a/src/py/flwr/common/record/parametersrecord_test.py b/src/py/flwr/common/record/parametersrecord_test.py index e840e5e266e4..9ac18a3ec854 100644 --- a/src/py/flwr/common/record/parametersrecord_test.py +++ b/src/py/flwr/common/record/parametersrecord_test.py @@ -17,7 +17,6 @@ import unittest from collections import OrderedDict from io import BytesIO -from typing import List import numpy as np import pytest @@ -81,7 +80,7 @@ def test_numpy_conversion_invalid(self) -> None: ([31, 153], "bool_"), # bool_ is represented as a whole Byte in NumPy ], ) -def test_count_bytes(shape: List[int], dtype: str) -> None: +def test_count_bytes(shape: list[int], dtype: str) -> None: """Test bytes in a ParametersRecord are computed correctly.""" original_array = np.random.randn(*shape).astype(np.dtype(dtype)) diff --git a/src/py/flwr/common/record/recordset.py b/src/py/flwr/common/record/recordset.py index 74eed46ad86f..b2d1da4411bb 100644 --- a/src/py/flwr/common/record/recordset.py +++ b/src/py/flwr/common/record/recordset.py @@ -15,8 +15,10 @@ """RecordSet.""" +from __future__ import annotations + from dataclasses import dataclass -from typing import Dict, Optional, cast +from typing import cast from .configsrecord import ConfigsRecord from .metricsrecord import MetricsRecord @@ -34,9 +36,9 @@ class RecordSetData: def __init__( self, - parameters_records: Optional[Dict[str, ParametersRecord]] = None, - metrics_records: Optional[Dict[str, MetricsRecord]] = None, - configs_records: Optional[Dict[str, ConfigsRecord]] = None, + parameters_records: dict[str, ParametersRecord] | None = None, + metrics_records: dict[str, MetricsRecord] | None = None, + configs_records: dict[str, ConfigsRecord] | None = None, ) 
-> None: self.parameters_records = TypedDict[str, ParametersRecord]( self._check_fn_str, self._check_fn_params @@ -84,13 +86,83 @@ def _check_fn_configs(self, record: ConfigsRecord) -> None: class RecordSet: - """RecordSet stores groups of parameters, metrics and configs.""" + """RecordSet stores groups of parameters, metrics and configs. + + A :code:`RecordSet` is the unified mechanism by which parameters, + metrics and configs can be either stored as part of a + `flwr.common.Context `_ in your apps + or communicated as part of a + `flwr.common.Message `_ between your apps. + + Parameters + ---------- + parameters_records : Optional[Dict[str, ParametersRecord]] + A dictionary of :code:`ParametersRecords` that can be used to record + and communicate model parameters and high-dimensional arrays. + metrics_records : Optional[Dict[str, MetricsRecord]] + A dictionary of :code:`MetricsRecord` that can be used to record + and communicate scalar-valued metrics that are the result of performing + and action, for example, by a :code:`ClientApp`. + configs_records : Optional[Dict[str, ConfigsRecord]] + A dictionary of :code:`ConfigsRecord` that can be used to record + and communicate configuration values to an entity (e.g. to a + :code:`ClientApp`) + for it to adjust how an action is performed. + + Examples + -------- + A :code:`RecordSet` can hold three types of records, each designed + with an specific purpose. What is common to all of them is that they + are Python dictionaries designed to ensure that each key-value pair + adheres to specified data types. + + Let's see an example. 
+ + >>> from flwr.common import RecordSet + >>> from flwr.common import ConfigsRecord, MetricsRecord, ParametersRecord + >>> + >>> # Let's begin with an empty record + >>> my_recordset = RecordSet() + >>> + >>> # We can create a ConfigsRecord + >>> c_record = ConfigsRecord({"lr": 0.1, "batch-size": 128}) + >>> # Adding it to the record_set would look like this + >>> my_recordset.configs_records["my_config"] = c_record + >>> + >>> # We can create a MetricsRecord following a similar process + >>> m_record = MetricsRecord({"accuracy": 0.93, "losses": [0.23, 0.1]}) + >>> # Adding it to the record_set would look like this + >>> my_recordset.metrics_records["my_metrics"] = m_record + + Adding a :code:`ParametersRecord` follows the same steps as above but first, + the array needs to be serialized and represented as a :code:`flwr.common.Array`. + If the array is a :code:`NumPy` array, you can use the built-in utility function + `array_from_numpy `_. It is often possible to + convert an array first to :code:`NumPy` and then use the aforementioned function. + + >>> from flwr.common import array_from_numpy + >>> # Creating a ParametersRecord would look like this + >>> arr_np = np.random.randn(3, 3) + >>> + >>> # You can use the built-in tool to serialize the array + >>> arr = array_from_numpy(arr_np) + >>> + >>> # Finally, create the record + >>> p_record = ParametersRecord({"my_array": arr}) + >>> + >>> # Adding it to the record_set would look like this + >>> my_recordset.configs_records["my_config"] = c_record + + For additional examples on how to construct each of the records types shown + above, please refer to the documentation for :code:`ConfigsRecord`, + :code:`MetricsRecord` and :code:`ParametersRecord`. 
+ """ def __init__( self, - parameters_records: Optional[Dict[str, ParametersRecord]] = None, - metrics_records: Optional[Dict[str, MetricsRecord]] = None, - configs_records: Optional[Dict[str, ConfigsRecord]] = None, + parameters_records: dict[str, ParametersRecord] | None = None, + metrics_records: dict[str, MetricsRecord] | None = None, + configs_records: dict[str, ConfigsRecord] | None = None, ) -> None: data = RecordSetData( parameters_records=parameters_records, diff --git a/src/py/flwr/common/record/recordset_test.py b/src/py/flwr/common/record/recordset_test.py index 01260793cb41..154e320e5f0b 100644 --- a/src/py/flwr/common/record/recordset_test.py +++ b/src/py/flwr/common/record/recordset_test.py @@ -15,9 +15,9 @@ """RecordSet tests.""" import pickle -from collections import namedtuple +from collections import OrderedDict, namedtuple from copy import deepcopy -from typing import Callable, Dict, List, OrderedDict, Type, Union +from typing import Callable, Union import numpy as np import pytest @@ -158,8 +158,8 @@ def test_set_parameters_with_correct_types() -> None: ], ) def test_set_parameters_with_incorrect_types( - key_type: Type[Union[int, str]], - value_fn: Callable[[NDArray], Union[NDArray, List[float]]], + key_type: type[Union[int, str]], + value_fn: Callable[[NDArray], Union[NDArray, list[float]]], ) -> None: """Test adding dictionary of unsupported types to ParametersRecord.""" p_record = ParametersRecord() @@ -169,7 +169,7 @@ def test_set_parameters_with_incorrect_types( } with pytest.raises(TypeError): - p_record.update(array_dict) + p_record.update(array_dict) # type: ignore @pytest.mark.parametrize( @@ -183,7 +183,7 @@ def test_set_parameters_with_incorrect_types( ], ) def test_set_metrics_to_metricsrecord_with_correct_types( - key_type: Type[str], + key_type: type[str], value_fn: Callable[[NDArray], MetricsRecordValues], ) -> None: """Test adding metrics of various types to a MetricsRecord.""" @@ -236,8 +236,8 @@ def 
test_set_metrics_to_metricsrecord_with_correct_types( ], ) def test_set_metrics_to_metricsrecord_with_incorrect_types( - key_type: Type[Union[str, int, float, bool]], - value_fn: Callable[[NDArray], Union[NDArray, Dict[str, NDArray], List[float]]], + key_type: type[Union[str, int, float, bool]], + value_fn: Callable[[NDArray], Union[NDArray, dict[str, NDArray], list[float]]], ) -> None: """Test adding metrics of various unsupported types to a MetricsRecord.""" m_record = MetricsRecord() @@ -250,7 +250,7 @@ def test_set_metrics_to_metricsrecord_with_incorrect_types( ) with pytest.raises(TypeError): - m_record.update(my_metrics) + m_record.update(my_metrics) # type: ignore @pytest.mark.parametrize( @@ -302,7 +302,7 @@ def test_set_metrics_to_metricsrecord_with_and_without_keeping_input( ], ) def test_set_configs_to_configsrecord_with_correct_types( - key_type: Type[str], + key_type: type[str], value_fn: Callable[[NDArray], ConfigsRecordValues], ) -> None: """Test adding configs of various types to a ConfigsRecord.""" @@ -346,8 +346,8 @@ def test_set_configs_to_configsrecord_with_correct_types( ], ) def test_set_configs_to_configsrecord_with_incorrect_types( - key_type: Type[Union[str, int, float]], - value_fn: Callable[[NDArray], Union[NDArray, Dict[str, NDArray], List[float]]], + key_type: type[Union[str, int, float]], + value_fn: Callable[[NDArray], Union[NDArray, dict[str, NDArray], list[float]]], ) -> None: """Test adding configs of various unsupported types to a ConfigsRecord.""" c_record = ConfigsRecord() @@ -360,7 +360,7 @@ def test_set_configs_to_configsrecord_with_incorrect_types( ) with pytest.raises(TypeError): - c_record.update(my_configs) + c_record.update(my_configs) # type: ignore def test_count_bytes_metricsrecord() -> None: diff --git a/src/py/flwr/common/record/typeddict.py b/src/py/flwr/common/record/typeddict.py index 23d70dc4f7e8..c2c8548c4de3 100644 --- a/src/py/flwr/common/record/typeddict.py +++ b/src/py/flwr/common/record/typeddict.py @@ 
-15,99 +15,74 @@ """Typed dict base class for *Records.""" -from typing import Any, Callable, Dict, Generic, Iterator, Tuple, TypeVar, cast +from collections.abc import ItemsView, Iterator, KeysView, MutableMapping, ValuesView +from typing import Callable, Generic, TypeVar, cast K = TypeVar("K") # Key type V = TypeVar("V") # Value type -class TypedDict(Generic[K, V]): +class TypedDict(MutableMapping[K, V], Generic[K, V]): """Typed dictionary.""" def __init__( self, check_key_fn: Callable[[K], None], check_value_fn: Callable[[V], None] ): - self._data: Dict[K, V] = {} - self._check_key_fn = check_key_fn - self._check_value_fn = check_value_fn + self.__dict__["_check_key_fn"] = check_key_fn + self.__dict__["_check_value_fn"] = check_value_fn + self.__dict__["_data"] = {} def __setitem__(self, key: K, value: V) -> None: """Set the given key to the given value after type checking.""" # Check the types of key and value - self._check_key_fn(key) - self._check_value_fn(value) + cast(Callable[[K], None], self.__dict__["_check_key_fn"])(key) + cast(Callable[[V], None], self.__dict__["_check_value_fn"])(value) + # Set key-value pair - self._data[key] = value + cast(dict[K, V], self.__dict__["_data"])[key] = value def __delitem__(self, key: K) -> None: """Remove the item with the specified key.""" - del self._data[key] + del cast(dict[K, V], self.__dict__["_data"])[key] def __getitem__(self, item: K) -> V: """Return the value for the specified key.""" - return self._data[item] + return cast(dict[K, V], self.__dict__["_data"])[item] def __iter__(self) -> Iterator[K]: """Yield an iterator over the keys of the dictionary.""" - return iter(self._data) + return iter(cast(dict[K, V], self.__dict__["_data"])) def __repr__(self) -> str: """Return a string representation of the dictionary.""" - return self._data.__repr__() + return cast(dict[K, V], self.__dict__["_data"]).__repr__() def __len__(self) -> int: """Return the number of items in the dictionary.""" - return len(self._data) 
+ return len(cast(dict[K, V], self.__dict__["_data"])) - def __contains__(self, key: K) -> bool: + def __contains__(self, key: object) -> bool: """Check if the dictionary contains the specified key.""" - return key in self._data + return key in cast(dict[K, V], self.__dict__["_data"]) def __eq__(self, other: object) -> bool: """Compare this instance to another dictionary or TypedDict.""" + data = cast(dict[K, V], self.__dict__["_data"]) if isinstance(other, TypedDict): - return self._data == other._data + other_data = cast(dict[K, V], other.__dict__["_data"]) + return data == other_data if isinstance(other, dict): - return self._data == other + return data == other return NotImplemented - def items(self) -> Iterator[Tuple[K, V]]: - """R.items() -> a set-like object providing a view on R's items.""" - return cast(Iterator[Tuple[K, V]], self._data.items()) - - def keys(self) -> Iterator[K]: - """R.keys() -> a set-like object providing a view on R's keys.""" - return cast(Iterator[K], self._data.keys()) - - def values(self) -> Iterator[V]: - """R.values() -> an object providing a view on R's values.""" - return cast(Iterator[V], self._data.values()) - - def update(self, *args: Any, **kwargs: Any) -> None: - """R.update([E, ]**F) -> None. - - Update R from dict/iterable E and F. - """ - for key, value in dict(*args, **kwargs).items(): - self[key] = value - - def pop(self, key: K) -> V: - """R.pop(k[,d]) -> v, remove specified key and return the corresponding value. - - If key is not found, d is returned if given, otherwise KeyError is raised. - """ - return self._data.pop(key) - - def get(self, key: K, default: V) -> V: - """R.get(k[,d]) -> R[k] if k in R, else d. - - d defaults to None. - """ - return self._data.get(key, default) + def keys(self) -> KeysView[K]: + """D.keys() -> a set-like object providing a view on D's keys.""" + return cast(dict[K, V], self.__dict__["_data"]).keys() - def clear(self) -> None: - """R.clear() -> None. 
+ def values(self) -> ValuesView[V]: + """D.values() -> an object providing a view on D's values.""" + return cast(dict[K, V], self.__dict__["_data"]).values() - Remove all items from R. - """ - self._data.clear() + def items(self) -> ItemsView[K, V]: + """D.items() -> a set-like object providing a view on D's items.""" + return cast(dict[K, V], self.__dict__["_data"]).items() diff --git a/src/py/flwr/common/recordset_compat.py b/src/py/flwr/common/recordset_compat.py index 1b0bf52d8277..4641b8f29c96 100644 --- a/src/py/flwr/common/recordset_compat.py +++ b/src/py/flwr/common/recordset_compat.py @@ -15,7 +15,9 @@ """RecordSet utilities.""" -from typing import Dict, Mapping, OrderedDict, Tuple, Union, cast, get_args +from collections import OrderedDict +from collections.abc import Mapping +from typing import Union, cast, get_args from . import Array, ConfigsRecord, MetricsRecord, ParametersRecord, RecordSet from .typing import ( @@ -57,6 +59,11 @@ def parametersrecord_to_parameters( keep_input : bool A boolean indicating whether entries in the record should be deleted from the input dictionary immediately after adding them to the record. + + Returns + ------- + parameters : Parameters + The parameters in the legacy format Parameters. """ parameters = Parameters(tensors=[], tensor_type="") @@ -92,6 +99,11 @@ def parameters_to_parametersrecord( A boolean indicating whether parameters should be deleted from the input Parameters object (i.e. a list of serialized NumPy arrays) immediately after adding them to the record. + + Returns + ------- + ParametersRecord + The ParametersRecord containing the provided parameters. 
""" tensor_type = parameters.tensor_type @@ -115,7 +127,7 @@ def parameters_to_parametersrecord( def _check_mapping_from_recordscalartype_to_scalar( record_data: Mapping[str, Union[ConfigsRecordValues, MetricsRecordValues]] -) -> Dict[str, Scalar]: +) -> dict[str, Scalar]: """Check mapping `common.*RecordValues` into `common.Scalar` is possible.""" for value in record_data.values(): if not isinstance(value, get_args(Scalar)): @@ -126,14 +138,14 @@ def _check_mapping_from_recordscalartype_to_scalar( "supported by the `common.RecordSet` infrastructure. " f"You used type: {type(value)}" ) - return cast(Dict[str, Scalar], record_data) + return cast(dict[str, Scalar], record_data) def _recordset_to_fit_or_evaluate_ins_components( recordset: RecordSet, ins_str: str, keep_input: bool, -) -> Tuple[Parameters, Dict[str, Scalar]]: +) -> tuple[Parameters, dict[str, Scalar]]: """Derive Fit/Evaluate Ins from a RecordSet.""" # get Array and construct Parameters parameters_record = recordset.parameters_records[f"{ins_str}.parameters"] @@ -145,7 +157,7 @@ def _recordset_to_fit_or_evaluate_ins_components( # get config dict config_record = recordset.configs_records[f"{ins_str}.config"] # pylint: disable-next=protected-access - config_dict = _check_mapping_from_recordscalartype_to_scalar(config_record._data) + config_dict = _check_mapping_from_recordscalartype_to_scalar(config_record) return parameters, config_dict @@ -169,7 +181,7 @@ def _fit_or_evaluate_ins_to_recordset( def _embed_status_into_recordset( res_str: str, status: Status, recordset: RecordSet ) -> RecordSet: - status_dict: Dict[str, ConfigsRecordValues] = { + status_dict: dict[str, ConfigsRecordValues] = { "code": int(status.code.value), "message": status.message, } @@ -213,7 +225,7 @@ def recordset_to_fitres(recordset: RecordSet, keep_input: bool) -> FitRes: ) configs_record = recordset.configs_records[f"{ins_str}.metrics"] # pylint: disable-next=protected-access - metrics = 
_check_mapping_from_recordscalartype_to_scalar(configs_record._data) + metrics = _check_mapping_from_recordscalartype_to_scalar(configs_record) status = _extract_status_from_recordset(ins_str, recordset) return FitRes( @@ -274,7 +286,7 @@ def recordset_to_evaluateres(recordset: RecordSet) -> EvaluateRes: configs_record = recordset.configs_records[f"{ins_str}.metrics"] # pylint: disable-next=protected-access - metrics = _check_mapping_from_recordscalartype_to_scalar(configs_record._data) + metrics = _check_mapping_from_recordscalartype_to_scalar(configs_record) status = _extract_status_from_recordset(ins_str, recordset) return EvaluateRes( @@ -314,7 +326,7 @@ def recordset_to_getparametersins(recordset: RecordSet) -> GetParametersIns: """Derive GetParametersIns from a RecordSet object.""" config_record = recordset.configs_records["getparametersins.config"] # pylint: disable-next=protected-access - config_dict = _check_mapping_from_recordscalartype_to_scalar(config_record._data) + config_dict = _check_mapping_from_recordscalartype_to_scalar(config_record) return GetParametersIns(config=config_dict) @@ -365,7 +377,7 @@ def recordset_to_getpropertiesins(recordset: RecordSet) -> GetPropertiesIns: """Derive GetPropertiesIns from a RecordSet object.""" config_record = recordset.configs_records["getpropertiesins.config"] # pylint: disable-next=protected-access - config_dict = _check_mapping_from_recordscalartype_to_scalar(config_record._data) + config_dict = _check_mapping_from_recordscalartype_to_scalar(config_record) return GetPropertiesIns(config=config_dict) @@ -384,7 +396,7 @@ def recordset_to_getpropertiesres(recordset: RecordSet) -> GetPropertiesRes: res_str = "getpropertiesres" config_record = recordset.configs_records[f"{res_str}.properties"] # pylint: disable-next=protected-access - properties = _check_mapping_from_recordscalartype_to_scalar(config_record._data) + properties = _check_mapping_from_recordscalartype_to_scalar(config_record) status = 
_extract_status_from_recordset(res_str, recordset=recordset) diff --git a/src/py/flwr/common/recordset_compat_test.py b/src/py/flwr/common/recordset_compat_test.py index e0ac7f216af9..05d821e37e40 100644 --- a/src/py/flwr/common/recordset_compat_test.py +++ b/src/py/flwr/common/recordset_compat_test.py @@ -15,7 +15,7 @@ """RecordSet from legacy messages tests.""" from copy import deepcopy -from typing import Callable, Dict +from typing import Callable import numpy as np import pytest @@ -82,7 +82,7 @@ def _get_valid_fitins_with_empty_ndarrays() -> FitIns: def _get_valid_fitres() -> FitRes: """Returnn Valid parameters but potentially invalid config.""" arrays = get_ndarrays() - metrics: Dict[str, Scalar] = {"a": 1.0, "b": 0} + metrics: dict[str, Scalar] = {"a": 1.0, "b": 0} return FitRes( parameters=ndarrays_to_parameters(arrays), num_examples=1, @@ -98,7 +98,7 @@ def _get_valid_evaluateins() -> EvaluateIns: def _get_valid_evaluateres() -> EvaluateRes: """Return potentially invalid config.""" - metrics: Dict[str, Scalar] = {"a": 1.0, "b": 0} + metrics: dict[str, Scalar] = {"a": 1.0, "b": 0} return EvaluateRes( num_examples=1, loss=0.1, @@ -108,7 +108,7 @@ def _get_valid_evaluateres() -> EvaluateRes: def _get_valid_getparametersins() -> GetParametersIns: - config_dict: Dict[str, Scalar] = { + config_dict: dict[str, Scalar] = { "a": 1.0, "b": 3, "c": True, @@ -131,7 +131,7 @@ def _get_valid_getpropertiesins() -> GetPropertiesIns: def _get_valid_getpropertiesres() -> GetPropertiesRes: - config_dict: Dict[str, Scalar] = { + config_dict: dict[str, Scalar] = { "a": 1.0, "b": 3, "c": True, diff --git a/src/py/flwr/common/retry_invoker.py b/src/py/flwr/common/retry_invoker.py index d12124b89840..9785b0fbd9b4 100644 --- a/src/py/flwr/common/retry_invoker.py +++ b/src/py/flwr/common/retry_invoker.py @@ -18,20 +18,9 @@ import itertools import random import time +from collections.abc import Generator, Iterable from dataclasses import dataclass -from typing import ( - Any, - 
Callable, - Dict, - Generator, - Iterable, - List, - Optional, - Tuple, - Type, - Union, - cast, -) +from typing import Any, Callable, Optional, Union, cast def exponential( @@ -49,6 +38,11 @@ def exponential( Factor by which the delay is multiplied after each retry. max_delay: Optional[float] (default: None) The maximum delay duration between two consecutive retries. + + Returns + ------- + Generator[float, None, None] + A generator for the delay between 2 retries. """ delay = base_delay if max_delay is None else min(base_delay, max_delay) while True: @@ -67,6 +61,11 @@ def constant( ---------- interval: Union[float, Iterable[float]] (default: 1) A constant value to yield or an iterable of such values. + + Returns + ------- + Generator[float, None, None] + A generator for the delay between 2 retries. """ if not isinstance(interval, Iterable): interval = itertools.repeat(interval) @@ -84,6 +83,11 @@ def full_jitter(max_value: float) -> float: ---------- max_value : float The upper limit for the randomized value. + + Returns + ------- + float + A random float that is less than max_value. """ return random.uniform(0, max_value) @@ -93,8 +97,8 @@ class RetryState: """State for callbacks in RetryInvoker.""" target: Callable[..., Any] - args: Tuple[Any, ...] - kwargs: Dict[str, Any] + args: tuple[Any, ...] 
+ kwargs: dict[str, Any] tries: int elapsed_time: float exception: Optional[Exception] = None @@ -167,7 +171,7 @@ class RetryInvoker: def __init__( self, wait_gen_factory: Callable[[], Generator[float, None, None]], - recoverable_exceptions: Union[Type[Exception], Tuple[Type[Exception], ...]], + recoverable_exceptions: Union[type[Exception], tuple[type[Exception], ...]], max_tries: Optional[int], max_time: Optional[float], *, @@ -244,7 +248,7 @@ def try_call_event_handler( try_cnt = 0 wait_generator = self.wait_gen_factory() start = time.monotonic() - ref_state: List[Optional[RetryState]] = [None] + ref_state: list[Optional[RetryState]] = [None] while True: try_cnt += 1 diff --git a/src/py/flwr/common/retry_invoker_test.py b/src/py/flwr/common/retry_invoker_test.py index 2259ae47ded4..a9f2625ff443 100644 --- a/src/py/flwr/common/retry_invoker_test.py +++ b/src/py/flwr/common/retry_invoker_test.py @@ -15,7 +15,7 @@ """Tests for `RetryInvoker`.""" -from typing import Generator +from collections.abc import Generator from unittest.mock import MagicMock, Mock, patch import pytest diff --git a/src/py/flwr/common/secure_aggregation/__init__.py b/src/py/flwr/common/secure_aggregation/__init__.py index b4e0acc0c148..77e1ea3842d7 100644 --- a/src/py/flwr/common/secure_aggregation/__init__.py +++ b/src/py/flwr/common/secure_aggregation/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/src/py/flwr/common/secure_aggregation/crypto/__init__.py b/src/py/flwr/common/secure_aggregation/crypto/__init__.py index 2cb34493f7d0..3788dbc0ca15 100644 --- a/src/py/flwr/common/secure_aggregation/crypto/__init__.py +++ b/src/py/flwr/common/secure_aggregation/crypto/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. 
+# Copyright 2023 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/src/py/flwr/common/secure_aggregation/crypto/shamir.py b/src/py/flwr/common/secure_aggregation/crypto/shamir.py index e56e21b89371..9c7e67abf94f 100644 --- a/src/py/flwr/common/secure_aggregation/crypto/shamir.py +++ b/src/py/flwr/common/secure_aggregation/crypto/shamir.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,20 +17,20 @@ import pickle from concurrent.futures import ThreadPoolExecutor -from typing import List, Tuple, cast +from typing import cast from Crypto.Protocol.SecretSharing import Shamir from Crypto.Util.Padding import pad, unpad -def create_shares(secret: bytes, threshold: int, num: int) -> List[bytes]: +def create_shares(secret: bytes, threshold: int, num: int) -> list[bytes]: """Return list of shares (bytes).""" secret_padded = pad(secret, 16) secret_padded_chunk = [ (threshold, num, secret_padded[i : i + 16]) for i in range(0, len(secret_padded), 16) ] - share_list: List[List[Tuple[int, bytes]]] = [[] for _ in range(num)] + share_list: list[list[tuple[int, bytes]]] = [[] for _ in range(num)] with ThreadPoolExecutor(max_workers=10) as executor: for chunk_shares in executor.map( @@ -43,22 +43,22 @@ def create_shares(secret: bytes, threshold: int, num: int) -> List[bytes]: return [pickle.dumps(shares) for shares in share_list] -def _shamir_split(threshold: int, num: int, chunk: bytes) -> List[Tuple[int, bytes]]: +def _shamir_split(threshold: int, num: int, chunk: bytes) -> list[tuple[int, bytes]]: return Shamir.split(threshold, num, chunk, ssss=False) # Reconstructing secret with PyCryptodome -def combine_shares(share_list: 
List[bytes]) -> bytes: +def combine_shares(share_list: list[bytes]) -> bytes: """Reconstruct secret from shares.""" - unpickled_share_list: List[List[Tuple[int, bytes]]] = [ - cast(List[Tuple[int, bytes]], pickle.loads(share)) for share in share_list + unpickled_share_list: list[list[tuple[int, bytes]]] = [ + cast(list[tuple[int, bytes]], pickle.loads(share)) for share in share_list ] chunk_num = len(unpickled_share_list[0]) secret_padded = bytearray(0) - chunk_shares_list: List[List[Tuple[int, bytes]]] = [] + chunk_shares_list: list[list[tuple[int, bytes]]] = [] for i in range(chunk_num): - chunk_shares: List[Tuple[int, bytes]] = [] + chunk_shares: list[tuple[int, bytes]] = [] for share in unpickled_share_list: chunk_shares.append(share[i]) chunk_shares_list.append(chunk_shares) @@ -71,5 +71,5 @@ def combine_shares(share_list: List[bytes]) -> bytes: return bytes(secret) -def _shamir_combine(shares: List[Tuple[int, bytes]]) -> bytes: +def _shamir_combine(shares: list[tuple[int, bytes]]) -> bytes: return Shamir.combine(shares, ssss=False) diff --git a/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption.py b/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption.py index 1d004a398ea8..f5c130fb2663 100644 --- a/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption.py +++ b/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -16,7 +16,7 @@ import base64 -from typing import Tuple, cast +from typing import cast from cryptography.exceptions import InvalidSignature from cryptography.fernet import Fernet @@ -26,7 +26,7 @@ def generate_key_pairs() -> ( - Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] ): """Generate private and public key pairs with Cryptography.""" private_key = ec.generate_private_key(ec.SECP384R1()) diff --git a/src/py/flwr/common/secure_aggregation/ndarrays_arithmetic.py b/src/py/flwr/common/secure_aggregation/ndarrays_arithmetic.py index e926a9531bea..3197fd852f3d 100644 --- a/src/py/flwr/common/secure_aggregation/ndarrays_arithmetic.py +++ b/src/py/flwr/common/secure_aggregation/ndarrays_arithmetic.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -15,51 +15,51 @@ """Utility functions for performing operations on Numpy NDArrays.""" -from typing import Any, List, Tuple, Union +from typing import Any, Union import numpy as np from numpy.typing import DTypeLike, NDArray -def factor_combine(factor: int, parameters: List[NDArray[Any]]) -> List[NDArray[Any]]: +def factor_combine(factor: int, parameters: list[NDArray[Any]]) -> list[NDArray[Any]]: """Combine factor with parameters.""" return [np.array([factor])] + parameters def factor_extract( - parameters: List[NDArray[Any]], -) -> Tuple[int, List[NDArray[Any]]]: + parameters: list[NDArray[Any]], +) -> tuple[int, list[NDArray[Any]]]: """Extract factor from parameters.""" return parameters[0][0], parameters[1:] -def get_parameters_shape(parameters: List[NDArray[Any]]) -> List[Tuple[int, ...]]: +def get_parameters_shape(parameters: list[NDArray[Any]]) -> list[tuple[int, ...]]: """Get dimensions of each NDArray in parameters.""" return [arr.shape for arr in parameters] def get_zero_parameters( - dimensions_list: List[Tuple[int, ...]], dtype: DTypeLike = np.int64 -) -> List[NDArray[Any]]: + dimensions_list: list[tuple[int, ...]], dtype: DTypeLike = np.int64 +) -> list[NDArray[Any]]: """Generate zero parameters based on the dimensions list.""" return [np.zeros(dimensions, dtype=dtype) for dimensions in dimensions_list] def parameters_addition( - parameters1: List[NDArray[Any]], parameters2: List[NDArray[Any]] -) -> List[NDArray[Any]]: + parameters1: list[NDArray[Any]], parameters2: list[NDArray[Any]] +) -> list[NDArray[Any]]: """Add two parameters.""" return [parameters1[idx] + parameters2[idx] for idx in range(len(parameters1))] def parameters_subtraction( - parameters1: List[NDArray[Any]], parameters2: List[NDArray[Any]] -) -> List[NDArray[Any]]: + parameters1: list[NDArray[Any]], parameters2: list[NDArray[Any]] +) -> list[NDArray[Any]]: """Subtract parameters from the other parameters.""" return [parameters1[idx] - parameters2[idx] for idx in 
range(len(parameters1))] -def parameters_mod(parameters: List[NDArray[Any]], divisor: int) -> List[NDArray[Any]]: +def parameters_mod(parameters: list[NDArray[Any]], divisor: int) -> list[NDArray[Any]]: """Take mod of parameters with an integer divisor.""" if bin(divisor).count("1") == 1: msk = divisor - 1 @@ -68,14 +68,14 @@ def parameters_mod(parameters: List[NDArray[Any]], divisor: int) -> List[NDArray def parameters_multiply( - parameters: List[NDArray[Any]], multiplier: Union[int, float] -) -> List[NDArray[Any]]: + parameters: list[NDArray[Any]], multiplier: Union[int, float] +) -> list[NDArray[Any]]: """Multiply parameters by an integer/float multiplier.""" return [parameters[idx] * multiplier for idx in range(len(parameters))] def parameters_divide( - parameters: List[NDArray[Any]], divisor: Union[int, float] -) -> List[NDArray[Any]]: + parameters: list[NDArray[Any]], divisor: Union[int, float] +) -> list[NDArray[Any]]: """Divide weight by an integer/float divisor.""" return [parameters[idx] / divisor for idx in range(len(parameters))] diff --git a/src/py/flwr/common/secure_aggregation/quantization.py b/src/py/flwr/common/secure_aggregation/quantization.py index 56c25e2bd59c..ab8521eed981 100644 --- a/src/py/flwr/common/secure_aggregation/quantization.py +++ b/src/py/flwr/common/secure_aggregation/quantization.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -15,7 +15,7 @@ """Utility functions for model quantization.""" -from typing import List, cast +from typing import cast import numpy as np @@ -30,10 +30,10 @@ def _stochastic_round(arr: NDArrayFloat) -> NDArrayInt: def quantize( - parameters: List[NDArrayFloat], clipping_range: float, target_range: int -) -> List[NDArrayInt]: + parameters: list[NDArrayFloat], clipping_range: float, target_range: int +) -> list[NDArrayInt]: """Quantize float Numpy arrays to integer Numpy arrays.""" - quantized_list: List[NDArrayInt] = [] + quantized_list: list[NDArrayInt] = [] quantizer = target_range / (2 * clipping_range) for arr in parameters: # Stochastic quantization @@ -49,12 +49,12 @@ def quantize( # Dequantize parameters to range [-clipping_range, clipping_range] def dequantize( - quantized_parameters: List[NDArrayInt], + quantized_parameters: list[NDArrayInt], clipping_range: float, target_range: int, -) -> List[NDArrayFloat]: +) -> list[NDArrayFloat]: """Dequantize integer Numpy arrays to float Numpy arrays.""" - reverse_quantized_list: List[NDArrayFloat] = [] + reverse_quantized_list: list[NDArrayFloat] = [] quantizer = (2 * clipping_range) / target_range shift = -clipping_range for arr in quantized_parameters: diff --git a/src/py/flwr/common/secure_aggregation/secaggplus_constants.py b/src/py/flwr/common/secure_aggregation/secaggplus_constants.py index 8a15908c13c5..545507eb44ed 100644 --- a/src/py/flwr/common/secure_aggregation/secaggplus_constants.py +++ b/src/py/flwr/common/secure_aggregation/secaggplus_constants.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/src/py/flwr/common/secure_aggregation/secaggplus_utils.py b/src/py/flwr/common/secure_aggregation/secaggplus_utils.py index c373573477b9..919894d5388f 100644 --- a/src/py/flwr/common/secure_aggregation/secaggplus_utils.py +++ b/src/py/flwr/common/secure_aggregation/secaggplus_utils.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,8 +15,6 @@ """Utility functions for the SecAgg/SecAgg+ protocol.""" -from typing import List, Tuple - import numpy as np from flwr.common.typing import NDArrayInt @@ -45,8 +43,8 @@ def share_keys_plaintext_concat( """ return b"".join( [ - int.to_bytes(src_node_id, 8, "little", signed=True), - int.to_bytes(dst_node_id, 8, "little", signed=True), + int.to_bytes(src_node_id, 8, "little", signed=False), + int.to_bytes(dst_node_id, 8, "little", signed=False), int.to_bytes(len(b_share), 4, "little"), b_share, sk_share, @@ -54,7 +52,7 @@ def share_keys_plaintext_concat( ) -def share_keys_plaintext_separate(plaintext: bytes) -> Tuple[int, int, bytes, bytes]: +def share_keys_plaintext_separate(plaintext: bytes) -> tuple[int, int, bytes, bytes]: """Retrieve arguments from bytes. Parameters @@ -74,8 +72,8 @@ def share_keys_plaintext_separate(plaintext: bytes) -> Tuple[int, int, bytes, by the secret key share of the source sent to the destination. 
""" src, dst, mark = ( - int.from_bytes(plaintext[:8], "little", signed=True), - int.from_bytes(plaintext[8:16], "little", signed=True), + int.from_bytes(plaintext[:8], "little", signed=False), + int.from_bytes(plaintext[8:16], "little", signed=False), int.from_bytes(plaintext[16:20], "little"), ) ret = (src, dst, plaintext[20 : 20 + mark], plaintext[20 + mark :]) @@ -83,8 +81,8 @@ def share_keys_plaintext_separate(plaintext: bytes) -> Tuple[int, int, bytes, by def pseudo_rand_gen( - seed: bytes, num_range: int, dimensions_list: List[Tuple[int, ...]] -) -> List[NDArrayInt]: + seed: bytes, num_range: int, dimensions_list: list[tuple[int, ...]] +) -> list[NDArrayInt]: """Seeded pseudo-random number generator for noise generation with Numpy.""" assert len(seed) & 0x3 == 0 seed32 = 0 diff --git a/src/py/flwr/common/serde.py b/src/py/flwr/common/serde.py index 84932b806aff..54790992b40d 100644 --- a/src/py/flwr/common/serde.py +++ b/src/py/flwr/common/serde.py @@ -15,12 +15,19 @@ """ProtoBuf serialization and deserialization.""" -from typing import Any, Dict, List, MutableMapping, OrderedDict, Type, TypeVar, cast +from collections import OrderedDict +from collections.abc import MutableMapping +from typing import Any, TypeVar, cast from google.protobuf.message import Message as GrpcMessage # pylint: disable=E0611 +from flwr.proto.clientappio_pb2 import ClientAppOutputCode, ClientAppOutputStatus from flwr.proto.error_pb2 import Error as ProtoError +from flwr.proto.fab_pb2 import Fab as ProtoFab +from flwr.proto.message_pb2 import Context as ProtoContext +from flwr.proto.message_pb2 import Message as ProtoMessage +from flwr.proto.message_pb2 import Metadata as ProtoMetadata from flwr.proto.node_pb2 import Node from flwr.proto.recordset_pb2 import Array as ProtoArray from flwr.proto.recordset_pb2 import BoolList, BytesList @@ -31,7 +38,8 @@ from flwr.proto.recordset_pb2 import MetricsRecordValue as ProtoMetricsRecordValue from flwr.proto.recordset_pb2 import 
ParametersRecord as ProtoParametersRecord from flwr.proto.recordset_pb2 import RecordSet as ProtoRecordSet -from flwr.proto.recordset_pb2 import Sint64List, StringList +from flwr.proto.recordset_pb2 import SintList, StringList, UintList +from flwr.proto.run_pb2 import Run as ProtoRun from flwr.proto.task_pb2 import Task, TaskIns, TaskRes from flwr.proto.transport_pb2 import ( ClientMessage, @@ -44,7 +52,15 @@ ) # pylint: enable=E0611 -from . import Array, ConfigsRecord, MetricsRecord, ParametersRecord, RecordSet, typing +from . import ( + Array, + ConfigsRecord, + Context, + MetricsRecord, + ParametersRecord, + RecordSet, + typing, +) from .message import Error, Message, Metadata from .record.typeddict import TypedDict @@ -58,7 +74,7 @@ def parameters_to_proto(parameters: typing.Parameters) -> Parameters: def parameters_from_proto(msg: Parameters) -> typing.Parameters: """Deserialize `Parameters` from ProtoBuf.""" - tensors: List[bytes] = list(msg.tensors) + tensors: list[bytes] = list(msg.tensors) return typing.Parameters(tensors=tensors, tensor_type=msg.tensor_type) @@ -324,6 +340,7 @@ def metrics_from_proto(proto: Any) -> typing.Metrics: # === Scalar messages === +INT64_MAX_VALUE = 9223372036854775807 # (1 << 63) - 1 def scalar_to_proto(scalar: typing.Scalar) -> Scalar: @@ -338,6 +355,9 @@ def scalar_to_proto(scalar: typing.Scalar) -> Scalar: return Scalar(double=scalar) if isinstance(scalar, int): + # Use uint64 for integers larger than the maximum value of sint64 + if scalar > INT64_MAX_VALUE: + return Scalar(uint64=scalar) return Scalar(sint64=scalar) if isinstance(scalar, str): @@ -358,16 +378,16 @@ def scalar_from_proto(scalar_msg: Scalar) -> typing.Scalar: # === Record messages === -_type_to_field = { +_type_to_field: dict[type, str] = { float: "double", int: "sint64", bool: "bool", str: "string", bytes: "bytes", } -_list_type_to_class_and_field = { +_list_type_to_class_and_field: dict[type, tuple[type[GrpcMessage], str]] = { float: (DoubleList, 
"double_list"), - int: (Sint64List, "sint64_list"), + int: (SintList, "sint_list"), bool: (BoolList, "bool_list"), str: (StringList, "string_list"), bytes: (BytesList, "bytes_list"), @@ -375,8 +395,13 @@ def scalar_from_proto(scalar_msg: Scalar) -> typing.Scalar: T = TypeVar("T") +def _is_uint64(value: Any) -> bool: + """Check if a value is uint64.""" + return isinstance(value, int) and value > INT64_MAX_VALUE + + def _record_value_to_proto( - value: Any, allowed_types: List[type], proto_class: Type[T] + value: Any, allowed_types: list[type], proto_class: type[T] ) -> T: """Serialize `*RecordValue` to ProtoBuf. @@ -387,12 +412,18 @@ def _record_value_to_proto( # Single element # Note: `isinstance(False, int) == True`. if isinstance(value, t): - arg[_type_to_field[t]] = value + fld = _type_to_field[t] + if t is int and _is_uint64(value): + fld = "uint64" + arg[fld] = value return proto_class(**arg) # List if isinstance(value, list) and all(isinstance(item, t) for item in value): - list_class, field_name = _list_type_to_class_and_field[t] - arg[field_name] = list_class(vals=value) + list_class, fld = _list_type_to_class_and_field[t] + # Use UintList if any element is of type `uint64`. + if t is int and any(_is_uint64(v) for v in value): + list_class, fld = UintList, "uint_list" + arg[fld] = list_class(vals=value) return proto_class(**arg) # Invalid types raise TypeError( @@ -413,9 +444,9 @@ def _record_value_from_proto(value_proto: GrpcMessage) -> Any: def _record_value_dict_to_proto( value_dict: TypedDict[str, Any], - allowed_types: List[type], - value_proto_class: Type[T], -) -> Dict[str, T]: + allowed_types: list[type], + value_proto_class: type[T], +) -> dict[str, T]: """Serialize the record value dict to ProtoBuf. Note: `bool` MUST be put in the front of allowd_types if it exists. 
@@ -433,7 +464,7 @@ def proto(_v: Any) -> T: def _record_value_dict_from_proto( value_dict_proto: MutableMapping[str, Any] -) -> Dict[str, Any]: +) -> dict[str, Any]: """Deserialize the record value dict from ProtoBuf.""" return {k: _record_value_from_proto(v) for k, v in value_dict_proto.items()} @@ -484,7 +515,7 @@ def metrics_record_from_proto(record_proto: ProtoMetricsRecord) -> MetricsRecord """Deserialize MetricsRecord from ProtoBuf.""" return MetricsRecord( metrics_dict=cast( - Dict[str, typing.MetricsRecordValues], + dict[str, typing.MetricsRecordValues], _record_value_dict_from_proto(record_proto.data), ), keep_input=False, @@ -506,7 +537,7 @@ def configs_record_from_proto(record_proto: ProtoConfigsRecord) -> ConfigsRecord """Deserialize ConfigsRecord from ProtoBuf.""" return ConfigsRecord( configs_dict=cast( - Dict[str, typing.ConfigsRecordValues], + dict[str, typing.ConfigsRecordValues], _record_value_dict_from_proto(record_proto.data), ), keep_input=False, @@ -671,3 +702,211 @@ def message_from_taskres(taskres: TaskRes) -> Message: ) message.metadata.created_at = taskres.task.created_at return message + + +# === FAB === + + +def fab_to_proto(fab: typing.Fab) -> ProtoFab: + """Create a proto Fab object from a Python Fab.""" + return ProtoFab(hash_str=fab.hash_str, content=fab.content) + + +def fab_from_proto(fab: ProtoFab) -> typing.Fab: + """Create a Python Fab object from a proto Fab.""" + return typing.Fab(fab.hash_str, fab.content) + + +# === User configs === + + +def user_config_to_proto(user_config: typing.UserConfig) -> Any: + """Serialize `UserConfig` to ProtoBuf.""" + proto = {} + for key, value in user_config.items(): + proto[key] = user_config_value_to_proto(value) + return proto + + +def user_config_from_proto(proto: Any) -> typing.UserConfig: + """Deserialize `UserConfig` from ProtoBuf.""" + metrics = {} + for key, value in proto.items(): + metrics[key] = user_config_value_from_proto(value) + return metrics + + +def 
user_config_value_to_proto(user_config_value: typing.UserConfigValue) -> Scalar: + """Serialize `UserConfigValue` to ProtoBuf.""" + if isinstance(user_config_value, bool): + return Scalar(bool=user_config_value) + + if isinstance(user_config_value, float): + return Scalar(double=user_config_value) + + if isinstance(user_config_value, int): + return Scalar(sint64=user_config_value) + + if isinstance(user_config_value, str): + return Scalar(string=user_config_value) + + raise ValueError( + f"Accepted types: {bool, float, int, str} (but not {type(user_config_value)})" + ) + + +def user_config_value_from_proto(scalar_msg: Scalar) -> typing.UserConfigValue: + """Deserialize `UserConfigValue` from ProtoBuf.""" + scalar_field = scalar_msg.WhichOneof("scalar") + scalar = getattr(scalar_msg, cast(str, scalar_field)) + return cast(typing.UserConfigValue, scalar) + + +# === Metadata messages === + + +def metadata_to_proto(metadata: Metadata) -> ProtoMetadata: + """Serialize `Metadata` to ProtoBuf.""" + proto = ProtoMetadata( # pylint: disable=E1101 + run_id=metadata.run_id, + message_id=metadata.message_id, + src_node_id=metadata.src_node_id, + dst_node_id=metadata.dst_node_id, + reply_to_message=metadata.reply_to_message, + group_id=metadata.group_id, + ttl=metadata.ttl, + message_type=metadata.message_type, + created_at=metadata.created_at, + ) + return proto + + +def metadata_from_proto(metadata_proto: ProtoMetadata) -> Metadata: + """Deserialize `Metadata` from ProtoBuf.""" + metadata = Metadata( + run_id=metadata_proto.run_id, + message_id=metadata_proto.message_id, + src_node_id=metadata_proto.src_node_id, + dst_node_id=metadata_proto.dst_node_id, + reply_to_message=metadata_proto.reply_to_message, + group_id=metadata_proto.group_id, + ttl=metadata_proto.ttl, + message_type=metadata_proto.message_type, + ) + return metadata + + +# === Message messages === + + +def message_to_proto(message: Message) -> ProtoMessage: + """Serialize `Message` to ProtoBuf.""" + proto = 
ProtoMessage( + metadata=metadata_to_proto(message.metadata), + content=( + recordset_to_proto(message.content) if message.has_content() else None + ), + error=error_to_proto(message.error) if message.has_error() else None, + ) + return proto + + +def message_from_proto(message_proto: ProtoMessage) -> Message: + """Deserialize `Message` from ProtoBuf.""" + created_at = message_proto.metadata.created_at + message = Message( + metadata=metadata_from_proto(message_proto.metadata), + content=( + recordset_from_proto(message_proto.content) + if message_proto.HasField("content") + else None + ), + error=( + error_from_proto(message_proto.error) + if message_proto.HasField("error") + else None + ), + ) + # `.created_at` is set upon Message object construction + # we need to manually set it to the original value + message.metadata.created_at = created_at + return message + + +# === Context messages === + + +def context_to_proto(context: Context) -> ProtoContext: + """Serialize `Context` to ProtoBuf.""" + proto = ProtoContext( + node_id=context.node_id, + node_config=user_config_to_proto(context.node_config), + state=recordset_to_proto(context.state), + run_config=user_config_to_proto(context.run_config), + ) + return proto + + +def context_from_proto(context_proto: ProtoContext) -> Context: + """Deserialize `Context` from ProtoBuf.""" + context = Context( + node_id=context_proto.node_id, + node_config=user_config_from_proto(context_proto.node_config), + state=recordset_from_proto(context_proto.state), + run_config=user_config_from_proto(context_proto.run_config), + ) + return context + + +# === Run messages === + + +def run_to_proto(run: typing.Run) -> ProtoRun: + """Serialize `Run` to ProtoBuf.""" + proto = ProtoRun( + run_id=run.run_id, + fab_id=run.fab_id, + fab_version=run.fab_version, + fab_hash=run.fab_hash, + override_config=user_config_to_proto(run.override_config), + ) + return proto + + +def run_from_proto(run_proto: ProtoRun) -> typing.Run: + """Deserialize 
`Run` from ProtoBuf.""" + run = typing.Run( + run_id=run_proto.run_id, + fab_id=run_proto.fab_id, + fab_version=run_proto.fab_version, + fab_hash=run_proto.fab_hash, + override_config=user_config_from_proto(run_proto.override_config), + ) + return run + + +# === ClientApp status messages === + + +def clientappstatus_to_proto( + status: typing.ClientAppOutputStatus, +) -> ClientAppOutputStatus: + """Serialize `ClientAppOutputStatus` to ProtoBuf.""" + code = ClientAppOutputCode.SUCCESS + if status.code == typing.ClientAppOutputCode.DEADLINE_EXCEEDED: + code = ClientAppOutputCode.DEADLINE_EXCEEDED + if status.code == typing.ClientAppOutputCode.UNKNOWN_ERROR: + code = ClientAppOutputCode.UNKNOWN_ERROR + return ClientAppOutputStatus(code=code, message=status.message) + + +def clientappstatus_from_proto( + msg: ClientAppOutputStatus, +) -> typing.ClientAppOutputStatus: + """Deserialize `ClientAppOutputStatus` from ProtoBuf.""" + code = typing.ClientAppOutputCode.SUCCESS + if msg.code == ClientAppOutputCode.DEADLINE_EXCEEDED: + code = typing.ClientAppOutputCode.DEADLINE_EXCEEDED + if msg.code == ClientAppOutputCode.UNKNOWN_ERROR: + code = typing.ClientAppOutputCode.UNKNOWN_ERROR + return typing.ClientAppOutputStatus(code=code, message=msg.message) diff --git a/src/py/flwr/common/serde_test.py b/src/py/flwr/common/serde_test.py index f9969426fc36..19e9889158a0 100644 --- a/src/py/flwr/common/serde_test.py +++ b/src/py/flwr/common/serde_test.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2021 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -16,28 +16,50 @@ import random import string -from typing import Any, Callable, Optional, OrderedDict, Type, TypeVar, Union, cast +from collections import OrderedDict +from typing import Any, Callable, Optional, TypeVar, Union, cast import pytest # pylint: disable=E0611 +from flwr.proto import clientappio_pb2 from flwr.proto import transport_pb2 as pb2 +from flwr.proto.fab_pb2 import Fab as ProtoFab +from flwr.proto.message_pb2 import Context as ProtoContext +from flwr.proto.message_pb2 import Message as ProtoMessage from flwr.proto.recordset_pb2 import Array as ProtoArray from flwr.proto.recordset_pb2 import ConfigsRecord as ProtoConfigsRecord from flwr.proto.recordset_pb2 import MetricsRecord as ProtoMetricsRecord from flwr.proto.recordset_pb2 import ParametersRecord as ProtoParametersRecord from flwr.proto.recordset_pb2 import RecordSet as ProtoRecordSet +from flwr.proto.run_pb2 import Run as ProtoRun # pylint: enable=E0611 -from . import Array, ConfigsRecord, MetricsRecord, ParametersRecord, RecordSet, typing +from . 
import ( + Array, + ConfigsRecord, + Context, + MetricsRecord, + ParametersRecord, + RecordSet, + typing, +) from .message import Error, Message, Metadata from .serde import ( array_from_proto, array_to_proto, + clientappstatus_from_proto, + clientappstatus_to_proto, configs_record_from_proto, configs_record_to_proto, + context_from_proto, + context_to_proto, + fab_from_proto, + fab_to_proto, + message_from_proto, message_from_taskins, message_from_taskres, + message_to_proto, message_to_taskins, message_to_taskres, metrics_record_from_proto, @@ -46,6 +68,8 @@ parameters_record_to_proto, recordset_from_proto, recordset_to_proto, + run_from_proto, + run_to_proto, scalar_from_proto, scalar_to_proto, status_from_proto, @@ -56,7 +80,7 @@ def test_serialisation_deserialisation() -> None: """Test if the np.ndarray is identical after (de-)serialization.""" # Prepare - scalars = [True, b"bytestr", 3.14, 9000, "Hello"] + scalars = [True, b"bytestr", 3.14, 9000, "Hello", (1 << 63) + 1] for scalar in scalars: # Execute @@ -100,6 +124,30 @@ def test_status_from_proto() -> None: assert actual_status == status +def test_fab_to_proto() -> None: + """Test Fab serialization.""" + proto_fab = ProtoFab(hash_str="fab_test_hash", content=b"fab_test_content") + + py_fab = typing.Fab(hash_str="fab_test_hash", content=b"fab_test_content") + + converted_fab = fab_to_proto(py_fab) + + # Assert + assert converted_fab == proto_fab + + +def test_fab_from_proto() -> None: + """Test Fab deserialization.""" + proto_fab = ProtoFab(hash_str="fab_test_hash", content=b"fab_test_content") + + py_fab = typing.Fab(hash_str="fab_test_hash", content=b"fab_test_content") + + converted_fab = fab_from_proto(proto_fab) + + # Assert + assert converted_fab == py_fab + + T = TypeVar("T") @@ -122,7 +170,7 @@ def get_str(self, length: Optional[int] = None) -> str: length = self.rng.randint(1, 10) return "".join(self.rng.choices(char_pool, k=length)) - def get_value(self, dtype: Type[T]) -> T: + def get_value(self, 
dtype: Union[type[T], str]) -> T: """Create a value of a given type.""" ret: Any = None if dtype == bool: @@ -130,11 +178,13 @@ def get_value(self, dtype: Type[T]) -> T: elif dtype == str: ret = self.get_str(self.rng.randint(10, 100)) elif dtype == int: - ret = self.rng.randint(-1 << 30, 1 << 30) + ret = self.rng.randint(-1 << 63, (1 << 63) - 1) elif dtype == float: ret = (self.rng.random() - 0.5) * (2.0 ** self.rng.randint(0, 50)) elif dtype == bytes: ret = self.randbytes(self.rng.randint(10, 100)) + elif dtype == "uint": + ret = self.rng.randint(0, (1 << 64) - 1) else: raise NotImplementedError(f"Unsupported dtype: {dtype}") return cast(T, ret) @@ -223,6 +273,15 @@ def metadata(self) -> Metadata: message_type=self.get_str(10), ) + def user_config(self) -> typing.UserConfig: + """Create a UserConfig.""" + return { + "key1": self.rng.randint(0, 1 << 30), + "key2": self.get_str(10), + "key3": self.rng.random(), + "key4": bool(self.rng.getrandbits(1)), + } + def test_array_serialization_deserialization() -> None: """Test serialization and deserialization of Array.""" @@ -259,6 +318,8 @@ def test_metrics_record_serialization_deserialization() -> None: # Prepare maker = RecordMaker() original = maker.metrics_record() + original["uint64"] = (1 << 63) + 321 + original["list of uint64"] = [maker.get_value("uint") for _ in range(30)] # Execute proto = metrics_record_to_proto(original) @@ -274,6 +335,8 @@ def test_configs_record_serialization_deserialization() -> None: # Prepare maker = RecordMaker() original = maker.configs_record() + original["uint64"] = (1 << 63) + 101 + original["list of uint64"] = [maker.get_value("uint") for _ in range(100)] # Execute proto = configs_record_to_proto(original) @@ -387,3 +450,124 @@ def test_message_to_and_from_taskres( if original.has_error(): assert original.error == deserialized.error assert metadata == deserialized.metadata + + +@pytest.mark.parametrize( + "content_fn, error_fn", + [ + ( + lambda maker: maker.recordset(1, 1, 1), + 
None, + ), # check when only content is set + (None, lambda code: Error(code=code)), # check when only error is set + ], +) +def test_message_serialization_deserialization( + content_fn: Callable[ + [ + RecordMaker, + ], + RecordSet, + ], + error_fn: Callable[[int], Error], +) -> None: + """Test serialization and deserialization of Message.""" + # Prepare + maker = RecordMaker(state=2) + metadata = maker.metadata() + metadata.dst_node_id = 0 # Assume driver node + + original = Message( + metadata=metadata, + content=None if content_fn is None else content_fn(maker), + error=None if error_fn is None else error_fn(0), + ) + + # Execute + proto = message_to_proto(original) + deserialized = message_from_proto(proto) + + # Assert + assert isinstance(proto, ProtoMessage) + + if original.has_content(): + assert original.content == deserialized.content + if original.has_error(): + assert original.error == deserialized.error + + assert original.metadata == deserialized.metadata + + +def test_context_serialization_deserialization() -> None: + """Test serialization and deserialization of Context.""" + # Prepare + maker = RecordMaker() + original = Context( + node_id=1, + node_config=maker.user_config(), + state=maker.recordset(1, 1, 1), + run_config=maker.user_config(), + ) + + # Execute + proto = context_to_proto(original) + deserialized = context_from_proto(proto) + + # Assert + assert isinstance(proto, ProtoContext) + assert original == deserialized + + +def test_run_serialization_deserialization() -> None: + """Test serialization and deserialization of Run.""" + # Prepare + maker = RecordMaker() + original = typing.Run( + run_id=1, + fab_id="lorem", + fab_version="ipsum", + fab_hash="hash", + override_config=maker.user_config(), + ) + + # Execute + proto = run_to_proto(original) + deserialized = run_from_proto(proto) + + # Assert + assert isinstance(proto, ProtoRun) + assert original == deserialized + + +def test_clientappstatus_to_proto() -> None: + """Test ClientApp 
status message (de-)serialization.""" + # Prepare + # pylint: disable=E1101 + code_msg = clientappio_pb2.ClientAppOutputCode.SUCCESS + status_msg = clientappio_pb2.ClientAppOutputStatus(code=code_msg, message="Success") + + code = typing.ClientAppOutputCode.SUCCESS + status = typing.ClientAppOutputStatus(code=code, message="Success") + + # Execute + actual_status_msg = clientappstatus_to_proto(status=status) + + # Assert + assert actual_status_msg == status_msg + + +def test_clientappstatus_from_proto() -> None: + """Test ClientApp status message (de-)serialization.""" + # Prepare + # pylint: disable=E1101 + code_msg = clientappio_pb2.ClientAppOutputCode.SUCCESS + status_msg = clientappio_pb2.ClientAppOutputStatus(code=code_msg, message="Success") + + code = typing.ClientAppOutputCode.SUCCESS + status = typing.ClientAppOutputStatus(code=code, message="Success") + + # Execute + actual_status = clientappstatus_from_proto(msg=status_msg) + + # Assert + assert actual_status == status diff --git a/src/py/flwr/common/telemetry.py b/src/py/flwr/common/telemetry.py index 41fe1508e652..724f36d2b98f 100644 --- a/src/py/flwr/common/telemetry.py +++ b/src/py/flwr/common/telemetry.py @@ -25,7 +25,7 @@ from concurrent.futures import Future, ThreadPoolExecutor from enum import Enum, auto from pathlib import Path -from typing import Any, Dict, List, Optional, Union, cast +from typing import Any, Optional, Union, cast from flwr.common.version import package_name, package_version @@ -64,6 +64,18 @@ def _get_home() -> Path: return Path().home() +def _get_partner_id() -> str: + """Get partner ID.""" + partner_id = os.getenv("FLWR_TELEMETRY_PARTNER_ID") + if not partner_id: + return "unavailable" + try: + uuid.UUID(partner_id) + except ValueError: + partner_id = "invalid" + return partner_id + + def _get_source_id() -> str: """Get existing or new source ID.""" source_id = "unavailable" @@ -114,71 +126,82 @@ class EventType(str, Enum): # The type signature is not compatible with mypy, 
pylint and flake8 # so each of those needs to be disabled for this line. # pylint: disable-next=no-self-argument,arguments-differ,line-too-long - def _generate_next_value_(name: str, start: int, count: int, last_values: List[Any]) -> Any: # type: ignore # noqa: E501 + def _generate_next_value_(name: str, start: int, count: int, last_values: list[Any]) -> Any: # type: ignore # noqa: E501 return name # Ping PING = auto() - # Client: start_client + # --- LEGACY FUNCTIONS ------------------------------------------------------------- + + # Legacy: `start_client` function START_CLIENT_ENTER = auto() START_CLIENT_LEAVE = auto() - # Server: start_server + # Legacy: `start_server` function START_SERVER_ENTER = auto() START_SERVER_LEAVE = auto() - # Driver API - RUN_DRIVER_API_ENTER = auto() - RUN_DRIVER_API_LEAVE = auto() + # Legacy: `start_simulation` function + START_SIMULATION_ENTER = auto() + START_SIMULATION_LEAVE = auto() - # Fleet API - RUN_FLEET_API_ENTER = auto() - RUN_FLEET_API_LEAVE = auto() + # --- `flwr` CLI ------------------------------------------------------------------- - # Driver API and Fleet API - RUN_SUPERLINK_ENTER = auto() - RUN_SUPERLINK_LEAVE = auto() + # Not yet implemented - # Simulation - START_SIMULATION_ENTER = auto() - START_SIMULATION_LEAVE = auto() + # --- SuperExec -------------------------------------------------------------------- - # Driver: Driver - DRIVER_CONNECT = auto() - DRIVER_DISCONNECT = auto() + # SuperExec + RUN_SUPEREXEC_ENTER = auto() + RUN_SUPEREXEC_LEAVE = auto() - # Driver: start_driver - START_DRIVER_ENTER = auto() - START_DRIVER_LEAVE = auto() + # --- Simulation Engine ------------------------------------------------------------ - # flower-client-app - RUN_CLIENT_APP_ENTER = auto() - RUN_CLIENT_APP_LEAVE = auto() + # CLI: flower-simulation + CLI_FLOWER_SIMULATION_ENTER = auto() + CLI_FLOWER_SIMULATION_LEAVE = auto() - # flower-server-app - RUN_SERVER_APP_ENTER = auto() - RUN_SERVER_APP_LEAVE = auto() + # Python API: 
`run_simulation` + PYTHON_API_RUN_SIMULATION_ENTER = auto() + PYTHON_API_RUN_SIMULATION_LEAVE = auto() + + # --- Deployment Engine ------------------------------------------------------------ - # SuperNode + # CLI: `flower-superlink` + RUN_SUPERLINK_ENTER = auto() + RUN_SUPERLINK_LEAVE = auto() + + # CLI: `flower-supernode` RUN_SUPERNODE_ENTER = auto() RUN_SUPERNODE_LEAVE = auto() + # CLI: `flower-server-app` + RUN_SERVER_APP_ENTER = auto() + RUN_SERVER_APP_LEAVE = auto() + + # --- DEPRECATED ------------------------------------------------------------------- + + # [DEPRECATED] CLI: `flower-client-app` + RUN_CLIENT_APP_ENTER = auto() + RUN_CLIENT_APP_LEAVE = auto() + # Use the ThreadPoolExecutor with max_workers=1 to have a queue # and also ensure that telemetry calls are not blocking. -state: Dict[str, Union[Optional[str], Optional[ThreadPoolExecutor]]] = { +state: dict[str, Union[Optional[str], Optional[ThreadPoolExecutor]]] = { # Will be assigned ThreadPoolExecutor(max_workers=1) # in event() the first time it's required "executor": None, "source": None, "cluster": None, + "partner": None, } def event( event_type: EventType, - event_details: Optional[Dict[str, Any]] = None, + event_details: Optional[dict[str, Any]] = None, ) -> Future: # type: ignore """Submit create_event to ThreadPoolExecutor to avoid blocking.""" if state["executor"] is None: @@ -190,7 +213,7 @@ def event( return result -def create_event(event_type: EventType, event_details: Optional[Dict[str, Any]]) -> str: +def create_event(event_type: EventType, event_details: Optional[dict[str, Any]]) -> str: """Create telemetry event.""" if state["source"] is None: state["source"] = _get_source_id() @@ -198,11 +221,15 @@ def create_event(event_type: EventType, event_details: Optional[Dict[str, Any]]) if state["cluster"] is None: state["cluster"] = str(uuid.uuid4()) + if state["partner"] is None: + state["partner"] = _get_partner_id() + if event_details is None: event_details = {} date = 
datetime.datetime.now(tz=datetime.timezone.utc).isoformat() context = { + "partner": state["partner"], "source": state["source"], "cluster": state["cluster"], "date": date, diff --git a/src/py/flwr/common/telemetry_test.py b/src/py/flwr/common/telemetry_test.py index a5eea48443b5..f1ad635773a7 100644 --- a/src/py/flwr/common/telemetry_test.py +++ b/src/py/flwr/common/telemetry_test.py @@ -15,12 +15,14 @@ """Telemetry tests.""" +import os import time import unittest from typing import Callable from unittest import mock +from uuid import uuid4 -from flwr.common.telemetry import EventType, _get_source_id, event +from flwr.common.telemetry import EventType, _get_partner_id, _get_source_id, event class TelemetryTest(unittest.TestCase): @@ -109,3 +111,39 @@ def _new_failing_get_home() -> None: # Assert self.assertEqual(source_id, except_value) + + def test_get_partner_id(self) -> None: + """Test if _get_partner_id returns an ID successfully.""" + # Prepare + generated_id = str(uuid4()) + os.environ["FLWR_TELEMETRY_PARTNER_ID"] = generated_id + + # Execute + partner_id = _get_partner_id() + + # Assert + self.assertEqual(partner_id, generated_id) + + def test_get_partner_id_no_env(self) -> None: + """Test if _get_partner_id returns unavailable without an env variable.""" + # Prepare + os.environ["FLWR_TELEMETRY_PARTNER_ID"] = "" + expected_value = "unavailable" + + # Execute + partner_id = _get_partner_id() + + # Assert + self.assertEqual(partner_id, expected_value) + + def test_get_partner_id_invalid(self) -> None: + """Test if _get_partner_id returns invalid with an incorrect env variable.""" + # Prepare + os.environ["FLWR_TELEMETRY_PARTNER_ID"] = "not a valid ID" + expected_value = "invalid" + + # Execute + partner_id = _get_partner_id() + + # Assert + self.assertEqual(partner_id, expected_value) diff --git a/src/py/flwr/common/typing.py b/src/py/flwr/common/typing.py index d6b2ec9b158c..081a957f28ff 100644 --- a/src/py/flwr/common/typing.py +++ 
b/src/py/flwr/common/typing.py @@ -17,7 +17,7 @@ from dataclasses import dataclass from enum import Enum -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from typing import Any, Callable, Optional, Union import numpy as np import numpy.typing as npt @@ -25,7 +25,7 @@ NDArray = npt.NDArray[Any] NDArrayInt = npt.NDArray[np.int_] NDArrayFloat = npt.NDArray[np.float_] -NDArrays = List[NDArray] +NDArrays = list[NDArray] # The following union type contains Python types corresponding to ProtoBuf types that # ProtoBuf considers to be "Scalar Value Types", even though some of them arguably do @@ -38,27 +38,31 @@ float, int, str, - List[bool], - List[bytes], - List[float], - List[int], - List[str], + list[bool], + list[bytes], + list[float], + list[int], + list[str], ] # Value types for common.MetricsRecord MetricsScalar = Union[int, float] -MetricsScalarList = Union[List[int], List[float]] +MetricsScalarList = Union[list[int], list[float]] MetricsRecordValues = Union[MetricsScalar, MetricsScalarList] # Value types for common.ConfigsRecord ConfigsScalar = Union[MetricsScalar, str, bytes, bool] -ConfigsScalarList = Union[MetricsScalarList, List[str], List[bytes], List[bool]] +ConfigsScalarList = Union[MetricsScalarList, list[str], list[bytes], list[bool]] ConfigsRecordValues = Union[ConfigsScalar, ConfigsScalarList] -Metrics = Dict[str, Scalar] -MetricsAggregationFn = Callable[[List[Tuple[int, Metrics]]], Metrics] +Metrics = dict[str, Scalar] +MetricsAggregationFn = Callable[[list[tuple[int, Metrics]]], Metrics] -Config = Dict[str, Scalar] -Properties = Dict[str, Scalar] +Config = dict[str, Scalar] +Properties = dict[str, Scalar] + +# Value type for user configs +UserConfigValue = Union[bool, float, int, str] +UserConfig = dict[str, UserConfigValue] class Code(Enum): @@ -79,11 +83,27 @@ class Status: message: str +class ClientAppOutputCode(Enum): + """ClientAppIO status codes.""" + + SUCCESS = 0 + DEADLINE_EXCEEDED = 1 + UNKNOWN_ERROR = 2 + + +@dataclass 
+class ClientAppOutputStatus: + """ClientAppIO status.""" + + code: ClientAppOutputCode + message: str + + @dataclass class Parameters: """Model parameters.""" - tensors: List[bytes] + tensors: list[bytes] tensor_type: str @@ -107,7 +127,7 @@ class FitIns: """Fit instructions for a client.""" parameters: Parameters - config: Dict[str, Scalar] + config: dict[str, Scalar] @dataclass @@ -117,7 +137,7 @@ class FitRes: status: Status parameters: Parameters num_examples: int - metrics: Dict[str, Scalar] + metrics: dict[str, Scalar] @dataclass @@ -125,7 +145,7 @@ class EvaluateIns: """Evaluate instructions for a client.""" parameters: Parameters - config: Dict[str, Scalar] + config: dict[str, Scalar] @dataclass @@ -135,7 +155,7 @@ class EvaluateRes: status: Status loss: float num_examples: int - metrics: Dict[str, Scalar] + metrics: dict[str, Scalar] @dataclass @@ -185,3 +205,22 @@ class ClientMessage: get_parameters_res: Optional[GetParametersRes] = None fit_res: Optional[FitRes] = None evaluate_res: Optional[EvaluateRes] = None + + +@dataclass +class Run: + """Run details.""" + + run_id: int + fab_id: str + fab_version: str + fab_hash: str + override_config: UserConfig + + +@dataclass +class Fab: + """Fab file representation.""" + + hash_str: str + content: bytes diff --git a/src/py/flwr/common/version.py b/src/py/flwr/common/version.py index 6808c66606b1..141c16ac9367 100644 --- a/src/py/flwr/common/version.py +++ b/src/py/flwr/common/version.py @@ -1,15 +1,28 @@ +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== """Flower package version helper.""" import importlib.metadata as importlib_metadata -from typing import Tuple -def _check_package(name: str) -> Tuple[str, str]: +def _check_package(name: str) -> tuple[str, str]: version: str = importlib_metadata.version(name) return name, version -def _version() -> Tuple[str, str]: +def _version() -> tuple[str, str]: """Read and return Flower package name and version. Returns diff --git a/src/py/flwr/proto/clientappio_pb2.py b/src/py/flwr/proto/clientappio_pb2.py new file mode 100644 index 000000000000..3fdc9f8a6ece --- /dev/null +++ b/src/py/flwr/proto/clientappio_pb2.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: flwr/proto/clientappio.proto +# Protobuf Python Version: 4.25.0 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from flwr.proto import fab_pb2 as flwr_dot_proto_dot_fab__pb2 +from flwr.proto import run_pb2 as flwr_dot_proto_dot_run__pb2 +from flwr.proto import message_pb2 as flwr_dot_proto_dot_message__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1c\x66lwr/proto/clientappio.proto\x12\nflwr.proto\x1a\x14\x66lwr/proto/fab.proto\x1a\x14\x66lwr/proto/run.proto\x1a\x18\x66lwr/proto/message.proto\"W\n\x15\x43lientAppOutputStatus\x12-\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x1f.flwr.proto.ClientAppOutputCode\x12\x0f\n\x07message\x18\x02 \x01(\t\"\x11\n\x0fGetTokenRequest\"!\n\x10GetTokenResponse\x12\r\n\x05token\x18\x01 
\x01(\x04\"+\n\x1aPullClientAppInputsRequest\x12\r\n\x05token\x18\x01 \x01(\x04\"\xa5\x01\n\x1bPullClientAppInputsResponse\x12$\n\x07message\x18\x01 \x01(\x0b\x32\x13.flwr.proto.Message\x12$\n\x07\x63ontext\x18\x02 \x01(\x0b\x32\x13.flwr.proto.Context\x12\x1c\n\x03run\x18\x03 \x01(\x0b\x32\x0f.flwr.proto.Run\x12\x1c\n\x03\x66\x61\x62\x18\x04 \x01(\x0b\x32\x0f.flwr.proto.Fab\"x\n\x1bPushClientAppOutputsRequest\x12\r\n\x05token\x18\x01 \x01(\x04\x12$\n\x07message\x18\x02 \x01(\x0b\x32\x13.flwr.proto.Message\x12$\n\x07\x63ontext\x18\x03 \x01(\x0b\x32\x13.flwr.proto.Context\"Q\n\x1cPushClientAppOutputsResponse\x12\x31\n\x06status\x18\x01 \x01(\x0b\x32!.flwr.proto.ClientAppOutputStatus*L\n\x13\x43lientAppOutputCode\x12\x0b\n\x07SUCCESS\x10\x00\x12\x15\n\x11\x44\x45\x41\x44LINE_EXCEEDED\x10\x01\x12\x11\n\rUNKNOWN_ERROR\x10\x02\x32\xad\x02\n\x0b\x43lientAppIo\x12G\n\x08GetToken\x12\x1b.flwr.proto.GetTokenRequest\x1a\x1c.flwr.proto.GetTokenResponse\"\x00\x12h\n\x13PullClientAppInputs\x12&.flwr.proto.PullClientAppInputsRequest\x1a\'.flwr.proto.PullClientAppInputsResponse\"\x00\x12k\n\x14PushClientAppOutputs\x12\'.flwr.proto.PushClientAppOutputsRequest\x1a(.flwr.proto.PushClientAppOutputsResponse\"\x00\x62\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flwr.proto.clientappio_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + _globals['_CLIENTAPPOUTPUTCODE']._serialized_start=675 + _globals['_CLIENTAPPOUTPUTCODE']._serialized_end=751 + _globals['_CLIENTAPPOUTPUTSTATUS']._serialized_start=114 + _globals['_CLIENTAPPOUTPUTSTATUS']._serialized_end=201 + _globals['_GETTOKENREQUEST']._serialized_start=203 + _globals['_GETTOKENREQUEST']._serialized_end=220 + _globals['_GETTOKENRESPONSE']._serialized_start=222 + _globals['_GETTOKENRESPONSE']._serialized_end=255 + _globals['_PULLCLIENTAPPINPUTSREQUEST']._serialized_start=257 + 
_globals['_PULLCLIENTAPPINPUTSREQUEST']._serialized_end=300 + _globals['_PULLCLIENTAPPINPUTSRESPONSE']._serialized_start=303 + _globals['_PULLCLIENTAPPINPUTSRESPONSE']._serialized_end=468 + _globals['_PUSHCLIENTAPPOUTPUTSREQUEST']._serialized_start=470 + _globals['_PUSHCLIENTAPPOUTPUTSREQUEST']._serialized_end=590 + _globals['_PUSHCLIENTAPPOUTPUTSRESPONSE']._serialized_start=592 + _globals['_PUSHCLIENTAPPOUTPUTSRESPONSE']._serialized_end=673 + _globals['_CLIENTAPPIO']._serialized_start=754 + _globals['_CLIENTAPPIO']._serialized_end=1055 +# @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/clientappio_pb2.pyi b/src/py/flwr/proto/clientappio_pb2.pyi new file mode 100644 index 000000000000..53d376d58101 --- /dev/null +++ b/src/py/flwr/proto/clientappio_pb2.pyi @@ -0,0 +1,132 @@ +""" +@generated by mypy-protobuf. Do not edit manually! +isort:skip_file +""" +import builtins +import flwr.proto.fab_pb2 +import flwr.proto.message_pb2 +import flwr.proto.run_pb2 +import google.protobuf.descriptor +import google.protobuf.internal.enum_type_wrapper +import google.protobuf.message +import typing +import typing_extensions + +DESCRIPTOR: google.protobuf.descriptor.FileDescriptor + +class _ClientAppOutputCode: + ValueType = typing.NewType('ValueType', builtins.int) + V: typing_extensions.TypeAlias = ValueType +class _ClientAppOutputCodeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_ClientAppOutputCode.ValueType], builtins.type): + DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor + SUCCESS: _ClientAppOutputCode.ValueType # 0 + DEADLINE_EXCEEDED: _ClientAppOutputCode.ValueType # 1 + UNKNOWN_ERROR: _ClientAppOutputCode.ValueType # 2 +class ClientAppOutputCode(_ClientAppOutputCode, metaclass=_ClientAppOutputCodeEnumTypeWrapper): + pass + +SUCCESS: ClientAppOutputCode.ValueType # 0 +DEADLINE_EXCEEDED: ClientAppOutputCode.ValueType # 1 +UNKNOWN_ERROR: ClientAppOutputCode.ValueType # 2 +global___ClientAppOutputCode = 
ClientAppOutputCode + + +class ClientAppOutputStatus(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + CODE_FIELD_NUMBER: builtins.int + MESSAGE_FIELD_NUMBER: builtins.int + code: global___ClientAppOutputCode.ValueType + message: typing.Text + def __init__(self, + *, + code: global___ClientAppOutputCode.ValueType = ..., + message: typing.Text = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["code",b"code","message",b"message"]) -> None: ... +global___ClientAppOutputStatus = ClientAppOutputStatus + +class GetTokenRequest(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + def __init__(self, + ) -> None: ... +global___GetTokenRequest = GetTokenRequest + +class GetTokenResponse(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + TOKEN_FIELD_NUMBER: builtins.int + token: builtins.int + def __init__(self, + *, + token: builtins.int = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["token",b"token"]) -> None: ... +global___GetTokenResponse = GetTokenResponse + +class PullClientAppInputsRequest(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + TOKEN_FIELD_NUMBER: builtins.int + token: builtins.int + def __init__(self, + *, + token: builtins.int = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["token",b"token"]) -> None: ... +global___PullClientAppInputsRequest = PullClientAppInputsRequest + +class PullClientAppInputsResponse(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + MESSAGE_FIELD_NUMBER: builtins.int + CONTEXT_FIELD_NUMBER: builtins.int + RUN_FIELD_NUMBER: builtins.int + FAB_FIELD_NUMBER: builtins.int + @property + def message(self) -> flwr.proto.message_pb2.Message: ... + @property + def context(self) -> flwr.proto.message_pb2.Context: ... 
+ @property + def run(self) -> flwr.proto.run_pb2.Run: ... + @property + def fab(self) -> flwr.proto.fab_pb2.Fab: ... + def __init__(self, + *, + message: typing.Optional[flwr.proto.message_pb2.Message] = ..., + context: typing.Optional[flwr.proto.message_pb2.Context] = ..., + run: typing.Optional[flwr.proto.run_pb2.Run] = ..., + fab: typing.Optional[flwr.proto.fab_pb2.Fab] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["context",b"context","fab",b"fab","message",b"message","run",b"run"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["context",b"context","fab",b"fab","message",b"message","run",b"run"]) -> None: ... +global___PullClientAppInputsResponse = PullClientAppInputsResponse + +class PushClientAppOutputsRequest(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + TOKEN_FIELD_NUMBER: builtins.int + MESSAGE_FIELD_NUMBER: builtins.int + CONTEXT_FIELD_NUMBER: builtins.int + token: builtins.int + @property + def message(self) -> flwr.proto.message_pb2.Message: ... + @property + def context(self) -> flwr.proto.message_pb2.Context: ... + def __init__(self, + *, + token: builtins.int = ..., + message: typing.Optional[flwr.proto.message_pb2.Message] = ..., + context: typing.Optional[flwr.proto.message_pb2.Context] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["context",b"context","message",b"message"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["context",b"context","message",b"message","token",b"token"]) -> None: ... +global___PushClientAppOutputsRequest = PushClientAppOutputsRequest + +class PushClientAppOutputsResponse(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + STATUS_FIELD_NUMBER: builtins.int + @property + def status(self) -> global___ClientAppOutputStatus: ... 
+ def __init__(self, + *, + status: typing.Optional[global___ClientAppOutputStatus] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["status",b"status"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["status",b"status"]) -> None: ... +global___PushClientAppOutputsResponse = PushClientAppOutputsResponse diff --git a/src/py/flwr/proto/clientappio_pb2_grpc.py b/src/py/flwr/proto/clientappio_pb2_grpc.py new file mode 100644 index 000000000000..653d49fc1ead --- /dev/null +++ b/src/py/flwr/proto/clientappio_pb2_grpc.py @@ -0,0 +1,135 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from flwr.proto import clientappio_pb2 as flwr_dot_proto_dot_clientappio__pb2 + + +class ClientAppIoStub(object): + """Missing associated documentation comment in .proto file.""" + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.GetToken = channel.unary_unary( + '/flwr.proto.ClientAppIo/GetToken', + request_serializer=flwr_dot_proto_dot_clientappio__pb2.GetTokenRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_clientappio__pb2.GetTokenResponse.FromString, + ) + self.PullClientAppInputs = channel.unary_unary( + '/flwr.proto.ClientAppIo/PullClientAppInputs', + request_serializer=flwr_dot_proto_dot_clientappio__pb2.PullClientAppInputsRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_clientappio__pb2.PullClientAppInputsResponse.FromString, + ) + self.PushClientAppOutputs = channel.unary_unary( + '/flwr.proto.ClientAppIo/PushClientAppOutputs', + request_serializer=flwr_dot_proto_dot_clientappio__pb2.PushClientAppOutputsRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_clientappio__pb2.PushClientAppOutputsResponse.FromString, + ) + + +class ClientAppIoServicer(object): + """Missing associated documentation comment in .proto file.""" + + def GetToken(self, request, context): + """Get token + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def PullClientAppInputs(self, request, context): + """Get Message, Context, and Run + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def PushClientAppOutputs(self, request, context): + """Send updated Message and Context + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_ClientAppIoServicer_to_server(servicer, server): + rpc_method_handlers = { + 'GetToken': grpc.unary_unary_rpc_method_handler( + servicer.GetToken, + request_deserializer=flwr_dot_proto_dot_clientappio__pb2.GetTokenRequest.FromString, + 
response_serializer=flwr_dot_proto_dot_clientappio__pb2.GetTokenResponse.SerializeToString, + ), + 'PullClientAppInputs': grpc.unary_unary_rpc_method_handler( + servicer.PullClientAppInputs, + request_deserializer=flwr_dot_proto_dot_clientappio__pb2.PullClientAppInputsRequest.FromString, + response_serializer=flwr_dot_proto_dot_clientappio__pb2.PullClientAppInputsResponse.SerializeToString, + ), + 'PushClientAppOutputs': grpc.unary_unary_rpc_method_handler( + servicer.PushClientAppOutputs, + request_deserializer=flwr_dot_proto_dot_clientappio__pb2.PushClientAppOutputsRequest.FromString, + response_serializer=flwr_dot_proto_dot_clientappio__pb2.PushClientAppOutputsResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'flwr.proto.ClientAppIo', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL API. +class ClientAppIo(object): + """Missing associated documentation comment in .proto file.""" + + @staticmethod + def GetToken(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.ClientAppIo/GetToken', + flwr_dot_proto_dot_clientappio__pb2.GetTokenRequest.SerializeToString, + flwr_dot_proto_dot_clientappio__pb2.GetTokenResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def PullClientAppInputs(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.ClientAppIo/PullClientAppInputs', + flwr_dot_proto_dot_clientappio__pb2.PullClientAppInputsRequest.SerializeToString, + 
flwr_dot_proto_dot_clientappio__pb2.PullClientAppInputsResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def PushClientAppOutputs(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.ClientAppIo/PushClientAppOutputs', + flwr_dot_proto_dot_clientappio__pb2.PushClientAppOutputsRequest.SerializeToString, + flwr_dot_proto_dot_clientappio__pb2.PushClientAppOutputsResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/src/py/flwr/proto/clientappio_pb2_grpc.pyi b/src/py/flwr/proto/clientappio_pb2_grpc.pyi new file mode 100644 index 000000000000..3cddc769f745 --- /dev/null +++ b/src/py/flwr/proto/clientappio_pb2_grpc.pyi @@ -0,0 +1,53 @@ +""" +@generated by mypy-protobuf. Do not edit manually! +isort:skip_file +""" +import abc +import flwr.proto.clientappio_pb2 +import grpc + +class ClientAppIoStub: + def __init__(self, channel: grpc.Channel) -> None: ... 
+ GetToken: grpc.UnaryUnaryMultiCallable[ + flwr.proto.clientappio_pb2.GetTokenRequest, + flwr.proto.clientappio_pb2.GetTokenResponse] + """Get token""" + + PullClientAppInputs: grpc.UnaryUnaryMultiCallable[ + flwr.proto.clientappio_pb2.PullClientAppInputsRequest, + flwr.proto.clientappio_pb2.PullClientAppInputsResponse] + """Get Message, Context, and Run""" + + PushClientAppOutputs: grpc.UnaryUnaryMultiCallable[ + flwr.proto.clientappio_pb2.PushClientAppOutputsRequest, + flwr.proto.clientappio_pb2.PushClientAppOutputsResponse] + """Send updated Message and Context""" + + +class ClientAppIoServicer(metaclass=abc.ABCMeta): + @abc.abstractmethod + def GetToken(self, + request: flwr.proto.clientappio_pb2.GetTokenRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.clientappio_pb2.GetTokenResponse: + """Get token""" + pass + + @abc.abstractmethod + def PullClientAppInputs(self, + request: flwr.proto.clientappio_pb2.PullClientAppInputsRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.clientappio_pb2.PullClientAppInputsResponse: + """Get Message, Context, and Run""" + pass + + @abc.abstractmethod + def PushClientAppOutputs(self, + request: flwr.proto.clientappio_pb2.PushClientAppOutputsRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.clientappio_pb2.PushClientAppOutputsResponse: + """Send updated Message and Context""" + pass + + +def add_ClientAppIoServicer_to_server(servicer: ClientAppIoServicer, server: grpc.Server) -> None: ... diff --git a/src/py/flwr/proto/common_pb2.py b/src/py/flwr/proto/common_pb2.py new file mode 100644 index 000000000000..1025aa862933 --- /dev/null +++ b/src/py/flwr/proto/common_pb2.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: flwr/proto/common.proto +# Protobuf Python Version: 4.25.0 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17\x66lwr/proto/common.proto\x12\nflwr.proto\"\x1a\n\nDoubleList\x12\x0c\n\x04vals\x18\x01 \x03(\x01\"\x1a\n\nSint64List\x12\x0c\n\x04vals\x18\x01 \x03(\x12\"\x18\n\x08\x42oolList\x12\x0c\n\x04vals\x18\x01 \x03(\x08\"\x1a\n\nStringList\x12\x0c\n\x04vals\x18\x01 \x03(\t\"\x19\n\tBytesList\x12\x0c\n\x04vals\x18\x01 \x03(\x0c\"\xd9\x02\n\x12\x43onfigsRecordValue\x12\x10\n\x06\x64ouble\x18\x01 \x01(\x01H\x00\x12\x10\n\x06sint64\x18\x02 \x01(\x12H\x00\x12\x0e\n\x04\x62ool\x18\x03 \x01(\x08H\x00\x12\x10\n\x06string\x18\x04 \x01(\tH\x00\x12\x0f\n\x05\x62ytes\x18\x05 \x01(\x0cH\x00\x12-\n\x0b\x64ouble_list\x18\x15 \x01(\x0b\x32\x16.flwr.proto.DoubleListH\x00\x12-\n\x0bsint64_list\x18\x16 \x01(\x0b\x32\x16.flwr.proto.Sint64ListH\x00\x12)\n\tbool_list\x18\x17 \x01(\x0b\x32\x14.flwr.proto.BoolListH\x00\x12-\n\x0bstring_list\x18\x18 \x01(\x0b\x32\x16.flwr.proto.StringListH\x00\x12+\n\nbytes_list\x18\x19 \x01(\x0b\x32\x15.flwr.proto.BytesListH\x00\x42\x07\n\x05valueb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flwr.proto.common_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + _globals['_DOUBLELIST']._serialized_start=39 + _globals['_DOUBLELIST']._serialized_end=65 + _globals['_SINT64LIST']._serialized_start=67 + _globals['_SINT64LIST']._serialized_end=93 + _globals['_BOOLLIST']._serialized_start=95 + 
_globals['_BOOLLIST']._serialized_end=119 + _globals['_STRINGLIST']._serialized_start=121 + _globals['_STRINGLIST']._serialized_end=147 + _globals['_BYTESLIST']._serialized_start=149 + _globals['_BYTESLIST']._serialized_end=174 + _globals['_CONFIGSRECORDVALUE']._serialized_start=177 + _globals['_CONFIGSRECORDVALUE']._serialized_end=522 +# @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/common_pb2.pyi b/src/py/flwr/proto/common_pb2.pyi new file mode 100644 index 000000000000..e2539a7300a9 --- /dev/null +++ b/src/py/flwr/proto/common_pb2.pyi @@ -0,0 +1,121 @@ +""" +@generated by mypy-protobuf. Do not edit manually! +isort:skip_file +""" +import builtins +import google.protobuf.descriptor +import google.protobuf.internal.containers +import google.protobuf.message +import typing +import typing_extensions + +DESCRIPTOR: google.protobuf.descriptor.FileDescriptor + +class DoubleList(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + VALS_FIELD_NUMBER: builtins.int + @property + def vals(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.float]: ... + def __init__(self, + *, + vals: typing.Optional[typing.Iterable[builtins.float]] = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["vals",b"vals"]) -> None: ... +global___DoubleList = DoubleList + +class Sint64List(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + VALS_FIELD_NUMBER: builtins.int + @property + def vals(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: ... + def __init__(self, + *, + vals: typing.Optional[typing.Iterable[builtins.int]] = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["vals",b"vals"]) -> None: ... 
+global___Sint64List = Sint64List + +class BoolList(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + VALS_FIELD_NUMBER: builtins.int + @property + def vals(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.bool]: ... + def __init__(self, + *, + vals: typing.Optional[typing.Iterable[builtins.bool]] = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["vals",b"vals"]) -> None: ... +global___BoolList = BoolList + +class StringList(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + VALS_FIELD_NUMBER: builtins.int + @property + def vals(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[typing.Text]: ... + def __init__(self, + *, + vals: typing.Optional[typing.Iterable[typing.Text]] = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["vals",b"vals"]) -> None: ... +global___StringList = StringList + +class BytesList(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + VALS_FIELD_NUMBER: builtins.int + @property + def vals(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.bytes]: ... + def __init__(self, + *, + vals: typing.Optional[typing.Iterable[builtins.bytes]] = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["vals",b"vals"]) -> None: ... 
+global___BytesList = BytesList + +class ConfigsRecordValue(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + DOUBLE_FIELD_NUMBER: builtins.int + SINT64_FIELD_NUMBER: builtins.int + BOOL_FIELD_NUMBER: builtins.int + STRING_FIELD_NUMBER: builtins.int + BYTES_FIELD_NUMBER: builtins.int + DOUBLE_LIST_FIELD_NUMBER: builtins.int + SINT64_LIST_FIELD_NUMBER: builtins.int + BOOL_LIST_FIELD_NUMBER: builtins.int + STRING_LIST_FIELD_NUMBER: builtins.int + BYTES_LIST_FIELD_NUMBER: builtins.int + double: builtins.float + """Single element""" + + sint64: builtins.int + bool: builtins.bool + string: typing.Text + bytes: builtins.bytes + @property + def double_list(self) -> global___DoubleList: + """List types""" + pass + @property + def sint64_list(self) -> global___Sint64List: ... + @property + def bool_list(self) -> global___BoolList: ... + @property + def string_list(self) -> global___StringList: ... + @property + def bytes_list(self) -> global___BytesList: ... + def __init__(self, + *, + double: builtins.float = ..., + sint64: builtins.int = ..., + bool: builtins.bool = ..., + string: typing.Text = ..., + bytes: builtins.bytes = ..., + double_list: typing.Optional[global___DoubleList] = ..., + sint64_list: typing.Optional[global___Sint64List] = ..., + bool_list: typing.Optional[global___BoolList] = ..., + string_list: typing.Optional[global___StringList] = ..., + bytes_list: typing.Optional[global___BytesList] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["bool",b"bool","bool_list",b"bool_list","bytes",b"bytes","bytes_list",b"bytes_list","double",b"double","double_list",b"double_list","sint64",b"sint64","sint64_list",b"sint64_list","string",b"string","string_list",b"string_list","value",b"value"]) -> builtins.bool: ... 
+ def ClearField(self, field_name: typing_extensions.Literal["bool",b"bool","bool_list",b"bool_list","bytes",b"bytes","bytes_list",b"bytes_list","double",b"double","double_list",b"double_list","sint64",b"sint64","sint64_list",b"sint64_list","string",b"string","string_list",b"string_list","value",b"value"]) -> None: ... + def WhichOneof(self, oneof_group: typing_extensions.Literal["value",b"value"]) -> typing.Optional[typing_extensions.Literal["double","sint64","bool","string","bytes","double_list","sint64_list","bool_list","string_list","bytes_list"]]: ... +global___ConfigsRecordValue = ConfigsRecordValue diff --git a/src/py/flwr/proto/common_pb2_grpc.py b/src/py/flwr/proto/common_pb2_grpc.py new file mode 100644 index 000000000000..2daafffebfc8 --- /dev/null +++ b/src/py/flwr/proto/common_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/src/py/flwr/proto/common_pb2_grpc.pyi b/src/py/flwr/proto/common_pb2_grpc.pyi new file mode 100644 index 000000000000..f3a5a087ef5d --- /dev/null +++ b/src/py/flwr/proto/common_pb2_grpc.pyi @@ -0,0 +1,4 @@ +""" +@generated by mypy-protobuf. Do not edit manually! +isort:skip_file +""" diff --git a/src/py/flwr/proto/control_pb2.py b/src/py/flwr/proto/control_pb2.py new file mode 100644 index 000000000000..eb1c18d8dcff --- /dev/null +++ b/src/py/flwr/proto/control_pb2.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: flwr/proto/control.proto +# Protobuf Python Version: 4.25.0 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from flwr.proto import run_pb2 as flwr_dot_proto_dot_run__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x18\x66lwr/proto/control.proto\x12\nflwr.proto\x1a\x14\x66lwr/proto/run.proto2\x88\x02\n\x07\x43ontrol\x12J\n\tCreateRun\x12\x1c.flwr.proto.CreateRunRequest\x1a\x1d.flwr.proto.CreateRunResponse\"\x00\x12S\n\x0cGetRunStatus\x12\x1f.flwr.proto.GetRunStatusRequest\x1a .flwr.proto.GetRunStatusResponse\"\x00\x12\\\n\x0fUpdateRunStatus\x12\".flwr.proto.UpdateRunStatusRequest\x1a#.flwr.proto.UpdateRunStatusResponse\"\x00\x62\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flwr.proto.control_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + _globals['_CONTROL']._serialized_start=63 + _globals['_CONTROL']._serialized_end=327 +# @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/control_pb2.pyi b/src/py/flwr/proto/control_pb2.pyi new file mode 100644 index 000000000000..e08fa11c2caa --- /dev/null +++ b/src/py/flwr/proto/control_pb2.pyi @@ -0,0 +1,7 @@ +""" +@generated by mypy-protobuf. Do not edit manually! 
+isort:skip_file +""" +import google.protobuf.descriptor + +DESCRIPTOR: google.protobuf.descriptor.FileDescriptor diff --git a/src/py/flwr/proto/control_pb2_grpc.py b/src/py/flwr/proto/control_pb2_grpc.py new file mode 100644 index 000000000000..a59f90f15935 --- /dev/null +++ b/src/py/flwr/proto/control_pb2_grpc.py @@ -0,0 +1,135 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from flwr.proto import run_pb2 as flwr_dot_proto_dot_run__pb2 + + +class ControlStub(object): + """Missing associated documentation comment in .proto file.""" + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.CreateRun = channel.unary_unary( + '/flwr.proto.Control/CreateRun', + request_serializer=flwr_dot_proto_dot_run__pb2.CreateRunRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_run__pb2.CreateRunResponse.FromString, + ) + self.GetRunStatus = channel.unary_unary( + '/flwr.proto.Control/GetRunStatus', + request_serializer=flwr_dot_proto_dot_run__pb2.GetRunStatusRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_run__pb2.GetRunStatusResponse.FromString, + ) + self.UpdateRunStatus = channel.unary_unary( + '/flwr.proto.Control/UpdateRunStatus', + request_serializer=flwr_dot_proto_dot_run__pb2.UpdateRunStatusRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_run__pb2.UpdateRunStatusResponse.FromString, + ) + + +class ControlServicer(object): + """Missing associated documentation comment in .proto file.""" + + def CreateRun(self, request, context): + """Request to create a new run + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetRunStatus(self, request, context): + """Get the status of a given run + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) 
+ context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def UpdateRunStatus(self, request, context): + """Update the status of a given run + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_ControlServicer_to_server(servicer, server): + rpc_method_handlers = { + 'CreateRun': grpc.unary_unary_rpc_method_handler( + servicer.CreateRun, + request_deserializer=flwr_dot_proto_dot_run__pb2.CreateRunRequest.FromString, + response_serializer=flwr_dot_proto_dot_run__pb2.CreateRunResponse.SerializeToString, + ), + 'GetRunStatus': grpc.unary_unary_rpc_method_handler( + servicer.GetRunStatus, + request_deserializer=flwr_dot_proto_dot_run__pb2.GetRunStatusRequest.FromString, + response_serializer=flwr_dot_proto_dot_run__pb2.GetRunStatusResponse.SerializeToString, + ), + 'UpdateRunStatus': grpc.unary_unary_rpc_method_handler( + servicer.UpdateRunStatus, + request_deserializer=flwr_dot_proto_dot_run__pb2.UpdateRunStatusRequest.FromString, + response_serializer=flwr_dot_proto_dot_run__pb2.UpdateRunStatusResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'flwr.proto.Control', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL API. 
+class Control(object): + """Missing associated documentation comment in .proto file.""" + + @staticmethod + def CreateRun(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.Control/CreateRun', + flwr_dot_proto_dot_run__pb2.CreateRunRequest.SerializeToString, + flwr_dot_proto_dot_run__pb2.CreateRunResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GetRunStatus(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.Control/GetRunStatus', + flwr_dot_proto_dot_run__pb2.GetRunStatusRequest.SerializeToString, + flwr_dot_proto_dot_run__pb2.GetRunStatusResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def UpdateRunStatus(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.Control/UpdateRunStatus', + flwr_dot_proto_dot_run__pb2.UpdateRunStatusRequest.SerializeToString, + flwr_dot_proto_dot_run__pb2.UpdateRunStatusResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/src/py/flwr/proto/control_pb2_grpc.pyi b/src/py/flwr/proto/control_pb2_grpc.pyi new file mode 100644 index 000000000000..7817e2b12e31 --- /dev/null +++ b/src/py/flwr/proto/control_pb2_grpc.pyi @@ -0,0 +1,53 @@ +""" +@generated by 
mypy-protobuf. Do not edit manually! +isort:skip_file +""" +import abc +import flwr.proto.run_pb2 +import grpc + +class ControlStub: + def __init__(self, channel: grpc.Channel) -> None: ... + CreateRun: grpc.UnaryUnaryMultiCallable[ + flwr.proto.run_pb2.CreateRunRequest, + flwr.proto.run_pb2.CreateRunResponse] + """Request to create a new run""" + + GetRunStatus: grpc.UnaryUnaryMultiCallable[ + flwr.proto.run_pb2.GetRunStatusRequest, + flwr.proto.run_pb2.GetRunStatusResponse] + """Get the status of a given run""" + + UpdateRunStatus: grpc.UnaryUnaryMultiCallable[ + flwr.proto.run_pb2.UpdateRunStatusRequest, + flwr.proto.run_pb2.UpdateRunStatusResponse] + """Update the status of a given run""" + + +class ControlServicer(metaclass=abc.ABCMeta): + @abc.abstractmethod + def CreateRun(self, + request: flwr.proto.run_pb2.CreateRunRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.run_pb2.CreateRunResponse: + """Request to create a new run""" + pass + + @abc.abstractmethod + def GetRunStatus(self, + request: flwr.proto.run_pb2.GetRunStatusRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.run_pb2.GetRunStatusResponse: + """Get the status of a given run""" + pass + + @abc.abstractmethod + def UpdateRunStatus(self, + request: flwr.proto.run_pb2.UpdateRunStatusRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.run_pb2.UpdateRunStatusResponse: + """Update the status of a given run""" + pass + + +def add_ControlServicer_to_server(servicer: ControlServicer, server: grpc.Server) -> None: ... 
diff --git a/src/py/flwr/proto/driver_pb2.py b/src/py/flwr/proto/driver_pb2.py index b0caae58ff6f..d294b03be5af 100644 --- a/src/py/flwr/proto/driver_pb2.py +++ b/src/py/flwr/proto/driver_pb2.py @@ -14,31 +14,29 @@ from flwr.proto import node_pb2 as flwr_dot_proto_dot_node__pb2 from flwr.proto import task_pb2 as flwr_dot_proto_dot_task__pb2 +from flwr.proto import run_pb2 as flwr_dot_proto_dot_run__pb2 +from flwr.proto import fab_pb2 as flwr_dot_proto_dot_fab__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17\x66lwr/proto/driver.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\"7\n\x10\x43reateRunRequest\x12\x0e\n\x06\x66\x61\x62_id\x18\x01 \x01(\t\x12\x13\n\x0b\x66\x61\x62_version\x18\x02 \x01(\t\"#\n\x11\x43reateRunResponse\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\"!\n\x0fGetNodesRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\"3\n\x10GetNodesResponse\x12\x1f\n\x05nodes\x18\x01 \x03(\x0b\x32\x10.flwr.proto.Node\"@\n\x12PushTaskInsRequest\x12*\n\rtask_ins_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"\'\n\x13PushTaskInsResponse\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"F\n\x12PullTaskResRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"A\n\x13PullTaskResResponse\x12*\n\rtask_res_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskRes2\xc1\x02\n\x06\x44river\x12J\n\tCreateRun\x12\x1c.flwr.proto.CreateRunRequest\x1a\x1d.flwr.proto.CreateRunResponse\"\x00\x12G\n\x08GetNodes\x12\x1b.flwr.proto.GetNodesRequest\x1a\x1c.flwr.proto.GetNodesResponse\"\x00\x12P\n\x0bPushTaskIns\x12\x1e.flwr.proto.PushTaskInsRequest\x1a\x1f.flwr.proto.PushTaskInsResponse\"\x00\x12P\n\x0bPullTaskRes\x12\x1e.flwr.proto.PullTaskResRequest\x1a\x1f.flwr.proto.PullTaskResResponse\"\x00\x62\x06proto3') +DESCRIPTOR = 
_descriptor_pool.Default().AddSerializedFile(b'\n\x17\x66lwr/proto/driver.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\x1a\x14\x66lwr/proto/run.proto\x1a\x14\x66lwr/proto/fab.proto\"!\n\x0fGetNodesRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\"3\n\x10GetNodesResponse\x12\x1f\n\x05nodes\x18\x01 \x03(\x0b\x32\x10.flwr.proto.Node\"@\n\x12PushTaskInsRequest\x12*\n\rtask_ins_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"\'\n\x13PushTaskInsResponse\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"F\n\x12PullTaskResRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"A\n\x13PullTaskResResponse\x12*\n\rtask_res_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskRes2\xc7\x03\n\x06\x44river\x12J\n\tCreateRun\x12\x1c.flwr.proto.CreateRunRequest\x1a\x1d.flwr.proto.CreateRunResponse\"\x00\x12G\n\x08GetNodes\x12\x1b.flwr.proto.GetNodesRequest\x1a\x1c.flwr.proto.GetNodesResponse\"\x00\x12P\n\x0bPushTaskIns\x12\x1e.flwr.proto.PushTaskInsRequest\x1a\x1f.flwr.proto.PushTaskInsResponse\"\x00\x12P\n\x0bPullTaskRes\x12\x1e.flwr.proto.PullTaskResRequest\x1a\x1f.flwr.proto.PullTaskResResponse\"\x00\x12\x41\n\x06GetRun\x12\x19.flwr.proto.GetRunRequest\x1a\x1a.flwr.proto.GetRunResponse\"\x00\x12\x41\n\x06GetFab\x12\x19.flwr.proto.GetFabRequest\x1a\x1a.flwr.proto.GetFabResponse\"\x00\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flwr.proto.driver_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None - _globals['_CREATERUNREQUEST']._serialized_start=85 - _globals['_CREATERUNREQUEST']._serialized_end=140 - _globals['_CREATERUNRESPONSE']._serialized_start=142 - _globals['_CREATERUNRESPONSE']._serialized_end=177 - _globals['_GETNODESREQUEST']._serialized_start=179 - _globals['_GETNODESREQUEST']._serialized_end=212 - _globals['_GETNODESRESPONSE']._serialized_start=214 - 
_globals['_GETNODESRESPONSE']._serialized_end=265 - _globals['_PUSHTASKINSREQUEST']._serialized_start=267 - _globals['_PUSHTASKINSREQUEST']._serialized_end=331 - _globals['_PUSHTASKINSRESPONSE']._serialized_start=333 - _globals['_PUSHTASKINSRESPONSE']._serialized_end=372 - _globals['_PULLTASKRESREQUEST']._serialized_start=374 - _globals['_PULLTASKRESREQUEST']._serialized_end=444 - _globals['_PULLTASKRESRESPONSE']._serialized_start=446 - _globals['_PULLTASKRESRESPONSE']._serialized_end=511 - _globals['_DRIVER']._serialized_start=514 - _globals['_DRIVER']._serialized_end=835 + _globals['_GETNODESREQUEST']._serialized_start=129 + _globals['_GETNODESREQUEST']._serialized_end=162 + _globals['_GETNODESRESPONSE']._serialized_start=164 + _globals['_GETNODESRESPONSE']._serialized_end=215 + _globals['_PUSHTASKINSREQUEST']._serialized_start=217 + _globals['_PUSHTASKINSREQUEST']._serialized_end=281 + _globals['_PUSHTASKINSRESPONSE']._serialized_start=283 + _globals['_PUSHTASKINSRESPONSE']._serialized_end=322 + _globals['_PULLTASKRESREQUEST']._serialized_start=324 + _globals['_PULLTASKRESREQUEST']._serialized_end=394 + _globals['_PULLTASKRESRESPONSE']._serialized_start=396 + _globals['_PULLTASKRESRESPONSE']._serialized_end=461 + _globals['_DRIVER']._serialized_start=464 + _globals['_DRIVER']._serialized_end=919 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/driver_pb2.pyi b/src/py/flwr/proto/driver_pb2.pyi index 2d8d11fb59a3..77ceb496d70c 100644 --- a/src/py/flwr/proto/driver_pb2.pyi +++ b/src/py/flwr/proto/driver_pb2.pyi @@ -13,32 +13,6 @@ import typing_extensions DESCRIPTOR: google.protobuf.descriptor.FileDescriptor -class CreateRunRequest(google.protobuf.message.Message): - """CreateRun""" - DESCRIPTOR: google.protobuf.descriptor.Descriptor - FAB_ID_FIELD_NUMBER: builtins.int - FAB_VERSION_FIELD_NUMBER: builtins.int - fab_id: typing.Text - fab_version: typing.Text - def __init__(self, - *, - fab_id: typing.Text = ..., - fab_version: typing.Text = 
..., - ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["fab_id",b"fab_id","fab_version",b"fab_version"]) -> None: ... -global___CreateRunRequest = CreateRunRequest - -class CreateRunResponse(google.protobuf.message.Message): - DESCRIPTOR: google.protobuf.descriptor.Descriptor - RUN_ID_FIELD_NUMBER: builtins.int - run_id: builtins.int - def __init__(self, - *, - run_id: builtins.int = ..., - ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["run_id",b"run_id"]) -> None: ... -global___CreateRunResponse = CreateRunResponse - class GetNodesRequest(google.protobuf.message.Message): """GetNodes messages""" DESCRIPTOR: google.protobuf.descriptor.Descriptor diff --git a/src/py/flwr/proto/driver_pb2_grpc.py b/src/py/flwr/proto/driver_pb2_grpc.py index ac6815023ebd..91e9fd8b9bdd 100644 --- a/src/py/flwr/proto/driver_pb2_grpc.py +++ b/src/py/flwr/proto/driver_pb2_grpc.py @@ -3,6 +3,8 @@ import grpc from flwr.proto import driver_pb2 as flwr_dot_proto_dot_driver__pb2 +from flwr.proto import fab_pb2 as flwr_dot_proto_dot_fab__pb2 +from flwr.proto import run_pb2 as flwr_dot_proto_dot_run__pb2 class DriverStub(object): @@ -16,8 +18,8 @@ def __init__(self, channel): """ self.CreateRun = channel.unary_unary( '/flwr.proto.Driver/CreateRun', - request_serializer=flwr_dot_proto_dot_driver__pb2.CreateRunRequest.SerializeToString, - response_deserializer=flwr_dot_proto_dot_driver__pb2.CreateRunResponse.FromString, + request_serializer=flwr_dot_proto_dot_run__pb2.CreateRunRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_run__pb2.CreateRunResponse.FromString, ) self.GetNodes = channel.unary_unary( '/flwr.proto.Driver/GetNodes', @@ -34,6 +36,16 @@ def __init__(self, channel): request_serializer=flwr_dot_proto_dot_driver__pb2.PullTaskResRequest.SerializeToString, response_deserializer=flwr_dot_proto_dot_driver__pb2.PullTaskResResponse.FromString, ) + self.GetRun = channel.unary_unary( + '/flwr.proto.Driver/GetRun', + 
request_serializer=flwr_dot_proto_dot_run__pb2.GetRunRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_run__pb2.GetRunResponse.FromString, + ) + self.GetFab = channel.unary_unary( + '/flwr.proto.Driver/GetFab', + request_serializer=flwr_dot_proto_dot_fab__pb2.GetFabRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_fab__pb2.GetFabResponse.FromString, + ) class DriverServicer(object): @@ -67,13 +79,27 @@ def PullTaskRes(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def GetRun(self, request, context): + """Get run details + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetFab(self, request, context): + """Get FAB + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def add_DriverServicer_to_server(servicer, server): rpc_method_handlers = { 'CreateRun': grpc.unary_unary_rpc_method_handler( servicer.CreateRun, - request_deserializer=flwr_dot_proto_dot_driver__pb2.CreateRunRequest.FromString, - response_serializer=flwr_dot_proto_dot_driver__pb2.CreateRunResponse.SerializeToString, + request_deserializer=flwr_dot_proto_dot_run__pb2.CreateRunRequest.FromString, + response_serializer=flwr_dot_proto_dot_run__pb2.CreateRunResponse.SerializeToString, ), 'GetNodes': grpc.unary_unary_rpc_method_handler( servicer.GetNodes, @@ -90,6 +116,16 @@ def add_DriverServicer_to_server(servicer, server): request_deserializer=flwr_dot_proto_dot_driver__pb2.PullTaskResRequest.FromString, response_serializer=flwr_dot_proto_dot_driver__pb2.PullTaskResResponse.SerializeToString, ), + 'GetRun': grpc.unary_unary_rpc_method_handler( + servicer.GetRun, + request_deserializer=flwr_dot_proto_dot_run__pb2.GetRunRequest.FromString, + 
response_serializer=flwr_dot_proto_dot_run__pb2.GetRunResponse.SerializeToString, + ), + 'GetFab': grpc.unary_unary_rpc_method_handler( + servicer.GetFab, + request_deserializer=flwr_dot_proto_dot_fab__pb2.GetFabRequest.FromString, + response_serializer=flwr_dot_proto_dot_fab__pb2.GetFabResponse.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( 'flwr.proto.Driver', rpc_method_handlers) @@ -112,8 +148,8 @@ def CreateRun(request, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/flwr.proto.Driver/CreateRun', - flwr_dot_proto_dot_driver__pb2.CreateRunRequest.SerializeToString, - flwr_dot_proto_dot_driver__pb2.CreateRunResponse.FromString, + flwr_dot_proto_dot_run__pb2.CreateRunRequest.SerializeToString, + flwr_dot_proto_dot_run__pb2.CreateRunResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @@ -167,3 +203,37 @@ def PullTaskRes(request, flwr_dot_proto_dot_driver__pb2.PullTaskResResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GetRun(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.Driver/GetRun', + flwr_dot_proto_dot_run__pb2.GetRunRequest.SerializeToString, + flwr_dot_proto_dot_run__pb2.GetRunResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GetFab(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.Driver/GetFab', + 
flwr_dot_proto_dot_fab__pb2.GetFabRequest.SerializeToString, + flwr_dot_proto_dot_fab__pb2.GetFabResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/src/py/flwr/proto/driver_pb2_grpc.pyi b/src/py/flwr/proto/driver_pb2_grpc.pyi index 43cf45f39b25..8f665301073d 100644 --- a/src/py/flwr/proto/driver_pb2_grpc.pyi +++ b/src/py/flwr/proto/driver_pb2_grpc.pyi @@ -4,13 +4,15 @@ isort:skip_file """ import abc import flwr.proto.driver_pb2 +import flwr.proto.fab_pb2 +import flwr.proto.run_pb2 import grpc class DriverStub: def __init__(self, channel: grpc.Channel) -> None: ... CreateRun: grpc.UnaryUnaryMultiCallable[ - flwr.proto.driver_pb2.CreateRunRequest, - flwr.proto.driver_pb2.CreateRunResponse] + flwr.proto.run_pb2.CreateRunRequest, + flwr.proto.run_pb2.CreateRunResponse] """Request run_id""" GetNodes: grpc.UnaryUnaryMultiCallable[ @@ -28,13 +30,23 @@ class DriverStub: flwr.proto.driver_pb2.PullTaskResResponse] """Get task results""" + GetRun: grpc.UnaryUnaryMultiCallable[ + flwr.proto.run_pb2.GetRunRequest, + flwr.proto.run_pb2.GetRunResponse] + """Get run details""" + + GetFab: grpc.UnaryUnaryMultiCallable[ + flwr.proto.fab_pb2.GetFabRequest, + flwr.proto.fab_pb2.GetFabResponse] + """Get FAB""" + class DriverServicer(metaclass=abc.ABCMeta): @abc.abstractmethod def CreateRun(self, - request: flwr.proto.driver_pb2.CreateRunRequest, + request: flwr.proto.run_pb2.CreateRunRequest, context: grpc.ServicerContext, - ) -> flwr.proto.driver_pb2.CreateRunResponse: + ) -> flwr.proto.run_pb2.CreateRunResponse: """Request run_id""" pass @@ -62,5 +74,21 @@ class DriverServicer(metaclass=abc.ABCMeta): """Get task results""" pass + @abc.abstractmethod + def GetRun(self, + request: flwr.proto.run_pb2.GetRunRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.run_pb2.GetRunResponse: + """Get run details""" + pass + + @abc.abstractmethod + def GetFab(self, + request: 
flwr.proto.fab_pb2.GetFabRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.fab_pb2.GetFabResponse: + """Get FAB""" + pass + def add_DriverServicer_to_server(servicer: DriverServicer, server: grpc.Server) -> None: ... diff --git a/src/py/flwr/proto/exec_pb2.py b/src/py/flwr/proto/exec_pb2.py new file mode 100644 index 000000000000..574f39eaa18d --- /dev/null +++ b/src/py/flwr/proto/exec_pb2.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: flwr/proto/exec.proto +# Protobuf Python Version: 4.25.0 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from flwr.proto import fab_pb2 as flwr_dot_proto_dot_fab__pb2 +from flwr.proto import transport_pb2 as flwr_dot_proto_dot_transport__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/exec.proto\x12\nflwr.proto\x1a\x14\x66lwr/proto/fab.proto\x1a\x1a\x66lwr/proto/transport.proto\"\xdf\x02\n\x0fStartRunRequest\x12\x1c\n\x03\x66\x61\x62\x18\x01 \x01(\x0b\x32\x0f.flwr.proto.Fab\x12H\n\x0foverride_config\x18\x02 \x03(\x0b\x32/.flwr.proto.StartRunRequest.OverrideConfigEntry\x12L\n\x11\x66\x65\x64\x65ration_config\x18\x03 \x03(\x0b\x32\x31.flwr.proto.StartRunRequest.FederationConfigEntry\x1aI\n\x13OverrideConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1aK\n\x15\x46\x65\x64\x65rationConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"\"\n\x10StartRunResponse\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\"#\n\x11StreamLogsRequest\x12\x0e\n\x06run_id\x18\x01 
\x01(\x04\"(\n\x12StreamLogsResponse\x12\x12\n\nlog_output\x18\x01 \x01(\t2\xa0\x01\n\x04\x45xec\x12G\n\x08StartRun\x12\x1b.flwr.proto.StartRunRequest\x1a\x1c.flwr.proto.StartRunResponse\"\x00\x12O\n\nStreamLogs\x12\x1d.flwr.proto.StreamLogsRequest\x1a\x1e.flwr.proto.StreamLogsResponse\"\x00\x30\x01\x62\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flwr.proto.exec_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + _globals['_STARTRUNREQUEST_OVERRIDECONFIGENTRY']._options = None + _globals['_STARTRUNREQUEST_OVERRIDECONFIGENTRY']._serialized_options = b'8\001' + _globals['_STARTRUNREQUEST_FEDERATIONCONFIGENTRY']._options = None + _globals['_STARTRUNREQUEST_FEDERATIONCONFIGENTRY']._serialized_options = b'8\001' + _globals['_STARTRUNREQUEST']._serialized_start=88 + _globals['_STARTRUNREQUEST']._serialized_end=439 + _globals['_STARTRUNREQUEST_OVERRIDECONFIGENTRY']._serialized_start=289 + _globals['_STARTRUNREQUEST_OVERRIDECONFIGENTRY']._serialized_end=362 + _globals['_STARTRUNREQUEST_FEDERATIONCONFIGENTRY']._serialized_start=364 + _globals['_STARTRUNREQUEST_FEDERATIONCONFIGENTRY']._serialized_end=439 + _globals['_STARTRUNRESPONSE']._serialized_start=441 + _globals['_STARTRUNRESPONSE']._serialized_end=475 + _globals['_STREAMLOGSREQUEST']._serialized_start=477 + _globals['_STREAMLOGSREQUEST']._serialized_end=512 + _globals['_STREAMLOGSRESPONSE']._serialized_start=514 + _globals['_STREAMLOGSRESPONSE']._serialized_end=554 + _globals['_EXEC']._serialized_start=557 + _globals['_EXEC']._serialized_end=717 +# @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/exec_pb2.pyi b/src/py/flwr/proto/exec_pb2.pyi new file mode 100644 index 000000000000..8b7e07c8875f --- /dev/null +++ b/src/py/flwr/proto/exec_pb2.pyi @@ -0,0 +1,98 @@ +""" +@generated by mypy-protobuf. Do not edit manually! 
+isort:skip_file +""" +import builtins +import flwr.proto.fab_pb2 +import flwr.proto.transport_pb2 +import google.protobuf.descriptor +import google.protobuf.internal.containers +import google.protobuf.message +import typing +import typing_extensions + +DESCRIPTOR: google.protobuf.descriptor.FileDescriptor + +class StartRunRequest(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + class OverrideConfigEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: typing.Text + @property + def value(self) -> flwr.proto.transport_pb2.Scalar: ... + def __init__(self, + *, + key: typing.Text = ..., + value: typing.Optional[flwr.proto.transport_pb2.Scalar] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... + + class FederationConfigEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: typing.Text + @property + def value(self) -> flwr.proto.transport_pb2.Scalar: ... + def __init__(self, + *, + key: typing.Text = ..., + value: typing.Optional[flwr.proto.transport_pb2.Scalar] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... + + FAB_FIELD_NUMBER: builtins.int + OVERRIDE_CONFIG_FIELD_NUMBER: builtins.int + FEDERATION_CONFIG_FIELD_NUMBER: builtins.int + @property + def fab(self) -> flwr.proto.fab_pb2.Fab: ... + @property + def override_config(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, flwr.proto.transport_pb2.Scalar]: ... 
+ @property + def federation_config(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, flwr.proto.transport_pb2.Scalar]: ... + def __init__(self, + *, + fab: typing.Optional[flwr.proto.fab_pb2.Fab] = ..., + override_config: typing.Optional[typing.Mapping[typing.Text, flwr.proto.transport_pb2.Scalar]] = ..., + federation_config: typing.Optional[typing.Mapping[typing.Text, flwr.proto.transport_pb2.Scalar]] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["fab",b"fab"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["fab",b"fab","federation_config",b"federation_config","override_config",b"override_config"]) -> None: ... +global___StartRunRequest = StartRunRequest + +class StartRunResponse(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + RUN_ID_FIELD_NUMBER: builtins.int + run_id: builtins.int + def __init__(self, + *, + run_id: builtins.int = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["run_id",b"run_id"]) -> None: ... +global___StartRunResponse = StartRunResponse + +class StreamLogsRequest(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + RUN_ID_FIELD_NUMBER: builtins.int + run_id: builtins.int + def __init__(self, + *, + run_id: builtins.int = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["run_id",b"run_id"]) -> None: ... +global___StreamLogsRequest = StreamLogsRequest + +class StreamLogsResponse(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + LOG_OUTPUT_FIELD_NUMBER: builtins.int + log_output: typing.Text + def __init__(self, + *, + log_output: typing.Text = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["log_output",b"log_output"]) -> None: ... 
+global___StreamLogsResponse = StreamLogsResponse diff --git a/src/py/flwr/proto/exec_pb2_grpc.py b/src/py/flwr/proto/exec_pb2_grpc.py new file mode 100644 index 000000000000..8cf4ce52a300 --- /dev/null +++ b/src/py/flwr/proto/exec_pb2_grpc.py @@ -0,0 +1,101 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from flwr.proto import exec_pb2 as flwr_dot_proto_dot_exec__pb2 + + +class ExecStub(object): + """Missing associated documentation comment in .proto file.""" + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.StartRun = channel.unary_unary( + '/flwr.proto.Exec/StartRun', + request_serializer=flwr_dot_proto_dot_exec__pb2.StartRunRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_exec__pb2.StartRunResponse.FromString, + ) + self.StreamLogs = channel.unary_stream( + '/flwr.proto.Exec/StreamLogs', + request_serializer=flwr_dot_proto_dot_exec__pb2.StreamLogsRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_exec__pb2.StreamLogsResponse.FromString, + ) + + +class ExecServicer(object): + """Missing associated documentation comment in .proto file.""" + + def StartRun(self, request, context): + """Start run upon request + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def StreamLogs(self, request, context): + """Start log stream upon request + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_ExecServicer_to_server(servicer, server): + rpc_method_handlers = { + 'StartRun': grpc.unary_unary_rpc_method_handler( + servicer.StartRun, + request_deserializer=flwr_dot_proto_dot_exec__pb2.StartRunRequest.FromString, + 
response_serializer=flwr_dot_proto_dot_exec__pb2.StartRunResponse.SerializeToString, + ), + 'StreamLogs': grpc.unary_stream_rpc_method_handler( + servicer.StreamLogs, + request_deserializer=flwr_dot_proto_dot_exec__pb2.StreamLogsRequest.FromString, + response_serializer=flwr_dot_proto_dot_exec__pb2.StreamLogsResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'flwr.proto.Exec', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL API. +class Exec(object): + """Missing associated documentation comment in .proto file.""" + + @staticmethod + def StartRun(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.Exec/StartRun', + flwr_dot_proto_dot_exec__pb2.StartRunRequest.SerializeToString, + flwr_dot_proto_dot_exec__pb2.StartRunResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def StreamLogs(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_stream(request, target, '/flwr.proto.Exec/StreamLogs', + flwr_dot_proto_dot_exec__pb2.StreamLogsRequest.SerializeToString, + flwr_dot_proto_dot_exec__pb2.StreamLogsResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/src/py/flwr/proto/exec_pb2_grpc.pyi b/src/py/flwr/proto/exec_pb2_grpc.pyi new file mode 100644 index 000000000000..20da3a53f4a8 --- /dev/null +++ b/src/py/flwr/proto/exec_pb2_grpc.pyi @@ -0,0 +1,41 @@ +""" +@generated by mypy-protobuf. Do not edit manually! 
+isort:skip_file +""" +import abc +import flwr.proto.exec_pb2 +import grpc +import typing + +class ExecStub: + def __init__(self, channel: grpc.Channel) -> None: ... + StartRun: grpc.UnaryUnaryMultiCallable[ + flwr.proto.exec_pb2.StartRunRequest, + flwr.proto.exec_pb2.StartRunResponse] + """Start run upon request""" + + StreamLogs: grpc.UnaryStreamMultiCallable[ + flwr.proto.exec_pb2.StreamLogsRequest, + flwr.proto.exec_pb2.StreamLogsResponse] + """Start log stream upon request""" + + +class ExecServicer(metaclass=abc.ABCMeta): + @abc.abstractmethod + def StartRun(self, + request: flwr.proto.exec_pb2.StartRunRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.exec_pb2.StartRunResponse: + """Start run upon request""" + pass + + @abc.abstractmethod + def StreamLogs(self, + request: flwr.proto.exec_pb2.StreamLogsRequest, + context: grpc.ServicerContext, + ) -> typing.Iterator[flwr.proto.exec_pb2.StreamLogsResponse]: + """Start log stream upon request""" + pass + + +def add_ExecServicer_to_server(servicer: ExecServicer, server: grpc.Server) -> None: ... diff --git a/src/py/flwr/proto/fab_pb2.py b/src/py/flwr/proto/fab_pb2.py new file mode 100644 index 000000000000..3a5e50000c10 --- /dev/null +++ b/src/py/flwr/proto/fab_pb2.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: flwr/proto/fab.proto +# Protobuf Python Version: 4.25.0 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from flwr.proto import node_pb2 as flwr_dot_proto_dot_node__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x14\x66lwr/proto/fab.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\"(\n\x03\x46\x61\x62\x12\x10\n\x08hash_str\x18\x01 \x01(\t\x12\x0f\n\x07\x63ontent\x18\x02 \x01(\x0c\"A\n\rGetFabRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08hash_str\x18\x02 \x01(\t\".\n\x0eGetFabResponse\x12\x1c\n\x03\x66\x61\x62\x18\x01 \x01(\x0b\x32\x0f.flwr.proto.Fabb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flwr.proto.fab_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + _globals['_FAB']._serialized_start=59 + _globals['_FAB']._serialized_end=99 + _globals['_GETFABREQUEST']._serialized_start=101 + _globals['_GETFABREQUEST']._serialized_end=166 + _globals['_GETFABRESPONSE']._serialized_start=168 + _globals['_GETFABRESPONSE']._serialized_end=214 +# @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/fab_pb2.pyi b/src/py/flwr/proto/fab_pb2.pyi new file mode 100644 index 000000000000..8cfdcbaf76ad --- /dev/null +++ b/src/py/flwr/proto/fab_pb2.pyi @@ -0,0 +1,62 @@ +""" +@generated by mypy-protobuf. Do not edit manually! 
+isort:skip_file +""" +import builtins +import flwr.proto.node_pb2 +import google.protobuf.descriptor +import google.protobuf.message +import typing +import typing_extensions + +DESCRIPTOR: google.protobuf.descriptor.FileDescriptor + +class Fab(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + HASH_STR_FIELD_NUMBER: builtins.int + CONTENT_FIELD_NUMBER: builtins.int + hash_str: typing.Text + """This field is the hash of the data field. It is used to identify the data. + The hash is calculated using the SHA-256 algorithm and is represented as a + hex string (sha256hex). + """ + + content: builtins.bytes + """This field contains the fab file contents a one bytes blob.""" + + def __init__(self, + *, + hash_str: typing.Text = ..., + content: builtins.bytes = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["content",b"content","hash_str",b"hash_str"]) -> None: ... +global___Fab = Fab + +class GetFabRequest(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + NODE_FIELD_NUMBER: builtins.int + HASH_STR_FIELD_NUMBER: builtins.int + @property + def node(self) -> flwr.proto.node_pb2.Node: ... + hash_str: typing.Text + def __init__(self, + *, + node: typing.Optional[flwr.proto.node_pb2.Node] = ..., + hash_str: typing.Text = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["node",b"node"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["hash_str",b"hash_str","node",b"node"]) -> None: ... +global___GetFabRequest = GetFabRequest + +class GetFabResponse(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + FAB_FIELD_NUMBER: builtins.int + @property + def fab(self) -> global___Fab: ... + def __init__(self, + *, + fab: typing.Optional[global___Fab] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["fab",b"fab"]) -> builtins.bool: ... 
+ def ClearField(self, field_name: typing_extensions.Literal["fab",b"fab"]) -> None: ... +global___GetFabResponse = GetFabResponse diff --git a/src/py/flwr/proto/fab_pb2_grpc.py b/src/py/flwr/proto/fab_pb2_grpc.py new file mode 100644 index 000000000000..2daafffebfc8 --- /dev/null +++ b/src/py/flwr/proto/fab_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/src/py/flwr/proto/fab_pb2_grpc.pyi b/src/py/flwr/proto/fab_pb2_grpc.pyi new file mode 100644 index 000000000000..f3a5a087ef5d --- /dev/null +++ b/src/py/flwr/proto/fab_pb2_grpc.pyi @@ -0,0 +1,4 @@ +""" +@generated by mypy-protobuf. Do not edit manually! +isort:skip_file +""" diff --git a/src/py/flwr/proto/fleet_pb2.py b/src/py/flwr/proto/fleet_pb2.py index 42f3292d910d..3185bc2ce111 100644 --- a/src/py/flwr/proto/fleet_pb2.py +++ b/src/py/flwr/proto/fleet_pb2.py @@ -14,9 +14,11 @@ from flwr.proto import node_pb2 as flwr_dot_proto_dot_node__pb2 from flwr.proto import task_pb2 as flwr_dot_proto_dot_task__pb2 +from flwr.proto import run_pb2 as flwr_dot_proto_dot_run__pb2 +from flwr.proto import fab_pb2 as flwr_dot_proto_dot_fab__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16\x66lwr/proto/fleet.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\"*\n\x11\x43reateNodeRequest\x12\x15\n\rping_interval\x18\x01 \x01(\x01\"4\n\x12\x43reateNodeResponse\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"3\n\x11\x44\x65leteNodeRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"\x14\n\x12\x44\x65leteNodeResponse\"D\n\x0bPingRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x15\n\rping_interval\x18\x02 \x01(\x01\"\x1f\n\x0cPingResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\"F\n\x12PullTaskInsRequest\x12\x1e\n\x04node\x18\x01 
\x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"k\n\x13PullTaskInsResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12*\n\rtask_ins_list\x18\x02 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"@\n\x12PushTaskResRequest\x12*\n\rtask_res_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskRes\"\xae\x01\n\x13PushTaskResResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12=\n\x07results\x18\x02 \x03(\x0b\x32,.flwr.proto.PushTaskResResponse.ResultsEntry\x1a.\n\x0cResultsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\r:\x02\x38\x01\":\n\x03Run\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\x12\x0e\n\x06\x66\x61\x62_id\x18\x02 \x01(\t\x12\x13\n\x0b\x66\x61\x62_version\x18\x03 \x01(\t\"\x1f\n\rGetRunRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\".\n\x0eGetRunResponse\x12\x1c\n\x03run\x18\x01 \x01(\x0b\x32\x0f.flwr.proto.Run\"\x1e\n\tReconnect\x12\x11\n\treconnect\x18\x01 \x01(\x04\x32\xc9\x03\n\x05\x46leet\x12M\n\nCreateNode\x12\x1d.flwr.proto.CreateNodeRequest\x1a\x1e.flwr.proto.CreateNodeResponse\"\x00\x12M\n\nDeleteNode\x12\x1d.flwr.proto.DeleteNodeRequest\x1a\x1e.flwr.proto.DeleteNodeResponse\"\x00\x12;\n\x04Ping\x12\x17.flwr.proto.PingRequest\x1a\x18.flwr.proto.PingResponse\"\x00\x12P\n\x0bPullTaskIns\x12\x1e.flwr.proto.PullTaskInsRequest\x1a\x1f.flwr.proto.PullTaskInsResponse\"\x00\x12P\n\x0bPushTaskRes\x12\x1e.flwr.proto.PushTaskResRequest\x1a\x1f.flwr.proto.PushTaskResResponse\"\x00\x12\x41\n\x06GetRun\x12\x19.flwr.proto.GetRunRequest\x1a\x1a.flwr.proto.GetRunResponse\"\x00\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16\x66lwr/proto/fleet.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\x1a\x14\x66lwr/proto/run.proto\x1a\x14\x66lwr/proto/fab.proto\"*\n\x11\x43reateNodeRequest\x12\x15\n\rping_interval\x18\x01 \x01(\x01\"4\n\x12\x43reateNodeResponse\x12\x1e\n\x04node\x18\x01 
\x01(\x0b\x32\x10.flwr.proto.Node\"3\n\x11\x44\x65leteNodeRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"\x14\n\x12\x44\x65leteNodeResponse\"D\n\x0bPingRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x15\n\rping_interval\x18\x02 \x01(\x01\"\x1f\n\x0cPingResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\"F\n\x12PullTaskInsRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"k\n\x13PullTaskInsResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12*\n\rtask_ins_list\x18\x02 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"`\n\x12PushTaskResRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12*\n\rtask_res_list\x18\x02 \x03(\x0b\x32\x13.flwr.proto.TaskRes\"\xae\x01\n\x13PushTaskResResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12=\n\x07results\x18\x02 \x03(\x0b\x32,.flwr.proto.PushTaskResResponse.ResultsEntry\x1a.\n\x0cResultsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\r:\x02\x38\x01\"\x1e\n\tReconnect\x12\x11\n\treconnect\x18\x01 \x01(\x04\x32\x8c\x04\n\x05\x46leet\x12M\n\nCreateNode\x12\x1d.flwr.proto.CreateNodeRequest\x1a\x1e.flwr.proto.CreateNodeResponse\"\x00\x12M\n\nDeleteNode\x12\x1d.flwr.proto.DeleteNodeRequest\x1a\x1e.flwr.proto.DeleteNodeResponse\"\x00\x12;\n\x04Ping\x12\x17.flwr.proto.PingRequest\x1a\x18.flwr.proto.PingResponse\"\x00\x12P\n\x0bPullTaskIns\x12\x1e.flwr.proto.PullTaskInsRequest\x1a\x1f.flwr.proto.PullTaskInsResponse\"\x00\x12P\n\x0bPushTaskRes\x12\x1e.flwr.proto.PushTaskResRequest\x1a\x1f.flwr.proto.PushTaskResResponse\"\x00\x12\x41\n\x06GetRun\x12\x19.flwr.proto.GetRunRequest\x1a\x1a.flwr.proto.GetRunResponse\"\x00\x12\x41\n\x06GetFab\x12\x19.flwr.proto.GetFabRequest\x1a\x1a.flwr.proto.GetFabResponse\"\x00\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -25,36 +27,30 @@ DESCRIPTOR._options = None 
_globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._options = None _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_options = b'8\001' - _globals['_CREATENODEREQUEST']._serialized_start=84 - _globals['_CREATENODEREQUEST']._serialized_end=126 - _globals['_CREATENODERESPONSE']._serialized_start=128 - _globals['_CREATENODERESPONSE']._serialized_end=180 - _globals['_DELETENODEREQUEST']._serialized_start=182 - _globals['_DELETENODEREQUEST']._serialized_end=233 - _globals['_DELETENODERESPONSE']._serialized_start=235 - _globals['_DELETENODERESPONSE']._serialized_end=255 - _globals['_PINGREQUEST']._serialized_start=257 - _globals['_PINGREQUEST']._serialized_end=325 - _globals['_PINGRESPONSE']._serialized_start=327 - _globals['_PINGRESPONSE']._serialized_end=358 - _globals['_PULLTASKINSREQUEST']._serialized_start=360 - _globals['_PULLTASKINSREQUEST']._serialized_end=430 - _globals['_PULLTASKINSRESPONSE']._serialized_start=432 - _globals['_PULLTASKINSRESPONSE']._serialized_end=539 - _globals['_PUSHTASKRESREQUEST']._serialized_start=541 - _globals['_PUSHTASKRESREQUEST']._serialized_end=605 - _globals['_PUSHTASKRESRESPONSE']._serialized_start=608 - _globals['_PUSHTASKRESRESPONSE']._serialized_end=782 - _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_start=736 - _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_end=782 - _globals['_RUN']._serialized_start=784 - _globals['_RUN']._serialized_end=842 - _globals['_GETRUNREQUEST']._serialized_start=844 - _globals['_GETRUNREQUEST']._serialized_end=875 - _globals['_GETRUNRESPONSE']._serialized_start=877 - _globals['_GETRUNRESPONSE']._serialized_end=923 - _globals['_RECONNECT']._serialized_start=925 - _globals['_RECONNECT']._serialized_end=955 - _globals['_FLEET']._serialized_start=958 - _globals['_FLEET']._serialized_end=1415 + _globals['_CREATENODEREQUEST']._serialized_start=128 + _globals['_CREATENODEREQUEST']._serialized_end=170 + _globals['_CREATENODERESPONSE']._serialized_start=172 + 
_globals['_CREATENODERESPONSE']._serialized_end=224 + _globals['_DELETENODEREQUEST']._serialized_start=226 + _globals['_DELETENODEREQUEST']._serialized_end=277 + _globals['_DELETENODERESPONSE']._serialized_start=279 + _globals['_DELETENODERESPONSE']._serialized_end=299 + _globals['_PINGREQUEST']._serialized_start=301 + _globals['_PINGREQUEST']._serialized_end=369 + _globals['_PINGRESPONSE']._serialized_start=371 + _globals['_PINGRESPONSE']._serialized_end=402 + _globals['_PULLTASKINSREQUEST']._serialized_start=404 + _globals['_PULLTASKINSREQUEST']._serialized_end=474 + _globals['_PULLTASKINSRESPONSE']._serialized_start=476 + _globals['_PULLTASKINSRESPONSE']._serialized_end=583 + _globals['_PUSHTASKRESREQUEST']._serialized_start=585 + _globals['_PUSHTASKRESREQUEST']._serialized_end=681 + _globals['_PUSHTASKRESRESPONSE']._serialized_start=684 + _globals['_PUSHTASKRESRESPONSE']._serialized_end=858 + _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_start=812 + _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_end=858 + _globals['_RECONNECT']._serialized_start=860 + _globals['_RECONNECT']._serialized_end=890 + _globals['_FLEET']._serialized_start=893 + _globals['_FLEET']._serialized_end=1417 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/fleet_pb2.pyi b/src/py/flwr/proto/fleet_pb2.pyi index a6f38b703e76..76875bc1a4b9 100644 --- a/src/py/flwr/proto/fleet_pb2.pyi +++ b/src/py/flwr/proto/fleet_pb2.pyi @@ -124,14 +124,19 @@ global___PullTaskInsResponse = PullTaskInsResponse class PushTaskResRequest(google.protobuf.message.Message): """PushTaskRes messages""" DESCRIPTOR: google.protobuf.descriptor.Descriptor + NODE_FIELD_NUMBER: builtins.int TASK_RES_LIST_FIELD_NUMBER: builtins.int @property + def node(self) -> flwr.proto.node_pb2.Node: ... + @property def task_res_list(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[flwr.proto.task_pb2.TaskRes]: ... 
def __init__(self, *, + node: typing.Optional[flwr.proto.node_pb2.Node] = ..., task_res_list: typing.Optional[typing.Iterable[flwr.proto.task_pb2.TaskRes]] = ..., ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["task_res_list",b"task_res_list"]) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["node",b"node"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["node",b"node","task_res_list",b"task_res_list"]) -> None: ... global___PushTaskResRequest = PushTaskResRequest class PushTaskResResponse(google.protobuf.message.Message): @@ -164,48 +169,6 @@ class PushTaskResResponse(google.protobuf.message.Message): def ClearField(self, field_name: typing_extensions.Literal["reconnect",b"reconnect","results",b"results"]) -> None: ... global___PushTaskResResponse = PushTaskResResponse -class Run(google.protobuf.message.Message): - """GetRun messages""" - DESCRIPTOR: google.protobuf.descriptor.Descriptor - RUN_ID_FIELD_NUMBER: builtins.int - FAB_ID_FIELD_NUMBER: builtins.int - FAB_VERSION_FIELD_NUMBER: builtins.int - run_id: builtins.int - fab_id: typing.Text - fab_version: typing.Text - def __init__(self, - *, - run_id: builtins.int = ..., - fab_id: typing.Text = ..., - fab_version: typing.Text = ..., - ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["fab_id",b"fab_id","fab_version",b"fab_version","run_id",b"run_id"]) -> None: ... -global___Run = Run - -class GetRunRequest(google.protobuf.message.Message): - DESCRIPTOR: google.protobuf.descriptor.Descriptor - RUN_ID_FIELD_NUMBER: builtins.int - run_id: builtins.int - def __init__(self, - *, - run_id: builtins.int = ..., - ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["run_id",b"run_id"]) -> None: ... 
-global___GetRunRequest = GetRunRequest - -class GetRunResponse(google.protobuf.message.Message): - DESCRIPTOR: google.protobuf.descriptor.Descriptor - RUN_FIELD_NUMBER: builtins.int - @property - def run(self) -> global___Run: ... - def __init__(self, - *, - run: typing.Optional[global___Run] = ..., - ) -> None: ... - def HasField(self, field_name: typing_extensions.Literal["run",b"run"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["run",b"run"]) -> None: ... -global___GetRunResponse = GetRunResponse - class Reconnect(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor RECONNECT_FIELD_NUMBER: builtins.int diff --git a/src/py/flwr/proto/fleet_pb2_grpc.py b/src/py/flwr/proto/fleet_pb2_grpc.py index 16757eaed381..5f4bb6732dcf 100644 --- a/src/py/flwr/proto/fleet_pb2_grpc.py +++ b/src/py/flwr/proto/fleet_pb2_grpc.py @@ -2,7 +2,9 @@ """Client and server classes corresponding to protobuf-defined services.""" import grpc +from flwr.proto import fab_pb2 as flwr_dot_proto_dot_fab__pb2 from flwr.proto import fleet_pb2 as flwr_dot_proto_dot_fleet__pb2 +from flwr.proto import run_pb2 as flwr_dot_proto_dot_run__pb2 class FleetStub(object): @@ -41,8 +43,13 @@ def __init__(self, channel): ) self.GetRun = channel.unary_unary( '/flwr.proto.Fleet/GetRun', - request_serializer=flwr_dot_proto_dot_fleet__pb2.GetRunRequest.SerializeToString, - response_deserializer=flwr_dot_proto_dot_fleet__pb2.GetRunResponse.FromString, + request_serializer=flwr_dot_proto_dot_run__pb2.GetRunRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_run__pb2.GetRunResponse.FromString, + ) + self.GetFab = channel.unary_unary( + '/flwr.proto.Fleet/GetFab', + request_serializer=flwr_dot_proto_dot_fab__pb2.GetFabRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_fab__pb2.GetFabResponse.FromString, ) @@ -91,6 +98,13 @@ def GetRun(self, request, context): context.set_details('Method not implemented!') raise 
NotImplementedError('Method not implemented!') + def GetFab(self, request, context): + """Get FAB + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def add_FleetServicer_to_server(servicer, server): rpc_method_handlers = { @@ -121,8 +135,13 @@ def add_FleetServicer_to_server(servicer, server): ), 'GetRun': grpc.unary_unary_rpc_method_handler( servicer.GetRun, - request_deserializer=flwr_dot_proto_dot_fleet__pb2.GetRunRequest.FromString, - response_serializer=flwr_dot_proto_dot_fleet__pb2.GetRunResponse.SerializeToString, + request_deserializer=flwr_dot_proto_dot_run__pb2.GetRunRequest.FromString, + response_serializer=flwr_dot_proto_dot_run__pb2.GetRunResponse.SerializeToString, + ), + 'GetFab': grpc.unary_unary_rpc_method_handler( + servicer.GetFab, + request_deserializer=flwr_dot_proto_dot_fab__pb2.GetFabRequest.FromString, + response_serializer=flwr_dot_proto_dot_fab__pb2.GetFabResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( @@ -231,7 +250,24 @@ def GetRun(request, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/flwr.proto.Fleet/GetRun', - flwr_dot_proto_dot_fleet__pb2.GetRunRequest.SerializeToString, - flwr_dot_proto_dot_fleet__pb2.GetRunResponse.FromString, + flwr_dot_proto_dot_run__pb2.GetRunRequest.SerializeToString, + flwr_dot_proto_dot_run__pb2.GetRunResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GetFab(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.Fleet/GetFab', + flwr_dot_proto_dot_fab__pb2.GetFabRequest.SerializeToString, + 
flwr_dot_proto_dot_fab__pb2.GetFabResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/src/py/flwr/proto/fleet_pb2_grpc.pyi b/src/py/flwr/proto/fleet_pb2_grpc.pyi index f275cd149d69..7988fd6a1dda 100644 --- a/src/py/flwr/proto/fleet_pb2_grpc.pyi +++ b/src/py/flwr/proto/fleet_pb2_grpc.pyi @@ -3,7 +3,9 @@ isort:skip_file """ import abc +import flwr.proto.fab_pb2 import flwr.proto.fleet_pb2 +import flwr.proto.run_pb2 import grpc class FleetStub: @@ -37,8 +39,13 @@ class FleetStub: """ GetRun: grpc.UnaryUnaryMultiCallable[ - flwr.proto.fleet_pb2.GetRunRequest, - flwr.proto.fleet_pb2.GetRunResponse] + flwr.proto.run_pb2.GetRunRequest, + flwr.proto.run_pb2.GetRunResponse] + + GetFab: grpc.UnaryUnaryMultiCallable[ + flwr.proto.fab_pb2.GetFabRequest, + flwr.proto.fab_pb2.GetFabResponse] + """Get FAB""" class FleetServicer(metaclass=abc.ABCMeta): @@ -84,9 +91,17 @@ class FleetServicer(metaclass=abc.ABCMeta): @abc.abstractmethod def GetRun(self, - request: flwr.proto.fleet_pb2.GetRunRequest, + request: flwr.proto.run_pb2.GetRunRequest, context: grpc.ServicerContext, - ) -> flwr.proto.fleet_pb2.GetRunResponse: ... + ) -> flwr.proto.run_pb2.GetRunResponse: ... + + @abc.abstractmethod + def GetFab(self, + request: flwr.proto.fab_pb2.GetFabRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.fab_pb2.GetFabResponse: + """Get FAB""" + pass def add_FleetServicer_to_server(servicer: FleetServicer, server: grpc.Server) -> None: ... diff --git a/src/py/flwr/proto/message_pb2.py b/src/py/flwr/proto/message_pb2.py new file mode 100644 index 000000000000..d2201cb07b56 --- /dev/null +++ b/src/py/flwr/proto/message_pb2.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: flwr/proto/message.proto +# Protobuf Python Version: 4.25.0 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from flwr.proto import error_pb2 as flwr_dot_proto_dot_error__pb2 +from flwr.proto import recordset_pb2 as flwr_dot_proto_dot_recordset__pb2 +from flwr.proto import transport_pb2 as flwr_dot_proto_dot_transport__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x18\x66lwr/proto/message.proto\x12\nflwr.proto\x1a\x16\x66lwr/proto/error.proto\x1a\x1a\x66lwr/proto/recordset.proto\x1a\x1a\x66lwr/proto/transport.proto\"{\n\x07Message\x12&\n\x08metadata\x18\x01 \x01(\x0b\x32\x14.flwr.proto.Metadata\x12&\n\x07\x63ontent\x18\x02 \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12 \n\x05\x65rror\x18\x03 \x01(\x0b\x32\x11.flwr.proto.Error\"\xbf\x02\n\x07\x43ontext\x12\x0f\n\x07node_id\x18\x01 \x01(\x04\x12\x38\n\x0bnode_config\x18\x02 \x03(\x0b\x32#.flwr.proto.Context.NodeConfigEntry\x12$\n\x05state\x18\x03 \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12\x36\n\nrun_config\x18\x04 \x03(\x0b\x32\".flwr.proto.Context.RunConfigEntry\x1a\x45\n\x0fNodeConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\x44\n\x0eRunConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"\xbb\x01\n\x08Metadata\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\x12\x12\n\nmessage_id\x18\x02 \x01(\t\x12\x13\n\x0bsrc_node_id\x18\x03 \x01(\x04\x12\x13\n\x0b\x64st_node_id\x18\x04 \x01(\x04\x12\x18\n\x10reply_to_message\x18\x05 \x01(\t\x12\x10\n\x08group_id\x18\x06 \x01(\t\x12\x0b\n\x03ttl\x18\x07 \x01(\x01\x12\x14\n\x0cmessage_type\x18\x08 
\x01(\t\x12\x12\n\ncreated_at\x18\t \x01(\x01\x62\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flwr.proto.message_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + _globals['_CONTEXT_NODECONFIGENTRY']._options = None + _globals['_CONTEXT_NODECONFIGENTRY']._serialized_options = b'8\001' + _globals['_CONTEXT_RUNCONFIGENTRY']._options = None + _globals['_CONTEXT_RUNCONFIGENTRY']._serialized_options = b'8\001' + _globals['_MESSAGE']._serialized_start=120 + _globals['_MESSAGE']._serialized_end=243 + _globals['_CONTEXT']._serialized_start=246 + _globals['_CONTEXT']._serialized_end=565 + _globals['_CONTEXT_NODECONFIGENTRY']._serialized_start=426 + _globals['_CONTEXT_NODECONFIGENTRY']._serialized_end=495 + _globals['_CONTEXT_RUNCONFIGENTRY']._serialized_start=497 + _globals['_CONTEXT_RUNCONFIGENTRY']._serialized_end=565 + _globals['_METADATA']._serialized_start=568 + _globals['_METADATA']._serialized_end=755 +# @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/message_pb2.pyi b/src/py/flwr/proto/message_pb2.pyi new file mode 100644 index 000000000000..b352917f217e --- /dev/null +++ b/src/py/flwr/proto/message_pb2.pyi @@ -0,0 +1,125 @@ +""" +@generated by mypy-protobuf. Do not edit manually! +isort:skip_file +""" +import builtins +import flwr.proto.error_pb2 +import flwr.proto.recordset_pb2 +import flwr.proto.transport_pb2 +import google.protobuf.descriptor +import google.protobuf.internal.containers +import google.protobuf.message +import typing +import typing_extensions + +DESCRIPTOR: google.protobuf.descriptor.FileDescriptor + +class Message(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + METADATA_FIELD_NUMBER: builtins.int + CONTENT_FIELD_NUMBER: builtins.int + ERROR_FIELD_NUMBER: builtins.int + @property + def metadata(self) -> global___Metadata: ... 
+ @property + def content(self) -> flwr.proto.recordset_pb2.RecordSet: ... + @property + def error(self) -> flwr.proto.error_pb2.Error: ... + def __init__(self, + *, + metadata: typing.Optional[global___Metadata] = ..., + content: typing.Optional[flwr.proto.recordset_pb2.RecordSet] = ..., + error: typing.Optional[flwr.proto.error_pb2.Error] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["content",b"content","error",b"error","metadata",b"metadata"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["content",b"content","error",b"error","metadata",b"metadata"]) -> None: ... +global___Message = Message + +class Context(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + class NodeConfigEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: typing.Text + @property + def value(self) -> flwr.proto.transport_pb2.Scalar: ... + def __init__(self, + *, + key: typing.Text = ..., + value: typing.Optional[flwr.proto.transport_pb2.Scalar] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... + + class RunConfigEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: typing.Text + @property + def value(self) -> flwr.proto.transport_pb2.Scalar: ... + def __init__(self, + *, + key: typing.Text = ..., + value: typing.Optional[flwr.proto.transport_pb2.Scalar] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... 
+ + NODE_ID_FIELD_NUMBER: builtins.int + NODE_CONFIG_FIELD_NUMBER: builtins.int + STATE_FIELD_NUMBER: builtins.int + RUN_CONFIG_FIELD_NUMBER: builtins.int + node_id: builtins.int + @property + def node_config(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, flwr.proto.transport_pb2.Scalar]: ... + @property + def state(self) -> flwr.proto.recordset_pb2.RecordSet: ... + @property + def run_config(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, flwr.proto.transport_pb2.Scalar]: ... + def __init__(self, + *, + node_id: builtins.int = ..., + node_config: typing.Optional[typing.Mapping[typing.Text, flwr.proto.transport_pb2.Scalar]] = ..., + state: typing.Optional[flwr.proto.recordset_pb2.RecordSet] = ..., + run_config: typing.Optional[typing.Mapping[typing.Text, flwr.proto.transport_pb2.Scalar]] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["state",b"state"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["node_config",b"node_config","node_id",b"node_id","run_config",b"run_config","state",b"state"]) -> None: ... 
+global___Context = Context + +class Metadata(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + RUN_ID_FIELD_NUMBER: builtins.int + MESSAGE_ID_FIELD_NUMBER: builtins.int + SRC_NODE_ID_FIELD_NUMBER: builtins.int + DST_NODE_ID_FIELD_NUMBER: builtins.int + REPLY_TO_MESSAGE_FIELD_NUMBER: builtins.int + GROUP_ID_FIELD_NUMBER: builtins.int + TTL_FIELD_NUMBER: builtins.int + MESSAGE_TYPE_FIELD_NUMBER: builtins.int + CREATED_AT_FIELD_NUMBER: builtins.int + run_id: builtins.int + message_id: typing.Text + src_node_id: builtins.int + dst_node_id: builtins.int + reply_to_message: typing.Text + group_id: typing.Text + ttl: builtins.float + message_type: typing.Text + created_at: builtins.float + def __init__(self, + *, + run_id: builtins.int = ..., + message_id: typing.Text = ..., + src_node_id: builtins.int = ..., + dst_node_id: builtins.int = ..., + reply_to_message: typing.Text = ..., + group_id: typing.Text = ..., + ttl: builtins.float = ..., + message_type: typing.Text = ..., + created_at: builtins.float = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["created_at",b"created_at","dst_node_id",b"dst_node_id","group_id",b"group_id","message_id",b"message_id","message_type",b"message_type","reply_to_message",b"reply_to_message","run_id",b"run_id","src_node_id",b"src_node_id","ttl",b"ttl"]) -> None: ... +global___Metadata = Metadata diff --git a/src/py/flwr/proto/message_pb2_grpc.py b/src/py/flwr/proto/message_pb2_grpc.py new file mode 100644 index 000000000000..2daafffebfc8 --- /dev/null +++ b/src/py/flwr/proto/message_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
+"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/src/py/flwr/proto/message_pb2_grpc.pyi b/src/py/flwr/proto/message_pb2_grpc.pyi new file mode 100644 index 000000000000..f3a5a087ef5d --- /dev/null +++ b/src/py/flwr/proto/message_pb2_grpc.pyi @@ -0,0 +1,4 @@ +""" +@generated by mypy-protobuf. Do not edit manually! +isort:skip_file +""" diff --git a/src/py/flwr/proto/node_pb2.py b/src/py/flwr/proto/node_pb2.py index b300f2c562c2..f94691db6c3f 100644 --- a/src/py/flwr/proto/node_pb2.py +++ b/src/py/flwr/proto/node_pb2.py @@ -14,7 +14,7 @@ -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/node.proto\x12\nflwr.proto\"*\n\x04Node\x12\x0f\n\x07node_id\x18\x01 \x01(\x12\x12\x11\n\tanonymous\x18\x02 \x01(\x08\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/node.proto\x12\nflwr.proto\"*\n\x04Node\x12\x0f\n\x07node_id\x18\x01 \x01(\x04\x12\x11\n\tanonymous\x18\x02 \x01(\x08\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) diff --git a/src/py/flwr/proto/recordset_pb2.py b/src/py/flwr/proto/recordset_pb2.py index f7f74d72182b..6b169f869ab4 100644 --- a/src/py/flwr/proto/recordset_pb2.py +++ b/src/py/flwr/proto/recordset_pb2.py @@ -14,7 +14,7 @@ -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1a\x66lwr/proto/recordset.proto\x12\nflwr.proto\"\x1a\n\nDoubleList\x12\x0c\n\x04vals\x18\x01 \x03(\x01\"\x1a\n\nSint64List\x12\x0c\n\x04vals\x18\x01 \x03(\x12\"\x18\n\x08\x42oolList\x12\x0c\n\x04vals\x18\x01 \x03(\x08\"\x1a\n\nStringList\x12\x0c\n\x04vals\x18\x01 \x03(\t\"\x19\n\tBytesList\x12\x0c\n\x04vals\x18\x01 \x03(\x0c\"B\n\x05\x41rray\x12\r\n\x05\x64type\x18\x01 \x01(\t\x12\r\n\x05shape\x18\x02 \x03(\x05\x12\r\n\x05stype\x18\x03 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\"\x9f\x01\n\x12MetricsRecordValue\x12\x10\n\x06\x64ouble\x18\x01 \x01(\x01H\x00\x12\x10\n\x06sint64\x18\x02 
\x01(\x12H\x00\x12-\n\x0b\x64ouble_list\x18\x15 \x01(\x0b\x32\x16.flwr.proto.DoubleListH\x00\x12-\n\x0bsint64_list\x18\x16 \x01(\x0b\x32\x16.flwr.proto.Sint64ListH\x00\x42\x07\n\x05value\"\xd9\x02\n\x12\x43onfigsRecordValue\x12\x10\n\x06\x64ouble\x18\x01 \x01(\x01H\x00\x12\x10\n\x06sint64\x18\x02 \x01(\x12H\x00\x12\x0e\n\x04\x62ool\x18\x03 \x01(\x08H\x00\x12\x10\n\x06string\x18\x04 \x01(\tH\x00\x12\x0f\n\x05\x62ytes\x18\x05 \x01(\x0cH\x00\x12-\n\x0b\x64ouble_list\x18\x15 \x01(\x0b\x32\x16.flwr.proto.DoubleListH\x00\x12-\n\x0bsint64_list\x18\x16 \x01(\x0b\x32\x16.flwr.proto.Sint64ListH\x00\x12)\n\tbool_list\x18\x17 \x01(\x0b\x32\x14.flwr.proto.BoolListH\x00\x12-\n\x0bstring_list\x18\x18 \x01(\x0b\x32\x16.flwr.proto.StringListH\x00\x12+\n\nbytes_list\x18\x19 \x01(\x0b\x32\x15.flwr.proto.BytesListH\x00\x42\x07\n\x05value\"M\n\x10ParametersRecord\x12\x11\n\tdata_keys\x18\x01 \x03(\t\x12&\n\x0b\x64\x61ta_values\x18\x02 \x03(\x0b\x32\x11.flwr.proto.Array\"\x8f\x01\n\rMetricsRecord\x12\x31\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32#.flwr.proto.MetricsRecord.DataEntry\x1aK\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12-\n\x05value\x18\x02 \x01(\x0b\x32\x1e.flwr.proto.MetricsRecordValue:\x02\x38\x01\"\x8f\x01\n\rConfigsRecord\x12\x31\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32#.flwr.proto.ConfigsRecord.DataEntry\x1aK\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12-\n\x05value\x18\x02 \x01(\x0b\x32\x1e.flwr.proto.ConfigsRecordValue:\x02\x38\x01\"\x97\x03\n\tRecordSet\x12\x39\n\nparameters\x18\x01 \x03(\x0b\x32%.flwr.proto.RecordSet.ParametersEntry\x12\x33\n\x07metrics\x18\x02 \x03(\x0b\x32\".flwr.proto.RecordSet.MetricsEntry\x12\x33\n\x07\x63onfigs\x18\x03 \x03(\x0b\x32\".flwr.proto.RecordSet.ConfigsEntry\x1aO\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12+\n\x05value\x18\x02 \x01(\x0b\x32\x1c.flwr.proto.ParametersRecord:\x02\x38\x01\x1aI\n\x0cMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 
\x01(\x0b\x32\x19.flwr.proto.MetricsRecord:\x02\x38\x01\x1aI\n\x0c\x43onfigsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.flwr.proto.ConfigsRecord:\x02\x38\x01\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1a\x66lwr/proto/recordset.proto\x12\nflwr.proto\"\x1a\n\nDoubleList\x12\x0c\n\x04vals\x18\x01 \x03(\x01\"\x18\n\x08SintList\x12\x0c\n\x04vals\x18\x01 \x03(\x12\"\x18\n\x08UintList\x12\x0c\n\x04vals\x18\x01 \x03(\x04\"\x18\n\x08\x42oolList\x12\x0c\n\x04vals\x18\x01 \x03(\x08\"\x1a\n\nStringList\x12\x0c\n\x04vals\x18\x01 \x03(\t\"\x19\n\tBytesList\x12\x0c\n\x04vals\x18\x01 \x03(\x0c\"B\n\x05\x41rray\x12\r\n\x05\x64type\x18\x01 \x01(\t\x12\r\n\x05shape\x18\x02 \x03(\x05\x12\r\n\x05stype\x18\x03 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\"\xd8\x01\n\x12MetricsRecordValue\x12\x10\n\x06\x64ouble\x18\x01 \x01(\x01H\x00\x12\x10\n\x06sint64\x18\x02 \x01(\x12H\x00\x12\x10\n\x06uint64\x18\x03 \x01(\x04H\x00\x12-\n\x0b\x64ouble_list\x18\x15 \x01(\x0b\x32\x16.flwr.proto.DoubleListH\x00\x12)\n\tsint_list\x18\x16 \x01(\x0b\x32\x14.flwr.proto.SintListH\x00\x12)\n\tuint_list\x18\x17 \x01(\x0b\x32\x14.flwr.proto.UintListH\x00\x42\x07\n\x05value\"\x92\x03\n\x12\x43onfigsRecordValue\x12\x10\n\x06\x64ouble\x18\x01 \x01(\x01H\x00\x12\x10\n\x06sint64\x18\x02 \x01(\x12H\x00\x12\x10\n\x06uint64\x18\x03 \x01(\x04H\x00\x12\x0e\n\x04\x62ool\x18\x04 \x01(\x08H\x00\x12\x10\n\x06string\x18\x05 \x01(\tH\x00\x12\x0f\n\x05\x62ytes\x18\x06 \x01(\x0cH\x00\x12-\n\x0b\x64ouble_list\x18\x15 \x01(\x0b\x32\x16.flwr.proto.DoubleListH\x00\x12)\n\tsint_list\x18\x16 \x01(\x0b\x32\x14.flwr.proto.SintListH\x00\x12)\n\tuint_list\x18\x17 \x01(\x0b\x32\x14.flwr.proto.UintListH\x00\x12)\n\tbool_list\x18\x18 \x01(\x0b\x32\x14.flwr.proto.BoolListH\x00\x12-\n\x0bstring_list\x18\x19 \x01(\x0b\x32\x16.flwr.proto.StringListH\x00\x12+\n\nbytes_list\x18\x1a 
\x01(\x0b\x32\x15.flwr.proto.BytesListH\x00\x42\x07\n\x05value\"M\n\x10ParametersRecord\x12\x11\n\tdata_keys\x18\x01 \x03(\t\x12&\n\x0b\x64\x61ta_values\x18\x02 \x03(\x0b\x32\x11.flwr.proto.Array\"\x8f\x01\n\rMetricsRecord\x12\x31\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32#.flwr.proto.MetricsRecord.DataEntry\x1aK\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12-\n\x05value\x18\x02 \x01(\x0b\x32\x1e.flwr.proto.MetricsRecordValue:\x02\x38\x01\"\x8f\x01\n\rConfigsRecord\x12\x31\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32#.flwr.proto.ConfigsRecord.DataEntry\x1aK\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12-\n\x05value\x18\x02 \x01(\x0b\x32\x1e.flwr.proto.ConfigsRecordValue:\x02\x38\x01\"\x97\x03\n\tRecordSet\x12\x39\n\nparameters\x18\x01 \x03(\x0b\x32%.flwr.proto.RecordSet.ParametersEntry\x12\x33\n\x07metrics\x18\x02 \x03(\x0b\x32\".flwr.proto.RecordSet.MetricsEntry\x12\x33\n\x07\x63onfigs\x18\x03 \x03(\x0b\x32\".flwr.proto.RecordSet.ConfigsEntry\x1aO\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12+\n\x05value\x18\x02 \x01(\x0b\x32\x1c.flwr.proto.ParametersRecord:\x02\x38\x01\x1aI\n\x0cMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.flwr.proto.MetricsRecord:\x02\x38\x01\x1aI\n\x0c\x43onfigsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.flwr.proto.ConfigsRecord:\x02\x38\x01\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -33,36 +33,38 @@ _globals['_RECORDSET_CONFIGSENTRY']._serialized_options = b'8\001' _globals['_DOUBLELIST']._serialized_start=42 _globals['_DOUBLELIST']._serialized_end=68 - _globals['_SINT64LIST']._serialized_start=70 - _globals['_SINT64LIST']._serialized_end=96 - _globals['_BOOLLIST']._serialized_start=98 - _globals['_BOOLLIST']._serialized_end=122 - _globals['_STRINGLIST']._serialized_start=124 - _globals['_STRINGLIST']._serialized_end=150 - _globals['_BYTESLIST']._serialized_start=152 - 
_globals['_BYTESLIST']._serialized_end=177 - _globals['_ARRAY']._serialized_start=179 - _globals['_ARRAY']._serialized_end=245 - _globals['_METRICSRECORDVALUE']._serialized_start=248 - _globals['_METRICSRECORDVALUE']._serialized_end=407 - _globals['_CONFIGSRECORDVALUE']._serialized_start=410 - _globals['_CONFIGSRECORDVALUE']._serialized_end=755 - _globals['_PARAMETERSRECORD']._serialized_start=757 - _globals['_PARAMETERSRECORD']._serialized_end=834 - _globals['_METRICSRECORD']._serialized_start=837 - _globals['_METRICSRECORD']._serialized_end=980 - _globals['_METRICSRECORD_DATAENTRY']._serialized_start=905 - _globals['_METRICSRECORD_DATAENTRY']._serialized_end=980 - _globals['_CONFIGSRECORD']._serialized_start=983 - _globals['_CONFIGSRECORD']._serialized_end=1126 - _globals['_CONFIGSRECORD_DATAENTRY']._serialized_start=1051 - _globals['_CONFIGSRECORD_DATAENTRY']._serialized_end=1126 - _globals['_RECORDSET']._serialized_start=1129 - _globals['_RECORDSET']._serialized_end=1536 - _globals['_RECORDSET_PARAMETERSENTRY']._serialized_start=1307 - _globals['_RECORDSET_PARAMETERSENTRY']._serialized_end=1386 - _globals['_RECORDSET_METRICSENTRY']._serialized_start=1388 - _globals['_RECORDSET_METRICSENTRY']._serialized_end=1461 - _globals['_RECORDSET_CONFIGSENTRY']._serialized_start=1463 - _globals['_RECORDSET_CONFIGSENTRY']._serialized_end=1536 + _globals['_SINTLIST']._serialized_start=70 + _globals['_SINTLIST']._serialized_end=94 + _globals['_UINTLIST']._serialized_start=96 + _globals['_UINTLIST']._serialized_end=120 + _globals['_BOOLLIST']._serialized_start=122 + _globals['_BOOLLIST']._serialized_end=146 + _globals['_STRINGLIST']._serialized_start=148 + _globals['_STRINGLIST']._serialized_end=174 + _globals['_BYTESLIST']._serialized_start=176 + _globals['_BYTESLIST']._serialized_end=201 + _globals['_ARRAY']._serialized_start=203 + _globals['_ARRAY']._serialized_end=269 + _globals['_METRICSRECORDVALUE']._serialized_start=272 + 
_globals['_METRICSRECORDVALUE']._serialized_end=488 + _globals['_CONFIGSRECORDVALUE']._serialized_start=491 + _globals['_CONFIGSRECORDVALUE']._serialized_end=893 + _globals['_PARAMETERSRECORD']._serialized_start=895 + _globals['_PARAMETERSRECORD']._serialized_end=972 + _globals['_METRICSRECORD']._serialized_start=975 + _globals['_METRICSRECORD']._serialized_end=1118 + _globals['_METRICSRECORD_DATAENTRY']._serialized_start=1043 + _globals['_METRICSRECORD_DATAENTRY']._serialized_end=1118 + _globals['_CONFIGSRECORD']._serialized_start=1121 + _globals['_CONFIGSRECORD']._serialized_end=1264 + _globals['_CONFIGSRECORD_DATAENTRY']._serialized_start=1189 + _globals['_CONFIGSRECORD_DATAENTRY']._serialized_end=1264 + _globals['_RECORDSET']._serialized_start=1267 + _globals['_RECORDSET']._serialized_end=1674 + _globals['_RECORDSET_PARAMETERSENTRY']._serialized_start=1445 + _globals['_RECORDSET_PARAMETERSENTRY']._serialized_end=1524 + _globals['_RECORDSET_METRICSENTRY']._serialized_start=1526 + _globals['_RECORDSET_METRICSENTRY']._serialized_end=1599 + _globals['_RECORDSET_CONFIGSENTRY']._serialized_start=1601 + _globals['_RECORDSET_CONFIGSENTRY']._serialized_end=1674 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/recordset_pb2.pyi b/src/py/flwr/proto/recordset_pb2.pyi index 86244697129c..91d17e3e6473 100644 --- a/src/py/flwr/proto/recordset_pb2.pyi +++ b/src/py/flwr/proto/recordset_pb2.pyi @@ -23,7 +23,7 @@ class DoubleList(google.protobuf.message.Message): def ClearField(self, field_name: typing_extensions.Literal["vals",b"vals"]) -> None: ... global___DoubleList = DoubleList -class Sint64List(google.protobuf.message.Message): +class SintList(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor VALS_FIELD_NUMBER: builtins.int @property @@ -33,7 +33,19 @@ class Sint64List(google.protobuf.message.Message): vals: typing.Optional[typing.Iterable[builtins.int]] = ..., ) -> None: ... 
def ClearField(self, field_name: typing_extensions.Literal["vals",b"vals"]) -> None: ... -global___Sint64List = Sint64List +global___SintList = SintList + +class UintList(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + VALS_FIELD_NUMBER: builtins.int + @property + def vals(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: ... + def __init__(self, + *, + vals: typing.Optional[typing.Iterable[builtins.int]] = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["vals",b"vals"]) -> None: ... +global___UintList = UintList class BoolList(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor @@ -96,39 +108,48 @@ class MetricsRecordValue(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor DOUBLE_FIELD_NUMBER: builtins.int SINT64_FIELD_NUMBER: builtins.int + UINT64_FIELD_NUMBER: builtins.int DOUBLE_LIST_FIELD_NUMBER: builtins.int - SINT64_LIST_FIELD_NUMBER: builtins.int + SINT_LIST_FIELD_NUMBER: builtins.int + UINT_LIST_FIELD_NUMBER: builtins.int double: builtins.float """Single element""" sint64: builtins.int + uint64: builtins.int @property def double_list(self) -> global___DoubleList: """List types""" pass @property - def sint64_list(self) -> global___Sint64List: ... + def sint_list(self) -> global___SintList: ... + @property + def uint_list(self) -> global___UintList: ... def __init__(self, *, double: builtins.float = ..., sint64: builtins.int = ..., + uint64: builtins.int = ..., double_list: typing.Optional[global___DoubleList] = ..., - sint64_list: typing.Optional[global___Sint64List] = ..., + sint_list: typing.Optional[global___SintList] = ..., + uint_list: typing.Optional[global___UintList] = ..., ) -> None: ... 
- def HasField(self, field_name: typing_extensions.Literal["double",b"double","double_list",b"double_list","sint64",b"sint64","sint64_list",b"sint64_list","value",b"value"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["double",b"double","double_list",b"double_list","sint64",b"sint64","sint64_list",b"sint64_list","value",b"value"]) -> None: ... - def WhichOneof(self, oneof_group: typing_extensions.Literal["value",b"value"]) -> typing.Optional[typing_extensions.Literal["double","sint64","double_list","sint64_list"]]: ... + def HasField(self, field_name: typing_extensions.Literal["double",b"double","double_list",b"double_list","sint64",b"sint64","sint_list",b"sint_list","uint64",b"uint64","uint_list",b"uint_list","value",b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["double",b"double","double_list",b"double_list","sint64",b"sint64","sint_list",b"sint_list","uint64",b"uint64","uint_list",b"uint_list","value",b"value"]) -> None: ... + def WhichOneof(self, oneof_group: typing_extensions.Literal["value",b"value"]) -> typing.Optional[typing_extensions.Literal["double","sint64","uint64","double_list","sint_list","uint_list"]]: ... 
global___MetricsRecordValue = MetricsRecordValue class ConfigsRecordValue(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor DOUBLE_FIELD_NUMBER: builtins.int SINT64_FIELD_NUMBER: builtins.int + UINT64_FIELD_NUMBER: builtins.int BOOL_FIELD_NUMBER: builtins.int STRING_FIELD_NUMBER: builtins.int BYTES_FIELD_NUMBER: builtins.int DOUBLE_LIST_FIELD_NUMBER: builtins.int - SINT64_LIST_FIELD_NUMBER: builtins.int + SINT_LIST_FIELD_NUMBER: builtins.int + UINT_LIST_FIELD_NUMBER: builtins.int BOOL_LIST_FIELD_NUMBER: builtins.int STRING_LIST_FIELD_NUMBER: builtins.int BYTES_LIST_FIELD_NUMBER: builtins.int @@ -136,6 +157,7 @@ class ConfigsRecordValue(google.protobuf.message.Message): """Single element""" sint64: builtins.int + uint64: builtins.int bool: builtins.bool string: typing.Text bytes: builtins.bytes @@ -144,7 +166,9 @@ class ConfigsRecordValue(google.protobuf.message.Message): """List types""" pass @property - def sint64_list(self) -> global___Sint64List: ... + def sint_list(self) -> global___SintList: ... + @property + def uint_list(self) -> global___UintList: ... @property def bool_list(self) -> global___BoolList: ... @property @@ -155,18 +179,20 @@ class ConfigsRecordValue(google.protobuf.message.Message): *, double: builtins.float = ..., sint64: builtins.int = ..., + uint64: builtins.int = ..., bool: builtins.bool = ..., string: typing.Text = ..., bytes: builtins.bytes = ..., double_list: typing.Optional[global___DoubleList] = ..., - sint64_list: typing.Optional[global___Sint64List] = ..., + sint_list: typing.Optional[global___SintList] = ..., + uint_list: typing.Optional[global___UintList] = ..., bool_list: typing.Optional[global___BoolList] = ..., string_list: typing.Optional[global___StringList] = ..., bytes_list: typing.Optional[global___BytesList] = ..., ) -> None: ... 
- def HasField(self, field_name: typing_extensions.Literal["bool",b"bool","bool_list",b"bool_list","bytes",b"bytes","bytes_list",b"bytes_list","double",b"double","double_list",b"double_list","sint64",b"sint64","sint64_list",b"sint64_list","string",b"string","string_list",b"string_list","value",b"value"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["bool",b"bool","bool_list",b"bool_list","bytes",b"bytes","bytes_list",b"bytes_list","double",b"double","double_list",b"double_list","sint64",b"sint64","sint64_list",b"sint64_list","string",b"string","string_list",b"string_list","value",b"value"]) -> None: ... - def WhichOneof(self, oneof_group: typing_extensions.Literal["value",b"value"]) -> typing.Optional[typing_extensions.Literal["double","sint64","bool","string","bytes","double_list","sint64_list","bool_list","string_list","bytes_list"]]: ... + def HasField(self, field_name: typing_extensions.Literal["bool",b"bool","bool_list",b"bool_list","bytes",b"bytes","bytes_list",b"bytes_list","double",b"double","double_list",b"double_list","sint64",b"sint64","sint_list",b"sint_list","string",b"string","string_list",b"string_list","uint64",b"uint64","uint_list",b"uint_list","value",b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["bool",b"bool","bool_list",b"bool_list","bytes",b"bytes","bytes_list",b"bytes_list","double",b"double","double_list",b"double_list","sint64",b"sint64","sint_list",b"sint_list","string",b"string","string_list",b"string_list","uint64",b"uint64","uint_list",b"uint_list","value",b"value"]) -> None: ... + def WhichOneof(self, oneof_group: typing_extensions.Literal["value",b"value"]) -> typing.Optional[typing_extensions.Literal["double","sint64","uint64","bool","string","bytes","double_list","sint_list","uint_list","bool_list","string_list","bytes_list"]]: ... 
global___ConfigsRecordValue = ConfigsRecordValue class ParametersRecord(google.protobuf.message.Message): diff --git a/src/py/flwr/proto/run_pb2.py b/src/py/flwr/proto/run_pb2.py new file mode 100644 index 000000000000..cc3f6897918f --- /dev/null +++ b/src/py/flwr/proto/run_pb2.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: flwr/proto/run.proto +# Protobuf Python Version: 4.25.0 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from flwr.proto import fab_pb2 as flwr_dot_proto_dot_fab__pb2 +from flwr.proto import node_pb2 as flwr_dot_proto_dot_node__pb2 +from flwr.proto import transport_pb2 as flwr_dot_proto_dot_transport__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x14\x66lwr/proto/run.proto\x12\nflwr.proto\x1a\x14\x66lwr/proto/fab.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x1a\x66lwr/proto/transport.proto\"\xd5\x01\n\x03Run\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\x12\x0e\n\x06\x66\x61\x62_id\x18\x02 \x01(\t\x12\x13\n\x0b\x66\x61\x62_version\x18\x03 \x01(\t\x12<\n\x0foverride_config\x18\x04 \x03(\x0b\x32#.flwr.proto.Run.OverrideConfigEntry\x12\x10\n\x08\x66\x61\x62_hash\x18\x05 \x01(\t\x1aI\n\x13OverrideConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"@\n\tRunStatus\x12\x0e\n\x06status\x18\x01 \x01(\t\x12\x12\n\nsub_status\x18\x02 \x01(\t\x12\x0f\n\x07\x64\x65tails\x18\x03 \x01(\t\"\xeb\x01\n\x10\x43reateRunRequest\x12\x0e\n\x06\x66\x61\x62_id\x18\x01 \x01(\t\x12\x13\n\x0b\x66\x61\x62_version\x18\x02 \x01(\t\x12I\n\x0foverride_config\x18\x03 
\x03(\x0b\x32\x30.flwr.proto.CreateRunRequest.OverrideConfigEntry\x12\x1c\n\x03\x66\x61\x62\x18\x04 \x01(\x0b\x32\x0f.flwr.proto.Fab\x1aI\n\x13OverrideConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"#\n\x11\x43reateRunResponse\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\"?\n\rGetRunRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x0e\n\x06run_id\x18\x02 \x01(\x04\".\n\x0eGetRunResponse\x12\x1c\n\x03run\x18\x01 \x01(\x0b\x32\x0f.flwr.proto.Run\"S\n\x16UpdateRunStatusRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\x12)\n\nrun_status\x18\x02 \x01(\x0b\x32\x15.flwr.proto.RunStatus\"\x19\n\x17UpdateRunStatusResponse\"F\n\x13GetRunStatusRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x0f\n\x07run_ids\x18\x02 \x03(\x04\"\xb1\x01\n\x14GetRunStatusResponse\x12L\n\x0frun_status_dict\x18\x01 \x03(\x0b\x32\x33.flwr.proto.GetRunStatusResponse.RunStatusDictEntry\x1aK\n\x12RunStatusDictEntry\x12\x0b\n\x03key\x18\x01 \x01(\x04\x12$\n\x05value\x18\x02 \x01(\x0b\x32\x15.flwr.proto.RunStatus:\x02\x38\x01\x62\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flwr.proto.run_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + _globals['_RUN_OVERRIDECONFIGENTRY']._options = None + _globals['_RUN_OVERRIDECONFIGENTRY']._serialized_options = b'8\001' + _globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._options = None + _globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._serialized_options = b'8\001' + _globals['_GETRUNSTATUSRESPONSE_RUNSTATUSDICTENTRY']._options = None + _globals['_GETRUNSTATUSRESPONSE_RUNSTATUSDICTENTRY']._serialized_options = b'8\001' + _globals['_RUN']._serialized_start=110 + _globals['_RUN']._serialized_end=323 + _globals['_RUN_OVERRIDECONFIGENTRY']._serialized_start=250 + 
_globals['_RUN_OVERRIDECONFIGENTRY']._serialized_end=323 + _globals['_RUNSTATUS']._serialized_start=325 + _globals['_RUNSTATUS']._serialized_end=389 + _globals['_CREATERUNREQUEST']._serialized_start=392 + _globals['_CREATERUNREQUEST']._serialized_end=627 + _globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._serialized_start=250 + _globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._serialized_end=323 + _globals['_CREATERUNRESPONSE']._serialized_start=629 + _globals['_CREATERUNRESPONSE']._serialized_end=664 + _globals['_GETRUNREQUEST']._serialized_start=666 + _globals['_GETRUNREQUEST']._serialized_end=729 + _globals['_GETRUNRESPONSE']._serialized_start=731 + _globals['_GETRUNRESPONSE']._serialized_end=777 + _globals['_UPDATERUNSTATUSREQUEST']._serialized_start=779 + _globals['_UPDATERUNSTATUSREQUEST']._serialized_end=862 + _globals['_UPDATERUNSTATUSRESPONSE']._serialized_start=864 + _globals['_UPDATERUNSTATUSRESPONSE']._serialized_end=889 + _globals['_GETRUNSTATUSREQUEST']._serialized_start=891 + _globals['_GETRUNSTATUSREQUEST']._serialized_end=961 + _globals['_GETRUNSTATUSRESPONSE']._serialized_start=964 + _globals['_GETRUNSTATUSRESPONSE']._serialized_end=1141 + _globals['_GETRUNSTATUSRESPONSE_RUNSTATUSDICTENTRY']._serialized_start=1066 + _globals['_GETRUNSTATUSRESPONSE_RUNSTATUSDICTENTRY']._serialized_end=1141 +# @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/run_pb2.pyi b/src/py/flwr/proto/run_pb2.pyi new file mode 100644 index 000000000000..16411712eaf2 --- /dev/null +++ b/src/py/flwr/proto/run_pb2.pyi @@ -0,0 +1,225 @@ +""" +@generated by mypy-protobuf. Do not edit manually! 
+isort:skip_file +""" +import builtins +import flwr.proto.fab_pb2 +import flwr.proto.node_pb2 +import flwr.proto.transport_pb2 +import google.protobuf.descriptor +import google.protobuf.internal.containers +import google.protobuf.message +import typing +import typing_extensions + +DESCRIPTOR: google.protobuf.descriptor.FileDescriptor + +class Run(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + class OverrideConfigEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: typing.Text + @property + def value(self) -> flwr.proto.transport_pb2.Scalar: ... + def __init__(self, + *, + key: typing.Text = ..., + value: typing.Optional[flwr.proto.transport_pb2.Scalar] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... + + RUN_ID_FIELD_NUMBER: builtins.int + FAB_ID_FIELD_NUMBER: builtins.int + FAB_VERSION_FIELD_NUMBER: builtins.int + OVERRIDE_CONFIG_FIELD_NUMBER: builtins.int + FAB_HASH_FIELD_NUMBER: builtins.int + run_id: builtins.int + fab_id: typing.Text + fab_version: typing.Text + @property + def override_config(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, flwr.proto.transport_pb2.Scalar]: ... + fab_hash: typing.Text + def __init__(self, + *, + run_id: builtins.int = ..., + fab_id: typing.Text = ..., + fab_version: typing.Text = ..., + override_config: typing.Optional[typing.Mapping[typing.Text, flwr.proto.transport_pb2.Scalar]] = ..., + fab_hash: typing.Text = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["fab_hash",b"fab_hash","fab_id",b"fab_id","fab_version",b"fab_version","override_config",b"override_config","run_id",b"run_id"]) -> None: ... 
+global___Run = Run + +class RunStatus(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + STATUS_FIELD_NUMBER: builtins.int + SUB_STATUS_FIELD_NUMBER: builtins.int + DETAILS_FIELD_NUMBER: builtins.int + status: typing.Text + """"starting", "running", "finished" """ + + sub_status: typing.Text + """"completed", "failed", "stopped" or "" (non-finished)""" + + details: typing.Text + """failure details""" + + def __init__(self, + *, + status: typing.Text = ..., + sub_status: typing.Text = ..., + details: typing.Text = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["details",b"details","status",b"status","sub_status",b"sub_status"]) -> None: ... +global___RunStatus = RunStatus + +class CreateRunRequest(google.protobuf.message.Message): + """CreateRun""" + DESCRIPTOR: google.protobuf.descriptor.Descriptor + class OverrideConfigEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: typing.Text + @property + def value(self) -> flwr.proto.transport_pb2.Scalar: ... + def __init__(self, + *, + key: typing.Text = ..., + value: typing.Optional[flwr.proto.transport_pb2.Scalar] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... + + FAB_ID_FIELD_NUMBER: builtins.int + FAB_VERSION_FIELD_NUMBER: builtins.int + OVERRIDE_CONFIG_FIELD_NUMBER: builtins.int + FAB_FIELD_NUMBER: builtins.int + fab_id: typing.Text + fab_version: typing.Text + @property + def override_config(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, flwr.proto.transport_pb2.Scalar]: ... + @property + def fab(self) -> flwr.proto.fab_pb2.Fab: ... 
+ def __init__(self, + *, + fab_id: typing.Text = ..., + fab_version: typing.Text = ..., + override_config: typing.Optional[typing.Mapping[typing.Text, flwr.proto.transport_pb2.Scalar]] = ..., + fab: typing.Optional[flwr.proto.fab_pb2.Fab] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["fab",b"fab"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["fab",b"fab","fab_id",b"fab_id","fab_version",b"fab_version","override_config",b"override_config"]) -> None: ... +global___CreateRunRequest = CreateRunRequest + +class CreateRunResponse(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + RUN_ID_FIELD_NUMBER: builtins.int + run_id: builtins.int + def __init__(self, + *, + run_id: builtins.int = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["run_id",b"run_id"]) -> None: ... +global___CreateRunResponse = CreateRunResponse + +class GetRunRequest(google.protobuf.message.Message): + """GetRun""" + DESCRIPTOR: google.protobuf.descriptor.Descriptor + NODE_FIELD_NUMBER: builtins.int + RUN_ID_FIELD_NUMBER: builtins.int + @property + def node(self) -> flwr.proto.node_pb2.Node: ... + run_id: builtins.int + def __init__(self, + *, + node: typing.Optional[flwr.proto.node_pb2.Node] = ..., + run_id: builtins.int = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["node",b"node"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["node",b"node","run_id",b"run_id"]) -> None: ... +global___GetRunRequest = GetRunRequest + +class GetRunResponse(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + RUN_FIELD_NUMBER: builtins.int + @property + def run(self) -> global___Run: ... + def __init__(self, + *, + run: typing.Optional[global___Run] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["run",b"run"]) -> builtins.bool: ... 
+ def ClearField(self, field_name: typing_extensions.Literal["run",b"run"]) -> None: ... +global___GetRunResponse = GetRunResponse + +class UpdateRunStatusRequest(google.protobuf.message.Message): + """UpdateRunStatus""" + DESCRIPTOR: google.protobuf.descriptor.Descriptor + RUN_ID_FIELD_NUMBER: builtins.int + RUN_STATUS_FIELD_NUMBER: builtins.int + run_id: builtins.int + @property + def run_status(self) -> global___RunStatus: ... + def __init__(self, + *, + run_id: builtins.int = ..., + run_status: typing.Optional[global___RunStatus] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["run_status",b"run_status"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["run_id",b"run_id","run_status",b"run_status"]) -> None: ... +global___UpdateRunStatusRequest = UpdateRunStatusRequest + +class UpdateRunStatusResponse(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + def __init__(self, + ) -> None: ... +global___UpdateRunStatusResponse = UpdateRunStatusResponse + +class GetRunStatusRequest(google.protobuf.message.Message): + """GetRunStatus""" + DESCRIPTOR: google.protobuf.descriptor.Descriptor + NODE_FIELD_NUMBER: builtins.int + RUN_IDS_FIELD_NUMBER: builtins.int + @property + def node(self) -> flwr.proto.node_pb2.Node: ... + @property + def run_ids(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: ... + def __init__(self, + *, + node: typing.Optional[flwr.proto.node_pb2.Node] = ..., + run_ids: typing.Optional[typing.Iterable[builtins.int]] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["node",b"node"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["node",b"node","run_ids",b"run_ids"]) -> None: ... 
+global___GetRunStatusRequest = GetRunStatusRequest + +class GetRunStatusResponse(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + class RunStatusDictEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: builtins.int + @property + def value(self) -> global___RunStatus: ... + def __init__(self, + *, + key: builtins.int = ..., + value: typing.Optional[global___RunStatus] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... + + RUN_STATUS_DICT_FIELD_NUMBER: builtins.int + @property + def run_status_dict(self) -> google.protobuf.internal.containers.MessageMap[builtins.int, global___RunStatus]: ... + def __init__(self, + *, + run_status_dict: typing.Optional[typing.Mapping[builtins.int, global___RunStatus]] = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["run_status_dict",b"run_status_dict"]) -> None: ... +global___GetRunStatusResponse = GetRunStatusResponse diff --git a/src/py/flwr/proto/run_pb2_grpc.py b/src/py/flwr/proto/run_pb2_grpc.py new file mode 100644 index 000000000000..2daafffebfc8 --- /dev/null +++ b/src/py/flwr/proto/run_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/src/py/flwr/proto/run_pb2_grpc.pyi b/src/py/flwr/proto/run_pb2_grpc.pyi new file mode 100644 index 000000000000..f3a5a087ef5d --- /dev/null +++ b/src/py/flwr/proto/run_pb2_grpc.pyi @@ -0,0 +1,4 @@ +""" +@generated by mypy-protobuf. Do not edit manually! 
+isort:skip_file +""" diff --git a/src/py/flwr/proto/task_pb2.py b/src/py/flwr/proto/task_pb2.py index 5f6e9e7be583..75b022dc65ea 100644 --- a/src/py/flwr/proto/task_pb2.py +++ b/src/py/flwr/proto/task_pb2.py @@ -14,21 +14,20 @@ from flwr.proto import node_pb2 as flwr_dot_proto_dot_node__pb2 from flwr.proto import recordset_pb2 as flwr_dot_proto_dot_recordset__pb2 -from flwr.proto import transport_pb2 as flwr_dot_proto_dot_transport__pb2 from flwr.proto import error_pb2 as flwr_dot_proto_dot_error__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/task.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x1a\x66lwr/proto/recordset.proto\x1a\x1a\x66lwr/proto/transport.proto\x1a\x16\x66lwr/proto/error.proto\"\x89\x02\n\x04Task\x12\"\n\x08producer\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\"\n\x08\x63onsumer\x18\x02 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x12\n\ncreated_at\x18\x03 \x01(\x01\x12\x14\n\x0c\x64\x65livered_at\x18\x04 \x01(\t\x12\x11\n\tpushed_at\x18\x05 \x01(\x01\x12\x0b\n\x03ttl\x18\x06 \x01(\x01\x12\x10\n\x08\x61ncestry\x18\x07 \x03(\t\x12\x11\n\ttask_type\x18\x08 \x01(\t\x12(\n\trecordset\x18\t \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12 \n\x05\x65rror\x18\n \x01(\x0b\x32\x11.flwr.proto.Error\"\\\n\x07TaskIns\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Task\"\\\n\x07TaskRes\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Taskb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/task.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x1a\x66lwr/proto/recordset.proto\x1a\x16\x66lwr/proto/error.proto\"\x89\x02\n\x04Task\x12\"\n\x08producer\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\"\n\x08\x63onsumer\x18\x02 
\x01(\x0b\x32\x10.flwr.proto.Node\x12\x12\n\ncreated_at\x18\x03 \x01(\x01\x12\x14\n\x0c\x64\x65livered_at\x18\x04 \x01(\t\x12\x11\n\tpushed_at\x18\x05 \x01(\x01\x12\x0b\n\x03ttl\x18\x06 \x01(\x01\x12\x10\n\x08\x61ncestry\x18\x07 \x03(\t\x12\x11\n\ttask_type\x18\x08 \x01(\t\x12(\n\trecordset\x18\t \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12 \n\x05\x65rror\x18\n \x01(\x0b\x32\x11.flwr.proto.Error\"\\\n\x07TaskIns\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x04\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Task\"\\\n\x07TaskRes\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x04\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Taskb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flwr.proto.task_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None - _globals['_TASK']._serialized_start=141 - _globals['_TASK']._serialized_end=406 - _globals['_TASKINS']._serialized_start=408 - _globals['_TASKINS']._serialized_end=500 - _globals['_TASKRES']._serialized_start=502 - _globals['_TASKRES']._serialized_end=594 + _globals['_TASK']._serialized_start=113 + _globals['_TASK']._serialized_end=378 + _globals['_TASKINS']._serialized_start=380 + _globals['_TASKINS']._serialized_end=472 + _globals['_TASKRES']._serialized_start=474 + _globals['_TASKRES']._serialized_end=566 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/transport_pb2.py b/src/py/flwr/proto/transport_pb2.py index d3aae72b63ab..b457463f99ca 100644 --- a/src/py/flwr/proto/transport_pb2.py +++ b/src/py/flwr/proto/transport_pb2.py @@ -14,7 +14,7 @@ -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1a\x66lwr/proto/transport.proto\x12\nflwr.proto\"9\n\x06Status\x12\x1e\n\x04\x63ode\x18\x01 
\x01(\x0e\x32\x10.flwr.proto.Code\x12\x0f\n\x07message\x18\x02 \x01(\t\"2\n\nParameters\x12\x0f\n\x07tensors\x18\x01 \x03(\x0c\x12\x13\n\x0btensor_type\x18\x02 \x01(\t\"\xba\x08\n\rServerMessage\x12?\n\rreconnect_ins\x18\x01 \x01(\x0b\x32&.flwr.proto.ServerMessage.ReconnectInsH\x00\x12H\n\x12get_properties_ins\x18\x02 \x01(\x0b\x32*.flwr.proto.ServerMessage.GetPropertiesInsH\x00\x12H\n\x12get_parameters_ins\x18\x03 \x01(\x0b\x32*.flwr.proto.ServerMessage.GetParametersInsH\x00\x12\x33\n\x07\x66it_ins\x18\x04 \x01(\x0b\x32 .flwr.proto.ServerMessage.FitInsH\x00\x12=\n\x0c\x65valuate_ins\x18\x05 \x01(\x0b\x32%.flwr.proto.ServerMessage.EvaluateInsH\x00\x1a\x1f\n\x0cReconnectIns\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x1a\x9d\x01\n\x10GetPropertiesIns\x12\x46\n\x06\x63onfig\x18\x01 \x03(\x0b\x32\x36.flwr.proto.ServerMessage.GetPropertiesIns.ConfigEntry\x1a\x41\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\x9d\x01\n\x10GetParametersIns\x12\x46\n\x06\x63onfig\x18\x01 \x03(\x0b\x32\x36.flwr.proto.ServerMessage.GetParametersIns.ConfigEntry\x1a\x41\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\xb5\x01\n\x06\x46itIns\x12*\n\nparameters\x18\x01 \x01(\x0b\x32\x16.flwr.proto.Parameters\x12<\n\x06\x63onfig\x18\x02 \x03(\x0b\x32,.flwr.proto.ServerMessage.FitIns.ConfigEntry\x1a\x41\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\xbf\x01\n\x0b\x45valuateIns\x12*\n\nparameters\x18\x01 \x01(\x0b\x32\x16.flwr.proto.Parameters\x12\x41\n\x06\x63onfig\x18\x02 \x03(\x0b\x32\x31.flwr.proto.ServerMessage.EvaluateIns.ConfigEntry\x1a\x41\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x42\x05\n\x03msg\"\xa0\t\n\rClientMessage\x12\x41\n\x0e\x64isconnect_res\x18\x01 
\x01(\x0b\x32\'.flwr.proto.ClientMessage.DisconnectResH\x00\x12H\n\x12get_properties_res\x18\x02 \x01(\x0b\x32*.flwr.proto.ClientMessage.GetPropertiesResH\x00\x12H\n\x12get_parameters_res\x18\x03 \x01(\x0b\x32*.flwr.proto.ClientMessage.GetParametersResH\x00\x12\x33\n\x07\x66it_res\x18\x04 \x01(\x0b\x32 .flwr.proto.ClientMessage.FitResH\x00\x12=\n\x0c\x65valuate_res\x18\x05 \x01(\x0b\x32%.flwr.proto.ClientMessage.EvaluateResH\x00\x1a\x33\n\rDisconnectRes\x12\"\n\x06reason\x18\x01 \x01(\x0e\x32\x12.flwr.proto.Reason\x1a\xcd\x01\n\x10GetPropertiesRes\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.flwr.proto.Status\x12N\n\nproperties\x18\x02 \x03(\x0b\x32:.flwr.proto.ClientMessage.GetPropertiesRes.PropertiesEntry\x1a\x45\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\x62\n\x10GetParametersRes\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.flwr.proto.Status\x12*\n\nparameters\x18\x02 \x01(\x0b\x32\x16.flwr.proto.Parameters\x1a\xf2\x01\n\x06\x46itRes\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.flwr.proto.Status\x12*\n\nparameters\x18\x02 \x01(\x0b\x32\x16.flwr.proto.Parameters\x12\x14\n\x0cnum_examples\x18\x03 \x01(\x03\x12>\n\x07metrics\x18\x04 \x03(\x0b\x32-.flwr.proto.ClientMessage.FitRes.MetricsEntry\x1a\x42\n\x0cMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\xde\x01\n\x0b\x45valuateRes\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.flwr.proto.Status\x12\x0c\n\x04loss\x18\x02 \x01(\x02\x12\x14\n\x0cnum_examples\x18\x03 \x01(\x03\x12\x43\n\x07metrics\x18\x04 \x03(\x0b\x32\x32.flwr.proto.ClientMessage.EvaluateRes.MetricsEntry\x1a\x42\n\x0cMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x42\x05\n\x03msg\"i\n\x06Scalar\x12\x10\n\x06\x64ouble\x18\x01 \x01(\x01H\x00\x12\x10\n\x06sint64\x18\x08 \x01(\x12H\x00\x12\x0e\n\x04\x62ool\x18\r 
\x01(\x08H\x00\x12\x10\n\x06string\x18\x0e \x01(\tH\x00\x12\x0f\n\x05\x62ytes\x18\x0f \x01(\x0cH\x00\x42\x08\n\x06scalar*\x8d\x01\n\x04\x43ode\x12\x06\n\x02OK\x10\x00\x12\"\n\x1eGET_PROPERTIES_NOT_IMPLEMENTED\x10\x01\x12\"\n\x1eGET_PARAMETERS_NOT_IMPLEMENTED\x10\x02\x12\x17\n\x13\x46IT_NOT_IMPLEMENTED\x10\x03\x12\x1c\n\x18\x45VALUATE_NOT_IMPLEMENTED\x10\x04*[\n\x06Reason\x12\x0b\n\x07UNKNOWN\x10\x00\x12\r\n\tRECONNECT\x10\x01\x12\x16\n\x12POWER_DISCONNECTED\x10\x02\x12\x14\n\x10WIFI_UNAVAILABLE\x10\x03\x12\x07\n\x03\x41\x43K\x10\x04\x32S\n\rFlowerService\x12\x42\n\x04Join\x12\x19.flwr.proto.ClientMessage\x1a\x19.flwr.proto.ServerMessage\"\x00(\x01\x30\x01\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1a\x66lwr/proto/transport.proto\x12\nflwr.proto\"9\n\x06Status\x12\x1e\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x10.flwr.proto.Code\x12\x0f\n\x07message\x18\x02 \x01(\t\"2\n\nParameters\x12\x0f\n\x07tensors\x18\x01 \x03(\x0c\x12\x13\n\x0btensor_type\x18\x02 \x01(\t\"\xba\x08\n\rServerMessage\x12?\n\rreconnect_ins\x18\x01 \x01(\x0b\x32&.flwr.proto.ServerMessage.ReconnectInsH\x00\x12H\n\x12get_properties_ins\x18\x02 \x01(\x0b\x32*.flwr.proto.ServerMessage.GetPropertiesInsH\x00\x12H\n\x12get_parameters_ins\x18\x03 \x01(\x0b\x32*.flwr.proto.ServerMessage.GetParametersInsH\x00\x12\x33\n\x07\x66it_ins\x18\x04 \x01(\x0b\x32 .flwr.proto.ServerMessage.FitInsH\x00\x12=\n\x0c\x65valuate_ins\x18\x05 \x01(\x0b\x32%.flwr.proto.ServerMessage.EvaluateInsH\x00\x1a\x1f\n\x0cReconnectIns\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x1a\x9d\x01\n\x10GetPropertiesIns\x12\x46\n\x06\x63onfig\x18\x01 \x03(\x0b\x32\x36.flwr.proto.ServerMessage.GetPropertiesIns.ConfigEntry\x1a\x41\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\x9d\x01\n\x10GetParametersIns\x12\x46\n\x06\x63onfig\x18\x01 
\x03(\x0b\x32\x36.flwr.proto.ServerMessage.GetParametersIns.ConfigEntry\x1a\x41\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\xb5\x01\n\x06\x46itIns\x12*\n\nparameters\x18\x01 \x01(\x0b\x32\x16.flwr.proto.Parameters\x12<\n\x06\x63onfig\x18\x02 \x03(\x0b\x32,.flwr.proto.ServerMessage.FitIns.ConfigEntry\x1a\x41\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\xbf\x01\n\x0b\x45valuateIns\x12*\n\nparameters\x18\x01 \x01(\x0b\x32\x16.flwr.proto.Parameters\x12\x41\n\x06\x63onfig\x18\x02 \x03(\x0b\x32\x31.flwr.proto.ServerMessage.EvaluateIns.ConfigEntry\x1a\x41\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x42\x05\n\x03msg\"\xa0\t\n\rClientMessage\x12\x41\n\x0e\x64isconnect_res\x18\x01 \x01(\x0b\x32\'.flwr.proto.ClientMessage.DisconnectResH\x00\x12H\n\x12get_properties_res\x18\x02 \x01(\x0b\x32*.flwr.proto.ClientMessage.GetPropertiesResH\x00\x12H\n\x12get_parameters_res\x18\x03 \x01(\x0b\x32*.flwr.proto.ClientMessage.GetParametersResH\x00\x12\x33\n\x07\x66it_res\x18\x04 \x01(\x0b\x32 .flwr.proto.ClientMessage.FitResH\x00\x12=\n\x0c\x65valuate_res\x18\x05 \x01(\x0b\x32%.flwr.proto.ClientMessage.EvaluateResH\x00\x1a\x33\n\rDisconnectRes\x12\"\n\x06reason\x18\x01 \x01(\x0e\x32\x12.flwr.proto.Reason\x1a\xcd\x01\n\x10GetPropertiesRes\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.flwr.proto.Status\x12N\n\nproperties\x18\x02 \x03(\x0b\x32:.flwr.proto.ClientMessage.GetPropertiesRes.PropertiesEntry\x1a\x45\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\x62\n\x10GetParametersRes\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.flwr.proto.Status\x12*\n\nparameters\x18\x02 \x01(\x0b\x32\x16.flwr.proto.Parameters\x1a\xf2\x01\n\x06\x46itRes\x12\"\n\x06status\x18\x01 
\x01(\x0b\x32\x12.flwr.proto.Status\x12*\n\nparameters\x18\x02 \x01(\x0b\x32\x16.flwr.proto.Parameters\x12\x14\n\x0cnum_examples\x18\x03 \x01(\x03\x12>\n\x07metrics\x18\x04 \x03(\x0b\x32-.flwr.proto.ClientMessage.FitRes.MetricsEntry\x1a\x42\n\x0cMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\xde\x01\n\x0b\x45valuateRes\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.flwr.proto.Status\x12\x0c\n\x04loss\x18\x02 \x01(\x02\x12\x14\n\x0cnum_examples\x18\x03 \x01(\x03\x12\x43\n\x07metrics\x18\x04 \x03(\x0b\x32\x32.flwr.proto.ClientMessage.EvaluateRes.MetricsEntry\x1a\x42\n\x0cMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x42\x05\n\x03msg\"{\n\x06Scalar\x12\x10\n\x06\x64ouble\x18\x01 \x01(\x01H\x00\x12\x10\n\x06uint64\x18\x06 \x01(\x04H\x00\x12\x10\n\x06sint64\x18\x08 \x01(\x12H\x00\x12\x0e\n\x04\x62ool\x18\r \x01(\x08H\x00\x12\x10\n\x06string\x18\x0e \x01(\tH\x00\x12\x0f\n\x05\x62ytes\x18\x0f \x01(\x0cH\x00\x42\x08\n\x06scalar*\x8d\x01\n\x04\x43ode\x12\x06\n\x02OK\x10\x00\x12\"\n\x1eGET_PROPERTIES_NOT_IMPLEMENTED\x10\x01\x12\"\n\x1eGET_PARAMETERS_NOT_IMPLEMENTED\x10\x02\x12\x17\n\x13\x46IT_NOT_IMPLEMENTED\x10\x03\x12\x1c\n\x18\x45VALUATE_NOT_IMPLEMENTED\x10\x04*[\n\x06Reason\x12\x0b\n\x07UNKNOWN\x10\x00\x12\r\n\tRECONNECT\x10\x01\x12\x16\n\x12POWER_DISCONNECTED\x10\x02\x12\x14\n\x10WIFI_UNAVAILABLE\x10\x03\x12\x07\n\x03\x41\x43K\x10\x04\x32S\n\rFlowerService\x12\x42\n\x04Join\x12\x19.flwr.proto.ClientMessage\x1a\x19.flwr.proto.ServerMessage\"\x00(\x01\x30\x01\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -35,10 +35,10 @@ _globals['_CLIENTMESSAGE_FITRES_METRICSENTRY']._serialized_options = b'8\001' _globals['_CLIENTMESSAGE_EVALUATERES_METRICSENTRY']._options = None _globals['_CLIENTMESSAGE_EVALUATERES_METRICSENTRY']._serialized_options = b'8\001' - 
_globals['_CODE']._serialized_start=2533 - _globals['_CODE']._serialized_end=2674 - _globals['_REASON']._serialized_start=2676 - _globals['_REASON']._serialized_end=2767 + _globals['_CODE']._serialized_start=2551 + _globals['_CODE']._serialized_end=2692 + _globals['_REASON']._serialized_start=2694 + _globals['_REASON']._serialized_end=2785 _globals['_STATUS']._serialized_start=42 _globals['_STATUS']._serialized_end=99 _globals['_PARAMETERS']._serialized_start=101 @@ -82,7 +82,7 @@ _globals['_CLIENTMESSAGE_EVALUATERES_METRICSENTRY']._serialized_start=2125 _globals['_CLIENTMESSAGE_EVALUATERES_METRICSENTRY']._serialized_end=2191 _globals['_SCALAR']._serialized_start=2425 - _globals['_SCALAR']._serialized_end=2530 - _globals['_FLOWERSERVICE']._serialized_start=2769 - _globals['_FLOWERSERVICE']._serialized_end=2852 + _globals['_SCALAR']._serialized_end=2548 + _globals['_FLOWERSERVICE']._serialized_start=2787 + _globals['_FLOWERSERVICE']._serialized_end=2870 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/transport_pb2.pyi b/src/py/flwr/proto/transport_pb2.pyi index d10a1536ceab..0fe541f0a320 100644 --- a/src/py/flwr/proto/transport_pb2.pyi +++ b/src/py/flwr/proto/transport_pb2.pyi @@ -402,20 +402,22 @@ global___ClientMessage = ClientMessage class Scalar(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor DOUBLE_FIELD_NUMBER: builtins.int + UINT64_FIELD_NUMBER: builtins.int SINT64_FIELD_NUMBER: builtins.int BOOL_FIELD_NUMBER: builtins.int STRING_FIELD_NUMBER: builtins.int BYTES_FIELD_NUMBER: builtins.int double: builtins.float - sint64: builtins.int + uint64: builtins.int """float float = 2; int32 int32 = 3; int64 int64 = 4; uint32 uint32 = 5; - uint64 uint64 = 6; - sint32 sint32 = 7; """ + sint64: builtins.int + """sint32 sint32 = 7;""" + bool: builtins.bool """fixed32 fixed32 = 9; fixed64 fixed64 = 10; @@ -428,12 +430,13 @@ class Scalar(google.protobuf.message.Message): def __init__(self, *, double: builtins.float 
= ..., + uint64: builtins.int = ..., sint64: builtins.int = ..., bool: builtins.bool = ..., string: typing.Text = ..., bytes: builtins.bytes = ..., ) -> None: ... - def HasField(self, field_name: typing_extensions.Literal["bool",b"bool","bytes",b"bytes","double",b"double","scalar",b"scalar","sint64",b"sint64","string",b"string"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["bool",b"bool","bytes",b"bytes","double",b"double","scalar",b"scalar","sint64",b"sint64","string",b"string"]) -> None: ... - def WhichOneof(self, oneof_group: typing_extensions.Literal["scalar",b"scalar"]) -> typing.Optional[typing_extensions.Literal["double","sint64","bool","string","bytes"]]: ... + def HasField(self, field_name: typing_extensions.Literal["bool",b"bool","bytes",b"bytes","double",b"double","scalar",b"scalar","sint64",b"sint64","string",b"string","uint64",b"uint64"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["bool",b"bool","bytes",b"bytes","double",b"double","scalar",b"scalar","sint64",b"sint64","string",b"string","uint64",b"uint64"]) -> None: ... + def WhichOneof(self, oneof_group: typing_extensions.Literal["scalar",b"scalar"]) -> typing.Optional[typing_extensions.Literal["double","uint64","sint64","bool","string","bytes"]]: ... global___Scalar = Scalar diff --git a/src/py/flwr/server/__init__.py b/src/py/flwr/server/__init__.py index 875f66c43d03..1dde95b6b047 100644 --- a/src/py/flwr/server/__init__.py +++ b/src/py/flwr/server/__init__.py @@ -17,31 +17,25 @@ from . import strategy from . 
import workflow as workflow -from .app import run_driver_api as run_driver_api -from .app import run_fleet_api as run_fleet_api -from .app import run_superlink as run_superlink from .app import start_server as start_server from .client_manager import ClientManager as ClientManager from .client_manager import SimpleClientManager as SimpleClientManager from .compat import LegacyContext as LegacyContext from .driver import Driver as Driver from .history import History as History -from .run_serverapp import run_server_app as run_server_app from .server import Server as Server from .server_app import ServerApp as ServerApp from .server_config import ServerConfig as ServerConfig +from .serverapp_components import ServerAppComponents as ServerAppComponents __all__ = [ "ClientManager", "Driver", "History", "LegacyContext", - "run_driver_api", - "run_fleet_api", - "run_server_app", - "run_superlink", "Server", "ServerApp", + "ServerAppComponents", "ServerConfig", "SimpleClientManager", "start_server", diff --git a/src/py/flwr/server/app.py b/src/py/flwr/server/app.py index 147ec5fb0f65..58918dbb79ab 100644 --- a/src/py/flwr/server/app.py +++ b/src/py/flwr/server/app.py @@ -19,10 +19,11 @@ import importlib.util import sys import threading +from collections.abc import Sequence from logging import INFO, WARN from os.path import isfile from pathlib import Path -from typing import Optional, Sequence, Set, Tuple +from typing import Optional import grpc from cryptography.exceptions import UnsupportedAlgorithm @@ -34,13 +35,19 @@ from flwr.common import GRPC_MAX_MESSAGE_LENGTH, EventType, event from flwr.common.address import parse_address +from flwr.common.config import get_flwr_dir from flwr.common.constant import ( + DRIVER_API_DEFAULT_ADDRESS, + FLEET_API_GRPC_BIDI_DEFAULT_ADDRESS, + FLEET_API_GRPC_RERE_DEFAULT_ADDRESS, + FLEET_API_REST_DEFAULT_ADDRESS, MISSING_EXTRA_REST, + TRANSPORT_TYPE_GRPC_ADAPTER, TRANSPORT_TYPE_GRPC_RERE, TRANSPORT_TYPE_REST, ) from 
flwr.common.exit_handlers import register_exit_handlers -from flwr.common.logger import log, warn_deprecated_feature +from flwr.common.logger import log from flwr.common.secure_aggregation.crypto.symmetric_encryption import ( private_key_to_bytes, public_key_to_bytes, @@ -48,6 +55,7 @@ from flwr.proto.fleet_pb2_grpc import ( # pylint: disable=E0611 add_FleetServicer_to_server, ) +from flwr.proto.grpcadapter_pb2_grpc import add_GrpcAdapterServicer_to_server from .client_manager import ClientManager from .history import History @@ -55,6 +63,8 @@ from .server_config import ServerConfig from .strategy import Strategy from .superlink.driver.driver_grpc import run_driver_api_grpc +from .superlink.ffs.ffs_factory import FfsFactory +from .superlink.fleet.grpc_adapter.grpc_adapter_servicer import GrpcAdapterServicer from .superlink.fleet.grpc_bidi.grpc_server import ( generic_create_grpc_server, start_grpc_server, @@ -63,23 +73,19 @@ from .superlink.fleet.grpc_rere.server_interceptor import AuthenticateServerInterceptor from .superlink.state import StateFactory -ADDRESS_DRIVER_API = "0.0.0.0:9091" -ADDRESS_FLEET_API_GRPC_RERE = "0.0.0.0:9092" -ADDRESS_FLEET_API_GRPC_BIDI = "[::]:8080" # IPv6 to keep start_server compatible -ADDRESS_FLEET_API_REST = "0.0.0.0:9093" - DATABASE = ":flwr-in-memory-state:" +BASE_DIR = get_flwr_dir() / "superlink" / "ffs" def start_server( # pylint: disable=too-many-arguments,too-many-locals *, - server_address: str = ADDRESS_FLEET_API_GRPC_BIDI, + server_address: str = FLEET_API_GRPC_BIDI_DEFAULT_ADDRESS, server: Optional[Server] = None, config: Optional[ServerConfig] = None, strategy: Optional[Strategy] = None, client_manager: Optional[ClientManager] = None, grpc_max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, - certificates: Optional[Tuple[bytes, bytes, bytes]] = None, + certificates: Optional[tuple[bytes, bytes, bytes]] = None, ) -> History: """Start a Flower server using the gRPC transport layer. 
@@ -190,135 +196,17 @@ def start_server( # pylint: disable=too-many-arguments,too-many-locals return hist -def run_driver_api() -> None: - """Run Flower server (Driver API).""" - log(INFO, "Starting Flower server (Driver API)") - # Running `flower-driver-api` is deprecated - warn_deprecated_feature("flower-driver-api") - log(WARN, "Use `flower-superlink` instead") - event(EventType.RUN_DRIVER_API_ENTER) - args = _parse_args_run_driver_api().parse_args() - - # Parse IP address - parsed_address = parse_address(args.driver_api_address) - if not parsed_address: - sys.exit(f"Driver IP address ({args.driver_api_address}) cannot be parsed.") - host, port, is_v6 = parsed_address - address = f"[{host}]:{port}" if is_v6 else f"{host}:{port}" - - # Obtain certificates - certificates = _try_obtain_certificates(args) - - # Initialize StateFactory - state_factory = StateFactory(args.database) - - # Start server - grpc_server: grpc.Server = run_driver_api_grpc( - address=address, - state_factory=state_factory, - certificates=certificates, - ) - - # Graceful shutdown - register_exit_handlers( - event_type=EventType.RUN_DRIVER_API_LEAVE, - grpc_servers=[grpc_server], - bckg_threads=[], - ) - - # Block - grpc_server.wait_for_termination() - - -def run_fleet_api() -> None: - """Run Flower server (Fleet API).""" - log(INFO, "Starting Flower server (Fleet API)") - # Running `flower-fleet-api` is deprecated - warn_deprecated_feature("flower-fleet-api") - log(WARN, "Use `flower-superlink` instead") - event(EventType.RUN_FLEET_API_ENTER) - args = _parse_args_run_fleet_api().parse_args() - - # Obtain certificates - certificates = _try_obtain_certificates(args) - - # Initialize StateFactory - state_factory = StateFactory(args.database) - - grpc_servers = [] - bckg_threads = [] - - # Start Fleet API - if args.fleet_api_type == TRANSPORT_TYPE_REST: - if ( - importlib.util.find_spec("requests") - and importlib.util.find_spec("starlette") - and importlib.util.find_spec("uvicorn") - ) is None: - 
sys.exit(MISSING_EXTRA_REST) - address_arg = args.rest_fleet_api_address - parsed_address = parse_address(address_arg) - if not parsed_address: - sys.exit(f"Fleet IP address ({address_arg}) cannot be parsed.") - host, port, _ = parsed_address - fleet_thread = threading.Thread( - target=_run_fleet_api_rest, - args=( - host, - port, - args.ssl_keyfile, - args.ssl_certfile, - state_factory, - args.rest_fleet_api_workers, - ), - ) - fleet_thread.start() - bckg_threads.append(fleet_thread) - elif args.fleet_api_type == TRANSPORT_TYPE_GRPC_RERE: - address_arg = args.grpc_rere_fleet_api_address - parsed_address = parse_address(address_arg) - if not parsed_address: - sys.exit(f"Fleet IP address ({address_arg}) cannot be parsed.") - host, port, is_v6 = parsed_address - address = f"[{host}]:{port}" if is_v6 else f"{host}:{port}" - fleet_server = _run_fleet_api_grpc_rere( - address=address, - state_factory=state_factory, - certificates=certificates, - ) - grpc_servers.append(fleet_server) - else: - raise ValueError(f"Unknown fleet_api_type: {args.fleet_api_type}") - - # Graceful shutdown - register_exit_handlers( - event_type=EventType.RUN_FLEET_API_LEAVE, - grpc_servers=grpc_servers, - bckg_threads=bckg_threads, - ) - - # Block - if len(grpc_servers) > 0: - grpc_servers[0].wait_for_termination() - elif len(bckg_threads) > 0: - bckg_threads[0].join() - - # pylint: disable=too-many-branches, too-many-locals, too-many-statements def run_superlink() -> None: """Run Flower SuperLink (Driver API and Fleet API).""" + args = _parse_args_run_superlink().parse_args() + log(INFO, "Starting Flower SuperLink") event(EventType.RUN_SUPERLINK_ENTER) - args = _parse_args_run_superlink().parse_args() - # Parse IP address - parsed_address = parse_address(args.driver_api_address) - if not parsed_address: - sys.exit(f"Driver IP address ({args.driver_api_address}) cannot be parsed.") - host, port, is_v6 = parsed_address - address = f"[{host}]:{port}" if is_v6 else f"{host}:{port}" + 
driver_address, _, _ = _format_address(args.driver_api_address) # Obtain certificates certificates = _try_obtain_certificates(args) @@ -326,15 +214,41 @@ def run_superlink() -> None: # Initialize StateFactory state_factory = StateFactory(args.database) + # Initialize FfsFactory + ffs_factory = FfsFactory(args.storage_dir) + # Start Driver API driver_server: grpc.Server = run_driver_api_grpc( - address=address, + address=driver_address, state_factory=state_factory, + ffs_factory=ffs_factory, certificates=certificates, ) grpc_servers = [driver_server] bckg_threads = [] + if not args.fleet_api_address: + if args.fleet_api_type in [ + TRANSPORT_TYPE_GRPC_RERE, + TRANSPORT_TYPE_GRPC_ADAPTER, + ]: + args.fleet_api_address = FLEET_API_GRPC_RERE_DEFAULT_ADDRESS + elif args.fleet_api_type == TRANSPORT_TYPE_REST: + args.fleet_api_address = FLEET_API_REST_DEFAULT_ADDRESS + + fleet_address, host, port = _format_address(args.fleet_api_address) + + num_workers = args.fleet_api_num_workers + if num_workers != 1: + log( + WARN, + "The Fleet API currently supports only 1 worker. " + "You have specified %d workers. " + "Support for multiple workers will be added in future releases. 
" + "Proceeding with a single worker.", + args.fleet_api_num_workers, + ) + num_workers = 1 # Start Fleet API if args.fleet_api_type == TRANSPORT_TYPE_REST: @@ -344,14 +258,11 @@ def run_superlink() -> None: and importlib.util.find_spec("uvicorn") ) is None: sys.exit(MISSING_EXTRA_REST) - address_arg = args.rest_fleet_api_address - parsed_address = parse_address(address_arg) + _, ssl_certfile, ssl_keyfile = ( certificates if certificates is not None else (None, None, None) ) - if not parsed_address: - sys.exit(f"Fleet IP address ({address_arg}) cannot be parsed.") - host, port, _ = parsed_address + fleet_thread = threading.Thread( target=_run_fleet_api_rest, args=( @@ -360,47 +271,50 @@ def run_superlink() -> None: ssl_keyfile, ssl_certfile, state_factory, - args.rest_fleet_api_workers, + ffs_factory, + num_workers, ), ) fleet_thread.start() bckg_threads.append(fleet_thread) elif args.fleet_api_type == TRANSPORT_TYPE_GRPC_RERE: - address_arg = args.grpc_rere_fleet_api_address - parsed_address = parse_address(address_arg) - if not parsed_address: - sys.exit(f"Fleet IP address ({address_arg}) cannot be parsed.") - host, port, is_v6 = parsed_address - address = f"[{host}]:{port}" if is_v6 else f"{host}:{port}" - - maybe_keys = _try_setup_client_authentication(args, certificates) + maybe_keys = _try_setup_node_authentication(args, certificates) interceptors: Optional[Sequence[grpc.ServerInterceptor]] = None if maybe_keys is not None: ( - client_public_keys, + node_public_keys, server_private_key, server_public_key, ) = maybe_keys state = state_factory.state() - state.store_client_public_keys(client_public_keys) + state.store_node_public_keys(node_public_keys) state.store_server_private_public_key( private_key_to_bytes(server_private_key), public_key_to_bytes(server_public_key), ) log( INFO, - "Client authentication enabled with %d known public keys", - len(client_public_keys), + "Node authentication enabled with %d known public keys", + len(node_public_keys), ) 
interceptors = [AuthenticateServerInterceptor(state)] fleet_server = _run_fleet_api_grpc_rere( - address=address, + address=fleet_address, state_factory=state_factory, + ffs_factory=ffs_factory, certificates=certificates, interceptors=interceptors, ) grpc_servers.append(fleet_server) + elif args.fleet_api_type == TRANSPORT_TYPE_GRPC_ADAPTER: + fleet_server = _run_fleet_api_grpc_adapter( + address=fleet_address, + state_factory=state_factory, + ffs_factory=ffs_factory, + certificates=certificates, + ) + grpc_servers.append(fleet_server) else: raise ValueError(f"Unknown fleet_api_type: {args.fleet_api_type}") @@ -420,10 +334,20 @@ def run_superlink() -> None: driver_server.wait_for_termination(timeout=1) -def _try_setup_client_authentication( +def _format_address(address: str) -> tuple[str, str, int]: + parsed_address = parse_address(address) + if not parsed_address: + sys.exit( + f"Address ({address}) cannot be parsed (expected: URL or IPv4 or IPv6)." + ) + host, port, is_v6 = parsed_address + return (f"[{host}]:{port}" if is_v6 else f"{host}:{port}", host, port) + + +def _try_setup_node_authentication( args: argparse.Namespace, - certificates: Optional[Tuple[bytes, bytes, bytes]], -) -> Optional[Tuple[Set[bytes], ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey]]: + certificates: Optional[tuple[bytes, bytes, bytes]], +) -> Optional[tuple[set[bytes], ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey]]: if ( not args.auth_list_public_keys and not args.auth_superlink_private_key @@ -449,16 +373,16 @@ def _try_setup_client_authentication( "`--ssl-keyfile`, and `—-ssl-ca-certfile` and try again." ) - client_keys_file_path = Path(args.auth_list_public_keys) - if not client_keys_file_path.exists(): + node_keys_file_path = Path(args.auth_list_public_keys) + if not node_keys_file_path.exists(): sys.exit( "The provided path to the known public keys CSV file does not exist: " - f"{client_keys_file_path}. " + f"{node_keys_file_path}. 
" "Please provide the CSV file path containing known public keys " "to '--auth-list-public-keys'." ) - client_public_keys: Set[bytes] = set() + node_public_keys: set[bytes] = set() try: ssh_private_key = load_ssh_private_key( @@ -489,13 +413,13 @@ def _try_setup_client_authentication( "path points to a valid public key file and try again." ) - with open(client_keys_file_path, newline="", encoding="utf-8") as csvfile: + with open(node_keys_file_path, newline="", encoding="utf-8") as csvfile: reader = csv.reader(csvfile) for row in reader: for element in row: public_key = load_ssh_public_key(element.encode()) if isinstance(public_key, ec.EllipticCurvePublicKey): - client_public_keys.add(public_key_to_bytes(public_key)) + node_public_keys.add(public_key_to_bytes(public_key)) else: sys.exit( "Error: Unable to parse the public keys in the CSV " @@ -503,7 +427,7 @@ def _try_setup_client_authentication( "known SSH public keys files and try again." ) return ( - client_public_keys, + node_public_keys, ssh_private_key, ssh_public_key, ) @@ -511,13 +435,13 @@ def _try_setup_client_authentication( def _try_obtain_certificates( args: argparse.Namespace, -) -> Optional[Tuple[bytes, bytes, bytes]]: +) -> Optional[tuple[bytes, bytes, bytes]]: # Obtain certificates if args.insecure: log(WARN, "Option `--insecure` was set. 
Starting insecure HTTP server.") return None # Check if certificates are provided - if args.fleet_api_type == TRANSPORT_TYPE_GRPC_RERE: + if args.fleet_api_type in [TRANSPORT_TYPE_GRPC_RERE, TRANSPORT_TYPE_GRPC_ADAPTER]: if args.ssl_certfile and args.ssl_keyfile and args.ssl_ca_certfile: if not isfile(args.ssl_ca_certfile): sys.exit("Path argument `--ssl-ca-certfile` does not point to a file.") @@ -566,13 +490,15 @@ def _try_obtain_certificates( def _run_fleet_api_grpc_rere( address: str, state_factory: StateFactory, - certificates: Optional[Tuple[bytes, bytes, bytes]], + ffs_factory: FfsFactory, + certificates: Optional[tuple[bytes, bytes, bytes]], interceptors: Optional[Sequence[grpc.ServerInterceptor]] = None, ) -> grpc.Server: """Run Fleet API (gRPC, request-response).""" # Create Fleet API gRPC server fleet_servicer = FleetServicer( state_factory=state_factory, + ffs_factory=ffs_factory, ) fleet_add_servicer_to_server_fn = add_FleetServicer_to_server fleet_grpc_server = generic_create_grpc_server( @@ -589,14 +515,42 @@ def _run_fleet_api_grpc_rere( return fleet_grpc_server +def _run_fleet_api_grpc_adapter( + address: str, + state_factory: StateFactory, + ffs_factory: FfsFactory, + certificates: Optional[tuple[bytes, bytes, bytes]], +) -> grpc.Server: + """Run Fleet API (GrpcAdapter).""" + # Create Fleet API gRPC server + fleet_servicer = GrpcAdapterServicer( + state_factory=state_factory, + ffs_factory=ffs_factory, + ) + fleet_add_servicer_to_server_fn = add_GrpcAdapterServicer_to_server + fleet_grpc_server = generic_create_grpc_server( + servicer_and_add_fn=(fleet_servicer, fleet_add_servicer_to_server_fn), + server_address=address, + max_message_length=GRPC_MAX_MESSAGE_LENGTH, + certificates=certificates, + ) + + log(INFO, "Flower ECE: Starting Fleet API (GrpcAdapter) on %s", address) + fleet_grpc_server.start() + + return fleet_grpc_server + + # pylint: disable=import-outside-toplevel,too-many-arguments +# pylint: disable=too-many-positional-arguments def 
_run_fleet_api_rest( host: str, port: int, ssl_keyfile: Optional[str], ssl_certfile: Optional[str], state_factory: StateFactory, - workers: int, + ffs_factory: FfsFactory, + num_workers: int, ) -> None: """Run Driver API (REST-based).""" try: @@ -605,16 +559,12 @@ def _run_fleet_api_rest( from flwr.server.superlink.fleet.rest_rere.rest_api import app as fast_api_app except ModuleNotFoundError: sys.exit(MISSING_EXTRA_REST) - if workers != 1: - raise ValueError( - f"The supported number of workers for the Fleet API (REST server) is " - f"1. Instead given {workers}. The functionality of >1 workers will be " - f"added in the future releases." - ) + log(INFO, "Starting Flower REST server") # See: https://www.starlette.io/applications/#accessing-the-app-instance fast_api_app.state.STATE_FACTORY = state_factory + fast_api_app.state.FFS_FACTORY = ffs_factory uvicorn.run( app="flwr.server.superlink.fleet.rest_rere.rest_api:app", @@ -624,44 +574,10 @@ def _run_fleet_api_rest( access_log=True, ssl_keyfile=ssl_keyfile, ssl_certfile=ssl_certfile, - workers=workers, + workers=num_workers, ) -def _parse_args_run_driver_api() -> argparse.ArgumentParser: - """Parse command line arguments for Driver API.""" - parser = argparse.ArgumentParser( - description="Start a Flower Driver API server. " - "This server will be responsible for " - "receiving TaskIns from the Driver script and " - "sending them to the Fleet API. Once the client nodes " - "are done, they will send the TaskRes back to this Driver API server (through" - " the Fleet API) which will then send them back to the Driver script.", - ) - - _add_args_common(parser=parser) - _add_args_driver_api(parser=parser) - - return parser - - -def _parse_args_run_fleet_api() -> argparse.ArgumentParser: - """Parse command line arguments for Fleet API.""" - parser = argparse.ArgumentParser( - description="Start a Flower Fleet API server." 
- "This server will be responsible for " - "sending TaskIns (received from the Driver API) to the client nodes " - "and of receiving TaskRes sent back from those same client nodes once " - "they are done. Then, this Fleet API server can send those " - "TaskRes back to the Driver API.", - ) - - _add_args_common(parser=parser) - _add_args_fleet_api(parser=parser) - - return parser - - def _parse_args_run_superlink() -> argparse.ArgumentParser: """Parse command line arguments for both Driver API and Fleet API.""" parser = argparse.ArgumentParser( @@ -711,6 +627,11 @@ def _add_args_common(parser: argparse.ArgumentParser) -> None: "Flower will just create a state in memory.", default=DATABASE, ) + parser.add_argument( + "--storage-dir", + help="The base directory to store the objects for the Flower File System.", + default=BASE_DIR, + ) parser.add_argument( "--auth-list-public-keys", type=str, @@ -732,50 +653,31 @@ def _add_args_common(parser: argparse.ArgumentParser) -> None: def _add_args_driver_api(parser: argparse.ArgumentParser) -> None: parser.add_argument( "--driver-api-address", - help="Driver API (gRPC) server address (IPv4, IPv6, or a domain name)", - default=ADDRESS_DRIVER_API, + help="Driver API (gRPC) server address (IPv4, IPv6, or a domain name).", + default=DRIVER_API_DEFAULT_ADDRESS, ) def _add_args_fleet_api(parser: argparse.ArgumentParser) -> None: # Fleet API transport layer type - ex_group = parser.add_mutually_exclusive_group() - ex_group.add_argument( - "--grpc-rere", - action="store_const", - dest="fleet_api_type", - const=TRANSPORT_TYPE_GRPC_RERE, + parser.add_argument( + "--fleet-api-type", default=TRANSPORT_TYPE_GRPC_RERE, - help="Start a Fleet API server (gRPC-rere)", - ) - ex_group.add_argument( - "--rest", - action="store_const", - dest="fleet_api_type", - const=TRANSPORT_TYPE_REST, - help="Start a Fleet API server (REST, experimental)", - ) - - # Fleet API gRPC-rere options - grpc_rere_group = parser.add_argument_group( - "Fleet API 
(gRPC-rere) server options", "" - ) - grpc_rere_group.add_argument( - "--grpc-rere-fleet-api-address", - help="Fleet API (gRPC-rere) server address (IPv4, IPv6, or a domain name)", - default=ADDRESS_FLEET_API_GRPC_RERE, + type=str, + choices=[ + TRANSPORT_TYPE_GRPC_RERE, + TRANSPORT_TYPE_GRPC_ADAPTER, + TRANSPORT_TYPE_REST, + ], + help="Start a gRPC-rere or REST (experimental) Fleet API server.", ) - - # Fleet API REST options - rest_group = parser.add_argument_group("Fleet API (REST) server options", "") - rest_group.add_argument( - "--rest-fleet-api-address", - help="Fleet API (REST) server address (IPv4, IPv6, or a domain name)", - default=ADDRESS_FLEET_API_REST, + parser.add_argument( + "--fleet-api-address", + help="Fleet API server address (IPv4, IPv6, or a domain name).", ) - rest_group.add_argument( - "--rest-fleet-api-workers", - help="Set the number of concurrent workers for the Fleet API REST server.", - type=int, + parser.add_argument( + "--fleet-api-num-workers", default=1, + type=int, + help="Set the number of concurrent workers for the Fleet API server.", ) diff --git a/src/py/flwr/server/client_manager.py b/src/py/flwr/server/client_manager.py index 7956e282bd2c..9949e29f8f7d 100644 --- a/src/py/flwr/server/client_manager.py +++ b/src/py/flwr/server/client_manager.py @@ -19,7 +19,7 @@ import threading from abc import ABC, abstractmethod from logging import INFO -from typing import Dict, List, Optional +from typing import Optional from flwr.common.logger import log @@ -47,6 +47,7 @@ def register(self, client: ClientProxy) -> bool: Parameters ---------- client : flwr.server.client_proxy.ClientProxy + The ClientProxy of the Client to register. Returns ------- @@ -64,10 +65,11 @@ def unregister(self, client: ClientProxy) -> None: Parameters ---------- client : flwr.server.client_proxy.ClientProxy + The ClientProxy of the Client to unregister. 
""" @abstractmethod - def all(self) -> Dict[str, ClientProxy]: + def all(self) -> dict[str, ClientProxy]: """Return all available clients.""" @abstractmethod @@ -80,7 +82,7 @@ def sample( num_clients: int, min_num_clients: Optional[int] = None, criterion: Optional[Criterion] = None, - ) -> List[ClientProxy]: + ) -> list[ClientProxy]: """Sample a number of Flower ClientProxy instances.""" @@ -88,7 +90,7 @@ class SimpleClientManager(ClientManager): """Provides a pool of available clients.""" def __init__(self) -> None: - self.clients: Dict[str, ClientProxy] = {} + self.clients: dict[str, ClientProxy] = {} self._cv = threading.Condition() def __len__(self) -> int: @@ -170,7 +172,7 @@ def unregister(self, client: ClientProxy) -> None: with self._cv: self._cv.notify_all() - def all(self) -> Dict[str, ClientProxy]: + def all(self) -> dict[str, ClientProxy]: """Return all available clients.""" return self.clients @@ -179,7 +181,7 @@ def sample( num_clients: int, min_num_clients: Optional[int] = None, criterion: Optional[Criterion] = None, - ) -> List[ClientProxy]: + ) -> list[ClientProxy]: """Sample a number of Flower ClientProxy instances.""" # Block until at least num_clients are connected. if min_num_clients is None: diff --git a/src/py/flwr/server/client_proxy_test.py b/src/py/flwr/server/client_proxy_test.py index 685698558e3a..6ca37052a87d 100644 --- a/src/py/flwr/server/client_proxy_test.py +++ b/src/py/flwr/server/client_proxy_test.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2022 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/src/py/flwr/server/compat/app.py b/src/py/flwr/server/compat/app.py index 4bb23b846ab7..1d3e5024ba90 100644 --- a/src/py/flwr/server/compat/app.py +++ b/src/py/flwr/server/compat/app.py @@ -1,4 +1,4 @@ -# Copyright 2022 Flower Labs GmbH. All Rights Reserved. 
+# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,7 +18,6 @@ from logging import INFO from typing import Optional -from flwr.common import EventType, event from flwr.common.logger import log from flwr.server.client_manager import ClientManager from flwr.server.history import History @@ -65,8 +64,6 @@ def start_driver( # pylint: disable=too-many-arguments, too-many-locals hist : flwr.server.history.History Object containing training and evaluation metrics. """ - event(EventType.START_DRIVER_ENTER) - # Initialize the Driver API server and config initialized_server, initialized_config = init_defaults( server=server, @@ -96,6 +93,4 @@ def start_driver( # pylint: disable=too-many-arguments, too-many-locals f_stop.set() thread.join() - event(EventType.START_SERVER_LEAVE) - return hist diff --git a/src/py/flwr/server/compat/app_utils.py b/src/py/flwr/server/compat/app_utils.py index 1cdf1efbffb9..8d2479f47d40 100644 --- a/src/py/flwr/server/compat/app_utils.py +++ b/src/py/flwr/server/compat/app_utils.py @@ -16,7 +16,6 @@ import threading -from typing import Dict, Tuple from ..client_manager import ClientManager from ..compat.driver_client_proxy import DriverClientProxy @@ -26,7 +25,7 @@ def start_update_client_manager_thread( driver: Driver, client_manager: ClientManager, -) -> Tuple[threading.Thread, threading.Event]: +) -> tuple[threading.Thread, threading.Event]: """Periodically update the nodes list in the client manager in a thread. 
This function starts a thread that periodically uses the associated driver to @@ -73,7 +72,7 @@ def _update_client_manager( ) -> None: """Update the nodes list in the client manager.""" # Loop until the driver is disconnected - registered_nodes: Dict[int, DriverClientProxy] = {} + registered_nodes: dict[int, DriverClientProxy] = {} while not f_stop.is_set(): all_node_ids = set(driver.get_node_ids()) dead_nodes = set(registered_nodes).difference(all_node_ids) @@ -91,7 +90,7 @@ def _update_client_manager( node_id=node_id, driver=driver, anonymous=False, - run_id=driver.run_id, # type: ignore + run_id=driver.run.run_id, ) if client_manager.register(client_proxy): registered_nodes[node_id] = client_proxy diff --git a/src/py/flwr/server/compat/driver_client_proxy.py b/src/py/flwr/server/compat/driver_client_proxy.py index 150803786f98..c5a3f561d474 100644 --- a/src/py/flwr/server/compat/driver_client_proxy.py +++ b/src/py/flwr/server/compat/driver_client_proxy.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -15,7 +15,6 @@ """Flower ClientProxy implementation for Driver API.""" -import time from typing import Optional from flwr import common @@ -25,8 +24,6 @@ from ..driver.driver import Driver -SLEEP_TIME = 1 - class DriverClientProxy(ClientProxy): """Flower client proxy which delegates work using the Driver API.""" @@ -122,29 +119,18 @@ def _send_receive_recordset( ttl=timeout, ) - # Push message - message_ids = list(self.driver.push_messages(messages=[message])) - if len(message_ids) != 1: - raise ValueError("Unexpected number of message_ids") - - message_id = message_ids[0] - if message_id == "": - raise ValueError(f"Failed to send message to node {self.node_id}") - - if timeout: - start_time = time.time() - - while True: - messages = list(self.driver.pull_messages(message_ids)) - if len(messages) == 1: - msg: Message = messages[0] - if msg.has_error(): - raise ValueError( - f"Message contains an Error (reason: {msg.error.reason}). " - "It originated during client-side execution of a message." - ) - return msg.content - - if timeout is not None and time.time() > start_time + timeout: - raise RuntimeError("Timeout reached") - time.sleep(SLEEP_TIME) + # Send message and wait for reply + messages = list(self.driver.send_and_receive(messages=[message])) + + # A single reply is expected + if len(messages) != 1: + raise ValueError(f"Expected one Message but got: {len(messages)}") + + # Only messages without errors can be handled beyond these point + msg: Message = messages[0] + if msg.has_error(): + raise ValueError( + f"Message contains an Error (reason: {msg.error.reason}). " + "It originated during client-side execution of a message." 
+ ) + return msg.content diff --git a/src/py/flwr/server/compat/driver_client_proxy_test.py b/src/py/flwr/server/compat/driver_client_proxy_test.py index d9e3d3bc0824..5bad0b56c4c6 100644 --- a/src/py/flwr/server/compat/driver_client_proxy_test.py +++ b/src/py/flwr/server/compat/driver_client_proxy_test.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,7 +17,8 @@ import unittest import unittest.mock -from typing import Any, Callable, Iterable, Optional, Union, cast +from collections.abc import Iterable +from typing import Any, Callable, Optional, Union, cast from unittest.mock import Mock import numpy as np @@ -51,8 +52,6 @@ RUN_ID = 61016 NODE_ID = 1 -INSTRUCTION_MESSAGE_ID = "mock instruction message id" -REPLY_MESSAGE_ID = "mock reply message id" class DriverClientProxyTestCase(unittest.TestCase): @@ -76,7 +75,7 @@ def test_get_properties(self) -> None: """Test positive case.""" # Prepare res = GetPropertiesRes(status=CLIENT_STATUS, properties=CLIENT_PROPERTIES) - self.driver.push_messages.side_effect = self._get_push_messages(res) + self.driver.send_and_receive.side_effect = self._exec_send_and_receive(res) request_properties: Config = {"tensor_type": "str"} ins = GetPropertiesIns(config=request_properties) @@ -94,7 +93,7 @@ def test_get_parameters(self) -> None: status=CLIENT_STATUS, parameters=MESSAGE_PARAMETERS, ) - self.driver.push_messages.side_effect = self._get_push_messages(res) + self.driver.send_and_receive.side_effect = self._exec_send_and_receive(res) ins = GetParametersIns(config={}) # Execute @@ -113,7 +112,7 @@ def test_fit(self) -> None: num_examples=10, metrics={}, ) - self.driver.push_messages.side_effect = self._get_push_messages(res) + self.driver.send_and_receive.side_effect = self._exec_send_and_receive(res) parameters 
= flwr.common.ndarrays_to_parameters([np.ones((2, 2))]) ins = FitIns(parameters, {}) @@ -133,7 +132,7 @@ def test_evaluate(self) -> None: num_examples=0, metrics={}, ) - self.driver.push_messages.side_effect = self._get_push_messages(res) + self.driver.send_and_receive.side_effect = self._exec_send_and_receive(res) parameters = Parameters(tensors=[b"random params%^&*F"], tensor_type="np") ins = EvaluateIns(parameters, {}) @@ -147,7 +146,7 @@ def test_evaluate(self) -> None: def test_get_properties_and_fail(self) -> None: """Test negative case.""" # Prepare - self.driver.push_messages.side_effect = self._get_push_messages( + self.driver.send_and_receive.side_effect = self._exec_send_and_receive( None, error_reply=True ) request_properties: Config = {"tensor_type": "str"} @@ -162,7 +161,7 @@ def test_get_properties_and_fail(self) -> None: def test_get_parameters_and_fail(self) -> None: """Test negative case.""" # Prepare - self.driver.push_messages.side_effect = self._get_push_messages( + self.driver.send_and_receive.side_effect = self._exec_send_and_receive( None, error_reply=True ) ins = GetParametersIns(config={}) @@ -176,7 +175,7 @@ def test_get_parameters_and_fail(self) -> None: def test_fit_and_fail(self) -> None: """Test negative case.""" # Prepare - self.driver.push_messages.side_effect = self._get_push_messages( + self.driver.send_and_receive.side_effect = self._exec_send_and_receive( None, error_reply=True ) parameters = flwr.common.ndarrays_to_parameters([np.ones((2, 2))]) @@ -189,7 +188,7 @@ def test_fit_and_fail(self) -> None: def test_evaluate_and_fail(self) -> None: """Test negative case.""" # Prepare - self.driver.push_messages.side_effect = self._get_push_messages( + self.driver.send_and_receive.side_effect = self._exec_send_and_receive( None, error_reply=True ) parameters = Parameters(tensors=[b"random params%^&*F"], tensor_type="np") @@ -201,7 +200,7 @@ def test_evaluate_and_fail(self) -> None: ) self._common_assertions(ins) - def 
_create_message_dummy( # pylint: disable=R0913 + def _create_message_dummy( # pylint: disable=R0913,too-many-positional-arguments self, content: RecordSet, message_type: str, @@ -228,19 +227,19 @@ def _create_message_dummy( # pylint: disable=R0913 self.created_msg = Message(metadata=metadata, content=content) return self.created_msg - def _get_push_messages( + def _exec_send_and_receive( self, res: Union[GetParametersRes, GetPropertiesRes, FitRes, EvaluateRes, None], error_reply: bool = False, - ) -> Callable[[Iterable[Message]], Iterable[str]]: - """Get the push_messages function that sets the return value of pull_messages - when called.""" + ) -> Callable[[Iterable[Message]], Iterable[Message]]: + """Get the generate_replies function that sets the return value of driver's + send_and_receive when called.""" - def push_messages(messages: Iterable[Message]) -> Iterable[str]: + def generate_replies(messages: Iterable[Message]) -> Iterable[Message]: msg = list(messages)[0] + recordset = None if error_reply: - recordset = None - ret = msg.create_error_reply(ERROR_REPLY) + pass elif isinstance(res, GetParametersRes): recordset = compat.getparametersres_to_recordset(res, True) elif isinstance(res, GetPropertiesRes): @@ -249,17 +248,16 @@ def push_messages(messages: Iterable[Message]) -> Iterable[str]: recordset = compat.fitres_to_recordset(res, True) elif isinstance(res, EvaluateRes): recordset = compat.evaluateres_to_recordset(res) - else: - raise ValueError(f"Unsupported type: {type(res)}") + if recordset is not None: ret = msg.create_reply(recordset) - ret.metadata.__dict__["_message_id"] = REPLY_MESSAGE_ID + else: + ret = msg.create_error_reply(ERROR_REPLY) - # Set the return value of `pull_messages` - self.driver.pull_messages.return_value = [ret] - return [INSTRUCTION_MESSAGE_ID] + # Reply messages given the push message + return [ret] - return push_messages + return generate_replies def _common_assertions(self, original_ins: Any) -> None: """Check common 
assertions.""" @@ -274,18 +272,9 @@ def _common_assertions(self, original_ins: Any) -> None: self.assertEqual(self.called_times, 1) self.assertEqual(actual_ins, original_ins) - # Check if push_messages is called once with expected args/kwargs. - self.driver.push_messages.assert_called_once() - try: - self.driver.push_messages.assert_any_call([self.created_msg]) - except AssertionError: - self.driver.push_messages.assert_any_call(messages=[self.created_msg]) - - # Check if pull_messages is called once with expected args/kwargs. - self.driver.pull_messages.assert_called_once() + # Check if send_and_receive is called once with expected args/kwargs. + self.driver.send_and_receive.assert_called_once() try: - self.driver.pull_messages.assert_called_with([INSTRUCTION_MESSAGE_ID]) + self.driver.send_and_receive.assert_any_call([self.created_msg]) except AssertionError: - self.driver.pull_messages.assert_called_with( - message_ids=[INSTRUCTION_MESSAGE_ID] - ) + self.driver.send_and_receive.assert_any_call(messages=[self.created_msg]) diff --git a/src/py/flwr/server/compat/legacy_context.py b/src/py/flwr/server/compat/legacy_context.py index 0b00c98bb16d..589342556b17 100644 --- a/src/py/flwr/server/compat/legacy_context.py +++ b/src/py/flwr/server/compat/legacy_context.py @@ -18,7 +18,7 @@ from dataclasses import dataclass from typing import Optional -from flwr.common import Context, RecordSet +from flwr.common import Context from ..client_manager import ClientManager, SimpleClientManager from ..history import History @@ -35,9 +35,9 @@ class LegacyContext(Context): client_manager: ClientManager history: History - def __init__( + def __init__( # pylint: disable=too-many-arguments self, - state: RecordSet, + context: Context, config: Optional[ServerConfig] = None, strategy: Optional[Strategy] = None, client_manager: Optional[ClientManager] = None, @@ -52,4 +52,5 @@ def __init__( self.strategy = strategy self.client_manager = client_manager self.history = History() - 
super().__init__(state) + + super().__init__(**vars(context)) diff --git a/src/py/flwr/server/driver/driver.py b/src/py/flwr/server/driver/driver.py index b95cec95ab47..5a6ee691f3a9 100644 --- a/src/py/flwr/server/driver/driver.py +++ b/src/py/flwr/server/driver/driver.py @@ -16,16 +16,23 @@ from abc import ABC, abstractmethod -from typing import Iterable, List, Optional +from collections.abc import Iterable +from typing import Optional from flwr.common import Message, RecordSet +from flwr.common.typing import Run class Driver(ABC): """Abstract base Driver class for the Driver API.""" + @property @abstractmethod - def create_message( # pylint: disable=too-many-arguments + def run(self) -> Run: + """Run information.""" + + @abstractmethod + def create_message( # pylint: disable=too-many-arguments,R0917 self, content: RecordSet, message_type: str, @@ -64,7 +71,7 @@ def create_message( # pylint: disable=too-many-arguments """ @abstractmethod - def get_node_ids(self) -> List[int]: + def get_node_ids(self) -> list[int]: """Get node IDs.""" @abstractmethod diff --git a/src/py/flwr/server/driver/grpc_driver.py b/src/py/flwr/server/driver/grpc_driver.py index d339f1b232f9..13c1c4152dad 100644 --- a/src/py/flwr/server/driver/grpc_driver.py +++ b/src/py/flwr/server/driver/grpc_driver.py @@ -1,4 +1,4 @@ -# Copyright 2022 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -16,18 +16,23 @@ import time import warnings -from logging import DEBUG, ERROR, WARNING -from typing import Iterable, List, Optional, Tuple +from collections.abc import Iterable +from logging import DEBUG, WARNING +from typing import Optional, cast import grpc -from flwr.common import DEFAULT_TTL, EventType, Message, Metadata, RecordSet, event +from flwr.common import DEFAULT_TTL, Message, Metadata, RecordSet +from flwr.common.constant import DRIVER_API_DEFAULT_ADDRESS from flwr.common.grpc import create_channel from flwr.common.logger import log -from flwr.common.serde import message_from_taskres, message_to_taskins +from flwr.common.serde import ( + message_from_taskres, + message_to_taskins, + user_config_from_proto, +) +from flwr.common.typing import Run from flwr.proto.driver_pb2 import ( # pylint: disable=E0611 - CreateRunRequest, - CreateRunResponse, GetNodesRequest, GetNodesResponse, PullTaskResRequest, @@ -37,159 +42,115 @@ ) from flwr.proto.driver_pb2_grpc import DriverStub # pylint: disable=E0611 from flwr.proto.node_pb2 import Node # pylint: disable=E0611 +from flwr.proto.run_pb2 import GetRunRequest, GetRunResponse # pylint: disable=E0611 from flwr.proto.task_pb2 import TaskIns # pylint: disable=E0611 from .driver import Driver -DEFAULT_SERVER_ADDRESS_DRIVER = "[::]:9091" - ERROR_MESSAGE_DRIVER_NOT_CONNECTED = """ [Driver] Error: Not connected. -Call `connect()` on the `GrpcDriverHelper` instance before calling any of the other -`GrpcDriverHelper` methods. +Call `connect()` on the `GrpcDriverStub` instance before calling any of the other +`GrpcDriverStub` methods. """ -class GrpcDriverHelper: - """`GrpcDriverHelper` provides access to the gRPC Driver API/service.""" +class GrpcDriver(Driver): + """`GrpcDriver` provides an interface to the Driver API. - def __init__( + Parameters + ---------- + run_id : int + The identifier of the run. 
+ driver_service_address : str (default: "[::]:9091") + The address (URL, IPv6, IPv4) of the SuperLink Driver API service. + root_certificates : Optional[bytes] (default: None) + The PEM-encoded root certificates as a byte string. + If provided, a secure connection using the certificates will be + established to an SSL-enabled Flower server. + """ + + def __init__( # pylint: disable=too-many-arguments self, - driver_service_address: str = DEFAULT_SERVER_ADDRESS_DRIVER, + run_id: int, + driver_service_address: str = DRIVER_API_DEFAULT_ADDRESS, root_certificates: Optional[bytes] = None, ) -> None: - self.driver_service_address = driver_service_address - self.root_certificates = root_certificates - self.channel: Optional[grpc.Channel] = None - self.stub: Optional[DriverStub] = None + self._run_id = run_id + self._addr = driver_service_address + self._cert = root_certificates + self._run: Optional[Run] = None + self._grpc_stub: Optional[DriverStub] = None + self._channel: Optional[grpc.Channel] = None + self.node = Node(node_id=0, anonymous=True) + + @property + def _is_connected(self) -> bool: + """Check if connected to the Driver API server.""" + return self._channel is not None - def connect(self) -> None: - """Connect to the Driver API.""" - event(EventType.DRIVER_CONNECT) - if self.channel is not None or self.stub is not None: + def _connect(self) -> None: + """Connect to the Driver API. + + This will not call GetRun. 
+ """ + if self._is_connected: log(WARNING, "Already connected") return - self.channel = create_channel( - server_address=self.driver_service_address, - insecure=(self.root_certificates is None), - root_certificates=self.root_certificates, + self._channel = create_channel( + server_address=self._addr, + insecure=(self._cert is None), + root_certificates=self._cert, ) - self.stub = DriverStub(self.channel) - log(DEBUG, "[Driver] Connected to %s", self.driver_service_address) + self._grpc_stub = DriverStub(self._channel) + log(DEBUG, "[Driver] Connected to %s", self._addr) - def disconnect(self) -> None: + def _disconnect(self) -> None: """Disconnect from the Driver API.""" - event(EventType.DRIVER_DISCONNECT) - if self.channel is None or self.stub is None: + if not self._is_connected: log(DEBUG, "Already disconnected") return - channel = self.channel - self.channel = None - self.stub = None + channel: grpc.Channel = self._channel + self._channel = None + self._grpc_stub = None channel.close() log(DEBUG, "[Driver] Disconnected") - def create_run(self, req: CreateRunRequest) -> CreateRunResponse: - """Request for run ID.""" - # Check if channel is open - if self.stub is None: - log(ERROR, ERROR_MESSAGE_DRIVER_NOT_CONNECTED) - raise ConnectionError("`GrpcDriverHelper` instance not connected") - - # Call Driver API - res: CreateRunResponse = self.stub.CreateRun(request=req) - return res - - def get_nodes(self, req: GetNodesRequest) -> GetNodesResponse: - """Get client IDs.""" - # Check if channel is open - if self.stub is None: - log(ERROR, ERROR_MESSAGE_DRIVER_NOT_CONNECTED) - raise ConnectionError("`GrpcDriverHelper` instance not connected") - - # Call gRPC Driver API - res: GetNodesResponse = self.stub.GetNodes(request=req) - return res - - def push_task_ins(self, req: PushTaskInsRequest) -> PushTaskInsResponse: - """Schedule tasks.""" - # Check if channel is open - if self.stub is None: - log(ERROR, ERROR_MESSAGE_DRIVER_NOT_CONNECTED) - raise 
ConnectionError("`GrpcDriverHelper` instance not connected") - - # Call gRPC Driver API - res: PushTaskInsResponse = self.stub.PushTaskIns(request=req) - return res - - def pull_task_res(self, req: PullTaskResRequest) -> PullTaskResResponse: - """Get task results.""" - # Check if channel is open - if self.stub is None: - log(ERROR, ERROR_MESSAGE_DRIVER_NOT_CONNECTED) - raise ConnectionError("`GrpcDriverHelper` instance not connected") - - # Call Driver API - res: PullTaskResResponse = self.stub.PullTaskRes(request=req) - return res - - -class GrpcDriver(Driver): - """`Driver` class provides an interface to the Driver API. - - Parameters - ---------- - driver_service_address : Optional[str] - The IPv4 or IPv6 address of the Driver API server. - Defaults to `"[::]:9091"`. - certificates : bytes (default: None) - Tuple containing root certificate, server certificate, and private key - to start a secure SSL-enabled server. The tuple is expected to have - three bytes elements in the following order: - - * CA certificate. - * server certificate. - * server private key. - fab_id : str (default: None) - The identifier of the FAB used in the run. - fab_version : str (default: None) - The version of the FAB used in the run. 
- """ + def _init_run(self) -> None: + # Check if is initialized + if self._run is not None: + return + # Get the run info + req = GetRunRequest(run_id=self._run_id) + res: GetRunResponse = self._stub.GetRun(req) + if not res.HasField("run"): + raise RuntimeError(f"Cannot find the run with ID: {self._run_id}") + self._run = Run( + run_id=res.run.run_id, + fab_id=res.run.fab_id, + fab_version=res.run.fab_version, + fab_hash=res.run.fab_hash, + override_config=user_config_from_proto(res.run.override_config), + ) - def __init__( - self, - driver_service_address: str = DEFAULT_SERVER_ADDRESS_DRIVER, - root_certificates: Optional[bytes] = None, - fab_id: Optional[str] = None, - fab_version: Optional[str] = None, - ) -> None: - self.addr = driver_service_address - self.root_certificates = root_certificates - self.driver_helper: Optional[GrpcDriverHelper] = None - self.run_id: Optional[int] = None - self.fab_id = fab_id if fab_id is not None else "" - self.fab_version = fab_version if fab_version is not None else "" - self.node = Node(node_id=0, anonymous=True) + @property + def run(self) -> Run: + """Run information.""" + self._init_run() + return Run(**vars(self._run)) - def _get_grpc_driver_helper_and_run_id(self) -> Tuple[GrpcDriverHelper, int]: - # Check if the GrpcDriverHelper is initialized - if self.driver_helper is None or self.run_id is None: - # Connect and create run - self.driver_helper = GrpcDriverHelper( - driver_service_address=self.addr, - root_certificates=self.root_certificates, - ) - self.driver_helper.connect() - req = CreateRunRequest(fab_id=self.fab_id, fab_version=self.fab_version) - res = self.driver_helper.create_run(req) - self.run_id = res.run_id - return self.driver_helper, self.run_id + @property + def _stub(self) -> DriverStub: + """Driver stub.""" + if not self._is_connected: + self._connect() + return cast(DriverStub, self._grpc_stub) def _check_message(self, message: Message) -> None: # Check if the message is valid if not ( - 
message.metadata.run_id == self.run_id + # Assume self._run being initialized + message.metadata.run_id == self._run_id and message.metadata.src_node_id == self.node.node_id and message.metadata.message_id == "" and message.metadata.reply_to_message == "" @@ -197,7 +158,7 @@ def _check_message(self, message: Message) -> None: ): raise ValueError(f"Invalid message: {message}") - def create_message( # pylint: disable=too-many-arguments + def create_message( # pylint: disable=too-many-arguments,R0917 self, content: RecordSet, message_type: str, @@ -210,7 +171,7 @@ def create_message( # pylint: disable=too-many-arguments This method constructs a new `Message` with given content and metadata. The `run_id` and `src_node_id` will be set automatically. """ - _, run_id = self._get_grpc_driver_helper_and_run_id() + self._init_run() if ttl: warnings.warn( "A custom TTL was set, but note that the SuperLink does not enforce " @@ -221,7 +182,7 @@ def create_message( # pylint: disable=too-many-arguments ttl_ = DEFAULT_TTL if ttl is None else ttl metadata = Metadata( - run_id=run_id, + run_id=self._run_id, message_id="", # Will be set by the server src_node_id=self.node.node_id, dst_node_id=dst_node_id, @@ -232,11 +193,13 @@ def create_message( # pylint: disable=too-many-arguments ) return Message(metadata=metadata, content=content) - def get_node_ids(self) -> List[int]: + def get_node_ids(self) -> list[int]: """Get node IDs.""" - grpc_driver_helper, run_id = self._get_grpc_driver_helper_and_run_id() - # Call GrpcDriverHelper method - res = grpc_driver_helper.get_nodes(GetNodesRequest(run_id=run_id)) + self._init_run() + # Call GrpcDriverStub method + res: GetNodesResponse = self._stub.GetNodes( + GetNodesRequest(run_id=self._run_id) + ) return [node.node_id for node in res.nodes] def push_messages(self, messages: Iterable[Message]) -> Iterable[str]: @@ -245,9 +208,9 @@ def push_messages(self, messages: Iterable[Message]) -> Iterable[str]: This method takes an iterable of messages 
and sends each message to the node specified in `dst_node_id`. """ - grpc_driver_helper, _ = self._get_grpc_driver_helper_and_run_id() + self._init_run() # Construct TaskIns - task_ins_list: List[TaskIns] = [] + task_ins_list: list[TaskIns] = [] for msg in messages: # Check message self._check_message(msg) @@ -255,8 +218,8 @@ def push_messages(self, messages: Iterable[Message]) -> Iterable[str]: taskins = message_to_taskins(msg) # Add to list task_ins_list.append(taskins) - # Call GrpcDriverHelper method - res = grpc_driver_helper.push_task_ins( + # Call GrpcDriverStub method + res: PushTaskInsResponse = self._stub.PushTaskIns( PushTaskInsRequest(task_ins_list=task_ins_list) ) return list(res.task_ids) @@ -267,9 +230,9 @@ def pull_messages(self, message_ids: Iterable[str]) -> Iterable[Message]: This method is used to collect messages from the SuperLink that correspond to a set of given message IDs. """ - grpc_driver, _ = self._get_grpc_driver_helper_and_run_id() + self._init_run() # Pull TaskRes - res = grpc_driver.pull_task_res( + res: PullTaskResResponse = self._stub.PullTaskRes( PullTaskResRequest(node=self.node, task_ids=message_ids) ) # Convert TaskRes to Message @@ -293,7 +256,7 @@ def send_and_receive( # Pull messages end_time = time.time() + (timeout if timeout is not None else 0.0) - ret: List[Message] = [] + ret: list[Message] = [] while timeout is None or time.time() < end_time: res_msgs = self.pull_messages(msg_ids) ret.extend(res_msgs) @@ -308,8 +271,8 @@ def send_and_receive( def close(self) -> None: """Disconnect from the SuperLink if connected.""" - # Check if GrpcDriverHelper is initialized - if self.driver_helper is None: + # Check if `connect` was called before + if not self._is_connected: return # Disconnect - self.driver_helper.disconnect() + self._disconnect() diff --git a/src/py/flwr/server/driver/grpc_driver_test.py b/src/py/flwr/server/driver/grpc_driver_test.py index fbead0e3043d..20017126927d 100644 --- 
a/src/py/flwr/server/driver/grpc_driver_test.py +++ b/src/py/flwr/server/driver/grpc_driver_test.py @@ -1,4 +1,4 @@ -# Copyright 2022 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -27,6 +27,7 @@ PullTaskResRequest, PushTaskInsRequest, ) +from flwr.proto.run_pb2 import Run # pylint: disable=E0611 from flwr.proto.task_pb2 import Task, TaskRes # pylint: disable=E0611 from .grpc_driver import GrpcDriver @@ -36,58 +37,45 @@ class TestGrpcDriver(unittest.TestCase): """Tests for `GrpcDriver` class.""" def setUp(self) -> None: - """Initialize mock GrpcDriverHelper and Driver instance before each test.""" - mock_response = Mock() - mock_response.run_id = 61016 - self.mock_grpc_driver_helper = Mock() - self.mock_grpc_driver_helper.create_run.return_value = mock_response - self.patcher = patch( - "flwr.server.driver.grpc_driver.GrpcDriverHelper", - return_value=self.mock_grpc_driver_helper, + """Initialize mock GrpcDriverStub and Driver instance before each test.""" + mock_response = Mock( + run=Run( + run_id=61016, + fab_id="mock/mock", + fab_version="v1.0.0", + fab_hash="9f86d08", + ) ) - self.patcher.start() - self.driver = GrpcDriver() - - def tearDown(self) -> None: - """Cleanup after each test.""" - self.patcher.stop() - - def test_check_and_init_grpc_driver_already_initialized(self) -> None: - """Test that GrpcDriverHelper doesn't initialize if run is created.""" - # Prepare - self.driver.driver_helper = self.mock_grpc_driver_helper - self.driver.run_id = 61016 - - # Execute - # pylint: disable-next=protected-access - self.driver._get_grpc_driver_helper_and_run_id() - + self.mock_stub = Mock() + self.mock_channel = Mock() + self.mock_stub.GetRun.return_value = mock_response + mock_response.HasField.return_value = True + self.driver = GrpcDriver(run_id=61016) + self.driver._grpc_stub = 
self.mock_stub # pylint: disable=protected-access + self.driver._channel = self.mock_channel # pylint: disable=protected-access + + def test_init_grpc_driver(self) -> None: + """Test GrpcDriverStub initialization.""" # Assert - self.mock_grpc_driver_helper.connect.assert_not_called() - - def test_check_and_init_grpc_driver_needs_initialization(self) -> None: - """Test GrpcDriverHelper initialization when run is not created.""" - # Execute - # pylint: disable-next=protected-access - self.driver._get_grpc_driver_helper_and_run_id() - - # Assert - self.mock_grpc_driver_helper.connect.assert_called_once() - self.assertEqual(self.driver.run_id, 61016) + self.assertEqual(self.driver.run.run_id, 61016) + self.assertEqual(self.driver.run.fab_id, "mock/mock") + self.assertEqual(self.driver.run.fab_version, "v1.0.0") + self.assertEqual(self.driver.run.fab_hash, "9f86d08") + self.mock_stub.GetRun.assert_called_once() def test_get_nodes(self) -> None: """Test retrieval of nodes.""" # Prepare mock_response = Mock() mock_response.nodes = [Mock(node_id=404), Mock(node_id=200)] - self.mock_grpc_driver_helper.get_nodes.return_value = mock_response + self.mock_stub.GetNodes.return_value = mock_response # Execute node_ids = self.driver.get_node_ids() - args, kwargs = self.mock_grpc_driver_helper.get_nodes.call_args + args, kwargs = self.mock_stub.GetNodes.call_args # Assert - self.mock_grpc_driver_helper.connect.assert_called_once() + self.mock_stub.GetRun.assert_called_once() self.assertEqual(len(args), 1) self.assertEqual(len(kwargs), 0) self.assertIsInstance(args[0], GetNodesRequest) @@ -98,7 +86,7 @@ def test_push_messages_valid(self) -> None: """Test pushing valid messages.""" # Prepare mock_response = Mock(task_ids=["id1", "id2"]) - self.mock_grpc_driver_helper.push_task_ins.return_value = mock_response + self.mock_stub.PushTaskIns.return_value = mock_response msgs = [ self.driver.create_message(RecordSet(), "", 0, "", DEFAULT_TTL) for _ in range(2) @@ -106,10 +94,10 @@ def 
test_push_messages_valid(self) -> None: # Execute msg_ids = self.driver.push_messages(msgs) - args, kwargs = self.mock_grpc_driver_helper.push_task_ins.call_args + args, kwargs = self.mock_stub.PushTaskIns.call_args # Assert - self.mock_grpc_driver_helper.connect.assert_called_once() + self.mock_stub.GetRun.assert_called_once() self.assertEqual(len(args), 1) self.assertEqual(len(kwargs), 0) self.assertIsInstance(args[0], PushTaskInsRequest) @@ -121,7 +109,7 @@ def test_push_messages_invalid(self) -> None: """Test pushing invalid messages.""" # Prepare mock_response = Mock(task_ids=["id1", "id2"]) - self.mock_grpc_driver_helper.push_task_ins.return_value = mock_response + self.mock_stub.PushTaskIns.return_value = mock_response msgs = [ self.driver.create_message(RecordSet(), "", 0, "", DEFAULT_TTL) for _ in range(2) @@ -145,16 +133,16 @@ def test_pull_messages_with_given_message_ids(self) -> None: ), TaskRes(task=Task(ancestry=["id3"], error=error_to_proto(Error(code=0)))), ] - self.mock_grpc_driver_helper.pull_task_res.return_value = mock_response + self.mock_stub.PullTaskRes.return_value = mock_response msg_ids = ["id1", "id2", "id3"] # Execute msgs = self.driver.pull_messages(msg_ids) reply_tos = {msg.metadata.reply_to_message for msg in msgs} - args, kwargs = self.mock_grpc_driver_helper.pull_task_res.call_args + args, kwargs = self.mock_stub.PullTaskRes.call_args # Assert - self.mock_grpc_driver_helper.connect.assert_called_once() + self.mock_stub.GetRun.assert_called_once() self.assertEqual(len(args), 1) self.assertEqual(len(kwargs), 0) self.assertIsInstance(args[0], PullTaskResRequest) @@ -165,14 +153,14 @@ def test_send_and_receive_messages_complete(self) -> None: """Test send and receive all messages successfully.""" # Prepare mock_response = Mock(task_ids=["id1"]) - self.mock_grpc_driver_helper.push_task_ins.return_value = mock_response + self.mock_stub.PushTaskIns.return_value = mock_response # The response message must include either `content` (i.e. 
a recordset) or # an `Error`. We choose the latter in this case error_proto = error_to_proto(Error(code=0)) mock_response = Mock( task_res_list=[TaskRes(task=Task(ancestry=["id1"], error=error_proto))] ) - self.mock_grpc_driver_helper.pull_task_res.return_value = mock_response + self.mock_stub.PullTaskRes.return_value = mock_response msgs = [self.driver.create_message(RecordSet(), "", 0, "", DEFAULT_TTL)] # Execute @@ -187,9 +175,9 @@ def test_send_and_receive_messages_timeout(self) -> None: # Prepare sleep_fn = time.sleep mock_response = Mock(task_ids=["id1"]) - self.mock_grpc_driver_helper.push_task_ins.return_value = mock_response + self.mock_stub.PushTaskIns.return_value = mock_response mock_response = Mock(task_res_list=[]) - self.mock_grpc_driver_helper.pull_task_res.return_value = mock_response + self.mock_stub.PullTaskRes.return_value = mock_response msgs = [self.driver.create_message(RecordSet(), "", 0, "", DEFAULT_TTL)] # Execute @@ -203,20 +191,20 @@ def test_send_and_receive_messages_timeout(self) -> None: def test_del_with_initialized_driver(self) -> None: """Test cleanup behavior when Driver is initialized.""" - # Prepare - # pylint: disable-next=protected-access - self.driver._get_grpc_driver_helper_and_run_id() - # Execute self.driver.close() # Assert - self.mock_grpc_driver_helper.disconnect.assert_called_once() + self.mock_channel.close.assert_called_once() def test_del_with_uninitialized_driver(self) -> None: """Test cleanup behavior when Driver is not initialized.""" + # Prepare + self.driver._grpc_stub = None # pylint: disable=protected-access + self.driver._channel = None # pylint: disable=protected-access + # Execute self.driver.close() # Assert - self.mock_grpc_driver_helper.disconnect.assert_not_called() + self.mock_channel.close.assert_not_called() diff --git a/src/py/flwr/server/driver/inmemory_driver.py b/src/py/flwr/server/driver/inmemory_driver.py index 8c71b1067293..130562c6defa 100644 --- 
a/src/py/flwr/server/driver/inmemory_driver.py +++ b/src/py/flwr/server/driver/inmemory_driver.py @@ -17,11 +17,13 @@ import time import warnings -from typing import Iterable, List, Optional +from collections.abc import Iterable +from typing import Optional, cast from uuid import UUID from flwr.common import DEFAULT_TTL, Message, Metadata, RecordSet from flwr.common.serde import message_from_taskres, message_to_taskins +from flwr.common.typing import Run from flwr.proto.node_pb2 import Node # pylint: disable=E0611 from flwr.server.superlink.state import StateFactory @@ -33,30 +35,31 @@ class InMemoryDriver(Driver): Parameters ---------- + run_id : int + The identifier of the run. state_factory : StateFactory A StateFactory embedding a state that this driver can interface with. - fab_id : str (default: None) - The identifier of the FAB used in the run. - fab_version : str (default: None) - The version of the FAB used in the run. + pull_interval : float (default=0.1) + Sleep duration between calls to `pull_messages`. 
""" def __init__( self, + run_id: int, state_factory: StateFactory, - fab_id: Optional[str] = None, - fab_version: Optional[str] = None, + pull_interval: float = 0.1, ) -> None: - self.run_id: Optional[int] = None - self.fab_id = fab_id if fab_id is not None else "" - self.fab_version = fab_version if fab_version is not None else "" - self.node = Node(node_id=0, anonymous=True) + self._run_id = run_id + self._run: Optional[Run] = None self.state = state_factory.state() + self.pull_interval = pull_interval + self.node = Node(node_id=0, anonymous=True) def _check_message(self, message: Message) -> None: + self._init_run() # Check if the message is valid if not ( - message.metadata.run_id == self.run_id + message.metadata.run_id == cast(Run, self._run).run_id and message.metadata.src_node_id == self.node.node_id and message.metadata.message_id == "" and message.metadata.reply_to_message == "" @@ -64,18 +67,22 @@ def _check_message(self, message: Message) -> None: ): raise ValueError(f"Invalid message: {message}") - def _get_run_id(self) -> int: - """Return run_id. - - If unset, create a new run. - """ - if self.run_id is None: - self.run_id = self.state.create_run( - fab_id=self.fab_id, fab_version=self.fab_version - ) - return self.run_id - - def create_message( # pylint: disable=too-many-arguments + def _init_run(self) -> None: + """Initialize the run.""" + if self._run is not None: + return + run = self.state.get_run(self._run_id) + if run is None: + raise RuntimeError(f"Cannot find the run with ID: {self._run_id}") + self._run = run + + @property + def run(self) -> Run: + """Run ID.""" + self._init_run() + return Run(**vars(cast(Run, self._run))) + + def create_message( # pylint: disable=too-many-arguments,R0917 self, content: RecordSet, message_type: str, @@ -88,7 +95,7 @@ def create_message( # pylint: disable=too-many-arguments This method constructs a new `Message` with given content and metadata. The `run_id` and `src_node_id` will be set automatically. 
""" - run_id = self._get_run_id() + self._init_run() if ttl: warnings.warn( "A custom TTL was set, but note that the SuperLink does not enforce " @@ -99,7 +106,7 @@ def create_message( # pylint: disable=too-many-arguments ttl_ = DEFAULT_TTL if ttl is None else ttl metadata = Metadata( - run_id=run_id, + run_id=cast(Run, self._run).run_id, message_id="", # Will be set by the server src_node_id=self.node.node_id, dst_node_id=dst_node_id, @@ -110,10 +117,10 @@ def create_message( # pylint: disable=too-many-arguments ) return Message(metadata=metadata, content=content) - def get_node_ids(self) -> List[int]: + def get_node_ids(self) -> list[int]: """Get node IDs.""" - run_id = self._get_run_id() - return list(self.state.get_nodes(run_id)) + self._init_run() + return list(self.state.get_nodes(cast(Run, self._run).run_id)) def push_messages(self, messages: Iterable[Message]) -> Iterable[str]: """Push messages to specified node IDs. @@ -121,7 +128,7 @@ def push_messages(self, messages: Iterable[Message]) -> Iterable[str]: This method takes an iterable of messages and sends each message to the node specified in `dst_node_id`. 
""" - task_ids: List[str] = [] + task_ids: list[str] = [] for msg in messages: # Check message self._check_message(msg) @@ -143,7 +150,7 @@ def pull_messages(self, message_ids: Iterable[str]) -> Iterable[Message]: """ msg_ids = {UUID(msg_id) for msg_id in message_ids} # Pull TaskRes - task_res_list = self.state.get_task_res(task_ids=msg_ids, limit=len(msg_ids)) + task_res_list = self.state.get_task_res(task_ids=msg_ids) # Delete tasks in state self.state.delete_tasks(msg_ids) # Convert TaskRes to Message @@ -167,7 +174,7 @@ def send_and_receive( # Pull messages end_time = time.time() + (timeout if timeout is not None else 0.0) - ret: List[Message] = [] + ret: list[Message] = [] while timeout is None or time.time() < end_time: res_msgs = self.pull_messages(msg_ids) ret.extend(res_msgs) @@ -177,5 +184,5 @@ def send_and_receive( if len(msg_ids) == 0: break # Sleep - time.sleep(3) + time.sleep(self.pull_interval) return ret diff --git a/src/py/flwr/server/driver/inmemory_driver_test.py b/src/py/flwr/server/driver/inmemory_driver_test.py index 95c2a0b277af..9e5aaeaa9ca7 100644 --- a/src/py/flwr/server/driver/inmemory_driver_test.py +++ b/src/py/flwr/server/driver/inmemory_driver_test.py @@ -1,4 +1,4 @@ -# Copyright 2022 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -15,15 +15,14 @@ """Tests for in-memory driver.""" -import os import time import unittest -from typing import Iterable, List, Tuple +from collections.abc import Iterable from unittest.mock import MagicMock, patch from uuid import uuid4 from flwr.common import RecordSet -from flwr.common.constant import PING_MAX_INTERVAL +from flwr.common.constant import NODE_ID_NUM_BYTES, PING_MAX_INTERVAL from flwr.common.message import Error from flwr.common.serde import ( error_to_proto, @@ -31,13 +30,15 @@ message_to_taskres, recordset_to_proto, ) +from flwr.common.typing import Run from flwr.proto.task_pb2 import Task, TaskRes # pylint: disable=E0611 -from flwr.server.superlink.state import StateFactory +from flwr.server.superlink.state import InMemoryState, SqliteState, StateFactory +from flwr.server.superlink.state.utils import generate_rand_int_from_bytes from .inmemory_driver import InMemoryDriver -def push_messages(driver: InMemoryDriver, num_nodes: int) -> Tuple[Iterable[str], int]: +def push_messages(driver: InMemoryDriver, num_nodes: int) -> tuple[Iterable[str], int]: """Help push messages to state.""" for _ in range(num_nodes): driver.state.create_node(ping_interval=PING_MAX_INTERVAL) @@ -54,7 +55,7 @@ def push_messages(driver: InMemoryDriver, num_nodes: int) -> Tuple[Iterable[str] def get_replies( driver: InMemoryDriver, msg_ids: Iterable[str], node_id: int -) -> List[str]: +) -> list[str]: """Help create message replies and pull taskres from state.""" taskins = driver.state.get_task_ins(node_id, limit=len(list(msg_ids))) for taskin in taskins: @@ -79,12 +80,30 @@ def setUp(self) -> None: """ # Create driver self.num_nodes = 42 - self.driver = InMemoryDriver(StateFactory("")) - self.driver.state = MagicMock() - self.driver.state.get_nodes.return_value = [ - int.from_bytes(os.urandom(8), "little", signed=True) + self.state = MagicMock() + self.state.get_nodes.return_value = [ + generate_rand_int_from_bytes(NODE_ID_NUM_BYTES) for _ in range(self.num_nodes) ] + 
self.state.get_run.return_value = Run( + run_id=61016, + fab_id="mock/mock", + fab_version="v1.0.0", + fab_hash="9f86d08", + override_config={"test_key": "test_value"}, + ) + state_factory = MagicMock(state=lambda: self.state) + self.driver = InMemoryDriver(run_id=61016, state_factory=state_factory) + self.driver.state = self.state + + def test_get_run(self) -> None: + """Test the InMemoryDriver starting with run_id.""" + # Assert + self.assertEqual(self.driver.run.run_id, 61016) + self.assertEqual(self.driver.run.fab_id, "mock/mock") + self.assertEqual(self.driver.run.fab_version, "v1.0.0") + self.assertEqual(self.driver.run.fab_hash, "9f86d08") + self.assertEqual(self.driver.run.override_config["test_key"], "test_value") def test_get_nodes(self) -> None: """Test retrieval of nodes.""" @@ -104,7 +123,7 @@ def test_push_messages_valid(self) -> None: ] taskins_ids = [uuid4() for _ in range(num_messages)] - self.driver.state.store_task_ins.side_effect = taskins_ids # type: ignore + self.state.store_task_ins.side_effect = taskins_ids # Execute msg_ids = list(self.driver.push_messages(msgs)) @@ -141,7 +160,7 @@ def test_pull_messages_with_given_message_ids(self) -> None: task=Task(ancestry=[msg_ids[1]], error=error_to_proto(Error(code=0))) ), ] - self.driver.state.get_task_res.return_value = task_res_list # type: ignore + self.state.get_task_res.return_value = task_res_list # Execute pulled_msgs = list(self.driver.pull_messages(msg_ids)) @@ -167,8 +186,8 @@ def test_send_and_receive_messages_complete(self) -> None: task=Task(ancestry=[msg_ids[1]], error=error_to_proto(Error(code=0))) ), ] - self.driver.state.store_task_ins.side_effect = msg_ids # type: ignore - self.driver.state.get_task_res.return_value = task_res_list # type: ignore + self.state.store_task_ins.side_effect = msg_ids + self.state.get_task_res.return_value = task_res_list # Execute ret_msgs = list(self.driver.send_and_receive(msgs)) @@ -193,8 +212,8 @@ def test_send_and_receive_messages_timeout(self) -> 
None: task=Task(ancestry=[msg_ids[1]], error=error_to_proto(Error(code=0))) ), ] - self.driver.state.store_task_ins.side_effect = msg_ids # type: ignore - self.driver.state.get_task_res.return_value = task_res_list # type: ignore + self.state.store_task_ins.side_effect = msg_ids + self.state.get_task_res.return_value = task_res_list # Execute with patch("time.sleep", side_effect=lambda t: time.sleep(t * 0.01)): @@ -208,19 +227,23 @@ def test_send_and_receive_messages_timeout(self) -> None: def test_task_store_consistency_after_push_pull_sqlitestate(self) -> None: """Test tasks are deleted in sqlite state once messages are pulled.""" # Prepare - self.driver = InMemoryDriver(StateFactory("")) + state = StateFactory("").state() + self.driver = InMemoryDriver( + state.create_run("", "", "", {}), MagicMock(state=lambda: state) + ) msg_ids, node_id = push_messages(self.driver, self.num_nodes) + assert isinstance(state, SqliteState) # Check recorded - task_ins = self.driver.state.query("SELECT * FROM task_ins;") # type: ignore + task_ins = state.query("SELECT * FROM task_ins;") self.assertEqual(len(task_ins), len(list(msg_ids))) # Prepare: create replies reply_tos = get_replies(self.driver, msg_ids, node_id) # Query number of task_ins and task_res in State - task_res = self.driver.state.query("SELECT * FROM task_res;") # type: ignore - task_ins = self.driver.state.query("SELECT * FROM task_ins;") # type: ignore + task_res = state.query("SELECT * FROM task_res;") + task_ins = state.query("SELECT * FROM task_ins;") # Assert self.assertEqual(reply_tos, msg_ids) @@ -230,18 +253,19 @@ def test_task_store_consistency_after_push_pull_sqlitestate(self) -> None: def test_task_store_consistency_after_push_pull_inmemory_state(self) -> None: """Test tasks are deleted in in-memory state once messages are pulled.""" # Prepare - self.driver = InMemoryDriver(StateFactory(":flwr-in-memory-state:")) + state_factory = StateFactory(":flwr-in-memory-state:") + state = state_factory.state() + 
self.driver = InMemoryDriver(state.create_run("", "", "", {}), state_factory) msg_ids, node_id = push_messages(self.driver, self.num_nodes) + assert isinstance(state, InMemoryState) # Check recorded - self.assertEqual( - len(self.driver.state.task_ins_store), len(list(msg_ids)) # type: ignore - ) + self.assertEqual(len(state.task_ins_store), len(list(msg_ids))) # Prepare: create replies reply_tos = get_replies(self.driver, msg_ids, node_id) # Assert self.assertEqual(reply_tos, msg_ids) - self.assertEqual(len(self.driver.state.task_res_store), 0) # type: ignore - self.assertEqual(len(self.driver.state.task_ins_store), 0) # type: ignore + self.assertEqual(len(state.task_res_store), 0) + self.assertEqual(len(state.task_ins_store), 0) diff --git a/src/py/flwr/server/history.py b/src/py/flwr/server/history.py index 291974a4323c..50daf2e04de6 100644 --- a/src/py/flwr/server/history.py +++ b/src/py/flwr/server/history.py @@ -17,7 +17,6 @@ import pprint from functools import reduce -from typing import Dict, List, Tuple from flwr.common.typing import Scalar @@ -26,11 +25,11 @@ class History: """History class for training and/or evaluation metrics collection.""" def __init__(self) -> None: - self.losses_distributed: List[Tuple[int, float]] = [] - self.losses_centralized: List[Tuple[int, float]] = [] - self.metrics_distributed_fit: Dict[str, List[Tuple[int, Scalar]]] = {} - self.metrics_distributed: Dict[str, List[Tuple[int, Scalar]]] = {} - self.metrics_centralized: Dict[str, List[Tuple[int, Scalar]]] = {} + self.losses_distributed: list[tuple[int, float]] = [] + self.losses_centralized: list[tuple[int, float]] = [] + self.metrics_distributed_fit: dict[str, list[tuple[int, Scalar]]] = {} + self.metrics_distributed: dict[str, list[tuple[int, Scalar]]] = {} + self.metrics_centralized: dict[str, list[tuple[int, Scalar]]] = {} def add_loss_distributed(self, server_round: int, loss: float) -> None: """Add one loss entry (from distributed evaluation).""" @@ -41,7 +40,7 @@ def 
add_loss_centralized(self, server_round: int, loss: float) -> None: self.losses_centralized.append((server_round, loss)) def add_metrics_distributed_fit( - self, server_round: int, metrics: Dict[str, Scalar] + self, server_round: int, metrics: dict[str, Scalar] ) -> None: """Add metrics entries (from distributed fit).""" for key in metrics: @@ -52,7 +51,7 @@ def add_metrics_distributed_fit( self.metrics_distributed_fit[key].append((server_round, metrics[key])) def add_metrics_distributed( - self, server_round: int, metrics: Dict[str, Scalar] + self, server_round: int, metrics: dict[str, Scalar] ) -> None: """Add metrics entries (from distributed evaluation).""" for key in metrics: @@ -63,7 +62,7 @@ def add_metrics_distributed( self.metrics_distributed[key].append((server_round, metrics[key])) def add_metrics_centralized( - self, server_round: int, metrics: Dict[str, Scalar] + self, server_round: int, metrics: dict[str, Scalar] ) -> None: """Add metrics entries (from centralized evaluation).""" for key in metrics: diff --git a/src/py/flwr/server/history_test.py b/src/py/flwr/server/history_test.py index adb9d697e409..b53357149623 100644 --- a/src/py/flwr/server/history_test.py +++ b/src/py/flwr/server/history_test.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2021 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/src/py/flwr/server/run_serverapp.py b/src/py/flwr/server/run_serverapp.py index 9cc7974d34da..28a66e136639 100644 --- a/src/py/flwr/server/run_serverapp.py +++ b/src/py/flwr/server/run_serverapp.py @@ -21,17 +21,35 @@ from pathlib import Path from typing import Optional +from flwr.cli.config_utils import get_fab_metadata +from flwr.cli.install import install_from_fab from flwr.common import Context, EventType, RecordSet, event -from flwr.common.logger import log, update_console_handler +from flwr.common.config import ( + get_flwr_dir, + get_fused_config_from_dir, + get_metadata_from_config, + get_project_config, + get_project_dir, +) +from flwr.common.constant import DRIVER_API_DEFAULT_ADDRESS +from flwr.common.logger import log, update_console_handler, warn_deprecated_feature from flwr.common.object_ref import load_app +from flwr.common.typing import UserConfig +from flwr.proto.fab_pb2 import GetFabRequest, GetFabResponse # pylint: disable=E0611 +from flwr.proto.run_pb2 import ( # pylint: disable=E0611 + CreateRunRequest, + CreateRunResponse, +) -from .driver import Driver, GrpcDriver +from .driver import Driver +from .driver.grpc_driver import GrpcDriver from .server_app import LoadServerAppError, ServerApp def run( driver: Driver, server_app_dir: str, + server_app_run_config: UserConfig, server_app_attr: Optional[str] = None, loaded_server_app: Optional[ServerApp] = None, ) -> None: @@ -39,16 +57,15 @@ def run( if not (server_app_attr is None) ^ (loaded_server_app is None): raise ValueError( "Either `server_app_attr` or `loaded_server_app` should be set " - "but not both. " + "but not both." 
) - if server_app_dir is not None: - sys.path.insert(0, server_app_dir) - # Load ServerApp if needed def _load() -> ServerApp: if server_app_attr: - server_app: ServerApp = load_app(server_app_attr, LoadServerAppError) + server_app: ServerApp = load_app( + server_app_attr, LoadServerAppError, server_app_dir + ) if not isinstance(server_app, ServerApp): raise LoadServerAppError( @@ -62,7 +79,9 @@ def _load() -> ServerApp: server_app = _load() # Initialize Context - context = Context(state=RecordSet()) + context = Context( + node_id=0, node_config={}, state=RecordSet(), run_config=server_app_run_config + ) # Call ServerApp server_app(driver=driver, context=context) @@ -70,12 +89,45 @@ def _load() -> ServerApp: log(DEBUG, "ServerApp finished running.") +# pylint: disable-next=too-many-branches,too-many-statements,too-many-locals def run_server_app() -> None: """Run Flower server app.""" event(EventType.RUN_SERVER_APP_ENTER) args = _parse_args_run_server_app().parse_args() + # Check if the server app reference is passed. + # Since Flower 1.11, passing a reference is not allowed. + app_path: Optional[str] = args.app + # If the provided app_path doesn't exist, and contains a ":", + # it is likely to be a server app reference instead of a path. + if app_path is not None and not Path(app_path).exists() and ":" in app_path: + sys.exit( + "It appears you've passed a reference like `server:app`.\n\n" + "Note that since version `1.11.0`, `flower-server-app` no longer supports " + "passing a reference to a `ServerApp` attribute. Instead, you need to pass " + "the path to Flower app via the argument `--app`. This is the path to a " + "directory containing a `pyproject.toml`. You can create a valid Flower " + "app by executing `flwr new` and following the prompt." + ) + + if args.server != DRIVER_API_DEFAULT_ADDRESS: + warn = "Passing flag --server is deprecated. Use --superlink instead." 
+ warn_deprecated_feature(warn) + + if args.superlink != DRIVER_API_DEFAULT_ADDRESS: + # if `--superlink` also passed, then + # warn user that this argument overrides what was passed with `--server` + log( + WARN, + "Both `--server` and `--superlink` were passed. " + "`--server` will be ignored. Connecting to the Superlink Driver API " + "at %s.", + args.superlink, + ) + else: + args.superlink = args.server + update_console_handler( level=DEBUG if args.verbose else INFO, timestamps=args.verbose, @@ -95,7 +147,7 @@ def run_server_app() -> None: WARN, "Option `--insecure` was set. " "Starting insecure HTTP client connected to %s.", - args.server, + args.superlink, ) root_certificates = None else: @@ -109,35 +161,77 @@ def run_server_app() -> None: DEBUG, "Starting secure HTTPS client connected to %s " "with the following certificates: %s.", - args.server, + args.superlink, cert_path, ) - log( - DEBUG, - "Flower will load ServerApp `%s`", - getattr(args, "server-app"), + if not (app_path is None) ^ (args.run_id is None): + raise sys.exit( + "Please provide either a Flower App path or a Run ID, but not both. 
" + "For more details, use: ``flower-server-app -h``" + ) + + # Initialize GrpcDriver + if app_path is None: + # User provided `--run-id`, but not `app_dir` + driver = GrpcDriver( + run_id=args.run_id, + driver_service_address=args.superlink, + root_certificates=root_certificates, + ) + flwr_dir = get_flwr_dir(args.flwr_dir) + run_ = driver.run + if not run_.fab_hash: + raise ValueError("FAB hash not provided.") + fab_req = GetFabRequest(hash_str=run_.fab_hash) + # pylint: disable-next=W0212 + fab_res: GetFabResponse = driver._stub.GetFab(fab_req) + if fab_res.fab.hash_str != run_.fab_hash: + raise ValueError("FAB hashes don't match.") + install_from_fab(fab_res.fab.content, flwr_dir, True) + fab_id, fab_version = get_fab_metadata(fab_res.fab.content) + + app_path = str(get_project_dir(fab_id, fab_version, run_.fab_hash, flwr_dir)) + config = get_project_config(app_path) + else: + # User provided `app_dir`, but not `--run-id` + # Create run if run_id is not provided + driver = GrpcDriver( + run_id=0, # Will be overwritten + driver_service_address=args.superlink, + root_certificates=root_certificates, + ) + # Load config from the project directory + config = get_project_config(app_path) + fab_version, fab_id = get_metadata_from_config(config) + + # Create run + req = CreateRunRequest(fab_id=fab_id, fab_version=fab_version) + res: CreateRunResponse = driver._stub.CreateRun(req) # pylint: disable=W0212 + # Overwrite driver._run_id + driver._run_id = res.run_id # pylint: disable=W0212 + + # Obtain server app reference and the run config + server_app_attr = config["tool"]["flwr"]["app"]["components"]["serverapp"] + server_app_run_config = get_fused_config_from_dir( + Path(app_path), driver.run.override_config ) + log(DEBUG, "Flower will load ServerApp `%s` in %s", server_app_attr, app_path) + log( DEBUG, "root_certificates: `%s`", root_certificates, ) - server_app_dir = args.dir - server_app_attr = getattr(args, "server-app") - - # Initialize GrpcDriver - driver = 
GrpcDriver( - driver_service_address=args.server, - root_certificates=root_certificates, - fab_id=args.fab_id, - fab_version=args.fab_version, - ) - # Run the ServerApp with the Driver - run(driver=driver, server_app_dir=server_app_dir, server_app_attr=server_app_attr) + run( + driver=driver, + server_app_dir=app_path, + server_app_run_config=server_app_run_config, + server_app_attr=server_app_attr, + ) # Clean up driver.close() @@ -152,13 +246,16 @@ def _parse_args_run_server_app() -> argparse.ArgumentParser: ) parser.add_argument( - "server-app", - help="For example: `server:app` or `project.package.module:wrapper.app`", + "app", + nargs="?", + default=None, + help="Load and run the `ServerApp` from the specified Flower App path. " + "The `pyproject.toml` file must be located in the root of this path.", ) parser.add_argument( "--insecure", action="store_true", - help="Run the server app without HTTPS. By default, the app runs with " + help="Run the `ServerApp` without HTTPS. By default, the app runs with " "HTTPS enabled. Use this flag only if you understand the risks.", ) parser.add_argument( @@ -175,27 +272,30 @@ def _parse_args_run_server_app() -> argparse.ArgumentParser: ) parser.add_argument( "--server", - default="0.0.0.0:9091", + default=DRIVER_API_DEFAULT_ADDRESS, help="Server address", ) parser.add_argument( - "--dir", - default="", - help="Add specified directory to the PYTHONPATH and load Flower " - "app from there." - " Default: current working directory.", + "--superlink", + default=DRIVER_API_DEFAULT_ADDRESS, + help="SuperLink Driver API (gRPC-rere) address (IPv4, IPv6, or a domain name)", ) parser.add_argument( - "--fab-id", + "--run-id", default=None, - type=str, - help="The identifier of the FAB used in the run.", + type=int, + help="The identifier of the run.", ) parser.add_argument( - "--fab-version", + "--flwr-dir", default=None, - type=str, - help="The version of the FAB used in the run.", + help="""The path containing installed Flower Apps. 
+ By default, this value is equal to: + + - `$FLWR_HOME/` if `$FLWR_HOME` is defined + - `$XDG_DATA_HOME/.flwr/` if `$XDG_DATA_HOME` is defined + - `$HOME/.flwr/` in all other cases + """, ) return parser diff --git a/src/py/flwr/server/server.py b/src/py/flwr/server/server.py index f1bfb6f0533b..bdaa11ba20a2 100644 --- a/src/py/flwr/server/server.py +++ b/src/py/flwr/server/server.py @@ -19,7 +19,7 @@ import io import timeit from logging import INFO, WARN -from typing import Dict, List, Optional, Tuple, Union +from typing import Optional, Union from flwr.common import ( Code, @@ -41,17 +41,17 @@ from .server_config import ServerConfig -FitResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, FitRes]], - List[Union[Tuple[ClientProxy, FitRes], BaseException]], +FitResultsAndFailures = tuple[ + list[tuple[ClientProxy, FitRes]], + list[Union[tuple[ClientProxy, FitRes], BaseException]], ] -EvaluateResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, EvaluateRes]], - List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], +EvaluateResultsAndFailures = tuple[ + list[tuple[ClientProxy, EvaluateRes]], + list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], ] -ReconnectResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, DisconnectRes]], - List[Union[Tuple[ClientProxy, DisconnectRes], BaseException]], +ReconnectResultsAndFailures = tuple[ + list[tuple[ClientProxy, DisconnectRes]], + list[Union[tuple[ClientProxy, DisconnectRes], BaseException]], ] @@ -84,14 +84,14 @@ def client_manager(self) -> ClientManager: return self._client_manager # pylint: disable=too-many-locals - def fit(self, num_rounds: int, timeout: Optional[float]) -> Tuple[History, float]: + def fit(self, num_rounds: int, timeout: Optional[float]) -> tuple[History, float]: """Run federated averaging for a number of rounds.""" history = History() # Initialize parameters log(INFO, "[INIT]") self.parameters = self._get_initial_parameters(server_round=0, timeout=timeout) - log(INFO, "Evaluating 
initial global parameters") + log(INFO, "Starting evaluation of initial global parameters") res = self.strategy.evaluate(0, parameters=self.parameters) if res is not None: log( @@ -102,6 +102,8 @@ def fit(self, num_rounds: int, timeout: Optional[float]) -> Tuple[History, float ) history.add_loss_centralized(server_round=0, loss=res[0]) history.add_metrics_centralized(server_round=0, metrics=res[1]) + else: + log(INFO, "Evaluation returned no results (`None`)") # Run federated learning for num_rounds start_time = timeit.default_timer() @@ -161,7 +163,7 @@ def evaluate_round( server_round: int, timeout: Optional[float], ) -> Optional[ - Tuple[Optional[float], Dict[str, Scalar], EvaluateResultsAndFailures] + tuple[Optional[float], dict[str, Scalar], EvaluateResultsAndFailures] ]: """Validate current global model on a number of clients.""" # Get clients and their respective instructions from strategy @@ -195,9 +197,9 @@ def evaluate_round( ) # Aggregate the evaluation results - aggregated_result: Tuple[ + aggregated_result: tuple[ Optional[float], - Dict[str, Scalar], + dict[str, Scalar], ] = self.strategy.aggregate_evaluate(server_round, results, failures) loss_aggregated, metrics_aggregated = aggregated_result @@ -208,7 +210,7 @@ def fit_round( server_round: int, timeout: Optional[float], ) -> Optional[ - Tuple[Optional[Parameters], Dict[str, Scalar], FitResultsAndFailures] + tuple[Optional[Parameters], dict[str, Scalar], FitResultsAndFailures] ]: """Perform a single round of federated averaging.""" # Get clients and their respective instructions from strategy @@ -243,9 +245,9 @@ def fit_round( ) # Aggregate training results - aggregated_result: Tuple[ + aggregated_result: tuple[ Optional[Parameters], - Dict[str, Scalar], + dict[str, Scalar], ] = self.strategy.aggregate_fit(server_round, results, failures) parameters_aggregated, metrics_aggregated = aggregated_result @@ -294,7 +296,7 @@ def _get_initial_parameters( def reconnect_clients( - client_instructions: 
List[Tuple[ClientProxy, ReconnectIns]], + client_instructions: list[tuple[ClientProxy, ReconnectIns]], max_workers: Optional[int], timeout: Optional[float], ) -> ReconnectResultsAndFailures: @@ -310,8 +312,8 @@ def reconnect_clients( ) # Gather results - results: List[Tuple[ClientProxy, DisconnectRes]] = [] - failures: List[Union[Tuple[ClientProxy, DisconnectRes], BaseException]] = [] + results: list[tuple[ClientProxy, DisconnectRes]] = [] + failures: list[Union[tuple[ClientProxy, DisconnectRes], BaseException]] = [] for future in finished_fs: failure = future.exception() if failure is not None: @@ -326,7 +328,7 @@ def reconnect_client( client: ClientProxy, reconnect: ReconnectIns, timeout: Optional[float], -) -> Tuple[ClientProxy, DisconnectRes]: +) -> tuple[ClientProxy, DisconnectRes]: """Instruct client to disconnect and (optionally) reconnect later.""" disconnect = client.reconnect( reconnect, @@ -337,7 +339,7 @@ def reconnect_client( def fit_clients( - client_instructions: List[Tuple[ClientProxy, FitIns]], + client_instructions: list[tuple[ClientProxy, FitIns]], max_workers: Optional[int], timeout: Optional[float], group_id: int, @@ -354,8 +356,8 @@ def fit_clients( ) # Gather results - results: List[Tuple[ClientProxy, FitRes]] = [] - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [] + results: list[tuple[ClientProxy, FitRes]] = [] + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [] for future in finished_fs: _handle_finished_future_after_fit( future=future, results=results, failures=failures @@ -365,7 +367,7 @@ def fit_clients( def fit_client( client: ClientProxy, ins: FitIns, timeout: Optional[float], group_id: int -) -> Tuple[ClientProxy, FitRes]: +) -> tuple[ClientProxy, FitRes]: """Refine parameters on a single client.""" fit_res = client.fit(ins, timeout=timeout, group_id=group_id) return client, fit_res @@ -373,8 +375,8 @@ def fit_client( def _handle_finished_future_after_fit( future: concurrent.futures.Future, # 
type: ignore - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], ) -> None: """Convert finished future into either a result or a failure.""" # Check if there was an exception @@ -384,7 +386,7 @@ def _handle_finished_future_after_fit( return # Successfully received a result from a client - result: Tuple[ClientProxy, FitRes] = future.result() + result: tuple[ClientProxy, FitRes] = future.result() _, res = result # Check result status code @@ -397,7 +399,7 @@ def _handle_finished_future_after_fit( def evaluate_clients( - client_instructions: List[Tuple[ClientProxy, EvaluateIns]], + client_instructions: list[tuple[ClientProxy, EvaluateIns]], max_workers: Optional[int], timeout: Optional[float], group_id: int, @@ -414,8 +416,8 @@ def evaluate_clients( ) # Gather results - results: List[Tuple[ClientProxy, EvaluateRes]] = [] - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]] = [] + results: list[tuple[ClientProxy, EvaluateRes]] = [] + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]] = [] for future in finished_fs: _handle_finished_future_after_evaluate( future=future, results=results, failures=failures @@ -428,7 +430,7 @@ def evaluate_client( ins: EvaluateIns, timeout: Optional[float], group_id: int, -) -> Tuple[ClientProxy, EvaluateRes]: +) -> tuple[ClientProxy, EvaluateRes]: """Evaluate parameters on a single client.""" evaluate_res = client.evaluate(ins, timeout=timeout, group_id=group_id) return client, evaluate_res @@ -436,8 +438,8 @@ def evaluate_client( def _handle_finished_future_after_evaluate( future: concurrent.futures.Future, # type: ignore - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], + results: list[tuple[ClientProxy, EvaluateRes]], + failures: 
list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], ) -> None: """Convert finished future into either a result or a failure.""" # Check if there was an exception @@ -447,7 +449,7 @@ def _handle_finished_future_after_evaluate( return # Successfully received a result from a client - result: Tuple[ClientProxy, EvaluateRes] = future.result() + result: tuple[ClientProxy, EvaluateRes] = future.result() _, res = result # Check result status code @@ -464,7 +466,7 @@ def init_defaults( config: Optional[ServerConfig], strategy: Optional[Strategy], client_manager: Optional[ClientManager], -) -> Tuple[Server, ServerConfig]: +) -> tuple[Server, ServerConfig]: """Create server instance if none was given.""" if server is None: if client_manager is None: diff --git a/src/py/flwr/server/server_app.py b/src/py/flwr/server/server_app.py index ea2eb3fd1a69..9d91be88e94e 100644 --- a/src/py/flwr/server/server_app.py +++ b/src/py/flwr/server/server_app.py @@ -17,8 +17,11 @@ from typing import Callable, Optional -from flwr.common import Context, RecordSet -from flwr.common.logger import warn_preview_feature +from flwr.common import Context +from flwr.common.logger import ( + warn_deprecated_feature_with_example, + warn_preview_feature, +) from flwr.server.strategy import Strategy from .client_manager import ClientManager @@ -26,7 +29,20 @@ from .driver import Driver from .server import Server from .server_config import ServerConfig -from .typing import ServerAppCallable +from .typing import ServerAppCallable, ServerFn + +SERVER_FN_USAGE_EXAMPLE = """ + + def server_fn(context: Context): + server_config = ServerConfig(num_rounds=3) + strategy = FedAvg() + return ServerAppComponents( + strategy=strategy, + server_config=server_config, + ) + + app = ServerApp(server_fn=server_fn) +""" class ServerApp: @@ -36,13 +52,15 @@ class ServerApp: -------- Use the `ServerApp` with an existing `Strategy`: - >>> server_config = ServerConfig(num_rounds=3) - >>> strategy = FedAvg() + >>> def 
server_fn(context: Context): + >>> server_config = ServerConfig(num_rounds=3) + >>> strategy = FedAvg() + >>> return ServerAppComponents( + >>> strategy=strategy, + >>> server_config=server_config, + >>> ) >>> - >>> app = ServerApp() - >>> server_config=server_config, - >>> strategy=strategy, - >>> ) + >>> app = ServerApp(server_fn=server_fn) Use the `ServerApp` with a custom main function: @@ -53,23 +71,52 @@ class ServerApp: >>> print("ServerApp running") """ + # pylint: disable=too-many-arguments,too-many-positional-arguments def __init__( self, server: Optional[Server] = None, config: Optional[ServerConfig] = None, strategy: Optional[Strategy] = None, client_manager: Optional[ClientManager] = None, + server_fn: Optional[ServerFn] = None, ) -> None: + if any([server, config, strategy, client_manager]): + warn_deprecated_feature_with_example( + deprecation_message="Passing either `server`, `config`, `strategy` or " + "`client_manager` directly to the ServerApp " + "constructor is deprecated.", + example_message="Pass `ServerApp` arguments wrapped " + "in a `flwr.server.ServerAppComponents` object that gets " + "returned by a function passed as the `server_fn` argument " + "to the `ServerApp` constructor. For example: ", + code_example=SERVER_FN_USAGE_EXAMPLE, + ) + + if server_fn: + raise ValueError( + "Passing `server_fn` is incompatible with passing the " + "other arguments (now deprecated) to ServerApp. " + "Use `server_fn` exclusively." 
+ ) + self._server = server self._config = config self._strategy = strategy self._client_manager = client_manager + self._server_fn = server_fn self._main: Optional[ServerAppCallable] = None def __call__(self, driver: Driver, context: Context) -> None: """Execute `ServerApp`.""" # Compatibility mode if not self._main: + if self._server_fn: + # Execute server_fn() + components = self._server_fn(context) + self._server = components.server + self._config = components.config + self._strategy = components.strategy + self._client_manager = components.client_manager start_driver( server=self._server, config=self._config, @@ -80,7 +127,6 @@ def __call__(self, driver: Driver, context: Context) -> None: return # New execution mode - context = Context(state=RecordSet()) self._main(driver, context) def main(self) -> Callable[[ServerAppCallable], ServerAppCallable]: @@ -106,7 +152,7 @@ def main_decorator(main_fn: ServerAppCallable) -> ServerAppCallable: >>> server_config = ServerConfig(num_rounds=3) >>> strategy = FedAvg() >>> - >>> app = ServerApp() + >>> app = ServerApp( >>> server_config=server_config, >>> strategy=strategy, >>> ) diff --git a/src/py/flwr/server/server_app_test.py b/src/py/flwr/server/server_app_test.py index 38c0d6240d90..b0672b3202ed 100644 --- a/src/py/flwr/server/server_app_test.py +++ b/src/py/flwr/server/server_app_test.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -29,7 +29,7 @@ def test_server_app_custom_mode() -> None: # Prepare app = ServerApp() driver = MagicMock() - context = Context(state=RecordSet()) + context = Context(node_id=0, node_config={}, state=RecordSet(), run_config={}) called = {"called": False} diff --git a/src/py/flwr/server/server_test.py b/src/py/flwr/server/server_test.py index f47b5c3d8469..6e8f423fe115 100644 --- a/src/py/flwr/server/server_test.py +++ b/src/py/flwr/server/server_test.py @@ -19,7 +19,7 @@ import csv import tempfile from pathlib import Path -from typing import List, Optional +from typing import Optional import numpy as np from cryptography.hazmat.primitives.asymmetric import ec @@ -55,7 +55,7 @@ ) from flwr.server.client_manager import SimpleClientManager -from .app import _try_setup_client_authentication +from .app import _try_setup_node_authentication from .client_proxy import ClientProxy from .server import Server, evaluate_clients, fit_clients @@ -143,7 +143,7 @@ def reconnect( def test_fit_clients() -> None: """Test fit_clients.""" # Prepare - clients: List[ClientProxy] = [ + clients: list[ClientProxy] = [ FailingClient("0"), SuccessClient("1"), ] @@ -164,7 +164,7 @@ def test_fit_clients() -> None: def test_eval_clients() -> None: """Test eval_clients.""" # Prepare - clients: List[ClientProxy] = [ + clients: list[ClientProxy] = [ FailingClient("0"), SuccessClient("1"), ] @@ -203,8 +203,8 @@ def test_set_max_workers() -> None: assert server.max_workers == 42 -def test_setup_client_auth() -> None: # pylint: disable=R0914 - """Test setup client authentication.""" +def test_setup_node_auth() -> None: # pylint: disable=R0914 + """Test setup node authentication.""" # Prepare _, first_public_key = generate_key_pairs() private_key, public_key = generate_key_pairs() @@ -220,12 +220,12 @@ def test_setup_client_auth() -> None: # pylint: disable=R0914 # Execute with tempfile.TemporaryDirectory() as temp_dir: # Initialize temporary files - client_keys_file_path = Path(temp_dir) / 
"client_keys.csv" + node_keys_file_path = Path(temp_dir) / "node_keys.csv" server_private_key_path = Path(temp_dir) / "server_private_key" server_public_key_path = Path(temp_dir) / "server_public_key" # Fill the files with relevant keys - with open(client_keys_file_path, "w", newline="", encoding="utf-8") as csvfile: + with open(node_keys_file_path, "w", newline="", encoding="utf-8") as csvfile: writer = csv.writer(csvfile) writer.writerow( [ @@ -240,15 +240,15 @@ def test_setup_client_auth() -> None: # pylint: disable=R0914 server_public_key_path.write_bytes(server_public_key) server_private_key_path.write_bytes(server_private_key) - # Mock argparse with `require-client-authentication`` flag + # Mock argparse with `require-node-authentication`` flag mock_args = argparse.Namespace( - auth_list_public_keys=str(client_keys_file_path), + auth_list_public_keys=str(node_keys_file_path), auth_superlink_private_key=str(server_private_key_path), auth_superlink_public_key=str(server_public_key_path), ) - # Run _try_setup_client_authentication - result = _try_setup_client_authentication(mock_args, (b"", b"", b"")) + # Run _try_setup_node_authentication + result = _try_setup_node_authentication(mock_args, (b"", b"", b"")) expected_private_key = load_ssh_private_key(server_private_key, None) expected_public_key = load_ssh_public_key(server_public_key) diff --git a/src/py/flwr/server/serverapp_components.py b/src/py/flwr/server/serverapp_components.py new file mode 100644 index 000000000000..315f0a889a61 --- /dev/null +++ b/src/py/flwr/server/serverapp_components.py @@ -0,0 +1,52 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""ServerAppComponents for the ServerApp.""" + + +from dataclasses import dataclass +from typing import Optional + +from .client_manager import ClientManager +from .server import Server +from .server_config import ServerConfig +from .strategy import Strategy + + +@dataclass +class ServerAppComponents: # pylint: disable=too-many-instance-attributes + """Components to construct a ServerApp. + + Parameters + ---------- + server : Optional[Server] (default: None) + A server implementation, either `flwr.server.Server` or a subclass + thereof. If no instance is provided, one will be created internally. + config : Optional[ServerConfig] (default: None) + Currently supported values are `num_rounds` (int, default: 1) and + `round_timeout` in seconds (float, default: None). + strategy : Optional[Strategy] (default: None) + An implementation of the abstract base class + `flwr.server.strategy.Strategy`. If no strategy is provided, then + `flwr.server.strategy.FedAvg` will be used. + client_manager : Optional[ClientManager] (default: None) + An implementation of the class `flwr.server.ClientManager`. If no + implementation is provided, then `flwr.server.SimpleClientManager` + will be used. 
+ """ + + server: Optional[Server] = None + config: Optional[ServerConfig] = None + strategy: Optional[Strategy] = None + client_manager: Optional[ClientManager] = None diff --git a/src/py/flwr/server/strategy/__init__.py b/src/py/flwr/server/strategy/__init__.py index b7de9a946fff..e5bc30009819 100644 --- a/src/py/flwr/server/strategy/__init__.py +++ b/src/py/flwr/server/strategy/__init__.py @@ -53,9 +53,10 @@ "DPFedAvgAdaptive", "DPFedAvgFixed", "DifferentialPrivacyClientSideAdaptiveClipping", - "DifferentialPrivacyServerSideAdaptiveClipping", "DifferentialPrivacyClientSideFixedClipping", + "DifferentialPrivacyServerSideAdaptiveClipping", "DifferentialPrivacyServerSideFixedClipping", + "FaultTolerantFedAvg", "FedAdagrad", "FedAdam", "FedAvg", @@ -69,7 +70,6 @@ "FedXgbCyclic", "FedXgbNnAvg", "FedYogi", - "FaultTolerantFedAvg", "Krum", "QFedAvg", "Strategy", diff --git a/src/py/flwr/server/strategy/aggregate.py b/src/py/flwr/server/strategy/aggregate.py index c668b55eebe6..94beacba0087 100644 --- a/src/py/flwr/server/strategy/aggregate.py +++ b/src/py/flwr/server/strategy/aggregate.py @@ -15,8 +15,8 @@ """Aggregation functions for strategy implementations.""" # mypy: disallow_untyped_calls=False -from functools import reduce -from typing import Any, Callable, List, Tuple +from functools import partial, reduce +from typing import Any, Callable, Union import numpy as np @@ -24,7 +24,7 @@ from flwr.server.client_proxy import ClientProxy -def aggregate(results: List[Tuple[NDArrays, int]]) -> NDArrays: +def aggregate(results: list[tuple[NDArrays, int]]) -> NDArrays: """Compute weighted average.""" # Calculate the total number of examples used during training num_examples_total = sum(num_examples for (_, num_examples) in results) @@ -42,7 +42,7 @@ def aggregate(results: List[Tuple[NDArrays, int]]) -> NDArrays: return weights_prime -def aggregate_inplace(results: List[Tuple[ClientProxy, FitRes]]) -> NDArrays: +def aggregate_inplace(results: list[tuple[ClientProxy, 
FitRes]]) -> NDArrays: """Compute in-place weighted average.""" # Count total examples num_examples_total = sum(fit_res.num_examples for (_, fit_res) in results) @@ -52,22 +52,36 @@ def aggregate_inplace(results: List[Tuple[ClientProxy, FitRes]]) -> NDArrays: fit_res.num_examples / num_examples_total for _, fit_res in results ] + def _try_inplace( + x: NDArray, y: Union[NDArray, float], np_binary_op: np.ufunc + ) -> NDArray: + return ( # type: ignore[no-any-return] + np_binary_op(x, y, out=x) + if np.can_cast(y, x.dtype, casting="same_kind") + else np_binary_op(x, np.array(y, x.dtype), out=x) + ) + # Let's do in-place aggregation # Get first result, then add up each other params = [ - scaling_factors[0] * x for x in parameters_to_ndarrays(results[0][1].parameters) + _try_inplace(x, scaling_factors[0], np_binary_op=np.multiply) + for x in parameters_to_ndarrays(results[0][1].parameters) ] - for i, (_, fit_res) in enumerate(results[1:]): + + for i, (_, fit_res) in enumerate(results[1:], start=1): res = ( - scaling_factors[i + 1] * x + _try_inplace(x, scaling_factors[i], np_binary_op=np.multiply) for x in parameters_to_ndarrays(fit_res.parameters) ) - params = [reduce(np.add, layer_updates) for layer_updates in zip(params, res)] + params = [ + reduce(partial(_try_inplace, np_binary_op=np.add), layer_updates) + for layer_updates in zip(params, res) + ] return params -def aggregate_median(results: List[Tuple[NDArrays, int]]) -> NDArrays: +def aggregate_median(results: list[tuple[NDArrays, int]]) -> NDArrays: """Compute median.""" # Create a list of weights and ignore the number of examples weights = [weights for weights, _ in results] @@ -80,7 +94,7 @@ def aggregate_median(results: List[Tuple[NDArrays, int]]) -> NDArrays: def aggregate_krum( - results: List[Tuple[NDArrays, int]], num_malicious: int, to_keep: int + results: list[tuple[NDArrays, int]], num_malicious: int, to_keep: int ) -> NDArrays: """Choose one parameter vector according to the Krum function. 
@@ -119,7 +133,7 @@ def aggregate_krum( # pylint: disable=too-many-locals def aggregate_bulyan( - results: List[Tuple[NDArrays, int]], + results: list[tuple[NDArrays, int]], num_malicious: int, aggregation_rule: Callable, # type: ignore **aggregation_rule_kwargs: Any, @@ -128,7 +142,7 @@ def aggregate_bulyan( Parameters ---------- - results: List[Tuple[NDArrays, int]] + results: list[tuple[NDArrays, int]] Weights and number of samples for each of the client. num_malicious: int The maximum number of malicious clients. @@ -155,7 +169,7 @@ def aggregate_bulyan( "It is needed to ensure that the method reduces the attacker's leeway to " "the one proved in the paper." ) - selected_models_set: List[Tuple[NDArrays, int]] = [] + selected_models_set: list[tuple[NDArrays, int]] = [] theta = len(results) - 2 * num_malicious beta = theta - 2 * num_malicious @@ -200,7 +214,7 @@ def aggregate_bulyan( return parameters_aggregated -def weighted_loss_avg(results: List[Tuple[int, float]]) -> float: +def weighted_loss_avg(results: list[tuple[int, float]]) -> float: """Aggregate evaluation results obtained from multiple clients.""" num_total_evaluation_examples = sum(num_examples for (num_examples, _) in results) weighted_losses = [num_examples * loss for num_examples, loss in results] @@ -208,7 +222,7 @@ def weighted_loss_avg(results: List[Tuple[int, float]]) -> float: def aggregate_qffl( - parameters: NDArrays, deltas: List[NDArrays], hs_fll: List[NDArrays] + parameters: NDArrays, deltas: list[NDArrays], hs_fll: list[NDArrays] ) -> NDArrays: """Compute weighted average based on Q-FFL paper.""" demominator: float = np.sum(np.asarray(hs_fll)) @@ -225,7 +239,7 @@ def aggregate_qffl( return new_parameters -def _compute_distances(weights: List[NDArrays]) -> NDArray: +def _compute_distances(weights: list[NDArrays]) -> NDArray: """Compute distances between vectors. 
Input: weights - list of weights vectors @@ -265,7 +279,7 @@ def _trim_mean(array: NDArray, proportiontocut: float) -> NDArray: def aggregate_trimmed_avg( - results: List[Tuple[NDArrays, int]], proportiontocut: float + results: list[tuple[NDArrays, int]], proportiontocut: float ) -> NDArrays: """Compute trimmed average.""" # Create a list of weights and ignore the number of examples @@ -290,7 +304,7 @@ def _check_weights_equality(weights1: NDArrays, weights2: NDArrays) -> bool: def _find_reference_weights( - reference_weights: NDArrays, list_of_weights: List[NDArrays] + reference_weights: NDArrays, list_of_weights: list[NDArrays] ) -> int: """Find the reference weights by looping through the `list_of_weights`. @@ -320,7 +334,7 @@ def _find_reference_weights( def _aggregate_n_closest_weights( - reference_weights: NDArrays, results: List[Tuple[NDArrays, int]], beta_closest: int + reference_weights: NDArrays, results: list[tuple[NDArrays, int]], beta_closest: int ) -> NDArrays: """Calculate element-wise mean of the `N` closest values. 
@@ -332,7 +346,7 @@ def _aggregate_n_closest_weights( ---------- reference_weights: NDArrays The weights from which the distances will be computed - results: List[Tuple[NDArrays, int]] + results: list[tuple[NDArrays, int]] The weights from models beta_closest: int The number of the closest distance weights that will be averaged diff --git a/src/py/flwr/server/strategy/aggregate_test.py b/src/py/flwr/server/strategy/aggregate_test.py index f8b4e3c03b50..9f9dba79ec7c 100644 --- a/src/py/flwr/server/strategy/aggregate_test.py +++ b/src/py/flwr/server/strategy/aggregate_test.py @@ -15,8 +15,6 @@ """Aggregation function tests.""" -from typing import List, Tuple - import numpy as np from .aggregate import ( @@ -49,7 +47,7 @@ def test_aggregate() -> None: def test_weighted_loss_avg_single_value() -> None: """Test weighted loss averaging.""" # Prepare - results: List[Tuple[int, float]] = [(5, 0.5)] + results: list[tuple[int, float]] = [(5, 0.5)] expected = 0.5 # Execute @@ -62,7 +60,7 @@ def test_weighted_loss_avg_single_value() -> None: def test_weighted_loss_avg_multiple_values() -> None: """Test weighted loss averaging.""" # Prepare - results: List[Tuple[int, float]] = [(1, 2.0), (2, 1.0), (1, 2.0)] + results: list[tuple[int, float]] = [(1, 2.0), (2, 1.0), (1, 2.0)] expected = 1.5 # Execute diff --git a/src/py/flwr/server/strategy/bulyan.py b/src/py/flwr/server/strategy/bulyan.py index 1e4f97530ab7..84a261237ac5 100644 --- a/src/py/flwr/server/strategy/bulyan.py +++ b/src/py/flwr/server/strategy/bulyan.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -19,7 +19,7 @@ from logging import WARNING -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from typing import Any, Callable, Optional, Union from flwr.common import ( FitRes, @@ -86,12 +86,12 @@ def __init__( num_malicious_clients: int = 0, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Optional[Parameters] = None, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, @@ -125,9 +125,9 @@ def __repr__(self) -> str: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using Bulyan.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/bulyan_test.py b/src/py/flwr/server/strategy/bulyan_test.py index 299ed49066fb..f5b7282fed2c 100644 --- a/src/py/flwr/server/strategy/bulyan_test.py +++ b/src/py/flwr/server/strategy/bulyan_test.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -15,7 +15,6 @@ """Bulyan tests.""" -from typing import List, Tuple from unittest.mock import MagicMock from numpy import array, float32 @@ -62,7 +61,7 @@ def test_aggregate_fit() -> None: param_5: Parameters = ndarrays_to_parameters( [array([0.1, 0.1, 0.1, 0.1], dtype=float32)] ) - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( MagicMock(), FitRes( @@ -125,7 +124,7 @@ def test_aggregate_fit() -> None: actual_aggregated, _ = strategy.aggregate_fit( server_round=1, results=results, failures=[] ) - if actual_aggregated: - actual_list = parameters_to_ndarrays(actual_aggregated) - actual = actual_list[0] + assert actual_aggregated + actual_list = parameters_to_ndarrays(actual_aggregated) + actual = actual_list[0] assert (actual == expected[0]).all() diff --git a/src/py/flwr/server/strategy/dp_adaptive_clipping.py b/src/py/flwr/server/strategy/dp_adaptive_clipping.py index b25e1efdf0e9..c64091091c51 100644 --- a/src/py/flwr/server/strategy/dp_adaptive_clipping.py +++ b/src/py/flwr/server/strategy/dp_adaptive_clipping.py @@ -20,7 +20,7 @@ import math from logging import INFO, WARNING -from typing import Dict, List, Optional, Tuple, Union +from typing import Optional, Union import numpy as np @@ -88,7 +88,7 @@ class DifferentialPrivacyServerSideAdaptiveClipping(Strategy): >>> ) """ - # pylint: disable=too-many-arguments,too-many-instance-attributes + # pylint: disable=too-many-arguments,too-many-instance-attributes,too-many-positional-arguments def __init__( self, strategy: Strategy, @@ -156,14 +156,14 @@ def initialize_parameters( def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" self.current_round_params = parameters_to_ndarrays(parameters) return self.strategy.configure_fit(server_round, parameters, client_manager) def configure_evaluate( self, 
server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" return self.strategy.configure_evaluate( server_round, parameters, client_manager @@ -172,9 +172,9 @@ def configure_evaluate( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate training results and update clip norms.""" if failures: return None, {} @@ -245,15 +245,15 @@ def aggregate_fit( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation losses using the given strategy.""" return self.strategy.aggregate_evaluate(server_round, results, failures) def evaluate( self, server_round: int, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Optional[tuple[float, dict[str, Scalar]]]: """Evaluate model parameters using an evaluation function from the strategy.""" return self.strategy.evaluate(server_round, parameters) @@ -307,7 +307,7 @@ class DifferentialPrivacyClientSideAdaptiveClipping(Strategy): >>> ) """ - # pylint: disable=too-many-arguments,too-many-instance-attributes + # pylint: disable=too-many-arguments,too-many-instance-attributes,too-many-positional-arguments def __init__( self, strategy: Strategy, @@ -372,7 +372,7 @@ def 
initialize_parameters( def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" additional_config = {KEY_CLIPPING_NORM: self.clipping_norm} inner_strategy_config_result = self.strategy.configure_fit( @@ -385,7 +385,7 @@ def configure_fit( def configure_evaluate( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" return self.strategy.configure_evaluate( server_round, parameters, client_manager @@ -394,9 +394,9 @@ def configure_evaluate( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate training results and update clip norms.""" if failures: return None, {} @@ -432,7 +432,7 @@ def aggregate_fit( return aggregated_params, metrics - def _update_clip_norm(self, results: List[Tuple[ClientProxy, FitRes]]) -> None: + def _update_clip_norm(self, results: list[tuple[ClientProxy, FitRes]]) -> None: # Calculate the number of clients which set the norm indicator bit norm_bit_set_count = 0 for client_proxy, fit_res in results: @@ -457,14 +457,14 @@ def _update_clip_norm(self, results: List[Tuple[ClientProxy, FitRes]]) -> None: def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: 
list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation losses using the given strategy.""" return self.strategy.aggregate_evaluate(server_round, results, failures) def evaluate( self, server_round: int, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Optional[tuple[float, dict[str, Scalar]]]: """Evaluate model parameters using an evaluation function from the strategy.""" return self.strategy.evaluate(server_round, parameters) diff --git a/src/py/flwr/server/strategy/dp_fixed_clipping.py b/src/py/flwr/server/strategy/dp_fixed_clipping.py index 92b2845fd846..2ca253c96370 100644 --- a/src/py/flwr/server/strategy/dp_fixed_clipping.py +++ b/src/py/flwr/server/strategy/dp_fixed_clipping.py @@ -19,7 +19,7 @@ from logging import INFO, WARNING -from typing import Dict, List, Optional, Tuple, Union +from typing import Optional, Union from flwr.common import ( EvaluateIns, @@ -117,14 +117,14 @@ def initialize_parameters( def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" self.current_round_params = parameters_to_ndarrays(parameters) return self.strategy.configure_fit(server_round, parameters, client_manager) def configure_evaluate( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" return self.strategy.configure_evaluate( server_round, parameters, client_manager @@ -133,9 +133,9 @@ def configure_evaluate( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: 
list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Compute the updates, clip, and pass them for aggregation. Afterward, add noise to the aggregated parameters. @@ -191,15 +191,15 @@ def aggregate_fit( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation losses using the given strategy.""" return self.strategy.aggregate_evaluate(server_round, results, failures) def evaluate( self, server_round: int, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Optional[tuple[float, dict[str, Scalar]]]: """Evaluate model parameters using an evaluation function from the strategy.""" return self.strategy.evaluate(server_round, parameters) @@ -285,7 +285,7 @@ def initialize_parameters( def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" additional_config = {KEY_CLIPPING_NORM: self.clipping_norm} inner_strategy_config_result = self.strategy.configure_fit( @@ -298,7 +298,7 @@ def configure_fit( def configure_evaluate( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" return self.strategy.configure_evaluate( server_round, parameters, client_manager @@ -307,9 +307,9 @@ def configure_evaluate( def aggregate_fit( self, server_round: int, - results: 
List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Add noise to the aggregated parameters.""" if failures: return None, {} @@ -348,14 +348,14 @@ def aggregate_fit( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation losses using the given strategy.""" return self.strategy.aggregate_evaluate(server_round, results, failures) def evaluate( self, server_round: int, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Optional[tuple[float, dict[str, Scalar]]]: """Evaluate model parameters using an evaluation function from the strategy.""" return self.strategy.evaluate(server_round, parameters) diff --git a/src/py/flwr/server/strategy/dpfedavg_adaptive.py b/src/py/flwr/server/strategy/dpfedavg_adaptive.py index a908679ed668..170c9d619a7d 100644 --- a/src/py/flwr/server/strategy/dpfedavg_adaptive.py +++ b/src/py/flwr/server/strategy/dpfedavg_adaptive.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2022 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -19,7 +19,7 @@ import math -from typing import Dict, List, Optional, Tuple, Union +from typing import Optional, Union import numpy as np @@ -39,7 +39,7 @@ class DPFedAvgAdaptive(DPFedAvgFixed): This class is deprecated and will be removed in a future release. """ - # pylint: disable=too-many-arguments,too-many-instance-attributes + # pylint: disable=too-many-arguments,too-many-instance-attributes,too-many-positional-arguments def __init__( self, strategy: Strategy, @@ -80,7 +80,7 @@ def __repr__(self) -> str: def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" additional_config = {"dpfedavg_adaptive_clip_enabled": True} @@ -93,7 +93,7 @@ def configure_fit( return client_instructions - def _update_clip_norm(self, results: List[Tuple[ClientProxy, FitRes]]) -> None: + def _update_clip_norm(self, results: list[tuple[ClientProxy, FitRes]]) -> None: # Calculating number of clients which set the norm indicator bit norm_bit_set_count = 0 for client_proxy, fit_res in results: @@ -118,9 +118,9 @@ def _update_clip_norm(self, results: List[Tuple[ClientProxy, FitRes]]) -> None: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate training results as in DPFedAvgFixed and update clip norms.""" if failures: return None, {} diff --git a/src/py/flwr/server/strategy/dpfedavg_fixed.py b/src/py/flwr/server/strategy/dpfedavg_fixed.py index c54379fc7087..60f8c16f8e6d 100644 --- a/src/py/flwr/server/strategy/dpfedavg_fixed.py +++ b/src/py/flwr/server/strategy/dpfedavg_fixed.py @@ 
-1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2022 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,7 +17,7 @@ Paper: arxiv.org/pdf/1710.06963.pdf """ -from typing import Dict, List, Optional, Tuple, Union +from typing import Optional, Union from flwr.common import EvaluateIns, EvaluateRes, FitIns, FitRes, Parameters, Scalar from flwr.common.dp import add_gaussian_noise @@ -36,7 +36,7 @@ class DPFedAvgFixed(Strategy): This class is deprecated and will be removed in a future release. """ - # pylint: disable=too-many-arguments,too-many-instance-attributes + # pylint: disable=too-many-arguments,too-many-instance-attributes,too-many-positional-arguments def __init__( self, strategy: Strategy, @@ -79,7 +79,7 @@ def initialize_parameters( def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training incorporating Differential Privacy (DP). Configuration of the next training round includes information related to DP, @@ -119,7 +119,7 @@ def configure_fit( def configure_evaluate( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation using the specified strategy. 
Parameters @@ -147,9 +147,9 @@ def configure_evaluate( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate training results using unweighted aggregation.""" if failures: return None, {} @@ -168,14 +168,14 @@ def aggregate_fit( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation losses using the given strategy.""" return self.strategy.aggregate_evaluate(server_round, results, failures) def evaluate( self, server_round: int, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Optional[tuple[float, dict[str, Scalar]]]: """Evaluate model parameters using an evaluation function from the strategy.""" return self.strategy.evaluate(server_round, parameters) diff --git a/src/py/flwr/server/strategy/fault_tolerant_fedavg.py b/src/py/flwr/server/strategy/fault_tolerant_fedavg.py index 663ac8872c39..60213db2efeb 100644 --- a/src/py/flwr/server/strategy/fault_tolerant_fedavg.py +++ b/src/py/flwr/server/strategy/fault_tolerant_fedavg.py @@ -16,7 +16,7 @@ from logging import WARNING -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union from flwr.common import ( EvaluateRes, @@ -49,12 +49,12 @@ def __init__( min_available_clients: int = 1, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, 
Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, min_completion_rate_fit: float = 0.5, min_completion_rate_evaluate: float = 0.5, initial_parameters: Optional[Parameters] = None, @@ -85,9 +85,9 @@ def __repr__(self) -> str: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using weighted average.""" if not results: return None, {} @@ -117,9 +117,9 @@ def aggregate_fit( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation losses using weighted average.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/fault_tolerant_fedavg_test.py b/src/py/flwr/server/strategy/fault_tolerant_fedavg_test.py index 98f4cac032cb..a01a3a5c0ad5 100644 --- a/src/py/flwr/server/strategy/fault_tolerant_fedavg_test.py +++ b/src/py/flwr/server/strategy/fault_tolerant_fedavg_test.py @@ -15,7 +15,7 @@ """FaultTolerantFedAvg tests.""" -from typing import List, 
Optional, Tuple, Union +from typing import Optional, Union from unittest.mock import MagicMock from flwr.common import ( @@ -36,8 +36,8 @@ def test_aggregate_fit_no_results_no_failures() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_fit=0.1) - results: List[Tuple[ClientProxy, FitRes]] = [] - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [] + results: list[tuple[ClientProxy, FitRes]] = [] + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [] expected: Optional[Parameters] = None # Execute @@ -51,8 +51,8 @@ def test_aggregate_fit_no_results() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_fit=0.1) - results: List[Tuple[ClientProxy, FitRes]] = [] - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [Exception()] + results: list[tuple[ClientProxy, FitRes]] = [] + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [Exception()] expected: Optional[Parameters] = None # Execute @@ -66,7 +66,7 @@ def test_aggregate_fit_not_enough_results() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_fit=0.5) - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( MagicMock(), FitRes( @@ -77,7 +77,7 @@ def test_aggregate_fit_not_enough_results() -> None: ), ) ] - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [ + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [ Exception(), Exception(), ] @@ -94,7 +94,7 @@ def test_aggregate_fit_just_enough_results() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_fit=0.5) - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( MagicMock(), FitRes( @@ -105,7 +105,7 @@ def test_aggregate_fit_just_enough_results() -> None: ), ) ] - failures: 
List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [Exception()] + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [Exception()] expected: Optional[NDArrays] = [] # Execute @@ -120,7 +120,7 @@ def test_aggregate_fit_no_failures() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_fit=0.99) - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( MagicMock(), FitRes( @@ -131,7 +131,7 @@ def test_aggregate_fit_no_failures() -> None: ), ) ] - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [] + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [] expected: Optional[NDArrays] = [] # Execute @@ -146,8 +146,8 @@ def test_aggregate_evaluate_no_results_no_failures() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_evaluate=0.1) - results: List[Tuple[ClientProxy, EvaluateRes]] = [] - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]] = [] + results: list[tuple[ClientProxy, EvaluateRes]] = [] + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]] = [] expected: Optional[float] = None # Execute @@ -161,8 +161,8 @@ def test_aggregate_evaluate_no_results() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_evaluate=0.1) - results: List[Tuple[ClientProxy, EvaluateRes]] = [] - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]] = [ + results: list[tuple[ClientProxy, EvaluateRes]] = [] + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]] = [ Exception() ] expected: Optional[float] = None @@ -178,7 +178,7 @@ def test_aggregate_evaluate_not_enough_results() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_evaluate=0.5) - results: List[Tuple[ClientProxy, EvaluateRes]] = [ + results: list[tuple[ClientProxy, 
EvaluateRes]] = [ ( MagicMock(), EvaluateRes( @@ -189,7 +189,7 @@ def test_aggregate_evaluate_not_enough_results() -> None: ), ) ] - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]] = [ + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]] = [ Exception(), Exception(), ] @@ -206,7 +206,7 @@ def test_aggregate_evaluate_just_enough_results() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_evaluate=0.5) - results: List[Tuple[ClientProxy, EvaluateRes]] = [ + results: list[tuple[ClientProxy, EvaluateRes]] = [ ( MagicMock(), EvaluateRes( @@ -217,7 +217,7 @@ def test_aggregate_evaluate_just_enough_results() -> None: ), ) ] - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]] = [ + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]] = [ Exception() ] expected: Optional[float] = 2.3 @@ -233,7 +233,7 @@ def test_aggregate_evaluate_no_failures() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_evaluate=0.99) - results: List[Tuple[ClientProxy, EvaluateRes]] = [ + results: list[tuple[ClientProxy, EvaluateRes]] = [ ( MagicMock(), EvaluateRes( @@ -244,7 +244,7 @@ def test_aggregate_evaluate_no_failures() -> None: ), ) ] - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]] = [] + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]] = [] expected: Optional[float] = 2.3 # Execute diff --git a/src/py/flwr/server/strategy/fedadagrad.py b/src/py/flwr/server/strategy/fedadagrad.py index 4a8f52d98e18..75befdd0e796 100644 --- a/src/py/flwr/server/strategy/fedadagrad.py +++ b/src/py/flwr/server/strategy/fedadagrad.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2021 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -20,7 +20,7 @@ """ -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union import numpy as np @@ -89,12 +89,12 @@ def __init__( min_available_clients: int = 2, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, evaluate_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, accept_failures: bool = True, @@ -131,9 +131,9 @@ def __repr__(self) -> str: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using weighted average.""" fedavg_parameters_aggregated, metrics_aggregated = super().aggregate_fit( server_round=server_round, results=results, failures=failures diff --git a/src/py/flwr/server/strategy/fedadagrad_test.py b/src/py/flwr/server/strategy/fedadagrad_test.py index 0c966442ecaf..6ac217b021b4 100644 --- a/src/py/flwr/server/strategy/fedadagrad_test.py +++ b/src/py/flwr/server/strategy/fedadagrad_test.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2021 Flower Labs GmbH. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,7 +15,6 @@ """FedAdagrad tests.""" -from typing import List, Tuple from unittest.mock import MagicMock from numpy import array, float32 @@ -54,7 +53,7 @@ def test_aggregate_fit() -> None: bridge = MagicMock() client_0 = GrpcClientProxy(cid="0", bridge=bridge) client_1 = GrpcClientProxy(cid="1", bridge=bridge) - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( client_0, FitRes( @@ -80,7 +79,7 @@ def test_aggregate_fit() -> None: actual_aggregated, _ = strategy.aggregate_fit( server_round=1, results=results, failures=[] ) - if actual_aggregated: - actual_list = parameters_to_ndarrays(actual_aggregated) - actual = actual_list[0] + assert actual_aggregated + actual_list = parameters_to_ndarrays(actual_aggregated) + actual = actual_list[0] assert (actual == expected[0]).all() diff --git a/src/py/flwr/server/strategy/fedadam.py b/src/py/flwr/server/strategy/fedadam.py index 8a47cf0dd8ac..d0f87a43f79b 100644 --- a/src/py/flwr/server/strategy/fedadam.py +++ b/src/py/flwr/server/strategy/fedadam.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2021 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -20,7 +20,7 @@ """ -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union import numpy as np @@ -93,12 +93,12 @@ def __init__( min_available_clients: int = 2, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Parameters, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, @@ -137,9 +137,9 @@ def __repr__(self) -> str: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using weighted average.""" fedavg_parameters_aggregated, metrics_aggregated = super().aggregate_fit( server_round=server_round, results=results, failures=failures diff --git a/src/py/flwr/server/strategy/fedavg.py b/src/py/flwr/server/strategy/fedavg.py index 3b9b2640c2b5..2d0b855c3186 100644 --- a/src/py/flwr/server/strategy/fedavg.py +++ b/src/py/flwr/server/strategy/fedavg.py @@ -19,7 +19,7 @@ from logging import WARNING -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union from flwr.common import ( EvaluateIns, @@ -99,12 +99,12 @@ def __init__( min_available_clients: int = 2, evaluate_fn: Optional[ Callable[ - [int, 
NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Optional[Parameters] = None, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, @@ -138,12 +138,12 @@ def __repr__(self) -> str: rep = f"FedAvg(accept_failures={self.accept_failures})" return rep - def num_fit_clients(self, num_available_clients: int) -> Tuple[int, int]: + def num_fit_clients(self, num_available_clients: int) -> tuple[int, int]: """Return the sample size and the required number of available clients.""" num_clients = int(num_available_clients * self.fraction_fit) return max(num_clients, self.min_fit_clients), self.min_available_clients - def num_evaluation_clients(self, num_available_clients: int) -> Tuple[int, int]: + def num_evaluation_clients(self, num_available_clients: int) -> tuple[int, int]: """Use a fraction of available clients for evaluation.""" num_clients = int(num_available_clients * self.fraction_evaluate) return max(num_clients, self.min_evaluate_clients), self.min_available_clients @@ -158,7 +158,7 @@ def initialize_parameters( def evaluate( self, server_round: int, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Optional[tuple[float, dict[str, Scalar]]]: """Evaluate model parameters using an evaluation function.""" if self.evaluate_fn is None: # No evaluation function provided @@ -172,7 +172,7 @@ def evaluate( def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> 
list[tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" config = {} if self.on_fit_config_fn is not None: @@ -193,7 +193,7 @@ def configure_fit( def configure_evaluate( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" # Do not configure federated evaluation if fraction eval is 0. if self.fraction_evaluate == 0.0: @@ -220,9 +220,9 @@ def configure_evaluate( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using weighted average.""" if not results: return None, {} @@ -256,9 +256,9 @@ def aggregate_fit( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation losses using weighted average.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/fedavg_android.py b/src/py/flwr/server/strategy/fedavg_android.py index 6678b7ced114..bcecf8efb504 100644 --- a/src/py/flwr/server/strategy/fedavg_android.py +++ b/src/py/flwr/server/strategy/fedavg_android.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2021 Flower Labs GmbH. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,7 +18,7 @@ """ -from typing import Callable, Dict, List, Optional, Tuple, Union, cast +from typing import Callable, Optional, Union, cast import numpy as np @@ -81,12 +81,12 @@ def __init__( min_available_clients: int = 2, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Optional[Parameters] = None, ) -> None: @@ -107,12 +107,12 @@ def __repr__(self) -> str: rep = f"FedAvg(accept_failures={self.accept_failures})" return rep - def num_fit_clients(self, num_available_clients: int) -> Tuple[int, int]: + def num_fit_clients(self, num_available_clients: int) -> tuple[int, int]: """Return the sample size and the required number of available clients.""" num_clients = int(num_available_clients * self.fraction_fit) return max(num_clients, self.min_fit_clients), self.min_available_clients - def num_evaluation_clients(self, num_available_clients: int) -> Tuple[int, int]: + def num_evaluation_clients(self, num_available_clients: int) -> tuple[int, int]: """Use a fraction of available clients for evaluation.""" num_clients = int(num_available_clients * self.fraction_evaluate) return max(num_clients, self.min_evaluate_clients), self.min_available_clients @@ -127,7 +127,7 @@ def initialize_parameters( def evaluate( self, server_round: int, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> 
Optional[tuple[float, dict[str, Scalar]]]: """Evaluate model parameters using an evaluation function.""" if self.evaluate_fn is None: # No evaluation function provided @@ -141,7 +141,7 @@ def evaluate( def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" config = {} if self.on_fit_config_fn is not None: @@ -162,7 +162,7 @@ def configure_fit( def configure_evaluate( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" # Do not configure federated evaluation if fraction_evaluate is 0 if self.fraction_evaluate == 0.0: @@ -189,9 +189,9 @@ def configure_evaluate( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using weighted average.""" if not results: return None, {} @@ -208,9 +208,9 @@ def aggregate_fit( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation losses using weighted average.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/fedavg_test.py b/src/py/flwr/server/strategy/fedavg_test.py index 
e62eaa5c5832..66241c3ab66a 100644 --- a/src/py/flwr/server/strategy/fedavg_test.py +++ b/src/py/flwr/server/strategy/fedavg_test.py @@ -15,7 +15,7 @@ """FedAvg tests.""" -from typing import List, Tuple, Union +from typing import Union from unittest.mock import MagicMock import numpy as np @@ -140,7 +140,7 @@ def test_inplace_aggregate_fit_equivalence() -> None: weights1_0 = np.random.randn(100, 64) weights1_1 = np.random.randn(314, 628, 3) - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( MagicMock(), FitRes( @@ -160,7 +160,7 @@ def test_inplace_aggregate_fit_equivalence() -> None: ), ), ] - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [] + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [] fedavg_reference = FedAvg(inplace=False) fedavg_inplace = FedAvg() diff --git a/src/py/flwr/server/strategy/fedavgm.py b/src/py/flwr/server/strategy/fedavgm.py index fb9261abe89d..a7c37c38770f 100644 --- a/src/py/flwr/server/strategy/fedavgm.py +++ b/src/py/flwr/server/strategy/fedavgm.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2022 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -19,7 +19,7 @@ from logging import WARNING -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union from flwr.common import ( FitRes, @@ -84,12 +84,12 @@ def __init__( min_available_clients: int = 2, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Optional[Parameters] = None, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, @@ -132,9 +132,9 @@ def initialize_parameters( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using weighted average.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/fedavgm_test.py b/src/py/flwr/server/strategy/fedavgm_test.py index a0e942171627..400fa3c97247 100644 --- a/src/py/flwr/server/strategy/fedavgm_test.py +++ b/src/py/flwr/server/strategy/fedavgm_test.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2022 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -15,7 +15,7 @@ """FedAvgM tests.""" -from typing import List, Tuple, Union +from typing import Union from unittest.mock import MagicMock from numpy import array, float32 @@ -41,7 +41,7 @@ def test_aggregate_fit_using_near_one_server_lr_and_no_momentum() -> None: array([0, 0, 0, 0], dtype=float32), ] - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( MagicMock(), FitRes( @@ -61,7 +61,7 @@ def test_aggregate_fit_using_near_one_server_lr_and_no_momentum() -> None: ), ), ] - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [] + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [] expected: NDArrays = [ array([[1, 2, 3], [4, 5, 6]], dtype=float32), array([7, 8, 9, 10], dtype=float32), @@ -94,7 +94,7 @@ def test_aggregate_fit_server_learning_rate_and_momentum() -> None: array([0, 0, 0, 0], dtype=float32), ] - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( MagicMock(), FitRes( @@ -114,7 +114,7 @@ def test_aggregate_fit_server_learning_rate_and_momentum() -> None: ), ), ] - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [] + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [] expected: NDArrays = [ array([[1, 2, 3], [4, 5, 6]], dtype=float32), array([7, 8, 9, 10], dtype=float32), diff --git a/src/py/flwr/server/strategy/fedmedian.py b/src/py/flwr/server/strategy/fedmedian.py index 17e979d92beb..35044d42b22c 100644 --- a/src/py/flwr/server/strategy/fedmedian.py +++ b/src/py/flwr/server/strategy/fedmedian.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2022 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -19,7 +19,7 @@ from logging import WARNING -from typing import Dict, List, Optional, Tuple, Union +from typing import Optional, Union from flwr.common import ( FitRes, @@ -46,9 +46,9 @@ def __repr__(self) -> str: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using median.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/fedmedian_test.py b/src/py/flwr/server/strategy/fedmedian_test.py index 57cf08d8c01d..bbce69c19ac5 100644 --- a/src/py/flwr/server/strategy/fedmedian_test.py +++ b/src/py/flwr/server/strategy/fedmedian_test.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2022 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -15,7 +15,6 @@ """FedMedian tests.""" -from typing import List, Tuple from unittest.mock import MagicMock from numpy import array, float32 @@ -159,7 +158,7 @@ def test_aggregate_fit() -> None: client_0 = GrpcClientProxy(cid="0", bridge=bridge) client_1 = GrpcClientProxy(cid="1", bridge=bridge) client_2 = GrpcClientProxy(cid="2", bridge=bridge) - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( client_0, FitRes( @@ -194,7 +193,7 @@ def test_aggregate_fit() -> None: actual_aggregated, _ = strategy.aggregate_fit( server_round=1, results=results, failures=[] ) - if actual_aggregated: - actual_list = parameters_to_ndarrays(actual_aggregated) - actual = actual_list[0] + assert actual_aggregated + actual_list = parameters_to_ndarrays(actual_aggregated) + actual = actual_list[0] assert (actual == expected[0]).all() diff --git a/src/py/flwr/server/strategy/fedopt.py b/src/py/flwr/server/strategy/fedopt.py index be5f260d96fa..3e143fc3ca59 100644 --- a/src/py/flwr/server/strategy/fedopt.py +++ b/src/py/flwr/server/strategy/fedopt.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2021 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -18,7 +18,7 @@ """ -from typing import Callable, Dict, Optional, Tuple +from typing import Callable, Optional from flwr.common import ( MetricsAggregationFn, @@ -86,12 +86,12 @@ def __init__( min_available_clients: int = 2, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Parameters, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, diff --git a/src/py/flwr/server/strategy/fedprox.py b/src/py/flwr/server/strategy/fedprox.py index d20f578b193d..218fece0491f 100644 --- a/src/py/flwr/server/strategy/fedprox.py +++ b/src/py/flwr/server/strategy/fedprox.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -18,7 +18,7 @@ """ -from typing import Callable, Dict, List, Optional, Tuple +from typing import Callable, Optional from flwr.common import FitIns, MetricsAggregationFn, NDArrays, Parameters, Scalar from flwr.server.client_manager import ClientManager @@ -113,12 +113,12 @@ def __init__( min_available_clients: int = 2, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Optional[Parameters] = None, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, @@ -148,7 +148,7 @@ def __repr__(self) -> str: def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training. 
Sends the proximal factor mu to the clients diff --git a/src/py/flwr/server/strategy/fedtrimmedavg.py b/src/py/flwr/server/strategy/fedtrimmedavg.py index 96b0d35e7a61..8a0e4e50fbff 100644 --- a/src/py/flwr/server/strategy/fedtrimmedavg.py +++ b/src/py/flwr/server/strategy/fedtrimmedavg.py @@ -17,7 +17,7 @@ Paper: arxiv.org/abs/1803.01498 """ from logging import WARNING -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union from flwr.common import ( FitRes, @@ -78,12 +78,12 @@ def __init__( min_available_clients: int = 2, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Optional[Parameters] = None, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, @@ -114,9 +114,9 @@ def __repr__(self) -> str: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using trimmed average.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/fedxgb_bagging.py b/src/py/flwr/server/strategy/fedxgb_bagging.py index a8e8adddafbb..1e55466808f8 100644 --- a/src/py/flwr/server/strategy/fedxgb_bagging.py +++ 
b/src/py/flwr/server/strategy/fedxgb_bagging.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,7 +17,7 @@ import json from logging import WARNING -from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast +from typing import Any, Callable, Optional, Union, cast from flwr.common import EvaluateRes, FitRes, Parameters, Scalar from flwr.common.logger import log @@ -34,8 +34,8 @@ def __init__( self, evaluate_function: Optional[ Callable[ - [int, Parameters, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, Parameters, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, **kwargs: Any, @@ -52,9 +52,9 @@ def __repr__(self) -> str: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using bagging.""" if not results: return None, {} @@ -79,9 +79,9 @@ def aggregate_fit( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation metrics using average.""" if not results: return None, {} @@ -101,7 +101,7 @@ def aggregate_evaluate( def evaluate( self, server_round: int, parameters: Parameters - 
) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Optional[tuple[float, dict[str, Scalar]]]: """Evaluate model parameters using an evaluation function.""" if self.evaluate_function is None: # No evaluation function provided @@ -152,7 +152,7 @@ def aggregate( return bst_prev_bytes -def _get_tree_nums(xgb_model_org: bytes) -> Tuple[int, int]: +def _get_tree_nums(xgb_model_org: bytes) -> tuple[int, int]: xgb_model = json.loads(bytearray(xgb_model_org)) # Get the number of trees tree_num = int( diff --git a/src/py/flwr/server/strategy/fedxgb_cyclic.py b/src/py/flwr/server/strategy/fedxgb_cyclic.py index 2605daab29f4..c2dc3d797c7e 100644 --- a/src/py/flwr/server/strategy/fedxgb_cyclic.py +++ b/src/py/flwr/server/strategy/fedxgb_cyclic.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -16,7 +16,7 @@ from logging import WARNING -from typing import Any, Dict, List, Optional, Tuple, Union, cast +from typing import Any, Optional, Union, cast from flwr.common import EvaluateIns, EvaluateRes, FitIns, FitRes, Parameters, Scalar from flwr.common.logger import log @@ -45,9 +45,9 @@ def __repr__(self) -> str: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using bagging.""" if not results: return None, {} @@ -69,9 +69,9 @@ def aggregate_fit( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation metrics using average.""" if not results: return None, {} @@ -91,7 +91,7 @@ def aggregate_evaluate( def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" config = {} if self.on_fit_config_fn is not None: @@ -117,7 +117,7 @@ def configure_fit( def configure_evaluate( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" # Do not configure federated evaluation if fraction eval is 0. 
if self.fraction_evaluate == 0.0: diff --git a/src/py/flwr/server/strategy/fedxgb_nn_avg.py b/src/py/flwr/server/strategy/fedxgb_nn_avg.py index 8dedc925f350..a7da4a919af7 100644 --- a/src/py/flwr/server/strategy/fedxgb_nn_avg.py +++ b/src/py/flwr/server/strategy/fedxgb_nn_avg.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -22,7 +22,7 @@ from logging import WARNING -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Optional, Union from flwr.common import FitRes, Scalar, ndarrays_to_parameters, parameters_to_ndarrays from flwr.common.logger import log, warn_deprecated_feature @@ -56,7 +56,7 @@ def __repr__(self) -> str: def evaluate( self, server_round: int, parameters: Any - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Optional[tuple[float, dict[str, Scalar]]]: """Evaluate model parameters using an evaluation function.""" if self.evaluate_fn is None: # No evaluation function provided @@ -70,9 +70,9 @@ def evaluate( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Any], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Any], dict[str, Scalar]]: """Aggregate fit results using weighted average.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/fedyogi.py b/src/py/flwr/server/strategy/fedyogi.py index 7c77aab7ae73..11873d1b781f 100644 --- a/src/py/flwr/server/strategy/fedyogi.py +++ b/src/py/flwr/server/strategy/fedyogi.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2021 Flower Labs GmbH. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,7 +18,7 @@ """ -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union import numpy as np @@ -93,12 +93,12 @@ def __init__( min_available_clients: int = 2, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Parameters, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, @@ -137,9 +137,9 @@ def __repr__(self) -> str: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using weighted average.""" fedavg_parameters_aggregated, metrics_aggregated = super().aggregate_fit( server_round=server_round, results=results, failures=failures diff --git a/src/py/flwr/server/strategy/krum.py b/src/py/flwr/server/strategy/krum.py index 16eb5212940e..5d33874b9789 100644 --- a/src/py/flwr/server/strategy/krum.py +++ b/src/py/flwr/server/strategy/krum.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2022 Flower Labs GmbH. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -21,7 +21,7 @@ from logging import WARNING -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union from flwr.common import ( FitRes, @@ -87,12 +87,12 @@ def __init__( num_clients_to_keep: int = 0, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Optional[Parameters] = None, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, @@ -123,9 +123,9 @@ def __repr__(self) -> str: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using Krum.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/krum_test.py b/src/py/flwr/server/strategy/krum_test.py index 653dc9a8475d..ac068a8e6ba6 100644 --- a/src/py/flwr/server/strategy/krum_test.py +++ b/src/py/flwr/server/strategy/krum_test.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2022 Flower Labs GmbH. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,7 +15,6 @@ """Krum tests.""" -from typing import List, Tuple from unittest.mock import MagicMock from numpy import array, float32 @@ -160,7 +159,7 @@ def test_aggregate_fit() -> None: client_0 = GrpcClientProxy(cid="0", bridge=bridge) client_1 = GrpcClientProxy(cid="1", bridge=bridge) client_2 = GrpcClientProxy(cid="2", bridge=bridge) - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( client_0, FitRes( @@ -195,7 +194,7 @@ def test_aggregate_fit() -> None: actual_aggregated, _ = strategy.aggregate_fit( server_round=1, results=results, failures=[] ) - if actual_aggregated: - actual_list = parameters_to_ndarrays(actual_aggregated) - actual = actual_list[0] + assert actual_aggregated + actual_list = parameters_to_ndarrays(actual_aggregated) + actual = actual_list[0] assert (actual == expected[0]).all() diff --git a/src/py/flwr/server/strategy/multikrum_test.py b/src/py/flwr/server/strategy/multikrum_test.py index f874dc2f9800..d9c73fb4eb8f 100644 --- a/src/py/flwr/server/strategy/multikrum_test.py +++ b/src/py/flwr/server/strategy/multikrum_test.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2022 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -15,7 +15,6 @@ """Krum tests.""" -from typing import List, Tuple from unittest.mock import MagicMock from numpy import array, float32 @@ -59,7 +58,7 @@ def test_aggregate_fit() -> None: client_1 = GrpcClientProxy(cid="1", bridge=bridge) client_2 = GrpcClientProxy(cid="2", bridge=bridge) - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( client_0, FitRes( @@ -94,7 +93,7 @@ def test_aggregate_fit() -> None: actual_aggregated, _ = strategy.aggregate_fit( server_round=1, results=results, failures=[] ) - if actual_aggregated: - actual_list = parameters_to_ndarrays(actual_aggregated) - actual = actual_list[0] + assert actual_aggregated + actual_list = parameters_to_ndarrays(actual_aggregated) + actual = actual_list[0] assert (actual == expected[0]).all() diff --git a/src/py/flwr/server/strategy/qfedavg.py b/src/py/flwr/server/strategy/qfedavg.py index 758e8e608e9f..30a3cc53ee94 100644 --- a/src/py/flwr/server/strategy/qfedavg.py +++ b/src/py/flwr/server/strategy/qfedavg.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2021 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -19,7 +19,7 @@ from logging import WARNING -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union import numpy as np @@ -60,12 +60,12 @@ def __init__( min_available_clients: int = 1, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Optional[Parameters] = None, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, @@ -95,19 +95,19 @@ def __repr__(self) -> str: rep += f"q_param={self.q_param}, pre_weights={self.pre_weights})" return rep - def num_fit_clients(self, num_available_clients: int) -> Tuple[int, int]: + def num_fit_clients(self, num_available_clients: int) -> tuple[int, int]: """Return the sample size and the required number of available clients.""" num_clients = int(num_available_clients * self.fraction_fit) return max(num_clients, self.min_fit_clients), self.min_available_clients - def num_evaluation_clients(self, num_available_clients: int) -> Tuple[int, int]: + def num_evaluation_clients(self, num_available_clients: int) -> tuple[int, int]: """Use a fraction of available clients for evaluation.""" num_clients = int(num_available_clients * self.fraction_evaluate) return max(num_clients, self.min_evaluate_clients), self.min_available_clients def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" weights = 
parameters_to_ndarrays(parameters) self.pre_weights = weights @@ -131,7 +131,7 @@ def configure_fit( def configure_evaluate( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" # Do not configure federated evaluation if fraction_evaluate is 0 if self.fraction_evaluate == 0.0: @@ -158,9 +158,9 @@ def configure_evaluate( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using weighted average.""" if not results: return None, {} @@ -229,9 +229,9 @@ def norm_grad(grad_list: NDArrays) -> float: def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation losses using weighted average.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/strategy.py b/src/py/flwr/server/strategy/strategy.py index cfdfe2e246c5..14999e9a8993 100644 --- a/src/py/flwr/server/strategy/strategy.py +++ b/src/py/flwr/server/strategy/strategy.py @@ -16,7 +16,7 @@ from abc import ABC, abstractmethod -from typing import Dict, List, Optional, Tuple, Union +from typing import Optional, Union from flwr.common import EvaluateIns, EvaluateRes, FitIns, FitRes, Parameters, Scalar from flwr.server.client_manager import 
ClientManager @@ -47,7 +47,7 @@ def initialize_parameters( @abstractmethod def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training. Parameters @@ -72,9 +72,9 @@ def configure_fit( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate training results. Parameters @@ -108,7 +108,7 @@ def aggregate_fit( @abstractmethod def configure_evaluate( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation. Parameters @@ -134,9 +134,9 @@ def configure_evaluate( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation results. Parameters @@ -164,7 +164,7 @@ def aggregate_evaluate( @abstractmethod def evaluate( self, server_round: int, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Optional[tuple[float, dict[str, Scalar]]]: """Evaluate the current model parameters. 
This function can be used to perform centralized (i.e., server-side) evaluation diff --git a/src/py/flwr/server/superlink/driver/__init__.py b/src/py/flwr/server/superlink/driver/__init__.py index 2bfe63e6065f..58fbc479478f 100644 --- a/src/py/flwr/server/superlink/driver/__init__.py +++ b/src/py/flwr/server/superlink/driver/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/src/py/flwr/server/superlink/driver/driver_grpc.py b/src/py/flwr/server/superlink/driver/driver_grpc.py index f74000bc59c4..70354387812e 100644 --- a/src/py/flwr/server/superlink/driver/driver_grpc.py +++ b/src/py/flwr/server/superlink/driver/driver_grpc.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -15,7 +15,7 @@ """Driver gRPC API.""" from logging import INFO -from typing import Optional, Tuple +from typing import Optional import grpc @@ -24,6 +24,7 @@ from flwr.proto.driver_pb2_grpc import ( # pylint: disable=E0611 add_DriverServicer_to_server, ) +from flwr.server.superlink.ffs.ffs_factory import FfsFactory from flwr.server.superlink.state import StateFactory from ..fleet.grpc_bidi.grpc_server import generic_create_grpc_server @@ -33,12 +34,14 @@ def run_driver_api_grpc( address: str, state_factory: StateFactory, - certificates: Optional[Tuple[bytes, bytes, bytes]], + ffs_factory: FfsFactory, + certificates: Optional[tuple[bytes, bytes, bytes]], ) -> grpc.Server: """Run Driver API (gRPC, request-response).""" # Create Driver API gRPC server driver_servicer: grpc.Server = DriverServicer( state_factory=state_factory, + ffs_factory=ffs_factory, ) driver_add_servicer_to_server_fn = add_DriverServicer_to_server driver_grpc_server = generic_create_grpc_server( diff --git a/src/py/flwr/server/superlink/driver/driver_servicer.py b/src/py/flwr/server/superlink/driver/driver_servicer.py index ce2d9d68d8ca..72c0d110ac14 100644 --- a/src/py/flwr/server/superlink/driver/driver_servicer.py +++ b/src/py/flwr/server/superlink/driver/driver_servicer.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,16 +17,21 @@ import time from logging import DEBUG -from typing import List, Optional, Set +from typing import Optional from uuid import UUID import grpc from flwr.common.logger import log +from flwr.common.serde import ( + fab_from_proto, + fab_to_proto, + user_config_from_proto, + user_config_to_proto, +) +from flwr.common.typing import Fab from flwr.proto import driver_pb2_grpc # pylint: disable=E0611 from flwr.proto.driver_pb2 import ( # pylint: disable=E0611 - CreateRunRequest, - CreateRunResponse, GetNodesRequest, GetNodesResponse, PullTaskResRequest, @@ -34,8 +39,18 @@ PushTaskInsRequest, PushTaskInsResponse, ) +from flwr.proto.fab_pb2 import GetFabRequest, GetFabResponse # pylint: disable=E0611 from flwr.proto.node_pb2 import Node # pylint: disable=E0611 +from flwr.proto.run_pb2 import ( # pylint: disable=E0611 + CreateRunRequest, + CreateRunResponse, + GetRunRequest, + GetRunResponse, + Run, +) from flwr.proto.task_pb2 import TaskRes # pylint: disable=E0611 +from flwr.server.superlink.ffs.ffs import Ffs +from flwr.server.superlink.ffs.ffs_factory import FfsFactory from flwr.server.superlink.state import State, StateFactory from flwr.server.utils.validator import validate_task_ins_or_res @@ -43,8 +58,9 @@ class DriverServicer(driver_pb2_grpc.DriverServicer): """Driver API servicer.""" - def __init__(self, state_factory: StateFactory) -> None: + def __init__(self, state_factory: StateFactory, ffs_factory: FfsFactory) -> None: self.state_factory = state_factory + self.ffs_factory = ffs_factory def GetNodes( self, request: GetNodesRequest, context: grpc.ServicerContext @@ -52,8 +68,8 @@ def GetNodes( """Get available nodes.""" log(DEBUG, "DriverServicer.GetNodes") state: State = self.state_factory.state() - all_ids: Set[int] = state.get_nodes(request.run_id) - nodes: List[Node] = [ + all_ids: set[int] = state.get_nodes(request.run_id) + nodes: list[Node] = [ Node(node_id=node_id, anonymous=False) for node_id in all_ids ] return 
GetNodesResponse(nodes=nodes) @@ -64,7 +80,22 @@ def CreateRun( """Create run ID.""" log(DEBUG, "DriverServicer.CreateRun") state: State = self.state_factory.state() - run_id = state.create_run(request.fab_id, request.fab_version) + if request.HasField("fab"): + fab = fab_from_proto(request.fab) + ffs: Ffs = self.ffs_factory.ffs() + fab_hash = ffs.put(fab.content, {}) + _raise_if( + fab_hash != fab.hash_str, + f"FAB ({fab.hash_str}) hash from request doesn't match contents", + ) + else: + fab_hash = "" + run_id = state.create_run( + request.fab_id, + request.fab_version, + fab_hash, + user_config_from_proto(request.override_config), + ) return CreateRunResponse(run_id=run_id) def PushTaskIns( @@ -88,7 +119,7 @@ def PushTaskIns( state: State = self.state_factory.state() # Store each TaskIns - task_ids: List[Optional[UUID]] = [] + task_ids: list[Optional[UUID]] = [] for task_ins in request.task_ins_list: task_id: Optional[UUID] = state.store_task_ins(task_ins=task_ins) task_ids.append(task_id) @@ -104,7 +135,7 @@ def PullTaskRes( log(DEBUG, "DriverServicer.PullTaskRes") # Convert each task_id str to UUID - task_ids: Set[UUID] = {UUID(task_id) for task_id in request.task_ids} + task_ids: set[UUID] = {UUID(task_id) for task_id in request.task_ids} # Init state state: State = self.state_factory.state() @@ -124,11 +155,49 @@ def on_rpc_done() -> None: context.add_callback(on_rpc_done) # Read from state - task_res_list: List[TaskRes] = state.get_task_res(task_ids=task_ids, limit=None) + task_res_list: list[TaskRes] = state.get_task_res(task_ids=task_ids) context.set_code(grpc.StatusCode.OK) return PullTaskResResponse(task_res_list=task_res_list) + def GetRun( + self, request: GetRunRequest, context: grpc.ServicerContext + ) -> GetRunResponse: + """Get run information.""" + log(DEBUG, "DriverServicer.GetRun") + + # Init state + state: State = self.state_factory.state() + + # Retrieve run information + run = state.get_run(request.run_id) + + if run is None: + return 
GetRunResponse() + + return GetRunResponse( + run=Run( + run_id=run.run_id, + fab_id=run.fab_id, + fab_version=run.fab_version, + override_config=user_config_to_proto(run.override_config), + fab_hash=run.fab_hash, + ) + ) + + def GetFab( + self, request: GetFabRequest, context: grpc.ServicerContext + ) -> GetFabResponse: + """Get FAB from Ffs.""" + log(DEBUG, "DriverServicer.GetFab") + + ffs: Ffs = self.ffs_factory.ffs() + if result := ffs.get(request.hash_str): + fab = Fab(request.hash_str, result[0]) + return GetFabResponse(fab=fab_to_proto(fab)) + + raise ValueError(f"Found no FAB with hash: {request.hash_str}") + def _raise_if(validation_error: bool, detail: str) -> None: if validation_error: diff --git a/src/py/flwr/server/superlink/driver/driver_servicer_test.py b/src/py/flwr/server/superlink/driver/driver_servicer_test.py index 99f7cc007a89..394d6be7ee6a 100644 --- a/src/py/flwr/server/superlink/driver/driver_servicer_test.py +++ b/src/py/flwr/server/superlink/driver/driver_servicer_test.py @@ -1,4 +1,4 @@ -# Copyright 2022 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/src/py/flwr/server/superlink/ffs/__init__.py b/src/py/flwr/server/superlink/ffs/__init__.py new file mode 100644 index 000000000000..0273d2a630e1 --- /dev/null +++ b/src/py/flwr/server/superlink/ffs/__init__.py @@ -0,0 +1,24 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
class DiskFfs(Ffs):  # pylint: disable=R0904
    """Disk-based Flower File Storage interface for large objects.

    Objects are stored as files named after the sha256 hex digest of
    their content; metadata lives in a JSON sidecar named ``<hash>.META``.
    """

    def __init__(self, base_dir: str) -> None:
        """Create a new DiskFfs instance.

        Parameters
        ----------
        base_dir : str
            The base directory to store the objects.
        """
        self.base_dir = Path(base_dir)

    def put(self, content: bytes, meta: dict[str, str]) -> str:
        """Store bytes and metadata and return key (hash of content).

        Parameters
        ----------
        content : bytes
            The content to be stored.
        meta : Dict[str, str]
            The metadata to be stored.

        Returns
        -------
        key : str
            The key (sha256hex hash) of the content.
        """
        content_hash = hashlib.sha256(content).hexdigest()

        self.base_dir.mkdir(exist_ok=True, parents=True)
        (self.base_dir / content_hash).write_bytes(content)
        # Explicit UTF-8 keeps the sidecar readable regardless of the
        # platform's locale encoding.
        (self.base_dir / f"{content_hash}.META").write_text(
            json.dumps(meta), encoding="utf-8"
        )

        return content_hash

    def get(self, key: str) -> Optional[tuple[bytes, dict[str, str]]]:
        """Return tuple containing the object content and metadata.

        Parameters
        ----------
        key : str
            The sha256hex hash of the object to be retrieved.

        Returns
        -------
        Optional[Tuple[bytes, Dict[str, str]]]
            A tuple containing the object content and metadata,
            or None if no object is stored under the key.
        """
        if not (self.base_dir / key).exists():
            return None

        content = (self.base_dir / key).read_bytes()
        meta = json.loads(
            (self.base_dir / f"{key}.META").read_text(encoding="utf-8")
        )

        return content, meta

    def delete(self, key: str) -> None:
        """Delete object with hash.

        Parameters
        ----------
        key : str
            The sha256hex hash of the object to be deleted.
        """
        (self.base_dir / key).unlink()
        (self.base_dir / f"{key}.META").unlink()

    def list(self) -> list[str]:
        """List all keys.

        Return all available keys in this `Ffs` instance.
        This can be combined with, for example,
        the `delete` method to delete objects.

        Returns
        -------
        List[str]
            A list of all available keys.
        """
        # META sidecars are an implementation detail and are not listed.
        return [
            item.name for item in self.base_dir.iterdir() if item.suffix != ".META"
        ]
+ """ + + @abc.abstractmethod + def delete(self, key: str) -> None: + """Delete object with hash. + + Parameters + ---------- + key : str + The key (sha256hex hash) of the object to be deleted. + """ + + @abc.abstractmethod + def list(self) -> list[str]: + """List keys of all stored objects. + + Return all available keys in this `Ffs` instance. + This can be combined with, for example, + the `delete` method to delete objects. + + Returns + ------- + List[str] + A list of all available keys. + """ diff --git a/src/py/flwr/server/superlink/ffs/ffs_factory.py b/src/py/flwr/server/superlink/ffs/ffs_factory.py new file mode 100644 index 000000000000..63ee5dc77c0a --- /dev/null +++ b/src/py/flwr/server/superlink/ffs/ffs_factory.py @@ -0,0 +1,47 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Factory class that creates Ffs instances.""" + + +from logging import DEBUG +from typing import Optional + +from flwr.common.logger import log + +from .disk_ffs import DiskFfs +from .ffs import Ffs + + +class FfsFactory: + """Factory class that creates Ffs instances. + + Parameters + ---------- + base_dir : str + The base directory used by DiskFfs to store objects. 
+ """ + + def __init__(self, base_dir: str) -> None: + self.base_dir = base_dir + self.ffs_instance: Optional[Ffs] = None + + def ffs(self) -> Ffs: + """Return a Ffs instance and create it, if necessary.""" + if not self.ffs_instance: + log(DEBUG, "Initializing DiskFfs") + self.ffs_instance = DiskFfs(self.base_dir) + + log(DEBUG, "Using DiskFfs") + return self.ffs_instance diff --git a/src/py/flwr/server/superlink/ffs/ffs_factory_test.py b/src/py/flwr/server/superlink/ffs/ffs_factory_test.py new file mode 100644 index 000000000000..81fcb6454147 --- /dev/null +++ b/src/py/flwr/server/superlink/ffs/ffs_factory_test.py @@ -0,0 +1,43 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
def test_disk_ffs_factory() -> None:
    """Test DiskFfs instantiation with FfsFactory."""
    # Prepare
    factory = FfsFactory("test")

    # Execute
    created = factory.ffs()

    # Assert
    assert isinstance(created, DiskFfs)


def test_cache_ffs_factory() -> None:
    """Test cache with FfsFactory."""
    # Prepare
    factory = FfsFactory("other_test")
    first = factory.ffs()

    # Execute
    second = factory.ffs()

    # Assert: the factory hands out the same cached instance
    assert first is second
class FfsTest(unittest.TestCase):
    """Base test suite every Ffs implementation has to pass."""

    # Set to True in each concrete child class so the test runner
    # collects only concrete implementations, not this base class.
    __test__ = False

    tmp_dir: tempfile.TemporaryDirectory  # type: ignore

    @abstractmethod
    def ffs_factory(self) -> Ffs:
        """Provide Ffs implementation to test."""
        raise NotImplementedError()

    def _write_object_directly(self, content: bytes, meta: dict[str, str]) -> str:
        """Write content and its META sidecar straight to disk.

        Bypasses the Ffs under test so `get`/`delete`/`list` can be
        exercised independently of `put`. Returns the sha256hex key.
        """
        key = hashlib.sha256(content).hexdigest()
        with open(os.path.join(self.tmp_dir.name, key), "wb") as file:
            file.write(content)
        with open(
            os.path.join(self.tmp_dir.name, f"{key}.META"),
            "w",
            encoding="utf-8",
        ) as file:
            json.dump(meta, file)
        return key

    def test_put(self) -> None:
        """Test if object can be stored."""
        # Prepare
        ffs: Ffs = self.ffs_factory()
        content = b"content"
        hash_expected = hashlib.sha256(content).hexdigest()

        # Execute
        hash_actual = ffs.put(b"content", {"meta": "data"})

        # Assert
        assert isinstance(hash_actual, str)
        assert len(hash_actual) == 64
        assert hash_actual == hash_expected

        # Both the object and its META sidecar must exist on disk
        assert {hash_expected, f"{hash_expected}.META"} == set(
            os.listdir(self.tmp_dir.name)
        )

    def test_get(self) -> None:
        """Test if object can be retrieved."""
        # Prepare
        ffs: Ffs = self.ffs_factory()
        content_expected = b"content"
        meta_expected: dict[str, str] = {"meta_key": "meta_value"}
        hash_expected = self._write_object_directly(content_expected, meta_expected)

        # Execute
        result = ffs.get(hash_expected)
        assert result is not None
        content_actual, meta_actual = result

        # Assert
        assert content_actual == content_expected
        assert meta_actual == meta_expected

    def test_delete(self) -> None:
        """Test if object can be deleted."""
        # Prepare
        ffs: Ffs = self.ffs_factory()
        hash_expected = self._write_object_directly(
            b"content", {"meta_key": "meta_value"}
        )

        # Execute
        ffs.delete(hash_expected)

        # Assert
        assert set() == set(os.listdir(self.tmp_dir.name))

    def test_list(self) -> None:
        """Test if object hashes can be listed."""
        # Prepare
        ffs: Ffs = self.ffs_factory()
        hash_expected = self._write_object_directly(
            b"content", {"meta_key": "meta_value"}
        )

        # Execute
        hashes = ffs.list()

        # Assert
        assert {hash_expected} == set(hashes)


class DiskFfsTest(FfsTest, unittest.TestCase):
    """Test DiskFfs implementation."""

    __test__ = True

    def ffs_factory(self) -> DiskFfs:
        """Return a DiskFfs backed by a fresh temporary directory."""
        # pylint: disable-next=consider-using-with,attribute-defined-outside-init
        self.tmp_dir = tempfile.TemporaryDirectory()
        # Remove the temporary directory after each test to avoid leaks
        self.addCleanup(self.tmp_dir.cleanup)
        return DiskFfs(self.tmp_dir.name)


if __name__ == "__main__":
    unittest.main(verbosity=2)
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/src/py/flwr/server/superlink/fleet/grpc_adapter/__init__.py b/src/py/flwr/server/superlink/fleet/grpc_adapter/__init__.py new file mode 100644 index 000000000000..cf875a1b9666 --- /dev/null +++ b/src/py/flwr/server/superlink/fleet/grpc_adapter/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Server-side part of the GrpcAdapter transport layer.""" diff --git a/src/py/flwr/server/superlink/fleet/grpc_adapter/grpc_adapter_servicer.py b/src/py/flwr/server/superlink/fleet/grpc_adapter/grpc_adapter_servicer.py new file mode 100644 index 000000000000..75aa6d370511 --- /dev/null +++ b/src/py/flwr/server/superlink/fleet/grpc_adapter/grpc_adapter_servicer.py @@ -0,0 +1,159 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
def _handle(
    msg_container: MessageContainer,
    request_type: type[T],
    handler: Callable[[T], GrpcMessage],
) -> MessageContainer:
    """Decode the wrapped request, invoke `handler`, and re-wrap the reply."""
    request_msg = request_type.FromString(msg_container.grpc_message_content)
    response_msg = handler(request_msg)
    response_cls = response_msg.__class__
    # Envelope metadata lets the receiving adapter reconstruct the
    # concrete response type and check version compatibility.
    envelope_metadata = {
        GRPC_ADAPTER_METADATA_FLOWER_PACKAGE_NAME_KEY: package_name,
        GRPC_ADAPTER_METADATA_FLOWER_PACKAGE_VERSION_KEY: package_version,
        GRPC_ADAPTER_METADATA_FLOWER_VERSION_KEY: package_version,
        GRPC_ADAPTER_METADATA_MESSAGE_MODULE_KEY: response_cls.__module__,
        GRPC_ADAPTER_METADATA_MESSAGE_QUALNAME_KEY: response_cls.__qualname__,
    }
    return MessageContainer(
        metadata=envelope_metadata,
        grpc_message_name=response_cls.__qualname__,
        grpc_message_content=response_msg.SerializeToString(),
    )
class GrpcAdapterServicer(grpcadapter_pb2_grpc.GrpcAdapterServicer):
    """Fleet API via GrpcAdapter servicer."""

    def __init__(self, state_factory: StateFactory, ffs_factory: FfsFactory) -> None:
        self.state_factory = state_factory
        self.ffs_factory = ffs_factory

    def SendReceive(
        self, request: MessageContainer, context: grpc.ServicerContext
    ) -> MessageContainer:
        """Dispatch the wrapped message to the matching Fleet API handler."""
        log(DEBUG, "GrpcAdapterServicer.SendReceive")
        # Map the embedded message name to its (request type, handler) pair.
        dispatch = {
            CreateNodeRequest.__qualname__: (CreateNodeRequest, self._create_node),
            DeleteNodeRequest.__qualname__: (DeleteNodeRequest, self._delete_node),
            PingRequest.__qualname__: (PingRequest, self._ping),
            PullTaskInsRequest.__qualname__: (PullTaskInsRequest, self._pull_task_ins),
            PushTaskResRequest.__qualname__: (PushTaskResRequest, self._push_task_res),
            GetRunRequest.__qualname__: (GetRunRequest, self._get_run),
            GetFabRequest.__qualname__: (GetFabRequest, self._get_fab),
        }
        entry = dispatch.get(request.grpc_message_name)
        if entry is None:
            raise ValueError(f"Invalid grpc_message_name: {request.grpc_message_name}")
        request_type, handler = entry
        return _handle(request, request_type, handler)

    def _create_node(self, request: CreateNodeRequest) -> CreateNodeResponse:
        """Register a new node."""
        log(INFO, "GrpcAdapter.CreateNode")
        return message_handler.create_node(
            request=request,
            state=self.state_factory.state(),
        )

    def _delete_node(self, request: DeleteNodeRequest) -> DeleteNodeResponse:
        """Unregister a node."""
        log(INFO, "GrpcAdapter.DeleteNode")
        return message_handler.delete_node(
            request=request,
            state=self.state_factory.state(),
        )

    def _ping(self, request: PingRequest) -> PingResponse:
        """Handle a node heartbeat."""
        log(DEBUG, "GrpcAdapter.Ping")
        return message_handler.ping(
            request=request,
            state=self.state_factory.state(),
        )

    def _pull_task_ins(self, request: PullTaskInsRequest) -> PullTaskInsResponse:
        """Pull TaskIns."""
        log(INFO, "GrpcAdapter.PullTaskIns")
        return message_handler.pull_task_ins(
            request=request,
            state=self.state_factory.state(),
        )

    def _push_task_res(self, request: PushTaskResRequest) -> PushTaskResResponse:
        """Push TaskRes."""
        log(INFO, "GrpcAdapter.PushTaskRes")
        return message_handler.push_task_res(
            request=request,
            state=self.state_factory.state(),
        )

    def _get_run(self, request: GetRunRequest) -> GetRunResponse:
        """Get run information."""
        log(INFO, "GrpcAdapter.GetRun")
        return message_handler.get_run(
            request=request,
            state=self.state_factory.state(),
        )

    def _get_fab(self, request: GetFabRequest) -> GetFabResponse:
        """Get FAB."""
        log(INFO, "GrpcAdapter.GetFab")
        return message_handler.get_fab(
            request=request,
            ffs=self.ffs_factory.ffs(),
        )
+# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/src/py/flwr/server/superlink/fleet/grpc_bidi/flower_service_servicer.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/flower_service_servicer.py index 6f94ea844e38..38f0dfdae299 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_bidi/flower_service_servicer.py +++ b/src/py/flwr/server/superlink/fleet/grpc_bidi/flower_service_servicer.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -19,7 +19,8 @@ """ import uuid -from typing import Callable, Iterator +from collections.abc import Iterator +from typing import Callable import grpc from iterators import TimeoutIterator diff --git a/src/py/flwr/server/superlink/fleet/grpc_bidi/flower_service_servicer_test.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/flower_service_servicer_test.py index bd93554a6a32..03e8555f8ecf 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_bidi/flower_service_servicer_test.py +++ b/src/py/flwr/server/superlink/fleet/grpc_bidi/flower_service_servicer_test.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge.py index d5b4a915c609..476e2914f4d9 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge.py +++ b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,10 +15,11 @@ """Provides class GrpcBridge.""" +from collections.abc import Iterator from dataclasses import dataclass from enum import Enum from threading import Condition -from typing import Iterator, Optional +from typing import Optional from flwr.proto.transport_pb2 import ( # pylint: disable=E0611 ClientMessage, diff --git a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge_test.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge_test.py index f7c236acd7a1..6d9e081d8dd4 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge_test.py +++ b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge_test.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,7 +17,7 @@ import time from threading import Thread -from typing import List, Union +from typing import Union from flwr.proto.transport_pb2 import ( # pylint: disable=E0611 ClientMessage, @@ -32,7 +32,7 @@ def start_worker( - rounds: int, bridge: GrpcBridge, results: List[ClientMessage] + rounds: int, bridge: GrpcBridge, results: list[ClientMessage] ) -> Thread: """Simulate processing loop with five calls.""" @@ -59,7 +59,7 @@ def test_workflow_successful() -> None: """Test full workflow.""" # Prepare rounds = 5 - client_messages_received: List[ClientMessage] = [] + client_messages_received: list[ClientMessage] = [] bridge = GrpcBridge() ins_wrapper_iterator = bridge.ins_wrapper_iterator() @@ -90,7 +90,7 @@ def test_workflow_close() -> None: """ # Prepare rounds = 5 - client_messages_received: List[ClientMessage] = [] + client_messages_received: list[ClientMessage] = [] bridge = GrpcBridge() ins_wrapper_iterator = bridge.ins_wrapper_iterator() @@ -135,7 +135,7 @@ def test_ins_wrapper_iterator_close_while_blocking() -> None: """ # Prepare rounds = 5 - client_messages_received: List[ClientMessage] = [] + client_messages_received: list[ClientMessage] = [] bridge = GrpcBridge() ins_wrapper_iterator = bridge.ins_wrapper_iterator() diff --git a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_client_proxy.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_client_proxy.py index ac62ad014950..03497743becd 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_client_proxy.py +++ b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_client_proxy.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_client_proxy_test.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_client_proxy_test.py index e7077dfd39ae..6d3eb4f67e30 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_client_proxy_test.py +++ b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_client_proxy_test.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py index 6aeaa7ef413f..9d2e13d5b107 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py +++ b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,18 +17,23 @@ import concurrent.futures import sys +from collections.abc import Sequence from logging import ERROR -from typing import Any, Callable, Optional, Sequence, Tuple, Union +from typing import Any, Callable, Optional, Union import grpc from flwr.common import GRPC_MAX_MESSAGE_LENGTH +from flwr.common.address import is_port_in_use from flwr.common.logger import log from flwr.proto.transport_pb2_grpc import ( # pylint: disable=E0611 add_FlowerServiceServicer_to_server, ) from flwr.server.client_manager import ClientManager from flwr.server.superlink.driver.driver_servicer import DriverServicer +from flwr.server.superlink.fleet.grpc_adapter.grpc_adapter_servicer import ( + GrpcAdapterServicer, +) from flwr.server.superlink.fleet.grpc_bidi.flower_service_servicer import ( FlowerServiceServicer, ) @@ -42,7 +47,7 @@ AddServicerToServerFn = Callable[..., Any] -def valid_certificates(certificates: Tuple[bytes, bytes, bytes]) -> bool: +def valid_certificates(certificates: tuple[bytes, bytes, bytes]) -> bool: """Validate certificates tuple.""" is_valid = ( all(isinstance(certificate, bytes) for certificate in certificates) @@ -55,13 +60,13 @@ def valid_certificates(certificates: Tuple[bytes, bytes, bytes]) -> bool: return is_valid -def start_grpc_server( # pylint: disable=too-many-arguments +def start_grpc_server( # pylint: disable=too-many-arguments,R0917 client_manager: ClientManager, server_address: str, max_concurrent_workers: int = 1000, max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, keepalive_time_ms: int = 210000, - certificates: Optional[Tuple[bytes, bytes, bytes]] = None, + certificates: Optional[tuple[bytes, bytes, bytes]] = None, ) -> grpc.Server: """Create and start a gRPC server running FlowerServiceServicer. 
@@ -151,24 +156,25 @@ def start_grpc_server( # pylint: disable=too-many-arguments return server -def generic_create_grpc_server( # pylint: disable=too-many-arguments +def generic_create_grpc_server( # pylint: disable=too-many-arguments,R0917 servicer_and_add_fn: Union[ - Tuple[FleetServicer, AddServicerToServerFn], - Tuple[FlowerServiceServicer, AddServicerToServerFn], - Tuple[DriverServicer, AddServicerToServerFn], + tuple[FleetServicer, AddServicerToServerFn], + tuple[GrpcAdapterServicer, AddServicerToServerFn], + tuple[FlowerServiceServicer, AddServicerToServerFn], + tuple[DriverServicer, AddServicerToServerFn], ], server_address: str, max_concurrent_workers: int = 1000, max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, keepalive_time_ms: int = 210000, - certificates: Optional[Tuple[bytes, bytes, bytes]] = None, + certificates: Optional[tuple[bytes, bytes, bytes]] = None, interceptors: Optional[Sequence[grpc.ServerInterceptor]] = None, ) -> grpc.Server: """Create a gRPC server with a single servicer. Parameters ---------- - servicer_and_add_fn : Tuple + servicer_and_add_fn : tuple A tuple holding a servicer implementation and a matching add_Servicer_to_server function. server_address : str @@ -208,12 +214,18 @@ def generic_create_grpc_server( # pylint: disable=too-many-arguments * CA certificate. * server certificate. * server private key. + interceptors : Optional[Sequence[grpc.ServerInterceptor]] (default: None) + A list of gRPC interceptors. Returns ------- server : grpc.Server A non-running instance of a gRPC server. 
""" + # Check if port is in use + if is_port_in_use(server_address): + sys.exit(f"Port in server address {server_address} is already in use.") + # Deconstruct tuple into servicer and function servicer, add_servicer_to_server_fn = servicer_and_add_fn diff --git a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server_test.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server_test.py index 8afa37515950..9635993e0ad5 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server_test.py +++ b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server_test.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -20,7 +20,7 @@ from contextlib import closing from os.path import abspath, dirname, join from pathlib import Path -from typing import Tuple, cast +from typing import cast from flwr.server.client_manager import SimpleClientManager from flwr.server.superlink.fleet.grpc_bidi.grpc_server import ( @@ -31,7 +31,7 @@ root_dir = dirname(abspath(join(__file__, "../../../../../../.."))) -def load_certificates() -> Tuple[str, str, str]: +def load_certificates() -> tuple[str, str, str]: """Generate and load SSL credentials/certificates. Utility function for loading for SSL-enabled gRPC servertests. diff --git a/src/py/flwr/server/superlink/fleet/grpc_rere/__init__.py b/src/py/flwr/server/superlink/fleet/grpc_rere/__init__.py index 61ab71d91400..03c8ded2423a 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_rere/__init__.py +++ b/src/py/flwr/server/superlink/fleet/grpc_rere/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/src/py/flwr/server/superlink/fleet/grpc_rere/fleet_servicer.py b/src/py/flwr/server/superlink/fleet/grpc_rere/fleet_servicer.py index 03a2ec064213..02e34e0bba02 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_rere/fleet_servicer.py +++ b/src/py/flwr/server/superlink/fleet/grpc_rere/fleet_servicer.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -21,13 +21,12 @@ from flwr.common.logger import log from flwr.proto import fleet_pb2_grpc # pylint: disable=E0611 +from flwr.proto.fab_pb2 import GetFabRequest, GetFabResponse # pylint: disable=E0611 from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 CreateNodeRequest, CreateNodeResponse, DeleteNodeRequest, DeleteNodeResponse, - GetRunRequest, - GetRunResponse, PingRequest, PingResponse, PullTaskInsRequest, @@ -35,6 +34,8 @@ PushTaskResRequest, PushTaskResResponse, ) +from flwr.proto.run_pb2 import GetRunRequest, GetRunResponse # pylint: disable=E0611 +from flwr.server.superlink.ffs.ffs_factory import FfsFactory from flwr.server.superlink.fleet.message_handler import message_handler from flwr.server.superlink.state import StateFactory @@ -42,24 +43,30 @@ class FleetServicer(fleet_pb2_grpc.FleetServicer): """Fleet API servicer.""" - def __init__(self, state_factory: StateFactory) -> None: + def __init__(self, state_factory: StateFactory, ffs_factory: FfsFactory) -> None: self.state_factory = state_factory + self.ffs_factory = ffs_factory def CreateNode( self, request: CreateNodeRequest, context: grpc.ServicerContext ) -> CreateNodeResponse: """.""" - log(INFO, "FleetServicer.CreateNode") - return message_handler.create_node( + log(INFO, "[Fleet.CreateNode] Request ping_interval=%s", request.ping_interval) + log(DEBUG, "[Fleet.CreateNode] Request: %s", request) + response = 
message_handler.create_node( request=request, state=self.state_factory.state(), ) + log(INFO, "[Fleet.CreateNode] Created node_id=%s", response.node.node_id) + log(DEBUG, "[Fleet.CreateNode] Response: %s", response) + return response def DeleteNode( self, request: DeleteNodeRequest, context: grpc.ServicerContext ) -> DeleteNodeResponse: """.""" - log(INFO, "FleetServicer.DeleteNode") + log(INFO, "[Fleet.DeleteNode] Delete node_id=%s", request.node.node_id) + log(DEBUG, "[Fleet.DeleteNode] Request: %s", request) return message_handler.delete_node( request=request, state=self.state_factory.state(), @@ -67,7 +74,7 @@ def DeleteNode( def Ping(self, request: PingRequest, context: grpc.ServicerContext) -> PingResponse: """.""" - log(DEBUG, "FleetServicer.Ping") + log(DEBUG, "[Fleet.Ping] Request: %s", request) return message_handler.ping( request=request, state=self.state_factory.state(), @@ -77,7 +84,8 @@ def PullTaskIns( self, request: PullTaskInsRequest, context: grpc.ServicerContext ) -> PullTaskInsResponse: """Pull TaskIns.""" - log(INFO, "FleetServicer.PullTaskIns") + log(INFO, "[Fleet.PullTaskIns] node_id=%s", request.node.node_id) + log(DEBUG, "[Fleet.PullTaskIns] Request: %s", request) return message_handler.pull_task_ins( request=request, state=self.state_factory.state(), @@ -87,7 +95,14 @@ def PushTaskRes( self, request: PushTaskResRequest, context: grpc.ServicerContext ) -> PushTaskResResponse: """Push TaskRes.""" - log(INFO, "FleetServicer.PushTaskRes") + if request.task_res_list: + log( + INFO, + "[Fleet.PushTaskRes] Push results from node_id=%s", + request.task_res_list[0].task.producer.node_id, + ) + else: + log(INFO, "[Fleet.PushTaskRes] No task results to push") return message_handler.push_task_res( request=request, state=self.state_factory.state(), @@ -97,8 +112,18 @@ def GetRun( self, request: GetRunRequest, context: grpc.ServicerContext ) -> GetRunResponse: """Get run information.""" - log(INFO, "FleetServicer.GetRun") + log(INFO, "[Fleet.GetRun] 
Requesting `Run` for run_id=%s", request.run_id) return message_handler.get_run( request=request, state=self.state_factory.state(), ) + + def GetFab( + self, request: GetFabRequest, context: grpc.ServicerContext + ) -> GetFabResponse: + """Get FAB.""" + log(INFO, "[Fleet.GetFab] Requesting FAB for fab_hash=%s", request.hash_str) + return message_handler.get_fab( + request=request, + ffs=self.ffs_factory.ffs(), + ) diff --git a/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor.py b/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor.py index 6a302679a235..855fab353ae6 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor.py +++ b/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor.py @@ -16,8 +16,9 @@ import base64 -from logging import WARNING -from typing import Any, Callable, Optional, Sequence, Tuple, Union +from collections.abc import Sequence +from logging import INFO, WARNING +from typing import Any, Callable, Optional, Union import grpc from cryptography.hazmat.primitives.asymmetric import ec @@ -29,13 +30,12 @@ generate_shared_key, verify_hmac, ) +from flwr.proto.fab_pb2 import GetFabRequest, GetFabResponse # pylint: disable=E0611 from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 CreateNodeRequest, CreateNodeResponse, DeleteNodeRequest, DeleteNodeResponse, - GetRunRequest, - GetRunResponse, PingRequest, PingResponse, PullTaskInsRequest, @@ -44,6 +44,7 @@ PushTaskResResponse, ) from flwr.proto.node_pb2 import Node # pylint: disable=E0611 +from flwr.proto.run_pb2 import GetRunRequest, GetRunResponse # pylint: disable=E0611 from flwr.server.superlink.state import State _PUBLIC_KEY_HEADER = "public-key" @@ -56,6 +57,7 @@ PushTaskResRequest, GetRunRequest, PingRequest, + GetFabRequest, ] Response = Union[ @@ -65,11 +67,12 @@ PushTaskResResponse, GetRunResponse, PingResponse, + GetFabResponse, ] def _get_value_from_tuples( - key_string: str, tuples: Sequence[Tuple[str, Union[str, bytes]]] + 
key_string: str, tuples: Sequence[tuple[str, Union[str, bytes]]] ) -> bytes: value = next((value for key, value in tuples if key == key_string), "") if isinstance(value, str): @@ -79,13 +82,13 @@ def _get_value_from_tuples( class AuthenticateServerInterceptor(grpc.ServerInterceptor): # type: ignore - """Server interceptor for client authentication.""" + """Server interceptor for node authentication.""" def __init__(self, state: State): self.state = state - self.client_public_keys = state.get_client_public_keys() - if len(self.client_public_keys) == 0: + self.node_public_keys = state.get_node_public_keys() + if len(self.node_public_keys) == 0: log(WARNING, "Authentication enabled, but no known public keys configured") private_key = self.state.get_server_private_key() @@ -104,9 +107,9 @@ def intercept_service( ) -> grpc.RpcMethodHandler: """Flower server interceptor authentication logic. - Intercept all unary calls from clients and authenticate clients by validating - auth metadata sent by the client. Continue RPC call if client is authenticated, - else, terminate RPC call by setting context to abort. + Intercept all unary calls from nodes and authenticate nodes by validating auth + metadata sent by the node. Continue RPC call if node is authenticated, else, + terminate RPC call by setting context to abort. 
""" # One of the method handlers in # `flwr.server.superlink.fleet.grpc_rere.fleet_server.FleetServicer` @@ -120,18 +123,24 @@ def _generic_method_handler( request: Request, context: grpc.ServicerContext, ) -> Response: - client_public_key_bytes = base64.urlsafe_b64decode( + node_public_key_bytes = base64.urlsafe_b64decode( _get_value_from_tuples( _PUBLIC_KEY_HEADER, context.invocation_metadata() ) ) - if client_public_key_bytes not in self.client_public_keys: + if node_public_key_bytes not in self.node_public_keys: context.abort(grpc.StatusCode.UNAUTHENTICATED, "Access denied") if isinstance(request, CreateNodeRequest): - return self._create_authenticated_node( - client_public_key_bytes, request, context + response = self._create_authenticated_node( + node_public_key_bytes, request, context + ) + log( + INFO, + "AuthenticateServerInterceptor: Created node_id=%s", + response.node.node_id, ) + return response # Verify hmac value hmac_value = base64.urlsafe_b64decode( @@ -139,13 +148,13 @@ def _generic_method_handler( _AUTH_TOKEN_HEADER, context.invocation_metadata() ) ) - public_key = bytes_to_public_key(client_public_key_bytes) + public_key = bytes_to_public_key(node_public_key_bytes) if not self._verify_hmac(public_key, request, hmac_value): context.abort(grpc.StatusCode.UNAUTHENTICATED, "Access denied") # Verify node_id - node_id = self.state.get_node_id(client_public_key_bytes) + node_id = self.state.get_node_id(node_public_key_bytes) if not self._verify_node_id(node_id, request): context.abort(grpc.StatusCode.UNAUTHENTICATED, "Access denied") @@ -167,6 +176,7 @@ def _verify_node_id( PushTaskResRequest, GetRunRequest, PingRequest, + GetFabRequest, ], ) -> bool: if node_id is None: @@ -183,7 +193,8 @@ def _verify_hmac( self, public_key: ec.EllipticCurvePublicKey, request: Request, hmac_value: bytes ) -> bool: shared_secret = generate_shared_key(self.server_private_key, public_key) - return verify_hmac(shared_secret, request.SerializeToString(True), hmac_value) + 
message_bytes = request.SerializeToString(deterministic=True) + return verify_hmac(shared_secret, message_bytes, hmac_value) def _create_authenticated_node( self, diff --git a/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor_test.py b/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor_test.py index c4c71e5a8188..cf7e05f0fb00 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor_test.py +++ b/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor_test.py @@ -20,6 +20,7 @@ import grpc +from flwr.common.constant import FLEET_API_GRPC_RERE_DEFAULT_ADDRESS from flwr.common.secure_aggregation.crypto.symmetric_encryption import ( compute_hmac, generate_key_pairs, @@ -32,8 +33,6 @@ CreateNodeResponse, DeleteNodeRequest, DeleteNodeResponse, - GetRunRequest, - GetRunResponse, PingRequest, PingResponse, PullTaskInsRequest, @@ -42,8 +41,10 @@ PushTaskResResponse, ) from flwr.proto.node_pb2 import Node # pylint: disable=E0611 +from flwr.proto.run_pb2 import GetRunRequest, GetRunResponse # pylint: disable=E0611 from flwr.proto.task_pb2 import Task, TaskRes # pylint: disable=E0611 -from flwr.server.app import ADDRESS_FLEET_API_GRPC_RERE, _run_fleet_api_grpc_rere +from flwr.server.app import _run_fleet_api_grpc_rere +from flwr.server.superlink.ffs.ffs_factory import FfsFactory from flwr.server.superlink.state.state_factory import StateFactory from .server_interceptor import ( @@ -58,22 +59,26 @@ class TestServerInterceptor(unittest.TestCase): # pylint: disable=R0902 def setUp(self) -> None: """Initialize mock stub and server interceptor.""" - self._client_private_key, self._client_public_key = generate_key_pairs() + self._node_private_key, self._node_public_key = generate_key_pairs() self._server_private_key, self._server_public_key = generate_key_pairs() state_factory = StateFactory(":flwr-in-memory-state:") self.state = state_factory.state() + ffs_factory = FfsFactory(".") + self.ffs = ffs_factory.ffs() 
self.state.store_server_private_public_key( private_key_to_bytes(self._server_private_key), public_key_to_bytes(self._server_public_key), ) - self.state.store_client_public_keys( - {public_key_to_bytes(self._client_public_key)} - ) + self.state.store_node_public_keys({public_key_to_bytes(self._node_public_key)}) self._server_interceptor = AuthenticateServerInterceptor(self.state) self._server: grpc.Server = _run_fleet_api_grpc_rere( - ADDRESS_FLEET_API_GRPC_RERE, state_factory, None, [self._server_interceptor] + FLEET_API_GRPC_RERE_DEFAULT_ADDRESS, + state_factory, + ffs_factory, + None, + [self._server_interceptor], ) self._channel = grpc.insecure_channel("localhost:9092") @@ -116,7 +121,7 @@ def test_successful_create_node_with_metadata(self) -> None: """Test server interceptor for creating node.""" # Prepare public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) + public_key_to_bytes(self._node_public_key) ) # Execute @@ -139,9 +144,9 @@ def test_successful_create_node_with_metadata(self) -> None: def test_unsuccessful_create_node_with_metadata(self) -> None: """Test server interceptor for creating node unsuccessfully.""" # Prepare - _, client_public_key = generate_key_pairs() + _, node_public_key = generate_key_pairs() public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(client_public_key) + public_key_to_bytes(node_public_key) ) # Execute & Assert @@ -155,17 +160,17 @@ def test_successful_delete_node_with_metadata(self) -> None: """Test server interceptor for deleting node.""" # Prepare node_id = self.state.create_node( - ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + ping_interval=30, public_key=public_key_to_bytes(self._node_public_key) ) request = DeleteNodeRequest(node=Node(node_id=node_id)) shared_secret = generate_shared_key( - self._client_private_key, self._server_public_key + self._node_private_key, self._server_public_key ) hmac_value = base64.urlsafe_b64encode( - 
compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) + public_key_to_bytes(self._node_public_key) ) # Execute @@ -185,16 +190,16 @@ def test_unsuccessful_delete_node_with_metadata(self) -> None: """Test server interceptor for deleting node unsuccessfully.""" # Prepare node_id = self.state.create_node( - ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + ping_interval=30, public_key=public_key_to_bytes(self._node_public_key) ) request = DeleteNodeRequest(node=Node(node_id=node_id)) - client_private_key, _ = generate_key_pairs() - shared_secret = generate_shared_key(client_private_key, self._server_public_key) + node_private_key, _ = generate_key_pairs() + shared_secret = generate_shared_key(node_private_key, self._server_public_key) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) + public_key_to_bytes(self._node_public_key) ) # Execute & Assert @@ -211,17 +216,17 @@ def test_successful_pull_task_ins_with_metadata(self) -> None: """Test server interceptor for pull task ins.""" # Prepare node_id = self.state.create_node( - ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + ping_interval=30, public_key=public_key_to_bytes(self._node_public_key) ) request = PullTaskInsRequest(node=Node(node_id=node_id)) shared_secret = generate_shared_key( - self._client_private_key, self._server_public_key + self._node_private_key, self._server_public_key ) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) 
public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) + public_key_to_bytes(self._node_public_key) ) # Execute @@ -241,16 +246,16 @@ def test_unsuccessful_pull_task_ins_with_metadata(self) -> None: """Test server interceptor for pull task ins unsuccessfully.""" # Prepare node_id = self.state.create_node( - ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + ping_interval=30, public_key=public_key_to_bytes(self._node_public_key) ) request = PullTaskInsRequest(node=Node(node_id=node_id)) - client_private_key, _ = generate_key_pairs() - shared_secret = generate_shared_key(client_private_key, self._server_public_key) + node_private_key, _ = generate_key_pairs() + shared_secret = generate_shared_key(node_private_key, self._server_public_key) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) + public_key_to_bytes(self._node_public_key) ) # Execute & Assert @@ -267,19 +272,19 @@ def test_successful_push_task_res_with_metadata(self) -> None: """Test server interceptor for push task res.""" # Prepare node_id = self.state.create_node( - ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + ping_interval=30, public_key=public_key_to_bytes(self._node_public_key) ) request = PushTaskResRequest( task_res_list=[TaskRes(task=Task(producer=Node(node_id=node_id)))] ) shared_secret = generate_shared_key( - self._client_private_key, self._server_public_key + self._node_private_key, self._server_public_key ) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) + 
public_key_to_bytes(self._node_public_key) ) # Execute @@ -299,18 +304,18 @@ def test_unsuccessful_push_task_res_with_metadata(self) -> None: """Test server interceptor for push task res unsuccessfully.""" # Prepare node_id = self.state.create_node( - ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + ping_interval=30, public_key=public_key_to_bytes(self._node_public_key) ) request = PushTaskResRequest( task_res_list=[TaskRes(task=Task(producer=Node(node_id=node_id)))] ) - client_private_key, _ = generate_key_pairs() - shared_secret = generate_shared_key(client_private_key, self._server_public_key) + node_private_key, _ = generate_key_pairs() + shared_secret = generate_shared_key(node_private_key, self._server_public_key) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) + public_key_to_bytes(self._node_public_key) ) # Execute & Assert @@ -327,18 +332,18 @@ def test_successful_get_run_with_metadata(self) -> None: """Test server interceptor for pull task ins.""" # Prepare self.state.create_node( - ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + ping_interval=30, public_key=public_key_to_bytes(self._node_public_key) ) - run_id = self.state.create_run("", "") + run_id = self.state.create_run("", "", "", {}) request = GetRunRequest(run_id=run_id) shared_secret = generate_shared_key( - self._client_private_key, self._server_public_key + self._node_private_key, self._server_public_key ) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) + public_key_to_bytes(self._node_public_key) ) 
# Execute @@ -358,17 +363,17 @@ def test_unsuccessful_get_run_with_metadata(self) -> None: """Test server interceptor for pull task ins unsuccessfully.""" # Prepare self.state.create_node( - ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + ping_interval=30, public_key=public_key_to_bytes(self._node_public_key) ) - run_id = self.state.create_run("", "") + run_id = self.state.create_run("", "", "", {}) request = GetRunRequest(run_id=run_id) - client_private_key, _ = generate_key_pairs() - shared_secret = generate_shared_key(client_private_key, self._server_public_key) + node_private_key, _ = generate_key_pairs() + shared_secret = generate_shared_key(node_private_key, self._server_public_key) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) + public_key_to_bytes(self._node_public_key) ) # Execute & Assert @@ -385,17 +390,17 @@ def test_successful_ping_with_metadata(self) -> None: """Test server interceptor for pull task ins.""" # Prepare node_id = self.state.create_node( - ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + ping_interval=30, public_key=public_key_to_bytes(self._node_public_key) ) request = PingRequest(node=Node(node_id=node_id)) shared_secret = generate_shared_key( - self._client_private_key, self._server_public_key + self._node_private_key, self._server_public_key ) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) + public_key_to_bytes(self._node_public_key) ) # Execute @@ -415,16 +420,16 @@ def test_unsuccessful_ping_with_metadata(self) -> None: """Test server 
interceptor for pull task ins unsuccessfully.""" # Prepare node_id = self.state.create_node( - ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + ping_interval=30, public_key=public_key_to_bytes(self._node_public_key) ) request = PingRequest(node=Node(node_id=node_id)) - client_private_key, _ = generate_key_pairs() - shared_secret = generate_shared_key(client_private_key, self._server_public_key) + node_private_key, _ = generate_key_pairs() + shared_secret = generate_shared_key(node_private_key, self._server_public_key) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) + public_key_to_bytes(self._node_public_key) ) # Execute & Assert @@ -440,7 +445,7 @@ def test_unsuccessful_ping_with_metadata(self) -> None: def test_successful_restore_node(self) -> None: """Test server interceptor for restoring node.""" public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) + public_key_to_bytes(self._node_public_key) ) response, call = self._create_node.with_call( request=CreateNodeRequest(), @@ -455,20 +460,20 @@ def test_successful_restore_node(self) -> None: ) node = response.node - client_node_id = node.node_id + node_node_id = node.node_id assert call.initial_metadata()[0] == expected_metadata assert isinstance(response, CreateNodeResponse) request = DeleteNodeRequest(node=node) shared_secret = generate_shared_key( - self._client_private_key, self._server_public_key + self._node_private_key, self._server_public_key ) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) + 
public_key_to_bytes(self._node_public_key) ) response, call = self._delete_node.with_call( request=request, @@ -482,7 +487,7 @@ def test_successful_restore_node(self) -> None: assert grpc.StatusCode.OK == call.code() public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) + public_key_to_bytes(self._node_public_key) ) response, call = self._create_node.with_call( request=CreateNodeRequest(), @@ -498,4 +503,4 @@ def test_successful_restore_node(self) -> None: assert call.initial_metadata()[0] == expected_metadata assert isinstance(response, CreateNodeResponse) - assert response.node.node_id == client_node_id + assert response.node.node_id == node_node_id diff --git a/src/py/flwr/server/superlink/fleet/message_handler/__init__.py b/src/py/flwr/server/superlink/fleet/message_handler/__init__.py index 18b0f11fa6c5..3db0ef5d1611 100644 --- a/src/py/flwr/server/superlink/fleet/message_handler/__init__.py +++ b/src/py/flwr/server/superlink/fleet/message_handler/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py b/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py index 83b005a4cb8e..85f3fa34e0ac 100644 --- a/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py +++ b/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -16,16 +16,17 @@ import time -from typing import List, Optional +from typing import Optional from uuid import UUID +from flwr.common.serde import fab_to_proto, user_config_to_proto +from flwr.common.typing import Fab +from flwr.proto.fab_pb2 import GetFabRequest, GetFabResponse # pylint: disable=E0611 from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 CreateNodeRequest, CreateNodeResponse, DeleteNodeRequest, DeleteNodeResponse, - GetRunRequest, - GetRunResponse, PingRequest, PingResponse, PullTaskInsRequest, @@ -33,10 +34,15 @@ PushTaskResRequest, PushTaskResResponse, Reconnect, - Run, ) from flwr.proto.node_pb2 import Node # pylint: disable=E0611 +from flwr.proto.run_pb2 import ( # pylint: disable=E0611 + GetRunRequest, + GetRunResponse, + Run, +) from flwr.proto.task_pb2 import TaskIns, TaskRes # pylint: disable=E0611 +from flwr.server.superlink.ffs.ffs import Ffs from flwr.server.superlink.state import State @@ -77,7 +83,7 @@ def pull_task_ins(request: PullTaskInsRequest, state: State) -> PullTaskInsRespo node_id: Optional[int] = None if node.anonymous else node.node_id # Retrieve TaskIns from State - task_ins_list: List[TaskIns] = state.get_task_ins(node_id=node_id, limit=1) + task_ins_list: list[TaskIns] = state.get_task_ins(node_id=node_id, limit=1) # Build response response = PullTaskInsResponse( @@ -110,6 +116,28 @@ def get_run( request: GetRunRequest, state: State # pylint: disable=W0613 ) -> GetRunResponse: """Get run information.""" - run_id, fab_id, fab_version = state.get_run(request.run_id) - run = Run(run_id=run_id, fab_id=fab_id, fab_version=fab_version) - return GetRunResponse(run=run) + run = state.get_run(request.run_id) + + if run is None: + return GetRunResponse() + + return GetRunResponse( + run=Run( + run_id=run.run_id, + fab_id=run.fab_id, + fab_version=run.fab_version, + override_config=user_config_to_proto(run.override_config), + fab_hash=run.fab_hash, + ) + ) + + +def get_fab( + request: GetFabRequest, ffs: Ffs # pylint: 
disable=W0613 +) -> GetFabResponse: + """Get FAB.""" + if result := ffs.get(request.hash_str): + fab = Fab(request.hash_str, result[0]) + return GetFabResponse(fab=fab_to_proto(fab)) + + raise ValueError(f"Found no FAB with hash: {request.hash_str}") diff --git a/src/py/flwr/server/superlink/fleet/message_handler/message_handler_test.py b/src/py/flwr/server/superlink/fleet/message_handler/message_handler_test.py index c135f6fb7b61..ec521b328eb8 100644 --- a/src/py/flwr/server/superlink/fleet/message_handler/message_handler_test.py +++ b/src/py/flwr/server/superlink/fleet/message_handler/message_handler_test.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/src/py/flwr/server/superlink/fleet/rest_rere/__init__.py b/src/py/flwr/server/superlink/fleet/rest_rere/__init__.py index a926f9ca0bfc..f24db2a2e12f 100644 --- a/src/py/flwr/server/superlink/fleet/rest_rere/__init__.py +++ b/src/py/flwr/server/superlink/fleet/rest_rere/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py b/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py index 8ac7c6cfc613..a988252b3ea2 100644 --- a/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py +++ b/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,17 +15,30 @@ """Experimental REST API server.""" +from __future__ import annotations + import sys +from collections.abc import Awaitable +from typing import Callable, TypeVar + +from google.protobuf.message import Message as GrpcMessage from flwr.common.constant import MISSING_EXTRA_REST +from flwr.proto.fab_pb2 import GetFabRequest, GetFabResponse # pylint: disable=E0611 from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 CreateNodeRequest, + CreateNodeResponse, DeleteNodeRequest, - GetRunRequest, + DeleteNodeResponse, PingRequest, + PingResponse, PullTaskInsRequest, + PullTaskInsResponse, PushTaskResRequest, + PushTaskResResponse, ) +from flwr.proto.run_pb2 import GetRunRequest, GetRunResponse # pylint: disable=E0611 +from flwr.server.superlink.ffs.ffs import Ffs from flwr.server.superlink.fleet.message_handler import message_handler from flwr.server.superlink.state import State @@ -40,172 +53,108 @@ sys.exit(MISSING_EXTRA_REST) -async def create_node(request: Request) -> Response: - """Create Node.""" - _check_headers(request.headers) +GrpcRequest = TypeVar("GrpcRequest", bound=GrpcMessage) +GrpcResponse = TypeVar("GrpcResponse", bound=GrpcMessage) - # Get the request body as raw bytes - create_node_request_bytes: bytes = await request.body() +GrpcAsyncFunction = Callable[[GrpcRequest], Awaitable[GrpcResponse]] +RestEndPoint = Callable[[Request], Awaitable[Response]] - # Deserialize ProtoBuf - create_node_request_proto = CreateNodeRequest() - create_node_request_proto.ParseFromString(create_node_request_bytes) - # Get state from app - state: State = app.state.STATE_FACTORY.state() +def rest_request_response( + grpc_request_type: type[GrpcRequest], +) -> Callable[[GrpcAsyncFunction[GrpcRequest, GrpcResponse]], RestEndPoint]: + """Convert an async gRPC-based function into a RESTful HTTP endpoint.""" - # Handle message 
- create_node_response_proto = message_handler.create_node( - request=create_node_request_proto, state=state - ) + def decorator(func: GrpcAsyncFunction[GrpcRequest, GrpcResponse]) -> RestEndPoint: + async def wrapper(request: Request) -> Response: + _check_headers(request.headers) - # Return serialized ProtoBuf - create_node_response_bytes = create_node_response_proto.SerializeToString() - return Response( - status_code=200, - content=create_node_response_bytes, - headers={"Content-Type": "application/protobuf"}, - ) + # Get the request body as raw bytes + grpc_req_bytes: bytes = await request.body() + # Deserialize ProtoBuf + grpc_req = grpc_request_type.FromString(grpc_req_bytes) + grpc_res = await func(grpc_req) + return Response( + status_code=200, + content=grpc_res.SerializeToString(), + headers={"Content-Type": "application/protobuf"}, + ) -async def delete_node(request: Request) -> Response: - """Delete Node Id.""" - _check_headers(request.headers) + return wrapper - # Get the request body as raw bytes - delete_node_request_bytes: bytes = await request.body() + return decorator - # Deserialize ProtoBuf - delete_node_request_proto = DeleteNodeRequest() - delete_node_request_proto.ParseFromString(delete_node_request_bytes) +@rest_request_response(CreateNodeRequest) +async def create_node(request: CreateNodeRequest) -> CreateNodeResponse: + """Create Node.""" # Get state from app state: State = app.state.STATE_FACTORY.state() # Handle message - delete_node_response_proto = message_handler.delete_node( - request=delete_node_request_proto, state=state - ) + return message_handler.create_node(request=request, state=state) - # Return serialized ProtoBuf - delete_node_response_bytes = delete_node_response_proto.SerializeToString() - return Response( - status_code=200, - content=delete_node_response_bytes, - headers={"Content-Type": "application/protobuf"}, - ) +@rest_request_response(DeleteNodeRequest) +async def delete_node(request: DeleteNodeRequest) -> 
DeleteNodeResponse: + """Delete Node Id.""" + # Get state from app + state: State = app.state.STATE_FACTORY.state() -async def pull_task_ins(request: Request) -> Response: - """Pull TaskIns.""" - _check_headers(request.headers) - - # Get the request body as raw bytes - pull_task_ins_request_bytes: bytes = await request.body() + # Handle message + return message_handler.delete_node(request=request, state=state) - # Deserialize ProtoBuf - pull_task_ins_request_proto = PullTaskInsRequest() - pull_task_ins_request_proto.ParseFromString(pull_task_ins_request_bytes) +@rest_request_response(PullTaskInsRequest) +async def pull_task_ins(request: PullTaskInsRequest) -> PullTaskInsResponse: + """Pull TaskIns.""" # Get state from app state: State = app.state.STATE_FACTORY.state() # Handle message - pull_task_ins_response_proto = message_handler.pull_task_ins( - request=pull_task_ins_request_proto, - state=state, - ) - - # Return serialized ProtoBuf - pull_task_ins_response_bytes = pull_task_ins_response_proto.SerializeToString() - return Response( - status_code=200, - content=pull_task_ins_response_bytes, - headers={"Content-Type": "application/protobuf"}, - ) + return message_handler.pull_task_ins(request=request, state=state) -async def push_task_res(request: Request) -> Response: # Check if token is needed here +# Check if token is needed here +@rest_request_response(PushTaskResRequest) +async def push_task_res(request: PushTaskResRequest) -> PushTaskResResponse: """Push TaskRes.""" - _check_headers(request.headers) - - # Get the request body as raw bytes - push_task_res_request_bytes: bytes = await request.body() - - # Deserialize ProtoBuf - push_task_res_request_proto = PushTaskResRequest() - push_task_res_request_proto.ParseFromString(push_task_res_request_bytes) - # Get state from app state: State = app.state.STATE_FACTORY.state() # Handle message - push_task_res_response_proto = message_handler.push_task_res( - request=push_task_res_request_proto, - state=state, - ) - 
- # Return serialized ProtoBuf - push_task_res_response_bytes = push_task_res_response_proto.SerializeToString() - return Response( - status_code=200, - content=push_task_res_response_bytes, - headers={"Content-Type": "application/protobuf"}, - ) + return message_handler.push_task_res(request=request, state=state) -async def ping(request: Request) -> Response: +@rest_request_response(PingRequest) +async def ping(request: PingRequest) -> PingResponse: """Ping.""" - _check_headers(request.headers) - - # Get the request body as raw bytes - ping_request_bytes: bytes = await request.body() - - # Deserialize ProtoBuf - ping_request_proto = PingRequest() - ping_request_proto.ParseFromString(ping_request_bytes) - # Get state from app state: State = app.state.STATE_FACTORY.state() # Handle message - ping_response_proto = message_handler.ping(request=ping_request_proto, state=state) - - # Return serialized ProtoBuf - ping_response_bytes = ping_response_proto.SerializeToString() - return Response( - status_code=200, - content=ping_response_bytes, - headers={"Content-Type": "application/protobuf"}, - ) + return message_handler.ping(request=request, state=state) -async def get_run(request: Request) -> Response: +@rest_request_response(GetRunRequest) +async def get_run(request: GetRunRequest) -> GetRunResponse: """GetRun.""" - _check_headers(request.headers) - - # Get the request body as raw bytes - get_run_request_bytes: bytes = await request.body() - - # Deserialize ProtoBuf - get_run_request_proto = GetRunRequest() - get_run_request_proto.ParseFromString(get_run_request_bytes) - # Get state from app state: State = app.state.STATE_FACTORY.state() # Handle message - get_run_response_proto = message_handler.get_run( - request=get_run_request_proto, state=state - ) + return message_handler.get_run(request=request, state=state) + - # Return serialized ProtoBuf - get_run_response_bytes = get_run_response_proto.SerializeToString() - return Response( - status_code=200, - 
content=get_run_response_bytes, - headers={"Content-Type": "application/protobuf"}, - ) +@rest_request_response(GetFabRequest) +async def get_fab(request: GetFabRequest) -> GetFabResponse: + """GetRun.""" + # Get ffs from app + ffs: Ffs = app.state.FFS_FACTORY.state() + + # Handle message + return message_handler.get_fab(request=request, ffs=ffs) routes = [ @@ -215,6 +164,7 @@ async def get_run(request: Request) -> Response: Route("/api/v0/fleet/push-task-res", push_task_res, methods=["POST"]), Route("/api/v0/fleet/ping", ping, methods=["POST"]), Route("/api/v0/fleet/get-run", get_run, methods=["POST"]), + Route("/api/v0/fleet/get-fab", get_fab, methods=["POST"]), ] app: Starlette = Starlette( diff --git a/src/py/flwr/server/superlink/fleet/vce/backend/__init__.py b/src/py/flwr/server/superlink/fleet/vce/backend/__init__.py index d751cf4bcae1..31129fce1b1b 100644 --- a/src/py/flwr/server/superlink/fleet/vce/backend/__init__.py +++ b/src/py/flwr/server/superlink/fleet/vce/backend/__init__.py @@ -15,17 +15,16 @@ """Simulation Engine Backends.""" import importlib -from typing import Dict, Type from .backend import Backend, BackendConfig is_ray_installed = importlib.util.find_spec("ray") is not None # Mapping of supported backends -supported_backends: Dict[str, Type[Backend]] = {} +supported_backends: dict[str, type[Backend]] = {} # To log backend-specific error message when chosen backend isn't available -error_messages_backends: Dict[str, str] = {} +error_messages_backends: dict[str, str] = {} if is_ray_installed: from .raybackend import RayBackend @@ -38,7 +37,7 @@ To install the necessary dependencies, install `flwr` with the `simulation` extra: - pip install -U flwr["simulation"] + pip install -U "flwr[simulation]" """ diff --git a/src/py/flwr/server/superlink/fleet/vce/backend/backend.py b/src/py/flwr/server/superlink/fleet/vce/backend/backend.py index 1d5e3a6a51ad..38be6032e3a5 100644 --- a/src/py/flwr/server/superlink/fleet/vce/backend/backend.py +++ 
b/src/py/flwr/server/superlink/fleet/vce/backend/backend.py @@ -16,25 +16,25 @@ from abc import ABC, abstractmethod -from typing import Callable, Dict, Tuple +from typing import Callable from flwr.client.client_app import ClientApp from flwr.common.context import Context from flwr.common.message import Message from flwr.common.typing import ConfigsRecordValues -BackendConfig = Dict[str, Dict[str, ConfigsRecordValues]] +BackendConfig = dict[str, dict[str, ConfigsRecordValues]] class Backend(ABC): """Abstract base class for a Simulation Engine Backend.""" - def __init__(self, backend_config: BackendConfig, work_dir: str) -> None: + def __init__(self, backend_config: BackendConfig) -> None: """Construct a backend.""" @abstractmethod - async def build(self) -> None: - """Build backend asynchronously. + def build(self, app_fn: Callable[[], ClientApp]) -> None: + """Build backend. Different components need to be in place before workers in a backend are ready to accept jobs. When this method finishes executing, the backend should be fully @@ -54,14 +54,13 @@ def is_worker_idle(self) -> bool: """Report whether a backend worker is idle and can therefore run a ClientApp.""" @abstractmethod - async def terminate(self) -> None: + def terminate(self) -> None: """Terminate backend.""" @abstractmethod - async def process_message( + def process_message( self, - app: Callable[[], ClientApp], message: Message, context: Context, - ) -> Tuple[Message, Context]: + ) -> tuple[Message, Context]: """Submit a job to the backend.""" diff --git a/src/py/flwr/server/superlink/fleet/vce/backend/raybackend.py b/src/py/flwr/server/superlink/fleet/vce/backend/raybackend.py index 93aca583af9c..dd79d2ef7f62 100644 --- a/src/py/flwr/server/superlink/fleet/vce/backend/raybackend.py +++ b/src/py/flwr/server/superlink/fleet/vce/backend/raybackend.py @@ -14,26 +14,25 @@ # ============================================================================== """Ray backend for the Fleet API using the Simulation 
Engine.""" -import pathlib -from logging import DEBUG, ERROR, WARNING -from typing import Callable, Dict, List, Tuple, Union +import sys +from logging import DEBUG, ERROR +from typing import Callable, Optional, Union import ray from flwr.client.client_app import ClientApp +from flwr.common.constant import PARTITION_ID_KEY from flwr.common.context import Context from flwr.common.logger import log from flwr.common.message import Message -from flwr.simulation.ray_transport.ray_actor import ( - BasicActorPool, - ClientAppActor, - init_ray, -) +from flwr.common.typing import ConfigsRecordValues +from flwr.simulation.ray_transport.ray_actor import BasicActorPool, ClientAppActor from flwr.simulation.ray_transport.utils import enable_tf_gpu_growth from .backend import Backend, BackendConfig -ClientResourcesDict = Dict[str, Union[int, float]] +ClientResourcesDict = dict[str, Union[int, float]] +ActorArgsDict = dict[str, Union[int, float, Callable[[], None]]] class RayBackend(Backend): @@ -42,59 +41,24 @@ class RayBackend(Backend): def __init__( self, backend_config: BackendConfig, - work_dir: str, ) -> None: """Prepare RayBackend by initialising Ray and creating the ActorPool.""" log(DEBUG, "Initialising: %s", self.__class__.__name__) log(DEBUG, "Backend config: %s", backend_config) - if not pathlib.Path(work_dir).exists(): - raise ValueError(f"Specified work_dir {work_dir} does not exist.") - - # Init ray and append working dir if needed - runtime_env = ( - self._configure_runtime_env(work_dir=work_dir) if work_dir else None - ) - - if backend_config.get("mute_logging", False): - init_ray( - logging_level=WARNING, log_to_driver=False, runtime_env=runtime_env - ) - elif backend_config.get("silent", False): - init_ray(logging_level=WARNING, log_to_driver=True, runtime_env=runtime_env) - else: - init_ray(runtime_env=runtime_env) + # Initialise ray + self.init_args_key = "init_args" + self.init_ray(backend_config) # Validate client resources self.client_resources_key = 
"client_resources" + self.client_resources = self._validate_client_resources(config=backend_config) - # Create actor pool - use_tf = backend_config.get("tensorflow", False) - actor_kwargs = {"on_actor_init_fn": enable_tf_gpu_growth} if use_tf else {} - - client_resources = self._validate_client_resources(config=backend_config) - self.pool = BasicActorPool( - actor_type=ClientAppActor, - client_resources=client_resources, - actor_kwargs=actor_kwargs, - ) + # Valide actor resources + self.actor_kwargs = self._validate_actor_arguments(config=backend_config) + self.pool: Optional[BasicActorPool] = None - def _configure_runtime_env(self, work_dir: str) -> Dict[str, Union[str, List[str]]]: - """Return list of files/subdirectories to exclude relative to work_dir. - - Without this, Ray will push everything to the Ray Cluster. - """ - runtime_env: Dict[str, Union[str, List[str]]] = {"working_dir": work_dir} - - excludes = [] - path = pathlib.Path(work_dir) - for p in path.rglob("*"): - # Exclude files need to be relative to the working_dir - if p.is_file() and not str(p).endswith(".py"): - excludes.append(str(p.relative_to(path))) - runtime_env["excludes"] = excludes - - return runtime_env + self.app_fn: Optional[Callable[[], ClientApp]] = None def _validate_client_resources(self, config: BackendConfig) -> ClientResourcesDict: client_resources_config = config.get(self.client_resources_key) @@ -125,45 +89,89 @@ def _validate_client_resources(self, config: BackendConfig) -> ClientResourcesDi return client_resources + def _validate_actor_arguments(self, config: BackendConfig) -> ActorArgsDict: + actor_args_config = config.get("actor", False) + actor_args: ActorArgsDict = {} + if actor_args_config: + use_tf = actor_args.get("tensorflow", False) + if use_tf: + actor_args["on_actor_init_fn"] = enable_tf_gpu_growth + return actor_args + + def init_ray(self, backend_config: BackendConfig) -> None: + """Intialises Ray if not already initialised.""" + if not ray.is_initialized(): + 
ray_init_args: dict[ + str, + ConfigsRecordValues, + ] = {} + + if backend_config.get(self.init_args_key): + for k, v in backend_config[self.init_args_key].items(): + ray_init_args[k] = v + ray.init( + runtime_env={"env_vars": {"PYTHONPATH": ":".join(sys.path)}}, + **ray_init_args, + ) + @property def num_workers(self) -> int: """Return number of actors in pool.""" - return self.pool.num_actors + return self.pool.num_actors if self.pool else 0 def is_worker_idle(self) -> bool: """Report whether the pool has idle actors.""" - return self.pool.is_actor_available() + return self.pool.is_actor_available() if self.pool else False - async def build(self) -> None: + def build(self, app_fn: Callable[[], ClientApp]) -> None: """Build pool of Ray actors that this backend will submit jobs to.""" - await self.pool.add_actors_to_pool(self.pool.actors_capacity) + # Create Actor Pool + try: + self.pool = BasicActorPool( + actor_type=ClientAppActor, + client_resources=self.client_resources, + actor_kwargs=self.actor_kwargs, + ) + except Exception as ex: + raise ex + + self.pool.add_actors_to_pool(self.pool.actors_capacity) + # Set ClientApp callable that ray actors will use + self.app_fn = app_fn log(DEBUG, "Constructed ActorPool with: %i actors", self.pool.num_actors) - async def process_message( + def process_message( self, - app: Callable[[], ClientApp], message: Message, context: Context, - ) -> Tuple[Message, Context]: + ) -> tuple[Message, Context]: """Run ClientApp that process a given message. Return output message and updated context. """ - partition_id = message.metadata.partition_id + partition_id = context.node_config[PARTITION_ID_KEY] + + if self.pool is None: + raise ValueError("The actor pool is empty, unfit to process messages.") + + if self.app_fn is None: + raise ValueError( + "Unspecified function to load a `ClientApp`. " + "Call the backend's `build()` method before processing messages." 
+ ) try: - # Submite a task to the pool - future = await self.pool.submit( + # Submit a task to the pool + future = self.pool.submit( lambda a, a_fn, mssg, cid, state: a.run.remote(a_fn, mssg, cid, state), - (app, message, str(partition_id), context), + (self.app_fn, message, str(partition_id), context), ) - await future # Fetch result ( out_mssg, updated_context, - ) = await self.pool.fetch_result_and_return_actor_to_pool(future) + ) = self.pool.fetch_result_and_return_actor_to_pool(future) return out_mssg, updated_context @@ -174,11 +182,12 @@ async def process_message( self.__class__.__name__, ) # add actor back into pool - await self.pool.add_actor_back_to_pool(future) + self.pool.add_actor_back_to_pool(future) raise ex - async def terminate(self) -> None: + def terminate(self) -> None: """Terminate all actors in actor pool.""" - await self.pool.terminate_all_actors() + if self.pool: + self.pool.terminate_all_actors() ray.shutdown() log(DEBUG, "Terminated %s", self.__class__.__name__) diff --git a/src/py/flwr/server/superlink/fleet/vce/backend/raybackend_test.py b/src/py/flwr/server/superlink/fleet/vce/backend/raybackend_test.py index dcac0b81d666..1cbdc230c938 100644 --- a/src/py/flwr/server/superlink/fleet/vce/backend/raybackend_test.py +++ b/src/py/flwr/server/superlink/fleet/vce/backend/raybackend_test.py @@ -14,16 +14,15 @@ # ============================================================================== """Test for Ray backend for the Fleet API using the Simulation Engine.""" -import asyncio from math import pi -from pathlib import Path -from typing import Callable, Dict, Optional, Tuple, Union -from unittest import IsolatedAsyncioTestCase +from typing import Callable, Optional, Union +from unittest import TestCase import ray from flwr.client import Client, NumPyClient -from flwr.client.client_app import ClientApp, LoadClientAppError +from flwr.client.client_app import ClientApp +from flwr.client.node_state import NodeState from flwr.common import ( 
DEFAULT_TTL, Config, @@ -36,77 +35,65 @@ RecordSet, Scalar, ) -from flwr.common.object_ref import load_app +from flwr.common.constant import PARTITION_ID_KEY from flwr.common.recordset_compat import getpropertiesins_to_recordset +from flwr.server.superlink.fleet.vce.backend.backend import BackendConfig from flwr.server.superlink.fleet.vce.backend.raybackend import RayBackend class DummyClient(NumPyClient): """A dummy NumPyClient for tests.""" - def get_properties(self, config: Config) -> Dict[str, Scalar]: + def __init__(self, state: RecordSet) -> None: + self.client_state = state + + def get_properties(self, config: Config) -> dict[str, Scalar]: """Return properties by doing a simple calculation.""" result = float(config["factor"]) * pi # store something in context - self.context.state.configs_records["result"] = ConfigsRecord({"result": result}) + self.client_state.configs_records["result"] = ConfigsRecord({"result": result}) + return {"result": result} -def get_dummy_client(cid: str) -> Client: # pylint: disable=unused-argument +def get_dummy_client(context: Context) -> Client: # pylint: disable=unused-argument """Return a DummyClient converted to Client type.""" - return DummyClient().to_client() + return DummyClient(state=context.state).to_client() def _load_app() -> ClientApp: return ClientApp(client_fn=get_dummy_client) -client_app = ClientApp( - client_fn=get_dummy_client, -) - - -def _load_from_module(client_app_module_name: str) -> Callable[[], ClientApp]: - def _load_app() -> ClientApp: - app = load_app(client_app_module_name, LoadClientAppError) - - if not isinstance(app, ClientApp): - raise LoadClientAppError( - f"Attribute {client_app_module_name} is not of type {ClientApp}", - ) from None - - return app - - return _load_app - - -async def backend_build_process_and_termination( +def backend_build_process_and_termination( backend: RayBackend, - process_args: Optional[Tuple[Callable[[], ClientApp], Message, Context]] = None, -) -> Union[Tuple[Message, 
Context], None]: + app_fn: Callable[[], ClientApp], + process_args: Optional[tuple[Message, Context]] = None, +) -> Union[tuple[Message, Context], None]: """Build, process job and terminate RayBackend.""" - await backend.build() + backend.build(app_fn) to_return = None if process_args: - to_return = await backend.process_message(*process_args) + to_return = backend.process_message(*process_args) - await backend.terminate() + backend.terminate() return to_return -def _create_message_and_context() -> Tuple[Message, Context, float]: +def _create_message_and_context() -> tuple[Message, Context, float]: # Construct a Message mult_factor = 2024 + run_id = 0 getproperties_ins = GetPropertiesIns(config={"factor": mult_factor}) recordset = getpropertiesins_to_recordset(getproperties_ins) message = Message( content=recordset, metadata=Metadata( - run_id=0, + run_id=run_id, message_id="", group_id="", src_node_id=0, @@ -117,8 +104,10 @@ def _create_message_and_context() -> Tuple[Message, Context, float]: ), ) - # Construct emtpy Context - context = Context(state=RecordSet()) + # Construct NodeState and retrieve context + node_state = NodeState(node_id=run_id, node_config={PARTITION_ID_KEY: str(0)}) + node_state.register_context(run_id=run_id) + context = node_state.retrieve_context(run_id=run_id) # Expected output expected_output = pi * mult_factor @@ -126,38 +115,32 @@ def _create_message_and_context() -> Tuple[Message, Context, float]: return message, context, expected_output -class AsyncTestRayBackend(IsolatedAsyncioTestCase): - """A basic class that allows runnig multliple asyncio tests.""" +class TestRayBackend(TestCase): + """A basic class that allows runnig multliple tests.""" - async def on_cleanup(self) -> None: + def doCleanups(self) -> None: """Ensure Ray has shutdown.""" if ray.is_initialized(): ray.shutdown() def test_backend_creation_and_termination(self) -> None: """Test creation of RayBackend and its termination.""" - backend = RayBackend(backend_config={}, 
work_dir="") - asyncio.run( - backend_build_process_and_termination(backend=backend, process_args=None) + backend = RayBackend(backend_config={}) + backend_build_process_and_termination( + backend=backend, app_fn=_load_app, process_args=None ) def test_backend_creation_submit_and_termination( self, client_app_loader: Callable[[], ClientApp] = _load_app, - workdir: str = "", ) -> None: """Test submitting a message to a given ClientApp.""" - backend = RayBackend(backend_config={}, work_dir=workdir) - - # Define ClientApp - client_app_callable = client_app_loader + backend = RayBackend(backend_config={}) message, context, expected_output = _create_message_and_context() - res = asyncio.run( - backend_build_process_and_termination( - backend=backend, process_args=(client_app_callable, message, context) - ) + res = backend_build_process_and_termination( + backend=backend, app_fn=client_app_loader, process_args=(message, context) ) if res is None: @@ -171,47 +154,44 @@ def test_backend_creation_submit_and_termination( content.configs_records["getpropertiesres.properties"]["result"] == expected_output ) - # Verify context is correct obtained_result_in_context = updated_context.state.configs_records["result"][ "result" ] assert obtained_result_in_context == expected_output - def test_backend_creation_submit_and_termination_non_existing_client_app( - self, - ) -> None: - """Testing with ClientApp module that does not exist.""" - with self.assertRaises(LoadClientAppError): - self.test_backend_creation_submit_and_termination( - client_app_loader=_load_from_module("a_non_existing_module:app") - ) - self.addAsyncCleanup(self.on_cleanup) - def test_backend_creation_submit_and_termination_existing_client_app( self, ) -> None: """Testing with ClientApp module that exist.""" - # Resolve what should be the workdir to pass upon Backend initialisation - file_path = Path(__file__) - working_dir = Path.cwd() - rel_workdir = file_path.relative_to(working_dir) + 
self.test_backend_creation_submit_and_termination( + client_app_loader=_load_app, + ) - # Susbtract last element - rel_workdir_str = str(rel_workdir.parent) + def test_backend_creation_with_init_arguments(self) -> None: + """Testing whether init args are properly parsed to Ray.""" + backend_config_4: BackendConfig = { + "init_args": {"num_cpus": 4}, + "client_resources": {"num_cpus": 1, "num_gpus": 0}, + } - self.test_backend_creation_submit_and_termination( - client_app_loader=_load_from_module("raybackend_test:client_app"), - workdir=rel_workdir_str, + backend_config_2: BackendConfig = { + "init_args": {"num_cpus": 2}, + "client_resources": {"num_cpus": 1, "num_gpus": 0}, + } + + RayBackend( + backend_config=backend_config_4, ) + nodes = ray.nodes() - def test_backend_creation_submit_and_termination_existing_client_app_unsetworkdir( - self, - ) -> None: - """Testing with ClientApp module that exist but the passed workdir does not.""" - with self.assertRaises(ValueError): - self.test_backend_creation_submit_and_termination( - client_app_loader=_load_from_module("raybackend_test:client_app"), - workdir="/?&%$^#%@$!", - ) - self.addAsyncCleanup(self.on_cleanup) + assert nodes[0]["Resources"]["CPU"] == backend_config_4["init_args"]["num_cpus"] + + ray.shutdown() + + RayBackend( + backend_config=backend_config_2, + ) + nodes = ray.nodes() + + assert nodes[0]["Resources"]["CPU"] == backend_config_2["init_args"]["num_cpus"] diff --git a/src/py/flwr/server/superlink/fleet/vce/vce_api.py b/src/py/flwr/server/superlink/fleet/vce/vce_api.py index cc3e85b28097..785390534001 100644 --- a/src/py/flwr/server/superlink/fleet/vce/vce_api.py +++ b/src/py/flwr/server/superlink/fleet/vce/vce_api.py @@ -14,27 +14,37 @@ # ============================================================================== """Fleet Simulation Engine API.""" -import asyncio + import json -import sys +import threading import time import traceback +from concurrent.futures import ThreadPoolExecutor from logging 
import DEBUG, ERROR, INFO, WARN -from typing import Callable, Dict, List, Optional +from pathlib import Path +from queue import Empty, Queue +from time import sleep +from typing import Callable, Optional from flwr.client.client_app import ClientApp, ClientAppException, LoadClientAppError +from flwr.client.clientapp.utils import get_load_client_app_fn from flwr.client.node_state import NodeState -from flwr.common.constant import PING_MAX_INTERVAL, ErrorCode +from flwr.common.constant import ( + NUM_PARTITIONS_KEY, + PARTITION_ID_KEY, + PING_MAX_INTERVAL, + ErrorCode, +) from flwr.common.logger import log from flwr.common.message import Error -from flwr.common.object_ref import load_app from flwr.common.serde import message_from_taskins, message_to_taskres -from flwr.proto.task_pb2 import TaskIns # pylint: disable=E0611 -from flwr.server.superlink.state import StateFactory +from flwr.common.typing import Run +from flwr.proto.task_pb2 import TaskIns, TaskRes # pylint: disable=E0611 +from flwr.server.superlink.state import State, StateFactory from .backend import Backend, error_messages_backends, supported_backends -NodeToPartitionMapping = Dict[int, int] +NodeToPartitionMapping = dict[int, int] def _register_nodes( @@ -50,46 +60,64 @@ def _register_nodes( return nodes_mapping -# pylint: disable=too-many-arguments,too-many-locals -async def worker( - app_fn: Callable[[], ClientApp], - queue: "asyncio.Queue[TaskIns]", - node_states: Dict[int, NodeState], - state_factory: StateFactory, +def _register_node_states( nodes_mapping: NodeToPartitionMapping, + run: Run, + app_dir: Optional[str] = None, +) -> dict[int, NodeState]: + """Create NodeState objects and pre-register the context for the run.""" + node_states: dict[int, NodeState] = {} + num_partitions = len(set(nodes_mapping.values())) + for node_id, partition_id in nodes_mapping.items(): + node_states[node_id] = NodeState( + node_id=node_id, + node_config={ + PARTITION_ID_KEY: partition_id, + NUM_PARTITIONS_KEY: 
num_partitions, + }, + ) + + # Pre-register Context objects + node_states[node_id].register_context( + run_id=run.run_id, run=run, app_dir=app_dir + ) + + return node_states + + +# pylint: disable=too-many-arguments,too-many-locals +def worker( + taskins_queue: "Queue[TaskIns]", + taskres_queue: "Queue[TaskRes]", + node_states: dict[int, NodeState], backend: Backend, + f_stop: threading.Event, ) -> None: """Get TaskIns from queue and pass it to an actor in the pool to execute it.""" - state = state_factory.state() - while True: + while not f_stop.is_set(): out_mssg = None try: - task_ins: TaskIns = await queue.get() + # Fetch from queue with timeout. We use a timeout so + # the stopping event can be evaluated even when the queue is empty. + task_ins: TaskIns = taskins_queue.get(timeout=1.0) node_id = task_ins.task.consumer.node_id - # Register and retrieve runstate - node_states[node_id].register_context(run_id=task_ins.run_id) + # Retrieve context context = node_states[node_id].retrieve_context(run_id=task_ins.run_id) # Convert TaskIns to Message message = message_from_taskins(task_ins) - # Set partition_id - message.metadata.partition_id = nodes_mapping[node_id] # Let backend process message - out_mssg, updated_context = await backend.process_message( - app_fn, message, context - ) + out_mssg, updated_context = backend.process_message(message, context) # Update Context node_states[node_id].update_context( task_ins.run_id, context=updated_context ) - - except asyncio.CancelledError as e: - log(DEBUG, "Terminating async worker: %s", e) - break - + except Empty: + # An exception raised if queue.get times out + pass # Exceptions aren't raised but reported as an error message except Exception as ex: # pylint: disable=broad-exception-caught log(ERROR, ex) @@ -113,67 +141,49 @@ async def worker( task_res = message_to_taskres(out_mssg) # Store TaskRes in state task_res.task.pushed_at = time.time() - state.store_task_res(task_res) + taskres_queue.put(task_res) -async def 
add_taskins_to_queue( - queue: "asyncio.Queue[TaskIns]", - state_factory: StateFactory, +def add_taskins_to_queue( + state: State, + queue: "Queue[TaskIns]", nodes_mapping: NodeToPartitionMapping, - backend: Backend, - consumers: List["asyncio.Task[None]"], - f_stop: asyncio.Event, + f_stop: threading.Event, ) -> None: - """Retrieve TaskIns and add it to the queue.""" - state = state_factory.state() - num_initial_consumers = len(consumers) + """Put TaskIns in a queue from State.""" while not f_stop.is_set(): for node_id in nodes_mapping.keys(): - task_ins = state.get_task_ins(node_id=node_id, limit=1) - if task_ins: - await queue.put(task_ins[0]) - - # Count consumers that are running - num_active = sum(not (cc.done()) for cc in consumers) - - # Alert if number of consumers decreased by half - if num_active < num_initial_consumers // 2: - log( - WARN, - "Number of active workers has more than halved: (%i/%i active)", - num_active, - num_initial_consumers, - ) + task_ins_list = state.get_task_ins(node_id=node_id, limit=1) + for task_ins in task_ins_list: + queue.put(task_ins) + sleep(0.1) - # Break if consumers died - if num_active == 0: - raise RuntimeError("All workers have died. 
Ending Simulation.") - # Log some stats - log( - DEBUG, - "Simulation Engine stats: " - "Active workers: (%i/%i) | %s (%i workers) | Tasks in queue: %i)", - num_active, - num_initial_consumers, - backend.__class__.__name__, - backend.num_workers, - queue.qsize(), - ) - await asyncio.sleep(1.0) - log(DEBUG, "Async producer: Stopped pulling from StateFactory.") +def put_taskres_into_state( + state: State, queue: "Queue[TaskRes]", f_stop: threading.Event +) -> None: + """Put TaskRes into State from a queue.""" + while not f_stop.is_set(): + try: + taskres = queue.get(timeout=1.0) + state.store_task_res(taskres) + except Empty: + # queue is empty when timeout was triggered + pass -async def run( +# pylint: disable=too-many-positional-arguments +def run_api( app_fn: Callable[[], ClientApp], backend_fn: Callable[[], Backend], nodes_mapping: NodeToPartitionMapping, state_factory: StateFactory, - node_states: Dict[int, NodeState], - f_stop: asyncio.Event, + node_states: dict[int, NodeState], + f_stop: threading.Event, ) -> None: - """Run the VCE async.""" - queue: "asyncio.Queue[TaskIns]" = asyncio.Queue(128) + """Run the VCE.""" + taskins_queue: "Queue[TaskIns]" = Queue() + taskres_queue: "Queue[TaskRes]" = Queue() try: @@ -181,29 +191,47 @@ async def run( backend = backend_fn() # Build backend - await backend.build() + backend.build(app_fn) # Add workers (they submit Messages to Backend) - worker_tasks = [ - asyncio.create_task( - worker( - app_fn, queue, node_states, state_factory, nodes_mapping, backend - ) - ) - for _ in range(backend.num_workers) - ] - # Create producer (adds TaskIns into Queue) - producer = asyncio.create_task( - add_taskins_to_queue( - queue, state_factory, nodes_mapping, backend, worker_tasks, f_stop - ) + state = state_factory.state() + + extractor_th = threading.Thread( + target=add_taskins_to_queue, + args=( + state, + taskins_queue, + nodes_mapping, + f_stop, + ), ) + extractor_th.start() - # Wait for producer to finish - # The producer runs 
forever until f_stop is set or until - # all worker (consumer) coroutines are completed. Workers - # also run forever and only end if an exception is raised. - await asyncio.gather(producer) + injector_th = threading.Thread( + target=put_taskres_into_state, + args=( + state, + taskres_queue, + f_stop, + ), + ) + injector_th.start() + + with ThreadPoolExecutor() as executor: + _ = [ + executor.submit( + worker, + taskins_queue, + taskres_queue, + node_states, + backend, + f_stop, + ) + for _ in range(backend.num_workers) + ] + + extractor_th.join() + injector_th.join() except Exception as ex: @@ -218,27 +246,21 @@ async def run( raise RuntimeError("Simulation Engine crashed.") from ex finally: - # Produced task terminated, now cancel worker tasks - for w_t in worker_tasks: - _ = w_t.cancel() - - while not all(w_t.done() for w_t in worker_tasks): - log(DEBUG, "Terminating async workers...") - await asyncio.sleep(0.5) - - await asyncio.gather(*[w_t for w_t in worker_tasks if not w_t.done()]) # Terminate backend - await backend.terminate() + backend.terminate() # pylint: disable=too-many-arguments,unused-argument,too-many-locals,too-many-branches -# pylint: disable=too-many-statements +# pylint: disable=too-many-statements,too-many-positional-arguments def start_vce( backend_name: str, backend_config_json_stream: str, app_dir: str, - f_stop: asyncio.Event, + is_app: bool, + f_stop: threading.Event, + run: Run, + flwr_dir: Optional[str] = None, client_app: Optional[ClientApp] = None, client_app_attr: Optional[str] = None, num_supernodes: Optional[int] = None, @@ -246,6 +268,8 @@ def start_vce( existing_nodes_mapping: Optional[NodeToPartitionMapping] = None, ) -> None: """Start Fleet API with the Simulation Engine.""" + nodes_mapping = {} + if client_app_attr is not None and client_app is not None: raise ValueError( "Both `client_app_attr` and `client_app` are provided, " @@ -274,6 +298,7 @@ def start_vce( # Use mapping constructed externally. 
This also means nodes # have previously being registered. nodes_mapping = existing_nodes_mapping + app_dir = str(Path(app_dir).absolute()) if not state_factory: log(INFO, "A StateFactory was not supplied to the SimulationEngine.") @@ -288,9 +313,9 @@ def start_vce( ) # Construct mapping of NodeStates - node_states: Dict[int, NodeState] = {} - for node_id in nodes_mapping: - node_states[node_id] = NodeState() + node_states = _register_node_states( + nodes_mapping=nodes_mapping, run=run, app_dir=app_dir if is_app else None + ) # Load backend config log(DEBUG, "Supported backends: %s", list(supported_backends.keys())) @@ -313,43 +338,46 @@ def start_vce( def backend_fn() -> Backend: """Instantiate a Backend.""" - return backend_type(backend_config, work_dir=app_dir) + return backend_type(backend_config) # Load ClientApp if needed def _load() -> ClientApp: + if client_app: + return client_app if client_app_attr: + return get_load_client_app_fn( + default_app_ref=client_app_attr, + app_path=app_dir, + flwr_dir=flwr_dir, + multi_app=False, + )(run.fab_id, run.fab_version, run.fab_hash) - if app_dir is not None: - sys.path.insert(0, app_dir) - - app: ClientApp = load_app(client_app_attr, LoadClientAppError) - - if not isinstance(app, ClientApp): - raise LoadClientAppError( - f"Attribute {client_app_attr} is not of type {ClientApp}", - ) from None - - if client_app: - app = client_app - return app + raise ValueError("Either `client_app_attr` or `client_app` must be provided") app_fn = _load try: # Test if ClientApp can be loaded - _ = app_fn() + client_app = app_fn() + + # Cache `ClientApp` + if client_app_attr: + # Now wrap the loaded ClientApp in a dummy function + # this prevent unnecesary low-level loading of ClientApp + def _load_client_app() -> ClientApp: + return client_app + + app_fn = _load_client_app # Run main simulation loop - asyncio.run( - run( - app_fn, - backend_fn, - nodes_mapping, - state_factory, - node_states, - f_stop, - ) + run_api( + app_fn, + 
backend_fn, + nodes_mapping, + state_factory, + node_states, + f_stop, ) except LoadClientAppError as loadapp_ex: f_stop_delay = 10 diff --git a/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py b/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py index 1da726f88f1e..bc34b825c333 100644 --- a/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py +++ b/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py @@ -15,7 +15,6 @@ """Test Fleet Simulation Engine API.""" -import asyncio import threading import time from itertools import cycle @@ -23,20 +22,27 @@ from math import pi from pathlib import Path from time import sleep -from typing import Dict, Optional, Set, Tuple -from unittest import IsolatedAsyncioTestCase +from typing import Optional +from unittest import TestCase from uuid import UUID +from flwr.client import Client, ClientApp, NumPyClient from flwr.client.client_app import LoadClientAppError from flwr.common import ( DEFAULT_TTL, + Config, + ConfigsRecord, + Context, GetPropertiesIns, Message, MessageTypeLegacy, Metadata, + RecordSet, + Scalar, ) from flwr.common.recordset_compat import getpropertiesins_to_recordset from flwr.common.serde import message_from_taskres, message_to_taskins +from flwr.common.typing import Run from flwr.server.superlink.fleet.vce.vce_api import ( NodeToPartitionMapping, _register_nodes, @@ -45,7 +51,33 @@ from flwr.server.superlink.state import InMemoryState, StateFactory -def terminate_simulation(f_stop: asyncio.Event, sleep_duration: int) -> None: +class DummyClient(NumPyClient): + """A dummy NumPyClient for tests.""" + + def __init__(self, state: RecordSet) -> None: + self.client_state = state + + def get_properties(self, config: Config) -> dict[str, Scalar]: + """Return properties by doing a simple calculation.""" + result = float(config["factor"]) * pi + + # store something in context + self.client_state.configs_records["result"] = ConfigsRecord({"result": result}) + + return {"result": result} + + +def 
get_dummy_client(context: Context) -> Client: # pylint: disable=unused-argument + """Return a DummyClient converted to Client type.""" + return DummyClient(state=context.state).to_client() + + +dummy_client_app = ClientApp( + client_fn=get_dummy_client, +) + + +def terminate_simulation(f_stop: threading.Event, sleep_duration: int) -> None: """Set event to terminate Simulation Engine after `sleep_duration` seconds.""" sleep(sleep_duration) f_stop.set() @@ -54,7 +86,7 @@ def terminate_simulation(f_stop: asyncio.Event, sleep_duration: int) -> None: def init_state_factory_nodes_mapping( num_nodes: int, num_messages: int, -) -> Tuple[StateFactory, NodeToPartitionMapping, Dict[UUID, float]]: +) -> tuple[StateFactory, NodeToPartitionMapping, dict[UUID, float]]: """Instatiate StateFactory, register nodes and pre-insert messages in the state.""" # Register a state and a run_id in it run_id = 1234 @@ -78,14 +110,20 @@ def register_messages_into_state( nodes_mapping: NodeToPartitionMapping, run_id: int, num_messages: int, -) -> Dict[UUID, float]: +) -> dict[UUID, float]: """Register `num_messages` into the state factory.""" state: InMemoryState = state_factory.state() # type: ignore - state.run_ids[run_id] = ("Mock/mock", "v1.0.0") + state.run_ids[run_id] = Run( + run_id=run_id, + fab_id="Mock/mock", + fab_version="v1.0.0", + fab_hash="hash", + override_config={}, + ) # Artificially add TaskIns to state so they can be processed # by the Simulation Engine logic nodes_cycle = cycle(nodes_mapping.keys()) # we have more messages than supernodes - task_ids: Set[UUID] = set() # so we can retrieve them later + task_ids: set[UUID] = set() # so we can retrieve them later expected_results = {} for i in range(num_messages): dst_node_id = next(nodes_cycle) @@ -132,10 +170,10 @@ def _autoresolve_app_dir(rel_client_app_dir: str = "backend") -> str: return str(rel_app_dir.parent / rel_client_app_dir) -# pylint: disable=too-many-arguments +# pylint: 
disable=too-many-arguments,too-many-positional-arguments def start_and_shutdown( backend: str = "ray", - client_app_attr: str = "raybackend_test:client_app", + client_app_attr: Optional[str] = None, app_dir: str = "", num_supernodes: Optional[int] = None, state_factory: Optional[StateFactory] = None, @@ -145,15 +183,15 @@ def start_and_shutdown( ) -> None: """Start Simulation Engine and terminate after specified number of seconds. - Some tests need to be terminated by triggering externally an asyncio.Event. This - is enabled whtn passing `duration`>0. + Some tests need to be terminated by triggering externally an threading.Event. This + is enabled when passing `duration`>0. """ - f_stop = asyncio.Event() + f_stop = threading.Event() if duration: # Setup thread that will set the f_stop event, triggering the termination of all - # asyncio logic in the Simulation Engine. It will also terminate the Backend. + # logic in the Simulation Engine. It will also terminate the Backend. termination_th = threading.Thread( target=terminate_simulation, args=(f_stop, duration) ) @@ -163,14 +201,19 @@ def start_and_shutdown( if not app_dir: app_dir = _autoresolve_app_dir() + run = Run(run_id=1234, fab_id="", fab_version="", fab_hash="", override_config={}) + start_vce( num_supernodes=num_supernodes, + client_app=None if client_app_attr else dummy_client_app, client_app_attr=client_app_attr, backend_name=backend, backend_config_json_stream=backend_config, state_factory=state_factory, app_dir=app_dir, + is_app=False, f_stop=f_stop, + run=run, existing_nodes_mapping=nodes_mapping, ) @@ -178,8 +221,8 @@ def start_and_shutdown( termination_th.join() -class AsyncTestFleetSimulationEngineRayBackend(IsolatedAsyncioTestCase): - """A basic class that enables testing asyncio functionalities.""" +class TestFleetSimulationEngineRayBackend(TestCase): + """A basic class that enables testing functionalities.""" def test_erroneous_no_supernodes_client_mapping(self) -> None: """Test with unset 
arguments.""" @@ -261,7 +304,7 @@ def test_start_and_shutdown_with_tasks_in_state(self) -> None: # Get all TaskRes state = state_factory.state() task_ids = set(expected_results.keys()) - task_res_list = state.get_task_res(task_ids=task_ids, limit=len(task_ids)) + task_res_list = state.get_task_res(task_ids=task_ids) # Check results by first converting to Message for task_res in task_res_list: diff --git a/src/py/flwr/server/superlink/state/__init__.py b/src/py/flwr/server/superlink/state/__init__.py index 7f260d733bbe..9d3bd220403b 100644 --- a/src/py/flwr/server/superlink/state/__init__.py +++ b/src/py/flwr/server/superlink/state/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/src/py/flwr/server/superlink/state/in_memory_state.py b/src/py/flwr/server/superlink/state/in_memory_state.py index f86bf79d9dfa..a9c4176ee5f2 100644 --- a/src/py/flwr/server/superlink/state/in_memory_state.py +++ b/src/py/flwr/server/superlink/state/in_memory_state.py @@ -1,4 +1,4 @@ -# Copyright 2023 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -15,19 +15,24 @@ """In-memory State implementation.""" -import os import threading import time -from logging import ERROR -from typing import Dict, List, Optional, Set, Tuple +from logging import ERROR, WARNING +from typing import Optional from uuid import UUID, uuid4 from flwr.common import log, now +from flwr.common.constant import ( + MESSAGE_TTL_TOLERANCE, + NODE_ID_NUM_BYTES, + RUN_ID_NUM_BYTES, +) +from flwr.common.typing import Run, UserConfig from flwr.proto.task_pb2 import TaskIns, TaskRes # pylint: disable=E0611 from flwr.server.superlink.state.state import State from flwr.server.utils import validate_task_ins_or_res -from .utils import make_node_unavailable_taskres +from .utils import generate_rand_int_from_bytes, make_node_unavailable_taskres class InMemoryState(State): # pylint: disable=R0902,R0904 @@ -36,15 +41,15 @@ class InMemoryState(State): # pylint: disable=R0902,R0904 def __init__(self) -> None: # Map node_id to (online_until, ping_interval) - self.node_ids: Dict[int, Tuple[float, float]] = {} - self.public_key_to_node_id: Dict[bytes, int] = {} + self.node_ids: dict[int, tuple[float, float]] = {} + self.public_key_to_node_id: dict[bytes, int] = {} # Map run_id to (fab_id, fab_version) - self.run_ids: Dict[int, Tuple[str, str]] = {} - self.task_ins_store: Dict[UUID, TaskIns] = {} - self.task_res_store: Dict[UUID, TaskRes] = {} + self.run_ids: dict[int, Run] = {} + self.task_ins_store: dict[UUID, TaskIns] = {} + self.task_res_store: dict[UUID, TaskRes] = {} - self.client_public_keys: Set[bytes] = set() + self.node_public_keys: set[bytes] = set() self.server_public_key: Optional[bytes] = None self.server_private_key: Optional[bytes] = None @@ -75,13 +80,14 @@ def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: def get_task_ins( self, node_id: Optional[int], limit: Optional[int] - ) -> List[TaskIns]: + ) -> list[TaskIns]: """Get all TaskIns that have not been delivered yet.""" if limit is not None and limit < 1: raise 
AssertionError("`limit` must be >= 1") # Find TaskIns for node_id that were not delivered yet - task_ins_list: List[TaskIns] = [] + task_ins_list: list[TaskIns] = [] + current_time = time.time() with self.lock: for _, task_ins in self.task_ins_store.items(): # pylint: disable=too-many-boolean-expressions @@ -90,11 +96,13 @@ def get_task_ins( and task_ins.task.consumer.anonymous is False and task_ins.task.consumer.node_id == node_id and task_ins.task.delivered_at == "" + and task_ins.task.created_at + task_ins.task.ttl > current_time ) or ( node_id is None # Anonymous and task_ins.task.consumer.anonymous is True and task_ins.task.consumer.node_id == 0 and task_ins.task.delivered_at == "" + and task_ins.task.created_at + task_ins.task.ttl > current_time ): task_ins_list.append(task_ins) if limit and len(task_ins_list) == limit: @@ -108,6 +116,7 @@ def get_task_ins( # Return TaskIns return task_ins_list + # pylint: disable=R0911 def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: """Store one TaskRes.""" # Validate task @@ -116,6 +125,55 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: log(ERROR, errors) return None + with self.lock: + # Check if the TaskIns it is replying to exists and is valid + task_ins_id = task_res.task.ancestry[0] + task_ins = self.task_ins_store.get(UUID(task_ins_id)) + + # Ensure that the consumer_id of taskIns matches the producer_id of taskRes. 
+ if ( + task_ins + and task_res + and not ( + task_ins.task.consumer.anonymous or task_res.task.producer.anonymous + ) + and task_ins.task.consumer.node_id != task_res.task.producer.node_id + ): + return None + + if task_ins is None: + log(ERROR, "TaskIns with task_id %s does not exist.", task_ins_id) + return None + + if task_ins.task.created_at + task_ins.task.ttl <= time.time(): + log( + ERROR, + "Failed to store TaskRes: TaskIns with task_id %s has expired.", + task_ins_id, + ) + return None + + # Fail if the TaskRes TTL exceeds the + # expiration time of the TaskIns it replies to. + # Condition: TaskIns.created_at + TaskIns.ttl ≥ + # TaskRes.created_at + TaskRes.ttl + # A small tolerance is introduced to account + # for floating-point precision issues. + max_allowed_ttl = ( + task_ins.task.created_at + task_ins.task.ttl - task_res.task.created_at + ) + if task_res.task.ttl and ( + task_res.task.ttl - max_allowed_ttl > MESSAGE_TTL_TOLERANCE + ): + log( + WARNING, + "Received TaskRes with TTL %.2f " + "exceeding the allowed maximum TTL %.2f.", + task_res.task.ttl, + max_allowed_ttl, + ) + return None + # Validate run_id if task_res.run_id not in self.run_ids: log(ERROR, "`run_id` is invalid") @@ -132,27 +190,33 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: # Return the new task_id return task_id - def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRes]: + def get_task_res(self, task_ids: set[UUID]) -> list[TaskRes]: """Get all TaskRes that have not been delivered yet.""" - if limit is not None and limit < 1: - raise AssertionError("`limit` must be >= 1") - with self.lock: # Find TaskRes that were not delivered yet - task_res_list: List[TaskRes] = [] - replied_task_ids: Set[UUID] = set() + task_res_list: list[TaskRes] = [] + replied_task_ids: set[UUID] = set() for _, task_res in self.task_res_store.items(): reply_to = UUID(task_res.task.ancestry[0]) + + # Check if corresponding TaskIns exists and is not expired + 
task_ins = self.task_ins_store.get(reply_to) + if task_ins is None: + log(WARNING, "TaskIns with task_id %s does not exist.", reply_to) + task_ids.remove(reply_to) + continue + + if task_ins.task.created_at + task_ins.task.ttl <= time.time(): + log(WARNING, "TaskIns with task_id %s is expired.", reply_to) + task_ids.remove(reply_to) + continue + if reply_to in task_ids and task_res.task.delivered_at == "": task_res_list.append(task_res) replied_task_ids.add(reply_to) - if limit and len(task_res_list) == limit: - break # Check if the node is offline for task_id in task_ids - replied_task_ids: - if limit and len(task_res_list) == limit: - break task_ins = self.task_ins_store.get(task_id) if task_ins is None: continue @@ -174,10 +238,10 @@ def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRe # Return TaskRes return task_res_list - def delete_tasks(self, task_ids: Set[UUID]) -> None: + def delete_tasks(self, task_ids: set[UUID]) -> None: """Delete all delivered TaskIns/TaskRes pairs.""" - task_ins_to_be_deleted: Set[UUID] = set() - task_res_to_be_deleted: Set[UUID] = set() + task_ins_to_be_deleted: set[UUID] = set() + task_res_to_be_deleted: set[UUID] = set() with self.lock: for task_ins_id in task_ids: @@ -215,7 +279,7 @@ def create_node( ) -> int: """Create, store in state, and return `node_id`.""" # Sample a random int64 as node_id - node_id: int = int.from_bytes(os.urandom(8), "little", signed=True) + node_id = generate_rand_int_from_bytes(NODE_ID_NUM_BYTES) with self.lock: if node_id in self.node_ids: @@ -236,7 +300,7 @@ def create_node( return node_id def delete_node(self, node_id: int, public_key: Optional[bytes] = None) -> None: - """Delete a client node.""" + """Delete a node.""" with self.lock: if node_id not in self.node_ids: raise ValueError(f"Node {node_id} not found") @@ -252,8 +316,8 @@ def delete_node(self, node_id: int, public_key: Optional[bytes] = None) -> None: del self.node_ids[node_id] - def get_nodes(self, run_id: int) 
-> Set[int]: - """Return all available client nodes. + def get_nodes(self, run_id: int) -> set[int]: + """Return all available nodes. Constraints ----------- @@ -270,18 +334,30 @@ def get_nodes(self, run_id: int) -> Set[int]: if online_until > current_time } - def get_node_id(self, client_public_key: bytes) -> Optional[int]: - """Retrieve stored `node_id` filtered by `client_public_keys`.""" - return self.public_key_to_node_id.get(client_public_key) + def get_node_id(self, node_public_key: bytes) -> Optional[int]: + """Retrieve stored `node_id` filtered by `node_public_keys`.""" + return self.public_key_to_node_id.get(node_public_key) - def create_run(self, fab_id: str, fab_version: str) -> int: - """Create a new run for the specified `fab_id` and `fab_version`.""" + def create_run( + self, + fab_id: Optional[str], + fab_version: Optional[str], + fab_hash: Optional[str], + override_config: UserConfig, + ) -> int: + """Create a new run for the specified `fab_hash`.""" # Sample a random int64 as run_id with self.lock: - run_id: int = int.from_bytes(os.urandom(8), "little", signed=True) + run_id = generate_rand_int_from_bytes(RUN_ID_NUM_BYTES) if run_id not in self.run_ids: - self.run_ids[run_id] = (fab_id, fab_version) + self.run_ids[run_id] = Run( + run_id=run_id, + fab_id=fab_id if fab_id else "", + fab_version=fab_version if fab_version else "", + fab_hash=fab_hash if fab_hash else "", + override_config=override_config, + ) return run_id log(ERROR, "Unexpected run creation failure.") return 0 @@ -305,27 +381,27 @@ def get_server_public_key(self) -> Optional[bytes]: """Retrieve `server_public_key` in urlsafe bytes.""" return self.server_public_key - def store_client_public_keys(self, public_keys: Set[bytes]) -> None: - """Store a set of `client_public_keys` in state.""" + def store_node_public_keys(self, public_keys: set[bytes]) -> None: + """Store a set of `node_public_keys` in state.""" with self.lock: - self.client_public_keys = public_keys + 
self.node_public_keys = public_keys - def store_client_public_key(self, public_key: bytes) -> None: - """Store a `client_public_key` in state.""" + def store_node_public_key(self, public_key: bytes) -> None: + """Store a `node_public_key` in state.""" with self.lock: - self.client_public_keys.add(public_key) + self.node_public_keys.add(public_key) - def get_client_public_keys(self) -> Set[bytes]: - """Retrieve all currently stored `client_public_keys` as a set.""" - return self.client_public_keys + def get_node_public_keys(self) -> set[bytes]: + """Retrieve all currently stored `node_public_keys` as a set.""" + return self.node_public_keys - def get_run(self, run_id: int) -> Tuple[int, str, str]: + def get_run(self, run_id: int) -> Optional[Run]: """Retrieve information about the run with the specified `run_id`.""" with self.lock: if run_id not in self.run_ids: log(ERROR, "`run_id` is invalid") - return 0, "", "" - return run_id, *self.run_ids[run_id] + return None + return self.run_ids[run_id] def acknowledge_ping(self, node_id: int, ping_interval: float) -> bool: """Acknowledge a ping received from a node, serving as a heartbeat.""" diff --git a/src/py/flwr/server/superlink/state/sqlite_state.py b/src/py/flwr/server/superlink/state/sqlite_state.py index acf2054f08b6..6d644c3b2232 100644 --- a/src/py/flwr/server/superlink/state/sqlite_state.py +++ b/src/py/flwr/server/superlink/state/sqlite_state.py @@ -1,4 +1,4 @@ -# Copyright 2023 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -14,23 +14,38 @@ # ============================================================================== """SQLite based implemenation of server state.""" +# pylint: disable=too-many-lines -import os +import json import re import sqlite3 import time -from logging import DEBUG, ERROR -from typing import Any, Dict, List, Optional, Sequence, Set, Tuple, Union, cast +from collections.abc import Sequence +from logging import DEBUG, ERROR, WARNING +from typing import Any, Optional, Union, cast from uuid import UUID, uuid4 from flwr.common import log, now +from flwr.common.constant import ( + MESSAGE_TTL_TOLERANCE, + NODE_ID_NUM_BYTES, + RUN_ID_NUM_BYTES, +) +from flwr.common.typing import Run, UserConfig from flwr.proto.node_pb2 import Node # pylint: disable=E0611 from flwr.proto.recordset_pb2 import RecordSet # pylint: disable=E0611 from flwr.proto.task_pb2 import Task, TaskIns, TaskRes # pylint: disable=E0611 from flwr.server.utils.validator import validate_task_ins_or_res from .state import State -from .utils import make_node_unavailable_taskres +from .utils import ( + convert_sint64_to_uint64, + convert_sint64_values_in_dict_to_uint64, + convert_uint64_to_sint64, + convert_uint64_values_in_dict_to_sint64, + generate_rand_int_from_bytes, + make_node_unavailable_taskres, +) SQL_CREATE_TABLE_NODE = """ CREATE TABLE IF NOT EXISTS node( @@ -60,9 +75,11 @@ SQL_CREATE_TABLE_RUN = """ CREATE TABLE IF NOT EXISTS run( - run_id INTEGER UNIQUE, - fab_id TEXT, - fab_version TEXT + run_id INTEGER UNIQUE, + fab_id TEXT, + fab_version TEXT, + fab_hash TEXT, + override_config TEXT ); """ @@ -106,7 +123,7 @@ ); """ -DictOrTuple = Union[Tuple[Any, ...], Dict[str, Any]] +DictOrTuple = Union[tuple[Any, ...], dict[str, Any]] class SqliteState(State): # pylint: disable=R0904 @@ -127,13 +144,18 @@ def __init__( self.database_path = database_path self.conn: Optional[sqlite3.Connection] = None - def initialize(self, log_queries: bool = False) -> List[Tuple[str]]: + def initialize(self, 
log_queries: bool = False) -> list[tuple[str]]: """Create tables if they don't exist yet. Parameters ---------- log_queries : bool Log each query which is executed. + + Returns + ------- + list[tuple[str]] + The list of all tables in the DB. """ self.conn = sqlite3.connect(self.database_path) self.conn.execute("PRAGMA foreign_keys = ON;") @@ -158,7 +180,7 @@ def query( self, query: str, data: Optional[Union[Sequence[DictOrTuple], DictOrTuple]] = None, - ) -> List[Dict[str, Any]]: + ) -> list[dict[str, Any]]: """Execute a SQL query.""" if self.conn is None: raise AttributeError("State is not initialized.") @@ -218,6 +240,12 @@ def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: # Store TaskIns task_ins.task_id = str(task_id) data = (task_ins_to_dict(task_ins),) + + # Convert values from uint64 to sint64 for SQLite + convert_uint64_values_in_dict_to_sint64( + data[0], ["run_id", "producer_node_id", "consumer_node_id"] + ) + columns = ", ".join([f":{key}" for key in data[0]]) query = f"INSERT INTO task_ins VALUES({columns});" @@ -233,7 +261,7 @@ def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: def get_task_ins( self, node_id: Optional[int], limit: Optional[int] - ) -> List[TaskIns]: + ) -> list[TaskIns]: """Get undelivered TaskIns for one node (either anonymous or with ID). 
Usually, the Fleet API calls this for Nodes planning to work on one or more @@ -267,7 +295,7 @@ def get_task_ins( ) raise AssertionError(msg) - data: Dict[str, Union[str, int]] = {} + data: dict[str, Union[str, int]] = {} if node_id is None: # Retrieve all anonymous Tasks @@ -277,8 +305,12 @@ def get_task_ins( WHERE consumer_anonymous == 1 AND consumer_node_id == 0 AND delivered_at = "" + AND (created_at + ttl) > CAST(strftime('%s', 'now') AS REAL) """ else: + # Convert the uint64 value to sint64 for SQLite + data["node_id"] = convert_uint64_to_sint64(node_id) + # Retrieve all TaskIns for node_id query = """ SELECT task_id @@ -286,8 +318,8 @@ def get_task_ins( WHERE consumer_anonymous == 0 AND consumer_node_id == :node_id AND delivered_at = "" + AND (created_at + ttl) > CAST(strftime('%s', 'now') AS REAL) """ - data["node_id"] = node_id if limit is not None: query += " LIMIT :limit" @@ -317,6 +349,12 @@ def get_task_ins( # Run query rows = self.query(query, data) + for row in rows: + # Convert values from sint64 to uint64 + convert_sint64_values_in_dict_to_uint64( + row, ["run_id", "producer_node_id", "consumer_node_id"] + ) + result = [dict_to_task_ins(row) for row in rows] return result @@ -346,9 +384,57 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: # Create task_id task_id = uuid4() - # Store TaskIns + task_ins_id = task_res.task.ancestry[0] + task_ins = self.get_valid_task_ins(task_ins_id) + if task_ins is None: + log( + ERROR, + "Failed to store TaskRes: " + "TaskIns with task_id %s does not exist or has expired.", + task_ins_id, + ) + return None + + # Ensure that the consumer_id of taskIns matches the producer_id of taskRes. + if ( + task_ins + and task_res + and not (task_ins["consumer_anonymous"] or task_res.task.producer.anonymous) + and convert_sint64_to_uint64(task_ins["consumer_node_id"]) + != task_res.task.producer.node_id + ): + return None + + # Fail if the TaskRes TTL exceeds the + # expiration time of the TaskIns it replies to. 
+ # Condition: TaskIns.created_at + TaskIns.ttl ≥ + # TaskRes.created_at + TaskRes.ttl + # A small tolerance is introduced to account + # for floating-point precision issues. + max_allowed_ttl = ( + task_ins["created_at"] + task_ins["ttl"] - task_res.task.created_at + ) + if task_res.task.ttl and ( + task_res.task.ttl - max_allowed_ttl > MESSAGE_TTL_TOLERANCE + ): + log( + WARNING, + "Received TaskRes with TTL %.2f " + "exceeding the allowed maximum TTL %.2f.", + task_res.task.ttl, + max_allowed_ttl, + ) + return None + + # Store TaskRes task_res.task_id = str(task_id) data = (task_res_to_dict(task_res),) + + # Convert values from uint64 to sint64 for SQLite + convert_uint64_values_in_dict_to_sint64( + data[0], ["run_id", "producer_node_id", "consumer_node_id"] + ) + columns = ", ".join([f":{key}" for key in data[0]]) query = f"INSERT INTO task_res VALUES({columns});" @@ -362,8 +448,8 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: return task_id - # pylint: disable-next=R0914 - def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRes]: + # pylint: disable-next=R0912,R0915,R0914 + def get_task_res(self, task_ids: set[UUID]) -> list[TaskRes]: """Get TaskRes for task_ids. Usually, the Driver API calls this method to get results for instructions it has @@ -378,8 +464,34 @@ def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRe will only take effect if enough task_ids are in the set AND are currently available. If `limit` is set, it has to be greater than zero. 
""" - if limit is not None and limit < 1: - raise AssertionError("`limit` must be >= 1") + # Check if corresponding TaskIns exists and is not expired + task_ids_placeholders = ",".join([f":id_{i}" for i in range(len(task_ids))]) + query = f""" + SELECT * + FROM task_ins + WHERE task_id IN ({task_ids_placeholders}) + AND (created_at + ttl) > CAST(strftime('%s', 'now') AS REAL) + """ + query += ";" + + task_ins_data = {} + for index, task_id in enumerate(task_ids): + task_ins_data[f"id_{index}"] = str(task_id) + + task_ins_rows = self.query(query, task_ins_data) + + if not task_ins_rows: + return [] + + for row in task_ins_rows: + # Convert values from sint64 to uint64 + convert_sint64_values_in_dict_to_uint64( + row, ["run_id", "producer_node_id", "consumer_node_id"] + ) + task_ins = dict_to_task_ins(row) + if task_ins.task.created_at + task_ins.task.ttl <= time.time(): + log(WARNING, "TaskIns with task_id %s is expired.", task_ins.task_id) + task_ids.remove(UUID(task_ins.task_id)) # Retrieve all anonymous Tasks if len(task_ids) == 0: @@ -393,11 +505,7 @@ def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRe AND delivered_at = "" """ - data: Dict[str, Union[str, float, int]] = {} - - if limit is not None: - query += " LIMIT :limit" - data["limit"] = limit + data: dict[str, Union[str, float, int]] = {} query += ";" @@ -426,12 +534,18 @@ def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRe # Run query rows = self.query(query, data) + for row in rows: + # Convert values from sint64 to uint64 + convert_sint64_values_in_dict_to_uint64( + row, ["run_id", "producer_node_id", "consumer_node_id"] + ) + result = [dict_to_task_res(row) for row in rows] # 1. 
Query: Fetch consumer_node_id of remaining task_ids # Assume the ancestry field only contains one element data.clear() - replied_task_ids: Set[UUID] = {UUID(str(row["ancestry"])) for row in rows} + replied_task_ids: set[UUID] = {UUID(str(row["ancestry"])) for row in rows} remaining_task_ids = task_ids - replied_task_ids placeholders = ",".join([f":id_{i}" for i in range(len(remaining_task_ids))]) query = f""" @@ -467,8 +581,12 @@ def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRe # Make TaskRes containing node unavailabe error for row in task_ins_rows: - if limit and len(result) == limit: - break + for row in rows: + # Convert values from sint64 to uint64 + convert_sint64_values_in_dict_to_uint64( + row, ["run_id", "producer_node_id", "consumer_node_id"] + ) + task_ins = dict_to_task_ins(row) err_taskres = make_node_unavailable_taskres( ref_taskins=task_ins, @@ -495,10 +613,10 @@ def num_task_res(self) -> int: """ query = "SELECT count(*) AS num FROM task_res;" rows = self.query(query) - result: Dict[str, int] = rows[0] + result: dict[str, int] = rows[0] return result["num"] - def delete_tasks(self, task_ids: Set[UUID]) -> None: + def delete_tasks(self, task_ids: set[UUID]) -> None: """Delete all delivered TaskIns/TaskRes pairs.""" ids = list(task_ids) if len(ids) == 0: @@ -539,8 +657,11 @@ def create_node( self, ping_interval: float, public_key: Optional[bytes] = None ) -> int: """Create, store in state, and return `node_id`.""" - # Sample a random int64 as node_id - node_id: int = int.from_bytes(os.urandom(8), "little", signed=True) + # Sample a random uint64 as node_id + uint64_node_id = generate_rand_int_from_bytes(NODE_ID_NUM_BYTES) + + # Convert the uint64 value to sint64 for SQLite + sint64_node_id = convert_uint64_to_sint64(uint64_node_id) query = "SELECT node_id FROM node WHERE public_key = :public_key;" row = self.query(query, {"public_key": public_key}) @@ -557,17 +678,28 @@ def create_node( try: self.query( - query, 
(node_id, time.time() + ping_interval, ping_interval, public_key) + query, + ( + sint64_node_id, + time.time() + ping_interval, + ping_interval, + public_key, + ), ) except sqlite3.IntegrityError: log(ERROR, "Unexpected node registration failure.") return 0 - return node_id + + # Note: we need to return the uint64 value of the node_id + return uint64_node_id def delete_node(self, node_id: int, public_key: Optional[bytes] = None) -> None: - """Delete a client node.""" + """Delete a node.""" + # Convert the uint64 value to sint64 for SQLite + sint64_node_id = convert_uint64_to_sint64(node_id) + query = "DELETE FROM node WHERE node_id = ?" - params = (node_id,) + params = (sint64_node_id,) if public_key is not None: query += " AND public_key = ?" @@ -584,7 +716,7 @@ def delete_node(self, node_id: int, public_key: Optional[bytes] = None) -> None: except KeyError as exc: log(ERROR, {"query": query, "data": params, "exception": exc}) - def get_nodes(self, run_id: int) -> Set[int]: + def get_nodes(self, run_id: int) -> set[int]: """Retrieve all currently stored node IDs as a set. Constraints @@ -592,38 +724,76 @@ def get_nodes(self, run_id: int) -> Set[int]: If the provided `run_id` does not exist or has no matching nodes, an empty `Set` MUST be returned. 
""" + # Convert the uint64 value to sint64 for SQLite + sint64_run_id = convert_uint64_to_sint64(run_id) + # Validate run ID query = "SELECT COUNT(*) FROM run WHERE run_id = ?;" - if self.query(query, (run_id,))[0]["COUNT(*)"] == 0: + if self.query(query, (sint64_run_id,))[0]["COUNT(*)"] == 0: return set() # Get nodes query = "SELECT node_id FROM node WHERE online_until > ?;" rows = self.query(query, (time.time(),)) - result: Set[int] = {row["node_id"] for row in rows} + + # Convert sint64 node_ids to uint64 + result: set[int] = {convert_sint64_to_uint64(row["node_id"]) for row in rows} return result - def get_node_id(self, client_public_key: bytes) -> Optional[int]: - """Retrieve stored `node_id` filtered by `client_public_keys`.""" + def get_node_id(self, node_public_key: bytes) -> Optional[int]: + """Retrieve stored `node_id` filtered by `node_public_keys`.""" query = "SELECT node_id FROM node WHERE public_key = :public_key;" - row = self.query(query, {"public_key": client_public_key}) + row = self.query(query, {"public_key": node_public_key}) if len(row) > 0: node_id: int = row[0]["node_id"] - return node_id + + # Convert the sint64 value to uint64 after reading from SQLite + uint64_node_id = convert_sint64_to_uint64(node_id) + + return uint64_node_id return None - def create_run(self, fab_id: str, fab_version: str) -> int: + def create_run( + self, + fab_id: Optional[str], + fab_version: Optional[str], + fab_hash: Optional[str], + override_config: UserConfig, + ) -> int: """Create a new run for the specified `fab_id` and `fab_version`.""" # Sample a random int64 as run_id - run_id: int = int.from_bytes(os.urandom(8), "little", signed=True) + uint64_run_id = generate_rand_int_from_bytes(RUN_ID_NUM_BYTES) + + # Convert the uint64 value to sint64 for SQLite + sint64_run_id = convert_uint64_to_sint64(uint64_run_id) # Check conflicts query = "SELECT COUNT(*) FROM run WHERE run_id = ?;" - # If run_id does not exist - if self.query(query, (run_id,))[0]["COUNT(*)"] == 
0: - query = "INSERT INTO run (run_id, fab_id, fab_version) VALUES (?, ?, ?);" - self.query(query, (run_id, fab_id, fab_version)) - return run_id + # If sint64_run_id does not exist + if self.query(query, (sint64_run_id,))[0]["COUNT(*)"] == 0: + query = ( + "INSERT INTO run " + "(run_id, fab_id, fab_version, fab_hash, override_config)" + "VALUES (?, ?, ?, ?, ?);" + ) + if fab_hash: + self.query( + query, + (sint64_run_id, "", "", fab_hash, json.dumps(override_config)), + ) + else: + self.query( + query, + ( + sint64_run_id, + fab_id, + fab_version, + "", + json.dumps(override_config), + ), + ) + # Note: we need to return the uint64 value of the run_id + return uint64_run_id log(ERROR, "Unexpected run creation failure.") return 0 @@ -662,50 +832,89 @@ def get_server_public_key(self) -> Optional[bytes]: public_key = None return public_key - def store_client_public_keys(self, public_keys: Set[bytes]) -> None: - """Store a set of `client_public_keys` in state.""" + def store_node_public_keys(self, public_keys: set[bytes]) -> None: + """Store a set of `node_public_keys` in state.""" query = "INSERT INTO public_key (public_key) VALUES (?)" data = [(key,) for key in public_keys] self.query(query, data) - def store_client_public_key(self, public_key: bytes) -> None: - """Store a `client_public_key` in state.""" + def store_node_public_key(self, public_key: bytes) -> None: + """Store a `node_public_key` in state.""" query = "INSERT INTO public_key (public_key) VALUES (:public_key)" self.query(query, {"public_key": public_key}) - def get_client_public_keys(self) -> Set[bytes]: - """Retrieve all currently stored `client_public_keys` as a set.""" + def get_node_public_keys(self) -> set[bytes]: + """Retrieve all currently stored `node_public_keys` as a set.""" query = "SELECT public_key FROM public_key" rows = self.query(query) - result: Set[bytes] = {row["public_key"] for row in rows} + result: set[bytes] = {row["public_key"] for row in rows} return result - def get_run(self, 
run_id: int) -> Tuple[int, str, str]: + def get_run(self, run_id: int) -> Optional[Run]: """Retrieve information about the run with the specified `run_id`.""" + # Convert the uint64 value to sint64 for SQLite + sint64_run_id = convert_uint64_to_sint64(run_id) query = "SELECT * FROM run WHERE run_id = ?;" - try: - row = self.query(query, (run_id,))[0] - return run_id, row["fab_id"], row["fab_version"] - except sqlite3.IntegrityError: - log(ERROR, "`run_id` does not exist.") - return 0, "", "" + rows = self.query(query, (sint64_run_id,)) + if rows: + row = rows[0] + return Run( + run_id=convert_sint64_to_uint64(row["run_id"]), + fab_id=row["fab_id"], + fab_version=row["fab_version"], + fab_hash=row["fab_hash"], + override_config=json.loads(row["override_config"]), + ) + log(ERROR, "`run_id` does not exist.") + return None def acknowledge_ping(self, node_id: int, ping_interval: float) -> bool: """Acknowledge a ping received from a node, serving as a heartbeat.""" + sint64_node_id = convert_uint64_to_sint64(node_id) + # Update `online_until` and `ping_interval` for the given `node_id` query = "UPDATE node SET online_until = ?, ping_interval = ? WHERE node_id = ?;" try: - self.query(query, (time.time() + ping_interval, ping_interval, node_id)) + self.query( + query, (time.time() + ping_interval, ping_interval, sint64_node_id) + ) return True except sqlite3.IntegrityError: log(ERROR, "`node_id` does not exist.") return False + def get_valid_task_ins(self, task_id: str) -> Optional[dict[str, Any]]: + """Check if the TaskIns exists and is valid (not expired). + + Return TaskIns if valid. 
+ """ + query = """ + SELECT * + FROM task_ins + WHERE task_id = :task_id + """ + data = {"task_id": task_id} + rows = self.query(query, data) + if not rows: + # TaskIns does not exist + return None + + task_ins = rows[0] + created_at = task_ins["created_at"] + ttl = task_ins["ttl"] + current_time = time.time() + + # Check if TaskIns is expired + if ttl is not None and created_at + ttl <= current_time: + return None + + return task_ins + def dict_factory( cursor: sqlite3.Cursor, row: sqlite3.Row, -) -> Dict[str, Any]: +) -> dict[str, Any]: """Turn SQLite results into dicts. Less efficent for retrival of large amounts of data but easier to use. @@ -714,7 +923,7 @@ def dict_factory( return dict(zip(fields, row)) -def task_ins_to_dict(task_msg: TaskIns) -> Dict[str, Any]: +def task_ins_to_dict(task_msg: TaskIns) -> dict[str, Any]: """Transform TaskIns to dict.""" result = { "task_id": task_msg.task_id, @@ -735,7 +944,7 @@ def task_ins_to_dict(task_msg: TaskIns) -> Dict[str, Any]: return result -def task_res_to_dict(task_msg: TaskRes) -> Dict[str, Any]: +def task_res_to_dict(task_msg: TaskRes) -> dict[str, Any]: """Transform TaskRes to dict.""" result = { "task_id": task_msg.task_id, @@ -756,7 +965,7 @@ def task_res_to_dict(task_msg: TaskRes) -> Dict[str, Any]: return result -def dict_to_task_ins(task_dict: Dict[str, Any]) -> TaskIns: +def dict_to_task_ins(task_dict: dict[str, Any]) -> TaskIns: """Turn task_dict into protobuf message.""" recordset = RecordSet() recordset.ParseFromString(task_dict["recordset"]) @@ -786,7 +995,7 @@ def dict_to_task_ins(task_dict: Dict[str, Any]) -> TaskIns: return result -def dict_to_task_res(task_dict: Dict[str, Any]) -> TaskRes: +def dict_to_task_res(task_dict: dict[str, Any]) -> TaskRes: """Turn task_dict into protobuf message.""" recordset = RecordSet() recordset.ParseFromString(task_dict["recordset"]) diff --git a/src/py/flwr/server/superlink/state/sqlite_state_test.py b/src/py/flwr/server/superlink/state/sqlite_state_test.py index 
20927df1cf12..10e12da96bd5 100644 --- a/src/py/flwr/server/superlink/state/sqlite_state_test.py +++ b/src/py/flwr/server/superlink/state/sqlite_state_test.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/src/py/flwr/server/superlink/state/state.py b/src/py/flwr/server/superlink/state/state.py index a72062f2a938..b220aad3ebcc 100644 --- a/src/py/flwr/server/superlink/state/state.py +++ b/src/py/flwr/server/superlink/state/state.py @@ -1,4 +1,4 @@ -# Copyright 2022 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,9 +16,10 @@ import abc -from typing import List, Optional, Set, Tuple +from typing import Optional from uuid import UUID +from flwr.common.typing import Run, UserConfig from flwr.proto.task_pb2 import TaskIns, TaskRes # pylint: disable=E0611 @@ -50,7 +51,7 @@ def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: @abc.abstractmethod def get_task_ins( self, node_id: Optional[int], limit: Optional[int] - ) -> List[TaskIns]: + ) -> list[TaskIns]: """Get TaskIns optionally filtered by node_id. Usually, the Fleet API calls this for Nodes planning to work on one or more @@ -97,7 +98,7 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: """ @abc.abstractmethod - def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRes]: + def get_task_res(self, task_ids: set[UUID]) -> list[TaskRes]: """Get TaskRes for task_ids. 
Usually, the Driver API calls this method to get results for instructions it has @@ -105,12 +106,6 @@ def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRe Retrieves all TaskRes for the given `task_ids` and returns and empty list of none could be found. - - Constraints - ----------- - If `limit` is not `None`, return, at most, `limit` number of TaskRes. The limit - will only take effect if enough task_ids are in the set AND are currently - available. If `limit` is set, it has to be greater zero. """ @abc.abstractmethod @@ -128,7 +123,7 @@ def num_task_res(self) -> int: """ @abc.abstractmethod - def delete_tasks(self, task_ids: Set[UUID]) -> None: + def delete_tasks(self, task_ids: set[UUID]) -> None: """Delete all delivered TaskIns/TaskRes pairs.""" @abc.abstractmethod @@ -142,7 +137,7 @@ def delete_node(self, node_id: int, public_key: Optional[bytes] = None) -> None: """Remove `node_id` from state.""" @abc.abstractmethod - def get_nodes(self, run_id: int) -> Set[int]: + def get_nodes(self, run_id: int) -> set[int]: """Retrieve all currently stored node IDs as a set. 
Constraints @@ -152,15 +147,21 @@ def get_nodes(self, run_id: int) -> Set[int]: """ @abc.abstractmethod - def get_node_id(self, client_public_key: bytes) -> Optional[int]: - """Retrieve stored `node_id` filtered by `client_public_keys`.""" + def get_node_id(self, node_public_key: bytes) -> Optional[int]: + """Retrieve stored `node_id` filtered by `node_public_keys`.""" @abc.abstractmethod - def create_run(self, fab_id: str, fab_version: str) -> int: - """Create a new run for the specified `fab_id` and `fab_version`.""" + def create_run( + self, + fab_id: Optional[str], + fab_version: Optional[str], + fab_hash: Optional[str], + override_config: UserConfig, + ) -> int: + """Create a new run for the specified `fab_hash`.""" @abc.abstractmethod - def get_run(self, run_id: int) -> Tuple[int, str, str]: + def get_run(self, run_id: int) -> Optional[Run]: """Retrieve information about the run with the specified `run_id`. Parameters @@ -170,8 +171,8 @@ def get_run(self, run_id: int) -> Tuple[int, str, str]: Returns ------- - Tuple[int, str, str] - A tuple containing three elements: + Optional[Run] + A dataclass instance containing three elements if `run_id` is valid: - `run_id`: The identifier of the run, same as the specified `run_id`. - `fab_id`: The identifier of the FAB used in the specified run. - `fab_version`: The version of the FAB used in the specified run. 
@@ -192,16 +193,16 @@ def get_server_public_key(self) -> Optional[bytes]: """Retrieve `server_public_key` in urlsafe bytes.""" @abc.abstractmethod - def store_client_public_keys(self, public_keys: Set[bytes]) -> None: - """Store a set of `client_public_keys` in state.""" + def store_node_public_keys(self, public_keys: set[bytes]) -> None: + """Store a set of `node_public_keys` in state.""" @abc.abstractmethod - def store_client_public_key(self, public_key: bytes) -> None: - """Store a `client_public_key` in state.""" + def store_node_public_key(self, public_key: bytes) -> None: + """Store a `node_public_key` in state.""" @abc.abstractmethod - def get_client_public_keys(self) -> Set[bytes]: - """Retrieve all currently stored `client_public_keys` as a set.""" + def get_node_public_keys(self) -> set[bytes]: + """Retrieve all currently stored `node_public_keys` as a set.""" @abc.abstractmethod def acknowledge_ping(self, node_id: int, ping_interval: float) -> bool: diff --git a/src/py/flwr/server/superlink/state/state_factory.py b/src/py/flwr/server/superlink/state/state_factory.py index 62a00d910828..96c8d445c16e 100644 --- a/src/py/flwr/server/superlink/state/state_factory.py +++ b/src/py/flwr/server/superlink/state/state_factory.py @@ -1,4 +1,4 @@ -# Copyright 2022 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -26,7 +26,16 @@ class StateFactory: - """Factory class that creates State instances.""" + """Factory class that creates State instances. + + Parameters + ---------- + database : str + A string representing the path to the database file that will be opened. + Note that passing ':memory:' will open a connection to a database that is + in RAM, instead of on disk. For more information on special in-memory + databases, please refer to https://sqlite.org/inmemorydb.html. 
+ """ def __init__(self, database: str) -> None: self.database = database diff --git a/src/py/flwr/server/superlink/state/state_test.py b/src/py/flwr/server/superlink/state/state_test.py index 9b0153ca548a..a4663f80f630 100644 --- a/src/py/flwr/server/superlink/state/state_test.py +++ b/src/py/flwr/server/superlink/state/state_test.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,16 +13,15 @@ # limitations under the License. # ============================================================================== """Tests all state implemenations have to conform to.""" -# pylint: disable=invalid-name, disable=R0904 +# pylint: disable=invalid-name, too-many-lines, R0904, R0913 import tempfile import time import unittest from abc import abstractmethod from datetime import datetime, timezone -from typing import List from unittest.mock import patch -from uuid import uuid4 +from uuid import UUID from flwr.common import DEFAULT_TTL from flwr.common.constant import ErrorCode @@ -52,15 +51,16 @@ def test_create_and_get_run(self) -> None: """Test if create_run and get_run work correctly.""" # Prepare state: State = self.state_factory() - run_id = state.create_run("Mock/mock", "v1.0.0") + run_id = state.create_run(None, None, "9f86d08", {"test_key": "test_value"}) # Execute - actual_run_id, fab_id, fab_version = state.get_run(run_id) + run = state.get_run(run_id) # Assert - assert actual_run_id == run_id - assert fab_id == "Mock/mock" - assert fab_version == "v1.0.0" + assert run is not None + assert run.run_id == run_id + assert run.fab_hash == "9f86d08" + assert run.override_config["test_key"] == "test_value" def test_get_task_ins_empty(self) -> None: """Validate that a new state has no TaskIns.""" @@ -89,7 +89,7 @@ def test_store_task_ins_one(self) -> None: # 
Prepare consumer_node_id = 1 state = self.state_factory() - run_id = state.create_run("mock/mock", "v1.0.0") + run_id = state.create_run(None, None, "9f86d08", {}) task_ins = create_task_ins( consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id ) @@ -124,7 +124,7 @@ def test_store_and_delete_tasks(self) -> None: # Prepare consumer_node_id = 1 state = self.state_factory() - run_id = state.create_run("mock/mock", "v1.0.0") + run_id = state.create_run(None, None, "9f86d08", {}) task_ins_0 = create_task_ins( consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id ) @@ -149,18 +149,18 @@ def test_store_and_delete_tasks(self) -> None: # Insert one TaskRes and retrive it to mark it as delivered task_res_0 = create_task_res( - producer_node_id=100, + producer_node_id=consumer_node_id, anonymous=False, ancestry=[str(task_id_0)], run_id=run_id, ) _ = state.store_task_res(task_res=task_res_0) - _ = state.get_task_res(task_ids={task_id_0}, limit=None) + _ = state.get_task_res(task_ids={task_id_0}) # Insert one TaskRes, but don't retrive it task_res_1: TaskRes = create_task_res( - producer_node_id=100, + producer_node_id=consumer_node_id, anonymous=False, ancestry=[str(task_id_1)], run_id=run_id, @@ -198,7 +198,7 @@ def test_task_ins_store_anonymous_and_retrieve_anonymous(self) -> None: """ # Prepare state: State = self.state_factory() - run_id = state.create_run("mock/mock", "v1.0.0") + run_id = state.create_run(None, None, "9f86d08", {}) task_ins = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) # Execute @@ -213,7 +213,7 @@ def test_task_ins_store_anonymous_and_fail_retrieving_identitiy(self) -> None: """Store anonymous TaskIns and fail to retrieve it.""" # Prepare state: State = self.state_factory() - run_id = state.create_run("mock/mock", "v1.0.0") + run_id = state.create_run(None, None, "9f86d08", {}) task_ins = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) # Execute @@ -227,7 +227,7 @@ def 
test_task_ins_store_identity_and_fail_retrieving_anonymous(self) -> None: """Store identity TaskIns and fail retrieving it as anonymous.""" # Prepare state: State = self.state_factory() - run_id = state.create_run("mock/mock", "v1.0.0") + run_id = state.create_run(None, None, "9f86d08", {}) task_ins = create_task_ins(consumer_node_id=1, anonymous=False, run_id=run_id) # Execute @@ -241,7 +241,7 @@ def test_task_ins_store_identity_and_retrieve_identity(self) -> None: """Store identity TaskIns and retrieve it.""" # Prepare state: State = self.state_factory() - run_id = state.create_run("mock/mock", "v1.0.0") + run_id = state.create_run(None, None, "9f86d08", {}) task_ins = create_task_ins(consumer_node_id=1, anonymous=False, run_id=run_id) # Execute @@ -258,7 +258,7 @@ def test_task_ins_store_delivered_and_fail_retrieving(self) -> None: """Fail retrieving delivered task.""" # Prepare state: State = self.state_factory() - run_id = state.create_run("mock/mock", "v1.0.0") + run_id = state.create_run(None, None, "9f86d08", {}) task_ins = create_task_ins(consumer_node_id=1, anonymous=False, run_id=run_id) # Execute @@ -301,8 +301,11 @@ def test_task_res_store_and_retrieve_by_task_ins_id(self) -> None: """Store TaskRes retrieve it by task_ins_id.""" # Prepare state: State = self.state_factory() - run_id = state.create_run("mock/mock", "v1.0.0") - task_ins_id = uuid4() + run_id = state.create_run(None, None, "9f86d08", {}) + + task_ins = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) + task_ins_id = state.store_task_ins(task_ins) + task_res = create_task_res( producer_node_id=0, anonymous=True, @@ -312,7 +315,9 @@ def test_task_res_store_and_retrieve_by_task_ins_id(self) -> None: # Execute task_res_uuid = state.store_task_res(task_res) - task_res_list = state.get_task_res(task_ids={task_ins_id}, limit=None) + + assert task_ins_id + task_res_list = state.get_task_res(task_ids={task_ins_id}) # Assert retrieved_task_res = task_res_list[0] @@ -322,7 +327,7 
@@ def test_node_ids_initial_state(self) -> None: """Test retrieving all node_ids and empty initial state.""" # Prepare state: State = self.state_factory() - run_id = state.create_run("mock/mock", "v1.0.0") + run_id = state.create_run(None, None, "9f86d08", {}) # Execute retrieved_node_ids = state.get_nodes(run_id) @@ -334,7 +339,7 @@ def test_create_node_and_get_nodes(self) -> None: """Test creating a client node.""" # Prepare state: State = self.state_factory() - run_id = state.create_run("mock/mock", "v1.0.0") + run_id = state.create_run(None, None, "9f86d08", {}) node_ids = [] # Execute @@ -351,7 +356,7 @@ def test_create_node_public_key(self) -> None: # Prepare state: State = self.state_factory() public_key = b"mock" - run_id = state.create_run("mock/mock", "v1.0.0") + run_id = state.create_run(None, None, "9f86d08", {}) # Execute node_id = state.create_node(ping_interval=10, public_key=public_key) @@ -367,7 +372,7 @@ def test_create_node_public_key_twice(self) -> None: # Prepare state: State = self.state_factory() public_key = b"mock" - run_id = state.create_run("mock/mock", "v1.0.0") + run_id = state.create_run(None, None, "9f86d08", {}) node_id = state.create_node(ping_interval=10, public_key=public_key) # Execute @@ -389,7 +394,7 @@ def test_delete_node(self) -> None: """Test deleting a client node.""" # Prepare state: State = self.state_factory() - run_id = state.create_run("mock/mock", "v1.0.0") + run_id = state.create_run(None, None, "9f86d08", {}) node_id = state.create_node(ping_interval=10) # Execute @@ -404,7 +409,7 @@ def test_delete_node_public_key(self) -> None: # Prepare state: State = self.state_factory() public_key = b"mock" - run_id = state.create_run("mock/mock", "v1.0.0") + run_id = state.create_run(None, None, "9f86d08", {}) node_id = state.create_node(ping_interval=10, public_key=public_key) # Execute @@ -421,7 +426,7 @@ def test_delete_node_public_key_none(self) -> None: # Prepare state: State = self.state_factory() public_key = b"mock" 
- run_id = state.create_run("mock/mock", "v1.0.0") + run_id = state.create_run(None, None, "9f86d08", {}) node_id = 0 # Execute & Assert @@ -440,7 +445,7 @@ def test_delete_node_wrong_public_key(self) -> None: state: State = self.state_factory() public_key = b"mock" wrong_public_key = b"mock_mock" - run_id = state.create_run("mock/mock", "v1.0.0") + run_id = state.create_run(None, None, "9f86d08", {}) node_id = state.create_node(ping_interval=10, public_key=public_key) # Execute & Assert @@ -459,7 +464,7 @@ def test_get_node_id_wrong_public_key(self) -> None: state: State = self.state_factory() public_key = b"mock" wrong_public_key = b"mock_mock" - run_id = state.create_run("mock/mock", "v1.0.0") + run_id = state.create_run(None, None, "9f86d08", {}) # Execute state.create_node(ping_interval=10, public_key=public_key) @@ -474,7 +479,7 @@ def test_get_nodes_invalid_run_id(self) -> None: """Test retrieving all node_ids with invalid run_id.""" # Prepare state: State = self.state_factory() - state.create_run("mock/mock", "v1.0.0") + state.create_run(None, None, "9f86d08", {}) invalid_run_id = 61016 state.create_node(ping_interval=10) @@ -488,7 +493,7 @@ def test_num_task_ins(self) -> None: """Test if num_tasks returns correct number of not delivered task_ins.""" # Prepare state: State = self.state_factory() - run_id = state.create_run("mock/mock", "v1.0.0") + run_id = state.create_run(None, None, "9f86d08", {}) task_0 = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) task_1 = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) @@ -506,12 +511,24 @@ def test_num_task_res(self) -> None: """Test if num_tasks returns correct number of not delivered task_res.""" # Prepare state: State = self.state_factory() - run_id = state.create_run("mock/mock", "v1.0.0") + run_id = state.create_run(None, None, "9f86d08", {}) + + task_ins_0 = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) + task_ins_1 = 
create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) + task_ins_id_0 = state.store_task_ins(task_ins_0) + task_ins_id_1 = state.store_task_ins(task_ins_1) + task_0 = create_task_res( - producer_node_id=0, anonymous=True, ancestry=["1"], run_id=run_id + producer_node_id=0, + anonymous=True, + ancestry=[str(task_ins_id_0)], + run_id=run_id, ) task_1 = create_task_res( - producer_node_id=0, anonymous=True, ancestry=["1"], run_id=run_id + producer_node_id=0, + anonymous=True, + ancestry=[str(task_ins_id_1)], + run_id=run_id, ) # Store two tasks @@ -574,22 +591,22 @@ def test_store_server_private_public_key_twice(self) -> None: new_private_key_bytes, new_public_key_bytes ) - def test_client_public_keys(self) -> None: - """Test store_client_public_keys and get_client_public_keys from state.""" + def test_node_public_keys(self) -> None: + """Test store_node_public_keys and get_node_public_keys from state.""" # Prepare state: State = self.state_factory() key_pairs = [generate_key_pairs() for _ in range(3)] public_keys = {public_key_to_bytes(pair[1]) for pair in key_pairs} # Execute - state.store_client_public_keys(public_keys) - client_public_keys = state.get_client_public_keys() + state.store_node_public_keys(public_keys) + node_public_keys = state.get_node_public_keys() # Assert - assert client_public_keys == public_keys + assert node_public_keys == public_keys - def test_client_public_key(self) -> None: - """Test store_client_public_key and get_client_public_keys from state.""" + def test_node_public_key(self) -> None: + """Test store_node_public_key and get_node_public_keys from state.""" # Prepare state: State = self.state_factory() key_pairs = [generate_key_pairs() for _ in range(3)] @@ -597,17 +614,17 @@ def test_client_public_key(self) -> None: # Execute for public_key in public_keys: - state.store_client_public_key(public_key) - client_public_keys = state.get_client_public_keys() + state.store_node_public_key(public_key) + node_public_keys = 
state.get_node_public_keys() # Assert - assert client_public_keys == public_keys + assert node_public_keys == public_keys def test_acknowledge_ping(self) -> None: """Test if acknowledge_ping works and if get_nodes return online nodes.""" # Prepare state: State = self.state_factory() - run_id = state.create_run("mock/mock", "v1.0.0") + run_id = state.create_run(None, None, "9f86d08", {}) node_ids = [state.create_node(ping_interval=10) for _ in range(100)] for node_id in node_ids[:70]: state.acknowledge_ping(node_id, ping_interval=30) @@ -626,7 +643,7 @@ def test_node_unavailable_error(self) -> None: """Test if get_task_res return TaskRes containing node unavailable error.""" # Prepare state: State = self.state_factory() - run_id = state.create_run("mock/mock", "v1.0.0") + run_id = state.create_run(None, None, "9f86d08", {}) node_id_0 = state.create_node(ping_interval=90) node_id_1 = state.create_node(ping_interval=30) # Create and store TaskIns @@ -645,7 +662,7 @@ def test_node_unavailable_error(self) -> None: # Create and store TaskRes task_res_0 = create_task_res( - producer_node_id=100, + producer_node_id=node_id_0, anonymous=False, ancestry=[str(task_id_0)], run_id=run_id, @@ -654,9 +671,9 @@ def test_node_unavailable_error(self) -> None: # Execute current_time = time.time() - task_res_list: List[TaskRes] = [] + task_res_list: list[TaskRes] = [] with patch("time.time", side_effect=lambda: current_time + 50): - task_res_list = state.get_task_res({task_id_0, task_id_1}, limit=None) + task_res_list = state.get_task_res({task_id_0, task_id_1}) # Assert assert len(task_res_list) == 2 @@ -664,6 +681,222 @@ def test_node_unavailable_error(self) -> None: assert err_taskres.task.HasField("error") assert err_taskres.task.error.code == ErrorCode.NODE_UNAVAILABLE + def test_store_task_res_task_ins_expired(self) -> None: + """Test behavior of store_task_res when the TaskIns it references is expired.""" + # Prepare + state: State = self.state_factory() + run_id = 
state.create_run(None, None, "9f86d08", {}) + + task_ins = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) + task_ins.task.created_at = time.time() - task_ins.task.ttl + 0.5 + task_ins_id = state.store_task_ins(task_ins) + + with patch( + "time.time", + side_effect=lambda: task_ins.task.created_at + task_ins.task.ttl + 0.1, + ): # Expired by 0.1 seconds + task = create_task_res( + producer_node_id=0, + anonymous=True, + ancestry=[str(task_ins_id)], + run_id=run_id, + ) + + # Execute + result = state.store_task_res(task) + + # Assert + assert result is None + + def test_store_task_res_limit_ttl(self) -> None: + """Test the behavior of store_task_res regarding the TTL limit of TaskRes.""" + current_time = time.time() + + test_cases = [ + ( + current_time - 5, + 10, + current_time - 2, + 6, + True, + ), # TaskRes within allowed TTL + ( + current_time - 5, + 10, + current_time - 2, + 15, + False, + ), # TaskRes TTL exceeds max allowed TTL + ] + + for ( + task_ins_created_at, + task_ins_ttl, + task_res_created_at, + task_res_ttl, + expected_store_result, + ) in test_cases: + + # Prepare + state: State = self.state_factory() + run_id = state.create_run(None, None, "9f86d08", {}) + + task_ins = create_task_ins( + consumer_node_id=0, anonymous=True, run_id=run_id + ) + task_ins.task.created_at = task_ins_created_at + task_ins.task.ttl = task_ins_ttl + task_ins_id = state.store_task_ins(task_ins) + + task_res = create_task_res( + producer_node_id=0, + anonymous=True, + ancestry=[str(task_ins_id)], + run_id=run_id, + ) + task_res.task.created_at = task_res_created_at + task_res.task.ttl = task_res_ttl + + # Execute + res = state.store_task_res(task_res) + + # Assert + if expected_store_result: + assert res is not None + else: + assert res is None + + def test_get_task_ins_not_return_expired(self) -> None: + """Test get_task_ins not to return expired tasks.""" + # Prepare + consumer_node_id = 1 + state = self.state_factory() + run_id = 
state.create_run(None, None, "9f86d08", {}) + task_ins = create_task_ins( + consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id + ) + task_ins.task.created_at = time.time() - 5 + task_ins.task.ttl = 5.0 + + # Execute + state.store_task_ins(task_ins=task_ins) + + # Assert + with patch("time.time", side_effect=lambda: task_ins.task.created_at + 6.1): + task_ins_list = state.get_task_ins(node_id=1, limit=None) + assert len(task_ins_list) == 0 + + def test_get_task_res_not_return_expired(self) -> None: + """Test get_task_res not to return TaskRes if its TaskIns is expired.""" + # Prepare + consumer_node_id = 1 + state = self.state_factory() + run_id = state.create_run(None, None, "9f86d08", {}) + task_ins = create_task_ins( + consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id + ) + task_ins.task.created_at = time.time() - 5 + task_ins.task.ttl = 5.1 + + task_id = state.store_task_ins(task_ins=task_ins) + + task_res = create_task_res( + producer_node_id=1, + anonymous=False, + ancestry=[str(task_id)], + run_id=run_id, + ) + task_res.task.ttl = 0.1 + _ = state.store_task_res(task_res=task_res) + + with patch("time.time", side_effect=lambda: task_ins.task.created_at + 6.1): + # Execute + assert task_id is not None + task_res_list = state.get_task_res(task_ids={task_id}) + + # Assert + assert len(task_res_list) == 0 + + def test_get_task_res_returns_empty_for_missing_taskins(self) -> None: + """Test that get_task_res returns an empty result when the corresponding TaskIns + does not exist.""" + # Prepare + state = self.state_factory() + run_id = state.create_run(None, None, "9f86d08", {}) + task_ins_id = "5b0a3fc2-edba-4525-a89a-04b83420b7c8" + + task_res = create_task_res( + producer_node_id=1, + anonymous=False, + ancestry=[str(task_ins_id)], + run_id=run_id, + ) + _ = state.store_task_res(task_res=task_res) + + # Execute + task_res_list = state.get_task_res(task_ids={UUID(task_ins_id)}) + + # Assert + assert len(task_res_list) == 0 + + def 
test_get_task_res_return_if_not_expired(self) -> None: + """Test get_task_res to return TaskRes if its TaskIns exists and is not + expired.""" + # Prepare + consumer_node_id = 1 + state = self.state_factory() + run_id = state.create_run(None, None, "9f86d08", {}) + task_ins = create_task_ins( + consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id + ) + task_ins.task.created_at = time.time() - 5 + task_ins.task.ttl = 7.1 + + task_id = state.store_task_ins(task_ins=task_ins) + + task_res = create_task_res( + producer_node_id=1, + anonymous=False, + ancestry=[str(task_id)], + run_id=run_id, + ) + task_res.task.ttl = 0.1 + _ = state.store_task_res(task_res=task_res) + + with patch("time.time", side_effect=lambda: task_ins.task.created_at + 6.1): + # Execute + assert task_id is not None + task_res_list = state.get_task_res(task_ids={task_id}) + + # Assert + assert len(task_res_list) != 0 + + def test_store_task_res_fail_if_consumer_producer_id_mismatch(self) -> None: + """Test store_task_res to fail if there is a mismatch between the + consumer_node_id of taskIns and the producer_node_id of taskRes.""" + # Prepare + consumer_node_id = 1 + state = self.state_factory() + run_id = state.create_run(None, None, "9f86d08", {}) + task_ins = create_task_ins( + consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id + ) + + task_id = state.store_task_ins(task_ins=task_ins) + + task_res = create_task_res( + producer_node_id=100, # different than consumer_node_id + anonymous=False, + ancestry=[str(task_id)], + run_id=run_id, + ) + + # Execute + task_res_uuid = state.store_task_res(task_res=task_res) + + # Assert + assert task_res_uuid is None + def create_task_ins( consumer_node_id: int, @@ -697,7 +930,7 @@ def create_task_ins( def create_task_res( producer_node_id: int, anonymous: bool, - ancestry: List[str], + ancestry: list[str], run_id: int, ) -> TaskRes: """Create a TaskRes for testing.""" diff --git a/src/py/flwr/server/superlink/state/utils.py 
b/src/py/flwr/server/superlink/state/utils.py index 233a90946cc7..db44719c6a8a 100644 --- a/src/py/flwr/server/superlink/state/utils.py +++ b/src/py/flwr/server/superlink/state/utils.py @@ -17,6 +17,7 @@ import time from logging import ERROR +from os import urandom from uuid import uuid4 from flwr.common import log @@ -31,6 +32,97 @@ ) +def generate_rand_int_from_bytes(num_bytes: int) -> int: + """Generate a random unsigned integer from `num_bytes` bytes.""" + return int.from_bytes(urandom(num_bytes), "little", signed=False) + + +def convert_uint64_to_sint64(u: int) -> int: + """Convert a uint64 value to a sint64 value with the same bit sequence. + + Parameters + ---------- + u : int + The unsigned 64-bit integer to convert. + + Returns + ------- + int + The signed 64-bit integer equivalent. + + The signed 64-bit integer will have the same bit pattern as the + unsigned 64-bit integer but may have a different decimal value. + + For numbers within the range [0, `sint64` max value], the decimal + value remains the same. However, for numbers greater than the `sint64` + max value, the decimal value will differ due to the wraparound caused + by the sign bit. + """ + if u >= (1 << 63): + return u - (1 << 64) + return u + + +def convert_sint64_to_uint64(s: int) -> int: + """Convert a sint64 value to a uint64 value with the same bit sequence. + + Parameters + ---------- + s : int + The signed 64-bit integer to convert. + + Returns + ------- + int + The unsigned 64-bit integer equivalent. + + The unsigned 64-bit integer will have the same bit pattern as the + signed 64-bit integer but may have a different decimal value. + + For negative `sint64` values, the conversion adds 2^64 to the + signed value to obtain the equivalent `uint64` value. For non-negative + `sint64` values, the decimal value remains unchanged in the `uint64` + representation. 
+ """ + if s < 0: + return s + (1 << 64) + return s + + +def convert_uint64_values_in_dict_to_sint64( + data_dict: dict[str, int], keys: list[str] +) -> None: + """Convert uint64 values to sint64 in the given dictionary. + + Parameters + ---------- + data_dict : dict[str, int] + A dictionary where the values are integers to be converted. + keys : list[str] + A list of keys in the dictionary whose values need to be converted. + """ + for key in keys: + if key in data_dict: + data_dict[key] = convert_uint64_to_sint64(data_dict[key]) + + +def convert_sint64_values_in_dict_to_uint64( + data_dict: dict[str, int], keys: list[str] +) -> None: + """Convert sint64 values to uint64 in the given dictionary. + + Parameters + ---------- + data_dict : dict[str, int] + A dictionary where the values are integers to be converted. + keys : list[str] + A list of keys in the dictionary whose values need to be converted. + """ + for key in keys: + if key in data_dict: + data_dict[key] = convert_sint64_to_uint64(data_dict[key]) + + def make_node_unavailable_taskres(ref_taskins: TaskIns) -> TaskRes: """Generate a TaskRes with a node unavailable error from a TaskIns.""" current_time = time.time() diff --git a/src/py/flwr/server/superlink/state/utils_test.py b/src/py/flwr/server/superlink/state/utils_test.py new file mode 100644 index 000000000000..d55e2ffd9aa3 --- /dev/null +++ b/src/py/flwr/server/superlink/state/utils_test.py @@ -0,0 +1,150 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utils tests.""" + +import unittest + +from parameterized import parameterized + +from .utils import ( + convert_sint64_to_uint64, + convert_sint64_values_in_dict_to_uint64, + convert_uint64_to_sint64, + convert_uint64_values_in_dict_to_sint64, + generate_rand_int_from_bytes, +) + + +class UtilsTest(unittest.TestCase): + """Test utils code.""" + + @parameterized.expand( # type: ignore + [ + # Test values within the positive range of sint64 (below 2^63) + (0, 0), # Minimum positive value + (1, 1), # 1 remains 1 in both uint64 and sint64 + (2**62, 2**62), # Mid-range positive value + (2**63 - 1, 2**63 - 1), # Maximum positive value for sint64 + # Test values at or above 2^63 (become negative in sint64) + (2**63, -(2**63)), # Minimum negative value for sint64 + (2**63 + 1, -(2**63) + 1), # Slightly above the boundary + (9223372036854775811, -9223372036854775805), # Some value > sint64 max + (2**64 - 1, -1), # Maximum uint64 value becomes -1 in sint64 + ] + ) + def test_convert_uint64_to_sint64(self, before: int, after: int) -> None: + """Test conversion from uint64 to sint64.""" + self.assertEqual(convert_uint64_to_sint64(before), after) + + @parameterized.expand( # type: ignore + [ + # Test values within the negative range of sint64 + (-(2**63), 2**63), # Minimum sint64 value becomes 2^63 in uint64 + (-(2**63) + 1, 2**63 + 1), # Slightly above the minimum + (-9223372036854775805, 9223372036854775811), # Some value > sint64 max + # Test zero-adjacent inputs + (-1, 2**64 - 1), # -1 in sint64 becomes 2^64 - 1 in uint64 + (0, 0), # 0 remains 0 in both sint64 and uint64 + (1, 1), # 1 remains 1 in both sint64 and uint64 + # Test values within the positive range of sint64 + (2**63 - 1, 2**63 - 1), # Maximum positive value in sint64 + # Test boundary and maximum uint64 value + (2**63, 
2**63), # Exact boundary value for sint64 + (2**64 - 1, 2**64 - 1), # Maximum uint64 value, stays the same + ] + ) + def test_sint64_to_uint64(self, before: int, after: int) -> None: + """Test conversion from sint64 to uint64.""" + self.assertEqual(convert_sint64_to_uint64(before), after) + + @parameterized.expand( # type: ignore + [ + (0), + (1), + (2**62), + (2**63 - 1), + (2**63), + (2**63 + 1), + (9223372036854775811), + (2**64 - 1), + ] + ) + def test_uint64_to_sint64_to_uint64(self, expected: int) -> None: + """Test conversion from sint64 to uint64.""" + actual = convert_sint64_to_uint64(convert_uint64_to_sint64(expected)) + self.assertEqual(expected, actual) + + @parameterized.expand( # type: ignore + [ + # Test cases with uint64 values + ( + {"a": 0, "b": 2**63 - 1, "c": 2**63, "d": 2**64 - 1}, + ["a", "b", "c", "d"], + {"a": 0, "b": 2**63 - 1, "c": -(2**63), "d": -1}, + ), + ( + {"a": 1, "b": 2**62, "c": 2**63 + 1}, + ["a", "b", "c"], + {"a": 1, "b": 2**62, "c": -(2**63) + 1}, + ), + # Edge cases with mixed uint64 values and keys + ( + {"a": 2**64 - 1, "b": 12345, "c": 0}, + ["a", "b"], + {"a": -1, "b": 12345, "c": 0}, + ), + ] + ) + def test_convert_uint64_values_in_dict_to_sint64( + self, input_dict: dict[str, int], keys: list[str], expected_dict: dict[str, int] + ) -> None: + """Test uint64 to sint64 conversion in a dictionary.""" + convert_uint64_values_in_dict_to_sint64(input_dict, keys) + self.assertEqual(input_dict, expected_dict) + + @parameterized.expand( # type: ignore + [ + # Test cases with sint64 values + ( + {"a": 0, "b": 2**63 - 1, "c": -(2**63), "d": -1}, + ["a", "b", "c", "d"], + {"a": 0, "b": 2**63 - 1, "c": 2**63, "d": 2**64 - 1}, + ), + ( + {"a": -1, "b": -(2**63) + 1, "c": 12345}, + ["a", "b", "c"], + {"a": 2**64 - 1, "b": 2**63 + 1, "c": 12345}, + ), + # Edge cases with mixed sint64 values and keys + ( + {"a": -1, "b": 12345, "c": 0}, + ["a", "b"], + {"a": 2**64 - 1, "b": 12345, "c": 0}, + ), + ] + ) + def 
test_convert_sint64_values_in_dict_to_uint64( + self, input_dict: dict[str, int], keys: list[str], expected_dict: dict[str, int] + ) -> None: + """Test sint64 to uint64 conversion in a dictionary.""" + convert_sint64_values_in_dict_to_uint64(input_dict, keys) + self.assertEqual(input_dict, expected_dict) + + def test_generate_rand_int_from_bytes_unsigned_int(self) -> None: + """Test that the generated integer is unsigned (non-negative).""" + for num_bytes in range(1, 9): + with self.subTest(num_bytes=num_bytes): + rand_int = generate_rand_int_from_bytes(num_bytes) + self.assertGreaterEqual(rand_int, 0) diff --git a/src/py/flwr/server/typing.py b/src/py/flwr/server/typing.py index 01143af74392..cdb1c0db4fe7 100644 --- a/src/py/flwr/server/typing.py +++ b/src/py/flwr/server/typing.py @@ -20,6 +20,8 @@ from flwr.common import Context from .driver import Driver +from .serverapp_components import ServerAppComponents ServerAppCallable = Callable[[Driver, Context], None] Workflow = Callable[[Driver, Context], None] +ServerFn = Callable[[Context], ServerAppComponents] diff --git a/src/py/flwr/server/utils/__init__.py b/src/py/flwr/server/utils/__init__.py index c370716adaac..8994374c4d08 100644 --- a/src/py/flwr/server/utils/__init__.py +++ b/src/py/flwr/server/utils/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2021 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/src/py/flwr/server/utils/tensorboard.py b/src/py/flwr/server/utils/tensorboard.py index 3e8d1e62411e..281e8949c53c 100644 --- a/src/py/flwr/server/utils/tensorboard.py +++ b/src/py/flwr/server/utils/tensorboard.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2021 Flower Labs GmbH. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,7 +18,7 @@ import os from datetime import datetime from logging import WARN -from typing import Callable, Dict, List, Optional, Tuple, Union, cast +from typing import Callable, Optional, Union, cast from flwr.common import EvaluateRes, Scalar from flwr.common.logger import log @@ -92,9 +92,9 @@ class TBWrapper(strategy_class): # type: ignore def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Hooks into aggregate_evaluate for TensorBoard logging purpose.""" # Execute decorated function and extract results for logging # They will be returned at the end of this function but also diff --git a/src/py/flwr/server/utils/tensorboard_test.py b/src/py/flwr/server/utils/tensorboard_test.py index 1827a42cf6e6..689755c6da16 100644 --- a/src/py/flwr/server/utils/tensorboard_test.py +++ b/src/py/flwr/server/utils/tensorboard_test.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2021 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/src/py/flwr/server/utils/validator.py b/src/py/flwr/server/utils/validator.py index c0b0ec85761c..01f926c4985d 100644 --- a/src/py/flwr/server/utils/validator.py +++ b/src/py/flwr/server/utils/validator.py @@ -15,13 +15,14 @@ """Validators.""" -from typing import List, Union +import time +from typing import Union from flwr.proto.task_pb2 import TaskIns, TaskRes # pylint: disable=E0611 # pylint: disable-next=too-many-branches,too-many-statements -def validate_task_ins_or_res(tasks_ins_res: Union[TaskIns, TaskRes]) -> List[str]: +def validate_task_ins_or_res(tasks_ins_res: Union[TaskIns, TaskRes]) -> list[str]: """Validate a TaskIns or TaskRes.""" validation_errors = [] @@ -47,6 +48,11 @@ def validate_task_ins_or_res(tasks_ins_res: Union[TaskIns, TaskRes]) -> List[str # unix timestamp of 27 March 2024 00h:00m:00s UTC validation_errors.append("`pushed_at` is not a recent timestamp") + # Verify TTL and created_at time + current_time = time.time() + if tasks_ins_res.task.created_at + tasks_ins_res.task.ttl <= current_time: + validation_errors.append("Task TTL has expired") + # TaskIns specific if isinstance(tasks_ins_res, TaskIns): # Task producer diff --git a/src/py/flwr/server/utils/validator_test.py b/src/py/flwr/server/utils/validator_test.py index 61fe094c23d4..ce8e3636467c 100644 --- a/src/py/flwr/server/utils/validator_test.py +++ b/src/py/flwr/server/utils/validator_test.py @@ -17,7 +17,6 @@ import time import unittest -from typing import List, Tuple from flwr.common import DEFAULT_TTL from flwr.proto.node_pb2 import Node # pylint: disable=E0611 @@ -52,12 +51,12 @@ def test_is_valid_task_res(self) -> None: """Test is_valid task_res.""" # Prepare # (producer_node_id, anonymous, ancestry) - valid_res: List[Tuple[int, bool, List[str]]] = [ + valid_res: list[tuple[int, bool, list[str]]] = [ (0, True, ["1"]), (1, False, ["1"]), ] - invalid_res: List[Tuple[int, bool, List[str]]] = [ + invalid_res: list[tuple[int, bool, list[str]]] = [ (0, False, []), (0, 
False, ["1"]), (0, True, []), @@ -77,6 +76,24 @@ def test_is_valid_task_res(self) -> None: val_errors = validate_task_ins_or_res(msg) self.assertTrue(val_errors, (producer_node_id, anonymous, ancestry)) + def test_task_ttl_expired(self) -> None: + """Test validation for expired Task TTL.""" + # Prepare an expired TaskIns + expired_task_ins = create_task_ins(0, True) + expired_task_ins.task.created_at = time.time() - 10 # 10 seconds ago + expired_task_ins.task.ttl = 6 # 6 seconds TTL + + expired_task_res = create_task_res(0, True, ["1"]) + expired_task_res.task.created_at = time.time() - 10 # 10 seconds ago + expired_task_res.task.ttl = 6 # 6 seconds TTL + + # Execute & Assert + val_errors_ins = validate_task_ins_or_res(expired_task_ins) + self.assertIn("Task TTL has expired", val_errors_ins) + + val_errors_res = validate_task_ins_or_res(expired_task_res) + self.assertIn("Task TTL has expired", val_errors_res) + def create_task_ins( consumer_node_id: int, @@ -110,7 +127,7 @@ def create_task_ins( def create_task_res( producer_node_id: int, anonymous: bool, - ancestry: List[str], + ancestry: list[str], ) -> TaskRes: """Create a TaskRes for testing.""" task_res = TaskRes( diff --git a/src/py/flwr/server/workflow/default_workflows.py b/src/py/flwr/server/workflow/default_workflows.py index 80759316da84..484a747292d5 100644 --- a/src/py/flwr/server/workflow/default_workflows.py +++ b/src/py/flwr/server/workflow/default_workflows.py @@ -18,7 +18,7 @@ import io import timeit from logging import INFO, WARN -from typing import List, Optional, Tuple, Union, cast +from typing import Optional, Union, cast import flwr.common.recordset_compat as compat from flwr.common import ( @@ -167,7 +167,7 @@ def default_init_params_workflow(driver: Driver, context: Context) -> None: context.state.parameters_records[MAIN_PARAMS_RECORD] = paramsrecord # Evaluate initial parameters - log(INFO, "Evaluating initial global parameters") + log(INFO, "Starting evaluation of initial global 
parameters") parameters = compat.parametersrecord_to_parameters(paramsrecord, keep_input=True) res = context.strategy.evaluate(0, parameters=parameters) if res is not None: @@ -179,6 +179,8 @@ def default_init_params_workflow(driver: Driver, context: Context) -> None: ) context.history.add_loss_centralized(server_round=0, loss=res[0]) context.history.add_metrics_centralized(server_round=0, metrics=res[1]) + else: + log(INFO, "Evaluation returned no results (`None`)") def default_centralized_evaluation_workflow(_: Driver, context: Context) -> None: @@ -274,8 +276,8 @@ def default_fit_workflow( # pylint: disable=R0914 ) # Aggregate training results - results: List[Tuple[ClientProxy, FitRes]] = [] - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [] + results: list[tuple[ClientProxy, FitRes]] = [] + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [] for msg in messages: if msg.has_content(): proxy = node_id_to_proxy[msg.metadata.src_node_id] @@ -360,8 +362,8 @@ def default_evaluate_workflow(driver: Driver, context: Context) -> None: ) # Aggregate the evaluation results - results: List[Tuple[ClientProxy, EvaluateRes]] = [] - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]] = [] + results: list[tuple[ClientProxy, EvaluateRes]] = [] + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]] = [] for msg in messages: if msg.has_content(): proxy = node_id_to_proxy[msg.metadata.src_node_id] diff --git a/src/py/flwr/server/workflow/secure_aggregation/secagg_workflow.py b/src/py/flwr/server/workflow/secure_aggregation/secagg_workflow.py index f56423e4a0d0..cd955430a4f0 100644 --- a/src/py/flwr/server/workflow/secure_aggregation/secagg_workflow.py +++ b/src/py/flwr/server/workflow/secure_aggregation/secagg_workflow.py @@ -35,6 +35,7 @@ class SecAggWorkflow(SecAggPlusWorkflow): contributions to compute the weighted average of model parameters. 
The protocol involves four main stages: + - 'setup': Send SecAgg configuration to clients and collect their public keys. - 'share keys': Broadcast public keys among clients and collect encrypted secret key shares. diff --git a/src/py/flwr/server/workflow/secure_aggregation/secaggplus_workflow.py b/src/py/flwr/server/workflow/secure_aggregation/secaggplus_workflow.py index d6d97c28f313..d84a5496dfe1 100644 --- a/src/py/flwr/server/workflow/secure_aggregation/secaggplus_workflow.py +++ b/src/py/flwr/server/workflow/secure_aggregation/secaggplus_workflow.py @@ -18,7 +18,7 @@ import random from dataclasses import dataclass, field from logging import DEBUG, ERROR, INFO, WARN -from typing import Dict, List, Optional, Set, Tuple, Union, cast +from typing import Optional, Union, cast import flwr.common.recordset_compat as compat from flwr.common import ( @@ -65,22 +65,23 @@ class WorkflowState: # pylint: disable=R0902 """The state of the SecAgg+ protocol.""" - nid_to_proxies: Dict[int, ClientProxy] = field(default_factory=dict) - nid_to_fitins: Dict[int, RecordSet] = field(default_factory=dict) - sampled_node_ids: Set[int] = field(default_factory=set) - active_node_ids: Set[int] = field(default_factory=set) + nid_to_proxies: dict[int, ClientProxy] = field(default_factory=dict) + nid_to_fitins: dict[int, RecordSet] = field(default_factory=dict) + sampled_node_ids: set[int] = field(default_factory=set) + active_node_ids: set[int] = field(default_factory=set) num_shares: int = 0 threshold: int = 0 clipping_range: float = 0.0 quantization_range: int = 0 mod_range: int = 0 max_weight: float = 0.0 - nid_to_neighbours: Dict[int, Set[int]] = field(default_factory=dict) - nid_to_publickeys: Dict[int, List[bytes]] = field(default_factory=dict) - forward_srcs: Dict[int, List[int]] = field(default_factory=dict) - forward_ciphertexts: Dict[int, List[bytes]] = field(default_factory=dict) + nid_to_neighbours: dict[int, set[int]] = field(default_factory=dict) + nid_to_publickeys: 
dict[int, list[bytes]] = field(default_factory=dict) + forward_srcs: dict[int, list[int]] = field(default_factory=dict) + forward_ciphertexts: dict[int, list[bytes]] = field(default_factory=dict) aggregate_ndarrays: NDArrays = field(default_factory=list) - legacy_results: List[Tuple[ClientProxy, FitRes]] = field(default_factory=list) + legacy_results: list[tuple[ClientProxy, FitRes]] = field(default_factory=list) + failures: list[Exception] = field(default_factory=list) class SecAggPlusWorkflow: @@ -98,6 +99,7 @@ class SecAggPlusWorkflow: contributions to compute the weighted average of model parameters. The protocol involves four main stages: + - 'setup': Send SecAgg+ configuration to clients and collect their public keys. - 'share keys': Broadcast public keys among clients and collect encrypted secret key shares. @@ -394,6 +396,7 @@ def make(nid: int) -> Message: for msg in msgs: if msg.has_error(): + state.failures.append(Exception(msg.error)) continue key_dict = msg.content.configs_records[RECORD_KEY_CONFIGS] node_id = msg.metadata.src_node_id @@ -441,20 +444,23 @@ def make(nid: int) -> Message: ) # Build forward packet list dictionary - srcs: List[int] = [] - dsts: List[int] = [] - ciphertexts: List[bytes] = [] - fwd_ciphertexts: Dict[int, List[bytes]] = { + srcs: list[int] = [] + dsts: list[int] = [] + ciphertexts: list[bytes] = [] + fwd_ciphertexts: dict[int, list[bytes]] = { nid: [] for nid in state.active_node_ids } # dest node ID -> list of ciphertexts - fwd_srcs: Dict[int, List[int]] = { + fwd_srcs: dict[int, list[int]] = { nid: [] for nid in state.active_node_ids } # dest node ID -> list of src node IDs for msg in msgs: + if msg.has_error(): + state.failures.append(Exception(msg.error)) + continue node_id = msg.metadata.src_node_id res_dict = msg.content.configs_records[RECORD_KEY_CONFIGS] - dst_lst = cast(List[int], res_dict[Key.DESTINATION_LIST]) - ctxt_lst = cast(List[bytes], res_dict[Key.CIPHERTEXT_LIST]) + dst_lst = cast(list[int], 
res_dict[Key.DESTINATION_LIST]) + ctxt_lst = cast(list[bytes], res_dict[Key.CIPHERTEXT_LIST]) srcs += [node_id] * len(dst_lst) dsts += dst_lst ciphertexts += ctxt_lst @@ -515,8 +521,11 @@ def make(nid: int) -> Message: # Sum collected masked vectors and compute active/dead node IDs masked_vector = None for msg in msgs: + if msg.has_error(): + state.failures.append(Exception(msg.error)) + continue res_dict = msg.content.configs_records[RECORD_KEY_CONFIGS] - bytes_list = cast(List[bytes], res_dict[Key.MASKED_PARAMETERS]) + bytes_list = cast(list[bytes], res_dict[Key.MASKED_PARAMETERS]) client_masked_vec = [bytes_to_ndarray(b) for b in bytes_list] if masked_vector is None: masked_vector = client_masked_vec @@ -528,6 +537,9 @@ def make(nid: int) -> Message: # Backward compatibility with Strategy for msg in msgs: + if msg.has_error(): + state.failures.append(Exception(msg.error)) + continue fitres = compat.recordset_to_fitres(msg.content, True) proxy = state.nid_to_proxies[msg.metadata.src_node_id] state.legacy_results.append((proxy, fitres)) @@ -580,13 +592,16 @@ def make(nid: int) -> Message: ) # Build collected shares dict - collected_shares_dict: Dict[int, List[bytes]] = {} + collected_shares_dict: dict[int, list[bytes]] = {} for nid in state.sampled_node_ids: collected_shares_dict[nid] = [] for msg in msgs: + if msg.has_error(): + state.failures.append(Exception(msg.error)) + continue res_dict = msg.content.configs_records[RECORD_KEY_CONFIGS] - nids = cast(List[int], res_dict[Key.NODE_ID_LIST]) - shares = cast(List[bytes], res_dict[Key.SHARE_LIST]) + nids = cast(list[int], res_dict[Key.NODE_ID_LIST]) + shares = cast(list[bytes], res_dict[Key.SHARE_LIST]) for owner_nid, share in zip(nids, shares): collected_shares_dict[owner_nid].append(share) @@ -652,9 +667,11 @@ def make(nid: int) -> Message: INFO, "aggregate_fit: received %s results and %s failures", len(results), - 0, + len(state.failures), + ) + aggregated_result = context.strategy.aggregate_fit( + 
current_round, results, state.failures # type: ignore ) - aggregated_result = context.strategy.aggregate_fit(current_round, results, []) parameters_aggregated, metrics_aggregated = aggregated_result # Update the parameters and write history diff --git a/src/py/flwr/simulation/__init__.py b/src/py/flwr/simulation/__init__.py index 57b0b01eb319..a171347b1507 100644 --- a/src/py/flwr/simulation/__init__.py +++ b/src/py/flwr/simulation/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2021 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -28,7 +28,7 @@ To install the necessary dependencies, install `flwr` with the `simulation` extra: - pip install -U flwr["simulation"] + pip install -U "flwr[simulation]" """ def start_simulation(*args, **kwargs): # type: ignore @@ -36,4 +36,7 @@ def start_simulation(*args, **kwargs): # type: ignore raise ImportError(RAY_IMPORT_ERROR) -__all__ = ["start_simulation", "run_simulation"] +__all__ = [ + "run_simulation", + "start_simulation", +] diff --git a/src/py/flwr/simulation/app.py b/src/py/flwr/simulation/app.py index 4b4b7249ccd3..0070d75c53dc 100644 --- a/src/py/flwr/simulation/app.py +++ b/src/py/flwr/simulation/app.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2021 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -22,19 +22,21 @@ import traceback import warnings from logging import ERROR, INFO -from typing import Any, Dict, List, Optional, Type, Union +from typing import Any, Optional, Union import ray from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy -from flwr.client import ClientFn +from flwr.client import ClientFnExt from flwr.common import EventType, event -from flwr.common.logger import log, set_logger_propagation +from flwr.common.constant import NODE_ID_NUM_BYTES +from flwr.common.logger import log, set_logger_propagation, warn_unsupported_feature from flwr.server.client_manager import ClientManager from flwr.server.history import History from flwr.server.server import Server, init_defaults, run_fl from flwr.server.server_config import ServerConfig from flwr.server.strategy import Strategy +from flwr.server.superlink.state.utils import generate_rand_int_from_bytes from flwr.simulation.ray_transport.ray_actor import ( ClientAppActor, VirtualClientEngineActor, @@ -51,7 +53,7 @@ `start_simulation( *, client_fn: ClientFn, - num_clients: Optional[int] = None, + num_clients: int, clients_ids: Optional[List[str]] = None, client_resources: Optional[Dict[str, float]] = None, server: Optional[Server] = None, @@ -70,45 +72,62 @@ """ +NodeToPartitionMapping = dict[int, int] + + +def _create_node_id_to_partition_mapping( + num_clients: int, +) -> NodeToPartitionMapping: + """Generate a node_id:partition_id mapping.""" + nodes_mapping: NodeToPartitionMapping = {} # {node-id; partition-id} + for i in range(num_clients): + while True: + node_id = generate_rand_int_from_bytes(NODE_ID_NUM_BYTES) + if node_id not in nodes_mapping: + break + nodes_mapping[node_id] = i + return nodes_mapping + # pylint: disable=too-many-arguments,too-many-statements,too-many-branches def start_simulation( *, - client_fn: ClientFn, - num_clients: Optional[int] = None, - clients_ids: Optional[List[str]] = None, - client_resources: Optional[Dict[str, float]] = None, + client_fn: 
ClientFnExt, + num_clients: int, + clients_ids: Optional[list[str]] = None, # UNSUPPORTED, WILL BE REMOVED + client_resources: Optional[dict[str, float]] = None, server: Optional[Server] = None, config: Optional[ServerConfig] = None, strategy: Optional[Strategy] = None, client_manager: Optional[ClientManager] = None, - ray_init_args: Optional[Dict[str, Any]] = None, + ray_init_args: Optional[dict[str, Any]] = None, keep_initialised: Optional[bool] = False, - actor_type: Type[VirtualClientEngineActor] = ClientAppActor, - actor_kwargs: Optional[Dict[str, Any]] = None, + actor_type: type[VirtualClientEngineActor] = ClientAppActor, + actor_kwargs: Optional[dict[str, Any]] = None, actor_scheduling: Union[str, NodeAffinitySchedulingStrategy] = "DEFAULT", ) -> History: """Start a Ray-based Flower simulation server. Parameters ---------- - client_fn : ClientFn - A function creating client instances. The function must take a single - `str` argument called `cid`. It should return a single client instance - of type Client. Note that the created client instances are ephemeral - and will often be destroyed after a single method invocation. Since client - instances are not long-lived, they should not attempt to carry state over - method invocations. Any state required by the instance (model, dataset, - hyperparameters, ...) should be (re-)created in either the call to `client_fn` - or the call to any of the client methods (e.g., load evaluation data in the - `evaluate` method itself). - num_clients : Optional[int] - The total number of clients in this simulation. This must be set if - `clients_ids` is not set and vice-versa. + client_fn : ClientFnExt + A function creating `Client` instances. The function must have the signature + `client_fn(context: Context). It should return + a single client instance of type `Client`. Note that the created client + instances are ephemeral and will often be destroyed after a single method + invocation. 
Since client instances are not long-lived, they should not attempt + to carry state over method invocations. Any state required by the instance + (model, dataset, hyperparameters, ...) should be (re-)created in either the + call to `client_fn` or the call to any of the client methods (e.g., load + evaluation data in the `evaluate` method itself). + num_clients : int + The total number of clients in this simulation. clients_ids : Optional[List[str]] + UNSUPPORTED, WILL BE REMOVED. USE `num_clients` INSTEAD. List `client_id`s for each client. This is only required if `num_clients` is not set. Setting both `num_clients` and `clients_ids` with `len(clients_ids)` not equal to `num_clients` generates an error. + Using this argument will raise an error. client_resources : Optional[Dict[str, float]] (default: `{"num_cpus": 1, "num_gpus": 0.0}`) CPU and GPU resources for a single client. Supported keys are `num_cpus` and `num_gpus`. To understand the GPU utilization caused by @@ -158,7 +177,6 @@ def start_simulation( is an advanced feature. For all details, please refer to the Ray documentation: https://docs.ray.io/en/latest/ray-core/scheduling/index.html - Returns ------- hist : flwr.server.history.History @@ -170,6 +188,14 @@ def start_simulation( {"num_clients": len(clients_ids) if clients_ids is not None else num_clients}, ) + if clients_ids is not None: + warn_unsupported_feature( + "Passing `clients_ids` to `start_simulation` is deprecated and not longer " + "used by `start_simulation`. Use `num_clients` exclusively instead." 
+ ) + log(ERROR, "`clients_ids` argument used.") + sys.exit() + # Set logger propagation loop: Optional[asyncio.AbstractEventLoop] = None try: @@ -196,20 +222,8 @@ def start_simulation( initialized_config, ) - # clients_ids takes precedence - cids: List[str] - if clients_ids is not None: - if (num_clients is not None) and (len(clients_ids) != num_clients): - log(ERROR, INVALID_ARGUMENTS_START_SIMULATION) - sys.exit() - else: - cids = clients_ids - else: - if num_clients is None: - log(ERROR, INVALID_ARGUMENTS_START_SIMULATION) - sys.exit() - else: - cids = [str(x) for x in range(num_clients)] + # Create node-id to partition-id mapping + nodes_mapping = _create_node_id_to_partition_mapping(num_clients) # Default arguments for Ray initialization if not ray_init_args: @@ -265,7 +279,7 @@ def start_simulation( # An actor factory. This is called N times to add N actors # to the pool. If at some point the pool can accommodate more actors # this will be called again. - def create_actor_fn() -> Type[VirtualClientEngineActor]: + def create_actor_fn() -> type[VirtualClientEngineActor]: return actor_type.options( # type: ignore **client_resources, scheduling_strategy=actor_scheduling, @@ -308,10 +322,12 @@ def update_resources(f_stop: threading.Event) -> None: ) # Register one RayClientProxy object for each client with the ClientManager - for cid in cids: + for node_id, partition_id in nodes_mapping.items(): client_proxy = RayActorClientProxy( client_fn=client_fn, - cid=cid, + node_id=node_id, + partition_id=partition_id, + num_partitions=num_clients, actor_pool=pool, ) initialized_server.client_manager().register(client=client_proxy) diff --git a/src/py/flwr/simulation/ray_transport/__init__.py b/src/py/flwr/simulation/ray_transport/__init__.py index 0e82b75bb4b3..ed4971935a15 100644 --- a/src/py/flwr/simulation/ray_transport/__init__.py +++ b/src/py/flwr/simulation/ray_transport/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. 
+# Copyright 2021 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/src/py/flwr/simulation/ray_transport/ray_actor.py b/src/py/flwr/simulation/ray_transport/ray_actor.py index 9caf0fc3e6c0..4fb48a99b689 100644 --- a/src/py/flwr/simulation/ray_transport/ray_actor.py +++ b/src/py/flwr/simulation/ray_transport/ray_actor.py @@ -14,11 +14,10 @@ # ============================================================================== """Ray-based Flower Actor and ActorPool implementation.""" -import asyncio import threading from abc import ABC from logging import DEBUG, ERROR, WARNING -from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union +from typing import Any, Callable, Optional, Union import ray from ray import ObjectRef @@ -45,7 +44,7 @@ def run( message: Message, cid: str, context: Context, - ) -> Tuple[str, Message, Context]: + ) -> tuple[str, Message, Context]: """Run a client run.""" # Pass message through ClientApp and return a message # return also cid which is needed to ensure results @@ -82,7 +81,7 @@ def __init__(self, on_actor_init_fn: Optional[Callable[[], None]] = None) -> Non on_actor_init_fn() -def pool_size_from_resources(client_resources: Dict[str, Union[int, float]]) -> int: +def pool_size_from_resources(client_resources: dict[str, Union[int, float]]) -> int: """Calculate number of Actors that fit in the cluster. For this we consider the resources available on each node and those required per @@ -125,14 +124,14 @@ def pool_size_from_resources(client_resources: Dict[str, Union[int, float]]) -> WARNING, "The ActorPool is empty. The system (CPUs=%s, GPUs=%s) " "does not meet the criteria to host at least one client with resources:" - " %s. Lowering the `client_resources` could help.", + " %s. 
Lowering these resources could help.", num_cpus, num_gpus, client_resources, ) raise ValueError( "ActorPool is empty. Stopping Simulation. " - "Check 'client_resources' passed to `start_simulation`" + "Check `num_cpus` and/or `num_gpus` passed to the simulation engine" ) return total_num_actors @@ -163,9 +162,9 @@ class VirtualClientEngineActorPool(ActorPool): def __init__( self, - create_actor_fn: Callable[[], Type[VirtualClientEngineActor]], - client_resources: Dict[str, Union[int, float]], - actor_list: Optional[List[Type[VirtualClientEngineActor]]] = None, + create_actor_fn: Callable[[], type[VirtualClientEngineActor]], + client_resources: dict[str, Union[int, float]], + actor_list: Optional[list[type[VirtualClientEngineActor]]] = None, ): self.client_resources = client_resources self.create_actor_fn = create_actor_fn @@ -184,10 +183,10 @@ def __init__( # A dict that maps cid to another dict containing: a reference to the remote job # and its status (i.e. whether it is ready or not) - self._cid_to_future: Dict[ - str, Dict[str, Union[bool, Optional[ObjectRef[Any]]]] + self._cid_to_future: dict[ + str, dict[str, Union[bool, Optional[ObjectRef[Any]]]] ] = {} - self.actor_to_remove: Set[str] = set() # a set + self.actor_to_remove: set[str] = set() # a set self.num_actors = len(actors) self.lock = threading.RLock() @@ -211,7 +210,7 @@ def add_actors_to_pool(self, num_actors: int) -> None: self._idle_actors.extend(new_actors) self.num_actors += num_actors - def submit(self, fn: Any, value: Tuple[ClientAppFn, Message, str, Context]) -> None: + def submit(self, fn: Any, value: tuple[ClientAppFn, Message, str, Context]) -> None: """Take an idle actor and assign it to run a client app and Message. 
Submit a job to an actor by first removing it from the list of idle actors, then @@ -221,7 +220,7 @@ def submit(self, fn: Any, value: Tuple[ClientAppFn, Message, str, Context]) -> N actor = self._idle_actors.pop() if self._check_and_remove_actor_from_pool(actor): future = fn(actor, app_fn, mssg, cid, context) - future_key = tuple(future) if isinstance(future, List) else future + future_key = tuple(future) if isinstance(future, list) else future self._future_to_actor[future_key] = (self._next_task_index, actor, cid) self._next_task_index += 1 @@ -229,7 +228,7 @@ def submit(self, fn: Any, value: Tuple[ClientAppFn, Message, str, Context]) -> N self._cid_to_future[cid]["future"] = future_key def submit_client_job( - self, actor_fn: Any, job: Tuple[ClientAppFn, Message, str, Context] + self, actor_fn: Any, job: tuple[ClientAppFn, Message, str, Context] ) -> None: """Submit a job while tracking client ids.""" _, _, cid, _ = job @@ -269,7 +268,7 @@ def _is_future_ready(self, cid: str) -> bool: return self._cid_to_future[cid]["ready"] # type: ignore - def _fetch_future_result(self, cid: str) -> Tuple[Message, Context]: + def _fetch_future_result(self, cid: str) -> tuple[Message, Context]: """Fetch result and updated context for a VirtualClient from Object Store. The job submitted by the ClientProxy interfacing with client with cid=cid is @@ -383,7 +382,7 @@ def process_unordered_future(self, timeout: Optional[float] = None) -> None: def get_client_result( self, cid: str, timeout: Optional[float] - ) -> Tuple[Message, Context]: + ) -> tuple[Message, Context]: """Get result from VirtualClient with specific cid.""" # Loop until all jobs submitted to the pool are completed. 
Break early # if the result for the ClientProxy calling this method is ready @@ -399,27 +398,19 @@ def get_client_result( return self._fetch_future_result(cid) -def init_ray(*args: Any, **kwargs: Any) -> None: - """Intialises Ray if not already initialised.""" - if not ray.is_initialized(): - ray.init(*args, **kwargs) - - class BasicActorPool: """A basic actor pool.""" def __init__( self, - actor_type: Type[VirtualClientEngineActor], - client_resources: Dict[str, Union[int, float]], - actor_kwargs: Dict[str, Any], + actor_type: type[VirtualClientEngineActor], + client_resources: dict[str, Union[int, float]], + actor_kwargs: dict[str, Any], ): self.client_resources = client_resources # Queue of idle actors - self.pool: "asyncio.Queue[Type[VirtualClientEngineActor]]" = asyncio.Queue( - maxsize=1024 - ) + self.pool: list[VirtualClientEngineActor] = [] self.num_actors = 0 # Resolve arguments to pass during actor init @@ -433,38 +424,37 @@ def __init__( # Figure out how many actors can be created given the cluster resources # and the resources the user indicates each VirtualClient will need self.actors_capacity = pool_size_from_resources(client_resources) - self._future_to_actor: Dict[Any, Type[VirtualClientEngineActor]] = {} + self._future_to_actor: dict[Any, VirtualClientEngineActor] = {} def is_actor_available(self) -> bool: """Return true if there is an idle actor.""" - return self.pool.qsize() > 0 + return len(self.pool) > 0 - async def add_actors_to_pool(self, num_actors: int) -> None: + def add_actors_to_pool(self, num_actors: int) -> None: """Add actors to the pool. This method may be executed also if new resources are added to your Ray cluster (e.g. you add a new node). 
""" for _ in range(num_actors): - await self.pool.put(self.create_actor_fn()) # type: ignore + self.pool.append(self.create_actor_fn()) # type: ignore self.num_actors += num_actors - async def terminate_all_actors(self) -> None: + def terminate_all_actors(self) -> None: """Terminate actors in pool.""" num_terminated = 0 - while self.pool.qsize(): - actor = await self.pool.get() + for actor in self.pool: actor.terminate.remote() # type: ignore num_terminated += 1 log(DEBUG, "Terminated %i actors", num_terminated) - async def submit( - self, actor_fn: Any, job: Tuple[ClientAppFn, Message, str, Context] + def submit( + self, actor_fn: Any, job: tuple[ClientAppFn, Message, str, Context] ) -> Any: """On idle actor, submit job and return future.""" # Remove idle actor from pool - actor = await self.pool.get() + actor = self.pool.pop() # Submit job to actor app_fn, mssg, cid, context = job future = actor_fn(actor, app_fn, mssg, cid, context) @@ -473,18 +463,18 @@ async def submit( self._future_to_actor[future] = actor return future - async def add_actor_back_to_pool(self, future: Any) -> None: + def add_actor_back_to_pool(self, future: Any) -> None: """Ad actor assigned to run future back into the pool.""" actor = self._future_to_actor.pop(future) - await self.pool.put(actor) + self.pool.append(actor) - async def fetch_result_and_return_actor_to_pool( + def fetch_result_and_return_actor_to_pool( self, future: Any - ) -> Tuple[Message, Context]: + ) -> tuple[Message, Context]: """Pull result given a future and add actor back to pool.""" - # Get actor that ran job - await self.add_actor_back_to_pool(future) # Retrieve result for object store # Instead of doing ray.get(future) we await it - _, out_mssg, updated_context = await future + _, out_mssg, updated_context = ray.get(future) + # Get actor that ran job + self.add_actor_back_to_pool(future) return out_mssg, updated_context diff --git a/src/py/flwr/simulation/ray_transport/ray_client_proxy.py 
b/src/py/flwr/simulation/ray_transport/ray_client_proxy.py index 5e344eb087ee..ad9be6bd1fc0 100644 --- a/src/py/flwr/simulation/ray_transport/ray_client_proxy.py +++ b/src/py/flwr/simulation/ray_transport/ray_client_proxy.py @@ -1,4 +1,4 @@ -# Copyright 2020 Flower Labs GmbH. All Rights Reserved. +# Copyright 2021 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -20,11 +20,16 @@ from typing import Optional from flwr import common -from flwr.client import ClientFn +from flwr.client import ClientFnExt from flwr.client.client_app import ClientApp from flwr.client.node_state import NodeState from flwr.common import DEFAULT_TTL, Message, Metadata, RecordSet -from flwr.common.constant import MessageType, MessageTypeLegacy +from flwr.common.constant import ( + NUM_PARTITIONS_KEY, + PARTITION_ID_KEY, + MessageType, + MessageTypeLegacy, +) from flwr.common.logger import log from flwr.common.recordset_compat import ( evaluateins_to_recordset, @@ -43,17 +48,30 @@ class RayActorClientProxy(ClientProxy): """Flower client proxy which delegates work using Ray.""" - def __init__( - self, client_fn: ClientFn, cid: str, actor_pool: VirtualClientEngineActorPool + def __init__( # pylint: disable=too-many-arguments,too-many-positional-arguments + self, + client_fn: ClientFnExt, + node_id: int, + partition_id: int, + num_partitions: int, + actor_pool: VirtualClientEngineActorPool, ): - super().__init__(cid) + super().__init__(cid=str(node_id)) + self.node_id = node_id + self.partition_id = partition_id def _load_app() -> ClientApp: return ClientApp(client_fn=client_fn) self.app_fn = _load_app self.actor_pool = actor_pool - self.proxy_state = NodeState() + self.proxy_state = NodeState( + node_id=node_id, + node_config={ + PARTITION_ID_KEY: str(partition_id), + NUM_PARTITIONS_KEY: str(num_partitions), + }, + ) def _submit_job(self, message: Message, timeout: 
Optional[float]) -> Message: """Sumbit a message to the ActorPool.""" @@ -62,16 +80,19 @@ def _submit_job(self, message: Message, timeout: Optional[float]) -> Message: # Register state self.proxy_state.register_context(run_id=run_id) - # Retrieve state - state = self.proxy_state.retrieve_context(run_id=run_id) + # Retrieve context + context = self.proxy_state.retrieve_context(run_id=run_id) + partition_id_str = str(context.node_config[PARTITION_ID_KEY]) try: self.actor_pool.submit_client_job( - lambda a, a_fn, mssg, cid, state: a.run.remote(a_fn, mssg, cid, state), - (self.app_fn, message, self.cid, state), + lambda a, a_fn, mssg, partition_id, context: a.run.remote( + a_fn, mssg, partition_id, context + ), + (self.app_fn, message, partition_id_str, context), ) out_mssg, updated_context = self.actor_pool.get_client_result( - self.cid, timeout + partition_id_str, timeout ) # Update state @@ -103,11 +124,10 @@ def _wrap_recordset_in_message( message_id="", group_id=str(group_id) if group_id is not None else "", src_node_id=0, - dst_node_id=int(self.cid), + dst_node_id=self.node_id, reply_to_message="", ttl=timeout if timeout else DEFAULT_TTL, message_type=message_type, - partition_id=int(self.cid), ), ) diff --git a/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py b/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py index 9680b3846f1d..ce0ef46d135f 100644 --- a/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py +++ b/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py @@ -17,12 +17,12 @@ from math import pi from random import shuffle -from typing import Dict, List, Tuple, Type import ray from flwr.client import Client, NumPyClient from flwr.client.client_app import ClientApp +from flwr.client.node_state import NodeState from flwr.common import ( DEFAULT_TTL, Config, @@ -34,11 +34,16 @@ RecordSet, Scalar, ) +from flwr.common.constant import NUM_PARTITIONS_KEY, PARTITION_ID_KEY from flwr.common.recordset_compat import ( 
getpropertiesins_to_recordset, recordset_to_getpropertiesres, ) from flwr.common.recordset_compat_test import _get_valid_getpropertiesins +from flwr.simulation.app import ( + NodeToPartitionMapping, + _create_node_id_to_partition_mapping, +) from flwr.simulation.ray_transport.ray_actor import ( ClientAppActor, VirtualClientEngineActor, @@ -50,32 +55,34 @@ class DummyClient(NumPyClient): """A dummy NumPyClient for tests.""" - def __init__(self, cid: str) -> None: - self.cid = int(cid) + def __init__(self, node_id: int, state: RecordSet) -> None: + self.node_id = node_id + self.client_state = state - def get_properties(self, config: Config) -> Dict[str, Scalar]: + def get_properties(self, config: Config) -> dict[str, Scalar]: """Return properties by doing a simple calculation.""" - result = int(self.cid) * pi - + result = self.node_id * pi # store something in context - self.context.state.configs_records["result"] = ConfigsRecord( + self.client_state.configs_records["result"] = ConfigsRecord( {"result": str(result)} ) return {"result": result} -def get_dummy_client(cid: str) -> Client: +def get_dummy_client(context: Context) -> Client: """Return a DummyClient converted to Client type.""" - return DummyClient(cid).to_client() + return DummyClient(context.node_id, state=context.state).to_client() def prep( - actor_type: Type[VirtualClientEngineActor] = ClientAppActor, -) -> Tuple[List[RayActorClientProxy], VirtualClientEngineActorPool]: # pragma: no cover + actor_type: type[VirtualClientEngineActor] = ClientAppActor, +) -> tuple[ + list[RayActorClientProxy], VirtualClientEngineActorPool, NodeToPartitionMapping +]: # pragma: no cover """Prepare ClientProxies and pool for tests.""" client_resources = {"num_cpus": 1, "num_gpus": 0.0} - def create_actor_fn() -> Type[VirtualClientEngineActor]: + def create_actor_fn() -> type[VirtualClientEngineActor]: return actor_type.options(**client_resources).remote() # type: ignore # Create actor pool @@ -87,16 +94,19 @@ def 
create_actor_fn() -> Type[VirtualClientEngineActor]: # Create 373 client proxies num_proxies = 373 # a prime number + mapping = _create_node_id_to_partition_mapping(num_proxies) proxies = [ RayActorClientProxy( client_fn=get_dummy_client, - cid=str(cid), + node_id=node_id, + partition_id=partition_id, + num_partitions=num_proxies, actor_pool=pool, ) - for cid in range(num_proxies) + for node_id, partition_id in mapping.items() ] - return proxies, pool + return proxies, pool, mapping def test_cid_consistency_one_at_a_time() -> None: @@ -104,7 +114,7 @@ def test_cid_consistency_one_at_a_time() -> None: Submit one job and waits for completion. Then submits the next and so on """ - proxies, _ = prep() + proxies, _, _ = prep() getproperties_ins = _get_valid_getpropertiesins() recordset = getpropertiesins_to_recordset(getproperties_ins) @@ -123,7 +133,7 @@ def test_cid_consistency_one_at_a_time() -> None: res = recordset_to_getpropertiesres(message_out.content) - assert int(prox.cid) * pi == res.properties["result"] + assert int(prox.node_id) * pi == res.properties["result"] ray.shutdown() @@ -134,7 +144,7 @@ def test_cid_consistency_all_submit_first_run_consistency() -> None: All jobs are submitted at the same time. Then fetched one at a time. This also tests NodeState (at each Proxy) and RunState basic functionality. 
""" - proxies, _ = prep() + proxies, _, _ = prep() run_id = 0 getproperties_ins = _get_valid_getpropertiesins() @@ -156,34 +166,43 @@ def test_cid_consistency_all_submit_first_run_consistency() -> None: ) prox.actor_pool.submit_client_job( lambda a, a_fn, mssg, cid, state: a.run.remote(a_fn, mssg, cid, state), - (prox.app_fn, message, prox.cid, state), + (prox.app_fn, message, str(prox.node_id), state), ) # fetch results one at a time shuffle(proxies) for prox in proxies: message_out, updated_context = prox.actor_pool.get_client_result( - prox.cid, timeout=None + str(prox.node_id), timeout=None ) prox.proxy_state.update_context(run_id, context=updated_context) res = recordset_to_getpropertiesres(message_out.content) - assert int(prox.cid) * pi == res.properties["result"] + assert prox.node_id * pi == res.properties["result"] assert ( - str(int(prox.cid) * pi) + str(prox.node_id * pi) == prox.proxy_state.retrieve_context(run_id).state.configs_records[ "result" ]["result"] ) - ray.shutdown() def test_cid_consistency_without_proxies() -> None: """Test cid consistency of jobs submitted/retrieved to/from pool w/o ClientProxy.""" - proxies, pool = prep() - num_clients = len(proxies) - cids = [str(cid) for cid in range(num_clients)] + _, pool, mapping = prep() + node_ids = list(mapping.keys()) + + # register node states + node_states: dict[int, NodeState] = {} + for node_id, partition_id in mapping.items(): + node_states[node_id] = NodeState( + node_id=node_id, + node_config={ + PARTITION_ID_KEY: str(partition_id), + NUM_PARTITIONS_KEY: str(len(node_ids)), + }, + ) getproperties_ins = _get_valid_getpropertiesins() recordset = getpropertiesins_to_recordset(getproperties_ins) @@ -192,32 +211,37 @@ def _load_app() -> ClientApp: return ClientApp(client_fn=get_dummy_client) # submit all jobs (collect later) - shuffle(cids) - for cid in cids: + shuffle(node_ids) + run_id = 0 + for node_id in node_ids: message = Message( content=recordset, metadata=Metadata( - run_id=0, + 
run_id=run_id, message_id="", group_id=str(0), src_node_id=0, - dst_node_id=12345, + dst_node_id=node_id, reply_to_message="", ttl=DEFAULT_TTL, message_type=MessageTypeLegacy.GET_PROPERTIES, - partition_id=int(cid), ), ) + # register and retrieve context + node_states[node_id].register_context(run_id=run_id) + context = node_states[node_id].retrieve_context(run_id=run_id) + partition_id_str = str(context.node_config[PARTITION_ID_KEY]) pool.submit_client_job( - lambda a, c_fn, j_fn, cid_, state: a.run.remote(c_fn, j_fn, cid_, state), - (_load_app, message, cid, Context(state=RecordSet())), + lambda a, c_fn, j_fn, nid_, state: a.run.remote(c_fn, j_fn, nid_, state), + (_load_app, message, partition_id_str, context), ) # fetch results one at a time - shuffle(cids) - for cid in cids: - message_out, _ = pool.get_client_result(cid, timeout=None) + shuffle(node_ids) + for node_id in node_ids: + partition_id_str = str(mapping[node_id]) + message_out, _ = pool.get_client_result(partition_id_str, timeout=None) res = recordset_to_getpropertiesres(message_out.content) - assert int(cid) * pi == res.properties["result"] + assert node_id * pi == res.properties["result"] ray.shutdown() diff --git a/src/py/flwr/simulation/run_simulation.py b/src/py/flwr/simulation/run_simulation.py index 2dbeef1a261c..8c4e42c34744 100644 --- a/src/py/flwr/simulation/run_simulation.py +++ b/src/py/flwr/simulation/run_simulation.py @@ -18,54 +18,220 @@ import asyncio import json import logging +import sys import threading import traceback +from argparse import Namespace from logging import DEBUG, ERROR, INFO, WARNING +from pathlib import Path from time import sleep -from typing import Dict, Optional +from typing import Any, Optional +from flwr.cli.config_utils import load_and_validate from flwr.client import ClientApp from flwr.common import EventType, event, log -from flwr.common.logger import set_logger_propagation, update_console_handler -from flwr.common.typing import ConfigsRecordValues +from 
flwr.common.config import get_fused_config_from_dir, parse_config_args +from flwr.common.constant import RUN_ID_NUM_BYTES +from flwr.common.logger import ( + set_logger_propagation, + update_console_handler, + warn_deprecated_feature, + warn_deprecated_feature_with_example, +) +from flwr.common.typing import Run, UserConfig from flwr.server.driver import Driver, InMemoryDriver -from flwr.server.run_serverapp import run +from flwr.server.run_serverapp import run as run_server_app from flwr.server.server_app import ServerApp from flwr.server.superlink.fleet import vce +from flwr.server.superlink.fleet.vce.backend.backend import BackendConfig from flwr.server.superlink.state import StateFactory +from flwr.server.superlink.state.utils import generate_rand_int_from_bytes from flwr.simulation.ray_transport.utils import ( enable_tf_gpu_growth as enable_gpu_growth, ) +def _check_args_do_not_interfere(args: Namespace) -> bool: + """Ensure decoupling of flags for different ways to start the simulation.""" + mode_one_args = ["app", "run_config"] + mode_two_args = ["client_app", "server_app"] + + def _resolve_message(conflict_keys: list[str]) -> str: + return ",".join([f"`--{key}`".replace("_", "-") for key in conflict_keys]) + + # When passing `--app`, `--app-dir` is ignored + if args.app and args.app_dir: + log(ERROR, "Either `--app` or `--app-dir` can be set, but not both.") + return False + + if any(getattr(args, key) for key in mode_one_args): + if any(getattr(args, key) for key in mode_two_args): + log( + ERROR, + "Passing any of {%s} alongside with any of {%s}", + _resolve_message(mode_one_args), + _resolve_message(mode_two_args), + ) + return False + + if not args.app: + log(ERROR, "You need to pass --app") + return False + + return True + + # Ensure all args are set (required for the non-FAB mode of execution) + if not all(getattr(args, key) for key in mode_two_args): + log( + ERROR, + "Passing all of %s keys are required.", + _resolve_message(mode_two_args), + ) + 
return False + + return True + + +def _replace_keys(d: Any, match: str, target: str) -> Any: + if isinstance(d, dict): + return { + k.replace(match, target): _replace_keys(v, match, target) + for k, v in d.items() + } + if isinstance(d, list): + return [_replace_keys(i, match, target) for i in d] + return d + + # Entry point from CLI +# pylint: disable=too-many-locals def run_simulation_from_cli() -> None: """Run Simulation Engine from the CLI.""" args = _parse_args_run_simulation().parse_args() + event( + EventType.CLI_FLOWER_SIMULATION_ENTER, + event_details={"backend": args.backend, "num-supernodes": args.num_supernodes}, + ) + + # Add warnings for deprecated server_app and client_app arguments + if args.server_app: + warn_deprecated_feature( + "The `--server-app` argument is deprecated. " + "Please use the `--app` argument instead." + ) + + if args.client_app: + warn_deprecated_feature( + "The `--client-app` argument is deprecated. " + "Use the `--app` argument instead." + ) + + if args.enable_tf_gpu_growth: + warn_deprecated_feature_with_example( + "Passing `--enable-tf-gpu-growth` is deprecated.", + example_message="Instead, set the `TF_FORCE_GPU_ALLOW_GROWTH` environmnet " + "variable to true.", + code_example='TF_FORCE_GPU_ALLOW_GROWTH="true" flower-simulation <...>', + ) + # Load JSON config backend_config_dict = json.loads(args.backend_config) + if backend_config_dict: + # Backend config internally operates with `_` not with `-` + backend_config_dict = _replace_keys(backend_config_dict, match="-", target="_") + log(DEBUG, "backend_config_dict: %s", backend_config_dict) + + # We are supporting two modes for the CLI entrypoint: + # 1) Running an app dir containing a `pyproject.toml` + # 2) Running any ClientApp and SeverApp w/o pyproject.toml being present + # For 2), some CLI args are compulsory, but they are not required for 1) + # We first do these checks + args_check_pass = _check_args_do_not_interfere(args) + if not args_check_pass: + 
sys.exit("Simulation Engine cannot start.") + + run_id = ( + generate_rand_int_from_bytes(RUN_ID_NUM_BYTES) + if args.run_id is None + else args.run_id + ) + if args.app: + # Mode 1 + app_path = Path(args.app) + if not app_path.is_dir(): + log(ERROR, "--app is not a directory") + sys.exit("Simulation Engine cannot start.") + + # Load pyproject.toml + config, errors, warnings = load_and_validate( + app_path / "pyproject.toml", check_module=False + ) + if errors: + raise ValueError(errors) + + if warnings: + log(WARNING, warnings) + + if config is None: + raise ValueError("Config extracted from FAB's pyproject.toml is not valid") + + # Get ClientApp and SeverApp components + app_components = config["tool"]["flwr"]["app"]["components"] + client_app_attr = app_components["clientapp"] + server_app_attr = app_components["serverapp"] + + override_config = parse_config_args( + [args.run_config] if args.run_config else args.run_config + ) + fused_config = get_fused_config_from_dir(app_path, override_config) + app_dir = args.app + is_app = True + + else: + # Mode 2 + client_app_attr = args.client_app + server_app_attr = args.server_app + override_config = {} + fused_config = None + app_dir = args.app_dir + is_app = False + + # Create run + run = Run( + run_id=run_id, + fab_id="", + fab_version="", + fab_hash="", + override_config=override_config, + ) + _run_simulation( - server_app_attr=args.server_app, - client_app_attr=args.client_app, + server_app_attr=server_app_attr, + client_app_attr=client_app_attr, num_supernodes=args.num_supernodes, backend_name=args.backend, backend_config=backend_config_dict, - app_dir=args.app_dir, + app_dir=app_dir, + run=run, enable_tf_gpu_growth=args.enable_tf_gpu_growth, + delay_start=args.delay_start, verbose_logging=args.verbose, + server_app_run_config=fused_config, + is_app=is_app, + exit_event=EventType.CLI_FLOWER_SIMULATION_LEAVE, ) # Entry point from Python session (script or notebook) -# pylint: disable=too-many-arguments +# pylint: 
disable=too-many-arguments,too-many-positional-arguments def run_simulation( server_app: ServerApp, client_app: ClientApp, num_supernodes: int, backend_name: str = "ray", - backend_config: Optional[Dict[str, ConfigsRecordValues]] = None, + backend_config: Optional[BackendConfig] = None, enable_tf_gpu_growth: bool = False, verbose_logging: bool = False, ) -> None: @@ -82,16 +248,18 @@ def run_simulation( messages sent by the `ServerApp`. num_supernodes : int - Number of nodes that run a ClientApp. They can be sampled by a - Driver in the ServerApp and receive a Message describing what the ClientApp - should perform. + Number of nodes that run a ClientApp. They can be sampled by a Driver in the + ServerApp and receive a Message describing what the ClientApp should perform. backend_name : str (default: ray) A simulation backend that runs `ClientApp`s. - backend_config : Optional[Dict[str, ConfigsRecordValues]] - 'A dictionary, e.g {"": , "": } to configure a - backend. Values supported in are those included by + backend_config : Optional[BackendConfig] + 'A dictionary to configure a backend. Separate dictionaries to configure + different elements of backend. Supported top-level keys are `init_args` + for values parsed to initialisation of backend, `client_resources` + to define the resources for clients, and `actor` to define the actor + parameters. Values supported in are those included by `flwr.common.typing.ConfigsRecordValues`. enable_tf_gpu_growth : bool (default: False) @@ -103,9 +271,23 @@ def run_simulation( works in the TensorFlow documentation: https://www.tensorflow.org/api/stable. verbose_logging : bool (default: False) - When diabled, only INFO, WARNING and ERROR log messages will be shown. If + When disabled, only INFO, WARNING and ERROR log messages will be shown. If enabled, DEBUG-level logs will be displayed. 
""" + event( + EventType.PYTHON_API_RUN_SIMULATION_ENTER, + event_details={"backend": backend_name, "num-supernodes": num_supernodes}, + ) + + if enable_tf_gpu_growth: + warn_deprecated_feature_with_example( + "Passing `enable_tf_gpu_growth=True` is deprecated.", + example_message="Instead, set the `TF_FORCE_GPU_ALLOW_GROWTH` environment " + "variable to true.", + code_example='import os;os.environ["TF_FORCE_GPU_ALLOW_GROWTH"]="true"' + "\n\tflwr.simulation.run_simulationt(...)", + ) + _run_simulation( num_supernodes=num_supernodes, client_app=client_app, @@ -114,38 +296,55 @@ def run_simulation( backend_config=backend_config, enable_tf_gpu_growth=enable_tf_gpu_growth, verbose_logging=verbose_logging, + exit_event=EventType.PYTHON_API_RUN_SIMULATION_LEAVE, ) -# pylint: disable=too-many-arguments +# pylint: disable=too-many-arguments,too-many-positional-arguments def run_serverapp_th( server_app_attr: Optional[str], server_app: Optional[ServerApp], + server_app_run_config: UserConfig, driver: Driver, app_dir: str, - f_stop: asyncio.Event, + f_stop: threading.Event, + has_exception: threading.Event, enable_tf_gpu_growth: bool, - delay_launch: int = 3, ) -> threading.Thread: """Run SeverApp in a thread.""" - def server_th_with_start_checks( # type: ignore - tf_gpu_growth: bool, stop_event: asyncio.Event, **kwargs + def server_th_with_start_checks( + tf_gpu_growth: bool, + stop_event: threading.Event, + exception_event: threading.Event, + _driver: Driver, + _server_app_dir: str, + _server_app_run_config: UserConfig, + _server_app_attr: Optional[str], + _server_app: Optional[ServerApp], ) -> None: - """Run SeverApp, after check if GPU memory grouwth has to be set. + """Run SeverApp, after check if GPU memory growth has to be set. Upon exception, trigger stop event for Simulation Engine. 
""" try: if tf_gpu_growth: - log(INFO, "Enabling GPU growth for Tensorflow on the main thread.") + log(INFO, "Enabling GPU growth for Tensorflow on the server thread.") enable_gpu_growth() # Run ServerApp - run(**kwargs) + run_server_app( + driver=_driver, + server_app_dir=_server_app_dir, + server_app_run_config=_server_app_run_config, + server_app_attr=_server_app_attr, + loaded_server_app=_server_app, + ) except Exception as ex: # pylint: disable=broad-exception-caught log(ERROR, "ServerApp thread raised an exception: %s", ex) log(ERROR, traceback.format_exc()) + exception_event.set() + raise finally: log(DEBUG, "ServerApp finished running.") # Upon completion, trigger stop event if one was passed @@ -155,58 +354,75 @@ def server_th_with_start_checks( # type: ignore serverapp_th = threading.Thread( target=server_th_with_start_checks, - args=(enable_tf_gpu_growth, f_stop), - kwargs={ - "server_app_attr": server_app_attr, - "loaded_server_app": server_app, - "driver": driver, - "server_app_dir": app_dir, - }, + args=( + enable_tf_gpu_growth, + f_stop, + has_exception, + driver, + app_dir, + server_app_run_config, + server_app_attr, + server_app, + ), ) - sleep(delay_launch) serverapp_th.start() return serverapp_th -# pylint: disable=too-many-locals +# pylint: disable=too-many-locals,too-many-positional-arguments def _main_loop( num_supernodes: int, backend_name: str, backend_config_stream: str, app_dir: str, + is_app: bool, enable_tf_gpu_growth: bool, + run: Run, + exit_event: EventType, + delay_start: int, + flwr_dir: Optional[str] = None, client_app: Optional[ClientApp] = None, client_app_attr: Optional[str] = None, server_app: Optional[ServerApp] = None, server_app_attr: Optional[str] = None, + server_app_run_config: Optional[UserConfig] = None, ) -> None: - """Launch SuperLink with Simulation Engine, then ServerApp on a separate thread. 
- - Everything runs on the main thread or a separate one, depening on whether the main - thread already contains a running Asyncio event loop. This is the case if running - the Simulation Engine on a Jupyter/Colab notebook. - """ + """Start ServerApp on a separate thread, then launch Simulation Engine.""" # Initialize StateFactory state_factory = StateFactory(":flwr-in-memory-state:") - f_stop = asyncio.Event() + f_stop = threading.Event() + # A Threading event to indicate if an exception was raised in the ServerApp thread + server_app_thread_has_exception = threading.Event() serverapp_th = None + success = True try: + # Register run + log(DEBUG, "Pre-registering run with id %s", run.run_id) + state_factory.state().run_ids[run.run_id] = run # type: ignore + + if server_app_run_config is None: + server_app_run_config = {} + # Initialize Driver - driver = InMemoryDriver(state_factory) + driver = InMemoryDriver(run_id=run.run_id, state_factory=state_factory) # Get and run ServerApp thread serverapp_th = run_serverapp_th( server_app_attr=server_app_attr, server_app=server_app, + server_app_run_config=server_app_run_config, driver=driver, app_dir=app_dir, f_stop=f_stop, + has_exception=server_app_thread_has_exception, enable_tf_gpu_growth=enable_tf_gpu_growth, ) - # SuperLink with Simulation Engine - event(EventType.RUN_SUPERLINK_ENTER) + # Buffer time so the `ServerApp` in separate thread is ready + log(DEBUG, "Buffer time delay: %ds", delay_start) + sleep(delay_start) + # Start Simulation Engine vce.start_vce( num_supernodes=num_supernodes, client_app_attr=client_app_attr, @@ -214,145 +430,132 @@ def _main_loop( backend_name=backend_name, backend_config_json_stream=backend_config_stream, app_dir=app_dir, + is_app=is_app, state_factory=state_factory, f_stop=f_stop, + run=run, + flwr_dir=flwr_dir, ) except Exception as ex: log(ERROR, "An exception occurred !! %s", ex) log(ERROR, traceback.format_exc()) + success = False raise RuntimeError("An error was encountered. 
Ending simulation.") from ex finally: # Trigger stop event f_stop.set() - - event(EventType.RUN_SUPERLINK_LEAVE) + event(exit_event, event_details={"success": success}) if serverapp_th: serverapp_th.join() + if server_app_thread_has_exception.is_set(): + raise RuntimeError("Exception in ServerApp thread") log(DEBUG, "Stopping Simulation Engine now.") -# pylint: disable=too-many-arguments,too-many-locals +# pylint: disable=too-many-arguments,too-many-locals,too-many-positional-arguments def _run_simulation( num_supernodes: int, + exit_event: EventType, client_app: Optional[ClientApp] = None, server_app: Optional[ServerApp] = None, backend_name: str = "ray", - backend_config: Optional[Dict[str, ConfigsRecordValues]] = None, + backend_config: Optional[BackendConfig] = None, client_app_attr: Optional[str] = None, server_app_attr: Optional[str] = None, + server_app_run_config: Optional[UserConfig] = None, app_dir: str = "", + flwr_dir: Optional[str] = None, + run: Optional[Run] = None, enable_tf_gpu_growth: bool = False, + delay_start: int = 5, verbose_logging: bool = False, + is_app: bool = False, ) -> None: - r"""Launch the Simulation Engine. - - Parameters - ---------- - num_supernodes : int - Number of nodes that run a ClientApp. They can be sampled by a - Driver in the ServerApp and receive a Message describing what the ClientApp - should perform. - - client_app : Optional[ClientApp] - The `ClientApp` to be executed by each of the `SuperNodes`. It will receive - messages sent by the `ServerApp`. - - server_app : Optional[ServerApp] - The `ServerApp` to be executed. - - backend_name : str (default: ray) - A simulation backend that runs `ClientApp`s. - - backend_config : Optional[Dict[str, ConfigsRecordValues]] - 'A dictionary, e.g {"":, "":} to configure a - backend. Values supported in are those included by - `flwr.common.typing.ConfigsRecordValues`. 
- - client_app_attr : str - A path to a `ClientApp` module to be loaded: For example: `client:app` or - `project.package.module:wrapper.app`." - - server_app_attr : str - A path to a `ServerApp` module to be loaded: For example: `server:app` or - `project.package.module:wrapper.app`." + """Launch the Simulation Engine.""" + if backend_config is None: + backend_config = {} - app_dir : str - Add specified directory to the PYTHONPATH and load `ClientApp` from there. - (Default: current working directory.) + if "init_args" not in backend_config: + backend_config["init_args"] = {} - enable_tf_gpu_growth : bool (default: False) - A boolean to indicate whether to enable GPU growth on the main thread. This is - desirable if you make use of a TensorFlow model on your `ServerApp` while - having your `ClientApp` running on the same GPU. Without enabling this, you - might encounter an out-of-memory error becasue TensorFlow by default allocates - all GPU memory. Read mor about how `tf.config.experimental.set_memory_growth()` - works in the TensorFlow documentation: https://www.tensorflow.org/api/stable. + # Set default client_resources if not passed + if "client_resources" not in backend_config: + backend_config["client_resources"] = {"num_cpus": 2, "num_gpus": 0} - verbose_logging : bool (default: False) - When diabled, only INFO, WARNING and ERROR log messages will be shown. If - enabled, DEBUG-level logs will be displayed. 
- """ - if backend_config is None: - backend_config = {} + # Initialization of backend config to enable GPU growth globally when set + if "actor" not in backend_config: + backend_config["actor"] = {"tensorflow": 0} # Set logging level logger = logging.getLogger("flwr") if verbose_logging: update_console_handler(level=DEBUG, timestamps=True, colored=True) else: - backend_config["silent"] = True + backend_config["init_args"]["logging_level"] = backend_config["init_args"].get( + "logging_level", WARNING + ) + backend_config["init_args"]["log_to_driver"] = backend_config["init_args"].get( + "log_to_driver", True + ) if enable_tf_gpu_growth: # Check that Backend config has also enabled using GPU growth - use_tf = backend_config.get("tensorflow", False) + use_tf = backend_config.get("actor", {}).get("tensorflow", False) if not use_tf: log(WARNING, "Enabling GPU growth for your backend.") - backend_config["tensorflow"] = True + backend_config["actor"]["tensorflow"] = True # Convert config to original JSON-stream format backend_config_stream = json.dumps(backend_config) - simulation_engine_th = None + # If no `Run` object is set, create one + if run is None: + run_id = generate_rand_int_from_bytes(RUN_ID_NUM_BYTES) + run = Run( + run_id=run_id, fab_id="", fab_version="", fab_hash="", override_config={} + ) + args = ( num_supernodes, backend_name, backend_config_stream, app_dir, + is_app, enable_tf_gpu_growth, + run, + exit_event, + delay_start, + flwr_dir, client_app, client_app_attr, server_app, server_app_attr, + server_app_run_config, ) # Detect if there is an Asyncio event loop already running. - # If yes, run everything on a separate thread. In environmnets - # like Jupyter/Colab notebooks, there is an event loop present. - run_in_thread = False + # If yes, disable logger propagation. In environmnets + # like Jupyter/Colab notebooks, it's often better to do this. 
+ asyncio_loop_running = False try: _ = ( asyncio.get_running_loop() ) # Raises RuntimeError if no event loop is present log(DEBUG, "Asyncio event loop already running.") - run_in_thread = True + asyncio_loop_running = True except RuntimeError: - log(DEBUG, "No asyncio event loop runnig") + pass finally: - if run_in_thread: + if asyncio_loop_running: # Set logger propagation to False to prevent duplicated log output in Colab. logger = set_logger_propagation(logger, False) - log(DEBUG, "Starting Simulation Engine on a new thread.") - simulation_engine_th = threading.Thread(target=_main_loop, args=args) - simulation_engine_th.start() - simulation_engine_th.join() - else: - log(DEBUG, "Starting Simulation Engine on the main thread.") - _main_loop(*args) + + _main_loop(*args) def _parse_args_run_simulation() -> argparse.ArgumentParser: @@ -360,15 +563,22 @@ def _parse_args_run_simulation() -> argparse.ArgumentParser: parser = argparse.ArgumentParser( description="Start a Flower simulation", ) + parser.add_argument( + "--app", + type=str, + default=None, + help="Path to a directory containing a FAB-like structure with a " + "pyproject.toml.", + ) parser.add_argument( "--server-app", - required=True, - help="For example: `server:app` or `project.package.module:wrapper.app`", + help="(DEPRECATED: use --app instead) For example: `server:app` or " + "`project.package.module:wrapper.app`", ) parser.add_argument( "--client-app", - required=True, - help="For example: `client:app` or `project.package.module:wrapper.app`", + help="(DEPRECATED: use --app instead) For example: `client:app` or " + "`project.package.module:wrapper.app`", ) parser.add_argument( "--num-supernodes", @@ -376,6 +586,11 @@ def _parse_args_run_simulation() -> argparse.ArgumentParser: required=True, help="Number of simulated SuperNodes.", ) + parser.add_argument( + "--run-config", + default=None, + help="Override configuration key-value pairs.", + ) parser.add_argument( "--backend", default="ray", @@ -385,7 
+600,7 @@ def _parse_args_run_simulation() -> argparse.ArgumentParser: parser.add_argument( "--backend-config", type=str, - default='{"client_resources": {"num_cpus":2, "num_gpus":0.0}, "tensorflow": 0}', + default="{}", help='A JSON formatted stream, e.g \'{"":, "":}\' to ' "configure a backend. Values supported in are those included by " "`flwr.common.typing.ConfigsRecordValues`. ", @@ -400,6 +615,13 @@ def _parse_args_run_simulation() -> argparse.ArgumentParser: "Read more about how `tf.config.experimental.set_memory_growth()` works in " "the TensorFlow documentation: https://www.tensorflow.org/api/stable.", ) + parser.add_argument( + "--delay-start", + type=int, + default=3, + help="Buffer time (in seconds) to delay the start the simulation engine after " + "the `ServerApp`, which runs in a separate thread, has been launched.", + ) parser.add_argument( "--verbose", action="store_true", @@ -413,5 +635,21 @@ def _parse_args_run_simulation() -> argparse.ArgumentParser: "ClientApp and ServerApp from there." " Default: current working directory.", ) + parser.add_argument( + "--flwr-dir", + default=None, + help="""The path containing installed Flower Apps. + By default, this value is equal to: + + - `$FLWR_HOME/` if `$FLWR_HOME` is defined + - `$XDG_DATA_HOME/.flwr/` if `$XDG_DATA_HOME` is defined + - `$HOME/.flwr/` in all other cases + """, + ) + parser.add_argument( + "--run-id", + type=int, + help="Sets the ID of the run started by the Simulation Engine.", + ) return parser diff --git a/src/py/flwr/superexec/__init__.py b/src/py/flwr/superexec/__init__.py new file mode 100644 index 000000000000..0584ca663a02 --- /dev/null +++ b/src/py/flwr/superexec/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Flower SuperExec service.""" diff --git a/src/py/flwr/superexec/app.py b/src/py/flwr/superexec/app.py new file mode 100644 index 000000000000..c00aa0f88e7b --- /dev/null +++ b/src/py/flwr/superexec/app.py @@ -0,0 +1,186 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Flower SuperExec app.""" + +import argparse +import sys +from logging import INFO, WARN +from pathlib import Path +from typing import Optional + +import grpc + +from flwr.common import EventType, event, log +from flwr.common.address import parse_address +from flwr.common.config import parse_config_args +from flwr.common.constant import EXEC_API_DEFAULT_ADDRESS +from flwr.common.exit_handlers import register_exit_handlers +from flwr.common.object_ref import load_app, validate + +from .exec_grpc import run_superexec_api_grpc +from .executor import Executor + + +def run_superexec() -> None: + """Run Flower SuperExec.""" + log(INFO, "Starting Flower SuperExec") + + event(EventType.RUN_SUPEREXEC_ENTER) + + args = _parse_args_run_superexec().parse_args() + + # Parse IP address + parsed_address = parse_address(args.address) + if not parsed_address: + sys.exit(f"SuperExec IP address ({args.address}) cannot be parsed.") + host, port, is_v6 = parsed_address + address = f"[{host}]:{port}" if is_v6 else f"{host}:{port}" + + # Obtain certificates + certificates = _try_obtain_certificates(args) + + # Start SuperExec API + superexec_server: grpc.Server = run_superexec_api_grpc( + address=address, + executor=_load_executor(args), + certificates=certificates, + config=parse_config_args( + [args.executor_config] if args.executor_config else args.executor_config + ), + ) + + grpc_servers = [superexec_server] + + # Graceful shutdown + register_exit_handlers( + event_type=EventType.RUN_SUPEREXEC_LEAVE, + grpc_servers=grpc_servers, + bckg_threads=None, + ) + + superexec_server.wait_for_termination() + + +def _parse_args_run_superexec() -> argparse.ArgumentParser: + """Parse command line arguments for SuperExec.""" + parser = argparse.ArgumentParser( + description="Start a Flower SuperExec", + ) + parser.add_argument( + "--address", + help="SuperExec (gRPC) server address (IPv4, IPv6, or a domain 
name)", + default=EXEC_API_DEFAULT_ADDRESS, + ) + parser.add_argument( + "--executor", + help="For example: `deployment:exec` or `project.package.module:wrapper.exec`.", + default="flwr.superexec.deployment:executor", + ) + parser.add_argument( + "--executor-dir", + help="The directory for the executor.", + default=".", + ) + parser.add_argument( + "--executor-config", + help="Key-value pairs for the executor config, separated by spaces. " + 'For example:\n\n`--executor-config \'superlink="superlink:9091" ' + 'root-certificates="certificates/superlink-ca.crt"\'`', + ) + parser.add_argument( + "--insecure", + action="store_true", + help="Run the SuperExec without HTTPS, regardless of whether certificate " + "paths are provided. By default, the server runs with HTTPS enabled. " + "Use this flag only if you understand the risks.", + ) + parser.add_argument( + "--ssl-certfile", + help="SuperExec server SSL certificate file (as a path str) " + "to create a secure connection.", + type=str, + default=None, + ) + parser.add_argument( + "--ssl-keyfile", + help="SuperExec server SSL private key file (as a path str) " + "to create a secure connection.", + type=str, + ) + parser.add_argument( + "--ssl-ca-certfile", + help="SuperExec server SSL CA certificate file (as a path str) " + "to create a secure connection.", + type=str, + ) + return parser + + +def _try_obtain_certificates( + args: argparse.Namespace, +) -> Optional[tuple[bytes, bytes, bytes]]: + # Obtain certificates + if args.insecure: + log(WARN, "Option `--insecure` was set. 
Starting insecure HTTP server.") + return None + # Check if certificates are provided + if args.ssl_certfile and args.ssl_keyfile and args.ssl_ca_certfile: + if not Path(args.ssl_ca_certfile).is_file(): + sys.exit("Path argument `--ssl-ca-certfile` does not point to a file.") + if not Path(args.ssl_certfile).is_file(): + sys.exit("Path argument `--ssl-certfile` does not point to a file.") + if not Path(args.ssl_keyfile).is_file(): + sys.exit("Path argument `--ssl-keyfile` does not point to a file.") + certificates = ( + Path(args.ssl_ca_certfile).read_bytes(), # CA certificate + Path(args.ssl_certfile).read_bytes(), # server certificate + Path(args.ssl_keyfile).read_bytes(), # server private key + ) + return certificates + if args.ssl_certfile or args.ssl_keyfile or args.ssl_ca_certfile: + sys.exit( + "You need to provide valid file paths to `--ssl-certfile`, " + "`--ssl-keyfile`, and `—-ssl-ca-certfile` to create a secure " + "connection in SuperExec server (gRPC-rere)." + ) + sys.exit( + "Certificates are required unless running in insecure mode. " + "Please provide certificate paths to `--ssl-certfile`, " + "`--ssl-keyfile`, and `—-ssl-ca-certfile` or run the server " + "in insecure mode using '--insecure' if you understand the risks." 
+ ) + + +def _load_executor( + args: argparse.Namespace, +) -> Executor: + """Get the executor plugin.""" + executor_ref: str = args.executor + valid, error_msg = validate(executor_ref, project_dir=args.executor_dir) + if not valid and error_msg: + raise LoadExecutorError(error_msg) from None + + executor = load_app(executor_ref, LoadExecutorError, args.executor_dir) + + if not isinstance(executor, Executor): + raise LoadExecutorError( + f"Attribute {executor_ref} is not of type {Executor}", + ) from None + + return executor + + +class LoadExecutorError(Exception): + """Error when trying to load `Executor`.""" diff --git a/src/py/flwr/superexec/deployment.py b/src/py/flwr/superexec/deployment.py new file mode 100644 index 000000000000..331fd817228e --- /dev/null +++ b/src/py/flwr/superexec/deployment.py @@ -0,0 +1,186 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
class DeploymentEngine(Executor):
    """Deployment engine executor.

    Starts runs against a real SuperLink: installs the FAB locally,
    registers the run via the Driver API, and launches
    ``flower-server-app`` as a subprocess.

    Parameters
    ----------
    superlink: str (default: "0.0.0.0:9091")
        Address of the SuperLink to connect to.
    root_certificates: Optional[str] (default: None)
        Specifies the path to the PEM-encoded root certificate file for
        establishing secure HTTPS connections.
    flwr_dir: Optional[str] (default: None)
        The path containing installed Flower Apps.
    """

    def __init__(
        self,
        superlink: str = DRIVER_API_DEFAULT_ADDRESS,
        root_certificates: Optional[str] = None,
        flwr_dir: Optional[str] = None,
    ) -> None:
        self.superlink = superlink
        if root_certificates is None:
            # Insecure mode: neither a path nor certificate bytes.
            self.root_certificates = None
            self.root_certificates_bytes = None
        else:
            # Keep both the path (forwarded to the subprocess CLI) and the
            # raw bytes (used when opening the gRPC channel).
            self.root_certificates = root_certificates
            self.root_certificates_bytes = Path(root_certificates).read_bytes()
        self.flwr_dir = flwr_dir
        # Driver API stub; created lazily on first use by `_connect`.
        self.stub: Optional[DriverStub] = None

    @override
    def set_config(
        self,
        config: UserConfig,
    ) -> None:
        """Set executor config arguments.

        Each supported key overrides the corresponding constructor value;
        keys that are absent (or falsy) leave the current value unchanged.

        Parameters
        ----------
        config : UserConfig
            A dictionary for configuration values.
            Supported configuration key/value pairs:
            - "superlink": str
                The address of the SuperLink Driver API.
            - "root-certificates": str
                The path to the root certificates.
            - "flwr-dir": str
                The path to the Flower directory.

        Raises
        ------
        ValueError
            If a provided value has the wrong type.
        """
        if not config:
            return
        if superlink_address := config.get("superlink"):
            if not isinstance(superlink_address, str):
                raise ValueError("The `superlink` value should be of type `str`.")
            self.superlink = superlink_address
        if root_certificates := config.get("root-certificates"):
            if not isinstance(root_certificates, str):
                raise ValueError(
                    "The `root-certificates` value should be of type `str`."
                )
            self.root_certificates = root_certificates
            # Re-read the bytes so the channel uses the new certificate.
            self.root_certificates_bytes = Path(str(root_certificates)).read_bytes()
        if flwr_dir := config.get("flwr-dir"):
            if not isinstance(flwr_dir, str):
                raise ValueError("The `flwr-dir` value should be of type `str`.")
            self.flwr_dir = str(flwr_dir)

    def _connect(self) -> None:
        """Open a channel to the SuperLink and cache the Driver API stub.

        No-op if a stub already exists.
        """
        if self.stub is not None:
            return
        channel = create_channel(
            server_address=self.superlink,
            # Secure iff certificate bytes were loaded in __init__/set_config.
            insecure=(self.root_certificates_bytes is None),
            root_certificates=self.root_certificates_bytes,
        )
        self.stub = DriverStub(channel)

    def _create_run(
        self,
        fab: Fab,
        override_config: UserConfig,
    ) -> int:
        """Register a new run on the SuperLink and return its run ID."""
        if self.stub is None:
            self._connect()

        # `_connect` always sets the stub; this assert narrows the type.
        assert self.stub is not None

        req = CreateRunRequest(
            fab=fab_to_proto(fab),
            override_config=user_config_to_proto(override_config),
        )
        res = self.stub.CreateRun(request=req)
        return int(res.run_id)

    @override
    def start_run(
        self,
        fab_file: bytes,
        override_config: UserConfig,
        federation_config: UserConfig,
    ) -> Optional[RunTracker]:
        """Start run using the Flower Deployment Engine.

        Returns a `RunTracker` for the spawned ``flower-server-app``
        subprocess, or `None` if anything fails (the error is logged).
        Note: `federation_config` is currently unused by this executor.
        """
        try:
            # Install FAB to flwr dir
            install_from_fab(fab_file, None, True)

            # Call SuperLink to create run; the FAB hash identifies the bundle
            run_id: int = self._create_run(
                Fab(hashlib.sha256(fab_file).hexdigest(), fab_file), override_config
            )
            log(INFO, "Created run %s", str(run_id))

            command = [
                "flower-server-app",
                "--run-id",
                str(run_id),
                "--superlink",
                str(self.superlink),
            ]

            if self.flwr_dir:
                command.append("--flwr-dir")
                command.append(self.flwr_dir)

            # Mirror this executor's TLS setting in the subprocess.
            if self.root_certificates is None:
                command.append("--insecure")
            else:
                command.append("--root-certificates")
                command.append(self.root_certificates)

            # Execute the command; stdout/stderr are piped so the servicer's
            # log-capture thread can stream them to clients.
            proc = subprocess.Popen(  # pylint: disable=consider-using-with
                command,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
            )
            log(INFO, "Started run %s", str(run_id))

            return RunTracker(
                run_id=run_id,
                proc=proc,
            )
        # pylint: disable-next=broad-except
        except Exception as e:
            # Best-effort contract: failures are logged, not raised.
            log(ERROR, "Could not start run: %s", str(e))
            return None


# Default executor instance loaded by `flower-superexec`.
executor = DeploymentEngine()
def run_superexec_api_grpc(
    address: str,
    executor: Executor,
    certificates: Optional[tuple[bytes, bytes, bytes]],
    config: UserConfig,
) -> grpc.Server:
    """Run SuperExec API (gRPC, request-response).

    Parameters
    ----------
    address : str
        Address (host:port) to bind the gRPC server to.
    executor : Executor
        Executor plugin that will run submitted Flower runs.
    certificates : Optional[tuple[bytes, bytes, bytes]]
        CA certificate, server certificate, and server private key bytes,
        or `None` to start an insecure server.
    config : UserConfig
        Executor configuration, forwarded via `Executor.set_config`.

    Returns
    -------
    grpc.Server
        The started gRPC server.
    """
    # Configure the executor before accepting any requests.
    executor.set_config(config)

    # Fix: the servicer is an `ExecServicer`, not a `grpc.Server` —
    # the previous annotation was simply wrong.
    exec_servicer: ExecServicer = ExecServicer(
        executor=executor,
    )
    superexec_add_servicer_to_server_fn = add_ExecServicer_to_server
    superexec_grpc_server = generic_create_grpc_server(
        servicer_and_add_fn=(exec_servicer, superexec_add_servicer_to_server_fn),
        server_address=address,
        max_message_length=GRPC_MAX_MESSAGE_LENGTH,
        certificates=certificates,
    )

    log(INFO, "Starting Flower SuperExec gRPC server on %s", address)
    superexec_grpc_server.start()

    return superexec_grpc_server
# Timeout for selecting ready-to-read file descriptors (in seconds)
SELECT_TIMEOUT = 1


class ExecServicer(exec_pb2_grpc.ExecServicer):
    """SuperExec API servicer.

    Bridges gRPC requests to the configured `Executor` plugin and streams
    captured subprocess logs back to clients.
    """

    def __init__(self, executor: Executor) -> None:
        self.executor = executor
        # Maps run_id -> RunTracker for every run started via this servicer.
        self.runs: dict[int, RunTracker] = {}

    def StartRun(
        self, request: StartRunRequest, context: grpc.ServicerContext
    ) -> StartRunResponse:
        """Create run ID.

        Delegates to the executor; on success, spawns a daemon thread that
        captures the run's stdout/stderr into `RunTracker.logs`.
        """
        log(INFO, "ExecServicer.StartRun")

        run = self.executor.start_run(
            request.fab.content,
            user_config_from_proto(request.override_config),
            user_config_from_proto(request.federation_config),
        )

        if run is None:
            log(ERROR, "Executor failed to start run")
            # An empty response (run_id unset) signals failure to the client.
            return StartRunResponse()

        self.runs[run.run_id] = run

        # Start a background thread to capture the log output.
        # NOTE(review): the thread appends to `run.logs` while `StreamLogs`
        # reads it — relies on list append being atomic; confirm acceptable.
        capture_thread = threading.Thread(
            target=_capture_logs, args=(run,), daemon=True
        )
        capture_thread.start()

        return StartRunResponse(run_id=run.run_id)

    def StreamLogs(  # pylint: disable=C0103
        self, request: StreamLogsRequest, context: grpc.ServicerContext
    ) -> Generator[StreamLogsResponse, Any, None]:
        """Get logs.

        Yields one `StreamLogsResponse` per captured log line, polling for
        new lines until the client disconnects or the run finishes.
        """
        log(INFO, "ExecServicer.StreamLogs")

        # Exit if `run_id` not found
        if request.run_id not in self.runs:
            context.abort(grpc.StatusCode.NOT_FOUND, "Run ID not found")

        last_sent_index = 0
        while context.is_active():
            # Yield n'th row of logs, if n'th row < len(logs)
            logs = self.runs[request.run_id].logs
            for i in range(last_sent_index, len(logs)):
                yield StreamLogsResponse(log_output=logs[i])
            last_sent_index = len(logs)

            # Wait for and continue to yield more log responses only if the
            # run isn't completed yet. If the run is finished, the entire log
            # is returned at this point and the server ends the stream.
            if self.runs[request.run_id].proc.poll() is not None:
                log(INFO, "All logs for run ID `%s` returned", request.run_id)
                context.set_code(grpc.StatusCode.OK)
                # Cancelling makes `context.is_active()` false, ending the loop.
                context.cancel()

            time.sleep(1.0)  # Sleep briefly to avoid busy waiting
def _capture_logs(
    run: RunTracker,
) -> None:
    """Mirror the run's stdout/stderr and record each non-empty line.

    Loops until the subprocess exits: output is echoed to this process's
    stdout in real time and appended to ``run.logs``. On exit, both pipes
    are closed to prevent blocking.
    """
    proc = run.proc
    while True:
        # Explicitly check if Popen.poll() is None. Required for `pytest`.
        if proc.poll() is not None:
            # Subprocess finished — close std* to prevent blocking.
            log(INFO, "Subprocess finished, exiting log capture")
            for stream in (proc.stdout, proc.stderr):
                if stream:
                    stream.close()
            break

        # Block (up to SELECT_TIMEOUT) until a pipe has data to read.
        readable, _, _ = select.select(
            [proc.stdout, proc.stderr],
            [],
            [],
            SELECT_TIMEOUT,
        )
        for stream in readable:
            # Echo and flush immediately so output is visible in real time.
            raw_line = stream.readline()
            sys.stdout.write(raw_line)
            sys.stdout.flush()
            # Record the line (without the trailing newline) if non-empty.
            stripped = raw_line.rstrip()
            if stripped:
                run.logs.append(f"{stripped}")
def test_start_run() -> None:
    """Test StartRun method of ExecServicer."""
    expected_run = MagicMock()
    expected_run.run_id = 10
    with subprocess.Popen(
        ["echo", "success"],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
    ) as proc:
        expected_run.proc = proc

        # The executor ignores its arguments and hands back the mock run.
        executor = MagicMock()
        executor.start_run = MagicMock(return_value=expected_run)

        request = StartRunRequest()
        request.fab.content = b"test"

        # Exercise the servicer directly, bypassing the gRPC transport.
        servicer = ExecServicer(executor=executor)
        response = servicer.StartRun(request, MagicMock())

        assert response.run_id == 10


def test_capture_logs() -> None:
    """Test capture_logs function."""
    tracker = Mock()
    tracker.logs = []
    with subprocess.Popen(
        ["echo", "success"],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
    ) as proc:
        tracker.proc = proc
        _capture_logs(tracker)

    # The single line of output must have been captured, newline-stripped.
    assert tracker.logs == ["success"]
@dataclass
class RunTracker:
    """Track a Flower run (composed of a run_id and the associated process)."""

    # Unique identifier of the run
    run_id: int
    # Subprocess executing the run
    proc: Popen  # type: ignore
    # Captured log lines, appended to while the process runs
    logs: list[str] = field(default_factory=list)


class Executor(ABC):
    """Execute and monitor a Flower run.

    Plugin interface implemented by concrete engines (e.g. deployment,
    simulation) and driven by the SuperExec servicer.
    """

    @abstractmethod
    def set_config(
        self,
        config: UserConfig,
    ) -> None:
        """Register provided config as class attributes.

        Parameters
        ----------
        config : UserConfig
            A dictionary for configuration values.
        """

    @abstractmethod
    def start_run(
        self,
        fab_file: bytes,
        override_config: UserConfig,
        federation_config: UserConfig,
    ) -> Optional[RunTracker]:
        """Start a run using the given Flower FAB ID and version.

        This method creates a new run on the SuperLink, returns its run_id
        and also starts the run execution.

        Parameters
        ----------
        fab_file : bytes
            The Flower App Bundle file bytes.
        override_config: UserConfig
            The config overrides dict sent by the user (using `flwr run`).
        federation_config: UserConfig
            The federation options dict sent by the user (using `flwr run`).

        Returns
        -------
        Optional[RunTracker]
            The run_id and the associated process of the run created by the
            SuperLink, or `None` if it fails.
        """
def _user_config_to_str(user_config: UserConfig) -> str:
    """Convert override user config to a comma-separated `key=value` string.

    Booleans are lowered to `true`/`false`, numbers are rendered as-is,
    and strings are double-quoted.

    Raises
    ------
    ValueError
        If a value is not of type `bool`, `int`, `float` or `str`.
    """
    user_config_list_str = []
    for key, value in user_config.items():
        # `bool` must be tested before `int` — bool is a subclass of int.
        if isinstance(value, bool):
            user_config_list_str.append(f"{key}={str(value).lower()}")
        elif isinstance(value, (int, float)):
            user_config_list_str.append(f"{key}={value}")
        elif isinstance(value, str):
            user_config_list_str.append(f'{key}="{value}"')
        else:
            raise ValueError(
                "Only types `bool`, `float`, `int` and `str` are supported"
            )

    user_config_str = ",".join(user_config_list_str)
    return user_config_str


class SimulationEngine(Executor):
    """Simulation engine executor.

    Parameters
    ----------
    num_supernodes: Optional[int] (default: None)
        Total number of nodes to involve in the simulation.
    verbose: Optional[bool] (default: False)
        Whether to forward the subprocess output to the console.
    """

    def __init__(
        self,
        num_supernodes: Optional[int] = None,
        verbose: Optional[bool] = False,
    ) -> None:
        self.num_supernodes = num_supernodes
        self.verbose = verbose

    @override
    def set_config(
        self,
        config: UserConfig,
    ) -> None:
        """Set executor config arguments.

        Parameters
        ----------
        config : UserConfig
            A dictionary for configuration values.
            Supported configuration key/value pairs:
            - "num-supernodes": int
                Number of nodes to register for the simulation.
            - "verbose": bool
                Set verbosity of logs.

        Raises
        ------
        ValueError
            If a value has the wrong type, or if `num-supernodes` is left
            unset both here and in the constructor.
        """
        if num_supernodes := config.get("num-supernodes"):
            if not isinstance(num_supernodes, int):
                raise ValueError("The `num-supernodes` value should be of type `int`.")
            self.num_supernodes = num_supernodes
        elif self.num_supernodes is None:
            log(
                ERROR,
                "To start a run with the simulation plugin, please specify "
                "the number of SuperNodes. This can be done by using the "
                "`--executor-config` argument when launching the SuperExec.",
            )
            raise ValueError(
                "`num-supernodes` must not be `None`, it must be a valid "
                "positive integer."
            )

        # Fix: compare against `None` so an explicit `verbose=false` is
        # honored — a plain truthiness test silently ignored `False`.
        if (verbose := config.get("verbose")) is not None:
            if not isinstance(verbose, bool):
                # Fix: the value is validated as a `bool`, so say so
                # (the old message claimed it must be a string).
                raise ValueError("The `verbose` value should be of type `bool`.")
            self.verbose = verbose

    # pylint: disable=too-many-locals
    @override
    def start_run(
        self,
        fab_file: bytes,
        override_config: UserConfig,
        federation_config: UserConfig,
    ) -> Optional[RunTracker]:
        """Start run using the Flower Simulation Engine.

        Installs the FAB, generates a run ID locally (there is no SuperLink
        in simulation), and launches `flower-simulation` as a subprocess.
        Returns `None` if anything fails (the error is logged).
        """
        if self.num_supernodes is None:
            # Fix: the old message concatenated as "...num-supernodes=N`to
            # the..." — a space was missing between the string fragments.
            raise ValueError(
                "Error in `SuperExec` (`SimulationEngine` executor):\n\n"
                "`num-supernodes` must not be `None`, it must be a valid "
                "positive integer. In order to start this simulation executor "
                "with a specified number of `SuperNodes`, you can either provide "
                "a `--executor` that has been initialized with a number of nodes "
                "to the `flower-superexec` CLI, or `--executor-config "
                "num-supernodes=N` to the `flower-superexec` CLI."
            )
        try:

            # Install FAB to flwr dir
            fab_path = install_from_fab(fab_file, None, True)

            # Install FAB Python package; silence pip unless verbose
            subprocess.run(
                [sys.executable, "-m", "pip", "install", "--no-deps", str(fab_path)],
                stdout=None if self.verbose else subprocess.DEVNULL,
                stderr=None if self.verbose else subprocess.DEVNULL,
                check=True,
            )

            # Load and validate config
            config, errors, warnings = load_and_validate(fab_path / "pyproject.toml")
            if errors:
                raise ValueError(errors)

            if warnings:
                log(WARN, warnings)

            if config is None:
                raise ValueError(
                    "Config extracted from FAB's pyproject.toml is not valid"
                )

            # Unflatten the federation config sent by the user
            federation_config_flat = unflatten_dict(federation_config)

            # Per-run values take precedence over the executor defaults
            num_supernodes = federation_config_flat.get(
                "num-supernodes", self.num_supernodes
            )
            backend_cfg = federation_config_flat.get("backend", {})
            verbose: Optional[bool] = federation_config_flat.get("verbose")

            # In Simulation there is no SuperLink, still we create a run_id
            run_id = generate_rand_int_from_bytes(RUN_ID_NUM_BYTES)
            log(INFO, "Created run %s", str(run_id))

            # Prepare command (fix: redundant f-string wrappers replaced
            # with plain `str()` calls)
            command = [
                "flower-simulation",
                "--app",
                str(fab_path),
                "--num-supernodes",
                str(num_supernodes),
                "--run-id",
                str(run_id),
            ]

            if backend_cfg:
                # Stringify as JSON
                command.extend(["--backend-config", json.dumps(backend_cfg)])

            if verbose:
                command.extend(["--verbose"])

            if override_config:
                override_config_str = _user_config_to_str(override_config)
                command.extend(["--run-config", override_config_str])

            # Start Simulation
            proc = subprocess.Popen(  # pylint: disable=consider-using-with
                command,
                text=True,
            )

            log(INFO, "Started run %s", str(run_id))

            return RunTracker(
                run_id=run_id,
                proc=proc,
            )

        # pylint: disable-next=broad-except
        except Exception as e:
            # Best-effort contract: failures are logged, not raised.
            log(ERROR, "Could not start run: %s", str(e))
            return None


# Default executor instance loaded by `flower-superexec`.
executor = SimulationEngine()
def _get_file_creation_year(filepath: str) -> str:
    """Return the year of the git commit that first added `filepath`.

    Assumes the file is tracked by git; for an untracked file `git log`
    prints nothing and the indexing below raises IndexError.
    """
    result = subprocess.run(
        ["git", "log", "--diff-filter=A", "--format=%ai", "--", filepath],
        stdout=subprocess.PIPE,
        text=True,
        check=True,
    )
    # `git log` lists newest-first, so the last line is the first (adding)
    # commit — i.e. the file's creation date.
    date_str = result.stdout.splitlines()[-1]
    creation_year = date_str.split("-")[0]  # ISO date starts with the year
    return creation_year


def _check_copyright(dir_list: List[str]) -> None:
    """Exit with status 1 if any Python file lacks the expected notice.

    Parameters
    ----------
    dir_list : List[str]
        Directories whose top-level `*.py` files are checked.
    """
    warning_list = []
    for valid_dir in dir_list:
        if "proto" in valid_dir:
            # Generated protobuf code is exempt
            continue

        dir_path = Path(valid_dir)
        for py_file in dir_path.glob("*.py"):
            creation_year = _get_file_creation_year(str(py_file.absolute()))
            expected_copyright = COPYRIGHT_FORMAT.format(creation_year)

            if expected_copyright not in py_file.read_text():
                warning_message = "- " + str(py_file)
                warning_list.append(warning_message)

    if len(warning_list) > 0:
        print("Missing or incorrect copyright notice in the following files:")
        for warning in warning_list:
            print(warning)
        sys.exit(1)


if __name__ == "__main__":
    # Fix: `len(sys.argv) == 0` could never trigger because argv[0] is
    # always the script name; require at least one explicit argument.
    if len(sys.argv) < 2:
        raise Exception(  # pylint: disable=W0719
            "Please provide at least one directory path relative "
            "to your current working directory."
        )
    # Fix: skip argv[0] (the script itself) instead of treating it as input.
    for path_arg in sys.argv[1:]:
        abs_path: str = os.path.abspath(os.path.join(os.getcwd(), path_arg))
        __, init_dirs = get_init_dir_list_and_warnings(abs_path)
        _check_copyright(init_dirs)
def _insert_or_edit_copyright(py_file: Path) -> None:
    """Ensure `py_file` starts with the expected copyright notice.

    If the file starts with an outdated notice, the old header is removed
    (up to and including the `===` separator line) before the correct
    notice is inserted at the top.
    """
    contents = py_file.read_text()
    lines = contents.splitlines()
    creation_year = _get_file_creation_year(str(py_file.absolute()))
    expected_copyright = COPYRIGHT_FORMAT.format(creation_year)

    if expected_copyright not in contents:
        # Fix: guard against empty files — `lines[0]` raised IndexError.
        if lines and "Copyright" in lines[0]:
            end_index = 0
            for idx, line in enumerate(lines):
                # The last line of COPYRIGHT_FORMAT is the `===` separator;
                # everything up to it belongs to the old header.
                if (
                    line.strip()
                    == COPYRIGHT_FORMAT.rsplit("\n", maxsplit=1)[-1].strip()
                ):
                    end_index = idx + 1
                    break
            lines = lines[end_index:]

        lines.insert(0, expected_copyright)
        py_file.write_text("\n".join(lines) + "\n")


def _fix_copyright(dir_list: List[str]) -> None:
    """Rewrite the copyright notice of every Python file in `dir_list`."""
    for valid_dir in dir_list:
        if "proto" in valid_dir:
            # Generated protobuf code is exempt
            continue

        dir_path = Path(valid_dir)
        for py_file in dir_path.glob("*.py"):
            _insert_or_edit_copyright(py_file)


if __name__ == "__main__":
    # Fix: `len(sys.argv) == 0` could never trigger because argv[0] is
    # always the script name; require at least one explicit argument.
    if len(sys.argv) < 2:
        raise Exception(  # pylint: disable=W0719
            "Please provide at least one directory path relative "
            "to your current working directory."
        )
    # Fix: skip argv[0] (the script itself) instead of treating it as input.
    for path_arg in sys.argv[1:]:
        abs_path: str = os.path.abspath(os.path.join(os.getcwd(), path_arg))
        __, init_dirs = get_init_dir_list_and_warnings(abs_path)
        _fix_copyright(init_dirs)
def get_all_var_list(init_dir: str) -> Tuple[Path, List[str], List[str]]:
    """Get the __all__ list of a __init__.py file.

    Returns the path of the '__init__.py' file of the given dir, the
    parsed `__all__` list, and the raw source lines that define it.
    """
    init_file = Path(init_dir) / "__init__.py"
    all_lines: List[str] = []
    all_list: List[str] = []
    in_all_def = False
    for raw_line in init_file.read_text().splitlines():
        stripped = raw_line.strip()
        # Start capturing at the `__all__` assignment...
        in_all_def = in_all_def or stripped.startswith("__all__")
        if not in_all_def:
            continue
        all_lines.append(raw_line)
        # ...and stop at the closing bracket of the list literal.
        if stripped.endswith("]"):
            break

    if all_lines:
        # Everything right of the first '=' is a Python list literal.
        joined = "".join(all_lines)
        all_list = ast.literal_eval(joined.split("=", 1)[1].strip())

    return init_file, all_list, all_lines


def check_all_init_files(dir_list: List[str]) -> None:
    """Exit with status 1 if any `__all__` list is not alphabetically sorted."""
    offenders: List[str] = []

    for init_dir in dir_list:
        init_file, all_list, _ = get_all_var_list(init_dir)
        if all_list and all_list != sorted(all_list):
            offenders.append("- " + str(init_file))

    if offenders:
        print(
            "'__all__' lists in the following '__init__.py' files are "
            "incorrectly sorted:"
        )
        for offender in offenders:
            print(offender)
        sys.exit(1)
def fix_all_init_files(dir_list: List[str]) -> None:
    """Sort the __all__ variables that are in __init__.py files.

    For every directory in `dir_list`, if its `__init__.py` defines an
    unsorted `__all__`, the list is rewritten in sorted order in place and
    the file is reformatted with black. The paths of all fixed files are
    printed at the end.
    """
    warning_list = []

    for init_dir in dir_list:
        init_file, all_list, all_lines = get_all_var_list(init_dir)

        if all_list:
            sorted_all_list = sorted(all_list)
            if not all_list == sorted_all_list:
                warning_message = "- " + str(init_dir)
                warning_list.append(warning_message)

                old_all_lines = "\n".join(all_lines)
                # Rebuild the assignment: keep everything left of the first
                # '=', substitute the sorted list, and swap the closing
                # bracket for ',]' — the trailing comma makes black keep the
                # list expanded one-entry-per-line (magic trailing comma).
                new_all_lines = (
                    old_all_lines.split("=", 1)[0]
                    + "= "
                    + str(sorted_all_list)[:-1]
                    + ",]"
                )

                # Textual replacement of the exact original lines; assumes
                # the `__all__` block appears verbatim exactly once.
                new_content = init_file.read_text().replace(
                    old_all_lines, new_all_lines
                )

                # Write the fixed content back to the file
                init_file.write_text(new_content)

                # Format the file with black
                black.format_file_in_place(
                    init_file,
                    fast=False,
                    mode=black.FileMode(),
                    write_back=black.WriteBack.YES,
                )

    if len(warning_list) > 0:
        print("'__all__' lists in the following '__init__.py' files have been sorted:")
        for warning in warning_list:
            print(warning)
+ ) + for i, _ in enumerate(sys.argv): + abs_path: str = os.path.abspath(os.path.join(os.getcwd(), sys.argv[i])) + warnings, init_dirs = get_init_dir_list_and_warnings(abs_path) + fix_all_init_files(init_dirs) diff --git a/src/py/flwr_tool/protoc_test.py b/src/py/flwr_tool/protoc_test.py index 8dcf4c6474d6..f0784a4498d2 100644 --- a/src/py/flwr_tool/protoc_test.py +++ b/src/py/flwr_tool/protoc_test.py @@ -28,4 +28,4 @@ def test_directories() -> None: def test_proto_file_count() -> None: """Test if the correct number of proto files were captured by the glob.""" - assert len(PROTO_FILES) == 8 + assert len(PROTO_FILES) == 14 diff --git a/taplo.toml b/taplo.toml new file mode 100644 index 000000000000..23531011a9f7 --- /dev/null +++ b/taplo.toml @@ -0,0 +1,24 @@ +include = ["**/*.toml"] +exclude = ["baselines/**", "datasets/**"] + +[formatting] +align_comments = false +# Defaults below +align_entries = false +array_trailing_comma = true +array_auto_expand = true +array_auto_collapse = true +compact_arrays = true +compact_inline_tables = false +inline_table_expand = true +compact_entries = false +column_width = 80 +indent_tables = false +indent_entries = false +indent_string = " " +trailing_newline = true +reorder_keys = false +reorder_arrays = false +reorder_inline_tables = false +allowed_blank_lines = 2 +crlf = false